From ca8a37bc2290fac9bcdac199ff42d1fcf6245132 Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 3 Aug 2023 01:21:08 +0800 Subject: [PATCH 0001/1393] feat: move in struct StakeTable --- hotshot-stake-table/Cargo.toml | 36 + hotshot-stake-table/src/lib.rs | 5 + hotshot-stake-table/src/mt_based.rs | 331 ++++++++ hotshot-stake-table/src/mt_based/config.rs | 21 + hotshot-stake-table/src/mt_based/internal.rs | 767 +++++++++++++++++++ 5 files changed, 1160 insertions(+) create mode 100644 hotshot-stake-table/Cargo.toml create mode 100644 hotshot-stake-table/src/lib.rs create mode 100644 hotshot-stake-table/src/mt_based.rs create mode 100644 hotshot-stake-table/src/mt_based/config.rs create mode 100644 hotshot-stake-table/src/mt_based/internal.rs diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml new file mode 100644 index 0000000000..36bd1b168a --- /dev/null +++ b/hotshot-stake-table/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "hotshot-stake-table" +description = "Stake table implementations for HotShot" +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +ark-bn254 = "0.4.0" +ark-ff = "0.4.0" +ark-serialize = "0.4.0" +ark-std = { version = "0.4.0", default-features = false } +bincode = { version = "1.1.3" } +bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "atomic", "serde"] } +digest = { version = "0.10" } +displaydoc = { version = "0.2.3", default-features = false } +ethereum-types = { version = "0.14.1", features = ["impl-serde"] } +generic-array = "0.14.7" +hotshot-types = { path = "../../types" } +jf-primitives = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +jf-relation = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } +tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } +typenum = { version = "1.16.0" } + +[dev-dependencies] +rand_chacha = { version = "0.3.1", default-features = false } + +[features] +default = ["parallel"] +std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] +parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] +# TODO: (alex) what other features should I add to follow the same pattern as other crates? +full-ci = ["std", "parallel"] diff --git a/hotshot-stake-table/src/lib.rs b/hotshot-stake-table/src/lib.rs new file mode 100644 index 0000000000..6ba4d305fb --- /dev/null +++ b/hotshot-stake-table/src/lib.rs @@ -0,0 +1,5 @@ +//! This crate contains some stake table implementations for HotShot system. +#![deny(warnings)] +#![deny(missing_docs)] + +pub mod mt_based; diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs new file mode 100644 index 0000000000..56d2ae24c3 --- /dev/null +++ b/hotshot-stake-table/src/mt_based.rs @@ -0,0 +1,331 @@ +//! A stake table implementation that's based on Append-only Merkle Tree. + +mod config; +mod internal; + +use self::internal::{to_merkle_path, Key, MerkleCommitment, MerkleProof, PersistentMerkleNode}; +use ark_std::{collections::HashMap, rand::SeedableRng, sync::Arc}; +use digest::crypto_common::rand_core::CryptoRngCore; +use ethereum_types::{U256, U512}; +use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; +use serde::{Deserialize, Serialize}; + +/// Locally maintained stake table, generic over public key type `K`. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "K: Key")] +pub struct StakeTable { + /// The most up-to-date stake table, where the incoming transactions shall be performed on. + head: Arc>, + /// The snapshot of stake table at the beginning of the current epoch + epoch_start: Arc>, + /// The stake table used for leader election. + last_epoch_start: Arc>, + + /// Height of the underlying merkle tree, determines the capacity. + /// The capacity is `TREE_BRANCH.pow(height)`. + height: usize, + + /// The mapping from public keys to their location in the Merkle tree. + #[serde(skip)] + mapping: HashMap, +} + +impl StakeTableScheme for StakeTable { + type Key = K; + type Amount = U256; + type Commitment = MerkleCommitment; + type LookupProof = MerkleProof; + type IntoIter = internal::IntoIter; + + fn register( + &mut self, + new_key: Self::Key, + amount: Self::Amount, + ) -> Result<(), StakeTableError> { + match self.mapping.get(&new_key) { + Some(_) => Err(StakeTableError::ExistingKey), + None => { + let pos = self.mapping.len(); + self.head = self.head.register( + self.height, + &to_merkle_path(pos, self.height), + &new_key, + amount, + )?; + self.mapping.insert(new_key, pos); + Ok(()) + } + } + } + + fn deregister(&mut self, _existing_key: &Self::Key) -> Result<(), StakeTableError> { + // TODO: (alex) work on this in a future PR + unimplemented!() + } + + fn commitment(&self, version: SnapshotVersion) -> Result { + let root = Self::get_root(self, version)?; + Ok(MerkleCommitment::new( + root.commitment(), + self.height, + root.num_keys(), + )) + } + + fn total_stake(&self, version: SnapshotVersion) -> Result { + let root = Self::get_root(self, version)?; + Ok(root.total_stakes()) + } + + fn len(&self, version: SnapshotVersion) -> Result { + let root = Self::get_root(self, version)?; + Ok(root.num_keys()) + } + + fn contains_key(&self, key: &Self::Key) -> bool { + self.mapping.contains_key(key) + } + + fn lookup( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError> { + let root = Self::get_root(self, version)?; + + let proof = match self.mapping.get(key) { + Some(index) => { + let branches = to_merkle_path(*index, self.height); + root.lookup(self.height, &branches) + } + None => Err(StakeTableError::KeyNotFound), + }?; + let amount = *proof.get_value().ok_or(StakeTableError::KeyNotFound)?; + Ok((amount, proof)) + } + + fn simple_lookup( + &self, + version: SnapshotVersion, + key: &K, + ) -> Result { + let root = Self::get_root(self, version)?; + match self.mapping.get(key) { + Some(index) => { + let branches = to_merkle_path(*index, self.height); + root.simple_lookup(self.height, &branches) + } + None => Err(StakeTableError::KeyNotFound), + } + } + + fn update( + &mut self, + key: &Self::Key, + delta: Self::Amount, + negative: bool, + ) -> Result { + match self.mapping.get(key) { + Some(pos) => { + let value: U256; + (self.head, value) = self.head.update( + self.height, + &to_merkle_path(*pos, self.height), + key, + delta, + negative, + )?; + Ok(value) + } + None => Err(StakeTableError::KeyNotFound), + } + } + + /// Almost uniformly samples a key weighted by its stake from the + /// last_epoch_start stake table + fn sample( + &self, + rng: &mut (impl SeedableRng + CryptoRngCore), + ) -> Option<(&Self::Key, &Self::Amount)> { + let mut bytes = [0u8; 64]; + rng.fill_bytes(&mut bytes); + let r = U512::from_big_endian(&bytes); + let m = U512::from(self.last_epoch_start.total_stakes()); + let pos: U256 = (r % m).try_into().unwrap(); // won't fail + self.last_epoch_start.get_key_by_stake(pos) + } + + fn iter(&self, version: SnapshotVersion) -> Result { + let root = Self::get_root(self, version)?; + Ok(internal::IntoIter::new(root)) + } +} + +impl StakeTable { + /// Initiating an empty stake table. + /// Overall capacity is `TREE_BRANCH.pow(height)`. + pub fn new(height: usize) -> Self { + Self { + head: Arc::new(PersistentMerkleNode::Empty), + epoch_start: Arc::new(PersistentMerkleNode::Empty), + last_epoch_start: Arc::new(PersistentMerkleNode::Empty), + height, + mapping: HashMap::new(), + } + } + + // returns the root of stake table at `version` + fn get_root( + &self, + version: SnapshotVersion, + ) -> Result>, StakeTableError> { + match version { + SnapshotVersion::Head => Ok(Arc::clone(&self.head)), + SnapshotVersion::EpochStart => Ok(Arc::clone(&self.epoch_start)), + SnapshotVersion::LastEpochStart => Ok(Arc::clone(&self.last_epoch_start)), + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + } + } + + /// Update the stake table when the epoch number advances, should be manually called. + pub fn advance(&mut self) { + self.last_epoch_start = self.epoch_start.clone(); + self.epoch_start = self.head.clone(); + } + + /// Set the stake withheld by `key` to be `value`. + /// Return the previous stake if succeed. + pub fn set_value(&mut self, key: &K, value: U256) -> Result { + match self.mapping.get(key) { + Some(pos) => { + let old_value: U256; + (self.head, old_value) = self.head.set_value( + self.height, + &to_merkle_path(*pos, self.height), + key, + value, + )?; + Ok(old_value) + } + None => Err(StakeTableError::KeyNotFound), + } + } +} + +#[cfg(test)] +mod tests { + use super::StakeTable; + use ark_std::{rand::SeedableRng, vec::Vec}; + use ethereum_types::U256; + use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; + + // Hotshot use bn254::Fq as key type. + type Key = ark_bn254::Fq; + + #[test] + fn test_stake_table() -> Result<(), StakeTableError> { + let mut st = StakeTable::new(3); + let keys = (0..10).map(Key::from).collect::>(); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(0)); + + // Registering keys + keys.iter() + .take(4) + .for_each(|&key| st.register(key, U256::from(100)).unwrap()); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(400)); + assert_eq!(st.total_stake(SnapshotVersion::EpochStart)?, U256::from(0)); + assert_eq!( + st.total_stake(SnapshotVersion::LastEpochStart)?, + U256::from(0) + ); + // set to zero for futher sampling test + assert_eq!( + st.set_value(&keys[1], U256::from(0)).unwrap(), + U256::from(100) + ); + st.advance(); + keys.iter() + .skip(4) + .take(3) + .for_each(|&key| st.register(key, U256::from(100)).unwrap()); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(600)); + assert_eq!( + st.total_stake(SnapshotVersion::EpochStart)?, + U256::from(300) + ); + assert_eq!( + st.total_stake(SnapshotVersion::LastEpochStart)?, + U256::from(0) + ); + st.advance(); + keys.iter() + .skip(7) + .for_each(|&key| st.register(key, U256::from(100)).unwrap()); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(900)); + assert_eq!( + st.total_stake(SnapshotVersion::EpochStart)?, + U256::from(600) + ); + assert_eq!( + st.total_stake(SnapshotVersion::LastEpochStart)?, + U256::from(300) + ); + + // No duplicate register + assert!(st.register(keys[0], U256::from(100)).is_err()); + // The 9-th key is still in head stake table + assert!(st.lookup(SnapshotVersion::EpochStart, &keys[9]).is_err()); + assert!(st.lookup(SnapshotVersion::EpochStart, &keys[5]).is_ok()); + // The 6-th key is still frozen + assert!(st + .lookup(SnapshotVersion::LastEpochStart, &keys[6]) + .is_err()); + assert!(st.lookup(SnapshotVersion::LastEpochStart, &keys[2]).is_ok()); + + // Set value shall return the old value + assert_eq!( + st.set_value(&keys[0], U256::from(101)).unwrap(), + U256::from(100) + ); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(901)); + assert_eq!( + st.total_stake(SnapshotVersion::EpochStart)?, + U256::from(600) + ); + + // Update that results in a negative stake + assert!(st.update(&keys[0], U256::from(1000), true).is_err()); + // Update should return the updated stake + assert_eq!( + st.update(&keys[0], U256::from(1), true).unwrap(), + U256::from(100) + ); + assert_eq!( + st.update(&keys[0], U256::from(100), false).unwrap(), + U256::from(200) + ); + + // Testing membership proof + let proof = st.lookup(SnapshotVersion::EpochStart, &keys[5])?.1; + assert!(proof + .verify(&st.commitment(SnapshotVersion::EpochStart)?) + .is_ok()); + // Membership proofs are tied with a specific version + assert!(proof + .verify(&st.commitment(SnapshotVersion::Head)?) + .is_err()); + assert!(proof + .verify(&st.commitment(SnapshotVersion::LastEpochStart)?) + .is_err()); + + // Random test for sampling keys + let mut rng = rand_chacha::ChaCha20Rng::seed_from_u64(41u64); + for _ in 0..100 { + let (_key, value) = st.sample(&mut rng).unwrap(); + // Sampled keys should have positive stake + assert!(value > &U256::from(0)); + } + + Ok(()) + } +} diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs new file mode 100644 index 0000000000..eacc80a6d5 --- /dev/null +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -0,0 +1,21 @@ +//! Config file for stake table +use ark_ff::PrimeField; +use ark_std::vec; +use ethereum_types::U256; +use jf_primitives::crhf::FixedLengthRescueCRHF; + +/// Branch of merkle tree. +/// Set to 3 because we are currently using RATE-3 rescue hash function +pub(crate) const TREE_BRANCH: usize = 3; + +/// Internal type of Merkle node value(commitment) +pub(crate) type FieldType = ark_bn254::Fq; +/// Hash algorithm used in Merkle tree, using a RATE-3 rescue +pub(crate) type Digest = FixedLengthRescueCRHF; + +/// convert a U256 to a field element. +pub(crate) fn u256_to_field(v: &U256) -> F { + let mut bytes = vec![0u8; 32]; + v.to_little_endian(&mut bytes); + F::from_le_bytes_mod_order(&bytes) +} diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs new file mode 100644 index 0000000000..ffbe7192a4 --- /dev/null +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -0,0 +1,767 @@ +//! Utilities and internals for maintaining a local stake table + +use super::config::{u256_to_field, Digest, FieldType, TREE_BRANCH}; +use ark_ff::{Field, PrimeField}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{hash::Hash, sync::Arc, vec, vec::Vec}; +use ethereum_types::U256; +use hotshot_types::traits::stake_table::StakeTableError; +use jf_primitives::{crhf::CRHF, signatures::bls_over_bn254}; +use jf_utils::canonical; +use serde::{Deserialize, Serialize}; +use tagged_base64::tagged; + +/// Common trait bounds for generic key type `K` for [`PersistentMerkleNode`] +pub trait Key: + Clone + CanonicalSerialize + CanonicalDeserialize + PartialEq + Eq + IntoFields + Hash +{ +} +impl Key for T where + T: Clone + + CanonicalSerialize + + CanonicalDeserialize + + PartialEq + + Eq + + IntoFields + + Hash +{ +} + +/// A trait that converts into a field element. +/// Help avoid "cannot impl foreign traits on foreign types" problem +pub trait IntoFields { + fn into_fields(self) -> [F; 2]; +} + +impl IntoFields for FieldType { + fn into_fields(self) -> [FieldType; 2] { + [FieldType::default(), self] + } +} + +impl IntoFields for bls_over_bn254::VerKey { + fn into_fields(self) -> [FieldType; 2] { + let bytes = jf_utils::to_bytes!(&self.to_affine()).unwrap(); + let x = ::from_le_bytes_mod_order(&bytes[..32]); + let y = ::from_le_bytes_mod_order(&bytes[32..]); + [x, y] + } +} + +/// A persistent merkle tree tailored for the stake table. +/// Generic over the key type `K` +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(bound = "K: Key")] +pub(crate) enum PersistentMerkleNode { + Empty, + Branch { + #[serde(with = "canonical")] + comm: FieldType, + children: [Arc>; TREE_BRANCH], + num_keys: usize, + total_stakes: U256, + }, + Leaf { + #[serde(with = "canonical")] + comm: FieldType, + #[serde(with = "canonical")] + key: K, + value: U256, + }, +} + +/// A compressed Merkle node for Merkle path +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum MerklePathEntry { + Branch { + pos: usize, + #[serde(with = "canonical")] + siblings: [FieldType; TREE_BRANCH - 1], + }, + Leaf { + key: K, + value: U256, + }, +} +/// Path from a Merkle root to a leaf +pub type MerklePath = Vec>; + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +/// An existential proof +pub struct MerkleProof { + /// Index for the given key + pub index: usize, + /// A Merkle path for the given leaf + pub path: MerklePath, +} + +impl MerkleProof { + pub fn tree_height(&self) -> usize { + self.path.len() - 1 + } + + pub fn index(&self) -> &usize { + &self.index + } + + pub fn get_key(&self) -> Option<&K> { + match self.path.first() { + Some(MerklePathEntry::Leaf { key, value: _ }) => Some(key), + _ => None, + } + } + + pub fn get_value(&self) -> Option<&U256> { + match self.path.first() { + Some(MerklePathEntry::Leaf { key: _, value }) => Some(value), + _ => None, + } + } + + pub fn get_key_value(&self) -> Option<(&K, &U256)> { + match self.path.first() { + Some(MerklePathEntry::Leaf { key, value }) => Some((key, value)), + _ => None, + } + } + + pub fn compute_root(&self) -> Result { + match self.path.first() { + Some(MerklePathEntry::Leaf { key, value }) => { + let mut input = [FieldType::default(); 3]; + input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[2] = u256_to_field(value); + let init = Digest::evaluate(input).map_err(|_| StakeTableError::RescueError)?[0]; + self.path + .iter() + .skip(1) + .fold(Ok(init), |result, node| match node { + MerklePathEntry::Branch { pos, siblings } => match result { + Ok(comm) => { + let mut input = [FieldType::from(0); TREE_BRANCH]; + input[..*pos].copy_from_slice(&siblings[..*pos]); + input[*pos] = comm; + input[pos + 1..].copy_from_slice(&siblings[*pos..]); + let comm = Digest::evaluate(input) + .map_err(|_| StakeTableError::RescueError)?[0]; + Ok(comm) + } + Err(_) => unreachable!(), + }, + _ => Err(StakeTableError::MalformedProof), + }) + } + _ => Err(StakeTableError::MalformedProof), + } + } + + pub fn verify(&self, comm: &MerkleCommitment) -> Result<(), StakeTableError> { + if self.tree_height() != comm.tree_height() || !self.compute_root()?.eq(comm.digest()) { + Err(StakeTableError::VerificationError) + } else { + Ok(()) + } + } +} + +#[tagged("MERKLE_COMM")] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, CanonicalSerialize, CanonicalDeserialize)] +/// A succint commitment for Merkle tree +pub struct MerkleCommitment { + /// Merkle tree digest + comm: FieldType, + /// Height of a tree + height: usize, + /// Number of leaves + size: usize, +} + +impl MerkleCommitment { + pub fn new(comm: FieldType, height: usize, size: usize) -> Self { + Self { comm, height, size } + } + + pub fn digest(&self) -> &FieldType { + &self.comm + } + + pub fn tree_height(&self) -> usize { + self.height + } + + pub fn size(&self) -> usize { + self.size + } +} + +impl PersistentMerkleNode { + /// Returns the succint commitment of this subtree + pub fn commitment(&self) -> FieldType { + match self { + PersistentMerkleNode::Empty => FieldType::from(0), + PersistentMerkleNode::Branch { + comm, + children: _, + num_keys: _, + total_stakes: _, + } => *comm, + PersistentMerkleNode::Leaf { + comm, + key: _, + value: _, + } => *comm, + } + } + + /// Returns the total number of keys in this subtree + pub fn num_keys(&self) -> usize { + match self { + PersistentMerkleNode::Empty => 0, + PersistentMerkleNode::Branch { + comm: _, + children: _, + num_keys, + total_stakes: _, + } => *num_keys, + PersistentMerkleNode::Leaf { + comm: _, + key: _, + value: _, + } => 1, + } + } + + /// Returns the total stakes in this subtree + pub fn total_stakes(&self) -> U256 { + match self { + PersistentMerkleNode::Empty => U256::zero(), + PersistentMerkleNode::Branch { + comm: _, + children: _, + num_keys: _, + total_stakes, + } => *total_stakes, + PersistentMerkleNode::Leaf { + comm: _, + key: _, + value, + } => *value, + } + } + + /// Returns the stakes withhelded by a public key, None if the key is not registered. + pub fn simple_lookup(&self, height: usize, path: &[usize]) -> Result { + match self { + PersistentMerkleNode::Empty => Err(StakeTableError::KeyNotFound), + PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } => children[path[height - 1]].simple_lookup(height - 1, path), + PersistentMerkleNode::Leaf { + comm: _, + key: _, + value, + } => Ok(*value), + } + } + + /// Returns a Merkle proof to the given location + pub fn lookup(&self, height: usize, path: &[usize]) -> Result, StakeTableError> { + match self { + PersistentMerkleNode::Empty => Err(StakeTableError::KeyNotFound), + PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } => { + let pos = path[height - 1]; + let mut proof = children[pos].lookup(height - 1, path)?; + let siblings = children + .iter() + .enumerate() + .filter(|(i, _)| *i != pos) + .map(|(_, node)| node.commitment()) + .collect::>(); + proof.path.push(MerklePathEntry::Branch { + pos, + siblings: siblings.try_into().unwrap(), + }); + Ok(proof) + } + PersistentMerkleNode::Leaf { + comm: _, + key, + value, + } => Ok(MerkleProof { + index: from_merkle_path(path), + path: vec![MerklePathEntry::Leaf { + key: key.clone(), + value: *value, + }], + }), + } + } + + /// Imagine that the keys in this subtree is sorted, returns the first key such that + /// the prefix sum of withholding stakes is greater or equal the given `stake_number`. + /// Useful for key sampling weighted by withholding stakes + pub fn get_key_by_stake(&self, mut stake_number: U256) -> Option<(&K, &U256)> { + if stake_number >= self.total_stakes() { + None + } else { + match self { + PersistentMerkleNode::Empty => None, + PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } => { + let mut ptr = 0; + while stake_number >= children[ptr].total_stakes() { + stake_number -= children[ptr].total_stakes(); + ptr += 1; + } + children[ptr].get_key_by_stake(stake_number) + } + PersistentMerkleNode::Leaf { + comm: _, + key, + value, + } => Some((key, value)), + } + } + } + + /// Insert a new `key` into the Merkle tree + pub fn register( + &self, + height: usize, + path: &[usize], + key: &K, + value: U256, + ) -> Result, StakeTableError> { + if height == 0 { + if matches!(self, PersistentMerkleNode::Empty) { + let mut input = [FieldType::default(); 3]; + input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[2] = u256_to_field(&value); + Ok(Arc::new(PersistentMerkleNode::Leaf { + comm: Digest::evaluate(input).map_err(|_| StakeTableError::RescueError)?[0], + key: key.clone(), + value, + })) + } else { + Err(StakeTableError::ExistingKey) + } + } else { + let mut children = if let &PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } = &self + { + children.clone() + } else { + [0; TREE_BRANCH].map(|_| Arc::new(PersistentMerkleNode::Empty)) + }; + children[path[height - 1]] = + children[path[height - 1]].register(height - 1, path, key, value)?; + let num_keys = children.iter().map(|child| child.num_keys()).sum(); + let total_stakes = children + .iter() + .map(|child| child.total_stakes()) + .fold(U256::zero(), |sum, val| sum + val); + let comm = Digest::evaluate(children.clone().map(|child| child.commitment())) + .map_err(|_| StakeTableError::RescueError)?[0]; + Ok(Arc::new(PersistentMerkleNode::Branch { + comm, + children, + num_keys, + total_stakes, + })) + } + } + + /// Update the stake of the `key` with `(negative ? -1 : 1) * delta`. + /// Return the updated stake + pub fn update( + &self, + height: usize, + path: &[usize], + key: &K, + delta: U256, + negative: bool, + ) -> Result<(Arc, U256), StakeTableError> { + match self { + PersistentMerkleNode::Empty => Err(StakeTableError::KeyNotFound), + PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } => { + let mut children = children.clone(); + let value: U256; + (children[path[height - 1]], value) = + children[path[height - 1]].update(height - 1, path, key, delta, negative)?; + let num_keys = children.iter().map(|child| child.num_keys()).sum(); + let total_stakes = children + .iter() + .map(|child| child.total_stakes()) + .fold(U256::zero(), |sum, val| sum + val); + let comm = Digest::evaluate(children.clone().map(|child| child.commitment())) + .map_err(|_| StakeTableError::RescueError)?[0]; + Ok(( + Arc::new(PersistentMerkleNode::Branch { + comm, + children, + num_keys, + total_stakes, + }), + value, + )) + } + PersistentMerkleNode::Leaf { + comm: _, + key: node_key, + value: old_value, + } => { + if key == node_key { + let value = if negative { + old_value + .checked_sub(delta) + .ok_or(StakeTableError::InsufficientFund) + } else { + old_value + .checked_add(delta) + .ok_or(StakeTableError::StakeOverflow) + }?; + let mut input = [FieldType::default(); 3]; + input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[2] = u256_to_field(&value); + Ok(( + Arc::new(PersistentMerkleNode::Leaf { + comm: Digest::evaluate(input) + .map_err(|_| StakeTableError::RescueError)?[0], + key: key.clone(), + value, + }), + value, + )) + } else { + Err(StakeTableError::MismatchedKey) + } + } + } + } + + /// Set the stake of `key` to be `value`. + /// Return the previous stake + pub fn set_value( + &self, + height: usize, + path: &[usize], + key: &K, + value: U256, + ) -> Result<(Arc, U256), StakeTableError> { + match self { + PersistentMerkleNode::Empty => Err(StakeTableError::KeyNotFound), + PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } => { + let mut children = children.clone(); + let old_value: U256; + (children[path[height - 1]], old_value) = + children[path[height - 1]].set_value(height - 1, path, key, value)?; + let num_keys = children.iter().map(|child| child.num_keys()).sum(); + if num_keys == 0 { + Ok((Arc::new(PersistentMerkleNode::Empty), value)) + } else { + let total_stakes = children + .iter() + .map(|child| child.total_stakes()) + .fold(U256::zero(), |sum, val| sum + val); + let comm = Digest::evaluate(children.clone().map(|child| child.commitment())) + .map_err(|_| StakeTableError::RescueError)?[0]; + Ok(( + Arc::new(PersistentMerkleNode::Branch { + comm, + children, + num_keys, + total_stakes, + }), + old_value, + )) + } + } + PersistentMerkleNode::Leaf { + comm: _, + key: cur_key, + value: old_value, + } => { + if key == cur_key { + let mut input = [FieldType::default(); 3]; + input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[2] = u256_to_field(&value); + Ok(( + Arc::new(PersistentMerkleNode::Leaf { + comm: Digest::evaluate(input) + .map_err(|_| StakeTableError::RescueError)?[0], + key: key.clone(), + value, + }), + *old_value, + )) + } else { + Err(StakeTableError::MismatchedKey) + } + } + } + } +} + +/// An owning iterator over the (key, value) entries of a `PersistentMerkleNode` +/// Traverse using post-order: children from left to right, finally visit the current. +pub struct IntoIter { + unvisited: Vec>>, + num_visited: usize, +} + +impl IntoIter { + /// create a new merkle tree iterator from a `root`. + /// This (abstract) `root` can be an internal node of a larger tree, our iterator + /// will iterate over all of its children. + pub(crate) fn new(root: Arc>) -> Self { + Self { + unvisited: vec![root], + num_visited: 0, + } + } +} + +impl Iterator for IntoIter { + type Item = (K, U256); + fn next(&mut self) -> Option { + if self.unvisited.is_empty() { + return None; + } + + let visiting = (**self.unvisited.last()?).clone(); + match visiting { + PersistentMerkleNode::Empty => None, + PersistentMerkleNode::Leaf { + comm: _, + key, + value, + } => { + self.unvisited.pop(); + self.num_visited += 1; + Some((key, value)) + } + PersistentMerkleNode::Branch { + comm: _, + children, + num_keys: _, + total_stakes: _, + } => { + self.unvisited.pop(); + // put the left-most child to the last, so it is visited first. + self.unvisited.extend(children.into_iter().rev()); + self.next() + } + } + } +} + +impl IntoIterator for PersistentMerkleNode { + type Item = (K, U256); + type IntoIter = self::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + Self::IntoIter::new(Arc::new(self)) + } +} + +/// Convert an index to a list of Merkle path branches +pub fn to_merkle_path(idx: usize, height: usize) -> Vec { + let mut pos = idx; + let mut ret: Vec = vec![]; + for _ in 0..height { + ret.push(pos % TREE_BRANCH); + pos /= TREE_BRANCH; + } + ret +} + +/// Convert a list of Merkle path branches back to an index +pub fn from_merkle_path(path: &[usize]) -> usize { + path.iter() + .fold((0, 1), |(pos, mul), branch| { + (pos + mul * branch, mul * TREE_BRANCH) + }) + .0 +} + +#[cfg(test)] +mod tests { + use super::super::config; + use super::{to_merkle_path, PersistentMerkleNode}; + use ark_std::{ + rand::{Rng, RngCore}, + sync::Arc, + vec, + vec::Vec, + }; + use ethereum_types::U256; + use jf_utils::test_rng; + + type Key = ark_bn254::Fq; + + #[test] + fn test_persistent_merkle_tree() { + let height = 3; + let mut roots = vec![Arc::new(PersistentMerkleNode::::Empty)]; + let path = (0..10) + .map(|idx| to_merkle_path(idx, height)) + .collect::>(); + let keys = (0..10).map(Key::from).collect::>(); + // Insert key (0..10) with associated value 100 to the persistent merkle tree + for (i, key) in keys.iter().enumerate() { + roots.push( + roots + .last() + .unwrap() + .register(height, &path[i], key, U256::from(100)) + .unwrap(), + ); + } + // Check that if the insertion is perform correctly + for i in 0..10 { + assert!(roots[i].simple_lookup(height, &path[i]).is_err()); + assert_eq!(i, roots[i].num_keys()); + assert_eq!( + U256::from((i as u64 + 1) * 100), + roots[i + 1].total_stakes() + ); + assert_eq!( + U256::from(100), + roots[i + 1].simple_lookup(height, &path[i]).unwrap() + ); + } + // test get_key_by_stake + keys.iter().enumerate().for_each(|(i, key)| { + assert_eq!( + key, + roots + .last() + .unwrap() + .get_key_by_stake(U256::from(i as u64 * 100 + i as u64 + 1)) + .unwrap() + .0 + ); + }); + + // test for `lookup` and Merkle proof + for i in 0..10 { + let proof = roots.last().unwrap().lookup(height, &path[i]).unwrap(); + assert_eq!(height, proof.tree_height()); + assert_eq!(&keys[i], proof.get_key().unwrap()); + assert_eq!(&U256::from(100), proof.get_value().unwrap()); + assert_eq!( + roots.last().unwrap().commitment(), + proof.compute_root().unwrap() + ); + } + + // test for `set_value` + // `set_value` with wrong key should fail + assert!(roots + .last() + .unwrap() + .set_value(height, &path[2], &keys[1], U256::from(100)) + .is_err()); + // A successful `set_value` + let (new_root, value) = roots + .last() + .unwrap() + .set_value(height, &path[2], &keys[2], U256::from(90)) + .unwrap(); + roots.push(new_root); + assert_eq!(U256::from(100), value); + assert_eq!( + U256::from(90), + roots + .last() + .unwrap() + .simple_lookup(height, &path[2]) + .unwrap() + ); + assert_eq!(U256::from(990), roots.last().unwrap().total_stakes()); + + // test for `update` + // `update` with a wrong key should fail + assert!(roots + .last() + .unwrap() + .update(height, &path[3], &keys[0], U256::from(10), false) + .is_err()); + // `update` that results in a negative stake should fail + assert!(roots + .last() + .unwrap() + .update(height, &path[3], &keys[3], U256::from(200), true) + .is_err()); + // A successful `update` + let (new_root, value) = roots + .last() + .unwrap() + .update(height, &path[2], &keys[2], U256::from(10), false) + .unwrap(); + roots.push(new_root); + assert_eq!(U256::from(100), value); + assert_eq!( + value, + roots + .last() + .unwrap() + .simple_lookup(height, &path[2]) + .unwrap() + ); + assert_eq!(U256::from(1000), roots.last().unwrap().total_stakes()); + } + + #[test] + fn test_mt_iter() { + let height = 3; + let capacity = config::TREE_BRANCH.pow(height); + let mut rng = test_rng(); + + for _ in 0..5 { + let num_keys = rng.gen_range(1..capacity); + let keys: Vec = (0..num_keys).map(|i| Key::from(i as u64)).collect(); + let paths = (0..num_keys) + .map(|idx| to_merkle_path(idx, height as usize)) + .collect::>(); + let amounts: Vec = (0..num_keys).map(|_| U256::from(rng.next_u64())).collect(); + + // register all `num_keys` of (key, amount) pair. + let mut root = Arc::new(PersistentMerkleNode::::Empty); + for i in 0..num_keys { + root = root + .register(height as usize, &paths[i], &keys[i], amounts[i]) + .unwrap(); + } + for (i, (k, v)) in (*root).clone().into_iter().enumerate() { + assert_eq!((k, v), (keys[i], amounts[i])); + } + } + } +} From ace77841cf35e3cab6774791cf3ff92913c755c9 Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 3 Aug 2023 01:23:53 +0800 Subject: [PATCH 0002/1393] feat: move in struct StakeTable --- hotshot-qc/Cargo.toml | 19 ++ hotshot-qc/src/bit_vector.rs | 344 +++++++++++++++++++++++++++++++++++ hotshot-qc/src/lib.rs | 1 + 3 files changed, 364 insertions(+) create mode 100644 hotshot-qc/Cargo.toml create mode 100644 hotshot-qc/src/bit_vector.rs create mode 100644 hotshot-qc/src/lib.rs diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml new file mode 100644 index 0000000000..a6cf7a2801 --- /dev/null +++ b/hotshot-qc/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "hotshot-qc" +description = "Quorum certificate instantiations" +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +ark-std = "0.4" +bincode = { version = "1.1.3" } +bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "atomic", "serde"] } +ethereum-types = { version = "0.14.1", features = ["impl-serde"] } +generic-array = "0.14.7" +hotshot-types = { path = "../../types" } +jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } +jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +serde = { version = "1.0.164", features = ["derive"] } +typenum = { version = "1.16.0" } diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs new file mode 100644 index 0000000000..47896a5d20 --- /dev/null +++ b/hotshot-qc/src/bit_vector.rs @@ -0,0 +1,344 @@ +use ark_std::{ + fmt::Debug, + format, + marker::PhantomData, + rand::{CryptoRng, RngCore}, + vec, + vec::Vec, +}; +use bitvec::prelude::*; +use ethereum_types::U256; +use generic_array::GenericArray; +use hotshot_types::traits::{ + qc::QuorumCertificate, + stake_table::{SnapshotVersion, StakeTableScheme}, +}; +use jf_primitives::errors::PrimitivesError; +use jf_primitives::errors::PrimitivesError::ParameterError; +use jf_primitives::signatures::AggregateableSignatureSchemes; +use serde::{Deserialize, Serialize}; +use typenum::U32; + +/// An implementation of QC using BLS signature and a bit-vector. +pub struct BitVectorQC( + PhantomData, + PhantomData, +); + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct QCParams { + pub stake_table: ST, + pub threshold: U256, + pub agg_sig_pp: A::PublicParameter, +} + +impl QuorumCertificate for BitVectorQC +where + A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a> + PartialEq, + ST: StakeTableScheme + + Serialize + + for<'a> Deserialize<'a> + + PartialEq, +{ + type QCProverParams = QCParams; + + // TODO: later with SNARKs we'll use a smaller verifier parameter + type QCVerifierParams = QCParams; + + type QC = (A::Signature, BitVec); + type MessageLength = U32; + type QuorumSize = U256; + + fn sign( + agg_sig_pp: &A::PublicParameter, + message: &GenericArray, + sk: &A::SigningKey, + prng: &mut R, + ) -> Result { + A::sign(agg_sig_pp, sk, message, prng) + } + + fn assemble( + qc_pp: &Self::QCProverParams, + signers: &BitSlice, + sigs: &[A::Signature], + ) -> Result { + let st_len = qc_pp.stake_table.len(SnapshotVersion::LastEpochStart)?; + if signers.len() != st_len { + return Err(ParameterError(format!( + "bit vector len {} != the number of stake entries {}", + signers.len(), + st_len, + ))); + } + let total_weight: U256 = qc_pp + .stake_table + .iter(SnapshotVersion::LastEpochStart)? + .zip(signers.iter()) + .fold( + U256::zero(), + |acc, (entry, b)| { + if *b { + acc + entry.1 + } else { + acc + } + }, + ); + if total_weight < qc_pp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_pp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_pp + .stake_table + .iter(SnapshotVersion::LastEpochStart)? + .zip(signers.iter()) + { + if *b { + ver_keys.push(entry.0.clone()); + } + } + if ver_keys.len() != sigs.len() { + return Err(ParameterError(format!( + "the number of ver_keys {} != the number of partial signatures {}", + ver_keys.len(), + sigs.len(), + ))); + } + let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; + + Ok((sig, signers.into())) + } + + fn check( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result { + let (sig, signers) = qc; + let st_len = qc_vp.stake_table.len(SnapshotVersion::LastEpochStart)?; + if signers.len() != st_len { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + st_len, + ))); + } + let total_weight: U256 = qc_vp + .stake_table + .iter(SnapshotVersion::LastEpochStart)? + .zip(signers.iter()) + .fold( + U256::zero(), + |acc, (entry, b)| { + if *b { + acc + entry.1 + } else { + acc + } + }, + ); + if total_weight < qc_vp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_vp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_vp + .stake_table + .iter(SnapshotVersion::LastEpochStart)? + .zip(signers.iter()) + { + if *b { + ver_keys.push(entry.0.clone()); + } + } + A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; + + Ok(total_weight) + } + + fn trace( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray<::MessageUnit, Self::MessageLength>, + qc: &Self::QC, + ) -> Result::VerificationKey>, PrimitivesError> { + let (_sig, signers) = qc; + let st_len = qc_vp.stake_table.len(SnapshotVersion::LastEpochStart)?; + if signers.len() != st_len { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + st_len, + ))); + } + + Self::check(qc_vp, message, qc)?; + + let signer_pks: Vec<_> = qc_vp + .stake_table + .iter(SnapshotVersion::LastEpochStart)? + .zip(signers.iter()) + .filter(|(_, b)| **b) + .map(|(pk, _)| pk.0) + .collect(); + Ok(signer_pks) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hotshot_types::traits::stake_table::{StakeTable, StakeTableScheme}; + use jf_primitives::signatures::bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}; + use jf_primitives::signatures::SignatureScheme; + + macro_rules! test_quorum_certificate { + ($aggsig:tt) => { + type ST = StakeTable<<$aggsig as SignatureScheme>::VerificationKey>; + let mut rng = jf_utils::test_rng(); + + let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); + let key_pair1 = KeyPair::generate(&mut rng); + let key_pair2 = KeyPair::generate(&mut rng); + let key_pair3 = KeyPair::generate(&mut rng); + + let mut st = ST::new(3); + st.register(key_pair1.ver_key(), U256::from(3u8)).unwrap(); + st.register(key_pair2.ver_key(), U256::from(5u8)).unwrap(); + st.register(key_pair3.ver_key(), U256::from(7u8)).unwrap(); + st.advance(); + st.advance(); + + let qc_pp = QCParams { + stake_table: st, + threshold: U256::from(10u8), + agg_sig_pp, + }; + + let msg = [72u8; 32]; + let sig1 = BitVectorQC::<$aggsig, ST>::sign( + &agg_sig_pp, + &msg.into(), + key_pair1.sign_key_ref(), + &mut rng, + ) + .unwrap(); + let sig2 = BitVectorQC::<$aggsig, ST>::sign( + &agg_sig_pp, + &msg.into(), + key_pair2.sign_key_ref(), + &mut rng, + ) + .unwrap(); + let sig3 = BitVectorQC::<$aggsig, ST>::sign( + &agg_sig_pp, + &msg.into(), + key_pair3.sign_key_ref(), + &mut rng, + ) + .unwrap(); + + // happy path + let signers = bitvec![0, 1, 1]; + let qc = BitVectorQC::<$aggsig, ST>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone(), sig3.clone()], + ) + .unwrap(); + assert!(BitVectorQC::<$aggsig, ST>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert_eq!( + BitVectorQC::<$aggsig, ST>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + vec![key_pair2.ver_key(), key_pair3.ver_key()], + ); + + // Check the QC and the QCParams can be serialized / deserialized + assert_eq!( + qc, + bincode::deserialize(&bincode::serialize(&qc).unwrap()).unwrap() + ); + + // (alex) since deserialized stake table's leaf would contain normalized projective + // points with Z=1, which differs from the original projective representation. + // We compare individual fields for equivalence instead. + let de_qc_pp: QCParams<$aggsig, ST> = + bincode::deserialize(&bincode::serialize(&qc_pp).unwrap()).unwrap(); + assert_eq!( + qc_pp.stake_table.commitment(SnapshotVersion::Head).unwrap(), + de_qc_pp + .stake_table + .commitment(SnapshotVersion::Head) + .unwrap(), + ); + assert_eq!( + qc_pp + .stake_table + .commitment(SnapshotVersion::LastEpochStart) + .unwrap(), + de_qc_pp + .stake_table + .commitment(SnapshotVersion::LastEpochStart) + .unwrap(), + ); + assert_eq!(qc_pp.threshold, de_qc_pp.threshold); + assert_eq!(qc_pp.agg_sig_pp, de_qc_pp.agg_sig_pp); + + // bad paths + // number of signatures unmatch + assert!(BitVectorQC::<$aggsig, ST>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone()] + ) + .is_err()); + // total weight under threshold + let active_bad = bitvec![1, 1, 0]; + assert!(BitVectorQC::<$aggsig, ST>::assemble( + &qc_pp, + active_bad.as_bitslice(), + &[sig1.clone(), sig2.clone()] + ) + .is_err()); + // wrong bool vector length + let active_bad_2 = bitvec![0, 1, 1, 0]; + assert!(BitVectorQC::<$aggsig, ST>::assemble( + &qc_pp, + active_bad_2.as_bitslice(), + &[sig2, sig3], + ) + .is_err()); + + assert!(BitVectorQC::<$aggsig, ST>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad) + ) + .is_err()); + assert!(BitVectorQC::<$aggsig, ST>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad_2) + ) + .is_err()); + let bad_msg = [70u8; 32]; + assert!(BitVectorQC::<$aggsig, ST>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + + let bad_sig = &sig1; + assert!(BitVectorQC::<$aggsig, ST>::check( + &qc_pp, + &msg.into(), + &(bad_sig.clone(), qc.1) + ) + .is_err()); + }; + } + #[test] + fn test_quorum_certificate() { + test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); + } +} diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs new file mode 100644 index 0000000000..68e3be355a --- /dev/null +++ b/hotshot-qc/src/lib.rs @@ -0,0 +1 @@ +pub mod bit_vector; From 7c3e0a023bc74af7806086a5008e24cb5b804df6 Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 3 Aug 2023 01:32:07 +0800 Subject: [PATCH 0003/1393] feat: move in struct BitVectorQC --- hotshot-qc/Cargo.toml | 10 ++++++++++ hotshot-qc/src/bit_vector.rs | 10 +++++++++- hotshot-qc/src/lib.rs | 4 ++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index a6cf7a2801..cb600ecf36 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -17,3 +17,13 @@ jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", branch = jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } serde = { version = "1.0.164", features = ["derive"] } typenum = { version = "1.16.0" } + +[dev-dependencies] +hotshot-stake-table = { path = "../hotshot-stake-table" } + +[features] +default = ["parallel"] +std = ["ark-std/std"] +parallel = ["jf-primitives/parallel", "jf-utils/parallel"] +# TODO: (alex) what other features should I add +full-ci = ["std", "parallel"] diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 47896a5d20..8ce956b8b0 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -1,3 +1,6 @@ +//! Implementation for BitVectorQC that uses BLS signature + Bit vector. +//! See more details in HotShot paper. + use ark_std::{ fmt::Debug, format, @@ -25,10 +28,14 @@ pub struct BitVectorQC( PhantomData, ); +/// Public parameters of [`BitVectorQC`] #[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct QCParams { + /// the stake table (snapshot) this QC is verified against pub stake_table: ST, + /// threshold for the accumulated "weight" of votes to form a QC pub threshold: U256, + /// public parameter for the aggregated signature scheme pub agg_sig_pp: A::PublicParameter, } @@ -193,7 +200,8 @@ where #[cfg(test)] mod tests { use super::*; - use hotshot_types::traits::stake_table::{StakeTable, StakeTableScheme}; + use hotshot_stake_table::mt_based::StakeTable; + use hotshot_types::traits::stake_table::StakeTableScheme; use jf_primitives::signatures::bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}; use jf_primitives::signatures::SignatureScheme; diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs index 68e3be355a..32f324797e 100644 --- a/hotshot-qc/src/lib.rs +++ b/hotshot-qc/src/lib.rs @@ -1 +1,5 @@ +//! This crates offer implementations of quorum certificates used in HotShot. +#![deny(warnings)] +#![deny(missing_docs)] + pub mod bit_vector; From b0f3a9197d8f18586debe22bc3b71b9b24803f72 Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 3 Aug 2023 01:48:59 +0800 Subject: [PATCH 0004/1393] move QC circuit --- hotshot-qc/Cargo.toml | 11 +- hotshot-qc/src/lib.rs | 1 + hotshot-qc/src/snarked.rs | 5 + hotshot-qc/src/snarked/circuit.rs | 545 ++++++++++++++++++++++++++++++ 4 files changed, 561 insertions(+), 1 deletion(-) create mode 100644 hotshot-qc/src/snarked.rs create mode 100644 hotshot-qc/src/snarked/circuit.rs diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index cb600ecf36..e3533411d3 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -7,13 +7,22 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] -ark-std = "0.4" +ark-bls12-377 = "0.4.0" +ark-bls12-381 = "0.4.0" +ark-bn254 = "0.4.0" +ark-ec = "0.4.0" +ark-ff = "0.4.0" +ark-pallas = "0.4.0" +ark-poly = "0.4.0" +ark-serialize = "0.4.0" +ark-std = { version = "0.4.0", default-features = false } bincode = { version = "1.1.3" } bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "atomic", "serde"] } ethereum-types = { version = "0.14.1", features = ["impl-serde"] } generic-array = "0.14.7" hotshot-types = { path = "../../types" } jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } +jf-relation= { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } serde = { version = "1.0.164", features = ["derive"] } typenum = { version = "1.16.0" } diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs index 32f324797e..5b2377d627 100644 --- a/hotshot-qc/src/lib.rs +++ b/hotshot-qc/src/lib.rs @@ -3,3 +3,4 @@ #![deny(missing_docs)] pub mod bit_vector; +pub mod snarked; diff --git a/hotshot-qc/src/snarked.rs b/hotshot-qc/src/snarked.rs new file mode 100644 index 0000000000..31fa537249 --- /dev/null +++ b/hotshot-qc/src/snarked.rs @@ -0,0 +1,5 @@ +//! This is a SNARKed QC implemenation, see more in HotShot paper. + +mod circuit; + +// TODO: add impl: https://github.com/EspressoSystems/hotshot-primitives/issues/5 diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs new file mode 100644 index 0000000000..a231b76d99 --- /dev/null +++ b/hotshot-qc/src/snarked/circuit.rs @@ -0,0 +1,545 @@ +//! Circuit implementation of stake key aggregation for quorum certificates verification. + +use ark_ff::PrimeField; +use ark_std::{format, vec, vec::Vec}; +use jf_primitives::{ + circuit::rescue::RescueNativeGadget, + rescue::{sponge::RescueCRHF, RescueParameter}, +}; +use jf_relation::{ + errors::CircuitError, + gadgets::{ + ecc::{ + emulated::{EmulatedSWPointVariable, EmulatedTEPointVariable, SWPoint}, + TEPoint, + }, + EmulationConfig, SerializableEmulatedStruct, + }, + BoolVar, Circuit, PlonkCircuit, Variable, +}; + +/// Digest a list of verification keys and their associated stake amounts +/// * `stack_amts` - stake amounts +/// * `keys` - list of verification keys +#[allow(dead_code)] +pub fn compute_stake_table_hash>( + stake_amts: &[F], + keys: &[T], +) -> F { + let mut input_vec = vec![]; + for (&amt, key) in stake_amts.iter().zip(keys.iter()) { + input_vec.extend(key.serialize_to_native_elements()); + input_vec.push(amt); + } + RescueCRHF::sponge_with_bit_padding(&input_vec[..], 1)[0] +} + +/// Traits for verification keys +pub trait VerKeyVar: Sized + Clone { + type KeyType: Default; + + /// Returns a list of variables associated with this key variable. + fn native_vars(&self) -> Vec; + + /// Aggregate the verification keys with Boolean selectors. + /// * `circuit` - associated Plonk circuit. + /// * `keys` - list of input verification keys. + /// * `selectors` - list of Boolean selectors. + /// * `coef` - the internal curve parameter. + /// * Returns an aggregated key variable. + fn aggregate_with_selectors( + circuit: &mut PlonkCircuit, + keys: &[Self], + selectors: &[BoolVar], + coef: E, + ) -> Result + where + F: PrimeField, + E: EmulationConfig; + + /// Check whether two input verification key variables are equal. + /// * `circuit` - associated Plonk circuit. + /// * `p0` - first verification key variable. + /// * `p1` - second verification key variable. + /// * Returns a Boolean variable indicates whether `p0 == p1`. + fn is_equal( + circuit: &mut PlonkCircuit, + p0: &Self, + p1: &Self, + ) -> Result + where + F: PrimeField, + E: EmulationConfig; + + /// Enforce that two input verification key variables are equal. + /// * `circuit` - associated Plonk circuit. + /// * `p0` - first verification key variable. + /// * `p1` - second verification key variable. + fn enforce_equal( + circuit: &mut PlonkCircuit, + p0: &Self, + p1: &Self, + ) -> Result<(), CircuitError> + where + F: PrimeField, + E: EmulationConfig; +} + +/// Plonk circuit gadget for stake key aggregation for quorum certificates. +pub trait QCKeyAggregateGadget +where + F: RescueParameter, +{ + /// Key aggregation circuit + /// * `vks` - list of stake public keys. + /// * `bit_vec` - the indicator vector for the quorum set, `bit_vec[i] = 1` if `i` is in the quorum set, o/w `bit_vec[i] = 0`. + /// * `agg_vk` - the public aggregated stake key. + /// * `coef` - the internal curve parameter + fn check_aggregate_vk, V: VerKeyVar>( + &mut self, + vks: &[V], + bit_vec: &[BoolVar], + agg_vk: &V, + coef: E, + ) -> Result<(), CircuitError>; + + /// Stake table commitment checking circuit + /// * `vk` - list of stake public keys. + /// * `stake_amts` - list of stake amounts for the corresponding stake keys. + /// * `digest` - the hash of the stake table. + fn check_stake_table_digest, V: VerKeyVar>( + &mut self, + vks: &[V], + stake_amts: &[Variable], + digest: Variable, + ) -> Result<(), CircuitError>; + + /// Quorum threshold checking circuit + /// * `stake_amts` - list of stake amounts for the corresponding stake keys. + /// * `bit_vec` - the indicator vector for the quorum set. + /// * `threshold` - the public quorum threshold. + fn check_threshold( + &mut self, + stake_amts: &[Variable], + bit_vec: &[BoolVar], + threshold: Variable, + ) -> Result<(), CircuitError>; +} + +impl QCKeyAggregateGadget for PlonkCircuit +where + F: RescueParameter, +{ + fn check_aggregate_vk, V: VerKeyVar>( + &mut self, + vks: &[V], + bit_vec: &[BoolVar], + agg_vk: &V, + coef: E, + ) -> Result<(), CircuitError> { + if vks.len() != bit_vec.len() { + return Err(CircuitError::ParameterError(format!( + "bit vector len {} != the number of stake keys {}", + bit_vec.len(), + vks.len(), + ))); + } + let agg_key_var = V::aggregate_with_selectors::(self, vks, bit_vec, coef)?; + V::enforce_equal(self, &agg_key_var, agg_vk) + } + + fn check_stake_table_digest, V: VerKeyVar>( + &mut self, + vks: &[V], + stake_amts: &[Variable], + digest: Variable, + ) -> Result<(), CircuitError> { + if stake_amts.len() != vks.len() { + return Err(CircuitError::ParameterError(format!( + "the number of stake amounts {} != the number of stake verification keys {}", + stake_amts.len(), + vks.len(), + ))); + } + let mut hash_input = vec![]; + for (vk, &stake_amt) in vks.iter().zip(stake_amts.iter()) { + hash_input.append(&mut vk.native_vars()); + hash_input.push(stake_amt); + } + let expected_digest = + RescueNativeGadget::::rescue_sponge_with_padding(self, &hash_input, 1)?[0]; + self.enforce_equal(expected_digest, digest) + } + + fn check_threshold( + &mut self, + stake_amts: &[Variable], + bit_vec: &[BoolVar], + threshold: Variable, + ) -> Result<(), CircuitError> { + if stake_amts.len() != bit_vec.len() { + return Err(CircuitError::ParameterError(format!( + "bit vector len {} != the number of stake entries {}", + bit_vec.len(), + stake_amts.len(), + ))); + } + let mut active_amts = vec![]; + for (&stake_amt, &bit) in stake_amts.iter().zip(bit_vec.iter()) { + active_amts.push(self.mul(stake_amt, bit.into())?); + } + let sum = self.sum(&active_amts[..])?; + self.enforce_geq(sum, threshold) + } +} + +impl VerKeyVar for EmulatedSWPointVariable +where + E: PrimeField, +{ + type KeyType = SWPoint; + + fn native_vars(&self) -> Vec { + let mut ret = self.0.native_vars(); + ret.append(&mut self.1.native_vars()); + ret.push(self.2 .0); + ret + } + + fn aggregate_with_selectors( + circuit: &mut PlonkCircuit, + keys: &[Self], + selectors: &[BoolVar], + coef: E, + ) -> Result + where + F: PrimeField, + E: EmulationConfig, + { + let neutral_point = Self::KeyType::default(); + let emulated_neutral_point_var = + circuit.create_constant_emulated_sw_point_variable(neutral_point)?; + let mut agg_key_var = emulated_neutral_point_var.clone(); + for (key, &bit) in keys.iter().zip(selectors.iter()) { + let point_var = circuit.binary_emulated_sw_point_vars_select( + bit, + &emulated_neutral_point_var, + key, + )?; + agg_key_var = circuit.emulated_sw_ecc_add::(&agg_key_var, &point_var, coef)?; + } + Ok(agg_key_var) + } + + fn is_equal( + circuit: &mut PlonkCircuit, + p0: &Self, + p1: &Self, + ) -> Result + where + F: PrimeField, + E: EmulationConfig, + { + circuit.is_emulated_sw_point_equal(p0, p1) + } + + fn enforce_equal( + circuit: &mut PlonkCircuit, + p0: &Self, + p1: &Self, + ) -> Result<(), CircuitError> + where + F: PrimeField, + E: EmulationConfig, + { + circuit.enforce_emulated_sw_point_equal(p0, p1) + } +} + +impl VerKeyVar for EmulatedTEPointVariable +where + E: PrimeField, +{ + type KeyType = TEPoint; + + fn native_vars(&self) -> Vec { + let mut ret = self.0.native_vars(); + ret.append(&mut self.1.native_vars()); + ret + } + + fn aggregate_with_selectors( + circuit: &mut PlonkCircuit, + keys: &[Self], + selectors: &[BoolVar], + coef: E, + ) -> Result + where + F: PrimeField, + E: EmulationConfig, + { + let neutral_point = Self::KeyType::default(); + let emulated_neutral_point_var = + circuit.create_constant_emulated_te_point_variable(neutral_point)?; + let mut agg_key_var = emulated_neutral_point_var.clone(); + for (key, &bit) in keys.iter().zip(selectors.iter()) { + let point_var = circuit.binary_emulated_te_point_vars_select( + bit, + &emulated_neutral_point_var, + key, + )?; + agg_key_var = circuit.emulated_te_ecc_add::(&agg_key_var, &point_var, coef)?; + } + Ok(agg_key_var) + } + + fn is_equal( + circuit: &mut PlonkCircuit, + p0: &Self, + p1: &Self, + ) -> Result + where + F: PrimeField, + E: EmulationConfig, + { + circuit.is_emulated_te_point_equal(p0, p1) + } + + fn enforce_equal( + circuit: &mut PlonkCircuit, + p0: &Self, + p1: &Self, + ) -> Result<(), CircuitError> + where + F: PrimeField, + E: EmulationConfig, + { + circuit.enforce_emulated_te_point_equal(p0, p1) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_377::{g1::Config as Param377, Fq as Fq377}; + use ark_bn254::{g1::Config as Param254, Fq as Fq254, Fr as Fr254}; + use ark_ec::{ + short_weierstrass::{Projective, SWCurveConfig}, + CurveGroup, + }; + use ark_ff::MontFp; + use ark_std::{vec::Vec, UniformRand, Zero}; + use jf_relation::{ + errors::CircuitError, gadgets::ecc::SWToTEConParam, Circuit, PlonkCircuit, Variable, + }; + + #[test] + fn test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { + let a_ecc = Fq377::zero(); + test_vk_aggregate_sw_circuit_helper::(a_ecc)?; + let a_ecc = Fq254::zero(); + test_vk_aggregate_sw_circuit_helper::(a_ecc) + } + + // TODO: use Aggregate signature APIs to aggregate the keys outside the circuit + fn test_vk_aggregate_sw_circuit_helper(a_ecc: E) -> Result<(), CircuitError> + where + E: EmulationConfig, + F: RescueParameter, + P: SWCurveConfig, + { + let mut rng = jf_utils::test_rng(); + let vk_points: Vec> = + (0..5).map(|_| Projective::

::rand(&mut rng)).collect(); + let selector = vec![false, true, false, true, false]; + let agg_vk_point = + vk_points + .iter() + .zip(selector.iter()) + .fold( + Projective::

::zero(), + |acc, (x, &b)| { + if b { + acc + x + } else { + acc + } + }, + ); + let agg_vk_point: SWPoint = agg_vk_point.into_affine().into(); + let vk_points: Vec> = vk_points.iter().map(|p| p.into_affine().into()).collect(); + let stake_amts: Vec = (0..5).map(|i| F::from((i + 1) as u32)).collect(); + let threshold = F::from(6u8); + let digest = compute_stake_table_hash::>(&stake_amts[..], &vk_points[..]); + + let mut circuit = PlonkCircuit::::new_ultra_plonk(20); + // public input + let agg_vk_var = circuit.create_public_emulated_sw_point_variable(agg_vk_point)?; + let public_input = agg_vk_point.serialize_to_native_elements(); + let threshold_var = circuit.create_variable(threshold)?; + let digest_var = circuit.create_variable(digest)?; + + // add witness + let vk_vars: Vec> = vk_points + .iter() + .map(|&p| circuit.create_emulated_sw_point_variable(p).unwrap()) + .collect(); + let stake_amt_vars: Vec = stake_amts + .iter() + .map(|&amt| circuit.create_variable(amt).unwrap()) + .collect(); + let selector_vars: Vec = selector + .iter() + .map(|&b| circuit.create_boolean_variable(b).unwrap()) + .collect(); + // add circuit gadgets + circuit.check_aggregate_vk::>( + &vk_vars[..], + &selector_vars[..], + &agg_vk_var, + a_ecc, + )?; + circuit.check_stake_table_digest(&vk_vars[..], &stake_amt_vars[..], digest_var)?; + circuit.check_threshold(&stake_amt_vars[..], &selector_vars[..], threshold_var)?; + assert!(circuit.check_circuit_satisfiability(&public_input).is_ok()); + + // bad path: wrong aggregated vk + let tmp_var = agg_vk_var.native_vars()[0]; + let tmp = circuit.witness(tmp_var)?; + *circuit.witness_mut(tmp_var) = F::zero(); + assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); + *circuit.witness_mut(tmp_var) = tmp; + + // bad path: wrong digest + let tmp = circuit.witness(digest_var)?; + *circuit.witness_mut(digest_var) = F::zero(); + assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); + *circuit.witness_mut(digest_var) = tmp; + + // bad path: bad threshold + *circuit.witness_mut(threshold_var) = F::from(7u8); + assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); + + // check input parameter errors + assert!(circuit + .check_aggregate_vk::>( + &vk_vars[..], + &selector_vars[1..], + &agg_vk_var, + a_ecc + ) + .is_err()); + assert!(circuit + .check_stake_table_digest(&vk_vars[..], &stake_amt_vars[1..], digest_var) + .is_err()); + assert!(circuit + .check_threshold(&stake_amt_vars[..], &selector_vars[1..], threshold_var) + .is_err()); + + Ok(()) + } + + #[test] + fn test_vk_aggregate_te_circuit() -> Result<(), CircuitError> { + let d_ecc : Fq377 = MontFp!("122268283598675559488486339158635529096981886914877139579534153582033676785385790730042363341236035746924960903179"); + test_vk_aggregate_te_circuit_helper::(d_ecc) + } + + // TODO: use Aggregate signature APIs to aggregate the keys outside the circuit + fn test_vk_aggregate_te_circuit_helper(d_ecc: E) -> Result<(), CircuitError> + where + E: EmulationConfig + SWToTEConParam, + F: RescueParameter, + P: SWCurveConfig, + { + let mut rng = jf_utils::test_rng(); + let vk_points: Vec> = + (0..5).map(|_| Projective::

::rand(&mut rng)).collect(); + let selector = vec![false, true, false, true, false]; + let agg_vk_point = + vk_points + .iter() + .zip(selector.iter()) + .fold( + Projective::

::zero(), + |acc, (x, &b)| { + if b { + acc + x + } else { + acc + } + }, + ); + let agg_vk_point: TEPoint = agg_vk_point.into_affine().into(); + let vk_points: Vec> = vk_points.iter().map(|p| p.into_affine().into()).collect(); + let stake_amts: Vec = (0..5).map(|i| F::from((i + 1) as u32)).collect(); + let threshold = F::from(6u8); + let digest = compute_stake_table_hash::>(&stake_amts[..], &vk_points[..]); + + let mut circuit = PlonkCircuit::::new_ultra_plonk(20); + // public input + let agg_vk_var = circuit.create_public_emulated_te_point_variable(agg_vk_point)?; + let public_input = agg_vk_point.serialize_to_native_elements(); + let threshold_var = circuit.create_variable(threshold)?; + let digest_var = circuit.create_variable(digest)?; + + // add witness + let vk_vars: Vec> = vk_points + .iter() + .map(|&p| circuit.create_emulated_te_point_variable(p).unwrap()) + .collect(); + let stake_amt_vars: Vec = stake_amts + .iter() + .map(|&amt| circuit.create_variable(amt).unwrap()) + .collect(); + let selector_vars: Vec = selector + .iter() + .map(|&b| circuit.create_boolean_variable(b).unwrap()) + .collect(); + // add circuit gadgets + circuit.check_aggregate_vk::>( + &vk_vars[..], + &selector_vars[..], + &agg_vk_var, + d_ecc, + )?; + circuit.check_stake_table_digest(&vk_vars[..], &stake_amt_vars[..], digest_var)?; + circuit.check_threshold(&stake_amt_vars[..], &selector_vars[..], threshold_var)?; + assert!(circuit.check_circuit_satisfiability(&public_input).is_ok()); + + // bad path: wrong aggregated vk + let tmp_var = agg_vk_var.native_vars()[0]; + let tmp = circuit.witness(tmp_var)?; + *circuit.witness_mut(tmp_var) = F::zero(); + assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); + *circuit.witness_mut(tmp_var) = tmp; + + // bad path: wrong digest + let tmp = circuit.witness(digest_var)?; + *circuit.witness_mut(digest_var) = F::zero(); + assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); + *circuit.witness_mut(digest_var) = tmp; + + // bad path: bad threshold + *circuit.witness_mut(threshold_var) = F::from(7u8); + assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); + + // check input parameter errors + assert!(circuit + .check_aggregate_vk::>( + &vk_vars[..], + &selector_vars[1..], + &agg_vk_var, + d_ecc + ) + .is_err()); + assert!(circuit + .check_stake_table_digest(&vk_vars[..], &stake_amt_vars[1..], digest_var) + .is_err()); + assert!(circuit + .check_threshold(&stake_amt_vars[..], &selector_vars[1..], threshold_var) + .is_err()); + + Ok(()) + } +} From 9524ad6756d7fd6fc52ba5a118b638920ab1500f Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 3 Aug 2023 13:22:07 +0800 Subject: [PATCH 0005/1393] fix: clippy err --- hotshot-qc/src/bit_vector.rs | 10 +++++----- hotshot-stake-table/src/mt_based.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 8ce956b8b0..f208a7fefd 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -80,7 +80,7 @@ where } let total_weight: U256 = qc_pp .stake_table - .iter(SnapshotVersion::LastEpochStart)? + .try_iter(SnapshotVersion::LastEpochStart)? .zip(signers.iter()) .fold( U256::zero(), @@ -101,7 +101,7 @@ where let mut ver_keys = vec![]; for (entry, b) in qc_pp .stake_table - .iter(SnapshotVersion::LastEpochStart)? + .try_iter(SnapshotVersion::LastEpochStart)? .zip(signers.iter()) { if *b { @@ -136,7 +136,7 @@ where } let total_weight: U256 = qc_vp .stake_table - .iter(SnapshotVersion::LastEpochStart)? + .try_iter(SnapshotVersion::LastEpochStart)? .zip(signers.iter()) .fold( U256::zero(), @@ -157,7 +157,7 @@ where let mut ver_keys = vec![]; for (entry, b) in qc_vp .stake_table - .iter(SnapshotVersion::LastEpochStart)? + .try_iter(SnapshotVersion::LastEpochStart)? .zip(signers.iter()) { if *b { @@ -188,7 +188,7 @@ where let signer_pks: Vec<_> = qc_vp .stake_table - .iter(SnapshotVersion::LastEpochStart)? + .try_iter(SnapshotVersion::LastEpochStart)? .zip(signers.iter()) .filter(|(_, b)| **b) .map(|(pk, _)| pk.0) diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 56d2ae24c3..643ccc8302 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -155,7 +155,7 @@ impl StakeTableScheme for StakeTable { self.last_epoch_start.get_key_by_stake(pos) } - fn iter(&self, version: SnapshotVersion) -> Result { + fn try_iter(&self, version: SnapshotVersion) -> Result { let root = Self::get_root(self, version)?; Ok(internal::IntoIter::new(root)) } From 062c58acf742f950c5df8d038755d8bdab45c08b Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 3 Aug 2023 14:34:47 +0800 Subject: [PATCH 0006/1393] chore: remove full-ci feature --- hotshot-qc/Cargo.toml | 2 -- hotshot-stake-table/Cargo.toml | 2 -- 2 files changed, 4 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index e3533411d3..0d0e5e846d 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -34,5 +34,3 @@ hotshot-stake-table = { path = "../hotshot-stake-table" } default = ["parallel"] std = ["ark-std/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel"] -# TODO: (alex) what other features should I add -full-ci = ["std", "parallel"] diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 36bd1b168a..1e3c802d15 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -32,5 +32,3 @@ rand_chacha = { version = "0.3.1", default-features = false } default = ["parallel"] std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] -# TODO: (alex) what other features should I add to follow the same pattern as other crates? -full-ci = ["std", "parallel"] From 9ce1fad85e938cfa9ef823c6d7044448abd1c3ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Aug 2023 03:14:23 +0000 Subject: [PATCH 0007/1393] Bump serde from 1.0.180 to 1.0.183 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.180 to 1.0.183. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.180...v1.0.183) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- hotshot-qc/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 0d0e5e846d..39a319f870 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -24,7 +24,7 @@ hotshot-types = { path = "../../types" } jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } jf-relation= { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } -serde = { version = "1.0.164", features = ["derive"] } +serde = { version = "1.0.183", features = ["derive"] } typenum = { version = "1.16.0" } [dev-dependencies] From 81d60cd11dce41018bc5aebb7c402e4082123092 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 22 Aug 2023 13:44:14 -0400 Subject: [PATCH 0008/1393] move jf-primitives to inherited workspace dependency --- hotshot-qc/Cargo.toml | 10 +++++++--- hotshot-stake-table/Cargo.toml | 13 ++++++++++--- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 39a319f870..b2e80109b2 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -17,12 +17,16 @@ ark-poly = "0.4.0" ark-serialize = "0.4.0" ark-std = { version = "0.4.0", default-features = false } bincode = { version = "1.1.3" } -bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "atomic", "serde"] } +bitvec = { version = "1.0.1", default-features = false, features = [ + "alloc", + "atomic", + "serde", +] } ethereum-types = { version = "0.14.1", features = ["impl-serde"] } generic-array = "0.14.7" hotshot-types = { path = "../../types" } -jf-primitives = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } -jf-relation= { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } +jf-primitives = { workspace = true } +jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } serde = { version = "1.0.183", features = ["derive"] } typenum = { version = "1.16.0" } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 1e3c802d15..94dce833c0 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -12,16 +12,23 @@ ark-ff = "0.4.0" ark-serialize = "0.4.0" ark-std = { version = "0.4.0", default-features = false } bincode = { version = "1.1.3" } -bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "atomic", "serde"] } +bitvec = { version = "1.0.1", default-features = false, features = [ + "alloc", + "atomic", + "serde", +] } digest = { version = "0.10" } displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { version = "0.14.1", features = ["impl-serde"] } generic-array = "0.14.7" hotshot-types = { path = "../../types" } -jf-primitives = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +jf-primitives = { workspace = true } jf-relation = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } -serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } +serde = { version = "1.0", default-features = false, features = [ + "derive", + "rc", +] } tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } typenum = { version = "1.16.0" } From fd2b62273d297b07dc13372105e13e4239c6bba6 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 22 Aug 2023 14:24:46 -0400 Subject: [PATCH 0009/1393] move ark-std to inherited workspace dep, all packages seem fine without default-features --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index b2e80109b2..6c1adf6168 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -15,7 +15,7 @@ ark-ff = "0.4.0" ark-pallas = "0.4.0" ark-poly = "0.4.0" ark-serialize = "0.4.0" -ark-std = { version = "0.4.0", default-features = false } +ark-std = { workspace = true } bincode = { version = "1.1.3" } bitvec = { version = "1.0.1", default-features = false, features = [ "alloc", diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 94dce833c0..7733f25e56 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -10,7 +10,7 @@ rust-version = { workspace = true } ark-bn254 = "0.4.0" ark-ff = "0.4.0" ark-serialize = "0.4.0" -ark-std = { version = "0.4.0", default-features = false } +ark-std = { workspace = true } bincode = { version = "1.1.3" } bitvec = { version = "1.0.1", default-features = false, features = [ "alloc", From b7abe7775a5ab3dcc9edf8c0098a873b17d98b1a Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 22 Aug 2023 14:50:03 -0400 Subject: [PATCH 0010/1393] ark-ec to inherited workspace deps --- hotshot-qc/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 6c1adf6168..3d505501bb 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -10,7 +10,7 @@ rust-version = { workspace = true } ark-bls12-377 = "0.4.0" ark-bls12-381 = "0.4.0" ark-bn254 = "0.4.0" -ark-ec = "0.4.0" +ark-ec = { workspace = true } ark-ff = "0.4.0" ark-pallas = "0.4.0" ark-poly = "0.4.0" From bebf205d45af35b65cbc597ebee91eb3031b2c3f Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 22 Aug 2023 14:57:30 -0400 Subject: [PATCH 0011/1393] ark-bls12-381 inherited workspace dep, upgrade to 0.4 --- hotshot-qc/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 3d505501bb..2b36ee15c5 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -8,7 +8,7 @@ rust-version = { workspace = true } [dependencies] ark-bls12-377 = "0.4.0" -ark-bls12-381 = "0.4.0" +ark-bls12-381 = { workspace = true } ark-bn254 = "0.4.0" ark-ec = { workspace = true } ark-ff = "0.4.0" From 7c784703d76ba8b84070fd2b83a2e3e066f8dc6e Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 22 Aug 2023 15:26:07 -0400 Subject: [PATCH 0012/1393] ark-serialize inherited workspace dep --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 2b36ee15c5..d22ce68c8d 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -14,7 +14,7 @@ ark-ec = { workspace = true } ark-ff = "0.4.0" ark-pallas = "0.4.0" ark-poly = "0.4.0" -ark-serialize = "0.4.0" +ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { version = "1.1.3" } bitvec = { version = "1.0.1", default-features = false, features = [ diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 7733f25e56..3a0f165f85 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -9,7 +9,7 @@ rust-version = { workspace = true } [dependencies] ark-bn254 = "0.4.0" ark-ff = "0.4.0" -ark-serialize = "0.4.0" +ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { version = "1.1.3" } bitvec = { version = "1.0.1", default-features = false, features = [ From 007473561893d708f335a98e9b99d7b174d7fa4e Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 10:50:36 -0400 Subject: [PATCH 0013/1393] bincode inherited workspace dep --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index d22ce68c8d..cf3b37b4f9 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -16,7 +16,7 @@ ark-pallas = "0.4.0" ark-poly = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } -bincode = { version = "1.1.3" } +bincode = { workspace = true } bitvec = { version = "1.0.1", default-features = false, features = [ "alloc", "atomic", diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 3a0f165f85..1641a3b9fc 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -11,7 +11,7 @@ ark-bn254 = "0.4.0" ark-ff = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } -bincode = { version = "1.1.3" } +bincode = { workspace = true } bitvec = { version = "1.0.1", default-features = false, features = [ "alloc", "atomic", From b9208e4204c442864ffd9d5eabafc76ef2a36086 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 10:57:28 -0400 Subject: [PATCH 0014/1393] digest inherited workspace dep --- hotshot-stake-table/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 1641a3b9fc..16c9829481 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -17,7 +17,7 @@ bitvec = { version = "1.0.1", default-features = false, features = [ "atomic", "serde", ] } -digest = { version = "0.10" } +digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { version = "0.14.1", features = ["impl-serde"] } generic-array = "0.14.7" From 04cadc9d8519a538c4b20116141904344b413abb Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 11:38:00 -0400 Subject: [PATCH 0015/1393] rand_chacha inherited workspace dep --- hotshot-stake-table/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 16c9829481..f56f1aa749 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -33,7 +33,7 @@ tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag typenum = { version = "1.16.0" } [dev-dependencies] -rand_chacha = { version = "0.3.1", default-features = false } +rand_chacha = { workspace = true } [features] default = ["parallel"] From 53123bdaac9d127d2e1cb05ece8eb458c9c5284d Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 11:45:44 -0400 Subject: [PATCH 0016/1393] serde inherited workspace dep --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index cf3b37b4f9..595b80f73b 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -28,7 +28,7 @@ hotshot-types = { path = "../../types" } jf-primitives = { workspace = true } jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } -serde = { version = "1.0.183", features = ["derive"] } +serde = { workspace = true } typenum = { version = "1.16.0" } [dev-dependencies] diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index f56f1aa749..29231ba7a9 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -25,10 +25,7 @@ hotshot-types = { path = "../../types" } jf-primitives = { workspace = true } jf-relation = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } -serde = { version = "1.0", default-features = false, features = [ - "derive", - "rc", -] } +serde = { workspace = true, features = ["rc"] } tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } typenum = { version = "1.16.0" } From aff713e736622d5e07ae1d99d1e676740883aeca Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 12:13:17 -0400 Subject: [PATCH 0017/1393] ethereum-types inherited workspace dep --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 595b80f73b..dd14e93492 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -22,7 +22,7 @@ bitvec = { version = "1.0.1", default-features = false, features = [ "atomic", "serde", ] } -ethereum-types = { version = "0.14.1", features = ["impl-serde"] } +ethereum-types = { workspace = true } generic-array = "0.14.7" hotshot-types = { path = "../../types" } jf-primitives = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 29231ba7a9..9718dfb384 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -19,7 +19,7 @@ bitvec = { version = "1.0.1", default-features = false, features = [ ] } digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } -ethereum-types = { version = "0.14.1", features = ["impl-serde"] } +ethereum-types = { workspace = true } generic-array = "0.14.7" hotshot-types = { path = "../../types" } jf-primitives = { workspace = true } From 00141afdb83c3070df677e292642207bbe448356 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 12:15:37 -0400 Subject: [PATCH 0018/1393] bitvec inherited workspace dep --- hotshot-qc/Cargo.toml | 6 +----- hotshot-stake-table/Cargo.toml | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index dd14e93492..91d5c0c4b8 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -17,11 +17,7 @@ ark-poly = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { workspace = true } -bitvec = { version = "1.0.1", default-features = false, features = [ - "alloc", - "atomic", - "serde", -] } +bitvec = { workspace = true } ethereum-types = { workspace = true } generic-array = "0.14.7" hotshot-types = { path = "../../types" } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 9718dfb384..41c4bd4c10 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -12,11 +12,7 @@ ark-ff = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { workspace = true } -bitvec = { version = "1.0.1", default-features = false, features = [ - "alloc", - "atomic", - "serde", -] } +bitvec = { workspace = true } digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } From f4f4cb7224089cac7ee25fd77c6c21058040e531 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 23 Aug 2023 12:17:02 -0400 Subject: [PATCH 0019/1393] typenum inherited workspace dep --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 91d5c0c4b8..1b5779e8ee 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -25,7 +25,7 @@ jf-primitives = { workspace = true } jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } serde = { workspace = true } -typenum = { version = "1.16.0" } +typenum = { workspace = true } [dev-dependencies] hotshot-stake-table = { path = "../hotshot-stake-table" } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 41c4bd4c10..38abf7a23e 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -23,7 +23,7 @@ jf-relation = { git = "https://github.com/espressosystems/jellyfish", branch = " jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } serde = { workspace = true, features = ["rc"] } tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } -typenum = { version = "1.16.0" } +typenum = { workspace = true } [dev-dependencies] rand_chacha = { workspace = true } From 0d3e0e72e3a9431e720becd8aba7009ae618dabb Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 25 Aug 2023 15:03:13 -0400 Subject: [PATCH 0020/1393] import granularity for cargo fmt --- hotshot-qc/src/bit_vector.rs | 13 ++++++++----- hotshot-stake-table/src/mt_based/internal.rs | 3 +-- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index f208a7fefd..df05b64de4 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -16,9 +16,10 @@ use hotshot_types::traits::{ qc::QuorumCertificate, stake_table::{SnapshotVersion, StakeTableScheme}, }; -use jf_primitives::errors::PrimitivesError; -use jf_primitives::errors::PrimitivesError::ParameterError; -use jf_primitives::signatures::AggregateableSignatureSchemes; +use jf_primitives::{ + errors::{PrimitivesError, PrimitivesError::ParameterError}, + signatures::AggregateableSignatureSchemes, +}; use serde::{Deserialize, Serialize}; use typenum::U32; @@ -202,8 +203,10 @@ mod tests { use super::*; use hotshot_stake_table::mt_based::StakeTable; use hotshot_types::traits::stake_table::StakeTableScheme; - use jf_primitives::signatures::bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}; - use jf_primitives::signatures::SignatureScheme; + use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, + SignatureScheme, + }; macro_rules! test_quorum_certificate { ($aggsig:tt) => { diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index ffbe7192a4..ac0793678f 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -612,8 +612,7 @@ pub fn from_merkle_path(path: &[usize]) -> usize { #[cfg(test)] mod tests { - use super::super::config; - use super::{to_merkle_path, PersistentMerkleNode}; + use super::{super::config, to_merkle_path, PersistentMerkleNode}; use ark_std::{ rand::{Rng, RngCore}, sync::Arc, From 046294e75067d0cde8b9d1d000c2cbbc573c205b Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Fri, 25 Aug 2023 13:12:45 -0400 Subject: [PATCH 0021/1393] new crate skeleton hotshot-signature-key --- hotshot-signature-key/Cargo.toml | 14 ++++++++++++++ hotshot-signature-key/src/lib.rs | 6 ++++++ 2 files changed, 20 insertions(+) create mode 100644 hotshot-signature-key/Cargo.toml create mode 100644 hotshot-signature-key/src/lib.rs diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml new file mode 100644 index 0000000000..9035cb5cf4 --- /dev/null +++ b/hotshot-signature-key/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "hotshot-signature-key" +description = "Signature instantiations" +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } + +[dependencies] + +[features] +# default = ["parallel"] +# std = ["ark-std/std"] +# parallel = ["jf-primitives/parallel", "jf-utils/parallel"] diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs new file mode 100644 index 0000000000..a6d50ce4f4 --- /dev/null +++ b/hotshot-signature-key/src/lib.rs @@ -0,0 +1,6 @@ +//! This crates offer implementations of quorum certificates used in HotShot. +#![deny(warnings)] +#![deny(missing_docs)] + +// #[cfg(feature = "demo")] +// pub mod bn254; From 07ee638fa0a6a5dc8c7d782fc7df4c335c10ac30 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Fri, 25 Aug 2023 14:09:37 -0400 Subject: [PATCH 0022/1393] move bn254 module from types package to hotshot-signature-key package --- hotshot-signature-key/Cargo.toml | 15 ++ hotshot-signature-key/src/bn254.rs | 8 + hotshot-signature-key/src/bn254/bn254_priv.rs | 71 ++++++++ hotshot-signature-key/src/bn254/bn254_pub.rs | 170 ++++++++++++++++++ hotshot-signature-key/src/lib.rs | 4 +- 5 files changed, 266 insertions(+), 2 deletions(-) create mode 100644 hotshot-signature-key/src/bn254.rs create mode 100644 hotshot-signature-key/src/bn254/bn254_priv.rs create mode 100644 hotshot-signature-key/src/bn254/bn254_pub.rs diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index 9035cb5cf4..830ffa0133 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -7,8 +7,23 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] +bincode = { workspace = true } +bitvec = { workspace = true } +blake3 = { workspace = true } +custom_debug = { workspace = true } +ethereum-types = { workspace = true } +hotshot-primitives = { workspace = true } +hotshot-types = { path = "../../types" } +hotshot-utils = { path = "../../utils" } +jf-primitives = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { workspace = true } +tracing = { workspace = true } +typenum = { workspace = true } [features] # default = ["parallel"] # std = ["ark-std/std"] # parallel = ["jf-primitives/parallel", "jf-utils/parallel"] +demo = [] diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs new file mode 100644 index 0000000000..fc7273afd5 --- /dev/null +++ b/hotshot-signature-key/src/bn254.rs @@ -0,0 +1,8 @@ +//! Demonstration implementation of the [`SignatureKey`] trait using BN254 +use hotshot_types::traits::signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}; +/// `BN254Priv` implementation +mod bn254_priv; +/// `BN254Pub` implementation +mod bn254_pub; + +pub use self::{bn254_priv::BN254Priv, bn254_pub::BN254Pub}; diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs new file mode 100644 index 0000000000..3b7000eea4 --- /dev/null +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -0,0 +1,71 @@ +use custom_debug::Debug; +use jf_primitives::signatures::bls_over_bn254::{KeyPair as QCKeyPair, SignKey as QCSignKey}; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; + +/// Private key type for a bn254 keypair +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] +pub struct BN254Priv { + /// The private key for this keypair + pub(super) priv_key: QCSignKey, +} + +impl BN254Priv { + /// Generate a new private key from scratch + #[must_use] + pub fn generate() -> Self { + let key_pair = QCKeyPair::generate(&mut rand::thread_rng()); + let priv_key = key_pair.sign_key_ref(); + Self { + priv_key: priv_key.clone(), + } + } + + #[must_use] + /// Get real seed used for random key generation funtion + pub fn get_seed_from_seed_indexed(seed: [u8; 32], index: u64) -> [u8; 32] { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + new_seed + } + + /// Generate a new private key from a seed + #[must_use] + pub fn generate_from_seed(seed: [u8; 32]) -> Self { + let key_pair = QCKeyPair::generate(&mut ChaCha20Rng::from_seed(seed)); + let priv_key = key_pair.sign_key_ref(); + Self { + priv_key: priv_key.clone(), + } + } + + /// Generate a new private key from a seed and a number + /// + /// Hashes the seed and the number together using blake3. This method is + /// useful for testing + #[must_use] + pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> Self { + let new_seed = Self::get_seed_from_seed_indexed(seed, index); + Self::generate_from_seed(new_seed) + } +} + +impl PartialOrd for BN254Priv { + fn partial_cmp(&self, other: &Self) -> Option { + let self_bytes = &self.priv_key.to_string(); + let other_bytes = &other.priv_key.to_string(); + self_bytes.partial_cmp(other_bytes) + } +} + +impl Ord for BN254Priv { + fn cmp(&self, other: &Self) -> Ordering { + let self_bytes = &self.priv_key.to_string(); + let other_bytes = &other.priv_key.to_string(); + self_bytes.cmp(other_bytes) + } +} diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs new file mode 100644 index 0000000000..597fb222af --- /dev/null +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -0,0 +1,170 @@ +use super::{BN254Priv, EncodedPublicKey, EncodedSignature, SignatureKey}; +use bincode::Options; +use bitvec::prelude::*; +use blake3::traits::digest::generic_array::GenericArray; +use ethereum_types::U256; +use hotshot_primitives::qc::{ + bit_vector::{BitVectorQC, QCParams as JFQCParams, StakeTableEntry as JFStakeTableEntry}, + QuorumCertificate, +}; +use hotshot_utils::bincode::bincode_opts; +use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey}, + SignatureScheme, +}; +use serde::{Deserialize, Serialize}; +use std::{cmp::Ordering, fmt::Debug}; +use tracing::{debug, instrument, warn}; +use typenum::U32; + +/// Public key type for an bn254 [`SignatureKey`] pair +/// +/// This type makes use of noise for non-determinisitc signatures. +#[derive(Clone, PartialEq, Eq, Hash, Copy, Serialize, Deserialize, Debug)] + +pub struct BN254Pub { + /// The public key for this keypair + pub_key: VerKey, +} + +impl PartialOrd for BN254Pub { + fn partial_cmp(&self, other: &Self) -> Option { + let self_bytes = &self.pub_key.to_string(); + let other_bytes = &other.pub_key.to_string(); + self_bytes.partial_cmp(other_bytes) + } +} + +impl Ord for BN254Pub { + fn cmp(&self, other: &Self) -> Ordering { + let self_bytes = &self.pub_key.to_string(); + let other_bytes = &other.pub_key.to_string(); + self_bytes.cmp(other_bytes) + } +} + +impl SignatureKey for BN254Pub { + type PrivateKey = BN254Priv; + type StakeTableEntry = JFStakeTableEntry; + type QCParams = JFQCParams< + ::VerificationKey, + ::PublicParameter, + >; + type QCType = ( + ::Signature, + BitVec, + ); + // as AssembledQuorumCertificate>::QC; + + #[instrument(skip(self))] + fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool { + let ver_key = self.pub_key; + let x: Result<::Signature, _> = + bincode_opts().deserialize(&signature.0); + match x { + Ok(s) => { + // This is the validation for QC partial signature before append(). + let generic_msg: &GenericArray = GenericArray::from_slice(data); + BLSOverBN254CurveSignatureScheme::verify(&(), &ver_key, generic_msg, &s).is_ok() + } + Err(_) => false, + } + } + + fn sign(sk: &Self::PrivateKey, data: &[u8]) -> EncodedSignature { + let generic_msg = GenericArray::from_slice(data); + let agg_signature_wrap = BitVectorQC::::sign( + &(), + generic_msg, + &sk.priv_key, + &mut rand::thread_rng(), + ); + match agg_signature_wrap { + Ok(agg_signature) => { + // Convert the signature to bytes and return + let bytes = bincode_opts().serialize(&agg_signature); + match bytes { + Ok(bytes) => EncodedSignature(bytes), + Err(e) => { + warn!(?e, "Failed to serialize signature in sign()"); + EncodedSignature(vec![]) + } + } + } + Err(e) => { + warn!(?e, "Failed to sign"); + EncodedSignature(vec![]) + } + } + } + + fn from_private(private_key: &Self::PrivateKey) -> Self { + let pub_key = VerKey::from(&private_key.priv_key); + Self { pub_key } + } + + fn to_bytes(&self) -> EncodedPublicKey { + let pub_key_bytes = bincode_opts() + .serialize(&self.pub_key) + .expect("This serialization shouldn't be able to fail"); + EncodedPublicKey(pub_key_bytes) + } + + #[instrument] + fn from_bytes(bytes: &EncodedPublicKey) -> Option { + let x: Result = bincode_opts().deserialize(&bytes.0); + match x { + Ok(pub_key) => Some(BN254Pub { pub_key }), + Err(e) => { + debug!(?e, "Failed to deserialize public key"); + None + } + } + } + + fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey) { + let priv_key = Self::PrivateKey::generated_from_seed_indexed(seed, index); + (Self::from_private(&priv_key), priv_key) + } + + fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry { + JFStakeTableEntry { + stake_key: self.pub_key, + stake_amount: U256::from(stake), + } + } + + fn get_public_parameter( + stake_entries: Vec, + threshold: U256, + ) -> Self::QCParams { + JFQCParams { + stake_entries, + threshold, + agg_sig_pp: (), + } + } + + fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool { + let msg = GenericArray::from_slice(data); + BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() + } + + fn get_sig_proof( + signature: &Self::QCType, + ) -> ( + ::Signature, + BitVec, + ) { + signature.clone() + } + + fn assemble( + real_qc_pp: &Self::QCParams, + signers: &BitSlice, + sigs: &[::Signature], + ) -> Self::QCType { + BitVectorQC::::assemble(real_qc_pp, signers, sigs) + .expect("this assembling shouldn't fail") + } +} diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs index a6d50ce4f4..2f31dc2e57 100644 --- a/hotshot-signature-key/src/lib.rs +++ b/hotshot-signature-key/src/lib.rs @@ -2,5 +2,5 @@ #![deny(warnings)] #![deny(missing_docs)] -// #[cfg(feature = "demo")] -// pub mod bn254; +#[cfg(feature = "demo")] +pub mod bn254; From 156c1ea53321a0261f21e23a4e47b61a61761b85 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Fri, 25 Aug 2023 19:01:40 -0400 Subject: [PATCH 0023/1393] WIP: replace dependency hotshot-primitives with hotshot-qc, build fails --- hotshot-signature-key/Cargo.toml | 2 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index 830ffa0133..b3d4f1cb7d 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -12,7 +12,7 @@ bitvec = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } ethereum-types = { workspace = true } -hotshot-primitives = { workspace = true } +hotshot-qc = { path = "../../crates/hotshot-qc" } hotshot-types = { path = "../../types" } hotshot-utils = { path = "../../utils" } jf-primitives = { workspace = true } diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 597fb222af..ebbf4bdc6a 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -3,10 +3,10 @@ use bincode::Options; use bitvec::prelude::*; use blake3::traits::digest::generic_array::GenericArray; use ethereum_types::U256; -use hotshot_primitives::qc::{ - bit_vector::{BitVectorQC, QCParams as JFQCParams, StakeTableEntry as JFStakeTableEntry}, - QuorumCertificate, +use hotshot_qc::bit_vector::{ + BitVectorQC, QCParams as JFQCParams, StakeTableEntry as JFStakeTableEntry, }; +use hotshot_types::traits::qc::QuorumCertificate; use hotshot_utils::bincode::bincode_opts; use jf_primitives::signatures::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey}, From 3710f9b24f38b8eaa863661d29414d1328335fb7 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Fri, 25 Aug 2023 19:35:00 -0400 Subject: [PATCH 0024/1393] add bit_vector_old for compatibility with bn254_pub --- hotshot-qc/src/bit_vector_old.rs | 316 +++++++++++++++++++ hotshot-qc/src/lib.rs | 1 + hotshot-signature-key/Cargo.toml | 2 + hotshot-signature-key/src/bn254/bn254_pub.rs | 2 +- 4 files changed, 320 insertions(+), 1 deletion(-) create mode 100644 hotshot-qc/src/bit_vector_old.rs diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs new file mode 100644 index 0000000000..43c53afe84 --- /dev/null +++ b/hotshot-qc/src/bit_vector_old.rs @@ -0,0 +1,316 @@ +//! Implementation for BitVectorQC that uses BLS signature + Bit vector. +//! See more details in HotShot paper. + +use ark_std::{ + fmt::Debug, + format, + marker::PhantomData, + rand::{CryptoRng, RngCore}, + vec, + vec::Vec, +}; +use bitvec::prelude::*; +use ethereum_types::U256; +use generic_array::GenericArray; +use hotshot_types::traits::qc::QuorumCertificate; +use jf_primitives::errors::PrimitivesError; +use jf_primitives::errors::PrimitivesError::ParameterError; +use jf_primitives::signatures::AggregateableSignatureSchemes; +use serde::{Deserialize, Serialize}; +use typenum::U32; + +/// An implementation of QC using BLS signature and a bit-vector. +#[derive(Serialize, Deserialize)] +pub struct BitVectorQC Deserialize<'a>>( + PhantomData, +); + +/// Stake table entry +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash, Eq)] +pub struct StakeTableEntry { + /// Stake table key + pub stake_key: V, + /// Stake table value + pub stake_amount: U256, +} + +/// Public parameters of [`BitVectorQC`] +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] +pub struct QCParams { + /// the stake table (snapshot) this QC is verified against + pub stake_entries: Vec>, + /// threshold for the accumulated "weight" of votes to form a QC + pub threshold: U256, + /// public parameter for the aggregated signature scheme + pub agg_sig_pp: P, +} + +impl QuorumCertificate for BitVectorQC +where + A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, +{ + type QCProverParams = QCParams; + + // TODO: later with SNARKs we'll use a smaller verifier parameter + type QCVerifierParams = QCParams; + + type QC = (A::Signature, BitVec); + type MessageLength = U32; + type QuorumSize = U256; + + fn sign( + agg_sig_pp: &A::PublicParameter, + message: &GenericArray, + sk: &A::SigningKey, + prng: &mut R, + ) -> Result { + A::sign(agg_sig_pp, sk, message, prng) + } + + fn assemble( + qc_pp: &Self::QCProverParams, + signers: &BitSlice, + sigs: &[A::Signature], + ) -> Result { + if signers.len() != qc_pp.stake_entries.len() { + return Err(ParameterError(format!( + "bit vector len {} != the number of stake entries {}", + signers.len(), + qc_pp.stake_entries.len(), + ))); + } + let total_weight: U256 = + qc_pp + .stake_entries + .iter() + .zip(signers.iter()) + .fold(U256::zero(), |acc, (entry, b)| { + if *b { + acc + entry.stake_amount + } else { + acc + } + }); + if total_weight < qc_pp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_pp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_pp.stake_entries.iter().zip(signers.iter()) { + if *b { + ver_keys.push(entry.stake_key.clone()); + } + } + if ver_keys.len() != sigs.len() { + return Err(ParameterError(format!( + "the number of ver_keys {} != the number of partial signatures {}", + ver_keys.len(), + sigs.len(), + ))); + } + let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; + + Ok((sig, signers.into())) + } + + fn check( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result { + let (sig, signers) = qc; + if signers.len() != qc_vp.stake_entries.len() { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + qc_vp.stake_entries.len(), + ))); + } + let total_weight: U256 = + qc_vp + .stake_entries + .iter() + .zip(signers.iter()) + .fold(U256::zero(), |acc, (entry, b)| { + if *b { + acc + entry.stake_amount + } else { + acc + } + }); + if total_weight < qc_vp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_vp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_vp.stake_entries.iter().zip(signers.iter()) { + if *b { + ver_keys.push(entry.stake_key.clone()); + } + } + A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; + + Ok(total_weight) + } + + fn trace( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray<::MessageUnit, Self::MessageLength>, + qc: &Self::QC, + ) -> Result::VerificationKey>, PrimitivesError> { + let (_sig, signers) = qc; + if signers.len() != qc_vp.stake_entries.len() { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + qc_vp.stake_entries.len(), + ))); + } + + Self::check(qc_vp, message, qc)?; + + let signer_pks: Vec<_> = qc_vp + .stake_entries + .iter() + .zip(signers.iter()) + .filter(|(_, b)| **b) + .map(|(pk, _)| pk.stake_key.clone()) + .collect(); + Ok(signer_pks) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use jf_primitives::signatures::bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}; + use jf_primitives::signatures::SignatureScheme; + + macro_rules! test_quorum_certificate { + ($aggsig:tt) => { + let mut rng = jf_utils::test_rng(); + let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); + let key_pair1 = KeyPair::generate(&mut rng); + let key_pair2 = KeyPair::generate(&mut rng); + let key_pair3 = KeyPair::generate(&mut rng); + let entry1 = StakeTableEntry { + stake_key: key_pair1.ver_key(), + stake_amount: U256::from(3u8), + }; + let entry2 = StakeTableEntry { + stake_key: key_pair2.ver_key(), + stake_amount: U256::from(5u8), + }; + let entry3 = StakeTableEntry { + stake_key: key_pair3.ver_key(), + stake_amount: U256::from(7u8), + }; + let qc_pp = QCParams { + stake_entries: vec![entry1, entry2, entry3], + threshold: U256::from(10u8), + agg_sig_pp, + }; + let msg = [72u8; 32]; + let sig1 = BitVectorQC::<$aggsig>::sign( + &agg_sig_pp, + &msg.into(), + key_pair1.sign_key_ref(), + &mut rng, + ) + .unwrap(); + let sig2 = BitVectorQC::<$aggsig>::sign( + &agg_sig_pp, + &msg.into(), + key_pair2.sign_key_ref(), + &mut rng, + ) + .unwrap(); + let sig3 = BitVectorQC::<$aggsig>::sign( + &agg_sig_pp, + &msg.into(), + key_pair3.sign_key_ref(), + &mut rng, + ) + .unwrap(); + + // happy path + let signers = bitvec![0, 1, 1]; + let qc = BitVectorQC::<$aggsig>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone(), sig3.clone()], + ) + .unwrap(); + assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert_eq!( + BitVectorQC::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + vec![key_pair2.ver_key(), key_pair3.ver_key()], + ); + + // Check the QC and the QCParams can be serialized / deserialized + assert_eq!( + qc, + bincode::deserialize(&bincode::serialize(&qc).unwrap()).unwrap() + ); + + assert_eq!( + qc_pp, + bincode::deserialize(&bincode::serialize(&qc_pp).unwrap()).unwrap() + ); + + // bad paths + // number of signatures unmatch + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone()] + ) + .is_err()); + // total weight under threshold + let active_bad = bitvec![1, 1, 0]; + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + active_bad.as_bitslice(), + &[sig1.clone(), sig2.clone()] + ) + .is_err()); + // wrong bool vector length + let active_bad_2 = bitvec![0, 1, 1, 0]; + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + active_bad_2.as_bitslice(), + &[sig2, sig3], + ) + .is_err()); + + assert!(BitVectorQC::<$aggsig>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad) + ) + .is_err()); + assert!(BitVectorQC::<$aggsig>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad_2) + ) + .is_err()); + let bad_msg = [70u8; 32]; + assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + + let bad_sig = &sig1; + assert!( + BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) + .is_err() + ); + }; + } + #[test] + fn test_quorum_certificate() { + test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); + } +} diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs index 5b2377d627..7c56aa6393 100644 --- a/hotshot-qc/src/lib.rs +++ b/hotshot-qc/src/lib.rs @@ -3,4 +3,5 @@ #![deny(missing_docs)] pub mod bit_vector; +pub mod bit_vector_old; pub mod snarked; diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index b3d4f1cb7d..75b462bef9 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -26,4 +26,6 @@ typenum = { workspace = true } # default = ["parallel"] # std = ["ark-std/std"] # parallel = ["jf-primitives/parallel", "jf-utils/parallel"] +full-ci = ["demo"] # needed only because `just test_*` needs `demo` code +tokio-ci = ["demo"] # needed only because `just test_*` needs `demo` code demo = [] diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index ebbf4bdc6a..c25fad4be0 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -3,7 +3,7 @@ use bincode::Options; use bitvec::prelude::*; use blake3::traits::digest::generic_array::GenericArray; use ethereum_types::U256; -use hotshot_qc::bit_vector::{ +use hotshot_qc::bit_vector_old::{ BitVectorQC, QCParams as JFQCParams, StakeTableEntry as JFStakeTableEntry, }; use hotshot_types::traits::qc::QuorumCertificate; From 128063e72be1203578a0443406e1b7cc7ea12e00 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Fri, 25 Aug 2023 22:28:41 -0400 Subject: [PATCH 0025/1393] jf-relation, jf-utils inherited workspace deps --- hotshot-qc/Cargo.toml | 4 ++-- hotshot-stake-table/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 1b5779e8ee..e072aa8e0d 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -22,8 +22,8 @@ ethereum-types = { workspace = true } generic-array = "0.14.7" hotshot-types = { path = "../../types" } jf-primitives = { workspace = true } -jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", branch = "hotshot-compat" } -jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +jf-relation = { workspace = true } +jf-utils = { workspace = true } serde = { workspace = true } typenum = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 38abf7a23e..870e87cfcd 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -19,8 +19,8 @@ ethereum-types = { workspace = true } generic-array = "0.14.7" hotshot-types = { path = "../../types" } jf-primitives = { workspace = true } -jf-relation = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } -jf-utils = { git = "https://github.com/espressosystems/jellyfish", branch = "hotshot-compat" } +jf-relation = { workspace = true } +jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } typenum = { workspace = true } From 9083b3faf61cb9e3647088c2f87a3123906f4e43 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Sun, 27 Aug 2023 11:27:46 -0700 Subject: [PATCH 0026/1393] Add just command for linting imports --- hotshot-qc/src/bit_vector_old.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 43c53afe84..36ba155da8 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -13,9 +13,10 @@ use bitvec::prelude::*; use ethereum_types::U256; use generic_array::GenericArray; use hotshot_types::traits::qc::QuorumCertificate; -use jf_primitives::errors::PrimitivesError; -use jf_primitives::errors::PrimitivesError::ParameterError; -use jf_primitives::signatures::AggregateableSignatureSchemes; +use jf_primitives::{ + errors::{PrimitivesError, PrimitivesError::ParameterError}, + signatures::AggregateableSignatureSchemes, +}; use serde::{Deserialize, Serialize}; use typenum::U32; @@ -187,8 +188,10 @@ where #[cfg(test)] mod tests { use super::*; - use jf_primitives::signatures::bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}; - use jf_primitives::signatures::SignatureScheme; + use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, + SignatureScheme, + }; macro_rules! test_quorum_certificate { ($aggsig:tt) => { From d58fb23bad79ca635c89174cc5d39fb4e8852f23 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 27 Aug 2023 10:10:03 -0400 Subject: [PATCH 0027/1393] feat: different feature flags --- hotshot-signature-key/Cargo.toml | 8 -------- hotshot-signature-key/src/lib.rs | 1 - 2 files changed, 9 deletions(-) diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index 75b462bef9..70a6b6aca0 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -21,11 +21,3 @@ rand_chacha = { workspace = true } serde = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } - -[features] -# default = ["parallel"] -# std = ["ark-std/std"] -# parallel = ["jf-primitives/parallel", "jf-utils/parallel"] -full-ci = ["demo"] # needed only because `just test_*` needs `demo` code -tokio-ci = ["demo"] # needed only because `just test_*` needs `demo` code -demo = [] diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs index 2f31dc2e57..2e8f711bab 100644 --- a/hotshot-signature-key/src/lib.rs +++ b/hotshot-signature-key/src/lib.rs @@ -2,5 +2,4 @@ #![deny(warnings)] #![deny(missing_docs)] -#[cfg(feature = "demo")] pub mod bn254; From 6d773daab680c2683b8c1988d087cab430fe6396 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 27 Aug 2023 11:02:49 -0400 Subject: [PATCH 0028/1393] feat: crates directory used by all crates --- consensus/Cargo.toml | 33 + consensus/src/da_member.rs | 214 +++ consensus/src/leader.rs | 29 + consensus/src/lib.rs | 338 ++++ consensus/src/next_leader.rs | 26 + consensus/src/replica.rs | 0 consensus/src/sequencing_leader.rs | 555 ++++++ consensus/src/sequencing_replica.rs | 642 +++++++ consensus/src/traits.rs | 171 ++ consensus/src/utils.rs | 82 + hotshot-qc/Cargo.toml | 2 +- hotshot-signature-key/Cargo.toml | 6 +- hotshot-stake-table/Cargo.toml | 2 +- hotshot/Cargo.toml | 122 ++ hotshot/examples/infra/mod.rs | 98 + hotshot/examples/infra/modDA.rs | 722 ++++++++ hotshot/examples/libp2p/orchestrator.rs | 24 + hotshot/examples/libp2p/types.rs | 60 + hotshot/examples/libp2p/validator.rs | 23 + hotshot/examples/web-server-da/README.md | 29 + .../examples/web-server-da/multi-validator.rs | 69 + .../web-server-da/multi-web-server.rs | 59 + .../examples/web-server-da/orchestrator.rs | 40 + hotshot/examples/web-server-da/types.rs | 78 + hotshot/examples/web-server-da/validator.rs | 41 + hotshot/examples/web-server-da/web-server.rs | 29 + hotshot/src/certificate.rs | 1 + hotshot/src/demos.rs | 8 + hotshot/src/demos/sdemo.rs | 384 ++++ hotshot/src/demos/vdemo.rs | 607 ++++++ hotshot/src/documentation.rs | 19 + hotshot/src/lib.rs | 1650 +++++++++++++++++ hotshot/src/tasks/mod.rs | 565 ++++++ hotshot/src/traits.rs | 28 + hotshot/src/traits/election.rs | 4 + .../src/traits/election/static_committee.rs | 192 ++ hotshot/src/traits/election/vrf.rs | 1024 ++++++++++ hotshot/src/traits/networking.rs | 56 + .../src/traits/networking/libp2p_network.rs | 884 +++++++++ .../src/traits/networking/memory_network.rs | 978 ++++++++++ .../networking/web_server_libp2p_fallback.rs | 318 ++++ .../traits/networking/web_server_network.rs | 1112 +++++++++++ .../networking/web_sever_libp2p_fallback.rs | 318 ++++ hotshot/src/traits/node_implementation.rs | 8 + hotshot/src/traits/storage.rs | 5 + hotshot/src/traits/storage/atomic_storage.rs | 261 +++ .../atomic_storage/dual_key_value_store.rs | 215 +++ .../storage/atomic_storage/hash_map_store.rs | 91 + hotshot/src/traits/storage/memory_storage.rs | 211 +++ hotshot/src/types.rs | 7 + hotshot/src/types/event.rs | 3 + hotshot/src/types/handle.rs | 414 +++++ libp2p-networking/.cargo/config | 2 + libp2p-networking/.gitignore | 4 + libp2p-networking/Cargo.toml | 78 + libp2p-networking/README.md | 111 ++ .../examples/common/lossy_network.rs | 579 ++++++ libp2p-networking/examples/common/mod.rs | 807 ++++++++ libp2p-networking/examples/common/web.rs | 114 ++ libp2p-networking/examples/counter.rs | 54 + libp2p-networking/flamegraph.sh | 1 + libp2p-networking/src/lib.rs | 21 + libp2p-networking/src/message.rs | 12 + .../src/network/behaviours/dht.rs | 700 +++++++ .../src/network/behaviours/direct_message.rs | 311 ++++ .../behaviours/direct_message_codec.rs | 95 + .../network/behaviours/exponential_backoff.rs | 75 + .../src/network/behaviours/gossip.rs | 265 +++ .../src/network/behaviours/mod.rs | 15 + libp2p-networking/src/network/def.rs | 181 ++ libp2p-networking/src/network/error.rs | 105 ++ libp2p-networking/src/network/mod.rs | 256 +++ libp2p-networking/src/network/node.rs | 622 +++++++ libp2p-networking/src/network/node/config.rs | 65 + libp2p-networking/src/network/node/handle.rs | 681 +++++++ libp2p-networking/test.py | 121 ++ libp2p-networking/tests/common/mod.rs | 264 +++ libp2p-networking/tests/counter.rs | 676 +++++++ libp2p-networking/web/index.html | 105 ++ orchestrator/Cargo.toml | 31 + orchestrator/README.md | 7 + orchestrator/api.toml | 50 + orchestrator/default-libp2p-run-config.toml | 78 + orchestrator/default-run-config.toml | 68 + .../default-web-server-run-config.toml | 77 + orchestrator/src/client.rs | 136 ++ orchestrator/src/config.rs | 236 +++ orchestrator/src/lib.rs | 255 +++ task-impls/Cargo.toml | 34 + task-impls/HotShot_event_architecture.drawio | 294 +++ task-impls/HotShot_event_architecture.png | Bin 0 -> 123290 bytes task-impls/README.md | 6 + task-impls/src/consensus.rs | 1306 +++++++++++++ task-impls/src/da.rs | 681 +++++++ task-impls/src/events.rs | 65 + task-impls/src/harness.rs | 113 ++ task-impls/src/lib.rs | 30 + task-impls/src/network.rs | 356 ++++ task-impls/src/view_sync.rs | 1052 +++++++++++ task/Cargo.toml | 25 + task/src/event_stream.rs | 268 +++ task/src/global_registry.rs | 214 +++ task/src/lib.rs | 393 ++++ task/src/task.rs | 637 +++++++ task/src/task_impls.rs | 456 +++++ task/src/task_launcher.rs | 67 + task/src/task_state.rs | 182 ++ testing/.gitignore | 2 + testing/Cargo.toml | 48 + testing/README.md | 72 + testing/src/completion_task.rs | 127 ++ testing/src/lib.rs | 43 + testing/src/network_reliability.rs | 163 ++ testing/src/node_ctx.rs | 56 + testing/src/node_types.rs | 643 +++++++ testing/src/overall_safety_task.rs | 611 ++++++ testing/src/per_node_safety_task.rs | 258 +++ testing/src/soundness_task.rs | 1 + testing/src/spinning_task.rs | 139 ++ testing/src/task_helpers.rs | 165 ++ testing/src/test_builder.rs | 276 +++ testing/src/test_launcher.rs | 209 +++ testing/src/test_runner.rs | 259 +++ testing/src/timeout_task.rs | 1 + testing/src/txn_task.rs | 160 ++ testing/tests/atomic_storage.rs | 222 +++ testing/tests/basic.rs | 170 ++ testing/tests/consensus_task.rs | 171 ++ testing/tests/da_task.rs | 92 + testing/tests/fallback_network.rs | 58 + testing/tests/libp2p.rs | 58 + testing/tests/lossy.rs | 118 ++ testing/tests/network_task.rs | 100 + testing/tests/timeout.rs | 49 + testing/tests/web_server.rs | 39 + types/Cargo.toml | 59 + types/src/certificate.rs | 382 ++++ types/src/consensus.rs | 338 ++++ types/src/constants.rs | 14 + types/src/data.rs | 956 ++++++++++ types/src/error.rs | 116 ++ types/src/event.rs | 64 + types/src/lib.rs | 70 + types/src/message.rs | 434 +++++ types/src/traits.rs | 15 + types/src/traits/block_contents.rs | 179 ++ types/src/traits/consensus_api.rs | 171 ++ types/src/traits/consensus_type.rs | 0 .../consensus_type/sequencing_consensus.rs | 0 .../consensus_type/validating_consensus.rs | 16 + types/src/traits/election.rs | 1357 ++++++++++++++ types/src/traits/metrics.rs | 286 +++ types/src/traits/network.rs | 370 ++++ types/src/traits/node_implementation.rs | 582 ++++++ types/src/traits/qc.rs | 91 + types/src/traits/signature_key.rs | 127 ++ types/src/traits/stake_table.rs | 205 ++ types/src/traits/state.rs | 201 ++ types/src/traits/storage.rs | 179 ++ types/src/utils.rs | 82 + types/src/vote.rs | 474 +++++ utils/Cargo.toml | 10 + utils/src/bincode.rs | 28 + utils/src/lib.rs | 13 + web_server/Cargo.toml | 42 + web_server/README.md | 14 + web_server/api.toml | 139 ++ web_server/src/config.rs | 62 + web_server/src/lib.rs | 654 +++++++ 169 files changed, 38329 insertions(+), 5 deletions(-) create mode 100644 consensus/Cargo.toml create mode 100644 consensus/src/da_member.rs create mode 100644 consensus/src/leader.rs create mode 100644 consensus/src/lib.rs create mode 100644 consensus/src/next_leader.rs create mode 100644 consensus/src/replica.rs create mode 100644 consensus/src/sequencing_leader.rs create mode 100644 consensus/src/sequencing_replica.rs create mode 100644 consensus/src/traits.rs create mode 100644 consensus/src/utils.rs create mode 100644 hotshot/Cargo.toml create mode 100644 hotshot/examples/infra/mod.rs create mode 100644 hotshot/examples/infra/modDA.rs create mode 100644 hotshot/examples/libp2p/orchestrator.rs create mode 100644 hotshot/examples/libp2p/types.rs create mode 100644 hotshot/examples/libp2p/validator.rs create mode 100644 hotshot/examples/web-server-da/README.md create mode 100644 hotshot/examples/web-server-da/multi-validator.rs create mode 100644 hotshot/examples/web-server-da/multi-web-server.rs create mode 100644 hotshot/examples/web-server-da/orchestrator.rs create mode 100644 hotshot/examples/web-server-da/types.rs create mode 100644 hotshot/examples/web-server-da/validator.rs create mode 100644 hotshot/examples/web-server-da/web-server.rs create mode 100644 hotshot/src/certificate.rs create mode 100644 hotshot/src/demos.rs create mode 100644 hotshot/src/demos/sdemo.rs create mode 100644 hotshot/src/demos/vdemo.rs create mode 100644 hotshot/src/documentation.rs create mode 100644 hotshot/src/lib.rs create mode 100644 hotshot/src/tasks/mod.rs create mode 100644 hotshot/src/traits.rs create mode 100644 hotshot/src/traits/election.rs create mode 100644 hotshot/src/traits/election/static_committee.rs create mode 100644 hotshot/src/traits/election/vrf.rs create mode 100644 hotshot/src/traits/networking.rs create mode 100644 hotshot/src/traits/networking/libp2p_network.rs create mode 100644 hotshot/src/traits/networking/memory_network.rs create mode 100644 hotshot/src/traits/networking/web_server_libp2p_fallback.rs create mode 100644 hotshot/src/traits/networking/web_server_network.rs create mode 100644 hotshot/src/traits/networking/web_sever_libp2p_fallback.rs create mode 100644 hotshot/src/traits/node_implementation.rs create mode 100644 hotshot/src/traits/storage.rs create mode 100644 hotshot/src/traits/storage/atomic_storage.rs create mode 100644 hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs create mode 100644 hotshot/src/traits/storage/atomic_storage/hash_map_store.rs create mode 100644 hotshot/src/traits/storage/memory_storage.rs create mode 100644 hotshot/src/types.rs create mode 100644 hotshot/src/types/event.rs create mode 100644 hotshot/src/types/handle.rs create mode 100644 libp2p-networking/.cargo/config create mode 100644 libp2p-networking/.gitignore create mode 100644 libp2p-networking/Cargo.toml create mode 100644 libp2p-networking/README.md create mode 100644 libp2p-networking/examples/common/lossy_network.rs create mode 100644 libp2p-networking/examples/common/mod.rs create mode 100644 libp2p-networking/examples/common/web.rs create mode 100644 libp2p-networking/examples/counter.rs create mode 100755 libp2p-networking/flamegraph.sh create mode 100644 libp2p-networking/src/lib.rs create mode 100644 libp2p-networking/src/message.rs create mode 100644 libp2p-networking/src/network/behaviours/dht.rs create mode 100644 libp2p-networking/src/network/behaviours/direct_message.rs create mode 100644 libp2p-networking/src/network/behaviours/direct_message_codec.rs create mode 100644 libp2p-networking/src/network/behaviours/exponential_backoff.rs create mode 100644 libp2p-networking/src/network/behaviours/gossip.rs create mode 100644 libp2p-networking/src/network/behaviours/mod.rs create mode 100644 libp2p-networking/src/network/def.rs create mode 100644 libp2p-networking/src/network/error.rs create mode 100644 libp2p-networking/src/network/mod.rs create mode 100644 libp2p-networking/src/network/node.rs create mode 100644 libp2p-networking/src/network/node/config.rs create mode 100644 libp2p-networking/src/network/node/handle.rs create mode 100755 libp2p-networking/test.py create mode 100644 libp2p-networking/tests/common/mod.rs create mode 100644 libp2p-networking/tests/counter.rs create mode 100644 libp2p-networking/web/index.html create mode 100644 orchestrator/Cargo.toml create mode 100644 orchestrator/README.md create mode 100644 orchestrator/api.toml create mode 100644 orchestrator/default-libp2p-run-config.toml create mode 100644 orchestrator/default-run-config.toml create mode 100644 orchestrator/default-web-server-run-config.toml create mode 100644 orchestrator/src/client.rs create mode 100644 orchestrator/src/config.rs create mode 100644 orchestrator/src/lib.rs create mode 100644 task-impls/Cargo.toml create mode 100644 task-impls/HotShot_event_architecture.drawio create mode 100644 task-impls/HotShot_event_architecture.png create mode 100644 task-impls/README.md create mode 100644 task-impls/src/consensus.rs create mode 100644 task-impls/src/da.rs create mode 100644 task-impls/src/events.rs create mode 100644 task-impls/src/harness.rs create mode 100644 task-impls/src/lib.rs create mode 100644 task-impls/src/network.rs create mode 100644 task-impls/src/view_sync.rs create mode 100644 task/Cargo.toml create mode 100644 task/src/event_stream.rs create mode 100644 task/src/global_registry.rs create mode 100644 task/src/lib.rs create mode 100644 task/src/task.rs create mode 100644 task/src/task_impls.rs create mode 100644 task/src/task_launcher.rs create mode 100644 task/src/task_state.rs create mode 100644 testing/.gitignore create mode 100644 testing/Cargo.toml create mode 100644 testing/README.md create mode 100644 testing/src/completion_task.rs create mode 100644 testing/src/lib.rs create mode 100644 testing/src/network_reliability.rs create mode 100644 testing/src/node_ctx.rs create mode 100644 testing/src/node_types.rs create mode 100644 testing/src/overall_safety_task.rs create mode 100644 testing/src/per_node_safety_task.rs create mode 100644 testing/src/soundness_task.rs create mode 100644 testing/src/spinning_task.rs create mode 100644 testing/src/task_helpers.rs create mode 100644 testing/src/test_builder.rs create mode 100644 testing/src/test_launcher.rs create mode 100644 testing/src/test_runner.rs create mode 100644 testing/src/timeout_task.rs create mode 100644 testing/src/txn_task.rs create mode 100644 testing/tests/atomic_storage.rs create mode 100644 testing/tests/basic.rs create mode 100644 testing/tests/consensus_task.rs create mode 100644 testing/tests/da_task.rs create mode 100644 testing/tests/fallback_network.rs create mode 100644 testing/tests/libp2p.rs create mode 100644 testing/tests/lossy.rs create mode 100644 testing/tests/network_task.rs create mode 100644 testing/tests/timeout.rs create mode 100644 testing/tests/web_server.rs create mode 100644 types/Cargo.toml create mode 100644 types/src/certificate.rs create mode 100644 types/src/consensus.rs create mode 100644 types/src/constants.rs create mode 100644 types/src/data.rs create mode 100644 types/src/error.rs create mode 100644 types/src/event.rs create mode 100644 types/src/lib.rs create mode 100644 types/src/message.rs create mode 100644 types/src/traits.rs create mode 100644 types/src/traits/block_contents.rs create mode 100644 types/src/traits/consensus_api.rs create mode 100644 types/src/traits/consensus_type.rs create mode 100644 types/src/traits/consensus_type/sequencing_consensus.rs create mode 100644 types/src/traits/consensus_type/validating_consensus.rs create mode 100644 types/src/traits/election.rs create mode 100644 types/src/traits/metrics.rs create mode 100644 types/src/traits/network.rs create mode 100644 types/src/traits/node_implementation.rs create mode 100644 types/src/traits/qc.rs create mode 100644 types/src/traits/signature_key.rs create mode 100644 types/src/traits/stake_table.rs create mode 100644 types/src/traits/state.rs create mode 100644 types/src/traits/storage.rs create mode 100644 types/src/utils.rs create mode 100644 types/src/vote.rs create mode 100644 utils/Cargo.toml create mode 100644 utils/src/bincode.rs create mode 100644 utils/src/lib.rs create mode 100644 web_server/Cargo.toml create mode 100644 web_server/README.md create mode 100644 web_server/api.toml create mode 100644 web_server/src/config.rs create mode 100644 web_server/src/lib.rs diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml new file mode 100644 index 0000000000..157be985ea --- /dev/null +++ b/consensus/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "hotshot-consensus" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = [] + +[dependencies] +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +# TODO ed: Delete this dependency after https://github.com/EspressoSystems/HotShot/issues/614 is finished +bincode = { workspace = true } +blake3 = { workspace = true, features = ["traits-preview"] } +commit = { workspace = true } +custom_debug = { workspace = true } +derivative = "2.2" +either = { workspace = true } +futures = { workspace = true } +hotshot-types = { path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } +snafu = { workspace = true } +tracing = { workspace = true } +time = { workspace = true } +bitvec = { workspace = true } +jf-primitives = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/consensus/src/da_member.rs b/consensus/src/da_member.rs new file mode 100644 index 0000000000..977a7e8fc1 --- /dev/null +++ b/consensus/src/da_member.rs @@ -0,0 +1,214 @@ +//! Contains the [`DAMember`] struct used for the committee member step in the consensus algorithm +//! with DA committee, i.e. in the sequencing consensus. + +use crate::{ + utils::{View, ViewInner}, + Consensus, SequencingConsensusApi, +}; +use async_compatibility_layer::channel::UnboundedReceiver; +use async_lock::{Mutex, RwLock}; +use commit::Committable; +use either::{Left, Right}; +use hotshot_types::{ + certificate::QuorumCertificate, + data::SequencingLeaf, + message::{ + ConsensusMessageType, ProcessedCommitteeConsensusMessage, ProcessedGeneralConsensusMessage, + ProcessedSequencingMessage, SequencingMessage, + }, + traits::{ + election::{CommitteeExchangeType, ConsensusExchange}, + node_implementation::{ + CommitteeEx, CommitteeProposalType, CommitteeVote, NodeImplementation, NodeType, + }, + signature_key::SignatureKey, + }, +}; +use std::{marker::PhantomData, sync::Arc}; +use tracing::{error, info, instrument, warn}; + +/// This view's DA committee member. +#[derive(Debug, Clone)] +pub struct DAMember< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// ID of node. + pub id: u64, + /// Reference to consensus. DA committee member will require a write lock on this. + pub consensus: Arc>>>, + /// Channel for accepting leader proposals and timeouts messages. + #[allow(clippy::type_complexity)] + pub proposal_collection_chan: + Arc>>>, + /// View number this view is executing in. + pub cur_view: TYPES::Time, + /// The High QC. + pub high_qc: QuorumCertificate>, + /// HotShot consensus API. + pub api: A, + + /// the committee exchange + pub exchange: Arc>, + + /// needed for type checking + pub _pd: PhantomData, +} + +impl< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > DAMember +{ + /// DA committee member task that spins until a valid DA proposal can be signed or timeout is + /// hit. + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Member Task", level = "error")] + #[allow(clippy::type_complexity)] + async fn find_valid_msg<'a>( + &self, + view_leader_key: TYPES::SignatureKey, + ) -> Option { + let lock = self.proposal_collection_chan.lock().await; + let leaf = loop { + let msg = lock.recv().await; + info!("recv-ed message {:?}", msg.clone()); + if let Ok(msg) = msg { + // If the message is for a different view number, skip it. + if Into::>::into(msg.clone()).view_number() != self.cur_view + { + continue; + } + match msg { + Left(general_message) => { + match general_message { + ProcessedGeneralConsensusMessage::InternalTrigger(_trigger) => { + warn!("DA committee member receieved an internal trigger message. This is not what the member expects. Skipping."); + // After run_view refactor we will handle timeout triggers properly + return None; + } + ProcessedGeneralConsensusMessage::Vote(_, _) => { + // Should only be for DA leader, never member. + warn!("DA committee member receieved a vote message. This is not what the member expects. Skipping."); + continue; + } + ProcessedGeneralConsensusMessage::Proposal(_, _) => { + warn!("DA committee member receieved a Non DA Proposal message. This is not what the member expects. Skipping."); + continue; + } + ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => { + todo!() + } + ProcessedGeneralConsensusMessage::ViewSyncVote(_) => { + todo!() + } + } + } + Right(committee_message) => { + match committee_message { + ProcessedCommitteeConsensusMessage::DAProposal(p, sender) => { + if view_leader_key != sender { + continue; + } + let block_commitment = p.data.deltas.commit(); + if !view_leader_key + .validate(&p.signature, block_commitment.as_ref()) + { + warn!(?p.signature, "Could not verify proposal."); + continue; + } + let vote_token = self.exchange.make_vote_token(self.cur_view); + match vote_token { + Err(e) => { + error!( + "Failed to generate vote token for {:?} {:?}", + self.cur_view, e + ); + } + Ok(None) => { + info!( + "We were not chosen for DA committee on {:?}", + self.cur_view + ); + } + Ok(Some(vote_token)) => { + info!( + "We were chosen for DA committee on {:?}", + self.cur_view + ); + + // Generate and send vote + let message = self.exchange.create_da_message( + block_commitment, + self.cur_view, + vote_token, + ); + + info!("Sending vote to the leader {:?}", message); + + let consensus = self.consensus.read().await; + if self.api.send_direct_da_message::, CommitteeVote>(sender, SequencingMessage(Right(message))).await.is_err() { + consensus.metrics.failed_to_send_messages.add(1); + warn!("Failed to send vote to the leader"); + } else { + consensus.metrics.outgoing_direct_messages.add(1); + } + } + } + break p.data.deltas; + } + ProcessedCommitteeConsensusMessage::DAVote(_, _) => { + // Should only be for DA leader, never member. + warn!("DA committee member receieved a vote message. This is not what the member expects. Skipping."); + continue; + } + ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { + continue; + } + } + } + } + } + // fall through logic if we did not receive successfully from channel + warn!("DA committee member did not receive successfully from channel."); + return None; + }; + Some(leaf) + } + + /// Run one view of DA committee member. + #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "DA Member Task", level = "error")] + pub async fn run_view(self) -> QuorumCertificate> { + info!("DA Committee Member task started!"); + let view_leader_key = self.exchange.get_leader(self.cur_view); + + let maybe_block = self.find_valid_msg(view_leader_key).await; + + if let Some(block) = maybe_block { + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(self.cur_view).or_insert(View { + view_inner: ViewInner::DA { + block: block.commit(), + }, + }); + + // Record the block we have promised to make available. + consensus.saved_blocks.insert(block); + }; + + self.high_qc + } +} diff --git a/consensus/src/leader.rs b/consensus/src/leader.rs new file mode 100644 index 0000000000..b7872eafe2 --- /dev/null +++ b/consensus/src/leader.rs @@ -0,0 +1,29 @@ +//! Contains the [`ValidatingLeader`] struct used for the leader step in the hotstuff consensus algorithm. + +use crate::{CommitmentMap, Consensus}; +use async_compatibility_layer::{ + art::{async_sleep, async_timeout}, + async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, +}; +use async_lock::RwLock; +use commit::Committable; +use hotshot_types::message::Message; +use hotshot_types::{ + certificate::QuorumCertificate, + data::{ValidatingLeaf, ValidatingProposal}, + message::GeneralConsensusMessage, + traits::{ + consensus_type::validating_consensus::ValidatingConsensus, + election::SignedCertificate, + node_implementation::{NodeImplementation, NodeType, QuorumProposalType, QuorumVoteType}, + signature_key::SignatureKey, + Block, State, + }, +}; +use hotshot_types::{ + message::Proposal, + traits::election::{ConsensusExchange, QuorumExchangeType}, +}; +use std::marker::PhantomData; +use std::{sync::Arc, time::Instant}; +use tracing::{error, info, instrument, warn}; diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs new file mode 100644 index 0000000000..425f0b1b25 --- /dev/null +++ b/consensus/src/lib.rs @@ -0,0 +1,338 @@ +//! Provides the core consensus types + +pub use crate::traits::node_implementation::ViewQueue; +pub use crate::utils::{View, ViewInner}; +use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; +use std::collections::HashSet; + +use crate::utils::Terminator; +use crate::{ + certificate::QuorumCertificate, + data::LeafType, + error::HotShotError, + traits::{ + metrics::{Counter, Gauge, Histogram, Metrics}, + node_implementation::NodeType, + }, +}; +use commit::{Commitment, Committable}; +use derivative::Derivative; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + sync::Arc, +}; +use tracing::error; + +/// A type alias for `HashMap, T>` +type CommitmentMap = HashMap, T>; + +/// A reference to the consensus algorithm +/// +/// This will contain the state of all rounds. +#[derive(custom_debug::Debug)] +pub struct Consensus> { + /// The phases that are currently loaded in memory + // TODO(https://github.com/EspressoSystems/hotshot/issues/153): Allow this to be loaded from `Storage`? + pub state_map: BTreeMap>, + + /// cur_view from pseudocode + pub cur_view: TYPES::Time, + + /// last view had a successful decide event + pub last_decided_view: TYPES::Time, + + /// A list of undecided transactions + pub transactions: Arc>>, + + /// A list of transactions we've seen decided, but didn't receive + pub seen_transactions: HashSet>, + + /// Map of leaf hash -> leaf + /// - contains undecided leaves + /// - includes the MOST RECENT decided leaf + pub saved_leaves: CommitmentMap, + + /// Saved blocks + /// + /// Contains the full block for every leaf in `saved_leaves` if that block is available. + pub saved_blocks: BlockStore, + + /// The `locked_qc` view number + pub locked_view: TYPES::Time, + + /// the highqc per spec + pub high_qc: QuorumCertificate, + + /// A reference to the metrics trait + #[debug(skip)] + pub metrics: Arc, + + /// Amount of invalid QCs we've seen since the last commit + /// Used for metrics. This resets to 0 on every decide event. + pub invalid_qc: usize, +} + +/// The metrics being collected for the consensus algorithm +pub struct ConsensusMetrics { + /// The current view + pub current_view: Box, + /// The duration to collect votes in a view (only applies when this insance is the leader) + pub vote_validate_duration: Box, + /// The duration we waited for txns before building the proposal + pub proposal_wait_duration: Box, + /// The duration to build the proposal + pub proposal_build_duration: Box, + /// The duration of each view, in seconds + pub view_duration: Box, + /// Number of views that are in-flight since the last committed view + pub number_of_views_since_last_commit: Box, + /// Number of views that are in-flight since the last anchor view + pub number_of_views_per_decide_event: Box, + /// Number of invalid QCs between anchors + pub invalid_qc_views: Box, + /// Number of views that were discarded since from one achor to the next + pub discarded_views_per_decide_event: Box, + /// Views where no proposal was seen from one anchor to the next + pub empty_views_per_decide_event: Box, + /// Number of rejected transactions + pub rejected_transactions: Box, + /// Number of outstanding transactions + pub outstanding_transactions: Box, + /// Memory size in bytes of the serialized transactions still outstanding + pub outstanding_transactions_memory_size: Box, + /// Number of views that timed out + pub number_of_timeouts: Box, + /// Total direct messages this node sent out + pub outgoing_direct_messages: Box, + /// Total broadcasts sent + pub outgoing_broadcast_messages: Box, + /// Total messages received + pub direct_messages_received: Box, + /// Total broadcast messages received + pub broadcast_messages_received: Box, + /// Total number of messages which couldn't be sent + pub failed_to_send_messages: Box, +} + +impl ConsensusMetrics { + /// Create a new instance of this [`ConsensusMetrics`] struct, setting all the counters and gauges + #[must_use] + pub fn new(metrics: &dyn Metrics) -> Self { + Self { + current_view: metrics.create_gauge(String::from("current_view"), None), + vote_validate_duration: metrics.create_histogram( + String::from("vote_validate_duration"), + Some(String::from("seconds")), + ), + proposal_build_duration: metrics.create_histogram( + String::from("proposal_build_duration"), + Some(String::from("seconds")), + ), + proposal_wait_duration: metrics.create_histogram( + String::from("proposal_wait_duration"), + Some(String::from("seconds")), + ), + view_duration: metrics + .create_histogram(String::from("view_duration"), Some(String::from("seconds"))), + number_of_views_since_last_commit: metrics + .create_gauge(String::from("number_of_views_since_last_commit"), None), + number_of_views_per_decide_event: metrics + .create_histogram(String::from("number_of_views_per_decide_event"), None), + invalid_qc_views: metrics.create_histogram(String::from("invalid_qc_views"), None), + discarded_views_per_decide_event: metrics + .create_histogram(String::from("discarded_views_per_decide_event"), None), + empty_views_per_decide_event: metrics + .create_histogram(String::from("empty_views_per_decide_event"), None), + rejected_transactions: metrics + .create_counter(String::from("rejected_transactions"), None), + outstanding_transactions: metrics + .create_gauge(String::from("outstanding_transactions"), None), + outstanding_transactions_memory_size: metrics + .create_gauge(String::from("outstanding_transactions_memory_size"), None), + outgoing_direct_messages: metrics + .create_counter(String::from("outgoing_direct_messages"), None), + outgoing_broadcast_messages: metrics + .create_counter(String::from("outgoing_broadcast_messages"), None), + direct_messages_received: metrics + .create_counter(String::from("direct_messages_received"), None), + broadcast_messages_received: metrics + .create_counter(String::from("broadcast_messages_received"), None), + failed_to_send_messages: metrics + .create_counter(String::from("failed_to_send_messages"), None), + number_of_timeouts: metrics + .create_counter(String::from("number_of_views_timed_out"), None), + } + } +} + +impl> Consensus { + /// increment the current view + /// NOTE may need to do gc here + pub fn increment_view(&mut self) -> TYPES::Time { + self.cur_view += 1; + self.cur_view + } + + /// gather information from the parent chain of leafs + /// # Errors + /// If the leaf or its ancestors are not found in storage + pub fn visit_leaf_ancestors( + &self, + start_from: TYPES::Time, + terminator: Terminator, + ok_when_finished: bool, + mut f: F, + ) -> Result<(), HotShotError> + where + F: FnMut(&LEAF) -> bool, + { + let mut next_leaf = if let Some(view) = self.state_map.get(&start_from) { + view.get_leaf_commitment() + .ok_or_else(|| HotShotError::InvalidState { + context: format!( + "Visited failed view {start_from:?} leaf. Expected successfuil leaf" + ), + })? + } else { + return Err(HotShotError::InvalidState { + context: format!("View {start_from:?} leaf does not exist in state map "), + }); + }; + + while let Some(leaf) = self.saved_leaves.get(&next_leaf) { + if let Terminator::Exclusive(stop_before) = terminator { + if stop_before == leaf.get_view_number() { + if ok_when_finished { + return Ok(()); + } + break; + } + } + next_leaf = leaf.get_parent_commitment(); + if !f(leaf) { + return Ok(()); + } + if let Terminator::Inclusive(stop_after) = terminator { + if stop_after == leaf.get_view_number() { + if ok_when_finished { + return Ok(()); + } + break; + } + } + } + Err(HotShotError::LeafNotFound {}) + } + + /// garbage collects based on state change + /// right now, this removes from both the `saved_blocks` + /// and `state_map` fields of `Consensus` + #[allow(clippy::unused_async)] // async for API compatibility reasons + pub async fn collect_garbage( + &mut self, + old_anchor_view: TYPES::Time, + new_anchor_view: TYPES::Time, + ) { + // state check + let anchor_entry = self + .state_map + .iter() + .next() + .expect("INCONSISTENT STATE: anchor leaf not in state map!"); + if *anchor_entry.0 != old_anchor_view { + error!( + "Something about GC has failed. Older leaf exists than the previous anchor leaf." + ); + } + // perform gc + self.state_map + .range(old_anchor_view..new_anchor_view) + .filter_map(|(_view_number, view)| view.get_block_commitment()) + .for_each(|block| { + self.saved_blocks.remove(block); + }); + self.state_map + .range(old_anchor_view..new_anchor_view) + .filter_map(|(_view_number, view)| view.get_leaf_commitment()) + .for_each(|leaf| { + if let Some(removed) = self.saved_leaves.remove(&leaf) { + self.saved_blocks.remove(removed.get_deltas_commitment()); + } + }); + self.state_map = self.state_map.split_off(&new_anchor_view); + } + + /// return a clone of the internal storage of unclaimed transactions + #[must_use] + pub fn get_transactions(&self) -> Arc>> { + self.transactions.clone() + } + + /// Gets the last decided state + /// # Panics + /// if the last decided view's state does not exist in the state map + /// this should never happen. + #[must_use] + pub fn get_decided_leaf(&self) -> LEAF { + let decided_view_num = self.last_decided_view; + let view = self.state_map.get(&decided_view_num).unwrap(); + let leaf = view + .get_leaf_commitment() + .expect("Decided state not found! Consensus internally inconsistent"); + self.saved_leaves.get(&leaf).unwrap().clone() + } +} + +/// Mapping from block commitments to full blocks. +/// +/// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the +/// same block, and the block will only be deleted after _all_ such objects are garbage collected. +/// For example, multiple leaves may temporarily reference the same block on different branches, +/// before all but one branch are ultimately garbage collected. +#[derive(Clone, Debug, Derivative)] +#[derivative(Default(bound = ""))] +pub struct BlockStore(HashMap, (BLOCK, u64)>); + +impl BlockStore { + /// Save `block` for later retrieval. + /// + /// After calling this function, and before the corresponding call to [`remove`](Self::remove), + /// `self.get(block.commit())` will return `Some(block)`. + /// + /// This function will increment a reference count on the saved block, so that multiple calls to + /// [`insert`](Self::insert) for the same block result in multiple owning references to the + /// block. [`remove`](Self::remove) must be called once for each reference before the block will + /// be deallocated. + pub fn insert(&mut self, block: BLOCK) { + self.0 + .entry(block.commit()) + .and_modify(|(_, refcount)| *refcount += 1) + .or_insert((block, 1)); + } + + /// Get a saved block, if available. + /// + /// If a block has been saved with [`insert`](Self::insert), this function will retrieve it. It + /// may return [`None`] if a block with the given commitment has not been saved or if the block + /// has been dropped with [`remove`](Self::remove). + #[must_use] + pub fn get(&self, block: Commitment) -> Option<&BLOCK> { + self.0.get(&block).map(|(block, _)| block) + } + + /// Drop a reference to a saved block. + /// + /// If the block exists and this call drops the last reference to it, the block will be + /// returned. Otherwise, the return value is [`None`]. + pub fn remove(&mut self, block: Commitment) -> Option { + if let Entry::Occupied(mut e) = self.0.entry(block) { + let (_, refcount) = e.get_mut(); + *refcount -= 1; + if *refcount == 0 { + let (block, _) = e.remove(); + return Some(block); + } + } + None + } +} diff --git a/consensus/src/next_leader.rs b/consensus/src/next_leader.rs new file mode 100644 index 0000000000..abd447a6ff --- /dev/null +++ b/consensus/src/next_leader.rs @@ -0,0 +1,26 @@ +//! Contains the [`NextValidatingLeader`] struct used for the next leader step in the hotstuff consensus algorithm. + +use crate::ConsensusMetrics; +use async_compatibility_layer::channel::UnboundedReceiver; +use async_lock::Mutex; +use either::Either; +use hotshot_types::data::ValidatingLeaf; +use hotshot_types::message::Message; +use hotshot_types::message::ProcessedGeneralConsensusMessage; +use hotshot_types::traits::election::ConsensusExchange; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::vote::VoteAccumulator; +use hotshot_types::{ + certificate::QuorumCertificate, + message::{ConsensusMessageType, InternalTrigger}, + traits::consensus_type::validating_consensus::ValidatingConsensus, + vote::QuorumVote, +}; +use std::marker::PhantomData; +use std::time::Instant; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; +use tracing::{info, instrument, warn}; diff --git a/consensus/src/replica.rs b/consensus/src/replica.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/consensus/src/sequencing_leader.rs b/consensus/src/sequencing_leader.rs new file mode 100644 index 0000000000..99cd6a438f --- /dev/null +++ b/consensus/src/sequencing_leader.rs @@ -0,0 +1,555 @@ +//! Contains the [`DALeader`], [`ConsensusLeader`] and [`ConsensusNextLeader`] structs used for the +//! leader steps in the consensus algorithm with DA committee, i.e. in the sequencing consensus. + +use crate::{CommitmentMap, Consensus, SequencingConsensusApi}; +use async_compatibility_layer::{ + art::async_timeout, + async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, + channel::UnboundedReceiver, +}; +use async_lock::{Mutex, RwLock}; +use bitvec::prelude::*; +use commit::{Commitment, Committable}; +use either::{Either, Left, Right}; +use hotshot_types::{ + certificate::{AssembledSignature, DACertificate, QuorumCertificate}, + data::{DAProposal, QuorumProposal, SequencingLeaf}, + message::{ + CommitteeConsensusMessage, ConsensusMessageType, GeneralConsensusMessage, InternalTrigger, + Message, ProcessedCommitteeConsensusMessage, ProcessedGeneralConsensusMessage, + ProcessedSequencingMessage, Proposal, SequencingMessage, + }, + traits::{ + election::{ + CommitteeExchangeType, ConsensusExchange, QuorumExchangeType, SignedCertificate, + }, + node_implementation::{ + CommitteeEx, NodeImplementation, NodeType, QuorumProposalType, QuorumVoteType, + SequencingQuorumEx, + }, + signature_key::SignatureKey, + state::State, + Block, + }, + vote::{QuorumVote, VoteAccumulator}, +}; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + num::NonZeroU64, + sync::Arc, + time::Instant, +}; +use tracing::{error, info, instrument, warn}; +/// This view's DA committee leader +#[derive(Debug, Clone)] +pub struct DALeader< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// id of node + pub id: u64, + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + /// The `high_qc` per spec + pub high_qc: QuorumCertificate>, + /// The view number we're running on + pub cur_view: TYPES::Time, + /// Lock over the transactions list + pub transactions: Arc>>, + /// Limited access to the consensus protocol + pub api: A, + + /// the committee exchange + pub committee_exchange: Arc>, + /// the quorum exchange + pub quorum_exchange: Arc>, + /// channel through which the leader collects votes + #[allow(clippy::type_complexity)] + pub vote_collection_chan: Arc>>>, + /// needed to typecheck + pub _pd: PhantomData, +} +impl< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > DALeader +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// Accumulate votes for a proposal and return either the cert or None if the threshold was not reached in time + async fn wait_for_votes( + &self, + cur_view: TYPES::Time, + threshold: NonZeroU64, + total_nodes_num: usize, + block_commitment: Commitment<::BlockType>, + ) -> Option> { + let lock = self.vote_collection_chan.lock().await; + let mut accumulator = VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: threshold, + failure_threshold: threshold, + sig_lists: Vec::new(), + signers: bitvec![0; total_nodes_num], + }; + + while let Ok(msg) = lock.recv().await { + if Into::>::into(msg.clone()).view_number() != cur_view { + continue; + } + match msg { + Left(general_message) => match general_message { + ProcessedGeneralConsensusMessage::Vote(_vote, _sender) => { + warn!("The leader received an unexpext Quorum Vote!"); + continue; + } + ProcessedGeneralConsensusMessage::InternalTrigger(trigger) => match trigger { + InternalTrigger::Timeout(_) => { + self.api.send_next_leader_timeout(self.cur_view).await; + break; + } + }, + ProcessedGeneralConsensusMessage::Proposal(_p, _sender) => { + warn!("The next leader has received an unexpected proposal!"); + } + ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + ProcessedGeneralConsensusMessage::ViewSyncVote(_) => todo!(), + }, + Right(committee_message) => match committee_message { + ProcessedCommitteeConsensusMessage::DAVote(vote, sender) => { + if vote.signature.0 + != ::to_bytes(&sender) + { + continue; + } + if vote.block_commitment != block_commitment { + continue; + } + match self.committee_exchange.accumulate_vote( + &vote.signature.0, + &vote.signature.1, + vote.block_commitment, + vote.vote_data, + vote.vote_token.clone(), + self.cur_view, + accumulator, + None, + ) { + Either::Left(acc) => { + accumulator = acc; + } + Either::Right(qc) => { + match qc.clone().signatures { + AssembledSignature::Yes(_signature) => {} + AssembledSignature::DA(_signature) => {} + _ => unimplemented!(), + }; + return Some(qc); + } + } + } + ProcessedCommitteeConsensusMessage::DAProposal(_p, _sender) => { + warn!("The next leader has received an unexpected proposal!"); + } + ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { + continue; + } + }, + } + } + None + } + /// Returns the parent leaf of the proposal we are building + async fn parent_leaf(&self) -> Option> { + let parent_view_number = &self.high_qc.view_number(); + let consensus = self.consensus.read().await; + let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + warn!("Couldn't find high QC parent in state map."); + return None; + }; + let Some(leaf) = parent_view.get_leaf_commitment() else { + warn!( + ?parent_view_number, + ?parent_view, + "Parent of high QC points to a view without a proposal" + ); + return None; + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + warn!("Failed to find high QC parent."); + return None; + }; + Some(leaf.clone()) + } + /// return None if we can't get transactions + async fn wait_for_transactions(&self) -> Option> { + let task_start_time = Instant::now(); + + let parent_leaf = self.parent_leaf().await?; + let previous_used_txns = match parent_leaf.deltas { + Either::Left(block) => block.contained_transactions(), + Either::Right(_commitment) => HashSet::new(), + }; + let receiver = self.transactions.subscribe().await; + + while task_start_time.elapsed() < self.api.propose_max_round_time() { + let txns = self.transactions.cloned().await; + let unclaimed_txns: Vec<_> = txns + .iter() + .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) + .collect(); + + let time_past = task_start_time.elapsed(); + if unclaimed_txns.len() < self.api.min_transactions() + && (time_past < self.api.propose_max_round_time()) + { + let duration = self.api.propose_max_round_time() - time_past; + let result = async_timeout(duration, receiver.recv()).await; + match result { + Err(_) => { + // Fall through below to updating new block + info!("propose_max_round_time passed, sending transactions we have so far"); + } + Ok(Err(e)) => { + // Something unprecedented is wrong, and `transactions` has been dropped + error!("Channel receiver error for SubscribableRwLock {:?}", e); + return None; + } + Ok(Ok(_)) => continue, + } + } + let mut txns = vec![]; + for (_hash, txn) in unclaimed_txns { + txns.push(txn.clone()); + } + return Some(txns); + } + None + } + /// Run one view of the DA leader task + #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "Sequencing DALeader Task", level = "error")] + pub async fn run_view( + self, + ) -> Option<( + DACertificate, + TYPES::BlockType, + SequencingLeaf, + )> { + // Prepare the DA Proposal + let Some(parent_leaf) = self.parent_leaf().await else { + warn!("Couldn't find high QC parent in state map."); + return None; + }; + + let mut block = ::StateType::next_block(None); + let txns = self.wait_for_transactions().await?; + + for txn in txns { + if let Ok(new_block) = block.add_transaction_raw(&txn) { + block = new_block; + continue; + } + } + let block_commitment = block.commit(); + + let consensus = self.consensus.read().await; + let signature = self.committee_exchange.sign_da_proposal(&block.commit()); + let data: DAProposal = DAProposal { + deltas: block.clone(), + view_number: self.cur_view, + }; + let message = + SequencingMessage::(Right(CommitteeConsensusMessage::DAProposal(Proposal { + data, + signature, + }))); + // Brodcast DA proposal + if let Err(e) = self.api.send_da_broadcast(message.clone()).await { + consensus.metrics.failed_to_send_messages.add(1); + warn!(?message, ?e, "Could not broadcast leader proposal"); + } else { + consensus.metrics.outgoing_broadcast_messages.add(1); + } + + // Drop the lock on the consensus. + drop(consensus); + + // Wait for DA votes or Timeout + if let Some(cert) = self + .wait_for_votes( + self.cur_view, + self.committee_exchange.success_threshold(), + self.committee_exchange.total_nodes(), + block_commitment, + ) + .await + { + return Some((cert, block, parent_leaf)); + } + None + } +} + +/// Implemenation of the consensus leader for a DA/Sequencing consensus. Handles sending out a proposal to the entire network +/// For now this step happens after the `DALeader` completes it's proposal and collects enough votes. +pub struct ConsensusLeader< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// id of node + pub id: u64, + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + /// The `high_qc` per spec + pub high_qc: QuorumCertificate>, + /// The view number we're running on + pub cur_view: TYPES::Time, + /// The Certificate generated for the transactions commited to in the proposal the leader will build + pub cert: DACertificate, + /// The block corresponding to the DA cert + pub block: TYPES::BlockType, + /// Leaf this proposal will chain from + pub parent: SequencingLeaf, + /// Limited access to the consensus protocol + pub api: A, + + /// the quorum exchange + pub quorum_exchange: Arc>, + + /// needed to tyep check + pub _pd: PhantomData, +} +impl< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > ConsensusLeader +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + >, +{ + /// Run one view of the DA leader task + #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "Sequencing DALeader Task", level = "error")] + pub async fn run_view(self) -> QuorumCertificate> { + let block_commitment = self.block.commit(); + let leaf = SequencingLeaf { + view_number: self.cur_view, + height: self.parent.height + 1, + justify_qc: self.high_qc.clone(), + parent_commitment: self.parent.commit(), + // Use the block commitment rather than the block, so that the replica can construct + // the same leaf with the commitment. + deltas: Right(block_commitment), + rejected: vec![], + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.api.public_key().to_bytes(), + }; + let signature = self + .quorum_exchange + .sign_validating_or_commitment_proposal::(&leaf.commit()); + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. + let proposal = QuorumProposal { + block_commitment, + view_number: leaf.view_number, + height: leaf.height, + justify_qc: self.high_qc.clone(), + timeout_certificate: None, + dac: Some(self.cert), + proposer_id: leaf.proposer_id, + }; + let message = + SequencingMessage::(Left(GeneralConsensusMessage::Proposal(Proposal { + data: proposal, + signature, + }))); + if let Err(e) = self + .api + .send_broadcast_message::, QuorumVoteType>( + message.clone(), + ) + .await + { + warn!(?message, ?e, "Could not broadcast leader proposal"); + } + self.high_qc + } +} + +/// Implenting the next leader. Collect votes on the previous leaders proposal and return the QC +pub struct ConsensusNextLeader< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// id of node + pub id: u64, + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + /// The view number we're running on + pub cur_view: TYPES::Time, + /// Limited access to the consensus protocol + pub api: A, + /// generic_qc before starting this + pub generic_qc: QuorumCertificate>, + /// channel through which the leader collects votes + #[allow(clippy::type_complexity)] + pub vote_collection_chan: Arc>>>, + + /// the quorum exchnage + pub quorum_exchange: Arc>, + + /// needed to type check + pub _pd: PhantomData, +} + +impl< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > ConsensusNextLeader +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, +{ + /// Run one view of the next leader, collect votes and build a QC for the last views `QuorumProposal` + /// # Panics + /// While we are unwrapping, this function can logically never panic + /// unless there is a bug in std + pub async fn run_view(self) -> QuorumCertificate> { + let mut qcs = HashSet::>>::new(); + qcs.insert(self.generic_qc.clone()); + let mut accumulator = VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.quorum_exchange.success_threshold(), + failure_threshold: self.quorum_exchange.failure_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + }; + + let lock = self.vote_collection_chan.lock().await; + while let Ok(msg) = lock.recv().await { + // If the message is for a different view number, skip it. + if Into::>::into(msg.clone()).view_number() != self.cur_view { + continue; + } + match msg { + Left(general_message) => match general_message { + ProcessedGeneralConsensusMessage::Vote(vote_message, sender) => { + match vote_message { + QuorumVote::Yes(vote) => { + if vote.signature.0 + != ::to_bytes(&sender) + { + continue; + } + + match self.quorum_exchange.accumulate_vote( + &vote.signature.0, + &vote.signature.1, + vote.leaf_commitment, + vote.vote_data, + vote.vote_token.clone(), + self.cur_view, + accumulator, + None, + ) { + Either::Left(acc) => { + accumulator = acc; + } + Either::Right(qc) => { + match qc.clone().signatures { + AssembledSignature::Yes(_signature) => {} + _ => unimplemented!(), + }; + return qc; + } + } + } + QuorumVote::Timeout(vote) => { + qcs.insert(vote.high_qc); + } + QuorumVote::No(_) => { + warn!("The next leader has received an unexpected vote!"); + } + } + } + ProcessedGeneralConsensusMessage::InternalTrigger(trigger) => match trigger { + InternalTrigger::Timeout(_) => { + self.api.send_next_leader_timeout(self.cur_view).await; + break; + } + }, + ProcessedGeneralConsensusMessage::Proposal(_p, _sender) => { + warn!("The next leader has received an unexpected proposal!"); + } + ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + ProcessedGeneralConsensusMessage::ViewSyncVote(_) => todo!(), + }, + Right(committee_message) => match committee_message { + ProcessedCommitteeConsensusMessage::DAProposal(_p, _sender) => { + warn!("The next leader has received an unexpected proposal!"); + } + ProcessedCommitteeConsensusMessage::DAVote(_, _sender) => { + warn!("The next leader has received an unexpected DA vote!"); + } + ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { + continue; + } + }, + } + } + qcs.into_iter().max_by_key(|qc| qc.view_number).unwrap() + } +} diff --git a/consensus/src/sequencing_replica.rs b/consensus/src/sequencing_replica.rs new file mode 100644 index 0000000000..fece660dee --- /dev/null +++ b/consensus/src/sequencing_replica.rs @@ -0,0 +1,642 @@ +//! Contains the [`SequencingReplica`] struct used for the replica step in the consensus algorithm with DA +//! committee, i.e. in the sequencing consensus. + +use crate::{ + utils::{Terminator, View, ViewInner}, + Consensus, SequencingConsensusApi, +}; +use async_compatibility_layer::channel::UnboundedReceiver; +use async_lock::{Mutex, RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; +use bincode::Options; +use commit::Committable; +use either::{Left, Right}; +use hotshot_types::{ + certificate::{DACertificate, QuorumCertificate}, + data::{LeafType, QuorumProposal, SequencingLeaf}, + message::{ + ConsensusMessageType, InternalTrigger, Message, ProcessedCommitteeConsensusMessage, + ProcessedGeneralConsensusMessage, ProcessedSequencingMessage, SequencingMessage, + }, + traits::{ + election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + node_implementation::{ + CommitteeEx, NodeImplementation, NodeType, QuorumProposalType, QuorumVoteType, + SequencingQuorumEx, + }, + signature_key::SignatureKey, + state::ConsensusTime, + Block, + }, +}; +use hotshot_utils::bincode::bincode_opts; +use std::{ + collections::HashSet, + marker::PhantomData, + ops::Bound::{Excluded, Included}, + sync::Arc, +}; +use tracing::{error, info, instrument, warn}; +/// This view's replica for sequencing consensus. +#[derive(Debug, Clone)] +pub struct SequencingReplica< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// ID of node. + pub id: u64, + /// Reference to consensus. The replica will require a write lock on this. + pub consensus: Arc>>>, + /// Channel for accepting leader proposals and timeouts messages. + #[allow(clippy::type_complexity)] + pub proposal_collection_chan: + Arc>>>, + /// View number this view is executing in. + pub cur_view: TYPES::Time, + /// The High QC. + pub high_qc: QuorumCertificate>, + /// HotShot consensus API. + pub api: A, + + /// the committee exchange + pub committee_exchange: Arc>, + + /// the quorum exchange + pub quorum_exchange: Arc>, + + /// needed to typecheck + pub _pd: PhantomData, +} + +impl< + A: SequencingConsensusApi, I>, + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > SequencingReplica +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// The leaf from the genesis view. + /// + /// This will be used as the parent leaf for the proposal in the first view after genesis. + async fn genesis_leaf(&self) -> Option> { + let consensus = self.consensus.read().await; + let Some(genesis_view) = consensus.state_map.get(&TYPES::Time::genesis()) else { + warn!("Couldn't find genesis view in state map."); + return None; + }; + let Some(leaf) = genesis_view.get_leaf_commitment() else { + warn!( + ?genesis_view, + "Genesis view points to a view without a leaf" + ); + return None; + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + warn!("Failed to find genesis leaf."); + return None; + }; + Some(leaf.clone()) + } + + /// Replica task for sequencing consensus that spins until a vote can be made or timeout is + /// hit. + /// + /// Returns the new leaf if it's valid. + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Sequencing Replica Task", level = "error")] + #[allow(clippy::type_complexity)] + async fn find_valid_msg<'a>( + &self, + view_leader_key: TYPES::SignatureKey, + consensus: RwLockUpgradableReadGuard<'a, Consensus>>, + ) -> ( + RwLockUpgradableReadGuard<'a, Consensus>>, + Option>, + ) { + let lock = self.proposal_collection_chan.lock().await; + let mut invalid_qc = false; + let leaf = loop { + let msg = lock.recv().await; + info!("recv-ed message {:?}", msg.clone()); + if let Ok(msg) = msg { + // stale/newer view messages should never reach this specific task's receive channel + if Into::>::into(msg.clone()).view_number() != self.cur_view + { + continue; + } + match msg { + Left(general_message) => { + match general_message { + ProcessedGeneralConsensusMessage::Proposal(p, sender) => { + if view_leader_key != sender { + continue; + } + + let mut valid_leaf = None; + let vote_token = + self.quorum_exchange.make_vote_token(self.cur_view); + match vote_token { + Err(e) => { + error!( + "Failed to generate vote token for {:?} {:?}", + self.cur_view, e + ); + } + Ok(None) => { + info!( + "We were not chosen for consensus committee on {:?}", + self.cur_view + ); + } + Ok(Some(vote_token)) => { + info!( + "We were chosen for consensus committee on {:?}", + self.cur_view + ); + + let message; + + // Construct the leaf. + let justify_qc = p.data.justify_qc; + let parent = if justify_qc.is_genesis() { + self.genesis_leaf().await + } else { + consensus + .saved_leaves + .get(&justify_qc.leaf_commitment()) + .cloned() + }; + let Some(parent) = parent else { + warn!("Proposal's parent missing from storage"); + continue; + }; + let parent_commitment = parent.commit(); + let block_commitment = p.data.block_commitment; + let leaf = SequencingLeaf { + view_number: self.cur_view, + height: p.data.height, + justify_qc: justify_qc.clone(), + parent_commitment, + deltas: Right(p.data.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc() + .unix_timestamp_nanos(), + proposer_id: sender.to_bytes(), + }; + let justify_qc_commitment = justify_qc.commit(); + let leaf_commitment = leaf.commit(); + let Some(dac) = p.data.dac else { + warn!("No DAC in proposal! Skipping proposal."); + continue; + }; + + // Validate the `justify_qc`. + if !self + .quorum_exchange + .is_valid_cert(&justify_qc, parent_commitment) + { + invalid_qc = true; + warn!("Invalid justify_qc in proposal!."); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + self.cur_view, + vote_token, + ); + } + // Validate the `height`. + else if leaf.height != parent.height + 1 { + invalid_qc = true; + warn!( + "Incorrect height in proposal (expected {}, got {})", + parent.height + 1, + leaf.height + ); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + self.cur_view, + vote_token, + ); + } + // Validate the DAC. + else if !self + .committee_exchange + .is_valid_cert(&dac, block_commitment) + { + warn!("Invalid DAC in proposal! Skipping proposal."); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + self.cur_view, + vote_token, + ); + } + // Validate the signature. + else if !view_leader_key + .validate(&p.signature, leaf_commitment.as_ref()) + { + warn!(?p.signature, "Could not verify proposal."); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + self.cur_view, + vote_token, + ); + } + // Create a positive vote if either liveness or safety check + // passes. + else { + // Liveness check. + let liveness_check = + justify_qc.view_number > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.view_number, + Terminator::Inclusive(consensus.locked_view), + false, + |leaf| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + if let Err(e) = outcome { + self.api + .send_view_error(self.cur_view, Arc::new(e)) + .await; + } + + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + warn!("Failed safety check and liveness check"); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + self.cur_view, + vote_token, + ); + } else { + // A valid leaf is found. + valid_leaf = Some(leaf); + + // Generate a message with yes vote. + message = self.quorum_exchange.create_yes_message( + justify_qc_commitment, + leaf_commitment, + self.cur_view, + vote_token, + ); + } + } + + info!("Sending vote to next leader {:?}", message); + let next_leader = + self.quorum_exchange.get_leader(self.cur_view + 1); + if self + .api + .send_direct_message::, QuorumVoteType>(next_leader, SequencingMessage(Left(message))) + .await + .is_err() + { + consensus.metrics.failed_to_send_messages.add(1); + warn!("Failed to send vote to next leader"); + } else { + consensus.metrics.outgoing_direct_messages.add(1); + } + } + } + break valid_leaf; + } + ProcessedGeneralConsensusMessage::InternalTrigger(trigger) => { + match trigger { + InternalTrigger::Timeout(_) => { + let next_leader = + self.quorum_exchange.get_leader(self.cur_view + 1); + + consensus.metrics.number_of_timeouts.add(1); + + let vote_token = + self.quorum_exchange.make_vote_token(self.cur_view); + + match vote_token { + Err(e) => { + error!( + "Failed to generate vote token for {:?} {:?}", + self.cur_view, e + ); + } + Ok(None) => { + info!( + "We were not chosen for committee on {:?}", + self.cur_view + ); + } + Ok(Some(vote_token)) => { + let timed_out_msg = + self.quorum_exchange.create_timeout_message( + self.high_qc.clone(), + self.cur_view, + vote_token, + ); + warn!( + "Timed out! Sending timeout to next leader {:?}", + timed_out_msg + ); + + // send timedout message to the next leader + if let Err(e) = self + .api + .send_direct_message::, QuorumVoteType< + TYPES, + I, + >>( + next_leader.clone(), + SequencingMessage(Left(timed_out_msg)), + ) + .await + { + consensus + .metrics + .failed_to_send_messages + .add(1); + warn!( + ?next_leader, + ?e, + "Could not send time out message to next_leader" + ); + } else { + consensus + .metrics + .outgoing_direct_messages + .add(1); + } + + // exits from entire function + self.api.send_replica_timeout(self.cur_view).await; + } + } + return (consensus, None); + } + } + } + ProcessedGeneralConsensusMessage::Vote(_, _) => { + // should only be for leader, never replica + warn!("Replica receieved a vote message. This is not what the replica expects. Skipping."); + continue; + } + ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + ProcessedGeneralConsensusMessage::ViewSyncVote(_) => todo!(), + } + } + Right(committee_message) => { + match committee_message { + ProcessedCommitteeConsensusMessage::DAProposal(_p, _sender) => { + warn!("Replica receieved a DA Proposal. This is not what the replica expects. Skipping."); + // TODO (Keyao) why not continue here? + } + ProcessedCommitteeConsensusMessage::DAVote(_, _) => { + // should only be for leader, never replica + warn!("Replica receieved a vote message. This is not what the replica expects. Skipping."); + continue; + } + ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { + continue; + } + } + } + } + } + // fall through logic if we did not receive successfully from channel + warn!("Replica did not receive successfully from channel. Terminating Replica."); + return (consensus, None); + }; + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + if invalid_qc { + consensus.invalid_qc += 1; + } + (RwLockWriteGuard::downgrade_to_upgradable(consensus), leaf) + } + + /// Run one view of the replica for sequencing consensus. + #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "Sequencing Replica Task", level = "error")] + pub async fn run_view(self) -> QuorumCertificate> { + info!("Sequencing replica task started!"); + let view_leader_key = self.quorum_exchange.get_leader(self.cur_view); + let consensus = self.consensus.upgradable_read().await; + + let (consensus, maybe_leaf) = self.find_valid_msg(view_leader_key, consensus).await; + + let Some(leaf) = maybe_leaf else { + // We either timed out or for some reason could not vote on a proposal. + return self.high_qc; + }; + + let mut new_anchor_view = consensus.last_decided_view; + let mut new_locked_view = consensus.locked_view; + let mut last_view_number_visited = self.cur_view; + let mut new_commit_reached: bool = false; + let mut new_decide_reached = false; + let mut new_decide_qc = None; + let mut leaf_views = Vec::new(); + let mut included_txns = HashSet::new(); + let old_anchor_view = consensus.last_decided_view; + let parent_view = leaf.justify_qc.view_number; + let mut current_chain_length = 0usize; + if parent_view + 1 == self.cur_view { + current_chain_length += 1; + if let Err(e) = consensus.visit_leaf_ancestors( + parent_view, + Terminator::Exclusive(old_anchor_view), + true, + |leaf| { + if !new_decide_reached { + if last_view_number_visited == leaf.view_number + 1 { + last_view_number_visited = leaf.view_number; + current_chain_length += 1; + if current_chain_length == 2 { + new_locked_view = leaf.view_number; + new_commit_reached = true; + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + new_decide_qc = Some(leaf.justify_qc.clone()); + } else if current_chain_length == 3 { + new_anchor_view = leaf.view_number; + new_decide_reached = true; + } + } else { + // nothing more to do here... we don't have a new chain extension + return false; + } + } + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above + if new_decide_reached { + let mut leaf = leaf.clone(); + + // If the full block is available for this leaf, include it in the leaf + // chain that we send to the client. + if let Some(block) = + consensus.saved_blocks.get(leaf.get_deltas_commitment()) + { + if let Err(err) = leaf.fill_deltas(block.clone()) { + warn!("unable to fill leaf {} with block {}, block will not be available: {}", + leaf.commit(), block.commit(), err); + } + } + + leaf_views.push(leaf.clone()); + if let Left(block) = &leaf.deltas { + let txns = block.contained_transactions(); + for txn in txns { + included_txns.insert(txn); + } + } + } + true + }, + ) { + self.api.send_view_error(self.cur_view, Arc::new(e)).await; + } + } + + let included_txns_set: HashSet<_> = if new_decide_reached { + included_txns + } else { + HashSet::new() + }; + + // promote lock here + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.state_map.insert( + self.cur_view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + }, + }, + ); + + consensus.metrics.number_of_views_since_last_commit.set( + consensus + .state_map + .range(( + Excluded(consensus.last_decided_view), + Included(self.cur_view), + )) + .count(), + ); + + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + if new_commit_reached { + consensus.locked_view = new_locked_view; + } + #[allow(clippy::cast_precision_loss)] + if new_decide_reached { + let num_views_since_last_anchor = + (*self.cur_view - *consensus.last_decided_view) as f64; + let views_seen = consensus + .state_map + .range(( + Excluded(consensus.last_decided_view), + Included(self.cur_view), + )) + .count(); + // A count of all veiws we saw that aren't in the current chain (so won't be commited) + consensus + .metrics + .discarded_views_per_decide_event + .add_point((views_seen - current_chain_length) as f64); + // An empty view is one we didn't see a leaf for but we moved past that view number + consensus + .metrics + .empty_views_per_decide_event + .add_point(num_views_since_last_anchor - views_seen as f64); + consensus + .metrics + .number_of_views_per_decide_event + .add_point(num_views_since_last_anchor); + consensus + .metrics + .invalid_qc_views + .add_point(consensus.invalid_qc as f64); + + let mut included_txn_size = 0; + consensus + .transactions + .modify(|txns| { + *txns = txns + .drain() + .filter(|(txn_hash, txn)| { + if included_txns_set.contains(txn_hash) { + included_txn_size += + bincode_opts().serialized_size(txn).unwrap_or_default(); + false + } else { + true + } + }) + .collect(); + }) + .await; + consensus + .metrics + .outstanding_transactions + .update(-(included_txns_set.len() as i64)); + consensus + .metrics + .outstanding_transactions_memory_size + .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); + + consensus + .metrics + .rejected_transactions + .add(leaf.rejected.len()); + + let decide_sent = self.api.send_decide( + consensus.last_decided_view, + leaf_views, + new_decide_qc.unwrap(), + ); + let old_anchor_view = consensus.last_decided_view; + consensus + .collect_garbage(old_anchor_view, new_anchor_view) + .await; + consensus.last_decided_view = new_anchor_view; + consensus.invalid_qc = 0; + + // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. + if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { + error!("Could not insert new anchor into the storage API: {:?}", e); + } + + decide_sent.await; + } + self.high_qc + } +} diff --git a/consensus/src/traits.rs b/consensus/src/traits.rs new file mode 100644 index 0000000000..2687107886 --- /dev/null +++ b/consensus/src/traits.rs @@ -0,0 +1,171 @@ +//! Contains the [`SequencingConsensusApi`] and [`ValidatingConsensusApi`] traits. + +use crate::{ + certificate::QuorumCertificate, + data::{LeafType, ProposalType}, + error::HotShotError, + event::{Event, EventType}, + message::{DataMessage, SequencingMessage}, + traits::{ + network::NetworkError, + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + storage::StorageError, + }, + vote::VoteType, +}; +use async_trait::async_trait; + +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; + +/// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and +/// sequencing consensus. +#[async_trait] +pub trait ConsensusSharedApi< + TYPES: NodeType, + LEAF: LeafType, + I: NodeImplementation, +>: Send + Sync +{ + /// Total number of nodes in the network. Also known as `n`. + fn total_nodes(&self) -> NonZeroUsize; + + /// The minimum amount of time a leader has to wait before sending a propose + fn propose_min_round_time(&self) -> Duration; + + /// The maximum amount of time a leader can wait before sending a propose. + /// If this time is reached, the leader has to send a propose without transactions. + fn propose_max_round_time(&self) -> Duration; + + /// Store a leaf in the storage + async fn store_leaf( + &self, + old_anchor_view: TYPES::Time, + leaf: LEAF, + ) -> Result<(), StorageError>; + + /// Retuns the maximum transactions allowed in a block + fn max_transactions(&self) -> NonZeroUsize; + + /// Returns the minimum transactions that must be in a block + fn min_transactions(&self) -> usize; + + /// Returns `true` if hotstuff should start the given round. A round can also be started manually by sending `NewView` to the leader. + /// + /// In production code this should probably always return `true`. + async fn should_start_round(&self, view_number: TYPES::Time) -> bool; + + /// Notify the system of an event within `hotshot-consensus`. + async fn send_event(&self, event: Event); + + /// Get a reference to the public key. + fn public_key(&self) -> &TYPES::SignatureKey; + + /// Get a reference to the private key. + fn private_key(&self) -> &::PrivateKey; + + // Utility functions + + /// notifies client of an error + async fn send_view_error(&self, view_number: TYPES::Time, error: Arc>) { + self.send_event(Event { + view_number, + event: EventType::Error { error }, + }) + .await; + } + + /// notifies client of a replica timeout + async fn send_replica_timeout(&self, view_number: TYPES::Time) { + self.send_event(Event { + view_number, + event: EventType::ReplicaViewTimeout { view_number }, + }) + .await; + } + + /// notifies client of a next leader timeout + async fn send_next_leader_timeout(&self, view_number: TYPES::Time) { + self.send_event(Event { + view_number, + event: EventType::NextLeaderViewTimeout { view_number }, + }) + .await; + } + + /// sends a decide event down the channel + async fn send_decide( + &self, + view_number: TYPES::Time, + leaf_views: Vec, + decide_qc: QuorumCertificate, + ) { + self.send_event(Event { + view_number, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(decide_qc), + block_size: None, + }, + }) + .await; + } + + /// Sends a `ViewFinished` event + async fn send_view_finished(&self, view_number: TYPES::Time) { + self.send_event(Event { + view_number, + event: EventType::ViewFinished { view_number }, + }) + .await; + } +} + +/// The API that [`HotStuff`] needs to talk to the system, for sequencing consensus. +#[async_trait] +pub trait SequencingConsensusApi< + TYPES: NodeType, + LEAF: LeafType, + I: NodeImplementation>, +>: ConsensusSharedApi +{ + /// Send a direct message to the given recipient + async fn send_direct_message, VOTE: VoteType>( + &self, + recipient: TYPES::SignatureKey, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// send a direct message using the DA communication channel + async fn send_direct_da_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + recipient: TYPES::SignatureKey, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// Send a broadcast message to the entire network. + async fn send_broadcast_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// Send a broadcast to the DA comitee, stub for now + async fn send_da_broadcast( + &self, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// Send a message with a transaction. + /// This function is deprecated in favor of `submit_transaction` in `handle.rs` + #[deprecated] + async fn send_transaction( + &self, + message: DataMessage, + ) -> std::result::Result<(), NetworkError>; +} diff --git a/consensus/src/utils.rs b/consensus/src/utils.rs new file mode 100644 index 0000000000..38c4b5a852 --- /dev/null +++ b/consensus/src/utils.rs @@ -0,0 +1,82 @@ +//! Utility functions, type aliases, helper structs and enum definitions. + +use crate::{ + data::{LeafBlock, LeafType}, + traits::node_implementation::NodeType, +}; +use commit::Commitment; +use std::ops::Deref; + +/// A view's state +#[derive(Debug)] +pub enum ViewInner> { + /// A pending view with an available block but not leaf proposal yet. + /// + /// Storing this state allows us to garbage collect blocks for views where a proposal is never + /// made. This saves memory when a leader fails and subverts a DoS attack where malicious + /// leaders repeatedly request availability for blocks that they never propose. + DA { + /// Available block. + block: Commitment>, + }, + /// Undecided view + Leaf { + /// Proposed leaf + leaf: Commitment, + }, + /// Leaf has failed + Failed, +} + +impl> ViewInner { + /// return the underlying leaf hash if it exists + #[must_use] + pub fn get_leaf_commitment(&self) -> Option> { + if let Self::Leaf { leaf } = self { + Some(*leaf) + } else { + None + } + } + + /// return the underlying block hash if it exists + #[must_use] + pub fn get_block_commitment(&self) -> Option>> { + if let Self::DA { block } = self { + Some(*block) + } else { + None + } + } +} + +impl> Deref for View { + type Target = ViewInner; + + fn deref(&self) -> &Self::Target { + &self.view_inner + } +} + +/// This exists so we can perform state transitions mutably +#[derive(Debug)] +pub struct View> { + /// The view data. Wrapped in a struct so we can mutate + pub view_inner: ViewInner, +} + +/// A struct containing information about a finished round. +#[derive(Debug, Clone)] +pub struct RoundFinishedEvent { + /// The round that finished + pub view_number: TYPES::Time, +} + +/// Whether or not to stop inclusively or exclusively when walking +#[derive(Copy, Clone, Debug)] +pub enum Terminator { + /// Stop right before this view number + Exclusive(T), + /// Stop including this view number + Inclusive(T), +} diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index e072aa8e0d..0faa1f0a87 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -20,7 +20,7 @@ bincode = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } generic-array = "0.14.7" -hotshot-types = { path = "../../types" } +hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index 70a6b6aca0..a98f1b73f7 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -12,9 +12,9 @@ bitvec = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } ethereum-types = { workspace = true } -hotshot-qc = { path = "../../crates/hotshot-qc" } -hotshot-types = { path = "../../types" } -hotshot-utils = { path = "../../utils" } +hotshot-qc = { path = "../hotshot-qc" } +hotshot-types = { path = "../types" } +hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 870e87cfcd..8f11e4d66f 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -17,7 +17,7 @@ digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } generic-array = "0.14.7" -hotshot-types = { path = "../../types" } +hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml new file mode 100644 index 0000000000..47e7fdc6a3 --- /dev/null +++ b/hotshot/Cargo.toml @@ -0,0 +1,122 @@ +[package] +authors = ["Espresso Systems "] +description = "HotShot consesus module" +edition = "2021" +name = "hotshot" +readme = "README.md" +version = "0.3.3" +rust-version = "1.65.0" + +[features] +default = ["demo", "docs", "doc-images"] + +# Enable demo/testing logic +demo = [ + "hotshot-types/demo", + "libp2p/rsa", + "dep:derivative", +] + +# Features required for binaries +bin-orchestrator = ["clap"] + +# Build the extended documentation +docs = [] +doc-images = [] +hotshot-testing = [] + +# [[example]] +# name = "libp2p-validator" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/libp2p/validator.rs" +# +# [[example]] +# name = "libp2p-orchestrator" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/libp2p/orchestrator.rs" +# +# [[example]] +# name = "web-server-orchestrator" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/web-server/orchestrator.rs" +# +# [[example]] +# name = "web-server-validator" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/web-server/validator.rs" + +[[example]] +name = "web-server" +required-features = ["demo", "libp2p/rsa"] +path = "examples/web-server-da/web-server.rs" + +[[example]] +name = "web-server-da-orchestrator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/web-server-da/orchestrator.rs" + +[[example]] +name = "web-server-da-validator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/web-server-da/validator.rs" + +[[example]] +name = "multi-validator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/web-server-da/multi-validator.rs" + +[[example]] +name = "multi-web-server" +required-features = ["demo", "libp2p/rsa"] +path = "examples/web-server-da/multi-web-server.rs" + +[dependencies] +# TODO ED We should upgrade ark libraries to 0.4 +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bimap = "0.6.3" +bincode = { workspace = true } +bitvec = { workspace = true } +clap = { version = "4.4", features = ["derive", "env"], optional = true } +commit = { workspace = true } +custom_debug = { workspace = true } +dashmap = "5.5.1" +derivative = { version = "2.2.0", optional = true } +either = { workspace = true } +embed-doc-image = "0.1.4" +espresso-systems-common = { workspace = true } +ethereum-types = { workspace = true } +futures = { workspace = true } +hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } +hotshot-consensus = { path = "../consensus", version = "0.1.0", default-features = false } +hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } +hotshot-signature-key = { path = "../hotshot-signature-key" } +hotshot-types = { path = "../types", version = "0.1.0", default-features = false } +hotshot-utils = { path = "../utils" } +hotshot-task = { path = "../task", version = "0.1.0", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +libp2p = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-networking = { workspace = true } +nll = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { workspace = true, features = ["rc"] } +snafu = { workspace = true } +surf-disco = { workspace = true } +time = { workspace = true } + +tracing = { workspace = true } +typenum = { workspace = true } +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } + +[dev-dependencies] +blake3 = { workspace = true } +clap = { version = "4.4", features = ["derive", "env"] } +serde_json = "1.0.105" +toml = { workspace = true } + diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs new file mode 100644 index 0000000000..efd3b8169d --- /dev/null +++ b/hotshot/examples/infra/mod.rs @@ -0,0 +1,98 @@ +use clap::Parser; +use hotshot_orchestrator::{ + self, + config::{NetworkConfig, NetworkConfigFile}, +}; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use libp2p::{ + identity::{ + ed25519::{Keypair as EdKeypair, SecretKey}, + Keypair, + }, + multiaddr::{self}, + Multiaddr, +}; +use std::{fmt::Debug, fs, net::IpAddr, str::FromStr}; + +// ORCHESTRATOR + +#[derive(Parser, Debug, Clone)] +#[command( + name = "Multi-machine consensus", + about = "Simulates consensus among multiple machines" +)] +/// Arguments passed to the orchestrator +pub struct OrchestratorArgs { + /// The address the orchestrator runs on + pub host: IpAddr, + /// The port the orchestrator runs on + pub port: u16, + /// The configuration file to be used for this run + pub config_file: String, +} + +/// Reads a network configuration from a given filepath +pub fn load_config_from_file( + config_file: String, +) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, +> { + let config_file_as_string: String = fs::read_to_string(config_file.as_str()) + .unwrap_or_else(|_| panic!("Could not read config file located at {config_file}")); + let config_toml: NetworkConfigFile = + toml::from_str::(&config_file_as_string) + .expect("Unable to convert config file to TOML"); + + let mut config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > = config_toml.into(); + + // Generate network's public keys + config.config.known_nodes = (0..config.config.total_nodes.get()) + .map(|node_id| { + TYPES::SignatureKey::generated_from_seed_indexed( + config.seed, + node_id.try_into().unwrap(), + ) + .0 + }) + .collect(); + + config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) + .map(|node_id| config.config.known_nodes[node_id].get_stake_table_entry(1u64)) + .collect(); + + config +} + +/// yeesh maybe we should just implement SignatureKey for this... +pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); + >::from(sk_bytes).into() +} + +/// libp2p helper function +/// convert node string into multi addr +/// node string of the form: "$IP:$PORT" +pub fn parse_dns(s: &str) -> Result { + let mut i = s.split(':'); + let ip = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; + let port = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; + Multiaddr::from_str(&format!("/dns/{ip}/udp/{port}/quic-v1")) +} + +/// libp2p helper function +pub fn parse_ip(s: &str) -> Result { + let mut i = s.split(':'); + let ip = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; + let port = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; + Multiaddr::from_str(&format!("/ip4/{ip}/udp/{port}/quic-v1")) +} diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs new file mode 100644 index 0000000000..729cbfd79f --- /dev/null +++ b/hotshot/examples/infra/modDA.rs @@ -0,0 +1,722 @@ +use crate::infra::{load_config_from_file, OrchestratorArgs}; + +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use async_trait::async_trait; +use futures::StreamExt; +use hotshot::{ + traits::{ + implementations::{MemoryStorage, WebCommChannel, WebServerNetwork}, + NodeImplementation, + }, + types::{SignatureKey, SystemContextHandle}, + HotShotType, SystemContext, +}; +use hotshot_orchestrator::{ + self, + client::{OrchestratorClient, ValidatorArgs}, + config::{NetworkConfig, WebServerConfig}, +}; +use hotshot_task::task::FilterEvent; +use hotshot_types::{ + certificate::ViewSyncCertificate, + data::{DAProposal, QuorumProposal, SequencingLeaf, TestableLeaf}, + event::{Event, EventType}, + message::{Message, SequencingMessage}, + traits::{ + election::{ + CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, + }, + metrics::NoMetrics, + network::CommunicationChannel, + node_implementation::{ + CommitteeEx, ExchangesType, NodeType, QuorumEx, SequencingExchanges, + }, + state::{ConsensusTime, TestableBlock, TestableState}, + }, + vote::{DAVote, QuorumVote, ViewSyncVote}, + HotShotConfig, +}; +// use libp2p::{ +// identity::{ +// ed25519::{Keypair as EdKeypair, SecretKey}, +// Keypair, +// }, +// multiaddr::{self, Protocol}, +// Multiaddr, +// }; +// use libp2p_identity::PeerId; +// use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; +use std::{ + //collections::{BTreeSet, VecDeque}, + collections::VecDeque, + //fs, + mem, + net::IpAddr, + //num::NonZeroUsize, + //str::FromStr, + //sync::Arc, + //time::{Duration, Instant}, + time::Instant, +}; +use std::{fmt::Debug, net::Ipv4Addr}; +//use surf_disco::error::ClientError; +//use surf_disco::Client; +use tracing::{debug, error, info, warn}; + +/// Runs the orchestrator +pub async fn run_orchestrator_da< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + DANETWORK: CommunicationChannel< + TYPES, + Message, + DAProposal, + DAVote, + MEMBERSHIP, + > + Debug, + QUORUMNETWORK: CommunicationChannel< + TYPES, + Message, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, + > + Debug, + VIEWSYNCNETWORK: CommunicationChannel< + TYPES, + Message, + ViewSyncCertificate, + ViewSyncVote, + MEMBERSHIP, + > + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + QUORUMNETWORK, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + VIEWSYNCNETWORK, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, +>( + OrchestratorArgs { + host, + port, + config_file, + }: OrchestratorArgs, +) { + error!("Starting orchestrator",); + let run_config = load_config_from_file::(config_file); + let _result = hotshot_orchestrator::run_orchestrator::< + TYPES::SignatureKey, + TYPES::ElectionConfigType, + >(run_config, host, port) + .await; +} + +/// Defines the behavior of a "run" of the network with a given configuration +#[async_trait] +pub trait RunDA< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + DANETWORK: CommunicationChannel< + TYPES, + Message, + DAProposal, + DAVote, + MEMBERSHIP, + > + Debug, + QUORUMNETWORK: CommunicationChannel< + TYPES, + Message, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, + > + Debug, + VIEWSYNCNETWORK: CommunicationChannel< + TYPES, + Message, + ViewSyncCertificate, + ViewSyncVote, + MEMBERSHIP, + > + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + QUORUMNETWORK, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + VIEWSYNCNETWORK, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, +> where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, + SystemContext: HotShotType, +{ + /// Initializes networking, returns self + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> Self; + + /// Initializes the genesis state and HotShot instance; does not start HotShot consensus + /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot + /// get the anchored view + /// Note: sequencing leaf does not have state, so does not return state + async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { + let genesis_block = TYPES::BlockType::genesis(); + let initializer = + hotshot::HotShotInitializer::>::from_genesis( + genesis_block, + ) + .expect("Couldn't generate genesis block"); + + let config = self.get_config(); + + // Get KeyPair for certificate Aggregation + let (pk, sk) = + TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); + let known_nodes = config.config.known_nodes.clone(); + let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); + let entry = pk.get_stake_table_entry(1u64); + + let da_network = self.get_da_network(); + let quorum_network = self.get_quorum_network(); + let view_sync_network = self.get_view_sync_network(); + + // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config + let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { + as ConsensusExchange< + TYPES, + Message, + >>::Membership::default_election_config(config.config.total_nodes.get() as u64) + }); + + let committee_election_config = as ConsensusExchange< + TYPES, + Message, + >>::Membership::default_election_config( + config.config.da_committee_size.try_into().unwrap(), + ); + + let exchanges = NODE::Exchanges::create( + known_nodes_with_stake.clone(), + known_nodes.clone(), + (quorum_election_config, committee_election_config), + ( + quorum_network.clone(), + da_network.clone(), + view_sync_network.clone(), + ), + pk.clone(), + entry.clone(), + sk.clone(), + ); + + SystemContext::init( + pk, + sk, + config.node_index, + config.config, + MemoryStorage::empty(), + exchanges, + initializer, + NoMetrics::boxed(), + ) + .await + .expect("Could not init hotshot") + .0 + } + + /// Starts HotShot consensus, returns when consensus has finished + async fn run_hotshot(&self, mut context: SystemContextHandle) { + let NetworkConfig { + padding, + rounds, + transactions_per_round, + node_index, + config: HotShotConfig { total_nodes, .. }, + .. + } = self.get_config(); + + let size = mem::size_of::(); + let adjusted_padding = if padding < size { 0 } else { padding - size }; + let mut txns: VecDeque = VecDeque::new(); + + // TODO ED: In the future we should have each node generate transactions every round to simulate a more realistic network + let tx_to_gen = transactions_per_round * rounds * 3; + { + let mut txn_rng = rand::thread_rng(); + for _ in 0..tx_to_gen { + let txn = + <::StateType as TestableState>::create_random_transaction( + None, + &mut txn_rng, + padding as u64, + ); + txns.push_back(txn); + } + } + debug!("Generated {} transactions", tx_to_gen); + + debug!("Adjusted padding size is {:?} bytes", adjusted_padding); + let mut round = 0; + let mut total_transactions = 0; + + let start = Instant::now(); + + info!("Starting hotshot!"); + let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; + let mut anchor_view: TYPES::Time = ::genesis(); + let mut num_successful_commits = 0; + + let total_nodes_u64 = total_nodes.get() as u64; + + context.hotshot.start_consensus().await; + + loop { + match event_stream.next().await { + None => { + panic!("Error! Event stream completed before consensus ended."); + } + Some(Event { event, .. }) => { + match event { + EventType::Error { error } => { + error!("Error in consensus: {:?}", error); + // TODO what to do here + } + EventType::Decide { + leaf_chain, + qc: _, + block_size, + } => { + // this might be a obob + if let Some(leaf) = leaf_chain.get(0) { + info!("Decide event for leaf: {}", *leaf.view_number); + + let new_anchor = leaf.view_number; + if new_anchor >= anchor_view { + anchor_view = leaf.view_number; + } + } + + if let Some(size) = block_size { + total_transactions += size; + } + + num_successful_commits += leaf_chain.len(); + if num_successful_commits >= rounds { + break; + } + + if leaf_chain.len() > 1 { + warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); + } + // when we make progress, submit new events + } + EventType::ReplicaViewTimeout { view_number } => { + warn!("Timed out as a replicas in view {:?}", view_number); + } + EventType::NextLeaderViewTimeout { view_number } => { + warn!("Timed out as the next leader in view {:?}", view_number); + } + EventType::ViewFinished { view_number } => { + if *view_number > round { + round = *view_number; + info!("view finished: {:?}", view_number); + for _ in 0..transactions_per_round { + if node_index >= total_nodes_u64 - 10 { + let txn = txns.pop_front().unwrap(); + + debug!("Submitting txn on round {}", round); + + let result = context.submit_transaction(txn).await; + + if result.is_err() { + error! ( + "Could not send transaction to web server on round {}", + round + ) + } + } + } + } + } + _ => unimplemented!(), + } + } + } + + round += 1; + } + + // Output run results + let total_time_elapsed = start.elapsed(); + error!("{rounds} rounds completed in {total_time_elapsed:?} - Total transactions committed: {total_transactions} - Total commitments: {num_successful_commits}"); + } + + /// Returns the da network for this run + fn get_da_network(&self) -> DANETWORK; + + /// Returns the quorum network for this run + fn get_quorum_network(&self) -> QUORUMNETWORK; + + ///Returns view sync network for this run + fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; + + /// Returns the config for this run + fn get_config( + &self, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >; +} + +// WEB SERVER + +/// Alias for the [`WebCommChannel`] for sequencing consensus. +type StaticDAComm = + WebCommChannel, DAVote, MEMBERSHIP>; + +/// Alias for the ['WebCommChannel'] for validating consensus +type StaticQuorumComm = WebCommChannel< + TYPES, + I, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, +>; + +/// Alias for the ['WebCommChannel'] for view sync consensus +type StaticViewSyncComm = + WebCommChannel, ViewSyncVote, MEMBERSHIP>; + +/// Represents a web server-based run +pub struct WebServerDARun< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +> { + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + quorum_network: StaticQuorumComm, + da_network: StaticDAComm, + view_sync_network: StaticViewSyncComm, +} + +#[async_trait] +impl< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + WebCommChannel< + TYPES, + NODE, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, + >, + Message, + >, + CommitteeExchange< + TYPES, + MEMBERSHIP, + WebCommChannel, DAVote, MEMBERSHIP>, + Message, + >, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + WebCommChannel< + TYPES, + NODE, + ViewSyncCertificate, + ViewSyncVote, + MEMBERSHIP, + >, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + > + RunDA< + TYPES, + MEMBERSHIP, + StaticDAComm, + StaticQuorumComm, + StaticViewSyncComm, + NODE, + > for WebServerDARun +where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> WebServerDARun { + // Generate our own key + let (pub_key, _priv_key) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + config.seed, + config.node_index, + ); + + // Get the configuration for the web server + let WebServerConfig { + host, + port, + wait_between_polls, + }: WebServerConfig = config.clone().web_server_config.unwrap(); + + let underlying_quorum_network = WebServerNetwork::create( + &host.to_string(), + port, + wait_between_polls, + pub_key.clone(), + false, + ); + + // Create the network + let quorum_network: WebCommChannel< + TYPES, + NODE, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, + > = WebCommChannel::new(underlying_quorum_network.clone().into()); + + let view_sync_network: WebCommChannel< + TYPES, + NODE, + ViewSyncCertificate, + ViewSyncVote, + MEMBERSHIP, + > = WebCommChannel::new(underlying_quorum_network.into()); + + let WebServerConfig { + host, + port, + wait_between_polls, + }: WebServerConfig = config.clone().da_web_server_config.unwrap(); + + // Each node runs the DA network so that leaders have access to transactions and DA votes + let da_network: WebCommChannel, DAVote, MEMBERSHIP> = + WebCommChannel::new( + WebServerNetwork::create( + &host.to_string(), + port, + wait_between_polls, + pub_key, + true, + ) + .into(), + ); + + WebServerDARun { + config, + quorum_network, + da_network, + view_sync_network, + } + } + + fn get_da_network( + &self, + ) -> WebCommChannel, DAVote, MEMBERSHIP> { + self.da_network.clone() + } + + fn get_quorum_network( + &self, + ) -> WebCommChannel< + TYPES, + NODE, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, + > { + self.quorum_network.clone() + } + + fn get_view_sync_network( + &self, + ) -> WebCommChannel, ViewSyncVote, MEMBERSHIP> + { + self.view_sync_network.clone() + } + + fn get_config( + &self, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > { + self.config.clone() + } +} + +/// Main entry point for validators +pub async fn main_entry_point< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + DANETWORK: CommunicationChannel< + TYPES, + Message, + DAProposal, + DAVote, + MEMBERSHIP, + > + Debug, + QUORUMNETWORK: CommunicationChannel< + TYPES, + Message, + QuorumProposal>, + QuorumVote>, + MEMBERSHIP, + > + Debug, + VIEWSYNCNETWORK: CommunicationChannel< + TYPES, + Message, + ViewSyncCertificate, + ViewSyncVote, + MEMBERSHIP, + > + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + QUORUMNETWORK, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + VIEWSYNCNETWORK, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + RUNDA: RunDA, +>( + args: ValidatorArgs, +) where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, +{ + setup_logging(); + setup_backtrace(); + + info!("Starting validator"); + + let orchestrator_client: OrchestratorClient = + OrchestratorClient::connect_to_orchestrator(args.clone()).await; + + // Identify with the orchestrator + let public_ip = match args.public_ip { + Some(ip) => ip, + None => IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + }; + info!( + "Identifying with orchestrator using IP address {}", + public_ip.to_string() + ); + let node_index: u16 = orchestrator_client + .identify_with_orchestrator(public_ip.to_string()) + .await; + info!("Finished identifying; our node index is {node_index}"); + info!("Getting config from orchestrator"); + + let mut run_config = orchestrator_client + .get_config_from_orchestrator::(node_index) + .await; + + run_config.node_index = node_index.into(); + //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); + + info!("Initializing networking"); + let run = RUNDA::initialize_networking(run_config.clone()).await; + let hotshot = run.initialize_state_and_hotshot().await; + + info!("Waiting for start command from orchestrator"); + orchestrator_client + .wait_for_all_nodes_ready(run_config.clone().node_index) + .await; + + info!("All nodes are ready! Starting HotShot"); + run.run_hotshot(hotshot).await; +} diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs new file mode 100644 index 0000000000..3bb08103d0 --- /dev/null +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -0,0 +1,24 @@ +pub mod types; + +use clap::Parser; +use hotshot::demos::vdemo::VDemoTypes; +use tracing::instrument; +use types::ThisMembership; + +use crate::infra::{run_orchestrator, OrchestratorArgs}; +use crate::types::{NodeImpl, ThisNetwork}; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + let args = OrchestratorArgs::parse(); + + run_orchestrator::(args).await; +} diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs new file mode 100644 index 0000000000..8b3c70e5e1 --- /dev/null +++ b/hotshot/examples/libp2p/types.rs @@ -0,0 +1,60 @@ +use crate::infra::Libp2pRun; +use hotshot::traits::implementations::MemoryStorage; +use hotshot::{ + demos::vdemo::VDemoTypes, + traits::{ + election::static_committee::GeneralStaticCommittee, implementations::Libp2pCommChannel, + }, +}; +use hotshot_types::message::{Message, ValidatingMessage}; +use hotshot_types::traits::{ + election::QuorumExchange, + node_implementation::{ChannelMaps, NodeImplementation, ValidatingExchanges}, +}; +use hotshot_types::{ + data::{ValidatingLeaf, ValidatingProposal}, + traits::node_implementation::NodeType, + vote::QuorumVote, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +pub struct NodeImpl {} + +pub type ThisLeaf = ValidatingLeaf; +pub type ThisMembership = + GeneralStaticCommittee::SignatureKey>; +pub type ThisNetwork = + Libp2pCommChannel; + +pub type ThisProposal = ValidatingProposal; +pub type ThisVote = QuorumVote; + +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = ValidatingLeaf; + type Exchanges = ValidatingExchanges< + VDemoTypes, + Message, + QuorumExchange< + VDemoTypes, + Self::Leaf, + ThisProposal, + ThisMembership, + ThisNetwork, + Message, + >, + >; + type ConsensusMessage = ValidatingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + (ChannelMaps::new(start_view), None) + } +} +pub type ThisRun = Libp2pRun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs new file mode 100644 index 0000000000..59009c9d8f --- /dev/null +++ b/hotshot/examples/libp2p/validator.rs @@ -0,0 +1,23 @@ +use crate::infra::main_entry_point; +use clap::Parser; +use hotshot::demos::vdemo::VDemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use tracing::instrument; + +use crate::types::{NodeImpl, ThisMembership, ThisNetwork, ThisRun}; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + let args = ValidatorArgs::parse(); + main_entry_point::(args).await; +} diff --git a/hotshot/examples/web-server-da/README.md b/hotshot/examples/web-server-da/README.md new file mode 100644 index 0000000000..df22dc3357 --- /dev/null +++ b/hotshot/examples/web-server-da/README.md @@ -0,0 +1,29 @@ +Commands to run da examples: +1a)Start web servers by either running 3 servers: +cargo run --example web-server --profile=release-lto --features="full-ci" +cargo run --example web-server --profile=release-lto --features="full-ci" +cargo run --example web-server --profile=release-lto --features="full-ci" + +1b)Or use multi-web-server to spin up all three: +cargo run --example multi-web-server --profile=release-lto --features="full-ci" + +2) Start orchestrator: +cargo run --example web-server-da-orchestrator --features="full-ci,channel-async-std" --profile=release-lto -- + +3a) Start validator: +cargo run --profile=release-lto --example web-server-da-validator --features="full-ci" + +3b) Or start multiple validators: +cargo run --profile=release-lto --example multi-validator --features="full-ci" + +I.e. +cargo run --example web-server --profile=release-lto --features="full-ci" 9000 +cargo run --example web-server --profile=release-lto --features="full-ci" 9001 +cargo run --example web-server --profile=release-lto --features="full-ci" 9002 +cargo run --example web-server-da-orchestrator --features="full-ci,channel-async-std" --profile=release-lto -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml +cargo run --profile=release-lto --example web-server-da-validator --features="full-ci" 2 0.0.0.0 4444 + +OR: +cargo run --example multi-web-server --profile=release-lto --features="full-ci" 9000 9001 9002 +cargo run --example web-server-da-orchestrator --features="full-ci,channel-async-std" --profile=release-lto -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml +cargo run --profile=release-lto --example multi-validator --features="full-ci" 10 0.0.0.0 4444 \ No newline at end of file diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs new file mode 100644 index 0000000000..8a0f53c1c9 --- /dev/null +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -0,0 +1,69 @@ +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, +}; +use clap::Parser; +use hotshot::demos::sdemo::SDemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use std::net::IpAddr; +use tracing::instrument; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; + +#[derive(Parser, Debug, Clone)] +struct MultiValidatorArgs { + /// Number of validators to run + pub num_nodes: u16, + /// The address the orchestrator runs on + pub host: IpAddr, + /// The port the orchestrator runs on + pub port: u16, + /// This node's public IP address, for libp2p + /// If no IP address is passed in, it will default to 127.0.0.1 + pub public_ip: Option, +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = MultiValidatorArgs::parse(); + tracing::error!( + "connecting to orchestrator at {:?}:{:?}", + args.host, + args.port + ); + let mut nodes = Vec::new(); + for _ in 0..args.num_nodes { + let node = async_spawn(async move { + infra_da::main_entry_point::< + SDemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: args.host.to_string(), + port: args.port, + public_ip: args.public_ip, + }) + .await + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/web-server-da/multi-web-server.rs b/hotshot/examples/web-server-da/multi-web-server.rs new file mode 100644 index 0000000000..c41c9c0b03 --- /dev/null +++ b/hotshot/examples/web-server-da/multi-web-server.rs @@ -0,0 +1,59 @@ +use std::sync::Arc; + +use async_compatibility_layer::{art::async_spawn, channel::oneshot}; +use clap::Parser; +use hotshot::demos::sdemo::SDemoTypes; +use tracing::error; + +#[derive(Parser, Debug)] +struct MultiWebServerArgs { + cdn_port: u16, + da_port: u16, + view_sync_port: u16, +} +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() { + let args = MultiWebServerArgs::parse(); + let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); + let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); + let (server_shutdown_sender_view_sync, server_shutdown_view_sync) = oneshot(); + let _sender = Arc::new(server_shutdown_sender_cdn); + let _sender = Arc::new(server_shutdown_sender_da); + let _sender = Arc::new(server_shutdown_sender_view_sync); + + let cdn_server = async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_cdn), args.cdn_port) + .await + { + error!("Problem starting cdn web server: {:?}", e); + } + error!("cdn"); + }); + let da_server = async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_da), args.da_port) + .await + { + error!("Problem starting da web server: {:?}", e); + } + error!("da"); + }); + let vs_server = async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_view_sync), args.view_sync_port) + .await + { + error!("Problem starting view sync web server: {:?}", e); + } + error!("vs"); + }); + let _result = futures::future::join_all(vec![cdn_server, da_server, vs_server]).await; +} diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs new file mode 100644 index 0000000000..772d3aa12e --- /dev/null +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -0,0 +1,40 @@ +pub mod types; + +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot::demos::sdemo::SDemoTypes; +use tracing::instrument; +use types::ThisMembership; + +use crate::{ + infra::OrchestratorArgs, + infra_da::run_orchestrator_da, + types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, +}; + +#[path = "../infra/mod.rs"] +pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = OrchestratorArgs::parse(); + + run_orchestrator_da::< + SDemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + >(args) + .await; +} diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs new file mode 100644 index 0000000000..9ea1f9a694 --- /dev/null +++ b/hotshot/examples/web-server-da/types.rs @@ -0,0 +1,78 @@ +use crate::infra_da::WebServerDARun; +use hotshot::{ + demos::sdemo::SDemoTypes, + traits::{ + election::static_committee::GeneralStaticCommittee, + implementations::{MemoryStorage, WebCommChannel}, + }, +}; +use hotshot_types::{ + certificate::ViewSyncCertificate, + data::{DAProposal, QuorumProposal, SequencingLeaf}, + message::{Message, SequencingMessage}, + traits::{ + election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, + }, + vote::{DAVote, QuorumVote, ViewSyncVote}, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct NodeImpl {} + +pub type ThisLeaf = SequencingLeaf; +pub type ThisMembership = + GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = + WebCommChannel; +pub type QuorumNetwork = + WebCommChannel; +pub type ViewSyncNetwork = + WebCommChannel; + +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; + +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; + +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; + +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; + type Exchanges = SequencingExchanges< + SDemoTypes, + Message, + QuorumExchange< + SDemoTypes, + Self::Leaf, + ThisQuorumProposal, + ThisMembership, + QuorumNetwork, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + SDemoTypes, + ThisViewSyncProposal, + ThisMembership, + ViewSyncNetwork, + Message, + >, + >; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + (ChannelMaps::new(start_view), None) + } +} +pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs new file mode 100644 index 0000000000..ec2415fd65 --- /dev/null +++ b/hotshot/examples/web-server-da/validator.rs @@ -0,0 +1,41 @@ +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot::demos::sdemo::SDemoTypes; +use tracing::{info, instrument}; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; + +use hotshot_orchestrator::client::ValidatorArgs; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = ValidatorArgs::parse(); + info!( + "connecting to orchestrator at {:?}:{:?}", + args.host, args.port + ); + infra_da::main_entry_point::< + SDemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + ThisRun, + >(args) + .await; +} diff --git a/hotshot/examples/web-server-da/web-server.rs b/hotshot/examples/web-server-da/web-server.rs new file mode 100644 index 0000000000..99d0b12f63 --- /dev/null +++ b/hotshot/examples/web-server-da/web-server.rs @@ -0,0 +1,29 @@ +use hotshot::demos::sdemo::SDemoTypes; +use std::sync::Arc; + +use async_compatibility_layer::{ + channel::oneshot, + logging::{setup_backtrace, setup_logging}, +}; +use clap::Parser; + +#[derive(Parser, Debug)] +struct WebServerArgs { + port: u16, +} +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() { + setup_backtrace(); + setup_logging(); + let args = WebServerArgs::parse(); + let (server_shutdown_sender, server_shutdown) = oneshot(); + let _sender = Arc::new(server_shutdown_sender); + let _result = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown), args.port) + .await; +} diff --git a/hotshot/src/certificate.rs b/hotshot/src/certificate.rs new file mode 100644 index 0000000000..c1b18b078b --- /dev/null +++ b/hotshot/src/certificate.rs @@ -0,0 +1 @@ +pub use hotshot_types::certificate::QuorumCertificate; diff --git a/hotshot/src/demos.rs b/hotshot/src/demos.rs new file mode 100644 index 0000000000..7ddbef89c6 --- /dev/null +++ b/hotshot/src/demos.rs @@ -0,0 +1,8 @@ +//! Contains implementations of the `HotShot` traits used in the examples and integration testing. +//! +//! These implementations are not suitable for production use. + +/// this is a demo for sequencing consensus +pub mod sdemo; +/// this is a demo for validating consensus +pub mod vdemo; diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs new file mode 100644 index 0000000000..97117347a9 --- /dev/null +++ b/hotshot/src/demos/sdemo.rs @@ -0,0 +1,384 @@ +//! Sequencing consensus demo +//! +//! This module provides an implementation of the `HotShot` suite of traits that implements a +//! basic demonstration of sequencing consensus. +//! +//! These implementations are useful in examples and integration testing, but are not suitable for +//! production use. +use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; +use std::{ + collections::HashSet, + fmt::{Debug, Display}, + marker::PhantomData, + ops::Deref, +}; + +use commit::{Commitment, Committable}; +use derivative::Derivative; +use either::Either; +use hotshot_signature_key::bn254::BN254Pub; +use hotshot_types::{ + certificate::{AssembledSignature, QuorumCertificate}, + constants::genesis_proposer_id, + data::{fake_commitment, random_commitment, LeafType, SequencingLeaf, ViewNumber}, + traits::{ + block_contents::Transaction, + election::Membership, + node_implementation::NodeType, + state::{ConsensusTime, TestableBlock, TestableState}, + Block, State, + }, +}; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; + +/// The transaction for the sequencing demo +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct SDemoTransaction { + /// identifier for the transaction + pub id: u64, + /// padding to add to txn (to make it larger and thereby more realistic) + pub padding: Vec, +} + +impl Deref for SDemoTransaction { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.id + } +} + +impl Committable for SDemoTransaction { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("SDemo Txn Comm") + .u64_field("id", self.id) + .finalize() + } + + fn tag() -> String { + "SEQUENCING_DEMO_TXN".to_string() + } +} + +impl Transaction for SDemoTransaction {} + +impl SDemoTransaction { + /// create a new transaction + #[must_use] + pub fn new(id: u64) -> Self { + Self { + id, + padding: vec![], + } + } +} + +/// genesis block +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct SDemoGenesisBlock {} + +/// Any block after genesis +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct SDemoNormalBlock { + /// Block state commitment + pub previous_state: (), + /// Transaction vector + pub transactions: Vec, +} + +/// The block for the sequencing demo +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub enum SDemoBlock { + /// genesis block + Genesis(SDemoGenesisBlock), + /// normal block + Normal(SDemoNormalBlock), +} + +impl Committable for SDemoBlock { + fn commit(&self) -> Commitment { + match &self { + SDemoBlock::Genesis(_) => { + commit::RawCommitmentBuilder::new("SDemo Genesis Comm").finalize() + } + SDemoBlock::Normal(block) => { + let mut builder = commit::RawCommitmentBuilder::new("SDemo Normal Comm"); + for txn in &block.transactions { + builder = builder.u64_field("transaction", **txn); + } + builder.finalize() + } + } + } + + fn tag() -> String { + "SEQUENCING_DEMO_BLOCK".to_string() + } +} + +/// sequencing demo entry state +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct SDemoState { + /// the block height + block_height: u64, + /// the view number + view_number: ViewNumber, + /// the previous state commitment + prev_state_commitment: Commitment, +} + +impl Committable for SDemoState { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("SDemo State Commit") + .u64_field("block_height", self.block_height) + .u64_field("view_number", *self.view_number) + .field("prev_state_commitment", self.prev_state_commitment) + .finalize() + } + + fn tag() -> String { + "SEQUENCING_DEMO_STATE".to_string() + } +} + +impl Default for SDemoState { + fn default() -> Self { + Self { + block_height: 0, + view_number: ViewNumber::genesis(), + prev_state_commitment: fake_commitment(), + } + } +} + +/// The error type for the sequencing demo +#[derive(Snafu, Debug)] +pub enum SDemoError { + /// Previous state commitment does not match + PreviousStateMismatch, + /// Nonce was reused + ReusedTxn, + /// Genesis failure + GenesisFailed, + /// Genesis reencountered after initialization + GenesisAfterStart, + /// no transasctions added to genesis + GenesisCantHaveTransactions, + /// invalid block + InvalidBlock, +} + +impl Display for SDemoBlock { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SDemoBlock::Genesis(_) => { + write!(f, "SDemo Genesis Block") + } + SDemoBlock::Normal(block) => { + write!(f, "SDemo Normal Block #txns={}", block.transactions.len()) + } + } + } +} + +impl TestableBlock for SDemoBlock { + fn genesis() -> Self { + SDemoBlock::Genesis(SDemoGenesisBlock {}) + } + + fn txn_count(&self) -> u64 { + match self { + SDemoBlock::Genesis(_) => 0, + SDemoBlock::Normal(n) => n.transactions.len() as u64, + } + } +} + +impl Block for SDemoBlock { + type Error = SDemoError; + + type Transaction = SDemoTransaction; + + fn new() -> Self { + ::genesis() + } + + fn add_transaction_raw( + &self, + tx: &Self::Transaction, + ) -> std::result::Result { + match self { + SDemoBlock::Genesis(_) => Err(SDemoError::GenesisCantHaveTransactions), + SDemoBlock::Normal(n) => { + let mut new = n.clone(); + new.transactions.push(tx.clone()); + Ok(SDemoBlock::Normal(new)) + } + } + } + + fn contained_transactions(&self) -> HashSet> { + match self { + SDemoBlock::Genesis(_) => HashSet::new(), + SDemoBlock::Normal(n) => n + .transactions + .iter() + .map(commit::Committable::commit) + .collect(), + } + } +} + +impl State for SDemoState { + type Error = SDemoError; + + type BlockType = SDemoBlock; + + type Time = ViewNumber; + + fn next_block(_state: Option) -> Self::BlockType { + SDemoBlock::Normal(SDemoNormalBlock { + previous_state: (), + transactions: Vec::new(), + }) + } + + fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool { + match block { + SDemoBlock::Genesis(_) => { + view_number == &ViewNumber::genesis() && view_number == &self.view_number + } + SDemoBlock::Normal(_n) => self.view_number < *view_number, + } + } + + fn append( + &self, + block: &Self::BlockType, + view_number: &Self::Time, + ) -> Result { + if !self.validate_block(block, view_number) { + return Err(SDemoError::InvalidBlock); + } + + Ok(SDemoState { + block_height: self.block_height + 1, + view_number: *view_number, + prev_state_commitment: self.commit(), + }) + } + + fn on_commit(&self) {} +} + +impl TestableState for SDemoState { + fn create_random_transaction( + _state: Option<&Self>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + SDemoTransaction { + id: rng.gen_range(0..10), + padding: vec![0; padding as usize], + } + } +} +/// Implementation of [`NodeType`] for [`VDemoNode`] +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +pub struct SDemoTypes; + +impl NodeType for SDemoTypes { + type Time = ViewNumber; + type BlockType = SDemoBlock; + type SignatureKey = BN254Pub; + type VoteTokenType = StaticVoteToken; + type Transaction = SDemoTransaction; + type ElectionConfigType = StaticElectionConfig; + type StateType = SDemoState; +} + +/// The node implementation for the sequencing demo +#[derive(Derivative)] +#[derivative(Clone(bound = ""))] +pub struct SDemoNode(PhantomData) +where + MEMBERSHIP: Membership + std::fmt::Debug; + +impl SDemoNode +where + MEMBERSHIP: Membership + std::fmt::Debug, +{ + /// Create a new `SDemoNode` + #[must_use] + pub fn new() -> Self { + SDemoNode(PhantomData) + } +} + +impl Debug for SDemoNode +where + MEMBERSHIP: Membership + std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SDemoNode") + .field("_phantom", &"phantom") + .finish() + } +} + +impl Default for SDemoNode +where + MEMBERSHIP: Membership + std::fmt::Debug, +{ + fn default() -> Self { + Self::new() + } +} + +/// Provides a random [`QuorumCertificate`] +pub fn random_quorum_certificate>( + rng: &mut dyn rand::RngCore, +) -> QuorumCertificate { + QuorumCertificate { + // block_commitment: random_commitment(rng), + leaf_commitment: random_commitment(rng), + view_number: TYPES::Time::new(rng.gen()), + signatures: AssembledSignature::Genesis(), + is_genesis: rng.gen(), + } +} + +/// Provides a random [`SequencingLeaf`] +pub fn random_sequencing_leaf( + deltas: Either>, + rng: &mut dyn rand::RngCore, +) -> SequencingLeaf { + let justify_qc = random_quorum_certificate(rng); + // let state = TYPES::StateType::default() + // .append(&deltas, &TYPES::Time::new(42)) + // .unwrap_or_default(); + SequencingLeaf { + view_number: justify_qc.view_number, + height: rng.next_u64(), + justify_qc, + parent_commitment: random_commitment(rng), + deltas, + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: genesis_proposer_id(), + } +} diff --git a/hotshot/src/demos/vdemo.rs b/hotshot/src/demos/vdemo.rs new file mode 100644 index 0000000000..1d091c60b1 --- /dev/null +++ b/hotshot/src/demos/vdemo.rs @@ -0,0 +1,607 @@ +// //! Validating (vanilla) consensus demo +// //! +// //! This module provides an implementation of the `HotShot` suite of traits that implements a +// //! basic demonstration of validating consensus. +// //! +// //! These implementations are useful in examples and integration testing, but are not suitable for +// //! production use. +// +// use crate::traits::{ +// election::static_committee::{StaticElectionConfig, StaticVoteToken}, +// Block, +// }; +// use commit::{Commitment, Committable}; +// use derivative::Derivative; +// +// use hotshot_types::{ +// certificate::{QuorumCertificate, YesNoSignature}, +// constants::genesis_proposer_id, +// data::{random_commitment, LeafType, ValidatingLeaf, ViewNumber}, +// traits::{ +// block_contents::Transaction, +// consensus_type::validating_consensus::ValidatingConsensus, +// election::Membership, +// node_implementation::NodeType, +// signature_key::ed25519::Ed25519Pub, +// state::{ConsensusTime, TestableBlock, TestableState}, +// State, +// }, +// }; +// +// use rand::Rng; +// use serde::{Deserialize, Serialize}; +// use snafu::{ensure, Snafu}; +// use std::{ +// collections::{BTreeMap, HashSet}, +// fmt::{Debug, Display}, +// marker::PhantomData, +// }; +// use tracing::error; +// +// /// The account identifier type used by the demo +// /// +// /// This is a type alias to [`String`] for simplicity. +// pub type Account = String; +// +// /// The account balance type used by the demo +// /// +// /// This is a type alias to [`u64`] for simplicity. +// pub type Balance = u64; +// +// /// Records a reduction in an account balance +// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +// pub struct Subtraction { +// /// An account identifier +// pub account: Account, +// /// An account balance +// pub amount: Balance, +// } +// +// /// Records an increase in an account balance +// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +// pub struct Addition { +// /// An account identifier +// pub account: Account, +// /// An account balance +// pub amount: Balance, +// } +// +// /// The error type for the validating demo +// #[derive(Snafu, Debug)] +// pub enum VDemoError { +// /// The subtraction and addition amounts for this transaction were not equal +// InconsistentTransaction, +// /// No such input account exists +// NoSuchInputAccount, +// /// No such output account exists +// NoSuchOutputAccount, +// /// Tried to move more money than was in the account +// InsufficentBalance, +// /// Previous state commitment does not match +// PreviousStateMismatch, +// /// Nonce was reused +// ReusedNonce, +// /// Genesis failure +// GenesisFailed, +// /// Genesis reencountered after initialization +// GenesisAfterStart, +// } +// +// /// The transaction for the validating demo +// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +// pub struct VDemoTransaction { +// /// An increment to an account balance +// pub add: Addition, +// /// A decrement to an account balance +// pub sub: Subtraction, +// /// The nonce for a transaction, no two transactions can have the same nonce +// pub nonce: u64, +// /// Number of bytes to pad to each transaction +// pub padding: Vec, +// } +// +// impl Transaction for VDemoTransaction {} +// +// impl VDemoTransaction { +// /// Ensures that this transaction is at least consistent with itself +// #[must_use] +// pub fn validate_independence(&self) -> bool { +// // Ensure that we are adding to one account exactly as much as we are subtracting from +// // another +// self.add.amount <= self.sub.amount // TODO why not strict equality? +// } +// } +// +// /// The state for the validating demo +// /// NOTE both fields are btrees because we need +// /// ordered-ing otherwise commitments will not match +// #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, Hash)] +// pub struct VDemoState { +// /// Key/value store of accounts and balances +// pub balances: BTreeMap, +// // /// Set of previously seen nonces +// // pub nonces: BTreeSet, +// } +// +// impl Committable for VDemoState { +// fn commit(&self) -> Commitment { +// let mut builder = commit::RawCommitmentBuilder::new("VDemo State Comm"); +// +// for (k, v) in &self.balances { +// builder = builder.u64_field(k, *v); +// } +// builder = builder.constant_str("nonces"); +// +// // for nonce in &self.nonces { +// // builder = builder.u64(*nonce); +// // } +// +// builder.finalize() +// } +// +// fn tag() -> String { +// "VALIDATING_DEMO_STATE".to_string() +// } +// } +// +// /// initializes the first state on genesis +// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +// pub struct VDemoGenesisBlock { +// /// initializes the first state +// pub accounts: BTreeMap, +// } +// +// /// Any block after genesis +// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +// pub struct VDemoNormalBlock { +// /// Block state commitment +// pub previous_state: Commitment, +// /// Transaction vector +// pub transactions: Vec, +// } +// +// /// The block for the validating demo +// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +// pub enum VDemoBlock { +// /// genesis block +// Genesis(VDemoGenesisBlock), +// /// normal block +// Normal(VDemoNormalBlock), +// } +// +// impl Committable for VDemoBlock { +// fn commit(&self) -> Commitment { +// match &self { +// VDemoBlock::Genesis(block) => { +// let mut builder = commit::RawCommitmentBuilder::new("VDemo Genesis Comm") +// .u64_field("account_count", block.accounts.len() as u64); +// for account in &block.accounts { +// builder = builder.u64_field(account.0, *account.1); +// } +// builder.finalize() +// } +// VDemoBlock::Normal(block) => { +// let mut builder = commit::RawCommitmentBuilder::new("VDemo Block Comm") +// .var_size_field("Previous State", block.previous_state.as_ref()); +// +// for txn in &block.transactions { +// builder = builder +// .u64_field(&txn.add.account, txn.add.amount) +// .u64_field(&txn.sub.account, txn.sub.amount) +// .constant_str("nonce") +// .u64_field("nonce", txn.nonce); +// } +// +// builder.finalize() +// } +// } +// } +// +// fn tag() -> String { +// "VALIDATING_DEMO_BLOCK".to_string() +// } +// } +// +// impl Committable for VDemoTransaction { +// fn commit(&self) -> Commitment { +// commit::RawCommitmentBuilder::new("VDemo Txn Comm") +// .u64_field(&self.add.account, self.add.amount) +// .u64_field(&self.sub.account, self.sub.amount) +// .constant_str("nonce") +// .u64_field("nonce", self.nonce) +// .finalize() +// } +// +// fn tag() -> String { +// "VALIDATING_DEMO_TXN".to_string() +// } +// } +// +// impl VDemoBlock { +// /// generate a genesis block with the provided initial accounts and balances +// #[must_use] +// pub fn genesis_from(accounts: BTreeMap) -> Self { +// Self::Genesis(VDemoGenesisBlock { accounts }) +// } +// } +// +// impl State for VDemoState { +// type Error = VDemoError; +// +// type BlockType = VDemoBlock; +// +// type Time = ViewNumber; +// +// #[allow(clippy::panic)] +// fn next_block(state: Option) -> Self::BlockType { +// match state { +// Some(state) => VDemoBlock::Normal(VDemoNormalBlock { +// previous_state: state.commit(), +// transactions: Vec::new(), +// }), +// None => panic!("State is required for the next block"), +// } +// } +// +// // Note: validate_block is actually somewhat redundant, its meant to be a quick and dirty check +// // for clarity, the logic is duplicated with append_to +// fn validate_block(&self, block: &Self::BlockType, _view_number: &Self::Time) -> bool { +// match block { +// VDemoBlock::Genesis(_) => self.balances.is_empty(), // && self.nonces.is_empty(), +// VDemoBlock::Normal(block) => { +// let state = self; +// // A valid block is one in which every transaction is internally consistent, and results in +// // nobody having a negative balance +// // +// // We will check this, in this case, by making a trial copy of our balances map, making +// // trial modifications to it, and then asserting that no balances are negative +// let mut trial_balances = state.balances.clone(); +// for tx in &block.transactions { +// // This is a macro from SNAFU that returns an Err if the condition is not satisfied +// // +// // We first check that the transaction is internally consistent, then apply the change +// // to our trial map +// if !tx.validate_independence() { +// error!("validate_independence failed"); +// return false; +// } +// // Find the input account, and subtract the transfer balance from it, failing if it +// // doesn't exist +// if let Some(input_account) = trial_balances.get_mut(&tx.sub.account) { +// *input_account -= tx.sub.amount; +// } else { +// error!("no such input account"); +// return false; +// } +// // Find the output account, and add the transfer balance to it, failing if it doesn't +// // exist +// if let Some(output_account) = trial_balances.get_mut(&tx.add.account) { +// *output_account += tx.add.amount; +// } else { +// error!("no such output account"); +// return false; +// } +// // // Check to make sure the nonce isn't used +// // if state.nonces.contains(&tx.nonce) { +// // warn!(?state, ?tx, "State nonce is used for transaction"); +// // return false; +// // } +// } +// // This block has now passed all our tests, and thus has not done anything bad, so the block +// // is valid if its previous state hash matches that of the previous state +// let result = block.previous_state == state.commit(); +// if !result { +// error!( +// "hash failure. previous_block: {:?} hash_state: {:?}", +// block.previous_state, +// state.commit() +// ); +// } +// result +// } +// } +// } +// +// fn append( +// &self, +// block: &Self::BlockType, +// _view_number: &Self::Time, +// ) -> std::result::Result { +// match block { +// VDemoBlock::Genesis(block) => { +// if self.balances.is_empty() { +// // && self.nonces.is_empty() +// let mut new_state = Self::default(); +// for account in &block.accounts { +// if new_state +// .balances +// .insert(account.0.clone(), *account.1) +// .is_some() +// { +// error!("Adding the same account twice during application of genesis block!"); +// return Err(VDemoError::GenesisFailed); +// } +// } +// Ok(new_state) +// } else { +// Err(VDemoError::GenesisAfterStart) +// } +// } +// VDemoBlock::Normal(block) => { +// let state = self; +// // A valid block is one in which every transaction is internally consistent, and results in +// // nobody having a negative balance +// // +// // We will check this, in this case, by making a trial copy of our balances map, making +// // trial modifications to it, and then asserting that no balances are negative +// let mut trial_balances = state.balances.clone(); +// for tx in &block.transactions { +// // This is a macro from SNAFU that returns an Err if the condition is not satisfied +// // +// // We first check that the transaction is internally consistent, then apply the change +// // to our trial map +// ensure!(tx.validate_independence(), InconsistentTransactionSnafu); +// // Find the input account, and subtract the transfer balance from it, failing if it +// // doesn't exist +// if let Some(input_account) = trial_balances.get_mut(&tx.sub.account) { +// *input_account -= tx.sub.amount; +// } else { +// return Err(VDemoError::NoSuchInputAccount); +// } +// // Find the output account, and add the transfer balance to it, failing if it doesn't +// // exist +// if let Some(output_account) = trial_balances.get_mut(&tx.add.account) { +// *output_account += tx.add.amount; +// } else { +// return Err(VDemoError::NoSuchOutputAccount); +// } +// // // Check for nonce reuse +// // if state.nonces.contains(&tx.nonce) { +// // return Err(VDemoError::ReusedNonce); +// // } +// } +// // Make sure our previous state commitment matches the provided state +// if block.previous_state == state.commit() { +// // This block has now passed all our tests, and thus has not done anything bad +// // Add the nonces from this block +// // let mut nonces = state.nonces.clone(); +// // for tx in &block.transactions { +// // nonces.insert(tx.nonce); +// // } +// Ok(VDemoState { +// balances: trial_balances, +// // nonces, +// }) +// } else { +// Err(VDemoError::PreviousStateMismatch) +// } +// } +// } +// } +// +// fn on_commit(&self) { +// // Does nothing in this implementation +// } +// } +// +// impl Display for VDemoBlock { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// match self { +// VDemoBlock::Genesis(block) => { +// write!(f, "VDemo Genesis Block: {block:#?}") +// } +// VDemoBlock::Normal(block) => { +// write!(f, "VDemo Normal Block #txns={}", block.transactions.len()) +// } +// } +// } +// } +// +// impl TestableState for VDemoState { +// fn create_random_transaction( +// state: Option<&Self>, +// rng: &mut dyn rand::RngCore, +// padding: u64, +// ) -> ::Transaction { +// use rand::seq::IteratorRandom; +// +// let state = state.expect("Missing state"); +// +// let non_zero_balances = state +// .balances +// .iter() +// .filter(|b| *b.1 > 0) +// .collect::>(); +// +// assert!( +// !non_zero_balances.is_empty(), +// "No nonzero balances were available! Unable to generate transaction." +// ); +// +// let input_account = non_zero_balances.iter().choose(rng).unwrap().0; +// let output_account = state.balances.keys().choose(rng).unwrap(); +// let amount = rng.gen_range(0..100); +// +// VDemoTransaction { +// add: Addition { +// account: output_account.to_string(), +// amount, +// }, +// sub: Subtraction { +// account: input_account.to_string(), +// amount, +// }, +// nonce: rng.gen(), +// padding: vec![0; padding as usize], +// } +// } +// } +// +// impl TestableBlock for VDemoBlock { +// fn genesis() -> Self { +// let accounts: BTreeMap = vec![ +// ("Joe", 1_000_000), +// ("Nathan M", 500_000), +// ("John", 400_000), +// ("Nathan Y", 600_000), +// ("Ian", 5_000_000), +// ] +// .into_iter() +// .map(|(x, y)| (x.to_string(), y)) +// .collect(); +// Self::Genesis(VDemoGenesisBlock { accounts }) +// } +// +// fn txn_count(&self) -> u64 { +// if let VDemoBlock::Normal(block) = self { +// block.transactions.len() as u64 +// } else { +// 0 +// } +// } +// } +// +// impl Block for VDemoBlock { +// type Transaction = VDemoTransaction; +// +// type Error = VDemoError; +// +// fn new() -> Self { +// ::genesis() +// } +// +// fn add_transaction_raw( +// &self, +// tx: &Self::Transaction, +// ) -> std::result::Result { +// match self { +// VDemoBlock::Genesis(_) => Err(VDemoError::GenesisAfterStart), +// VDemoBlock::Normal(block) => { +// // first, make sure that the transaction is internally valid +// if tx.validate_independence() { +// // Then check the previous transactions in the block +// if block.transactions.iter().any(|x| x.nonce == tx.nonce) { +// return Err(VDemoError::ReusedNonce); +// } +// let mut new_block = block.clone(); +// // Insert our transaction and return +// new_block.transactions.push(tx.clone()); +// Ok(VDemoBlock::Normal(new_block)) +// } else { +// Err(VDemoError::InconsistentTransaction) +// } +// } +// } +// } +// fn contained_transactions(&self) -> HashSet> { +// match self { +// VDemoBlock::Genesis(_) => HashSet::new(), +// VDemoBlock::Normal(block) => block +// .transactions +// .clone() +// .into_iter() +// .map(|tx| tx.commit()) +// .collect(), +// } +// } +// } +// +// /// Implementation of [`NodeType`] for [`VDemoNode`] +// #[derive( +// Copy, +// Clone, +// Debug, +// Default, +// Hash, +// PartialEq, +// Eq, +// PartialOrd, +// Ord, +// serde::Serialize, +// serde::Deserialize, +// )] +// pub struct VDemoTypes; +// +// impl NodeType for VDemoTypes { +// type ConsensusType = ValidatingConsensus; +// type Time = ViewNumber; +// type BlockType = VDemoBlock; +// type SignatureKey = Ed25519Pub; +// type VoteTokenType = StaticVoteToken; +// type Transaction = VDemoTransaction; +// type ElectionConfigType = StaticElectionConfig; +// type StateType = VDemoState; +// } +// +// /// The node implementation for the validating demo +// #[derive(Derivative)] +// #[derivative(Clone(bound = ""))] +// pub struct VDemoNode(PhantomData) +// where +// MEMBERSHIP: Membership + Debug; +// +// impl VDemoNode +// where +// MEMBERSHIP: Membership + Debug, +// { +// /// Create a new `VDemoNode` +// #[must_use] +// pub fn new() -> Self { +// VDemoNode(PhantomData) +// } +// } +// +// impl Debug for VDemoNode +// where +// MEMBERSHIP: Membership + Debug, +// { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// f.debug_struct("VDemoNode") +// .field("_phantom", &"phantom") +// .finish() +// } +// } +// +// impl Default for VDemoNode +// where +// MEMBERSHIP: Membership + Debug, +// { +// fn default() -> Self { +// Self::new() +// } +// } +// +// /// Provides a random [`QuorumCertificate`] +// pub fn random_quorum_certificate>( +// rng: &mut dyn rand::RngCore, +// ) -> QuorumCertificate { +// QuorumCertificate { +// // block_commitment: random_commitment(rng), +// leaf_commitment: random_commitment(rng), +// view_number: TYPES::Time::new(rng.gen()), +// signatures: YesNoSignature::Yes(BTreeMap::default()), +// is_genesis: rng.gen(), +// } +// } +// +// /// Provides a random [`ValidatingLeaf`] +// pub fn random_validating_leaf>( +// deltas: TYPES::BlockType, +// rng: &mut dyn rand::RngCore, +// ) -> ValidatingLeaf { +// let justify_qc = random_quorum_certificate(rng); +// let state = TYPES::StateType::default() +// .append(&deltas, &TYPES::Time::new(42)) +// .unwrap_or_default(); +// ValidatingLeaf { +// view_number: justify_qc.view_number, +// height: rng.next_u64(), +// justify_qc, +// parent_commitment: random_commitment(rng), +// deltas, +// state, +// rejected: Vec::new(), +// timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), +// proposer_id: genesis_proposer_id(), +// } +// } diff --git a/hotshot/src/documentation.rs b/hotshot/src/documentation.rs new file mode 100644 index 0000000000..86850a7eec --- /dev/null +++ b/hotshot/src/documentation.rs @@ -0,0 +1,19 @@ +// This is prosaic documentation, we don't need clippy +#![allow( + clippy::all, + clippy::pedantic, + missing_docs, + clippy::missing_docs_in_private_items, + non_camel_case_types +)] +#![cfg_attr(feature = "doc-images", +cfg_attr(all(), +doc = ::embed_doc_image::embed_image!("basic_hotstuff", "../../docs/HotShotDocs/img/basic_hotstuff.svg")), +doc = ::embed_doc_image::embed_image!("chained_hotstuff", "../../docs/HotShotDocs/img/chained_hotstuff.svg")) +] +#![cfg_attr( + not(feature = "doc-images"), + doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \ + to enable." +)] +#![ doc = include_str!("../../../docs/HotShotDocs/main.md")] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs new file mode 100644 index 0000000000..0f75e7a673 --- /dev/null +++ b/hotshot/src/lib.rs @@ -0,0 +1,1650 @@ +#![warn( + clippy::all, + clippy::pedantic, + rust_2018_idioms, + missing_docs, + clippy::missing_docs_in_private_items, + clippy::panic +)] +#![allow(clippy::module_name_repetitions)] +// Temporary +#![allow(clippy::cast_possible_truncation)] +// Temporary, should be disabled after the completion of the NodeImplementation refactor +#![allow(clippy::type_complexity)] +//! Provides a generic rust implementation of the `HotShot` BFT protocol +//! +//! See the [protocol documentation](https://github.com/EspressoSystems/hotshot-spec) for a protocol description. + +// Documentation module +#[cfg(feature = "docs")] +pub mod documentation; + +/// Data availability support +// pub mod da; +/// Contains structures and functions for committee election +pub mod certificate; +#[cfg(feature = "demo")] +pub mod demos; +/// Contains traits consumed by [`HotShot`] +pub mod traits; +/// Contains types used by the crate +pub mod types; + +pub mod tasks; + +use crate::{ + certificate::QuorumCertificate, + tasks::{ + add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, + add_view_sync_task, + }, + traits::{NodeImplementation, Storage}, + types::{Event, SystemContextHandle}, +}; +use async_compatibility_layer::{ + art::{async_spawn, async_spawn_local}, + async_primitives::{broadcast::BroadcastSender, subscribable_rwlock::SubscribableRwLock}, + channel::{unbounded, UnboundedReceiver, UnboundedSender}, +}; +use async_lock::{Mutex, RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; +use async_trait::async_trait; +use commit::{Commitment, Committable}; +use custom_debug::Debug; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + task_launcher::TaskRunner, +}; +use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; + +use hotshot_types::{ + certificate::{DACertificate, ViewSyncCertificate}, + consensus::{BlockStore, Consensus, ConsensusMetrics, View, ViewInner, ViewQueue}, + data::{DAProposal, DeltasType, LeafType, ProposalType, QuorumProposal, SequencingLeaf}, + error::StorageSnafu, + message::{ + ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, + ProcessedGeneralConsensusMessage, SequencingMessage, + }, + traits::{ + consensus_api::{ConsensusSharedApi, SequencingConsensusApi}, + election::{ConsensusExchange, Membership, SignedCertificate}, + metrics::Metrics, + network::{CommunicationChannel, NetworkError}, + node_implementation::{ + ChannelMaps, CommitteeEx, ExchangesType, NodeType, SendToTasks, SequencingQuorumEx, + ViewSyncEx, + }, + signature_key::SignatureKey, + state::ConsensusTime, + storage::StoredView, + State, + }, + vote::{ViewSyncData, VoteType}, + HotShotConfig, +}; +use snafu::ResultExt; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + num::NonZeroUsize, + sync::Arc, + time::Duration, +}; +use tracing::{debug, error, info, instrument, trace, warn}; +// -- Rexports +// External +/// Reexport rand crate +pub use rand; +// Internal +/// Reexport error type +pub use hotshot_types::error::HotShotError; + +/// Length, in bytes, of a 512 bit hash +pub const H_512: usize = 64; +/// Length, in bytes, of a 256 bit hash +pub const H_256: usize = 32; + +/// Holds the state needed to participate in `HotShot` consensus +pub struct SystemContextInner> { + /// The public key of this node + public_key: TYPES::SignatureKey, + + /// The private key of this node + private_key: ::PrivateKey, + + /// Configuration items for this hotshot instance + config: HotShotConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + + /// Networking interface for this hotshot instance + // networking: I::Networking, + + /// This `HotShot` instance's storage backend + storage: I::Storage, + + /// This `HotShot` instance's way to interact with the nodes needed to form a quorum and/or DA certificate. + pub exchanges: Arc, + + /// Sender for [`Event`]s + event_sender: RwLock>>>, + + /// a reference to the metrics that the implementor is using. + _metrics: Box, + + /// Transactions + /// (this is shared btwn hotshot and `Consensus`) + transactions: + Arc, TYPES::Transaction>>>, + + /// The hotstuff implementation + consensus: Arc>>, + + /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the + /// latter of which is only applicable for sequencing consensus. + channel_maps: (ChannelMaps, Option>), + + /// for receiving messages in the network lookup task + recv_network_lookup: Arc>>>, + + // global_registry: GlobalRegistry, + /// Access to the output event stream. + output_event_stream: ChannelStream>, + + /// access to the internal event stream, in case we need to, say, shut something down + internal_event_stream: ChannelStream>, + + /// uid for instrumentation + id: u64, +} + +/// Thread safe, shared view of a `HotShot` +// TODO Perhaps we can delete SystemContext since we only consume it in run_tasks() +#[derive(Clone)] +pub struct SystemContext> { + /// Handle to internal hotshot implementation + pub inner: Arc>, +} + +impl> SystemContext { + /// Creates a new hotshot with the given configuration options and sets it up with the given + /// genesis block + #[allow(clippy::too_many_arguments)] + #[instrument(skip(private_key, storage, exchanges, initializer, metrics))] + pub async fn new( + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + nonce: u64, + config: HotShotConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + storage: I::Storage, + exchanges: I::Exchanges, + initializer: HotShotInitializer, + metrics: Box, + ) -> Result> { + debug!("Creating a new hotshot"); + + let consensus_metrics = Arc::new(ConsensusMetrics::new( + &*metrics.subgroup("consensus".to_string()), + )); + let anchored_leaf = initializer.inner; + + // insert to storage + storage + .append(vec![anchored_leaf.clone().into()]) + .await + .context(StorageSnafu)?; + + // insert genesis (or latest block) to state map + let mut state_map = BTreeMap::default(); + state_map.insert( + anchored_leaf.get_view_number(), + View { + view_inner: ViewInner::Leaf { + leaf: anchored_leaf.commit(), + }, + }, + ); + + let mut saved_leaves = HashMap::new(); + let mut saved_blocks = BlockStore::default(); + saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); + if let Ok(block) = anchored_leaf.get_deltas().try_resolve() { + saved_blocks.insert(block); + } + + let start_view = anchored_leaf.get_view_number(); + + let consensus = Consensus { + state_map, + cur_view: start_view, + last_decided_view: anchored_leaf.get_view_number(), + transactions: Arc::default(), + seen_transactions: HashSet::new(), + saved_leaves, + saved_blocks, + // TODO this is incorrect + // https://github.com/EspressoSystems/HotShot/issues/560 + locked_view: anchored_leaf.get_view_number(), + high_qc: anchored_leaf.get_justify_qc(), + metrics: consensus_metrics, + invalid_qc: 0, + }; + let consensus = Arc::new(RwLock::new(consensus)); + let txns = consensus.read().await.get_transactions(); + + let (_send_network_lookup, recv_network_lookup) = unbounded(); + let inner: Arc> = Arc::new(SystemContextInner { + recv_network_lookup: Arc::new(Mutex::new(recv_network_lookup)), + id: nonce, + channel_maps: I::new_channel_maps(start_view), + consensus, + transactions: txns, + public_key, + private_key, + config, + // networking, + storage, + exchanges: Arc::new(exchanges), + event_sender: RwLock::default(), + _metrics: metrics, + internal_event_stream: ChannelStream::new(), + output_event_stream: ChannelStream::new(), + }); + + Ok(Self { inner }) + } + + /// "Starts" consensus by sending a `ViewChange` event + pub async fn start_consensus(&self) { + self.inner + .internal_event_stream + .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new(1))) + .await; + + // ED This isn't ideal... + // async_sleep(Duration::new(1, 0)).await; + + // self.inner + // .internal_event_stream + // .publish(SequencingHotShotEvent::QCFormed( + // QuorumCertificate::genesis(), + // )) + // .await; + } + + /// Marks a given view number as timed out. This should be called a fixed period after a round is started. + /// + /// If the round has already ended then this function will essentially be a no-op. Otherwise `run_round` will return shortly after this function is called. + /// # Panics + /// Panics if the current view is not in the channel map + #[instrument( + skip_all, + fields(id = self.inner.id, view = *current_view), + name = "Timeout consensus tasks", + level = "warn" + )] + pub async fn timeout_view( + &self, + current_view: TYPES::Time, + send_replica: UnboundedSender< + >::ProcessedConsensusMessage, + >, + send_next_leader: Option< + UnboundedSender< + >::ProcessedConsensusMessage, + >, + >, + ) where + >::ProcessedConsensusMessage: + From>, + { + let msg = ProcessedGeneralConsensusMessage::::InternalTrigger( + InternalTrigger::Timeout(current_view), + ); + if let Some(chan) = send_next_leader { + if chan.send(msg.clone().into()).await.is_err() { + debug!("Error timing out next leader task"); + } + }; + // NOTE this should always exist + if send_replica.send(msg.into()).await.is_err() { + debug!("Error timing out replica task"); + }; + } + + /// Publishes a transaction asynchronously to the network + /// + /// # Errors + /// + /// Always returns Ok; does not return an error if the transaction couldn't be published to the network + #[instrument(skip(self), err)] + pub async fn publish_transaction_async( + &self, + transaction: TYPES::Transaction, + ) -> Result<(), HotShotError> { + trace!("Adding transaction to our own queue"); + // Wrap up a message + // TODO place a view number here that makes sense + // we haven't worked out how this will work yet + let message = DataMessage::SubmitTransaction(transaction, TYPES::Time::new(0)); + let api = self.clone(); + // TODO We should have a function that can return a network error if there is one + // but first we'd need to ensure our network implementations can support that + // (and not hang instead) + async_spawn(async move { + let _result = api + .inner + .exchanges + .committee_exchange() + .network() + .broadcast_message( + Message { + sender: api.inner.public_key.clone(), + kind: MessageKind::from(message), + _phantom: PhantomData, + }, + &api.inner + .exchanges + .committee_exchange() + .membership() + .clone(), + ) + .await; + }); + Ok(()) + } + + /// Returns a copy of the state + /// + /// # Panics + /// + /// Panics if internal state for consensus is inconsistent + pub async fn get_state(&self) -> ::MaybeState { + self.inner + .consensus + .read() + .await + .get_decided_leaf() + .get_state() + } + + /// Returns a copy of the consensus struct + #[must_use] + pub fn get_consensus(&self) -> Arc>> { + self.inner.consensus.clone() + } + + /// Returns a copy of the last decided leaf + /// # Panics + /// Panics if internal state for consensus is inconsistent + pub async fn get_decided_leaf(&self) -> I::Leaf { + self.inner.consensus.read().await.get_decided_leaf() + } + + /// Initializes a new hotshot and does the work of setting up all the background tasks + /// + /// Assumes networking implementation is already primed. + /// + /// Underlying `HotShot` instance starts out paused, and must be unpaused + /// + /// Upon encountering an unrecoverable error, such as a failure to send to a broadcast channel, + /// the `HotShot` instance will log the error and shut down. + /// + /// # Errors + /// + /// Will return an error when the storage failed to insert the first `QuorumCertificate` + #[allow(clippy::too_many_arguments)] + pub async fn init( + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + node_id: u64, + config: HotShotConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + storage: I::Storage, + exchanges: I::Exchanges, + initializer: HotShotInitializer, + metrics: Box, + ) -> Result< + ( + SystemContextHandle, + ChannelStream>, + ), + HotShotError, + > + where + SystemContext: HotShotType, + { + // Save a clone of the storage for the handle + let hotshot = Self::new( + public_key, + private_key, + node_id, + config, + storage, + exchanges, + initializer, + metrics, + ) + .await?; + let handle = hotshot.clone().run_tasks().await; + let internal_event_stream = hotshot.inner.internal_event_stream.clone(); + + Ok((handle, internal_event_stream)) + } + + /// Send a broadcast message. + /// + /// This is an alias for `hotshot.inner.networking.broadcast_message(msg.into())`. + /// + /// # Errors + /// + /// Will return any errors that the underlying `broadcast_message` can return. + pub async fn send_broadcast_message( + &self, + kind: impl Into>, + ) -> std::result::Result<(), NetworkError> { + let inner = self.inner.clone(); + let pk = self.inner.public_key.clone(); + let kind = kind.into(); + + async_spawn_local(async move { + if inner + .exchanges + .quorum_exchange() + .network() + .broadcast_message( + Message { + sender: pk, + kind, + _phantom: PhantomData, + }, + // TODO this is morally wrong + &inner.exchanges.quorum_exchange().membership().clone(), + ) + .await + .is_err() + { + warn!("Failed to broadcast message"); + }; + }); + Ok(()) + } + + /// Send a direct message to a given recipient. + /// + /// This is an alias for `hotshot.inner.networking.message_node(msg.into(), recipient)`. + /// + /// # Errors + /// + /// Will return any errors that the underlying `message_node` can return. + pub async fn send_direct_message( + &self, + kind: impl Into>, + recipient: TYPES::SignatureKey, + ) -> std::result::Result<(), NetworkError> { + self.inner + .exchanges + .quorum_exchange() + .network() + .direct_message( + Message { + sender: self.inner.public_key.clone(), + kind: kind.into(), + _phantom: PhantomData, + }, + recipient, + ) + .await?; + Ok(()) + } + + /// return the timeout for a view for `self` + #[must_use] + pub fn get_next_view_timeout(&self) -> u64 { + self.inner.config.next_view_timeout + } + + /// given a view number and a upgradable read lock on a channel map, inserts entry into map if it + /// doesn't exist, or creates entry. Then returns a clone of the entry + pub async fn create_or_obtain_chan_from_read( + view_num: TYPES::Time, + channel_map: RwLockUpgradableReadGuard<'_, SendToTasks>, + ) -> ViewQueue { + // check if we have the entry + // if we don't, insert + if let Some(vq) = channel_map.channel_map.get(&view_num) { + vq.clone() + } else { + let mut channel_map = + RwLockUpgradableReadGuard::<'_, SendToTasks>::upgrade(channel_map).await; + let new_view_queue = ViewQueue::default(); + let vq = new_view_queue.clone(); + // NOTE: the read lock is held until all other read locks are DROPPED and + // the read lock may be turned into a write lock. + // This means that the `channel_map` will not change. So we don't need + // to check again to see if a channel was added + + channel_map.channel_map.insert(view_num, new_view_queue); + vq + } + } + + /// given a view number and a write lock on a channel map, inserts entry into map if it + /// doesn't exist, or creates entry. Then returns a clone of the entry + #[allow(clippy::unused_async)] // async for API compatibility reasons + pub async fn create_or_obtain_chan_from_write( + view_num: TYPES::Time, + mut channel_map: RwLockWriteGuard<'_, SendToTasks>, + ) -> ViewQueue { + channel_map.channel_map.entry(view_num).or_default().clone() + } +} + +/// [`HotShot`] implementations that depend on [`TYPES::ConsensusType`]. +#[async_trait] +pub trait HotShotType> { + /// Get the [`transactions`] field of [`HotShot`]. + fn transactions( + &self, + ) -> &Arc, TYPES::Transaction>>>; + + /// Get the [`hotstuff`] field of [`HotShot`]. + fn consensus(&self) -> &Arc>>; + + /// Spawn all tasks that operate on the given [`HotShot`]. + /// + /// For a list of which tasks are being spawned, see this module's documentation. + async fn run_tasks(self) -> SystemContextHandle; + + // decide which handler to call based on the message variant and `transmit_type` + // async fn handle_message(&self, item: Message, transmit_type: TransmitType) { + // match (item.kind, transmit_type) { + // (MessageKind::Consensus(msg), TransmitType::Broadcast) => { + // self.handle_broadcast_consensus_message(msg, item.sender) + // .await; + // } + // (MessageKind::Consensus(msg), TransmitType::Direct) => { + // self.handle_direct_consensus_message(msg, item.sender).await; + // } + // (MessageKind::Data(msg), TransmitType::Broadcast) => { + // self.handle_broadcast_data_message(msg, item.sender).await; + // } + // (MessageKind::Data(msg), TransmitType::Direct) => { + // self.handle_direct_data_message(msg, item.sender).await; + // } + // (MessageKind::_Unreachable(_), _) => unimplemented!(), + // }; + // } + + // Handle an incoming [`ConsensusMessage`] that was broadcasted on the network. + // async fn handle_broadcast_consensus_message( + // &self, + // msg: I::ConsensusMessage, + // sender: TYPES::SignatureKey, + // ); + + // Handle an incoming [`ConsensusMessage`] directed at this node. + // async fn handle_direct_consensus_message( + // &self, + // msg: I::ConsensusMessage, + // sender: TYPES::SignatureKey, + // ); + + // Handle an incoming [`DataMessage`] that was broadcasted on the network + // async fn handle_broadcast_data_message( + // &self, + // msg: DataMessage, + // _sender: TYPES::SignatureKey, + // ) { + // // TODO validate incoming broadcast message based on sender signature key + // match msg { + // DataMessage::SubmitTransaction(transaction, _view_number) => { + // let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); + // + // // The API contract requires the hash to be unique + // // so we can assume entry == incoming txn + // // even if eq not satisfied + // // so insert is an idempotent operation + // let mut new = false; + // self.transactions() + // .modify(|txns| { + // new = txns.insert(transaction.commit(), transaction).is_none(); + // }) + // .await; + // + // if new { + // // If this is a new transaction, update metrics. + // let consensus = self.consensus().read().await; + // consensus.metrics.outstanding_transactions.update(1); + // consensus + // .metrics + // .outstanding_transactions_memory_size + // .update(i64::try_from(size).unwrap_or_else(|e| { + // warn!("Conversion failed: {e}. Using the max value."); + // i64::MAX + // })); + // } + // } + // } + // } + + // Handle an incoming [`DataMessage`] that directed at this node + // #[allow(clippy::unused_async)] // async for API compatibility reasons + // async fn handle_direct_data_message( + // &self, + // msg: DataMessage, + // _sender: TYPES::SignatureKey, + // ) { + // debug!(?msg, "Incoming direct data message"); + // match msg { + // DataMessage::SubmitTransaction(_, _) => { + // // Log exceptional situation and proceed + // warn!(?msg, "Broadcast message received over direct channel"); + // } + // } + // } +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + MEMBERSHIP: Membership, + > HotShotType for SystemContext +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + Membership = MEMBERSHIP, + > + 'static, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Proposal = DAProposal, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + Membership = MEMBERSHIP, + > + 'static, + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + Membership = MEMBERSHIP, + > + 'static, +{ + fn transactions( + &self, + ) -> &Arc, TYPES::Transaction>>> { + &self.inner.transactions + } + + fn consensus(&self) -> &Arc>> { + &self.inner.consensus + } + + async fn run_tasks(self) -> SystemContextHandle { + // ED Need to set first first number to 1, or properly trigger the change upon start + let task_runner = TaskRunner::new(); + let registry = task_runner.registry.clone(); + + let output_event_stream = self.inner.output_event_stream.clone(); + let internal_event_stream = self.inner.internal_event_stream.clone(); + + let quorum_exchange = self.inner.exchanges.quorum_exchange().clone(); + let committee_exchange = self.inner.exchanges.committee_exchange().clone(); + let view_sync_exchange = self.inner.exchanges.view_sync_exchange().clone(); + + let handle = SystemContextHandle { + registry, + output_event_stream: output_event_stream.clone(), + internal_event_stream: internal_event_stream.clone(), + hotshot: self.clone(), + storage: self.inner.storage.clone(), + }; + + let task_runner = add_network_message_task( + task_runner, + internal_event_stream.clone(), + quorum_exchange.clone(), + ) + .await; + let task_runner = add_network_message_task( + task_runner, + internal_event_stream.clone(), + committee_exchange.clone(), + ) + .await; + let task_runner = add_network_message_task( + task_runner, + internal_event_stream.clone(), + view_sync_exchange.clone(), + ) + .await; + let task_runner = add_network_event_task( + task_runner, + internal_event_stream.clone(), + quorum_exchange, + NetworkTaskKind::Quorum, + ) + .await; + let task_runner = add_network_event_task( + task_runner, + internal_event_stream.clone(), + committee_exchange.clone(), + NetworkTaskKind::Committee, + ) + .await; + let task_runner = add_network_event_task( + task_runner, + internal_event_stream.clone(), + view_sync_exchange.clone(), + NetworkTaskKind::ViewSync, + ) + .await; + let task_runner = add_consensus_task( + task_runner, + internal_event_stream.clone(), + output_event_stream.clone(), + handle.clone(), + ) + .await; + let task_runner = add_da_task( + task_runner, + internal_event_stream.clone(), + committee_exchange.clone(), + handle.clone(), + ) + .await; + let task_runner = add_view_sync_task::( + task_runner, + internal_event_stream.clone(), + handle.clone(), + ) + .await; + async_spawn(async move { + task_runner.launch().await; + info!("Task runner exited!"); + }); + + handle + + // let shut_down = Arc::new(AtomicBool::new(false)); + // let started = Arc::new(AtomicBool::new(false)); + // + // let exchange = self.inner.exchanges.quorum_exchange(); + // let committee_exchange = self.inner.exchanges.committee_exchange(); + // + // let network_broadcast_task_handle = async_spawn( + // tasks::network_task( + // self.clone(), + // shut_down.clone(), + // TransmitType::Broadcast, + // exchange.clone().into(), + // ) + // .instrument(info_span!("HotShot Broadcast Task",)), + // ); + // let network_direct_task_handle = async_spawn( + // tasks::network_task( + // self.clone(), + // shut_down.clone(), + // TransmitType::Direct, + // exchange.clone().into(), + // ) + // .instrument(info_span!("HotShot Direct Task",)), + // ); + // + // let committee_network_broadcast_task_handle = async_spawn( + // tasks::network_task( + // self.clone(), + // shut_down.clone(), + // TransmitType::Broadcast, + // committee_exchange.clone().into(), + // ) + // .instrument(info_span!("HotShot DA Broadcast Task",)), + // ); + // let committee_network_direct_task_handle = async_spawn( + // tasks::network_task( + // self.clone(), + // shut_down.clone(), + // TransmitType::Direct, + // committee_exchange.clone().into(), + // ) + // .instrument(info_span!("HotShot DA Direct Task",)), + // ); + // + // async_spawn( + // tasks::network_lookup_task(self.clone(), shut_down.clone()) + // .instrument(info_span!("HotShot Network Lookup Task",)), + // ); + // + // let (handle_channels, task_channels) = match self.inner.config.execution_type { + // ExecutionType::Continuous => (None, None), + // ExecutionType::Incremental => { + // let (send_consensus_start, recv_consensus_start) = unbounded(); + // (Some(send_consensus_start), Some(recv_consensus_start)) + // } + // }; + // + // let consensus_task_handle = async_spawn( + // tasks::view_runner( + // self.clone(), + // ) + // .instrument(info_span!("Consensus Task Handle",)), + // ); + // + // let (broadcast_sender, broadcast_receiver) = channel(); + // + // let handle = SystemContextHandle { + // sender_handle: Arc::new(broadcast_sender.clone()), + // hotshot: self.clone(), + // stream_output: broadcast_receiver, + // storage: self.inner.storage.clone(), + // shut_down, + // }; + // *self.inner.event_sender.write().await = Some(broadcast_sender); + // + // let mut background_task_handle = self.inner.background_task_handle.inner.write().await; + // *background_task_handle = Some(TaskHandleInner { + // network_broadcast_task_handle, + // network_direct_task_handle, + // committee_network_broadcast_task_handle: Some(committee_network_broadcast_task_handle), + // committee_network_direct_task_handle: Some(committee_network_direct_task_handle), + // consensus_task_handle: nll_todo(), + // shutdown_timeout: Duration::from_millis(self.inner.config.next_view_timeout), + // run_view_channels: handle_channels, + // started, + // }); + // + // handle + } + + // #[instrument( + // skip(self), + // name = "Handle broadcast consensus message", + // level = "error" + // )] + // async fn handle_broadcast_consensus_message( + // &self, + // msg: SequencingMessage, + // sender: TYPES::SignatureKey, + // ) { + // let msg_time = msg.view_number(); + // + // match msg.0 { + // Left(general_message) => { + // match general_message { + // // this is ONLY intended for replica + // GeneralConsensusMessage::Proposal(_) => { + // let channel_map = self + // .inner + // .channel_maps + // .0 + // .vote_channel + // .upgradable_read() + // .await; + // + // // skip if the proposal is stale + // if msg_time < channel_map.cur_view { + // warn!( + // "Throwing away {} for view number: {:?}", + // std::any::type_name::>>(), + // msg_time + // ); + // return; + // } + // + // let chan: ViewQueue = + // Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; + // + // if !chan.has_received_proposal.swap(true, Ordering::Relaxed) + // && chan + // .sender_chan + // .send(Left(ProcessedGeneralConsensusMessage::new( + // general_message, + // sender, + // ))) + // .await + // .is_err() + // { + // warn!("Failed to send to next leader!"); + // } + // } + // GeneralConsensusMessage::InternalTrigger(_) => { + // warn!("Received an internal trigger. This shouldn't be possible."); + // } + // GeneralConsensusMessage::Vote(_) => { + // warn!( + // "Received a broadcast for a vote message. This shouldn't be possible." + // ); + // } + // GeneralConsensusMessage::ViewSync(_) => todo!(), + // } + // } + // Right(committee_message) => { + // match committee_message { + // CommitteeConsensusMessage::DAVote(_) => { + // warn!( + // "Received a broadcast for a vote message. This shouldn't be possible." + // ); + // } + // CommitteeConsensusMessage::DAProposal(_) => { + // let channel_map = match &self.inner.channel_maps.1 { + // Some(committee_channels) => { + // committee_channels.vote_channel.upgradable_read().await + // } + // None => { + // warn!("Committee channels not found."); + // return; + // } + // }; + // + // // skip if the proposal is stale + // if msg_time < channel_map.cur_view { + // warn!( + // "Throwing away {} for view number: {:?}", + // std::any::type_name::>>(), + // msg_time + // ); + // return; + // } + // + // let chan: ViewQueue = + // Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; + // + // if !chan.has_received_proposal.swap(true, Ordering::Relaxed) + // && chan + // .sender_chan + // .send(Right(ProcessedCommitteeConsensusMessage::new( + // committee_message, + // sender, + // ))) + // .await + // .is_err() + // { + // warn!("Failed to send to next leader!"); + // } + // } + // } + // } + // }; + // } + + // #[instrument(skip(self), name = "Handle direct consensus message", level = "error")] + // async fn handle_direct_consensus_message( + // &self, + // msg: SequencingMessage, + // sender: TYPES::SignatureKey, + // ) { + // let msg_time = msg.view_number(); + // + // // We can only recv from a replicas + // // replicas should only send votes or if they timed out, timeouts + // match msg.0 { + // Left(general_message) => match general_message { + // GeneralConsensusMessage::Proposal(_) + // | GeneralConsensusMessage::InternalTrigger(_) => { + // warn!("Received a direct message for a proposal. This shouldn't be possible."); + // } + // // this is ONLY intended for next leader + // c @ GeneralConsensusMessage::Vote(_) => { + // let channel_map = self + // .inner + // .channel_maps + // .0 + // .proposal_channel + // .upgradable_read() + // .await; + // + // // check if + // // - is in fact, actually is the next leader + // // - the message is not stale + // let is_leader = self + // .inner + // .clone() + // .exchanges + // .quorum_exchange() + // .is_leader(msg_time + 1); + // if !is_leader || msg_time < channel_map.cur_view { + // warn!( + // "Throwing away {} message for view number: {:?}", + // std::any::type_name::>(), + // msg_time + // ); + // return; + // } + // + // let chan = Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; + // + // if chan + // .sender_chan + // .send(Left(ProcessedGeneralConsensusMessage::new(c, sender))) + // .await + // .is_err() + // { + // error!("Failed to send to next leader!"); + // } + // } + // GeneralConsensusMessage::ViewSync(_) => todo!(), + // }, + // Right(committee_message) => { + // match committee_message { + // c @ CommitteeConsensusMessage::DAVote(_) => { + // let channel_map = match &self.inner.channel_maps.1 { + // Some(committee_channels) => { + // committee_channels.proposal_channel.upgradable_read().await + // } + // None => { + // warn!("Committee channels not found."); + // return; + // } + // }; + // + // // check if + // // - is in fact, actually is the next leader + // // - the message is not stale + // let is_leader = self + // .inner + // .clone() + // .exchanges + // .committee_exchange() + // .is_leader(msg_time); + // if !is_leader || msg_time < channel_map.cur_view { + // warn!( + // "Throwing away {} message for view number: {:?}, Channel cur view: {:?}", + // std::any::type_name::>(), + // msg_time, + // channel_map.cur_view, + // ); + // return; + // } + // + // let chan = + // Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; + // + // if chan + // .sender_chan + // .send(Right(ProcessedCommitteeConsensusMessage::new(c, sender))) + // .await + // .is_err() + // { + // error!("Failed to send to next leader!"); + // } + // } + // CommitteeConsensusMessage::DAProposal(_) => todo!(), + // } + // } + // } + // } +} + +<<<<<<< HEAD:src/lib.rs +======= +/// A view runner implemented by [HotShot] for different types of consensus. +#[async_trait] +pub trait ViewRunner> { + /// Executes one view of consensus + async fn run_view(hotshot: SystemContext) -> Result<(), ()>; +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > ViewRunner for SystemContext +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + // #[instrument] + #[allow(clippy::too_many_lines)] + async fn run_view(hotshot: SystemContext) -> Result<(), ()> { + let c_api = HotShotSequencingConsensusApi { + inner: hotshot.inner.clone(), + }; + + // Setup channel for recieving DA votes + let mut send_to_leader = match &hotshot.inner.channel_maps.1 { + Some(committee_channels) => committee_channels.proposal_channel.write().await, + None => { + warn!("Committee channels not found."); + return Err(()); + } + }; + let leader_last_view: TYPES::Time = send_to_leader.cur_view; + send_to_leader.channel_map.remove(&leader_last_view); + send_to_leader.cur_view += 1; + let (send_da_vote_chan, recv_da_vote, cur_view) = { + let mut consensus = hotshot.inner.consensus.write().await; + let cur_view = consensus.increment_view(); + let vq = SystemContext::::create_or_obtain_chan_from_write( + cur_view, + send_to_leader, + ) + .await; + (vq.sender_chan, vq.receiver_chan, cur_view) + }; + + // Set up vote collection channel for commitment proposals/votes + let mut send_to_next_leader = hotshot.inner.channel_maps.0.proposal_channel.write().await; + let leader_last_view: TYPES::Time = send_to_next_leader.cur_view; + send_to_next_leader.channel_map.remove(&leader_last_view); + send_to_next_leader.cur_view += 1; + let (send_commitment_vote_chan, recv_commitment_vote_chan) = { + let vq = SystemContext::::create_or_obtain_chan_from_write( + cur_view, + send_to_next_leader, + ) + .await; + (vq.sender_chan, vq.receiver_chan) + }; + + let (high_qc, txns) = { + // OBTAIN read lock on consensus + let consensus = hotshot.inner.consensus.read().await; + let high_qc = consensus.high_qc.clone(); + let txns = consensus.transactions.clone(); + (high_qc, txns) + }; + let mut send_to_member = match &hotshot.inner.channel_maps.1 { + Some(committee_channels) => committee_channels.vote_channel.write().await, + None => { + warn!("Committee channels not found."); + return Err(()); + } + }; + let member_last_view: TYPES::Time = send_to_member.cur_view; + send_to_member.channel_map.remove(&member_last_view); + send_to_member.cur_view += 1; + let ViewQueue { + sender_chan: send_member, + receiver_chan: recv_member, + has_received_proposal: _, + } = SystemContext::::create_or_obtain_chan_from_write( + send_to_member.cur_view, + send_to_member, + ) + .await; + let mut send_to_replica = hotshot.inner.channel_maps.0.vote_channel.write().await; + let replica_last_view: TYPES::Time = send_to_replica.cur_view; + send_to_replica.channel_map.remove(&replica_last_view); + send_to_replica.cur_view += 1; + let ViewQueue { + sender_chan: send_replica, + receiver_chan: recv_replica, + has_received_proposal: _, + } = SystemContext::::create_or_obtain_chan_from_write( + send_to_replica.cur_view, + send_to_replica, + ) + .await; + + let mut task_handles = Vec::new(); + let committee_exchange = c_api.inner.exchanges.committee_exchange().clone(); + let quorum_exchange = c_api.inner.exchanges.quorum_exchange().clone(); + + if quorum_exchange.clone().is_leader(cur_view) { + let da_leader = DALeader { + id: hotshot.inner.id, + consensus: hotshot.inner.consensus.clone(), + high_qc: high_qc.clone(), + cur_view, + transactions: txns, + api: c_api.clone(), + committee_exchange: committee_exchange.clone().into(), + quorum_exchange: quorum_exchange.clone().into(), + vote_collection_chan: recv_da_vote, + _pd: PhantomData, + }; + let consensus = hotshot.inner.consensus.clone(); + let qc = high_qc.clone(); + let api = c_api.clone(); + let leader_handle = { + let id = hotshot.inner.id; + async_spawn(async move { + let Some((da_cert, block, parent)) = da_leader.run_view().await else { + return qc; + }; + let consensus_leader = ConsensusLeader { + id, + consensus, + high_qc: qc, + cert: da_cert, + block, + parent, + cur_view, + api: api.clone(), + quorum_exchange: quorum_exchange.clone().into(), + _pd: PhantomData, + }; + consensus_leader.run_view().await + }) + }; + task_handles.push(leader_handle); + } + + let quorum_exchange = c_api.inner.exchanges.quorum_exchange(); + if quorum_exchange.clone().is_leader(cur_view + 1) { + let next_leader = ConsensusNextLeader { + id: hotshot.inner.id, + consensus: hotshot.inner.consensus.clone(), + cur_view, + api: c_api.clone(), + generic_qc: high_qc.clone(), + vote_collection_chan: recv_commitment_vote_chan, + quorum_exchange: quorum_exchange.clone().into(), + _pd: PhantomData, + }; + let next_leader_handle = async_spawn(async move { next_leader.run_view().await }); + task_handles.push(next_leader_handle); + } + let da_member = DAMember { + id: hotshot.inner.id, + consensus: hotshot.inner.consensus.clone(), + proposal_collection_chan: recv_member, + cur_view, + high_qc: high_qc.clone(), + api: c_api.clone(), + exchange: committee_exchange.clone().into(), + _pd: PhantomData, + }; + let member_handle = async_spawn(async move { da_member.run_view().await }); + task_handles.push(member_handle); + let replica = SequencingReplica { + id: hotshot.inner.id, + consensus: hotshot.inner.consensus.clone(), + proposal_collection_chan: recv_replica, + cur_view, + high_qc: high_qc.clone(), + api: c_api.clone(), + committee_exchange: committee_exchange.clone().into(), + quorum_exchange: quorum_exchange.clone().into(), + _pd: PhantomData, + }; + let replica_handle = async_spawn(async move { replica.run_view().await }); + task_handles.push(replica_handle); + + let children_finished = futures::future::join_all(task_handles); + + async_spawn({ + let next_view_timeout = hotshot.inner.config.next_view_timeout; + let hotshot: SystemContext = hotshot.clone(); + async move { + async_sleep(Duration::from_millis(next_view_timeout)).await; + hotshot + .timeout_view(cur_view, send_member, Some(send_commitment_vote_chan)) + .await; + hotshot + .timeout_view(cur_view, send_replica, Some(send_da_vote_chan)) + .await; + } + }); + + let results = children_finished.await; + + // unwrap is fine since results must have >= 1 item(s) + #[cfg(async_executor_impl = "async-std")] + let high_qc = results + .into_iter() + .max_by_key(|qc: &QuorumCertificate>| qc.view_number) + .unwrap(); + #[cfg(async_executor_impl = "tokio")] + let high_qc = results + .into_iter() + .filter_map(std::result::Result::ok) + .max_by_key(|qc| qc.view_number) + .unwrap(); + + let mut consensus = hotshot.inner.consensus.write().await; + consensus.high_qc = high_qc; + c_api.send_view_finished(consensus.cur_view).await; + Ok(()) + } +} + +>>>>>>> c407e538b (feat: crates directory used by all crates):crates/hotshot/src/lib.rs +/// A handle that exposes the interface that hotstuff needs to interact with [`HotShot`] +#[derive(Clone)] +struct HotShotValidatingConsensusApi> { + /// Reference to the [`SystemContextInner`] + inner: Arc>, +} + +#[async_trait] +impl> ConsensusSharedApi + for HotShotValidatingConsensusApi +{ + fn total_nodes(&self) -> NonZeroUsize { + self.inner.config.total_nodes + } + + fn propose_min_round_time(&self) -> Duration { + self.inner.config.propose_min_round_time + } + + fn propose_max_round_time(&self) -> Duration { + self.inner.config.propose_max_round_time + } + + fn max_transactions(&self) -> NonZeroUsize { + self.inner.config.max_transactions + } + + fn min_transactions(&self) -> usize { + self.inner.config.min_transactions + } + + /// Generates and encodes a vote token + + async fn should_start_round(&self, _: TYPES::Time) -> bool { + false + } + + async fn send_event(&self, event: Event) { + debug!(?event, "send_event"); + let mut event_sender = self.inner.event_sender.write().await; + if let Some(sender) = &*event_sender { + if let Err(e) = sender.send_async(event).await { + error!(?e, "Could not send event to event_sender"); + *event_sender = None; + } + } + } + + fn public_key(&self) -> &TYPES::SignatureKey { + &self.inner.public_key + } + + fn private_key(&self) -> &::PrivateKey { + &self.inner.private_key + } + + async fn store_leaf( + &self, + old_anchor_view: TYPES::Time, + leaf: I::Leaf, + ) -> std::result::Result<(), hotshot_types::traits::storage::StorageError> { + let view_to_insert = StoredView::from(leaf); + let storage = &self.inner.storage; + storage.append_single_view(view_to_insert).await?; + storage.cleanup_storage_up_to_view(old_anchor_view).await?; + storage.commit().await?; + Ok(()) + } +} + +/// A handle that exposes the interface that hotstuff needs to interact with [`HotShot`] +#[derive(Clone, Debug)] +pub struct HotShotSequencingConsensusApi> { + /// Reference to the [`SystemContextInner`] + pub inner: Arc>, +} + +#[async_trait] +impl> ConsensusSharedApi + for HotShotSequencingConsensusApi +{ + fn total_nodes(&self) -> NonZeroUsize { + self.inner.config.total_nodes + } + + fn propose_min_round_time(&self) -> Duration { + self.inner.config.propose_min_round_time + } + + fn propose_max_round_time(&self) -> Duration { + self.inner.config.propose_max_round_time + } + + fn max_transactions(&self) -> NonZeroUsize { + self.inner.config.max_transactions + } + + fn min_transactions(&self) -> usize { + self.inner.config.min_transactions + } + + /// Generates and encodes a vote token + + async fn should_start_round(&self, _: TYPES::Time) -> bool { + false + } + + async fn send_event(&self, event: Event) { + debug!(?event, "send_event"); + let mut event_sender = self.inner.event_sender.write().await; + if let Some(sender) = &*event_sender { + if let Err(e) = sender.send_async(event).await { + error!(?e, "Could not send event to event_sender"); + *event_sender = None; + } + } + } + + fn public_key(&self) -> &TYPES::SignatureKey { + &self.inner.public_key + } + + fn private_key(&self) -> &::PrivateKey { + &self.inner.private_key + } + + async fn store_leaf( + &self, + old_anchor_view: TYPES::Time, + leaf: I::Leaf, + ) -> std::result::Result<(), hotshot_types::traits::storage::StorageError> { + let view_to_insert = StoredView::from(leaf); + let storage = &self.inner.storage; + storage.append_single_view(view_to_insert).await?; + storage.cleanup_storage_up_to_view(old_anchor_view).await?; + storage.commit().await?; + Ok(()) + } +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation>, + > SequencingConsensusApi for HotShotSequencingConsensusApi +{ + async fn send_direct_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + recipient: TYPES::SignatureKey, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError> { + let inner = self.inner.clone(); + debug!(?message, ?recipient, "send_direct_message"); + async_spawn_local(async move { + inner + .exchanges + .quorum_exchange() + .network() + .direct_message( + Message { + sender: inner.public_key.clone(), + kind: MessageKind::from_consensus_message(message), + _phantom: PhantomData, + }, + recipient, + ) + .await + }); + Ok(()) + } + + async fn send_direct_da_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + recipient: TYPES::SignatureKey, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError> { + let inner = self.inner.clone(); + debug!(?message, ?recipient, "send_direct_message"); + async_spawn_local(async move { + inner + .exchanges + .committee_exchange() + .network() + .direct_message( + Message { + sender: inner.public_key.clone(), + kind: MessageKind::from_consensus_message(message), + _phantom: PhantomData, + }, + recipient, + ) + .await + }); + Ok(()) + } + + // TODO (DA) Refactor ConsensusApi and HotShot to use SystemContextInner directly. + // + async fn send_broadcast_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError> { + debug!(?message, "send_broadcast_message"); + self.inner + .exchanges + .quorum_exchange() + .network() + .broadcast_message( + Message { + sender: self.inner.public_key.clone(), + kind: MessageKind::from_consensus_message(message), + _phantom: PhantomData, + }, + &self.inner.exchanges.quorum_exchange().membership().clone(), + ) + .await?; + Ok(()) + } + + async fn send_da_broadcast( + &self, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError> { + debug!(?message, "send_da_broadcast_message"); + self.inner + .exchanges + .committee_exchange() + .network() + .broadcast_message( + Message { + sender: self.inner.public_key.clone(), + kind: MessageKind::from_consensus_message(message), + _phantom: PhantomData, + }, + &self + .inner + .exchanges + .committee_exchange() + .membership() + .clone(), + ) + .await?; + Ok(()) + } + + async fn send_transaction( + &self, + message: DataMessage, + ) -> std::result::Result<(), NetworkError> { + debug!(?message, "send_broadcast_message"); + let api = self.clone(); + async_spawn(async move { + let _result = api + .inner + .exchanges + .committee_exchange() + .network() + .broadcast_message( + Message { + sender: api.inner.public_key.clone(), + kind: MessageKind::from(message), + _phantom: PhantomData, + }, + &api.inner + .exchanges + .committee_exchange() + .membership() + .clone(), + ) + .await; + }); + Ok(()) + } +} + +/// initializer struct for creating starting block +pub struct HotShotInitializer> { + /// the leaf specified initialization + inner: LEAF, +} + +impl> HotShotInitializer { + /// initialize from genesis + /// # Errors + /// If we are unable to apply the genesis block to the default state + pub fn from_genesis(genesis_block: TYPES::BlockType) -> Result> { + let state = TYPES::StateType::default() + .append(&genesis_block, &TYPES::Time::new(0)) + .map_err(|err| HotShotError::Misc { + context: err.to_string(), + })?; + let time = TYPES::Time::genesis(); + let justify_qc = QuorumCertificate::::genesis(); + + Ok(Self { + inner: LEAF::new(time, justify_qc, genesis_block, state), + }) + } + + /// reload previous state based on most recent leaf + pub fn from_reload(anchor_leaf: LEAF) -> Self { + Self { inner: anchor_leaf } + } +} diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs new file mode 100644 index 0000000000..55e430cbe4 --- /dev/null +++ b/hotshot/src/tasks/mod.rs @@ -0,0 +1,565 @@ +//! Provides a number of tasks that run continuously on a [`HotShot`] + +use crate::{ + async_spawn, types::SystemContextHandle, DACertificate, HotShotSequencingConsensusApi, + QuorumCertificate, SequencingQuorumEx, SystemContext, +}; +use async_compatibility_layer::art::{async_sleep, async_spawn_local}; +use futures::FutureExt; +use hotshot_task::{ + boxed_sync, + event_stream::ChannelStream, + task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes}, + task_impls::TaskBuilder, + task_launcher::TaskRunner, + GeneratedStream, Merge, +}; +use hotshot_task_impls::{ + consensus::{consensus_event_filter, ConsensusTaskTypes, SequencingConsensusTaskState}, + da::{DATaskState, DATaskTypes}, + events::SequencingHotShotEvent, + network::{ + NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, + NetworkMessageTaskTypes, NetworkTaskKind, + }, + view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, +}; +use hotshot_types::{ + certificate::ViewSyncCertificate, + constants::LOOK_AHEAD, + data::{ProposalType, QuorumProposal, SequencingLeaf}, + event::Event, + message::{Message, Messages, SequencingMessage}, + traits::{ + election::{ConsensusExchange, Membership}, + network::{CommunicationChannel, TransmitType}, + node_implementation::{ + CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, + }, + state::ConsensusTime, + Block, + }, + vote::{ViewSyncData, VoteType}, +}; +use std::{ + collections::HashMap, + marker::PhantomData, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; +use tracing::info; + +/// Task to look up a node in the future as needed +pub async fn network_lookup_task>( + hotshot: SystemContext, + shut_down: Arc, +) { + info!("Launching network lookup task"); + let networking = hotshot.inner.exchanges.quorum_exchange().network().clone(); + + let inner = hotshot.inner.clone(); + + let mut completion_map: HashMap> = HashMap::default(); + + while !shut_down.load(Ordering::Relaxed) { + let lock = hotshot.inner.recv_network_lookup.lock().await; + + if let Ok(Some(cur_view)) = lock.recv().await { + let view_to_lookup = cur_view + LOOK_AHEAD; + + // perform pruning + // TODO in the future btreemap would be better + completion_map = completion_map + .drain() + .filter(|(view, is_done)| { + if !is_done.load(Ordering::Relaxed) { + // we are past the view where this is useful + if cur_view >= *view { + is_done.store(true, Ordering::Relaxed); + return true; + } + // we aren't done + return false; + } + true + }) + .collect(); + + // logic to look ahead + if !inner.exchanges.quorum_exchange().is_leader(view_to_lookup) { + let is_done = Arc::new(AtomicBool::new(false)); + completion_map.insert(view_to_lookup, is_done.clone()); + let inner = inner.clone(); + let networking = networking.clone(); + async_spawn_local(async move { + info!("starting lookup for {:?}", view_to_lookup); + let _result = networking + .lookup_node(inner.exchanges.quorum_exchange().get_leader(view_to_lookup)) + .await; + info!("finished lookup for {:?}", view_to_lookup); + }); + } + } + } + + // shut down all child tasks + for (_, is_done) in completion_map { + is_done.store(true, Ordering::Relaxed); + } +} + +/// event for global event stream +#[derive(Clone, Debug)] +pub enum GlobalEvent { + /// shut everything down + Shutdown, + /// dummy (TODO delete later) + Dummy, +} + +/// Add the network task to handle messages and publish events. +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_network_message_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + EXCHANGE: ConsensusExchange< + TYPES, + Message, + Proposal = PROPOSAL, + Vote = VOTE, + Membership = MEMBERSHIP, + > + 'static, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + exchange: EXCHANGE, +) -> TaskRunner +// This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. +where + EXCHANGE::Networking: + CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, +{ + let channel = exchange.network().clone(); + let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { + let network = channel.clone(); + let closure = async move { + loop { + let msgs = Messages( + network + .recv_msgs(TransmitType::Broadcast) + .await + .expect("Failed to receive broadcast messages"), + ); + if msgs.0.is_empty() { + async_sleep(Duration::new(0, 500)).await; + } else { + break msgs; + } + } + }; + Some(boxed_sync(closure)) + })); + let channel = exchange.network().clone(); + let direct_stream = GeneratedStream::>::new(Arc::new(move || { + let network = channel.clone(); + let closure = async move { + loop { + let msgs = Messages( + network + .recv_msgs(TransmitType::Direct) + .await + .expect("Failed to receive direct messages"), + ); + if msgs.0.is_empty() { + async_sleep(Duration::new(0, 500)).await; + } else { + break msgs; + } + } + }; + Some(boxed_sync(closure)) + })); + let message_stream = Merge::new(broadcast_stream, direct_stream); + let network_state: NetworkMessageTaskState<_, _> = NetworkMessageTaskState { + event_stream: event_stream.clone(), + }; + let registry = task_runner.registry.clone(); + let network_message_handler = HandleMessage(Arc::new( + move |messages: either::Either, Messages>, + mut state: NetworkMessageTaskState| { + let messages = match messages { + either::Either::Left(messages) | either::Either::Right(messages) => messages, + }; + async move { + state.handle_messages(messages.0).await; + (None, state) + } + .boxed() + }, + )); + let networking_name = "Networking Task"; + + let networking_task_builder = + TaskBuilder::>::new(networking_name.to_string()) + .register_message_stream(message_stream) + .register_registry(&mut registry.clone()) + .await + .register_state(network_state) + .register_message_handler(network_message_handler); + + // impossible for unwraps to fail + // we *just* registered + let networking_task_id = networking_task_builder.get_task_id().unwrap(); + let networking_task = NetworkMessageTaskTypes::build(networking_task_builder).launch(); + + task_runner.add_task( + networking_task_id, + networking_name.to_string(), + networking_task, + ) +} + +/// Add the network task to handle events and send messages. +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_network_event_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + EXCHANGE: ConsensusExchange< + TYPES, + Message, + Proposal = PROPOSAL, + Vote = VOTE, + Membership = MEMBERSHIP, + > + 'static, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + exchange: EXCHANGE, + task_kind: NetworkTaskKind, +) -> TaskRunner +// This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. +where + EXCHANGE::Networking: + CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, +{ + let filter = NetworkEventTaskState::< + TYPES, + I, + PROPOSAL, + VOTE, + MEMBERSHIP, + >::Networking, + >::filter(task_kind); + let channel = exchange.network().clone(); + let network_state: NetworkEventTaskState<_, _, _, _, _, _> = NetworkEventTaskState { + channel, + event_stream: event_stream.clone(), + view: TYPES::Time::genesis(), + phantom: PhantomData, + }; + let registry = task_runner.registry.clone(); + let network_event_handler = HandleEvent(Arc::new( + move |event, mut state: NetworkEventTaskState<_, _, _, _, MEMBERSHIP, _>| { + let membership = exchange.membership().clone(); + async move { + let completion_status = state.handle_event(event, &membership).await; + (completion_status, state) + } + .boxed() + }, + )); + let networking_name = "Networking Task"; + + let networking_task_builder = + TaskBuilder::>::new(networking_name.to_string()) + .register_event_stream(event_stream.clone(), filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(network_state) + .register_event_handler(network_event_handler); + + // impossible for unwraps to fail + // we *just* registered + let networking_task_id = networking_task_builder.get_task_id().unwrap(); + let networking_task = NetworkEventTaskTypes::build(networking_task_builder).launch(); + + task_runner.add_task( + networking_task_id, + networking_name.to_string(), + networking_task, + ) +} + +/// add the consensus task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_consensus_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + output_stream: ChannelStream>, + handle: SystemContextHandle, +) -> TaskRunner +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + let consensus = handle.hotshot.get_consensus(); + let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let registry = task_runner.registry.clone(); + // build the consensus task + let consensus_state = SequencingConsensusTaskState { + registry: registry.clone(), + consensus, + timeout: handle.hotshot.inner.config.next_view_timeout, + cur_view: TYPES::Time::new(0), + block: TYPES::BlockType::new(), + quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), + api: c_api.clone(), + committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), + _pd: PhantomData, + vote_collector: None, + timeout_task: async_spawn(async move {}), + event_stream: event_stream.clone(), + output_event_stream: output_stream, + certs: HashMap::new(), + current_proposal: None, + id: handle.hotshot.inner.id, + qc: None, + }; + let filter = FilterEvent(Arc::new(consensus_event_filter)); + let consensus_name = "Consensus Task"; + let consensus_event_handler = HandleEvent(Arc::new( + move |event, + mut state: SequencingConsensusTaskState< + TYPES, + I, + HotShotSequencingConsensusApi, + >| { + async move { + if let SequencingHotShotEvent::Shutdown = event { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + state.handle_event(event).await; + (None, state) + } + } + .boxed() + }, + )); + let consensus_task_builder = TaskBuilder::< + ConsensusTaskTypes>, + >::new(consensus_name.to_string()) + .register_event_stream(event_stream.clone(), filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(consensus_state) + .register_event_handler(consensus_event_handler); + // impossible for unwrap to fail + // we *just* registered + let consensus_task_id = consensus_task_builder.get_task_id().unwrap(); + let consensus_task = ConsensusTaskTypes::build(consensus_task_builder).launch(); + + task_runner.add_task( + consensus_task_id, + consensus_name.to_string(), + consensus_task, + ) +} + +/// add the Data Availability task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_da_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + committee_exchange: CommitteeEx, + handle: SystemContextHandle, +) -> TaskRunner +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + // build the da task + let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let registry = task_runner.registry.clone(); + let da_state = DATaskState { + registry: registry.clone(), + api: c_api.clone(), + consensus: handle.hotshot.get_consensus(), + cur_view: TYPES::Time::new(0), + committee_exchange: committee_exchange.into(), + vote_collector: None, + event_stream: event_stream.clone(), + id: handle.hotshot.inner.id, + }; + let da_event_handler = HandleEvent(Arc::new( + move |event, mut state: DATaskState>| { + async move { + let completion_status = state.handle_event(event).await; + (completion_status, state) + } + .boxed() + }, + )); + let da_name = "DA Task"; + let da_event_filter = FilterEvent(Arc::new( + DATaskState::>::filter, + )); + + let da_task_builder = TaskBuilder::< + DATaskTypes>, + >::new(da_name.to_string()) + .register_event_stream(event_stream.clone(), da_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(da_state) + .register_event_handler(da_event_handler); + // impossible for unwrap to fail + // we *just* registered + let da_task_id = da_task_builder.get_task_id().unwrap(); + let da_task = DATaskTypes::build(da_task_builder).launch(); + task_runner.add_task(da_task_id, da_name.to_string(), da_task) +} + +/// add the view sync task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_view_sync_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + handle: SystemContextHandle, +) -> TaskRunner +where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ + let api = HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + // build the view sync task + let view_sync_state = ViewSyncTaskState { + registry: task_runner.registry.clone(), + event_stream: event_stream.clone(), + current_view: TYPES::Time::new(0), + next_view: TYPES::Time::new(0), + exchange: (*api.inner.exchanges.view_sync_exchange()).clone().into(), + api, + num_timeouts_tracked: 0, + replica_task_map: HashMap::default(), + relay_task_map: HashMap::default(), + view_sync_timeout: Duration::new(5, 0), + id: handle.hotshot.inner.id, + last_garbage_collected_view: TYPES::Time::new(0), + }; + let registry = task_runner.registry.clone(); + let view_sync_event_handler = + HandleEvent(Arc::new( + move |event, + mut state: ViewSyncTaskState< + TYPES, + I, + HotShotSequencingConsensusApi, + >| { + async move { + if let SequencingHotShotEvent::Shutdown = event { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + state.handle_event(event).await; + (None, state) + } + } + .boxed() + }, + )); + let view_sync_name = "ViewSync Task"; + let view_sync_event_filter = FilterEvent(Arc::new( + ViewSyncTaskState::>::filter, + )); + + let view_sync_task_builder = TaskBuilder::< + ViewSyncTaskStateTypes>, + >::new(view_sync_name.to_string()) + .register_event_stream(event_stream.clone(), view_sync_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(view_sync_state) + .register_event_handler(view_sync_event_handler); + // impossible for unwrap to fail + // we *just* registered + let view_sync_task_id = view_sync_task_builder.get_task_id().unwrap(); + + let view_sync_task = ViewSyncTaskStateTypes::build(view_sync_task_builder).launch(); + task_runner.add_task( + view_sync_task_id, + view_sync_name.to_string(), + view_sync_task, + ) +} diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs new file mode 100644 index 0000000000..0f751912c5 --- /dev/null +++ b/hotshot/src/traits.rs @@ -0,0 +1,28 @@ +/// Sortition trait +pub mod election; +mod networking; +mod node_implementation; +mod storage; + +pub use hotshot_types::traits::{Block, State}; +pub use networking::{NetworkError, NetworkReliability}; +pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; +pub use storage::{Result as StorageResult, Storage}; + +/// Module for publicly usable implementations of the traits +pub mod implementations { + pub use super::{ + networking::{ + libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, + memory_network::{DummyReliability, MasterMap, MemoryCommChannel, MemoryNetwork}, + web_server_libp2p_fallback::{CombinedNetworks, WebServerWithFallbackCommChannel}, + web_server_network::{WebCommChannel, WebServerNetwork}, + }, + storage::memory_storage::MemoryStorage, // atomic_storage::AtomicStorage, + }; +} + +/// Dummy testing implementations +pub mod dummy { + pub use hotshot_types::traits::state::dummy::DummyState; +} diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election.rs new file mode 100644 index 0000000000..b3753bbc09 --- /dev/null +++ b/hotshot/src/traits/election.rs @@ -0,0 +1,4 @@ +//! elections used for consensus + +/// static (round robin) committee election +pub mod static_committee; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs new file mode 100644 index 0000000000..2c95746d4b --- /dev/null +++ b/hotshot/src/traits/election/static_committee.rs @@ -0,0 +1,192 @@ +// use ark_bls12_381::Parameters as Param381; +use commit::{Commitment, Committable, RawCommitmentBuilder}; +use espresso_systems_common::hotshot::tag; +use hotshot_signature_key::bn254::BN254Pub; +use hotshot_types::{ + data::LeafType, + traits::{ + election::{Checked, ElectionConfig, ElectionError, Membership, VoteToken}, + node_implementation::NodeType, + signature_key::{EncodedSignature, SignatureKey}, + }, +}; +#[allow(deprecated)] +use serde::{Deserialize, Serialize}; +use std::{marker::PhantomData, num::NonZeroU64}; +use tracing::debug; + +/// Dummy implementation of [`Membership`] + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct GeneralStaticCommittee, PUBKEY: SignatureKey> { + /// All the nodes participating + nodes: Vec, + /// All the nodes participating and their stake + nodes_with_stake: Vec, + /// The nodes on the static committee + committee_nodes: Vec, + /// The nodes on the static committee and their stake + committee_nodes_with_stake: Vec, + /// Node type phantom + _type_phantom: PhantomData, + /// Leaf phantom + _leaf_phantom: PhantomData, +} + +/// static committee using a vrf kp +pub type StaticCommittee = GeneralStaticCommittee; + +impl, PUBKEY: SignatureKey> + GeneralStaticCommittee +{ + /// Creates a new dummy elector + #[must_use] + pub fn new(nodes: Vec, nodes_with_stake: Vec) -> Self { + Self { + nodes: nodes.clone(), + nodes_with_stake: nodes_with_stake.clone(), + committee_nodes: nodes, + committee_nodes_with_stake: nodes_with_stake, + _type_phantom: PhantomData, + _leaf_phantom: PhantomData, + } + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)] +#[serde(bound(deserialize = ""))] +/// Vote token for a static committee +pub struct StaticVoteToken { + /// signature + signature: EncodedSignature, + /// public key + pub_key: K, +} + +impl VoteToken for StaticVoteToken { + fn vote_count(&self) -> NonZeroU64 { + NonZeroU64::new(1).unwrap() + } +} + +impl Committable for StaticVoteToken { + fn commit(&self) -> Commitment { + RawCommitmentBuilder::new("StaticVoteToken") + .var_size_field("signature", &self.signature.0) + .var_size_field("pub_key", &self.pub_key.to_bytes().0) + .finalize() + } + + fn tag() -> String { + tag::STATIC_VOTE_TOKEN.to_string() + } +} + +/// configuration for static committee. stub for now +#[derive(Default, Clone, Serialize, Deserialize, core::fmt::Debug)] +pub struct StaticElectionConfig { + /// Number of nodes on the committee + num_nodes: u64, +} + +impl ElectionConfig for StaticElectionConfig {} + +impl, PUBKEY: SignatureKey + 'static> Membership + for GeneralStaticCommittee +where + TYPES: NodeType< + SignatureKey = PUBKEY, + VoteTokenType = StaticVoteToken, + ElectionConfigType = StaticElectionConfig, + >, +{ + /// Clone the public key and corresponding stake table for current elected committee + fn get_committee_qc_stake_table(&self) -> Vec { + self.committee_nodes_with_stake.clone() + } + + /// Index the vector of public keys with the current view number + fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + let index = (*view_number % self.nodes.len() as u64) as usize; + self.nodes[index].clone() + } + + /// Simply make the partial signature + fn make_vote_token( + &self, + view_number: TYPES::Time, + private_key: &::PrivateKey, + ) -> std::result::Result>, ElectionError> { + // TODO ED Below + let pub_key = PUBKEY::from_private(private_key); + if !self.committee_nodes.contains(&pub_key) { + return Ok(None); + } + let mut message: Vec = vec![]; + message.extend(view_number.to_le_bytes()); + // Change the length from 8 to 32 to make it consistent with other commitments, use defined constant? instead of 32. + message.extend_from_slice(&[0u8; 32 - 8]); + let signature = PUBKEY::sign(private_key, &message); + Ok(Some(StaticVoteToken { signature, pub_key })) + } + + fn validate_vote_token( + &self, + pub_key: PUBKEY, + token: Checked, + ) -> Result, ElectionError> { + match token { + Checked::Valid(t) | Checked::Unchecked(t) => { + if self.committee_nodes.contains(&pub_key) { + Ok(Checked::Valid(t)) + } else { + Ok(Checked::Inval(t)) + } + } + Checked::Inval(t) => Ok(Checked::Inval(t)), + } + } + + fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { + StaticElectionConfig { num_nodes } + } + + fn create_election( + keys_qc: Vec, + keys: Vec, + config: TYPES::ElectionConfigType, + ) -> Self { + let mut committee_nodes = keys.clone(); + let mut committee_nodes_with_stake = keys_qc.clone(); + committee_nodes.truncate(config.num_nodes.try_into().unwrap()); + debug!("Election Membership Size: {}", config.num_nodes); + committee_nodes_with_stake.truncate(config.num_nodes.try_into().unwrap()); + Self { + nodes_with_stake: keys_qc, + nodes: keys, + committee_nodes, + committee_nodes_with_stake, + _type_phantom: PhantomData, + _leaf_phantom: PhantomData, + } + } + + fn total_nodes(&self) -> usize { + self.committee_nodes.len() + } + + fn success_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.committee_nodes.len() as u64 * 2) / 3) + 1).unwrap() + } + + fn failure_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.committee_nodes.len() as u64) / 3) + 1).unwrap() + } + + fn get_committee( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.committee_nodes.clone().into_iter().collect() + } +} diff --git a/hotshot/src/traits/election/vrf.rs b/hotshot/src/traits/election/vrf.rs new file mode 100644 index 0000000000..d1151e0ab2 --- /dev/null +++ b/hotshot/src/traits/election/vrf.rs @@ -0,0 +1,1024 @@ +use hotshot_types::traits::signature_key::EncodedPublicKey; + +#[allow(deprecated)] +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, fmt::Debug, marker::PhantomData, num::NonZeroU64}; + +// TODO wrong palce for this +/// the sortition committee size parameter +pub const SORTITION_PARAMETER: u64 = 100; + +// TODO compatibility this function's impl into a trait +// TODO do we necessariy want the units of stake to be a u64? or generics +/// The stake table for VRFs +#[derive(Serialize, Deserialize, Debug)] +pub struct VRFStakeTable { + /// the mapping of id -> stake + mapping: BTreeMap, + /// total stake present + total_stake: NonZeroU64, + /// PhantomData + _pd: PhantomData<(VRF, VRFHASHER, VRFPARAMS)>, +} + +impl Clone for VRFStakeTable { + fn clone(&self) -> Self { + Self { + mapping: self.mapping.clone(), + total_stake: self.total_stake, + _pd: PhantomData, + } + } +} + +// impl VRFStakeTable { +// /// get total stake +// #[must_use] +// pub fn get_all_stake(&self) -> NonZeroU64 { +// self.total_stake +// } +// } + +// impl VRFStakeTable +// where +// VRF: Vrf, +// VRFPARAMS: Bls12Parameters, +// ::G1Parameters: TEHashToGroup, +// VRF::PublicKey: Clone, +// { +// /// get total stake +// /// # Panics +// /// If converting non-zero stake into `NonZeroU64` fails +// pub fn get_stake(&self, pk: &JfPubKey) -> Option +// where +// SIGSCHEME: SignatureScheme< +// VerificationKey = VRF::PublicKey, +// PublicParameter = (), +// MessageUnit = u8, +// >, +// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// { +// let encoded = pk.to_bytes(); +// let stake = self.mapping.get(&encoded).map(|val| val.get()); +// stake.and_then(NonZeroU64::new) +// } +// } + +// /// the vrf implementation +// #[derive(Derivative)] +// #[derivative(Debug, Eq, PartialEq)] +// pub struct VrfImpl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> +// where +// VRF: Vrf + Sync + Send, +// TYPES: NodeType, +// { +// /// the stake table +// #[derivative(Debug = "ignore", PartialEq = "ignore")] +// stake_table: VRFStakeTable, +// /// the proof params +// #[derivative(Debug = "ignore", PartialEq = "ignore")] +// proof_parameters: VRF::PublicParameter, +// /// the rng +// #[derivative(PartialEq = "ignore")] +// prng: std::sync::Arc>, +// /// the committee parameter +// sortition_parameter: NonZeroU64, +// /// the chain commitment seed +// chain_seed: [u8; 32], +// /// pdf cache +// #[derivative(PartialEq = "ignore")] +// _sortition_cache: std::sync::Arc>>>, + +// /// phantom data +// _pd: PhantomData<(TYPES, LEAF, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS)>, +// } + +// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> Clone +// for VrfImpl +// where +// VRF: Vrf + Sync + Send, +// TYPES: NodeType, +// { +// fn clone(&self) -> Self { +// Self { +// stake_table: self.stake_table.clone(), +// proof_parameters: (), +// prng: self.prng.clone(), +// sortition_parameter: self.sortition_parameter, +// chain_seed: self.chain_seed, +// _sortition_cache: Arc::default(), +// _pd: PhantomData, +// } +// } +// } + +// /// TODO doc me +// #[derive(Serialize, Deserialize, Clone)] +// pub struct VRFVoteToken { +// /// The public key assocaited with this token +// pub pub_key: PUBKEY, +// /// The list of signatures +// pub proof: PROOF, +// /// The number of signatures that are valid +// /// TODO (ct) this should be the sorition outbput +// pub count: NonZeroU64, +// } + +// impl Hash for VRFVoteToken +// where +// PUBKEY: serde::Serialize, +// PROOF: serde::Serialize, +// { +// fn hash(&self, state: &mut H) { +// bincode_opts().serialize(&self.pub_key).unwrap().hash(state); +// bincode_opts().serialize(&self.proof).unwrap().hash(state); +// self.count.hash(state); +// } +// } + +// impl Debug for VRFVoteToken { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// f.debug_struct("VRFVoteToken") +// .field("pub_key", &std::any::type_name::()) +// .field("proof", &std::any::type_name::()) +// .field("count", &self.count) +// .finish() +// } +// } + +// impl PartialEq for VRFVoteToken +// where +// PUBKEY: serde::Serialize, +// PROOF: serde::Serialize, +// { +// fn eq(&self, other: &Self) -> bool { +// self.count == other.count +// && bincode_opts().serialize(&self.pub_key).unwrap() +// == bincode_opts().serialize(&other.pub_key).unwrap() +// && bincode_opts().serialize(&self.proof).unwrap() +// == bincode_opts().serialize(&other.proof).unwrap() +// } +// } + +// impl VoteToken for VRFVoteToken +// where +// PUBKEY: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, +// PROOF: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, +// { +// fn vote_count(&self) -> NonZeroU64 { +// self.count +// } +// } + +// impl Committable for VRFVoteToken +// where +// PUBKEY: serde::Serialize, +// PROOF: serde::Serialize, +// { +// fn commit(&self) -> Commitment { +// RawCommitmentBuilder::new(std::any::type_name::()) +// .u64(self.count.get()) +// .var_size_bytes(bincode_opts().serialize(&self.pub_key).unwrap().as_slice()) +// .var_size_bytes(bincode_opts().serialize(&self.proof).unwrap().as_slice()) +// .finalize() +// } + +// fn tag() -> String { +// tag::VRF_VOTE_TOKEN.to_string() +// } +// } + +// // KEY is VRFPubKey +// impl> +// Membership for VrfImpl +// where +// SIGSCHEME: SignatureScheme + Sync + Send + 'static, +// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// VRF: Vrf< +// PublicParameter = (), +// Input = Vec, +// Output = Vec, +// PublicKey = SIGSCHEME::VerificationKey, +// SecretKey = SIGSCHEME::SigningKey, +// > + Sync +// + Send +// + 'static, +// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, +// VRF::PublicParameter: Sync + Send, +// VRFHASHER: digest::Digest + Clone + Sync + Send + 'static, +// VRFPARAMS: Sync + Send + Bls12Parameters, +// ::G1Parameters: TEHashToGroup, +// TYPES: NodeType< +// VoteTokenType = VRFVoteToken, +// ElectionConfigType = VRFStakeTableConfig, +// SignatureKey = JfPubKey, +// >, +// { +// // pubkey -> unit of stake +// type StakeTable = VRFStakeTable; + +// // FIXED STAKE +// // just return the state +// fn get_stake_table( +// &self, +// _view_number: TYPES::Time, +// _state: &TYPES::StateType, +// ) -> Self::StakeTable { +// self.stake_table.clone() +// } + +// fn get_leader(&self, view_number: TYPES::Time) -> JfPubKey { +// // TODO fst2 (ct) this is round robin, we should make this dependent on +// // the VRF + some source of randomness + +// // TODO for now do by stake table of how much stake each +// // participant has +// let mapping = &self.stake_table.mapping; +// let index = ((*view_number) as usize) % mapping.len(); +// let encoded = mapping.keys().nth(index).unwrap(); +// SignatureKey::from_bytes(encoded).unwrap() +// } + +// // what this is doing: +// // - +// fn make_vote_token( +// // TODO see if we can make this take &mut self +// // because we're using a mutable prng +// &self, +// view_number: TYPES::Time, +// private_key: &(SIGSCHEME::SigningKey, SIGSCHEME::VerificationKey), +// ) -> Result, ElectionError> { +// let pub_key = JfPubKey::::from_native(private_key.1.clone()); +// let Some(replicas_stake) = self.stake_table.get_stake(&pub_key) else { return Ok(None) }; + +// let view_seed = generate_view_seed::(view_number, &self.chain_seed); + +// let proof = Self::internal_get_vrf_proof( +// &private_key.0, +// &self.proof_parameters, +// &mut self.prng.lock().unwrap(), +// &view_seed, +// )?; + +// let selected_stake = Self::internal_get_sortition_for_proof( +// &self.proof_parameters, +// &proof, +// self.stake_table.get_all_stake(), +// replicas_stake, +// self.sortition_parameter, +// ); + +// match selected_stake { +// Some(count) => { +// // TODO (ct) this can fail, return Result::Err +// let proof = VRF::prove( +// &self.proof_parameters, +// &private_key.0, +// &view_seed, +// &mut *self.prng.lock().unwrap(), +// ) +// .unwrap(); + +// Ok(Some(VRFVoteToken { +// pub_key: private_key.1.clone(), +// proof, +// count, +// })) +// } +// None => Ok(None), +// } +// } + +// fn validate_vote_token( +// &self, +// view_number: TYPES::Time, +// pub_key: JfPubKey, +// token: Checked, +// ) -> Result, ElectionError> { +// match token { +// Checked::Unchecked(token) => { +// let stake: Option = self.stake_table.get_stake(&pub_key); +// let view_seed = +// generate_view_seed::(view_number, &self.chain_seed); +// if let Some(stake) = stake { +// Self::internal_check_sortition( +// &pub_key.pk, +// &self.proof_parameters, +// &token.proof, +// self.stake_table.get_all_stake(), +// stake, +// self.sortition_parameter, +// token.count, +// &view_seed, +// ) +// .map(|c| match c { +// Checked::Inval(_) => Checked::Inval(token), +// Checked::Valid(_) => Checked::Valid(token), +// Checked::Unchecked(_) => Checked::Unchecked(token), +// }) +// } else { +// // TODO better error +// Err(ElectionError::StubError) +// } +// } +// already_checked => Ok(already_checked), +// } +// } + +// fn create_election(keys: Vec>, config: TYPES::ElectionConfigType) -> Self { +// // This all needs to be refactored. For one thing, having the stake table - even an initial +// // stake table - hardcoded like this is flat-out broken. This is, obviously, an artifact +// let genesis_seed = [0u8; 32]; +// VrfImpl::with_initial_stake(keys, &config, genesis_seed) +// } + +// fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { +// let mut stake = Vec::new(); +// let units_of_stake_per_node = NonZeroU64::new(100).unwrap(); +// for _ in 0..num_nodes { +// stake.push(units_of_stake_per_node); +// } +// VRFStakeTableConfig { +// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), +// distribution: stake, +// } +// } + +// fn success_threshold(&self) -> NonZeroU64 { +// NonZeroU64::new(((u64::from(self.sortition_parameter) * 2) / 3) + 1).unwrap() +// } + +// fn failure_threshold(&self) -> NonZeroU64 { +// NonZeroU64::new(((u64::from(self.sortition_parameter)) / 3) + 1).unwrap() +// } +// /// TODO if we ever come back to using this, we'll need to change this +// /// this stub is incorrect as it stands right now +// fn get_committee( +// &self, +// _view_number: ::Time, +// ) -> std::collections::BTreeSet<::SignatureKey> { +// self.stake_table +// .mapping +// .keys() +// .clone() +// .filter_map(::SignatureKey::from_bytes) +// .collect() +// } +// } + +// /// checks that the expected aomunt of stake matches the VRF output +// /// TODO this can be optimized most likely +// fn check_bin_idx( +// expected_amount_of_stake: u64, +// replicas_stake: u64, +// total_stake: u64, +// sortition_parameter: u64, +// unnormalized_seed: &[u8; 32], +// cache: &mut HashMap>, +// ) -> Option { +// let bin_idx = find_bin_idx( +// replicas_stake, +// total_stake, +// sortition_parameter, +// unnormalized_seed, +// cache, +// ); +// bin_idx.map(|idx| idx == NonZeroU64::new(expected_amount_of_stake).unwrap()) +// } + +// /// generates the seed from algorand paper +// /// baseed on `view_number` and a constant as of now, but in the future will be other things +// /// this is a stop-gap +// fn generate_view_seed( +// view_number: TYPES::Time, +// vrf_seed: &[u8; 32], +// ) -> [u8; 32] { +// let mut hasher = HASHER::new(); +// hasher.update(vrf_seed); +// hasher.update(view_number.deref().to_le_bytes()); +// let mut output = [0u8; 32]; +// output.copy_from_slice(hasher.finalize().as_ref()); +// output +// } + +// /// represents a binomial query made by sortition +// /// `B(stake_attempt; replicas_stake; sortition_parameter / total_stake)` +// #[derive(Hash, Eq, PartialEq, Clone, Debug)] +// pub struct BinomialQuery { +// /// the number of heads +// stake_attempt: u64, +// /// the total number of coin flips +// replicas_stake: u64, +// /// the total amount of stake +// total_stake: u64, +// /// the sortition parameter +// sortition_parameter: u64, +// } + +// impl BinomialQuery { +// /// get the committee parameter +// /// for this query +// #[must_use] +// pub fn get_p(&self) -> Ratio { +// let sortition_parameter_big: BigUint = BigUint::from(self.sortition_parameter); +// let total_stake_big: BigUint = BigUint::from(self.total_stake); +// Ratio::new(sortition_parameter_big, total_stake_big) +// } +// } + +// #[instrument] +// fn calculate_threshold_from_cache( +// previous_calculation: Option<(BinomialQuery, Ratio)>, +// query: BinomialQuery, +// ) -> Option> { +// if let Some((previous_query, previous_result)) = previous_calculation { +// let expected_previous_query = BinomialQuery { +// stake_attempt: query.stake_attempt - 1, +// ..query +// }; +// if previous_query == expected_previous_query { +// let permutation = Ratio::new( +// BigUint::from(query.replicas_stake - query.stake_attempt + 1), +// BigUint::from(query.stake_attempt), +// ); +// let p = query.get_p(); +// assert!(p.numer() < p.denom()); +// let reciprocal = Ratio::recip(&(Ratio::from_integer(BigUint::from(1_u32)) - p.clone())); +// let result = previous_result * p * reciprocal * permutation; +// assert!(result.numer() < result.denom()); + +// return Some(result); +// } +// } +// calculate_threshold(query) +// } + +// // Calculates B(j; w; p) where B means bernoulli distribution. +// // That is: run w trials, with p probability of success for each trial, and return the probability +// // of j successes. +// // p = tau / W, where tau is the sortition parameter (controlling committee size) +// // this is the only usage of W and tau +// // +// // Translation: +// // stake_attempt: our guess at what the stake might be. This is j +// // replicas_stake: the units of stake owned by the replica. This is w +// // total_stake: the units of stake owned in total. This is W +// // sorition_parameter: the parameter controlling the committee size. This is tau +// // +// // TODO (ct) better error handling +// // returns none if one of our calculations fails +// // +// // TODO keep data around from last iteration so less calculation is needed +// // TODO test this "correct/simple" implementation against any optimized version +// #[instrument] +// // fn calculate_threshold(stake_attempt: u32, replicas_stake: u64, total_stake: u64, sortition_parameter: u64) -> Option> { +// fn calculate_threshold(query: BinomialQuery) -> Option> { +// let stake_attempt = query.stake_attempt; +// tracing::info!("Running calculate threshold"); +// // TODO (ct) better error handling +// if stake_attempt > query.replicas_stake { +// error!("j is larger than amount of stake we are allowed"); +// return None; +// } + +// let sortition_parameter_big: BigUint = BigUint::from(query.sortition_parameter); +// let total_stake_big: BigUint = BigUint::from(query.total_stake); +// let one_big = BigUint::from(1_u32); + +// // this is the p parameter for the bernoulli distribution +// let p = Ratio::new(sortition_parameter_big, total_stake_big); + +// assert!(p.numer() <= p.denom()); + +// info!("p is {p:?}"); + +// // number of tails in bernoulli +// let failed_num = query.replicas_stake - stake_attempt; + +// // TODO cancel things out (avoid calculating factorial) +// // TODO can just do division +// let num_permutations = Ratio::new( +// factorial(query.replicas_stake), +// factorial(stake_attempt) * factorial(failed_num), +// ); + +// info!("num permutations is {num_permutations:?}, failed_num is {failed_num:?}"); + +// let one = Ratio::from_integer(one_big); + +// // TODO can keep results from last try +// let result = num_permutations +// * (p.pow(i32::try_from(stake_attempt).ok()?) +// * (one - p).pow(i32::try_from(failed_num).ok()?)); + +// assert!(result.numer() < result.denom()); + +// info!("result is is {result:?}"); + +// Some(result) +// } + +// /// compute i! as a biguint +// fn factorial(mut i: u64) -> BigUint { +// if i == 0 { +// return BigUint::from(1u32); +// } + +// let mut result = BigUint::from(1u32); +// while i > 0 { +// result *= i; +// i -= 1; +// } +// result +// } + +// /// find the amount of stake we rolled. +// /// NOTE: in the future this requires a view numb +// /// Returns None if zero stake was rolled +// #[instrument] +// fn find_bin_idx( +// replicas_stake: u64, +// total_stake: u64, +// sortition_parameter: u64, +// unnormalized_seed: &[u8; 32], +// cache: &mut HashMap>, +// ) -> Option { +// let unnormalized_seed = BigUint::from_bytes_le(unnormalized_seed); +// let normalized_seed = Ratio::new(unnormalized_seed, BigUint::from(2_u32).pow(256)); +// assert!(normalized_seed.numer() < normalized_seed.denom()); +// let mut j: u64 = 0; + +// // [j, j+1) +// // [cdf(j),cdf(j+1)) + +// // left_threshold corresponds to the sum of all bernoulli distributions +// // from i in 0 to j: B(i; replicas_stake; p). Where p is calculated later and corresponds to +// // algorands paper +// let mut left_threshold = Ratio::from_integer(BigUint::from(0u32)); + +// loop { +// // check cache + +// // if cache miss, feed in with previous val from cache +// // that *probably* exists + +// assert!(left_threshold.numer() < left_threshold.denom()); +// let query = BinomialQuery { +// stake_attempt: j + 1, +// replicas_stake, +// total_stake, +// sortition_parameter, +// }; + +// let bin_val = { +// // we already computed this value +// if let Some(result) = cache.get(&query) { +// result.clone() +// } else { +// // we haven't computed this value, but maybe +// // we already computed the previous value + +// let mut maybe_old_query = query.clone(); +// maybe_old_query.stake_attempt -= 1; +// let old_result = cache +// .get(&maybe_old_query) +// .map(|x| (maybe_old_query, x.clone())); +// let result = calculate_threshold_from_cache(old_result, query.clone())?; +// cache.insert(query, result.clone()); +// result +// } +// }; + +// // corresponds to right range from apper +// let right_threshold = left_threshold + bin_val.clone(); + +// // debugging info. Unnecessary +// { +// let right_threshold_float = ToPrimitive::to_f64(&right_threshold.clone()); +// let bin_val_float = ToPrimitive::to_f64(&bin_val.clone()); +// let normalized_seed_float = ToPrimitive::to_f64(&normalized_seed.clone()); +// info!("rightthreshold: {right_threshold_float:?}, bin: {bin_val_float:?}, seed: {normalized_seed_float:?}"); +// } + +// // from i in 0 to j + 1: B(i; replicas_stake; p) +// if normalized_seed < right_threshold { +// match j { +// 0 => return None, +// _ => return Some(NonZeroU64::new(j).unwrap()), +// } +// } +// left_threshold = right_threshold; +// j += 1; +// } +// } + +// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> +// VrfImpl +// where +// SIGSCHEME: SignatureScheme + Sync + Send, +// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, +// VRF: Vrf< +// PublicParameter = (), +// Input = [u8; 32], +// Output = [u8; 32], +// PublicKey = SIGSCHEME::VerificationKey, +// SecretKey = SIGSCHEME::SigningKey, +// > + Sync +// + Send, +// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, +// VRF::PublicParameter: Sync + Send, +// VRFHASHER: digest::Digest + Clone + Sync + Send, +// VRFPARAMS: Sync + Send + Bls12Parameters, +// ::G1Parameters: TEHashToGroup, +// TYPES: NodeType, +// { +// /// create stake table with this initial stake +// /// # Panics +// /// TODO +// #[must_use] +// pub fn with_initial_stake( +// known_nodes: Vec>, +// config: &VRFStakeTableConfig, +// genesis_seed: [u8; 32], +// ) -> Self { +// assert_eq!(known_nodes.iter().len(), config.distribution.len()); +// let key_with_stake = known_nodes +// .into_iter() +// .map(|x| x.to_bytes()) +// .zip(config.distribution.clone()) +// .collect(); +// VrfImpl { +// stake_table: { +// let st = VRFStakeTable { +// mapping: key_with_stake, +// total_stake: NonZeroU64::new(config.distribution.iter().map(|x| x.get()).sum()) +// .unwrap(), +// _pd: PhantomData, +// }; +// st +// }, +// proof_parameters: (), +// chain_seed: genesis_seed, +// prng: Arc::new(Mutex::new(ChaChaRng::from_seed(Default::default()))), +// _pd: PhantomData, +// sortition_parameter: config.sortition_parameter, +// _sortition_cache: Arc::default(), +// } +// } + +// /// stateless delegate for VRF proof generation +// /// # Errors +// /// + +// fn internal_get_vrf_proof( +// private_key: &SIGSCHEME::SigningKey, +// proof_param: &VRF::PublicParameter, +// to_refactor: &mut rand_chacha::ChaChaRng, +// vrf_in_seed: &VRF::Input, +// ) -> Result { +// VRF::prove(proof_param, private_key, vrf_in_seed, to_refactor) +// .map_err(|_| ElectionError::StubError) +// } + +// /// stateless delegate for VRF sortition generation +// fn internal_get_sortition_for_proof( +// proof_param: &VRF::PublicParameter, +// proof: &VRF::Proof, +// total_stake: NonZeroU64, +// voter_stake: NonZeroU64, +// sortition_parameter: NonZeroU64, +// ) -> Option { +// // TODO (ct) this can fail, return result::err +// let hash = VRF::evaluate(proof_param, proof).unwrap(); +// let mut cache: HashMap> = HashMap::new(); + +// find_bin_idx( +// u64::from(voter_stake), +// u64::from(total_stake), +// sortition_parameter.into(), +// &hash, +// &mut cache, +// ) +// } + +// /// stateless delegate for VRF sortition confirmation +// /// # Errors +// /// if the proof is malformed +// #[allow(clippy::too_many_arguments)] +// fn internal_check_sortition( +// public_key: &SIGSCHEME::VerificationKey, +// proof_param: &VRF::PublicParameter, +// proof: &VRF::Proof, +// total_stake: NonZeroU64, +// voter_stake: NonZeroU64, +// sortition_parameter: NonZeroU64, +// sortition_claim: NonZeroU64, +// vrf_in_seed: &VRF::Input, +// ) -> Result, hotshot_types::traits::election::ElectionError> { +// if let Ok(true) = VRF::verify(proof_param, proof, public_key, vrf_in_seed) { +// let seed = VRF::evaluate(proof_param, proof).map_err(|_| ElectionError::StubError)?; +// if let Some(res) = check_bin_idx( +// u64::from(sortition_claim), +// u64::from(voter_stake), +// u64::from(total_stake), +// u64::from(sortition_parameter), +// &seed, +// &mut HashMap::new(), +// ) { +// if res { +// Ok(Checked::Valid(())) +// } else { +// Ok(Checked::Inval(())) +// } +// } else { +// Ok(Checked::Unchecked(())) +// } +// } else { +// Ok(Checked::Inval(())) +// } +// } + +// /// Stateless method to produce VRF proof and sortition for a given view number +// /// # Errors +// /// +// pub fn get_sortition_proof( +// private_key: &SIGSCHEME::SigningKey, +// proof_param: &VRF::PublicParameter, +// chain_seed: &VRF::Input, +// view_number: TYPES::Time, +// total_stake: NonZeroU64, +// voter_stake: NonZeroU64, +// sortition_parameter: NonZeroU64, +// ) -> Result<(VRF::Proof, Option), hotshot_types::traits::election::ElectionError> +// { +// let mut rng = ChaChaRng::from_seed(Default::default()); // maybe use something else that isn't deterministic? +// let view_seed = generate_view_seed::(view_number, chain_seed); +// let proof = Self::internal_get_vrf_proof(private_key, proof_param, &mut rng, &view_seed)?; +// let sortition = Self::internal_get_sortition_for_proof( +// proof_param, +// &proof, +// total_stake, +// voter_stake, +// sortition_parameter, +// ); +// Ok((proof, sortition)) +// } + +// /// Stateless method to verify VRF proof and sortition for a given view number +// /// # Errors +// /// +// #[allow(clippy::too_many_arguments)] +// pub fn check_sortition_proof( +// public_key: &JfPubKey, +// proof_param: &VRF::PublicParameter, +// proof: &VRF::Proof, +// total_stake: NonZeroU64, +// voter_stake: NonZeroU64, +// sortition_parameter: NonZeroU64, +// sortition_claim: NonZeroU64, +// chain_seed: &VRF::Input, +// view_number: TYPES::Time, +// ) -> Result { +// let view_seed = generate_view_seed::(view_number, chain_seed); +// Self::internal_check_sortition( +// &public_key.pk, +// proof_param, +// proof, +// total_stake, +// voter_stake, +// sortition_parameter, +// sortition_claim, +// &view_seed, +// ) +// .map(|c| matches!(c, Checked::Valid(_))) +// } +// } + +// impl> TestableElection +// for VrfImpl +// where +// TYPES: NodeType< +// VoteTokenType = VRFVoteToken< +// BLSVerKey, +// BLSSignature, +// >, +// ElectionConfigType = VRFStakeTableConfig, +// SignatureKey = JfPubKey, +// >, +// { +// fn generate_test_vote_token() -> TYPES::VoteTokenType { +// VRFVoteToken { +// count: NonZeroU64::new(1234).unwrap(), +// proof: BLSSignature::default(), +// pub_key: BLSVerKey::default(), +// } +// } +// } + +// /// configuration specifying the stake table +// #[derive(Clone, Serialize, Deserialize, core::fmt::Debug)] +// pub struct VRFStakeTableConfig { +// /// the committee size parameter +// pub sortition_parameter: NonZeroU64, +// /// the ordered distribution of stake across nodes +// pub distribution: Vec, +// } + +// impl Default for VRFStakeTableConfig { +// fn default() -> Self { +// VRFStakeTableConfig { +// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), +// distribution: Vec::new(), +// } +// } +// } + +// impl ElectionConfig for VRFStakeTableConfig {} + +// Tests have been commented out, so `mod tests` isn't used. +// #[cfg(test)] +// mod tests { +// use super::*; +// use ark_bls12_381::Parameters as Param381; +// use ark_std::test_rng; + +// use blake3::Hasher; +// use hotshot_types::{ +// data::ViewNumber, +// traits::{ +// block_contents::dummy::{DummyBlock, DummyTransaction}, +// consensus_type::validating_consensus::ValidatingConsensus, +// state::dummy::DummyState, +// }, +// }; +// use jf_primitives::{ +// signatures::{ +// bls::{BLSSignature, BLSVerKey}, +// BLSSignatureScheme, +// }, +// vrf::blsvrf::BLSVRFScheme, +// }; +// use std::{num::NonZeroUsize, time::Duration}; + +// #[derive( +// Copy, +// Clone, +// Debug, +// Default, +// Hash, +// PartialEq, +// Eq, +// PartialOrd, +// Ord, +// serde::Serialize, +// serde::Deserialize, +// )] +// struct TestTypes; +// impl NodeType for TestTypes { +// // TODO (da) can this be SequencingConsensus? +// type ConsensusType = ValidatingConsensus; +// type Time = ViewNumber; +// type BlockType = DummyBlock; +// type SignatureKey = JfPubKey; +// type VoteTokenType = VRFVoteToken< +// BLSVerKey, +// BLSSignature, +// >; +// type Transaction = DummyTransaction; +// type ElectionConfigType = VRFStakeTableConfig; +// type StateType = DummyState; +// } + +// fn gen_vrf_impl>( +// num_nodes: usize, +// ) -> ( +// VrfImpl< +// TestTypes, +// LEAF, +// BLSSignatureScheme, +// BLSVRFScheme, +// Hasher, +// Param381, +// >, +// Vec<( +// jf_primitives::signatures::bls::BLSSignKey, +// jf_primitives::signatures::bls::BLSVerKey, +// )>, +// ) { +// let mut known_nodes = Vec::new(); +// let mut keys = Vec::new(); +// let rng = &mut test_rng(); +// let mut stake_distribution = Vec::new(); +// let stake_per_node = NonZeroU64::new(100).unwrap(); +// let genesis_seed = [0u8; 32]; +// for _i in 0..num_nodes { +// let (sk, pk) = BLSSignatureScheme::::key_gen(&(), rng).unwrap(); +// keys.push((sk.clone(), pk.clone())); +// known_nodes.push(JfPubKey::from_native(pk.clone())); +// stake_distribution.push(stake_per_node); +// } +// let stake_table = VrfImpl::with_initial_stake( +// known_nodes, +// &VRFStakeTableConfig { +// sortition_parameter: std::num::NonZeroU64::new(SORTITION_PARAMETER).unwrap(), +// distribution: stake_distribution, +// }, +// genesis_seed, +// ); +// (stake_table, keys) +// } + +// pub fn check_if_valid(token: &Checked) -> bool { +// match token { +// Checked::Valid(_) => true, +// Checked::Inval(_) | Checked::Unchecked(_) => false, +// } +// } + +// // #[test] +// // pub fn test_sortition() { +// // setup_logging(); +// // let (vrf_impl, keys) = gen_vrf_impl::>(10); +// // let views = 100; + +// // for view in 0..views { +// // for (node_idx, (sk, pk)) in keys.iter().enumerate() { +// // let token_result = vrf_impl +// // .make_vote_token(ViewNumber::new(view), &(sk.clone(), pk.clone())) +// // .unwrap(); +// // match token_result { +// // Some(token) => { +// // let count = token.count; +// // let result = vrf_impl +// // .validate_vote_token( +// // ViewNumber::new(view), +// // JfPubKey::from_native(pk.clone()), +// // Checked::Unchecked(token), +// // ) +// // .unwrap(); +// // let result_is_valid = check_if_valid(&result); +// // error!("view {view:?}, node_idx {node_idx:?}, stake {count:?} "); +// // assert!(result_is_valid); +// // } +// // _ => continue, +// // } +// // } +// // } +// // } + +// #[test] +// pub fn test_factorial() { +// assert_eq!(factorial(0), BigUint::from(1u32)); +// assert_eq!(factorial(1), BigUint::from(1u32)); +// assert_eq!(factorial(2), BigUint::from(2u32)); +// assert_eq!(factorial(3), BigUint::from(6u32)); +// assert_eq!(factorial(4), BigUint::from(24u32)); +// assert_eq!(factorial(5), BigUint::from(120u32)); +// } + +// // TODO add failure case + +// #[test] +// fn network_config_is_serializable() { +// // validate that `RunResults` can be serialized +// // Note that there is currently an issue with `VRFPubKey` where it can't be serialized with toml +// // so instead we only test with serde_json +// let key = +// as TestableSignatureKey>::generate_test_key(1); +// let pub_key = JfPubKey::::from_private(&key); +// let mut config = hotshot_centralized_server::NetworkConfig { +// config: hotshot_types::HotShotConfig { +// election_config: Some(super::VRFStakeTableConfig { +// distribution: vec![NonZeroU64::new(1).unwrap()], +// sortition_parameter: NonZeroU64::new(1).unwrap(), +// }), +// known_nodes: vec![pub_key], +// execution_type: hotshot_types::ExecutionType::Incremental, +// total_nodes: NonZeroUsize::new(1).unwrap(), +// min_transactions: 1, +// max_transactions: NonZeroUsize::new(1).unwrap(), +// next_view_timeout: 1, +// timeout_ratio: (1, 1), +// round_start_delay: 1, +// start_delay: 1, +// num_bootstrap: 1, +// propose_min_round_time: Duration::from_secs(1), +// propose_max_round_time: Duration::from_secs(1), +// }, +// ..Default::default() +// }; +// serde_json::to_string(&config).unwrap(); +// assert!(toml::to_string(&config).is_err()); + +// // validate that this is indeed a `pub_key` issue +// config.config.known_nodes.clear(); +// serde_json::to_string(&config).unwrap(); +// toml::to_string(&config).unwrap(); +// } +// } diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs new file mode 100644 index 0000000000..5c8935e9b1 --- /dev/null +++ b/hotshot/src/traits/networking.rs @@ -0,0 +1,56 @@ +//! Network access compatibility +//! +//! This module contains a trait abstracting over network access, as well as implementations of that +//! trait. Currently this includes +//! - [`MemoryNetwork`](memory_network::MemoryNetwork), an in memory testing-only implementation +//! - [`Libp2pNetwork`](libp2p_network::Libp2pNetwork), a production-ready networking impelmentation built on top of libp2p-rs. + +pub mod libp2p_network; +pub mod memory_network; +pub mod web_server_libp2p_fallback; +pub mod web_server_network; + +pub use hotshot_types::traits::network::{ + ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, + NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, +}; + +use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; + +/// Contains the metrics that we're interested in from the networking interfaces +pub(self) struct NetworkingMetrics { + #[allow(dead_code)] + /// A [`Gauge`] which tracks how many peers are connected + pub connected_peers: Box, + /// A [`Counter`] which tracks how many messages have been received + pub incoming_message_count: Box, + /// A [`Counter`] which tracks how many messages have been send + pub outgoing_message_count: Box, + /// A [`Counter`] which tracks how many messages failed to send + pub message_failed_to_send: Box, + // A [`Gauge`] which tracks how many connected entries there are in the gossipsub mesh + // pub gossipsub_mesh_connected: Box, + // A [`Gauge`] which tracks how many kademlia entries there are + // pub kademlia_entries: Box, + // A [`Gauge`] which tracks how many kademlia buckets there are + // pub kademlia_buckets: Box, +} + +impl NetworkingMetrics { + /// Create a new instance of this [`NetworkingMetrics`] struct, setting all the counters and gauges + pub(self) fn new(metrics: &dyn Metrics) -> Self { + Self { + connected_peers: metrics.create_gauge(String::from("connected_peers"), None), + incoming_message_count: metrics + .create_counter(String::from("incoming_message_count"), None), + outgoing_message_count: metrics + .create_counter(String::from("outgoing_message_count"), None), + message_failed_to_send: metrics + .create_counter(String::from("message_failed_to_send"), None), + // gossipsub_mesh_connected: metrics + // .create_gauge(String::from("gossipsub_mesh_connected"), None), + // kademlia_entries: metrics.create_gauge(String::from("kademlia_entries"), None), + // kademlia_buckets: metrics.create_gauge(String::from("kademlia_buckets"), None), + } + } +} diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs new file mode 100644 index 0000000000..c01bf53982 --- /dev/null +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -0,0 +1,884 @@ +//! Libp2p based/production networking implementation +//! This module provides a libp2p based networking implementation where each node in the +//! network forms a tcp or udp connection to a subset of other nodes in the network + +use super::NetworkingMetrics; +use crate::NodeImplementation; +use async_compatibility_layer::{ + art::{async_block_on, async_sleep, async_spawn}, + channel::{unbounded, UnboundedReceiver, UnboundedSender}, +}; +use async_lock::RwLock; +use async_trait::async_trait; +use bimap::BiHashMap; +use bincode::Options; +use hotshot_task::{boxed_sync, BoxSyncFuture}; +use hotshot_types::{ + data::ProposalType, + message::{Message, MessageKind}, + traits::{ + election::Membership, + metrics::{Metrics, NoMetrics}, + network::{ + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, + NetworkError, NetworkMsg, TestableChannelImplementation, + TestableNetworkingImplementation, TransmitType, ViewMessage, + }, + node_implementation::NodeType, + signature_key::SignatureKey, + }, + vote::VoteType, +}; +use hotshot_utils::bincode::bincode_opts; +use libp2p_identity::PeerId; +use libp2p_networking::{ + network::{ + MeshParams, + NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, + NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, + NetworkNodeType, + }, + reexport::Multiaddr, +}; + +use serde::Serialize; +use snafu::ResultExt; +use std::{ + collections::{BTreeSet, HashSet}, + fmt::Debug, + marker::PhantomData, + num::NonZeroUsize, + str::FromStr, + sync::{atomic::AtomicBool, Arc}, + time::Duration, +}; +use tracing::{error, info, instrument}; + +/// hardcoded topic of QC used +pub const QC_TOPIC: &str = "global"; + +/// Stubbed out Ack +#[derive(Serialize)] +pub enum Empty { + /// Empty value + Empty, +} + +impl Debug for Libp2pNetwork { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Libp2p").field("inner", &"inner").finish() + } +} + +/// Type alias for a shared collection of peerid, multiaddrs +pub type PeerInfoVec = Arc, Multiaddr)>>>; + +/// The underlying state of the libp2p network +struct Libp2pNetworkInner { + /// this node's public key + pk: K, + /// handle to control the network + handle: Arc>, + /// Bidirectional map from public key provided by espresso + /// to public key provided by libp2p + pubkey_pid_map: RwLock>, + /// map of known replica peer ids to public keys + broadcast_recv: UnboundedReceiver, + /// Sender for broadcast messages + broadcast_send: UnboundedSender, + /// Sender for direct messages (only used for sending messages back to oneself) + direct_send: UnboundedSender, + /// Receiver for direct messages + direct_recv: UnboundedReceiver, + /// this is really cheating to enable local tests + /// hashset of (bootstrap_addr, peer_id) + bootstrap_addrs: PeerInfoVec, + /// bootstrap + bootstrap_addrs_len: usize, + /// whether or not the network is ready to send + is_ready: Arc, + /// max time before dropping message due to DHT error + dht_timeout: Duration, + /// whether or not we've bootstrapped into the DHT yet + is_bootstrapped: Arc, + /// The networking metrics we're keeping track of + metrics: NetworkingMetrics, + /// topic map + /// hash(hashset) -> topic + /// btreemap ordered so is hashable + topic_map: RwLock, String>>, +} + +/// Networking implementation that uses libp2p +/// generic over `M` which is the message type +#[derive(Clone)] +pub struct Libp2pNetwork { + /// holds the state of the libp2p network + inner: Arc>, +} + +impl> + TestableNetworkingImplementation> + for Libp2pNetwork, TYPES::SignatureKey> +where + MessageKind: ViewMessage, +{ + /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` + /// with the purpose of generating libp2p networks. + /// Generates `num_bootstrap` bootstrap nodes. The remainder of nodes are normal + /// nodes with sane defaults. + /// # Panics + /// Returned function may panic either: + /// - An invalid configuration + /// (probably an issue with the defaults of this function) + /// - An inability to spin up the replica's network + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + _is_da: bool, + ) -> Box Self + 'static> { + assert!( + da_committee_size <= expected_node_count, + "DA committee size must be less than or equal to total # nodes" + ); + let bootstrap_addrs: PeerInfoVec = Arc::default(); + let mut all_keys = BTreeSet::new(); + let mut da_keys = BTreeSet::new(); + + for i in 0u64..(expected_node_count as u64) { + let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; + let pubkey = TYPES::SignatureKey::from_private(&privkey); + if i < da_committee_size as u64 { + da_keys.insert(pubkey.clone()); + } + all_keys.insert(pubkey); + } + + // NOTE uncomment this for easier debugging + // let start_port = 5000; + Box::new({ + move |node_id| { + info!( + "GENERATOR: Node id {:?}, is bootstrap: {:?}", + node_id, + node_id < num_bootstrap as u64 + ); + let addr = + // Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/0/quic-v1")).unwrap(); + Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}{}/quic-v1", 5000 + node_id, network_id)).unwrap(); + let privkey = + TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let pubkey = TYPES::SignatureKey::from_private(&privkey); + // we want the majority of peers to have this lying around. + let replication_factor = NonZeroUsize::new(2 * expected_node_count / 3).unwrap(); + let config = if node_id < num_bootstrap as u64 { + NetworkNodeConfigBuilder::default() + // NOTICE the implicit assumption that bootstrap is less + // than half the network. This seems reasonable. + .mesh_params(Some(MeshParams { + mesh_n_high: expected_node_count, + mesh_n_low: 5, + mesh_outbound_min: 3, + // the worst case of 7/2+3 > 5 + mesh_n: (expected_node_count / 2 + 3), + })) + .replication_factor(replication_factor) + .node_type(NetworkNodeType::Bootstrap) + .bound_addr(Some(addr)) + .to_connect_addrs(HashSet::default()) + // setting to sane defaults + .ttl(None) + .republication_interval(None) + .build() + .unwrap() + } else { + NetworkNodeConfigBuilder::default() + // NOTE I'm hardcoding these because this is probably the MAX + // parameters. If there aren't this many nodes, gossip keeps looking + // for more. That is fine. + .mesh_params(Some(MeshParams { + mesh_n_high: 15, + mesh_n_low: 5, + mesh_outbound_min: 4, + mesh_n: 8, + })) + .replication_factor(replication_factor) + .node_type(NetworkNodeType::Regular) + .bound_addr(Some(addr)) + .to_connect_addrs(HashSet::default()) + // setting to sane defaults + .ttl(None) + .republication_interval(None) + .build() + .unwrap() + }; + let bootstrap_addrs_ref = bootstrap_addrs.clone(); + let keys = all_keys.clone(); + let da = da_keys.clone(); + async_block_on(async move { + Libp2pNetwork::new( + NoMetrics::boxed(), + config, + pubkey, + bootstrap_addrs_ref, + num_bootstrap, + node_id as usize, + keys, + da, + ) + .await + .unwrap() + }) + } + }) + } + + fn in_flight_message_count(&self) -> Option { + None + } +} + +impl Libp2pNetwork { + /// Returns when network is ready + pub async fn wait_for_ready(&self) { + loop { + if self + .inner + .is_ready + .load(std::sync::atomic::Ordering::Relaxed) + { + break; + } + async_sleep(Duration::from_secs(1)).await; + } + info!("LIBP2P: IS READY GOT TRIGGERED!!"); + } + + /// Constructs new network for a node. Note that this network is unconnected. + /// One must call `connect` in order to connect. + /// * `config`: the configuration of the node + /// * `pk`: public key associated with the node + /// * `bootstrap_addrs`: rwlock containing the bootstrap addrs + /// # Errors + /// Returns error in the event that the underlying libp2p network + /// is unable to create a network. + /// + /// # Panics + /// + /// This will panic if there are less than 5 bootstrap nodes + #[allow(clippy::too_many_arguments)] + pub async fn new( + metrics: Box, + config: NetworkNodeConfig, + pk: K, + bootstrap_addrs: Arc, Multiaddr)>>>, + bootstrap_addrs_len: usize, + id: usize, + // HACK + committee_pks: BTreeSet, + da_pks: BTreeSet, + ) -> Result, NetworkError> { + assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); + let network_handle = Arc::new( + NetworkNodeHandle::<()>::new(config, id) + .await + .map_err(Into::::into)?, + ); + + // Make bootstrap mappings known + if matches!( + network_handle.config().node_type, + NetworkNodeType::Bootstrap + ) { + let addr = network_handle.listen_addr(); + let pid = network_handle.peer_id(); + let mut bs_cp = bootstrap_addrs.write().await; + bs_cp.push((Some(pid), addr)); + drop(bs_cp); + } + + let mut pubkey_pid_map = BiHashMap::new(); + + pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); + + let mut topic_map = BiHashMap::new(); + topic_map.insert(committee_pks, QC_TOPIC.to_string()); + topic_map.insert(da_pks, "DA".to_string()); + + let topic_map = RwLock::new(topic_map); + + // unbounded channels may not be the best choice (spammed?) + // if bounded figure out a way to log dropped msgs + let (direct_send, direct_recv) = unbounded(); + let (broadcast_send, broadcast_recv) = unbounded(); + + let result = Libp2pNetwork { + inner: Arc::new(Libp2pNetworkInner { + handle: network_handle, + pubkey_pid_map: RwLock::new(pubkey_pid_map), + broadcast_recv, + direct_send: direct_send.clone(), + direct_recv, + pk, + broadcast_send: broadcast_send.clone(), + bootstrap_addrs_len, + bootstrap_addrs, + is_ready: Arc::new(AtomicBool::new(false)), + dht_timeout: Duration::from_secs(30), + is_bootstrapped: Arc::new(AtomicBool::new(false)), + metrics: NetworkingMetrics::new(&*metrics), + topic_map, + }), + }; + + result.spawn_event_generator(direct_send, broadcast_send); + + result.spawn_connect(id); + + Ok(result) + } + + /// Initiates connection to the outside world + fn spawn_connect(&self, id: usize) { + let pk = self.inner.pk.clone(); + let bootstrap_ref = self.inner.bootstrap_addrs.clone(); + let num_bootstrap = self.inner.bootstrap_addrs_len; + let handle = self.inner.handle.clone(); + let is_bootstrapped = self.inner.is_bootstrapped.clone(); + let node_type = self.inner.handle.config().node_type; + async_spawn({ + let is_ready = self.inner.is_ready.clone(); + async move { + let bs_addrs = loop { + let bss = bootstrap_ref.read().await; + let bs_addrs = bss.clone(); + drop(bss); + if bs_addrs.len() >= num_bootstrap { + break bs_addrs; + } + info!( + "NODE {:?} bs addr len {:?}, number of bootstrap expected {:?}", + id, + bs_addrs.len(), + num_bootstrap + ); + }; + handle.add_known_peers(bs_addrs).await.unwrap(); + + // 10 minute timeout + let timeout_duration = Duration::from_secs(600); + // perform connection + info!("WAITING TO CONNECT ON NODE {:?}", id); + handle + .wait_to_connect(4, id, timeout_duration) + .await + .unwrap(); + + while !is_bootstrapped.load(std::sync::atomic::Ordering::Relaxed) { + async_sleep(Duration::from_secs(1)).await; + } + + handle.subscribe(QC_TOPIC.to_string()).await.unwrap(); + handle.subscribe("DA".to_string()).await.unwrap(); + // TODO figure out some way of passing in ALL keypairs. That way we can add the + // global topic to the topic map + // NOTE this wont' work without this change + + info!( + "peer {:?} waiting for publishing, type: {:?}", + handle.peer_id(), + node_type + ); + + // we want our records published before + // we begin participating in consensus + while handle.put_record(&pk, &handle.peer_id()).await.is_err() { + async_sleep(Duration::from_secs(1)).await; + } + + info!( + "Node {:?} is ready, type: {:?}", + handle.peer_id(), + node_type + ); + + while handle.put_record(&handle.peer_id(), &pk).await.is_err() { + async_sleep(Duration::from_secs(1)).await; + } + + info!( + "node {:?} is barring bootstrap, type: {:?}", + handle.peer_id(), + node_type + ); + + is_ready.store(true, std::sync::atomic::Ordering::Relaxed); + info!("STARTING CONSENSUS ON {:?}", handle.peer_id()); + Ok::<(), NetworkError>(()) + } + }); + } + + /// make network aware of known peers + async fn _add_known_peers( + &self, + known_peers: Vec<(Option, Multiaddr)>, + ) -> Result<(), NetworkError> { + self.inner + .handle + .add_known_peers(known_peers) + .await + .map_err(Into::::into) + } + + /// task to propagate messages to handlers + /// terminates on shut down of network + fn spawn_event_generator( + &self, + direct_send: UnboundedSender, + broadcast_send: UnboundedSender, + ) { + let handle = self.clone(); + let is_bootstrapped = self.inner.is_bootstrapped.clone(); + async_spawn(async move { + while let Ok(msg) = handle.inner.handle.receiver().recv().await { + match msg { + GossipMsg(msg, _topic) => { + let result: Result = bincode_opts().deserialize(&msg); + if let Ok(result) = result { + broadcast_send + .send(result) + .await + .map_err(|_| NetworkError::ChannelSend)?; + } + } + DirectRequest(msg, _pid, chan) => { + let result: Result = bincode_opts() + .deserialize(&msg) + .context(FailedToSerializeSnafu); + if let Ok(result) = result { + direct_send + .send(result) + .await + .map_err(|_| NetworkError::ChannelSend)?; + } + if handle + .inner + .handle + .direct_response(chan, &Empty::Empty) + .await + .is_err() + { + error!("failed to ack!"); + }; + } + DirectResponse(msg, _) => { + let _result: Result = bincode_opts() + .deserialize(&msg) + .context(FailedToSerializeSnafu); + } + NetworkEvent::IsBootstrapped => { + is_bootstrapped.store(true, std::sync::atomic::Ordering::Relaxed); + } + } + } + error!("Network receiever shut down!"); + Ok::<(), NetworkError>(()) + }); + } +} + +#[async_trait] +impl ConnectedNetwork for Libp2pNetwork { + #[instrument(name = "Libp2pNetwork::ready_blocking", skip_all)] + async fn wait_for_ready(&self) { + self.wait_for_ready().await; + } + + #[instrument(name = "Libp2pNetwork::ready_nonblocking", skip_all)] + async fn is_ready(&self) -> bool { + self.inner + .is_ready + .load(std::sync::atomic::Ordering::Relaxed) + } + + #[instrument(name = "Libp2pNetwork::shut_down", skip_all)] + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + if self.inner.handle.is_killed() { + error!("Called shut down when already shut down! Noop."); + } else { + self.inner.handle.shutdown().await.unwrap(); + } + }; + boxed_sync(closure) + } + + #[instrument(name = "Libp2pNetwork::broadcast_message", skip_all)] + async fn broadcast_message( + &self, + message: M, + recipients: BTreeSet, + ) -> Result<(), NetworkError> { + if self.inner.handle.is_killed() { + return Err(NetworkError::ShutDown); + } + + self.wait_for_ready().await; + info!( + "broadcasting msg: {:?} with nodes: {:?} connected", + message, + self.inner.handle.connected_pids().await + ); + + let topic_map = self.inner.topic_map.read().await; + let topic = topic_map + .get_by_left(&recipients) + .ok_or(NetworkError::Libp2p { + source: NetworkNodeHandleError::NoSuchTopic, + })? + .clone(); + error!("Broadcasting to topic {}", topic); + + // gossip doesn't broadcast from itself, so special case + if recipients.contains(&self.inner.pk) { + // send to self + self.inner + .broadcast_send + .send(message.clone()) + .await + .map_err(|_| NetworkError::ShutDown)?; + } + + match self.inner.handle.gossip(topic, &message).await { + Ok(()) => { + self.inner.metrics.outgoing_message_count.add(1); + Ok(()) + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + Err(e.into()) + } + } + } + + #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] + async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { + if self.inner.handle.is_killed() { + return Err(NetworkError::ShutDown); + } + + // short circuit if we're dming ourselves + if recipient == self.inner.pk { + // panic if we already shut down? + self.inner + .direct_send + .send(message) + .await + .map_err(|_x| NetworkError::ShutDown)?; + return Ok(()); + } + + self.wait_for_ready().await; + // check local cache. if that fails, initiate search + // if search fails, just error out + // NOTE: relay may be a good way to fix this in the future . + let pid: PeerId = if let Some(pid) = self + .inner + .pubkey_pid_map + .read() + .await + .get_by_left(&recipient) + { + *pid + } else { + match self + .inner + .handle + .get_record_timeout(&recipient, self.inner.dht_timeout) + .await + { + Ok(r) => r, + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + error!("Failed to message {:?} because could not find recipient peer id for pk {:?}", message, recipient); + return Err(NetworkError::Libp2p { source: e }); + } + } + }; + + if let Err(e) = self.inner.handle.lookup_pid(pid).await { + self.inner.metrics.message_failed_to_send.add(1); + return Err(e.into()); + } + match self.inner.handle.direct_request(pid, &message).await { + Ok(()) => { + self.inner.metrics.outgoing_message_count.add(1); + Ok(()) + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + Err(e.into()) + } + } + } + + #[instrument(name = "Libp2pNetwork::recv_msgs", skip_all)] + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + if self.inner.handle.is_killed() { + Err(NetworkError::ShutDown) + } else { + match transmit_type { + TransmitType::Direct => { + let result = self + .inner + .direct_recv + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner.metrics.incoming_message_count.add(result.len()); + Ok(result) + } + TransmitType::Broadcast => { + let result = self + .inner + .broadcast_recv + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner.metrics.incoming_message_count.add(result.len()); + Ok(result) + } + } + } + }; + boxed_sync(closure) + } + + #[instrument(name = "Libp2pNetwork::lookup_node", skip_all)] + async fn lookup_node(&self, pk: K) -> Result<(), NetworkError> { + self.wait_for_ready().await; + + if self.inner.handle.is_killed() { + return Err(NetworkError::ShutDown); + } + + let maybe_pid = self + .inner + .handle + .get_record_timeout(&pk, self.inner.dht_timeout) + .await + .map_err(Into::::into); + + if let Ok(pid) = maybe_pid { + if self.inner.handle.lookup_pid(pid).await.is_err() { + error!("Failed to look up pid"); + return Err(NetworkError::Libp2p { + source: NetworkNodeHandleError::DHTError { + source: libp2p_networking::network::error::DHTError::NotFound, + }, + }); + }; + } else { + error!("Unable to look up pubkey {:?}", pk); + return Err(NetworkError::Libp2p { + source: NetworkNodeHandleError::DHTError { + source: libp2p_networking::network::error::DHTError::NotFound, + }, + }); + } + + Ok(()) + } + + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { + // Not required + } +} + +/// libp2p identity communication channel +#[derive(Clone, Debug)] +pub struct Libp2pCommChannel< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, +>( + Arc, TYPES::SignatureKey>>, + PhantomData<(TYPES, I, PROPOSAL, VOTE, MEMBERSHIP)>, +); + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > Libp2pCommChannel +{ + /// create a new libp2p communication channel + #[must_use] + pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { + Self(network, PhantomData) + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > TestableNetworkingImplementation> + for Libp2pCommChannel +where + MessageKind: ViewMessage, +{ + /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` + /// with the purpose of generating libp2p networks. + /// Generates `num_bootstrap` bootstrap nodes. The remainder of nodes are normal + /// nodes with sane defaults. + /// # Panics + /// Returned function may panic either: + /// - An invalid configuration + /// (probably an issue with the defaults of this function) + /// - An inability to spin up the replica's network + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generator = , + TYPES::SignatureKey, + > as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ); + Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) + } + + fn in_flight_message_count(&self) -> Option { + None + } +} + +// FIXME maybe we should macro this...? It's repeated at verbatum EXCEPT for impl generics at the +// top +// we don't really want to make this the default implementation because that forces it to require ConnectedNetwork to be implemented. The struct we implement over might use multiple ConnectedNetworks +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> + for Libp2pCommChannel +where + MessageKind: ViewMessage, +{ + type NETWORK = Libp2pNetwork, TYPES::SignatureKey>; + + async fn wait_for_ready(&self) { + self.0.wait_for_ready().await; + } + + async fn is_ready(&self) -> bool { + self.0.is_ready().await + } + + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + self.0.shut_down().await; + }; + boxed_sync(closure) + } + + async fn broadcast_message( + &self, + message: Message, + membership: &MEMBERSHIP, + ) -> Result<(), NetworkError> { + let recipients = >::get_committee( + membership, + message.kind.get_view_number(), + ); + self.0.broadcast_message(message, recipients).await + } + + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + self.0.direct_message(message, recipient).await + } + + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result>, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { self.0.recv_msgs(transmit_type).await }; + boxed_sync(closure) + } + + async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { + self.0.lookup_node(pk).await + } + + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { + // Not required + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > + TestableChannelImplementation< + TYPES, + Message, + PROPOSAL, + VOTE, + MEMBERSHIP, + Libp2pNetwork, TYPES::SignatureKey>, + > for Libp2pCommChannel +{ + fn generate_network( + ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> + { + Box::new(move |network| Libp2pCommChannel::new(network)) + } +} diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs new file mode 100644 index 0000000000..8cdd70351f --- /dev/null +++ b/hotshot/src/traits/networking/memory_network.rs @@ -0,0 +1,978 @@ +//! In memory network simulator +//! +//! This module provides an in-memory only simulation of an actual network, useful for unit and +//! integration tests. + +use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetrics}; +use crate::NodeImplementation; +use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + channel::{bounded, Receiver, SendError, Sender}, +}; +use async_lock::{Mutex, RwLock}; +use async_trait::async_trait; +use bincode::Options; +use dashmap::DashMap; +use futures::StreamExt; +use hotshot_task::{boxed_sync, BoxSyncFuture}; +use hotshot_types::{ + data::ProposalType, + message::{Message, MessageKind}, + traits::{ + election::Membership, + metrics::{Metrics, NoMetrics}, + network::{ + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkMsg, + TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, + ViewMessage, + }, + node_implementation::NodeType, + signature_key::SignatureKey, + }, + vote::VoteType, +}; +use hotshot_utils::bincode::bincode_opts; +use rand::Rng; +use snafu::ResultExt; +use std::{ + collections::BTreeSet, + fmt::Debug, + marker::PhantomData, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; +use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; + +#[derive(Debug, Clone, Copy)] +/// dummy implementation of network reliability +pub struct DummyReliability {} +impl NetworkReliability for DummyReliability { + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> std::time::Duration { + std::time::Duration::ZERO + } +} + +/// Shared state for in-memory mock networking. +/// +/// This type is responsible for keeping track of the channels to each [`MemoryNetwork`], and is +/// used to group the [`MemoryNetwork`] instances. +#[derive(custom_debug::Debug)] +pub struct MasterMap { + /// The list of `MemoryNetwork`s + #[debug(skip)] + map: DashMap>, + /// The id of this `MemoryNetwork` cluster + id: u64, +} + +impl MasterMap { + /// Create a new, empty, `MasterMap` + #[must_use] + pub fn new() -> Arc> { + Arc::new(MasterMap { + map: DashMap::new(), + id: rand::thread_rng().gen(), + }) + } +} + +/// Internal enum for combining streams +enum Combo { + /// Direct message + Direct(T), + /// Broadcast message + Broadcast(T), +} + +/// Internal state for a `MemoryNetwork` instance +struct MemoryNetworkInner { + /// Input for broadcast messages + broadcast_input: RwLock>>>, + /// Input for direct messages + direct_input: RwLock>>>, + /// Output for broadcast messages + broadcast_output: Mutex>, + /// Output for direct messages + direct_output: Mutex>, + /// The master map + master_map: Arc>, + + /// Count of messages that are in-flight (send but not processed yet) + in_flight_message_count: AtomicUsize, + + /// The networking metrics we're keeping track of + metrics: NetworkingMetrics, +} + +/// In memory only network simulator. +/// +/// This provides an in memory simulation of a networking implementation, allowing nodes running on +/// the same machine to mock networking while testing other functionality. +/// +/// Under the hood, this simply maintains mpmc channels to every other `MemoryNetwork` insane of the +/// same group. +#[derive(Clone)] +pub struct MemoryNetwork { + /// The actual internal state + inner: Arc>, +} + +impl Debug for MemoryNetwork { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MemoryNetwork") + .field("inner", &"inner") + .finish() + } +} + +impl MemoryNetwork { + /// Creates a new `MemoryNetwork` and hooks it up to the group through the provided `MasterMap` + #[instrument(skip(metrics))] + pub fn new( + pub_key: K, + metrics: Box, + master_map: Arc>, + reliability_config: Option>, + ) -> MemoryNetwork { + info!("Attaching new MemoryNetwork"); + let (broadcast_input, broadcast_task_recv) = bounded(128); + let (direct_input, direct_task_recv) = bounded(128); + let (broadcast_task_send, broadcast_output) = bounded(128); + let (direct_task_send, direct_output) = bounded(128); + let in_flight_message_count = AtomicUsize::new(0); + trace!("Channels open, spawning background task"); + + async_spawn( + async move { + debug!("Starting background task"); + // direct input is right stream + let direct = direct_task_recv.into_stream().map(Combo::>::Direct); + // broadcast input is left stream + let broadcast = broadcast_task_recv + .into_stream() + .map(Combo::>::Broadcast); + // Combine the streams + let mut combined = futures::stream::select(direct, broadcast); + trace!("Entering processing loop"); + while let Some(message) = combined.next().await { + match message { + Combo::Direct(vec) => { + trace!(?vec, "Incoming direct message"); + // Attempt to decode message + let x = bincode_opts().deserialize(&vec); + match x { + Ok(x) => { + let dts = direct_task_send.clone(); + if let Some(r) = reliability_config.clone() { + async_spawn(async move { + if r.sample_keep() { + let delay = r.sample_delay(); + if delay > std::time::Duration::ZERO { + async_sleep(delay).await; + } + let res = dts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); + } else { + error!("Output queue receivers are shutdown"); + } + } else { + warn!("dropping packet!"); + } + }); + } else { + let res = dts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); + } else { + error!("Output queue receivers are shutdown"); + } + } + } + Err(e) => { + warn!(?e, "Failed to decode incoming message, skipping"); + } + } + } + Combo::Broadcast(vec) => { + trace!(?vec, "Incoming broadcast message"); + // Attempt to decode message + let x = bincode_opts().deserialize(&vec); + match x { + Ok(x) => { + let bts = broadcast_task_send.clone(); + if let Some(r) = reliability_config.clone() { + async_spawn(async move { + if r.sample_keep() { + let delay = r.sample_delay(); + if delay > std::time::Duration::ZERO { + async_sleep(delay).await; + } + let res = bts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); + } else { + warn!("dropping packet!"); + } + } + }); + } else { + let res = bts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); + } else { + warn!("dropping packet!"); + } + } + } + Err(e) => { + warn!(?e, "Failed to decode incoming message, skipping"); + } + } + } + } + } + warn!("Stream shutdown"); + } + .instrument(info_span!("MemoryNetwork Background task", map = ?master_map)), + ); + trace!("Notifying other networks of the new connected peer"); + trace!("Task spawned, creating MemoryNetwork"); + let mn = MemoryNetwork { + inner: Arc::new(MemoryNetworkInner { + broadcast_input: RwLock::new(Some(broadcast_input)), + direct_input: RwLock::new(Some(direct_input)), + broadcast_output: Mutex::new(broadcast_output), + direct_output: Mutex::new(direct_output), + master_map: master_map.clone(), + in_flight_message_count, + metrics: NetworkingMetrics::new(&*metrics), + }), + }; + master_map.map.insert(pub_key, mn.clone()); + trace!("Master map updated"); + + mn + } + + /// Send a [`Vec`] message to the inner `broadcast_input` + async fn broadcast_input(&self, message: Vec) -> Result<(), SendError>> { + self.inner + .in_flight_message_count + .fetch_add(1, Ordering::Relaxed); + let input = self.inner.broadcast_input.read().await; + if let Some(input) = &*input { + self.inner.metrics.outgoing_message_count.add(1); + input.send(message).await + } else { + Err(SendError(message)) + } + } + + /// Send a [`Vec`] message to the inner `direct_input` + async fn direct_input(&self, message: Vec) -> Result<(), SendError>> { + self.inner + .in_flight_message_count + .fetch_add(1, Ordering::Relaxed); + let input = self.inner.direct_input.read().await; + if let Some(input) = &*input { + self.inner.metrics.outgoing_message_count.add(1); + input.send(message).await + } else { + Err(SendError(message)) + } + } +} + +impl> + TestableNetworkingImplementation> + for MemoryNetwork, TYPES::SignatureKey> +{ + fn generator( + _expected_node_count: usize, + _num_bootstrap: usize, + _network_id: usize, + _da_committee_size: usize, + _is_da: bool, + ) -> Box Self + 'static> { + let master: Arc<_> = MasterMap::new(); + Box::new(move |node_id| { + let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let pubkey = TYPES::SignatureKey::from_private(&privkey); + MemoryNetwork::new(pubkey, NoMetrics::boxed(), master.clone(), None) + }) + } + + fn in_flight_message_count(&self) -> Option { + Some(self.inner.in_flight_message_count.load(Ordering::Relaxed)) + } +} + +// TODO instrument these functions +#[async_trait] +impl ConnectedNetwork for MemoryNetwork { + #[instrument(name = "MemoryNetwork::ready_blocking")] + async fn wait_for_ready(&self) {} + + #[instrument(name = "MemoryNetwork::ready_nonblocking")] + async fn is_ready(&self) -> bool { + true + } + + #[instrument(name = "MemoryNetwork::shut_down")] + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + *self.inner.broadcast_input.write().await = None; + *self.inner.direct_input.write().await = None; + }; + boxed_sync(closure) + } + + #[instrument(name = "MemoryNetwork::broadcast_message")] + async fn broadcast_message( + &self, + message: M, + recipients: BTreeSet, + ) -> Result<(), NetworkError> { + debug!(?message, "Broadcasting message"); + // Bincode the message + let vec = bincode_opts() + .serialize(&message) + .context(FailedToSerializeSnafu)?; + trace!("Message bincoded, sending"); + for node in self.inner.master_map.map.iter() { + let (key, node) = node.pair(); + if !recipients.contains(key) { + continue; + } + trace!(?key, "Sending message to node"); + let res = node.broadcast_input(vec.clone()).await; + match res { + Ok(_) => { + self.inner.metrics.outgoing_message_count.add(1); + trace!(?key, "Delivered message to remote"); + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + warn!(?e, ?key, "Error sending broadcast message to node"); + } + } + } + Ok(()) + } + + #[instrument(name = "MemoryNetwork::direct_message")] + async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { + debug!(?message, ?recipient, "Sending direct message"); + // Bincode the message + let vec = bincode_opts() + .serialize(&message) + .context(FailedToSerializeSnafu)?; + trace!("Message bincoded, finding recipient"); + if let Some(node) = self.inner.master_map.map.get(&recipient) { + let node = node.value(); + let res = node.direct_input(vec).await; + match res { + Ok(_) => { + self.inner.metrics.outgoing_message_count.add(1); + trace!(?recipient, "Delivered message to remote"); + Ok(()) + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + warn!(?e, ?recipient, "Error delivering direct message"); + Err(NetworkError::CouldNotDeliver) + } + } + } else { + self.inner.metrics.message_failed_to_send.add(1); + warn!( + "{:#?} {:#?} {:#?}", + recipient, self.inner.master_map.map, "Node does not exist in map" + ); + Err(NetworkError::NoSuchNode) + } + } + + #[instrument(name = "MemoryNetwork::recv_msgs", skip_all)] + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + match transmit_type { + TransmitType::Direct => { + let ret = self + .inner + .direct_output + .lock() + .await + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner + .in_flight_message_count + .fetch_sub(ret.len(), Ordering::Relaxed); + self.inner.metrics.incoming_message_count.add(ret.len()); + Ok(ret) + } + TransmitType::Broadcast => { + let ret = self + .inner + .broadcast_output + .lock() + .await + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner + .in_flight_message_count + .fetch_sub(ret.len(), Ordering::Relaxed); + self.inner.metrics.incoming_message_count.add(ret.len()); + Ok(ret) + } + } + }; + boxed_sync(closure) + } + + #[instrument(name = "MemoryNetwork::lookup_node", skip_all)] + async fn lookup_node(&self, _pk: K) -> Result<(), NetworkError> { + // no lookup required + Ok(()) + } + + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { + // Not required + } +} + +/// memory identity communication channel +#[derive(Clone, Debug)] +pub struct MemoryCommChannel< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, +>( + Arc, TYPES::SignatureKey>>, + PhantomData<(I, PROPOSAL, VOTE, MEMBERSHIP)>, +); + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > MemoryCommChannel +{ + /// create new communication channel + #[must_use] + pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { + Self(network, PhantomData) + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > TestableNetworkingImplementation> + for MemoryCommChannel +where + MessageKind: ViewMessage, +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generator = , + TYPES::SignatureKey, + > as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ); + Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) + } + + fn in_flight_message_count(&self) -> Option { + Some(self.0.inner.in_flight_message_count.load(Ordering::Relaxed)) + } +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> + for MemoryCommChannel +where + MessageKind: ViewMessage, +{ + type NETWORK = MemoryNetwork, TYPES::SignatureKey>; + + async fn wait_for_ready(&self) { + self.0.wait_for_ready().await; + } + + async fn is_ready(&self) -> bool { + self.0.is_ready().await + } + + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + self.0.shut_down().await; + }; + boxed_sync(closure) + } + + async fn broadcast_message( + &self, + message: Message, + election: &MEMBERSHIP, + ) -> Result<(), NetworkError> { + let recipients = >::get_committee( + election, + message.kind.get_view_number(), + ); + self.0.broadcast_message(message, recipients).await + } + + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + self.0.direct_message(message, recipient).await + } + + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result>, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { self.0.recv_msgs(transmit_type).await }; + boxed_sync(closure) + } + + async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { + self.0.lookup_node(pk).await + } + + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { + // Not required + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > + TestableChannelImplementation< + TYPES, + Message, + PROPOSAL, + VOTE, + MEMBERSHIP, + MemoryNetwork, TYPES::SignatureKey>, + > for MemoryCommChannel +{ + fn generate_network( + ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> + { + Box::new(move |network| MemoryCommChannel::new(network)) + } +} + +#[cfg(test)] +// panic in tests +#[allow(clippy::panic)] +mod tests { + // use super::*; + // use crate::{ + // demos::vdemo::{Addition, Subtraction, VDemoBlock, VDemoState, VDemoTransaction}, + // traits::election::static_committee::{ + // GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, + // }, + // }; + // + // use crate::traits::implementations::MemoryStorage; + // use async_compatibility_layer::logging::setup_logging; + // use hotshot_types::traits::election::QuorumExchange; + // use hotshot_types::traits::node_implementation::{ChannelMaps, ValidatingExchanges}; + // use hotshot_types::{ + // data::ViewNumber, + // message::{DataMessage, MessageKind, ValidatingMessage}, + // traits::{ + // signature_key::ed25519::{Ed25519Priv, Ed25519Pub}, + // state::ConsensusTime, + // }, + // vote::QuorumVote, + // }; + // use hotshot_types::{ + // data::{ValidatingLeaf, ValidatingProposal}, + // traits::consensus_type::validating_consensus::ValidatingConsensus, + // }; + // use serde::{Deserialize, Serialize}; + // + // #[derive( + // Copy, + // Clone, + // Debug, + // Default, + // Hash, + // PartialEq, + // Eq, + // PartialOrd, + // Ord, + // serde::Serialize, + // serde::Deserialize, + // )] + // struct Test {} + // #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] + // struct TestImpl {} + // + // // impl NetworkMsg for Test {} + // + // impl NodeType for Test { + // // TODO (da) can this be SequencingConsensus? + // type ConsensusType = ValidatingConsensus; + // type Time = ViewNumber; + // type BlockType = VDemoBlock; + // type SignatureKey = Ed25519Pub; + // type VoteTokenType = StaticVoteToken; + // type Transaction = VDemoTransaction; + // type ElectionConfigType = StaticElectionConfig; + // type StateType = VDemoState; + // } + // + // type TestMembership = GeneralStaticCommittee; + // type TestNetwork = MemoryCommChannel; + // + // impl NodeImplementation for TestImpl { + // type ConsensusMessage = ValidatingMessage; + // type Exchanges = ValidatingExchanges< + // Test, + // Message, + // QuorumExchange< + // Test, + // TestLeaf, + // TestProposal, + // TestMembership, + // TestNetwork, + // Message, + // >, + // ViewSyncExchange>, + // >; + // type Leaf = TestLeaf; + // type Storage = MemoryStorage; + // + // fn new_channel_maps( + // start_view: ViewNumber, + // ) -> (ChannelMaps, Option>) { + // (ChannelMaps::new(start_view), None) + // } + // } + // + // type TestLeaf = ValidatingLeaf; + // type TestVote = QuorumVote; + // type TestProposal = ValidatingProposal; + // + // /// fake Eq + // /// we can't compare the votetokentype for equality, so we can't + // /// derive EQ on `VoteType` and thereby message + // /// we are only sending data messages, though so we compare key and + // /// data message + // fn fake_message_eq(message_1: Message, message_2: Message) { + // assert_eq!(message_1.sender, message_2.sender); + // if let MessageKind::Data(DataMessage::SubmitTransaction(d_1, _)) = message_1.kind { + // if let MessageKind::Data(DataMessage::SubmitTransaction(d_2, _)) = message_2.kind { + // assert_eq!(d_1, d_2); + // } + // } else { + // panic!("Got unexpected message type in memory test!"); + // } + // } + // + // #[instrument] + // fn get_pubkey() -> Ed25519Pub { + // let priv_key = Ed25519Priv::generate(); + // Ed25519Pub::from_private(&priv_key) + // } + // + // /// create a message + // fn gen_messages(num_messages: u64, seed: u64, pk: Ed25519Pub) -> Vec> { + // let mut messages = Vec::new(); + // for i in 0..num_messages { + // let message = Message { + // sender: pk, + // kind: MessageKind::Data(DataMessage::SubmitTransaction( + // VDemoTransaction { + // add: Addition { + // account: "A".to_string(), + // amount: 50 + i + seed, + // }, + // sub: Subtraction { + // account: "B".to_string(), + // amount: 50 + i + seed, + // }, + // nonce: seed + i, + // padding: vec![50; 0], + // }, + // ::new(0), + // )), + // _phantom: PhantomData, + // }; + // messages.push(message); + // } + // messages + // } + // + // // Spawning a single MemoryNetwork should produce no errors + // #[cfg_attr( + // feature = "tokio-executor", + // tokio::test(flavor = "multi_thread", worker_threads = 2) + // )] + // #[cfg_attr(feature = "async-std-executor", async_std::test)] + // #[instrument] + // async fn spawn_single() { + // setup_logging(); + // let group: Arc, ::SignatureKey>> = + // MasterMap::new(); + // trace!(?group); + // let pub_key = get_pubkey(); + // let _network = MemoryNetwork::new(pub_key, NoMetrics::boxed(), group, Option::None); + // } + // + // // // Spawning a two MemoryNetworks and connecting them should produce no errors + // #[cfg_attr( + // feature = "tokio-executor", + // tokio::test(flavor = "multi_thread", worker_threads = 2) + // )] + // #[cfg_attr(feature = "async-std-executor", async_std::test)] + // #[instrument] + // async fn spawn_double() { + // setup_logging(); + // let group: Arc, ::SignatureKey>> = + // MasterMap::new(); + // trace!(?group); + // let pub_key_1 = get_pubkey(); + // let _network_1 = + // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + // let pub_key_2 = get_pubkey(); + // let _network_2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + // } + // + // // Check to make sure direct queue works + // #[cfg_attr( + // feature = "tokio-executor", + // tokio::test(flavor = "multi_thread", worker_threads = 2) + // )] + // #[cfg_attr(feature = "async-std-executor", async_std::test)] + // #[allow(deprecated)] + // #[instrument] + // async fn direct_queue() { + // setup_logging(); + // // Create some dummy messages + // + // // Make and connect the networking instances + // let group: Arc, ::SignatureKey>> = + // MasterMap::new(); + // trace!(?group); + // let pub_key_1 = get_pubkey(); + // let network1 = + // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + // let pub_key_2 = get_pubkey(); + // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + // + // let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + // + // // Test 1 -> 2 + // // Send messages + // for sent_message in first_messages { + // network1 + // .direct_message(sent_message.clone(), pub_key_2) + // .await + // .expect("Failed to message node"); + // let mut recv_messages = network2 + // .recv_msgs(TransmitType::Direct) + // .await + // .expect("Failed to receive message"); + // let recv_message = recv_messages.pop().unwrap(); + // assert!(recv_messages.is_empty()); + // fake_message_eq(sent_message, recv_message); + // } + // + // let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + // + // // Test 2 -> 1 + // // Send messages + // for sent_message in second_messages { + // network2 + // .direct_message(sent_message.clone(), pub_key_1) + // .await + // .expect("Failed to message node"); + // let mut recv_messages = network1 + // .recv_msgs(TransmitType::Direct) + // .await + // .expect("Failed to receive message"); + // let recv_message = recv_messages.pop().unwrap(); + // assert!(recv_messages.is_empty()); + // fake_message_eq(sent_message, recv_message); + // } + // } + // + // // Check to make sure direct queue works + // #[cfg_attr( + // feature = "tokio-executor", + // tokio::test(flavor = "multi_thread", worker_threads = 2) + // )] + // #[cfg_attr(feature = "async-std-executor", async_std::test)] + // #[allow(deprecated)] + // #[instrument] + // async fn broadcast_queue() { + // setup_logging(); + // // Make and connect the networking instances + // let group: Arc, ::SignatureKey>> = + // MasterMap::new(); + // trace!(?group); + // let pub_key_1 = get_pubkey(); + // let network1 = + // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + // let pub_key_2 = get_pubkey(); + // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + // + // let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + // + // // Test 1 -> 2 + // // Send messages + // for sent_message in first_messages { + // network1 + // .broadcast_message( + // sent_message.clone(), + // vec![pub_key_2].into_iter().collect::>(), + // ) + // .await + // .expect("Failed to message node"); + // let mut recv_messages = network2 + // .recv_msgs(TransmitType::Broadcast) + // .await + // .expect("Failed to receive message"); + // let recv_message = recv_messages.pop().unwrap(); + // assert!(recv_messages.is_empty()); + // fake_message_eq(sent_message, recv_message); + // } + // + // let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + // + // // Test 2 -> 1 + // // Send messages + // for sent_message in second_messages { + // network2 + // .broadcast_message( + // sent_message.clone(), + // vec![pub_key_1].into_iter().collect::>(), + // ) + // .await + // .expect("Failed to message node"); + // let mut recv_messages = network1 + // .recv_msgs(TransmitType::Broadcast) + // .await + // .expect("Failed to receive message"); + // let recv_message = recv_messages.pop().unwrap(); + // assert!(recv_messages.is_empty()); + // fake_message_eq(sent_message, recv_message); + // } + // } + // + // #[cfg_attr( + // feature = "tokio-executor", + // tokio::test(flavor = "multi_thread", worker_threads = 2) + // )] + // #[cfg_attr(feature = "async-std-executor", async_std::test)] + // #[instrument] + // #[allow(deprecated)] + // async fn test_in_flight_message_count() { + // // setup_logging(); + // + // // let group: Arc, ::SignatureKey>> = + // // MasterMap::new(); + // // trace!(?group); + // // let pub_key_1 = get_pubkey(); + // // let network1 = + // // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + // // let pub_key_2 = get_pubkey(); + // // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + // + // // // Create some dummy messages + // // let messages: Vec> = gen_messages(5, 100, pub_key_1); + // + // // // assert_eq!(network1.in_flight_message_count(), Some(0)); + // // // assert_eq!(network2.in_flight_message_count(), Some(0)); + // + // // for (_count, message) in messages.iter().enumerate() { + // // network1 + // // .direct_message(message.clone(), pub_key_2) + // // .await + // // .unwrap(); + // // // network 2 has received `count` broadcast messages and `count + 1` direct messages + // // // assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); + // + // // // network2.broadcast_message(message.clone()).await.unwrap(); + // // // network 1 has received `count` broadcast messages + // // // assert_eq!(network1.in_flight_message_count(), Some(count + 1)); + // + // // // network 2 has received `count + 1` broadcast messages and `count + 1` direct messages + // // // assert_eq!(network2.in_flight_message_count(), Some((count + 1) * 2)); + // // } + // + // // for _count in (0..messages.len()).rev() { + // // network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); + // // // assert_eq!(network1.in_flight_message_count(), Some(count)); + // + // // network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); + // // network2.recv_msgs(TransmitType::Direct).await.unwrap(); + // // // assert_eq!(network2.in_flight_message_count(), Some(count * 2)); + // // } + // + // // // assert_eq!(network1.in_flight_message_count(), Some(0)); + // // // assert_eq!(network2.in_flight_message_count(), Some(0)); + // } +} diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs new file mode 100644 index 0000000000..723c7c071d --- /dev/null +++ b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs @@ -0,0 +1,318 @@ +//! Networking Implementation that has a primary and a fallback newtork. If the primary +//! Errors we will use the backup to send or receive +use super::NetworkError; +use crate::{ + traits::implementations::{Libp2pNetwork, WebServerNetwork}, + NodeImplementation, +}; + +use async_trait::async_trait; + +use futures::join; + +use hotshot_task::{boxed_sync, BoxSyncFuture}; +use hotshot_types::{ + data::ProposalType, + message::Message, + traits::{ + election::Membership, + network::{ + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, + TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, + ViewMessage, + }, + node_implementation::NodeType, + }, + vote::VoteType, +}; +use std::{marker::PhantomData, sync::Arc}; +use tracing::error; +/// A communication channel with 2 networks, where we can fall back to the slower network if the +/// primary fails +#[derive(Clone, Debug)] +pub struct WebServerWithFallbackCommChannel< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +> { + /// The two networks we'll use for send/recv + networks: Arc>, +} + +impl, MEMBERSHIP: Membership> + WebServerWithFallbackCommChannel +{ + /// Constructor + #[must_use] + pub fn new(networks: Arc>) -> Self { + Self { networks } + } + + /// Get a ref to the primary network + #[must_use] + pub fn network(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { + &self.networks.0 + } + + /// Get a ref to the backup network + #[must_use] + pub fn fallback(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { + &self.networks.1 + } +} + +/// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` +/// We need this so we can impl `TestableNetworkingImplementation` +/// on the tuple +#[derive(Debug, Clone)] +pub struct CombinedNetworks< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +>( + pub WebServerNetwork, TYPES::SignatureKey, TYPES>, + pub Libp2pNetwork, TYPES::SignatureKey>, + pub PhantomData, +); + +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for CombinedNetworks +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generators = ( + , + TYPES::SignatureKey, + TYPES, + > as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ), + , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ) + ); + Box::new(move |node_id| { + CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for WebServerWithFallbackCommChannel +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generator = as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ); + Box::new(move |node_id| Self { + networks: generator(node_id).into(), + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> + for WebServerWithFallbackCommChannel +{ + type NETWORK = CombinedNetworks; + + async fn wait_for_ready(&self) { + join!( + self.network().wait_for_ready(), + self.fallback().wait_for_ready() + ); + } + + async fn is_ready(&self) -> bool { + self.network().is_ready().await && self.fallback().is_ready().await + } + + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + join!(self.network().shut_down(), self.fallback().shut_down()); + }; + boxed_sync(closure) + } + + async fn broadcast_message( + &self, + message: Message, + election: &MEMBERSHIP, + ) -> Result<(), NetworkError> { + let recipients = + >::get_committee(election, message.get_view_number()); + let fallback = self + .fallback() + .broadcast_message(message.clone(), recipients.clone()); + let network = self.network().broadcast_message(message, recipients); + match join!(fallback, network) { + (Err(e1), Err(e2)) => { + error!( + "Both network broadcasts failed primary error: {}, fallback error: {}", + e1, e2 + ); + Err(e1) + } + (Err(e), _) => { + error!("Failed primary broadcast with error: {}", e); + Ok(()) + } + (_, Err(e)) => { + error!("Failed backup broadcast with error: {}", e); + Ok(()) + } + _ => Ok(()), + } + } + + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + match self + .network() + .direct_message(message.clone(), recipient.clone()) + .await + { + Ok(_) => Ok(()), + Err(e) => { + error!( + "Falling back on direct message, error on primary network: {}", + e + ); + self.fallback().direct_message(message, recipient).await + } + } + } + + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result>, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + match self.network().recv_msgs(transmit_type).await { + Ok(msgs) => Ok(msgs), + Err(e) => { + error!( + "Falling back on recv message, error on primary network: {}", + e + ); + self.fallback().recv_msgs(transmit_type).await + } + } + }; + boxed_sync(closure) + } + + async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { + match join!( + self.network().lookup_node(pk.clone()), + self.fallback().lookup_node(pk) + ) { + (Err(e1), Err(e2)) => { + error!( + "Both network lookups failed primary error: {}, fallback error: {}", + e1, e2 + ); + Err(e1) + } + (Err(e), _) => { + error!("Failed primary lookup with error: {}", e); + Ok(()) + } + (_, Err(e)) => { + error!("Failed backup lookup with error: {}", e); + Ok(()) + } + _ => Ok(()), + } + } + + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(self.network(), event) + .await; + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > + TestableChannelImplementation< + TYPES, + Message, + PROPOSAL, + VOTE, + MEMBERSHIP, + CombinedNetworks, + > for WebServerWithFallbackCommChannel +{ + fn generate_network() -> Box) -> Self + 'static> { + Box::new(move |network| WebServerWithFallbackCommChannel::new(network)) + } +} diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs new file mode 100644 index 0000000000..e0001b94ab --- /dev/null +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -0,0 +1,1112 @@ +//! A network implementation that connects to a web server. +//! +//! To run the web server, see the `./web_server/` folder in this repo. +//! + +use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; + +use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + channel::{oneshot, OneShotSender}, +}; +use async_lock::RwLock; +use async_trait::async_trait; +use hotshot_task::{boxed_sync, BoxSyncFuture}; +use hotshot_types::{ + data::ProposalType, + message::{Message, MessagePurpose}, + traits::{ + election::Membership, + network::{ + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, + TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, + WebServerNetworkError, + }, + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + }, + vote::VoteType, +}; +use hotshot_web_server::{self, config}; +use rand::random; +use serde::{Deserialize, Serialize}; + +use hotshot_types::traits::network::ViewMessage; +use std::{ + collections::{hash_map::Entry, BTreeSet, HashMap}, + marker::PhantomData, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; +use surf_disco::error::ClientError; +use tracing::{debug, error, info}; +/// Represents the communication channel abstraction for the web server +#[derive(Clone, Debug)] +pub struct WebCommChannel< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, +>( + Arc, TYPES::SignatureKey, TYPES>>, + PhantomData<(MEMBERSHIP, I, PROPOSAL, VOTE)>, +); + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > WebCommChannel +{ + /// Create new communication channel + #[must_use] + pub fn new( + network: Arc, TYPES::SignatureKey, TYPES>>, + ) -> Self { + Self(network, PhantomData) + } +} + +/// The web server network state +#[derive(Clone, Debug)] +pub struct WebServerNetwork { + /// The inner, core state of the web server network + inner: Arc>, + /// An optional shutdown signal. This is only used when this connection is created through the `TestableNetworkingImplementation` API. + server_shutdown_signal: Option>>, +} + +impl WebServerNetwork { + /// Post a message to the web server and return the result + async fn post_message_to_web_server(&self, message: SendMsg) -> Result<(), NetworkError> { + let result: Result<(), ClientError> = self + .inner + .client + .post(&message.get_endpoint()) + .body_binary(&message.get_message()) + .unwrap() + .send() + .await; + // error!("POST message error for endpoint {} is {:?}", &message.get_endpoint(), result.clone()); + result.map_err(|_e| NetworkError::WebServer { + source: WebServerNetworkError::ClientError, + }) + } +} + +/// Represents the core of web server networking +#[derive(Debug)] +struct Inner { + /// Phantom data for generic types + phantom: PhantomData<(KEY, TYPES::ElectionConfigType)>, + /// Our own key + _own_key: TYPES::SignatureKey, + /// Queue for broadcasted messages + broadcast_poll_queue: Arc>>>, + /// Queue for direct messages + direct_poll_queue: Arc>>>, + /// Client is running + running: AtomicBool, + /// The web server connection is ready + connected: AtomicBool, + /// The connectioni to the web server + client: surf_disco::Client, + /// The duration to wait between poll attempts + wait_between_polls: Duration, + /// Whether we are connecting to a DA server + is_da: bool, + + /// The last tx_index we saw from the web server + tx_index: Arc>, + + // TODO ED This should be TYPES::Time + // Theoretically there should never be contention for this lock... + /// Task map for quorum proposals. + proposal_task_map: Arc>>>, + /// Task map for quorum votes. + vote_task_map: Arc>>>, + /// Task map for DACs. + dac_task_map: Arc>>>, + /// Task map for view sync certificates. + view_sync_cert_task_map: Arc>>>, + /// Task map for view sync votes. + view_sync_vote_task_map: Arc>>>, + /// Task map for transactions + txn_task_map: Arc>>>, +} + +impl Inner { + #![allow(clippy::too_many_lines)] + /// Pull a web server. + async fn poll_web_server( + &self, + receiver: UnboundedReceiver, + message_purpose: MessagePurpose, + view_number: u64, + ) -> Result<(), NetworkError> { + let mut vote_index = 0; + let mut tx_index = 0; + + if message_purpose == MessagePurpose::Data { + tx_index = *self.tx_index.read().await; + debug!("Previous tx index was {}", tx_index); + }; + + while self.running.load(Ordering::Relaxed) { + let endpoint = match message_purpose { + MessagePurpose::Proposal => config::get_proposal_route(view_number), + MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), + MessagePurpose::Data => config::get_transactions_route(tx_index), + MessagePurpose::Internal => unimplemented!(), + MessagePurpose::ViewSyncProposal => { + config::get_view_sync_proposal_route(view_number, vote_index) + } + MessagePurpose::ViewSyncVote => { + config::get_view_sync_vote_route(view_number, vote_index) + } + MessagePurpose::DAC => config::get_da_certificate_route(view_number), + }; + + if message_purpose == MessagePurpose::Data { + let possible_message = self.get_txs_from_web_server(endpoint).await; + match possible_message { + Ok(Some((index, deserialized_messages))) => { + let mut broadcast_poll_queue = self.broadcast_poll_queue.write().await; + if index > tx_index + 1 { + debug!("missed txns from {} to {}", tx_index + 1, index - 1); + tx_index = index - 1; + } + for tx in &deserialized_messages { + tx_index += 1; + broadcast_poll_queue.push(tx.clone()); + } + debug!("tx index is {}", tx_index); + } + Ok(None) => { + async_sleep(self.wait_between_polls).await; + } + Err(_e) => { + async_sleep(self.wait_between_polls).await; + } + } + } else { + let possible_message = self.get_message_from_web_server(endpoint).await; + + match possible_message { + Ok(Some(deserialized_messages)) => { + match message_purpose { + MessagePurpose::Data => { + error!("We should not receive transactions in this function"); + } + MessagePurpose::Proposal => { + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + // Wait for the view to change before polling for proposals again + // let event = receiver.recv().await; + // match event { + // Ok(event) => view_number = event.view_number(), + // Err(_r) => { + // error!("Proposal receiver error! It was likely shutdown") + // } + // } + } + MessagePurpose::Vote => { + // error!( + // "Received {} votes from web server for view {} is da {}", + // deserialized_messages.len(), + // view_number, + // self.is_da + // ); + let mut direct_poll_queue = self.direct_poll_queue.write().await; + for vote in &deserialized_messages { + vote_index += 1; + direct_poll_queue.push(vote.clone()); + } + } + MessagePurpose::DAC => { + debug!( + "Received DAC from web server for view {} {}", + view_number, self.is_da + ); + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + // return if we found a DAC, since there will only be 1 per view + // In future we should check to make sure DAC is valid + return Ok(()); + } + MessagePurpose::ViewSyncVote => { + // error!( + // "Received {} view sync votes from web server for view {} is da {}", + // deserialized_messages.len(), + // view_number, + // self.is_da + // ); + let mut direct_poll_queue = self.direct_poll_queue.write().await; + for vote in &deserialized_messages { + vote_index += 1; + direct_poll_queue.push(vote.clone()); + } + } + MessagePurpose::ViewSyncProposal => { + // error!( + // "Received {} view sync certs from web server for view {} is da {}", + // deserialized_messages.len(), + // view_number, + // self.is_da + // ); + let mut broadcast_poll_queue = + self.broadcast_poll_queue.write().await; + // TODO ED Special case this for view sync + // TODO ED Need to add vote indexing to web server for view sync certs + for cert in &deserialized_messages { + vote_index += 1; + broadcast_poll_queue.push(cert.clone()); + } + } + + MessagePurpose::Internal => { + error!("Received internal message in web server network"); + } + } + } + Ok(None) => { + async_sleep(self.wait_between_polls).await; + } + Err(_e) => { + // error!("error is {:?}", _e); + async_sleep(self.wait_between_polls).await; + } + } + } + let maybe_event = receiver.try_recv(); + match maybe_event { + Ok(event) => { + match event { + // TODO ED Should add extra error checking here to make sure we are intending to cancel a task + ConsensusIntentEvent::CancelPollForVotes(event_view) + | ConsensusIntentEvent::CancelPollForProposal(event_view) + | ConsensusIntentEvent::CancelPollForDAC(event_view) + | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) + | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { + if view_number == event_view { + debug!("Shutting down polling task for view {}", event_view); + return Ok(()); + } + } + ConsensusIntentEvent::CancelPollForTransactions(event_view) => { + // Write the most recent tx index so we can pick up where we left off later + + let mut lock = self.tx_index.write().await; + *lock = tx_index; + + if view_number == event_view { + debug!("Shutting down polling task for view {}", event_view); + return Ok(()); + } + } + + _ => unimplemented!(), + } + } + // Nothing on receiving channel + Err(_) => { + debug!("Nothing on receiving channel"); + } + } + } + Err(NetworkError::ShutDown) + } + + /// Fetches transactions from web server + async fn get_txs_from_web_server( + &self, + endpoint: String, + ) -> Result>)>, NetworkError> { + let result: Result>)>, ClientError> = + self.client.get(&endpoint).send().await; + match result { + Err(_error) => Err(NetworkError::WebServer { + source: WebServerNetworkError::ClientError, + }), + Ok(Some((index, messages))) => { + let mut deserialized_messages = Vec::new(); + for message in &messages { + let deserialized_message = bincode::deserialize(message); + if let Err(e) = deserialized_message { + return Err(NetworkError::FailedToDeserialize { source: e }); + } + deserialized_messages.push(deserialized_message.unwrap()); + } + Ok(Some((index, deserialized_messages))) + } + Ok(None) => Ok(None), + } + } + + /// Sends a GET request to the webserver for some specified endpoint + /// Returns a vec of deserialized, received messages or an error + async fn get_message_from_web_server( + &self, + endpoint: String, + ) -> Result>>, NetworkError> { + let result: Result>>, ClientError> = + self.client.get(&endpoint).send().await; + match result { + Err(_error) => Err(NetworkError::WebServer { + source: WebServerNetworkError::ClientError, + }), + Ok(Some(messages)) => { + let mut deserialized_messages = Vec::new(); + for message in &messages { + let deserialized_message = bincode::deserialize(message); + if let Err(e) = deserialized_message { + return Err(NetworkError::FailedToDeserialize { source: e }); + } + deserialized_messages.push(deserialized_message.unwrap()); + } + Ok(Some(deserialized_messages)) + } + Ok(None) => Ok(None), + } + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = ""))] +/// A message being sent to the web server +pub struct SendMsg { + /// The optional message, or body, to send + message: Option, + /// The endpoint to send the message to + endpoint: String, +} + +/// A message being received from the web server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = ""))] +pub struct RecvMsg { + /// The optional message being received + message: Option, +} + +/// Trait for messages being sent to the web server +pub trait SendMsgTrait { + /// Returns the endpoint to send the message to + fn get_endpoint(&self) -> String; + /// Returns the actual message being sent + fn get_message(&self) -> Option; +} + +/// Trait for messages being received from the web server +pub trait RecvMsgTrait { + /// Returns the actual message being received + fn get_message(&self) -> Option; +} + +impl SendMsgTrait for SendMsg { + fn get_endpoint(&self) -> String { + self.endpoint.clone() + } + + fn get_message(&self) -> Option { + self.message.clone() + } +} + +impl RecvMsgTrait for RecvMsg { + fn get_message(&self) -> Option { + self.message.clone() + } +} + +impl NetworkMsg for SendMsg {} +impl NetworkMsg for RecvMsg {} + +impl< + M: NetworkMsg + 'static + ViewMessage, + K: SignatureKey + 'static, + TYPES: NodeType + 'static, + > WebServerNetwork +{ + /// Creates a new instance of the `WebServerNetwork` + /// # Panics + /// if the web server url is malformed + pub fn create( + host: &str, + port: u16, + wait_between_polls: Duration, + key: TYPES::SignatureKey, + is_da_server: bool, + ) -> Self { + let base_url_string = format!("http://{host}:{port}"); + info!("Connecting to web server at {base_url_string:?} is da: {is_da_server}"); + + let base_url = base_url_string.parse(); + if base_url.is_err() { + error!("Web server url {:?} is malformed", base_url_string); + } + + // TODO ED Wait for healthcheck + let client = surf_disco::Client::::new(base_url.unwrap()); + + let inner = Arc::new(Inner { + phantom: PhantomData, + broadcast_poll_queue: Arc::default(), + direct_poll_queue: Arc::default(), + running: AtomicBool::new(true), + connected: AtomicBool::new(false), + client, + wait_between_polls, + _own_key: key, + is_da: is_da_server, + tx_index: Arc::default(), + proposal_task_map: Arc::default(), + vote_task_map: Arc::default(), + dac_task_map: Arc::default(), + view_sync_cert_task_map: Arc::default(), + view_sync_vote_task_map: Arc::default(), + txn_task_map: Arc::default(), + }); + + inner.connected.store(true, Ordering::Relaxed); + + Self { + inner, + server_shutdown_signal: None, + } + } + + /// Parses a message to find the appropriate endpoint + /// Returns a `SendMsg` containing the endpoint + fn parse_post_message(message: M) -> Result, WebServerNetworkError> { + let view_number: TYPES::Time = message.get_view_number(); + + let endpoint = match &message.purpose() { + MessagePurpose::Proposal => config::post_proposal_route(*view_number), + MessagePurpose::Vote => config::post_vote_route(*view_number), + MessagePurpose::Data => config::post_transactions_route(), + MessagePurpose::Internal => return Err(WebServerNetworkError::EndpointError), + MessagePurpose::ViewSyncProposal => { + // error!("Posting view sync proposal route is: {}", config::post_view_sync_proposal_route(*view_number)); + config::post_view_sync_proposal_route(*view_number) + } + MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), + MessagePurpose::DAC => config::post_da_certificate_route(*view_number), + }; + + let network_msg: SendMsg = SendMsg { + message: Some(message), + endpoint, + }; + Ok(network_msg) + } +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> + for WebCommChannel +{ + type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; + /// Blocks until node is successfully initialized + /// into the network + async fn wait_for_ready(&self) { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::wait_for_ready(&self.0) + .await; + } + + /// checks if the network is ready + /// nonblocking + async fn is_ready(&self) -> bool { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::is_ready(&self.0) + .await + } + + /// Shut down this network. Afterwards this network should no longer be used. + /// + /// This should also cause other functions to immediately return with a [`NetworkError`] + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::shut_down(&self.0) + .await; + }; + boxed_sync(closure) + } + + /// broadcast message to those listening on the communication channel + /// blocking + async fn broadcast_message( + &self, + message: Message, + _election: &MEMBERSHIP, + ) -> Result<(), NetworkError> { + self.0.broadcast_message(message, BTreeSet::new()).await + } + + /// Sends a direct message to a specific node + /// blocking + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + self.0.direct_message(message, recipient).await + } + + /// Moves out the entire queue of received messages of 'transmit_type` + /// + /// Will unwrap the underlying `NetworkMessage` + /// blocking + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result>, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::recv_msgs(&self.0, transmit_type) + .await + }; + boxed_sync(closure) + } + + /// look up a node + /// blocking + async fn lookup_node(&self, _pk: TYPES::SignatureKey) -> Result<(), NetworkError> { + Ok(()) + } + + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(&self.0, event) + .await; + } +} + +#[async_trait] +impl< + M: NetworkMsg + 'static + ViewMessage, + K: SignatureKey + 'static, + TYPES: NodeType + 'static, + > ConnectedNetwork for WebServerNetwork +{ + /// Blocks until the network is successfully initialized + async fn wait_for_ready(&self) { + while !self.inner.connected.load(Ordering::Relaxed) { + async_sleep(Duration::from_secs(1)).await; + } + } + + /// checks if the network is ready + /// nonblocking + async fn is_ready(&self) -> bool { + self.inner.connected.load(Ordering::Relaxed) + } + + /// Blocks until the network is shut down + /// then returns true + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + self.inner.running.store(false, Ordering::Relaxed); + }; + boxed_sync(closure) + } + + /// broadcast message to some subset of nodes + /// blocking + async fn broadcast_message( + &self, + message: M, + _recipients: BTreeSet, + ) -> Result<(), NetworkError> { + let network_msg = Self::parse_post_message(message); + match network_msg { + Ok(network_msg) => self.post_message_to_web_server(network_msg).await, + Err(network_msg) => Err(NetworkError::WebServer { + source: network_msg, + }), + } + } + + /// Sends a direct message to a specific node + /// blocking + async fn direct_message(&self, message: M, _recipient: K) -> Result<(), NetworkError> { + let network_msg = Self::parse_post_message(message); + match network_msg { + Ok(network_msg) => { + // error!("network msg is {:?}", network_msg.clone()); + + self.post_message_to_web_server(network_msg).await + } + Err(network_msg) => Err(NetworkError::WebServer { + source: network_msg, + }), + } + } + + /// Moves out the entire queue of received messages of 'transmit_type` + /// + /// Will unwrap the underlying `NetworkMessage` + /// blocking + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + match transmit_type { + TransmitType::Direct => { + let mut queue = self.inner.direct_poll_queue.write().await; + Ok(queue + .drain(..) + .collect::>() + .iter() + .map(|x| x.get_message().unwrap()) + .collect()) + } + TransmitType::Broadcast => { + let mut queue = self.inner.broadcast_poll_queue.write().await; + Ok(queue + .drain(..) + .collect::>() + .iter() + .map(|x| x.get_message().unwrap()) + .collect()) + } + } + }; + boxed_sync(closure) + } + + /// look up a node + /// blocking + async fn lookup_node(&self, _pk: K) -> Result<(), NetworkError> { + Ok(()) + } + + #[allow(clippy::too_many_lines)] + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + debug!( + "Injecting event: {:?} is da {}", + event.clone(), + self.inner.is_da + ); + + // TODO ED Need to handle canceling tasks that don't receive their expected output (such a proposal that never comes) + // TODO ED Need to GC all old views, not just singular views, could lead to a network leak + + match event { + ConsensusIntentEvent::PollForProposal(view_number) => { + // Check if we already have a task for this (we shouldn't) + + // Going to do a write lock since mostly likely we will need it - can change to upgradable read in the future + let mut task_map = self.inner.proposal_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::Proposal, view_number) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // GC proposal collection if we are two views in the future + if let Some((_, sender)) = task_map.remove_entry(&view_number.wrapping_sub(2)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForProposal( + view_number.wrapping_sub(2), + )) + .await; + } + } + ConsensusIntentEvent::PollForVotes(view_number) => { + let mut task_map = self.inner.vote_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::Vote, view_number) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // GC proposal collection if we are two views in the future + // TODO ED This won't work for vote collection, last task is more than 2 view ago depending on size of network, will need to rely on cancel task from consensus + if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForVotes( + view_number.wrapping_sub(2), + )) + .await; + } + } + ConsensusIntentEvent::PollForDAC(view_number) => { + let mut task_map = self.inner.dac_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::DAC, view_number) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // GC proposal collection if we are two views in the future + if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForDAC( + view_number.wrapping_sub(2), + )) + .await; + } + } + ConsensusIntentEvent::CancelPollForVotes(view_number) => { + let mut task_map = self.inner.vote_task_map.write().await; + + if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForVotes(view_number)) + .await; + } + } + + ConsensusIntentEvent::PollForViewSyncCertificate(view_number) => { + let mut task_map = self.inner.view_sync_cert_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server( + receiver, + MessagePurpose::ViewSyncProposal, + view_number, + ) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // TODO ED Do we need to GC before returning? Or will view sync task handle that? + } + ConsensusIntentEvent::PollForViewSyncVotes(view_number) => { + let mut task_map = self.inner.view_sync_vote_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server( + receiver, + MessagePurpose::ViewSyncVote, + view_number, + ) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + } + + ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) => { + let mut task_map = self.inner.view_sync_cert_task_map.write().await; + + if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForViewSyncCertificate( + view_number, + )) + .await; + } + } + ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) => { + let mut task_map = self.inner.view_sync_vote_task_map.write().await; + + if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForViewSyncVotes( + view_number, + )) + .await; + } + } + ConsensusIntentEvent::PollForTransactions(view_number) => { + let mut task_map = self.inner.txn_task_map.write().await; + if let std::collections::hash_map::Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::Data, view_number) + .await + { + error!( + "Background receive transaction polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // TODO ED Do we need to GC before returning? Or will view sync task handle that? + } + ConsensusIntentEvent::CancelPollForTransactions(view_number) => { + let mut task_map = self.inner.txn_task_map.write().await; + + if let Some((_view, sender)) = task_map.remove_entry(&(view_number)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForTransactions(view_number)) + .await; + } else { + info!("Task map entry should have existed"); + }; + } + + _ => error!("Unexpected event!"), + } + } +} + +impl> + TestableNetworkingImplementation> + for WebServerNetwork, TYPES::SignatureKey, TYPES> +{ + fn generator( + expected_node_count: usize, + _num_bootstrap: usize, + _network_id: usize, + _da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let (server_shutdown_sender, server_shutdown) = oneshot(); + let sender = Arc::new(server_shutdown_sender); + // TODO ED Restrict this to be an open port using portpicker + let port = random::(); + info!("Launching web server on port {port}"); + // Start web server + async_spawn(hotshot_web_server::run_web_server::( + Some(server_shutdown), + port, + )); + + let known_nodes = (0..expected_node_count as u64) + .map(|id| { + TYPES::SignatureKey::from_private( + &TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id).1, + ) + }) + .collect::>(); + + // Start each node's web server client + Box::new(move |id| { + let sender = Arc::clone(&sender); + let mut network = WebServerNetwork::create( + "0.0.0.0", + port, + Duration::from_millis(100), + known_nodes[id as usize].clone(), + is_da, + ); + network.server_shutdown_signal = Some(sender); + network + }) + } + + fn in_flight_message_count(&self) -> Option { + None + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > TestableNetworkingImplementation> + for WebCommChannel +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generator = , + TYPES::SignatureKey, + TYPES, + > as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da, + ); + Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) + } + + fn in_flight_message_count(&self) -> Option { + None + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > + TestableChannelImplementation< + TYPES, + Message, + PROPOSAL, + VOTE, + MEMBERSHIP, + WebServerNetwork, TYPES::SignatureKey, TYPES>, + > for WebCommChannel +{ + fn generate_network() -> Box< + dyn Fn(Arc, TYPES::SignatureKey, TYPES>>) -> Self + + 'static, + > { + Box::new(move |network| WebCommChannel::new(network)) + } +} diff --git a/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs b/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs new file mode 100644 index 0000000000..723c7c071d --- /dev/null +++ b/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs @@ -0,0 +1,318 @@ +//! Networking Implementation that has a primary and a fallback newtork. If the primary +//! Errors we will use the backup to send or receive +use super::NetworkError; +use crate::{ + traits::implementations::{Libp2pNetwork, WebServerNetwork}, + NodeImplementation, +}; + +use async_trait::async_trait; + +use futures::join; + +use hotshot_task::{boxed_sync, BoxSyncFuture}; +use hotshot_types::{ + data::ProposalType, + message::Message, + traits::{ + election::Membership, + network::{ + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, + TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, + ViewMessage, + }, + node_implementation::NodeType, + }, + vote::VoteType, +}; +use std::{marker::PhantomData, sync::Arc}; +use tracing::error; +/// A communication channel with 2 networks, where we can fall back to the slower network if the +/// primary fails +#[derive(Clone, Debug)] +pub struct WebServerWithFallbackCommChannel< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +> { + /// The two networks we'll use for send/recv + networks: Arc>, +} + +impl, MEMBERSHIP: Membership> + WebServerWithFallbackCommChannel +{ + /// Constructor + #[must_use] + pub fn new(networks: Arc>) -> Self { + Self { networks } + } + + /// Get a ref to the primary network + #[must_use] + pub fn network(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { + &self.networks.0 + } + + /// Get a ref to the backup network + #[must_use] + pub fn fallback(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { + &self.networks.1 + } +} + +/// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` +/// We need this so we can impl `TestableNetworkingImplementation` +/// on the tuple +#[derive(Debug, Clone)] +pub struct CombinedNetworks< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +>( + pub WebServerNetwork, TYPES::SignatureKey, TYPES>, + pub Libp2pNetwork, TYPES::SignatureKey>, + pub PhantomData, +); + +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for CombinedNetworks +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generators = ( + , + TYPES::SignatureKey, + TYPES, + > as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ), + , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ) + ); + Box::new(move |node_id| { + CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for WebServerWithFallbackCommChannel +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generator = as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ); + Box::new(move |node_id| Self { + networks: generator(node_id).into(), + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> + for WebServerWithFallbackCommChannel +{ + type NETWORK = CombinedNetworks; + + async fn wait_for_ready(&self) { + join!( + self.network().wait_for_ready(), + self.fallback().wait_for_ready() + ); + } + + async fn is_ready(&self) -> bool { + self.network().is_ready().await && self.fallback().is_ready().await + } + + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + join!(self.network().shut_down(), self.fallback().shut_down()); + }; + boxed_sync(closure) + } + + async fn broadcast_message( + &self, + message: Message, + election: &MEMBERSHIP, + ) -> Result<(), NetworkError> { + let recipients = + >::get_committee(election, message.get_view_number()); + let fallback = self + .fallback() + .broadcast_message(message.clone(), recipients.clone()); + let network = self.network().broadcast_message(message, recipients); + match join!(fallback, network) { + (Err(e1), Err(e2)) => { + error!( + "Both network broadcasts failed primary error: {}, fallback error: {}", + e1, e2 + ); + Err(e1) + } + (Err(e), _) => { + error!("Failed primary broadcast with error: {}", e); + Ok(()) + } + (_, Err(e)) => { + error!("Failed backup broadcast with error: {}", e); + Ok(()) + } + _ => Ok(()), + } + } + + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + match self + .network() + .direct_message(message.clone(), recipient.clone()) + .await + { + Ok(_) => Ok(()), + Err(e) => { + error!( + "Falling back on direct message, error on primary network: {}", + e + ); + self.fallback().direct_message(message, recipient).await + } + } + } + + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result>, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + match self.network().recv_msgs(transmit_type).await { + Ok(msgs) => Ok(msgs), + Err(e) => { + error!( + "Falling back on recv message, error on primary network: {}", + e + ); + self.fallback().recv_msgs(transmit_type).await + } + } + }; + boxed_sync(closure) + } + + async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { + match join!( + self.network().lookup_node(pk.clone()), + self.fallback().lookup_node(pk) + ) { + (Err(e1), Err(e2)) => { + error!( + "Both network lookups failed primary error: {}, fallback error: {}", + e1, e2 + ); + Err(e1) + } + (Err(e), _) => { + error!("Failed primary lookup with error: {}", e); + Ok(()) + } + (_, Err(e)) => { + error!("Failed backup lookup with error: {}", e); + Ok(()) + } + _ => Ok(()), + } + } + + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(self.network(), event) + .await; + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + > + TestableChannelImplementation< + TYPES, + Message, + PROPOSAL, + VOTE, + MEMBERSHIP, + CombinedNetworks, + > for WebServerWithFallbackCommChannel +{ + fn generate_network() -> Box) -> Self + 'static> { + Box::new(move |network| WebServerWithFallbackCommChannel::new(network)) + } +} diff --git a/hotshot/src/traits/node_implementation.rs b/hotshot/src/traits/node_implementation.rs new file mode 100644 index 0000000000..5bd8cfbe77 --- /dev/null +++ b/hotshot/src/traits/node_implementation.rs @@ -0,0 +1,8 @@ +//! Composite trait for node behavior +//! +//! This module defines the [`NodeImplementation`] trait, which is a composite trait used for +//! describing the overall behavior of a node, as a composition of implementations of the node trait. + +pub use hotshot_types::traits::node_implementation::{ + NodeImplementation, TestableNodeImplementation, +}; diff --git a/hotshot/src/traits/storage.rs b/hotshot/src/traits/storage.rs new file mode 100644 index 0000000000..1961871de7 --- /dev/null +++ b/hotshot/src/traits/storage.rs @@ -0,0 +1,5 @@ +//! Abstraction over on-disk storage of node state +// pub mod atomic_storage; +pub mod memory_storage; + +pub use hotshot_types::traits::storage::{Result, Storage}; diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs new file mode 100644 index 0000000000..9d054fe1ac --- /dev/null +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -0,0 +1,261 @@ +//! On-disk storage of node state. Based on [`atomic_store`](https://github.com/EspressoSystems/atomicstore). + +mod dual_key_value_store; +mod hash_map_store; + +use self::{dual_key_value_store::DualKeyValueStore, hash_map_store::HashMapStore}; +use crate::{data::Leaf, traits::StateContents, QuorumCertificate}; +use async_std::sync::Mutex; +use async_trait::async_trait; +use atomic_store::{AtomicStore, AtomicStoreLoader}; +use commit::Commitment; +use hotshot_types::{ + traits::storage::{ + AtomicStoreSnafu, Storage, StorageError, StorageResult, StorageState, StorageUpdater, + TestableStorage, + }, +}; +use serde::{de::DeserializeOwned, Serialize}; +use snafu::ResultExt; +use std::{path::Path, sync::Arc}; +use tempfile::{tempdir, TempDir}; +use tracing::{instrument, trace}; + +/// Inner state of an atomic storage +struct AtomicStorageInner +where + STATE: DeserializeOwned + Serialize + StateContents, +{ + /// Temporary directory storage might live in + /// (we want to delete the temporary directory when storage is droppped) + _temp_dir: Option, + /// The atomic store loader + atomic_store: Mutex, + + /// The Blocks stored by this [`AtomicStorage`] + blocks: HashMapStore, STATE::Block>, + + /// The [`QuorumCertificate`]s stored by this [`AtomicStorage`] + qcs: DualKeyValueStore>, + + /// The [`Leaf`s stored by this [`AtomicStorage`] + /// + /// In order to maintain the struct constraints, this list must be append only. Once a QC is + /// inserted, it index _must not_ change + leaves: DualKeyValueStore>, + + /// The store of states + states: HashMapStore>, STATE>, +} + +/// Persistent [`Storage`] implementation, based upon [`atomic_store`]. +#[derive(Clone)] +pub struct AtomicStorage +where + STATE: DeserializeOwned + Serialize + StateContents, +{ + /// Inner state of the atomic storage + inner: Arc>, +} + +impl TestableStorage for AtomicStorage { + fn construct_tmp_storage() -> StorageResult { + let tempdir = tempdir().map_err(|e| StorageError::InconsistencyError { + description: e.to_string(), + })?; + let loader = AtomicStoreLoader::create(tempdir.path(), "hotshot").map_err(|e| { + StorageError::InconsistencyError { + description: e.to_string(), + } + })?; + Self::init_from_loader(loader, Some(tempdir)) + .map_err(|e| StorageError::AtomicStore { source: e }) + } +} + +impl AtomicStorage +where + STATE: StateContents, +{ + /// Creates an atomic storage at a given path. If files exist, will back up existing directory before creating. + /// + /// # Errors + /// + /// Returns the underlying errors that the following types can throw: + /// - [`atomic_store::AtomicStoreLoader`] + /// - [`atomic_store::AtomicStore`] + /// - [`atomic_store::RollingLog`] + /// - [`atomic_store::AppendLog`] + pub fn create(path: &Path) -> atomic_store::Result { + let loader = AtomicStoreLoader::create(path, "hotshot")?; + Self::init_from_loader(loader, None) + } + + /// Open an atomic storage at a given path. + /// + /// # Errors + /// + /// Returns the underlying errors that the following types can throw: + /// - [`atomic_store::AtomicStoreLoader`] + /// - [`atomic_store::AtomicStore`] + /// - [`atomic_store::RollingLog`] + /// - [`atomic_store::AppendLog`] + pub fn open(path: &Path) -> atomic_store::Result { + let loader = AtomicStoreLoader::load(path, "hotshot")?; + Self::init_from_loader(loader, None) + } + + /// Open an atomic storage with a given [`AtomicStoreLoader`] + /// + /// # Errors + /// + /// Returns the underlying errors that the following types can throw: + /// - [`atomic_store::AtomicStore`] + /// - [`atomic_store::RollingLog`] + /// - [`atomic_store::AppendLog`] + pub fn init_from_loader( + mut loader: AtomicStoreLoader, + dir: Option, + ) -> atomic_store::Result { + let blocks = HashMapStore::load(&mut loader, "hotshot_blocks")?; + let qcs = DualKeyValueStore::open(&mut loader, "hotshot_qcs")?; + let leaves = DualKeyValueStore::open(&mut loader, "hotshot_leaves")?; + let states = HashMapStore::load(&mut loader, "hotshot_states")?; + + let atomic_store = AtomicStore::open(loader)?; + + Ok(Self { + inner: Arc::new(AtomicStorageInner { + _temp_dir: dir, + atomic_store: Mutex::new(atomic_store), + blocks, + qcs, + leaves, + states, + }), + }) + } +} + +#[async_trait] +impl Storage for AtomicStorage { + #[instrument(name = "AtomicStorage::get_block", skip_all)] + async fn get_block( + &self, + hash: &Commitment, + ) -> StorageResult> { + Ok(self.inner.blocks.get(hash).await) + } + + #[instrument(name = "AtomicStorage::get_qc", skip_all)] + async fn get_qc( + &self, + hash: &Commitment, + ) -> StorageResult>> { + Ok(self.inner.qcs.load_by_key_1_ref(hash).await) + } + + #[instrument(name = "AtomicStorage::get_newest_qc", skip_all)] + async fn get_newest_qc(&self) -> StorageResult>> { + Ok(self.inner.qcs.load_latest(|qc| qc.view_number()).await) + } + + #[instrument(name = "AtomicStorage::get_qc_for_view", skip_all)] + async fn get_qc_for_view( + &self, + view: TYPES::Time, + ) -> StorageResult>> { + Ok(self.inner.qcs.load_by_key_2(view).await) + } + + #[instrument(name = "AtomicStorage::get_leaf", skip_all)] + async fn get_leaf(&self, hash: &Commitment>) -> StorageResult>> { + Ok(self.inner.leaves.load_by_key_1_ref(hash).await) + } + + #[instrument(name = "AtomicStorage::get_leaf_by_block", skip_all)] + async fn get_leaf_by_block( + &self, + hash: &Commitment, + ) -> StorageResult>> { + Ok(self.inner.leaves.load_by_key_2_ref(hash).await) + } + + #[instrument(name = "AtomicStorage::get_state", skip_all)] + async fn get_state(&self, hash: &Commitment>) -> StorageResult> { + Ok(self.inner.states.get(hash).await) + } + + async fn get_internal_state(&self) -> StorageState { + let mut blocks: Vec<(Commitment, STATE::Block)> = + self.inner.blocks.load_all().await.into_iter().collect(); + + blocks.sort_by_key(|(hash, _)| *hash); + let blocks = blocks.into_iter().map(|(_, block)| block).collect(); + + let mut leafs: Vec> = self.inner.leaves.load_all().await; + leafs.sort_by_cached_key(Leaf::hash); + + let mut quorum_certificates = self.inner.qcs.load_all().await; + quorum_certificates.sort_by_key(|qc| qc.view_number()); + + let mut states: Vec<(Commitment>, STATE)> = + self.inner.states.load_all().await.into_iter().collect(); + states.sort_by_key(|(hash, _)| *hash); + let states = states.into_iter().map(|(_, state)| state).collect(); + + StorageState { + blocks, + quorum_certificates, + leafs, + states, + } + } +} + +/// Implementation of [`StorageUpdater`] for the [`AtomicStorage`] +struct AtomicStorageUpdater<'a, S: StateContents> { + /// A reference to the internals of the [`AtomicStorage`] + inner: &'a AtomicStorageInner, +} + +#[async_trait] +impl<'a, STATE: StateContents + 'static> StorageUpdater<'a, STATE> + for AtomicStorageUpdater<'a, STATE> +{ + #[instrument(name = "AtomicStorage::get_block", skip_all)] + async fn insert_block( + &mut self, + hash: Commitment, + block: STATE::Block, + ) -> StorageResult { + trace!(?block, "inserting block"); + self.inner + .blocks + .insert(hash, block) + .await + .context(AtomicStoreSnafu)?; + Ok(()) + } + + #[instrument(name = "AtomicStorage::insert_leaf", skip_all)] + async fn insert_leaf(&mut self, leaf: Leaf) -> StorageResult { + self.inner.leaves.insert(leaf).await + } + + #[instrument(name = "AtomicStorage::insert_qc", skip_all)] + async fn insert_qc(&mut self, qc: QuorumCertificate) -> StorageResult { + self.inner.qcs.insert(qc).await + } + + #[instrument(name = "AtomicStorage::insert_state", skip_all)] + async fn insert_state(&mut self, state: STATE, hash: Commitment>) -> StorageResult { + trace!(?hash, "Inserting state"); + self.inner + .states + .insert(hash, state) + .await + .context(AtomicStoreSnafu)?; + Ok(()) + } +} diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs new file mode 100644 index 0000000000..70431af012 --- /dev/null +++ b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs @@ -0,0 +1,215 @@ +//! A store that operates on a value with 2 different keys. +//! +//! Implementations should implement [`DualKeyValue`] before they can use [`DualKeyValueStore`]. + +use async_std::sync::RwLock; +use atomic_store::{load_store::BincodeLoadStore, AppendLog, AtomicStoreLoader}; +use commit::{Commitment, Committable}; +use hotshot_types::{ + data::{Leaf, QuorumCertificate, ViewNumber}, + traits::{ + storage::{AtomicStoreSnafu, InconsistencySnafu, StorageError}, + StateContents, + }, +}; +use serde::{de::DeserializeOwned, Serialize}; +use snafu::ResultExt; +use std::{collections::HashMap, hash::Hash}; + +/// A store that allows lookup of a value by 2 different keys. +pub struct DualKeyValueStore { + /// inner value + inner: RwLock>, +} + +/// The inner struct of the [`DualKeyValueStore`] +struct Inner { + /// The underlying store + store: AppendLog>, + + /// Key 1 to index + key_1: HashMap, + + /// Key 2 to index + key_2: HashMap, + + /// Actual values. This list should be append-only + values: Vec, +} + +impl DualKeyValueStore { + /// Open the [`DualKeyValueStore`] with the given loader and name. + /// + /// # Errors + /// + /// Returns any errors that [`AppendLog`]'s `load` returns. + pub fn open( + loader: &mut AtomicStoreLoader, + name: &str, + ) -> Result { + let store = AppendLog::load(loader, BincodeLoadStore::default(), name, 1024)?; + let values = store + .iter() + .collect::, atomic_store::PersistenceError>>() + .unwrap_or_default(); + let key_1 = values + .iter() + .enumerate() + .map(|(idx, v)| (v.key_1(), idx)) + .collect(); + let key_2 = values + .iter() + .enumerate() + .map(|(idx, v)| (v.key_2(), idx)) + .collect(); + Ok(Self { + inner: RwLock::new(Inner { + store, + key_1, + key_2, + values, + }), + }) + } + + /// Load the `K` value based on the 1st key. + pub async fn load_by_key_1_ref(&self, k: &K::Key1) -> Option { + let read = self.inner.read().await; + let idx = read.key_1.get(k).copied()?; + Some(read.values[idx].clone()) + } + + /// Load the `K` value based on a reference of the 2nd key. + pub async fn load_by_key_2_ref(&self, k: &K::Key2) -> Option { + let read = self.inner.read().await; + let idx = read.key_2.get(k).copied()?; + Some(read.values[idx].clone()) + } + + /// Load the `K` value based on the 2nd key. + pub async fn load_by_key_2(&self, k: K::Key2) -> Option { + self.load_by_key_2_ref(&k).await + } + + /// Load the latest inserted entry in this [`DualKeyValueStore`] + pub async fn load_latest(&self, cb: F) -> Option + where + F: FnMut(&&K) -> V, + V: std::cmp::Ord, + { + let read = self.inner.read().await; + read.values.iter().max_by_key::(cb).cloned() + } + + /// Load all entries in this [`DualKeyValueStore`] + pub async fn load_all(&self) -> Vec { + self.inner.read().await.values.clone() + } + + /// Insert a value into this [`DualKeyValueStore`] + /// + /// # Errors + /// + /// Returns any errors that [`AppendLog`]'s `store_resource` returns. + pub async fn insert(&self, val: K) -> Result<(), StorageError> { + let mut lock = self.inner.write().await; + + match (lock.key_1.get(&val.key_1()), lock.key_2.get(&val.key_2())) { + (Some(idx), Some(key_2_idx)) if idx == key_2_idx => { + // updating + let idx = *idx; + + // TODO: This still adds a duplicate `K` in the storage + // ideally we'd update this record instead + lock.store.store_resource(&val).context(AtomicStoreSnafu)?; + lock.values[idx] = val; + Ok(()) + } + (Some(_), Some(_)) => InconsistencySnafu { + description: format!("Could not insert {}, both {} and {} already exist, but point at different records", std::any::type_name::(), K::KEY_1_NAME, K::KEY_2_NAME), + } + .fail(), + (Some(_), None) => InconsistencySnafu { + description: format!("Could not insert {}, {} already exists but {} does not", std::any::type_name::(), K::KEY_1_NAME, K::KEY_2_NAME), + } + .fail(), + (None, Some(_)) => InconsistencySnafu { + description: format!("Could not insert {}, {} already exists but {} does not", std::any::type_name::(), K::KEY_2_NAME, K::KEY_1_NAME), + } + .fail(), + (None, None) => { + // inserting + lock.store.store_resource(&val).context(AtomicStoreSnafu)?; + + let idx = lock.values.len(); + lock.key_1.insert(val.key_1(), idx); + lock.key_2.insert(val.key_2(), idx); + lock.values.push(val); + + Ok(()) + } + } + } + + /// Commit this [`DualKeyValueStore`]. + /// + /// # Errors + /// + /// Returns any errors that [`AppendLog`]'s `commit_version` returns. + pub async fn commit_version(&self) -> atomic_store::Result<()> { + let mut lock = self.inner.write().await; + lock.store.commit_version()?; + Ok(()) + } +} + +/// A dual key value. Used for [`DualKeyValueStore`] +pub trait DualKeyValue: Serialize + DeserializeOwned + Clone { + /// The name of the first key + const KEY_1_NAME: &'static str; + /// The first key type + type Key1: Serialize + DeserializeOwned + Hash + Eq; + /// Get a copy of the first key + fn key_1(&self) -> Self::Key1; + + /// The name of the second key + const KEY_2_NAME: &'static str; + /// The second key type + type Key2: Serialize + DeserializeOwned + Hash + Eq; + /// Get a clone of the second key + fn key_2(&self) -> Self::Key2; +} + +impl DualKeyValue for QuorumCertificate { + type Key1 = Commitment; + type Key2 = ViewNumber; + + const KEY_1_NAME: &'static str = "block_commitment"; + const KEY_2_NAME: &'static str = "view_number"; + + fn key_1(&self) -> Self::Key1 { + self.block_commitment + } + fn key_2(&self) -> Self::Key2 { + self.view_number + } +} + +impl DualKeyValue for Leaf +where + STATE: StateContents, +{ + type Key1 = Commitment>; + type Key2 = Commitment; + + const KEY_1_NAME: &'static str = "leaf_commitment"; + const KEY_2_NAME: &'static str = "block_commitment"; + + fn key_1(&self) -> Self::Key1 { + self.commit() + } + + fn key_2(&self) -> Self::Key2 { + ::commit(&self.deltas) + } +} diff --git a/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs b/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs new file mode 100644 index 0000000000..305b9ce650 --- /dev/null +++ b/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs @@ -0,0 +1,91 @@ +//! A store based on [`RollingLog`] + +use async_std::sync::RwLock; +use atomic_store::{load_store::BincodeLoadStore, AtomicStoreLoader, RollingLog}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{collections::HashMap, hash::Hash}; + +/// A store with [`RollingLog`] as the storage system. +pub struct HashMapStore +where + K: Eq + Hash, + HashMap: Serialize + DeserializeOwned, +{ + /// Inner value + inner: RwLock>, +} + +/// The inner value of the [`HashMapStore`] +struct Inner +where + K: Eq + Hash, + HashMap: Serialize + DeserializeOwned, +{ + /// The underlying atomic_store store + store: RollingLog>>, + /// Data currently loaded in the store + data: HashMap, +} + +impl HashMapStore +where + K: Eq + Hash, + V: Clone, + HashMap: Serialize + DeserializeOwned + Clone, +{ + /// Load a `HashMapStore` with the given loader and name. + /// + /// # Errors + /// + /// Returns any errors that [`RollingLog`]'s `load` returns. + pub fn load(loader: &mut AtomicStoreLoader, name: &str) -> atomic_store::Result { + let store = RollingLog::load(loader, BincodeLoadStore::default(), name, 1024)?; + let data = store.load_latest().unwrap_or_default(); + Ok(Self { + inner: RwLock::new(Inner { store, data }), + }) + } + + /// Get an entry in this store. Returning `Some(V)` if it was found. + pub async fn get(&self, hash: &K) -> Option { + let read = self.inner.read().await; + read.data.get(hash).cloned() + } + + /// Insert a new key-value entry into the store. This won't be committed untill `commit` is called. + pub async fn insert(&self, key: K, val: V) -> atomic_store::Result<()> { + let mut lock = self.inner.write().await; + // Make sure to commit the store first before updating the internal value + // this makes sure that in a case of an error, the internal state is still correct + let mut data = lock.data.clone(); + data.insert(key, val); + lock.store.store_resource(&data)?; + + lock.data = data; + Ok(()) + } + + /// Commit this rolling store, returning a commit lock. + /// + /// Once all stores are committed, you need to call `apply` on this lock, or else the commit will be reverted once the lock goes out of scope. + /// + /// # Errors + /// + /// Returns any errors that [`RollingLog`]'s `store_resource` and `commit_version` returns. + pub async fn commit_version(&self) -> atomic_store::Result<()> { + let mut lock = self.inner.write().await; + lock.store.commit_version()?; + Ok(()) + } +} + +impl HashMapStore +where + HashMap: Serialize + DeserializeOwned + Clone, + K: Eq + Hash, +{ + /// Returns all data stored in this [`HashMapStore`]. + pub async fn load_all(&self) -> HashMap { + self.inner.read().await.data.clone() + } +} diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs new file mode 100644 index 0000000000..2a9e570a76 --- /dev/null +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -0,0 +1,211 @@ +//! [`HashMap`](std::collections::HashMap) and [`Vec`] based implementation of the storage trait +//! +//! This module provides a non-persisting, dummy adapter for the [`Storage`] trait +use async_lock::RwLock; +use async_trait::async_trait; +use hotshot_types::{ + data::LeafType, + traits::{ + node_implementation::NodeType, + storage::{ + Result, Storage, StorageError, StorageState, StoredView, TestableStorage, ViewEntry, + }, + }, +}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; + +/// Internal state for a [`MemoryStorage`] +struct MemoryStorageInternal> { + /// The views that have been stored + stored: BTreeMap>, + /// The views that have failed + failed: BTreeSet, +} + +/// In memory, ephemeral, storage for a [`HotShot`](crate::HotShot) instance +#[derive(Clone)] +pub struct MemoryStorage> { + /// The inner state of this [`MemoryStorage`] + inner: Arc>>, +} + +impl> MemoryStorage { + /// Create a new instance of the memory storage with the given block and state + #[must_use] + pub fn empty() -> Self { + let inner = MemoryStorageInternal { + stored: BTreeMap::new(), + failed: BTreeSet::new(), + }; + Self { + inner: Arc::new(RwLock::new(inner)), + } + } +} + +#[async_trait] +impl> TestableStorage + for MemoryStorage +{ + fn construct_tmp_storage() -> Result { + Ok(Self::empty()) + } + + async fn get_full_state(&self) -> StorageState { + let inner = self.inner.read().await; + StorageState { + stored: inner.stored.clone(), + failed: inner.failed.clone(), + } + } +} + +#[async_trait] +impl> Storage + for MemoryStorage +{ + async fn append(&self, views: Vec>) -> Result { + let mut inner = self.inner.write().await; + for view in views { + match view { + ViewEntry::Failed(num) => { + inner.failed.insert(num); + } + ViewEntry::Success(view) => { + inner.stored.insert(view.view_number, view); + } + } + } + Ok(()) + } + + async fn cleanup_storage_up_to_view(&self, view: TYPES::Time) -> Result { + let mut inner = self.inner.write().await; + + // .split_off will return everything after the given key, including the key. + let stored_after = inner.stored.split_off(&view); + // .split_off will return the map we want to keep stored, so we need to swap them + let old_stored = std::mem::replace(&mut inner.stored, stored_after); + + // same for the BTreeSet + let failed_after = inner.failed.split_off(&view); + let old_failed = std::mem::replace(&mut inner.failed, failed_after); + + Ok(old_stored.len() + old_failed.len()) + } + + async fn get_anchored_view(&self) -> Result> { + let inner = self.inner.read().await; + let last = inner + .stored + .values() + .next_back() + .ok_or(StorageError::NoGenesisView)?; + Ok(last.clone()) + } + + async fn commit(&self) -> Result { + Ok(()) // do nothing + } +} + +#[cfg(test)] +mod test { + use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; + + use super::*; + use hotshot_signature_key::bn254::BN254Pub; + use hotshot_types::{ + certificate::{AssembledSignature, QuorumCertificate}, + constants::genesis_proposer_id, + data::{fake_commitment, ValidatingLeaf, ViewNumber}, + traits::{ + block_contents::dummy::{DummyBlock, DummyState}, + node_implementation::NodeType, + state::ConsensusTime, + Block, + }, + }; + use std::{fmt::Debug, hash::Hash}; + use tracing::instrument; + + #[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, + )] + struct DummyTypes; + + impl NodeType for DummyTypes { + type Time = ViewNumber; + type BlockType = DummyBlock; + type SignatureKey = BN254Pub; + type VoteTokenType = StaticVoteToken; + type Transaction = ::Transaction; + type ElectionConfigType = StaticElectionConfig; + type StateType = DummyState; + } + + #[instrument(skip(rng))] + fn random_stored_view( + rng: &mut dyn rand::RngCore, + view_number: ::Time, + ) -> StoredView> { + // TODO is it okay to be using genesis here? + let _dummy_block_commit = fake_commitment::(); + let dummy_leaf_commit = fake_commitment::>(); + StoredView::from_qc_block_and_state( + QuorumCertificate { + // block_commitment: dummy_block_commit, + is_genesis: view_number == ::Time::genesis(), + leaf_commitment: dummy_leaf_commit, + signatures: AssembledSignature::Genesis(), + view_number, + }, + DummyBlock::random(rng), + DummyState::random(rng), + rng.next_u64(), + dummy_leaf_commit, + Vec::new(), + genesis_proposer_id(), + ) + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn memory_storage() { + let mut rng = rand::thread_rng(); + let storage = MemoryStorage::construct_tmp_storage().unwrap(); + let genesis = random_stored_view(&mut rng, ::Time::genesis()); + storage + .append_single_view(genesis.clone()) + .await + .expect("Could not append block"); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number) + .await + .unwrap(); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number + 1) + .await + .unwrap(); + assert!(storage.get_anchored_view().await.is_err()); + } +} diff --git a/hotshot/src/types.rs b/hotshot/src/types.rs new file mode 100644 index 0000000000..6956d8a795 --- /dev/null +++ b/hotshot/src/types.rs @@ -0,0 +1,7 @@ +mod event; +mod handle; + +pub use event::{Event, EventType}; +pub use handle::SystemContextHandle; +pub use hotshot_signature_key::bn254; +pub use hotshot_types::{message::Message, traits::signature_key::SignatureKey, vote::VoteType}; diff --git a/hotshot/src/types/event.rs b/hotshot/src/types/event.rs new file mode 100644 index 0000000000..a64b1be068 --- /dev/null +++ b/hotshot/src/types/event.rs @@ -0,0 +1,3 @@ +//! Events that a [`HotShot`](crate::HotShot) instance can emit + +pub use hotshot_types::event::{Event, EventType}; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs new file mode 100644 index 0000000000..5b8e737c1a --- /dev/null +++ b/hotshot/src/types/handle.rs @@ -0,0 +1,414 @@ +//! Provides an event-streaming handle for a [`HotShot`] running in the background + +use crate::{traits::NodeImplementation, types::Event, Message, QuorumCertificate, SystemContext}; +use async_compatibility_layer::channel::UnboundedStream; +use async_lock::RwLock; +use commit::Committable; +use futures::Stream; +use hotshot_task::{ + boxed_sync, + event_stream::{ChannelStream, EventStream, StreamId}, + global_registry::GlobalRegistry, + task::FilterEvent, + BoxSyncFuture, +}; +use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_types::{ + consensus::Consensus, + data::LeafType, + error::HotShotError, + event::EventType, + message::{GeneralConsensusMessage, MessageKind}, + traits::{ + election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + node_implementation::{ExchangesType, NodeType, QuorumEx}, + state::ConsensusTime, + storage::Storage, + }, +}; + +use std::sync::Arc; +use tracing::error; + +#[cfg(feature = "hotshot-testing")] +use commit::Commitment; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::signature_key::EncodedSignature; + +/// Event streaming handle for a [`SystemContext`] instance running in the background +/// +/// This type provides the means to message and interact with a background [`SystemContext`] instance, +/// allowing the ability to receive [`Event`]s from it, send transactions to it, and interact with +/// the underlying storage. +pub struct SystemContextHandle> { + /// The [sender](BroadcastSender) for the output stream from the background process + /// + /// This is kept around as an implementation detail, as the [`BroadcastSender::handle_async`] + /// method is needed to generate new receivers to expose to the user + pub(crate) output_event_stream: ChannelStream>, + /// access to the internal ev ent stream, in case we need to, say, shut something down + pub(crate) internal_event_stream: ChannelStream>, + /// registry for controlling tasks + pub(crate) registry: GlobalRegistry, + + /// Internal reference to the underlying [`HotShot`] + pub hotshot: SystemContext, + + /// Our copy of the `Storage` view for a hotshot + pub(crate) storage: I::Storage, +} + +impl + 'static> Clone + for SystemContextHandle +{ + fn clone(&self) -> Self { + Self { + registry: self.registry.clone(), + output_event_stream: self.output_event_stream.clone(), + internal_event_stream: self.internal_event_stream.clone(), + hotshot: self.hotshot.clone(), + storage: self.storage.clone(), + } + } +} + +impl + 'static> SystemContextHandle { + // /// Will return the next event in the queue + // /// + // /// # Errors + // /// + // /// Will return [`HotShotError::NetworkFault`] if the underlying [`SystemContext`] has been closed. + // pub async fn next_event(&mut self) -> Result, HotShotError> { + // let result = self.stream_output.recv_async().await; + // match result { + // Ok(result) => Ok(result), + // Err(_) => Err(NetworkFault { source: ShutDown }), + // } + // } + // /// Will attempt to immediately pull an event out of the queue + // /// + // /// # Errors + // /// + // /// Will return [`HotShotError::NetworkFault`] if the underlying [`HotShot`] instance has shut down + // pub fn try_next_event(&mut self) -> Result>, HotShotError> { + // self.stream.await + // // let result = self.stream_output.try_recv(); + // // Ok(result) + // } + + /// Will pull all the currently available events out of the event queue. + /// + /// # Errors + /// + /// Will return [`HotShotError::NetworkFault`] if the underlying [`HotShot`] instance has been shut + /// down. + // pub async fn available_events(&mut self) -> Result>, HotShotError> { + // let mut stream = self.output_stream; + // let _ = > as StreamExt/* :: */>::next(&mut *stream); + // let mut output = vec![]; + // Loop to pull out all the outputs + // loop { + // let _ = > as StreamExt/* :: */>::next(stream); + // let _ = FutureExt::::next(*self.output_stream).await; + // match FutureExt output.push(x), + // Ok(None) => break, + // // try_next event can only return HotShotError { source: NetworkError::ShutDown } + // Err(x) => return Err(x), + // } + // } + // Ok(output) + // nll_todo() + // } + + /// obtains a stream to expose to the user + pub async fn get_event_stream( + &mut self, + filter: FilterEvent>, + ) -> (impl Stream>, StreamId) { + self.output_event_stream.subscribe(filter).await + } + + /// HACK so we can know the types when running tests... + /// there are two cleaner solutions: + /// - make the stream generic and in nodetypes or nodeimpelmentation + /// - type wrapper + pub async fn get_event_stream_known_impl( + &mut self, + filter: FilterEvent>, + ) -> (UnboundedStream>, StreamId) { + self.output_event_stream.subscribe(filter).await + } + + /// HACK so we can know the types when running tests... + /// there are two cleaner solutions: + /// - make the stream generic and in nodetypes or nodeimpelmentation + /// - type wrapper + /// NOTE: this is only used for sanity checks in our tests + pub async fn get_internal_event_stream_known_impl( + &mut self, + filter: FilterEvent>, + ) -> (UnboundedStream>, StreamId) { + self.internal_event_stream.subscribe(filter).await + } + + /// Gets the current committed state of the [`HotShot`] instance + /// + /// # Errors + /// + /// Returns an error if the underlying `Storage` returns an error + pub async fn get_state(&self) -> ::MaybeState { + self.hotshot.get_state().await + } + + /// Gets most recent decided leaf + /// # Panics + /// + /// Panics if internal consensus is in an inconsistent state. + pub async fn get_decided_leaf(&self) -> I::Leaf { + self.hotshot.get_decided_leaf().await + } + + /// Submits a transaction to the backing [`HotShot`] instance. + /// + /// The current node broadcasts the transaction to all nodes on the network. + /// + /// # Errors + /// + /// Will return a [`HotShotError`] if some error occurs in the underlying + /// [`SystemContext`] instance. + pub async fn submit_transaction( + &self, + tx: TYPES::Transaction, + ) -> Result<(), HotShotError> { + self.hotshot.publish_transaction_async(tx).await + } + + /// performs the genesis initializaiton + pub async fn maybe_do_genesis_init(&self) { + let _anchor = self.storage(); + if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { + if anchor_leaf.view_number == TYPES::Time::genesis() { + let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); + let mut qc = QuorumCertificate::::genesis(); + qc.set_leaf_commitment(leaf.commit()); + let event = Event { + view_number: TYPES::Time::genesis(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![leaf]), + qc: Arc::new(qc), + block_size: None, + }, + }; + self.output_event_stream.publish(event).await; + } + } else { + // TODO (justin) this seems bad. I think we should hard error in this case?? + error!("Hotshot storage has no anchor leaf!"); + } + } + + /// begin consensus by sending a genesis event + /// Use `start_consensus` on `SystemContext` instead + #[deprecated] + pub async fn start_consensus_deprecated(&self) { + self.maybe_do_genesis_init().await; + } + + /// iterate through all events on a [`NodeImplementation`] and determine if the node finished + /// successfully + /// # Errors + /// Errors if unable to obtain storage + /// # Panics + /// Panics if the event stream is shut down while this is running + // pub async fn collect_round_events( + // &mut self, + // ) -> Result< + // ( + // Vec<>::Leaf>, + // QuorumCertificate>::Leaf>, + // ), + // HotShotError, + // > { + // // TODO we should probably do a view check + // // but we can do that later. It's non-obvious how to get the view number out + // // to check against + // + // // drain all events from this node + // let mut results = Ok((vec![], QuorumCertificate::genesis())); + // loop { + // // unwrap is fine here since the thing hasn't been shut down + // let event = self.next_event().await.unwrap(); + // match event.event { + // EventType::ReplicaViewTimeout { view_number: time } => { + // error!(?event, "Replica timed out!"); + // results = Err(HotShotError::ViewTimeoutError { + // view_number: time, + // state: RoundTimedoutState::TestCollectRoundEventsTimedOut, + // }); + // } + // EventType::Decide { leaf_chain, qc } => { + // results = Ok((leaf_chain.to_vec(), (*qc).clone())); + // } + // EventType::ViewFinished { view_number: _ } => return results, + // event => { + // debug!("recv-ed event {:?}", event); + // } + // } + // } + // } + + /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to + /// historical data + pub fn storage(&self) -> &I::Storage { + &self.storage + } + + /// Get the underyling consensus state for this [`SystemContext`] + pub fn get_consensus(&self) -> Arc>> { + self.hotshot.get_consensus() + } + + /// Block the underlying quorum (and committee) networking interfaces until node is + /// successfully initialized into the networks. + pub async fn wait_for_networks_ready(&self) { + self.hotshot.inner.exchanges.wait_for_networks_ready().await; + } + + /// Shut down the the inner hotshot and wait until all background threads are closed. + // pub async fn shut_down(mut self) { + // self.registry.shutdown_all().await + pub fn shut_down<'a, 'b>(&'a mut self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + boxed_sync(async move { self.registry.shutdown_all().await }) + } + + /// return the timeout for a view of the underlying `SystemContext` + pub fn get_next_view_timeout(&self) -> u64 { + self.hotshot.get_next_view_timeout() + } + + // Below is for testing only: + + /// Wrapper for `HotShotConsensusApi`'s `get_leader` function + #[allow(clippy::unused_async)] // async for API compatibility reasons + #[cfg(feature = "hotshot-testing")] + pub async fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + self.hotshot + .inner + .exchanges + .quorum_exchange() + .get_leader(view_number) + } + + /// Wrapper to get this node's public key + #[cfg(feature = "hotshot-testing")] + pub fn get_public_key(&self) -> TYPES::SignatureKey { + self.hotshot.inner.public_key.clone() + } + + /// Wrapper to get this node's current view + #[cfg(feature = "hotshot-testing")] + pub async fn get_current_view(&self) -> TYPES::Time { + self.hotshot.inner.consensus.read().await.cur_view + } + + /// Wrapper around `HotShotConsensusApi`'s `sign_validating_or_commitment_proposal` function + #[cfg(feature = "hotshot-testing")] + pub fn sign_validating_or_commitment_proposal( + &self, + leaf_commitment: &Commitment, + ) -> EncodedSignature { + let inner = self.hotshot.inner.clone(); + inner + .exchanges + .quorum_exchange() + .sign_validating_or_commitment_proposal::(leaf_commitment) + } + + /// create a yes message + #[cfg(feature = "hotshot-testing")] + pub fn create_yes_message( + &self, + justify_qc_commitment: Commitment>, + leaf_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + QuorumEx: ConsensusExchange< + TYPES, + Message, + Certificate = QuorumCertificate, + >, + { + let inner = self.hotshot.inner.clone(); + inner.exchanges.quorum_exchange().create_yes_message( + justify_qc_commitment, + leaf_commitment, + current_view, + vote_token, + ) + } + + /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function + #[cfg(feature = "hotshot-testing")] + pub async fn send_broadcast_consensus_message(&self, msg: I::ConsensusMessage) { + let _result = self + .hotshot + .send_broadcast_message(MessageKind::from_consensus_message(msg)) + .await; + } + + /// Wrapper around `HotShotConsensusApi`'s `send_direct_consensus_message` function + #[cfg(feature = "hotshot-testing")] + pub async fn send_direct_consensus_message( + &self, + msg: I::ConsensusMessage, + recipient: TYPES::SignatureKey, + ) { + let _result = self + .hotshot + .send_direct_message(MessageKind::from_consensus_message(msg), recipient) + .await; + } + + /// Get length of the replica's receiver channel + #[cfg(feature = "hotshot-testing")] + pub async fn get_replica_receiver_channel_len( + &self, + view_number: TYPES::Time, + ) -> Option { + use async_compatibility_layer::channel::UnboundedReceiver; + + let channel_map = self.hotshot.inner.channel_maps.0.vote_channel.read().await; + let chan = channel_map.channel_map.get(&view_number)?; + let receiver = chan.receiver_chan.lock().await; + UnboundedReceiver::len(&*receiver) + } + + /// Get length of the next leaders's receiver channel + #[cfg(feature = "hotshot-testing")] + pub async fn get_next_leader_receiver_channel_len( + &self, + view_number: TYPES::Time, + ) -> Option { + use async_compatibility_layer::channel::UnboundedReceiver; + + let channel_map = self + .hotshot + .inner + .channel_maps + .0 + .proposal_channel + .read() + .await; + let chan = channel_map.channel_map.get(&view_number)?; + + let receiver = chan.receiver_chan.lock().await; + UnboundedReceiver::len(&*receiver) + } +} diff --git a/libp2p-networking/.cargo/config b/libp2p-networking/.cargo/config new file mode 100644 index 0000000000..656e08b011 --- /dev/null +++ b/libp2p-networking/.cargo/config @@ -0,0 +1,2 @@ +[net] +git-fetch-with-cli = true \ No newline at end of file diff --git a/libp2p-networking/.gitignore b/libp2p-networking/.gitignore new file mode 100644 index 0000000000..949ddcb61c --- /dev/null +++ b/libp2p-networking/.gitignore @@ -0,0 +1,4 @@ +/target +/result +/outfile_0 +/out*.txt diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml new file mode 100644 index 0000000000..e8d9a8df46 --- /dev/null +++ b/libp2p-networking/Cargo.toml @@ -0,0 +1,78 @@ +[package] +description = "Libp2p Networking Layer" +name = "libp2p-networking" +version = "0.1.0" +edition = "2021" +authors = ["Espresso Systems "] +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + + +[features] +default = ["webui"] +webui = ["tide"] + +# # this only has effect on linux +# lossy_network = [ +# "nix", +# "netlink-packet-route", +# "netlink-packet-utils", +# "netlink-packet-core", +# "netlink-proto", +# "netlink-sys", +# "netlink-packet-generic", +# "rtnetlink", +# ] + +[dependencies] +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bincode = { workspace = true } +blake3 = { workspace = true } +color-eyre = "0.6.2" +custom_debug = { workspace = true } +derive_builder = "0.12.0" +either = { workspace = true } +futures = { workspace = true } +hotshot-utils = { path = "../utils" } +libp2p-swarm-derive = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-noise = { version = "0.43.0", default-features = false } +parking_lot = "0.12.1" +rand = { workspace = true } +serde = { workspace = true } +serde_json = "1.0.105" +snafu = { workspace = true } +tide = { version = "0.16", optional = true, default-features = false, features = [ + "h1-server", +] } +# TODO do we want this as non optional?? +tokio-stream = "0.1.14" +tracing = { workspace = true } +void = "1.0.2" + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +libp2p = { workspace = true, features = ["tokio"] } +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +libp2p = { workspace = true, features = ["async-std"] } +async-std = { workspace = true } + + +[target.'cfg(target_os = "linux")'.dependencies] +## lossy_network dependencies +nix = { version = "0.26.2", optional = true } +rtnetlink = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.1", features = [ + "smol_socket", +], default-features = false, optional = true } +netlink-packet-route = { git = "https://github.com/espressosystems/netlink.git", version = "0.11.0", optional = true } +netlink-packet-utils = { git = "https://github.com/espressosystems/netlink.git", version = "0.5.1", optional = true } +netlink-packet-core = { git = "https://github.com/espressosystems/netlink.git", version = "0.4.2", optional = true } +netlink-proto = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.2", optional = true } +netlink-sys = { git = "https://github.com/espressosystems/netlink.git", version = "0.8.2", features = [ + "smol_socket", +], optional = true } +netlink-packet-generic = { git = "https://github.com/espressosystems/netlink.git", version = "0.2.0", optional = true } + +[dev-dependencies] +clap = { version = "4.4", features = ["derive", "env"] } diff --git a/libp2p-networking/README.md b/libp2p-networking/README.md new file mode 100644 index 0000000000..fc7a00f656 --- /dev/null +++ b/libp2p-networking/README.md @@ -0,0 +1,111 @@ +# USAGE + +Networking library inteded for use with HotShot. Builds upon abstractions from libp2p-rs. + +## CLI Demo + +To get very verbose logging: + +```bash +RUST_LOG_OUTPUT=OUTFILE RUST_LOG="trace" cargo run --features=async-std-executor --release +``` + +The idea here is to spin up several nodes in a p2p network. These nodes can share messages with each other. + +``` +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- -p 1111" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/1111/quic-v1 -p 2222" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/2222/quic-v1 -p 3333" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/3333/quic-v1 -p 4444" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/4444/quic-v1 -p 5555" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/5555/quic-v1 -p 6666" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/6666/quic-v1 -p 7777" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/7777/quic-v1 -p 8888" +nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/8888/quic-v1 -p 9999" +``` + +At this point the idea is that each node will continue to attempt to connect to nodes +until it hits at least 5 peers. + +Use `Tab` to switch between messages and prompt. Press `Enter` to broadcast a message to all connected nodes. +Press `Right Arrow` to direct-send a message to a randomly selected peer. +Press `q` to quit the program from the messages view. + +## Counter Single Machine Tests + +Each node has its own counter. The idea behind these tests is to support "broadcast" messages and "direct" messages to increment each nodes counter. + +`cargo test --features=async-std-executor --release stress` + +spawns off five integration tests. + +- Two that uses gossipsub to broadcast a counter increment from one node to all other nodes +- Two where one node increments its counter, then direct messages all nodes to increment their counters +- One that intersperses both broadcast and increments. +- One that intersperses both broadcast and increments. +- Two that publishes entries to the DHT and checks that other nodes can access these entries. + +This can fail on MacOS (and linux) due to ["too many open files."](https://github.com/EspressoSystems/hotshot-networking-demo/issues/18) The fix is: + +```bash +ulimit -n 4096 +``` + +## Counter Multi-machine tests + +In these tests, there are three types of nodes. `Regular` nodes that limit the number of incoming connections, `Bootstrap` nodes that allow all connections, and `Conductor` nodes that all nodes (bootstrap and regular) connect to and periodically ping with their state. This "conductor" node instructs nodes in the swarm to increment their state either via broadcast or direct messages in the same fashion as the single machine tests. + +In the direct message case, the conductor will increment the state of a randomly chosen node, `i`. Then the conductor will direct message all other nodes to request node `i`'s counter and increment their counter to the value in `i`'s node. In the broadcast case, the conductor will increment the state of a randomly chose node, `i`, and tell `i` to broadcast this incremented state. + +In both cases, the test terminates as successful when the conductor receives the incremented state from all other nodes. Then, the conductor sends a special "kill" message to all known nodes and waits for them to disconnect. + +Metadata about the toplogy is currently read from an `identity_mapping.json` file that manually labels the type of node (bootstrap, regular, conductor). The conductor uses this to figure out information about all nodes in the network. The regular nodes use this to learn about their ip address and the addresses necessary to bootstrap onto the network. The boostrap nodes only use this to learn about their ip addresses. + +### Running counter multi-machine tests + +A sample invocation locally: + +```bash +# run each line in a separate terminal +nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8000 +nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8001 +nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8002 +nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8003 +nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8004 +``` + +### Network Emulation +One may introduce simulated network latency via the network emulationn queueing discipline. This is implemented in two ways: on what is assumed to be a AWS EC2 instance, and in a docker container. Example usage on AWS EC2 instance: + +```bash +# run each line in a separate AWS instance +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal +``` + +And on docker: + +```bash +# run each line in a separate Docker container instance +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker +nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker +``` + +On an AWS instance, a separate network namespace is created and connected to `ens5` via a network bridge, and a netem qdisc is introduced to the veth interface in the namespace. Within a docker container, a netem qdisc is added on interface `eth0`. + +### Network Emulation Dockerfile + +Usage: + +``` +docker build . -t libp2p-networking +# expose ports +docker run -P 8000:8000 -P 9000:9000 libp2p-networking +``` + diff --git a/libp2p-networking/examples/common/lossy_network.rs b/libp2p-networking/examples/common/lossy_network.rs new file mode 100644 index 0000000000..e5b207a327 --- /dev/null +++ b/libp2p-networking/examples/common/lossy_network.rs @@ -0,0 +1,579 @@ +use super::ExecutionEnvironment; +use async_compatibility_layer::art::async_spawn; +use futures::TryStreamExt; +use netlink_packet_route::DecodeError; +use nix::{ + errno::Errno, + sched::{setns, CloneFlags}, +}; +use rtnetlink::{ + new_connection_with_socket, sys::SmolSocket, AddressHandle, Handle, NetemQdisc, + NetworkNamespace, RouteHandle, TcNetemCorrelations, TcNetemCorrupt, TcNetemDelay, TcNetemQopt, + NETNS_PATH, +}; +use snafu::{ResultExt, Snafu}; +use std::{ + fs::File, + net::{AddrParseError, Ipv4Addr}, + os::unix::{io::IntoRawFd, prelude::AsRawFd}, + path::Path, + process::Command, +}; +use tracing::{error, info}; + +/// hardcoded default values +pub const LOSSY_QDISC: NetemQdisc = NetemQdisc { + config: TcNetemQopt { + limit: 10240, + loss: 2, + gap: 0, + duplicate: 0, + }, + delay: Some(TcNetemDelay { + delay: 500000, + stddev: 0, + }), + correlations: Some(TcNetemCorrelations { + delay_corr: 0, + loss_corr: 0, + dup_corr: 0, + }), + corruption: Some(TcNetemCorrupt { prob: 2, corr: 0 }), + reorder: None, +}; + +async fn del_link(handle: Handle, name: String) -> Result<(), LossyNetworkError> { + let mut links = handle.link().get().match_name(name.clone()).execute(); + if let Some(link) = links.try_next().await.context(RtNetlinkSnafu)? { + Ok(handle + .link() + .del(link.header.index) + .execute() + .await + .context(RtNetlinkSnafu)?) + } else { + error!("link {} not found", name); + Ok(()) + } +} + +/// represent the current network namespace +/// (useful if returning) +struct Netns { + cur: File, +} + +impl Netns { + /// creates new network namespace + /// and enters namespace + async fn new(path: &str) -> Result { + // create new ns + NetworkNamespace::add(path.to_string()) + .await + .context(RtNetlinkSnafu)?; + + // entry new ns + let ns_path = Path::new(NETNS_PATH); + let file = File::open(ns_path.join(path)).context(IoSnafu)?; + + Ok(Self { cur: file }) + } +} + +/// A description of a lossy network +#[derive(Clone, derive_builder::Builder, custom_debug::Debug)] +pub struct LossyNetwork { + /// Ethernet interface that is connected to WAN + eth_name: String, + /// metadata describing how to isolate. Only used when `env_type` is `Metal` + isolation_config: Option, + /// The network loss conditions + netem_config: NetemQdisc, + /// the execution environment + env_type: ExecutionEnvironment, +} + +impl LossyNetwork { + /// Create isolated environment in separate network namespace via network bridge + pub async fn isolate(&self) -> Result<(), LossyNetworkError> { + if let Some(ref isolation_config) = self.isolation_config { + isolation_config.isolate_netlink(&self.eth_name).await? + } + Ok(()) + } + + /// Delete isolated environment and network bridge + pub async fn undo_isolate(&self) -> Result<(), LossyNetworkError> { + if let Some(ref isolation_config) = self.isolation_config { + isolation_config + .undo_isolate_netlink(self.eth_name.clone()) + .await? + } + Ok(()) + } + + /// Create a network qdisc + pub async fn create_qdisc(&self) -> Result<(), LossyNetworkError> { + match self.env_type { + ExecutionEnvironment::Docker => { + self.create_qdisc_netlink(&self.eth_name).await?; + } + ExecutionEnvironment::Metal => match self.isolation_config { + Some(ref isolation_config) => { + self.create_qdisc_netlink(&isolation_config.veth2_name.clone()) + .await?; + } + None => return Err(LossyNetworkError::InvalidConfig), + }, + } + Ok(()) + } + + /// Internal invocation to netlink library + /// to create the qdisc + async fn create_qdisc_netlink(&self, veth: &str) -> Result<(), LossyNetworkError> { + let (connection, handle, _) = + new_connection_with_socket::().context(IoSnafu)?; + async_spawn(connection); + let mut links = handle.link().get().match_name(veth.to_string()).execute(); + if let Some(link) = links.try_next().await.context(RtNetlinkSnafu)? { + handle + .qdisc() + .add(link.header.index as i32) + .netem(self.netem_config.clone()) + .context(DecodeSnafu)? + .execute() + .await + .context(RtNetlinkSnafu)? + } else { + return Err(LossyNetworkError::InvalidConfig); + } + Ok(()) + } +} + +/// Hardcoded default values for current AWS setup +impl Default for IsolationConfig { + fn default() -> Self { + Self { + counter_ns: "COUNTER_NS".to_string(), + bridge_addr: "172.13.0.1".to_string(), + bridge_name: "br0".to_string(), + veth_name: "veth1".to_string(), + veth2_name: "veth2".to_string(), + veth2_addr: "172.13.0.2".to_string(), + } + } +} + +/// A description of how the network should be isolated +#[derive(Clone, derive_builder::Builder, custom_debug::Debug)] +#[builder(default)] +pub struct IsolationConfig { + /// the network namespace name to create + counter_ns: String, + /// the bridge ip address + bridge_addr: String, + /// the bridge name + bridge_name: String, + /// the virtual ethernet interface name + /// that lives in the default/root network namespace + veth_name: String, + /// the virtual ethernet interface name + /// that lives in `counter_ns` + veth2_name: String, + /// the virtual ethernet interface ip address + /// that lives in `counter_ns` + veth2_addr: String, +} + +impl IsolationConfig { + /// Prepares server for latency by: + /// - creating a separate network namespace denoted `counter_ns` + /// - creating a virtual ethernet device (veth2) in this namespace + /// - bridging the virtual ethernet device within COUNTER_NS to the default/root network namespace + /// - adding firewall rules to allow traffic to flow between the network bridge and outside world + /// - execute the demo inside network namespace + async fn isolate_netlink(&self, eth_name: &str) -> Result<(), LossyNetworkError> { + let (connection, handle, _) = + new_connection_with_socket::().context(IoSnafu)?; + async_spawn(connection); + + // create new netns + let counter_ns_name = self.counter_ns.clone(); + let counter_ns = Netns::new(&counter_ns_name).await?; + + // create veth interfaces + let veth = self.veth_name.clone(); + let veth_2 = self.veth2_name.clone(); + + handle + .link() + .add() + .veth(veth.clone(), veth_2.clone()) + .execute() + .await + .context(RtNetlinkSnafu)?; + let veth_idx = handle + .link() + .get() + .match_name(veth.clone()) + .execute() + .try_next() + .await + .context(RtNetlinkSnafu)? + .ok_or(LossyNetworkError::InvalidConfig)? + .header + .index; + let veth_2_idx = handle + .link() + .get() + .match_name(veth_2.clone()) + .execute() + .try_next() + .await + .context(RtNetlinkSnafu)? + .ok_or(LossyNetworkError::InvalidConfig)? + .header + .index; + + // set interfaces up + handle + .link() + .set(veth_idx) + .up() + .execute() + .await + .context(RtNetlinkSnafu)?; + handle + .link() + .set(veth_2_idx) + .up() + .execute() + .await + .context(RtNetlinkSnafu)?; + + // move veth_2 to counter_ns + handle + .link() + .set(veth_2_idx) + .setns_by_fd(counter_ns.cur.as_raw_fd()) + .execute() + .await + .context(RtNetlinkSnafu)?; + + let bridge_name = self.bridge_name.clone(); + + handle + .link() + .add() + .bridge(bridge_name.clone()) + .execute() + .await + .context(RtNetlinkSnafu)?; + let bridge_idx = handle + .link() + .get() + .match_name(bridge_name.clone()) + .execute() + .try_next() + .await + .context(RtNetlinkSnafu)? + .ok_or(LossyNetworkError::InvalidConfig)? + .header + .index; + + // set bridge up + handle + .link() + .set(bridge_idx) + .up() + .execute() + .await + .context(RtNetlinkSnafu)?; + + // set veth master to bridge + handle + .link() + .set(veth_idx) + .master(bridge_idx) + .execute() + .await + .context(RtNetlinkSnafu)?; + + // add ip address to bridge + let bridge_addr = self + .bridge_addr + .parse::() + .context(AddrParseSnafu)?; + let bridge_range = 16; + AddressHandle::new(handle) + .add(bridge_idx, std::net::IpAddr::V4(bridge_addr), bridge_range) + .execute() + .await + .context(RtNetlinkSnafu)?; + + // switch to counter_ns + setns(counter_ns.cur.as_raw_fd(), CloneFlags::CLONE_NEWNET).context(SetNsSnafu)?; + + // get connection metadata in new net namespace + let (connection, handle, _) = + new_connection_with_socket::().context(IoSnafu)?; + async_spawn(connection); + + // set lo interface to up + let lo_idx = handle + .link() + .get() + .match_name("lo".to_string()) + .execute() + .try_next() + .await + .context(RtNetlinkSnafu)? + .ok_or(LossyNetworkError::InvalidConfig)? + .header + .index; + handle + .link() + .set(lo_idx) + .up() + .execute() + .await + .context(RtNetlinkSnafu)?; + + // set veth2 to up + let veth_2_idx = handle + .link() + .get() + .match_name(veth_2) + .execute() + .try_next() + .await + .context(RtNetlinkSnafu)? + .ok_or(LossyNetworkError::InvalidConfig)? + .header + .index; + handle + .link() + .set(veth_2_idx) + .up() + .execute() + .await + .context(RtNetlinkSnafu)?; + + // set veth2 address + let veth_2_addr = self + .veth2_addr + .parse::() + .context(AddrParseSnafu)?; + let veth_2_range = 16; + AddressHandle::new(handle.clone()) + .add(veth_2_idx, veth_2_addr, veth_2_range) + .execute() + .await + .context(RtNetlinkSnafu)?; + + // add route + let route = RouteHandle::new(handle).add(); + route + .v4() + .gateway(bridge_addr) + .execute() + .await + .context(RtNetlinkSnafu)?; + self.enable_firewall(eth_name).await?; + + Ok(()) + } + + /// Enables firewall rules to allow network bridge to function properly (e.g. no packets + /// dropped). Assumes firewall is via iptables + async fn enable_firewall(&self, eth_name: &str) -> Result<(), LossyNetworkError> { + // accept traffic on bridge + // iptables -A FORWARD -o ens5 -i br0 -j ACCEPT + info!( + "{:?}", + Command::new("iptables") + .args(["-A", "FORWARD", "-o", eth_name, "-i", "br0", "-j", "ACCEPT"]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -A FORWARD -i ens5 -o br0 -j ACCEPT + info!( + "{:?}", + Command::new("iptables") + .args(["-A", "FORWARD", "-i", eth_name, "-o", "br0", "-j", "ACCEPT"]) + .output() + .context(IoSnafu)? + .status + ); + // NAT + // iptables -t nat -A POSTROUTING -s 172.20.0.1 -o ens5 -j MASQUERADE + info!( + "{:?}", + Command::new("iptables") + .args([ + "-t", + "nat", + "-A", + "POSTROUTING", + "-s", + &self.bridge_addr, + "-o", + "ens5", + "-j", + "MASQUERADE" + ]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -t nat -A POSTROUTING -o br0 -j MASQUERADE + info!( + "{:?}", + Command::new("iptables") + .args([ + "-t", + "nat", + "-A", + "POSTROUTING", + "-o", + &self.bridge_name, + "-j", + "MASQUERADE" + ]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -t nat -A POSTROUTING -s 172.20.0.0/16 -j MASQUERADE + info!( + "{:?}", + Command::new("iptables") + .args([ + "-t", + "nat", + "-A", + "POSTROUTING", + "-s", + &format!("{}/16", &self.bridge_addr), + "-j", + "MASQUERADE" + ]) + .output() + .context(IoSnafu)? + .status + ); + + Ok(()) + } + + /// tears down all created interfaces + /// deletes all iptables rules + /// deletes namespace + async fn undo_isolate_netlink(&self, eth_name: String) -> Result<(), LossyNetworkError> { + let root_ns_fd = File::open("/proc/1/ns/net").context(IoSnafu)?.into_raw_fd(); + setns(root_ns_fd, CloneFlags::CLONE_NEWNET).context(SetNsSnafu)?; + NetworkNamespace::del(self.counter_ns.to_string()) + .await + .context(RtNetlinkSnafu)?; + let (connection, handle, _) = + new_connection_with_socket::().context(IoSnafu)?; + async_spawn(connection); + del_link(handle, self.bridge_name.clone()).await?; + // delete creates iptables rules + self.undo_firewall(eth_name).await?; + Ok(()) + } + + /// deletes created iptables rules + async fn undo_firewall(&self, eth_name: String) -> Result<(), LossyNetworkError> { + // iptables -D FORWARD -o ens5 -i br0 -j ACCEPT + info!( + "{:?}", + Command::new("iptables") + .args(["-D", "FORWARD", "-o", ð_name, "-i", "br0", "-j", "ACCEPT"]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -D FORWARD -i ens5 -o br0 -j ACCEPT + info!( + "{:?}", + Command::new("iptables") + .args(["-D", "FORWARD", "-i", ð_name, "-o", "br0", "-j", "ACCEPT"]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -t nat -D POSTROUTING -s $bridge_addr -o ens5 -j MASQUERADE + info!( + "{:?}", + Command::new("iptables") + .args([ + "-t", + "nat", + "-D", + "POSTROUTING", + "-s", + &self.bridge_addr, + "-o", + "ens5", + "-j", + "MASQUERADE" + ]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -t nat -D POSTROUTING -o br0 -j MASQUERADE + info!( + "{:?}", + Command::new("iptables") + .args([ + "-t", + "nat", + "-D", + "POSTROUTING", + "-o", + &self.bridge_name, + "-j", + "MASQUERADE" + ]) + .output() + .context(IoSnafu)? + .status + ); + // iptables -t nat -D POSTROUTING -s $bridge_addr/16 -j MASQUERADE + info!( + "{:?}", + Command::new("iptables") + .args([ + "-t", + "nat", + "-D", + "POSTROUTING", + "-s", + &format!("{}/16", self.bridge_addr), + "-j", + "MASQUERADE" + ]) + .output() + .context(IoSnafu)? + .status + ); + Ok(()) + } +} + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum LossyNetworkError { + RtNetlink { source: rtnetlink::Error }, + Io { source: std::io::Error }, + SetNs { source: Errno }, + InvalidConfig, + Decode { source: DecodeError }, + AddrParse { source: AddrParseError }, +} diff --git a/libp2p-networking/examples/common/mod.rs b/libp2p-networking/examples/common/mod.rs new file mode 100644 index 0000000000..8be084db29 --- /dev/null +++ b/libp2p-networking/examples/common/mod.rs @@ -0,0 +1,807 @@ +#[cfg(feature = "webui")] +pub mod web; + +#[cfg(all(feature = "lossy_network", target_os = "linux"))] +pub mod lossy_network; + +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +#[cfg(async_executor_impl = "async-std")] +use async_std::prelude::StreamExt; +#[cfg(async_executor_impl = "tokio")] +use tokio_stream::StreamExt; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + +use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + channel::oneshot, +}; +use clap::{Args, Parser}; +use libp2p::{multiaddr, request_response::ResponseChannel, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_networking::network::{ + behaviours::direct_message_codec::DirectMessageResponse, deserialize_msg, + network_node_handle_error::NodeConfigSnafu, spin_up_swarm, NetworkEvent, + NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, +}; +use rand::{ + distributions::Bernoulli, prelude::Distribution, seq::IteratorRandom, thread_rng, RngCore, +}; +use serde::{Deserialize, Serialize}; +use snafu::{ResultExt, Snafu}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + str::FromStr, + sync::Arc, + time::{Duration, SystemTime}, +}; +use tracing::{debug, error, info, instrument, warn}; + +#[cfg(feature = "webui")] +use std::net::SocketAddr; + +// number of success responses we need in order +// to increment the round number. +const SUCCESS_NUMBER: usize = 15; + +/// probability numerator that recv-er node sends back timing stats +const SEND_NUMERATOR: u32 = 40; +/// probaiblity denominator that recv-er node sends back timing states +const SEND_DENOMINATOR: u32 = 100; + +/// the timeout before ending rounding +const TIMEOUT: Duration = Duration::from_secs(500); + +/// timeout before failing to broadcast +const BROADCAST_TIMEOUT: Duration = Duration::from_secs(10); + +// we want message size of 32kb +// so we pad with a randomly generated number +// in order to do this use: +// 8 bytes per u64 +// 32kb = 32000 bytes +// so 32000/8 usizes +const PADDING_SIZE: usize = 32000 / 8; + +pub type CounterState = Epoch; +pub type Epoch = u32; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum EpochType { + BroadcastViaGossip, + BroadcastViaDM, + DMViaDM, +} + +#[derive(Debug, Clone)] +pub struct ConductorState { + ready_set: HashSet, + current_epoch: EpochData, + previous_epochs: HashMap, +} + +#[derive(Debug, Clone)] +pub struct EpochData { + epoch_idx: Epoch, + epoch_type: EpochType, + node_states: HashMap, + message_durations: Vec, +} + +impl ConductorState { + /// returns time per data + pub fn aggregate_epochs(&self, num_nodes: usize) -> (Duration, usize) { + let tmp_entry = NormalMessage { + req: CounterRequest::StateRequest, + relay_to_conductor: false, + sent_ts: SystemTime::now(), + epoch: 0, + padding: vec![0; PADDING_SIZE], + }; + let data_size = std::mem::size_of_val(&tmp_entry.req) + + std::mem::size_of_val(&tmp_entry.relay_to_conductor) + + std::mem::size_of_val(&tmp_entry.sent_ts) + + std::mem::size_of_val(&tmp_entry.epoch) + + PADDING_SIZE * 8; + + let mut total_time = Duration::ZERO; + let mut total_data = 0; + for epoch_data in self.previous_epochs.values() { + if epoch_data.message_durations.iter().len() != num_nodes { + error!( + "didn't match! expected {} got {} ", + num_nodes, + epoch_data.message_durations.iter().len() + ); + } + if let Some(max_prop_time) = epoch_data.message_durations.iter().max() { + info!("data size is {}", data_size); + total_time += *max_prop_time; + total_data += data_size; + } else { + error!("No timing data available for this round!"); + } + } + (total_time, total_data) + } +} + +impl EpochData { + pub fn increment_epoch(&mut self) { + self.epoch_idx += 1; + } +} + +impl Default for ConductorState { + fn default() -> Self { + Self { + ready_set: Default::default(), + current_epoch: EpochData { + epoch_idx: 0, + epoch_type: EpochType::BroadcastViaGossip, + node_states: Default::default(), + message_durations: Default::default(), + }, + previous_epochs: Default::default(), + } + } +} + +impl ConductorState { + /// Increment conductor to the next epoch + pub fn complete_round(&mut self, next_epoch_type: EpochType) { + let current_epoch = self.current_epoch.clone(); + self.previous_epochs + .insert(current_epoch.epoch_idx, current_epoch); + self.current_epoch.epoch_type = next_epoch_type; + self.current_epoch.message_durations = Default::default(); + self.current_epoch.increment_epoch(); + } +} + +#[cfg(feature = "webui")] +impl web::WebInfo for ConductorState { + type Serialized = serde_json::Value; + + fn get_serializable(&self) -> Self::Serialized { + let mut map = serde_json::map::Map::new(); + for (peer, state) in self.current_epoch.node_states.iter() { + map.insert(peer.to_base58(), (*state).into()); + } + serde_json::Value::Object(map) + } +} + +#[cfg(feature = "webui")] +impl web::WebInfo for (CounterState, Option) { + type Serialized = (u32, Option); + fn get_serializable(&self) -> Self::Serialized { + *self + } +} + +/// Normal message. Sent amongst [`NetworkNodeType::Regular`] and [`NetworkNodeType::Bootstrap`] nodes +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub enum CounterRequest { + /// Request state + StateRequest, + /// Reply with state + StateResponse(CounterState), + /// kill node + Kill, +} + +/// Message sent between non-[`NetworkNodeType::Conductor`] nodes +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct NormalMessage { + /// timestamp when message was sent + sent_ts: SystemTime, + /// whether or not message shuld be relayed to conductor + relay_to_conductor: bool, + /// the underlying request the recv-ing node should take + req: CounterRequest, + /// the epoch the message was sent on + epoch: Epoch, + /// arbitrary amount of padding to vary message length + padding: Vec, +} + +/// A message sent and recv-ed by a ['NetworkNodeType::Regular'] or ['NetworkNodeType::Bootstrap'] node +/// that is to be relayed back to a [`NetworkNodeType::Conductor`] node +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct RelayedMessage { + /// peer + from_peer: PeerId, + /// time message took to propagate from sender to recv-er + duration: Duration, + /// the requeset being made + req: CounterRequest, + /// the epoch the request was made on + epoch: Epoch, +} + +/// A message sent and recv-ed by a ['NetworkNodeType::Regular'] or ['NetworkNodeType::Bootstrap'] node +/// that is to be relayed back to a [`NetworkNodeType::Conductor`] node +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct ConductorMessage { + /// the request the recv-ing node should make + req: CounterRequest, + state: Epoch, + /// the type of broadcast (direct or broadcast) + broadcast_type: ConductorMessageMethod, +} + +/// overall message +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub enum Message { + /// message to end from a peer to a peer + Normal(NormalMessage), + /// messaged recved and relayed to conductor + Relayed(RelayedMessage), + /// conductor requests that message is sent to node + /// that the node must send to other node(s) + Conductor(ConductorMessage), + // announce the conductor + ConductorIdIs(PeerId), + /// recv-ed the conductor id + RecvdConductor, + DummyRecv, +} + +impl NormalMessage { + /// convert a normal message into a message to relay to conductor + pub fn normal_to_relayed(&self, peer_id: PeerId) -> RelayedMessage { + let recv_ts = SystemTime::now(); + let elapsed_time = recv_ts + .duration_since(self.sent_ts) + .unwrap_or(Duration::MAX); + RelayedMessage { + from_peer: peer_id, + duration: elapsed_time, + req: self.req.clone(), + epoch: self.epoch, + } + } +} + +/// ways to send messages between nodes +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub enum ConductorMessageMethod { + /// broadcast message to all nodes + Broadcast, + /// direct message [`PeerId`] + DirectMessage(PeerId), +} + +/// handler for non-conductor nodes for normal messages +pub async fn handle_normal_msg( + handle: Arc)>>, + msg: NormalMessage, + // in case we need to reply to direct message + chan: Option>, +) -> Result<(), NetworkNodeHandleError> { + debug!("node={} handling normal msg {:?}", handle.id(), msg); + // send reply logic + match msg.req { + // direct message only + CounterRequest::StateResponse(c) => { + handle + .modify_state(|s| { + debug!( + "node={} performing modify_state with c={c}, s={:?}", + handle.id(), + s + ); + if c >= s.0 { + s.0 = c + } + }) + .await; + if let Some(chan) = chan { + handle.direct_response(chan, &Message::DummyRecv).await?; + } + } + // only as a response + CounterRequest::StateRequest => { + if let Some(chan) = chan { + let state = handle.state().await; + let data = { + let mut rng = thread_rng(); + vec![rng.next_u64(); PADDING_SIZE] + }; + let response = Message::Normal(NormalMessage { + sent_ts: SystemTime::now(), + relay_to_conductor: true, + req: CounterRequest::StateResponse(state.0), + epoch: 0, + padding: data, + }); + handle.direct_response(chan, &response).await?; + } else { + error!("Error deserializing, channel closed!"); + } + } + CounterRequest::Kill => { + handle.shutdown().await?; + } + } + // relay the message to conductor + if msg.relay_to_conductor { + info!("Recv-ed message. Deciding if should relay to conductor."); + if let Some(conductor_id) = handle.state().await.1 { + // do a dice roll here to decide if we want to keep the thing + if Bernoulli::from_ratio(SEND_NUMERATOR, SEND_DENOMINATOR) + .unwrap() + .sample(&mut rand::thread_rng()) + { + info!("Deciding to relay to conductor"); + let relayed_msg = Message::Relayed(msg.normal_to_relayed(handle.peer_id())); + handle.direct_request(conductor_id, &relayed_msg).await?; + } + } else { + error!("We have a message to send to the conductor, but we do not know who the conductor is!"); + } + } + Ok(()) +} + +/// event handler for events from the swarm +/// - updates state based on events received +/// - replies to direct messages +#[instrument] +pub async fn regular_handle_network_event( + event: NetworkEvent, + handle: Arc)>>, +) -> Result<(), NetworkNodeHandleError> { + debug!("node={} handling event {:?}", handle.id(), event); + + use NetworkEvent::*; + match event { + IsBootstrapped => {} + GossipMsg(m, _) | DirectResponse(m, _) => { + if let Ok(msg) = deserialize_msg::(&m) { + info!("regular msg recved: {:?}", msg.clone()); + match msg { + Message::DummyRecv => { }, + Message::ConductorIdIs(peerid) => { + handle.modify_state(|s| { + s.1 = Some(peerid); + }).await; + } + Message::Normal(msg) => { + handle_normal_msg(handle.clone(), msg, None).await?; + } + // do nothing. We only expect to be reached out to by the conductor via + // direct message + Message::Conductor(..) /* only the conductor expects to receive a relayed message */ | Message::Relayed(..) => { } + // only sent to conductor node + Message::RecvdConductor => { + unreachable!(); + } + } + } else { + info!("FAILED TO PARSE GOSSIP OR DIRECT RESPONSE MESSAGE"); + } + } + DirectRequest(msg, _peer_id, chan) => { + if let Ok(msg) = deserialize_msg::(&msg) { + info!("from pid {:?} msg recved: {:?}", msg.clone(), _peer_id); + match msg { + Message::DummyRecv => { + handle.direct_response(chan, &Message::DummyRecv).await?; + } + // this is only done via broadcast + Message::ConductorIdIs(_) + // these are only sent to the conductor + | Message::Relayed(_) | Message::RecvdConductor => + { + handle.direct_response(chan, &Message::DummyRecv).await?; + } + Message::Normal(msg) => { + handle_normal_msg(handle.clone(), msg, Some(chan)).await?; + } + Message::Conductor(msg) => { + let data = { + let mut rng = thread_rng(); + vec![rng.next_u64(); PADDING_SIZE] + }; + let response = + Message::Normal(NormalMessage { + sent_ts: SystemTime::now(), + relay_to_conductor: true, + req: msg.req, + epoch: msg.state, + padding: data, + }); + match msg.broadcast_type { + // if the conductor says to broadcast + // perform broadcast with gossip protocol + ConductorMessageMethod::Broadcast => { + handle.gossip("global".to_string(), &response).await?; + } + ConductorMessageMethod::DirectMessage(pid) => { + handle.direct_request( + pid, + &response + ).await?; + } + } + handle.direct_response(chan, &Message::DummyRecv).await?; + } + } + } else { + } + } + } + Ok(()) +} + +/// convert node string into multi addr +pub fn parse_node(s: &str) -> Result { + let mut i = s.split(':'); + let ip = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; + let port = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; + Multiaddr::from_str(&format!("/ip4/{ip}/udp/{port}/quic-v1")) +} + +#[cfg(feature = "webui")] +/// This will be flattened into CliOpt +#[derive(Args, Debug)] +pub struct WebUi { + /// Doc comment + #[arg(long = "webui")] + pub webui_addr: Option, +} + +#[cfg(not(feature = "webui"))] +/// This will be flattened into CliOpt +#[derive(Args, Debug)] +pub struct WebUi {} + +#[cfg(all(feature = "lossy_network", target_os = "linux"))] +/// This will be flattened into CliOpt +#[derive(Args, Debug)] +pub struct EnvType { + /// Doc comment + #[arg(long = "env")] + pub env_type: ExecutionEnvironment, +} +#[cfg(not(all(feature = "lossy_network", target_os = "linux")))] +/// This will be flattened into CliOpt +#[derive(Args, Debug)] +pub struct EnvType {} + +#[derive(Parser, Debug)] +pub struct CliOpt { + /// list of bootstrap node addrs + #[arg(long, value_parser = parse_node)] + pub to_connect_addrs: Vec, + /// total number of nodes + #[arg(long)] + pub num_nodes: usize, + /// the role this node plays + #[arg(long)] + pub node_type: NetworkNodeType, + /// internal interface to bind to + #[arg(long, value_parser = parse_node)] + pub bound_addr: Multiaddr, + /// If this value is set, a webserver will be spawned on this address with debug info + #[arg(long, value_parser = parse_node)] + pub conductor_addr: Multiaddr, + + #[command(flatten)] + pub webui_delegate: WebUi, + + #[command(flatten)] + pub env_type_delegate: EnvType, + + /// number of rounds of gossip + #[arg(long)] + pub num_gossip: u32, +} + +/// The execution environemnt type +#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[cfg(all(feature = "lossy_network", target_os = "linux"))] +pub enum ExecutionEnvironment { + /// execution environment is within docker + Docker, + /// execution environment is on metal + Metal, +} + +#[cfg(all(feature = "lossy_network", target_os = "linux"))] +impl FromStr for ExecutionEnvironment { + type Err = String; + + fn from_str(input: &str) -> Result { + match input { + "Docker" => Ok(ExecutionEnvironment::Docker), + "Metal" => Ok(ExecutionEnvironment::Metal), + _ => Err( + "Couldn't parse execution environment. Must be one of Metal, Docker".to_string(), + ), + } + } +} + +/// ['bootstrap_addrs`] list of bootstrap multiaddrs. Needed to bootstrap into network +/// [`num_nodes`] total number of nodes. Needed to create pruning rules +/// [`node_type`] the type of this node +/// ['bound_addr`] the address to bind to +pub async fn start_main(opts: CliOpt) -> Result<(), CounterError> { + setup_logging(); + setup_backtrace(); + let bootstrap_nodes = opts + .to_connect_addrs + .iter() + .cloned() + .map(|a| (None, a)) + .collect::>(); + + match opts.node_type { + NetworkNodeType::Conductor => { + let config = NetworkNodeConfigBuilder::default() + .bound_addr(Some(opts.bound_addr)) + .node_type(NetworkNodeType::Conductor) + .build() + .context(NodeConfigSnafu) + .context(HandleSnafu)?; + let handle = Arc::new( + NetworkNodeHandle::::new(config.clone(), 0) + .await + .context(HandleSnafu)?, + ); + + #[cfg(feature = "webui")] + if let Some(addr) = opts.webui_delegate.webui_addr { + web::spawn_server(Arc::clone(&handle), addr); + } + + spin_up_swarm(TIMEOUT, bootstrap_nodes, config, 0, &handle) + .await + .context(HandleSnafu)?; + info!("spun up!"); + + let handler_fut = handle.spawn_handler(conductor_handle_network_event).await; + info!("spawned handler"); + + handle.notify_webui().await; + + let conductor_peerid = handle.peer_id(); + + let (s, _r) = oneshot::(); + + async_spawn({ + let handle = handle.clone(); + // the "conductor id" + // periodically say "ignore me!" + async move { + loop { + // must wait for the listener to start + let msg = Message::ConductorIdIs(conductor_peerid); + if let Err(e) = handle + .gossip("global".to_string(), &msg) + .await + .context(HandleSnafu) + { + error!("Error {:?} gossiping the conductor ID to cluster.", e); + } + async_sleep(Duration::from_secs(1)).await; + } + } + }); + + // For now, just do a sleep waiting for nodes to spin up. It's easier. + async_sleep(Duration::from_secs(10)).await; + + // kill conductor id broadcast thread + s.send(true); + + for i in 0..opts.num_gossip { + info!("iteration i: {}", i); + handle + .modify_state(|s| s.current_epoch.epoch_type = EpochType::BroadcastViaGossip) + .await; + conductor_broadcast(BROADCAST_TIMEOUT, handle.clone()) + .await + .context(HandleSnafu)?; + handle + .modify_state(|s| s.complete_round(EpochType::BroadcastViaGossip)) + .await; + } + handler_fut.await; + + #[cfg(feature = "benchmark-output")] + { + trace!("result raw: {:?}", handle.state().await); + trace!( + "result: {:?}", + handle.state().await.aggregate_epochs(opts.num_nodes) + ); + } + } + // regular and bootstrap nodes + NetworkNodeType::Regular | NetworkNodeType::Bootstrap => { + let config = NetworkNodeConfigBuilder::default() + .bound_addr(Some(opts.bound_addr)) + .node_type(opts.node_type) + .build() + .context(NodeConfigSnafu) + .context(HandleSnafu)?; + + let node = NetworkNodeHandle::<(CounterState, Option)>::new(config.clone(), 0) + .await + .context(HandleSnafu)?; + + let handle = Arc::new(node); + #[cfg(feature = "webui")] + if let Some(addr) = opts.webui_delegate.webui_addr { + web::spawn_server(Arc::clone(&handle), addr); + } + + spin_up_swarm(TIMEOUT, bootstrap_nodes, config, 0, &handle) + .await + .context(HandleSnafu)?; + let handler_fut = handle.spawn_handler(regular_handle_network_event).await; + handler_fut.await; + // while !handle.is_killed().await { + // async_sleep(Duration::from_millis(100)).await; + // } + } + } + + Ok(()) +} + +pub async fn conductor_broadcast( + timeout: Duration, + handle: Arc>, +) -> Result<(), NetworkNodeHandleError> { + let new_state = handle.state().await.current_epoch.epoch_idx; + // nOTE it's probably easier to pass in a hard coded list of PIDs + // from test.py orchestration + let mut connected_peers = handle.connected_pids().await.unwrap(); + connected_peers.remove(&handle.peer_id()); + + let chosen_peer = *connected_peers.iter().choose(&mut thread_rng()).unwrap(); + + let request = CounterRequest::StateResponse(new_state); + + // tell the "leader" to do a "broadcast" message using gosisp protocol + let msg = Message::Conductor(ConductorMessage { + state: new_state, + req: request.clone(), + broadcast_type: ConductorMessageMethod::Broadcast, + }); + + let mut res_fut = Box::pin( + handle.state_wait_timeout_until_with_trigger(timeout, |state| { + state + .current_epoch + .node_states + .iter() + .filter(|(_, &s)| s >= new_state) + .count() + >= SUCCESS_NUMBER + }), + ); + + // wait for ready signal + res_fut.next().await.unwrap().unwrap(); + + // send direct message from conductor to leader to do broadcast + handle + .direct_request(chosen_peer, &msg) + .await + .context(HandleSnafu) + .unwrap(); + + if res_fut.next().await.unwrap().is_err() { + error!( + "TIMED OUT with {} msgs recv-ed", + handle.state().await.current_epoch.message_durations.len() + ); + } + + Ok(()) +} + +/// network event handler for conductor +#[instrument] +pub async fn conductor_handle_network_event( + event: NetworkEvent, + handle: Arc>, +) -> Result<(), NetworkNodeHandleError> { + use NetworkEvent::*; + match event { + IsBootstrapped => {} + GossipMsg(_m, _t) => { + // this node isn't going to participate in gossip/dms to update state + // it's only purpose is to recv relayed messages + } + DirectRequest(m, peer_id, chan) => { + info!("recv: {:?}", m); + async_spawn({ + let handle = handle.clone(); + async move { + handle.direct_response(chan, &Message::DummyRecv).await?; + Result::<(), NetworkNodeHandleError>::Ok(()) + } + }); + info!("finished spawning now deserializing"); + if let Ok(msg) = deserialize_msg::(&m) { + info!("desrialized MESSAGE IS {:?}", msg); + match msg { + Message::Relayed(msg) => { + match handle.state().await.current_epoch.epoch_type { + EpochType::BroadcastViaGossip => { + if let CounterRequest::StateResponse(..) = msg.req { + handle + .modify_state(|s| { + if msg.epoch >= s.current_epoch.epoch_idx { + s.current_epoch.message_durations.push(msg.duration) + } + + if msg.epoch > s.current_epoch.epoch_idx { + warn!("listening on epcoh {:?} but recv message on epoch {:?}", s.current_epoch.epoch_idx, msg.epoch); + } + + }) + .await; + let _ = handle.prune_peer(msg.from_peer).await; + } + } + EpochType::DMViaDM => { + info!("modifying state DM VIA DM {:?}", msg); + // NOTE maybe should check epoch + if let CounterRequest::StateRequest = msg.req { + handle + .modify_state(|s| { + s.current_epoch.message_durations.push(msg.duration); + }) + .await; + } + } + EpochType::BroadcastViaDM => { + unimplemented!("BroadcastViaDM is currently unimplemented"); + } + } + if let CounterRequest::StateResponse(state) = msg.req { + handle + .modify_state(|s| { + s.current_epoch.node_states.insert(peer_id, state); + }) + .await; + } + } + Message::RecvdConductor => { + handle + .modify_state(|s| { + s.ready_set.insert(peer_id); + }) + .await; + } + msg => { + info!("Unexpected message {:?}", msg); + + /* Do nothing. Conductor doesn't care about these messages. */ + } + } + } else { + error!("failed to deserialize msg"); + } + } + DirectResponse(_m, _peer_id) => { /* nothing to do here */ } + } + Ok(()) +} + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum CounterError { + Handle { source: NetworkNodeHandleError }, + FileRead { source: std::io::Error }, + MissingBootstrap, +} diff --git a/libp2p-networking/examples/common/web.rs b/libp2p-networking/examples/common/web.rs new file mode 100644 index 0000000000..bbb635de11 --- /dev/null +++ b/libp2p-networking/examples/common/web.rs @@ -0,0 +1,114 @@ +use async_compatibility_layer::art::async_spawn; +use libp2p_networking::network::NetworkNodeHandle; +use std::{net::SocketAddr, sync::Arc}; +use tracing::{debug, error, info}; + +/// Spawn a web server on the given `addr`. +/// This web server will host the static HTML page `/web/index.html` and expose a `sse` endpoint. +/// This `sse` endpoint will send status updates to the connected clients whenever `NetworkNodeHandle::state_changed` triggers. +/// +/// # Links +/// - SSE on wikipedia: +/// - SEE in `tide`: +pub fn spawn_server(state: Arc>, addr: SocketAddr) +where + S: WebInfo + Send + 'static + Clone, +{ + let mut tide = tide::with_state(state); + // Unwrap this in the calling thread so that if it fails we fail completely + // instead of not knowing why the web UI does not work + tide.at("/").get(|_| async move { + Ok(tide::Response::builder(200) + .content_type(tide::http::mime::HTML) + .body(include_str!("../../web/index.html")) + .build()) + }); + tide.at("/sse").get(tide::sse::endpoint( + |req: tide::Request>>, sender| async move { + let peer_addr = req.peer_addr(); + debug!(?peer_addr, "Web client connected, sending initial state"); + + let state = Arc::clone(req.state()); + network_state::State::new(&state) + .await + .send(&sender) + .await?; + + // Register a `Sender<()>` with the `NetworkNodeHandle` so we get notified when it changes + let mut receiver = state.register_webui_listener().await; + + while let Ok(()) = receiver.recv().await { + // TODO: I think this will not work as this `.lock` will conflict with the other lock, but we'll see + if let Err(e) = network_state::State::new(&state).await.send(&sender).await { + debug!(?peer_addr, ?e, "Could not send to client, aborting"); + break; + } + } + Ok(()) + }, + )); + async_spawn(async move { + info!(?addr, "Web UI listening on"); + if let Err(e) = tide.listen(addr).await { + error!(?e, "Web UI crashed, this is a bug"); + } + }); +} + +mod network_state { + + use libp2p_identity::PeerId; + use libp2p_networking::network::{NetworkNodeConfig, NetworkNodeHandle}; + + #[derive(serde::Serialize)] + pub struct State { + pub network_config: NetworkConfig, + pub state: S, + } + + #[derive(serde::Serialize)] + pub struct NetworkConfig { + pub node_type: String, + pub identity: String, + } + + #[derive(serde::Serialize)] + pub struct ConnectionState { + pub connected_peers: Vec, + pub connecting_peers: Vec, + pub known_peers: Vec, + } + + impl State { + pub async fn new(handle: &NetworkNodeHandle) -> Self + where + W: super::WebInfo + Send + 'static, + { + Self { + network_config: NetworkConfig::new(handle.peer_id(), handle.config()), + state: handle.state().await.get_serializable(), + } + } + pub async fn send(self, sender: &tide::sse::Sender) -> std::io::Result<()> { + let str = serde_json::to_string(&self).unwrap(); // serializing JSON should never fail + sender.send("node_state", &str, None).await + } + } + impl NetworkConfig { + fn new(identity: PeerId, c: &NetworkNodeConfig) -> Self { + Self { + node_type: format!("{:?}", c.node_type), + identity: identity.to_string(), + } + } + } +} + +/// Trait to unify the info that can be send to the web interface. +/// +/// This has to be implemented for all `S` in `NetworkNodeHandle`, e.g. `CounterState`, `ConductorState`, etc. +pub trait WebInfo: Sync + Send { + type Serialized: serde::Serialize + Send; + + fn get_serializable(&self) -> Self::Serialized; +} diff --git a/libp2p-networking/examples/counter.rs b/libp2p-networking/examples/counter.rs new file mode 100644 index 0000000000..b3c33fdb55 --- /dev/null +++ b/libp2p-networking/examples/counter.rs @@ -0,0 +1,54 @@ +// pub mod common; +// +// use async_compatibility_layer::art::async_main; +// use clap::Parser; +// use color_eyre::eyre::Result; +// #[cfg(all(feature = "lossy_network", target_os = "linux"))] +// use common::{ +// lossy_network::{IsolationConfig, LossyNetworkBuilder}, +// ExecutionEnvironment, +// }; +// use common::{start_main, CliOpt}; +// use tracing::instrument; +// +// #[async_main] +// #[instrument] +// async fn main() -> Result<()> { +fn main() -> Result<(), ()> { + // let args = CliOpt::parse(); + // + // #[cfg(all(feature = "lossy_network", target_os = "linux"))] + // let network = { + // use crate::common::lossy_network::LOSSY_QDISC; + // let mut builder = LossyNetworkBuilder::default(); + // builder + // .env_type(args.env_type_delegate.env_type) + // .netem_config(LOSSY_QDISC); + // match args.env_type_delegate.env_type { + // ExecutionEnvironment::Docker => { + // builder.eth_name("eth0".to_string()).isolation_config(None) + // } + // ExecutionEnvironment::Metal => builder + // .eth_name("ens5".to_string()) + // .isolation_config(Some(IsolationConfig::default())), + // }; + // builder.build() + // }?; + // + // #[cfg(all(feature = "lossy_network", target_os = "linux"))] + // { + // network.isolate().await?; + // network.create_qdisc().await?; + // } + // + // start_main(args).await?; + // + // #[cfg(all(feature = "lossy_network", target_os = "linux"))] + // { + // // implicitly deletes qdisc in the case of metal run + // // leaves qdisc alive in docker run with expectation docker does cleanup + // network.undo_isolate().await?; + // } + // + Ok(()) +} diff --git a/libp2p-networking/flamegraph.sh b/libp2p-networking/flamegraph.sh new file mode 100755 index 0000000000..190d0d4940 --- /dev/null +++ b/libp2p-networking/flamegraph.sh @@ -0,0 +1 @@ +sudo nix develop -c flamegraph -- $(fd -I "counter*" -t x | rg debug) test_request_response_one_round diff --git a/libp2p-networking/src/lib.rs b/libp2p-networking/src/lib.rs new file mode 100644 index 0000000000..210cd22421 --- /dev/null +++ b/libp2p-networking/src/lib.rs @@ -0,0 +1,21 @@ +#![warn( + clippy::all, + clippy::pedantic, + rust_2018_idioms, + missing_docs, + clippy::panic +)] +#![allow(clippy::module_name_repetitions)] +//! Library for p2p communication + +/// Example message used by the UI library +pub mod message; + +/// Network logic +pub mod network; + +/// symbols needed to implement a networking instance over libp2p-netorking +pub mod reexport { + pub use libp2p::Multiaddr; + pub use libp2p_identity::PeerId; +} diff --git a/libp2p-networking/src/message.rs b/libp2p-networking/src/message.rs new file mode 100644 index 0000000000..e7382d0f03 --- /dev/null +++ b/libp2p-networking/src/message.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +/// example message that may be sent to the swarm. Used in the UI +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct Message { + /// the peerid of the sender + pub sender: String, + /// the content of the message + pub content: String, + /// the topic associated with the msg + pub topic: String, +} diff --git a/libp2p-networking/src/network/behaviours/dht.rs b/libp2p-networking/src/network/behaviours/dht.rs new file mode 100644 index 0000000000..46adb05d0c --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht.rs @@ -0,0 +1,700 @@ +use std::{ + collections::{HashMap, HashSet, VecDeque}, + num::NonZeroUsize, + task::Poll, + time::Duration, +}; + +use futures::channel::oneshot::Sender; +use libp2p::{ + kad::{ + /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapError, BootstrapOk, + GetClosestPeersOk, GetRecordOk, GetRecordResult, Kademlia, KademliaEvent, Mode, + ProgressStep, PutRecordResult, QueryId, QueryResult, Quorum, Record, + }, + swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, + Multiaddr, +}; +use libp2p_identity::PeerId; +use tracing::{error, info, warn}; + +pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; +const MAX_DHT_QUERY_SIZE: usize = 5; + +use super::exponential_backoff::ExponentialBackoff; + +/// Behaviour wrapping libp2p's kademlia +/// included: +/// - publishing API +/// - Request API +/// - bootstrapping into the network +/// - peer discovery +pub struct DHTBehaviour { + /// client approval to begin bootstrap + pub begin_bootstrap: bool, + /// in progress queries for nearby peers + pub in_progress_get_closest_peers: HashMap>, + /// bootstrap nodes + pub bootstrap_nodes: HashMap>, + /// List of kademlia events + pub event_queue: Vec, + /// List of in-progress get requests + in_progress_get_record_queries: HashMap, + /// List of in-progress put requests + in_progress_put_record_queries: HashMap, + /// List of previously failled get requests + queued_get_record_queries: VecDeque, + /// List of previously failled put requests + queued_put_record_queries: VecDeque, + /// Kademlia behaviour + pub kadem: Kademlia, + /// State of bootstrapping + pub bootstrap_state: Bootstrap, + /// State of last random walk + pub random_walk: RandomWalk, + /// the peer id (useful only for debugging right now) + pub peer_id: PeerId, + /// replication factor + pub replication_factor: NonZeroUsize, +} + +/// State of bootstrapping +#[derive(Debug, Clone)] +pub struct Bootstrap { + /// State of bootstrap + pub state: State, + /// Retry timeout + pub backoff: ExponentialBackoff, +} + +/// State of the periodic random walk +pub struct RandomWalk { + /// State of random walk + state: State, + /// Retry timeout + backoff: ExponentialBackoff, +} + +/// State used for random walk and bootstrapping +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum State { + /// Not in progress + NotStarted, + /// In progress + Started, + /// Sucessfully completed + Finished, +} + +/// DHT event enum +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum DHTEvent { + /// Only event tracked currently is when we successfully bootstrap into the network + IsBootstrapped, +} + +impl DHTBehaviour { + /// Begin the bootstrap process + pub fn begin_bootstrap(&mut self) { + self.begin_bootstrap = true; + } + + /// Start a query for the closest peers + pub fn query_closest_peers(&mut self, random_peer: PeerId) { + self.kadem.get_closest_peers(random_peer); + } + + /// Create a new DHT behaviour + #[must_use] + pub fn new( + mut kadem: Kademlia, + pid: PeerId, + replication_factor: NonZeroUsize, + ) -> Self { + // needed because otherwise we stay in client mode when testing locally + // and don't publish keys stuff + // e.g. dht just doesn't work. We'd need to add mdns and that doesn't seem worth it since + // we won't have a local network + // + kadem.set_mode(Some(Mode::Server)); + Self { + begin_bootstrap: false, + bootstrap_nodes: HashMap::default(), + peer_id: pid, + event_queue: Vec::default(), + in_progress_get_record_queries: HashMap::default(), + in_progress_put_record_queries: HashMap::default(), + queued_get_record_queries: VecDeque::default(), + queued_put_record_queries: VecDeque::default(), + kadem, + bootstrap_state: Bootstrap { + state: State::NotStarted, + backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), + }, + random_walk: RandomWalk { + state: State::NotStarted, + // TODO jr this may be way too frequent + backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), + }, + in_progress_get_closest_peers: HashMap::default(), + replication_factor, + } + } + + /// query a peer (e.g. obtain its address if it exists) + pub fn lookup_peer(&mut self, peer_id: PeerId, chan: Sender<()>) { + let qid = self.kadem.get_closest_peers(peer_id); + self.in_progress_get_closest_peers.insert(qid, chan); + } + + /// print out the routing table to stderr + pub fn print_routing_table(&mut self) { + let mut err = format!("KBUCKETS: PID: {:?}, ", self.peer_id); + let v = self.kadem.kbuckets().collect::>(); + for i in v { + for j in i.iter() { + let s = format!( + "node: key: {:?}, val {:?}, status: {:?}", + j.node.key, j.node.value, j.status + ); + err.push_str(&s); + } + } + error!("{:?}", err); + } + + /// Passthru to kademlia + /// Associate address with kademlia peer + pub fn add_address(&mut self, peer_id: &PeerId, addr: Multiaddr) { + // add address to kademlia + self.kadem.add_address(peer_id, addr); + } + + /// Save in case kademlia forgets about bootstrap nodes + pub fn add_bootstrap_nodes(&mut self, nodes: HashMap>) { + for (k, v) in nodes { + self.bootstrap_nodes.insert(k, v); + } + } + + /// Publish a key/value to the kv store. + /// Once replicated upon all nodes, the caller is notified over + /// `chan`. If there is an error, a [`crate::network::error::DHTError`] is + /// sent instead. + pub fn put_record(&mut self, mut query: KadPutQuery) { + let record = Record::new(query.key.clone(), query.value.clone()); + + match self + .kadem + .put_record(record, Quorum::N(self.replication_factor)) + { + Err(e) => { + // failed try again later + query.progress = DHTProgress::NotStarted; + query.backoff.start_next(false); + error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); + self.queued_put_record_queries.push_back(query); + } + Ok(qid) => { + info!("Success publishing {:?} to DHT", qid); + let query = KadPutQuery { + progress: DHTProgress::InProgress(qid), + ..query + }; + self.in_progress_put_record_queries.insert(qid, query); + } + } + } + + /// Retrieve a value for a key from the DHT. + /// Value (serialized) is sent over `chan`, and if a value is not found, + /// a [`crate::network::error::DHTError`] is sent instead. + /// NOTE: noop if `retry_count` is 0 + pub fn get_record( + &mut self, + key: Vec, + chan: Sender>, + factor: NonZeroUsize, + backoff: ExponentialBackoff, + retry_count: u8, + ) { + // noop + if retry_count == 0 { + return; + } + + let qid = self.kadem.get_record(key.clone().into()); + let query = KadGetQuery { + backoff, + progress: DHTProgress::InProgress(qid), + notify: chan, + num_replicas: factor, + key, + retry_count: retry_count - 1, + records: HashMap::default(), + }; + self.in_progress_get_record_queries.insert(qid, query); + } + + /// update state based on recv-ed get query + fn handle_get_query(&mut self, record_results: GetRecordResult, id: QueryId, last: bool) { + if let Some(query) = self.in_progress_get_record_queries.get_mut(&id) { + if let Ok(GetRecordOk::FoundRecord(record)) = record_results { + match query.records.entry(record.record.value) { + std::collections::hash_map::Entry::Occupied(mut o) => { + let num_entries = o.get_mut(); + *num_entries += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } + } + } + } else { + // inactive entry + return; + } + + if last { + if let Some(KadGetQuery { + backoff, + progress, + notify, + num_replicas, + key, + retry_count, + records, + }) = self.in_progress_get_record_queries.remove(&id) + { + // if channel has been dropped, cancel request + if notify.is_canceled() { + return; + } + + let records_len = records.iter().fold(0, |acc, (_k, v)| acc + v); + + // NOTE case where multiple nodes agree on different + // values is not handles + if let Some((r, _)) = records + .into_iter() + .find(|(_, v)| *v >= NUM_REPLICATED_TO_TRUST) + { + if notify.send(r).is_err() { + warn!("Get DHT: channel closed before get record request result could be sent"); + } + } + // lack of replication => error + else if records_len < NUM_REPLICATED_TO_TRUST { + warn!("Get DHT: Record not replicated enough for {:?}! requerying with more nodes", progress); + self.get_record(key, notify, num_replicas, backoff, retry_count); + } + // many records that don't match => disagreement + else if records_len > MAX_DHT_QUERY_SIZE { + warn!( + "Get DHT: Record disagreed upon; {:?}! requerying with more nodes", + progress + ); + self.get_record(key, notify, num_replicas, backoff, retry_count); + } + // disagreement => query more nodes + else { + // there is some internal disagreement. + // Initiate new query that hits more replicas + let new_factor = + NonZeroUsize::new(num_replicas.get() + 1).unwrap_or(num_replicas); + + self.get_record(key, notify, new_factor, backoff, retry_count); + warn!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes", progress); + } + } + } + } + + /// Update state based on put query + fn handle_put_query(&mut self, record_results: PutRecordResult, id: QueryId) { + if let Some(mut query) = self.in_progress_put_record_queries.remove(&id) { + // dropped so we handle further + if query.notify.is_canceled() { + return; + } + + match record_results { + Ok(_) => { + if query.notify.send(()).is_err() { + warn!("Put DHT: client channel closed before put record request could be sent"); + } + } + Err(e) => { + query.progress = DHTProgress::NotStarted; + query.backoff.start_next(false); + + warn!( + "Put DHT: error performing put: {:?}. Retrying on pid {:?}.", + e, self.peer_id + ); + // push back onto the queue + self.queued_put_record_queries.push_back(query); + } + } + } else { + warn!("Put DHT: completed DHT query that is no longer tracked."); + } + } +} + +impl DHTBehaviour { + #![allow(clippy::too_many_lines)] + fn dht_handle_event(&mut self, event: KademliaEvent) { + match event { + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::PutRecord(record_results), + id, + step: ProgressStep { last: true, .. }, + .. + } => { + self.handle_put_query(record_results, id); + } + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::GetClosestPeers(r), + id: query_id, + stats, + step: ProgressStep { last: true, .. }, + .. + } => match r { + Ok(GetClosestPeersOk { key, peers }) => { + if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { + if chan.send(()).is_err() { + warn!("DHT: finished query but client no longer interested"); + }; + } else { + self.random_walk.state = State::NotStarted; + self.random_walk.backoff.start_next(true); + } + info!( + "peer {:?} successfully completed get closest peers for {:?} with peers {:?}", + self.peer_id, key, peers + ); + } + Err(e) => { + if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { + let _: Result<_, _> = chan.send(()); + } else { + self.random_walk.state = State::NotStarted; + self.random_walk.backoff.start_next(true); + } + warn!( + "peer {:?} failed to get closest peers with {:?} and stats {:?}", + self.peer_id, e, stats + ); + } + }, + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::GetRecord(record_results), + id, + step: ProgressStep { last, .. }, + .. + } => { + self.handle_get_query(record_results, id, last); + } + KademliaEvent::OutboundQueryProgressed { + result: + QueryResult::Bootstrap(Ok(BootstrapOk { + peer: _, + num_remaining, + })), + step: ProgressStep { last: true, .. }, + .. + } => { + if num_remaining == 0 { + // if bootstrap is successful, restart. + info!("Finished bootstrap for peer {:?}", self.peer_id); + self.bootstrap_state.state = State::Finished; + self.event_queue.push(DHTEvent::IsBootstrapped); + self.begin_bootstrap = false; + } else { + warn!( + "Bootstrap in progress: num remaining nodes to ping {:?}", + num_remaining + ); + } + } + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::Bootstrap(Err(e)), + .. + } => { + warn!("DHT: Bootstrap attempt failed. Retrying shortly."); + let BootstrapError::Timeout { num_remaining, .. } = e; + if num_remaining.is_none() { + error!( + "Peer {:?} failed bootstrap with error {:?}. This should not happen and means all bootstrap nodes are down or were evicted from our local DHT. Readding bootstrap nodes {:?}", + self.peer_id, e, self.bootstrap_nodes + ); + for (peer, addrs) in self.bootstrap_nodes.clone() { + for addr in addrs { + self.kadem.add_address(&peer, addr); + } + } + } + self.bootstrap_state.state = State::NotStarted; + self.bootstrap_state.backoff.start_next(true); + } + KademliaEvent::RoutablePeer { peer, address: _ } => { + info!("on peer {:?} found routable peer {:?}", self.peer_id, peer); + } + KademliaEvent::PendingRoutablePeer { peer, address: _ } => { + info!( + "on peer {:?} have pending routable peer {:?}", + self.peer_id, peer + ); + } + KademliaEvent::UnroutablePeer { peer } => { + info!("on peer {:?} have unroutable peer {:?}", self.peer_id, peer); + } + KademliaEvent::InboundRequest { request: _r } => {} + KademliaEvent::RoutingUpdated { + peer: _, + is_new_peer: _, + addresses: _, + bucket_range: _, + old_peer: _, + } => {} + e @ KademliaEvent::OutboundQueryProgressed { .. } => { + info!("Not handling dht event {:?}", e); + } + } + } +} + +/// Metadata holder for get query +#[derive(Debug)] +pub(crate) struct KadGetQuery { + /// Exponential retry backoff + pub(crate) backoff: ExponentialBackoff, + /// progress through DHT query + pub(crate) progress: DHTProgress, + /// notify client of result + pub(crate) notify: Sender>, + /// number of replicas required to replicate over + pub(crate) num_replicas: NonZeroUsize, + /// the key to look up + pub(crate) key: Vec, + /// the number of remaining retries before giving up + pub(crate) retry_count: u8, + /// already received records + pub(crate) records: HashMap, usize>, +} + +/// Metadata holder for get query +#[derive(Debug)] +pub struct KadPutQuery { + /// Exponential retry backoff + pub(crate) backoff: ExponentialBackoff, + /// progress through DHT query + pub(crate) progress: DHTProgress, + /// notify client of result + pub(crate) notify: Sender<()>, + /// the key to put + pub(crate) key: Vec, + /// the value to put + pub(crate) value: Vec, +} + +/// represents progress through DHT +#[derive(Debug, Clone, Eq, Hash, PartialEq)] +pub enum DHTProgress { + /// The query has been started + InProgress(QueryId), + /// The query has not been started + NotStarted, +} + +// Diagnostics: +// 1. use of deprecated associated function `libp2p::libp2p_swarm::NetworkBehaviour::inject_event`: Implement `NetworkBehaviour::on_connection_handler_event` instead. The default implementation of this `inject_*` method delegates to it. + +impl NetworkBehaviour for DHTBehaviour { + type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; + + type ToSwarm = DHTEvent; + + // fn new_handler(&mut self) -> Self::ConnectionHandler { + // self.kadem.new_handler() + // } + + fn poll( + &mut self, + cx: &mut std::task::Context<'_>, + params: &mut impl libp2p::swarm::PollParameters, + ) -> Poll>> { + if matches!(self.bootstrap_state.state, State::NotStarted) + && self.bootstrap_state.backoff.is_expired() + && self.begin_bootstrap + { + match self.kadem.bootstrap() { + Ok(_) => { + info!("started bootstrap for peer {:?}", self.peer_id); + self.bootstrap_state.state = State::Started; + } + Err(e) => { + error!( + "peer id {:?} FAILED TO START BOOTSTRAP {:?} adding peers {:?}", + self.peer_id, e, self.bootstrap_nodes + ); + for (peer, addrs) in self.bootstrap_nodes.clone() { + for addr in addrs { + self.kadem.add_address(&peer, addr); + } + } + } + } + } + + if matches!(self.random_walk.state, State::NotStarted) + && self.random_walk.backoff.is_expired() + && matches!(self.bootstrap_state.state, State::Finished) + { + self.kadem.get_closest_peers(PeerId::random()); + self.random_walk.state = State::Started; + } + + // retry put/gets if they are ready + while let Some(req) = self.queued_get_record_queries.pop_front() { + if req.backoff.is_expired() { + self.get_record( + req.key, + req.notify, + req.num_replicas, + req.backoff, + req.retry_count, + ); + } else { + self.queued_get_record_queries.push_back(req); + } + } + while let Some(req) = self.queued_put_record_queries.pop_front() { + if req.backoff.is_expired() { + self.put_record(req); + } else { + self.queued_put_record_queries.push_back(req); + } + } + + // poll behaviour which is a passthrough and call inject event + while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.kadem, cx, params) { + match ready { + ToSwarm::GenerateEvent(e) => { + self.dht_handle_event(e); + } + ToSwarm::Dial { opts } => { + return Poll::Ready(ToSwarm::Dial { opts }); + } + ToSwarm::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event, + }); + } + ToSwarm::CloseConnection { + peer_id, + connection, + } => { + return Poll::Ready(ToSwarm::CloseConnection { + peer_id, + connection, + }); + } + ToSwarm::ListenOn { opts } => { + return Poll::Ready(ToSwarm::ListenOn { opts }); + } + ToSwarm::RemoveListener { id } => { + return Poll::Ready(ToSwarm::RemoveListener { id }); + } + ToSwarm::NewExternalAddrCandidate(c) => { + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(c)); + } + ToSwarm::ExternalAddrConfirmed(c) => { + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(c)); + } + ToSwarm::ExternalAddrExpired(c) => { + return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); + } + } + } + if !self.event_queue.is_empty() { + return Poll::Ready(ToSwarm::GenerateEvent(self.event_queue.remove(0))); + } + Poll::Pending + } + + fn on_swarm_event( + &mut self, + event: libp2p::swarm::derive_prelude::FromSwarm<'_, Self::ConnectionHandler>, + ) { + self.kadem.on_swarm_event(event); + } + + fn on_connection_handler_event( + &mut self, + peer_id: PeerId, + connection_id: libp2p::swarm::derive_prelude::ConnectionId, + event: THandlerOutEvent, + ) { + self.kadem + .on_connection_handler_event(peer_id, connection_id, event); + } + + fn handle_pending_inbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result<(), libp2p::swarm::ConnectionDenied> { + self.kadem + .handle_pending_inbound_connection(connection_id, local_addr, remote_addr) + } + + fn handle_established_inbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.kadem.handle_established_inbound_connection( + connection_id, + peer, + local_addr, + remote_addr, + ) + } + + fn handle_pending_outbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + maybe_peer: Option, + addresses: &[Multiaddr], + effective_role: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.kadem.handle_pending_outbound_connection( + connection_id, + maybe_peer, + addresses, + effective_role, + ) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.kadem + .handle_established_outbound_connection(connection_id, peer, addr, role_override) + } +} diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs new file mode 100644 index 0000000000..a50538e4ae --- /dev/null +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -0,0 +1,311 @@ +use std::{ + collections::{HashMap, VecDeque}, + task::Poll, +}; + +use libp2p::{ + request_response::{Behaviour, Event, Message, RequestId, ResponseChannel}, + swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, + Multiaddr, +}; +use libp2p_identity::PeerId; +use tracing::{error, info}; + +use super::{ + direct_message_codec::{DirectMessageCodec, DirectMessageRequest, DirectMessageResponse}, + exponential_backoff::ExponentialBackoff, +}; + +/// Request to direct message a peert +pub struct DMRequest { + /// the recv-ers peer id + pub peer_id: PeerId, + /// the data + pub data: Vec, + /// backoff since last attempted request + pub backoff: ExponentialBackoff, + /// the number of remaining retries before giving up + pub(crate) retry_count: u8, +} + +/// Wrapper metadata around libp2p's request response +/// usage: direct message peer +pub struct DMBehaviour { + /// The wrapped behaviour + request_response: Behaviour, + /// In progress queries + in_progress_rr: HashMap, + /// Failed queries to be retried + failed_rr: VecDeque, + /// lsit of out events for parent behaviour + out_event_queue: Vec, +} + +/// Lilst of direct message output events +#[derive(Debug)] +pub enum DMEvent { + /// We received as Direct Request + DirectRequest(Vec, PeerId, ResponseChannel), + /// We received a Direct Response + DirectResponse(Vec, PeerId), +} + +impl DMBehaviour { + fn handle_dm_event(&mut self, event: Event) { + match event { + Event::InboundFailure { + peer, + request_id, + error, + } => { + error!( + "inbound failure to send message to {:?} with error {:?}", + peer, error + ); + if let Some(mut req) = self.in_progress_rr.remove(&request_id) { + req.backoff.start_next(false); + self.failed_rr.push_back(req); + } + } + Event::OutboundFailure { + peer, + request_id, + error, + } => { + error!( + "outbound failure to send message to {:?} with error {:?}", + peer, error + ); + if let Some(mut req) = self.in_progress_rr.remove(&request_id) { + req.backoff.start_next(false); + self.failed_rr.push_back(req); + } + } + Event::Message { message, peer, .. } => match message { + Message::Request { + request: DirectMessageRequest(msg), + channel, + .. + } => { + info!("recv-ed DIRECT REQUEST {:?}", msg); + // receiver, not initiator. + // don't track. If we are disconnected, sender will reinitiate + self.out_event_queue + .push(DMEvent::DirectRequest(msg, peer, channel)); + } + Message::Response { + request_id, + response: DirectMessageResponse(msg), + } => { + // success, finished. + if let Some(req) = self.in_progress_rr.remove(&request_id) { + info!("recv-ed DIRECT RESPONSE {:?}", msg); + self.out_event_queue + .push(DMEvent::DirectResponse(msg, req.peer_id)); + } else { + error!("recv-ed a direct response, but is no longer tracking message!"); + } + } + }, + e @ Event::ResponseSent { .. } => { + info!(?e, " sending response"); + } + } + } +} + +impl NetworkBehaviour for DMBehaviour { + type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; + + type ToSwarm = DMEvent; + + fn on_swarm_event( + &mut self, + event: libp2p::swarm::derive_prelude::FromSwarm<'_, Self::ConnectionHandler>, + ) { + self.request_response.on_swarm_event(event); + } + + fn on_connection_handler_event( + &mut self, + peer_id: PeerId, + connection_id: libp2p::swarm::derive_prelude::ConnectionId, + event: THandlerOutEvent, + ) { + self.request_response + .on_connection_handler_event(peer_id, connection_id, event); + } + + fn poll( + &mut self, + cx: &mut std::task::Context<'_>, + params: &mut impl libp2p::swarm::PollParameters, + ) -> Poll>> { + while let Some(req) = self.failed_rr.pop_front() { + if req.backoff.is_expired() { + self.add_direct_request(req); + } else { + self.failed_rr.push_back(req); + } + } + while let Poll::Ready(ready) = + NetworkBehaviour::poll(&mut self.request_response, cx, params) + { + match ready { + // NOTE: this generates request + ToSwarm::GenerateEvent(e) => { + self.handle_dm_event(e); + } + ToSwarm::Dial { opts } => { + return Poll::Ready(ToSwarm::Dial { opts }); + } + ToSwarm::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event, + }); + } + ToSwarm::CloseConnection { + peer_id, + connection, + } => { + return Poll::Ready(ToSwarm::CloseConnection { + peer_id, + connection, + }); + } + ToSwarm::ListenOn { opts } => { + return Poll::Ready(ToSwarm::ListenOn { opts }); + } + ToSwarm::RemoveListener { id } => { + return Poll::Ready(ToSwarm::RemoveListener { id }); + } + ToSwarm::NewExternalAddrCandidate(c) => { + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(c)); + } + ToSwarm::ExternalAddrConfirmed(c) => { + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(c)); + } + ToSwarm::ExternalAddrExpired(c) => { + return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); + } + } + } + if !self.out_event_queue.is_empty() { + return Poll::Ready(ToSwarm::GenerateEvent(self.out_event_queue.remove(0))); + } + Poll::Pending + } + + fn handle_pending_inbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result<(), libp2p::swarm::ConnectionDenied> { + self.request_response.handle_pending_inbound_connection( + connection_id, + local_addr, + remote_addr, + ) + } + + fn handle_established_inbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.request_response.handle_established_inbound_connection( + connection_id, + peer, + local_addr, + remote_addr, + ) + } + + fn handle_pending_outbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + maybe_peer: Option, + addresses: &[Multiaddr], + effective_role: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.request_response.handle_pending_outbound_connection( + connection_id, + maybe_peer, + addresses, + effective_role, + ) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.request_response + .handle_established_outbound_connection(connection_id, peer, addr, role_override) + } +} + +impl DMBehaviour { + /// Create new behaviour based on request response + #[must_use] + pub fn new(request_response: Behaviour) -> Self { + Self { + request_response, + in_progress_rr: HashMap::default(), + failed_rr: VecDeque::default(), + out_event_queue: Vec::default(), + } + } + + /// Add address to request response behaviour + pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) { + self.request_response.add_address(peer_id, address); + } + + /// Remove address from request response behaviour + pub fn remove_address(&mut self, peer_id: &PeerId, address: &Multiaddr) { + self.request_response.remove_address(peer_id, address); + } + + /// Add a direct request for a given peer + pub fn add_direct_request(&mut self, mut req: DMRequest) { + if req.retry_count == 0 { + return; + } + + req.retry_count -= 1; + + let request_id = self + .request_response + .send_request(&req.peer_id, DirectMessageRequest(req.data.clone())); + info!("direct message request with id {:?}", request_id); + + self.in_progress_rr.insert(request_id, req); + } + + /// Add a direct response for a channel + pub fn add_direct_response( + &mut self, + chan: ResponseChannel, + msg: Vec, + ) { + let res = self + .request_response + .send_response(chan, DirectMessageResponse(msg)); + if let Err(e) = res { + error!("Error replying to direct message. {:?}", e); + } + } +} diff --git a/libp2p-networking/src/network/behaviours/direct_message_codec.rs b/libp2p-networking/src/network/behaviours/direct_message_codec.rs new file mode 100644 index 0000000000..9abe0984b9 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/direct_message_codec.rs @@ -0,0 +1,95 @@ +use async_trait::async_trait; +use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use libp2p::{ + core::upgrade::{read_length_prefixed, write_length_prefixed}, + request_response::Codec, +}; +use serde::{Deserialize, Serialize}; +use std::io; + +/// Protocol for direct messages +#[derive(Debug, Clone)] +pub struct DirectMessageProtocol(); +/// Codec for direct messages +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +pub struct DirectMessageCodec(); +/// Wrapper type describing a serialized direct message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DirectMessageRequest(pub Vec); +/// wrapper type describing the response to direct message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DirectMessageResponse(pub Vec); + +/// Maximum size of a direct message +pub const MAX_MSG_SIZE_DM: usize = 100_000_000; + +impl AsRef for DirectMessageProtocol { + fn as_ref(&self) -> &str { + "/HotShot/request_response/1.0" + } +} + +#[async_trait] +impl Codec for DirectMessageCodec { + type Protocol = DirectMessageProtocol; + + type Request = DirectMessageRequest; + + type Response = DirectMessageResponse; + + async fn read_request( + &mut self, + _: &DirectMessageProtocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let msg = read_length_prefixed(io, MAX_MSG_SIZE_DM).await?; + + // NOTE we don't error here unless message is too big. + // We'll wrap this in a networkbehaviour and get parsing messages there + Ok(DirectMessageRequest(msg)) + } + + async fn read_response( + &mut self, + _: &DirectMessageProtocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let msg = read_length_prefixed(io, MAX_MSG_SIZE_DM).await?; + Ok(DirectMessageResponse(msg)) + } + + async fn write_request( + &mut self, + _: &DirectMessageProtocol, + io: &mut T, + DirectMessageRequest(msg): DirectMessageRequest, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + write_length_prefixed(io, msg).await?; + io.close().await?; + + Ok(()) + } + + async fn write_response( + &mut self, + _: &DirectMessageProtocol, + io: &mut T, + DirectMessageResponse(msg): DirectMessageResponse, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + write_length_prefixed(io, msg).await?; + io.close().await?; + Ok(()) + } +} diff --git a/libp2p-networking/src/network/behaviours/exponential_backoff.rs b/libp2p-networking/src/network/behaviours/exponential_backoff.rs new file mode 100644 index 0000000000..62391a2ff0 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/exponential_backoff.rs @@ -0,0 +1,75 @@ +use std::time::{Duration, Instant}; + +/// Track (with exponential backoff) +/// sending of some sort of message +#[derive(Debug, Clone, Eq, Hash, PartialEq)] +pub struct ExponentialBackoff { + /// Value to reset to when reset is called + reset_val: Duration, + /// factor to back off by + backoff_factor: u32, + /// the current timeout amount + timeout: Duration, + /// when we started the timeout + started: Option, +} + +impl ExponentialBackoff { + /// Create new backoff + #[must_use] + pub fn new(backoff_factor: u32, next_timeout: Duration) -> Self { + ExponentialBackoff { + backoff_factor, + timeout: next_timeout * backoff_factor, + reset_val: next_timeout, + started: None, + } + } + + /// reset backoff + pub fn reset(&mut self) { + self.timeout = self.reset_val; + } + + /// start next timeout + /// result: whether or not we succeeded + /// if we succeeded, reset the timeout + /// else increment the timeout by a factor + /// of `timeout` + pub fn start_next(&mut self, result: bool) { + // success + if result { + self.timeout = self.reset_val; + self.started = Some(Instant::now()); + } + // failure + else { + // note we want to prevent overflow. + if let Some(r) = self.timeout.checked_mul(self.backoff_factor) { + self.timeout = r; + } + self.started = Some(Instant::now()); + } + } + + /// Whether or not the timeout is expired + #[must_use] + pub fn is_expired(&self) -> bool { + if let Some(then) = self.started { + then.elapsed() > self.timeout + } else { + true + } + } +} + +impl Default for ExponentialBackoff { + fn default() -> Self { + Self { + reset_val: Duration::from_millis(500), + backoff_factor: 2, + timeout: Duration::from_millis(500), + started: None, + } + } +} diff --git a/libp2p-networking/src/network/behaviours/gossip.rs b/libp2p-networking/src/network/behaviours/gossip.rs new file mode 100644 index 0000000000..1119ad27f3 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/gossip.rs @@ -0,0 +1,265 @@ +use std::{ + collections::{HashSet, VecDeque}, + task::Poll, +}; + +use libp2p::{ + gossipsub::{Behaviour, Event, IdentTopic, TopicHash}, + swarm::{NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, ToSwarm}, + Multiaddr, +}; +use libp2p_identity::PeerId; + +use tracing::{error, info, warn}; + +use super::exponential_backoff::ExponentialBackoff; + +/// wrapper metadata around libp2p's gossip protocol +pub struct GossipBehaviour { + /// Timeout trackidng when to retry gossip + backoff: ExponentialBackoff, + /// The in progress gossip queries + in_progress_gossip: VecDeque<(IdentTopic, Vec)>, + /// The gossip behaviour + gossipsub: Behaviour, + /// Output events to parent behavioru + out_event_queue: Vec, + /// Set of topics we are subscribed to + subscribed_topics: HashSet, +} + +/// Output event +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum GossipEvent { + /// We received a gossip + GossipMsg(Vec, TopicHash), +} + +impl GossipBehaviour { + fn gossip_handle_event(&mut self, event: Event) { + match event { + Event::Message { message, .. } => { + // if we get an event from the gossipsub behaviour, push it + // onto the event queue (which will get popped during poll) + // and propagated back to the overall behaviour + self.out_event_queue + .push(GossipEvent::GossipMsg(message.data, message.topic)); + } + Event::Subscribed { topic, .. } => { + info!("subscribed to topic {}", topic); + } + Event::Unsubscribed { topic, .. } => { + info!("unsubscribed to topic {}", topic); + } + Event::GossipsubNotSupported { peer_id } => { + error!("gossipsub not supported on {}!", peer_id); + } + } + } +} + +impl NetworkBehaviour for GossipBehaviour { + type ConnectionHandler = ::ConnectionHandler; + + type ToSwarm = GossipEvent; + + fn on_swarm_event( + &mut self, + event: libp2p::swarm::derive_prelude::FromSwarm<'_, Self::ConnectionHandler>, + ) { + self.gossipsub.on_swarm_event(event); + } + + fn poll( + &mut self, + cx: &mut std::task::Context<'_>, + params: &mut impl PollParameters, + ) -> Poll>> { + // retry sending shit + if self.backoff.is_expired() { + let published = self.drain_publish_gossips(); + self.backoff.start_next(published); + } + if let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.gossipsub, cx, params) { + match ready { + ToSwarm::GenerateEvent(e) => { + // add event to event queue which will be subsequently popped off. + self.gossip_handle_event(e); + } + ToSwarm::Dial { opts } => { + return Poll::Ready(ToSwarm::Dial { opts }); + } + ToSwarm::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event, + }); + } + ToSwarm::CloseConnection { + peer_id, + connection, + } => { + return Poll::Ready(ToSwarm::CloseConnection { + peer_id, + connection, + }); + } + ToSwarm::ListenOn { opts } => { + return Poll::Ready(ToSwarm::ListenOn { opts }); + } + ToSwarm::RemoveListener { id } => { + return Poll::Ready(ToSwarm::RemoveListener { id }); + } + ToSwarm::NewExternalAddrCandidate(c) => { + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(c)); + } + ToSwarm::ExternalAddrConfirmed(c) => { + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(c)); + } + ToSwarm::ExternalAddrExpired(c) => { + return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); + } + } + } + if !self.out_event_queue.is_empty() { + return Poll::Ready(ToSwarm::GenerateEvent(self.out_event_queue.remove(0))); + } + Poll::Pending + } + + fn on_connection_handler_event( + &mut self, + peer_id: PeerId, + connection_id: libp2p::swarm::derive_prelude::ConnectionId, + event: THandlerOutEvent, + ) { + self.gossipsub + .on_connection_handler_event(peer_id, connection_id, event); + } + + fn handle_pending_inbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result<(), libp2p::swarm::ConnectionDenied> { + self.gossipsub + .handle_pending_inbound_connection(connection_id, local_addr, remote_addr) + } + + fn handle_established_inbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.gossipsub.handle_established_inbound_connection( + connection_id, + peer, + local_addr, + remote_addr, + ) + } + + fn handle_pending_outbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + maybe_peer: Option, + addresses: &[Multiaddr], + effective_role: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.gossipsub.handle_pending_outbound_connection( + connection_id, + maybe_peer, + addresses, + effective_role, + ) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: libp2p::swarm::ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + self.gossipsub.handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + ) + } +} + +impl GossipBehaviour { + /// Create new gossip behavioru based on gossipsub + #[must_use] + pub fn new(gossipsub: Behaviour) -> Self { + Self { + backoff: ExponentialBackoff::default(), + in_progress_gossip: VecDeque::default(), + gossipsub, + out_event_queue: Vec::default(), + subscribed_topics: HashSet::default(), + } + } + + /// Publish a given gossip + pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { + let res = self.gossipsub.publish(topic.clone(), contents.clone()); + if res.is_err() { + error!("error publishing gossip message {:?}", res); + self.in_progress_gossip.push_back((topic, contents)); + } + } + + /// Subscribe to a given topic + pub fn subscribe_gossip(&mut self, t: &str) { + if self.subscribed_topics.contains(t) { + warn!( + "tried to subscribe to already subscribed topic {:?}. Noop.", + t + ); + } else if self.gossipsub.subscribe(&IdentTopic::new(t)).is_err() { + error!("error subscribing to topic {}", t); + } else { + info!("subscribed req to {:?}", t); + self.subscribed_topics.insert(t.to_string()); + } + } + + /// Unsubscribe from a given topic + pub fn unsubscribe_gossip(&mut self, t: &str) { + if self.subscribed_topics.contains(t) { + if self.gossipsub.unsubscribe(&IdentTopic::new(t)).is_err() { + error!("error unsubscribing to topic {}", t); + } else { + self.subscribed_topics.remove(t); + } + } else { + warn!("tried to unsubscribe to untracked topic {:?}. Noop.", t); + } + } + + /// Attempt to drain the internal gossip list, publishing each gossip + pub fn drain_publish_gossips(&mut self) -> bool { + let mut r_val = true; + + while let Some((topic, contents)) = self.in_progress_gossip.pop_front() { + let res = self.gossipsub.publish(topic.clone(), contents.clone()); + if res.is_err() { + self.in_progress_gossip.push_back((topic, contents)); + r_val = false; + break; + } + } + r_val + } +} diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/libp2p-networking/src/network/behaviours/mod.rs new file mode 100644 index 0000000000..7d0feeeb25 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/mod.rs @@ -0,0 +1,15 @@ +/// Wrapper around gossipsub +pub mod gossip; + +/// Wrapper around `RequestResponse` +pub mod direct_message; + +/// exponential backoff type +pub mod exponential_backoff; + +/// Implementation of a codec for sending messages +/// for `RequestResponse` +pub mod direct_message_codec; + +/// Wrapper around Kademlia +pub mod dht; diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs new file mode 100644 index 0000000000..06f7cb24e5 --- /dev/null +++ b/libp2p-networking/src/network/def.rs @@ -0,0 +1,181 @@ +use futures::channel::oneshot::Sender; +use libp2p::{ + gossipsub::IdentTopic as Topic, + identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, + request_response::ResponseChannel, + Multiaddr, +}; +use libp2p_identity::PeerId; +use std::num::NonZeroUsize; +use tracing::debug; + +use super::{ + behaviours::{ + dht::{DHTBehaviour, DHTEvent, KadPutQuery}, + direct_message::{DMBehaviour, DMEvent, DMRequest}, + direct_message_codec::DirectMessageResponse, + exponential_backoff::ExponentialBackoff, + gossip::{GossipBehaviour, GossipEvent}, + }, + NetworkEventInternal, +}; + +use libp2p_swarm_derive::NetworkBehaviour; + +pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; + +/// Overarching network behaviour performing: +/// - network topology discovoery +/// - direct messaging +/// - p2p broadcast +/// - connection management +#[derive(NetworkBehaviour, custom_debug::Debug)] +#[behaviour(to_swarm = "NetworkEventInternal")] +pub struct NetworkDef { + /// purpose: broadcasting messages to many peers + /// NOTE gossipsub works ONLY for sharing messsages right now + /// in the future it may be able to do peer discovery and routing + /// + #[debug(skip)] + gossipsub: GossipBehaviour, + + /// purpose: peer routing + /// purpose: storing pub key <-> peer id bijection + #[debug(skip)] + pub dht: DHTBehaviour, + + /// purpose: identifying the addresses from an outside POV + #[debug(skip)] + identify: IdentifyBehaviour, + + /// purpose: directly messaging peer + #[debug(skip)] + pub request_response: DMBehaviour, +} + +impl NetworkDef { + /// Create a new instance of a `NetworkDef` + #[must_use] + pub fn new( + gossipsub: GossipBehaviour, + dht: DHTBehaviour, + identify: IdentifyBehaviour, + request_response: DMBehaviour, + ) -> NetworkDef { + Self { + gossipsub, + dht, + identify, + request_response, + } + } +} + +/// Address functions +impl NetworkDef { + /// Add an address + pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) { + // NOTE to get this address to play nice with the other + // behaviours using the DHT for ouring + // we only need to add this address to the DHT since it + // is always enabled. If it were not always enabled, + // we would need to manually add the address to + // the direct message behaviour + self.dht.add_address(peer_id, address); + } +} + +/// Gossip functions +impl NetworkDef { + /// Publish a given gossip + pub fn publish_gossip(&mut self, topic: Topic, contents: Vec) { + self.gossipsub.publish_gossip(topic, contents); + } + + /// Subscribe to a given topic + pub fn subscribe_gossip(&mut self, t: &str) { + self.gossipsub.subscribe_gossip(t); + } + + /// Unsubscribe from a given topic + pub fn unsubscribe_gossip(&mut self, t: &str) { + self.gossipsub.unsubscribe_gossip(t); + } +} + +/// DHT functions +impl NetworkDef { + /// Publish a key/value to the kv store. + /// Once replicated upon all nodes, the caller is notified over + /// `chan`. If there is an error, a [`super::error::DHTError`] is + /// sent instead. + pub fn put_record(&mut self, query: KadPutQuery) { + self.dht.put_record(query); + } + + /// Retrieve a value for a key from the DHT. + /// Value (serialized) is sent over `chan`, and if a value is not found, + /// a [`super::error::DHTError`] is sent instead. + pub fn get_record( + &mut self, + key: Vec, + chan: Sender>, + factor: NonZeroUsize, + retry_count: u8, + ) { + self.dht.get_record( + key, + chan, + factor, + ExponentialBackoff::default(), + retry_count, + ); + } +} + +/// Request/response functions +impl NetworkDef { + /// Add a direct request for a given peer + pub fn add_direct_request(&mut self, peer_id: PeerId, data: Vec, retry_count: u8) { + let request = DMRequest { + peer_id, + data, + backoff: ExponentialBackoff::default(), + retry_count, + }; + self.request_response.add_direct_request(request); + } + + /// Add a direct response for a channel + pub fn add_direct_response( + &mut self, + chan: ResponseChannel, + msg: Vec, + ) { + self.request_response.add_direct_response(chan, msg); + } +} + +impl From for NetworkEventInternal { + fn from(event: DMEvent) -> Self { + Self::DMEvent(event) + } +} + +impl From for NetworkEventInternal { + fn from(event: GossipEvent) -> Self { + Self::GossipEvent(event) + } +} + +impl From for NetworkEventInternal { + fn from(event: DHTEvent) -> Self { + Self::DHTEvent(event) + } +} + +impl From for NetworkEventInternal { + fn from(event: IdentifyEvent) -> Self { + Self::IdentifyEvent(Box::new(event)) + } +} diff --git a/libp2p-networking/src/network/error.rs b/libp2p-networking/src/network/error.rs new file mode 100644 index 0000000000..dd83e06584 --- /dev/null +++ b/libp2p-networking/src/network/error.rs @@ -0,0 +1,105 @@ +//! Contains the [`NetworkError`] snafu types + +use futures::channel::oneshot::Canceled; +use libp2p::{ + gossipsub::PublishError, + kad::{GetRecordError, PutRecordError}, + swarm::DialError, + TransportError, +}; +use snafu::Snafu; +use std::fmt::{Debug, Display}; + +/// wrapper type for errors generated by the `Network` +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum NetworkError { + /// Error initiating dial of peer + DialError { + /// The underlying source of the error + source: DialError, + }, + /// Error during dialing or listening + Transport { + /// The underlying source of the error + source: TransportError, + }, + /// Error establishing backend connection + TransportLaunch { + /// The underlying source of the error + source: std::io::Error, + }, + /// Error building the gossipsub configuration + #[snafu(display("Error building the gossipsub configuration: {message}"))] + GossipsubConfig { + /// The underlying source of the error + message: String, + }, + /// Error building the gossipsub instance + #[snafu(display("Error building the gossipsub implementation {message}"))] + GossipsubBuild { + /// The underlying source of the error + message: String, + }, + /// Error if one of the channels to or from the swarm is closed + StreamClosed, + /// Error publishing a gossipsub message + PublishError { + /// The underlying source of the error + source: PublishError, + }, + /// Error when there are no known peers to bootstrap off + NoKnownPeers, +} + +/// Error enum for querying store +/// because for some reason, [`libp2p::kad::GetRecordError`] +/// does not derive `Error` +#[derive(Debug, Clone, Snafu)] +#[snafu(visibility(pub))] +pub enum DHTError { + /// Get Record Error + #[snafu(display("DHT GET internal error: {source}"))] + GetRecord { + /// source of error + source: GetRecordWrapperError, + }, + /// Get Record Error + #[snafu(display("DHT PUT internal error: {source}"))] + PutRecord { + /// source of error + source: PutRecordError, + }, + /// nodes disagreed on the value + #[snafu(display("Nodes disagreed on value"))] + Disagreement, + /// could not find 2 or more nodes that had the value + #[snafu(display("Could not find key in DHT"))] + NotFound, + /// request was ignored serverside + CancelledRequest { + /// source of error + source: Canceled, + }, +} + +/// Wrapper Error enum for [`libp2p::kad::GetRecordError`]. +/// [`libp2p::kad::GetRecordError`] does not derive [`std::error::Error`] +/// so in order to feed this into [`DHTError`] and snafu derive, +/// we need a wrapper type +#[derive(Debug, Clone)] +pub enum GetRecordWrapperError { + /// wrapper + GetRecordError { + /// source of error + source: GetRecordError, + }, +} + +impl Display for GetRecordWrapperError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:?}") + } +} + +impl std::error::Error for GetRecordWrapperError {} diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs new file mode 100644 index 0000000000..3ec1c07b73 --- /dev/null +++ b/libp2p-networking/src/network/mod.rs @@ -0,0 +1,256 @@ +/// networking behaviours wrapping libp2p's behaviours +pub mod behaviours; +mod def; +pub mod error; +mod node; + +pub use self::{ + def::NetworkDef, + error::NetworkError, + node::{ + network_node_handle_error, MeshParams, NetworkNode, NetworkNodeConfig, + NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, + NetworkNodeHandleError, + }, +}; + +use self::behaviours::{ + dht::DHTEvent, direct_message::DMEvent, direct_message_codec::DirectMessageResponse, + gossip::GossipEvent, +}; +use bincode::Options; +use futures::channel::oneshot::Sender; +use hotshot_utils::bincode::bincode_opts; +use libp2p::{ + build_multiaddr, + core::{muxing::StreamMuxerBox, transport::Boxed}, + gossipsub::TopicHash, + identify::Event as IdentifyEvent, + identity::Keypair, + quic, + request_response::ResponseChannel, + Multiaddr, Transport, +}; +use libp2p_identity::PeerId; +use rand::seq::IteratorRandom; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc, time::Duration}; +use tracing::{info, instrument}; + +#[cfg(async_executor_impl = "async-std")] +use libp2p::dns::DnsConfig; +#[cfg(async_executor_impl = "tokio")] +use libp2p::dns::TokioDnsConfig as DnsConfig; +#[cfg(async_executor_impl = "async-std")] +use quic::async_std::Transport as QuicTransport; +#[cfg(async_executor_impl = "tokio")] +use quic::tokio::Transport as QuicTransport; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + +/// this is mostly to estimate how many network connections +/// a node should allow +#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] +pub enum NetworkNodeType { + /// bootstrap node accepts all connections + Bootstrap, + /// regular node has a limit to the + /// number of connections to accept + Regular, + /// conductor node is never pruned + Conductor, +} + +impl FromStr for NetworkNodeType { + type Err = String; + + fn from_str(input: &str) -> Result { + match input { + "Conductor" => Ok(NetworkNodeType::Conductor), + "Regular" => Ok(NetworkNodeType::Regular), + "Bootstrap" => Ok(NetworkNodeType::Bootstrap), + _ => Err( + "Couldn't parse node type. Must be one of Conductor, Bootstrap, Regular" + .to_string(), + ), + } + } +} + +/// Serialize an arbitrary message +/// # Errors +/// When unable to serialize a message +pub fn serialize_msg(msg: &T) -> Result, Box> { + bincode_opts().serialize(&msg) +} + +/// Deserialize an arbitrary message +/// # Errors +/// When unable to deserialize a message +pub fn deserialize_msg<'a, T: Deserialize<'a>>( + msg: &'a [u8], +) -> Result> { + bincode_opts().deserialize(msg) +} + +impl Default for NetworkNodeType { + fn default() -> Self { + Self::Bootstrap + } +} + +/// Actions to send from the client to the swarm +#[derive(Debug)] +pub enum ClientRequest { + /// Start the bootstrap process to kademlia + BeginBootstrap, + /// kill the swarm + Shutdown, + /// broadcast a serialized message + GossipMsg(String, Vec), + /// subscribe to a topic + Subscribe(String, Option>), + /// unsubscribe from a topic + Unsubscribe(String, Option>), + /// client request to send a direct serialized message + DirectRequest { + /// peer id + pid: PeerId, + /// msg contents + contents: Vec, + /// number of retries + retry_count: u8, + }, + /// client request to send a direct reply to a message + DirectResponse(ResponseChannel, Vec), + /// prune a peer + Prune(PeerId), + /// add vec of known peers or addresses + AddKnownPeers(Vec<(Option, Multiaddr)>), + /// Ignore peers. Only here for debugging purposes. + /// Allows us to have nodes that are never pruned + IgnorePeers(Vec), + /// Put(Key, Value) into DHT + /// relay success back on channel + PutDHT { + /// Key to publish under + key: Vec, + /// Value to publish under + value: Vec, + /// Channel to notify caller of result of publishing + notify: Sender<()>, + }, + /// Get(Key, Chan) + GetDHT { + /// Key to search for + key: Vec, + /// Channel to notify caller of value (or failure to find value) + notify: Sender>, + /// number of retries to make + retry_count: u8, + }, + /// Request the number of connected peers + GetConnectedPeerNum(Sender), + /// Request the set of connected peers + GetConnectedPeers(Sender>), + /// Print the routing table to stderr, debugging only + GetRoutingTable(Sender<()>), + /// Get address of peer + LookupPeer(PeerId, Sender<()>), +} + +/// events generated by the swarm that we wish +/// to relay to the client +#[derive(Debug)] +pub enum NetworkEvent { + /// Recv-ed a broadcast + GossipMsg(Vec, TopicHash), + /// Recv-ed a direct message from a node + DirectRequest(Vec, PeerId, ResponseChannel), + /// Recv-ed a direct response from a node (that hopefully was initiated by this node) + DirectResponse(Vec, PeerId), + /// Report that kademlia has successfully bootstrapped into the network + IsBootstrapped, +} + +#[derive(Debug)] +/// internal representation of the network events +/// only used for event processing before relaying to client +pub enum NetworkEventInternal { + /// a DHT event + DHTEvent(DHTEvent), + /// a identify event. Is boxed because this event is much larger than the other ones so we want + /// to store it on the heap. + IdentifyEvent(Box), + /// a gossip event + GossipEvent(GossipEvent), + /// a direct message event + DMEvent(DMEvent), +} + +/// Bind all interfaces on port `port` +/// NOTE we may want something more general in the fture. +#[must_use] +pub fn gen_multiaddr(port: u16) -> Multiaddr { + build_multiaddr!(Ip4([0, 0, 0, 0]), Udp(port), QuicV1) +} + +/// Generate authenticated transport +/// # Errors +/// could not sign the quic key with `identity` +#[instrument(skip(identity))] +pub async fn gen_transport( + identity: Keypair, +) -> Result, NetworkError> { + let quic_transport = { + let mut config = quic::Config::new(&identity); + config.handshake_timeout = std::time::Duration::from_secs(20); + QuicTransport::new(config) + }; + + let dns_quic = { + #[cfg(async_executor_impl = "async-std")] + { + DnsConfig::system(quic_transport).await + } + + #[cfg(async_executor_impl = "tokio")] + { + DnsConfig::system(quic_transport) + } + } + .map_err(|e| NetworkError::TransportLaunch { source: e })?; + + Ok(dns_quic + .map(|(peer_id, connection), _| (peer_id, StreamMuxerBox::new(connection))) + .boxed()) +} + +/// a single node, connects them to each other +/// and waits for connections to propagate to all nodes. +#[instrument] +pub async fn spin_up_swarm( + timeout_len: Duration, + known_nodes: Vec<(Option, Multiaddr)>, + config: NetworkNodeConfig, + idx: usize, + handle: &Arc>, +) -> Result<(), NetworkNodeHandleError> { + info!("known_nodes{:?}", known_nodes); + handle.add_known_peers(known_nodes).await?; + handle.wait_to_connect(4, idx, timeout_len).await?; + handle.subscribe("global".to_string()).await?; + + Ok(()) +} + +/// Given a slice of handles assumed to be larger than 0, +/// chooses one +/// # Panics +/// panics if handles is of length 0 +pub fn get_random_handle( + handles: &[Arc>], + rng: &mut dyn rand::RngCore, +) -> Arc> { + handles.iter().choose(rng).unwrap().clone() +} diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs new file mode 100644 index 0000000000..37c9683428 --- /dev/null +++ b/libp2p-networking/src/network/node.rs @@ -0,0 +1,622 @@ +mod config; +mod handle; + +pub use self::{ + config::{ + MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, + }, + handle::{ + network_node_handle_error, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, + }, +}; + +use super::{ + behaviours::gossip::GossipBehaviour, + error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, + gen_transport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, NetworkNodeType, +}; +use crate::network::{ + behaviours::{ + dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery}, + direct_message::{DMBehaviour, DMEvent}, + direct_message_codec::{DirectMessageProtocol, MAX_MSG_SIZE_DM}, + exponential_backoff::ExponentialBackoff, + gossip::GossipEvent, + }, + def::NUM_REPLICATED_TO_TRUST, +}; +use async_compatibility_layer::{ + art::async_spawn, + channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, +}; +use either::Either; +use futures::{select, FutureExt, StreamExt}; +use libp2p::{ + core::{muxing::StreamMuxerBox, transport::Boxed}, + gossipsub::{ + Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, + Message as GossipsubMessage, MessageAuthenticity, MessageId, Topic, ValidationMode, + }, + identify::{ + Behaviour as IdentifyBehaviour, Config as IdentifyConfig, Event as IdentifyEvent, + Info as IdentifyInfo, + }, + identity::Keypair, + kad::{store::MemoryStore, Kademlia, KademliaConfig}, + request_response::{ + Behaviour as RequestResponse, Config as RequestResponseConfig, ProtocolSupport, + }, + swarm::{SwarmBuilder, SwarmEvent}, + Multiaddr, Swarm, +}; +use libp2p_identity::PeerId; +use rand::{prelude::SliceRandom, thread_rng}; +use snafu::ResultExt; +use std::{ + collections::{HashMap, HashSet}, + io::Error, + iter, + num::{NonZeroU32, NonZeroUsize}, + time::Duration, +}; +use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; + +/// Wrapped num of connections +pub const ESTABLISHED_LIMIT: NonZeroU32 = + unsafe { NonZeroU32::new_unchecked(ESTABLISHED_LIMIT_UNWR) }; +/// Number of connections to a single peer before logging an error +pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; + +/// Network definition +#[derive(custom_debug::Debug)] +pub struct NetworkNode { + /// pub/private key from with peer_id is derived + identity: Keypair, + /// peer id of network node + peer_id: PeerId, + /// the swarm of networkbehaviours + #[debug(skip)] + swarm: Swarm, + /// the configuration parameters of the netework + config: NetworkNodeConfig, +} + +impl NetworkNode { + /// Returns number of peers this node is connected to + pub fn num_connected(&self) -> usize { + self.swarm.connected_peers().count() + } + + /// return hashset of PIDs this node is connected to + pub fn connected_pids(&self) -> HashSet { + self.swarm.connected_peers().copied().collect() + } + + /// starts the swarm listening on `listen_addr` + /// and optionally dials into peer `known_peer` + /// returns the address the swarm is listening upon + #[instrument(skip(self))] + pub async fn start_listen( + &mut self, + listen_addr: Multiaddr, + ) -> Result { + self.swarm.listen_on(listen_addr).context(TransportSnafu)?; + let addr = loop { + if let Some(SwarmEvent::NewListenAddr { address, .. }) = self.swarm.next().await { + break address; + } + }; + info!("peerid {:?} started on addr: {:?}", self.peer_id, addr); + Ok(addr) + } + + /// initialize the DHT with known peers + /// add the peers to kademlia and then + /// the `spawn_listeners` function + /// will start connecting to peers + #[instrument(skip(self))] + pub fn add_known_peers(&mut self, known_peers: &[(Option, Multiaddr)]) { + info!("Adding nodes {:?} to {:?}", known_peers, self.peer_id); + let behaviour = self.swarm.behaviour_mut(); + let mut bs_nodes = HashMap::>::new(); + let mut shuffled = known_peers.iter().collect::>(); + shuffled.shuffle(&mut thread_rng()); + for (peer_id, addr) in shuffled { + match peer_id { + Some(peer_id) => { + // if we know the peerid, add address. + if *peer_id != self.peer_id { + behaviour.dht.add_address(peer_id, addr.clone()); + bs_nodes.insert(*peer_id, iter::once(addr.clone()).collect()); + } + } + None => { + // + // TODO actually implement this part + // if we don't know the peerid, dial to find out what the peerid is + } + } + } + behaviour.dht.add_bootstrap_nodes(bs_nodes); + } + + /// Creates a new `Network` with the given settings. + /// + /// Currently: + /// * Generates a random key pair and associated [`PeerId`] + /// * Launches a hopefully production ready transport: + /// QUIC v1 (RFC 9000) + DNS + Websocket + XX auth + /// * Generates a connection to the "broadcast" topic + /// * Creates a swarm to manage peers and events + #[instrument] + pub async fn new(config: NetworkNodeConfig) -> Result { + // Generate a random PeerId + let identity = if let Some(ref kp) = config.identity { + kp.clone() + } else { + Keypair::generate_ed25519() + }; + let peer_id = PeerId::from(identity.public()); + debug!(?peer_id); + let transport: Boxed<(PeerId, StreamMuxerBox)> = gen_transport(identity.clone()).await?; + trace!("Launched network transport"); + // Generate the swarm + let mut swarm: Swarm = { + // Use the hash of the message's contents as the ID + // Use blake3 for much paranoia at very high speeds + let message_id_fn = |message: &GossipsubMessage| { + let hash = blake3::hash(&message.data); + MessageId::from(hash.as_bytes().to_vec()) + }; + + let params = if let Some(ref params) = config.mesh_params { + params.clone() + } else { + // NOTE this should most likely be a builder pattern + // at some point in the future. + match config.node_type { + NetworkNodeType::Bootstrap => MeshParams { + mesh_n_high: 1000, // make this super high in case we end up scaling to 1k + // nodes + mesh_n_low: 10, + mesh_outbound_min: 5, + mesh_n: 15, + }, + NetworkNodeType::Regular => MeshParams { + mesh_n_high: 15, + mesh_n_low: 8, + mesh_outbound_min: 4, + mesh_n: 12, + }, + NetworkNodeType::Conductor => MeshParams { + mesh_n_high: 21, + mesh_n_low: 8, + mesh_outbound_min: 4, + mesh_n: 12, + }, + } + }; + + // Create a custom gossipsub + let gossipsub_config = GossipsubConfigBuilder::default() + .opportunistic_graft_ticks(3) + .heartbeat_interval(Duration::from_secs(1)) + // Force all messages to have valid signatures + .validation_mode(ValidationMode::Strict) + .history_gossip(50) + .mesh_n_high(params.mesh_n_high) + .mesh_n_low(params.mesh_n_low) + .mesh_outbound_min(params.mesh_outbound_min) + .mesh_n(params.mesh_n) + .history_length(500) + .max_transmit_size(2 * MAX_MSG_SIZE_DM) + // Use the (blake3) hash of a message as its ID + .message_id_fn(message_id_fn) + .build() + .map_err(|s| GossipsubConfigSnafu { message: s }.build())?; + + // - Build a gossipsub network behavior + let gossipsub: Gossipsub = Gossipsub::new( + // TODO do we even need this? + // + // if messages are signed at the the consensus level AND the network + // level (noise), this feels redundant. + MessageAuthenticity::Signed(identity.clone()), + gossipsub_config, + ) + .map_err(|s| GossipsubBuildSnafu { message: s }.build())?; + + // Build a identify network behavior needed for own + // node connection information + // E.g. this will answer the question: how are other nodes + // seeing the peer from behind a NAT + let identify_cfg = + IdentifyConfig::new("HotShot/identify/1.0".to_string(), identity.public()); + let identify = IdentifyBehaviour::new(identify_cfg); + + // - Build DHT needed for peer discovery + let mut kconfig = KademliaConfig::default(); + // 8 hours by default + let record_republication_interval = config + .republication_interval + .unwrap_or(Duration::from_secs(28800)); + let ttl = Some(config.ttl.unwrap_or(16 * record_republication_interval)); + kconfig + .set_parallelism(NonZeroUsize::new(1).unwrap()) + .set_provider_publication_interval(Some(record_republication_interval)) + .set_publication_interval(Some(record_republication_interval)) + .set_record_ttl(ttl); + + if let Some(factor) = config.replication_factor { + kconfig.set_replication_factor(factor); + } + + let kadem = Kademlia::with_config(peer_id, MemoryStore::new(peer_id), kconfig); + + let rrconfig = RequestResponseConfig::default(); + + let request_response = RequestResponse::new( + [(DirectMessageProtocol(), ProtocolSupport::Full)].into_iter(), + rrconfig, + ); + + let network = NetworkDef::new( + GossipBehaviour::new(gossipsub), + DHTBehaviour::new( + kadem, + peer_id, + config + .replication_factor + .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), + ), + identify, + DMBehaviour::new(request_response), + ); + let executor = Box::new(|fut| { + async_spawn(fut); + }); + + SwarmBuilder::with_executor(transport, network, peer_id, executor) + .dial_concurrency_factor(std::num::NonZeroU8::new(1).unwrap()) + .build() + }; + for (peer, addr) in &config.to_connect_addrs { + if let Some(peer) = peer { + if peer != swarm.local_peer_id() { + swarm.behaviour_mut().add_address(peer, addr.clone()); + } + } + } + + Ok(Self { + identity, + peer_id, + swarm, + config, + }) + } + + /// event handler for client events + /// currectly supported actions include + /// - shutting down the swarm + /// - gossipping a message to known peers on the `global` topic + /// - returning the id of the current peer + /// - subscribing to a topic + /// - unsubscribing from a toipc + /// - direct messaging a peer + #[instrument(skip(self))] + async fn handle_client_requests( + &mut self, + msg: Result, + ) -> Result { + let behaviour = self.swarm.behaviour_mut(); + match msg { + Ok(msg) => { + match msg { + ClientRequest::BeginBootstrap => { + self.swarm.behaviour_mut().dht.begin_bootstrap(); + } + ClientRequest::LookupPeer(pid, chan) => { + self.swarm.behaviour_mut().dht.lookup_peer(pid, chan); + } + ClientRequest::GetRoutingTable(chan) => { + self.swarm.behaviour_mut().dht.print_routing_table(); + if chan.send(()).is_err() { + warn!("Tried to notify client but client not tracking anymore"); + } + } + ClientRequest::PutDHT { key, value, notify } => { + let query = KadPutQuery { + progress: DHTProgress::NotStarted, + notify, + key, + value, + backoff: ExponentialBackoff::default(), + }; + self.swarm.behaviour_mut().put_record(query); + } + ClientRequest::GetConnectedPeerNum(s) => { + if s.send(self.num_connected()).is_err() { + error!("error sending peer number to client"); + } + } + ClientRequest::GetConnectedPeers(s) => { + if s.send(self.connected_pids()).is_err() { + error!("error sending peer set to client"); + } + } + ClientRequest::GetDHT { + key, + notify, + retry_count, + } => { + self.swarm.behaviour_mut().get_record( + key, + notify, + NonZeroUsize::new(NUM_REPLICATED_TO_TRUST).unwrap(), + retry_count, + ); + } + ClientRequest::IgnorePeers(_peers) => { + // NOTE used by test with conductor only + } + ClientRequest::Shutdown => { + warn!("Libp2p listener shutting down"); + return Ok(true); + } + ClientRequest::GossipMsg(topic, contents) => { + behaviour.publish_gossip(Topic::new(topic), contents); + } + ClientRequest::Subscribe(t, chan) => { + behaviour.subscribe_gossip(&t); + if let Some(chan) = chan { + if chan.send(()).is_err() { + error!("finished subscribing but response channel dropped"); + } + } + } + ClientRequest::Unsubscribe(t, chan) => { + behaviour.unsubscribe_gossip(&t); + if let Some(chan) = chan { + if chan.send(()).is_err() { + error!("finished unsubscribing but response channel dropped"); + } + } + } + ClientRequest::DirectRequest { + pid, + contents, + retry_count, + } => { + info!("pid {:?} adding direct request", self.peer_id); + behaviour.add_direct_request(pid, contents, retry_count); + } + ClientRequest::DirectResponse(chan, msg) => { + behaviour.add_direct_response(chan, msg); + } + ClientRequest::AddKnownPeers(peers) => { + self.add_known_peers(&peers); + } + ClientRequest::Prune(pid) => { + if self.swarm.disconnect_peer_id(pid).is_err() { + error!( + "Peer {:?} could not disconnect from pid {:?}", + self.peer_id, pid + ); + } + } + } + } + Err(e) => { + error!("Error receiving msg in main behaviour loop: {:?}", e); + } + } + Ok(false) + } + + /// event handler for events emited from the swarm + #[allow(clippy::type_complexity)] + #[instrument(skip(self))] + async fn handle_swarm_events( + &mut self, + event: SwarmEvent< + NetworkEventInternal, + Either, Error>, void::Void>, + >, + send_to_client: &UnboundedSender, + ) -> Result<(), NetworkError> { + // Make the match cleaner + info!("event observed {:?}", event); + + #[allow(deprecated)] + match event { + SwarmEvent::ConnectionEstablished { + connection_id: _, + peer_id, + endpoint, + num_established, + concurrent_dial_errors, + established_in: _established_in, + } => { + if num_established > ESTABLISHED_LIMIT { + error!( + "Num concurrent connections to a single peer exceeding {:?} at {:?}!", + ESTABLISHED_LIMIT, num_established + ); + } else { + info!("peerid {:?} connection is established to {:?} with endpoint {:?} with concurrent dial errors {:?}. {:?} connections left", self.peer_id, peer_id, endpoint, concurrent_dial_errors, num_established); + } + } + SwarmEvent::ConnectionClosed { + connection_id: _, + peer_id, + endpoint, + num_established, + cause, + } => { + if num_established > ESTABLISHED_LIMIT_UNWR { + error!( + "Num concurrent connections to a single peer exceeding {:?} at {:?}!", + ESTABLISHED_LIMIT, num_established + ); + } else { + info!("peerid {:?} connection is closed to {:?} with endpoint {:?}. {:?} connections left. Cause: {:?}", self.peer_id, peer_id, endpoint, num_established, cause); + } + } + SwarmEvent::Dialing { + peer_id, + connection_id: _, + } => { + info!("{:?} is dialing {:?}", self.peer_id, peer_id); + } + SwarmEvent::ListenerClosed { + listener_id: _, + addresses: _, + reason: _, + } + | SwarmEvent::NewListenAddr { + listener_id: _, + address: _, + } + | SwarmEvent::ExpiredListenAddr { + listener_id: _, + address: _, + } + | SwarmEvent::IncomingConnection { + connection_id: _, + local_addr: _, + send_back_addr: _, + } => {} + SwarmEvent::Behaviour(b) => { + let maybe_event = match b { + NetworkEventInternal::DHTEvent(e) => match e { + DHTEvent::IsBootstrapped => Some(NetworkEvent::IsBootstrapped), + }, + NetworkEventInternal::IdentifyEvent(e) => { + // NOTE feed identified peers into kademlia's routing table for peer discovery. + if let IdentifyEvent::Received { + peer_id, + info: + IdentifyInfo { + listen_addrs, + protocols: _, + public_key: _, + protocol_version: _, + agent_version: _, + observed_addr, + }, + } = *e + { + let behaviour = self.swarm.behaviour_mut(); + // NOTE in practice, we will want to NOT include this. E.g. only DNS/non localhost IPs + // NOTE I manually checked and peer_id corresponds to listen_addrs. + // NOTE Once we've tested on DNS addresses, this should be swapped out to play nicely + // with autonat + info!( + "local peer {:?} IDENTIFY ADDRS LISTEN: {:?} for peer {:?}, ADDRS OBSERVED: {:?} ", + behaviour.dht.peer_id, peer_id, listen_addrs, observed_addr + ); + // into hashset to delete duplicates (I checked: there are duplicates) + for addr in listen_addrs.iter().collect::>() { + behaviour.dht.add_address(&peer_id, addr.clone()); + } + } + None + } + NetworkEventInternal::GossipEvent(e) => match e { + GossipEvent::GossipMsg(data, topic) => { + Some(NetworkEvent::GossipMsg(data, topic)) + } + }, + NetworkEventInternal::DMEvent(e) => Some(match e { + DMEvent::DirectRequest(data, pid, chan) => { + NetworkEvent::DirectRequest(data, pid, chan) + } + DMEvent::DirectResponse(data, pid) => { + NetworkEvent::DirectResponse(data, pid) + } + }), + }; + + if let Some(event) = maybe_event { + // forward messages directly to Client + send_to_client + .send(event) + .await + .map_err(|_e| NetworkError::StreamClosed)?; + } + } + SwarmEvent::OutgoingConnectionError { + connection_id: _, + peer_id: _, + error, + } => { + info!(?error, "OUTGOING CONNECTION ERROR, {:?}", error); + } + SwarmEvent::IncomingConnectionError { + connection_id: _, + local_addr, + send_back_addr, + error, + } => { + info!( + "INCOMING CONNECTION ERROR: {:?} {:?} {:?}", + local_addr, send_back_addr, error + ); + } + SwarmEvent::ListenerError { listener_id, error } => { + info!("LISTENER ERROR {:?} {:?}", listener_id, error); + } + } + Ok(()) + } + + /// Spawn a task to listen for requests on the returned channel + /// as well as any events produced by libp2p + #[instrument] + pub async fn spawn_listeners( + mut self, + ) -> Result< + ( + UnboundedSender, + UnboundedReceiver, + ), + NetworkError, + > { + let (s_input, s_output) = unbounded::(); + let (r_input, r_output) = unbounded::(); + + async_spawn( + async move { + let mut fuse = s_output.recv().boxed().fuse(); + loop { + select! { + event = self.swarm.next() => { + debug!("peerid {:?}\t\thandling maybe event {:?}", self.peer_id, event); + if let Some(event) = event { + info!("peerid {:?}\t\thandling event {:?}", self.peer_id, event); + self.handle_swarm_events(event, &r_input).await?; + } + }, + msg = fuse => { + debug!("peerid {:?}\t\thandling msg {:?}", self.peer_id, msg); + let shutdown = self.handle_client_requests(msg).await?; + if shutdown { + break + } + fuse = s_output.recv().boxed().fuse(); + } + } + } + Ok::<(), NetworkError>(()) + } + .instrument(info_span!("Libp2p NetworkBehaviour Handler")), + ); + Ok((s_input, r_output)) + } + + /// Get a reference to the network node's peer id. + pub fn peer_id(&self) -> PeerId { + self.peer_id + } +} diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs new file mode 100644 index 0000000000..4d19b6f516 --- /dev/null +++ b/libp2p-networking/src/network/node/config.rs @@ -0,0 +1,65 @@ +use crate::network::NetworkNodeType; +use libp2p::{identity::Keypair, Multiaddr}; +use libp2p_identity::PeerId; +use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; + +/// replication factor for kademlia +pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(20); + +/// describe the configuration of the network +#[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] +pub struct NetworkNodeConfig { + #[builder(default)] + /// The type of node (bootstrap etc) + pub node_type: NetworkNodeType, + /// optional identity + #[builder(setter(into, strip_option), default)] + #[debug(skip)] + pub identity: Option, + /// address to bind to + #[builder(default)] + pub bound_addr: Option, + /// replication factor for entries in the DHT + /// default is [`libp2p::kad::K_VALUE`] which is 20 + #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] + pub replication_factor: Option, + + #[builder(default)] + /// parameters for gossipsub mesh network + pub mesh_params: Option, + + /// list of addresses to connect to at initialization + pub to_connect_addrs: HashSet<(Option, Multiaddr)>, + /// republication interval in DHT, must be much less than `ttl` + #[builder(default)] + pub republication_interval: Option, + /// expiratiry for records in DHT + #[builder(default)] + pub ttl: Option, +} + +/// NOTE: `mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high` +/// NOTE: `mesh_outbound_min <= self.config.mesh_n / 2` +/// parameters fed into gossipsub controlling the structure of the mesh +#[derive(Clone, Debug)] +pub struct MeshParams { + /// mesh_n_high from gossipsub + pub mesh_n_high: usize, + /// mesh_n_low from gossipsub + pub mesh_n_low: usize, + /// mesh_outbound_min from gossipsub + pub mesh_outbound_min: usize, + /// mesh_n from gossipsub + pub mesh_n: usize, +} + +impl Default for MeshParams { + fn default() -> Self { + Self { + mesh_n_high: 15, + mesh_n_low: 8, + mesh_outbound_min: 4, + mesh_n: 12, + } + } +} diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs new file mode 100644 index 0000000000..164a2e5a78 --- /dev/null +++ b/libp2p-networking/src/network/node/handle.rs @@ -0,0 +1,681 @@ +use crate::network::{ + behaviours::direct_message_codec::DirectMessageResponse, error::DHTError, gen_multiaddr, + ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, + NetworkNodeConfigBuilderError, +}; +use async_compatibility_layer::{ + art::{async_sleep, async_spawn, async_timeout, future::to, stream}, + async_primitives::subscribable_mutex::SubscribableMutex, + channel::{ + bounded, oneshot, OneShotReceiver, OneShotSender, Receiver, SendError, Sender, + UnboundedReceiver, UnboundedRecvError, UnboundedSender, + }, +}; +use async_lock::Mutex; +use bincode::Options; +use futures::{stream::FuturesOrdered, Future, FutureExt}; +use hotshot_utils::bincode::bincode_opts; +use libp2p::{request_response::ResponseChannel, Multiaddr}; +use libp2p_identity::PeerId; +use serde::{Deserialize, Serialize}; +use snafu::{ResultExt, Snafu}; +use std::{ + collections::HashSet, + fmt::Debug, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; +use tracing::{debug, info, instrument}; + +/// A handle containing: +/// - A reference to the state +/// - Controls for the swarm +#[derive(Debug)] +pub struct NetworkNodeHandle { + /// network configuration + network_config: NetworkNodeConfig, + /// the state of the replica + state: Arc>, + /// send an action to the networkbehaviour + send_network: UnboundedSender, + + /// the local address we're listening on + listen_addr: Multiaddr, + /// the peer id of the networkbehaviour + peer_id: PeerId, + /// human readable id + id: usize, + + /// A list of webui listeners that are listening for changes on this node + webui_listeners: Arc>>>, + + receiver: NetworkNodeReceiver, +} + +/// internal network node receiver +#[derive(Debug)] +pub struct NetworkNodeReceiver { + /// whether or not the receiver is started + receiver_spawned: AtomicBool, + + /// whether or not the handle has been killed + killed: AtomicBool, + + /// the receiver + receiver: Mutex>, + + ///kill switch + recv_kill: Mutex>>, + + /// kill the event handler for events from the swarm + kill_switch: Mutex>>, +} + +impl NetworkNodeReceiver { + pub async fn recv(&self) -> Result { + if self.killed.load(Ordering::Relaxed) { + return Err(NetworkNodeHandleError::Killed); + } + let lock = self.receiver.lock().await; + lock.recv().await.context(ReceiverEndedSnafu) + } +} + +impl NetworkNodeHandle { + /// constructs a new node listening on `known_addr` + #[instrument] + pub async fn new(config: NetworkNodeConfig, id: usize) -> Result { + //`randomly assigned port + let listen_addr = config + .bound_addr + .clone() + .unwrap_or_else(|| gen_multiaddr(0)); + let mut network = NetworkNode::new(config.clone()) + .await + .context(NetworkSnafu)?; + + let peer_id = network.peer_id(); + let listen_addr = network + .start_listen(listen_addr) + .await + .context(NetworkSnafu)?; + info!("LISTEN ADDRESS IS {:?}", listen_addr); + // pin here to force the future onto the heap since it can be large + // in the case of flume + let (send_chan, recv_chan) = Box::pin(network.spawn_listeners()) + .await + .context(NetworkSnafu)?; + let (kill_switch, recv_kill) = oneshot(); + + let kill_switch = Mutex::new(Some(kill_switch)); + let recv_kill = Mutex::new(Some(recv_kill)); + + Ok(NetworkNodeHandle { + network_config: config, + state: std::sync::Arc::default(), + send_network: send_chan, + listen_addr, + peer_id, + id, + webui_listeners: Arc::default(), + receiver: NetworkNodeReceiver { + kill_switch, + killed: AtomicBool::new(false), + receiver: Mutex::new(recv_chan), + recv_kill, + receiver_spawned: AtomicBool::new(false), + }, + }) + } + + /// Spawn a handler `F` that will be notified every time a new [`NetworkEvent`] arrives. + /// + /// # Panics + /// + /// Will panic if a handler is already spawned + pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future + where + F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, + RET: Future> + Send + 'static, + S: Send + 'static, + { + assert!( + !self.receiver.receiver_spawned.swap(true, Ordering::Relaxed), + "Handler is already spawned, this is a bug" + ); + + let handle = Arc::clone(self); + async_spawn(async move { + let receiver = handle.receiver.receiver.lock().await; + let Some(kill_switch) = handle.receiver.recv_kill.lock().await.take() else { + tracing::error!( + "`spawn_handle` was called on a network handle that was already closed" + ); + return; + }; + let mut next_msg = receiver.recv().boxed(); + let mut kill_switch = kill_switch.recv().boxed(); + loop { + match futures::future::select(next_msg, kill_switch).await { + futures::future::Either::Left((incoming_message, other_stream)) => { + let incoming_message = match incoming_message { + Ok(msg) => msg, + Err(e) => { + tracing::warn!(?e, "NetworkNodeHandle::spawn_handle was unable to receive more messages"); + return; + } + }; + if let Err(e) = cb(incoming_message, handle.clone()).await { + tracing::error!( + ?e, + "NetworkNodeHandle::spawn_handle returned an error" + ); + return; + } + + // re-set the `kill_switch` for the next loop + kill_switch = other_stream; + // re-set `receiver.recv()` for the next loop + next_msg = receiver.recv().boxed(); + } + futures::future::Either::Right(_) => { + // killed + handle.receiver.killed.store(true, Ordering::Relaxed); + return; + } + } + } + }).map(|_| ()) + } + + /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. + /// + /// # Errors + /// + /// Will return any networking error encountered, or `ConnectTimeout` if the `timeout` has elapsed. + pub async fn wait_to_connect( + &self, + num_peers: usize, + node_id: usize, + timeout: Duration, + ) -> Result<(), NetworkNodeHandleError> + where + S: Default + Debug, + { + let start = Instant::now(); + self.begin_bootstrap().await?; + let mut connected_ok = false; + while !connected_ok { + if start.elapsed() >= timeout { + return Err(NetworkNodeHandleError::ConnectTimeout); + } + async_sleep(Duration::from_secs(1)).await; + let num_connected = self.num_connected().await?; + info!( + "WAITING TO CONNECT, connected to {} / {} peers ON NODE {}", + num_connected, num_peers, node_id + ); + connected_ok = num_connected >= num_peers; + } + Ok(()) + } + + /// Receives a reference of the internal `NetworkNodeReceiver`, which can be used to query for incoming messages. + pub fn receiver(&self) -> &NetworkNodeReceiver { + &self.receiver + } + + /// Cleanly shuts down a swarm node + /// This is done by sending a message to + /// the swarm event handler to stop handling events + /// and a message to the swarm itself to spin down + #[instrument] + pub async fn shutdown(&self) -> Result<(), NetworkNodeHandleError> { + self.send_request(ClientRequest::Shutdown).await?; + // if this fails, the thread has already been killed. + if let Some(kill_switch) = self.receiver.kill_switch.lock().await.take() { + kill_switch.send(()); + } else { + tracing::warn!("The network node handle is shutting down, but the kill switch was already consumed"); + } + Ok(()) + } + + /// Notify the network to begin the bootstrap process + /// # Errors + /// If unable to send via `send_network`. This should only happen + /// if the network is shut down. + pub async fn begin_bootstrap(&self) -> Result<(), NetworkNodeHandleError> { + let req = ClientRequest::BeginBootstrap; + self.send_request(req).await + } + + /// Get a reference to the network node handle's listen addr. + pub fn listen_addr(&self) -> Multiaddr { + self.listen_addr.clone() + } +} + +impl NetworkNodeHandle { + /// Print out the routing table used by kademlia + /// NOTE: only for debugging purposes currently + /// # Errors + /// if the client has stopped listening for a response + pub async fn print_routing_table(&self) -> Result<(), NetworkNodeHandleError> { + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::GetRoutingTable(s); + self.send_request(req).await?; + r.await.map_err(|_| NetworkNodeHandleError::RecvError) + } + + /// Look up a peer's addresses in kademlia + /// NOTE: this should always be called before any `request_response` is initiated + /// # Errors + /// if the client has stopped listening for a response + pub async fn lookup_pid(&self, peer_id: PeerId) -> Result<(), NetworkNodeHandleError> { + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::LookupPeer(peer_id, s); + self.send_request(req).await?; + r.await.map_err(|_| NetworkNodeHandleError::RecvError) + } + + /// Insert a record into the kademlia DHT + /// # Errors + /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value + pub async fn put_record( + &self, + key: &impl Serialize, + value: &impl Serialize, + ) -> Result<(), NetworkNodeHandleError> { + use crate::network::error::CancelledRequestSnafu; + + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::PutDHT { + key: bincode_opts().serialize(key).context(SerializationSnafu)?, + value: bincode_opts() + .serialize(value) + .context(SerializationSnafu)?, + notify: s, + }; + + self.send_request(req).await?; + + r.await.context(CancelledRequestSnafu).context(DHTSnafu) + } + + /// Receive a record from the kademlia DHT if it exists. + /// Must be replicated on at least 2 nodes + /// # Errors + /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key + /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value + pub async fn get_record Deserialize<'a>>( + &self, + key: &impl Serialize, + retry_count: u8, + ) -> Result { + use crate::network::error::CancelledRequestSnafu; + + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::GetDHT { + key: bincode_opts().serialize(key).context(SerializationSnafu)?, + notify: s, + retry_count, + }; + self.send_request(req).await?; + + match r.await.context(CancelledRequestSnafu) { + Ok(result) => bincode_opts() + .deserialize(&result) + .context(DeserializationSnafu), + Err(e) => Err(e).context(DHTSnafu), + } + } + + /// Get a record from the kademlia DHT with a timeout + /// # Errors + /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT + /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key + /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value + pub async fn get_record_timeout Deserialize<'a>>( + &self, + key: &impl Serialize, + timeout: Duration, + ) -> Result { + let result = async_timeout(timeout, self.get_record(key, 1)).await; + match result { + Err(e) => Err(e).context(TimeoutSnafu), + Ok(r) => r, + } + } + + /// Insert a record into the kademlia DHT with a timeout + /// # Errors + /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT + /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + pub async fn put_record_timeout( + &self, + key: &impl Serialize, + value: &impl Serialize, + timeout: Duration, + ) -> Result<(), NetworkNodeHandleError> { + let result = async_timeout(timeout, self.put_record(key, value)).await; + match result { + Err(e) => Err(e).context(TimeoutSnafu), + Ok(r) => r, + } + } + + /// Notify the webui that either the `state` or `connection_state` has changed. + /// + /// If the webui is not started, this will do nothing. + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + pub async fn notify_webui(&self) { + let mut lock = self.webui_listeners.lock().await; + // Keep a list of indexes that are unable to send the update + let mut indexes_to_remove = Vec::new(); + for (idx, sender) in lock.iter().enumerate() { + if sender.send(()).await.is_err() { + indexes_to_remove.push(idx); + } + } + // Make sure to remove the indexes in reverse other, else removing an index will invalidate the following indexes. + for idx in indexes_to_remove.into_iter().rev() { + lock.remove(idx); + } + } + + /// Subscribe to a topic + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + pub async fn subscribe(&self, topic: String) -> Result<(), NetworkNodeHandleError> { + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::Subscribe(topic, Some(s)); + self.send_request(req).await?; + r.await.map_err(|_| NetworkNodeHandleError::RecvError) + } + + /// Unsubscribe from a topic + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + pub async fn unsubscribe(&self, topic: String) -> Result<(), NetworkNodeHandleError> { + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::Unsubscribe(topic, Some(s)); + self.send_request(req).await?; + r.await.map_err(|_| NetworkNodeHandleError::RecvError) + } + + /// Ignore `peers` when pruning + /// e.g. maintain their connection + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + pub async fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkNodeHandleError> { + let req = ClientRequest::IgnorePeers(peers); + self.send_request(req).await + } + + /// Make a direct request to `peer_id` containing `msg` + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + pub async fn direct_request( + &self, + pid: PeerId, + msg: &impl Serialize, + ) -> Result<(), NetworkNodeHandleError> { + let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + let req = ClientRequest::DirectRequest { + pid, + contents: serialized_msg, + retry_count: 1, + }; + self.send_request(req).await + } + + /// Reply with `msg` to a request over `chan` + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + pub async fn direct_response( + &self, + chan: ResponseChannel, + msg: &impl Serialize, + ) -> Result<(), NetworkNodeHandleError> { + let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + let req = ClientRequest::DirectResponse(chan, serialized_msg); + self.send_request(req).await + } + + /// Forcefully disconnet from a peer + /// # Errors + /// If the channel is closed somehow + /// Shouldnt' happen. + /// # Panics + /// If channel errors out + /// shouldn't happen. + pub async fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkNodeHandleError> { + let req = ClientRequest::Prune(pid); + self.send_request(req).await + } + + /// Gossip a message to peers + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + pub async fn gossip( + &self, + topic: String, + msg: &impl Serialize, + ) -> Result<(), NetworkNodeHandleError> { + let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + let req = ClientRequest::GossipMsg(topic, serialized_msg); + self.send_request(req).await + } + + /// Tell libp2p about known network nodes + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + pub async fn add_known_peers( + &self, + known_peers: Vec<(Option, Multiaddr)>, + ) -> Result<(), NetworkNodeHandleError> { + info!("ADDING KNOWN PEERS TO {:?}", self.peer_id); + let req = ClientRequest::AddKnownPeers(known_peers); + self.send_request(req).await + } + + /// Send a client request to the network + /// + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + async fn send_request(&self, req: ClientRequest) -> Result<(), NetworkNodeHandleError> { + debug!("peerid {:?}\t\tsending message {:?}", self.peer_id, req); + self.send_network + .send(req) + .await + .map_err(|_| NetworkNodeHandleError::SendError)?; + Ok(()) + } + + /// Returns number of peers this node is connected to + /// # Errors + /// If the channel is closed somehow + /// Shouldnt' happen. + /// # Panics + /// If channel errors out + /// shouldn't happen. + pub async fn num_connected(&self) -> Result { + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::GetConnectedPeerNum(s); + self.send_request(req).await?; + Ok(r.await.unwrap()) + } + + /// return hashset of PIDs this node is connected to + /// # Errors + /// If the channel is closed somehow + /// Shouldnt' happen. + /// # Panics + /// If channel errors out + /// shouldn't happen. + pub async fn connected_pids(&self) -> Result, NetworkNodeHandleError> { + let (s, r) = futures::channel::oneshot::channel(); + let req = ClientRequest::GetConnectedPeers(s); + self.send_request(req).await?; + Ok(r.await.unwrap()) + } + + /// Get a reference to the network node handle's id. + pub fn id(&self) -> usize { + self.id + } + + /// Get a reference to the network node handle's peer id. + pub fn peer_id(&self) -> PeerId { + self.peer_id + } + + /// Return a reference to the network config + pub fn config(&self) -> &NetworkNodeConfig { + &self.network_config + } + + /// Modify the state. This will automatically call `state_changed` and `notify_webui` + pub async fn modify_state(&self, cb: F) + where + F: FnMut(&mut S), + { + self.state.modify(cb).await; + } + + /// Returns `true` if the network state is killed + pub fn is_killed(&self) -> bool { + self.receiver.killed.load(Ordering::Relaxed) + } + + /// Register a webui listener + pub async fn register_webui_listener(&self) -> Receiver<()> { + let (sender, receiver) = bounded(100); + let mut lock = self.webui_listeners.lock().await; + lock.push(sender); + receiver + } + + /// Call `wait_timeout_until` on the state's [`SubscribableMutex`] + /// # Errors + /// Will throw a [`NetworkNodeHandleError::TimeoutError`] error upon timeout + pub async fn state_wait_timeout_until( + &self, + timeout: Duration, + f: F, + ) -> Result<(), NetworkNodeHandleError> + where + F: FnMut(&S) -> bool, + { + self.state + .wait_timeout_until(timeout, f) + .await + .context(TimeoutSnafu) + } + + /// Call `wait_timeout_until_with_trigger` on the state's [`SubscribableMutex`] + pub fn state_wait_timeout_until_with_trigger<'a, F>( + &'a self, + timeout: Duration, + f: F, + ) -> stream::to::Timeout + 'a>> + where + F: FnMut(&S) -> bool + 'a, + { + self.state.wait_timeout_until_with_trigger(timeout, f) + } + + /// Call `wait_until` on the state's [`SubscribableMutex`] + /// # Errors + /// Will throw a [`NetworkNodeHandleError::TimeoutError`] error upon timeout + pub async fn state_wait_until(&self, f: F) -> Result<(), NetworkNodeHandleError> + where + F: FnMut(&S) -> bool, + { + self.state.wait_until(f).await; + Ok(()) + } +} + +impl NetworkNodeHandle { + /// Get a clone of the internal state + pub async fn state(&self) -> S { + self.state.cloned().await + } +} + +/// Error wrapper type for interacting with swarm handle +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum NetworkNodeHandleError { + /// Error generating network + NetworkError { + /// source of error + source: NetworkError, + }, + /// Failure to serialize a message + SerializationError { + /// source of error + source: Box, + }, + /// Failure to deserialize a message + DeserializationError { + /// source of error + source: Box, + }, + /// Error sending request to network + SendError, + /// Error receiving message from network + RecvError, + /// Error building Node config + NodeConfigError { + /// source of error + source: NetworkNodeConfigBuilderError, + }, + /// Error waiting for connections + TimeoutError { + /// source of error + source: to::TimeoutError, + }, + /// Could not connect to the network in time + ConnectTimeout, + /// Error in the kademlia DHT + DHTError { + /// source of error + source: DHTError, + }, + /// The inner [`NetworkNode`] has already been killed + CantKillTwice { + /// dummy source + source: SendError<()>, + }, + /// The network node has been killed + Killed, + /// The receiver was unable to receive a new message + ReceiverEnded { + /// source of error + source: UnboundedRecvError, + }, + /// no known topic matches the hashset of keys + NoSuchTopic, +} + +/// Re-exports of the snafu errors that [`NetworkNodeHandleError`] can throw +pub mod network_node_handle_error { + pub use super::{ + NetworkSnafu, NodeConfigSnafu, RecvSnafu, SendSnafu, SerializationSnafu, TimeoutSnafu, + }; +} diff --git a/libp2p-networking/test.py b/libp2p-networking/test.py new file mode 100755 index 0000000000..66368b297e --- /dev/null +++ b/libp2p-networking/test.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +from enum import Enum +from functools import reduce +from typing import Final +from subprocess import run, Popen +from time import sleep +from os import environ + +class NodeType(Enum): + CONDUCTOR = "Conductor" + REGULAR = "Regular" + BOOTSTRAP = "Bootstrap" + +def gen_invocation( + node_type: NodeType, + num_nodes: int, + to_connect_addrs: list[str], + conductor_addr: str, + num_rounds: int, + bound_addr: str, + ) -> tuple[list[str], str]: + aggr_list = lambda x, y: f'{x},{y}' + to_connect_list : Final[str] = reduce(aggr_list, to_connect_addrs); + out_file_name : Final[str] = f'out_{node_type}_{bound_addr[-4:]}'; + fmt_cmd = [ + f'cargo run --no-default-features --features=async-std-executor --example=counter --profile=release-lto -- ' \ + f' --bound_addr={bound_addr} '\ + f' --node_type={node_type.value} '\ + f' --num_nodes={num_nodes} '\ + f' --num_gossip={num_rounds} '\ + f' --to_connect_addrs={to_connect_list} '\ + f' --conductor_addr={conductor_addr} ']; + return (fmt_cmd, out_file_name) + +# construct a map: + +if __name__ == "__main__": + # cleanup + + run("rm -f out_*".split()) + + + # params + START_PORT : Final[int] = 9100; + NUM_REGULAR_NODES : Final[int] = 100; + NUM_NODES_PER_BOOTSTRAP : Final[int] = 10; + NUM_BOOTSTRAP : Final[int] = (int) (NUM_REGULAR_NODES / NUM_NODES_PER_BOOTSTRAP); + TOTAL_NUM_NODES: Final[int] = NUM_BOOTSTRAP + NUM_REGULAR_NODES + 1; + NUM_ROUNDS = 100; + + bootstrap_addrs : Final[list[str]] = list(map(lambda x: f'127.0.0.1:{x + START_PORT}', range(0, NUM_BOOTSTRAP))); + normal_nodes_addrs : Final[list[str]] = list(map(lambda x: f'127.0.0.1:{x + START_PORT + NUM_BOOTSTRAP}', range(0, NUM_REGULAR_NODES))); + conductor_addr : str = f'127.0.0.1:{START_PORT + NUM_BOOTSTRAP + NUM_REGULAR_NODES + 1}'; + + regular_cmds : list[tuple[list[str], str]] = []; + bootstrap_cmds : list[tuple[list[str], str]] = []; + print("doing conductor") + conductor_cmd : Final[tuple[list[str], str]] = \ + gen_invocation( + node_type=NodeType.CONDUCTOR, + num_nodes=TOTAL_NUM_NODES, + to_connect_addrs=bootstrap_addrs + normal_nodes_addrs,# + normal_nodes_addrs + [conductor_addr], + conductor_addr=conductor_addr, + num_rounds=NUM_ROUNDS, + bound_addr=conductor_addr + ); + print("dfone concuctor") + + for i in range(0, len(bootstrap_addrs)): + bootstrap_addr = bootstrap_addrs[i]; + regulars_list = normal_nodes_addrs[i * NUM_NODES_PER_BOOTSTRAP: (i + 1) * NUM_NODES_PER_BOOTSTRAP]; + + bootstrap_cmd = gen_invocation( + node_type=NodeType.BOOTSTRAP, + num_nodes=TOTAL_NUM_NODES, + to_connect_addrs=bootstrap_addrs, + conductor_addr=conductor_addr, + num_rounds=NUM_ROUNDS, + bound_addr=bootstrap_addr, + ); + bootstrap_cmds.append(bootstrap_cmd); + + for regular_addr in regulars_list: + regular_cmd = gen_invocation( + node_type=NodeType.REGULAR, + num_nodes=TOTAL_NUM_NODES, + # NOTE may need to remove regular_addr from regulars_list + to_connect_addrs= [bootstrap_addr], + num_rounds=NUM_ROUNDS, + bound_addr=regular_addr, + conductor_addr=conductor_addr + ); + regular_cmds.append(regular_cmd); + + print(regular_cmds) + + TIME_TO_SPIN_UP_BOOTSTRAP : Final[int] = 0; + TIME_TO_SPIN_UP_REGULAR : Final[int] = 0; + env = environ.copy(); + env["RUST_BACKTRACE"] = "full" + + print("spinning up bootstrap") + for (node_cmd, file_name) in bootstrap_cmds: + print("running bootstrap", file_name) + file = open(file_name, 'w') + Popen(node_cmd[0].split(), start_new_session=True, stdout=file, stderr=file, env=env); + + sleep(TIME_TO_SPIN_UP_BOOTSTRAP); + + print("spinning up regulars") + for (node_cmd, file_name) in regular_cmds: + file = open(file_name, 'w') + Popen(node_cmd[0].split(), start_new_session=True, stdout=file, stderr=file, env=env); + + sleep(TIME_TO_SPIN_UP_REGULAR); + + file = open(conductor_cmd[1], 'w') + print("spinning up conductor") + Popen(conductor_cmd[0][0].split(), start_new_session=True, stdout=file, stderr=file, env=env); + diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs new file mode 100644 index 0000000000..ca9e967319 --- /dev/null +++ b/libp2p-networking/tests/common/mod.rs @@ -0,0 +1,264 @@ +use async_compatibility_layer::{ + art::async_sleep, + channel::RecvError, + logging::{setup_backtrace, setup_logging}, +}; +use futures::{future::join_all, Future, FutureExt}; +use libp2p::{identity::Keypair, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_networking::network::{ + network_node_handle_error::NodeConfigSnafu, NetworkEvent, NetworkNodeConfigBuilder, + NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, +}; +use snafu::{ResultExt, Snafu}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + num::NonZeroUsize, + str::FromStr, + sync::Arc, + time::Duration, +}; +use tracing::{info, instrument, warn}; + +/// General function to spin up testing infra +/// perform tests by calling `run_test` +/// then cleans up tests +/// # Panics +/// Panics if unable to: +/// - Initialize logging +/// - Initialize network nodes +/// - Kill network nodes +/// - A test assertion fails +pub async fn test_bed( + run_test: F, + client_handler: G, + num_nodes: usize, + num_of_bootstrap: usize, + timeout: Duration, +) where + FutF: Future, + FutG: Future> + 'static + Send + Sync, + F: FnOnce(Vec>>, Duration) -> FutF, + G: Fn(NetworkEvent, Arc>) -> FutG + 'static + Send + Sync, +{ + setup_logging(); + setup_backtrace(); + + // NOTE we want this to panic if we can't spin up the swarms. + // that amounts to a failed test. + let handles = spin_up_swarms(num_nodes, timeout, num_of_bootstrap) + .await + .unwrap(); + + let mut handler_futures = Vec::new(); + for handle in &handles { + let handler_fut = handle.spawn_handler(client_handler.clone()).await; + handler_futures.push(handler_fut); + } + + run_test(handles.clone(), timeout).await; + + // cleanup + for handle in handles { + handle.shutdown().await.unwrap(); + } + + for fut in handler_futures { + fut.await; + } +} + +fn gen_peerid_map(handles: &[Arc>]) -> HashMap { + let mut r_val = HashMap::new(); + for handle in handles { + r_val.insert(handle.peer_id(), handle.id()); + } + r_val +} + +/// print the connections for each handle in `handles` +/// useful for debugging +pub async fn print_connections(handles: &[Arc>]) { + let m = gen_peerid_map(handles); + warn!("PRINTING CONNECTION STATES"); + for handle in handles.iter() { + warn!( + "peer {}, connected to {:?}", + handle.id(), + handle + .connected_pids() + .await + .unwrap() + .iter() + .map(|pid| m.get(pid).unwrap()) + .collect::>() + ); + } +} + +/// Spins up `num_of_nodes` nodes, connects them to each other +/// and waits for connections to propagate to all nodes. +#[instrument] +pub async fn spin_up_swarms( + num_of_nodes: usize, + timeout_len: Duration, + num_bootstrap: usize, +) -> Result>>, TestError> { + let mut handles = Vec::new(); + let mut bootstrap_addrs = Vec::<(PeerId, Multiaddr)>::new(); + let mut connecting_futs = Vec::new(); + // should never panic unless num_nodes is 0 + let replication_factor = NonZeroUsize::new(num_of_nodes - 1).unwrap(); + + for i in 0..num_bootstrap { + let mut config = NetworkNodeConfigBuilder::default(); + let identity = Keypair::generate_ed25519(); + // let start_port = 5000; + // NOTE use this if testing locally and want human readable ports + // as opposed to random ports. These are harder to track + // especially since the "listener"/inbound connection sees a different + // port + // let addr = Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}/quic-v1", start_port + i)).unwrap(); + + let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/0/quic-v1").unwrap(); + config + .identity(identity) + .replication_factor(replication_factor) + .node_type(NetworkNodeType::Bootstrap) + .to_connect_addrs(HashSet::default()) + .bound_addr(Some(addr)) + .ttl(None) + .republication_interval(None); + let node = NetworkNodeHandle::new( + config + .build() + .context(NodeConfigSnafu) + .context(HandleSnafu)?, + i, + ) + .await + .context(HandleSnafu)?; + let node = Arc::new(node); + let addr = node.listen_addr(); + info!("listen addr for {} is {:?}", i, addr); + bootstrap_addrs.push((node.peer_id(), addr)); + connecting_futs.push({ + let node = node.clone(); + async move { node.wait_to_connect(4, i, timeout_len).await }.boxed_local() + }); + handles.push(node); + } + + for j in 0..(num_of_nodes - num_bootstrap) { + let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/0/quic-v1").unwrap(); + // NOTE use this if testing locally and want human readable ports + // let addr = Multiaddr::from_str(&format!( + // "/ip4/127.0.0.1/udp/{}/quic-v1", + // start_port + num_bootstrap + j + // )).unwrap(); + let regular_node_config = NetworkNodeConfigBuilder::default() + .node_type(NetworkNodeType::Regular) + .replication_factor(replication_factor) + .bound_addr(Some(addr.clone())) + .to_connect_addrs(HashSet::default()) + .build() + .context(NodeConfigSnafu) + .context(HandleSnafu)?; + let node = NetworkNodeHandle::new(regular_node_config.clone(), j + num_bootstrap) + .await + .context(HandleSnafu)?; + let node = Arc::new(node); + connecting_futs.push({ + let node = node.clone(); + async move { + node.wait_to_connect(4, num_bootstrap + j, timeout_len) + .await + } + .boxed_local() + }); + + handles.push(node); + } + info!("BSADDRS ARE: {:?}", bootstrap_addrs); + + info!( + "known nodes: {:?}", + bootstrap_addrs + .iter() + .map(|(a, b)| (Some(*a), b.clone())) + .collect::>() + ); + + for (_idx, handle) in handles[0..num_of_nodes].iter().enumerate() { + let to_share = bootstrap_addrs.clone(); + handle + .add_known_peers( + to_share + .iter() + .map(|(a, b)| (Some(*a), b.clone())) + .collect::>(), + ) + .await + .context(HandleSnafu)?; + } + + let res = join_all(connecting_futs.into_iter()).await; + let mut failing_nodes = Vec::new(); + for (idx, a_node) in res.iter().enumerate() { + if a_node.is_err() { + failing_nodes.push(idx); + } + } + if !failing_nodes.is_empty() { + return Err(TestError::SpinupTimeout { failing_nodes }); + } + + for handle in &handles { + handle + .subscribe("global".to_string()) + .await + .context(HandleSnafu)?; + } + + async_sleep(Duration::from_secs(5)).await; + + Ok(handles) +} + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum TestError { + #[snafu(display("Channel error {source:?}"))] + Recv { + source: RecvError, + }, + #[snafu(display( + "Timeout while running direct message round. Timed out when {requester} dmed {requestee}" + ))] + DirectTimeout { + requester: usize, + requestee: usize, + }, + #[snafu(display("Timeout while running gossip round. Timed out on {failing:?}."))] + GossipTimeout { + failing: Vec, + }, + #[snafu(display( + "Inconsistent state while running test. Expected {expected:?}, got {actual:?} on node {id}" + ))] + State { + id: usize, + expected: S, + actual: S, + }, + #[snafu(display("Handler error while running test. {source:?}"))] + Handle { + source: NetworkNodeHandleError, + }, + #[snafu(display("Failed to spin up nodes. Hit timeout instead. {failing_nodes:?}"))] + SpinupTimeout { + failing_nodes: Vec, + }, + DHTTimeout, +} diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs new file mode 100644 index 0000000000..ae3277b29f --- /dev/null +++ b/libp2p-networking/tests/counter.rs @@ -0,0 +1,676 @@ +mod common; + +use crate::common::print_connections; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; +use bincode::Options; +use common::{test_bed, HandleSnafu, TestError}; +use hotshot_utils::bincode::bincode_opts; +use libp2p_networking::network::{ + get_random_handle, NetworkEvent, NetworkNodeHandle, NetworkNodeHandleError, +}; +use serde::{Deserialize, Serialize}; +use snafu::ResultExt; +use std::{fmt::Debug, sync::Arc, time::Duration}; +use tracing::{error, info, instrument, warn}; + +#[cfg(async_executor_impl = "async-std")] +use async_std::prelude::StreamExt; +#[cfg(async_executor_impl = "tokio")] +use tokio_stream::StreamExt; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + +pub type CounterState = u32; + +const NUM_ROUNDS: usize = 100; + +const TOTAL_NUM_PEERS_COVERAGE: usize = 10; +const NUM_OF_BOOTSTRAP_COVERAGE: usize = 5; +const TIMEOUT_COVERAGE: Duration = Duration::from_secs(120); + +const TOTAL_NUM_PEERS_STRESS: usize = 100; +const NUM_OF_BOOTSTRAP_STRESS: usize = 25; +const TIMEOUT_STRESS: Duration = Duration::from_secs(60); + +const DHT_KV_PADDING: usize = 1024; + +/// Message types. We can either +/// - increment the Counter +/// - request a counter value +/// - reply with a counter value +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub enum CounterMessage { + IncrementCounter { + from: CounterState, + to: CounterState, + }, + AskForCounter, + MyCounterIs(CounterState), + Noop, +} + +/// event handler for events from the swarm +/// - updates state based on events received +/// - replies to direct messages +#[instrument] +pub async fn counter_handle_network_event( + event: NetworkEvent, + handle: Arc>, +) -> Result<(), NetworkNodeHandleError> { + use CounterMessage::*; + use NetworkEvent::*; + match event { + IsBootstrapped => {} + GossipMsg(m, _) | DirectResponse(m, _) => { + if let Ok(msg) = bincode_opts().deserialize::(&m) { + match msg { + // direct message only + MyCounterIs(c) => { + handle.modify_state(|s| *s = c).await; + } + // gossip message only + IncrementCounter { from, to, .. } => { + handle + .modify_state(|s| { + if *s == from { + *s = to; + } + }) + .await; + } + // only as a response + AskForCounter | Noop => {} + } + } else { + error!("FAILED TO DESERIALIZE MSG {:?}", m); + } + } + DirectRequest(m, _, chan) => { + if let Ok(msg) = bincode_opts().deserialize::(&m) { + match msg { + // direct message request + IncrementCounter { from, to, .. } => { + handle + .modify_state(|s| { + if *s == from { + *s = to + } + }) + .await; + handle.direct_response(chan, &CounterMessage::Noop).await?; + } + // direct message response + AskForCounter => { + let response = MyCounterIs(handle.state().await); + handle.direct_response(chan, &response).await?; + } + MyCounterIs(_) => { + handle.direct_response(chan, &CounterMessage::Noop).await?; + } + Noop => { + handle.direct_response(chan, &CounterMessage::Noop).await?; + } + } + } + } + }; + Ok(()) +} + +/// `requester_handle` asks for `requestee_handle`'s state, +/// and then `requester_handle` updates its state to equal `requestee_handle`. +async fn run_request_response_increment<'a>( + requester_handle: Arc>, + requestee_handle: Arc>, + timeout: Duration, +) -> Result<(), TestError> { + async move { + let new_state = requestee_handle.state().await; + + // set up state change listener + #[cfg(async_executor_impl = "async-std")] + let mut stream = requester_handle + .state_wait_timeout_until_with_trigger(timeout, move |state| *state == new_state); + #[cfg(async_executor_impl = "tokio")] + let mut stream = Box::pin( + requester_handle + .state_wait_timeout_until_with_trigger(timeout, move |state| *state == new_state), + ); + #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] + compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + + let requestee_pid = requestee_handle.peer_id(); + + stream.next().await.unwrap().unwrap(); + requester_handle + .direct_request(requestee_pid, &CounterMessage::AskForCounter) + .await + .context(HandleSnafu)?; + stream.next().await.unwrap().unwrap(); + + let s1 = requester_handle.state().await; + + // sanity check + if s1 != new_state { + Err(TestError::State { + id: requester_handle.id(), + expected: new_state, + actual: s1, + }) + } else { + Ok(()) + } + } + .await +} + +/// broadcasts `msg` from a randomly chosen handle +/// then asserts that all nodes match `new_state` +async fn run_gossip_round( + handles: &[Arc>], + msg: CounterMessage, + new_state: CounterState, + timeout_duration: Duration, +) -> Result<(), TestError> { + let mut rng = rand::thread_rng(); + let msg_handle = get_random_handle(handles, &mut rng); + msg_handle.modify_state(|s| *s = new_state).await; + + let mut futs = Vec::new(); + + let len = handles.len(); + for handle in handles { + // already modified, so skip msg_handle + if handle.peer_id() != msg_handle.peer_id() { + let stream = handle.state_wait_timeout_until_with_trigger(timeout_duration, |state| { + *state == new_state + }); + futs.push(Box::pin(stream)); + } + } + + #[cfg(async_executor_impl = "async-std")] + let mut merged_streams = futures::stream::select_all(futs); + #[cfg(async_executor_impl = "tokio")] + let mut merged_streams = Box::pin(futures::stream::select_all(futs)); + #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] + compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + + // make sure all are ready/listening + for i in 0..len - 1 { + // unwrap is okay because stream must have 2 * (len - 1) elements + match merged_streams.next().await.unwrap() { + Ok(()) => {} + Err(e) => panic!("timeout : {e:?} waiting handle {i:?} to subscribe to state events"), + } + } + + msg_handle + .gossip("global".to_string(), &msg) + .await + .context(HandleSnafu)?; + + for _ in 0..len - 1 { + // wait for all events to finish + // then check for failures + let _ = merged_streams.next().await; + } + + let mut failing = Vec::new(); + for handle in handles.iter() { + let handle_state = handle.state().await; + if handle_state != new_state { + failing.push(handle.id()); + println!("state: {handle_state:?}, expected: {new_state:?}"); + } + } + if !failing.is_empty() { + print_connections(handles).await; + return Err(TestError::GossipTimeout { failing }); + } + + Ok(()) +} + +async fn run_intersperse_many_rounds( + handles: Vec>>, + timeout: Duration, +) { + for i in 0..NUM_ROUNDS as u32 { + if i % 2 == 0 { + run_request_response_increment_all(&handles, timeout).await; + } else { + run_gossip_rounds(&handles, 1, i, timeout).await + } + } + for h in handles.into_iter() { + assert_eq!(h.state().await, NUM_ROUNDS as u32); + } +} + +async fn run_dht_many_rounds( + handles: Vec>>, + timeout: Duration, +) { + run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; +} + +async fn run_dht_one_round(handles: Vec>>, timeout: Duration) { + run_dht_rounds(&handles, timeout, 0, 1).await; +} + +async fn run_request_response_many_rounds( + handles: Vec>>, + timeout: Duration, +) { + for _i in 0..NUM_ROUNDS { + run_request_response_increment_all(&handles, timeout).await; + } + for h in handles.into_iter() { + assert_eq!(h.state().await, NUM_ROUNDS as u32); + } +} + +pub async fn run_request_response_one_round( + handles: Vec>>, + timeout: Duration, +) { + run_request_response_increment_all(&handles, timeout).await; + for h in handles.into_iter() { + assert_eq!(h.state().await, 1); + } +} + +pub async fn run_gossip_many_rounds( + handles: Vec>>, + timeout: Duration, +) { + run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await +} + +async fn run_gossip_one_round( + handles: Vec>>, + timeout: Duration, +) { + run_gossip_rounds(&handles, 1, 0, timeout).await +} + +async fn run_dht_rounds( + handles: &[Arc>], + timeout: Duration, + starting_val: usize, + num_rounds: usize, +) { + let mut rng = rand::thread_rng(); + for i in 0..num_rounds { + error!("round: {:?}", i); + let msg_handle = get_random_handle(handles, &mut rng); + let mut key = vec![0; DHT_KV_PADDING]; + key.push((starting_val + i) as u8); + let mut value = vec![0; DHT_KV_PADDING]; + value.push((starting_val + i) as u8); + + // put the key + msg_handle.put_record(&key, &value).await.unwrap(); + + // get the key from the other nodes + for handle in handles.iter() { + let result: Result, NetworkNodeHandleError> = + handle.get_record_timeout(&key, timeout).await; + match result { + Err(e) => { + panic!("DHT error {e:?} during GET"); + } + Ok(v) => { + assert_eq!(v, value); + break; + } + } + } + } +} + +/// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast +async fn run_gossip_rounds( + handles: &[Arc>], + num_rounds: usize, + starting_state: CounterState, + timeout: Duration, +) { + let mut old_state = starting_state; + for i in 0..num_rounds { + info!("running gossip round {}", i); + let new_state = old_state + 1; + let msg = CounterMessage::IncrementCounter { + from: old_state, + to: new_state, + }; + run_gossip_round(handles, msg, new_state, timeout) + .await + .unwrap(); + old_state = new_state; + } +} + +/// chooses a random handle from `handles` +/// increments its state by 1, +/// then has all other peers request its state +/// and update their state to the recv'ed state +async fn run_request_response_increment_all( + handles: &[Arc>], + timeout: Duration, +) { + let mut rng = rand::thread_rng(); + let requestee_handle = get_random_handle(handles, &mut rng); + requestee_handle.modify_state(|s| *s += 1).await; + info!("RR REQUESTEE IS {:?}", requestee_handle.peer_id()); + let mut futs = Vec::new(); + for (_i, h) in handles.iter().enumerate() { + if h.lookup_pid(requestee_handle.peer_id()).await.is_err() { + error!("ERROR LOOKING UP REQUESTEE ADDRS"); + } + // NOTE uncomment if debugging + // let _ = h.print_routing_table().await; + // skip `requestee_handle` + if h.peer_id() != requestee_handle.peer_id() { + let requester_handle = h.clone(); + futs.push(run_request_response_increment( + requester_handle, + requestee_handle.clone(), + timeout, + )); + } + } + + // NOTE this was originally join_all + // but this is simpler. + let results = Arc::new(RwLock::new(vec![])); + + let len = futs.len(); + + for _ in 0..futs.len() { + let fut = futs.pop().unwrap(); + let results = results.clone(); + async_spawn(async move { + let res = fut.await; + results.write().await.push(res); + }); + } + loop { + let l = results.read().await.iter().len(); + if l >= len { + break; + } + info!("NUMBER OF RESULTS for increment all is: {}", l); + async_sleep(Duration::from_secs(1)).await; + } + + if results.read().await.iter().any(|x| x.is_err()) { + print_connections(handles).await; + let mut states = vec![]; + for handle in handles { + states.push(handle.state().await); + } + panic!("states: {states:?}"); + } +} + +/// simple case of direct message +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_request_response_one_round() { + test_bed( + run_request_response_one_round, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await +} + +/// stress test of direct messsage +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_request_response_many_rounds() { + test_bed( + run_request_response_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await +} + +/// stress test of broadcast + direct message +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_intersperse_many_rounds() { + test_bed( + run_intersperse_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await +} + +/// stress teset that we can broadcast a message out and get counter increments +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_gossip_many_rounds() { + test_bed( + run_gossip_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await; +} + +/// simple case of broadcast message +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_gossip_one_round() { + test_bed( + run_gossip_one_round, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await; +} + +/// simple case of direct message +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_request_response_one_round() { + test_bed( + run_request_response_one_round, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await +} + +/// stress test of direct messsage +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_request_response_many_rounds() { + test_bed( + run_request_response_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await +} + +/// stress test of broadcast + direct message +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_intersperse_many_rounds() { + test_bed( + run_intersperse_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await +} + +/// stress teset that we can broadcast a message out and get counter increments +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_gossip_many_rounds() { + test_bed( + run_gossip_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await; +} + +/// simple case of broadcast message +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_gossip_one_round() { + test_bed( + run_gossip_one_round, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await; +} + +/// simple case of one dht publish event +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_dht_one_round() { + test_bed( + run_dht_one_round, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await; +} + +/// many dht publishing events +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_dht_many_rounds() { + test_bed( + run_dht_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_STRESS, + NUM_OF_BOOTSTRAP_STRESS, + TIMEOUT_STRESS, + ) + .await; +} + +/// simple case of one dht publish event +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_dht_one_round() { + test_bed( + run_dht_one_round, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await; +} + +/// many dht publishing events +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_coverage_dht_many_rounds() { + test_bed( + run_dht_many_rounds, + counter_handle_network_event, + TOTAL_NUM_PEERS_COVERAGE, + NUM_OF_BOOTSTRAP_COVERAGE, + TIMEOUT_COVERAGE, + ) + .await; +} diff --git a/libp2p-networking/web/index.html b/libp2p-networking/web/index.html new file mode 100644 index 0000000000..449d4a7d30 --- /dev/null +++ b/libp2p-networking/web/index.html @@ -0,0 +1,105 @@ + + + + HotShot web UI + + + +

+ + + + \ No newline at end of file diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml new file mode 100644 index 0000000000..3febbbce65 --- /dev/null +++ b/orchestrator/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "hotshot-orchestrator" +version = "0.1.1" +edition = "2021" + +[dependencies] +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bincode = { workspace = true } +clap = { version = "4.0", features = ["derive", "env"], optional = false } +futures = { workspace = true } +libp2p-core = { version = "0.40.0", default-features = false } +libp2p = { workspace = true } +blake3 = { workspace = true, features = ["traits-preview"] } +hotshot-types = { version = "0.1.0", path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } +libp2p-networking = { workspace = true } +nll = { workspace = true } +tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } +surf-disco = { workspace = true } +tracing = { workspace = true } +serde = { workspace = true } +serde_json = "1.0.96" +snafu = { workspace = true } +toml = "0.5.9" # TODO GG upgrade to toml = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/orchestrator/README.md b/orchestrator/README.md new file mode 100644 index 0000000000..43e8a6e28c --- /dev/null +++ b/orchestrator/README.md @@ -0,0 +1,7 @@ +# Orchestrator + +This crate implements an orchestrator that coordinates starting the network with a particular configuration. It is useful for testing and benchmarking. Like the web server, the orchestrator is built using [Tide Disco](https://github.com/EspressoSystems/tide-disco). + +To run the orchestrator for a libp2p network: `cargo run --example libp2p-orchestrator --features="full-ci,channel-async-std" 0.0.0.0 3333 ./orchestrator/default-libp2p-run-config.toml ` + +To run the orchestrator for a libp2p network: `cargo run --example web-server-orchestrator --features="full-ci,channel-async-std" 0.0.0.0 3333 ./orchestrator/default-web-server-run-config.toml ` \ No newline at end of file diff --git a/orchestrator/api.toml b/orchestrator/api.toml new file mode 100644 index 0000000000..e9bc32c270 --- /dev/null +++ b/orchestrator/api.toml @@ -0,0 +1,50 @@ +[meta] +NAME = "orchestrator" +DESCRIPTION = "Orchestrator for HotShot" +FORMAT_VERSION = "0.1.0" + +# POST node's identity +[route.postidentity] +PATH = ["identity/:identity"] +METHOD = "POST" +":identity" = "Literal" +DOC = """ +POST a node's identity (IP address) to the orchestrator. Returns the node's node_index +""" + +# POST retrieve the network configuration +[route.post_getconfig] +PATH = ["config/:node_index"] +METHOD = "POST" +":node_index" = "Integer" +DOC = """ +Get networking configuration needed for nodes to initialize HotShot and themselves. See `config.rs` for more information. +This must be a POST request so we can update the OrchestratorState in the server accordingly. Must use the node_index previously +received from the 'identity' endpoint +""" + +# POST whether the node is ready to begin the run +# TODO ED Use the node index parameter +[route.postready] +PATH = ["ready"] +METHOD = "POST" +":node_index" = "Integer" +DOC = """ +Post whether the node with node_index is ready to start the run +""" + +# GET whether or not to start the run +[route.getstart] +PATH = ["start"] +DOC = """ +Get whether the node should start the run, returns a boolean +""" + +# POST the run results +[route.postresults] +PATH = ["results"] +":run_results" = "TaggedBase64" +METHOD = "POST" +DOC = """ +Post run results. +""" \ No newline at end of file diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml new file mode 100644 index 0000000000..5d43c940fa --- /dev/null +++ b/orchestrator/default-libp2p-run-config.toml @@ -0,0 +1,78 @@ +rounds = 100 +transactions_per_round = 10 +node_index = 0 +seed = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +] +padding = 10 +start_delay_seconds = 60 + +[libp2p_config] +num_bootstrap_nodes = 5 +index_ports = true +bootstrap_mesh_n_high = 4 +bootstrap_mesh_n_low = 4 +bootstrap_mesh_outbound_min = 2 +bootstrap_mesh_n = 4 +mesh_n_high = 4 +mesh_n_low = 4 +mesh_outbound_min = 2 +mesh_n = 4 +next_view_timeout = 10 +propose_min_round_time = 0 +propose_max_round_time = 10 +online_time = 10 +num_txn_per_round = 10 +base_port = 9000 + +[config] +total_nodes = 5 +max_transactions = 100 +min_transactions = 0 +next_view_timeout = 10000 +timeout_ratio = [ + 11, + 10, +] +round_start_delay = 1 +start_delay = 1 +num_bootstrap = 5 + +[config.propose_min_round_time] +secs = 0 +nanos = 0 + +[config.propose_max_round_time] +secs = 1 +nanos = 0 diff --git a/orchestrator/default-run-config.toml b/orchestrator/default-run-config.toml new file mode 100644 index 0000000000..fe34d75811 --- /dev/null +++ b/orchestrator/default-run-config.toml @@ -0,0 +1,68 @@ +rounds = 10 +transactions_per_round = 1 +node_index = 0 +seed = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +] +padding = 100 +start_delay_seconds = 30 + +[config] +total_nodes = 1 +max_transactions = 100 +min_transactions = 0 +next_view_timeout = 30000 +timeout_ratio = [ + 11, + 10, +] +round_start_delay = 1 +start_delay = 1 +num_bootstrap = 4 + +[config.propose_min_round_time] +secs = 0 +nanos = 0 + +[config.propose_max_round_time] +secs = 1 +nanos = 0 + +[web_server_config] +host = "0.0.0.0" +port = 9000 + +[web_server_config.wait_between_polls] +secs = 0 +nanos = 100000000 # 100 ms diff --git a/orchestrator/default-web-server-run-config.toml b/orchestrator/default-web-server-run-config.toml new file mode 100644 index 0000000000..0ea0f86ccf --- /dev/null +++ b/orchestrator/default-web-server-run-config.toml @@ -0,0 +1,77 @@ +rounds = 10 +transactions_per_round = 1 +node_index = 0 +seed = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +] +padding = 100 +start_delay_seconds = 30 + +[config] +total_nodes = 10 +committee_nodes = 5 +max_transactions = 100 +min_transactions = 0 +next_view_timeout = 30000 +timeout_ratio = [ + 11, + 10, +] +round_start_delay = 1 +start_delay = 1 +num_bootstrap = 4 + +[config.propose_min_round_time] +secs = 0 +nanos = 0 + +[config.propose_max_round_time] +secs = 1 +nanos = 0 + +[web_server_config] +host = "127.0.0.1" +port = 9000 + +[da_web_server_config] +host = "127.0.0.1" +port = 9001 + +[web_server_config.wait_between_polls] +secs = 0 +nanos = 100000000 # 100 ms + +[da_web_server_config.wait_between_polls] +secs = 0 +nanos = 100000000 # 100 ms diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs new file mode 100644 index 0000000000..57522da4fd --- /dev/null +++ b/orchestrator/src/client.rs @@ -0,0 +1,136 @@ +use std::{net::IpAddr, time::Duration}; + +use crate::config::NetworkConfig; +use async_compatibility_layer::art::async_sleep; +use clap::Parser; +use futures::{Future, FutureExt}; + +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use surf_disco::{error::ClientError, Client}; + +/// Holds the client connection to the orchestrator +pub struct OrchestratorClient { + client: surf_disco::Client, +} + +// VALIDATOR + +#[derive(Parser, Debug, Clone)] +#[command( + name = "Multi-machine consensus", + about = "Simulates consensus among multiple machines" +)] +/// Arguments passed to the validator +pub struct ValidatorArgs { + /// The address the orchestrator runs on + pub host: String, + /// The port the orchestrator runs on + pub port: u16, + /// This node's public IP address, for libp2p + /// If no IP address is passed in, it will default to 127.0.0.1 + pub public_ip: Option, +} + +impl OrchestratorClient { + /// Creates the client that connects to the orchestrator + pub async fn connect_to_orchestrator(args: ValidatorArgs) -> Self { + let base_url = format!("{0}:{1}", args.host, args.port); + let base_url = format!("http://{base_url}").parse().unwrap(); + let client = surf_disco::Client::::new(base_url); + // TODO ED: Add healthcheck wait here + OrchestratorClient { client } + } + + /// Sends an identify message to the server + /// Returns this validator's node_index in the network + pub async fn identify_with_orchestrator(&self, identity: String) -> u16 { + let identity = identity.as_str(); + let f = |client: Client| { + async move { + let node_index: Result = client + .post(&format!("api/identity/{identity}")) + .send() + .await; + node_index + } + .boxed() + }; + self.wait_for_fn_from_orchestrator(f).await + } + + /// Returns the run configuration from the orchestrator + /// Will block until the configuration is returned + #[allow(clippy::type_complexity)] + + pub async fn get_config_from_orchestrator( + &self, + node_index: u16, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > { + let f = |client: Client| { + async move { + let config: Result< + NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ClientError, + > = client + .post(&format!("api/config/{node_index}")) + .send() + .await; + config + } + .boxed() + }; + self.wait_for_fn_from_orchestrator(f).await + } + + /// Tells the orchestrator this validator is ready to start + /// Blocks until the orchestrator indicates all nodes are ready to start + pub async fn wait_for_all_nodes_ready(&self, node_index: u64) -> bool { + let send_ready_f = |client: Client| { + async move { + let result: Result<_, ClientError> = client + .post("api/ready") + .body_json(&node_index) + .unwrap() + .send() + .await; + result + } + .boxed() + }; + self.wait_for_fn_from_orchestrator::<_, _, ()>(send_ready_f) + .await; + + let wait_for_all_nodes_ready_f = |client: Client| { + async move { client.get("api/start").send().await }.boxed() + }; + self.wait_for_fn_from_orchestrator(wait_for_all_nodes_ready_f) + .await + } + + /// Generic function that waits for the orchestrator to return a non-error + /// Returns whatever type the given function returns + async fn wait_for_fn_from_orchestrator(&self, f: F) -> GEN + where + F: Fn(Client) -> Fut, + Fut: Future>, + { + loop { + let client = self.client.clone(); + let res = f(client).await; + match res { + Ok(x) => break x, + Err(_x) => { + async_sleep(Duration::from_millis(250)).await; + } + } + } + } +} diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs new file mode 100644 index 0000000000..061ab256d5 --- /dev/null +++ b/orchestrator/src/config.rs @@ -0,0 +1,236 @@ +use hotshot_types::{ExecutionType, HotShotConfig}; +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + num::NonZeroUsize, + time::Duration, +}; +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct Libp2pConfig { + pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, + pub num_bootstrap_nodes: u64, + pub public_ip: IpAddr, + pub base_port: u16, + pub node_index: u64, + pub index_ports: bool, + pub bootstrap_mesh_n_high: usize, + pub bootstrap_mesh_n_low: usize, + pub bootstrap_mesh_outbound_min: usize, + pub bootstrap_mesh_n: usize, + pub mesh_n_high: usize, + pub mesh_n_low: usize, + pub mesh_outbound_min: usize, + pub mesh_n: usize, + pub next_view_timeout: u64, + pub propose_min_round_time: u64, + pub propose_max_round_time: u64, + pub online_time: u64, + pub num_txn_per_round: u64, +} + +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct Libp2pConfigFile { + pub num_bootstrap_nodes: u64, + pub index_ports: bool, + pub bootstrap_mesh_n_high: usize, + pub bootstrap_mesh_n_low: usize, + pub bootstrap_mesh_outbound_min: usize, + pub bootstrap_mesh_n: usize, + pub mesh_n_high: usize, + pub mesh_n_low: usize, + pub mesh_outbound_min: usize, + pub mesh_n: usize, + pub next_view_timeout: u64, + pub propose_min_round_time: u64, + pub propose_max_round_time: u64, + pub online_time: u64, + pub num_txn_per_round: u64, + pub base_port: u16, +} + +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct WebServerConfig { + pub host: IpAddr, + pub port: u16, + pub wait_between_polls: Duration, +} + +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct NetworkConfig { + pub rounds: usize, + pub transactions_per_round: usize, + pub node_index: u64, + pub seed: [u8; 32], + pub padding: usize, + pub start_delay_seconds: u64, + pub key_type_name: String, + pub election_config_type_name: String, + pub libp2p_config: Option, + pub config: HotShotConfig, + pub web_server_config: Option, + pub da_web_server_config: Option, +} + +impl Default for NetworkConfig { + fn default() -> Self { + Self { + rounds: default_rounds(), + transactions_per_round: default_transactions_per_round(), + node_index: 0, + seed: [0u8; 32], + padding: default_padding(), + libp2p_config: None, + config: default_config().into(), + start_delay_seconds: 60, + key_type_name: std::any::type_name::().to_string(), + election_config_type_name: std::any::type_name::().to_string(), + web_server_config: None, + da_web_server_config: None, + } + } +} + +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] +pub struct NetworkConfigFile { + #[serde(default = "default_rounds")] + pub rounds: usize, + #[serde(default = "default_transactions_per_round")] + pub transactions_per_round: usize, + #[serde(default)] + pub node_index: u64, + #[serde(default)] + pub seed: [u8; 32], + #[serde(default = "default_padding")] + pub padding: usize, + #[serde(default = "default_start_delay_seconds")] + pub start_delay_seconds: u64, + #[serde(default)] + pub libp2p_config: Option, + #[serde(default = "default_config")] + pub config: HotShotConfigFile, + #[serde(default = "default_web_server_config")] + pub web_server_config: Option, + #[serde(default = "default_web_server_config")] + pub da_web_server_config: Option, +} + +fn default_web_server_config() -> Option { + None +} + +impl From for NetworkConfig { + fn from(val: NetworkConfigFile) -> Self { + NetworkConfig { + rounds: val.rounds, + transactions_per_round: val.transactions_per_round, + node_index: 0, + seed: val.seed, + padding: val.padding, + libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { + num_bootstrap_nodes: libp2p_config.num_bootstrap_nodes, + index_ports: libp2p_config.index_ports, + bootstrap_nodes: Vec::new(), + public_ip: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + base_port: libp2p_config.base_port, + node_index: 0, + bootstrap_mesh_n_high: libp2p_config.bootstrap_mesh_n_high, + bootstrap_mesh_n_low: libp2p_config.bootstrap_mesh_n_low, + bootstrap_mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, + bootstrap_mesh_n: libp2p_config.bootstrap_mesh_n, + mesh_n_high: libp2p_config.mesh_n_high, + mesh_n_low: libp2p_config.mesh_n_low, + mesh_outbound_min: libp2p_config.mesh_outbound_min, + mesh_n: libp2p_config.mesh_n, + next_view_timeout: libp2p_config.next_view_timeout, + propose_min_round_time: libp2p_config.propose_min_round_time, + propose_max_round_time: libp2p_config.propose_max_round_time, + online_time: libp2p_config.online_time, + num_txn_per_round: libp2p_config.num_txn_per_round, + }), + config: val.config.into(), + key_type_name: std::any::type_name::().to_string(), + election_config_type_name: std::any::type_name::().to_string(), + start_delay_seconds: val.start_delay_seconds, + web_server_config: val.web_server_config, + da_web_server_config: val.da_web_server_config, + } + } +} + +/// Holds configuration for a `HotShot` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct HotShotConfigFile { + /// Total number of nodes in the network + pub total_nodes: NonZeroUsize, + /// Number of committee nodes + pub committee_nodes: usize, + /// Maximum transactions per block + pub max_transactions: NonZeroUsize, + /// Minimum transactions per block + pub min_transactions: usize, + /// Base duration for next-view timeout, in milliseconds + pub next_view_timeout: u64, + /// The exponential backoff ration for the next-view timeout + pub timeout_ratio: (u64, u64), + /// The delay a leader inserts before starting pre-commit, in milliseconds + pub round_start_delay: u64, + /// Delay after init before starting consensus, in milliseconds + pub start_delay: u64, + /// Number of network bootstrap nodes + pub num_bootstrap: usize, + /// The minimum amount of time a leader has to wait to start a round + pub propose_min_round_time: Duration, + /// The maximum amount of time a leader can wait to start a round + pub propose_max_round_time: Duration, +} + +impl From for HotShotConfig { + fn from(val: HotShotConfigFile) -> Self { + HotShotConfig { + execution_type: ExecutionType::Continuous, + total_nodes: val.total_nodes, + max_transactions: val.max_transactions, + min_transactions: val.min_transactions, + known_nodes: Vec::new(), + known_nodes_with_stake: Vec::new(), + da_committee_size: val.committee_nodes, + next_view_timeout: val.next_view_timeout, + timeout_ratio: val.timeout_ratio, + round_start_delay: val.round_start_delay, + start_delay: val.start_delay, + num_bootstrap: val.num_bootstrap, + propose_min_round_time: val.propose_min_round_time, + propose_max_round_time: val.propose_max_round_time, + election_config: None, + } + } +} + +// This is hacky, blame serde for not having something like `default_value = "10"` +fn default_rounds() -> usize { + 10 +} +fn default_transactions_per_round() -> usize { + 10 +} +fn default_padding() -> usize { + 100 +} +fn default_config() -> HotShotConfigFile { + HotShotConfigFile { + total_nodes: NonZeroUsize::new(10).unwrap(), + committee_nodes: 5, + max_transactions: NonZeroUsize::new(100).unwrap(), + min_transactions: 0, + next_view_timeout: 10000, + timeout_ratio: (11, 10), + round_start_delay: 1, + start_delay: 1, + propose_min_round_time: Duration::from_secs(0), + propose_max_round_time: Duration::from_secs(10), + num_bootstrap: 5, + } +} + +fn default_start_delay_seconds() -> u64 { + 60 +} diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs new file mode 100644 index 0000000000..4391fc43e6 --- /dev/null +++ b/orchestrator/src/lib.rs @@ -0,0 +1,255 @@ +pub mod client; +pub mod config; + +use async_lock::RwLock; +use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey}; +use std::{ + io, + io::ErrorKind, + net::{IpAddr, SocketAddr}, +}; +use tide_disco::{Api, App}; + +use surf_disco::error::ClientError; +use tide_disco::{ + api::ApiError, + error::ServerError, + method::{ReadState, WriteState}, +}; + +use futures::FutureExt; + +use crate::config::NetworkConfig; + +use libp2p::identity::{ + ed25519::{Keypair as EdKeypair, SecretKey}, + Keypair, +}; + +/// yeesh maybe we should just implement SignatureKey for this... +pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); + >::from(sk_bytes).into() +} + +#[derive(Default, Clone)] +struct OrchestratorState { + /// Tracks the latest node index we have generated a configuration for + latest_index: u16, + /// The network configuration + config: NetworkConfig, + /// Whether nodes should start their HotShot instances + /// Will be set to true once all nodes post they are ready to start + start: bool, + /// The total nodes that have posted they are ready to start + pub nodes_connected: u64, + /// connection to the web server + client: Option>, +} + +impl + OrchestratorState +{ + pub fn new(network_config: NetworkConfig) -> Self { + let mut web_client = None; + if network_config.web_server_config.is_some() { + let base_url = "http://0.0.0.0/9000".to_string().parse().unwrap(); + web_client = Some(surf_disco::Client::::new(base_url)); + } + OrchestratorState { + latest_index: 0, + config: network_config, + start: false, + nodes_connected: 0, + client: web_client, + } + } +} + +pub trait OrchestratorApi { + fn post_identity(&mut self, identity: IpAddr) -> Result; + fn post_getconfig( + &mut self, + node_index: u16, + ) -> Result, ServerError>; + fn get_start(&self) -> Result; + fn post_ready(&mut self) -> Result<(), ServerError>; + fn post_run_results(&mut self) -> Result<(), ServerError>; +} + +impl OrchestratorApi + for OrchestratorState +where + KEY: serde::Serialize + Clone + SignatureKey, + ELECTION: serde::Serialize + Clone + Send, +{ + fn post_identity(&mut self, identity: IpAddr) -> Result { + let node_index = self.latest_index; + self.latest_index += 1; + + // TODO https://github.com/EspressoSystems/HotShot/issues/850 + if usize::from(node_index) >= self.config.config.total_nodes.get() { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Network has reached capacity".to_string(), + }); + } + + //add new node's key to stake table + if self.config.web_server_config.clone().is_some() { + let new_key = KEY::generated_from_seed_indexed(self.config.seed, node_index.into()).0; + let client_clone = self.client.clone().unwrap(); + async move { + client_clone + .post::<()>("api/staketable") + .body_binary(&new_key) + .unwrap() + .send() + .await + } + .boxed(); + } + + if self.config.libp2p_config.clone().is_some() { + let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); + // Designate node as bootstrap node and store its identity information + if libp2p_config_clone.bootstrap_nodes.len() + < libp2p_config_clone.num_bootstrap_nodes.try_into().unwrap() + { + let port_index = match libp2p_config_clone.index_ports { + true => node_index, + false => 0, + }; + let socketaddr = + SocketAddr::new(identity, libp2p_config_clone.base_port + port_index); + let keypair = libp2p_generate_indexed_identity(self.config.seed, node_index.into()); + self.config + .libp2p_config + .as_mut() + .unwrap() + .bootstrap_nodes + .push((socketaddr, keypair.to_protobuf_encoding().unwrap())); + } + } + Ok(node_index) + } + + // Assumes nodes will set their own index that they received from the + // 'identity' endpoint + fn post_getconfig( + &mut self, + _node_index: u16, + ) -> Result, ServerError> { + if self.config.libp2p_config.is_some() { + let libp2p_config = self.config.clone().libp2p_config.unwrap(); + if libp2p_config.bootstrap_nodes.len() + < libp2p_config.num_bootstrap_nodes.try_into().unwrap() + { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Not enough bootstrap nodes have registered".to_string(), + }); + } + } + Ok(self.config.clone()) + } + + fn get_start(&self) -> Result { + // println!("{}", self.start); + if !self.start { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Network is not ready to start".to_string(), + }); + } + Ok(self.start) + } + + // Assumes nodes do not post 'ready' twice + // TODO ED Add a map to verify which nodes have posted they're ready + fn post_ready(&mut self) -> Result<(), ServerError> { + self.nodes_connected += 1; + println!("Nodes connected: {}", self.nodes_connected); + if self.nodes_connected >= self.config.config.known_nodes.len().try_into().unwrap() { + self.start = true; + } + Ok(()) + } + + fn post_run_results(&mut self) -> Result<(), ServerError> { + Ok(()) + } +} + +/// Sets up all API routes +fn define_api() -> Result, ApiError> +where + State: 'static + Send + Sync + ReadState + WriteState, + ::State: Send + Sync + OrchestratorApi, + KEY: serde::Serialize, + ENTRY: serde::Serialize, + ELECTION: serde::Serialize, +{ + let api_toml = toml::from_str::(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/api.toml" + ))) + .expect("API file is not valid toml"); + let mut api = Api::::new(api_toml)?; + api.post("postidentity", |req, state| { + async move { + let identity = req.string_param("identity")?.parse::(); + if identity.is_err() { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Identity is not a properly formed IP address".to_string(), + }); + } + state.post_identity(identity.unwrap()) + } + .boxed() + })? + .post("post_getconfig", |req, state| { + async move { + let node_index = req.integer_param("node_index")?; + state.post_getconfig(node_index) + } + .boxed() + })? + .post("postready", |_req, state| { + async move { state.post_ready() }.boxed() + })? + .get("getstart", |_req, state| { + async move { state.get_start() }.boxed() + })? + .post("postresults", |_req, state| { + async move { state.post_run_results() }.boxed() + })?; + Ok(api) +} + +/// Runs the orchestrator +pub async fn run_orchestrator( + network_config: NetworkConfig, + host: IpAddr, + port: u16, +) -> io::Result<()> +where + KEY: SignatureKey + 'static + serde::Serialize, + ELECTION: ElectionConfig + 'static + serde::Serialize, +{ + let api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); + + let state: RwLock> = + RwLock::new(OrchestratorState::new(network_config)); + + let mut app = App::>, ServerError>::with_state(state); + app.register_module("api", api.unwrap()) + .expect("Error registering api"); + tracing::error!("lisening on {:?}:{:?}", host, port); + app.serve(format!("http://{host}:{port}")).await +} diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml new file mode 100644 index 0000000000..71b7e467d8 --- /dev/null +++ b/task-impls/Cargo.toml @@ -0,0 +1,34 @@ +[package] +authors = ["Espresso Systems "] +description = "Async task implementations for consensus" +edition = "2021" +name = "hotshot-task-impls" +version = "0.1.0" + +[dependencies] +async-compatibility-layer = { workspace = true } +async-trait = { workspace = true } +either = { workspace = true } +futures = { workspace = true } +nll = { workspace = true } +serde = { workspace = true } +snafu = { workspace = true } +async-lock = { workspace = true } +tracing = { workspace = true } +atomic_enum = "0.2.0" +pin-project = "1.1.3" +async-stream = "0.3.5" +hotshot-types = { path = "../types", default-features = false } +hotshot-task = { path = "../task", default-features = false } +time = { workspace = true } +commit = { workspace = true } +jf-primitives = { workspace = true } +rand_chacha = { workspace = true } +hotshot-utils = { path = "../utils" } +bincode = { workspace = true } +bitvec = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/task-impls/HotShot_event_architecture.drawio b/task-impls/HotShot_event_architecture.drawio new file mode 100644 index 0000000000..ee5702aa4e --- /dev/null +++ b/task-impls/HotShot_event_architecture.drawio @@ -0,0 +1,294 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/task-impls/HotShot_event_architecture.png b/task-impls/HotShot_event_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..1c1891624c2c3a9848ea19963d2c319b7048cff5 GIT binary patch literal 123290 zcmeEP2OyQ}|1YVC=!m2w>tydu_RcOVLXLBcjKi@vsjRYvkTOykWh5i3C@UJuj?Bm| zWW@hDoKw>M-Ftuis(ZWle|5`w&-*^l^M3dD^Zh>W8>p@-zjMdF9h)|7+Nr1@b9B?D zEi9Wh;gsNQ14pLic4dHnHrpSSm)exwNIAS|QwWuVtgeH#i>ZYbViS}{dhItTH>Ww$ z-T}%Z1LfwHwL+LUppZ7;S8&`K3AZq{K)~0&}rp@V=aSva6a;Spfx;RlBql@v5pl%d?x;CCwvO9c2w z9${jML|+0&89TFEAi?R}TrbNrK=-IBW+9&whoTQ zfJEp~EHEr^VdH`GTO5%$KB|PYlQy<;Rkl>$I3jjjYZrNxv8}lZ623}kxXaqmT->~1 zMD(C=*R>;pT!2vM17;}fJY1g-9J6r4EQ$*(c=a?#3pm0aGuqlA2PD$U!NT^-gCcxjW3Pwm^)&xCnjVn2E8~*GF|M;0{2b(2^_2hglx_Yz2gc znK^c5E>6rh*2dVAF$e6;jp0b=wd+?8t>?@d9kJ9zA(3Fbb&Om8HTEQH7iokQTB@;h z#LNQ(>c78M$6iNE&gGcBg(Sk+!Nx{K;i$4a8y`>sG!LANtsF7r&@{HUv|l@F@8F6h z(%J!AW7H>&{O)T3O0;1qU#$X#bhLr5+7B+E_Ri)O4v1s6#y|^rWSoI;f@9_m)>hyX z`fFg8tfY}vNYpArcuWON5GE$zxIHjPmWXxVzy$et0k_e1WOV@ulmh}Q-Cy7L@6hr0 z`T3cv@(5r<98j)+@Ys2HFv5z_Aua(7d7VE?BsW&>%-2aH5B8ujMhnb7jg7vSKO-sN z9E_m-{`%1deuF&zCM|e)e=H;6#t1>v-()1ei68=I`qwg2=u1ZOVfnL;k-UN!=B{TX z7cb`E--nUhJU^C^rltt~-_}UDfDjB2?yuBHzJHKLer@Dc5f08slqIkbXmh{D=5^No zf3dR&E;t`T;J56o5WfJAF$nVhN;@kAtRmVlV(cu>*JhSi0ON?(YbXETPB}4VW}V|c zim=E!WsPQ1cF%f&3-4&%E<|V|NdJ334Cc@xv@dyI`hhfc6;dC{(Cd< zYvZblZ~>_ar!vAAjzEE5(9V0^$4^{c{R*A*aQ*Z7EA+L3{v!P45?GVMbsGAKv!7V` zk7ppyZv?Wx2m^WezG9&Enq58SYJ)}@J{jEaXk&$hq^0FRcJ~+hNbU`h4<9x%{RM68 z*OAXg7FrpDH0+LonO}#a(|5ota4EW zVGUc2bbgpW{QGh7*9}%9CbC+W)*#>j z(#4>aNR)#)(hO;1Y$f~osMP0_7L6->K1Uge1i0ZUV0a4Q;NXhEr;QyQkl?&^a3TOc zQLeh+RP^<$AC99xu=8O*%Am2lPhZNQGfecSQ;ycQ*rgex(74qah0sd|r+^O_YrURi zzX_~i8DNieM6F_;;M(6j5##k7j8SF?3~K$G=dDis`=H-%f|e+Rm9c|`)0fNtE!^tQ zjE$W^5`{v$VovNhwn$LlS(T$+dn$<&%wNyBG0ij1W6Upl|0g*5`{=%9^sK>Xf1 zHiWiltD|fT10}pKjGM8Qg&DfqXaWoy@W?z;paAIrz$7b4%oo-ca5%aaB!xl%4jRK& z>q2PrhEY>s$oQn7d@|q&n#~w1%7w1OSfQtrGB&X^TeZR1!ix>k20b47Z0sQ@Hz;SI z{{zR@j3jy)8)`9_G9YFN;4}c&pVaD;31Q=8=j0Ro(vGkJ>Y@q! zLk!|qW`=*mEfJE@zD^-_y0MS+Q@>T1FK=f_Lo4CW&jJ^#fBoj9C0(YPTv)-yo1bMBaX=P3PHQcK;pO z7l^D^eapJ^>$BNChOmLJeZ4{Y|7m7-RmZQ4Gi-GLD@lLvKz!rsd9iJB-zgN|I6js-|K0Gv z7$V1oF(zO61T0?mukj137U_TT3;Y7S8_*Vz#h??JpQGJuT$t%FZUU2JVU_z=49KzB z@XvJX3pK~TsMDZhfv>AJKRl3M#d*Mfjz6r?{LS6a22k=-Z!=a@f1n>iOVl52 zgg~DIxQi~W{Drj=Y>98ZWtNshqO8GQ25x?1Yg+(#+raG6|Exl0Umn6B{nBVi-V}iH z4hZa}|9oU+&1!tRDF>iE7bhk_#n@7e=KUN)&V$J=uzCoXUh`RhxU`TL7s*Oc8|E$wka~)U*@>M3karl1+ z?!=9;XX`wfj3hR{SgS+-e5bT#8@?x?zh-Z~K}|j*OWa(7m>B+tsLogN$hI+I;r$1= z9`3&cTl$nV{DbWNY6kfY0{ugT_lMejfepym_pb7882;Z;=H>pa81Szw@%{jt|39Ke zpolBLivemdsm9N(;<90#GZr+$WIDfc?t$H)`EvvR@7N#oX9IZbPKQ4Pg#JC1UEU3q z-G7Ir#qPfPBEpvbtDyVuZ)Vpt`WqsVb)mH&&zF$SPYtxO48mAhtm6L4Ru-Eo{9G&h zpTcZFQ4HMjfijKarVY-hCG~@;O9F5EZ=GTU#y7Bi?M89n13lNq>1G} z#|5mJoBzoL2nb;_5{w!Csr{0mi|0!Rgn_4jWgozUaiTxh2mBS0_Ue{{Z;-wp0)>2q zSlRl41=T-8lRrSkH?IEQfr|5BGVL#nHX0rIt8DhqZh&7i9)CuD(Ay`! z-dqJ5G5N4?`Co#aV1Z_=H{rwthFGop73;Rxg4a5!{PO7+KXJ=8wm9*tdKWA$|F96d z_SBef(1f4lT{bk9eK+s&SGU+;x3FvwcJ!lq{!NB>P0zkT{63>A{M;LM%l~8)8~aS! z4SwKPw#2+xnBnsSb$`MxSA>g&gD$$ek)4~97Y(U^&&R>%)v6}={WH3P{(vU;Y9A!{ zppHO+-Q}x0!N2n}EdIP5{d-60JlK{#jFj?Y8`!a8`u7ggxi=&k{NMJ-ivQRg^VRrR zKl7cip|8mQqsPax_rF{JFNQr~A9?;8K2#HBVP=MYRue!8e)4AC?_YvgGsoZVBstl! zC>=)oe=_)l+1iSkobMm7Ie_P{b&%_vBPYASYWd+m`^W$A9H_O+&|iM`tE-=|j`jON zU4Pn%VqnQXtSSG!l9nG@wcx@&H3g$R*k`f-cZU5xIzFxq&tCk&!}AG#Q_bZU^Gn#^ z!*1mGwE?Vekg1=8 zOMQCb2}WeNH%2vUz}x@uV4<%HEnFLNAU;fD1B{Wz+hTV*YRnlM4m+<}K z554=-`NwKr{0-vvb7YGLi+5mT3yUFPMeFZv|2ICS|I-Vk{yPi(A2XSa)cMilWAn2A zZu!5MgTY2MUptt;vPkv)5w^!Ri1dtJYLE9&~DrS-pWXh!)tqQ$!*wEnf+&c@Y$V`A_h-Ok4GvDEoT zy9HiMK)B8=pgVQY%?s-Qu7682*_xsHpPUDe5cZ)TKS!teG4%_KyTE?LMv}i`l7ijJ z_;cOGf7%oBOIKI^R{rRRR#yZzY}xzH<&TZS|4Ygrm@?jPc!z)14(v6P^X-NoG&*6e z)?bk`uwf%?%;a3yZFRr08^Au_ZG-Shn;YAJ_ZO|UgRFn^2BtOPkE69e`!;d@uUqTa zW?ko(?9GjB(Jv#ib}<8Qfj4J2Mxl_-_T22w=q5X0?~u+i#`fk2I7kZCztIT>9+SN8 zO(ymL?{UzHGGXX z-=Ht+-{gqxY{I^OXZ4L#*n>YOdin}#!Up|V#rgilA8tX6Twx6bIw3*#HesjyX7Fzn zky^LM;rH2tIr}@lmx=$kbBXl|yH0h`LiZ1QD)^ek_-lYqHt7Mp6#Dv$9b_Yx5Nl8-;cNB zJlF@s{HG~@T|55UQUAZlj;~sbU)hdxZEzxg%LnqUMj$98`VEb;P)_vQ7eD{O#xm%r zS_KJ5zcp~(Iji5oEsR0XZH@T?bAhP^`u%|DQ$?2*A8y)2y-86Cu7>%)cA@Fc4!D#;8F{ubL416#C^vlP7Tu>9h~2 zT|PhFRYGUeS@_}OTz5F1r8cWk8Rh)+fW4~=H=-_8CMreq#?h@?wh>Bzg9hXAg13zy zZ1b3HGixpL6mfh1##b@&P-(ADmfjss_gSk{<+x{TOKH2;uO{5jM<*cqR{8QigA%9e zkrx>nxp2k9o(r0Rn_b^{P50x!@>;ZxkvmpZ*P3IZks`NqpeX=CHwl*0*L>p``%L{) zSA`tfrH7#hZ+Ur6yb{&CuD)3-L%W*X$m8{%Kv79tA znaYX6WD!@xQ2jL!QdZqva}(V<*32rj-cTSP*6RKklHi-1Yc*%re28X9jd%og@vBAgjAKpJ#({hJcKkz4h zq+{fnwzNw`+MmdiEswSj|FXgG^K@eN6NOh6%Ix~K6*~-{1~k5r`67zxSQwo=(bCep zCiCalE|8qOuN7Nh)3q;A*m1``=dt!XGzE1R+4c*G8Bjv;!!}|6_%Pv6acD0uFCwdg z$dvH?PZXg_ar*p-#nfq26vAmgOxJVd*!9WHEH2EvmBtORBFi|IPEHLc_MEUzu?dQEemUN7Igwb+qm2N`oJwe~JUBDH}nr$Z6He>B!Fj_k;-ch6WH=Fwm;X_%&O%-loC9!PzC^ zWW}cnInZ*;$Q!XgR@$XTS2nd~k4W3LLz!ap{kyBLI8VG9F}we^E}A9CVyd_LX@Rx= zK$7jsT(|d&SpkUkqtJu$!_U&}7H6$v%!-vgVXd+2bhCmVO*X2-E?1};pmZ9Ux;Z)( zwSJC(#Tqv?Z>U$-0a`>UQ1G3KIwfG*uy=-26kf9naV!@vqI8#taUhwidAZ9JEG$6C zX>{-O;Ny%cNpM+IYj^=ne%WXVvYD2a!E5Qmk<9lK-9~}?g?MPayUV@HbMKlpQO!)2 z!>V^ch5KI{wb%Q3m%7bF>EdOT!3AV`6trclj@~)-B23iX>7L7!RG@HePXHc;fDI*J zQu63F@jTk&*HHs?Hx}+os6foS2 zx>o@Pwaob2YeN*yDLfQ4e?lFdh0mldWwRyYc;2-zAukKF=tYnvRf}n-6A?MPAcv9_ACl>LLTFGnGBT$r`(x~C3hCczo}1N(Ci>n<7{*CfJ8(83HoOMjld$63^vhvv`(zgfQEnMEj$G&F;u$dII_} z-7map1>@#hyI4Xh>mC#yR<Ge5|V+hr5xNqAsOO!*~QT63Y zsdIN-(Vg;9s5f>MLelw99{=J#R=*8o4IdO#Tpuh{Jit@WbkVJN!;ZD=t0rD%4+s~+ zq0cB8zBuf%cXJRKF~Q!hFG2+08Yv$bG*{+QAN?1R12LUMPOtIil#)UG(GCYn0rppN zJ;4&4Wo}LPB=s=yiLZhb_3`$q0200(J9KI<@V4Fe5;vf)tTb+y@PSott3i%P8S=Z% zPa|sdhX%5ySzkh0OJ45{`LJ?+^ehjJWQ5Djlqx0?-4+_Q$Ti>O69Xx9es^`>)3YWq zc5@H6kX{elU2Qq=E>{C3I4%Rml;Q2I4o-j7I?gzKalST^fzGIG#oK(#mM!7H`T=J~ z0K7KA&Bfkog7XK(sXi{uNOu<6y=c-eunL6G$x#FI`sQiM_CqR{-Um4Yn|Xhv{L4CBVgd9W_TFG)%M_%1?)e3z#`1pa(<}J4^ zo*zQT4FWcu5+Hn02VoE)rJ#CXbh=s+vn1WXt5Wyt>Q_(a&VaBdc2annkL+qh1p^iL zfD-xqwk8OcOx3vTlqIKi?O@6M?ps;M0|)8o0B?6|kmuxjeRPXr(NR4f&UysIB~R~I z>Of^uJ*I0Jn2({xJB8r&7k8$}hiZ$*lsmtDsq*oahH8@7g|Uvieb3dzwP3QNTbr_h z%x@=WKT&a0D^hzq{l(L4qd!~@snk8z1d>E+Gx_3ZePtXgG`OsWe z`YXYJmiQ4g)j_eRUfd+REbbjr9ZcQ_m>aO0A*Aklf~dQ8;I7)@nhAt{>s4ugO>JmC ztyJ$&lDAwuzjgC1w1?+u+oqkann)e~yu=wflLuA0*aKWV2m<#lef+33Xj&aaR!0nB zV$;i4_f%4hVhktYlnvg)$K}>!z94%G!DyJbiG*ROjkGCXZ|RyEOlsonA6)E<#8)}| z!gFEl8ZM)@GVUSX9B!A%lZV~j;~h&^t!^>$z5qz`I^$N^{k}(`(IQ^&>b<$|3Y zYD{E2cCCShN`W5=q9W$EG;NR@EJOPAk+k{`ohDvgpp^}x1_A3$)gV$Pbi@ih<264( z?l_Kkaz*k~No+uJB!e2QS!04iqVOFO4)2wZ0>X|Xsa278V=T>(unFf!>Y3Z-p1Q50 z>+Ue=xe5A!3(0)326mpj_a1El0+BD#vl(FeExCmdVXT zu!mJ5`$UoYT^oZtD)kyN2?3jg><0-!{5s#xfBO=j<;^h@S~(9756)A~jCZOVG(h~C zS#Ubbuyh}wu;v(tL2;_hzDR3}-r$79+C-7N?WYIFWE*d$HbtL1PKG4reI&n{tn}=` z9q)1rs87$nYB5cj4s@|sV( zxmdaV;1$QWFI)4>LhIUV3b=$?NZ9mv&+iu?diyf#*!K6{3&RWvAUp5wZuOaZ^d3fd(LH}wiGEw`B^ zZM~DkWpXapeX3WM^03RUL=k6g*qg>gkt0jI7SCj?CrHHMEP5c)y>0vY{*^4#h#8Q1 z?R=Se+yvHq`MG9eM)Sh;(?f#FY8X#Y@c@6aGsu7Ylbv1Zx^{Nz^jqx5+WDZguty67w*(qYOn8#Rw}xK3AJKzqY}Md4ZMe>^8xd~| ztP9n9`?f(2j`tT0IKbky#A8|%YgDo-gA>Fr_Ti$qB1A}wJ}8FJ)rf2wfH4Z~A7ezF zk+iuI-ABM>@-V;I0OTZ7&k9-a)iVnt#1_}=aKL$7+IZdYyh>ooE6Z1wgLgBiUY{G| zGk+ps;6N{1uvqW(@ou-*{<5;N5f7AY4z2uVl0(V?i*pkh?;21PMCF;M-A)~o9BiHq++f(HWv3X)xa zw-;xZ49}?f6CG&wwrt6+25Hrc3%RsW#ZF^E>;}cxl^;2Gd!`%uO##!IkL)%q)s~hC zhfDP%dTS$~#~i!8S3DhSBbPk;9$rw0VL#A+wr}Tte$9zRg3HG#xL~+I8OV8GddyGX zXo%>9mms^vX7mVMhm!7{GI^G&cRMbCW_?j==0oj=r$NrcFK&YcQ789>ZM+)?E zn3i9aI1}sNweghA)XdOUL3mXqZP`wAeL+5iQp2;}8&#hL*OwU$*n3F;dAY8Nl*91E zg$!hOS&%RMs!X$jwj4JvFdo|a{W6^znT7QXabB$;;;G)#FnR`)+nvUfamtwn4uyG& zri}=)C*npdpXJ5600n-@Gv|_$jKXxom1WQG<`q`(yS{j@ER8O{@2}8ZiMoCA9#zzE4fy?>(-eJA{UkQE{_=5kYl(6M<2C+#e~wB@K4kHcO|x~12o z!LQaey8VRbZ%5<=B;Gider}!#W_p(3d<)RjsrDzRlWV$ZbA&oKn5<10s-NG#*^sJ= zoK@E+0;+V=JkTe6OdZ%S4euCM=}GI8Az^h*GMNh7q=-i93z%vH1YcD$aNx)Au+wVy z5A;UZh4;EZ3gL47X#=@r%0U4{LM(KiO55aL-MtGUnLCe_WI_W7YnubQQHnlzaZjp_ zfFY;dU5DqBWrL$yPk}^d->sAyku3=Qn1B?Zw))m8ky&=M%15s$2ZB?Qjmo{4-_kAD zlx%vy3?Qxr<8=K&pn|bx3IrG<(?jT6b4a_NGRP{%ZL0Td9lyt9<@S*!`CEpiVM6w% zGn+Vu#8YbsN(Lw&0V5jC$jPkTZ|nsUvk~Q55R0W1ATLmf>IGhw^xJlW&5U+0NV$^> zE(GohW32&V(VWy93RefQf`60v3T1r^hYqbm!41Vh85`=zhTtUz3P=$(_2C@az8mUS zar(FMxH1*B#$+l3bIgUPBaOBTw>Es{pKuovPg_9mM-NlVRYBqV2JC}qAi?c4df*&y zNkNC`nuTvz*DNaet^j42Y$MzQq+XI5k;?LUrjMK3cDA9Q9a->vl;Y? z5!VUN2hc(G!sHF**!7!J%iyMH>|=%pLQYH7Z(H8Hvuvlbj6d9fQkH0_UT8N+A;}49 ztj&6ZNr%;PC28~F8Sy9DSaDR?w}zG1$L)Ph-BB*=Jg%J6OHIA~V*j=?VX~(gb~Cx$ zY|NgKYA(EP*!G-Z?o@r{HIUhFcjVZfyn~eS3aO)c^;0qAt9!=9qp^&YEH$sBioz?y zFXda_bDEXTfiYOmVzEpKI0=i>L;Y-mi^_HS$kMdN9EI>L;{Y}~+EkG1*f3@9Z-f%- z2O)w4$jG97?4sTcHD$zC%CHIs1yHwTL&YP<%xJy(+nVEg1qC^V^iG$X4Fb6A2t3d{ zNFh)&j!fek2_91}xq7x=q$ny`4}lY)8}>dbQFgrVqCN>c=?WvPsM5G5jHYgDn>?e` zU{9r=@%RA2D>2q`+02@qgaFFZ);i;t+S_080tBXGl-XIH8kwy@@ri4ZLKp~NFW6Vm zXwyIFeGEMs7~SK0o&*}yuEwyqkXl0}tymklBf2N}bo3TQT3VT%b(M6rwl&Kl8Y#wI z38`RiRrGC!VZ;Dqf$3A5II!0<(WmR_4>m#`&Xp%SeOcW z#-JY=7fECh^<6EwW}LtVGH+!tAt+-|A5{!evvDYNe5-wZtbbQj>ehix7XyT+j25DL zcWFj=k@^k^z8_FM2ppYr&^Aw+H3vtC8+$y5qYkBO$jw~o+~3+#;wh?;xx}Ci54~)) zC33s75H7ucDE%UB98u8&JsZ}rQdg8^rT;;%y(6B_Q1Mdcb9(ij&%x?T@`K4(u8pVe z(MPouP%4vD($h*Gb$pgi`Gh2gM!%xp%f_`<)!eB%rf01B91yglr)hoYP3RXP2!c+K zZ@ifBUK|r}ysem>8TmkkRzb>=p`)t5S|*p_m`!w;Xdvy;RJC~k!h6eB2+AaR9uru) z%&2|etZE;|N?eHqTu{A~4+Zgy6oh5^_CzZclp+>TP_pq%&Bs=hO8h-dhWy3Ufiz*% zb>{xkml03#R5BF~(%iLEJST6shaN|2mMW+_+fuB^peE3FoZ6uEvtsz_~k<$)-x^s+9 zHmBDHa#eh7iLX?F)9IkITKTZLLi?dct(FH&eB{ueVitX>CkJ4%XOzt66zgB7-5$R~ z#I8-hnOSAGWh5KL3NOb+*z?A7T;HX z?kDGOB3jG57`V+ENcv41laEi%R~=_Gx(18Qt{>yDEYlBkwtQLLkt5~3#ZKnY$5-3= zp4>4Z)@0U!YVN*k(=C+~wDmr(_dv5sdNI*0@$~aK-1st5X!L>e%^u=77!(JV<~}!m z^%a9CK;P9T?p$0SN)i{bnc^!}huk>49o-5W^)r3V-{q%1L(&ScDYzgJD^)+~|g>T$)1z-3wtMJ}4?v79n)I zdj&57`l5uY`IIhMm&lCNz_gaMEW`2ck~yzqX zcP!mKIhv|bX!my85i*vnkyM3?IDYc)<*yOO20=aw$a?_@0=*pD--x)!mJ@~hsE5|JdY-;KE)Qdof<26_6ws-%1NSR z%OzeCmlIsM+MOx6O>55dz@UKmTp{lRaZjiKeR=_zEL*0_(tQf4LsR*!p{0-n2ef~t zhIU)O+4RV2jMXEaQJYY&S~N!B&jL!+-~=#-Zz1C z=1&W_Ox5B-AC-$gY>tb(ZCP&!Rb7~$l`5y&GNl-=Hf3CL=(^sGZ9!!0A$1ec((%L{ zyR{G5nuXfgY^}Gynjl8qJE`1-X!*lCV%85Hc44|)&ywyw z-W=kO>i`hNr;_3Pr+0&rqQ|5jP)o!8dPEI4I7B(m%3DrCkI1MK4vVlVWYVklr{wM_ zxjQaDqG{1AR3L?T2kQ^xV${gje8PX5+}D@UFo&9GT=1O6ZvVc{vv~@a=;KJZlw-$Svkg$#RsatgyT2VGospU_*`7P z&+p~lW-WZD>L&E2S!hBGl|{7BDVvlAyX!Z(?0AH z9;lzda?6d731ZHA2sxQdnG1VVtY?Jt-?=>TTbU0Y3S*W%b>}SKhs}$SKL6gqtot>f2-}nJFf?hss&~pdHN)Jy6wWf@8ij3`#n9~jk}5E!ww&Rzh^S%K`fW?p`&TaSykFE8`_>s^)>G5 zwujdT=8-q{)Ta?NQ^b8-_SZ;PWZOHYw%hFz&E{LQ6%9c;XP-}3Dsa=@8*&dKtA9{I zbV3#|QLDf076S;(w8zB@L!Z3}dJO{bisU$@Fac*TQpQArE$%xd-wqx$X}NsByensG zLDo)*X*g3@x(ok|Gh_=&p&YSzjk%B8q?grUqWd^P0L~Jd010o9P0^l-?xNiqeA921 zHfZp%?wLae`8)s$2CTC1@q+X;wRp)k!YojC>NUEgX~AXHQ*%YEBi}r@ajb;Kd-(Cf zTrIX2;6$w0GC{P;{8W0PdCQ#(B(RCQ*Hap6_6|4g?o&=utT+V}N_+P#`ywgJD>Ai^ zhl=2{DvrsFf(mu2E@W@WmT6jMP$T+y?aoL=NlJ}tsH%0z^%d(UOQMvAQ4$75ON}CS z;du(Cga>)|9ptx2d(}-lVJB2D#7k18c4e6?ODQQt_paTAx@T2xEPDpMm(zrKXklqK zS#BS$NFGDz6d-CJ!=^8j<#2qgc~O@ZRDLgx(*+2j(|DUNQJ0vu772s$`5g!rOwgTgP2QZI8-9>zZ-=>t1^>;VP1g~lf^gIj3u z-;rj(r_;yh1kz({K`UA%yyL;?7@)1Cvv~qV9*d1^OY=iX3i130lI4SKYTxvCX*~-) zv4h$uXl}gw*lERr?Ak?Rn^$GNtZ}(|FUz5R$r>cKCSZIUN=gBT3$W^)eo`@`U>?{( z%UKyrQv*oM&%UepY87j>kj@0)ZsWJtJnl@dDU)zeKKQC6x+?9V_TJat-D$^a1xvaHXR)4Py8DyolVWMy8;2l%r@6I4plQxN!}raoG&+j03NC z|GASgCj_M{0z`wz%mQ+=#f&#v=*f_~y_y(|tam6c(^KAj5rrP)1 zRb@w>YGlfcOxjPbQH7lv$P>;RekTjC#=a1UI}o#72!l{sa9HDYa^BimhUlsh!RZg@ zX!b(L8+UQSLSfYu$@@l*sOyf>RRAjIn5to@Tp`d}(oOkfp2Pt|XEbdzr48sLpFY{C zvZ?1qb;35xsV6x$n`(~^956%{Cy5vshk_7-(Q>(ToZ8Zfi4>DFc|QBtIlKcboL|kLOR}KNGjzme=DoE-QY5T8 z<+L1z!iO*V1Vq=18%dX{F==-~129uo54NR>)bN)~XmpK%Vw$(u0zurnYwsA#hu2_% zP53UfzMk=+$-0j{6yinCG89ili@o(4cQ^~U8J~V_oVsJQ?2+Rmdlo8kZeis$RNWVs zJ|ow;58}anHE_GzL@E2H3x?7$7u| z9l?vco?vgSiDB*5Mr~&Dtpg@@lgGVFn^RaB=EahQ$Cuy{d*7H~j(TtD()g%p>U)(r z`*ao{nyrT&9k;ic&WRR?KvvU zq&?6kzId$mWY+B)1j#nton4pNZStjYu0j+pzTB>njKjnOmCo7cieLK}{|%N03~^|Z zij@F^famW+mSoq(@7S@MUc0rKugzg`aD3LmE(Kj|Z(`^fe;W;1Vbr#|>@Rm3mZvlt zf~z30+ZR->?l(25eH?oGHlzCH>H?_20=8b>S~g6TVdGw0_>>3~iJjLmLw1!JPN0Y~ zxeaG}-YKf0LV_*Rp`+QlDO~HRs14iXa{Y%l?^OiEyRg$wP+wUrdQLWro<6wP&nj;b zJv}ucPKY;+^OBytlk8oUuG!OOS`H!lE%eVt4V~|%2k##Pg=!b#&0S52*|+6$VDCJd z55VKQ%$$L^81KSc=IAe8h8m$Slrq(FxbWvLyt)f-o?~1%m$2A+q<~ycTlP_*qY7tN zpQH1qTZMdPz@>`p33N{YD*?Tv; zHqHIsxQ!aQKsx|LCdF_*>8tAvw7xct1J?uagvwh?MDrBnPmh*)xTY7@KFCo8qkz5^ z)q{yRq^gM3yAp1Dx(Cg`-@YrN%`9SlXi|WT==KfE+~L{BU1P(&aw34nm(?n|T3G0o z(Mm*o0k_&Tru6_ANe)NbjvIu-GD#;q#L9x=${kY0Zk8| z@fjad)d7w?$2E8O|H<)7lf$PTAB|<_m^^wYGI1~Eam-LtlC)SnK-u}Mp9eJ+qD=@s zFN@VBqKDqw{=oP*KEMMVLfg*$8fXF&ufSmAO@xCYH;{Tj){C{ zx%a`RDe^k(pi>nW-OXC;_;wOh5zmcWO!4fk3Vh(OIQja)o3=a#(CVm}2+DZIpk=b= zB3u0M)!8>sw}7s+s`MnUQ|%Wq?kPo=OTaoaz+Wh|^%77OeY`h@Mkhc31fr4aYhvE% zQvn(mXySb64F(+wt>cJXcat+6I!X+H#NF28}mcO5NwQrQU!}TOLuv zAhPFpamTrQ!@3C$_2e+&hgIyy|Fj^*Y|MfJDnKbUniHRpP}4Bb{Dxtv@a$NJzg^$M z*PzNN=rkHjfh*MkO+|vnD|o zsmWwdWnDVp_|0;qYi6UZIfc#>ROt4Z(OSWCLs?o`vuRs^aDk7d;k=$act=xcg zHtblHBIPsTDj-n5ciyMumX6#K+Lj#MhadEqJx7EXy6b3WXPKwI=#zTC!o#E9A7@%l z>0ewD0IiP=mqqu3ZU%|Vhh3v;0w{P6fhuI0*TgOsFE6hVGsltUN`Y<=Y zZ3ATjJc#IY9w=TJgVOb>w+R6Ur`}ct?ly0}4!Ou-l%2~nZ4WW)+_quYL1%POMf)8~bYBvUANbiJV&EQ* z;tYX1MHdpI1S8JNnL9!4d(Am|ubr$s9V=viZDbKtgtcLPV5pIm>X3xv;h=n|zA!tM zu@uQ#*MTh0stP8py>uQ_7xBq!M?Nep+)Ppu2ZEwSv3JTIU4R_AW7(p8oGBzub7t*!}po|@Og^O;8Dsd3ro27+cX&oggd8dx^I*<&qhnBS8w-g8@^K)v@ zObXX;y)5cZz5U$**#eZJoNO*fZlY2hFa$65^?PSgr#4TOtR%6i1G2EW%RcCdh=PA9c69lLhE z?xCQdU{xJxM1S#;drGM)gtSfXPyuKSST?pDx}$OzIc~$zs{vO+#~*Z`L)_@K5Vuhp zKbDLIr5|H86~U&*1*=l0xCk2cGBN|;l<>FBt94if#fwq}E}()&%%FBz^RztGIOxRP zN3RVRiNo8*gwmF)It|yBp%26Zjkzk+=f*)TF^!dTD>OuKgUSNE!JYB!%;qz4gfT^eL@itW~SZWQwW!qX|B1t&=W`I z8bChvwA9`CPT~=nz=Q*kN^1RT83{^puM>e6^zNK89;k~RIm981<4Cux_$IG%rwIV^3$>bW;I8d-p;nuE* zhO=;80C3(bYgLTpe7)b|974y7>RxGrL3l?AyPi3en~=(gr|5Y*apPVgNKs;|9p`9{6BsE zUmpHnU;eH3q21gRY|mrV$)>D};Xs<);wX70YUI$7cV9j+%rUeDpe1m5eKcC4@)<4J zzJ``065!cV6>ci7Wrvy`1s&hkN;Pq~1l`m}pNv z*V!04!t(OuyG6&YMh<5?M&p;65o()~>FeTAAu- zoNgWoNW8y%zOZD|i_)^fRg@x65sgxaUoTlqSM6PXN>{k@P7!+Kna*{*!|vC-lBlhV zW4y1q&ok7&#f6cvo=^8DCQ;doOJ!I@@;=^Q=T^__8A}(JVG^0D0CUBtTRuuv!E_~H z7pl4CsKc%O#bX)(^dVXWeO?xLp?Ef_d3Bv*5WtCIoT_~}oTe*D4P#(W6?R^-$YRJJ zzkqwWQ-qY4b$&5&yZmJT_~w9sz{K3cu=JZGH>!fM4tzOv6#?qIp9?K4Jhhbs-f#x@ z#!)6?DUDtaQc#aEJH#eabz<3$VHcY))iMeaHzFW0c<3>mDJxFaiD-kTr8aL0KJjmPz~lS-rrEUE;Jy z&hM>RlH5&S7h{w(rOsMJTaxVF^<)*}i^@Y|e9xXQj91bWx;qHU22myC!!~dBHJOVi0!H3ke-g76jiF4n_OlEJaP+okYRfiDx`3Z+o2W) zzuB@WE`CJZ2RWzBxf+LoxN0yDElnC#dLl$m7U33;td3Oe%8lL+dOz!5l#acQ^rrUM z5o)_7w4n$a*V)@w48JV)8_nN>G`BS#hMhQ3x??{6PR~vw;mUKcU|2P*A|(ESSG!-8 z>pL3zygltA`8=N)_zN84KQu zQy2SX)_X#j=RyuhFF#aqh)Ycr06iNyNH@xlZgJ4l)s3S;1~ui{M7vY#Pf*pZMK{#pRK1H&WGJhue%>w2lvBF`nUcks9I7m@FeOe46^x&Cjpw zMDAuO?6mU($f*fDq$X&P%SBj-8fz9we#PaO1!Ky{; z%R;=3xz`geRV~Q#@;xh#89(Xl8|U0l08Z2Saq4)-ZoAma`sED~4%wv#U;}j*_Sa}H zic;z=??fRQN&8RE%qCHL&sw$Kwr3dpFn`shEa0G2xy#tTOLJqPbY_jzHVLgY@qGK; z@~wRJ5cROf#NsC_NBDF_tZ&-evR-EsI9p3`k=E}cr}fOf7k;;H-5S0;T9^ICP{5%B z4RuAYLS5(YI8k03ddXpOPq&^2U$F6ZYSz3cEIqT|&hH6UjGwDIhdnMX_m(h9Wzjot zrrB`XAr~xI_QcKRYi!JWSb}`V+L?*9Pec`9djTdoOG5LtZo--(sskwKwmnN%4s?b+ za(cEH^I~Or=wN^5G4hFMj?f&@V!Bh!*FQMbYdcVg?i&)Yk*M(9Vne50)m>lW1gpwy z`!F)ZQ|E2awo4?J1w&%m_mCZ@;IfAsT;Z8BFC5c)B6FZk*Y`fC)}vCEc;(?e*J%Y` z5q#1ocNHr+>4|{Yc1EvKNW@)HhH6&ZV7MLaQ0+Zg2LAX2f^;VCp zK%^@WBpDDCkmvN`aOLpQ*z>2eC9OZd(6VzPY>6TXI7ddZr;IOcB!H1YRCL?go6 z?0Tq!hU)#ZO&<2%e>gFz-4H8DKbWa|BbP72kwVCl=vn%a8)uYWL_oRBt{;Rw3OcJ{ zKL~4kc;Ti>%BdHeh0^Yw-tYH1mN>{3-9LNVc1$=`DV*}1Ke7DF_rtfvPHsP^oa8Hr zd?Z!7G!GHCIWbWGxZ6uQ)UI!;FFXbCpWx8!_}Nhk?lTn8EG6DD1&v+J6OINVR-kw_ zSA4aDs`>~n`RSEQs|M_Bw_d3{RMN&!jZ%#H!ouOnim-|xlyla36q3(4%El^mzS527t=0R26>*wh?E?sW<7V1VjZ9FA59i>-1ElkFIvh(!Ty=`ec zhuD`$7S%3Sm8BV7)qOsQr^`v4ad*XM!n-~AabB6r)*!1}5w7g8w4o39g@wb2ZD})C zXc#XF2(*{3h^ZRf3)sS-dULeCO4g{igyMO0EJ@C^?F3a-5XG_i_gx{0=H1t64zcT< z*_l*+&%(TOX{(^?^5Maq2hT;3F(+HT8yq>gk~h@!#JWK+;dr?!!u~#U?g3W!7OIh! z;#`)Htdqr~XWtuOspMFRwy$puM^BZvO! zC6##DRG|)Y>XYfZZTWBI?`YlBd3zCPeF3%I>zd3=0=x>fMcfPi+ieDp z6|=hCwMnZA4%pYWWA^dj<5TvlwI4nkd4c4lZvCx4iFwKpyN-Jr>b^G$aG$$&oT6r6*} zKJaIa#qWBXUDEMfX+q0VE!(=XOWH5IpfzJf!5ER0uYxl!(=%Q@FOhkM>fJKgCs>xD zLP0&#$wKLYo5xhji?9bd$aLNLA@C#x{+qhU>iezZT~G40RR$8JL9^Pvtz-T~k3wV# zcG0;xI8cfmMMwzqo;o9(SaPy{Tolma1_8OIlT2gI`8Urfyq9OlcLsKQDqgcGvA{cQ-%vTEqr@GH8*&(0#0 zY$s}H`jPiDgQ622im0 zMR{u{ipv-@z1s z!D&hjH+2*%gZ8EV#^;d42eqR(ZvPsclMH|#tCP(I6A{I`kG9r#{2EK zL8|ISWv%G=iu>)w&=$4{L5E#4EIQhbi=O-xin|$(c08w)Z7ic#N@yLnjLic6zV4;q z!*q1athSVY-_+$@&6^2FbO*+HlwAQ;9&;m!i6`<<4^!JZNv#EviW*+N5h=JbAoXgK zDcMPe(PGd=cHdJ!Bj|Q`N8L2y@i`IV@r?a7P8^1r*}VMvZp2g7`4jx+;w8xe>#7$-8DY)E;?bLYrp&0fA)8L-|_vQbKLhmt}(8@&Qn_) z3zX9BjxVU`osCY2X;+6H{G)VF3<}g{6Vs9f+&@WX$Jde!U#f$N%A6k}X2-LGkgTyX z@yKiE;oRcC+z&WDN{`H5cj=!9d$|Cy9-5mf|2bK4roC1w`ZzDs)3#h2ulXMh3W#tN z1pCK@8u`=P{%}6!Y9-*Y+9Q2Hs7u;RQR`oN$R4IMQW+r>?mGP)Y-aursPfA{?6Z$r z*498115uvLFyB^wQ2tL>*d9bqzdW7vO#3~ikX{nUBMw0@eS7N74!$$3NWi1b@dSR9 znGR+5=-d}%rgFoY??4rjo}T{UXYs#%{jX^^zJ7mB?%xdw&r zn!csxk4MV=X;~0d=AL{cxK^j5d2vxL`b7ULy#F5Di~;m6INHf|eZo0aZVA&kTKS_Rrw3_pfg$`v3{sT@X4kd6hH3VIAYsMiebH1}U6t)gWL0+y9OV-J+** z^1hzA{X`>+hcJ`h+dsBnLX+DHv;i)e1<#Kewz+HGwRqklUn2cB=8KGi>EkviPg`^K z#doP&tA9DQZ&x=kpIC? z8k3BzZMqS>)llLuFkZ~AIXCdc+v^>9KN!v-=f}n`y2|4U`WU!OUPo?(;=2EKLnkhM zwU1pVCzDT=HJ%izQc6%rWWxGy&UUhoh>7>ALZ;`jTsckHKPr=M6d0hbefJ&@BujSU z9zwolR9H)-CG?^`VWUH#un12^E)vHr?aS6isODc!00Qkc`yKz`8=2~|otRiXElek3 zw|;|qTe*Tj@o(XH>wsR7tn(U-E><@8wde4aSvaJckjDZg+p}8BQc&9LZ!a*k^3>|a zX+E&9oI?5f?w<7-2Ai4 zyh16tH&t?0=R-aiuDXcEh0q_kv!g^ztSK^HWJ!5u{rqk1v%}hn=uC+j|jLokRiF~s(bDlls8=*Fyk0fE;ZFe_WVO zSC;TmI*1q&8EktxFg;BNoKvCAg85WW=^}x@W6NiFzhlc#lB5gs&T7z;6Caiza|jv~ zsExmAQ(=S-$l}}w8mcH(pyZKC&On@WsT+4^3883 z+?67N7`4UV^xOAIREV3QhW(DY+eTj7^NEr@E*t*cB=?yP5o(iOvi$5us4@dT+~ohG zP~$!pIO&K|i&+tL=LIl>4$yz@-kjl|(+WQK{;SS$^qbGk`d@x7x1pSxP5?=ENF5U& zMX5gy6CsHAuqiV$P6{1--`Y#QN)rc{p9;X8&&YSGqiRkb+s zlf>+~?+I~fL@)0e^mcAX`FLJj*|Dz(Lb>!g@bH=0J={xVPJi}zdXKEUjMoL}#xTE_ z!}tG_P)c1r!NI;Zt?kFSar0gJ^i1$b1U_Ncx-j>~H0QbIP;4XXp>7tbxR zFG}7qXaKoC*q3!@G?GEf013?)tB60j5vdpAUGjhQ@)210^#%6HGp$?oxK7XW8oa0J z0#7c}9F@ha*Bh(9K$<&Or90Z1y;~N7&#aCjpn#vX%6ys?5yz@W^qn zZe8FY9=KfS3kZmch3to%TyTNbLy-_c)ozgr#!F0;h4kLJ*+kOW$YqpKv&>S7ZjO< zcAao>#Y6c*`Mc2TEm`_9JP-HU)AQ!I`7{`>-1NVnv5IN{YQgRm2jR~#qW_Q#mBF1j z_q-M_b#UHc-h}@ zCwi}GuX38Nuj~d3fQpI{Dgyna|EdISxS?95LHcpoU^S!FSL*L!qh%!8q+IC3PVE6t zVl0C6Rky(SdkzZi!f|lFtDBa%i)1+nG;Cxk{{67uSP~fXV-zMw zRl~U!A-%|lg@gfdG%LkOY@Np?djg!0RQr}Fe&St1xpnByqaSpVAvo1nI< zD!p;SfC*(~8!Gk#(ihJDb1o+!bxSqCes)J4b=_c~v~$Z7qoARE;wOZcZf)Z+f@)4l zU~fDF<0Iae?UhIJ^^mdJqC@kh|l zE!#Gi?ZpNSmkl|DgXssO$m~BC@;EeK*cP}N`x)YnU^&G-kh{FomoQXSuj4e%FG8wu zs8DEh|1sPQ^~RW~{c)o!G(5WBNEN1krrS)o8JdX&_DI;Lr)21IF?&3@zpx@(Ka?bc zz(^c&V^=b=|J;hℑfZP?E_D^ImGO+`9|#{*EPZX z4}x0#j~}x|qVG5bjO>z}`_j#F3&4ZHP36yZ@~&JfGkeXExAqYni2ztH=8Z3#wf2{@ zYM+1dX&>Yhge1rKZ>DdUZXBpvNw0FxJJrj@GmP!k>wz=I*y08SAp@_R`k&N`J7PFd zh!|3J+Mg^8mb-TfjxF+vqUEH1UezWAO<2wuaAGML{&(WL@fseQX{}#53|jZDz;p(= z5uyF)g~=p0B6~jg|22_CUmhl3opWEE9qTiN(~MvBNFAI2*v6d;1XfD~i9!xH5RBb_ z5scYptt&wY80ZY4<;*UA)|`92bIbw~+9Du{OJSp%1DWu?V3~Sz&biq88I1c>K3UP* z)-~joH!v_r6~;a)aYm(v$bNOFmh}whFj{QGB2#&JaiUR>sT7aDz0@7Hi$h7@qiste zb0lB;9ONetxiNK;)F?JSW~3Cd;ad5Yl$)D9OCp^awS~11oiT3Zc}*Ov%W&Z zHv=d2Q*O5g>UZ0s_Rr$WNG2Ney|2TXIAx6nesMkdEcQIB?DA!P+alF*L0^8{P@NQi zncXFAV#N;Rp0n+fLL&z2$@SWsz8=_vN00e)MSsXnx_p6A@LHBRII5gG8&*-}7#O1T zd@_Cgz;MVhBo2}x#|RB4$LI8#8`ghrx@Qs&*Cs=@_!Xi;$0%ge(kozg?sVC=gxR)u$zqM8+T|)@#;xe5TUHr@`d)8qzN@BLev$7H z#3_~l`r!S)>b70lLWlD!W2XEus`2`j$cY5i*^m0(&FR}-O?`Fft~>S~#y=$mq$J7w+5?a_U*A5d?e0JAje)uyW})(Ki)A>5$T^E4u} z44t=+S@rj>c?%~_4QLWaRmevrou|Ezhij**oAfeCAvgK!{Cw^OpmN&H~|!PhWa{NtmDK&zm+57Mij z2ImRfbm3ZTa zhe_62frj(`iI)2Y`8}KQuhO!EiNd^Jy*xAJqi0HALA{q0)w(|5q(HNL#s2*MDKGd5^eE&(7+5&lU%9U(2ljilBj;*SuRiBiG$tHJctNJ>XIoB`zKFVk9t|jG7fvXBK$T=o4jE6 zE!wi(o~x3|&NL2R7j`m0Be`h8Jv3k3agsa}BpG+_Yk;~p=4R6sa)HeS3%q%cA2+3&a$aRnEcWM$sfAE_0v?#N6 zgbPQ7UYl}5(usm`$@*d1#ww3XcNA+ZM7&Did%$ge==?>g=*rKEyJ%i&M6sF_633>s z0=MO2gcR7jsQaiZ`M-Fa(iC{%-Q%2$luQS6C3-&p(}Z z8E^F*DOqOv5N_@tvoAEatZ$QDAFSRqhIa2R!gkJTp*N01u8gzP!Y$Bp82F_<%Sw~I zg|b8SPasf}BUBWWhjj**p#6k|BX4DR5P;i3BC!?B(`CwGl_D?02_e@K-jSXkxzeV{ z^k>$~toyT67>KF((Eu_wm1nm43nbI7rEXWASA>EoB7yfDm(#5i_)ZF3?U`>ejTe3e zwV*=2Z!%b=I2#i?v+~pn6e7egsDxxcWIm0%ML0|EnYlWQCuK1#TZm1;V{mA(AfG_^ zj#9wQy8oRUmR6bcBDHXz=cxlTWtsE(*ERL{2LvQVrc&*S34Ue1Z(Y9&TYp;Pm)l#A zHz?uMrKIrim?`%X=Zd8M-tx3MzfKkDyKSjOmV^`g+EQ;6u4yOQVg1<;f)JH6$Fciy z43u?-$Yn_q5lOXfvq$r7Po}DcB5`TqEs2r9w~I`VK};mjz8-(E4?M{P{2JBaP*>@Z z1QklR&#=?k1`4$^6V*QFyn&B4cg>_EmlpC|SgU8Kt2cE`z7k}^DDxyP>otcg1FCwq>3Ny z7rfs0+DM+3dz%Ae7M*Pos(aKVPrY(A>X2CFimYvRaXdNXb(fvECy2q?(s&S0uCK$E zffhv(bDys5aVSaf6M}J!xi-Yw8zP+#>I8HXx^UQTP0qgX(2p~+?}$(oJJ3$G*33ac zDjGI*4kt20#L+Q?Is;*CtzbO;sTMno0bGvitNq@JWxAXqVbg{pYu(#e- zU|8XN?G+7sW7~?&6hTGiD0!iB$R-g!S%f-X?V^FSx^%L;Qta@BP@HY9O=C2^T*OJ$ znMrDa)gf9>dbqVG={ci|&BETkR1~{<-&Zv@b(t1KQIsS!ftDr^ID|5cVU_&iH_b(J z*q6iWS~q#M+Oe_mmX4U>VU>bW^oc2v+fRuQi0FiMyMqYyuwgS~N{BL-5jAMr(5tXNP9uaw7V{P%6w5v(X$oW4euF>! z8DP)rvg18{d)?W1qlr`gM+gj~vbLMoC)eFuHjH6^F#b!3&)Lzr)PsY;hjFzr;Xfj} z=XtDp(3qiBg5SxP9$a}IQP=Sg&u?TKt|_XlWn#n?u*Y!|tsPuYmzRYHcfFDub-cJM z?|C2h*!yI=I@iKh;Dgm_?CeP8`j6??6@;&hSW4#bA&oMj8Ob@t>~p~^Atbvmzz`po zJBZ4o2q7Y5`eP1~NL9V?CodOA!^lo%yD09bNa~Awbe4UUE!Iy?!_ccUA5t{cmbIM< zHwKX4`t8;n{UWa$ls(V0u@#>b@|kaHF;jmP@N&A&H_?%`8u4M1ytqZcRHVzHn=YOI zHg0}6pK*F;_QZ2dt!95nJG=h%<#|rntp(?T)vK!q_w7;%A4MV`jRs*iR*skJ@9NrY zxrv!#F1Y+G8z?fB_jp?{I{6JxBA)-waO;J4wSPvTiK&rHRGzf_P@}*7c5!bss}l90 zB?0#&ySl38JsO!7%rIdjk?qI4dy6yh@-FxTg^lY&f5{P<3Hb$^)h@w;Qqjn?$sdTd zMqL|8Ma^2gE4Q=!G1|l4`d<6X9^~-tv0JrTYM!XsQgCVsN0PSQ>rmdLb%uF&4Jw`3J^&)Cf%tw?kJ+$ z8StxHTXrau3JAIIR+tWa;3ClC@^AxIu`T&@@bpw$a^XkxTxmOx@f#nJ{Nn4*t2LL% zP0PE4Xo~v&@1M>YD=weyB$evDC8;#0hui<+Orc}TK7^K;gbl|luN!YhX0Rqn^7rV} zkuYUaD}~@IXjDtKx>%+@P}bkdmyUg3jd%SpwnM!6Kzl6&c7ua3>skLM99GD#7~4VU z#H!u6!Qp(oj++o?^z75z$f#q!K^c27;a-<~S`6c^UY!SP3@>B$vjjiw`L;W{y^y=o zEdl7UQxFX_@sX5DCoEwP&%q&f;ZBfU{?Uu8Uz=U__NlPlDPNwXYw8u3HXIk1+~)%L zZ$mvvuqy9sOMplGmKFb;!qY>o`nKiJOh>J|>%p*oRx;$HNGG8V{5`oS>0*(|bM3Co zAG7D$QM73)Rj*e{jRHH++M$KP03TXrdx|xMlZvqIZAOd2kGtbjT3j%nWc6%=?Jo+C z%#1Siiu7uKnIbt%$;uhNl({40DG2YlPRP)EM~b)Pvf9G(qlt-pImNe@&_tr|N2|mW zVw_}0L$f=b{f9SqEJW-GL{;%fUVvWtFnFLG!fY_2axJsB7qTlpl_(6?H zw1~z$KE$Dp32y&m5jl1>M(V_&w#X!5p(`pR?n9|UR<0#Orw@gt-QtqC*x5X{8g~L# ziePqXc4Dmq)hHEq7moFU7;hc1GDayk`&yz?(#2XfEKWEw+&SXZu(tS zl(Na-4c-3y97IQte{f%x8MoNQrHcR^&0Pequ#+t^7)V<4x)SDNe(HB6U_t-1H<=x| z|BI1~ladr}S<1no`khWd&tMh7{!svOma7$Li%Z**Ki_J&vDBRpCZLE(B+s!wSG|57 ze4L|{9Nt61K6CYsB_4D!JrFcO-?~0!u~1A3jqlIM!G5-Lmm(gPBw`5WuGYFj-!l&9 zrOkv8tZmmv(uBRoAgs9yMa$O>TMWi!qhrqaX|rooL&N{IgYxZ3bR8uLSGet186p@^ zckE8p$_M;{>|8fT;?kLnYtPY&s~wpku17?%RyZQLu5+h0CHr66?VRplF zu_(tgT!=}Ahdy(Nvm)$6R1 zF6O$aq^-wEC!TY@+3+bZFF8h~FEGkCS{f2EF62xmact{~8Ilr(bI=V@C#bSOF;*3= zmTe3!ic%Pj9qw-lQAMTHHvLSy_N@K391yl`jCD@0Fvc|X)3QMI-gNH|mCC3+yRNNu zzbeyx*JezzwFBZl{^o$wXXi?g?dG?Lyb7o5x7`IzJOw1p+NmSgeA&7kJ^Ixd4iXi@ z4Rg+?vEn+U9cb?n$?w_R~Ce^r|gkcc^QT>``IUf#Ki`S zgBA7`8eF;e!JMNgw@w3vi%aQ9zOYmFaW5Jao<$!d>ch*MG;8dVBK~4lyO{F+!~SrJ zK^q;fUd^X8+_V+l+%C&TvzlCCp9R%@nu_(d#J%&bYU7HF<>!_uIGWpkGD(9vF$A!q|i$;{0bq=go2gRitEm#rsO^HOhwF?tM zSJX%CS5!h0i;dP@``qnL8<9(s3Y;Cp3{4l7x;u~^78{XKv3-*~=k`VL^lBQSz}#`X zgi`AAM3w6{t!isT4z5NfSU^6JHfxDx zRi0)>4d+-{YnjDasT&eRHp@H_hd)_rPLtQh1OAe&q$!lqX#rRJqpiflSXW*Gh}(jJ zQ(z^n*m1y}2Yfn;kTn{+b%v=L_o#z*YwtTH)MEO+>h+}8*2Z*dZQ&JB`MUnmZ4>lM zjU!kPEzf&X#Sc2F2`LQ_KMwOKwxd#)(i-GdFexq^<*@l|8)FKL!UALieMcQ~8`jJV zit5joQCgx|6|EG z#yaLp4nnI+R3J}vqo(t#&X4`{)85}G?@Y>jM_{>Z&-E0W{YbeN;bkns*+=zye%F{- z{-e@2=IL%^zqq7e+zjVon~LL|v}|fld6V$%L99WA(<$G!zDN|KE*3Z-96|*XxJ_WD zd>2p(xtFs-;p9bTy^*uclD3zt<%%g1G$atHbmD`$a?wN0Z7>#ZK2;+;l&>EJnpYWa zP*15#9|vAPe&*+1Pb2Cqz4q&SKNy#%fA;&sF;tIKG-tu!u4@?sDxU_=mdXzREzJnf zrh`@qM$)2#bZ4meO6GUoIGq@0x1!ES=yShi}C9W2O4+ldBc60c^HVTr64X<`+O*G1i*pMz$Zjd%f~ z>&{rjmgUN;nc-s9n=qeSj~c+*KYzJBQRyFmK{N+YX6+mtFn3fBV!^aLf^6m__W__b z+L)@P0*Jt>k^pR!+U;;H5WF{o70feC$TzOR%%{IsH)Hei>U=w1^17a=0{k-c2HXE@ zlyyHvYYeZktaLaz0l;5Z7aFgyH%6@m6SjMGr(hyF4Gc9`xF2(Yq1vtcN7-_ob(#cb5iT^ zG6X=5j^Wf61hd?fl9vZW0BRr@zaWhQip_dk*N5{90a@C64o%fY+|3*Erpp_&(+)iVY~E5#*nll?R%uxx-h{cVBt-l8 zd=6F6dMp6YIT&<*5@5YH{k(TGyj#eUhz~`-KjBbs-kSya}vfq}R{0_1EC^6tGvD?*F96(wl z5&xaEFdt?574uVSeBbDt`t_xg$FyA)PR&pGs^dLky*Win88p|1zaZl_vWnWu8Y(3E z$)pf8RBCB>(`oCuN(hah@vkXepE(>aCm;IG`16aRFZLq)iRJWX(Qk3AZzB)a(`*9w zSf{fE2oVusOHYaBRn}g+9DaM2Ey#m z(-l1|26#Q+`wD>;pa(G0=BJArNkB@*0P!MT$d)!wuNKp5aOo~TxKtLiPw{ZcGEky` zAQ%Awg9XF)Mog)ebm_P0^Z{6MdKmy7HJ-;1DP1lq5=ho~cMjoty2lKN(ZT@42PQl~ z6_z%fdHfOi)f+67aVjAjR$CecaC7h(Ww94m0WdT&U^7EmVq;cHguPh-q$o?5*8nv` zgSHL;cVr89y?NY#OU6Zg17o`+{3w3vfu3WdA8H7lWZjvl6a~iO23}e)J|Z#heD~{Y z_c%~>Ga%7>f-+Ve+3+PtNTm$syM3{#DI^a$ND-^^Zr1Bpy8&*V}3yL zGGx|>(8|)y;}H3kv)(y17ooQz1hR{YpNQ$<*yh8eyI7>HNi^g5jO@yvy@FRN_vhBW zMr4hAr4bQ&{b+Fx(U5uRV<6`r7q%`Q!}Q2Q&0<*ZjV7PogWzw2mN(P_Q(?`N0v1`Q zRAg^2Iuu790Dda1h0W=D{X;Oxek}{IGKd4r4Gnaj5Smxun~3_|*?$7)4l)hq0eZt) z*hwUf=z9$&-Ky8@HQ`uFUWZFa?@R}?)h!t0(J6!-n8>G^Zy+)jI^q->ajY$c?+4H# z#Vx>3QqpB5T_VcldPDVLvpT6Rk$OZ;tZ8wyBz`Du@$}7okC4!orvVbhAoZ+ArRTYz z#c*Ck$*TeO)URgSfbFm2MYFI{FHUn0dcy_9?MpD9ff;@O;q3J4E0c;5WrE?xFWp8a z@+n$xKi}K7U72b{J#{pdTK39_p|`QZ-k74qD2|if#*OHQ+nAQDh>))iFtzPSY(CGl z;5WGz_TTHOz)cB9PePK+4+oCWe-;wriw0d{y&G!vQWx|Hw3jdj$dy0O7`(17 zHJ`;2JbU&jyc&-#J37CsUcG4_`5gbKjc#|Xo2sSbGu^P@o9_jMvq_>1-bWi(1~QiU zjiF104Des<+5l5y^Y{+rhVaf28TP2n}WgyaD5XV{6uP0x=uJHqCOpy;eO&?jIM z20z;6;(M8gO~u}zmw3N?q8!Wg^u72M!-ZYUcM@>d9y?LuK@Jbpt|zjdGsdkb8>F)Y z32NSuNz{G9f_qRE%q=bk0@cR)!0-y4!5JWfVCt<6<%R*|5J*1lGHop@w!*@MQ3bse zx$yCc?FDx;4x@C44+z}8t32k!4%Z`gJ4{SUiZjQCWVQt{m~#Lsrx(MT_rDMwrJxS= z&<3_(%rnTfPxHQdf~Ov=_ii!Sdv+$M5)-PYd!>EJbzNzW19jhQC?TjNV0rO;Lb4Lo z-U!if#mEvft%XCVHxzE>7_;jNP^PT%^EtSYOwaiOk8;$-9=S^KwXRKE?2u2qdO zMWeWDd(mV@YGR%Q#sQ$-3>8nZF3JZFH?IztJJeVBz5K=~Hh!(eg^)o6=SVmLniWVH zor*G9f2J}xTqSA-`kLlz(Rgazmjm%GujQbHqJG_D=0$#NMgKT6ZauV-DO%VpQdD0f zg1VMWhu-4t(MN85D8_LBg_{~+m-u$oDMRn5w=zqg$U_!*tEudwsJmtPhJmsrguo(4 z$qTVGye;m29YpyAiXNOL2syrRL4PY~TliXcTB24vZl{0>Gn0 zh)bbiFubbUAa_04F%;-sAuyc%ir1xXm^EBv+WBiDvrTEd!q-qghEWbmOV;xo2TDHo zMD*a-t3b#1AhNqcc$tTB4I*HnopNF z4qHMGwRyATEKN9IsJ3}qk524ni6=~)^SBC(hMJG4N^blIm53m$-%iyu2^^oiqW0dS zKyUKU*{!WD9L_Ry(nJvu`t{af!coi?G9#X2qLeu87CrmiUt_D!S`b1*l|Nkr{YW+R zorqHH0w+Ru5>%VCNb=&up5MJl6wLLDTl`#}>ijV;8)x?AVFfR10(XjE9D(lD!#aHl zIxbQb_xb|WFP?J0SYM2R!D4AQul^xSsY)?1v7Lo>qBzE;CRB#zqWHe9$qy%s3A4J` zi-z$yw*zL4F4(VtLd(`oAa{`JZ3A#Qzi^Ew2smDL#dlafu9#^$0QSJ5u|0o9M~AOG zFibvZgJ)KMf649+s~eOYx0R+d-nF}~ud|7JZVYMP(%7uEQ?`r?_sSM)885F25YB#gnEbPHDAdX7wSu=DccT@wYGhqPoCC5%CxwQGT!P~fwXc=!= z5Ez?AKxLXOIYI0v%!7@?NY1O7@>$rfhp-GoW@EUWP10bjaqNNnF6M`4R6!`33NsC7 z!cS5}gk_brQypNkhnI{GVVV-2H}gPq(x%;VV=Q z87arING4Q4y)*`RiLgCm(L^Rx7( z^2kKtjGB8Kcqw9!N?4!c_w<(4_3GEPbXOlo-?hIhjZP=gw}mn^WsB^N7r0;)i3K1- zN~OpvOF5ctj1ZxV)>ln|SZfd=m$N-h@{VQ%14El_8qU3wI|uyrPT%Qj#SZFrIxBM# z;U6O!LUY&7^W}P}q-shY=S$W)1IJ6Xx_Q^1thv0TNr&0y)}vbtYUZ?(b-g8IMxw*tnM05Vb!vr*t>0$w>}9mq_LrY^uTP^NuameUwb*4IgIFQ)QbNvw%9Bb zw_(F~J4ci5>Mr}~7$I~*W!$iY>Mo*A@p8X{BJ)x*3&HKQwTnbvoU1XXhX%|i1B=y` zY~Z~Ai13=Cy78B>1PUJ8hqLOa6r2!`vySVpr)1<=2E2SZzA^}BJ5x^fQ`RqY6gYP? zl3XA;8N)bYn5MecC?SKw`^>~WdFD+S3)0P`yIv`zzPu}u_DL9`m3AB?b6);g$Tr)= z(=THB@r>729B#eZo+m4lB#%y&Gx94{CYg}}Nwhb}AbM%jPaQFk`Bk44n&u-RL5|p8 z+IaFOdxP*WGJ!Fd-;CKc{-~RYaF7E(SbaPzP6tqhM}_;UrI#9_;YR-#h&O z%0EgW!{1xXAe9p`&n8OL`dWmsL0AhmCxk1k_nKNIB~!|*5qGu8*6B!mNiHe12K@#N zzdT_ks}SVCFk^Y+4RgXpE!Q8%P4Yj-=C;x`oIGwza1xFw++8;2g~P)2Tqv!3NxM;? zR?piTL?X!-RBl6Xs2X!&_P!*@%b(O0*MYTbUTm<0N($|d+Q*j+1?k2<)>iBVMj0xy>O?RMSo>|Fdw0A1gY6)? zW31m1kut=rbF;q5x!P$4bv2}`G1oR=Q98U^3j;cuudX0qDB=Ta`hlbEA zVQ??>rE;_iMBpky!sI`*i&;)C(>1<4DJD31JutS7=NmT@RhY|(3bj)2NCg=nUo2sR z3D1}E9lTpcUzHgqC5^3If)uO;Kn zUek__=ilX&ZLEAqyQo>{Kmg;t`Dxei$S?`8w_L7lA7$;7;xKJYI|6oo{8$W!HgpdV z(HTq2^&V}83tCr*gDr>!3iW1#A0xw6s_({8HVV75|JjMy+)Ia)H4N!++~K?-Z*O$? zh)8y0K1%_i$EM$)ssXe4C_0B^jVBf`+sv~P=9L_p&E1_?bQ#jPB&m;Yqe2gq8UNbj z$L78yIIx{rPMy#1;{x1sl(Oj079}RJ(CLKLd@yZg3=kJ@?-dx;wa#f`1hpc!DnG1W z<)2C9Wz^}6dqHZVJ#TS~BZfCu>c*!tMsqN(ETgk>T9;Fj7o*8GiKJ)vZMwlS8RC=>nFaw$3 zR?*`PnQOKezRGlkmm~HxA2PouU=sIyLOo%?#$0^;xEIRVUGtu;417~3`(N@!w=PIu zt*{=}tB8unoM9~6e0OtrK00poZsRT2DA{dNpH0yL)zr)3OR_IO3d1mrSaeADsl^1_ zm=k|}_V-I;LZE{$&JL~FK;Hez6}(k`VS_xnCC5i1)D88A1_(U_3RJ?gXzT6ZncP4* zsxL_f35ol3EYN>TQC}NEPbsnB0R^oN!p|G@P*I>8>WT(3;Myb-V5K*dr}}^&WU$`s zEE0qPUZ|r)T%d8=m&Bcf2u1+wmo#hfW8L9oy?HY0e||DJc)b*b^q3x4;JxD|LvK=X z90i0;l%0#xx7|sA0@dL4y-@{@vK%2u(|*5SEB*g}t^cdLb%$GT_wQbS|K|lNa-WvR zp3r$i@|MS0$YxvQdI8h3ZnadqA&!UfJS&5l7??}l#!Fum3pv3>8Um=E4TFOuJv}{V z5foOl0HM(*#?G)r+~0QC&e81arBC=GE~U+|mH$>Vi{bcm$~4{hQTutbRjt3}w6F{5 zOfZk#<_Jm;>f`BiDFfyTMq77LUy|`szRa%~dJa@M>VA$Sm)$0z+rQlVBd42$H7fVw zPxPKws?E7-tEmtLoKCifb_^xNU2$0AZiNV6z={Os_A zT%6La%+{>^>U85hSeoJQrAcAWQ)9&ibf`Mdo3kC;hrBA4weh4w(EgKx7lZhAw)W*G zxgQx>o*y;-gi>lM-!6}!+U12R9#{?jbW7Urzfthbe=Ht4PC-*&zP0GF(K^!~>%@J$ zO642EJ3BUdWB5qi<%X_+;ybeZniJR{KeH#(6+@&g>J&Rctjpi!7U69^gq@G4Eh?-5 zG*}H#P``Hr4R*?4uVj@pfzcED*=X<;G2H7ix|l*@vIKOV7%;=6EH%JeR4@G0*EC=@ zz;W3c55AqzW@H(RdnQAQ^+ENw&6`Ka^nT#v801b#x>0=h|B@>uO5DA7$6LQR(E4ZHCz2q*eWYm*v6H?=5KUx4)~5_0CCH5IPv*a@aXd`$uF<=vkP z?U!C#I(5m}0&R&0oPRHzT#LrF0@X|h#)>Lpvi!e1mhS=Q6rT&70-dITSr9=RY5j@> z5yUiA6Y!0DXz;}sYeN1#F^tzDUGqnzU)(djk&|Ubtiui$jelKquRygOH4jGt6Ap7i zT3s17gY7mAkqmhP+q^<qJt($x}BRI)&Lb=%Sip&9UN8xzLKDZ(1y@7BKG&>!0Q=$m~d0ODI6jP zC3#;G#zA83Blh1#;<|OJKfXE(C?v_F4cWKQ7oS5V8}-B>Xi9uopD;0@H-W@sB_-DP z1MLj8WWAwYqrisV_%cWkC{4n4hCYx)>*${Z2)Mf1YJ3HejA)t$h=$w5FB(HWt@5I|$v3}AR?mzL1X%*-mPsz6bx=MfxR ze)W=_%%N}wCSFC|YAhR=!d!V&o!h8d1AAf%M zNM$x9SG&Ava&pq0fDZ0^I1n<-1E`WNRgEteuii{I-Y$vm>&jHO?GFtVcP9px zQI7N4DdM-s`yY!F_hGvjY@Zn9+|}#I03zkrFXF-Iq#Or9{q}>#N1dWpkSS8ObGsIbe$=Q2%p+zrouU~`5`HS$K9yh@2eDxq39ZsGh)(l5S(c-J-g+>&nA9jb}+@e0D7Lvs{ z&jnsnenJV467XI;wJ0xZ_-;bPe;&JFSy)1`3-?-g;@gD7^^8Y#9#f1v=Lf4#2v~4I zP2T)CtLDu$?f%iGRJTqgzOaX@{^pxnT8PHn*-WGJcmE(C{rCMo#eY4z6?}>+@f%_|Vw4 z$cRUA?4#oBc=|OD6C-Ktjep=XMN$lCxa;Nb|NK(H?@=-7Z2iiB7=EBvib>EED3xDi zdfC{40AC{VzA@C9NU_Gu{=Lhhdq5nhP8wfbT$hFQd=h|Exok&%60kh`^b>$nw*Z7> z`{f>*mMA*?ODj-+SQ*Ow>>_^UzVIW4*KtYlhQb{bE}(Y1^HGvxd+TXy_|jg0)mqH+ z?3X^(k!5Q*#Z9R%_TDqhqqo;DEipDKs~r~QZ(yPU!D;#6`|Rbc>7*?RdsQK<5V(Dm;fLrVScFZLu z9L1SNP|xXrRn}@Opt#&N!{nV6YcyvWfomasSK#7;Q7d8=^48KW3bOcqcIOO)I zaj5eYgH&I-ogwz|UK1!5#cXvpK{>ZR2{>jgGiB+|Zj(S&zbWOW&I3r}k&;(LpaktN zU^$cuc$npjKvMUvVN*Z|FD5-9T?1fb^+wF= zN!lpH4W~6IRGs?AfpXw)Ty?-05>5XI$!em4fP`JGq0aMsr~dkqJ1gyyT?gDxCOV*z z2A*0ZWZWnM-T$ikq#$tY(SqApltQv)UY7;mEHxIMCXG=+$s^3 z6wACmgyY%qQNyR-7=dY`lt@ksYHPE;$mSw0ToVwrhH*T)KI>BlwT@((&QAhZzi7`j z%unnxm7{MZDSVKgM?&@JuGx3lPH*T-s}Q$}KPv*=2l#y*pl^#;z;fU<-?FRo$ZkX4&>R;xrM z88k?|e^_mVz-Lrt+LpkClb_)8HV`l4i9-NLzO*f!iVlE}UYs2%G7YQh0C`4UPRnqD zU4~2;DW*eWVj_=0!!3X}KS2c@$pdu50Eb@krrb5NayE)IBGsKrT)Xu|iKJDv&&z1As`ZbaX|Pfpp&Q;;p}K)>AJ%P!4f zn)sl@KeoWpOfgHBO5I^WW#)1I?amXeQWYU(S`-%SjlRJgU%hWng0MSYyb!(tu{y%` z9L{Zyu^cYQQ6P)6mCEl6u^3dNV^{u^c{N_FnDB5D5k>l}vEvjthC{&`k3U<0_M7MQ zynA8Kv<()Acjq5f53CKo7&P0M%B^7v5JHk4<?di0FL{+A1xBPIOGm4^~1EsfxiUMIRN~76QG_8zo97wdFLM~l9KG|*_n@j=IPf* zb|vr|1K{qdf~JZ0063pL3jYyYv-oB9HVX5-QMm|8bzd^;pYNo72ta(#yQ=Z5i}EH1 zAVKgSE~6-%$AjH-d#ZzK2lUNjLLHctBiCL?EDN7-V3S$uW#Q}9d4{oCkI}z^!zu*L zqxYmy?m$0Kix96k>Fg~^T|2GwoaNW=Y#drTaRzx1K-0@DgDDQzAz4O-=D()FPzf|o z$9JqVk(B_z?gf9#kh}9>9-Yr-u`|QGHN4L2s+5PvO}S^Xaz00*mae81C0Ve}Id|_9 z&+y{U%X3WVr>+G5_!vgxjgJ$7o*%gxduJLkp-9EYea%vh9c)5E7w=hP>aT$Dytzqg z27ed(W;RBPE1Wk!$-hhydsC!!Qf@Jb3Ao%<^)UQUYVUeq>;}uLu(TWib_&2Sq2}|b zx5NJq9zKf_H7f57O1ugY!()_*Dkp_EnxNU`OX^&6Fkb4%AnO%fh5%wN9dbaua8&og z?HQ;hz6C7;#-OhLz-5ctX1XqS4ZmQe)=;z5f`pJo2`(nE!T*TGs3m0WIK`*=syOAQ z3Fc5PoILHJ0?5!HeDoo_hBsN9vj73N$yX)f+FMEn2+}xg_?@I@0SSSP!4vk%fMhECH4KJFoo4;^;;A?K zk?TQkM7yYS7Bxu|W^-w_u<&;~R??t=N$)EiUp0fHo{2gdRyaOu#r*Ik0i#D?e*e+1 zLEsyvz=1C>Qrq@c78NGE_Q37k>~^q!{6I30C2+l|;c$3`cp% z4QALdp-R=&jK)m?Kru-0ckz&r!(VTXsXuB3HEuO?5-Iqe=V(M!wEf<~Q=M8j^}wK0 zY-D3Pj5~my1G@8Y$6zFm^d(|~#1Hx14hDtdlGy`N>j8)x;8+U4B2#L_v6_ zVb8vN*QmnIfd0VaalZF*x`fQ} z)@?nr`R^~{>nzD4k5)4tW_88$Vlu_+&lkQamXFo{mHD>gGnYX1f9R0@zzOs2@KqewM=-oFyHiS+_AqNZe z83ZzDb?v(DFX4c0ADkul##pJsBNT60gYQSo8#@>bLN65onTXVFq>7r!*4 zE~(Yg;%_cJm@a7RUlU$FeBXl6aBg!^7z$_1@_9q)6sF zSfTTvu=>VrBSZN#T$;Ztl-Jk_f_~sEbrCW*L5?sxL(grUkGW7uojQBEW5KA00XtXa zWy$lEWa6#4El$es%BuCtkoDY^4NrMI$0vLB%E|61={1gU)w!GwNV}e%ai$ZS`|m@} z7_5tWt1lBYbtcTxRAcmaEYqw6^<@nZFTW$f&42d_mra2504F;Itc}2b!yHy~JR1=D*W`;=c44jM!}Z7c}LDkVaS( zWpd&@OVAst1J{i@*Z%1v#2B>QE7;BDBbS9)p--OUKImwq7Q{YImelCh5*E$TNoaRk zOMN^9XWg1jBfa8KTw#&!?<9iNuYNb=^LS`zfc+-pTafn!DGv1FN>m`7`^kD2 z#i8-m8A1Zzf<2rr%Bd|X$J0f4a=preDqS;G)Eqq6>(%+;*B&rf6d~Hn8L5_AFFbI0^^HMS#ne!Ic#j7O*9-<@7tSn~p-z`&;H)<2 z;!vM8^eX3ixhE>lH<6|=UKKw@)PyTlU9rjI>#s);lt2P{5K&SA`4F2ew)ow#Aw<4@ zJ+zWLfFt|JmYxx;+BP^*K-t?2PmIC(}qW8C-kzb+je{s+o*GO*wHVlAQ9 zpBNcZC(#EkZpl+1nNF-rc64NX(BdfjF}$q^7a45+Rc=12aIUL#{+R5$fylkS z^(_pYj@dU&b&mvUHNMuK!drW@quwRzU6ZcKaHKP7(8HF@zeWU z@ivP$WF5TkFktx7=Z}RCxIKJNUmLWi<8d~XlCe=xVUP{|J{ZgpQ5Oh@cm3B(%z(4B zV0~cvBW;XhD68JjPh93B7qj?DZX8imLgHY3BXF&sdrwnuRIuki<7>yd^&5?fwm4>K zk(*7H8ZN4(ZkHHm zu=NA1g9m7}>vBm)sxc}|FMiJ76z$4z80|evZE&ZcKYrJ`G!aiEi%Wi`04`z_XLM58 z;36hMoG$3|_ab)o5=H)8axX*0aPP{Cudeyr`Fxecw(Sd|XxORk%XG1Ezx~V`{?N!LQY$sy@(E50lMm;34n&NBOKEm> zeVGU;sCJ1bT;;*u-;-h8IU-V7pDLw?y<4x#db4_j!R{We@7rgN-!83x_qhodHcQM1fI~JAovC^i4c_598Q_q^TOTr7+!#~*B#8zS7 zJ`1!=Gd3Xn*mj|srd-(|fpW<%SHbC4qS0W1zT#G4jmOX_8M?`|NY^{`622{#78Wke z3;GvNLvsuR&kNktJr~MQSU{tU1C!DP+n1=*oO&p}-*pnFbD9+5TApnr8O%02?{Zf$@74CgcP5M)k__&T zM+$f9wzyhQNH_}7kyx+7aAr9B@ZmXrPg_aKgMc*}6hqXY?vrcWuw3n{uZwXQDic8!_{ktLYvv*>ZzK-^eZA!>o zCl}$VHRvSj*f8^C3)l+5+wVbAIKHFfXtj_0cyAjO zKrOO_yVT^TRUrk-^rh)eDUQ=71xbR6v;}Rta_s^5<`0TEa${!Eo!we zujxJSNRm5Y=T|L~24^qw6$2c0JC=V(eypkAr1e>?b!M6i5A5E>YYSG3j%{?X6}+%~ zN*jc;i48F(z6-PutGP;%?_#E1v?D3En>4 zT5poyOs7x^Tsu0yUx!8pb}2nQJQB-)&eHMBnr@vxp3}5zVVGThq!};XCx(j4Ej5=) zBv3LYWEs#Xmn>;^>lOoRRH4mn=k`i`{G9y@OR!SCu{_T}2Uly=@N~hmH$5Kxb#>Nw zG_q@xPxg^8E_tmZ^s8#dVAxBiOX3+U-A}W6ZP8E8d@enFTSC7ewp|U=`bWy26McYe3cX_8G&)6i?pgqCuT_0uozV z^{<3XGW0g(kwsDT4)*~tk-JYsvyle&;HmpZa(Q=$P=yVEX0jr#0H|!gy*=*T8`sW~=X!16K!_Nu5p; zXbk5so=;q9`^6WvJ7((r`00ARCsa)>oG<#$=zNigu2E0B+9=!o@2Y6WddDd z1}Ys^6xa})4jXarLfA>(Gp)Ziiy42U(>9}~tVg*K$iBb+%Sas=VH5z2!^mC-2i=%) z<^m|oZGmcG&QX^s7DX^NU`1dFsJ;Ua_3k5JWwYe`ly}% z)o%CX(m+EdZs-9dQp1IupBMl{Mdto2toW`r4_?cz7awRv1`+ia8=cq~@W@iVBeK!< zT? ztLlBrvKk!kS#ZDpRv_#FuBj)S_1rE>tBU4xm&}>1)|o%@K8zJ8V{c=)Xs=H5rc!wK z)*wIt;nw|`zDkbNR{y$^H|IkNhin3KV-x2q^3$|$&Yjt)l8}J-t5VKe^C}N%5}l@7 zG;WWJDQJ_LRJ@t@@q#g-l_1tR64|i4MpW1-LBY?X+e7 z4`*`nzgo?f`v$3CCoYE=Z`v|E7{OTf#m%ChxfU`F6!3~jtKWPW7RsR**q4eYRbis@y1t2H1IqnS6-dyZoJByai0mx5|SvCCbw3Thg3yD zYRxxN4;0C@ULK#H-T&@qnf{^RxxdihwQE#sHcUHKqDcTU;aba@?ux2oY^N*esf0bZ zbfdIkG9mazPqVJgJr#K<(0N3~l7%5@ z+W-8&(UHx3q;l9v0b1%-W*ilYIF{s&RF5+qrS?6pGg3Kq5}zyVmR)&+%G@~BeDE+rRll)v9>SKJngjMM*#!f z#>)q2d(oa*hf$l{+wbLoi-@v?ZWg=t+EoEt7?H1B5U5i8HDSalmt zA81;fw6Aj`!nwi}kHIycboUG%O+jRNY2w|T1kT>M7(Tt`eVwMs%Jo#DYS6i8XDd0F zD~O=rU74?U;q$)XBj+;9eAKZ{Q>(S?rUJMuF*uasdT0ci5rm+^15A}}^PI!pFY~Jf zgn|;=<;(o%_<~BnSw1bN%Af@SSw1TZtt8-pA*oyz zwoXim4yisFREE9qp5?LPcSPw;;gwmevOK&PhlJla7@c>g@W!hfDHKBsc-~`d@LB}q zmK!Y(Hr)qxeqnLFyW;)>tm+rmdkl|(n`G5{OrG!7#m>?8R2&FR_xnC(&;r~?dJUme z5NLC;mxnFm1eHKgmY&|9CAsv$R!&VhSJ&om;>d)G84DTj1dreK>HD~yvpi97)4CnN zB!M?SYS((f+pk2suMM%vVO@L`p?cTSwl{HFMaH0=s;aNdvcpl<0C~tj>iVVo?hLn6 z<~rzY+8r#3*_I)bumliNy}Mj$K@7k=cN5d7vC-9O^dtl2ObI_E^q(Z0K)x>lieAdq zR+@xNs9KmoiU>d}pE1R*0nyu`WI{TY(EjEX9Z=o-z6)3PoT z3Sg;X?W^1K#qp+N=KWtCW9c0KCCoc(^kS?*qYG3!!u}N^T=h zf07D7-IU;cB2W^7JTN4k`r&nM66I}d z7~=UWEd<`JN)K(9cLiG}x;+i4|Ikv(sv9k$KyD@CqWJL9o3be4;dAf1oy)9Z^>L?t zES#(F@6v@cUfSsB!$xd|X%8OAiYL*5wmsgnPsU}7n1>!NG|{0M z=ZWWn%hu3Cx8n_DoK^ZI`Lvg6URcYgRl-BFtvF9^lwO#RJYC0rwgwyYybGt~AE_@> zNa1M*)Z^0c-{xJNv6&mKE;tau)5OE3hiRddajq?wKj8cF70`dcV^ynNn_miQ|lPDWs?} zB>p>^K#EL=R=8QZp?Qc7*+JC513|;8Vd0=VM}hTl*)Kq!lgwox5H-smGb)N&1SOGI zTtG`ukc23p4TSm@@%(HeX3;8`T{nV*YCRj{VD``kVm@MJsx4SfP}WP)w?$_PpuQ^s z)C*IeJUFa26_2D0ZZ7e?curryf!0%bw9<|luP^-uDFgXNL%CFkACRLux1~p9Fw05S zQ)NGD=8O8?=pm823q+*H-a1R1frGfj?r#!DCyLY{`=G|JOX@BC$-t0R9wbJObNUaW z3PWlK=u2(@X8R}kq+qDGP3c7|dT*gh{@Ah6+0JwiZ7cyMtnjd{h~1g2 zcVF4AhX#`aKN+?$$-Y{w=cNj#4EeO&>ehzQvO1S0&}b8qj_WSD)nFhy`B}Xznpc6G z=&Gvd{p+>IFOU*YutbY$GtPG>G1|Jf8u&rG4c}=mT!ey4DSEAP=(TuRZ?Vfhykmka z$L!Cfc7!UTRsT^CQXEMoH4n0$MII|7K`FD`mnOfj^S`r=0#^x`Y2eE%&q4wwH0}U2 zZ|?P0e@s;2Hgp=DDR#F7(=1-;*Rks>fzd7Pt)LY+?w76K%V(Dh>-k31d4Ab+@W-yui zwHUggA2s$Cy&)Bb5WSH5$;Ys;u%qgu^OV4*eJt)%bcjyV2ndm`YUm8;zP({z;l&Ey z))mJq>%~>AUUrm);L%B|E*tlytycO4QlDu~=?hz4>@aCol8drz9CU&yMmV;}DE%n+E!OHL>` zQ9L#WRn+tvydwta1Yze}%BgylJ>j!e(_eE8=-um`;cf_jHZCi0Zo5Ahh&Vob8PDL` zkib?9W9(lAP805&rrCDDltrcSW>bGDfzAH9n}9D*)|b08lJUaz;D&#q+9pDbf@i7! z`kc>oUG5_lc6G#j7UI{*;^4C|l2u`sjRVA}bAO3p~sB5r&UKlksec|fH3700jk53YvR3>c#>S6fftMi4`;;>%;cyj6 zqJHZKy(F&amSOkj=TQjBA040-7Od9Y~{yEg?&evpnuNtxJYw@@%q!Uc)&i5w}Y&S1$nnKQ4ptjs4ueDOl%o#lb3<&pr5}I_eH- z-Bl2pplx-V)|O4aVgh=s@m$S&jv&i&&M9%(bhKS zzgD~^Z*SQcU=3ei6$Q5gMS5lj_Di*}*?x|PA^PRAHS!v{m=X2d>>(ip{sb}3dk;r-*SWn(CBG9A|KuV;N>k$)bG7`L8sN`zPeZ* ztmQ=7|NJVIvo}sOZY6KXqFFTj&y^cR-*ps~;Y`IkLkt@mO0{KJJ4FM#%^iZglS_Nn z=lmg<3-!AFvRI^=z=vO7K+Mklp>dq~0wpRjt7r#C6XU@pqm#wWLKTI>?G5M5B=&;Q z6u4|Oj}mm+%pJtvESM~d$2(?&yRM)qz0Fub!3(z+cP|~K%@g6SFx=fhiFL1Uu%!fq zU<7cBnT65Zk7r%vFmTXSSG&%xey_A@;J(Nf#OIvFhAprdl3vYSF7uI502g>kn zNAL9Qw@z9?cRCQTii=C)eji(L%x}8dhp-wkFUI5cG4=$1;b=1>k$Cgb$scYrk?@?{ z*k!f)nCjmUPuESQUeZFX;qxlg+U1=Oi#ONCzEB(Y@53RC2HpU&-9GxDxEJprYDKCG zPF8LUoLL!qgiV*T-#d>JE(9IkU9uVm>L;T;)4923o>@4*sc2U!0h=+74+4tr7PIH& zK`)sQ*NK^#e%}-iyN2NoIFAsht>8DA*c;iufMGXqo=hDeORdo7MpvM}d8%1=!S8mV z&oYqsO8dh$SXD^mjVFn&-QsM@yT8z*E2UYu{*0oI%XtZ>{{CILsZnQ6Anp7+{0p~b zrdFroCqfzojP<@kZ(*|;t`pl)@`Sqv-3$0%kpC9rP(69Q*jOuQ_hdz~+?v|`bjbBWDbO_!9Px(UO)YI1NK@=G zCvooWVdPT&-$TRtd)p{_tUkSVgt9>-*G3Z386u@e^oz;?*y2HptvGsf+3+x1 zJVkbX++W=sYHtkrvZD@al-~WG)$FToWo0sFHFvCrQjVjC@04X~(;F@9X+ z%SiYW?Gh@xLmf8y3LAyt{rJLu5-wvrbV&Go4kV-fdC5>-2cJ}bWh~)_3bo&(@y%yK@nuN7Axcmgz%=I3;!r5cSd$ zBMLG->1Ly>gg`y&y{9dF5_|FeloTEK6&W{eh@FY&twZeIS*u^1Sn%6r{3_OgL~}qQ zSh?PkS7&i25UGRX)+bcvVFN|&I^5g9-fvcbfW8v_%n=u82MKsTNMv~OD}h>NYq```gQ5+IoSp-UkaxYUZvewUz!wUNUv zUt+i1Y7sOf(UMBoiXlWvKE=+OsNKcE9Nzs|*f213@QXM&$~@zNy_@mzA$Tv)9)T#Vci?2ZNFPW%03iSLEF6m54~pAyRSbBt!4`5dmeXr(`W zN>`xBxl9N+)HCidI*O&gaOpLX94aSuep<5SfhB9H4Hsuivd@mgFG46-{6!yRCrX5NAtBek|8DpC z9FG^ue9D;<27h;J^`~_TRs-_!3!0be>ND$-Nhy&96EYeVmh; zyFEjxrhqd|yRqDIROk*+-B^GMZBC9F&_`=nzWawq{ij(4D1;b`P?o=h*5YZu>%#VS z%Jzq>a_SQ;DjGJk9i4vVA2rC=jOsU!31sbe@Ekp03jw1--kg!aT;a*?}%x^ zx3Uja@aOaL|H-kkdgYa$!a*O(IUYB|)eO#WlRwymX8syiP2Z8ta{v=AP)(1Db=uK< z3=LIv@U?7ynFE0&LAYgcJa{!F-4in8yc^4MTCV6K0IQD^dA<7)ZfySz?Vn>c~o21fZh%nHJl=fJu0L5n<`_& zi`O1u3`@Y*7h}CYM-T#K1;ZvI0|=`iXcaCnDD&EKUFs*O<@pE0iD|fH8ZjbF#avTP zCxVawf)aeR-7~^VSIl62+b_Yv9BEUva0&WV9xWo@)3bGW9}h)ho?cwKabN zFtYXcMjJjJ#7H0)96$kkl414_GMOH4U?ymAgLIN2j5t8X*;=K;|6Z(``%zFd%~-$> zMvGyeL)tm@Qpx=w)-O1>M~q~C1?sTaqRhEg^wwnSbUp(Xp_$Xd{*(TTrHm@ zF{QQQwo9s3K?}X+(Z4?lXF^qgs>3q^8T!HOr{kdBNyZDJixa%rsK=9ruac7sMVCu7 zKT@}5Dk(mPV_bc&lZ>>%IFx_^`3Iv#fh0%HI`0Nve=2++Y&0tje`MbLn|B~oX^ zav*CTa8X8gePF-PH};Lko#5lri4@4t^a?H+%FI#rA_U-8?%#(G?CjJ|KJw# z9f5DbI;SrCi*!0uu!nl=-&=c*K|$esSpo!Fjw#VBHhKh~Qwt-#TnN%*eqbE=F&>ER zX>@AsOcu31p7rzXzcOg2kszZb0orN2q;=XXRj|t)qk>flA3dwF#OM)|r6+xw4JgCe zk{y*KY|38^9?CnqNTeom#K(tCmgOi;0>KH;Xn*GRLitdlPqL0d)QZo>Hnuy_wm5#5 z&Wpex2NawI>FtuaPk>f>02_${T1eD!nfw+ zL*sJqP1dpPVZ@wzr?#I?+e1ZAg*X5%ACh3E$^)Pj;}px{rV<62eVb$rJ1;ueb6^isfBw(}7h z!85EcRwv@KV*Yjuj*oeEr~Q3Zz8bEsy=vT}aqc5UyIzf=ao0aiKK@9r*u(<=zEbubwq%)Z@hk0oLY!T$V=7Ff;O{T@eEw^?Y;Hf+NcQM9^o z++ddDGWUx&mZk#^NIa(znMwI)`?WQWs6bm*B*;YuNRNEOHaz;bPfx7pF-*&J#BQd_ zfz9KG6f28(^7=X&moLm_Z>xIL4e4Q*<(6GffFzahTwhZV@)G&Gog;Wz;zWMvU;by3 zBEcsqa7ILcJj5Kp&6FIFf>{n&Rec=q*#ixU5@mw9`5(GoLbklFXsoWKQUM3c4cqd% zby#i{@7t4l6F80rr>8i5N}Iy$@a8HOA5aRo5UT-B;kwQ#nnR5#Jc*YYJk#bqpF6QI z@^KEUjtqu)5o(ifdj73|=v#_=+-7nYvZb7DBz@o51xl(m^Yy`?hiNz&;}gy8^)h_x zCb+`sYJ`5gX}iWI$8|x_pb0_XH9d?SCXug5)rm3QE`g4^W<-1@t~@o>6fXA> z=hO?3vI$>;=R@XGXh0^4n1{L62~&JbB>%;F0Rbe~wti{2KG@i$7@x!#2pq11vXso! z5w#z!N&BZa;BtsdhHp=U8Vt24Na>E=Pta#K=ur(%7a(~>@KiF`-AQSGQs4flRqBLX zfVln?-1bZv$X4(f`3DF&jkV5TP=hD8DV0#F3|CvB{zBN`z z00*c*gB=m{Wm|yiA8zOehY8;I#3|qJ@9cz&2Vxi=EVX>15_-7-r1F&f4oe&TDQ2_P zEO2c)uy8a4BKMvNP*xuTg?OEMXKeb{Uw#2AE#LqsKBzAvfdXBSJj{QBE~i{IomHI{tXWjkBy;2_}vXrF^bB(j#>dZ=C*=x@sSVYl7Y%0qq~p_rze_YOUdO$Uvv z=RTC4E~xm5K85Gnf?)>uX-Xsnv36a*=J`O)t>q?z;$jvB7MgOEB>D&QwQPrQIUe)S zFp7!QGOugGzHg*L7~p~eRT|6E(*^c)R4kSgqAM4NYCbjcDM8nO&5=Le;H$A=`Qx-7 zg1}!1aYEUTxH?|Ioq?SU`zx%ETNIMVCRXzgtc!wjj5( zj*b#2Dk{3(UU37HsAzsilcx&FglPgU(*7veN=2%S@P54*#_Uh`Q#moc%fjpbXaVGb ze_-5~65jVcgncQzIO=73QsqYN(Lx^1T-Gy$rKP1BKV8e4L$L=@=!(FrQ$^W8D#Lr^ zY8DE%;G09De_r)O6kKPZhZsNW)cm=@=lUoa#z@kNI8)W0=X0;N5Rd)&`BjB%*+UY# zfOxPCv{gwt?q}r7r;^_HCL{mX+0R0?>LM~-fUVSK<`akG<}y7=j&6McjMJ~B{WM)R z_DQ1_-)Qg1Pj@Hg+3%VxrDTy`YGswb7pusuTMtzZc^@e4q=?}Bu)L2<(XJ_=i~@=5 z*#-?)3$TjSB^tt>-)r-1P3iG>pYt=-E59CWYaU`kcCM$W=HqWGShSYT&a5vuXeC9nQu)WLP!v*v?yR*|S5AG#y*S500~cs#2IAK@9~kDj7sf6a8Vu_F z{8;%~Lh!~Tp$W;=!sld%pJ;)*baT^_#_x&a+efTg1qSP(lPG!dgY4{gsqUpI>{O3~ zuuf6B9ayv(H4GDzj_^S*)oR2emO)SO^ql4Ttmyi1u3W{d)J6>iTSWe0&veSS@d1)qysyXWE6}RmMdiuEhfQ1A; z`;|=N&Pa@RaQGCoxmUrPtY_Qd{?6<4_!f|hfTRG-xgbS&0XSN|kpdU4t5@5$aPsuH zQ~mFuV<<6fxFo*VVE4kFDIhfP)yZ&Qd1-I`IRS$+6vU+0aa(L*L$tPt4fCMW6-%wU{_&R* zsC4!*Mi#?w4u<6-<&y|g1?Ou}f#gaB^K3`0*rN@@@ZCUiRS&0x*|0(2eW`CK3m`hE z+@EZT70!OEKT0+emC-QgHAC~hWTbO2xnk?2_levg6>>?(Shi5#bW!tJ8Z&o2St}IL zU>t$rX^jOEhENN9xvC#5&x!zLftBgr1dOde=UX9n)-yM##(1;^2b`^;U+aay z9)s5QP@tYmiHFRcgwy@Qy0u~9+n;6XXhMmwe2_3nrW4Jn3J zTJr9t)8WOmWBh8fT|P;f9*yZ~a)Ij4#H$s>C-36;OsY$V9c|soUOETA@2k6of4Frxf4d zZD`|)dv28ADrvlZs#sEOViH2el8iAt@&|cc6CDG3Mky3@tX{^%*4ftiMyW{Caozs% z{Oqx7Us6tMgI<$2qQ!ifg5@}BP}Qz3`$nMN9fX7Z%nnqzDR-!_yt)~4(?oIcZn@bp)MDFMbHGkVgOKZu!%tbQK zqj>mxZz}Kg%cr^Zx%FbAZ;{Y{vzj~Qo!_TUF3#WYRrD?hoDa+;P45dvMb%w*B<=IN zoZa^E?|bwnsC(!pgVP{r*=GX&5<}5pt;_czB1*v`HYFdfx3_ly(en)KJs!G}+WaSm z?LxOxvkMrw(35jELn)sKbcmt_b4rIcHhH*H{rMBapRUpSZ(|y;juv}6J)?ddt`;9< zqJVs()~fA4X!9BzMg}b~LyNLs*E~x1-@Q=(IPc@}l`^;X5;Nr*NOf%C$(iIycMe47 z_&-jH*DQt>R?t5rWYn#5yKk(p-y5PSP;1m#8tyi-o%rw@cHI>Nkh-qyfH;X4Oh-{V z;M6ab<&KST9mtIMc|ScS;uL|~A;58{uct%|G3J@uiz?K$gwOeL*1LZu+6zk4)CyDL zG;Y0@#g}4+5UAMn8pIE!6;z+eiqcuc632N}Lcitth(ro;(|KRjK(m3z= z`EG>Ccrm0N`2i}xh7!v3itiV>oG@aa%Yi>(NwWEEPn`qwUTaszT)j+jX}CT6eb88; z+GWBN!?mK^T5oXCu0Vm1T!nq(n=o57{`Bv<<<Dl0bnIOIANXlI-#igg$$TMFvm^B*3i#`C@v{@&^eW8*h;$hjfN4XU+b7$OEJT_OB~6JW$1=&S>6?rH_#pEy{f*@pvRg7yYUirie9a^f@OgPT^H| z%E!;E`16wL{I6KH63lxB?>$d69>1bIiOuM}MG}&}7#_|4GiBKQH9+u@Rs{{Xvxv{c zF}4aaU4e}YBh}C1tnvEi_0mK9Ul9~mKRs}$X*W8li*Gq&tqBCy($nv5Ysl4`WlM*+ z&@5Rm-*&ZDZmxnijO^!U5i^x5drpTdQdOPw($NlL(V76_eO5K|*2f*ZQywZRt)aZ*H4AeA zMBA6)#1#4P4}U|4Tv^I%aA+7;z9HeN&z6C<@0{C8U!y_xJOY!5&bhg^(E^An-GiKy z#DEXRo!*m@tuhY7c7uq1Vcjl*<~|!+NmvwYr7-~=vS(>Ffrkb$ImOz2;g1kNblV`R zF|2Rg8rnjxZCD3uY+1mlo*kBM4ZYT*FR3ld(v*2pk~q%!y%+1B)rs)$cegPUTnjFo z^0+|5Js|c_Om{GT>O8LEyTV_A7(?r&uNruLEL&`q=JPY-p_1tXaKxt{$y1?=>q=B^lie{2K&T(gP#?vbs>K6c zU#X_A3PbtU6m(zDx+nvP&fh=%-(Qc_LZ|c+ab!$e;XdtMIZ z8+&jj5|&~I#M{wh)I2n#*9l>s{RlJCoE)}81YXQrY%l)(8(HDzLe^Rr@y`otpQJFR zTmh%>nsTG;H=U2ZerGFC2H!zf68v!zm64(v9L1()31sje-$w=`Ku!Qn9n+9&I(vMB zT>sMfLj`I|lJ3JM7VQHb8_E;&(V4NY_|Q2Q9K&`m?EaSshV3QkS-K`lAHh-~0c!>f zw*6aUei}>l1lGBfm0UfjIdS3p*NL~+@4%SL06hDD{+RfxSMVdlu8WYV@xspp-==@b zVO&_xeX2nx$b7R6{mPvJrhz5+CI4$05Rmuk5jBnEC8^nqLyEa$u+O~#>)X4N^p!j9 z|Mf=`Ea#BC-}m4{Q-^vi6bMVKWlB1e8UAeKNwLpy+vNmP${Q41R6s{?8TM-qH~KNkOY zeiOjQ1h>0Y0S&U@KxNpjbH`rK4OWfEr!zMd_^SCs@Xv%?2>_ebbIA%jg4WO72}SqWbjSYqfg%yR)SBi2nkXx}~#ZQ__A$h0vFo1M>Dgj0N)~u-~9hy@&A*j2=$Mjv|XgN zSnE=po4!sFJUs6VR?cbNY{QwS>YaZ`b$g6sBpLQ~=kP>I{hOvw6DctK zVZniGCn+#*AQtA9uQi6hrflfBGvlXnnDp|FD`mUrn<_tqtU+aOB$?*K9a^Jx)bL zwfi%{5`MQqVVYb&76e;~)v4p1Smjr(c=zh!>Izuvka|?y0#&FzIL>j986I6e?o&K0M)t{L0iv|LW8c42q3DNH=20})|L}CHm5IqZKeHvcMyXIl=ejKM1kK8d$u(8 znqm^ZTE4v4=hi@FdQ(MaVFx4v=e=L018IlDsMMM3e#i_N+>oO6fyaF3>YtxwIX8M1 z=x2Vsn)Z_jhW$QYEQe`wVolfLneNRg-|u#Z!B$>z&r^ec8iVl6ky_;>k9hXS)xc6N zW!iA}Xbs&?{xL2QCFRG7!clpYAy>gr#%`b8WAg4*ERw>;fYdRq)X;r#S#+U~4|vz% z^@kC+$I{8D&Mta++20P+<^GAvk~X?=Qbj!uV+H?B8-wzJM1y+)uj4b~$HL*a^YsvI5sOij zd{3{KT_4=1C?B+{vGQ#fJ0>C@#|zQRL`j3R(l`{6+ban9U{X#85&Q8NSR`?=>0~ld zo4A~i^uq`;8L;CGL%w%2|8sUgf^UcZ{h6J#ElJu>N^bJsu6KWqVK9-u0oE&5_nOz| z#mwE*h^S1Wh0#I+Pazc>HH$+A6U;XP=CM7{@Y!<2CVZ${2xYC$krrDS zuzy_p0Xfo|G02Is zJ#U|Np(zUM_KHzYuTxR;)aj?IEGgg_)OePA z7bu$K;-gKwaFHPq7{rp_e!49leta-o6^wIuvgk9O{-v7!brFxxTzzog?e%III>8u6 zM|xeNCYZzQ&HCr1LP{eElrm2vQV3#{lk=0=c;#17?rT=mlJLt7iul85t4}*= zF_-jVv>q1kR2rLuDIYvZ`mzrMY3l~DH2AeK*N@z8-B(eBDFiicHTL*5Q~CZhr)9EP!tNuQ2rR){uB$ibjgBBkEX^KH7*L4W`9ZRb$O z=k}&|P)ZLm@fq56bz$pSl-V0^pfrBEKP(Y%-~H+x7&WXt_8Bm77}gFypDQrM}pvKHF*eGMbNADXUmJ zQ1}eMz^kT%2b(#avMDc+K{)%$7@s@$jL--69v1z)$%DEm4{*1=xVU&C9ZpJ>vVFQe z`3aw14tA@!Pmu~+;rOkoe3l zJw=TAqXU}Q&Jh3F`^w(m1BzVCFC3oMePJm+YjHioz<}`X&r8hj3E5waG5)%lLiM>_ zjRN_~ zL3m3GT!9)5N~UeV<|>9!<(bP@B`!PK6(d^ZP4l`#8#;hi)AiZ<`m5W?8u0-aa4jT$-6G=w)iJPBFIUq+CIIMuu4 zD3$7%sn?YHrK4k#EZ?;SPx-7M)p47LCqdK=MQ10vz6cB5)w&Z2bw;)o9j+eSrv9k4 zKW9};7l}fRqK5EkHJP$SrVIOvAFddTUxsN^TaigeQigyEHS)}}=vdIai0@|5t>Xgr zRskUN3hqK@2FfeIa3%uS0>dlW4BFKtS3Z~Xpuu7HXN>U!fP6r;q7#+cGxG%(Je&!T zO9%5Vi-kUGq{{$FGzO+xvZ;LK7T2ef`m2D_^K1>0|;GJf(ph@C6WI3PgR~E(N3G5+e5Bu~7w=9&&9i`{tf0 zw`2RA6EbE}4(2JZv_@BSH;akZZ)vm3yz8jB3NZ+2}i?+qf@yc12cnYVYIC;vcp zD)hyAI<)HZvjY2@XKz~K8oiXB$UNc(9$zH(CtJ$Enu$o|tGsejJlwlXcDl)@No%V! zj06W*1*(dCmbwKQQtW`2syf)8u6zz+t%~IGL9Ht*lLnrPbj{&;VTVi$B)GnD$qk;D z_#iT@4UV1&e+I@|t02n^gFUyQc_fwJ@t~Kj$pBbCVz@H_0pyb$+r>uTqUWpBX&yIV(*aJ}DxDfhH z*Z!J!ZeRF&gX#;kIwt~f8ecOuO@W{C7hC@G(*1iO{q+#i(D|nMcNj^z$x48Mi{ti! zSMGHMLaEJsIL)YO00sox{ZzWlHYIE4zVRK1edIAa?4RWGJR-cw5a|RRqwz#dCN*>d zs9vLcU`T_+VJ zmH9eprCmidhRghIyaJvbgMQ=BWc3Or1T=)lKn<#@iT(o#K`~vh@Cq$gHYrI5fgLn= zR5T;Nw#Kj&%^bNSfCr1g-a`;H9Y}2fy#;okJI{D)GA=WvCna5c2$~3F0?s?lpzEW$ zAvFVBIjO<=fFAc~#B#^ThcSFMIGpo&v z(^VE+ZKPIyF&We4>0`T}y%kA-Fq8x^BRY2NeT)a~NE(Z=0*e?n4+#*Im7gqYBGFTd z1kDYQd%@rU{M#^4ai~;NR_Og{AJ8N`Xa{T;oB>6C^;Y;dI<4lola2! zk%;i@J^Phk%CKL3p@AZ>q3!pur-$C(U6(O6k@HxQ(#k}Z^bF)?h(@Qbahvp!A```~ z$N1!06$A9A#X#!GGciA;KQ7?qqe5P)Q7*n~>2|mh$V5Sq37$>JOLkF3ZUTXNmwhUx z64~h_oeCf6CsJpd?3G5#IBBQ~&r4jD<1ZdDBatLWS%rnb(Jx zAe&4vjEqU`9q=ak^x@V?jh2eiFqX7fqdJEc4@)DaEaE|jmAEEZZ$Z{fVt9Dd9Eus} ztW4czs{)}UUZrNOgsH6wQ<#qO3gwomazB+n89cn960uh4%hjO9Os9Z z&no}hX91-1bNWu+9#F~W8Ur9044Wl?=UV>h~P0SHVZX6f_;b`LE*{0DLT3@Zdj)M$%pB?Lnnb$uFmFD&S%7wpVt2?EZlO`hfW^RG}|mzjfTgQJ%dEZ(y|l?+_Z+g;7w*k0aZpI z@HiveyO*OM>7V&2KK&{khjBZkauZLtV}i!X)(S-lIC|BAnWdv)$xJ|!` zUUIXEb>nlJkJLUl9q~76P;HzIggeaGnUduG7vT<_X$^nM$&tj9a>W3GBi-pA^ka_= zwtb7yKJ<{ZIlwjgaqz!sz#DdARu4qDv9!;a$!xuiM&cY$(HuYD=85Q<*oA3b0Jjgw zQuSQ@DXIddM?_ixjeivWZ@y-0(RRGX^qSuX?*AU0BFoiS;oqGqsLxQ@W90+_DVqOn zOw5UwcC7^1Ug|hrjR3OxUxi(O(44#^td5$0)q05$Fj6?RKY)nb!A)4go646P_SksU|R5C=sd?e_m_9ZS&`oLH0Pl|XW#2*ZwxI?D4^%y)_ zXo|_l|NM$hi~m@H#13}sm!LolufA_j7(s`zx)*Jh!Pbf4CJF0O2A9RSE(Qo0a{*~e zM_(NVLGYuL#%bVH+T;@AJB?;mv|`#vYc0|C`lE}yw*U-NviDtM1u-QYY5Uh$Zitjh zT@n_k1_9>_H&B|#6Xq5kC7he9yf+;aK^R_c?HDxFVLKP#8!D$bCWyi%7aHH;JHQ5Q5ezKyR7PoNhiJ9JD3jF z;5z!%N?GQvp@@ZYy=EcQ^(<)ECnlIpv#q$+{dvz{fsA61Z;%iU1Jrf^Z^v*Z;4{0@ z5*5<{g zhQg49%z)X(*T(|!=jLik{t7jC30j{OK$!_4e5gC!ZkbZ)AT4v3k)|FvH5)$>W2R33 zZiXQd@XR?&^8SgGA7NCt5I85*#+M#F$dZ~LMGqp>yAY`ZfT@W>vSU0G@F-nriw;NH z*%<4EUus;J4soQHrL#Bs<^3I;4!?K6QFY;y)O`bRR^G8~-Wb*Du6Q4%$RU+q@pCW0 zpCrm%jn$)e`>J+<5OtpODZP~wBm>(%$@%EJx<9eh8rgpV`a_)lM1;3JCQ`BBg4|&R zZ2i&Vu29PGm~2DiBlS~%uUB-$=o8;XZ&v3c;X}O0)SiZ@8e$8SAUCj^e49`(H%U-4 z^S1A8i?e9motF1#?9gA3<+u%ZK8q&1K55D7Z5wYXpF?B5tM0?OCAqg6* zc+tKs^zQ>-;!n<&+kpw#Zz^8V!66-e-kZkg2a}MBv=rzirK>1Ip1%Xi-R8YB-zN_z zvh?s;-b1Ja5bx|9B;zDfizHo#ZbNHJS=c1C(>84cx{TTB2y)g|BZ&P_L;1#sWdD6aSW3^R##f3 z?zIA((}bV3tq<(9Lth9g>9ZoS`k}NP=F56(9*8}Z35;O4iM-l&#VVf?fh;9dC%pJM zR&(S$Koo@IHa#^yOE2Q&aQeOtkWVT`!XCHBJA4mYR_f0SS!OHneI$9Yh0E~$2m&3I zbC^7p0QUlk`zZ{an2hD2MXQJDfd;|=Y^U6e;0`mylwKb% zQrGqGfVyT;FrOhFt$_e+#qVc)g`*Ymr|#Y|^R^)dfHblsLdV5Dl zhKCS`xeyAC@!{2TVJEp-zRxy0Gji!@-O)5Hp`m!GYva=HG7!_0qiQH+eA_L+H$`7n~v6me8oKI%iKg^7EpuuzP?i7R-dpyx}i4-V1s+-lNC{b`pF$uId4$_gC+qq}9veWILB|riPyWxZMu5 z@RIkxxU!9h33;z?u2uJ6Iaik- z%S2K8+WXR$wImC&?`Rk<4Gi0jJx5T(@9)NLAt){1c@3E4?MK_Nv77F()*{OBprP`yi zBy;ei>gaP7W=5jGgI6i+SnsO*+T>*4EB8ypk73xPb?)56+-c_6O|B;p6^9F<&NH17 zcf6PUZ0{r;q=9>itfTmsen7fJ|)^V%lMYAyTGj%vREbL>G2Ja@%%j?+H`vLTf1Ilpy&XQ?*Nhx&}acrEO$Q9 z_k`5yeII^)Uu4M;6p1wUOmwY?ySMfk_BgMCux)eD)alv@0EQB1u4UtXbuREgd$^Zc zMDTsM%d!RE2sA0BISH_U=3K9Lnn+74_&)JcOakdCpa7J##FrBg12|l6RSyW%naC0E z-J2?m0pv+U(*e;Faqp{nb3o`D@P`$*dNU8#*I}D3rewa$rJ_RGt-O_+$Qf-2Vj|FX zmxAQ^bud(a;O==qMGR!>MY$lJoWbe^4uI^O_5pkAu4`#Uh*$2PVDMAe7LDu@Q}>TH zgP5Kr2Yx;8!aR@=){Md3orpESmca<1SOYPFxoTthpOyNRM~?th3}ABns6@O$K)(35 zHVbut(issZ)rX=QVgMWN1<2AYfcgo$?b2%|AcoutWGewA=mPG3SyFmW>bob)e z-FaP|pZA=PI8!K^JJUOmn?{*T{wqCEv=HhsM$c+UzQ1s&jzY!b^|6$aP*=PzA3B$@ z#P@r4g7vN8NO#RzC1ETe(8vc@sY@*a?xVp25ULgMY6y%BMj==S z^h6NtfmG`-G;{!Koy7C%Ftk*?vV;YAkrD!^Z_t8OEA@x~fLRQ{gOzf`5SH3Juz=?W zWM%H0wl4T`-6KO-?#|oDCsz#q5U?z5MgF+3>t=)R?B1bs$>NHIVd8wjgx$0H`I8m< z{&gM@33;1s!yOrzpv|}DXO8CIC@kzB_*zd@I2;O!^6a{oTAJmpGWJ_XY>Jyvg%QPu zU94XQ;!m$hK`|Hp9TdY^B;2rRT*iBKzhWes^Y|H^IQ0nH&JE zrj;d6G*JU!#C%qk>~5KrHV;iWv+(Fwc7ceB0bg<;3@LNmQ?8|9i~QrUzt~ri87O55 zLq-6aoiJR6FkpmACs_x;vI+E|K*4mCHty`?m*_WIH5pTRTt=OutF(yMEKmS2 zEb=3bhm_iPNbmJf6%hH-`5hf2z*4g+Za@j>GSX^eQOaO%dGLL)DZFQ19f-kIf}~x0 zAzH1dW;&_7)bd`v3-w^fzzby3`v9~Enrkj(sg@N$nm56jBsGoVSrjH_;tN0|N-O@$ zP`}r_6q5}Y$k?RuW_#`{lc}NOf;ylBh|@JF7+<86TOxN`;|44X`jWN}^3=6cWUHtm zz3@;tSx~BP^MI>kKg`QNySx$S43~>X65kIC;zFYFV2nBdnk$Ge3Fir}fN&4d%{g!p zf#n2rTu`?S=ckW>7#Q#r0-%ZE-Pe6V1O^8kz-|zsfeC=5H4p@P19$fw)o77M)FkVG z2M4GE7oZOsK~hhM0UabugL2gnI*8SrAjdp>%V{VcJ-C*n>W`T8!Vkr76DBBv0DjSp zQ`LKyTxDdhx=3n)oDeYu43q&R@M&CoOu0Ou`8_lSAuiO0eQQhi!E@}1{V6D6=^e6h69)$C$_a~}| zag_;hCsTw(0-%d(|2PXWpbZ%EkbGr)2y}2Ez50P+%XFd`>9PCoyi=!>S{Sve$$SuCLyFqTFx>{!>F3=qPNO7*;U^&KLWap_kTR z;kN#b&JHKrzX$Zb${&~-B<@fuWng4~VORCG!~p{_xA1=k;{SgGaX#!!;MW|+W#=Ex zmH8H;D}GU-%?O@49}1+jc9PIMJpHisCiw@DkV`h_cam;FZ8#jjL49{3UBmqa=B;QL z_ux0)s5>quuXq3K293Ix-H>cJRW4+YQ@IMo7o{VfomPChZ37WY{d63VPNrntd$=YB zh+`stc<{|)ZPlH9Dqs^bjxFN$yRP3m%n?!TqDNe6dsa<$JXpb6dOF9qUGjl~ObUaY zdaFVk7_Rw{*N6INU;IHEfQ3)cXg6GOaIN2$(5tS*t!~-oi{%eBuaY_`|3(~kv<%n| zqbu1qs$DLyFa}W%zPbUj%}^gt)Uw$t(0YjVUhLhUZEEson=Tjd`E4Ljq{h-KS#RF5 z$J?IU8_=R-0Q*)9hUuJ`JO>KZBJ>SDq6TY$NkN~C!$1B0Qv0Xhj8o;yemIM$`r-kH z#Yxf=;)bYiCSHEa9)0dho&p8MAY>yH%6dDMg`vt%MG2n+bqtst zLq*{H%I}((nnh3VGV&~3{A(CS4_^$!cFdkhbl9_Og0F|z2h{hl?}7M5PZeOJ6d-93 z&O>oSy)-@?dj9PVeA1uZEM>^cKc+OT!z|1)>|bdR`U5s^96*wpS&M|fs~Ys=_*h)K zFlJzyDPR9nRny<9uAGe%GP=w)l}SB}e5-R8jT|9p(Sre-6by&+B?*y`*98T5J}m&oSRH8WD*!f856CU7B=47uQ4G~5 z{MhpLAJeAscR0%T79y7;-{@}|fchA4=93YF06dP;J8~`%aZVGG20nGJI4Iq2BKF$s z@8FOB9sKRRAG&@}^BUl?Ff)Yt7)0)JV8wkbp8JY8rwlo7O>&+qRWx`QR{UV~_h(;g z{w$U4y(V?>TIWM4SRD|6jn)*^;yf;|D7wDGFjfB}f!t8L@xqXH@Mn zLmb2t@4pI-K+G(xo=?}jcKzpk@cVNJ6~wEzl!%i~Ps1 zn_P}*dV|lupJ^3bi}^2Y^f| zkdV91dRFxOF-`E2F8JQHFB{rx`n9-hg7|8;@u_snhm-;2g=&o>c8i&6ynS{{{F{y9 zjS$1$6jP1H^I6568;@&FiVxplk_E?}^av!-_CDc+n~UO>Nyiolw^OZVo8D*GF1Gtj zy(!ZYB#ARGuc9n#15^KQxfqy)Wn*?al^Ej{^tpT8v8-hb{xNp5bKLor!^^hrU^`sd zv~!Z*Y%prJLPu->*X8Z$y+cIOU@Ql9ht1SXJ`dxA1Kb%tol*-F-1Tql8_g7UPjsq? zH~wQn&rxUMHNOT){K#Ann;7RiyeH_t-PRzke~% zkTvfY!shd4)7E9fEg!*RP$?<#lLy+zDf6FYzoJz-22Ok7nT>j^Ir;-N<5*nX{nFZR zm&lXD0Pi`=IpD*)M^l`OH_V7J-{gT-8wE5g=5jp_|<(|ZNk9i!u@vDaKaEVj#S zxHp=#+BYf}MC`bIGV#5|Sdyn78*~^Kb(ytUM|GK$L0(XSV`R(j9v7lIQ>J8dk7o7ptd&$6I}f8ScQ{kH z*#MZjV&k|oGutll*#7Fmu1N8gfZb*WVF#QpaV$S}Iw3j1;yLV{2Tp?hbm#hgLf=te*O)+7h^n*R7Hb87@3RxG-Jc@D;D-Wl=)>Oqiwl5F zr-}L)95pZ@Dr77ScAay2Koopn)><2|J6-yCL$6O7AA>aZJb-#UP?CR+9WfArkF>~H z-hICFyxw{xpo%PA!e_OuO!eqDzTVD@%^= z31mOYb>E$uEV_`$`?|d)_3mVUKZ{JE39sQf(qXQl*h1BmZ}%mN2X^`k3XWY5I zUw@8=rd?;ZRNzLpG}HA302j$suT}_^X=9mG-j^*q=rH`uhxxE_-hpHFfAC6lRs>DHC?UIVA#2TJpyjUrXYOqz;m_L3^`>>Fu3hW-kr+N-@o?w z6>Hh8mZj*x|2#{=Gu7X}ZzG8VQTOqV!*S=4uvocJbkS_pV83lfDrO(cTf30#LE?il z(vY_%W-;s&^&VS}_~Y=dfB`!qF2m;(9HiXg*-OPrX)7s&xe3vG9QJx!9in1E>1c_u zMZ=?r(59OK{0HW1YGSF(fVZr5hWtU8ia?MHUa#pjJ_i#>~D3`uV zB{}sEk^a&7@p?H?7c^G}Xe@!=vrzgDJaB5kTeS;>;>ViEdnI}|%LmU>mThoa5KHe} zG52r`9a9HwGJz!cF;o`s$FI`Z=5Gz+VTnq~t+A%Gv=H&!*nS)qKXH-F6}l^I2w1!y zMin0?r`+-+5TMz7#;0nti=I1FJ9hG7>Gdmw5Z;t7|)>QTmBHuYOzC45FuJdcWyWMu7tM=@J zqSt7K&Mb(iFpyZUmpy z)oncBJ~NdM(wAQi=!$^&+-9@f>V$NLM_a(k>L~sA{0rL4-e6TeKHXxXfv@F?sAR=L zmNQSY^v4aoDE}d^CqkhgUzFxlztPY_dr9Rt>#f0byggRmFr`nPMqAtZ)8MW0HhyT6 zjoLP~#J=P6o;T&Zg_;^^q2uVIAQY#Ld5Z<; z0S;rozuxca^AlKDnOF`U$6a$mo+i8l5JUHj?2g0QG=sHPE1?iWa<~>s3Y~U?onH8N z=~!T+i&!do?=SeJ7Um;c0LI)h16ay1N7*qEK$1gvYZSWd+dQKgEm~U`kQu!OUcuGx zlo_!`-(D7%9{aoxmk5G4C!(8oWe~peo!Sf)Tb>WQ0e8>j@YjYjH6Bvu(w=H9O=f-X z=~K=>LG3QYhiCi7{Y3mKdLC|a=vWwC+9h?B|L{1=Rxk8%BslZ!ead$E9PZ&Ke020I zBl)+q?9WJ}sF%OrB;_M(!QncC!@dPbZ%bWuo5){f2gZXUOLAQb1rgm%&+k4v6{^8W z)`*6n9ri_9C01K^e^4!-he-EA3Cu7W4036j4VW_U9n?z(LE{*bC{x3sb}_s{*uVdlWLp;2_ai6($_8G$Z% z@Nh;t@4`eeGYXjL)RHkb`bk10`XM-}<}irhjo7}MS(g;ytF}!hRl*%`hO#MzG9wkz zB*$WOiR0@bi$Ma70hfnCk?9ImRR;9@(L%e~$DWSam+ebha)qb&?=-LIzCD+~-MM}YbzQzI6-C>OmAevm%vZk8V)aXb_F(N- zU!$#ZT%W|cxqrqg(QmmXs4&{RK=D&6pgg^souZ}UGo>z>-!etfbTKO*#$E^af3t->J4+d@Y@FyZojA1R4cf0H(|S z!T#=_tCP-UOX_a`bF-B(kaFt@BW3k+!5rcp>`@xF@Vto?Zq-I_}olPtA(41NEH8 z#`G8NpgbT2f}c`z*9R!2#8jbQBMSL+XBh@U6&!HSnuT8x@H!v$85&@=%t zvNiNXE%(LN(TAilkKAYd?dBn7gRu?@LQm@i`zAHgHY>o@6 z?NizK)<0j*0JV6gg9%qT+A2Ztt*P$z#}ehXSytn zfB$RU6v}XGppW1yqv$rXQ0np2-XN2g!{sDwJcG}}4e6VofJ+&-y>=C8mE9wg^L^-> z92;)=zz80bVuK}?Q^&&y%pbLryZ{O2&J@LV3d0uBQ%O6EL1!#?!ncEM2kX)^_a zLR<;uOn~VuCLLSSSX?ug$?WJ)TNo?5zsqJe6fE>3?R~y-vs!4}w{j^SjrtT6CV59uu(&t32r;k{YFbV5}Mj#v8=hcsfKD>>J_(=27WLw%( z)RC(95_!*G5Ne@Z;yg^AyW8RyPd$ENc6UiF$a;XI!#Rs4aRP9McBQDOBu3|6Pqzvj ztu*H^MBEw%@FEM#1WKSB<*(7$w!zSPk>A6@`3R|X&x}9ms!FHQlWldtT5gVcD{92V z=9Lqu7sEEvgLWRdl>(xzv!ZaQD}EaS8~aN}RBkvds_2y7@bQ_MJfKc~`KD3XOFNH# z*kL`DFqYXjuQG5dsd{6rxl?GL%Zljcr{D39G1}e+`%A{n`?g)aD$HpnD6I0-X+;)F z$tM98XG!0AJY>+1biq(wwXjmw-a-Z9 z-cwD_LS#U0_jI@0hyAXF@agV=X1!0L$VmIDc)COb28d(yl_oN&J=o~9>X?Q z8JT2;KJe9nx9^=}S!FsiY)%g*^i`KehCCcCBs4OGX!fixFERjk9f@{r`J~*SKahEkgr>0WksyygnQbv>)viB$KI9cBgb)_C*-)t&|FbHLWk`qrV z@R%}6M>PlMzB$r|65Td_OZ!YXg8w`sg5n4(phi#cWl|YNctChobh6l3LGG|_vd>1n z6%8eCG*WA>0M(hf<&!y9<&4R<=|r=EnDyBWAy3*PnbQCAr zE9)hM0nNHA2)uN43nf2hh& zg@;r1!ryiB0RJc8Mn51n__i>3NLv5+9@mySxLOzB>bfRAJuR|$6Osqt*%IifcI^Ym z*XWkWe=-vU0Jb&uf{V$%0BWE=<>dcAQqEjW1E0(CE$Y$gr$)#wyO1l37B|;P{eU#s zFmf4+EVDQ6G{f|j(+%O9 z&356Q-uzH2;vkgCR5bL7$FKF>dA4;ZB*VTr?oFp&jdlTn)bvaOfZ-<@?zJ)h0yH}? z!RZ{?gk(9~7POCd(^iooMJYJOi#6N%v}a)!UgSZg`Vsm!O+by;s(**U?FpmK-Fq}X zZ8l8%=?OL&*ntDkn<57r@4x2ucIo#|pk0mP!0Rl!uCQG|4tT@9*KQ}xh7ms`ei$_ZpXwXjMet+%^Qm9=^SKNFEKQ6fR@hwz zL5CkFCZ71QwTVl-@b$^hDu#23p4-;Sc8>PhZOXFO9G}uN zfB~`-SK2*a_>06B{DH>{KJwP~hE1m>TCNY2g;5=+w8mMdE%Pj*$*LbSH)azzL{+UD ziiqMzfl(egu6`o_X-XT8co0q{on4}Mhwf4D0VGBgq*+M3U5R(Vq#OdBSu{Dr)liP{ zJ-raUzNp4TPA0KWp7StjzK6H%E(X!|>nV9KN>3P%(XioXAY?fT#1sF8vZl`)isLFw zMZsCLaJjN}Lb;X*xq)oVKxt|+UVMUFbynt|GhAPm%_xq|KaeraKag=$RgN^L>)BT* zS54<8GGG#L>g(z$%PgkGXxk58mU0r^`akIOziRu7>28w39@gyIi=+Ne3KMFc^IOd# z8Z7GH|Ewk!?*AC9o-nHUxgk-+In#+>VSqZ0`t;<_gsL1luF{K|5vNa8MipOb|03-l z{)L_g;mFlg$Aw^5jpX#=@Bq>UMfv3nhT^HVpgq&Xm-V58@$b|6itYu(Mx!xE6c3?u zL^e2)8!%UR@Z+mTYbr-;2;x^ zI!qQJ?|S#Ly4Pv{E>$a24Ufxp{fF}ljCBf`ir?Gh!vD-0BIw`d%^Xj|!}}P{6ZGM8 z!^Tf?ub8j^Egc~@Lec#lk)4mN%mt>>$XYItp~?lbd1@BvD;*w@@f)oh|7F^%e zY%)ErG!uctd(=|)EOT{V4g=9%s->YnJ3i>Z49r<2I_?g_n3W`8Ht^ryJUAdQGjHt* zTUxsw3nw)UbGF#z<2&)4l{@cy;;3bI{*9x<>j6ORkmenHgPKTrf{kmN?+)5DNfqZ<@T6W{h%gK2SLkY6WX2= z&IEfSy&#M#qPlHZIJNIplb)-ta}f0v5FxR@=^_R$-8zrj6|TDXA+uvAt0QF356iQ` z;DbGc<$!phsH2{wV;RGf{qTJcve`~QA@KoBhjEiGojkBEcBOuYa4dVWfaO&nDZ?~> zeT!TdJc(~moY$-&=o!vJ$PlXrm-cv~IS`d9^A;Q5Df{6$Yq#&*ugcO$fpED2IqvFy zhs&$r<-vQkIhgq7_BI<3!m-R| zc;3#JgB&1monT|PsKzQg)aHPu>v!$m{q0A#A2RXHz^5cnG=S+K`~8&Q>)8V26co*% zrcmXzt`?rTY76>|`{zh7-Lzz%-Tkk_?B8?M?t}{pOUN__Zy{oQKFlJO7;iqG=q!F+IaFM^h#TxPb))k2xVyt zofymJPlVi+-=gan4nGxq!Y%Uhy*gn`@Lh%UBV3Hc#&^t-xW|x6rs4WQv=Y#(jZ3a| z5$#}dfJyPVsWN>mfD@`>9%y`e>hT$_`+~Q_7#5^(1pVBZ`PMF3t7yV7PfPK!!5pQ5 z;dNx|>{C}^W}^6IL$5Bae)UKWd7DC^i3&jD@_(;^puq9F(+wU(p}p`pvT1p$*P>@` z`hB8YmL=ECCSv*Wb?1>zXd&8hiTUt>I@8QZnDJs_FSQxrH=8+>+u@(V-h%p_o&hop z@M}NfYm+t>U%7aGc%M>sOA0QQbgRsjDHZ9qt+JZ`{_a$cz0ps67U!jHH+VPqXnBS5tUn6kP(NL(P9sa`{g=MhxH^9s{5-0Cjv|mOc!na z3DU#qt#Rv2t>tb@>6c9Q`eF9+*RyIqKWcsTP&ry7`Ua{u;02jX1c!`9eDt}i(PEQh zA{2v{#94CH8B7>7h(FVEDQ%3@UN6NES@7C-n87c;9K92SP%X|tE)_DX-A`JY){ybQ zLu6XT7(XB(AnXt-cUGS;z71RaF0~eK#n*8oYK`0bA!uisboAd;)?y9s^-IK`yB2uX zS}(Bk#AM<*9-WD=?exBXzrs;$U4)^yr9j%g;}D6l9K)%5wS>HCuPYL<%9TlG)$NF= z*Dy+y{q&lPq=8+-TFT56)Ok0UR{4$gIW|J#44DVp`R4f&CS9gqypU!whUXd2zO9<5 z^BSM3?jAGIcd_!xAbfeG-}yb@)v1O`S5_*MOU6_xRw)}>rwbvnY*4-L^%a6kosH3q z@rplae%9x%a|qDlM$z=bKaDHsK9PV$7XaqcYFZa^c$=qp#^~Upd+es#jR-s5t6~ma zdplArPby}@^J2fNFXUSw&qb{GtiRTLyk4lXnIZ&*ePZDUxr?$SOnsk4l1vl2ECWW( zma?LGkFu#u65z=0X*C4IHV{3ou8+FHG|r}ii1*JItX2$7=Poih)IR2WKT&Gmul>Yf z$LCgvw?QZx4EIk2Ke8+~#@rDKwYWha6xepz3^SuJ^07UB7L1yoq{;Hu3`UhO8C!CkTV>e9|6@W1S=}QN>WeM=5|>xOFOddg z>A>V?Z%R60cWP%z`b)!`iDZ`YHN>GlACmTVL|0ellVTN_=BFJ$q23D9IP9zk`&}F= zmj@Y!ANl;Wo0MA`%7HF-U1qQ-HTD*~?g5^y-1XWnYQ$7XH(SQM8>f${7A)TQ5}b57 ztQb*^MZIsfB`5Y7R6*~BE!V-%MUM6Gjg-ORM zmsIrM;r)utnYHfm!WuCGbgNs@IeOgO5W=kGQFt%{ba~&47~G~a8Ws~3$Xx^TZfvI& zEL51^z*X=UvL=gE0)bKE=l$dxmstgFH7Ncd6lqu7}> zb+*#s9_*574S`t-uT&a4>C1;bMS5JgGVaov#%v#3QNdmqZo+fe`klq>aJ2-BfgA@~ zFJ9|j7g+1-ipPK_4%Sky)cM8?PdzC!aR^GUUx>naQa3i&Z`69I#+|Q+HIXZ>@g;29 z!==8!480v<;}H)gOi8zW@xaBMUhPiCRON)1Wuuk27?%f7b-2-Lst)0>Ta@`Km`!l9 z^Oe-&hhsc0B>xms$0LHGK8NG%-6cX}Zql=b#WUS^Lx7`$BEGK3L~{k7H7KuW$^3ll z_^qhKm0ve1kXC!#^Wsf}g|eb4JtoCHg@({jhW93x=%vT~70M5i#ZSOuDCRta*Bq8i z@LnY*=USCGuAD473k7%FXmy7@1SIwa>zp-kNKVLnA>^QKV0xX~wdbS0mJxqSpvp!A zxmfMa-@rup=@)@rQ4NL%0pVH6>;Zgz(gMF%QjG#5jYpigdh|SewHdH}3{1z+d~lYA zQWvulr+?ce@x124i&hV(psB=SM$$^b)iW5%At%#QhBEuf%G`QmyfE?nlwi?Lp`!SZ z`m+RC=I!SF?`yTaT=H7w@@4yro3mxC*0Y4-VpitKPKf~}4gR)BPif|6CsXIldJ;Cz zpURmNS&967o3Nn@Wlj47XO7}B^rp|LCO(~e>~F$@8$p@ph+}s-b~6{RtM?LU1&}<@ zgS`vx7nt{+o}Y=Rij30g zz#Lf-K2l0TYWOD%NQ}(`fnt6RV6+%-jFSEm1QLwl8&dLrzVTuJfSI}?-0u51Qj5{` zo|m-V0F%CZwA#vStFQC9s4+{qrp`pw3t8I_P-mu~YU-@15|gQ-I+#$+uSrzzEaBuJYE|^gotu$nAht2AMkCfS6t&Md z*Ee&o1ly#iqjN?NBOrdAqXWsC{J=jx@1u}fsuyJ{IRphIzA`xz-RG%HHY zqt;H_Q};V5ou<8>l+@Ha-L}}{4IObnWmVbQoP!E?zF7-zD`8m!%|LY*TqCvLXxY0tmO)_jX00|cZ-O`<~{o{#Uj z4400mcR_+QtXnkMSDvrhVA=`i5pR)sWjZ&n&*6N+JBe4-ousZdLKxN5Zl{NQu!f+) z|Kb81s5K{(8xvlLhe3nn4>hs&cR;LcB1}MPG%t|+W1EO+94sdc;}626K*lMzQrp<% zcAq`ByeD=Ns)lD}0Vax4atKOzbSb#ImHPA7s?_Fh%gQnZg-vx~LCU62A0QL?05Jig z%Q-Ik7p`*M2mB=6o^jy53#UIQm<0&H`~vv+!-Ratnhv`H1C`aZ0bj?`qbBo#$N@ap zXm7R6$n*-X<{+445{g739F@k)p8jTQr>5(x!#1jQ7}LHhvI|qNa47wzDGMnz{Nwor z={B-OP6u^&wM8Owh>3SzUCI2~=i$*R1M!s{*IP2kl17q%Kz)v$vufSx#p$mDdBJdW ztQ8L!b^>N7lUwidu8ya7nGy3X8`UM_hSz)D=8r9zqQ3%5~LwY9>2@&!%*<-tFB3}bUn41QQa{;$t@oTUFny6f}s%II5dqH4{5FXW$R~t zY0~N#q)!{j2E(Pag=NC_J~73kt=Ge0BUlgA@-A zBA+6gPuA!Bp`LF*8Xy9ZCHWb9zsMkfjOIQz5h;#W?R|F`3QpD_(ieHkjbi$Owui3* z=Et9K@Y(9X<>aXcU*g9N`jp_f%m$#xmO?44QIay1@`_M(5KSLXu&cN0enrdCY32<4cY0TRi#Ic9TnCQw2g27gO0>uM=F?6otdjadAa@5AuuL4lQ5s28dlooHn5xFq+4=AZ7BGgoisxmAtw65 zbs5En4BT@#4wb^2LCTGAPN;Xe9SmWeB+tdZj140|^oWf-oWq>4l%99#Gm6_iUoPe1 zLnl;*KMua!RqRk=y9|CIVahN6c;Jdt49-s*<|q~wug8;P3_z!W49C2LZaaA<_#NJ- zO6?whQ@~+zocOIMYW^0TWFYK1s+BSGPFz4X&wDrq#yzi=vw~WPh zA@rzMZSPc@ix-Zm@bQ~Bx1vlmVLJ$!Eb{0XJtO7OvCuz;4-bHnEYQwWGPxk$82AZwD8aO}iV@=z6^Edh;6Z=gHR`ben!Nwm~C=2qa6JO1BdcN(gdi3)w zNYAG#l)0BotDz!)&HY9MM!I!eiRD;?BQ`ASgOeorvr;8rTx$G#F932$)@0!{13XJ9 zyVi6#v#-|j3gmX=rc5E;wcdk1)beQSzk*RU90*7DfBn*Z^Mf`V)y=SX`H3-fxSyB? znk_CuIitm8Manuw0NDqRSX~lDdn-1!CR6?S)e!T@o@K_3vffLL=NK=iS;QkClc3RV z!}7xRMvMaFgARc7Wjr~m9bDb>Wnmq-kBt*g!+X5})G>PktM~m}z@A{FcW>?F@-gXa zjs#%W>}bO2YM);kYJoHa)8)?6#cAG+fWnp&*Et|g6whxbpbfB`Ow9V$n*LIg3--HM;n-|MpV%gyaT@XPy&|=%zLGj1%@BRDA^x-}> zqEnX5jpdG(%`N6`H}U<0bH)BoJWB#T0H4fV`Pz~?f8y$gHbi6?ObV2qd1)jl=D%#D zT~8Q2#FpnOm#5Q&p)k6;1EM(|(Mm9^9r#!rPt(y-kc)ngvoI3%xS2F__Th6_u5dPx z2V*ZX7=YD_h_BNA4fgzzT15solG6i8pDlufN0hQ;nJoW{ftQeL-A3c&>LJ0QNE&&| zsh1*Zd?=zSgCGk0F~ORr+U%Ys-Ryd)DhLPsaW6FH4}9fLhN<(q7gc1V=eK8haid}1 z=|CT;@L-BBBX1k-o8S+CUcK1%D=vggf-6^#!5xY5PV#D_f$q(DI=M_W3^YJ58>jH! z8@E`6$=-^20Y^CABOXH4s8s+kF@e$~L?0pCQ07%>pdUOqS zU!hilaU?;MWlSR*Ilb?@FNjqT=l~)gc`_Jt*nm67-1oDZPdxGX>@ zatsEhRx`dA{_4aFL?(QQ0eVFz5zJ%QCsiMrN!QkH`@=gV@f6g_M{d}MtXGwY5`^P* z8|QW|1lxu{_dL6j?7yf{@AKdn5ktcYCY_QEMh9fVvS-l|NDz~ENJY(PNx!|K6vb|5 zMN8iCp!$p{Q$R+c!o7N_e|(*G%>6sg=gQ-11zZdE(^bd&;gR9Be9-u_w)Od-W45+E z83vYUMe*#JV zU@RJCPWMx?$pX3iM&Bqxo_!0ZKHKQbHlBV*lfjwFJ2vZDd@JMBhWj)pnG zhZE6@<5d#B$4fj+3aPbP-GhF=Gr!aIKH}dBnS#q-3J0MW_CF_RkiwgN+C2uI?T#0# zP(M7Jg9ibKuFS{nQ@~d|y0mHFwdV$)vWJ0m)*ngWela1*;a|I6Z7yUCsu5H`Q> zI&Q9I@qKXrAMJhjSCdQEwuD}!gsvz}I?_UwDjh`uDJDlsAQ%vkD!q%ePy_@F(ovD7 z5Q>6@ngoFm@Sqf>S3#sn2jLq$=Xuxr{s-R=59?l8A<5kLls$X)%-;LDK&G*%;qPES z+*y40_rUn6jf+K?TB&iXzfl?=4plw;w3WhGj?Ypa>7}R zr!Rb+z&+gH&TSw0+g}*sx*E%E-wNQ;*0#TS37ceGz-k27wsl_n>*#mGki8zje(Qf` z!VWUOza6ITyx*J8H3o(D;kaR|NEH<8dY!iE&#m?l86;@U)8zv5rJr5(;@H1wMXmD0Q^x! z+&L8s-qFaNvB3FNGp)S~6*ez1zH{9;m*#!c<3my10c9s!%e|EfBE5HHK_Sf<$(71Ua;E*mI)! zE|)0x&xhJauJ^)g7GWV&}ylk5TE;w z0_YVzASo%{(;ePF1}`p;fA}VC&%nW;ToGP7+0o4cVYnn?@1B>=sTRoJ4A8lMrTMJG z&MNrM8uO_Kke228X|xr0(~Ram(&n;oYwPT$!BdtBj(lii%deU31UuEDd(WVpCR?ND z&B!LT&Rxm7xF@&p7!kPV1f$-TP0VJ1TxXZ|ScfB`p?Yol>$NP^HV|mJ8o{j*pFya$Q`$Jxy% z9|-g~T_tj?X>@B5mi@X2nYwpFBs*IEH1VDgw^B?91K-LnKuR4;$TK=0jHjBGUY~9$ zpWd7FUiY0w-27OScSkc@-Ny%^PPzOzg@re~SYW#2P1v3<0OKXnG1+%0NXxVXYy&5N zWH3Kr>&huTV9~j;?(x6&2=C=ZRoi}dNH+n9ozEJ%rd0*&gA}R4ZU*|X?UxNO&~CQg$M?p zxyhoZPIC08i$~=3EnNa*HzYymq#k3F^9a&g3>hGkuRN&yQG30!6p`~j?_5D zB<}FQ6b?AXg3lC+O_-?=!7y+28g%7H%9vm0uvYj|BVr>ihdqbX=~D*q%1>Q$(~LducFH}o@ahD89ppCTr_LRb3vLanN4avP&t~#QIfMX0mDW_jV7a)J5t=yNca+g zfm^xK!OSVvhdYv-GW*y*q{}&&X9CWtSa7(YwEQTCf4{p6{Qc~3E%o^KzqT-`wot}bd3!^6r?JrtE2cS7(yWH+CcKVRB6$jcX5O{>1# z2}aS}>T#Z2>9Xn_lCpW$Urwm+|iZ{-tHit%B{$6Z@skUoX9!oyrCsy&mgG9Y6~YpJ{ifVV(0;h28aD-{FS`; z+A2dI0&TVvA!NY76weR{1B3KqjhCfmoib|3fKw`rPSaq;-H&#E&QkUpV(TK&;spJKZ$5``^Dxygp@B~ib5b+b5dwstY1`SoiU+>qD zJIzV`D(Fls+tfOwpxQB!XNK=<>i#||y9DQ)QqnsRzCP2bG9ps&VxC6fryptv6EAi> z&sEZv=iA$20x!<#A_L7Ero5(Bp(IWkGATh$@3LU~8H3Ve9=cnrou&70OR)FUV-b_O zNXZlgr;KDlQgJv2189|VGNdIPDm4-jtlMYv;f|AJYGSQ;>HK$gYvsNyIlXpM$knbO zV%);DU9rA;Iao)A^m#s%1ZN~UmdCnE&cBQH2}`y=JcT9vGl_xvx|IPrzCbB0?7O(L zD$Twju01~Xev0i1I%JnX;MQ!kbj|-oqfNmJWH z9S!e-{c%f=PM+n1aQ>zh4hlAJ$>`w(POnb+KMZ(Zu;-|?`)kL? zu!>a(CdssY@%cGKmA7s=kvGb+&G$t?MOF%h(%GKuvXJs=L;<0SmRl=JElb;7A}4W5 zjVW7JzM~Da$sM6fBo-A!w`|eSpb_E`L7Fgn(kZaU_&*A(5vL1^=z!{LtU8{P&8!Nbu|1>}mnq9FT!XU5$4SM@<&aMFKomVw?LH%p$*U`Ud=KJC`UyYz` z2yVqs?CAe!5TCo(+};bZa??@iRtBrFbUtlqsidZ`6m1EpFY#uC?_ zeGKM`Z@<6b5|kPj;cXNJNeYli9uAxPzStb;1U+Y#m=*uBzW?Y0r~a*{%b)!Ob{)zZ zhB2|o34XU<#p?-Cbf-Mor8J(lb8pxDC9FKr^rC5ppCXa-7^kF^MRHn8)A}J2?0V&* z9N5}JI!4Mr=#wkpT5)axBFcVXPk%P#?M*8ER(6UviJZM9zvzvtc70OelEfiySlx8C zo@oT0&3phVMM3ykMlL~H>uSD(p528?;fh~`8t8yq*3&>PgTn*X%r8c*pfD9*6Y7$) zjfg?FRb#*wW?jN>kPrAzEaz9dUumdbgG!|$Ui<1(>BdDVND0ESQh5{b7*wfl>adB9 z6XE)k6&$D1?rVFy_kQ0DHy8hgZkujwa-nlz?KI08P97R6HerO(^qu3 zW31z3zz7^HHiz$+!AsB0L4(#5@b9RqJ9!)7L>T!cyc%{v05>h%$olS@6QPA&5;yJ8 zTq^_dJo%J}@O#|{l_D4L2QLVND)YC+4&Q}J9p#&Yk=n@eXY)S!RE7McE<79Vvwiqc zo&RJg=w$GokKpU84>n?{HHsW+O@j-WzaI>#Be*Hp=xD&H2hyD7rOm2;SXKW<$M?Jl1Llo9;s@dc`Rr0f^8f9cXqcHZILq*Gc4A zsbvff^o#4ow5S#WLyJePQ{pK6?UX

82bcq;B8ZyvW?5>Q)1XF!7oJ4*ER2AZLCTOWg_B3ltQcm>ZrobI8%X7y|)tcx_7C_|Aorhz4C?r7gw(edx(c%(?^>8DKXjN zDT3jb9A}yz8E1<7;2T@8^1OsAnG|(0GZSYqRU^Jm2iaTVH*seS!`37Ki3o-s6^HtC zvoJT*?aQ46@&}r!rL3Ny4_!&()}@f|fu+(Tav|+Zv!pC;_h$QKd9NL#jAte(+Fh2< z6q{`|BDp&D0|^D^ZlzTt9XI*4xSBR*cS4x&6@ytZ3fc^PGq2A2Gz(5ob$g4=8;mf8 z(aJ|=NF!-1(>AGF4|@1|bmwx-3_2&9xr8Z|FnX7Ng{G@_X)aCOXJ z;?>Lk@&z5fuPtXKEH&c*P(hLTw)4j)zv-*Sg894xyiy>sNz>hPS59sZKtC=Uf1DGf0#VU=~-VIy|vrtwr4DcMMJr(lQZ;By}c)7q-{T(^{>cm z%@fJ3@f`^RT)Ssr3oj#(x*Z)IY+PJuauw6+#Od7aLWP#qFE}*}hioiJo1Q*+Y6MDO z&(jKSA3nNAW)LnFw_z@9PQ@qkw;eXYY69k7KyjW15cNfLe zwa@)k|1++BQDJiY!#ke8-@pcc?w?20&V3eQr(k1^ZUI{8Q%OxOm_B3rxBJ+rh$PJ@ zr@cBszx~YAHKXfP@j6BL?l`3$`Q_eMu4A~zYL-rkj8b*AXoAr4x4P4^gYFy@Lhi4{ zfb6=wXILPwii0k|OfCCAH+w$CtFD!hGL!tyyYS=287%&Ak*S`+=tR?jMK z0tL|t-JNTZez$H(A50+ zqp(STXut92J8Rl;l_EB9g)i0Le(d^=a9uFQvQmb;R^m+)t!^-?BsSFC85=51ULFpN zzZUS!zf+~Z?)Y0{@QfKq(c<{xHmJAL+`%dv5k@*Vv=r7-Ue&%!eytyI7As;SlcybD z=W>1KLt*rTa)3joar4UfB;D zlG{JWJr8f;_SPs}Ty;a(?sx;GWY1m%ABo%)yB7);3wPJIYsEkrp2rw-H>S5{+2b-Wqqe>d#HxV zl16p6k|>UkdlJm6I3#cIiS$_M)IFBWt&>`3#~aN{kF59P43v_RZX>hd2t_Ytn~M%n z^o8UOF?J&q;A+0#E~Q5#eSB0cV3=CyN!48wk?(UfDmxkYA$iZ0%k4`zp_Dg+7s#%; zP|dHkJI_qdu*hb2v0dr-8-Cvu%-WCaMJvDckbBISEN=}vx!lN0Y1TJc>7D^I>SIIY zP4AyetMXc_Z7@l0@-gHnM-av3XFund;&tv_(0X21TFv@3h!)sRLmb4yI2G^GQ9wQYYovc8xNLC-& z-qC+g`!J?I=F)e^iBL`@;ZgBeHW@-)s4()#i}`2AVU{{cD&-5%HTtc$AeU@cFG|>P zvjd$fU;NzN&Hy=;ZP^%c^*Qp@6UE&8Ecp8Q?v( z_D_X)nJm7=Let=h67qJdXCOVV^V=J4NmBduKGbqY#^9h`P4C| zS_BmAo^XVu_{dGtHUV=}z%1(RiG2|axl{?=u;kmMxKc$G&M+I*LeF=+$3W~;k0Ph^ z?7x#53FHAajK7vJ$_9R}1_`5tFYFs^zMW3XMJzBGI}z0U1kDbOk?Vt&jXvspgsl%! z5zm%2fH_IdWe>gp5n{bWwS9h%-h@4VYNBuHs(_x0O}+{=k#nIpQDcLhqPcsQn13K{ zVLsuuDuz#mN#Uj&CDQdkuL|lS`_c!DpNOA#(-td+3RwcqapNl00JmAJHl2CR=zBkY zv#T|`l+`mHSE^J!ugs=_HIk0C$lN4D{QQz}*@IS(OObS-9)-fet}qIQ>|23WP&CZO z=*c6v&usli2{0Vo?8wEN8TyXhWsO11WAktEm~Uv)qp!MAE^wRGOR6yNFv*oX-Jy_+xaqIuk7jTpt#2Nzhr(CJX|HgR zL_a}q6*0eea3VxM5_$|I&DJPtB(tu?UTNZKhFE2D@wh+xci(ss8(XbtU+rzGY!CHV zYjrb0_c$x)MO^2iI3uv(o-nHk1A9&gLV9)~fjjOct=Utza=!g`Bko?ivL;C!)D2d# zzH2%;Y<9E8<3J)$Ba|93@It@zoC4~ zPh=I(x-zTNzN^#wcgVNLS4-uc$1>scBjsOOo%AMmbEJxb_NGXj58^xMcp9+m!wtSP z0Wb4BJ(pF{;iS{}jZ~I*H<_-q;O^aelN1GdUx$8{l*!=esIwwn&Z^B+CKwGhKU!KY z1ZJ*lEZD{D0)|^SPkZ@3Wc29(aIwb{8u zI#38)jVG()y9KlHq$nvms;i1rl@2#eF?nF43uY7;#ghr&cA?9F&09ShnHY!M`mw#2 zaSTT4Gqko&;wg8o6_~+)8)c@QVydqgnrmt!@hAyF1oAsa4e=+@gX937Is2DE( zX%%P!=z-J!GzPp511(8F7b%EAgw_HfZQb6WK`;Mp40vuR`1Npyg$?MLOvI-%f7*Cb zothj_mtLLQ*Fm4`_WM6yuwDh)2}f1-4Xp0}>dHTF4)uTr(YC#9{eM2y7sv({rxU&T zA9egkb^fDscH>VhhsN#-piQ}{BK8->oVzpW>7f_arrqp>{_{xIA)t8h4ZK4^29ttR zAfzGoKmL88>3{wEH~?P0xje!?PH1Xqec?`h$E(W0{K2i`E`ys$%an5xe-RYA}+ zJu0#%`=TMzT*LN52xmv!X_&Ul_8f4A!&=_mTZAfmjVGUwT!KIpLYknS_gpt;z55U| zC<41b3p}nN0y7=SUF3Xq3qW0u@&d5*NW<9Q{=f@(nA`F~2UO+JdNrsFapn7`r|FuZ z0ZjZ_e1NJ`WoE7gV5+A^9v@7IzEgC<)0uD9yMBU%A8acHxR9_E45Icu6P3UM{a1Ld z16d0Ucxy?eo84MrU1{DD5iZOMkT7H|A+N&^0yKV+V4|)rMdV+iz|+;dcK|x|a_Obo1tXy*8*y9V@?%>li-!=FGtJ!FIkatRr-dXovs9q3Jwc^g|9f|BMm}b|~XTd?1ZRGcUK@e#{bhcIY7)DZ27@UGZ zIdqnjo;xnT0kUmKCpXZZtrZ?3?1VkNIMJeg5>Nm_9;R}=nlA%RQ(^etB1l7EA|Bar$30GJ<5?I#(>`LG096?8 zJ5}Iv!*}+hW34dLG@{lY(B)dj-Bx1A1u;5Lr0HQC98eDpj=qdK8N0Uv?t@i^iU_AU z5c))9lzopR512bYh>xEEG^?eNKU&|5;oAMA>Uj}WYsq9lCLv60cMn(|c2u|koIVPW z2Pueb5Y)m;(Qx;hGT=y1bz&k3SjeUD!id8+PL4tVap5`^ zkr>vDhe}DmnRYw?$|k4wB~S$CVJ>7v7bAE#0a{K;%RIR@)#3WJzFe(S*P^`;_TA-m zEP{(>mqU<4LHgIFkWE)SFZ;L|8!PbO1jNuTQZ5*RI53W>S3q>IzYggsx0ASKpWL`uaMN+dJs}6N_j_l;XL}jp+>_nw ze61=&9s|h^!L7@w_f@WqCi+hn&`t+mkK3uSsOpc~dM$@Wz?HW@3O|g}jo~e-zccXU zZ)t|Ak50FTUfHqnvNMRHEwCb(JVde1^eQ~nP_W%si9tR)*N0D|v;6>2BK@m=B=a!(jrT5+Y&MMJ_MVS1yX)OgCA=? z#&IN1{A!n`@977DDlSZg>Vf3U*gqaes}gkd@}k{|nf=eD!M5vA_;+4IL~@47Dez}v Ma9#hEF7p2W1EktFwg3PC literal 0 HcmV?d00001 diff --git a/task-impls/README.md b/task-impls/README.md new file mode 100644 index 0000000000..9dd0c2ae81 --- /dev/null +++ b/task-impls/README.md @@ -0,0 +1,6 @@ +HotShot uses an event-based architecture. This architecture is made of 4 main tasks: Network Task, View Sync Task, Consensus Task, and DA Task. The Network Task handles all incoming and outgoing messages. It forwards incoming messages to the correct task and listens for outgoing messages from the other tasks. The View Sync Task coordinates the view sync protocol. It listens for timeout events from the Consensus Task. Once a certain threshold of timeouts seen has been reached, the View Sync Task starts the View Sync protocol to bring the network back into agreement on which view it should be in. The Consensus Task handles the core HotShot consensus logic. It manages replicas that listen for quourm propsoals and vote on them, leaders who send quorum proposals, and next leaders who listen for quorum votes and form QCs. The DA task handles the data availability protocol of HotShot. It listens for DA proposals, sends DA proposals, and forms a Data Availability Certificate (DAC) + +A diagram of how events interact with each task is below: +![HotShot Event Architecture](HotShot_event_architecture.png) + +For more information about each event see `./src/events.rs` \ No newline at end of file diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs new file mode 100644 index 0000000000..03fcc387a8 --- /dev/null +++ b/task-impls/src/consensus.rs @@ -0,0 +1,1306 @@ +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + async_primitives::subscribable_rwlock::ReadView, +}; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use bincode::Options; +use bitvec::prelude::*; +use commit::Committable; +use core::time::Duration; +use either::{Either, Left, Right}; +use futures::FutureExt; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, +}; +use hotshot_types::{ + certificate::{DACertificate, QuorumCertificate}, + consensus::{Consensus, View}, + data::{LeafType, ProposalType, QuorumProposal, SequencingLeaf}, + event::{Event, EventType}, + message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, + traits::{ + consensus_api::SequencingConsensusApi, + election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + network::{CommunicationChannel, ConsensusIntentEvent}, + node_implementation::{CommitteeEx, NodeImplementation, NodeType, SequencingQuorumEx}, + signature_key::SignatureKey, + state::ConsensusTime, + Block, + }, + utils::{Terminator, ViewInner}, + vote::{QuorumVote, VoteAccumulator, VoteType}, +}; +use hotshot_utils::bincode::bincode_opts; +use snafu::Snafu; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + sync::Arc, +}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, instrument}; + +/// Error returned by the consensus task +#[derive(Snafu, Debug)] +pub struct ConsensusTaskError {} + +/// The state for the consensus task. Contains all of the information for the implementation +/// of consensus +pub struct SequencingConsensusTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +> where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// The global task registry + pub registry: GlobalRegistry, + /// Reference to consensus. The replica will require a write lock on this. + pub consensus: Arc>>>, + /// View timeout from config. + pub timeout: u64, + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + /// Current block submitted to DA + pub block: TYPES::BlockType, + + /// the quorum exchange + pub quorum_exchange: Arc>, + + /// Consensus api + pub api: A, + + /// the committee exchange + pub committee_exchange: Arc>, + + /// needed to typecheck + pub _pd: PhantomData, + + /// Current Vote collection task, with it's view. + pub vote_collector: Option<(TYPES::Time, usize, usize)>, + + /// Have we already sent a proposal for a particular view + /// since proposal can be sent either on QCFormed event or ViewChange event + // pub proposal_sent: HashMap, + + /// timeout task handle + pub timeout_task: JoinHandle<()>, + + /// Global events stream to publish events + pub event_stream: ChannelStream>, + + /// Event stream to publish events to the application layer + pub output_event_stream: ChannelStream>, + + /// All the DA certs we've received for current and future views. + pub certs: HashMap>, + + /// The most recent proposal we have, will correspond to the current view if Some() + /// Will be none if the view advanced through timeout/view_sync + pub current_proposal: Option>, + + // ED Should replace this with config information since we need it anyway + /// The node's id + pub id: u64, + + /// The most Recent QC we've formed from votes, if we've formed it. + pub qc: Option>, +} + +/// State for the vote collection task. This handles the building of a QC from a votes received +pub struct VoteCollectionTaskState< + TYPES: NodeType, + I: NodeImplementation>, +> where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, +{ + /// the quorum exchange + pub quorum_exchange: Arc>, + #[allow(clippy::type_complexity)] + /// Accumulator for votes + pub accumulator: + Either, QuorumCertificate>, + /// View which this vote collection task is collecting votes in + pub cur_view: TYPES::Time, + /// The event stream shared by all tasks + pub event_stream: ChannelStream>, + /// Node id + pub id: u64, +} + +impl>> TS + for VoteCollectionTaskState +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, +{ +} + +#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] + +async fn vote_handle>>( + mut state: VoteCollectionTaskState, + event: SequencingHotShotEvent, +) -> ( + std::option::Option, + VoteCollectionTaskState, +) +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, +{ + // TODO ED Emit a view change event upon new proposal? + match event { + SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote { + QuorumVote::Yes(vote) => { + // For the case where we receive votes after we've made a certificate + if state.accumulator.is_right() { + return (None, state); + } + + if vote.current_view != state.cur_view { + error!( + "Vote view does not match! vote view is {} current view is {}", + *vote.current_view, *state.cur_view + ); + return (None, state); + } + + let accumulator = state.accumulator.left().unwrap(); + match state.quorum_exchange.accumulate_vote( + &vote.signature.0, + &vote.signature.1, + vote.leaf_commitment, + vote.vote_data, + vote.vote_token.clone(), + state.cur_view, + accumulator, + None, + ) { + Either::Left(acc) => { + state.accumulator = Either::Left(acc); + return (None, state); + } + Either::Right(qc) => { + debug!("QCFormed! {:?}", qc.view_number); + state + .event_stream + .publish(SequencingHotShotEvent::QCFormed(qc.clone())) + .await; + state.accumulator = Either::Right(qc.clone()); + + // No longer need to poll for votes + state + .quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; + + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } + } + QuorumVote::Timeout(_vote) => { + error!("The next leader has received an unexpected vote!"); + return (None, state); + } + QuorumVote::No(_) => { + error!("The next leader has received an unexpected vote!"); + } + }, + SequencingHotShotEvent::Shutdown => { + return (Some(HotShotTaskCompleted::ShutDown), state); + } + _ => {} + } + (None, state) +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > SequencingConsensusTaskState +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] + + async fn genesis_leaf(&self) -> Option> { + let consensus = self.consensus.read().await; + + let Some(genesis_view) = consensus.state_map.get(&TYPES::Time::genesis()) else { + error!("Couldn't find genesis view in state map."); + return None; + }; + let Some(leaf) = genesis_view.get_leaf_commitment() else { + error!( + ?genesis_view, + "Genesis view points to a view without a leaf" + ); + return None; + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + error!("Failed to find genesis leaf."); + return None; + }; + Some(leaf.clone()) + } + + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] + + async fn vote_if_able(&self) -> bool { + if let Some(proposal) = &self.current_proposal { + // ED Need to account for the genesis DA cert + if proposal.justify_qc.is_genesis() && proposal.view_number == TYPES::Time::new(1) { + // warn!("Proposal is genesis!"); + + let view = TYPES::Time::new(*proposal.view_number); + let vote_token = self.quorum_exchange.make_vote_token(view); + + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for consensus committee on {:?}", view); + } + Ok(Some(vote_token)) => { + let justify_qc = proposal.justify_qc.clone(); + let parent = if justify_qc.is_genesis() { + self.genesis_leaf().await + } else { + self.consensus + .read() + .await + .saved_leaves + .get(&justify_qc.leaf_commitment()) + .cloned() + }; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.leaf_commitment() + ); + return false; + }; + let parent_commitment = parent.commit(); + + let leaf: SequencingLeaf<_> = SequencingLeaf { + view_number: view, + height: proposal.height, + justify_qc: proposal.justify_qc.clone(), + parent_commitment, + deltas: Right(proposal.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), + }; + + let message: GeneralConsensusMessage = + self.quorum_exchange.create_yes_message( + proposal.justify_qc.commit(), + leaf.commit(), + view, + vote_token, + ); + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.current_view() + ); + self.event_stream + .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) + .await; + return true; + } + } + } + } + + // Only vote if you have the DA cert + // ED Need to update the view number this is stored under? + if let Some(cert) = self.certs.get(&(proposal.get_view_number())) { + let view = cert.view_number; + let vote_token = self.quorum_exchange.make_vote_token(view); + // TODO: do some of this logic without the vote token check, only do that when voting. + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for consensus committee on {:?}", view); + } + Ok(Some(vote_token)) => { + let justify_qc = proposal.justify_qc.clone(); + let parent = if justify_qc.is_genesis() { + self.genesis_leaf().await + } else { + self.consensus + .read() + .await + .saved_leaves + .get(&justify_qc.leaf_commitment()) + .cloned() + }; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.leaf_commitment() + ); + return false; + }; + let parent_commitment = parent.commit(); + + let leaf: SequencingLeaf<_> = SequencingLeaf { + view_number: view, + height: proposal.height, + justify_qc: proposal.justify_qc.clone(), + parent_commitment, + deltas: Right(proposal.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), + }; + let message: GeneralConsensusMessage= + // Validate the DAC. + if self + .committee_exchange + .is_valid_cert(cert, proposal.block_commitment) + { + self.quorum_exchange.create_yes_message( + proposal.justify_qc.commit(), + leaf.commit(), + cert.view_number, + vote_token) + } else { + error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert.view_number, self.cur_view ); + return false; + + }; + + // TODO ED Only publish event in vote if able + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.current_view() + ); + self.event_stream + .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) + .await; + return true; + } + } + } + } + debug!( + "Couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", + *proposal.get_view_number(), + ); + return false; + } + debug!( + "Could not vote because we don't have a proposal yet for view {}", + *self.cur_view + ); + false + } + + /// Must only update the view and GC if the view actually changes + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus update view", level = "error")] + + async fn update_view(&mut self, new_view: TYPES::Time) -> bool { + if *self.cur_view < *new_view { + debug!( + "Updating view from {} to {} in consensus task", + *self.cur_view, *new_view + ); + + // Remove old certs, we won't vote on past views + // TODO ED Put back in once we fix other errors + // for view in *self.cur_view..*new_view - 1 { + // let v = TYPES::Time::new(view); + // self.certs.remove(&v); + // } + self.cur_view = new_view; + self.current_proposal = None; + + // Start polling for proposals for the new view + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view)) + .await; + + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view)) + .await; + + if self.quorum_exchange.is_leader(self.cur_view + 1) { + debug!("Polling for quorum votes for view {}", *self.cur_view); + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view)) + .await; + } + + self.event_stream + .publish(SequencingHotShotEvent::ViewChange(new_view)) + .await; + + // Spawn a timeout task if we did actually update view + let timeout = self.timeout; + self.timeout_task = async_spawn({ + let stream = self.event_stream.clone(); + let view_number = self.cur_view; + async move { + async_sleep(Duration::from_millis(timeout)).await; + stream + .publish(SequencingHotShotEvent::Timeout(TYPES::Time::new( + *view_number, + ))) + .await; + } + }); + + return true; + } + false + } + + /// Handles a consensus event received on the event stream + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] + pub async fn handle_event(&mut self, event: SequencingHotShotEvent) { + match event { + SequencingHotShotEvent::QuorumProposalRecv(proposal, sender) => { + debug!( + "Receved Quorum Propsoal for view {}", + *proposal.data.view_number + ); + + let view = proposal.data.get_view_number(); + if view < self.cur_view { + error!("view too high"); + return; + } + + let view_leader_key = self.quorum_exchange.get_leader(view); + if view_leader_key != sender { + error!("Leader key does not match key in proposal"); + return; + } + + self.current_proposal = Some(proposal.data.clone()); + + let vote_token = self.quorum_exchange.make_vote_token(view); + // TODO: do some of this logic without the vote token check, only do that when voting. + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for consensus committee on {:?}", view); + } + Ok(Some(vote_token)) => { + debug!("We were chosen for consensus committee on {:?}", view); + let consensus = self.consensus.upgradable_read().await; + let message; + + // TODO ED Insert TC logic here + + // Construct the leaf. + let justify_qc = proposal.data.justify_qc; + let parent = if justify_qc.is_genesis() { + self.genesis_leaf().await + } else { + consensus + .saved_leaves + .get(&justify_qc.leaf_commitment()) + .cloned() + }; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.leaf_commitment() + ); + return; + }; + let parent_commitment = parent.commit(); + let leaf: SequencingLeaf<_> = SequencingLeaf { + view_number: view, + height: proposal.data.height, + justify_qc: justify_qc.clone(), + parent_commitment, + deltas: Right(proposal.data.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: sender.to_bytes(), + }; + let justify_qc_commitment = justify_qc.commit(); + let leaf_commitment = leaf.commit(); + + // Validate the `justify_qc`. + if !self + .quorum_exchange + .is_valid_cert(&justify_qc, parent_commitment) + { + error!("Invalid justify_qc in proposal!. parent commitment is {:?} justify qc is {:?}", parent_commitment, justify_qc.clone()); + + message = self.quorum_exchange.create_no_message::( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + // Validate the `height`. + else if leaf.height != parent.height + 1 { + error!( + "Incorrect height in proposal (expected {}, got {})", + parent.height + 1, + leaf.height + ); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + // Validate the signature. + else if !view_leader_key + .validate(&proposal.signature, leaf_commitment.as_ref()) + { + error!(?proposal.signature, "Could not verify proposal."); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + // Create a positive vote if either liveness or safety check + // passes. + else { + // Liveness check. + let liveness_check = justify_qc.view_number > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.view_number, + Terminator::Inclusive(consensus.locked_view), + false, + |leaf| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + if let Err(e) = outcome { + self.api.send_view_error(view, Arc::new(e)).await; + } + + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + error!("Failed safety check and liveness check"); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } else { + // Generate a message with yes vote. + message = self.quorum_exchange.create_yes_message( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + } + + let high_qc = leaf.justify_qc.clone(); + let mut new_anchor_view = consensus.last_decided_view; + let mut new_locked_view = consensus.locked_view; + let mut last_view_number_visited = view; + let mut new_commit_reached: bool = false; + let mut new_decide_reached = false; + let mut new_decide_qc = None; + let mut leaf_views = Vec::new(); + let mut included_txns = HashSet::new(); + let old_anchor_view = consensus.last_decided_view; + let parent_view = leaf.justify_qc.view_number; + let mut current_chain_length = 0usize; + if parent_view + 1 == view { + current_chain_length += 1; + if let Err(e) = consensus.visit_leaf_ancestors( + parent_view, + Terminator::Exclusive(old_anchor_view), + true, + |leaf| { + if !new_decide_reached { + if last_view_number_visited == leaf.view_number + 1 { + last_view_number_visited = leaf.view_number; + current_chain_length += 1; + if current_chain_length == 2 { + new_locked_view = leaf.view_number; + new_commit_reached = true; + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + new_decide_qc = Some(leaf.justify_qc.clone()); + } else if current_chain_length == 3 { + new_anchor_view = leaf.view_number; + new_decide_reached = true; + } + } else { + // nothing more to do here... we don't have a new chain extension + return false; + } + } + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above + if new_decide_reached { + let mut leaf = leaf.clone(); + + // If the full block is available for this leaf, include it in the leaf + // chain that we send to the client. + if let Some(block) = + consensus.saved_blocks.get(leaf.get_deltas_commitment()) + { + if let Err(err) = leaf.fill_deltas(block.clone()) { + error!("unable to fill leaf {} with block {}, block will not be available: {}", + leaf.commit(), block.commit(), err); + } + } + + leaf_views.push(leaf.clone()); + match &leaf.deltas { + Left(block) => { + let txns = block.contained_transactions(); + for txn in txns { + included_txns.insert(txn); + } + } + Right(_) => {} + } + } + true + }, + ) { + error!("publishing view error"); + self.output_event_stream.publish(Event { + view_number: view, + event: EventType::Error { error: e.into() }, + }).await; + } + } + + let included_txns_set: HashSet<_> = if new_decide_reached { + included_txns + } else { + HashSet::new() + }; + + // promote lock here to add proposal to statemap + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + if high_qc.view_number > consensus.high_qc.view_number { + consensus.high_qc = high_qc; + } + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + if new_commit_reached { + consensus.locked_view = new_locked_view; + } + #[allow(clippy::cast_precision_loss)] + if new_decide_reached { + let mut included_txn_size = 0; + let mut included_txn_count = 0; + let txns = consensus.transactions.cloned().await; + // store transactions in this block we never added to our transactions. + let _ = included_txns_set.iter().map(|hash| { + if !txns.contains_key(hash) { + consensus.seen_transactions.insert(*hash); + } + }); + drop(txns); + consensus + .transactions + .modify(|txns| { + *txns = txns + .drain() + .filter(|(txn_hash, txn)| { + if included_txns_set.contains(txn_hash) { + included_txn_count += 1; + included_txn_size += bincode_opts() + .serialized_size(txn) + .unwrap_or_default(); + false + } else { + true + } + }) + .collect(); + }) + .await; + + consensus + .metrics + .outstanding_transactions + .update(-included_txn_count); + consensus + .metrics + .outstanding_transactions_memory_size + .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); + + debug!("about to publish decide"); + let decide_sent = self.output_event_stream.publish(Event { + view_number: consensus.last_decided_view, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(new_decide_qc.unwrap()), + block_size: Some(included_txns_set.len().try_into().unwrap()), + }, + }); + let old_anchor_view = consensus.last_decided_view; + consensus + .collect_garbage(old_anchor_view, new_anchor_view) + .await; + consensus.last_decided_view = new_anchor_view; + consensus.invalid_qc = 0; + + // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. + if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { + error!("Could not insert new anchor into the storage API: {:?}", e); + } + + debug!("Sending Decide for view {:?}", consensus.last_decided_view); + debug!("Decided txns len {:?}", included_txns_set.len()); + decide_sent.await; + } + + let new_view = self.current_proposal.clone().unwrap().view_number + 1; + // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = self.quorum_exchange.is_leader(new_view) + && consensus.high_qc.view_number + == self.current_proposal.clone().unwrap().view_number; + // todo get rid of this clone + let qc = consensus.high_qc.clone(); + + drop(consensus); + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + self.publish_proposal_if_able(qc).await; + } + if !self.vote_if_able().await { + // TOOD ED This means we publish the proposal without updating our own view, which doesn't seem right + return; + } + + // ED Only do this GC if we are able to vote + for v in (*self.cur_view)..=(*view) { + let time = TYPES::Time::new(v); + self.certs.remove(&time); + } + + // Update current view and publish a view change event so other tasks also update + self.update_view(new_view).await; + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!("Sending vote to next leader {:?}", vote); + }; + } + } + } + SequencingHotShotEvent::QuorumVoteRecv(vote) => { + debug!("Received quroum vote: {:?}", vote.current_view()); + + if !self.quorum_exchange.is_leader(vote.current_view() + 1) { + error!( + "We are not the leader for view {} are we the leader for view + 1? {}", + *vote.current_view() + 1, + self.quorum_exchange.is_leader(vote.current_view() + 2) + ); + return; + } + + match vote { + QuorumVote::Yes(vote) => { + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = if let Some((collection_view, collection_task, _)) = + &self.vote_collector + { + if vote.current_view > *collection_view { + // ED I think we'd want to let that task timeout to avoid a griefing vector + self.registry.shutdown_task(*collection_task).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; + + let acc = VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.quorum_exchange.success_threshold(), + failure_threshold: self.quorum_exchange.failure_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + }; + + // Todo check if we are the leader + let accumulator = self.quorum_exchange.accumulate_vote( + &vote.clone().signature.0, + &vote.clone().signature.1, + vote.clone().leaf_commitment, + vote.clone().vote_data.clone(), + vote.clone().vote_token.clone(), + vote.clone().current_view, + acc, + None, + ); + + if vote.current_view > collection_view { + let state = VoteCollectionTaskState { + quorum_exchange: self.quorum_exchange.clone(), + accumulator, + cur_view: vote.current_view, + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "Quorum Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_)) + })); + + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + + self.vote_collector = Some((vote.current_view, id, stream_id)); + + let _task = async_spawn(async move { + VoteCollectionTypes::build(builder).launch().await; + }); + debug!("Starting vote handle for view {:?}", vote.current_view); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message( + stream_id, + SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote)), + ) + .await; + } + } + QuorumVote::Timeout(_) | QuorumVote::No(_) => { + error!("The next leader has received an unexpected vote!"); + } + } + } + SequencingHotShotEvent::QCFormed(qc) => { + debug!("QC Formed event happened!"); + + let mut consensus = self.consensus.write().await; + consensus.high_qc = qc.clone(); + + drop(consensus); + + // View may have already been updated by replica if they voted for this QC + // TODO ED We should separate leader state from replica state, they shouldn't share the same view + // Leader task should only run for a specific view, and never update its current view, but instead spawn another task + // let _res = self.update_view(qc.view_number + 1).await; + + // Start polling for votes for the next view + // if _res { + // if self.quorum_exchange.is_leader(qc.view_number + 2) { + // self.quorum_exchange + // .network() + // .inject_consensus_info( + // (ConsensusIntentEvent::PollForVotes(*qc.view_number + 1)), + // ) + // .await; + // } + // } + + // So we don't create a QC on the first view unless we are the leader + debug!( + "Attempting to publish proposal after forming a QC for view {}", + *qc.view_number + ); + + if self.publish_proposal_if_able(qc.clone()).await { + self.update_view(qc.view_number + 1).await; + } + } + SequencingHotShotEvent::DACRecv(cert) => { + debug!("DAC Recved for view ! {}", *cert.view_number); + + let view = cert.view_number; + self.certs.insert(view, cert); + + // TODO Make sure we aren't voting for an arbitrarily old round for no reason + if self.vote_if_able().await { + self.update_view(view + 1).await; + } + } + + SequencingHotShotEvent::ViewChange(new_view) => { + debug!("View Change event for view {}", *new_view); + + let old_view_number = self.cur_view; + + // update the view in state to the one in the message + // Publish a view change event to the application + if !self.update_view(new_view).await { + debug!("view not updated"); + return; + } + + self.output_event_stream + .publish(Event { + view_number: old_view_number, + event: EventType::ViewFinished { + view_number: old_view_number, + }, + }) + .await; + + debug!("View changed to {}", *new_view); + + // ED Need to update the view here? What does otherwise? + // self.update_view(qc.view_number + 1).await; + // So we don't create a QC on the first view unless we are the leader + if !self.quorum_exchange.is_leader(self.cur_view) { + return; + } + + let consensus = self.consensus.read().await; + let qc = consensus.high_qc.clone(); + drop(consensus); + if !self.publish_proposal_if_able(qc).await { + error!( + "Failed to publish proposal on view change. View = {:?}", + self.cur_view + ); + } + } + SequencingHotShotEvent::Timeout(view) => { + // The view sync module will handle updating views in the case of timeout + // TODO ED In the future send a timeout vote + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .await; + debug!( + "We received a timeout event in the consensus task for view {}!", + *view + ); + } + SequencingHotShotEvent::SendDABlockData(block) => { + // ED TODO Should make sure this is actually the most recent block + self.block = block; + } + _ => {} + } + } + + /// Sends a proposal if possible from the high qc we have + pub async fn publish_proposal_if_able(&self, qc: QuorumCertificate) -> bool { + // TODO ED This should not be qc view number + 1 + if !self.quorum_exchange.is_leader(qc.view_number + 1) { + error!( + "Somehow we formed a QC but are not the leader for the next view {:?}", + qc.view_number + 1 + ); + return false; + } + + let consensus = self.consensus.read().await; + let parent_view_number = &consensus.high_qc.view_number(); + let mut reached_decided = false; + + let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + // This should have been added by the replica? + error!("Couldn't find parent view in state map, waiting for replica to see proposal\n parent view number: {}", **parent_view_number); + return false; + }; + // Leaf hash in view inner does not match high qc hash - Why? + let Some(leaf_commitment) = parent_view.get_leaf_commitment() else { + error!( + ?parent_view_number, + ?parent_view, + "Parent of high QC points to a view without a proposal" + ); + return false; + }; + if leaf_commitment != consensus.high_qc.leaf_commitment() { + debug!( + "They don't equal: {:?} {:?}", + leaf_commitment, + consensus.high_qc.leaf_commitment() + ); + } + let Some(leaf) = consensus.saved_leaves.get(&leaf_commitment) else { + error!("Failed to find high QC of parent."); + return false; + }; + if leaf.view_number == consensus.last_decided_view { + reached_decided = true; + } + + let parent_leaf = leaf.clone(); + + let original_parent_hash = parent_leaf.commit(); + + let mut next_parent_hash = original_parent_hash; + + // Walk back until we find a decide + if !reached_decided { + debug!("not reached decide fro view {:?}", self.cur_view); + while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { + if next_parent_leaf.view_number <= consensus.last_decided_view { + break; + } + next_parent_hash = next_parent_leaf.parent_commitment; + } + debug!("updated saved leaves"); + // TODO do some sort of sanity check on the view number that it matches decided + } + + let block_commitment = self.block.commit(); + if block_commitment == TYPES::BlockType::new().commit() { + debug!("Block is generic block! {:?}", self.cur_view); + } + + let leaf = SequencingLeaf { + view_number: *parent_view_number + 1, + height: parent_leaf.height + 1, + justify_qc: consensus.high_qc.clone(), + parent_commitment: parent_leaf.commit(), + // Use the block commitment rather than the block, so that the replica can construct + // the same leaf with the commitment. + deltas: Right(block_commitment), + rejected: vec![], + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.api.public_key().to_bytes(), + }; + + let signature = self + .quorum_exchange + .sign_validating_or_commitment_proposal::(&leaf.commit()); + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. + let proposal = QuorumProposal { + block_commitment, + view_number: leaf.view_number, + height: leaf.height, + justify_qc: consensus.high_qc.clone(), + // TODO ED Update this to be the actual TC if there is one + timeout_certificate: None, + proposer_id: leaf.proposer_id, + dac: None, + }; + + let message = Proposal { + data: proposal, + signature, + }; + debug!("Sending proposal for view {:?} \n {:?}", self.cur_view, ""); + + self.event_stream + .publish(SequencingHotShotEvent::QuorumProposalSend( + message, + self.quorum_exchange.public_key().clone(), + )) + .await; + true + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I>, + > TS for SequencingConsensusTaskState +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ +} + +/// Type allias for consensus' vote collection task +pub type VoteCollectionTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + VoteCollectionTaskState, +>; + +/// Type alias for Consensus task +pub type ConsensusTaskTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + SequencingConsensusTaskState, +>; + +/// Event handle for consensus +pub async fn sequencing_consensus_handle< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +>( + event: SequencingHotShotEvent, + mut state: SequencingConsensusTaskState, +) -> ( + std::option::Option, + SequencingConsensusTaskState, +) +where + SequencingQuorumEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>, + Commitment = SequencingLeaf, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + if let SequencingHotShotEvent::Shutdown = event { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + state.handle_event(event).await; + (None, state) + } +} + +/// Filter for consensus, returns true for event types the consensus task subscribes to. +pub fn consensus_event_filter>( + event: &SequencingHotShotEvent, +) -> bool { + matches!( + event, + SequencingHotShotEvent::QuorumProposalRecv(_, _) + | SequencingHotShotEvent::QuorumVoteRecv(_) + | SequencingHotShotEvent::QCFormed(_) + | SequencingHotShotEvent::DACRecv(_) + | SequencingHotShotEvent::ViewChange(_) + | SequencingHotShotEvent::SendDABlockData(_) + | SequencingHotShotEvent::Timeout(_) + | SequencingHotShotEvent::Shutdown, + ) +} diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs new file mode 100644 index 0000000000..56d4caeac4 --- /dev/null +++ b/task-impls/src/da.rs @@ -0,0 +1,681 @@ +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::{ + art::{async_spawn, async_timeout}, + async_primitives::subscribable_rwlock::ReadView, +}; +use async_lock::RwLock; +use bincode::config::Options; +use bitvec::prelude::*; +use commit::Committable; +use either::{Either, Left, Right}; +use futures::FutureExt; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, +}; +use hotshot_types::{ + certificate::DACertificate, + consensus::{Consensus, View}, + data::{DAProposal, ProposalType, SequencingLeaf}, + message::{CommitteeConsensusMessage, Message, Proposal, SequencingMessage}, + traits::{ + consensus_api::SequencingConsensusApi, + election::{CommitteeExchangeType, ConsensusExchange, Membership}, + network::{CommunicationChannel, ConsensusIntentEvent}, + node_implementation::{CommitteeEx, NodeImplementation, NodeType}, + signature_key::SignatureKey, + state::ConsensusTime, + Block, State, + }, + utils::ViewInner, + vote::VoteAccumulator, +}; +use hotshot_utils::bincode::bincode_opts; +use snafu::Snafu; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Instant, +}; +use tracing::{debug, error, instrument, warn}; + +#[derive(Snafu, Debug)] +/// Error type for consensus tasks +pub struct ConsensusTaskError {} + +/// Tracks state of a DA task +pub struct DATaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +> where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// The state's api + pub api: A, + /// Global registry task for the state + pub registry: GlobalRegistry, + + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + // pub transactions: Arc>>, + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + + /// the committee exchange + pub committee_exchange: Arc>, + + /// The view and ID of the current vote collection task, if there is one. + pub vote_collector: Option<(TYPES::Time, usize, usize)>, + + /// Global events stream to publish events + pub event_stream: ChannelStream>, + + /// This state's ID + pub id: u64, +} + +/// Struct to maintain DA Vote Collection task state +pub struct DAVoteCollectionTaskState< + TYPES: NodeType, + I: NodeImplementation>, +> where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// the committee exchange + pub committee_exchange: Arc>, + /// the vote accumulator + pub accumulator: + Either, DACertificate>, + // TODO ED Make this just "view" since it is only for this task + /// the current view + pub cur_view: TYPES::Time, + /// event stream for channel events + pub event_stream: ChannelStream>, + /// the id of this task state + pub id: u64, +} + +impl>> TS + for DAVoteCollectionTaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ +} + +#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "DA Vote Collection Task", level = "error")] +async fn vote_handle>>( + mut state: DAVoteCollectionTaskState, + event: SequencingHotShotEvent, +) -> ( + std::option::Option, + DAVoteCollectionTaskState, +) +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + match event { + SequencingHotShotEvent::DAVoteRecv(vote) => { + debug!("DA vote recv, collection task {:?}", vote.current_view); + // panic!("Vote handle received DA vote for view {}", *vote.current_view); + + // For the case where we receive votes after we've made a certificate + if state.accumulator.is_right() { + debug!("DA accumulator finished view: {:?}", state.cur_view); + return (None, state); + } + + let accumulator = state.accumulator.left().unwrap(); + match state.committee_exchange.accumulate_vote( + &vote.signature.0, + &vote.signature.1, + vote.block_commitment, + vote.vote_data, + vote.vote_token.clone(), + state.cur_view, + accumulator, + None, + ) { + Left(acc) => { + state.accumulator = Either::Left(acc); + // debug!("Not enough DA votes! "); + return (None, state); + } + Right(dac) => { + debug!("Sending DAC! {:?}", dac.view_number); + state + .event_stream + .publish(SequencingHotShotEvent::DACSend( + dac.clone(), + state.committee_exchange.public_key().clone(), + )) + .await; + + state.accumulator = Right(dac.clone()); + state + .committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *dac.view_number, + )) + .await; + + // Return completed at this point + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } + } + SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), + _ => {} + } + (None, state) +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > DATaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// main task event handler + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] + + pub async fn handle_event( + &mut self, + event: SequencingHotShotEvent, + ) -> Option { + match event { + SequencingHotShotEvent::TransactionsRecv(transactions) => { + // TODO ED Add validation checks + + let mut consensus = self.consensus.write().await; + consensus + .get_transactions() + .modify(|txns| { + for transaction in transactions { + let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); + + // If we didn't already know about this transaction, update our mempool metrics. + if !consensus.seen_transactions.remove(&transaction.commit()) + && txns.insert(transaction.commit(), transaction).is_none() + { + consensus.metrics.outstanding_transactions.update(1); + consensus + .metrics + .outstanding_transactions_memory_size + .update(i64::try_from(size).unwrap_or_else(|e| { + warn!("Conversion failed: {e}. Using the max value."); + i64::MAX + })); + } + } + }) + .await; + + return None; + } + SequencingHotShotEvent::DAProposalRecv(proposal, sender) => { + debug!( + "DA proposal received for view: {:?}", + proposal.data.get_view_number() + ); + // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view + let view = proposal.data.get_view_number(); + + // Allow a DA proposal that is one view older, in case we have voted on a quorum + // proposal and updated the view. + // `self.cur_view` should be at least 1 since there is a view change before getting + // the `DAProposalRecv` event. Otherewise, the view number subtraction below will + // cause an overflow error. + if view < self.cur_view - 1 { + warn!("Throwing away DA proposal that is more than one view older"); + return None; + } + + debug!( + "Got a DA block with {} transactions!", + proposal.data.deltas.contained_transactions().len() + ); + let block_commitment = proposal.data.deltas.commit(); + + // ED Is this the right leader? + let view_leader_key = self.committee_exchange.get_leader(view); + if view_leader_key != sender { + error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); + return None; + } + + if !view_leader_key.validate(&proposal.signature, block_commitment.as_ref()) { + error!("Could not verify proposal."); + return None; + } + + let vote_token = self.committee_exchange.make_vote_token(view); + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for DA committee on {:?}", view); + } + Ok(Some(vote_token)) => { + // Generate and send vote + let message = self.committee_exchange.create_da_message( + block_commitment, + view, + vote_token, + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + if let CommitteeConsensusMessage::DAVote(vote) = message { + debug!("Sending vote to the DA leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::DAVoteSend(vote)) + .await; + } + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: block_commitment, + }, + }); + + // Record the block we have promised to make available. + consensus.saved_blocks.insert(proposal.data.deltas); + } + } + } + SequencingHotShotEvent::DAVoteRecv(vote) => { + // warn!( + // "DA vote recv, Main Task {:?}, key: {:?}", + // vote.current_view, + // self.committee_exchange.public_key() + // ); + // Check if we are the leader and the vote is from the sender. + let view = vote.current_view; + if !self.committee_exchange.is_leader(view) { + error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.committee_exchange.is_leader(view + 1)); + return None; + } + + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = + if let Some((collection_view, collection_id, _)) = &self.vote_collector { + // TODO: Is this correct for consecutive leaders? + if view > *collection_view { + // warn!("shutting down for view {:?}", collection_view); + self.registry.shutdown_task(*collection_id).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; + let acc = VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.committee_exchange.success_threshold(), + failure_threshold: self.committee_exchange.failure_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.committee_exchange.total_nodes()], + }; + let accumulator = self.committee_exchange.accumulate_vote( + &vote.clone().signature.0, + &vote.clone().signature.1, + vote.clone().block_commitment, + vote.clone().vote_data.clone(), + vote.clone().vote_token.clone(), + vote.clone().current_view, + acc, + None, + ); + if view > collection_view { + let state = DAVoteCollectionTaskState { + committee_exchange: self.committee_exchange.clone(), + accumulator, + cur_view: view, + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "DA Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!(event, SequencingHotShotEvent::DAVoteRecv(_)) + })); + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + let _task = + async_spawn( + async move { DAVoteCollectionTypes::build(builder).launch().await }, + ); + self.vote_collector = Some((view, id, stream_id)); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message(stream_id, SequencingHotShotEvent::DAVoteRecv(vote)) + .await; + }; + } + // TODO ED Update high QC through QCFormed event + SequencingHotShotEvent::ViewChange(view) => { + if *self.cur_view >= *view { + return None; + } + + if *view - *self.cur_view > 1 { + error!("View changed by more than 1"); + } + self.cur_view = view; + // Inject view info into network + // ED I think it is possible that you receive a quorum proposal, vote on it and update your view before the da leader has sent their proposal, and therefore you skip polling for this view? + + // TODO ED Only poll if you are on the committee + let is_da = self + .committee_exchange + .membership() + .get_committee(self.cur_view + 1) + .contains(self.committee_exchange.public_key()); + + if is_da { + debug!("Polling for DA proposals for view {}", *self.cur_view + 1); + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForProposal( + *self.cur_view + 1, + )) + .await; + } + if self.committee_exchange.is_leader(self.cur_view + 3) { + debug!("Polling for transactions for view {}", *self.cur_view + 3); + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForTransactions( + *self.cur_view + 3, + )) + .await; + } + + // TODO ED Make this a new task so it doesn't block main DA task + + // If we are not the next leader (DA leader for this view) immediately exit + if !self.committee_exchange.is_leader(self.cur_view + 1) { + // panic!("We are not the DA leader for view {}", *self.cur_view + 1); + return None; + } + debug!("Polling for DA votes for view {}", *self.cur_view + 1); + + // Start polling for DA votes for the "next view" + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view + 1)) + .await; + + // ED Copy of parent_leaf() function from sequencing leader + + let consensus = self.consensus.read().await; + let parent_view_number = &consensus.high_qc.view_number; + + let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + error!( + "Couldn't find high QC parent in state map. Parent view {:?}", + parent_view_number + ); + return None; + }; + let Some(leaf) = parent_view.get_leaf_commitment() else { + error!( + ?parent_view_number, + ?parent_view, + "Parent of high QC points to a view without a proposal" + ); + return None; + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + error!("Failed to find high QC parent."); + return None; + }; + let parent_leaf = leaf.clone(); + + // Prepare the DA Proposal + // let Some(parent_leaf) = self.parent_leaf().await else { + // warn!("Couldn't find high QC parent in state map."); + // return None; + // }; + + drop(consensus); + + let mut block = ::StateType::next_block(None); + let txns = self.wait_for_transactions(parent_leaf).await?; + + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions( + *self.cur_view + 1, + )) + .await; + + for txn in txns { + if let Ok(new_block) = block.add_transaction_raw(&txn) { + block = new_block; + continue; + } + } + + let signature = self.committee_exchange.sign_da_proposal(&block.commit()); + let data: DAProposal = DAProposal { + deltas: block.clone(), + // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? + view_number: self.cur_view + 1, + }; + debug!("Sending DA proposal for view {:?}", data.view_number); + + // let message = SequencingMessage::(Right( + // CommitteeConsensusMessage::DAProposal(Proposal { data, signature }), + // )); + let message = Proposal { data, signature }; + // Brodcast DA proposal + // TODO ED We should send an event to do this, but just getting it to work for now + + self.event_stream + .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) + .await; + // if let Err(e) = self.api.send_da_broadcast(message.clone()).await { + // consensus.metrics.failed_to_send_messages.add(1); + // warn!(?message, ?e, "Could not broadcast leader proposal"); + // } else { + // consensus.metrics.outgoing_broadcast_messages.add(1); + // } + self.event_stream + .publish(SequencingHotShotEvent::DAProposalSend( + message, + self.committee_exchange.public_key().clone(), + )) + .await; + + return None; + } + + SequencingHotShotEvent::Timeout(view) => { + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .await; + } + + SequencingHotShotEvent::Shutdown => { + return Some(HotShotTaskCompleted::ShutDown); + } + _ => {} + } + None + } + + /// return None if we can't get transactions + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Vote Collection Task", level = "error")] + + async fn wait_for_transactions( + &self, + parent_leaf: SequencingLeaf, + ) -> Option> { + let task_start_time = Instant::now(); + + // let parent_leaf = self.parent_leaf().await?; + let previous_used_txns = match parent_leaf.deltas { + Either::Left(block) => block.contained_transactions(), + Either::Right(_commitment) => HashSet::new(), + }; + + let consensus = self.consensus.read().await; + + let receiver = consensus.transactions.subscribe().await; + + loop { + let all_txns = consensus.transactions.cloned().await; + debug!("Size of transactions: {}", all_txns.len()); + let unclaimed_txns: Vec<_> = all_txns + .iter() + .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) + .collect(); + + let time_past = task_start_time.elapsed(); + if unclaimed_txns.len() < self.api.min_transactions() + && (time_past < self.api.propose_max_round_time()) + { + let duration = self.api.propose_max_round_time() - time_past; + let result = async_timeout(duration, receiver.recv()).await; + match result { + Err(_) => { + // Fall through below to updating new block + error!( + "propose_max_round_time passed, sending transactions we have so far" + ); + } + Ok(Err(e)) => { + // Something unprecedented is wrong, and `transactions` has been dropped + error!("Channel receiver error for SubscribableRwLock {:?}", e); + return None; + } + Ok(Ok(_)) => continue, + } + } + break; + } + let all_txns = consensus.transactions.cloned().await; + let txns: Vec = all_txns + .iter() + .filter_map(|(txn_hash, txn)| { + if previous_used_txns.contains(txn_hash) { + None + } else { + Some(txn.clone()) + } + }) + .collect(); + Some(txns) + } + + /// Filter the DA event. + pub fn filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::DAProposalRecv(_, _) + | SequencingHotShotEvent::DAVoteRecv(_) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::TransactionsRecv(_) + | SequencingHotShotEvent::Timeout(_) + | SequencingHotShotEvent::ViewChange(_) + ) + } +} + +/// task state implementation for DA Task +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TS for DATaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ +} + +/// Type alias for DA Vote Collection Types +pub type DAVoteCollectionTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + DAVoteCollectionTaskState, +>; + +/// Type alias for DA Task Types +pub type DATaskTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + DATaskState, +>; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs new file mode 100644 index 0000000000..98819108a4 --- /dev/null +++ b/task-impls/src/events.rs @@ -0,0 +1,65 @@ +use hotshot_types::{ + certificate::{DACertificate, QuorumCertificate}, + data::DAProposal, + message::Proposal, + traits::node_implementation::{ + NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, + }, + vote::{DAVote, QuorumVote, ViewSyncVote}, +}; + +use crate::view_sync::ViewSyncPhase; + +/// All of the possible events that can be passed between Sequecning `HotShot` tasks +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub enum SequencingHotShotEvent> { + /// Shutdown the task + Shutdown, + /// A quorum proposal has been received from the network; handled by the consensus task + QuorumProposalRecv(Proposal>, TYPES::SignatureKey), + /// A quorum vote has been received from the network; handled by the consensus task + QuorumVoteRecv(QuorumVote), + /// A DA proposal has been received from the network; handled by the DA task + DAProposalRecv(Proposal>, TYPES::SignatureKey), + /// A DA vote has been received by the network; handled by the DA task + DAVoteRecv(DAVote), + /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task + DACRecv(DACertificate), + /// Send a quorum proposal to the network; emitted by the leader in the consensus task + QuorumProposalSend(Proposal>, TYPES::SignatureKey), + /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal + QuorumVoteSend(QuorumVote), + /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task + DAProposalSend(Proposal>, TYPES::SignatureKey), + /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal + DAVoteSend(DAVote), + /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only + QCFormed(QuorumCertificate), + /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task + DACSend(DACertificate, TYPES::SignatureKey), + /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks + ViewChange(TYPES::Time), + /// Timeout for the view sync protocol; emitted by a replica in the view sync task + ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), + /// Send a view sync vote to the network; emitted by a replica in the view sync task + ViewSyncVoteSend(ViewSyncVote), + /// Send a view sync certificate to the network; emitted by a relay in the view sync task + ViewSyncCertificateSend( + Proposal>, + TYPES::SignatureKey, + ), + /// Receive a view sync vote from the network; received by a relay in the view sync task + ViewSyncVoteRecv(ViewSyncVote), + /// Receive a view sync certificate from the network; received by a replica in the view sync task + ViewSyncCertificateRecv(Proposal>), + /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only + ViewSyncTrigger(TYPES::Time), + /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only + Timeout(TYPES::Time), + /// Receive transactions from the network + TransactionsRecv(Vec), + /// Send transactions to the network + TransactionSend(TYPES::Transaction, TYPES::SignatureKey), + /// Event to send DA block data from DA leader to next quorum leader (which should always be the same node); internal event only + SendDABlockData(TYPES::BlockType), +} diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs new file mode 100644 index 0000000000..136093bc3e --- /dev/null +++ b/task-impls/src/harness.rs @@ -0,0 +1,113 @@ +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::art::async_spawn; + +use futures::FutureExt; +use hotshot_task::{ + event_stream::{self, ChannelStream, EventStream}, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, + task_launcher::TaskRunner, +}; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use snafu::Snafu; +use std::{collections::HashMap, future::Future, sync::Arc}; + +/// The state for the test harness task. Keeps track of which events and how many we expect to get +pub struct TestHarnessState> { + /// The expected events we get from the test. Maps an event to the number of times we expect to see it + expected_output: HashMap, usize>, +} + +impl> TS for TestHarnessState {} + +/// Error emitted if the test harness task fails +#[derive(Snafu, Debug)] +pub struct TestHarnessTaskError {} + +/// Type alias for the Test Harness Task +pub type TestHarnessTaskTypes = HSTWithEvent< + TestHarnessTaskError, + SequencingHotShotEvent, + ChannelStream>, + TestHarnessState, +>; + +/// Runs a test by building the task using `build_fn` and then passing it the `input` events +/// and testing the make sure all of the `expected_output` events are seen +/// +/// `event_stream` - if given, will be used to register the task builder. +/// +/// # Panics +/// Panics if any state the test expects is not set. Panicing causes a test failure +#[allow(clippy::implicit_hasher)] +pub async fn run_harness( + input: Vec>, + expected_output: HashMap, usize>, + event_stream: Option>>, + build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, +) where + TYPES: NodeType, + I: NodeImplementation, + Fut: Future, +{ + let task_runner = TaskRunner::new(); + let registry = task_runner.registry.clone(); + let event_stream = event_stream.unwrap_or(event_stream::ChannelStream::new()); + let state = TestHarnessState { expected_output }; + let handler = HandleEvent(Arc::new(move |event, state| { + async move { handle_event(event, state) }.boxed() + })); + let filter = FilterEvent::default(); + let builder = TaskBuilder::>::new("test_harness".to_string()) + .register_event_stream(event_stream.clone(), filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(state) + .register_event_handler(handler); + + let id = builder.get_task_id().unwrap(); + + let task = TestHarnessTaskTypes::build(builder).launch(); + + let task_runner = task_runner.add_task(id, "test_harness".to_string(), task); + let task_runner = build_fn(task_runner, event_stream.clone()).await; + + let runner = async_spawn(async move { task_runner.launch().await }); + + for event in input { + let _ = event_stream.publish(event).await; + } + + let _ = runner.await; +} + +/// Handles an event for the Test Harness Task. If the event is expected, remove it from +/// the `expected_output` in state. If unexpected fail test. +/// +/// # Panics +/// Will panic to fail the test when it receives and unexpected event +#[allow(clippy::needless_pass_by_value)] +pub fn handle_event>( + event: SequencingHotShotEvent, + mut state: TestHarnessState, +) -> ( + std::option::Option, + TestHarnessState, +) { + assert!( + state.expected_output.contains_key(&event), + "Got an unexpected event: {event:?}", + ); + let num_expected = state.expected_output.get_mut(&event).unwrap(); + if *num_expected == 1 { + state.expected_output.remove(&event); + } else { + *num_expected -= 1; + } + + if state.expected_output.is_empty() { + return (Some(HotShotTaskCompleted::ShutDown), state); + } + (None, state) +} diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs new file mode 100644 index 0000000000..aca2cafdea --- /dev/null +++ b/task-impls/src/lib.rs @@ -0,0 +1,30 @@ +//! The consensus layer for hotshot. This currently implements sequencing +//! consensus in an event driven way + +#![warn( + clippy::all, + clippy::pedantic, + rust_2018_idioms, + missing_docs, + clippy::missing_docs_in_private_items, + clippy::panic +)] +#![allow(clippy::module_name_repetitions)] + +/// the task which implements the main parts of consensus +pub mod consensus; + +/// The task which implements the main parts of data availability. +pub mod da; + +/// Defines the events passed between tasks +pub mod events; + +/// The task which implements the network. +pub mod network; + +/// Defines the types to run unit tests for a task. +pub mod harness; + +/// The task which implements view synchronization +pub mod view_sync; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs new file mode 100644 index 0000000000..a4b083ba37 --- /dev/null +++ b/task-impls/src/network.rs @@ -0,0 +1,356 @@ +use crate::events::SequencingHotShotEvent; +use either::Either::{self, Left, Right}; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + task::{FilterEvent, HotShotTaskCompleted, TS}, + task_impls::{HSTWithEvent, HSTWithMessage}, + GeneratedStream, Merge, +}; +use hotshot_types::{ + data::{ProposalType, SequencingLeaf}, + message::{ + CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, + SequencingMessage, + }, + traits::{ + election::Membership, + network::{CommunicationChannel, TransmitType}, + node_implementation::{NodeImplementation, NodeType}, + }, + vote::VoteType, +}; +use snafu::Snafu; +use std::{marker::PhantomData, sync::Arc}; +use tracing::error; + +/// the type of network task +#[derive(Clone, Copy, Debug)] +pub enum NetworkTaskKind { + /// quorum: the normal "everyone" committee + Quorum, + /// da committee + Committee, + /// view sync + ViewSync, +} + +/// the network message task state +pub struct NetworkMessageTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// event stream (used for publishing) + pub event_stream: ChannelStream>, +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > TS for NetworkMessageTaskState +{ +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > NetworkMessageTaskState +{ + /// Handle the message. + pub async fn handle_messages(&mut self, messages: Vec>) { + // We will send only one event for a vector of transactions. + let mut transactions = Vec::new(); + for message in messages { + let sender = message.sender; + match message.kind { + MessageKind::Consensus(consensus_message) => { + let event = match consensus_message.0 { + Either::Left(general_message) => match general_message { + GeneralConsensusMessage::Proposal(proposal) => { + SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), sender) + } + GeneralConsensusMessage::Vote(vote) => { + SequencingHotShotEvent::QuorumVoteRecv(vote.clone()) + } + GeneralConsensusMessage::ViewSyncVote(view_sync_message) => { + SequencingHotShotEvent::ViewSyncVoteRecv(view_sync_message) + } + GeneralConsensusMessage::ViewSyncCertificate(view_sync_message) => { + SequencingHotShotEvent::ViewSyncCertificateRecv(view_sync_message) + } + GeneralConsensusMessage::InternalTrigger(_) => { + error!("Got unexpected message type in network task!"); + return; + } + }, + Either::Right(committee_message) => match committee_message { + CommitteeConsensusMessage::DAProposal(proposal) => { + SequencingHotShotEvent::DAProposalRecv(proposal.clone(), sender) + } + CommitteeConsensusMessage::DAVote(vote) => { + // error!("DA Vote message recv {:?}", vote.current_view); + SequencingHotShotEvent::DAVoteRecv(vote.clone()) + } + CommitteeConsensusMessage::DACertificate(cert) => { + // panic!("Recevid DA C! "); + SequencingHotShotEvent::DACRecv(cert) + } + }, + }; + // TODO (Keyao benchmarking) Update these event variants (similar to the + // `TransactionsRecv` event) so we can send one event for a vector of messages. + // + self.event_stream.publish(event).await; + } + MessageKind::Data(message) => match message { + hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { + transactions.push(transaction); + } + }, + MessageKind::_Unreachable(_) => unimplemented!(), + }; + } + if !transactions.is_empty() { + self.event_stream + .publish(SequencingHotShotEvent::TransactionsRecv(transactions)) + .await; + } + } +} + +/// network event task state +pub struct NetworkEventTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, +> { + /// comm channel + pub channel: COMMCHANNEL, + /// event stream + pub event_stream: ChannelStream>, + /// view number + pub view: TYPES::Time, + /// phantom data + pub phantom: PhantomData<(PROPOSAL, VOTE, MEMBERSHIP)>, + // TODO ED Need to add exchange so we can get the recipient key and our own key? +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + > TS for NetworkEventTaskState +{ +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + > NetworkEventTaskState +{ + /// Handle the given event. + /// + /// Returns the completion status. + /// # Panics + /// Panic sif a direct message event is received with no recipient + pub async fn handle_event( + &mut self, + event: SequencingHotShotEvent, + membership: &MEMBERSHIP, + ) -> Option { + let (sender, message_kind, transmit_type, recipient) = match event.clone() { + SequencingHotShotEvent::QuorumProposalSend(proposal, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::Proposal(proposal), + ))), + TransmitType::Broadcast, + None, + ), + + // ED Each network task is subscribed to all these message types. Need filters per network task + SequencingHotShotEvent::QuorumVoteSend(vote) => ( + vote.signature_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::Vote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.current_view() + 1)), + ), + + SequencingHotShotEvent::DAProposalSend(proposal, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::DAProposal(proposal), + ))), + TransmitType::Broadcast, + None, + ), + SequencingHotShotEvent::DAVoteSend(vote) => ( + vote.signature_key(), + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::DAVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.current_view)), + ), + // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee + SequencingHotShotEvent::DACSend(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::DACertificate(certificate), + ))), + TransmitType::Broadcast, + None, + ), + SequencingHotShotEvent::ViewSyncCertificateSend(certificate_proposal, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncCertificate(certificate_proposal), + ))), + TransmitType::Broadcast, + None, + ), + SequencingHotShotEvent::ViewSyncVoteSend(vote) => { + // error!("Sending view sync vote in network task to relay with index: {:?}", vote.round() + vote.relay()); + ( + vote.signature_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.round() + vote.relay())), + ) + } + SequencingHotShotEvent::ViewChange(view) => { + self.view = view; + return None; + } + SequencingHotShotEvent::Shutdown => { + return Some(HotShotTaskCompleted::ShutDown); + } + event => { + error!("Receieved unexpected message in network task {:?}", event); + return None; + } + }; + + let message = Message { + sender, + kind: message_kind, + _phantom: PhantomData, + }; + let transmit_result = match transmit_type { + TransmitType::Direct => { + self.channel + .direct_message(message, recipient.unwrap()) + .await + } + TransmitType::Broadcast => self.channel.broadcast_message(message, membership).await, + }; + + match transmit_result { + Ok(()) => {} + Err(e) => error!("Failed to send message from network task: {:?}", e), + } + + None + } + + /// network filter + pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { + match task_kind { + NetworkTaskKind::Quorum => FilterEvent(Arc::new(Self::quorum_filter)), + NetworkTaskKind::Committee => FilterEvent(Arc::new(Self::committee_filter)), + NetworkTaskKind::ViewSync => FilterEvent(Arc::new(Self::view_sync_filter)), + } + } + + /// quorum filter + fn quorum_filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::QuorumProposalSend(_, _) + | SequencingHotShotEvent::QuorumVoteSend(_) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::DACSend(_, _) + | SequencingHotShotEvent::ViewChange(_) + ) + } + + /// committee filter + fn committee_filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::DAProposalSend(_, _) + | SequencingHotShotEvent::DAVoteSend(_) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::ViewChange(_) + ) + } + + /// view sync filter + fn view_sync_filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::ViewSyncVoteSend(_) + | SequencingHotShotEvent::ViewSyncCertificateSend(_, _) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::ViewChange(_) + ) + } +} + +/// network error (no errors right now, only stub) +#[derive(Snafu, Debug)] +pub struct NetworkTaskError {} + +/// networking message task types +pub type NetworkMessageTaskTypes = HSTWithMessage< + NetworkTaskError, + Either, Messages>, + // A combination of broadcast and direct streams. + Merge>, GeneratedStream>>, + NetworkMessageTaskState, +>; + +/// network event task types +pub type NetworkEventTaskTypes = HSTWithEvent< + NetworkTaskError, + SequencingHotShotEvent, + ChannelStream>, + NetworkEventTaskState, +>; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs new file mode 100644 index 0000000000..de1e3025bf --- /dev/null +++ b/task-impls/src/view_sync.rs @@ -0,0 +1,1052 @@ +#![allow(clippy::module_name_repetitions)] +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use commit::Committable; +use either::Either::{self, Left, Right}; +use futures::FutureExt; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, +}; +use hotshot_types::traits::{election::Membership, network::ConsensusIntentEvent}; + +use bitvec::prelude::*; +use hotshot_task::global_registry::GlobalRegistry; +use hotshot_types::{ + certificate::ViewSyncCertificate, + data::SequencingLeaf, + message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, + traits::{ + consensus_api::SequencingConsensusApi, + election::{ConsensusExchange, ViewSyncExchangeType}, + network::CommunicationChannel, + node_implementation::{NodeImplementation, NodeType, ViewSyncEx}, + signature_key::SignatureKey, + state::ConsensusTime, + }, + vote::{ViewSyncData, ViewSyncVote, VoteAccumulator}, +}; +use snafu::Snafu; +use std::{collections::HashMap, sync::Arc, time::Duration}; +use tracing::{debug, error, instrument}; +#[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] +/// Phases of view sync +pub enum ViewSyncPhase { + /// No phase; before the protocol has begun + None, + /// PreCommit phase + PreCommit, + /// Commit phase + Commit, + /// Finalize phase + Finalize, +} + +#[derive(Default)] +/// Information about view sync sub-tasks +pub struct ViewSyncTaskInfo { + /// Id of the event stream of a certain task + event_stream_id: usize, +} + +#[derive(Snafu, Debug)] +/// Stub of a view sync error +pub struct ViewSyncTaskError {} + +/// Main view sync task state +pub struct ViewSyncTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static + std::clone::Clone, +> where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ + /// Registry to register sub tasks + pub registry: GlobalRegistry, + /// Event stream to publish events to + pub event_stream: ChannelStream>, + /// View HotShot is currently in + pub current_view: TYPES::Time, + /// View HotShot wishes to be in + pub next_view: TYPES::Time, + /// View sync exchange + pub exchange: Arc>, + /// HotShot consensus API + pub api: A, + /// Our node id; for logging + pub id: u64, + + /// How many timeouts we've seen in a row; is reset upon a successful view change + pub num_timeouts_tracked: u64, + + /// Map of running replica tasks + pub replica_task_map: HashMap, + + /// Map of running relay tasks + pub relay_task_map: HashMap, + + /// Timeout duration for view sync rounds + pub view_sync_timeout: Duration, + + /// Last view we garbage collected old tasks + pub last_garbage_collected_view: TYPES::Time, +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static + std::clone::Clone, + > TS for ViewSyncTaskState +where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ +} + +/// Types for the main view sync task +pub type ViewSyncTaskStateTypes = HSTWithEvent< + ViewSyncTaskError, + SequencingHotShotEvent, + ChannelStream>, + ViewSyncTaskState, +>; + +/// State of a view sync replica task +pub struct ViewSyncReplicaTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +> where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ + /// Timeout for view sync rounds + pub view_sync_timeout: Duration, + /// Current round HotShot is in + pub current_view: TYPES::Time, + /// Round HotShot wishes to be in + pub next_view: TYPES::Time, + /// The last seen phase of the view sync protocol + pub phase: ViewSyncPhase, + /// The relay index we are currently on + pub relay: u64, + /// Whether we have seen a finalized certificate + pub finalized: bool, + /// Whether we have already sent a view change event for `next_view` + pub sent_view_change_event: bool, + /// Our node id; for logging + pub id: u64, + + /// View sync exchange + pub exchange: Arc>, + /// HotShot consensus API + pub api: A, + /// Event stream to publish events to + pub event_stream: ChannelStream>, +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TS for ViewSyncReplicaTaskState +where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ +} + +/// Types for view sync replica state +pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< + ViewSyncTaskError, + SequencingHotShotEvent, + ChannelStream>, + ViewSyncReplicaTaskState, +>; + +/// State of a view sync relay task +pub struct ViewSyncRelayTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +> { + /// Event stream to publish events to + pub event_stream: ChannelStream>, + /// View sync exchange + pub exchange: Arc>, + /// Vote accumulator + pub accumulator: Either< + VoteAccumulator>, + ViewSyncCertificate, + >, + /// Our node id; for logging + pub id: u64, +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > TS for ViewSyncRelayTaskState +{ +} + +/// Types used by the view sync relay task +pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< + ViewSyncTaskError, + SequencingHotShotEvent, + ChannelStream>, + ViewSyncRelayTaskState, +>; + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static + std::clone::Clone, + > ViewSyncTaskState +where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ + #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] + /// Handles incoming events for the main view sync task + pub async fn handle_event(&mut self, event: SequencingHotShotEvent) { + match &event { + SequencingHotShotEvent::ViewSyncCertificateRecv(message) => { + let (certificate_internal, last_seen_certificate) = match &message.data { + ViewSyncCertificate::PreCommit(certificate_internal) => { + (certificate_internal, ViewSyncPhase::PreCommit) + } + ViewSyncCertificate::Commit(certificate_internal) => { + (certificate_internal, ViewSyncPhase::Commit) + } + ViewSyncCertificate::Finalize(certificate_internal) => { + (certificate_internal, ViewSyncPhase::Finalize) + } + }; + debug!( + "Received view sync cert for phase {:?}", + last_seen_certificate + ); + + // This certificate is old, we can throw it away + // If next view = cert round, then that means we should already have a task running for it + if self.current_view > certificate_internal.round { + debug!("Already in a higher view than the view sync message"); + return; + } + + if let Some(replica_task) = self.replica_task_map.get(&certificate_internal.round) { + // Forward event then return + debug!("Forwarding message"); + self.event_stream + .direct_message(replica_task.event_stream_id, event) + .await; + return; + } + + // We do not have a replica task already running, so start one + let mut replica_state = ViewSyncReplicaTaskState { + current_view: certificate_internal.round, + next_view: certificate_internal.round, + relay: 0, + finalized: false, + sent_view_change_event: false, + phase: ViewSyncPhase::None, + exchange: self.exchange.clone(), + api: self.api.clone(), + event_stream: self.event_stream.clone(), + view_sync_timeout: self.view_sync_timeout, + id: self.id, + }; + + let result = replica_state.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + replica_state = result.1; + + let name = format!( + "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", + self.next_view, self.current_view + ); + + let replica_handle_event = HandleEvent(Arc::new( + move |event, state: ViewSyncReplicaTaskState| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent::default(); + let builder = TaskBuilder::>::new(name) + .register_event_stream(replica_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(replica_state) + .register_event_handler(replica_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + self.replica_task_map.insert( + certificate_internal.round, + ViewSyncTaskInfo { event_stream_id }, + ); + + let _view_sync_replica_task = async_spawn(async move { + ViewSyncReplicaTaskStateTypes::build(builder).launch().await + }); + } + + SequencingHotShotEvent::ViewSyncVoteRecv(vote) => { + let vote_internal = match vote { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal, + }; + + if let Some(relay_task) = self.relay_task_map.get(&vote_internal.round) { + // Forward event then return + self.event_stream + .direct_message(relay_task.event_stream_id, event) + .await; + return; + } + + // We do not have a relay task already running, so start one + + if !self + .exchange + .is_leader(vote_internal.round + vote_internal.relay) + { + // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` + debug!("View sync vote sent to wrong leader"); + return; + } + + let accumulator = VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.exchange.success_threshold(), + failure_threshold: self.exchange.failure_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.exchange.total_nodes()], + }; + + let mut relay_state = ViewSyncRelayTaskState { + event_stream: self.event_stream.clone(), + exchange: self.exchange.clone(), + accumulator: either::Left(accumulator), + id: self.id, + }; + + let result = relay_state.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + relay_state = result.1; + + let name = format!("View Sync Relay Task for view {:?}", vote_internal.round); + + let relay_handle_event = HandleEvent(Arc::new( + move |event, state: ViewSyncRelayTaskState| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent::default(); + let builder = TaskBuilder::>::new(name) + .register_event_stream(relay_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(relay_state) + .register_event_handler(relay_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + self.relay_task_map + .insert(vote_internal.round, ViewSyncTaskInfo { event_stream_id }); + let _view_sync_relay_task = async_spawn(async move { + ViewSyncRelayTaskStateTypes::build(builder).launch().await + }); + } + + &SequencingHotShotEvent::ViewChange(new_view) => { + let new_view = TYPES::Time::new(*new_view); + if self.current_view < new_view { + debug!( + "Change from view {} to view {} in view sync task", + *self.current_view, *new_view + ); + + self.current_view = new_view; + self.next_view = self.current_view; + self.num_timeouts_tracked = 0; + + // Garbage collect old tasks + // We could put this into a separate async task, but that would require making several fields on ViewSyncTaskState thread-safe and harm readability. In the common case this will have zero tasks to clean up. + for i in *self.last_garbage_collected_view..*self.current_view { + if let Some((_key, replica_task_info)) = + self.replica_task_map.remove_entry(&TYPES::Time::new(i)) + { + self.event_stream + .direct_message( + replica_task_info.event_stream_id, + SequencingHotShotEvent::Shutdown, + ) + .await; + } + if let Some((_key, relay_task_info)) = + self.relay_task_map.remove_entry(&TYPES::Time::new(i)) + { + self.event_stream + .direct_message( + relay_task_info.event_stream_id, + SequencingHotShotEvent::Shutdown, + ) + .await; + } + } + + self.last_garbage_collected_view = self.current_view - 1; + } + } + &SequencingHotShotEvent::Timeout(view_number) => { + // This is an old timeout and we can ignore it + if view_number < TYPES::Time::new(*self.current_view) { + return; + } + + self.num_timeouts_tracked += 1; + error!("Num timeouts tracked is {}", self.num_timeouts_tracked); + + if self.num_timeouts_tracked > 2 { + error!("Too many timeouts! This shouldn't happen"); + } + + // TODO ED Make this a configurable variable + if self.num_timeouts_tracked == 2 { + // Start polling for view sync certificates + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( + *view_number + 1, + )) + .await; + + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( + *view_number + 1, + )) + .await; + // panic!("Starting view sync!"); + // Spawn replica task + + let mut replica_state = ViewSyncReplicaTaskState { + current_view: self.current_view, + next_view: TYPES::Time::new(*view_number + 1), + relay: 0, + finalized: false, + sent_view_change_event: false, + phase: ViewSyncPhase::None, + exchange: self.exchange.clone(), + api: self.api.clone(), + event_stream: self.event_stream.clone(), + view_sync_timeout: self.view_sync_timeout, + id: self.id, + }; + + // TODO ED Make all these view numbers into a single variable to avoid errors + let result = replica_state + .handle_event(SequencingHotShotEvent::ViewSyncTrigger(view_number + 1)) + .await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + replica_state = result.1; + + let name = format!( + "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", + self.next_view, self.current_view + ); + + let replica_handle_event = HandleEvent(Arc::new( + move |event, state: ViewSyncReplicaTaskState| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent(Arc::new(Self::filter)); + let builder = + TaskBuilder::>::new(name) + .register_event_stream(replica_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(replica_state) + .register_event_handler(replica_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + self.replica_task_map.insert( + TYPES::Time::new(*view_number + 1), + ViewSyncTaskInfo { event_stream_id }, + ); + + let _view_sync_replica_task = async_spawn(async move { + ViewSyncReplicaTaskStateTypes::build(builder).launch().await + }); + } else { + // If this is the first timeout we've seen advance to the next view + self.current_view += 1; + self.event_stream + .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( + *self.current_view, + ))) + .await; + } + } + + _ => {} + } + } + + /// Filter view sync related events. + pub fn filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::ViewSyncCertificateRecv(_) + | SequencingHotShotEvent::ViewSyncCertificateSend(_, _) + | SequencingHotShotEvent::ViewSyncVoteRecv(_) + | SequencingHotShotEvent::ViewSyncVoteSend(_) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::Timeout(_) + | SequencingHotShotEvent::ViewSyncTimeout(_, _, _) + | SequencingHotShotEvent::ViewChange(_) + ) + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > ViewSyncReplicaTaskState +where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ + #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] + /// Handle incoming events for the view sync replica task + pub async fn handle_event( + mut self, + event: SequencingHotShotEvent, + ) -> ( + std::option::Option, + ViewSyncReplicaTaskState, + ) { + match event { + SequencingHotShotEvent::ViewSyncCertificateRecv(message) => { + let (certificate_internal, last_seen_certificate) = match message.data.clone() { + ViewSyncCertificate::PreCommit(certificate_internal) => { + (certificate_internal, ViewSyncPhase::PreCommit) + } + ViewSyncCertificate::Commit(certificate_internal) => { + (certificate_internal, ViewSyncPhase::Commit) + } + ViewSyncCertificate::Finalize(certificate_internal) => { + (certificate_internal, ViewSyncPhase::Finalize) + } + }; + + // Ignore certificate if it is for an older round + if certificate_internal.round < self.next_view { + debug!("We're already in a higher round"); + + return (None, self); + } + + let relay_key = self + .exchange + .get_leader(certificate_internal.round + certificate_internal.relay); + + if !relay_key.validate(&message.signature, message.data.commit().as_ref()) { + error!("Key does not validate for certificate sender"); + return (None, self); + } + + // If certificate is not valid, return current state + if !self + .exchange + .is_valid_view_sync_cert(message.data, certificate_internal.round) + { + error!("Not valid view sync cert!"); + + return (None, self); + } + + // If certificate is for a higher round shutdown this task + // since another task should have been started for the higher round + // TODO ED Perhaps in the future this should return an error giving more + // context + if certificate_internal.round > self.next_view { + return (Some(HotShotTaskCompleted::ShutDown), self); + } + + // Ignore if the certificate is for an already seen phase + if last_seen_certificate <= self.phase { + return (None, self); + } + + self.phase = last_seen_certificate; + + // Send ViewChange event if necessary + if self.phase >= ViewSyncPhase::Commit && !self.sent_view_change_event { + error!("VIEW SYNC UPDATING VIEW TO {}", *self.next_view); + self.event_stream + .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( + *self.next_view, + ))) + .await; + self.sent_view_change_event = true; + } + + // The protocol has ended + if self.phase == ViewSyncPhase::Finalize { + self.exchange + .network() + .inject_consensus_info( + ConsensusIntentEvent::CancelPollForViewSyncCertificate(*self.next_view), + ) + .await; + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncVotes( + *self.next_view, + )) + .await; + return ((Some(HotShotTaskCompleted::ShutDown)), self); + } + + if certificate_internal.relay > self.relay { + self.relay = certificate_internal.relay; + } + + // TODO ED Assuming that nodes must have stake for the view they are voting to enter + let maybe_vote_token = self + .exchange + .membership() + .make_vote_token(self.next_view, self.exchange.private_key()); + + match maybe_vote_token { + Ok(Some(vote_token)) => { + let message = match self.phase { + ViewSyncPhase::None => unimplemented!(), + ViewSyncPhase::PreCommit => self.exchange.create_commit_message::( + self.next_view, + self.relay, + vote_token.clone(), + ), + ViewSyncPhase::Commit => self.exchange.create_finalize_message::( + self.next_view, + self.relay, + vote_token.clone(), + ), + // Should never hit this + ViewSyncPhase::Finalize => unimplemented!(), + }; + + if let GeneralConsensusMessage::ViewSyncVote(vote) = message { + // error!("Sending vs vote {:?}", vote.clone()); + + self.event_stream + .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .await; + } + + // Send to the first relay after sending to k_th relay + if self.relay > 0 { + let message = match self.phase { + ViewSyncPhase::None => unimplemented!(), + ViewSyncPhase::PreCommit => { + self.exchange.create_precommit_message::( + self.next_view, + 0, + vote_token.clone(), + ) + } + ViewSyncPhase::Commit => self.exchange.create_commit_message::( + self.next_view, + 0, + vote_token.clone(), + ), + ViewSyncPhase::Finalize => unimplemented!(), + }; + // error!("Sending vs vote {:?}", message.clone()); + if let GeneralConsensusMessage::ViewSyncVote(vote) = message { + // error!("Sending vs vote {:?}", vote.clone()); + + self.event_stream + .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .await; + } + } + + // TODO ED Add event to shutdown this task if a view is completed + async_spawn({ + let stream = self.event_stream.clone(); + let phase = self.phase.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + stream + .publish(SequencingHotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + phase, + )) + .await; + } + }); + + return (None, self); + } + Ok(None) => { + debug!( + "We were not chosen for committee on view {}", + *self.next_view + ); + return (None, self); + } + Err(_) => { + error!("Problem generating vote token"); + return (None, self); + } + } + } + + SequencingHotShotEvent::ViewSyncTrigger(view_number) => { + if self.next_view != TYPES::Time::new(*view_number) { + error!("Unexpected view number to triger view sync"); + return (None, self); + } + let maybe_vote_token = self + .exchange + .membership() + .make_vote_token(self.next_view, self.exchange.private_key()); + + match maybe_vote_token { + Ok(Some(vote_token)) => { + let message = self.exchange.create_precommit_message::( + self.next_view, + self.relay, + vote_token.clone(), + ); + + if let GeneralConsensusMessage::ViewSyncVote(vote) = message { + debug!( + "Sending precommit vote to start protocol for next view = {}", + *vote.round() + ); + // error!("Sending vs vote {:?}", vote.clone()); + + self.event_stream + .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .await; + } + + // TODO ED Add event to shutdown this task + async_spawn({ + let stream = self.event_stream.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + stream + .publish(SequencingHotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + ViewSyncPhase::None, + )) + .await; + } + }); + return (None, self); + } + Ok(None) => { + debug!("We were not chosen for committee on view {}", *view_number); + return (None, self); + } + Err(_) => { + error!("Problem generating vote token"); + return (None, self); + } + } + } + + SequencingHotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { + // Shouldn't ever receive a timeout for a relay higher than ours + if TYPES::Time::new(*round) == self.next_view + && relay == self.relay + && last_seen_certificate == self.phase + { + let maybe_vote_token = self + .exchange + .membership() + .make_vote_token(self.next_view, self.exchange.private_key()); + + match maybe_vote_token { + Ok(Some(vote_token)) => { + self.relay += 1; + let message = match self.phase { + ViewSyncPhase::None => self.exchange.create_precommit_message::( + self.next_view, + self.relay, + vote_token.clone(), + ), + ViewSyncPhase::PreCommit => { + self.exchange.create_commit_message::( + self.next_view, + self.relay, + vote_token.clone(), + ) + } + ViewSyncPhase::Commit => { + self.exchange.create_finalize_message::( + self.next_view, + self.relay, + vote_token.clone(), + ) + } + ViewSyncPhase::Finalize => unimplemented!(), + }; + + if let GeneralConsensusMessage::ViewSyncVote(vote) = message { + self.event_stream + .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .await; + } + + // TODO ED Add event to shutdown this task + async_spawn({ + let stream = self.event_stream.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + stream + .publish(SequencingHotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + last_seen_certificate, + )) + .await; + } + }); + return (None, self); + } + Ok(None) | Err(_) => return (None, self), + } + } + } + _ => return (None, self), + } + (None, self) + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + > ViewSyncRelayTaskState +where + ViewSyncEx: ConsensusExchange< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = ViewSyncData, + >, +{ + /// Handles incoming events for the view sync relay task + #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] + pub async fn handle_event( + mut self, + event: SequencingHotShotEvent, + ) -> ( + std::option::Option, + ViewSyncRelayTaskState, + ) { + match event { + SequencingHotShotEvent::ViewSyncVoteRecv(vote) => { + if self.accumulator.is_right() { + return (Some(HotShotTaskCompleted::ShutDown), self); + } + + let (vote_internal, phase) = match vote { + ViewSyncVote::PreCommit(vote_internal) => { + (vote_internal, ViewSyncPhase::PreCommit) + } + ViewSyncVote::Commit(vote_internal) => (vote_internal, ViewSyncPhase::Commit), + ViewSyncVote::Finalize(vote_internal) => { + (vote_internal, ViewSyncPhase::Finalize) + } + }; + + debug!( + "Recved vote for next view {}, and relay {}, and phase {:?}", + *vote_internal.round, vote_internal.relay, phase + ); + + // Ignore this vote if we are not the correct relay + if !self + .exchange + .is_leader(vote_internal.round + vote_internal.relay) + { + debug!("We are not the correct relay"); + return (None, self); + } + + let view_sync_data = ViewSyncData:: { + round: vote_internal.round, + relay: self.exchange.public_key().to_bytes(), + } + .commit(); + + debug!( + "Accumulating view sync vote {} relay {}", + *vote_internal.round, vote_internal.relay + ); + + let accumulator = self.exchange.accumulate_vote( + &vote_internal.signature.0, + &vote_internal.signature.1, + view_sync_data, + vote_internal.vote_data, + vote_internal.vote_token.clone(), + vote_internal.round, + self.accumulator.left().unwrap(), + Some(vote_internal.relay), + ); + + self.accumulator = match accumulator { + Left(new_accumulator) => Either::Left(new_accumulator), + Right(certificate) => { + let signature = + self.exchange.sign_certificate_proposal(certificate.clone()); + let message = Proposal { + data: certificate.clone(), + signature, + }; + // error!("Sending view sync cert {:?}", message.clone()); + self.event_stream + .publish(SequencingHotShotEvent::ViewSyncCertificateSend( + message, + self.exchange.public_key().clone(), + )) + .await; + + // Reset accumulator for new certificate + either::Left(VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.exchange.success_threshold(), + failure_threshold: self.exchange.failure_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.exchange.total_nodes()], + }) + } + }; + + if phase == ViewSyncPhase::Finalize { + (Some(HotShotTaskCompleted::ShutDown), self) + } else { + (None, self) + } + } + _ => (None, self), + } + } +} diff --git a/task/Cargo.toml b/task/Cargo.toml new file mode 100644 index 0000000000..720a7aca6a --- /dev/null +++ b/task/Cargo.toml @@ -0,0 +1,25 @@ +[package] +authors = ["Espresso Systems "] +description = "Async task abstraction for use in consensus" +edition = "2021" +name = "hotshot-task" +version = "0.1.0" + +[dependencies] +async-compatibility-layer = { workspace = true } +async-trait = { workspace = true } +either = { workspace = true } +futures = { workspace = true } +nll = { workspace = true } +serde = { workspace = true } +snafu = { workspace = true } +async-lock = { workspace = true } +tracing = { workspace = true } +atomic_enum = "0.2.0" +pin-project = "1.1.3" + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } + +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/task/src/event_stream.rs b/task/src/event_stream.rs new file mode 100644 index 0000000000..875d045994 --- /dev/null +++ b/task/src/event_stream.rs @@ -0,0 +1,268 @@ +use async_compatibility_layer::channel::{unbounded, UnboundedSender, UnboundedStream}; +use async_lock::RwLock; +use std::{ + collections::HashMap, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use async_trait::async_trait; +use futures::Stream; + +use crate::task::{FilterEvent, PassType}; + +/// a stream that does nothing. +/// it's immediately closed +#[derive(Clone)] +pub struct DummyStream; + +impl Stream for DummyStream { + type Item = (); + + fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(None) + } +} + +#[async_trait] +impl EventStream for DummyStream { + type EventType = (); + + type StreamType = DummyStream; + + async fn publish(&self, _event: Self::EventType) {} + + async fn subscribe( + &self, + _filter: FilterEvent, + ) -> (Self::StreamType, StreamId) { + (DummyStream, 0) + } + + async fn unsubscribe(&self, _id: StreamId) {} + + async fn direct_message(&self, _id: StreamId, _event: Self::EventType) {} +} + +impl SendableStream for DummyStream {} + +/// this is only used for indexing +pub type StreamId = usize; + +/// a stream that plays nicely with async +pub trait SendableStream: Stream + Sync + Send + 'static {} + +/// Async pub sub event stream +/// NOTE: static bound indicates that if the type points to data, that data lives for the lifetime +/// of the program +#[async_trait] +pub trait EventStream: Clone + 'static + Sync + Send { + /// the type of event to process + type EventType: PassType; + /// the type of stream to use + type StreamType: SendableStream; + + /// publish an event to the event stream + async fn publish(&self, event: Self::EventType); + + /// subscribe to a particular set of events + /// specified by `filter`. Filter returns true if the event should be propagated + /// TODO (justin) rethink API, we might be able just to use `StreamExt::filter` and `Filter` + /// That would certainly be cleaner + async fn subscribe(&self, filter: FilterEvent) + -> (Self::StreamType, StreamId); + + /// unsubscribe from the stream + async fn unsubscribe(&self, id: StreamId); + + /// send direct message to node + async fn direct_message(&self, id: StreamId, event: Self::EventType); +} + +/// Event stream implementation using channels as the underlying primitive. +/// We want it to be cloneable +#[derive(Clone)] +pub struct ChannelStream { + /// inner field. Useful for having the stream itself + /// be clone + inner: Arc>>, +} + +/// trick to make the event stream clonable +struct ChannelStreamInner { + /// the subscribers to the channel + subscribers: HashMap, UnboundedSender)>, + /// the next unused assignable id + next_stream_id: StreamId, +} + +impl ChannelStream { + /// construct a new event stream + #[must_use] + pub fn new() -> Self { + Self { + inner: Arc::new(RwLock::new(ChannelStreamInner { + subscribers: HashMap::new(), + next_stream_id: 0, + })), + } + } +} + +impl Default for ChannelStream { + fn default() -> Self { + Self::new() + } +} + +impl SendableStream for UnboundedStream {} + +#[async_trait] +impl EventStream for ChannelStream { + type EventType = EVENT; + type StreamType = UnboundedStream; + + async fn direct_message(&self, id: StreamId, event: Self::EventType) { + let inner = self.inner.write().await; + match inner.subscribers.get(&id) { + Some((filter, sender)) => { + if filter(&event) { + match sender.send(event.clone()).await { + Ok(_) => (), + // error sending => stream is closed so remove it + Err(_) => self.unsubscribe(id).await, + } + } + } + None => { + tracing::debug!("Requested stream id not found"); + } + } + } + + /// publish an event to the event stream + async fn publish(&self, event: Self::EventType) { + let inner = self.inner.read().await; + for (uid, (filter, sender)) in &inner.subscribers { + if filter(&event) { + match sender.send(event.clone()).await { + Ok(_) => (), + // error sending => stream is closed so remove it + Err(_) => { + self.unsubscribe(*uid).await; + } + } + } + } + } + + async fn subscribe( + &self, + filter: FilterEvent, + ) -> (Self::StreamType, StreamId) { + let mut inner = self.inner.write().await; + let new_stream_id = inner.next_stream_id; + let (s, r) = unbounded(); + inner.next_stream_id += 1; + // NOTE: can never be already existing. + // so, this should always return `None` + inner.subscribers.insert(new_stream_id, (filter, s)); + (r.into_stream(), new_stream_id) + } + + async fn unsubscribe(&self, uid: StreamId) { + let mut inner = self.inner.write().await; + inner.subscribers.remove(&uid); + } +} + +#[cfg(test)] +pub mod test { + use crate::{event_stream::EventStream, StreamExt}; + use async_compatibility_layer::art::{async_sleep, async_spawn}; + use std::time::Duration; + + #[derive(Clone, Debug, PartialEq, Eq)] + enum TestMessage { + One, + Two, + Three, + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_channel_stream_basic() { + use crate::task::FilterEvent; + + use super::ChannelStream; + + let channel_stream = ChannelStream::::new(); + let (mut stream, _) = channel_stream.subscribe(FilterEvent::default()).await; + let dup_channel_stream = channel_stream.clone(); + + let dup_dup_channel_stream = channel_stream.clone(); + + async_spawn(async move { + let (mut stream, _) = dup_channel_stream.subscribe(FilterEvent::default()).await; + assert!(stream.next().await.unwrap() == TestMessage::Three); + assert!(stream.next().await.unwrap() == TestMessage::One); + assert!(stream.next().await.unwrap() == TestMessage::Two); + }); + + async_spawn(async move { + dup_dup_channel_stream.publish(TestMessage::Three).await; + dup_dup_channel_stream.publish(TestMessage::One).await; + dup_dup_channel_stream.publish(TestMessage::Two).await; + }); + async_sleep(Duration::new(3, 0)).await; + + assert!(stream.next().await.unwrap() == TestMessage::Three); + assert!(stream.next().await.unwrap() == TestMessage::One); + assert!(stream.next().await.unwrap() == TestMessage::Two); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_channel_stream_xtreme() { + use crate::task::FilterEvent; + + use super::ChannelStream; + + let channel_stream = ChannelStream::::new(); + let mut streams = Vec::new(); + + for _i in 0..1000 { + let dup_channel_stream = channel_stream.clone(); + let (stream, _) = dup_channel_stream.subscribe(FilterEvent::default()).await; + streams.push(stream); + } + + let dup_dup_channel_stream = channel_stream.clone(); + + for _i in 0..1000 { + let mut stream = streams.pop().unwrap(); + async_spawn(async move { + for event in [TestMessage::One, TestMessage::Two, TestMessage::Three] { + for _ in 0..100 { + assert!(stream.next().await.unwrap() == event); + } + } + }); + } + + async_spawn(async move { + for event in [TestMessage::One, TestMessage::Two, TestMessage::Three] { + for _ in 0..100 { + dup_dup_channel_stream.publish(event.clone()).await; + } + } + }); + } +} diff --git a/task/src/global_registry.rs b/task/src/global_registry.rs new file mode 100644 index 0000000000..de16b86a60 --- /dev/null +++ b/task/src/global_registry.rs @@ -0,0 +1,214 @@ +use async_lock::RwLock; +use either::Either; +use futures::{future::BoxFuture, FutureExt}; +use std::{ + collections::{BTreeMap, BTreeSet}, + ops::Deref, + sync::Arc, +}; + +use crate::task_state::{TaskState, TaskStatus}; + +/// function to shut down gobal registry +#[derive(Clone)] +pub struct ShutdownFn(pub Arc BoxFuture<'static, ()> + Sync + Send>); + +// TODO this might cleaner as `run()` +// but then this pattern should change everywhere +impl Deref for ShutdownFn { + type Target = dyn Fn() -> BoxFuture<'static, ()> + Sync + Send; + + fn deref(&self) -> &Self::Target { + &*self.0 + } +} + +/// id of task. Usize instead of u64 because +/// used for primarily for indexing +pub type HotShotTaskId = usize; + +/// the global registry provides a place to: +/// - inquire about the state of various tasks +/// - gracefully shut down tasks +#[derive(Debug, Clone)] +pub struct GlobalRegistry { + /// up-to-date shared list of statuses + /// only used if `state_cache` is out of date + /// or if appending + state_list: Arc>>, + /// possibly stale read version of state + /// NOTE: must include entire state in order to + /// support both incrementing and reading. + /// Writing to the status should gracefully shut down the task + state_cache: BTreeMap, +} + +/// function to modify state +#[allow(clippy::type_complexity)] +struct Modifier(Box Either + Send>); + +impl Default for GlobalRegistry { + fn default() -> Self { + Self::new() + } +} + +impl GlobalRegistry { + /// create new registry + #[must_use] + pub fn new() -> Self { + Self { + state_list: Arc::new(RwLock::new(BTreeMap::default())), + state_cache: BTreeMap::default(), + } + } + + /// register with the global registry + /// return a function to the caller (task) that can be used to deregister + /// returns a function to call to shut down the task + /// and the unique identifier of the task + pub async fn register(&mut self, name: &str, status: TaskState) -> (ShutdownFn, HotShotTaskId) { + let mut list = self.state_list.write().await; + let next_id = list + .last_key_value() + .map(|(k, _v)| k) + .copied() + .unwrap_or_default() + + 1; + let new_entry = (status.clone(), name.to_string()); + let new_entry_dup = new_entry.0.clone(); + list.insert(next_id, new_entry.clone()); + + self.state_cache.insert(next_id, new_entry); + + let shutdown_fn = ShutdownFn(Arc::new(move || { + new_entry_dup.set_state(TaskStatus::Completed); + async move {}.boxed() + })); + (shutdown_fn, next_id) + } + + /// update the cache + async fn update_cache(&mut self) { + // NOTE: this can be done much more cleverly + // avoid one intersection by comparing max keys (constant time op vs O(n + m)) + // and debatable how often the other op needs to be run + // probably much much less often + let list = self.state_list.read().await; + let list_keys: BTreeSet = list.keys().copied().collect(); + let cache_keys: BTreeSet = self.state_cache.keys().copied().collect(); + // bleh not as efficient + let missing_key_list = list_keys.difference(&cache_keys); + let expired_key_list = cache_keys.difference(&list_keys); + + for expired_key in expired_key_list { + self.state_cache.remove(expired_key); + } + + for key in missing_key_list { + // technically shouldn't be possible for this to be none since + // we have a read lock + // nevertheless, this seems easier + if let Some(val) = list.get(key) { + self.state_cache.insert(*key, val.clone()); + } + } + } + + /// internal function to run `modifier` on `uid` + /// if it exists + async fn operate_on_task( + &mut self, + uid: HotShotTaskId, + modifier: Modifier, + ) -> Either { + // the happy path + if let Some(ele) = self.state_cache.get(&uid) { + modifier.0(&ele.0) + } + // the sad path + else { + self.update_cache().await; + if let Some(ele) = self.state_cache.get(&uid) { + modifier.0(&ele.0) + } else { + Either::Right(false) + } + } + } + + /// set `uid`'s state to paused + /// returns true upon success and false if `uid` is not registered + pub async fn pause_task(&mut self, uid: HotShotTaskId) -> bool { + let modifier = Modifier(Box::new(|state| { + state.set_state(TaskStatus::Paused); + Either::Right(true) + })); + match self.operate_on_task(uid, modifier).await { + Either::Left(_) => unreachable!(), + Either::Right(b) => b, + } + } + + /// set `uid`'s state to running + /// returns true upon success and false if `uid` is not registered + pub async fn run_task(&mut self, uid: HotShotTaskId) -> bool { + let modifier = Modifier(Box::new(|state| { + state.set_state(TaskStatus::Running); + Either::Right(true) + })); + match self.operate_on_task(uid, modifier).await { + Either::Left(_) => unreachable!(), + Either::Right(b) => b, + } + } + + /// if the `uid` is registered with the global registry + /// return its task status + /// this is a way to subscribe to state changes from the taskstatus + /// since `HotShotTaskStatus` implements stream + pub async fn get_task_state(&mut self, uid: HotShotTaskId) -> Option { + let modifier = Modifier(Box::new(|state| Either::Left(state.get_status()))); + match self.operate_on_task(uid, modifier).await { + Either::Left(state) => Some(state), + Either::Right(false) => None, + Either::Right(true) => unreachable!(), + } + } + + /// shut down a task from a different thread + /// returns true if succeeded + /// returns false if the task does not exist + pub async fn shutdown_task(&mut self, uid: usize) -> bool { + let modifier = Modifier(Box::new(|state| { + state.set_state(TaskStatus::Completed); + Either::Right(true) + })); + let result = match self.operate_on_task(uid, modifier).await { + Either::Left(_) => unreachable!(), + Either::Right(b) => b, + }; + let mut list = self.state_list.write().await; + list.remove(&uid); + result + } + + /// checks if all registered tasks have completed + pub async fn is_shutdown(&mut self) -> bool { + let task_list = self.state_list.read().await; + for (_uid, task) in task_list.iter() { + if task.0.get_status() != TaskStatus::Completed { + return false; + } + } + true + } + + /// shut down all tasks in registry + pub async fn shutdown_all(&mut self) { + let mut task_list = self.state_list.write().await; + while let Some((_uid, task)) = task_list.pop_last() { + task.0.set_state(TaskStatus::Completed); + } + } +} diff --git a/task/src/lib.rs b/task/src/lib.rs new file mode 100644 index 0000000000..1d4c8b602e --- /dev/null +++ b/task/src/lib.rs @@ -0,0 +1,393 @@ +//! Abstractions meant for usage with long running consensus tasks +//! and testing harness +#![warn( + clippy::all, + clippy::pedantic, + rust_2018_idioms, + missing_docs, + clippy::missing_docs_in_private_items, + clippy::panic +)] + +use crate::task::PassType; +use either::Either; +use event_stream::SendableStream; +use Poll::{Pending, Ready}; +// The spawner of the task should be able to fire and forget the task if it makes sense. +use futures::{stream::Fuse, Future, Stream, StreamExt}; +use std::{ + pin::Pin, + slice::SliceIndex, + sync::Arc, + task::{Context, Poll}, +}; +// NOTE use pin_project here because we're already bring in procedural macros elsewhere +// so there is no reason to use pin_project_lite +use pin_project::pin_project; + +/// Astractions over the state of a task and a stream +/// interface for task changes. Allows in the happy path +/// for lockless manipulation of tasks +/// and in the sad case, only the use of a `std::sync::mutex` +pub mod task_state; + +/// the global registry storing the status of all tasks +/// as well as the abiliity to terminate them +pub mod global_registry; + +/// mpmc streamable to all subscribed tasks +pub mod event_stream; + +/// The `HotShot` Task. The main point of this library. Uses all other abstractions +/// to create an abstraction over tasks +pub mod task; + +/// The hotshot task launcher. Useful for constructing tasks +pub mod task_launcher; + +/// the task implementations with different features +pub mod task_impls; + +/// merge `N` streams of the same type +#[pin_project] +pub struct MergeN { + /// Streams to be merged. + #[pin] + streams: Vec>, + /// idx to start polling + idx: usize, +} + +impl MergeN { + /// create a new stream + #[must_use] + pub fn new(streams: Vec) -> MergeN { + let fused_streams = streams.into_iter().map(StreamExt::fuse).collect(); + MergeN { + streams: fused_streams, + idx: 0, + } + } +} + +impl PassType for T {} + +impl SendableStream for MergeN {} + +// NOTE: yoinked from https://github.com/yoshuawuyts/futures-concurrency/ +// we should really just use `futures-concurrency`. I'm being lazy here +// and not bringing in yet another dependency. Note: their merge is implemented much +// more cleverly than this rather naive impl + +// NOTE: If this is implemented through the trait, this will work on both vecs and +// slices. +// +// From: https://github.com/rust-lang/rust/pull/78370/files +/// Get a pinned mutable pointer from a list. +pub(crate) fn get_pin_mut_from_vec( + slice: Pin<&mut Vec>, + index: I, +) -> Option> +where + I: SliceIndex<[T]>, +{ + // SAFETY: `get_unchecked_mut` is never used to move the slice inside `self` (`SliceIndex` + // is sealed and all `SliceIndex::get_mut` implementations never move elements). + // `x` is guaranteed to be pinned because it comes from `self` which is pinned. + unsafe { + slice + .get_unchecked_mut() + .get_mut(index) + .map(|x| Pin::new_unchecked(x)) + } +} + +impl Stream for MergeN { + // idx of the stream, item + type Item = (usize, ::Item); + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + + let idx = *me.idx; + *me.idx = (idx + 1) % me.streams.len(); + + let first_half = idx..me.streams.len(); + let second_half = 0..idx; + + let iterator = first_half.chain(second_half); + + let mut done = false; + + for i in iterator { + let stream = get_pin_mut_from_vec(me.streams.as_mut(), i).unwrap(); + + match stream.poll_next(cx) { + Ready(Some(val)) => return Ready(Some((i, val))), + Ready(None) => {} + Pending => done = false, + } + } + + if done { + Ready(None) + } else { + Pending + } + } +} + +// NOTE: yoinked /from async-std +// except this is executor agnostic (doesn't rely on async-std streamext/fuse) +// NOTE: usage of this is for combining streams into one main stream +// for usage with `MessageStream` +// TODO move this to async-compatibility-layer +#[pin_project] +/// Stream returned by the [`merge`](super::StreamExt::merge) method. +pub struct Merge { + /// first stream to merge + #[pin] + a: Fuse, + /// second stream to merge + #[pin] + b: Fuse, + /// When `true`, poll `a` first, otherwise, `poll` b`. + a_first: bool, +} + +impl Merge { + /// create a new Merged stream + pub fn new(a: T, b: U) -> Merge + where + T: Stream, + U: Stream, + { + Merge { + a: a.fuse(), + b: b.fuse(), + a_first: true, + } + } +} + +impl Stream for Merge +where + T: Stream, + U: Stream, +{ + type Item = Either; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let me = self.project(); + let a_first = *me.a_first; + + // Toggle the flag + *me.a_first = !a_first; + + poll_next(me.a, me.b, cx, a_first) + } + + fn size_hint(&self) -> (usize, Option) { + let (a_lower, a_upper) = self.a.size_hint(); + let (b_lower, b_upper) = self.b.size_hint(); + + let upper = match (a_upper, b_upper) { + (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper), + _ => None, + }; + + (a_lower + b_lower, upper) + } +} + +impl SendableStream for Merge +where + T: Stream + Send + Sync + 'static, + U: Stream + Send + Sync + 'static, +{ +} + +/// poll the next item in the merged stream +fn poll_next( + first: Pin<&mut T>, + second: Pin<&mut U>, + cx: &mut Context<'_>, + order: bool, +) -> Poll>> +where + T: Stream, + U: Stream, +{ + let mut done = true; + + // there's definitely a better way to do this + if order { + match first.poll_next(cx) { + Ready(Some(val)) => return Ready(Some(Either::Left(val))), + Ready(None) => {} + Pending => done = false, + } + + match second.poll_next(cx) { + Ready(Some(val)) => return Ready(Some(Either::Right(val))), + Ready(None) => {} + Pending => done = false, + } + } else { + match second.poll_next(cx) { + Ready(Some(val)) => return Ready(Some(Either::Right(val))), + Ready(None) => {} + Pending => done = false, + } + + match first.poll_next(cx) { + Ready(Some(val)) => return Ready(Some(Either::Left(val))), + Ready(None) => {} + Pending => done = false, + } + } + + if done { + Ready(None) + } else { + Pending + } +} + +/// gotta make the futures sync +pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; + +/// may be treated as a stream +#[pin_project(project = ProjectedStreamableThing)] +pub struct GeneratedStream { + // todo maybe type wrapper is in order + /// Stream generator. + generator: Arc Option> + Sync + Send>, + /// Optional in-progress future. + in_progress_fut: Option>, +} + +impl GeneratedStream { + /// create a generator + pub fn new( + generator: Arc Option> + Sync + Send>, + ) -> Self { + GeneratedStream { + generator, + in_progress_fut: None, + } + } +} + +impl Stream for GeneratedStream { + type Item = O; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let projection = self.project(); + match projection.in_progress_fut { + Some(fut) => { + // NOTE: this is entirely safe. + // We will ONLY poll if we've been awakened. + // otherwise, we won't poll. + match fut.as_mut().poll(cx) { + Ready(val) => { + *projection.in_progress_fut = None; + Poll::Ready(Some(val)) + } + Pending => Poll::Pending, + } + } + None => { + let wrapped_fut = (*projection.generator)(); + let Some(mut fut) = wrapped_fut else { + return Poll::Ready(None); + }; + match fut.as_mut().poll(cx) { + Ready(val) => { + *projection.in_progress_fut = None; + Poll::Ready(Some(val)) + } + Pending => { + *projection.in_progress_fut = Some(fut); + Poll::Pending + } + } + } + } + } +} + +/// yoinked from futures crate +pub fn assert_future(future: F) -> F +where + F: Future, +{ + future +} + +/// yoinked from futures crate, adds sync bound that we need +pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> +where + F: Future + Sized + Send + Sync + 'a, +{ + assert_future::(Box::pin(fut)) +} + +impl SendableStream for GeneratedStream {} + +#[cfg(test)] +pub mod test { + use crate::{boxed_sync, Arc, GeneratedStream, StreamExt}; + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_stream_basic() { + let mut stream = GeneratedStream:: { + generator: Arc::new(move || { + let closure = async move { 5 }; + Some(boxed_sync(closure)) + }), + in_progress_fut: None, + }; + assert!(stream.next().await == Some(5)); + assert!(stream.next().await == Some(5)); + assert!(stream.next().await == Some(5)); + assert!(stream.next().await == Some(5)); + } + + #[cfg(test)] + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_stream_fancy() { + use async_compatibility_layer::art::async_sleep; + use std::{sync::atomic::Ordering, time::Duration}; + + let value = Arc::::default(); + let mut stream = GeneratedStream:: { + generator: Arc::new(move || { + let value = value.clone(); + let closure = async move { + let actual_value = value.load(Ordering::Relaxed); + value.store(actual_value + 1, Ordering::Relaxed); + async_sleep(Duration::new(0, 500)).await; + u32::from(actual_value) + }; + Some(boxed_sync(closure)) + }), + in_progress_fut: None, + }; + assert!(stream.next().await == Some(0)); + assert!(stream.next().await == Some(1)); + assert!(stream.next().await == Some(2)); + assert!(stream.next().await == Some(3)); + } +} diff --git a/task/src/task.rs b/task/src/task.rs new file mode 100644 index 0000000000..37b6f2d56f --- /dev/null +++ b/task/src/task.rs @@ -0,0 +1,637 @@ +use std::{ + fmt::{Debug, Formatter}, + ops::Deref, + pin::Pin, + task::{Context, Poll}, +}; + +use async_compatibility_layer::art::async_yield_now; +use either::Either::{self, Left, Right}; +use futures::{future::BoxFuture, stream::Fuse, Future, FutureExt, Stream, StreamExt}; +use pin_project::pin_project; +use std::sync::Arc; + +use crate::{ + event_stream::{EventStream, SendableStream, StreamId}, + global_registry::{GlobalRegistry, HotShotTaskId, ShutdownFn}, + task_impls::TaskBuilder, + task_state::{TaskState, TaskStatus}, +}; + +/// restrictions on types we wish to pass around. +/// Includes messages and events +pub trait PassType: Clone + Debug + Sync + Send + 'static {} + +/// the task state +pub trait TS: Sync + Send + 'static {} + +/// a task error that has nice qualities +#[allow(clippy::module_name_repetitions)] +pub trait TaskErr: std::error::Error + Sync + Send + 'static {} + +impl TaskErr for T {} + +/// group of types needed for a hotshot task +pub trait HotShotTaskTypes: 'static { + /// the event type from the event stream + type Event: PassType; + /// the state of the task + type State: TS; + /// the global event stream + type EventStream: EventStream; + /// the message stream to receive + type Message: PassType; + /// the steam of messages from other tasks + type MessageStream: SendableStream; + /// the error to return + type Error: TaskErr + 'static + ?Sized; + + /// build a task + /// NOTE: done here and not on `TaskBuilder` because + /// we want specific checks done on each variant + /// NOTE: all generics implement `Sized`, but this bound is + /// NOT applied to `Self` unless we specify + fn build(builder: TaskBuilder) -> HST + where + Self: Sized; +} + +/// hot shot task +#[pin_project(project = ProjectedHST)] +#[allow(clippy::type_complexity)] +pub struct HST { + /// Optional ID of the stream. + pub(crate) stream_id: Option, + /// the eventual return value, post-cleanup + r_val: Option, + /// if we have a future for tracking shutdown progress + in_progress_shutdown_fut: Option>, + /// the in progress future + in_progress_fut: Option, HSTT::State)>>, + /// name of task + name: String, + /// state of the task + /// TODO make this boxed. We don't want to assume this is a small future. + /// since it concievably may be stored on the stack + #[pin] + status: TaskState, + /// functions performing cleanup + /// one should shut down the task + /// if we're tracking with a global registry + /// the other should unsubscribe from the stream + shutdown_fns: Vec, + /// shared stream + event_stream: MaybePinnedEventStream, + /// stream of messages + message_stream: Option>>>, + /// state + state: Option, + /// handler for events + handle_event: Option>, + /// handler for messages + handle_message: Option>, + /// task id + pub(crate) tid: Option, +} + +/// an option of a pinned boxed fused event stream +pub type MaybePinnedEventStream = + Option::EventStream as EventStream>::StreamType>>>>; + +/// ADT for wrapping all possible handler types +#[allow(dead_code)] +pub(crate) enum HotShotTaskHandler { + /// handle an event + HandleEvent(HandleEvent), + /// handle a message + HandleMessage(HandleMessage), + /// filter an event + FilterEvent(FilterEvent), + /// deregister with the registry + Shutdown(ShutdownFn), +} + +/// Type wrapper for handling an event +#[allow(clippy::type_complexity)] +pub struct HandleEvent( + pub Arc< + dyn Fn( + HSTT::Event, + HSTT::State, + ) -> BoxFuture<'static, (Option, HSTT::State)> + + Sync + + Send, + >, +); + +impl Default for HandleEvent { + fn default() -> Self { + Self(Arc::new(|_event, state| { + async move { (None, state) }.boxed() + })) + } +} + +impl Deref for HandleEvent { + type Target = dyn Fn( + HSTT::Event, + HSTT::State, + ) -> BoxFuture<'static, (Option, HSTT::State)>; + + fn deref(&self) -> &Self::Target { + &*self.0 + } +} + +/// Type wrapper for handling a message +#[allow(clippy::type_complexity)] +pub struct HandleMessage( + pub Arc< + dyn Fn( + HSTT::Message, + HSTT::State, + ) -> BoxFuture<'static, (Option, HSTT::State)> + + Sync + + Send, + >, +); +impl Deref for HandleMessage { + type Target = dyn Fn( + HSTT::Message, + HSTT::State, + ) -> BoxFuture<'static, (Option, HSTT::State)>; + + fn deref(&self) -> &Self::Target { + &*self.0 + } +} + +/// Return `true` if the event should be filtered +#[derive(Clone)] +pub struct FilterEvent(pub Arc bool + Send + 'static + Sync>); + +impl Default for FilterEvent { + fn default() -> Self { + Self(Arc::new(|_| true)) + } +} + +impl Deref for FilterEvent { + type Target = dyn Fn(&EVENT) -> bool + Send + 'static + Sync; + + fn deref(&self) -> &Self::Target { + &*self.0 + } +} + +impl HST { + /// Do a consistency check on the `HST` construction + pub(crate) fn base_check(&self) { + assert!(!self.shutdown_fns.is_empty(), "No shutdown functions"); + assert!( + self.in_progress_fut.is_none(), + "This future has already been polled" + ); + + assert!(self.state.is_some(), "Didn't register state"); + + assert!(self.tid.is_some(), "Didn't register global registry"); + } + + /// perform event sanity checks + pub(crate) fn event_check(&self) { + assert!( + self.shutdown_fns.len() == 2, + "Expected 2 shutdown functions" + ); + assert!(self.event_stream.is_some(), "Didn't register event stream"); + assert!(self.handle_event.is_some(), "Didn't register event handler"); + } + + /// perform message sanity checks + pub(crate) fn message_check(&self) { + assert!( + self.handle_message.is_some(), + "Didn't register message handler" + ); + assert!( + self.message_stream.is_some(), + "Didn't register message stream" + ); + } + + /// register a handler with the task + #[must_use] + pub(crate) fn register_handler(self, handler: HotShotTaskHandler) -> Self { + match handler { + HotShotTaskHandler::HandleEvent(handler) => Self { + handle_event: Some(handler), + ..self + }, + HotShotTaskHandler::HandleMessage(handler) => Self { + handle_message: Some(handler), + ..self + }, + HotShotTaskHandler::FilterEvent(_handler) => unimplemented!(), + HotShotTaskHandler::Shutdown(_handler) => unimplemented!(), + } + } + + /// register an event stream with the task + pub(crate) async fn register_event_stream( + self, + event_stream: HSTT::EventStream, + filter: FilterEvent, + ) -> Self { + let (stream, uid) = event_stream.subscribe(filter).await; + + let mut shutdown_fns = self.shutdown_fns; + { + let event_stream = event_stream.clone(); + shutdown_fns.push(ShutdownFn(Arc::new(move || -> BoxFuture<'static, ()> { + let event_stream = event_stream.clone(); + async move { + event_stream.clone().unsubscribe(uid).await; + } + .boxed() + }))); + } + // TODO perhaps GC the event stream + // (unsunscribe) + Self { + event_stream: Some(Box::pin(stream.fuse())), + shutdown_fns, + stream_id: Some(uid), + ..self + } + } + + /// register a message with the task + #[must_use] + pub(crate) fn register_message_stream(self, stream: HSTT::MessageStream) -> Self { + Self { + message_stream: Some(Box::pin(stream.fuse())), + ..self + } + } + + /// register state with the task + #[must_use] + pub(crate) fn register_state(self, state: HSTT::State) -> Self { + Self { + state: Some(state), + ..self + } + } + + /// register with the registry + pub(crate) async fn register_registry(self, registry: &mut GlobalRegistry) -> Self { + let (shutdown_fn, id) = registry.register(&self.name, self.status.clone()).await; + let mut shutdown_fns = self.shutdown_fns; + shutdown_fns.push(shutdown_fn); + Self { + shutdown_fns, + tid: Some(id), + ..self + } + } + + /// create a new task + pub(crate) fn new(name: String) -> Self { + Self { + stream_id: None, + r_val: None, + name, + status: TaskState::new(), + event_stream: None, + state: None, + handle_event: None, + handle_message: None, + shutdown_fns: vec![], + message_stream: None, + in_progress_fut: None, + in_progress_shutdown_fut: None, + tid: None, + } + } + + /// launch the task + /// NOTE: the only way to get a `HST` is by usage + /// of one of the impls. Those all have checks enabled. + /// So, it should be safe to launch. + pub fn launch(self) -> BoxFuture<'static, HotShotTaskCompleted> { + Box::pin(self) + } +} + +/// enum describing how the tasks completed +pub enum HotShotTaskCompleted { + /// the task shut down successfully + ShutDown, + /// the task encountered an error + Error(Box), + /// the streams the task was listening for died + StreamsDied, + /// we somehow lost the state + /// this is definitely a bug. + LostState, + /// lost the return value somehow + LostReturnValue, + /// Stream exists but missing handler + MissingHandler, +} + +impl std::fmt::Debug for HotShotTaskCompleted { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + HotShotTaskCompleted::ShutDown => f.write_str("HotShotTaskCompleted::ShutDown"), + HotShotTaskCompleted::Error(_) => f.write_str("HotShotTaskCompleted::Error"), + HotShotTaskCompleted::StreamsDied => f.write_str("HotShotTaskCompleted::StreamsDied"), + HotShotTaskCompleted::LostState => f.write_str("HotShotTaskCompleted::LostState"), + HotShotTaskCompleted::LostReturnValue => { + f.write_str("HotShotTaskCompleted::LostReturnValue") + } + HotShotTaskCompleted::MissingHandler => { + f.write_str("HotShotTaskCompleted::MissingHandler") + } + } + } +} + +impl PartialEq for HotShotTaskCompleted { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Error(_l0), Self::Error(_r0)) => false, + _ => core::mem::discriminant(self) == core::mem::discriminant(other), + } + } +} + +impl<'pin, HSTT: HotShotTaskTypes> ProjectedHST<'pin, HSTT> { + /// launches the shutdown future + fn launch_shutdown_fut(&mut self, cx: &mut Context<'_>) -> Poll { + let fut = self.create_shutdown_fut(); + self.check_ip_shutdown_fut(fut, cx) + } + + /// checks the in progress shutdown future, `fut` + fn check_ip_shutdown_fut( + &mut self, + mut fut: Pin + Send>>, + cx: &mut Context<'_>, + ) -> Poll { + match fut.as_mut().poll(cx) { + Poll::Ready(_) => Poll::Ready( + self.r_val + .take() + .unwrap_or_else(|| HotShotTaskCompleted::LostReturnValue), + ), + Poll::Pending => { + *self.in_progress_shutdown_fut = Some(fut); + Poll::Pending + } + } + } + + /// creates the shutdown future and returns it + fn create_shutdown_fut(&mut self) -> Pin + Send>> { + let shutdown_fns = self.shutdown_fns.clone(); + let fut = async move { + for shutdown_fn in shutdown_fns { + shutdown_fn().await; + } + } + .boxed(); + fut + } + + /// check the event stream + /// returns either a poll if there's a future IP + /// or a bool stating whether or not the stream is finished + fn check_event_stream( + &mut self, + cx: &mut Context<'_>, + ) -> Either, bool> { + let event_stream = self.event_stream.take(); + if let Some(mut inner_event_stream) = event_stream { + while let Poll::Ready(maybe_event) = inner_event_stream.as_mut().poll_next(cx) { + if let Some(event) = maybe_event { + if let Some(handle_event) = self.handle_event { + let maybe_state = self.state.take(); + if let Some(state) = maybe_state { + let mut fut = handle_event(event, state); + match fut.as_mut().poll(cx) { + Poll::Ready((result, state)) => { + if let Some(completed) = result { + *self.in_progress_fut = None; + *self.state = Some(state); + *self.r_val = Some(completed); + let result = self.launch_shutdown_fut(cx); + *self.event_stream = Some(inner_event_stream); + return Left(result); + } + // run a yield to tell the executor to go do work on other + // tasks if they are available + // this is necessary otherwise we could end up with one + // task that returns really quickly blocking the executor + // from dealing with other tasks. + let mut fut = async move { + async_yield_now().await; + (None, state) + } + .boxed(); + // if the executor has no extra work to do, + // continue to poll the event stream + if let Poll::Ready((_, state)) = fut.as_mut().poll(cx) { + *self.state = Some(state); + *self.in_progress_fut = None; + // NOTE: don't need to set event stream because + // that will be done on the next iteration + continue; + } + // otherwise, return pending and finish executing the + // yield later + *self.event_stream = Some(inner_event_stream); + *self.in_progress_fut = Some(fut); + return Left(Poll::Pending); + } + Poll::Pending => { + *self.in_progress_fut = Some(fut); + *self.event_stream = Some(inner_event_stream); + return Left(Poll::Pending); + } + } + } + // lost state case + *self.r_val = Some(HotShotTaskCompleted::LostState); + let result = self.launch_shutdown_fut(cx); + *self.event_stream = Some(inner_event_stream); + return Left(result); + } + // no handler case + *self.r_val = Some(HotShotTaskCompleted::MissingHandler); + let result = self.launch_shutdown_fut(cx); + *self.event_stream = Some(inner_event_stream); + return Left(result); + } + // this is a fused future so `None` will come every time after the stream + // finishes + *self.event_stream = Some(inner_event_stream); + return Right(true); + } + *self.event_stream = Some(inner_event_stream); + return Right(false); + } + // stream doesn't exist so trivially true + *self.event_stream = event_stream; + Right(true) + } + + /// check the message stream + /// returns either a poll if there's a future IP + /// or a bool stating whether or not the stream is finished + fn check_message_stream( + &mut self, + cx: &mut Context<'_>, + ) -> Either, bool> { + let message_stream = self.message_stream.take(); + if let Some(mut inner_message_stream) = message_stream { + while let Poll::Ready(maybe_msg) = inner_message_stream.as_mut().poll_next(cx) { + if let Some(msg) = maybe_msg { + if let Some(handle_msg) = self.handle_message { + let maybe_state = self.state.take(); + if let Some(state) = maybe_state { + let mut fut = handle_msg(msg, state); + match fut.as_mut().poll(cx) { + Poll::Ready((result, state)) => { + if let Some(completed) = result { + *self.in_progress_fut = None; + *self.state = Some(state); + *self.r_val = Some(completed); + let result = self.launch_shutdown_fut(cx); + *self.message_stream = Some(inner_message_stream); + return Left(result); + } + // run a yield to tell the executor to go do work on other + // tasks if they are available + // this is necessary otherwise we could end up with one + // task that returns really quickly blocking the executor + // from dealing with other tasks. + let mut fut = async move { + async_yield_now().await; + (None, state) + } + .boxed(); + // if the executor has no extra work to do, + // continue to poll the event stream + if let Poll::Ready((_, state)) = fut.as_mut().poll(cx) { + *self.state = Some(state); + *self.in_progress_fut = None; + // NOTE: don't need to set event stream because + // that will be done on the next iteration + continue; + } + // otherwise, return pending and finish executing the + // yield later + *self.message_stream = Some(inner_message_stream); + *self.in_progress_fut = Some(fut); + return Left(Poll::Pending); + } + Poll::Pending => { + *self.in_progress_fut = Some(fut); + *self.message_stream = Some(inner_message_stream); + return Left(Poll::Pending); + } + } + } + // lost state case + *self.r_val = Some(HotShotTaskCompleted::LostState); + let result = self.launch_shutdown_fut(cx); + *self.message_stream = Some(inner_message_stream); + return Left(result); + } + // no handler case + *self.r_val = Some(HotShotTaskCompleted::MissingHandler); + let result = self.launch_shutdown_fut(cx); + *self.message_stream = Some(inner_message_stream); + return Left(result); + } + // this is a fused future so `None` will come every time after the stream + // finishes + *self.message_stream = Some(inner_message_stream); + return Right(true); + } + *self.message_stream = Some(inner_message_stream); + return Right(false); + } + // stream doesn't exist so trivially true + *self.message_stream = message_stream; + Right(true) + } +} + +// NOTE: this is a Future, but it could easily be a stream. +// but these are semantically equivalent because instead of +// returning when paused, we just return `Poll::Pending` +impl Future for HST { + type Output = HotShotTaskCompleted; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut projected = self.as_mut().project(); + + if let Some(fut) = projected.in_progress_shutdown_fut.take() { + return projected.check_ip_shutdown_fut(fut, cx); + } + + // check if task is complete + if let Some(state_change) = projected.status.as_mut().try_next() { + match state_change { + TaskStatus::NotStarted | TaskStatus::Paused => { + return Poll::Pending; + } + TaskStatus::Running => {} + TaskStatus::Completed => { + *projected.r_val = Some(HotShotTaskCompleted::ShutDown); + return projected.launch_shutdown_fut(cx); + } + } + } + + // check if there's an in progress future + if let Some(in_progress_fut) = projected.in_progress_fut { + match in_progress_fut.as_mut().poll(cx) { + Poll::Ready((result, state)) => { + *projected.in_progress_fut = None; + *projected.state = Some(state); + // if the future errored out, return it, we're done + if let Some(completed) = result { + *projected.r_val = Some(completed); + return projected.launch_shutdown_fut(cx); + } + } + Poll::Pending => { + return Poll::Pending; + } + } + } + + let event_stream_finished = match projected.check_event_stream(cx) { + Left(result) => return result, + Right(finished) => finished, + }; + + let message_stream_finished = match projected.check_message_stream(cx) { + Left(result) => return result, + Right(finished) => finished, + }; + + if message_stream_finished && event_stream_finished { + tracing::error!("Message and event stream both finished!"); + *projected.r_val = Some(HotShotTaskCompleted::StreamsDied); + let result = projected.launch_shutdown_fut(cx); + return result; + } + + Poll::Pending + } +} diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs new file mode 100644 index 0000000000..057717057b --- /dev/null +++ b/task/src/task_impls.rs @@ -0,0 +1,456 @@ +use futures::Stream; +use std::marker::PhantomData; + +use crate::{ + event_stream::{DummyStream, EventStream, SendableStream, StreamId}, + global_registry::{GlobalRegistry, HotShotTaskId}, + task::{ + FilterEvent, HandleEvent, HandleMessage, HotShotTaskHandler, HotShotTaskTypes, PassType, + TaskErr, HST, TS, + }, +}; + +/// trait to specify features +pub trait ImplMessageStream {} + +/// trait to specify features +pub trait ImplEventStream {} + +/// builder for task +pub struct TaskBuilder(HST); + +impl TaskBuilder { + /// register an event handler + #[must_use] + pub fn register_event_handler(self, handler: HandleEvent) -> Self + where + HSTT: ImplEventStream, + { + Self( + self.0 + .register_handler(HotShotTaskHandler::HandleEvent(handler)), + ) + } + + /// obtains stream id if it exists + pub fn get_stream_id(&self) -> Option { + self.0.stream_id + } + + /// register a message handler + #[must_use] + pub fn register_message_handler(self, handler: HandleMessage) -> Self + where + HSTT: ImplMessageStream, + { + Self( + self.0 + .register_handler(HotShotTaskHandler::HandleMessage(handler)), + ) + } + + /// register a message stream + #[must_use] + pub fn register_message_stream(self, stream: HSTT::MessageStream) -> Self + where + HSTT: ImplMessageStream, + { + Self(self.0.register_message_stream(stream)) + } + + /// register an event stream + pub async fn register_event_stream( + self, + stream: HSTT::EventStream, + filter: FilterEvent, + ) -> Self + where + HSTT: ImplEventStream, + { + Self(self.0.register_event_stream(stream, filter).await) + } + + /// register the state + #[must_use] + pub fn register_state(self, state: HSTT::State) -> Self { + Self(self.0.register_state(state)) + } + + /// register with the global registry + pub async fn register_registry(self, registry: &mut GlobalRegistry) -> Self { + Self(self.0.register_registry(registry).await) + } + + /// get the task id in the global registry + pub fn get_task_id(&self) -> Option { + self.0.tid + } + + /// create a new task builder + #[must_use] + pub fn new(name: String) -> Self { + Self(HST::new(name)) + } +} + +/// a hotshot task with an event stream +pub struct HSTWithEvent< + ERR: std::error::Error, + EVENT: PassType, + ESTREAM: EventStream, + STATE: TS, +> { + /// phantom data + _pd: PhantomData<(ERR, EVENT, ESTREAM, STATE)>, +} + +impl< + ERR: std::error::Error, + EVENT: PassType, + ESTREAM: EventStream, + STATE: TS, + > ImplEventStream for HSTWithEvent +{ +} + +impl, STATE: TS> + ImplMessageStream for HSTWithMessage +{ +} + +impl, STATE: TS> + HotShotTaskTypes for HSTWithEvent +{ + type Event = EVENT; + type State = STATE; + type EventStream = ESTREAM; + type Message = (); + type MessageStream = DummyStream; + type Error = ERR; + + fn build(builder: TaskBuilder) -> HST + where + Self: Sized, + { + builder.0.base_check(); + builder.0.event_check(); + builder.0 + } +} + +/// a hotshot task with a message +pub struct HSTWithMessage< + ERR: std::error::Error, + MSG: PassType, + MSTREAM: Stream, + STATE: TS, +> { + /// phantom data + _pd: PhantomData<(ERR, MSG, MSTREAM, STATE)>, +} + +impl, STATE: TS> HotShotTaskTypes + for HSTWithMessage +{ + type Event = (); + type State = STATE; + type EventStream = DummyStream; + type Message = MSG; + type MessageStream = MSTREAM; + type Error = ERR; + + fn build(builder: TaskBuilder) -> HST + where + Self: Sized, + { + builder.0.base_check(); + builder.0.message_check(); + builder.0 + } +} + +/// hotshot task with even and message +pub struct HSTWithEventAndMessage< + ERR: std::error::Error, + EVENT: PassType, + ESTREAM: EventStream, + MSG: PassType, + MSTREAM: Stream, + STATE: TS, +> { + /// phantom data + _pd: PhantomData<(ERR, EVENT, ESTREAM, MSG, MSTREAM, STATE)>, +} + +impl< + ERR: std::error::Error, + EVENT: PassType, + ESTREAM: EventStream, + MSG: PassType, + MSTREAM: Stream, + STATE: TS, + > ImplEventStream for HSTWithEventAndMessage +{ +} + +impl< + ERR: std::error::Error, + EVENT: PassType, + ESTREAM: EventStream, + MSG: PassType, + MSTREAM: Stream, + STATE: TS, + > ImplMessageStream for HSTWithEventAndMessage +{ +} + +impl< + ERR: TaskErr, + EVENT: PassType, + ESTREAM: EventStream, + MSG: PassType, + MSTREAM: SendableStream, + STATE: TS, + > HotShotTaskTypes for HSTWithEventAndMessage +{ + type Event = EVENT; + type State = STATE; + type EventStream = ESTREAM; + type Message = MSG; + type MessageStream = MSTREAM; + type Error = ERR; + + fn build(builder: TaskBuilder) -> HST + where + Self: Sized, + { + builder.0.base_check(); + builder.0.message_check(); + builder.0.event_check(); + builder.0 + } +} + +#[cfg(test)] +pub mod test { + use async_compatibility_layer::channel::{unbounded, UnboundedStream}; + use snafu::Snafu; + + use crate::{event_stream, event_stream::ChannelStream, task::TS}; + + use super::{HSTWithEvent, HSTWithEventAndMessage, HSTWithMessage}; + use crate::{event_stream::EventStream, task::HotShotTaskTypes, task_impls::TaskBuilder}; + use async_compatibility_layer::art::async_spawn; + use futures::FutureExt; + use std::sync::Arc; + + use crate::{ + global_registry::GlobalRegistry, + task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted}, + }; + use async_compatibility_layer::logging::setup_logging; + + #[derive(Snafu, Debug)] + pub struct Error {} + + #[derive(Clone, Debug, Eq, PartialEq, Hash)] + pub struct State {} + + #[derive(Clone, Debug, Eq, PartialEq, Hash, Default)] + pub struct CounterState { + num_events_recved: u64, + } + + #[derive(Clone, Debug, Eq, PartialEq, Hash)] + pub enum Event { + Finished, + Dummy, + } + + impl TS for State {} + + impl TS for CounterState {} + + #[derive(Clone, Debug, PartialEq, Eq, Hash)] + pub enum Message { + Finished, + Dummy, + } + + // TODO fill in generics for stream + + pub type AppliedHSTWithEvent = HSTWithEvent, State>; + pub type AppliedHSTWithEventCounterState = + HSTWithEvent, CounterState>; + pub type AppliedHSTWithMessage = + HSTWithMessage, State>; + pub type AppliedHSTWithEventMessage = HSTWithEventAndMessage< + Error, + Event, + ChannelStream, + Message, + UnboundedStream, + State, + >; + + #[cfg(test)] + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[should_panic] + async fn test_init_with_event_stream() { + setup_logging(); + let task = TaskBuilder::::new("Test Task".to_string()); + AppliedHSTWithEvent::build(task).launch().await; + } + + // TODO this should be moved to async-compatibility-layer + #[cfg(test)] + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_channel_stream() { + use futures::StreamExt; + let (s, r) = unbounded(); + let mut stream: UnboundedStream = r.into_stream(); + s.send(Message::Dummy).await.unwrap(); + s.send(Message::Finished).await.unwrap(); + assert!(stream.next().await.unwrap() == Message::Dummy); + assert!(stream.next().await.unwrap() == Message::Finished); + } + + #[cfg(test)] + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_task_with_event_stream() { + setup_logging(); + let event_stream: event_stream::ChannelStream = event_stream::ChannelStream::new(); + let mut registry = GlobalRegistry::new(); + + let mut task_runner = crate::task_launcher::TaskRunner::default(); + + for i in 0..10000 { + let state = CounterState::default(); + let event_handler = HandleEvent(Arc::new(move |event, mut state: CounterState| { + async move { + if let Event::Dummy = event { + state.num_events_recved += 1; + } + + if state.num_events_recved == 100 { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + (None, state) + } + } + .boxed() + })); + let name = format!("Test Task {i:?}").to_string(); + let built_task = TaskBuilder::::new(name.clone()) + .register_event_stream(event_stream.clone(), FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler); + let id = built_task.get_task_id().unwrap(); + let result = AppliedHSTWithEventCounterState::build(built_task).launch(); + task_runner = task_runner.add_task(id, name, result); + } + + async_spawn(async move { + for _ in 0..100 { + event_stream.publish(Event::Dummy).await; + } + }); + + let results = task_runner.launch().await; + for result in results { + assert!(result.1 == HotShotTaskCompleted::ShutDown); + } + } + + #[cfg(test)] + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_task_with_event_stream_xtreme() { + setup_logging(); + let event_stream: event_stream::ChannelStream = event_stream::ChannelStream::new(); + + let state = State {}; + + let mut registry = GlobalRegistry::new(); + + let event_handler = HandleEvent(Arc::new(move |event, state| { + async move { + if let Event::Finished = event { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + (None, state) + } + } + .boxed() + })); + + let built_task = TaskBuilder::::new("Test Task".to_string()) + .register_event_stream(event_stream.clone(), FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler); + event_stream.publish(Event::Dummy).await; + event_stream.publish(Event::Dummy).await; + event_stream.publish(Event::Finished).await; + AppliedHSTWithEvent::build(built_task).launch().await; + } + + #[cfg(test)] + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_task_with_message_stream() { + setup_logging(); + let state = State {}; + + let mut registry = GlobalRegistry::new(); + + let (s, r) = async_compatibility_layer::channel::unbounded(); + + let message_handler = HandleMessage(Arc::new(move |message, state| { + async move { + if let Message::Finished = message { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + (None, state) + } + } + .boxed() + })); + + let built_task = TaskBuilder::::new("Test Task".to_string()) + .register_message_handler(message_handler) + .register_message_stream(r.into_stream()) + .register_registry(&mut registry) + .await + .register_state(state); + async_spawn(async move { + s.send(Message::Dummy).await.unwrap(); + s.send(Message::Finished).await.unwrap(); + }); + let result = AppliedHSTWithMessage::build(built_task).launch().await; + assert!(result == HotShotTaskCompleted::ShutDown); + } +} diff --git a/task/src/task_launcher.rs b/task/src/task_launcher.rs new file mode 100644 index 0000000000..cb5539fb38 --- /dev/null +++ b/task/src/task_launcher.rs @@ -0,0 +1,67 @@ +use futures::future::{join_all, BoxFuture}; + +use crate::{ + global_registry::{GlobalRegistry, HotShotTaskId}, + task::HotShotTaskCompleted, +}; + +// TODO use genericarray + typenum to make this use the number of tasks as a parameter +/// runner for tasks +/// `N` specifies the number of tasks to ensure that the user +/// doesn't forget how many tasks they wished to add. +pub struct TaskRunner +// < +// const N: usize, +// > +{ + /// internal set of tasks to launch + tasks: Vec<( + HotShotTaskId, + String, + BoxFuture<'static, HotShotTaskCompleted>, + )>, + /// global registry + pub registry: GlobalRegistry, +} + +impl Default for TaskRunner { + fn default() -> Self { + Self::new() + } +} + +impl TaskRunner /* */ { + /// create new runner + #[must_use] + pub fn new() -> Self { + Self { + tasks: Vec::new(), + registry: GlobalRegistry::new(), + } + } + + // `name` is for logging purposes only and may be duplicated or inconsistent. + /// to support builder pattern + #[must_use] + pub fn add_task( + mut self, + id: HotShotTaskId, + name: String, + task: BoxFuture<'static, HotShotTaskCompleted>, + ) -> TaskRunner { + self.tasks.push((id, name, task)); + self + } + + /// returns a `Vec` because type isn't known + pub async fn launch(self) -> Vec<(String, HotShotTaskCompleted)> { + let names = self + .tasks + .iter() + .map(|(_id, name, _)| name.clone()) + .collect::>(); + let result = join_all(self.tasks.into_iter().map(|(_, _, task)| task)).await; + + names.into_iter().zip(result).collect::>() + } +} diff --git a/task/src/task_state.rs b/task/src/task_state.rs new file mode 100644 index 0000000000..01758965a1 --- /dev/null +++ b/task/src/task_state.rs @@ -0,0 +1,182 @@ +use atomic_enum::atomic_enum; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::Debug, + sync::{atomic::Ordering, Arc}, +}; + +/// Nit: wish this was for u8 but sadly no +/// Represents the status of a hotshot task +#[atomic_enum] +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub enum TaskStatus { + /// the task hasn't started running + NotStarted = 0, + /// the task is running + Running = 1, + /// NOTE: not useful generally, but VERY useful for byzantine nodes + /// and testing malfunctions + /// we'll have a granular way to, from the registry, stop a task momentarily + /// and inspect/modify its state + Paused = 2, + /// the task completed + Completed = 3, +} + +/// The state of a task +/// `AtomicTaskStatus` + book keeping to notify btwn tasks +#[derive(Clone)] +pub struct TaskState { + /// previous status + prev: Arc, + /// next status + next: Arc, + // using `std::sync::mutex` here because it's faster than async's version + // wakers: Arc>>, +} + +impl Debug for TaskState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TaskState") + .field("status", &self.get_status()) + .finish() + } +} +impl Default for TaskState { + fn default() -> Self { + Self::new() + } +} + +impl TaskState { + /// create a new state + #[must_use] + pub fn new() -> Self { + Self { + prev: Arc::new(TaskStatus::NotStarted.into()), + next: Arc::new(TaskStatus::NotStarted.into()), + // wakers: Arc::default(), + } + } + + /// create a task state from a task status + #[must_use] + pub fn from_status(state: Arc) -> Self { + let prev_state = AtomicTaskStatus::new(state.load(Ordering::SeqCst)); + Self { + prev: Arc::new(prev_state), + next: state, + // wakers: Arc::default(), + } + } + + /// sets the state + /// # Panics + /// should never panic unless internally a lock poison happens + /// this should NOT be possible + pub fn set_state(&self, state: TaskStatus) { + self.next.swap(state, Ordering::SeqCst); + // no panics, so can never be poisoned. + // let mut wakers = self.wakers.lock().unwrap(); + + // drain the wakers + // for waker in wakers.drain(..) { + // waker.wake(); + // } + } + /// gets a possibly stale version of the state + #[must_use] + pub fn get_status(&self) -> TaskStatus { + self.next.load(Ordering::SeqCst) + } +} + +// GNARLY bug @jbearer found +// cx gets *really* large in some cases +// impl Stream for TaskState { +// type Item = TaskStatus; +// +// #[unstable] +// fn poll_next( +// self: std::pin::Pin<&mut Self>, +// cx: &mut std::task::Context<'_>, +// ) -> std::task::Poll> { +// let next = self.next.load(Ordering::SeqCst); +// let prev = self.prev.swap(next, Ordering::SeqCst); +// // a new value has been set +// if prev == next { +// // no panics, so impossible to be poisoned +// self.wakers.lock().unwrap().push(cx.waker().clone()); +// +// // no value has been set, poll again later +// std::task::Poll::Pending +// } else { +// std::task::Poll::Ready(Some(next)) +// } +// } +// } + +impl TaskState { + /// Try to get the next task status. + #[must_use] + pub fn try_next(self: std::pin::Pin<&mut Self>) -> Option { + let next = self.next.load(Ordering::SeqCst); + let prev = self.prev.swap(next, Ordering::SeqCst); + // a new value has been set + if prev == next { + None + } else { + // drain the wakers to wake up the stream. + // we did change value + // let mut wakers = self.wakers.lock().unwrap(); + // for waker in wakers.drain(..) { + // waker.wake(); + // } + Some(next) + } + } +} + +#[cfg(test)] +pub mod test { + + // #[cfg(test)] + // #[cfg_attr( + // feature = "tokio-executor", + // tokio::test(flavor = "multi_thread", worker_threads = 2) + // )] + // #[cfg_attr(feature = "async-std-executor", async_std::test)] + // async fn test_state_stream() { + // setup_logging(); + // + // let mut task = crate::task_state::TaskState::new(); + // + // let task_dup = task.clone(); + // + // async_spawn(async move { + // async_sleep(std::time::Duration::from_secs(1)).await; + // task_dup.set_state(crate::task_state::TaskStatus::Running); + // async_sleep(std::time::Duration::from_secs(1)).await; + // task_dup.set_state(crate::task_state::TaskStatus::Paused); + // async_sleep(std::time::Duration::from_secs(1)).await; + // task_dup.set_state(crate::task_state::TaskStatus::Completed); + // }); + // + // // spawn new task that sleeps then increments + // + // assert_eq!( + // task.try_next().unwrap(), + // crate::task_state::TaskStatus::Running + // ); + // assert_eq!( + // task.next().unwrap(), + // crate::task_state::TaskStatus::Paused + // ); + // assert_eq!( + // task.next().unwrap(), + // crate::task_state::TaskStatus::Completed + // ); + // } + // TODO test global registry using either global + lazy_static + // or passing around global registry +} diff --git a/testing/.gitignore b/testing/.gitignore new file mode 100644 index 0000000000..ae1fb8ba9e --- /dev/null +++ b/testing/.gitignore @@ -0,0 +1,2 @@ +/target +/out*.txt diff --git a/testing/Cargo.toml b/testing/Cargo.toml new file mode 100644 index 0000000000..e75c328a5d --- /dev/null +++ b/testing/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "hotshot-testing" +version = "0.1.0" +edition = "2021" +description = "Types and traits for the HotShot consesus module" +authors = ["Espresso Systems "] + +[features] +default = ["demo"] +demo = ["hotshot/demo"] + +# NOTE this is used to activate the slow tests we don't wish to run in CI +slow-tests = [] + +[dependencies] +ark-bls12-381 = { workspace = true } +async-compatibility-layer = { workspace = true } +async-trait = { workspace = true } +# needed for vrf demo +# so non-optional for now +blake3 = { workspace = true, features = ["traits-preview"] } +commit = { workspace = true } +either = { workspace = true } +futures = { workspace = true } +hotshot = { path = "../hotshot", features = [ + "hotshot-testing", +], default-features = false } +hotshot-types = { path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } +hotshot-task = { path = "../task", version = "0.1.0", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +jf-primitives = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +snafu = { workspace = true } +tracing = { workspace = true } +nll = { workspace = true } +serde = { workspace = true } +ethereum-types = { workspace = true } +bitvec = { workspace = true } + +[dev-dependencies] +async-lock = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/testing/README.md b/testing/README.md new file mode 100644 index 0000000000..a477450092 --- /dev/null +++ b/testing/README.md @@ -0,0 +1,72 @@ +# Purpose + +Infrastructure and integration tests for hotshot. + +# Usage + +The overall control flow is: + +```ignore +TestBuilder::default().build() -> TestLauncher::launch() -> TestRunner::execute() +| | | +- easy override setup fn - more explicit overrides - executes the test +| | for networking, storage, +- easy override correctness fn | hooks/overrides etc +| +- easily add in hooks +| +- easily override launching +``` + +Easily overriding setup/correctness checks/hooks and launching is all done by anonymous functions. Fairly sane and configurable setup and correct check functions may be generated from the round builder. The intended workflow should look like: + +```rust +use std::sync::Arc; +use futures::FutureExt; +use hotshot_testing::test_types::StaticNodeImplType; +use hotshot_testing::round::RoundHook; +use hotshot_testing::test_types::StaticCommitteeTestTypes; +use hotshot_testing::test_builder::TestBuilder; +use hotshot_testing::test_builder::TestMetadata; + +async { + // specify general characteristics of the test in TestMetadata + let metadata = TestMetadata { + total_nodes: 10, + start_nodes: 10, + num_succeeds: 5, + failure_threshold: 10, + ..Default::default() + }; + + // construct the builder + let mut test_builder = TestBuilder { + metadata, + /// we could build a check + check: None, + /// or a round setup if we want + setup: None + }; + + // construct the launcher + // this may be used to manually override any of the round functions + let test_launcher = test_builder.build::(); + + /// now let's add in a custom hook to print some debugging information at the beginning + /// of each view + let hook = + RoundHook(Arc::new(move |_runner, ctx| { + async move { + tracing::error!("Context for this view is {:#?})", ctx); + Ok(()) + } + .boxed_local() + })); + + /// add the hook, launch the test, then run it. + test_launcher.push_hook(hook).launch().run_test().await.unwrap(); + +}; +``` + +See TODO for examples. diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs new file mode 100644 index 0000000000..9e405519cf --- /dev/null +++ b/testing/src/completion_task.rs @@ -0,0 +1,127 @@ +use std::{sync::Arc, time::Duration}; + +use async_compatibility_layer::art::async_sleep; +use futures::FutureExt; +use hotshot::traits::TestableNodeImplementation; +use hotshot_task::{ + boxed_sync, + event_stream::{ChannelStream, EventStream}, + task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEventAndMessage, TaskBuilder}, + GeneratedStream, +}; +use hotshot_types::traits::node_implementation::NodeType; +use snafu::Snafu; + +use crate::test_runner::Node; + +use super::{test_launcher::TaskGenerator, GlobalTestEvent}; + +/// the idea here is to run as long as we want + +/// Data Availability task error +#[derive(Snafu, Debug)] +pub struct CompletionTaskErr {} + +/// Data availability task state +pub struct CompletionTask> { + pub(crate) test_event_stream: ChannelStream, + pub(crate) handles: Vec>, +} + +impl> TS for CompletionTask {} + +/// Completion task types +pub type CompletionTaskTypes = HSTWithEventAndMessage< + CompletionTaskErr, + GlobalTestEvent, + ChannelStream, + (), + GeneratedStream<()>, + CompletionTask, +>; + +/// Description for a time-based completion task. +#[derive(Clone, Debug)] +pub struct TimeBasedCompletionTaskDescription { + /// Duration of the task. + pub duration: Duration, +} + +/// Description for a completion task. +#[derive(Clone, Debug)] +pub enum CompletionTaskDescription { + /// Time-based completion task. + TimeBasedCompletionTaskBuilder(TimeBasedCompletionTaskDescription), +} + +impl CompletionTaskDescription { + /// Build and launch a completion task. + pub fn build_and_launch>( + self, + ) -> TaskGenerator> { + match self { + CompletionTaskDescription::TimeBasedCompletionTaskBuilder(td) => td.build_and_launch(), + } + } +} + +impl TimeBasedCompletionTaskDescription { + /// create the task and launch it + pub fn build_and_launch>( + self, + ) -> TaskGenerator> { + Box::new(move |state, mut registry, test_event_stream| { + async move { + let event_handler = + HandleEvent::>(Arc::new(move |event, state| { + async move { + match event { + GlobalTestEvent::ShutDown => { + (Some(HotShotTaskCompleted::ShutDown), state) + } + } + } + .boxed() + })); + let message_handler = + HandleMessage::>(Arc::new(move |_, state| { + async move { + state + .test_event_stream + .publish(GlobalTestEvent::ShutDown) + .await; + for node in &state.handles { + node.handle.clone().shut_down().await; + } + (Some(HotShotTaskCompleted::ShutDown), state) + } + .boxed() + })); + // normally I'd say "let's use Interval from async-std!" + // but doing this is easier than unifying async-std with tokio's slightly different + // interval abstraction + let stream_generator = GeneratedStream::new(Arc::new(move || { + let fut = async move { + async_sleep(self.duration).await; + }; + Some(boxed_sync(fut)) + })); + let builder = TaskBuilder::>::new( + "Test Completion Task".to_string(), + ) + .register_event_stream(test_event_stream, FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler) + .register_message_handler(message_handler) + .register_message_stream(stream_generator); + let task_id = builder.get_task_id().unwrap(); + (task_id, CompletionTaskTypes::build(builder).launch()) + } + .boxed() + }) + } +} diff --git a/testing/src/lib.rs b/testing/src/lib.rs new file mode 100644 index 0000000000..da15e2dfde --- /dev/null +++ b/testing/src/lib.rs @@ -0,0 +1,43 @@ +use hotshot_task::{event_stream::ChannelStream, task_impls::HSTWithEvent}; + +/// Helpers for initializing system context handle and building tasks. +pub mod task_helpers; + +/// builder +pub mod test_builder; + +/// launcher +pub mod test_launcher; + +/// runner +pub mod test_runner; + +/// task that's consuming events and asserting safety +pub mod overall_safety_task; + +/// task that's submitting transactions to the stream +pub mod txn_task; + +/// task that decides when things are complete +pub mod completion_task; + +/// node types +pub mod node_types; + +/// task to spin nodes up and down +pub mod spinning_task; + +// TODO node changer (spin up and down) + +#[derive(Clone, Debug)] +pub enum GlobalTestEvent { + ShutDown, +} + +pub enum ShutDownReason { + SafetyViolation, + SuccessfullyCompleted, +} + +pub type TestTask = + HSTWithEvent, STATE>; diff --git a/testing/src/network_reliability.rs b/testing/src/network_reliability.rs new file mode 100644 index 0000000000..97b22b533b --- /dev/null +++ b/testing/src/network_reliability.rs @@ -0,0 +1,163 @@ +use std::time::Duration; + +use hotshot::traits::NetworkReliability; +use rand::{ + distributions::{Bernoulli, Uniform}, + prelude::Distribution, +}; + +/// A synchronous network. Packets may be delayed, but are guaranteed +/// to arrive within `timeout` ns +#[derive(Clone, Copy, Debug, Default)] +pub struct SynchronousNetwork { + /// Max delay of packet before arrival + timeout_ms: u64, + /// Lowest value in milliseconds that a packet may be delayed + delay_low_ms: u64, +} + +impl NetworkReliability for SynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.timeout_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An asynchronous network. Packets may be dropped entirely +/// or delayed for arbitrarily long periods +/// probability that packet is kept = `keep_numerator` / `keep_denominator` +/// packet delay is obtained by sampling from a uniform distribution +/// between `delay_low_ms` and `delay_high_ms`, inclusive +#[derive(Debug, Clone, Copy)] +pub struct AsynchronousNetwork { + /// numerator for probability of keeping packets + keep_numerator: u32, + /// denominator for probability of keeping packets + keep_denominator: u32, + /// lowest value in milliseconds that a packet may be delayed + delay_low_ms: u64, + /// highest value in milliseconds that a packet may be delayed + delay_high_ms: u64, +} + +impl NetworkReliability for AsynchronousNetwork { + fn sample_keep(&self) -> bool { + Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) + .unwrap() + .sample(&mut rand::thread_rng()) + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An partially synchronous network. Behaves asynchronously +/// until some arbitrary time bound, GST, +/// then synchronously after GST +#[derive(Debug, Clone, Copy)] +pub struct PartiallySynchronousNetwork { + /// asynchronous portion of network + asynchronous: AsynchronousNetwork, + /// synchronous portion of network + synchronous: SynchronousNetwork, + /// time when GST occurs + gst: std::time::Duration, + /// when the network was started + start: std::time::Instant, +} + +impl NetworkReliability for PartiallySynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + // act asyncronous before gst + if self.start.elapsed() < self.gst { + if self.asynchronous.sample_keep() { + self.asynchronous.sample_delay() + } else { + // assume packet was "dropped" and will arrive after gst + self.synchronous.sample_delay() + self.gst + } + } else { + // act syncronous after gst + self.synchronous.sample_delay() + } + } +} + +impl Default for AsynchronousNetwork { + // disable all chance of failure + fn default() -> Self { + AsynchronousNetwork { + keep_numerator: 1, + keep_denominator: 1, + delay_low_ms: 0, + delay_high_ms: 0, + } + } +} + +impl Default for PartiallySynchronousNetwork { + fn default() -> Self { + PartiallySynchronousNetwork { + synchronous: SynchronousNetwork::default(), + asynchronous: AsynchronousNetwork::default(), + gst: std::time::Duration::new(0, 0), + start: std::time::Instant::now(), + } + } +} + +impl SynchronousNetwork { + /// create new `SynchronousNetwork` + pub fn new(timeout: u64, delay_low_ms: u64) -> Self { + SynchronousNetwork { + timeout_ms: timeout, + delay_low_ms, + } + } +} + +impl AsynchronousNetwork { + /// create new `AsynchronousNetwork` + pub fn new( + keep_numerator: u32, + keep_denominator: u32, + delay_low_ms: u64, + delay_high_ms: u64, + ) -> Self { + AsynchronousNetwork { + keep_numerator, + keep_denominator, + delay_low_ms, + delay_high_ms, + } + } +} + +impl PartiallySynchronousNetwork { + /// create new `PartiallySynchronousNetwork` + pub fn new( + asynchronous: AsynchronousNetwork, + synchronous: SynchronousNetwork, + gst: std::time::Duration, + ) -> Self { + PartiallySynchronousNetwork { + asynchronous, + synchronous, + gst, + start: std::time::Instant::now(), + } + } +} diff --git a/testing/src/node_ctx.rs b/testing/src/node_ctx.rs new file mode 100644 index 0000000000..57a50d6a53 --- /dev/null +++ b/testing/src/node_ctx.rs @@ -0,0 +1,56 @@ +use std::{collections::HashMap, sync::Arc}; + +use hotshot::{traits::TestableNodeImplementation, HotShotError}; +use hotshot_types::{data::LeafType, traits::node_implementation::NodeType}; + +/// context for a round +// TODO eventually we want these to just be futures +// that we poll when things are event driven +// this context will be passed around +#[derive(Debug, Clone)] +pub struct NodeCtx> { + /// results from previous rounds + pub round_results: HashMap>, +} + +impl> Default + for NodeCtx +{ + fn default() -> Self { + Self { + round_results: Default::default(), + } + } +} + +/// Status of a view. +#[derive(Debug, Clone)] +pub enum ViewStatus> { + /// The view is in progress. + InProgress(InProgress), + /// The view is failed. + ViewFailed(ViewFailed), + /// The view is a success. + ViewSuccess(ViewSuccess), +} + +/// In-progress status of a view. +#[derive(Debug, Clone)] +pub struct InProgress {} + +/// Failed status of a view. +#[derive(Debug, Clone)] +pub struct ViewFailed(pub Arc>); + +/// Success status of a view. +#[derive(Debug, Clone)] +pub struct ViewSuccess> { + /// state after decide event + pub agreed_state: LEAF::MaybeState, + + /// block after decide event + pub agreed_block: LEAF::DeltasType, + + /// leaf after decide event + pub agreed_leaf: LEAF, +} diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs new file mode 100644 index 0000000000..e188de25af --- /dev/null +++ b/testing/src/node_types.rs @@ -0,0 +1,643 @@ +use hotshot::traits::implementations::CombinedNetworks; +use std::{marker::PhantomData, sync::Arc}; + +use hotshot::{ + demos::sdemo::{SDemoBlock, SDemoState, SDemoTransaction}, + traits::{ + election::static_committee::{StaticCommittee, StaticElectionConfig, StaticVoteToken}, + implementations::{ + Libp2pCommChannel, Libp2pNetwork, MemoryCommChannel, MemoryNetwork, MemoryStorage, + WebCommChannel, WebServerNetwork, WebServerWithFallbackCommChannel, + }, + NodeImplementation, + }, + types::bn254::BN254Pub, +}; +use hotshot_types::{ + certificate::ViewSyncCertificate, + data::{DAProposal, QuorumProposal, SequencingLeaf, ViewNumber}, + message::{Message, SequencingMessage}, + traits::{ + election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + network::{TestableChannelImplementation, TestableNetworkingImplementation}, + node_implementation::{ChannelMaps, NodeType, SequencingExchanges, TestableExchange}, + }, + vote::{DAVote, QuorumVote, ViewSyncVote}, +}; +use serde::{Deserialize, Serialize}; + +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +pub struct SequencingTestTypes; +impl NodeType for SequencingTestTypes { + type Time = ViewNumber; + type BlockType = SDemoBlock; + type SignatureKey = BN254Pub; + type VoteTokenType = StaticVoteToken; + type Transaction = SDemoTransaction; + type ElectionConfigType = StaticElectionConfig; + type StateType = SDemoState; +} + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +pub struct SequencingMemoryImpl; + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +pub struct SequencingLibp2pImpl; + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +pub struct SequencingWebImpl; + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +pub struct StaticFallbackImpl; + +pub type StaticMembership = + StaticCommittee>; + +pub type StaticMemoryDAComm = MemoryCommChannel< + SequencingTestTypes, + SequencingMemoryImpl, + DAProposal, + DAVote, + StaticMembership, +>; + +type StaticLibp2pDAComm = Libp2pCommChannel< + SequencingTestTypes, + SequencingLibp2pImpl, + DAProposal, + DAVote, + StaticMembership, +>; + +type StaticWebDAComm = WebCommChannel< + SequencingTestTypes, + SequencingWebImpl, + DAProposal, + DAVote, + StaticMembership, +>; + +type StaticFallbackComm = + WebServerWithFallbackCommChannel; + +pub type StaticMemoryQuorumComm = MemoryCommChannel< + SequencingTestTypes, + SequencingMemoryImpl, + QuorumProposal>, + QuorumVote>, + StaticMembership, +>; + +type StaticLibp2pQuorumComm = Libp2pCommChannel< + SequencingTestTypes, + SequencingLibp2pImpl, + QuorumProposal>, + QuorumVote>, + StaticMembership, +>; + +type StaticWebQuorumComm = WebCommChannel< + SequencingTestTypes, + SequencingWebImpl, + QuorumProposal>, + QuorumVote>, + StaticMembership, +>; + +pub type StaticMemoryViewSyncComm = MemoryCommChannel< + SequencingTestTypes, + SequencingMemoryImpl, + ViewSyncCertificate, + ViewSyncVote, + StaticMembership, +>; + +type StaticLibp2pViewSyncComm = Libp2pCommChannel< + SequencingTestTypes, + SequencingLibp2pImpl, + ViewSyncCertificate, + ViewSyncVote, + StaticMembership, +>; + +type StaticWebViewSyncComm = WebCommChannel< + SequencingTestTypes, + SequencingWebImpl, + ViewSyncCertificate, + ViewSyncVote, + StaticMembership, +>; + +pub type SequencingLibp2pExchange = SequencingExchanges< + SequencingTestTypes, + Message, + QuorumExchange< + SequencingTestTypes, + >::Leaf, + QuorumProposal>, + StaticMembership, + StaticLibp2pQuorumComm, + Message, + >, + CommitteeExchange< + SequencingTestTypes, + StaticMembership, + StaticLibp2pDAComm, + Message, + >, + ViewSyncExchange< + SequencingTestTypes, + ViewSyncCertificate, + StaticMembership, + StaticLibp2pViewSyncComm, + Message, + >, +>; + +impl NodeImplementation for SequencingLibp2pImpl { + type Storage = MemoryStorage>; + type Leaf = SequencingLeaf; + type Exchanges = SequencingLibp2pExchange; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + ( + ChannelMaps::new(start_view), + Some(ChannelMaps::new(start_view)), + ) + } +} + +impl + TestableExchange< + SequencingTestTypes, + >::Leaf, + Message, + > for SequencingLibp2pExchange +{ + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box< + dyn Fn( + u64, + ) -> ( + , + >>::Networking, + , + >>::Networking, + , + >>::Networking, + ) + 'static, + > { + let network_generator = Arc::new(, + ::SignatureKey, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + )); + + Box::new(move |id| { + let network = Arc::new(network_generator(id)); + let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); + let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); + let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + + (quorum_chan, committee_chan, view_sync_chan) + }) + } +} + +pub type SequencingMemoryExchange = SequencingExchanges< + SequencingTestTypes, + Message, + QuorumExchange< + SequencingTestTypes, + >::Leaf, + QuorumProposal>, + StaticMembership, + StaticMemoryQuorumComm, + Message, + >, + CommitteeExchange< + SequencingTestTypes, + StaticMembership, + StaticMemoryDAComm, + Message, + >, + ViewSyncExchange< + SequencingTestTypes, + ViewSyncCertificate, + StaticMembership, + StaticMemoryViewSyncComm, + Message, + >, +>; + +impl + TestableExchange< + SequencingTestTypes, + >::Leaf, + Message, + > for SequencingMemoryExchange +{ + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box< + dyn Fn( + u64, + ) -> ( + , + >>::Networking, + , + >>::Networking, + , + >>::Networking, + ) + 'static, + > { + let network_generator = Arc::new(, + ::SignatureKey, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + )); + let network_da_generator = Arc::new(, + ::SignatureKey, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + true, + )); + Box::new(move |id| { + let network = Arc::new(network_generator(id)); + let network_da = Arc::new(network_da_generator(id)); + let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); + let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network_da); + let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + + (quorum_chan, committee_chan, view_sync_chan) + }) + } +} + +impl NodeImplementation for SequencingMemoryImpl { + type Storage = MemoryStorage>; + type Leaf = SequencingLeaf; + type Exchanges = SequencingMemoryExchange; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + ( + ChannelMaps::new(start_view), + Some(ChannelMaps::new(start_view)), + ) + } +} + +// man these generics are big oof +// they're a LOT +// when are we getting HKT for rust +// smh my head + +pub type SequencingWebExchanges = SequencingExchanges< + SequencingTestTypes, + Message, + QuorumExchange< + SequencingTestTypes, + >::Leaf, + QuorumProposal>, + StaticMembership, + StaticWebQuorumComm, + Message, + >, + CommitteeExchange< + SequencingTestTypes, + StaticMembership, + StaticWebDAComm, + Message, + >, + ViewSyncExchange< + SequencingTestTypes, + ViewSyncCertificate, + StaticMembership, + StaticWebViewSyncComm, + Message, + >, +>; + +impl + TestableExchange< + SequencingTestTypes, + >::Leaf, + Message, + > for SequencingWebExchanges +{ + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box< + dyn Fn( + u64, + ) -> ( + , + >>::Networking, + , + >>::Networking, + , + >>::Networking, + ) + 'static, + > { + let network_generator = Arc::new(, + ::SignatureKey, + _, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + )); + let network_da_generator = Arc::new(, + ::SignatureKey, + SequencingTestTypes, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + true, + )); + Box::new(move |id| { + let network = Arc::new(network_generator(id)); + let network_da = Arc::new(network_da_generator(id)); + let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); + let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network_da); + let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + + (quorum_chan, committee_chan, view_sync_chan) + }) + } +} + +impl NodeImplementation for SequencingWebImpl { + type Storage = MemoryStorage>; + type Leaf = SequencingLeaf; + type Exchanges = SequencingWebExchanges; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + ( + ChannelMaps::new(start_view), + Some(ChannelMaps::new(start_view)), + ) + } +} + +pub type SequencingFallbackExchange = SequencingExchanges< + SequencingTestTypes, + Message, + QuorumExchange< + SequencingTestTypes, + >::Leaf, + QuorumProposal>, + StaticMembership, + StaticFallbackComm, + Message, + >, + CommitteeExchange< + SequencingTestTypes, + StaticMembership, + StaticFallbackComm, + Message, + >, + ViewSyncExchange< + SequencingTestTypes, + ViewSyncCertificate, + StaticMembership, + StaticFallbackComm, + Message, + >, +>; + +impl + TestableExchange< + SequencingTestTypes, + >::Leaf, + Message, + > for SequencingFallbackExchange +{ + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box< + dyn Fn( + u64, + ) -> ( + , + >>::Networking, + , + >>::Networking, + , + >>::Networking, + ) + 'static, + > { + let libp2p_generator = Arc::new(, + ::SignatureKey, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + true, + )); + let ws_generator = Arc::new(, + ::SignatureKey, + _, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + false, + )); + let ws_da_generator = Arc::new(, + ::SignatureKey, + SequencingTestTypes, + > as TestableNetworkingImplementation< + SequencingTestTypes, + Message, + >>::generator( + expected_node_count, + num_bootstrap, + 2, + da_committee_size, + true, + )); + + Box::new(move |id| { + let libp2p_network = libp2p_generator(id); + let ws = ws_generator(id); + let ws_da = ws_da_generator(id); + + // TODO make a proper constructor + let network = Arc::new(CombinedNetworks(ws, libp2p_network.clone(), PhantomData)); + let network_da = Arc::new(CombinedNetworks(ws_da, libp2p_network, PhantomData)); + + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation< + _, + _, + QuorumProposal< + SequencingTestTypes, + >::Leaf, + >, + QuorumVote< + SequencingTestTypes, + >::Leaf, + >, + _, + _, + >>::generate_network()(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation< + _, + _, + DAProposal, + DAVote, + _, + _, + >>::generate_network()(network_da); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation< + _, + _, + ViewSyncCertificate, + ViewSyncVote, + _, + _, + >>::generate_network()(network); + (quorum_chan, committee_chan, view_sync_chan) + }) + } +} + +impl NodeImplementation for StaticFallbackImpl { + type Storage = MemoryStorage>; + type Leaf = SequencingLeaf; + type Exchanges = SequencingFallbackExchange; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + ( + ChannelMaps::new(start_view), + Some(ChannelMaps::new(start_view)), + ) + } +} diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs new file mode 100644 index 0000000000..4bda48adcc --- /dev/null +++ b/testing/src/overall_safety_task.rs @@ -0,0 +1,611 @@ +use commit::Commitment; +use either::Either; +use hotshot_task::{event_stream::EventStream, Merge}; +use hotshot_task_impls::events::SequencingHotShotEvent; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, +}; + +use async_compatibility_layer::channel::UnboundedStream; +use futures::FutureExt; +use hotshot::{ + traits::{NodeImplementation, TestableNodeImplementation}, + HotShotError, +}; +use hotshot_task::{ + event_stream::ChannelStream, + task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEventAndMessage, TaskBuilder}, + MergeN, +}; +use hotshot_types::{ + certificate::QuorumCertificate, + data::{DeltasType, LeafBlock, LeafType}, + error::RoundTimedoutState, + event::{Event, EventType}, + traits::node_implementation::NodeType, +}; +use snafu::Snafu; + +use crate::{test_launcher::TaskGenerator, test_runner::Node}; +pub type StateAndBlock = (Vec, Vec); + +use super::GlobalTestEvent; + +/// the status of a view +#[derive(Debug, Clone)] +pub enum ViewStatus { + /// success + Ok, + /// failure + Failed, + /// safety violation + Err(OverallSafetyTaskErr), + /// in progress + InProgress, +} + +/// possible errors +#[derive(Snafu, Debug, Clone)] +pub enum OverallSafetyTaskErr { + /// inconsistent txn nums + InconsistentTxnsNum { map: HashMap }, + /// too many failed views + TooManyFailures { + /// expected number of failures + expected: usize, + /// actual number of failures + got: usize, + }, + /// not enough decides + NotEnoughDecides { + /// expected number of decides + expected: usize, + /// acutal number of decides + got: usize, + }, + /// mismatched leaves for a view + MismatchedLeaf, + /// mismatched states for a view + InconsistentStates, + /// mismatched blocks for a view + InconsistentBlocks, +} + +/// Data availability task state +pub struct OverallSafetyTask> { + /// handles + pub handles: Vec>, + /// ctx + pub ctx: RoundCtx, + /// event stream for publishing safety violations + pub test_event_stream: ChannelStream, +} + +impl> TS for OverallSafetyTask {} + +/// Result of running a round of consensus +#[derive(Debug)] +pub struct RoundResult> { + /// Transactions that were submitted + // pub txns: Vec, + /// Nodes that committed this round + /// id -> (leaf, qc) + pub success_nodes: HashMap, QuorumCertificate)>, + /// Nodes that failed to commit this round + pub failed_nodes: HashMap>>>, + + /// whether or not the round succeeded (for a custom defn of succeeded) + pub status: ViewStatus, + + /// NOTE: technically a map is not needed + /// left one anyway for ease of viewing + /// leaf -> # entries decided on that leaf + pub leaf_map: HashMap, + + /// block -> # entries decided on that block + pub block_map: HashMap>, usize>, + + /// state -> # entries decided on that state + pub state_map: HashMap<::MaybeState, usize>, + + pub num_txns_map: HashMap, +} + +impl> Default for RoundResult { + fn default() -> Self { + Self { + success_nodes: Default::default(), + failed_nodes: Default::default(), + leaf_map: Default::default(), + block_map: Default::default(), + state_map: Default::default(), + num_txns_map: Default::default(), + status: ViewStatus::InProgress, + } + } +} + +/// smh my head I shouldn't need to implement this +/// Rust doesn't realize I doesn't need to implement default +impl> Default for RoundCtx { + fn default() -> Self { + Self { + round_results: Default::default(), + failed_views: Default::default(), + successful_views: Default::default(), + } + } +} + +/// context for a round +/// TODO eventually we want these to just be futures +/// that we poll when things are event driven +/// this context will be passed around +#[derive(Debug)] +pub struct RoundCtx> { + /// results from previous rounds + /// view number -> round result + pub round_results: + HashMap>::Leaf>>, + /// during the run view refactor + pub failed_views: HashSet, + /// successful views + pub successful_views: HashSet, +} + +impl> RoundCtx { + /// inserts an error into the context + pub fn insert_error_to_context( + &mut self, + view_number: TYPES::Time, + error: Arc>, + ) { + match self.round_results.entry(view_number) { + Entry::Occupied(mut o) => match o.get_mut().failed_nodes.entry(*view_number) { + Entry::Occupied(mut o2) => { + o2.get_mut().push(error); + } + Entry::Vacant(v) => { + v.insert(vec![error]); + } + }, + Entry::Vacant(v) => { + let mut round_result = RoundResult::default(); + round_result.failed_nodes.insert(*view_number, vec![error]); + v.insert(round_result); + } + } + } +} + +impl> RoundResult { + /// insert into round result + pub fn insert_into_result( + &mut self, + idx: usize, + result: (Vec, QuorumCertificate), + maybe_block_size: Option, + ) -> Option { + self.success_nodes.insert(idx as u64, result.clone()); + + let maybe_leaf: Option = result.0.into_iter().last(); + if let Some(leaf) = maybe_leaf.clone() { + match self.leaf_map.entry(leaf.clone()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } + } + + let (state, block) = (leaf.get_state(), leaf.get_deltas()); + + match self.state_map.entry(state.clone()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } + } + match self.block_map.entry(block.clone().block_commitment()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } + } + + if let Some(num_txns) = maybe_block_size { + match self.num_txns_map.entry(num_txns) { + Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + Entry::Vacant(v) => { + v.insert(1); + } + } + } + } + maybe_leaf + } + + /// determines whether or not the round passes + /// also do a safety check + #[allow(clippy::too_many_arguments)] + pub fn update_status( + &mut self, + threshold: usize, + total_num_nodes: usize, + key: LEAF, + check_leaf: bool, + check_state: bool, + check_block: bool, + transaction_threshold: u64, + ) { + let num_decided = self.success_nodes.len(); + let num_failed = self.failed_nodes.len(); + let remaining_nodes = total_num_nodes - (num_decided + num_failed); + + if check_leaf && self.leaf_map.len() != 1 { + self.status = ViewStatus::Err(OverallSafetyTaskErr::MismatchedLeaf); + return; + } + + if check_state && self.state_map.len() != 1 { + self.status = ViewStatus::Err(OverallSafetyTaskErr::InconsistentStates); + return; + } + + if check_block && self.block_map.len() != 1 { + self.status = ViewStatus::Err(OverallSafetyTaskErr::InconsistentBlocks); + return; + } + + if transaction_threshold >= 1 { + if self.num_txns_map.len() > 1 { + self.status = ViewStatus::Err(OverallSafetyTaskErr::InconsistentTxnsNum { + map: self.num_txns_map.clone(), + }); + return; + } + if *self.num_txns_map.iter().last().unwrap().0 < transaction_threshold { + self.status = ViewStatus::Failed; + return; + } + } + + // check for success + if num_decided >= threshold { + // decide on if we've succeeded. + // if so, set state and return + // if not, return error + // if neither, continue through + + let state_key = key.get_state(); + let block_key = key.get_deltas().block_commitment(); + + if *self.block_map.get(&block_key).unwrap() == threshold + && *self.state_map.get(&state_key).unwrap() == threshold + && *self.leaf_map.get(&key).unwrap() == threshold + { + self.status = ViewStatus::Ok; + return; + } + } + + let is_success_possible = remaining_nodes + num_decided >= threshold; + if !is_success_possible { + self.status = ViewStatus::Failed; + } + } + + /// generate leaves + pub fn gen_leaves(&self) -> HashMap { + let mut leaves = HashMap::::new(); + + for (leaf_vec, _) in self.success_nodes.values() { + let most_recent_leaf = leaf_vec.iter().last(); + if let Some(leaf) = most_recent_leaf { + match leaves.entry(leaf.clone()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } + } + } + } + leaves + } +} + +/// cross node safety properties +#[derive(Clone)] +pub struct OverallSafetyPropertiesDescription { + /// required number of successful views + pub num_successful_views: usize, + /// whether or not to check the leaf + pub check_leaf: bool, + /// whether or not to check the state + pub check_state: bool, + /// whether or not to check the block + pub check_block: bool, + /// whether or not to check that we have threshold amounts of transactions each block + /// if 0: don't check + /// if n > 0, check that at least n transactions are decided upon if such information + /// is available + pub transaction_threshold: u64, + /// num of total rounds allowed to fail + pub num_failed_views: usize, + /// threshold calculator. Given number of live and total nodes, provide number of successes + /// required to mark view as successful + pub threshold_calculator: Arc usize + Send + Sync>, +} + +impl std::fmt::Debug for OverallSafetyPropertiesDescription { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OverallSafetyPropertiesDescription") + .field("num successful views", &self.num_successful_views) + .field("check leaf", &self.check_leaf) + .field("check_state", &self.check_state) + .field("check_block", &self.check_block) + .field("num_failed_rounds_total", &self.num_failed_views) + .finish() + } +} + +impl Default for OverallSafetyPropertiesDescription { + fn default() -> Self { + Self { + num_successful_views: 50, + check_leaf: false, + check_state: true, + check_block: true, + num_failed_views: 10, + transaction_threshold: 0, + // very strict + threshold_calculator: Arc::new(|_num_live, num_total| 2 * num_total / 3 + 1), + } + } +} + +impl OverallSafetyPropertiesDescription { + /// build a task + pub fn build>( + self, + ) -> TaskGenerator> { + let Self { + check_leaf, + check_state, + check_block, + num_failed_views: num_failed_rounds_total, + num_successful_views, + threshold_calculator, + transaction_threshold, + }: Self = self; + + Box::new(move |mut state, mut registry, test_event_stream| { + async move { + let event_handler = HandleEvent::>(Arc::new( + move |event, state| { + async move { + match event { + GlobalTestEvent::ShutDown => { + let num_incomplete_views = state.ctx.round_results.len() + - state.ctx.successful_views.len() + - state.ctx.failed_views.len(); + + if state.ctx.successful_views.len() < num_successful_views { + return ( + Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::NotEnoughDecides { + got: state.ctx.successful_views.len(), + expected: num_successful_views, + }, + ))), + state, + ); + } + + if state.ctx.failed_views.len() + num_incomplete_views + >= num_failed_rounds_total + { + return ( + Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::TooManyFailures { + got: state.ctx.failed_views.len(), + expected: num_failed_rounds_total, + }, + ))), + state, + ); + } + // TODO check if we got enough successful views + (Some(HotShotTaskCompleted::ShutDown), state) + } + } + } + .boxed() + }, + )); + + let message_handler = HandleMessage::>(Arc::new( + move |msg, mut state| { + let threshold_calculator = threshold_calculator.clone(); + async move { + + let (idx, maybe_event ) : (usize, Either<_, _>)= msg; + if let Either::Left(Event { view_number, event }) = maybe_event { + let key = match event { + EventType::Error { error } => { + state.ctx.insert_error_to_context(view_number, error); + None + } + EventType::Decide { + leaf_chain, + qc, + block_size: maybe_block_size, + } => { + let paired_up = (leaf_chain.to_vec(), (*qc).clone()); + match state.ctx.round_results.entry(view_number) { + Entry::Occupied(mut o) => o.get_mut().insert_into_result( + idx, + paired_up, + maybe_block_size, + ), + Entry::Vacant(v) => { + let mut round_result = RoundResult::default(); + let key = round_result.insert_into_result( + idx, + paired_up, + maybe_block_size, + ); + v.insert(round_result); + key + } + } + } + // TODO Emit this event in the consensus task once canceling the timeout task is implemented + EventType::ReplicaViewTimeout { view_number } => { + let error = Arc::new(HotShotError::::ViewTimeoutError { + view_number, + state: RoundTimedoutState::TestCollectRoundEventsTimedOut, + }); + state.ctx.insert_error_to_context(view_number, error); + None + } + _ => return (None, state), + }; + + // update view count + let threshold = + (threshold_calculator)(state.handles.len(), state.handles.len()); + + let view = state.ctx.round_results.get_mut(&view_number).unwrap(); + + if let Some(key) = key { + view.update_status( + threshold, + state.handles.len(), + key, + check_leaf, + check_state, + check_block, + transaction_threshold, + ); + match view.status.clone() { + ViewStatus::Ok => { + state.ctx.successful_views.insert(view_number); + if state.ctx.successful_views.len() + >= self.num_successful_views + { + state + .test_event_stream + .publish(GlobalTestEvent::ShutDown) + .await; + return (Some(HotShotTaskCompleted::ShutDown), state); + } + return (None, state); + } + ViewStatus::Failed => { + state.ctx.failed_views.insert(view_number); + if state.ctx.failed_views.len() >= self.num_failed_views { + state + .test_event_stream + .publish(GlobalTestEvent::ShutDown) + .await; + return ( + Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::TooManyFailures { + got: state.ctx.failed_views.len(), + expected: num_failed_rounds_total, + }, + ))), + state, + ); + } + return (None, state); + } + ViewStatus::Err(e) => { + return ( + Some(HotShotTaskCompleted::Error(Box::new(e))), + state, + ); + } + ViewStatus::InProgress => { + return (None, state); + } + } + } + + } + + (None, state) + } + .boxed() + }, + )); + + let mut streams = vec![]; + for handle in &mut state.handles { + let s1 = + handle + .handle + .get_event_stream_known_impl(FilterEvent::default()) + .await + .0; + let s2 = + handle + .handle + .get_internal_event_stream_known_impl(FilterEvent::default()) + .await + .0; + streams.push( + Merge::new(s1, s2) + ); + } + let builder = TaskBuilder::>::new( + "Test Overall Safety Task".to_string(), + ) + .register_event_stream(test_event_stream, FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_message_handler(message_handler) + .register_message_stream(MergeN::new(streams)) + .register_event_handler(event_handler) + .register_state(state); + let task_id = builder.get_task_id().unwrap(); + (task_id, OverallSafetyTaskTypes::build(builder).launch()) + } + .boxed() + }) + } +} + +/// overall types for safety task +pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< + OverallSafetyTaskErr, + GlobalTestEvent, + ChannelStream, + ( + usize, + Either< + Event>::Leaf>, + SequencingHotShotEvent, + >, + ), + MergeN< + Merge< + UnboundedStream>::Leaf>>, + UnboundedStream>, + >, + >, + OverallSafetyTask, +>; diff --git a/testing/src/per_node_safety_task.rs b/testing/src/per_node_safety_task.rs new file mode 100644 index 0000000000..82acef4042 --- /dev/null +++ b/testing/src/per_node_safety_task.rs @@ -0,0 +1,258 @@ +// // TODO rename this file to per-node +// +// use std::{ops::Deref, sync::Arc}; +// +// use async_compatibility_layer::channel::UnboundedStream; +// use either::Either; +// use futures::{ +// future::{BoxFuture, LocalBoxFuture}, +// FutureExt, +// }; +// use hotshot::traits::TestableNodeImplementation; +// use hotshot_task::{ +// event_stream::ChannelStream, +// global_registry::{GlobalRegistry, HotShotTaskId}, +// task::{ +// FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TaskErr, +// HST, TS, +// }, +// task_impls::{HSTWithEvent, HSTWithEventAndMessage, TaskBuilder}, +// }; +// use hotshot_types::{ +// event::{Event, EventType}, +// traits::node_implementation::NodeType, +// }; +// use nll::nll_todo::nll_todo; +// use snafu::Snafu; +// use tracing::log::warn; +// +// use crate::test_errors::ConsensusTestError; +// +// use super::{ +// completion_task::CompletionTask, +// node_ctx::{NodeCtx, ViewFailed, ViewStatus, ViewSuccess}, +// GlobalTestEvent, +// }; +// +// #[derive(Snafu, Debug)] +// pub enum PerNodeSafetyTaskErr { +// // TODO make this more detailed +// TooManyFailures, +// NotEnoughDecides, +// } +// impl TaskErr for PerNodeSafetyTaskErr {} +// +// /// Data availability task state +// /// +// #[derive(Debug)] +// pub struct PerNodeSafetyTask> { +// pub(crate) ctx: NodeCtx, +// } +// +// impl> Default +// for PerNodeSafetyTask +// { +// fn default() -> Self { +// Self { +// ctx: Default::default(), +// } +// } +// } +// +// impl> TS +// for PerNodeSafetyTask +// { +// } +// +// /// builder describing custom safety properties +// #[derive(Clone)] +// pub enum PerNodeSafetyTaskDescription< +// TYPES: NodeType, +// I: TestableNodeImplementation, +// > { +// GenProperties(PerNodeSafetyPropertiesDescription), +// CustomProperties(PerNodeSafetyFinisher), +// } +// +// /// properties used for gen +// #[derive(Clone, Debug)] +// pub struct PerNodeSafetyPropertiesDescription { +// /// number failed views +// pub num_failed_views: Option, +// /// number decide events +// pub num_decide_events: Option, +// } +// +// // basic consistency check for single node +// /// Exists for easier overriding +// /// runs at end of all tasks +// #[derive(Clone)] +// #[allow(clippy::type_complexity)] +// pub struct PerNodeSafetyFinisher< +// TYPES: NodeType, +// I: TestableNodeImplementation, +// >( +// pub Arc< +// dyn for<'a> Fn(&'a mut NodeCtx) -> BoxFuture<'a, Result<(), PerNodeSafetyTaskErr>> +// + Send +// + 'static +// + Sync, +// >, +// ); +// +// impl> Deref +// for PerNodeSafetyFinisher +// { +// type Target = dyn for<'a> Fn(&'a mut NodeCtx) -> BoxFuture<'a, Result<(), PerNodeSafetyTaskErr>> +// + Send +// + 'static +// + Sync; +// +// fn deref(&self) -> &Self::Target { +// &*self.0 +// } +// } +// +// impl> +// PerNodeSafetyTaskDescription +// { +// fn gen_finisher(self) -> PerNodeSafetyFinisher { +// match self { +// PerNodeSafetyTaskDescription::CustomProperties(finisher) => finisher, +// PerNodeSafetyTaskDescription::GenProperties(PerNodeSafetyPropertiesDescription { +// num_failed_views, +// num_decide_events, +// }) => PerNodeSafetyFinisher(Arc::new(move |ctx: &mut NodeCtx| { +// async move { +// let mut num_failed = 0; +// let mut num_decided = 0; +// for (_view_num, view_status) in &ctx.round_results { +// match view_status { +// ViewStatus::InProgress(_) => {} +// ViewStatus::ViewFailed(_) => { +// num_failed += 1; +// } +// ViewStatus::ViewSuccess(_) => { +// num_decided += 1; +// } +// } +// } +// if let Some(num_failed_views) = num_failed_views { +// if num_failed >= num_failed_views { +// return Err(PerNodeSafetyTaskErr::TooManyFailures); +// } +// } +// +// if let Some(num_decide_events) = num_decide_events { +// if num_decided < num_decide_events { +// return Err(PerNodeSafetyTaskErr::NotEnoughDecides); +// } +// } +// Ok(()) +// } +// .boxed() +// })), +// } +// } +// +// /// build +// pub fn build( +// self, +// // registry: &mut GlobalRegistry, +// // test_event_stream: ChannelStream, +// // hotshot_event_stream: UnboundedStream>, +// ) -> TaskGenerator< +// PerNodeSafetyTask +// > { +// Box::new( +// move |state, mut registry, test_event_stream, hotshot_event_stream| { +// // TODO this is cursed, there's definitely a better way to do this +// let desc = self.clone(); +// async move { +// let test_event_handler = HandleEvent::>(Arc::new( +// move |event, mut state| { +// let finisher = desc.clone().gen_finisher(); +// async move { +// match event { +// GlobalTestEvent::ShutDown => { +// let finished = finisher(&mut state.ctx).await; +// let result = match finished { +// Ok(()) => HotShotTaskCompleted::ShutDown, +// Err(err) => HotShotTaskCompleted::Error(Box::new(err)), +// }; +// return (Some(result), state); +// } +// _ => { +// unimplemented!() +// } +// } +// } +// .boxed() +// }, +// )); +// let message_handler = HandleMessage::>(Arc::new( +// move |msg, mut state| { +// async move { +// let Event { view_number, event } = msg; +// match event { +// EventType::Error { error } => { +// // TODO better warn with node idx +// warn!("View {:?} failed for a replica", view_number); +// state.ctx.round_results.insert( +// view_number, +// ViewStatus::ViewFailed(ViewFailed(error)), +// ); +// } +// EventType::Decide { leaf_chain, qc } => { +// state.ctx.round_results.insert( +// view_number, +// ViewStatus::ViewSuccess(ViewSuccess { +// agreed_state: +// +// }), +// ); +// } +// // these aren't failures +// EventType::ReplicaViewTimeout { view_number } +// | EventType::NextLeaderViewTimeout { view_number } +// | EventType::ViewFinished { view_number } => todo!(), +// _ => todo!(), +// } +// (None, state) +// } +// .boxed() +// }, +// )); +// +// let builder = TaskBuilder::>::new( +// "Safety Check Task".to_string(), +// ) +// .register_event_stream(test_event_stream, FilterEvent::default()) +// .await +// .register_registry(&mut registry) +// .await +// .register_state(state) +// .register_event_handler(test_event_handler) +// .register_message_handler(message_handler) +// .register_message_stream(hotshot_event_stream); +// let task_id = builder.get_task_id().unwrap(); +// (task_id, PerNodeSafetyTaskTypes::build(builder).launch()) +// } +// .boxed() +// }, +// ) +// } +// } +// +// // /// Data Availability task types +// pub type PerNodeSafetyTaskTypes< +// TYPES: NodeType, +// I: TestableNodeImplementation, +// > = HSTWithEventAndMessage< +// PerNodeSafetyTaskErr, +// GlobalTestEvent, +// ChannelStream, +// Event, +// UnboundedStream>, +// PerNodeSafetyTask, +// >; diff --git a/testing/src/soundness_task.rs b/testing/src/soundness_task.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/testing/src/soundness_task.rs @@ -0,0 +1 @@ + diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs new file mode 100644 index 0000000000..ac3224f468 --- /dev/null +++ b/testing/src/spinning_task.rs @@ -0,0 +1,139 @@ +use std::{ + sync::{atomic::AtomicUsize, Arc}, + time::Duration, +}; + +use async_compatibility_layer::art::async_sleep; +use futures::FutureExt; +use hotshot::traits::TestableNodeImplementation; +use hotshot_task::{ + boxed_sync, + event_stream::ChannelStream, + task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEventAndMessage, TaskBuilder}, + GeneratedStream, +}; +use hotshot_types::traits::node_implementation::NodeType; +use snafu::Snafu; + +use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; +#[derive(Snafu, Debug)] +pub struct SpinningTaskErr {} + +/// Completion task types +pub type SpinningTaskTypes = HSTWithEventAndMessage< + SpinningTaskErr, + GlobalTestEvent, + ChannelStream, + (), + GeneratedStream<()>, + SpinningTask, +>; + +pub struct SpinningTask> { + pub(crate) handles: Vec>, + pub(crate) changes: Vec>, +} + +impl> TS for SpinningTask {} + +/// Spin the node up or down +#[derive(Clone, Debug)] +pub enum UpDown { + /// spin the node up + Up, + /// spin the node down + Down, +} + +/// denotes a change in node state +#[derive(Clone, Debug)] +pub struct ChangeNode { + /// the index of the node + pub idx: usize, + /// spin the node up or down + pub updown: UpDown, +} + +#[derive(Clone, Debug)] +pub struct SpinningTaskDescription { + pub node_changes: Vec<(Duration, Vec)>, +} + +impl SpinningTaskDescription { + pub fn build>( + self, + ) -> TaskGenerator> { + Box::new(move |state, mut registry, test_event_stream| { + async move { + let event_handler = + HandleEvent::>(Arc::new(move |event, state| { + async move { + match event { + GlobalTestEvent::ShutDown => { + (Some(HotShotTaskCompleted::ShutDown), state) + } + } + } + .boxed() + })); + let atomic_idx = Arc::new(AtomicUsize::new(0)); + let sleep_durations = Arc::new( + self.node_changes + .clone() + .into_iter() + .map(|(d, _)| d) + .collect::>(), + ); + let stream_generator = GeneratedStream::new(Arc::new(move || { + let atomic_idx = atomic_idx.clone(); + let sleep_durations = sleep_durations.clone(); + let atomic_idx = atomic_idx.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + sleep_durations.get(atomic_idx).copied().map(|duration| { + let fut = async move { + async_sleep(duration).await; + }; + boxed_sync(fut) + }) + })); + let message_handler = HandleMessage::>(Arc::new( + move |_msg, mut state| { + async move { + if let Some(nodes_to_change) = state.changes.pop() { + for ChangeNode { idx, updown } in nodes_to_change { + match updown { + UpDown::Up => { + // TODO... we don't need this right now anyway. We haven't + // implemented catchup + } + UpDown::Down => { + if let Some(node) = state.handles.get_mut(idx) { + node.handle.shut_down().await; + } + } + } + } + } + (None, state) + } + .boxed() + }, + )); + let builder = TaskBuilder::>::new( + "Test Spinning Task".to_string(), + ) + .register_event_stream(test_event_stream, FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler) + .register_message_handler(message_handler) + .register_message_stream(stream_generator); + let task_id = builder.get_task_id().unwrap(); + (task_id, SpinningTaskTypes::build(builder).launch()) + } + .boxed() + }) + } +} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs new file mode 100644 index 0000000000..13868721d4 --- /dev/null +++ b/testing/src/task_helpers.rs @@ -0,0 +1,165 @@ +use crate::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + test_builder::TestMetadata, +}; +use commit::Committable; +use either::Right; +use hotshot::{ + certificate::QuorumCertificate, + traits::{Block, NodeImplementation, TestableNodeImplementation}, + types::{bn254::BN254Pub, SignatureKey, SystemContextHandle}, + HotShotInitializer, HotShotSequencingConsensusApi, SystemContext, +}; +use hotshot_task::event_stream::ChannelStream; +use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_types::{ + data::{QuorumProposal, SequencingLeaf, ViewNumber}, + message::{Message, Proposal}, + traits::{ + consensus_api::ConsensusSharedApi, + election::{ConsensusExchange, Membership, SignedCertificate}, + metrics::NoMetrics, + node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, + signature_key::EncodedSignature, + state::ConsensusTime, + }, +}; + +pub async fn build_system_handle( + node_id: u64, +) -> ( + SystemContextHandle, + ChannelStream>, +) { + let builder = TestMetadata::default_multiple_rounds(); + + let launcher = builder.gen_launcher::(); + + let networks = (launcher.resource_generator.channel_generator)(node_id); + let storage = (launcher.resource_generator.storage)(node_id); + let config = launcher.resource_generator.config.clone(); + + let initializer = HotShotInitializer::< + SequencingTestTypes, + >::Leaf, + >::from_genesis(>::block_genesis()) + .unwrap(); + + let known_nodes = config.known_nodes.clone(); + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; + let public_key = ::SignatureKey::from_private(&private_key); + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + as ConsensusExchange< + SequencingTestTypes, + Message, + >>::Membership::default_election_config(config.total_nodes.get() as u64) + }); + + let committee_election_config = config.election_config.clone().unwrap_or_else(|| { + as ConsensusExchange< + SequencingTestTypes, + Message, + >>::Membership::default_election_config(config.total_nodes.get() as u64) + }); + let exchanges = + >::Exchanges::create( + known_nodes_with_stake.clone(), + known_nodes.clone(), + (quorum_election_config, committee_election_config), + networks, + public_key, + public_key.get_stake_table_entry(1u64), + private_key.clone(), + ); + SystemContext::init( + public_key, + private_key, + node_id, + config, + storage, + exchanges, + initializer, + NoMetrics::boxed(), + ) + .await + .expect("Could not init hotshot") +} + +async fn build_quorum_proposal_and_signature( + handle: &SystemContextHandle, + private_key: &::PrivateKey, + view: u64, +) -> ( + QuorumProposal>, + EncodedSignature, +) { + let consensus_lock = handle.get_consensus(); + let consensus = consensus_lock.read().await; + let api: HotShotSequencingConsensusApi = + HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let _quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); + + let parent_view_number = &consensus.high_qc.view_number(); + let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + panic!("Couldn't find high QC parent in state map."); + }; + let Some(leaf) = parent_view.get_leaf_commitment() else { + panic!("Parent of high QC points to a view without a proposal"); + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + panic!("Failed to find high QC parent."); + }; + let parent_leaf = leaf.clone(); + + // every event input is seen on the event stream in the output. + + let block_commitment = ::BlockType::new().commit(); + let leaf = SequencingLeaf { + view_number: ViewNumber::new(view), + height: parent_leaf.height + 1, + justify_qc: consensus.high_qc.clone(), + parent_commitment: parent_leaf.commit(), + // Use the block commitment rather than the block, so that the replica can construct + // the same leaf with the commitment. + deltas: Right(block_commitment), + rejected: vec![], + timestamp: 0, + proposer_id: api.public_key().to_bytes(), + }; + let signature = ::sign(private_key, leaf.commit().as_ref()); + let proposal = QuorumProposal::> { + block_commitment, + view_number: ViewNumber::new(view), + height: 1, + justify_qc: QuorumCertificate::genesis(), + timeout_certificate: None, + proposer_id: leaf.proposer_id, + dac: None, + }; + + (proposal, signature) +} + +pub async fn build_quorum_proposal( + handle: &SystemContextHandle, + private_key: &::PrivateKey, + view: u64, +) -> Proposal>> { + let (proposal, signature) = + build_quorum_proposal_and_signature(handle, private_key, view).await; + Proposal { + data: proposal, + signature, + } +} + +pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BN254Pub) { + let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; + let public_key = ::SignatureKey::from_private(&private_key); + (private_key, public_key) +} diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs new file mode 100644 index 0000000000..92e784de52 --- /dev/null +++ b/testing/src/test_builder.rs @@ -0,0 +1,276 @@ +use hotshot::types::SignatureKey; +use hotshot_types::traits::election::{ConsensusExchange, Membership}; +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; + +use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; +use hotshot_types::message::{Message, SequencingMessage}; + +use hotshot_types::{ + traits::node_implementation::{NodeType, QuorumEx, TestableExchange}, + ExecutionType, HotShotConfig, +}; + +use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; +use crate::{ + spinning_task::SpinningTaskDescription, + test_launcher::{ResourceGenerators, TestLauncher}, +}; + +use super::{ + overall_safety_task::OverallSafetyPropertiesDescription, txn_task::TxnTaskDescription, +}; + +/// data describing how a round should be timed. +#[derive(Clone, Debug, Copy)] +pub struct TimingData { + /// Base duration for next-view timeout, in milliseconds + pub next_view_timeout: u64, + /// The exponential backoff ration for the next-view timeout + pub timeout_ratio: (u64, u64), + /// The delay a leader inserts before starting pre-commit, in milliseconds + pub round_start_delay: u64, + /// Delay after init before starting consensus, in milliseconds + pub start_delay: u64, + /// The minimum amount of time a leader has to wait to start a round + pub propose_min_round_time: Duration, + /// The maximum amount of time a leader can wait to start a round + pub propose_max_round_time: Duration, +} + +/// metadata describing a test +#[derive(Clone, Debug)] +pub struct TestMetadata { + /// Total number of nodes in the test + pub total_nodes: usize, + /// nodes available at start + pub start_nodes: usize, + /// number of bootstrap nodes (libp2p usage only) + pub num_bootstrap_nodes: usize, + /// Size of the DA committee for the test + pub da_committee_size: usize, + // overall safety property description + pub overall_safety_properties: OverallSafetyPropertiesDescription, + /// spinning properties + pub spinning_properties: SpinningTaskDescription, + // txns timing + pub txn_description: TxnTaskDescription, + // completion task + pub completion_task_description: CompletionTaskDescription, + /// Minimum transactions required for a block + pub min_transactions: usize, + /// timing data + pub timing_data: TimingData, +} + +impl Default for TimingData { + fn default() -> Self { + Self { + next_view_timeout: 10000, + timeout_ratio: (11, 10), + round_start_delay: 1, + start_delay: 1, + propose_min_round_time: Duration::new(0, 0), + propose_max_round_time: Duration::new(5, 0), + } + } +} + +impl TestMetadata { + pub fn default_stress() -> Self { + TestMetadata { + num_bootstrap_nodes: 15, + total_nodes: 100, + start_nodes: 100, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 50, + check_leaf: true, + check_state: true, + check_block: true, + num_failed_views: 15, + transaction_threshold: 0, + threshold_calculator: Arc::new(|_active, total| (2 * total / 3 + 1)), + }, + timing_data: TimingData { + next_view_timeout: 2000, + timeout_ratio: (1, 1), + start_delay: 20000, + round_start_delay: 25, + ..TimingData::default() + }, + ..TestMetadata::default() + } + } + + pub fn default_multiple_rounds() -> TestMetadata { + TestMetadata { + total_nodes: 10, + start_nodes: 10, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 20, + check_leaf: true, + check_state: true, + check_block: true, + num_failed_views: 8, + transaction_threshold: 0, + threshold_calculator: Arc::new(|_active, total| (2 * total / 3 + 1)), + }, + timing_data: TimingData { + start_delay: 120000, + round_start_delay: 25, + ..TimingData::default() + }, + ..TestMetadata::default() + } + } + + /// Default setting with 20 nodes and 10 views of successful views. + pub fn default_more_nodes_less_success() -> TestMetadata { + TestMetadata { + total_nodes: 20, + start_nodes: 20, + num_bootstrap_nodes: 20, + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + da_committee_size: 14, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + // Increase the duration to get the expected number of successful views. + duration: Duration::new(40, 0), + }, + ), + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 10, + ..Default::default() + }, + ..TestMetadata::default() + } + } +} + +impl Default for TestMetadata { + /// by default, just a single round + fn default() -> Self { + Self { + timing_data: TimingData::default(), + min_transactions: 0, + total_nodes: 5, + start_nodes: 5, + num_bootstrap_nodes: 5, + da_committee_size: 5, + spinning_properties: SpinningTaskDescription { + node_changes: vec![], + }, + overall_safety_properties: OverallSafetyPropertiesDescription::default(), + // arbitrary, haven't done the math on this + txn_description: TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(10)), + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + // TODO ED Put a configurable time here - 10 seconds for now + duration: Duration::from_millis(10000), + }, + ), + } + } +} + +impl TestMetadata { + pub fn gen_launcher>( + self, + ) -> TestLauncher + where + I: NodeImplementation>, + >::Exchanges: + TestableExchange>::Leaf, Message>, + { + let TestMetadata { + total_nodes, + num_bootstrap_nodes, + min_transactions, + timing_data, + da_committee_size, + txn_description, + completion_task_description, + overall_safety_properties, + spinning_properties, + .. + } = self.clone(); + + let known_nodes: Vec<::SignatureKey> = (0..total_nodes) + .map(|id| { + let priv_key = + TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id as u64).1; + TYPES::SignatureKey::from_private(&priv_key) + }) + .collect(); + let known_nodes_with_stake: Vec<::StakeTableEntry> = + (0..total_nodes) + .map(|id| known_nodes[id].get_stake_table_entry(1u64)) + .collect(); + // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); + let config = HotShotConfig { + // TODO this doesn't exist anymore + execution_type: ExecutionType::Incremental, + total_nodes: NonZeroUsize::new(total_nodes).unwrap(), + num_bootstrap: num_bootstrap_nodes, + min_transactions, + max_transactions: NonZeroUsize::new(99999).unwrap(), + known_nodes, + known_nodes_with_stake, + da_committee_size, + next_view_timeout: 500, + timeout_ratio: (11, 10), + round_start_delay: 1, + start_delay: 1, + // TODO do we use these fields?? + propose_min_round_time: Duration::from_millis(0), + propose_max_round_time: Duration::from_millis(1000), + // TODO what's the difference between this and the second config? + election_config: Some( as ConsensusExchange< + TYPES, + Message, + >>::Membership::default_election_config( + total_nodes as u64 + )), + }; + let TimingData { + next_view_timeout, + timeout_ratio, + round_start_delay, + start_delay, + propose_min_round_time, + propose_max_round_time, + } = timing_data; + let mod_config = + // TODO this should really be using the timing config struct + |a: &mut HotShotConfig::StakeTableEntry, TYPES::ElectionConfigType>| { + a.next_view_timeout = next_view_timeout; + a.timeout_ratio = timeout_ratio; + a.round_start_delay = round_start_delay; + a.start_delay = start_delay; + a.propose_min_round_time = propose_min_round_time; + a.propose_max_round_time = propose_max_round_time; + }; + + let txn_task_generator = txn_description.build(); + let completion_task_generator = completion_task_description.build_and_launch(); + let overall_safety_task_generator = overall_safety_properties.build(); + let spinning_task_generator = spinning_properties.build(); + TestLauncher { + resource_generator: ResourceGenerators { + channel_generator: <>::Exchanges as TestableExchange<_, _, _>>::gen_comm_channels(total_nodes, num_bootstrap_nodes, da_committee_size), + storage: Box::new(|_| I::construct_tmp_storage().unwrap()), + config, + }, + metadata: self, + txn_task_generator, + overall_safety_task_generator, + completion_task_generator, + spinning_task_generator, + hooks: vec![], + } + .modify_default_config(mod_config) + } +} diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs new file mode 100644 index 0000000000..8d98b94165 --- /dev/null +++ b/testing/src/test_launcher.rs @@ -0,0 +1,209 @@ +use std::sync::Arc; + +use futures::future::BoxFuture; +use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; +use hotshot_task::{ + event_stream::ChannelStream, + global_registry::{GlobalRegistry, HotShotTaskId}, + task::HotShotTaskCompleted, + task_launcher::TaskRunner, +}; +use hotshot_types::{ + message::Message, + traits::{ + election::ConsensusExchange, + network::CommunicationChannel, + node_implementation::{ + ExchangesType, NodeType, QuorumCommChannel, QuorumEx, QuorumNetwork, + }, + signature_key::SignatureKey, + }, + HotShotConfig, +}; + +use crate::spinning_task::SpinningTask; + +use super::{ + completion_task::CompletionTask, overall_safety_task::OverallSafetyTask, + test_builder::TestMetadata, test_runner::TestRunner, txn_task::TxnTask, GlobalTestEvent, +}; + +pub type Networks = ( + <<>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, + >>::QuorumExchange as ConsensusExchange>>::Networking, + <<>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, + >>::CommitteeExchange as ConsensusExchange>>::Networking, + <<>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, + >>::ViewSyncExchange as ConsensusExchange>>::Networking, +); + +/// Wrapper for a function that takes a `node_id` and returns an instance of `T`. +pub type Generator = Box T + 'static>; + +/// Wrapper Type for quorum function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` +pub type QuorumNetworkGenerator = + Box>) -> T + 'static>; + +/// Wrapper Type for committee function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` +pub type CommitteeNetworkGenerator = Box) -> T + 'static>; + +pub type ViewSyncNetworkGenerator = Box) -> T + 'static>; + +/// Wrapper type for a task generator. +pub type TaskGenerator = Box< + dyn FnOnce( + TASK, + GlobalRegistry, + ChannelStream, + ) + -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, +>; + +/// Wrapper type for a hook. +pub type Hook = Box< + dyn FnOnce( + GlobalRegistry, + ChannelStream, + ) + -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, +>; + +/// generators for resources used by each node +pub struct ResourceGenerators> +where + QuorumCommChannel: CommunicationChannel< + TYPES, + Message, + as ConsensusExchange>>::Proposal, + as ConsensusExchange>>::Vote, + as ConsensusExchange>>::Membership, + >, +{ + // generate channels + pub channel_generator: Generator>, + /// generate a new storage for each node + pub storage: Generator<>::Storage>, + /// configuration used to generate each hotshot node + pub config: HotShotConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, +} + +/// test launcher +pub struct TestLauncher> { + /// generator for resources + pub resource_generator: ResourceGenerators, + /// metadasta used for tasks + pub metadata: TestMetadata, + /// overrideable txn task generator function + pub txn_task_generator: TaskGenerator>, + /// overrideable timeout task generator function + pub completion_task_generator: TaskGenerator>, + /// overall safety task generator + pub overall_safety_task_generator: TaskGenerator>, + + pub spinning_task_generator: TaskGenerator>, + + pub hooks: Vec, +} + +impl> TestLauncher { + /// launch the test + pub fn launch(self) -> TestRunner { + TestRunner { + launcher: self, + nodes: Vec::new(), + next_node_id: 0, + task_runner: TaskRunner::default(), + } + } + + /// override the safety task generator + pub fn with_overall_safety_task_generator( + self, + overall_safety_task_generator: TaskGenerator>, + ) -> Self { + Self { + overall_safety_task_generator, + ..self + } + } + + /// override the safety task generator + pub fn with_spinning_task_generator( + self, + spinning_task_generator: TaskGenerator>, + ) -> Self { + Self { + spinning_task_generator, + ..self + } + } + + /// overridde the completion task generator + pub fn with_completion_task_generator( + self, + completion_task_generator: TaskGenerator>, + ) -> Self { + Self { + completion_task_generator, + ..self + } + } + + /// override the txn task generator + pub fn with_txn_task_generator( + self, + txn_task_generator: TaskGenerator>, + ) -> Self { + Self { + txn_task_generator, + ..self + } + } + + /// override resource generators + pub fn with_resource_generator(self, resource_generator: ResourceGenerators) -> Self { + Self { + resource_generator, + ..self + } + } + + /// add a hook + pub fn add_hook(mut self, hook: Hook) -> Self { + self.hooks.push(hook); + self + } + + /// overwrite hooks with more hooks + pub fn with_hooks(self, hooks: Vec) -> Self { + Self { hooks, ..self } + } + + /// Modifies the config used when generating nodes with `f` + pub fn modify_default_config( + mut self, + mut f: impl FnMut( + &mut HotShotConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ), + ) -> Self { + f(&mut self.resource_generator.config); + self + } +} diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs new file mode 100644 index 0000000000..bd7bb60d6f --- /dev/null +++ b/testing/src/test_runner.rs @@ -0,0 +1,259 @@ +use super::{ + completion_task::CompletionTask, + overall_safety_task::{OverallSafetyTask, RoundCtx}, + txn_task::TxnTask, +}; +use crate::test_launcher::{Networks, TestLauncher}; +use hotshot::types::SystemContextHandle; + +use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, HotShotType, SystemContext}; +use hotshot_task::{ + event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, +}; +use hotshot_types::{ + message::Message, + traits::{ + election::{ConsensusExchange, Membership}, + metrics::NoMetrics, + network::CommunicationChannel, + node_implementation::{ExchangesType, NodeType, QuorumCommChannel, QuorumEx}, + signature_key::SignatureKey, + }, + HotShotConfig, +}; +#[allow(deprecated)] +use tracing::info; + +#[derive(Clone)] +pub struct Node> { + pub node_id: u64, + pub handle: SystemContextHandle, +} + +/// The runner of a test network +/// spin up and down nodes, execute rounds +pub struct TestRunner> +where + QuorumCommChannel: CommunicationChannel< + TYPES, + Message, + as ConsensusExchange>>::Proposal, + as ConsensusExchange>>::Vote, + as ConsensusExchange>>::Membership, + >, +{ + pub(crate) launcher: TestLauncher, + pub(crate) nodes: Vec>, + pub(crate) next_node_id: u64, + pub(crate) task_runner: TaskRunner, +} + +impl> TestRunner +where + SystemContext: HotShotType, + QuorumCommChannel: CommunicationChannel< + TYPES, + Message, + as ConsensusExchange>>::Proposal, + as ConsensusExchange>>::Vote, + as ConsensusExchange>>::Membership, + >, +{ + /// excecute test + pub async fn run_test(mut self) + where + I::Exchanges: ExchangesType< + TYPES, + I::Leaf, + Message, + ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), + >, + { + let spinning_changes = self + .launcher + .metadata + .spinning_properties + .node_changes + .clone(); + self.add_nodes(self.launcher.metadata.start_nodes).await; + + let TestRunner { + launcher, + nodes, + next_node_id: _, + mut task_runner, + } = self; + let registry = GlobalRegistry::default(); + let test_event_stream = ChannelStream::new(); + + // add transaction task + let txn_task_state = TxnTask { + handles: nodes.clone(), + next_node_idx: Some(0), + }; + let (id, task) = (launcher.txn_task_generator)( + txn_task_state, + registry.clone(), + test_event_stream.clone(), + ) + .await; + task_runner = + task_runner.add_task(id, "Test Transaction Submission Task".to_string(), task); + + // add completion task + let completion_task_state = CompletionTask { + handles: nodes.clone(), + test_event_stream: test_event_stream.clone(), + }; + let (id, task) = (launcher.completion_task_generator)( + completion_task_state, + registry.clone(), + test_event_stream.clone(), + ) + .await; + task_runner = task_runner.add_task(id, "Test Completion Task".to_string(), task); + + // add spinning task + let spinning_task_state = crate::spinning_task::SpinningTask { + handles: nodes.clone(), + changes: spinning_changes.into_iter().map(|(_, b)| b).collect(), + }; + let (id, task) = (launcher.spinning_task_generator)( + spinning_task_state, + registry.clone(), + test_event_stream.clone(), + ) + .await; + task_runner = task_runner.add_task(id, "Test Spinning Task".to_string(), task); + + // add safety task + let overall_safety_task_state = OverallSafetyTask { + handles: nodes.clone(), + ctx: RoundCtx::default(), + test_event_stream: test_event_stream.clone(), + }; + let (id, task) = (launcher.overall_safety_task_generator)( + overall_safety_task_state, + registry.clone(), + test_event_stream.clone(), + ) + .await; + task_runner = task_runner.add_task(id, "Test Overall Safety Task".to_string(), task); + + // Start hotshot + for node in nodes { + node.handle.hotshot.start_consensus().await; + } + + let results = task_runner.launch().await; + + let mut error_list = vec![]; + for (name, result) in results { + match result { + hotshot_task::task::HotShotTaskCompleted::ShutDown => { + info!("Task {} shut down successfully", name) + } + hotshot_task::task::HotShotTaskCompleted::Error(e) => error_list.push((name, e)), + _ => { + panic!("Future impl for task abstraction failed! This should never happen"); + } + } + } + if !error_list.is_empty() { + panic!("TEST FAILED! Results: {:?}", error_list); + } + } + + /// add nodes + pub async fn add_nodes(&mut self, count: usize) -> Vec + where + I::Exchanges: ExchangesType< + TYPES, + I::Leaf, + Message, + ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), + >, + { + let mut results = vec![]; + for _i in 0..count { + tracing::error!("running node{}", _i); + let node_id = self.next_node_id; + let storage = (self.launcher.resource_generator.storage)(node_id); + let config = self.launcher.resource_generator.config.clone(); + let initializer = + HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); + let networks = (self.launcher.resource_generator.channel_generator)(node_id); + let node_id = self + .add_node_with_config(networks, storage, initializer, config) + .await; + results.push(node_id); + } + + results + } + + /// add a specific node with a config + pub async fn add_node_with_config( + &mut self, + networks: Networks, + storage: I::Storage, + initializer: HotShotInitializer, + config: HotShotConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> u64 + where + I::Exchanges: ExchangesType< + TYPES, + I::Leaf, + Message, + ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), + >, + { + let node_id = self.next_node_id; + self.next_node_id += 1; + + let known_nodes = config.known_nodes.clone(); + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + // Generate key pair for certificate aggregation + let private_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let public_key = TYPES::SignatureKey::from_private(&private_key); + let entry = public_key.get_stake_table_entry(1u64); + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + as ConsensusExchange< + TYPES, + Message, + >>::Membership::default_election_config(config.total_nodes.get() as u64) + }); + let committee_election_config = I::committee_election_config_generator(); + let exchanges = I::Exchanges::create( + known_nodes_with_stake.clone(), + known_nodes.clone(), + ( + quorum_election_config, + committee_election_config(config.da_committee_size as u64), + ), + networks, + public_key.clone(), + entry.clone(), + private_key.clone(), + ); + let handle = SystemContext::init( + public_key, + private_key, + node_id, + config, + storage, + exchanges, + initializer, + NoMetrics::boxed(), + ) + .await + .expect("Could not init hotshot") + .0; + self.nodes.push(Node { handle, node_id }); + node_id + } +} diff --git a/testing/src/timeout_task.rs b/testing/src/timeout_task.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/testing/src/timeout_task.rs @@ -0,0 +1 @@ + diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs new file mode 100644 index 0000000000..4d61b36a20 --- /dev/null +++ b/testing/src/txn_task.rs @@ -0,0 +1,160 @@ +use crate::test_runner::Node; +use async_compatibility_layer::art::async_sleep; +use futures::FutureExt; +use hotshot::traits::TestableNodeImplementation; +use hotshot_task::{ + boxed_sync, + event_stream::ChannelStream, + task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEventAndMessage, TaskBuilder}, + GeneratedStream, +}; +use hotshot_types::{ + message::SequencingMessage, + traits::node_implementation::{NodeImplementation, NodeType}, +}; +use rand::thread_rng; +use snafu::Snafu; +use std::{sync::Arc, time::Duration}; + +use super::{test_launcher::TaskGenerator, GlobalTestEvent}; + +// the obvious idea here is to pass in a "stream" that completes every `n` seconds +// the stream construction can definitely be fancier but that's the baseline idea + +/// Data Availability task error +#[derive(Snafu, Debug)] +pub struct TxnTaskErr {} + +/// state of task that decides when things are completed +pub struct TxnTask> { + // TODO should this be in a rwlock? Or maybe a similar abstraction to the registry is in order + /// Handles for all nodes. + pub handles: Vec>, + /// Optional index of the next node. + pub next_node_idx: Option, +} + +impl> TS for TxnTask {} + +/// types for task that deices when things are completed +pub type TxnTaskTypes = HSTWithEventAndMessage< + TxnTaskErr, + GlobalTestEvent, + ChannelStream, + (), + GeneratedStream<()>, + TxnTask, +>; + +/// build the transaction task +#[derive(Clone, Debug)] +pub enum TxnTaskDescription { + /// submit transactions in a round robin style using + /// every `Duration` seconds + RoundRobinTimeBased(Duration), + /// TODO + DistributionBased, // others? +} + +impl TxnTaskDescription { + /// build a task + pub fn build>( + self, + ) -> TaskGenerator> + where + TYPES: NodeType, + I: NodeImplementation>, + { + Box::new(move |state, mut registry, test_event_stream| { + async move { + // consistency check + match self { + TxnTaskDescription::RoundRobinTimeBased(_) => { + assert!(state.next_node_idx.is_some()) + } + TxnTaskDescription::DistributionBased => assert!(state.next_node_idx.is_none()), + } + // TODO we'll possibly want multiple criterion including: + // - certain number of txns committed + // - anchor of certain depth + // - some other stuff? probably? + let event_handler = + HandleEvent::>(Arc::new(move |event, state| { + async move { + match event { + GlobalTestEvent::ShutDown => { + (Some(HotShotTaskCompleted::ShutDown), state) + } + } + } + .boxed() + })); + let message_handler = + HandleMessage::>(Arc::new(move |_, mut state| { + async move { + if let Some(idx) = state.next_node_idx { + // submit to idx handle + // increment state + state.next_node_idx = Some((idx + 1) % state.handles.len()); + match state.handles.get(idx) { + None => { + // should do error + unimplemented!() + } + Some(node) => { + // use rand::seq::IteratorRandom; + // we're assuming all nodes have the same leaf. + // If they don't match, this is probably fine since + // it should be caught by an assertion (and the txn will be rejected anyway) + let leaf = node.handle.get_decided_leaf().await; + let txn = I::leaf_create_random_transaction( + &leaf, + &mut thread_rng(), + 0, + ); + node.handle + .submit_transaction(txn.clone()) + .await + .expect("Could not send transaction"); + (None, state) + } + } + } else { + // TODO make an issue + // in the case that this is random + // which I haven't implemented yet + unimplemented!() + } + } + .boxed() + })); + let stream_generator = match self { + TxnTaskDescription::RoundRobinTimeBased(duration) => { + GeneratedStream::new(Arc::new(move || { + let fut = async move { + async_sleep(duration).await; + }; + Some(boxed_sync(fut)) + })) + } + TxnTaskDescription::DistributionBased => unimplemented!(), + }; + let builder = TaskBuilder::>::new( + "Test Transaction Submission Task".to_string(), + ) + .register_event_stream(test_event_stream, FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler) + .register_message_handler(message_handler) + .register_message_stream(stream_generator); + let task_id = builder.get_task_id().unwrap(); + (task_id, TxnTaskTypes::build(builder).launch()) + } + .boxed() + }) + } +} diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs new file mode 100644 index 0000000000..3b97a60c9c --- /dev/null +++ b/testing/tests/atomic_storage.rs @@ -0,0 +1,222 @@ +#![cfg(foo)] +use hotshot::{ + certificate::QuorumCertificate, + data::LeafType, + demos::vdemo::{ + random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, + VDemoState, + }, + traits::{Block, State, Storage}, +}; +use hotshot_types::{data::ViewNumber, traits::state::TestableState}; +use rand::thread_rng; + +type AtomicStorage = hotshot::traits::implementations::AtomicStorage; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_happy_path_blocks() { + // This folder will be destroyed when the last handle to it closes + let file = tempfile::tempdir().expect("Could not create temp dir"); + let path = file.path(); + println!("Using store in {:?}", path); + let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); + + let block = VDEntryBlock::default(); + let hash = block.hash(); + store + .update(|mut m| { + let block = block.clone(); + async move { m.insert_block(hash, block).await } + }) + .await + .unwrap(); + + // Make sure the data is still there after re-opening + drop(store); + store = AtomicStorage::open(path).expect("Could not open atomic store"); + assert_eq!( + store.get_block(&hash).await.unwrap(), + Some(DEntryBlock::default()) + ); + + // Add some transactions + let mut rng = thread_rng(); + let state = >::get_starting_state(); + let mut hashes = Vec::new(); + let mut block = block; + for _ in 0..10 { + let new = block + .add_transaction_raw(&random_transaction(&state, &mut rng)) + .expect("Could not add transaction"); + println!("Inserting {:?}: {:?}", new.hash(), new); + store + .update(|mut m| { + let new = new.clone(); + async move { m.insert_block(new.hash(), new.clone()).await } + }) + .await + .unwrap(); + hashes.push(new.hash()); + block = new; + } + + // read them all back 3 times + // 1st time: normal readback + // 2nd: after dropping and re-opening the store + for i in 0..3 { + if i == 1 { + drop(store); + store = AtomicStorage::open(path).expect("Could not open atomic store"); + } + + // read them all back + for (idx, hash) in hashes.iter().enumerate() { + match store.get_block(hash).await.expect("Could not read hash") { + Some(block) => println!("read {:?}", block), + None => panic!("Could not read hash {} {:?}", idx, hash), + } + } + } +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_happy_path_qcs() { + // This folder will be destroyed when the last handle to it closes + let file = tempfile::tempdir().expect("Could not create temp dir"); + let path = file.path(); + println!("Using store in {:?}", path); + let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); + + // Add some certificates + let mut certs = Vec::>::new(); + for i in 0..10 { + let cert = QuorumCertificate { + view_number: ViewNumber::new(i), + ..random_quorom_certificate() + }; + println!("Inserting {:?}", cert); + store + .update(|mut m| { + let cert = cert.clone(); + async move { m.insert_qc(cert).await } + }) + .await + .unwrap(); + certs.push(cert); + } + + // read them all back 3 times + // 1st time: normal readback + // 2nd: after dropping and re-opening the store + for i in 0..2 { + if i == 1 { + drop(store); + store = AtomicStorage::open(path).expect("Could not open atomic store"); + } + + for cert in &certs { + match store + .get_qc_for_view(cert.view_number) + .await + .expect("Could not read view_number") + { + Some(c) => { + println!("read {:?}", c); + assert_eq!(&c, cert); + } + None => panic!("Could not read {:?}: {:?}", cert.view_number, cert), + } + match store + .get_qc(&cert.block_hash) + .await + .expect("Could not read qc by hash") + { + Some(c) => { + println!("read {:?}", c); + assert_eq!(&c, cert); + } + None => panic!( + "Could not read block_hash {:?}: {:?}", + cert.block_hash, cert + ), + } + } + } +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_happy_path_leaves() { + // This folder will be destroyed when the last handle to it closes + let file = tempfile::tempdir().expect("Could not create temp dir"); + let path = file.path(); + println!("Using store in {:?}", path); + let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); + + // Add some leaves + let mut leaves = Vec::>::new(); + for _ in 0..10 { + let leaf = random_validating_leaf(DEntryBlock { + previous_block: StateHash::random(), + ..Default::default() + }); + println!("Inserting {:?}", leaf); + store + .update(|mut m| { + let leaf = leaf.clone(); + async move { m.insert_leaf(leaf).await } + }) + .await + .unwrap(); + leaves.push(leaf); + } + + // read them all back 2 times + // 1st time: normal readback + // 2nd: after dropping and re-opening the store + for i in 0..2 { + if i == 1 { + drop(store); + store = AtomicStorage::open(path).expect("Could not open atomic store"); + } + + for leaf in &leaves { + match store + .get_leaf(&leaf.hash()) + .await + .expect("Could not read leaf hash") + { + Some(l) => { + println!("read {:?}", l); + assert_eq!(&l, leaf); + } + None => { + panic!("Could not read leaf hash {:?}: {:?}", leaf.hash(), leaf) + } + } + let hash = BlockContents::hash(&leaf.deltas); + match store + .get_leaf_by_block(&hash) + .await + .expect("Could not read leaf by block") + { + Some(l) => { + println!("read {:?}", l); + assert_eq!(&l, leaf); + } + None => panic!("Could not read leaf hash {:?}: {:?}", hash, leaf), + } + } + } +} diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs new file mode 100644 index 0000000000..fc4b6017f7 --- /dev/null +++ b/testing/tests/basic.rs @@ -0,0 +1,170 @@ +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_success() { + use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata::default(); + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +/// Test one node leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_one() { + use std::time::Duration; + + use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes_less_success(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ChangeNode { + idx: 19, + updown: UpDown::Down, + }]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(4, 0), dead_nodes)], + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +/// Test f/2 nodes leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_half_f() { + use std::time::Duration; + + use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes_less_success(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(4, 0), dead_nodes)], + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +/// Test f nodes leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_f() { + use std::time::Duration; + + use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes_less_success(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 14, + updown: UpDown::Down, + }, + ChangeNode { + idx: 15, + updown: UpDown::Down, + }, + ChangeNode { + idx: 16, + updown: UpDown::Down, + }, + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(4, 0), dead_nodes)], + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs new file mode 100644 index 0000000000..ec1bd7b05c --- /dev/null +++ b/testing/tests/consensus_task.rs @@ -0,0 +1,171 @@ +use commit::Committable; +use either::Right; +use hotshot::{ + tasks::add_consensus_task, + types::{SignatureKey, SystemContextHandle}, + HotShotSequencingConsensusApi, +}; +use hotshot_task::event_stream::ChannelStream; +use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + task_helpers::{build_quorum_proposal, key_pair_for_id}, +}; +use hotshot_types::{ + data::{QuorumProposal, SequencingLeaf, ViewNumber}, + message::GeneralConsensusMessage, + traits::{ + election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + node_implementation::ExchangesType, + state::ConsensusTime, + }, +}; + +use std::collections::HashMap; + +async fn build_vote( + handle: &SystemContextHandle, + proposal: QuorumProposal>, + view: ViewNumber, +) -> GeneralConsensusMessage { + let consensus_lock = handle.get_consensus(); + let consensus = consensus_lock.read().await; + let api: HotShotSequencingConsensusApi = + HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); + let vote_token = quorum_exchange.make_vote_token(view).unwrap().unwrap(); + + let justify_qc = proposal.justify_qc.clone(); + let view = ViewNumber::new(*proposal.view_number); + let parent = if justify_qc.is_genesis() { + let Some(genesis_view) = consensus.state_map.get(&ViewNumber::new(0)) else { + panic!("Couldn't find genesis view in state map."); + }; + let Some(leaf) = genesis_view.get_leaf_commitment() else { + panic!("Genesis view points to a view without a leaf"); + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + panic!("Failed to find genesis leaf."); + }; + leaf.clone() + } else { + consensus + .saved_leaves + .get(&justify_qc.leaf_commitment()) + .cloned() + .unwrap() + }; + + let parent_commitment = parent.commit(); + + let leaf: SequencingLeaf<_> = SequencingLeaf { + view_number: view, + height: proposal.height, + justify_qc: proposal.justify_qc.clone(), + parent_commitment, + deltas: Right(proposal.block_commitment), + rejected: Vec::new(), + timestamp: 0, + proposer_id: quorum_exchange.get_leader(view).to_bytes(), + }; + + quorum_exchange.create_yes_message( + proposal.justify_qc.commit(), + leaf.commit(), + view, + vote_token, + ) +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_task() { + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(1).await.0; + let (private_key, public_key) = key_pair_for_id(1); + + let mut input = Vec::new(); + let mut output = HashMap::new(); + + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::Shutdown); + + output.insert( + SequencingHotShotEvent::QuorumProposalSend( + build_quorum_proposal(&handle, &private_key, 1).await, + public_key, + ), + 1, + ); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 2); + output.insert(SequencingHotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, event_stream| { + add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + }; + + run_harness(input, output, None, build_fn).await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_vote() { + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let (private_key, public_key) = key_pair_for_id(1); + + let mut input = Vec::new(); + let mut output = HashMap::new(); + + let proposal = build_quorum_proposal(&handle, &private_key, 1).await; + + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(SequencingHotShotEvent::QuorumProposalRecv( + proposal.clone(), + public_key, + )); + + input.push(SequencingHotShotEvent::Shutdown); + + output.insert( + SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + 1, + ); + let proposal = proposal.data; + if let GeneralConsensusMessage::Vote(vote) = + build_vote(&handle, proposal, ViewNumber::new(1)).await + { + output.insert(SequencingHotShotEvent::QuorumVoteSend(vote), 1); + } + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(SequencingHotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, event_stream| { + add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + }; + + run_harness(input, output, None, build_fn).await; +} diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs new file mode 100644 index 0000000000..0322909203 --- /dev/null +++ b/testing/tests/da_task.rs @@ -0,0 +1,92 @@ +use commit::Committable; +use hotshot::HotShotSequencingConsensusApi; +use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_testing::node_types::{SequencingMemoryImpl, SequencingTestTypes}; +use hotshot_types::{ + data::{DAProposal, ViewNumber}, + traits::{ + consensus_api::ConsensusSharedApi, election::ConsensusExchange, + node_implementation::ExchangesType, state::ConsensusTime, + }, +}; +use std::collections::HashMap; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_da_task() { + use hotshot::{ + demos::sdemo::{SDemoBlock, SDemoNormalBlock}, + tasks::add_da_task, + }; + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + use hotshot_types::{ + message::{CommitteeConsensusMessage, Proposal}, + traits::election::CommitteeExchangeType, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // Build the API for node 2. + let handle = build_system_handle(2).await.0; + let api: HotShotSequencingConsensusApi = + HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let committee_exchange = api.inner.exchanges.committee_exchange().clone(); + let pub_key = *api.public_key(); + let block = SDemoBlock::Normal(SDemoNormalBlock { + previous_state: (), + transactions: Vec::new(), + }); + let block_commitment = block.commit(); + let signature = committee_exchange.sign_da_proposal(&block_commitment); + let proposal = DAProposal { + deltas: block.clone(), + view_number: ViewNumber::new(2), + }; + let message = Proposal { + data: proposal, + signature, + }; + + // Every event input is seen on the event stream in the output. + let mut input = Vec::new(); + let mut output = HashMap::new(); + + // In view 1, node 2 is the next leader. + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::DAProposalRecv( + message.clone(), + pub_key, + )); + input.push(SequencingHotShotEvent::Shutdown); + + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); + output.insert( + SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), + 1, + ); + if let Ok(Some(vote_token)) = committee_exchange.make_vote_token(ViewNumber::new(2)) { + let da_message = + committee_exchange.create_da_message(block_commitment, ViewNumber::new(2), vote_token); + if let CommitteeConsensusMessage::DAVote(vote) = da_message { + output.insert(SequencingHotShotEvent::DAVoteSend(vote), 1); + } + } + output.insert(SequencingHotShotEvent::DAProposalRecv(message, pub_key), 1); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(SequencingHotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, event_stream| { + add_da_task(task_runner, event_stream, committee_exchange, handle) + }; + + run_harness(input, output, None, build_fn).await; +} diff --git a/testing/tests/fallback_network.rs b/testing/tests/fallback_network.rs new file mode 100644 index 0000000000..7a66e653c6 --- /dev/null +++ b/testing/tests/fallback_network.rs @@ -0,0 +1,58 @@ +use std::time::Duration; + +use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingLibp2pImpl, SequencingTestTypes}, + overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::TestMetadata, +}; +use tracing::instrument; + +/// web server with libp2p network test +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn webserver_libp2p_network() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} + +// stress test for web server with libp2p +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_webserver_libp2p_network() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata::default_stress(); + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs new file mode 100644 index 0000000000..26bdf11200 --- /dev/null +++ b/testing/tests/libp2p.rs @@ -0,0 +1,58 @@ +use std::time::Duration; + +use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingLibp2pImpl, SequencingTestTypes}, + overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::TestMetadata, +}; +use tracing::instrument; + +/// libp2p network test +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn libp2p_network() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} + +/// stress test for libp2p +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[ignore] +async fn test_stress_libp2p_network() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata::default_stress(); + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} diff --git a/testing/tests/lossy.rs b/testing/tests/lossy.rs new file mode 100644 index 0000000000..c829b07d01 --- /dev/null +++ b/testing/tests/lossy.rs @@ -0,0 +1,118 @@ +// TODO these should be config options for lossy network +// #![allow(clippy::type_complexity)] +// use hotshot_testing::{ +// network_reliability::{AsynchronousNetwork, PartiallySynchronousNetwork, SynchronousNetwork}, +// test_builder::{TestBuilder, TestMetadata}, +// test_types::{StaticCommitteeTestTypes, StaticNodeImplType}, +// }; +// use std::sync::Arc; +// use tracing::instrument; +// +// // tests base level of working synchronous network +// #[cfg_attr( +// feature = "tokio-executor", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(feature = "async-std-executor", async_std::test)] +// #[instrument] +// async fn test_no_loss_network() { +// let builder = TestBuilder { +// metadata: TestMetadata { +// total_nodes: 10, +// start_nodes: 10, +// network_reliability: Some(Arc::new(SynchronousNetwork::default())), +// ..TestMetadata::default() +// }, +// ..Default::default() +// }; +// builder +// .build::() +// .launch() +// .run_test() +// .await +// .unwrap(); +// } +// +// // // tests network with forced packet delay +// #[cfg_attr( +// feature = "tokio-executor", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(feature = "async-std-executor", async_std::test)] +// #[instrument] +// async fn test_synchronous_network() { +// let builder = TestBuilder { +// metadata: TestMetadata { +// total_nodes: 5, +// start_nodes: 5, +// num_succeeds: 2, +// ..TestMetadata::default() +// }, +// ..Default::default() +// }; +// builder +// .build::() +// .launch() +// .run_test() +// .await +// .unwrap(); +// } +// +// // tests network with small packet delay and dropped packets +// #[cfg_attr( +// feature = "tokio-executor", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(feature = "async-std-executor", async_std::test)] +// #[instrument] +// #[ignore] +// async fn test_asynchronous_network() { +// let builder = TestBuilder { +// metadata: TestMetadata { +// total_nodes: 5, +// start_nodes: 5, +// num_succeeds: 2, +// failure_threshold: 5, +// network_reliability: Some(Arc::new(AsynchronousNetwork::new(97, 100, 0, 5))), +// ..TestMetadata::default() +// }, +// ..Default::default() +// }; +// builder +// .build::() +// .launch() +// .run_test() +// .await +// .unwrap(); +// } +// +// /// tests network with asynchronous patch that eventually becomes synchronous +// #[cfg_attr( +// feature = "tokio-executor", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(feature = "async-std-executor", async_std::test)] +// #[instrument] +// #[ignore] +// async fn test_partially_synchronous_network() { +// let asn = AsynchronousNetwork::new(90, 100, 0, 0); +// let sn = SynchronousNetwork::new(10, 0); +// let gst = std::time::Duration::new(10, 0); +// +// let builder = TestBuilder { +// metadata: TestMetadata { +// total_nodes: 5, +// start_nodes: 5, +// num_succeeds: 2, +// network_reliability: Some(Arc::new(PartiallySynchronousNetwork::new(asn, sn, gst))), +// ..TestMetadata::default() +// }, +// ..Default::default() +// }; +// builder +// .build::() +// .launch() +// .run_test() +// .await +// .unwrap(); +// } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs new file mode 100644 index 0000000000..f3d8d9f8bb --- /dev/null +++ b/testing/tests/network_task.rs @@ -0,0 +1,100 @@ +use commit::Committable; +use hotshot::HotShotSequencingConsensusApi; +use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + task_helpers::build_quorum_proposal, +}; +use hotshot_types::{ + data::{DAProposal, ViewNumber}, + traits::{ + consensus_api::ConsensusSharedApi, node_implementation::ExchangesType, state::ConsensusTime, + }, +}; +use std::collections::HashMap; + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_network_task() { + use hotshot::demos::sdemo::{SDemoBlock, SDemoNormalBlock}; + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + use hotshot_types::{message::Proposal, traits::election::CommitteeExchangeType}; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // Build the API for node 2. + let (handle, event_stream) = build_system_handle(2).await; + let api: HotShotSequencingConsensusApi = + HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let committee_exchange = api.inner.exchanges.committee_exchange().clone(); + let pub_key = *api.public_key(); + let priv_key = api.private_key(); + let block = SDemoBlock::Normal(SDemoNormalBlock { + previous_state: (), + transactions: Vec::new(), + }); + let block_commitment = block.commit(); + let signature = committee_exchange.sign_da_proposal(&block_commitment); + let da_proposal = Proposal { + data: DAProposal { + deltas: block.clone(), + view_number: ViewNumber::new(2), + }, + signature, + }; + let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; + + // Every event input is seen on the event stream in the output. + let mut input = Vec::new(); + let mut output = HashMap::new(); + + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(SequencingHotShotEvent::DAProposalSend( + da_proposal.clone(), + pub_key, + )); + input.push(SequencingHotShotEvent::QuorumProposalSend( + quorum_proposal.clone(), + pub_key, + )); + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::Shutdown); + + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); + // One output from the input, the other from the DA task. + output.insert( + SequencingHotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), + 2, + ); + // Only one output from the input. + // The consensus task will fail to send a second proposal, like the DA task does, due to the + // view number check in `publish_proposal_if_able` in consensus.rs, and we will see an error in + // logging, but that is fine for testing as long as the network task is correctly handling + // events. + output.insert( + SequencingHotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), + 1, + ); + output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); + output.insert( + SequencingHotShotEvent::DAProposalRecv(da_proposal, pub_key), + 1, + ); + output.insert( + SequencingHotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), + 1, + ); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 2); + output.insert(SequencingHotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, _| async { task_runner }; + run_harness(input, output, Some(event_stream), build_fn).await; +} diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs new file mode 100644 index 0000000000..7ebeddd577 --- /dev/null +++ b/testing/tests/timeout.rs @@ -0,0 +1,49 @@ +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] +async fn test_timeout() { + use std::time::Duration; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let dead_nodes = vec![ChangeNode { + idx: 0, + updown: UpDown::Down, + }]; + + metadata.timing_data = timing_data; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(1, 0), dead_nodes)], + }; + + // TODO ED Add safety task, etc to confirm TCs are being formed + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(10000), + }, + ); + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs new file mode 100644 index 0000000000..75d0e4f89c --- /dev/null +++ b/testing/tests/web_server.rs @@ -0,0 +1,39 @@ +use async_compatibility_layer::logging::shutdown_logging; +use hotshot_testing::{ + node_types::{SequencingTestTypes, SequencingWebImpl}, + overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::{TestMetadata, TimingData}, +}; +use tracing::instrument; + +/// Web server network test +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn web_server_network() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10000, + start_delay: 120000, + + ..Default::default() + }, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 35, + ..Default::default() + }, + ..TestMetadata::default() + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; + shutdown_logging(); +} diff --git a/types/Cargo.toml b/types/Cargo.toml new file mode 100644 index 0000000000..7be4cc5a36 --- /dev/null +++ b/types/Cargo.toml @@ -0,0 +1,59 @@ +[package] +authors = ["Espresso Systems "] +description = "Types and traits for the HotShot consesus module" +edition = "2021" +name = "hotshot-types" +readme = "../README.md" +version = "0.1.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +demo = ["ed25519-compact"] + +[dependencies] +arbitrary = { version = "1.3", features = ["derive"] } +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +ark-serialize = { version = "0.3", features = [ + "derive", +] } # TODO GG upgrade to 0.4 and inherit this dep from workspace +ark-std = { workspace = true } +bincode = { workspace = true } +bitvec = { workspace = true } +blake3 = { workspace = true } +commit = { workspace = true } +custom_debug = { workspace = true } +derivative = "2.2.0" +digest = { workspace = true } +displaydoc = { version = "0.2.3", default-features = false } +ed25519-compact = { version = "2.0.4", optional = true } +either = { workspace = true, features = ["serde"] } +espresso-systems-common = { workspace = true } +futures = { workspace = true } +generic-array = "0.14.7" +hex_fmt = "0.3.0" +hotshot-utils = { path = "../utils" } +hotshot-task = { path = "../task", default-features = false } +jf-primitives = { workspace = true } +nll = { workspace = true } +libp2p-networking = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { workspace = true } +snafu = { workspace = true } +tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } +time = { workspace = true } +tracing = { workspace = true } +ethereum-types = { workspace = true } +bit-vec = "0.6.3" +typenum = { workspace = true } + +[dev-dependencies] +serde_json = "1.0.105" + +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } diff --git a/types/src/certificate.rs b/types/src/certificate.rs new file mode 100644 index 0000000000..47720eba75 --- /dev/null +++ b/types/src/certificate.rs @@ -0,0 +1,382 @@ +//! Provides two types of cerrtificates and their accumulators. + +use crate::{ + data::{fake_commitment, serialize_signature, LeafType}, + traits::{ + election::{SignedCertificate, VoteData, VoteToken}, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + state::ConsensusTime, + }, + vote::ViewSyncData, +}; +use bincode::Options; +use commit::{Commitment, Committable}; +use espresso_systems_common::hotshot::tag; +use hotshot_utils::bincode::bincode_opts; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{self, Debug, Display, Formatter}, + ops::Deref, +}; +use tracing::debug; + +/// A `DACertificate` is a threshold signature that some data is available. +/// It is signed by the members of the DA committee, not the entire network. It is used +/// to prove that the data will be made available to those outside of the DA committee. +#[derive(Clone, PartialEq, custom_debug::Debug, serde::Serialize, serde::Deserialize, Hash)] +#[serde(bound(deserialize = ""))] +pub struct DACertificate { + /// The view number this quorum certificate was generated during + /// + /// This value is covered by the threshold signature. + pub view_number: TYPES::Time, + + /// committment to the block + pub block_commitment: Commitment, + + /// Assembled signature for certificate aggregation + pub signatures: AssembledSignature, +} + +/// The type used for Quorum Certificates +/// +/// A Quorum Certificate is a threshold signature of the `Leaf` being proposed, as well as some +/// metadata, such as the `Stage` of consensus the quorum certificate was generated during. +#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QuorumCertificate> { + /// commitment to previous leaf + #[debug(skip)] + pub leaf_commitment: Commitment, + /// Which view this QC relates to + pub view_number: TYPES::Time, + /// assembled signature for certificate aggregation + pub signatures: AssembledSignature, + /// If this QC is for the genesis block + pub is_genesis: bool, +} + +impl> Display for QuorumCertificate { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "view: {:?}, is_genesis: {:?}", + self.view_number, self.is_genesis + ) + } +} + +/// Timeout Certificate +#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct TimeoutCertificate { + /// View that timed out + pub view_number: TYPES::Time, + /// assembled signature for certificate aggregation + pub signatures: AssembledSignature, +} + +/// Certificate for view sync. +#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum ViewSyncCertificate { + /// Pre-commit phase. + PreCommit(ViewSyncCertificateInternal), + /// Commit phase. + Commit(ViewSyncCertificateInternal), + /// Finalize phase. + Finalize(ViewSyncCertificateInternal), +} + +impl ViewSyncCertificate { + /// Serialize the certificate into bytes. + /// # Panics + /// If the serialization fails. + pub fn as_bytes(&self) -> Vec { + bincode_opts().serialize(&self).unwrap() + } +} + +/// A view sync certificate representing a quorum of votes for a particular view sync phase +#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct ViewSyncCertificateInternal { + /// Relay the votes are intended for + pub relay: u64, + /// View number the network is attempting to synchronize on + pub round: TYPES::Time, + /// Aggregated QC + pub signatures: AssembledSignature, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +/// Enum representing whether a signatures is for a 'Yes' or 'No' or 'DA' or 'Genesis' certificate +pub enum AssembledSignature { + // (enum, signature) + /// These signatures are for a 'Yes' certificate + Yes(::QCType), + /// These signatures are for a 'No' certificate + No(::QCType), + /// These signatures are for a 'DA' certificate + DA(::QCType), + /// These signatures are for genesis certificate + Genesis(), + /// These signatures are for ViewSyncPreCommit + ViewSyncPreCommit(::QCType), + /// These signatures are for ViewSyncCommit + ViewSyncCommit(::QCType), + /// These signatures are for ViewSyncFinalize + ViewSyncFinalize(::QCType), +} + +/// Data from a vote needed to accumulate into a `SignedCertificate` +pub struct VoteMetaData { + /// Voter's public key + pub encoded_key: EncodedPublicKey, + /// Votes signature + pub encoded_signature: EncodedSignature, + /// Commitment to what's voted on. E.g. the leaf for a `QuorumCertificate` + pub commitment: Commitment, + /// Data of the vote, yes, no, timeout, or DA + pub data: VoteData, + /// The votes's token + pub vote_token: T, + /// View number for the vote + pub view_number: TIME, + /// The relay index for view sync + // TODO ED Make VoteMetaData more generic to avoid this variable that only ViewSync uses + pub relay: Option, +} + +impl> + SignedCertificate + for QuorumCertificate +{ + fn from_signatures_and_commitment( + view_number: TYPES::Time, + signatures: AssembledSignature, + commit: Commitment, + _relay: Option, + ) -> Self { + let qc = QuorumCertificate { + leaf_commitment: commit, + view_number, + signatures, + is_genesis: false, + }; + debug!("QC commitment when formed is {:?}", qc.leaf_commitment); + qc + } + + fn view_number(&self) -> TYPES::Time { + self.view_number + } + + fn signatures(&self) -> AssembledSignature { + self.signatures.clone() + } + + fn leaf_commitment(&self) -> Commitment { + self.leaf_commitment + } + + fn set_leaf_commitment(&mut self, commitment: Commitment) { + self.leaf_commitment = commitment; + } + + fn is_genesis(&self) -> bool { + self.is_genesis + } + + fn genesis() -> Self { + Self { + leaf_commitment: fake_commitment::(), + view_number: ::genesis(), + signatures: AssembledSignature::Genesis(), + is_genesis: true, + } + } +} + +impl> Eq for QuorumCertificate {} + +impl> Committable + for QuorumCertificate +{ + fn commit(&self) -> Commitment { + let signatures_bytes = serialize_signature(&self.signatures); + + commit::RawCommitmentBuilder::new("Quorum Certificate Commitment") + .field("leaf commitment", self.leaf_commitment) + .u64_field("view number", *self.view_number.deref()) + .constant_str("justify_qc signatures") + .var_size_bytes(&signatures_bytes) + .finalize() + } + + fn tag() -> String { + tag::QC.to_string() + } +} + +impl SignedCertificate + for DACertificate +{ + fn from_signatures_and_commitment( + view_number: TYPES::Time, + signatures: AssembledSignature, + commit: Commitment, + _relay: Option, + ) -> Self { + DACertificate { + view_number, + signatures, + block_commitment: commit, + } + } + + fn view_number(&self) -> TYPES::Time { + self.view_number + } + + fn signatures(&self) -> AssembledSignature { + self.signatures.clone() + } + + fn leaf_commitment(&self) -> Commitment { + self.block_commitment + } + + fn set_leaf_commitment(&mut self, _commitment: Commitment) { + // This function is only useful for QC. Will be removed after we have separated cert traits. + } + + fn is_genesis(&self) -> bool { + // This function is only useful for QC. Will be removed after we have separated cert traits. + false + } + + fn genesis() -> Self { + // This function is only useful for QC. Will be removed after we have separated cert traits. + unimplemented!() + } +} + +impl Eq for DACertificate {} + +impl Committable for ViewSyncCertificate { + fn commit(&self) -> Commitment { + let signatures_bytes = serialize_signature(&self.signatures()); + + let mut builder = commit::RawCommitmentBuilder::new("View Sync Certificate Commitment") + // .field("leaf commitment", self.leaf_commitment) + // .u64_field("view number", *self.view_number.deref()) + .constant_str("justify_qc signatures") + .var_size_bytes(&signatures_bytes); + + // builder = builder + // .field("Leaf commitment", self.leaf_commitment) + // .u64_field("View number", *self.view_number.deref()); + + let certificate_internal = match &self { + // TODO ED Not the best way to do this + ViewSyncCertificate::PreCommit(certificate_internal) => { + builder = builder.var_size_field("View Sync Phase", "PreCommit".as_bytes()); + certificate_internal + } + ViewSyncCertificate::Commit(certificate_internal) => { + builder = builder.var_size_field("View Sync Phase", "Commit".as_bytes()); + certificate_internal + } + ViewSyncCertificate::Finalize(certificate_internal) => { + builder = builder.var_size_field("View Sync Phase", "Finalize".as_bytes()); + certificate_internal + } + }; + + builder = builder + .u64_field("Relay", certificate_internal.relay) + .u64_field("Round", *certificate_internal.round); + builder.finalize() + } + + fn tag() -> String { + // TODO ED Update this repo with a view sync tag + tag::QC.to_string() + } +} + +impl + SignedCertificate> + for ViewSyncCertificate +{ + /// Build a QC from the threshold signature and commitment + fn from_signatures_and_commitment( + view_number: TYPES::Time, + signatures: AssembledSignature, + _commit: Commitment>, + relay: Option, + ) -> Self { + let certificate_internal = ViewSyncCertificateInternal { + round: view_number, + relay: relay.unwrap(), + signatures: signatures.clone(), + }; + match signatures { + AssembledSignature::ViewSyncPreCommit(_) => { + ViewSyncCertificate::PreCommit(certificate_internal) + } + AssembledSignature::ViewSyncCommit(_) => { + ViewSyncCertificate::Commit(certificate_internal) + } + AssembledSignature::ViewSyncFinalize(_) => { + ViewSyncCertificate::Finalize(certificate_internal) + } + _ => unimplemented!(), + } + } + + /// Get the view number. + fn view_number(&self) -> TYPES::Time { + match self.clone() { + ViewSyncCertificate::PreCommit(certificate_internal) + | ViewSyncCertificate::Commit(certificate_internal) + | ViewSyncCertificate::Finalize(certificate_internal) => certificate_internal.round, + } + } + + /// Get signatures. + fn signatures(&self) -> AssembledSignature { + match self.clone() { + ViewSyncCertificate::PreCommit(certificate_internal) + | ViewSyncCertificate::Commit(certificate_internal) + | ViewSyncCertificate::Finalize(certificate_internal) => { + certificate_internal.signatures + } + } + } + + // TODO (da) the following functions should be refactored into a QC-specific trait. + /// Get the leaf commitment. + fn leaf_commitment(&self) -> Commitment> { + todo!() + } + + /// Set the leaf commitment. + fn set_leaf_commitment(&mut self, _commitment: Commitment>) { + todo!() + } + + /// Get whether the certificate is for the genesis block. + fn is_genesis(&self) -> bool { + todo!() + } + + /// To be used only for generating the genesis quorum certificate; will fail if used anywhere else + fn genesis() -> Self { + todo!() + } +} +impl Eq for ViewSyncCertificate {} diff --git a/types/src/consensus.rs b/types/src/consensus.rs new file mode 100644 index 0000000000..425f0b1b25 --- /dev/null +++ b/types/src/consensus.rs @@ -0,0 +1,338 @@ +//! Provides the core consensus types + +pub use crate::traits::node_implementation::ViewQueue; +pub use crate::utils::{View, ViewInner}; +use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; +use std::collections::HashSet; + +use crate::utils::Terminator; +use crate::{ + certificate::QuorumCertificate, + data::LeafType, + error::HotShotError, + traits::{ + metrics::{Counter, Gauge, Histogram, Metrics}, + node_implementation::NodeType, + }, +}; +use commit::{Commitment, Committable}; +use derivative::Derivative; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + sync::Arc, +}; +use tracing::error; + +/// A type alias for `HashMap, T>` +type CommitmentMap = HashMap, T>; + +/// A reference to the consensus algorithm +/// +/// This will contain the state of all rounds. +#[derive(custom_debug::Debug)] +pub struct Consensus> { + /// The phases that are currently loaded in memory + // TODO(https://github.com/EspressoSystems/hotshot/issues/153): Allow this to be loaded from `Storage`? + pub state_map: BTreeMap>, + + /// cur_view from pseudocode + pub cur_view: TYPES::Time, + + /// last view had a successful decide event + pub last_decided_view: TYPES::Time, + + /// A list of undecided transactions + pub transactions: Arc>>, + + /// A list of transactions we've seen decided, but didn't receive + pub seen_transactions: HashSet>, + + /// Map of leaf hash -> leaf + /// - contains undecided leaves + /// - includes the MOST RECENT decided leaf + pub saved_leaves: CommitmentMap, + + /// Saved blocks + /// + /// Contains the full block for every leaf in `saved_leaves` if that block is available. + pub saved_blocks: BlockStore, + + /// The `locked_qc` view number + pub locked_view: TYPES::Time, + + /// the highqc per spec + pub high_qc: QuorumCertificate, + + /// A reference to the metrics trait + #[debug(skip)] + pub metrics: Arc, + + /// Amount of invalid QCs we've seen since the last commit + /// Used for metrics. This resets to 0 on every decide event. + pub invalid_qc: usize, +} + +/// The metrics being collected for the consensus algorithm +pub struct ConsensusMetrics { + /// The current view + pub current_view: Box, + /// The duration to collect votes in a view (only applies when this insance is the leader) + pub vote_validate_duration: Box, + /// The duration we waited for txns before building the proposal + pub proposal_wait_duration: Box, + /// The duration to build the proposal + pub proposal_build_duration: Box, + /// The duration of each view, in seconds + pub view_duration: Box, + /// Number of views that are in-flight since the last committed view + pub number_of_views_since_last_commit: Box, + /// Number of views that are in-flight since the last anchor view + pub number_of_views_per_decide_event: Box, + /// Number of invalid QCs between anchors + pub invalid_qc_views: Box, + /// Number of views that were discarded since from one achor to the next + pub discarded_views_per_decide_event: Box, + /// Views where no proposal was seen from one anchor to the next + pub empty_views_per_decide_event: Box, + /// Number of rejected transactions + pub rejected_transactions: Box, + /// Number of outstanding transactions + pub outstanding_transactions: Box, + /// Memory size in bytes of the serialized transactions still outstanding + pub outstanding_transactions_memory_size: Box, + /// Number of views that timed out + pub number_of_timeouts: Box, + /// Total direct messages this node sent out + pub outgoing_direct_messages: Box, + /// Total broadcasts sent + pub outgoing_broadcast_messages: Box, + /// Total messages received + pub direct_messages_received: Box, + /// Total broadcast messages received + pub broadcast_messages_received: Box, + /// Total number of messages which couldn't be sent + pub failed_to_send_messages: Box, +} + +impl ConsensusMetrics { + /// Create a new instance of this [`ConsensusMetrics`] struct, setting all the counters and gauges + #[must_use] + pub fn new(metrics: &dyn Metrics) -> Self { + Self { + current_view: metrics.create_gauge(String::from("current_view"), None), + vote_validate_duration: metrics.create_histogram( + String::from("vote_validate_duration"), + Some(String::from("seconds")), + ), + proposal_build_duration: metrics.create_histogram( + String::from("proposal_build_duration"), + Some(String::from("seconds")), + ), + proposal_wait_duration: metrics.create_histogram( + String::from("proposal_wait_duration"), + Some(String::from("seconds")), + ), + view_duration: metrics + .create_histogram(String::from("view_duration"), Some(String::from("seconds"))), + number_of_views_since_last_commit: metrics + .create_gauge(String::from("number_of_views_since_last_commit"), None), + number_of_views_per_decide_event: metrics + .create_histogram(String::from("number_of_views_per_decide_event"), None), + invalid_qc_views: metrics.create_histogram(String::from("invalid_qc_views"), None), + discarded_views_per_decide_event: metrics + .create_histogram(String::from("discarded_views_per_decide_event"), None), + empty_views_per_decide_event: metrics + .create_histogram(String::from("empty_views_per_decide_event"), None), + rejected_transactions: metrics + .create_counter(String::from("rejected_transactions"), None), + outstanding_transactions: metrics + .create_gauge(String::from("outstanding_transactions"), None), + outstanding_transactions_memory_size: metrics + .create_gauge(String::from("outstanding_transactions_memory_size"), None), + outgoing_direct_messages: metrics + .create_counter(String::from("outgoing_direct_messages"), None), + outgoing_broadcast_messages: metrics + .create_counter(String::from("outgoing_broadcast_messages"), None), + direct_messages_received: metrics + .create_counter(String::from("direct_messages_received"), None), + broadcast_messages_received: metrics + .create_counter(String::from("broadcast_messages_received"), None), + failed_to_send_messages: metrics + .create_counter(String::from("failed_to_send_messages"), None), + number_of_timeouts: metrics + .create_counter(String::from("number_of_views_timed_out"), None), + } + } +} + +impl> Consensus { + /// increment the current view + /// NOTE may need to do gc here + pub fn increment_view(&mut self) -> TYPES::Time { + self.cur_view += 1; + self.cur_view + } + + /// gather information from the parent chain of leafs + /// # Errors + /// If the leaf or its ancestors are not found in storage + pub fn visit_leaf_ancestors( + &self, + start_from: TYPES::Time, + terminator: Terminator, + ok_when_finished: bool, + mut f: F, + ) -> Result<(), HotShotError> + where + F: FnMut(&LEAF) -> bool, + { + let mut next_leaf = if let Some(view) = self.state_map.get(&start_from) { + view.get_leaf_commitment() + .ok_or_else(|| HotShotError::InvalidState { + context: format!( + "Visited failed view {start_from:?} leaf. Expected successfuil leaf" + ), + })? + } else { + return Err(HotShotError::InvalidState { + context: format!("View {start_from:?} leaf does not exist in state map "), + }); + }; + + while let Some(leaf) = self.saved_leaves.get(&next_leaf) { + if let Terminator::Exclusive(stop_before) = terminator { + if stop_before == leaf.get_view_number() { + if ok_when_finished { + return Ok(()); + } + break; + } + } + next_leaf = leaf.get_parent_commitment(); + if !f(leaf) { + return Ok(()); + } + if let Terminator::Inclusive(stop_after) = terminator { + if stop_after == leaf.get_view_number() { + if ok_when_finished { + return Ok(()); + } + break; + } + } + } + Err(HotShotError::LeafNotFound {}) + } + + /// garbage collects based on state change + /// right now, this removes from both the `saved_blocks` + /// and `state_map` fields of `Consensus` + #[allow(clippy::unused_async)] // async for API compatibility reasons + pub async fn collect_garbage( + &mut self, + old_anchor_view: TYPES::Time, + new_anchor_view: TYPES::Time, + ) { + // state check + let anchor_entry = self + .state_map + .iter() + .next() + .expect("INCONSISTENT STATE: anchor leaf not in state map!"); + if *anchor_entry.0 != old_anchor_view { + error!( + "Something about GC has failed. Older leaf exists than the previous anchor leaf." + ); + } + // perform gc + self.state_map + .range(old_anchor_view..new_anchor_view) + .filter_map(|(_view_number, view)| view.get_block_commitment()) + .for_each(|block| { + self.saved_blocks.remove(block); + }); + self.state_map + .range(old_anchor_view..new_anchor_view) + .filter_map(|(_view_number, view)| view.get_leaf_commitment()) + .for_each(|leaf| { + if let Some(removed) = self.saved_leaves.remove(&leaf) { + self.saved_blocks.remove(removed.get_deltas_commitment()); + } + }); + self.state_map = self.state_map.split_off(&new_anchor_view); + } + + /// return a clone of the internal storage of unclaimed transactions + #[must_use] + pub fn get_transactions(&self) -> Arc>> { + self.transactions.clone() + } + + /// Gets the last decided state + /// # Panics + /// if the last decided view's state does not exist in the state map + /// this should never happen. + #[must_use] + pub fn get_decided_leaf(&self) -> LEAF { + let decided_view_num = self.last_decided_view; + let view = self.state_map.get(&decided_view_num).unwrap(); + let leaf = view + .get_leaf_commitment() + .expect("Decided state not found! Consensus internally inconsistent"); + self.saved_leaves.get(&leaf).unwrap().clone() + } +} + +/// Mapping from block commitments to full blocks. +/// +/// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the +/// same block, and the block will only be deleted after _all_ such objects are garbage collected. +/// For example, multiple leaves may temporarily reference the same block on different branches, +/// before all but one branch are ultimately garbage collected. +#[derive(Clone, Debug, Derivative)] +#[derivative(Default(bound = ""))] +pub struct BlockStore(HashMap, (BLOCK, u64)>); + +impl BlockStore { + /// Save `block` for later retrieval. + /// + /// After calling this function, and before the corresponding call to [`remove`](Self::remove), + /// `self.get(block.commit())` will return `Some(block)`. + /// + /// This function will increment a reference count on the saved block, so that multiple calls to + /// [`insert`](Self::insert) for the same block result in multiple owning references to the + /// block. [`remove`](Self::remove) must be called once for each reference before the block will + /// be deallocated. + pub fn insert(&mut self, block: BLOCK) { + self.0 + .entry(block.commit()) + .and_modify(|(_, refcount)| *refcount += 1) + .or_insert((block, 1)); + } + + /// Get a saved block, if available. + /// + /// If a block has been saved with [`insert`](Self::insert), this function will retrieve it. It + /// may return [`None`] if a block with the given commitment has not been saved or if the block + /// has been dropped with [`remove`](Self::remove). + #[must_use] + pub fn get(&self, block: Commitment) -> Option<&BLOCK> { + self.0.get(&block).map(|(block, _)| block) + } + + /// Drop a reference to a saved block. + /// + /// If the block exists and this call drops the last reference to it, the block will be + /// returned. Otherwise, the return value is [`None`]. + pub fn remove(&mut self, block: Commitment) -> Option { + if let Entry::Occupied(mut e) = self.0.entry(block) { + let (_, refcount) = e.get_mut(); + *refcount -= 1; + if *refcount == 0 { + let (block, _) = e.remove(); + return Some(block); + } + } + None + } +} diff --git a/types/src/constants.rs b/types/src/constants.rs new file mode 100644 index 0000000000..26acfb1abc --- /dev/null +++ b/types/src/constants.rs @@ -0,0 +1,14 @@ +//! configurable constants for hotshot + +use crate::traits::signature_key::EncodedPublicKey; + +/// the number of views to gather information for ahead of time +pub const LOOK_AHEAD: u64 = 5; + +/// the genesis proposer pk +/// unfortunately need to allocate on the heap (for vec), so this ends up as a function instead of a +/// const +#[must_use] +pub fn genesis_proposer_id() -> EncodedPublicKey { + EncodedPublicKey(vec![4, 2]) +} diff --git a/types/src/data.rs b/types/src/data.rs new file mode 100644 index 0000000000..97a023ef3e --- /dev/null +++ b/types/src/data.rs @@ -0,0 +1,956 @@ +//! Provides types useful for representing `HotShot`'s data structures +//! +//! This module provides types for representing consensus internal state, such as leaves, +//! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. + +use crate::{ + certificate::{ + AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, + ViewSyncCertificate, + }, + constants::genesis_proposer_id, + traits::{ + election::SignedCertificate, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, SignatureKey}, + state::{ConsensusTime, TestableBlock, TestableState}, + storage::StoredView, + Block, State, + }, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use bincode::Options; +use commit::{Commitment, Committable}; +use derivative::Derivative; +use either::Either; +use espresso_systems_common::hotshot::tag; +use hotshot_utils::bincode::bincode_opts; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use snafu::{ensure, Snafu}; +use std::{ + fmt::{Debug, Display}, + hash::Hash, +}; + +/// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + // std::ops::Add, + // std::ops::Div, + // std::ops::Rem, + Deserialize, + CanonicalSerialize, + CanonicalDeserialize, +)] +pub struct ViewNumber(u64); + +impl ConsensusTime for ViewNumber { + /// Create a genesis view number (0) + fn genesis() -> Self { + Self(0) + } + /// Create a new `ViewNumber` with the given value. + fn new(n: u64) -> Self { + Self(n) + } +} + +impl Committable for ViewNumber { + fn commit(&self) -> Commitment { + let builder = commit::RawCommitmentBuilder::new("View Number Commitment"); + builder.u64(self.0).finalize() + } +} + +impl std::ops::Add for ViewNumber { + type Output = ViewNumber; + + fn add(self, rhs: u64) -> Self::Output { + Self(self.0 + rhs) + } +} + +impl std::ops::AddAssign for ViewNumber { + fn add_assign(&mut self, rhs: u64) { + self.0 += rhs; + } +} + +impl std::ops::Deref for ViewNumber { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::Sub for ViewNumber { + type Output = ViewNumber; + fn sub(self, rhs: u64) -> Self::Output { + Self(self.0 - rhs) + } +} + +/// The `Transaction` type associated with a `State`, as a syntactic shortcut +pub type Transaction = <::BlockType as Block>::Transaction; +/// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut +pub type TxnCommitment = Commitment>; + +/// subset of state that we stick into a leaf. +/// original hotstuff proposal +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Derivative, Eq)] +#[serde(bound(deserialize = ""))] +#[derivative(PartialEq, Hash)] +pub struct ValidatingProposal> +where + LEAF: Committable, +{ + /// current view's block commitment + pub block_commitment: Commitment, + + /// CurView from leader when proposing leaf + pub view_number: TYPES::Time, + + /// Height from leader when proposing leaf + pub height: u64, + + /// Per spec, justification + pub justify_qc: QuorumCertificate, + + /// The hash of the parent `Leaf` + /// So we can ask if it extends + #[debug(skip)] + pub parent_commitment: Commitment, + + /// Block leaf wants to apply + pub deltas: TYPES::BlockType, + + /// What the state should be after applying `self.deltas` + pub state_commitment: Commitment, + + /// Transactions that were marked for rejection while collecting deltas + pub rejected: Vec<::Transaction>, + + /// the propser id + pub proposer_id: EncodedPublicKey, +} + +/// A proposal to start providing data availability for a block. +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct DAProposal { + /// Block leaf wants to apply + pub deltas: TYPES::BlockType, + /// View this proposal applies to + pub view_number: TYPES::Time, +} + +/// Proposal to append a block. +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QuorumProposal> { + /// The commitment to append. + pub block_commitment: Commitment, + + /// CurView from leader when proposing leaf + pub view_number: TYPES::Time, + + /// Height from leader when proposing leaf + pub height: u64, + + /// Per spec, justification + pub justify_qc: QuorumCertificate, + + /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view + pub timeout_certificate: Option>, + + /// the propser id + pub proposer_id: EncodedPublicKey, + + /// Data availibity certificate + // TODO We should be able to remove this + pub dac: Option>, +} + +impl> ProposalType + for ValidatingProposal +{ + type NodeType = TYPES; + fn get_view_number(&self) -> ::Time { + self.view_number + } +} + +impl ProposalType for DAProposal { + type NodeType = TYPES; + fn get_view_number(&self) -> ::Time { + self.view_number + } +} + +impl> ProposalType + for QuorumProposal +{ + type NodeType = TYPES; + fn get_view_number(&self) -> ::Time { + self.view_number + } +} + +impl ProposalType for ViewSyncCertificate { + type NodeType = TYPES; + fn get_view_number(&self) -> ::Time { + match self { + ViewSyncCertificate::PreCommit(certificate_internal) + | ViewSyncCertificate::Commit(certificate_internal) + | ViewSyncCertificate::Finalize(certificate_internal) => certificate_internal.round, + } + } +} + +/// A proposal to a network of voting nodes. +pub trait ProposalType: + Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq + Eq + Hash +{ + /// Type of nodes that can vote on this proposal. + type NodeType: NodeType; + + /// Time at which this proposal is valid. + fn get_view_number(&self) -> ::Time; +} + +/// A state change encoded in a leaf. +/// +/// [`DeltasType`] represents a [block](NodeType::BlockType), but it may not contain the block in +/// full. It is guaranteed to contain, at least, a cryptographic commitment to the block, and it +/// provides an interface for resolving the commitment to a full block if the full block is +/// available. +pub trait DeltasType: + Clone + Debug + for<'a> Deserialize<'a> + PartialEq + Eq + std::hash::Hash + Send + Serialize + Sync +{ + /// Errors reported by this type. + type Error: std::error::Error; + + /// Get a cryptographic commitment to the block represented by this delta. + fn block_commitment(&self) -> Commitment; + + /// Get the full block if it is available, otherwise return this object unchanged. + /// + /// # Errors + /// + /// Returns the original [`DeltasType`], unchanged, in an [`Err`] variant in the case where the + /// full block is not currently available. + fn try_resolve(self) -> Result; + + /// Fill this [`DeltasType`] by providing a complete block. + /// + /// After this function succeeds, [`try_resolve`](Self::try_resolve) is guaranteed to return + /// `Ok(block)`. + /// + /// # Errors + /// + /// Fails if `block` does not match `self.block_commitment()`, or if the block is not able to be + /// stored for some implementation-defined reason. + fn fill(&mut self, block: Block) -> Result<(), Self::Error>; +} + +/// Error which occurs when [`DeltasType::fill`] is called with a block that does not match the +/// deltas' internal block commitment. +#[derive(Clone, Copy, Debug, Snafu)] +#[snafu(display("the block {:?} has commitment {} (expected {})", block, block.commit(), commitment))] +pub struct InconsistentDeltasError { + /// The block with the wrong commitment. + block: BLOCK, + /// The expected commitment. + commitment: Commitment, +} + +impl DeltasType for BLOCK +where + BLOCK: Committable + + Clone + + Debug + + for<'a> Deserialize<'a> + + PartialEq + + Eq + + std::hash::Hash + + Send + + Serialize + + Sync, +{ + type Error = InconsistentDeltasError; + + fn block_commitment(&self) -> Commitment { + self.commit() + } + + fn try_resolve(self) -> Result { + Ok(self) + } + + fn fill(&mut self, block: BLOCK) -> Result<(), Self::Error> { + ensure!( + block.commit() == self.commit(), + InconsistentDeltasSnafu { + block, + commitment: self.commit() + } + ); + // If the commitments are equal the blocks are equal, and we already have the block, so we + // don't have to do anything. + Ok(()) + } +} + +impl DeltasType for Either> +where + BLOCK: Committable + + Clone + + Debug + + for<'a> Deserialize<'a> + + PartialEq + + Eq + + std::hash::Hash + + Send + + Serialize + + Sync, +{ + type Error = InconsistentDeltasError; + + fn block_commitment(&self) -> Commitment { + match self { + Either::Left(block) => block.commit(), + Either::Right(comm) => *comm, + } + } + + fn try_resolve(self) -> Result { + match self { + Either::Left(block) => Ok(block), + Either::Right(_) => Err(self), + } + } + + fn fill(&mut self, block: BLOCK) -> Result<(), Self::Error> { + match self { + Either::Left(curr) => curr.fill(block), + Either::Right(comm) => { + ensure!( + *comm == block.commit(), + InconsistentDeltasSnafu { + block, + commitment: *comm + } + ); + *self = Either::Left(block); + Ok(()) + } + } + } +} + +/// An item which is appended to a blockchain. +pub trait LeafType: + Debug + + Display + + Clone + + 'static + + Committable + + Serialize + + for<'a> Deserialize<'a> + + Send + + Sync + + Eq + + std::hash::Hash +{ + /// Type of nodes participating in the network. + type NodeType: NodeType; + /// Type of block contained by this leaf. + type DeltasType: DeltasType>; + /// Either state or empty + type MaybeState: Clone + + Debug + + for<'a> Deserialize<'a> + + PartialEq + + Eq + + std::hash::Hash + + Send + + Serialize + + Sync; + + /// Create a new leaf from its components. + fn new( + view_number: LeafTime, + justify_qc: QuorumCertificate, + deltas: LeafBlock, + state: LeafState, + ) -> Self; + /// Time when this leaf was created. + fn get_view_number(&self) -> LeafTime; + /// Height of this leaf in the chain. + /// + /// Equivalently, this is the number of leaves before this one in the chain. + fn get_height(&self) -> u64; + /// Change the height of this leaf. + fn set_height(&mut self, height: u64); + /// The QC linking this leaf to its parent in the chain. + fn get_justify_qc(&self) -> QuorumCertificate; + /// Commitment to this leaf's parent. + fn get_parent_commitment(&self) -> Commitment; + /// The block contained in this leaf. + fn get_deltas(&self) -> Self::DeltasType; + /// Fill this leaf with the entire corresponding block. + /// + /// After this function succeeds, `self.get_deltas().try_resolve()` is guaranteed to return + /// `Ok(block)`. + /// + /// # Errors + /// + /// Fails if `block` does not match `self.get_deltas_commitment()`, or if the block is not able + /// to be stored for some implementation-defined reason. + fn fill_deltas(&mut self, block: LeafBlock) -> Result<(), LeafDeltasError>; + /// The blockchain state after appending this leaf. + fn get_state(&self) -> Self::MaybeState; + /// Transactions rejected or invalidated by the application of this leaf. + fn get_rejected(&self) -> Vec>; + /// Real-world time when this leaf was created. + fn get_timestamp(&self) -> i128; + /// Identity of the network participant who proposed this leaf. + fn get_proposer_id(&self) -> EncodedPublicKey; + /// Create a leaf from information stored about a view. + fn from_stored_view(stored_view: StoredView) -> Self; + + /// A commitment to the block contained in this leaf. + fn get_deltas_commitment(&self) -> Commitment> { + self.get_deltas().block_commitment() + } +} + +/// The [`DeltasType`] in a [`LeafType`]. +pub type LeafDeltas = ::DeltasType; +/// Errors reported by the [`DeltasType`] in a [`LeafType`]. +pub type LeafDeltasError = as DeltasType>>::Error; +/// The [`NodeType`] in a [`LeafType`]. +pub type LeafNode = ::NodeType; +/// The [`StateType`] in a [`LeafType`]. +pub type LeafState = as NodeType>::StateType; +/// The [`Block`] in a [`LeafType`]. +pub type LeafBlock = as NodeType>::BlockType; +/// The [`Transaction`] in a [`LeafType`]. +pub type LeafTransaction = as Block>::Transaction; +/// The [`ConsensusTime`] used by a [`LeafType`]. +pub type LeafTime = as NodeType>::Time; + +/// Additional functions required to use a [`LeafType`] with hotshot-testing. +pub trait TestableLeaf { + /// Type of nodes participating in the network. + type NodeType: NodeType; + + /// Create a transaction that can be added to the block contained in this leaf. + fn create_random_transaction( + &self, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> <::BlockType as Block>::Transaction; +} + +/// This is the consensus-internal analogous concept to a block, and it contains the block proper, +/// as well as the hash of its parent `Leaf`. +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::Block` +#[derive(Serialize, Deserialize, Clone, Debug, Derivative)] +#[serde(bound(deserialize = ""))] +#[derivative(Hash, PartialEq, Eq)] +pub struct ValidatingLeaf { + /// CurView from leader when proposing leaf + pub view_number: TYPES::Time, + + /// Number of leaves before this one in the chain + pub height: u64, + + /// Per spec, justification + pub justify_qc: QuorumCertificate, + + /// The hash of the parent `Leaf` + /// So we can ask if it extends + pub parent_commitment: Commitment>, + + /// Block leaf wants to apply + pub deltas: TYPES::BlockType, + + /// What the state should be AFTER applying `self.deltas` + pub state: TYPES::StateType, + + /// Transactions that were marked for rejection while collecting deltas + pub rejected: Vec<::Transaction>, + + /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats + #[derivative(PartialEq = "ignore")] + #[derivative(Hash = "ignore")] + pub timestamp: i128, + + /// the proposer id of the leaf + #[derivative(PartialEq = "ignore")] + #[derivative(Hash = "ignore")] + pub proposer_id: EncodedPublicKey, +} + +/// This is the consensus-internal analogous concept to a block, and it contains the block proper, +/// as well as the hash of its parent `Leaf`. +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::Block` +#[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] +#[serde(bound(deserialize = ""))] +pub struct SequencingLeaf { + /// CurView from leader when proposing leaf + pub view_number: TYPES::Time, + + /// Number of leaves before this one in the chain + pub height: u64, + + /// Per spec, justification + pub justify_qc: QuorumCertificate, + + /// The hash of the parent `SequencingLeaf` + /// So we can ask if it extends + pub parent_commitment: Commitment>, + + /// The block or block commitment to be applied + pub deltas: Either>, + + /// Transactions that were marked for rejection while collecting deltas + pub rejected: Vec<::Transaction>, + + /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats + pub timestamp: i128, + + /// the proposer id of the leaf + pub proposer_id: EncodedPublicKey, +} + +impl PartialEq for SequencingLeaf { + fn eq(&self, other: &Self) -> bool { + let delta_left = match &self.deltas { + Either::Left(deltas) => deltas.commit(), + Either::Right(deltas) => *deltas, + }; + let delta_right = match &other.deltas { + Either::Left(deltas) => deltas.commit(), + Either::Right(deltas) => *deltas, + }; + self.view_number == other.view_number + && self.height == other.height + && self.justify_qc == other.justify_qc + && self.parent_commitment == other.parent_commitment + && delta_left == delta_right + && self.rejected == other.rejected + } +} + +impl Hash for SequencingLeaf { + fn hash(&self, state: &mut H) { + self.view_number.hash(state); + self.height.hash(state); + self.justify_qc.hash(state); + self.parent_commitment.hash(state); + match &self.deltas { + Either::Left(deltas) => { + deltas.commit().hash(state); + } + Either::Right(commitment) => { + commitment.hash(state); + } + } + // self.deltas.hash(state.commit()); + self.rejected.hash(state); + } +} + +impl Display for ValidatingLeaf { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "view: {:?}, height: {:?}, justify: {}", + self.view_number, self.height, self.justify_qc + ) + } +} + +impl LeafType for ValidatingLeaf { + type NodeType = TYPES; + type DeltasType = TYPES::BlockType; + type MaybeState = TYPES::StateType; + + fn new( + view_number: ::Time, + justify_qc: QuorumCertificate, + deltas: ::BlockType, + state: ::StateType, + ) -> Self { + Self { + view_number, + height: 0, + justify_qc, + parent_commitment: fake_commitment(), + deltas, + state, + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: genesis_proposer_id(), + } + } + + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } + + fn get_height(&self) -> u64 { + self.height + } + + fn set_height(&mut self, height: u64) { + self.height = height; + } + + fn get_justify_qc(&self) -> QuorumCertificate { + self.justify_qc.clone() + } + + fn get_parent_commitment(&self) -> Commitment { + self.parent_commitment + } + + fn get_deltas(&self) -> Self::DeltasType { + self.deltas.clone() + } + + fn get_deltas_commitment(&self) -> Commitment<::BlockType> { + self.deltas.block_commitment() + } + + fn fill_deltas(&mut self, block: LeafBlock) -> Result<(), LeafDeltasError> { + self.deltas.fill(block) + } + + fn get_state(&self) -> Self::MaybeState { + self.state.clone() + } + + fn get_rejected(&self) -> Vec<::Transaction> { + self.rejected.clone() + } + + fn get_timestamp(&self) -> i128 { + self.timestamp + } + + fn get_proposer_id(&self) -> EncodedPublicKey { + self.proposer_id.clone() + } + + fn from_stored_view(stored_view: StoredView) -> Self { + Self { + view_number: stored_view.view_number, + height: 0, + justify_qc: stored_view.justify_qc, + parent_commitment: stored_view.parent, + deltas: stored_view.deltas, + state: stored_view.state, + rejected: stored_view.rejected, + timestamp: stored_view.timestamp, + proposer_id: stored_view.proposer_id, + } + } +} + +impl TestableLeaf for ValidatingLeaf +where + TYPES::StateType: TestableState, + TYPES::BlockType: TestableBlock, +{ + type NodeType = TYPES; + + fn create_random_transaction( + &self, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> <::BlockType as Block>::Transaction { + ::create_random_transaction( + Some(&self.state), + rng, + padding, + ) + } +} + +impl Display for SequencingLeaf { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "view: {:?}, height: {:?}, justify: {}", + self.view_number, self.height, self.justify_qc + ) + } +} + +impl LeafType for SequencingLeaf { + type NodeType = TYPES; + type DeltasType = Either>; + type MaybeState = (); + + fn new( + view_number: ::Time, + justify_qc: QuorumCertificate, + deltas: ::BlockType, + _state: ::StateType, + ) -> Self { + Self { + view_number, + height: 0, + justify_qc, + parent_commitment: fake_commitment(), + deltas: Either::Left(deltas), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: genesis_proposer_id(), + } + } + + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } + + fn get_height(&self) -> u64 { + self.height + } + + fn set_height(&mut self, height: u64) { + self.height = height; + } + + fn get_justify_qc(&self) -> QuorumCertificate { + self.justify_qc.clone() + } + + fn get_parent_commitment(&self) -> Commitment { + self.parent_commitment + } + + fn get_deltas(&self) -> Self::DeltasType { + self.deltas.clone() + } + + fn get_deltas_commitment(&self) -> Commitment<::BlockType> { + self.deltas.block_commitment() + } + + fn fill_deltas(&mut self, block: LeafBlock) -> Result<(), LeafDeltasError> { + self.deltas.fill(block) + } + + // The Sequencing Leaf doesn't have a state. + fn get_state(&self) -> Self::MaybeState {} + + fn get_rejected(&self) -> Vec<::Transaction> { + self.rejected.clone() + } + + fn get_timestamp(&self) -> i128 { + self.timestamp + } + + fn get_proposer_id(&self) -> EncodedPublicKey { + self.proposer_id.clone() + } + + fn from_stored_view(stored_view: StoredView) -> Self { + Self { + view_number: stored_view.view_number, + height: 0, + justify_qc: stored_view.justify_qc, + parent_commitment: stored_view.parent, + deltas: stored_view.deltas, + rejected: stored_view.rejected, + timestamp: stored_view.timestamp, + proposer_id: stored_view.proposer_id, + } + } +} + +impl TestableLeaf for SequencingLeaf +where + TYPES::StateType: TestableState, + TYPES::BlockType: TestableBlock, +{ + type NodeType = TYPES; + + fn create_random_transaction( + &self, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> <::BlockType as Block>::Transaction { + TYPES::StateType::create_random_transaction(None, rng, padding) + } +} +/// Fake the thing a genesis block points to. Needed to avoid infinite recursion +#[must_use] +pub fn fake_commitment() -> Commitment { + commit::RawCommitmentBuilder::new("Dummy commitment for arbitrary genesis").finalize() +} + +/// create a random commitment +#[must_use] +pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitment { + let random_array: Vec = (0u8..100u8).map(|_| rng.gen_range(0..255)).collect(); + commit::RawCommitmentBuilder::new("Random Commitment") + .constant_str("Random Field") + .var_size_bytes(&random_array) + .finalize() +} + +/// Serialization for the QC assembled signature +pub fn serialize_signature(signature: &AssembledSignature) -> Vec { + let mut signatures_bytes = vec![]; + let signatures: Option<::QCType> = match &signature { + AssembledSignature::DA(signatures) => { + signatures_bytes.extend("DA".as_bytes()); + Some(signatures.clone()) + } + AssembledSignature::Yes(signatures) => { + signatures_bytes.extend("Yes".as_bytes()); + Some(signatures.clone()) + } + AssembledSignature::No(signatures) => { + signatures_bytes.extend("No".as_bytes()); + Some(signatures.clone()) + } + AssembledSignature::ViewSyncPreCommit(signatures) => { + signatures_bytes.extend("ViewSyncPreCommit".as_bytes()); + Some(signatures.clone()) + } + AssembledSignature::ViewSyncCommit(signatures) => { + signatures_bytes.extend("ViewSyncCommit".as_bytes()); + Some(signatures.clone()) + } + AssembledSignature::ViewSyncFinalize(signatures) => { + signatures_bytes.extend("ViewSyncFinalize".as_bytes()); + Some(signatures.clone()) + } + AssembledSignature::Genesis() => None, + }; + if let Some(sig) = signatures { + let (sig, proof) = TYPES::SignatureKey::get_sig_proof(&sig); + let proof_bytes = bincode_opts() + .serialize(&proof.as_bitslice()) + .expect("This serialization shouldn't be able to fail"); + signatures_bytes.extend("bitvec proof".as_bytes()); + signatures_bytes.extend(proof_bytes.as_slice()); + let sig_bytes = bincode_opts() + .serialize(&sig) + .expect("This serialization shouldn't be able to fail"); + signatures_bytes.extend("aggregated signature".as_bytes()); + signatures_bytes.extend(sig_bytes.as_slice()); + } else { + signatures_bytes.extend("genesis".as_bytes()); + } + + signatures_bytes +} + +impl Committable for ValidatingLeaf { + fn commit(&self) -> commit::Commitment { + let signatures_bytes = serialize_signature(&self.justify_qc.signatures); + + commit::RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .u64_field("height", self.height) + .field("parent Leaf commitment", self.parent_commitment) + .field("block commitment", self.deltas.commit()) + .field("state commitment", self.state.commit()) + .constant_str("justify_qc view number") + .u64(*self.justify_qc.view_number) + .field( + "justify_qc leaf commitment", + self.justify_qc.leaf_commitment(), + ) + .constant_str("justify_qc signatures") + .var_size_bytes(&signatures_bytes) + .finalize() + } + + fn tag() -> String { + tag::LEAF.to_string() + } +} + +impl Committable for SequencingLeaf { + fn commit(&self) -> commit::Commitment { + // Commit the block commitment, rather than the block, so that the replicas can reconstruct + // the leaf. + let block_commitment = match &self.deltas { + Either::Left(block) => block.commit(), + Either::Right(commitment) => *commitment, + }; + + let signatures_bytes = serialize_signature(&self.justify_qc.signatures); + + commit::RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .u64_field("height", self.height) + .field("parent Leaf commitment", self.parent_commitment) + .field("block commitment", block_commitment) + .constant_str("justify_qc view number") + .u64(*self.justify_qc.view_number) + .field( + "justify_qc leaf commitment", + self.justify_qc.leaf_commitment(), + ) + .constant_str("justify_qc signatures") + .var_size_bytes(&signatures_bytes) + .finalize() + } +} + +impl From> + for ValidatingProposal> +{ + fn from(leaf: ValidatingLeaf) -> Self { + Self { + view_number: leaf.view_number, + height: leaf.height, + justify_qc: leaf.justify_qc, + parent_commitment: leaf.parent_commitment, + deltas: leaf.deltas.clone(), + state_commitment: leaf.state.commit(), + rejected: leaf.rejected, + proposer_id: leaf.proposer_id, + block_commitment: leaf.deltas.commit(), + } + } +} + +impl From for StoredView +where + TYPES: NodeType, + LEAF: LeafType, +{ + fn from(leaf: LEAF) -> Self { + StoredView { + view_number: leaf.get_view_number(), + height: leaf.get_height(), + parent: leaf.get_parent_commitment(), + justify_qc: leaf.get_justify_qc(), + state: leaf.get_state(), + deltas: leaf.get_deltas(), + rejected: leaf.get_rejected(), + timestamp: leaf.get_timestamp(), + proposer_id: leaf.get_proposer_id(), + } + } +} diff --git a/types/src/error.rs b/types/src/error.rs new file mode 100644 index 0000000000..c6ccc7af1a --- /dev/null +++ b/types/src/error.rs @@ -0,0 +1,116 @@ +//! Error type for `HotShot` +//! +//! This module provides [`HotShotError`], which is an enum representing possible faults that can +//! occur while interacting with this crate. + +use crate::traits::{node_implementation::NodeType, storage::StorageError}; +use snafu::Snafu; +use std::num::NonZeroU64; + +#[cfg(async_executor_impl = "async-std")] +use async_std::future::TimeoutError; +#[cfg(async_executor_impl = "tokio")] +use tokio::time::error::Elapsed as TimeoutError; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + +/// Error type for `HotShot` +#[derive(Snafu, Debug)] +#[snafu(visibility(pub))] +#[non_exhaustive] +pub enum HotShotError { + /// Failed to Message the leader in the given stage + #[snafu(display("Failed to message leader with error: {source}"))] + FailedToMessageLeader { + /// The underlying network fault + source: crate::traits::network::NetworkError, + }, + /// Failed to broadcast a message on the network + #[snafu(display("Failed to broadcast a message"))] + FailedToBroadcast { + /// The underlying network fault + source: crate::traits::network::NetworkError, + }, + /// A block failed verification + #[snafu(display("Failed verification of block"))] + BadBlock {}, + /// A block was not consistent with the existing state + #[snafu(display("Inconsistent block"))] + InconsistentBlock {}, + /// Failure in networking layer + #[snafu(display("Failure in networking layer: {source}"))] + NetworkFault { + /// Underlying network fault + source: crate::traits::network::NetworkError, + }, + /// Item was not present in storage + LeafNotFound {/* TODO we should create a way to to_string */}, + /// Error accesing storage + StorageError { + /// Underlying error + source: StorageError, + }, + /// Invalid state machine state + #[snafu(display("Invalid state machine state: {}", context))] + InvalidState { + /// Context + context: String, + }, + /// HotShot timed out waiting for msgs + TimeoutError { + /// source of error + source: TimeoutError, + }, + /// HotShot timed out during round + ViewTimeoutError { + /// view number + view_number: TYPES::Time, + /// The state that the round was in when it timed out + state: RoundTimedoutState, + }, + /// Not enough valid signatures for a quorum + #[snafu(display("Insufficient number of valid signatures: the threshold is {}, but only {} signatures were valid", threshold, num_valid_signatures))] + InsufficientValidSignatures { + /// Number of valid signatures + num_valid_signatures: usize, + /// Threshold of signatures needed for a quorum + threshold: NonZeroU64, + }, + /// Miscelaneous error + /// TODO fix this with + /// #181 + Misc { + /// source of error + context: String, + }, + /// Internal value used to drive the state machine + Continue, +} + +/// Contains information about what the state of the hotshot-consensus was when a round timed out +#[derive(Debug, Clone)] +#[non_exhaustive] +pub enum RoundTimedoutState { + /// Leader is in a Prepare phase and is waiting for a HighQC + LeaderWaitingForHighQC, + /// Leader is in a Prepare phase and timed out before the round min time is reached + LeaderMinRoundTimeNotReached, + /// Leader is waiting for prepare votes + LeaderWaitingForPrepareVotes, + /// Leader is waiting for precommit votes + LeaderWaitingForPreCommitVotes, + /// Leader is waiting for commit votes + LeaderWaitingForCommitVotes, + + /// Replica is waiting for a prepare message + ReplicaWaitingForPrepare, + /// Replica is waiting for a pre-commit message + ReplicaWaitingForPreCommit, + /// Replica is waiting for a commit message + ReplicaWaitingForCommit, + /// Replica is waiting for a decide message + ReplicaWaitingForDecide, + + /// HotShot-testing tried to collect round events, but it timed out + TestCollectRoundEventsTimedOut, +} diff --git a/types/src/event.rs b/types/src/event.rs new file mode 100644 index 0000000000..f33a39f8cd --- /dev/null +++ b/types/src/event.rs @@ -0,0 +1,64 @@ +//! Events that a `HotShot` instance can emit + +use crate::{ + certificate::QuorumCertificate, data::LeafType, error::HotShotError, + traits::node_implementation::NodeType, +}; +use std::sync::Arc; +/// A status event emitted by a `HotShot` instance +/// +/// This includes some metadata, such as the stage and view number that the event was generated in, +/// as well as an inner [`EventType`] describing the event proper. +#[derive(Clone, Debug)] +pub struct Event> { + /// The view number that this event originates from + pub view_number: TYPES::Time, + /// The underlying event + pub event: EventType, +} + +/// The type and contents of a status event emitted by a `HotShot` instance +/// +/// This enum does not include metadata shared among all variants, such as the stage and view +/// number, and is thus always returned wrapped in an [`Event`]. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub enum EventType> { + /// A view encountered an error and was interrupted + Error { + /// The underlying error + error: Arc>, + }, + /// A new decision event was issued + Decide { + /// The chain of Leafs that were committed by this decision + /// + /// This list is sorted in reverse view number order, with the newest (highest view number) + /// block first in the list. + /// + /// This list may be incomplete if the node is currently performing catchup. + leaf_chain: Arc>, + /// The QC signing the most recent leaf in `leaf_chain`. + /// + /// Note that the QC for each additional leaf in the chain can be obtained from the leaf + /// before it using + qc: Arc>, + /// Optional information of the number of transactions in the block, for logging purposes. + block_size: Option, + }, + /// A replica task was canceled by a timeout interrupt + ReplicaViewTimeout { + /// The view that timed out + view_number: TYPES::Time, + }, + /// A next leader task was canceled by a timeout interrupt + NextLeaderViewTimeout { + /// The view that timed out + view_number: TYPES::Time, + }, + /// The view has finished. If values were decided on, a `Decide` event will also be emitted. + ViewFinished { + /// The view number that has just finished + view_number: TYPES::Time, + }, +} diff --git a/types/src/lib.rs b/types/src/lib.rs new file mode 100644 index 0000000000..e574bef876 --- /dev/null +++ b/types/src/lib.rs @@ -0,0 +1,70 @@ +//! Types and Traits for the `HotShot` consensus module +#![warn( + clippy::all, + clippy::pedantic, + rust_2018_idioms, + missing_docs, + clippy::missing_docs_in_private_items, + clippy::panic +)] +#![allow(clippy::module_name_repetitions)] + +use std::{num::NonZeroUsize, time::Duration}; + +pub mod certificate; +pub mod consensus; +pub mod constants; +pub mod data; +pub mod error; +pub mod event; +pub mod message; +pub mod traits; +pub mod utils; +pub mod vote; +/// the type of consensus to run. Either: +/// wait for a signal to start a view, +/// or constantly run +/// you almost always want continuous +/// incremental is just for testing +#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] +pub enum ExecutionType { + /// constantly increment view as soon as view finishes + Continuous, + /// wait for a signal + Incremental, +} + +/// Holds configuration for a `HotShot` +#[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] +pub struct HotShotConfig { + /// Whether to run one view or continuous views + pub execution_type: ExecutionType, + /// Total number of nodes in the network + pub total_nodes: NonZeroUsize, + /// Minimum transactions per block + pub min_transactions: usize, + /// Maximum transactions per block + pub max_transactions: NonZeroUsize, + /// List of known node's public keys, including own, sorted by nonce () + pub known_nodes: Vec, + /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter + pub known_nodes_with_stake: Vec, + /// List of DA committee nodes for static DA committe + pub da_committee_size: usize, + /// Base duration for next-view timeout, in milliseconds + pub next_view_timeout: u64, + /// The exponential backoff ration for the next-view timeout + pub timeout_ratio: (u64, u64), + /// The delay a leader inserts before starting pre-commit, in milliseconds + pub round_start_delay: u64, + /// Delay after init before starting consensus, in milliseconds + pub start_delay: u64, + /// Number of network bootstrap nodes + pub num_bootstrap: usize, + /// The minimum amount of time a leader has to wait to start a round + pub propose_min_round_time: Duration, + /// The maximum amount of time a leader can wait to start a round + pub propose_max_round_time: Duration, + /// the election configuration + pub election_config: Option, +} diff --git a/types/src/message.rs b/types/src/message.rs new file mode 100644 index 0000000000..89d2864aa9 --- /dev/null +++ b/types/src/message.rs @@ -0,0 +1,434 @@ +//! Network message types +//! +//! This module contains types used to represent the various types of messages that +//! `HotShot` nodes can send among themselves. + +use crate::{ + certificate::DACertificate, + data::{DAProposal, ProposalType}, + traits::{ + network::{NetworkMsg, ViewMessage}, + node_implementation::{ + ExchangesType, NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, + }, + signature_key::EncodedSignature, + }, + vote::{DAVote, QuorumVote, ViewSyncVote, VoteType}, +}; +use derivative::Derivative; +use either::Either::{self, Left, Right}; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, marker::PhantomData}; + +/// Incoming message +#[derive(Serialize, Deserialize, Clone, Debug, Derivative)] +#[serde(bound(deserialize = "", serialize = ""))] +#[derivative(PartialEq)] +pub struct Message> { + /// The sender of this message + pub sender: TYPES::SignatureKey, + + /// The message kind + #[derivative(PartialEq = "ignore")] + pub kind: MessageKind, + + /// Phantom data. + pub _phantom: PhantomData, +} + +impl> NetworkMsg for Message {} + +impl> ViewMessage for Message { + /// get the view number out of a message + fn get_view_number(&self) -> TYPES::Time { + self.kind.get_view_number() + } + fn purpose(&self) -> MessagePurpose { + self.kind.purpose() + } +} + +/// A wrapper type for implementing `PassType` on a vector of `Message`. +#[derive(Clone, Debug)] +pub struct Messages>(pub Vec>); + +/// A message type agnostic description of a messages purpose +#[derive(PartialEq, Copy, Clone)] +pub enum MessagePurpose { + /// Message with a quorum proposal. + Proposal, + /// Message with a quorum vote. + Vote, + /// Message with a view sync vote. + ViewSyncVote, + /// Message with a view sync proposal. + ViewSyncProposal, + /// Message with a DAC. + DAC, + /// Message for internal use + Internal, + /// Data message + Data, +} + +// TODO (da) make it more customized to the consensus layer, maybe separating the specific message +// data from the kind enum. +/// Enum representation of any message type +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = "", serialize = ""))] +pub enum MessageKind> { + /// Messages related to the consensus protocol + Consensus(I::ConsensusMessage), + /// Messages relating to sharing data between nodes + Data(DataMessage), + /// Phantom data. + _Unreachable(PhantomData), +} + +impl> MessageKind { + // Can't implement `From` directly due to potential conflict with + // `From`. + /// Construct a [`MessageKind`] from [`I::ConsensusMessage`]. + pub fn from_consensus_message(m: I::ConsensusMessage) -> Self { + Self::Consensus(m) + } +} + +impl> From> + for MessageKind +{ + fn from(m: DataMessage) -> Self { + Self::Data(m) + } +} + +impl> ViewMessage for MessageKind { + fn get_view_number(&self) -> TYPES::Time { + match &self { + MessageKind::Consensus(message) => message.view_number(), + MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, + MessageKind::_Unreachable(_) => unimplemented!(), + } + } + + fn purpose(&self) -> MessagePurpose { + match &self { + MessageKind::Consensus(message) => message.purpose(), + MessageKind::Data(message) => match message { + DataMessage::SubmitTransaction(_, _) => MessagePurpose::Data, + }, + MessageKind::_Unreachable(_) => unimplemented!(), + } + } +} + +/// Internal triggers sent by consensus messages. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[serde(bound(deserialize = ""))] +pub enum InternalTrigger { + // May add other triggers if necessary. + /// Internal timeout at the specified view number. + Timeout(TYPES::Time), +} + +/// A processed consensus message for both validating and sequencing consensus. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = ""))] +pub enum ProcessedGeneralConsensusMessage> +where + I::Exchanges: ExchangesType>, +{ + /// Message with a quorum proposal. + Proposal(Proposal>, TYPES::SignatureKey), + /// Message with a quorum vote. + Vote(QuorumVote, TYPES::SignatureKey), + /// Message with a view sync vote. + ViewSyncVote(ViewSyncVote), + /// Message with a view sync certificate. + ViewSyncCertificate(Proposal>), + /// Internal ONLY message indicating a view interrupt. + #[serde(skip)] + InternalTrigger(InternalTrigger), +} + +impl> From> + for GeneralConsensusMessage +where + I::Exchanges: ExchangesType>, +{ + fn from(value: ProcessedGeneralConsensusMessage) -> Self { + match value { + ProcessedGeneralConsensusMessage::Proposal(p, _) => { + GeneralConsensusMessage::Proposal(p) + } + ProcessedGeneralConsensusMessage::Vote(v, _) => GeneralConsensusMessage::Vote(v), + ProcessedGeneralConsensusMessage::InternalTrigger(a) => { + GeneralConsensusMessage::InternalTrigger(a) + } + ProcessedGeneralConsensusMessage::ViewSyncCertificate(certificate) => { + GeneralConsensusMessage::ViewSyncCertificate(certificate) + } + ProcessedGeneralConsensusMessage::ViewSyncVote(vote) => { + GeneralConsensusMessage::ViewSyncVote(vote) + } + } + } +} + +impl> ProcessedGeneralConsensusMessage +where + I::Exchanges: ExchangesType>, +{ + /// Create a [`ProcessedGeneralConsensusMessage`] from a [`GeneralConsensusMessage`]. + /// # Panics + /// if reaching the unimplemented `ViewSync` case. + pub fn new(value: GeneralConsensusMessage, sender: TYPES::SignatureKey) -> Self { + match value { + GeneralConsensusMessage::Proposal(p) => { + ProcessedGeneralConsensusMessage::Proposal(p, sender) + } + GeneralConsensusMessage::Vote(v) => ProcessedGeneralConsensusMessage::Vote(v, sender), + GeneralConsensusMessage::InternalTrigger(a) => { + ProcessedGeneralConsensusMessage::InternalTrigger(a) + } + GeneralConsensusMessage::ViewSyncVote(_) + | GeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + } + } +} + +/// A processed consensus message for the DA committee in sequencing consensus. +#[derive(Serialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = ""))] +pub enum ProcessedCommitteeConsensusMessage { + /// Proposal for the DA committee. + DAProposal(Proposal>, TYPES::SignatureKey), + /// Vote from the DA committee. + DAVote(DAVote, TYPES::SignatureKey), + /// Certificate for the DA. + DACertificate(DACertificate, TYPES::SignatureKey), +} + +impl From> + for CommitteeConsensusMessage +{ + fn from(value: ProcessedCommitteeConsensusMessage) -> Self { + match value { + ProcessedCommitteeConsensusMessage::DAProposal(p, _) => { + CommitteeConsensusMessage::DAProposal(p) + } + ProcessedCommitteeConsensusMessage::DAVote(v, _) => { + CommitteeConsensusMessage::DAVote(v) + } + ProcessedCommitteeConsensusMessage::DACertificate(cert, _) => { + CommitteeConsensusMessage::DACertificate(cert) + } + } + } +} + +impl ProcessedCommitteeConsensusMessage { + /// Create a [`ProcessedCommitteeConsensusMessage`] from a [`CommitteeConsensusMessage`]. + pub fn new(value: CommitteeConsensusMessage, sender: TYPES::SignatureKey) -> Self { + match value { + CommitteeConsensusMessage::DAProposal(p) => { + ProcessedCommitteeConsensusMessage::DAProposal(p, sender) + } + CommitteeConsensusMessage::DAVote(v) => { + ProcessedCommitteeConsensusMessage::DAVote(v, sender) + } + CommitteeConsensusMessage::DACertificate(cert) => { + ProcessedCommitteeConsensusMessage::DACertificate(cert, sender) + } + } + } +} + +/// A processed consensus message for sequencing consensus. +pub type ProcessedSequencingMessage = + Either, ProcessedCommitteeConsensusMessage>; + +impl< + TYPES: NodeType, + I: NodeImplementation>, + > From> for SequencingMessage +{ + fn from(value: ProcessedSequencingMessage) -> Self { + match value { + Left(message) => SequencingMessage(Left(message.into())), + Right(message) => SequencingMessage(Right(message.into())), + } + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation>, + > From> for ProcessedSequencingMessage +{ + fn from(value: ProcessedGeneralConsensusMessage) -> Self { + Left(value) + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = "", serialize = ""))] +/// Messages related to both validating and sequencing consensus. +pub enum GeneralConsensusMessage> +where + I::Exchanges: ExchangesType>, +{ + /// Message with a quorum proposal. + Proposal(Proposal>), + + /// Message with a quorum vote. + Vote(QuorumVote), + + /// Message with a view sync vote. + ViewSyncVote(ViewSyncVote), + + /// Message with a view sync certificate. + ViewSyncCertificate(Proposal>), + + /// Internal ONLY message indicating a view interrupt. + #[serde(skip)] + InternalTrigger(InternalTrigger), +} + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] +#[serde(bound(deserialize = "", serialize = ""))] +/// Messages related to the sequencing consensus protocol for the DA committee. +pub enum CommitteeConsensusMessage { + /// Proposal for data availability committee + DAProposal(Proposal>), + + /// vote for data availability committee + DAVote(DAVote), + + /// Certificate data is available + DACertificate(DACertificate), +} + +/// Messages related to the consensus protocol. +pub trait ConsensusMessageType> { + /// The type of messages for both validating and sequencing consensus. + type GeneralConsensusMessage; + + /// The type of processed consensus messages. + type ProcessedConsensusMessage: Send; + + /// Get the view number when the message was sent or the view of the timeout. + fn view_number(&self) -> TYPES::Time; + + /// Get the message purpose. + fn purpose(&self) -> MessagePurpose; +} + +/// Messages related to the sequencing consensus protocol. +pub trait SequencingMessageType>: + ConsensusMessageType +{ + /// Messages for DA committee only. + type CommitteeConsensusMessage; +} + +/// Messages for sequencing consensus. +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(bound(deserialize = "", serialize = ""))] +pub struct SequencingMessage< + TYPES: NodeType, + I: NodeImplementation>, +>(pub Either, CommitteeConsensusMessage>); + +impl< + TYPES: NodeType, + I: NodeImplementation>, + > ConsensusMessageType for SequencingMessage +{ + type GeneralConsensusMessage = GeneralConsensusMessage; + type ProcessedConsensusMessage = ProcessedSequencingMessage; + + // TODO: Disable panic after the `ViewSync` case is implemented. + #[allow(clippy::panic)] + fn view_number(&self) -> TYPES::Time { + match &self.0 { + Left(general_message) => { + match general_message { + GeneralConsensusMessage::Proposal(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.get_view_number() + } + GeneralConsensusMessage::Vote(vote_message) => vote_message.current_view(), + GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { + InternalTrigger::Timeout(time) => *time, + }, + GeneralConsensusMessage::ViewSyncVote(message) => message.round(), + GeneralConsensusMessage::ViewSyncCertificate(message) => { + message.data.get_view_number() + } + } + } + Right(committee_message) => { + match committee_message { + CommitteeConsensusMessage::DAProposal(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.get_view_number() + } + CommitteeConsensusMessage::DAVote(vote_message) => vote_message.current_view(), + CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, + } + } + } + } + + // TODO: Disable panic after the `ViewSync` case is implemented. + #[allow(clippy::panic)] + fn purpose(&self) -> MessagePurpose { + match &self.0 { + Left(general_message) => match general_message { + GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, + GeneralConsensusMessage::Vote(_) => MessagePurpose::Vote, + GeneralConsensusMessage::InternalTrigger(_) => MessagePurpose::Internal, + GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, + GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, + }, + Right(committee_message) => match committee_message { + CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, + CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, + CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, + }, + } + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation>, + > SequencingMessageType for SequencingMessage +{ + type CommitteeConsensusMessage = CommitteeConsensusMessage; +} + +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +/// Messages related to sending data between nodes +pub enum DataMessage { + /// Contains a transaction to be submitted + /// TODO rethink this when we start to send these messages + /// we only need the view number for broadcast + SubmitTransaction(TYPES::Transaction, TYPES::Time), +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +/// Prepare qc from the leader +pub struct Proposal { + // NOTE: optimization could include view number to help look up parent leaf + // could even do 16 bit numbers if we want + /// The data being proposed. + pub data: PROPOSAL, + /// The proposal must be signed by the view leader + pub signature: EncodedSignature, +} diff --git a/types/src/traits.rs b/types/src/traits.rs new file mode 100644 index 0000000000..c6a76acea3 --- /dev/null +++ b/types/src/traits.rs @@ -0,0 +1,15 @@ +//! Common traits for the `HotShot` protocol +pub mod block_contents; +pub mod consensus_api; +pub mod election; +pub mod metrics; +pub mod network; +pub mod node_implementation; +pub mod qc; +pub mod signature_key; +pub mod stake_table; +pub mod state; +pub mod storage; + +pub use block_contents::Block; +pub use state::State; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs new file mode 100644 index 0000000000..b7d9b4da44 --- /dev/null +++ b/types/src/traits/block_contents.rs @@ -0,0 +1,179 @@ +//! Abstraction over the contents of a block +//! +//! This module provides the [`Block`] trait, which describes the behaviors that a block is +//! expected to have. + +use commit::{Commitment, Committable}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + +use std::{ + collections::HashSet, + error::Error, + fmt::{Debug, Display}, + hash::Hash, +}; + +/// Abstraction over the full contents of a block +/// +/// This trait encapsulates the behaviors that a block must have in order to be used by consensus: +/// * Must have a predefined error type ([`Block::Error`]) +/// * Must have a transaction type that can be compared for equality, serialized and serialized, +/// sent between threads, and can have a hash produced of it +/// * Must be able to be produced incrementally by appending transactions +/// ([`add_transaction_raw`](Block::add_transaction_raw)) +/// * Must be hashable +pub trait Block: + Serialize + + Clone + + Debug + + Display + + Hash + + PartialEq + + Eq + + Send + + Sync + + Committable + + DeserializeOwned +{ + /// The error type for this type of block + type Error: Error + Debug + Send + Sync; + + /// The type of the transitions we are applying + type Transaction: Transaction; + + /// Construct an empty or genesis block. + fn new() -> Self; + + /// Attempts to add a transaction, returning an Error if it would result in a structurally + /// invalid block + /// + /// # Errors + /// + /// Should return an error if this transaction leads to an invalid block + fn add_transaction_raw(&self, tx: &Self::Transaction) + -> std::result::Result; + + /// returns hashes of all the transactions in this block + /// TODO make this ordered with a vec + fn contained_transactions(&self) -> HashSet>; +} + +/// Commitment to a block, used by data availibity +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[serde(bound(deserialize = ""), transparent)] +pub struct BlockCommitment(pub Commitment); + +/// Abstraction over any type of transaction. Used by [`Block`]. +pub trait Transaction: + Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash +{ +} + +/// Dummy implementation of `BlockContents` for unit tests +pub mod dummy { + use std::fmt::Display; + + use super::{Block, Commitment, Committable, Debug, Hash, HashSet, Serialize}; + use rand::Rng; + use serde::Deserialize; + + pub use crate::traits::state::dummy::DummyState; + use crate::traits::state::TestableBlock; + + /// The dummy block + #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)] + pub struct DummyBlock { + /// Some dummy data + pub nonce: u64, + } + + impl DummyBlock { + /// Generate a random `DummyBlock` + pub fn random(rng: &mut dyn rand::RngCore) -> Self { + Self { nonce: rng.gen() } + } + } + + /// Dummy error + #[derive(Debug)] + pub struct DummyError; + + /// dummy transaction. No functionality + #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] + pub enum DummyTransaction { + /// the only variant. Dummy. + Dummy, + } + + impl Committable for DummyTransaction { + fn commit(&self) -> commit::Commitment { + commit::RawCommitmentBuilder::new("Dummy Block Comm") + .u64_field("Dummy Field", 0) + .finalize() + } + + fn tag() -> String { + "DUMMY_TXN".to_string() + } + } + impl super::Transaction for DummyTransaction {} + + impl std::error::Error for DummyError {} + + impl std::fmt::Display for DummyError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("A bad thing happened") + } + } + + impl Display for DummyBlock { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:#?}") + } + } + + impl Block for DummyBlock { + type Error = DummyError; + + type Transaction = DummyTransaction; + + fn new() -> Self { + ::genesis() + } + + fn add_transaction_raw( + &self, + _tx: &Self::Transaction, + ) -> std::result::Result { + Ok(Self { + nonce: self.nonce + 1, + }) + } + + fn contained_transactions(&self) -> HashSet> { + HashSet::new() + } + } + + impl TestableBlock for DummyBlock { + fn genesis() -> Self { + Self { nonce: 0 } + } + + fn txn_count(&self) -> u64 { + 1 + } + } + + impl Committable for DummyBlock { + fn commit(&self) -> commit::Commitment { + commit::RawCommitmentBuilder::new("Dummy Block Comm") + .u64_field("Nonce", self.nonce) + .finalize() + } + + fn tag() -> String { + "DUMMY_BLOCK".to_string() + } + } +} diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs new file mode 100644 index 0000000000..2687107886 --- /dev/null +++ b/types/src/traits/consensus_api.rs @@ -0,0 +1,171 @@ +//! Contains the [`SequencingConsensusApi`] and [`ValidatingConsensusApi`] traits. + +use crate::{ + certificate::QuorumCertificate, + data::{LeafType, ProposalType}, + error::HotShotError, + event::{Event, EventType}, + message::{DataMessage, SequencingMessage}, + traits::{ + network::NetworkError, + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + storage::StorageError, + }, + vote::VoteType, +}; +use async_trait::async_trait; + +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; + +/// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and +/// sequencing consensus. +#[async_trait] +pub trait ConsensusSharedApi< + TYPES: NodeType, + LEAF: LeafType, + I: NodeImplementation, +>: Send + Sync +{ + /// Total number of nodes in the network. Also known as `n`. + fn total_nodes(&self) -> NonZeroUsize; + + /// The minimum amount of time a leader has to wait before sending a propose + fn propose_min_round_time(&self) -> Duration; + + /// The maximum amount of time a leader can wait before sending a propose. + /// If this time is reached, the leader has to send a propose without transactions. + fn propose_max_round_time(&self) -> Duration; + + /// Store a leaf in the storage + async fn store_leaf( + &self, + old_anchor_view: TYPES::Time, + leaf: LEAF, + ) -> Result<(), StorageError>; + + /// Retuns the maximum transactions allowed in a block + fn max_transactions(&self) -> NonZeroUsize; + + /// Returns the minimum transactions that must be in a block + fn min_transactions(&self) -> usize; + + /// Returns `true` if hotstuff should start the given round. A round can also be started manually by sending `NewView` to the leader. + /// + /// In production code this should probably always return `true`. + async fn should_start_round(&self, view_number: TYPES::Time) -> bool; + + /// Notify the system of an event within `hotshot-consensus`. + async fn send_event(&self, event: Event); + + /// Get a reference to the public key. + fn public_key(&self) -> &TYPES::SignatureKey; + + /// Get a reference to the private key. + fn private_key(&self) -> &::PrivateKey; + + // Utility functions + + /// notifies client of an error + async fn send_view_error(&self, view_number: TYPES::Time, error: Arc>) { + self.send_event(Event { + view_number, + event: EventType::Error { error }, + }) + .await; + } + + /// notifies client of a replica timeout + async fn send_replica_timeout(&self, view_number: TYPES::Time) { + self.send_event(Event { + view_number, + event: EventType::ReplicaViewTimeout { view_number }, + }) + .await; + } + + /// notifies client of a next leader timeout + async fn send_next_leader_timeout(&self, view_number: TYPES::Time) { + self.send_event(Event { + view_number, + event: EventType::NextLeaderViewTimeout { view_number }, + }) + .await; + } + + /// sends a decide event down the channel + async fn send_decide( + &self, + view_number: TYPES::Time, + leaf_views: Vec, + decide_qc: QuorumCertificate, + ) { + self.send_event(Event { + view_number, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(decide_qc), + block_size: None, + }, + }) + .await; + } + + /// Sends a `ViewFinished` event + async fn send_view_finished(&self, view_number: TYPES::Time) { + self.send_event(Event { + view_number, + event: EventType::ViewFinished { view_number }, + }) + .await; + } +} + +/// The API that [`HotStuff`] needs to talk to the system, for sequencing consensus. +#[async_trait] +pub trait SequencingConsensusApi< + TYPES: NodeType, + LEAF: LeafType, + I: NodeImplementation>, +>: ConsensusSharedApi +{ + /// Send a direct message to the given recipient + async fn send_direct_message, VOTE: VoteType>( + &self, + recipient: TYPES::SignatureKey, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// send a direct message using the DA communication channel + async fn send_direct_da_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + recipient: TYPES::SignatureKey, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// Send a broadcast message to the entire network. + async fn send_broadcast_message< + PROPOSAL: ProposalType, + VOTE: VoteType, + >( + &self, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// Send a broadcast to the DA comitee, stub for now + async fn send_da_broadcast( + &self, + message: SequencingMessage, + ) -> std::result::Result<(), NetworkError>; + + /// Send a message with a transaction. + /// This function is deprecated in favor of `submit_transaction` in `handle.rs` + #[deprecated] + async fn send_transaction( + &self, + message: DataMessage, + ) -> std::result::Result<(), NetworkError>; +} diff --git a/types/src/traits/consensus_type.rs b/types/src/traits/consensus_type.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/types/src/traits/consensus_type/sequencing_consensus.rs b/types/src/traits/consensus_type/sequencing_consensus.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/types/src/traits/consensus_type/validating_consensus.rs b/types/src/traits/consensus_type/validating_consensus.rs new file mode 100644 index 0000000000..21b6630301 --- /dev/null +++ b/types/src/traits/consensus_type/validating_consensus.rs @@ -0,0 +1,16 @@ +//! The [`ValidatingConsensusType`] trait allows consensus-specific customization points. + +use crate::traits::consensus_type::ConsensusType; + +/// Marker trait for consensus which provides ordering and execution. +pub trait ValidatingConsensusType +where + Self: ConsensusType, +{ +} + +/// Consensus which provides ordering and execution. +#[derive(Clone, Debug)] +pub struct ValidatingConsensus; +impl ConsensusType for ValidatingConsensus {} +impl ValidatingConsensusType for ValidatingConsensus {} diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs new file mode 100644 index 0000000000..81a173e954 --- /dev/null +++ b/types/src/traits/election.rs @@ -0,0 +1,1357 @@ +//! The election trait, used to decide which node is the leader and determine if a vote is valid. + +// Needed to avoid the non-biding `let` warning. +#![allow(clippy::let_underscore_untyped)] + +use super::{ + node_implementation::{NodeImplementation, NodeType}, + signature_key::{EncodedPublicKey, EncodedSignature}, +}; +use crate::{ + certificate::{ + AssembledSignature, DACertificate, QuorumCertificate, ViewSyncCertificate, VoteMetaData, + }, + data::{DAProposal, ProposalType}, +}; + +use crate::{ + message::{CommitteeConsensusMessage, GeneralConsensusMessage, Message}, + vote::ViewSyncVoteInternal, +}; + +use crate::{ + data::LeafType, + traits::{ + network::{CommunicationChannel, NetworkMsg}, + node_implementation::ExchangesType, + signature_key::SignatureKey, + state::ConsensusTime, + }, + vote::{ + Accumulator, DAVote, QuorumVote, TimeoutVote, ViewSyncData, ViewSyncVote, VoteAccumulator, + VoteType, YesOrNoVote, + }, +}; +use bincode::Options; +use commit::{Commitment, Committable}; +use derivative::Derivative; +use either::Either; +use ethereum_types::U256; +use hotshot_utils::bincode::bincode_opts; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, marker::PhantomData, num::NonZeroU64}; +use tracing::error; + +/// Error for election problems +#[derive(Snafu, Debug)] +pub enum ElectionError { + /// stub error to be filled in + StubError, + /// Math error doing something + /// NOTE: it would be better to make Election polymorphic over + /// the election error and then have specific math errors + MathError, +} + +/// For items that will always have the same validity outcome on a successful check, +/// allows for the case of "not yet possible to check" where the check might be +/// attempted again at a later point in time, but saves on repeated checking when +/// the outcome is already knowable. +/// +/// This would be a useful general utility. +pub enum Checked { + /// This item has been checked, and is valid + Valid(T), + /// This item has been checked, and is not valid + Inval(T), + /// This item has not been checked + Unchecked(T), +} + +/// Data to vote on for different types of votes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum VoteData { + /// Vote to provide availability for a block. + DA(Commitment), + /// Vote to append a leaf to the log. + Yes(Commitment), + /// Vote to reject a leaf from the log. + No(Commitment), + /// Vote to time out and proceed to the next view. + Timeout(Commitment), + /// Vote to pre-commit the view sync. + ViewSyncPreCommit(Commitment), + /// Vote to commit the view sync. + ViewSyncCommit(Commitment), + /// Vote to finalize the view sync. + ViewSyncFinalize(Commitment), +} + +/// Make different types of `VoteData` committable +impl Committable for VoteData { + fn commit(&self) -> Commitment { + match self { + VoteData::DA(block_commitment) => commit::RawCommitmentBuilder::new("DA Block Commit") + .field("block_commitment", *block_commitment) + .finalize(), + VoteData::Yes(leaf_commitment) => commit::RawCommitmentBuilder::new("Yes Vote Commit") + .field("leaf_commitment", *leaf_commitment) + .finalize(), + VoteData::No(leaf_commitment) => commit::RawCommitmentBuilder::new("No Vote Commit") + .field("leaf_commitment", *leaf_commitment) + .finalize(), + VoteData::Timeout(view_number_commitment) => { + commit::RawCommitmentBuilder::new("Timeout View Number Commit") + .field("view_number_commitment", *view_number_commitment) + .finalize() + } + VoteData::ViewSyncPreCommit(commitment) => { + commit::RawCommitmentBuilder::new("ViewSyncPreCommit") + .field("commitment", *commitment) + .finalize() + } + VoteData::ViewSyncCommit(commitment) => { + commit::RawCommitmentBuilder::new("ViewSyncCommit") + .field("commitment", *commitment) + .finalize() + } + VoteData::ViewSyncFinalize(commitment) => { + commit::RawCommitmentBuilder::new("ViewSyncFinalize") + .field("commitment", *commitment) + .finalize() + } + } + } + + fn tag() -> String { + ("VOTE_DATA_COMMIT").to_string() + } +} + +impl VoteData { + #[must_use] + /// Convert vote data into bytes. + /// + /// # Panics + /// Panics if the serialization fails. + pub fn as_bytes(&self) -> Vec { + bincode_opts().serialize(&self).unwrap() + } +} + +/// Proof of this entity's right to vote, and of the weight of those votes +pub trait VoteToken: + Clone + + Debug + + Send + + Sync + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + PartialEq + + Hash + + Eq + + Committable +{ + // type StakeTable; + // type KeyPair: SignatureKey; + // type ConsensusTime: ConsensusTime; + + /// the count, which validation will confirm + fn vote_count(&self) -> NonZeroU64; +} + +/// election config +pub trait ElectionConfig: + Default + + Clone + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + Sync + + Send + + core::fmt::Debug +{ +} + +/// A certificate of some property which has been signed by a quroum of nodes. +pub trait SignedCertificate +where + Self: Send + Sync + Clone + Serialize + for<'a> Deserialize<'a>, + COMMITTABLE: Committable + Serialize + Clone, + TOKEN: VoteToken, +{ + /// Build a QC from the threshold signature and commitment + fn from_signatures_and_commitment( + view_number: TIME, + signatures: AssembledSignature, + commit: Commitment, + relay: Option, + ) -> Self; + + /// Get the view number. + fn view_number(&self) -> TIME; + + /// Get signatures. + fn signatures(&self) -> AssembledSignature; + + // TODO (da) the following functions should be refactored into a QC-specific trait. + + /// Get the leaf commitment. + fn leaf_commitment(&self) -> Commitment; + + /// Set the leaf commitment. + fn set_leaf_commitment(&mut self, commitment: Commitment); + + /// Get whether the certificate is for the genesis block. + fn is_genesis(&self) -> bool; + + /// To be used only for generating the genesis quorum certificate; will fail if used anywhere else + fn genesis() -> Self; +} + +/// A protocol for determining membership in and participating in a ccommittee. +pub trait Membership: + Clone + Debug + Eq + PartialEq + Send + Sync + 'static +{ + /// generate a default election configuration + fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType; + + /// create an election + /// TODO may want to move this to a testableelection trait + fn create_election( + entries: Vec<::StakeTableEntry>, + keys: Vec, + config: TYPES::ElectionConfigType, + ) -> Self; + + /// Clone the public key and corresponding stake table for current elected committee + fn get_committee_qc_stake_table( + &self, + ) -> Vec<::StakeTableEntry>; + + /// The leader of the committee for view `view_number`. + fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; + + /// The members of the committee for view `view_number`. + fn get_committee(&self, view_number: TYPES::Time) -> BTreeSet; + + /// Attempts to generate a vote token for self + /// + /// Returns `None` if the number of seats would be zero + /// # Errors + /// TODO tbd + fn make_vote_token( + &self, + view_number: TYPES::Time, + priv_key: &::PrivateKey, + ) -> Result, ElectionError>; + + /// Checks the claims of a received vote token + /// + /// # Errors + /// TODO tbd + fn validate_vote_token( + &self, + pub_key: TYPES::SignatureKey, + token: Checked, + ) -> Result, ElectionError>; + + /// Returns the number of total nodes in the committee + fn total_nodes(&self) -> usize; + + /// Returns the threshold for a specific `Membership` implementation + fn success_threshold(&self) -> NonZeroU64; + + /// Returns the threshold for a specific `Membership` implementation + fn failure_threshold(&self) -> NonZeroU64; +} + +/// Protocol for exchanging proposals and votes to make decisions in a distributed network. +/// +/// An instance of [`ConsensusExchange`] represents the state of one participant in the protocol, +/// allowing them to vote and query information about the overall state of the protocol (such as +/// membership and leader status). +pub trait ConsensusExchange: Send + Sync { + /// A proposal for participants to vote on. + type Proposal: ProposalType; + /// A vote on a [`Proposal`](Self::Proposal). + type Vote: VoteType; + /// A [`SignedCertificate`] attesting to a decision taken by the committee. + type Certificate: SignedCertificate + + Hash + + Eq; + /// The committee eligible to make decisions. + type Membership: Membership; + /// Network used by [`Membership`](Self::Membership) to communicate. + type Networking: CommunicationChannel; + /// Commitments to items which are the subject of proposals and decisions. + type Commitment: Committable + Serialize + Clone; + + /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). + fn create( + entries: Vec<::StakeTableEntry>, + keys: Vec, + config: TYPES::ElectionConfigType, + network: Self::Networking, + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self; + + /// The network being used by this exchange. + fn network(&self) -> &Self::Networking; + + /// The leader of the [`Membership`](Self::Membership) at time `view_number`. + fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + self.membership().get_leader(view_number) + } + + /// Whether this participant is leader at time `view_number`. + fn is_leader(&self, view_number: TYPES::Time) -> bool { + &self.get_leader(view_number) == self.public_key() + } + + /// Threshold required to approve a [`Proposal`](Self::Proposal). + fn success_threshold(&self) -> NonZeroU64 { + self.membership().success_threshold() + } + + /// Threshold required to know a success threshold will not be reached + fn failure_threshold(&self) -> NonZeroU64 { + self.membership().failure_threshold() + } + + /// The total number of nodes in the committee. + fn total_nodes(&self) -> usize { + self.membership().total_nodes() + } + + /// Attempts to generate a vote token for participation at time `view_number`. + /// + /// # Errors + /// When unable to make a vote token because not part of the committee + fn make_vote_token( + &self, + view_number: TYPES::Time, + ) -> std::result::Result, ElectionError> { + self.membership() + .make_vote_token(view_number, self.private_key()) + } + + /// The contents of a vote on `commit`. + fn vote_data(&self, commit: Commitment) -> VoteData; + + /// Validate a QC. + fn is_valid_cert(&self, qc: &Self::Certificate, commit: Commitment) -> bool { + if qc.is_genesis() && qc.view_number() == TYPES::Time::genesis() { + return true; + } + let leaf_commitment = qc.leaf_commitment(); + + if leaf_commitment != commit { + error!("Leaf commitment does not equal parent commitment"); + return false; + } + + match qc.signatures() { + AssembledSignature::DA(qc) => { + let real_commit = VoteData::DA(leaf_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } + AssembledSignature::Yes(qc) => { + let real_commit = VoteData::Yes(leaf_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } + AssembledSignature::No(qc) => { + let real_commit = VoteData::No(leaf_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } + AssembledSignature::Genesis() => true, + AssembledSignature::ViewSyncPreCommit(_) + | AssembledSignature::ViewSyncCommit(_) + | AssembledSignature::ViewSyncFinalize(_) => { + error!("QC should not be ViewSync type here"); + false + } + } + } + + /// Validate a vote by checking its signature and token. + fn is_valid_vote( + &self, + encoded_key: &EncodedPublicKey, + encoded_signature: &EncodedSignature, + data: VoteData, + vote_token: Checked, + ) -> bool { + let mut is_valid_vote_token = false; + let mut is_valid_signature = false; + if let Some(key) = ::from_bytes(encoded_key) { + is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + let valid_vote_token = self.membership().validate_vote_token(key, vote_token); + is_valid_vote_token = match valid_vote_token { + Err(_) => { + error!("Vote token was invalid"); + false + } + Ok(Checked::Valid(_)) => true, + Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, + }; + } + is_valid_signature && is_valid_vote_token + } + + #[doc(hidden)] + fn accumulate_internal( + &self, + vota_meta: VoteMetaData, + accumulator: VoteAccumulator, + ) -> Either, Self::Certificate> { + if !self.is_valid_vote( + &vota_meta.encoded_key, + &vota_meta.encoded_signature, + vota_meta.data.clone(), + // Ignoring deserialization errors below since we are getting rid of it soon + Checked::Unchecked(vota_meta.vote_token.clone()), + ) { + error!("Invalid vote!"); + return Either::Left(accumulator); + } + + if let Some(key) = ::from_bytes(&vota_meta.encoded_key) + { + let stake_table_entry = key.get_stake_table_entry(1u64); + let append_node_id = self + .membership() + .get_committee_qc_stake_table() + .iter() + .position(|x| *x == stake_table_entry.clone()) + .unwrap(); + + match accumulator.append(( + vota_meta.commitment, + ( + vota_meta.encoded_key.clone(), + ( + vota_meta.encoded_signature.clone(), + self.membership().get_committee_qc_stake_table(), + append_node_id, + vota_meta.data, + vota_meta.vote_token, + ), + ), + )) { + Either::Left(accumulator) => Either::Left(accumulator), + Either::Right(signatures) => { + Either::Right(Self::Certificate::from_signatures_and_commitment( + vota_meta.view_number, + signatures, + vota_meta.commitment, + vota_meta.relay, + )) + } + } + } else { + Either::Left(accumulator) + } + } + + /// Add a vote to the accumulating signature. Return The certificate if the vote + /// brings us over the threshould, Else return the accumulator. + #[allow(clippy::too_many_arguments)] + fn accumulate_vote( + &self, + encoded_key: &EncodedPublicKey, + encoded_signature: &EncodedSignature, + leaf_commitment: Commitment, + vote_data: VoteData, + vote_token: TYPES::VoteTokenType, + view_number: TYPES::Time, + accumlator: VoteAccumulator, + relay: Option, + ) -> Either, Self::Certificate>; + + /// The committee which votes on proposals. + fn membership(&self) -> &Self::Membership; + + /// This participant's public key. + fn public_key(&self) -> &TYPES::SignatureKey; + + /// This participant's private key. + fn private_key(&self) -> &::PrivateKey; +} + +/// A [`ConsensusExchange`] where participants vote to provide availability for blobs of data. +pub trait CommitteeExchangeType: + ConsensusExchange +{ + /// Sign a DA proposal. + fn sign_da_proposal(&self, block_commitment: &Commitment) + -> EncodedSignature; + + /// Sign a vote on DA proposal. + /// + /// The block commitment and the type of the vote (DA) are signed, which is the minimum amount + /// of information necessary for checking that this node voted on that block. + fn sign_da_vote( + &self, + block_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Create a message with a vote on DA proposal. + fn create_da_message( + &self, + block_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> CommitteeConsensusMessage; +} + +/// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. +#[derive(Derivative)] +#[derivative(Clone, Debug)] +pub struct CommitteeExchange< + TYPES: NodeType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + M: NetworkMsg, +> { + /// The network being used by this exchange. + network: NETWORK, + /// The committee which votes on proposals. + membership: MEMBERSHIP, + /// This participant's public key. + public_key: TYPES::SignatureKey, + /// Entry with public key and staking value for certificate aggregation + entry: ::StakeTableEntry, + /// This participant's private key. + #[derivative(Debug = "ignore")] + private_key: ::PrivateKey, + #[doc(hidden)] + _pd: PhantomData<(TYPES, MEMBERSHIP, M)>, +} + +impl< + TYPES: NodeType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + M: NetworkMsg, + > CommitteeExchangeType for CommitteeExchange +{ + /// Sign a DA proposal. + fn sign_da_proposal( + &self, + block_commitment: &Commitment, + ) -> EncodedSignature { + let signature = TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()); + signature + } + /// Sign a vote on DA proposal. + /// + /// The block commitment and the type of the vote (DA) are signed, which is the minimum amount + /// of information necessary for checking that this node voted on that block. + fn sign_da_vote( + &self, + block_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::::DA(block_commitment) + .commit() + .as_ref(), + ); + (self.public_key.to_bytes(), signature) + } + /// Create a message with a vote on DA proposal. + fn create_da_message( + &self, + block_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> CommitteeConsensusMessage { + let signature = self.sign_da_vote(block_commitment); + CommitteeConsensusMessage::::DAVote(DAVote { + signature, + block_commitment, + current_view, + vote_token, + vote_data: VoteData::DA(block_commitment), + }) + } +} + +impl< + TYPES: NodeType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + M: NetworkMsg, + > ConsensusExchange for CommitteeExchange +{ + type Proposal = DAProposal; + type Vote = DAVote; + type Certificate = DACertificate; + type Membership = MEMBERSHIP; + type Networking = NETWORK; + type Commitment = TYPES::BlockType; + + fn create( + entries: Vec<::StakeTableEntry>, + keys: Vec, + config: TYPES::ElectionConfigType, + network: Self::Networking, + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self { + let membership = >::Membership::create_election( + entries, keys, config, + ); + Self { + network, + membership, + public_key: pk, + entry, + private_key: sk, + _pd: PhantomData, + } + } + fn network(&self) -> &NETWORK { + &self.network + } + fn make_vote_token( + &self, + view_number: TYPES::Time, + ) -> std::result::Result, ElectionError> { + self.membership + .make_vote_token(view_number, &self.private_key) + } + + fn vote_data(&self, commit: Commitment) -> VoteData { + VoteData::DA(commit) + } + + /// Add a vote to the accumulating signature. Return The certificate if the vote + /// brings us over the threshould, Else return the accumulator. + fn accumulate_vote( + &self, + encoded_key: &EncodedPublicKey, + encoded_signature: &EncodedSignature, + leaf_commitment: Commitment, + vote_data: VoteData, + vote_token: TYPES::VoteTokenType, + view_number: TYPES::Time, + accumlator: VoteAccumulator, + _relay: Option, + ) -> Either, Self::Certificate> { + let meta = VoteMetaData { + encoded_key: encoded_key.clone(), + encoded_signature: encoded_signature.clone(), + commitment: leaf_commitment, + data: vote_data, + vote_token, + view_number, + relay: None, + }; + self.accumulate_internal(meta, accumlator) + } + fn membership(&self) -> &Self::Membership { + &self.membership + } + fn public_key(&self) -> &TYPES::SignatureKey { + &self.public_key + } + fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { + &self.private_key + } +} + +/// A [`ConsensusExchange`] where participants vote to append items to a log. +pub trait QuorumExchangeType, M: NetworkMsg>: + ConsensusExchange +{ + /// Create a message with a positive vote on validating or commitment proposal. + fn create_yes_message>( + &self, + justify_qc_commitment: Commitment, + leaf_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + >::Certificate: commit::Committable, + I::Exchanges: ExchangesType>; + + /// Sign a validating or commitment proposal. + fn sign_validating_or_commitment_proposal>( + &self, + leaf_commitment: &Commitment, + ) -> EncodedSignature; + + /// Sign a positive vote on validating or commitment proposal. + /// + /// The leaf commitment and the type of the vote (yes) are signed, which is the minimum amount + /// of information necessary for any user of the subsequently constructed QC to check that this + /// node voted `Yes` on that leaf. The leaf is expected to be reconstructed based on other + /// information in the yes vote. + fn sign_yes_vote( + &self, + leaf_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Sign a neagtive vote on validating or commitment proposal. + /// + /// The leaf commitment and the type of the vote (no) are signed, which is the minimum amount + /// of information necessary for any user of the subsequently constructed QC to check that this + /// node voted `No` on that leaf. + fn sign_no_vote( + &self, + leaf_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Sign a timeout vote. + /// + /// We only sign the view number, which is the minimum amount of information necessary for + /// checking that this node timed out on that view. + /// + /// This also allows for the high QC included with the vote to be spoofed in a MITM scenario, + /// but it is outside our threat model. + fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature); + + /// Create a message with a negative vote on validating or commitment proposal. + fn create_no_message>( + &self, + justify_qc_commitment: Commitment>, + leaf_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + I::Exchanges: ExchangesType>; + + /// Create a message with a timeout vote on validating or commitment proposal. + fn create_timeout_message>( + &self, + justify_qc: QuorumCertificate, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + I::Exchanges: ExchangesType>; +} + +/// Standard implementation of [`QuroumExchangeType`] based on Hot Stuff consensus. +#[derive(Derivative)] +#[derivative(Clone, Debug)] +pub struct QuorumExchange< + TYPES: NodeType, + LEAF: LeafType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, MEMBERSHIP>, + M: NetworkMsg, +> { + /// The network being used by this exchange. + network: NETWORK, + /// The committee which votes on proposals. + membership: MEMBERSHIP, + /// This participant's public key. + public_key: TYPES::SignatureKey, + /// Entry with public key and staking value for certificate aggregation + entry: ::StakeTableEntry, + /// This participant's private key. + #[derivative(Debug = "ignore")] + private_key: ::PrivateKey, + #[doc(hidden)] + _pd: PhantomData<(LEAF, PROPOSAL, MEMBERSHIP, M)>, +} + +impl< + TYPES: NodeType, + LEAF: LeafType, + MEMBERSHIP: Membership, + PROPOSAL: ProposalType, + NETWORK: CommunicationChannel, MEMBERSHIP>, + M: NetworkMsg, + > QuorumExchangeType + for QuorumExchange +{ + /// Create a message with a positive vote on validating or commitment proposal. + fn create_yes_message>( + &self, + justify_qc_commitment: Commitment>, + leaf_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + I::Exchanges: ExchangesType>, + { + let signature = self.sign_yes_vote(leaf_commitment); + GeneralConsensusMessage::::Vote(QuorumVote::Yes(YesOrNoVote { + justify_qc_commitment, + signature, + leaf_commitment, + current_view, + vote_token, + vote_data: VoteData::Yes(leaf_commitment), + })) + } + /// Sign a validating or commitment proposal. + fn sign_validating_or_commitment_proposal>( + &self, + leaf_commitment: &Commitment, + ) -> EncodedSignature { + let signature = TYPES::SignatureKey::sign(&self.private_key, leaf_commitment.as_ref()); + signature + } + + /// Sign a positive vote on validating or commitment proposal. + /// + /// The leaf commitment and the type of the vote (yes) are signed, which is the minimum amount + /// of information necessary for any user of the subsequently constructed QC to check that this + /// node voted `Yes` on that leaf. The leaf is expected to be reconstructed based on other + /// information in the yes vote. + fn sign_yes_vote( + &self, + leaf_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::::Yes(leaf_commitment).commit().as_ref(), + ); + (self.public_key.to_bytes(), signature) + } + + /// Sign a neagtive vote on validating or commitment proposal. + /// + /// The leaf commitment and the type of the vote (no) are signed, which is the minimum amount + /// of information necessary for any user of the subsequently constructed QC to check that this + /// node voted `No` on that leaf. + fn sign_no_vote( + &self, + leaf_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::::No(leaf_commitment).commit().as_ref(), + ); + (self.public_key.to_bytes(), signature) + } + + /// Sign a timeout vote. + /// + /// We only sign the view number, which is the minimum amount of information necessary for + /// checking that this node timed out on that view. + /// + /// This also allows for the high QC included with the vote to be spoofed in a MITM scenario, + /// but it is outside our threat model. + fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::::Timeout(view_number.commit()) + .commit() + .as_ref(), + ); + (self.public_key.to_bytes(), signature) + } + /// Create a message with a negative vote on validating or commitment proposal. + fn create_no_message>( + &self, + justify_qc_commitment: Commitment>, + leaf_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + I::Exchanges: ExchangesType>, + { + let signature = self.sign_no_vote(leaf_commitment); + GeneralConsensusMessage::::Vote(QuorumVote::No(YesOrNoVote { + justify_qc_commitment, + signature, + leaf_commitment, + current_view, + vote_token, + vote_data: VoteData::No(leaf_commitment), + })) + } + + /// Create a message with a timeout vote on validating or commitment proposal. + fn create_timeout_message>( + &self, + high_qc: QuorumCertificate, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage + where + I::Exchanges: ExchangesType>, + { + let signature = self.sign_timeout_vote(current_view); + GeneralConsensusMessage::::Vote(QuorumVote::Timeout(TimeoutVote { + high_qc, + signature, + current_view, + vote_token, + vote_data: VoteData::Timeout(current_view.commit()), + })) + } +} + +impl< + TYPES: NodeType, + LEAF: LeafType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, MEMBERSHIP>, + M: NetworkMsg, + > ConsensusExchange + for QuorumExchange +{ + type Proposal = PROPOSAL; + type Vote = QuorumVote; + type Certificate = QuorumCertificate; + type Membership = MEMBERSHIP; + type Networking = NETWORK; + type Commitment = LEAF; + + fn create( + entries: Vec<::StakeTableEntry>, + keys: Vec, + config: TYPES::ElectionConfigType, + network: Self::Networking, + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self { + let membership = >::Membership::create_election( + entries, keys, config, + ); + Self { + network, + membership, + public_key: pk, + entry, + private_key: sk, + _pd: PhantomData, + } + } + + fn network(&self) -> &NETWORK { + &self.network + } + + fn vote_data(&self, commit: Commitment) -> VoteData { + VoteData::Yes(commit) + } + + /// Add a vote to the accumulating signature. Return The certificate if the vote + /// brings us over the threshould, Else return the accumulator. + fn accumulate_vote( + &self, + encoded_key: &EncodedPublicKey, + encoded_signature: &EncodedSignature, + leaf_commitment: Commitment, + vote_data: VoteData, + vote_token: TYPES::VoteTokenType, + view_number: TYPES::Time, + accumlator: VoteAccumulator, + _relay: Option, + ) -> Either, Self::Certificate> { + let meta = VoteMetaData { + encoded_key: encoded_key.clone(), + encoded_signature: encoded_signature.clone(), + commitment: leaf_commitment, + data: vote_data, + vote_token, + view_number, + relay: None, + }; + self.accumulate_internal(meta, accumlator) + } + fn membership(&self) -> &Self::Membership { + &self.membership + } + fn public_key(&self) -> &TYPES::SignatureKey { + &self.public_key + } + fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { + &self.private_key + } +} + +/// A [`ConsensusExchange`] where participants synchronize which view the network should be in. +pub trait ViewSyncExchangeType: + ConsensusExchange +{ + /// Creates a precommit vote + fn create_precommit_message>( + &self, + round: TYPES::Time, + relay: u64, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage; + + /// Signs a precommit vote + fn sign_precommit_message( + &self, + commitment: Commitment>, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Creates a commit vote + fn create_commit_message>( + &self, + round: TYPES::Time, + relay: u64, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage; + + /// Signs a commit vote + fn sign_commit_message( + &self, + commitment: Commitment>, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Creates a finalize vote + fn create_finalize_message>( + &self, + round: TYPES::Time, + relay: u64, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage; + + /// Sings a finalize vote + fn sign_finalize_message( + &self, + commitment: Commitment>, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Validate a certificate. + fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool; + + /// Sign a certificate. + fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; +} + +/// Standard implementation of [`ViewSyncExchangeType`] based on Hot Stuff consensus. +#[derive(Derivative)] +#[derivative(Clone, Debug)] +pub struct ViewSyncExchange< + TYPES: NodeType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, MEMBERSHIP>, + M: NetworkMsg, +> { + /// The network being used by this exchange. + network: NETWORK, + /// The committee which votes on proposals. + membership: MEMBERSHIP, + /// This participant's public key. + public_key: TYPES::SignatureKey, + /// Entry with public key and staking value for certificate aggregation in the stake table. + entry: ::StakeTableEntry, + /// This participant's private key. + #[derivative(Debug = "ignore")] + private_key: ::PrivateKey, + #[doc(hidden)] + _pd: PhantomData<(PROPOSAL, MEMBERSHIP, M)>, +} + +impl< + TYPES: NodeType, + MEMBERSHIP: Membership, + PROPOSAL: ProposalType, + NETWORK: CommunicationChannel, MEMBERSHIP>, + M: NetworkMsg, + > ViewSyncExchangeType for ViewSyncExchange +{ + fn create_precommit_message>( + &self, + round: TYPES::Time, + relay: u64, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage { + let relay_pub_key = self.get_leader(round + relay).to_bytes(); + + let vote_data_internal: ViewSyncData = ViewSyncData { + relay: relay_pub_key.clone(), + round, + }; + + let vote_data_internal_commitment = vote_data_internal.commit(); + + let signature = self.sign_precommit_message(vote_data_internal_commitment); + + GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::PreCommit( + ViewSyncVoteInternal { + relay_pub_key, + relay, + round, + signature, + vote_token, + vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), + }, + )) + } + + fn sign_precommit_message( + &self, + commitment: Commitment>, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), + ); + + (self.public_key.to_bytes(), signature) + } + + fn create_commit_message>( + &self, + round: TYPES::Time, + relay: u64, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage { + let relay_pub_key = self.get_leader(round + relay).to_bytes(); + + let vote_data_internal: ViewSyncData = ViewSyncData { + relay: relay_pub_key.clone(), + round, + }; + + let vote_data_internal_commitment = vote_data_internal.commit(); + + let signature = self.sign_commit_message(vote_data_internal_commitment); + + GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Commit( + ViewSyncVoteInternal { + relay_pub_key, + relay, + round, + signature, + vote_token, + vote_data: VoteData::ViewSyncCommit(vote_data_internal_commitment), + }, + )) + } + + fn sign_commit_message( + &self, + commitment: Commitment>, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::ViewSyncCommit(commitment).commit().as_ref(), + ); + + (self.public_key.to_bytes(), signature) + } + + fn create_finalize_message>( + &self, + round: TYPES::Time, + relay: u64, + vote_token: TYPES::VoteTokenType, + ) -> GeneralConsensusMessage { + let relay_pub_key = self.get_leader(round + relay).to_bytes(); + + let vote_data_internal: ViewSyncData = ViewSyncData { + relay: relay_pub_key.clone(), + round, + }; + + let vote_data_internal_commitment = vote_data_internal.commit(); + + let signature = self.sign_finalize_message(vote_data_internal_commitment); + + GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Finalize( + ViewSyncVoteInternal { + relay_pub_key, + relay, + round, + signature, + vote_token, + vote_data: VoteData::ViewSyncFinalize(vote_data_internal_commitment), + }, + )) + } + + fn sign_finalize_message( + &self, + commitment: Commitment>, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::ViewSyncFinalize(commitment).commit().as_ref(), + ); + + (self.public_key.to_bytes(), signature) + } + + fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool { + // Sishan NOTE TODO: would be better to test this, looks like this func is never called. + let (certificate_internal, _threshold, vote_data) = match certificate.clone() { + ViewSyncCertificate::PreCommit(certificate_internal) => { + let vote_data = ViewSyncData:: { + relay: self + .get_leader(round + certificate_internal.relay) + .to_bytes(), + round, + }; + (certificate_internal, self.failure_threshold(), vote_data) + } + ViewSyncCertificate::Commit(certificate_internal) + | ViewSyncCertificate::Finalize(certificate_internal) => { + let vote_data = ViewSyncData:: { + relay: self + .get_leader(round + certificate_internal.relay) + .to_bytes(), + round, + }; + (certificate_internal, self.success_threshold(), vote_data) + } + }; + match certificate_internal.signatures { + AssembledSignature::ViewSyncPreCommit(raw_signatures) => { + let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().failure_threshold().get()), + ); + ::check( + &real_qc_pp, + real_commit.as_ref(), + &raw_signatures, + ) + } + AssembledSignature::ViewSyncCommit(raw_signatures) => { + let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check( + &real_qc_pp, + real_commit.as_ref(), + &raw_signatures, + ) + } + AssembledSignature::ViewSyncFinalize(raw_signatures) => { + let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check( + &real_qc_pp, + real_commit.as_ref(), + &raw_signatures, + ) + } + _ => true, + } + } + + fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature { + let signature = TYPES::SignatureKey::sign(&self.private_key, certificate.commit().as_ref()); + signature + } +} + +impl< + TYPES: NodeType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, MEMBERSHIP>, + M: NetworkMsg, + > ConsensusExchange for ViewSyncExchange +{ + type Proposal = PROPOSAL; + type Vote = ViewSyncVote; + type Certificate = ViewSyncCertificate; + type Membership = MEMBERSHIP; + type Networking = NETWORK; + type Commitment = ViewSyncData; + + fn create( + entries: Vec<::StakeTableEntry>, + keys: Vec, + config: TYPES::ElectionConfigType, + network: Self::Networking, + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self { + let membership = >::Membership::create_election( + entries, keys, config, + ); + Self { + network, + membership, + public_key: pk, + entry, + private_key: sk, + _pd: PhantomData, + } + } + + fn network(&self) -> &NETWORK { + &self.network + } + + fn vote_data(&self, _commit: Commitment) -> VoteData { + unimplemented!() + } + + fn accumulate_vote( + &self, + encoded_key: &EncodedPublicKey, + encoded_signature: &EncodedSignature, + leaf_commitment: Commitment>, + vote_data: VoteData, + vote_token: TYPES::VoteTokenType, + view_number: TYPES::Time, + accumlator: VoteAccumulator>, + relay: Option, + ) -> Either>, Self::Certificate> { + let meta = VoteMetaData { + encoded_key: encoded_key.clone(), + encoded_signature: encoded_signature.clone(), + commitment: leaf_commitment, + data: vote_data, + vote_token, + view_number, + relay, + }; + self.accumulate_internal(meta, accumlator) + } + + fn membership(&self) -> &Self::Membership { + &self.membership + } + fn public_key(&self) -> &TYPES::SignatureKey { + &self.public_key + } + fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { + &self.private_key + } +} + +/// Testable implementation of a [`Membership`]. Will expose a method to generate a vote token used for testing. +pub trait TestableElection: Membership { + /// Generate a vote token used for testing. + fn generate_test_vote_token() -> TYPES::VoteTokenType; +} diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs new file mode 100644 index 0000000000..0253e129d5 --- /dev/null +++ b/types/src/traits/metrics.rs @@ -0,0 +1,286 @@ +//! The [`Metrics`] trait is used to collect information from multiple components in the entire system. +//! +//! This trait can be used to spawn the following traits: +//! - [`Counter`]: an ever-increasing value (example usage: total bytes send/received) +//! - [`Gauge`]: a value that store the latest value, and can go up and down (example usage: amount of users logged in) +//! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) +//! - [`Label`]: Stores the last string (example usage: current version, network online/offline) + +/// The metrics type. +pub trait Metrics: Send + Sync { + /// Create a [`Counter`] with an optional `unit_label`. + /// + /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" + fn create_counter(&self, label: String, unit_label: Option) -> Box; + /// Create a [`Gauge`] with an optional `unit_label`. + /// + /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" + fn create_gauge(&self, label: String, unit_label: Option) -> Box; + /// Create a [`Histogram`] with an optional `unit_label`. + /// + /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" + fn create_histogram(&self, label: String, unit_label: Option) -> Box; + /// Create a [`Label`]. + fn create_label(&self, label: String) -> Box; + + /// Create a subgroup with a specified prefix. + fn subgroup(&self, subgroup_name: String) -> Box; +} + +/// Use this if you're not planning to use any metrics. All methods are implemented as a no-op +#[derive(Clone, Copy, Debug, Default)] +pub struct NoMetrics; + +impl NoMetrics { + /// Create a new `Box` with this [`NoMetrics`] + #[must_use] + pub fn boxed() -> Box { + Box::::default() + } +} + +impl Metrics for NoMetrics { + fn create_counter(&self, _: String, _: Option) -> Box { + Box::new(NoMetrics) + } + + fn create_gauge(&self, _: String, _: Option) -> Box { + Box::new(NoMetrics) + } + + fn create_histogram(&self, _: String, _: Option) -> Box { + Box::new(NoMetrics) + } + + fn create_label(&self, _: String) -> Box { + Box::new(NoMetrics) + } + + fn subgroup(&self, _: String) -> Box { + Box::new(NoMetrics) + } +} + +impl Counter for NoMetrics { + fn add(&self, _: usize) {} +} +impl Gauge for NoMetrics { + fn set(&self, _: usize) {} + fn update(&self, _: i64) {} +} +impl Histogram for NoMetrics { + fn add_point(&self, _: f64) {} +} +impl Label for NoMetrics { + fn set(&self, _: String) {} +} + +/// An ever-incrementing counter +pub trait Counter: Send + Sync { + /// Add a value to the counter + fn add(&self, amount: usize); +} +/// A gauge that stores the latest value. +pub trait Gauge: Send + Sync { + /// Set the gauge value + fn set(&self, amount: usize); + + /// Update the guage value + fn update(&self, delts: i64); +} + +/// A histogram which will record a series of points. +pub trait Histogram: Send + Sync { + /// Add a point to this histogram. + fn add_point(&self, point: f64); +} + +/// A label that stores the last string value. +pub trait Label: Send + Sync { + /// Set the label value + fn set(&self, value: String); +} + +#[cfg(test)] +mod test { + use super::*; + use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + }; + + struct TestMetrics { + prefix: String, + values: Arc>, + } + + impl TestMetrics { + fn sub(&self, name: String) -> Self { + let prefix = if self.prefix.is_empty() { + name + } else { + format!("{}-{name}", self.prefix) + }; + Self { + prefix, + values: Arc::clone(&self.values), + } + } + } + + impl Metrics for TestMetrics { + fn create_counter( + &self, + label: String, + _unit_label: Option, + ) -> Box { + Box::new(self.sub(label)) + } + + fn create_gauge( + &self, + label: String, + _unit_label: Option, + ) -> Box { + Box::new(self.sub(label)) + } + + fn create_histogram( + &self, + label: String, + _unit_label: Option, + ) -> Box { + Box::new(self.sub(label)) + } + + fn create_label(&self, label: String) -> Box { + Box::new(self.sub(label)) + } + + fn subgroup(&self, subgroup_name: String) -> Box { + Box::new(self.sub(subgroup_name)) + } + } + + impl Counter for TestMetrics { + fn add(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .counters + .entry(self.prefix.clone()) + .or_default() += amount; + } + } + + impl Gauge for TestMetrics { + fn set(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .gauges + .entry(self.prefix.clone()) + .or_default() = amount; + } + fn update(&self, delta: i64) { + let mut values = self.values.lock().unwrap(); + let value = values.gauges.entry(self.prefix.clone()).or_default(); + let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); + *value = usize::try_from(signed_value + delta).unwrap_or(0); + } + } + + impl Histogram for TestMetrics { + fn add_point(&self, point: f64) { + self.values + .lock() + .unwrap() + .histograms + .entry(self.prefix.clone()) + .or_default() + .push(point); + } + } + + impl Label for TestMetrics { + fn set(&self, value: String) { + *self + .values + .lock() + .unwrap() + .labels + .entry(self.prefix.clone()) + .or_default() = value; + } + } + + #[derive(Default, Debug)] + struct Inner { + counters: HashMap, + gauges: HashMap, + histograms: HashMap>, + labels: HashMap, + } + + #[test] + fn test() { + let values = Arc::default(); + // This is all scoped so all the arcs should go out of scope + { + let metrics: Box = Box::new(TestMetrics { + prefix: String::new(), + values: Arc::clone(&values), + }); + + let gauge = metrics.create_gauge("foo".to_string(), None); + let counter = metrics.create_counter("bar".to_string(), None); + let histogram = metrics.create_histogram("baz".to_string(), None); + + gauge.set(5); + gauge.update(-2); + + for i in 0..5 { + counter.add(i); + } + + for i in 0..10 { + histogram.add_point(f64::from(i)); + } + + let sub = metrics.subgroup("child".to_string()); + + let sub_gauge = sub.create_gauge("foo".to_string(), None); + let sub_counter = sub.create_counter("bar".to_string(), None); + let sub_histogram = sub.create_histogram("baz".to_string(), None); + + sub_gauge.set(10); + + for i in 0..5 { + sub_counter.add(i * 2); + } + + for i in 0..10 { + sub_histogram.add_point(f64::from(i) * 2.0); + } + } + + // The above variables are scoped so they should be dropped at this point + // One of the rare times we can use `Arc::try_unwrap`! + let values = Arc::try_unwrap(values).unwrap().into_inner().unwrap(); + assert_eq!(values.gauges["foo"], 3); + assert_eq!(values.counters["bar"], 10); // 0..5 + assert_eq!( + values.histograms["baz"], + vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + ); + + assert_eq!(values.gauges["child-foo"], 10); + assert_eq!(values.counters["child-bar"], 20); // 0..5 *2 + assert_eq!( + values.histograms["child-baz"], + vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0] + ); + } +} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs new file mode 100644 index 0000000000..ee2d09672d --- /dev/null +++ b/types/src/traits/network.rs @@ -0,0 +1,370 @@ +//! Network access compatibility +//! +//! Contains types and traits used by `HotShot` to abstract over network access + +#[cfg(async_executor_impl = "async-std")] +use async_std::future::TimeoutError; +use hotshot_task::BoxSyncFuture; +use libp2p_networking::network::NetworkNodeHandleError; +#[cfg(async_executor_impl = "tokio")] +use tokio::time::error::Elapsed as TimeoutError; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} +use super::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}; +use crate::{data::ProposalType, message::MessagePurpose, vote::VoteType}; +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use std::{collections::BTreeSet, fmt::Debug, sync::Arc, time::Duration}; + +impl From for NetworkError { + fn from(error: NetworkNodeHandleError) -> Self { + match error { + NetworkNodeHandleError::SerializationError { source } => { + NetworkError::FailedToSerialize { source } + } + NetworkNodeHandleError::DeserializationError { source } => { + NetworkError::FailedToDeserialize { source } + } + NetworkNodeHandleError::TimeoutError { source } => NetworkError::Timeout { source }, + NetworkNodeHandleError::Killed => NetworkError::ShutDown, + source => NetworkError::Libp2p { source }, + } + } +} + +/// for any errors we decide to add to memory network +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum MemoryNetworkError { + /// stub + Stub, +} + +/// Centralized server specific errors +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum CentralizedServerNetworkError { + /// The centralized server could not find a specific message. + NoMessagesInQueue, +} + +/// Web server specific errors +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum WebServerNetworkError { + /// The injected consensus data is incorrect + IncorrectConsensusData, + /// The client returned an error + ClientError, + /// Endpoint parsed incorrectly + EndpointError, + /// Client disconnected + ClientDisconnected, +} + +/// the type of transmission +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub enum TransmitType { + /// directly transmit + Direct, + /// broadcast the message to all + Broadcast, +} + +/// Error type for networking +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum NetworkError { + /// Libp2p specific errors + Libp2p { + /// source of error + source: NetworkNodeHandleError, + }, + /// memory network specific errors + MemoryNetwork { + /// source of error + source: MemoryNetworkError, + }, + /// Centralized server specific errors + CentralizedServer { + /// source of error + source: CentralizedServerNetworkError, + }, + + /// Web server specific errors + WebServer { + /// source of error + source: WebServerNetworkError, + }, + /// unimplemented functionality + UnimplementedFeature, + /// Could not deliver a message to a specified recipient + CouldNotDeliver, + /// Attempted to deliver a message to an unknown node + NoSuchNode, + /// Failed to serialize a network message + FailedToSerialize { + /// Originating bincode error + source: bincode::Error, + }, + /// Failed to deserealize a network message + FailedToDeserialize { + /// originating bincode error + source: bincode::Error, + }, + /// A timeout occurred + Timeout { + /// Source of error + source: TimeoutError, + }, + /// Error sending output to consumer of NetworkingImplementation + /// TODO this should have more information + ChannelSend, + /// The underlying connection has been shut down + ShutDown, + /// unable to cancel a request, the request has already been cancelled + UnableToCancel, +} + +#[derive(Clone, Debug)] +// Storing view number as a u64 to avoid the need TYPES generic +/// Events to poll or cancel consensus processes. +pub enum ConsensusIntentEvent { + /// Poll for votes for a particular view + PollForVotes(u64), + /// Poll for a proposal for a particular view + PollForProposal(u64), + /// Poll for a DAC for a particular view + PollForDAC(u64), + /// Poll for view sync votes starting at a particular view + PollForViewSyncVotes(u64), + /// Poll for view sync proposals (certificates) for a particular view + PollForViewSyncCertificate(u64), + /// Poll for new transactions + PollForTransactions(u64), + /// Cancel polling for votes + CancelPollForVotes(u64), + /// Cancel polling for view sync votes. + CancelPollForViewSyncVotes(u64), + /// Cancel polling for proposals. + CancelPollForProposal(u64), + /// Cancal polling for DAC. + CancelPollForDAC(u64), + /// Cancel polling for view sync certificate. + CancelPollForViewSyncCertificate(u64), + /// Cancel polling for transactions + CancelPollForTransactions(u64), +} + +impl ConsensusIntentEvent { + /// Get the view number of the event. + #[must_use] + pub fn view_number(&self) -> u64 { + match &self { + ConsensusIntentEvent::PollForVotes(view_number) + | ConsensusIntentEvent::PollForProposal(view_number) + | ConsensusIntentEvent::PollForDAC(view_number) + | ConsensusIntentEvent::PollForViewSyncVotes(view_number) + | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) + | ConsensusIntentEvent::CancelPollForVotes(view_number) + | ConsensusIntentEvent::CancelPollForProposal(view_number) + | ConsensusIntentEvent::CancelPollForDAC(view_number) + | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) + | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) + | ConsensusIntentEvent::PollForTransactions(view_number) + | ConsensusIntentEvent::CancelPollForTransactions(view_number) => *view_number, + } + } +} + +/// common traits we would like our network messages to implement +pub trait NetworkMsg: + Serialize + for<'a> Deserialize<'a> + Clone + Sync + Send + Debug + 'static +{ +} + +/// a message +pub trait ViewMessage { + /// get the view out of the message + fn get_view_number(&self) -> TYPES::Time; + // TODO move out of this trait. + /// get the purpose of the message + fn purpose(&self) -> MessagePurpose; +} + +/// API for interacting directly with a consensus committee +/// intended to be implemented for both DA and for validating consensus committees +#[async_trait] +pub trait CommunicationChannel< + TYPES: NodeType, + M: NetworkMsg, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, +>: Clone + Debug + Send + Sync + 'static +{ + /// Underlying Network implementation's type + type NETWORK; + /// Blocks until node is successfully initialized + /// into the network + async fn wait_for_ready(&self); + + /// checks if the network is ready + /// nonblocking + async fn is_ready(&self) -> bool; + + /// Shut down this network. Afterwards this network should no longer be used. + /// + /// This should also cause other functions to immediately return with a [`NetworkError`] + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b; + + /// broadcast message to those listening on the communication channel + /// blocking + async fn broadcast_message( + &self, + message: M, + election: &MEMBERSHIP, + ) -> Result<(), NetworkError>; + + /// Sends a direct message to a specific node + /// blocking + async fn direct_message( + &self, + message: M, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError>; + + /// Moves out the entire queue of received messages of 'transmit_type` + /// + /// Will unwrap the underlying `NetworkMessage` + /// blocking + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result, NetworkError>> + where + 'a: 'b, + Self: 'b; + + /// look up a node + /// blocking + async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError>; + + /// Injects consensus data such as view number into the networking implementation + /// blocking + async fn inject_consensus_info(&self, event: ConsensusIntentEvent); +} + +/// represents a networking implmentration +/// exposes low level API for interacting with a network +/// intended to be implemented for libp2p, the centralized server, +/// and memory network +#[async_trait] +pub trait ConnectedNetwork: + Clone + Send + Sync + 'static +{ + /// Blocks until the network is successfully initialized + async fn wait_for_ready(&self); + + /// checks if the network is ready + /// nonblocking + async fn is_ready(&self) -> bool; + + /// Blocks until the network is shut down + /// then returns true + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b; + + /// broadcast message to some subset of nodes + /// blocking + async fn broadcast_message( + &self, + message: M, + recipients: BTreeSet, + ) -> Result<(), NetworkError>; + + /// Sends a direct message to a specific node + /// blocking + async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError>; + + /// Moves out the entire queue of received messages of 'transmit_type` + /// + /// Will unwrap the underlying `NetworkMessage` + /// blocking + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result, NetworkError>> + where + 'a: 'b, + Self: 'b; + + /// look up a node + /// blocking + async fn lookup_node(&self, pk: K) -> Result<(), NetworkError>; + + /// Injects consensus data such as view number into the networking implementation + /// blocking + /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType + async fn inject_consensus_info(&self, event: ConsensusIntentEvent); +} + +/// Describes additional functionality needed by the test network implementation +pub trait TestableNetworkingImplementation { + /// generates a network given an expected node count + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static>; + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option; +} +/// Describes additional functionality needed by the test communication channel +pub trait TestableChannelImplementation< + TYPES: NodeType, + M: NetworkMsg, + PROPOSAL: ProposalType, + VOTE: VoteType, + MEMBERSHIP: Membership, + NETWORK, +>: CommunicationChannel +{ + /// generates the `CommunicationChannel` given it's associated network type + fn generate_network() -> Box) -> Self + 'static>; +} + +/// Changes that can occur in the network +#[derive(Debug)] +pub enum NetworkChange { + /// A node is connected + NodeConnected(P), + + /// A node is disconnected + NodeDisconnected(P), +} + +/// interface describing how reliable the network is +pub trait NetworkReliability: Debug + Sync + std::marker::Send { + /// Sample from bernoulli distribution to decide whether + /// or not to keep a packet + /// # Panics + /// + /// Panics if `self.keep_numerator > self.keep_denominator` + /// + fn sample_keep(&self) -> bool; + /// sample from uniform distribution to decide whether + /// or not to keep a packet + fn sample_delay(&self) -> Duration; +} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs new file mode 100644 index 0000000000..8ba5abdebb --- /dev/null +++ b/types/src/traits/node_implementation.rs @@ -0,0 +1,582 @@ +//! Composite trait for node behavior +//! +//! This module defines the [`NodeImplementation`] trait, which is a composite trait used for +//! describing the overall behavior of a node, as a composition of implementations of the node trait. + +use super::{ + block_contents::Transaction, + election::{ + CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, + ViewSyncExchangeType, VoteToken, + }, + network::{CommunicationChannel, NetworkMsg, TestableNetworkingImplementation}, + state::{ConsensusTime, TestableBlock, TestableState}, + storage::{StorageError, StorageState, TestableStorage}, + State, +}; +use crate::{ + data::{LeafType, SequencingLeaf, TestableLeaf}, + message::{ConsensusMessageType, Message, SequencingMessage}, + traits::{ + election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, + storage::Storage, Block, + }, +}; +use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; +use async_lock::{Mutex, RwLock}; +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use std::{ + collections::BTreeMap, + fmt::Debug, + hash::Hash, + marker::PhantomData, + sync::{atomic::AtomicBool, Arc}, +}; +/// Alias for the [`ProcessedConsensusMessage`] type of a [`NodeImplementation`]. +type ProcessedConsensusMessageType = <>::ConsensusMessage as ConsensusMessageType>::ProcessedConsensusMessage; + +/// struct containing messages for a view to send to a replica or DA committee member. +#[derive(Clone)] +pub struct ViewQueue> { + /// to send networking events to a replica or DA committee member. + pub sender_chan: UnboundedSender>, + + /// to recv networking events for a replica or DA committee member. + pub receiver_chan: Arc>>>, + + /// `true` if this queue has already received a proposal + pub has_received_proposal: Arc, +} + +impl> Default for ViewQueue { + /// create new view queue + fn default() -> Self { + let (s, r) = unbounded(); + ViewQueue { + sender_chan: s, + receiver_chan: Arc::new(Mutex::new(r)), + has_received_proposal: Arc::new(AtomicBool::new(false)), + } + } +} + +/// metadata for sending information to the leader, replica, or DA committee member. +pub struct SendToTasks> { + /// the current view number + /// this should always be in sync with `Consensus` + pub cur_view: TYPES::Time, + + /// a map from view number to ViewQueue + /// one of (replica|next leader)'s' task for view i will be listening on the channel in here + pub channel_map: BTreeMap>, +} + +impl> SendToTasks { + /// create new sendtosasks + #[must_use] + pub fn new(view_num: TYPES::Time) -> Self { + SendToTasks { + cur_view: view_num, + channel_map: BTreeMap::default(), + } + } +} + +/// Channels for sending/recv-ing proposals and votes. +#[derive(Clone)] +pub struct ChannelMaps> { + /// Channel for the next consensus leader or DA leader. + pub proposal_channel: Arc>>, + + /// Channel for the replica or DA committee member. + pub vote_channel: Arc>>, +} + +impl> ChannelMaps { + /// Create channels starting from a given view. + pub fn new(start_view: TYPES::Time) -> Self { + Self { + proposal_channel: Arc::new(RwLock::new(SendToTasks::new(start_view))), + vote_channel: Arc::new(RwLock::new(SendToTasks::new(start_view))), + } + } +} + +/// Node implementation aggregate trait +/// +/// This trait exists to collect multiple behavior implementations into one type, to allow +/// `HotShot` to avoid annoying numbers of type arguments and type patching. +/// +/// It is recommended you implement this trait on a zero sized type, as `HotShot`does not actually +/// store or keep a reference to any value implementing this trait. + +pub trait NodeImplementation: + Send + Sync + Debug + Clone + Eq + Hash + 'static + Serialize + for<'de> Deserialize<'de> +{ + /// Leaf type for this consensus implementation + type Leaf: LeafType; + + /// Storage type for this consensus implementation + type Storage: Storage + Clone; + + /// Consensus message type. + type ConsensusMessage: ConsensusMessageType + + Clone + + Debug + + Send + + Sync + + 'static + + for<'a> Deserialize<'a> + + Serialize; + + /// Consensus type selected exchanges. + /// + /// Implements either `ValidatingExchangesType` or `SequencingExchangesType`. + type Exchanges: ExchangesType>; + + /// Create channels for sending/recv-ing proposals and votes for quorum and committee + /// exchanges, the latter of which is only applicable for sequencing consensus. + fn new_channel_maps( + start_view: TYPES::Time, + ) -> (ChannelMaps, Option>); +} + +/// Contains the protocols for exchanging proposals and votes. +#[allow(clippy::type_complexity)] +#[async_trait] +pub trait ExchangesType, MESSAGE: NetworkMsg>: + Send + Sync +{ + /// Protocol for exchanging data availability proposals and votes. + type CommitteeExchange: CommitteeExchangeType + Clone + Debug; + + /// Get the committee exchange. + fn committee_exchange(&self) -> &Self::CommitteeExchange; + + /// Protocol for exchanging quorum proposals and votes. + type QuorumExchange: QuorumExchangeType + Clone + Debug; + + /// Protocol for exchanging view sync proposals and votes. + type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; + + /// Election configurations for exchanges + type ElectionConfigs; + + /// Create all exchanges. + fn create( + entries: Vec<::StakeTableEntry>, + keys: Vec, + configs: Self::ElectionConfigs, + networks: ( + >::Networking, + >::Networking, + >::Networking, + ), + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self; + + /// Get the quorum exchange. + fn quorum_exchange(&self) -> &Self::QuorumExchange; + + /// Get the view sync exchange. + fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; + + /// Block the underlying networking interfaces until node is successfully initialized into the + /// networks. + async fn wait_for_networks_ready(&self); + + /// Shut down the the underlying networking interfaces. + async fn shut_down_networks(&self); +} + +/// an exchange that is testable +pub trait TestableExchange, MESSAGE: NetworkMsg>: + ExchangesType +{ + /// generate communication channels + #[allow(clippy::type_complexity)] + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box< + dyn Fn( + u64, + ) -> ( + >::Networking, + >::Networking, + >::Networking, + ) + 'static, + >; +} + +/// Implementes [`SequencingExchangesType`]. +#[derive(Clone, Debug)] +pub struct SequencingExchanges< + TYPES: NodeType, + MESSAGE: NetworkMsg, + QUORUMEXCHANGE: QuorumExchangeType, MESSAGE>, + COMMITTEEEXCHANGE: CommitteeExchangeType, + VIEWSYNCEXCHANGE: ViewSyncExchangeType, +> { + /// Quorum exchange. + quorum_exchange: QUORUMEXCHANGE, + + /// View sync exchange. + view_sync_exchange: VIEWSYNCEXCHANGE, + + /// Committee exchange. + committee_exchange: COMMITTEEEXCHANGE, + + /// Phantom data. + _phantom: PhantomData<(TYPES, MESSAGE)>, +} + +#[async_trait] +impl + ExchangesType, MESSAGE> + for SequencingExchanges +where + TYPES: NodeType, + MESSAGE: NetworkMsg, + QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, + COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, + VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, +{ + type CommitteeExchange = COMMITTEEEXCHANGE; + type QuorumExchange = QUORUMEXCHANGE; + type ViewSyncExchange = VIEWSYNCEXCHANGE; + type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); + + fn committee_exchange(&self) -> &COMMITTEEEXCHANGE { + &self.committee_exchange + } + + fn create( + entries: Vec<::StakeTableEntry>, + keys: Vec, + configs: Self::ElectionConfigs, + networks: ( + >::Networking, + >::Networking, + >::Networking, + ), + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self { + let quorum_exchange = QUORUMEXCHANGE::create( + entries.clone(), + keys.clone(), + configs.0.clone(), + networks.0, + pk.clone(), + entry.clone(), + sk.clone(), + ); + let view_sync_exchange = VIEWSYNCEXCHANGE::create( + entries.clone(), + keys.clone(), + configs.0, + networks.2, + pk.clone(), + entry.clone(), + sk.clone(), + ); + let committee_exchange = + COMMITTEEEXCHANGE::create(entries, keys, configs.1, networks.1, pk, entry, sk); + + Self { + quorum_exchange, + committee_exchange, + view_sync_exchange, + _phantom: PhantomData, + } + } + + fn quorum_exchange(&self) -> &Self::QuorumExchange { + &self.quorum_exchange + } + + fn view_sync_exchange(&self) -> &Self::ViewSyncExchange { + &self.view_sync_exchange + } + + async fn wait_for_networks_ready(&self) { + self.quorum_exchange.network().wait_for_ready().await; + self.committee_exchange.network().wait_for_ready().await; + } + + async fn shut_down_networks(&self) { + self.quorum_exchange.network().shut_down().await; + self.committee_exchange.network().shut_down().await; + } +} + +/// Alias for the [`QuorumExchange`] type. +pub type QuorumEx = <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, +>>::QuorumExchange; + +/// Alias for the [`CommitteeExchange`] type for sequencing consensus. +pub type SequencingQuorumEx = + <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, + >>::QuorumExchange; + +/// Alias for the [`CommitteeExchange`] type. +pub type CommitteeEx = <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, +>>::CommitteeExchange; + +/// Alias for the [`ViewSyncExchange`] type. +pub type ViewSyncEx = <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, +>>::ViewSyncExchange; + +/// extra functions required on a node implementation to be usable by hotshot-testing +#[allow(clippy::type_complexity)] +#[async_trait] +pub trait TestableNodeImplementation: NodeImplementation { + /// Election config for the DA committee + type CommitteeElectionConfig; + + /// Generates a committee-specific election + fn committee_election_config_generator( + ) -> Box Self::CommitteeElectionConfig + 'static>; + + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn state_create_random_transaction( + state: Option<&TYPES::StateType>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; + + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn leaf_create_random_transaction( + leaf: &Self::Leaf, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; + + /// generate a genesis block + fn block_genesis() -> TYPES::BlockType; + + /// the number of transactions in a block + fn txn_count(block: &TYPES::BlockType) -> u64; + + /// Create ephemeral storage + /// Will be deleted/lost immediately after storage is dropped + /// # Errors + /// Errors if it is not possible to construct temporary storage. + fn construct_tmp_storage() -> Result; + + /// Return the full internal state. This is useful for debugging. + async fn get_full_state(storage: &Self::Storage) -> StorageState; +} + +#[async_trait] +impl< + TYPES: NodeType, + I: NodeImplementation>, + > TestableNodeImplementation for I +where + CommitteeNetwork: TestableNetworkingImplementation>, + QuorumNetwork: TestableNetworkingImplementation>, + QuorumCommChannel: TestableChannelImplementation< + TYPES, + Message, + QuorumProposalType, + QuorumVoteType, + QuorumMembership, + QuorumNetwork, + >, + CommitteeCommChannel: TestableChannelImplementation< + TYPES, + Message, + CommitteeProposalType, + CommitteeVote, + CommitteeMembership, + QuorumNetwork, + >, + ViewSyncCommChannel: TestableChannelImplementation< + TYPES, + Message, + ViewSyncProposalType, + ViewSyncVoteType, + ViewSyncMembership, + QuorumNetwork, + >, + TYPES::StateType: TestableState, + TYPES::BlockType: TestableBlock, + I::Storage: TestableStorage, + I::Leaf: TestableLeaf, +{ + type CommitteeElectionConfig = TYPES::ElectionConfigType; + + fn committee_election_config_generator( + ) -> Box Self::CommitteeElectionConfig + 'static> { + Box::new(|num_nodes| >::default_election_config(num_nodes)) + } + + fn state_create_random_transaction( + state: Option<&TYPES::StateType>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + ::create_random_transaction(state, rng, padding) + } + + fn leaf_create_random_transaction( + leaf: &Self::Leaf, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + ::create_random_transaction(leaf, rng, padding) + } + + fn block_genesis() -> TYPES::BlockType { + ::genesis() + } + + fn txn_count(block: &TYPES::BlockType) -> u64 { + ::txn_count(block) + } + + fn construct_tmp_storage() -> Result { + >::construct_tmp_storage() + } + + async fn get_full_state(storage: &Self::Storage) -> StorageState { + >::get_full_state(storage).await + } +} + +/// A proposal to append a new leaf to the log which is output by consensus. +pub type QuorumProposalType = + as ConsensusExchange>>::Proposal; + +/// A proposal to provide data availability for a new leaf. +pub type CommitteeProposalType = + as ConsensusExchange>>::Proposal; + +/// A proposal to sync the view. +pub type ViewSyncProposalType = + as ConsensusExchange>>::Proposal; + +/// A vote on a [`QuorumProposalType`]. +pub type QuorumVoteType = + as ConsensusExchange>>::Vote; + +/// A vote on a [`ComitteeProposal`]. +pub type CommitteeVote = + as ConsensusExchange>>::Vote; + +/// A vote on a [`ViewSyncProposal`]. +pub type ViewSyncVoteType = + as ConsensusExchange>>::Vote; + +/// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. +pub type QuorumCommChannel = + as ConsensusExchange>>::Networking; + +/// Communication channel for [`ViewSyncProposalType`] and [`ViewSyncVote`]. +pub type ViewSyncCommChannel = + as ConsensusExchange>>::Networking; + +/// Communication channel for [`CommitteeProposalType`] and [`DAVote`]. +pub type CommitteeCommChannel = + as ConsensusExchange>>::Networking; + +/// Protocol for determining membership in a consensus committee. +pub type QuorumMembership = + as ConsensusExchange>>::Membership; + +/// Protocol for determining membership in a DA committee. +pub type CommitteeMembership = + as ConsensusExchange>>::Membership; + +/// Protocol for determining membership in a view sync committee. +pub type ViewSyncMembership = QuorumMembership; + +/// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels +pub type QuorumNetwork = as CommunicationChannel< + TYPES, + Message, + QuorumProposalType, + QuorumVoteType, + QuorumMembership, +>>::NETWORK; + +/// Type for the underlying committee `ConnectedNetwork` that will be shared (for now) b/t Communication Channels +pub type CommitteeNetwork = as CommunicationChannel< + TYPES, + Message, + CommitteeProposalType, + CommitteeVote, + CommitteeMembership, +>>::NETWORK; + +/// Type for the underlying view sync `ConnectedNetwork` that will be shared (for now) b/t Communication Channels +pub type ViewSyncNetwork = as CommunicationChannel< + TYPES, + Message, + ViewSyncProposalType, + ViewSyncVoteType, + ViewSyncMembership, +>>::NETWORK; + +/// Trait with all the type definitions that are used in the current hotshot setup. +pub trait NodeType: + Clone + + Copy + + Debug + + Hash + + PartialEq + + Eq + + PartialOrd + + Ord + + Default + + serde::Serialize + + for<'de> Deserialize<'de> + + Send + + Sync + + 'static +{ + /// The time type that this hotshot setup is using. + /// + /// This should be the same `Time` that `StateType::Time` is using. + type Time: ConsensusTime; + /// The block type that this hotshot setup is using. + /// + /// This should be the same block that `StateType::BlockType` is using. + type BlockType: Block; + /// The signature key that this hotshot setup is using. + type SignatureKey: SignatureKey; + /// The vote token that this hotshot setup is using. + type VoteTokenType: VoteToken; + /// The transaction type that this hotshot setup is using. + /// + /// This should be equal to `Block::Transaction` + type Transaction: Transaction; + /// The election config type that this hotshot setup is using. + type ElectionConfigType: ElectionConfig; + + /// The state type that this hotshot setup is using. + type StateType: State; +} diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs new file mode 100644 index 0000000000..1d3529d5fb --- /dev/null +++ b/types/src/traits/qc.rs @@ -0,0 +1,91 @@ +//! The quorum certificate (QC) trait is a certificate of a sufficient quorum of distinct +//! parties voted for a message or statement. + +use ark_std::{ + rand::{CryptoRng, RngCore}, + vec::Vec, +}; +use bitvec::prelude::*; +use generic_array::{ArrayLength, GenericArray}; +use jf_primitives::{errors::PrimitivesError, signatures::AggregateableSignatureSchemes}; +use serde::{Deserialize, Serialize}; + +/// Trait for validating a QC built from different signatures on the same message +pub trait QuorumCertificate Deserialize<'a>> +{ + /// Public parameters for generating the QC + /// E.g: snark proving/verifying keys, list of (or pointer to) public keys stored in the smart contract. + type QCProverParams: Serialize + for<'a> Deserialize<'a>; + + /// Public parameters for validating the QC + /// E.g: verifying keys, stake table commitment + type QCVerifierParams: Serialize + for<'a> Deserialize<'a>; + + /// Allows to fix the size of the message at compilation time. + type MessageLength: ArrayLength; + + /// Type of the actual quorum certificate object + type QC; + + /// Type of the quorum size (e.g. number of votes or accumulated weight of signatures) + type QuorumSize; + + /// Produces a partial signature on a message with a single user signing key + /// NOTE: the original message (vote) should be prefixed with the hash of the stake table. + /// * `agg_sig_pp` - public parameters for aggregate signature + /// * `message` - message to be signed + /// * `sk` - user signing key + /// * `returns` - a "simple" signature + /// + /// # Errors + /// + /// Should return error if the underlying signature scheme fail to sign. + fn sign( + agg_sig_pp: &A::PublicParameter, + message: &GenericArray, + sk: &A::SigningKey, + prng: &mut R, + ) -> Result; + + /// Computes an aggregated signature from a set of partial signatures and the verification keys involved + /// * `qc_pp` - public parameters for generating the QC + /// * `signers` - a bool vector indicating the list of verification keys corresponding to the set of partial signatures + /// * `sigs` - partial signatures on the same message + /// + /// # Errors + /// + /// Will return error if some of the partial signatures provided are invalid or the number of + /// partial signatures / verifications keys are different. + fn assemble( + qc_pp: &Self::QCProverParams, + signers: &BitSlice, + sigs: &[A::Signature], + ) -> Result; + + /// Checks an aggregated signature over some message provided as input + /// * `qc_vp` - public parameters for validating the QC + /// * `message` - message to check the aggregated signature against + /// * `qc` - quroum certificate + /// * `returns` - the quorum size if the qc is valid, an error otherwise. + /// + /// # Errors + /// + /// Return error if the QC is invalid, either because accumulated weight didn't exceed threshold, + /// or some partial signatures are invalid. + fn check( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result; + + /// Trace the list of signers given a qc. + /// + /// # Errors + /// + /// Return error if the inputs mismatch (e.g. wrong verifier parameter or original message). + fn trace( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result, PrimitivesError>; +} diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs new file mode 100644 index 0000000000..5daed175bd --- /dev/null +++ b/types/src/traits/signature_key.rs @@ -0,0 +1,127 @@ +//! Minimal compatibility over public key signatures +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use bitvec::prelude::*; +use espresso_systems_common::hotshot::tag; +use ethereum_types::U256; +use jf_primitives::signatures::{ + bls_over_bn254::BLSOverBN254CurveSignatureScheme, SignatureScheme, +}; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, hash::Hash}; +use tagged_base64::tagged; + +/// Type saftey wrapper for byte encoded keys +#[tagged(tag::ENCODED_PUB_KEY)] +#[derive( + Clone, + custom_debug::Debug, + Hash, + CanonicalSerialize, + CanonicalDeserialize, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct EncodedPublicKey( + #[debug(with = "custom_debug::hexbuf")] pub Vec, // pub ::VerificationKey +); + +/// Type saftey wrapper for byte encoded signature +#[derive( + Clone, custom_debug::Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, +)] +pub struct EncodedSignature( + #[debug(with = "custom_debug::hexbuf")] pub Vec, // pub ::Signature +); + +impl AsRef<[u8]> for EncodedSignature { + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } +} + +/// Trait for abstracting public key signatures +pub trait SignatureKey: + Send + + Sync + + Clone + + Sized + + Debug + + Hash + + Serialize + + for<'a> Deserialize<'a> + + PartialEq + + Eq + + PartialOrd + + Ord +{ + /// The private key type for this signature algorithm + type PrivateKey: Send + Sync + Sized + Clone; + /// The type of the entry that contain both public key and stake value + type StakeTableEntry: Send + + Sync + + Sized + + Clone + + Debug + + Hash + + Eq + + Serialize + + for<'a> Deserialize<'a>; + /// The type of the quorum certificate parameters used for assembled signature + type QCParams: Send + Sync + Sized + Clone + Debug + Hash; + /// The type of the assembled qc: assembled signature + `BitVec` + type QCType: Send + + Sync + + Sized + + Clone + + Debug + + Hash + + PartialEq + + Eq + + Serialize + + for<'a> Deserialize<'a>; + + // Signature type represented as a vec/slice of bytes to let the implementer handle the nuances + // of serialization, to avoid Cryptographic pitfalls + /// Validate a signature + fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool; + /// Produce a signature + fn sign(private_key: &Self::PrivateKey, data: &[u8]) -> EncodedSignature; + /// Produce a public key from a private key + fn from_private(private_key: &Self::PrivateKey) -> Self; + /// Serialize a public key to bytes + fn to_bytes(&self) -> EncodedPublicKey; + /// Deserialize a public key from bytes + fn from_bytes(bytes: &EncodedPublicKey) -> Option; + + /// Generate a new key pair + fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey); + + /// get the stake table entry from the public key and stake value + fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; + + /// get the public parameter for the assembled signature checking + fn get_public_parameter( + stake_entries: Vec, + threshold: U256, + ) -> Self::QCParams; + + /// check the quorum certificate for the assembled signature + fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; + + /// get the assembled signature and the `BitVec` separately from the assembled signature + fn get_sig_proof( + signature: &Self::QCType, + ) -> ( + ::Signature, + BitVec, + ); + + /// assemble the signature from the partial signature and the indication of signers in `BitVec` + fn assemble( + real_qc_pp: &Self::QCParams, + signers: &BitSlice, + sigs: &[::Signature], + ) -> Self::QCType; +} diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs new file mode 100644 index 0000000000..64e748016a --- /dev/null +++ b/types/src/traits/stake_table.rs @@ -0,0 +1,205 @@ +//! Trait for stake table data structures + +use ark_std::{rand::SeedableRng, string::ToString, vec::Vec}; +use digest::crypto_common::rand_core::CryptoRngCore; +use displaydoc::Display; +use jf_primitives::errors::PrimitivesError; + +/// Snapshots of the stake table +pub enum SnapshotVersion { + /// the latest "Head" where all new changes are applied to + Head, + /// marks the snapshot at the beginning of the current epoch + EpochStart, + /// marks the beginning of the last epoch + LastEpochStart, + /// at arbitrary block height + BlockNum(u64), +} + +/// Common interfaces required for a stake table used in `HotShot` System. +/// APIs that doesn't take `version: SnapshotVersion` as an input by default works on the head/latest version. +pub trait StakeTableScheme { + /// type for stake key + type Key: Clone; + /// type for the staked amount + type Amount: Clone + Copy; + /// type for the commitment to the current stake table + type Commitment; + /// type for the proof associated with the lookup result (if any) + type LookupProof; + /// type for the iterator over (key, value) entries + type IntoIter: Iterator; + + /// Register a new key into the stake table. + /// + /// # Errors + /// + /// Return err if key is already registered. + fn register(&mut self, new_key: Self::Key, amount: Self::Amount) + -> Result<(), StakeTableError>; + + /// Batch register a list of new keys. A default implementation is provided + /// w/o batch optimization. + /// + /// # Errors + /// + /// Return err if any of `new_keys` fails to register. + fn batch_register(&mut self, new_keys: I, amounts: J) -> Result<(), StakeTableError> + where + I: IntoIterator, + J: IntoIterator, + { + let _ = new_keys + .into_iter() + .zip(amounts.into_iter()) + .try_for_each(|(key, amount)| Self::register(self, key, amount)); + Ok(()) + } + + /// Deregister an existing key from the stake table. + /// Returns error if some keys are not found. + /// + /// # Errors + /// Return err if `existing_key` wasn't registered. + fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError>; + + /// Batch deregister a list of keys. A default implementation is provided + /// w/o batch optimization. + /// + /// # Errors + /// Return err if any of `existing_keys` fail to deregister. + fn batch_deregister<'a, I>(&mut self, existing_keys: I) -> Result<(), StakeTableError> + where + I: IntoIterator::Key>, + ::Key: 'a, + { + let _ = existing_keys + .into_iter() + .try_for_each(|key| Self::deregister(self, key)); + Ok(()) + } + + /// Returns the commitment to the `version` of stake table. + /// + /// # Errors + /// Return err if the `version` is not supported. + fn commitment(&self, version: SnapshotVersion) -> Result; + + /// Returns the accumulated stakes of all registered keys of the `version` + /// of stake table. + /// + /// # Errors + /// Return err if the `version` is not supported. + fn total_stake(&self, version: SnapshotVersion) -> Result; + + /// Returns the number of keys in the `version` of the table. + /// + /// # Errors + /// Return err if the `version` is not supported. + fn len(&self, version: SnapshotVersion) -> Result; + + /// Returns true if `key` is currently registered, else returns false. + fn contains_key(&self, key: &Self::Key) -> bool; + + /// Lookup the stake under a key against a specific historical `version`, + /// returns error if keys unregistered. + /// + /// # Errors + /// Return err if the `version` is not supported or `key` doesn't exist. + fn lookup( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError>; + + /// Returns the stakes withhelded by a public key, None if the key is not registered. + /// If you need a lookup proof, use [`Self::lookup()`] instead (which is usually more expensive). + /// + /// # Errors + /// Return err if the `version` is not supported or `key` doesn't exist. + fn simple_lookup( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result; + + /// Update the stake of the `key` with `(negative ? -1 : 1) * delta`. + /// Return the updated stake or error. + /// + /// # Errors + /// Return err if the `key` doesn't exist of if the update overflow/underflow. + fn update( + &mut self, + key: &Self::Key, + delta: Self::Amount, + negative: bool, + ) -> Result; + + /// Batch update the stake balance of `keys`. Read documentation about + /// [`Self::update()`]. By default, we call `Self::update()` on each + /// (key, amount, negative) tuple. + /// + /// # Errors + /// Return err if any one of the `update` failed. + fn batch_update( + &mut self, + keys: &[Self::Key], + amounts: &[Self::Amount], + negative_flags: Vec, + ) -> Result, StakeTableError> { + let updated_amounts = keys + .iter() + .zip(amounts.iter()) + .zip(negative_flags.iter()) + .map(|((key, &amount), negative)| Self::update(self, key, amount, *negative)) + .collect::, _>>()?; + + Ok(updated_amounts) + } + + /// Randomly sample a (key, stake amount) pair proportional to the stake distributions, + /// given a fixed seed for `rng`, this sampling should be deterministic. + fn sample( + &self, + rng: &mut (impl SeedableRng + CryptoRngCore), + ) -> Option<(&Self::Key, &Self::Amount)>; + + /// Returns an iterator over all (key, value) entries of the `version` of the table + /// + /// # Errors + /// Return err if the `version` is not supported. + fn try_iter(&self, version: SnapshotVersion) -> Result; +} + +/// Error type for [`StakeTableScheme`] +#[derive(Debug, Display)] +pub enum StakeTableError { + /// Internal error caused by Rescue + RescueError, + /// Key mismatched + MismatchedKey, + /// Key not found + KeyNotFound, + /// Key already exists + ExistingKey, + /// Malformed Merkle proof + MalformedProof, + /// Verification Error + VerificationError, + /// Insufficient fund: the number of stake cannot be negative + InsufficientFund, + /// The number of stake exceed U256 + StakeOverflow, + /// The historical snapshot requested is not supported. + SnapshotUnsupported, +} + +impl ark_std::error::Error for StakeTableError {} + +impl From for PrimitivesError { + fn from(value: StakeTableError) -> Self { + // FIXME: (alex) should we define a PrimitivesError::General()? + Self::ParameterError(value.to_string()) + } +} diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs new file mode 100644 index 0000000000..83fb250442 --- /dev/null +++ b/types/src/traits/state.rs @@ -0,0 +1,201 @@ +//! Abstraction over the global state that blocks modify +//! +//! This module provides the [`State`] trait, which serves as an compatibility over the current +//! network state, which is modified by the transactions contained within blocks. + +use crate::traits::Block; +use commit::Committable; +use espresso_systems_common::hotshot::tag; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + error::Error, + fmt::Debug, + hash::Hash, + ops, + ops::{Deref, Sub}, +}; + +/// Abstraction over the state that blocks modify +/// +/// This trait represents the behaviors that the 'global' ledger state must have: +/// * A defined error type ([`Error`](State::Error)) +/// * The type of block that modifies this type of state ([`Block`](State::BlockType)) +/// * A method to get a template (empty) next block from the current state +/// ([`next_block`](State::next_block)) +/// * The ability to validate that a block is actually a valid extension of this state +/// ([`validate_block`](State::validate_block)) +/// * The ability to produce a new state, with the modifications from the block applied +/// ([`append`](State::append)) +pub trait State: + Serialize + + DeserializeOwned + + Clone + + Debug + + Default + + Hash + + PartialEq + + Eq + + Send + + Sync + + Committable +{ + /// The error type for this particular type of ledger state + type Error: Error + Debug + Send + Sync; + /// The type of block this state is associated with + type BlockType: Block; + /// Time compatibility needed for reward collection + type Time: ConsensusTime; + + /// Returns an empty, template next block given this current state + fn next_block(prev_commitment: Option) -> Self::BlockType; + + /// Returns true if and only if the provided block is valid and can extend this state + fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool; + + /// Appends the given block to this state, returning an new state + /// + /// # Errors + /// + /// Should produce and error if appending this block would lead to an invalid state + fn append( + &self, + block: &Self::BlockType, + view_number: &Self::Time, + ) -> Result; + + /// Gets called to notify the persistence backend that this state has been committed + fn on_commit(&self); +} + +// TODO Seuqnecing here means involving DA in consensus + +/// Trait for time compatibility needed for reward collection +pub trait ConsensusTime: + PartialOrd + + Ord + + Send + + Sync + + Debug + + Clone + + Copy + + Hash + + Deref + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + ops::AddAssign + + ops::Add + + Sub + + 'static + + Committable +{ + /// Create a new instance of this time unit at time number 0 + #[must_use] + fn genesis() -> Self { + Self::new(0) + } + /// Create a new instance of this time unit + fn new(val: u64) -> Self; +} + +/// extra functions required on state to be usable by hotshot-testing +pub trait TestableState: State +where + ::BlockType: TestableBlock, +{ + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn create_random_transaction( + state: Option<&Self>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; +} + +/// extra functions required on block to be usable by hotshot-testing +pub trait TestableBlock: Block + Debug { + /// generate a genesis block + fn genesis() -> Self; + + /// the number of transactions in this block + fn txn_count(&self) -> u64; +} + +/// Dummy implementation of `State` for unit tests +pub mod dummy { + use super::{tag, Committable, Debug, Hash, Serialize, State, TestableState}; + use crate::{ + data::ViewNumber, + traits::block_contents::dummy::{DummyBlock, DummyError, DummyTransaction}, + }; + use rand::Rng; + use serde::Deserialize; + + /// The dummy state + #[derive(Clone, Debug, Default, Hash, PartialEq, Eq, Serialize, Deserialize)] + pub struct DummyState { + /// Some dummy data + nonce: u64, + } + + impl Committable for DummyState { + fn commit(&self) -> commit::Commitment { + commit::RawCommitmentBuilder::new("Dummy State Comm") + .u64_field("Nonce", self.nonce) + .finalize() + } + + fn tag() -> String { + tag::DUMMY_STATE.to_string() + } + } + + impl DummyState { + /// Generate a random `DummyState` + pub fn random(r: &mut dyn rand::RngCore) -> Self { + Self { + nonce: r.gen_range(1..1_000_000), + } + } + } + + impl State for DummyState { + type Error = DummyError; + + type BlockType = DummyBlock; + type Time = ViewNumber; + + fn next_block(state: Option) -> Self::BlockType { + match state { + Some(state) => DummyBlock { nonce: state.nonce }, + None => unimplemented!(), + } + } + + fn validate_block(&self, _block: &Self::BlockType, _view_number: &Self::Time) -> bool { + false + } + + fn append( + &self, + _block: &Self::BlockType, + _view_number: &Self::Time, + ) -> Result { + Ok(Self { + nonce: self.nonce + 1, + }) + } + + fn on_commit(&self) {} + } + + impl TestableState for DummyState { + fn create_random_transaction( + _state: Option<&Self>, + _: &mut dyn rand::RngCore, + _: u64, + ) -> DummyTransaction { + DummyTransaction::Dummy + } + } +} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs new file mode 100644 index 0000000000..41b7cc225a --- /dev/null +++ b/types/src/traits/storage.rs @@ -0,0 +1,179 @@ +//! Abstraction over on-disk storage of node state + +use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; +use crate::{ + certificate::QuorumCertificate, + data::LeafType, + traits::{election::SignedCertificate, Block}, +}; +use async_trait::async_trait; +use commit::Commitment; +use derivative::Derivative; +use snafu::Snafu; +use std::collections::{BTreeMap, BTreeSet}; +/// Errors that can occur in the storage layer. +#[derive(Clone, Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum StorageError { + /// No genesis view was inserted + NoGenesisView, +} + +/// Result for a storage type +pub type Result = std::result::Result; + +/// Abstraction over on disk persistence of node state +/// +/// This should be a cloneable handle to an underlying storage, with each clone pointing to the same +/// underlying storage. +/// +/// This trait has been constructed for object saftey over convenience. +#[async_trait] +pub trait Storage: Clone + Send + Sync + Sized + 'static +where + TYPES: NodeType + 'static, + LEAF: LeafType + 'static, +{ + /// Append the list of views to this storage + async fn append(&self, views: Vec>) -> Result; + /// Cleans up the storage up to the given view. The given view number will still persist in this storage afterwards. + async fn cleanup_storage_up_to_view(&self, view: TYPES::Time) -> Result; + /// Get the latest anchored view + async fn get_anchored_view(&self) -> Result>; + /// Commit this storage. + async fn commit(&self) -> Result; + + /// Insert a single view. Shorthand for + /// ```rust,ignore + /// storage.append(vec![ViewEntry::Success(view)]).await + /// ``` + async fn append_single_view(&self, view: StoredView) -> Result { + self.append(vec![ViewEntry::Success(view)]).await + } + // future improvement: + // async fn get_future_views(&self) -> Vec; + // async fn add_transaction(&self, transactions: Transaction) -> TransactionHash; + // async fn get_transactions(&self) -> Vec; + // async fn get_transaction(&self, hash: TransactionHash) -> Option; + // async fn remove_transaction(&self, hash: TransactionHash) -> Option; +} + +/// Extra requirements on Storage implementations required for testing +#[async_trait] +pub trait TestableStorage>: + Clone + Send + Sync + Storage +where + TYPES: NodeType + 'static, +{ + /// Create ephemeral storage + /// Will be deleted/lost immediately after storage is dropped + /// # Errors + /// Errors if it is not possible to construct temporary storage. + fn construct_tmp_storage() -> Result; + + /// Return the full internal state. This is useful for debugging. + async fn get_full_state(&self) -> StorageState; +} + +/// An internal representation of the data stored in a [`Storage`]. +/// +/// This should only be used for testing, never in production code. +#[derive(Debug, PartialEq)] +pub struct StorageState> { + /// The views that have been successful + pub stored: BTreeMap>, + /// The views that have failed + pub failed: BTreeSet, +} + +/// An entry to `Storage::append`. This makes it possible to commit both succeeded and failed views at the same time +#[derive(Debug, PartialEq)] +pub enum ViewEntry +where + TYPES: NodeType, + LEAF: LeafType, +{ + /// A succeeded view + Success(StoredView), + /// A failed view + Failed(TYPES::Time), + // future improvement: + // InProgress(InProgressView), +} + +impl From> for ViewEntry +where + TYPES: NodeType, + LEAF: LeafType, +{ + fn from(view: StoredView) -> Self { + Self::Success(view) + } +} + +impl From for ViewEntry +where + TYPES: NodeType, + LEAF: LeafType, +{ + fn from(leaf: LEAF) -> Self { + Self::Success(StoredView::from(leaf)) + } +} + +/// A view stored in the [`Storage`] +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq)] +pub struct StoredView> { + /// The view number of this view + pub view_number: TYPES::Time, + /// The index of `parent` in the chain + pub height: u64, + /// The parent of this view + pub parent: Commitment, + /// The justify QC of this view. See the hotstuff paper for more information on this. + pub justify_qc: QuorumCertificate, + /// The state of this view + pub state: LEAF::MaybeState, + /// The deltas of this view + pub deltas: LEAF::DeltasType, + /// transactions rejected in this view + pub rejected: Vec, + /// the timestamp this view was recv-ed in nanonseconds + #[derivative(PartialEq = "ignore")] + pub timestamp: i128, + /// the proposer id + #[derivative(PartialEq = "ignore")] + pub proposer_id: EncodedPublicKey, +} + +impl StoredView +where + TYPES: NodeType, + LEAF: LeafType, +{ + /// Create a new `StoredView` from the given QC, Block and State. + /// + /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. + pub fn from_qc_block_and_state( + qc: QuorumCertificate, + deltas: LEAF::DeltasType, + state: LEAF::MaybeState, + height: u64, + parent_commitment: Commitment, + rejected: Vec<::Transaction>, + proposer_id: EncodedPublicKey, + ) -> Self { + Self { + deltas, + view_number: qc.view_number(), + height, + parent: parent_commitment, + justify_qc: qc, + state, + rejected, + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id, + } + } +} diff --git a/types/src/utils.rs b/types/src/utils.rs new file mode 100644 index 0000000000..38c4b5a852 --- /dev/null +++ b/types/src/utils.rs @@ -0,0 +1,82 @@ +//! Utility functions, type aliases, helper structs and enum definitions. + +use crate::{ + data::{LeafBlock, LeafType}, + traits::node_implementation::NodeType, +}; +use commit::Commitment; +use std::ops::Deref; + +/// A view's state +#[derive(Debug)] +pub enum ViewInner> { + /// A pending view with an available block but not leaf proposal yet. + /// + /// Storing this state allows us to garbage collect blocks for views where a proposal is never + /// made. This saves memory when a leader fails and subverts a DoS attack where malicious + /// leaders repeatedly request availability for blocks that they never propose. + DA { + /// Available block. + block: Commitment>, + }, + /// Undecided view + Leaf { + /// Proposed leaf + leaf: Commitment, + }, + /// Leaf has failed + Failed, +} + +impl> ViewInner { + /// return the underlying leaf hash if it exists + #[must_use] + pub fn get_leaf_commitment(&self) -> Option> { + if let Self::Leaf { leaf } = self { + Some(*leaf) + } else { + None + } + } + + /// return the underlying block hash if it exists + #[must_use] + pub fn get_block_commitment(&self) -> Option>> { + if let Self::DA { block } = self { + Some(*block) + } else { + None + } + } +} + +impl> Deref for View { + type Target = ViewInner; + + fn deref(&self) -> &Self::Target { + &self.view_inner + } +} + +/// This exists so we can perform state transitions mutably +#[derive(Debug)] +pub struct View> { + /// The view data. Wrapped in a struct so we can mutate + pub view_inner: ViewInner, +} + +/// A struct containing information about a finished round. +#[derive(Debug, Clone)] +pub struct RoundFinishedEvent { + /// The round that finished + pub view_number: TYPES::Time, +} + +/// Whether or not to stop inclusively or exclusively when walking +#[derive(Copy, Clone, Debug)] +pub enum Terminator { + /// Stop right before this view number + Exclusive(T), + /// Stop including this view number + Inclusive(T), +} diff --git a/types/src/vote.rs b/types/src/vote.rs new file mode 100644 index 0000000000..7931001d60 --- /dev/null +++ b/types/src/vote.rs @@ -0,0 +1,474 @@ +//! Vote and vote accumulator types +//! +//! This module contains types used to represent the various types of votes that `HotShot` nodes +//! can send, and vote accumulator that converts votes into certificates. + +use crate::{ + certificate::{AssembledSignature, QuorumCertificate}, + data::LeafType, + traits::{ + election::{VoteData, VoteToken}, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + }, +}; +use bincode::Options; +use bitvec::prelude::*; +use commit::{Commitment, Committable}; +use either::Either; +use ethereum_types::U256; +use hotshot_utils::bincode::bincode_opts; +use jf_primitives::signatures::{ + bls_over_bn254::BLSOverBN254CurveSignatureScheme, SignatureScheme, +}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{BTreeMap, HashMap}, + fmt::Debug, + num::NonZeroU64, +}; + +/// The vote sent by consensus messages. +pub trait VoteType: + Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq +{ + /// The view this vote was cast for. + fn current_view(&self) -> TYPES::Time; +} + +/// A vote on DA proposal. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[serde(bound(deserialize = ""))] +pub struct DAVote { + /// The signature share associated with this vote + pub signature: (EncodedPublicKey, EncodedSignature), + /// The block commitment being voted on. + pub block_commitment: Commitment, + /// The view this vote was cast for + pub current_view: TYPES::Time, + /// The vote token generated by this replica + pub vote_token: TYPES::VoteTokenType, + /// The vote data this vote is signed over + pub vote_data: VoteData, +} + +/// A positive or negative vote on validating or commitment proposal. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct YesOrNoVote> { + /// TODO we should remove this + /// this is correct, but highly inefficient + /// we should check a cache, and if that fails request the qc + pub justify_qc_commitment: Commitment>, + /// The signature share associated with this vote + pub signature: (EncodedPublicKey, EncodedSignature), + /// The leaf commitment being voted on. + pub leaf_commitment: Commitment, + /// The view this vote was cast for + pub current_view: TYPES::Time, + /// The vote token generated by this replica + pub vote_token: TYPES::VoteTokenType, + /// The vote data this vote is signed over + pub vote_data: VoteData, +} + +/// A timeout vote. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct TimeoutVote> { + /// The highest valid QC this node knows about + pub high_qc: QuorumCertificate, + /// The signature share associated with this vote + pub signature: (EncodedPublicKey, EncodedSignature), + /// The view this vote was cast for + pub current_view: TYPES::Time, + /// The vote token generated by this replica + pub vote_token: TYPES::VoteTokenType, + /// The vote data this vote is signed over + pub vote_data: VoteData, +} + +/// The internals of a view sync vote +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct ViewSyncVoteInternal { + /// The public key associated with the relay. + pub relay_pub_key: EncodedPublicKey, + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::Time, + /// This node's signature over the VoteData + pub signature: (EncodedPublicKey, EncodedSignature), + /// The vote token generated by this replica + pub vote_token: TYPES::VoteTokenType, + /// The vote data this vote is signed over + pub vote_data: VoteData>, +} + +/// The data View Sync votes are signed over +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[serde(bound(deserialize = ""))] +pub struct ViewSyncData { + /// The relay this vote is intended for + pub relay: EncodedPublicKey, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} + +impl Committable for ViewSyncData { + fn commit(&self) -> Commitment { + let builder = commit::RawCommitmentBuilder::new("Quorum Certificate Commitment"); + + builder + .var_size_field("Relay public key", &self.relay.0) + .u64(*self.round) + .finalize() + } +} + +/// Votes to synchronize the network on a single view +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum ViewSyncVote { + /// PreCommit vote + PreCommit(ViewSyncVoteInternal), + /// Commit vote + Commit(ViewSyncVoteInternal), + /// Finalize vote + Finalize(ViewSyncVoteInternal), +} + +impl ViewSyncVote { + /// Get the encoded signature. + pub fn signature(&self) -> EncodedSignature { + match &self { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.signature.1.clone(), + } + } + /// Get the signature key. + /// # Panics + /// If the deserialization fails. + pub fn signature_key(&self) -> TYPES::SignatureKey { + let encoded = match &self { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.signature.0.clone(), + }; + ::from_bytes(&encoded).unwrap() + } + /// Get the relay. + pub fn relay(&self) -> u64 { + match &self { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.relay, + } + } + /// Get the round number. + pub fn round(&self) -> TYPES::Time { + match &self { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.round, + } + } +} + +/// Votes on validating or commitment proposal. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum QuorumVote> { + /// Posivite vote. + Yes(YesOrNoVote), + /// Negative vote. + No(YesOrNoVote), + /// Timeout vote. + Timeout(TimeoutVote), +} + +impl VoteType for DAVote { + fn current_view(&self) -> TYPES::Time { + self.current_view + } +} + +impl DAVote { + /// Get the signature key. + /// # Panics + /// If the deserialization fails. + pub fn signature_key(&self) -> TYPES::SignatureKey { + ::from_bytes(&self.signature.0).unwrap() + } +} + +impl> VoteType + for QuorumVote +{ + fn current_view(&self) -> TYPES::Time { + match self { + QuorumVote::Yes(v) | QuorumVote::No(v) => v.current_view, + QuorumVote::Timeout(v) => v.current_view, + } + } +} + +impl> QuorumVote { + /// Get the encoded signature. + pub fn signature(&self) -> EncodedSignature { + match &self { + Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), + Self::Timeout(vote) => vote.signature.1.clone(), + } + } + /// Get the signature key. + /// # Panics + /// If the deserialization fails. + pub fn signature_key(&self) -> TYPES::SignatureKey { + let encoded = match &self { + Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), + Self::Timeout(vote) => vote.signature.0.clone(), + }; + ::from_bytes(&encoded).unwrap() + } +} + +impl VoteType for ViewSyncVote { + fn current_view(&self) -> TYPES::Time { + match self { + ViewSyncVote::PreCommit(v) | ViewSyncVote::Commit(v) | ViewSyncVote::Finalize(v) => { + v.round + } + } + } +} + +/// The aggreation of votes, implemented by `VoteAccumulator`. +pub trait Accumulator: Sized { + /// Accumate the `val` to the current state. + /// + /// If a threshold is reached, returns `U` (e.g., a certificate). Else, returns `Self` and + /// continues accumulating items. + fn append(self, val: T) -> Either; +} + +/// Mapping of commitments to vote tokens by key. +type VoteMap = HashMap< + Commitment, + ( + u64, + BTreeMap, TOKEN)>, + ), +>; + +/// Describe the process of collecting signatures on block or leaf commitment, to form a DAC or QC, +/// respectively. +pub struct VoteAccumulator { + /// Map of all signatures accumlated so far + pub total_vote_outcomes: VoteMap, + /// Map of all da signatures accumlated so far + pub da_vote_outcomes: VoteMap, + /// Map of all yes signatures accumlated so far + pub yes_vote_outcomes: VoteMap, + /// Map of all no signatures accumlated so far + pub no_vote_outcomes: VoteMap, + /// Map of all view sync precommit votes accumulated thus far + pub viewsync_precommit_vote_outcomes: VoteMap, + /// Map of all view sync commit votes accumulated thus far + pub viewsync_commit_vote_outcomes: VoteMap, + /// Map of all view sync finalize votes accumulated thus far + pub viewsync_finalize_vote_outcomes: VoteMap, + /// A quorum's worth of stake, generall 2f + 1 + pub success_threshold: NonZeroU64, + /// Enough stake to know that we cannot possibly get a quorum, generally f + 1 + pub failure_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::Signature>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, +} + +impl + Accumulator< + ( + Commitment, + ( + EncodedPublicKey, + ( + EncodedSignature, + Vec<::StakeTableEntry>, + usize, + VoteData, + TOKEN, + ), + ), + ), + AssembledSignature, + > for VoteAccumulator +where + TOKEN: Clone + VoteToken, +{ + #![allow(clippy::too_many_lines)] + fn append( + mut self, + val: ( + Commitment, + ( + EncodedPublicKey, + ( + EncodedSignature, + Vec<::StakeTableEntry>, + usize, + VoteData, + TOKEN, + ), + ), + ), + ) -> Either> { + let (commitment, (key, (sig, entries, node_id, vote_data, token))) = val; + + // Desereialize the sig so that it can be assembeld into a QC + let origianl_sig: ::Signature = + bincode_opts() + .deserialize(&sig.0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (total_stake_casted, total_vote_map) = self + .total_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (da_stake_casted, da_vote_map) = self + .da_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (yes_stake_casted, yes_vote_map) = self + .yes_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (no_stake_casted, no_vote_map) = self + .no_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (viewsync_precommit_stake_casted, viewsync_precommit_vote_map) = self + .viewsync_precommit_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (viewsync_commit_stake_casted, viewsync_commit_vote_map) = self + .viewsync_commit_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (viewsync_finalize_stake_casted, viewsync_finalize_vote_map) = self + .viewsync_finalize_vote_outcomes + .entry(commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Accumulate the stake for each leaf commitment rather than the total + // stake of all votes, in case they correspond to inconsistent + // commitments. + + // Check for duplicate vote + if total_vote_map.contains_key(&key) { + // error!("Duplicate vote"); + return Either::Left(self); + } + + // update the active_keys and sig_lists + self.signers.set(node_id, true); + self.sig_lists.push(origianl_sig); + + *total_stake_casted += u64::from(token.vote_count()); + total_vote_map.insert(key.clone(), (sig.clone(), vote_data.clone(), token.clone())); + + match vote_data { + VoteData::DA(_) => { + *da_stake_casted += u64::from(token.vote_count()); + da_vote_map.insert(key, (sig, vote_data, token)); + } + VoteData::Yes(_) => { + *yes_stake_casted += u64::from(token.vote_count()); + yes_vote_map.insert(key, (sig, vote_data, token)); + } + VoteData::No(_) => { + *no_stake_casted += u64::from(token.vote_count()); + no_vote_map.insert(key, (sig, vote_data, token)); + } + VoteData::ViewSyncPreCommit(_) => { + *viewsync_precommit_stake_casted += u64::from(token.vote_count()); + viewsync_precommit_vote_map.insert(key, (sig, vote_data, token)); + } + VoteData::ViewSyncCommit(_) => { + *viewsync_commit_stake_casted += u64::from(token.vote_count()); + viewsync_commit_vote_map.insert(key, (sig, vote_data, token)); + } + VoteData::ViewSyncFinalize(_) => { + *viewsync_finalize_stake_casted += u64::from(token.vote_count()); + viewsync_finalize_vote_map.insert(key, (sig, vote_data, token)); + } + VoteData::Timeout(_) => { + unimplemented!() + } + } + + // This is a messy way of accounting for the different vote types, but we will be replacing this code very soon + if *total_stake_casted >= u64::from(self.success_threshold) { + // Do assemble for QC here + let real_qc_pp = ::get_public_parameter( + entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + if *yes_stake_casted >= u64::from(self.success_threshold) { + self.yes_vote_outcomes.remove(&commitment); + return Either::Right(AssembledSignature::Yes(real_qc_sig)); + } else if *no_stake_casted >= u64::from(self.failure_threshold) { + self.total_vote_outcomes.remove(&commitment); + return Either::Right(AssembledSignature::No(real_qc_sig)); + } else if *da_stake_casted >= u64::from(self.success_threshold) { + self.da_vote_outcomes.remove(&commitment); + return Either::Right(AssembledSignature::DA(real_qc_sig)); + } else if *viewsync_commit_stake_casted >= u64::from(self.success_threshold) { + self.viewsync_commit_vote_outcomes + .remove(&commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncCommit(real_qc_sig)); + } else if *viewsync_finalize_stake_casted >= u64::from(self.success_threshold) { + self.viewsync_finalize_vote_outcomes + .remove(&commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncFinalize(real_qc_sig)); + } + } + if *viewsync_precommit_stake_casted >= u64::from(self.failure_threshold) { + let real_qc_pp = ::get_public_parameter( + entries, + U256::from(self.failure_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + self.viewsync_precommit_vote_outcomes + .remove(&commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncPreCommit(real_qc_sig)); + } + Either::Left(self) + } +} diff --git a/utils/Cargo.toml b/utils/Cargo.toml new file mode 100644 index 0000000000..c438a33888 --- /dev/null +++ b/utils/Cargo.toml @@ -0,0 +1,10 @@ +[package] +authors = ["Espresso Systems "] +description = "Accompanying utilities used by hotshot." +edition = "2021" +name = "hotshot-utils" +readme = "../README.md" +version = "0.1.0" + +[dependencies] +bincode = { workspace = true } diff --git a/utils/src/bincode.rs b/utils/src/bincode.rs new file mode 100644 index 0000000000..6371179b86 --- /dev/null +++ b/utils/src/bincode.rs @@ -0,0 +1,28 @@ +#![allow(clippy::module_name_repetitions, clippy::type_complexity)] +use bincode::{ + config::{ + LittleEndian, RejectTrailing, VarintEncoding, WithOtherEndian, WithOtherIntEncoding, + WithOtherLimit, WithOtherTrailing, + }, + DefaultOptions, Options, +}; + +/// For the wire format, we use bincode with the following options: +/// - No upper size limit +/// - Litte endian encoding +/// - Varint encoding +/// - Reject trailing bytes +#[must_use] +pub fn bincode_opts() -> WithOtherTrailing< + WithOtherIntEncoding< + WithOtherEndian, LittleEndian>, + VarintEncoding, + >, + RejectTrailing, +> { + bincode::DefaultOptions::new() + .with_no_limit() + .with_little_endian() + .with_varint_encoding() + .reject_trailing_bytes() +} diff --git a/utils/src/lib.rs b/utils/src/lib.rs new file mode 100644 index 0000000000..325ab683b1 --- /dev/null +++ b/utils/src/lib.rs @@ -0,0 +1,13 @@ +//! Contains general utility structures and methods + +#![warn( + clippy::all, + clippy::pedantic, + rust_2018_idioms, + missing_docs, + clippy::missing_docs_in_private_items, + clippy::panic +)] + +/// Provides bincode options +pub mod bincode; diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml new file mode 100644 index 0000000000..827d5a5258 --- /dev/null +++ b/web_server/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "hotshot-web-server" +description = "HotShot web server" +version = "0.1.1" +readme = "README.md" +edition = "2021" + +[features] +default = ["demo"] +demo = ["hotshot-types/demo"] + +[dependencies] +ark-bls12-381 = { workspace = true } +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bincode = { workspace = true } +clap = { version = "4.0", features = ["derive", "env"], optional = false } +futures = { workspace = true } +libp2p-core = { version = "0.40.0", default-features = false } +hotshot-types = { path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } +jf-primitives = { workspace = true } +tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } +nll = { workspace = true } +tracing = { workspace = true } +rand = { workspace = true } +serde = { workspace = true } +serde_json = "1.0.96" +snafu = { workspace = true } +tide = { version = "0.16.0", default-features = false } +toml = { workspace = true } +portpicker = "0.1" +surf-disco = { workspace = true } + +[dev-dependencies] +hotshot-types = { path = "../types", default-features = false } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/web_server/README.md b/web_server/README.md new file mode 100644 index 0000000000..3d616c1bc6 --- /dev/null +++ b/web_server/README.md @@ -0,0 +1,14 @@ +# Web Server + +This crate implements a web server that HotShot can use to increase its throughput. The web server is designed to be run behind several Nginx instances for high performance. + +The run the web server: `cargo run --features=full-ci --example web_server`. +This will start the web server at `0.0.0.0:9000` + +## How it works +Unlike the other networking implementations in `HotShot` that use a *pushing* paradigm over a TCP connection, the web server uses a *pulling* paradigm over HTTP. In the older centralized server, for example, messages are pushed to nodes from the server. In contrast, in the web server nodes must poll (pull from) the server periodically to download the latest data. The direction of dataflow is reversed. This design allows us to take advantage of Nginx's powerful caching mode, which will improve performance in the large networks `HotShot` is designed for. Keep in mind that `HotShot` will also be using `Libp2p` in tandem with the web server. The web server allows high bandwidth through the network under optimistic conditions while `Libp2p` protects against centralization and censorship. + +The web server is built on [Tide Disco](https://github.com/EspressoSystems/tide-disco), an expansion on the popular [Tide](https://docs.rs/tide/0.16.0/tide/index.html) Rust web application framework. It has several endpoints specified in its `api.toml` file. These endpoints are what nodes use to either POST or GET messages. For example, a replica node will poll the web server periodically through the `proposal` endpoint to ask if there is a proposal for its current view. The server will respond with either an error if there is no proposal for that view yet, or with the proposal. It works similarly for transactions: Nodes can post transactions to the web server at any time. They can also download all the transactions they haven't received yet from the web server. + + + diff --git a/web_server/api.toml b/web_server/api.toml new file mode 100644 index 0000000000..5f749bcdb0 --- /dev/null +++ b/web_server/api.toml @@ -0,0 +1,139 @@ +[meta] +NAME = "hotshot_web_server" +DESCRIPTION = "Web server for HotShot" +FORMAT_VERSION = "0.1.0" + +# GET the proposal for a view, where the view is passed as an argument +[route.getproposal] +PATH = ["proposal/:view_number"] +":view_number" = "Integer" +DOC = """ +Return the proposal for a given view number +""" + +# POST a proposal, where the view is passed as an argument +[route.postproposal] +PATH = ["proposal/:view_number"] +METHOD = "POST" +":view_number" = "Integer" +DOC = """ +Post the proposal for a given view_number +""" + +# GET the DA certificate for a view, where the view is passed as an argument +[route.getcertificate] +PATH = ["certificate/:view_number"] +":view_number" = "Integer" +DOC = """ +Return the DA certificate for a given view number +""" + +# POST a DA certificate, where the view is passed as an argument +[route.postcertificate] +PATH = ["certificate/:view_number"] +METHOD = "POST" +":view_number" = "Integer" +DOC = """ +Post the DA certificate for a given view_number +""" + +# GET all the votes from a given index for a given view number +[route.getvotes] +PATH = ["votes/:view_number/:index"] +":view_number" = "Integer" +":index" = "Integer" +METHOD = "GET" +DOC = """ +Get all votes for a view number +""" + +# POST a vote, where the view number is passed as an argument +[route.postvote] +PATH = ["votes/:view_number"] +":view_number" = "Integer" +METHOD = "POST" +DOC = """ +Send a vote +""" + +# GET all transactions starting at :index +[route.gettransactions] +PATH = ["transactions/:index"] +":index" = "Integer" +METHOD = "GET" +DOC = """ +Get all transactions since given index +""" + + +# POST a transaction +[route.posttransaction] +PATH = ["transactions"] +METHOD = "POST" +DOC = """ +Post a transaction to the web server +""" + +# POST a transaction removal +[route.postcompletedtransaction] +PATH = ["transactionscomplet"] +METHOD = "POST" +DOC = """ +Post a transaction removal to the web server +""" + +# POST stake table +[route.poststaketable] +PATH = ["staketable"] +METHOD = "POST" +DOC = """ +Post the stake table to the web server +""" + +# POST secret proposal +[route.secret] +PATH = ["secret/:view_number/:secret"] +METHOD = "POST" +":view_number" = "Integer" +":secret" = "Literal" +DOC = """ +Secret path for leader to post proposal for a given view +""" + +# POST a view sync vote, where the view number is passed as an argument +[route.postviewsyncvote] +PATH = ["view_sync_vote/:view_number"] +":view_number" = "Integer" +METHOD = "POST" +DOC = """ +Send a view sync vote +""" + +# GET a view sync vote, where the view number is passed as an argument +[route.getviewsyncvotes] +PATH = ["view_sync_vote/:view_number/:index"] +":view_number" = "Integer" +":index" = "Integer" +METHOD = "GET" +DOC = """ +GET a view sync vote +""" + +# POST a view sync proposal, where the view number is passed as an argument +[route.postviewsyncproposal] +PATH = ["view_sync_proposal/:view_number"] +":view_number" = "Integer" +METHOD = "POST" +DOC = """ +Send a view sync vote +""" + +# GET a view sync certificate, where the view number is passed as an argument +[route.getviewsyncproposal] +PATH = ["view_sync_proposal/:view_number/:index"] +":view_number" = "Integer" +":index" = "Integer" +METHOD = "GET" +DOC = """ +GET a view sync proposal +""" diff --git a/web_server/src/config.rs b/web_server/src/config.rs new file mode 100644 index 0000000000..1da2781c55 --- /dev/null +++ b/web_server/src/config.rs @@ -0,0 +1,62 @@ +pub const DEFAULT_WEB_SERVER_PORT: u16 = 9000; +pub const DEFAULT_WEB_SERVER_DA_PORT: u16 = 9001; +pub const DEFAULT_WEB_SERVER_VIEW_SYNC_PORT: u16 = 9002; + +/// How many views to keep in memory +pub const MAX_VIEWS: usize = 25; +/// How many transactions to keep in memory +pub const MAX_TXNS: usize = 500; +/// How many transactions to return at once +pub const TX_BATCH_SIZE: u64 = 1; + +pub fn get_proposal_route(view_number: u64) -> String { + format!("api/proposal/{view_number}") +} + +pub fn post_proposal_route(view_number: u64) -> String { + format!("api/proposal/{view_number}") +} + +pub fn get_da_certificate_route(view_number: u64) -> String { + format!("api/certificate/{view_number}") +} + +pub fn post_da_certificate_route(view_number: u64) -> String { + format!("api/certificate/{view_number}") +} + +pub fn get_vote_route(view_number: u64, index: u64) -> String { + format!("api/votes/{view_number}/{index}") +} + +pub fn post_vote_route(view_number: u64) -> String { + format!("api/votes/{view_number}") +} + +pub fn get_transactions_route(index: u64) -> String { + format!("api/transactions/{index}") +} + +pub fn post_transactions_route() -> String { + "api/transactions".to_string() +} + +pub fn post_staketable_route() -> String { + "api/staketable".to_string() +} + +pub fn post_view_sync_proposal_route(view_number: u64) -> String { + format!("api/view_sync_proposal/{view_number}") +} + +pub fn get_view_sync_proposal_route(view_number: u64, index: u64) -> String { + format!("api/view_sync_proposal/{view_number}/{index}") +} + +pub fn post_view_sync_vote_route(view_number: u64) -> String { + format!("api/view_sync_vote/{view_number}") +} + +pub fn get_view_sync_vote_route(view_number: u64, index: u64) -> String { + format!("api/view_sync_vote/{view_number}/{index}") +} diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs new file mode 100644 index 0000000000..ef7850cbb0 --- /dev/null +++ b/web_server/src/lib.rs @@ -0,0 +1,654 @@ +pub mod config; + +use crate::config::{MAX_TXNS, MAX_VIEWS, TX_BATCH_SIZE}; +use async_compatibility_layer::channel::OneShotReceiver; +use async_lock::RwLock; +use clap::Args; +use futures::FutureExt; + +use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; +use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; +use std::{collections::HashMap, io, path::PathBuf}; +use tide_disco::{ + api::ApiError, + error::ServerError, + method::{ReadState, WriteState}, + Api, App, StatusCode, +}; +use tracing::{debug, info}; + +type State = RwLock>; +type Error = ServerError; + +/// State that tracks proposals and votes the server receives +/// Data is stored as a `Vec` to not incur overhead from deserializing +struct WebServerState { + /// view number -> (secret, proposal) + proposals: HashMap)>, + + view_sync_proposals: HashMap)>>, + + view_sync_proposal_index: HashMap, + /// view number -> (secret, da_certificates) + da_certificates: HashMap)>, + /// view for oldest proposals in memory + oldest_proposal: u64, + /// view for teh oldest DA certificate + oldest_certificate: u64, + + oldest_view_sync_proposal: u64, + /// view number -> Vec(index, vote) + votes: HashMap)>>, + + view_sync_votes: HashMap)>>, + /// view number -> highest vote index for that view number + vote_index: HashMap, + + view_sync_vote_index: HashMap, + /// view number of oldest votes in memory + oldest_vote: u64, + + oldest_view_sync_vote: u64, + + /// index -> transaction + // TODO ED Make indexable by hash of tx + transactions: HashMap>, + txn_lookup: HashMap, u64>, + /// highest transaction index + num_txns: u64, + + /// shutdown signal + shutdown: Option>, + /// stake table with leader keys + stake_table: Vec, + /// prng for generating endpoint + _prng: StdRng, +} + +impl WebServerState { + fn new() -> Self { + Self { + proposals: HashMap::new(), + da_certificates: HashMap::new(), + votes: HashMap::new(), + num_txns: 0, + oldest_vote: 0, + oldest_proposal: 0, + oldest_certificate: 0, + shutdown: None, + stake_table: Vec::new(), + vote_index: HashMap::new(), + transactions: HashMap::new(), + txn_lookup: HashMap::new(), + _prng: StdRng::from_entropy(), + view_sync_proposals: HashMap::new(), + view_sync_votes: HashMap::new(), + view_sync_vote_index: HashMap::new(), + oldest_view_sync_vote: 0, + oldest_view_sync_proposal: 0, + view_sync_proposal_index: HashMap::new(), + } + } + pub fn with_shutdown_signal(mut self, shutdown_listener: Option>) -> Self { + if self.shutdown.is_some() { + panic!("A shutdown signal is already registered and can not be registered twice"); + } + self.shutdown = shutdown_listener; + self + } +} + +/// Trait defining methods needed for the `WebServerState` +pub trait WebServerDataSource { + fn get_proposal(&self, view_number: u64) -> Result>>, Error>; + fn get_view_sync_proposal( + &self, + view_number: u64, + index: u64, + ) -> Result>>, Error>; + + fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; + fn get_view_sync_votes( + &self, + view_number: u64, + index: u64, + ) -> Result>>, Error>; + + #[allow(clippy::type_complexity)] + fn get_transactions(&self, index: u64) -> Result>)>, Error>; + fn get_da_certificate(&self, index: u64) -> Result>>, Error>; + fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + + fn post_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; + fn post_view_sync_proposal(&mut self, view_number: u64, proposal: Vec) + -> Result<(), Error>; + + fn post_da_certificate(&mut self, view_number: u64, cert: Vec) -> Result<(), Error>; + fn post_transaction(&mut self, txn: Vec) -> Result<(), Error>; + fn post_staketable(&mut self, key: Vec) -> Result<(), Error>; + fn post_completed_transaction(&mut self, block: Vec) -> Result<(), Error>; + fn post_secret_proposal(&mut self, _view_number: u64, _proposal: Vec) -> Result<(), Error>; + fn proposal(&self, view_number: u64) -> Option<(String, Vec)>; +} + +impl WebServerDataSource for WebServerState { + fn proposal(&self, view_number: u64) -> Option<(String, Vec)> { + self.proposals.get(&view_number).cloned() + } + /// Return the proposal the server has received for a particular view + fn get_proposal(&self, view_number: u64) -> Result>>, Error> { + match self.proposals.get(&view_number) { + Some(proposal) => { + if proposal.1.is_empty() { + Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("Proposal not found for view {view_number}"), + }) + } else { + Ok(Some(vec![proposal.1.clone()])) + } + } + None => Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("Proposal not found for view {view_number}"), + }), + } + } + + fn get_view_sync_proposal( + &self, + view_number: u64, + index: u64, + ) -> Result>>, Error> { + let proposals = self.view_sync_proposals.get(&view_number); + let mut ret_proposals = vec![]; + if let Some(cert) = proposals { + for i in index..*self.view_sync_proposal_index.get(&view_number).unwrap() { + ret_proposals.push(cert[i as usize].1.clone()); + } + } + if !ret_proposals.is_empty() { + Ok(Some(ret_proposals)) + } else { + Ok(None) + } + } + + /// Return all votes the server has received for a particular view from provided index to most recent + fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error> { + let votes = self.votes.get(&view_number); + let mut ret_votes = vec![]; + if let Some(votes) = votes { + for i in index..*self.vote_index.get(&view_number).unwrap() { + ret_votes.push(votes[i as usize].1.clone()); + } + } + if !ret_votes.is_empty() { + Ok(Some(ret_votes)) + } else { + Ok(None) + } + } + + fn get_view_sync_votes( + &self, + view_number: u64, + index: u64, + ) -> Result>>, Error> { + let votes = self.view_sync_votes.get(&view_number); + let mut ret_votes = vec![]; + if let Some(votes) = votes { + // error!("Passed in index is: {} self index is: {}", index, *self.vote_index.get(&view_number).unwrap()); + for i in index..*self.view_sync_vote_index.get(&view_number).unwrap() { + ret_votes.push(votes[i as usize].1.clone()); + } + } + if !ret_votes.is_empty() { + Ok(Some(ret_votes)) + } else { + Ok(None) + } + } + + #[allow(clippy::type_complexity)] + /// Return the transaction at the specified index (which will help with Nginx caching, but reduce performance otherwise) + /// In the future we will return batches of transactions + fn get_transactions(&self, index: u64) -> Result>)>, Error> { + let mut txns_to_return = vec![]; + + let lowest_in_memory_txs = if self.num_txns < MAX_TXNS.try_into().unwrap() { + 0 + } else { + self.num_txns as usize - MAX_TXNS + }; + + let starting_index = if (index as usize) < lowest_in_memory_txs { + lowest_in_memory_txs + } else { + index as usize + }; + + for idx in starting_index..=self.num_txns.try_into().unwrap() { + if let Some(txn) = self.transactions.get(&(idx as u64)) { + txns_to_return.push(txn.clone()) + } + if txns_to_return.len() >= TX_BATCH_SIZE as usize { + break; + } + } + + if !txns_to_return.is_empty() { + debug!("Returning this many txs {}", txns_to_return.len()); + //starting_index is the oldest index of the returned txns + Ok(Some((starting_index as u64, txns_to_return))) + } else { + Err(ServerError { + // TODO ED: Why does NoContent status code cause errors? + status: StatusCode::NotImplemented, + message: format!("Transaction not found for index {index}"), + }) + } + } + + /// Return the da certificate the server has received for a particular view + fn get_da_certificate(&self, index: u64) -> Result>>, Error> { + match self.da_certificates.get(&index) { + Some(cert) => { + if cert.1.is_empty() { + Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("DA Certificate not found for view {index}"), + }) + } else { + Ok(Some(vec![cert.1.clone()])) + } + } + None => Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("Proposal not found for view {index}"), + }), + } + } + + /// Stores a received vote in the `WebServerState` + fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { + // Only keep vote history for MAX_VIEWS number of views + if self.votes.len() >= MAX_VIEWS { + self.votes.remove(&self.oldest_vote); + while !self.votes.contains_key(&self.oldest_vote) { + self.oldest_vote += 1; + } + } + let highest_index = self.vote_index.entry(view_number).or_insert(0); + self.votes + .entry(view_number) + .and_modify(|current_votes| current_votes.push((*highest_index, vote.clone()))) + .or_insert_with(|| vec![(*highest_index, vote)]); + self.vote_index + .entry(view_number) + .and_modify(|index| *index += 1); + Ok(()) + } + + fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { + // Only keep vote history for MAX_VIEWS number of views + if self.view_sync_votes.len() >= MAX_VIEWS { + self.view_sync_votes.remove(&self.oldest_view_sync_vote); + while !self + .view_sync_votes + .contains_key(&self.oldest_view_sync_vote) + { + self.oldest_view_sync_vote += 1; + } + } + let highest_index = self.view_sync_vote_index.entry(view_number).or_insert(0); + self.view_sync_votes + .entry(view_number) + .and_modify(|current_votes| current_votes.push((*highest_index, vote.clone()))) + .or_insert_with(|| vec![(*highest_index, vote)]); + self.view_sync_vote_index + .entry(view_number) + .and_modify(|index| *index += 1); + Ok(()) + } + /// Stores a received proposal in the `WebServerState` + fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { + debug!("Received proposal for view {}", view_number); + + // Only keep proposal history for MAX_VIEWS number of view + if self.proposals.len() >= MAX_VIEWS { + self.proposals.remove(&self.oldest_proposal); + while !self.proposals.contains_key(&self.oldest_proposal) { + self.oldest_proposal += 1; + } + } + self.proposals + .entry(view_number) + .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut proposal)) + .or_insert_with(|| (String::new(), proposal)); + Ok(()) + } + + fn post_view_sync_proposal( + &mut self, + view_number: u64, + proposal: Vec, + ) -> Result<(), Error> { + // Only keep proposal history for MAX_VIEWS number of view + if self.view_sync_proposals.len() >= MAX_VIEWS { + self.view_sync_proposals.remove(&self.oldest_view_sync_vote); + while !self + .view_sync_proposals + .contains_key(&self.oldest_view_sync_proposal) + { + self.oldest_view_sync_proposal += 1; + } + } + let highest_index = self + .view_sync_proposal_index + .entry(view_number) + .or_insert(0); + self.view_sync_proposals + .entry(view_number) + .and_modify(|current_props| current_props.push((*highest_index, proposal.clone()))) + .or_insert_with(|| vec![(*highest_index, proposal)]); + self.view_sync_proposal_index + .entry(view_number) + .and_modify(|index| *index += 1); + Ok(()) + } + + /// Stores a received DA certificate in the `WebServerState` + fn post_da_certificate(&mut self, view_number: u64, mut cert: Vec) -> Result<(), Error> { + debug!("Received DA Certificate for view {}", view_number); + + // Only keep proposal history for MAX_VIEWS number of view + if self.da_certificates.len() >= MAX_VIEWS { + self.da_certificates.remove(&self.oldest_certificate); + while !self.da_certificates.contains_key(&self.oldest_certificate) { + self.oldest_certificate += 1; + } + } + self.da_certificates + .entry(view_number) + .and_modify(|(_, empty_cert)| empty_cert.append(&mut cert)) + .or_insert_with(|| (String::new(), cert)); + Ok(()) + } + /// Stores a received group of transactions in the `WebServerState` + fn post_transaction(&mut self, txn: Vec) -> Result<(), Error> { + if self.transactions.len() >= MAX_TXNS { + let old_txn = self.transactions.remove(&(self.num_txns - MAX_TXNS as u64)); + if let Some(old_txn) = old_txn { + self.txn_lookup.remove(&old_txn); + } + } + self.txn_lookup.insert(txn.clone(), self.num_txns); + self.transactions.insert(self.num_txns, txn); + self.num_txns += 1; + + debug!( + "Received transaction! Number of transactions received is: {}", + self.num_txns + ); + + Ok(()) + } + + fn post_staketable(&mut self, key: Vec) -> Result<(), Error> { + // KALEY TODO: need security checks here + let new_key = KEY::from_bytes(&(EncodedPublicKey(key))); + if let Some(new_key) = new_key { + let node_index = self.stake_table.len() as u64; + //generate secret for leader's first submission endpoint when key is added + let secret = thread_rng() + .sample_iter(&Alphanumeric) + .take(30) + .map(char::from) + .collect(); + self.proposals.insert(node_index, (secret, Vec::new())); + self.stake_table.push(new_key); + Ok(()) + } else { + Err(ServerError { + status: StatusCode::BadRequest, + message: "Only signature keys can be added to stake table".to_string(), + }) + } + } + + fn post_completed_transaction(&mut self, txn: Vec) -> Result<(), Error> { + if let Some(idx) = self.txn_lookup.remove(&txn) { + self.transactions.remove(&idx); + Ok(()) + } else { + Err(ServerError { + status: StatusCode::BadRequest, + message: "Transaction Not Found".to_string(), + }) + } + } + + //KALEY TODO: this will be merged with post_proposal once it is fully working, + //but keeping it separate to not break things in the meantime + fn post_secret_proposal( + &mut self, + view_number: u64, + mut proposal: Vec, + ) -> Result<(), Error> { + debug!("Received proposal for view {}", view_number); + + // Only keep proposal history for MAX_VIEWS number of views + if self.proposals.len() >= MAX_VIEWS { + self.proposals.remove(&self.oldest_proposal); + while !self.proposals.contains_key(&self.oldest_proposal) { + self.oldest_proposal += 1; + } + } + self.proposals + .entry(view_number) + .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut proposal)); + + //generate new secret for the next time this node is leader + let secret = thread_rng() + .sample_iter(&Alphanumeric) + .take(30) + .map(char::from) + .collect(); + let next_view_for_leader = view_number + self.stake_table.len() as u64; + self.proposals + .insert(next_view_for_leader, (secret, Vec::new())); + Ok(()) + } +} + +#[derive(Args, Default)] +pub struct Options { + #[arg(long = "web-server-api-path", env = "WEB_SERVER_API_PATH")] + pub api_path: Option, +} + +/// Sets up all API routes +fn define_api(options: &Options) -> Result, ApiError> +where + State: 'static + Send + Sync + ReadState + WriteState, + ::State: Send + Sync + WebServerDataSource, + KEY: SignatureKey, +{ + let mut api = match &options.api_path { + Some(path) => Api::::from_file(path)?, + None => { + let toml = toml::from_str(include_str!("../api.toml")).map_err(|err| { + ApiError::CannotReadToml { + reason: err.to_string(), + } + })?; + Api::::new(toml)? + } + }; + api.get("getproposal", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + state.get_proposal(view_number) + } + .boxed() + })? + .get("getviewsyncproposal", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let index: u64 = req.integer_param("index")?; + state.get_view_sync_proposal(view_number, index) + } + .boxed() + })? + .get("getcertificate", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + state.get_da_certificate(view_number) + } + .boxed() + })? + .get("getvotes", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let index: u64 = req.integer_param("index")?; + state.get_votes(view_number, index) + } + .boxed() + })? + .get("getviewsyncvotes", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let index: u64 = req.integer_param("index")?; + state.get_view_sync_votes(view_number, index) + } + .boxed() + })? + .get("gettransactions", |req, state| { + async move { + let index: u64 = req.integer_param("index")?; + state.get_transactions(index) + } + .boxed() + })? + .post("postvote", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically + let vote = req.body_bytes(); + state.post_vote(view_number, vote) + } + .boxed() + })? + .post("postviewsyncvote", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically + let vote = req.body_bytes(); + state.post_view_sync_vote(view_number, vote) + } + .boxed() + })? + .post("postproposal", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let proposal = req.body_bytes(); + state.post_proposal(view_number, proposal) + } + .boxed() + })? + .post("postviewsyncproposal", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let proposal = req.body_bytes(); + state.post_view_sync_proposal(view_number, proposal) + } + .boxed() + })? + .post("postcertificate", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let cert = req.body_bytes(); + state.post_da_certificate(view_number, cert) + } + .boxed() + })? + .post("posttransaction", |req, state| { + async move { + let txns = req.body_bytes(); + state.post_transaction(txns) + } + .boxed() + })? + .post("poststaketable", |req, state| { + async move { + //works one key at a time for now + let key = req.body_bytes(); + state.post_staketable(key) + } + .boxed() + })? + .post("postcompletedtransaction", |req, state| { + async move { + //works one txn at a time for now + let txn = req.body_bytes(); + state.post_completed_transaction(txn) + } + .boxed() + })? + .post("secret", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let secret: &str = req.string_param("secret")?; + //if secret is correct and view_number->proposal is empty, proposal is valid + if let Some(prop) = state.proposal(view_number) { + if prop.1.is_empty() { + if prop.0 == secret { + let proposal = req.body_bytes(); + state.post_secret_proposal(view_number, proposal) + } else { + Err(ServerError { + status: StatusCode::BadRequest, + message: format!( + "Wrong secret value for proposal for view {:?}", + view_number + ), + }) + } + } else { + Err(ServerError { + status: StatusCode::BadRequest, + message: format!("Proposal already submitted for view {:?}", view_number), + }) + } + } else { + Err(ServerError { + status: StatusCode::BadRequest, + message: format!("No endpoint for view number {} yet", view_number), + }) + } + } + .boxed() + })?; + Ok(api) +} + +pub async fn run_web_server( + shutdown_listener: Option>, + port: u16, +) -> io::Result<()> { + let options = Options::default(); + + let api = define_api(&options).unwrap(); + let state = State::new(WebServerState::new().with_shutdown_signal(shutdown_listener)); + let mut app = App::, Error>::with_state(state); + + app.register_module("api", api).unwrap(); + + let app_future = app.serve(format!("http://0.0.0.0:{port}")); + + info!("Web server started on port {port}"); + + app_future.await +} From e7e52533d8145c09765e53c057f9075a569a558f Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 3 Sep 2023 18:24:43 -0400 Subject: [PATCH 0029/1393] feat: cleanup --- consensus/Cargo.toml | 33 - consensus/src/da_member.rs | 214 ------ consensus/src/leader.rs | 29 - consensus/src/lib.rs | 338 --------- consensus/src/next_leader.rs | 26 - consensus/src/replica.rs | 0 consensus/src/sequencing_leader.rs | 555 --------------- consensus/src/sequencing_replica.rs | 642 ------------------ consensus/src/traits.rs | 171 ----- consensus/src/utils.rs | 82 --- hotshot-qc/src/snarked/circuit.rs | 4 +- hotshot-stake-table/src/mt_based/internal.rs | 23 +- hotshot/Cargo.toml | 1 - hotshot/src/lib.rs | 549 +-------------- hotshot/src/traits/networking.rs | 2 +- .../src/traits/networking/memory_network.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 1 + task/src/global_registry.rs | 2 +- testing/src/node_types.rs | 4 + types/src/consensus.rs | 2 + types/src/data.rs | 2 + types/src/traits/stake_table.rs | 2 +- 22 files changed, 27 insertions(+), 2657 deletions(-) delete mode 100644 consensus/Cargo.toml delete mode 100644 consensus/src/da_member.rs delete mode 100644 consensus/src/leader.rs delete mode 100644 consensus/src/lib.rs delete mode 100644 consensus/src/next_leader.rs delete mode 100644 consensus/src/replica.rs delete mode 100644 consensus/src/sequencing_leader.rs delete mode 100644 consensus/src/sequencing_replica.rs delete mode 100644 consensus/src/traits.rs delete mode 100644 consensus/src/utils.rs diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml deleted file mode 100644 index 157be985ea..0000000000 --- a/consensus/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "hotshot-consensus" -version = "0.1.0" -edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[features] -default = [] - -[dependencies] -async-compatibility-layer = { workspace = true } -async-lock = { workspace = true } -async-trait = { workspace = true } -# TODO ed: Delete this dependency after https://github.com/EspressoSystems/HotShot/issues/614 is finished -bincode = { workspace = true } -blake3 = { workspace = true, features = ["traits-preview"] } -commit = { workspace = true } -custom_debug = { workspace = true } -derivative = "2.2" -either = { workspace = true } -futures = { workspace = true } -hotshot-types = { path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -snafu = { workspace = true } -tracing = { workspace = true } -time = { workspace = true } -bitvec = { workspace = true } -jf-primitives = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } diff --git a/consensus/src/da_member.rs b/consensus/src/da_member.rs deleted file mode 100644 index 977a7e8fc1..0000000000 --- a/consensus/src/da_member.rs +++ /dev/null @@ -1,214 +0,0 @@ -//! Contains the [`DAMember`] struct used for the committee member step in the consensus algorithm -//! with DA committee, i.e. in the sequencing consensus. - -use crate::{ - utils::{View, ViewInner}, - Consensus, SequencingConsensusApi, -}; -use async_compatibility_layer::channel::UnboundedReceiver; -use async_lock::{Mutex, RwLock}; -use commit::Committable; -use either::{Left, Right}; -use hotshot_types::{ - certificate::QuorumCertificate, - data::SequencingLeaf, - message::{ - ConsensusMessageType, ProcessedCommitteeConsensusMessage, ProcessedGeneralConsensusMessage, - ProcessedSequencingMessage, SequencingMessage, - }, - traits::{ - election::{CommitteeExchangeType, ConsensusExchange}, - node_implementation::{ - CommitteeEx, CommitteeProposalType, CommitteeVote, NodeImplementation, NodeType, - }, - signature_key::SignatureKey, - }, -}; -use std::{marker::PhantomData, sync::Arc}; -use tracing::{error, info, instrument, warn}; - -/// This view's DA committee member. -#[derive(Debug, Clone)] -pub struct DAMember< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, -> { - /// ID of node. - pub id: u64, - /// Reference to consensus. DA committee member will require a write lock on this. - pub consensus: Arc>>>, - /// Channel for accepting leader proposals and timeouts messages. - #[allow(clippy::type_complexity)] - pub proposal_collection_chan: - Arc>>>, - /// View number this view is executing in. - pub cur_view: TYPES::Time, - /// The High QC. - pub high_qc: QuorumCertificate>, - /// HotShot consensus API. - pub api: A, - - /// the committee exchange - pub exchange: Arc>, - - /// needed for type checking - pub _pd: PhantomData, -} - -impl< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - > DAMember -{ - /// DA committee member task that spins until a valid DA proposal can be signed or timeout is - /// hit. - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Member Task", level = "error")] - #[allow(clippy::type_complexity)] - async fn find_valid_msg<'a>( - &self, - view_leader_key: TYPES::SignatureKey, - ) -> Option { - let lock = self.proposal_collection_chan.lock().await; - let leaf = loop { - let msg = lock.recv().await; - info!("recv-ed message {:?}", msg.clone()); - if let Ok(msg) = msg { - // If the message is for a different view number, skip it. - if Into::>::into(msg.clone()).view_number() != self.cur_view - { - continue; - } - match msg { - Left(general_message) => { - match general_message { - ProcessedGeneralConsensusMessage::InternalTrigger(_trigger) => { - warn!("DA committee member receieved an internal trigger message. This is not what the member expects. Skipping."); - // After run_view refactor we will handle timeout triggers properly - return None; - } - ProcessedGeneralConsensusMessage::Vote(_, _) => { - // Should only be for DA leader, never member. - warn!("DA committee member receieved a vote message. This is not what the member expects. Skipping."); - continue; - } - ProcessedGeneralConsensusMessage::Proposal(_, _) => { - warn!("DA committee member receieved a Non DA Proposal message. This is not what the member expects. Skipping."); - continue; - } - ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => { - todo!() - } - ProcessedGeneralConsensusMessage::ViewSyncVote(_) => { - todo!() - } - } - } - Right(committee_message) => { - match committee_message { - ProcessedCommitteeConsensusMessage::DAProposal(p, sender) => { - if view_leader_key != sender { - continue; - } - let block_commitment = p.data.deltas.commit(); - if !view_leader_key - .validate(&p.signature, block_commitment.as_ref()) - { - warn!(?p.signature, "Could not verify proposal."); - continue; - } - let vote_token = self.exchange.make_vote_token(self.cur_view); - match vote_token { - Err(e) => { - error!( - "Failed to generate vote token for {:?} {:?}", - self.cur_view, e - ); - } - Ok(None) => { - info!( - "We were not chosen for DA committee on {:?}", - self.cur_view - ); - } - Ok(Some(vote_token)) => { - info!( - "We were chosen for DA committee on {:?}", - self.cur_view - ); - - // Generate and send vote - let message = self.exchange.create_da_message( - block_commitment, - self.cur_view, - vote_token, - ); - - info!("Sending vote to the leader {:?}", message); - - let consensus = self.consensus.read().await; - if self.api.send_direct_da_message::, CommitteeVote>(sender, SequencingMessage(Right(message))).await.is_err() { - consensus.metrics.failed_to_send_messages.add(1); - warn!("Failed to send vote to the leader"); - } else { - consensus.metrics.outgoing_direct_messages.add(1); - } - } - } - break p.data.deltas; - } - ProcessedCommitteeConsensusMessage::DAVote(_, _) => { - // Should only be for DA leader, never member. - warn!("DA committee member receieved a vote message. This is not what the member expects. Skipping."); - continue; - } - ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { - continue; - } - } - } - } - } - // fall through logic if we did not receive successfully from channel - warn!("DA committee member did not receive successfully from channel."); - return None; - }; - Some(leaf) - } - - /// Run one view of DA committee member. - #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "DA Member Task", level = "error")] - pub async fn run_view(self) -> QuorumCertificate> { - info!("DA Committee Member task started!"); - let view_leader_key = self.exchange.get_leader(self.cur_view); - - let maybe_block = self.find_valid_msg(view_leader_key).await; - - if let Some(block) = maybe_block { - let mut consensus = self.consensus.write().await; - - // Ensure this view is in the view map for garbage collection, but do not overwrite if - // there is already a view there: the replica task may have inserted a `Leaf` view which - // contains strictly more information. - consensus.state_map.entry(self.cur_view).or_insert(View { - view_inner: ViewInner::DA { - block: block.commit(), - }, - }); - - // Record the block we have promised to make available. - consensus.saved_blocks.insert(block); - }; - - self.high_qc - } -} diff --git a/consensus/src/leader.rs b/consensus/src/leader.rs deleted file mode 100644 index b7872eafe2..0000000000 --- a/consensus/src/leader.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Contains the [`ValidatingLeader`] struct used for the leader step in the hotstuff consensus algorithm. - -use crate::{CommitmentMap, Consensus}; -use async_compatibility_layer::{ - art::{async_sleep, async_timeout}, - async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, -}; -use async_lock::RwLock; -use commit::Committable; -use hotshot_types::message::Message; -use hotshot_types::{ - certificate::QuorumCertificate, - data::{ValidatingLeaf, ValidatingProposal}, - message::GeneralConsensusMessage, - traits::{ - consensus_type::validating_consensus::ValidatingConsensus, - election::SignedCertificate, - node_implementation::{NodeImplementation, NodeType, QuorumProposalType, QuorumVoteType}, - signature_key::SignatureKey, - Block, State, - }, -}; -use hotshot_types::{ - message::Proposal, - traits::election::{ConsensusExchange, QuorumExchangeType}, -}; -use std::marker::PhantomData; -use std::{sync::Arc, time::Instant}; -use tracing::{error, info, instrument, warn}; diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs deleted file mode 100644 index 425f0b1b25..0000000000 --- a/consensus/src/lib.rs +++ /dev/null @@ -1,338 +0,0 @@ -//! Provides the core consensus types - -pub use crate::traits::node_implementation::ViewQueue; -pub use crate::utils::{View, ViewInner}; -use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; -use std::collections::HashSet; - -use crate::utils::Terminator; -use crate::{ - certificate::QuorumCertificate, - data::LeafType, - error::HotShotError, - traits::{ - metrics::{Counter, Gauge, Histogram, Metrics}, - node_implementation::NodeType, - }, -}; -use commit::{Commitment, Committable}; -use derivative::Derivative; -use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, - sync::Arc, -}; -use tracing::error; - -/// A type alias for `HashMap, T>` -type CommitmentMap = HashMap, T>; - -/// A reference to the consensus algorithm -/// -/// This will contain the state of all rounds. -#[derive(custom_debug::Debug)] -pub struct Consensus> { - /// The phases that are currently loaded in memory - // TODO(https://github.com/EspressoSystems/hotshot/issues/153): Allow this to be loaded from `Storage`? - pub state_map: BTreeMap>, - - /// cur_view from pseudocode - pub cur_view: TYPES::Time, - - /// last view had a successful decide event - pub last_decided_view: TYPES::Time, - - /// A list of undecided transactions - pub transactions: Arc>>, - - /// A list of transactions we've seen decided, but didn't receive - pub seen_transactions: HashSet>, - - /// Map of leaf hash -> leaf - /// - contains undecided leaves - /// - includes the MOST RECENT decided leaf - pub saved_leaves: CommitmentMap, - - /// Saved blocks - /// - /// Contains the full block for every leaf in `saved_leaves` if that block is available. - pub saved_blocks: BlockStore, - - /// The `locked_qc` view number - pub locked_view: TYPES::Time, - - /// the highqc per spec - pub high_qc: QuorumCertificate, - - /// A reference to the metrics trait - #[debug(skip)] - pub metrics: Arc, - - /// Amount of invalid QCs we've seen since the last commit - /// Used for metrics. This resets to 0 on every decide event. - pub invalid_qc: usize, -} - -/// The metrics being collected for the consensus algorithm -pub struct ConsensusMetrics { - /// The current view - pub current_view: Box, - /// The duration to collect votes in a view (only applies when this insance is the leader) - pub vote_validate_duration: Box, - /// The duration we waited for txns before building the proposal - pub proposal_wait_duration: Box, - /// The duration to build the proposal - pub proposal_build_duration: Box, - /// The duration of each view, in seconds - pub view_duration: Box, - /// Number of views that are in-flight since the last committed view - pub number_of_views_since_last_commit: Box, - /// Number of views that are in-flight since the last anchor view - pub number_of_views_per_decide_event: Box, - /// Number of invalid QCs between anchors - pub invalid_qc_views: Box, - /// Number of views that were discarded since from one achor to the next - pub discarded_views_per_decide_event: Box, - /// Views where no proposal was seen from one anchor to the next - pub empty_views_per_decide_event: Box, - /// Number of rejected transactions - pub rejected_transactions: Box, - /// Number of outstanding transactions - pub outstanding_transactions: Box, - /// Memory size in bytes of the serialized transactions still outstanding - pub outstanding_transactions_memory_size: Box, - /// Number of views that timed out - pub number_of_timeouts: Box, - /// Total direct messages this node sent out - pub outgoing_direct_messages: Box, - /// Total broadcasts sent - pub outgoing_broadcast_messages: Box, - /// Total messages received - pub direct_messages_received: Box, - /// Total broadcast messages received - pub broadcast_messages_received: Box, - /// Total number of messages which couldn't be sent - pub failed_to_send_messages: Box, -} - -impl ConsensusMetrics { - /// Create a new instance of this [`ConsensusMetrics`] struct, setting all the counters and gauges - #[must_use] - pub fn new(metrics: &dyn Metrics) -> Self { - Self { - current_view: metrics.create_gauge(String::from("current_view"), None), - vote_validate_duration: metrics.create_histogram( - String::from("vote_validate_duration"), - Some(String::from("seconds")), - ), - proposal_build_duration: metrics.create_histogram( - String::from("proposal_build_duration"), - Some(String::from("seconds")), - ), - proposal_wait_duration: metrics.create_histogram( - String::from("proposal_wait_duration"), - Some(String::from("seconds")), - ), - view_duration: metrics - .create_histogram(String::from("view_duration"), Some(String::from("seconds"))), - number_of_views_since_last_commit: metrics - .create_gauge(String::from("number_of_views_since_last_commit"), None), - number_of_views_per_decide_event: metrics - .create_histogram(String::from("number_of_views_per_decide_event"), None), - invalid_qc_views: metrics.create_histogram(String::from("invalid_qc_views"), None), - discarded_views_per_decide_event: metrics - .create_histogram(String::from("discarded_views_per_decide_event"), None), - empty_views_per_decide_event: metrics - .create_histogram(String::from("empty_views_per_decide_event"), None), - rejected_transactions: metrics - .create_counter(String::from("rejected_transactions"), None), - outstanding_transactions: metrics - .create_gauge(String::from("outstanding_transactions"), None), - outstanding_transactions_memory_size: metrics - .create_gauge(String::from("outstanding_transactions_memory_size"), None), - outgoing_direct_messages: metrics - .create_counter(String::from("outgoing_direct_messages"), None), - outgoing_broadcast_messages: metrics - .create_counter(String::from("outgoing_broadcast_messages"), None), - direct_messages_received: metrics - .create_counter(String::from("direct_messages_received"), None), - broadcast_messages_received: metrics - .create_counter(String::from("broadcast_messages_received"), None), - failed_to_send_messages: metrics - .create_counter(String::from("failed_to_send_messages"), None), - number_of_timeouts: metrics - .create_counter(String::from("number_of_views_timed_out"), None), - } - } -} - -impl> Consensus { - /// increment the current view - /// NOTE may need to do gc here - pub fn increment_view(&mut self) -> TYPES::Time { - self.cur_view += 1; - self.cur_view - } - - /// gather information from the parent chain of leafs - /// # Errors - /// If the leaf or its ancestors are not found in storage - pub fn visit_leaf_ancestors( - &self, - start_from: TYPES::Time, - terminator: Terminator, - ok_when_finished: bool, - mut f: F, - ) -> Result<(), HotShotError> - where - F: FnMut(&LEAF) -> bool, - { - let mut next_leaf = if let Some(view) = self.state_map.get(&start_from) { - view.get_leaf_commitment() - .ok_or_else(|| HotShotError::InvalidState { - context: format!( - "Visited failed view {start_from:?} leaf. Expected successfuil leaf" - ), - })? - } else { - return Err(HotShotError::InvalidState { - context: format!("View {start_from:?} leaf does not exist in state map "), - }); - }; - - while let Some(leaf) = self.saved_leaves.get(&next_leaf) { - if let Terminator::Exclusive(stop_before) = terminator { - if stop_before == leaf.get_view_number() { - if ok_when_finished { - return Ok(()); - } - break; - } - } - next_leaf = leaf.get_parent_commitment(); - if !f(leaf) { - return Ok(()); - } - if let Terminator::Inclusive(stop_after) = terminator { - if stop_after == leaf.get_view_number() { - if ok_when_finished { - return Ok(()); - } - break; - } - } - } - Err(HotShotError::LeafNotFound {}) - } - - /// garbage collects based on state change - /// right now, this removes from both the `saved_blocks` - /// and `state_map` fields of `Consensus` - #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn collect_garbage( - &mut self, - old_anchor_view: TYPES::Time, - new_anchor_view: TYPES::Time, - ) { - // state check - let anchor_entry = self - .state_map - .iter() - .next() - .expect("INCONSISTENT STATE: anchor leaf not in state map!"); - if *anchor_entry.0 != old_anchor_view { - error!( - "Something about GC has failed. Older leaf exists than the previous anchor leaf." - ); - } - // perform gc - self.state_map - .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_block_commitment()) - .for_each(|block| { - self.saved_blocks.remove(block); - }); - self.state_map - .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_leaf_commitment()) - .for_each(|leaf| { - if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_blocks.remove(removed.get_deltas_commitment()); - } - }); - self.state_map = self.state_map.split_off(&new_anchor_view); - } - - /// return a clone of the internal storage of unclaimed transactions - #[must_use] - pub fn get_transactions(&self) -> Arc>> { - self.transactions.clone() - } - - /// Gets the last decided state - /// # Panics - /// if the last decided view's state does not exist in the state map - /// this should never happen. - #[must_use] - pub fn get_decided_leaf(&self) -> LEAF { - let decided_view_num = self.last_decided_view; - let view = self.state_map.get(&decided_view_num).unwrap(); - let leaf = view - .get_leaf_commitment() - .expect("Decided state not found! Consensus internally inconsistent"); - self.saved_leaves.get(&leaf).unwrap().clone() - } -} - -/// Mapping from block commitments to full blocks. -/// -/// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the -/// same block, and the block will only be deleted after _all_ such objects are garbage collected. -/// For example, multiple leaves may temporarily reference the same block on different branches, -/// before all but one branch are ultimately garbage collected. -#[derive(Clone, Debug, Derivative)] -#[derivative(Default(bound = ""))] -pub struct BlockStore(HashMap, (BLOCK, u64)>); - -impl BlockStore { - /// Save `block` for later retrieval. - /// - /// After calling this function, and before the corresponding call to [`remove`](Self::remove), - /// `self.get(block.commit())` will return `Some(block)`. - /// - /// This function will increment a reference count on the saved block, so that multiple calls to - /// [`insert`](Self::insert) for the same block result in multiple owning references to the - /// block. [`remove`](Self::remove) must be called once for each reference before the block will - /// be deallocated. - pub fn insert(&mut self, block: BLOCK) { - self.0 - .entry(block.commit()) - .and_modify(|(_, refcount)| *refcount += 1) - .or_insert((block, 1)); - } - - /// Get a saved block, if available. - /// - /// If a block has been saved with [`insert`](Self::insert), this function will retrieve it. It - /// may return [`None`] if a block with the given commitment has not been saved or if the block - /// has been dropped with [`remove`](Self::remove). - #[must_use] - pub fn get(&self, block: Commitment) -> Option<&BLOCK> { - self.0.get(&block).map(|(block, _)| block) - } - - /// Drop a reference to a saved block. - /// - /// If the block exists and this call drops the last reference to it, the block will be - /// returned. Otherwise, the return value is [`None`]. - pub fn remove(&mut self, block: Commitment) -> Option { - if let Entry::Occupied(mut e) = self.0.entry(block) { - let (_, refcount) = e.get_mut(); - *refcount -= 1; - if *refcount == 0 { - let (block, _) = e.remove(); - return Some(block); - } - } - None - } -} diff --git a/consensus/src/next_leader.rs b/consensus/src/next_leader.rs deleted file mode 100644 index abd447a6ff..0000000000 --- a/consensus/src/next_leader.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Contains the [`NextValidatingLeader`] struct used for the next leader step in the hotstuff consensus algorithm. - -use crate::ConsensusMetrics; -use async_compatibility_layer::channel::UnboundedReceiver; -use async_lock::Mutex; -use either::Either; -use hotshot_types::data::ValidatingLeaf; -use hotshot_types::message::Message; -use hotshot_types::message::ProcessedGeneralConsensusMessage; -use hotshot_types::traits::election::ConsensusExchange; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; -use hotshot_types::traits::signature_key::SignatureKey; -use hotshot_types::vote::VoteAccumulator; -use hotshot_types::{ - certificate::QuorumCertificate, - message::{ConsensusMessageType, InternalTrigger}, - traits::consensus_type::validating_consensus::ValidatingConsensus, - vote::QuorumVote, -}; -use std::marker::PhantomData; -use std::time::Instant; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; -use tracing::{info, instrument, warn}; diff --git a/consensus/src/replica.rs b/consensus/src/replica.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/consensus/src/sequencing_leader.rs b/consensus/src/sequencing_leader.rs deleted file mode 100644 index 99cd6a438f..0000000000 --- a/consensus/src/sequencing_leader.rs +++ /dev/null @@ -1,555 +0,0 @@ -//! Contains the [`DALeader`], [`ConsensusLeader`] and [`ConsensusNextLeader`] structs used for the -//! leader steps in the consensus algorithm with DA committee, i.e. in the sequencing consensus. - -use crate::{CommitmentMap, Consensus, SequencingConsensusApi}; -use async_compatibility_layer::{ - art::async_timeout, - async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, - channel::UnboundedReceiver, -}; -use async_lock::{Mutex, RwLock}; -use bitvec::prelude::*; -use commit::{Commitment, Committable}; -use either::{Either, Left, Right}; -use hotshot_types::{ - certificate::{AssembledSignature, DACertificate, QuorumCertificate}, - data::{DAProposal, QuorumProposal, SequencingLeaf}, - message::{ - CommitteeConsensusMessage, ConsensusMessageType, GeneralConsensusMessage, InternalTrigger, - Message, ProcessedCommitteeConsensusMessage, ProcessedGeneralConsensusMessage, - ProcessedSequencingMessage, Proposal, SequencingMessage, - }, - traits::{ - election::{ - CommitteeExchangeType, ConsensusExchange, QuorumExchangeType, SignedCertificate, - }, - node_implementation::{ - CommitteeEx, NodeImplementation, NodeType, QuorumProposalType, QuorumVoteType, - SequencingQuorumEx, - }, - signature_key::SignatureKey, - state::State, - Block, - }, - vote::{QuorumVote, VoteAccumulator}, -}; -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, - num::NonZeroU64, - sync::Arc, - time::Instant, -}; -use tracing::{error, info, instrument, warn}; -/// This view's DA committee leader -#[derive(Debug, Clone)] -pub struct DALeader< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, -> { - /// id of node - pub id: u64, - /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, - /// The `high_qc` per spec - pub high_qc: QuorumCertificate>, - /// The view number we're running on - pub cur_view: TYPES::Time, - /// Lock over the transactions list - pub transactions: Arc>>, - /// Limited access to the consensus protocol - pub api: A, - - /// the committee exchange - pub committee_exchange: Arc>, - /// the quorum exchange - pub quorum_exchange: Arc>, - /// channel through which the leader collects votes - #[allow(clippy::type_complexity)] - pub vote_collection_chan: Arc>>>, - /// needed to typecheck - pub _pd: PhantomData, -} -impl< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - > DALeader -where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = TYPES::BlockType, - >, -{ - /// Accumulate votes for a proposal and return either the cert or None if the threshold was not reached in time - async fn wait_for_votes( - &self, - cur_view: TYPES::Time, - threshold: NonZeroU64, - total_nodes_num: usize, - block_commitment: Commitment<::BlockType>, - ) -> Option> { - let lock = self.vote_collection_chan.lock().await; - let mut accumulator = VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), - success_threshold: threshold, - failure_threshold: threshold, - sig_lists: Vec::new(), - signers: bitvec![0; total_nodes_num], - }; - - while let Ok(msg) = lock.recv().await { - if Into::>::into(msg.clone()).view_number() != cur_view { - continue; - } - match msg { - Left(general_message) => match general_message { - ProcessedGeneralConsensusMessage::Vote(_vote, _sender) => { - warn!("The leader received an unexpext Quorum Vote!"); - continue; - } - ProcessedGeneralConsensusMessage::InternalTrigger(trigger) => match trigger { - InternalTrigger::Timeout(_) => { - self.api.send_next_leader_timeout(self.cur_view).await; - break; - } - }, - ProcessedGeneralConsensusMessage::Proposal(_p, _sender) => { - warn!("The next leader has received an unexpected proposal!"); - } - ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), - ProcessedGeneralConsensusMessage::ViewSyncVote(_) => todo!(), - }, - Right(committee_message) => match committee_message { - ProcessedCommitteeConsensusMessage::DAVote(vote, sender) => { - if vote.signature.0 - != ::to_bytes(&sender) - { - continue; - } - if vote.block_commitment != block_commitment { - continue; - } - match self.committee_exchange.accumulate_vote( - &vote.signature.0, - &vote.signature.1, - vote.block_commitment, - vote.vote_data, - vote.vote_token.clone(), - self.cur_view, - accumulator, - None, - ) { - Either::Left(acc) => { - accumulator = acc; - } - Either::Right(qc) => { - match qc.clone().signatures { - AssembledSignature::Yes(_signature) => {} - AssembledSignature::DA(_signature) => {} - _ => unimplemented!(), - }; - return Some(qc); - } - } - } - ProcessedCommitteeConsensusMessage::DAProposal(_p, _sender) => { - warn!("The next leader has received an unexpected proposal!"); - } - ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { - continue; - } - }, - } - } - None - } - /// Returns the parent leaf of the proposal we are building - async fn parent_leaf(&self) -> Option> { - let parent_view_number = &self.high_qc.view_number(); - let consensus = self.consensus.read().await; - let Some(parent_view) = consensus.state_map.get(parent_view_number) else { - warn!("Couldn't find high QC parent in state map."); - return None; - }; - let Some(leaf) = parent_view.get_leaf_commitment() else { - warn!( - ?parent_view_number, - ?parent_view, - "Parent of high QC points to a view without a proposal" - ); - return None; - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - warn!("Failed to find high QC parent."); - return None; - }; - Some(leaf.clone()) - } - /// return None if we can't get transactions - async fn wait_for_transactions(&self) -> Option> { - let task_start_time = Instant::now(); - - let parent_leaf = self.parent_leaf().await?; - let previous_used_txns = match parent_leaf.deltas { - Either::Left(block) => block.contained_transactions(), - Either::Right(_commitment) => HashSet::new(), - }; - let receiver = self.transactions.subscribe().await; - - while task_start_time.elapsed() < self.api.propose_max_round_time() { - let txns = self.transactions.cloned().await; - let unclaimed_txns: Vec<_> = txns - .iter() - .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) - .collect(); - - let time_past = task_start_time.elapsed(); - if unclaimed_txns.len() < self.api.min_transactions() - && (time_past < self.api.propose_max_round_time()) - { - let duration = self.api.propose_max_round_time() - time_past; - let result = async_timeout(duration, receiver.recv()).await; - match result { - Err(_) => { - // Fall through below to updating new block - info!("propose_max_round_time passed, sending transactions we have so far"); - } - Ok(Err(e)) => { - // Something unprecedented is wrong, and `transactions` has been dropped - error!("Channel receiver error for SubscribableRwLock {:?}", e); - return None; - } - Ok(Ok(_)) => continue, - } - } - let mut txns = vec![]; - for (_hash, txn) in unclaimed_txns { - txns.push(txn.clone()); - } - return Some(txns); - } - None - } - /// Run one view of the DA leader task - #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "Sequencing DALeader Task", level = "error")] - pub async fn run_view( - self, - ) -> Option<( - DACertificate, - TYPES::BlockType, - SequencingLeaf, - )> { - // Prepare the DA Proposal - let Some(parent_leaf) = self.parent_leaf().await else { - warn!("Couldn't find high QC parent in state map."); - return None; - }; - - let mut block = ::StateType::next_block(None); - let txns = self.wait_for_transactions().await?; - - for txn in txns { - if let Ok(new_block) = block.add_transaction_raw(&txn) { - block = new_block; - continue; - } - } - let block_commitment = block.commit(); - - let consensus = self.consensus.read().await; - let signature = self.committee_exchange.sign_da_proposal(&block.commit()); - let data: DAProposal = DAProposal { - deltas: block.clone(), - view_number: self.cur_view, - }; - let message = - SequencingMessage::(Right(CommitteeConsensusMessage::DAProposal(Proposal { - data, - signature, - }))); - // Brodcast DA proposal - if let Err(e) = self.api.send_da_broadcast(message.clone()).await { - consensus.metrics.failed_to_send_messages.add(1); - warn!(?message, ?e, "Could not broadcast leader proposal"); - } else { - consensus.metrics.outgoing_broadcast_messages.add(1); - } - - // Drop the lock on the consensus. - drop(consensus); - - // Wait for DA votes or Timeout - if let Some(cert) = self - .wait_for_votes( - self.cur_view, - self.committee_exchange.success_threshold(), - self.committee_exchange.total_nodes(), - block_commitment, - ) - .await - { - return Some((cert, block, parent_leaf)); - } - None - } -} - -/// Implemenation of the consensus leader for a DA/Sequencing consensus. Handles sending out a proposal to the entire network -/// For now this step happens after the `DALeader` completes it's proposal and collects enough votes. -pub struct ConsensusLeader< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, -> { - /// id of node - pub id: u64, - /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, - /// The `high_qc` per spec - pub high_qc: QuorumCertificate>, - /// The view number we're running on - pub cur_view: TYPES::Time, - /// The Certificate generated for the transactions commited to in the proposal the leader will build - pub cert: DACertificate, - /// The block corresponding to the DA cert - pub block: TYPES::BlockType, - /// Leaf this proposal will chain from - pub parent: SequencingLeaf, - /// Limited access to the consensus protocol - pub api: A, - - /// the quorum exchange - pub quorum_exchange: Arc>, - - /// needed to tyep check - pub _pd: PhantomData, -} -impl< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - > ConsensusLeader -where - SequencingQuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal>, - >, -{ - /// Run one view of the DA leader task - #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "Sequencing DALeader Task", level = "error")] - pub async fn run_view(self) -> QuorumCertificate> { - let block_commitment = self.block.commit(); - let leaf = SequencingLeaf { - view_number: self.cur_view, - height: self.parent.height + 1, - justify_qc: self.high_qc.clone(), - parent_commitment: self.parent.commit(), - // Use the block commitment rather than the block, so that the replica can construct - // the same leaf with the commitment. - deltas: Right(block_commitment), - rejected: vec![], - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.api.public_key().to_bytes(), - }; - let signature = self - .quorum_exchange - .sign_validating_or_commitment_proposal::(&leaf.commit()); - // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. - let proposal = QuorumProposal { - block_commitment, - view_number: leaf.view_number, - height: leaf.height, - justify_qc: self.high_qc.clone(), - timeout_certificate: None, - dac: Some(self.cert), - proposer_id: leaf.proposer_id, - }; - let message = - SequencingMessage::(Left(GeneralConsensusMessage::Proposal(Proposal { - data: proposal, - signature, - }))); - if let Err(e) = self - .api - .send_broadcast_message::, QuorumVoteType>( - message.clone(), - ) - .await - { - warn!(?message, ?e, "Could not broadcast leader proposal"); - } - self.high_qc - } -} - -/// Implenting the next leader. Collect votes on the previous leaders proposal and return the QC -pub struct ConsensusNextLeader< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, -> { - /// id of node - pub id: u64, - /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, - /// The view number we're running on - pub cur_view: TYPES::Time, - /// Limited access to the consensus protocol - pub api: A, - /// generic_qc before starting this - pub generic_qc: QuorumCertificate>, - /// channel through which the leader collects votes - #[allow(clippy::type_complexity)] - pub vote_collection_chan: Arc>>>, - - /// the quorum exchnage - pub quorum_exchange: Arc>, - - /// needed to type check - pub _pd: PhantomData, -} - -impl< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - > ConsensusNextLeader -where - SequencingQuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, - >, -{ - /// Run one view of the next leader, collect votes and build a QC for the last views `QuorumProposal` - /// # Panics - /// While we are unwrapping, this function can logically never panic - /// unless there is a bug in std - pub async fn run_view(self) -> QuorumCertificate> { - let mut qcs = HashSet::>>::new(); - qcs.insert(self.generic_qc.clone()); - let mut accumulator = VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), - success_threshold: self.quorum_exchange.success_threshold(), - failure_threshold: self.quorum_exchange.failure_threshold(), - sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], - }; - - let lock = self.vote_collection_chan.lock().await; - while let Ok(msg) = lock.recv().await { - // If the message is for a different view number, skip it. - if Into::>::into(msg.clone()).view_number() != self.cur_view { - continue; - } - match msg { - Left(general_message) => match general_message { - ProcessedGeneralConsensusMessage::Vote(vote_message, sender) => { - match vote_message { - QuorumVote::Yes(vote) => { - if vote.signature.0 - != ::to_bytes(&sender) - { - continue; - } - - match self.quorum_exchange.accumulate_vote( - &vote.signature.0, - &vote.signature.1, - vote.leaf_commitment, - vote.vote_data, - vote.vote_token.clone(), - self.cur_view, - accumulator, - None, - ) { - Either::Left(acc) => { - accumulator = acc; - } - Either::Right(qc) => { - match qc.clone().signatures { - AssembledSignature::Yes(_signature) => {} - _ => unimplemented!(), - }; - return qc; - } - } - } - QuorumVote::Timeout(vote) => { - qcs.insert(vote.high_qc); - } - QuorumVote::No(_) => { - warn!("The next leader has received an unexpected vote!"); - } - } - } - ProcessedGeneralConsensusMessage::InternalTrigger(trigger) => match trigger { - InternalTrigger::Timeout(_) => { - self.api.send_next_leader_timeout(self.cur_view).await; - break; - } - }, - ProcessedGeneralConsensusMessage::Proposal(_p, _sender) => { - warn!("The next leader has received an unexpected proposal!"); - } - ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), - ProcessedGeneralConsensusMessage::ViewSyncVote(_) => todo!(), - }, - Right(committee_message) => match committee_message { - ProcessedCommitteeConsensusMessage::DAProposal(_p, _sender) => { - warn!("The next leader has received an unexpected proposal!"); - } - ProcessedCommitteeConsensusMessage::DAVote(_, _sender) => { - warn!("The next leader has received an unexpected DA vote!"); - } - ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { - continue; - } - }, - } - } - qcs.into_iter().max_by_key(|qc| qc.view_number).unwrap() - } -} diff --git a/consensus/src/sequencing_replica.rs b/consensus/src/sequencing_replica.rs deleted file mode 100644 index fece660dee..0000000000 --- a/consensus/src/sequencing_replica.rs +++ /dev/null @@ -1,642 +0,0 @@ -//! Contains the [`SequencingReplica`] struct used for the replica step in the consensus algorithm with DA -//! committee, i.e. in the sequencing consensus. - -use crate::{ - utils::{Terminator, View, ViewInner}, - Consensus, SequencingConsensusApi, -}; -use async_compatibility_layer::channel::UnboundedReceiver; -use async_lock::{Mutex, RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; -use bincode::Options; -use commit::Committable; -use either::{Left, Right}; -use hotshot_types::{ - certificate::{DACertificate, QuorumCertificate}, - data::{LeafType, QuorumProposal, SequencingLeaf}, - message::{ - ConsensusMessageType, InternalTrigger, Message, ProcessedCommitteeConsensusMessage, - ProcessedGeneralConsensusMessage, ProcessedSequencingMessage, SequencingMessage, - }, - traits::{ - election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, - node_implementation::{ - CommitteeEx, NodeImplementation, NodeType, QuorumProposalType, QuorumVoteType, - SequencingQuorumEx, - }, - signature_key::SignatureKey, - state::ConsensusTime, - Block, - }, -}; -use hotshot_utils::bincode::bincode_opts; -use std::{ - collections::HashSet, - marker::PhantomData, - ops::Bound::{Excluded, Included}, - sync::Arc, -}; -use tracing::{error, info, instrument, warn}; -/// This view's replica for sequencing consensus. -#[derive(Debug, Clone)] -pub struct SequencingReplica< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, -> { - /// ID of node. - pub id: u64, - /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>>, - /// Channel for accepting leader proposals and timeouts messages. - #[allow(clippy::type_complexity)] - pub proposal_collection_chan: - Arc>>>, - /// View number this view is executing in. - pub cur_view: TYPES::Time, - /// The High QC. - pub high_qc: QuorumCertificate>, - /// HotShot consensus API. - pub api: A, - - /// the committee exchange - pub committee_exchange: Arc>, - - /// the quorum exchange - pub quorum_exchange: Arc>, - - /// needed to typecheck - pub _pd: PhantomData, -} - -impl< - A: SequencingConsensusApi, I>, - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - > SequencingReplica -where - SequencingQuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, - >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = TYPES::BlockType, - >, -{ - /// The leaf from the genesis view. - /// - /// This will be used as the parent leaf for the proposal in the first view after genesis. - async fn genesis_leaf(&self) -> Option> { - let consensus = self.consensus.read().await; - let Some(genesis_view) = consensus.state_map.get(&TYPES::Time::genesis()) else { - warn!("Couldn't find genesis view in state map."); - return None; - }; - let Some(leaf) = genesis_view.get_leaf_commitment() else { - warn!( - ?genesis_view, - "Genesis view points to a view without a leaf" - ); - return None; - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - warn!("Failed to find genesis leaf."); - return None; - }; - Some(leaf.clone()) - } - - /// Replica task for sequencing consensus that spins until a vote can be made or timeout is - /// hit. - /// - /// Returns the new leaf if it's valid. - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Sequencing Replica Task", level = "error")] - #[allow(clippy::type_complexity)] - async fn find_valid_msg<'a>( - &self, - view_leader_key: TYPES::SignatureKey, - consensus: RwLockUpgradableReadGuard<'a, Consensus>>, - ) -> ( - RwLockUpgradableReadGuard<'a, Consensus>>, - Option>, - ) { - let lock = self.proposal_collection_chan.lock().await; - let mut invalid_qc = false; - let leaf = loop { - let msg = lock.recv().await; - info!("recv-ed message {:?}", msg.clone()); - if let Ok(msg) = msg { - // stale/newer view messages should never reach this specific task's receive channel - if Into::>::into(msg.clone()).view_number() != self.cur_view - { - continue; - } - match msg { - Left(general_message) => { - match general_message { - ProcessedGeneralConsensusMessage::Proposal(p, sender) => { - if view_leader_key != sender { - continue; - } - - let mut valid_leaf = None; - let vote_token = - self.quorum_exchange.make_vote_token(self.cur_view); - match vote_token { - Err(e) => { - error!( - "Failed to generate vote token for {:?} {:?}", - self.cur_view, e - ); - } - Ok(None) => { - info!( - "We were not chosen for consensus committee on {:?}", - self.cur_view - ); - } - Ok(Some(vote_token)) => { - info!( - "We were chosen for consensus committee on {:?}", - self.cur_view - ); - - let message; - - // Construct the leaf. - let justify_qc = p.data.justify_qc; - let parent = if justify_qc.is_genesis() { - self.genesis_leaf().await - } else { - consensus - .saved_leaves - .get(&justify_qc.leaf_commitment()) - .cloned() - }; - let Some(parent) = parent else { - warn!("Proposal's parent missing from storage"); - continue; - }; - let parent_commitment = parent.commit(); - let block_commitment = p.data.block_commitment; - let leaf = SequencingLeaf { - view_number: self.cur_view, - height: p.data.height, - justify_qc: justify_qc.clone(), - parent_commitment, - deltas: Right(p.data.block_commitment), - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc() - .unix_timestamp_nanos(), - proposer_id: sender.to_bytes(), - }; - let justify_qc_commitment = justify_qc.commit(); - let leaf_commitment = leaf.commit(); - let Some(dac) = p.data.dac else { - warn!("No DAC in proposal! Skipping proposal."); - continue; - }; - - // Validate the `justify_qc`. - if !self - .quorum_exchange - .is_valid_cert(&justify_qc, parent_commitment) - { - invalid_qc = true; - warn!("Invalid justify_qc in proposal!."); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - self.cur_view, - vote_token, - ); - } - // Validate the `height`. - else if leaf.height != parent.height + 1 { - invalid_qc = true; - warn!( - "Incorrect height in proposal (expected {}, got {})", - parent.height + 1, - leaf.height - ); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - self.cur_view, - vote_token, - ); - } - // Validate the DAC. - else if !self - .committee_exchange - .is_valid_cert(&dac, block_commitment) - { - warn!("Invalid DAC in proposal! Skipping proposal."); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - self.cur_view, - vote_token, - ); - } - // Validate the signature. - else if !view_leader_key - .validate(&p.signature, leaf_commitment.as_ref()) - { - warn!(?p.signature, "Could not verify proposal."); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - self.cur_view, - vote_token, - ); - } - // Create a positive vote if either liveness or safety check - // passes. - else { - // Liveness check. - let liveness_check = - justify_qc.view_number > consensus.locked_view; - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.view_number, - Terminator::Inclusive(consensus.locked_view), - false, - |leaf| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - if let Err(e) = outcome { - self.api - .send_view_error(self.cur_view, Arc::new(e)) - .await; - } - - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - warn!("Failed safety check and liveness check"); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - self.cur_view, - vote_token, - ); - } else { - // A valid leaf is found. - valid_leaf = Some(leaf); - - // Generate a message with yes vote. - message = self.quorum_exchange.create_yes_message( - justify_qc_commitment, - leaf_commitment, - self.cur_view, - vote_token, - ); - } - } - - info!("Sending vote to next leader {:?}", message); - let next_leader = - self.quorum_exchange.get_leader(self.cur_view + 1); - if self - .api - .send_direct_message::, QuorumVoteType>(next_leader, SequencingMessage(Left(message))) - .await - .is_err() - { - consensus.metrics.failed_to_send_messages.add(1); - warn!("Failed to send vote to next leader"); - } else { - consensus.metrics.outgoing_direct_messages.add(1); - } - } - } - break valid_leaf; - } - ProcessedGeneralConsensusMessage::InternalTrigger(trigger) => { - match trigger { - InternalTrigger::Timeout(_) => { - let next_leader = - self.quorum_exchange.get_leader(self.cur_view + 1); - - consensus.metrics.number_of_timeouts.add(1); - - let vote_token = - self.quorum_exchange.make_vote_token(self.cur_view); - - match vote_token { - Err(e) => { - error!( - "Failed to generate vote token for {:?} {:?}", - self.cur_view, e - ); - } - Ok(None) => { - info!( - "We were not chosen for committee on {:?}", - self.cur_view - ); - } - Ok(Some(vote_token)) => { - let timed_out_msg = - self.quorum_exchange.create_timeout_message( - self.high_qc.clone(), - self.cur_view, - vote_token, - ); - warn!( - "Timed out! Sending timeout to next leader {:?}", - timed_out_msg - ); - - // send timedout message to the next leader - if let Err(e) = self - .api - .send_direct_message::, QuorumVoteType< - TYPES, - I, - >>( - next_leader.clone(), - SequencingMessage(Left(timed_out_msg)), - ) - .await - { - consensus - .metrics - .failed_to_send_messages - .add(1); - warn!( - ?next_leader, - ?e, - "Could not send time out message to next_leader" - ); - } else { - consensus - .metrics - .outgoing_direct_messages - .add(1); - } - - // exits from entire function - self.api.send_replica_timeout(self.cur_view).await; - } - } - return (consensus, None); - } - } - } - ProcessedGeneralConsensusMessage::Vote(_, _) => { - // should only be for leader, never replica - warn!("Replica receieved a vote message. This is not what the replica expects. Skipping."); - continue; - } - ProcessedGeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), - ProcessedGeneralConsensusMessage::ViewSyncVote(_) => todo!(), - } - } - Right(committee_message) => { - match committee_message { - ProcessedCommitteeConsensusMessage::DAProposal(_p, _sender) => { - warn!("Replica receieved a DA Proposal. This is not what the replica expects. Skipping."); - // TODO (Keyao) why not continue here? - } - ProcessedCommitteeConsensusMessage::DAVote(_, _) => { - // should only be for leader, never replica - warn!("Replica receieved a vote message. This is not what the replica expects. Skipping."); - continue; - } - ProcessedCommitteeConsensusMessage::DACertificate(_, _) => { - continue; - } - } - } - } - } - // fall through logic if we did not receive successfully from channel - warn!("Replica did not receive successfully from channel. Terminating Replica."); - return (consensus, None); - }; - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - if invalid_qc { - consensus.invalid_qc += 1; - } - (RwLockWriteGuard::downgrade_to_upgradable(consensus), leaf) - } - - /// Run one view of the replica for sequencing consensus. - #[instrument(skip(self), fields(id = self.id, view = *self.cur_view), name = "Sequencing Replica Task", level = "error")] - pub async fn run_view(self) -> QuorumCertificate> { - info!("Sequencing replica task started!"); - let view_leader_key = self.quorum_exchange.get_leader(self.cur_view); - let consensus = self.consensus.upgradable_read().await; - - let (consensus, maybe_leaf) = self.find_valid_msg(view_leader_key, consensus).await; - - let Some(leaf) = maybe_leaf else { - // We either timed out or for some reason could not vote on a proposal. - return self.high_qc; - }; - - let mut new_anchor_view = consensus.last_decided_view; - let mut new_locked_view = consensus.locked_view; - let mut last_view_number_visited = self.cur_view; - let mut new_commit_reached: bool = false; - let mut new_decide_reached = false; - let mut new_decide_qc = None; - let mut leaf_views = Vec::new(); - let mut included_txns = HashSet::new(); - let old_anchor_view = consensus.last_decided_view; - let parent_view = leaf.justify_qc.view_number; - let mut current_chain_length = 0usize; - if parent_view + 1 == self.cur_view { - current_chain_length += 1; - if let Err(e) = consensus.visit_leaf_ancestors( - parent_view, - Terminator::Exclusive(old_anchor_view), - true, - |leaf| { - if !new_decide_reached { - if last_view_number_visited == leaf.view_number + 1 { - last_view_number_visited = leaf.view_number; - current_chain_length += 1; - if current_chain_length == 2 { - new_locked_view = leaf.view_number; - new_commit_reached = true; - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - new_decide_qc = Some(leaf.justify_qc.clone()); - } else if current_chain_length == 3 { - new_anchor_view = leaf.view_number; - new_decide_reached = true; - } - } else { - // nothing more to do here... we don't have a new chain extension - return false; - } - } - // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above - if new_decide_reached { - let mut leaf = leaf.clone(); - - // If the full block is available for this leaf, include it in the leaf - // chain that we send to the client. - if let Some(block) = - consensus.saved_blocks.get(leaf.get_deltas_commitment()) - { - if let Err(err) = leaf.fill_deltas(block.clone()) { - warn!("unable to fill leaf {} with block {}, block will not be available: {}", - leaf.commit(), block.commit(), err); - } - } - - leaf_views.push(leaf.clone()); - if let Left(block) = &leaf.deltas { - let txns = block.contained_transactions(); - for txn in txns { - included_txns.insert(txn); - } - } - } - true - }, - ) { - self.api.send_view_error(self.cur_view, Arc::new(e)).await; - } - } - - let included_txns_set: HashSet<_> = if new_decide_reached { - included_txns - } else { - HashSet::new() - }; - - // promote lock here - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.state_map.insert( - self.cur_view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - }, - }, - ); - - consensus.metrics.number_of_views_since_last_commit.set( - consensus - .state_map - .range(( - Excluded(consensus.last_decided_view), - Included(self.cur_view), - )) - .count(), - ); - - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - if new_commit_reached { - consensus.locked_view = new_locked_view; - } - #[allow(clippy::cast_precision_loss)] - if new_decide_reached { - let num_views_since_last_anchor = - (*self.cur_view - *consensus.last_decided_view) as f64; - let views_seen = consensus - .state_map - .range(( - Excluded(consensus.last_decided_view), - Included(self.cur_view), - )) - .count(); - // A count of all veiws we saw that aren't in the current chain (so won't be commited) - consensus - .metrics - .discarded_views_per_decide_event - .add_point((views_seen - current_chain_length) as f64); - // An empty view is one we didn't see a leaf for but we moved past that view number - consensus - .metrics - .empty_views_per_decide_event - .add_point(num_views_since_last_anchor - views_seen as f64); - consensus - .metrics - .number_of_views_per_decide_event - .add_point(num_views_since_last_anchor); - consensus - .metrics - .invalid_qc_views - .add_point(consensus.invalid_qc as f64); - - let mut included_txn_size = 0; - consensus - .transactions - .modify(|txns| { - *txns = txns - .drain() - .filter(|(txn_hash, txn)| { - if included_txns_set.contains(txn_hash) { - included_txn_size += - bincode_opts().serialized_size(txn).unwrap_or_default(); - false - } else { - true - } - }) - .collect(); - }) - .await; - consensus - .metrics - .outstanding_transactions - .update(-(included_txns_set.len() as i64)); - consensus - .metrics - .outstanding_transactions_memory_size - .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); - - consensus - .metrics - .rejected_transactions - .add(leaf.rejected.len()); - - let decide_sent = self.api.send_decide( - consensus.last_decided_view, - leaf_views, - new_decide_qc.unwrap(), - ); - let old_anchor_view = consensus.last_decided_view; - consensus - .collect_garbage(old_anchor_view, new_anchor_view) - .await; - consensus.last_decided_view = new_anchor_view; - consensus.invalid_qc = 0; - - // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. - if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { - error!("Could not insert new anchor into the storage API: {:?}", e); - } - - decide_sent.await; - } - self.high_qc - } -} diff --git a/consensus/src/traits.rs b/consensus/src/traits.rs deleted file mode 100644 index 2687107886..0000000000 --- a/consensus/src/traits.rs +++ /dev/null @@ -1,171 +0,0 @@ -//! Contains the [`SequencingConsensusApi`] and [`ValidatingConsensusApi`] traits. - -use crate::{ - certificate::QuorumCertificate, - data::{LeafType, ProposalType}, - error::HotShotError, - event::{Event, EventType}, - message::{DataMessage, SequencingMessage}, - traits::{ - network::NetworkError, - node_implementation::{NodeImplementation, NodeType}, - signature_key::SignatureKey, - storage::StorageError, - }, - vote::VoteType, -}; -use async_trait::async_trait; - -use std::{num::NonZeroUsize, sync::Arc, time::Duration}; - -/// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and -/// sequencing consensus. -#[async_trait] -pub trait ConsensusSharedApi< - TYPES: NodeType, - LEAF: LeafType, - I: NodeImplementation, ->: Send + Sync -{ - /// Total number of nodes in the network. Also known as `n`. - fn total_nodes(&self) -> NonZeroUsize; - - /// The minimum amount of time a leader has to wait before sending a propose - fn propose_min_round_time(&self) -> Duration; - - /// The maximum amount of time a leader can wait before sending a propose. - /// If this time is reached, the leader has to send a propose without transactions. - fn propose_max_round_time(&self) -> Duration; - - /// Store a leaf in the storage - async fn store_leaf( - &self, - old_anchor_view: TYPES::Time, - leaf: LEAF, - ) -> Result<(), StorageError>; - - /// Retuns the maximum transactions allowed in a block - fn max_transactions(&self) -> NonZeroUsize; - - /// Returns the minimum transactions that must be in a block - fn min_transactions(&self) -> usize; - - /// Returns `true` if hotstuff should start the given round. A round can also be started manually by sending `NewView` to the leader. - /// - /// In production code this should probably always return `true`. - async fn should_start_round(&self, view_number: TYPES::Time) -> bool; - - /// Notify the system of an event within `hotshot-consensus`. - async fn send_event(&self, event: Event); - - /// Get a reference to the public key. - fn public_key(&self) -> &TYPES::SignatureKey; - - /// Get a reference to the private key. - fn private_key(&self) -> &::PrivateKey; - - // Utility functions - - /// notifies client of an error - async fn send_view_error(&self, view_number: TYPES::Time, error: Arc>) { - self.send_event(Event { - view_number, - event: EventType::Error { error }, - }) - .await; - } - - /// notifies client of a replica timeout - async fn send_replica_timeout(&self, view_number: TYPES::Time) { - self.send_event(Event { - view_number, - event: EventType::ReplicaViewTimeout { view_number }, - }) - .await; - } - - /// notifies client of a next leader timeout - async fn send_next_leader_timeout(&self, view_number: TYPES::Time) { - self.send_event(Event { - view_number, - event: EventType::NextLeaderViewTimeout { view_number }, - }) - .await; - } - - /// sends a decide event down the channel - async fn send_decide( - &self, - view_number: TYPES::Time, - leaf_views: Vec, - decide_qc: QuorumCertificate, - ) { - self.send_event(Event { - view_number, - event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - qc: Arc::new(decide_qc), - block_size: None, - }, - }) - .await; - } - - /// Sends a `ViewFinished` event - async fn send_view_finished(&self, view_number: TYPES::Time) { - self.send_event(Event { - view_number, - event: EventType::ViewFinished { view_number }, - }) - .await; - } -} - -/// The API that [`HotStuff`] needs to talk to the system, for sequencing consensus. -#[async_trait] -pub trait SequencingConsensusApi< - TYPES: NodeType, - LEAF: LeafType, - I: NodeImplementation>, ->: ConsensusSharedApi -{ - /// Send a direct message to the given recipient - async fn send_direct_message, VOTE: VoteType>( - &self, - recipient: TYPES::SignatureKey, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// send a direct message using the DA communication channel - async fn send_direct_da_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( - &self, - recipient: TYPES::SignatureKey, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// Send a broadcast message to the entire network. - async fn send_broadcast_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( - &self, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// Send a broadcast to the DA comitee, stub for now - async fn send_da_broadcast( - &self, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// Send a message with a transaction. - /// This function is deprecated in favor of `submit_transaction` in `handle.rs` - #[deprecated] - async fn send_transaction( - &self, - message: DataMessage, - ) -> std::result::Result<(), NetworkError>; -} diff --git a/consensus/src/utils.rs b/consensus/src/utils.rs deleted file mode 100644 index 38c4b5a852..0000000000 --- a/consensus/src/utils.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! Utility functions, type aliases, helper structs and enum definitions. - -use crate::{ - data::{LeafBlock, LeafType}, - traits::node_implementation::NodeType, -}; -use commit::Commitment; -use std::ops::Deref; - -/// A view's state -#[derive(Debug)] -pub enum ViewInner> { - /// A pending view with an available block but not leaf proposal yet. - /// - /// Storing this state allows us to garbage collect blocks for views where a proposal is never - /// made. This saves memory when a leader fails and subverts a DoS attack where malicious - /// leaders repeatedly request availability for blocks that they never propose. - DA { - /// Available block. - block: Commitment>, - }, - /// Undecided view - Leaf { - /// Proposed leaf - leaf: Commitment, - }, - /// Leaf has failed - Failed, -} - -impl> ViewInner { - /// return the underlying leaf hash if it exists - #[must_use] - pub fn get_leaf_commitment(&self) -> Option> { - if let Self::Leaf { leaf } = self { - Some(*leaf) - } else { - None - } - } - - /// return the underlying block hash if it exists - #[must_use] - pub fn get_block_commitment(&self) -> Option>> { - if let Self::DA { block } = self { - Some(*block) - } else { - None - } - } -} - -impl> Deref for View { - type Target = ViewInner; - - fn deref(&self) -> &Self::Target { - &self.view_inner - } -} - -/// This exists so we can perform state transitions mutably -#[derive(Debug)] -pub struct View> { - /// The view data. Wrapped in a struct so we can mutate - pub view_inner: ViewInner, -} - -/// A struct containing information about a finished round. -#[derive(Debug, Clone)] -pub struct RoundFinishedEvent { - /// The round that finished - pub view_number: TYPES::Time, -} - -/// Whether or not to stop inclusively or exclusively when walking -#[derive(Copy, Clone, Debug)] -pub enum Terminator { - /// Stop right before this view number - Exclusive(T), - /// Stop including this view number - Inclusive(T), -} diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index a231b76d99..00a0478afa 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -351,7 +351,7 @@ mod tests { let mut rng = jf_utils::test_rng(); let vk_points: Vec> = (0..5).map(|_| Projective::

::rand(&mut rng)).collect(); - let selector = vec![false, true, false, true, false]; + let selector = [false, true, false, true, false]; let agg_vk_point = vk_points .iter() @@ -455,7 +455,7 @@ mod tests { let mut rng = jf_utils::test_rng(); let vk_points: Vec> = (0..5).map(|_| Projective::

::rand(&mut rng)).collect(); - let selector = vec![false, true, false, true, false]; + let selector = [false, true, false, true, false]; let agg_vk_point = vk_points .iter() diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index ac0793678f..4fe8e7b02d 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -135,19 +135,16 @@ impl MerkleProof { self.path .iter() .skip(1) - .fold(Ok(init), |result, node| match node { - MerklePathEntry::Branch { pos, siblings } => match result { - Ok(comm) => { - let mut input = [FieldType::from(0); TREE_BRANCH]; - input[..*pos].copy_from_slice(&siblings[..*pos]); - input[*pos] = comm; - input[pos + 1..].copy_from_slice(&siblings[*pos..]); - let comm = Digest::evaluate(input) - .map_err(|_| StakeTableError::RescueError)?[0]; - Ok(comm) - } - Err(_) => unreachable!(), - }, + .try_fold(init, |comm, node| match node { + MerklePathEntry::Branch { pos, siblings } => { + let mut input = [FieldType::from(0); TREE_BRANCH]; + input[..*pos].copy_from_slice(&siblings[..*pos]); + input[*pos] = comm; + input[pos + 1..].copy_from_slice(&siblings[*pos..]); + let comm = Digest::evaluate(input) + .map_err(|_| StakeTableError::RescueError)?[0]; + Ok(comm) + } _ => Err(StakeTableError::MalformedProof), }) } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 47e7fdc6a3..fd1ff23375 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -89,7 +89,6 @@ espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } -hotshot-consensus = { path = "../consensus", version = "0.1.0", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-signature-key = { path = "../hotshot-signature-key" } hotshot-types = { path = "../types", version = "0.1.0", default-features = false } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0f75e7a673..fa435e1f1d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -448,6 +448,8 @@ impl> SystemContext { /// # Errors /// /// Will return any errors that the underlying `broadcast_message` can return. + // this clippy lint is silly. This is async by requirement of the trait. + #[allow(clippy::unused_async)] pub async fn send_broadcast_message( &self, kind: impl Into>, @@ -785,556 +787,9 @@ where }); handle - - // let shut_down = Arc::new(AtomicBool::new(false)); - // let started = Arc::new(AtomicBool::new(false)); - // - // let exchange = self.inner.exchanges.quorum_exchange(); - // let committee_exchange = self.inner.exchanges.committee_exchange(); - // - // let network_broadcast_task_handle = async_spawn( - // tasks::network_task( - // self.clone(), - // shut_down.clone(), - // TransmitType::Broadcast, - // exchange.clone().into(), - // ) - // .instrument(info_span!("HotShot Broadcast Task",)), - // ); - // let network_direct_task_handle = async_spawn( - // tasks::network_task( - // self.clone(), - // shut_down.clone(), - // TransmitType::Direct, - // exchange.clone().into(), - // ) - // .instrument(info_span!("HotShot Direct Task",)), - // ); - // - // let committee_network_broadcast_task_handle = async_spawn( - // tasks::network_task( - // self.clone(), - // shut_down.clone(), - // TransmitType::Broadcast, - // committee_exchange.clone().into(), - // ) - // .instrument(info_span!("HotShot DA Broadcast Task",)), - // ); - // let committee_network_direct_task_handle = async_spawn( - // tasks::network_task( - // self.clone(), - // shut_down.clone(), - // TransmitType::Direct, - // committee_exchange.clone().into(), - // ) - // .instrument(info_span!("HotShot DA Direct Task",)), - // ); - // - // async_spawn( - // tasks::network_lookup_task(self.clone(), shut_down.clone()) - // .instrument(info_span!("HotShot Network Lookup Task",)), - // ); - // - // let (handle_channels, task_channels) = match self.inner.config.execution_type { - // ExecutionType::Continuous => (None, None), - // ExecutionType::Incremental => { - // let (send_consensus_start, recv_consensus_start) = unbounded(); - // (Some(send_consensus_start), Some(recv_consensus_start)) - // } - // }; - // - // let consensus_task_handle = async_spawn( - // tasks::view_runner( - // self.clone(), - // ) - // .instrument(info_span!("Consensus Task Handle",)), - // ); - // - // let (broadcast_sender, broadcast_receiver) = channel(); - // - // let handle = SystemContextHandle { - // sender_handle: Arc::new(broadcast_sender.clone()), - // hotshot: self.clone(), - // stream_output: broadcast_receiver, - // storage: self.inner.storage.clone(), - // shut_down, - // }; - // *self.inner.event_sender.write().await = Some(broadcast_sender); - // - // let mut background_task_handle = self.inner.background_task_handle.inner.write().await; - // *background_task_handle = Some(TaskHandleInner { - // network_broadcast_task_handle, - // network_direct_task_handle, - // committee_network_broadcast_task_handle: Some(committee_network_broadcast_task_handle), - // committee_network_direct_task_handle: Some(committee_network_direct_task_handle), - // consensus_task_handle: nll_todo(), - // shutdown_timeout: Duration::from_millis(self.inner.config.next_view_timeout), - // run_view_channels: handle_channels, - // started, - // }); - // - // handle - } - - // #[instrument( - // skip(self), - // name = "Handle broadcast consensus message", - // level = "error" - // )] - // async fn handle_broadcast_consensus_message( - // &self, - // msg: SequencingMessage, - // sender: TYPES::SignatureKey, - // ) { - // let msg_time = msg.view_number(); - // - // match msg.0 { - // Left(general_message) => { - // match general_message { - // // this is ONLY intended for replica - // GeneralConsensusMessage::Proposal(_) => { - // let channel_map = self - // .inner - // .channel_maps - // .0 - // .vote_channel - // .upgradable_read() - // .await; - // - // // skip if the proposal is stale - // if msg_time < channel_map.cur_view { - // warn!( - // "Throwing away {} for view number: {:?}", - // std::any::type_name::>>(), - // msg_time - // ); - // return; - // } - // - // let chan: ViewQueue = - // Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; - // - // if !chan.has_received_proposal.swap(true, Ordering::Relaxed) - // && chan - // .sender_chan - // .send(Left(ProcessedGeneralConsensusMessage::new( - // general_message, - // sender, - // ))) - // .await - // .is_err() - // { - // warn!("Failed to send to next leader!"); - // } - // } - // GeneralConsensusMessage::InternalTrigger(_) => { - // warn!("Received an internal trigger. This shouldn't be possible."); - // } - // GeneralConsensusMessage::Vote(_) => { - // warn!( - // "Received a broadcast for a vote message. This shouldn't be possible." - // ); - // } - // GeneralConsensusMessage::ViewSync(_) => todo!(), - // } - // } - // Right(committee_message) => { - // match committee_message { - // CommitteeConsensusMessage::DAVote(_) => { - // warn!( - // "Received a broadcast for a vote message. This shouldn't be possible." - // ); - // } - // CommitteeConsensusMessage::DAProposal(_) => { - // let channel_map = match &self.inner.channel_maps.1 { - // Some(committee_channels) => { - // committee_channels.vote_channel.upgradable_read().await - // } - // None => { - // warn!("Committee channels not found."); - // return; - // } - // }; - // - // // skip if the proposal is stale - // if msg_time < channel_map.cur_view { - // warn!( - // "Throwing away {} for view number: {:?}", - // std::any::type_name::>>(), - // msg_time - // ); - // return; - // } - // - // let chan: ViewQueue = - // Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; - // - // if !chan.has_received_proposal.swap(true, Ordering::Relaxed) - // && chan - // .sender_chan - // .send(Right(ProcessedCommitteeConsensusMessage::new( - // committee_message, - // sender, - // ))) - // .await - // .is_err() - // { - // warn!("Failed to send to next leader!"); - // } - // } - // } - // } - // }; - // } - - // #[instrument(skip(self), name = "Handle direct consensus message", level = "error")] - // async fn handle_direct_consensus_message( - // &self, - // msg: SequencingMessage, - // sender: TYPES::SignatureKey, - // ) { - // let msg_time = msg.view_number(); - // - // // We can only recv from a replicas - // // replicas should only send votes or if they timed out, timeouts - // match msg.0 { - // Left(general_message) => match general_message { - // GeneralConsensusMessage::Proposal(_) - // | GeneralConsensusMessage::InternalTrigger(_) => { - // warn!("Received a direct message for a proposal. This shouldn't be possible."); - // } - // // this is ONLY intended for next leader - // c @ GeneralConsensusMessage::Vote(_) => { - // let channel_map = self - // .inner - // .channel_maps - // .0 - // .proposal_channel - // .upgradable_read() - // .await; - // - // // check if - // // - is in fact, actually is the next leader - // // - the message is not stale - // let is_leader = self - // .inner - // .clone() - // .exchanges - // .quorum_exchange() - // .is_leader(msg_time + 1); - // if !is_leader || msg_time < channel_map.cur_view { - // warn!( - // "Throwing away {} message for view number: {:?}", - // std::any::type_name::>(), - // msg_time - // ); - // return; - // } - // - // let chan = Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; - // - // if chan - // .sender_chan - // .send(Left(ProcessedGeneralConsensusMessage::new(c, sender))) - // .await - // .is_err() - // { - // error!("Failed to send to next leader!"); - // } - // } - // GeneralConsensusMessage::ViewSync(_) => todo!(), - // }, - // Right(committee_message) => { - // match committee_message { - // c @ CommitteeConsensusMessage::DAVote(_) => { - // let channel_map = match &self.inner.channel_maps.1 { - // Some(committee_channels) => { - // committee_channels.proposal_channel.upgradable_read().await - // } - // None => { - // warn!("Committee channels not found."); - // return; - // } - // }; - // - // // check if - // // - is in fact, actually is the next leader - // // - the message is not stale - // let is_leader = self - // .inner - // .clone() - // .exchanges - // .committee_exchange() - // .is_leader(msg_time); - // if !is_leader || msg_time < channel_map.cur_view { - // warn!( - // "Throwing away {} message for view number: {:?}, Channel cur view: {:?}", - // std::any::type_name::>(), - // msg_time, - // channel_map.cur_view, - // ); - // return; - // } - // - // let chan = - // Self::create_or_obtain_chan_from_read(msg_time, channel_map).await; - // - // if chan - // .sender_chan - // .send(Right(ProcessedCommitteeConsensusMessage::new(c, sender))) - // .await - // .is_err() - // { - // error!("Failed to send to next leader!"); - // } - // } - // CommitteeConsensusMessage::DAProposal(_) => todo!(), - // } - // } - // } - // } -} - -<<<<<<< HEAD:src/lib.rs -======= -/// A view runner implemented by [HotShot] for different types of consensus. -#[async_trait] -pub trait ViewRunner> { - /// Executes one view of consensus - async fn run_view(hotshot: SystemContext) -> Result<(), ()>; -} - -#[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - > ViewRunner for SystemContext -where - SequencingQuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, - >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = TYPES::BlockType, - >, -{ - // #[instrument] - #[allow(clippy::too_many_lines)] - async fn run_view(hotshot: SystemContext) -> Result<(), ()> { - let c_api = HotShotSequencingConsensusApi { - inner: hotshot.inner.clone(), - }; - - // Setup channel for recieving DA votes - let mut send_to_leader = match &hotshot.inner.channel_maps.1 { - Some(committee_channels) => committee_channels.proposal_channel.write().await, - None => { - warn!("Committee channels not found."); - return Err(()); - } - }; - let leader_last_view: TYPES::Time = send_to_leader.cur_view; - send_to_leader.channel_map.remove(&leader_last_view); - send_to_leader.cur_view += 1; - let (send_da_vote_chan, recv_da_vote, cur_view) = { - let mut consensus = hotshot.inner.consensus.write().await; - let cur_view = consensus.increment_view(); - let vq = SystemContext::::create_or_obtain_chan_from_write( - cur_view, - send_to_leader, - ) - .await; - (vq.sender_chan, vq.receiver_chan, cur_view) - }; - - // Set up vote collection channel for commitment proposals/votes - let mut send_to_next_leader = hotshot.inner.channel_maps.0.proposal_channel.write().await; - let leader_last_view: TYPES::Time = send_to_next_leader.cur_view; - send_to_next_leader.channel_map.remove(&leader_last_view); - send_to_next_leader.cur_view += 1; - let (send_commitment_vote_chan, recv_commitment_vote_chan) = { - let vq = SystemContext::::create_or_obtain_chan_from_write( - cur_view, - send_to_next_leader, - ) - .await; - (vq.sender_chan, vq.receiver_chan) - }; - - let (high_qc, txns) = { - // OBTAIN read lock on consensus - let consensus = hotshot.inner.consensus.read().await; - let high_qc = consensus.high_qc.clone(); - let txns = consensus.transactions.clone(); - (high_qc, txns) - }; - let mut send_to_member = match &hotshot.inner.channel_maps.1 { - Some(committee_channels) => committee_channels.vote_channel.write().await, - None => { - warn!("Committee channels not found."); - return Err(()); - } - }; - let member_last_view: TYPES::Time = send_to_member.cur_view; - send_to_member.channel_map.remove(&member_last_view); - send_to_member.cur_view += 1; - let ViewQueue { - sender_chan: send_member, - receiver_chan: recv_member, - has_received_proposal: _, - } = SystemContext::::create_or_obtain_chan_from_write( - send_to_member.cur_view, - send_to_member, - ) - .await; - let mut send_to_replica = hotshot.inner.channel_maps.0.vote_channel.write().await; - let replica_last_view: TYPES::Time = send_to_replica.cur_view; - send_to_replica.channel_map.remove(&replica_last_view); - send_to_replica.cur_view += 1; - let ViewQueue { - sender_chan: send_replica, - receiver_chan: recv_replica, - has_received_proposal: _, - } = SystemContext::::create_or_obtain_chan_from_write( - send_to_replica.cur_view, - send_to_replica, - ) - .await; - - let mut task_handles = Vec::new(); - let committee_exchange = c_api.inner.exchanges.committee_exchange().clone(); - let quorum_exchange = c_api.inner.exchanges.quorum_exchange().clone(); - - if quorum_exchange.clone().is_leader(cur_view) { - let da_leader = DALeader { - id: hotshot.inner.id, - consensus: hotshot.inner.consensus.clone(), - high_qc: high_qc.clone(), - cur_view, - transactions: txns, - api: c_api.clone(), - committee_exchange: committee_exchange.clone().into(), - quorum_exchange: quorum_exchange.clone().into(), - vote_collection_chan: recv_da_vote, - _pd: PhantomData, - }; - let consensus = hotshot.inner.consensus.clone(); - let qc = high_qc.clone(); - let api = c_api.clone(); - let leader_handle = { - let id = hotshot.inner.id; - async_spawn(async move { - let Some((da_cert, block, parent)) = da_leader.run_view().await else { - return qc; - }; - let consensus_leader = ConsensusLeader { - id, - consensus, - high_qc: qc, - cert: da_cert, - block, - parent, - cur_view, - api: api.clone(), - quorum_exchange: quorum_exchange.clone().into(), - _pd: PhantomData, - }; - consensus_leader.run_view().await - }) - }; - task_handles.push(leader_handle); - } - - let quorum_exchange = c_api.inner.exchanges.quorum_exchange(); - if quorum_exchange.clone().is_leader(cur_view + 1) { - let next_leader = ConsensusNextLeader { - id: hotshot.inner.id, - consensus: hotshot.inner.consensus.clone(), - cur_view, - api: c_api.clone(), - generic_qc: high_qc.clone(), - vote_collection_chan: recv_commitment_vote_chan, - quorum_exchange: quorum_exchange.clone().into(), - _pd: PhantomData, - }; - let next_leader_handle = async_spawn(async move { next_leader.run_view().await }); - task_handles.push(next_leader_handle); - } - let da_member = DAMember { - id: hotshot.inner.id, - consensus: hotshot.inner.consensus.clone(), - proposal_collection_chan: recv_member, - cur_view, - high_qc: high_qc.clone(), - api: c_api.clone(), - exchange: committee_exchange.clone().into(), - _pd: PhantomData, - }; - let member_handle = async_spawn(async move { da_member.run_view().await }); - task_handles.push(member_handle); - let replica = SequencingReplica { - id: hotshot.inner.id, - consensus: hotshot.inner.consensus.clone(), - proposal_collection_chan: recv_replica, - cur_view, - high_qc: high_qc.clone(), - api: c_api.clone(), - committee_exchange: committee_exchange.clone().into(), - quorum_exchange: quorum_exchange.clone().into(), - _pd: PhantomData, - }; - let replica_handle = async_spawn(async move { replica.run_view().await }); - task_handles.push(replica_handle); - - let children_finished = futures::future::join_all(task_handles); - - async_spawn({ - let next_view_timeout = hotshot.inner.config.next_view_timeout; - let hotshot: SystemContext = hotshot.clone(); - async move { - async_sleep(Duration::from_millis(next_view_timeout)).await; - hotshot - .timeout_view(cur_view, send_member, Some(send_commitment_vote_chan)) - .await; - hotshot - .timeout_view(cur_view, send_replica, Some(send_da_vote_chan)) - .await; - } - }); - - let results = children_finished.await; - - // unwrap is fine since results must have >= 1 item(s) - #[cfg(async_executor_impl = "async-std")] - let high_qc = results - .into_iter() - .max_by_key(|qc: &QuorumCertificate>| qc.view_number) - .unwrap(); - #[cfg(async_executor_impl = "tokio")] - let high_qc = results - .into_iter() - .filter_map(std::result::Result::ok) - .max_by_key(|qc| qc.view_number) - .unwrap(); - - let mut consensus = hotshot.inner.consensus.write().await; - consensus.high_qc = high_qc; - c_api.send_view_finished(consensus.cur_view).await; - Ok(()) } } ->>>>>>> c407e538b (feat: crates directory used by all crates):crates/hotshot/src/lib.rs /// A handle that exposes the interface that hotstuff needs to interact with [`HotShot`] #[derive(Clone)] struct HotShotValidatingConsensusApi> { diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 5c8935e9b1..e48e71d29b 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -18,7 +18,7 @@ pub use hotshot_types::traits::network::{ use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; /// Contains the metrics that we're interested in from the networking interfaces -pub(self) struct NetworkingMetrics { +pub struct NetworkingMetrics { #[allow(dead_code)] /// A [`Gauge`] which tracks how many peers are connected pub connected_peers: Box, diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 8cdd70351f..d6269e2c38 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -349,7 +349,7 @@ impl ConnectedNetwork for Memory .serialize(&message) .context(FailedToSerializeSnafu)?; trace!("Message bincoded, sending"); - for node in self.inner.master_map.map.iter() { + for node in &self.inner.master_map.map { let (key, node) = node.pair(); if !recipients.contains(key) { continue; diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 164a2e5a78..3a7236c353 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -136,6 +136,7 @@ impl NetworkNodeHandle { /// # Panics /// /// Will panic if a handler is already spawned + #[allow(clippy::unused_async)] pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, diff --git a/task/src/global_registry.rs b/task/src/global_registry.rs index de16b86a60..1977c21c76 100644 --- a/task/src/global_registry.rs +++ b/task/src/global_registry.rs @@ -196,7 +196,7 @@ impl GlobalRegistry { /// checks if all registered tasks have completed pub async fn is_shutdown(&mut self) -> bool { let task_list = self.state_list.read().await; - for (_uid, task) in task_list.iter() { + for task in (*task_list).values() { if task.0.get_status() != TaskStatus::Completed { return false; } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index e188de25af..1d2985c56d 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -192,6 +192,7 @@ impl Message, > for SequencingLibp2pExchange { + #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, num_bootstrap: usize, @@ -272,6 +273,7 @@ impl Message, > for SequencingMemoryExchange { + #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, num_bootstrap: usize, @@ -389,6 +391,7 @@ impl Message, > for SequencingWebExchanges { + #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, num_bootstrap: usize, @@ -503,6 +506,7 @@ impl Message, > for SequencingFallbackExchange { + #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, num_bootstrap: usize, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 425f0b1b25..a29c840056 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -227,6 +227,8 @@ impl> Consensus { /// garbage collects based on state change /// right now, this removes from both the `saved_blocks` /// and `state_map` fields of `Consensus` + /// # Panics + /// On inconsistent stored entries #[allow(clippy::unused_async)] // async for API compatibility reasons pub async fn collect_garbage( &mut self, diff --git a/types/src/data.rs b/types/src/data.rs index 97a023ef3e..68fc0ec064 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -815,6 +815,8 @@ pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitm } /// Serialization for the QC assembled signature +/// # Panics +/// if serialization fails pub fn serialize_signature(signature: &AssembledSignature) -> Vec { let mut signatures_bytes = vec![]; let signatures: Option<::QCType> = match &signature { diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index 64e748016a..49bcfd150a 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -52,7 +52,7 @@ pub trait StakeTableScheme { { let _ = new_keys .into_iter() - .zip(amounts.into_iter()) + .zip(amounts) .try_for_each(|(key, amount)| Self::register(self, key, amount)); Ok(()) } From 6b317f2e914d12bca8a29a9045c8f59d6ffefd9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 01:41:33 +0000 Subject: [PATCH 0030/1393] Bump nix from 0.26.2 to 0.27.1 Bumps [nix](https://github.com/nix-rust/nix) from 0.26.2 to 0.27.1. - [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md) - [Commits](https://github.com/nix-rust/nix/compare/v0.26.2...v0.27.1) --- updated-dependencies: - dependency-name: nix dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- libp2p-networking/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index e8d9a8df46..31988a9bac 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -61,7 +61,7 @@ async-std = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] ## lossy_network dependencies -nix = { version = "0.26.2", optional = true } +nix = { version = "0.27.1", optional = true } rtnetlink = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.1", features = [ "smol_socket", ], default-features = false, optional = true } From 7a73a22884bccfbb728eaeea0b3a9b58027f7129 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 5 Sep 2023 16:23:32 -0700 Subject: [PATCH 0031/1393] Fix lint --- testing/tests/view_sync_task.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 21e9c681bb..25cbe55b9a 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,24 +1,24 @@ use commit::Committable; -use hotshot::types::SignatureKey; -use hotshot::HotShotSequencingConsensusApi; -use hotshot_consensus::traits::ConsensusSharedApi; +use hotshot::{types::SignatureKey, HotShotSequencingConsensusApi}; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_testing::node_types::{SequencingMemoryImpl, SequencingTestTypes}; -use hotshot_types::traits::election::ViewSyncExchangeType; use hotshot_types::{ data::ViewNumber, traits::{ - election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, + consensus_api::ConsensusSharedApi, + election::{ConsensusExchange, ViewSyncExchangeType}, + node_implementation::ExchangesType, + state::ConsensusTime, }, }; use std::collections::HashMap; #[cfg(test)] #[cfg_attr( - feature = "tokio-executor", + async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] -#[cfg_attr(feature = "async-std-executor", async_std::test)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { use core::panic; From 4c5cc62d155e718b23e9e0579cfb0ac505d7e364 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 7 Sep 2023 09:30:58 -0400 Subject: [PATCH 0032/1393] fix duplicate votes, split catchup tests --- task-impls/src/consensus.rs | 2 +- testing/src/test_runner.rs | 2 +- testing/tests/catchup.rs | 135 ++++++++++++++++++++++++++++++++++-- types/src/vote.rs | 13 ++-- 4 files changed, 139 insertions(+), 13 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dd65059a84..03fcc387a8 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1195,7 +1195,7 @@ where data: proposal, signature, }; - error!("Sending proposal for view {:?} \n {:?}", self.cur_view, ""); + debug!("Sending proposal for view {:?} \n {:?}", self.cur_view, ""); self.event_stream .publish(SequencingHotShotEvent::QuorumProposalSend( diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index d39eccd4ab..57c82d24e9 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -167,7 +167,7 @@ where // Start hotshot for node in nodes { - if !late_start_nodes.contains(&node.node_id.try_into().unwrap()) { + if !late_start_nodes.contains(&node.node_id) { node.handle.hotshot.start_consensus().await; } } diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 15c55abdca..56a90af16f 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -23,23 +23,146 @@ async fn test_catchup() { }; let mut metadata = TestMetadata::default(); let catchup_nodes = vec![ - ChangeNode { + ChangeNode { + idx: 18, + updown: UpDown::Up, + }, + ChangeNode { + idx: 19, + updown: UpDown::Up, + }, + ]; + + metadata.timing_data = timing_data; + metadata.start_nodes = 18; + metadata.total_nodes = 20; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(1, 0), catchup_nodes)], + }; + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(10000), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +/// Test that one node catches up and has sucessful views after coming back +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_catchup_one_node() { + use std::time::Duration; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, - }, - ChangeNode { - idx: 19, - updown: UpDown::Up, }]; metadata.timing_data = timing_data; - metadata.start_nodes = 18; + metadata.start_nodes = 19; metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(Duration::new(1, 0), catchup_nodes)], }; + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(10000), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }; + // only alow for the view which the catchup node hasn't started to fail + metadata.overall_safety_properties.num_failed_views = 1; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +/// Same as `test_catchup` except we start the nodes after their leadership so they join during view sync +/// This fails for the same reason as the timeout test and should work once that is fixed. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] +async fn test_catchup_in_view_sync() { + use std::time::Duration; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let catchup_nodes = vec![ + ChangeNode { + idx: 18, + updown: UpDown::Up, + }, + ChangeNode { + idx: 19, + updown: UpDown::Up, + }, + ]; + + metadata.timing_data = timing_data; + metadata.start_nodes = 18; + metadata.total_nodes = 20; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(5, 0), catchup_nodes)], + }; + metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { diff --git a/types/src/vote.rs b/types/src/vote.rs index 7931001d60..cdbb8cb03f 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -27,6 +27,7 @@ use std::{ fmt::Debug, num::NonZeroU64, }; +use tracing::error; /// The vote sent by consensus messages. pub trait VoteType: @@ -340,6 +341,10 @@ where .entry(commitment) .or_insert_with(|| (0, BTreeMap::new())); + // Check for duplicate vote + if total_vote_map.contains_key(&key) { + return Either::Left(self); + } let (da_stake_casted, da_vote_map) = self .da_vote_outcomes .entry(commitment) @@ -374,13 +379,11 @@ where // stake of all votes, in case they correspond to inconsistent // commitments. - // Check for duplicate vote - if total_vote_map.contains_key(&key) { - // error!("Duplicate vote"); + // update the active_keys and sig_lists + if self.signers.get(node_id).as_deref() == Some(&true) { + error!("node id already in signers"); return Either::Left(self); } - - // update the active_keys and sig_lists self.signers.set(node_id, true); self.sig_lists.push(origianl_sig); From 472b210936d2af43a64c2e1e554de682457ff4b0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 7 Sep 2023 10:33:55 -0400 Subject: [PATCH 0033/1393] increase timeout of catchup tests for ci --- testing/tests/catchup.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 56a90af16f..2cbc6c8a16 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -44,7 +44,7 @@ async fn test_catchup() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(10000), + duration: Duration::from_millis(100000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { @@ -100,7 +100,7 @@ async fn test_catchup_one_node() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(10000), + duration: Duration::from_millis(100000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { From ad4a3deebc44d336db114760fecccbc985008ecf Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 7 Sep 2023 18:22:31 -0400 Subject: [PATCH 0034/1393] add examples to justfile (#1683) --- hotshot/examples/web-server-da/README.md | 30 ++++++++++++------------ 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/hotshot/examples/web-server-da/README.md b/hotshot/examples/web-server-da/README.md index df22dc3357..a65ec0bb92 100644 --- a/hotshot/examples/web-server-da/README.md +++ b/hotshot/examples/web-server-da/README.md @@ -1,29 +1,29 @@ Commands to run da examples: 1a)Start web servers by either running 3 servers: -cargo run --example web-server --profile=release-lto --features="full-ci" -cargo run --example web-server --profile=release-lto --features="full-ci" -cargo run --example web-server --profile=release-lto --features="full-ci" +just async_std example web-server -- +just async_std example web-server -- +just async_std example web-server -- 1b)Or use multi-web-server to spin up all three: -cargo run --example multi-web-server --profile=release-lto --features="full-ci" +just async_std example multi-web-server -- 2) Start orchestrator: -cargo run --example web-server-da-orchestrator --features="full-ci,channel-async-std" --profile=release-lto -- +just async_std example web-server-da-orchestrator -- 3a) Start validator: -cargo run --profile=release-lto --example web-server-da-validator --features="full-ci" +just async_std example web-server-da-validator -- 3b) Or start multiple validators: -cargo run --profile=release-lto --example multi-validator --features="full-ci" +just async_std example multi-validator -- I.e. -cargo run --example web-server --profile=release-lto --features="full-ci" 9000 -cargo run --example web-server --profile=release-lto --features="full-ci" 9001 -cargo run --example web-server --profile=release-lto --features="full-ci" 9002 -cargo run --example web-server-da-orchestrator --features="full-ci,channel-async-std" --profile=release-lto -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml -cargo run --profile=release-lto --example web-server-da-validator --features="full-ci" 2 0.0.0.0 4444 +just async_std example web-server -- 9000 +just async_std example web-server -- 9001 +just async_std example web-server -- 9002 +just async_std example web-server-da-orchestrator -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml +just async_std example web-server-da-validator -- 2 0.0.0.0 4444 OR: -cargo run --example multi-web-server --profile=release-lto --features="full-ci" 9000 9001 9002 -cargo run --example web-server-da-orchestrator --features="full-ci,channel-async-std" --profile=release-lto -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml -cargo run --profile=release-lto --example multi-validator --features="full-ci" 10 0.0.0.0 4444 \ No newline at end of file +just async_std example multi-web-server -- 9000 9001 9002 +just async_std example web-server-da-orchestrator -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml +just async_std example multi-validator -- 10 0.0.0.0 4444 \ No newline at end of file From 245fb524af3d1049d3af1e6a0e90174c3d62cc33 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 7 Sep 2023 15:38:22 -0700 Subject: [PATCH 0035/1393] Rename Block to BlockPayload --- hotshot/src/demos/sdemo.rs | 16 ++++--- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/traits.rs | 2 +- hotshot/src/traits/storage/atomic_storage.rs | 16 +++---- .../atomic_storage/dual_key_value_store.rs | 6 +-- hotshot/src/traits/storage/memory_storage.rs | 4 +- task-impls/src/consensus.rs | 4 +- task-impls/src/da.rs | 2 +- testing/src/task_helpers.rs | 2 +- testing/tests/atomic_storage.rs | 2 +- types/src/data.rs | 42 +++++++++---------- types/src/traits.rs | 2 +- types/src/traits/block_contents.rs | 27 ++++++------ types/src/traits/election.rs | 8 ++-- types/src/traits/node_implementation.rs | 16 +++---- types/src/traits/state.rs | 10 ++--- types/src/traits/storage.rs | 6 +-- 17 files changed, 87 insertions(+), 80 deletions(-) diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 97117347a9..f8264ab612 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -26,7 +26,7 @@ use hotshot_types::{ election::Membership, node_implementation::NodeType, state::{ConsensusTime, TestableBlock, TestableState}, - Block, State, + BlockPayload, State, }, }; use rand::Rng; @@ -82,7 +82,7 @@ pub struct SDemoGenesisBlock {} /// Any block after genesis #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct SDemoNormalBlock { - /// Block state commitment + /// BlockPayload state commitment pub previous_state: (), /// Transaction vector pub transactions: Vec, @@ -174,10 +174,14 @@ impl Display for SDemoBlock { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { SDemoBlock::Genesis(_) => { - write!(f, "SDemo Genesis Block") + write!(f, "SDemo Genesis BlockPayload") } SDemoBlock::Normal(block) => { - write!(f, "SDemo Normal Block #txns={}", block.transactions.len()) + write!( + f, + "SDemo Normal BlockPayload #txns={}", + block.transactions.len() + ) } } } @@ -196,7 +200,7 @@ impl TestableBlock for SDemoBlock { } } -impl Block for SDemoBlock { +impl BlockPayload for SDemoBlock { type Error = SDemoError; type Transaction = SDemoTransaction; @@ -278,7 +282,7 @@ impl TestableState for SDemoState { _state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { SDemoTransaction { id: rng.gen_range(0..10), padding: vec![0; padding as usize], diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 55e430cbe4..9d3b95bbdb 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -37,7 +37,7 @@ use hotshot_types::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, }, state::ConsensusTime, - Block, + BlockPayload, }, vote::{ViewSyncData, VoteType}, }; diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 0f751912c5..a0e56cf86d 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -4,7 +4,7 @@ mod networking; mod node_implementation; mod storage; -pub use hotshot_types::traits::{Block, State}; +pub use hotshot_types::traits::{BlockPayload, State}; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; pub use storage::{Result as StorageResult, Storage}; diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs index 9d054fe1ac..4654d65ca7 100644 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -33,7 +33,7 @@ where atomic_store: Mutex, /// The Blocks stored by this [`AtomicStorage`] - blocks: HashMapStore, STATE::Block>, + blocks: HashMapStore, STATE::BlockPayload>, /// The [`QuorumCertificate`]s stored by this [`AtomicStorage`] qcs: DualKeyValueStore>, @@ -142,15 +142,15 @@ impl Storage for AtomicStorage { #[instrument(name = "AtomicStorage::get_block", skip_all)] async fn get_block( &self, - hash: &Commitment, - ) -> StorageResult> { + hash: &Commitment, + ) -> StorageResult> { Ok(self.inner.blocks.get(hash).await) } #[instrument(name = "AtomicStorage::get_qc", skip_all)] async fn get_qc( &self, - hash: &Commitment, + hash: &Commitment, ) -> StorageResult>> { Ok(self.inner.qcs.load_by_key_1_ref(hash).await) } @@ -176,7 +176,7 @@ impl Storage for AtomicStorage { #[instrument(name = "AtomicStorage::get_leaf_by_block", skip_all)] async fn get_leaf_by_block( &self, - hash: &Commitment, + hash: &Commitment, ) -> StorageResult>> { Ok(self.inner.leaves.load_by_key_2_ref(hash).await) } @@ -187,7 +187,7 @@ impl Storage for AtomicStorage { } async fn get_internal_state(&self) -> StorageState { - let mut blocks: Vec<(Commitment, STATE::Block)> = + let mut blocks: Vec<(Commitment, STATE::BBlockPayloadlock)> = self.inner.blocks.load_all().await.into_iter().collect(); blocks.sort_by_key(|(hash, _)| *hash); @@ -226,8 +226,8 @@ impl<'a, STATE: StateContents + 'static> StorageUpdater<'a, STATE> #[instrument(name = "AtomicStorage::get_block", skip_all)] async fn insert_block( &mut self, - hash: Commitment, - block: STATE::Block, + hash: Commitment, + block: STATE::BlockPayload, ) -> StorageResult { trace!(?block, "inserting block"); self.inner diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs index 70431af012..84cf8c76a2 100644 --- a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs +++ b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs @@ -181,7 +181,7 @@ pub trait DualKeyValue: Serialize + DeserializeOwned + Clone { } impl DualKeyValue for QuorumCertificate { - type Key1 = Commitment; + type Key1 = Commitment; type Key2 = ViewNumber; const KEY_1_NAME: &'static str = "block_commitment"; @@ -200,7 +200,7 @@ where STATE: StateContents, { type Key1 = Commitment>; - type Key2 = Commitment; + type Key2 = Commitment; const KEY_1_NAME: &'static str = "leaf_commitment"; const KEY_2_NAME: &'static str = "block_commitment"; @@ -210,6 +210,6 @@ where } fn key_2(&self) -> Self::Key2 { - ::commit(&self.deltas) + ::commit(&self.deltas) } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 2a9e570a76..a8727ee79b 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -126,7 +126,7 @@ mod test { block_contents::dummy::{DummyBlock, DummyState}, node_implementation::NodeType, state::ConsensusTime, - Block, + BlockPayload, }, }; use std::{fmt::Debug, hash::Hash}; @@ -152,7 +152,7 @@ mod test { type BlockType = DummyBlock; type SignatureKey = BN254Pub; type VoteTokenType = StaticVoteToken; - type Transaction = ::Transaction; + type Transaction = ::Transaction; type ElectionConfigType = StaticElectionConfig; type StateType = DummyState; } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 03fcc387a8..dc1d170116 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -31,7 +31,7 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType, SequencingQuorumEx}, signature_key::SignatureKey, state::ConsensusTime, - Block, + BlockPayload, }, utils::{Terminator, ViewInner}, vote::{QuorumVote, VoteAccumulator, VoteType}, @@ -1160,7 +1160,7 @@ where let block_commitment = self.block.commit(); if block_commitment == TYPES::BlockType::new().commit() { - debug!("Block is generic block! {:?}", self.cur_view); + debug!("BlockPayload is generic block! {:?}", self.cur_view); } let leaf = SequencingLeaf { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c7d20786ab..a0011ad8af 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -27,7 +27,7 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, - Block, State, + BlockPayload, State, }, utils::ViewInner, vote::VoteAccumulator, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 13868721d4..6bba6f8d87 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -6,7 +6,7 @@ use commit::Committable; use either::Right; use hotshot::{ certificate::QuorumCertificate, - traits::{Block, NodeImplementation, TestableNodeImplementation}, + traits::{BlockPayload, NodeImplementation, TestableNodeImplementation}, types::{bn254::BN254Pub, SignatureKey, SystemContextHandle}, HotShotInitializer, HotShotSequencingConsensusApi, SystemContext, }; diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index 3b97a60c9c..c8183af164 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -6,7 +6,7 @@ use hotshot::{ random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, VDemoState, }, - traits::{Block, State, Storage}, + traits::{BlockPayload, State, Storage}, }; use hotshot_types::{data::ViewNumber, traits::state::TestableState}; use rand::thread_rng; diff --git a/types/src/data.rs b/types/src/data.rs index 68fc0ec064..4a79038703 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -15,7 +15,7 @@ use crate::{ signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, storage::StoredView, - Block, State, + BlockPayload, State, }, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; @@ -101,7 +101,7 @@ impl std::ops::Sub for ViewNumber { } /// The `Transaction` type associated with a `State`, as a syntactic shortcut -pub type Transaction = <::BlockType as Block>::Transaction; +pub type Transaction = <::BlockType as BlockPayload>::Transaction; /// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut pub type TxnCommitment = Commitment>; @@ -131,14 +131,14 @@ where #[debug(skip)] pub parent_commitment: Commitment, - /// Block leaf wants to apply + /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, /// What the state should be after applying `self.deltas` pub state_commitment: Commitment, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the propser id pub proposer_id: EncodedPublicKey, @@ -147,7 +147,7 @@ where /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { - /// Block leaf wants to apply + /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, /// View this proposal applies to pub view_number: TYPES::Time, @@ -233,14 +233,14 @@ pub trait ProposalType: /// full. It is guaranteed to contain, at least, a cryptographic commitment to the block, and it /// provides an interface for resolving the commitment to a full block if the full block is /// available. -pub trait DeltasType: +pub trait DeltasType: Clone + Debug + for<'a> Deserialize<'a> + PartialEq + Eq + std::hash::Hash + Send + Serialize + Sync { /// Errors reported by this type. type Error: std::error::Error; /// Get a cryptographic commitment to the block represented by this delta. - fn block_commitment(&self) -> Commitment; + fn block_commitment(&self) -> Commitment; /// Get the full block if it is available, otherwise return this object unchanged. /// @@ -248,7 +248,7 @@ pub trait DeltasType: /// /// Returns the original [`DeltasType`], unchanged, in an [`Err`] variant in the case where the /// full block is not currently available. - fn try_resolve(self) -> Result; + fn try_resolve(self) -> Result; /// Fill this [`DeltasType`] by providing a complete block. /// @@ -259,7 +259,7 @@ pub trait DeltasType: /// /// Fails if `block` does not match `self.block_commitment()`, or if the block is not able to be /// stored for some implementation-defined reason. - fn fill(&mut self, block: Block) -> Result<(), Self::Error>; + fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; } /// Error which occurs when [`DeltasType::fill`] is called with a block that does not match the @@ -442,10 +442,10 @@ pub type LeafDeltasError = as DeltasType pub type LeafNode = ::NodeType; /// The [`StateType`] in a [`LeafType`]. pub type LeafState = as NodeType>::StateType; -/// The [`Block`] in a [`LeafType`]. +/// The [`BlockPayload`] in a [`LeafType`]. pub type LeafBlock = as NodeType>::BlockType; /// The [`Transaction`] in a [`LeafType`]. -pub type LeafTransaction = as Block>::Transaction; +pub type LeafTransaction = as BlockPayload>::Transaction; /// The [`ConsensusTime`] used by a [`LeafType`]. pub type LeafTime = as NodeType>::Time; @@ -459,12 +459,12 @@ pub trait TestableLeaf { &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as Block>::Transaction; + ) -> <::BlockType as BlockPayload>::Transaction; } /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. -/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::Block` +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` #[derive(Serialize, Deserialize, Clone, Debug, Derivative)] #[serde(bound(deserialize = ""))] #[derivative(Hash, PartialEq, Eq)] @@ -482,14 +482,14 @@ pub struct ValidatingLeaf { /// So we can ask if it extends pub parent_commitment: Commitment>, - /// Block leaf wants to apply + /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, /// What the state should be AFTER applying `self.deltas` pub state: TYPES::StateType, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats #[derivative(PartialEq = "ignore")] @@ -504,7 +504,7 @@ pub struct ValidatingLeaf { /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. -/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::Block` +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` #[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] #[serde(bound(deserialize = ""))] pub struct SequencingLeaf { @@ -525,7 +525,7 @@ pub struct SequencingLeaf { pub deltas: Either>, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats pub timestamp: i128, @@ -642,7 +642,7 @@ impl LeafType for ValidatingLeaf { self.state.clone() } - fn get_rejected(&self) -> Vec<::Transaction> { + fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } @@ -680,7 +680,7 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as Block>::Transaction { + ) -> <::BlockType as BlockPayload>::Transaction { ::create_random_transaction( Some(&self.state), rng, @@ -757,7 +757,7 @@ impl LeafType for SequencingLeaf { // The Sequencing Leaf doesn't have a state. fn get_state(&self) -> Self::MaybeState {} - fn get_rejected(&self) -> Vec<::Transaction> { + fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } @@ -794,7 +794,7 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as Block>::Transaction { + ) -> <::BlockType as BlockPayload>::Transaction { TYPES::StateType::create_random_transaction(None, rng, padding) } } diff --git a/types/src/traits.rs b/types/src/traits.rs index c6a76acea3..9c25e5fcb8 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -11,5 +11,5 @@ pub mod stake_table; pub mod state; pub mod storage; -pub use block_contents::Block; +pub use block_contents::BlockPayload; pub use state::State; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index b7d9b4da44..076914e8c7 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -1,7 +1,7 @@ //! Abstraction over the contents of a block //! -//! This module provides the [`Block`] trait, which describes the behaviors that a block is -//! expected to have. +//! This module provides the [`BlockPayload`] and [`BlockHeader`] traits, which describe the +//! behaviors that a block is expected to have. use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -15,14 +15,15 @@ use std::{ /// Abstraction over the full contents of a block /// -/// This trait encapsulates the behaviors that a block must have in order to be used by consensus: -/// * Must have a predefined error type ([`Block::Error`]) +/// This trait encapsulates the behaviors that the transactions of a block must have in order to be +/// used by consensus +/// * Must have a predefined error type ([`BlockPayload::Error`]) /// * Must have a transaction type that can be compared for equality, serialized and serialized, /// sent between threads, and can have a hash produced of it /// * Must be able to be produced incrementally by appending transactions -/// ([`add_transaction_raw`](Block::add_transaction_raw)) +/// ([`add_transaction_raw`](BlockPayload::add_transaction_raw)) /// * Must be hashable -pub trait Block: +pub trait BlockPayload: Serialize + Clone + Debug @@ -61,19 +62,19 @@ pub trait Block: /// Commitment to a block, used by data availibity #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] #[serde(bound(deserialize = ""), transparent)] -pub struct BlockCommitment(pub Commitment); +pub struct BlockCommitment(pub Commitment); -/// Abstraction over any type of transaction. Used by [`Block`]. +/// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { } -/// Dummy implementation of `BlockContents` for unit tests +/// Dummy implementation of `BlockPayload` for unit tests pub mod dummy { use std::fmt::Display; - use super::{Block, Commitment, Committable, Debug, Hash, HashSet, Serialize}; + use super::{BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize}; use rand::Rng; use serde::Deserialize; @@ -107,7 +108,7 @@ pub mod dummy { impl Committable for DummyTransaction { fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy Block Comm") + commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") .u64_field("Dummy Field", 0) .finalize() } @@ -132,7 +133,7 @@ pub mod dummy { } } - impl Block for DummyBlock { + impl BlockPayload for DummyBlock { type Error = DummyError; type Transaction = DummyTransaction; @@ -167,7 +168,7 @@ pub mod dummy { impl Committable for DummyBlock { fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy Block Comm") + commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") .u64_field("Nonce", self.nonce) .finalize() } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81a173e954..adb3386bdc 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -93,9 +93,11 @@ pub enum VoteData { impl Committable for VoteData { fn commit(&self) -> Commitment { match self { - VoteData::DA(block_commitment) => commit::RawCommitmentBuilder::new("DA Block Commit") - .field("block_commitment", *block_commitment) - .finalize(), + VoteData::DA(block_commitment) => { + commit::RawCommitmentBuilder::new("DA BlockPayload Commit") + .field("block_commitment", *block_commitment) + .finalize() + } VoteData::Yes(leaf_commitment) => commit::RawCommitmentBuilder::new("Yes Vote Commit") .field("leaf_commitment", *leaf_commitment) .finalize(), diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ba5abdebb..e7a52675b0 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -19,7 +19,7 @@ use crate::{ message::{ConsensusMessageType, Message, SequencingMessage}, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, - storage::Storage, Block, + storage::Storage, BlockPayload, }, }; use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; @@ -184,7 +184,7 @@ pub trait ExchangesType, MESSA /// Get the view sync exchange. fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; - /// Block the underlying networking interfaces until node is successfully initialized into the + /// BlockPayload the underlying networking interfaces until node is successfully initialized into the /// networks. async fn wait_for_networks_ready(&self); @@ -363,7 +363,7 @@ pub trait TestableNodeImplementation: NodeImplementation state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; /// Creates random transaction if possible /// otherwise panics @@ -372,7 +372,7 @@ pub trait TestableNodeImplementation: NodeImplementation leaf: &Self::Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; /// generate a genesis block fn block_genesis() -> TYPES::BlockType; @@ -438,7 +438,7 @@ where state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { ::create_random_transaction(state, rng, padding) } @@ -446,7 +446,7 @@ where leaf: &Self::Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { ::create_random_transaction(leaf, rng, padding) } @@ -565,14 +565,14 @@ pub trait NodeType: /// The block type that this hotshot setup is using. /// /// This should be the same block that `StateType::BlockType` is using. - type BlockType: Block; + type BlockType: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; /// The vote token that this hotshot setup is using. type VoteTokenType: VoteToken; /// The transaction type that this hotshot setup is using. /// - /// This should be equal to `Block::Transaction` + /// This should be equal to `BlockPayload::Transaction` type Transaction: Transaction; /// The election config type that this hotshot setup is using. type ElectionConfigType: ElectionConfig; diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 83fb250442..93ff3e6603 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -3,7 +3,7 @@ //! This module provides the [`State`] trait, which serves as an compatibility over the current //! network state, which is modified by the transactions contained within blocks. -use crate::traits::Block; +use crate::traits::BlockPayload; use commit::Committable; use espresso_systems_common::hotshot::tag; use serde::{de::DeserializeOwned, Serialize}; @@ -19,7 +19,7 @@ use std::{ /// /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](State::Error)) -/// * The type of block that modifies this type of state ([`Block`](State::BlockType)) +/// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockType)) /// * A method to get a template (empty) next block from the current state /// ([`next_block`](State::next_block)) /// * The ability to validate that a block is actually a valid extension of this state @@ -42,7 +42,7 @@ pub trait State: /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; /// The type of block this state is associated with - type BlockType: Block; + type BlockType: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; @@ -109,11 +109,11 @@ where state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; } /// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: Block + Debug { +pub trait TestableBlock: BlockPayload + Debug { /// generate a genesis block fn genesis() -> Self; diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 41b7cc225a..20c8f87f9a 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -4,7 +4,7 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ certificate::QuorumCertificate, data::LeafType, - traits::{election::SignedCertificate, Block}, + traits::{election::SignedCertificate, BlockPayload}, }; use async_trait::async_trait; use commit::Commitment; @@ -152,7 +152,7 @@ where TYPES: NodeType, LEAF: LeafType, { - /// Create a new `StoredView` from the given QC, Block and State. + /// Create a new `StoredView` from the given QC, `BlockPayload` and State. /// /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. pub fn from_qc_block_and_state( @@ -161,7 +161,7 @@ where state: LEAF::MaybeState, height: u64, parent_commitment: Commitment, - rejected: Vec<::Transaction>, + rejected: Vec<::Transaction>, proposer_id: EncodedPublicKey, ) -> Self { Self { From 9a970d16727f7cddddce086c245d41255531ec78 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 7 Sep 2023 16:06:40 -0700 Subject: [PATCH 0036/1393] Add commitment function --- hotshot/src/demos/sdemo.rs | 6 ++++++ types/src/traits/block_contents.rs | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index f8264ab612..55ce5c1f3e 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -233,6 +233,12 @@ impl BlockPayload for SDemoBlock { .collect(), } } + + fn commitment(&self) -> Commitment { + // TODO: Get the payload commitment after VID is added. + // https://github.com/EspressoSystems/HotShot/issues/1673 + unimplemented!(); + } } impl State for SDemoState { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 076914e8c7..4a1bd0a4e1 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -57,6 +57,9 @@ pub trait BlockPayload: /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec fn contained_transactions(&self) -> HashSet>; + + /// Compute the VID payload commitment. + fn commitment(&self) -> Commitment; } /// Commitment to a block, used by data availibity @@ -154,6 +157,12 @@ pub mod dummy { fn contained_transactions(&self) -> HashSet> { HashSet::new() } + + fn commitment(&self) -> Commitment { + // TODO: Get the payload commitment after VID is added. + // https://github.com/EspressoSystems/HotShot/issues/1673 + unimplemented!(); + } } impl TestableBlock for DummyBlock { From fa40cf2d6845a90a9735e230ab77a940b160d522 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 08:13:18 -0400 Subject: [PATCH 0037/1393] Bump serde_json from 1.0.105 to 1.0.106 (#1708) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.105 to 1.0.106. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.105...v1.0.106) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- hotshot/Cargo.toml | 2 +- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index fd1ff23375..7edc7363c5 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -116,6 +116,6 @@ async-std = { workspace = true } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.105" +serde_json = "1.0.106" toml = { workspace = true } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 31988a9bac..9e9df1cd98 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -41,7 +41,7 @@ libp2p-noise = { version = "0.43.0", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.105" +serde_json = "1.0.106" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 7be4cc5a36..f65eac0983 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -50,7 +50,7 @@ bit-vec = "0.6.3" typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.105" +serde_json = "1.0.106" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 7a024f763217bd38e545fb20b9e39c9c6914872f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 11 Sep 2023 14:22:42 -0400 Subject: [PATCH 0038/1393] Accumulator2 trait: updated generics so trait is over TYPES, VOTE --- types/src/certificate.rs | 8 +++++++- types/src/traits/election.rs | 5 +++++ types/src/vote.rs | 22 ++++++++++++++++++++-- 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 47720eba75..ee19e96e39 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -8,7 +8,7 @@ use crate::{ signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, state::ConsensusTime, }, - vote::ViewSyncData, + vote::{ViewSyncData, VoteAccumulator}, }; use bincode::Options; use commit::{Commitment, Committable}; @@ -20,6 +20,8 @@ use std::{ ops::Deref, }; use tracing::debug; +use crate::vote::QuorumVote; +use crate::vote::AccumulatorPlaceholder; /// A `DACertificate` is a threshold signature that some data is available. /// It is signed by the members of the DA committee, not the entire network. It is used @@ -113,6 +115,7 @@ pub struct ViewSyncCertificateInternal { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Enum representing whether a signatures is for a 'Yes' or 'No' or 'DA' or 'Genesis' certificate +// TODO ED Should this be a trait? pub enum AssembledSignature { // (enum, signature) /// These signatures are for a 'Yes' certificate @@ -154,6 +157,9 @@ impl> SignedCertificate for QuorumCertificate { + type Vote = QuorumVote; + type VoteAccumulator = AccumulatorPlaceholder; + fn from_signatures_and_commitment( view_number: TYPES::Time, signatures: AssembledSignature, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81a173e954..3c2136bc57 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -42,6 +42,7 @@ use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, hash::Hash, marker::PhantomData, num::NonZeroU64}; use tracing::error; +use crate::vote::Accumulator2; /// Error for election problems #[derive(Snafu, Debug)] @@ -181,6 +182,10 @@ where COMMITTABLE: Committable + Serialize + Clone, TOKEN: VoteToken, { + type Vote: VoteType; + + type VoteAccumulator: Accumulator2; + /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( view_number: TIME, diff --git a/types/src/vote.rs b/types/src/vote.rs index cdbb8cb03f..d71dc18f25 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -7,7 +7,7 @@ use crate::{ certificate::{AssembledSignature, QuorumCertificate}, data::LeafType, traits::{ - election::{VoteData, VoteToken}, + election::{SignedCertificate, VoteData, VoteToken}, node_implementation::NodeType, signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, }, @@ -25,7 +25,7 @@ use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, - num::NonZeroU64, + num::NonZeroU64, marker::PhantomData, }; use tracing::error; @@ -187,6 +187,7 @@ pub enum QuorumVote> { /// Negative vote. No(YesOrNoVote), /// Timeout vote. + // TODO ED Remove this and make it it's own vote type, since it is not part of the QC type Timeout(TimeoutVote), } @@ -255,6 +256,22 @@ pub trait Accumulator: Sized { fn append(self, val: T) -> Either; } + +pub trait Accumulator2>: Sized +{ + fn append(self, vote: VOTE) -> Either>; +} + +pub struct AccumulatorPlaceholder> { + phantom: PhantomData<(TYPES, VOTE)> +} + +impl > Accumulator2 for AccumulatorPlaceholder { + fn append(self, vote: VOTE) -> Either> { + either::Left(self) + } +} + /// Mapping of commitments to vote tokens by key. type VoteMap = HashMap< Commitment, @@ -314,6 +331,7 @@ where #![allow(clippy::too_many_lines)] fn append( mut self, + // TODO ED Make this its own type to avoid extra long type signature val: ( Commitment, ( From 2d0685c1d9bc3204d42bd9dfe0152d239dbbdccc Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 11 Sep 2023 14:32:13 -0400 Subject: [PATCH 0039/1393] Placeholder accumulator in place for each certificate type --- types/src/certificate.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index ee19e96e39..b59a180255 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -8,7 +8,7 @@ use crate::{ signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, state::ConsensusTime, }, - vote::{ViewSyncData, VoteAccumulator}, + vote::{ViewSyncData, VoteAccumulator, DAVote, ViewSyncVote}, }; use bincode::Options; use commit::{Commitment, Committable}; @@ -230,6 +230,9 @@ impl> Committable impl SignedCertificate for DACertificate { + type Vote = DAVote; + type VoteAccumulator = AccumulatorPlaceholder; + fn from_signatures_and_commitment( view_number: TYPES::Time, signatures: AssembledSignature, @@ -318,6 +321,8 @@ impl SignedCertificate> for ViewSyncCertificate { + type Vote = ViewSyncVote; + type VoteAccumulator = AccumulatorPlaceholder; /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( view_number: TYPES::Time, From 6b0f303fa2bb5f30c454cee15f081bda00e277a1 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 11 Sep 2023 14:55:48 -0400 Subject: [PATCH 0040/1393] Placeholder accumulator in DA task --- task-impls/src/da.rs | 17 +++++++++++++++++ types/src/vote.rs | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c7d20786ab..9d0cc36e88 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -15,6 +15,8 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::traits::election::SignedCertificate; +use hotshot_types::vote::AccumulatorPlaceholder; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, @@ -34,6 +36,7 @@ use hotshot_types::{ }; use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; +use std::marker::PhantomData; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -104,6 +107,16 @@ pub struct DAVoteCollectionTaskState< /// the vote accumulator pub accumulator: Either, DACertificate>, + + pub accumulator2: Either< + as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + TYPES::BlockType, + >>::VoteAccumulator, + DACertificate, + >, // TODO ED Make this just "view" since it is only for this task /// the current view pub cur_view: TYPES::Time, @@ -379,10 +392,14 @@ where acc, None, ); + let accumulator2 = AccumulatorPlaceholder { + phantom: PhantomData::default(), + }; if view > collection_view { let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), accumulator, + accumulator2: either::Left(accumulator2), cur_view: view, event_stream: self.event_stream.clone(), id: self.id, diff --git a/types/src/vote.rs b/types/src/vote.rs index d71dc18f25..55804d1a02 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -263,7 +263,7 @@ pub trait Accumulator2>: Sized } pub struct AccumulatorPlaceholder> { - phantom: PhantomData<(TYPES, VOTE)> + pub phantom: PhantomData<(TYPES, VOTE)> } impl > Accumulator2 for AccumulatorPlaceholder { From 11c0d7a4e22e513bac16b10423c29346b5961aef Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 11 Sep 2023 15:30:46 -0400 Subject: [PATCH 0041/1393] Added get_view, get_signature, and get_key functions to Vote trait --- task-impls/src/consensus.rs | 12 +++++------ task-impls/src/network.rs | 2 +- types/src/certificate.rs | 10 +++++---- types/src/message.rs | 4 ++-- types/src/traits/election.rs | 17 ++++++++++++++++ types/src/vote.rs | 39 ++++++++++++++++++++++++++++++++---- 6 files changed, 67 insertions(+), 17 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 03fcc387a8..9462b389c4 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -368,7 +368,7 @@ where if let GeneralConsensusMessage::Vote(vote) = message { debug!( "Sending vote to next quorum leader {:?}", - vote.current_view() + vote.get_view() ); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) @@ -446,7 +446,7 @@ where if let GeneralConsensusMessage::Vote(vote) = message { debug!( "Sending vote to next quorum leader {:?}", - vote.current_view() + vote.get_view() ); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) @@ -890,13 +890,13 @@ where } } SequencingHotShotEvent::QuorumVoteRecv(vote) => { - debug!("Received quroum vote: {:?}", vote.current_view()); + debug!("Received quroum vote: {:?}", vote.get_view()); - if !self.quorum_exchange.is_leader(vote.current_view() + 1) { + if !self.quorum_exchange.is_leader(vote.get_view() + 1) { error!( "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.current_view() + 1, - self.quorum_exchange.is_leader(vote.current_view() + 2) + *vote.get_view() + 1, + self.quorum_exchange.is_leader(vote.get_view() + 2) ); return; } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a4b083ba37..b50a4143ea 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -208,7 +208,7 @@ impl< GeneralConsensusMessage::Vote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.current_view() + 1)), + Some(membership.get_leader(vote.get_view() + 1)), ), SequencingHotShotEvent::DAProposalSend(proposal, sender) => ( diff --git a/types/src/certificate.rs b/types/src/certificate.rs index b59a180255..dea84d01f1 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,5 +1,8 @@ //! Provides two types of cerrtificates and their accumulators. +use crate::vote::Accumulator; +use crate::vote::AccumulatorPlaceholder; +use crate::vote::QuorumVote; use crate::{ data::{fake_commitment, serialize_signature, LeafType}, traits::{ @@ -8,10 +11,11 @@ use crate::{ signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, state::ConsensusTime, }, - vote::{ViewSyncData, VoteAccumulator, DAVote, ViewSyncVote}, + vote::{DAVote, ViewSyncData, ViewSyncVote, VoteAccumulator}, }; use bincode::Options; use commit::{Commitment, Committable}; +use either::Either; use espresso_systems_common::hotshot::tag; use hotshot_utils::bincode::bincode_opts; use serde::{Deserialize, Serialize}; @@ -20,8 +24,6 @@ use std::{ ops::Deref, }; use tracing::debug; -use crate::vote::QuorumVote; -use crate::vote::AccumulatorPlaceholder; /// A `DACertificate` is a threshold signature that some data is available. /// It is signed by the members of the DA committee, not the entire network. It is used @@ -115,7 +117,7 @@ pub struct ViewSyncCertificateInternal { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Enum representing whether a signatures is for a 'Yes' or 'No' or 'DA' or 'Genesis' certificate -// TODO ED Should this be a trait? +// TODO ED Should this be a trait? pub enum AssembledSignature { // (enum, signature) /// These signatures are for a 'Yes' certificate diff --git a/types/src/message.rs b/types/src/message.rs index 89d2864aa9..16d542551c 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -359,7 +359,7 @@ impl< // this should match replica upon receipt p.data.get_view_number() } - GeneralConsensusMessage::Vote(vote_message) => vote_message.current_view(), + GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view(), GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { InternalTrigger::Timeout(time) => *time, }, @@ -376,7 +376,7 @@ impl< // this should match replica upon receipt p.data.get_view_number() } - CommitteeConsensusMessage::DAVote(vote_message) => vote_message.current_view(), + CommitteeConsensusMessage::DAVote(vote_message) => vote_message.get_view(), CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, } } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 3c2136bc57..cdc47b1898 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -186,7 +186,24 @@ where type VoteAccumulator: Accumulator2; + fn accumulate_vote(accumulator: Self::VoteAccumulator, vote: Self::Vote, commit: COMMITTABLE) -> Either { + // if !self.is_valid_vote( + // &vote.encoded_key, + // &vote.encoded_signature, + // vote.data.clone(), + // // Ignoring deserialization errors below since we are getting rid of it soon + // Checked::Unchecked(vote.vote_token.clone()), + // ) { + // error!("Invalid vote!"); + // return Either::Left(accumulator); + // } + // Call append + + todo!() + } + /// Build a QC from the threshold signature and commitment + // TODO ED Rename this function fn from_signatures_and_commitment( view_number: TIME, signatures: AssembledSignature, diff --git a/types/src/vote.rs b/types/src/vote.rs index 55804d1a02..d4c697508e 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -34,7 +34,12 @@ pub trait VoteType: Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq { /// The view this vote was cast for. - fn current_view(&self) -> TYPES::Time; + fn get_view(&self) -> TYPES::Time; + fn get_key(self) -> TYPES::SignatureKey; + fn get_signature(self) -> EncodedSignature; + fn get_data(self) -> TYPES::SignatureKey { + todo!() + } } /// A vote on DA proposal. @@ -192,15 +197,25 @@ pub enum QuorumVote> { } impl VoteType for DAVote { - fn current_view(&self) -> TYPES::Time { + fn get_view(&self) -> TYPES::Time { self.current_view } + fn get_key(self) -> ::SignatureKey { + self.signature_key() + } + fn get_signature(self) -> EncodedSignature { + // TODO ED Revisit this function + self.signature.1 + } } +#[deprecated] +// TODO ED Remove this impl DAVote { /// Get the signature key. /// # Panics /// If the deserialization fails. + #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { ::from_bytes(&self.signature.0).unwrap() } @@ -209,16 +224,24 @@ impl DAVote { impl> VoteType for QuorumVote { - fn current_view(&self) -> TYPES::Time { + fn get_view(&self) -> TYPES::Time { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.current_view, QuorumVote::Timeout(v) => v.current_view, } } + + fn get_key(self) -> ::SignatureKey { + self.signature_key() + } + fn get_signature(self) -> EncodedSignature { + self.signature() + } } impl> QuorumVote { /// Get the encoded signature. + #[deprecated] pub fn signature(&self) -> EncodedSignature { match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), @@ -228,6 +251,7 @@ impl> QuorumVote /// Get the signature key. /// # Panics /// If the deserialization fails. + #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), @@ -238,13 +262,20 @@ impl> QuorumVote } impl VoteType for ViewSyncVote { - fn current_view(&self) -> TYPES::Time { + fn get_view(&self) -> TYPES::Time { match self { ViewSyncVote::PreCommit(v) | ViewSyncVote::Commit(v) | ViewSyncVote::Finalize(v) => { v.round } } } + fn get_key(self) -> ::SignatureKey { + self.signature_key() + } + + fn get_signature(self) -> EncodedSignature { + self.signature() + } } /// The aggreation of votes, implemented by `VoteAccumulator`. From 12dbbee784528e7f2c526a87c1260305a9717afd Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 11 Sep 2023 17:42:15 -0700 Subject: [PATCH 0042/1393] Modify the return type of commit --- hotshot/Cargo.toml | 1 + hotshot/src/demos/sdemo.rs | 8 +++++--- types/src/traits/block_contents.rs | 18 ++++++++---------- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 7edc7363c5..e37d6a90b5 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -95,6 +95,7 @@ hotshot-types = { path = "../types", version = "0.1.0", default-features = false hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +jf-primitives = { workspace = true } libp2p = { workspace = true } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 55ce5c1f3e..de8858cfbe 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -29,6 +29,7 @@ use hotshot_types::{ BlockPayload, State, }, }; +use jf_primitives::vid::VidResult; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -234,9 +235,10 @@ impl BlockPayload for SDemoBlock { } } - fn commitment(&self) -> Commitment { - // TODO: Get the payload commitment after VID is added. - // https://github.com/EspressoSystems/HotShot/issues/1673 + fn commitment(&self) -> VidResult> { + // TODO: Get the payload commitment after VID integration. + // + // unimplemented!(); } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 4a1bd0a4e1..444767fb09 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -4,7 +4,8 @@ //! behaviors that a block is expected to have. use commit::{Commitment, Committable}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use jf_primitives::vid::VidResult; +use serde::{de::DeserializeOwned, Serialize}; use std::{ collections::HashSet, @@ -59,14 +60,9 @@ pub trait BlockPayload: fn contained_transactions(&self) -> HashSet>; /// Compute the VID payload commitment. - fn commitment(&self) -> Commitment; + fn commitment(&self) -> VidResult>; } -/// Commitment to a block, used by data availibity -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] -#[serde(bound(deserialize = ""), transparent)] -pub struct BlockCommitment(pub Commitment); - /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash @@ -78,6 +74,7 @@ pub mod dummy { use std::fmt::Display; use super::{BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize}; + use jf_primitives::vid::VidResult; use rand::Rng; use serde::Deserialize; @@ -158,9 +155,10 @@ pub mod dummy { HashSet::new() } - fn commitment(&self) -> Commitment { - // TODO: Get the payload commitment after VID is added. - // https://github.com/EspressoSystems/HotShot/issues/1673 + fn commitment(&self) -> VidResult> { + // TODO: Get the payload commitment after VID integration. + // + // unimplemented!(); } } From 5dddbba3ece9f8e8ead8a05d8ebe1187c6971df4 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 10:17:23 -0400 Subject: [PATCH 0043/1393] In process of adding COMMITTABLE as generic to VoteType trait --- types/src/certificate.rs | 6 +++--- types/src/traits/consensus_api.rs | 7 ++++--- types/src/traits/election.rs | 4 ++-- types/src/traits/network.rs | 2 ++ types/src/vote.rs | 16 ++++++++-------- 5 files changed, 19 insertions(+), 16 deletions(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index dea84d01f1..51dd5b4206 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -160,7 +160,7 @@ impl> for QuorumCertificate { type Vote = QuorumVote; - type VoteAccumulator = AccumulatorPlaceholder; + type VoteAccumulator = AccumulatorPlaceholder; fn from_signatures_and_commitment( view_number: TYPES::Time, @@ -233,7 +233,7 @@ impl SignedCertificate { type Vote = DAVote; - type VoteAccumulator = AccumulatorPlaceholder; + type VoteAccumulator = AccumulatorPlaceholder; fn from_signatures_and_commitment( view_number: TYPES::Time, @@ -324,7 +324,7 @@ impl for ViewSyncCertificate { type Vote = ViewSyncVote; - type VoteAccumulator = AccumulatorPlaceholder; + type VoteAccumulator = AccumulatorPlaceholder, Self::Vote>; /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( view_number: TYPES::Time, diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 2687107886..1f83df69f1 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -128,9 +128,10 @@ pub trait SequencingConsensusApi< LEAF: LeafType, I: NodeImplementation>, >: ConsensusSharedApi +// TODO ED VoteType should not always be over LEAF, but for the API it doesn't matter and we are removing it soon anyway { /// Send a direct message to the given recipient - async fn send_direct_message, VOTE: VoteType>( + async fn send_direct_message, VOTE: VoteType>( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, @@ -139,7 +140,7 @@ pub trait SequencingConsensusApi< /// send a direct message using the DA communication channel async fn send_direct_da_message< PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, >( &self, recipient: TYPES::SignatureKey, @@ -149,7 +150,7 @@ pub trait SequencingConsensusApi< /// Send a broadcast message to the entire network. async fn send_broadcast_message< PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, >( &self, message: SequencingMessage, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index cdc47b1898..715e9c0050 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -182,9 +182,9 @@ where COMMITTABLE: Committable + Serialize + Clone, TOKEN: VoteToken, { - type Vote: VoteType; + type Vote: VoteType; - type VoteAccumulator: Accumulator2; + type VoteAccumulator: Accumulator2; fn accumulate_vote(accumulator: Self::VoteAccumulator, vote: Self::Vote, commit: COMMITTABLE) -> Either { // if !self.is_valid_vote( diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index ee2d09672d..b82d896d8a 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -195,6 +195,7 @@ pub trait ViewMessage { /// API for interacting directly with a consensus committee /// intended to be implemented for both DA and for validating consensus committees +// TODO ED Why is this generic over VOTE? #[async_trait] pub trait CommunicationChannel< TYPES: NodeType, @@ -332,6 +333,7 @@ pub trait TestableNetworkingImplementation { fn in_flight_message_count(&self) -> Option; } /// Describes additional functionality needed by the test communication channel +// TODO ED Why is this generic over VOTE? pub trait TestableChannelImplementation< TYPES: NodeType, M: NetworkMsg, diff --git a/types/src/vote.rs b/types/src/vote.rs index d4c697508e..e7f6fa9b7c 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -30,14 +30,14 @@ use std::{ use tracing::error; /// The vote sent by consensus messages. -pub trait VoteType: +pub trait VoteType: Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq { /// The view this vote was cast for. fn get_view(&self) -> TYPES::Time; fn get_key(self) -> TYPES::SignatureKey; fn get_signature(self) -> EncodedSignature; - fn get_data(self) -> TYPES::SignatureKey { + fn get_data(self) -> VoteData { todo!() } } @@ -196,7 +196,7 @@ pub enum QuorumVote> { Timeout(TimeoutVote), } -impl VoteType for DAVote { +impl VoteType for DAVote { fn get_view(&self) -> TYPES::Time { self.current_view } @@ -221,7 +221,7 @@ impl DAVote { } } -impl> VoteType +impl> VoteType for QuorumVote { fn get_view(&self) -> TYPES::Time { @@ -261,7 +261,7 @@ impl> QuorumVote } } -impl VoteType for ViewSyncVote { +impl VoteType> for ViewSyncVote { fn get_view(&self) -> TYPES::Time { match self { ViewSyncVote::PreCommit(v) | ViewSyncVote::Commit(v) | ViewSyncVote::Finalize(v) => { @@ -288,16 +288,16 @@ pub trait Accumulator: Sized { } -pub trait Accumulator2>: Sized +pub trait Accumulator2>: Sized { fn append(self, vote: VOTE) -> Either>; } -pub struct AccumulatorPlaceholder> { +pub struct AccumulatorPlaceholder> { pub phantom: PhantomData<(TYPES, VOTE)> } -impl > Accumulator2 for AccumulatorPlaceholder { +impl > Accumulator2 for AccumulatorPlaceholder { fn append(self, vote: VOTE) -> Either> { either::Left(self) } From 5d7a6dd44bbfd61e017d4d408f38a68bd96bee68 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 12 Sep 2023 09:01:59 -0700 Subject: [PATCH 0044/1393] Fix lint --- types/src/traits/block_contents.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 444767fb09..afc714e169 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -60,6 +60,11 @@ pub trait BlockPayload: fn contained_transactions(&self) -> HashSet>; /// Compute the VID payload commitment. + /// + /// # Errors + /// - `VidResult::Err` in case of actual error. + /// - `VidResult::Ok(Result::Err)` if verification fails. + /// - `VidResult::Ok(Result::Ok)` if verification succeeds. fn commitment(&self) -> VidResult>; } From 013da108cba01536be57f8e248bd7f8ee9730bd3 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Sep 2023 12:43:28 -0400 Subject: [PATCH 0045/1393] creating transactions task for block building --- task-impls/src/consensus.rs | 3 +- task-impls/src/events.rs | 4 + task-impls/src/transactions.rs | 181 +++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+), 1 deletion(-) create mode 100644 task-impls/src/transactions.rs diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 03fcc387a8..80e6510873 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -543,7 +543,7 @@ where let view = proposal.data.get_view_number(); if view < self.cur_view { - error!("view too high"); + debug!("Proposal is from an older view. "); return; } @@ -827,6 +827,7 @@ where .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); debug!("about to publish decide"); + self.event_stream.publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())).await; let decide_sent = self.output_event_stream.publish(Event { view_number: consensus.last_decided_view, event: EventType::Decide { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 98819108a4..e3b50979d4 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -62,4 +62,8 @@ pub enum SequencingHotShotEvent> { TransactionSend(TYPES::Transaction, TYPES::SignatureKey), /// Event to send DA block data from DA leader to next quorum leader (which should always be the same node); internal event only SendDABlockData(TYPES::BlockType), + /// Event when the transactions task has a block formed + BlockReady(TYPES::BlockType), + /// Event when consensus decided on a leaf + LeafDecided(Vec), } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs new file mode 100644 index 0000000000..8ea635d750 --- /dev/null +++ b/task-impls/src/transactions.rs @@ -0,0 +1,181 @@ +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::{ + art::{async_spawn, async_timeout}, + async_primitives::subscribable_rwlock::ReadView, +}; +use async_lock::RwLock; +use bincode::config::Options; +use bitvec::prelude::*; +use commit::Committable; +use either::{Either, Left, Right}; +use futures::FutureExt; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, +}; +use hotshot_types::{ + certificate::DACertificate, + consensus::{Consensus, View}, + data::{DAProposal, ProposalType, SequencingLeaf}, + message::{CommitteeConsensusMessage, Message, Proposal, SequencingMessage}, + traits::{ + consensus_api::SequencingConsensusApi, + election::{CommitteeExchangeType, ConsensusExchange, Membership}, + network::{CommunicationChannel, ConsensusIntentEvent}, + node_implementation::{CommitteeEx, NodeImplementation, NodeType}, + signature_key::SignatureKey, + state::ConsensusTime, + Block, State, + }, + utils::ViewInner, + vote::VoteAccumulator, +}; +use hotshot_utils::bincode::bincode_opts; +use snafu::Snafu; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Instant, +}; +use tracing::{debug, error, instrument, warn}; + +#[derive(Snafu, Debug)] +/// Error type for consensus tasks +pub struct ConsensusTaskError {} + +/// Tracks state of a DA task +pub struct TransactionTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +> where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// The state's api + pub api: A, + /// Global registry task for the state + pub registry: GlobalRegistry, + + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + // pub transactions: Arc>>, + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + + /// the committee exchange + pub committee_exchange: Arc>, + + /// The view and ID of the current vote collection task, if there is one. + pub vote_collector: Option<(TYPES::Time, usize, usize)>, + + /// Global events stream to publish events + pub event_stream: ChannelStream>, + + /// This state's ID + pub id: u64, + + /// Event stream to publish events to the application layer + pub output_event_stream: ChannelStream>, +} + + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > DATaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + /// main task event handler + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] + + pub async fn handle_event( + &mut self, + event: SequencingHotShotEvent, + ) -> Option { + match event { + SequencingHotShotEvent::TransactionsRecv(transactions) => { + // TODO ED Add validation checks + let mut consensus = self.consensus.write().await; + consensus + .get_transactions() + .modify(|txns| { + for transaction in transactions { + let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); + + // If we didn't already know about this transaction, update our mempool metrics. + if !consensus.seen_transactions.remove(&transaction.commit()) + && txns.insert(transaction.commit(), transaction).is_none() + { + consensus.metrics.outstanding_transactions.update(1); + consensus + .metrics + .outstanding_transactions_memory_size + .update(i64::try_from(size).unwrap_or_else(|e| { + warn!("Conversion failed: {e}. Using the max value."); + i64::MAX + })); + } + } + }) + .await; + + return None; + } + SequencingHotShotEvent::DAProposalRecv(proposal, sender) => { + } + SequencingHotShotEvent::LeafDecided(leaf_chain) => { + for leaf in leaf_chain { + match &leaf.deltas { + Left(block) => { + let txns = block.contained_transactions(); + for txn in txns { + included_txns.insert(txn); + } + } + Right(_) => {} + } + } + + } + // TODO ED Update high QC through QCFormed event + SequencingHotShotEvent::ViewChange(view) => { + } + + SequencingHotShotEvent::Timeout(view) => { + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .await; + } + + SequencingHotShotEvent::Shutdown => { + return Some(HotShotTaskCompleted::ShutDown); + } + _ => {} + } + None + } +} \ No newline at end of file From 117897426842b4e81f22aa9353543bcfaaa07080 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 18:09:35 -0400 Subject: [PATCH 0046/1393] Remove PROPOSAL and VOTE generics from CommunicalChannel trait --- types/src/traits/consensus_api.rs | 2 +- types/src/traits/election.rs | 30 ++++++++++++++----------- types/src/traits/network.rs | 6 +---- types/src/traits/node_implementation.rs | 17 +++++--------- 4 files changed, 24 insertions(+), 31 deletions(-) diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 1f83df69f1..f9e19e917b 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -150,7 +150,7 @@ pub trait SequencingConsensusApi< /// Send a broadcast message to the entire network. async fn send_broadcast_message< PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, >( &self, message: SequencingMessage, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 715e9c0050..63abcff9fb 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -19,6 +19,7 @@ use crate::{ vote::ViewSyncVoteInternal, }; +use crate::vote::Accumulator2; use crate::{ data::LeafType, traits::{ @@ -42,7 +43,6 @@ use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, hash::Hash, marker::PhantomData, num::NonZeroU64}; use tracing::error; -use crate::vote::Accumulator2; /// Error for election problems #[derive(Snafu, Debug)] @@ -184,9 +184,13 @@ where { type Vote: VoteType; - type VoteAccumulator: Accumulator2; + type VoteAccumulator: Accumulator2; - fn accumulate_vote(accumulator: Self::VoteAccumulator, vote: Self::Vote, commit: COMMITTABLE) -> Either { + fn accumulate_vote( + accumulator: Self::VoteAccumulator, + vote: Self::Vote, + commit: COMMITTABLE, + ) -> Either { // if !self.is_valid_vote( // &vote.encoded_key, // &vote.encoded_signature, @@ -306,7 +310,7 @@ pub trait ConsensusExchange: Send + Sync { /// The committee eligible to make decisions. type Membership: Membership; /// Network used by [`Membership`](Self::Membership) to communicate. - type Networking: CommunicationChannel; + type Networking: CommunicationChannel; /// Commitments to items which are the subject of proposals and decisions. type Commitment: Committable + Serialize + Clone; @@ -548,7 +552,7 @@ pub trait CommitteeExchangeType: pub struct CommitteeExchange< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -569,7 +573,7 @@ pub struct CommitteeExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > CommitteeExchangeType for CommitteeExchange { @@ -618,7 +622,7 @@ impl< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for CommitteeExchange { @@ -782,7 +786,7 @@ pub struct QuorumExchange< LEAF: LeafType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -805,7 +809,7 @@ impl< LEAF: LeafType, MEMBERSHIP: Membership, PROPOSAL: ProposalType, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > QuorumExchangeType for QuorumExchange @@ -937,7 +941,7 @@ impl< LEAF: LeafType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for QuorumExchange @@ -1074,7 +1078,7 @@ pub struct ViewSyncExchange< TYPES: NodeType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -1096,7 +1100,7 @@ impl< TYPES: NodeType, MEMBERSHIP: Membership, PROPOSAL: ProposalType, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ViewSyncExchangeType for ViewSyncExchange { @@ -1297,7 +1301,7 @@ impl< TYPES: NodeType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for ViewSyncExchange { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index b82d896d8a..b2ad46588e 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -200,8 +200,6 @@ pub trait ViewMessage { pub trait CommunicationChannel< TYPES: NodeType, M: NetworkMsg, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >: Clone + Debug + Send + Sync + 'static { @@ -337,11 +335,9 @@ pub trait TestableNetworkingImplementation { pub trait TestableChannelImplementation< TYPES: NodeType, M: NetworkMsg, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, NETWORK, ->: CommunicationChannel +>: CommunicationChannel { /// generates the `CommunicationChannel` given it's associated network type fn generate_network() -> Box) -> Self + 'static>; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ba5abdebb..217e1a0244 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -401,24 +401,21 @@ where QuorumCommChannel: TestableChannelImplementation< TYPES, Message, - QuorumProposalType, - QuorumVoteType, + QuorumMembership, QuorumNetwork, >, CommitteeCommChannel: TestableChannelImplementation< TYPES, Message, - CommitteeProposalType, - CommitteeVote, + CommitteeMembership, QuorumNetwork, >, ViewSyncCommChannel: TestableChannelImplementation< TYPES, Message, - ViewSyncProposalType, - ViewSyncVoteType, + ViewSyncMembership, QuorumNetwork, >, @@ -518,8 +515,7 @@ pub type ViewSyncMembership = QuorumMembership; pub type QuorumNetwork = as CommunicationChannel< TYPES, Message, - QuorumProposalType, - QuorumVoteType, + QuorumMembership, >>::NETWORK; @@ -527,8 +523,7 @@ pub type QuorumNetwork = as Communication pub type CommitteeNetwork = as CommunicationChannel< TYPES, Message, - CommitteeProposalType, - CommitteeVote, + CommitteeMembership, >>::NETWORK; @@ -536,8 +531,6 @@ pub type CommitteeNetwork = as Communi pub type ViewSyncNetwork = as CommunicationChannel< TYPES, Message, - ViewSyncProposalType, - ViewSyncVoteType, ViewSyncMembership, >>::NETWORK; From 93329cf1fe7050daccc98dea2a6b50de8c22684e Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 18:19:43 -0400 Subject: [PATCH 0047/1393] More removal of PROPOSAL and VOTE from CommunicationChannel trait --- hotshot/src/tasks/mod.rs | 7 ++- .../src/traits/networking/libp2p_network.rs | 41 +++++------------- .../src/traits/networking/memory_network.rs | 43 +++++-------------- .../networking/web_server_libp2p_fallback.rs | 8 ++-- .../traits/networking/web_server_network.rs | 33 +++++--------- task-impls/src/network.rs | 22 ++++------ types/src/traits/election.rs | 2 +- types/src/vote.rs | 2 +- 8 files changed, 47 insertions(+), 111 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 55e430cbe4..30054a342b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -148,7 +148,7 @@ pub async fn add_network_message_task< // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where EXCHANGE::Networking: - CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + CommunicationChannel, MEMBERSHIP>, { let channel = exchange.network().clone(); let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { @@ -259,13 +259,12 @@ pub async fn add_network_event_task< // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where EXCHANGE::Networking: - CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + CommunicationChannel, MEMBERSHIP>, { let filter = NetworkEventTaskState::< TYPES, I, - PROPOSAL, - VOTE, + MEMBERSHIP, >::Networking, >::filter(task_kind); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c01bf53982..22fc6da209 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -715,21 +715,14 @@ impl ConnectedNetwork for Libp2p pub struct Libp2pCommChannel< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >( Arc, TYPES::SignatureKey>>, - PhantomData<(TYPES, I, PROPOSAL, VOTE, MEMBERSHIP)>, + PhantomData<(TYPES, I, MEMBERSHIP)>, ); -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > Libp2pCommChannel +impl, MEMBERSHIP: Membership> + Libp2pCommChannel { /// create a new libp2p communication channel #[must_use] @@ -741,11 +734,10 @@ impl< impl< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, + MEMBERSHIP: Membership, > TestableNetworkingImplementation> - for Libp2pCommChannel + for Libp2pCommChannel where MessageKind: ViewMessage, { @@ -787,14 +779,9 @@ where // top // we don't really want to make this the default implementation because that forces it to require ConnectedNetwork to be implemented. The struct we implement over might use multiple ConnectedNetworks #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for Libp2pCommChannel +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for Libp2pCommChannel where MessageKind: ViewMessage, { @@ -860,21 +847,13 @@ where } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, Libp2pNetwork, TYPES::SignatureKey>, - > for Libp2pCommChannel + > for Libp2pCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d6269e2c38..b2e7ddf6f3 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -465,21 +465,18 @@ impl ConnectedNetwork for Memory pub struct MemoryCommChannel< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >( Arc, TYPES::SignatureKey>>, - PhantomData<(I, PROPOSAL, VOTE, MEMBERSHIP)>, + PhantomData<(I, MEMBERSHIP)>, ); impl< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, + MEMBERSHIP: Membership, - > MemoryCommChannel + > MemoryCommChannel { /// create new communication channel #[must_use] @@ -488,14 +485,9 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > TestableNetworkingImplementation> - for MemoryCommChannel +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for MemoryCommChannel where MessageKind: ViewMessage, { @@ -525,14 +517,9 @@ where } #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for MemoryCommChannel +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for MemoryCommChannel where MessageKind: ViewMessage, { @@ -598,21 +585,13 @@ where } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, MemoryNetwork, TYPES::SignatureKey>, - > for MemoryCommChannel + > for MemoryCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs index 723c7c071d..825fbd4216 100644 --- a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs +++ b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs @@ -161,7 +161,7 @@ impl< PROPOSAL: ProposalType, VOTE: VoteType, MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> + > CommunicationChannel,MEMBERSHIP> for WebServerWithFallbackCommChannel { type NETWORK = CombinedNetworks; @@ -299,15 +299,13 @@ impl< impl< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, + MEMBERSHIP: Membership, > TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, + MEMBERSHIP, CombinedNetworks, > for WebServerWithFallbackCommChannel diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index e0001b94ab..9b3d269282 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -48,21 +48,18 @@ use tracing::{debug, error, info}; pub struct WebCommChannel< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >( Arc, TYPES::SignatureKey, TYPES>>, - PhantomData<(MEMBERSHIP, I, PROPOSAL, VOTE)>, + PhantomData<(MEMBERSHIP, I)>, ); impl< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, + MEMBERSHIP: Membership, - > WebCommChannel + > WebCommChannel { /// Create new communication channel #[must_use] @@ -521,11 +518,10 @@ impl< impl< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, + MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for WebCommChannel + > CommunicationChannel, MEMBERSHIP> + for WebCommChannel { type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; /// Blocks until node is successfully initialized @@ -1055,11 +1051,10 @@ impl> impl< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, + MEMBERSHIP: Membership, > TestableNetworkingImplementation> - for WebCommChannel + for WebCommChannel { fn generator( expected_node_count: usize, @@ -1087,21 +1082,13 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, WebServerNetwork, TYPES::SignatureKey, TYPES>, - > for WebCommChannel + > for WebCommChannel { fn generate_network() -> Box< dyn Fn(Arc, TYPES::SignatureKey, TYPES>>) -> Self diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index b50a4143ea..cdaf0a17ae 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -137,10 +137,8 @@ pub struct NetworkEventTaskState< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > { /// comm channel pub channel: COMMCHANNEL, @@ -149,7 +147,7 @@ pub struct NetworkEventTaskState< /// view number pub view: TYPES::Time, /// phantom data - pub phantom: PhantomData<(PROPOSAL, VOTE, MEMBERSHIP)>, + pub phantom: PhantomData<(MEMBERSHIP)>, // TODO ED Need to add exchange so we can get the recipient key and our own key? } @@ -160,11 +158,9 @@ impl< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, - > TS for NetworkEventTaskState + COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, + > TS for NetworkEventTaskState { } @@ -175,11 +171,9 @@ impl< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, - > NetworkEventTaskState + COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, + > NetworkEventTaskState { /// Handle the given event. /// @@ -348,9 +342,9 @@ pub type NetworkMessageTaskTypes = HSTWithMessage< >; /// network event task types -pub type NetworkEventTaskTypes = HSTWithEvent< +pub type NetworkEventTaskTypes = HSTWithEvent< NetworkTaskError, SequencingHotShotEvent, ChannelStream>, - NetworkEventTaskState, + NetworkEventTaskState, >; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 63abcff9fb..4f598ee5f1 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -302,7 +302,7 @@ pub trait ConsensusExchange: Send + Sync { /// A proposal for participants to vote on. type Proposal: ProposalType; /// A vote on a [`Proposal`](Self::Proposal). - type Vote: VoteType; + type Vote: VoteType; /// A [`SignedCertificate`] attesting to a decision taken by the committee. type Certificate: SignedCertificate + Hash diff --git a/types/src/vote.rs b/types/src/vote.rs index e7f6fa9b7c..720805af65 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -294,7 +294,7 @@ pub trait Accumulator2> { - pub phantom: PhantomData<(TYPES, VOTE)> + pub phantom: PhantomData<(TYPES, VOTE, COMMITTABLE)> } impl > Accumulator2 for AccumulatorPlaceholder { From f0fab5873ad6f13dc613f499b8588ca1c7f9b57a Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 18:48:38 -0400 Subject: [PATCH 0048/1393] Lints --- hotshot/examples/infra/modDA.rs | 157 +++------------ hotshot/examples/web-server-da/types.rs | 9 +- hotshot/src/lib.rs | 19 +- hotshot/src/tasks/mod.rs | 21 +- .../src/traits/networking/libp2p_network.rs | 12 +- .../src/traits/networking/memory_network.rs | 10 +- .../networking/web_server_libp2p_fallback.rs | 19 +- .../traits/networking/web_server_network.rs | 30 +-- task-impls/src/consensus.rs | 10 +- task-impls/src/da.rs | 6 +- task-impls/src/network.rs | 4 +- testing/src/node_types.rs | 181 ++++++++---------- testing/src/test_launcher.rs | 2 - testing/src/test_runner.rs | 4 - types/src/certificate.rs | 5 +- types/src/traits/consensus_api.rs | 16 +- types/src/traits/election.rs | 22 +-- types/src/traits/network.rs | 13 +- types/src/traits/node_implementation.rs | 5 - types/src/vote.rs | 49 +++-- 20 files changed, 199 insertions(+), 395 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 729cbfd79f..63e222faed 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -19,7 +19,7 @@ use hotshot_orchestrator::{ use hotshot_task::task::FilterEvent; use hotshot_types::{ certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal, SequencingLeaf, TestableLeaf}, + data::{QuorumProposal, SequencingLeaf, TestableLeaf}, event::{Event, EventType}, message::{Message, SequencingMessage}, traits::{ @@ -33,7 +33,6 @@ use hotshot_types::{ }, state::{ConsensusTime, TestableBlock, TestableState}, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, HotShotConfig, }; // use libp2p::{ @@ -67,27 +66,9 @@ use tracing::{debug, error, info, warn}; pub async fn run_orchestrator_da< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel< - TYPES, - Message, - DAProposal, - DAVote, - MEMBERSHIP, - > + Debug, - QUORUMNETWORK: CommunicationChannel< - TYPES, - Message, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > + Debug, - VIEWSYNCNETWORK: CommunicationChannel< - TYPES, - Message, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -135,27 +116,9 @@ pub async fn run_orchestrator_da< pub trait RunDA< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel< - TYPES, - Message, - DAProposal, - DAVote, - MEMBERSHIP, - > + Debug, - QUORUMNETWORK: CommunicationChannel< - TYPES, - Message, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > + Debug, - VIEWSYNCNETWORK: CommunicationChannel< - TYPES, - Message, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -416,21 +379,13 @@ pub trait RunDA< // WEB SERVER /// Alias for the [`WebCommChannel`] for sequencing consensus. -type StaticDAComm = - WebCommChannel, DAVote, MEMBERSHIP>; +type StaticDAComm = WebCommChannel; /// Alias for the ['WebCommChannel'] for validating consensus -type StaticQuorumComm = WebCommChannel< - TYPES, - I, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, ->; +type StaticQuorumComm = WebCommChannel; /// Alias for the ['WebCommChannel'] for view sync consensus -type StaticViewSyncComm = - WebCommChannel, ViewSyncVote, MEMBERSHIP>; +type StaticViewSyncComm = WebCommChannel; /// Represents a web server-based run pub struct WebServerDARun< @@ -463,32 +418,20 @@ impl< SequencingLeaf, QuorumProposal>, MEMBERSHIP, - WebCommChannel< - TYPES, - NODE, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - >, + WebCommChannel, Message, >, CommitteeExchange< TYPES, MEMBERSHIP, - WebCommChannel, DAVote, MEMBERSHIP>, + WebCommChannel, Message, >, ViewSyncExchange< TYPES, ViewSyncCertificate, MEMBERSHIP, - WebCommChannel< - TYPES, - NODE, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - >, + WebCommChannel, Message, >, >, @@ -540,21 +483,11 @@ where ); // Create the network - let quorum_network: WebCommChannel< - TYPES, - NODE, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > = WebCommChannel::new(underlying_quorum_network.clone().into()); + let quorum_network: WebCommChannel = + WebCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: WebCommChannel< - TYPES, - NODE, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > = WebCommChannel::new(underlying_quorum_network.into()); + let view_sync_network: WebCommChannel = + WebCommChannel::new(underlying_quorum_network.into()); let WebServerConfig { host, @@ -563,17 +496,10 @@ where }: WebServerConfig = config.clone().da_web_server_config.unwrap(); // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel, DAVote, MEMBERSHIP> = - WebCommChannel::new( - WebServerNetwork::create( - &host.to_string(), - port, - wait_between_polls, - pub_key, - true, - ) + let da_network: WebCommChannel = WebCommChannel::new( + WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) .into(), - ); + ); WebServerDARun { config, @@ -583,28 +509,15 @@ where } } - fn get_da_network( - &self, - ) -> WebCommChannel, DAVote, MEMBERSHIP> { + fn get_da_network(&self) -> WebCommChannel { self.da_network.clone() } - fn get_quorum_network( - &self, - ) -> WebCommChannel< - TYPES, - NODE, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > { + fn get_quorum_network(&self) -> WebCommChannel { self.quorum_network.clone() } - fn get_view_sync_network( - &self, - ) -> WebCommChannel, ViewSyncVote, MEMBERSHIP> - { + fn get_view_sync_network(&self) -> WebCommChannel { self.view_sync_network.clone() } @@ -623,27 +536,9 @@ where pub async fn main_entry_point< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel< - TYPES, - Message, - DAProposal, - DAVote, - MEMBERSHIP, - > + Debug, - QUORUMNETWORK: CommunicationChannel< - TYPES, - Message, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > + Debug, - VIEWSYNCNETWORK: CommunicationChannel< - TYPES, - Message, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 9ea1f9a694..6827a00fe8 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -25,12 +25,9 @@ pub struct NodeImpl {} pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = - WebCommChannel; -pub type QuorumNetwork = - WebCommChannel; -pub type ViewSyncNetwork = - WebCommChannel; +pub type DANetwork = WebCommChannel; +pub type QuorumNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebCommChannel; pub type ThisDAProposal = DAProposal; pub type ThisDAVote = DAVote; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa435e1f1d..a56ff576b5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -59,7 +59,7 @@ use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKin use hotshot_types::{ certificate::{DACertificate, ViewSyncCertificate}, consensus::{BlockStore, Consensus, ConsensusMetrics, View, ViewInner, ViewQueue}, - data::{DAProposal, DeltasType, LeafType, ProposalType, QuorumProposal, SequencingLeaf}, + data::{DAProposal, DeltasType, LeafType, QuorumProposal, SequencingLeaf}, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, @@ -79,7 +79,7 @@ use hotshot_types::{ storage::StoredView, State, }, - vote::{ViewSyncData, VoteType}, + vote::ViewSyncData, HotShotConfig, }; use snafu::ResultExt; @@ -936,10 +936,7 @@ impl< I: NodeImplementation>, > SequencingConsensusApi for HotShotSequencingConsensusApi { - async fn send_direct_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_direct_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, @@ -964,10 +961,7 @@ impl< Ok(()) } - async fn send_direct_da_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_direct_da_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, @@ -994,10 +988,7 @@ impl< // TODO (DA) Refactor ConsensusApi and HotShot to use SystemContextInner directly. // - async fn send_broadcast_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_broadcast_message( &self, message: SequencingMessage, ) -> std::result::Result<(), NetworkError> { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 30054a342b..9c42165895 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,6 +5,7 @@ use crate::{ QuorumCertificate, SequencingQuorumEx, SystemContext, }; use async_compatibility_layer::art::{async_sleep, async_spawn_local}; +use commit::Committable; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -41,6 +42,7 @@ use hotshot_types::{ }, vote::{ViewSyncData, VoteType}, }; +use serde::Serialize; use std::{ collections::HashMap, marker::PhantomData, @@ -130,8 +132,9 @@ pub async fn add_network_message_task< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, + COMMITTABLE: Committable + Serialize + Clone, PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -147,8 +150,7 @@ pub async fn add_network_message_task< ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: - CommunicationChannel, MEMBERSHIP>, + EXCHANGE::Networking: CommunicationChannel, MEMBERSHIP>, { let channel = exchange.network().clone(); let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { @@ -240,8 +242,9 @@ pub async fn add_network_event_task< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, + COMMITTABLE: Committable + Serialize + Clone, PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -258,18 +261,16 @@ pub async fn add_network_event_task< ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: - CommunicationChannel, MEMBERSHIP>, + EXCHANGE::Networking: CommunicationChannel, MEMBERSHIP>, { let filter = NetworkEventTaskState::< TYPES, I, - MEMBERSHIP, >::Networking, >::filter(task_kind); let channel = exchange.network().clone(); - let network_state: NetworkEventTaskState<_, _, _, _, _, _> = NetworkEventTaskState { + let network_state: NetworkEventTaskState<_, _, _, _> = NetworkEventTaskState { channel, event_stream: event_stream.clone(), view: TYPES::Time::genesis(), @@ -277,7 +278,7 @@ where }; let registry = task_runner.registry.clone(); let network_event_handler = HandleEvent(Arc::new( - move |event, mut state: NetworkEventTaskState<_, _, _, _, MEMBERSHIP, _>| { + move |event, mut state: NetworkEventTaskState<_, _, MEMBERSHIP, _>| { let membership = exchange.membership().clone(); async move { let completion_status = state.handle_event(event, &membership).await; @@ -289,7 +290,7 @@ where let networking_name = "Networking Task"; let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) + TaskBuilder::>::new(networking_name.to_string()) .register_event_stream(event_stream.clone(), filter) .await .register_registry(&mut registry.clone()) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 22fc6da209..2c7731c003 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -14,7 +14,6 @@ use bimap::BiHashMap; use bincode::Options; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, message::{Message, MessageKind}, traits::{ election::Membership, @@ -27,7 +26,6 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, }, - vote::VoteType, }; use hotshot_utils::bincode::bincode_opts; use libp2p_identity::PeerId; @@ -731,13 +729,9 @@ impl, MEMBERSHIP: Membership, - - MEMBERSHIP: Membership, - > TestableNetworkingImplementation> - for Libp2pCommChannel +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for Libp2pCommChannel where MessageKind: ViewMessage, { diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index b2e7ddf6f3..fb4ff4709a 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -16,7 +16,6 @@ use dashmap::DashMap; use futures::StreamExt; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, message::{Message, MessageKind}, traits::{ election::Membership, @@ -29,7 +28,6 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, }, - vote::VoteType, }; use hotshot_utils::bincode::bincode_opts; use rand::Rng; @@ -471,12 +469,8 @@ pub struct MemoryCommChannel< PhantomData<(I, MEMBERSHIP)>, ); -impl< - TYPES: NodeType, - I: NodeImplementation, - - MEMBERSHIP: Membership, - > MemoryCommChannel +impl, MEMBERSHIP: Membership> + MemoryCommChannel { /// create new communication channel #[must_use] diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs index 825fbd4216..7ed9c86ed5 100644 --- a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs +++ b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs @@ -12,7 +12,6 @@ use futures::join; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, message::Message, traits::{ election::Membership, @@ -23,7 +22,6 @@ use hotshot_types::{ }, node_implementation::NodeType, }, - vote::VoteType, }; use std::{marker::PhantomData, sync::Arc}; use tracing::error; @@ -155,13 +153,8 @@ impl, MEMBERSHIP: Membership, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel,MEMBERSHIP> +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> for WebServerWithFallbackCommChannel { type NETWORK = CombinedNetworks; @@ -296,16 +289,10 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation, - - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - MEMBERSHIP, CombinedNetworks, > for WebServerWithFallbackCommChannel diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 9b3d269282..6586ec9b80 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -13,7 +13,6 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, message::{Message, MessagePurpose}, traits::{ election::Membership, @@ -25,7 +24,6 @@ use hotshot_types::{ node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, - vote::VoteType, }; use hotshot_web_server::{self, config}; use rand::random; @@ -54,12 +52,8 @@ pub struct WebCommChannel< PhantomData<(MEMBERSHIP, I)>, ); -impl< - TYPES: NodeType, - I: NodeImplementation, - - MEMBERSHIP: Membership, - > WebCommChannel +impl, MEMBERSHIP: Membership> + WebCommChannel { /// Create new communication channel #[must_use] @@ -515,13 +509,9 @@ impl< } #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - - MEMBERSHIP: Membership, - > CommunicationChannel, MEMBERSHIP> - for WebCommChannel +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for WebCommChannel { type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; /// Blocks until node is successfully initialized @@ -1048,13 +1038,9 @@ impl> } } -impl< - TYPES: NodeType, - I: NodeImplementation, - - MEMBERSHIP: Membership, - > TestableNetworkingImplementation> - for WebCommChannel +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for WebCommChannel { fn generator( expected_node_count: usize, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9462b389c4..c52fe7e301 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -366,10 +366,7 @@ where ); if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view() - ); + debug!("Sending vote to next quorum leader {:?}", vote.get_view()); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) .await; @@ -444,10 +441,7 @@ where // TODO ED Only publish event in vote if able if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view() - ); + debug!("Sending vote to next quorum leader {:?}", vote.get_view()); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) .await; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9d0cc36e88..a160a37509 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -108,6 +108,8 @@ pub struct DAVoteCollectionTaskState< pub accumulator: Either, DACertificate>, + /// The accumulator + #[allow(clippy::type_complexity)] pub accumulator2: Either< as SignedCertificate< TYPES, @@ -393,13 +395,13 @@ where None, ); let accumulator2 = AccumulatorPlaceholder { - phantom: PhantomData::default(), + phantom: PhantomData, }; if view > collection_view { let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), accumulator, - accumulator2: either::Left(accumulator2), + accumulator2: either::Left(accumulator2), cur_view: view, event_stream: self.event_stream.clone(), id: self.id, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index cdaf0a17ae..5cd4ba3d19 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -7,7 +7,7 @@ use hotshot_task::{ GeneratedStream, Merge, }; use hotshot_types::{ - data::{ProposalType, SequencingLeaf}, + data::SequencingLeaf, message::{ CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, SequencingMessage, @@ -147,7 +147,7 @@ pub struct NetworkEventTaskState< /// view number pub view: TYPES::Time, /// phantom data - pub phantom: PhantomData<(MEMBERSHIP)>, + pub phantom: PhantomData, // TODO ED Need to add exchange so we can get the recipient key and our own key? } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 1d2985c56d..c788006984 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -15,14 +15,13 @@ use hotshot::{ }; use hotshot_types::{ certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal, SequencingLeaf, ViewNumber}, + data::{QuorumProposal, SequencingLeaf, ViewNumber}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, network::{TestableChannelImplementation, TestableNetworkingImplementation}, node_implementation::{ChannelMaps, NodeType, SequencingExchanges, TestableExchange}, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, }; use serde::{Deserialize, Serialize}; @@ -65,80 +64,33 @@ pub struct StaticFallbackImpl; pub type StaticMembership = StaticCommittee>; -pub type StaticMemoryDAComm = MemoryCommChannel< - SequencingTestTypes, - SequencingMemoryImpl, - DAProposal, - DAVote, - StaticMembership, ->; +pub type StaticMemoryDAComm = + MemoryCommChannel; -type StaticLibp2pDAComm = Libp2pCommChannel< - SequencingTestTypes, - SequencingLibp2pImpl, - DAProposal, - DAVote, - StaticMembership, ->; +type StaticLibp2pDAComm = + Libp2pCommChannel; -type StaticWebDAComm = WebCommChannel< - SequencingTestTypes, - SequencingWebImpl, - DAProposal, - DAVote, - StaticMembership, ->; +type StaticWebDAComm = WebCommChannel; type StaticFallbackComm = WebServerWithFallbackCommChannel; -pub type StaticMemoryQuorumComm = MemoryCommChannel< - SequencingTestTypes, - SequencingMemoryImpl, - QuorumProposal>, - QuorumVote>, - StaticMembership, ->; +pub type StaticMemoryQuorumComm = + MemoryCommChannel; -type StaticLibp2pQuorumComm = Libp2pCommChannel< - SequencingTestTypes, - SequencingLibp2pImpl, - QuorumProposal>, - QuorumVote>, - StaticMembership, ->; +type StaticLibp2pQuorumComm = + Libp2pCommChannel; -type StaticWebQuorumComm = WebCommChannel< - SequencingTestTypes, - SequencingWebImpl, - QuorumProposal>, - QuorumVote>, - StaticMembership, ->; +type StaticWebQuorumComm = WebCommChannel; -pub type StaticMemoryViewSyncComm = MemoryCommChannel< - SequencingTestTypes, - SequencingMemoryImpl, - ViewSyncCertificate, - ViewSyncVote, - StaticMembership, ->; +pub type StaticMemoryViewSyncComm = + MemoryCommChannel; -type StaticLibp2pViewSyncComm = Libp2pCommChannel< - SequencingTestTypes, - SequencingLibp2pImpl, - ViewSyncCertificate, - ViewSyncVote, - StaticMembership, ->; +type StaticLibp2pViewSyncComm = + Libp2pCommChannel; -type StaticWebViewSyncComm = WebCommChannel< - SequencingTestTypes, - SequencingWebImpl, - ViewSyncCertificate, - ViewSyncVote, - StaticMembership, ->; +type StaticWebViewSyncComm = + WebCommChannel; pub type SequencingLibp2pExchange = SequencingExchanges< SequencingTestTypes, @@ -231,9 +183,24 @@ impl Box::new(move |id| { let network = Arc::new(network_generator(id)); - let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) @@ -325,9 +292,24 @@ impl Box::new(move |id| { let network = Arc::new(network_generator(id)); let network_da = Arc::new(network_da_generator(id)); - let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network_da); - let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) @@ -445,9 +427,24 @@ impl Box::new(move |id| { let network = Arc::new(network_generator(id)); let network_da = Arc::new(network_da_generator(id)); - let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network_da); - let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) @@ -584,44 +581,20 @@ impl <, - >>::Networking as TestableChannelImplementation< - _, - _, - QuorumProposal< - SequencingTestTypes, - >::Leaf, - >, - QuorumVote< - SequencingTestTypes, - >::Leaf, - >, - _, - _, - >>::generate_network()(network.clone()); + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); let committee_chan = <, - >>::Networking as TestableChannelImplementation< - _, - _, - DAProposal, - DAVote, - _, - _, - >>::generate_network()(network_da); + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); let view_sync_chan = <, - >>::Networking as TestableChannelImplementation< - _, - _, - ViewSyncCertificate, - ViewSyncVote, - _, - _, - >>::generate_network()(network); + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 4a7e6bd9b1..a184323edd 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -83,8 +83,6 @@ where QuorumCommChannel: CommunicationChannel< TYPES, Message, - as ConsensusExchange>>::Proposal, - as ConsensusExchange>>::Vote, as ConsensusExchange>>::Membership, >, { diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 57c82d24e9..3679e333cb 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -41,8 +41,6 @@ where QuorumCommChannel: CommunicationChannel< TYPES, Message, - as ConsensusExchange>>::Proposal, - as ConsensusExchange>>::Vote, as ConsensusExchange>>::Membership, >, { @@ -59,8 +57,6 @@ where QuorumCommChannel: CommunicationChannel< TYPES, Message, - as ConsensusExchange>>::Proposal, - as ConsensusExchange>>::Vote, as ConsensusExchange>>::Membership, >, { diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 51dd5b4206..1efeb6b70e 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,6 +1,5 @@ //! Provides two types of cerrtificates and their accumulators. -use crate::vote::Accumulator; use crate::vote::AccumulatorPlaceholder; use crate::vote::QuorumVote; use crate::{ @@ -11,11 +10,11 @@ use crate::{ signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, state::ConsensusTime, }, - vote::{DAVote, ViewSyncData, ViewSyncVote, VoteAccumulator}, + vote::{DAVote, ViewSyncData, ViewSyncVote}, }; use bincode::Options; use commit::{Commitment, Committable}; -use either::Either; + use espresso_systems_common::hotshot::tag; use hotshot_utils::bincode::bincode_opts; use serde::{Deserialize, Serialize}; diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index f9e19e917b..ab3f899944 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -2,7 +2,7 @@ use crate::{ certificate::QuorumCertificate, - data::{LeafType, ProposalType}, + data::LeafType, error::HotShotError, event::{Event, EventType}, message::{DataMessage, SequencingMessage}, @@ -12,7 +12,6 @@ use crate::{ signature_key::SignatureKey, storage::StorageError, }, - vote::VoteType, }; use async_trait::async_trait; @@ -128,30 +127,23 @@ pub trait SequencingConsensusApi< LEAF: LeafType, I: NodeImplementation>, >: ConsensusSharedApi -// TODO ED VoteType should not always be over LEAF, but for the API it doesn't matter and we are removing it soon anyway { /// Send a direct message to the given recipient - async fn send_direct_message, VOTE: VoteType>( + async fn send_direct_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// send a direct message using the DA communication channel - async fn send_direct_da_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_direct_da_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// Send a broadcast message to the entire network. - async fn send_broadcast_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_broadcast_message( &self, message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 4f598ee5f1..fa95f86d32 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -182,27 +182,19 @@ where COMMITTABLE: Committable + Serialize + Clone, TOKEN: VoteToken, { + /// `VoteType` that is used in this certificate type Vote: VoteType; + /// `Accumulator` type to accumulate votes. type VoteAccumulator: Accumulator2; + /// Accumulates votes given an accumulator, vote, and commit. + /// Returns either the accumulator or a certificate fn accumulate_vote( - accumulator: Self::VoteAccumulator, - vote: Self::Vote, - commit: COMMITTABLE, + _accumulator: Self::VoteAccumulator, + _vote: Self::Vote, + _commit: COMMITTABLE, ) -> Either { - // if !self.is_valid_vote( - // &vote.encoded_key, - // &vote.encoded_signature, - // vote.data.clone(), - // // Ignoring deserialization errors below since we are getting rid of it soon - // Checked::Unchecked(vote.vote_token.clone()), - // ) { - // error!("Invalid vote!"); - // return Either::Left(accumulator); - // } - // Call append - todo!() } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index b2ad46588e..73b3f69ccf 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -11,7 +11,7 @@ use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use super::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ProposalType, message::MessagePurpose, vote::VoteType}; +use crate::message::MessagePurpose; use async_trait::async_trait; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -195,13 +195,10 @@ pub trait ViewMessage { /// API for interacting directly with a consensus committee /// intended to be implemented for both DA and for validating consensus committees -// TODO ED Why is this generic over VOTE? +// TODO ED Why is this generic over VOTE? #[async_trait] -pub trait CommunicationChannel< - TYPES: NodeType, - M: NetworkMsg, - MEMBERSHIP: Membership, ->: Clone + Debug + Send + Sync + 'static +pub trait CommunicationChannel>: + Clone + Debug + Send + Sync + 'static { /// Underlying Network implementation's type type NETWORK; @@ -331,7 +328,7 @@ pub trait TestableNetworkingImplementation { fn in_flight_message_count(&self) -> Option; } /// Describes additional functionality needed by the test communication channel -// TODO ED Why is this generic over VOTE? +// TODO ED Why is this generic over VOTE? pub trait TestableChannelImplementation< TYPES: NodeType, M: NetworkMsg, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 217e1a0244..b6bd7550ad 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -401,21 +401,18 @@ where QuorumCommChannel: TestableChannelImplementation< TYPES, Message, - QuorumMembership, QuorumNetwork, >, CommitteeCommChannel: TestableChannelImplementation< TYPES, Message, - CommitteeMembership, QuorumNetwork, >, ViewSyncCommChannel: TestableChannelImplementation< TYPES, Message, - ViewSyncMembership, QuorumNetwork, >, @@ -515,7 +512,6 @@ pub type ViewSyncMembership = QuorumMembership; pub type QuorumNetwork = as CommunicationChannel< TYPES, Message, - QuorumMembership, >>::NETWORK; @@ -523,7 +519,6 @@ pub type QuorumNetwork = as Communication pub type CommitteeNetwork = as CommunicationChannel< TYPES, Message, - CommitteeMembership, >>::NETWORK; diff --git a/types/src/vote.rs b/types/src/vote.rs index 720805af65..fab0b1711e 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -7,7 +7,7 @@ use crate::{ certificate::{AssembledSignature, QuorumCertificate}, data::LeafType, traits::{ - election::{SignedCertificate, VoteData, VoteToken}, + election::{VoteData, VoteToken}, node_implementation::NodeType, signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, }, @@ -25,7 +25,8 @@ use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, - num::NonZeroU64, marker::PhantomData, + marker::PhantomData, + num::NonZeroU64, }; use tracing::error; @@ -33,10 +34,13 @@ use tracing::error; pub trait VoteType: Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq { - /// The view this vote was cast for. + /// Get the view this vote was cast for fn get_view(&self) -> TYPES::Time; - fn get_key(self) -> TYPES::SignatureKey; + /// Get the signature key associated with this vote + fn get_key(self) -> TYPES::SignatureKey; + /// Get the signature associated with this vote fn get_signature(self) -> EncodedSignature; + /// Get the data this vote was signed over fn get_data(self) -> VoteData { todo!() } @@ -209,13 +213,12 @@ impl VoteType for DAVote { } } -#[deprecated] // TODO ED Remove this impl DAVote { /// Get the signature key. /// # Panics /// If the deserialization fails. - #[deprecated] + // #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { ::from_bytes(&self.signature.0).unwrap() } @@ -241,7 +244,7 @@ impl> VoteType impl> QuorumVote { /// Get the encoded signature. - #[deprecated] + // #[deprecated] pub fn signature(&self) -> EncodedSignature { match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), @@ -251,7 +254,7 @@ impl> QuorumVote /// Get the signature key. /// # Panics /// If the deserialization fails. - #[deprecated] + // #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), @@ -287,18 +290,36 @@ pub trait Accumulator: Sized { fn append(self, val: T) -> Either; } - -pub trait Accumulator2>: Sized +/// Accumulator trait used to accumulate votes into an `AssembledSignature` +pub trait Accumulator2< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, +>: Sized { + /// Append 1 vote to the accumulator. If the threshold is not reached, return + /// the accumulator, else return the `AssembledSignature` + /// Only called from inside `accumulate_internal` fn append(self, vote: VOTE) -> Either>; } -pub struct AccumulatorPlaceholder> { - pub phantom: PhantomData<(TYPES, VOTE, COMMITTABLE)> +/// Placeholder accumulator; will be replaced by accumulator for each certificate type +pub struct AccumulatorPlaceholder< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, +> { + /// Phantom data to make compiler happy + pub phantom: PhantomData<(TYPES, VOTE, COMMITTABLE)>, } -impl > Accumulator2 for AccumulatorPlaceholder { - fn append(self, vote: VOTE) -> Either> { +impl< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, + > Accumulator2 for AccumulatorPlaceholder +{ + fn append(self, _vote: VOTE) -> Either> { either::Left(self) } } From 310dc74821616fa456dbf27679617fa336d0e20f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 12 Sep 2023 19:26:49 -0400 Subject: [PATCH 0049/1393] Implement leader lookahead and corresponding GC (#1670) --- constants/Cargo.toml | 5 + constants/src/lib.rs | 10 + hotshot/Cargo.toml | 1 + hotshot/src/demos/sdemo.rs | 6 +- hotshot/src/lib.rs | 9 +- hotshot/src/tasks/mod.rs | 75 +---- .../src/traits/networking/libp2p_network.rs | 228 ++++++++----- .../src/traits/networking/memory_network.rs | 23 +- .../networking/web_server_libp2p_fallback.rs | 46 ++- .../traits/networking/web_server_network.rs | 38 +-- .../networking/web_sever_libp2p_fallback.rs | 318 ------------------ hotshot/src/traits/storage/memory_storage.rs | 3 +- libp2p-networking/Cargo.toml | 2 + libp2p-networking/src/network/mod.rs | 12 +- libp2p-networking/src/network/node.rs | 3 +- libp2p-networking/src/network/node/handle.rs | 179 +++++++++- libp2p-networking/tests/common/mod.rs | 10 +- libp2p-networking/tests/counter.rs | 31 +- task-impls/Cargo.toml | 1 + task-impls/src/consensus.rs | 13 + types/Cargo.toml | 1 + types/src/constants.rs | 14 - types/src/data.rs | 8 +- types/src/lib.rs | 1 - types/src/traits/network.rs | 42 ++- 25 files changed, 459 insertions(+), 620 deletions(-) create mode 100644 constants/Cargo.toml create mode 100644 constants/src/lib.rs delete mode 100644 hotshot/src/traits/networking/web_sever_libp2p_fallback.rs delete mode 100644 types/src/constants.rs diff --git a/constants/Cargo.toml b/constants/Cargo.toml new file mode 100644 index 0000000000..6f04253d2e --- /dev/null +++ b/constants/Cargo.toml @@ -0,0 +1,5 @@ +[package] +name = "hotshot-constants" +version.workspace = true + +[dependencies] diff --git a/constants/src/lib.rs b/constants/src/lib.rs new file mode 100644 index 0000000000..8a4d9d5d46 --- /dev/null +++ b/constants/src/lib.rs @@ -0,0 +1,10 @@ +//! configurable constants for hotshot + +/// the ID of the genesis block proposer +pub const GENESIS_PROPOSER_ID: [u8; 2] = [4, 2]; + +/// the number of views to gather information for ahead of time +pub const LOOK_AHEAD: u64 = 5; + +/// the default kademlia record republication interval (in seconds) +pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 7edc7363c5..a085531227 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -80,6 +80,7 @@ bincode = { workspace = true } bitvec = { workspace = true } clap = { version = "4.4", features = ["derive", "env"], optional = true } commit = { workspace = true } +hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } dashmap = "5.5.1" derivative = { version = "2.2.0", optional = true } diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 97117347a9..67df1c7aea 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -19,8 +19,10 @@ use either::Either; use hotshot_signature_key::bn254::BN254Pub; use hotshot_types::{ certificate::{AssembledSignature, QuorumCertificate}, - constants::genesis_proposer_id, - data::{fake_commitment, random_commitment, LeafType, SequencingLeaf, ViewNumber}, + data::{ + fake_commitment, genesis_proposer_id, random_commitment, LeafType, SequencingLeaf, + ViewNumber, + }, traits::{ block_contents::Transaction, election::Membership, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa435e1f1d..4c2a42a0b4 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -44,9 +44,9 @@ use crate::{ use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, async_primitives::{broadcast::BroadcastSender, subscribable_rwlock::SubscribableRwLock}, - channel::{unbounded, UnboundedReceiver, UnboundedSender}, + channel::UnboundedSender, }; -use async_lock::{Mutex, RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; +use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; use commit::{Commitment, Committable}; use custom_debug::Debug; @@ -146,9 +146,6 @@ pub struct SystemContextInner> { /// latter of which is only applicable for sequencing consensus. channel_maps: (ChannelMaps, Option>), - /// for receiving messages in the network lookup task - recv_network_lookup: Arc>>>, - // global_registry: GlobalRegistry, /// Access to the output event stream. output_event_stream: ChannelStream>, @@ -238,9 +235,7 @@ impl> SystemContext { let consensus = Arc::new(RwLock::new(consensus)); let txns = consensus.read().await.get_transactions(); - let (_send_network_lookup, recv_network_lookup) = unbounded(); let inner: Arc> = Arc::new(SystemContextInner { - recv_network_lookup: Arc::new(Mutex::new(recv_network_lookup)), id: nonce, channel_maps: I::new_channel_maps(start_view), consensus, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 55e430cbe4..e0dc9be155 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -2,9 +2,9 @@ use crate::{ async_spawn, types::SystemContextHandle, DACertificate, HotShotSequencingConsensusApi, - QuorumCertificate, SequencingQuorumEx, SystemContext, + QuorumCertificate, SequencingQuorumEx, }; -use async_compatibility_layer::art::{async_sleep, async_spawn_local}; +use async_compatibility_layer::art::async_sleep; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -26,7 +26,6 @@ use hotshot_task_impls::{ }; use hotshot_types::{ certificate::ViewSyncCertificate, - constants::LOOK_AHEAD, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, message::{Message, Messages, SequencingMessage}, @@ -41,75 +40,7 @@ use hotshot_types::{ }, vote::{ViewSyncData, VoteType}, }; -use std::{ - collections::HashMap, - marker::PhantomData, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; -use tracing::info; - -/// Task to look up a node in the future as needed -pub async fn network_lookup_task>( - hotshot: SystemContext, - shut_down: Arc, -) { - info!("Launching network lookup task"); - let networking = hotshot.inner.exchanges.quorum_exchange().network().clone(); - - let inner = hotshot.inner.clone(); - - let mut completion_map: HashMap> = HashMap::default(); - - while !shut_down.load(Ordering::Relaxed) { - let lock = hotshot.inner.recv_network_lookup.lock().await; - - if let Ok(Some(cur_view)) = lock.recv().await { - let view_to_lookup = cur_view + LOOK_AHEAD; - - // perform pruning - // TODO in the future btreemap would be better - completion_map = completion_map - .drain() - .filter(|(view, is_done)| { - if !is_done.load(Ordering::Relaxed) { - // we are past the view where this is useful - if cur_view >= *view { - is_done.store(true, Ordering::Relaxed); - return true; - } - // we aren't done - return false; - } - true - }) - .collect(); - - // logic to look ahead - if !inner.exchanges.quorum_exchange().is_leader(view_to_lookup) { - let is_done = Arc::new(AtomicBool::new(false)); - completion_map.insert(view_to_lookup, is_done.clone()); - let inner = inner.clone(); - let networking = networking.clone(); - async_spawn_local(async move { - info!("starting lookup for {:?}", view_to_lookup); - let _result = networking - .lookup_node(inner.exchanges.quorum_exchange().get_leader(view_to_lookup)) - .await; - info!("finished lookup for {:?}", view_to_lookup); - }); - } - } - } - - // shut down all child tasks - for (_, is_done) in completion_map { - is_done.store(true, Ordering::Relaxed); - } -} +use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; /// event for global event stream #[derive(Clone, Debug)] diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c01bf53982..ddc13b9571 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -5,16 +5,17 @@ use super::NetworkingMetrics; use crate::NodeImplementation; use async_compatibility_layer::{ - art::{async_block_on, async_sleep, async_spawn}, - channel::{unbounded, UnboundedReceiver, UnboundedSender}, + art::{async_block_on, async_sleep, async_spawn, async_timeout}, + channel::{unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, }; use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; +use hotshot_constants::{KAD_DEFAULT_REPUB_INTERVAL_SEC, LOOK_AHEAD}; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, + data::{ProposalType, ViewNumber}, message::{Message, MessageKind}, traits::{ election::Membership, @@ -26,6 +27,7 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, + state::ConsensusTime, }, vote::VoteType, }; @@ -49,10 +51,13 @@ use std::{ marker::PhantomData, num::NonZeroUsize, str::FromStr, - sync::{atomic::AtomicBool, Arc}, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, time::Duration, }; -use tracing::{error, info, instrument}; +use tracing::{error, info, instrument, warn}; /// hardcoded topic of QC used pub const QC_TOPIC: &str = "global"; @@ -78,10 +83,7 @@ struct Libp2pNetworkInner { /// this node's public key pk: K, /// handle to control the network - handle: Arc>, - /// Bidirectional map from public key provided by espresso - /// to public key provided by libp2p - pubkey_pid_map: RwLock>, + handle: Arc>, /// map of known replica peer ids to public keys broadcast_recv: UnboundedReceiver, /// Sender for broadcast messages @@ -90,6 +92,10 @@ struct Libp2pNetworkInner { direct_send: UnboundedSender, /// Receiver for direct messages direct_recv: UnboundedReceiver, + /// Sender for node lookup (relevant view number, key of node) (None for shutdown) + node_lookup_send: UnboundedSender>, + /// Sender for shutdown of the peer cache's garbage collection task + cache_gc_shutdown_send: UnboundedSender<()>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -107,6 +113,10 @@ struct Libp2pNetworkInner { /// hash(hashset) -> topic /// btreemap ordered so is hashable topic_map: RwLock, String>>, + /// the latest view number (for node lookup purposes) + /// NOTE: supposed to represent a ViewNumber but we + /// haven't made that atomic yet and we prefer lock-free + latest_seen_view: Arc, } /// Networking implementation that uses libp2p @@ -244,11 +254,7 @@ impl Libp2pNetwork { /// Returns when network is ready pub async fn wait_for_ready(&self) { loop { - if self - .inner - .is_ready - .load(std::sync::atomic::Ordering::Relaxed) - { + if self.inner.is_ready.load(Ordering::Relaxed) { break; } async_sleep(Duration::from_secs(1)).await; @@ -282,7 +288,7 @@ impl Libp2pNetwork { ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); let network_handle = Arc::new( - NetworkNodeHandle::<()>::new(config, id) + NetworkNodeHandle::<(), K>::new(config, id) .await .map_err(Into::::into)?, ); @@ -300,7 +306,6 @@ impl Libp2pNetwork { } let mut pubkey_pid_map = BiHashMap::new(); - pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); let mut topic_map = BiHashMap::new(); @@ -313,11 +318,12 @@ impl Libp2pNetwork { // if bounded figure out a way to log dropped msgs let (direct_send, direct_recv) = unbounded(); let (broadcast_send, broadcast_recv) = unbounded(); + let (node_lookup_send, node_lookup_recv) = unbounded(); + let (cache_gc_shutdown_send, cache_gc_shutdown_recv) = unbounded::<()>(); let result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: network_handle, - pubkey_pid_map: RwLock::new(pubkey_pid_map), broadcast_recv, direct_send: direct_send.clone(), direct_recv, @@ -330,16 +336,74 @@ impl Libp2pNetwork { is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics: NetworkingMetrics::new(&*metrics), topic_map, + node_lookup_send, + cache_gc_shutdown_send, + // Start the latest view from 0. "Latest" refers to "most recent view we are polling for + // proposals on". We need this because to have consensus info injected we need a working + // network already. In the worst case, we send a few lookups we don't need. + latest_seen_view: Arc::new(AtomicU64::new(0)), }), }; result.spawn_event_generator(direct_send, broadcast_send); - + result.spawn_node_lookup(node_lookup_recv, cache_gc_shutdown_recv); result.spawn_connect(id); Ok(result) } + /// Spawns task for looking up nodes pre-emptively + /// as well as garbage collecting the peer cache + #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] + fn spawn_node_lookup( + &self, + node_lookup_recv: UnboundedReceiver>, + cache_gc_shutdown_send: UnboundedReceiver<()>, + ) { + let handle = self.inner.handle.clone(); + let dht_timeout = self.inner.dht_timeout; + let latest_seen_view = self.inner.latest_seen_view.clone(); + + // deals with handling lookup queue. should be infallible + let handle_ = handle.clone(); + async_spawn(async move { + // cancels on shutdown + while let Ok(Some((view_number, pk))) = node_lookup_recv.recv().await { + /// defines lookahead threshold based on the constant + const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; + + info!("Performing lookup for peer {:?}", pk); + + // only run if we are not too close to the next view number + if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { + // look up node, caching if applicable + if let Err(err) = handle_.lookup_node::(pk.clone(), dht_timeout).await { + error!("Failed to perform lookup for key {:?}: {}", pk, err); + }; + } + } + }); + + // deals with garbage collecting the lookup queue + let handle_ = handle.clone(); + async_spawn(async move { + loop { + let ttl = handle_ + .config() + .ttl + .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 8)); + if async_timeout(ttl, cache_gc_shutdown_send.recv()) + .await + .is_err() + { + handle_.prune_peer_cache().await; + } else { + break; + } + } + }); + } + /// Initiates connection to the outside world fn spawn_connect(&self, id: usize) { let pk = self.inner.pk.clone(); @@ -376,7 +440,7 @@ impl Libp2pNetwork { .await .unwrap(); - while !is_bootstrapped.load(std::sync::atomic::Ordering::Relaxed) { + while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } @@ -414,7 +478,7 @@ impl Libp2pNetwork { node_type ); - is_ready.store(true, std::sync::atomic::Ordering::Relaxed); + is_ready.store(true, Ordering::Relaxed); info!("STARTING CONSENSUS ON {:?}", handle.peer_id()); Ok::<(), NetworkError>(()) } @@ -480,7 +544,7 @@ impl Libp2pNetwork { .context(FailedToSerializeSnafu); } NetworkEvent::IsBootstrapped => { - is_bootstrapped.store(true, std::sync::atomic::Ordering::Relaxed); + is_bootstrapped.store(true, Ordering::Relaxed); } } } @@ -499,9 +563,7 @@ impl ConnectedNetwork for Libp2p #[instrument(name = "Libp2pNetwork::ready_nonblocking", skip_all)] async fn is_ready(&self) -> bool { - self.inner - .is_ready - .load(std::sync::atomic::Ordering::Relaxed) + self.inner.is_ready.load(Ordering::Relaxed) } #[instrument(name = "Libp2pNetwork::shut_down", skip_all)] @@ -511,6 +573,8 @@ impl ConnectedNetwork for Libp2p Self: 'b, { let closure = async move { + self.inner.node_lookup_send.send(None).await.unwrap(); + self.inner.cache_gc_shutdown_send.send(()).await.unwrap(); if self.inner.handle.is_killed() { error!("Called shut down when already shut down! Noop."); } else { @@ -586,37 +650,24 @@ impl ConnectedNetwork for Libp2p } self.wait_for_ready().await; - // check local cache. if that fails, initiate search - // if search fails, just error out - // NOTE: relay may be a good way to fix this in the future . - let pid: PeerId = if let Some(pid) = self + + let pid = match self .inner - .pubkey_pid_map - .read() + .handle + .lookup_node::(recipient.clone(), self.inner.dht_timeout) .await - .get_by_left(&recipient) { - *pid - } else { - match self - .inner - .handle - .get_record_timeout(&recipient, self.inner.dht_timeout) - .await - { - Ok(r) => r, - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - error!("Failed to message {:?} because could not find recipient peer id for pk {:?}", message, recipient); - return Err(NetworkError::Libp2p { source: e }); - } + Ok(pid) => pid, + Err(err) => { + self.inner.metrics.message_failed_to_send.add(1); + error!( + "Failed to message {:?} because could not find recipient peer id for pk {:?}", + message, recipient + ); + return Err(NetworkError::Libp2p { source: err }); } }; - if let Err(e) = self.inner.handle.lookup_pid(pid).await { - self.inner.metrics.message_failed_to_send.add(1); - return Err(e.into()); - } match self.inner.handle.direct_request(pid, &message).await { Ok(()) => { self.inner.metrics.outgoing_message_count.add(1); @@ -669,44 +720,37 @@ impl ConnectedNetwork for Libp2p boxed_sync(closure) } - #[instrument(name = "Libp2pNetwork::lookup_node", skip_all)] - async fn lookup_node(&self, pk: K) -> Result<(), NetworkError> { - self.wait_for_ready().await; - - if self.inner.handle.is_killed() { - return Err(NetworkError::ShutDown); - } - - let maybe_pid = self - .inner - .handle - .get_record_timeout(&pk, self.inner.dht_timeout) + #[instrument(name = "Libp2pNetwork::queue_node_lookup", skip_all)] + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: K, + ) -> Result<(), UnboundedSendError>> { + self.inner + .node_lookup_send + .send(Some((view_number, pk))) .await - .map_err(Into::::into); - - if let Ok(pid) = maybe_pid { - if self.inner.handle.lookup_pid(pid).await.is_err() { - error!("Failed to look up pid"); - return Err(NetworkError::Libp2p { - source: NetworkNodeHandleError::DHTError { - source: libp2p_networking::network::error::DHTError::NotFound, - }, - }); - }; - } else { - error!("Unable to look up pubkey {:?}", pk); - return Err(NetworkError::Libp2p { - source: NetworkNodeHandleError::DHTError { - source: libp2p_networking::network::error::DHTError::NotFound, - }, - }); - } - - Ok(()) } - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + match event { + ConsensusIntentEvent::PollFutureLeader(future_view, future_leader) => { + let _ = self + .queue_node_lookup(ViewNumber::new(future_view), future_leader) + .await + .map_err(|err| warn!("failed to process node lookup request: {}", err)); + } + + ConsensusIntentEvent::PollForProposal(new_view) => { + if new_view > self.inner.latest_seen_view.load(Ordering::Relaxed) { + self.inner + .latest_seen_view + .store(new_view, Ordering::Relaxed); + } + } + + _ => {} + } } } @@ -851,12 +895,20 @@ where boxed_sync(closure) } - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - self.0.lookup_node(pk).await + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + self.0.queue_node_lookup(view_number, pk).await } - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(&self.0, event) + .await; } } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d6269e2c38..d18b938f06 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -22,9 +22,8 @@ use hotshot_types::{ election::Membership, metrics::{Metrics, NoMetrics}, network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkMsg, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - ViewMessage, + CommunicationChannel, ConnectedNetwork, NetworkMsg, TestableChannelImplementation, + TestableNetworkingImplementation, TransmitType, ViewMessage, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -448,16 +447,6 @@ impl ConnectedNetwork for Memory }; boxed_sync(closure) } - - #[instrument(name = "MemoryNetwork::lookup_node", skip_all)] - async fn lookup_node(&self, _pk: K) -> Result<(), NetworkError> { - // no lookup required - Ok(()) - } - - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required - } } /// memory identity communication channel @@ -588,14 +577,6 @@ where let closure = async move { self.0.recv_msgs(transmit_type).await }; boxed_sync(closure) } - - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - self.0.lookup_node(pk).await - } - - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required - } } impl< diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs index 723c7c071d..170ad630f5 100644 --- a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs +++ b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs @@ -10,9 +10,10 @@ use async_trait::async_trait; use futures::join; +use async_compatibility_layer::channel::UnboundedSendError; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, + data::{ProposalType, ViewNumber}, message::Message, traits::{ election::Membership, @@ -263,36 +264,31 @@ impl< boxed_sync(closure) } - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - match join!( - self.network().lookup_node(pk.clone()), - self.fallback().lookup_node(pk) - ) { - (Err(e1), Err(e2)) => { - error!( - "Both network lookups failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary lookup with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup lookup with error: {}", e); - Ok(()) - } - _ => Ok(()), - } + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + self.network() + .queue_node_lookup(view_number, pk.clone()) + .await?; + self.fallback().queue_node_lookup(view_number, pk).await?; + + Ok(()) } - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { as ConnectedNetwork< Message, TYPES::SignatureKey, - >>::inject_consensus_info(self.network(), event) + >>::inject_consensus_info(self.network(), event.clone()) .await; + + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(self.fallback(), event) + .await; } } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index e0001b94ab..374c8a40e7 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -128,17 +128,23 @@ struct Inner { // TODO ED This should be TYPES::Time // Theoretically there should never be contention for this lock... /// Task map for quorum proposals. - proposal_task_map: Arc>>>, + proposal_task_map: + Arc>>>>, /// Task map for quorum votes. - vote_task_map: Arc>>>, + vote_task_map: + Arc>>>>, /// Task map for DACs. - dac_task_map: Arc>>>, + dac_task_map: + Arc>>>>, /// Task map for view sync certificates. - view_sync_cert_task_map: Arc>>>, + view_sync_cert_task_map: + Arc>>>>, /// Task map for view sync votes. - view_sync_vote_task_map: Arc>>>, + view_sync_vote_task_map: + Arc>>>>, /// Task map for transactions - txn_task_map: Arc>>>, + txn_task_map: + Arc>>>>, } impl Inner { @@ -146,7 +152,7 @@ impl Inner { /// Pull a web server. async fn poll_web_server( &self, - receiver: UnboundedReceiver, + receiver: UnboundedReceiver>, message_purpose: MessagePurpose, view_number: u64, ) -> Result<(), NetworkError> { @@ -608,13 +614,7 @@ impl< boxed_sync(closure) } - /// look up a node - /// blocking - async fn lookup_node(&self, _pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - Ok(()) - } - - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { as ConnectedNetwork< Message, TYPES::SignatureKey, @@ -725,14 +725,8 @@ impl< boxed_sync(closure) } - /// look up a node - /// blocking - async fn lookup_node(&self, _pk: K) -> Result<(), NetworkError> { - Ok(()) - } - #[allow(clippy::too_many_lines)] - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { debug!( "Injecting event: {:?} is da {}", event.clone(), @@ -997,7 +991,7 @@ impl< }; } - _ => error!("Unexpected event!"), + _ => {} } } } diff --git a/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs b/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs deleted file mode 100644 index 723c7c071d..0000000000 --- a/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs +++ /dev/null @@ -1,318 +0,0 @@ -//! Networking Implementation that has a primary and a fallback newtork. If the primary -//! Errors we will use the backup to send or receive -use super::NetworkError; -use crate::{ - traits::implementations::{Libp2pNetwork, WebServerNetwork}, - NodeImplementation, -}; - -use async_trait::async_trait; - -use futures::join; - -use hotshot_task::{boxed_sync, BoxSyncFuture}; -use hotshot_types::{ - data::ProposalType, - message::Message, - traits::{ - election::Membership, - network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - ViewMessage, - }, - node_implementation::NodeType, - }, - vote::VoteType, -}; -use std::{marker::PhantomData, sync::Arc}; -use tracing::error; -/// A communication channel with 2 networks, where we can fall back to the slower network if the -/// primary fails -#[derive(Clone, Debug)] -pub struct WebServerWithFallbackCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { - /// The two networks we'll use for send/recv - networks: Arc>, -} - -impl, MEMBERSHIP: Membership> - WebServerWithFallbackCommChannel -{ - /// Constructor - #[must_use] - pub fn new(networks: Arc>) -> Self { - Self { networks } - } - - /// Get a ref to the primary network - #[must_use] - pub fn network(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { - &self.networks.0 - } - - /// Get a ref to the backup network - #[must_use] - pub fn fallback(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { - &self.networks.1 - } -} - -/// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` -/// We need this so we can impl `TestableNetworkingImplementation` -/// on the tuple -#[derive(Debug, Clone)] -pub struct CombinedNetworks< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, ->( - pub WebServerNetwork, TYPES::SignatureKey, TYPES>, - pub Libp2pNetwork, TYPES::SignatureKey>, - pub PhantomData, -); - -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for CombinedNetworks -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - ) -> Box Self + 'static> { - let generators = ( - , - TYPES::SignatureKey, - TYPES, - > as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ), - , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ) - ); - Box::new(move |node_id| { - CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) - }) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for WebServerWithFallbackCommChannel -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - ) -> Box Self + 'static> { - let generator = as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ); - Box::new(move |node_id| Self { - networks: generator(node_id).into(), - }) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -#[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for WebServerWithFallbackCommChannel -{ - type NETWORK = CombinedNetworks; - - async fn wait_for_ready(&self) { - join!( - self.network().wait_for_ready(), - self.fallback().wait_for_ready() - ); - } - - async fn is_ready(&self) -> bool { - self.network().is_ready().await && self.fallback().is_ready().await - } - - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - join!(self.network().shut_down(), self.fallback().shut_down()); - }; - boxed_sync(closure) - } - - async fn broadcast_message( - &self, - message: Message, - election: &MEMBERSHIP, - ) -> Result<(), NetworkError> { - let recipients = - >::get_committee(election, message.get_view_number()); - let fallback = self - .fallback() - .broadcast_message(message.clone(), recipients.clone()); - let network = self.network().broadcast_message(message, recipients); - match join!(fallback, network) { - (Err(e1), Err(e2)) => { - error!( - "Both network broadcasts failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary broadcast with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup broadcast with error: {}", e); - Ok(()) - } - _ => Ok(()), - } - } - - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { - match self - .network() - .direct_message(message.clone(), recipient.clone()) - .await - { - Ok(_) => Ok(()), - Err(e) => { - error!( - "Falling back on direct message, error on primary network: {}", - e - ); - self.fallback().direct_message(message, recipient).await - } - } - } - - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - match self.network().recv_msgs(transmit_type).await { - Ok(msgs) => Ok(msgs), - Err(e) => { - error!( - "Falling back on recv message, error on primary network: {}", - e - ); - self.fallback().recv_msgs(transmit_type).await - } - } - }; - boxed_sync(closure) - } - - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - match join!( - self.network().lookup_node(pk.clone()), - self.fallback().lookup_node(pk) - ) { - (Err(e1), Err(e2)) => { - error!( - "Both network lookups failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary lookup with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup lookup with error: {}", e); - Ok(()) - } - _ => Ok(()), - } - } - - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(self.network(), event) - .await; - } -} - -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > - TestableChannelImplementation< - TYPES, - Message, - PROPOSAL, - VOTE, - MEMBERSHIP, - CombinedNetworks, - > for WebServerWithFallbackCommChannel -{ - fn generate_network() -> Box) -> Self + 'static> { - Box::new(move |network| WebServerWithFallbackCommChannel::new(network)) - } -} diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 2a9e570a76..42c1487e53 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -120,8 +120,7 @@ mod test { use hotshot_signature_key::bn254::BN254Pub; use hotshot_types::{ certificate::{AssembledSignature, QuorumCertificate}, - constants::genesis_proposer_id, - data::{fake_commitment, ValidatingLeaf, ViewNumber}, + data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, traits::{ block_contents::dummy::{DummyBlock, DummyState}, node_implementation::NodeType, diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 9e9df1cd98..9e0efc24d3 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -34,6 +34,7 @@ custom_debug = { workspace = true } derive_builder = "0.12.0" either = { workspace = true } futures = { workspace = true } +hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } @@ -50,6 +51,7 @@ tide = { version = "0.16", optional = true, default-features = false, features = tokio-stream = "0.1.14" tracing = { workspace = true } void = "1.0.2" +dashmap = "5.5.3" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 3ec1c07b73..54b3d89035 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -34,7 +34,7 @@ use libp2p::{ use libp2p_identity::PeerId; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc, time::Duration}; +use std::{collections::HashSet, fmt::Debug, hash::Hash, str::FromStr, sync::Arc, time::Duration}; use tracing::{info, instrument}; #[cfg(async_executor_impl = "async-std")] @@ -229,12 +229,12 @@ pub async fn gen_transport( /// a single node, connects them to each other /// and waits for connections to propagate to all nodes. #[instrument] -pub async fn spin_up_swarm( +pub async fn spin_up_swarm( timeout_len: Duration, known_nodes: Vec<(Option, Multiaddr)>, config: NetworkNodeConfig, idx: usize, - handle: &Arc>, + handle: &Arc>, ) -> Result<(), NetworkNodeHandleError> { info!("known_nodes{:?}", known_nodes); handle.add_known_peers(known_nodes).await?; @@ -248,9 +248,9 @@ pub async fn spin_up_swarm( /// chooses one /// # Panics /// panics if handles is of length 0 -pub fn get_random_handle( - handles: &[Arc>], +pub fn get_random_handle( + handles: &[Arc>], rng: &mut dyn rand::RngCore, -) -> Arc> { +) -> Arc> { handles.iter().choose(rng).unwrap().clone() } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 37c9683428..2f7dd46ba4 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -31,6 +31,7 @@ use async_compatibility_layer::{ }; use either::Either; use futures::{select, FutureExt, StreamExt}; +use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::{ core::{muxing::StreamMuxerBox, transport::Boxed}, gossipsub::{ @@ -239,7 +240,7 @@ impl NetworkNode { // 8 hours by default let record_republication_interval = config .republication_interval - .unwrap_or(Duration::from_secs(28800)); + .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC)); let ttl = Some(config.ttl.unwrap_or(16 * record_republication_interval)); kconfig .set_parallelism(NonZeroUsize::new(1).unwrap()) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 3a7236c353..04961140c6 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -11,22 +11,25 @@ use async_compatibility_layer::{ UnboundedReceiver, UnboundedRecvError, UnboundedSender, }, }; -use async_lock::Mutex; +use async_lock::{Mutex, RwLock}; use bincode::Options; +use dashmap::DashMap; use futures::{stream::FuturesOrdered, Future, FutureExt}; +use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use hotshot_utils::bincode::bincode_opts; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use std::{ - collections::HashSet, + collections::{BTreeMap, HashSet}, fmt::Debug, + hash::Hash, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, - time::{Duration, Instant}, + time::{Duration, Instant, SystemTime}, }; use tracing::{debug, info, instrument}; @@ -34,7 +37,7 @@ use tracing::{debug, info, instrument}; /// - A reference to the state /// - Controls for the swarm #[derive(Debug)] -pub struct NetworkNodeHandle { +pub struct NetworkNodeHandle { /// network configuration network_config: NetworkNodeConfig, /// the state of the replica @@ -48,6 +51,10 @@ pub struct NetworkNodeHandle { peer_id: PeerId, /// human readable id id: usize, + /// the cache for peers we've looked up + peer_cache: Arc>, + /// the expiries for the peer cache, in order + peer_cache_expiries: Arc>>, /// A list of webui listeners that are listening for changes on this node webui_listeners: Arc>>>, @@ -84,7 +91,7 @@ impl NetworkNodeReceiver { } } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// constructs a new node listening on `known_addr` #[instrument] pub async fn new(config: NetworkNodeConfig, id: usize) -> Result { @@ -112,7 +119,6 @@ impl NetworkNodeHandle { let kill_switch = Mutex::new(Some(kill_switch)); let recv_kill = Mutex::new(Some(recv_kill)); - Ok(NetworkNodeHandle { network_config: config, state: std::sync::Arc::default(), @@ -120,6 +126,8 @@ impl NetworkNodeHandle { listen_addr, peer_id, id, + peer_cache: Arc::new(DashMap::new()), + peer_cache_expiries: Arc::new(RwLock::new(BTreeMap::new())), webui_listeners: Arc::default(), receiver: NetworkNodeReceiver { kill_switch, @@ -139,9 +147,10 @@ impl NetworkNodeHandle { #[allow(clippy::unused_async)] pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where - F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, + F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, RET: Future> + Send + 'static, S: Send + 'static, + K: Send + Sync + 'static, { assert!( !self.receiver.receiver_spawned.swap(true, Ordering::Relaxed), @@ -260,7 +269,7 @@ impl NetworkNodeHandle { } } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Print out the routing table used by kademlia /// NOTE: only for debugging purposes currently /// # Errors @@ -283,6 +292,55 @@ impl NetworkNodeHandle { r.await.map_err(|_| NetworkNodeHandleError::RecvError) } + /// Prunes the peer lookup cache, removing old entries + /// Should be 1:1 with kademlia expiries + pub async fn prune_peer_cache(&self) { + let now = SystemTime::now(); + let mut expiries = self.peer_cache_expiries.write().await; + + while let Some((expires, key)) = expiries.pop_first() { + if now > expires { + self.peer_cache.remove(&key); + } else { + expiries.insert(expires, key); + break; + } + } + } + + /// Looks up a node's `PeerId` and attempts to validate routing + /// Will use cached `PeerId` if available + /// # Errors + /// if the peer was unable to be looked up (did not provide a response, DNE) + pub async fn lookup_node Deserialize<'a>>( + &self, + key: K, + dht_timeout: Duration, + ) -> Result { + let pid = if let Some(record) = self.peer_cache.get(&key) { + // exists in cache. look up routing but skip kademlia + *record.value() + } else { + // does not exist in cache. look up in kademlia, store in cache + let pid = self.get_record_timeout::(&key, dht_timeout).await?; + self.peer_cache.insert(key.clone(), pid); + self.peer_cache_expiries.write().await.insert( + SystemTime::now() + + self + .network_config + .ttl + .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 16)), + key, + ); + pid + }; + + // pid lookup for routing + self.lookup_pid(pid).await?; + + Ok(pid) + } + /// Insert a record into the kademlia DHT /// # Errors /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT @@ -611,7 +669,7 @@ impl NetworkNodeHandle { } } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Get a clone of the internal state pub async fn state(&self) -> S { self.state.cloned().await @@ -680,3 +738,106 @@ pub mod network_node_handle_error { NetworkSnafu, NodeConfigSnafu, RecvSnafu, SendSnafu, SerializationSnafu, TimeoutSnafu, }; } + +#[cfg(test)] +mod test { + use super::*; + + /// libp2p peer cache test + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_libp2p_cache_eviction() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle: NetworkNodeHandle<(), PeerId> = + NetworkNodeHandle::new(NetworkNodeConfig::default(), 0) + .await + .unwrap(); + + let now = SystemTime::now(); + let later = now + Duration::from_secs(1); + + // present insert + let present_key = PeerId::random(); + let present_pid = PeerId::random(); + handle.peer_cache.insert(present_key, present_pid); + handle + .peer_cache_expiries + .write() + .await + .insert(now, present_key); + + // later insert + let later_key = PeerId::random(); + let later_pid = PeerId::random(); + handle.peer_cache.insert(later_key, later_pid); + handle + .peer_cache_expiries + .write() + .await + .insert(now + Duration::from_secs(1), later_key); + + // check that now and later exist + assert!(handle + .peer_cache + .get(&present_key) + .is_some_and(|entry| entry.value() == &present_pid)); + assert!(handle + .peer_cache + .get(&later_key) + .is_some_and(|entry| entry.value() == &later_pid)); + assert!(handle + .peer_cache_expiries + .read() + .await + .get(&now) + .is_some_and(|entry| entry == &present_key)); + assert!(handle + .peer_cache_expiries + .read() + .await + .get(&later) + .is_some_and(|entry| entry == &later_key)); + + // prune + handle.prune_peer_cache().await; + + // check that now doesn't exist and later does + assert!(handle.peer_cache.get(&present_key).is_none()); + assert!(handle + .peer_cache + .get(&later_key) + .is_some_and(|entry| entry.value() == &later_pid)); + assert!(handle.peer_cache_expiries.read().await.get(&now).is_none()); + assert!(handle + .peer_cache_expiries + .read() + .await + .get(&later) + .is_some_and(|entry| entry == &later_key)); + + // wait for later to expire + async_sleep(Duration::from_secs(1)).await; + + // prune + handle.prune_peer_cache().await; + + // check that later doesn't exist + assert!(handle.peer_cache.get(&later_key).is_none()); + assert!(handle + .peer_cache_expiries + .read() + .await + .get(&later) + .is_none()); + + // check that the expiries and cache are empty + assert!(handle.peer_cache_expiries.read().await.is_empty()); + assert!(handle.peer_cache.is_empty()); + } +} diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index ca9e967319..5a531bbafb 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -39,8 +39,8 @@ pub async fn test_bed, FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>>, Duration) -> FutF, - G: Fn(NetworkEvent, Arc>) -> FutG + 'static + Send + Sync, + F: FnOnce(Vec>>, Duration) -> FutF, + G: Fn(NetworkEvent, Arc>) -> FutG + 'static + Send + Sync, { setup_logging(); setup_backtrace(); @@ -69,7 +69,7 @@ pub async fn test_bed(handles: &[Arc>]) -> HashMap { +fn gen_peerid_map(handles: &[Arc>]) -> HashMap { let mut r_val = HashMap::new(); for handle in handles { r_val.insert(handle.peer_id(), handle.id()); @@ -79,7 +79,7 @@ fn gen_peerid_map(handles: &[Arc>]) -> HashMap(handles: &[Arc>]) { +pub async fn print_connections(handles: &[Arc>]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); for handle in handles.iter() { @@ -104,7 +104,7 @@ pub async fn spin_up_swarms( num_of_nodes: usize, timeout_len: Duration, num_bootstrap: usize, -) -> Result>>, TestError> { +) -> Result>>, TestError> { let mut handles = Vec::new(); let mut bootstrap_addrs = Vec::<(PeerId, Multiaddr)>::new(); let mut connecting_futs = Vec::new(); diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index ae3277b29f..e7dad7c8c6 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -56,7 +56,7 @@ pub enum CounterMessage { #[instrument] pub async fn counter_handle_network_event( event: NetworkEvent, - handle: Arc>, + handle: Arc>, ) -> Result<(), NetworkNodeHandleError> { use CounterMessage::*; use NetworkEvent::*; @@ -121,8 +121,8 @@ pub async fn counter_handle_network_event( /// `requester_handle` asks for `requestee_handle`'s state, /// and then `requester_handle` updates its state to equal `requestee_handle`. async fn run_request_response_increment<'a>( - requester_handle: Arc>, - requestee_handle: Arc>, + requester_handle: Arc>, + requestee_handle: Arc>, timeout: Duration, ) -> Result<(), TestError> { async move { @@ -168,7 +168,7 @@ async fn run_request_response_increment<'a>( /// broadcasts `msg` from a randomly chosen handle /// then asserts that all nodes match `new_state` async fn run_gossip_round( - handles: &[Arc>], + handles: &[Arc>], msg: CounterMessage, new_state: CounterState, timeout_duration: Duration, @@ -234,7 +234,7 @@ async fn run_gossip_round( } async fn run_intersperse_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { for i in 0..NUM_ROUNDS as u32 { @@ -250,18 +250,21 @@ async fn run_intersperse_many_rounds( } async fn run_dht_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; } -async fn run_dht_one_round(handles: Vec>>, timeout: Duration) { +async fn run_dht_one_round( + handles: Vec>>, + timeout: Duration, +) { run_dht_rounds(&handles, timeout, 0, 1).await; } async fn run_request_response_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { for _i in 0..NUM_ROUNDS { @@ -273,7 +276,7 @@ async fn run_request_response_many_rounds( } pub async fn run_request_response_one_round( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; @@ -283,21 +286,21 @@ pub async fn run_request_response_one_round( } pub async fn run_gossip_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await } async fn run_gossip_one_round( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_gossip_rounds(&handles, 1, 0, timeout).await } async fn run_dht_rounds( - handles: &[Arc>], + handles: &[Arc>], timeout: Duration, starting_val: usize, num_rounds: usize, @@ -333,7 +336,7 @@ async fn run_dht_rounds( /// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast async fn run_gossip_rounds( - handles: &[Arc>], + handles: &[Arc>], num_rounds: usize, starting_state: CounterState, timeout: Duration, @@ -358,7 +361,7 @@ async fn run_gossip_rounds( /// then has all other peers request its state /// and update their state to the recv'ed state async fn run_request_response_increment_all( - handles: &[Arc>], + handles: &[Arc>], timeout: Duration, ) { let mut rng = rand::thread_rng(); diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 71b7e467d8..5978e10629 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -18,6 +18,7 @@ tracing = { workspace = true } atomic_enum = "0.2.0" pin-project = "1.1.3" async-stream = "0.3.5" +hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-task = { path = "../task", default-features = false } time = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 03fcc387a8..8767e96af2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -12,6 +12,7 @@ use commit::Committable; use core::time::Duration; use either::{Either, Left, Right}; use futures::FutureExt; +use hotshot_constants::LOOK_AHEAD; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, @@ -488,6 +489,18 @@ where self.cur_view = new_view; self.current_proposal = None; + // Poll the future leader for lookahead + let lookahead_view = new_view + LOOK_AHEAD; + if !self.quorum_exchange.is_leader(lookahead_view) { + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( + *lookahead_view, + self.quorum_exchange.get_leader(lookahead_view), + )) + .await; + } + // Start polling for proposals for the new view self.quorum_exchange .network() diff --git a/types/Cargo.toml b/types/Cargo.toml index f65eac0983..5ea10f246c 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -33,6 +33,7 @@ espresso-systems-common = { workspace = true } futures = { workspace = true } generic-array = "0.14.7" hex_fmt = "0.3.0" +hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", default-features = false } jf-primitives = { workspace = true } diff --git a/types/src/constants.rs b/types/src/constants.rs deleted file mode 100644 index 26acfb1abc..0000000000 --- a/types/src/constants.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! configurable constants for hotshot - -use crate::traits::signature_key::EncodedPublicKey; - -/// the number of views to gather information for ahead of time -pub const LOOK_AHEAD: u64 = 5; - -/// the genesis proposer pk -/// unfortunately need to allocate on the heap (for vec), so this ends up as a function instead of a -/// const -#[must_use] -pub fn genesis_proposer_id() -> EncodedPublicKey { - EncodedPublicKey(vec![4, 2]) -} diff --git a/types/src/data.rs b/types/src/data.rs index 68fc0ec064..3f12fb7c82 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,7 +8,6 @@ use crate::{ AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCertificate, }, - constants::genesis_proposer_id, traits::{ election::SignedCertificate, node_implementation::NodeType, @@ -24,6 +23,7 @@ use commit::{Commitment, Committable}; use derivative::Derivative; use either::Either; use espresso_systems_common::hotshot::tag; +use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; use rand::Rng; use serde::{Deserialize, Serialize}; @@ -100,6 +100,12 @@ impl std::ops::Sub for ViewNumber { } } +/// Generate the genesis block proposer ID from the defined constant +#[must_use] +pub fn genesis_proposer_id() -> EncodedPublicKey { + EncodedPublicKey(GENESIS_PROPOSER_ID.to_vec()) +} + /// The `Transaction` type associated with a `State`, as a syntactic shortcut pub type Transaction = <::BlockType as Block>::Transaction; /// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut diff --git a/types/src/lib.rs b/types/src/lib.rs index e574bef876..72ad6ce093 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -13,7 +13,6 @@ use std::{num::NonZeroUsize, time::Duration}; pub mod certificate; pub mod consensus; -pub mod constants; pub mod data; pub mod error; pub mod event; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index ee2d09672d..2f7ff00f1b 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -11,7 +11,12 @@ use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use super::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ProposalType, message::MessagePurpose, vote::VoteType}; +use crate::{ + data::{ProposalType, ViewNumber}, + message::MessagePurpose, + vote::VoteType, +}; +use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -130,7 +135,7 @@ pub enum NetworkError { #[derive(Clone, Debug)] // Storing view number as a u64 to avoid the need TYPES generic /// Events to poll or cancel consensus processes. -pub enum ConsensusIntentEvent { +pub enum ConsensusIntentEvent { /// Poll for votes for a particular view PollForVotes(u64), /// Poll for a proposal for a particular view @@ -143,6 +148,8 @@ pub enum ConsensusIntentEvent { PollForViewSyncCertificate(u64), /// Poll for new transactions PollForTransactions(u64), + /// Poll for future leader + PollFutureLeader(u64, K), /// Cancel polling for votes CancelPollForVotes(u64), /// Cancel polling for view sync votes. @@ -157,7 +164,7 @@ pub enum ConsensusIntentEvent { CancelPollForTransactions(u64), } -impl ConsensusIntentEvent { +impl ConsensusIntentEvent { /// Get the view number of the event. #[must_use] pub fn view_number(&self) -> u64 { @@ -173,7 +180,8 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForTransactions(view_number) - | ConsensusIntentEvent::CancelPollForTransactions(view_number) => *view_number, + | ConsensusIntentEvent::CancelPollForTransactions(view_number) + | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, } } } @@ -250,13 +258,18 @@ pub trait CommunicationChannel< 'a: 'b, Self: 'b; - /// look up a node - /// blocking - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError>; + /// queues looking up a node + async fn queue_node_lookup( + &self, + _view_number: ViewNumber, + _pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + Ok(()) + } /// Injects consensus data such as view number into the networking implementation /// blocking - async fn inject_consensus_info(&self, event: ConsensusIntentEvent); + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} } /// represents a networking implmentration @@ -305,14 +318,19 @@ pub trait ConnectedNetwork: 'a: 'b, Self: 'b; - /// look up a node - /// blocking - async fn lookup_node(&self, pk: K) -> Result<(), NetworkError>; + /// queues lookup of a node + async fn queue_node_lookup( + &self, + _view_number: ViewNumber, + _pk: K, + ) -> Result<(), UnboundedSendError>> { + Ok(()) + } /// Injects consensus data such as view number into the networking implementation /// blocking /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType - async fn inject_consensus_info(&self, event: ConsensusIntentEvent); + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} } /// Describes additional functionality needed by the test network implementation From 2d9c2b909b974691c16e5296e4d40fa6a66b56a9 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 19:35:38 -0400 Subject: [PATCH 0050/1393] Implement get_data for VoteType --- types/src/vote.rs | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/types/src/vote.rs b/types/src/vote.rs index fab0b1711e..3986a8a4c3 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -41,9 +41,7 @@ pub trait VoteType EncodedSignature; /// Get the data this vote was signed over - fn get_data(self) -> VoteData { - todo!() - } + fn get_data(self) -> VoteData; } /// A vote on DA proposal. @@ -211,6 +209,9 @@ impl VoteType for DAVote { // TODO ED Revisit this function self.signature.1 } + fn get_data(self) -> VoteData { + self.vote_data + } } // TODO ED Remove this @@ -218,7 +219,7 @@ impl DAVote { /// Get the signature key. /// # Panics /// If the deserialization fails. - // #[deprecated] + #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { ::from_bytes(&self.signature.0).unwrap() } @@ -240,11 +241,17 @@ impl> VoteType fn get_signature(self) -> EncodedSignature { self.signature() } + fn get_data(self) -> VoteData { + match self { + QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data, + QuorumVote::Timeout(v) => unimplemented!() + } + } } impl> QuorumVote { /// Get the encoded signature. - // #[deprecated] + #[deprecated] pub fn signature(&self) -> EncodedSignature { match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), @@ -254,7 +261,7 @@ impl> QuorumVote /// Get the signature key. /// # Panics /// If the deserialization fails. - // #[deprecated] + #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), @@ -279,6 +286,13 @@ impl VoteType> for ViewSyncVote EncodedSignature { self.signature() } + fn get_data(self) -> VoteData> { + match self { + ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) | ViewSyncVote::Finalize(vote_internal) => { + vote_internal.vote_data + } + } + } } /// The aggreation of votes, implemented by `VoteAccumulator`. From 57b45793106316f720ab6f86fa094c65b5b6df52 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 19:52:38 -0400 Subject: [PATCH 0051/1393] Add get_vote_token function to VoteType trait --- types/src/traits/election.rs | 15 ++++++++++++--- types/src/vote.rs | 19 +++++++++++++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fa95f86d32..42106e49e9 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -191,13 +191,18 @@ where /// Accumulates votes given an accumulator, vote, and commit. /// Returns either the accumulator or a certificate fn accumulate_vote( - _accumulator: Self::VoteAccumulator, - _vote: Self::Vote, - _commit: COMMITTABLE, + self, + accumulator: Self::VoteAccumulator, + vote: Self::Vote, + commit: COMMITTABLE, ) -> Either { todo!() } + fn is_valid_vote() -> bool { + todo!() + } + /// Build a QC from the threshold signature and commitment // TODO ED Rename this function fn from_signatures_and_commitment( @@ -502,6 +507,10 @@ pub trait ConsensusExchange: Send + Sync { relay: Option, ) -> Either, Self::Certificate>; + fn accumulate_vote_2(&self, vote: Self::Vote)-> Either, Self::Certificate> { + todo!() + } + /// The committee which votes on proposals. fn membership(&self) -> &Self::Membership; diff --git a/types/src/vote.rs b/types/src/vote.rs index 3986a8a4c3..07b0b23c7e 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -42,6 +42,8 @@ pub trait VoteType EncodedSignature; /// Get the data this vote was signed over fn get_data(self) -> VoteData; + // Get the vote token of this vote + fn get_vote_token(self) -> TYPES::VoteTokenType; } /// A vote on DA proposal. @@ -212,6 +214,9 @@ impl VoteType for DAVote { fn get_data(self) -> VoteData { self.vote_data } + fn get_vote_token(self) -> ::VoteTokenType { + self.vote_token + } } // TODO ED Remove this @@ -247,6 +252,12 @@ impl> VoteType QuorumVote::Timeout(v) => unimplemented!() } } + fn get_vote_token(self) -> ::VoteTokenType { + match self { + QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token, + QuorumVote::Timeout(v) => unimplemented!() + } + } } impl> QuorumVote { @@ -293,6 +304,14 @@ impl VoteType> for ViewSyncVote ::VoteTokenType { + match self { + ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) | ViewSyncVote::Finalize(vote_internal) => { + vote_internal.vote_token + } + } + } } /// The aggreation of votes, implemented by `VoteAccumulator`. From 7b35460350a6e402e4081ab991d2a3cdaec1da61 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 20:16:04 -0400 Subject: [PATCH 0052/1393] Add new append function to accumulate_vote function --- types/src/traits/election.rs | 37 ++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 42106e49e9..4554ccc94f 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -199,10 +199,6 @@ where todo!() } - fn is_valid_vote() -> bool { - todo!() - } - /// Build a QC from the threshold signature and commitment // TODO ED Rename this function fn from_signatures_and_commitment( @@ -299,6 +295,7 @@ pub trait ConsensusExchange: Send + Sync { /// A proposal for participants to vote on. type Proposal: ProposalType; /// A vote on a [`Proposal`](Self::Proposal). + // TODO ED Use default associated type if it becomes stable type Vote: VoteType; /// A [`SignedCertificate`] attesting to a decision taken by the committee. type Certificate: SignedCertificate @@ -507,8 +504,36 @@ pub trait ConsensusExchange: Send + Sync { relay: Option, ) -> Either, Self::Certificate>; - fn accumulate_vote_2(&self, vote: Self::Vote)-> Either, Self::Certificate> { - todo!() + // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the SignedCertificate + // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. + fn accumulate_vote_2( + &self, + accumulator: <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::VoteAccumulator, + vote: <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::Vote, + commit: Self::Commitment, + ) -> Either< + <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::VoteAccumulator, + Self::Certificate, + > { + match accumulator.append(vote) { + Either::Left(_) => todo!(), + Either::Right(_) => todo!(), + } } /// The committee which votes on proposals. From 83c08c00203dd500eb20b838f20e18d1b49a15a5 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 12 Sep 2023 20:31:01 -0400 Subject: [PATCH 0053/1393] Add is_valid_vote check to accumulate_vote_2 --- types/src/traits/election.rs | 39 ++++++++++++++++++++++++++++++ types/src/vote.rs | 46 ++++++++++++++++++------------------ 2 files changed, 62 insertions(+), 23 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 4554ccc94f..9ba8c2d324 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -61,6 +61,7 @@ pub enum ElectionError { /// the outcome is already knowable. /// /// This would be a useful general utility. +#[derive(Clone)] pub enum Checked { /// This item has been checked, and is valid Valid(T), @@ -434,6 +435,31 @@ pub trait ConsensusExchange: Send + Sync { is_valid_signature && is_valid_vote_token } + /// Validate a vote by checking its signature and token. + fn is_valid_vote_2( + &self, + key: &TYPES::SignatureKey, + encoded_signature: &EncodedSignature, + data: &VoteData, + vote_token: &Checked, + ) -> bool { + let mut is_valid_vote_token = false; + let mut is_valid_signature = false; + + is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + let valid_vote_token = self.membership().validate_vote_token(key.clone(), vote_token.clone()); + is_valid_vote_token = match valid_vote_token { + Err(_) => { + error!("Vote token was invalid"); + false + } + Ok(Checked::Valid(_)) => true, + Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, + }; + + is_valid_signature && is_valid_vote_token + } + #[doc(hidden)] fn accumulate_internal( &self, @@ -506,6 +532,8 @@ pub trait ConsensusExchange: Send + Sync { // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the SignedCertificate // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. + /// Accumulate vote + /// Returns either the accumulate if no threshold was reached, or a SignedCertificate if the threshold was reached fn accumulate_vote_2( &self, accumulator: <>::Certificate as SignedCertificate< @@ -530,6 +558,17 @@ pub trait ConsensusExchange: Send + Sync { >>::VoteAccumulator, Self::Certificate, > { + if !self.is_valid_vote_2( + &vote.get_key(), + &vote.get_signature(), + &vote.get_data(), + // Ignoring deserialization errors below since we are getting rid of it soon + &Checked::Unchecked(vote.get_vote_token()), + ) { + error!("Invalid vote!"); + return Either::Left(accumulator); + } + match accumulator.append(vote) { Either::Left(_) => todo!(), Either::Right(_) => todo!(), diff --git a/types/src/vote.rs b/types/src/vote.rs index 07b0b23c7e..b202f95a00 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -37,13 +37,13 @@ pub trait VoteType TYPES::Time; /// Get the signature key associated with this vote - fn get_key(self) -> TYPES::SignatureKey; + fn get_key(&self) -> TYPES::SignatureKey; /// Get the signature associated with this vote - fn get_signature(self) -> EncodedSignature; + fn get_signature(&self) -> EncodedSignature; /// Get the data this vote was signed over - fn get_data(self) -> VoteData; + fn get_data(&self) -> VoteData; // Get the vote token of this vote - fn get_vote_token(self) -> TYPES::VoteTokenType; + fn get_vote_token(&self) -> TYPES::VoteTokenType; } /// A vote on DA proposal. @@ -204,18 +204,18 @@ impl VoteType for DAVote { fn get_view(&self) -> TYPES::Time { self.current_view } - fn get_key(self) -> ::SignatureKey { + fn get_key(&self) -> ::SignatureKey { self.signature_key() } - fn get_signature(self) -> EncodedSignature { + fn get_signature(&self) -> EncodedSignature { // TODO ED Revisit this function - self.signature.1 + self.signature.1.clone() } - fn get_data(self) -> VoteData { - self.vote_data + fn get_data(&self) -> VoteData { + self.vote_data.clone() } - fn get_vote_token(self) -> ::VoteTokenType { - self.vote_token + fn get_vote_token(&self) -> ::VoteTokenType { + self.vote_token.clone() } } @@ -240,21 +240,21 @@ impl> VoteType } } - fn get_key(self) -> ::SignatureKey { + fn get_key(&self) -> ::SignatureKey { self.signature_key() } - fn get_signature(self) -> EncodedSignature { + fn get_signature(&self) -> EncodedSignature { self.signature() } - fn get_data(self) -> VoteData { + fn get_data(&self) -> VoteData { match self { - QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data, + QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), QuorumVote::Timeout(v) => unimplemented!() } } - fn get_vote_token(self) -> ::VoteTokenType { + fn get_vote_token(&self) -> ::VoteTokenType { match self { - QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token, + QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token.clone(), QuorumVote::Timeout(v) => unimplemented!() } } @@ -290,25 +290,25 @@ impl VoteType> for ViewSyncVote ::SignatureKey { + fn get_key(&self) -> ::SignatureKey { self.signature_key() } - fn get_signature(self) -> EncodedSignature { + fn get_signature(&self) -> EncodedSignature { self.signature() } - fn get_data(self) -> VoteData> { + fn get_data(&self) -> VoteData> { match self { ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) | ViewSyncVote::Finalize(vote_internal) => { - vote_internal.vote_data + vote_internal.vote_data.clone() } } } - fn get_vote_token(self) -> ::VoteTokenType { + fn get_vote_token(&self) -> ::VoteTokenType { match self { ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) | ViewSyncVote::Finalize(vote_internal) => { - vote_internal.vote_token + vote_internal.vote_token.clone() } } } From ca23f527a4e0c08094239db9b8d65fc50fff7d50 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 10:14:41 -0400 Subject: [PATCH 0054/1393] Add accumulate_vote_2 function to DA task --- task-impls/src/da.rs | 9 +++++++++ types/src/traits/election.rs | 7 ++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index a160a37509..f51bf8a8b7 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -15,6 +15,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::vote::VoteType; use hotshot_types::traits::election::SignedCertificate; use hotshot_types::vote::AccumulatorPlaceholder; use hotshot_types::{ @@ -167,6 +168,14 @@ where return (None, state); } + let accumulator2 = state.accumulator2.left().unwrap(); + // TODO ED Maybe we don't need this to take in commitment? Can just get it from the vote directly if it is always + // going to be passed in as the vote.commitment + match state.committee_exchange.accumulate_vote_2(accumulator2, &vote, &vote.block_commitment) { + Left(_) => todo!(), + Right(_) => todo!(), + } + let accumulator = state.accumulator.left().unwrap(); match state.committee_exchange.accumulate_vote( &vote.signature.0, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 9ba8c2d324..529ffec4c1 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -542,13 +542,13 @@ pub trait ConsensusExchange: Send + Sync { TYPES::VoteTokenType, Self::Commitment, >>::VoteAccumulator, - vote: <>::Certificate as SignedCertificate< + vote: &<>::Certificate as SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, Self::Commitment, >>::Vote, - commit: Self::Commitment, + commit: &Commitment, ) -> Either< <>::Certificate as SignedCertificate< TYPES, @@ -569,7 +569,8 @@ pub trait ConsensusExchange: Send + Sync { return Either::Left(accumulator); } - match accumulator.append(vote) { + // TODO ED Should make append function take a reference to vote + match accumulator.append(vote.clone()) { Either::Left(_) => todo!(), Either::Right(_) => todo!(), } From 5ade250790f6a4ce3208873945ddeb070c232683 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 10:32:16 -0400 Subject: [PATCH 0055/1393] Add accumulator2 match to DA task --- task-impls/src/da.rs | 18 ++++++++++++++---- types/src/traits/election.rs | 26 ++++++++++++++++++++++---- types/src/vote.rs | 1 + 3 files changed, 37 insertions(+), 8 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index f51bf8a8b7..acb9064b29 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -15,9 +15,9 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::vote::VoteType; use hotshot_types::traits::election::SignedCertificate; use hotshot_types::vote::AccumulatorPlaceholder; +use hotshot_types::vote::VoteType; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, @@ -171,9 +171,19 @@ where let accumulator2 = state.accumulator2.left().unwrap(); // TODO ED Maybe we don't need this to take in commitment? Can just get it from the vote directly if it is always // going to be passed in as the vote.commitment - match state.committee_exchange.accumulate_vote_2(accumulator2, &vote, &vote.block_commitment) { - Left(_) => todo!(), - Right(_) => todo!(), + match state.committee_exchange.accumulate_vote_2( + accumulator2, + &vote, + &vote.block_commitment, + ) { + Left(new_accumulator) => { + error!("DA cert still accumulating"); + state.accumulator2 = either::Left(new_accumulator); + } + Right(dac) => { + error!("DA cert made!"); + state.accumulator2 = either::Right(dac); + } } let accumulator = state.accumulator.left().unwrap(); diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 529ffec4c1..49fad30dd7 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -447,7 +447,9 @@ pub trait ConsensusExchange: Send + Sync { let mut is_valid_signature = false; is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); - let valid_vote_token = self.membership().validate_vote_token(key.clone(), vote_token.clone()); + let valid_vote_token = self + .membership() + .validate_vote_token(key.clone(), vote_token.clone()); is_valid_vote_token = match valid_vote_token { Err(_) => { error!("Vote token was invalid"); @@ -562,17 +564,33 @@ pub trait ConsensusExchange: Send + Sync { &vote.get_key(), &vote.get_signature(), &vote.get_data(), - // Ignoring deserialization errors below since we are getting rid of it soon + // TODO ED We've had this comment for a while: Ignoring deserialization errors below since we are getting rid of it soon &Checked::Unchecked(vote.get_vote_token()), ) { error!("Invalid vote!"); return Either::Left(accumulator); } + let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); + let append_node_id = self + .membership() + .get_committee_qc_stake_table() + .iter() + .position(|x| *x == stake_table_entry.clone()) + .unwrap(); + // TODO ED Should make append function take a reference to vote match accumulator.append(vote.clone()) { - Either::Left(_) => todo!(), - Either::Right(_) => todo!(), + Either::Left(accumulator) => Either::Left(accumulator), + Either::Right(signatures) => { + // TODO ED Update this function to just take in the signatures and most recent vote + Either::Right(Self::Certificate::from_signatures_and_commitment( + vote.get_view(), + signatures, + *commit, + Some(0), + )) + } } } diff --git a/types/src/vote.rs b/types/src/vote.rs index b202f95a00..33a1f60450 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -353,6 +353,7 @@ impl< > Accumulator2 for AccumulatorPlaceholder { fn append(self, _vote: VOTE) -> Either> { + todo!(); either::Left(self) } } From 32579b1b01e0745a0035f8723f1deec62af320df Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 11:42:57 -0400 Subject: [PATCH 0056/1393] Append function implemented for DA vote; debugging why cert is not formed --- task-impls/src/da.rs | 11 +++- types/src/certificate.rs | 3 +- types/src/traits/election.rs | 3 +- types/src/vote.rs | 122 +++++++++++++++++++++++++++++++---- 4 files changed, 121 insertions(+), 18 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index acb9064b29..cab8284db0 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -15,6 +15,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::traits::election::SignedCertificate; use hotshot_types::vote::AccumulatorPlaceholder; use hotshot_types::vote::VoteType; @@ -163,7 +164,7 @@ where // panic!("Vote handle received DA vote for view {}", *vote.current_view); // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { + if state.accumulator2.is_right() { debug!("DA accumulator finished view: {:?}", state.cur_view); return (None, state); } @@ -181,7 +182,7 @@ where state.accumulator2 = either::Left(new_accumulator); } Right(dac) => { - error!("DA cert made!"); + panic!("DA cert made!"); state.accumulator2 = either::Right(dac); } } @@ -413,7 +414,11 @@ where acc, None, ); - let accumulator2 = AccumulatorPlaceholder { + let accumulator2 = DAVoteAccumulator { + da_vote_outcomes: HashMap::new(), + success_threshold: self.committee_exchange.success_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.committee_exchange.total_nodes()], phantom: PhantomData, }; if view > collection_view { diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 1efeb6b70e..124d762a8c 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,6 +1,7 @@ //! Provides two types of cerrtificates and their accumulators. use crate::vote::AccumulatorPlaceholder; +use crate::vote::DAVoteAccumulator; use crate::vote::QuorumVote; use crate::{ data::{fake_commitment, serialize_signature, LeafType}, @@ -232,7 +233,7 @@ impl SignedCertificate { type Vote = DAVote; - type VoteAccumulator = AccumulatorPlaceholder; + type VoteAccumulator = DAVoteAccumulator; fn from_signatures_and_commitment( view_number: TYPES::Time, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 49fad30dd7..ca4aa9c1d8 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -572,6 +572,7 @@ pub trait ConsensusExchange: Send + Sync { } let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); + // TODO ED Could we make this part of the vote in the future? It's only a usize. let append_node_id = self .membership() .get_committee_qc_stake_table() @@ -580,7 +581,7 @@ pub trait ConsensusExchange: Send + Sync { .unwrap(); // TODO ED Should make append function take a reference to vote - match accumulator.append(vote.clone()) { + match accumulator.append(vote.clone(), append_node_id, self.membership().get_committee_qc_stake_table()) { Either::Left(accumulator) => Either::Left(accumulator), Either::Right(signatures) => { // TODO ED Update this function to just take in the signatures and most recent vote diff --git a/types/src/vote.rs b/types/src/vote.rs index 33a1f60450..ee1e474b04 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -43,7 +43,7 @@ pub trait VoteType VoteData; // Get the vote token of this vote - fn get_vote_token(&self) -> TYPES::VoteTokenType; + fn get_vote_token(&self) -> TYPES::VoteTokenType; } /// A vote on DA proposal. @@ -249,13 +249,13 @@ impl> VoteType fn get_data(&self) -> VoteData { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), - QuorumVote::Timeout(v) => unimplemented!() + QuorumVote::Timeout(v) => unimplemented!(), } } fn get_vote_token(&self) -> ::VoteTokenType { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token.clone(), - QuorumVote::Timeout(v) => unimplemented!() + QuorumVote::Timeout(v) => unimplemented!(), } } } @@ -299,17 +299,17 @@ impl VoteType> for ViewSyncVote VoteData> { match self { - ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) | ViewSyncVote::Finalize(vote_internal) => { - vote_internal.vote_data.clone() - } + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.vote_data.clone(), } } fn get_vote_token(&self) -> ::VoteTokenType { match self { - ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) | ViewSyncVote::Finalize(vote_internal) => { - vote_internal.vote_token.clone() - } + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.vote_token.clone(), } } } @@ -333,10 +333,106 @@ pub trait Accumulator2< /// Append 1 vote to the accumulator. If the threshold is not reached, return /// the accumulator, else return the `AssembledSignature` /// Only called from inside `accumulate_internal` - fn append(self, vote: VOTE) -> Either>; + fn append(self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>,) -> Either>; +} + +pub struct DAVoteAccumulator< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, +> { + /// Map of all da signatures accumlated so far + pub da_vote_outcomes: VoteMap, + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::Signature>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + + pub phantom: PhantomData, +} + +impl< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, + > Accumulator2 for DAVoteAccumulator +{ + fn append( + mut self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + // TODO ED Make this a function on VoteType trait + let vote_commitment = match vote.get_data() { + VoteData::DA(commitment) => commitment, + _ => return Either::Left(self), + }; + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::Signature = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (da_stake_casted, da_vote_map) = self + .da_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if da_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + // Update the active_keys and signature lists + // TODO ED How does this differ than the check above? + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // Already checked that vote data was for a DA vote above + *da_stake_casted += u64::from(vote.get_vote_token().vote_count()); + da_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + + if *da_stake_casted >= u64::from(self.success_threshold) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + // TODO ED Something about stake table entries. Might be easier to just pass in membership? + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + // TODO ED Why do we need this line if we have the above line? + self.da_vote_outcomes.remove(&vote_commitment); + + return Either::Right(AssembledSignature::DA(real_qc_sig)); + } + Either::Left(self) + } } /// Placeholder accumulator; will be replaced by accumulator for each certificate type +#[deprecated] pub struct AccumulatorPlaceholder< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, @@ -352,13 +448,13 @@ impl< VOTE: VoteType, > Accumulator2 for AccumulatorPlaceholder { - fn append(self, _vote: VOTE) -> Either> { - todo!(); - either::Left(self) + fn append(self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>) -> Either> { + todo!() } } /// Mapping of commitments to vote tokens by key. +// TODO ED Remove this whole token generic type VoteMap = HashMap< Commitment, ( From 0d243753d3aed67b669769b60219830efc007abc Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 11:50:04 -0400 Subject: [PATCH 0057/1393] DA cert is properly formed with new accumulator --- task-impls/src/da.rs | 8 ++++++-- types/src/vote.rs | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index cab8284db0..b4143b3844 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -414,18 +414,22 @@ where acc, None, ); - let accumulator2 = DAVoteAccumulator { + let new_accumulator = DAVoteAccumulator { da_vote_outcomes: HashMap::new(), success_threshold: self.committee_exchange.success_threshold(), sig_lists: Vec::new(), signers: bitvec![0; self.committee_exchange.total_nodes()], phantom: PhantomData, }; + + // TODO ED Get vote data here instead of cloning into block commitment field of vote + let accumulator2 = self.committee_exchange.accumulate_vote_2(new_accumulator, &vote, &vote.clone().block_commitment); + if view > collection_view { let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), accumulator, - accumulator2: either::Left(accumulator2), + accumulator2: accumulator2, cur_view: view, event_stream: self.event_stream.clone(), id: self.id, diff --git a/types/src/vote.rs b/types/src/vote.rs index ee1e474b04..31e98a5c65 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -427,6 +427,7 @@ impl< return Either::Right(AssembledSignature::DA(real_qc_sig)); } + error!("DA stake casted is {da_stake_casted}"); Either::Left(self) } } From 998c5d771278607b789e8060cde86ddb45d523ae Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 13 Sep 2023 13:39:40 -0400 Subject: [PATCH 0058/1393] Hello-world VID integration (#1684) * quick and dirty type wrappers for VID * WIP: get dispersal shares * WIP: add disperse data to VidDisperseSend event * catch VidDisperseSend event in network task handle_event, plus attendant boilerplate * WIP catch VidDisperseRecv event in DA task handle_event, plus attendant boilerplate * new VidVoteSend event * new event VidVoteRecv * blind copy-paste code from DAVoteRecv to VidVoteRecv * new event VidCertSend * new message type VidCertificate * new ProcessedCommitteeConsensusMessage variants VidVote, VidCertificate * new event VidCertRecv * fix test_da_task * WIP begin fixing test_network_task * tidy use statement * todos * fix test_network_task (no idea why it works now) * add TODOs for #1685 * clippy pacification and link to #1686 * clippy pacification * [no ci] log unexpected events * [no ci] demote a few noisy error logs * fix test_web_server * lint * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1690 * [no ci] remove duplicate issue link * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1691 * [no ci] link to https://github.com/EspressoSystems/jellyfish/issues/369 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1692 * [no ci] another link to https://github.com/EspressoSystems/HotShot/issues/1686 * don't make release builds * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1693 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1695 * demote another noisy error log * lint argh ci is killing me * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1696 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1697 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1698 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1699 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1700 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1701 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1702 * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1703 * [no ci] cover my tracks * [no ci] link to https://github.com/EspressoSystems/HotShot/issues/1704 * allow test more time to pass ci :fingers-crossed: * more time for web_server, basic tests to complete * address https://github.com/EspressoSystems/HotShot/pull/1684#discussion_r1320390489 * allow more time for tests to pass * address https://github.com/EspressoSystems/HotShot/pull/1684#discussion_r1320403528 * experiment: restore use of release build for tests * add commented code that hangs test * remove release build again * WIP toward a fix for #1695 * add commitment member to VidDisperse, ensure Proposal.signature matches the commitment * use the correct commitment in VidDisperseRecv, update comments * restore commented code to check whether I'm the VID leader * add sanity log msg and TODO * tidy logs and comments * add VidVote message purpose * message type Vid (unimplemented) -> VidDisperse (now implemented) * VidCert message type * restore original test timeout durations, #1713 now fixed * type: VidCert use cert route * more time for tests to complete * [no ci] more comment links to github issues * [no ci] comment link to #1717 * add commented VID test code * close #1697, fix test with VID * make_vote_token failure should cause test to fail * fix #1717 * add ASYNC_STD_THREAD_COUNT=1 to test_success, test_web_server, test_consensus_task --- .../src/traits/networking/libp2p_network.rs | 2 +- .../traits/networking/web_server_network.rs | 49 +++ .../src/network/behaviours/gossip.rs | 12 +- libp2p-networking/tests/counter.rs | 4 +- orchestrator/Cargo.toml | 3 +- task-impls/src/consensus.rs | 11 + task-impls/src/da.rs | 294 ++++++++++++++++-- task-impls/src/events.rs | 26 +- task-impls/src/network.rs | 38 ++- testing/Cargo.toml | 1 + testing/src/task_helpers.rs | 9 +- testing/src/test_runner.rs | 4 +- testing/tests/basic.rs | 12 +- testing/tests/da_task.rs | 61 +++- testing/tests/network_task.rs | 33 +- testing/tests/web_server.rs | 9 + types/Cargo.toml | 7 +- types/src/data.rs | 47 +++ types/src/message.rs | 61 +++- types/src/traits/election.rs | 55 +++- web_server/src/config.rs | 24 ++ 21 files changed, 700 insertions(+), 62 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index ddc13b9571..1d94be7ee9 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -608,7 +608,7 @@ impl ConnectedNetwork for Libp2p source: NetworkNodeHandleError::NoSuchTopic, })? .clone(); - error!("Broadcasting to topic {}", topic); + info!("broadcasting to topic: {}", topic); // gossip doesn't broadcast from itself, so special case if recipients.contains(&self.inner.pk) { diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 374c8a40e7..40d250f501 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -177,6 +177,9 @@ impl Inner { config::get_view_sync_vote_route(view_number, vote_index) } MessagePurpose::DAC => config::get_da_certificate_route(view_number), + MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` + MessagePurpose::VidVote => config::get_vid_vote_route(view_number, vote_index), // like `Vote` + MessagePurpose::VidCert => config::get_vid_cert_route(view_number), // like `DAC` }; if message_purpose == MessagePurpose::Data { @@ -240,6 +243,14 @@ impl Inner { direct_poll_queue.push(vote.clone()); } } + MessagePurpose::VidVote => { + // TODO copy-pasted from `MessagePurpose::Vote` https://github.com/EspressoSystems/HotShot/issues/1690 + let mut direct_poll_queue = self.direct_poll_queue.write().await; + for vote in &deserialized_messages { + vote_index += 1; + direct_poll_queue.push(vote.clone()); + } + } MessagePurpose::DAC => { debug!( "Received DAC from web server for view {} {}", @@ -255,6 +266,41 @@ impl Inner { // In future we should check to make sure DAC is valid return Ok(()); } + MessagePurpose::VidCert => { + // TODO copy-pasted from `MessagePurpose::DAC` https://github.com/EspressoSystems/HotShot/issues/1690 + debug!( + "Received VID cert from web server for view {} {}", + view_number, self.is_da + ); + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + // return if we found a VID cert, since there will only be 1 per view + // In future we should check to make sure VID cert is valid + return Ok(()); + } + MessagePurpose::VidDisperse => { + // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + // Wait for the view to change before polling for proposals again + // let event = receiver.recv().await; + // match event { + // Ok(event) => view_number = event.view_number(), + // Err(_r) => { + // error!("Proposal receiver error! It was likely shutdown") + // } + // } + } MessagePurpose::ViewSyncVote => { // error!( // "Received {} view sync votes from web server for view {} is da {}", @@ -513,6 +559,9 @@ impl< } MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), MessagePurpose::DAC => config::post_da_certificate_route(*view_number), + MessagePurpose::VidVote => config::post_vid_vote_route(*view_number), + MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), + MessagePurpose::VidCert => config::post_vid_cert_route(*view_number), }; let network_msg: SendMsg = SendMsg { diff --git a/libp2p-networking/src/network/behaviours/gossip.rs b/libp2p-networking/src/network/behaviours/gossip.rs index 1119ad27f3..bfc946c9b5 100644 --- a/libp2p-networking/src/network/behaviours/gossip.rs +++ b/libp2p-networking/src/network/behaviours/gossip.rs @@ -4,13 +4,13 @@ use std::{ }; use libp2p::{ - gossipsub::{Behaviour, Event, IdentTopic, TopicHash}, + gossipsub::{Behaviour, Event, IdentTopic, PublishError::Duplicate, TopicHash}, swarm::{NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, ToSwarm}, Multiaddr, }; use libp2p_identity::PeerId; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use super::exponential_backoff::ExponentialBackoff; @@ -214,8 +214,12 @@ impl GossipBehaviour { /// Publish a given gossip pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { let res = self.gossipsub.publish(topic.clone(), contents.clone()); - if res.is_err() { - error!("error publishing gossip message {:?}", res); + if let Err(e) = res { + if matches!(e, Duplicate) { + debug!("duplicate gossip message"); + } else { + error!("error publishing gossip message {:?}", e); + } self.in_progress_gossip.push_back((topic, contents)); } } diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index e7dad7c8c6..85b7cb2a7f 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -12,7 +12,7 @@ use libp2p_networking::network::{ use serde::{Deserialize, Serialize}; use snafu::ResultExt; use std::{fmt::Debug, sync::Arc, time::Duration}; -use tracing::{error, info, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; @@ -307,7 +307,7 @@ async fn run_dht_rounds( ) { let mut rng = rand::thread_rng(); for i in 0..num_rounds { - error!("round: {:?}", i); + debug!("begin round {}", i); let msg_handle = get_random_handle(handles, &mut rng); let mut key = vec![0; DHT_KV_PADDING]; key.push((starting_val + i) as u8); diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 3febbbce65..f3f57ada3a 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -23,7 +23,8 @@ tracing = { workspace = true } serde = { workspace = true } serde_json = "1.0.96" snafu = { workspace = true } -toml = "0.5.9" # TODO GG upgrade to toml = { workspace = true } +# TODO upgrade to toml = { workspace = true } https://github.com/EspressoSystems/HotShot/issues/1698 +toml = "0.5.9" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 8767e96af2..82e22eedf7 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1047,7 +1047,17 @@ where self.update_view(view + 1).await; } } + SequencingHotShotEvent::VidCertRecv(cert) => { + debug!("VID cert received for view ! {}", *cert.view_number); + let view = cert.view_number; + self.certs.insert(view, cert); // TODO new cert type for VID https://github.com/EspressoSystems/HotShot/issues/1701 + + // TODO Make sure we aren't voting for an arbitrarily old round for no reason + if self.vote_if_able().await { + self.update_view(view + 1).await; + } + } SequencingHotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {}", *new_view); @@ -1311,6 +1321,7 @@ pub fn consensus_event_filter>( | SequencingHotShotEvent::QuorumVoteRecv(_) | SequencingHotShotEvent::QCFormed(_) | SequencingHotShotEvent::DACRecv(_) + | SequencingHotShotEvent::VidCertRecv(_) | SequencingHotShotEvent::ViewChange(_) | SequencingHotShotEvent::SendDABlockData(_) | SequencingHotShotEvent::Timeout(_) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c7d20786ab..d05588f1c8 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -18,8 +18,8 @@ use hotshot_task::{ use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, - data::{DAProposal, ProposalType, SequencingLeaf}, - message::{CommitteeConsensusMessage, Message, Proposal, SequencingMessage}, + data::{DAProposal, ProposalType, SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, + message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership}, @@ -146,13 +146,14 @@ where debug!("DA vote recv, collection task {:?}", vote.current_view); // panic!("Vote handle received DA vote for view {}", *vote.current_view); - // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { - debug!("DA accumulator finished view: {:?}", state.cur_view); - return (None, state); - } - - let accumulator = state.accumulator.left().unwrap(); + let accumulator = match state.accumulator { + Right(_) => { + // For the case where we receive votes after we've made a certificate + debug!("DA accumulator finished view: {:?}", state.cur_view); + return (None, state); + } + Left(a) => a, + }; match state.committee_exchange.accumulate_vote( &vote.signature.0, &vote.signature.1, @@ -192,8 +193,63 @@ where } } } + SequencingHotShotEvent::VidVoteRecv(vote) => { + // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + + debug!("VID vote recv, collection task {:?}", vote.current_view); + // panic!("Vote handle received DA vote for view {}", *vote.current_view); + + let accumulator = match state.accumulator { + Right(_) => { + // For the case where we receive votes after we've made a certificate + debug!("VID accumulator finished view: {:?}", state.cur_view); + return (None, state); + } + Left(a) => a, + }; + match state.committee_exchange.accumulate_vote( + &vote.signature.0, + &vote.signature.1, + vote.block_commitment, + vote.vote_data, + vote.vote_token.clone(), + state.cur_view, + accumulator, + None, + ) { + Left(acc) => { + state.accumulator = Either::Left(acc); + // debug!("Not enough VID votes! "); + return (None, state); + } + Right(vid_cert) => { + debug!("Sending VID cert! {:?}", vid_cert.view_number); + state + .event_stream + .publish(SequencingHotShotEvent::VidCertSend( + vid_cert.clone(), + state.committee_exchange.public_key().clone(), + )) + .await; + + state.accumulator = Right(vid_cert.clone()); + state + .committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *vid_cert.view_number, + )) + .await; + + // Return completed at this point + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } + } SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), - _ => {} + _ => { + error!("unexpected event {:?}", event); + } } (None, state) } @@ -217,7 +273,6 @@ where { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] - pub async fn handle_event( &mut self, event: SequencingHotShotEvent, @@ -298,7 +353,7 @@ where } Ok(Some(vote_token)) => { // Generate and send vote - let message = self.committee_exchange.create_da_message( + let vote = self.committee_exchange.create_da_message( block_commitment, view, vote_token, @@ -307,12 +362,10 @@ where // ED Don't think this is necessary? // self.cur_view = view; - if let CommitteeConsensusMessage::DAVote(vote) = message { - debug!("Sending vote to the DA leader {:?}", vote.current_view); - self.event_stream - .publish(SequencingHotShotEvent::DAVoteSend(vote)) - .await; - } + debug!("Sending vote to the DA leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::DAVoteSend(vote)) + .await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -412,6 +465,170 @@ where .await; }; } + SequencingHotShotEvent::VidVoteRecv(vote) => { + // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + + // warn!( + // "VID vote recv, Main Task {:?}, key: {:?}", + // vote.current_view, + // self.committee_exchange.public_key() + // ); + // Check if we are the leader and the vote is from the sender. + let view = vote.current_view; + if !self.committee_exchange.is_leader(view) { + error!( + "We are not the VID leader for view {} are we leader for next view? {}", + *view, + self.committee_exchange.is_leader(view + 1) + ); + return None; + } + + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = + if let Some((collection_view, collection_id, _)) = &self.vote_collector { + // TODO: Is this correct for consecutive leaders? + if view > *collection_view { + // warn!("shutting down for view {:?}", collection_view); + self.registry.shutdown_task(*collection_id).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; + let acc = VoteAccumulator { + total_vote_outcomes: HashMap::new(), + da_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + viewsync_precommit_vote_outcomes: HashMap::new(), + viewsync_commit_vote_outcomes: HashMap::new(), + viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.committee_exchange.success_threshold(), + failure_threshold: self.committee_exchange.failure_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.committee_exchange.total_nodes()], + }; + let accumulator = self.committee_exchange.accumulate_vote( + &vote.clone().signature.0, + &vote.clone().signature.1, + vote.clone().block_commitment, + vote.clone().vote_data.clone(), + vote.clone().vote_token.clone(), + vote.clone().current_view, + acc, + None, + ); + if view > collection_view { + let state = DAVoteCollectionTaskState { + committee_exchange: self.committee_exchange.clone(), + accumulator, + cur_view: view, + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "VID Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!(event, SequencingHotShotEvent::VidVoteRecv(_)) + })); + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + let _task = + async_spawn( + async move { DAVoteCollectionTypes::build(builder).launch().await }, + ); + self.vote_collector = Some((view, id, stream_id)); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message(stream_id, SequencingHotShotEvent::VidVoteRecv(vote)) + .await; + }; + } + SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { + // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 + debug!( + "VID disperse received for view: {:?}", + disperse.data.get_view_number() + ); + + // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view + let view = disperse.data.get_view_number(); + + // Allow a DA proposal that is one view older, in case we have voted on a quorum + // proposal and updated the view. + // `self.cur_view` should be at least 1 since there is a view change before getting + // the `DAProposalRecv` event. Otherewise, the view number subtraction below will + // cause an overflow error. + if view < self.cur_view - 1 { + warn!("Throwing away VID disperse data that is more than one view older"); + return None; + } + + debug!("VID disperse data is fresh."); + let block_commitment = disperse.data.commitment; + + // ED Is this the right leader? + let view_leader_key = self.committee_exchange.get_leader(view); + if view_leader_key != sender { + error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); + return None; + } + + if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { + error!("Could not verify VID proposal sig."); + return None; + } + + let vote_token = self.committee_exchange.make_vote_token(view); + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for VID quorum on {:?}", view); + } + Ok(Some(vote_token)) => { + // Generate and send vote + let vote = self.committee_exchange.create_vid_message( + block_commitment, + view, + vote_token, + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + debug!("Sending vote to the VID leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::VidVoteSend(vote)) + .await; + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: block_commitment, + }, + }); + + // Record the block we have promised to make available. + // TODO https://github.com/EspressoSystems/HotShot/issues/1692 + // consensus.saved_blocks.insert(proposal.data.deltas); + } + } + } // TODO ED Update high QC through QCFormed event SequencingHotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { @@ -543,11 +760,44 @@ where // } self.event_stream .publish(SequencingHotShotEvent::DAProposalSend( - message, + message.clone(), self.committee_exchange.public_key().clone(), )) .await; + debug!("Prepare VID shares"); + { + /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 + const NUM_STORAGE_NODES: usize = 10; + /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 + const NUM_CHUNKS: usize = 5; + + // TODO https://github.com/EspressoSystems/HotShot/issues/1686 + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let message_bytes = bincode::serialize(&message).unwrap(); + let (shares, common) = vid.dispersal_data(&message_bytes).unwrap(); + // TODO for now reuse the same block commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 + + self.event_stream + .publish(SequencingHotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: self.cur_view + 1, // copied from `data` above + commitment: block.commit(), + shares, + common, + }, + signature: message.signature, + }, + // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 + self.committee_exchange.public_key().clone(), + )) + .await; + } + return None; } @@ -561,7 +811,9 @@ where SequencingHotShotEvent::Shutdown => { return Some(HotShotTaskCompleted::ShutDown); } - _ => {} + _ => { + error!("unexpected event {:?}", event); + } } None } @@ -639,6 +891,8 @@ where | SequencingHotShotEvent::Shutdown | SequencingHotShotEvent::TransactionsRecv(_) | SequencingHotShotEvent::Timeout(_) + | SequencingHotShotEvent::VidDisperseRecv(_, _) + | SequencingHotShotEvent::VidVoteRecv(_) | SequencingHotShotEvent::ViewChange(_) ) } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 98819108a4..3d6091a285 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,6 +1,6 @@ use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, - data::DAProposal, + data::{DAProposal, VidDisperse}, message::Proposal, traits::node_implementation::{ NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, @@ -62,4 +62,28 @@ pub enum SequencingHotShotEvent> { TransactionSend(TYPES::Transaction, TYPES::SignatureKey), /// Event to send DA block data from DA leader to next quorum leader (which should always be the same node); internal event only SendDABlockData(TYPES::BlockType), + /// Send VID shares to VID storage nodes; emitted by the DA leader + /// + /// Like [`DAProposalSend`]. + VidDisperseSend(Proposal>, TYPES::SignatureKey), + /// Vid disperse data has been received from the network; handled by the DA task + /// + /// Like [`DAProposalRecv`]. + VidDisperseRecv(Proposal>, TYPES::SignatureKey), + /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal + /// + /// Like [`DAVoteSend`] + VidVoteSend(DAVote), + /// A VID vote has been received by the network; handled by the DA task + /// + /// Like [`DAVoteRecv`] + VidVoteRecv(DAVote), + /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task + /// + /// Like [`DACSend`] + VidCertSend(DACertificate, TYPES::SignatureKey), + /// A VID cert has been recieved by the network; handled by the consensus task + /// + /// Like [`DACRecv`] + VidCertRecv(DACertificate), } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a4b083ba37..9b0410e46f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -106,6 +106,15 @@ impl< // panic!("Recevid DA C! "); SequencingHotShotEvent::DACRecv(cert) } + CommitteeConsensusMessage::VidDisperseMsg(proposal) => { + SequencingHotShotEvent::VidDisperseRecv(proposal, sender) + } + CommitteeConsensusMessage::VidVote(vote) => { + SequencingHotShotEvent::VidVoteRecv(vote.clone()) + } + CommitteeConsensusMessage::VidCertificate(cert) => { + SequencingHotShotEvent::VidCertRecv(cert) + } }, }; // TODO (Keyao benchmarking) Update these event variants (similar to the @@ -186,6 +195,7 @@ impl< /// Returns the completion status. /// # Panics /// Panic sif a direct message event is received with no recipient + #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 pub async fn handle_event( &mut self, event: SequencingHotShotEvent, @@ -210,7 +220,14 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.current_view() + 1)), ), - + SequencingHotShotEvent::VidDisperseSend(proposal, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidDisperseMsg(proposal), + ))), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 + None, + ), SequencingHotShotEvent::DAProposalSend(proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Right( @@ -219,6 +236,14 @@ impl< TransmitType::Broadcast, None, ), + SequencingHotShotEvent::VidVoteSend(vote) => ( + vote.signature_key(), + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.current_view)), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 + ), SequencingHotShotEvent::DAVoteSend(vote) => ( vote.signature_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( @@ -227,6 +252,14 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.current_view)), ), + SequencingHotShotEvent::VidCertSend(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidCertificate(certificate), + ))), + TransmitType::Broadcast, + None, + ), // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee SequencingHotShotEvent::DACSend(certificate, sender) => ( sender, @@ -307,6 +340,7 @@ impl< | SequencingHotShotEvent::QuorumVoteSend(_) | SequencingHotShotEvent::Shutdown | SequencingHotShotEvent::DACSend(_, _) + | SequencingHotShotEvent::VidCertSend(_, _) | SequencingHotShotEvent::ViewChange(_) ) } @@ -318,6 +352,8 @@ impl< SequencingHotShotEvent::DAProposalSend(_, _) | SequencingHotShotEvent::DAVoteSend(_) | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::VidDisperseSend(_, _) + | SequencingHotShotEvent::VidVoteSend(_) | SequencingHotShotEvent::ViewChange(_) ) } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index e75c328a5d..b361b241d7 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -41,6 +41,7 @@ bitvec = { workspace = true } [dev-dependencies] async-lock = { workspace = true } +bincode = { workspace = true } # GG any better options for serialization? [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 13868721d4..4684fb5284 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,7 +13,7 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ - data::{QuorumProposal, SequencingLeaf, ViewNumber}, + data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ consensus_api::ConsensusSharedApi, @@ -163,3 +163,10 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, let public_key = ::SignatureKey::from_private(&private_key); (private_key, public_key) } + +pub fn vid_init() -> VidScheme { + const NUM_STORAGE_NODES: usize = 10; + const NUM_CHUNKS: usize = 5; + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap() +} diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 57c82d24e9..e03292e639 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -202,8 +202,8 @@ where >, { let mut results = vec![]; - for _i in 0..total { - tracing::error!("running node{}", _i); + for i in 0..total { + tracing::debug!("launch node {}", i); let node_id = self.next_node_id; let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index fc4b6017f7..35e75fffa7 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -6,13 +6,23 @@ #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_success() { use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{SequencingMemoryImpl, SequencingTestTypes}, test_builder::TestMetadata, }; + use std::time::Duration; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata::default(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(1_200_000), + }, + ), + ..TestMetadata::default() + }; metadata .gen_launcher::() .launch() diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 0322909203..64b70ef40c 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,9 +1,12 @@ use commit::Committable; use hotshot::HotShotSequencingConsensusApi; use hotshot_task_impls::events::SequencingHotShotEvent; -use hotshot_testing::node_types::{SequencingMemoryImpl, SequencingTestTypes}; +use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + task_helpers::vid_init, +}; use hotshot_types::{ - data::{DAProposal, ViewNumber}, + data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, @@ -23,10 +26,7 @@ async fn test_da_task() { }; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{ - message::{CommitteeConsensusMessage, Proposal}, - traits::election::CommitteeExchangeType, - }; + use hotshot_types::{message::Proposal, traits::election::CommitteeExchangeType}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -53,6 +53,20 @@ async fn test_da_task() { data: proposal, signature, }; + let vid = vid_init(); + let message_bytes = bincode::serialize(&message).unwrap(); + let (shares, common) = vid.dispersal_data(&message_bytes).unwrap(); + let vid_proposal = Proposal { + data: VidDisperse { + view_number: message.data.view_number, + commitment: block_commitment, + shares, + common, + }, + signature: message.signature.clone(), + }; + // TODO for now reuse the same block commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 // Every event input is seen on the event stream in the output. let mut input = Vec::new(); @@ -65,6 +79,10 @@ async fn test_da_task() { message.clone(), pub_key, )); + input.push(SequencingHotShotEvent::VidDisperseRecv( + vid_proposal.clone(), + pub_key, + )); input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); @@ -73,14 +91,31 @@ async fn test_da_task() { SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), 1, ); - if let Ok(Some(vote_token)) = committee_exchange.make_vote_token(ViewNumber::new(2)) { - let da_message = - committee_exchange.create_da_message(block_commitment, ViewNumber::new(2), vote_token); - if let CommitteeConsensusMessage::DAVote(vote) = da_message { - output.insert(SequencingHotShotEvent::DAVoteSend(vote), 1); - } - } + let vote_token = committee_exchange + .make_vote_token(ViewNumber::new(2)) + .unwrap() + .unwrap(); + let da_vote = + committee_exchange.create_da_message(block_commitment, ViewNumber::new(2), vote_token); + output.insert(SequencingHotShotEvent::DAVoteSend(da_vote), 1); + output.insert( + SequencingHotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), + 1, + ); + + let vote_token = committee_exchange + .make_vote_token(ViewNumber::new(2)) + .unwrap() + .unwrap(); + let vid_vote = + committee_exchange.create_vid_message(block_commitment, ViewNumber::new(2), vote_token); + output.insert(SequencingHotShotEvent::VidVoteSend(vid_vote), 1); + output.insert(SequencingHotShotEvent::DAProposalRecv(message, pub_key), 1); + output.insert( + SequencingHotShotEvent::VidDisperseRecv(vid_proposal, pub_key), + 1, + ); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(SequencingHotShotEvent::Shutdown, 1); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f3d8d9f8bb..c14052bae3 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -3,10 +3,10 @@ use hotshot::HotShotSequencingConsensusApi; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_testing::{ node_types::{SequencingMemoryImpl, SequencingTestTypes}, - task_helpers::build_quorum_proposal, + task_helpers::{build_quorum_proposal, vid_init}, }; use hotshot_types::{ - data::{DAProposal, ViewNumber}, + data::{DAProposal, VidSchemeTrait, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, node_implementation::ExchangesType, state::ConsensusTime, }, @@ -23,7 +23,9 @@ async fn test_network_task() { use hotshot::demos::sdemo::{SDemoBlock, SDemoNormalBlock}; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{message::Proposal, traits::election::CommitteeExchangeType}; + use hotshot_types::{ + data::VidDisperse, message::Proposal, traits::election::CommitteeExchangeType, + }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -51,6 +53,20 @@ async fn test_network_task() { signature, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; + let vid = vid_init(); + let da_proposal_bytes = bincode::serialize(&da_proposal).unwrap(); + let (shares, common) = vid.dispersal_data(&da_proposal_bytes).unwrap(); + // TODO for now reuse the same block commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 + let da_vid_disperse = Proposal { + data: VidDisperse { + view_number: da_proposal.data.view_number, + commitment: block_commitment, + shares, + common, + }, + signature: da_proposal.signature.clone(), + }; // Every event input is seen on the event stream in the output. let mut input = Vec::new(); @@ -61,6 +77,10 @@ async fn test_network_task() { da_proposal.clone(), pub_key, )); + input.push(SequencingHotShotEvent::VidDisperseSend( + da_vid_disperse.clone(), + pub_key, + )); input.push(SequencingHotShotEvent::QuorumProposalSend( quorum_proposal.clone(), pub_key, @@ -69,10 +89,13 @@ async fn test_network_task() { input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); - // One output from the input, the other from the DA task. output.insert( SequencingHotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), - 2, + 2, // 2 occurrences: 1 from `input`, 1 from the DA task + ); + output.insert( + SequencingHotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), + 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); // Only one output from the input. // The consensus task will fail to send a second proposal, like the DA task does, due to the diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 75d0e4f89c..6c95318c7b 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -1,5 +1,8 @@ +use std::time::Duration; + use async_compatibility_layer::logging::shutdown_logging; use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{SequencingTestTypes, SequencingWebImpl}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, @@ -28,6 +31,12 @@ async fn web_server_network() { num_successful_views: 35, ..Default::default() }, + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(1_200_000), + }, + ), ..TestMetadata::default() }; metadata diff --git a/types/Cargo.toml b/types/Cargo.toml index 5ea10f246c..58ac6c48a9 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -12,12 +12,13 @@ demo = ["ed25519-compact"] [dependencies] arbitrary = { version = "1.3", features = ["derive"] } +ark-bls12-381 = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } ark-serialize = { version = "0.3", features = [ "derive", -] } # TODO GG upgrade to 0.4 and inherit this dep from workspace +] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } @@ -36,12 +37,14 @@ hex_fmt = "0.3.0" hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", default-features = false } -jf-primitives = { workspace = true } +jf-primitives = { workspace = true, features = ["test-srs"] } +jf-utils = { workspace = true } nll = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } +sha2 = { workspace = true } snafu = { workspace = true } tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } time = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index 3f12fb7c82..2578ee7289 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -17,6 +17,7 @@ use crate::{ Block, State, }, }; +use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; use bincode::Options; use commit::{Commitment, Committable}; @@ -25,6 +26,7 @@ use either::Either; use espresso_systems_common::hotshot::tag; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; +use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::{ensure, Snafu}; @@ -159,6 +161,44 @@ pub struct DAProposal { pub view_number: TYPES::Time, } +/// The VID scheme type used in `HotShot`. +pub type VidScheme = jf_primitives::vid::advz::Advz; +pub use jf_primitives::vid::VidScheme as VidSchemeTrait; + +/// VID dispersal data +/// +/// Like [`DAProposal`]. +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct VidDisperse { + /// The view number for which this VID data is intended + pub view_number: TYPES::Time, + /// Block commitment + pub commitment: Commitment, + /// VID shares dispersed among storage nodes + pub shares: Vec<::StorageShare>, + /// VID common data sent to all storage nodes + pub common: ::StorageCommon, +} + +/// Trusted KZG setup for VID. +/// +/// TESTING ONLY: don't use this in production +/// TODO +/// +/// # Panics +/// ...because this is only for tests. This comment exists to pacify clippy. +#[must_use] +pub fn test_srs( + num_storage_nodes: usize, +) -> as PolynomialCommitmentScheme>::SRS { + let mut rng = jf_utils::test_rng(); + UnivariateKzgPCS::::gen_srs_for_testing( + &mut rng, + checked_fft_size(num_storage_nodes).unwrap(), + ) + .unwrap() +} + /// Proposal to append a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] @@ -202,6 +242,13 @@ impl ProposalType for DAProposal { } } +impl ProposalType for VidDisperse { + type NodeType = TYPES; + fn get_view_number(&self) -> ::Time { + self.view_number + } +} + impl> ProposalType for QuorumProposal { diff --git a/types/src/message.rs b/types/src/message.rs index 89d2864aa9..0a5563ad5f 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -5,7 +5,7 @@ use crate::{ certificate::DACertificate, - data::{DAProposal, ProposalType}, + data::{DAProposal, ProposalType, VidDisperse}, traits::{ network::{NetworkMsg, ViewMessage}, node_implementation::{ @@ -52,7 +52,7 @@ impl> ViewMessage for Messa #[derive(Clone, Debug)] pub struct Messages>(pub Vec>); -/// A message type agnostic description of a messages purpose +/// A message type agnostic description of a message's purpose #[derive(PartialEq, Copy, Clone)] pub enum MessagePurpose { /// Message with a quorum proposal. @@ -69,6 +69,12 @@ pub enum MessagePurpose { Internal, /// Data message Data, + /// VID disperse, like [`Proposal`]. + VidDisperse, + /// VID vote, like [`Vote`]. + VidVote, + /// VID certificate, like [`DAC`]. + VidCert, } // TODO (da) make it more customized to the consensus layer, maybe separating the specific message @@ -207,6 +213,12 @@ pub enum ProcessedCommitteeConsensusMessage { DAVote(DAVote, TYPES::SignatureKey), /// Certificate for the DA. DACertificate(DACertificate, TYPES::SignatureKey), + /// VID dispersal data. Like [`DAProposal`] + VidDisperseMsg(Proposal>, TYPES::SignatureKey), + /// Vote from VID storage node. Like [`DAVote`] + VidVote(DAVote, TYPES::SignatureKey), + /// Certificate for VID. Like [`DACertificate`] + VidCertificate(DACertificate, TYPES::SignatureKey), } impl From> @@ -223,6 +235,15 @@ impl From> ProcessedCommitteeConsensusMessage::DACertificate(cert, _) => { CommitteeConsensusMessage::DACertificate(cert) } + ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, _) => { + CommitteeConsensusMessage::VidDisperseMsg(disperse) + } + ProcessedCommitteeConsensusMessage::VidVote(v, _) => { + CommitteeConsensusMessage::VidVote(v) + } + ProcessedCommitteeConsensusMessage::VidCertificate(cert, _) => { + CommitteeConsensusMessage::VidCertificate(cert) + } } } } @@ -240,6 +261,15 @@ impl ProcessedCommitteeConsensusMessage { CommitteeConsensusMessage::DACertificate(cert) => { ProcessedCommitteeConsensusMessage::DACertificate(cert, sender) } + CommitteeConsensusMessage::VidDisperseMsg(disperse) => { + ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, sender) + } + CommitteeConsensusMessage::VidVote(v) => { + ProcessedCommitteeConsensusMessage::VidVote(v, sender) + } + CommitteeConsensusMessage::VidCertificate(cert) => { + ProcessedCommitteeConsensusMessage::VidCertificate(cert, sender) + } } } } @@ -307,6 +337,23 @@ pub enum CommitteeConsensusMessage { /// Certificate data is available DACertificate(DACertificate), + + /// Initiate VID dispersal. + /// + /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. + /// TODO this variant should not be a [`CommitteeConsensusMessage`] because + VidDisperseMsg(Proposal>), + + /// Vote for VID disperse data + /// + /// Like [`DAVote`]. + /// TODO currently re-using [`DAVote`]; do we need a separate VID vote? + VidVote(DAVote), + /// VID certificate data is available + /// + /// Like [`DACertificate`] + /// TODO currently re-using [`DACertificate`]; do we need a separate VID cert? + VidCertificate(DACertificate), } /// Messages related to the consensus protocol. @@ -377,7 +424,12 @@ impl< p.data.get_view_number() } CommitteeConsensusMessage::DAVote(vote_message) => vote_message.current_view(), - CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, + CommitteeConsensusMessage::DACertificate(cert) + | CommitteeConsensusMessage::VidCertificate(cert) => cert.view_number, + CommitteeConsensusMessage::VidDisperseMsg(disperse) => { + disperse.data.get_view_number() + } + CommitteeConsensusMessage::VidVote(vote) => vote.current_view(), } } } @@ -397,7 +449,10 @@ impl< Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, + CommitteeConsensusMessage::VidVote(_) => MessagePurpose::VidVote, CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, + CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, + CommitteeConsensusMessage::VidCertificate(_) => todo!(), }, } } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81a173e954..c24f90b99b 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -15,7 +15,7 @@ use crate::{ }; use crate::{ - message::{CommitteeConsensusMessage, GeneralConsensusMessage, Message}, + message::{GeneralConsensusMessage, Message}, vote::ViewSyncVoteInternal, }; @@ -517,7 +517,23 @@ pub trait CommitteeExchangeType: block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, - ) -> CommitteeConsensusMessage; + ) -> DAVote; + + // TODO temporary vid methods, move to quorum https://github.com/EspressoSystems/HotShot/issues/1696 + + /// Create a message with a vote on VID disperse data. + fn create_vid_message( + &self, + block_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> DAVote; + + /// Sign a vote on VID proposal. + fn sign_vid_vote( + &self, + block_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature); } /// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. @@ -581,15 +597,44 @@ impl< block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, - ) -> CommitteeConsensusMessage { + ) -> DAVote { let signature = self.sign_da_vote(block_commitment); - CommitteeConsensusMessage::::DAVote(DAVote { + DAVote { + signature, + block_commitment, + current_view, + vote_token, + vote_data: VoteData::DA(block_commitment), + } + } + + fn create_vid_message( + &self, + block_commitment: Commitment, + current_view: ::Time, + vote_token: ::VoteTokenType, + ) -> DAVote { + let signature = self.sign_vid_vote(block_commitment); + DAVote { signature, block_commitment, current_view, vote_token, vote_data: VoteData::DA(block_commitment), - }) + } + } + + fn sign_vid_vote( + &self, + block_commitment: Commitment<::BlockType>, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::::DA(block_commitment) + .commit() + .as_ref(), + ); + (self.public_key.to_bytes(), signature) } } diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 1da2781c55..cf6873877a 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -33,6 +33,30 @@ pub fn post_vote_route(view_number: u64) -> String { format!("api/votes/{view_number}") } +pub fn get_vid_disperse_route(view_number: u64) -> String { + format!("api/vid/disperse/{view_number}") +} + +pub fn post_vid_disperse_route(view_number: u64) -> String { + format!("api/vid/disperse/{view_number}") +} + +pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { + format!("api/vid/votes/{view_number}/{index}") +} + +pub fn post_vid_vote_route(view_number: u64) -> String { + format!("api/vid/votes/{view_number}") +} + +pub fn get_vid_cert_route(view_number: u64) -> String { + format!("api/vid/cert/{view_number}") +} + +pub fn post_vid_cert_route(view_number: u64) -> String { + format!("api/vid/cert/{view_number}") +} + pub fn get_transactions_route(index: u64) -> String { format!("api/transactions/{index}") } From 8fe468c0f1fa1c536c4decae56a2c5aced4eb38f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 13:47:39 -0400 Subject: [PATCH 0059/1393] DA accumulator works --- task-impls/src/da.rs | 65 ++++++------------------------------ types/src/traits/election.rs | 2 +- types/src/vote.rs | 2 +- 3 files changed, 12 insertions(+), 57 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b4143b3844..8f6ec2469e 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -15,10 +15,8 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::traits::election::SignedCertificate; -use hotshot_types::vote::AccumulatorPlaceholder; -use hotshot_types::vote::VoteType; +use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, @@ -106,11 +104,7 @@ pub struct DAVoteCollectionTaskState< { /// the committee exchange pub committee_exchange: Arc>, - /// the vote accumulator - pub accumulator: - Either, DACertificate>, - /// The accumulator #[allow(clippy::type_complexity)] pub accumulator2: Either< as SignedCertificate< @@ -178,31 +172,9 @@ where &vote.block_commitment, ) { Left(new_accumulator) => { - error!("DA cert still accumulating"); state.accumulator2 = either::Left(new_accumulator); } - Right(dac) => { - panic!("DA cert made!"); - state.accumulator2 = either::Right(dac); - } - } - let accumulator = state.accumulator.left().unwrap(); - match state.committee_exchange.accumulate_vote( - &vote.signature.0, - &vote.signature.1, - vote.block_commitment, - vote.vote_data, - vote.vote_token.clone(), - state.cur_view, - accumulator, - None, - ) { - Left(acc) => { - state.accumulator = Either::Left(acc); - // debug!("Not enough DA votes! "); - return (None, state); - } Right(dac) => { debug!("Sending DAC! {:?}", dac.view_number); state @@ -213,7 +185,8 @@ where )) .await; - state.accumulator = Right(dac.clone()); + // TODO ED Rename this to just accumulator + state.accumulator2 = Right(dac.clone()); state .committee_exchange .network() @@ -391,29 +364,7 @@ where } else { TYPES::Time::new(0) }; - let acc = VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), - success_threshold: self.committee_exchange.success_threshold(), - failure_threshold: self.committee_exchange.failure_threshold(), - sig_lists: Vec::new(), - signers: bitvec![0; self.committee_exchange.total_nodes()], - }; - let accumulator = self.committee_exchange.accumulate_vote( - &vote.clone().signature.0, - &vote.clone().signature.1, - vote.clone().block_commitment, - vote.clone().vote_data.clone(), - vote.clone().vote_token.clone(), - vote.clone().current_view, - acc, - None, - ); + let new_accumulator = DAVoteAccumulator { da_vote_outcomes: HashMap::new(), success_threshold: self.committee_exchange.success_threshold(), @@ -423,12 +374,16 @@ where }; // TODO ED Get vote data here instead of cloning into block commitment field of vote - let accumulator2 = self.committee_exchange.accumulate_vote_2(new_accumulator, &vote, &vote.clone().block_commitment); + let accumulator2 = self.committee_exchange.accumulate_vote_2( + new_accumulator, + &vote, + &vote.clone().block_commitment, + ); if view > collection_view { let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), - accumulator, + accumulator2: accumulator2, cur_view: view, event_stream: self.event_stream.clone(), diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index ca4aa9c1d8..4a70d2f732 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -296,7 +296,7 @@ pub trait ConsensusExchange: Send + Sync { /// A proposal for participants to vote on. type Proposal: ProposalType; /// A vote on a [`Proposal`](Self::Proposal). - // TODO ED Use default associated type if it becomes stable + // TODO ED Make this equal Certificate vote (if possible?) type Vote: VoteType; /// A [`SignedCertificate`] attesting to a decision taken by the committee. type Certificate: SignedCertificate diff --git a/types/src/vote.rs b/types/src/vote.rs index 31e98a5c65..1048a65b05 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -359,6 +359,7 @@ impl< VOTE: VoteType, > Accumulator2 for DAVoteAccumulator { + // TODO ED We could make this the default impl, so it works for both TC and DAC fn append( mut self, vote: VOTE, @@ -427,7 +428,6 @@ impl< return Either::Right(AssembledSignature::DA(real_qc_sig)); } - error!("DA stake casted is {da_stake_casted}"); Either::Left(self) } } From f2945b86ca6e3319c65b232a61f4f1ce71abc4da Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:05:11 -0400 Subject: [PATCH 0060/1393] Basic setup for consensus accumulator --- task-impls/src/consensus.rs | 78 +++++++++++++++++++------------------ task-impls/src/da.rs | 1 - 2 files changed, 40 insertions(+), 39 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c52fe7e301..4ac149dd11 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -148,8 +148,15 @@ pub struct VoteCollectionTaskState< pub quorum_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulator for votes - pub accumulator: - Either, QuorumCertificate>, + pub accumulator: Either< + > as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + SequencingLeaf, + >>::VoteAccumulator, + QuorumCertificate>, + >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, /// The event stream shared by all tasks @@ -191,32 +198,29 @@ where { // TODO ED Emit a view change event upon new proposal? match event { - SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote { - QuorumVote::Yes(vote) => { + SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { + QuorumVote::Yes(vote_internal) => { // For the case where we receive votes after we've made a certificate if state.accumulator.is_right() { return (None, state); } - if vote.current_view != state.cur_view { + if vote_internal.current_view != state.cur_view { error!( "Vote view does not match! vote view is {} current view is {}", - *vote.current_view, *state.cur_view + *vote_internal.current_view, *state.cur_view ); return (None, state); } let accumulator = state.accumulator.left().unwrap(); - match state.quorum_exchange.accumulate_vote( - &vote.signature.0, - &vote.signature.1, - vote.leaf_commitment, - vote.vote_data, - vote.vote_token.clone(), - state.cur_view, + // TODO ED Maybe we don't need this to take in commitment? Can just get it from the vote directly if it is always + // going to be passed in as the vote.commitment + match state.quorum_exchange.accumulate_vote_2( accumulator, - None, - ) { + &vote, + &vote_internal.leaf_commitment, + ) { Either::Left(acc) => { state.accumulator = Either::Left(acc); return (None, state); @@ -895,15 +899,17 @@ where return; } + // TODO ED Should remove this match because we'd always want to collect votes no matter the type on qc + // Though will need a sep accumulator for Timeout votes match vote { - QuorumVote::Yes(vote) => { + QuorumVote::Yes(vote_internal) => { let handle_event = HandleEvent(Arc::new(move |event, state| { async move { vote_handle(state, event).await }.boxed() })); let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { - if vote.current_view > *collection_view { + if vote_internal.current_view > *collection_view { // ED I think we'd want to let that task timeout to avoid a griefing vector self.registry.shutdown_task(*collection_task).await; } @@ -912,37 +918,33 @@ where TYPES::Time::new(0) }; - let acc = VoteAccumulator { + + // Todo check if we are the leader + let new_accumulator = QuorumVoteAccumulator { total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), yes_vote_outcomes: HashMap::new(), no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.quorum_exchange.success_threshold(), failure_threshold: self.quorum_exchange.failure_threshold(), + sig_lists: Vec::new(), signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, }; - - // Todo check if we are the leader - let accumulator = self.quorum_exchange.accumulate_vote( - &vote.clone().signature.0, - &vote.clone().signature.1, - vote.clone().leaf_commitment, - vote.clone().vote_data.clone(), - vote.clone().vote_token.clone(), - vote.clone().current_view, - acc, - None, + + // TODO ED Get vote data here instead of cloning into block commitment field of vote + let accumulator = self.quorum_exchange.accumulate_vote_2( + new_accumulator, + &vote, + &vote_internal.clone().leaf_commitment, ); - if vote.current_view > collection_view { + if vote_internal.current_view > collection_view { let state = VoteCollectionTaskState { quorum_exchange: self.quorum_exchange.clone(), accumulator, - cur_view: vote.current_view, + cur_view: vote_internal.current_view, event_stream: self.event_stream.clone(), id: self.id, }; @@ -962,17 +964,17 @@ where let id = builder.get_task_id().unwrap(); let stream_id = builder.get_stream_id().unwrap(); - self.vote_collector = Some((vote.current_view, id, stream_id)); + self.vote_collector = Some((vote_internal.current_view, id, stream_id)); let _task = async_spawn(async move { VoteCollectionTypes::build(builder).launch().await; }); - debug!("Starting vote handle for view {:?}", vote.current_view); + debug!("Starting vote handle for view {:?}", vote_internal.current_view); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream .direct_message( stream_id, - SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote)), + SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote_internal)), ) .await; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 8f6ec2469e..510cd40f2a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -32,7 +32,6 @@ use hotshot_types::{ Block, State, }, utils::ViewInner, - vote::VoteAccumulator, }; use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; From 93fb339468b2ebba37bc1392d47f23d12439acd3 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:18:20 -0400 Subject: [PATCH 0061/1393] Add QuorumVoteAccumulator and ViewSyncVoteAccumulator placeholders --- task-impls/src/consensus.rs | 17 ++++--- types/src/certificate.rs | 7 +-- types/src/vote.rs | 96 +++++++++++++++++++++++++++++++++++-- 3 files changed, 107 insertions(+), 13 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 4ac149dd11..0879ee9d1b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -18,6 +18,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::vote::QuorumVoteAccumulator; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, consensus::{Consensus, View}, @@ -220,7 +221,7 @@ where accumulator, &vote, &vote_internal.leaf_commitment, - ) { + ) { Either::Left(acc) => { state.accumulator = Either::Left(acc); return (None, state); @@ -901,7 +902,7 @@ where // TODO ED Should remove this match because we'd always want to collect votes no matter the type on qc // Though will need a sep accumulator for Timeout votes - match vote { + match vote.clone() { QuorumVote::Yes(vote_internal) => { let handle_event = HandleEvent(Arc::new(move |event, state| { async move { vote_handle(state, event).await }.boxed() @@ -918,7 +919,6 @@ where TYPES::Time::new(0) }; - // Todo check if we are the leader let new_accumulator = QuorumVoteAccumulator { total_vote_outcomes: HashMap::new(), @@ -932,7 +932,7 @@ where signers: bitvec![0; self.quorum_exchange.total_nodes()], phantom: PhantomData, }; - + // TODO ED Get vote data here instead of cloning into block commitment field of vote let accumulator = self.quorum_exchange.accumulate_vote_2( new_accumulator, @@ -969,12 +969,17 @@ where let _task = async_spawn(async move { VoteCollectionTypes::build(builder).launch().await; }); - debug!("Starting vote handle for view {:?}", vote_internal.current_view); + debug!( + "Starting vote handle for view {:?}", + vote_internal.current_view + ); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream .direct_message( stream_id, - SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote_internal)), + SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes( + vote_internal, + )), ) .await; } diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 124d762a8c..8a7c394167 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,8 +1,9 @@ //! Provides two types of cerrtificates and their accumulators. -use crate::vote::AccumulatorPlaceholder; use crate::vote::DAVoteAccumulator; use crate::vote::QuorumVote; +use crate::vote::QuorumVoteAccumulator; +use crate::vote::ViewSyncVoteAccumulator; use crate::{ data::{fake_commitment, serialize_signature, LeafType}, traits::{ @@ -160,7 +161,7 @@ impl> for QuorumCertificate { type Vote = QuorumVote; - type VoteAccumulator = AccumulatorPlaceholder; + type VoteAccumulator = QuorumVoteAccumulator; fn from_signatures_and_commitment( view_number: TYPES::Time, @@ -324,7 +325,7 @@ impl for ViewSyncCertificate { type Vote = ViewSyncVote; - type VoteAccumulator = AccumulatorPlaceholder, Self::Vote>; + type VoteAccumulator = ViewSyncVoteAccumulator, Self::Vote>; /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( view_number: TYPES::Time, diff --git a/types/src/vote.rs b/types/src/vote.rs index 1048a65b05..f9c2a646ea 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -333,7 +333,12 @@ pub trait Accumulator2< /// Append 1 vote to the accumulator. If the threshold is not reached, return /// the accumulator, else return the `AssembledSignature` /// Only called from inside `accumulate_internal` - fn append(self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>,) -> Either>; + fn append( + self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either>; } pub struct DAVoteAccumulator< @@ -412,7 +417,7 @@ impl< if *da_stake_casted >= u64::from(self.success_threshold) { // Assemble QC let real_qc_pp = ::get_public_parameter( - // TODO ED Something about stake table entries. Might be easier to just pass in membership? + // TODO ED Something about stake table entries. Might be easier to just pass in membership? stake_table_entries.clone(), U256::from(self.success_threshold.get()), ); @@ -423,7 +428,7 @@ impl< &self.sig_lists[..], ); - // TODO ED Why do we need this line if we have the above line? + // TODO ED Why do we need this line if we have the above line? self.da_vote_outcomes.remove(&vote_commitment); return Either::Right(AssembledSignature::DA(real_qc_sig)); @@ -432,6 +437,84 @@ impl< } } +// TODO ED Should make these fields a trait for Accumulator, like success threshold, etc. +pub struct QuorumVoteAccumulator< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, +> { + /// Map of all da signatures accumlated so far + pub total_vote_outcomes: VoteMap, + pub yes_vote_outcomes: VoteMap, + + pub no_vote_outcomes: VoteMap, + + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + + pub failure_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::Signature>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + + pub phantom: PhantomData, +} + +impl< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, + > Accumulator2 for QuorumVoteAccumulator +{ + fn append( + self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + todo!() + } +} + +pub struct ViewSyncVoteAccumulator< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, +> { + /// Map of all da signatures accumlated so far + pub pre_commit_vote_outcomes: VoteMap, + pub commit_vote_outcomes: VoteMap, + pub finalize_vote_outcomes: VoteMap, + + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + + pub failure_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::Signature>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + + pub phantom: PhantomData, +} + +impl< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, + > Accumulator2 for ViewSyncVoteAccumulator +{ + fn append( + self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + todo!() + } +} + /// Placeholder accumulator; will be replaced by accumulator for each certificate type #[deprecated] pub struct AccumulatorPlaceholder< @@ -449,7 +532,12 @@ impl< VOTE: VoteType, > Accumulator2 for AccumulatorPlaceholder { - fn append(self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>) -> Either> { + fn append( + self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { todo!() } } From 8f427bef59af713515fb27859e0690caef744451 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:37:16 -0400 Subject: [PATCH 0062/1393] Consensus accumulator working --- types/src/vote.rs | 103 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 98 insertions(+), 5 deletions(-) diff --git a/types/src/vote.rs b/types/src/vote.rs index f9c2a646ea..70c54bbcc5 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -437,7 +437,7 @@ impl< } } -// TODO ED Should make these fields a trait for Accumulator, like success threshold, etc. +// TODO ED Should make these fields a trait for Accumulator, like success threshold, etc. pub struct QuorumVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, @@ -468,12 +468,105 @@ impl< > Accumulator2 for QuorumVoteAccumulator { fn append( - self, + mut self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - todo!() + let vote_commitment = match vote.get_data() { + VoteData::Yes(commitment) | VoteData::No(commitment) => commitment, + + _ => return Either::Left(self), + }; + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::Signature = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (total_stake_casted, total_vote_map) = self + .total_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (yes_stake_casted, yes_vote_map) = self + .yes_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (no_stake_casted, no_vote_map) = self + .no_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if total_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + // Update the active_keys and signature lists + // TODO ED How does this differ than the check above? + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // TODO ED Make all these get calls as local variables to avoid constantly calling them + *total_stake_casted += u64::from(vote.get_vote_token().vote_count()); + total_vote_map.insert( + encoded_key.clone(), + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + + match vote.get_data() { + VoteData::Yes(_) => { + *yes_stake_casted += u64::from(vote.get_vote_token().vote_count()); + yes_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + VoteData::No(_) => { + *no_stake_casted += u64::from(vote.get_vote_token().vote_count()); + no_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + _ => return Either::Left(self), + } + + if *total_stake_casted >= u64::from(self.success_threshold) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + // TODO ED Something about stake table entries. Might be easier to just pass in membership? + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + if *yes_stake_casted >= u64::from(self.success_threshold) { + self.yes_vote_outcomes.remove(&vote_commitment); + return Either::Right(AssembledSignature::Yes(real_qc_sig)); + } else if *no_stake_casted >= u64::from(self.failure_threshold) { + self.total_vote_outcomes.remove(&vote_commitment); + return Either::Right(AssembledSignature::No(real_qc_sig)); + } + } + Either::Left(self) } } @@ -490,7 +583,7 @@ pub struct ViewSyncVoteAccumulator< /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, - pub failure_threshold: NonZeroU64, + pub failure_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::Signature>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check @@ -506,7 +599,7 @@ impl< > Accumulator2 for ViewSyncVoteAccumulator { fn append( - self, + mut self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, From 6c59fc34194034832caece1b5af2e1e3c1be1b5e Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:25:57 -0400 Subject: [PATCH 0063/1393] View sync accumulator compiles --- task-impls/src/view_sync.rs | 74 ++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 30 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 18c8ae581f..28decf328d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -9,7 +9,14 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::{election::Membership, network::ConsensusIntentEvent}; +use hotshot_types::traits::election::VoteData; +use hotshot_types::{ + traits::{ + election::{Membership, SignedCertificate}, + network::ConsensusIntentEvent, + }, + vote::{ViewSyncVoteAccumulator, VoteType}, +}; use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; @@ -28,7 +35,7 @@ use hotshot_types::{ vote::{ViewSyncData, ViewSyncVote, VoteAccumulator}, }; use snafu::Snafu; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use tracing::{debug, error, instrument}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync @@ -217,7 +224,12 @@ pub struct ViewSyncRelayTaskState< pub exchange: Arc>, /// Vote accumulator pub accumulator: Either< - VoteAccumulator>, + as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + ViewSyncData, + >>::VoteAccumulator, ViewSyncCertificate, >, /// Our node id; for logging @@ -380,24 +392,23 @@ where return; } - let accumulator = VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), + let new_accumulator = ViewSyncVoteAccumulator { + pre_commit_vote_outcomes: HashMap::new(), + commit_vote_outcomes: HashMap::new(), + finalize_vote_outcomes: HashMap::new(), + success_threshold: self.exchange.success_threshold(), failure_threshold: self.exchange.failure_threshold(), + sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], + phantom: PhantomData, }; let mut relay_state = ViewSyncRelayTaskState { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), - accumulator: either::Left(accumulator), + accumulator: either::Left(new_accumulator), id: self.id, }; @@ -958,7 +969,7 @@ where return (Some(HotShotTaskCompleted::ShutDown), self); } - let (vote_internal, phase) = match vote { + let (vote_internal, phase) = match vote.clone() { ViewSyncVote::PreCommit(vote_internal) => { (vote_internal, ViewSyncPhase::PreCommit) } @@ -993,15 +1004,18 @@ where *vote_internal.round, vote_internal.relay ); - let accumulator = self.exchange.accumulate_vote( - &vote_internal.signature.0, - &vote_internal.signature.1, - view_sync_data, - vote_internal.vote_data, - vote_internal.vote_token.clone(), - vote_internal.round, + // TODO ED This isn't ideal, should fix this + let vote_data = match vote.get_data() { + VoteData::ViewSyncPreCommit(data) => data, + VoteData::ViewSyncCommit(data) => data, + VoteData::ViewSyncFinalize(data) => data, + _ => unimplemented!(), + }; + + let accumulator = self.exchange.accumulate_vote_2( self.accumulator.left().unwrap(), - Some(vote_internal.relay), + &vote, + &vote_data, ); self.accumulator = match accumulator { @@ -1022,19 +1036,19 @@ where .await; // Reset accumulator for new certificate - either::Left(VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), + let new_accumulator = ViewSyncVoteAccumulator { + pre_commit_vote_outcomes: HashMap::new(), + commit_vote_outcomes: HashMap::new(), + finalize_vote_outcomes: HashMap::new(), + success_threshold: self.exchange.success_threshold(), failure_threshold: self.exchange.failure_threshold(), + sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], - }) + phantom: PhantomData, + }; + either::Left(new_accumulator) } }; From e112486d92501129b6124ad06e48917939a8d6a4 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 13 Sep 2023 13:47:29 -0700 Subject: [PATCH 0064/1393] removing known_nodes in favor of using known_nodes_with_stake --- hotshot-signature-key/src/bn254/bn254_pub.rs | 6 ++++ hotshot/examples/infra/mod.rs | 4 +-- hotshot/examples/infra/modDA.rs | 2 -- hotshot/src/lib.rs | 3 -- .../src/traits/election/static_committee.rs | 35 ++++++++----------- orchestrator/src/config.rs | 9 +++-- orchestrator/src/lib.rs | 2 +- testing/src/task_helpers.rs | 2 -- testing/src/test_builder.rs | 3 +- testing/src/test_launcher.rs | 2 -- testing/src/test_runner.rs | 3 -- types/src/lib.rs | 4 +-- types/src/traits/election.rs | 11 ++---- types/src/traits/node_implementation.rs | 6 +--- types/src/traits/signature_key.rs | 3 ++ 15 files changed, 39 insertions(+), 56 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index c25fad4be0..714ee98687 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -134,6 +134,12 @@ impl SignatureKey for BN254Pub { } } + fn get_public_key(entry: &Self::StakeTableEntry) -> Self { + Self { + pub_key: entry.stake_key, + } + } + fn get_public_parameter( stake_entries: Vec, threshold: U256, diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index efd3b8169d..6fafd81727 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -52,7 +52,7 @@ pub fn load_config_from_file( > = config_toml.into(); // Generate network's public keys - config.config.known_nodes = (0..config.config.total_nodes.get()) + let known_nodes = (0..config.config.total_nodes.get()) .map(|node_id| { TYPES::SignatureKey::generated_from_seed_indexed( config.seed, @@ -63,7 +63,7 @@ pub fn load_config_from_file( .collect(); config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) - .map(|node_id| config.config.known_nodes[node_id].get_stake_table_entry(1u64)) + .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) .collect(); config diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 729cbfd79f..fca5b4b64d 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -215,7 +215,6 @@ pub trait RunDA< // Get KeyPair for certificate Aggregation let (pk, sk) = TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); - let known_nodes = config.config.known_nodes.clone(); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let entry = pk.get_stake_table_entry(1u64); @@ -240,7 +239,6 @@ pub trait RunDA< let exchanges = NODE::Exchanges::create( known_nodes_with_stake.clone(), - known_nodes.clone(), (quorum_election_config, committee_election_config), ( quorum_network.clone(), diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa435e1f1d..34a50ea7ec 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -114,7 +114,6 @@ pub struct SystemContextInner> { /// Configuration items for this hotshot instance config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -178,7 +177,6 @@ impl> SystemContext { private_key: ::PrivateKey, nonce: u64, config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -405,7 +403,6 @@ impl> SystemContext { private_key: ::PrivateKey, node_id: u64, config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 2c95746d4b..0f0d94feae 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -19,12 +19,8 @@ use tracing::debug; #[derive(Clone, Debug, Eq, PartialEq)] pub struct GeneralStaticCommittee, PUBKEY: SignatureKey> { - /// All the nodes participating - nodes: Vec, /// All the nodes participating and their stake nodes_with_stake: Vec, - /// The nodes on the static committee - committee_nodes: Vec, /// The nodes on the static committee and their stake committee_nodes_with_stake: Vec, /// Node type phantom @@ -41,11 +37,9 @@ impl, PUBKEY: SignatureKey> { /// Creates a new dummy elector #[must_use] - pub fn new(nodes: Vec, nodes_with_stake: Vec) -> Self { + pub fn new(_nodes: Vec, nodes_with_stake: Vec) -> Self { Self { - nodes: nodes.clone(), nodes_with_stake: nodes_with_stake.clone(), - committee_nodes: nodes, committee_nodes_with_stake: nodes_with_stake, _type_phantom: PhantomData, _leaf_phantom: PhantomData, @@ -107,8 +101,9 @@ where /// Index the vector of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { - let index = (*view_number % self.nodes.len() as u64) as usize; - self.nodes[index].clone() + let index = (*view_number % self.nodes_with_stake.len() as u64) as usize; + let res = self.nodes_with_stake[index].clone(); + TYPES::SignatureKey::get_public_key(&res) } /// Simply make the partial signature @@ -119,7 +114,8 @@ where ) -> std::result::Result>, ElectionError> { // TODO ED Below let pub_key = PUBKEY::from_private(private_key); - if !self.committee_nodes.contains(&pub_key) { + let entry = pub_key.get_stake_table_entry(1u64); + if !self.committee_nodes_with_stake.contains(&entry) { return Ok(None); } let mut message: Vec = vec![]; @@ -137,7 +133,8 @@ where ) -> Result, ElectionError> { match token { Checked::Valid(t) | Checked::Unchecked(t) => { - if self.committee_nodes.contains(&pub_key) { + let entry = pub_key.get_stake_table_entry(1u64); + if self.committee_nodes_with_stake.contains(&entry) { Ok(Checked::Valid(t)) } else { Ok(Checked::Inval(t)) @@ -153,18 +150,13 @@ where fn create_election( keys_qc: Vec, - keys: Vec, config: TYPES::ElectionConfigType, ) -> Self { - let mut committee_nodes = keys.clone(); let mut committee_nodes_with_stake = keys_qc.clone(); - committee_nodes.truncate(config.num_nodes.try_into().unwrap()); debug!("Election Membership Size: {}", config.num_nodes); committee_nodes_with_stake.truncate(config.num_nodes.try_into().unwrap()); Self { nodes_with_stake: keys_qc, - nodes: keys, - committee_nodes, committee_nodes_with_stake, _type_phantom: PhantomData, _leaf_phantom: PhantomData, @@ -172,21 +164,24 @@ where } fn total_nodes(&self) -> usize { - self.committee_nodes.len() + self.committee_nodes_with_stake.len() } fn success_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes.len() as u64 * 2) / 3) + 1).unwrap() + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 2) / 3) + 1).unwrap() } fn failure_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes.len() as u64) / 3) + 1).unwrap() + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64) / 3) + 1).unwrap() } fn get_committee( &self, _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { - self.committee_nodes.clone().into_iter().collect() + // Transfer from committee_nodes_with_stake to pure committee_nodes + (0..(self.committee_nodes_with_stake.len())) + .map(|node_id| ::SignatureKey::get_public_key(&self.committee_nodes_with_stake[node_id])) + .collect() } } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 061ab256d5..0f6fc4e96d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -4,6 +4,7 @@ use std::{ num::NonZeroUsize, time::Duration, }; +use std::marker::PhantomData; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, @@ -65,9 +66,10 @@ pub struct NetworkConfig { pub key_type_name: String, pub election_config_type_name: String, pub libp2p_config: Option, - pub config: HotShotConfig, + pub config: HotShotConfig, pub web_server_config: Option, pub da_web_server_config: Option, + _key_type_phantom: PhantomData, } impl Default for NetworkConfig { @@ -85,6 +87,7 @@ impl Default for NetworkConfig { election_config_type_name: std::any::type_name::().to_string(), web_server_config: None, da_web_server_config: None, + _key_type_phantom: PhantomData, } } } @@ -152,6 +155,7 @@ impl From for NetworkConfig { start_delay_seconds: val.start_delay_seconds, web_server_config: val.web_server_config, da_web_server_config: val.da_web_server_config, + _key_type_phantom: PhantomData, } } } @@ -183,14 +187,13 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, } -impl From for HotShotConfig { +impl From for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, total_nodes: val.total_nodes, max_transactions: val.max_transactions, min_transactions: val.min_transactions, - known_nodes: Vec::new(), known_nodes_with_stake: Vec::new(), da_committee_size: val.committee_nodes, next_view_timeout: val.next_view_timeout, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 4391fc43e6..574cc8c38a 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -174,7 +174,7 @@ where fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; println!("Nodes connected: {}", self.nodes_connected); - if self.nodes_connected >= self.config.config.known_nodes.len().try_into().unwrap() { + if self.nodes_connected >= self.config.config.known_nodes_with_stake.len().try_into().unwrap() { self.start = true; } Ok(()) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 13868721d4..3d2cbe3bf5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -47,7 +47,6 @@ pub async fn build_system_handle( >>::block_genesis()) .unwrap(); - let known_nodes = config.known_nodes.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); @@ -67,7 +66,6 @@ pub async fn build_system_handle( let exchanges = >::Exchanges::create( known_nodes_with_stake.clone(), - known_nodes.clone(), (quorum_election_config, committee_election_config), networks, public_key, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f1db2a9320..326debb69b 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -219,7 +219,6 @@ impl TestMetadata { num_bootstrap: num_bootstrap_nodes, min_transactions, max_transactions: NonZeroUsize::new(99999).unwrap(), - known_nodes, known_nodes_with_stake, da_committee_size, next_view_timeout: 500, @@ -247,7 +246,7 @@ impl TestMetadata { } = timing_data; let mod_config = // TODO this should really be using the timing config struct - |a: &mut HotShotConfig::StakeTableEntry, TYPES::ElectionConfigType>| { + |a: &mut HotShotConfig<::StakeTableEntry, TYPES::ElectionConfigType>| { a.next_view_timeout = next_view_timeout; a.timeout_ratio = timeout_ratio; a.round_start_delay = round_start_delay; diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 4a7e6bd9b1..cbc8ba64bb 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -94,7 +94,6 @@ where pub storage: Generator<>::Storage>, /// configuration used to generate each hotshot node pub config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -198,7 +197,6 @@ impl> TestLauncher::StakeTableEntry, TYPES::ElectionConfigType, >, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 57c82d24e9..777d8769f5 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -234,7 +234,6 @@ where storage: I::Storage, initializer: HotShotInitializer, config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -250,7 +249,6 @@ where let node_id = self.next_node_id; self.next_node_id += 1; - let known_nodes = config.known_nodes.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); // Generate key pair for certificate aggregation let private_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; @@ -265,7 +263,6 @@ where let committee_election_config = I::committee_election_config_generator(); let exchanges = I::Exchanges::create( known_nodes_with_stake.clone(), - known_nodes.clone(), ( quorum_election_config, committee_election_config(config.da_committee_size as u64), diff --git a/types/src/lib.rs b/types/src/lib.rs index e574bef876..c1b447c117 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -36,7 +36,7 @@ pub enum ExecutionType { /// Holds configuration for a `HotShot` #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] -pub struct HotShotConfig { +pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, /// Total number of nodes in the network @@ -45,8 +45,6 @@ pub struct HotShotConfig { pub min_transactions: usize, /// Maximum transactions per block pub max_transactions: NonZeroUsize, - /// List of known node's public keys, including own, sorted by nonce () - pub known_nodes: Vec, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec, /// List of DA committee nodes for static DA committe diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81a173e954..a138de6ce9 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -221,7 +221,6 @@ pub trait Membership: /// TODO may want to move this to a testableelection trait fn create_election( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, ) -> Self; @@ -291,7 +290,6 @@ pub trait ConsensusExchange: Send + Sync { /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, @@ -609,7 +607,6 @@ impl< fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, @@ -617,7 +614,7 @@ impl< sk: ::PrivateKey, ) -> Self { let membership = >::Membership::create_election( - entries, keys, config, + entries, config, ); Self { network, @@ -929,7 +926,6 @@ impl< fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, @@ -937,7 +933,7 @@ impl< sk: ::PrivateKey, ) -> Self { let membership = >::Membership::create_election( - entries, keys, config, + entries, config, ); Self { network, @@ -1288,7 +1284,6 @@ impl< fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, @@ -1296,7 +1291,7 @@ impl< sk: ::PrivateKey, ) -> Self { let membership = >::Membership::create_election( - entries, keys, config, + entries, config, ); Self { network, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ba5abdebb..451d071e07 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -166,7 +166,6 @@ pub trait ExchangesType, MESSA /// Create all exchanges. fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, configs: Self::ElectionConfigs, networks: ( >::Networking, @@ -257,7 +256,6 @@ where fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, configs: Self::ElectionConfigs, networks: ( >::Networking, @@ -270,7 +268,6 @@ where ) -> Self { let quorum_exchange = QUORUMEXCHANGE::create( entries.clone(), - keys.clone(), configs.0.clone(), networks.0, pk.clone(), @@ -279,7 +276,6 @@ where ); let view_sync_exchange = VIEWSYNCEXCHANGE::create( entries.clone(), - keys.clone(), configs.0, networks.2, pk.clone(), @@ -287,7 +283,7 @@ where sk.clone(), ); let committee_exchange = - COMMITTEEEXCHANGE::create(entries, keys, configs.1, networks.1, pk, entry, sk); + COMMITTEEEXCHANGE::create(entries, configs.1, networks.1, pk, entry, sk); Self { quorum_exchange, diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 5daed175bd..d8f79a080e 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -101,6 +101,9 @@ pub trait SignatureKey: /// get the stake table entry from the public key and stake value fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; + /// only get the public key from the stake table entry + fn get_public_key(entry: &Self::StakeTableEntry) -> Self; + /// get the public parameter for the assembled signature checking fn get_public_parameter( stake_entries: Vec, From 7ec2536745bf57c701358e8a3575140e8131a7c3 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 13 Sep 2023 14:10:38 -0700 Subject: [PATCH 0065/1393] fix fmt --- hotshot/src/traits/election/static_committee.rs | 10 +++++++--- orchestrator/src/config.rs | 2 +- orchestrator/src/lib.rs | 10 +++++++++- types/src/traits/election.rs | 15 ++++++--------- 4 files changed, 23 insertions(+), 14 deletions(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 0f0d94feae..871b3cfd42 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -180,8 +180,12 @@ where _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { // Transfer from committee_nodes_with_stake to pure committee_nodes - (0..(self.committee_nodes_with_stake.len())) - .map(|node_id| ::SignatureKey::get_public_key(&self.committee_nodes_with_stake[node_id])) - .collect() + (0..self.committee_nodes_with_stake.len()) + .map(|node_id| { + ::SignatureKey::get_public_key( + &self.committee_nodes_with_stake[node_id], + ) + }) + .collect() } } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 0f6fc4e96d..05a38b615c 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,10 +1,10 @@ use hotshot_types::{ExecutionType, HotShotConfig}; +use std::marker::PhantomData; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, time::Duration, }; -use std::marker::PhantomData; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 574cc8c38a..a7e7ae0c28 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -174,7 +174,15 @@ where fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; println!("Nodes connected: {}", self.nodes_connected); - if self.nodes_connected >= self.config.config.known_nodes_with_stake.len().try_into().unwrap() { + if self.nodes_connected + >= self + .config + .config + .known_nodes_with_stake + .len() + .try_into() + .unwrap() + { self.start = true; } Ok(()) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index a138de6ce9..dffc8b6f9a 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -613,9 +613,8 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = >::Membership::create_election( - entries, config, - ); + let membership = + >::Membership::create_election(entries, config); Self { network, membership, @@ -932,9 +931,8 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = >::Membership::create_election( - entries, config, - ); + let membership = + >::Membership::create_election(entries, config); Self { network, membership, @@ -1290,9 +1288,8 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = >::Membership::create_election( - entries, config, - ); + let membership = + >::Membership::create_election(entries, config); Self { network, membership, From 96144a3ff9e4c5e26b8b14bd942b953a304b4823 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 13 Sep 2023 14:11:31 -0700 Subject: [PATCH 0066/1393] pass async_std check --- hotshot/examples/infra/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 6fafd81727..1ffbb04a7f 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -52,7 +52,7 @@ pub fn load_config_from_file( > = config_toml.into(); // Generate network's public keys - let known_nodes = (0..config.config.total_nodes.get()) + let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { TYPES::SignatureKey::generated_from_seed_indexed( config.seed, From 65edf42a7ff4adf73172c66eb3e9e70e70880a1d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 13 Sep 2023 14:49:23 -0700 Subject: [PATCH 0067/1393] pass just lint and add blank line to the settings.json.example --- hotshot/src/traits/election/static_committee.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 871b3cfd42..0e79efacab 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -37,7 +37,7 @@ impl, PUBKEY: SignatureKey> { /// Creates a new dummy elector #[must_use] - pub fn new(_nodes: Vec, nodes_with_stake: Vec) -> Self { + pub fn new(_nodes: &[PUBKEY], nodes_with_stake: Vec) -> Self { Self { nodes_with_stake: nodes_with_stake.clone(), committee_nodes_with_stake: nodes_with_stake, From 120458e027ca0671b32a640d460f65b259dc7b78 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 13 Sep 2023 20:46:09 -0400 Subject: [PATCH 0068/1393] View sync accumulator in place; need to update from_signatures_and_commitment function to take vote instead of individual fields --- task-impls/src/view_sync.rs | 3 +- types/src/vote.rs | 123 +++++++++++++++++++++++++++++++++++- 2 files changed, 123 insertions(+), 3 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 28decf328d..b29943ced4 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1015,7 +1015,7 @@ where let accumulator = self.exchange.accumulate_vote_2( self.accumulator.left().unwrap(), &vote, - &vote_data, + &view_sync_data, ); self.accumulator = match accumulator { @@ -1027,7 +1027,6 @@ where data: certificate.clone(), signature, }; - // error!("Sending view sync cert {:?}", message.clone()); self.event_stream .publish(SequencingHotShotEvent::ViewSyncCertificateSend( message, diff --git a/types/src/vote.rs b/types/src/vote.rs index 70c54bbcc5..d76a0a395f 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -604,7 +604,128 @@ impl< vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - todo!() + let vote_commitment = match vote.get_data() { + VoteData::ViewSyncPreCommit(commitment) + | VoteData::ViewSyncCommit(commitment) + | VoteData::ViewSyncFinalize(commitment) => commitment, + + _ => return Either::Left(self), + }; + + error!("Vote is {:?}", vote.clone()); + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::Signature = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (pre_commit_stake_casted, pre_commit_vote_map) = self + .pre_commit_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (commit_stake_casted, commit_vote_map) = self + .commit_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (finalize_stake_casted, finalize_vote_map) = self + .finalize_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // update the active_keys and sig_lists + // TODO ED Possible bug where a node sends precommit vote and then commit vote after + // precommit cert is formed, their commit vote won't be counted because of this check + // Probably need separate signers vecs. + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("node id already in signers"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + match vote.get_data() { + VoteData::ViewSyncPreCommit(_) => { + *pre_commit_stake_casted += u64::from(vote.get_vote_token().vote_count()); + pre_commit_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + VoteData::ViewSyncCommit(_) => { + *commit_stake_casted += u64::from(vote.get_vote_token().vote_count()); + commit_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + VoteData::ViewSyncFinalize(_) => { + *finalize_stake_casted += u64::from(vote.get_vote_token().vote_count()); + finalize_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + _ => unimplemented!(), + } + + if *pre_commit_stake_casted >= u64::from(self.failure_threshold) { + let real_qc_pp = ::get_public_parameter( + stake_table_entries, + U256::from(self.failure_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + self.pre_commit_vote_outcomes + .remove(&vote_commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncPreCommit(real_qc_sig)); + } + + if *commit_stake_casted >= u64::from(self.success_threshold) { + let real_qc_pp = ::get_public_parameter( + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + self.commit_vote_outcomes.remove(&vote_commitment).unwrap(); + return Either::Right(AssembledSignature::ViewSyncCommit(real_qc_sig)); + } + + if *finalize_stake_casted >= u64::from(self.success_threshold) { + let real_qc_pp = ::get_public_parameter( + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + // TODO ED Why remove? + self.finalize_vote_outcomes + .remove(&vote_commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncFinalize(real_qc_sig)); + } + + Either::Left(self) } } From 6b0445f5875dc213d903b611e47159185430bc1a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Sep 2023 00:10:27 -0400 Subject: [PATCH 0069/1393] add endpoint to get a recent proposal to catchup w/ webserver --- .../traits/networking/web_server_network.rs | 30 ++++++++++ task-impls/src/consensus.rs | 7 ++- testing/src/spinning_task.rs | 3 +- testing/tests/catchup.rs | 60 ++++++++++++++++++- testing/tests/timeout.rs | 2 +- types/src/message.rs | 2 + types/src/traits/network.rs | 3 + web_server/api.toml | 7 +++ web_server/src/config.rs | 4 ++ web_server/src/lib.rs | 15 +++++ 10 files changed, 128 insertions(+), 5 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index e0001b94ab..f2c6c1c842 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -161,6 +161,7 @@ impl Inner { while self.running.load(Ordering::Relaxed) { let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), + MessagePurpose::CurrentProposal => config::get_recent_proposal_route(), MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), MessagePurpose::Data => config::get_transactions_route(tx_index), MessagePurpose::Internal => unimplemented!(), @@ -221,6 +222,15 @@ impl Inner { // } // } } + MessagePurpose::CurrentProposal => { + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + } MessagePurpose::Vote => { // error!( // "Received {} votes from web server for view {} is da {}", @@ -498,6 +508,7 @@ impl< let endpoint = match &message.purpose() { MessagePurpose::Proposal => config::post_proposal_route(*view_number), + MessagePurpose::CurrentProposal => return Err(WebServerNetworkError::EndpointError), MessagePurpose::Vote => config::post_vote_route(*view_number), MessagePurpose::Data => config::post_transactions_route(), MessagePurpose::Internal => return Err(WebServerNetworkError::EndpointError), @@ -783,6 +794,25 @@ impl< .await; } } + ConsensusIntentEvent::PollForCurrentProposal => { + // create new task + let (_, receiver) = unbounded(); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } ConsensusIntentEvent::PollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 03fcc387a8..35b27c9722 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -487,7 +487,12 @@ where // } self.cur_view = new_view; self.current_proposal = None; - + if new_view == TYPES::Time::new(1) { + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .await; + } // Start polling for proposals for the new view self.quorum_exchange .network() diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index c8ef07d51c..a9d9e5d586 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -113,7 +113,8 @@ impl SpinningTaskDescription { state.late_start.remove(&idx.try_into().unwrap()) { tracing::error!("Spinning up node late"); - node.run_tasks().await; + let handle = node.run_tasks().await; + handle.hotshot.start_consensus().await; } } UpDown::Down => { diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 2cbc6c8a16..9c6bd0bb38 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -59,6 +59,61 @@ async fn test_catchup() { .await; } +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_catchup_web() { + use std::time::Duration; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingTestTypes, SequencingWebImpl}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let catchup_nodes = vec![ChangeNode { + idx: 18, + updown: UpDown::Up, + }]; + + metadata.timing_data = timing_data; + metadata.start_nodes = 19; + metadata.total_nodes = 20; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::from_millis(400), catchup_nodes)], + }; + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + /// Test that one node catches up and has sucessful views after coming back #[cfg(test)] #[cfg_attr( @@ -66,6 +121,7 @@ async fn test_catchup() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_catchup_one_node() { use std::time::Duration; @@ -94,13 +150,13 @@ async fn test_catchup_one_node() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), catchup_nodes)], + node_changes: vec![(Duration::from_millis(400), catchup_nodes)], }; metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), + duration: Duration::from_millis(20000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 7ebeddd577..f8963c9d52 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -30,7 +30,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), dead_nodes)], + node_changes: vec![(Duration::new(0, 5000), dead_nodes)], }; // TODO ED Add safety task, etc to confirm TCs are being formed diff --git a/types/src/message.rs b/types/src/message.rs index 89d2864aa9..c499309a43 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -57,6 +57,8 @@ pub struct Messages>(pub Vec *view_number, + ConsensusIntentEvent::PollForCurrentProposal => 1, } } } diff --git a/web_server/api.toml b/web_server/api.toml index 5f749bcdb0..cc610fc9c9 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -11,6 +11,13 @@ DOC = """ Return the proposal for a given view number """ +# GET the proposal for a view, where the view is passed as an argument +[route.getrecentproposal] +PATH = ["proposal/"] +DOC = """ +Return the proposal for the most recent view the server has +""" + # POST a proposal, where the view is passed as an argument [route.postproposal] PATH = ["proposal/:view_number"] diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 1da2781c55..c06f365bd3 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -17,6 +17,10 @@ pub fn post_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } +pub fn get_recent_proposal_route() -> String { + "api/proposal".to_string() +} + pub fn get_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index ef7850cbb0..2bd746e1cd 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -33,6 +33,8 @@ struct WebServerState { da_certificates: HashMap)>, /// view for oldest proposals in memory oldest_proposal: u64, + /// view for the most recent proposal to help nodes catchup + recent_proposal: u64, /// view for teh oldest DA certificate oldest_certificate: u64, @@ -74,6 +76,7 @@ impl WebServerState { num_txns: 0, oldest_vote: 0, oldest_proposal: 0, + recent_proposal: 0, oldest_certificate: 0, shutdown: None, stake_table: Vec::new(), @@ -101,6 +104,7 @@ impl WebServerState { /// Trait defining methods needed for the `WebServerState` pub trait WebServerDataSource { fn get_proposal(&self, view_number: u64) -> Result>>, Error>; + fn get_recent_proposal(&self) -> Result>>, Error>; fn get_view_sync_proposal( &self, view_number: u64, @@ -156,6 +160,10 @@ impl WebServerDataSource for WebServerState { } } + fn get_recent_proposal(&self) -> Result>>, Error> { + self.get_proposal(self.recent_proposal) + } + fn get_view_sync_proposal( &self, view_number: u64, @@ -316,6 +324,10 @@ impl WebServerDataSource for WebServerState { fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { debug!("Received proposal for view {}", view_number); + if view_number > self.recent_proposal { + self.recent_proposal = view_number; + } + // Only keep proposal history for MAX_VIEWS number of view if self.proposals.len() >= MAX_VIEWS { self.proposals.remove(&self.oldest_proposal); @@ -494,6 +506,9 @@ where } .boxed() })? + .get("getrecentproposal", |_req, state| { + async move { state.get_recent_proposal() }.boxed() + })? .get("getviewsyncproposal", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; From d34bc40e2b59c2f5807b3c3eb5e7f2279f3d098e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Sep 2023 08:25:08 -0400 Subject: [PATCH 0070/1393] Bump serde_json from 1.0.106 to 1.0.107 (#1746) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.106 to 1.0.107. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.106...v1.0.107) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- hotshot/Cargo.toml | 2 +- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index a085531227..67ddb6b9ee 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -117,6 +117,6 @@ async-std = { workspace = true } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.106" +serde_json = "1.0.107" toml = { workspace = true } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 9e0efc24d3..82b5ed7d87 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -42,7 +42,7 @@ libp2p-noise = { version = "0.43.0", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.106" +serde_json = "1.0.107" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 58ac6c48a9..74ace15850 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -54,7 +54,7 @@ bit-vec = "0.6.3" typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.106" +serde_json = "1.0.107" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From c8ac2eacad167e83e8352051b894542e163fd3a6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Sep 2023 10:21:46 -0400 Subject: [PATCH 0071/1393] transactions task building --- hotshot/src/tasks/mod.rs | 69 ++++++++++ task-impls/src/consensus.rs | 43 +----- task-impls/src/da.rs | 148 +-------------------- task-impls/src/lib.rs | 3 + task-impls/src/transactions.rs | 236 ++++++++++++++++++++++++++++----- 5 files changed, 285 insertions(+), 214 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index e0dc9be155..4486914dfc 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -17,6 +17,7 @@ use hotshot_task::{ use hotshot_task_impls::{ consensus::{consensus_event_filter, ConsensusTaskTypes, SequencingConsensusTaskState}, da::{DATaskState, DATaskTypes}, + transactions::{TransactionTaskState, TransactionsTaskTypes}, events::SequencingHotShotEvent, network::{ NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, @@ -407,6 +408,74 @@ where task_runner.add_task(da_task_id, da_name.to_string(), da_task) } + +/// add the Transaction Handling task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_transaction_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + committee_exchange: CommitteeEx, + handle: SystemContextHandle, +) -> TaskRunner +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ + // build the da task + let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let registry = task_runner.registry.clone(); + let transactions_state = TransactionTaskState { + registry: registry.clone(), + api: c_api.clone(), + consensus: handle.hotshot.get_consensus(), + cur_view: TYPES::Time::new(0), + committee_exchange: committee_exchange.into(), + event_stream: event_stream.clone(), + id: handle.hotshot.inner.id, + }; + let transactions_event_handler = HandleEvent(Arc::new( + move |event, mut state: TransactionTaskState>| { + async move { + let completion_status = state.handle_event(event).await; + (completion_status, state) + } + .boxed() + }, + )); + let transactions_name = "Transactions Task"; + let transactions_event_filter = FilterEvent(Arc::new( + TransactionTaskState::>::filter, + )); + + let transactions_task_builder = TaskBuilder::< + TransactionsTaskTypes>, + >::new(transactions_name.to_string()) + .register_event_stream(event_stream.clone(), transactions_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(transactions_state) + .register_event_handler(transactions_event_handler); + // impossible for unwrap to fail + // we *just* registered + let da_task_id = transactions_task_builder.get_task_id().unwrap(); + let da_task = TransactionsTaskTypes::build(transactions_task_builder).launch(); + task_runner.add_task(da_task_id, transactions_name.to_string(), da_task) +} /// add the view sync task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1446e7bd86..5cd782405f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -800,47 +800,10 @@ where } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - let mut included_txn_size = 0; - let mut included_txn_count = 0; - let txns = consensus.transactions.cloned().await; - // store transactions in this block we never added to our transactions. - let _ = included_txns_set.iter().map(|hash| { - if !txns.contains_key(hash) { - consensus.seen_transactions.insert(*hash); - } - }); - drop(txns); - consensus - .transactions - .modify(|txns| { - *txns = txns - .drain() - .filter(|(txn_hash, txn)| { - if included_txns_set.contains(txn_hash) { - included_txn_count += 1; - included_txn_size += bincode_opts() - .serialized_size(txn) - .unwrap_or_default(); - false - } else { - true - } - }) - .collect(); - }) - .await; - - consensus - .metrics - .outstanding_transactions - .update(-included_txn_count); - consensus - .metrics - .outstanding_transactions_memory_size - .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); - debug!("about to publish decide"); - self.event_stream.publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())).await; + self.event_stream + .publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())) + .await; let decide_sent = self.output_event_stream.publish(Event { view_number: consensus.last_decided_view, event: EventType::Decide { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index d05588f1c8..8579996569 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -33,6 +33,7 @@ use hotshot_types::{ vote::VoteAccumulator, }; use hotshot_utils::bincode::bincode_opts; +use rand_chacha::rand_core::block; use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, @@ -70,7 +71,6 @@ pub struct DATaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - // pub transactions: Arc>>, /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>>, @@ -278,35 +278,6 @@ where event: SequencingHotShotEvent, ) -> Option { match event { - SequencingHotShotEvent::TransactionsRecv(transactions) => { - // TODO ED Add validation checks - - let mut consensus = self.consensus.write().await; - consensus - .get_transactions() - .modify(|txns| { - for transaction in transactions { - let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); - - // If we didn't already know about this transaction, update our mempool metrics. - if !consensus.seen_transactions.remove(&transaction.commit()) - && txns.insert(transaction.commit(), transaction).is_none() - { - consensus.metrics.outstanding_transactions.update(1); - consensus - .metrics - .outstanding_transactions_memory_size - .update(i64::try_from(size).unwrap_or_else(|e| { - warn!("Conversion failed: {e}. Using the max value."); - i64::MAX - })); - } - } - }) - .await; - - return None; - } SequencingHotShotEvent::DAProposalRecv(proposal, sender) => { debug!( "DA proposal received for view: {:?}", @@ -668,8 +639,6 @@ where .await; } - // TODO ED Make this a new task so it doesn't block main DA task - // If we are not the next leader (DA leader for this view) immediately exit if !self.committee_exchange.is_leader(self.cur_view + 1) { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); @@ -683,43 +652,9 @@ where .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view + 1)) .await; - // ED Copy of parent_leaf() function from sequencing leader - - let consensus = self.consensus.read().await; - let parent_view_number = &consensus.high_qc.view_number; - - let Some(parent_view) = consensus.state_map.get(parent_view_number) else { - error!( - "Couldn't find high QC parent in state map. Parent view {:?}", - parent_view_number - ); - return None; - }; - let Some(leaf) = parent_view.get_leaf_commitment() else { - error!( - ?parent_view_number, - ?parent_view, - "Parent of high QC points to a view without a proposal" - ); - return None; - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - error!("Failed to find high QC parent."); - return None; - }; - let parent_leaf = leaf.clone(); - - // Prepare the DA Proposal - // let Some(parent_leaf) = self.parent_leaf().await else { - // warn!("Couldn't find high QC parent in state map."); - // return None; - // }; - - drop(consensus); - - let mut block = ::StateType::next_block(None); - let txns = self.wait_for_transactions(parent_leaf).await?; - + return None; + } + SequencingHotShotEvent::BlockReady(block) => { self.committee_exchange .network() .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions( @@ -727,13 +662,6 @@ where )) .await; - for txn in txns { - if let Ok(new_block) = block.add_transaction_raw(&txn) { - block = new_block; - continue; - } - } - let signature = self.committee_exchange.sign_da_proposal(&block.commit()); let data: DAProposal = DAProposal { deltas: block.clone(), @@ -797,8 +725,6 @@ where )) .await; } - - return None; } SequencingHotShotEvent::Timeout(view) => { @@ -818,70 +744,6 @@ where None } - /// return None if we can't get transactions - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Vote Collection Task", level = "error")] - - async fn wait_for_transactions( - &self, - parent_leaf: SequencingLeaf, - ) -> Option> { - let task_start_time = Instant::now(); - - // let parent_leaf = self.parent_leaf().await?; - let previous_used_txns = match parent_leaf.deltas { - Either::Left(block) => block.contained_transactions(), - Either::Right(_commitment) => HashSet::new(), - }; - - let consensus = self.consensus.read().await; - - let receiver = consensus.transactions.subscribe().await; - - loop { - let all_txns = consensus.transactions.cloned().await; - debug!("Size of transactions: {}", all_txns.len()); - let unclaimed_txns: Vec<_> = all_txns - .iter() - .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) - .collect(); - - let time_past = task_start_time.elapsed(); - if unclaimed_txns.len() < self.api.min_transactions() - && (time_past < self.api.propose_max_round_time()) - { - let duration = self.api.propose_max_round_time() - time_past; - let result = async_timeout(duration, receiver.recv()).await; - match result { - Err(_) => { - // Fall through below to updating new block - error!( - "propose_max_round_time passed, sending transactions we have so far" - ); - } - Ok(Err(e)) => { - // Something unprecedented is wrong, and `transactions` has been dropped - error!("Channel receiver error for SubscribableRwLock {:?}", e); - return None; - } - Ok(Ok(_)) => continue, - } - } - break; - } - let all_txns = consensus.transactions.cloned().await; - let txns: Vec = all_txns - .iter() - .filter_map(|(txn_hash, txn)| { - if previous_used_txns.contains(txn_hash) { - None - } else { - Some(txn.clone()) - } - }) - .collect(); - Some(txns) - } - /// Filter the DA event. pub fn filter(event: &SequencingHotShotEvent) -> bool { matches!( @@ -889,7 +751,7 @@ where SequencingHotShotEvent::DAProposalRecv(_, _) | SequencingHotShotEvent::DAVoteRecv(_) | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::TransactionsRecv(_) + | SequencingHotShotEvent::BlockReady(_) | SequencingHotShotEvent::Timeout(_) | SequencingHotShotEvent::VidDisperseRecv(_, _) | SequencingHotShotEvent::VidVoteRecv(_) diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index aca2cafdea..5e7492d84a 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -17,6 +17,9 @@ pub mod consensus; /// The task which implements the main parts of data availability. pub mod da; +/// The task which implements all transaction handling +pub mod transactions; + /// Defines the events passed between tasks pub mod events; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 8ea635d750..fa900ebe79 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -12,30 +12,25 @@ use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::{HSTWithEvent}, }; use hotshot_types::{ certificate::DACertificate, - consensus::{Consensus, View}, - data::{DAProposal, ProposalType, SequencingLeaf}, - message::{CommitteeConsensusMessage, Message, Proposal, SequencingMessage}, + consensus::{Consensus}, + data::{SequencingLeaf}, + message::{Message, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, - election::{CommitteeExchangeType, ConsensusExchange, Membership}, - network::{CommunicationChannel, ConsensusIntentEvent}, + election::{ConsensusExchange}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, - signature_key::SignatureKey, - state::ConsensusTime, Block, State, }, - utils::ViewInner, - vote::VoteAccumulator, }; use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; use std::{ - collections::{HashMap, HashSet}, + collections::HashSet, sync::Arc, time::Instant, }; @@ -77,17 +72,11 @@ pub struct TransactionTaskState< /// the committee exchange pub committee_exchange: Arc>, - /// The view and ID of the current vote collection task, if there is one. - pub vote_collector: Option<(TYPES::Time, usize, usize)>, - /// Global events stream to publish events pub event_stream: ChannelStream>, /// This state's ID pub id: u64, - - /// Event stream to publish events to the application layer - pub output_event_stream: ChannelStream>, } @@ -99,7 +88,7 @@ impl< ConsensusMessage = SequencingMessage, >, A: SequencingConsensusApi, I> + 'static, - > DATaskState + > TransactionTaskState where CommitteeEx: ConsensusExchange< TYPES, @@ -109,7 +98,7 @@ where >, { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] pub async fn handle_event( &mut self, @@ -144,9 +133,10 @@ where return None; } - SequencingHotShotEvent::DAProposalRecv(proposal, sender) => { - } SequencingHotShotEvent::LeafDecided(leaf_chain) => { + let mut included_txns = HashSet::new(); + let mut included_txn_size = 0; + let mut included_txn_count = 0; for leaf in leaf_chain { match &leaf.deltas { Left(block) => { @@ -158,19 +148,103 @@ where Right(_) => {} } } + let mut consensus = self.consensus.write().await; + let txns = consensus.transactions.cloned().await; + + let _ = included_txns.iter().map(|hash| { + if !txns.contains_key(hash) { + consensus.seen_transactions.insert(*hash); + } + }); + drop(txns); + consensus + .transactions + .modify(|txns| { + *txns = txns + .drain() + .filter(|(txn_hash, txn)| { + if included_txns.contains(txn_hash) { + included_txn_count += 1; + included_txn_size += bincode_opts() + .serialized_size(txn) + .unwrap_or_default(); + false + } else { + true + } + }) + .collect(); + }) + .await; + + consensus + .metrics + .outstanding_transactions + .update(-included_txn_count); + consensus + .metrics + .outstanding_transactions_memory_size + .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); + return None; } - // TODO ED Update high QC through QCFormed event SequencingHotShotEvent::ViewChange(view) => { - } + if *self.cur_view >= *view { + return None; + } - SequencingHotShotEvent::Timeout(view) => { - self.committee_exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) - .await; - } + if *view - *self.cur_view > 1 { + error!("View changed by more than 1 going to view {:?}", view); + } + self.cur_view = view; + + // If we are not the next leader (DA leader for this view) immediately exit + if !self.committee_exchange.is_leader(self.cur_view + 1) { + // panic!("We are not the DA leader for view {}", *self.cur_view + 1); + return None; + } + + + // ED Copy of parent_leaf() function from sequencing leader + + let consensus = self.consensus.read().await; + let parent_view_number = &consensus.high_qc.view_number; + + let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + error!( + "Couldn't find high QC parent in state map. Parent view {:?}", + parent_view_number + ); + return None; + }; + let Some(leaf) = parent_view.get_leaf_commitment() else { + error!( + ?parent_view_number, + ?parent_view, + "Parent of high QC points to a view without a proposal" + ); + return None; + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + error!("Failed to find high QC parent."); + return None; + }; + let parent_leaf = leaf.clone(); + drop(consensus); + + let mut block = ::StateType::next_block(None); + let txns = self.wait_for_transactions(parent_leaf).await?; + + for txn in txns { + if let Ok(new_block) = block.add_transaction_raw(&txn) { + block = new_block; + continue; + } + } + self.event_stream.publish(SequencingHotShotEvent::BlockReady(block)).await; + return None; + } SequencingHotShotEvent::Shutdown => { return Some(HotShotTaskCompleted::ShutDown); } @@ -178,4 +252,104 @@ where } None } -} \ No newline at end of file + + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] + async fn wait_for_transactions( + &self, + parent_leaf: SequencingLeaf, + ) -> Option> { + let task_start_time = Instant::now(); + + // let parent_leaf = self.parent_leaf().await?; + let previous_used_txns = match parent_leaf.deltas { + Either::Left(block) => block.contained_transactions(), + Either::Right(_commitment) => HashSet::new(), + }; + + let consensus = self.consensus.read().await; + + let receiver = consensus.transactions.subscribe().await; + + loop { + let all_txns = consensus.transactions.cloned().await; + debug!("Size of transactions: {}", all_txns.len()); + let unclaimed_txns: Vec<_> = all_txns + .iter() + .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) + .collect(); + + let time_past = task_start_time.elapsed(); + if unclaimed_txns.len() < self.api.min_transactions() + && (time_past < self.api.propose_max_round_time()) + { + let duration = self.api.propose_max_round_time() - time_past; + let result = async_timeout(duration, receiver.recv()).await; + match result { + Err(_) => { + // Fall through below to updating new block + error!( + "propose_max_round_time passed, sending transactions we have so far" + ); + } + Ok(Err(e)) => { + // Something unprecedented is wrong, and `transactions` has been dropped + error!("Channel receiver error for SubscribableRwLock {:?}", e); + return None; + } + Ok(Ok(_)) => continue, + } + } + break; + } + let all_txns = consensus.transactions.cloned().await; + let txns: Vec = all_txns + .iter() + .filter_map(|(txn_hash, txn)| { + if previous_used_txns.contains(txn_hash) { + None + } else { + Some(txn.clone()) + } + }) + .collect(); + Some(txns) + } + + pub fn filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::TransactionsRecv(_) + | SequencingHotShotEvent::DAVoteRecv(_) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::ViewChange(_) + ) + } +} + +/// task state implementation for Transactions Task +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TS for TransactionTaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ +} + +/// Type alias for DA Task Types +pub type TransactionsTaskTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + TransactionTaskState, +>; \ No newline at end of file From 69c8c3dea76b56a1525769155c1c050a97b7933c Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Sep 2023 11:22:52 -0400 Subject: [PATCH 0072/1393] hook up transacion task and work on tests --- hotshot/src/lib.rs | 9 ++++++++- hotshot/src/tasks/mod.rs | 10 +++++++--- task-impls/src/consensus.rs | 2 -- task-impls/src/transactions.rs | 30 ++++++++++++------------------ testing/tests/da_task.rs | 2 ++ testing/tests/network_task.rs | 32 ++++++++++++++++++++------------ 6 files changed, 49 insertions(+), 36 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4c2a42a0b4..cae3844716 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -36,7 +36,7 @@ use crate::{ certificate::QuorumCertificate, tasks::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_view_sync_task, + add_transaction_task, add_view_sync_task, }, traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, @@ -770,6 +770,13 @@ where handle.clone(), ) .await; + let task_runner = add_transaction_task( + task_runner, + internal_event_stream.clone(), + committee_exchange.clone(), + handle.clone(), + ) + .await; let task_runner = add_view_sync_task::( task_runner, internal_event_stream.clone(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4486914dfc..b7d66a4055 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -17,12 +17,12 @@ use hotshot_task::{ use hotshot_task_impls::{ consensus::{consensus_event_filter, ConsensusTaskTypes, SequencingConsensusTaskState}, da::{DATaskState, DATaskTypes}, - transactions::{TransactionTaskState, TransactionsTaskTypes}, events::SequencingHotShotEvent, network::{ NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, NetworkMessageTaskTypes, NetworkTaskKind, }, + transactions::{TransactionTaskState, TransactionsTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ @@ -408,7 +408,6 @@ where task_runner.add_task(da_task_id, da_name.to_string(), da_task) } - /// add the Transaction Handling task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy @@ -448,7 +447,12 @@ where id: handle.hotshot.inner.id, }; let transactions_event_handler = HandleEvent(Arc::new( - move |event, mut state: TransactionTaskState>| { + move |event, + mut state: TransactionTaskState< + TYPES, + I, + HotShotSequencingConsensusApi, + >| { async move { let completion_status = state.handle_event(event).await; (completion_status, state) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5cd782405f..2304641d6f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,12 +1,10 @@ use crate::events::SequencingHotShotEvent; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, - async_primitives::subscribable_rwlock::ReadView, }; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use bincode::Options; use bitvec::prelude::*; use commit::Committable; use core::time::Duration; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index fa900ebe79..4c3c747cf3 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,33 +13,29 @@ use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, task::{HotShotTaskCompleted, TS}, - task_impls::{HSTWithEvent}, + task_impls::HSTWithEvent, }; use hotshot_types::{ certificate::DACertificate, - consensus::{Consensus}, - data::{SequencingLeaf}, + consensus::Consensus, + data::SequencingLeaf, message::{Message, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, - election::{ConsensusExchange}, + election::ConsensusExchange, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, Block, State, }, }; use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; -use std::{ - collections::HashSet, - sync::Arc, - time::Instant, -}; +use std::{collections::HashSet, sync::Arc, time::Instant}; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] /// Error type for consensus tasks pub struct ConsensusTaskError {} - + /// Tracks state of a DA task pub struct TransactionTaskState< TYPES: NodeType, @@ -79,7 +75,6 @@ pub struct TransactionTaskState< pub id: u64, } - impl< TYPES: NodeType, I: NodeImplementation< @@ -165,9 +160,8 @@ where .filter(|(txn_hash, txn)| { if included_txns.contains(txn_hash) { included_txn_count += 1; - included_txn_size += bincode_opts() - .serialized_size(txn) - .unwrap_or_default(); + included_txn_size += + bincode_opts().serialized_size(txn).unwrap_or_default(); false } else { true @@ -186,7 +180,6 @@ where .outstanding_transactions_memory_size .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); return None; - } SequencingHotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { @@ -204,7 +197,6 @@ where return None; } - // ED Copy of parent_leaf() function from sequencing leader let consensus = self.consensus.read().await; @@ -242,7 +234,9 @@ where continue; } } - self.event_stream.publish(SequencingHotShotEvent::BlockReady(block)).await; + self.event_stream + .publish(SequencingHotShotEvent::BlockReady(block)) + .await; return None; } SequencingHotShotEvent::Shutdown => { @@ -352,4 +346,4 @@ pub type TransactionsTaskTypes = HSTWithEvent< SequencingHotShotEvent, ChannelStream>, TransactionTaskState, ->; \ No newline at end of file +>; diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 64b70ef40c..6c5273e8df 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -75,6 +75,7 @@ async fn test_da_task() { // In view 1, node 2 is the next leader. input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::BlockReady(block.clone())); input.push(SequencingHotShotEvent::DAProposalRecv( message.clone(), pub_key, @@ -86,6 +87,7 @@ async fn test_da_task() { input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(SequencingHotShotEvent::BlockReady(block.clone()), 1); output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); output.insert( SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index c14052bae3..ed1e274ff1 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -73,18 +73,19 @@ async fn test_network_task() { let mut output = HashMap::new(); input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(SequencingHotShotEvent::DAProposalSend( - da_proposal.clone(), - pub_key, - )); - input.push(SequencingHotShotEvent::VidDisperseSend( - da_vid_disperse.clone(), - pub_key, - )); - input.push(SequencingHotShotEvent::QuorumProposalSend( - quorum_proposal.clone(), - pub_key, - )); + input.push(SequencingHotShotEvent::BlockReady(block.clone())); + // input.push(SequencingHotShotEvent::DAProposalSend( + // da_proposal.clone(), + // pub_key, + // )); + // input.push(SequencingHotShotEvent::VidDisperseSend( + // da_vid_disperse.clone(), + // pub_key, + // )); + // input.push(SequencingHotShotEvent::QuorumProposalSend( + // quorum_proposal.clone(), + // pub_key, + // )); input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); input.push(SequencingHotShotEvent::Shutdown); @@ -93,6 +94,11 @@ async fn test_network_task() { SequencingHotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); + output.insert(SequencingHotShotEvent::BlockReady(block.clone()), 2); + output.insert( + SequencingHotShotEvent::VidDisperseRecv(da_vid_disperse.clone(), pub_key), + 1, + ); output.insert( SequencingHotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), 2, // 2 occurrences: 1 from `input`, 1 from the DA task @@ -102,6 +108,8 @@ async fn test_network_task() { // view number check in `publish_proposal_if_able` in consensus.rs, and we will see an error in // logging, but that is fine for testing as long as the network task is correctly handling // events. + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(1)), 1); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); output.insert( SequencingHotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, From 9b6261f4691552b8e8f4113a732a77b0641512c8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Sep 2023 11:26:02 -0400 Subject: [PATCH 0073/1393] lint --- task-impls/src/consensus.rs | 6 ++---- task-impls/src/da.rs | 18 +++++------------- task-impls/src/transactions.rs | 7 ++----- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 2304641d6f..c7a96c0b84 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,7 +1,5 @@ use crate::events::SequencingHotShotEvent; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, -}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -35,7 +33,7 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote::{QuorumVote, VoteAccumulator, VoteType}, }; -use hotshot_utils::bincode::bincode_opts; + use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 8579996569..0ac48bcad7 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,10 +1,7 @@ use crate::events::SequencingHotShotEvent; -use async_compatibility_layer::{ - art::{async_spawn, async_timeout}, - async_primitives::subscribable_rwlock::ReadView, -}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use bincode::config::Options; + use bitvec::prelude::*; use commit::Committable; use either::{Either, Left, Right}; @@ -27,19 +24,14 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, - Block, State, + Block, }, utils::ViewInner, vote::VoteAccumulator, }; -use hotshot_utils::bincode::bincode_opts; -use rand_chacha::rand_core::block; + use snafu::Snafu; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::Instant, -}; +use std::{collections::HashMap, sync::Arc}; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4c3c747cf3..abb37af883 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,14 +1,11 @@ use crate::events::SequencingHotShotEvent; use async_compatibility_layer::{ - art::{async_spawn, async_timeout}, - async_primitives::subscribable_rwlock::ReadView, + art::async_timeout, async_primitives::subscribable_rwlock::ReadView, }; use async_lock::RwLock; use bincode::config::Options; -use bitvec::prelude::*; use commit::Committable; use either::{Either, Left, Right}; -use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, @@ -61,7 +58,6 @@ pub struct TransactionTaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - // pub transactions: Arc>>, /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>>, @@ -309,6 +305,7 @@ where Some(txns) } + /// Event filter for the transaction task pub fn filter(event: &SequencingHotShotEvent) -> bool { matches!( event, From b806f6d34023f71820b0bc4a0256c85403286f2d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Sep 2023 11:39:49 -0400 Subject: [PATCH 0074/1393] fix network test --- testing/tests/network_task.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index ed1e274ff1..5ad5adefdd 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -74,18 +74,18 @@ async fn test_network_task() { input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); input.push(SequencingHotShotEvent::BlockReady(block.clone())); - // input.push(SequencingHotShotEvent::DAProposalSend( - // da_proposal.clone(), - // pub_key, - // )); - // input.push(SequencingHotShotEvent::VidDisperseSend( - // da_vid_disperse.clone(), - // pub_key, - // )); - // input.push(SequencingHotShotEvent::QuorumProposalSend( - // quorum_proposal.clone(), - // pub_key, - // )); + input.push(SequencingHotShotEvent::DAProposalSend( + da_proposal.clone(), + pub_key, + )); + input.push(SequencingHotShotEvent::VidDisperseSend( + da_vid_disperse.clone(), + pub_key, + )); + input.push(SequencingHotShotEvent::QuorumProposalSend( + quorum_proposal.clone(), + pub_key, + )); input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); input.push(SequencingHotShotEvent::Shutdown); From 892d6a58e147db86ee26b8a49a48e4b93064314d Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 12:23:52 -0400 Subject: [PATCH 0075/1393] Middle of updated cert formation functions --- types/src/traits/election.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 4a70d2f732..fd3f99db11 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -201,12 +201,12 @@ where } /// Build a QC from the threshold signature and commitment - // TODO ED Rename this function + // TODO ED Rename this function and rework this function parameters + // Assumes last vote was valid since it caused a QC to form. + // Removes need for relay on other cert specific fields fn from_signatures_and_commitment( - view_number: TIME, signatures: AssembledSignature, - commit: Commitment, - relay: Option, + vote: Self::Vote ) -> Self; /// Get the view number. @@ -216,6 +216,7 @@ where fn signatures(&self) -> AssembledSignature; // TODO (da) the following functions should be refactored into a QC-specific trait. + // TODO ED Make an issue for this /// Get the leaf commitment. fn leaf_commitment(&self) -> Commitment; From 2b4dbfa898d27e364e5b8b684168328da6d34a2d Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 12:54:28 -0400 Subject: [PATCH 0076/1393] hotfix publish_proposal_if_able function --- task-impls/src/consensus.rs | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 03fcc387a8..63f3f45f1d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -543,7 +543,7 @@ where let view = proposal.data.get_view_number(); if view < self.cur_view { - error!("view too high"); + error!("view too high {:?}", proposal.data.clone()); return; } @@ -867,7 +867,8 @@ where "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc).await; + self.publish_proposal_if_able(qc.clone(), qc.view_number + 1) + .await; } if !self.vote_if_able().await { // TOOD ED This means we publish the proposal without updating our own view, which doesn't seem right @@ -1019,7 +1020,10 @@ where *qc.view_number ); - if self.publish_proposal_if_able(qc.clone()).await { + if self + .publish_proposal_if_able(qc.clone(), qc.view_number + 1) + .await + { self.update_view(qc.view_number + 1).await; } } @@ -1068,7 +1072,7 @@ where let consensus = self.consensus.read().await; let qc = consensus.high_qc.clone(); drop(consensus); - if !self.publish_proposal_if_able(qc).await { + if !self.publish_proposal_if_able(qc, self.cur_view).await { error!( "Failed to publish proposal on view change. View = {:?}", self.cur_view @@ -1096,12 +1100,16 @@ where } /// Sends a proposal if possible from the high qc we have - pub async fn publish_proposal_if_able(&self, qc: QuorumCertificate) -> bool { + pub async fn publish_proposal_if_able( + &self, + _qc: QuorumCertificate, + view: TYPES::Time, + ) -> bool { // TODO ED This should not be qc view number + 1 - if !self.quorum_exchange.is_leader(qc.view_number + 1) { + if !self.quorum_exchange.is_leader(view) { error!( "Somehow we formed a QC but are not the leader for the next view {:?}", - qc.view_number + 1 + view ); return false; } @@ -1164,7 +1172,7 @@ where } let leaf = SequencingLeaf { - view_number: *parent_view_number + 1, + view_number: view, height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), From 29b213f92c255766ea429b6be8862702aef95180 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 13:52:23 -0400 Subject: [PATCH 0077/1393] Updated cert creation function signatures; still need to update logic --- types/src/certificate.rs | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 8a7c394167..7a3820b054 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -4,6 +4,7 @@ use crate::vote::DAVoteAccumulator; use crate::vote::QuorumVote; use crate::vote::QuorumVoteAccumulator; use crate::vote::ViewSyncVoteAccumulator; +use crate::vote::VoteType; use crate::{ data::{fake_commitment, serialize_signature, LeafType}, traits::{ @@ -164,14 +165,13 @@ impl> type VoteAccumulator = QuorumVoteAccumulator; fn from_signatures_and_commitment( - view_number: TYPES::Time, signatures: AssembledSignature, - commit: Commitment, - _relay: Option, + vote: Self::Vote ) -> Self { let qc = QuorumCertificate { - leaf_commitment: commit, - view_number, + // TODO ED Change this to getter functions + leaf_commitment: vote.get_data(), + view_number: vote.get_view(), signatures, is_genesis: false, }; @@ -237,15 +237,13 @@ impl SignedCertificate; fn from_signatures_and_commitment( - view_number: TYPES::Time, signatures: AssembledSignature, - commit: Commitment, - _relay: Option, + vote: Self::Vote ) -> Self { DACertificate { - view_number, + view_number: vote.get_view(), signatures, - block_commitment: commit, + block_commitment: vote.block_commitment, } } @@ -328,14 +326,12 @@ impl type VoteAccumulator = ViewSyncVoteAccumulator, Self::Vote>; /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( - view_number: TYPES::Time, signatures: AssembledSignature, - _commit: Commitment>, - relay: Option, + vote: Self::Vote ) -> Self { let certificate_internal = ViewSyncCertificateInternal { - round: view_number, - relay: relay.unwrap(), + round: vote.get_view(), + relay: vote.relay, signatures: signatures.clone(), }; match signatures { From 26a10e5bd6d4a9e22b15dabba802620cac84a8d6 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 13:53:13 -0400 Subject: [PATCH 0078/1393] Ignore network test --- testing/tests/network_task.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f3d8d9f8bb..aa0c80024f 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_network_task() { use hotshot::demos::sdemo::{SDemoBlock, SDemoNormalBlock}; use hotshot_task_impls::harness::run_harness; From 369d1090e1521f0e5e2feb6d2d8b631f737fcfee Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 14 Sep 2023 11:46:32 -0700 Subject: [PATCH 0079/1393] Remove commitment --- hotshot/src/demos/sdemo.rs | 7 ------- types/src/traits/block_contents.rs | 15 --------------- 2 files changed, 22 deletions(-) diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 7e7472683a..66f9ec8133 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -236,13 +236,6 @@ impl BlockPayload for SDemoBlock { .collect(), } } - - fn commitment(&self) -> VidResult> { - // TODO: Get the payload commitment after VID integration. - // - // - unimplemented!(); - } } impl State for SDemoState { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index afc714e169..861645d7fc 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -58,14 +58,6 @@ pub trait BlockPayload: /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec fn contained_transactions(&self) -> HashSet>; - - /// Compute the VID payload commitment. - /// - /// # Errors - /// - `VidResult::Err` in case of actual error. - /// - `VidResult::Ok(Result::Err)` if verification fails. - /// - `VidResult::Ok(Result::Ok)` if verification succeeds. - fn commitment(&self) -> VidResult>; } /// Abstraction over any type of transaction. Used by [`BlockPayload`]. @@ -159,13 +151,6 @@ pub mod dummy { fn contained_transactions(&self) -> HashSet> { HashSet::new() } - - fn commitment(&self) -> VidResult> { - // TODO: Get the payload commitment after VID integration. - // - // - unimplemented!(); - } } impl TestableBlock for DummyBlock { From 0d1d2c4e8a736efdfc97aa92356d642c31890576 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 14 Sep 2023 13:51:33 -0700 Subject: [PATCH 0080/1393] Fix lint --- hotshot/src/demos/sdemo.rs | 1 - types/src/traits/block_contents.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 66f9ec8133..30d12bc579 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -31,7 +31,6 @@ use hotshot_types::{ BlockPayload, State, }, }; -use jf_primitives::vid::VidResult; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 861645d7fc..eb4dce2753 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -4,7 +4,6 @@ //! behaviors that a block is expected to have. use commit::{Commitment, Committable}; -use jf_primitives::vid::VidResult; use serde::{de::DeserializeOwned, Serialize}; use std::{ @@ -71,7 +70,6 @@ pub mod dummy { use std::fmt::Display; use super::{BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize}; - use jf_primitives::vid::VidResult; use rand::Rng; use serde::Deserialize; From 9e2af8b5e4330dcd3d8c223565a5ec1b4ebf05b1 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 18:21:51 -0400 Subject: [PATCH 0081/1393] Update finished to cert creation functions --- types/src/certificate.rs | 10 +++-- types/src/traits/election.rs | 71 +++++++----------------------------- types/src/vote.rs | 2 +- 3 files changed, 21 insertions(+), 62 deletions(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 7a3820b054..5eeda26510 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -168,9 +168,13 @@ impl> signatures: AssembledSignature, vote: Self::Vote ) -> Self { + let leaf_commitment = match vote.clone() { + QuorumVote::Yes(vote_internal) | QuorumVote::No (vote_internal) => vote_internal.leaf_commitment, + _ => unimplemented!() + }; let qc = QuorumCertificate { - // TODO ED Change this to getter functions - leaf_commitment: vote.get_data(), + // TODO ED Change this to getter functions, make get_commitment function + leaf_commitment, view_number: vote.get_view(), signatures, is_genesis: false, @@ -331,7 +335,7 @@ impl ) -> Self { let certificate_internal = ViewSyncCertificateInternal { round: vote.get_view(), - relay: vote.relay, + relay: vote.relay(), signatures: signatures.clone(), }; match signatures { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fd3f99db11..ddb22aeaf5 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -202,11 +202,11 @@ where /// Build a QC from the threshold signature and commitment // TODO ED Rename this function and rework this function parameters - // Assumes last vote was valid since it caused a QC to form. - // Removes need for relay on other cert specific fields + // Assumes last vote was valid since it caused a QC to form. + // Removes need for relay on other cert specific fields fn from_signatures_and_commitment( signatures: AssembledSignature, - vote: Self::Vote + vote: Self::Vote, ) -> Self; /// Get the view number. @@ -216,7 +216,7 @@ where fn signatures(&self) -> AssembledSignature; // TODO (da) the following functions should be refactored into a QC-specific trait. - // TODO ED Make an issue for this + // TODO ED Make an issue for this /// Get the leaf commitment. fn leaf_commitment(&self) -> Commitment; @@ -469,53 +469,7 @@ pub trait ConsensusExchange: Send + Sync { vota_meta: VoteMetaData, accumulator: VoteAccumulator, ) -> Either, Self::Certificate> { - if !self.is_valid_vote( - &vota_meta.encoded_key, - &vota_meta.encoded_signature, - vota_meta.data.clone(), - // Ignoring deserialization errors below since we are getting rid of it soon - Checked::Unchecked(vota_meta.vote_token.clone()), - ) { - error!("Invalid vote!"); - return Either::Left(accumulator); - } - - if let Some(key) = ::from_bytes(&vota_meta.encoded_key) - { - let stake_table_entry = key.get_stake_table_entry(1u64); - let append_node_id = self - .membership() - .get_committee_qc_stake_table() - .iter() - .position(|x| *x == stake_table_entry.clone()) - .unwrap(); - - match accumulator.append(( - vota_meta.commitment, - ( - vota_meta.encoded_key.clone(), - ( - vota_meta.encoded_signature.clone(), - self.membership().get_committee_qc_stake_table(), - append_node_id, - vota_meta.data, - vota_meta.vote_token, - ), - ), - )) { - Either::Left(accumulator) => Either::Left(accumulator), - Either::Right(signatures) => { - Either::Right(Self::Certificate::from_signatures_and_commitment( - vota_meta.view_number, - signatures, - vota_meta.commitment, - vota_meta.relay, - )) - } - } - } else { - Either::Left(accumulator) - } + todo!() // TODO ED Remove this function } /// Add a vote to the accumulating signature. Return The certificate if the vote @@ -573,7 +527,7 @@ pub trait ConsensusExchange: Send + Sync { } let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); - // TODO ED Could we make this part of the vote in the future? It's only a usize. + // TODO ED Could we make this part of the vote in the future? It's only a usize. let append_node_id = self .membership() .get_committee_qc_stake_table() @@ -582,15 +536,16 @@ pub trait ConsensusExchange: Send + Sync { .unwrap(); // TODO ED Should make append function take a reference to vote - match accumulator.append(vote.clone(), append_node_id, self.membership().get_committee_qc_stake_table()) { + match accumulator.append( + vote.clone(), + append_node_id, + self.membership().get_committee_qc_stake_table(), + ) { Either::Left(accumulator) => Either::Left(accumulator), Either::Right(signatures) => { - // TODO ED Update this function to just take in the signatures and most recent vote + // TODO ED Update this function to just take in the signatures and most recent vote Either::Right(Self::Certificate::from_signatures_and_commitment( - vote.get_view(), - signatures, - *commit, - Some(0), + signatures, vote.clone(), )) } } diff --git a/types/src/vote.rs b/types/src/vote.rs index d76a0a395f..bbf511df6e 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -612,7 +612,7 @@ impl< _ => return Either::Left(self), }; - error!("Vote is {:?}", vote.clone()); + // error!("Vote is {:?}", vote.clone()); let encoded_key = vote.get_key().to_bytes(); From 137d2c1a64e2fc493fdd2c6a6cee87d239033148 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 19:15:25 -0400 Subject: [PATCH 0082/1393] Fix finalize view sync cert bug --- types/src/vote.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/types/src/vote.rs b/types/src/vote.rs index bbf511df6e..76a2bc9aa7 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -628,20 +628,33 @@ impl< .entry(vote_commitment) .or_insert_with(|| (0, BTreeMap::new())); + // Check for duplicate vote + if pre_commit_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + let (commit_stake_casted, commit_vote_map) = self .commit_vote_outcomes .entry(vote_commitment) .or_insert_with(|| (0, BTreeMap::new())); + if commit_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + let (finalize_stake_casted, finalize_vote_map) = self .finalize_vote_outcomes .entry(vote_commitment) .or_insert_with(|| (0, BTreeMap::new())); + if finalize_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + // update the active_keys and sig_lists // TODO ED Possible bug where a node sends precommit vote and then commit vote after // precommit cert is formed, their commit vote won't be counted because of this check - // Probably need separate signers vecs. + // Probably need separate signers vecs. if self.signers.get(vote_node_id).as_deref() == Some(&true) { error!("node id already in signers"); return Either::Left(self); @@ -885,6 +898,7 @@ where // update the active_keys and sig_lists if self.signers.get(node_id).as_deref() == Some(&true) { error!("node id already in signers"); + panic!(); return Either::Left(self); } self.signers.set(node_id, true); From cd5805865bafede030a1ce9d914fa53eb9fd75bc Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 19:49:49 -0400 Subject: [PATCH 0083/1393] lints --- hotshot/src/tasks/mod.rs | 2 +- .../src/traits/election/static_committee.rs | 1 - .../src/traits/networking/libp2p_network.rs | 6 +--- task-impls/src/consensus.rs | 9 +----- task-impls/src/da.rs | 31 +++++++------------ task-impls/src/view_sync.rs | 1 - types/src/certificate.rs | 14 ++++----- types/src/message.rs | 2 +- types/src/traits/election.rs | 3 +- types/src/traits/network.rs | 2 -- types/src/vote.rs | 10 ------ 11 files changed, 24 insertions(+), 57 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index c2949cbd3e..8b96117428 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -4,7 +4,6 @@ use crate::{ async_spawn, types::SystemContextHandle, DACertificate, HotShotSequencingConsensusApi, QuorumCertificate, SequencingQuorumEx, }; -use serde::Serialize; use async_compatibility_layer::art::{async_sleep, async_spawn_local}; use commit::Committable; use futures::FutureExt; @@ -42,6 +41,7 @@ use hotshot_types::{ }, vote::{ViewSyncData, VoteType}, }; +use serde::Serialize; use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; /// event for global event stream diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 0e79efacab..44c4075e6f 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -112,7 +112,6 @@ where view_number: TYPES::Time, private_key: &::PrivateKey, ) -> std::result::Result>, ElectionError> { - // TODO ED Below let pub_key = PUBKEY::from_private(private_key); let entry = pub_key.get_stake_table_entry(1u64); if !self.committee_nodes_with_stake.contains(&entry) { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a0ae5b9d32..cdabb98091 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -894,11 +894,7 @@ where } } -impl< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 6d4a8d4218..f4d9d8ea3f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -198,7 +198,6 @@ where Commitment = SequencingLeaf, >, { - // TODO ED Emit a view change event upon new proposal? match event { SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { QuorumVote::Yes(vote_internal) => { @@ -216,8 +215,7 @@ where } let accumulator = state.accumulator.left().unwrap(); - // TODO ED Maybe we don't need this to take in commitment? Can just get it from the vote directly if it is always - // going to be passed in as the vote.commitment + match state.quorum_exchange.accumulate_vote_2( accumulator, &vote, @@ -445,7 +443,6 @@ where }; - // TODO ED Only publish event in vote if able if let GeneralConsensusMessage::Vote(vote) = message { debug!("Sending vote to next quorum leader {:?}", vote.get_view()); self.event_stream @@ -914,8 +911,6 @@ where return; } - // TODO ED Should remove this match because we'd always want to collect votes no matter the type on qc - // Though will need a sep accumulator for Timeout votes match vote.clone() { QuorumVote::Yes(vote_internal) => { let handle_event = HandleEvent(Arc::new(move |event, state| { @@ -947,7 +942,6 @@ where phantom: PhantomData, }; - // TODO ED Get vote data here instead of cloning into block commitment field of vote let accumulator = self.quorum_exchange.accumulate_vote_2( new_accumulator, &vote, @@ -1129,7 +1123,6 @@ where _qc: QuorumCertificate, view: TYPES::Time, ) -> bool { - // TODO ED This should not be qc view number + 1 if !self.quorum_exchange.is_leader(view) { error!( "Somehow we formed a QC but are not the leader for the next view {:?}", diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index d89b5b65d8..9d1b6840f1 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -105,7 +105,7 @@ pub struct DAVoteCollectionTaskState< pub committee_exchange: Arc>, #[allow(clippy::type_complexity)] - pub accumulator2: Either< + pub accumulator: Either< as SignedCertificate< TYPES, TYPES::Time, @@ -114,7 +114,6 @@ pub struct DAVoteCollectionTaskState< >>::VoteAccumulator, DACertificate, >, - // TODO ED Make this just "view" since it is only for this task /// the current view pub cur_view: TYPES::Time, /// event stream for channel events @@ -157,21 +156,20 @@ where // panic!("Vote handle received DA vote for view {}", *vote.current_view); // For the case where we receive votes after we've made a certificate - if state.accumulator2.is_right() { + if state.accumulator.is_right() { debug!("DA accumulator finished view: {:?}", state.cur_view); return (None, state); } - let accumulator2 = state.accumulator2.left().unwrap(); - // TODO ED Maybe we don't need this to take in commitment? Can just get it from the vote directly if it is always - // going to be passed in as the vote.commitment + let accumulator = state.accumulator.left().unwrap(); + match state.committee_exchange.accumulate_vote_2( - accumulator2, + accumulator, &vote, &vote.block_commitment, ) { Left(new_accumulator) => { - state.accumulator2 = either::Left(new_accumulator); + state.accumulator = either::Left(new_accumulator); } Right(dac) => { @@ -184,8 +182,7 @@ where )) .await; - // TODO ED Rename this to just accumulator - state.accumulator2 = Right(dac.clone()); + state.accumulator = Right(dac.clone()); state .committee_exchange .network() @@ -286,8 +283,6 @@ where ) -> Option { match event { SequencingHotShotEvent::TransactionsRecv(transactions) => { - // TODO ED Add validation checks - let mut consensus = self.consensus.write().await; consensus .get_transactions() @@ -425,8 +420,7 @@ where phantom: PhantomData, }; - // TODO ED Get vote data here instead of cloning into block commitment field of vote - let accumulator2 = self.committee_exchange.accumulate_vote_2( + let accumulator = self.committee_exchange.accumulate_vote_2( new_accumulator, &vote, &vote.clone().block_commitment, @@ -436,7 +430,7 @@ where let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), - accumulator2: accumulator2, + accumulator: accumulator, cur_view: view, event_stream: self.event_stream.clone(), id: self.id, @@ -508,8 +502,7 @@ where phantom: PhantomData, }; - // TODO ED Get vote data here instead of cloning into block commitment field of vote - let accumulator2 = self.committee_exchange.accumulate_vote_2( + let accumulator = self.committee_exchange.accumulate_vote_2( new_accumulator, &vote, &vote.clone().block_commitment, @@ -519,7 +512,7 @@ where let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), - accumulator2: accumulator2, + accumulator: accumulator, cur_view: view, event_stream: self.event_stream.clone(), id: self.id, @@ -625,7 +618,6 @@ where // } // } } - // TODO ED Update high QC through QCFormed event SequencingHotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; @@ -638,7 +630,6 @@ where // Inject view info into network // ED I think it is possible that you receive a quorum proposal, vote on it and update your view before the da leader has sent their proposal, and therefore you skip polling for this view? - // TODO ED Only poll if you are on the committee let is_da = self .committee_exchange .membership() diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index b29943ced4..e7db70324d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1004,7 +1004,6 @@ where *vote_internal.round, vote_internal.relay ); - // TODO ED This isn't ideal, should fix this let vote_data = match vote.get_data() { VoteData::ViewSyncPreCommit(data) => data, VoteData::ViewSyncCommit(data) => data, diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 5eeda26510..ea0f801345 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -119,7 +119,6 @@ pub struct ViewSyncCertificateInternal { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Enum representing whether a signatures is for a 'Yes' or 'No' or 'DA' or 'Genesis' certificate -// TODO ED Should this be a trait? pub enum AssembledSignature { // (enum, signature) /// These signatures are for a 'Yes' certificate @@ -166,14 +165,15 @@ impl> fn from_signatures_and_commitment( signatures: AssembledSignature, - vote: Self::Vote + vote: Self::Vote, ) -> Self { let leaf_commitment = match vote.clone() { - QuorumVote::Yes(vote_internal) | QuorumVote::No (vote_internal) => vote_internal.leaf_commitment, - _ => unimplemented!() + QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { + vote_internal.leaf_commitment + } + _ => unimplemented!(), }; let qc = QuorumCertificate { - // TODO ED Change this to getter functions, make get_commitment function leaf_commitment, view_number: vote.get_view(), signatures, @@ -242,7 +242,7 @@ impl SignedCertificate, - vote: Self::Vote + vote: Self::Vote, ) -> Self { DACertificate { view_number: vote.get_view(), @@ -331,7 +331,7 @@ impl /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( signatures: AssembledSignature, - vote: Self::Vote + vote: Self::Vote, ) -> Self { let certificate_internal = ViewSyncCertificateInternal { round: vote.get_view(), diff --git a/types/src/message.rs b/types/src/message.rs index 4e7eeb82e7..79bcca050c 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -486,4 +486,4 @@ pub struct Proposal { pub data: PROPOSAL, /// The proposal must be signed by the view leader pub signature: EncodedSignature, -} \ No newline at end of file +} diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 5a02c2ff81..0549504955 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -545,7 +545,8 @@ pub trait ConsensusExchange: Send + Sync { Either::Right(signatures) => { // TODO ED Update this function to just take in the signatures and most recent vote Either::Right(Self::Certificate::from_signatures_and_commitment( - signatures, vote.clone(), + signatures, + vote.clone(), )) } } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index f9aea5f835..73bcc0bcb8 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -203,7 +203,6 @@ pub trait ViewMessage { /// API for interacting directly with a consensus committee /// intended to be implemented for both DA and for validating consensus committees -// TODO ED Why is this generic over VOTE? #[async_trait] pub trait CommunicationChannel>: Clone + Debug + Send + Sync + 'static @@ -346,7 +345,6 @@ pub trait TestableNetworkingImplementation { fn in_flight_message_count(&self) -> Option; } /// Describes additional functionality needed by the test communication channel -// TODO ED Why is this generic over VOTE? pub trait TestableChannelImplementation< TYPES: NodeType, M: NetworkMsg, diff --git a/types/src/vote.rs b/types/src/vote.rs index 76a2bc9aa7..e4f6b04a1c 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -196,7 +196,6 @@ pub enum QuorumVote> { /// Negative vote. No(YesOrNoVote), /// Timeout vote. - // TODO ED Remove this and make it it's own vote type, since it is not part of the QC type Timeout(TimeoutVote), } @@ -208,7 +207,6 @@ impl VoteType for DAVote { self.signature_key() } fn get_signature(&self) -> EncodedSignature { - // TODO ED Revisit this function self.signature.1.clone() } fn get_data(&self) -> VoteData { @@ -219,7 +217,6 @@ impl VoteType for DAVote { } } -// TODO ED Remove this impl DAVote { /// Get the signature key. /// # Panics @@ -398,8 +395,6 @@ impl< return Either::Left(self); } - // Update the active_keys and signature lists - // TODO ED How does this differ than the check above? if self.signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); return Either::Left(self); @@ -428,7 +423,6 @@ impl< &self.sig_lists[..], ); - // TODO ED Why do we need this line if we have the above line? self.da_vote_outcomes.remove(&vote_commitment); return Either::Right(AssembledSignature::DA(real_qc_sig)); @@ -510,8 +504,6 @@ impl< return Either::Left(self); } - // Update the active_keys and signature lists - // TODO ED How does this differ than the check above? if self.signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); return Either::Left(self); @@ -731,7 +723,6 @@ impl< self.signers.as_bitslice(), &self.sig_lists[..], ); - // TODO ED Why remove? self.finalize_vote_outcomes .remove(&vote_commitment) .unwrap(); @@ -829,7 +820,6 @@ where #![allow(clippy::too_many_lines)] fn append( mut self, - // TODO ED Make this its own type to avoid extra long type signature val: ( Commitment, ( From d51f3a489e7a9e37052c265f05f342335e32ea60 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 19:58:34 -0400 Subject: [PATCH 0084/1393] lints --- types/src/traits/election.rs | 6 +++--- types/src/traits/network.rs | 6 +----- types/src/vote.rs | 21 +++++++++------------ 3 files changed, 13 insertions(+), 20 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 0549504955..e76be94349 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -29,8 +29,8 @@ use crate::{ state::ConsensusTime, }, vote::{ - Accumulator, DAVote, QuorumVote, TimeoutVote, ViewSyncData, ViewSyncVote, VoteAccumulator, - VoteType, YesOrNoVote, + DAVote, QuorumVote, TimeoutVote, ViewSyncData, ViewSyncVote, VoteAccumulator, VoteType, + YesOrNoVote, }, }; use bincode::Options; @@ -505,7 +505,7 @@ pub trait ConsensusExchange: Send + Sync { TYPES::VoteTokenType, Self::Commitment, >>::Vote, - commit: &Commitment, + _commit: &Commitment, ) -> Either< <>::Certificate as SignedCertificate< TYPES, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 73bcc0bcb8..20639a308b 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -11,11 +11,7 @@ use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use super::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{ - data::{ProposalType, ViewNumber}, - message::MessagePurpose, - vote::VoteType, -}; +use crate::{data::ViewNumber, message::MessagePurpose}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use serde::{Deserialize, Serialize}; diff --git a/types/src/vote.rs b/types/src/vote.rs index e4f6b04a1c..70bb95d8b1 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -221,7 +221,6 @@ impl DAVote { /// Get the signature key. /// # Panics /// If the deserialization fails. - #[deprecated] pub fn signature_key(&self) -> TYPES::SignatureKey { ::from_bytes(&self.signature.0).unwrap() } @@ -259,7 +258,7 @@ impl> VoteType impl> QuorumVote { /// Get the encoded signature. - #[deprecated] + pub fn signature(&self) -> EncodedSignature { match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), @@ -269,7 +268,7 @@ impl> QuorumVote /// Get the signature key. /// # Panics /// If the deserialization fails. - #[deprecated] + pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), @@ -567,7 +566,7 @@ pub struct ViewSyncVoteAccumulator< COMMITTABLE: Committable + Serialize + Clone, VOTE: VoteType, > { - /// Map of all da signatures accumlated so far + /// Map of all pre_commit signatures accumlated so far pub pre_commit_vote_outcomes: VoteMap, pub commit_vote_outcomes: VoteMap, pub finalize_vote_outcomes: VoteMap, @@ -590,18 +589,18 @@ impl< VOTE: VoteType, > Accumulator2 for ViewSyncVoteAccumulator { + #[allow(clippy::too_many_lines)] fn append( mut self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - let vote_commitment = match vote.get_data() { - VoteData::ViewSyncPreCommit(commitment) - | VoteData::ViewSyncCommit(commitment) - | VoteData::ViewSyncFinalize(commitment) => commitment, - - _ => return Either::Left(self), + let (VoteData::ViewSyncPreCommit(vote_commitment) + | VoteData::ViewSyncCommit(vote_commitment) + | VoteData::ViewSyncFinalize(vote_commitment)) = vote.get_data() + else { + return Either::Left(self); }; // error!("Vote is {:?}", vote.clone()); @@ -734,7 +733,6 @@ impl< } /// Placeholder accumulator; will be replaced by accumulator for each certificate type -#[deprecated] pub struct AccumulatorPlaceholder< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, @@ -888,7 +886,6 @@ where // update the active_keys and sig_lists if self.signers.get(node_id).as_deref() == Some(&true) { error!("node id already in signers"); - panic!(); return Either::Left(self); } self.signers.set(node_id, true); From 498dd88696f9c71af20bd010ef7ac5e8db2980b2 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 20:15:49 -0400 Subject: [PATCH 0085/1393] Finish lints --- hotshot/src/tasks/mod.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- .../networking/web_server_libp2p_fallback.rs | 2 +- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 9 ++-- task-impls/src/view_sync.rs | 13 ++--- types/src/certificate.rs | 2 +- types/src/traits/election.rs | 28 +++-------- types/src/vote.rs | 49 ++++++++++--------- 9 files changed, 46 insertions(+), 63 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 8b96117428..64bb7d6213 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -4,7 +4,7 @@ use crate::{ async_spawn, types::SystemContextHandle, DACertificate, HotShotSequencingConsensusApi, QuorumCertificate, SequencingQuorumEx, }; -use async_compatibility_layer::art::{async_sleep, async_spawn_local}; +use async_compatibility_layer::art::async_sleep; use commit::Committable; use futures::FutureExt; use hotshot_task::{ diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index cdabb98091..da8784e0a7 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -15,7 +15,7 @@ use bincode::Options; use hotshot_constants::{KAD_DEFAULT_REPUB_INTERVAL_SEC, LOOK_AHEAD}; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::{ProposalType, ViewNumber}, + data::ViewNumber, message::{Message, MessageKind}, traits::{ election::Membership, diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs index b1655325d7..b3324681a5 100644 --- a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs +++ b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs @@ -13,7 +13,7 @@ use futures::join; use async_compatibility_layer::channel::UnboundedSendError; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::{ProposalType, ViewNumber}, + data::ViewNumber, message::Message, traits::{ election::Membership, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f4d9d8ea3f..d92f286c3f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -36,7 +36,7 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{QuorumVote, VoteAccumulator, VoteType}, + vote::{QuorumVote, VoteType}, }; use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9d1b6840f1..3bcb699be8 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -105,6 +105,7 @@ pub struct DAVoteCollectionTaskState< pub committee_exchange: Arc>, #[allow(clippy::type_complexity)] + /// Accumulates DA votes pub accumulator: Either< as SignedCertificate< TYPES, @@ -196,7 +197,7 @@ where } } } - SequencingHotShotEvent::VidVoteRecv(vote) => { + SequencingHotShotEvent::VidVoteRecv(_vote) => { // TODO ED Make accumulator for VID // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 @@ -430,7 +431,7 @@ where let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), - accumulator: accumulator, + accumulator, cur_view: view, event_stream: self.event_stream.clone(), id: self.id, @@ -512,7 +513,7 @@ where let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), - accumulator: accumulator, + accumulator, cur_view: view, event_stream: self.event_stream.clone(), id: self.id, @@ -542,7 +543,7 @@ where .await; }; } - SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { + SequencingHotShotEvent::VidDisperseRecv(_disperse, _sender) => { // TODO ED Make accumulator for this // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 // debug!( diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index e7db70324d..f92752f487 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -9,13 +9,12 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::election::VoteData; use hotshot_types::{ traits::{ election::{Membership, SignedCertificate}, network::ConsensusIntentEvent, }, - vote::{ViewSyncVoteAccumulator, VoteType}, + vote::ViewSyncVoteAccumulator, }; use bitvec::prelude::*; @@ -32,7 +31,7 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, }, - vote::{ViewSyncData, ViewSyncVote, VoteAccumulator}, + vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; @@ -223,6 +222,7 @@ pub struct ViewSyncRelayTaskState< /// View sync exchange pub exchange: Arc>, /// Vote accumulator + #[allow(clippy::type_complexity)] pub accumulator: Either< as SignedCertificate< TYPES, @@ -1004,13 +1004,6 @@ where *vote_internal.round, vote_internal.relay ); - let vote_data = match vote.get_data() { - VoteData::ViewSyncPreCommit(data) => data, - VoteData::ViewSyncCommit(data) => data, - VoteData::ViewSyncFinalize(data) => data, - _ => unimplemented!(), - }; - let accumulator = self.exchange.accumulate_vote_2( self.accumulator.left().unwrap(), &vote, diff --git a/types/src/certificate.rs b/types/src/certificate.rs index ea0f801345..9792026dcf 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -171,7 +171,7 @@ impl> QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { vote_internal.leaf_commitment } - _ => unimplemented!(), + QuorumVote::Timeout(_) => unimplemented!(), }; let qc = QuorumCertificate { leaf_commitment, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index e76be94349..37f0be4753 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -191,17 +191,6 @@ where /// `Accumulator` type to accumulate votes. type VoteAccumulator: Accumulator2; - /// Accumulates votes given an accumulator, vote, and commit. - /// Returns either the accumulator or a certificate - fn accumulate_vote( - self, - accumulator: Self::VoteAccumulator, - vote: Self::Vote, - commit: COMMITTABLE, - ) -> Either { - todo!() - } - /// Build a QC from the threshold signature and commitment // TODO ED Rename this function and rework this function parameters // Assumes last vote was valid since it caused a QC to form. @@ -444,14 +433,11 @@ pub trait ConsensusExchange: Send + Sync { data: &VoteData, vote_token: &Checked, ) -> bool { - let mut is_valid_vote_token = false; - let mut is_valid_signature = false; - - is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); let valid_vote_token = self .membership() .validate_vote_token(key.clone(), vote_token.clone()); - is_valid_vote_token = match valid_vote_token { + let is_valid_vote_token = match valid_vote_token { Err(_) => { error!("Vote token was invalid"); false @@ -464,10 +450,11 @@ pub trait ConsensusExchange: Send + Sync { } #[doc(hidden)] + fn accumulate_internal( &self, - vota_meta: VoteMetaData, - accumulator: VoteAccumulator, + _vota_meta: VoteMetaData, + _accumulator: VoteAccumulator, ) -> Either, Self::Certificate> { todo!() // TODO ED Remove this function } @@ -487,10 +474,11 @@ pub trait ConsensusExchange: Send + Sync { relay: Option, ) -> Either, Self::Certificate>; - // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the SignedCertificate + // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. /// Accumulate vote - /// Returns either the accumulate if no threshold was reached, or a SignedCertificate if the threshold was reached + /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached + #[allow(clippy::type_complexity)] fn accumulate_vote_2( &self, accumulator: <>::Certificate as SignedCertificate< diff --git a/types/src/vote.rs b/types/src/vote.rs index 70bb95d8b1..a0ae42c926 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -42,7 +42,7 @@ pub trait VoteType EncodedSignature; /// Get the data this vote was signed over fn get_data(&self) -> VoteData; - // Get the vote token of this vote + /// Get the vote token of this vote fn get_vote_token(&self) -> TYPES::VoteTokenType; } @@ -245,13 +245,13 @@ impl> VoteType fn get_data(&self) -> VoteData { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), - QuorumVote::Timeout(v) => unimplemented!(), + QuorumVote::Timeout(_) => unimplemented!(), } } fn get_vote_token(&self) -> ::VoteTokenType { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token.clone(), - QuorumVote::Timeout(v) => unimplemented!(), + QuorumVote::Timeout(_) => unimplemented!(), } } } @@ -337,6 +337,7 @@ pub trait Accumulator2< ) -> Either>; } +/// Accumulates DA votes pub struct DAVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, @@ -350,7 +351,7 @@ pub struct DAVoteAccumulator< pub sig_lists: Vec<::Signature>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, - + /// Phantom data to specify the vote this accumulator is for pub phantom: PhantomData, } @@ -360,17 +361,14 @@ impl< VOTE: VoteType, > Accumulator2 for DAVoteAccumulator { - // TODO ED We could make this the default impl, so it works for both TC and DAC fn append( mut self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - // TODO ED Make this a function on VoteType trait - let vote_commitment = match vote.get_data() { - VoteData::DA(commitment) => commitment, - _ => return Either::Left(self), + let VoteData::DA(vote_commitment) = vote.get_data() else { + return Either::Left(self); }; let encoded_key = vote.get_key().to_bytes(); @@ -430,27 +428,28 @@ impl< } } -// TODO ED Should make these fields a trait for Accumulator, like success threshold, etc. +/// Accumulate quorum votes pub struct QuorumVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, VOTE: VoteType, > { - /// Map of all da signatures accumlated so far + /// Map of all signatures accumlated so far pub total_vote_outcomes: VoteMap, + /// Map of all yes signatures accumlated so far pub yes_vote_outcomes: VoteMap, - + /// Map of all no signatures accumlated so far pub no_vote_outcomes: VoteMap, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, - + /// A failure threshold, generally f + 1 pub failure_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::Signature>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, - + /// Phantom data to ensure this struct is over a specific `VoteType` implementation pub phantom: PhantomData, } @@ -466,10 +465,9 @@ impl< vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - let vote_commitment = match vote.get_data() { - VoteData::Yes(commitment) | VoteData::No(commitment) => commitment, - - _ => return Either::Left(self), + let (VoteData::Yes(vote_commitment) | VoteData::No(vote_commitment)) = vote.get_data() + else { + return Either::Left(self); }; let encoded_key = vote.get_key().to_bytes(); @@ -561,6 +559,7 @@ impl< } } +/// Accumulates view sync votes pub struct ViewSyncVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, @@ -568,18 +567,20 @@ pub struct ViewSyncVoteAccumulator< > { /// Map of all pre_commit signatures accumlated so far pub pre_commit_vote_outcomes: VoteMap, + /// Map of all ommit signatures accumlated so far pub commit_vote_outcomes: VoteMap, + /// Map of all finalize signatures accumlated so far pub finalize_vote_outcomes: VoteMap, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, - + /// A quorum's failure threshold, generally f + 1 pub failure_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::Signature>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, - + /// Phantom data since we want the accumulator to be attached to a single `VoteType` pub phantom: PhantomData, } @@ -750,11 +751,11 @@ impl< { fn append( self, - vote: VOTE, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, + _vote: VOTE, + _vote_node_id: usize, + _stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - todo!() + either::Left(self) } } From 326db1b2e1d3cfaf6b596096cdd0ac321f80590a Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Sep 2023 20:20:27 -0400 Subject: [PATCH 0086/1393] Update VID --- task-impls/src/da.rs | 225 ++++++++++++++++++++----------------------- 1 file changed, 106 insertions(+), 119 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 3bcb699be8..6d5d58c5a1 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -17,6 +17,7 @@ use hotshot_task::{ }; use hotshot_types::traits::election::SignedCertificate; use hotshot_types::vote::DAVoteAccumulator; +use hotshot_types::vote::VoteType; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, @@ -197,59 +198,46 @@ where } } } - SequencingHotShotEvent::VidVoteRecv(_vote) => { - // TODO ED Make accumulator for VID + SequencingHotShotEvent::VidVoteRecv(vote) => { // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 - // debug!("VID vote recv, collection task {:?}", vote.current_view); - // // panic!("Vote handle received DA vote for view {}", *vote.current_view); - - // let accumulator = match state.accumulator { - // Right(_) => { - // // For the case where we receive votes after we've made a certificate - // debug!("VID accumulator finished view: {:?}", state.cur_view); - // return (None, state); - // } - // Left(a) => a, - // }; - // match state.committee_exchange.accumulate_vote( - // &vote.signature.0, - // &vote.signature.1, - // vote.block_commitment, - // vote.vote_data, - // vote.vote_token.clone(), - // state.cur_view, - // accumulator, - // None, - // ) { - // Left(acc) => { - // state.accumulator = Either::Left(acc); - // // debug!("Not enough VID votes! "); - // return (None, state); - // } - // Right(vid_cert) => { - // debug!("Sending VID cert! {:?}", vid_cert.view_number); - // state - // .event_stream - // .publish(SequencingHotShotEvent::VidCertSend( - // vid_cert.clone(), - // state.committee_exchange.public_key().clone(), - // )) - // .await; - - // state.accumulator = Right(vid_cert.clone()); - // state - // .committee_exchange - // .network() - // .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - // *vid_cert.view_number, - // )) - // .await; - - // // Return completed at this point - // return (Some(HotShotTaskCompleted::ShutDown), state); - // } - // } + debug!("VID vote recv, collection task {:?}", vote.get_view()); + // panic!("Vote handle received DA vote for view {}", *vote.current_view); + + let accumulator = state.accumulator.left().unwrap(); + + match state.committee_exchange.accumulate_vote_2( + accumulator, + &vote, + &vote.block_commitment, + ) { + Left(new_accumulator) => { + state.accumulator = either::Left(new_accumulator); + } + + Right(vid_cert) => { + debug!("Sending VID cert! {:?}", vid_cert.view_number); + state + .event_stream + .publish(SequencingHotShotEvent::VidCertSend( + vid_cert.clone(), + state.committee_exchange.public_key().clone(), + )) + .await; + + state.accumulator = Right(vid_cert.clone()); + state + .committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *vid_cert.view_number, + )) + .await; + + // Return completed at this point + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } } SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), _ => { @@ -543,81 +531,80 @@ where .await; }; } - SequencingHotShotEvent::VidDisperseRecv(_disperse, _sender) => { - // TODO ED Make accumulator for this + SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 - // debug!( - // "VID disperse received for view: {:?}", - // disperse.data.get_view_number() - // ); + debug!( + "VID disperse received for view: {:?}", + disperse.data.get_view_number() + ); - // // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view - // let view = disperse.data.get_view_number(); + // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view + let view = disperse.data.get_view_number(); - // // Allow a DA proposal that is one view older, in case we have voted on a quorum - // // proposal and updated the view. - // // `self.cur_view` should be at least 1 since there is a view change before getting - // // the `DAProposalRecv` event. Otherewise, the view number subtraction below will - // // cause an overflow error. - // if view < self.cur_view - 1 { - // warn!("Throwing away VID disperse data that is more than one view older"); - // return None; - // } + // Allow a DA proposal that is one view older, in case we have voted on a quorum + // proposal and updated the view. + // `self.cur_view` should be at least 1 since there is a view change before getting + // the `DAProposalRecv` event. Otherewise, the view number subtraction below will + // cause an overflow error. + if view < self.cur_view - 1 { + warn!("Throwing away VID disperse data that is more than one view older"); + return None; + } - // debug!("VID disperse data is fresh."); - // let block_commitment = disperse.data.commitment; + debug!("VID disperse data is fresh."); + let block_commitment = disperse.data.commitment; - // // ED Is this the right leader? - // let view_leader_key = self.committee_exchange.get_leader(view); - // if view_leader_key != sender { - // error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); - // return None; - // } + // ED Is this the right leader? + let view_leader_key = self.committee_exchange.get_leader(view); + if view_leader_key != sender { + error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); + return None; + } - // if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { - // error!("Could not verify VID proposal sig."); - // return None; - // } + if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { + error!("Could not verify VID proposal sig."); + return None; + } - // let vote_token = self.committee_exchange.make_vote_token(view); - // match vote_token { - // Err(e) => { - // error!("Failed to generate vote token for {:?} {:?}", view, e); - // } - // Ok(None) => { - // debug!("We were not chosen for VID quorum on {:?}", view); - // } - // Ok(Some(vote_token)) => { - // // Generate and send vote - // let vote = self.committee_exchange.create_vid_message( - // block_commitment, - // view, - // vote_token, - // ); - - // // ED Don't think this is necessary? - // // self.cur_view = view; - - // debug!("Sending vote to the VID leader {:?}", vote.current_view); - // self.event_stream - // .publish(SequencingHotShotEvent::VidVoteSend(vote)) - // .await; - // let mut consensus = self.consensus.write().await; - - // // Ensure this view is in the view map for garbage collection, but do not overwrite if - // // there is already a view there: the replica task may have inserted a `Leaf` view which - // // contains strictly more information. - // consensus.state_map.entry(view).or_insert(View { - // view_inner: ViewInner::DA { - // block: block_commitment, - // }, - // }); - - // // Record the block we have promised to make available. - // // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // // consensus.saved_blocks.insert(proposal.data.deltas); - // } - // } + let vote_token = self.committee_exchange.make_vote_token(view); + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for VID quorum on {:?}", view); + } + Ok(Some(vote_token)) => { + // Generate and send vote + let vote = self.committee_exchange.create_vid_message( + block_commitment, + view, + vote_token, + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + debug!("Sending vote to the VID leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::VidVoteSend(vote)) + .await; + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: block_commitment, + }, + }); + + // Record the block we have promised to make available. + // TODO https://github.com/EspressoSystems/HotShot/issues/1692 + // consensus.saved_blocks.insert(proposal.data.deltas); + } + } } SequencingHotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { From 17009ee049a4130de579c161c92ff4c0f269be9c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 14 Sep 2023 21:23:08 -0700 Subject: [PATCH 0087/1393] create PureAssembledSignatureType in SignatureKey --- hotshot-signature-key/src/bn254/bn254_pub.rs | 7 ++++--- types/src/traits/signature_key.rs | 15 ++++++++++++++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 714ee98687..2051244371 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -50,11 +50,12 @@ impl SignatureKey for BN254Pub { ::VerificationKey, ::PublicParameter, >; + type SignatureKeySignatureScheme = BLSOverBN254CurveSignatureScheme; + type PureAssembledSignatureType = ::Signature; type QCType = ( - ::Signature, + Self::PureAssembledSignatureType, BitVec, ); - // as AssembledQuorumCertificate>::QC; #[instrument(skip(self))] fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool { @@ -159,7 +160,7 @@ impl SignatureKey for BN254Pub { fn get_sig_proof( signature: &Self::QCType, ) -> ( - ::Signature, + Self::PureAssembledSignatureType, BitVec, ) { signature.clone() diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index d8f79a080e..7feac165e6 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -70,6 +70,19 @@ pub trait SignatureKey: + for<'a> Deserialize<'a>; /// The type of the quorum certificate parameters used for assembled signature type QCParams: Send + Sync + Sized + Clone + Debug + Hash; + /// An aggregateable signature scheme used from jellyfish + type SignatureKeySignatureScheme: Send + Sync + Sized + Debug; + /// The type of the assembled signature, without `BitVec` + type PureAssembledSignatureType: Send + + Sync + + Sized + + Clone + + Debug + + Hash + + PartialEq + + Eq + + Serialize + + for<'a> Deserialize<'a>; /// The type of the assembled qc: assembled signature + `BitVec` type QCType: Send + Sync @@ -117,7 +130,7 @@ pub trait SignatureKey: fn get_sig_proof( signature: &Self::QCType, ) -> ( - ::Signature, + Self::PureAssembledSignatureType, BitVec, ); From a532fe38143af1eacf6e1175979342572896f86c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 14 Sep 2023 22:20:56 -0700 Subject: [PATCH 0088/1393] formalize all the types so that SignatureKey trait doesn't have any BN254 specific parameters --- hotshot-signature-key/src/bn254/bn254_pub.rs | 3 +-- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/view_sync.rs | 2 +- types/src/traits/election.rs | 20 ++++++++++---------- types/src/traits/signature_key.rs | 7 +------ types/src/vote.rs | 11 ++++------- 7 files changed, 19 insertions(+), 28 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 2051244371..30f022209c 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -50,7 +50,6 @@ impl SignatureKey for BN254Pub { ::VerificationKey, ::PublicParameter, >; - type SignatureKeySignatureScheme = BLSOverBN254CurveSignatureScheme; type PureAssembledSignatureType = ::Signature; type QCType = ( Self::PureAssembledSignatureType, @@ -169,7 +168,7 @@ impl SignatureKey for BN254Pub { fn assemble( real_qc_pp: &Self::QCParams, signers: &BitSlice, - sigs: &[::Signature], + sigs: &[Self::PureAssembledSignatureType], ) -> Self::QCType { BitVectorQC::::assemble(real_qc_pp, signers, sigs) .expect("this assembling shouldn't fail") diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f499c5f248..38eb0a0622 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -150,7 +150,7 @@ pub struct VoteCollectionTaskState< #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: - Either, QuorumCertificate>, + Either, QuorumCertificate>, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, /// The event stream shared by all tasks diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index e1dc74f349..fa451ea947 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -103,7 +103,7 @@ pub struct DAVoteCollectionTaskState< pub committee_exchange: Arc>, /// the vote accumulator pub accumulator: - Either, DACertificate>, + Either, DACertificate>, // TODO ED Make this just "view" since it is only for this task /// the current view pub cur_view: TYPES::Time, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 18c8ae581f..3c3f2dad55 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -217,7 +217,7 @@ pub struct ViewSyncRelayTaskState< pub exchange: Arc>, /// Vote accumulator pub accumulator: Either< - VoteAccumulator>, + VoteAccumulator, TYPES>, ViewSyncCertificate, >, /// Our node id; for logging diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 771a1f1821..56977b6def 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -418,8 +418,8 @@ pub trait ConsensusExchange: Send + Sync { fn accumulate_internal( &self, vota_meta: VoteMetaData, - accumulator: VoteAccumulator, - ) -> Either, Self::Certificate> { + accumulator: VoteAccumulator, + ) -> Either, Self::Certificate> { if !self.is_valid_vote( &vota_meta.encoded_key, &vota_meta.encoded_signature, @@ -480,9 +480,9 @@ pub trait ConsensusExchange: Send + Sync { vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, relay: Option, - ) -> Either, Self::Certificate>; + ) -> Either, Self::Certificate>; /// The committee which votes on proposals. fn membership(&self) -> &Self::Membership; @@ -696,9 +696,9 @@ impl< vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, _relay: Option, - ) -> Either, Self::Certificate> { + ) -> Either, Self::Certificate> { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), @@ -1008,9 +1008,9 @@ impl< vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, _relay: Option, - ) -> Either, Self::Certificate> { + ) -> Either, Self::Certificate> { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), @@ -1363,9 +1363,9 @@ impl< vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator>, + accumlator: VoteAccumulator, TYPES>, relay: Option, - ) -> Either>, Self::Certificate> { + ) -> Either, TYPES>, Self::Certificate> { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 7feac165e6..5fa56609f3 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -3,9 +3,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Serializatio use bitvec::prelude::*; use espresso_systems_common::hotshot::tag; use ethereum_types::U256; -use jf_primitives::signatures::{ - bls_over_bn254::BLSOverBN254CurveSignatureScheme, SignatureScheme, -}; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, hash::Hash}; use tagged_base64::tagged; @@ -70,8 +67,6 @@ pub trait SignatureKey: + for<'a> Deserialize<'a>; /// The type of the quorum certificate parameters used for assembled signature type QCParams: Send + Sync + Sized + Clone + Debug + Hash; - /// An aggregateable signature scheme used from jellyfish - type SignatureKeySignatureScheme: Send + Sync + Sized + Debug; /// The type of the assembled signature, without `BitVec` type PureAssembledSignatureType: Send + Sync @@ -138,6 +133,6 @@ pub trait SignatureKey: fn assemble( real_qc_pp: &Self::QCParams, signers: &BitSlice, - sigs: &[::Signature], + sigs: &[Self::PureAssembledSignatureType], ) -> Self::QCType; } diff --git a/types/src/vote.rs b/types/src/vote.rs index cdbb8cb03f..ecdd8ce494 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -18,9 +18,6 @@ use commit::{Commitment, Committable}; use either::Either; use ethereum_types::U256; use hotshot_utils::bincode::bincode_opts; -use jf_primitives::signatures::{ - bls_over_bn254::BLSOverBN254CurveSignatureScheme, SignatureScheme, -}; use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, @@ -266,7 +263,7 @@ type VoteMap = HashMap< /// Describe the process of collecting signatures on block or leaf commitment, to form a DAC or QC, /// respectively. -pub struct VoteAccumulator { +pub struct VoteAccumulator { /// Map of all signatures accumlated so far pub total_vote_outcomes: VoteMap, /// Map of all da signatures accumlated so far @@ -286,7 +283,7 @@ pub struct VoteAccumulator { /// Enough stake to know that we cannot possibly get a quorum, generally f + 1 pub failure_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::Signature>, + pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, } @@ -307,7 +304,7 @@ impl ), ), AssembledSignature, - > for VoteAccumulator + > for VoteAccumulator where TOKEN: Clone + VoteToken, { @@ -331,7 +328,7 @@ where let (commitment, (key, (sig, entries, node_id, vote_data, token))) = val; // Desereialize the sig so that it can be assembeld into a QC - let origianl_sig: ::Signature = + let origianl_sig: ::PureAssembledSignatureType = bincode_opts() .deserialize(&sig.0) .expect("Deserialization on the signature shouldn't be able to fail."); From 40688bcc5ea88197f8bd46aa683a995ad20e8541 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 15 Sep 2023 00:26:43 -0700 Subject: [PATCH 0089/1393] clean up BLSOverBN254CurveSignatureScheme so that it only appears in bit_vector code and bn254_pub code --- types/src/traits/signature_key.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 5fa56609f3..4f115dbbe3 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -21,7 +21,7 @@ use tagged_base64::tagged; Ord, )] pub struct EncodedPublicKey( - #[debug(with = "custom_debug::hexbuf")] pub Vec, // pub ::VerificationKey + #[debug(with = "custom_debug::hexbuf")] pub Vec, ); /// Type saftey wrapper for byte encoded signature @@ -29,7 +29,7 @@ pub struct EncodedPublicKey( Clone, custom_debug::Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, )] pub struct EncodedSignature( - #[debug(with = "custom_debug::hexbuf")] pub Vec, // pub ::Signature + #[debug(with = "custom_debug::hexbuf")] pub Vec, ); impl AsRef<[u8]> for EncodedSignature { From f12cc7f1f3d46f6ce950f3dd41bbc847da99b974 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Sep 2023 10:58:29 -0400 Subject: [PATCH 0090/1393] address comments --- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/transactions.rs | 4 ++-- testing/tests/network_task.rs | 5 +++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5e406b29e1..04ba43f74a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -432,7 +432,7 @@ where Commitment = TYPES::BlockType, >, { - // build the da task + // build the transactions task let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { inner: handle.hotshot.inner.clone(), }; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index bc4f359f55..c283291683 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -33,7 +33,7 @@ use tracing::{debug, error, instrument, warn}; /// Error type for consensus tasks pub struct ConsensusTaskError {} -/// Tracks state of a DA task +/// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, I: NodeImplementation< @@ -310,7 +310,7 @@ where matches!( event, SequencingHotShotEvent::TransactionsRecv(_) - | SequencingHotShotEvent::DAVoteRecv(_) + | SequencingHotShotEvent::LeafDecided(_) | SequencingHotShotEvent::Shutdown | SequencingHotShotEvent::ViewChange(_) ) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 360cf66feb..b28404f835 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -110,13 +110,14 @@ async fn test_network_task() { SequencingHotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(1)), 1); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); + // Only one output from the input. // The consensus task will fail to send a second proposal, like the DA task does, due to the // view number check in `publish_proposal_if_able` in consensus.rs, and we will see an error in // logging, but that is fine for testing as long as the network task is correctly handling // events. - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(1)), 1); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); output.insert( SequencingHotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, From adc19a06e9c78eda8365ebfeffb90a729f353652 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 15 Sep 2023 12:13:04 -0700 Subject: [PATCH 0091/1393] Remove vdemo, rename and move sdemo, extract block impl from demo --- .../examples/web-server-da/multi-validator.rs | 2 +- .../web-server-da/multi-web-server.rs | 2 +- .../examples/web-server-da/orchestrator.rs | 2 +- hotshot/examples/web-server-da/types.rs | 2 +- hotshot/examples/web-server-da/validator.rs | 2 +- hotshot/examples/web-server-da/web-server.rs | 2 +- hotshot/src/block_impl.rs | 174 +++++ hotshot/src/{demos/sdemo.rs => demo.rs} | 200 +----- hotshot/src/demos.rs | 8 - hotshot/src/demos/vdemo.rs | 607 ------------------ hotshot/src/lib.rs | 5 +- testing/src/node_types.rs | 11 +- testing/tests/da_task.rs | 4 +- testing/tests/network_task.rs | 4 +- 14 files changed, 207 insertions(+), 818 deletions(-) create mode 100644 hotshot/src/block_impl.rs rename hotshot/src/{demos/sdemo.rs => demo.rs} (52%) delete mode 100644 hotshot/src/demos.rs delete mode 100644 hotshot/src/demos/vdemo.rs diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index 8a0f53c1c9..a16f2951ed 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::SDemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; diff --git a/hotshot/examples/web-server-da/multi-web-server.rs b/hotshot/examples/web-server-da/multi-web-server.rs index c41c9c0b03..39a9b7a87e 100644 --- a/hotshot/examples/web-server-da/multi-web-server.rs +++ b/hotshot/examples/web-server-da/multi-web-server.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::SDemoTypes; use tracing::error; #[derive(Parser, Debug)] diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 772d3aa12e..785daeecb1 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::SDemoTypes; use tracing::instrument; use types::ThisMembership; diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 6827a00fe8..4b4905062e 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -1,6 +1,6 @@ use crate::infra_da::WebServerDARun; use hotshot::{ - demos::sdemo::SDemoTypes, + demo::SDemoTypes, traits::{ election::static_committee::GeneralStaticCommittee, implementations::{MemoryStorage, WebCommChannel}, diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index ec2415fd65..caf46bdc74 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::SDemoTypes; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; diff --git a/hotshot/examples/web-server-da/web-server.rs b/hotshot/examples/web-server-da/web-server.rs index 99d0b12f63..bbf0bbc691 100644 --- a/hotshot/examples/web-server-da/web-server.rs +++ b/hotshot/examples/web-server-da/web-server.rs @@ -1,4 +1,4 @@ -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::SDemoTypes; use std::sync::Arc; use async_compatibility_layer::{ diff --git a/hotshot/src/block_impl.rs b/hotshot/src/block_impl.rs new file mode 100644 index 0000000000..d3e4755129 --- /dev/null +++ b/hotshot/src/block_impl.rs @@ -0,0 +1,174 @@ +//! This module provides an implementation of the `HotShot` suite of traits. +use std::{ + collections::HashSet, + fmt::{Debug, Display}, + ops::Deref, +}; + +use commit::{Commitment, Committable}; +use hotshot_types::traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; + +/// The transaction in a [`VIDBlockPayload`]. +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct VIDTransaction { + /// identifier for the transaction + pub id: u64, + /// padding to add to txn (to make it larger and thereby more realistic) + pub padding: Vec, +} + +impl Deref for VIDTransaction { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.id + } +} + +impl Committable for VIDTransaction { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("SDemo Txn Comm") + .u64_field("id", self.id) + .finalize() + } + + fn tag() -> String { + "SEQUENCING_DEMO_TXN".to_string() + } +} + +impl Transaction for VIDTransaction {} + +impl VIDTransaction { + /// create a new transaction + #[must_use] + pub fn new(id: u64) -> Self { + Self { + id, + padding: vec![], + } + } +} + +/// The error type for block payload. +#[derive(Snafu, Debug)] +pub enum BlockPayloadError { + /// Previous state commitment does not match + PreviousStateMismatch, + /// Nonce was reused + ReusedTxn, + /// Genesis failure + GenesisFailed, + /// Genesis reencountered after initialization + GenesisAfterStart, + /// no transasctions added to genesis + GenesisCantHaveTransactions, + /// invalid block + InvalidBlock, +} + +/// genesis block +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct GenesisBlockPayload {} + +/// Any block after genesis +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct NormalBlockPayload { + /// [`BlockPayload`] state commitment + pub previous_state: (), + /// [`VIDTransaction`] vector + pub transactions: Vec, +} + +/// The block for the sequencing demo +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub enum VIDBlockPayload { + /// genesis block payload + Genesis(GenesisBlockPayload), + /// normal block payload + Normal(NormalBlockPayload), +} + +impl Committable for VIDBlockPayload { + fn commit(&self) -> Commitment { + match &self { + VIDBlockPayload::Genesis(_) => { + commit::RawCommitmentBuilder::new("Genesis Comm").finalize() + } + VIDBlockPayload::Normal(block) => { + let mut builder = commit::RawCommitmentBuilder::new("Normal Comm"); + for txn in &block.transactions { + builder = builder.u64_field("transaction", **txn); + } + builder.finalize() + } + } + } + + fn tag() -> String { + "VID_BLOCK_PAYLOAD".to_string() + } +} + +impl Display for VIDBlockPayload { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + VIDBlockPayload::Genesis(_) => { + write!(f, "Genesis BlockPayload") + } + VIDBlockPayload::Normal(block) => { + write!(f, "Normal BlockPayload #txns={}", block.transactions.len()) + } + } + } +} + +impl TestableBlock for VIDBlockPayload { + fn genesis() -> Self { + VIDBlockPayload::Genesis(GenesisBlockPayload {}) + } + + fn txn_count(&self) -> u64 { + match self { + VIDBlockPayload::Genesis(_) => 0, + VIDBlockPayload::Normal(n) => n.transactions.len() as u64, + } + } +} + +impl BlockPayload for VIDBlockPayload { + type Error = BlockPayloadError; + + type Transaction = VIDTransaction; + + fn new() -> Self { + ::genesis() + } + + fn add_transaction_raw( + &self, + tx: &Self::Transaction, + ) -> std::result::Result { + match self { + VIDBlockPayload::Genesis(_) => Err(BlockPayloadError::GenesisCantHaveTransactions), + VIDBlockPayload::Normal(n) => { + let mut new = n.clone(); + new.transactions.push(tx.clone()); + Ok(VIDBlockPayload::Normal(new)) + } + } + } + + fn contained_transactions(&self) -> HashSet> { + match self { + VIDBlockPayload::Genesis(_) => HashSet::new(), + VIDBlockPayload::Normal(n) => n + .transactions + .iter() + .map(commit::Committable::commit) + .collect(), + } + } +} diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demo.rs similarity index 52% rename from hotshot/src/demos/sdemo.rs rename to hotshot/src/demo.rs index 30d12bc579..f5b7361d89 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demo.rs @@ -5,14 +5,10 @@ //! //! These implementations are useful in examples and integration testing, but are not suitable for //! production use. -use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; -use std::{ - collections::HashSet, - fmt::{Debug, Display}, - marker::PhantomData, - ops::Deref, +use crate::{ + block_impl::{BlockPayloadError, NormalBlockPayload, VIDBlockPayload, VIDTransaction}, + traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}, }; - use commit::{Commitment, Committable}; use derivative::Derivative; use either::Either; @@ -24,101 +20,15 @@ use hotshot_types::{ ViewNumber, }, traits::{ - block_contents::Transaction, election::Membership, node_implementation::NodeType, - state::{ConsensusTime, TestableBlock, TestableState}, + state::{ConsensusTime, TestableState}, BlockPayload, State, }, }; use rand::Rng; use serde::{Deserialize, Serialize}; -use snafu::Snafu; - -/// The transaction for the sequencing demo -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoTransaction { - /// identifier for the transaction - pub id: u64, - /// padding to add to txn (to make it larger and thereby more realistic) - pub padding: Vec, -} - -impl Deref for SDemoTransaction { - type Target = u64; - - fn deref(&self) -> &Self::Target { - &self.id - } -} - -impl Committable for SDemoTransaction { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("SDemo Txn Comm") - .u64_field("id", self.id) - .finalize() - } - - fn tag() -> String { - "SEQUENCING_DEMO_TXN".to_string() - } -} - -impl Transaction for SDemoTransaction {} - -impl SDemoTransaction { - /// create a new transaction - #[must_use] - pub fn new(id: u64) -> Self { - Self { - id, - padding: vec![], - } - } -} - -/// genesis block -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoGenesisBlock {} - -/// Any block after genesis -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoNormalBlock { - /// BlockPayload state commitment - pub previous_state: (), - /// Transaction vector - pub transactions: Vec, -} - -/// The block for the sequencing demo -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub enum SDemoBlock { - /// genesis block - Genesis(SDemoGenesisBlock), - /// normal block - Normal(SDemoNormalBlock), -} - -impl Committable for SDemoBlock { - fn commit(&self) -> Commitment { - match &self { - SDemoBlock::Genesis(_) => { - commit::RawCommitmentBuilder::new("SDemo Genesis Comm").finalize() - } - SDemoBlock::Normal(block) => { - let mut builder = commit::RawCommitmentBuilder::new("SDemo Normal Comm"); - for txn in &block.transactions { - builder = builder.u64_field("transaction", **txn); - } - builder.finalize() - } - } - } - - fn tag() -> String { - "SEQUENCING_DEMO_BLOCK".to_string() - } -} +use std::{fmt::Debug, marker::PhantomData}; /// sequencing demo entry state #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] @@ -155,97 +65,15 @@ impl Default for SDemoState { } } -/// The error type for the sequencing demo -#[derive(Snafu, Debug)] -pub enum SDemoError { - /// Previous state commitment does not match - PreviousStateMismatch, - /// Nonce was reused - ReusedTxn, - /// Genesis failure - GenesisFailed, - /// Genesis reencountered after initialization - GenesisAfterStart, - /// no transasctions added to genesis - GenesisCantHaveTransactions, - /// invalid block - InvalidBlock, -} - -impl Display for SDemoBlock { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - SDemoBlock::Genesis(_) => { - write!(f, "SDemo Genesis BlockPayload") - } - SDemoBlock::Normal(block) => { - write!( - f, - "SDemo Normal BlockPayload #txns={}", - block.transactions.len() - ) - } - } - } -} - -impl TestableBlock for SDemoBlock { - fn genesis() -> Self { - SDemoBlock::Genesis(SDemoGenesisBlock {}) - } - - fn txn_count(&self) -> u64 { - match self { - SDemoBlock::Genesis(_) => 0, - SDemoBlock::Normal(n) => n.transactions.len() as u64, - } - } -} - -impl BlockPayload for SDemoBlock { - type Error = SDemoError; - - type Transaction = SDemoTransaction; - - fn new() -> Self { - ::genesis() - } - - fn add_transaction_raw( - &self, - tx: &Self::Transaction, - ) -> std::result::Result { - match self { - SDemoBlock::Genesis(_) => Err(SDemoError::GenesisCantHaveTransactions), - SDemoBlock::Normal(n) => { - let mut new = n.clone(); - new.transactions.push(tx.clone()); - Ok(SDemoBlock::Normal(new)) - } - } - } - - fn contained_transactions(&self) -> HashSet> { - match self { - SDemoBlock::Genesis(_) => HashSet::new(), - SDemoBlock::Normal(n) => n - .transactions - .iter() - .map(commit::Committable::commit) - .collect(), - } - } -} - impl State for SDemoState { - type Error = SDemoError; + type Error = BlockPayloadError; - type BlockType = SDemoBlock; + type BlockType = VIDBlockPayload; type Time = ViewNumber; fn next_block(_state: Option) -> Self::BlockType { - SDemoBlock::Normal(SDemoNormalBlock { + VIDBlockPayload::Normal(NormalBlockPayload { previous_state: (), transactions: Vec::new(), }) @@ -253,10 +81,10 @@ impl State for SDemoState { fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool { match block { - SDemoBlock::Genesis(_) => { + VIDBlockPayload::Genesis(_) => { view_number == &ViewNumber::genesis() && view_number == &self.view_number } - SDemoBlock::Normal(_n) => self.view_number < *view_number, + VIDBlockPayload::Normal(_n) => self.view_number < *view_number, } } @@ -266,7 +94,7 @@ impl State for SDemoState { view_number: &Self::Time, ) -> Result { if !self.validate_block(block, view_number) { - return Err(SDemoError::InvalidBlock); + return Err(BlockPayloadError::InvalidBlock); } Ok(SDemoState { @@ -285,7 +113,7 @@ impl TestableState for SDemoState { rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - SDemoTransaction { + VIDTransaction { id: rng.gen_range(0..10), padding: vec![0; padding as usize], } @@ -309,10 +137,10 @@ pub struct SDemoTypes; impl NodeType for SDemoTypes { type Time = ViewNumber; - type BlockType = SDemoBlock; + type BlockType = VIDBlockPayload; type SignatureKey = BN254Pub; type VoteTokenType = StaticVoteToken; - type Transaction = SDemoTransaction; + type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = SDemoState; } diff --git a/hotshot/src/demos.rs b/hotshot/src/demos.rs deleted file mode 100644 index 7ddbef89c6..0000000000 --- a/hotshot/src/demos.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Contains implementations of the `HotShot` traits used in the examples and integration testing. -//! -//! These implementations are not suitable for production use. - -/// this is a demo for sequencing consensus -pub mod sdemo; -/// this is a demo for validating consensus -pub mod vdemo; diff --git a/hotshot/src/demos/vdemo.rs b/hotshot/src/demos/vdemo.rs deleted file mode 100644 index 1d091c60b1..0000000000 --- a/hotshot/src/demos/vdemo.rs +++ /dev/null @@ -1,607 +0,0 @@ -// //! Validating (vanilla) consensus demo -// //! -// //! This module provides an implementation of the `HotShot` suite of traits that implements a -// //! basic demonstration of validating consensus. -// //! -// //! These implementations are useful in examples and integration testing, but are not suitable for -// //! production use. -// -// use crate::traits::{ -// election::static_committee::{StaticElectionConfig, StaticVoteToken}, -// Block, -// }; -// use commit::{Commitment, Committable}; -// use derivative::Derivative; -// -// use hotshot_types::{ -// certificate::{QuorumCertificate, YesNoSignature}, -// constants::genesis_proposer_id, -// data::{random_commitment, LeafType, ValidatingLeaf, ViewNumber}, -// traits::{ -// block_contents::Transaction, -// consensus_type::validating_consensus::ValidatingConsensus, -// election::Membership, -// node_implementation::NodeType, -// signature_key::ed25519::Ed25519Pub, -// state::{ConsensusTime, TestableBlock, TestableState}, -// State, -// }, -// }; -// -// use rand::Rng; -// use serde::{Deserialize, Serialize}; -// use snafu::{ensure, Snafu}; -// use std::{ -// collections::{BTreeMap, HashSet}, -// fmt::{Debug, Display}, -// marker::PhantomData, -// }; -// use tracing::error; -// -// /// The account identifier type used by the demo -// /// -// /// This is a type alias to [`String`] for simplicity. -// pub type Account = String; -// -// /// The account balance type used by the demo -// /// -// /// This is a type alias to [`u64`] for simplicity. -// pub type Balance = u64; -// -// /// Records a reduction in an account balance -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct Subtraction { -// /// An account identifier -// pub account: Account, -// /// An account balance -// pub amount: Balance, -// } -// -// /// Records an increase in an account balance -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct Addition { -// /// An account identifier -// pub account: Account, -// /// An account balance -// pub amount: Balance, -// } -// -// /// The error type for the validating demo -// #[derive(Snafu, Debug)] -// pub enum VDemoError { -// /// The subtraction and addition amounts for this transaction were not equal -// InconsistentTransaction, -// /// No such input account exists -// NoSuchInputAccount, -// /// No such output account exists -// NoSuchOutputAccount, -// /// Tried to move more money than was in the account -// InsufficentBalance, -// /// Previous state commitment does not match -// PreviousStateMismatch, -// /// Nonce was reused -// ReusedNonce, -// /// Genesis failure -// GenesisFailed, -// /// Genesis reencountered after initialization -// GenesisAfterStart, -// } -// -// /// The transaction for the validating demo -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct VDemoTransaction { -// /// An increment to an account balance -// pub add: Addition, -// /// A decrement to an account balance -// pub sub: Subtraction, -// /// The nonce for a transaction, no two transactions can have the same nonce -// pub nonce: u64, -// /// Number of bytes to pad to each transaction -// pub padding: Vec, -// } -// -// impl Transaction for VDemoTransaction {} -// -// impl VDemoTransaction { -// /// Ensures that this transaction is at least consistent with itself -// #[must_use] -// pub fn validate_independence(&self) -> bool { -// // Ensure that we are adding to one account exactly as much as we are subtracting from -// // another -// self.add.amount <= self.sub.amount // TODO why not strict equality? -// } -// } -// -// /// The state for the validating demo -// /// NOTE both fields are btrees because we need -// /// ordered-ing otherwise commitments will not match -// #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, Hash)] -// pub struct VDemoState { -// /// Key/value store of accounts and balances -// pub balances: BTreeMap, -// // /// Set of previously seen nonces -// // pub nonces: BTreeSet, -// } -// -// impl Committable for VDemoState { -// fn commit(&self) -> Commitment { -// let mut builder = commit::RawCommitmentBuilder::new("VDemo State Comm"); -// -// for (k, v) in &self.balances { -// builder = builder.u64_field(k, *v); -// } -// builder = builder.constant_str("nonces"); -// -// // for nonce in &self.nonces { -// // builder = builder.u64(*nonce); -// // } -// -// builder.finalize() -// } -// -// fn tag() -> String { -// "VALIDATING_DEMO_STATE".to_string() -// } -// } -// -// /// initializes the first state on genesis -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct VDemoGenesisBlock { -// /// initializes the first state -// pub accounts: BTreeMap, -// } -// -// /// Any block after genesis -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct VDemoNormalBlock { -// /// Block state commitment -// pub previous_state: Commitment, -// /// Transaction vector -// pub transactions: Vec, -// } -// -// /// The block for the validating demo -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub enum VDemoBlock { -// /// genesis block -// Genesis(VDemoGenesisBlock), -// /// normal block -// Normal(VDemoNormalBlock), -// } -// -// impl Committable for VDemoBlock { -// fn commit(&self) -> Commitment { -// match &self { -// VDemoBlock::Genesis(block) => { -// let mut builder = commit::RawCommitmentBuilder::new("VDemo Genesis Comm") -// .u64_field("account_count", block.accounts.len() as u64); -// for account in &block.accounts { -// builder = builder.u64_field(account.0, *account.1); -// } -// builder.finalize() -// } -// VDemoBlock::Normal(block) => { -// let mut builder = commit::RawCommitmentBuilder::new("VDemo Block Comm") -// .var_size_field("Previous State", block.previous_state.as_ref()); -// -// for txn in &block.transactions { -// builder = builder -// .u64_field(&txn.add.account, txn.add.amount) -// .u64_field(&txn.sub.account, txn.sub.amount) -// .constant_str("nonce") -// .u64_field("nonce", txn.nonce); -// } -// -// builder.finalize() -// } -// } -// } -// -// fn tag() -> String { -// "VALIDATING_DEMO_BLOCK".to_string() -// } -// } -// -// impl Committable for VDemoTransaction { -// fn commit(&self) -> Commitment { -// commit::RawCommitmentBuilder::new("VDemo Txn Comm") -// .u64_field(&self.add.account, self.add.amount) -// .u64_field(&self.sub.account, self.sub.amount) -// .constant_str("nonce") -// .u64_field("nonce", self.nonce) -// .finalize() -// } -// -// fn tag() -> String { -// "VALIDATING_DEMO_TXN".to_string() -// } -// } -// -// impl VDemoBlock { -// /// generate a genesis block with the provided initial accounts and balances -// #[must_use] -// pub fn genesis_from(accounts: BTreeMap) -> Self { -// Self::Genesis(VDemoGenesisBlock { accounts }) -// } -// } -// -// impl State for VDemoState { -// type Error = VDemoError; -// -// type BlockType = VDemoBlock; -// -// type Time = ViewNumber; -// -// #[allow(clippy::panic)] -// fn next_block(state: Option) -> Self::BlockType { -// match state { -// Some(state) => VDemoBlock::Normal(VDemoNormalBlock { -// previous_state: state.commit(), -// transactions: Vec::new(), -// }), -// None => panic!("State is required for the next block"), -// } -// } -// -// // Note: validate_block is actually somewhat redundant, its meant to be a quick and dirty check -// // for clarity, the logic is duplicated with append_to -// fn validate_block(&self, block: &Self::BlockType, _view_number: &Self::Time) -> bool { -// match block { -// VDemoBlock::Genesis(_) => self.balances.is_empty(), // && self.nonces.is_empty(), -// VDemoBlock::Normal(block) => { -// let state = self; -// // A valid block is one in which every transaction is internally consistent, and results in -// // nobody having a negative balance -// // -// // We will check this, in this case, by making a trial copy of our balances map, making -// // trial modifications to it, and then asserting that no balances are negative -// let mut trial_balances = state.balances.clone(); -// for tx in &block.transactions { -// // This is a macro from SNAFU that returns an Err if the condition is not satisfied -// // -// // We first check that the transaction is internally consistent, then apply the change -// // to our trial map -// if !tx.validate_independence() { -// error!("validate_independence failed"); -// return false; -// } -// // Find the input account, and subtract the transfer balance from it, failing if it -// // doesn't exist -// if let Some(input_account) = trial_balances.get_mut(&tx.sub.account) { -// *input_account -= tx.sub.amount; -// } else { -// error!("no such input account"); -// return false; -// } -// // Find the output account, and add the transfer balance to it, failing if it doesn't -// // exist -// if let Some(output_account) = trial_balances.get_mut(&tx.add.account) { -// *output_account += tx.add.amount; -// } else { -// error!("no such output account"); -// return false; -// } -// // // Check to make sure the nonce isn't used -// // if state.nonces.contains(&tx.nonce) { -// // warn!(?state, ?tx, "State nonce is used for transaction"); -// // return false; -// // } -// } -// // This block has now passed all our tests, and thus has not done anything bad, so the block -// // is valid if its previous state hash matches that of the previous state -// let result = block.previous_state == state.commit(); -// if !result { -// error!( -// "hash failure. previous_block: {:?} hash_state: {:?}", -// block.previous_state, -// state.commit() -// ); -// } -// result -// } -// } -// } -// -// fn append( -// &self, -// block: &Self::BlockType, -// _view_number: &Self::Time, -// ) -> std::result::Result { -// match block { -// VDemoBlock::Genesis(block) => { -// if self.balances.is_empty() { -// // && self.nonces.is_empty() -// let mut new_state = Self::default(); -// for account in &block.accounts { -// if new_state -// .balances -// .insert(account.0.clone(), *account.1) -// .is_some() -// { -// error!("Adding the same account twice during application of genesis block!"); -// return Err(VDemoError::GenesisFailed); -// } -// } -// Ok(new_state) -// } else { -// Err(VDemoError::GenesisAfterStart) -// } -// } -// VDemoBlock::Normal(block) => { -// let state = self; -// // A valid block is one in which every transaction is internally consistent, and results in -// // nobody having a negative balance -// // -// // We will check this, in this case, by making a trial copy of our balances map, making -// // trial modifications to it, and then asserting that no balances are negative -// let mut trial_balances = state.balances.clone(); -// for tx in &block.transactions { -// // This is a macro from SNAFU that returns an Err if the condition is not satisfied -// // -// // We first check that the transaction is internally consistent, then apply the change -// // to our trial map -// ensure!(tx.validate_independence(), InconsistentTransactionSnafu); -// // Find the input account, and subtract the transfer balance from it, failing if it -// // doesn't exist -// if let Some(input_account) = trial_balances.get_mut(&tx.sub.account) { -// *input_account -= tx.sub.amount; -// } else { -// return Err(VDemoError::NoSuchInputAccount); -// } -// // Find the output account, and add the transfer balance to it, failing if it doesn't -// // exist -// if let Some(output_account) = trial_balances.get_mut(&tx.add.account) { -// *output_account += tx.add.amount; -// } else { -// return Err(VDemoError::NoSuchOutputAccount); -// } -// // // Check for nonce reuse -// // if state.nonces.contains(&tx.nonce) { -// // return Err(VDemoError::ReusedNonce); -// // } -// } -// // Make sure our previous state commitment matches the provided state -// if block.previous_state == state.commit() { -// // This block has now passed all our tests, and thus has not done anything bad -// // Add the nonces from this block -// // let mut nonces = state.nonces.clone(); -// // for tx in &block.transactions { -// // nonces.insert(tx.nonce); -// // } -// Ok(VDemoState { -// balances: trial_balances, -// // nonces, -// }) -// } else { -// Err(VDemoError::PreviousStateMismatch) -// } -// } -// } -// } -// -// fn on_commit(&self) { -// // Does nothing in this implementation -// } -// } -// -// impl Display for VDemoBlock { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// match self { -// VDemoBlock::Genesis(block) => { -// write!(f, "VDemo Genesis Block: {block:#?}") -// } -// VDemoBlock::Normal(block) => { -// write!(f, "VDemo Normal Block #txns={}", block.transactions.len()) -// } -// } -// } -// } -// -// impl TestableState for VDemoState { -// fn create_random_transaction( -// state: Option<&Self>, -// rng: &mut dyn rand::RngCore, -// padding: u64, -// ) -> ::Transaction { -// use rand::seq::IteratorRandom; -// -// let state = state.expect("Missing state"); -// -// let non_zero_balances = state -// .balances -// .iter() -// .filter(|b| *b.1 > 0) -// .collect::>(); -// -// assert!( -// !non_zero_balances.is_empty(), -// "No nonzero balances were available! Unable to generate transaction." -// ); -// -// let input_account = non_zero_balances.iter().choose(rng).unwrap().0; -// let output_account = state.balances.keys().choose(rng).unwrap(); -// let amount = rng.gen_range(0..100); -// -// VDemoTransaction { -// add: Addition { -// account: output_account.to_string(), -// amount, -// }, -// sub: Subtraction { -// account: input_account.to_string(), -// amount, -// }, -// nonce: rng.gen(), -// padding: vec![0; padding as usize], -// } -// } -// } -// -// impl TestableBlock for VDemoBlock { -// fn genesis() -> Self { -// let accounts: BTreeMap = vec![ -// ("Joe", 1_000_000), -// ("Nathan M", 500_000), -// ("John", 400_000), -// ("Nathan Y", 600_000), -// ("Ian", 5_000_000), -// ] -// .into_iter() -// .map(|(x, y)| (x.to_string(), y)) -// .collect(); -// Self::Genesis(VDemoGenesisBlock { accounts }) -// } -// -// fn txn_count(&self) -> u64 { -// if let VDemoBlock::Normal(block) = self { -// block.transactions.len() as u64 -// } else { -// 0 -// } -// } -// } -// -// impl Block for VDemoBlock { -// type Transaction = VDemoTransaction; -// -// type Error = VDemoError; -// -// fn new() -> Self { -// ::genesis() -// } -// -// fn add_transaction_raw( -// &self, -// tx: &Self::Transaction, -// ) -> std::result::Result { -// match self { -// VDemoBlock::Genesis(_) => Err(VDemoError::GenesisAfterStart), -// VDemoBlock::Normal(block) => { -// // first, make sure that the transaction is internally valid -// if tx.validate_independence() { -// // Then check the previous transactions in the block -// if block.transactions.iter().any(|x| x.nonce == tx.nonce) { -// return Err(VDemoError::ReusedNonce); -// } -// let mut new_block = block.clone(); -// // Insert our transaction and return -// new_block.transactions.push(tx.clone()); -// Ok(VDemoBlock::Normal(new_block)) -// } else { -// Err(VDemoError::InconsistentTransaction) -// } -// } -// } -// } -// fn contained_transactions(&self) -> HashSet> { -// match self { -// VDemoBlock::Genesis(_) => HashSet::new(), -// VDemoBlock::Normal(block) => block -// .transactions -// .clone() -// .into_iter() -// .map(|tx| tx.commit()) -// .collect(), -// } -// } -// } -// -// /// Implementation of [`NodeType`] for [`VDemoNode`] -// #[derive( -// Copy, -// Clone, -// Debug, -// Default, -// Hash, -// PartialEq, -// Eq, -// PartialOrd, -// Ord, -// serde::Serialize, -// serde::Deserialize, -// )] -// pub struct VDemoTypes; -// -// impl NodeType for VDemoTypes { -// type ConsensusType = ValidatingConsensus; -// type Time = ViewNumber; -// type BlockType = VDemoBlock; -// type SignatureKey = Ed25519Pub; -// type VoteTokenType = StaticVoteToken; -// type Transaction = VDemoTransaction; -// type ElectionConfigType = StaticElectionConfig; -// type StateType = VDemoState; -// } -// -// /// The node implementation for the validating demo -// #[derive(Derivative)] -// #[derivative(Clone(bound = ""))] -// pub struct VDemoNode(PhantomData) -// where -// MEMBERSHIP: Membership + Debug; -// -// impl VDemoNode -// where -// MEMBERSHIP: Membership + Debug, -// { -// /// Create a new `VDemoNode` -// #[must_use] -// pub fn new() -> Self { -// VDemoNode(PhantomData) -// } -// } -// -// impl Debug for VDemoNode -// where -// MEMBERSHIP: Membership + Debug, -// { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// f.debug_struct("VDemoNode") -// .field("_phantom", &"phantom") -// .finish() -// } -// } -// -// impl Default for VDemoNode -// where -// MEMBERSHIP: Membership + Debug, -// { -// fn default() -> Self { -// Self::new() -// } -// } -// -// /// Provides a random [`QuorumCertificate`] -// pub fn random_quorum_certificate>( -// rng: &mut dyn rand::RngCore, -// ) -> QuorumCertificate { -// QuorumCertificate { -// // block_commitment: random_commitment(rng), -// leaf_commitment: random_commitment(rng), -// view_number: TYPES::Time::new(rng.gen()), -// signatures: YesNoSignature::Yes(BTreeMap::default()), -// is_genesis: rng.gen(), -// } -// } -// -// /// Provides a random [`ValidatingLeaf`] -// pub fn random_validating_leaf>( -// deltas: TYPES::BlockType, -// rng: &mut dyn rand::RngCore, -// ) -> ValidatingLeaf { -// let justify_qc = random_quorum_certificate(rng); -// let state = TYPES::StateType::default() -// .append(&deltas, &TYPES::Time::new(42)) -// .unwrap_or_default(); -// ValidatingLeaf { -// view_number: justify_qc.view_number, -// height: rng.next_u64(), -// justify_qc, -// parent_commitment: random_commitment(rng), -// deltas, -// state, -// rejected: Vec::new(), -// timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), -// proposer_id: genesis_proposer_id(), -// } -// } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 065ed8ee80..76a08ba010 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -19,12 +19,11 @@ #[cfg(feature = "docs")] pub mod documentation; -/// Data availability support -// pub mod da; +pub mod block_impl; /// Contains structures and functions for committee election pub mod certificate; #[cfg(feature = "demo")] -pub mod demos; +pub mod demo; /// Contains traits consumed by [`HotShot`] pub mod traits; /// Contains types used by the crate diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index c788006984..9e06d968d0 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,8 +1,11 @@ -use hotshot::traits::implementations::CombinedNetworks; +use hotshot::{ + block_impl::{VIDBlockPayload, VIDTransaction}, + traits::implementations::CombinedNetworks, +}; use std::{marker::PhantomData, sync::Arc}; use hotshot::{ - demos::sdemo::{SDemoBlock, SDemoState, SDemoTransaction}, + demo::SDemoState, traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig, StaticVoteToken}, implementations::{ @@ -41,10 +44,10 @@ use serde::{Deserialize, Serialize}; pub struct SequencingTestTypes; impl NodeType for SequencingTestTypes { type Time = ViewNumber; - type BlockType = SDemoBlock; + type BlockType = VIDBlockPayload; type SignatureKey = BN254Pub; type VoteTokenType = StaticVoteToken; - type Transaction = SDemoTransaction; + type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = SDemoState; } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 64b70ef40c..f3fb3b0cfb 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -21,7 +21,7 @@ use std::collections::HashMap; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { use hotshot::{ - demos::sdemo::{SDemoBlock, SDemoNormalBlock}, + block_impl::{NormalBlockPayload, VIDBlockPayload}, tasks::add_da_task, }; use hotshot_task_impls::harness::run_harness; @@ -39,7 +39,7 @@ async fn test_da_task() { }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); - let block = SDemoBlock::Normal(SDemoNormalBlock { + let block = VIDBlockPayload::Normal(NormalBlockPayload { previous_state: (), transactions: Vec::new(), }); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 2dcd331396..a05cc0bd5e 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -21,7 +21,7 @@ use std::collections::HashMap; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_network_task() { - use hotshot::demos::sdemo::{SDemoBlock, SDemoNormalBlock}; + use hotshot::block_impl::{NormalBlockPayload, VIDBlockPayload}; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{ @@ -40,7 +40,7 @@ async fn test_network_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let priv_key = api.private_key(); - let block = SDemoBlock::Normal(SDemoNormalBlock { + let block = VIDBlockPayload::Normal(NormalBlockPayload { previous_state: (), transactions: Vec::new(), }); From d479948427695faf87ebfdccccf10a6c85ba51fb Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 15 Sep 2023 13:48:15 -0700 Subject: [PATCH 0092/1393] rename BN254Priv to BLSPrivKey --- hotshot-signature-key/src/bn254.rs | 4 ++-- hotshot-signature-key/src/bn254/bn254_priv.rs | 8 ++++---- hotshot-signature-key/src/bn254/bn254_pub.rs | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs index fc7273afd5..0c2b8a0bad 100644 --- a/hotshot-signature-key/src/bn254.rs +++ b/hotshot-signature-key/src/bn254.rs @@ -1,8 +1,8 @@ //! Demonstration implementation of the [`SignatureKey`] trait using BN254 use hotshot_types::traits::signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}; -/// `BN254Priv` implementation +/// `BLSPrivKey` implementation mod bn254_priv; /// `BN254Pub` implementation mod bn254_pub; -pub use self::{bn254_priv::BN254Priv, bn254_pub::BN254Pub}; +pub use self::{bn254_priv::BLSPrivKey, bn254_pub::BN254Pub}; diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 3b7000eea4..439b52ff39 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -7,12 +7,12 @@ use std::cmp::Ordering; /// Private key type for a bn254 keypair #[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] -pub struct BN254Priv { +pub struct BLSPrivKey { /// The private key for this keypair pub(super) priv_key: QCSignKey, } -impl BN254Priv { +impl BLSPrivKey { /// Generate a new private key from scratch #[must_use] pub fn generate() -> Self { @@ -54,7 +54,7 @@ impl BN254Priv { } } -impl PartialOrd for BN254Priv { +impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.priv_key.to_string(); let other_bytes = &other.priv_key.to_string(); @@ -62,7 +62,7 @@ impl PartialOrd for BN254Priv { } } -impl Ord for BN254Priv { +impl Ord for BLSPrivKey { fn cmp(&self, other: &Self) -> Ordering { let self_bytes = &self.priv_key.to_string(); let other_bytes = &other.priv_key.to_string(); diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 30f022209c..d851b8c155 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -1,4 +1,4 @@ -use super::{BN254Priv, EncodedPublicKey, EncodedSignature, SignatureKey}; +use super::{BLSPrivKey, EncodedPublicKey, EncodedSignature, SignatureKey}; use bincode::Options; use bitvec::prelude::*; use blake3::traits::digest::generic_array::GenericArray; @@ -44,7 +44,7 @@ impl Ord for BN254Pub { } impl SignatureKey for BN254Pub { - type PrivateKey = BN254Priv; + type PrivateKey = BLSPrivKey; type StakeTableEntry = JFStakeTableEntry; type QCParams = JFQCParams< ::VerificationKey, From 97d4c010efd358a38db2d3920e3d22c1c34dbaa5 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 15 Sep 2023 18:41:12 -0700 Subject: [PATCH 0093/1393] rename BN254PubKey to BLSPubKey --- hotshot-signature-key/src/bn254.rs | 4 ++-- hotshot-signature-key/src/bn254/bn254_pub.rs | 10 +++++----- hotshot/src/demos/sdemo.rs | 4 ++-- hotshot/src/traits/election/static_committee.rs | 4 ++-- hotshot/src/traits/storage/memory_storage.rs | 4 ++-- testing/src/node_types.rs | 4 ++-- testing/src/task_helpers.rs | 14 +++++++------- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs index 0c2b8a0bad..2414c89c9c 100644 --- a/hotshot-signature-key/src/bn254.rs +++ b/hotshot-signature-key/src/bn254.rs @@ -2,7 +2,7 @@ use hotshot_types::traits::signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}; /// `BLSPrivKey` implementation mod bn254_priv; -/// `BN254Pub` implementation +/// `BLSPubKey` implementation mod bn254_pub; -pub use self::{bn254_priv::BLSPrivKey, bn254_pub::BN254Pub}; +pub use self::{bn254_priv::BLSPrivKey, bn254_pub::BLSPubKey}; diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index d851b8c155..2ff84a32c8 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -22,12 +22,12 @@ use typenum::U32; /// This type makes use of noise for non-determinisitc signatures. #[derive(Clone, PartialEq, Eq, Hash, Copy, Serialize, Deserialize, Debug)] -pub struct BN254Pub { +pub struct BLSPubKey { /// The public key for this keypair pub_key: VerKey, } -impl PartialOrd for BN254Pub { +impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.pub_key.to_string(); let other_bytes = &other.pub_key.to_string(); @@ -35,7 +35,7 @@ impl PartialOrd for BN254Pub { } } -impl Ord for BN254Pub { +impl Ord for BLSPubKey { fn cmp(&self, other: &Self) -> Ordering { let self_bytes = &self.pub_key.to_string(); let other_bytes = &other.pub_key.to_string(); @@ -43,7 +43,7 @@ impl Ord for BN254Pub { } } -impl SignatureKey for BN254Pub { +impl SignatureKey for BLSPubKey { type PrivateKey = BLSPrivKey; type StakeTableEntry = JFStakeTableEntry; type QCParams = JFQCParams< @@ -114,7 +114,7 @@ impl SignatureKey for BN254Pub { fn from_bytes(bytes: &EncodedPublicKey) -> Option { let x: Result = bincode_opts().deserialize(&bytes.0); match x { - Ok(pub_key) => Some(BN254Pub { pub_key }), + Ok(pub_key) => Some(BLSPubKey { pub_key }), Err(e) => { debug!(?e, "Failed to deserialize public key"); None diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 30d12bc579..d5214a834e 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -16,7 +16,7 @@ use std::{ use commit::{Commitment, Committable}; use derivative::Derivative; use either::Either; -use hotshot_signature_key::bn254::BN254Pub; +use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ certificate::{AssembledSignature, QuorumCertificate}, data::{ @@ -310,7 +310,7 @@ pub struct SDemoTypes; impl NodeType for SDemoTypes { type Time = ViewNumber; type BlockType = SDemoBlock; - type SignatureKey = BN254Pub; + type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = SDemoTransaction; type ElectionConfigType = StaticElectionConfig; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 0e79efacab..011d77c37c 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,7 +1,7 @@ // use ark_bls12_381::Parameters as Param381; use commit::{Commitment, Committable, RawCommitmentBuilder}; use espresso_systems_common::hotshot::tag; -use hotshot_signature_key::bn254::BN254Pub; +use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ data::LeafType, traits::{ @@ -30,7 +30,7 @@ pub struct GeneralStaticCommittee, PUBKEY: Signa } /// static committee using a vrf kp -pub type StaticCommittee = GeneralStaticCommittee; +pub type StaticCommittee = GeneralStaticCommittee; impl, PUBKEY: SignatureKey> GeneralStaticCommittee diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index dbc631d379..dd6e0217d5 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -117,7 +117,7 @@ mod test { use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use super::*; - use hotshot_signature_key::bn254::BN254Pub; + use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ certificate::{AssembledSignature, QuorumCertificate}, data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, @@ -149,7 +149,7 @@ mod test { impl NodeType for DummyTypes { type Time = ViewNumber; type BlockType = DummyBlock; - type SignatureKey = BN254Pub; + type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = ::Transaction; type ElectionConfigType = StaticElectionConfig; diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 1d2985c56d..db1bfce3b5 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -11,7 +11,7 @@ use hotshot::{ }, NodeImplementation, }, - types::bn254::BN254Pub, + types::bn254::BLSPubKey, }; use hotshot_types::{ certificate::ViewSyncCertificate, @@ -43,7 +43,7 @@ pub struct SequencingTestTypes; impl NodeType for SequencingTestTypes { type Time = ViewNumber; type BlockType = SDemoBlock; - type SignatureKey = BN254Pub; + type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = SDemoTransaction; type ElectionConfigType = StaticElectionConfig; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 652ee6b8d9..8f492f051e 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -7,7 +7,7 @@ use either::Right; use hotshot::{ certificate::QuorumCertificate, traits::{BlockPayload, NodeImplementation, TestableNodeImplementation}, - types::{bn254::BN254Pub, SignatureKey, SystemContextHandle}, + types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, HotShotInitializer, HotShotSequencingConsensusApi, SystemContext, }; use hotshot_task::event_stream::ChannelStream; @@ -48,7 +48,7 @@ pub async fn build_system_handle( .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; + let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< @@ -88,7 +88,7 @@ pub async fn build_system_handle( async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, - private_key: &::PrivateKey, + private_key: &::PrivateKey, view: u64, ) -> ( QuorumProposal>, @@ -129,7 +129,7 @@ async fn build_quorum_proposal_and_signature( timestamp: 0, proposer_id: api.public_key().to_bytes(), }; - let signature = ::sign(private_key, leaf.commit().as_ref()); + let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal::> { block_commitment, view_number: ViewNumber::new(view), @@ -145,7 +145,7 @@ async fn build_quorum_proposal_and_signature( pub async fn build_quorum_proposal( handle: &SystemContextHandle, - private_key: &::PrivateKey, + private_key: &::PrivateKey, view: u64, ) -> Proposal>> { let (proposal, signature) = @@ -156,8 +156,8 @@ pub async fn build_quorum_proposal( } } -pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BN254Pub) { - let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; +pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { + let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); (private_key, public_key) } From a07cb0ad7fe451db3bb213d6bbcf69b9bc9f130a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Sat, 16 Sep 2023 00:08:43 -0700 Subject: [PATCH 0094/1393] fix lint --- hotshot-signature-key/src/bn254/bn254_pub.rs | 15 ++++----------- testing/src/task_helpers.rs | 6 ++++-- types/src/traits/election.rs | 9 ++++++--- types/src/traits/signature_key.rs | 15 +++------------ 4 files changed, 17 insertions(+), 28 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 2ff84a32c8..025d455129 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -50,11 +50,9 @@ impl SignatureKey for BLSPubKey { ::VerificationKey, ::PublicParameter, >; - type PureAssembledSignatureType = ::Signature; - type QCType = ( - Self::PureAssembledSignatureType, - BitVec, - ); + type PureAssembledSignatureType = + ::Signature; + type QCType = (Self::PureAssembledSignatureType, BitVec); #[instrument(skip(self))] fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool { @@ -156,12 +154,7 @@ impl SignatureKey for BLSPubKey { BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() } - fn get_sig_proof( - signature: &Self::QCType, - ) -> ( - Self::PureAssembledSignatureType, - BitVec, - ) { + fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { signature.clone() } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8f492f051e..8e0f85a95c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -48,7 +48,8 @@ pub async fn build_system_handle( .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; + let private_key = + ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< @@ -157,7 +158,8 @@ pub async fn build_quorum_proposal( } pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { - let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; + let private_key = + ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); (private_key, public_key) } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 509c337ed3..81dfd46d57 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -455,7 +455,8 @@ pub trait ConsensusExchange: Send + Sync { &self, _vota_meta: VoteMetaData, _accumulator: VoteAccumulator, - ) -> Either, Self::Certificate> { + ) -> Either, Self::Certificate> + { todo!() // TODO ED Remove this function } @@ -754,7 +755,8 @@ impl< view_number: TYPES::Time, accumlator: VoteAccumulator, _relay: Option, - ) -> Either, Self::Certificate> { + ) -> Either, Self::Certificate> + { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), @@ -1421,7 +1423,8 @@ impl< view_number: TYPES::Time, accumlator: VoteAccumulator, TYPES>, relay: Option, - ) -> Either, TYPES>, Self::Certificate> { + ) -> Either, TYPES>, Self::Certificate> + { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 4f115dbbe3..d5141816f5 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -20,17 +20,13 @@ use tagged_base64::tagged; PartialOrd, Ord, )] -pub struct EncodedPublicKey( - #[debug(with = "custom_debug::hexbuf")] pub Vec, -); +pub struct EncodedPublicKey(#[debug(with = "custom_debug::hexbuf")] pub Vec); /// Type saftey wrapper for byte encoded signature #[derive( Clone, custom_debug::Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, )] -pub struct EncodedSignature( - #[debug(with = "custom_debug::hexbuf")] pub Vec, -); +pub struct EncodedSignature(#[debug(with = "custom_debug::hexbuf")] pub Vec); impl AsRef<[u8]> for EncodedSignature { fn as_ref(&self) -> &[u8] { @@ -122,12 +118,7 @@ pub trait SignatureKey: fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; /// get the assembled signature and the `BitVec` separately from the assembled signature - fn get_sig_proof( - signature: &Self::QCType, - ) -> ( - Self::PureAssembledSignatureType, - BitVec, - ); + fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); /// assemble the signature from the partial signature and the indication of signers in `BitVec` fn assemble( From 0b2970a9dc69b9b730809d160bd0b386d017e776 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:17:15 -0400 Subject: [PATCH 0095/1393] TimeoutCert trait and TimeoutVote2 trait done --- types/src/certificate.rs | 41 ++++++++++++++++++++++++++++++++++++++++ types/src/vote.rs | 34 +++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 9792026dcf..b86e96cce8 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,8 +1,10 @@ //! Provides two types of cerrtificates and their accumulators. +use crate::vote::AccumulatorPlaceholder; use crate::vote::DAVoteAccumulator; use crate::vote::QuorumVote; use crate::vote::QuorumVoteAccumulator; +use crate::vote::TimeoutVote2; use crate::vote::ViewSyncVoteAccumulator; use crate::vote::VoteType; use crate::{ @@ -83,6 +85,45 @@ pub struct TimeoutCertificate { pub signatures: AssembledSignature, } +impl SignedCertificate + for TimeoutCertificate +{ + type Vote = TimeoutVote2; + + type VoteAccumulator = AccumulatorPlaceholder; + + fn from_signatures_and_commitment( + signatures: AssembledSignature, + vote: Self::Vote, + ) -> Self { + todo!() + } + + fn view_number(&self) -> TYPES::Time { + todo!() + } + + fn signatures(&self) -> AssembledSignature { + todo!() + } + + fn leaf_commitment(&self) -> Commitment { + todo!() + } + + fn set_leaf_commitment(&mut self, commitment: Commitment) { + todo!() + } + + fn is_genesis(&self) -> bool { + todo!() + } + + fn genesis() -> Self { + todo!() + } +} + /// Certificate for view sync. #[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] #[serde(bound(deserialize = ""))] diff --git a/types/src/vote.rs b/types/src/vote.rs index d5dda8d825..e92d6175d0 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -95,6 +95,40 @@ pub struct TimeoutVote> { pub vote_data: VoteData, } +/// A timeout vote +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct TimeoutVote2 { + /// The signature share associated with this vote + pub signature: (EncodedPublicKey, EncodedSignature), + /// The view this vote was cast for + pub current_view: TYPES::Time, + /// The vote token generated by this replica + pub vote_token: TYPES::VoteTokenType, +} + +impl VoteType for TimeoutVote2 { + fn get_view(&self) -> ::Time { + todo!() + } + + fn get_key(&self) -> ::SignatureKey { + todo!() + } + + fn get_signature(&self) -> EncodedSignature { + todo!() + } + + fn get_data(&self) -> VoteData { + todo!() + } + + fn get_vote_token(&self) -> ::VoteTokenType { + todo!() + } +} + /// The internals of a view sync vote #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] From c49050b3a78d90d3721e4cf2c5c5cf29f9f50bf8 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:22:04 -0400 Subject: [PATCH 0096/1393] TimeoutExchange wireframe --- types/src/traits/election.rs | 100 ++++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 2 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81dfd46d57..832d9fe4b0 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,9 +9,9 @@ use super::{ }; use crate::{ certificate::{ - AssembledSignature, DACertificate, QuorumCertificate, ViewSyncCertificate, VoteMetaData, + AssembledSignature, DACertificate, QuorumCertificate, ViewSyncCertificate, VoteMetaData, TimeoutCertificate, }, - data::{DAProposal, ProposalType}, + data::{DAProposal, ProposalType}, vote::TimeoutVote2, }; use crate::{ @@ -1448,6 +1448,102 @@ impl< } } +// TODO ED All the exchange structs are the same. We could just considate them into one struct +/// Standard implementation of a Timeout Exchange based on Hot Stuff consensus. +#[derive(Derivative)] +#[derivative(Clone, Debug)] +pub struct TimeoutExchange< + TYPES: NodeType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, +> { + /// The network being used by this exchange. + network: NETWORK, + /// The committee which votes on proposals. + membership: MEMBERSHIP, + /// This participant's public key. + public_key: TYPES::SignatureKey, + /// Entry with public key and staking value for certificate aggregation in the stake table. + entry: ::StakeTableEntry, + /// This participant's private key. + #[derivative(Debug = "ignore")] + private_key: ::PrivateKey, + #[doc(hidden)] + _pd: PhantomData<(PROPOSAL, MEMBERSHIP, M)>, +} + +// TODO ED Get rid of ProposalType as generic, is debt left over from Validating Consensus +impl< + TYPES: NodeType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, + > ConsensusExchange for TimeoutExchange +{ + type Proposal = PROPOSAL; + type Vote = TimeoutVote2; + type Certificate = TimeoutCertificate; + type Membership = MEMBERSHIP; + type Networking = NETWORK; + type Commitment = TYPES::Time; + + fn create( + entries: Vec<::StakeTableEntry>, + config: TYPES::ElectionConfigType, + network: Self::Networking, + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self { + let membership = + >::Membership::create_election(entries, config); + Self { + network, + membership, + public_key: pk, + entry, + private_key: sk, + _pd: PhantomData, + } + } + + fn network(&self) -> &NETWORK { + &self.network + } + + fn vote_data(&self, _commit: Commitment) -> VoteData { + unimplemented!() + } + + + fn membership(&self) -> &Self::Membership { + &self.membership + } + fn public_key(&self) -> &TYPES::SignatureKey { + &self.public_key + } + fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { + &self.private_key + } + + fn accumulate_vote( + &self, + encoded_key: &EncodedPublicKey, + encoded_signature: &EncodedSignature, + leaf_commitment: Commitment, + vote_data: VoteData, + vote_token: ::VoteTokenType, + view_number: ::Time, + accumlator: VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, + relay: Option, + ) -> Either::VoteTokenType, Self::Commitment, TYPES>, Self::Certificate> { + todo!() + } +} + /// Testable implementation of a [`Membership`]. Will expose a method to generate a vote token used for testing. pub trait TestableElection: Membership { /// Generate a vote token used for testing. From 225459d56ad98283f08f31fd8f02f9ea727e3992 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 15:08:58 -0400 Subject: [PATCH 0097/1393] Add TimeoutExchangeType trait --- types/src/traits/election.rs | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 832d9fe4b0..ce58bdd62b 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,9 +9,11 @@ use super::{ }; use crate::{ certificate::{ - AssembledSignature, DACertificate, QuorumCertificate, ViewSyncCertificate, VoteMetaData, TimeoutCertificate, + AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, + ViewSyncCertificate, VoteMetaData, }, - data::{DAProposal, ProposalType}, vote::TimeoutVote2, + data::{DAProposal, ProposalType}, + vote::TimeoutVote2, }; use crate::{ @@ -1474,6 +1476,18 @@ pub struct TimeoutExchange< _pd: PhantomData<(PROPOSAL, MEMBERSHIP, M)>, } +pub trait TimeoutExchangeType: ConsensusExchange {} + +impl< + TYPES: NodeType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, + > TimeoutExchangeType for TimeoutExchange +{ +} + // TODO ED Get rid of ProposalType as generic, is debt left over from Validating Consensus impl< TYPES: NodeType, @@ -1518,7 +1532,6 @@ impl< unimplemented!() } - fn membership(&self) -> &Self::Membership { &self.membership } @@ -1539,7 +1552,10 @@ impl< view_number: ::Time, accumlator: VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, relay: Option, - ) -> Either::VoteTokenType, Self::Commitment, TYPES>, Self::Certificate> { + ) -> Either< + VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, + Self::Certificate, + > { todo!() } } From 8b3e430aa1ae0cf002fadace69832145aef2b770 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 15:58:26 -0400 Subject: [PATCH 0098/1393] Compiles with timeout_exchange added to consensus task --- hotshot/src/lib.rs | 1 + hotshot/src/tasks/mod.rs | 1 + task-impls/src/consensus.rs | 3 ++ types/src/traits/node_implementation.rs | 37 ++++++++++++++++++++++--- 4 files changed, 38 insertions(+), 4 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b09ea6235d..e2582b85b8 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -182,6 +182,7 @@ impl> SystemContext { initializer: HotShotInitializer, metrics: Box, ) -> Result> { + debug!("Creating a new hotshot"); let consensus_metrics = Arc::new(ConsensusMetrics::new( diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 619fdb6d59..87c776563a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -286,6 +286,7 @@ where cur_view: TYPES::Time::new(0), block: TYPES::BlockType::new(), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), + timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), _pd: PhantomData, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dccf7919c3..defa7a6475 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -34,6 +34,7 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote::{QuorumVote, VoteType}, }; +use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use snafu::Snafu; use std::{ @@ -89,6 +90,8 @@ pub struct SequencingConsensusTaskState< /// the quorum exchange pub quorum_exchange: Arc>, + pub timeout_exchange: Arc>, + /// Consensus api pub api: A, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 49d9184bc4..7219743536 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -7,7 +7,7 @@ use super::{ block_contents::Transaction, election::{ CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, - ViewSyncExchangeType, VoteToken, + TimeoutExchange, TimeoutExchangeType, ViewSyncExchangeType, VoteToken, }, network::{CommunicationChannel, NetworkMsg, TestableNetworkingImplementation}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -154,12 +154,16 @@ pub trait ExchangesType, MESSA /// Get the committee exchange. fn committee_exchange(&self) -> &Self::CommitteeExchange; + fn timeout_exchange(&self) -> &Self::TimeoutExchange; + /// Protocol for exchanging quorum proposals and votes. type QuorumExchange: QuorumExchangeType + Clone + Debug; /// Protocol for exchanging view sync proposals and votes. type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; + type TimeoutExchange: TimeoutExchangeType + Clone + Debug; + /// Election configurations for exchanges type ElectionConfigs; @@ -217,9 +221,9 @@ pub trait TestableExchange, ME pub struct SequencingExchanges< TYPES: NodeType, MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType, MESSAGE>, - COMMITTEEEXCHANGE: CommitteeExchangeType, - VIEWSYNCEXCHANGE: ViewSyncExchangeType, + QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, + COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, + VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, > { /// Quorum exchange. quorum_exchange: QUORUMEXCHANGE, @@ -230,6 +234,9 @@ pub struct SequencingExchanges< /// Committee exchange. committee_exchange: COMMITTEEEXCHANGE, + // TODO ED Make this not public + pub timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>, + /// Phantom data. _phantom: PhantomData<(TYPES, MESSAGE)>, } @@ -248,12 +255,17 @@ where type CommitteeExchange = COMMITTEEEXCHANGE; type QuorumExchange = QUORUMEXCHANGE; type ViewSyncExchange = VIEWSYNCEXCHANGE; + type TimeoutExchange = TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>; type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); fn committee_exchange(&self) -> &COMMITTEEEXCHANGE { &self.committee_exchange } + fn timeout_exchange(&self) -> &Self::TimeoutExchange { + &self.timeout_exchange + } + fn create( entries: Vec<::StakeTableEntry>, configs: Self::ElectionConfigs, @@ -267,6 +279,14 @@ where sk: ::PrivateKey, ) -> Self { let quorum_exchange = QUORUMEXCHANGE::create( + entries.clone(), + configs.0.clone(), + networks.0.clone(), + pk.clone(), + entry.clone(), + sk.clone(), + ); + let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( entries.clone(), configs.0.clone(), networks.0, @@ -274,6 +294,7 @@ where entry.clone(), sk.clone(), ); + let view_sync_exchange = VIEWSYNCEXCHANGE::create( entries.clone(), configs.0, @@ -289,6 +310,7 @@ where quorum_exchange, committee_exchange, view_sync_exchange, + timeout_exchange, _phantom: PhantomData, } } @@ -327,6 +349,13 @@ pub type SequencingQuorumEx = Message, >>::QuorumExchange; + pub type SequencingTimeoutEx = + <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, + >>::TimeoutExchange; + /// Alias for the [`CommitteeExchange`] type. pub type CommitteeEx = <>::Exchanges as ExchangesType< TYPES, From 96b96270b2f4e6e4935bc97570a77f9152d35487 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 16:27:04 -0400 Subject: [PATCH 0099/1393] Rough addition of create_timeout_message on timeout_exchange --- task-impls/src/consensus.rs | 10 ++++--- types/src/traits/election.rs | 35 ++++++++++++++++++++++++- types/src/traits/node_implementation.rs | 2 +- 3 files changed, 42 insertions(+), 5 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index defa7a6475..90ed192393 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -15,6 +15,8 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::traits::election::TimeoutExchangeType; +use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use hotshot_types::vote::QuorumVoteAccumulator; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, @@ -34,7 +36,6 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote::{QuorumVote, VoteType}, }; -use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use snafu::Snafu; use std::{ @@ -90,6 +91,7 @@ pub struct SequencingConsensusTaskState< /// the quorum exchange pub quorum_exchange: Arc>, + /// The timeout exchange pub timeout_exchange: Arc>, /// Consensus api @@ -1068,8 +1070,10 @@ where } } SequencingHotShotEvent::Timeout(view) => { - // The view sync module will handle updating views in the case of timeout - // TODO ED In the future send a timeout vote + + // TODO ED Why I here and not in quorum exchange? + self.timeout_exchange.create_timeout_message::(view); + self.quorum_exchange .network() .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index ce58bdd62b..c0da8fef1d 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1476,7 +1476,40 @@ pub struct TimeoutExchange< _pd: PhantomData<(PROPOSAL, MEMBERSHIP, M)>, } -pub trait TimeoutExchangeType: ConsensusExchange {} +impl< + TYPES: NodeType, + PROPOSAL: ProposalType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, + > TimeoutExchange +{ +} + +pub trait TimeoutExchangeType: ConsensusExchange { + // TODO ED Clean this function up + fn create_timeout_message>( + &self, + view: TYPES::Time, + ) -> GeneralConsensusMessage + where + I::Exchanges: ExchangesType>, + { + let signature = TYPES::SignatureKey::sign( + &self.private_key(), + VoteData::::Timeout(view.commit()) + .commit() + .as_ref(), + ); + + // GeneralConsensusMessage::::TODO { + // signature: (self.public_key.to_bytes(), signature), + // current_view: view, + // vote_data: VoteData::Timeout(view.commit()), + // }) + todo!() + } +} impl< TYPES: NodeType, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 7219743536..f7aafe8615 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -349,7 +349,7 @@ pub type SequencingQuorumEx = Message, >>::QuorumExchange; - pub type SequencingTimeoutEx = +pub type SequencingTimeoutEx = <>::Exchanges as ExchangesType< TYPES, >::Leaf, From a60d21f2366934f03a9a57596d478bc31807f4fd Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Sep 2023 14:37:24 -0700 Subject: [PATCH 0100/1393] Combine genesis and normal block, remove previous_state, rename SDemoTypes. --- .../examples/web-server-da/multi-validator.rs | 4 +- .../web-server-da/multi-web-server.rs | 8 +- .../examples/web-server-da/orchestrator.rs | 4 +- hotshot/examples/web-server-da/types.rs | 54 ++++++------- hotshot/examples/web-server-da/validator.rs | 4 +- hotshot/examples/web-server-da/web-server.rs | 4 +- hotshot/src/block_impl.rs | 75 ++++--------------- hotshot/src/demo.rs | 30 ++++---- testing/tests/da_task.rs | 10 +-- testing/tests/network_task.rs | 7 +- types/src/traits/block_contents.rs | 2 +- 11 files changed, 70 insertions(+), 132 deletions(-) diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index a16f2951ed..3ed46fa979 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::SDemoTypes; +use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; @@ -49,7 +49,7 @@ async fn main() { for _ in 0..args.num_nodes { let node = async_spawn(async move { infra_da::main_entry_point::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/multi-web-server.rs b/hotshot/examples/web-server-da/multi-web-server.rs index 39a9b7a87e..f954050ad0 100644 --- a/hotshot/examples/web-server-da/multi-web-server.rs +++ b/hotshot/examples/web-server-da/multi-web-server.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; use clap::Parser; -use hotshot::demo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::error; #[derive(Parser, Debug)] @@ -27,7 +27,7 @@ async fn main() { let cdn_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_cdn), args.cdn_port) .await { @@ -37,7 +37,7 @@ async fn main() { }); let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_da), args.da_port) .await { @@ -47,7 +47,7 @@ async fn main() { }); let vs_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_view_sync), args.view_sync_port) .await { diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 785daeecb1..594d004a93 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::instrument; use types::ThisMembership; @@ -29,7 +29,7 @@ async fn main() { let args = OrchestratorArgs::parse(); run_orchestrator_da::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 4b4905062e..017784b354 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -1,6 +1,6 @@ use crate::infra_da::WebServerDARun; use hotshot::{ - demo::SDemoTypes, + demo::DemoTypes, traits::{ election::static_committee::GeneralStaticCommittee, implementations::{MemoryStorage, WebCommChannel}, @@ -22,54 +22,54 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = SequencingLeaf; +pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = WebCommChannel; -pub type QuorumNetwork = WebCommChannel; -pub type ViewSyncNetwork = WebCommChannel; + GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = WebCommChannel; +pub type QuorumNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebCommChannel; -pub type ThisDAProposal = DAProposal; -pub type ThisDAVote = DAVote; +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; -pub type ThisQuorumProposal = QuorumProposal; -pub type ThisQuorumVote = QuorumVote; +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; -pub type ThisViewSyncProposal = ViewSyncCertificate; -pub type ThisViewSyncVote = ViewSyncVote; +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; - type Leaf = SequencingLeaf; +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; type Exchanges = SequencingExchanges< - SDemoTypes, - Message, + DemoTypes, + Message, QuorumExchange< - SDemoTypes, + DemoTypes, Self::Leaf, ThisQuorumProposal, ThisMembership, QuorumNetwork, - Message, + Message, >, - CommitteeExchange>, + CommitteeExchange>, ViewSyncExchange< - SDemoTypes, + DemoTypes, ThisViewSyncProposal, ThisMembership, ViewSyncNetwork, - Message, + Message, >, >; - type ConsensusMessage = SequencingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index caf46bdc74..ab44e02991 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -29,7 +29,7 @@ async fn main() { args.host, args.port ); infra_da::main_entry_point::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/web-server.rs b/hotshot/examples/web-server-da/web-server.rs index bbf0bbc691..9c4912c6a3 100644 --- a/hotshot/examples/web-server-da/web-server.rs +++ b/hotshot/examples/web-server-da/web-server.rs @@ -1,4 +1,4 @@ -use hotshot::demo::SDemoTypes; +use hotshot::demo::DemoTypes; use std::sync::Arc; use async_compatibility_layer::{ @@ -23,7 +23,7 @@ async fn main() { let (server_shutdown_sender, server_shutdown) = oneshot(); let _sender = Arc::new(server_shutdown_sender); let _result = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown), args.port) .await; } diff --git a/hotshot/src/block_impl.rs b/hotshot/src/block_impl.rs index d3e4755129..dc7ebf878d 100644 --- a/hotshot/src/block_impl.rs +++ b/hotshot/src/block_impl.rs @@ -63,48 +63,21 @@ pub enum BlockPayloadError { GenesisFailed, /// Genesis reencountered after initialization GenesisAfterStart, - /// no transasctions added to genesis - GenesisCantHaveTransactions, /// invalid block InvalidBlock, } -/// genesis block +/// A [`BlockPayload`] that contains a list of `VIDTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct GenesisBlockPayload {} - -/// Any block after genesis -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct NormalBlockPayload { - /// [`BlockPayload`] state commitment - pub previous_state: (), - /// [`VIDTransaction`] vector - pub transactions: Vec, -} - -/// The block for the sequencing demo -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub enum VIDBlockPayload { - /// genesis block payload - Genesis(GenesisBlockPayload), - /// normal block payload - Normal(NormalBlockPayload), -} +pub struct VIDBlockPayload(pub Vec); impl Committable for VIDBlockPayload { fn commit(&self) -> Commitment { - match &self { - VIDBlockPayload::Genesis(_) => { - commit::RawCommitmentBuilder::new("Genesis Comm").finalize() - } - VIDBlockPayload::Normal(block) => { - let mut builder = commit::RawCommitmentBuilder::new("Normal Comm"); - for txn in &block.transactions { - builder = builder.u64_field("transaction", **txn); - } - builder.finalize() - } + let mut builder = commit::RawCommitmentBuilder::new("Normal Comm"); + for txn in &self.0 { + builder = builder.u64_field("transaction", **txn); } + builder.finalize() } fn tag() -> String { @@ -114,27 +87,17 @@ impl Committable for VIDBlockPayload { impl Display for VIDBlockPayload { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - VIDBlockPayload::Genesis(_) => { - write!(f, "Genesis BlockPayload") - } - VIDBlockPayload::Normal(block) => { - write!(f, "Normal BlockPayload #txns={}", block.transactions.len()) - } - } + write!(f, "BlockPayload #txns={}", self.0.len()) } } impl TestableBlock for VIDBlockPayload { fn genesis() -> Self { - VIDBlockPayload::Genesis(GenesisBlockPayload {}) + VIDBlockPayload(Vec::new()) } fn txn_count(&self) -> u64 { - match self { - VIDBlockPayload::Genesis(_) => 0, - VIDBlockPayload::Normal(n) => n.transactions.len() as u64, - } + self.0.len() as u64 } } @@ -151,24 +114,12 @@ impl BlockPayload for VIDBlockPayload { &self, tx: &Self::Transaction, ) -> std::result::Result { - match self { - VIDBlockPayload::Genesis(_) => Err(BlockPayloadError::GenesisCantHaveTransactions), - VIDBlockPayload::Normal(n) => { - let mut new = n.clone(); - new.transactions.push(tx.clone()); - Ok(VIDBlockPayload::Normal(new)) - } - } + let mut new = self.0.clone(); + new.push(tx.clone()); + Ok(VIDBlockPayload(new)) } fn contained_transactions(&self) -> HashSet> { - match self { - VIDBlockPayload::Genesis(_) => HashSet::new(), - VIDBlockPayload::Normal(n) => n - .transactions - .iter() - .map(commit::Committable::commit) - .collect(), - } + self.0.iter().map(commit::Committable::commit).collect() } } diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index a4836faebd..eed7f2b785 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -6,7 +6,7 @@ //! These implementations are useful in examples and integration testing, but are not suitable for //! production use. use crate::{ - block_impl::{BlockPayloadError, NormalBlockPayload, VIDBlockPayload, VIDTransaction}, + block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}, }; use commit::{Commitment, Committable}; @@ -73,18 +73,14 @@ impl State for SDemoState { type Time = ViewNumber; fn next_block(_state: Option) -> Self::BlockType { - VIDBlockPayload::Normal(NormalBlockPayload { - previous_state: (), - transactions: Vec::new(), - }) + VIDBlockPayload(Vec::new()) } - fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool { - match block { - VIDBlockPayload::Genesis(_) => { - view_number == &ViewNumber::genesis() && view_number == &self.view_number - } - VIDBlockPayload::Normal(_n) => self.view_number < *view_number, + fn validate_block(&self, _block: &Self::BlockType, view_number: &Self::Time) -> bool { + if view_number == &ViewNumber::genesis() { + &self.view_number == view_number + } else { + self.view_number < *view_number } } @@ -133,9 +129,9 @@ impl TestableState for SDemoState { serde::Serialize, serde::Deserialize, )] -pub struct SDemoTypes; +pub struct DemoTypes; -impl NodeType for SDemoTypes { +impl NodeType for DemoTypes { type Time = ViewNumber; type BlockType = VIDBlockPayload; type SignatureKey = BLSPubKey; @@ -150,11 +146,11 @@ impl NodeType for SDemoTypes { #[derivative(Clone(bound = ""))] pub struct SDemoNode(PhantomData) where - MEMBERSHIP: Membership + std::fmt::Debug; + MEMBERSHIP: Membership + std::fmt::Debug; impl SDemoNode where - MEMBERSHIP: Membership + std::fmt::Debug, + MEMBERSHIP: Membership + std::fmt::Debug, { /// Create a new `SDemoNode` #[must_use] @@ -165,7 +161,7 @@ where impl Debug for SDemoNode where - MEMBERSHIP: Membership + std::fmt::Debug, + MEMBERSHIP: Membership + std::fmt::Debug, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("SDemoNode") @@ -176,7 +172,7 @@ where impl Default for SDemoNode where - MEMBERSHIP: Membership + std::fmt::Debug, + MEMBERSHIP: Membership + std::fmt::Debug, { fn default() -> Self { Self::new() diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 545442fa20..a9dcbd0c2e 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -20,10 +20,7 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot::{ - block_impl::{NormalBlockPayload, VIDBlockPayload}, - tasks::add_da_task, - }; + use hotshot::{block_impl::VIDBlockPayload, tasks::add_da_task}; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{message::Proposal, traits::election::CommitteeExchangeType}; @@ -39,10 +36,7 @@ async fn test_da_task() { }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); - let block = VIDBlockPayload::Normal(NormalBlockPayload { - previous_state: (), - transactions: Vec::new(), - }); + let block = VIDBlockPayload(Vec::new()); let block_commitment = block.commit(); let signature = committee_exchange.sign_da_proposal(&block_commitment); let proposal = DAProposal { diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f1d720946f..8239fc72d4 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -21,7 +21,7 @@ use std::collections::HashMap; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_network_task() { - use hotshot::block_impl::{NormalBlockPayload, VIDBlockPayload}; + use hotshot::block_impl::VIDBlockPayload; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{ @@ -40,10 +40,7 @@ async fn test_network_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let priv_key = api.private_key(); - let block = VIDBlockPayload::Normal(NormalBlockPayload { - previous_state: (), - transactions: Vec::new(), - }); + let block = VIDBlockPayload(Vec::new()); let block_commitment = block.commit(); let signature = committee_exchange.sign_da_proposal(&block_commitment); let da_proposal = Proposal { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index eb4dce2753..a365dd7144 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -134,7 +134,7 @@ pub mod dummy { type Transaction = DummyTransaction; fn new() -> Self { - ::genesis() + Self { nonce: 0 } } fn add_transaction_raw( From b01f5cfc231786437ee847c0b34c9aef005f9870 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Sep 2023 15:04:26 -0700 Subject: [PATCH 0101/1393] Remove id from txn --- hotshot/src/block_impl.rs | 44 ++++++++++++++------------------------- hotshot/src/demo.rs | 7 ++----- 2 files changed, 18 insertions(+), 33 deletions(-) diff --git a/hotshot/src/block_impl.rs b/hotshot/src/block_impl.rs index dc7ebf878d..25ae971406 100644 --- a/hotshot/src/block_impl.rs +++ b/hotshot/src/block_impl.rs @@ -2,7 +2,6 @@ use std::{ collections::HashSet, fmt::{Debug, Display}, - ops::Deref, }; use commit::{Commitment, Committable}; @@ -12,30 +11,17 @@ use snafu::Snafu; /// The transaction in a [`VIDBlockPayload`]. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct VIDTransaction { - /// identifier for the transaction - pub id: u64, - /// padding to add to txn (to make it larger and thereby more realistic) - pub padding: Vec, -} - -impl Deref for VIDTransaction { - type Target = u64; - - fn deref(&self) -> &Self::Target { - &self.id - } -} +pub struct VIDTransaction(pub Vec); impl Committable for VIDTransaction { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("SDemo Txn Comm") - .u64_field("id", self.id) - .finalize() + // TODO: Use use VID block commitment. + // + commit::RawCommitmentBuilder::new("Txn Comm").finalize() } fn tag() -> String { - "SEQUENCING_DEMO_TXN".to_string() + "SEQUENCING_TXN".to_string() } } @@ -44,11 +30,14 @@ impl Transaction for VIDTransaction {} impl VIDTransaction { /// create a new transaction #[must_use] - pub fn new(id: u64) -> Self { - Self { - id, - padding: vec![], - } + pub fn new() -> Self { + Self(Vec::new()) + } +} + +impl Default for VIDTransaction { + fn default() -> Self { + Self::new() } } @@ -73,10 +62,9 @@ pub struct VIDBlockPayload(pub Vec); impl Committable for VIDBlockPayload { fn commit(&self) -> Commitment { - let mut builder = commit::RawCommitmentBuilder::new("Normal Comm"); - for txn in &self.0 { - builder = builder.u64_field("transaction", **txn); - } + // TODO: Use use VID block commitment. + // + let builder = commit::RawCommitmentBuilder::new("BlockPayload Comm"); builder.finalize() } diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index eed7f2b785..feaed2fe6b 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -106,13 +106,10 @@ impl State for SDemoState { impl TestableState for SDemoState { fn create_random_transaction( _state: Option<&Self>, - rng: &mut dyn rand::RngCore, + _rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - VIDTransaction { - id: rng.gen_range(0..10), - padding: vec![0; padding as usize], - } + VIDTransaction(vec![0; padding as usize]) } } /// Implementation of [`NodeType`] for [`VDemoNode`] From a4066fece1b8642824ae3810d53e19e6a5cf7f1f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 18:17:36 -0400 Subject: [PATCH 0102/1393] Add Timeout events to networking task --- task-impls/src/consensus.rs | 26 ++++++++++++++++++++++++-- task-impls/src/events.rs | 4 +++- task-impls/src/network.rs | 15 +++++++++++++++ types/src/message.rs | 11 ++++++++++- types/src/traits/election.rs | 30 ++++++++++++++++-------------- 5 files changed, 68 insertions(+), 18 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 90ed192393..36dced718b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1069,10 +1069,32 @@ where ); } } + SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + panic!() + } SequencingHotShotEvent::Timeout(view) => { + let vote_token = self.timeout_exchange.make_vote_token(view); - // TODO ED Why I here and not in quorum exchange? - self.timeout_exchange.create_timeout_message::(view); + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for consensus committee on {:?}", view); + } + Ok(Some(vote_token)) => { + // TODO ED Why I here and not in quorum exchange? + let message = self + .timeout_exchange + .create_timeout_message::(view, vote_token); + + if let GeneralConsensusMessage::TimeoutVote(vote) = message { + self.event_stream + .publish(SequencingHotShotEvent::TimeoutVoteSend(vote)) + .await; + } + } + } self.quorum_exchange .network() diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index de8e31fd03..6c3d978bdb 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -5,7 +5,7 @@ use hotshot_types::{ traits::node_implementation::{ NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, + vote::{DAVote, QuorumVote, ViewSyncVote, TimeoutVote2}, }; use crate::view_sync::ViewSyncPhase; @@ -19,6 +19,8 @@ pub enum SequencingHotShotEvent> { QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote), + TimeoutVoteRecv(TimeoutVote2), + TimeoutVoteSend(TimeoutVote2), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 5fbada8539..2070eda63f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -89,6 +89,9 @@ impl< GeneralConsensusMessage::ViewSyncCertificate(view_sync_message) => { SequencingHotShotEvent::ViewSyncCertificateRecv(view_sync_message) } + GeneralConsensusMessage::TimeoutVote(message) => { + SequencingHotShotEvent::TimeoutVoteRecv(message) + } GeneralConsensusMessage::InternalTrigger(_) => { error!("Got unexpected message type in network task!"); return; @@ -282,6 +285,16 @@ impl< Some(membership.get_leader(vote.round() + vote.relay())), ) } + SequencingHotShotEvent::TimeoutVoteSend(vote) => { + ( + vote.get_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::TimeoutVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view())), + ) + } SequencingHotShotEvent::ViewChange(view) => { self.view = view; return None; @@ -336,6 +349,8 @@ impl< | SequencingHotShotEvent::DACSend(_, _) | SequencingHotShotEvent::VidCertSend(_, _) | SequencingHotShotEvent::ViewChange(_) + | SequencingHotShotEvent::TimeoutVoteSend(_) + ) } diff --git a/types/src/message.rs b/types/src/message.rs index d5897c0ef8..28daf64b43 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -13,7 +13,7 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{DAVote, QuorumVote, ViewSyncVote, VoteType}, + vote::{DAVote, QuorumVote, TimeoutVote, TimeoutVote2, ViewSyncVote, VoteType}, }; use derivative::Derivative; use either::Either::{self, Left, Right}; @@ -201,6 +201,8 @@ where } GeneralConsensusMessage::ViewSyncVote(_) | GeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + | GeneralConsensusMessage::TimeoutVote(_) => todo!(), + } } } @@ -322,6 +324,9 @@ where /// Message with a view sync certificate. ViewSyncCertificate(Proposal>), + /// Message with a Timeout vote + TimeoutVote(TimeoutVote2), + /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), @@ -416,6 +421,9 @@ impl< GeneralConsensusMessage::ViewSyncCertificate(message) => { message.data.get_view_number() } + GeneralConsensusMessage::TimeoutVote(message) => { + message.get_view() + } } } Right(committee_message) => { @@ -447,6 +455,7 @@ impl< GeneralConsensusMessage::InternalTrigger(_) => MessagePurpose::Internal, GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, + GeneralConsensusMessage::TimeoutVote(_) => todo!(), }, Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index c0da8fef1d..7448423c80 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1491,6 +1491,7 @@ pub trait TimeoutExchangeType: ConsensusExchange fn create_timeout_message>( &self, view: TYPES::Time, + vote_token: TYPES::VoteTokenType, ) -> GeneralConsensusMessage where I::Exchanges: ExchangesType>, @@ -1502,12 +1503,13 @@ pub trait TimeoutExchangeType: ConsensusExchange .as_ref(), ); - // GeneralConsensusMessage::::TODO { - // signature: (self.public_key.to_bytes(), signature), - // current_view: view, - // vote_data: VoteData::Timeout(view.commit()), - // }) - todo!() + GeneralConsensusMessage::::TimeoutVote(TimeoutVote2 { + signature: (self.public_key().to_bytes(), signature), + current_view: view, + vote_token + }) + + } } @@ -1577,14 +1579,14 @@ impl< fn accumulate_vote( &self, - encoded_key: &EncodedPublicKey, - encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, - vote_data: VoteData, - vote_token: ::VoteTokenType, - view_number: ::Time, - accumlator: VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, - relay: Option, + _encoded_key: &EncodedPublicKey, + _encoded_signature: &EncodedSignature, + _leaf_commitment: Commitment, + _vote_data: VoteData, + _vote_token: ::VoteTokenType, + _view_number: ::Time, + _accumlator: VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, + _relay: Option, ) -> Either< VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, Self::Certificate, From f5d2bc4657a36d53cfe725c7c859dd43c0807d4f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 18:24:39 -0400 Subject: [PATCH 0103/1393] Consensus task properly recieves TimeoutVotes --- task-impls/src/consensus.rs | 1 + task-impls/src/network.rs | 2 +- types/src/vote.rs | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 36dced718b..04c28681eb 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1323,6 +1323,7 @@ pub fn consensus_event_filter>( | SequencingHotShotEvent::ViewChange(_) | SequencingHotShotEvent::SendDABlockData(_) | SequencingHotShotEvent::Timeout(_) + | SequencingHotShotEvent::TimeoutVoteRecv(_) | SequencingHotShotEvent::Shutdown, ) } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 2070eda63f..bfba4a413d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -292,7 +292,7 @@ impl< GeneralConsensusMessage::TimeoutVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.get_view())), + Some(membership.get_leader(vote.get_view() + 1)), ) } SequencingHotShotEvent::ViewChange(view) => { diff --git a/types/src/vote.rs b/types/src/vote.rs index e92d6175d0..6513044394 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -109,15 +109,15 @@ pub struct TimeoutVote2 { impl VoteType for TimeoutVote2 { fn get_view(&self) -> ::Time { - todo!() + self.current_view } fn get_key(&self) -> ::SignatureKey { - todo!() + ::from_bytes(&self.signature.0).unwrap() } fn get_signature(&self) -> EncodedSignature { - todo!() + self.signature.1.clone() } fn get_data(&self) -> VoteData { From dfdbb56ab4591c8658ee6699a64b7b2a91a345ed Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Sep 2023 17:09:39 -0700 Subject: [PATCH 0104/1393] Add Keccak hash --- hotshot/Cargo.toml | 1 + hotshot/src/block_impl.rs | 17 +++++++---------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 1f56a6be66..8f395476b9 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -104,6 +104,7 @@ nll = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true, features = ["rc"] } +sha3 = "^0.10" snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } diff --git a/hotshot/src/block_impl.rs b/hotshot/src/block_impl.rs index 25ae971406..9991c23a33 100644 --- a/hotshot/src/block_impl.rs +++ b/hotshot/src/block_impl.rs @@ -7,17 +7,20 @@ use std::{ use commit::{Commitment, Committable}; use hotshot_types::traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}; use serde::{Deserialize, Serialize}; +use sha3::{Digest, Keccak256}; use snafu::Snafu; /// The transaction in a [`VIDBlockPayload`]. -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +#[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct VIDTransaction(pub Vec); impl Committable for VIDTransaction { fn commit(&self) -> Commitment { - // TODO: Use use VID block commitment. - // - commit::RawCommitmentBuilder::new("Txn Comm").finalize() + let builder = commit::RawCommitmentBuilder::new("Txn Comm"); + let mut hasher = Keccak256::new(); + hasher.update(self.0.clone()); + let generic_array = hasher.finalize(); + builder.generic_byte_array(&generic_array).finalize() } fn tag() -> String { @@ -35,12 +38,6 @@ impl VIDTransaction { } } -impl Default for VIDTransaction { - fn default() -> Self { - Self::new() - } -} - /// The error type for block payload. #[derive(Snafu, Debug)] pub enum BlockPayloadError { From 9125306890f159b90e0cf5ba67e53278353dd4e7 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Sep 2023 17:18:07 -0700 Subject: [PATCH 0105/1393] Replace clone with borrow --- hotshot/src/block_impl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/block_impl.rs b/hotshot/src/block_impl.rs index 9991c23a33..8960d79158 100644 --- a/hotshot/src/block_impl.rs +++ b/hotshot/src/block_impl.rs @@ -18,7 +18,7 @@ impl Committable for VIDTransaction { fn commit(&self) -> Commitment { let builder = commit::RawCommitmentBuilder::new("Txn Comm"); let mut hasher = Keccak256::new(); - hasher.update(self.0.clone()); + hasher.update(&self.0); let generic_array = hasher.finalize(); builder.generic_byte_array(&generic_array).finalize() } From 65a3ccf74c24f00c8352fb547ce66b61395e8165 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 18 Sep 2023 21:25:02 -0400 Subject: [PATCH 0106/1393] Add timeout exchange and accumulator to consensus task, compiles --- hotshot/src/lib.rs | 12 ++- hotshot/src/tasks/mod.rs | 9 ++ task-impls/src/consensus.rs | 189 +++++++++++++++++++++++++++++++++++- types/src/certificate.rs | 4 +- types/src/vote.rs | 1 + 5 files changed, 209 insertions(+), 6 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e2582b85b8..3617952591 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -55,6 +55,8 @@ use hotshot_task::{ task_launcher::TaskRunner, }; use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; +use hotshot_types::certificate::TimeoutCertificate; +use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use hotshot_types::{ certificate::{DACertificate, ViewSyncCertificate}, @@ -182,7 +184,6 @@ impl> SystemContext { initializer: HotShotInitializer, metrics: Box, ) -> Result> { - debug!("Creating a new hotshot"); let consensus_metrics = Arc::new(ConsensusMetrics::new( @@ -684,6 +685,14 @@ where Commitment = ViewSyncData, Membership = MEMBERSHIP, > + 'static, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + Membership = MEMBERSHIP, + > + 'static, { fn transactions( &self, @@ -706,6 +715,7 @@ where let quorum_exchange = self.inner.exchanges.quorum_exchange().clone(); let committee_exchange = self.inner.exchanges.committee_exchange().clone(); let view_sync_exchange = self.inner.exchanges.view_sync_exchange().clone(); + let timeout_exchange = self.inner.exchanges.timeout_exchange().clone(); let handle = SystemContextHandle { registry, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 87c776563a..4dc66b5539 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -26,6 +26,8 @@ use hotshot_task_impls::{ transactions::{TransactionTaskState, TransactionsTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; +use hotshot_types::certificate::TimeoutCertificate; +use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use hotshot_types::{ certificate::ViewSyncCertificate, data::{ProposalType, QuorumProposal, SequencingLeaf}, @@ -272,6 +274,13 @@ where Certificate = DACertificate, Commitment = TYPES::BlockType, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 04c28681eb..d5310e8156 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -15,8 +15,10 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::certificate::TimeoutCertificate; use hotshot_types::traits::election::TimeoutExchangeType; use hotshot_types::traits::node_implementation::SequencingTimeoutEx; +use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::vote::QuorumVoteAccumulator; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, @@ -75,6 +77,13 @@ pub struct SequencingConsensusTaskState< Certificate = DACertificate, Commitment = TYPES::BlockType, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { /// The global task registry pub registry: GlobalRegistry, @@ -146,9 +155,19 @@ pub struct VoteCollectionTaskState< Certificate = QuorumCertificate>, Commitment = SequencingLeaf, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { /// the quorum exchange pub quorum_exchange: Arc>, + + pub timeout_exchange: Arc>, + #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< @@ -160,6 +179,17 @@ pub struct VoteCollectionTaskState< >>::VoteAccumulator, QuorumCertificate>, >, + + /// Accumulator for votes + pub timeout_accumulator: Either< + as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + TYPES::Time, + >>::VoteAccumulator, + TimeoutCertificate, + >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, /// The event stream shared by all tasks @@ -178,6 +208,13 @@ where Certificate = QuorumCertificate>, Commitment = SequencingLeaf, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { } @@ -198,6 +235,13 @@ where Certificate = QuorumCertificate>, Commitment = SequencingLeaf, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { match event { SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { @@ -247,6 +291,7 @@ where } } } + QuorumVote::Timeout(_vote) => { error!("The next leader has received an unexpected vote!"); return (None, state); @@ -255,6 +300,9 @@ where error!("The next leader has received an unexpected vote!"); } }, + SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + panic!() + } SequencingHotShotEvent::Shutdown => { return (Some(HotShotTaskCompleted::ShutDown), state); } @@ -286,6 +334,13 @@ where Certificate = DACertificate, Commitment = TYPES::BlockType, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -883,6 +938,8 @@ where return; } + // TODO ED Insert TimeoutVote accumulator stuff here + match vote.clone() { QuorumVote::Yes(vote_internal) => { let handle_event = HandleEvent(Arc::new(move |event, state| { @@ -920,10 +977,23 @@ where &vote_internal.clone().leaf_commitment, ); + let timeout_accumulator = DAVoteAccumulator { + da_vote_outcomes: HashMap::new(), + + // TODO ED Don't use quorum exchange here + success_threshold: self.quorum_exchange.success_threshold(), + + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, + }; + if vote_internal.current_view > collection_view { let state = VoteCollectionTaskState { quorum_exchange: self.quorum_exchange.clone(), + timeout_exchange: self.timeout_exchange.clone(), accumulator, + timeout_accumulator: either::Left(timeout_accumulator), cur_view: vote_internal.current_view, event_stream: self.event_stream.clone(), id: self.id, @@ -969,6 +1039,108 @@ where } } } + SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + // debug!("Received quroum vote: {:?}", vote.get_view()); + + if !self.timeout_exchange.is_leader(vote.get_view() + 1) { + error!( + "We are not the leader for view {} are we the leader for view + 1? {}", + *vote.get_view() + 1, + self.timeout_exchange.is_leader(vote.get_view() + 2) + ); + return; + } + + // // TODO ED Insert TimeoutVote accumulator stuff here + + // match vote.clone() { + // QuorumVote::Yes(vote_internal)=> { + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = + if let Some((collection_view, collection_task, _)) = &self.vote_collector { + if vote.get_view() > *collection_view { + // ED I think we'd want to let that task timeout to avoid a griefing vector + self.registry.shutdown_task(*collection_task).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; + + // // Todo check if we are the leader + // TODO ED Make this a default accum + let new_accumulator = DAVoteAccumulator { + da_vote_outcomes: HashMap::new(), + + // TODO ED Don't use quorum exchange here + success_threshold: self.quorum_exchange.success_threshold(), + + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, + }; + + let timeout_accumulator = self.timeout_exchange.accumulate_vote_2( + new_accumulator, + &vote, + &vote.get_view().commit(), + ); + + let quorum_accumulator = QuorumVoteAccumulator { + total_vote_outcomes: HashMap::new(), + yes_vote_outcomes: HashMap::new(), + no_vote_outcomes: HashMap::new(), + + success_threshold: self.quorum_exchange.success_threshold(), + failure_threshold: self.quorum_exchange.failure_threshold(), + + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, + }; + + // self.timeout_accumulator = accumulator; + + if vote.get_view() > collection_view { + let state = VoteCollectionTaskState { + quorum_exchange: self.quorum_exchange.clone(), + timeout_exchange: self.timeout_exchange.clone(), + accumulator: either::Left(quorum_accumulator), + timeout_accumulator, + cur_view: vote.get_view(), + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "Quorum Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_)) + })); + + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + + self.vote_collector = Some((vote.get_view(), id, stream_id)); + + let _task = async_spawn(async move { + VoteCollectionTypes::build(builder).launch().await; + }); + debug!("Starting vote handle for view {:?}", vote.get_view()); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message(stream_id, SequencingHotShotEvent::TimeoutVoteRecv(vote)) + .await; + } + } SequencingHotShotEvent::QCFormed(qc) => { debug!("QC Formed event happened!"); @@ -1069,9 +1241,6 @@ where ); } } - SequencingHotShotEvent::TimeoutVoteRecv(vote) => { - panic!() - } SequencingHotShotEvent::Timeout(view) => { let vote_token = self.timeout_exchange.make_vote_token(view); @@ -1251,6 +1420,13 @@ where Certificate = DACertificate, Commitment = TYPES::BlockType, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { } @@ -1300,6 +1476,13 @@ where Certificate = DACertificate, Commitment = TYPES::BlockType, >, + SequencingTimeoutEx: ConsensusExchange< + TYPES, + Message, + Proposal = QuorumProposal>, + Certificate = TimeoutCertificate, + Commitment = TYPES::Time, + >, { if let SequencingHotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index b86e96cce8..3457f8523d 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -90,8 +90,8 @@ impl SignedCertificate; - type VoteAccumulator = AccumulatorPlaceholder; - + type VoteAccumulator = DAVoteAccumulator; + fn from_signatures_and_commitment( signatures: AssembledSignature, vote: Self::Vote, diff --git a/types/src/vote.rs b/types/src/vote.rs index 6513044394..e593f6aab7 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -593,6 +593,7 @@ impl< /// Accumulates view sync votes pub struct ViewSyncVoteAccumulator< TYPES: NodeType, + // TODO ED : Doesn't need to be generic over vote or committable, we can infer that from the accumulator type (unless it is a generic type) COMMITTABLE: Committable + Serialize + Clone, VOTE: VoteType, > { From 3cd1cf46c6687d84b2861c7eb2295b6dfc04a292 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 08:55:55 -0400 Subject: [PATCH 0107/1393] Comments --- types/src/traits/election.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 7448423c80..ac449c2b8a 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -300,6 +300,7 @@ pub trait ConsensusExchange: Send + Sync { /// Network used by [`Membership`](Self::Membership) to communicate. type Networking: CommunicationChannel; /// Commitments to items which are the subject of proposals and decisions. + // TODO ED The above isn't true for all proposals (Timeout, ViewSync) type Commitment: Committable + Serialize + Clone; /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). @@ -786,6 +787,7 @@ pub trait QuorumExchangeType, ConsensusExchange { /// Create a message with a positive vote on validating or commitment proposal. + // TODO ED This returns just a general message type, it's not even bound to a proposal, and this is just a function on the QC. Make proprosal doesn't really apply to all cert types. fn create_yes_message>( &self, justify_qc_commitment: Commitment, From 3fb6c688ca7f451cf08ebe3d50a17479beea8d25 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 09:38:55 -0400 Subject: [PATCH 0108/1393] Vote collection is receiving timeout votes correctly --- task-impls/src/consensus.rs | 15 +++-- task-impls/src/network.rs | 1 + types/src/certificate.rs | 5 +- types/src/data.rs | 4 ++ types/src/traits/election.rs | 8 +++ types/src/vote.rs | 106 ++++++++++++++++++++++++++++++++++- 6 files changed, 130 insertions(+), 9 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d5310e8156..0572db796d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -20,6 +20,7 @@ use hotshot_types::traits::election::TimeoutExchangeType; use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::vote::QuorumVoteAccumulator; +use hotshot_types::vote::TimeoutVoteAccumulator; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, consensus::{Consensus, View}, @@ -306,7 +307,7 @@ where SequencingHotShotEvent::Shutdown => { return (Some(HotShotTaskCompleted::ShutDown), state); } - _ => {} + _ => {error!("Unexpected event")} } (None, state) } @@ -977,7 +978,7 @@ where &vote_internal.clone().leaf_commitment, ); - let timeout_accumulator = DAVoteAccumulator { + let timeout_accumulator = TimeoutVoteAccumulator { da_vote_outcomes: HashMap::new(), // TODO ED Don't use quorum exchange here @@ -1000,7 +1001,7 @@ where }; let name = "Quorum Vote Collection"; let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_)) + matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_) | SequencingHotShotEvent::TimeoutVoteRecv(_)) })); let builder = @@ -1041,6 +1042,7 @@ where } SequencingHotShotEvent::TimeoutVoteRecv(vote) => { // debug!("Received quroum vote: {:?}", vote.get_view()); + if !self.timeout_exchange.is_leader(vote.get_view() + 1) { error!( @@ -1051,6 +1053,8 @@ where return; } + + // // TODO ED Insert TimeoutVote accumulator stuff here // match vote.clone() { @@ -1071,7 +1075,7 @@ where // // Todo check if we are the leader // TODO ED Make this a default accum - let new_accumulator = DAVoteAccumulator { + let new_accumulator = TimeoutVoteAccumulator { da_vote_outcomes: HashMap::new(), // TODO ED Don't use quorum exchange here @@ -1115,7 +1119,7 @@ where }; let name = "Quorum Vote Collection"; let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_)) + matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_) | SequencingHotShotEvent::TimeoutVoteRecv(_)) })); let builder = @@ -1257,6 +1261,7 @@ where .timeout_exchange .create_timeout_message::(view, vote_token); + // error!("Sending timeout vote for view {}", *view); if let GeneralConsensusMessage::TimeoutVote(vote) = message { self.event_stream .publish(SequencingHotShotEvent::TimeoutVoteSend(vote)) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index bfba4a413d..c7df58d90a 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -90,6 +90,7 @@ impl< SequencingHotShotEvent::ViewSyncCertificateRecv(view_sync_message) } GeneralConsensusMessage::TimeoutVote(message) => { + // error!("Recv timeout vote in network task for view {:?}", message.get_view()); SequencingHotShotEvent::TimeoutVoteRecv(message) } GeneralConsensusMessage::InternalTrigger(_) => { diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 3457f8523d..c4e180bb48 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -5,6 +5,7 @@ use crate::vote::DAVoteAccumulator; use crate::vote::QuorumVote; use crate::vote::QuorumVoteAccumulator; use crate::vote::TimeoutVote2; +use crate::vote::TimeoutVoteAccumulator; use crate::vote::ViewSyncVoteAccumulator; use crate::vote::VoteType; use crate::{ @@ -90,7 +91,7 @@ impl SignedCertificate; - type VoteAccumulator = DAVoteAccumulator; + type VoteAccumulator = TimeoutVoteAccumulator; fn from_signatures_and_commitment( signatures: AssembledSignature, @@ -168,6 +169,8 @@ pub enum AssembledSignature { No(::QCType), /// These signatures are for a 'DA' certificate DA(::QCType), + + Timeout(::QCType), /// These signatures are for genesis certificate Genesis(), /// These signatures are for ViewSyncPreCommit diff --git a/types/src/data.rs b/types/src/data.rs index 8275c8d9b8..e749c39251 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -885,6 +885,10 @@ pub fn serialize_signature(signature: &AssembledSignature { + signatures_bytes.extend("Timeout".as_bytes()); + Some(signatures.clone()) + } AssembledSignature::ViewSyncPreCommit(signatures) => { signatures_bytes.extend("ViewSyncPreCommit".as_bytes()); Some(signatures.clone()) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index ac449c2b8a..b4196ce666 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -393,6 +393,14 @@ pub trait ConsensusExchange: Send + Sync { ); ::check(&real_qc_pp, real_commit.as_ref(), &qc) } + AssembledSignature::Timeout(qc) => { + let real_commit = VoteData::Timeout(leaf_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } AssembledSignature::Genesis() => true, AssembledSignature::ViewSyncPreCommit(_) | AssembledSignature::ViewSyncCommit(_) diff --git a/types/src/vote.rs b/types/src/vote.rs index e593f6aab7..e19464e141 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -25,7 +25,7 @@ use std::{ marker::PhantomData, num::NonZeroU64, }; -use tracing::error; +use tracing::{error, warn}; /// The vote sent by consensus messages. pub trait VoteType: @@ -121,11 +121,11 @@ impl VoteType for TimeoutVote2 { } fn get_data(&self) -> VoteData { - todo!() + VoteData::Timeout(self.get_view().commit()) } fn get_vote_token(&self) -> ::VoteTokenType { - todo!() + self.vote_token.clone() } } @@ -368,6 +368,98 @@ pub trait Accumulator2< ) -> Either>; } +// TODO ED Make a default accumulator +pub struct TimeoutVoteAccumulator< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, +> { + /// Map of all da signatures accumlated so far + pub da_vote_outcomes: VoteMap, + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::PureAssembledSignatureType>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + /// Phantom data to specify the vote this accumulator is for + pub phantom: PhantomData, +} + +impl< + TYPES: NodeType, + COMMITTABLE: Committable + Serialize + Clone, + VOTE: VoteType, + > Accumulator2 for TimeoutVoteAccumulator +{ + fn append( + mut self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + + let VoteData::Timeout(vote_commitment) = vote.get_data() else { + return Either::Left(self); + }; + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (da_stake_casted, da_vote_map) = self + .da_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if da_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // Already checked that vote data was for a DA vote above + *da_stake_casted += u64::from(vote.get_vote_token().vote_count()); + da_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + + if *da_stake_casted >= u64::from(self.success_threshold) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + // TODO ED Something about stake table entries. Might be easier to just pass in membership? + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + self.da_vote_outcomes.remove(&vote_commitment); + + return Either::Right(AssembledSignature::Timeout(real_qc_sig)); + } + Either::Left(self) + } +} + /// Accumulates DA votes pub struct DAVoteAccumulator< TYPES: NodeType, @@ -398,6 +490,14 @@ impl< vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { + + match vote.get_data() { + VoteData::DA(_) => warn!("DA vote data"), + VoteData::Timeout(_) => panic!(), + _ => error!("Wrong vote data") + + } + let VoteData::DA(vote_commitment) = vote.get_data() else { return Either::Left(self); }; From 6d5047fbafa91319c7341dadfea6d14c1628970c Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 19 Sep 2023 09:58:13 -0400 Subject: [PATCH 0109/1393] Fix libp2p examples (#1735) * fix libp2p examples * fmt * update libp2p run config * libp2p to 10 rounds * fix viewsync by changing DA keys * remove signaturekey comments * update flake * update flake lock * merge main into examples --- hotshot/Cargo.toml | 25 +- hotshot/examples/infra/mod.rs | 1 - hotshot/examples/infra/modDA.rs | 278 ++++++++++++++++++-- hotshot/examples/libp2p/multi-validator.rs | 69 +++++ hotshot/examples/libp2p/orchestrator.rs | 24 +- hotshot/examples/libp2p/types.rs | 83 +++--- hotshot/examples/libp2p/validator.rs | 30 ++- orchestrator/default-libp2p-run-config.toml | 9 +- orchestrator/src/lib.rs | 1 - 9 files changed, 442 insertions(+), 78 deletions(-) create mode 100644 hotshot/examples/libp2p/multi-validator.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 1f56a6be66..bb587595d2 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -25,16 +25,21 @@ docs = [] doc-images = [] hotshot-testing = [] -# [[example]] -# name = "libp2p-validator" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/validator.rs" -# -# [[example]] -# name = "libp2p-orchestrator" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/orchestrator.rs" -# +[[example]] +name = "libp2p-validator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/validator.rs" + +[[example]] +name = "libp2p-multi-validator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/multi-validator.rs" + +[[example]] +name = "libp2p-orchestrator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/orchestrator.rs" + # [[example]] # name = "web-server-orchestrator" # required-features = ["demo", "libp2p/rsa"] diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 1ffbb04a7f..06d7d9deb3 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -69,7 +69,6 @@ pub fn load_config_from_file( config } -/// yeesh maybe we should just implement SignatureKey for this... pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 10c6d85cea..ae0f0e40f5 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -1,11 +1,14 @@ use crate::infra::{load_config_from_file, OrchestratorArgs}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use async_lock::RwLock; use async_trait::async_trait; use futures::StreamExt; use hotshot::{ traits::{ - implementations::{MemoryStorage, WebCommChannel, WebServerNetwork}, + implementations::{ + Libp2pCommChannel, Libp2pNetwork, MemoryStorage, WebCommChannel, WebServerNetwork, + }, NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, @@ -17,6 +20,7 @@ use hotshot_orchestrator::{ config::{NetworkConfig, WebServerConfig}, }; use hotshot_task::task::FilterEvent; +use hotshot_types::HotShotConfig; use hotshot_types::{ certificate::ViewSyncCertificate, data::{QuorumProposal, SequencingLeaf, TestableLeaf}, @@ -33,8 +37,17 @@ use hotshot_types::{ }, state::{ConsensusTime, TestableBlock, TestableState}, }, - HotShotConfig, }; +use libp2p_identity::{ + ed25519::{self, SecretKey}, + Keypair, +}; +use libp2p_networking::{ + network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, + reexport::Multiaddr, +}; +use std::{collections::BTreeSet, sync::Arc}; +use std::{num::NonZeroUsize, str::FromStr}; // use libp2p::{ // identity::{ // ed25519::{Keypair as EdKeypair, SecretKey}, @@ -43,7 +56,7 @@ use hotshot_types::{ // multiaddr::{self, Protocol}, // Multiaddr, // }; -// use libp2p_identity::PeerId; +use libp2p_identity::PeerId; // use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; use std::{ //collections::{BTreeSet, VecDeque}, @@ -376,15 +389,6 @@ pub trait RunDA< // WEB SERVER -/// Alias for the [`WebCommChannel`] for sequencing consensus. -type StaticDAComm = WebCommChannel; - -/// Alias for the ['WebCommChannel'] for validating consensus -type StaticQuorumComm = WebCommChannel; - -/// Alias for the ['WebCommChannel'] for view sync consensus -type StaticViewSyncComm = WebCommChannel; - /// Represents a web server-based run pub struct WebServerDARun< TYPES: NodeType, @@ -396,9 +400,9 @@ pub struct WebServerDARun< ::StakeTableEntry, TYPES::ElectionConfigType, >, - quorum_network: StaticQuorumComm, - da_network: StaticDAComm, - view_sync_network: StaticViewSyncComm, + quorum_network: WebCommChannel, + da_network: WebCommChannel, + view_sync_network: WebCommChannel, } #[async_trait] @@ -440,9 +444,9 @@ impl< RunDA< TYPES, MEMBERSHIP, - StaticDAComm, - StaticQuorumComm, - StaticViewSyncComm, + WebCommChannel, + WebCommChannel, + WebCommChannel, NODE, > for WebServerDARun where @@ -530,6 +534,235 @@ where } } +// Libp2p + +/// Represents a libp2p-based run +pub struct Libp2pDARun, MEMBERSHIP: Membership> +{ + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + quorum_network: Libp2pCommChannel, + da_network: Libp2pCommChannel, + view_sync_network: Libp2pCommChannel, +} + +#[async_trait] +impl< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + CommitteeExchange< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + > + RunDA< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + NODE, + > for Libp2pDARun +where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> Libp2pDARun { + let (pubkey, _privkey) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + config.seed, + config.node_index, + ); + let mut config = config; + let libp2p_config = config + .libp2p_config + .take() + .expect("Configuration is not for a Libp2p network"); + let bs_len = libp2p_config.bootstrap_nodes.len(); + let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config + .bootstrap_nodes + .iter() + .map(|(addr, pair)| { + let kp = Keypair::from_protobuf_encoding(pair).unwrap(); + let peer_id = PeerId::from_public_key(&kp.public()); + let multiaddr = + Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) + .unwrap(); + (peer_id, multiaddr) + }) + .collect(); + let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); + let node_type = if (config.node_index as usize) < bs_len { + NetworkNodeType::Bootstrap + } else { + NetworkNodeType::Regular + }; + let node_index = config.node_index; + let port_index = match libp2p_config.index_ports { + true => node_index, + false => 0, + }; + let bound_addr: Multiaddr = format!( + "/{}/{}/udp/{}/quic-v1", + if libp2p_config.public_ip.is_ipv4() { + "ip4" + } else { + "ip6" + }, + libp2p_config.public_ip, + libp2p_config.base_port as u64 + port_index + ) + .parse() + .unwrap(); + + // generate network + let mut config_builder = NetworkNodeConfigBuilder::default(); + assert!(config.config.total_nodes.get() > 2); + let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); + config_builder.replication_factor(replicated_nodes); + config_builder.identity(identity.clone()); + + config_builder.bound_addr(Some(bound_addr.clone())); + + let to_connect_addrs = bootstrap_nodes + .iter() + .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) + .collect(); + + config_builder.to_connect_addrs(to_connect_addrs); + + let mesh_params = + // NOTE I'm arbitrarily choosing these. + match node_type { + NetworkNodeType::Bootstrap => MeshParams { + mesh_n_high: libp2p_config.bootstrap_mesh_n_high, + mesh_n_low: libp2p_config.bootstrap_mesh_n_low, + mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, + mesh_n: libp2p_config.bootstrap_mesh_n, + }, + NetworkNodeType::Regular => MeshParams { + mesh_n_high: libp2p_config.mesh_n_high, + mesh_n_low: libp2p_config.mesh_n_low, + mesh_outbound_min: libp2p_config.mesh_outbound_min, + mesh_n: libp2p_config.mesh_n, + }, + NetworkNodeType::Conductor => unreachable!(), + }; + config_builder.mesh_params(Some(mesh_params)); + + let mut all_keys = BTreeSet::new(); + let mut da_keys = BTreeSet::new(); + for i in 0..config.config.total_nodes.get() as u64 { + let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; + let pubkey = TYPES::SignatureKey::from_private(&privkey); + if i < config.config.da_committee_size as u64 { + da_keys.insert(pubkey.clone()); + } + all_keys.insert(pubkey); + } + + let node_config = config_builder.build().unwrap(); + let underlying_quorum_network = Libp2pNetwork::new( + NoMetrics::boxed(), + node_config, + pubkey.clone(), + Arc::new(RwLock::new( + bootstrap_nodes + .iter() + .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) + .collect(), + )), + bs_len, + config.node_index as usize, + // NOTE: this introduces an invariant that the keys are assigned using this indexed + // function + all_keys, + da_keys, + ) + .await + .unwrap(); + + underlying_quorum_network.wait_for_ready().await; + + // Create the network + let quorum_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let view_sync_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let da_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + Libp2pDARun { + config, + quorum_network, + da_network, + view_sync_network, + } + } + + fn get_da_network(&self) -> Libp2pCommChannel { + self.da_network.clone() + } + + fn get_quorum_network(&self) -> Libp2pCommChannel { + self.quorum_network.clone() + } + + fn get_view_sync_network(&self) -> Libp2pCommChannel { + self.view_sync_network.clone() + } + + fn get_config( + &self, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > { + self.config.clone() + } +} + /// Main entry point for validators pub async fn main_entry_point< TYPES: NodeType, @@ -613,3 +846,12 @@ pub async fn main_entry_point< info!("All nodes are ready! Starting HotShot"); run.run_hotshot(hotshot).await; } + +pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); + >::from(sk_bytes).into() +} diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs new file mode 100644 index 0000000000..8a0f53c1c9 --- /dev/null +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -0,0 +1,69 @@ +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, +}; +use clap::Parser; +use hotshot::demos::sdemo::SDemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use std::net::IpAddr; +use tracing::instrument; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; + +#[derive(Parser, Debug, Clone)] +struct MultiValidatorArgs { + /// Number of validators to run + pub num_nodes: u16, + /// The address the orchestrator runs on + pub host: IpAddr, + /// The port the orchestrator runs on + pub port: u16, + /// This node's public IP address, for libp2p + /// If no IP address is passed in, it will default to 127.0.0.1 + pub public_ip: Option, +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = MultiValidatorArgs::parse(); + tracing::error!( + "connecting to orchestrator at {:?}:{:?}", + args.host, + args.port + ); + let mut nodes = Vec::new(); + for _ in 0..args.num_nodes { + let node = async_spawn(async move { + infra_da::main_entry_point::< + SDemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: args.host.to_string(), + port: args.port, + public_ip: args.public_ip, + }) + .await + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 3bb08103d0..772d3aa12e 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -1,15 +1,21 @@ pub mod types; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::vdemo::VDemoTypes; +use hotshot::demos::sdemo::SDemoTypes; use tracing::instrument; use types::ThisMembership; -use crate::infra::{run_orchestrator, OrchestratorArgs}; -use crate::types::{NodeImpl, ThisNetwork}; +use crate::{ + infra::OrchestratorArgs, + infra_da::run_orchestrator_da, + types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, +}; #[path = "../infra/mod.rs"] pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -18,7 +24,17 @@ pub mod infra; #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { + setup_logging(); + setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::(args).await; + run_orchestrator_da::< + SDemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + >(args) + .await; } diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 8b3c70e5e1..5289c9d6fe 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,60 +1,75 @@ -use crate::infra::Libp2pRun; -use hotshot::traits::implementations::MemoryStorage; +use crate::infra_da::Libp2pDARun; use hotshot::{ - demos::vdemo::VDemoTypes, + demos::sdemo::SDemoTypes, traits::{ - election::static_committee::GeneralStaticCommittee, implementations::Libp2pCommChannel, + election::static_committee::GeneralStaticCommittee, + implementations::{Libp2pCommChannel, MemoryStorage}, }, }; -use hotshot_types::message::{Message, ValidatingMessage}; -use hotshot_types::traits::{ - election::QuorumExchange, - node_implementation::{ChannelMaps, NodeImplementation, ValidatingExchanges}, -}; use hotshot_types::{ - data::{ValidatingLeaf, ValidatingProposal}, - traits::node_implementation::NodeType, - vote::QuorumVote, + certificate::ViewSyncCertificate, + data::{DAProposal, QuorumProposal, SequencingLeaf}, + message::{Message, SequencingMessage}, + traits::{ + election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, + }, + vote::{DAVote, QuorumVote, ViewSyncVote}, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; -#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = ValidatingLeaf; +pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; -pub type ThisNetwork = - Libp2pCommChannel; + GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pCommChannel; +pub type ViewSyncNetwork = Libp2pCommChannel; + +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; -pub type ThisProposal = ValidatingProposal; -pub type ThisVote = QuorumVote; +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; - type Leaf = ValidatingLeaf; - type Exchanges = ValidatingExchanges< - VDemoTypes, - Message, +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; + +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; + type Exchanges = SequencingExchanges< + SDemoTypes, + Message, QuorumExchange< - VDemoTypes, + SDemoTypes, Self::Leaf, - ThisProposal, + ThisQuorumProposal, + ThisMembership, + QuorumNetwork, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + SDemoTypes, + ThisViewSyncProposal, ThisMembership, - ThisNetwork, - Message, + ViewSyncNetwork, + Message, >, >; - type ConsensusMessage = ValidatingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pRun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 59009c9d8f..ec2415fd65 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,15 +1,18 @@ -use crate::infra::main_entry_point; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::vdemo::VDemoTypes; -use hotshot_orchestrator::client::ValidatorArgs; -use tracing::instrument; +use hotshot::demos::sdemo::SDemoTypes; +use tracing::{info, instrument}; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; -use crate::types::{NodeImpl, ThisMembership, ThisNetwork, ThisRun}; +use hotshot_orchestrator::client::ValidatorArgs; pub mod types; #[path = "../infra/mod.rs"] pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -18,6 +21,21 @@ pub mod infra; #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { + setup_logging(); + setup_backtrace(); let args = ValidatorArgs::parse(); - main_entry_point::(args).await; + info!( + "connecting to orchestrator at {:?}:{:?}", + args.host, args.port + ); + infra_da::main_entry_point::< + SDemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + ThisRun, + >(args) + .await; } diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index 5d43c940fa..5757f4d9f9 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -1,4 +1,4 @@ -rounds = 100 +rounds = 10 transactions_per_round = 10 node_index = 0 seed = [ @@ -57,17 +57,18 @@ num_txn_per_round = 10 base_port = 9000 [config] -total_nodes = 5 +total_nodes = 10 +committee_nodes = 5 max_transactions = 100 min_transactions = 0 -next_view_timeout = 10000 +next_view_timeout = 30000 timeout_ratio = [ 11, 10, ] round_start_delay = 1 start_delay = 1 -num_bootstrap = 5 +num_bootstrap = 4 [config.propose_min_round_time] secs = 0 diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index a7e7ae0c28..93efa501e7 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -26,7 +26,6 @@ use libp2p::identity::{ Keypair, }; -/// yeesh maybe we should just implement SignatureKey for this... pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); From 0fd30783dab579775c9bfa78c22471a052d4611f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 10:29:08 -0400 Subject: [PATCH 0110/1393] Bug - proposals after timeout don't have timeout cert and have too low of qc --- task-impls/src/consensus.rs | 171 ++++++++++++++++++++++++++---------- task-impls/src/events.rs | 5 +- types/src/certificate.rs | 16 ++-- 3 files changed, 139 insertions(+), 53 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 0572db796d..9cbae0ff6c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -275,7 +275,7 @@ where debug!("QCFormed! {:?}", qc.view_number); state .event_stream - .publish(SequencingHotShotEvent::QCFormed(qc.clone())) + .publish(SequencingHotShotEvent::QCFormed(either::Left(qc.clone()))) .await; state.accumulator = Either::Right(qc.clone()); @@ -302,12 +302,58 @@ where } }, SequencingHotShotEvent::TimeoutVoteRecv(vote) => { - panic!() + if state.timeout_accumulator.is_right() { + return (None, state); + } + + if vote.get_view() != state.cur_view { + error!( + "Vote view does not match! vote view is {} current view is {}", + *vote.get_view(), + *state.cur_view + ); + return (None, state); + } + + let accumulator = state.timeout_accumulator.left().unwrap(); + + match state.timeout_exchange.accumulate_vote_2( + accumulator, + &vote, + &vote.get_view().commit(), + ) { + Either::Left(acc) => { + state.timeout_accumulator = Either::Left(acc); + return (None, state); + } + Either::Right(qc) => { + debug!("QCFormed! {:?}", qc.view_number); + // TODO ED Make Timeout QC Formed Event + state + .event_stream + .publish(SequencingHotShotEvent::QCFormed(either::Right(qc.clone()))) + .await; + state.timeout_accumulator = Either::Right(qc.clone()); + + // No longer need to poll for votes + state + .quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; + + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } } SequencingHotShotEvent::Shutdown => { return (Some(HotShotTaskCompleted::ShutDown), state); } - _ => {error!("Unexpected event")} + _ => { + error!("Unexpected event") + } } (None, state) } @@ -626,6 +672,7 @@ where return; } + // TODO ED How does this play in with the timeout cert? self.current_proposal = Some(proposal.data.clone()); let vote_token = self.quorum_exchange.make_vote_token(view); @@ -638,11 +685,20 @@ where debug!("We were not chosen for consensus committee on {:?}", view); } Ok(Some(vote_token)) => { + if proposal.data.justify_qc.view_number() != proposal.data.view_number - 1 { + // TODO ED Add timeout cert logic + if proposal.data.timeout_certificate.is_none() { + error!("Proposal needed a timeout cert but didn't have one {:?}", proposal.data.clone()); + return + } + // TODO ED Check timeout cert validity + } + debug!("We were chosen for consensus committee on {:?}", view); let consensus = self.consensus.upgradable_read().await; let message; - // TODO ED Insert TC logic here + // Construct the leaf. let justify_qc = proposal.data.justify_qc; @@ -904,7 +960,7 @@ where "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.clone(), qc.view_number + 1) + self.publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await; } if !self.vote_if_able().await { @@ -1001,7 +1057,11 @@ where }; let name = "Quorum Vote Collection"; let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_) | SequencingHotShotEvent::TimeoutVoteRecv(_)) + matches!( + event, + SequencingHotShotEvent::QuorumVoteRecv(_) + | SequencingHotShotEvent::TimeoutVoteRecv(_) + ) })); let builder = @@ -1042,7 +1102,6 @@ where } SequencingHotShotEvent::TimeoutVoteRecv(vote) => { // debug!("Received quroum vote: {:?}", vote.get_view()); - if !self.timeout_exchange.is_leader(vote.get_view() + 1) { error!( @@ -1053,8 +1112,6 @@ where return; } - - // // TODO ED Insert TimeoutVote accumulator stuff here // match vote.clone() { @@ -1119,7 +1176,11 @@ where }; let name = "Quorum Vote Collection"; let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::QuorumVoteRecv(_) | SequencingHotShotEvent::TimeoutVoteRecv(_)) + matches!( + event, + SequencingHotShotEvent::QuorumVoteRecv(_) + | SequencingHotShotEvent::TimeoutVoteRecv(_) + ) })); let builder = @@ -1145,42 +1206,62 @@ where .await; } } - SequencingHotShotEvent::QCFormed(qc) => { + SequencingHotShotEvent::QCFormed(cert) => { debug!("QC Formed event happened!"); - let mut consensus = self.consensus.write().await; - consensus.high_qc = qc.clone(); - - drop(consensus); - - // View may have already been updated by replica if they voted for this QC - // TODO ED We should separate leader state from replica state, they shouldn't share the same view - // Leader task should only run for a specific view, and never update its current view, but instead spawn another task - // let _res = self.update_view(qc.view_number + 1).await; - - // Start polling for votes for the next view - // if _res { - // if self.quorum_exchange.is_leader(qc.view_number + 2) { - // self.quorum_exchange - // .network() - // .inject_consensus_info( - // (ConsensusIntentEvent::PollForVotes(*qc.view_number + 1)), - // ) - // .await; - // } - // } + if let either::Right(qc) = cert.clone() { + // So we don't create a QC on the first view unless we are the leader + debug!( + "Attempting to publish proposal after forming a QC for view {}", + *qc.view_number + ); - // So we don't create a QC on the first view unless we are the leader - debug!( - "Attempting to publish proposal after forming a QC for view {}", - *qc.view_number - ); + // TODO ED Clean this up, get rid of clones + if self + .publish_proposal_if_able(self.consensus.read().await.high_qc.clone(), qc.clone().view_number + 1, Some(qc.clone())) + .await + { + self.update_view(qc.view_number + 1).await; + } + else { + error!("Wasn't able to publish proposal"); + } + } + if let either::Left(qc) = cert { + let mut consensus = self.consensus.write().await; + consensus.high_qc = qc.clone(); + + drop(consensus); + + // View may have already been updated by replica if they voted for this QC + // TODO ED We should separate leader state from replica state, they shouldn't share the same view + // Leader task should only run for a specific view, and never update its current view, but instead spawn another task + // let _res = self.update_view(qc.view_number + 1).await; + + // Start polling for votes for the next view + // if _res { + // if self.quorum_exchange.is_leader(qc.view_number + 2) { + // self.quorum_exchange + // .network() + // .inject_consensus_info( + // (ConsensusIntentEvent::PollForVotes(*qc.view_number + 1)), + // ) + // .await; + // } + // } + + // So we don't create a QC on the first view unless we are the leader + debug!( + "Attempting to publish proposal after forming a QC for view {}", + *qc.view_number + ); - if self - .publish_proposal_if_able(qc.clone(), qc.view_number + 1) - .await - { - self.update_view(qc.view_number + 1).await; + if self + .publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) + .await + { + self.update_view(qc.view_number + 1).await; + } } } SequencingHotShotEvent::DACRecv(cert) => { @@ -1238,7 +1319,7 @@ where let consensus = self.consensus.read().await; let qc = consensus.high_qc.clone(); drop(consensus); - if !self.publish_proposal_if_able(qc, self.cur_view).await { + if !self.publish_proposal_if_able(qc, self.cur_view, None).await { error!( "Failed to publish proposal on view change. View = {:?}", self.cur_view @@ -1292,6 +1373,7 @@ where &self, _qc: QuorumCertificate, view: TYPES::Time, + timeout_certificate: Option> ) -> bool { if !self.quorum_exchange.is_leader(view) { error!( @@ -1380,8 +1462,7 @@ where view_number: leaf.view_number, height: leaf.height, justify_qc: consensus.high_qc.clone(), - // TODO ED Update this to be the actual TC if there is one - timeout_certificate: None, + timeout_certificate: timeout_certificate.or_else(|| { None }), proposer_id: leaf.proposer_id, dac: None, }; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 6c3d978bdb..ded16bac6e 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,5 +1,6 @@ +use either::Either; use hotshot_types::{ - certificate::{DACertificate, QuorumCertificate}, + certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, data::{DAProposal, VidDisperse}, message::Proposal, traits::node_implementation::{ @@ -36,7 +37,7 @@ pub enum SequencingHotShotEvent> { /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(QuorumCertificate), + QCFormed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks diff --git a/types/src/certificate.rs b/types/src/certificate.rs index c4e180bb48..3cc7cb7265 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -91,25 +91,29 @@ impl SignedCertificate; - type VoteAccumulator = TimeoutVoteAccumulator; - + type VoteAccumulator = TimeoutVoteAccumulator; + fn from_signatures_and_commitment( signatures: AssembledSignature, vote: Self::Vote, ) -> Self { - todo!() + let qc = TimeoutCertificate { + view_number: vote.get_view(), + signatures, + }; + qc } fn view_number(&self) -> TYPES::Time { - todo!() + self.view_number } fn signatures(&self) -> AssembledSignature { - todo!() + self.signatures.clone() } fn leaf_commitment(&self) -> Commitment { - todo!() + self.view_number.commit() } fn set_leaf_commitment(&mut self, commitment: Commitment) { From eb776a77042a8144a78a5d1eec5fabd74c15dcb0 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 10:34:53 -0400 Subject: [PATCH 0111/1393] Fix previous bug --- task-impls/src/consensus.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9cbae0ff6c..29cbaa1f14 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1319,12 +1319,13 @@ where let consensus = self.consensus.read().await; let qc = consensus.high_qc.clone(); drop(consensus); - if !self.publish_proposal_if_able(qc, self.cur_view, None).await { - error!( - "Failed to publish proposal on view change. View = {:?}", - self.cur_view - ); - } + // TODO ED Do not want to publish proposal on view change + // if !self.publish_proposal_if_able(qc, self.cur_view, None).await { + // error!( + // "Failed to publish proposal on view change. View = {:?}", + // self.cur_view + // ); + // } } SequencingHotShotEvent::Timeout(view) => { let vote_token = self.timeout_exchange.make_vote_token(view); From 76720d2ecee5263de11c87235cab021615edda27 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 13:33:52 -0400 Subject: [PATCH 0112/1393] view change location updated; need to handle update view sent for timeout message --- hotshot/src/lib.rs | 20 ++-- task-impls/src/consensus.rs | 43 ++++---- task-impls/src/view_sync.rs | 192 ++++++++++++++++++------------------ testing/tests/basic.rs | 2 +- 4 files changed, 132 insertions(+), 125 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 3617952591..b59ee8e0b9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -257,20 +257,20 @@ impl> SystemContext { /// "Starts" consensus by sending a `ViewChange` event pub async fn start_consensus(&self) { - self.inner - .internal_event_stream - .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new(1))) - .await; + // self.inner + // .internal_event_stream + // .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new(1))) + // .await; // ED This isn't ideal... // async_sleep(Duration::new(1, 0)).await; - // self.inner - // .internal_event_stream - // .publish(SequencingHotShotEvent::QCFormed( - // QuorumCertificate::genesis(), - // )) - // .await; + self.inner + .internal_event_stream + .publish(SequencingHotShotEvent::QCFormed(either::Left( + QuorumCertificate::genesis())), + ) + .await; } /// Marks a given view number as timed out. This should be called a fixed period after a round is started. diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 29cbaa1f14..20f8499339 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -18,7 +18,6 @@ use hotshot_task::{ use hotshot_types::certificate::TimeoutCertificate; use hotshot_types::traits::election::TimeoutExchangeType; use hotshot_types::traits::node_implementation::SequencingTimeoutEx; -use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::vote::QuorumVoteAccumulator; use hotshot_types::vote::TimeoutVoteAccumulator; use hotshot_types::{ @@ -672,6 +671,21 @@ where return; } + if proposal.data.justify_qc.view_number() != proposal.data.view_number - 1 { + // TODO ED Add timeout cert logic + if proposal.data.timeout_certificate.is_none() { + error!("Proposal needed a timeout cert but didn't have one {:?}", proposal.data.clone()); + return + } + else { + error!("Proposal for view {} had timeout certificate", *view); + } + // TODO ED Check timeout cert validity + } + + // TODO ED This needs to be moved further down so we only update the view after fully validating the qc. + self.update_view(view).await; + // TODO ED How does this play in with the timeout cert? self.current_proposal = Some(proposal.data.clone()); @@ -685,14 +699,7 @@ where debug!("We were not chosen for consensus committee on {:?}", view); } Ok(Some(vote_token)) => { - if proposal.data.justify_qc.view_number() != proposal.data.view_number - 1 { - // TODO ED Add timeout cert logic - if proposal.data.timeout_certificate.is_none() { - error!("Proposal needed a timeout cert but didn't have one {:?}", proposal.data.clone()); - return - } - // TODO ED Check timeout cert validity - } + debug!("We were chosen for consensus committee on {:?}", view); let consensus = self.consensus.upgradable_read().await; @@ -975,7 +982,7 @@ where } // Update current view and publish a view change event so other tasks also update - self.update_view(new_view).await; + // self.update_view(new_view).await; if let GeneralConsensusMessage::Vote(vote) = message { debug!("Sending vote to next leader {:?}", vote); @@ -1221,7 +1228,7 @@ where .publish_proposal_if_able(self.consensus.read().await.high_qc.clone(), qc.clone().view_number + 1, Some(qc.clone())) .await { - self.update_view(qc.view_number + 1).await; + // self.update_view(qc.view_number + 1).await; } else { error!("Wasn't able to publish proposal"); @@ -1260,7 +1267,7 @@ where .publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await { - self.update_view(qc.view_number + 1).await; + // self.update_view(qc.view_number + 1).await; } } } @@ -1272,7 +1279,7 @@ where // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { - self.update_view(view + 1).await; + // self.update_view(view + 1).await; } } SequencingHotShotEvent::VidCertRecv(cert) => { @@ -1283,7 +1290,7 @@ where // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { - self.update_view(view + 1).await; + // self.update_view(view + 1).await; } } SequencingHotShotEvent::ViewChange(new_view) => { @@ -1293,10 +1300,10 @@ where // update the view in state to the one in the message // Publish a view change event to the application - if !self.update_view(new_view).await { - debug!("view not updated"); - return; - } + // if !self.update_view(new_view).await { + // debug!("view not updated"); + // return; + // } self.output_event_stream .publish(Event { diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index f92752f487..34db7c6097 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -489,102 +489,102 @@ where } &SequencingHotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number < TYPES::Time::new(*self.current_view) { - return; - } - - self.num_timeouts_tracked += 1; - error!("Num timeouts tracked is {}", self.num_timeouts_tracked); - - if self.num_timeouts_tracked > 2 { - error!("Too many timeouts! This shouldn't happen"); - } - - // TODO ED Make this a configurable variable - if self.num_timeouts_tracked == 2 { - // Start polling for view sync certificates - self.exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( - *view_number + 1, - )) - .await; - - self.exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( - *view_number + 1, - )) - .await; - // panic!("Starting view sync!"); - // Spawn replica task - - let mut replica_state = ViewSyncReplicaTaskState { - current_view: self.current_view, - next_view: TYPES::Time::new(*view_number + 1), - relay: 0, - finalized: false, - sent_view_change_event: false, - phase: ViewSyncPhase::None, - exchange: self.exchange.clone(), - api: self.api.clone(), - event_stream: self.event_stream.clone(), - view_sync_timeout: self.view_sync_timeout, - id: self.id, - }; - - // TODO ED Make all these view numbers into a single variable to avoid errors - let result = replica_state - .handle_event(SequencingHotShotEvent::ViewSyncTrigger(view_number + 1)) - .await; - - if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // The protocol has finished - return; - } - - replica_state = result.1; - - let name = format!( - "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", - self.next_view, self.current_view - ); - - let replica_handle_event = HandleEvent(Arc::new( - move |event, state: ViewSyncReplicaTaskState| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent(Arc::new(Self::filter)); - let builder = - TaskBuilder::>::new(name) - .register_event_stream(replica_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(replica_state) - .register_event_handler(replica_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - - self.replica_task_map.insert( - TYPES::Time::new(*view_number + 1), - ViewSyncTaskInfo { event_stream_id }, - ); - - let _view_sync_replica_task = async_spawn(async move { - ViewSyncReplicaTaskStateTypes::build(builder).launch().await - }); - } else { - // If this is the first timeout we've seen advance to the next view - self.current_view += 1; - self.event_stream - .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( - *self.current_view, - ))) - .await; - } + // if view_number < TYPES::Time::new(*self.current_view) { + // return; + // } + + // self.num_timeouts_tracked += 1; + // error!("Num timeouts tracked is {}", self.num_timeouts_tracked); + + // if self.num_timeouts_tracked > 2 { + // error!("Too many timeouts! This shouldn't happen"); + // } + + // // TODO ED Make this a configurable variable + // if self.num_timeouts_tracked == 2 { + // // Start polling for view sync certificates + // self.exchange + // .network() + // .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( + // *view_number + 1, + // )) + // .await; + + // self.exchange + // .network() + // .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( + // *view_number + 1, + // )) + // .await; + // // panic!("Starting view sync!"); + // // Spawn replica task + + // let mut replica_state = ViewSyncReplicaTaskState { + // current_view: self.current_view, + // next_view: TYPES::Time::new(*view_number + 1), + // relay: 0, + // finalized: false, + // sent_view_change_event: false, + // phase: ViewSyncPhase::None, + // exchange: self.exchange.clone(), + // api: self.api.clone(), + // event_stream: self.event_stream.clone(), + // view_sync_timeout: self.view_sync_timeout, + // id: self.id, + // }; + + // // TODO ED Make all these view numbers into a single variable to avoid errors + // let result = replica_state + // .handle_event(SequencingHotShotEvent::ViewSyncTrigger(view_number + 1)) + // .await; + + // if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // // The protocol has finished + // return; + // } + + // replica_state = result.1; + + // let name = format!( + // "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", + // self.next_view, self.current_view + // ); + + // let replica_handle_event = HandleEvent(Arc::new( + // move |event, state: ViewSyncReplicaTaskState| { + // async move { state.handle_event(event).await }.boxed() + // }, + // )); + + // let filter = FilterEvent(Arc::new(Self::filter)); + // let builder = + // TaskBuilder::>::new(name) + // .register_event_stream(replica_state.event_stream.clone(), filter) + // .await + // .register_registry(&mut self.registry.clone()) + // .await + // .register_state(replica_state) + // .register_event_handler(replica_handle_event); + + // let event_stream_id = builder.get_stream_id().unwrap(); + + // self.replica_task_map.insert( + // TYPES::Time::new(*view_number + 1), + // ViewSyncTaskInfo { event_stream_id }, + // ); + + // let _view_sync_replica_task = async_spawn(async move { + // ViewSyncReplicaTaskStateTypes::build(builder).launch().await + // }); + // } else { + // // If this is the first timeout we've seen advance to the next view + // // self.current_view += 1; + // // self.event_stream + // // .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( + // // *self.current_view, + // // ))) + // // .await; + // } } _ => {} diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 35e75fffa7..cbc2c149dc 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -18,7 +18,7 @@ async fn test_success() { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(1_200_000), + duration: Duration::from_secs(60), }, ), ..TestMetadata::default() From ca28a13f13f91cbf1ea946e9ca454de2240d6a82 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 19 Sep 2023 14:50:19 -0400 Subject: [PATCH 0113/1393] chore: Remove Committable from VoteData (#1777) * add get_commit method to VoteData * remove Committable from VoteData, replace commit() -> get_commit() * amalgamate impls of VoteData * lint --- types/src/traits/election.rs | 85 ++++++++++++------------------------ 1 file changed, 29 insertions(+), 56 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81dfd46d57..6ced8bebbc 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -91,50 +91,21 @@ pub enum VoteData { ViewSyncFinalize(Commitment), } -/// Make different types of `VoteData` committable -impl Committable for VoteData { - fn commit(&self) -> Commitment { +impl VoteData +where + COMMITTABLE: Committable + Serialize + Clone, +{ + /// Return the underlying commitment. + #[must_use] + pub fn get_commit(&self) -> Commitment { + #[allow(clippy::enum_glob_use)] + use VoteData::*; match self { - VoteData::DA(block_commitment) => { - commit::RawCommitmentBuilder::new("DA BlockPayload Commit") - .field("block_commitment", *block_commitment) - .finalize() - } - VoteData::Yes(leaf_commitment) => commit::RawCommitmentBuilder::new("Yes Vote Commit") - .field("leaf_commitment", *leaf_commitment) - .finalize(), - VoteData::No(leaf_commitment) => commit::RawCommitmentBuilder::new("No Vote Commit") - .field("leaf_commitment", *leaf_commitment) - .finalize(), - VoteData::Timeout(view_number_commitment) => { - commit::RawCommitmentBuilder::new("Timeout View Number Commit") - .field("view_number_commitment", *view_number_commitment) - .finalize() - } - VoteData::ViewSyncPreCommit(commitment) => { - commit::RawCommitmentBuilder::new("ViewSyncPreCommit") - .field("commitment", *commitment) - .finalize() - } - VoteData::ViewSyncCommit(commitment) => { - commit::RawCommitmentBuilder::new("ViewSyncCommit") - .field("commitment", *commitment) - .finalize() - } - VoteData::ViewSyncFinalize(commitment) => { - commit::RawCommitmentBuilder::new("ViewSyncFinalize") - .field("commitment", *commitment) - .finalize() - } + DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) + | ViewSyncFinalize(c) => *c, } } - fn tag() -> String { - ("VOTE_DATA_COMMIT").to_string() - } -} - -impl VoteData { #[must_use] /// Convert vote data into bytes. /// @@ -367,7 +338,7 @@ pub trait ConsensusExchange: Send + Sync { match qc.signatures() { AssembledSignature::DA(qc) => { - let real_commit = VoteData::DA(leaf_commitment).commit(); + let real_commit = VoteData::DA(leaf_commitment).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -375,7 +346,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::Yes(qc) => { - let real_commit = VoteData::Yes(leaf_commitment).commit(); + let real_commit = VoteData::Yes(leaf_commitment).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -383,7 +354,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::No(qc) => { - let real_commit = VoteData::No(leaf_commitment).commit(); + let real_commit = VoteData::No(leaf_commitment).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -411,7 +382,7 @@ pub trait ConsensusExchange: Send + Sync { let mut is_valid_vote_token = false; let mut is_valid_signature = false; if let Some(key) = ::from_bytes(encoded_key) { - is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); let valid_vote_token = self.membership().validate_vote_token(key, vote_token); is_valid_vote_token = match valid_vote_token { Err(_) => { @@ -433,7 +404,7 @@ pub trait ConsensusExchange: Send + Sync { data: &VoteData, vote_token: &Checked, ) -> bool { - let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); let valid_vote_token = self .membership() .validate_vote_token(key.clone(), vote_token.clone()); @@ -643,7 +614,7 @@ impl< let signature = TYPES::SignatureKey::sign( &self.private_key, VoteData::::DA(block_commitment) - .commit() + .get_commit() .as_ref(), ); (self.public_key.to_bytes(), signature) @@ -688,7 +659,7 @@ impl< let signature = TYPES::SignatureKey::sign( &self.private_key, VoteData::::DA(block_commitment) - .commit() + .get_commit() .as_ref(), ); (self.public_key.to_bytes(), signature) @@ -931,7 +902,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::Yes(leaf_commitment).commit().as_ref(), + VoteData::::Yes(leaf_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -947,7 +918,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::No(leaf_commitment).commit().as_ref(), + VoteData::::No(leaf_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -963,7 +934,7 @@ impl< let signature = TYPES::SignatureKey::sign( &self.private_key, VoteData::::Timeout(view_number.commit()) - .commit() + .get_commit() .as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1212,7 +1183,9 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), + VoteData::ViewSyncPreCommit(commitment) + .get_commit() + .as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1253,7 +1226,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncCommit(commitment).commit().as_ref(), + VoteData::ViewSyncCommit(commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1294,7 +1267,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncFinalize(commitment).commit().as_ref(), + VoteData::ViewSyncFinalize(commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1325,7 +1298,7 @@ impl< }; match certificate_internal.signatures { AssembledSignature::ViewSyncPreCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); + let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().failure_threshold().get()), @@ -1337,7 +1310,7 @@ impl< ) } AssembledSignature::ViewSyncCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); + let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -1349,7 +1322,7 @@ impl< ) } AssembledSignature::ViewSyncFinalize(raw_signatures) => { - let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); + let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), From ff1b6d8e1ba74ba10a192f18679f9c51ef99500c Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 19 Sep 2023 16:52:54 -0400 Subject: [PATCH 0114/1393] chore: remove Committable from VoteData type param (#1779) * remove Committable from VoteData * fix libp2p example build --- hotshot/examples/libp2p/multi-validator.rs | 4 +- hotshot/examples/libp2p/orchestrator.rs | 4 +- hotshot/examples/libp2p/types.rs | 54 +++++++-------- hotshot/examples/libp2p/validator.rs | 4 +- types/src/certificate.rs | 2 +- types/src/traits/election.rs | 80 +++++++++++++--------- types/src/vote.rs | 22 +++--- 7 files changed, 93 insertions(+), 77 deletions(-) diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index 8a0f53c1c9..3ed46fa979 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; @@ -49,7 +49,7 @@ async fn main() { for _ in 0..args.num_nodes { let node = async_spawn(async move { infra_da::main_entry_point::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 772d3aa12e..594d004a93 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::instrument; use types::ThisMembership; @@ -29,7 +29,7 @@ async fn main() { let args = OrchestratorArgs::parse(); run_orchestrator_da::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 5289c9d6fe..79b1ea1419 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,6 +1,6 @@ use crate::infra_da::Libp2pDARun; use hotshot::{ - demos::sdemo::SDemoTypes, + demo::DemoTypes, traits::{ election::static_committee::GeneralStaticCommittee, implementations::{Libp2pCommChannel, MemoryStorage}, @@ -22,54 +22,54 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = SequencingLeaf; +pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = Libp2pCommChannel; -pub type QuorumNetwork = Libp2pCommChannel; -pub type ViewSyncNetwork = Libp2pCommChannel; + GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pCommChannel; +pub type ViewSyncNetwork = Libp2pCommChannel; -pub type ThisDAProposal = DAProposal; -pub type ThisDAVote = DAVote; +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; -pub type ThisQuorumProposal = QuorumProposal; -pub type ThisQuorumVote = QuorumVote; +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; -pub type ThisViewSyncProposal = ViewSyncCertificate; -pub type ThisViewSyncVote = ViewSyncVote; +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; - type Leaf = SequencingLeaf; +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; type Exchanges = SequencingExchanges< - SDemoTypes, - Message, + DemoTypes, + Message, QuorumExchange< - SDemoTypes, + DemoTypes, Self::Leaf, ThisQuorumProposal, ThisMembership, QuorumNetwork, - Message, + Message, >, - CommitteeExchange>, + CommitteeExchange>, ViewSyncExchange< - SDemoTypes, + DemoTypes, ThisViewSyncProposal, ThisMembership, ViewSyncNetwork, - Message, + Message, >, >; - type ConsensusMessage = SequencingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pDARun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index ec2415fd65..ab44e02991 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -29,7 +29,7 @@ async fn main() { args.host, args.port ); infra_da::main_entry_point::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 9792026dcf..5309729eb7 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -146,7 +146,7 @@ pub struct VoteMetaData, /// Data of the vote, yes, no, timeout, or DA - pub data: VoteData, + pub data: VoteData>, /// The votes's token pub vote_token: T, /// View number for the vote diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 6ced8bebbc..3d152cc85e 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -74,38 +74,46 @@ pub enum Checked { /// Data to vote on for different types of votes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub enum VoteData { +pub enum VoteData +where + COMMITMENT: for<'a> Deserialize<'a>, +{ /// Vote to provide availability for a block. - DA(Commitment), + DA(COMMITMENT), /// Vote to append a leaf to the log. - Yes(Commitment), + Yes(COMMITMENT), /// Vote to reject a leaf from the log. - No(Commitment), + No(COMMITMENT), /// Vote to time out and proceed to the next view. - Timeout(Commitment), + Timeout(COMMITMENT), /// Vote to pre-commit the view sync. - ViewSyncPreCommit(Commitment), + ViewSyncPreCommit(COMMITMENT), /// Vote to commit the view sync. - ViewSyncCommit(Commitment), + ViewSyncCommit(COMMITMENT), /// Vote to finalize the view sync. - ViewSyncFinalize(Commitment), + ViewSyncFinalize(COMMITMENT), } -impl VoteData +impl VoteData where - COMMITTABLE: Committable + Serialize + Clone, + COMMITMENT: for<'a> Deserialize<'a> + Clone, { /// Return the underlying commitment. #[must_use] - pub fn get_commit(&self) -> Commitment { + pub fn get_commit(&self) -> COMMITMENT { #[allow(clippy::enum_glob_use)] use VoteData::*; match self { DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) - | ViewSyncFinalize(c) => *c, + | ViewSyncFinalize(c) => c.clone(), } } +} +impl VoteData +where + COMMITMENT: Serialize + for<'a> Deserialize<'a>, +{ #[must_use] /// Convert vote data into bytes. /// @@ -269,7 +277,7 @@ pub trait ConsensusExchange: Send + Sync { /// Network used by [`Membership`](Self::Membership) to communicate. type Networking: CommunicationChannel; /// Commitments to items which are the subject of proposals and decisions. - type Commitment: Committable + Serialize + Clone; + type Commitment: Committable + Serialize + for<'a> Deserialize<'a> + Clone; /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). fn create( @@ -322,7 +330,10 @@ pub trait ConsensusExchange: Send + Sync { } /// The contents of a vote on `commit`. - fn vote_data(&self, commit: Commitment) -> VoteData; + fn vote_data( + &self, + commit: Commitment, + ) -> VoteData>; /// Validate a QC. fn is_valid_cert(&self, qc: &Self::Certificate, commit: Commitment) -> bool { @@ -376,7 +387,7 @@ pub trait ConsensusExchange: Send + Sync { &self, encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, - data: VoteData, + data: VoteData>, vote_token: Checked, ) -> bool { let mut is_valid_vote_token = false; @@ -401,7 +412,7 @@ pub trait ConsensusExchange: Send + Sync { &self, key: &TYPES::SignatureKey, encoded_signature: &EncodedSignature, - data: &VoteData, + data: &VoteData>, vote_token: &Checked, ) -> bool { let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); @@ -439,7 +450,7 @@ pub trait ConsensusExchange: Send + Sync { encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, leaf_commitment: Commitment, - vote_data: VoteData, + vote_data: VoteData>, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, accumlator: VoteAccumulator, @@ -613,9 +624,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::DA(block_commitment) - .get_commit() - .as_ref(), + VoteData::DA(block_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -658,9 +667,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::DA(block_commitment) - .get_commit() - .as_ref(), + VoteData::DA(block_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -710,7 +717,10 @@ impl< .make_vote_token(view_number, &self.private_key) } - fn vote_data(&self, commit: Commitment) -> VoteData { + fn vote_data( + &self, + commit: Commitment, + ) -> VoteData> { VoteData::DA(commit) } @@ -721,7 +731,7 @@ impl< encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, leaf_commitment: Commitment, - vote_data: VoteData, + vote_data: VoteData>, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, accumlator: VoteAccumulator, @@ -902,7 +912,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::Yes(leaf_commitment).get_commit().as_ref(), + VoteData::Yes(leaf_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -918,7 +928,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::No(leaf_commitment).get_commit().as_ref(), + VoteData::No(leaf_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -933,7 +943,7 @@ impl< fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::Timeout(view_number.commit()) + VoteData::Timeout(view_number.commit()) .get_commit() .as_ref(), ); @@ -1023,7 +1033,10 @@ impl< &self.network } - fn vote_data(&self, commit: Commitment) -> VoteData { + fn vote_data( + &self, + commit: Commitment, + ) -> VoteData> { VoteData::Yes(commit) } @@ -1034,7 +1047,7 @@ impl< encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, leaf_commitment: Commitment, - vote_data: VoteData, + vote_data: VoteData>, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, accumlator: VoteAccumulator, @@ -1382,7 +1395,10 @@ impl< &self.network } - fn vote_data(&self, _commit: Commitment) -> VoteData { + fn vote_data( + &self, + _commit: Commitment, + ) -> VoteData> { unimplemented!() } @@ -1391,7 +1407,7 @@ impl< encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, leaf_commitment: Commitment>, - vote_data: VoteData, + vote_data: VoteData>, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, accumlator: VoteAccumulator, TYPES>, diff --git a/types/src/vote.rs b/types/src/vote.rs index d5dda8d825..ef53bc4018 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -38,7 +38,7 @@ pub trait VoteType EncodedSignature; /// Get the data this vote was signed over - fn get_data(&self) -> VoteData; + fn get_data(&self) -> VoteData>; /// Get the vote token of this vote fn get_vote_token(&self) -> TYPES::VoteTokenType; } @@ -56,7 +56,7 @@ pub struct DAVote { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData, + pub vote_data: VoteData>, } /// A positive or negative vote on validating or commitment proposal. @@ -76,7 +76,7 @@ pub struct YesOrNoVote> { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData, + pub vote_data: VoteData>, } /// A timeout vote. @@ -92,7 +92,7 @@ pub struct TimeoutVote> { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData, + pub vote_data: VoteData>, } /// The internals of a view sync vote @@ -110,7 +110,7 @@ pub struct ViewSyncVoteInternal { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData>, + pub vote_data: VoteData>>, } /// The data View Sync votes are signed over @@ -206,7 +206,7 @@ impl VoteType for DAVote { fn get_signature(&self) -> EncodedSignature { self.signature.1.clone() } - fn get_data(&self) -> VoteData { + fn get_data(&self) -> VoteData> { self.vote_data.clone() } fn get_vote_token(&self) -> ::VoteTokenType { @@ -239,7 +239,7 @@ impl> VoteType fn get_signature(&self) -> EncodedSignature { self.signature() } - fn get_data(&self) -> VoteData { + fn get_data(&self) -> VoteData> { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), QuorumVote::Timeout(_) => unimplemented!(), @@ -290,7 +290,7 @@ impl VoteType> for ViewSyncVote EncodedSignature { self.signature() } - fn get_data(&self) -> VoteData> { + fn get_data(&self) -> VoteData>> { match self { ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) @@ -762,7 +762,7 @@ type VoteMap = HashMap< Commitment, ( u64, - BTreeMap, TOKEN)>, + BTreeMap>, TOKEN)>, ), >; @@ -803,7 +803,7 @@ impl EncodedSignature, Vec<::StakeTableEntry>, usize, - VoteData, + VoteData>, TOKEN, ), ), @@ -824,7 +824,7 @@ where EncodedSignature, Vec<::StakeTableEntry>, usize, - VoteData, + VoteData>, TOKEN, ), ), From a8c8297d4f47cc59d28786a2a30544e70ede6885 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 22:11:08 -0400 Subject: [PATCH 0115/1393] Some bugs fixed, still triggering view sync when it shouldn't --- task-impls/src/consensus.rs | 19 ++-- task-impls/src/network.rs | 1 + task-impls/src/view_sync.rs | 180 ++++++++++++++++++------------------ testing/tests/timeout.rs | 4 +- 4 files changed, 106 insertions(+), 98 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 20f8499339..749e011e63 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -301,6 +301,7 @@ where } }, SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + error!("received timeout vote for view {}", *vote.get_view()); if state.timeout_accumulator.is_right() { return (None, state); } @@ -472,7 +473,7 @@ where ); if let GeneralConsensusMessage::Vote(vote) = message { - debug!("Sending vote to next quorum leader {:?}", vote.get_view()); + debug!("Sending vote to next quorum leader {:?}", vote.get_view() + 1); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) .await; @@ -585,7 +586,7 @@ where // self.certs.remove(&v); // } self.cur_view = new_view; - self.current_proposal = None; + if new_view == TYPES::Time::new(1) { self.quorum_exchange @@ -633,7 +634,8 @@ where let timeout = self.timeout; self.timeout_task = async_spawn({ let stream = self.event_stream.clone(); - let view_number = self.cur_view; + // TODO ED + 1 here because of the logic change to view update. This indicates we haven't seen evidence for view change for this view within the time window + let view_number = self.cur_view + 1; async move { async_sleep(Duration::from_millis(timeout)).await; stream @@ -974,8 +976,9 @@ where // TOOD ED This means we publish the proposal without updating our own view, which doesn't seem right return; } + self.current_proposal = None; + - // ED Only do this GC if we are able to vote for v in (*self.cur_view)..=(*view) { let time = TYPES::Time::new(v); self.certs.remove(&time); @@ -1280,6 +1283,8 @@ where // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { // self.update_view(view + 1).await; + self.current_proposal = None; + } } SequencingHotShotEvent::VidCertRecv(cert) => { @@ -1291,6 +1296,8 @@ where // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { // self.update_view(view + 1).await; + self.current_proposal = None; + } } SequencingHotShotEvent::ViewChange(new_view) => { @@ -1364,7 +1371,7 @@ where .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) .await; debug!( - "We received a timeout event in the consensus task for view {}!", + "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); } @@ -1479,7 +1486,7 @@ where data: proposal, signature, }; - debug!("Sending proposal for view {:?} \n {:?}", self.cur_view, ""); + debug!("Sending proposal for view {:?} \n {:?}", leaf.view_number, ""); self.event_stream .publish(SequencingHotShotEvent::QuorumProposalSend( diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index c7df58d90a..0410d5f8dc 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -287,6 +287,7 @@ impl< ) } SequencingHotShotEvent::TimeoutVoteSend(vote) => { + // error!("Sending timeout vote to leader of view {}", *vote.get_view() + 1); ( vote.get_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 34db7c6097..53d0cc6b28 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -489,102 +489,102 @@ where } &SequencingHotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - // if view_number < TYPES::Time::new(*self.current_view) { - // return; - // } + if view_number < TYPES::Time::new(*self.current_view) { + return; + } - // self.num_timeouts_tracked += 1; - // error!("Num timeouts tracked is {}", self.num_timeouts_tracked); + self.num_timeouts_tracked += 1; + error!("Num timeouts tracked is {}", self.num_timeouts_tracked); // if self.num_timeouts_tracked > 2 { // error!("Too many timeouts! This shouldn't happen"); // } - // // TODO ED Make this a configurable variable - // if self.num_timeouts_tracked == 2 { - // // Start polling for view sync certificates - // self.exchange - // .network() - // .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( - // *view_number + 1, - // )) - // .await; - - // self.exchange - // .network() - // .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( - // *view_number + 1, - // )) - // .await; - // // panic!("Starting view sync!"); - // // Spawn replica task - - // let mut replica_state = ViewSyncReplicaTaskState { - // current_view: self.current_view, - // next_view: TYPES::Time::new(*view_number + 1), - // relay: 0, - // finalized: false, - // sent_view_change_event: false, - // phase: ViewSyncPhase::None, - // exchange: self.exchange.clone(), - // api: self.api.clone(), - // event_stream: self.event_stream.clone(), - // view_sync_timeout: self.view_sync_timeout, - // id: self.id, - // }; - - // // TODO ED Make all these view numbers into a single variable to avoid errors - // let result = replica_state - // .handle_event(SequencingHotShotEvent::ViewSyncTrigger(view_number + 1)) - // .await; - - // if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // // The protocol has finished - // return; - // } - - // replica_state = result.1; - - // let name = format!( - // "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", - // self.next_view, self.current_view - // ); - - // let replica_handle_event = HandleEvent(Arc::new( - // move |event, state: ViewSyncReplicaTaskState| { - // async move { state.handle_event(event).await }.boxed() - // }, - // )); - - // let filter = FilterEvent(Arc::new(Self::filter)); - // let builder = - // TaskBuilder::>::new(name) - // .register_event_stream(replica_state.event_stream.clone(), filter) - // .await - // .register_registry(&mut self.registry.clone()) - // .await - // .register_state(replica_state) - // .register_event_handler(replica_handle_event); - - // let event_stream_id = builder.get_stream_id().unwrap(); - - // self.replica_task_map.insert( - // TYPES::Time::new(*view_number + 1), - // ViewSyncTaskInfo { event_stream_id }, - // ); - - // let _view_sync_replica_task = async_spawn(async move { - // ViewSyncReplicaTaskStateTypes::build(builder).launch().await - // }); - // } else { - // // If this is the first timeout we've seen advance to the next view - // // self.current_view += 1; - // // self.event_stream - // // .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( - // // *self.current_view, - // // ))) - // // .await; - // } + // TODO ED Make this a configurable variable + if self.num_timeouts_tracked > 2 { + // Start polling for view sync certificates + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( + *view_number + 1, + )) + .await; + + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( + *view_number + 1, + )) + .await; + // panic!("Starting view sync!"); + // Spawn replica task + + let mut replica_state = ViewSyncReplicaTaskState { + current_view: self.current_view, + next_view: TYPES::Time::new(*view_number + 1), + relay: 0, + finalized: false, + sent_view_change_event: false, + phase: ViewSyncPhase::None, + exchange: self.exchange.clone(), + api: self.api.clone(), + event_stream: self.event_stream.clone(), + view_sync_timeout: self.view_sync_timeout, + id: self.id, + }; + + // TODO ED Make all these view numbers into a single variable to avoid errors + let result = replica_state + .handle_event(SequencingHotShotEvent::ViewSyncTrigger(view_number + 1)) + .await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + replica_state = result.1; + + let name = format!( + "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", + self.next_view, self.current_view + ); + + let replica_handle_event = HandleEvent(Arc::new( + move |event, state: ViewSyncReplicaTaskState| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent(Arc::new(Self::filter)); + let builder = + TaskBuilder::>::new(name) + .register_event_stream(replica_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(replica_state) + .register_event_handler(replica_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + self.replica_task_map.insert( + TYPES::Time::new(*view_number + 1), + ViewSyncTaskInfo { event_stream_id }, + ); + + let _view_sync_replica_task = async_spawn(async move { + ViewSyncReplicaTaskStateTypes::build(builder).launch().await + }); + } else { + // If this is the first timeout we've seen advance to the next view + self.current_view = view_number + 1; + self.event_stream + .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( + *self.current_view, + ))) + .await; + } } _ => {} diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index f8963c9d52..a9e046ef3b 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -30,7 +30,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(0, 5000), dead_nodes)], + node_changes: vec![(Duration::from_secs(1), dead_nodes)], }; // TODO ED Add safety task, etc to confirm TCs are being formed @@ -38,7 +38,7 @@ async fn test_timeout() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(10000), + duration: Duration::from_millis(60000), }, ); metadata From 8fd7b84fb5e9467369851a7cfd695a613635e399 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 19 Sep 2023 23:41:43 -0400 Subject: [PATCH 0116/1393] Memory network is not receiving DA proposals correctly --- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 1 + task-impls/src/network.rs | 2 ++ task-impls/src/view_sync.rs | 1 + testing/tests/timeout.rs | 6 ++++-- 5 files changed, 9 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 749e011e63..a82f2531fe 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -988,7 +988,7 @@ where // self.update_view(new_view).await; if let GeneralConsensusMessage::Vote(vote) = message { - debug!("Sending vote to next leader {:?}", vote); + // debug!("Sending vote to next leader {:?}", vote); }; } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index f8711b1d78..782a268a36 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -701,6 +701,7 @@ where } SequencingHotShotEvent::Shutdown => { + error!("Shutting down because of shutdown signal!"); return Some(HotShotTaskCompleted::ShutDown); } _ => { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 0410d5f8dc..233190a6d5 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -100,6 +100,7 @@ impl< }, Either::Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(proposal) => { + error!("Received da proposal for view {:?}", proposal.clone().data.view_number); SequencingHotShotEvent::DAProposalRecv(proposal.clone(), sender) } CommitteeConsensusMessage::DAVote(vote) => { @@ -302,6 +303,7 @@ impl< return None; } SequencingHotShotEvent::Shutdown => { + error!("Networking task shutting down"); return Some(HotShotTaskCompleted::ShutDown); } event => { diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 53d0cc6b28..1b6c909e1d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -502,6 +502,7 @@ where // TODO ED Make this a configurable variable if self.num_timeouts_tracked > 2 { + panic!(); // Start polling for view sync certificates self.exchange .network() diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index a9e046ef3b..1e378912a9 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -8,6 +8,8 @@ async fn test_timeout() { use std::time::Duration; + use hotshot_testing::node_types::SequencingLibp2pImpl; + use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{SequencingMemoryImpl, SequencingTestTypes}, @@ -30,7 +32,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::from_secs(1), dead_nodes)], + node_changes: vec![(Duration::from_millis(500), dead_nodes)], }; // TODO ED Add safety task, etc to confirm TCs are being formed @@ -38,7 +40,7 @@ async fn test_timeout() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(60000), + duration: Duration::from_millis(10000), }, ); metadata From bed854dbb810043f95806680ae2309cf2d6bcec3 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 20 Sep 2023 08:15:24 -0400 Subject: [PATCH 0117/1393] Update timeout test to use libp2p --- task-impls/src/view_sync.rs | 6 +++--- testing/tests/timeout.rs | 14 +++++++++++--- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 1b6c909e1d..61eef67d64 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -496,9 +496,9 @@ where self.num_timeouts_tracked += 1; error!("Num timeouts tracked is {}", self.num_timeouts_tracked); - // if self.num_timeouts_tracked > 2 { - // error!("Too many timeouts! This shouldn't happen"); - // } + if self.num_timeouts_tracked > 3 { + error!("Too many timeouts! This shouldn't happen"); + } // TODO ED Make this a configurable variable if self.num_timeouts_tracked > 2 { diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 1e378912a9..adbb58fe11 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -23,7 +23,13 @@ async fn test_timeout() { next_view_timeout: 1000, ..Default::default() }; - let mut metadata = TestMetadata::default(); + + // TODO ED Reduce down to 5 nodes once memory network issues is resolved + let mut metadata = TestMetadata { + total_nodes: 10, + start_nodes: 10, + ..Default::default() + }; let dead_nodes = vec![ChangeNode { idx: 0, updown: UpDown::Down, @@ -40,11 +46,13 @@ async fn test_timeout() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(10000), + duration: Duration::from_millis(30000), }, ); + + // TODO ED Test with memory network once issue is resolved. metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; From eec895ac722d86a63b18bdf85b1cc9459ae242e1 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 20 Sep 2023 10:32:26 -0400 Subject: [PATCH 0118/1393] Reduce build times and disk size for CI (#1783) * prune cargos to save on disk and build time * prune github actions * cache test * enable clippy linting --- hotshot/Cargo.toml | 1 - libp2p-networking/Cargo.toml | 7 ++----- orchestrator/Cargo.toml | 1 - task-impls/Cargo.toml | 2 -- task/Cargo.toml | 1 - testing/Cargo.toml | 1 - testing/README.md | 2 +- types/Cargo.toml | 5 +---- web_server/Cargo.toml | 3 --- 9 files changed, 4 insertions(+), 19 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index def132f6d8..671519ec84 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -105,7 +105,6 @@ jf-primitives = { workspace = true } libp2p = { workspace = true } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } -nll = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true, features = ["rc"] } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 82b5ed7d87..dfdb02020e 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Espresso Systems "] [features] default = ["webui"] -webui = ["tide"] +webui = [] # # this only has effect on linux # lossy_network = [ @@ -47,8 +47,6 @@ snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", ] } -# TODO do we want this as non optional?? -tokio-stream = "0.1.14" tracing = { workspace = true } void = "1.0.2" dashmap = "5.5.3" @@ -56,6 +54,7 @@ dashmap = "5.5.3" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } +tokio-stream = "0.1.14" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] libp2p = { workspace = true, features = ["async-std"] } async-std = { workspace = true } @@ -76,5 +75,3 @@ netlink-sys = { git = "https://github.com/espressosystems/netlink.git", version ], optional = true } netlink-packet-generic = { git = "https://github.com/espressosystems/netlink.git", version = "0.2.0", optional = true } -[dev-dependencies] -clap = { version = "4.4", features = ["derive", "env"] } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index f3f57ada3a..4a1cfad69c 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -16,7 +16,6 @@ blake3 = { workspace = true, features = ["traits-preview"] } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } hotshot-utils = { path = "../utils" } libp2p-networking = { workspace = true } -nll = { workspace = true } tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } surf-disco = { workspace = true } tracing = { workspace = true } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 5978e10629..08c900b53b 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -10,14 +10,12 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } either = { workspace = true } futures = { workspace = true } -nll = { workspace = true } serde = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } atomic_enum = "0.2.0" pin-project = "1.1.3" -async-stream = "0.3.5" hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-task = { path = "../task", default-features = false } diff --git a/task/Cargo.toml b/task/Cargo.toml index 720a7aca6a..cb1703831f 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -10,7 +10,6 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } either = { workspace = true } futures = { workspace = true } -nll = { workspace = true } serde = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index b361b241d7..935f9e4342 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -34,7 +34,6 @@ rand = { workspace = true } rand_chacha = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } -nll = { workspace = true } serde = { workspace = true } ethereum-types = { workspace = true } bitvec = { workspace = true } diff --git a/testing/README.md b/testing/README.md index a477450092..59b281f421 100644 --- a/testing/README.md +++ b/testing/README.md @@ -69,4 +69,4 @@ async { }; ``` -See TODO for examples. +See TODO for examples. \ No newline at end of file diff --git a/types/Cargo.toml b/types/Cargo.toml index 74ace15850..742882cce2 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -8,7 +8,7 @@ version = "0.1.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -demo = ["ed25519-compact"] +demo = [] [dependencies] arbitrary = { version = "1.3", features = ["derive"] } @@ -28,7 +28,6 @@ custom_debug = { workspace = true } derivative = "2.2.0" digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } -ed25519-compact = { version = "2.0.4", optional = true } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } futures = { workspace = true } @@ -39,7 +38,6 @@ hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", default-features = false } jf-primitives = { workspace = true, features = ["test-srs"] } jf-utils = { workspace = true } -nll = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } @@ -50,7 +48,6 @@ tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag time = { workspace = true } tracing = { workspace = true } ethereum-types = { workspace = true } -bit-vec = "0.6.3" typenum = { workspace = true } [dev-dependencies] diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 827d5a5258..bc2520065a 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -22,7 +22,6 @@ hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } -nll = { workspace = true } tracing = { workspace = true } rand = { workspace = true } serde = { workspace = true } @@ -30,8 +29,6 @@ serde_json = "1.0.96" snafu = { workspace = true } tide = { version = "0.16.0", default-features = false } toml = { workspace = true } -portpicker = "0.1" -surf-disco = { workspace = true } [dev-dependencies] hotshot-types = { path = "../types", default-features = false } From 5f0c346188e7236e27e96bb8732e9ea87ccedf6a Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 20 Sep 2023 12:19:10 -0400 Subject: [PATCH 0119/1393] chore: update jellyfish (#1785) * update jellyfish dep * add TODO comments --- task-impls/src/da.rs | 8 ++++---- testing/tests/da_task.rs | 6 +++--- testing/tests/network_task.rs | 6 +++--- types/src/data.rs | 6 ++++-- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index f8711b1d78..ff048ef469 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -671,7 +671,7 @@ where let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); let message_bytes = bincode::serialize(&message).unwrap(); - let (shares, common) = vid.dispersal_data(&message_bytes).unwrap(); + let vid_disperse = vid.disperse(&message_bytes).unwrap(); // TODO for now reuse the same block commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 @@ -680,9 +680,9 @@ where Proposal { data: VidDisperse { view_number: view, - commitment: block.commit(), - shares, - common, + commitment: block.commit(), // TODO GG should be vid_disperse.commit but that's a big change + shares: vid_disperse.shares, + common: vid_disperse.common, }, signature: message.signature, }, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index a9dcbd0c2e..204038885c 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -49,13 +49,13 @@ async fn test_da_task() { }; let vid = vid_init(); let message_bytes = bincode::serialize(&message).unwrap(); - let (shares, common) = vid.dispersal_data(&message_bytes).unwrap(); + let vid_disperse = vid.disperse(&message_bytes).unwrap(); let vid_proposal = Proposal { data: VidDisperse { view_number: message.data.view_number, commitment: block_commitment, - shares, - common, + shares: vid_disperse.shares, + common: vid_disperse.common, }, signature: message.signature.clone(), }; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 8239fc72d4..7ad17e2008 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -53,15 +53,15 @@ async fn test_network_task() { let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; let vid = vid_init(); let da_proposal_bytes = bincode::serialize(&da_proposal).unwrap(); - let (shares, common) = vid.dispersal_data(&da_proposal_bytes).unwrap(); + let vid_disperse = vid.disperse(&da_proposal_bytes).unwrap(); // TODO for now reuse the same block commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { data: VidDisperse { view_number: da_proposal.data.view_number, commitment: block_commitment, - shares, - common, + shares: vid_disperse.shares, + common: vid_disperse.common, }, signature: da_proposal.signature.clone(), }; diff --git a/types/src/data.rs b/types/src/data.rs index 8275c8d9b8..080355a195 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -173,11 +173,13 @@ pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::Time, /// Block commitment + /// + /// TODO GG type should be `::Common` but that's a big change. pub commitment: Commitment, /// VID shares dispersed among storage nodes - pub shares: Vec<::StorageShare>, + pub shares: Vec<::Share>, /// VID common data sent to all storage nodes - pub common: ::StorageCommon, + pub common: ::Common, } /// Trusted KZG setup for VID. From 120a07e006ef746385f60e01f6248879564bfaf5 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Sep 2023 12:22:17 -0400 Subject: [PATCH 0120/1393] remove transactions from consensus struct --- hotshot/src/lib.rs | 21 ----------------- hotshot/src/tasks/mod.rs | 4 +++- task-impls/src/transactions.rs | 41 +++++++++++++++++++++------------- types/src/consensus.rs | 14 ------------ 4 files changed, 29 insertions(+), 51 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b09ea6235d..38c05e19be 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -133,11 +133,6 @@ pub struct SystemContextInner> { /// a reference to the metrics that the implementor is using. _metrics: Box, - /// Transactions - /// (this is shared btwn hotshot and `Consensus`) - transactions: - Arc, TYPES::Transaction>>>, - /// The hotstuff implementation consensus: Arc>>, @@ -219,8 +214,6 @@ impl> SystemContext { state_map, cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), - transactions: Arc::default(), - seen_transactions: HashSet::new(), saved_leaves, saved_blocks, // TODO this is incorrect @@ -231,17 +224,14 @@ impl> SystemContext { invalid_qc: 0, }; let consensus = Arc::new(RwLock::new(consensus)); - let txns = consensus.read().await.get_transactions(); let inner: Arc> = Arc::new(SystemContextInner { id: nonce, channel_maps: I::new_channel_maps(start_view), consensus, - transactions: txns, public_key, private_key, config, - // networking, storage, exchanges: Arc::new(exchanges), event_sender: RwLock::default(), @@ -546,11 +536,6 @@ impl> SystemContext { /// [`HotShot`] implementations that depend on [`TYPES::ConsensusType`]. #[async_trait] pub trait HotShotType> { - /// Get the [`transactions`] field of [`HotShot`]. - fn transactions( - &self, - ) -> &Arc, TYPES::Transaction>>>; - /// Get the [`hotstuff`] field of [`HotShot`]. fn consensus(&self) -> &Arc>>; @@ -684,12 +669,6 @@ where Membership = MEMBERSHIP, > + 'static, { - fn transactions( - &self, - ) -> &Arc, TYPES::Transaction>>> { - &self.inner.transactions - } - fn consensus(&self) -> &Arc>> { &self.inner.consensus } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 619fdb6d59..a2e68528f4 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -43,7 +43,7 @@ use hotshot_types::{ vote::{ViewSyncData, VoteType}, }; use serde::Serialize; -use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +use std::{collections::{HashMap, HashSet}, marker::PhantomData, sync::Arc, time::Duration}; /// event for global event stream #[derive(Clone, Debug)] @@ -441,6 +441,8 @@ where registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), + transactions: Arc::default(), + seen_transactions: HashSet::new(), cur_view: TYPES::Time::new(0), committee_exchange: committee_exchange.into(), event_stream: event_stream.clone(), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 8609708653..b41604376d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,9 +1,11 @@ use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; use async_compatibility_layer::{ art::async_timeout, async_primitives::subscribable_rwlock::ReadView, }; use async_lock::RwLock; use bincode::config::Options; +use commit::Commitment; use commit::Committable; use either::{Either, Left, Right}; use hotshot_task::{ @@ -26,9 +28,16 @@ use hotshot_types::{ }; use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; -use std::{collections::HashSet, sync::Arc, time::Instant}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Instant, +}; use tracing::{debug, error, instrument, warn}; +/// A type alias for `HashMap, T>` +type CommitmentMap = HashMap, T>; + #[derive(Snafu, Debug)] /// Error type for consensus tasks pub struct ConsensusTaskError {} @@ -61,6 +70,12 @@ pub struct TransactionTaskState< /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>>, + /// A list of undecided transactions + pub transactions: Arc>>, + + /// A list of transactions we've seen decided, but didn't receive + pub seen_transactions: HashSet>, + /// the committee exchange pub committee_exchange: Arc>, @@ -97,15 +112,14 @@ where ) -> Option { match event { SequencingHotShotEvent::TransactionsRecv(transactions) => { - let mut consensus = self.consensus.write().await; - consensus - .get_transactions() + let consensus = self.consensus.read().await; + self.transactions .modify(|txns| { for transaction in transactions { let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); // If we didn't already know about this transaction, update our mempool metrics. - if !consensus.seen_transactions.remove(&transaction.commit()) + if !self.seen_transactions.remove(&transaction.commit()) && txns.insert(transaction.commit(), transaction).is_none() { consensus.metrics.outstanding_transactions.update(1); @@ -138,17 +152,16 @@ where Right(_) => {} } } - let mut consensus = self.consensus.write().await; - let txns = consensus.transactions.cloned().await; + let consensus = self.consensus.read().await; + let txns = self.transactions.cloned().await; let _ = included_txns.iter().map(|hash| { if !txns.contains_key(hash) { - consensus.seen_transactions.insert(*hash); + self.seen_transactions.insert(*hash); } }); drop(txns); - consensus - .transactions + self.transactions .modify(|txns| { *txns = txns .drain() @@ -255,12 +268,10 @@ where Either::Right(_commitment) => HashSet::new(), }; - let consensus = self.consensus.read().await; - - let receiver = consensus.transactions.subscribe().await; + let receiver = self.transactions.subscribe().await; loop { - let all_txns = consensus.transactions.cloned().await; + let all_txns = self.transactions.cloned().await; debug!("Size of transactions: {}", all_txns.len()); let unclaimed_txns: Vec<_> = all_txns .iter() @@ -290,7 +301,7 @@ where } break; } - let all_txns = consensus.transactions.cloned().await; + let all_txns = self.transactions.cloned().await; let txns: Vec = all_txns .iter() .filter_map(|(txn_hash, txn)| { diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a29c840056..ce27d7a547 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -2,8 +2,6 @@ pub use crate::traits::node_implementation::ViewQueue; pub use crate::utils::{View, ViewInner}; -use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; -use std::collections::HashSet; use crate::utils::Terminator; use crate::{ @@ -41,12 +39,6 @@ pub struct Consensus> { /// last view had a successful decide event pub last_decided_view: TYPES::Time, - /// A list of undecided transactions - pub transactions: Arc>>, - - /// A list of transactions we've seen decided, but didn't receive - pub seen_transactions: HashSet>, - /// Map of leaf hash -> leaf /// - contains undecided leaves /// - includes the MOST RECENT decided leaf @@ -264,12 +256,6 @@ impl> Consensus { self.state_map = self.state_map.split_off(&new_anchor_view); } - /// return a clone of the internal storage of unclaimed transactions - #[must_use] - pub fn get_transactions(&self) -> Arc>> { - self.transactions.clone() - } - /// Gets the last decided state /// # Panics /// if the last decided view's state does not exist in the state map From 71cad69f52d0f7c083c98793413391d882f2a3e8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Sep 2023 12:44:33 -0400 Subject: [PATCH 0121/1393] linting --- hotshot/src/lib.rs | 6 +++--- hotshot/src/tasks/mod.rs | 7 ++++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e543f22901..0f317c9410 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -42,12 +42,12 @@ use crate::{ }; use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, - async_primitives::{broadcast::BroadcastSender, subscribable_rwlock::SubscribableRwLock}, + async_primitives::broadcast::BroadcastSender, channel::UnboundedSender, }; use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; -use commit::{Commitment, Committable}; +use commit::Committable; use custom_debug::Debug; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -83,7 +83,7 @@ use hotshot_types::{ }; use snafu::ResultExt; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap}, marker::PhantomData, num::NonZeroUsize, sync::Arc, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index a2e68528f4..82ea383353 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -43,7 +43,12 @@ use hotshot_types::{ vote::{ViewSyncData, VoteType}, }; use serde::Serialize; -use std::{collections::{HashMap, HashSet}, marker::PhantomData, sync::Arc, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; /// event for global event stream #[derive(Clone, Debug)] From 96b8381947c1402f79712b07220b2bc53b9449d4 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Sep 2023 11:48:53 -0700 Subject: [PATCH 0122/1393] add debug for metrics --- types/src/consensus.rs | 1 + types/src/traits/metrics.rs | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a29c840056..0f9a825b87 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -73,6 +73,7 @@ pub struct Consensus> { } /// The metrics being collected for the consensus algorithm +#[derive(Debug)] pub struct ConsensusMetrics { /// The current view pub current_view: Box, diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 0253e129d5..59b62920f8 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -6,6 +6,8 @@ //! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) //! - [`Label`]: Stores the last string (example usage: current version, network online/offline) +use std::fmt::Debug; + /// The metrics type. pub trait Metrics: Send + Sync { /// Create a [`Counter`] with an optional `unit_label`. @@ -76,12 +78,12 @@ impl Label for NoMetrics { } /// An ever-incrementing counter -pub trait Counter: Send + Sync { +pub trait Counter: Send + Sync + Debug { /// Add a value to the counter fn add(&self, amount: usize); } /// A gauge that stores the latest value. -pub trait Gauge: Send + Sync { +pub trait Gauge: Send + Sync + Debug { /// Set the gauge value fn set(&self, amount: usize); @@ -90,7 +92,7 @@ pub trait Gauge: Send + Sync { } /// A histogram which will record a series of points. -pub trait Histogram: Send + Sync { +pub trait Histogram: Send + Sync + Debug { /// Add a point to this histogram. fn add_point(&self, point: f64); } From e7fc08b9ab953cb3a4f8affd75088e561c691a70 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Sep 2023 11:48:53 -0700 Subject: [PATCH 0123/1393] add debug for metrics --- types/src/consensus.rs | 1 + types/src/traits/metrics.rs | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a29c840056..0f9a825b87 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -73,6 +73,7 @@ pub struct Consensus> { } /// The metrics being collected for the consensus algorithm +#[derive(Debug)] pub struct ConsensusMetrics { /// The current view pub current_view: Box, diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 0253e129d5..59b62920f8 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -6,6 +6,8 @@ //! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) //! - [`Label`]: Stores the last string (example usage: current version, network online/offline) +use std::fmt::Debug; + /// The metrics type. pub trait Metrics: Send + Sync { /// Create a [`Counter`] with an optional `unit_label`. @@ -76,12 +78,12 @@ impl Label for NoMetrics { } /// An ever-incrementing counter -pub trait Counter: Send + Sync { +pub trait Counter: Send + Sync + Debug { /// Add a value to the counter fn add(&self, amount: usize); } /// A gauge that stores the latest value. -pub trait Gauge: Send + Sync { +pub trait Gauge: Send + Sync + Debug { /// Set the gauge value fn set(&self, amount: usize); @@ -90,7 +92,7 @@ pub trait Gauge: Send + Sync { } /// A histogram which will record a series of points. -pub trait Histogram: Send + Sync { +pub trait Histogram: Send + Sync + Debug { /// Add a point to this histogram. fn add_point(&self, point: f64); } From d8e8249e1d9190b7a3fa0cbebcedf3b1eeefa931 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Sep 2023 15:10:51 -0400 Subject: [PATCH 0124/1393] allow more failures in web catchup test --- testing/tests/catchup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 9c6bd0bb38..01610f38f2 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -164,7 +164,7 @@ async fn test_catchup_one_node() { ..Default::default() }; // only alow for the view which the catchup node hasn't started to fail - metadata.overall_safety_properties.num_failed_views = 1; + metadata.overall_safety_properties.num_failed_views = 5; metadata .gen_launcher::() From 0a05084c9af499b9b5501cbb95cdf5ae013793db Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Sep 2023 12:20:55 -0700 Subject: [PATCH 0125/1393] add derive(Debug) for TestMetrics --- types/src/traits/metrics.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 59b62920f8..4d73e5f67a 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -111,6 +111,7 @@ mod test { sync::{Arc, Mutex}, }; + #[derive(Debug)] struct TestMetrics { prefix: String, values: Arc>, From b597a15eab09fb7f11b17076cabf90a016849ac7 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 20 Sep 2023 17:49:32 -0400 Subject: [PATCH 0126/1393] chore: remove Committable from 3 more types (#1789) * VoteMap type param Commitment -> COMMITMENT * VoteAccumulator type param commitment instead of Committable * VoteType remove Committable from type param (this was a big one) --- hotshot/src/tasks/mod.rs | 6 ++-- types/src/traits/election.rs | 4 +-- types/src/vote.rs | 58 ++++++++++++++++++++---------------- 3 files changed, 37 insertions(+), 31 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 619fdb6d59..3f342b68f7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,7 +5,7 @@ use crate::{ QuorumCertificate, SequencingQuorumEx, }; use async_compatibility_layer::art::async_sleep; -use commit::Committable; +use commit::{Commitment, Committable}; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -66,7 +66,7 @@ pub async fn add_network_message_task< >, COMMITTABLE: Committable + Serialize + Clone, PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType>, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -176,7 +176,7 @@ pub async fn add_network_event_task< >, COMMITTABLE: Committable + Serialize + Clone, PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType>, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 3d152cc85e..fb1ff72a58 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -165,7 +165,7 @@ where TOKEN: VoteToken, { /// `VoteType` that is used in this certificate - type Vote: VoteType; + type Vote: VoteType>; /// `Accumulator` type to accumulate votes. type VoteAccumulator: Accumulator2; @@ -267,7 +267,7 @@ pub trait ConsensusExchange: Send + Sync { type Proposal: ProposalType; /// A vote on a [`Proposal`](Self::Proposal). // TODO ED Make this equal Certificate vote (if possible?) - type Vote: VoteType; + type Vote: VoteType>; /// A [`SignedCertificate`] attesting to a decision taken by the committee. type Certificate: SignedCertificate + Hash diff --git a/types/src/vote.rs b/types/src/vote.rs index ef53bc4018..c470925b54 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -28,7 +28,7 @@ use std::{ use tracing::error; /// The vote sent by consensus messages. -pub trait VoteType: +pub trait VoteType Deserialize<'a> + Serialize + Clone>: Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq { /// Get the view this vote was cast for @@ -38,7 +38,7 @@ pub trait VoteType EncodedSignature; /// Get the data this vote was signed over - fn get_data(&self) -> VoteData>; + fn get_data(&self) -> VoteData; /// Get the vote token of this vote fn get_vote_token(&self) -> TYPES::VoteTokenType; } @@ -196,7 +196,7 @@ pub enum QuorumVote> { Timeout(TimeoutVote), } -impl VoteType for DAVote { +impl VoteType> for DAVote { fn get_view(&self) -> TYPES::Time { self.current_view } @@ -223,7 +223,7 @@ impl DAVote { } } -impl> VoteType +impl> VoteType> for QuorumVote { fn get_view(&self) -> TYPES::Time { @@ -275,7 +275,7 @@ impl> QuorumVote } } -impl VoteType> for ViewSyncVote { +impl VoteType>> for ViewSyncVote { fn get_view(&self) -> TYPES::Time { match self { ViewSyncVote::PreCommit(v) | ViewSyncVote::Commit(v) | ViewSyncVote::Finalize(v) => { @@ -320,7 +320,7 @@ pub trait Accumulator: Sized { pub trait Accumulator2< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, >: Sized { /// Append 1 vote to the accumulator. If the threshold is not reached, return @@ -338,10 +338,10 @@ pub trait Accumulator2< pub struct DAVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > { /// Map of all da signatures accumlated so far - pub da_vote_outcomes: VoteMap, + pub da_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation @@ -355,7 +355,7 @@ pub struct DAVoteAccumulator< impl< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > Accumulator2 for DAVoteAccumulator { fn append( @@ -429,14 +429,14 @@ impl< pub struct QuorumVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > { /// Map of all signatures accumlated so far - pub total_vote_outcomes: VoteMap, + pub total_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// Map of all yes signatures accumlated so far - pub yes_vote_outcomes: VoteMap, + pub yes_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// Map of all no signatures accumlated so far - pub no_vote_outcomes: VoteMap, + pub no_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, @@ -453,7 +453,7 @@ pub struct QuorumVoteAccumulator< impl< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > Accumulator2 for QuorumVoteAccumulator { fn append( @@ -560,14 +560,14 @@ impl< pub struct ViewSyncVoteAccumulator< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > { /// Map of all pre_commit signatures accumlated so far - pub pre_commit_vote_outcomes: VoteMap, + pub pre_commit_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// Map of all ommit signatures accumlated so far - pub commit_vote_outcomes: VoteMap, + pub commit_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// Map of all finalize signatures accumlated so far - pub finalize_vote_outcomes: VoteMap, + pub finalize_vote_outcomes: VoteMap, TYPES::VoteTokenType>, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, @@ -584,7 +584,7 @@ pub struct ViewSyncVoteAccumulator< impl< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > Accumulator2 for ViewSyncVoteAccumulator { #[allow(clippy::too_many_lines)] @@ -734,7 +734,7 @@ impl< pub struct AccumulatorPlaceholder< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > { /// Phantom data to make compiler happy pub phantom: PhantomData<(TYPES, VOTE, COMMITTABLE)>, @@ -743,7 +743,7 @@ pub struct AccumulatorPlaceholder< impl< TYPES: NodeType, COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType, + VOTE: VoteType>, > Accumulator2 for AccumulatorPlaceholder { fn append( @@ -758,17 +758,23 @@ impl< /// Mapping of commitments to vote tokens by key. // TODO ED Remove this whole token generic -type VoteMap = HashMap< - Commitment, +type VoteMap = HashMap< + COMMITMENT, ( u64, - BTreeMap>, TOKEN)>, + BTreeMap, TOKEN)>, ), >; /// Describe the process of collecting signatures on block or leaf commitment, to form a DAC or QC, /// respectively. -pub struct VoteAccumulator { +/// +/// TODO GG used only in election.rs; move this to there and make it private? +pub struct VoteAccumulator< + TOKEN, + COMMITMENT: Serialize + for<'a> Deserialize<'a> + Clone, + TYPES: NodeType, +> { /// Map of all signatures accumlated so far pub total_vote_outcomes: VoteMap, /// Map of all da signatures accumlated so far @@ -809,7 +815,7 @@ impl ), ), AssembledSignature, - > for VoteAccumulator + > for VoteAccumulator, TYPES> where TOKEN: Clone + VoteToken, { From 486e793ffea4987f0200d40a7e29d2738b703887 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 06:54:08 -0400 Subject: [PATCH 0127/1393] comments --- task-impls/src/consensus.rs | 68 +++++++++++++++++++++---------------- testing/tests/timeout.rs | 13 +++++-- 2 files changed, 49 insertions(+), 32 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a82f2531fe..99b4028b2f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -473,7 +473,10 @@ where ); if let GeneralConsensusMessage::Vote(vote) = message { - debug!("Sending vote to next quorum leader {:?}", vote.get_view() + 1); + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view() + 1 + ); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) .await; @@ -586,14 +589,15 @@ where // self.certs.remove(&v); // } self.cur_view = new_view; - - if new_view == TYPES::Time::new(1) { - self.quorum_exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) - .await; - } + // TODO ED These injects aren't right + // Commenting out because HotShot doesn't start on 1 now + // if new_view == TYPES::Time::new(1) { + // self.quorum_exchange + // .network() + // .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + // .await; + // } // Poll the future leader for lookahead let lookahead_view = new_view + LOOK_AHEAD; @@ -610,14 +614,15 @@ where // Start polling for proposals for the new view self.quorum_exchange .network() - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view)) + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view + 1)) .await; self.quorum_exchange .network() - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view)) + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view + 1)) .await; + // TODO ED I think this poll is still correct (actually want to poll for it in both views, in case this node just doesn't receive the latest proposal, but that is more of a web server issue specifically) if self.quorum_exchange.is_leader(self.cur_view + 1) { debug!("Polling for quorum votes for view {}", *self.cur_view); self.quorum_exchange @@ -676,19 +681,21 @@ where if proposal.data.justify_qc.view_number() != proposal.data.view_number - 1 { // TODO ED Add timeout cert logic if proposal.data.timeout_certificate.is_none() { - error!("Proposal needed a timeout cert but didn't have one {:?}", proposal.data.clone()); - return - } - else { + error!( + "Proposal needed a timeout cert but didn't have one {:?}", + proposal.data.clone() + ); + return; + } else { error!("Proposal for view {} had timeout certificate", *view); } // TODO ED Check timeout cert validity } - // TODO ED This needs to be moved further down so we only update the view after fully validating the qc. + // TODO ED This needs to be moved further down so we only update the view after fully validating the qc. self.update_view(view).await; - // TODO ED How does this play in with the timeout cert? + // TODO ED How does this play in with the timeout cert? self.current_proposal = Some(proposal.data.clone()); let vote_token = self.quorum_exchange.make_vote_token(view); @@ -701,14 +708,10 @@ where debug!("We were not chosen for consensus committee on {:?}", view); } Ok(Some(vote_token)) => { - - debug!("We were chosen for consensus committee on {:?}", view); let consensus = self.consensus.upgradable_read().await; let message; - - // Construct the leaf. let justify_qc = proposal.data.justify_qc; let parent = if justify_qc.is_genesis() { @@ -978,7 +981,6 @@ where } self.current_proposal = None; - for v in (*self.cur_view)..=(*view) { let time = TYPES::Time::new(v); self.certs.remove(&time); @@ -1228,12 +1230,15 @@ where // TODO ED Clean this up, get rid of clones if self - .publish_proposal_if_able(self.consensus.read().await.high_qc.clone(), qc.clone().view_number + 1, Some(qc.clone())) + .publish_proposal_if_able( + self.consensus.read().await.high_qc.clone(), + qc.clone().view_number + 1, + Some(qc.clone()), + ) .await { // self.update_view(qc.view_number + 1).await; - } - else { + } else { error!("Wasn't able to publish proposal"); } } @@ -1284,7 +1289,6 @@ where if self.vote_if_able().await { // self.update_view(view + 1).await; self.current_proposal = None; - } } SequencingHotShotEvent::VidCertRecv(cert) => { @@ -1297,7 +1301,6 @@ where if self.vote_if_able().await { // self.update_view(view + 1).await; self.current_proposal = None; - } } SequencingHotShotEvent::ViewChange(new_view) => { @@ -1342,6 +1345,10 @@ where // } } SequencingHotShotEvent::Timeout(view) => { + // TODO ED This is not an ideal check, we should have the timeout task receive view change events and then cancel itself + if self.cur_view >= view { + return; + } let vote_token = self.timeout_exchange.make_vote_token(view); match vote_token { @@ -1388,7 +1395,7 @@ where &self, _qc: QuorumCertificate, view: TYPES::Time, - timeout_certificate: Option> + timeout_certificate: Option>, ) -> bool { if !self.quorum_exchange.is_leader(view) { error!( @@ -1477,7 +1484,7 @@ where view_number: leaf.view_number, height: leaf.height, justify_qc: consensus.high_qc.clone(), - timeout_certificate: timeout_certificate.or_else(|| { None }), + timeout_certificate: timeout_certificate.or_else(|| None), proposer_id: leaf.proposer_id, dac: None, }; @@ -1486,7 +1493,10 @@ where data: proposal, signature, }; - debug!("Sending proposal for view {:?} \n {:?}", leaf.view_number, ""); + debug!( + "Sending proposal for view {:?} \n {:?}", + leaf.view_number, "" + ); self.event_stream .publish(SequencingHotShotEvent::QuorumProposalSend( diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index adbb58fe11..7ac21061ce 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -9,7 +9,9 @@ async fn test_timeout() { use std::time::Duration; use hotshot_testing::node_types::SequencingLibp2pImpl; + use hotshot_testing::node_types::SequencingWebImpl; + use hotshot_testing::overall_safety_task::OverallSafetyPropertiesDescription; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{SequencingMemoryImpl, SequencingTestTypes}, @@ -37,6 +39,11 @@ async fn test_timeout() { metadata.timing_data = timing_data; + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_successful_views: 50, + ..Default::default() + }; + metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(Duration::from_millis(500), dead_nodes)], }; @@ -46,13 +53,13 @@ async fn test_timeout() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(30000), + duration: Duration::from_secs(30), }, ); - // TODO ED Test with memory network once issue is resolved. + // TODO ED Test with memory network once issue is resolved. metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; From 82e6294a5d2b459eabd554fe448c2552d3d0f755 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 08:20:52 -0400 Subject: [PATCH 0128/1393] Fix test_consensus_task test after timeout logic update --- task-impls/src/harness.rs | 4 ++++ testing/tests/consensus_task.rs | 27 +++++++++++++++++++-------- testing/tests/timeout.rs | 3 +-- 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 136093bc3e..4978b27492 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -11,6 +11,7 @@ use hotshot_task::{ use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use snafu::Snafu; use std::{collections::HashMap, future::Future, sync::Arc}; +use tracing::error; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState> { @@ -106,6 +107,9 @@ pub fn handle_event>( *num_expected -= 1; } + // TODO ED Remove + error!("Event is {:?}", event); + if state.expected_output.is_empty() { return (Some(HotShotTaskCompleted::ShutDown), state); } diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ec1bd7b05c..52693881b9 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -85,9 +85,11 @@ async fn build_vote( tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; + use hotshot_types::certificate::QuorumCertificate; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -98,19 +100,28 @@ async fn test_consensus_task() { let mut input = Vec::new(); let mut output = HashMap::new(); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal + let qc = + QuorumCertificate::>::genesis(); + let proposal = build_quorum_proposal(&handle, &private_key, 1).await; + + input.push(SequencingHotShotEvent::QCFormed(either::Left(qc.clone()))); + input.push(SequencingHotShotEvent::QuorumProposalRecv( + proposal.clone(), + public_key, + )); input.push(SequencingHotShotEvent::Shutdown); + output.insert(SequencingHotShotEvent::QCFormed(either::Left(qc)), 1); output.insert( - SequencingHotShotEvent::QuorumProposalSend( - build_quorum_proposal(&handle, &private_key, 1).await, - public_key, - ), + SequencingHotShotEvent::QuorumProposalSend(proposal.clone(), public_key), 1, ); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 2); + output.insert( + SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + 1, + ); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert(SequencingHotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 7ac21061ce..954588958c 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -4,7 +4,6 @@ tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_timeout() { use std::time::Duration; @@ -40,7 +39,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_successful_views: 50, + num_successful_views: 25, ..Default::default() }; From 8498220a5bc9ca6bb16387929a466d688c9b322a Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 10:55:41 -0400 Subject: [PATCH 0129/1393] test_consensus_vote working --- testing/tests/consensus_task.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 52693881b9..9e722088ef 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -146,20 +146,18 @@ async fn test_consensus_vote() { let handle = build_system_handle(2).await.0; let (private_key, public_key) = key_pair_for_id(1); + let (private_key_2, public_key_2) = key_pair_for_id(2); let mut input = Vec::new(); let mut output = HashMap::new(); let proposal = build_quorum_proposal(&handle, &private_key, 1).await; - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader input.push(SequencingHotShotEvent::QuorumProposalRecv( proposal.clone(), public_key, )); - - input.push(SequencingHotShotEvent::Shutdown); - output.insert( SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), 1, @@ -168,10 +166,14 @@ async fn test_consensus_vote() { if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal, ViewNumber::new(1)).await { - output.insert(SequencingHotShotEvent::QuorumVoteSend(vote), 1); + output.insert(SequencingHotShotEvent::QuorumVoteSend(vote.clone()), 1); + input.push(SequencingHotShotEvent::QuorumVoteRecv(vote.clone())); + output.insert(SequencingHotShotEvent::QuorumVoteRecv(vote), 1); } - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); + + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); + + input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { From 55de069c7ae0abb6ffa5f4a3bad0c24a6f347b5c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 13:31:08 -0400 Subject: [PATCH 0130/1393] test_basic passes --- task-impls/src/view_sync.rs | 1 - testing/tests/view_sync_task.rs | 31 ++++++++++++++++++------------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 61eef67d64..51476a4a92 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -502,7 +502,6 @@ where // TODO ED Make this a configurable variable if self.num_timeouts_tracked > 2 { - panic!(); // Start polling for view sync certificates self.exchange .network() diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 25cbe55b9a..919303d36a 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -34,7 +34,7 @@ async fn test_view_sync_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 3. - let handle = build_system_handle(3).await.0; + let handle = build_system_handle(5).await.0; let api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { inner: handle.hotshot.inner.clone(), @@ -42,19 +42,19 @@ async fn test_view_sync_task() { let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); let relay_pub_key = api.public_key().to_bytes(); let vote_token = view_sync_exchange - .make_vote_token(ViewNumber::new(3)) + .make_vote_token(ViewNumber::new(5)) .unwrap_or_else(|_| panic!("Error making vote token")) .unwrap_or_else(|| panic!("Not chosen for the committee")); let vote_data_internal: ViewSyncData = ViewSyncData { relay: relay_pub_key.clone(), - round: ViewNumber::new(3), + round: ViewNumber::new(5), }; let vote_data_internal_commitment = vote_data_internal.commit(); let signature = view_sync_exchange.sign_precommit_message(vote_data_internal_commitment); let vote = ViewSyncVote::PreCommit(ViewSyncVoteInternal { relay_pub_key, relay: 0, - round: ViewNumber::new(3), + round: ViewNumber::new(5), signature, vote_token, vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), @@ -64,23 +64,28 @@ async fn test_view_sync_task() { let mut input = Vec::new(); let mut output = HashMap::new(); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(1))); input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(3))); + input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(4))); + input.push(SequencingHotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(1)), 1); output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(3)), 1); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(4)), 1); + // 2 `Timeout` events will trigger a replica task to handle a `ViewSyncTrigger` event, which // will then publish a `ViewSyncVoteSend` event. output.insert(SequencingHotShotEvent::ViewSyncVoteSend(vote.clone()), 1); - output.insert( - SequencingHotShotEvent::ViewSyncTimeout(ViewNumber::new(3), 0, ViewSyncPhase::None), - 1, - ); + // output.insert( + // SequencingHotShotEvent::ViewSyncTimeout(ViewNumber::new(5), 0, ViewSyncPhase::None), + // 1, + // ); // Triggered by the `Timeout` events. - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); + // output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(3)), 1); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(4)), 1); + output.insert(SequencingHotShotEvent::Shutdown, 1); let build_fn = From 32213b309d7107c8988e0b53fcd42718591bf174 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Thu, 21 Sep 2023 13:45:05 -0400 Subject: [PATCH 0131/1393] chore: Remove Committable from more types (#1792) * remove Committable from Accumulator2 * remove Committable from AccumulatorPlaceholder * remove Committable from DAVoteAccumulator type param * remove Committable from QuorumVoteAccumulator type param * remove Committable from ViewSyncVoteAccumulator type param * remove Committable from SignedCertificate type param (that's a big one) --- task-impls/src/consensus.rs | 4 +-- task-impls/src/da.rs | 4 +-- task-impls/src/view_sync.rs | 4 +-- types/src/certificate.rs | 14 +++++---- types/src/traits/election.rs | 20 ++++++------ types/src/vote.rs | 61 ++++++++++++++++++------------------ 6 files changed, 55 insertions(+), 52 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dccf7919c3..ef4694a282 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -4,7 +4,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use bitvec::prelude::*; -use commit::Committable; +use commit::{Commitment, Committable}; use core::time::Duration; use either::{Either, Left, Right}; use futures::FutureExt; @@ -151,7 +151,7 @@ pub struct VoteCollectionTaskState< TYPES, TYPES::Time, TYPES::VoteTokenType, - SequencingLeaf, + Commitment>, >>::VoteAccumulator, QuorumCertificate>, >, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ff048ef469..9b1657324f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use bitvec::prelude::*; -use commit::Committable; +use commit::{Commitment, Committable}; use either::{Either, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -103,7 +103,7 @@ pub struct DAVoteCollectionTaskState< TYPES, TYPES::Time, TYPES::VoteTokenType, - TYPES::BlockType, + Commitment, >>::VoteAccumulator, DACertificate, >, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index f92752f487..eaac060975 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,7 +1,7 @@ #![allow(clippy::module_name_repetitions)] use crate::events::SequencingHotShotEvent; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use commit::Committable; +use commit::{Commitment, Committable}; use either::Either::{self, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -228,7 +228,7 @@ pub struct ViewSyncRelayTaskState< TYPES, TYPES::Time, TYPES::VoteTokenType, - ViewSyncData, + Commitment>, >>::VoteAccumulator, ViewSyncCertificate, >, diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 5309729eb7..7018490e09 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -157,11 +157,11 @@ pub struct VoteMetaData> - SignedCertificate + SignedCertificate> for QuorumCertificate { type Vote = QuorumVote; - type VoteAccumulator = QuorumVoteAccumulator; + type VoteAccumulator = QuorumVoteAccumulator, Self::Vote>; fn from_signatures_and_commitment( signatures: AssembledSignature, @@ -234,11 +234,12 @@ impl> Committable } } -impl SignedCertificate +impl + SignedCertificate> for DACertificate { type Vote = DAVote; - type VoteAccumulator = DAVoteAccumulator; + type VoteAccumulator = DAVoteAccumulator, Self::Vote>; fn from_signatures_and_commitment( signatures: AssembledSignature, @@ -323,11 +324,12 @@ impl Committable for ViewSyncCertificate { } impl - SignedCertificate> + SignedCertificate>> for ViewSyncCertificate { type Vote = ViewSyncVote; - type VoteAccumulator = ViewSyncVoteAccumulator, Self::Vote>; + type VoteAccumulator = + ViewSyncVoteAccumulator>, Self::Vote>; /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( signatures: AssembledSignature, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fb1ff72a58..7ccde773af 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -158,17 +158,17 @@ pub trait ElectionConfig: } /// A certificate of some property which has been signed by a quroum of nodes. -pub trait SignedCertificate +pub trait SignedCertificate where Self: Send + Sync + Clone + Serialize + for<'a> Deserialize<'a>, - COMMITTABLE: Committable + Serialize + Clone, + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, TOKEN: VoteToken, { /// `VoteType` that is used in this certificate - type Vote: VoteType>; + type Vote: VoteType; /// `Accumulator` type to accumulate votes. - type VoteAccumulator: Accumulator2; + type VoteAccumulator: Accumulator2; /// Build a QC from the threshold signature and commitment // TODO ED Rename this function and rework this function parameters @@ -189,10 +189,10 @@ where // TODO ED Make an issue for this /// Get the leaf commitment. - fn leaf_commitment(&self) -> Commitment; + fn leaf_commitment(&self) -> COMMITMENT; /// Set the leaf commitment. - fn set_leaf_commitment(&mut self, commitment: Commitment); + fn set_leaf_commitment(&mut self, commitment: COMMITMENT); /// Get whether the certificate is for the genesis block. fn is_genesis(&self) -> bool; @@ -269,7 +269,7 @@ pub trait ConsensusExchange: Send + Sync { // TODO ED Make this equal Certificate vote (if possible?) type Vote: VoteType>; /// A [`SignedCertificate`] attesting to a decision taken by the committee. - type Certificate: SignedCertificate + type Certificate: SignedCertificate> + Hash + Eq; /// The committee eligible to make decisions. @@ -468,13 +468,13 @@ pub trait ConsensusExchange: Send + Sync { TYPES, TYPES::Time, TYPES::VoteTokenType, - Self::Commitment, + Commitment, >>::VoteAccumulator, vote: &<>::Certificate as SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, - Self::Commitment, + Commitment, >>::Vote, _commit: &Commitment, ) -> Either< @@ -482,7 +482,7 @@ pub trait ConsensusExchange: Send + Sync { TYPES, TYPES::Time, TYPES::VoteTokenType, - Self::Commitment, + Commitment, >>::VoteAccumulator, Self::Certificate, > { diff --git a/types/src/vote.rs b/types/src/vote.rs index c470925b54..c27225e4af 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -22,6 +22,7 @@ use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, + hash::Hash, marker::PhantomData, num::NonZeroU64, }; @@ -319,8 +320,8 @@ pub trait Accumulator: Sized { /// Accumulator trait used to accumulate votes into an `AssembledSignature` pub trait Accumulator2< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + VOTE: VoteType, >: Sized { /// Append 1 vote to the accumulator. If the threshold is not reached, return @@ -337,11 +338,11 @@ pub trait Accumulator2< /// Accumulates DA votes pub struct DAVoteAccumulator< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + VOTE: VoteType, > { /// Map of all da signatures accumlated so far - pub da_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub da_vote_outcomes: VoteMap, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation @@ -354,9 +355,9 @@ pub struct DAVoteAccumulator< impl< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, - > Accumulator2 for DAVoteAccumulator + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, + VOTE: VoteType, + > Accumulator2 for DAVoteAccumulator { fn append( mut self, @@ -428,15 +429,15 @@ impl< /// Accumulate quorum votes pub struct QuorumVoteAccumulator< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + VOTE: VoteType, > { /// Map of all signatures accumlated so far - pub total_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub total_vote_outcomes: VoteMap, /// Map of all yes signatures accumlated so far - pub yes_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub yes_vote_outcomes: VoteMap, /// Map of all no signatures accumlated so far - pub no_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub no_vote_outcomes: VoteMap, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, @@ -452,9 +453,9 @@ pub struct QuorumVoteAccumulator< impl< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, - > Accumulator2 for QuorumVoteAccumulator + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, + VOTE: VoteType, + > Accumulator2 for QuorumVoteAccumulator { fn append( mut self, @@ -559,15 +560,15 @@ impl< /// Accumulates view sync votes pub struct ViewSyncVoteAccumulator< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + VOTE: VoteType, > { /// Map of all pre_commit signatures accumlated so far - pub pre_commit_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub pre_commit_vote_outcomes: VoteMap, /// Map of all ommit signatures accumlated so far - pub commit_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub commit_vote_outcomes: VoteMap, /// Map of all finalize signatures accumlated so far - pub finalize_vote_outcomes: VoteMap, TYPES::VoteTokenType>, + pub finalize_vote_outcomes: VoteMap, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, @@ -583,9 +584,9 @@ pub struct ViewSyncVoteAccumulator< impl< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, - > Accumulator2 for ViewSyncVoteAccumulator + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, + VOTE: VoteType, + > Accumulator2 for ViewSyncVoteAccumulator { #[allow(clippy::too_many_lines)] fn append( @@ -733,18 +734,18 @@ impl< /// Placeholder accumulator; will be replaced by accumulator for each certificate type pub struct AccumulatorPlaceholder< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + VOTE: VoteType, > { /// Phantom data to make compiler happy - pub phantom: PhantomData<(TYPES, VOTE, COMMITTABLE)>, + pub phantom: PhantomData<(TYPES, VOTE, COMMITMENT)>, } impl< TYPES: NodeType, - COMMITTABLE: Committable + Serialize + Clone, - VOTE: VoteType>, - > Accumulator2 for AccumulatorPlaceholder + COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + VOTE: VoteType, + > Accumulator2 for AccumulatorPlaceholder { fn append( self, From 4d846879b314bfa415e1c45acd39485dd08a4bf2 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:26:36 -0400 Subject: [PATCH 0132/1393] All tests passing? --- hotshot/src/tasks/mod.rs | 11 +++++++++++ .../traits/networking/web_server_network.rs | 2 ++ task-impls/src/consensus.rs | 18 +++++++++--------- task-impls/src/da.rs | 1 + types/src/message.rs | 2 +- web_server/src/lib.rs | 3 ++- 6 files changed, 26 insertions(+), 11 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4dc66b5539..26b7effbef 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -27,6 +27,7 @@ use hotshot_task_impls::{ view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::certificate::TimeoutCertificate; +use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use hotshot_types::{ certificate::ViewSyncCertificate, @@ -308,6 +309,16 @@ where id: handle.hotshot.inner.id, qc: None, }; + consensus_state + .quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .await; + consensus_state + .quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) + .await; let filter = FilterEvent(Arc::new(consensus_event_filter)); let consensus_name = "Consensus Task"; let consensus_event_handler = HandleEvent(Arc::new( diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 72cd62cc8d..7b03aae947 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -147,6 +147,7 @@ impl Inner { message_purpose: MessagePurpose, view_number: u64, ) -> Result<(), NetworkError> { + error!("Polling for view {}", view_number); let mut vote_index = 0; let mut tx_index = 0; @@ -206,6 +207,7 @@ impl Inner { error!("We should not receive transactions in this function"); } MessagePurpose::Proposal => { + error!("Received proposal"); // Only pushing the first proposal since we will soon only be allowing 1 proposal per view self.broadcast_poll_queue .write() diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 99b4028b2f..048b69c582 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1310,10 +1310,10 @@ where // update the view in state to the one in the message // Publish a view change event to the application - // if !self.update_view(new_view).await { - // debug!("view not updated"); - // return; - // } + if !self.update_view(new_view).await { + debug!("view not updated"); + return; + } self.output_event_stream .publish(Event { @@ -1346,7 +1346,7 @@ where } SequencingHotShotEvent::Timeout(view) => { // TODO ED This is not an ideal check, we should have the timeout task receive view change events and then cancel itself - if self.cur_view >= view { + if self.cur_view > view { return; } let vote_token = self.timeout_exchange.make_vote_token(view); @@ -1373,10 +1373,10 @@ where } } - self.quorum_exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) - .await; + // self.quorum_exchange + // .network() + // .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + // .await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 782a268a36..3ea39ce63b 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -588,6 +588,7 @@ where .get_committee(self.cur_view + 1) .contains(self.committee_exchange.public_key()); + // TODO ED Is this right? if is_da { debug!("Polling for DA proposals for view {}", *self.cur_view + 1); self.committee_exchange diff --git a/types/src/message.rs b/types/src/message.rs index 28daf64b43..f08cb54dfb 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -455,7 +455,7 @@ impl< GeneralConsensusMessage::InternalTrigger(_) => MessagePurpose::Internal, GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, - GeneralConsensusMessage::TimeoutVote(_) => todo!(), + GeneralConsensusMessage::TimeoutVote(_) => MessagePurpose::Vote, }, Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 2bd746e1cd..e5439dcca4 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -5,6 +5,7 @@ use async_compatibility_layer::channel::OneShotReceiver; use async_lock::RwLock; use clap::Args; use futures::FutureExt; +use tracing::error; use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; @@ -322,7 +323,7 @@ impl WebServerDataSource for WebServerState { } /// Stores a received proposal in the `WebServerState` fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { - debug!("Received proposal for view {}", view_number); + error!("Received proposal for view {}", view_number); if view_number > self.recent_proposal { self.recent_proposal = view_number; From 32120bfc8a4ba4d99e231c6fea90f8ac3516f7f4 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 20:00:46 -0400 Subject: [PATCH 0133/1393] Tests pass? --- task-impls/src/da.rs | 3 +++ types/src/traits/election.rs | 9 ++++++++- types/src/vote.rs | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 82abdfaeb6..53d0a57501 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -161,7 +161,10 @@ where &vote, &vote.block_commitment, ) { + + Left(new_accumulator) => { + error!("Not enough DA votes yet"); state.accumulator = either::Left(new_accumulator); } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 334500fa24..c8745652e2 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -434,6 +434,9 @@ pub trait ConsensusExchange: Send + Sync { vote_token: &Checked, ) -> bool { let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); + if !is_valid_signature { + panic!() + } let valid_vote_token = self .membership() .validate_vote_token(key.clone(), vote_token.clone()); @@ -446,7 +449,11 @@ pub trait ConsensusExchange: Send + Sync { Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, }; - is_valid_signature && is_valid_vote_token + let result = is_valid_signature && is_valid_vote_token; + if !result { + panic!() + } + result } #[doc(hidden)] diff --git a/types/src/vote.rs b/types/src/vote.rs index 8a4e9663df..46e93c3e12 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -435,7 +435,7 @@ impl< stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - let VoteData::Timeout(vote_commitment) = vote.get_data() else { + let VoteData::DA(vote_commitment) = vote.get_data() else { return Either::Left(self); }; From f64a03f2d1e9d6d92b1c8c01cbeaf21855aeddf9 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 21 Sep 2023 17:31:09 -0700 Subject: [PATCH 0134/1393] Save before sync with main --- hotshot/Cargo.toml | 1 - hotshot/examples/infra/modDA.rs | 9 ++-- hotshot/src/demo.rs | 10 +--- hotshot/src/lib.rs | 4 +- hotshot/src/tasks/mod.rs | 6 +-- task-impls/src/consensus.rs | 17 ++++--- task-impls/src/da.rs | 35 +------------ task-impls/src/transactions.rs | 76 ++++++++++++++++++++++++---- testing/src/node_types.rs | 6 +-- testing/src/task_helpers.rs | 12 ++--- testing/tests/atomic_storage.rs | 70 ------------------------- testing/tests/da_task.rs | 28 +++++----- testing/tests/network_task.rs | 22 ++++---- types/Cargo.toml | 1 + {hotshot => types}/src/block_impl.rs | 73 +++++++++++++++++--------- types/src/data.rs | 2 - types/src/lib.rs | 1 + types/src/traits/block_contents.rs | 35 +++---------- types/src/traits/state.rs | 12 ----- 19 files changed, 184 insertions(+), 236 deletions(-) rename {hotshot => types}/src/block_impl.rs (57%) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 671519ec84..d7816208f1 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -108,7 +108,6 @@ libp2p-networking = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true, features = ["rc"] } -sha3 = "^0.10" snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index ae0f0e40f5..82cc12bf68 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -20,8 +20,8 @@ use hotshot_orchestrator::{ config::{NetworkConfig, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_types::HotShotConfig; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{QuorumProposal, SequencingLeaf, TestableLeaf}, event::{Event, EventType}, @@ -37,6 +37,7 @@ use hotshot_types::{ }, state::{ConsensusTime, TestableBlock, TestableState}, }, + HotShotConfig, }; use libp2p_identity::{ ed25519::{self, SecretKey}, @@ -407,7 +408,7 @@ pub struct WebServerDARun< #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -551,7 +552,7 @@ pub struct Libp2pDARun, MEMBERSHIP #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -765,7 +766,7 @@ where /// Main entry point for validators pub async fn main_entry_point< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index feaed2fe6b..976cea433d 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -5,15 +5,13 @@ //! //! These implementations are useful in examples and integration testing, but are not suitable for //! production use. -use crate::{ - block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, - traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}, -}; +use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use commit::{Commitment, Committable}; use derivative::Derivative; use either::Either; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ + block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, certificate::{AssembledSignature, QuorumCertificate}, data::{ fake_commitment, genesis_proposer_id, random_commitment, LeafType, SequencingLeaf, @@ -72,10 +70,6 @@ impl State for SDemoState { type Time = ViewNumber; - fn next_block(_state: Option) -> Self::BlockType { - VIDBlockPayload(Vec::new()) - } - fn validate_block(&self, _block: &Self::BlockType, view_number: &Self::Time) -> bool { if view_number == &ViewNumber::genesis() { &self.view_number == view_number diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 117153967e..fe50fac759 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -19,7 +19,6 @@ #[cfg(feature = "docs")] pub mod documentation; -pub mod block_impl; /// Contains structures and functions for committee election pub mod certificate; #[cfg(feature = "demo")] @@ -56,6 +55,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, ViewSyncCertificate}, consensus::{BlockStore, Consensus, ConsensusMetrics, View, ViewInner, ViewQueue}, data::{DAProposal, DeltasType, LeafType, QuorumProposal, SequencingLeaf}, @@ -649,7 +649,7 @@ pub trait HotShotType> { #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 619fdb6d59..02aed896af 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -27,6 +27,7 @@ use hotshot_task_impls::{ view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, @@ -38,7 +39,6 @@ use hotshot_types::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, }, state::ConsensusTime, - BlockPayload, }, vote::{ViewSyncData, VoteType}, }; @@ -284,7 +284,7 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - block: TYPES::BlockType::new(), + block: None, quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), @@ -412,7 +412,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dccf7919c3..7aa93de10e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -83,8 +83,8 @@ pub struct SequencingConsensusTaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - /// Current block submitted to DA - pub block: TYPES::BlockType, + /// Current block submitted to DA, if any. + pub block: Option, /// the quorum exchange pub quorum_exchange: Arc>, @@ -1078,7 +1078,7 @@ where } SequencingHotShotEvent::SendDABlockData(block) => { // ED TODO Should make sure this is actually the most recent block - self.block = block; + self.block = Some(block); } _ => {} } @@ -1150,10 +1150,13 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - let block_commitment = self.block.commit(); - if block_commitment == TYPES::BlockType::new().commit() { - debug!("BlockPayload is generic block! {:?}", self.cur_view); - } + let block_commitment = match &self.block { + Some(block) => block.commit(), + None => { + debug!("No block yet."); + return false; + } + }; let leaf = SequencingLeaf { view_number: view, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ff048ef469..61b95267a7 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -18,7 +18,7 @@ use hotshot_types::vote::VoteType; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, - data::{DAProposal, ProposalType, SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, + data::{DAProposal, ProposalType, SequencingLeaf}, message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, @@ -658,39 +658,6 @@ where self.committee_exchange.public_key().clone(), )) .await; - - debug!("Prepare VID shares"); - { - /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 - const NUM_STORAGE_NODES: usize = 10; - /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 - const NUM_CHUNKS: usize = 5; - - // TODO https://github.com/EspressoSystems/HotShot/issues/1686 - let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let message_bytes = bincode::serialize(&message).unwrap(); - let vid_disperse = vid.disperse(&message_bytes).unwrap(); - // TODO for now reuse the same block commitment and signature as DA committee - // https://github.com/EspressoSystems/jellyfish/issues/369 - - self.event_stream - .publish(SequencingHotShotEvent::VidDisperseSend( - Proposal { - data: VidDisperse { - view_number: view, - commitment: block.commit(), // TODO GG should be vid_disperse.commit but that's a big change - shares: vid_disperse.shares, - common: vid_disperse.common, - }, - signature: message.signature, - }, - // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 - self.committee_exchange.public_key().clone(), - )) - .await; - } } SequencingHotShotEvent::Timeout(view) => { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 8609708653..ff832a272c 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,15 +13,17 @@ use hotshot_task::{ task_impls::HSTWithEvent, }; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::DACertificate, consensus::Consensus, - data::SequencingLeaf, + data::{SequencingLeaf, VidScheme, VidSchemeTrait}, message::{Message, SequencingMessage}, traits::{ + block_contents::Transaction, consensus_api::SequencingConsensusApi, election::ConsensusExchange, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, - BlockPayload, State, + BlockPayload, }, }; use hotshot_utils::bincode::bincode_opts; @@ -72,7 +74,7 @@ pub struct TransactionTaskState< } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -220,18 +222,52 @@ where drop(consensus); - let mut block = ::StateType::next_block(None); + let block; let txns = self.wait_for_transactions(parent_leaf).await?; - for txn in txns { - if let Ok(new_block) = block.add_transaction_raw(&txn) { - block = new_block; - continue; + debug!("Prepare VID shares"); + if txns.len() > 0 { + /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 + const NUM_STORAGE_NODES: usize = 10; + /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 + const NUM_CHUNKS: usize = 5; + + // TODO https://github.com/EspressoSystems/HotShot/issues/1686 + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + // TODO https://github.com/EspressoSystems/jellyfish/issues/375 + let mut txns_flatten = Vec::new(); + for txn in &txns { + txns_flatten.extend(txn.bytes()); } + tracing::error!("here txn task {:?}", txns); + let vid_disperse = vid.disperse(&txns_flatten).unwrap(); + block = VIDBlockPayload::new(txns, vid_disperse.commit); + + // TODO Commenting out the following code since we need to update the proposal, + // signature, and exchange for VID dispersal. They were copy-pasted from DA + // code. + // self.event_stream + // .publish(SequencingHotShotEvent::VidDisperseSend( + // Proposal { + // data: VidDisperse { + // view_number: view + 1, + // commitment: block.commit(), + // shares: vid_disperse.shares, + // common: vid_disperse.common, + // }, + // signature: message.signature, + // }, + // // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 + // self.committee_exchange.public_key().clone(), + // )) + // .await; + + self.event_stream + .publish(SequencingHotShotEvent::BlockReady(block, view + 1)) + .await; } - self.event_stream - .publish(SequencingHotShotEvent::BlockReady(block, view + 1)) - .await; return None; } SequencingHotShotEvent::Shutdown => { @@ -241,7 +277,25 @@ where } None } +} +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TransactionTaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = TYPES::BlockType, + >, +{ #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( &self, diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index f4b2f9db1a..4ffd74e2bf 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,7 +1,4 @@ -use hotshot::{ - block_impl::{VIDBlockPayload, VIDTransaction}, - traits::implementations::CombinedNetworks, -}; +use hotshot::traits::implementations::CombinedNetworks; use std::{marker::PhantomData, sync::Arc}; use hotshot::{ @@ -17,6 +14,7 @@ use hotshot::{ types::bn254::BLSPubKey, }; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{QuorumProposal, SequencingLeaf, ViewNumber}, message::{Message, SequencingMessage}, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8e0f85a95c..1a8123a35b 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -6,13 +6,14 @@ use commit::Committable; use either::Right; use hotshot::{ certificate::QuorumCertificate, - traits::{BlockPayload, NodeImplementation, TestableNodeImplementation}, + traits::{NodeImplementation, TestableNodeImplementation}, types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, HotShotInitializer, HotShotSequencingConsensusApi, SystemContext, }; use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ + block_impl::VIDBlockPayload, data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ @@ -21,7 +22,7 @@ use hotshot_types::{ metrics::NoMetrics, node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, signature_key::EncodedSignature, - state::ConsensusTime, + state::{ConsensusTime, TestableBlock}, }, }; @@ -116,8 +117,7 @@ async fn build_quorum_proposal_and_signature( let parent_leaf = leaf.clone(); // every event input is seen on the event stream in the output. - - let block_commitment = ::BlockType::new().commit(); + let block = ::genesis(); let leaf = SequencingLeaf { view_number: ViewNumber::new(view), height: parent_leaf.height + 1, @@ -125,14 +125,14 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), // Use the block commitment rather than the block, so that the replica can construct // the same leaf with the commitment. - deltas: Right(block_commitment), + deltas: Right(block.commit()), rejected: vec![], timestamp: 0, proposer_id: api.public_key().to_bytes(), }; let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal::> { - block_commitment, + block_commitment: block.commit(), view_number: ViewNumber::new(view), height: 1, justify_qc: QuorumCertificate::genesis(), diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index c8183af164..381db09d87 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -13,76 +13,6 @@ use rand::thread_rng; type AtomicStorage = hotshot::traits::implementations::AtomicStorage; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_happy_path_blocks() { - // This folder will be destroyed when the last handle to it closes - let file = tempfile::tempdir().expect("Could not create temp dir"); - let path = file.path(); - println!("Using store in {:?}", path); - let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); - - let block = VDEntryBlock::default(); - let hash = block.hash(); - store - .update(|mut m| { - let block = block.clone(); - async move { m.insert_block(hash, block).await } - }) - .await - .unwrap(); - - // Make sure the data is still there after re-opening - drop(store); - store = AtomicStorage::open(path).expect("Could not open atomic store"); - assert_eq!( - store.get_block(&hash).await.unwrap(), - Some(DEntryBlock::default()) - ); - - // Add some transactions - let mut rng = thread_rng(); - let state = >::get_starting_state(); - let mut hashes = Vec::new(); - let mut block = block; - for _ in 0..10 { - let new = block - .add_transaction_raw(&random_transaction(&state, &mut rng)) - .expect("Could not add transaction"); - println!("Inserting {:?}: {:?}", new.hash(), new); - store - .update(|mut m| { - let new = new.clone(); - async move { m.insert_block(new.hash(), new.clone()).await } - }) - .await - .unwrap(); - hashes.push(new.hash()); - block = new; - } - - // read them all back 3 times - // 1st time: normal readback - // 2nd: after dropping and re-opening the store - for i in 0..3 { - if i == 1 { - drop(store); - store = AtomicStorage::open(path).expect("Could not open atomic store"); - } - - // read them all back - for (idx, hash) in hashes.iter().enumerate() { - match store.get_block(hash).await.expect("Could not read hash") { - Some(block) => println!("read {:?}", block), - None => panic!("Could not read hash {} {:?}", idx, hash), - } - } - } -} - #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 204038885c..ba042b35ee 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -6,6 +6,7 @@ use hotshot_testing::{ task_helpers::vid_init, }; use hotshot_types::{ + block_impl::VIDTransaction, data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, @@ -20,10 +21,12 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot::{block_impl::VIDBlockPayload, tasks::add_da_task}; + use hotshot::tasks::add_da_task; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{message::Proposal, traits::election::CommitteeExchangeType}; + use hotshot_types::{ + block_impl::VIDBlockPayload, message::Proposal, traits::election::CommitteeExchangeType, + }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -36,9 +39,13 @@ async fn test_da_task() { }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); - let block = VIDBlockPayload(Vec::new()); - let block_commitment = block.commit(); - let signature = committee_exchange.sign_da_proposal(&block_commitment); + let vid = vid_init(); + let txn = vec![0u8]; + tracing::error!("here da task"); + let vid_disperse = vid.disperse(&txn).unwrap(); + let block_commitment = vid_disperse.commit; + let block = VIDBlockPayload::new(vec![VIDTransaction(txn)], block_commitment); + let signature = committee_exchange.sign_da_proposal(&block.commit()); let proposal = DAProposal { deltas: block.clone(), view_number: ViewNumber::new(2), @@ -47,13 +54,10 @@ async fn test_da_task() { data: proposal, signature, }; - let vid = vid_init(); - let message_bytes = bincode::serialize(&message).unwrap(); - let vid_disperse = vid.disperse(&message_bytes).unwrap(); let vid_proposal = Proposal { data: VidDisperse { view_number: message.data.view_number, - commitment: block_commitment, + commitment: block.commit(), shares: vid_disperse.shares, common: vid_disperse.common, }, @@ -88,7 +92,7 @@ async fn test_da_task() { SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 1, ); - output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); + output.insert(SequencingHotShotEvent::SendDABlockData(block.clone()), 1); output.insert( SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), 1, @@ -98,7 +102,7 @@ async fn test_da_task() { .unwrap() .unwrap(); let da_vote = - committee_exchange.create_da_message(block_commitment, ViewNumber::new(2), vote_token); + committee_exchange.create_da_message(block.commit(), ViewNumber::new(2), vote_token); output.insert(SequencingHotShotEvent::DAVoteSend(da_vote), 1); output.insert( SequencingHotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), @@ -110,7 +114,7 @@ async fn test_da_task() { .unwrap() .unwrap(); let vid_vote = - committee_exchange.create_vid_message(block_commitment, ViewNumber::new(2), vote_token); + committee_exchange.create_vid_message(block.commit(), ViewNumber::new(2), vote_token); output.insert(SequencingHotShotEvent::VidVoteSend(vid_vote), 1); output.insert(SequencingHotShotEvent::DAProposalRecv(message, pub_key), 1); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 7ad17e2008..f11508e8f6 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -21,11 +21,13 @@ use std::collections::HashMap; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_network_task() { - use hotshot::block_impl::VIDBlockPayload; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{ - data::VidDisperse, message::Proposal, traits::election::CommitteeExchangeType, + block_impl::{VIDBlockPayload, VIDTransaction}, + data::VidDisperse, + message::Proposal, + traits::election::CommitteeExchangeType, }; async_compatibility_layer::logging::setup_logging(); @@ -40,9 +42,13 @@ async fn test_network_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let priv_key = api.private_key(); - let block = VIDBlockPayload(Vec::new()); - let block_commitment = block.commit(); - let signature = committee_exchange.sign_da_proposal(&block_commitment); + let vid = vid_init(); + let txn = vec![0u8]; + tracing::error!("here network task1"); + let vid_disperse = vid.disperse(&txn).unwrap(); + let block_commitment = vid_disperse.commit; + let block = VIDBlockPayload::new(vec![VIDTransaction(txn)], block_commitment); + let signature = committee_exchange.sign_da_proposal(&block.commit()); let da_proposal = Proposal { data: DAProposal { deltas: block.clone(), @@ -51,15 +57,13 @@ async fn test_network_task() { signature, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - let vid = vid_init(); - let da_proposal_bytes = bincode::serialize(&da_proposal).unwrap(); - let vid_disperse = vid.disperse(&da_proposal_bytes).unwrap(); // TODO for now reuse the same block commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 + tracing::error!("here network task2"); let da_vid_disperse = Proposal { data: VidDisperse { view_number: da_proposal.data.view_number, - commitment: block_commitment, + commitment: block.commit(), shares: vid_disperse.shares, common: vid_disperse.common, }, diff --git a/types/Cargo.toml b/types/Cargo.toml index 742882cce2..dbc1b7b009 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -43,6 +43,7 @@ rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } +sha3 = "^0.10" snafu = { workspace = true } tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } time = { workspace = true } diff --git a/hotshot/src/block_impl.rs b/types/src/block_impl.rs similarity index 57% rename from hotshot/src/block_impl.rs rename to types/src/block_impl.rs index 8960d79158..79b0ce234b 100644 --- a/hotshot/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -4,8 +4,11 @@ use std::{ fmt::{Debug, Display}, }; +use crate::{ + data::{test_srs, VidScheme, VidSchemeTrait}, + traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}, +}; use commit::{Commitment, Committable}; -use hotshot_types::traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; use snafu::Snafu; @@ -28,7 +31,11 @@ impl Committable for VIDTransaction { } } -impl Transaction for VIDTransaction {} +impl Transaction for VIDTransaction { + fn bytes(&self) -> Vec { + self.0.clone() + } +} impl VIDTransaction { /// create a new transaction @@ -55,14 +62,31 @@ pub enum BlockPayloadError { /// A [`BlockPayload`] that contains a list of `VIDTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct VIDBlockPayload(pub Vec); +pub struct VIDBlockPayload { + /// List of transactions. + pub transactions: Vec, + /// VID commitment. + pub commitment: ::Commit, +} + +impl VIDBlockPayload { + /// Constructor. + #[must_use] + pub fn new( + transactions: Vec, + commitment: ::Commit, + ) -> Self { + Self { + transactions, + commitment, + } + } +} impl Committable for VIDBlockPayload { fn commit(&self) -> Commitment { - // TODO: Use use VID block commitment. - // let builder = commit::RawCommitmentBuilder::new("BlockPayload Comm"); - builder.finalize() + builder.generic_byte_array(&self.commitment).finalize() } fn tag() -> String { @@ -72,17 +96,30 @@ impl Committable for VIDBlockPayload { impl Display for VIDBlockPayload { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "BlockPayload #txns={}", self.0.len()) + write!(f, "BlockPayload #txns={}", self.transactions.len()) } } impl TestableBlock for VIDBlockPayload { fn genesis() -> Self { - VIDBlockPayload(Vec::new()) + /// TODO + const NUM_STORAGE_NODES: usize = 10; + /// TODO + const NUM_CHUNKS: usize = 5; + + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + + let txn = vec![0u8]; + tracing::error!("here genesis"); + let vid_disperse = vid.disperse(&txn).unwrap(); + VIDBlockPayload::new(vec![VIDTransaction(txn)], vid_disperse.commit) } fn txn_count(&self) -> u64 { - self.0.len() as u64 + self.transactions.len() as u64 } } @@ -91,20 +128,10 @@ impl BlockPayload for VIDBlockPayload { type Transaction = VIDTransaction; - fn new() -> Self { - ::genesis() - } - - fn add_transaction_raw( - &self, - tx: &Self::Transaction, - ) -> std::result::Result { - let mut new = self.0.clone(); - new.push(tx.clone()); - Ok(VIDBlockPayload(new)) - } - fn contained_transactions(&self) -> HashSet> { - self.0.iter().map(commit::Committable::commit).collect() + self.transactions + .iter() + .map(commit::Committable::commit) + .collect() } } diff --git a/types/src/data.rs b/types/src/data.rs index 080355a195..ff63321431 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -173,8 +173,6 @@ pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::Time, /// Block commitment - /// - /// TODO GG type should be `::Common` but that's a big change. pub commitment: Commitment, /// VID shares dispersed among storage nodes pub shares: Vec<::Share>, diff --git a/types/src/lib.rs b/types/src/lib.rs index 6152689acf..54739106c3 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -11,6 +11,7 @@ use std::{num::NonZeroUsize, time::Duration}; +pub mod block_impl; pub mod certificate; pub mod consensus; pub mod data; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index a365dd7144..4ba10f601c 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -20,8 +20,6 @@ use std::{ /// * Must have a predefined error type ([`BlockPayload::Error`]) /// * Must have a transaction type that can be compared for equality, serialized and serialized, /// sent between threads, and can have a hash produced of it -/// * Must be able to be produced incrementally by appending transactions -/// ([`add_transaction_raw`](BlockPayload::add_transaction_raw)) /// * Must be hashable pub trait BlockPayload: Serialize @@ -42,18 +40,6 @@ pub trait BlockPayload: /// The type of the transitions we are applying type Transaction: Transaction; - /// Construct an empty or genesis block. - fn new() -> Self; - - /// Attempts to add a transaction, returning an Error if it would result in a structurally - /// invalid block - /// - /// # Errors - /// - /// Should return an error if this transaction leads to an invalid block - fn add_transaction_raw(&self, tx: &Self::Transaction) - -> std::result::Result; - /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec fn contained_transactions(&self) -> HashSet>; @@ -63,6 +49,8 @@ pub trait BlockPayload: pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { + /// Get the transaction bytes. + fn bytes(&self) -> Vec; } /// Dummy implementation of `BlockPayload` for unit tests @@ -112,7 +100,11 @@ pub mod dummy { "DUMMY_TXN".to_string() } } - impl super::Transaction for DummyTransaction {} + impl super::Transaction for DummyTransaction { + fn bytes(&self) -> Vec { + Vec::new() + } + } impl std::error::Error for DummyError {} @@ -133,19 +125,6 @@ pub mod dummy { type Transaction = DummyTransaction; - fn new() -> Self { - Self { nonce: 0 } - } - - fn add_transaction_raw( - &self, - _tx: &Self::Transaction, - ) -> std::result::Result { - Ok(Self { - nonce: self.nonce + 1, - }) - } - fn contained_transactions(&self) -> HashSet> { HashSet::new() } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 93ff3e6603..bd8fe7d2ed 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -20,8 +20,6 @@ use std::{ /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](State::Error)) /// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockType)) -/// * A method to get a template (empty) next block from the current state -/// ([`next_block`](State::next_block)) /// * The ability to validate that a block is actually a valid extension of this state /// ([`validate_block`](State::validate_block)) /// * The ability to produce a new state, with the modifications from the block applied @@ -46,9 +44,6 @@ pub trait State: /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Returns an empty, template next block given this current state - fn next_block(prev_commitment: Option) -> Self::BlockType; - /// Returns true if and only if the provided block is valid and can extend this state fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool; @@ -165,13 +160,6 @@ pub mod dummy { type BlockType = DummyBlock; type Time = ViewNumber; - fn next_block(state: Option) -> Self::BlockType { - match state { - Some(state) => DummyBlock { nonce: state.nonce }, - None => unimplemented!(), - } - } - fn validate_block(&self, _block: &Self::BlockType, _view_number: &Self::Time) -> bool { false } From 29d43851c17d1c5c537692cf1fe43dc1d10da68e Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 20:42:19 -0400 Subject: [PATCH 0135/1393] Cleaning up TODOs --- hotshot/Cargo.toml | 1 - hotshot/examples/infra/modDA.rs | 1 - .../traits/networking/web_server_network.rs | 2 - task-impls/src/consensus.rs | 266 ++++++------------ types/src/certificate.rs | 2 +- 5 files changed, 90 insertions(+), 182 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 671519ec84..452ff4de63 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -76,7 +76,6 @@ required-features = ["demo", "libp2p/rsa"] path = "examples/web-server-da/multi-web-server.rs" [dependencies] -# TODO ED We should upgrade ark libraries to 0.4 async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index ae0f0e40f5..95cffd94ed 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -256,7 +256,6 @@ pub trait RunDA< let adjusted_padding = if padding < size { 0 } else { padding - size }; let mut txns: VecDeque = VecDeque::new(); - // TODO ED: In the future we should have each node generate transactions every round to simulate a more realistic network let tx_to_gen = transactions_per_round * rounds * 3; { let mut txn_rng = rand::thread_rng(); diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 7b03aae947..14409fecb7 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -116,8 +116,6 @@ struct Inner { /// The last tx_index we saw from the web server tx_index: Arc>, - // TODO ED This should be TYPES::Time - // Theoretically there should never be contention for this lock... /// Task map for quorum proposals. proposal_task_map: Arc>>>>, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d0db84cf03..a0f878cdd4 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -39,6 +39,8 @@ use hotshot_types::{ vote::{QuorumVote, VoteType}, }; +use tracing::warn; + use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, @@ -328,7 +330,6 @@ where } Either::Right(qc) => { debug!("QCFormed! {:?}", qc.view_number); - // TODO ED Make Timeout QC Formed Event state .event_stream .publish(SequencingHotShotEvent::QCFormed(either::Right(qc.clone()))) @@ -583,22 +584,12 @@ where ); // Remove old certs, we won't vote on past views - // TODO ED Put back in once we fix other errors - // for view in *self.cur_view..*new_view - 1 { - // let v = TYPES::Time::new(view); - // self.certs.remove(&v); - // } + for view in *self.cur_view..*new_view - 1 { + let v = TYPES::Time::new(view); + self.certs.remove(&v); + } self.cur_view = new_view; - // TODO ED These injects aren't right - // Commenting out because HotShot doesn't start on 1 now - // if new_view == TYPES::Time::new(1) { - // self.quorum_exchange - // .network() - // .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) - // .await; - // } - // Poll the future leader for lookahead let lookahead_view = new_view + LOOK_AHEAD; if !self.quorum_exchange.is_leader(lookahead_view) { @@ -622,7 +613,6 @@ where .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view + 1)) .await; - // TODO ED I think this poll is still correct (actually want to poll for it in both views, in case this node just doesn't receive the latest proposal, but that is more of a web server issue specifically) if self.quorum_exchange.is_leader(self.cur_view + 1) { debug!("Polling for quorum votes for view {}", *self.cur_view); self.quorum_exchange @@ -639,7 +629,8 @@ where let timeout = self.timeout; self.timeout_task = async_spawn({ let stream = self.event_stream.clone(); - // TODO ED + 1 here because of the logic change to view update. This indicates we haven't seen evidence for view change for this view within the time window + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view let view_number = self.cur_view + 1; async move { async_sleep(Duration::from_millis(timeout)).await; @@ -678,28 +669,77 @@ where return; } - if proposal.data.justify_qc.view_number() != proposal.data.view_number - 1 { - // TODO ED Add timeout cert logic - if proposal.data.timeout_certificate.is_none() { - error!( - "Proposal needed a timeout cert but didn't have one {:?}", - proposal.data.clone() - ); + // Verify a timeout certificate exists and is valid + if proposal.data.justify_qc.view_number() != view - 1 { + let Some(timeout_cert) = proposal.data.timeout_certificate.clone() else { + warn!( + "Quorum proposal for view {} needed a timeout certificate but did not have one", + *view); + return; + }; + + if !self + .timeout_exchange + .is_valid_cert(&timeout_cert.clone(), view.commit()) + { + warn!("Timeout certificate for view {} was invalid", *view); return; - } else { - error!("Proposal for view {} had timeout certificate", *view); } - // TODO ED Check timeout cert validity } - // TODO ED This needs to be moved further down so we only update the view after fully validating the qc. + let justify_qc = proposal.data.justify_qc.clone(); + + if !self + .quorum_exchange + .is_valid_cert(&justify_qc, justify_qc.leaf_commitment) + { + error!("Invalid justify_qc in proposal for view {}", *view); + return; + } + + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view).await; - // TODO ED How does this play in with the timeout cert? self.current_proposal = Some(proposal.data.clone()); + let consensus = self.consensus.upgradable_read().await; + + // Construct the leaf. + let parent = if justify_qc.is_genesis() { + self.genesis_leaf().await + } else { + consensus + .saved_leaves + .get(&justify_qc.leaf_commitment()) + .cloned() + }; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.leaf_commitment() + ); + return; + }; + let parent_commitment = parent.commit(); + let leaf: SequencingLeaf<_> = SequencingLeaf { + view_number: view, + height: proposal.data.height, + justify_qc: justify_qc.clone(), + parent_commitment, + deltas: Right(proposal.data.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: sender.to_bytes(), + }; + let justify_qc_commitment = justify_qc.commit(); + let leaf_commitment = leaf.commit(); + let vote_token = self.quorum_exchange.make_vote_token(view); - // TODO: do some of this logic without the vote token check, only do that when voting. + + // TODO Put vote token creation inside message creation functions + // https://github.com/EspressoSystems/HotShot/issues/1795. match vote_token { Err(e) => { error!("Failed to generate vote token for {:?} {:?}", view, e); @@ -709,81 +749,23 @@ where } Ok(Some(vote_token)) => { debug!("We were chosen for consensus committee on {:?}", view); - let consensus = self.consensus.upgradable_read().await; - let message; - - // Construct the leaf. - let justify_qc = proposal.data.justify_qc; - let parent = if justify_qc.is_genesis() { - self.genesis_leaf().await - } else { - consensus - .saved_leaves - .get(&justify_qc.leaf_commitment()) - .cloned() - }; - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.leaf_commitment() - ); - return; - }; - let parent_commitment = parent.commit(); - let leaf: SequencingLeaf<_> = SequencingLeaf { - view_number: view, - height: proposal.data.height, - justify_qc: justify_qc.clone(), - parent_commitment, - deltas: Right(proposal.data.block_commitment), - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: sender.to_bytes(), - }; - let justify_qc_commitment = justify_qc.commit(); - let leaf_commitment = leaf.commit(); - - // Validate the `justify_qc`. - if !self - .quorum_exchange - .is_valid_cert(&justify_qc, parent_commitment) - { - error!("Invalid justify_qc in proposal!. parent commitment is {:?} justify qc is {:?}", parent_commitment, justify_qc.clone()); - - message = self.quorum_exchange.create_no_message::( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); - } - // Validate the `height`. - else if leaf.height != parent.height + 1 { + // Validate the `height` + // TODO Remove height from proposal validation; view number is sufficient + // https://github.com/EspressoSystems/HotShot/issues/1796 + if leaf.height != parent.height + 1 { error!( "Incorrect height in proposal (expected {}, got {})", parent.height + 1, leaf.height ); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); + return; } // Validate the signature. else if !view_leader_key .validate(&proposal.signature, leaf_commitment.as_ref()) { error!(?proposal.signature, "Could not verify proposal."); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); } // Create a positive vote if either liveness or safety check // passes. @@ -811,20 +793,6 @@ where // Skip if both saftey and liveness checks fail. if !safety_check && !liveness_check { error!("Failed safety check and liveness check"); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); - } else { - // Generate a message with yes vote. - message = self.quorum_exchange.create_yes_message( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); } } @@ -976,7 +944,6 @@ where .await; } if !self.vote_if_able().await { - // TOOD ED This means we publish the proposal without updating our own view, which doesn't seem right return; } self.current_proposal = None; @@ -985,13 +952,6 @@ where let time = TYPES::Time::new(v); self.certs.remove(&time); } - - // Update current view and publish a view change event so other tasks also update - // self.update_view(new_view).await; - - if let GeneralConsensusMessage::Vote(vote) = message { - // debug!("Sending vote to next leader {:?}", vote); - }; } } } @@ -1007,8 +967,6 @@ where return; } - // TODO ED Insert TimeoutVote accumulator stuff here - match vote.clone() { QuorumVote::Yes(vote_internal) => { let handle_event = HandleEvent(Arc::new(move |event, state| { @@ -1046,14 +1004,13 @@ where &vote_internal.clone().leaf_commitment, ); + // TODO Create default functions for accumulators + // https://github.com/EspressoSystems/HotShot/issues/1797 let timeout_accumulator = TimeoutVoteAccumulator { da_vote_outcomes: HashMap::new(), - - // TODO ED Don't use quorum exchange here - success_threshold: self.quorum_exchange.success_threshold(), - + success_threshold: self.timeout_exchange.success_threshold(), sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], + signers: bitvec![0; self.timeout_exchange.total_nodes()], phantom: PhantomData, }; @@ -1124,10 +1081,6 @@ where return; } - // // TODO ED Insert TimeoutVote accumulator stuff here - - // match vote.clone() { - // QuorumVote::Yes(vote_internal)=> { let handle_event = HandleEvent(Arc::new(move |event, state| { async move { vote_handle(state, event).await }.boxed() })); @@ -1143,15 +1096,13 @@ where }; // // Todo check if we are the leader - // TODO ED Make this a default accum let new_accumulator = TimeoutVoteAccumulator { da_vote_outcomes: HashMap::new(), - // TODO ED Don't use quorum exchange here - success_threshold: self.quorum_exchange.success_threshold(), + success_threshold: self.timeout_exchange.success_threshold(), sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], + signers: bitvec![0; self.timeout_exchange.total_nodes()], phantom: PhantomData, }; @@ -1222,24 +1173,23 @@ where debug!("QC Formed event happened!"); if let either::Right(qc) = cert.clone() { - // So we don't create a QC on the first view unless we are the leader debug!( - "Attempting to publish proposal after forming a QC for view {}", + "Attempting to publish proposal after forming a TC for view {}", *qc.view_number ); - // TODO ED Clean this up, get rid of clones + let view = qc.view_number + 1; + if self .publish_proposal_if_able( self.consensus.read().await.high_qc.clone(), - qc.clone().view_number + 1, + view, Some(qc.clone()), ) .await { - // self.update_view(qc.view_number + 1).await; } else { - error!("Wasn't able to publish proposal"); + warn!("Wasn't able to publish proposal"); } } if let either::Left(qc) = cert { @@ -1247,25 +1197,6 @@ where consensus.high_qc = qc.clone(); drop(consensus); - - // View may have already been updated by replica if they voted for this QC - // TODO ED We should separate leader state from replica state, they shouldn't share the same view - // Leader task should only run for a specific view, and never update its current view, but instead spawn another task - // let _res = self.update_view(qc.view_number + 1).await; - - // Start polling for votes for the next view - // if _res { - // if self.quorum_exchange.is_leader(qc.view_number + 2) { - // self.quorum_exchange - // .network() - // .inject_consensus_info( - // (ConsensusIntentEvent::PollForVotes(*qc.view_number + 1)), - // ) - // .await; - // } - // } - - // So we don't create a QC on the first view unless we are the leader debug!( "Attempting to publish proposal after forming a QC for view {}", *qc.view_number @@ -1275,7 +1206,7 @@ where .publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await { - // self.update_view(qc.view_number + 1).await; + warn!("Wasn't able to publish proposal"); } } } @@ -1324,11 +1255,6 @@ where }) .await; - debug!("View changed to {}", *new_view); - - // ED Need to update the view here? What does otherwise? - // self.update_view(qc.view_number + 1).await; - // So we don't create a QC on the first view unless we are the leader if !self.quorum_exchange.is_leader(self.cur_view) { return; } @@ -1336,16 +1262,9 @@ where let consensus = self.consensus.read().await; let qc = consensus.high_qc.clone(); drop(consensus); - // TODO ED Do not want to publish proposal on view change - // if !self.publish_proposal_if_able(qc, self.cur_view, None).await { - // error!( - // "Failed to publish proposal on view change. View = {:?}", - // self.cur_view - // ); - // } } SequencingHotShotEvent::Timeout(view) => { - // TODO ED This is not an ideal check, we should have the timeout task receive view change events and then cancel itself + // NOTE: We may optionally have the timeout task listen for view change events if self.cur_view > view { return; } @@ -1359,12 +1278,11 @@ where debug!("We were not chosen for consensus committee on {:?}", view); } Ok(Some(vote_token)) => { - // TODO ED Why I here and not in quorum exchange? let message = self .timeout_exchange .create_timeout_message::(view, vote_token); - // error!("Sending timeout vote for view {}", *view); + debug!("Sending timeout vote for view {}", *view); if let GeneralConsensusMessage::TimeoutVote(vote) = message { self.event_stream .publish(SequencingHotShotEvent::TimeoutVoteSend(vote)) @@ -1372,18 +1290,12 @@ where } } } - - // self.quorum_exchange - // .network() - // .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) - // .await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); } SequencingHotShotEvent::SendDABlockData(block) => { - // ED TODO Should make sure this is actually the most recent block self.block = block; } _ => {} diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 04adbbc134..37350facda 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -121,7 +121,7 @@ impl SignedCertificate bool { - todo!() + false } fn genesis() -> Self { From e3f8b2335b00b664b4c7fb5f3685e65b5dcd7cf8 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 21:13:32 -0400 Subject: [PATCH 0136/1393] More todos --- task-impls/src/da.rs | 18 +----- task-impls/src/harness.rs | 2 - testing/tests/timeout.rs | 5 +- types/src/certificate.rs | 10 +--- types/src/traits/election.rs | 2 - types/src/vote.rs | 110 +---------------------------------- 6 files changed, 9 insertions(+), 138 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 53d0a57501..dee213a1e1 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -161,8 +161,6 @@ where &vote, &vote.block_commitment, ) { - - Left(new_accumulator) => { error!("Not enough DA votes yet"); state.accumulator = either::Left(new_accumulator); @@ -582,16 +580,14 @@ where error!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; - // Inject view info into network - // ED I think it is possible that you receive a quorum proposal, vote on it and update your view before the da leader has sent their proposal, and therefore you skip polling for this view? + // Inject view info into network let is_da = self .committee_exchange .membership() .get_committee(self.cur_view + 1) .contains(self.committee_exchange.public_key()); - // TODO ED Is this right? if is_da { debug!("Polling for DA proposals for view {}", *self.cur_view + 1); self.committee_exchange @@ -640,22 +636,12 @@ where }; debug!("Sending DA proposal for view {:?}", data.view_number); - // let message = SequencingMessage::(Right( - // CommitteeConsensusMessage::DAProposal(Proposal { data, signature }), - // )); let message = Proposal { data, signature }; - // Brodcast DA proposal - // TODO ED We should send an event to do this, but just getting it to work for now self.event_stream .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) .await; - // if let Err(e) = self.api.send_da_broadcast(message.clone()).await { - // consensus.metrics.failed_to_send_messages.add(1); - // warn!(?message, ?e, "Could not broadcast leader proposal"); - // } else { - // consensus.metrics.outgoing_broadcast_messages.add(1); - // } + self.event_stream .publish(SequencingHotShotEvent::DAProposalSend( message.clone(), diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 4978b27492..464217a383 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -107,8 +107,6 @@ pub fn handle_event>( *num_expected -= 1; } - // TODO ED Remove - error!("Event is {:?}", event); if state.expected_output.is_empty() { return (Some(HotShotTaskCompleted::ShutDown), state); diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 954588958c..2fd6023224 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -26,6 +26,7 @@ async fn test_timeout() { }; // TODO ED Reduce down to 5 nodes once memory network issues is resolved + // https://github.com/EspressoSystems/HotShot/issues/1790 let mut metadata = TestMetadata { total_nodes: 10, start_nodes: 10, @@ -47,7 +48,6 @@ async fn test_timeout() { node_changes: vec![(Duration::from_millis(500), dead_nodes)], }; - // TODO ED Add safety task, etc to confirm TCs are being formed metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( @@ -56,7 +56,8 @@ async fn test_timeout() { }, ); - // TODO ED Test with memory network once issue is resolved. + // TODO ED Test with memory network once issue is resolved + // https://github.com/EspressoSystems/HotShot/issues/1790 metadata .gen_launcher::() .launch() diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 37350facda..d0ed806083 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -86,7 +86,8 @@ pub struct TimeoutCertificate { pub signatures: AssembledSignature, } -impl SignedCertificate> +impl + SignedCertificate> for TimeoutCertificate { type Vote = TimeoutVote2; @@ -334,17 +335,10 @@ impl Committable for ViewSyncCertificate { let signatures_bytes = serialize_signature(&self.signatures()); let mut builder = commit::RawCommitmentBuilder::new("View Sync Certificate Commitment") - // .field("leaf commitment", self.leaf_commitment) - // .u64_field("view number", *self.view_number.deref()) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes); - // builder = builder - // .field("Leaf commitment", self.leaf_commitment) - // .u64_field("View number", *self.view_number.deref()); - let certificate_internal = match &self { - // TODO ED Not the best way to do this ViewSyncCertificate::PreCommit(certificate_internal) => { builder = builder.var_size_field("View Sync Phase", "PreCommit".as_bytes()); certificate_internal diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index c8745652e2..93e8a14abc 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1500,7 +1500,6 @@ impl< } pub trait TimeoutExchangeType: ConsensusExchange { - // TODO ED Clean this function up fn create_timeout_message>( &self, view: TYPES::Time, @@ -1515,7 +1514,6 @@ pub trait TimeoutExchangeType: ConsensusExchange .get_commit().as_ref() ); - // TODO ED Should not use le bytes GeneralConsensusMessage::::TimeoutVote(TimeoutVote2 { signature: (self.public_key().to_bytes(), signature), diff --git a/types/src/vote.rs b/types/src/vote.rs index 46e93c3e12..8ba9f060d7 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -404,7 +404,8 @@ pub trait Accumulator2< ) -> Either>; } -// TODO ED Make a default accumulator +// TODO Make a default accumulator +// https://github.com/EspressoSystems/HotShot/issues/1797 pub struct TimeoutVoteAccumulator< TYPES: NodeType, COMMITMENT: Serialize + Clone + for<'a> Deserialize<'a>, @@ -434,7 +435,6 @@ impl< vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - let VoteData::DA(vote_commitment) = vote.get_data() else { return Either::Left(self); }; @@ -477,7 +477,6 @@ impl< if *da_stake_casted >= u64::from(self.success_threshold) { // Assemble QC let real_qc_pp = ::get_public_parameter( - // TODO ED Something about stake table entries. Might be easier to just pass in membership? stake_table_entries.clone(), U256::from(self.success_threshold.get()), ); @@ -496,43 +495,22 @@ impl< } } -// TODO ED Make a default accumulator -// pub struct TimeoutVoteAccumulator< -// TYPES: NodeType, -// COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, -// VOTE: VoteType, -// > { -// /// Map of all da signatures accumlated so far -// pub da_vote_outcomes: VoteMap, -// /// A quorum's worth of stake, generally 2f + 1 -// pub success_threshold: NonZeroU64, -// /// A list of valid signatures for certificate aggregation -// pub sig_lists: Vec<::PureAssembledSignatureType>, -// /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check -// pub signers: BitVec, -// /// Phantom data to specify the vote this accumulator is for -// pub phantom: PhantomData, -// } - impl< TYPES: NodeType, COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, VOTE: VoteType, > Accumulator2 for TimeoutVoteAccumulator { - // TODO ED Make commitment instead of committable fn append( mut self, vote: VOTE, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { - let VoteData::Timeout(vote_commitment) = vote.get_data() else { return Either::Left(self); }; - let encoded_key = vote.get_key().to_bytes(); // Deserialize the signature so that it can be assembeld into a QC @@ -571,7 +549,6 @@ impl< if *da_stake_casted >= u64::from(self.success_threshold) { // Assemble QC let real_qc_pp = ::get_public_parameter( - // TODO ED Something about stake table entries. Might be easier to just pass in membership? stake_table_entries.clone(), U256::from(self.success_threshold.get()), ); @@ -608,87 +585,6 @@ pub struct DAVoteAccumulator< pub phantom: PhantomData, } -// impl< -// TYPES: NodeType, -// COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, -// VOTE: VoteType, -// > Accumulator2 for DAVoteAccumulator -// { -// fn append( -// mut self, -// vote: VOTE, -// vote_node_id: usize, -// stake_table_entries: Vec<::StakeTableEntry>, -// ) -> Either> { - -// match vote.get_data() { -// VoteData::DA(_) => warn!("DA vote data"), -// VoteData::Timeout(_) => panic!(), -// _ => error!("Wrong vote data") - -// } - -// let VoteData::DA(vote_commitment) = vote.get_data() else { -// return Either::Left(self); -// }; - -// let encoded_key = vote.get_key().to_bytes(); - -// // Deserialize the signature so that it can be assembeld into a QC -// // TODO ED Update this once we've gotten rid of EncodedSignature -// let original_signature: ::PureAssembledSignatureType = -// bincode_opts() -// .deserialize(&vote.get_signature().0) -// .expect("Deserialization on the signature shouldn't be able to fail."); - -// let (da_stake_casted, da_vote_map) = self -// .da_vote_outcomes -// .entry(vote_commitment) -// .or_insert_with(|| (0, BTreeMap::new())); - -// // Check for duplicate vote -// // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey -// // Have to do this because SignatureKey is not hashable -// if da_vote_map.contains_key(&encoded_key) { -// return Either::Left(self); -// } - -// if self.signers.get(vote_node_id).as_deref() == Some(&true) { -// error!("Node id is already in signers list"); -// return Either::Left(self); -// } -// self.signers.set(vote_node_id, true); -// self.sig_lists.push(original_signature); - -// // Already checked that vote data was for a DA vote above -// *da_stake_casted += u64::from(vote.get_vote_token().vote_count()); -// da_vote_map.insert( -// encoded_key, -// (vote.get_signature(), vote.get_data(), vote.get_vote_token()), -// ); - -// if *da_stake_casted >= u64::from(self.success_threshold) { -// // Assemble QC -// let real_qc_pp = ::get_public_parameter( -// // TODO ED Something about stake table entries. Might be easier to just pass in membership? -// stake_table_entries.clone(), -// U256::from(self.success_threshold.get()), -// ); - -// let real_qc_sig = ::assemble( -// &real_qc_pp, -// self.signers.as_bitslice(), -// &self.sig_lists[..], -// ); - -// self.da_vote_outcomes.remove(&vote_commitment); - -// return Either::Right(AssembledSignature::DA(real_qc_sig)); -// } -// Either::Left(self) -// } -// } - /// Accumulate quorum votes pub struct QuorumVoteAccumulator< TYPES: NodeType, @@ -797,7 +693,6 @@ impl< if *total_stake_casted >= u64::from(self.success_threshold) { // Assemble QC let real_qc_pp = ::get_public_parameter( - // TODO ED Something about stake table entries. Might be easier to just pass in membership? stake_table_entries.clone(), U256::from(self.success_threshold.get()), ); @@ -1021,7 +916,6 @@ impl< } /// Mapping of commitments to vote tokens by key. -// TODO ED Remove this whole token generic type VoteMap = HashMap< COMMITMENT, ( From 897d428668d6194fcff83bc3a5e6ce4a8ed2eb19 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 21 Sep 2023 18:23:37 -0700 Subject: [PATCH 0137/1393] Fix empty block and txn --- hotshot/src/demo.rs | 4 +-- hotshot/src/tasks/mod.rs | 7 ++--- task-impls/src/consensus.rs | 16 ++++-------- task-impls/src/transactions.rs | 47 +++++++++++++++++----------------- testing/tests/da_task.rs | 1 - testing/tests/network_task.rs | 2 -- types/src/block_impl.rs | 1 - 7 files changed, 34 insertions(+), 44 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 976cea433d..468ff5e420 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -101,9 +101,9 @@ impl TestableState for SDemoState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, - padding: u64, + _padding: u64, ) -> ::Transaction { - VIDTransaction(vec![0; padding as usize]) + VIDTransaction(vec![0]) } } /// Implementation of [`NodeType`] for [`VDemoNode`] diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 868768c3dc..bed3b3b344 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -38,7 +38,7 @@ use hotshot_types::{ node_implementation::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, }, - state::ConsensusTime, + state::{ConsensusTime, TestableBlock}, }, vote::{ViewSyncData, VoteType}, }; @@ -251,7 +251,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -289,7 +289,8 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - block: None, + // TODO (Keyao) Shouldn't use test function. + block: ::genesis(), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ba5d288a23..b9a10e3d6e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -80,11 +80,11 @@ pub struct SequencingConsensusTaskState< pub consensus: Arc>>>, /// View timeout from config. pub timeout: u64, - /// View number this view is executing in. + /// View number this view is executing in pub cur_view: TYPES::Time, - /// Current block submitted to DA, if any. - pub block: Option, + /// Current block submitted to DA + pub block: TYPES::BlockType, /// the quorum exchange pub quorum_exchange: Arc>, @@ -1078,7 +1078,7 @@ where } SequencingHotShotEvent::SendDABlockData(block) => { // ED TODO Should make sure this is actually the most recent block - self.block = Some(block); + self.block = block; } _ => {} } @@ -1150,13 +1150,7 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - let block_commitment = match &self.block { - Some(block) => block.commit(), - None => { - debug!("No block yet."); - return false; - } - }; + let block_commitment = self.block.commit(); let leaf = SequencingLeaf { view_number: view, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2dce3684ba..c01cf1ca2b 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -18,12 +18,12 @@ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::DACertificate, consensus::Consensus, - data::{SequencingLeaf, VidScheme, VidSchemeTrait}, - message::{Message, SequencingMessage}, + data::{SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, + message::{Message, Proposal, SequencingMessage}, traits::{ block_contents::Transaction, consensus_api::SequencingConsensusApi, - election::ConsensusExchange, + election::{CommitteeExchangeType, ConsensusExchange}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, BlockPayload, }, @@ -239,7 +239,7 @@ where let txns = self.wait_for_transactions(parent_leaf).await?; debug!("Prepare VID shares"); - if txns.len() > 0 { + if !txns.is_empty() { /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 const NUM_STORAGE_NODES: usize = 10; /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 @@ -254,31 +254,30 @@ where for txn in &txns { txns_flatten.extend(txn.bytes()); } - tracing::error!("here txn task {:?}", txns); let vid_disperse = vid.disperse(&txns_flatten).unwrap(); block = VIDBlockPayload::new(txns, vid_disperse.commit); - // TODO Commenting out the following code since we need to update the proposal, - // signature, and exchange for VID dispersal. They were copy-pasted from DA - // code. - // self.event_stream - // .publish(SequencingHotShotEvent::VidDisperseSend( - // Proposal { - // data: VidDisperse { - // view_number: view + 1, - // commitment: block.commit(), - // shares: vid_disperse.shares, - // common: vid_disperse.common, - // }, - // signature: message.signature, - // }, - // // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 - // self.committee_exchange.public_key().clone(), - // )) - // .await; + self.event_stream + .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) + .await; self.event_stream - .publish(SequencingHotShotEvent::BlockReady(block, view + 1)) + .publish(SequencingHotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: view + 1, + commitment: block.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + // TODO (Keyao) This is also signed in DA task. + signature: self + .committee_exchange + .sign_da_proposal(&block.commit()), + }, + // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 + self.committee_exchange.public_key().clone(), + )) .await; } return None; diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index ba042b35ee..7c8b389d95 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -41,7 +41,6 @@ async fn test_da_task() { let pub_key = *api.public_key(); let vid = vid_init(); let txn = vec![0u8]; - tracing::error!("here da task"); let vid_disperse = vid.disperse(&txn).unwrap(); let block_commitment = vid_disperse.commit; let block = VIDBlockPayload::new(vec![VIDTransaction(txn)], block_commitment); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f11508e8f6..3413bc4898 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -44,7 +44,6 @@ async fn test_network_task() { let priv_key = api.private_key(); let vid = vid_init(); let txn = vec![0u8]; - tracing::error!("here network task1"); let vid_disperse = vid.disperse(&txn).unwrap(); let block_commitment = vid_disperse.commit; let block = VIDBlockPayload::new(vec![VIDTransaction(txn)], block_commitment); @@ -59,7 +58,6 @@ async fn test_network_task() { let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; // TODO for now reuse the same block commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 - tracing::error!("here network task2"); let da_vid_disperse = Proposal { data: VidDisperse { view_number: da_proposal.data.view_number, diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 79b0ce234b..bfe4ba6e59 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -113,7 +113,6 @@ impl TestableBlock for VIDBlockPayload { let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); let txn = vec![0u8]; - tracing::error!("here genesis"); let vid_disperse = vid.disperse(&txn).unwrap(); VIDBlockPayload::new(vec![VIDTransaction(txn)], vid_disperse.commit) } From d7dee924f560c7da713391df778fc479a1839072 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 21:41:04 -0400 Subject: [PATCH 0138/1393] Fix lints --- hotshot/src/lib.rs | 5 +- task-impls/src/consensus.rs | 311 +++++++++++------------- task-impls/src/da.rs | 8 +- task-impls/src/events.rs | 8 +- task-impls/src/harness.rs | 2 - task-impls/src/network.rs | 6 +- task-impls/src/view_sync.rs | 2 +- testing/tests/consensus_task.rs | 1 - testing/tests/timeout.rs | 4 +- testing/tests/view_sync_task.rs | 2 +- types/src/certificate.rs | 42 +--- types/src/message.rs | 16 +- types/src/traits/election.rs | 281 ++------------------- types/src/traits/node_implementation.rs | 18 +- types/src/vote.rs | 297 +--------------------- 15 files changed, 217 insertions(+), 786 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 52118f7eea..19fa2d9cde 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -257,8 +257,8 @@ impl> SystemContext { self.inner .internal_event_stream .publish(SequencingHotShotEvent::QCFormed(either::Left( - QuorumCertificate::genesis())), - ) + QuorumCertificate::genesis(), + ))) .await; } @@ -693,7 +693,6 @@ where let quorum_exchange = self.inner.exchanges.quorum_exchange().clone(); let committee_exchange = self.inner.exchanges.committee_exchange().clone(); let view_sync_exchange = self.inner.exchanges.view_sync_exchange().clone(); - let timeout_exchange = self.inner.exchanges.timeout_exchange().clone(); let handle = SystemContextHandle { registry, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a0f878cdd4..7c62af9500 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -167,7 +167,7 @@ pub struct VoteCollectionTaskState< { /// the quorum exchange pub quorum_exchange: Arc>, - + /// the timeout exchange pub timeout_exchange: Arc>, #[allow(clippy::type_complexity)] @@ -183,6 +183,7 @@ pub struct VoteCollectionTaskState< >, /// Accumulator for votes + #[allow(clippy::type_complexity)] pub timeout_accumulator: Either< as SignedCertificate< TYPES, @@ -263,7 +264,7 @@ where let accumulator = state.accumulator.left().unwrap(); - match state.quorum_exchange.accumulate_vote_2( + match state.quorum_exchange.accumulate_vote( accumulator, &vote, &vote_internal.leaf_commitment, @@ -293,11 +294,6 @@ where } } } - - QuorumVote::Timeout(_vote) => { - error!("The next leader has received an unexpected vote!"); - return (None, state); - } QuorumVote::No(_) => { error!("The next leader has received an unexpected vote!"); } @@ -319,7 +315,7 @@ where let accumulator = state.timeout_accumulator.left().unwrap(); - match state.timeout_exchange.accumulate_vote_2( + match state.timeout_exchange.accumulate_vote( accumulator, &vote, &vote.get_view().commit(), @@ -353,7 +349,7 @@ where return (Some(HotShotTaskCompleted::ShutDown), state); } _ => { - error!("Unexpected event") + error!("Unexpected event"); } } (None, state) @@ -733,84 +729,67 @@ where timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), }; - let justify_qc_commitment = justify_qc.commit(); let leaf_commitment = leaf.commit(); - let vote_token = self.quorum_exchange.make_vote_token(view); - - // TODO Put vote token creation inside message creation functions - // https://github.com/EspressoSystems/HotShot/issues/1795. - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for consensus committee on {:?}", view); + // Validate the `height` + // TODO Remove height from proposal validation; view number is sufficient + // https://github.com/EspressoSystems/HotShot/issues/1796 + if leaf.height != parent.height + 1 { + error!( + "Incorrect height in proposal (expected {}, got {})", + parent.height + 1, + leaf.height + ); + return; + } + // Validate the signature. + else if !view_leader_key.validate(&proposal.signature, leaf_commitment.as_ref()) { + error!(?proposal.signature, "Could not verify proposal."); + } + // Create a positive vote if either liveness or safety check + // passes. + else { + // Liveness check. + let liveness_check = justify_qc.view_number > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.view_number, + Terminator::Inclusive(consensus.locked_view), + false, + |leaf| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + if let Err(e) = outcome { + self.api.send_view_error(view, Arc::new(e)).await; } - Ok(Some(vote_token)) => { - debug!("We were chosen for consensus committee on {:?}", view); - // Validate the `height` - // TODO Remove height from proposal validation; view number is sufficient - // https://github.com/EspressoSystems/HotShot/issues/1796 - if leaf.height != parent.height + 1 { - error!( - "Incorrect height in proposal (expected {}, got {})", - parent.height + 1, - leaf.height - ); - return; - } - // Validate the signature. - else if !view_leader_key - .validate(&proposal.signature, leaf_commitment.as_ref()) - { - error!(?proposal.signature, "Could not verify proposal."); - } - // Create a positive vote if either liveness or safety check - // passes. - else { - // Liveness check. - let liveness_check = justify_qc.view_number > consensus.locked_view; - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.view_number, - Terminator::Inclusive(consensus.locked_view), - false, - |leaf| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - if let Err(e) = outcome { - self.api.send_view_error(view, Arc::new(e)).await; - } - - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - error!("Failed safety check and liveness check"); - } - } + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + error!("Failed safety check and liveness check"); + } + } - let high_qc = leaf.justify_qc.clone(); - let mut new_anchor_view = consensus.last_decided_view; - let mut new_locked_view = consensus.locked_view; - let mut last_view_number_visited = view; - let mut new_commit_reached: bool = false; - let mut new_decide_reached = false; - let mut new_decide_qc = None; - let mut leaf_views = Vec::new(); - let mut included_txns = HashSet::new(); - let old_anchor_view = consensus.last_decided_view; - let parent_view = leaf.justify_qc.view_number; - let mut current_chain_length = 0usize; - if parent_view + 1 == view { - current_chain_length += 1; - if let Err(e) = consensus.visit_leaf_ancestors( + let high_qc = leaf.justify_qc.clone(); + let mut new_anchor_view = consensus.last_decided_view; + let mut new_locked_view = consensus.locked_view; + let mut last_view_number_visited = view; + let mut new_commit_reached: bool = false; + let mut new_decide_reached = false; + let mut new_decide_qc = None; + let mut leaf_views = Vec::new(); + let mut included_txns = HashSet::new(); + let old_anchor_view = consensus.last_decided_view; + let parent_view = leaf.justify_qc.view_number; + let mut current_chain_length = 0usize; + if parent_view + 1 == view { + current_chain_length += 1; + if let Err(e) = consensus.visit_leaf_ancestors( parent_view, Terminator::Exclusive(old_anchor_view), true, @@ -869,90 +848,88 @@ where event: EventType::Error { error: e.into() }, }).await; } - } + } - let included_txns_set: HashSet<_> = if new_decide_reached { - included_txns - } else { - HashSet::new() - }; + let included_txns_set: HashSet<_> = if new_decide_reached { + included_txns + } else { + HashSet::new() + }; - // promote lock here to add proposal to statemap - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - if high_qc.view_number > consensus.high_qc.view_number { - consensus.high_qc = high_qc; - } - consensus.state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - }, - }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - if new_commit_reached { - consensus.locked_view = new_locked_view; - } - #[allow(clippy::cast_precision_loss)] - if new_decide_reached { - debug!("about to publish decide"); - self.event_stream - .publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())) - .await; - let decide_sent = self.output_event_stream.publish(Event { - view_number: consensus.last_decided_view, - event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - qc: Arc::new(new_decide_qc.unwrap()), - block_size: Some(included_txns_set.len().try_into().unwrap()), - }, - }); - let old_anchor_view = consensus.last_decided_view; - consensus - .collect_garbage(old_anchor_view, new_anchor_view) - .await; - consensus.last_decided_view = new_anchor_view; - consensus.invalid_qc = 0; + // promote lock here to add proposal to statemap + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + if high_qc.view_number > consensus.high_qc.view_number { + consensus.high_qc = high_qc; + } + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + if new_commit_reached { + consensus.locked_view = new_locked_view; + } + #[allow(clippy::cast_precision_loss)] + if new_decide_reached { + debug!("about to publish decide"); + self.event_stream + .publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())) + .await; + let decide_sent = self.output_event_stream.publish(Event { + view_number: consensus.last_decided_view, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(new_decide_qc.unwrap()), + block_size: Some(included_txns_set.len().try_into().unwrap()), + }, + }); + let old_anchor_view = consensus.last_decided_view; + consensus + .collect_garbage(old_anchor_view, new_anchor_view) + .await; + consensus.last_decided_view = new_anchor_view; + consensus.invalid_qc = 0; - // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. - if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { - error!("Could not insert new anchor into the storage API: {:?}", e); - } + // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. + if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { + error!("Could not insert new anchor into the storage API: {:?}", e); + } - debug!("Sending Decide for view {:?}", consensus.last_decided_view); - debug!("Decided txns len {:?}", included_txns_set.len()); - decide_sent.await; - } + debug!("Sending Decide for view {:?}", consensus.last_decided_view); + debug!("Decided txns len {:?}", included_txns_set.len()); + decide_sent.await; + } - let new_view = self.current_proposal.clone().unwrap().view_number + 1; - // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_exchange.is_leader(new_view) - && consensus.high_qc.view_number - == self.current_proposal.clone().unwrap().view_number; - // todo get rid of this clone - let qc = consensus.high_qc.clone(); - - drop(consensus); - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - self.publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) - .await; - } - if !self.vote_if_able().await { - return; - } - self.current_proposal = None; + let new_view = self.current_proposal.clone().unwrap().view_number + 1; + // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = self.quorum_exchange.is_leader(new_view) + && consensus.high_qc.view_number + == self.current_proposal.clone().unwrap().view_number; + // todo get rid of this clone + let qc = consensus.high_qc.clone(); - for v in (*self.cur_view)..=(*view) { - let time = TYPES::Time::new(v); - self.certs.remove(&time); - } - } + drop(consensus); + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + self.publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) + .await; + } + if !self.vote_if_able().await { + return; + } + self.current_proposal = None; + + for v in (*self.cur_view)..=(*view) { + let time = TYPES::Time::new(v); + self.certs.remove(&time); } } SequencingHotShotEvent::QuorumVoteRecv(vote) => { @@ -998,7 +975,7 @@ where phantom: PhantomData, }; - let accumulator = self.quorum_exchange.accumulate_vote_2( + let accumulator = self.quorum_exchange.accumulate_vote( new_accumulator, &vote, &vote_internal.clone().leaf_commitment, @@ -1064,7 +1041,7 @@ where .await; } } - QuorumVote::Timeout(_) | QuorumVote::No(_) => { + QuorumVote::No(_) => { error!("The next leader has received an unexpected vote!"); } } @@ -1106,7 +1083,7 @@ where phantom: PhantomData, }; - let timeout_accumulator = self.timeout_exchange.accumulate_vote_2( + let timeout_accumulator = self.timeout_exchange.accumulate_vote( new_accumulator, &vote, &vote.get_view().commit(), @@ -1254,14 +1231,6 @@ where }, }) .await; - - if !self.quorum_exchange.is_leader(self.cur_view) { - return; - } - - let consensus = self.consensus.read().await; - let qc = consensus.high_qc.clone(); - drop(consensus); } SequencingHotShotEvent::Timeout(view) => { // NOTE: We may optionally have the timeout task listen for view change events diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index dee213a1e1..96a4d18a48 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -156,7 +156,7 @@ where let accumulator = state.accumulator.left().unwrap(); - match state.committee_exchange.accumulate_vote_2( + match state.committee_exchange.accumulate_vote( accumulator, &vote, &vote.block_commitment, @@ -198,7 +198,7 @@ where let accumulator = state.accumulator.left().unwrap(); - match state.committee_exchange.accumulate_vote_2( + match state.committee_exchange.accumulate_vote( accumulator, &vote, &vote.block_commitment, @@ -374,7 +374,7 @@ where phantom: PhantomData, }; - let accumulator = self.committee_exchange.accumulate_vote_2( + let accumulator = self.committee_exchange.accumulate_vote( new_accumulator, &vote, &vote.clone().block_commitment, @@ -456,7 +456,7 @@ where phantom: PhantomData, }; - let accumulator = self.committee_exchange.accumulate_vote_2( + let accumulator = self.committee_exchange.accumulate_vote( new_accumulator, &vote, &vote.clone().block_commitment, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index ded16bac6e..92e0013615 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -6,7 +6,7 @@ use hotshot_types::{ traits::node_implementation::{ NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, }, - vote::{DAVote, QuorumVote, ViewSyncVote, TimeoutVote2}, + vote::{DAVote, QuorumVote, TimeoutVote, ViewSyncVote}, }; use crate::view_sync::ViewSyncPhase; @@ -20,8 +20,10 @@ pub enum SequencingHotShotEvent> { QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote), - TimeoutVoteRecv(TimeoutVote2), - TimeoutVoteSend(TimeoutVote2), + /// A timeout vote recevied from the network; handled by consensus task + TimeoutVoteRecv(TimeoutVote), + /// Send a timeout vote to the network; emitted by consensus task replicas + TimeoutVoteSend(TimeoutVote), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 464217a383..136093bc3e 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -11,7 +11,6 @@ use hotshot_task::{ use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use snafu::Snafu; use std::{collections::HashMap, future::Future, sync::Arc}; -use tracing::error; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState> { @@ -107,7 +106,6 @@ pub fn handle_event>( *num_expected -= 1; } - if state.expected_output.is_empty() { return (Some(HotShotTaskCompleted::ShutDown), state); } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 233190a6d5..c81463e5c7 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -100,7 +100,10 @@ impl< }, Either::Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(proposal) => { - error!("Received da proposal for view {:?}", proposal.clone().data.view_number); + error!( + "Received da proposal for view {:?}", + proposal.clone().data.view_number + ); SequencingHotShotEvent::DAProposalRecv(proposal.clone(), sender) } CommitteeConsensusMessage::DAVote(vote) => { @@ -354,7 +357,6 @@ impl< | SequencingHotShotEvent::VidCertSend(_, _) | SequencingHotShotEvent::ViewChange(_) | SequencingHotShotEvent::TimeoutVoteSend(_) - ) } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d5b2480c11..86be8ba62e 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1004,7 +1004,7 @@ where *vote_internal.round, vote_internal.relay ); - let accumulator = self.exchange.accumulate_vote_2( + let accumulator = self.exchange.accumulate_vote( self.accumulator.left().unwrap(), &vote, &view_sync_data, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 9e722088ef..faa08e7c20 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -146,7 +146,6 @@ async fn test_consensus_vote() { let handle = build_system_handle(2).await.0; let (private_key, public_key) = key_pair_for_id(1); - let (private_key_2, public_key_2) = key_pair_for_id(2); let mut input = Vec::new(); let mut output = HashMap::new(); diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 2fd6023224..64ea239321 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -7,13 +7,12 @@ async fn test_timeout() { use std::time::Duration; - use hotshot_testing::node_types::SequencingLibp2pImpl; use hotshot_testing::node_types::SequencingWebImpl; use hotshot_testing::overall_safety_task::OverallSafetyPropertiesDescription; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::SequencingTestTypes, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; @@ -48,7 +47,6 @@ async fn test_timeout() { node_changes: vec![(Duration::from_millis(500), dead_nodes)], }; - metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 919303d36a..99255e8209 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -23,7 +23,7 @@ async fn test_view_sync_task() { use core::panic; use hotshot::tasks::add_view_sync_task; - use hotshot_task_impls::{harness::run_harness, view_sync::ViewSyncPhase}; + use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{ traits::election::VoteData, diff --git a/types/src/certificate.rs b/types/src/certificate.rs index d0ed806083..c540a349fc 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,19 +1,16 @@ //! Provides two types of cerrtificates and their accumulators. -use crate::vote::AccumulatorPlaceholder; use crate::vote::DAVoteAccumulator; use crate::vote::QuorumVote; use crate::vote::QuorumVoteAccumulator; -use crate::vote::TimeoutVote2; +use crate::vote::TimeoutVote; use crate::vote::TimeoutVoteAccumulator; use crate::vote::ViewSyncVoteAccumulator; use crate::vote::VoteType; use crate::{ data::{fake_commitment, serialize_signature, LeafType}, traits::{ - election::{SignedCertificate, VoteData, VoteToken}, - node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + election::SignedCertificate, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, }, vote::{DAVote, ViewSyncData, ViewSyncVote}, @@ -90,7 +87,7 @@ impl SignedCertificate> for TimeoutCertificate { - type Vote = TimeoutVote2; + type Vote = TimeoutVote; type VoteAccumulator = TimeoutVoteAccumulator, Self::Vote>; @@ -98,11 +95,10 @@ impl signatures: AssembledSignature, vote: Self::Vote, ) -> Self { - let qc = TimeoutCertificate { + TimeoutCertificate { view_number: vote.get_view(), signatures, - }; - qc + } } fn view_number(&self) -> TYPES::Time { @@ -117,8 +113,8 @@ impl self.view_number.commit() } - fn set_leaf_commitment(&mut self, commitment: Commitment) { - todo!() + fn set_leaf_commitment(&mut self, _commitment: Commitment) { + unimplemented!() } fn is_genesis(&self) -> bool { @@ -126,7 +122,7 @@ impl } fn genesis() -> Self { - todo!() + unimplemented!() } } @@ -174,7 +170,7 @@ pub enum AssembledSignature { No(::QCType), /// These signatures are for a 'DA' certificate DA(::QCType), - + /// These signatures are for a `Timeout` certificate Timeout(::QCType), /// These signatures are for genesis certificate Genesis(), @@ -186,25 +182,6 @@ pub enum AssembledSignature { ViewSyncFinalize(::QCType), } -/// Data from a vote needed to accumulate into a `SignedCertificate` -pub struct VoteMetaData { - /// Voter's public key - pub encoded_key: EncodedPublicKey, - /// Votes signature - pub encoded_signature: EncodedSignature, - /// Commitment to what's voted on. E.g. the leaf for a `QuorumCertificate` - pub commitment: Commitment, - /// Data of the vote, yes, no, timeout, or DA - pub data: VoteData>, - /// The votes's token - pub vote_token: T, - /// View number for the vote - pub view_number: TIME, - /// The relay index for view sync - // TODO ED Make VoteMetaData more generic to avoid this variable that only ViewSync uses - pub relay: Option, -} - impl> SignedCertificate> for QuorumCertificate @@ -220,7 +197,6 @@ impl> QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { vote_internal.leaf_commitment } - QuorumVote::Timeout(_) => unimplemented!(), }; let qc = QuorumCertificate { leaf_commitment, diff --git a/types/src/message.rs b/types/src/message.rs index f08cb54dfb..1885a95d3d 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -13,7 +13,7 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{DAVote, QuorumVote, TimeoutVote, TimeoutVote2, ViewSyncVote, VoteType}, + vote::{DAVote, QuorumVote, TimeoutVote, ViewSyncVote, VoteType}, }; use derivative::Derivative; use either::Either::{self, Left, Right}; @@ -201,8 +201,7 @@ where } GeneralConsensusMessage::ViewSyncVote(_) | GeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), - | GeneralConsensusMessage::TimeoutVote(_) => todo!(), - + GeneralConsensusMessage::TimeoutVote(_) => todo!(), } } } @@ -325,7 +324,7 @@ where ViewSyncCertificate(Proposal>), /// Message with a Timeout vote - TimeoutVote(TimeoutVote2), + TimeoutVote(TimeoutVote), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] @@ -421,9 +420,7 @@ impl< GeneralConsensusMessage::ViewSyncCertificate(message) => { message.data.get_view_number() } - GeneralConsensusMessage::TimeoutVote(message) => { - message.get_view() - } + GeneralConsensusMessage::TimeoutVote(message) => message.get_view(), } } Right(committee_message) => { @@ -451,11 +448,12 @@ impl< match &self.0 { Left(general_message) => match general_message { GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, - GeneralConsensusMessage::Vote(_) => MessagePurpose::Vote, + GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { + MessagePurpose::Vote + } GeneralConsensusMessage::InternalTrigger(_) => MessagePurpose::Internal, GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, - GeneralConsensusMessage::TimeoutVote(_) => MessagePurpose::Vote, }, Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 93e8a14abc..db9c46f9ea 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -10,10 +10,10 @@ use super::{ use crate::{ certificate::{ AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, - ViewSyncCertificate, VoteMetaData, + ViewSyncCertificate, }, data::{DAProposal, ProposalType}, - vote::TimeoutVote2, + vote::TimeoutVote, }; use crate::{ @@ -21,7 +21,7 @@ use crate::{ vote::ViewSyncVoteInternal, }; -use crate::vote::Accumulator2; +use crate::vote::Accumulator; use crate::{ data::LeafType, traits::{ @@ -30,10 +30,7 @@ use crate::{ signature_key::SignatureKey, state::ConsensusTime, }, - vote::{ - DAVote, QuorumVote, TimeoutVote, ViewSyncData, ViewSyncVote, VoteAccumulator, VoteType, - YesOrNoVote, - }, + vote::{DAVote, QuorumVote, ViewSyncData, ViewSyncVote, VoteType, YesOrNoVote}, }; use bincode::Options; use commit::{Commitment, Committable}; @@ -170,7 +167,7 @@ where type Vote: VoteType; /// `Accumulator` type to accumulate votes. - type VoteAccumulator: Accumulator2; + type VoteAccumulator: Accumulator; /// Build a QC from the threshold signature and commitment // TODO ED Rename this function and rework this function parameters @@ -331,12 +328,6 @@ pub trait ConsensusExchange: Send + Sync { .make_vote_token(view_number, self.private_key()) } - /// The contents of a vote on `commit`. - fn vote_data( - &self, - commit: Commitment, - ) -> VoteData>; - /// Validate a QC. fn is_valid_cert(&self, qc: &Self::Certificate, commit: Commitment) -> bool { if qc.is_genesis() && qc.view_number() == TYPES::Time::genesis() { @@ -382,14 +373,6 @@ pub trait ConsensusExchange: Send + Sync { ); ::check(&real_qc_pp, real_commit.as_ref(), &qc) } - AssembledSignature::Timeout(qc) => { - let real_commit = VoteData::Timeout(leaf_commitment).get_commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } AssembledSignature::Genesis() => true, AssembledSignature::ViewSyncPreCommit(_) | AssembledSignature::ViewSyncCommit(_) @@ -402,31 +385,6 @@ pub trait ConsensusExchange: Send + Sync { /// Validate a vote by checking its signature and token. fn is_valid_vote( - &self, - encoded_key: &EncodedPublicKey, - encoded_signature: &EncodedSignature, - data: VoteData>, - vote_token: Checked, - ) -> bool { - let mut is_valid_vote_token = false; - let mut is_valid_signature = false; - if let Some(key) = ::from_bytes(encoded_key) { - is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); - let valid_vote_token = self.membership().validate_vote_token(key, vote_token); - is_valid_vote_token = match valid_vote_token { - Err(_) => { - error!("Vote token was invalid"); - false - } - Ok(Checked::Valid(_)) => true, - Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, - }; - } - is_valid_signature && is_valid_vote_token - } - - /// Validate a vote by checking its signature and token. - fn is_valid_vote_2( &self, key: &TYPES::SignatureKey, encoded_signature: &EncodedSignature, @@ -434,9 +392,6 @@ pub trait ConsensusExchange: Send + Sync { vote_token: &Checked, ) -> bool { let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); - if !is_valid_signature { - panic!() - } let valid_vote_token = self .membership() .validate_vote_token(key.clone(), vote_token.clone()); @@ -449,45 +404,15 @@ pub trait ConsensusExchange: Send + Sync { Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, }; - let result = is_valid_signature && is_valid_vote_token; - if !result { - panic!() - } - result - } - - #[doc(hidden)] - - fn accumulate_internal( - &self, - _vota_meta: VoteMetaData, - _accumulator: VoteAccumulator, - ) -> Either, Self::Certificate> - { - todo!() // TODO ED Remove this function + is_valid_signature && is_valid_vote_token } - /// Add a vote to the accumulating signature. Return The certificate if the vote - /// brings us over the threshould, Else return the accumulator. - #[allow(clippy::too_many_arguments)] - fn accumulate_vote( - &self, - encoded_key: &EncodedPublicKey, - encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, - vote_data: VoteData>, - vote_token: TYPES::VoteTokenType, - view_number: TYPES::Time, - accumlator: VoteAccumulator, - relay: Option, - ) -> Either, Self::Certificate>; - // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. /// Accumulate vote /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached #[allow(clippy::type_complexity)] - fn accumulate_vote_2( + fn accumulate_vote( &self, accumulator: <>::Certificate as SignedCertificate< TYPES, @@ -511,7 +436,7 @@ pub trait ConsensusExchange: Send + Sync { >>::VoteAccumulator, Self::Certificate, > { - if !self.is_valid_vote_2( + if !self.is_valid_vote( &vote.get_key(), &vote.get_signature(), &vote.get_data(), @@ -742,38 +667,6 @@ impl< .make_vote_token(view_number, &self.private_key) } - fn vote_data( - &self, - commit: Commitment, - ) -> VoteData> { - VoteData::DA(commit) - } - - /// Add a vote to the accumulating signature. Return The certificate if the vote - /// brings us over the threshould, Else return the accumulator. - fn accumulate_vote( - &self, - encoded_key: &EncodedPublicKey, - encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, - vote_data: VoteData>, - vote_token: TYPES::VoteTokenType, - view_number: TYPES::Time, - accumlator: VoteAccumulator, - _relay: Option, - ) -> Either, Self::Certificate> - { - let meta = VoteMetaData { - encoded_key: encoded_key.clone(), - encoded_signature: encoded_signature.clone(), - commitment: leaf_commitment, - data: vote_data, - vote_token, - view_number, - relay: None, - }; - self.accumulate_internal(meta, accumlator) - } fn membership(&self) -> &Self::Membership { &self.membership } @@ -790,7 +683,7 @@ pub trait QuorumExchangeType, ConsensusExchange { /// Create a message with a positive vote on validating or commitment proposal. - // TODO ED This returns just a general message type, it's not even bound to a proposal, and this is just a function on the QC. Make proprosal doesn't really apply to all cert types. + // TODO ED This returns just a general message type, it's not even bound to a proposal, and this is just a function on the QC. Make proprosal doesn't really apply to all cert types. fn create_yes_message>( &self, justify_qc_commitment: Commitment, @@ -829,15 +722,6 @@ pub trait QuorumExchangeType, leaf_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); - /// Sign a timeout vote. - /// - /// We only sign the view number, which is the minimum amount of information necessary for - /// checking that this node timed out on that view. - /// - /// This also allows for the high QC included with the vote to be spoofed in a MITM scenario, - /// but it is outside our threat model. - fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature); - /// Create a message with a negative vote on validating or commitment proposal. fn create_no_message>( &self, @@ -848,16 +732,6 @@ pub trait QuorumExchangeType, ) -> GeneralConsensusMessage where I::Exchanges: ExchangesType>; - - /// Create a message with a timeout vote on validating or commitment proposal. - fn create_timeout_message>( - &self, - justify_qc: QuorumCertificate, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - I::Exchanges: ExchangesType>; } /// Standard implementation of [`QuroumExchangeType`] based on Hot Stuff consensus. @@ -959,22 +833,6 @@ impl< (self.public_key.to_bytes(), signature) } - /// Sign a timeout vote. - /// - /// We only sign the view number, which is the minimum amount of information necessary for - /// checking that this node timed out on that view. - /// - /// This also allows for the high QC included with the vote to be spoofed in a MITM scenario, - /// but it is outside our threat model. - fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::Timeout(view_number.commit()) - .get_commit() - .as_ref(), - ); - (self.public_key.to_bytes(), signature) - } /// Create a message with a negative vote on validating or commitment proposal. fn create_no_message>( &self, @@ -996,26 +854,6 @@ impl< vote_data: VoteData::No(leaf_commitment), })) } - - /// Create a message with a timeout vote on validating or commitment proposal. - fn create_timeout_message>( - &self, - high_qc: QuorumCertificate, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - I::Exchanges: ExchangesType>, - { - let signature = self.sign_timeout_vote(current_view); - GeneralConsensusMessage::::Vote(QuorumVote::Timeout(TimeoutVote { - high_qc, - signature, - current_view, - vote_token, - vote_data: VoteData::Timeout(current_view.commit()), - })) - } } impl< @@ -1059,37 +897,6 @@ impl< &self.network } - fn vote_data( - &self, - commit: Commitment, - ) -> VoteData> { - VoteData::Yes(commit) - } - - /// Add a vote to the accumulating signature. Return The certificate if the vote - /// brings us over the threshould, Else return the accumulator. - fn accumulate_vote( - &self, - encoded_key: &EncodedPublicKey, - encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, - vote_data: VoteData>, - vote_token: TYPES::VoteTokenType, - view_number: TYPES::Time, - accumlator: VoteAccumulator, - _relay: Option, - ) -> Either, Self::Certificate> { - let meta = VoteMetaData { - encoded_key: encoded_key.clone(), - encoded_signature: encoded_signature.clone(), - commitment: leaf_commitment, - data: vote_data, - vote_token, - view_number, - relay: None, - }; - self.accumulate_internal(meta, accumlator) - } fn membership(&self) -> &Self::Membership { &self.membership } @@ -1421,37 +1228,6 @@ impl< &self.network } - fn vote_data( - &self, - _commit: Commitment, - ) -> VoteData> { - unimplemented!() - } - - fn accumulate_vote( - &self, - encoded_key: &EncodedPublicKey, - encoded_signature: &EncodedSignature, - leaf_commitment: Commitment>, - vote_data: VoteData>, - vote_token: TYPES::VoteTokenType, - view_number: TYPES::Time, - accumlator: VoteAccumulator, TYPES>, - relay: Option, - ) -> Either, TYPES>, Self::Certificate> - { - let meta = VoteMetaData { - encoded_key: encoded_key.clone(), - encoded_signature: encoded_signature.clone(), - commitment: leaf_commitment, - data: vote_data, - vote_token, - view_number, - relay, - }; - self.accumulate_internal(meta, accumlator) - } - fn membership(&self) -> &Self::Membership { &self.membership } @@ -1499,7 +1275,9 @@ impl< { } +/// Trait defining functiosn for a `TimeoutExchange` pub trait TimeoutExchangeType: ConsensusExchange { + /// Create and sign a timeout message fn create_timeout_message>( &self, view: TYPES::Time, @@ -1509,19 +1287,17 @@ pub trait TimeoutExchangeType: ConsensusExchange I::Exchanges: ExchangesType>, { let signature = TYPES::SignatureKey::sign( - &self.private_key(), - &VoteData::>::Timeout(view.commit()) - .get_commit().as_ref() - + self.private_key(), + VoteData::>::Timeout(view.commit()) + .get_commit() + .as_ref(), ); - GeneralConsensusMessage::::TimeoutVote(TimeoutVote2 { + GeneralConsensusMessage::::TimeoutVote(TimeoutVote { signature: (self.public_key().to_bytes(), signature), current_view: view, - vote_token + vote_token, }) - - } } @@ -1545,7 +1321,7 @@ impl< > ConsensusExchange for TimeoutExchange { type Proposal = PROPOSAL; - type Vote = TimeoutVote2; + type Vote = TimeoutVote; type Certificate = TimeoutCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; @@ -1575,10 +1351,6 @@ impl< &self.network } - fn vote_data(&self, _commit: Commitment) -> VoteData> { - unimplemented!() - } - fn membership(&self) -> &Self::Membership { &self.membership } @@ -1588,23 +1360,6 @@ impl< fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { &self.private_key } - - fn accumulate_vote( - &self, - _encoded_key: &EncodedPublicKey, - _encoded_signature: &EncodedSignature, - _leaf_commitment: Commitment, - _vote_data: VoteData>, - _vote_token: ::VoteTokenType, - _view_number: ::Time, - _accumlator: VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, - _relay: Option, - ) -> Either< - VoteAccumulator<::VoteTokenType, Self::Commitment, TYPES>, - Self::Certificate, - > { - todo!() - } } /// Testable implementation of a [`Membership`]. Will expose a method to generate a vote token used for testing. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index f7aafe8615..5e3d30f2f4 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -151,9 +151,10 @@ pub trait ExchangesType, MESSA /// Protocol for exchanging data availability proposals and votes. type CommitteeExchange: CommitteeExchangeType + Clone + Debug; - /// Get the committee exchange. + /// Get the committee exchange fn committee_exchange(&self) -> &Self::CommitteeExchange; + /// Get the timeout exchange fn timeout_exchange(&self) -> &Self::TimeoutExchange; /// Protocol for exchanging quorum proposals and votes. @@ -162,6 +163,7 @@ pub trait ExchangesType, MESSA /// Protocol for exchanging view sync proposals and votes. type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; + /// Protocol for receiving timeout votes type TimeoutExchange: TimeoutExchangeType + Clone + Debug; /// Election configurations for exchanges @@ -234,10 +236,15 @@ pub struct SequencingExchanges< /// Committee exchange. committee_exchange: COMMITTEEEXCHANGE, - // TODO ED Make this not public - pub timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>, + /// Timeout exchange + // This type can be simplified once we rework the exchanges trait + // It is here to avoid needing to instantiate it where all the other exchanges are instantiated + // https://github.com/EspressoSystems/HotShot/issues/1799 + #[allow(clippy::type_complexity)] + + pub timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>, - /// Phantom data. + /// Phantom data _phantom: PhantomData<(TYPES, MESSAGE)>, } @@ -255,6 +262,7 @@ where type CommitteeExchange = COMMITTEEEXCHANGE; type QuorumExchange = QUORUMEXCHANGE; type ViewSyncExchange = VIEWSYNCEXCHANGE; + #[allow(clippy::type_complexity)] type TimeoutExchange = TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>; type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); @@ -286,6 +294,7 @@ where entry.clone(), sk.clone(), ); + #[allow(clippy::type_complexity)] let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( entries.clone(), configs.0.clone(), @@ -349,6 +358,7 @@ pub type SequencingQuorumEx = Message, >>::QuorumExchange; +/// Alias for `TimeoutExchange` type pub type SequencingTimeoutEx = <>::Exchanges as ExchangesType< TYPES, diff --git a/types/src/vote.rs b/types/src/vote.rs index 8ba9f060d7..33a0c9eccb 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -26,7 +26,7 @@ use std::{ marker::PhantomData, num::NonZeroU64, }; -use tracing::{error, warn}; +use tracing::error; /// The vote sent by consensus messages. pub trait VoteType Deserialize<'a> + Serialize + Clone>: @@ -80,26 +80,10 @@ pub struct YesOrNoVote> { pub vote_data: VoteData>, } -/// A timeout vote. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct TimeoutVote> { - /// The highest valid QC this node knows about - pub high_qc: QuorumCertificate, - /// The signature share associated with this vote - pub signature: (EncodedPublicKey, EncodedSignature), - /// The view this vote was cast for - pub current_view: TYPES::Time, - /// The vote token generated by this replica - pub vote_token: TYPES::VoteTokenType, - /// The vote data this vote is signed over - pub vote_data: VoteData>, -} - /// A timeout vote #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub struct TimeoutVote2 { +pub struct TimeoutVote { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The view this vote was cast for @@ -108,7 +92,7 @@ pub struct TimeoutVote2 { pub vote_token: TYPES::VoteTokenType, } -impl VoteType> for TimeoutVote2 { +impl VoteType> for TimeoutVote { fn get_view(&self) -> ::Time { self.current_view } @@ -133,7 +117,7 @@ impl VoteType> for TimeoutVote2< /// A timeout vote // #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] // #[serde(bound(deserialize = ""))] -// pub struct TimeoutVote2 { +// pub struct TimeoutVote { // /// The signature share associated with this vote // pub signature: (EncodedPublicKey, EncodedSignature), // /// The view this vote was cast for @@ -142,7 +126,7 @@ impl VoteType> for TimeoutVote2< // pub vote_token: TYPES::VoteTokenType, // } -// impl VoteType for TimeoutVote2 { +// impl VoteType for TimeoutVote { // fn get_view(&self) -> ::Time { // self.current_view // } @@ -262,8 +246,6 @@ pub enum QuorumVote> { Yes(YesOrNoVote), /// Negative vote. No(YesOrNoVote), - /// Timeout vote. - Timeout(TimeoutVote), } impl VoteType> for DAVote { @@ -299,7 +281,6 @@ impl> VoteType TYPES::Time { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.current_view, - QuorumVote::Timeout(v) => v.current_view, } } @@ -312,13 +293,11 @@ impl> VoteType VoteData> { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), - QuorumVote::Timeout(_) => unimplemented!(), } } fn get_vote_token(&self) -> ::VoteTokenType { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token.clone(), - QuorumVote::Timeout(_) => unimplemented!(), } } } @@ -329,7 +308,6 @@ impl> QuorumVote pub fn signature(&self) -> EncodedSignature { match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), - Self::Timeout(vote) => vote.signature.1.clone(), } } /// Get the signature key. @@ -339,7 +317,6 @@ impl> QuorumVote pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), - Self::Timeout(vote) => vote.signature.0.clone(), }; ::from_bytes(&encoded).unwrap() } @@ -377,17 +354,8 @@ impl VoteType>> for ViewS } } -/// The aggreation of votes, implemented by `VoteAccumulator`. -pub trait Accumulator: Sized { - /// Accumate the `val` to the current state. - /// - /// If a threshold is reached, returns `U` (e.g., a certificate). Else, returns `Self` and - /// continues accumulating items. - fn append(self, val: T) -> Either; -} - /// Accumulator trait used to accumulate votes into an `AssembledSignature` -pub trait Accumulator2< +pub trait Accumulator< TYPES: NodeType, COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, VOTE: VoteType, @@ -406,6 +374,7 @@ pub trait Accumulator2< // TODO Make a default accumulator // https://github.com/EspressoSystems/HotShot/issues/1797 +/// Accumulator for `TimeoutVote`s pub struct TimeoutVoteAccumulator< TYPES: NodeType, COMMITMENT: Serialize + Clone + for<'a> Deserialize<'a>, @@ -427,7 +396,7 @@ impl< TYPES: NodeType, COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, VOTE: VoteType, - > Accumulator2 for DAVoteAccumulator + > Accumulator for DAVoteAccumulator { fn append( mut self, @@ -499,7 +468,7 @@ impl< TYPES: NodeType, COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, VOTE: VoteType, - > Accumulator2 for TimeoutVoteAccumulator + > Accumulator for TimeoutVoteAccumulator { fn append( mut self, @@ -614,7 +583,7 @@ impl< TYPES: NodeType, COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, VOTE: VoteType, - > Accumulator2 for QuorumVoteAccumulator + > Accumulator for QuorumVoteAccumulator { fn append( mut self, @@ -744,7 +713,7 @@ impl< TYPES: NodeType, COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, VOTE: VoteType, - > Accumulator2 for ViewSyncVoteAccumulator + > Accumulator for ViewSyncVoteAccumulator { #[allow(clippy::too_many_lines)] fn append( @@ -889,32 +858,6 @@ impl< } } -/// Placeholder accumulator; will be replaced by accumulator for each certificate type -pub struct AccumulatorPlaceholder< - TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, - VOTE: VoteType, -> { - /// Phantom data to make compiler happy - pub phantom: PhantomData<(TYPES, VOTE, COMMITMENT)>, -} - -impl< - TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, - VOTE: VoteType, - > Accumulator2 for AccumulatorPlaceholder -{ - fn append( - self, - _vote: VOTE, - _vote_node_id: usize, - _stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either> { - either::Left(self) - } -} - /// Mapping of commitments to vote tokens by key. type VoteMap = HashMap< COMMITMENT, @@ -923,221 +866,3 @@ type VoteMap = HashMap< BTreeMap, TOKEN)>, ), >; - -/// Describe the process of collecting signatures on block or leaf commitment, to form a DAC or QC, -/// respectively. -/// -/// TODO GG used only in election.rs; move this to there and make it private? -pub struct VoteAccumulator< - TOKEN, - COMMITMENT: Serialize + for<'a> Deserialize<'a> + Clone, - TYPES: NodeType, -> { - /// Map of all signatures accumlated so far - pub total_vote_outcomes: VoteMap, - /// Map of all da signatures accumlated so far - pub da_vote_outcomes: VoteMap, - /// Map of all yes signatures accumlated so far - pub yes_vote_outcomes: VoteMap, - /// Map of all no signatures accumlated so far - pub no_vote_outcomes: VoteMap, - /// Map of all view sync precommit votes accumulated thus far - pub viewsync_precommit_vote_outcomes: VoteMap, - /// Map of all view sync commit votes accumulated thus far - pub viewsync_commit_vote_outcomes: VoteMap, - /// Map of all view sync finalize votes accumulated thus far - pub viewsync_finalize_vote_outcomes: VoteMap, - /// A quorum's worth of stake, generall 2f + 1 - pub success_threshold: NonZeroU64, - /// Enough stake to know that we cannot possibly get a quorum, generally f + 1 - pub failure_threshold: NonZeroU64, - /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, -} - -impl - Accumulator< - ( - Commitment, - ( - EncodedPublicKey, - ( - EncodedSignature, - Vec<::StakeTableEntry>, - usize, - VoteData>, - TOKEN, - ), - ), - ), - AssembledSignature, - > for VoteAccumulator, TYPES> -where - TOKEN: Clone + VoteToken, -{ - #![allow(clippy::too_many_lines)] - fn append( - mut self, - val: ( - Commitment, - ( - EncodedPublicKey, - ( - EncodedSignature, - Vec<::StakeTableEntry>, - usize, - VoteData>, - TOKEN, - ), - ), - ), - ) -> Either> { - let (commitment, (key, (sig, entries, node_id, vote_data, token))) = val; - - // Desereialize the sig so that it can be assembeld into a QC - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&sig.0) - .expect("Deserialization on the signature shouldn't be able to fail."); - - let (total_stake_casted, total_vote_map) = self - .total_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Check for duplicate vote - if total_vote_map.contains_key(&key) { - return Either::Left(self); - } - let (da_stake_casted, da_vote_map) = self - .da_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (yes_stake_casted, yes_vote_map) = self - .yes_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (no_stake_casted, no_vote_map) = self - .no_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (viewsync_precommit_stake_casted, viewsync_precommit_vote_map) = self - .viewsync_precommit_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (viewsync_commit_stake_casted, viewsync_commit_vote_map) = self - .viewsync_commit_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (viewsync_finalize_stake_casted, viewsync_finalize_vote_map) = self - .viewsync_finalize_vote_outcomes - .entry(commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Accumulate the stake for each leaf commitment rather than the total - // stake of all votes, in case they correspond to inconsistent - // commitments. - - // update the active_keys and sig_lists - if self.signers.get(node_id).as_deref() == Some(&true) { - error!("node id already in signers"); - return Either::Left(self); - } - self.signers.set(node_id, true); - self.sig_lists.push(original_signature); - - *total_stake_casted += u64::from(token.vote_count()); - total_vote_map.insert(key.clone(), (sig.clone(), vote_data.clone(), token.clone())); - - match vote_data { - VoteData::DA(_) => { - *da_stake_casted += u64::from(token.vote_count()); - da_vote_map.insert(key, (sig, vote_data, token)); - } - VoteData::Yes(_) => { - *yes_stake_casted += u64::from(token.vote_count()); - yes_vote_map.insert(key, (sig, vote_data, token)); - } - VoteData::No(_) => { - *no_stake_casted += u64::from(token.vote_count()); - no_vote_map.insert(key, (sig, vote_data, token)); - } - VoteData::ViewSyncPreCommit(_) => { - *viewsync_precommit_stake_casted += u64::from(token.vote_count()); - viewsync_precommit_vote_map.insert(key, (sig, vote_data, token)); - } - VoteData::ViewSyncCommit(_) => { - *viewsync_commit_stake_casted += u64::from(token.vote_count()); - viewsync_commit_vote_map.insert(key, (sig, vote_data, token)); - } - VoteData::ViewSyncFinalize(_) => { - *viewsync_finalize_stake_casted += u64::from(token.vote_count()); - viewsync_finalize_vote_map.insert(key, (sig, vote_data, token)); - } - VoteData::Timeout(_) => { - unimplemented!() - } - } - - // This is a messy way of accounting for the different vote types, but we will be replacing this code very soon - if *total_stake_casted >= u64::from(self.success_threshold) { - // Do assemble for QC here - let real_qc_pp = ::get_public_parameter( - entries.clone(), - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - if *yes_stake_casted >= u64::from(self.success_threshold) { - self.yes_vote_outcomes.remove(&commitment); - return Either::Right(AssembledSignature::Yes(real_qc_sig)); - } else if *no_stake_casted >= u64::from(self.failure_threshold) { - self.total_vote_outcomes.remove(&commitment); - return Either::Right(AssembledSignature::No(real_qc_sig)); - } else if *da_stake_casted >= u64::from(self.success_threshold) { - self.da_vote_outcomes.remove(&commitment); - return Either::Right(AssembledSignature::DA(real_qc_sig)); - } else if *viewsync_commit_stake_casted >= u64::from(self.success_threshold) { - self.viewsync_commit_vote_outcomes - .remove(&commitment) - .unwrap(); - return Either::Right(AssembledSignature::ViewSyncCommit(real_qc_sig)); - } else if *viewsync_finalize_stake_casted >= u64::from(self.success_threshold) { - self.viewsync_finalize_vote_outcomes - .remove(&commitment) - .unwrap(); - return Either::Right(AssembledSignature::ViewSyncFinalize(real_qc_sig)); - } - } - if *viewsync_precommit_stake_casted >= u64::from(self.failure_threshold) { - let real_qc_pp = ::get_public_parameter( - entries, - U256::from(self.failure_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - self.viewsync_precommit_vote_outcomes - .remove(&commitment) - .unwrap(); - return Either::Right(AssembledSignature::ViewSyncPreCommit(real_qc_sig)); - } - Either::Left(self) - } -} From c466050502ba5967c0f506ba77e0069bb7d7b021 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 21 Sep 2023 18:44:58 -0700 Subject: [PATCH 0139/1393] Reorder events --- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 3 --- task-impls/src/transactions.rs | 9 +++++++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b9a10e3d6e..4436e80de2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -80,7 +80,7 @@ pub struct SequencingConsensusTaskState< pub consensus: Arc>>>, /// View timeout from config. pub timeout: u64, - /// View number this view is executing in + /// View number this view is executing in. pub cur_view: TYPES::Time, /// Current block submitted to DA diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index fe0a6ed311..5c4fb0fea3 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -643,9 +643,6 @@ where // Brodcast DA proposal // TODO ED We should send an event to do this, but just getting it to work for now - self.event_stream - .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) - .await; // if let Err(e) = self.api.send_da_broadcast(message.clone()).await { // consensus.metrics.failed_to_send_messages.add(1); // warn!(?message, ?e, "Could not broadcast leader proposal"); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index c01cf1ca2b..4aca5299f5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -235,7 +235,6 @@ where drop(consensus); - let block; let txns = self.wait_for_transactions(parent_leaf).await?; debug!("Prepare VID shares"); @@ -255,12 +254,18 @@ where txns_flatten.extend(txn.bytes()); } let vid_disperse = vid.disperse(&txns_flatten).unwrap(); - block = VIDBlockPayload::new(txns, vid_disperse.commit); + let block = VIDBlockPayload::new(txns, vid_disperse.commit); + // TODO (Keyao) Is the order of the following events and events in the original + // DA task correct? self.event_stream .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) .await; + self.event_stream + .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) + .await; + self.event_stream .publish(SequencingHotShotEvent::VidDisperseSend( Proposal { From 7b9a6913874d3496818281bfcc9fe9e9c62a032a Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 21:48:30 -0400 Subject: [PATCH 0140/1393] rename functions --- types/src/certificate.rs | 20 ++++---------------- types/src/traits/election.rs | 7 ++----- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index c540a349fc..7aaa28640c 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -91,10 +91,7 @@ impl type VoteAccumulator = TimeoutVoteAccumulator, Self::Vote>; - fn from_signatures_and_commitment( - signatures: AssembledSignature, - vote: Self::Vote, - ) -> Self { + fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { TimeoutCertificate { view_number: vote.get_view(), signatures, @@ -189,10 +186,7 @@ impl> type Vote = QuorumVote; type VoteAccumulator = QuorumVoteAccumulator, Self::Vote>; - fn from_signatures_and_commitment( - signatures: AssembledSignature, - vote: Self::Vote, - ) -> Self { + fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { let leaf_commitment = match vote.clone() { QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { vote_internal.leaf_commitment @@ -266,10 +260,7 @@ impl type Vote = DAVote; type VoteAccumulator = DAVoteAccumulator, Self::Vote>; - fn from_signatures_and_commitment( - signatures: AssembledSignature, - vote: Self::Vote, - ) -> Self { + fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { DACertificate { view_number: vote.get_view(), signatures, @@ -349,10 +340,7 @@ impl type VoteAccumulator = ViewSyncVoteAccumulator>, Self::Vote>; /// Build a QC from the threshold signature and commitment - fn from_signatures_and_commitment( - signatures: AssembledSignature, - vote: Self::Vote, - ) -> Self { + fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { let certificate_internal = ViewSyncCertificateInternal { round: vote.get_view(), relay: vote.relay(), diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index db9c46f9ea..a5f98d47fc 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -173,10 +173,7 @@ where // TODO ED Rename this function and rework this function parameters // Assumes last vote was valid since it caused a QC to form. // Removes need for relay on other cert specific fields - fn from_signatures_and_commitment( - signatures: AssembledSignature, - vote: Self::Vote, - ) -> Self; + fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self; /// Get the view number. fn view_number(&self) -> TIME; @@ -465,7 +462,7 @@ pub trait ConsensusExchange: Send + Sync { Either::Left(accumulator) => Either::Left(accumulator), Either::Right(signatures) => { // TODO ED Update this function to just take in the signatures and most recent vote - Either::Right(Self::Certificate::from_signatures_and_commitment( + Either::Right(Self::Certificate::create_certificateure( signatures, vote.clone(), )) From 561109f83f362bad28059763672321edf6c356e5 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 21:50:02 -0400 Subject: [PATCH 0141/1393] Lint imports --- hotshot/examples/infra/modDA.rs | 5 ++--- hotshot/src/lib.rs | 15 ++++----------- hotshot/src/tasks/mod.rs | 10 ++++------ orchestrator/src/config.rs | 2 +- task-impls/src/consensus.rs | 15 ++++++--------- task-impls/src/da.rs | 9 +++------ task-impls/src/transactions.rs | 7 +++---- testing/src/spinning_task.rs | 4 +--- testing/src/test_runner.rs | 9 +++++---- testing/tests/timeout.rs | 2 +- types/src/certificate.rs | 12 ++++-------- types/src/consensus.rs | 8 +++++--- types/src/traits/election.rs | 3 +-- 13 files changed, 40 insertions(+), 61 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 95cffd94ed..e6081e2da1 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -20,7 +20,6 @@ use hotshot_orchestrator::{ config::{NetworkConfig, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_types::HotShotConfig; use hotshot_types::{ certificate::ViewSyncCertificate, data::{QuorumProposal, SequencingLeaf, TestableLeaf}, @@ -37,6 +36,7 @@ use hotshot_types::{ }, state::{ConsensusTime, TestableBlock, TestableState}, }, + HotShotConfig, }; use libp2p_identity::{ ed25519::{self, SecretKey}, @@ -46,8 +46,7 @@ use libp2p_networking::{ network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, reexport::Multiaddr, }; -use std::{collections::BTreeSet, sync::Arc}; -use std::{num::NonZeroUsize, str::FromStr}; +use std::{collections::BTreeSet, num::NonZeroUsize, str::FromStr, sync::Arc}; // use libp2p::{ // identity::{ // ed25519::{Keypair as EdKeypair, SecretKey}, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 19fa2d9cde..320dbd892f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -54,8 +54,9 @@ use hotshot_task::{ task_launcher::TaskRunner, }; use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; -use hotshot_types::certificate::TimeoutCertificate; -use hotshot_types::traits::node_implementation::SequencingTimeoutEx; +use hotshot_types::{ + certificate::TimeoutCertificate, traits::node_implementation::SequencingTimeoutEx, +}; use hotshot_types::{ certificate::{DACertificate, ViewSyncCertificate}, @@ -244,16 +245,8 @@ impl> SystemContext { Ok(Self { inner }) } - /// "Starts" consensus by sending a `ViewChange` event + /// "Starts" consensus by sending a `QCFormed` event pub async fn start_consensus(&self) { - // self.inner - // .internal_event_stream - // .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new(1))) - // .await; - - // ED This isn't ideal... - // async_sleep(Duration::new(1, 0)).await; - self.inner .internal_event_stream .publish(SequencingHotShotEvent::QCFormed(either::Left( diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index a7aba740ab..0ebfb22535 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -26,19 +26,17 @@ use hotshot_task_impls::{ transactions::{TransactionTaskState, TransactionsTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; -use hotshot_types::certificate::TimeoutCertificate; -use hotshot_types::traits::network::ConsensusIntentEvent; -use hotshot_types::traits::node_implementation::SequencingTimeoutEx; use hotshot_types::{ - certificate::ViewSyncCertificate, + certificate::{TimeoutCertificate, ViewSyncCertificate}, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, message::{Message, Messages, SequencingMessage}, traits::{ election::{ConsensusExchange, Membership}, - network::{CommunicationChannel, TransmitType}, + network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ - CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, + CommitteeEx, ExchangesType, NodeImplementation, NodeType, SequencingTimeoutEx, + ViewSyncEx, }, state::ConsensusTime, BlockPayload, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 05a38b615c..c233ea33ee 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,6 +1,6 @@ use hotshot_types::{ExecutionType, HotShotConfig}; -use std::marker::PhantomData; use std::{ + marker::PhantomData, net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, time::Duration, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7c62af9500..9d6261e25e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -15,28 +15,25 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::certificate::TimeoutCertificate; -use hotshot_types::traits::election::TimeoutExchangeType; -use hotshot_types::traits::node_implementation::SequencingTimeoutEx; -use hotshot_types::vote::QuorumVoteAccumulator; -use hotshot_types::vote::TimeoutVoteAccumulator; use hotshot_types::{ - certificate::{DACertificate, QuorumCertificate}, + certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, consensus::{Consensus, View}, data::{LeafType, ProposalType, QuorumProposal, SequencingLeaf}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, - election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + election::{ConsensusExchange, QuorumExchangeType, SignedCertificate, TimeoutExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType, SequencingQuorumEx}, + node_implementation::{ + CommitteeEx, NodeImplementation, NodeType, SequencingQuorumEx, SequencingTimeoutEx, + }, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{QuorumVote, VoteType}, + vote::{QuorumVote, QuorumVoteAccumulator, TimeoutVoteAccumulator, VoteType}, }; use tracing::warn; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 96a4d18a48..2c80b9eaff 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -12,9 +12,6 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::election::SignedCertificate; -use hotshot_types::vote::DAVoteAccumulator; -use hotshot_types::vote::VoteType; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, @@ -22,7 +19,7 @@ use hotshot_types::{ message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, - election::{CommitteeExchangeType, ConsensusExchange, Membership}, + election::{CommitteeExchangeType, ConsensusExchange, Membership, SignedCertificate}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -30,11 +27,11 @@ use hotshot_types::{ BlockPayload, }, utils::ViewInner, + vote::{DAVoteAccumulator, VoteType}, }; use snafu::Snafu; -use std::marker::PhantomData; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index b41604376d..e75011fb31 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,12 +1,11 @@ use crate::events::SequencingHotShotEvent; -use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; use async_compatibility_layer::{ - art::async_timeout, async_primitives::subscribable_rwlock::ReadView, + art::async_timeout, + async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, }; use async_lock::RwLock; use bincode::config::Options; -use commit::Commitment; -use commit::Committable; +use commit::{Commitment, Committable}; use either::{Either, Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index a9d9e5d586..1144dbd2d8 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -7,9 +7,7 @@ use std::{ use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; use async_compatibility_layer::art::async_sleep; use futures::FutureExt; -use hotshot::traits::TestableNodeImplementation; -use hotshot::HotShotType; -use hotshot::SystemContext; +use hotshot::{traits::TestableNodeImplementation, HotShotType, SystemContext}; use hotshot_task::{ boxed_sync, event_stream::ChannelStream, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 056f0f8a26..036a88df32 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -3,8 +3,10 @@ use super::{ overall_safety_task::{OverallSafetyTask, RoundCtx}, txn_task::TxnTask, }; -use crate::spinning_task::UpDown; -use crate::test_launcher::{Networks, TestLauncher}; +use crate::{ + spinning_task::UpDown, + test_launcher::{Networks, TestLauncher}, +}; use hotshot::types::SystemContextHandle; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, HotShotType, SystemContext}; @@ -22,8 +24,7 @@ use hotshot_types::{ }, HotShotConfig, }; -use std::collections::HashMap; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; #[allow(deprecated)] use tracing::info; diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 64ea239321..6988c78a28 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -9,10 +9,10 @@ async fn test_timeout() { use hotshot_testing::node_types::SequencingWebImpl; - use hotshot_testing::overall_safety_task::OverallSafetyPropertiesDescription; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::SequencingTestTypes, + overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 7aaa28640c..b52720ad1a 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,19 +1,15 @@ //! Provides two types of cerrtificates and their accumulators. -use crate::vote::DAVoteAccumulator; -use crate::vote::QuorumVote; -use crate::vote::QuorumVoteAccumulator; -use crate::vote::TimeoutVote; -use crate::vote::TimeoutVoteAccumulator; -use crate::vote::ViewSyncVoteAccumulator; -use crate::vote::VoteType; use crate::{ data::{fake_commitment, serialize_signature, LeafType}, traits::{ election::SignedCertificate, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, }, - vote::{DAVote, ViewSyncData, ViewSyncVote}, + vote::{ + DAVote, DAVoteAccumulator, QuorumVote, QuorumVoteAccumulator, TimeoutVote, + TimeoutVoteAccumulator, ViewSyncData, ViewSyncVote, ViewSyncVoteAccumulator, VoteType, + }, }; use bincode::Options; use commit::{Commitment, Committable}; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index ce27d7a547..3a977b7461 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -1,9 +1,10 @@ //! Provides the core consensus types -pub use crate::traits::node_implementation::ViewQueue; -pub use crate::utils::{View, ViewInner}; +pub use crate::{ + traits::node_implementation::ViewQueue, + utils::{View, ViewInner}, +}; -use crate::utils::Terminator; use crate::{ certificate::QuorumCertificate, data::LeafType, @@ -12,6 +13,7 @@ use crate::{ metrics::{Counter, Gauge, Histogram, Metrics}, node_implementation::NodeType, }, + utils::Terminator, }; use commit::{Commitment, Committable}; use derivative::Derivative; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index a5f98d47fc..760ceeb1ef 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -21,7 +21,6 @@ use crate::{ vote::ViewSyncVoteInternal, }; -use crate::vote::Accumulator; use crate::{ data::LeafType, traits::{ @@ -30,7 +29,7 @@ use crate::{ signature_key::SignatureKey, state::ConsensusTime, }, - vote::{DAVote, QuorumVote, ViewSyncData, ViewSyncVote, VoteType, YesOrNoVote}, + vote::{Accumulator, DAVote, QuorumVote, ViewSyncData, ViewSyncVote, VoteType, YesOrNoVote}, }; use bincode::Options; use commit::{Commitment, Committable}; From 13de1bcf34c27adfb98f225f4df65ff689b1555c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 22:02:40 -0400 Subject: [PATCH 0142/1393] Additional cleanup --- .../traits/networking/web_server_network.rs | 2 - hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 9 ++--- task-impls/src/network.rs | 25 +++++-------- testing/tests/view_sync_task.rs | 8 ---- types/src/certificate.rs | 25 ++----------- types/src/traits/election.rs | 7 +--- types/src/vote.rs | 37 ------------------- 8 files changed, 20 insertions(+), 95 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 14409fecb7..5a67880e9f 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -145,7 +145,6 @@ impl Inner { message_purpose: MessagePurpose, view_number: u64, ) -> Result<(), NetworkError> { - error!("Polling for view {}", view_number); let mut vote_index = 0; let mut tx_index = 0; @@ -205,7 +204,6 @@ impl Inner { error!("We should not receive transactions in this function"); } MessagePurpose::Proposal => { - error!("Received proposal"); // Only pushing the first proposal since we will soon only be allowing 1 proposal per view self.broadcast_poll_queue .write() diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 5b8e737c1a..3c2a618b58 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -191,7 +191,7 @@ impl + 'static> SystemContextHandl if anchor_leaf.view_number == TYPES::Time::genesis() { let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); let mut qc = QuorumCertificate::::genesis(); - qc.set_leaf_commitment(leaf.commit()); + qc.leaf_commitment = leaf.commit(); let event = Event { view_number: TYPES::Time::genesis(), event: EventType::Decide { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9d6261e25e..1247d2ba8a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -295,8 +295,10 @@ where error!("The next leader has received an unexpected vote!"); } }, + // TODO: Code below is redundant of code above; can be fixed + // during exchange refactor + // https://github.com/EspressoSystems/HotShot/issues/1799 SequencingHotShotEvent::TimeoutVoteRecv(vote) => { - error!("received timeout vote for view {}", *vote.get_view()); if state.timeout_accumulator.is_right() { return (None, state); } @@ -1044,8 +1046,6 @@ where } } SequencingHotShotEvent::TimeoutVoteRecv(vote) => { - // debug!("Received quroum vote: {:?}", vote.get_view()); - if !self.timeout_exchange.is_leader(vote.get_view() + 1) { error!( "We are not the leader for view {} are we the leader for view + 1? {}", @@ -1190,9 +1190,7 @@ where let view = cert.view_number; self.certs.insert(view, cert); - // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { - // self.update_view(view + 1).await; self.current_proposal = None; } } @@ -1204,7 +1202,6 @@ where // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { - // self.update_view(view + 1).await; self.current_proposal = None; } } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index c81463e5c7..e376d25945 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -90,7 +90,6 @@ impl< SequencingHotShotEvent::ViewSyncCertificateRecv(view_sync_message) } GeneralConsensusMessage::TimeoutVote(message) => { - // error!("Recv timeout vote in network task for view {:?}", message.get_view()); SequencingHotShotEvent::TimeoutVoteRecv(message) } GeneralConsensusMessage::InternalTrigger(_) => { @@ -107,7 +106,6 @@ impl< SequencingHotShotEvent::DAProposalRecv(proposal.clone(), sender) } CommitteeConsensusMessage::DAVote(vote) => { - // error!("DA Vote message recv {:?}", vote.current_view); SequencingHotShotEvent::DAVoteRecv(vote.clone()) } CommitteeConsensusMessage::DACertificate(cert) => { @@ -244,7 +242,7 @@ impl< CommitteeConsensusMessage::VidVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.current_view)), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 + Some(membership.get_leader(vote.get_view())), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 ), SequencingHotShotEvent::DAVoteSend(vote) => ( vote.signature_key(), @@ -252,7 +250,7 @@ impl< CommitteeConsensusMessage::DAVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.current_view)), + Some(membership.get_leader(vote.get_view())), ), SequencingHotShotEvent::VidCertSend(certificate, sender) => ( sender, @@ -290,17 +288,14 @@ impl< Some(membership.get_leader(vote.round() + vote.relay())), ) } - SequencingHotShotEvent::TimeoutVoteSend(vote) => { - // error!("Sending timeout vote to leader of view {}", *vote.get_view() + 1); - ( - vote.get_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::TimeoutVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view() + 1)), - ) - } + SequencingHotShotEvent::TimeoutVoteSend(vote) => ( + vote.get_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::TimeoutVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view() + 1)), + ), SequencingHotShotEvent::ViewChange(view) => { self.view = view; return None; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 99255e8209..3bbcdafd70 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -74,15 +74,7 @@ async fn test_view_sync_task() { output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(3)), 1); output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(4)), 1); - // 2 `Timeout` events will trigger a replica task to handle a `ViewSyncTrigger` event, which - // will then publish a `ViewSyncVoteSend` event. output.insert(SequencingHotShotEvent::ViewSyncVoteSend(vote.clone()), 1); - // output.insert( - // SequencingHotShotEvent::ViewSyncTimeout(ViewNumber::new(5), 0, ViewSyncPhase::None), - // 1, - // ); - // Triggered by the `Timeout` events. - // output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(3)), 1); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(4)), 1); diff --git a/types/src/certificate.rs b/types/src/certificate.rs index b52720ad1a..6b31b68d1a 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -87,7 +87,7 @@ impl type VoteAccumulator = TimeoutVoteAccumulator, Self::Vote>; - fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { + fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { TimeoutCertificate { view_number: vote.get_view(), signatures, @@ -106,10 +106,6 @@ impl self.view_number.commit() } - fn set_leaf_commitment(&mut self, _commitment: Commitment) { - unimplemented!() - } - fn is_genesis(&self) -> bool { false } @@ -182,7 +178,7 @@ impl> type Vote = QuorumVote; type VoteAccumulator = QuorumVoteAccumulator, Self::Vote>; - fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { + fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { let leaf_commitment = match vote.clone() { QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { vote_internal.leaf_commitment @@ -210,10 +206,6 @@ impl> self.leaf_commitment } - fn set_leaf_commitment(&mut self, commitment: Commitment) { - self.leaf_commitment = commitment; - } - fn is_genesis(&self) -> bool { self.is_genesis } @@ -256,7 +248,7 @@ impl type Vote = DAVote; type VoteAccumulator = DAVoteAccumulator, Self::Vote>; - fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { + fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { DACertificate { view_number: vote.get_view(), signatures, @@ -276,10 +268,6 @@ impl self.block_commitment } - fn set_leaf_commitment(&mut self, _commitment: Commitment) { - // This function is only useful for QC. Will be removed after we have separated cert traits. - } - fn is_genesis(&self) -> bool { // This function is only useful for QC. Will be removed after we have separated cert traits. false @@ -336,7 +324,7 @@ impl type VoteAccumulator = ViewSyncVoteAccumulator>, Self::Vote>; /// Build a QC from the threshold signature and commitment - fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self { + fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { let certificate_internal = ViewSyncCertificateInternal { round: vote.get_view(), relay: vote.relay(), @@ -382,11 +370,6 @@ impl todo!() } - /// Set the leaf commitment. - fn set_leaf_commitment(&mut self, _commitment: Commitment>) { - todo!() - } - /// Get whether the certificate is for the genesis block. fn is_genesis(&self) -> bool { todo!() diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 760ceeb1ef..45d171eb38 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -172,7 +172,7 @@ where // TODO ED Rename this function and rework this function parameters // Assumes last vote was valid since it caused a QC to form. // Removes need for relay on other cert specific fields - fn create_certificateure(signatures: AssembledSignature, vote: Self::Vote) -> Self; + fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self; /// Get the view number. fn view_number(&self) -> TIME; @@ -186,9 +186,6 @@ where /// Get the leaf commitment. fn leaf_commitment(&self) -> COMMITMENT; - /// Set the leaf commitment. - fn set_leaf_commitment(&mut self, commitment: COMMITMENT); - /// Get whether the certificate is for the genesis block. fn is_genesis(&self) -> bool; @@ -461,7 +458,7 @@ pub trait ConsensusExchange: Send + Sync { Either::Left(accumulator) => Either::Left(accumulator), Either::Right(signatures) => { // TODO ED Update this function to just take in the signatures and most recent vote - Either::Right(Self::Certificate::create_certificateure( + Either::Right(Self::Certificate::create_certificate( signatures, vote.clone(), )) diff --git a/types/src/vote.rs b/types/src/vote.rs index 33a0c9eccb..d6b823dcd5 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -114,41 +114,6 @@ impl VoteType> for TimeoutVote { -// /// The signature share associated with this vote -// pub signature: (EncodedPublicKey, EncodedSignature), -// /// The view this vote was cast for -// pub current_view: TYPES::Time, -// /// The vote token generated by this replica -// pub vote_token: TYPES::VoteTokenType, -// } - -// impl VoteType for TimeoutVote { -// fn get_view(&self) -> ::Time { -// self.current_view -// } - -// fn get_key(&self) -> ::SignatureKey { -// ::from_bytes(&self.signature.0).unwrap() -// } - -// fn get_signature(&self) -> EncodedSignature { -// self.signature.1.clone() -// } - -// fn get_data(&self) -> VoteData { -// VoteData::Timeout(self.get_view().commit()) -// } - -// fn get_vote_token(&self) -> ::VoteTokenType { -// self.vote_token.clone() -// } - -// } - /// The internals of a view sync vote #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] @@ -313,7 +278,6 @@ impl> QuorumVote /// Get the signature key. /// # Panics /// If the deserialization fails. - pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), @@ -634,7 +598,6 @@ impl< self.signers.set(vote_node_id, true); self.sig_lists.push(original_signature); - // TODO ED Make all these get calls as local variables to avoid constantly calling them *total_stake_casted += u64::from(vote.get_vote_token().vote_count()); total_vote_map.insert( encoded_key.clone(), From 5137f72f2388a074c2ccbab0166f249b9b65b404 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 22:05:05 -0400 Subject: [PATCH 0143/1393] Additional cleanup --- hotshot/src/types/handle.rs | 42 ------------------------------------ types/src/traits/election.rs | 2 -- 2 files changed, 44 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 3c2a618b58..b61eda8e93 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -215,48 +215,6 @@ impl + 'static> SystemContextHandl self.maybe_do_genesis_init().await; } - /// iterate through all events on a [`NodeImplementation`] and determine if the node finished - /// successfully - /// # Errors - /// Errors if unable to obtain storage - /// # Panics - /// Panics if the event stream is shut down while this is running - // pub async fn collect_round_events( - // &mut self, - // ) -> Result< - // ( - // Vec<>::Leaf>, - // QuorumCertificate>::Leaf>, - // ), - // HotShotError, - // > { - // // TODO we should probably do a view check - // // but we can do that later. It's non-obvious how to get the view number out - // // to check against - // - // // drain all events from this node - // let mut results = Ok((vec![], QuorumCertificate::genesis())); - // loop { - // // unwrap is fine here since the thing hasn't been shut down - // let event = self.next_event().await.unwrap(); - // match event.event { - // EventType::ReplicaViewTimeout { view_number: time } => { - // error!(?event, "Replica timed out!"); - // results = Err(HotShotError::ViewTimeoutError { - // view_number: time, - // state: RoundTimedoutState::TestCollectRoundEventsTimedOut, - // }); - // } - // EventType::Decide { leaf_chain, qc } => { - // results = Ok((leaf_chain.to_vec(), (*qc).clone())); - // } - // EventType::ViewFinished { view_number: _ } => return results, - // event => { - // debug!("recv-ed event {:?}", event); - // } - // } - // } - // } /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 45d171eb38..9fef1bdc48 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -433,7 +433,6 @@ pub trait ConsensusExchange: Send + Sync { &vote.get_key(), &vote.get_signature(), &vote.get_data(), - // TODO ED We've had this comment for a while: Ignoring deserialization errors below since we are getting rid of it soon &Checked::Unchecked(vote.get_vote_token()), ) { error!("Invalid vote!"); @@ -457,7 +456,6 @@ pub trait ConsensusExchange: Send + Sync { ) { Either::Left(accumulator) => Either::Left(accumulator), Either::Right(signatures) => { - // TODO ED Update this function to just take in the signatures and most recent vote Either::Right(Self::Certificate::create_certificate( signatures, vote.clone(), From 16e53cda284aced2490b5d027be191c20803dc9f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 21 Sep 2023 22:07:32 -0400 Subject: [PATCH 0144/1393] Additional cleanup --- hotshot/src/types/handle.rs | 1 - types/src/traits/election.rs | 10 ++++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index b61eda8e93..7a6a7b53f5 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -215,7 +215,6 @@ impl + 'static> SystemContextHandl self.maybe_do_genesis_init().await; } - /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data pub fn storage(&self) -> &I::Storage { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 9fef1bdc48..90562f2822 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -455,12 +455,10 @@ pub trait ConsensusExchange: Send + Sync { self.membership().get_committee_qc_stake_table(), ) { Either::Left(accumulator) => Either::Left(accumulator), - Either::Right(signatures) => { - Either::Right(Self::Certificate::create_certificate( - signatures, - vote.clone(), - )) - } + Either::Right(signatures) => Either::Right(Self::Certificate::create_certificate( + signatures, + vote.clone(), + )), } } From bf8a492f11c83f492cf6cd847d86e1daf2f385bf Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 21 Sep 2023 21:44:24 -0700 Subject: [PATCH 0145/1393] Add a logging to make test_success pass --- task-impls/src/transactions.rs | 20 ++++++++++++-------- types/src/block_impl.rs | 8 -------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4aca5299f5..2e46ee8869 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -330,11 +330,14 @@ where loop { let all_txns = self.transactions.cloned().await; + tracing::error!("All txns {:?}", all_txns); debug!("Size of transactions: {}", all_txns.len()); - let unclaimed_txns: Vec<_> = all_txns - .iter() - .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) - .collect(); + // TODO (Keyao) How to prevent duplicate txn now that we've removed the ID? + // let unclaimed_txns: Vec<_> = all_txns + // .iter() + // .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) + // .collect(); + let unclaimed_txns = all_txns; let time_past = task_start_time.elapsed(); if unclaimed_txns.len() < self.api.min_transactions() @@ -363,11 +366,12 @@ where let txns: Vec = all_txns .iter() .filter_map(|(txn_hash, txn)| { - if previous_used_txns.contains(txn_hash) { - None - } else { + // TODO (Keyao) How to prevent duplicate txn now that we've removed the ID? + // if previous_used_txns.contains(txn_hash) { + // None + // } else { Some(txn.clone()) - } + // } }) .collect(); Some(txns) diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index bfe4ba6e59..4677cd5ab6 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -37,14 +37,6 @@ impl Transaction for VIDTransaction { } } -impl VIDTransaction { - /// create a new transaction - #[must_use] - pub fn new() -> Self { - Self(Vec::new()) - } -} - /// The error type for block payload. #[derive(Snafu, Debug)] pub enum BlockPayloadError { From 0238b1741dae82dbadbbdbf90678154647735c8f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 22 Sep 2023 08:06:04 -0400 Subject: [PATCH 0146/1393] Fix lint bug --- task-impls/src/consensus.rs | 6 +++++- task-impls/src/da.rs | 1 - task-impls/src/network.rs | 4 ---- types/src/traits/election.rs | 5 ++++- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1247d2ba8a..cf49cc15a4 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -673,9 +673,13 @@ where return; }; + if timeout_cert.view_number != view - 1 { + warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); + } + if !self .timeout_exchange - .is_valid_cert(&timeout_cert.clone(), view.commit()) + .is_valid_cert(&timeout_cert.clone(), timeout_cert.view_number.commit()) { warn!("Timeout certificate for view {} was invalid", *view); return; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 2c80b9eaff..15fc9b1cfe 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -159,7 +159,6 @@ where &vote.block_commitment, ) { Left(new_accumulator) => { - error!("Not enough DA votes yet"); state.accumulator = either::Left(new_accumulator); } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index e376d25945..38b020e79a 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -99,10 +99,6 @@ impl< }, Either::Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(proposal) => { - error!( - "Received da proposal for view {:?}", - proposal.clone().data.view_number - ); SequencingHotShotEvent::DAProposalRecv(proposal.clone(), sender) } CommitteeConsensusMessage::DAVote(vote) => { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 90562f2822..3c5cfc713e 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -329,7 +329,10 @@ pub trait ConsensusExchange: Send + Sync { let leaf_commitment = qc.leaf_commitment(); if leaf_commitment != commit { - error!("Leaf commitment does not equal parent commitment"); + error!( + "Leaf commitment does not equal parent commitment {:?}", + qc.signatures() + ); return false; } From 5914abc5b3d76e5e7690fa6c06daa8f7a26cdd5c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 22 Sep 2023 08:07:02 -0400 Subject: [PATCH 0147/1393] Fix lint bug --- task-impls/src/consensus.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index cf49cc15a4..a5267325f4 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -675,6 +675,7 @@ where if timeout_cert.view_number != view - 1 { warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); + return; } if !self From 9a40dca859c9cd34b18a3605970c0da1ba600b4e Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 22 Sep 2023 08:13:27 -0400 Subject: [PATCH 0148/1393] Fix fmt --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a5267325f4..7ea35fd1d8 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -675,7 +675,7 @@ where if timeout_cert.view_number != view - 1 { warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); - return; + return; } if !self From 88426db77b6c3d50354e1cc3bb05fbae782d1c30 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 22 Sep 2023 08:44:32 -0400 Subject: [PATCH 0149/1393] Update view sync view update --- task-impls/src/view_sync.rs | 2 +- testing/tests/view_sync_task.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 86be8ba62e..bdbbb19a36 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -578,7 +578,7 @@ where }); } else { // If this is the first timeout we've seen advance to the next view - self.current_view = view_number + 1; + self.current_view = view_number; self.event_stream .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( *self.current_view, diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 3bbcdafd70..d42a3aa2ab 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -75,8 +75,9 @@ async fn test_view_sync_task() { output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(4)), 1); output.insert(SequencingHotShotEvent::ViewSyncVoteSend(vote.clone()), 1); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(3)), 1); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(4)), 1); + // output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(4)), 1); output.insert(SequencingHotShotEvent::Shutdown, 1); From d35a894771ca851b3030671131c6c0f2350e929e Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 22 Sep 2023 09:11:02 -0400 Subject: [PATCH 0150/1393] Add 1 thread to catchup just command --- testing/tests/view_sync_task.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index d42a3aa2ab..8116dcf2a2 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -77,7 +77,6 @@ async fn test_view_sync_task() { output.insert(SequencingHotShotEvent::ViewSyncVoteSend(vote.clone()), 1); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(3)), 1); - // output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(4)), 1); output.insert(SequencingHotShotEvent::Shutdown, 1); From faf59ee235eba2804e1d7fdd4dd92a8d61924fe0 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 22 Sep 2023 15:01:52 -0700 Subject: [PATCH 0151/1393] Add clone() to NetworkingMetrics --- hotshot/Cargo.toml | 1 + hotshot/src/traits/networking.rs | 2 +- hotshot/src/traits/networking/libp2p_network.rs | 9 ++++++--- types/Cargo.toml | 1 + types/src/traits/metrics.rs | 15 ++++++++++----- 5 files changed, 19 insertions(+), 9 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 671519ec84..34f9a9b63e 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -112,6 +112,7 @@ sha3 = "^0.10" snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.14" } tracing = { workspace = true } typenum = { workspace = true } diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index e48e71d29b..017fae77ec 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -14,10 +14,10 @@ pub use hotshot_types::traits::network::{ ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, }; - use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; /// Contains the metrics that we're interested in from the networking interfaces +#[derive(Clone)] pub struct NetworkingMetrics { #[allow(dead_code)] /// A [`Gauge`] which tracks how many peers are connected diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index da8784e0a7..1cea7633c1 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1,7 +1,6 @@ //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network - use super::NetworkingMetrics; use crate::NodeImplementation; use async_compatibility_layer::{ @@ -320,7 +319,7 @@ impl Libp2pNetwork { let (node_lookup_send, node_lookup_recv) = unbounded(); let (cache_gc_shutdown_send, cache_gc_shutdown_recv) = unbounded::<()>(); - let result = Libp2pNetwork { + let mut result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: network_handle, broadcast_recv, @@ -404,13 +403,14 @@ impl Libp2pNetwork { } /// Initiates connection to the outside world - fn spawn_connect(&self, id: usize) { + fn spawn_connect(&mut self, id: usize) { let pk = self.inner.pk.clone(); let bootstrap_ref = self.inner.bootstrap_addrs.clone(); let num_bootstrap = self.inner.bootstrap_addrs_len; let handle = self.inner.handle.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); let node_type = self.inner.handle.config().node_type; + let metrics_connected_peers = self.inner.metrics.connected_peers.clone(); async_spawn({ let is_ready = self.inner.is_ready.clone(); async move { @@ -439,6 +439,9 @@ impl Libp2pNetwork { .await .unwrap(); + let connected_num = handle.num_connected().await?; + metrics_connected_peers.set(connected_num); + while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } diff --git a/types/Cargo.toml b/types/Cargo.toml index 742882cce2..9f65d06657 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -49,6 +49,7 @@ time = { workspace = true } tracing = { workspace = true } ethereum-types = { workspace = true } typenum = { workspace = true } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.14" } [dev-dependencies] serde_json = "1.0.107" diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 4d73e5f67a..78aaef7817 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -7,6 +7,7 @@ //! - [`Label`]: Stores the last string (example usage: current version, network online/offline) use std::fmt::Debug; +use dyn_clone::DynClone; /// The metrics type. pub trait Metrics: Send + Sync { @@ -78,12 +79,12 @@ impl Label for NoMetrics { } /// An ever-incrementing counter -pub trait Counter: Send + Sync + Debug { +pub trait Counter: Send + Sync + Debug + DynClone { /// Add a value to the counter fn add(&self, amount: usize); } /// A gauge that stores the latest value. -pub trait Gauge: Send + Sync + Debug { +pub trait Gauge: Send + Sync + Debug + DynClone { /// Set the gauge value fn set(&self, amount: usize); @@ -92,16 +93,20 @@ pub trait Gauge: Send + Sync + Debug { } /// A histogram which will record a series of points. -pub trait Histogram: Send + Sync + Debug { +pub trait Histogram: Send + Sync + Debug + DynClone { /// Add a point to this histogram. fn add_point(&self, point: f64); } /// A label that stores the last string value. -pub trait Label: Send + Sync { +pub trait Label: Send + Sync + DynClone { /// Set the label value fn set(&self, value: String); } +dyn_clone::clone_trait_object!(Gauge); +dyn_clone::clone_trait_object!(Counter); +dyn_clone::clone_trait_object!(Histogram); +dyn_clone::clone_trait_object!(Label); #[cfg(test)] mod test { @@ -111,7 +116,7 @@ mod test { sync::{Arc, Mutex}, }; - #[derive(Debug)] + #[derive(Debug, Clone)] struct TestMetrics { prefix: String, values: Arc>, From 2efbcbcd02b7d1b58b15a918bb54354439e89aa3 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 22 Sep 2023 16:14:15 -0700 Subject: [PATCH 0152/1393] Make tests pass --- orchestrator/default-libp2p-run-config.toml | 2 +- orchestrator/default-run-config.toml | 2 +- orchestrator/default-web-server-run-config.toml | 2 +- task-impls/src/da.rs | 10 ++++------ task-impls/src/transactions.rs | 7 +------ testing/src/test_builder.rs | 6 +++--- testing/tests/da_task.rs | 4 ---- 7 files changed, 11 insertions(+), 22 deletions(-) diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index 5757f4d9f9..220935614e 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -75,5 +75,5 @@ secs = 0 nanos = 0 [config.propose_max_round_time] -secs = 1 +secs = 2 nanos = 0 diff --git a/orchestrator/default-run-config.toml b/orchestrator/default-run-config.toml index fe34d75811..b8a4b19478 100644 --- a/orchestrator/default-run-config.toml +++ b/orchestrator/default-run-config.toml @@ -56,7 +56,7 @@ secs = 0 nanos = 0 [config.propose_max_round_time] -secs = 1 +secs = 2 nanos = 0 [web_server_config] diff --git a/orchestrator/default-web-server-run-config.toml b/orchestrator/default-web-server-run-config.toml index 0ea0f86ccf..28229f4d76 100644 --- a/orchestrator/default-web-server-run-config.toml +++ b/orchestrator/default-web-server-run-config.toml @@ -57,7 +57,7 @@ secs = 0 nanos = 0 [config.propose_max_round_time] -secs = 1 +secs = 2 nanos = 0 [web_server_config] diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 5c4fb0fea3..6b3ba021a5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -643,12 +643,10 @@ where // Brodcast DA proposal // TODO ED We should send an event to do this, but just getting it to work for now - // if let Err(e) = self.api.send_da_broadcast(message.clone()).await { - // consensus.metrics.failed_to_send_messages.add(1); - // warn!(?message, ?e, "Could not broadcast leader proposal"); - // } else { - // consensus.metrics.outgoing_broadcast_messages.add(1); - // } + self.event_stream + .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) + .await; + self.event_stream .publish(SequencingHotShotEvent::DAProposalSend( message.clone(), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2e46ee8869..08854a98bb 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -262,10 +262,6 @@ where .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) .await; - self.event_stream - .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) - .await; - self.event_stream .publish(SequencingHotShotEvent::VidDisperseSend( Proposal { @@ -330,7 +326,6 @@ where loop { let all_txns = self.transactions.cloned().await; - tracing::error!("All txns {:?}", all_txns); debug!("Size of transactions: {}", all_txns.len()); // TODO (Keyao) How to prevent duplicate txn now that we've removed the ID? // let unclaimed_txns: Vec<_> = all_txns @@ -370,7 +365,7 @@ where // if previous_used_txns.contains(txn_hash) { // None // } else { - Some(txn.clone()) + Some(txn.clone()) // } }) .collect(); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 326debb69b..3515f04368 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -124,7 +124,7 @@ impl TestMetadata { } } - /// Default setting with 20 nodes and 10 views of successful views. + /// Default setting with 20 nodes and 8 views of successful views. pub fn default_more_nodes_less_success() -> TestMetadata { TestMetadata { total_nodes: 20, @@ -139,11 +139,11 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(40, 0), + duration: Duration::new(200, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 10, + num_successful_views: 8, ..Default::default() }, ..TestMetadata::default() diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 7c8b389d95..f1b1744884 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -103,10 +103,6 @@ async fn test_da_task() { let da_vote = committee_exchange.create_da_message(block.commit(), ViewNumber::new(2), vote_token); output.insert(SequencingHotShotEvent::DAVoteSend(da_vote), 1); - output.insert( - SequencingHotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 1, - ); let vote_token = committee_exchange .make_vote_token(ViewNumber::new(2)) From 62047c5c81c5efc5049e685f8add760cb050ddf9 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 22 Sep 2023 16:48:54 -0700 Subject: [PATCH 0153/1393] fix lint --- hotshot/src/traits/networking.rs | 3 +-- hotshot/src/traits/networking/libp2p_network.rs | 1 - types/src/traits/metrics.rs | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 017fae77ec..e1ac026546 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -9,12 +9,11 @@ pub mod libp2p_network; pub mod memory_network; pub mod web_server_libp2p_fallback; pub mod web_server_network; - +use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; pub use hotshot_types::traits::network::{ ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, }; -use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; /// Contains the metrics that we're interested in from the networking interfaces #[derive(Clone)] diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 1cea7633c1..ac949a331f 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -441,7 +441,6 @@ impl Libp2pNetwork { let connected_num = handle.num_connected().await?; metrics_connected_peers.set(connected_num); - while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 78aaef7817..e326fca9bc 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -6,8 +6,8 @@ //! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) //! - [`Label`]: Stores the last string (example usage: current version, network online/offline) -use std::fmt::Debug; use dyn_clone::DynClone; +use std::fmt::Debug; /// The metrics type. pub trait Metrics: Send + Sync { From 7988fc3f9299c2ad8c8be0e5adf532cbbd9da4c7 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 25 Sep 2023 12:44:16 -0700 Subject: [PATCH 0154/1393] Add gh issue and cleanup --- hotshot/src/tasks/mod.rs | 5 ++-- task-impls/src/transactions.rs | 49 +++++++++++++++++----------------- types/src/block_impl.rs | 31 ++++++++++++--------- 3 files changed, 45 insertions(+), 40 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bed3b3b344..fafc703075 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -38,7 +38,7 @@ use hotshot_types::{ node_implementation::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, }, - state::{ConsensusTime, TestableBlock}, + state::ConsensusTime, }, vote::{ViewSyncData, VoteType}, }; @@ -289,8 +289,7 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - // TODO (Keyao) Shouldn't use test function. - block: ::genesis(), + block: VIDBlockPayload::genesis(), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 08854a98bb..b986cc931f 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -7,7 +7,7 @@ use async_lock::RwLock; use bincode::config::Options; use commit::Commitment; use commit::Committable; -use either::{Either, Left, Right}; +use either::{Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, @@ -239,14 +239,12 @@ where debug!("Prepare VID shares"); if !txns.is_empty() { - /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 + // TODO https://github.com/EspressoSystems/HotShot/issues/1693 const NUM_STORAGE_NODES: usize = 10; - /// TODO https://github.com/EspressoSystems/HotShot/issues/1693 + // TODO https://github.com/EspressoSystems/HotShot/issues/1693 const NUM_CHUNKS: usize = 5; - // TODO https://github.com/EspressoSystems/HotShot/issues/1686 let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); // TODO https://github.com/EspressoSystems/jellyfish/issues/375 let mut txns_flatten = Vec::new(); @@ -256,8 +254,6 @@ where let vid_disperse = vid.disperse(&txns_flatten).unwrap(); let block = VIDBlockPayload::new(txns, vid_disperse.commit); - // TODO (Keyao) Is the order of the following events and events in the original - // DA task correct? self.event_stream .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) .await; @@ -312,22 +308,25 @@ where #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( &self, - parent_leaf: SequencingLeaf, + _parent_leaf: SequencingLeaf, ) -> Option> { let task_start_time = Instant::now(); + // TODO (Keyao) Investigate the use of transaction hash + // // let parent_leaf = self.parent_leaf().await?; - let previous_used_txns = match parent_leaf.deltas { - Either::Left(block) => block.contained_transactions(), - Either::Right(_commitment) => HashSet::new(), - }; + // let previous_used_txns = match parent_leaf.deltas { + // Either::Left(block) => block.contained_transactions(), + // Either::Right(_commitment) => HashSet::new(), + // }; let receiver = self.transactions.subscribe().await; loop { let all_txns = self.transactions.cloned().await; debug!("Size of transactions: {}", all_txns.len()); - // TODO (Keyao) How to prevent duplicate txn now that we've removed the ID? + // TODO (Keyao) Investigate the use of transaction hash + // // let unclaimed_txns: Vec<_> = all_txns // .iter() // .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) @@ -358,17 +357,19 @@ where break; } let all_txns = self.transactions.cloned().await; - let txns: Vec = all_txns - .iter() - .filter_map(|(txn_hash, txn)| { - // TODO (Keyao) How to prevent duplicate txn now that we've removed the ID? - // if previous_used_txns.contains(txn_hash) { - // None - // } else { - Some(txn.clone()) - // } - }) - .collect(); + // TODO (Keyao) Investigate the use of transaction hash + // + let txns: Vec = all_txns.values().cloned().collect(); + // let txns: Vec = all_txns + // .iter() + // .filter_map(|(txn_hash, txn)| { + // if previous_used_txns.contains(txn_hash) { + // None + // } else { + // Some(txn.clone()) + // } + // }) + // .collect(); Some(txns) } diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 4677cd5ab6..634adc2d4c 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -73,6 +73,23 @@ impl VIDBlockPayload { commitment, } } + + /// Create a genesis block payload with transaction bytes `vec![0]`. + /// # Panics + /// If the `VidScheme` construction fails. + #[must_use] + pub fn genesis() -> Self { + // TODO + const NUM_STORAGE_NODES: usize = 10; + // TODO + const NUM_CHUNKS: usize = 5; + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let txn = vec![0]; + let vid_disperse = vid.disperse(&txn).unwrap(); + VIDBlockPayload::new(vec![VIDTransaction(txn)], vid_disperse.commit) + } } impl Committable for VIDBlockPayload { @@ -94,19 +111,7 @@ impl Display for VIDBlockPayload { impl TestableBlock for VIDBlockPayload { fn genesis() -> Self { - /// TODO - const NUM_STORAGE_NODES: usize = 10; - /// TODO - const NUM_CHUNKS: usize = 5; - - // TODO - let srs = test_srs(NUM_STORAGE_NODES); - - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - - let txn = vec![0u8]; - let vid_disperse = vid.disperse(&txn).unwrap(); - VIDBlockPayload::new(vec![VIDTransaction(txn)], vid_disperse.commit) + Self::genesis() } fn txn_count(&self) -> u64 { From e92f3199135207d39446f73830d220ee9e2ed685 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 25 Sep 2023 13:21:20 -0700 Subject: [PATCH 0155/1393] Add another issue and fix lint --- orchestrator/default-libp2p-run-config.toml | 4 ++++ task-impls/src/transactions.rs | 6 +----- testing/src/task_helpers.rs | 4 +--- types/src/block_impl.rs | 11 +++++++---- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index 220935614e..9fb6cb6a17 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -51,6 +51,8 @@ mesh_outbound_min = 2 mesh_n = 4 next_view_timeout = 10 propose_min_round_time = 0 +# TODO Update libp2p config to use consistent propose_max_round_time. +# propose_max_round_time = 10 online_time = 10 num_txn_per_round = 10 @@ -75,5 +77,7 @@ secs = 0 nanos = 0 [config.propose_max_round_time] +# TODO Update libp2p config to use consistent propose_max_round_time. +# secs = 2 nanos = 0 diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index b986cc931f..11475fdbcf 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -15,7 +15,7 @@ use hotshot_task::{ task_impls::HSTWithEvent, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockPayload, VIDTransaction, NUM_CHUNKS, NUM_STORAGE_NODES}, certificate::DACertificate, consensus::Consensus, data::{SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, @@ -239,10 +239,6 @@ where debug!("Prepare VID shares"); if !txns.is_empty() { - // TODO https://github.com/EspressoSystems/HotShot/issues/1693 - const NUM_STORAGE_NODES: usize = 10; - // TODO https://github.com/EspressoSystems/HotShot/issues/1693 - const NUM_CHUNKS: usize = 5; // TODO https://github.com/EspressoSystems/HotShot/issues/1686 let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 1a8123a35b..f4e7fe49a8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,7 +13,7 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ - block_impl::VIDBlockPayload, + block_impl::{VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ @@ -165,8 +165,6 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey } pub fn vid_init() -> VidScheme { - const NUM_STORAGE_NODES: usize = 10; - const NUM_CHUNKS: usize = 5; let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap() } diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 634adc2d4c..03a9ca3737 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -13,6 +13,13 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; use snafu::Snafu; +// TODO +/// Number of storage nodes for VID initiation. +pub const NUM_STORAGE_NODES: usize = 10; +// TODO +/// Number of chunks for VID initiation. +pub const NUM_CHUNKS: usize = 5; + /// The transaction in a [`VIDBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct VIDTransaction(pub Vec); @@ -79,10 +86,6 @@ impl VIDBlockPayload { /// If the `VidScheme` construction fails. #[must_use] pub fn genesis() -> Self { - // TODO - const NUM_STORAGE_NODES: usize = 10; - // TODO - const NUM_CHUNKS: usize = 5; // TODO let srs = test_srs(NUM_STORAGE_NODES); let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); From d2ced6eb7d5e6fd3dcde14e1e1a38466f4d6337c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 25 Sep 2023 15:07:57 -0700 Subject: [PATCH 0156/1393] add Debug to NetworkingMetrics --- hotshot/src/traits/networking.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index e1ac026546..3f197d6f94 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -9,6 +9,7 @@ pub mod libp2p_network; pub mod memory_network; pub mod web_server_libp2p_fallback; pub mod web_server_network; +use custom_debug::Debug; use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; pub use hotshot_types::traits::network::{ ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, @@ -16,7 +17,7 @@ pub use hotshot_types::traits::network::{ }; /// Contains the metrics that we're interested in from the networking interfaces -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct NetworkingMetrics { #[allow(dead_code)] /// A [`Gauge`] which tracks how many peers are connected From 767abe181b49bee966e449587dd0914205a819e6 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Mon, 25 Sep 2023 18:30:29 -0400 Subject: [PATCH 0157/1393] chore: Remove Committable for more types, use CommitmentBounds (#1812) * remove Committable from VoteMetaData type param * remove Committable from VoteToken * remove Committable from ConsensusExchange::Commitment assoc type (whoa that's big) * update commit package (for Send, Sync on Commitment) * WIP remove Committable from QuorumCertificate. there's a circular dependency QuorumCertificate -> SignedCertificate::Vote -> QuorumVote -> YesOrNoVote -> QuorumCertificate so remove Committable from all those, too. need an alternative to fake_commitment in order to proceed * use latest 'commit' for default_commitment_no_preimage() * lint, remove unneeded pub * fix merge bug, tests pass locally with lots of error logs * lint * use CommitmentBounds everywhere, fix ensuing build breaks * cargo fmt why didn't it run automatically for me? * lint again grrrrrrrrr --- hotshot/src/demo.rs | 2 +- hotshot/src/lib.rs | 12 ++-- hotshot/src/tasks/mod.rs | 23 ++++--- hotshot/src/types/handle.rs | 8 +-- task-impls/src/consensus.rs | 44 +++++++------- task-impls/src/da.rs | 12 ++-- task-impls/src/events.rs | 10 +-- task-impls/src/transactions.rs | 9 ++- task-impls/src/view_sync.rs | 14 ++--- testing/src/overall_safety_task.rs | 8 ++- types/src/certificate.rs | 46 +++++++------- types/src/consensus.rs | 2 +- types/src/data.rs | 29 +++++---- types/src/event.rs | 3 +- types/src/message.rs | 5 +- types/src/traits/consensus_api.rs | 4 +- types/src/traits/election.rs | 98 ++++++++++++++---------------- types/src/traits/storage.rs | 4 +- types/src/vote.rs | 89 +++++++++++---------------- 19 files changed, 200 insertions(+), 222 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index feaed2fe6b..afe0a38bb3 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -179,7 +179,7 @@ where /// Provides a random [`QuorumCertificate`] pub fn random_quorum_certificate>( rng: &mut dyn rand::RngCore, -) -> QuorumCertificate { +) -> QuorumCertificate> { QuorumCertificate { // block_commitment: random_commitment(rng), leaf_commitment: random_commitment(rng), diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0f317c9410..0b9e78f2ee 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -47,7 +47,7 @@ use async_compatibility_layer::{ }; use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; -use commit::Committable; +use commit::{Commitment, Committable}; use custom_debug::Debug; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -647,8 +647,8 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, CommitteeEx: ConsensusExchange< @@ -656,7 +656,7 @@ where Message, Proposal = DAProposal, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, ViewSyncEx: ConsensusExchange< @@ -664,7 +664,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, { @@ -1059,7 +1059,7 @@ impl> HotShotInitializer::genesis(); + let justify_qc = QuorumCertificate::>::genesis(); Ok(Self { inner: LEAF::new(time, justify_qc, genesis_block, state), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 6be415cd66..843309aef0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,7 +5,7 @@ use crate::{ QuorumCertificate, SequencingQuorumEx, }; use async_compatibility_layer::art::async_sleep; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds}; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -42,7 +42,6 @@ use hotshot_types::{ }, vote::{ViewSyncData, VoteType}, }; -use serde::Serialize; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -69,9 +68,9 @@ pub async fn add_network_message_task< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - COMMITTABLE: Committable + Serialize + Clone, + COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, - VOTE: VoteType>, + VOTE: VoteType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -179,9 +178,9 @@ pub async fn add_network_event_task< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - COMMITTABLE: Committable + Serialize + Clone, + COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, - VOTE: VoteType>, + VOTE: VoteType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -268,14 +267,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { let consensus = handle.hotshot.get_consensus(); @@ -365,7 +364,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { // build the da task @@ -434,7 +433,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { // build the transactions task @@ -508,7 +507,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { let api = HotShotSequencingConsensusApi { diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 5b8e737c1a..35e79a78dd 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -190,7 +190,7 @@ impl + 'static> SystemContextHandl if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { if anchor_leaf.view_number == TYPES::Time::genesis() { let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); - let mut qc = QuorumCertificate::::genesis(); + let mut qc = QuorumCertificate::>::genesis(); qc.set_leaf_commitment(leaf.commit()); let event = Event { view_number: TYPES::Time::genesis(), @@ -226,7 +226,7 @@ impl + 'static> SystemContextHandl // ) -> Result< // ( // Vec<>::Leaf>, - // QuorumCertificate>::Leaf>, + // QuorumCertificate>::Leaf>>, // ), // HotShotError, // > { @@ -333,7 +333,7 @@ impl + 'static> SystemContextHandl #[cfg(feature = "hotshot-testing")] pub fn create_yes_message( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -342,7 +342,7 @@ impl + 'static> SystemContextHandl QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = QuorumCertificate, + Certificate = QuorumCertificate>, >, { let inner = self.hotshot.inner.clone(); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ef4694a282..f83c309b14 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -64,14 +64,14 @@ pub struct SequencingConsensusTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// The global task registry @@ -126,7 +126,7 @@ pub struct SequencingConsensusTaskState< pub id: u64, /// The most Recent QC we've formed from votes, if we've formed it. - pub qc: Option>, + pub qc: Option>>, } /// State for the vote collection task. This handles the building of a QC from a votes received @@ -138,8 +138,8 @@ pub struct VoteCollectionTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, { /// the quorum exchange @@ -147,13 +147,13 @@ pub struct VoteCollectionTaskState< #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< - > as SignedCertificate< + >> as SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, Commitment>, >>::VoteAccumulator, - QuorumCertificate>, + QuorumCertificate>>, >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, @@ -170,8 +170,8 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, { } @@ -190,8 +190,8 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, { match event { @@ -272,14 +272,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -1087,7 +1087,7 @@ where /// Sends a proposal if possible from the high qc we have pub async fn publish_proposal_if_able( &self, - _qc: QuorumCertificate, + _qc: QuorumCertificate>, view: TYPES::Time, ) -> bool { if !self.quorum_exchange.is_leader(view) { @@ -1213,14 +1213,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } @@ -1262,14 +1262,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { if let SequencingHotShotEvent::Shutdown = event { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9b1657324f..87a7b685a4 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -55,7 +55,7 @@ pub struct DATaskState< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// The state's api @@ -91,7 +91,7 @@ pub struct DAVoteCollectionTaskState< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// the committee exchange @@ -122,7 +122,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } @@ -140,7 +140,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { match event { @@ -252,7 +252,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// main task event handler @@ -741,7 +741,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index de8e31fd03..f9fc7f8ba3 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,3 +1,5 @@ +use crate::view_sync::ViewSyncPhase; +use commit::Commitment; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, data::{DAProposal, VidDisperse}, @@ -8,8 +10,6 @@ use hotshot_types::{ vote::{DAVote, QuorumVote, ViewSyncVote}, }; -use crate::view_sync::ViewSyncPhase; - /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub enum SequencingHotShotEvent> { @@ -18,7 +18,7 @@ pub enum SequencingHotShotEvent> { /// A quorum proposal has been received from the network; handled by the consensus task QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(QuorumVote), + QuorumVoteRecv(QuorumVote>), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task @@ -28,13 +28,13 @@ pub enum SequencingHotShotEvent> { /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(QuorumVote), + QuorumVoteSend(QuorumVote>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(QuorumCertificate), + QCFormed(QuorumCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index b41604376d..376cbbc661 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -5,8 +5,7 @@ use async_compatibility_layer::{ }; use async_lock::RwLock; use bincode::config::Options; -use commit::Commitment; -use commit::Committable; +use commit::{Commitment, Committable}; use either::{Either, Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -56,7 +55,7 @@ pub struct TransactionTaskState< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// The state's api @@ -100,7 +99,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// main task event handler @@ -342,7 +341,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index eaac060975..30f3df83b6 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -75,7 +75,7 @@ pub struct ViewSyncTaskState< Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { /// Registry to register sub tasks @@ -124,7 +124,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { } @@ -152,7 +152,7 @@ pub struct ViewSyncReplicaTaskState< Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { /// Timeout for view sync rounds @@ -195,7 +195,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { } @@ -270,7 +270,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] @@ -620,7 +620,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] @@ -951,7 +951,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { /// Handles incoming events for the view sync relay task diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 4bda48adcc..b2a9a88451 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -90,9 +90,13 @@ impl> TS for OverallSafety pub struct RoundResult> { /// Transactions that were submitted // pub txns: Vec, + /// Nodes that committed this round /// id -> (leaf, qc) - pub success_nodes: HashMap, QuorumCertificate)>, + // TODO GG: isn't it infeasible to store a Vec? + #[allow(clippy::type_complexity)] + success_nodes: HashMap, QuorumCertificate>)>, + /// Nodes that failed to commit this round pub failed_nodes: HashMap>>>, @@ -185,7 +189,7 @@ impl> RoundResult pub fn insert_into_result( &mut self, idx: usize, - result: (Vec, QuorumCertificate), + result: (Vec, QuorumCertificate>), maybe_block_size: Option, ) -> Option { self.success_nodes.insert(idx as u64, result.clone()); diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 7018490e09..09c3235744 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -6,7 +6,7 @@ use crate::vote::QuorumVoteAccumulator; use crate::vote::ViewSyncVoteAccumulator; use crate::vote::VoteType; use crate::{ - data::{fake_commitment, serialize_signature, LeafType}, + data::serialize_signature, traits::{ election::{SignedCertificate, VoteData, VoteToken}, node_implementation::NodeType, @@ -16,13 +16,14 @@ use crate::{ vote::{DAVote, ViewSyncData, ViewSyncVote}, }; use bincode::Options; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds, Committable}; use espresso_systems_common::hotshot::tag; use hotshot_utils::bincode::bincode_opts; use serde::{Deserialize, Serialize}; use std::{ fmt::{self, Debug, Display, Formatter}, + hash::Hash, ops::Deref, }; use tracing::debug; @@ -49,12 +50,12 @@ pub struct DACertificate { /// /// A Quorum Certificate is a threshold signature of the `Leaf` being proposed, as well as some /// metadata, such as the `Stage` of consensus the quorum certificate was generated during. -#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] +#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash, Eq)] #[serde(bound(deserialize = ""))] -pub struct QuorumCertificate> { +pub struct QuorumCertificate { /// commitment to previous leaf #[debug(skip)] - pub leaf_commitment: Commitment, + pub leaf_commitment: COMMITMENT, /// Which view this QC relates to pub view_number: TYPES::Time, /// assembled signature for certificate aggregation @@ -63,7 +64,9 @@ pub struct QuorumCertificate> pub is_genesis: bool, } -impl> Display for QuorumCertificate { +impl Display + for QuorumCertificate +{ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, @@ -138,15 +141,15 @@ pub enum AssembledSignature { } /// Data from a vote needed to accumulate into a `SignedCertificate` -pub struct VoteMetaData { +pub struct VoteMetaData { /// Voter's public key pub encoded_key: EncodedPublicKey, /// Votes signature pub encoded_signature: EncodedSignature, /// Commitment to what's voted on. E.g. the leaf for a `QuorumCertificate` - pub commitment: Commitment, + pub commitment: COMMITMENT, /// Data of the vote, yes, no, timeout, or DA - pub data: VoteData>, + pub data: VoteData, /// The votes's token pub vote_token: T, /// View number for the vote @@ -156,12 +159,12 @@ pub struct VoteMetaData, } -impl> - SignedCertificate> - for QuorumCertificate +impl + SignedCertificate + for QuorumCertificate { - type Vote = QuorumVote; - type VoteAccumulator = QuorumVoteAccumulator, Self::Vote>; + type Vote = QuorumVote; + type VoteAccumulator = QuorumVoteAccumulator; fn from_signatures_and_commitment( signatures: AssembledSignature, @@ -191,11 +194,11 @@ impl> self.signatures.clone() } - fn leaf_commitment(&self) -> Commitment { + fn leaf_commitment(&self) -> COMMITMENT { self.leaf_commitment } - fn set_leaf_commitment(&mut self, commitment: Commitment) { + fn set_leaf_commitment(&mut self, commitment: COMMITMENT) { self.leaf_commitment = commitment; } @@ -204,8 +207,9 @@ impl> } fn genesis() -> Self { + // TODO GG need a new way to get fake commit now that we don't have Committable Self { - leaf_commitment: fake_commitment::(), + leaf_commitment: COMMITMENT::default_commitment_no_preimage(), view_number: ::genesis(), signatures: AssembledSignature::Genesis(), is_genesis: true, @@ -213,16 +217,14 @@ impl> } } -impl> Eq for QuorumCertificate {} - -impl> Committable - for QuorumCertificate +impl Committable + for QuorumCertificate { fn commit(&self) -> Commitment { let signatures_bytes = serialize_signature(&self.signatures); commit::RawCommitmentBuilder::new("Quorum Certificate Commitment") - .field("leaf commitment", self.leaf_commitment) + .var_size_field("leaf commitment", self.leaf_commitment.as_ref()) .u64_field("view number", *self.view_number.deref()) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index ce27d7a547..e10900aa7e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -53,7 +53,7 @@ pub struct Consensus> { pub locked_view: TYPES::Time, /// the highqc per spec - pub high_qc: QuorumCertificate, + pub high_qc: QuorumCertificate>, /// A reference to the metrics trait #[debug(skip)] diff --git a/types/src/data.rs b/types/src/data.rs index 080355a195..95456393ed 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -9,7 +9,6 @@ use crate::{ ViewSyncCertificate, }, traits::{ - election::SignedCertificate, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -132,7 +131,7 @@ where pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The hash of the parent `Leaf` /// So we can ask if it extends @@ -215,7 +214,7 @@ pub struct QuorumProposal> { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view pub timeout_certificate: Option>, @@ -444,7 +443,7 @@ pub trait LeafType: /// Create a new leaf from its components. fn new( view_number: LeafTime, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, deltas: LeafBlock, state: LeafState, ) -> Self; @@ -457,7 +456,7 @@ pub trait LeafType: /// Change the height of this leaf. fn set_height(&mut self, height: u64); /// The QC linking this leaf to its parent in the chain. - fn get_justify_qc(&self) -> QuorumCertificate; + fn get_justify_qc(&self) -> QuorumCertificate>; /// Commitment to this leaf's parent. fn get_parent_commitment(&self) -> Commitment; /// The block contained in this leaf. @@ -531,11 +530,11 @@ pub struct ValidatingLeaf { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The hash of the parent `Leaf` /// So we can ask if it extends - pub parent_commitment: Commitment>, + pub parent_commitment: Commitment, /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, @@ -570,11 +569,11 @@ pub struct SequencingLeaf { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The hash of the parent `SequencingLeaf` /// So we can ask if it extends - pub parent_commitment: Commitment>, + pub parent_commitment: Commitment, /// The block or block commitment to be applied pub deltas: Either>, @@ -644,7 +643,7 @@ impl LeafType for ValidatingLeaf { fn new( view_number: ::Time, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, deltas: ::BlockType, state: ::StateType, ) -> Self { @@ -673,7 +672,7 @@ impl LeafType for ValidatingLeaf { self.height = height; } - fn get_justify_qc(&self) -> QuorumCertificate { + fn get_justify_qc(&self) -> QuorumCertificate> { self.justify_qc.clone() } @@ -761,7 +760,7 @@ impl LeafType for SequencingLeaf { fn new( view_number: ::Time, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, deltas: ::BlockType, _state: ::StateType, ) -> Self { @@ -789,7 +788,7 @@ impl LeafType for SequencingLeaf { self.height = height; } - fn get_justify_qc(&self) -> QuorumCertificate { + fn get_justify_qc(&self) -> QuorumCertificate> { self.justify_qc.clone() } @@ -934,7 +933,7 @@ impl Committable for ValidatingLeaf { .u64(*self.justify_qc.view_number) .field( "justify_qc leaf commitment", - self.justify_qc.leaf_commitment(), + self.justify_qc.leaf_commitment, ) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) @@ -966,7 +965,7 @@ impl Committable for SequencingLeaf { .u64(*self.justify_qc.view_number) .field( "justify_qc leaf commitment", - self.justify_qc.leaf_commitment(), + self.justify_qc.leaf_commitment, ) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) diff --git a/types/src/event.rs b/types/src/event.rs index f33a39f8cd..5d091cd400 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -4,6 +4,7 @@ use crate::{ certificate::QuorumCertificate, data::LeafType, error::HotShotError, traits::node_implementation::NodeType, }; +use commit::Commitment; use std::sync::Arc; /// A status event emitted by a `HotShot` instance /// @@ -42,7 +43,7 @@ pub enum EventType> { /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf /// before it using - qc: Arc>, + qc: Arc>>, /// Optional information of the number of transactions in the block, for logging purposes. block_size: Option, }, diff --git a/types/src/message.rs b/types/src/message.rs index d5897c0ef8..c26de8bcef 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -15,6 +15,7 @@ use crate::{ }, vote::{DAVote, QuorumVote, ViewSyncVote, VoteType}, }; +use commit::Commitment; use derivative::Derivative; use either::Either::{self, Left, Right}; use serde::{Deserialize, Serialize}; @@ -149,7 +150,7 @@ where /// Message with a quorum proposal. Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. - Vote(QuorumVote, TYPES::SignatureKey), + Vote(QuorumVote>, TYPES::SignatureKey), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. @@ -314,7 +315,7 @@ where Proposal(Proposal>), /// Message with a quorum vote. - Vote(QuorumVote), + Vote(QuorumVote>), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index ab3f899944..08716a3d73 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -14,7 +14,7 @@ use crate::{ }, }; use async_trait::async_trait; - +use commit::Commitment; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; /// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and @@ -97,7 +97,7 @@ pub trait ConsensusSharedApi< &self, view_number: TYPES::Time, leaf_views: Vec, - decide_qc: QuorumCertificate, + decide_qc: QuorumCertificate>, ) { self.send_event(Event { view_number, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 7ccde773af..6b3ef7e790 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -34,7 +34,7 @@ use crate::{ }, }; use bincode::Options; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds, Committable}; use derivative::Derivative; use either::Either; use ethereum_types::U256; @@ -76,7 +76,7 @@ pub enum Checked { #[serde(bound(deserialize = ""))] pub enum VoteData where - COMMITMENT: for<'a> Deserialize<'a>, + COMMITMENT: CommitmentBounds, { /// Vote to provide availability for a block. DA(COMMITMENT), @@ -96,7 +96,7 @@ where impl VoteData where - COMMITMENT: for<'a> Deserialize<'a> + Clone, + COMMITMENT: CommitmentBounds, { /// Return the underlying commitment. #[must_use] @@ -105,14 +105,14 @@ where use VoteData::*; match self { DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) - | ViewSyncFinalize(c) => c.clone(), + | ViewSyncFinalize(c) => *c, } } } impl VoteData where - COMMITMENT: Serialize + for<'a> Deserialize<'a>, + COMMITMENT: CommitmentBounds, { #[must_use] /// Convert vote data into bytes. @@ -135,7 +135,6 @@ pub trait VoteToken: + PartialEq + Hash + Eq - + Committable { // type StakeTable; // type KeyPair: SignatureKey; @@ -161,7 +160,7 @@ pub trait ElectionConfig: pub trait SignedCertificate where Self: Send + Sync + Clone + Serialize + for<'a> Deserialize<'a>, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + COMMITMENT: CommitmentBounds, TOKEN: VoteToken, { /// `VoteType` that is used in this certificate @@ -267,9 +266,9 @@ pub trait ConsensusExchange: Send + Sync { type Proposal: ProposalType; /// A vote on a [`Proposal`](Self::Proposal). // TODO ED Make this equal Certificate vote (if possible?) - type Vote: VoteType>; + type Vote: VoteType; /// A [`SignedCertificate`] attesting to a decision taken by the committee. - type Certificate: SignedCertificate> + type Certificate: SignedCertificate + Hash + Eq; /// The committee eligible to make decisions. @@ -277,7 +276,7 @@ pub trait ConsensusExchange: Send + Sync { /// Network used by [`Membership`](Self::Membership) to communicate. type Networking: CommunicationChannel; /// Commitments to items which are the subject of proposals and decisions. - type Commitment: Committable + Serialize + for<'a> Deserialize<'a> + Clone; + type Commitment: CommitmentBounds; /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). fn create( @@ -330,13 +329,10 @@ pub trait ConsensusExchange: Send + Sync { } /// The contents of a vote on `commit`. - fn vote_data( - &self, - commit: Commitment, - ) -> VoteData>; + fn vote_data(&self, commit: Self::Commitment) -> VoteData; /// Validate a QC. - fn is_valid_cert(&self, qc: &Self::Certificate, commit: Commitment) -> bool { + fn is_valid_cert(&self, qc: &Self::Certificate, commit: Self::Commitment) -> bool { if qc.is_genesis() && qc.view_number() == TYPES::Time::genesis() { return true; } @@ -387,7 +383,7 @@ pub trait ConsensusExchange: Send + Sync { &self, encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, - data: VoteData>, + data: VoteData, vote_token: Checked, ) -> bool { let mut is_valid_vote_token = false; @@ -412,7 +408,7 @@ pub trait ConsensusExchange: Send + Sync { &self, key: &TYPES::SignatureKey, encoded_signature: &EncodedSignature, - data: &VoteData>, + data: &VoteData, vote_token: &Checked, ) -> bool { let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); @@ -449,8 +445,8 @@ pub trait ConsensusExchange: Send + Sync { &self, encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, - vote_data: VoteData>, + leaf_commitment: Self::Commitment, + vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, accumlator: VoteAccumulator, @@ -468,21 +464,21 @@ pub trait ConsensusExchange: Send + Sync { TYPES, TYPES::Time, TYPES::VoteTokenType, - Commitment, + Self::Commitment, >>::VoteAccumulator, vote: &<>::Certificate as SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, - Commitment, + Self::Commitment, >>::Vote, - _commit: &Commitment, + _commit: &Self::Commitment, ) -> Either< <>::Certificate as SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, - Commitment, + Self::Commitment, >>::VoteAccumulator, Self::Certificate, > { @@ -685,7 +681,7 @@ impl< type Certificate = DACertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = TYPES::BlockType; + type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, @@ -717,10 +713,7 @@ impl< .make_vote_token(view_number, &self.private_key) } - fn vote_data( - &self, - commit: Commitment, - ) -> VoteData> { + fn vote_data(&self, commit: Self::Commitment) -> VoteData { VoteData::DA(commit) } @@ -730,8 +723,8 @@ impl< &self, encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, - vote_data: VoteData>, + leaf_commitment: Self::Commitment, + vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, accumlator: VoteAccumulator, @@ -815,7 +808,7 @@ pub trait QuorumExchangeType, /// Create a message with a negative vote on validating or commitment proposal. fn create_no_message>( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -826,7 +819,7 @@ pub trait QuorumExchangeType, /// Create a message with a timeout vote on validating or commitment proposal. fn create_timeout_message>( &self, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> GeneralConsensusMessage @@ -873,7 +866,7 @@ impl< /// Create a message with a positive vote on validating or commitment proposal. fn create_yes_message>( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -952,7 +945,7 @@ impl< /// Create a message with a negative vote on validating or commitment proposal. fn create_no_message>( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -974,7 +967,7 @@ impl< /// Create a message with a timeout vote on validating or commitment proposal. fn create_timeout_message>( &self, - high_qc: QuorumCertificate, + high_qc: QuorumCertificate>, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> GeneralConsensusMessage @@ -1003,11 +996,11 @@ impl< for QuorumExchange { type Proposal = PROPOSAL; - type Vote = QuorumVote; - type Certificate = QuorumCertificate; + type Vote = QuorumVote>; + type Certificate = QuorumCertificate>; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = LEAF; + type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, @@ -1033,10 +1026,7 @@ impl< &self.network } - fn vote_data( - &self, - commit: Commitment, - ) -> VoteData> { + fn vote_data(&self, commit: Self::Commitment) -> VoteData { VoteData::Yes(commit) } @@ -1047,12 +1037,13 @@ impl< encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, leaf_commitment: Commitment, - vote_data: VoteData>, + vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, TYPES>, _relay: Option, - ) -> Either, Self::Certificate> { + ) -> Either, TYPES>, Self::Certificate> + { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), @@ -1369,7 +1360,7 @@ impl< type Certificate = ViewSyncCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = ViewSyncData; + type Commitment = Commitment>; fn create( entries: Vec<::StakeTableEntry>, @@ -1395,10 +1386,7 @@ impl< &self.network } - fn vote_data( - &self, - _commit: Commitment, - ) -> VoteData> { + fn vote_data(&self, _commit: Self::Commitment) -> VoteData { unimplemented!() } @@ -1407,13 +1395,15 @@ impl< encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, leaf_commitment: Commitment>, - vote_data: VoteData>, + vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, TYPES>, + accumlator: VoteAccumulator>, TYPES>, relay: Option, - ) -> Either, TYPES>, Self::Certificate> - { + ) -> Either< + VoteAccumulator>, TYPES>, + Self::Certificate, + > { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 20c8f87f9a..122698486c 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -132,7 +132,7 @@ pub struct StoredView> { /// The parent of this view pub parent: Commitment, /// The justify QC of this view. See the hotstuff paper for more information on this. - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The state of this view pub state: LEAF::MaybeState, /// The deltas of this view @@ -156,7 +156,7 @@ where /// /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. pub fn from_qc_block_and_state( - qc: QuorumCertificate, + qc: QuorumCertificate>, deltas: LEAF::DeltasType, state: LEAF::MaybeState, height: u64, diff --git a/types/src/vote.rs b/types/src/vote.rs index c27225e4af..9e232b755a 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -5,7 +5,6 @@ use crate::{ certificate::{AssembledSignature, QuorumCertificate}, - data::LeafType, traits::{ election::{VoteData, VoteToken}, node_implementation::NodeType, @@ -14,7 +13,7 @@ use crate::{ }; use bincode::Options; use bitvec::prelude::*; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds, Committable}; use either::Either; use ethereum_types::U256; use hotshot_utils::bincode::bincode_opts; @@ -29,7 +28,7 @@ use std::{ use tracing::error; /// The vote sent by consensus messages. -pub trait VoteType Deserialize<'a> + Serialize + Clone>: +pub trait VoteType: Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq { /// Get the view this vote was cast for @@ -63,29 +62,29 @@ pub struct DAVote { /// A positive or negative vote on validating or commitment proposal. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub struct YesOrNoVote> { +pub struct YesOrNoVote { /// TODO we should remove this /// this is correct, but highly inefficient /// we should check a cache, and if that fails request the qc - pub justify_qc_commitment: Commitment>, + pub justify_qc_commitment: Commitment>, /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The leaf commitment being voted on. - pub leaf_commitment: Commitment, + pub leaf_commitment: COMMITMENT, /// The view this vote was cast for pub current_view: TYPES::Time, /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData>, + pub vote_data: VoteData, } /// A timeout vote. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub struct TimeoutVote> { +pub struct TimeoutVote { /// The highest valid QC this node knows about - pub high_qc: QuorumCertificate, + pub high_qc: QuorumCertificate, /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The view this vote was cast for @@ -188,13 +187,13 @@ impl ViewSyncVote { /// Votes on validating or commitment proposal. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub enum QuorumVote> { +pub enum QuorumVote { /// Posivite vote. - Yes(YesOrNoVote), + Yes(YesOrNoVote), /// Negative vote. - No(YesOrNoVote), + No(YesOrNoVote), /// Timeout vote. - Timeout(TimeoutVote), + Timeout(TimeoutVote), } impl VoteType> for DAVote { @@ -224,8 +223,8 @@ impl DAVote { } } -impl> VoteType> - for QuorumVote +impl VoteType + for QuorumVote { fn get_view(&self) -> TYPES::Time { match self { @@ -240,7 +239,7 @@ impl> VoteType EncodedSignature { self.signature() } - fn get_data(&self) -> VoteData> { + fn get_data(&self) -> VoteData { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), QuorumVote::Timeout(_) => unimplemented!(), @@ -254,7 +253,7 @@ impl> VoteType> QuorumVote { +impl QuorumVote { /// Get the encoded signature. pub fn signature(&self) -> EncodedSignature { @@ -320,7 +319,7 @@ pub trait Accumulator: Sized { /// Accumulator trait used to accumulate votes into an `AssembledSignature` pub trait Accumulator2< TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + COMMITMENT: CommitmentBounds, VOTE: VoteType, >: Sized { @@ -338,7 +337,7 @@ pub trait Accumulator2< /// Accumulates DA votes pub struct DAVoteAccumulator< TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + COMMITMENT: CommitmentBounds, VOTE: VoteType, > { /// Map of all da signatures accumlated so far @@ -353,11 +352,8 @@ pub struct DAVoteAccumulator< pub phantom: PhantomData, } -impl< - TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator2 for DAVoteAccumulator +impl> + Accumulator2 for DAVoteAccumulator { fn append( mut self, @@ -429,7 +425,7 @@ impl< /// Accumulate quorum votes pub struct QuorumVoteAccumulator< TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + COMMITMENT: CommitmentBounds, VOTE: VoteType, > { /// Map of all signatures accumlated so far @@ -451,11 +447,8 @@ pub struct QuorumVoteAccumulator< pub phantom: PhantomData, } -impl< - TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator2 for QuorumVoteAccumulator +impl> + Accumulator2 for QuorumVoteAccumulator { fn append( mut self, @@ -560,7 +553,7 @@ impl< /// Accumulates view sync votes pub struct ViewSyncVoteAccumulator< TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + COMMITMENT: CommitmentBounds, VOTE: VoteType, > { /// Map of all pre_commit signatures accumlated so far @@ -582,11 +575,8 @@ pub struct ViewSyncVoteAccumulator< pub phantom: PhantomData, } -impl< - TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator2 for ViewSyncVoteAccumulator +impl> + Accumulator2 for ViewSyncVoteAccumulator { #[allow(clippy::too_many_lines)] fn append( @@ -734,18 +724,15 @@ impl< /// Placeholder accumulator; will be replaced by accumulator for each certificate type pub struct AccumulatorPlaceholder< TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, + COMMITMENT: CommitmentBounds, VOTE: VoteType, > { /// Phantom data to make compiler happy pub phantom: PhantomData<(TYPES, VOTE, COMMITMENT)>, } -impl< - TYPES: NodeType, - COMMITMENT: for<'a> Deserialize<'a> + Serialize + Clone, - VOTE: VoteType, - > Accumulator2 for AccumulatorPlaceholder +impl> + Accumulator2 for AccumulatorPlaceholder { fn append( self, @@ -771,11 +758,7 @@ type VoteMap = HashMap< /// respectively. /// /// TODO GG used only in election.rs; move this to there and make it private? -pub struct VoteAccumulator< - TOKEN, - COMMITMENT: Serialize + for<'a> Deserialize<'a> + Clone, - TYPES: NodeType, -> { +pub struct VoteAccumulator { /// Map of all signatures accumlated so far pub total_vote_outcomes: VoteMap, /// Map of all da signatures accumlated so far @@ -800,23 +783,23 @@ pub struct VoteAccumulator< pub signers: BitVec, } -impl +impl Accumulator< ( - Commitment, + COMMITMENT, ( EncodedPublicKey, ( EncodedSignature, Vec<::StakeTableEntry>, usize, - VoteData>, + VoteData, TOKEN, ), ), ), AssembledSignature, - > for VoteAccumulator, TYPES> + > for VoteAccumulator where TOKEN: Clone + VoteToken, { @@ -824,14 +807,14 @@ where fn append( mut self, val: ( - Commitment, + COMMITMENT, ( EncodedPublicKey, ( EncodedSignature, Vec<::StakeTableEntry>, usize, - VoteData>, + VoteData, TOKEN, ), ), From d91a5d2677eb8f933ac61e35c390f41b1a2d17a4 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 25 Sep 2023 19:50:30 -0400 Subject: [PATCH 0158/1393] Persist Kademlia cache to disk (#1773) --- .../src/traits/networking/libp2p_network.rs | 47 +-- .../src/network/behaviours/dht/cache.rs | 325 ++++++++++++++++++ .../network/behaviours/{dht.rs => dht/mod.rs} | 52 ++- libp2p-networking/src/network/mod.rs | 12 +- libp2p-networking/src/network/node.rs | 4 +- libp2p-networking/src/network/node/config.rs | 5 + libp2p-networking/src/network/node/handle.rs | 169 +-------- libp2p-networking/tests/common/mod.rs | 10 +- libp2p-networking/tests/counter.rs | 31 +- 9 files changed, 421 insertions(+), 234 deletions(-) create mode 100644 libp2p-networking/src/network/behaviours/dht/cache.rs rename libp2p-networking/src/network/behaviours/{dht.rs => dht/mod.rs} (94%) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index da8784e0a7..ebb68e7d01 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -5,14 +5,14 @@ use super::NetworkingMetrics; use crate::NodeImplementation; use async_compatibility_layer::{ - art::{async_block_on, async_sleep, async_spawn, async_timeout}, + art::{async_block_on, async_sleep, async_spawn}, channel::{unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, }; use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; -use hotshot_constants::{KAD_DEFAULT_REPUB_INTERVAL_SEC, LOOK_AHEAD}; +use hotshot_constants::LOOK_AHEAD; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ data::ViewNumber, @@ -82,7 +82,7 @@ struct Libp2pNetworkInner { /// this node's public key pk: K, /// handle to control the network - handle: Arc>, + handle: Arc>, /// map of known replica peer ids to public keys broadcast_recv: UnboundedReceiver, /// Sender for broadcast messages @@ -93,8 +93,6 @@ struct Libp2pNetworkInner { direct_recv: UnboundedReceiver, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) node_lookup_send: UnboundedSender>, - /// Sender for shutdown of the peer cache's garbage collection task - cache_gc_shutdown_send: UnboundedSender<()>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -200,6 +198,8 @@ where // setting to sane defaults .ttl(None) .republication_interval(None) + // this removes the cache for tests + .dht_cache_location(None) .build() .unwrap() } else { @@ -220,6 +220,8 @@ where // setting to sane defaults .ttl(None) .republication_interval(None) + // this removes the cache for tests + .dht_cache_location(None) .build() .unwrap() }; @@ -287,7 +289,7 @@ impl Libp2pNetwork { ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); let network_handle = Arc::new( - NetworkNodeHandle::<(), K>::new(config, id) + Box::pin(NetworkNodeHandle::<()>::new(config, id)) .await .map_err(Into::::into)?, ); @@ -318,7 +320,6 @@ impl Libp2pNetwork { let (direct_send, direct_recv) = unbounded(); let (broadcast_send, broadcast_recv) = unbounded(); let (node_lookup_send, node_lookup_recv) = unbounded(); - let (cache_gc_shutdown_send, cache_gc_shutdown_recv) = unbounded::<()>(); let result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { @@ -336,7 +337,6 @@ impl Libp2pNetwork { metrics: NetworkingMetrics::new(&*metrics), topic_map, node_lookup_send, - cache_gc_shutdown_send, // Start the latest view from 0. "Latest" refers to "most recent view we are polling for // proposals on". We need this because to have consensus info injected we need a working // network already. In the worst case, we send a few lookups we don't need. @@ -345,20 +345,15 @@ impl Libp2pNetwork { }; result.spawn_event_generator(direct_send, broadcast_send); - result.spawn_node_lookup(node_lookup_recv, cache_gc_shutdown_recv); + result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); Ok(result) } /// Spawns task for looking up nodes pre-emptively - /// as well as garbage collecting the peer cache #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - fn spawn_node_lookup( - &self, - node_lookup_recv: UnboundedReceiver>, - cache_gc_shutdown_send: UnboundedReceiver<()>, - ) { + fn spawn_node_lookup(&self, node_lookup_recv: UnboundedReceiver>) { let handle = self.inner.handle.clone(); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = self.inner.latest_seen_view.clone(); @@ -375,32 +370,13 @@ impl Libp2pNetwork { // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { - // look up node, caching if applicable + // look up if let Err(err) = handle_.lookup_node::(pk.clone(), dht_timeout).await { error!("Failed to perform lookup for key {:?}: {}", pk, err); }; } } }); - - // deals with garbage collecting the lookup queue - let handle_ = handle.clone(); - async_spawn(async move { - loop { - let ttl = handle_ - .config() - .ttl - .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 8)); - if async_timeout(ttl, cache_gc_shutdown_send.recv()) - .await - .is_err() - { - handle_.prune_peer_cache().await; - } else { - break; - } - } - }); } /// Initiates connection to the outside world @@ -573,7 +549,6 @@ impl ConnectedNetwork for Libp2p { let closure = async move { self.inner.node_lookup_send.send(None).await.unwrap(); - self.inner.cache_gc_shutdown_send.send(()).await.unwrap(); if self.inner.handle.is_killed() { error!("Called shut down when already shut down! Noop."); } else { diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs new file mode 100644 index 0000000000..602bb41e16 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/cache.rs @@ -0,0 +1,325 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::{Duration, SystemTime}, +}; + +use async_compatibility_layer::art::async_block_on; +use async_lock::RwLock; +use bincode::Options; +use dashmap::{mapref::one::Ref, DashMap}; +use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; +use hotshot_utils::bincode::bincode_opts; +use snafu::{ResultExt, Snafu}; + +/// Error wrapper type for cache +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum CacheError { + /// Failed to read or write from disk + Disk { + /// source of error + source: std::io::Error, + }, + + /// Failure to serialize the cache + Serialization { + /// source of error + source: Box, + }, + + /// Failure to deserialize the cache + Deserialization { + /// source of error + source: Box, + }, + + /// General cache error + GeneralCache { + /// source of error + source: Box, + }, +} + +#[derive(Clone, derive_builder::Builder, custom_debug::Debug, Default)] +pub struct Config { + #[builder(default = "Some(\"dht.cache\".to_string())")] + pub filename: Option, + #[builder(default = "Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 16)")] + pub expiry: Duration, + #[builder(default = "4")] + pub max_disk_parity_delta: u32, +} + +impl Default for Cache { + fn default() -> Self { + async_block_on(Self::new(Config::default())) + } +} + +pub struct Cache { + /// the cache's config + config: Config, + + /// the cache for records (key -> value) + cache: Arc, Vec>>, + /// the expiries for the dht cache, in order (expiry time -> key) + expiries: Arc>>>, + + /// number of inserts since the last save + disk_parity_delta: Arc, +} + +impl Cache { + pub async fn new(config: Config) -> Self { + let cache = Self { + cache: Arc::new(DashMap::new()), + expiries: Arc::new(RwLock::new(BTreeMap::new())), + config, + disk_parity_delta: Arc::new(AtomicU32::new(0)), + }; + + // try loading from file + if let Err(err) = cache.load().await { + tracing::warn!("failed to load cache from file: {}", err); + }; + + cache + } + + pub async fn load(&self) -> Result<(), CacheError> { + if let Some(filename) = &self.config.filename { + let encoded = std::fs::read(filename).context(DiskSnafu)?; + + let cache: HashMap, Vec)> = bincode_opts() + .deserialize(&encoded) + .context(DeserializationSnafu)?; + + // inline prune and insert + let now = SystemTime::now(); + for (expiry, (key, value)) in cache { + if now < expiry { + self.cache.insert(key.clone(), value); + self.expiries.write().await.insert(expiry, key); + } + } + } + + Ok(()) + } + + pub async fn save(&self) -> Result<(), CacheError> { + if let Some(filename) = &self.config.filename { + // prune first + self.prune().await; + + // serialize + let mut cache_to_write = HashMap::new(); + let expiries = self.expiries.read().await; + for (expiry, key) in &*expiries { + if let Some(entry) = self.cache.get(key) { + cache_to_write.insert(expiry, (key, entry.value().clone())); + } else { + tracing::warn!("key not found in cache: {:?}", key); + Err(CacheError::GeneralCache { + source: Box::new(bincode::ErrorKind::Custom( + "key not found in cache".to_string(), + )), + })?; + }; + } + + let encoded = bincode_opts() + .serialize(&cache_to_write) + .context(SerializationSnafu)?; + + std::fs::write(filename, encoded).context(DiskSnafu)?; + } + + Ok(()) + } + + async fn prune(&self) { + let now = SystemTime::now(); + let mut expiries = self.expiries.write().await; + let mut removed: u32 = 0; + + while let Some((expires, key)) = expiries.pop_first() { + if now > expires { + self.cache.remove(&key); + removed += 1; + } else { + expiries.insert(expires, key); + break; + } + } + + if removed > 0 { + self.disk_parity_delta.fetch_add(removed, Ordering::Relaxed); + } + } + + pub async fn get(&self, key: &Vec) -> Option, Vec>> { + // prune, save if necessary + self.prune().await; + self.save_if_necessary().await; + + // get + self.cache.get(key) + } + + pub async fn insert(&self, key: Vec, value: Vec) { + // insert into cache and expiries + self.cache.insert(key.clone(), value); + self.expiries + .write() + .await + .insert(SystemTime::now() + self.config.expiry, key); + + // save if reached max disk parity delta + self.disk_parity_delta.fetch_add(1, Ordering::Relaxed); + self.save_if_necessary().await; + } + + async fn save_if_necessary(&self) { + let cur_disk_parity_delta = self.disk_parity_delta.load(Ordering::Relaxed); + if cur_disk_parity_delta >= self.config.max_disk_parity_delta { + if let Err(err) = self.save().await { + tracing::error!("failed to save cache to file: {}", err); + }; + } + } +} + +#[cfg(test)] +mod test { + + use super::*; + use async_compatibility_layer::art::async_sleep; + use libp2p_identity::PeerId; + use tracing::instrument; + + /// cache eviction test + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_dht_cache_eviction() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // cache with 1s eviction + let cache = Cache::new(Config { + filename: None, + expiry: Duration::from_secs(1), + max_disk_parity_delta: 4, + }) + .await; + + let (key, value) = (PeerId::random(), PeerId::random()); + + // insert + cache.insert(key.to_bytes(), value.to_bytes()).await; + + // check that it is in the cache and expiries + assert_eq!( + cache.get(&key.to_bytes()).await.unwrap().value(), + &value.to_bytes() + ); + assert_eq!(cache.expiries.read().await.len(), 1); + + // sleep for 1s + async_sleep(Duration::from_secs(1)).await; + + // check that now is evicted + assert!(cache.get(&key.to_bytes()).await.is_none()); + + // check that the cache and expiries are empty + assert!(cache.expiries.read().await.is_empty()); + assert!(cache.cache.is_empty()); + } + + /// cache add test + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_dht_cache_save_load() { + let _ = std::fs::remove_file("test.cache"); + + let cache = Cache::new(Config { + filename: Some("test.cache".to_string()), + expiry: Duration::from_secs(600), + max_disk_parity_delta: 4, + }) + .await; + + // add 10 key-value pairs to the cache + for i in 0u8..10u8 { + let (key, value) = (vec![i; 1], vec![i + 1; 1]); + cache.insert(key, value).await; + } + + // save the cache + cache.save().await.unwrap(); + + // load the cache + let cache = Cache::new(Config { + filename: Some("test.cache".to_string()), + expiry: Duration::from_secs(600), + max_disk_parity_delta: 4, + }) + .await; + + // check that the cache has the 10 key-value pairs + for i in 0u8..10u8 { + let (key, value) = (vec![i; 1], vec![i + 1; 1]); + assert_eq!(cache.get(&key).await.unwrap().value(), &value); + } + + // delete the cache file + let _ = std::fs::remove_file("test.cache"); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_dht_disk_parity() { + let _ = std::fs::remove_file("test.cache"); + + let cache = Cache::new(Config { + // tests run sequentially, shouldn't matter + filename: Some("test.cache".to_string()), + expiry: Duration::from_secs(600), + max_disk_parity_delta: 4, + }) + .await; + + // insert into cache + for i in 0..3 { + cache.insert(vec![i; 1], vec![i + 1; 1]).await; + } + + // check that file is not saved + assert!(!std::path::Path::new("test.cache").exists()); + + // insert into cache + cache.insert(vec![0; 1], vec![1; 1]).await; + + // check that file is saved + assert!(std::path::Path::new("test.cache").exists()); + + // delete the cache file + _ = std::fs::remove_file("test.cache"); + } +} diff --git a/libp2p-networking/src/network/behaviours/dht.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs similarity index 94% rename from libp2p-networking/src/network/behaviours/dht.rs rename to libp2p-networking/src/network/behaviours/dht/mod.rs index 46adb05d0c..7086b6dab1 100644 --- a/libp2p-networking/src/network/behaviours/dht.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -5,6 +5,9 @@ use std::{ time::Duration, }; +mod cache; + +use async_compatibility_layer::art::async_block_on; use futures::channel::oneshot::Sender; use libp2p::{ kad::{ @@ -21,6 +24,8 @@ use tracing::{error, info, warn}; pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; const MAX_DHT_QUERY_SIZE: usize = 5; +use self::cache::Cache; + use super::exponential_backoff::ExponentialBackoff; /// Behaviour wrapping libp2p's kademlia @@ -56,6 +61,8 @@ pub struct DHTBehaviour { pub peer_id: PeerId, /// replication factor pub replication_factor: NonZeroUsize, + /// kademlia cache + cache: Cache, } /// State of bootstrapping @@ -106,10 +113,11 @@ impl DHTBehaviour { /// Create a new DHT behaviour #[must_use] - pub fn new( + pub async fn new( mut kadem: Kademlia, pid: PeerId, replication_factor: NonZeroUsize, + cache_location: Option, ) -> Self { // needed because otherwise we stay in client mode when testing locally // and don't publish keys stuff @@ -138,6 +146,13 @@ impl DHTBehaviour { }, in_progress_get_closest_peers: HashMap::default(), replication_factor, + cache: Cache::new( + cache::ConfigBuilder::default() + .filename(cache_location) + .build() + .unwrap_or_default(), + ) + .await, } } @@ -223,17 +238,26 @@ impl DHTBehaviour { return; } - let qid = self.kadem.get_record(key.clone().into()); - let query = KadGetQuery { - backoff, - progress: DHTProgress::InProgress(qid), - notify: chan, - num_replicas: factor, - key, - retry_count: retry_count - 1, - records: HashMap::default(), - }; - self.in_progress_get_record_queries.insert(qid, query); + // check cache before making the request + if let Some(entry) = async_block_on(self.cache.get(&key)) { + // exists in cache + if chan.send(entry.value().clone()).is_err() { + warn!("Get DHT: channel closed before get record request result could be sent"); + } + } else { + // doesn't exist in cache, actually propagate request + let qid = self.kadem.get_record(key.clone().into()); + let query = KadGetQuery { + backoff, + progress: DHTProgress::InProgress(qid), + notify: chan, + num_replicas: factor, + key, + retry_count: retry_count - 1, + records: HashMap::default(), + }; + self.in_progress_get_record_queries.insert(qid, query); + } } /// update state based on recv-ed get query @@ -279,6 +303,10 @@ impl DHTBehaviour { .into_iter() .find(|(_, v)| *v >= NUM_REPLICATED_TO_TRUST) { + // insert into cache + async_block_on(self.cache.insert(key, r.clone())); + + // return value if notify.send(r).is_err() { warn!("Get DHT: channel closed before get record request result could be sent"); } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 54b3d89035..3ec1c07b73 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -34,7 +34,7 @@ use libp2p::{ use libp2p_identity::PeerId; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, fmt::Debug, hash::Hash, str::FromStr, sync::Arc, time::Duration}; +use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc, time::Duration}; use tracing::{info, instrument}; #[cfg(async_executor_impl = "async-std")] @@ -229,12 +229,12 @@ pub async fn gen_transport( /// a single node, connects them to each other /// and waits for connections to propagate to all nodes. #[instrument] -pub async fn spin_up_swarm( +pub async fn spin_up_swarm( timeout_len: Duration, known_nodes: Vec<(Option, Multiaddr)>, config: NetworkNodeConfig, idx: usize, - handle: &Arc>, + handle: &Arc>, ) -> Result<(), NetworkNodeHandleError> { info!("known_nodes{:?}", known_nodes); handle.add_known_peers(known_nodes).await?; @@ -248,9 +248,9 @@ pub async fn spin_up_swarm( - handles: &[Arc>], +pub fn get_random_handle( + handles: &[Arc>], rng: &mut dyn rand::RngCore, -) -> Arc> { +) -> Arc> { handles.iter().choose(rng).unwrap().clone() } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 2f7dd46ba4..835058bf84 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -269,7 +269,9 @@ impl NetworkNode { config .replication_factor .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), - ), + config.dht_cache_location.clone(), + ) + .await, identify, DMBehaviour::new(request_response), ); diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 4d19b6f516..82897c0de5 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -24,6 +24,11 @@ pub struct NetworkNodeConfig { #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, + /// location of the dht cache + /// default is "dht.cache" in the current directory + #[builder(default = "Some(\"dht.cache\".to_string())")] + pub dht_cache_location: Option, + #[builder(default)] /// parameters for gossipsub mesh network pub mesh_params: Option, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 04961140c6..c4e6460666 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -11,25 +11,22 @@ use async_compatibility_layer::{ UnboundedReceiver, UnboundedRecvError, UnboundedSender, }, }; -use async_lock::{Mutex, RwLock}; +use async_lock::Mutex; use bincode::Options; -use dashmap::DashMap; use futures::{stream::FuturesOrdered, Future, FutureExt}; -use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use hotshot_utils::bincode::bincode_opts; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use std::{ - collections::{BTreeMap, HashSet}, + collections::HashSet, fmt::Debug, - hash::Hash, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, - time::{Duration, Instant, SystemTime}, + time::{Duration, Instant}, }; use tracing::{debug, info, instrument}; @@ -37,7 +34,7 @@ use tracing::{debug, info, instrument}; /// - A reference to the state /// - Controls for the swarm #[derive(Debug)] -pub struct NetworkNodeHandle { +pub struct NetworkNodeHandle { /// network configuration network_config: NetworkNodeConfig, /// the state of the replica @@ -51,10 +48,6 @@ pub struct NetworkNodeHandle { peer_id: PeerId, /// human readable id id: usize, - /// the cache for peers we've looked up - peer_cache: Arc>, - /// the expiries for the peer cache, in order - peer_cache_expiries: Arc>>, /// A list of webui listeners that are listening for changes on this node webui_listeners: Arc>>>, @@ -91,7 +84,7 @@ impl NetworkNodeReceiver { } } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// constructs a new node listening on `known_addr` #[instrument] pub async fn new(config: NetworkNodeConfig, id: usize) -> Result { @@ -126,8 +119,6 @@ impl NetworkNodeHa listen_addr, peer_id, id, - peer_cache: Arc::new(DashMap::new()), - peer_cache_expiries: Arc::new(RwLock::new(BTreeMap::new())), webui_listeners: Arc::default(), receiver: NetworkNodeReceiver { kill_switch, @@ -147,10 +138,9 @@ impl NetworkNodeHa #[allow(clippy::unused_async)] pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where - F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, + F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, RET: Future> + Send + 'static, S: Send + 'static, - K: Send + Sync + 'static, { assert!( !self.receiver.receiver_spawned.swap(true, Ordering::Relaxed), @@ -269,7 +259,7 @@ impl NetworkNodeHa } } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Print out the routing table used by kademlia /// NOTE: only for debugging purposes currently /// # Errors @@ -292,48 +282,16 @@ impl NetworkNodeHandle { r.await.map_err(|_| NetworkNodeHandleError::RecvError) } - /// Prunes the peer lookup cache, removing old entries - /// Should be 1:1 with kademlia expiries - pub async fn prune_peer_cache(&self) { - let now = SystemTime::now(); - let mut expiries = self.peer_cache_expiries.write().await; - - while let Some((expires, key)) = expiries.pop_first() { - if now > expires { - self.peer_cache.remove(&key); - } else { - expiries.insert(expires, key); - break; - } - } - } - /// Looks up a node's `PeerId` and attempts to validate routing - /// Will use cached `PeerId` if available /// # Errors /// if the peer was unable to be looked up (did not provide a response, DNE) - pub async fn lookup_node Deserialize<'a>>( + pub async fn lookup_node Deserialize<'a> + Serialize>( &self, - key: K, + key: V, dht_timeout: Duration, ) -> Result { - let pid = if let Some(record) = self.peer_cache.get(&key) { - // exists in cache. look up routing but skip kademlia - *record.value() - } else { - // does not exist in cache. look up in kademlia, store in cache - let pid = self.get_record_timeout::(&key, dht_timeout).await?; - self.peer_cache.insert(key.clone(), pid); - self.peer_cache_expiries.write().await.insert( - SystemTime::now() - + self - .network_config - .ttl - .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 16)), - key, - ); - pid - }; + // get record (from DHT) + let pid = self.get_record_timeout::(&key, dht_timeout).await?; // pid lookup for routing self.lookup_pid(pid).await?; @@ -669,7 +627,7 @@ impl NetworkNodeHandle { } } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Get a clone of the internal state pub async fn state(&self) -> S { self.state.cloned().await @@ -738,106 +696,3 @@ pub mod network_node_handle_error { NetworkSnafu, NodeConfigSnafu, RecvSnafu, SendSnafu, SerializationSnafu, TimeoutSnafu, }; } - -#[cfg(test)] -mod test { - use super::*; - - /// libp2p peer cache test - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_libp2p_cache_eviction() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle: NetworkNodeHandle<(), PeerId> = - NetworkNodeHandle::new(NetworkNodeConfig::default(), 0) - .await - .unwrap(); - - let now = SystemTime::now(); - let later = now + Duration::from_secs(1); - - // present insert - let present_key = PeerId::random(); - let present_pid = PeerId::random(); - handle.peer_cache.insert(present_key, present_pid); - handle - .peer_cache_expiries - .write() - .await - .insert(now, present_key); - - // later insert - let later_key = PeerId::random(); - let later_pid = PeerId::random(); - handle.peer_cache.insert(later_key, later_pid); - handle - .peer_cache_expiries - .write() - .await - .insert(now + Duration::from_secs(1), later_key); - - // check that now and later exist - assert!(handle - .peer_cache - .get(&present_key) - .is_some_and(|entry| entry.value() == &present_pid)); - assert!(handle - .peer_cache - .get(&later_key) - .is_some_and(|entry| entry.value() == &later_pid)); - assert!(handle - .peer_cache_expiries - .read() - .await - .get(&now) - .is_some_and(|entry| entry == &present_key)); - assert!(handle - .peer_cache_expiries - .read() - .await - .get(&later) - .is_some_and(|entry| entry == &later_key)); - - // prune - handle.prune_peer_cache().await; - - // check that now doesn't exist and later does - assert!(handle.peer_cache.get(&present_key).is_none()); - assert!(handle - .peer_cache - .get(&later_key) - .is_some_and(|entry| entry.value() == &later_pid)); - assert!(handle.peer_cache_expiries.read().await.get(&now).is_none()); - assert!(handle - .peer_cache_expiries - .read() - .await - .get(&later) - .is_some_and(|entry| entry == &later_key)); - - // wait for later to expire - async_sleep(Duration::from_secs(1)).await; - - // prune - handle.prune_peer_cache().await; - - // check that later doesn't exist - assert!(handle.peer_cache.get(&later_key).is_none()); - assert!(handle - .peer_cache_expiries - .read() - .await - .get(&later) - .is_none()); - - // check that the expiries and cache are empty - assert!(handle.peer_cache_expiries.read().await.is_empty()); - assert!(handle.peer_cache.is_empty()); - } -} diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 5a531bbafb..ca9e967319 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -39,8 +39,8 @@ pub async fn test_bed, FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>>, Duration) -> FutF, - G: Fn(NetworkEvent, Arc>) -> FutG + 'static + Send + Sync, + F: FnOnce(Vec>>, Duration) -> FutF, + G: Fn(NetworkEvent, Arc>) -> FutG + 'static + Send + Sync, { setup_logging(); setup_backtrace(); @@ -69,7 +69,7 @@ pub async fn test_bed(handles: &[Arc>]) -> HashMap { +fn gen_peerid_map(handles: &[Arc>]) -> HashMap { let mut r_val = HashMap::new(); for handle in handles { r_val.insert(handle.peer_id(), handle.id()); @@ -79,7 +79,7 @@ fn gen_peerid_map(handles: &[Arc>]) -> HashMap(handles: &[Arc>]) { +pub async fn print_connections(handles: &[Arc>]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); for handle in handles.iter() { @@ -104,7 +104,7 @@ pub async fn spin_up_swarms( num_of_nodes: usize, timeout_len: Duration, num_bootstrap: usize, -) -> Result>>, TestError> { +) -> Result>>, TestError> { let mut handles = Vec::new(); let mut bootstrap_addrs = Vec::<(PeerId, Multiaddr)>::new(); let mut connecting_futs = Vec::new(); diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 85b7cb2a7f..eefbdcf37b 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -56,7 +56,7 @@ pub enum CounterMessage { #[instrument] pub async fn counter_handle_network_event( event: NetworkEvent, - handle: Arc>, + handle: Arc>, ) -> Result<(), NetworkNodeHandleError> { use CounterMessage::*; use NetworkEvent::*; @@ -121,8 +121,8 @@ pub async fn counter_handle_network_event( /// `requester_handle` asks for `requestee_handle`'s state, /// and then `requester_handle` updates its state to equal `requestee_handle`. async fn run_request_response_increment<'a>( - requester_handle: Arc>, - requestee_handle: Arc>, + requester_handle: Arc>, + requestee_handle: Arc>, timeout: Duration, ) -> Result<(), TestError> { async move { @@ -168,7 +168,7 @@ async fn run_request_response_increment<'a>( /// broadcasts `msg` from a randomly chosen handle /// then asserts that all nodes match `new_state` async fn run_gossip_round( - handles: &[Arc>], + handles: &[Arc>], msg: CounterMessage, new_state: CounterState, timeout_duration: Duration, @@ -234,7 +234,7 @@ async fn run_gossip_round( } async fn run_intersperse_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { for i in 0..NUM_ROUNDS as u32 { @@ -250,21 +250,18 @@ async fn run_intersperse_many_rounds( } async fn run_dht_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; } -async fn run_dht_one_round( - handles: Vec>>, - timeout: Duration, -) { +async fn run_dht_one_round(handles: Vec>>, timeout: Duration) { run_dht_rounds(&handles, timeout, 0, 1).await; } async fn run_request_response_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { for _i in 0..NUM_ROUNDS { @@ -276,7 +273,7 @@ async fn run_request_response_many_rounds( } pub async fn run_request_response_one_round( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; @@ -286,21 +283,21 @@ pub async fn run_request_response_one_round( } pub async fn run_gossip_many_rounds( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await } async fn run_gossip_one_round( - handles: Vec>>, + handles: Vec>>, timeout: Duration, ) { run_gossip_rounds(&handles, 1, 0, timeout).await } async fn run_dht_rounds( - handles: &[Arc>], + handles: &[Arc>], timeout: Duration, starting_val: usize, num_rounds: usize, @@ -336,7 +333,7 @@ async fn run_dht_rounds( /// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast async fn run_gossip_rounds( - handles: &[Arc>], + handles: &[Arc>], num_rounds: usize, starting_state: CounterState, timeout: Duration, @@ -361,7 +358,7 @@ async fn run_gossip_rounds( /// then has all other peers request its state /// and update their state to the recv'ed state async fn run_request_response_increment_all( - handles: &[Arc>], + handles: &[Arc>], timeout: Duration, ) { let mut rng = rand::thread_rng(); From 0f7de2b53ce073e912967fa7dfdac95d785d9b76 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 25 Sep 2023 17:35:17 -0700 Subject: [PATCH 0159/1393] Add Debug for MemoryNetowkrInner --- hotshot/src/traits/networking/libp2p_network.rs | 5 +++-- hotshot/src/traits/networking/memory_network.rs | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index ac949a331f..7654490e2a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -77,6 +77,7 @@ impl Debug for Libp2pNetwork { pub type PeerInfoVec = Arc, Multiaddr)>>>; /// The underlying state of the libp2p network +#[derive(Debug)] struct Libp2pNetworkInner { /// this node's public key pk: K, @@ -410,7 +411,7 @@ impl Libp2pNetwork { let handle = self.inner.handle.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); let node_type = self.inner.handle.config().node_type; - let metrics_connected_peers = self.inner.metrics.connected_peers.clone(); + let metrics_connected_peers = self.inner.clone(); async_spawn({ let is_ready = self.inner.is_ready.clone(); async move { @@ -440,7 +441,7 @@ impl Libp2pNetwork { .unwrap(); let connected_num = handle.num_connected().await?; - metrics_connected_peers.set(connected_num); + metrics_connected_peers.metrics.connected_peers.set(connected_num); while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 51f7bbbdf7..4add5dd222 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -87,6 +87,7 @@ enum Combo { } /// Internal state for a `MemoryNetwork` instance +#[derive(Debug)] struct MemoryNetworkInner { /// Input for broadcast messages broadcast_input: RwLock>>>, From 452c913bf0e4b24c519bfad3fd9ed3a27581ed88 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 25 Sep 2023 17:36:52 -0700 Subject: [PATCH 0160/1393] fmt check --- hotshot/src/traits/networking/libp2p_network.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 7654490e2a..c010f8a8da 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -441,7 +441,10 @@ impl Libp2pNetwork { .unwrap(); let connected_num = handle.num_connected().await?; - metrics_connected_peers.metrics.connected_peers.set(connected_num); + metrics_connected_peers + .metrics + .connected_peers + .set(connected_num); while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } From ca3e2ba6b729190fe6e9de26abe7e8149fac3080 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 25 Sep 2023 18:05:32 -0700 Subject: [PATCH 0161/1393] Address PR comments --- hotshot/src/demo.rs | 7 +++++-- task-impls/src/transactions.rs | 16 +++++++++++++--- testing/tests/da_task.rs | 6 +++++- testing/tests/network_task.rs | 5 ++++- types/src/block_impl.rs | 29 ++++++++++------------------- types/src/traits/block_contents.rs | 14 +++++++------- 6 files changed, 44 insertions(+), 33 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 468ff5e420..cec7120acd 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -101,9 +101,12 @@ impl TestableState for SDemoState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, - _padding: u64, + padding: u64, ) -> ::Transaction { - VIDTransaction(vec![0]) + if padding == 0 { + panic!("Padding should be nonzero for VID computation to work."); + } + VIDTransaction(vec![0; padding as usize]) } } /// Implementation of [`NodeType`] for [`VDemoNode`] diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 11475fdbcf..a9c8f9ad52 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -21,7 +21,6 @@ use hotshot_types::{ data::{SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, traits::{ - block_contents::Transaction, consensus_api::SequencingConsensusApi, election::{CommitteeExchangeType, ConsensusExchange}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, @@ -88,6 +87,9 @@ pub struct TransactionTaskState< pub id: u64, } +// We have two `TransactionTaskState` implementations with different bounds. The implementation +// here requires `TYPES: NodeType`, +// whereas it's just `TYPES: NodeType` in the second implementation. impl< TYPES: NodeType, I: NodeImplementation< @@ -245,15 +247,20 @@ where // TODO https://github.com/EspressoSystems/jellyfish/issues/375 let mut txns_flatten = Vec::new(); for txn in &txns { - txns_flatten.extend(txn.bytes()); + txns_flatten.extend(txn.0.clone()); } let vid_disperse = vid.disperse(&txns_flatten).unwrap(); - let block = VIDBlockPayload::new(txns, vid_disperse.commit); + let block = VIDBlockPayload { + transactions: txns, + commitment: vid_disperse.commit, + }; self.event_stream .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) .await; + // TODO (Keyao) Determine and update where to publish VidDisperseSend. + // self.event_stream .publish(SequencingHotShotEvent::VidDisperseSend( Proposal { @@ -284,6 +291,9 @@ where } } +// We have two `TransactionTaskState` implementations with different bounds. The implementation +// above requires `TYPES: NodeType`, +// whereas here it's just `TYPES: NodeType`. impl< TYPES: NodeType, I: NodeImplementation< diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index f1b1744884..e31c34e6aa 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -43,7 +43,11 @@ async fn test_da_task() { let txn = vec![0u8]; let vid_disperse = vid.disperse(&txn).unwrap(); let block_commitment = vid_disperse.commit; - let block = VIDBlockPayload::new(vec![VIDTransaction(txn)], block_commitment); + let block = VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: block_commitment, + }; + let signature = committee_exchange.sign_da_proposal(&block.commit()); let proposal = DAProposal { deltas: block.clone(), diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 3413bc4898..c4165a8c31 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -46,7 +46,10 @@ async fn test_network_task() { let txn = vec![0u8]; let vid_disperse = vid.disperse(&txn).unwrap(); let block_commitment = vid_disperse.commit; - let block = VIDBlockPayload::new(vec![VIDTransaction(txn)], block_commitment); + let block = VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: block_commitment, + }; let signature = committee_exchange.sign_da_proposal(&block.commit()); let da_proposal = Proposal { data: DAProposal { diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 03a9ca3737..2360cb3a24 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -38,11 +38,7 @@ impl Committable for VIDTransaction { } } -impl Transaction for VIDTransaction { - fn bytes(&self) -> Vec { - self.0.clone() - } -} +impl Transaction for VIDTransaction {} /// The error type for block payload. #[derive(Snafu, Debug)] @@ -69,29 +65,24 @@ pub struct VIDBlockPayload { } impl VIDBlockPayload { - /// Constructor. - #[must_use] - pub fn new( - transactions: Vec, - commitment: ::Commit, - ) -> Self { - Self { - transactions, - commitment, - } - } - - /// Create a genesis block payload with transaction bytes `vec![0]`. + /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for + /// consensus task initiation. /// # Panics /// If the `VidScheme` construction fails. #[must_use] pub fn genesis() -> Self { // TODO let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); let txn = vec![0]; let vid_disperse = vid.disperse(&txn).unwrap(); - VIDBlockPayload::new(vec![VIDTransaction(txn)], vid_disperse.commit) + VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: vid_disperse.commit, + } } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 4ba10f601c..2fc3383a14 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -13,6 +13,8 @@ use std::{ hash::Hash, }; +// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. +// /// Abstraction over the full contents of a block /// /// This trait encapsulates the behaviors that the transactions of a block must have in order to be @@ -45,12 +47,12 @@ pub trait BlockPayload: fn contained_transactions(&self) -> HashSet>; } +// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. +// /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { - /// Get the transaction bytes. - fn bytes(&self) -> Vec; } /// Dummy implementation of `BlockPayload` for unit tests @@ -64,6 +66,8 @@ pub mod dummy { pub use crate::traits::state::dummy::DummyState; use crate::traits::state::TestableBlock; + // TODO (Keyao) Investigate the use of DummyBlock. + // /// The dummy block #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)] pub struct DummyBlock { @@ -100,11 +104,7 @@ pub mod dummy { "DUMMY_TXN".to_string() } } - impl super::Transaction for DummyTransaction { - fn bytes(&self) -> Vec { - Vec::new() - } - } + impl super::Transaction for DummyTransaction {} impl std::error::Error for DummyError {} From f6b7d8a2389e24db34a532b117b33b67e5b09855 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 26 Sep 2023 10:41:47 -0400 Subject: [PATCH 0162/1393] nonzero base size for create_random_transaction --- hotshot/src/demo.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index cec7120acd..ca00c535f3 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -103,10 +103,8 @@ impl TestableState for SDemoState { _rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - if padding == 0 { - panic!("Padding should be nonzero for VID computation to work."); - } - VIDTransaction(vec![0; padding as usize]) + const RANDOM_TX_BASE_SIZE: usize = 8; + VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) } } /// Implementation of [`NodeType`] for [`VDemoNode`] From 040e047ba3e2f90a06cb39bb8b324de101e609c7 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 26 Sep 2023 10:53:57 -0400 Subject: [PATCH 0163/1393] Don't use libp2p cache by default (#1820) * don't use cache by default * clarifying comment --- hotshot/src/traits/networking/libp2p_network.rs | 4 ---- libp2p-networking/src/network/node/config.rs | 5 ++--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index ebb68e7d01..52ed2b73bc 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -198,8 +198,6 @@ where // setting to sane defaults .ttl(None) .republication_interval(None) - // this removes the cache for tests - .dht_cache_location(None) .build() .unwrap() } else { @@ -220,8 +218,6 @@ where // setting to sane defaults .ttl(None) .republication_interval(None) - // this removes the cache for tests - .dht_cache_location(None) .build() .unwrap() }; diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 82897c0de5..d97097e8e1 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -24,9 +24,8 @@ pub struct NetworkNodeConfig { #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, - /// location of the dht cache - /// default is "dht.cache" in the current directory - #[builder(default = "Some(\"dht.cache\".to_string())")] + /// location of the dht cache, default is None + #[builder(default = "None")] pub dht_cache_location: Option, #[builder(default)] From 4cc1de7f94a87e1cd4756400131dd9f724d55897 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 26 Sep 2023 11:27:49 -0400 Subject: [PATCH 0164/1393] fix build break after merge main --- task-impls/src/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e419115d90..4b0b46cfcf 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -307,7 +307,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] From 75b7f2175750163b8dbc69203ff3be49cb2f0f88 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 26 Sep 2023 11:55:31 -0400 Subject: [PATCH 0165/1393] Prune libp2p config file (#1819) * prune libp2p config file * fmt --- orchestrator/default-libp2p-run-config.toml | 7 +---- orchestrator/src/config.rs | 35 ++++++++++++--------- orchestrator/src/lib.rs | 8 ++--- 3 files changed, 24 insertions(+), 26 deletions(-) diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index 5757f4d9f9..a865225c8f 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -39,7 +39,6 @@ padding = 10 start_delay_seconds = 60 [libp2p_config] -num_bootstrap_nodes = 5 index_ports = true bootstrap_mesh_n_high = 4 bootstrap_mesh_n_low = 4 @@ -49,11 +48,7 @@ mesh_n_high = 4 mesh_n_low = 4 mesh_outbound_min = 2 mesh_n = 4 -next_view_timeout = 10 -propose_min_round_time = 0 -propose_max_round_time = 10 online_time = 10 -num_txn_per_round = 10 base_port = 9000 [config] @@ -68,7 +63,7 @@ timeout_ratio = [ ] round_start_delay = 1 start_delay = 1 -num_bootstrap = 4 +num_bootstrap = 5 [config.propose_min_round_time] secs = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 05a38b615c..86dc6c5ae2 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -8,7 +8,7 @@ use std::{ #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, - pub num_bootstrap_nodes: u64, + pub num_bootstrap_nodes: usize, pub public_ip: IpAddr, pub base_port: u16, pub node_index: u64, @@ -22,15 +22,14 @@ pub struct Libp2pConfig { pub mesh_outbound_min: usize, pub mesh_n: usize, pub next_view_timeout: u64, - pub propose_min_round_time: u64, - pub propose_max_round_time: u64, + pub propose_min_round_time: Duration, + pub propose_max_round_time: Duration, pub online_time: u64, - pub num_txn_per_round: u64, + pub num_txn_per_round: usize, } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfigFile { - pub num_bootstrap_nodes: u64, pub index_ports: bool, pub bootstrap_mesh_n_high: usize, pub bootstrap_mesh_n_low: usize, @@ -40,11 +39,7 @@ pub struct Libp2pConfigFile { pub mesh_n_low: usize, pub mesh_outbound_min: usize, pub mesh_n: usize, - pub next_view_timeout: u64, - pub propose_min_round_time: u64, - pub propose_max_round_time: u64, pub online_time: u64, - pub num_txn_per_round: u64, pub base_port: u16, } @@ -59,6 +54,10 @@ pub struct WebServerConfig { pub struct NetworkConfig { pub rounds: usize, pub transactions_per_round: usize, + pub num_bootrap: usize, + pub next_view_timeout: u64, + pub propose_min_round_time: Duration, + pub propose_max_round_time: Duration, pub node_index: u64, pub seed: [u8; 32], pub padding: usize, @@ -88,6 +87,10 @@ impl Default for NetworkConfig { web_server_config: None, da_web_server_config: None, _key_type_phantom: PhantomData, + next_view_timeout: 10, + num_bootrap: 5, + propose_min_round_time: Duration::from_secs(0), + propose_max_round_time: Duration::from_secs(10), } } } @@ -126,10 +129,14 @@ impl From for NetworkConfig { rounds: val.rounds, transactions_per_round: val.transactions_per_round, node_index: 0, + num_bootrap: val.config.num_bootstrap, + next_view_timeout: val.config.next_view_timeout, + propose_max_round_time: val.config.propose_max_round_time, + propose_min_round_time: val.config.propose_min_round_time, seed: val.seed, padding: val.padding, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { - num_bootstrap_nodes: libp2p_config.num_bootstrap_nodes, + num_bootstrap_nodes: val.config.num_bootstrap, index_ports: libp2p_config.index_ports, bootstrap_nodes: Vec::new(), public_ip: IpAddr::V4(Ipv4Addr::UNSPECIFIED), @@ -143,11 +150,11 @@ impl From for NetworkConfig { mesh_n_low: libp2p_config.mesh_n_low, mesh_outbound_min: libp2p_config.mesh_outbound_min, mesh_n: libp2p_config.mesh_n, - next_view_timeout: libp2p_config.next_view_timeout, - propose_min_round_time: libp2p_config.propose_min_round_time, - propose_max_round_time: libp2p_config.propose_max_round_time, + next_view_timeout: val.config.next_view_timeout, + propose_min_round_time: val.config.propose_min_round_time, + propose_max_round_time: val.config.propose_max_round_time, online_time: libp2p_config.online_time, - num_txn_per_round: libp2p_config.num_txn_per_round, + num_txn_per_round: val.transactions_per_round, }), config: val.config.into(), key_type_name: std::any::type_name::().to_string(), diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 93efa501e7..d3faa7d410 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -116,9 +116,7 @@ where if self.config.libp2p_config.clone().is_some() { let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); // Designate node as bootstrap node and store its identity information - if libp2p_config_clone.bootstrap_nodes.len() - < libp2p_config_clone.num_bootstrap_nodes.try_into().unwrap() - { + if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { let port_index = match libp2p_config_clone.index_ports { true => node_index, false => 0, @@ -145,9 +143,7 @@ where ) -> Result, ServerError> { if self.config.libp2p_config.is_some() { let libp2p_config = self.config.clone().libp2p_config.unwrap(); - if libp2p_config.bootstrap_nodes.len() - < libp2p_config.num_bootstrap_nodes.try_into().unwrap() - { + if libp2p_config.bootstrap_nodes.len() < libp2p_config.num_bootstrap_nodes { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, message: "Not enough bootstrap nodes have registered".to_string(), From 6bce469724a95d1d73644c0fb6793bb492d75941 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 26 Sep 2023 16:28:34 -0700 Subject: [PATCH 0166/1393] Integrate with jellyfish update, update parameters and add issues --- orchestrator/default-libp2p-run-config.toml | 4 +- orchestrator/default-run-config.toml | 2 + .../default-web-server-run-config.toml | 2 + orchestrator/src/config.rs | 2 +- task-impls/src/transactions.rs | 76 +++++++++---------- 5 files changed, 44 insertions(+), 42 deletions(-) diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index 5892d3d95e..c217d24d1c 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -69,8 +69,8 @@ num_bootstrap = 5 secs = 0 nanos = 0 +# TODO (Keyao) Clean up configuration parameters. +# [config.propose_max_round_time] -# TODO Update libp2p config to use consistent propose_max_round_time. -# secs = 2 nanos = 0 diff --git a/orchestrator/default-run-config.toml b/orchestrator/default-run-config.toml index b8a4b19478..ee8333f80a 100644 --- a/orchestrator/default-run-config.toml +++ b/orchestrator/default-run-config.toml @@ -55,6 +55,8 @@ num_bootstrap = 4 secs = 0 nanos = 0 +# TODO (Keyao) Clean up configuration parameters. +# [config.propose_max_round_time] secs = 2 nanos = 0 diff --git a/orchestrator/default-web-server-run-config.toml b/orchestrator/default-web-server-run-config.toml index 28229f4d76..c5d0bd0253 100644 --- a/orchestrator/default-web-server-run-config.toml +++ b/orchestrator/default-web-server-run-config.toml @@ -56,6 +56,8 @@ num_bootstrap = 4 secs = 0 nanos = 0 +# TODO (Keyao) Clean up configuration parameters. +# [config.propose_max_round_time] secs = 2 nanos = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 86dc6c5ae2..f61911d944 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -230,7 +230,7 @@ fn default_config() -> HotShotConfigFile { total_nodes: NonZeroUsize::new(10).unwrap(), committee_nodes: 5, max_transactions: NonZeroUsize::new(100).unwrap(), - min_transactions: 0, + min_transactions: 1, next_view_timeout: 10000, timeout_ratio: (11, 10), round_start_delay: 1, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4b0b46cfcf..c682f5f38d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -237,48 +237,46 @@ where drop(consensus); let txns = self.wait_for_transactions(parent_leaf).await?; + // TODO (Keyao) Determine whether to allow empty transaction when proposing a block. + // debug!("Prepare VID shares"); - if !txns.is_empty() { - // TODO https://github.com/EspressoSystems/HotShot/issues/1686 - let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - // TODO https://github.com/EspressoSystems/jellyfish/issues/375 - let mut txns_flatten = Vec::new(); - for txn in &txns { - txns_flatten.extend(txn.0.clone()); - } - let vid_disperse = vid.disperse(&txns_flatten).unwrap(); - let block = VIDBlockPayload { - transactions: txns, - commitment: vid_disperse.commit, - }; - - self.event_stream - .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) - .await; - - // TODO (Keyao) Determine and update where to publish VidDisperseSend. - // - self.event_stream - .publish(SequencingHotShotEvent::VidDisperseSend( - Proposal { - data: VidDisperse { - view_number: view + 1, - commitment: block.commit(), - shares: vid_disperse.shares, - common: vid_disperse.common, - }, - // TODO (Keyao) This is also signed in DA task. - signature: self - .committee_exchange - .sign_da_proposal(&block.commit()), - }, - // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 - self.committee_exchange.public_key().clone(), - )) - .await; + // TODO https://github.com/EspressoSystems/HotShot/issues/1686 + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + // TODO https://github.com/EspressoSystems/jellyfish/issues/375 + let mut txns_flatten = Vec::new(); + for txn in &txns { + txns_flatten.extend(txn.0.clone()); } + let vid_disperse = vid.disperse(&txns_flatten).unwrap(); + let block = VIDBlockPayload { + transactions: txns, + commitment: vid_disperse.commit, + }; + + self.event_stream + .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) + .await; + + // TODO (Keyao) Determine and update where to publish VidDisperseSend. + // + self.event_stream + .publish(SequencingHotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: view + 1, + commitment: block.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + // TODO (Keyao) This is also signed in DA task. + signature: self.committee_exchange.sign_da_proposal(&block.commit()), + }, + // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 + self.committee_exchange.public_key().clone(), + )) + .await; return None; } SequencingHotShotEvent::Shutdown => { From 63ecfb53fcc673a2b72b445a89ff8957000b4c89 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 27 Sep 2023 10:10:47 -0400 Subject: [PATCH 0167/1393] clippy appeasement --- hotshot/src/demo.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index cac2dcfc75..81cac76f4b 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -103,6 +103,7 @@ impl TestableState for SDemoState { _rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { + /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) } From c563a9c336669f397b62476d1f8d9f7b0293d3c1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 28 Sep 2023 00:57:55 -0700 Subject: [PATCH 0168/1393] basic metrics completed for networking metrics --- hotshot/examples/infra/modDA.rs | 5 +- hotshot/src/traits.rs | 1 + hotshot/src/traits/networking.rs | 147 ++++++++++++++++-- .../src/traits/networking/libp2p_network.rs | 12 +- .../src/traits/networking/memory_network.rs | 11 +- types/src/traits/metrics.rs | 3 +- 6 files changed, 155 insertions(+), 24 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index ae0f0e40f5..f888d575ab 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -7,7 +7,8 @@ use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - Libp2pCommChannel, Libp2pNetwork, MemoryStorage, WebCommChannel, WebServerNetwork, + Libp2pCommChannel, Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, + WebCommChannel, WebServerNetwork, }, NodeImplementation, }, @@ -701,7 +702,7 @@ where let node_config = config_builder.build().unwrap(); let underlying_quorum_network = Libp2pNetwork::new( - NoMetrics::boxed(), + NetworkingMetricsValue::new(), node_config, pubkey.clone(), Arc::new(RwLock::new( diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index a0e56cf86d..7d20468b44 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -17,6 +17,7 @@ pub mod implementations { memory_network::{DummyReliability, MasterMap, MemoryCommChannel, MemoryNetwork}, web_server_libp2p_fallback::{CombinedNetworks, WebServerWithFallbackCommChannel}, web_server_network::{WebCommChannel, WebServerNetwork}, + NetworkingMetricsValue, }, storage::memory_storage::MemoryStorage, // atomic_storage::AtomicStorage, }; diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 3f197d6f94..aeaca0d815 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -9,17 +9,24 @@ pub mod libp2p_network; pub mod memory_network; pub mod web_server_libp2p_fallback; pub mod web_server_network; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use custom_debug::Debug; -use hotshot_types::traits::metrics::{Counter, Gauge, Metrics}; +use hotshot_types::traits::metrics::{Counter, Gauge, Histogram, Label, Metrics}; pub use hotshot_types::traits::network::{ ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, }; -/// Contains the metrics that we're interested in from the networking interfaces +/// Contains several `NetworkingMetrics` that we're interested in from the networking interfaces #[derive(Clone, Debug)] -pub struct NetworkingMetrics { +pub struct NetworkingMetricsValue { #[allow(dead_code)] + /// The values that are being tracked + pub values: Arc>, /// A [`Gauge`] which tracks how many peers are connected pub connected_peers: Box, /// A [`Counter`] which tracks how many messages have been received @@ -36,10 +43,130 @@ pub struct NetworkingMetrics { // pub kademlia_buckets: Box, } +/// The wrapper with a string name for the networking metrics +#[derive(Clone, Debug)] +pub struct NetworkingMetrics { + /// a prefix which tracks the name of the metric + prefix: String, + /// a map of values + values: Arc>, +} + +/// the set of counters and gauges for the networking metrics +#[derive(Clone, Debug, Default)] +pub struct InnerNetworkingMetrics { + /// All the counters of the networking metrics + counters: HashMap, + /// All the gauges of the networking metrics + gauges: HashMap, + /// All the histograms of the networking metrics + histograms: HashMap>, + /// All the labels of the networking metrics + labels: HashMap, +} + impl NetworkingMetrics { - /// Create a new instance of this [`NetworkingMetrics`] struct, setting all the counters and gauges - pub(self) fn new(metrics: &dyn Metrics) -> Self { + /// For the creation and naming of gauge, counter, histogram and label. + pub fn sub(&self, name: String) -> Self { + let prefix = if self.prefix.is_empty() { + name + } else { + format!("{}-{name}", self.prefix) + }; Self { + prefix, + values: Arc::clone(&self.values), + } + } +} + +impl Metrics for NetworkingMetrics { + fn create_counter(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_gauge(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_histogram(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_label(&self, label: String) -> Box { + Box::new(self.sub(label)) + } + + fn subgroup(&self, subgroup_name: String) -> Box { + Box::new(self.sub(subgroup_name)) + } +} + +impl Counter for NetworkingMetrics { + fn add(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .counters + .entry(self.prefix.clone()) + .or_default() += amount; + } +} + +impl Gauge for NetworkingMetrics { + fn set(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .gauges + .entry(self.prefix.clone()) + .or_default() = amount; + } + fn update(&self, delta: i64) { + let mut values = self.values.lock().unwrap(); + let value = values.gauges.entry(self.prefix.clone()).or_default(); + let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); + *value = usize::try_from(signed_value + delta).unwrap_or(0); + } +} + +impl Histogram for NetworkingMetrics { + fn add_point(&self, point: f64) { + self.values + .lock() + .unwrap() + .histograms + .entry(self.prefix.clone()) + .or_default() + .push(point); + } +} + +impl Label for NetworkingMetrics { + fn set(&self, value: String) { + *self + .values + .lock() + .unwrap() + .labels + .entry(self.prefix.clone()) + .or_default() = value; + } +} + +impl NetworkingMetricsValue { + /// Create a new instance of this [`NetworkingMetricsValue`] struct, setting all the counters and gauges + #[must_use] + pub fn new() -> Self { + let values = Arc::default(); + let metrics: Box = Box::new(NetworkingMetrics { + prefix: String::new(), + values: Arc::clone(&values), + }); + Self { + values, connected_peers: metrics.create_gauge(String::from("connected_peers"), None), incoming_message_count: metrics .create_counter(String::from("incoming_message_count"), None), @@ -47,10 +174,12 @@ impl NetworkingMetrics { .create_counter(String::from("outgoing_message_count"), None), message_failed_to_send: metrics .create_counter(String::from("message_failed_to_send"), None), - // gossipsub_mesh_connected: metrics - // .create_gauge(String::from("gossipsub_mesh_connected"), None), - // kademlia_entries: metrics.create_gauge(String::from("kademlia_entries"), None), - // kademlia_buckets: metrics.create_gauge(String::from("kademlia_buckets"), None), } } } + +impl Default for NetworkingMetricsValue { + fn default() -> Self { + Self::new() + } +} diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c010f8a8da..2ef23e5f2a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1,7 +1,7 @@ //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network -use super::NetworkingMetrics; +use super::NetworkingMetricsValue; use crate::NodeImplementation; use async_compatibility_layer::{ art::{async_block_on, async_sleep, async_spawn, async_timeout}, @@ -18,7 +18,6 @@ use hotshot_types::{ message::{Message, MessageKind}, traits::{ election::Membership, - metrics::{Metrics, NoMetrics}, network::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, NetworkMsg, TestableChannelImplementation, @@ -107,7 +106,7 @@ struct Libp2pNetworkInner { /// whether or not we've bootstrapped into the DHT yet is_bootstrapped: Arc, /// The networking metrics we're keeping track of - metrics: NetworkingMetrics, + metrics: NetworkingMetricsValue, /// topic map /// hash(hashset) -> topic /// btreemap ordered so is hashable @@ -228,7 +227,7 @@ where let da = da_keys.clone(); async_block_on(async move { Libp2pNetwork::new( - NoMetrics::boxed(), + NetworkingMetricsValue::new(), config, pubkey, bootstrap_addrs_ref, @@ -275,7 +274,7 @@ impl Libp2pNetwork { /// This will panic if there are less than 5 bootstrap nodes #[allow(clippy::too_many_arguments)] pub async fn new( - metrics: Box, + metrics: NetworkingMetricsValue, config: NetworkNodeConfig, pk: K, bootstrap_addrs: Arc, Multiaddr)>>>, @@ -333,7 +332,7 @@ impl Libp2pNetwork { is_ready: Arc::new(AtomicBool::new(false)), dht_timeout: Duration::from_secs(30), is_bootstrapped: Arc::new(AtomicBool::new(false)), - metrics: NetworkingMetrics::new(&*metrics), + metrics, topic_map, node_lookup_send, cache_gc_shutdown_send, @@ -445,6 +444,7 @@ impl Libp2pNetwork { .metrics .connected_peers .set(connected_num); + while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 4add5dd222..6a53082872 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -3,7 +3,7 @@ //! This module provides an in-memory only simulation of an actual network, useful for unit and //! integration tests. -use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetrics}; +use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; use crate::NodeImplementation; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, @@ -19,7 +19,6 @@ use hotshot_types::{ message::{Message, MessageKind}, traits::{ election::Membership, - metrics::{Metrics, NoMetrics}, network::{ CommunicationChannel, ConnectedNetwork, NetworkMsg, TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, ViewMessage, @@ -104,7 +103,7 @@ struct MemoryNetworkInner { in_flight_message_count: AtomicUsize, /// The networking metrics we're keeping track of - metrics: NetworkingMetrics, + metrics: NetworkingMetricsValue, } /// In memory only network simulator. @@ -133,7 +132,7 @@ impl MemoryNetwork { #[instrument(skip(metrics))] pub fn new( pub_key: K, - metrics: Box, + metrics: NetworkingMetricsValue, master_map: Arc>, reliability_config: Option>, ) -> MemoryNetwork { @@ -249,7 +248,7 @@ impl MemoryNetwork { direct_output: Mutex::new(direct_output), master_map: master_map.clone(), in_flight_message_count, - metrics: NetworkingMetrics::new(&*metrics), + metrics, }), }; master_map.map.insert(pub_key, mn.clone()); @@ -302,7 +301,7 @@ impl> Box::new(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - MemoryNetwork::new(pubkey, NoMetrics::boxed(), master.clone(), None) + MemoryNetwork::new(pubkey, NetworkingMetricsValue::new(), master.clone(), None) }) } diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index e326fca9bc..fc69b5c077 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -10,7 +10,7 @@ use dyn_clone::DynClone; use std::fmt::Debug; /// The metrics type. -pub trait Metrics: Send + Sync { +pub trait Metrics: Send + Sync + DynClone + Debug { /// Create a [`Counter`] with an optional `unit_label`. /// /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" @@ -103,6 +103,7 @@ pub trait Label: Send + Sync + DynClone { /// Set the label value fn set(&self, value: String); } +dyn_clone::clone_trait_object!(Metrics); dyn_clone::clone_trait_object!(Gauge); dyn_clone::clone_trait_object!(Counter); dyn_clone::clone_trait_object!(Histogram); From 1dbb48dc0819266c1b321971b0f6ee4011934652 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 28 Sep 2023 14:46:20 -0400 Subject: [PATCH 0169/1393] Fix transactions (#1825) * incorporate VID transaction commitments into hash * remove commitment calculation * fix number of transactions to send * split out function * generate transactions JIT --- hotshot/examples/infra/modDA.rs | 92 +++++++++------------ hotshot/src/demo.rs | 6 +- orchestrator/default-libp2p-run-config.toml | 2 +- 3 files changed, 45 insertions(+), 55 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 82cc12bf68..ef188a7849 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -47,6 +47,8 @@ use libp2p_networking::{ network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, reexport::Multiaddr, }; +use rand::rngs::StdRng; +use rand::SeedableRng; use std::{collections::BTreeSet, sync::Arc}; use std::{num::NonZeroUsize, str::FromStr}; // use libp2p::{ @@ -59,9 +61,9 @@ use std::{num::NonZeroUsize, str::FromStr}; // }; use libp2p_identity::PeerId; // use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; +use std::{fmt::Debug, net::Ipv4Addr}; use std::{ //collections::{BTreeSet, VecDeque}, - collections::VecDeque, //fs, mem, net::IpAddr, @@ -71,7 +73,6 @@ use std::{ //time::{Duration, Instant}, time::Instant, }; -use std::{fmt::Debug, net::Ipv4Addr}; //use surf_disco::error::ClientError; //use surf_disco::Client; use tracing::{debug, error, info, warn}; @@ -125,6 +126,19 @@ pub async fn run_orchestrator_da< .await; } +/// Helper function to calculate the nuymber of transactions to send per node per round +fn calculate_num_tx_per_round( + node_index: u64, + total_num_nodes: usize, + transactions_per_round: usize, +) -> usize { + if node_index == 0 { + transactions_per_round / total_num_nodes + transactions_per_round % total_num_nodes + } else { + transactions_per_round / total_num_nodes + } +} + /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] pub trait RunDA< @@ -254,38 +268,23 @@ pub trait RunDA< } = self.get_config(); let size = mem::size_of::(); - let adjusted_padding = if padding < size { 0 } else { padding - size }; - let mut txns: VecDeque = VecDeque::new(); - - // TODO ED: In the future we should have each node generate transactions every round to simulate a more realistic network - let tx_to_gen = transactions_per_round * rounds * 3; - { - let mut txn_rng = rand::thread_rng(); - for _ in 0..tx_to_gen { - let txn = - <::StateType as TestableState>::create_random_transaction( - None, - &mut txn_rng, - padding as u64, - ); - txns.push_back(txn); - } - } - debug!("Generated {} transactions", tx_to_gen); + let padding = padding.saturating_sub(size); + let mut txn_rng = StdRng::seed_from_u64(node_index); - debug!("Adjusted padding size is {:?} bytes", adjusted_padding); - let mut round = 0; - let mut total_transactions = 0; + debug!("Adjusted padding size is {:?} bytes", padding); - let start = Instant::now(); + let mut total_transactions_committed = 0; + let mut total_transactions_sent = 0; + let transactions_to_send_per_round = + calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); info!("Starting hotshot!"); + let start = Instant::now(); + let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; - let total_nodes_u64 = total_nodes.get() as u64; - context.hotshot.start_consensus().await; loop { @@ -314,8 +313,20 @@ pub trait RunDA< } } + // send transactions + for _ in 0..transactions_to_send_per_round { + let txn = + <::StateType as TestableState>::create_random_transaction( + None, + &mut txn_rng, + padding as u64, + ); + _ = context.submit_transaction(txn).await.unwrap(); + total_transactions_sent += 1; + } + if let Some(size) = block_size { - total_transactions += size; + total_transactions_committed += size; } num_successful_commits += leaf_chain.len(); @@ -334,39 +345,16 @@ pub trait RunDA< EventType::NextLeaderViewTimeout { view_number } => { warn!("Timed out as the next leader in view {:?}", view_number); } - EventType::ViewFinished { view_number } => { - if *view_number > round { - round = *view_number; - info!("view finished: {:?}", view_number); - for _ in 0..transactions_per_round { - if node_index >= total_nodes_u64 - 10 { - let txn = txns.pop_front().unwrap(); - - debug!("Submitting txn on round {}", round); - - let result = context.submit_transaction(txn).await; - - if result.is_err() { - error! ( - "Could not send transaction to web server on round {}", - round - ) - } - } - } - } - } + EventType::ViewFinished { view_number: _ } => {} _ => unimplemented!(), } } } - - round += 1; } // Output run results let total_time_elapsed = start.elapsed(); - error!("{rounds} rounds completed in {total_time_elapsed:?} - Total transactions committed: {total_transactions} - Total commitments: {num_successful_commits}"); + error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); } /// Returns the da network for this run diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 81cac76f4b..97509c3b21 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -100,12 +100,14 @@ impl State for SDemoState { impl TestableState for SDemoState { fn create_random_transaction( _state: Option<&Self>, - _rng: &mut dyn rand::RngCore, + rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; - VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) + let mut bytes = vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]; + rng.fill_bytes(&mut bytes); + VIDTransaction(bytes) } } /// Implementation of [`NodeType`] for [`VDemoNode`] diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index c217d24d1c..a353ed06f5 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -1,5 +1,5 @@ rounds = 10 -transactions_per_round = 10 +transactions_per_round = 12 node_index = 0 seed = [ 0, From 8145a9bded6dda7e344dd5af3da78f1de9ef1d33 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 28 Sep 2023 22:30:49 -0700 Subject: [PATCH 0170/1393] revoke initialization of consensus metrics, no more NoMetrics Initialization --- hotshot/examples/infra/modDA.rs | 4 +- hotshot/src/lib.rs | 19 ++--- testing/src/task_helpers.rs | 5 +- testing/src/test_runner.rs | 4 +- types/src/consensus.rs | 144 ++++++++++++++++++++++++++++++-- 5 files changed, 150 insertions(+), 26 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index f888d575ab..a9c0402392 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -23,6 +23,7 @@ use hotshot_orchestrator::{ use hotshot_task::task::FilterEvent; use hotshot_types::HotShotConfig; use hotshot_types::{ + consensus::ConsensusMetricsValue, certificate::ViewSyncCertificate, data::{QuorumProposal, SequencingLeaf, TestableLeaf}, event::{Event, EventType}, @@ -31,7 +32,6 @@ use hotshot_types::{ election::{ CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, }, - metrics::NoMetrics, network::CommunicationChannel, node_implementation::{ CommitteeEx, ExchangesType, NodeType, QuorumEx, SequencingExchanges, @@ -235,7 +235,7 @@ pub trait RunDA< MemoryStorage::empty(), exchanges, initializer, - NoMetrics::boxed(), + ConsensusMetricsValue::new(), ) .await .expect("Could not init hotshot") diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 117153967e..f9bda9ae4c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -57,7 +57,7 @@ use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKin use hotshot_types::{ certificate::{DACertificate, ViewSyncCertificate}, - consensus::{BlockStore, Consensus, ConsensusMetrics, View, ViewInner, ViewQueue}, + consensus::{BlockStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::{DAProposal, DeltasType, LeafType, QuorumProposal, SequencingLeaf}, error::StorageSnafu, message::{ @@ -67,7 +67,6 @@ use hotshot_types::{ traits::{ consensus_api::{ConsensusSharedApi, SequencingConsensusApi}, election::{ConsensusExchange, Membership, SignedCertificate}, - metrics::Metrics, network::{CommunicationChannel, NetworkError}, node_implementation::{ ChannelMaps, CommitteeEx, ExchangesType, NodeType, SendToTasks, SequencingQuorumEx, @@ -129,8 +128,8 @@ pub struct SystemContextInner> { /// Sender for [`Event`]s event_sender: RwLock>>>, - /// a reference to the metrics that the implementor is using. - _metrics: Box, + /// the metrics that the implementor is using. + _metrics: Arc, /// Transactions /// (this is shared btwn hotshot and `Consensus`) @@ -179,13 +178,11 @@ impl> SystemContext { storage: I::Storage, exchanges: I::Exchanges, initializer: HotShotInitializer, - metrics: Box, + metrics: ConsensusMetricsValue, ) -> Result> { debug!("Creating a new hotshot"); - let consensus_metrics = Arc::new(ConsensusMetrics::new( - &*metrics.subgroup("consensus".to_string()), - )); + let consensus_metrics = Arc::new(metrics); let anchored_leaf = initializer.inner; // insert to storage @@ -226,7 +223,7 @@ impl> SystemContext { // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), high_qc: anchored_leaf.get_justify_qc(), - metrics: consensus_metrics, + metrics: consensus_metrics.clone(), invalid_qc: 0, }; let consensus = Arc::new(RwLock::new(consensus)); @@ -244,7 +241,7 @@ impl> SystemContext { storage, exchanges: Arc::new(exchanges), event_sender: RwLock::default(), - _metrics: metrics, + _metrics: consensus_metrics.clone(), internal_event_stream: ChannelStream::new(), output_event_stream: ChannelStream::new(), }); @@ -403,7 +400,7 @@ impl> SystemContext { storage: I::Storage, exchanges: I::Exchanges, initializer: HotShotInitializer, - metrics: Box, + metrics: ConsensusMetricsValue, ) -> Result< ( SystemContextHandle, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8e0f85a95c..515e48ba60 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,11 +18,10 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusSharedApi, election::{ConsensusExchange, Membership, SignedCertificate}, - metrics::NoMetrics, node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, signature_key::EncodedSignature, state::ConsensusTime, - }, + }, consensus::ConsensusMetricsValue, }; pub async fn build_system_handle( @@ -81,7 +80,7 @@ pub async fn build_system_handle( storage, exchanges, initializer, - NoMetrics::boxed(), + ConsensusMetricsValue::new(), ) .await .expect("Could not init hotshot") diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 056f0f8a26..cf6932a390 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -12,10 +12,10 @@ use hotshot_task::{ event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, }; use hotshot_types::{ + consensus::ConsensusMetricsValue, message::Message, traits::{ election::{ConsensusExchange, Membership}, - metrics::NoMetrics, network::CommunicationChannel, node_implementation::{ExchangesType, NodeType, QuorumCommChannel, QuorumEx}, signature_key::SignatureKey, @@ -276,7 +276,7 @@ where storage, exchanges, initializer, - NoMetrics::boxed(), + ConsensusMetricsValue::new(), ) .await .expect("Could not init hotshot") diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 0f9a825b87..b73e93b22c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,7 +11,7 @@ use crate::{ data::LeafType, error::HotShotError, traits::{ - metrics::{Counter, Gauge, Histogram, Metrics}, + metrics::{Counter, Gauge, Histogram, Metrics, Label}, node_implementation::NodeType, }, }; @@ -19,7 +19,7 @@ use commit::{Commitment, Committable}; use derivative::Derivative; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, - sync::Arc, + sync::{Arc, Mutex}, }; use tracing::error; @@ -65,16 +65,19 @@ pub struct Consensus> { /// A reference to the metrics trait #[debug(skip)] - pub metrics: Arc, + pub metrics: Arc, /// Amount of invalid QCs we've seen since the last commit /// Used for metrics. This resets to 0 on every decide event. pub invalid_qc: usize, } -/// The metrics being collected for the consensus algorithm -#[derive(Debug)] -pub struct ConsensusMetrics { +/// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces +#[derive(Clone, Debug)] +pub struct ConsensusMetricsValue { + #[allow(dead_code)] + /// The values that are being tracked + pub values: Arc>, /// The current view pub current_view: Box, /// The duration to collect votes in a view (only applies when this insance is the leader) @@ -115,11 +118,130 @@ pub struct ConsensusMetrics { pub failed_to_send_messages: Box, } +/// The wrapper with a string name for the networking metrics +#[derive(Clone, Debug)] +pub struct ConsensusMetrics { + /// a prefix which tracks the name of the metric + prefix: String, + /// a map of values + values: Arc>, +} + +/// the set of counters and gauges for the networking metrics +#[derive(Clone, Debug, Default)] +pub struct InnerConsensusMetrics { + /// All the counters of the networking metrics + counters: HashMap, + /// All the gauges of the networking metrics + gauges: HashMap, + /// All the histograms of the networking metrics + histograms: HashMap>, + /// All the labels of the networking metrics + labels: HashMap, +} + impl ConsensusMetrics { - /// Create a new instance of this [`ConsensusMetrics`] struct, setting all the counters and gauges + /// For the creation and naming of gauge, counter, histogram and label. + pub fn sub(&self, name: String) -> Self { + let prefix = if self.prefix.is_empty() { + name + } else { + format!("{}-{name}", self.prefix) + }; + Self { + prefix, + values: Arc::clone(&self.values), + } + } +} + +impl Metrics for ConsensusMetrics { + fn create_counter(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_gauge(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_histogram(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_label(&self, label: String) -> Box { + Box::new(self.sub(label)) + } + + fn subgroup(&self, subgroup_name: String) -> Box { + Box::new(self.sub(subgroup_name)) + } +} + +impl Counter for ConsensusMetrics { + fn add(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .counters + .entry(self.prefix.clone()) + .or_default() += amount; + } +} + +impl Gauge for ConsensusMetrics { + fn set(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .gauges + .entry(self.prefix.clone()) + .or_default() = amount; + } + fn update(&self, delta: i64) { + let mut values = self.values.lock().unwrap(); + let value = values.gauges.entry(self.prefix.clone()).or_default(); + let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); + *value = usize::try_from(signed_value + delta).unwrap_or(0); + } +} + +impl Histogram for ConsensusMetrics { + fn add_point(&self, point: f64) { + self.values + .lock() + .unwrap() + .histograms + .entry(self.prefix.clone()) + .or_default() + .push(point); + } +} + +impl Label for ConsensusMetrics { + fn set(&self, value: String) { + *self + .values + .lock() + .unwrap() + .labels + .entry(self.prefix.clone()) + .or_default() = value; + } +} + +impl ConsensusMetricsValue { + /// Create a new instance of this [`ConsensusMetricsValue`] struct, setting all the counters and gauges #[must_use] - pub fn new(metrics: &dyn Metrics) -> Self { + pub fn new() -> Self { + let values = Arc::default(); + let metrics: Box = Box::new(ConsensusMetrics { + prefix: String::new(), + values: Arc::clone(&values), + }); Self { + values, current_view: metrics.create_gauge(String::from("current_view"), None), vote_validate_duration: metrics.create_histogram( String::from("vote_validate_duration"), @@ -166,6 +288,12 @@ impl ConsensusMetrics { } } +impl Default for ConsensusMetricsValue { + fn default() -> Self { + Self::new() + } +} + impl> Consensus { /// increment the current view /// NOTE may need to do gc here From bad0b2ce685aada09a67dd565cc85551ad751baa Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 28 Sep 2023 22:38:52 -0700 Subject: [PATCH 0171/1393] fix lint --- hotshot/examples/infra/modDA.rs | 2 +- testing/src/task_helpers.rs | 3 ++- types/src/consensus.rs | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index a9c0402392..2b63a8a794 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -23,8 +23,8 @@ use hotshot_orchestrator::{ use hotshot_task::task::FilterEvent; use hotshot_types::HotShotConfig; use hotshot_types::{ - consensus::ConsensusMetricsValue, certificate::ViewSyncCertificate, + consensus::ConsensusMetricsValue, data::{QuorumProposal, SequencingLeaf, TestableLeaf}, event::{Event, EventType}, message::{Message, SequencingMessage}, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 515e48ba60..7418028b53 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,6 +13,7 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ + consensus::ConsensusMetricsValue, data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ @@ -21,7 +22,7 @@ use hotshot_types::{ node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, signature_key::EncodedSignature, state::ConsensusTime, - }, consensus::ConsensusMetricsValue, + }, }; pub async fn build_system_handle( diff --git a/types/src/consensus.rs b/types/src/consensus.rs index b73e93b22c..6ba49fd211 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,7 +11,7 @@ use crate::{ data::LeafType, error::HotShotError, traits::{ - metrics::{Counter, Gauge, Histogram, Metrics, Label}, + metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, }, }; @@ -141,6 +141,7 @@ pub struct InnerConsensusMetrics { } impl ConsensusMetrics { + #[must_use] /// For the creation and naming of gauge, counter, histogram and label. pub fn sub(&self, name: String) -> Self { let prefix = if self.prefix.is_empty() { From 98e062876093c150d1a54e11a50901cf6d190dc3 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 29 Sep 2023 15:26:13 -0400 Subject: [PATCH 0172/1393] Fix/re-enable memory network tests (#1831) --- .../src/traits/networking/memory_network.rs | 356 ----------------- testing/tests/memory_network.rs | 368 ++++++++++++++++++ 2 files changed, 368 insertions(+), 356 deletions(-) create mode 100644 testing/tests/memory_network.rs diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 51f7bbbdf7..976186c030 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -574,359 +574,3 @@ impl, MEMBERSHIP: Membership; - // type Transaction = VDemoTransaction; - // type ElectionConfigType = StaticElectionConfig; - // type StateType = VDemoState; - // } - // - // type TestMembership = GeneralStaticCommittee; - // type TestNetwork = MemoryCommChannel; - // - // impl NodeImplementation for TestImpl { - // type ConsensusMessage = ValidatingMessage; - // type Exchanges = ValidatingExchanges< - // Test, - // Message, - // QuorumExchange< - // Test, - // TestLeaf, - // TestProposal, - // TestMembership, - // TestNetwork, - // Message, - // >, - // ViewSyncExchange>, - // >; - // type Leaf = TestLeaf; - // type Storage = MemoryStorage; - // - // fn new_channel_maps( - // start_view: ViewNumber, - // ) -> (ChannelMaps, Option>) { - // (ChannelMaps::new(start_view), None) - // } - // } - // - // type TestLeaf = ValidatingLeaf; - // type TestVote = QuorumVote; - // type TestProposal = ValidatingProposal; - // - // /// fake Eq - // /// we can't compare the votetokentype for equality, so we can't - // /// derive EQ on `VoteType` and thereby message - // /// we are only sending data messages, though so we compare key and - // /// data message - // fn fake_message_eq(message_1: Message, message_2: Message) { - // assert_eq!(message_1.sender, message_2.sender); - // if let MessageKind::Data(DataMessage::SubmitTransaction(d_1, _)) = message_1.kind { - // if let MessageKind::Data(DataMessage::SubmitTransaction(d_2, _)) = message_2.kind { - // assert_eq!(d_1, d_2); - // } - // } else { - // panic!("Got unexpected message type in memory test!"); - // } - // } - // - // #[instrument] - // fn get_pubkey() -> Ed25519Pub { - // let priv_key = Ed25519Priv::generate(); - // Ed25519Pub::from_private(&priv_key) - // } - // - // /// create a message - // fn gen_messages(num_messages: u64, seed: u64, pk: Ed25519Pub) -> Vec> { - // let mut messages = Vec::new(); - // for i in 0..num_messages { - // let message = Message { - // sender: pk, - // kind: MessageKind::Data(DataMessage::SubmitTransaction( - // VDemoTransaction { - // add: Addition { - // account: "A".to_string(), - // amount: 50 + i + seed, - // }, - // sub: Subtraction { - // account: "B".to_string(), - // amount: 50 + i + seed, - // }, - // nonce: seed + i, - // padding: vec![50; 0], - // }, - // ::new(0), - // )), - // _phantom: PhantomData, - // }; - // messages.push(message); - // } - // messages - // } - // - // // Spawning a single MemoryNetwork should produce no errors - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[instrument] - // async fn spawn_single() { - // setup_logging(); - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key = get_pubkey(); - // let _network = MemoryNetwork::new(pub_key, NoMetrics::boxed(), group, Option::None); - // } - // - // // // Spawning a two MemoryNetworks and connecting them should produce no errors - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[instrument] - // async fn spawn_double() { - // setup_logging(); - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key_1 = get_pubkey(); - // let _network_1 = - // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // let pub_key_2 = get_pubkey(); - // let _network_2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // } - // - // // Check to make sure direct queue works - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[allow(deprecated)] - // #[instrument] - // async fn direct_queue() { - // setup_logging(); - // // Create some dummy messages - // - // // Make and connect the networking instances - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key_1 = get_pubkey(); - // let network1 = - // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // let pub_key_2 = get_pubkey(); - // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // - // let first_messages: Vec> = gen_messages(5, 100, pub_key_1); - // - // // Test 1 -> 2 - // // Send messages - // for sent_message in first_messages { - // network1 - // .direct_message(sent_message.clone(), pub_key_2) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network2 - // .recv_msgs(TransmitType::Direct) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // - // let second_messages: Vec> = gen_messages(5, 200, pub_key_2); - // - // // Test 2 -> 1 - // // Send messages - // for sent_message in second_messages { - // network2 - // .direct_message(sent_message.clone(), pub_key_1) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network1 - // .recv_msgs(TransmitType::Direct) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // } - // - // // Check to make sure direct queue works - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[allow(deprecated)] - // #[instrument] - // async fn broadcast_queue() { - // setup_logging(); - // // Make and connect the networking instances - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key_1 = get_pubkey(); - // let network1 = - // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // let pub_key_2 = get_pubkey(); - // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // - // let first_messages: Vec> = gen_messages(5, 100, pub_key_1); - // - // // Test 1 -> 2 - // // Send messages - // for sent_message in first_messages { - // network1 - // .broadcast_message( - // sent_message.clone(), - // vec![pub_key_2].into_iter().collect::>(), - // ) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network2 - // .recv_msgs(TransmitType::Broadcast) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // - // let second_messages: Vec> = gen_messages(5, 200, pub_key_2); - // - // // Test 2 -> 1 - // // Send messages - // for sent_message in second_messages { - // network2 - // .broadcast_message( - // sent_message.clone(), - // vec![pub_key_1].into_iter().collect::>(), - // ) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network1 - // .recv_msgs(TransmitType::Broadcast) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // } - // - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[instrument] - // #[allow(deprecated)] - // async fn test_in_flight_message_count() { - // // setup_logging(); - // - // // let group: Arc, ::SignatureKey>> = - // // MasterMap::new(); - // // trace!(?group); - // // let pub_key_1 = get_pubkey(); - // // let network1 = - // // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // // let pub_key_2 = get_pubkey(); - // // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // - // // // Create some dummy messages - // // let messages: Vec> = gen_messages(5, 100, pub_key_1); - // - // // // assert_eq!(network1.in_flight_message_count(), Some(0)); - // // // assert_eq!(network2.in_flight_message_count(), Some(0)); - // - // // for (_count, message) in messages.iter().enumerate() { - // // network1 - // // .direct_message(message.clone(), pub_key_2) - // // .await - // // .unwrap(); - // // // network 2 has received `count` broadcast messages and `count + 1` direct messages - // // // assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); - // - // // // network2.broadcast_message(message.clone()).await.unwrap(); - // // // network 1 has received `count` broadcast messages - // // // assert_eq!(network1.in_flight_message_count(), Some(count + 1)); - // - // // // network 2 has received `count + 1` broadcast messages and `count + 1` direct messages - // // // assert_eq!(network2.in_flight_message_count(), Some((count + 1) * 2)); - // // } - // - // // for _count in (0..messages.len()).rev() { - // // network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); - // // // assert_eq!(network1.in_flight_message_count(), Some(count)); - // - // // network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); - // // network2.recv_msgs(TransmitType::Direct).await.unwrap(); - // // // assert_eq!(network2.in_flight_message_count(), Some(count * 2)); - // // } - // - // // // assert_eq!(network1.in_flight_message_count(), Some(0)); - // // // assert_eq!(network2.in_flight_message_count(), Some(0)); - // } -} diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs new file mode 100644 index 0000000000..d1a231a2c0 --- /dev/null +++ b/testing/tests/memory_network.rs @@ -0,0 +1,368 @@ +use std::collections::BTreeSet; +use std::marker::PhantomData; +use std::sync::Arc; + +use async_compatibility_layer::logging::setup_logging; +use hotshot::demo::SDemoState; +use hotshot::traits::election::static_committee::{ + GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, +}; +use hotshot::traits::implementations::{ + MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, +}; +use hotshot::traits::NodeImplementation; +use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; +use hotshot::types::SignatureKey; +use hotshot_types::block_impl::{VIDBlockPayload, VIDTransaction}; +use hotshot_types::certificate::ViewSyncCertificate; +use hotshot_types::data::{DAProposal, QuorumProposal, SequencingLeaf}; +use hotshot_types::message::{Message, SequencingMessage}; +use hotshot_types::traits::election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}; +use hotshot_types::traits::metrics::NoMetrics; +use hotshot_types::traits::network::TestableNetworkingImplementation; +use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; +use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType, SequencingExchanges}; +use hotshot_types::vote::{DAVote, ViewSyncVote}; +use hotshot_types::{ + data::ViewNumber, + message::{DataMessage, MessageKind}, + traits::state::ConsensusTime, + vote::QuorumVote, +}; +use rand::rngs::StdRng; +use rand::{RngCore, SeedableRng}; +use serde::{Deserialize, Serialize}; +use tracing::instrument; +use tracing::trace; + +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +pub struct Test; + +impl NodeType for Test { + type Time = ViewNumber; + type BlockType = VIDBlockPayload; + type SignatureKey = BLSPubKey; + type VoteTokenType = StaticVoteToken; + type Transaction = VIDTransaction; + type ElectionConfigType = StaticElectionConfig; + type StateType = SDemoState; +} + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct TestImpl {} + +pub type ThisLeaf = SequencingLeaf; +pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = MemoryCommChannel; +pub type QuorumNetwork = MemoryCommChannel; +pub type ViewSyncNetwork = MemoryCommChannel; + +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; + +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; + +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; + +impl NodeImplementation for TestImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; + type Exchanges = SequencingExchanges< + Test, + Message, + QuorumExchange< + Test, + Self::Leaf, + ThisQuorumProposal, + ThisMembership, + QuorumNetwork, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + Test, + ThisViewSyncProposal, + ThisMembership, + ViewSyncNetwork, + Message, + >, + >; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> (ChannelMaps, Option>) { + (ChannelMaps::new(start_view), None) + } +} + +/// fake Eq +/// we can't compare the votetokentype for equality, so we can't +/// derive EQ on `VoteType` and thereby message +/// we are only sending data messages, though so we compare key and +/// data message +fn fake_message_eq(message_1: Message, message_2: Message) { + assert_eq!(message_1.sender, message_2.sender); + if let MessageKind::Data(DataMessage::SubmitTransaction(d_1, _)) = message_1.kind { + if let MessageKind::Data(DataMessage::SubmitTransaction(d_2, _)) = message_2.kind { + assert_eq!(d_1, d_2); + } + } else { + panic!("Got unexpected message type in memory test!"); + } +} + +#[instrument] +fn get_pubkey() -> BLSPubKey { + // random 32 bytes + let mut bytes = [0; 32]; + rand::thread_rng().fill_bytes(&mut bytes); + BLSPubKey::from_private(&BLSPrivKey::generate_from_seed(bytes)) +} + +/// create a message +fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec> { + let mut messages = Vec::new(); + for _ in 0..num_messages { + // create a random transaction from seed + let mut bytes = [0u8; 8]; + let mut rng = StdRng::seed_from_u64(seed); + rng.fill_bytes(&mut bytes); + + let message = Message { + sender: pk, + kind: MessageKind::Data(DataMessage::SubmitTransaction( + VIDTransaction(bytes.to_vec()), + ::new(0), + )), + _phantom: PhantomData, + }; + messages.push(message); + } + messages +} + +// Spawning a single MemoryNetwork should produce no errors +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_spawn_single() { + setup_logging(); + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key = get_pubkey(); + let _network = MemoryNetwork::new(pub_key, NoMetrics::boxed(), group, Option::None); +} + +// // Spawning a two MemoryNetworks and connecting them should produce no errors +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_spawn_double() { + setup_logging(); + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key_1 = get_pubkey(); + let _network_1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let pub_key_2 = get_pubkey(); + let _network_2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); +} + +// Check to make sure direct queue works +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_direct_queue() { + setup_logging(); + // Create some dummy messages + + // Make and connect the networking instances + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + + let pub_key_1 = get_pubkey(); + let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + + let pub_key_2 = get_pubkey(); + let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + + let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + + // Test 1 -> 2 + // Send messages + for sent_message in first_messages { + network1 + .direct_message(sent_message.clone(), pub_key_2) + .await + .expect("Failed to message node"); + let mut recv_messages = network2 + .recv_msgs(TransmitType::Direct) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } + + let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + + // Test 2 -> 1 + // Send messages + for sent_message in second_messages { + network2 + .direct_message(sent_message.clone(), pub_key_1) + .await + .expect("Failed to message node"); + let mut recv_messages = network1 + .recv_msgs(TransmitType::Direct) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } +} + +// Check to make sure direct queue works +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_broadcast_queue() { + setup_logging(); + // Make and connect the networking instances + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key_1 = get_pubkey(); + let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let pub_key_2 = get_pubkey(); + let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + + let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + + // Test 1 -> 2 + // Send messages + for sent_message in first_messages { + network1 + .broadcast_message( + sent_message.clone(), + vec![pub_key_2].into_iter().collect::>(), + ) + .await + .expect("Failed to message node"); + let mut recv_messages = network2 + .recv_msgs(TransmitType::Broadcast) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } + + let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + + // Test 2 -> 1 + // Send messages + for sent_message in second_messages { + network2 + .broadcast_message( + sent_message.clone(), + vec![pub_key_1].into_iter().collect::>(), + ) + .await + .expect("Failed to message node"); + let mut recv_messages = network1 + .recv_msgs(TransmitType::Broadcast) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[allow(deprecated)] +async fn memory_network_test_in_flight_message_count() { + setup_logging(); + + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key_1 = get_pubkey(); + let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let pub_key_2 = get_pubkey(); + let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + + // Create some dummy messages + let messages: Vec> = gen_messages(5, 100, pub_key_1); + let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); + + assert_eq!(network1.in_flight_message_count(), Some(0)); + assert_eq!(network2.in_flight_message_count(), Some(0)); + + for (count, message) in messages.iter().enumerate() { + network1 + .direct_message(message.clone(), pub_key_2) + .await + .unwrap(); + // network 2 has received `count` broadcast messages and `count + 1` direct messages + assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); + + network2 + .broadcast_message(message.clone(), broadcast_recipients.clone()) + .await + .unwrap(); + // network 1 has received `count` broadcast messages + assert_eq!(network1.in_flight_message_count(), Some(count + 1)); + + // network 2 has received `count + 1` broadcast messages and `count + 1` direct messages + assert_eq!(network2.in_flight_message_count(), Some((count + 1) * 2)); + } + + while network1.in_flight_message_count().unwrap() > 0 { + network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); + } + + while network2.in_flight_message_count().unwrap() > 0 { + network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); + network2.recv_msgs(TransmitType::Direct).await.unwrap(); + } + + assert_eq!(network1.in_flight_message_count(), Some(0)); + assert_eq!(network2.in_flight_message_count(), Some(0)); +} From e727ff90167bb5f2a533a7d37dfef626143f86f7 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 29 Sep 2023 15:51:50 -0400 Subject: [PATCH 0173/1393] fix: revert to using fixed val for transactions (#1835) --- hotshot/src/demo.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 97509c3b21..81cac76f4b 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -100,14 +100,12 @@ impl State for SDemoState { impl TestableState for SDemoState { fn create_random_transaction( _state: Option<&Self>, - rng: &mut dyn rand::RngCore, + _rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; - let mut bytes = vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]; - rng.fill_bytes(&mut bytes); - VIDTransaction(bytes) + VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) } } /// Implementation of [`NodeType`] for [`VDemoNode`] From 4a2cf164c28cfcf8855331f466f68c4750ef0938 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 29 Sep 2023 16:05:49 -0700 Subject: [PATCH 0174/1393] Save before switching branch --- hotshot/examples/infra/modDA.rs | 16 +++--- hotshot/src/demo.rs | 12 ++--- hotshot/src/lib.rs | 6 +-- hotshot/src/tasks/mod.rs | 10 ++-- hotshot/src/traits/election/vrf.rs | 2 +- .../src/traits/networking/memory_network.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 2 +- task-impls/src/consensus.rs | 10 ++-- task-impls/src/da.rs | 14 ++--- task-impls/src/events.rs | 4 +- task-impls/src/transactions.rs | 14 ++--- testing/src/node_types.rs | 2 +- types/src/block_impl.rs | 11 +++- types/src/certificate.rs | 10 ++-- types/src/consensus.rs | 2 +- types/src/data.rs | 53 ++++++++++--------- types/src/traits/block_contents.rs | 33 +++++++++--- types/src/traits/election.rs | 22 ++++---- types/src/traits/node_implementation.rs | 28 +++++----- types/src/traits/state.rs | 18 +++---- types/src/traits/storage.rs | 2 +- types/src/vote.rs | 8 +-- 22 files changed, 155 insertions(+), 126 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 82cc12bf68..cb6cf5e289 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -161,7 +161,7 @@ pub trait RunDA< >, > where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, Self: Sync, SystemContext: HotShotType, @@ -180,7 +180,7 @@ pub trait RunDA< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let genesis_block = TYPES::BlockType::genesis(); + let genesis_block = TYPES::BlockPayload::genesis(); let initializer = hotshot::HotShotInitializer::>::from_genesis( genesis_block, @@ -408,7 +408,7 @@ pub struct WebServerDARun< #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -452,7 +452,7 @@ impl< > for WebServerDARun where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, Self: Sync, { @@ -552,7 +552,7 @@ pub struct Libp2pDARun, MEMBERSHIP #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -596,7 +596,7 @@ impl< > for Libp2pDARun where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, Self: Sync, { @@ -766,7 +766,7 @@ where /// Main entry point for validators pub async fn main_entry_point< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, @@ -802,7 +802,7 @@ pub async fn main_entry_point< args: ValidatorArgs, ) where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, { setup_logging(); diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 81cac76f4b..33ac15dd28 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -66,11 +66,11 @@ impl Default for SDemoState { impl State for SDemoState { type Error = BlockPayloadError; - type BlockType = VIDBlockPayload; + type BlockPayload = VIDBlockPayload; type Time = ViewNumber; - fn validate_block(&self, _block: &Self::BlockType, view_number: &Self::Time) -> bool { + fn validate_block(&self, _block: &Self::BlockPayload, view_number: &Self::Time) -> bool { if view_number == &ViewNumber::genesis() { &self.view_number == view_number } else { @@ -80,7 +80,7 @@ impl State for SDemoState { fn append( &self, - block: &Self::BlockType, + block: &Self::BlockPayload, view_number: &Self::Time, ) -> Result { if !self.validate_block(block, view_number) { @@ -102,7 +102,7 @@ impl TestableState for SDemoState { _state: Option<&Self>, _rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) @@ -126,7 +126,7 @@ pub struct DemoTypes; impl NodeType for DemoTypes { type Time = ViewNumber; - type BlockType = VIDBlockPayload; + type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; @@ -187,7 +187,7 @@ pub fn random_quorum_certificate( - deltas: Either>, + deltas: Either>, rng: &mut dyn rand::RngCore, ) -> SequencingLeaf { let justify_qc = random_quorum_certificate(rng); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b7872ce59e..2281a1af08 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -634,7 +634,7 @@ pub trait HotShotType> { #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -656,7 +656,7 @@ where Message, Proposal = DAProposal, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, ViewSyncEx: ConsensusExchange< @@ -1052,7 +1052,7 @@ impl> HotShotInitializer Result> { + pub fn from_genesis(genesis_block: TYPES::BlockPayload) -> Result> { let state = TYPES::StateType::default() .append(&genesis_block, &TYPES::Time::new(0)) .map_err(|err| HotShotError::Misc { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 66f9fb20da..1a0fc8ad4e 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -250,7 +250,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -274,7 +274,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { let consensus = handle.hotshot.get_consensus(); @@ -364,7 +364,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { // build the da task @@ -416,7 +416,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -433,7 +433,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { // build the transactions task diff --git a/hotshot/src/traits/election/vrf.rs b/hotshot/src/traits/election/vrf.rs index d1151e0ab2..f99dabd7e8 100644 --- a/hotshot/src/traits/election/vrf.rs +++ b/hotshot/src/traits/election/vrf.rs @@ -884,7 +884,7 @@ impl Clone for VRFStakeTable; // type VoteTokenType = VRFVoteToken< // BLSVerKey, diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 51f7bbbdf7..087333d77a 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -629,7 +629,7 @@ mod tests { // // TODO (da) can this be SequencingConsensus? // type ConsensusType = ValidatingConsensus; // type Time = ViewNumber; - // type BlockType = VDemoBlock; + // type BlockPayload = VDemoBlock; // type SignatureKey = Ed25519Pub; // type VoteTokenType = StaticVoteToken; // type Transaction = VDemoTransaction; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index dd6e0217d5..73d86fc1d9 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -148,7 +148,7 @@ mod test { impl NodeType for DummyTypes { type Time = ViewNumber; - type BlockType = DummyBlock; + type BlockPayload = DummyBlock; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = ::Transaction; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 236b5ae6b8..1fb8d383e8 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -71,7 +71,7 @@ pub struct SequencingConsensusTaskState< TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// The global task registry @@ -84,7 +84,7 @@ pub struct SequencingConsensusTaskState< pub cur_view: TYPES::Time, /// Current block submitted to DA - pub block: TYPES::BlockType, + pub block: TYPES::BlockPayload, /// the quorum exchange pub quorum_exchange: Arc>, @@ -279,7 +279,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -1217,7 +1217,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { } @@ -1266,7 +1266,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { if let SequencingHotShotEvent::Shutdown = event { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 1100f4e078..fbe156aec6 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -55,7 +55,7 @@ pub struct DATaskState< TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// The state's api @@ -91,7 +91,7 @@ pub struct DAVoteCollectionTaskState< TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// the committee exchange @@ -103,7 +103,7 @@ pub struct DAVoteCollectionTaskState< TYPES, TYPES::Time, TYPES::VoteTokenType, - Commitment, + Commitment, >>::VoteAccumulator, DACertificate, >, @@ -122,7 +122,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { } @@ -140,7 +140,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { match event { @@ -252,7 +252,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// main task event handler @@ -703,7 +703,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f9fc7f8ba3..fbb7e227b0 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -61,9 +61,9 @@ pub enum SequencingHotShotEvent> { /// Send transactions to the network TransactionSend(TYPES::Transaction, TYPES::SignatureKey), /// Event to send DA block data from DA leader to next quorum leader (which should always be the same node); internal event only - SendDABlockData(TYPES::BlockType), + SendDABlockData(TYPES::BlockPayload), /// Event when the transactions task has a block formed - BlockReady(TYPES::BlockType, TYPES::Time), + BlockReady(TYPES::BlockPayload, TYPES::Time), /// Event when consensus decided on a leaf LeafDecided(Vec), /// Send VID shares to VID storage nodes; emitted by the DA leader diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index c682f5f38d..e868c50ba5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -56,7 +56,7 @@ pub struct TransactionTaskState< TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// The state's api @@ -87,10 +87,10 @@ pub struct TransactionTaskState< } // We have two `TransactionTaskState` implementations with different bounds. The implementation -// here requires `TYPES: NodeType`, +// here requires `TYPES: NodeType`, // whereas it's just `TYPES: NodeType` in the second implementation. impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -103,7 +103,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// main task event handler @@ -289,7 +289,7 @@ where } // We have two `TransactionTaskState` implementations with different bounds. The implementation -// above requires `TYPES: NodeType`, +// above requires `TYPES: NodeType`, // whereas here it's just `TYPES: NodeType`. impl< TYPES: NodeType, @@ -305,7 +305,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] @@ -403,7 +403,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = Commitment, + Commitment = Commitment, >, { } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 4ffd74e2bf..3cfb013851 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -42,7 +42,7 @@ use serde::{Deserialize, Serialize}; pub struct SequencingTestTypes; impl NodeType for SequencingTestTypes { type Time = ViewNumber; - type BlockType = VIDBlockPayload; + type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 2360cb3a24..2e9f75cd09 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -5,7 +5,7 @@ use std::{ }; use crate::{ - data::{test_srs, VidScheme, VidSchemeTrait}, + data::{test_srs, VidScheme, VidSchemeTrait, SequencingLeaf}, traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}, }; use commit::{Commitment, Committable}; @@ -125,3 +125,12 @@ impl BlockPayload for VIDBlockPayload { .collect() } } + +/// A [`BlockHeader`] that commits to [`VIDBlockPayload`]. +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct VIDBlockHeader { + /// Previous leaf. + pub previous_leaf: SequencingLeaf, + /// VID commitment. + pub commitment: ::Commit, +} \ No newline at end of file diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 09c3235744..2abf94b34b 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -40,7 +40,7 @@ pub struct DACertificate { pub view_number: TYPES::Time, /// committment to the block - pub block_commitment: Commitment, + pub block_commitment: Commitment, /// Assembled signature for certificate aggregation pub signatures: AssembledSignature, @@ -237,11 +237,11 @@ impl Committable } impl - SignedCertificate> + SignedCertificate> for DACertificate { type Vote = DAVote; - type VoteAccumulator = DAVoteAccumulator, Self::Vote>; + type VoteAccumulator = DAVoteAccumulator, Self::Vote>; fn from_signatures_and_commitment( signatures: AssembledSignature, @@ -262,11 +262,11 @@ impl self.signatures.clone() } - fn leaf_commitment(&self) -> Commitment { + fn leaf_commitment(&self) -> Commitment { self.block_commitment } - fn set_leaf_commitment(&mut self, _commitment: Commitment) { + fn set_leaf_commitment(&mut self, _commitment: Commitment) { // This function is only useful for QC. Will be removed after we have separated cert traits. } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e10900aa7e..959c31604e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -47,7 +47,7 @@ pub struct Consensus> { /// Saved blocks /// /// Contains the full block for every leaf in `saved_leaves` if that block is available. - pub saved_blocks: BlockStore, + pub saved_blocks: BlockStore, /// The `locked_qc` view number pub locked_view: TYPES::Time, diff --git a/types/src/data.rs b/types/src/data.rs index 44a1937161..66c5ebfe31 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -108,7 +108,7 @@ pub fn genesis_proposer_id() -> EncodedPublicKey { } /// The `Transaction` type associated with a `State`, as a syntactic shortcut -pub type Transaction = <::BlockType as BlockPayload>::Transaction; +pub type Transaction = <::BlockPayload as BlockPayload>::Transaction; /// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut pub type TxnCommitment = Commitment>; @@ -122,7 +122,7 @@ where LEAF: Committable, { /// current view's block commitment - pub block_commitment: Commitment, + pub block_commitment: Commitment, /// CurView from leader when proposing leaf pub view_number: TYPES::Time, @@ -139,13 +139,13 @@ where pub parent_commitment: Commitment, /// BlockPayload leaf wants to apply - pub deltas: TYPES::BlockType, + pub deltas: TYPES::BlockPayload, /// What the state should be after applying `self.deltas` pub state_commitment: Commitment, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the propser id pub proposer_id: EncodedPublicKey, @@ -155,7 +155,7 @@ where #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { /// BlockPayload leaf wants to apply - pub deltas: TYPES::BlockType, + pub deltas: TYPES::BlockPayload, /// View this proposal applies to pub view_number: TYPES::Time, } @@ -172,7 +172,7 @@ pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::Time, /// Block commitment - pub commitment: Commitment, + pub commitment: Commitment, /// VID shares dispersed among storage nodes pub shares: Vec<::Share>, /// VID common data sent to all storage nodes @@ -203,7 +203,7 @@ pub fn test_srs( #[serde(bound(deserialize = ""))] pub struct QuorumProposal> { /// The commitment to append. - pub block_commitment: Commitment, + pub block_commitment: Commitment, /// CurView from leader when proposing leaf pub view_number: TYPES::Time, @@ -281,7 +281,7 @@ pub trait ProposalType: /// A state change encoded in a leaf. /// -/// [`DeltasType`] represents a [block](NodeType::BlockType), but it may not contain the block in +/// [`DeltasType`] represents a [block](NodeType::BlockPayload), but it may not contain the block in /// full. It is guaranteed to contain, at least, a cryptographic commitment to the block, and it /// provides an interface for resolving the commitment to a full block if the full block is /// available. @@ -495,7 +495,7 @@ pub type LeafNode = ::NodeType; /// The [`StateType`] in a [`LeafType`]. pub type LeafState = as NodeType>::StateType; /// The [`BlockPayload`] in a [`LeafType`]. -pub type LeafBlock = as NodeType>::BlockType; +pub type LeafBlock = as NodeType>::BlockPayload; /// The [`Transaction`] in a [`LeafType`]. pub type LeafTransaction = as BlockPayload>::Transaction; /// The [`ConsensusTime`] used by a [`LeafType`]. @@ -511,7 +511,7 @@ pub trait TestableLeaf { &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as BlockPayload>::Transaction; + ) -> <::BlockPayload as BlockPayload>::Transaction; } /// This is the consensus-internal analogous concept to a block, and it contains the block proper, @@ -535,13 +535,13 @@ pub struct ValidatingLeaf { pub parent_commitment: Commitment, /// BlockPayload leaf wants to apply - pub deltas: TYPES::BlockType, + pub deltas: TYPES::BlockPayload, /// What the state should be AFTER applying `self.deltas` pub state: TYPES::StateType, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats #[derivative(PartialEq = "ignore")] @@ -574,11 +574,12 @@ pub struct SequencingLeaf { pub parent_commitment: Commitment, /// The block or block commitment to be applied - pub deltas: Either>, + pub deltas: Either>, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, + // TODO (Keyao) Remove. /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats pub timestamp: i128, @@ -636,13 +637,13 @@ impl Display for ValidatingLeaf { impl LeafType for ValidatingLeaf { type NodeType = TYPES; - type DeltasType = TYPES::BlockType; + type DeltasType = TYPES::BlockPayload; type MaybeState = TYPES::StateType; fn new( view_number: ::Time, justify_qc: QuorumCertificate>, - deltas: ::BlockType, + deltas: ::BlockPayload, state: ::StateType, ) -> Self { Self { @@ -682,7 +683,7 @@ impl LeafType for ValidatingLeaf { self.deltas.clone() } - fn get_deltas_commitment(&self) -> Commitment<::BlockType> { + fn get_deltas_commitment(&self) -> Commitment<::BlockPayload> { self.deltas.block_commitment() } @@ -694,7 +695,7 @@ impl LeafType for ValidatingLeaf { self.state.clone() } - fn get_rejected(&self) -> Vec<::Transaction> { + fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } @@ -724,7 +725,7 @@ impl LeafType for ValidatingLeaf { impl TestableLeaf for ValidatingLeaf where TYPES::StateType: TestableState, - TYPES::BlockType: TestableBlock, + TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; @@ -732,7 +733,7 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as BlockPayload>::Transaction { + ) -> <::BlockPayload as BlockPayload>::Transaction { ::create_random_transaction( Some(&self.state), rng, @@ -753,13 +754,13 @@ impl Display for SequencingLeaf { impl LeafType for SequencingLeaf { type NodeType = TYPES; - type DeltasType = Either>; + type DeltasType = Either>; type MaybeState = (); fn new( view_number: ::Time, justify_qc: QuorumCertificate>, - deltas: ::BlockType, + deltas: ::BlockPayload, _state: ::StateType, ) -> Self { Self { @@ -798,7 +799,7 @@ impl LeafType for SequencingLeaf { self.deltas.clone() } - fn get_deltas_commitment(&self) -> Commitment<::BlockType> { + fn get_deltas_commitment(&self) -> Commitment<::BlockPayload> { self.deltas.block_commitment() } @@ -809,7 +810,7 @@ impl LeafType for SequencingLeaf { // The Sequencing Leaf doesn't have a state. fn get_state(&self) -> Self::MaybeState {} - fn get_rejected(&self) -> Vec<::Transaction> { + fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } @@ -838,7 +839,7 @@ impl LeafType for SequencingLeaf { impl TestableLeaf for SequencingLeaf where TYPES::StateType: TestableState, - TYPES::BlockType: TestableBlock, + TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; @@ -846,7 +847,7 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as BlockPayload>::Transaction { + ) -> <::BlockPayload as BlockPayload>::Transaction { TYPES::StateType::create_random_transaction(None, rng, padding) } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 2fc3383a14..ab851745d4 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -1,7 +1,7 @@ //! Abstraction over the contents of a block //! -//! This module provides the [`BlockPayload`] and [`BlockHeader`] traits, which describe the -//! behaviors that a block is expected to have. +//! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which +//! describe the behaviors that a block is expected to have. use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; @@ -13,6 +13,14 @@ use std::{ hash::Hash, }; +// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. +// +/// Abstraction over any type of transaction. Used by [`BlockPayload`]. +pub trait Transaction: + Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash +{ +} + // TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. // /// Abstraction over the full contents of a block @@ -47,12 +55,23 @@ pub trait BlockPayload: fn contained_transactions(&self) -> HashSet>; } -// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. -// -/// Abstraction over any type of transaction. Used by [`BlockPayload`]. -pub trait Transaction: - Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash +pub trait BlockHeader: + Serialize + + Clone + + Debug + + Display + + Hash + + PartialEq + + Eq + + Send + + Sync + + Committable + + DeserializeOwned { + type Payload: BlockPayload; + + /// Get the payload commitment. + fn commitment(&self) -> Commitment; } /// Dummy implementation of `BlockPayload` for unit tests diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 6b3ef7e790..7d9c0d0c2d 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -534,7 +534,7 @@ pub trait CommitteeExchangeType: ConsensusExchange { /// Sign a DA proposal. - fn sign_da_proposal(&self, block_commitment: &Commitment) + fn sign_da_proposal(&self, block_commitment: &Commitment) -> EncodedSignature; /// Sign a vote on DA proposal. @@ -543,13 +543,13 @@ pub trait CommitteeExchangeType: /// of information necessary for checking that this node voted on that block. fn sign_da_vote( &self, - block_commitment: Commitment, + block_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); /// Create a message with a vote on DA proposal. fn create_da_message( &self, - block_commitment: Commitment, + block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> DAVote; @@ -559,7 +559,7 @@ pub trait CommitteeExchangeType: /// Create a message with a vote on VID disperse data. fn create_vid_message( &self, - block_commitment: Commitment, + block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> DAVote; @@ -567,7 +567,7 @@ pub trait CommitteeExchangeType: /// Sign a vote on VID proposal. fn sign_vid_vote( &self, - block_commitment: Commitment, + block_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); } @@ -605,7 +605,7 @@ impl< /// Sign a DA proposal. fn sign_da_proposal( &self, - block_commitment: &Commitment, + block_commitment: &Commitment, ) -> EncodedSignature { let signature = TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()); signature @@ -616,7 +616,7 @@ impl< /// of information necessary for checking that this node voted on that block. fn sign_da_vote( &self, - block_commitment: Commitment, + block_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, @@ -627,7 +627,7 @@ impl< /// Create a message with a vote on DA proposal. fn create_da_message( &self, - block_commitment: Commitment, + block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> DAVote { @@ -643,7 +643,7 @@ impl< fn create_vid_message( &self, - block_commitment: Commitment, + block_commitment: Commitment, current_view: ::Time, vote_token: ::VoteTokenType, ) -> DAVote { @@ -659,7 +659,7 @@ impl< fn sign_vid_vote( &self, - block_commitment: Commitment<::BlockType>, + block_commitment: Commitment<::BlockPayload>, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, @@ -681,7 +681,7 @@ impl< type Certificate = DACertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; + type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 49d9184bc4..a48f3152d3 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -359,7 +359,7 @@ pub trait TestableNodeImplementation: NodeImplementation state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; /// Creates random transaction if possible /// otherwise panics @@ -368,13 +368,13 @@ pub trait TestableNodeImplementation: NodeImplementation leaf: &Self::Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; /// generate a genesis block - fn block_genesis() -> TYPES::BlockType; + fn block_genesis() -> TYPES::BlockPayload; /// the number of transactions in a block - fn txn_count(block: &TYPES::BlockType) -> u64; + fn txn_count(block: &TYPES::BlockPayload) -> u64; /// Create ephemeral storage /// Will be deleted/lost immediately after storage is dropped @@ -413,7 +413,7 @@ where QuorumNetwork, >, TYPES::StateType: TestableState, - TYPES::BlockType: TestableBlock, + TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, I::Leaf: TestableLeaf, { @@ -428,7 +428,7 @@ where state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { ::create_random_transaction(state, rng, padding) } @@ -436,16 +436,16 @@ where leaf: &Self::Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { ::create_random_transaction(leaf, rng, padding) } - fn block_genesis() -> TYPES::BlockType { - ::genesis() + fn block_genesis() -> TYPES::BlockPayload { + ::genesis() } - fn txn_count(block: &TYPES::BlockType) -> u64 { - ::txn_count(block) + fn txn_count(block: &TYPES::BlockPayload) -> u64 { + ::txn_count(block) } fn construct_tmp_storage() -> Result { @@ -548,8 +548,8 @@ pub trait NodeType: type Time: ConsensusTime; /// The block type that this hotshot setup is using. /// - /// This should be the same block that `StateType::BlockType` is using. - type BlockType: BlockPayload; + /// This should be the same block that `StateType::BlockPayload` is using. + type BlockPayload: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; /// The vote token that this hotshot setup is using. @@ -562,5 +562,5 @@ pub trait NodeType: type ElectionConfigType: ElectionConfig; /// The state type that this hotshot setup is using. - type StateType: State; + type StateType: State; } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index bd8fe7d2ed..c143efdbc8 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -19,7 +19,7 @@ use std::{ /// /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](State::Error)) -/// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockType)) +/// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockPayload)) /// * The ability to validate that a block is actually a valid extension of this state /// ([`validate_block`](State::validate_block)) /// * The ability to produce a new state, with the modifications from the block applied @@ -40,12 +40,12 @@ pub trait State: /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; /// The type of block this state is associated with - type BlockType: BlockPayload; + type BlockPayload: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; /// Returns true if and only if the provided block is valid and can extend this state - fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool; + fn validate_block(&self, block: &Self::BlockPayload, view_number: &Self::Time) -> bool; /// Appends the given block to this state, returning an new state /// @@ -54,7 +54,7 @@ pub trait State: /// Should produce and error if appending this block would lead to an invalid state fn append( &self, - block: &Self::BlockType, + block: &Self::BlockPayload, view_number: &Self::Time, ) -> Result; @@ -95,7 +95,7 @@ pub trait ConsensusTime: /// extra functions required on state to be usable by hotshot-testing pub trait TestableState: State where - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, { /// Creates random transaction if possible /// otherwise panics @@ -104,7 +104,7 @@ where state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; } /// extra functions required on block to be usable by hotshot-testing @@ -157,16 +157,16 @@ pub mod dummy { impl State for DummyState { type Error = DummyError; - type BlockType = DummyBlock; + type BlockPayload = DummyBlock; type Time = ViewNumber; - fn validate_block(&self, _block: &Self::BlockType, _view_number: &Self::Time) -> bool { + fn validate_block(&self, _block: &Self::BlockPayload, _view_number: &Self::Time) -> bool { false } fn append( &self, - _block: &Self::BlockType, + _block: &Self::BlockPayload, _view_number: &Self::Time, ) -> Result { Ok(Self { diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 122698486c..996ea53e02 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -161,7 +161,7 @@ where state: LEAF::MaybeState, height: u64, parent_commitment: Commitment, - rejected: Vec<::Transaction>, + rejected: Vec<::Transaction>, proposer_id: EncodedPublicKey, ) -> Self { Self { diff --git a/types/src/vote.rs b/types/src/vote.rs index 9e232b755a..daf244d7aa 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -50,13 +50,13 @@ pub struct DAVote { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The block commitment being voted on. - pub block_commitment: Commitment, + pub block_commitment: Commitment, /// The view this vote was cast for pub current_view: TYPES::Time, /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData>, + pub vote_data: VoteData>, } /// A positive or negative vote on validating or commitment proposal. @@ -196,7 +196,7 @@ pub enum QuorumVote { Timeout(TimeoutVote), } -impl VoteType> for DAVote { +impl VoteType> for DAVote { fn get_view(&self) -> TYPES::Time { self.current_view } @@ -206,7 +206,7 @@ impl VoteType> for DAVote EncodedSignature { self.signature.1.clone() } - fn get_data(&self) -> VoteData> { + fn get_data(&self) -> VoteData> { self.vote_data.clone() } fn get_vote_token(&self) -> ::VoteTokenType { From fd0a036ffd7e9b81f7ed343e2289b9682fb337b4 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Sat, 30 Sep 2023 16:35:29 -0700 Subject: [PATCH 0175/1393] Fix index update and parameter --- web_server/src/lib.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index ef7850cbb0..861c6a5a67 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -309,7 +309,12 @@ impl WebServerDataSource for WebServerState { .or_insert_with(|| vec![(*highest_index, vote)]); self.view_sync_vote_index .entry(view_number) - .and_modify(|index| *index += 1); + .and_modify(|index| { + // Update the index if it's not just added. + if *index > 0 { + *index += 1 + } + }); Ok(()) } /// Stores a received proposal in the `WebServerState` @@ -337,7 +342,8 @@ impl WebServerDataSource for WebServerState { ) -> Result<(), Error> { // Only keep proposal history for MAX_VIEWS number of view if self.view_sync_proposals.len() >= MAX_VIEWS { - self.view_sync_proposals.remove(&self.oldest_view_sync_vote); + self.view_sync_proposals + .remove(&self.oldest_view_sync_proposal); while !self .view_sync_proposals .contains_key(&self.oldest_view_sync_proposal) @@ -355,7 +361,12 @@ impl WebServerDataSource for WebServerState { .or_insert_with(|| vec![(*highest_index, proposal)]); self.view_sync_proposal_index .entry(view_number) - .and_modify(|index| *index += 1); + .and_modify(|index| { + // Update the index if it's not just added. + if *index > 0 { + *index += 1 + } + }); Ok(()) } From a57954140cb6a2a8dfc28594b12df84f031a35a7 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Sun, 1 Oct 2023 10:43:48 +0900 Subject: [PATCH 0176/1393] update cargo lock --- task-impls/src/consensus.rs | 1 + task-impls/src/da.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 63f3f45f1d..ad6a1c8c99 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1093,6 +1093,7 @@ where } SequencingHotShotEvent::SendDABlockData(block) => { // ED TODO Should make sure this is actually the most recent block + // ED Should make this a map to view self.block = block; } _ => {} diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c7d20786ab..37fcdde00b 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -500,6 +500,8 @@ where drop(consensus); + // ED This is taking a really long time to return, since is based on application + // let mut block = ::StateType::next_block(None); let txns = self.wait_for_transactions(parent_leaf).await?; From eb62427359e5169c73a62f7e7143f9c18dd78b83 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Sun, 1 Oct 2023 11:03:38 +0900 Subject: [PATCH 0177/1393] Update self.block to be option --- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/consensus.rs | 98 +++++++++++++++++++------------------ 2 files changed, 52 insertions(+), 48 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 55e430cbe4..5415bad8ea 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -352,7 +352,7 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - block: TYPES::BlockType::new(), + block: Some(TYPES::BlockType::new()), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ad6a1c8c99..30a48fda4d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -86,7 +86,7 @@ pub struct SequencingConsensusTaskState< pub cur_view: TYPES::Time, /// Current block submitted to DA - pub block: TYPES::BlockType, + pub block: Option, /// the quorum exchange pub quorum_exchange: Arc>, @@ -1094,7 +1094,7 @@ where SequencingHotShotEvent::SendDABlockData(block) => { // ED TODO Should make sure this is actually the most recent block // ED Should make this a map to view - self.block = block; + self.block = Some(block); } _ => {} } @@ -1102,7 +1102,7 @@ where /// Sends a proposal if possible from the high qc we have pub async fn publish_proposal_if_able( - &self, + &mut self, _qc: QuorumCertificate, view: TYPES::Time, ) -> bool { @@ -1167,52 +1167,56 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - let block_commitment = self.block.commit(); - if block_commitment == TYPES::BlockType::new().commit() { - debug!("Block is generic block! {:?}", self.cur_view); - } - - let leaf = SequencingLeaf { - view_number: view, - height: parent_leaf.height + 1, - justify_qc: consensus.high_qc.clone(), - parent_commitment: parent_leaf.commit(), - // Use the block commitment rather than the block, so that the replica can construct - // the same leaf with the commitment. - deltas: Right(block_commitment), - rejected: vec![], - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.api.public_key().to_bytes(), - }; + if let Some(block) = &self.block { + let block_commitment = block.commit(); + if block_commitment == TYPES::BlockType::new().commit() { + debug!("Block is generic block! {:?}", self.cur_view); + } - let signature = self - .quorum_exchange - .sign_validating_or_commitment_proposal::(&leaf.commit()); - // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. - let proposal = QuorumProposal { - block_commitment, - view_number: leaf.view_number, - height: leaf.height, - justify_qc: consensus.high_qc.clone(), - // TODO ED Update this to be the actual TC if there is one - timeout_certificate: None, - proposer_id: leaf.proposer_id, - dac: None, - }; + let leaf = SequencingLeaf { + view_number: view, + height: parent_leaf.height + 1, + justify_qc: consensus.high_qc.clone(), + parent_commitment: parent_leaf.commit(), + // Use the block commitment rather than the block, so that the replica can construct + // the same leaf with the commitment. + deltas: Right(block_commitment), + rejected: vec![], + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.api.public_key().to_bytes(), + }; + + let signature = self + .quorum_exchange + .sign_validating_or_commitment_proposal::(&leaf.commit()); + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. + let proposal = QuorumProposal { + block_commitment, + view_number: leaf.view_number, + height: leaf.height, + justify_qc: consensus.high_qc.clone(), + // TODO ED Update this to be the actual TC if there is one + timeout_certificate: None, + proposer_id: leaf.proposer_id, + dac: None, + }; + + let message = Proposal { + data: proposal, + signature, + }; + debug!("Sending proposal for view {:?} \n {:?}", self.cur_view, ""); - let message = Proposal { - data: proposal, - signature, - }; - debug!("Sending proposal for view {:?} \n {:?}", self.cur_view, ""); - - self.event_stream - .publish(SequencingHotShotEvent::QuorumProposalSend( - message, - self.quorum_exchange.public_key().clone(), - )) - .await; - true + self.event_stream + .publish(SequencingHotShotEvent::QuorumProposalSend( + message, + self.quorum_exchange.public_key().clone(), + )) + .await; + self.block = None; + return true + } + false } } From b30ee3013b8cc39d9849f455da6e493428a4b6bb Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Sun, 1 Oct 2023 11:12:05 +0900 Subject: [PATCH 0178/1393] Cargo fmt --- task-impls/src/consensus.rs | 4 ++-- task-impls/src/da.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 30a48fda4d..3c3ba5abd7 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1213,8 +1213,8 @@ where self.quorum_exchange.public_key().clone(), )) .await; - self.block = None; - return true + self.block = None; + return true; } false } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 37fcdde00b..4037e07a23 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -501,7 +501,7 @@ where drop(consensus); // ED This is taking a really long time to return, since is based on application - // + // let mut block = ::StateType::next_block(None); let txns = self.wait_for_transactions(parent_leaf).await?; From 6175b417340f2a6c69ea3b9de6c70e2d40fa5d83 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Sun, 1 Oct 2023 11:54:12 +0900 Subject: [PATCH 0179/1393] Update view sync logging --- task-impls/src/view_sync.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 18c8ae581f..eafeb7a468 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -277,7 +277,7 @@ where (certificate_internal, ViewSyncPhase::Finalize) } }; - debug!( + error!( "Received view sync cert for phase {:?}", last_seen_certificate ); @@ -483,7 +483,10 @@ where } self.num_timeouts_tracked += 1; - error!("Num timeouts tracked is {}", self.num_timeouts_tracked); + error!( + "Num timeouts tracked is {}. View {} timed out", + self.num_timeouts_tracked, *view_number + ); if self.num_timeouts_tracked > 2 { error!("Too many timeouts! This shouldn't happen"); @@ -491,6 +494,10 @@ where // TODO ED Make this a configurable variable if self.num_timeouts_tracked == 2 { + error!( + "Starting view sync protocol; attempting to sync on view {}", + *view_number + 1 + ); // Start polling for view sync certificates self.exchange .network() @@ -637,7 +644,7 @@ where // Ignore certificate if it is for an older round if certificate_internal.round < self.next_view { - debug!("We're already in a higher round"); + error!("We're already in a higher round"); return (None, self); } @@ -654,9 +661,9 @@ where // If certificate is not valid, return current state if !self .exchange - .is_valid_view_sync_cert(message.data, certificate_internal.round) + .is_valid_view_sync_cert(message.data.clone(), certificate_internal.round) { - error!("Not valid view sync cert!"); + error!("Not valid view sync cert! {:?}", message.data); return (None, self); } From 5a3cb6e5267990eef0a8cce0c94ec6c4118305d1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Sat, 30 Sep 2023 22:57:15 -0700 Subject: [PATCH 0180/1393] synced most recent block height --- task-impls/src/consensus.rs | 9 +++++++-- task-impls/src/da.rs | 6 ------ types/src/consensus.rs | 10 ++++++---- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dccf7919c3..24bccf2059 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -584,7 +584,7 @@ where // TODO ED Insert TC logic here // Construct the leaf. - let justify_qc = proposal.data.justify_qc; + let justify_qc = proposal.clone().data.justify_qc; let parent = if justify_qc.is_genesis() { self.genesis_leaf().await } else { @@ -969,7 +969,6 @@ where let mut consensus = self.consensus.write().await; consensus.high_qc = qc.clone(); - drop(consensus); // View may have already been updated by replica if they voted for this QC @@ -1168,6 +1167,12 @@ where proposer_id: self.api.public_key().to_bytes(), }; + let consensus = self.consensus.read().await; + consensus + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height).unwrap_or(0)); + let signature = self .quorum_exchange .sign_validating_or_commitment_proposal::(&leaf.commit()); diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ff048ef469..c7df2854ef 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -646,12 +646,6 @@ where self.event_stream .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) .await; - // if let Err(e) = self.api.send_da_broadcast(message.clone()).await { - // consensus.metrics.failed_to_send_messages.add(1); - // warn!(?message, ?e, "Could not broadcast leader proposal"); - // } else { - // consensus.metrics.outgoing_broadcast_messages.add(1); - // } self.event_stream .publish(SequencingHotShotEvent::DAProposalSend( message.clone(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 6ba49fd211..4b6b7bb0d6 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -78,6 +78,8 @@ pub struct ConsensusMetricsValue { #[allow(dead_code)] /// The values that are being tracked pub values: Arc>, + /// The number of last synced synced block height + pub last_synced_block_height: Box, /// The current view pub current_view: Box, /// The duration to collect votes in a view (only applies when this insance is the leader) @@ -114,8 +116,8 @@ pub struct ConsensusMetricsValue { pub direct_messages_received: Box, /// Total broadcast messages received pub broadcast_messages_received: Box, - /// Total number of messages which couldn't be sent - pub failed_to_send_messages: Box, + // Total number of messages which couldn't be sent + // pub failed_to_send_messages: Box, } /// The wrapper with a string name for the networking metrics @@ -243,6 +245,8 @@ impl ConsensusMetricsValue { }); Self { values, + last_synced_block_height: metrics + .create_gauge(String::from("last_synced_block_height"), None), current_view: metrics.create_gauge(String::from("current_view"), None), vote_validate_duration: metrics.create_histogram( String::from("vote_validate_duration"), @@ -281,8 +285,6 @@ impl ConsensusMetricsValue { .create_counter(String::from("direct_messages_received"), None), broadcast_messages_received: metrics .create_counter(String::from("broadcast_messages_received"), None), - failed_to_send_messages: metrics - .create_counter(String::from("failed_to_send_messages"), None), number_of_timeouts: metrics .create_counter(String::from("number_of_views_timed_out"), None), } From 223e072c184809fba5560fe1a4d2968119635986 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 1 Oct 2023 17:46:19 -0400 Subject: [PATCH 0181/1393] feat: memory network reliability --- hotshot/src/traits.rs | 2 +- .../src/traits/networking/memory_network.rs | 149 ++++++------ testing/src/network_reliability.rs | 163 ------------- types/src/traits/network.rs | 229 +++++++++++++++++- 4 files changed, 299 insertions(+), 244 deletions(-) delete mode 100644 testing/src/network_reliability.rs diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index a0e56cf86d..370dc47839 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -14,7 +14,7 @@ pub mod implementations { pub use super::{ networking::{ libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, - memory_network::{DummyReliability, MasterMap, MemoryCommChannel, MemoryNetwork}, + memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, web_server_libp2p_fallback::{CombinedNetworks, WebServerWithFallbackCommChannel}, web_server_network::{WebCommChannel, WebServerNetwork}, }, diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 976186c030..9d9e751ee5 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -6,7 +6,7 @@ use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetrics}; use crate::NodeImplementation; use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, + art::async_spawn, channel::{bounded, Receiver, SendError, Sender}, }; use async_lock::{Mutex, RwLock}; @@ -42,18 +42,6 @@ use std::{ }; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; -#[derive(Debug, Clone, Copy)] -/// dummy implementation of network reliability -pub struct DummyReliability {} -impl NetworkReliability for DummyReliability { - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> std::time::Duration { - std::time::Duration::ZERO - } -} - /// Shared state for in-memory mock networking. /// /// This type is responsible for keeping track of the channels to each [`MemoryNetwork`], and is @@ -104,6 +92,9 @@ struct MemoryNetworkInner { /// The networking metrics we're keeping track of metrics: NetworkingMetrics, + + /// config to introduce unreliability to the network + reliability_config: Option>>, } /// In memory only network simulator. @@ -134,7 +125,7 @@ impl MemoryNetwork { pub_key: K, metrics: Box, master_map: Arc>, - reliability_config: Option>, + reliability_config: Option>>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); let (broadcast_input, broadcast_task_recv) = bounded(128); @@ -165,30 +156,11 @@ impl MemoryNetwork { match x { Ok(x) => { let dts = direct_task_send.clone(); - if let Some(r) = reliability_config.clone() { - async_spawn(async move { - if r.sample_keep() { - let delay = r.sample_delay(); - if delay > std::time::Duration::ZERO { - async_sleep(delay).await; - } - let res = dts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - error!("Output queue receivers are shutdown"); - } - } else { - warn!("dropping packet!"); - } - }); + let res = dts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); } else { - let res = dts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - error!("Output queue receivers are shutdown"); - } + error!("Output queue receivers are shutdown"); } } Err(e) => { @@ -203,28 +175,11 @@ impl MemoryNetwork { match x { Ok(x) => { let bts = broadcast_task_send.clone(); - if let Some(r) = reliability_config.clone() { - async_spawn(async move { - if r.sample_keep() { - let delay = r.sample_delay(); - if delay > std::time::Duration::ZERO { - async_sleep(delay).await; - } - let res = bts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - warn!("dropping packet!"); - } - } - }); + let res = bts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); } else { - let res = bts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - warn!("dropping packet!"); - } + warn!("dropping packet!"); } } Err(e) => { @@ -249,6 +204,7 @@ impl MemoryNetwork { master_map: master_map.clone(), in_flight_message_count, metrics: NetworkingMetrics::new(&*metrics), + reliability_config, }), }; master_map.map.insert(pub_key, mn.clone()); @@ -347,20 +303,40 @@ impl ConnectedNetwork for Memory .context(FailedToSerializeSnafu)?; trace!("Message bincoded, sending"); for node in &self.inner.master_map.map { + // TODO delay/drop etc here let (key, node) = node.pair(); if !recipients.contains(key) { continue; } trace!(?key, "Sending message to node"); - let res = node.broadcast_input(vec.clone()).await; - match res { - Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); - trace!(?key, "Delivered message to remote"); + if let Some(r) = &self.inner.reliability_config { + let config = r.read().await; + { + let node2 = node.clone(); + let fut = config.chaos_send_msg( + vec.clone(), + Arc::new(move |msg: Vec| { + let node3 = (node2).clone(); + boxed_sync(async move { + let _res = node3.broadcast_input(msg).await; + // NOTE we're dropping metrics here but this is only for testing + // purposes. I think that should be okay + }) + }), + ); + async_spawn(fut); } - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - warn!(?e, ?key, "Error sending broadcast message to node"); + } else { + let res = node.broadcast_input(vec.clone()).await; + match res { + Ok(_) => { + self.inner.metrics.outgoing_message_count.add(1); + trace!(?key, "Delivered message to remote"); + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + warn!(?e, ?key, "Error sending broadcast message to node"); + } } } } @@ -376,18 +352,37 @@ impl ConnectedNetwork for Memory .context(FailedToSerializeSnafu)?; trace!("Message bincoded, finding recipient"); if let Some(node) = self.inner.master_map.map.get(&recipient) { - let node = node.value(); - let res = node.direct_input(vec).await; - match res { - Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); - trace!(?recipient, "Delivered message to remote"); - Ok(()) + let node = node.value().clone(); + if let Some(r) = &self.inner.reliability_config { + let config = r.read().await; + { + let fut = config.chaos_send_msg( + vec.clone(), + Arc::new(move |msg: Vec| { + let node2 = node.clone(); + boxed_sync(async move { + let _res = node2.broadcast_input(msg).await; + // NOTE we're dropping metrics here but this is only for testing + // purposes. I think that should be okay + }) + }), + ); + async_spawn(fut); } - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - warn!(?e, ?recipient, "Error delivering direct message"); - Err(NetworkError::CouldNotDeliver) + Ok(()) + } else { + let res = node.direct_input(vec).await; + match res { + Ok(_) => { + self.inner.metrics.outgoing_message_count.add(1); + trace!(?recipient, "Delivered message to remote"); + Ok(()) + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + warn!(?e, ?recipient, "Error delivering direct message"); + Err(NetworkError::CouldNotDeliver) + } } } } else { diff --git a/testing/src/network_reliability.rs b/testing/src/network_reliability.rs deleted file mode 100644 index 97b22b533b..0000000000 --- a/testing/src/network_reliability.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::time::Duration; - -use hotshot::traits::NetworkReliability; -use rand::{ - distributions::{Bernoulli, Uniform}, - prelude::Distribution, -}; - -/// A synchronous network. Packets may be delayed, but are guaranteed -/// to arrive within `timeout` ns -#[derive(Clone, Copy, Debug, Default)] -pub struct SynchronousNetwork { - /// Max delay of packet before arrival - timeout_ms: u64, - /// Lowest value in milliseconds that a packet may be delayed - delay_low_ms: u64, -} - -impl NetworkReliability for SynchronousNetwork { - /// never drop a packet - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.timeout_ms) - .sample(&mut rand::thread_rng()), - ) - } -} - -/// An asynchronous network. Packets may be dropped entirely -/// or delayed for arbitrarily long periods -/// probability that packet is kept = `keep_numerator` / `keep_denominator` -/// packet delay is obtained by sampling from a uniform distribution -/// between `delay_low_ms` and `delay_high_ms`, inclusive -#[derive(Debug, Clone, Copy)] -pub struct AsynchronousNetwork { - /// numerator for probability of keeping packets - keep_numerator: u32, - /// denominator for probability of keeping packets - keep_denominator: u32, - /// lowest value in milliseconds that a packet may be delayed - delay_low_ms: u64, - /// highest value in milliseconds that a packet may be delayed - delay_high_ms: u64, -} - -impl NetworkReliability for AsynchronousNetwork { - fn sample_keep(&self) -> bool { - Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) - .unwrap() - .sample(&mut rand::thread_rng()) - } - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) - .sample(&mut rand::thread_rng()), - ) - } -} - -/// An partially synchronous network. Behaves asynchronously -/// until some arbitrary time bound, GST, -/// then synchronously after GST -#[derive(Debug, Clone, Copy)] -pub struct PartiallySynchronousNetwork { - /// asynchronous portion of network - asynchronous: AsynchronousNetwork, - /// synchronous portion of network - synchronous: SynchronousNetwork, - /// time when GST occurs - gst: std::time::Duration, - /// when the network was started - start: std::time::Instant, -} - -impl NetworkReliability for PartiallySynchronousNetwork { - /// never drop a packet - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> Duration { - // act asyncronous before gst - if self.start.elapsed() < self.gst { - if self.asynchronous.sample_keep() { - self.asynchronous.sample_delay() - } else { - // assume packet was "dropped" and will arrive after gst - self.synchronous.sample_delay() + self.gst - } - } else { - // act syncronous after gst - self.synchronous.sample_delay() - } - } -} - -impl Default for AsynchronousNetwork { - // disable all chance of failure - fn default() -> Self { - AsynchronousNetwork { - keep_numerator: 1, - keep_denominator: 1, - delay_low_ms: 0, - delay_high_ms: 0, - } - } -} - -impl Default for PartiallySynchronousNetwork { - fn default() -> Self { - PartiallySynchronousNetwork { - synchronous: SynchronousNetwork::default(), - asynchronous: AsynchronousNetwork::default(), - gst: std::time::Duration::new(0, 0), - start: std::time::Instant::now(), - } - } -} - -impl SynchronousNetwork { - /// create new `SynchronousNetwork` - pub fn new(timeout: u64, delay_low_ms: u64) -> Self { - SynchronousNetwork { - timeout_ms: timeout, - delay_low_ms, - } - } -} - -impl AsynchronousNetwork { - /// create new `AsynchronousNetwork` - pub fn new( - keep_numerator: u32, - keep_denominator: u32, - delay_low_ms: u64, - delay_high_ms: u64, - ) -> Self { - AsynchronousNetwork { - keep_numerator, - keep_denominator, - delay_low_ms, - delay_high_ms, - } - } -} - -impl PartiallySynchronousNetwork { - /// create new `PartiallySynchronousNetwork` - pub fn new( - asynchronous: AsynchronousNetwork, - synchronous: SynchronousNetwork, - gst: std::time::Duration, - ) -> Self { - PartiallySynchronousNetwork { - asynchronous, - synchronous, - gst, - start: std::time::Instant::now(), - } - } -} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index ef02b05245..1729636836 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -2,9 +2,10 @@ //! //! Contains types and traits used by `HotShot` to abstract over network access +use async_compatibility_layer::art::async_sleep; #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; -use hotshot_task::BoxSyncFuture; +use hotshot_task::{boxed_sync, BoxSyncFuture}; use libp2p_networking::network::NetworkNodeHandleError; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; @@ -14,6 +15,10 @@ use super::{election::Membership, node_implementation::NodeType, signature_key:: use crate::{data::ViewNumber, message::MessagePurpose}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; +use rand::{ + distributions::{Bernoulli, Uniform}, + prelude::Distribution, +}; use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, sync::Arc, time::Duration}; @@ -366,6 +371,7 @@ pub enum NetworkChange { } /// interface describing how reliable the network is +#[async_trait] pub trait NetworkReliability: Debug + Sync + std::marker::Send { /// Sample from bernoulli distribution to decide whether /// or not to keep a packet @@ -373,8 +379,225 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send { /// /// Panics if `self.keep_numerator > self.keep_denominator` /// - fn sample_keep(&self) -> bool; + fn sample_keep(&self) -> bool { + true + } + /// sample from uniform distribution to decide whether /// or not to keep a packet - fn sample_delay(&self) -> Duration; + fn sample_delay(&self) -> Duration { + std::time::Duration::ZERO + } + + /// scramble the packet + fn scramble(&self, msg: Vec) -> Vec { + msg + } + + /// number of times to repeat the packet + fn sample_repeat(&self) -> usize { + 1 + } + + /// given a message and a way to send the message, + /// decide whether or not to send the message + /// how long to delay the message + /// whether or not to send duplicates + /// and whether or not to include noise with the message + /// then send the message + fn chaos_send_msg( + &self, + msg: Vec, + send_fn: Arc) -> BoxSyncFuture<'static, ()>>, + ) -> BoxSyncFuture<'static, ()> { + let sample_keep = self.sample_keep(); + let delay = self.sample_delay(); + let repeats = self.sample_repeat(); + let mut msgs = Vec::new(); + for _idx in 0..repeats { + let scrambled = self.scramble(msg.clone()); + msgs.push(scrambled); + } + let closure = async move { + if sample_keep { + async_sleep(delay).await; + for msg in msgs { + send_fn(msg).await; + } + } + }; + boxed_sync(closure) + } +} + +/// ideal network +#[derive(Clone, Copy, Debug, Default)] +pub struct PerfectNetwork {} + +impl NetworkReliability for PerfectNetwork {} + +/// A synchronous network. Packets may be delayed, but are guaranteed +/// to arrive within `timeout` ns +#[derive(Clone, Copy, Debug, Default)] +pub struct SynchronousNetwork { + /// Max delay of packet before arrival + timeout_ms: u64, + /// Lowest value in milliseconds that a packet may be delayed + delay_low_ms: u64, +} + +impl NetworkReliability for SynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.timeout_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An asynchronous network. Packets may be dropped entirely +/// or delayed for arbitrarily long periods +/// probability that packet is kept = `keep_numerator` / `keep_denominator` +/// packet delay is obtained by sampling from a uniform distribution +/// between `delay_low_ms` and `delay_high_ms`, inclusive +#[derive(Debug, Clone, Copy)] +pub struct AsynchronousNetwork { + /// numerator for probability of keeping packets + keep_numerator: u32, + /// denominator for probability of keeping packets + keep_denominator: u32, + /// lowest value in milliseconds that a packet may be delayed + delay_low_ms: u64, + /// highest value in milliseconds that a packet may be delayed + delay_high_ms: u64, +} + +impl NetworkReliability for AsynchronousNetwork { + fn sample_keep(&self) -> bool { + Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) + .unwrap() + .sample(&mut rand::thread_rng()) + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An partially synchronous network. Behaves asynchronously +/// until some arbitrary time bound, GST, +/// then synchronously after GST +#[allow(clippy::similar_names)] +#[derive(Debug, Clone, Copy)] +pub struct PartiallySynchronousNetwork { + /// asynchronous portion of network + asynchronous: AsynchronousNetwork, + /// synchronous portion of network + synchronous: SynchronousNetwork, + /// time when GST occurs + gst: std::time::Duration, + /// when the network was started + start: std::time::Instant, +} + +impl NetworkReliability for PartiallySynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + // act asyncronous before gst + if self.start.elapsed() < self.gst { + if self.asynchronous.sample_keep() { + self.asynchronous.sample_delay() + } else { + // assume packet was "dropped" and will arrive after gst + self.synchronous.sample_delay() + self.gst + } + } else { + // act syncronous after gst + self.synchronous.sample_delay() + } + } +} + +impl Default for AsynchronousNetwork { + // disable all chance of failure + fn default() -> Self { + AsynchronousNetwork { + keep_numerator: 1, + keep_denominator: 1, + delay_low_ms: 0, + delay_high_ms: 0, + } + } +} + +impl Default for PartiallySynchronousNetwork { + fn default() -> Self { + PartiallySynchronousNetwork { + synchronous: SynchronousNetwork::default(), + asynchronous: AsynchronousNetwork::default(), + gst: std::time::Duration::new(0, 0), + start: std::time::Instant::now(), + } + } +} + +impl SynchronousNetwork { + /// create new `SynchronousNetwork` + #[must_use] + pub fn new(timeout: u64, delay_low_ms: u64) -> Self { + SynchronousNetwork { + timeout_ms: timeout, + delay_low_ms, + } + } +} + +impl AsynchronousNetwork { + /// create new `AsynchronousNetwork` + #[must_use] + pub fn new( + keep_numerator: u32, + keep_denominator: u32, + delay_low_ms: u64, + delay_high_ms: u64, + ) -> Self { + AsynchronousNetwork { + keep_numerator, + keep_denominator, + delay_low_ms, + delay_high_ms, + } + } +} + +impl PartiallySynchronousNetwork { + /// create new `PartiallySynchronousNetwork` + #[allow(clippy::similar_names)] + #[must_use] + pub fn new( + asynchronous: AsynchronousNetwork, + synchronous: SynchronousNetwork, + gst: std::time::Duration, + ) -> Self { + PartiallySynchronousNetwork { + asynchronous, + synchronous, + gst, + start: std::time::Instant::now(), + } + } +} + +/// A chaotic network using all the networking calls +pub struct ChaosNetwork { + // TODO } From b4c7860f3c0971b6eaa877fbf1f8503c7e499612 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 2 Oct 2023 11:03:43 -0400 Subject: [PATCH 0182/1393] log why we don't vote at INFO level --- task-impls/src/consensus.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 3c3ba5abd7..9b5491f43e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -45,7 +45,7 @@ use std::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, info}; /// Error returned by the consensus task #[derive(Snafu, Debug)] @@ -456,13 +456,13 @@ where } } } - debug!( + info!( "Couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", *proposal.get_view_number(), ); return false; } - debug!( + info!( "Could not vote because we don't have a proposal yet for view {}", *self.cur_view ); From 24dbbd6b01e0ffb40d9b9e5ddd1a57741e7faab7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 13:39:09 -0400 Subject: [PATCH 0183/1393] Bump generic-array from 0.14.7 to 1.0.0 (#1724) * Bump generic-array from 0.14.7 to 1.0.0 Bumps [generic-array](https://github.com/fizyk20/generic-array) from 0.14.7 to 1.0.0. - [Release notes](https://github.com/fizyk20/generic-array/releases) - [Changelog](https://github.com/fizyk20/generic-array/blob/master/CHANGELOG.md) - [Commits](https://github.com/fizyk20/generic-array/commits/1.0.0) --- updated-dependencies: - dependency-name: generic-array dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * set generic-array dep to 0.14.7, make it a shared workspace dep * update commit dep to latest main * update jellyfish dep to latest main --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Gus Gutoski --- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 0faa1f0a87..9c53fb335a 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -19,7 +19,7 @@ ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } -generic-array = "0.14.7" +generic-array = { workspace = true } hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-relation = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 8f11e4d66f..4059589a85 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -16,7 +16,7 @@ bitvec = { workspace = true } digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } -generic-array = "0.14.7" +generic-array = { workspace = true } hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-relation = { workspace = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index dbc1b7b009..c0eb7a11e0 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -31,7 +31,7 @@ displaydoc = { version = "0.2.3", default-features = false } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } futures = { workspace = true } -generic-array = "0.14.7" +generic-array = { workspace = true } hex_fmt = "0.3.0" hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } From 52387911224dfa20c1dfbf7cc13017d7b88c2e01 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 2 Oct 2023 14:17:23 -0700 Subject: [PATCH 0184/1393] Remove index nonzero check, rename hightest_index --- task-impls/src/consensus.rs | 2 +- web_server/src/lib.rs | 32 +++++++++++--------------------- 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9b5491f43e..b4a38db5e5 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -45,7 +45,7 @@ use std::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, info}; +use tracing::{debug, error, info, instrument}; /// Error returned by the consensus task #[derive(Snafu, Debug)] diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 861c6a5a67..ac2730b448 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -280,11 +280,11 @@ impl WebServerDataSource for WebServerState { self.oldest_vote += 1; } } - let highest_index = self.vote_index.entry(view_number).or_insert(0); + let next_index = self.vote_index.entry(view_number).or_insert(0); self.votes .entry(view_number) - .and_modify(|current_votes| current_votes.push((*highest_index, vote.clone()))) - .or_insert_with(|| vec![(*highest_index, vote)]); + .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) + .or_insert_with(|| vec![(*next_index, vote)]); self.vote_index .entry(view_number) .and_modify(|index| *index += 1); @@ -302,19 +302,14 @@ impl WebServerDataSource for WebServerState { self.oldest_view_sync_vote += 1; } } - let highest_index = self.view_sync_vote_index.entry(view_number).or_insert(0); + let next_index = self.view_sync_vote_index.entry(view_number).or_insert(0); self.view_sync_votes .entry(view_number) - .and_modify(|current_votes| current_votes.push((*highest_index, vote.clone()))) - .or_insert_with(|| vec![(*highest_index, vote)]); + .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) + .or_insert_with(|| vec![(*next_index, vote)]); self.view_sync_vote_index .entry(view_number) - .and_modify(|index| { - // Update the index if it's not just added. - if *index > 0 { - *index += 1 - } - }); + .and_modify(|index| *index += 1); Ok(()) } /// Stores a received proposal in the `WebServerState` @@ -351,22 +346,17 @@ impl WebServerDataSource for WebServerState { self.oldest_view_sync_proposal += 1; } } - let highest_index = self + let next_index = self .view_sync_proposal_index .entry(view_number) .or_insert(0); self.view_sync_proposals .entry(view_number) - .and_modify(|current_props| current_props.push((*highest_index, proposal.clone()))) - .or_insert_with(|| vec![(*highest_index, proposal)]); + .and_modify(|current_props| current_props.push((*next_index, proposal.clone()))) + .or_insert_with(|| vec![(*next_index, proposal)]); self.view_sync_proposal_index .entry(view_number) - .and_modify(|index| { - // Update the index if it's not just added. - if *index > 0 { - *index += 1 - } - }); + .and_modify(|index| *index += 1); Ok(()) } From cb2a61f7e11edbf58abb157b3ed2d2fe0beddfd4 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 2 Oct 2023 17:24:01 -0400 Subject: [PATCH 0185/1393] Decrease network task polling frequency from 500ns to 100ms (#1853) * Decrease network task polling frequency from 500ns to 100ms * Fix format * increase webserver test duration --------- Co-authored-by: Rob --- hotshot/src/tasks/mod.rs | 4 ++-- task-impls/src/consensus.rs | 2 +- testing/tests/web_server.rs | 8 ++++++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5415bad8ea..1b411c35d2 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -162,7 +162,7 @@ where .expect("Failed to receive broadcast messages"), ); if msgs.0.is_empty() { - async_sleep(Duration::new(0, 500)).await; + async_sleep(Duration::from_millis(100)).await; } else { break msgs; } @@ -182,7 +182,7 @@ where .expect("Failed to receive direct messages"), ); if msgs.0.is_empty() { - async_sleep(Duration::new(0, 500)).await; + async_sleep(Duration::from_millis(100)).await; } else { break msgs; } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9b5491f43e..b4a38db5e5 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -45,7 +45,7 @@ use std::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, info}; +use tracing::{debug, error, info, instrument}; /// Error returned by the consensus task #[derive(Snafu, Debug)] diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 75d0e4f89c..9b19532902 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -1,5 +1,8 @@ +use std::time::Duration; + use async_compatibility_layer::logging::shutdown_logging; use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{SequencingTestTypes, SequencingWebImpl}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, @@ -28,6 +31,11 @@ async fn web_server_network() { num_successful_views: 35, ..Default::default() }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(20), + }, + ), ..TestMetadata::default() }; metadata From bc4b4350038f114b034c382520b7d3f09065d17a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 2 Oct 2023 17:01:37 -0700 Subject: [PATCH 0186/1393] update last_synced_block_height, current_view, incoming_tx, outgoing_tx, num_timeouts etc. for metrics --- hotshot/src/traits/networking.rs | 24 ++++++--- .../src/traits/networking/libp2p_network.rs | 8 +-- .../src/traits/networking/memory_network.rs | 12 ++--- task-impls/src/consensus.rs | 17 ++++--- types/src/consensus.rs | 51 ++++--------------- types/src/data.rs | 4 ++ types/src/traits/state.rs | 2 + 7 files changed, 52 insertions(+), 66 deletions(-) diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index aeaca0d815..17ba29d32d 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -29,10 +29,14 @@ pub struct NetworkingMetricsValue { pub values: Arc>, /// A [`Gauge`] which tracks how many peers are connected pub connected_peers: Box, - /// A [`Counter`] which tracks how many messages have been received - pub incoming_message_count: Box, - /// A [`Counter`] which tracks how many messages have been send - pub outgoing_message_count: Box, + /// A [`Counter`] which tracks how many messages have been received directly + pub incoming_direct_message_count: Box, + /// A [`Counter`] which tracks how many messages have been received by broadcast + pub incoming_broadcast_message_count: Box, + /// A [`Counter`] which tracks how many messages have been send directly + pub outgoing_direct_message_count: Box, + /// A [`Counter`] which tracks how many messages have been send by broadcast + pub outgoing_broadcast_message_count: Box, /// A [`Counter`] which tracks how many messages failed to send pub message_failed_to_send: Box, // A [`Gauge`] which tracks how many connected entries there are in the gossipsub mesh @@ -168,10 +172,14 @@ impl NetworkingMetricsValue { Self { values, connected_peers: metrics.create_gauge(String::from("connected_peers"), None), - incoming_message_count: metrics - .create_counter(String::from("incoming_message_count"), None), - outgoing_message_count: metrics - .create_counter(String::from("outgoing_message_count"), None), + incoming_direct_message_count: metrics + .create_counter(String::from("incoming_direct_message_count"), None), + incoming_broadcast_message_count: metrics + .create_counter(String::from("incoming_broadcast_message_count"), None), + outgoing_direct_message_count: metrics + .create_counter(String::from("outgoing_direct_message_count"), None), + outgoing_broadcast_message_count: metrics + .create_counter(String::from("outgoing_broadcast_message_count"), None), message_failed_to_send: metrics .create_counter(String::from("message_failed_to_send"), None), } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 2ef23e5f2a..1a5550ef82 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -627,7 +627,7 @@ impl ConnectedNetwork for Libp2p match self.inner.handle.gossip(topic, &message).await { Ok(()) => { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_broadcast_message_count.add(1); Ok(()) } Err(e) => { @@ -675,7 +675,7 @@ impl ConnectedNetwork for Libp2p match self.inner.handle.direct_request(pid, &message).await { Ok(()) => { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_direct_message_count.add(1); Ok(()) } Err(e) => { @@ -706,7 +706,7 @@ impl ConnectedNetwork for Libp2p .drain_at_least_one() .await .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_message_count.add(result.len()); + self.inner.metrics.incoming_direct_message_count.add(result.len()); Ok(result) } TransmitType::Broadcast => { @@ -716,7 +716,7 @@ impl ConnectedNetwork for Libp2p .drain_at_least_one() .await .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_message_count.add(result.len()); + self.inner.metrics.incoming_direct_message_count.add(result.len()); Ok(result) } } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 6a53082872..cd13aab788 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -264,7 +264,7 @@ impl MemoryNetwork { .fetch_add(1, Ordering::Relaxed); let input = self.inner.broadcast_input.read().await; if let Some(input) = &*input { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_broadcast_message_count.add(1); input.send(message).await } else { Err(SendError(message)) @@ -278,7 +278,7 @@ impl MemoryNetwork { .fetch_add(1, Ordering::Relaxed); let input = self.inner.direct_input.read().await; if let Some(input) = &*input { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_direct_message_count.add(1); input.send(message).await } else { Err(SendError(message)) @@ -355,7 +355,7 @@ impl ConnectedNetwork for Memory let res = node.broadcast_input(vec.clone()).await; match res { Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_broadcast_message_count.add(1); trace!(?key, "Delivered message to remote"); } Err(e) => { @@ -380,7 +380,7 @@ impl ConnectedNetwork for Memory let res = node.direct_input(vec).await; match res { Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_direct_message_count.add(1); trace!(?recipient, "Delivered message to remote"); Ok(()) } @@ -423,7 +423,7 @@ impl ConnectedNetwork for Memory self.inner .in_flight_message_count .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner.metrics.incoming_message_count.add(ret.len()); + self.inner.metrics.incoming_direct_message_count.add(ret.len()); Ok(ret) } TransmitType::Broadcast => { @@ -438,7 +438,7 @@ impl ConnectedNetwork for Memory self.inner .in_flight_message_count .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner.metrics.incoming_message_count.add(ret.len()); + self.inner.metrics.incoming_broadcast_message_count.add(ret.len()); Ok(ret) } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 24bccf2059..c5bbc55933 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -537,6 +537,8 @@ where .await; } }); + let consensus = self.consensus.read().await; + consensus.metrics.current_view.set(*self.cur_view as usize); return true; } @@ -740,6 +742,10 @@ where // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above if new_decide_reached { let mut leaf = leaf.clone(); + consensus + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height).unwrap_or(0)); // If the full block is available for this leaf, include it in the leaf // chain that we send to the client. @@ -1045,6 +1051,7 @@ where .await; debug!("View changed to {}", *new_view); + // ED Need to update the view here? What does otherwise? // self.update_view(qc.view_number + 1).await; @@ -1061,7 +1068,7 @@ where "Failed to publish proposal on view change. View = {:?}", self.cur_view ); - } + } } SequencingHotShotEvent::Timeout(view) => { // The view sync module will handle updating views in the case of timeout @@ -1074,6 +1081,8 @@ where "We received a timeout event in the consensus task for view {}!", *view ); + let consensus = self.consensus.read().await; + consensus.metrics.number_of_timeouts.add(1); } SequencingHotShotEvent::SendDABlockData(block) => { // ED TODO Should make sure this is actually the most recent block @@ -1167,12 +1176,6 @@ where proposer_id: self.api.public_key().to_bytes(), }; - let consensus = self.consensus.read().await; - consensus - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.height).unwrap_or(0)); - let signature = self .quorum_exchange .sign_validating_or_commitment_proposal::(&leaf.commit()); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 4b6b7bb0d6..de5921ce53 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -3,6 +3,7 @@ pub use crate::traits::node_implementation::ViewQueue; pub use crate::utils::{View, ViewInner}; use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; +use displaydoc::Display; use std::collections::HashSet; use crate::utils::Terminator; @@ -82,14 +83,14 @@ pub struct ConsensusMetricsValue { pub last_synced_block_height: Box, /// The current view pub current_view: Box, - /// The duration to collect votes in a view (only applies when this insance is the leader) - pub vote_validate_duration: Box, - /// The duration we waited for txns before building the proposal - pub proposal_wait_duration: Box, - /// The duration to build the proposal - pub proposal_build_duration: Box, - /// The duration of each view, in seconds - pub view_duration: Box, + // The duration to collect votes in a view (only applies when this insance is the leader) + // pub vote_validate_duration: Box, + // The duration we waited for txns before building the proposal + // pub proposal_wait_duration: Box, + // The duration to build the proposal + // pub proposal_build_duration: Box, + // The duration of each view, in seconds + // pub view_duration: Box, /// Number of views that are in-flight since the last committed view pub number_of_views_since_last_commit: Box, /// Number of views that are in-flight since the last anchor view @@ -108,16 +109,6 @@ pub struct ConsensusMetricsValue { pub outstanding_transactions_memory_size: Box, /// Number of views that timed out pub number_of_timeouts: Box, - /// Total direct messages this node sent out - pub outgoing_direct_messages: Box, - /// Total broadcasts sent - pub outgoing_broadcast_messages: Box, - /// Total messages received - pub direct_messages_received: Box, - /// Total broadcast messages received - pub broadcast_messages_received: Box, - // Total number of messages which couldn't be sent - // pub failed_to_send_messages: Box, } /// The wrapper with a string name for the networking metrics @@ -130,7 +121,7 @@ pub struct ConsensusMetrics { } /// the set of counters and gauges for the networking metrics -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, Display)] pub struct InnerConsensusMetrics { /// All the counters of the networking metrics counters: HashMap, @@ -248,20 +239,6 @@ impl ConsensusMetricsValue { last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), current_view: metrics.create_gauge(String::from("current_view"), None), - vote_validate_duration: metrics.create_histogram( - String::from("vote_validate_duration"), - Some(String::from("seconds")), - ), - proposal_build_duration: metrics.create_histogram( - String::from("proposal_build_duration"), - Some(String::from("seconds")), - ), - proposal_wait_duration: metrics.create_histogram( - String::from("proposal_wait_duration"), - Some(String::from("seconds")), - ), - view_duration: metrics - .create_histogram(String::from("view_duration"), Some(String::from("seconds"))), number_of_views_since_last_commit: metrics .create_gauge(String::from("number_of_views_since_last_commit"), None), number_of_views_per_decide_event: metrics @@ -277,14 +254,6 @@ impl ConsensusMetricsValue { .create_gauge(String::from("outstanding_transactions"), None), outstanding_transactions_memory_size: metrics .create_gauge(String::from("outstanding_transactions_memory_size"), None), - outgoing_direct_messages: metrics - .create_counter(String::from("outgoing_direct_messages"), None), - outgoing_broadcast_messages: metrics - .create_counter(String::from("outgoing_broadcast_messages"), None), - direct_messages_received: metrics - .create_counter(String::from("direct_messages_received"), None), - broadcast_messages_received: metrics - .create_counter(String::from("broadcast_messages_received"), None), number_of_timeouts: metrics .create_counter(String::from("number_of_views_timed_out"), None), } diff --git a/types/src/data.rs b/types/src/data.rs index 080355a195..14b180b039 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -64,6 +64,10 @@ impl ConsensusTime for ViewNumber { fn new(n: u64) -> Self { Self(n) } + /// Returen the u64 format + fn get_u64(&self) -> u64 { + self.0 + } } impl Committable for ViewNumber { diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 93ff3e6603..8e6c3758ae 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -95,6 +95,8 @@ pub trait ConsensusTime: } /// Create a new instance of this time unit fn new(val: u64) -> Self; + /// Get the u64 format of time + fn get_u64(&self) -> u64; } /// extra functions required on state to be usable by hotshot-testing From 7b855e25ad6115283a40a59365c892d07aa0e354 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 2 Oct 2023 22:52:32 -0700 Subject: [PATCH 0187/1393] metrics for invalid qc, rejected txs, number of views per decide --- hotshot/src/lib.rs | 1 - task-impls/src/consensus.rs | 9 +++++-- task-impls/src/transactions.rs | 3 +++ types/src/consensus.rs | 44 ++++++++++++++++------------------ 4 files changed, 30 insertions(+), 27 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f9bda9ae4c..06dd78bfc5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -224,7 +224,6 @@ impl> SystemContext { locked_view: anchored_leaf.get_view_number(), high_qc: anchored_leaf.get_justify_qc(), metrics: consensus_metrics.clone(), - invalid_qc: 0, }; let consensus = Arc::new(RwLock::new(consensus)); let txns = consensus.read().await.get_transactions(); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c5bbc55933..8656230761 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -539,6 +539,7 @@ where }); let consensus = self.consensus.read().await; consensus.metrics.current_view.set(*self.cur_view as usize); + consensus.metrics.number_of_views_since_last_decide.set((*self.cur_view as usize) - (consensus.last_decided_view.get_u64() as usize)); return true; } @@ -624,7 +625,7 @@ where .is_valid_cert(&justify_qc, parent_commitment) { error!("Invalid justify_qc in proposal!. parent commitment is {:?} justify qc is {:?}", parent_commitment, justify_qc.clone()); - + consensus.metrics.invalid_qc.update(1); message = self.quorum_exchange.create_no_message::( justify_qc_commitment, leaf_commitment, @@ -822,7 +823,11 @@ where .collect_garbage(old_anchor_view, new_anchor_view) .await; consensus.last_decided_view = new_anchor_view; - consensus.invalid_qc = 0; + consensus.metrics.invalid_qc.set(0); + consensus.metrics.last_decided_view.set(consensus.last_decided_view.get_u64() as usize); + let cur_number_of_views_per_decide_event = *self.cur_view - consensus.last_decided_view.get_u64(); + consensus.metrics.number_of_views_per_decide_event.add_point(cur_number_of_views_per_decide_event as f64); + // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 8609708653..ee14e54d9c 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -116,6 +116,9 @@ where warn!("Conversion failed: {e}. Using the max value."); i64::MAX })); + } else { + // it's more like the calculation of duplicate transactions + consensus.metrics.rejected_transactions.add(1); } } }) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index de5921ce53..5e9436ae9b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -67,10 +67,6 @@ pub struct Consensus> { /// A reference to the metrics trait #[debug(skip)] pub metrics: Arc, - - /// Amount of invalid QCs we've seen since the last commit - /// Used for metrics. This resets to 0 on every decide event. - pub invalid_qc: usize, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces @@ -81,6 +77,8 @@ pub struct ConsensusMetricsValue { pub values: Arc>, /// The number of last synced synced block height pub last_synced_block_height: Box, + /// The number of last decided view + pub last_decided_view: Box, /// The current view pub current_view: Box, // The duration to collect votes in a view (only applies when this insance is the leader) @@ -91,17 +89,17 @@ pub struct ConsensusMetricsValue { // pub proposal_build_duration: Box, // The duration of each view, in seconds // pub view_duration: Box, - /// Number of views that are in-flight since the last committed view - pub number_of_views_since_last_commit: Box, + /// Number of views that are in-flight since the last decided view + pub number_of_views_since_last_decide: Box, /// Number of views that are in-flight since the last anchor view pub number_of_views_per_decide_event: Box, - /// Number of invalid QCs between anchors - pub invalid_qc_views: Box, - /// Number of views that were discarded since from one achor to the next - pub discarded_views_per_decide_event: Box, - /// Views where no proposal was seen from one anchor to the next - pub empty_views_per_decide_event: Box, - /// Number of rejected transactions + /// Number of invalid QCs we've seen since the last commit. + pub invalid_qc: Box, + // Number of views that were discarded since from one anchor to the next + // pub discarded_views_per_decide_event: Box, + // Views where no proposal was seen from one anchor to the next + // pub empty_views_per_decide_event: Box, + /// Number of rejected transactions, it's more like duplicated transactions in current implementation pub rejected_transactions: Box, /// Number of outstanding transactions pub outstanding_transactions: Box, @@ -124,13 +122,13 @@ pub struct ConsensusMetrics { #[derive(Clone, Debug, Default, Display)] pub struct InnerConsensusMetrics { /// All the counters of the networking metrics - counters: HashMap, + pub counters: HashMap, /// All the gauges of the networking metrics - gauges: HashMap, + pub gauges: HashMap, /// All the histograms of the networking metrics - histograms: HashMap>, + pub histograms: HashMap>, /// All the labels of the networking metrics - labels: HashMap, + pub labels: HashMap, } impl ConsensusMetrics { @@ -238,16 +236,14 @@ impl ConsensusMetricsValue { values, last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), + last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), current_view: metrics.create_gauge(String::from("current_view"), None), - number_of_views_since_last_commit: metrics - .create_gauge(String::from("number_of_views_since_last_commit"), None), + number_of_views_since_last_decide: metrics + .create_gauge(String::from("number_of_views_since_last_decide"), None), number_of_views_per_decide_event: metrics .create_histogram(String::from("number_of_views_per_decide_event"), None), - invalid_qc_views: metrics.create_histogram(String::from("invalid_qc_views"), None), - discarded_views_per_decide_event: metrics - .create_histogram(String::from("discarded_views_per_decide_event"), None), - empty_views_per_decide_event: metrics - .create_histogram(String::from("empty_views_per_decide_event"), None), + invalid_qc: metrics. + create_gauge(String::from("invalid_qc"), None), rejected_transactions: metrics .create_counter(String::from("rejected_transactions"), None), outstanding_transactions: metrics From 8f08e1eca2d3f1435fc462c0213edb1602cb2b83 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 2 Oct 2023 23:15:55 -0700 Subject: [PATCH 0188/1393] fix lint --- .../src/traits/networking/libp2p_network.rs | 10 +++++-- .../src/traits/networking/memory_network.rs | 10 +++++-- task-impls/src/consensus.rs | 26 +++++++++++++------ types/src/consensus.rs | 3 +-- 4 files changed, 35 insertions(+), 14 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 1a5550ef82..c7259ec23f 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -706,7 +706,10 @@ impl ConnectedNetwork for Libp2p .drain_at_least_one() .await .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_direct_message_count.add(result.len()); + self.inner + .metrics + .incoming_direct_message_count + .add(result.len()); Ok(result) } TransmitType::Broadcast => { @@ -716,7 +719,10 @@ impl ConnectedNetwork for Libp2p .drain_at_least_one() .await .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_direct_message_count.add(result.len()); + self.inner + .metrics + .incoming_direct_message_count + .add(result.len()); Ok(result) } } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index cd13aab788..5fa6530680 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -423,7 +423,10 @@ impl ConnectedNetwork for Memory self.inner .in_flight_message_count .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner.metrics.incoming_direct_message_count.add(ret.len()); + self.inner + .metrics + .incoming_direct_message_count + .add(ret.len()); Ok(ret) } TransmitType::Broadcast => { @@ -438,7 +441,10 @@ impl ConnectedNetwork for Memory self.inner .in_flight_message_count .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner.metrics.incoming_broadcast_message_count.add(ret.len()); + self.inner + .metrics + .incoming_broadcast_message_count + .add(ret.len()); Ok(ret) } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 8656230761..eae7854352 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -538,8 +538,14 @@ where } }); let consensus = self.consensus.read().await; - consensus.metrics.current_view.set(*self.cur_view as usize); - consensus.metrics.number_of_views_since_last_decide.set((*self.cur_view as usize) - (consensus.last_decided_view.get_u64() as usize)); + consensus + .metrics + .current_view + .set(usize::try_from(self.cur_view.get_u64()).unwrap()); + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(self.cur_view.get_u64()).unwrap() + - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), + ); return true; } @@ -824,10 +830,15 @@ where .await; consensus.last_decided_view = new_anchor_view; consensus.metrics.invalid_qc.set(0); - consensus.metrics.last_decided_view.set(consensus.last_decided_view.get_u64() as usize); - let cur_number_of_views_per_decide_event = *self.cur_view - consensus.last_decided_view.get_u64(); - consensus.metrics.number_of_views_per_decide_event.add_point(cur_number_of_views_per_decide_event as f64); - + consensus.metrics.last_decided_view.set( + usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), + ); + let cur_number_of_views_per_decide_event = + *self.cur_view - consensus.last_decided_view.get_u64(); + consensus + .metrics + .number_of_views_per_decide_event + .add_point(cur_number_of_views_per_decide_event as f64); // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { @@ -1056,7 +1067,6 @@ where .await; debug!("View changed to {}", *new_view); - // ED Need to update the view here? What does otherwise? // self.update_view(qc.view_number + 1).await; @@ -1073,7 +1083,7 @@ where "Failed to publish proposal on view change. View = {:?}", self.cur_view ); - } + } } SequencingHotShotEvent::Timeout(view) => { // The view sync module will handle updating views in the case of timeout diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 5e9436ae9b..cdf4db9d85 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -242,8 +242,7 @@ impl ConsensusMetricsValue { .create_gauge(String::from("number_of_views_since_last_decide"), None), number_of_views_per_decide_event: metrics .create_histogram(String::from("number_of_views_per_decide_event"), None), - invalid_qc: metrics. - create_gauge(String::from("invalid_qc"), None), + invalid_qc: metrics.create_gauge(String::from("invalid_qc"), None), rejected_transactions: metrics .create_counter(String::from("rejected_transactions"), None), outstanding_transactions: metrics From 02a981172e64a031ad67abbfbefe8bafd5644f7e Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 2 Oct 2023 23:27:47 -0700 Subject: [PATCH 0189/1393] fix lint --- testing/src/task_helpers.rs | 2 +- testing/tests/memory_network.rs | 54 ++++++++++++++++++++++++--------- types/src/consensus.rs | 2 -- 3 files changed, 41 insertions(+), 17 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 2e935dab61..17be6f7dfc 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,8 +13,8 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ - consensus::ConsensusMetricsValue, block_impl::{VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, + consensus::ConsensusMetricsValue, data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index d1a231a2c0..74acdcd85b 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -8,7 +8,7 @@ use hotshot::traits::election::static_committee::{ GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, }; use hotshot::traits::implementations::{ - MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, + MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, }; use hotshot::traits::NodeImplementation; use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; @@ -18,7 +18,6 @@ use hotshot_types::certificate::ViewSyncCertificate; use hotshot_types::data::{DAProposal, QuorumProposal, SequencingLeaf}; use hotshot_types::message::{Message, SequencingMessage}; use hotshot_types::traits::election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}; -use hotshot_types::traits::metrics::NoMetrics; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType, SequencingExchanges}; @@ -168,8 +167,7 @@ async fn memory_network_spawn_single() { let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let pub_key = get_pubkey(); - let _network = MemoryNetwork::new(pub_key, NoMetrics::boxed(), group, Option::None); + let _pub_key = get_pubkey(); } // // Spawning a two MemoryNetworks and connecting them should produce no errors @@ -184,10 +182,8 @@ async fn memory_network_spawn_double() { let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let pub_key_1 = get_pubkey(); - let _network_1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - let pub_key_2 = get_pubkey(); - let _network_2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + let _pub_key_1 = get_pubkey(); + let _pub_key_2 = get_pubkey(); } // Check to make sure direct queue works @@ -207,10 +203,20 @@ async fn memory_network_direct_queue() { trace!(?group); let pub_key_1 = get_pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let network1 = MemoryNetwork::new( + pub_key_1, + NetworkingMetricsValue::new(), + group.clone(), + Option::None, + ); let pub_key_2 = get_pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + let network2 = MemoryNetwork::new( + pub_key_2, + NetworkingMetricsValue::new(), + group, + Option::None, + ); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -263,9 +269,19 @@ async fn memory_network_broadcast_queue() { MasterMap::new(); trace!(?group); let pub_key_1 = get_pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let network1 = MemoryNetwork::new( + pub_key_1, + NetworkingMetricsValue::new(), + group.clone(), + Option::None, + ); let pub_key_2 = get_pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + let network2 = MemoryNetwork::new( + pub_key_2, + NetworkingMetricsValue::new(), + group, + Option::None, + ); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -324,9 +340,19 @@ async fn memory_network_test_in_flight_message_count() { MasterMap::new(); trace!(?group); let pub_key_1 = get_pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let network1 = MemoryNetwork::new( + pub_key_1, + NetworkingMetricsValue::new(), + group.clone(), + Option::None, + ); let pub_key_2 = get_pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + let network2 = MemoryNetwork::new( + pub_key_2, + NetworkingMetricsValue::new(), + group, + Option::None, + ); // Create some dummy messages let messages: Vec> = gen_messages(5, 100, pub_key_1); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 987d8d52af..24a51e5b6c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -2,9 +2,7 @@ pub use crate::traits::node_implementation::ViewQueue; pub use crate::utils::{View, ViewInner}; -use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; use displaydoc::Display; -use std::collections::HashSet; use crate::utils::Terminator; use crate::{ From 5a11811d390c8e53736d9d0eca23b84857cdd676 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 3 Oct 2023 11:35:51 -0400 Subject: [PATCH 0190/1393] fix hanging test (#1847) --- testing/tests/memory_network.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index d1a231a2c0..27f6ae6531 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -358,9 +358,12 @@ async fn memory_network_test_in_flight_message_count() { network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); } + while network2.in_flight_message_count().unwrap() > messages.len() { + network2.recv_msgs(TransmitType::Direct).await.unwrap(); + } + while network2.in_flight_message_count().unwrap() > 0 { network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); - network2.recv_msgs(TransmitType::Direct).await.unwrap(); } assert_eq!(network1.in_flight_message_count(), Some(0)); From cc6bce7d9e295912a2ba691c5f9404ee488ddf03 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 3 Oct 2023 18:02:04 -0700 Subject: [PATCH 0191/1393] Update logging --- task-impls/src/da.rs | 2 +- task-impls/src/view_sync.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 4037e07a23..b6244c972a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -604,7 +604,7 @@ where match result { Err(_) => { // Fall through below to updating new block - error!( + debug!( "propose_max_round_time passed, sending transactions we have so far" ); } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index eafeb7a468..3ff00b6cf0 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -781,6 +781,7 @@ where let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncCertificateRecv"); stream .publish(SequencingHotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -842,6 +843,7 @@ where let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncTrigger"); stream .publish(SequencingHotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -912,6 +914,7 @@ where let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncTimeout"); stream .publish(SequencingHotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), From 1fbd3c40bcb27dab8a52c85ae74de31d0106f8e6 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 3 Oct 2023 18:04:48 -0700 Subject: [PATCH 0192/1393] Fix a task name --- task-impls/src/view_sync.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 3ff00b6cf0..ad9191f3f1 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -543,7 +543,8 @@ where let name = format!( "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", - self.next_view, self.current_view + *view_number + 1, + *view_number ); let replica_handle_event = HandleEvent(Arc::new( From 03a0ff0164334c747c1628715ccf33bf3dab9eac Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 3 Oct 2023 23:29:55 -0400 Subject: [PATCH 0193/1393] Subscribe to web server when starting view sync --- task-impls/src/view_sync.rs | 48 ++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index eafeb7a468..1b5417722b 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -299,19 +299,20 @@ where } // We do not have a replica task already running, so start one - let mut replica_state = ViewSyncReplicaTaskState { - current_view: certificate_internal.round, - next_view: certificate_internal.round, - relay: 0, - finalized: false, - sent_view_change_event: false, - phase: ViewSyncPhase::None, - exchange: self.exchange.clone(), - api: self.api.clone(), - event_stream: self.event_stream.clone(), - view_sync_timeout: self.view_sync_timeout, - id: self.id, - }; + let mut replica_state: ViewSyncReplicaTaskState = + ViewSyncReplicaTaskState { + current_view: certificate_internal.round, + next_view: certificate_internal.round, + relay: 0, + finalized: false, + sent_view_change_event: false, + phase: ViewSyncPhase::None, + exchange: self.exchange.clone(), + api: self.api.clone(), + event_stream: self.event_stream.clone(), + view_sync_timeout: self.view_sync_timeout, + id: self.id, + }; let result = replica_state.handle_event(event.clone()).await; @@ -514,10 +515,29 @@ where .await; // panic!("Starting view sync!"); // Spawn replica task + let next_view = *view_number + 1; + // Subscribe to the next view just in case there is progress being made + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(next_view)) + .await; + + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(next_view)) + .await; + + if self.exchange.is_leader(TYPES::Time::new(next_view + 1)) { + debug!("Polling for quorum votes for view {}", next_view); + self.exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForVotes(next_view)) + .await; + } let mut replica_state = ViewSyncReplicaTaskState { current_view: self.current_view, - next_view: TYPES::Time::new(*view_number + 1), + next_view: TYPES::Time::new(next_view), relay: 0, finalized: false, sent_view_change_event: false, From 4ec77508e9b9a758b9ce9d27413eeffe2c8bdc85 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 4 Oct 2023 17:37:38 -0700 Subject: [PATCH 0194/1393] Remove commitment check in is_valid_cert, update state even is missing parent leaf. --- task-impls/src/consensus.rs | 330 +++++++++++++++++++---------------- types/src/traits/election.rs | 9 +- 2 files changed, 185 insertions(+), 154 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b4a38db5e5..3f2551245a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -429,8 +429,13 @@ where // Validate the DAC. if self .committee_exchange - .is_valid_cert(cert, proposal.block_commitment) + .is_valid_cert(cert) { + // Validate the block commitment for non-genesis DAC. + if !cert.is_genesis() && cert.leaf_commitment() != proposal.block_commitment { + error!("Block commitment does not equal parent commitment"); + return false; + } self.quorum_exchange.create_yes_message( proposal.justify_qc.commit(), leaf.commit(), @@ -567,7 +572,6 @@ where Ok(Some(vote_token)) => { debug!("We were chosen for consensus committee on {:?}", view); let consensus = self.consensus.upgradable_read().await; - let message; // TODO ED Insert TC logic here @@ -582,111 +586,147 @@ where .cloned() }; - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.leaf_commitment() - ); - return; - }; - let parent_commitment = parent.commit(); - let leaf: SequencingLeaf<_> = SequencingLeaf { - view_number: view, - height: proposal.data.height, - justify_qc: justify_qc.clone(), - parent_commitment, - deltas: Right(proposal.data.block_commitment), - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: sender.to_bytes(), - }; + // Validate the `justify_qc`. let justify_qc_commitment = justify_qc.commit(); - let leaf_commitment = leaf.commit(); + let invalid = !self.quorum_exchange.is_valid_cert(&justify_qc); + let leaf; - // Validate the `justify_qc`. - if !self - .quorum_exchange - .is_valid_cert(&justify_qc, parent_commitment) - { - error!("Invalid justify_qc in proposal!. parent commitment is {:?} justify qc is {:?}", parent_commitment, justify_qc.clone()); + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + if let Some(parent) = parent { + let message; + leaf = SequencingLeaf { + view_number: view, + height: proposal.data.height, + justify_qc: justify_qc.clone(), + parent_commitment: parent.commit(), + deltas: Right(proposal.data.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: sender.to_bytes(), + }; + let parent_commitment = parent.commit(); + let leaf_commitment = leaf.commit(); - message = self.quorum_exchange.create_no_message::( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); - } - // Validate the `height`. - else if leaf.height != parent.height + 1 { - error!( - "Incorrect height in proposal (expected {}, got {})", - parent.height + 1, - leaf.height - ); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); - } - // Validate the signature. - else if !view_leader_key - .validate(&proposal.signature, leaf_commitment.as_ref()) - { - error!(?proposal.signature, "Could not verify proposal."); - message = self.quorum_exchange.create_no_message( - justify_qc_commitment, - leaf_commitment, - view, - vote_token, - ); - } - // Create a positive vote if either liveness or safety check - // passes. - else { - // Liveness check. - let liveness_check = justify_qc.view_number > consensus.locked_view; - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.view_number, - Terminator::Inclusive(consensus.locked_view), - false, - |leaf| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - if let Err(e) = outcome { - self.api.send_view_error(view, Arc::new(e)).await; - } + if invalid { + error!("Invalid justify_qc in proposal! parent commitment is {:?} justify qc is {:?}", parent_commitment, justify_qc.clone()); - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - error!("Failed safety check and liveness check"); + message = self.quorum_exchange.create_no_message::( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + // Validate the leaf commitment for non-genesis QC. + else if !justify_qc.is_genesis() + && justify_qc.leaf_commitment() != parent_commitment + { + error!("Leaf commitment does not equal parent commitment"); + message = self.quorum_exchange.create_no_message::( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + // Validate the `height`. + else if leaf.height != parent.height + 1 { + error!( + "Incorrect height in proposal (expected {}, got {})", + parent.height + 1, + leaf.height + ); message = self.quorum_exchange.create_no_message( justify_qc_commitment, leaf_commitment, view, vote_token, ); - } else { - // Generate a message with yes vote. - message = self.quorum_exchange.create_yes_message( + } + // Validate the signature. + else if !view_leader_key + .validate(&proposal.signature, leaf_commitment.as_ref()) + { + error!(?proposal.signature, "Could not verify proposal."); + message = self.quorum_exchange.create_no_message( justify_qc_commitment, leaf_commitment, view, vote_token, ); } + // Create a positive vote if either liveness or safety check + // passes. + // Liveness check. + else { + let liveness_check = justify_qc.view_number > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.view_number, + Terminator::Inclusive(consensus.locked_view), + false, + |leaf| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + if let Err(e) = outcome { + self.api.send_view_error(view, Arc::new(e)).await; + } + + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + error!("Failed safety check and liveness check"); + message = self.quorum_exchange.create_no_message( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + // Generate a message with yes vote. + else { + message = self.quorum_exchange.create_yes_message( + justify_qc_commitment, + leaf_commitment, + view, + vote_token, + ); + } + } + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!("Sending vote to next leader {:?}", vote); + }; + } else { + // Allow missing parent so we can update the state, but we won't + // vote in this case. + error!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.leaf_commitment() + ); + + if invalid { + error!("Invalid justify_qc in proposal {:?}", justify_qc.clone()); + } + leaf = SequencingLeaf { + view_number: view, + height: proposal.data.height, + justify_qc: justify_qc.clone(), + parent_commitment: justify_qc.leaf_commitment(), + deltas: Right(proposal.data.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: sender.to_bytes(), + }; } + // TODO (Keyao) Update consensus state only if all verifications pass. + // let high_qc = leaf.justify_qc.clone(); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; @@ -702,64 +742,64 @@ where if parent_view + 1 == view { current_chain_length += 1; if let Err(e) = consensus.visit_leaf_ancestors( - parent_view, - Terminator::Exclusive(old_anchor_view), - true, - |leaf| { - if !new_decide_reached { - if last_view_number_visited == leaf.view_number + 1 { - last_view_number_visited = leaf.view_number; - current_chain_length += 1; - if current_chain_length == 2 { - new_locked_view = leaf.view_number; - new_commit_reached = true; - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - new_decide_qc = Some(leaf.justify_qc.clone()); - } else if current_chain_length == 3 { - new_anchor_view = leaf.view_number; - new_decide_reached = true; - } - } else { - // nothing more to do here... we don't have a new chain extension - return false; - } - } - // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above - if new_decide_reached { - let mut leaf = leaf.clone(); - - // If the full block is available for this leaf, include it in the leaf - // chain that we send to the client. - if let Some(block) = - consensus.saved_blocks.get(leaf.get_deltas_commitment()) - { - if let Err(err) = leaf.fill_deltas(block.clone()) { - error!("unable to fill leaf {} with block {}, block will not be available: {}", - leaf.commit(), block.commit(), err); + parent_view, + Terminator::Exclusive(old_anchor_view), + true, + |leaf| { + if !new_decide_reached { + if last_view_number_visited == leaf.view_number + 1 { + last_view_number_visited = leaf.view_number; + current_chain_length += 1; + if current_chain_length == 2 { + new_locked_view = leaf.view_number; + new_commit_reached = true; + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + new_decide_qc = Some(leaf.justify_qc.clone()); + } else if current_chain_length == 3 { + new_anchor_view = leaf.view_number; + new_decide_reached = true; + } + } else { + // nothing more to do here... we don't have a new chain extension + return false; + } } - } - - leaf_views.push(leaf.clone()); - match &leaf.deltas { - Left(block) => { - let txns = block.contained_transactions(); - for txn in txns { - included_txns.insert(txn); + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above + if new_decide_reached { + let mut leaf = leaf.clone(); + + // If the full block is available for this leaf, include it in the leaf + // chain that we send to the client. + if let Some(block) = + consensus.saved_blocks.get(leaf.get_deltas_commitment()) + { + if let Err(err) = leaf.fill_deltas(block.clone()) { + error!("unable to fill leaf {} with block {}, block will not be available: {}", + leaf.commit(), block.commit(), err); + } + } + + leaf_views.push(leaf.clone()); + match &leaf.deltas { + Left(block) => { + let txns = block.contained_transactions(); + for txn in txns { + included_txns.insert(txn); + } + } + Right(_) => {} } } - Right(_) => {} + true + }, + ) { + error!("publishing view error"); + self.output_event_stream.publish(Event { + view_number: view, + event: EventType::Error { error: e.into() }, + }).await; } - } - true - }, - ) { - error!("publishing view error"); - self.output_event_stream.publish(Event { - view_number: view, - event: EventType::Error { error: e.into() }, - }).await; - } } let included_txns_set: HashSet<_> = if new_decide_reached { @@ -883,10 +923,6 @@ where // Update current view and publish a view change event so other tasks also update self.update_view(new_view).await; - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!("Sending vote to next leader {:?}", vote); - }; } } } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 81a173e954..fdc435163b 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -342,18 +342,13 @@ pub trait ConsensusExchange: Send + Sync { /// The contents of a vote on `commit`. fn vote_data(&self, commit: Commitment) -> VoteData; - /// Validate a QC. - fn is_valid_cert(&self, qc: &Self::Certificate, commit: Commitment) -> bool { + /// Validate a certificate. + fn is_valid_cert(&self, qc: &Self::Certificate) -> bool { if qc.is_genesis() && qc.view_number() == TYPES::Time::genesis() { return true; } let leaf_commitment = qc.leaf_commitment(); - if leaf_commitment != commit { - error!("Leaf commitment does not equal parent commitment"); - return false; - } - match qc.signatures() { AssembledSignature::DA(qc) => { let real_commit = VoteData::DA(leaf_commitment).commit(); From a271837288eeb4dafc975d16760c18b314a08936 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 4 Oct 2023 20:01:54 -0700 Subject: [PATCH 0195/1393] Check ancestors only if having parent leaf --- task-impls/src/consensus.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 3f2551245a..268249518c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -592,7 +592,7 @@ where let leaf; // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - if let Some(parent) = parent { + if let Some(parent) = parent.clone() { let message; leaf = SequencingLeaf { view_number: view, @@ -736,12 +736,13 @@ where let mut new_decide_qc = None; let mut leaf_views = Vec::new(); let mut included_txns = HashSet::new(); - let old_anchor_view = consensus.last_decided_view; - let parent_view = leaf.justify_qc.view_number; - let mut current_chain_length = 0usize; - if parent_view + 1 == view { - current_chain_length += 1; - if let Err(e) = consensus.visit_leaf_ancestors( + if parent.is_some() { + let old_anchor_view = consensus.last_decided_view; + let parent_view = leaf.justify_qc.view_number; + let mut current_chain_length = 0usize; + if parent_view + 1 == view { + current_chain_length += 1; + if let Err(e) = consensus.visit_leaf_ancestors( parent_view, Terminator::Exclusive(old_anchor_view), true, @@ -800,6 +801,7 @@ where event: EventType::Error { error: e.into() }, }).await; } + } } let included_txns_set: HashSet<_> = if new_decide_reached { @@ -904,7 +906,7 @@ where drop(consensus); if should_propose { debug!( - "Attempting to publish proposal after voting; now in view: {}", + "Attempting to publish proposal before voting; now in view: {}", *new_view ); self.publish_proposal_if_able(qc.clone(), qc.view_number + 1) From 550044fe15cc00b82d56a2b1f5eca8e163fdae33 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 5 Oct 2023 10:03:46 -0400 Subject: [PATCH 0196/1393] store leaf and return if missing parent, fix subscribe in view sync --- task-impls/src/consensus.rs | 38 +++++++++++++++++++++++++++++++------ task-impls/src/view_sync.rs | 20 +++++++++---------- 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 268249518c..12d7313075 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -339,8 +339,9 @@ where // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { error!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.leaf_commitment() + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.leaf_commitment(), + proposal.view_number, ); return false; }; @@ -408,8 +409,9 @@ where // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { error!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.leaf_commitment() + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.leaf_commitment(), + proposal.view_number, ); return false; }; @@ -706,12 +708,14 @@ where // Allow missing parent so we can update the state, but we won't // vote in this case. error!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.leaf_commitment() + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.leaf_commitment(), + proposal.data.view_number, ); if invalid { error!("Invalid justify_qc in proposal {:?}", justify_qc.clone()); + return; } leaf = SequencingLeaf { view_number: view, @@ -723,6 +727,28 @@ where timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), }; + if !view_leader_key + .validate(&proposal.signature, leaf.commit().as_ref()) + { + error!(?proposal.signature, "Could not verify proposal."); + return; + } + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + drop(consensus); + // The valid QC and signature on the proposal is evidence we can go to the next view + // even though we can't vote in this round because we missed the last proposal. + self.update_view(TYPES::Time::new(*view + 1)).await; + return; } // TODO (Keyao) Update consensus state only if all verifications pass. diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index e35ec6f399..da52ceaf82 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -516,25 +516,25 @@ where // panic!("Starting view sync!"); // Spawn replica task let next_view = *view_number + 1; + // Subscribe to the view after we are leader since we know we won't propose in the next view if we are leader. + let subscribe_view = if self.exchange.is_leader(TYPES::Time::new(next_view)) { + next_view + 1 + } else { + next_view + }; // Subscribe to the next view just in case there is progress being made self.exchange .network() - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(next_view)) + .inject_consensus_info(ConsensusIntentEvent::PollForProposal( + subscribe_view, + )) .await; self.exchange .network() - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(next_view)) + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; - if self.exchange.is_leader(TYPES::Time::new(next_view + 1)) { - debug!("Polling for quorum votes for view {}", next_view); - self.exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::PollForVotes(next_view)) - .await; - } - let mut replica_state = ViewSyncReplicaTaskState { current_view: self.current_view, next_view: TYPES::Time::new(next_view), From c0c95e7f9c5edfff0c5b3e0d4c3229dd3dc38ba9 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 9 Oct 2023 11:14:59 -0400 Subject: [PATCH 0197/1393] merge with main --- constants/Cargo.toml | 5 + constants/src/lib.rs | 10 + hotshot-qc/Cargo.toml | 2 +- hotshot-signature-key/src/bn254.rs | 6 +- hotshot-signature-key/src/bn254/bn254_priv.rs | 8 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 37 +- hotshot-stake-table/Cargo.toml | 2 +- hotshot/Cargo.toml | 30 +- hotshot/examples/infra/mod.rs | 5 +- hotshot/examples/infra/modDA.rs | 520 +++++++++------ hotshot/examples/libp2p/multi-validator.rs | 69 ++ hotshot/examples/libp2p/orchestrator.rs | 24 +- hotshot/examples/libp2p/types.rs | 83 ++- hotshot/examples/libp2p/validator.rs | 30 +- .../examples/web-server-da/multi-validator.rs | 4 +- .../web-server-da/multi-web-server.rs | 8 +- .../examples/web-server-da/orchestrator.rs | 4 +- hotshot/examples/web-server-da/types.rs | 57 +- hotshot/examples/web-server-da/validator.rs | 4 +- hotshot/examples/web-server-da/web-server.rs | 4 +- hotshot/src/demo.rs | 207 ++++++ hotshot/src/demos.rs | 8 - hotshot/src/demos/sdemo.rs | 384 ----------- hotshot/src/demos/vdemo.rs | 607 ------------------ hotshot/src/lib.rs | 82 +-- hotshot/src/tasks/mod.rs | 184 +++--- hotshot/src/traits.rs | 4 +- .../src/traits/election/static_committee.rs | 44 +- .../src/traits/networking/libp2p_network.rs | 245 ++++--- .../src/traits/networking/memory_network.rs | 577 +++-------------- .../networking/web_server_libp2p_fallback.rs | 66 +- .../traits/networking/web_server_network.rs | 169 +++-- .../networking/web_sever_libp2p_fallback.rs | 318 --------- hotshot/src/traits/storage/atomic_storage.rs | 16 +- .../atomic_storage/dual_key_value_store.rs | 6 +- hotshot/src/traits/storage/memory_storage.rs | 11 +- hotshot/src/types/handle.rs | 8 +- libp2p-networking/Cargo.toml | 11 +- .../src/network/behaviours/dht/cache.rs | 325 ++++++++++ .../network/behaviours/{dht.rs => dht/mod.rs} | 52 +- .../src/network/behaviours/gossip.rs | 12 +- libp2p-networking/src/network/node.rs | 7 +- libp2p-networking/src/network/node/config.rs | 4 + libp2p-networking/src/network/node/handle.rs | 18 +- libp2p-networking/tests/counter.rs | 4 +- orchestrator/Cargo.toml | 4 +- orchestrator/default-libp2p-run-config.toml | 18 +- orchestrator/default-run-config.toml | 4 +- .../default-web-server-run-config.toml | 4 +- orchestrator/src/config.rs | 46 +- orchestrator/src/lib.rs | 19 +- task-impls/Cargo.toml | 3 +- task-impls/src/consensus.rs | 231 +++---- task-impls/src/da.rs | 486 +++++++------- task-impls/src/events.rs | 40 +- task-impls/src/lib.rs | 3 + task-impls/src/network.rs | 64 +- task-impls/src/transactions.rs | 417 ++++++++++++ task-impls/src/view_sync.rs | 85 +-- task/Cargo.toml | 1 - testing/Cargo.toml | 2 +- testing/README.md | 2 +- testing/src/network_reliability.rs | 163 ----- testing/src/node_types.rs | 192 +++--- testing/src/overall_safety_task.rs | 8 +- testing/src/spinning_task.rs | 3 +- testing/src/task_helpers.rs | 37 +- testing/src/test_builder.rs | 9 +- testing/src/test_launcher.rs | 4 - testing/src/test_runner.rs | 11 +- testing/tests/atomic_storage.rs | 72 +-- testing/tests/basic.rs | 12 +- testing/tests/catchup.rs | 62 +- testing/tests/da_task.rs | 84 ++- testing/tests/memory_network.rs | 371 +++++++++++ testing/tests/network_task.rs | 62 +- testing/tests/timeout.rs | 2 +- types/Cargo.toml | 18 +- types/src/block_impl.rs | 127 ++++ types/src/certificate.rs | 94 +-- types/src/consensus.rs | 16 +- types/src/constants.rs | 14 - types/src/data.rs | 126 ++-- types/src/event.rs | 3 +- types/src/lib.rs | 6 +- types/src/message.rs | 72 ++- types/src/traits.rs | 2 +- types/src/traits/block_contents.rs | 63 +- types/src/traits/consensus_api.rs | 19 +- types/src/traits/election.rs | 433 ++++++++----- types/src/traits/network.rs | 283 +++++++- types/src/traits/node_implementation.rs | 34 +- types/src/traits/signature_key.rs | 34 +- types/src/traits/state.rs | 22 +- types/src/traits/storage.rs | 10 +- types/src/vote.rs | 576 +++++++++++++++-- web_server/Cargo.toml | 3 - web_server/api.toml | 7 + web_server/src/config.rs | 28 + web_server/src/lib.rs | 15 + 100 files changed, 4862 insertions(+), 3945 deletions(-) create mode 100644 constants/Cargo.toml create mode 100644 constants/src/lib.rs create mode 100644 hotshot/examples/libp2p/multi-validator.rs create mode 100644 hotshot/src/demo.rs delete mode 100644 hotshot/src/demos.rs delete mode 100644 hotshot/src/demos/sdemo.rs delete mode 100644 hotshot/src/demos/vdemo.rs delete mode 100644 hotshot/src/traits/networking/web_sever_libp2p_fallback.rs create mode 100644 libp2p-networking/src/network/behaviours/dht/cache.rs rename libp2p-networking/src/network/behaviours/{dht.rs => dht/mod.rs} (94%) create mode 100644 task-impls/src/transactions.rs delete mode 100644 testing/src/network_reliability.rs create mode 100644 testing/tests/memory_network.rs create mode 100644 types/src/block_impl.rs delete mode 100644 types/src/constants.rs diff --git a/constants/Cargo.toml b/constants/Cargo.toml new file mode 100644 index 0000000000..6f04253d2e --- /dev/null +++ b/constants/Cargo.toml @@ -0,0 +1,5 @@ +[package] +name = "hotshot-constants" +version.workspace = true + +[dependencies] diff --git a/constants/src/lib.rs b/constants/src/lib.rs new file mode 100644 index 0000000000..8a4d9d5d46 --- /dev/null +++ b/constants/src/lib.rs @@ -0,0 +1,10 @@ +//! configurable constants for hotshot + +/// the ID of the genesis block proposer +pub const GENESIS_PROPOSER_ID: [u8; 2] = [4, 2]; + +/// the number of views to gather information for ahead of time +pub const LOOK_AHEAD: u64 = 5; + +/// the default kademlia record republication interval (in seconds) +pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 0faa1f0a87..9c53fb335a 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -19,7 +19,7 @@ ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } -generic-array = "0.14.7" +generic-array = { workspace = true } hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-relation = { workspace = true } diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs index fc7273afd5..2414c89c9c 100644 --- a/hotshot-signature-key/src/bn254.rs +++ b/hotshot-signature-key/src/bn254.rs @@ -1,8 +1,8 @@ //! Demonstration implementation of the [`SignatureKey`] trait using BN254 use hotshot_types::traits::signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}; -/// `BN254Priv` implementation +/// `BLSPrivKey` implementation mod bn254_priv; -/// `BN254Pub` implementation +/// `BLSPubKey` implementation mod bn254_pub; -pub use self::{bn254_priv::BN254Priv, bn254_pub::BN254Pub}; +pub use self::{bn254_priv::BLSPrivKey, bn254_pub::BLSPubKey}; diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 3b7000eea4..439b52ff39 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -7,12 +7,12 @@ use std::cmp::Ordering; /// Private key type for a bn254 keypair #[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] -pub struct BN254Priv { +pub struct BLSPrivKey { /// The private key for this keypair pub(super) priv_key: QCSignKey, } -impl BN254Priv { +impl BLSPrivKey { /// Generate a new private key from scratch #[must_use] pub fn generate() -> Self { @@ -54,7 +54,7 @@ impl BN254Priv { } } -impl PartialOrd for BN254Priv { +impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.priv_key.to_string(); let other_bytes = &other.priv_key.to_string(); @@ -62,7 +62,7 @@ impl PartialOrd for BN254Priv { } } -impl Ord for BN254Priv { +impl Ord for BLSPrivKey { fn cmp(&self, other: &Self) -> Ordering { let self_bytes = &self.priv_key.to_string(); let other_bytes = &other.priv_key.to_string(); diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index c25fad4be0..025d455129 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -1,4 +1,4 @@ -use super::{BN254Priv, EncodedPublicKey, EncodedSignature, SignatureKey}; +use super::{BLSPrivKey, EncodedPublicKey, EncodedSignature, SignatureKey}; use bincode::Options; use bitvec::prelude::*; use blake3::traits::digest::generic_array::GenericArray; @@ -22,12 +22,12 @@ use typenum::U32; /// This type makes use of noise for non-determinisitc signatures. #[derive(Clone, PartialEq, Eq, Hash, Copy, Serialize, Deserialize, Debug)] -pub struct BN254Pub { +pub struct BLSPubKey { /// The public key for this keypair pub_key: VerKey, } -impl PartialOrd for BN254Pub { +impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.pub_key.to_string(); let other_bytes = &other.pub_key.to_string(); @@ -35,7 +35,7 @@ impl PartialOrd for BN254Pub { } } -impl Ord for BN254Pub { +impl Ord for BLSPubKey { fn cmp(&self, other: &Self) -> Ordering { let self_bytes = &self.pub_key.to_string(); let other_bytes = &other.pub_key.to_string(); @@ -43,18 +43,16 @@ impl Ord for BN254Pub { } } -impl SignatureKey for BN254Pub { - type PrivateKey = BN254Priv; +impl SignatureKey for BLSPubKey { + type PrivateKey = BLSPrivKey; type StakeTableEntry = JFStakeTableEntry; type QCParams = JFQCParams< ::VerificationKey, ::PublicParameter, >; - type QCType = ( - ::Signature, - BitVec, - ); - // as AssembledQuorumCertificate>::QC; + type PureAssembledSignatureType = + ::Signature; + type QCType = (Self::PureAssembledSignatureType, BitVec); #[instrument(skip(self))] fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool { @@ -114,7 +112,7 @@ impl SignatureKey for BN254Pub { fn from_bytes(bytes: &EncodedPublicKey) -> Option { let x: Result = bincode_opts().deserialize(&bytes.0); match x { - Ok(pub_key) => Some(BN254Pub { pub_key }), + Ok(pub_key) => Some(BLSPubKey { pub_key }), Err(e) => { debug!(?e, "Failed to deserialize public key"); None @@ -134,6 +132,12 @@ impl SignatureKey for BN254Pub { } } + fn get_public_key(entry: &Self::StakeTableEntry) -> Self { + Self { + pub_key: entry.stake_key, + } + } + fn get_public_parameter( stake_entries: Vec, threshold: U256, @@ -150,19 +154,14 @@ impl SignatureKey for BN254Pub { BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() } - fn get_sig_proof( - signature: &Self::QCType, - ) -> ( - ::Signature, - BitVec, - ) { + fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { signature.clone() } fn assemble( real_qc_pp: &Self::QCParams, signers: &BitSlice, - sigs: &[::Signature], + sigs: &[Self::PureAssembledSignatureType], ) -> Self::QCType { BitVectorQC::::assemble(real_qc_pp, signers, sigs) .expect("this assembling shouldn't fail") diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 8f11e4d66f..4059589a85 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -16,7 +16,7 @@ bitvec = { workspace = true } digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } -generic-array = "0.14.7" +generic-array = { workspace = true } hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-relation = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 7edc7363c5..d7816208f1 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -25,16 +25,21 @@ docs = [] doc-images = [] hotshot-testing = [] -# [[example]] -# name = "libp2p-validator" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/validator.rs" -# -# [[example]] -# name = "libp2p-orchestrator" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/orchestrator.rs" -# +[[example]] +name = "libp2p-validator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/validator.rs" + +[[example]] +name = "libp2p-multi-validator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/multi-validator.rs" + +[[example]] +name = "libp2p-orchestrator" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/orchestrator.rs" + # [[example]] # name = "web-server-orchestrator" # required-features = ["demo", "libp2p/rsa"] @@ -80,6 +85,7 @@ bincode = { workspace = true } bitvec = { workspace = true } clap = { version = "4.4", features = ["derive", "env"], optional = true } commit = { workspace = true } +hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } dashmap = "5.5.1" derivative = { version = "2.2.0", optional = true } @@ -95,10 +101,10 @@ hotshot-types = { path = "../types", version = "0.1.0", default-features = false hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +jf-primitives = { workspace = true } libp2p = { workspace = true } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } -nll = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true, features = ["rc"] } @@ -116,6 +122,6 @@ async-std = { workspace = true } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.106" +serde_json = "1.0.107" toml = { workspace = true } diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index efd3b8169d..06d7d9deb3 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -52,7 +52,7 @@ pub fn load_config_from_file( > = config_toml.into(); // Generate network's public keys - config.config.known_nodes = (0..config.config.total_nodes.get()) + let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { TYPES::SignatureKey::generated_from_seed_indexed( config.seed, @@ -63,13 +63,12 @@ pub fn load_config_from_file( .collect(); config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) - .map(|node_id| config.config.known_nodes[node_id].get_stake_table_entry(1u64)) + .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) .collect(); config } -/// yeesh maybe we should just implement SignatureKey for this... pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 729cbfd79f..ef188a7849 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -1,11 +1,14 @@ use crate::infra::{load_config_from_file, OrchestratorArgs}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use async_lock::RwLock; use async_trait::async_trait; use futures::StreamExt; use hotshot::{ traits::{ - implementations::{MemoryStorage, WebCommChannel, WebServerNetwork}, + implementations::{ + Libp2pCommChannel, Libp2pNetwork, MemoryStorage, WebCommChannel, WebServerNetwork, + }, NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, @@ -18,8 +21,9 @@ use hotshot_orchestrator::{ }; use hotshot_task::task::FilterEvent; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal, SequencingLeaf, TestableLeaf}, + data::{QuorumProposal, SequencingLeaf, TestableLeaf}, event::{Event, EventType}, message::{Message, SequencingMessage}, traits::{ @@ -33,9 +37,20 @@ use hotshot_types::{ }, state::{ConsensusTime, TestableBlock, TestableState}, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, HotShotConfig, }; +use libp2p_identity::{ + ed25519::{self, SecretKey}, + Keypair, +}; +use libp2p_networking::{ + network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, + reexport::Multiaddr, +}; +use rand::rngs::StdRng; +use rand::SeedableRng; +use std::{collections::BTreeSet, sync::Arc}; +use std::{num::NonZeroUsize, str::FromStr}; // use libp2p::{ // identity::{ // ed25519::{Keypair as EdKeypair, SecretKey}, @@ -44,11 +59,11 @@ use hotshot_types::{ // multiaddr::{self, Protocol}, // Multiaddr, // }; -// use libp2p_identity::PeerId; +use libp2p_identity::PeerId; // use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; +use std::{fmt::Debug, net::Ipv4Addr}; use std::{ //collections::{BTreeSet, VecDeque}, - collections::VecDeque, //fs, mem, net::IpAddr, @@ -58,7 +73,6 @@ use std::{ //time::{Duration, Instant}, time::Instant, }; -use std::{fmt::Debug, net::Ipv4Addr}; //use surf_disco::error::ClientError; //use surf_disco::Client; use tracing::{debug, error, info, warn}; @@ -67,27 +81,9 @@ use tracing::{debug, error, info, warn}; pub async fn run_orchestrator_da< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel< - TYPES, - Message, - DAProposal, - DAVote, - MEMBERSHIP, - > + Debug, - QUORUMNETWORK: CommunicationChannel< - TYPES, - Message, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > + Debug, - VIEWSYNCNETWORK: CommunicationChannel< - TYPES, - Message, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -130,32 +126,27 @@ pub async fn run_orchestrator_da< .await; } +/// Helper function to calculate the nuymber of transactions to send per node per round +fn calculate_num_tx_per_round( + node_index: u64, + total_num_nodes: usize, + transactions_per_round: usize, +) -> usize { + if node_index == 0 { + transactions_per_round / total_num_nodes + transactions_per_round % total_num_nodes + } else { + transactions_per_round / total_num_nodes + } +} + /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] pub trait RunDA< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel< - TYPES, - Message, - DAProposal, - DAVote, - MEMBERSHIP, - > + Debug, - QUORUMNETWORK: CommunicationChannel< - TYPES, - Message, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > + Debug, - VIEWSYNCNETWORK: CommunicationChannel< - TYPES, - Message, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -215,7 +206,6 @@ pub trait RunDA< // Get KeyPair for certificate Aggregation let (pk, sk) = TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); - let known_nodes = config.config.known_nodes.clone(); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let entry = pk.get_stake_table_entry(1u64); @@ -240,7 +230,6 @@ pub trait RunDA< let exchanges = NODE::Exchanges::create( known_nodes_with_stake.clone(), - known_nodes.clone(), (quorum_election_config, committee_election_config), ( quorum_network.clone(), @@ -279,38 +268,23 @@ pub trait RunDA< } = self.get_config(); let size = mem::size_of::(); - let adjusted_padding = if padding < size { 0 } else { padding - size }; - let mut txns: VecDeque = VecDeque::new(); - - // TODO ED: In the future we should have each node generate transactions every round to simulate a more realistic network - let tx_to_gen = transactions_per_round * rounds * 3; - { - let mut txn_rng = rand::thread_rng(); - for _ in 0..tx_to_gen { - let txn = - <::StateType as TestableState>::create_random_transaction( - None, - &mut txn_rng, - padding as u64, - ); - txns.push_back(txn); - } - } - debug!("Generated {} transactions", tx_to_gen); + let padding = padding.saturating_sub(size); + let mut txn_rng = StdRng::seed_from_u64(node_index); - debug!("Adjusted padding size is {:?} bytes", adjusted_padding); - let mut round = 0; - let mut total_transactions = 0; + debug!("Adjusted padding size is {:?} bytes", padding); - let start = Instant::now(); + let mut total_transactions_committed = 0; + let mut total_transactions_sent = 0; + let transactions_to_send_per_round = + calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); info!("Starting hotshot!"); + let start = Instant::now(); + let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; - let total_nodes_u64 = total_nodes.get() as u64; - context.hotshot.start_consensus().await; loop { @@ -339,8 +313,20 @@ pub trait RunDA< } } + // send transactions + for _ in 0..transactions_to_send_per_round { + let txn = + <::StateType as TestableState>::create_random_transaction( + None, + &mut txn_rng, + padding as u64, + ); + _ = context.submit_transaction(txn).await.unwrap(); + total_transactions_sent += 1; + } + if let Some(size) = block_size { - total_transactions += size; + total_transactions_committed += size; } num_successful_commits += leaf_chain.len(); @@ -359,39 +345,16 @@ pub trait RunDA< EventType::NextLeaderViewTimeout { view_number } => { warn!("Timed out as the next leader in view {:?}", view_number); } - EventType::ViewFinished { view_number } => { - if *view_number > round { - round = *view_number; - info!("view finished: {:?}", view_number); - for _ in 0..transactions_per_round { - if node_index >= total_nodes_u64 - 10 { - let txn = txns.pop_front().unwrap(); - - debug!("Submitting txn on round {}", round); - - let result = context.submit_transaction(txn).await; - - if result.is_err() { - error! ( - "Could not send transaction to web server on round {}", - round - ) - } - } - } - } - } + EventType::ViewFinished { view_number: _ } => {} _ => unimplemented!(), } } } - - round += 1; } // Output run results let total_time_elapsed = start.elapsed(); - error!("{rounds} rounds completed in {total_time_elapsed:?} - Total transactions committed: {total_transactions} - Total commitments: {num_successful_commits}"); + error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); } /// Returns the da network for this run @@ -415,23 +378,6 @@ pub trait RunDA< // WEB SERVER -/// Alias for the [`WebCommChannel`] for sequencing consensus. -type StaticDAComm = - WebCommChannel, DAVote, MEMBERSHIP>; - -/// Alias for the ['WebCommChannel'] for validating consensus -type StaticQuorumComm = WebCommChannel< - TYPES, - I, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, ->; - -/// Alias for the ['WebCommChannel'] for view sync consensus -type StaticViewSyncComm = - WebCommChannel, ViewSyncVote, MEMBERSHIP>; - /// Represents a web server-based run pub struct WebServerDARun< TYPES: NodeType, @@ -443,14 +389,14 @@ pub struct WebServerDARun< ::StakeTableEntry, TYPES::ElectionConfigType, >, - quorum_network: StaticQuorumComm, - da_network: StaticDAComm, - view_sync_network: StaticViewSyncComm, + quorum_network: WebCommChannel, + da_network: WebCommChannel, + view_sync_network: WebCommChannel, } #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -463,32 +409,20 @@ impl< SequencingLeaf, QuorumProposal>, MEMBERSHIP, - WebCommChannel< - TYPES, - NODE, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - >, + WebCommChannel, Message, >, CommitteeExchange< TYPES, MEMBERSHIP, - WebCommChannel, DAVote, MEMBERSHIP>, + WebCommChannel, Message, >, ViewSyncExchange< TYPES, ViewSyncCertificate, MEMBERSHIP, - WebCommChannel< - TYPES, - NODE, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - >, + WebCommChannel, Message, >, >, @@ -499,9 +433,9 @@ impl< RunDA< TYPES, MEMBERSHIP, - StaticDAComm, - StaticQuorumComm, - StaticViewSyncComm, + WebCommChannel, + WebCommChannel, + WebCommChannel, NODE, > for WebServerDARun where @@ -540,21 +474,11 @@ where ); // Create the network - let quorum_network: WebCommChannel< - TYPES, - NODE, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > = WebCommChannel::new(underlying_quorum_network.clone().into()); + let quorum_network: WebCommChannel = + WebCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: WebCommChannel< - TYPES, - NODE, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > = WebCommChannel::new(underlying_quorum_network.into()); + let view_sync_network: WebCommChannel = + WebCommChannel::new(underlying_quorum_network.into()); let WebServerConfig { host, @@ -563,17 +487,10 @@ where }: WebServerConfig = config.clone().da_web_server_config.unwrap(); // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel, DAVote, MEMBERSHIP> = - WebCommChannel::new( - WebServerNetwork::create( - &host.to_string(), - port, - wait_between_polls, - pub_key, - true, - ) + let da_network: WebCommChannel = WebCommChannel::new( + WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) .into(), - ); + ); WebServerDARun { config, @@ -583,28 +500,244 @@ where } } - fn get_da_network( - &self, - ) -> WebCommChannel, DAVote, MEMBERSHIP> { + fn get_da_network(&self) -> WebCommChannel { self.da_network.clone() } - fn get_quorum_network( + fn get_quorum_network(&self) -> WebCommChannel { + self.quorum_network.clone() + } + + fn get_view_sync_network(&self) -> WebCommChannel { + self.view_sync_network.clone() + } + + fn get_config( &self, - ) -> WebCommChannel< + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > { + self.config.clone() + } +} + +// Libp2p + +/// Represents a libp2p-based run +pub struct Libp2pDARun, MEMBERSHIP: Membership> +{ + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + quorum_network: Libp2pCommChannel, + da_network: Libp2pCommChannel, + view_sync_network: Libp2pCommChannel, +} + +#[async_trait] +impl< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + CommitteeExchange< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + > + RunDA< TYPES, - NODE, - QuorumProposal>, - QuorumVote>, MEMBERSHIP, - > { + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + NODE, + > for Libp2pDARun +where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> Libp2pDARun { + let (pubkey, _privkey) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + config.seed, + config.node_index, + ); + let mut config = config; + let libp2p_config = config + .libp2p_config + .take() + .expect("Configuration is not for a Libp2p network"); + let bs_len = libp2p_config.bootstrap_nodes.len(); + let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config + .bootstrap_nodes + .iter() + .map(|(addr, pair)| { + let kp = Keypair::from_protobuf_encoding(pair).unwrap(); + let peer_id = PeerId::from_public_key(&kp.public()); + let multiaddr = + Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) + .unwrap(); + (peer_id, multiaddr) + }) + .collect(); + let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); + let node_type = if (config.node_index as usize) < bs_len { + NetworkNodeType::Bootstrap + } else { + NetworkNodeType::Regular + }; + let node_index = config.node_index; + let port_index = match libp2p_config.index_ports { + true => node_index, + false => 0, + }; + let bound_addr: Multiaddr = format!( + "/{}/{}/udp/{}/quic-v1", + if libp2p_config.public_ip.is_ipv4() { + "ip4" + } else { + "ip6" + }, + libp2p_config.public_ip, + libp2p_config.base_port as u64 + port_index + ) + .parse() + .unwrap(); + + // generate network + let mut config_builder = NetworkNodeConfigBuilder::default(); + assert!(config.config.total_nodes.get() > 2); + let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); + config_builder.replication_factor(replicated_nodes); + config_builder.identity(identity.clone()); + + config_builder.bound_addr(Some(bound_addr.clone())); + + let to_connect_addrs = bootstrap_nodes + .iter() + .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) + .collect(); + + config_builder.to_connect_addrs(to_connect_addrs); + + let mesh_params = + // NOTE I'm arbitrarily choosing these. + match node_type { + NetworkNodeType::Bootstrap => MeshParams { + mesh_n_high: libp2p_config.bootstrap_mesh_n_high, + mesh_n_low: libp2p_config.bootstrap_mesh_n_low, + mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, + mesh_n: libp2p_config.bootstrap_mesh_n, + }, + NetworkNodeType::Regular => MeshParams { + mesh_n_high: libp2p_config.mesh_n_high, + mesh_n_low: libp2p_config.mesh_n_low, + mesh_outbound_min: libp2p_config.mesh_outbound_min, + mesh_n: libp2p_config.mesh_n, + }, + NetworkNodeType::Conductor => unreachable!(), + }; + config_builder.mesh_params(Some(mesh_params)); + + let mut all_keys = BTreeSet::new(); + let mut da_keys = BTreeSet::new(); + for i in 0..config.config.total_nodes.get() as u64 { + let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; + let pubkey = TYPES::SignatureKey::from_private(&privkey); + if i < config.config.da_committee_size as u64 { + da_keys.insert(pubkey.clone()); + } + all_keys.insert(pubkey); + } + + let node_config = config_builder.build().unwrap(); + let underlying_quorum_network = Libp2pNetwork::new( + NoMetrics::boxed(), + node_config, + pubkey.clone(), + Arc::new(RwLock::new( + bootstrap_nodes + .iter() + .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) + .collect(), + )), + bs_len, + config.node_index as usize, + // NOTE: this introduces an invariant that the keys are assigned using this indexed + // function + all_keys, + da_keys, + ) + .await + .unwrap(); + + underlying_quorum_network.wait_for_ready().await; + + // Create the network + let quorum_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let view_sync_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let da_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + Libp2pDARun { + config, + quorum_network, + da_network, + view_sync_network, + } + } + + fn get_da_network(&self) -> Libp2pCommChannel { + self.da_network.clone() + } + + fn get_quorum_network(&self) -> Libp2pCommChannel { self.quorum_network.clone() } - fn get_view_sync_network( - &self, - ) -> WebCommChannel, ViewSyncVote, MEMBERSHIP> - { + fn get_view_sync_network(&self) -> Libp2pCommChannel { self.view_sync_network.clone() } @@ -621,29 +754,11 @@ where /// Main entry point for validators pub async fn main_entry_point< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel< - TYPES, - Message, - DAProposal, - DAVote, - MEMBERSHIP, - > + Debug, - QUORUMNETWORK: CommunicationChannel< - TYPES, - Message, - QuorumProposal>, - QuorumVote>, - MEMBERSHIP, - > + Debug, - VIEWSYNCNETWORK: CommunicationChannel< - TYPES, - Message, - ViewSyncCertificate, - ViewSyncVote, - MEMBERSHIP, - > + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -720,3 +835,12 @@ pub async fn main_entry_point< info!("All nodes are ready! Starting HotShot"); run.run_hotshot(hotshot).await; } + +pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); + >::from(sk_bytes).into() +} diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs new file mode 100644 index 0000000000..3ed46fa979 --- /dev/null +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -0,0 +1,69 @@ +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, +}; +use clap::Parser; +use hotshot::demo::DemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use std::net::IpAddr; +use tracing::instrument; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; + +#[derive(Parser, Debug, Clone)] +struct MultiValidatorArgs { + /// Number of validators to run + pub num_nodes: u16, + /// The address the orchestrator runs on + pub host: IpAddr, + /// The port the orchestrator runs on + pub port: u16, + /// This node's public IP address, for libp2p + /// If no IP address is passed in, it will default to 127.0.0.1 + pub public_ip: Option, +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = MultiValidatorArgs::parse(); + tracing::error!( + "connecting to orchestrator at {:?}:{:?}", + args.host, + args.port + ); + let mut nodes = Vec::new(); + for _ in 0..args.num_nodes { + let node = async_spawn(async move { + infra_da::main_entry_point::< + DemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: args.host.to_string(), + port: args.port, + public_ip: args.public_ip, + }) + .await + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 3bb08103d0..594d004a93 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -1,15 +1,21 @@ pub mod types; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::vdemo::VDemoTypes; +use hotshot::demo::DemoTypes; use tracing::instrument; use types::ThisMembership; -use crate::infra::{run_orchestrator, OrchestratorArgs}; -use crate::types::{NodeImpl, ThisNetwork}; +use crate::{ + infra::OrchestratorArgs, + infra_da::run_orchestrator_da, + types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, +}; #[path = "../infra/mod.rs"] pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -18,7 +24,17 @@ pub mod infra; #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { + setup_logging(); + setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::(args).await; + run_orchestrator_da::< + DemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + >(args) + .await; } diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 8b3c70e5e1..79b1ea1419 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,60 +1,75 @@ -use crate::infra::Libp2pRun; -use hotshot::traits::implementations::MemoryStorage; +use crate::infra_da::Libp2pDARun; use hotshot::{ - demos::vdemo::VDemoTypes, + demo::DemoTypes, traits::{ - election::static_committee::GeneralStaticCommittee, implementations::Libp2pCommChannel, + election::static_committee::GeneralStaticCommittee, + implementations::{Libp2pCommChannel, MemoryStorage}, }, }; -use hotshot_types::message::{Message, ValidatingMessage}; -use hotshot_types::traits::{ - election::QuorumExchange, - node_implementation::{ChannelMaps, NodeImplementation, ValidatingExchanges}, -}; use hotshot_types::{ - data::{ValidatingLeaf, ValidatingProposal}, - traits::node_implementation::NodeType, - vote::QuorumVote, + certificate::ViewSyncCertificate, + data::{DAProposal, QuorumProposal, SequencingLeaf}, + message::{Message, SequencingMessage}, + traits::{ + election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, + }, + vote::{DAVote, QuorumVote, ViewSyncVote}, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; -#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = ValidatingLeaf; +pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; -pub type ThisNetwork = - Libp2pCommChannel; + GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pCommChannel; +pub type ViewSyncNetwork = Libp2pCommChannel; + +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; -pub type ThisProposal = ValidatingProposal; -pub type ThisVote = QuorumVote; +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; - type Leaf = ValidatingLeaf; - type Exchanges = ValidatingExchanges< - VDemoTypes, - Message, +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; + +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; + type Exchanges = SequencingExchanges< + DemoTypes, + Message, QuorumExchange< - VDemoTypes, + DemoTypes, Self::Leaf, - ThisProposal, + ThisQuorumProposal, + ThisMembership, + QuorumNetwork, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + DemoTypes, + ThisViewSyncProposal, ThisMembership, - ThisNetwork, - Message, + ViewSyncNetwork, + Message, >, >; - type ConsensusMessage = ValidatingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pRun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 59009c9d8f..ab44e02991 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,15 +1,18 @@ -use crate::infra::main_entry_point; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::vdemo::VDemoTypes; -use hotshot_orchestrator::client::ValidatorArgs; -use tracing::instrument; +use hotshot::demo::DemoTypes; +use tracing::{info, instrument}; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; -use crate::types::{NodeImpl, ThisMembership, ThisNetwork, ThisRun}; +use hotshot_orchestrator::client::ValidatorArgs; pub mod types; #[path = "../infra/mod.rs"] pub mod infra; +#[path = "../infra/modDA.rs"] +pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -18,6 +21,21 @@ pub mod infra; #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { + setup_logging(); + setup_backtrace(); let args = ValidatorArgs::parse(); - main_entry_point::(args).await; + info!( + "connecting to orchestrator at {:?}:{:?}", + args.host, args.port + ); + infra_da::main_entry_point::< + DemoTypes, + ThisMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + NodeImpl, + ThisRun, + >(args) + .await; } diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index 8a0f53c1c9..3ed46fa979 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; @@ -49,7 +49,7 @@ async fn main() { for _ in 0..args.num_nodes { let node = async_spawn(async move { infra_da::main_entry_point::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/multi-web-server.rs b/hotshot/examples/web-server-da/multi-web-server.rs index c41c9c0b03..f954050ad0 100644 --- a/hotshot/examples/web-server-da/multi-web-server.rs +++ b/hotshot/examples/web-server-da/multi-web-server.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::error; #[derive(Parser, Debug)] @@ -27,7 +27,7 @@ async fn main() { let cdn_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_cdn), args.cdn_port) .await { @@ -37,7 +37,7 @@ async fn main() { }); let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_da), args.da_port) .await { @@ -47,7 +47,7 @@ async fn main() { }); let vs_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_view_sync), args.view_sync_port) .await { diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 772d3aa12e..594d004a93 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::instrument; use types::ThisMembership; @@ -29,7 +29,7 @@ async fn main() { let args = OrchestratorArgs::parse(); run_orchestrator_da::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 9ea1f9a694..017784b354 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -1,6 +1,6 @@ use crate::infra_da::WebServerDARun; use hotshot::{ - demos::sdemo::SDemoTypes, + demo::DemoTypes, traits::{ election::static_committee::GeneralStaticCommittee, implementations::{MemoryStorage, WebCommChannel}, @@ -22,57 +22,54 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = SequencingLeaf; +pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = - WebCommChannel; -pub type QuorumNetwork = - WebCommChannel; -pub type ViewSyncNetwork = - WebCommChannel; + GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = WebCommChannel; +pub type QuorumNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebCommChannel; -pub type ThisDAProposal = DAProposal; -pub type ThisDAVote = DAVote; +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; -pub type ThisQuorumProposal = QuorumProposal; -pub type ThisQuorumVote = QuorumVote; +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; -pub type ThisViewSyncProposal = ViewSyncCertificate; -pub type ThisViewSyncVote = ViewSyncVote; +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; - type Leaf = SequencingLeaf; +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; type Exchanges = SequencingExchanges< - SDemoTypes, - Message, + DemoTypes, + Message, QuorumExchange< - SDemoTypes, + DemoTypes, Self::Leaf, ThisQuorumProposal, ThisMembership, QuorumNetwork, - Message, + Message, >, - CommitteeExchange>, + CommitteeExchange>, ViewSyncExchange< - SDemoTypes, + DemoTypes, ThisViewSyncProposal, ThisMembership, ViewSyncNetwork, - Message, + Message, >, >; - type ConsensusMessage = SequencingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index ec2415fd65..ab44e02991 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -29,7 +29,7 @@ async fn main() { args.host, args.port ); infra_da::main_entry_point::< - SDemoTypes, + DemoTypes, ThisMembership, DANetwork, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/web-server.rs b/hotshot/examples/web-server-da/web-server.rs index 99d0b12f63..9c4912c6a3 100644 --- a/hotshot/examples/web-server-da/web-server.rs +++ b/hotshot/examples/web-server-da/web-server.rs @@ -1,4 +1,4 @@ -use hotshot::demos::sdemo::SDemoTypes; +use hotshot::demo::DemoTypes; use std::sync::Arc; use async_compatibility_layer::{ @@ -23,7 +23,7 @@ async fn main() { let (server_shutdown_sender, server_shutdown) = oneshot(); let _sender = Arc::new(server_shutdown_sender); let _result = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown), args.port) .await; } diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs new file mode 100644 index 0000000000..81cac76f4b --- /dev/null +++ b/hotshot/src/demo.rs @@ -0,0 +1,207 @@ +//! Sequencing consensus demo +//! +//! This module provides an implementation of the `HotShot` suite of traits that implements a +//! basic demonstration of sequencing consensus. +//! +//! These implementations are useful in examples and integration testing, but are not suitable for +//! production use. +use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; +use commit::{Commitment, Committable}; +use derivative::Derivative; +use either::Either; +use hotshot_signature_key::bn254::BLSPubKey; +use hotshot_types::{ + block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, + certificate::{AssembledSignature, QuorumCertificate}, + data::{ + fake_commitment, genesis_proposer_id, random_commitment, LeafType, SequencingLeaf, + ViewNumber, + }, + traits::{ + election::Membership, + node_implementation::NodeType, + state::{ConsensusTime, TestableState}, + BlockPayload, State, + }, +}; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, marker::PhantomData}; + +/// sequencing demo entry state +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct SDemoState { + /// the block height + block_height: u64, + /// the view number + view_number: ViewNumber, + /// the previous state commitment + prev_state_commitment: Commitment, +} + +impl Committable for SDemoState { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("SDemo State Commit") + .u64_field("block_height", self.block_height) + .u64_field("view_number", *self.view_number) + .field("prev_state_commitment", self.prev_state_commitment) + .finalize() + } + + fn tag() -> String { + "SEQUENCING_DEMO_STATE".to_string() + } +} + +impl Default for SDemoState { + fn default() -> Self { + Self { + block_height: 0, + view_number: ViewNumber::genesis(), + prev_state_commitment: fake_commitment(), + } + } +} + +impl State for SDemoState { + type Error = BlockPayloadError; + + type BlockType = VIDBlockPayload; + + type Time = ViewNumber; + + fn validate_block(&self, _block: &Self::BlockType, view_number: &Self::Time) -> bool { + if view_number == &ViewNumber::genesis() { + &self.view_number == view_number + } else { + self.view_number < *view_number + } + } + + fn append( + &self, + block: &Self::BlockType, + view_number: &Self::Time, + ) -> Result { + if !self.validate_block(block, view_number) { + return Err(BlockPayloadError::InvalidBlock); + } + + Ok(SDemoState { + block_height: self.block_height + 1, + view_number: *view_number, + prev_state_commitment: self.commit(), + }) + } + + fn on_commit(&self) {} +} + +impl TestableState for SDemoState { + fn create_random_transaction( + _state: Option<&Self>, + _rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + /// clippy appeasement for `RANDOM_TX_BASE_SIZE` + const RANDOM_TX_BASE_SIZE: usize = 8; + VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) + } +} +/// Implementation of [`NodeType`] for [`VDemoNode`] +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +pub struct DemoTypes; + +impl NodeType for DemoTypes { + type Time = ViewNumber; + type BlockType = VIDBlockPayload; + type SignatureKey = BLSPubKey; + type VoteTokenType = StaticVoteToken; + type Transaction = VIDTransaction; + type ElectionConfigType = StaticElectionConfig; + type StateType = SDemoState; +} + +/// The node implementation for the sequencing demo +#[derive(Derivative)] +#[derivative(Clone(bound = ""))] +pub struct SDemoNode(PhantomData) +where + MEMBERSHIP: Membership + std::fmt::Debug; + +impl SDemoNode +where + MEMBERSHIP: Membership + std::fmt::Debug, +{ + /// Create a new `SDemoNode` + #[must_use] + pub fn new() -> Self { + SDemoNode(PhantomData) + } +} + +impl Debug for SDemoNode +where + MEMBERSHIP: Membership + std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SDemoNode") + .field("_phantom", &"phantom") + .finish() + } +} + +impl Default for SDemoNode +where + MEMBERSHIP: Membership + std::fmt::Debug, +{ + fn default() -> Self { + Self::new() + } +} + +/// Provides a random [`QuorumCertificate`] +pub fn random_quorum_certificate>( + rng: &mut dyn rand::RngCore, +) -> QuorumCertificate> { + QuorumCertificate { + // block_commitment: random_commitment(rng), + leaf_commitment: random_commitment(rng), + view_number: TYPES::Time::new(rng.gen()), + signatures: AssembledSignature::Genesis(), + is_genesis: rng.gen(), + } +} + +/// Provides a random [`SequencingLeaf`] +pub fn random_sequencing_leaf( + deltas: Either>, + rng: &mut dyn rand::RngCore, +) -> SequencingLeaf { + let justify_qc = random_quorum_certificate(rng); + // let state = TYPES::StateType::default() + // .append(&deltas, &TYPES::Time::new(42)) + // .unwrap_or_default(); + SequencingLeaf { + view_number: justify_qc.view_number, + height: rng.next_u64(), + justify_qc, + parent_commitment: random_commitment(rng), + deltas, + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: genesis_proposer_id(), + } +} diff --git a/hotshot/src/demos.rs b/hotshot/src/demos.rs deleted file mode 100644 index 7ddbef89c6..0000000000 --- a/hotshot/src/demos.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Contains implementations of the `HotShot` traits used in the examples and integration testing. -//! -//! These implementations are not suitable for production use. - -/// this is a demo for sequencing consensus -pub mod sdemo; -/// this is a demo for validating consensus -pub mod vdemo; diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs deleted file mode 100644 index 97117347a9..0000000000 --- a/hotshot/src/demos/sdemo.rs +++ /dev/null @@ -1,384 +0,0 @@ -//! Sequencing consensus demo -//! -//! This module provides an implementation of the `HotShot` suite of traits that implements a -//! basic demonstration of sequencing consensus. -//! -//! These implementations are useful in examples and integration testing, but are not suitable for -//! production use. -use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; -use std::{ - collections::HashSet, - fmt::{Debug, Display}, - marker::PhantomData, - ops::Deref, -}; - -use commit::{Commitment, Committable}; -use derivative::Derivative; -use either::Either; -use hotshot_signature_key::bn254::BN254Pub; -use hotshot_types::{ - certificate::{AssembledSignature, QuorumCertificate}, - constants::genesis_proposer_id, - data::{fake_commitment, random_commitment, LeafType, SequencingLeaf, ViewNumber}, - traits::{ - block_contents::Transaction, - election::Membership, - node_implementation::NodeType, - state::{ConsensusTime, TestableBlock, TestableState}, - Block, State, - }, -}; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use snafu::Snafu; - -/// The transaction for the sequencing demo -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoTransaction { - /// identifier for the transaction - pub id: u64, - /// padding to add to txn (to make it larger and thereby more realistic) - pub padding: Vec, -} - -impl Deref for SDemoTransaction { - type Target = u64; - - fn deref(&self) -> &Self::Target { - &self.id - } -} - -impl Committable for SDemoTransaction { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("SDemo Txn Comm") - .u64_field("id", self.id) - .finalize() - } - - fn tag() -> String { - "SEQUENCING_DEMO_TXN".to_string() - } -} - -impl Transaction for SDemoTransaction {} - -impl SDemoTransaction { - /// create a new transaction - #[must_use] - pub fn new(id: u64) -> Self { - Self { - id, - padding: vec![], - } - } -} - -/// genesis block -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoGenesisBlock {} - -/// Any block after genesis -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoNormalBlock { - /// Block state commitment - pub previous_state: (), - /// Transaction vector - pub transactions: Vec, -} - -/// The block for the sequencing demo -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub enum SDemoBlock { - /// genesis block - Genesis(SDemoGenesisBlock), - /// normal block - Normal(SDemoNormalBlock), -} - -impl Committable for SDemoBlock { - fn commit(&self) -> Commitment { - match &self { - SDemoBlock::Genesis(_) => { - commit::RawCommitmentBuilder::new("SDemo Genesis Comm").finalize() - } - SDemoBlock::Normal(block) => { - let mut builder = commit::RawCommitmentBuilder::new("SDemo Normal Comm"); - for txn in &block.transactions { - builder = builder.u64_field("transaction", **txn); - } - builder.finalize() - } - } - } - - fn tag() -> String { - "SEQUENCING_DEMO_BLOCK".to_string() - } -} - -/// sequencing demo entry state -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoState { - /// the block height - block_height: u64, - /// the view number - view_number: ViewNumber, - /// the previous state commitment - prev_state_commitment: Commitment, -} - -impl Committable for SDemoState { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("SDemo State Commit") - .u64_field("block_height", self.block_height) - .u64_field("view_number", *self.view_number) - .field("prev_state_commitment", self.prev_state_commitment) - .finalize() - } - - fn tag() -> String { - "SEQUENCING_DEMO_STATE".to_string() - } -} - -impl Default for SDemoState { - fn default() -> Self { - Self { - block_height: 0, - view_number: ViewNumber::genesis(), - prev_state_commitment: fake_commitment(), - } - } -} - -/// The error type for the sequencing demo -#[derive(Snafu, Debug)] -pub enum SDemoError { - /// Previous state commitment does not match - PreviousStateMismatch, - /// Nonce was reused - ReusedTxn, - /// Genesis failure - GenesisFailed, - /// Genesis reencountered after initialization - GenesisAfterStart, - /// no transasctions added to genesis - GenesisCantHaveTransactions, - /// invalid block - InvalidBlock, -} - -impl Display for SDemoBlock { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - SDemoBlock::Genesis(_) => { - write!(f, "SDemo Genesis Block") - } - SDemoBlock::Normal(block) => { - write!(f, "SDemo Normal Block #txns={}", block.transactions.len()) - } - } - } -} - -impl TestableBlock for SDemoBlock { - fn genesis() -> Self { - SDemoBlock::Genesis(SDemoGenesisBlock {}) - } - - fn txn_count(&self) -> u64 { - match self { - SDemoBlock::Genesis(_) => 0, - SDemoBlock::Normal(n) => n.transactions.len() as u64, - } - } -} - -impl Block for SDemoBlock { - type Error = SDemoError; - - type Transaction = SDemoTransaction; - - fn new() -> Self { - ::genesis() - } - - fn add_transaction_raw( - &self, - tx: &Self::Transaction, - ) -> std::result::Result { - match self { - SDemoBlock::Genesis(_) => Err(SDemoError::GenesisCantHaveTransactions), - SDemoBlock::Normal(n) => { - let mut new = n.clone(); - new.transactions.push(tx.clone()); - Ok(SDemoBlock::Normal(new)) - } - } - } - - fn contained_transactions(&self) -> HashSet> { - match self { - SDemoBlock::Genesis(_) => HashSet::new(), - SDemoBlock::Normal(n) => n - .transactions - .iter() - .map(commit::Committable::commit) - .collect(), - } - } -} - -impl State for SDemoState { - type Error = SDemoError; - - type BlockType = SDemoBlock; - - type Time = ViewNumber; - - fn next_block(_state: Option) -> Self::BlockType { - SDemoBlock::Normal(SDemoNormalBlock { - previous_state: (), - transactions: Vec::new(), - }) - } - - fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool { - match block { - SDemoBlock::Genesis(_) => { - view_number == &ViewNumber::genesis() && view_number == &self.view_number - } - SDemoBlock::Normal(_n) => self.view_number < *view_number, - } - } - - fn append( - &self, - block: &Self::BlockType, - view_number: &Self::Time, - ) -> Result { - if !self.validate_block(block, view_number) { - return Err(SDemoError::InvalidBlock); - } - - Ok(SDemoState { - block_height: self.block_height + 1, - view_number: *view_number, - prev_state_commitment: self.commit(), - }) - } - - fn on_commit(&self) {} -} - -impl TestableState for SDemoState { - fn create_random_transaction( - _state: Option<&Self>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction { - SDemoTransaction { - id: rng.gen_range(0..10), - padding: vec![0; padding as usize], - } - } -} -/// Implementation of [`NodeType`] for [`VDemoNode`] -#[derive( - Copy, - Clone, - Debug, - Default, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - serde::Serialize, - serde::Deserialize, -)] -pub struct SDemoTypes; - -impl NodeType for SDemoTypes { - type Time = ViewNumber; - type BlockType = SDemoBlock; - type SignatureKey = BN254Pub; - type VoteTokenType = StaticVoteToken; - type Transaction = SDemoTransaction; - type ElectionConfigType = StaticElectionConfig; - type StateType = SDemoState; -} - -/// The node implementation for the sequencing demo -#[derive(Derivative)] -#[derivative(Clone(bound = ""))] -pub struct SDemoNode(PhantomData) -where - MEMBERSHIP: Membership + std::fmt::Debug; - -impl SDemoNode -where - MEMBERSHIP: Membership + std::fmt::Debug, -{ - /// Create a new `SDemoNode` - #[must_use] - pub fn new() -> Self { - SDemoNode(PhantomData) - } -} - -impl Debug for SDemoNode -where - MEMBERSHIP: Membership + std::fmt::Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SDemoNode") - .field("_phantom", &"phantom") - .finish() - } -} - -impl Default for SDemoNode -where - MEMBERSHIP: Membership + std::fmt::Debug, -{ - fn default() -> Self { - Self::new() - } -} - -/// Provides a random [`QuorumCertificate`] -pub fn random_quorum_certificate>( - rng: &mut dyn rand::RngCore, -) -> QuorumCertificate { - QuorumCertificate { - // block_commitment: random_commitment(rng), - leaf_commitment: random_commitment(rng), - view_number: TYPES::Time::new(rng.gen()), - signatures: AssembledSignature::Genesis(), - is_genesis: rng.gen(), - } -} - -/// Provides a random [`SequencingLeaf`] -pub fn random_sequencing_leaf( - deltas: Either>, - rng: &mut dyn rand::RngCore, -) -> SequencingLeaf { - let justify_qc = random_quorum_certificate(rng); - // let state = TYPES::StateType::default() - // .append(&deltas, &TYPES::Time::new(42)) - // .unwrap_or_default(); - SequencingLeaf { - view_number: justify_qc.view_number, - height: rng.next_u64(), - justify_qc, - parent_commitment: random_commitment(rng), - deltas, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: genesis_proposer_id(), - } -} diff --git a/hotshot/src/demos/vdemo.rs b/hotshot/src/demos/vdemo.rs deleted file mode 100644 index 1d091c60b1..0000000000 --- a/hotshot/src/demos/vdemo.rs +++ /dev/null @@ -1,607 +0,0 @@ -// //! Validating (vanilla) consensus demo -// //! -// //! This module provides an implementation of the `HotShot` suite of traits that implements a -// //! basic demonstration of validating consensus. -// //! -// //! These implementations are useful in examples and integration testing, but are not suitable for -// //! production use. -// -// use crate::traits::{ -// election::static_committee::{StaticElectionConfig, StaticVoteToken}, -// Block, -// }; -// use commit::{Commitment, Committable}; -// use derivative::Derivative; -// -// use hotshot_types::{ -// certificate::{QuorumCertificate, YesNoSignature}, -// constants::genesis_proposer_id, -// data::{random_commitment, LeafType, ValidatingLeaf, ViewNumber}, -// traits::{ -// block_contents::Transaction, -// consensus_type::validating_consensus::ValidatingConsensus, -// election::Membership, -// node_implementation::NodeType, -// signature_key::ed25519::Ed25519Pub, -// state::{ConsensusTime, TestableBlock, TestableState}, -// State, -// }, -// }; -// -// use rand::Rng; -// use serde::{Deserialize, Serialize}; -// use snafu::{ensure, Snafu}; -// use std::{ -// collections::{BTreeMap, HashSet}, -// fmt::{Debug, Display}, -// marker::PhantomData, -// }; -// use tracing::error; -// -// /// The account identifier type used by the demo -// /// -// /// This is a type alias to [`String`] for simplicity. -// pub type Account = String; -// -// /// The account balance type used by the demo -// /// -// /// This is a type alias to [`u64`] for simplicity. -// pub type Balance = u64; -// -// /// Records a reduction in an account balance -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct Subtraction { -// /// An account identifier -// pub account: Account, -// /// An account balance -// pub amount: Balance, -// } -// -// /// Records an increase in an account balance -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct Addition { -// /// An account identifier -// pub account: Account, -// /// An account balance -// pub amount: Balance, -// } -// -// /// The error type for the validating demo -// #[derive(Snafu, Debug)] -// pub enum VDemoError { -// /// The subtraction and addition amounts for this transaction were not equal -// InconsistentTransaction, -// /// No such input account exists -// NoSuchInputAccount, -// /// No such output account exists -// NoSuchOutputAccount, -// /// Tried to move more money than was in the account -// InsufficentBalance, -// /// Previous state commitment does not match -// PreviousStateMismatch, -// /// Nonce was reused -// ReusedNonce, -// /// Genesis failure -// GenesisFailed, -// /// Genesis reencountered after initialization -// GenesisAfterStart, -// } -// -// /// The transaction for the validating demo -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct VDemoTransaction { -// /// An increment to an account balance -// pub add: Addition, -// /// A decrement to an account balance -// pub sub: Subtraction, -// /// The nonce for a transaction, no two transactions can have the same nonce -// pub nonce: u64, -// /// Number of bytes to pad to each transaction -// pub padding: Vec, -// } -// -// impl Transaction for VDemoTransaction {} -// -// impl VDemoTransaction { -// /// Ensures that this transaction is at least consistent with itself -// #[must_use] -// pub fn validate_independence(&self) -> bool { -// // Ensure that we are adding to one account exactly as much as we are subtracting from -// // another -// self.add.amount <= self.sub.amount // TODO why not strict equality? -// } -// } -// -// /// The state for the validating demo -// /// NOTE both fields are btrees because we need -// /// ordered-ing otherwise commitments will not match -// #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, Hash)] -// pub struct VDemoState { -// /// Key/value store of accounts and balances -// pub balances: BTreeMap, -// // /// Set of previously seen nonces -// // pub nonces: BTreeSet, -// } -// -// impl Committable for VDemoState { -// fn commit(&self) -> Commitment { -// let mut builder = commit::RawCommitmentBuilder::new("VDemo State Comm"); -// -// for (k, v) in &self.balances { -// builder = builder.u64_field(k, *v); -// } -// builder = builder.constant_str("nonces"); -// -// // for nonce in &self.nonces { -// // builder = builder.u64(*nonce); -// // } -// -// builder.finalize() -// } -// -// fn tag() -> String { -// "VALIDATING_DEMO_STATE".to_string() -// } -// } -// -// /// initializes the first state on genesis -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct VDemoGenesisBlock { -// /// initializes the first state -// pub accounts: BTreeMap, -// } -// -// /// Any block after genesis -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub struct VDemoNormalBlock { -// /// Block state commitment -// pub previous_state: Commitment, -// /// Transaction vector -// pub transactions: Vec, -// } -// -// /// The block for the validating demo -// #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -// pub enum VDemoBlock { -// /// genesis block -// Genesis(VDemoGenesisBlock), -// /// normal block -// Normal(VDemoNormalBlock), -// } -// -// impl Committable for VDemoBlock { -// fn commit(&self) -> Commitment { -// match &self { -// VDemoBlock::Genesis(block) => { -// let mut builder = commit::RawCommitmentBuilder::new("VDemo Genesis Comm") -// .u64_field("account_count", block.accounts.len() as u64); -// for account in &block.accounts { -// builder = builder.u64_field(account.0, *account.1); -// } -// builder.finalize() -// } -// VDemoBlock::Normal(block) => { -// let mut builder = commit::RawCommitmentBuilder::new("VDemo Block Comm") -// .var_size_field("Previous State", block.previous_state.as_ref()); -// -// for txn in &block.transactions { -// builder = builder -// .u64_field(&txn.add.account, txn.add.amount) -// .u64_field(&txn.sub.account, txn.sub.amount) -// .constant_str("nonce") -// .u64_field("nonce", txn.nonce); -// } -// -// builder.finalize() -// } -// } -// } -// -// fn tag() -> String { -// "VALIDATING_DEMO_BLOCK".to_string() -// } -// } -// -// impl Committable for VDemoTransaction { -// fn commit(&self) -> Commitment { -// commit::RawCommitmentBuilder::new("VDemo Txn Comm") -// .u64_field(&self.add.account, self.add.amount) -// .u64_field(&self.sub.account, self.sub.amount) -// .constant_str("nonce") -// .u64_field("nonce", self.nonce) -// .finalize() -// } -// -// fn tag() -> String { -// "VALIDATING_DEMO_TXN".to_string() -// } -// } -// -// impl VDemoBlock { -// /// generate a genesis block with the provided initial accounts and balances -// #[must_use] -// pub fn genesis_from(accounts: BTreeMap) -> Self { -// Self::Genesis(VDemoGenesisBlock { accounts }) -// } -// } -// -// impl State for VDemoState { -// type Error = VDemoError; -// -// type BlockType = VDemoBlock; -// -// type Time = ViewNumber; -// -// #[allow(clippy::panic)] -// fn next_block(state: Option) -> Self::BlockType { -// match state { -// Some(state) => VDemoBlock::Normal(VDemoNormalBlock { -// previous_state: state.commit(), -// transactions: Vec::new(), -// }), -// None => panic!("State is required for the next block"), -// } -// } -// -// // Note: validate_block is actually somewhat redundant, its meant to be a quick and dirty check -// // for clarity, the logic is duplicated with append_to -// fn validate_block(&self, block: &Self::BlockType, _view_number: &Self::Time) -> bool { -// match block { -// VDemoBlock::Genesis(_) => self.balances.is_empty(), // && self.nonces.is_empty(), -// VDemoBlock::Normal(block) => { -// let state = self; -// // A valid block is one in which every transaction is internally consistent, and results in -// // nobody having a negative balance -// // -// // We will check this, in this case, by making a trial copy of our balances map, making -// // trial modifications to it, and then asserting that no balances are negative -// let mut trial_balances = state.balances.clone(); -// for tx in &block.transactions { -// // This is a macro from SNAFU that returns an Err if the condition is not satisfied -// // -// // We first check that the transaction is internally consistent, then apply the change -// // to our trial map -// if !tx.validate_independence() { -// error!("validate_independence failed"); -// return false; -// } -// // Find the input account, and subtract the transfer balance from it, failing if it -// // doesn't exist -// if let Some(input_account) = trial_balances.get_mut(&tx.sub.account) { -// *input_account -= tx.sub.amount; -// } else { -// error!("no such input account"); -// return false; -// } -// // Find the output account, and add the transfer balance to it, failing if it doesn't -// // exist -// if let Some(output_account) = trial_balances.get_mut(&tx.add.account) { -// *output_account += tx.add.amount; -// } else { -// error!("no such output account"); -// return false; -// } -// // // Check to make sure the nonce isn't used -// // if state.nonces.contains(&tx.nonce) { -// // warn!(?state, ?tx, "State nonce is used for transaction"); -// // return false; -// // } -// } -// // This block has now passed all our tests, and thus has not done anything bad, so the block -// // is valid if its previous state hash matches that of the previous state -// let result = block.previous_state == state.commit(); -// if !result { -// error!( -// "hash failure. previous_block: {:?} hash_state: {:?}", -// block.previous_state, -// state.commit() -// ); -// } -// result -// } -// } -// } -// -// fn append( -// &self, -// block: &Self::BlockType, -// _view_number: &Self::Time, -// ) -> std::result::Result { -// match block { -// VDemoBlock::Genesis(block) => { -// if self.balances.is_empty() { -// // && self.nonces.is_empty() -// let mut new_state = Self::default(); -// for account in &block.accounts { -// if new_state -// .balances -// .insert(account.0.clone(), *account.1) -// .is_some() -// { -// error!("Adding the same account twice during application of genesis block!"); -// return Err(VDemoError::GenesisFailed); -// } -// } -// Ok(new_state) -// } else { -// Err(VDemoError::GenesisAfterStart) -// } -// } -// VDemoBlock::Normal(block) => { -// let state = self; -// // A valid block is one in which every transaction is internally consistent, and results in -// // nobody having a negative balance -// // -// // We will check this, in this case, by making a trial copy of our balances map, making -// // trial modifications to it, and then asserting that no balances are negative -// let mut trial_balances = state.balances.clone(); -// for tx in &block.transactions { -// // This is a macro from SNAFU that returns an Err if the condition is not satisfied -// // -// // We first check that the transaction is internally consistent, then apply the change -// // to our trial map -// ensure!(tx.validate_independence(), InconsistentTransactionSnafu); -// // Find the input account, and subtract the transfer balance from it, failing if it -// // doesn't exist -// if let Some(input_account) = trial_balances.get_mut(&tx.sub.account) { -// *input_account -= tx.sub.amount; -// } else { -// return Err(VDemoError::NoSuchInputAccount); -// } -// // Find the output account, and add the transfer balance to it, failing if it doesn't -// // exist -// if let Some(output_account) = trial_balances.get_mut(&tx.add.account) { -// *output_account += tx.add.amount; -// } else { -// return Err(VDemoError::NoSuchOutputAccount); -// } -// // // Check for nonce reuse -// // if state.nonces.contains(&tx.nonce) { -// // return Err(VDemoError::ReusedNonce); -// // } -// } -// // Make sure our previous state commitment matches the provided state -// if block.previous_state == state.commit() { -// // This block has now passed all our tests, and thus has not done anything bad -// // Add the nonces from this block -// // let mut nonces = state.nonces.clone(); -// // for tx in &block.transactions { -// // nonces.insert(tx.nonce); -// // } -// Ok(VDemoState { -// balances: trial_balances, -// // nonces, -// }) -// } else { -// Err(VDemoError::PreviousStateMismatch) -// } -// } -// } -// } -// -// fn on_commit(&self) { -// // Does nothing in this implementation -// } -// } -// -// impl Display for VDemoBlock { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// match self { -// VDemoBlock::Genesis(block) => { -// write!(f, "VDemo Genesis Block: {block:#?}") -// } -// VDemoBlock::Normal(block) => { -// write!(f, "VDemo Normal Block #txns={}", block.transactions.len()) -// } -// } -// } -// } -// -// impl TestableState for VDemoState { -// fn create_random_transaction( -// state: Option<&Self>, -// rng: &mut dyn rand::RngCore, -// padding: u64, -// ) -> ::Transaction { -// use rand::seq::IteratorRandom; -// -// let state = state.expect("Missing state"); -// -// let non_zero_balances = state -// .balances -// .iter() -// .filter(|b| *b.1 > 0) -// .collect::>(); -// -// assert!( -// !non_zero_balances.is_empty(), -// "No nonzero balances were available! Unable to generate transaction." -// ); -// -// let input_account = non_zero_balances.iter().choose(rng).unwrap().0; -// let output_account = state.balances.keys().choose(rng).unwrap(); -// let amount = rng.gen_range(0..100); -// -// VDemoTransaction { -// add: Addition { -// account: output_account.to_string(), -// amount, -// }, -// sub: Subtraction { -// account: input_account.to_string(), -// amount, -// }, -// nonce: rng.gen(), -// padding: vec![0; padding as usize], -// } -// } -// } -// -// impl TestableBlock for VDemoBlock { -// fn genesis() -> Self { -// let accounts: BTreeMap = vec![ -// ("Joe", 1_000_000), -// ("Nathan M", 500_000), -// ("John", 400_000), -// ("Nathan Y", 600_000), -// ("Ian", 5_000_000), -// ] -// .into_iter() -// .map(|(x, y)| (x.to_string(), y)) -// .collect(); -// Self::Genesis(VDemoGenesisBlock { accounts }) -// } -// -// fn txn_count(&self) -> u64 { -// if let VDemoBlock::Normal(block) = self { -// block.transactions.len() as u64 -// } else { -// 0 -// } -// } -// } -// -// impl Block for VDemoBlock { -// type Transaction = VDemoTransaction; -// -// type Error = VDemoError; -// -// fn new() -> Self { -// ::genesis() -// } -// -// fn add_transaction_raw( -// &self, -// tx: &Self::Transaction, -// ) -> std::result::Result { -// match self { -// VDemoBlock::Genesis(_) => Err(VDemoError::GenesisAfterStart), -// VDemoBlock::Normal(block) => { -// // first, make sure that the transaction is internally valid -// if tx.validate_independence() { -// // Then check the previous transactions in the block -// if block.transactions.iter().any(|x| x.nonce == tx.nonce) { -// return Err(VDemoError::ReusedNonce); -// } -// let mut new_block = block.clone(); -// // Insert our transaction and return -// new_block.transactions.push(tx.clone()); -// Ok(VDemoBlock::Normal(new_block)) -// } else { -// Err(VDemoError::InconsistentTransaction) -// } -// } -// } -// } -// fn contained_transactions(&self) -> HashSet> { -// match self { -// VDemoBlock::Genesis(_) => HashSet::new(), -// VDemoBlock::Normal(block) => block -// .transactions -// .clone() -// .into_iter() -// .map(|tx| tx.commit()) -// .collect(), -// } -// } -// } -// -// /// Implementation of [`NodeType`] for [`VDemoNode`] -// #[derive( -// Copy, -// Clone, -// Debug, -// Default, -// Hash, -// PartialEq, -// Eq, -// PartialOrd, -// Ord, -// serde::Serialize, -// serde::Deserialize, -// )] -// pub struct VDemoTypes; -// -// impl NodeType for VDemoTypes { -// type ConsensusType = ValidatingConsensus; -// type Time = ViewNumber; -// type BlockType = VDemoBlock; -// type SignatureKey = Ed25519Pub; -// type VoteTokenType = StaticVoteToken; -// type Transaction = VDemoTransaction; -// type ElectionConfigType = StaticElectionConfig; -// type StateType = VDemoState; -// } -// -// /// The node implementation for the validating demo -// #[derive(Derivative)] -// #[derivative(Clone(bound = ""))] -// pub struct VDemoNode(PhantomData) -// where -// MEMBERSHIP: Membership + Debug; -// -// impl VDemoNode -// where -// MEMBERSHIP: Membership + Debug, -// { -// /// Create a new `VDemoNode` -// #[must_use] -// pub fn new() -> Self { -// VDemoNode(PhantomData) -// } -// } -// -// impl Debug for VDemoNode -// where -// MEMBERSHIP: Membership + Debug, -// { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// f.debug_struct("VDemoNode") -// .field("_phantom", &"phantom") -// .finish() -// } -// } -// -// impl Default for VDemoNode -// where -// MEMBERSHIP: Membership + Debug, -// { -// fn default() -> Self { -// Self::new() -// } -// } -// -// /// Provides a random [`QuorumCertificate`] -// pub fn random_quorum_certificate>( -// rng: &mut dyn rand::RngCore, -// ) -> QuorumCertificate { -// QuorumCertificate { -// // block_commitment: random_commitment(rng), -// leaf_commitment: random_commitment(rng), -// view_number: TYPES::Time::new(rng.gen()), -// signatures: YesNoSignature::Yes(BTreeMap::default()), -// is_genesis: rng.gen(), -// } -// } -// -// /// Provides a random [`ValidatingLeaf`] -// pub fn random_validating_leaf>( -// deltas: TYPES::BlockType, -// rng: &mut dyn rand::RngCore, -// ) -> ValidatingLeaf { -// let justify_qc = random_quorum_certificate(rng); -// let state = TYPES::StateType::default() -// .append(&deltas, &TYPES::Time::new(42)) -// .unwrap_or_default(); -// ValidatingLeaf { -// view_number: justify_qc.view_number, -// height: rng.next_u64(), -// justify_qc, -// parent_commitment: random_commitment(rng), -// deltas, -// state, -// rejected: Vec::new(), -// timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), -// proposer_id: genesis_proposer_id(), -// } -// } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa435e1f1d..b7872ce59e 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -19,12 +19,10 @@ #[cfg(feature = "docs")] pub mod documentation; -/// Data availability support -// pub mod da; /// Contains structures and functions for committee election pub mod certificate; #[cfg(feature = "demo")] -pub mod demos; +pub mod demo; /// Contains traits consumed by [`HotShot`] pub mod traits; /// Contains types used by the crate @@ -36,17 +34,17 @@ use crate::{ certificate::QuorumCertificate, tasks::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_view_sync_task, + add_transaction_task, add_view_sync_task, }, traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, }; use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, - async_primitives::{broadcast::BroadcastSender, subscribable_rwlock::SubscribableRwLock}, - channel::{unbounded, UnboundedReceiver, UnboundedSender}, + async_primitives::broadcast::BroadcastSender, + channel::UnboundedSender, }; -use async_lock::{Mutex, RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; +use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; use commit::{Commitment, Committable}; use custom_debug::Debug; @@ -57,9 +55,10 @@ use hotshot_task::{ use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, ViewSyncCertificate}, consensus::{BlockStore, Consensus, ConsensusMetrics, View, ViewInner, ViewQueue}, - data::{DAProposal, DeltasType, LeafType, ProposalType, QuorumProposal, SequencingLeaf}, + data::{DAProposal, DeltasType, LeafType, QuorumProposal, SequencingLeaf}, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, @@ -79,12 +78,12 @@ use hotshot_types::{ storage::StoredView, State, }, - vote::{ViewSyncData, VoteType}, + vote::ViewSyncData, HotShotConfig, }; use snafu::ResultExt; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap}, marker::PhantomData, num::NonZeroUsize, sync::Arc, @@ -114,7 +113,6 @@ pub struct SystemContextInner> { /// Configuration items for this hotshot instance config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -134,11 +132,6 @@ pub struct SystemContextInner> { /// a reference to the metrics that the implementor is using. _metrics: Box, - /// Transactions - /// (this is shared btwn hotshot and `Consensus`) - transactions: - Arc, TYPES::Transaction>>>, - /// The hotstuff implementation consensus: Arc>>, @@ -146,9 +139,6 @@ pub struct SystemContextInner> { /// latter of which is only applicable for sequencing consensus. channel_maps: (ChannelMaps, Option>), - /// for receiving messages in the network lookup task - recv_network_lookup: Arc>>>, - // global_registry: GlobalRegistry, /// Access to the output event stream. output_event_stream: ChannelStream>, @@ -178,7 +168,6 @@ impl> SystemContext { private_key: ::PrivateKey, nonce: u64, config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -224,8 +213,6 @@ impl> SystemContext { state_map, cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), - transactions: Arc::default(), - seen_transactions: HashSet::new(), saved_leaves, saved_blocks, // TODO this is incorrect @@ -236,19 +223,14 @@ impl> SystemContext { invalid_qc: 0, }; let consensus = Arc::new(RwLock::new(consensus)); - let txns = consensus.read().await.get_transactions(); - let (_send_network_lookup, recv_network_lookup) = unbounded(); let inner: Arc> = Arc::new(SystemContextInner { - recv_network_lookup: Arc::new(Mutex::new(recv_network_lookup)), id: nonce, channel_maps: I::new_channel_maps(start_view), consensus, - transactions: txns, public_key, private_key, config, - // networking, storage, exchanges: Arc::new(exchanges), event_sender: RwLock::default(), @@ -405,7 +387,6 @@ impl> SystemContext { private_key: ::PrivateKey, node_id: u64, config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -554,11 +535,6 @@ impl> SystemContext { /// [`HotShot`] implementations that depend on [`TYPES::ConsensusType`]. #[async_trait] pub trait HotShotType> { - /// Get the [`transactions`] field of [`HotShot`]. - fn transactions( - &self, - ) -> &Arc, TYPES::Transaction>>>; - /// Get the [`hotstuff`] field of [`HotShot`]. fn consensus(&self) -> &Arc>>; @@ -658,7 +634,7 @@ pub trait HotShotType> { #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -671,8 +647,8 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, CommitteeEx: ConsensusExchange< @@ -680,7 +656,7 @@ where Message, Proposal = DAProposal, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, ViewSyncEx: ConsensusExchange< @@ -688,16 +664,10 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, { - fn transactions( - &self, - ) -> &Arc, TYPES::Transaction>>> { - &self.inner.transactions - } - fn consensus(&self) -> &Arc>> { &self.inner.consensus } @@ -775,6 +745,13 @@ where handle.clone(), ) .await; + let task_runner = add_transaction_task( + task_runner, + internal_event_stream.clone(), + committee_exchange.clone(), + handle.clone(), + ) + .await; let task_runner = add_view_sync_task::( task_runner, internal_event_stream.clone(), @@ -936,10 +913,7 @@ impl< I: NodeImplementation>, > SequencingConsensusApi for HotShotSequencingConsensusApi { - async fn send_direct_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_direct_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, @@ -964,10 +938,7 @@ impl< Ok(()) } - async fn send_direct_da_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_direct_da_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, @@ -994,10 +965,7 @@ impl< // TODO (DA) Refactor ConsensusApi and HotShot to use SystemContextInner directly. // - async fn send_broadcast_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_broadcast_message( &self, message: SequencingMessage, ) -> std::result::Result<(), NetworkError> { @@ -1091,7 +1059,7 @@ impl> HotShotInitializer::genesis(); + let justify_qc = QuorumCertificate::>::genesis(); Ok(Self { inner: LEAF::new(time, justify_qc, genesis_block, state), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 1b411c35d2..bc95f8dbb6 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -2,9 +2,10 @@ use crate::{ async_spawn, types::SystemContextHandle, DACertificate, HotShotSequencingConsensusApi, - QuorumCertificate, SequencingQuorumEx, SystemContext, + QuorumCertificate, SequencingQuorumEx, }; -use async_compatibility_layer::art::{async_sleep, async_spawn_local}; +use async_compatibility_layer::art::async_sleep; +use commit::{Commitment, CommitmentBounds}; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -22,11 +23,12 @@ use hotshot_task_impls::{ NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, NetworkMessageTaskTypes, NetworkTaskKind, }, + transactions::{TransactionTaskState, TransactionsTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - constants::LOOK_AHEAD, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, message::{Message, Messages, SequencingMessage}, @@ -37,79 +39,15 @@ use hotshot_types::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, }, state::ConsensusTime, - Block, }, vote::{ViewSyncData, VoteType}, }; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, marker::PhantomData, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, + sync::Arc, time::Duration, }; -use tracing::info; - -/// Task to look up a node in the future as needed -pub async fn network_lookup_task>( - hotshot: SystemContext, - shut_down: Arc, -) { - info!("Launching network lookup task"); - let networking = hotshot.inner.exchanges.quorum_exchange().network().clone(); - - let inner = hotshot.inner.clone(); - - let mut completion_map: HashMap> = HashMap::default(); - - while !shut_down.load(Ordering::Relaxed) { - let lock = hotshot.inner.recv_network_lookup.lock().await; - - if let Ok(Some(cur_view)) = lock.recv().await { - let view_to_lookup = cur_view + LOOK_AHEAD; - - // perform pruning - // TODO in the future btreemap would be better - completion_map = completion_map - .drain() - .filter(|(view, is_done)| { - if !is_done.load(Ordering::Relaxed) { - // we are past the view where this is useful - if cur_view >= *view { - is_done.store(true, Ordering::Relaxed); - return true; - } - // we aren't done - return false; - } - true - }) - .collect(); - - // logic to look ahead - if !inner.exchanges.quorum_exchange().is_leader(view_to_lookup) { - let is_done = Arc::new(AtomicBool::new(false)); - completion_map.insert(view_to_lookup, is_done.clone()); - let inner = inner.clone(); - let networking = networking.clone(); - async_spawn_local(async move { - info!("starting lookup for {:?}", view_to_lookup); - let _result = networking - .lookup_node(inner.exchanges.quorum_exchange().get_leader(view_to_lookup)) - .await; - info!("finished lookup for {:?}", view_to_lookup); - }); - } - } - } - - // shut down all child tasks - for (_, is_done) in completion_map { - is_done.store(true, Ordering::Relaxed); - } -} /// event for global event stream #[derive(Clone, Debug)] @@ -130,8 +68,9 @@ pub async fn add_network_message_task< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, + COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -147,8 +86,7 @@ pub async fn add_network_message_task< ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: - CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + EXCHANGE::Networking: CommunicationChannel, MEMBERSHIP>, { let channel = exchange.network().clone(); let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { @@ -240,8 +178,9 @@ pub async fn add_network_event_task< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, + COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, - VOTE: VoteType, + VOTE: VoteType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange< TYPES, @@ -258,19 +197,16 @@ pub async fn add_network_event_task< ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: - CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + EXCHANGE::Networking: CommunicationChannel, MEMBERSHIP>, { let filter = NetworkEventTaskState::< TYPES, I, - PROPOSAL, - VOTE, MEMBERSHIP, >::Networking, >::filter(task_kind); let channel = exchange.network().clone(); - let network_state: NetworkEventTaskState<_, _, _, _, _, _> = NetworkEventTaskState { + let network_state: NetworkEventTaskState<_, _, _, _> = NetworkEventTaskState { channel, event_stream: event_stream.clone(), view: TYPES::Time::genesis(), @@ -278,7 +214,7 @@ where }; let registry = task_runner.registry.clone(); let network_event_handler = HandleEvent(Arc::new( - move |event, mut state: NetworkEventTaskState<_, _, _, _, MEMBERSHIP, _>| { + move |event, mut state: NetworkEventTaskState<_, _, MEMBERSHIP, _>| { let membership = exchange.membership().clone(); async move { let completion_status = state.handle_event(event, &membership).await; @@ -290,7 +226,7 @@ where let networking_name = "Networking Task"; let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) + TaskBuilder::>::new(networking_name.to_string()) .register_event_stream(event_stream.clone(), filter) .await .register_registry(&mut registry.clone()) @@ -314,7 +250,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -331,14 +267,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { let consensus = handle.hotshot.get_consensus(); @@ -352,7 +288,7 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - block: Some(TYPES::BlockType::new()), + block: Some(VIDBlockPayload::genesis()), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), @@ -428,7 +364,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { // build the da task @@ -476,6 +412,80 @@ where task_runner.add_task(da_task_id, da_name.to_string(), da_task) } +/// add the Transaction Handling task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_transaction_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + committee_exchange: CommitteeEx, + handle: SystemContextHandle, +) -> TaskRunner +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = Commitment, + >, +{ + // build the transactions task + let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let registry = task_runner.registry.clone(); + let transactions_state = TransactionTaskState { + registry: registry.clone(), + api: c_api.clone(), + consensus: handle.hotshot.get_consensus(), + transactions: Arc::default(), + seen_transactions: HashSet::new(), + cur_view: TYPES::Time::new(0), + committee_exchange: committee_exchange.into(), + event_stream: event_stream.clone(), + id: handle.hotshot.inner.id, + }; + let transactions_event_handler = HandleEvent(Arc::new( + move |event, + mut state: TransactionTaskState< + TYPES, + I, + HotShotSequencingConsensusApi, + >| { + async move { + let completion_status = state.handle_event(event).await; + (completion_status, state) + } + .boxed() + }, + )); + let transactions_name = "Transactions Task"; + let transactions_event_filter = FilterEvent(Arc::new( + TransactionTaskState::>::filter, + )); + + let transactions_task_builder = TaskBuilder::< + TransactionsTaskTypes>, + >::new(transactions_name.to_string()) + .register_event_stream(event_stream.clone(), transactions_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(transactions_state) + .register_event_handler(transactions_event_handler); + // impossible for unwrap to fail + // we *just* registered + let da_task_id = transactions_task_builder.get_task_id().unwrap(); + let da_task = TransactionsTaskTypes::build(transactions_task_builder).launch(); + task_runner.add_task(da_task_id, transactions_name.to_string(), da_task) +} /// add the view sync task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy @@ -497,7 +507,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { let api = HotShotSequencingConsensusApi { diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 0f751912c5..370dc47839 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -4,7 +4,7 @@ mod networking; mod node_implementation; mod storage; -pub use hotshot_types::traits::{Block, State}; +pub use hotshot_types::traits::{BlockPayload, State}; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; pub use storage::{Result as StorageResult, Storage}; @@ -14,7 +14,7 @@ pub mod implementations { pub use super::{ networking::{ libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, - memory_network::{DummyReliability, MasterMap, MemoryCommChannel, MemoryNetwork}, + memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, web_server_libp2p_fallback::{CombinedNetworks, WebServerWithFallbackCommChannel}, web_server_network::{WebCommChannel, WebServerNetwork}, }, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 2c95746d4b..ccdcfb722f 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,7 +1,7 @@ // use ark_bls12_381::Parameters as Param381; use commit::{Commitment, Committable, RawCommitmentBuilder}; use espresso_systems_common::hotshot::tag; -use hotshot_signature_key::bn254::BN254Pub; +use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ data::LeafType, traits::{ @@ -19,12 +19,8 @@ use tracing::debug; #[derive(Clone, Debug, Eq, PartialEq)] pub struct GeneralStaticCommittee, PUBKEY: SignatureKey> { - /// All the nodes participating - nodes: Vec, /// All the nodes participating and their stake nodes_with_stake: Vec, - /// The nodes on the static committee - committee_nodes: Vec, /// The nodes on the static committee and their stake committee_nodes_with_stake: Vec, /// Node type phantom @@ -34,18 +30,16 @@ pub struct GeneralStaticCommittee, PUBKEY: Signa } /// static committee using a vrf kp -pub type StaticCommittee = GeneralStaticCommittee; +pub type StaticCommittee = GeneralStaticCommittee; impl, PUBKEY: SignatureKey> GeneralStaticCommittee { /// Creates a new dummy elector #[must_use] - pub fn new(nodes: Vec, nodes_with_stake: Vec) -> Self { + pub fn new(_nodes: &[PUBKEY], nodes_with_stake: Vec) -> Self { Self { - nodes: nodes.clone(), nodes_with_stake: nodes_with_stake.clone(), - committee_nodes: nodes, committee_nodes_with_stake: nodes_with_stake, _type_phantom: PhantomData, _leaf_phantom: PhantomData, @@ -107,8 +101,9 @@ where /// Index the vector of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { - let index = (*view_number % self.nodes.len() as u64) as usize; - self.nodes[index].clone() + let index = (*view_number % self.nodes_with_stake.len() as u64) as usize; + let res = self.nodes_with_stake[index].clone(); + TYPES::SignatureKey::get_public_key(&res) } /// Simply make the partial signature @@ -117,9 +112,9 @@ where view_number: TYPES::Time, private_key: &::PrivateKey, ) -> std::result::Result>, ElectionError> { - // TODO ED Below let pub_key = PUBKEY::from_private(private_key); - if !self.committee_nodes.contains(&pub_key) { + let entry = pub_key.get_stake_table_entry(1u64); + if !self.committee_nodes_with_stake.contains(&entry) { return Ok(None); } let mut message: Vec = vec![]; @@ -137,7 +132,8 @@ where ) -> Result, ElectionError> { match token { Checked::Valid(t) | Checked::Unchecked(t) => { - if self.committee_nodes.contains(&pub_key) { + let entry = pub_key.get_stake_table_entry(1u64); + if self.committee_nodes_with_stake.contains(&entry) { Ok(Checked::Valid(t)) } else { Ok(Checked::Inval(t)) @@ -153,18 +149,13 @@ where fn create_election( keys_qc: Vec, - keys: Vec, config: TYPES::ElectionConfigType, ) -> Self { - let mut committee_nodes = keys.clone(); let mut committee_nodes_with_stake = keys_qc.clone(); - committee_nodes.truncate(config.num_nodes.try_into().unwrap()); debug!("Election Membership Size: {}", config.num_nodes); committee_nodes_with_stake.truncate(config.num_nodes.try_into().unwrap()); Self { nodes_with_stake: keys_qc, - nodes: keys, - committee_nodes, committee_nodes_with_stake, _type_phantom: PhantomData, _leaf_phantom: PhantomData, @@ -172,21 +163,28 @@ where } fn total_nodes(&self) -> usize { - self.committee_nodes.len() + self.committee_nodes_with_stake.len() } fn success_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes.len() as u64 * 2) / 3) + 1).unwrap() + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 2) / 3) + 1).unwrap() } fn failure_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes.len() as u64) / 3) + 1).unwrap() + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64) / 3) + 1).unwrap() } fn get_committee( &self, _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { - self.committee_nodes.clone().into_iter().collect() + // Transfer from committee_nodes_with_stake to pure committee_nodes + (0..self.committee_nodes_with_stake.len()) + .map(|node_id| { + ::SignatureKey::get_public_key( + &self.committee_nodes_with_stake[node_id], + ) + }) + .collect() } } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c01bf53982..52ed2b73bc 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -6,15 +6,16 @@ use super::NetworkingMetrics; use crate::NodeImplementation; use async_compatibility_layer::{ art::{async_block_on, async_sleep, async_spawn}, - channel::{unbounded, UnboundedReceiver, UnboundedSender}, + channel::{unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, }; use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; +use hotshot_constants::LOOK_AHEAD; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, + data::ViewNumber, message::{Message, MessageKind}, traits::{ election::Membership, @@ -26,8 +27,8 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, + state::ConsensusTime, }, - vote::VoteType, }; use hotshot_utils::bincode::bincode_opts; use libp2p_identity::PeerId; @@ -49,10 +50,13 @@ use std::{ marker::PhantomData, num::NonZeroUsize, str::FromStr, - sync::{atomic::AtomicBool, Arc}, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, time::Duration, }; -use tracing::{error, info, instrument}; +use tracing::{error, info, instrument, warn}; /// hardcoded topic of QC used pub const QC_TOPIC: &str = "global"; @@ -79,9 +83,6 @@ struct Libp2pNetworkInner { pk: K, /// handle to control the network handle: Arc>, - /// Bidirectional map from public key provided by espresso - /// to public key provided by libp2p - pubkey_pid_map: RwLock>, /// map of known replica peer ids to public keys broadcast_recv: UnboundedReceiver, /// Sender for broadcast messages @@ -90,6 +91,8 @@ struct Libp2pNetworkInner { direct_send: UnboundedSender, /// Receiver for direct messages direct_recv: UnboundedReceiver, + /// Sender for node lookup (relevant view number, key of node) (None for shutdown) + node_lookup_send: UnboundedSender>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -107,6 +110,10 @@ struct Libp2pNetworkInner { /// hash(hashset) -> topic /// btreemap ordered so is hashable topic_map: RwLock, String>>, + /// the latest view number (for node lookup purposes) + /// NOTE: supposed to represent a ViewNumber but we + /// haven't made that atomic yet and we prefer lock-free + latest_seen_view: Arc, } /// Networking implementation that uses libp2p @@ -244,11 +251,7 @@ impl Libp2pNetwork { /// Returns when network is ready pub async fn wait_for_ready(&self) { loop { - if self - .inner - .is_ready - .load(std::sync::atomic::Ordering::Relaxed) - { + if self.inner.is_ready.load(Ordering::Relaxed) { break; } async_sleep(Duration::from_secs(1)).await; @@ -282,7 +285,7 @@ impl Libp2pNetwork { ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); let network_handle = Arc::new( - NetworkNodeHandle::<()>::new(config, id) + Box::pin(NetworkNodeHandle::<()>::new(config, id)) .await .map_err(Into::::into)?, ); @@ -300,7 +303,6 @@ impl Libp2pNetwork { } let mut pubkey_pid_map = BiHashMap::new(); - pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); let mut topic_map = BiHashMap::new(); @@ -313,11 +315,11 @@ impl Libp2pNetwork { // if bounded figure out a way to log dropped msgs let (direct_send, direct_recv) = unbounded(); let (broadcast_send, broadcast_recv) = unbounded(); + let (node_lookup_send, node_lookup_recv) = unbounded(); let result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: network_handle, - pubkey_pid_map: RwLock::new(pubkey_pid_map), broadcast_recv, direct_send: direct_send.clone(), direct_recv, @@ -330,16 +332,49 @@ impl Libp2pNetwork { is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics: NetworkingMetrics::new(&*metrics), topic_map, + node_lookup_send, + // Start the latest view from 0. "Latest" refers to "most recent view we are polling for + // proposals on". We need this because to have consensus info injected we need a working + // network already. In the worst case, we send a few lookups we don't need. + latest_seen_view: Arc::new(AtomicU64::new(0)), }), }; result.spawn_event_generator(direct_send, broadcast_send); - + result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); Ok(result) } + /// Spawns task for looking up nodes pre-emptively + #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] + fn spawn_node_lookup(&self, node_lookup_recv: UnboundedReceiver>) { + let handle = self.inner.handle.clone(); + let dht_timeout = self.inner.dht_timeout; + let latest_seen_view = self.inner.latest_seen_view.clone(); + + // deals with handling lookup queue. should be infallible + let handle_ = handle.clone(); + async_spawn(async move { + // cancels on shutdown + while let Ok(Some((view_number, pk))) = node_lookup_recv.recv().await { + /// defines lookahead threshold based on the constant + const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; + + info!("Performing lookup for peer {:?}", pk); + + // only run if we are not too close to the next view number + if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { + // look up + if let Err(err) = handle_.lookup_node::(pk.clone(), dht_timeout).await { + error!("Failed to perform lookup for key {:?}: {}", pk, err); + }; + } + } + }); + } + /// Initiates connection to the outside world fn spawn_connect(&self, id: usize) { let pk = self.inner.pk.clone(); @@ -376,7 +411,7 @@ impl Libp2pNetwork { .await .unwrap(); - while !is_bootstrapped.load(std::sync::atomic::Ordering::Relaxed) { + while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; } @@ -414,7 +449,7 @@ impl Libp2pNetwork { node_type ); - is_ready.store(true, std::sync::atomic::Ordering::Relaxed); + is_ready.store(true, Ordering::Relaxed); info!("STARTING CONSENSUS ON {:?}", handle.peer_id()); Ok::<(), NetworkError>(()) } @@ -480,7 +515,7 @@ impl Libp2pNetwork { .context(FailedToSerializeSnafu); } NetworkEvent::IsBootstrapped => { - is_bootstrapped.store(true, std::sync::atomic::Ordering::Relaxed); + is_bootstrapped.store(true, Ordering::Relaxed); } } } @@ -499,9 +534,7 @@ impl ConnectedNetwork for Libp2p #[instrument(name = "Libp2pNetwork::ready_nonblocking", skip_all)] async fn is_ready(&self) -> bool { - self.inner - .is_ready - .load(std::sync::atomic::Ordering::Relaxed) + self.inner.is_ready.load(Ordering::Relaxed) } #[instrument(name = "Libp2pNetwork::shut_down", skip_all)] @@ -511,6 +544,7 @@ impl ConnectedNetwork for Libp2p Self: 'b, { let closure = async move { + self.inner.node_lookup_send.send(None).await.unwrap(); if self.inner.handle.is_killed() { error!("Called shut down when already shut down! Noop."); } else { @@ -544,7 +578,7 @@ impl ConnectedNetwork for Libp2p source: NetworkNodeHandleError::NoSuchTopic, })? .clone(); - error!("Broadcasting to topic {}", topic); + info!("broadcasting to topic: {}", topic); // gossip doesn't broadcast from itself, so special case if recipients.contains(&self.inner.pk) { @@ -586,37 +620,24 @@ impl ConnectedNetwork for Libp2p } self.wait_for_ready().await; - // check local cache. if that fails, initiate search - // if search fails, just error out - // NOTE: relay may be a good way to fix this in the future . - let pid: PeerId = if let Some(pid) = self + + let pid = match self .inner - .pubkey_pid_map - .read() + .handle + .lookup_node::(recipient.clone(), self.inner.dht_timeout) .await - .get_by_left(&recipient) { - *pid - } else { - match self - .inner - .handle - .get_record_timeout(&recipient, self.inner.dht_timeout) - .await - { - Ok(r) => r, - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - error!("Failed to message {:?} because could not find recipient peer id for pk {:?}", message, recipient); - return Err(NetworkError::Libp2p { source: e }); - } + Ok(pid) => pid, + Err(err) => { + self.inner.metrics.message_failed_to_send.add(1); + error!( + "Failed to message {:?} because could not find recipient peer id for pk {:?}", + message, recipient + ); + return Err(NetworkError::Libp2p { source: err }); } }; - if let Err(e) = self.inner.handle.lookup_pid(pid).await { - self.inner.metrics.message_failed_to_send.add(1); - return Err(e.into()); - } match self.inner.handle.direct_request(pid, &message).await { Ok(()) => { self.inner.metrics.outgoing_message_count.add(1); @@ -669,44 +690,37 @@ impl ConnectedNetwork for Libp2p boxed_sync(closure) } - #[instrument(name = "Libp2pNetwork::lookup_node", skip_all)] - async fn lookup_node(&self, pk: K) -> Result<(), NetworkError> { - self.wait_for_ready().await; - - if self.inner.handle.is_killed() { - return Err(NetworkError::ShutDown); - } - - let maybe_pid = self - .inner - .handle - .get_record_timeout(&pk, self.inner.dht_timeout) + #[instrument(name = "Libp2pNetwork::queue_node_lookup", skip_all)] + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: K, + ) -> Result<(), UnboundedSendError>> { + self.inner + .node_lookup_send + .send(Some((view_number, pk))) .await - .map_err(Into::::into); - - if let Ok(pid) = maybe_pid { - if self.inner.handle.lookup_pid(pid).await.is_err() { - error!("Failed to look up pid"); - return Err(NetworkError::Libp2p { - source: NetworkNodeHandleError::DHTError { - source: libp2p_networking::network::error::DHTError::NotFound, - }, - }); - }; - } else { - error!("Unable to look up pubkey {:?}", pk); - return Err(NetworkError::Libp2p { - source: NetworkNodeHandleError::DHTError { - source: libp2p_networking::network::error::DHTError::NotFound, - }, - }); - } - - Ok(()) } - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + match event { + ConsensusIntentEvent::PollFutureLeader(future_view, future_leader) => { + let _ = self + .queue_node_lookup(ViewNumber::new(future_view), future_leader) + .await + .map_err(|err| warn!("failed to process node lookup request: {}", err)); + } + + ConsensusIntentEvent::PollForProposal(new_view) => { + if new_view > self.inner.latest_seen_view.load(Ordering::Relaxed) { + self.inner + .latest_seen_view + .store(new_view, Ordering::Relaxed); + } + } + + _ => {} + } } } @@ -715,21 +729,14 @@ impl ConnectedNetwork for Libp2p pub struct Libp2pCommChannel< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >( Arc, TYPES::SignatureKey>>, - PhantomData<(TYPES, I, PROPOSAL, VOTE, MEMBERSHIP)>, + PhantomData<(TYPES, I, MEMBERSHIP)>, ); -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > Libp2pCommChannel +impl, MEMBERSHIP: Membership> + Libp2pCommChannel { /// create a new libp2p communication channel #[must_use] @@ -738,14 +745,9 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > TestableNetworkingImplementation> - for Libp2pCommChannel +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for Libp2pCommChannel where MessageKind: ViewMessage, { @@ -787,14 +789,9 @@ where // top // we don't really want to make this the default implementation because that forces it to require ConnectedNetwork to be implemented. The struct we implement over might use multiple ConnectedNetworks #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for Libp2pCommChannel +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for Libp2pCommChannel where MessageKind: ViewMessage, { @@ -851,30 +848,30 @@ where boxed_sync(closure) } - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - self.0.lookup_node(pk).await + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + self.0.queue_node_lookup(view_number, pk).await } - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(&self.0, event) + .await; } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, Libp2pNetwork, TYPES::SignatureKey>, - > for Libp2pCommChannel + > for Libp2pCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d6269e2c38..9d9e751ee5 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -6,7 +6,7 @@ use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetrics}; use crate::NodeImplementation; use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, + art::async_spawn, channel::{bounded, Receiver, SendError, Sender}, }; use async_lock::{Mutex, RwLock}; @@ -16,20 +16,17 @@ use dashmap::DashMap; use futures::StreamExt; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, message::{Message, MessageKind}, traits::{ election::Membership, metrics::{Metrics, NoMetrics}, network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkMsg, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - ViewMessage, + CommunicationChannel, ConnectedNetwork, NetworkMsg, TestableChannelImplementation, + TestableNetworkingImplementation, TransmitType, ViewMessage, }, node_implementation::NodeType, signature_key::SignatureKey, }, - vote::VoteType, }; use hotshot_utils::bincode::bincode_opts; use rand::Rng; @@ -45,18 +42,6 @@ use std::{ }; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; -#[derive(Debug, Clone, Copy)] -/// dummy implementation of network reliability -pub struct DummyReliability {} -impl NetworkReliability for DummyReliability { - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> std::time::Duration { - std::time::Duration::ZERO - } -} - /// Shared state for in-memory mock networking. /// /// This type is responsible for keeping track of the channels to each [`MemoryNetwork`], and is @@ -107,6 +92,9 @@ struct MemoryNetworkInner { /// The networking metrics we're keeping track of metrics: NetworkingMetrics, + + /// config to introduce unreliability to the network + reliability_config: Option>>, } /// In memory only network simulator. @@ -137,7 +125,7 @@ impl MemoryNetwork { pub_key: K, metrics: Box, master_map: Arc>, - reliability_config: Option>, + reliability_config: Option>>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); let (broadcast_input, broadcast_task_recv) = bounded(128); @@ -168,30 +156,11 @@ impl MemoryNetwork { match x { Ok(x) => { let dts = direct_task_send.clone(); - if let Some(r) = reliability_config.clone() { - async_spawn(async move { - if r.sample_keep() { - let delay = r.sample_delay(); - if delay > std::time::Duration::ZERO { - async_sleep(delay).await; - } - let res = dts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - error!("Output queue receivers are shutdown"); - } - } else { - warn!("dropping packet!"); - } - }); + let res = dts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); } else { - let res = dts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - error!("Output queue receivers are shutdown"); - } + error!("Output queue receivers are shutdown"); } } Err(e) => { @@ -206,28 +175,11 @@ impl MemoryNetwork { match x { Ok(x) => { let bts = broadcast_task_send.clone(); - if let Some(r) = reliability_config.clone() { - async_spawn(async move { - if r.sample_keep() { - let delay = r.sample_delay(); - if delay > std::time::Duration::ZERO { - async_sleep(delay).await; - } - let res = bts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - warn!("dropping packet!"); - } - } - }); + let res = bts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); } else { - let res = bts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - warn!("dropping packet!"); - } + warn!("dropping packet!"); } } Err(e) => { @@ -252,6 +204,7 @@ impl MemoryNetwork { master_map: master_map.clone(), in_flight_message_count, metrics: NetworkingMetrics::new(&*metrics), + reliability_config, }), }; master_map.map.insert(pub_key, mn.clone()); @@ -350,20 +303,40 @@ impl ConnectedNetwork for Memory .context(FailedToSerializeSnafu)?; trace!("Message bincoded, sending"); for node in &self.inner.master_map.map { + // TODO delay/drop etc here let (key, node) = node.pair(); if !recipients.contains(key) { continue; } trace!(?key, "Sending message to node"); - let res = node.broadcast_input(vec.clone()).await; - match res { - Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); - trace!(?key, "Delivered message to remote"); + if let Some(r) = &self.inner.reliability_config { + let config = r.read().await; + { + let node2 = node.clone(); + let fut = config.chaos_send_msg( + vec.clone(), + Arc::new(move |msg: Vec| { + let node3 = (node2).clone(); + boxed_sync(async move { + let _res = node3.broadcast_input(msg).await; + // NOTE we're dropping metrics here but this is only for testing + // purposes. I think that should be okay + }) + }), + ); + async_spawn(fut); } - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - warn!(?e, ?key, "Error sending broadcast message to node"); + } else { + let res = node.broadcast_input(vec.clone()).await; + match res { + Ok(_) => { + self.inner.metrics.outgoing_message_count.add(1); + trace!(?key, "Delivered message to remote"); + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + warn!(?e, ?key, "Error sending broadcast message to node"); + } } } } @@ -379,18 +352,37 @@ impl ConnectedNetwork for Memory .context(FailedToSerializeSnafu)?; trace!("Message bincoded, finding recipient"); if let Some(node) = self.inner.master_map.map.get(&recipient) { - let node = node.value(); - let res = node.direct_input(vec).await; - match res { - Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); - trace!(?recipient, "Delivered message to remote"); - Ok(()) + let node = node.value().clone(); + if let Some(r) = &self.inner.reliability_config { + let config = r.read().await; + { + let fut = config.chaos_send_msg( + vec.clone(), + Arc::new(move |msg: Vec| { + let node2 = node.clone(); + boxed_sync(async move { + let _res = node2.broadcast_input(msg).await; + // NOTE we're dropping metrics here but this is only for testing + // purposes. I think that should be okay + }) + }), + ); + async_spawn(fut); } - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - warn!(?e, ?recipient, "Error delivering direct message"); - Err(NetworkError::CouldNotDeliver) + Ok(()) + } else { + let res = node.direct_input(vec).await; + match res { + Ok(_) => { + self.inner.metrics.outgoing_message_count.add(1); + trace!(?recipient, "Delivered message to remote"); + Ok(()) + } + Err(e) => { + self.inner.metrics.message_failed_to_send.add(1); + warn!(?e, ?recipient, "Error delivering direct message"); + Err(NetworkError::CouldNotDeliver) + } } } } else { @@ -448,16 +440,6 @@ impl ConnectedNetwork for Memory }; boxed_sync(closure) } - - #[instrument(name = "MemoryNetwork::lookup_node", skip_all)] - async fn lookup_node(&self, _pk: K) -> Result<(), NetworkError> { - // no lookup required - Ok(()) - } - - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required - } } /// memory identity communication channel @@ -465,21 +447,14 @@ impl ConnectedNetwork for Memory pub struct MemoryCommChannel< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >( Arc, TYPES::SignatureKey>>, - PhantomData<(I, PROPOSAL, VOTE, MEMBERSHIP)>, + PhantomData<(I, MEMBERSHIP)>, ); -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > MemoryCommChannel +impl, MEMBERSHIP: Membership> + MemoryCommChannel { /// create new communication channel #[must_use] @@ -488,14 +463,9 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > TestableNetworkingImplementation> - for MemoryCommChannel +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for MemoryCommChannel where MessageKind: ViewMessage, { @@ -525,14 +495,9 @@ where } #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for MemoryCommChannel +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for MemoryCommChannel where MessageKind: ViewMessage, { @@ -588,31 +553,15 @@ where let closure = async move { self.0.recv_msgs(transmit_type).await }; boxed_sync(closure) } - - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - self.0.lookup_node(pk).await - } - - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) { - // Not required - } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, MemoryNetwork, TYPES::SignatureKey>, - > for MemoryCommChannel + > for MemoryCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> @@ -620,359 +569,3 @@ impl< Box::new(move |network| MemoryCommChannel::new(network)) } } - -#[cfg(test)] -// panic in tests -#[allow(clippy::panic)] -mod tests { - // use super::*; - // use crate::{ - // demos::vdemo::{Addition, Subtraction, VDemoBlock, VDemoState, VDemoTransaction}, - // traits::election::static_committee::{ - // GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, - // }, - // }; - // - // use crate::traits::implementations::MemoryStorage; - // use async_compatibility_layer::logging::setup_logging; - // use hotshot_types::traits::election::QuorumExchange; - // use hotshot_types::traits::node_implementation::{ChannelMaps, ValidatingExchanges}; - // use hotshot_types::{ - // data::ViewNumber, - // message::{DataMessage, MessageKind, ValidatingMessage}, - // traits::{ - // signature_key::ed25519::{Ed25519Priv, Ed25519Pub}, - // state::ConsensusTime, - // }, - // vote::QuorumVote, - // }; - // use hotshot_types::{ - // data::{ValidatingLeaf, ValidatingProposal}, - // traits::consensus_type::validating_consensus::ValidatingConsensus, - // }; - // use serde::{Deserialize, Serialize}; - // - // #[derive( - // Copy, - // Clone, - // Debug, - // Default, - // Hash, - // PartialEq, - // Eq, - // PartialOrd, - // Ord, - // serde::Serialize, - // serde::Deserialize, - // )] - // struct Test {} - // #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] - // struct TestImpl {} - // - // // impl NetworkMsg for Test {} - // - // impl NodeType for Test { - // // TODO (da) can this be SequencingConsensus? - // type ConsensusType = ValidatingConsensus; - // type Time = ViewNumber; - // type BlockType = VDemoBlock; - // type SignatureKey = Ed25519Pub; - // type VoteTokenType = StaticVoteToken; - // type Transaction = VDemoTransaction; - // type ElectionConfigType = StaticElectionConfig; - // type StateType = VDemoState; - // } - // - // type TestMembership = GeneralStaticCommittee; - // type TestNetwork = MemoryCommChannel; - // - // impl NodeImplementation for TestImpl { - // type ConsensusMessage = ValidatingMessage; - // type Exchanges = ValidatingExchanges< - // Test, - // Message, - // QuorumExchange< - // Test, - // TestLeaf, - // TestProposal, - // TestMembership, - // TestNetwork, - // Message, - // >, - // ViewSyncExchange>, - // >; - // type Leaf = TestLeaf; - // type Storage = MemoryStorage; - // - // fn new_channel_maps( - // start_view: ViewNumber, - // ) -> (ChannelMaps, Option>) { - // (ChannelMaps::new(start_view), None) - // } - // } - // - // type TestLeaf = ValidatingLeaf; - // type TestVote = QuorumVote; - // type TestProposal = ValidatingProposal; - // - // /// fake Eq - // /// we can't compare the votetokentype for equality, so we can't - // /// derive EQ on `VoteType` and thereby message - // /// we are only sending data messages, though so we compare key and - // /// data message - // fn fake_message_eq(message_1: Message, message_2: Message) { - // assert_eq!(message_1.sender, message_2.sender); - // if let MessageKind::Data(DataMessage::SubmitTransaction(d_1, _)) = message_1.kind { - // if let MessageKind::Data(DataMessage::SubmitTransaction(d_2, _)) = message_2.kind { - // assert_eq!(d_1, d_2); - // } - // } else { - // panic!("Got unexpected message type in memory test!"); - // } - // } - // - // #[instrument] - // fn get_pubkey() -> Ed25519Pub { - // let priv_key = Ed25519Priv::generate(); - // Ed25519Pub::from_private(&priv_key) - // } - // - // /// create a message - // fn gen_messages(num_messages: u64, seed: u64, pk: Ed25519Pub) -> Vec> { - // let mut messages = Vec::new(); - // for i in 0..num_messages { - // let message = Message { - // sender: pk, - // kind: MessageKind::Data(DataMessage::SubmitTransaction( - // VDemoTransaction { - // add: Addition { - // account: "A".to_string(), - // amount: 50 + i + seed, - // }, - // sub: Subtraction { - // account: "B".to_string(), - // amount: 50 + i + seed, - // }, - // nonce: seed + i, - // padding: vec![50; 0], - // }, - // ::new(0), - // )), - // _phantom: PhantomData, - // }; - // messages.push(message); - // } - // messages - // } - // - // // Spawning a single MemoryNetwork should produce no errors - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[instrument] - // async fn spawn_single() { - // setup_logging(); - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key = get_pubkey(); - // let _network = MemoryNetwork::new(pub_key, NoMetrics::boxed(), group, Option::None); - // } - // - // // // Spawning a two MemoryNetworks and connecting them should produce no errors - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[instrument] - // async fn spawn_double() { - // setup_logging(); - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key_1 = get_pubkey(); - // let _network_1 = - // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // let pub_key_2 = get_pubkey(); - // let _network_2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // } - // - // // Check to make sure direct queue works - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[allow(deprecated)] - // #[instrument] - // async fn direct_queue() { - // setup_logging(); - // // Create some dummy messages - // - // // Make and connect the networking instances - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key_1 = get_pubkey(); - // let network1 = - // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // let pub_key_2 = get_pubkey(); - // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // - // let first_messages: Vec> = gen_messages(5, 100, pub_key_1); - // - // // Test 1 -> 2 - // // Send messages - // for sent_message in first_messages { - // network1 - // .direct_message(sent_message.clone(), pub_key_2) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network2 - // .recv_msgs(TransmitType::Direct) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // - // let second_messages: Vec> = gen_messages(5, 200, pub_key_2); - // - // // Test 2 -> 1 - // // Send messages - // for sent_message in second_messages { - // network2 - // .direct_message(sent_message.clone(), pub_key_1) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network1 - // .recv_msgs(TransmitType::Direct) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // } - // - // // Check to make sure direct queue works - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[allow(deprecated)] - // #[instrument] - // async fn broadcast_queue() { - // setup_logging(); - // // Make and connect the networking instances - // let group: Arc, ::SignatureKey>> = - // MasterMap::new(); - // trace!(?group); - // let pub_key_1 = get_pubkey(); - // let network1 = - // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // let pub_key_2 = get_pubkey(); - // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // - // let first_messages: Vec> = gen_messages(5, 100, pub_key_1); - // - // // Test 1 -> 2 - // // Send messages - // for sent_message in first_messages { - // network1 - // .broadcast_message( - // sent_message.clone(), - // vec![pub_key_2].into_iter().collect::>(), - // ) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network2 - // .recv_msgs(TransmitType::Broadcast) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // - // let second_messages: Vec> = gen_messages(5, 200, pub_key_2); - // - // // Test 2 -> 1 - // // Send messages - // for sent_message in second_messages { - // network2 - // .broadcast_message( - // sent_message.clone(), - // vec![pub_key_1].into_iter().collect::>(), - // ) - // .await - // .expect("Failed to message node"); - // let mut recv_messages = network1 - // .recv_msgs(TransmitType::Broadcast) - // .await - // .expect("Failed to receive message"); - // let recv_message = recv_messages.pop().unwrap(); - // assert!(recv_messages.is_empty()); - // fake_message_eq(sent_message, recv_message); - // } - // } - // - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // #[instrument] - // #[allow(deprecated)] - // async fn test_in_flight_message_count() { - // // setup_logging(); - // - // // let group: Arc, ::SignatureKey>> = - // // MasterMap::new(); - // // trace!(?group); - // // let pub_key_1 = get_pubkey(); - // // let network1 = - // // MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); - // // let pub_key_2 = get_pubkey(); - // // let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); - // - // // // Create some dummy messages - // // let messages: Vec> = gen_messages(5, 100, pub_key_1); - // - // // // assert_eq!(network1.in_flight_message_count(), Some(0)); - // // // assert_eq!(network2.in_flight_message_count(), Some(0)); - // - // // for (_count, message) in messages.iter().enumerate() { - // // network1 - // // .direct_message(message.clone(), pub_key_2) - // // .await - // // .unwrap(); - // // // network 2 has received `count` broadcast messages and `count + 1` direct messages - // // // assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); - // - // // // network2.broadcast_message(message.clone()).await.unwrap(); - // // // network 1 has received `count` broadcast messages - // // // assert_eq!(network1.in_flight_message_count(), Some(count + 1)); - // - // // // network 2 has received `count + 1` broadcast messages and `count + 1` direct messages - // // // assert_eq!(network2.in_flight_message_count(), Some((count + 1) * 2)); - // // } - // - // // for _count in (0..messages.len()).rev() { - // // network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); - // // // assert_eq!(network1.in_flight_message_count(), Some(count)); - // - // // network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); - // // network2.recv_msgs(TransmitType::Direct).await.unwrap(); - // // // assert_eq!(network2.in_flight_message_count(), Some(count * 2)); - // // } - // - // // // assert_eq!(network1.in_flight_message_count(), Some(0)); - // // // assert_eq!(network2.in_flight_message_count(), Some(0)); - // } -} diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs index 723c7c071d..b3324681a5 100644 --- a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs +++ b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs @@ -10,9 +10,10 @@ use async_trait::async_trait; use futures::join; +use async_compatibility_layer::channel::UnboundedSendError; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, + data::ViewNumber, message::Message, traits::{ election::Membership, @@ -23,7 +24,6 @@ use hotshot_types::{ }, node_implementation::NodeType, }, - vote::VoteType, }; use std::{marker::PhantomData, sync::Arc}; use tracing::error; @@ -155,13 +155,8 @@ impl, MEMBERSHIP: Membership, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> for WebServerWithFallbackCommChannel { type NETWORK = CombinedNetworks; @@ -263,51 +258,38 @@ impl< boxed_sync(closure) } - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - match join!( - self.network().lookup_node(pk.clone()), - self.fallback().lookup_node(pk) - ) { - (Err(e1), Err(e2)) => { - error!( - "Both network lookups failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary lookup with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup lookup with error: {}", e); - Ok(()) - } - _ => Ok(()), - } + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + self.network() + .queue_node_lookup(view_number, pk.clone()) + .await?; + self.fallback().queue_node_lookup(view_number, pk).await?; + + Ok(()) } - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { as ConnectedNetwork< Message, TYPES::SignatureKey, - >>::inject_consensus_info(self.network(), event) + >>::inject_consensus_info(self.network(), event.clone()) .await; + + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(self.fallback(), event) + .await; } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, CombinedNetworks, > for WebServerWithFallbackCommChannel diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index e0001b94ab..72cd62cc8d 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -13,7 +13,6 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ - data::ProposalType, message::{Message, MessagePurpose}, traits::{ election::Membership, @@ -25,7 +24,6 @@ use hotshot_types::{ node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, - vote::VoteType, }; use hotshot_web_server::{self, config}; use rand::random; @@ -48,21 +46,14 @@ use tracing::{debug, error, info}; pub struct WebCommChannel< TYPES: NodeType, I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, >( Arc, TYPES::SignatureKey, TYPES>>, - PhantomData<(MEMBERSHIP, I, PROPOSAL, VOTE)>, + PhantomData<(MEMBERSHIP, I)>, ); -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > WebCommChannel +impl, MEMBERSHIP: Membership> + WebCommChannel { /// Create new communication channel #[must_use] @@ -128,17 +119,23 @@ struct Inner { // TODO ED This should be TYPES::Time // Theoretically there should never be contention for this lock... /// Task map for quorum proposals. - proposal_task_map: Arc>>>, + proposal_task_map: + Arc>>>>, /// Task map for quorum votes. - vote_task_map: Arc>>>, + vote_task_map: + Arc>>>>, /// Task map for DACs. - dac_task_map: Arc>>>, + dac_task_map: + Arc>>>>, /// Task map for view sync certificates. - view_sync_cert_task_map: Arc>>>, + view_sync_cert_task_map: + Arc>>>>, /// Task map for view sync votes. - view_sync_vote_task_map: Arc>>>, + view_sync_vote_task_map: + Arc>>>>, /// Task map for transactions - txn_task_map: Arc>>>, + txn_task_map: + Arc>>>>, } impl Inner { @@ -146,7 +143,7 @@ impl Inner { /// Pull a web server. async fn poll_web_server( &self, - receiver: UnboundedReceiver, + receiver: UnboundedReceiver>, message_purpose: MessagePurpose, view_number: u64, ) -> Result<(), NetworkError> { @@ -161,6 +158,7 @@ impl Inner { while self.running.load(Ordering::Relaxed) { let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), + MessagePurpose::CurrentProposal => config::get_recent_proposal_route(), MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), MessagePurpose::Data => config::get_transactions_route(tx_index), MessagePurpose::Internal => unimplemented!(), @@ -171,6 +169,9 @@ impl Inner { config::get_view_sync_vote_route(view_number, vote_index) } MessagePurpose::DAC => config::get_da_certificate_route(view_number), + MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` + MessagePurpose::VidVote => config::get_vid_vote_route(view_number, vote_index), // like `Vote` + MessagePurpose::VidCert => config::get_vid_cert_route(view_number), // like `DAC` }; if message_purpose == MessagePurpose::Data { @@ -221,6 +222,15 @@ impl Inner { // } // } } + MessagePurpose::CurrentProposal => { + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + } MessagePurpose::Vote => { // error!( // "Received {} votes from web server for view {} is da {}", @@ -234,6 +244,14 @@ impl Inner { direct_poll_queue.push(vote.clone()); } } + MessagePurpose::VidVote => { + // TODO copy-pasted from `MessagePurpose::Vote` https://github.com/EspressoSystems/HotShot/issues/1690 + let mut direct_poll_queue = self.direct_poll_queue.write().await; + for vote in &deserialized_messages { + vote_index += 1; + direct_poll_queue.push(vote.clone()); + } + } MessagePurpose::DAC => { debug!( "Received DAC from web server for view {} {}", @@ -249,6 +267,41 @@ impl Inner { // In future we should check to make sure DAC is valid return Ok(()); } + MessagePurpose::VidCert => { + // TODO copy-pasted from `MessagePurpose::DAC` https://github.com/EspressoSystems/HotShot/issues/1690 + debug!( + "Received VID cert from web server for view {} {}", + view_number, self.is_da + ); + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + // return if we found a VID cert, since there will only be 1 per view + // In future we should check to make sure VID cert is valid + return Ok(()); + } + MessagePurpose::VidDisperse => { + // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + // Wait for the view to change before polling for proposals again + // let event = receiver.recv().await; + // match event { + // Ok(event) => view_number = event.view_number(), + // Err(_r) => { + // error!("Proposal receiver error! It was likely shutdown") + // } + // } + } MessagePurpose::ViewSyncVote => { // error!( // "Received {} view sync votes from web server for view {} is da {}", @@ -500,13 +553,18 @@ impl< MessagePurpose::Proposal => config::post_proposal_route(*view_number), MessagePurpose::Vote => config::post_vote_route(*view_number), MessagePurpose::Data => config::post_transactions_route(), - MessagePurpose::Internal => return Err(WebServerNetworkError::EndpointError), + MessagePurpose::Internal | MessagePurpose::CurrentProposal => { + return Err(WebServerNetworkError::EndpointError) + } MessagePurpose::ViewSyncProposal => { // error!("Posting view sync proposal route is: {}", config::post_view_sync_proposal_route(*view_number)); config::post_view_sync_proposal_route(*view_number) } MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), MessagePurpose::DAC => config::post_da_certificate_route(*view_number), + MessagePurpose::VidVote => config::post_vid_vote_route(*view_number), + MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), + MessagePurpose::VidCert => config::post_vid_cert_route(*view_number), }; let network_msg: SendMsg = SendMsg { @@ -518,14 +576,9 @@ impl< } #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for WebCommChannel +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for WebCommChannel { type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; /// Blocks until node is successfully initialized @@ -608,13 +661,7 @@ impl< boxed_sync(closure) } - /// look up a node - /// blocking - async fn lookup_node(&self, _pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - Ok(()) - } - - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { as ConnectedNetwork< Message, TYPES::SignatureKey, @@ -725,14 +772,8 @@ impl< boxed_sync(closure) } - /// look up a node - /// blocking - async fn lookup_node(&self, _pk: K) -> Result<(), NetworkError> { - Ok(()) - } - #[allow(clippy::too_many_lines)] - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { debug!( "Injecting event: {:?} is da {}", event.clone(), @@ -783,6 +824,25 @@ impl< .await; } } + ConsensusIntentEvent::PollForCurrentProposal => { + // create new task + let (_, receiver) = unbounded(); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } ConsensusIntentEvent::PollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { @@ -997,7 +1057,7 @@ impl< }; } - _ => error!("Unexpected event!"), + _ => {} } } } @@ -1052,14 +1112,9 @@ impl> } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > TestableNetworkingImplementation> - for WebCommChannel +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for WebCommChannel { fn generator( expected_node_count: usize, @@ -1087,21 +1142,13 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > +impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, Message, - PROPOSAL, - VOTE, MEMBERSHIP, WebServerNetwork, TYPES::SignatureKey, TYPES>, - > for WebCommChannel + > for WebCommChannel { fn generate_network() -> Box< dyn Fn(Arc, TYPES::SignatureKey, TYPES>>) -> Self diff --git a/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs b/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs deleted file mode 100644 index 723c7c071d..0000000000 --- a/hotshot/src/traits/networking/web_sever_libp2p_fallback.rs +++ /dev/null @@ -1,318 +0,0 @@ -//! Networking Implementation that has a primary and a fallback newtork. If the primary -//! Errors we will use the backup to send or receive -use super::NetworkError; -use crate::{ - traits::implementations::{Libp2pNetwork, WebServerNetwork}, - NodeImplementation, -}; - -use async_trait::async_trait; - -use futures::join; - -use hotshot_task::{boxed_sync, BoxSyncFuture}; -use hotshot_types::{ - data::ProposalType, - message::Message, - traits::{ - election::Membership, - network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - ViewMessage, - }, - node_implementation::NodeType, - }, - vote::VoteType, -}; -use std::{marker::PhantomData, sync::Arc}; -use tracing::error; -/// A communication channel with 2 networks, where we can fall back to the slower network if the -/// primary fails -#[derive(Clone, Debug)] -pub struct WebServerWithFallbackCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { - /// The two networks we'll use for send/recv - networks: Arc>, -} - -impl, MEMBERSHIP: Membership> - WebServerWithFallbackCommChannel -{ - /// Constructor - #[must_use] - pub fn new(networks: Arc>) -> Self { - Self { networks } - } - - /// Get a ref to the primary network - #[must_use] - pub fn network(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { - &self.networks.0 - } - - /// Get a ref to the backup network - #[must_use] - pub fn fallback(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { - &self.networks.1 - } -} - -/// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` -/// We need this so we can impl `TestableNetworkingImplementation` -/// on the tuple -#[derive(Debug, Clone)] -pub struct CombinedNetworks< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, ->( - pub WebServerNetwork, TYPES::SignatureKey, TYPES>, - pub Libp2pNetwork, TYPES::SignatureKey>, - pub PhantomData, -); - -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for CombinedNetworks -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - ) -> Box Self + 'static> { - let generators = ( - , - TYPES::SignatureKey, - TYPES, - > as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ), - , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ) - ); - Box::new(move |node_id| { - CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) - }) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for WebServerWithFallbackCommChannel -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - ) -> Box Self + 'static> { - let generator = as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ); - Box::new(move |node_id| Self { - networks: generator(node_id).into(), - }) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -#[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP> - for WebServerWithFallbackCommChannel -{ - type NETWORK = CombinedNetworks; - - async fn wait_for_ready(&self) { - join!( - self.network().wait_for_ready(), - self.fallback().wait_for_ready() - ); - } - - async fn is_ready(&self) -> bool { - self.network().is_ready().await && self.fallback().is_ready().await - } - - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - join!(self.network().shut_down(), self.fallback().shut_down()); - }; - boxed_sync(closure) - } - - async fn broadcast_message( - &self, - message: Message, - election: &MEMBERSHIP, - ) -> Result<(), NetworkError> { - let recipients = - >::get_committee(election, message.get_view_number()); - let fallback = self - .fallback() - .broadcast_message(message.clone(), recipients.clone()); - let network = self.network().broadcast_message(message, recipients); - match join!(fallback, network) { - (Err(e1), Err(e2)) => { - error!( - "Both network broadcasts failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary broadcast with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup broadcast with error: {}", e); - Ok(()) - } - _ => Ok(()), - } - } - - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { - match self - .network() - .direct_message(message.clone(), recipient.clone()) - .await - { - Ok(_) => Ok(()), - Err(e) => { - error!( - "Falling back on direct message, error on primary network: {}", - e - ); - self.fallback().direct_message(message, recipient).await - } - } - } - - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - match self.network().recv_msgs(transmit_type).await { - Ok(msgs) => Ok(msgs), - Err(e) => { - error!( - "Falling back on recv message, error on primary network: {}", - e - ); - self.fallback().recv_msgs(transmit_type).await - } - } - }; - boxed_sync(closure) - } - - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError> { - match join!( - self.network().lookup_node(pk.clone()), - self.fallback().lookup_node(pk) - ) { - (Err(e1), Err(e2)) => { - error!( - "Both network lookups failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary lookup with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup lookup with error: {}", e); - Ok(()) - } - _ => Ok(()), - } - } - - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(self.network(), event) - .await; - } -} - -impl< - TYPES: NodeType, - I: NodeImplementation, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, - > - TestableChannelImplementation< - TYPES, - Message, - PROPOSAL, - VOTE, - MEMBERSHIP, - CombinedNetworks, - > for WebServerWithFallbackCommChannel -{ - fn generate_network() -> Box) -> Self + 'static> { - Box::new(move |network| WebServerWithFallbackCommChannel::new(network)) - } -} diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs index 9d054fe1ac..4654d65ca7 100644 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -33,7 +33,7 @@ where atomic_store: Mutex, /// The Blocks stored by this [`AtomicStorage`] - blocks: HashMapStore, STATE::Block>, + blocks: HashMapStore, STATE::BlockPayload>, /// The [`QuorumCertificate`]s stored by this [`AtomicStorage`] qcs: DualKeyValueStore>, @@ -142,15 +142,15 @@ impl Storage for AtomicStorage { #[instrument(name = "AtomicStorage::get_block", skip_all)] async fn get_block( &self, - hash: &Commitment, - ) -> StorageResult> { + hash: &Commitment, + ) -> StorageResult> { Ok(self.inner.blocks.get(hash).await) } #[instrument(name = "AtomicStorage::get_qc", skip_all)] async fn get_qc( &self, - hash: &Commitment, + hash: &Commitment, ) -> StorageResult>> { Ok(self.inner.qcs.load_by_key_1_ref(hash).await) } @@ -176,7 +176,7 @@ impl Storage for AtomicStorage { #[instrument(name = "AtomicStorage::get_leaf_by_block", skip_all)] async fn get_leaf_by_block( &self, - hash: &Commitment, + hash: &Commitment, ) -> StorageResult>> { Ok(self.inner.leaves.load_by_key_2_ref(hash).await) } @@ -187,7 +187,7 @@ impl Storage for AtomicStorage { } async fn get_internal_state(&self) -> StorageState { - let mut blocks: Vec<(Commitment, STATE::Block)> = + let mut blocks: Vec<(Commitment, STATE::BBlockPayloadlock)> = self.inner.blocks.load_all().await.into_iter().collect(); blocks.sort_by_key(|(hash, _)| *hash); @@ -226,8 +226,8 @@ impl<'a, STATE: StateContents + 'static> StorageUpdater<'a, STATE> #[instrument(name = "AtomicStorage::get_block", skip_all)] async fn insert_block( &mut self, - hash: Commitment, - block: STATE::Block, + hash: Commitment, + block: STATE::BlockPayload, ) -> StorageResult { trace!(?block, "inserting block"); self.inner diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs index 70431af012..84cf8c76a2 100644 --- a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs +++ b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs @@ -181,7 +181,7 @@ pub trait DualKeyValue: Serialize + DeserializeOwned + Clone { } impl DualKeyValue for QuorumCertificate { - type Key1 = Commitment; + type Key1 = Commitment; type Key2 = ViewNumber; const KEY_1_NAME: &'static str = "block_commitment"; @@ -200,7 +200,7 @@ where STATE: StateContents, { type Key1 = Commitment>; - type Key2 = Commitment; + type Key2 = Commitment; const KEY_1_NAME: &'static str = "leaf_commitment"; const KEY_2_NAME: &'static str = "block_commitment"; @@ -210,6 +210,6 @@ where } fn key_2(&self) -> Self::Key2 { - ::commit(&self.deltas) + ::commit(&self.deltas) } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 2a9e570a76..dd6e0217d5 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -117,16 +117,15 @@ mod test { use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use super::*; - use hotshot_signature_key::bn254::BN254Pub; + use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ certificate::{AssembledSignature, QuorumCertificate}, - constants::genesis_proposer_id, - data::{fake_commitment, ValidatingLeaf, ViewNumber}, + data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, traits::{ block_contents::dummy::{DummyBlock, DummyState}, node_implementation::NodeType, state::ConsensusTime, - Block, + BlockPayload, }, }; use std::{fmt::Debug, hash::Hash}; @@ -150,9 +149,9 @@ mod test { impl NodeType for DummyTypes { type Time = ViewNumber; type BlockType = DummyBlock; - type SignatureKey = BN254Pub; + type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; - type Transaction = ::Transaction; + type Transaction = ::Transaction; type ElectionConfigType = StaticElectionConfig; type StateType = DummyState; } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 5b8e737c1a..35e79a78dd 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -190,7 +190,7 @@ impl + 'static> SystemContextHandl if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { if anchor_leaf.view_number == TYPES::Time::genesis() { let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); - let mut qc = QuorumCertificate::::genesis(); + let mut qc = QuorumCertificate::>::genesis(); qc.set_leaf_commitment(leaf.commit()); let event = Event { view_number: TYPES::Time::genesis(), @@ -226,7 +226,7 @@ impl + 'static> SystemContextHandl // ) -> Result< // ( // Vec<>::Leaf>, - // QuorumCertificate>::Leaf>, + // QuorumCertificate>::Leaf>>, // ), // HotShotError, // > { @@ -333,7 +333,7 @@ impl + 'static> SystemContextHandl #[cfg(feature = "hotshot-testing")] pub fn create_yes_message( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -342,7 +342,7 @@ impl + 'static> SystemContextHandl QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = QuorumCertificate, + Certificate = QuorumCertificate>, >, { let inner = self.hotshot.inner.clone(); diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 9e9df1cd98..dfdb02020e 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Espresso Systems "] [features] default = ["webui"] -webui = ["tide"] +webui = [] # # this only has effect on linux # lossy_network = [ @@ -34,6 +34,7 @@ custom_debug = { workspace = true } derive_builder = "0.12.0" either = { workspace = true } futures = { workspace = true } +hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } @@ -41,19 +42,19 @@ libp2p-noise = { version = "0.43.0", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.106" +serde_json = "1.0.107" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", ] } -# TODO do we want this as non optional?? -tokio-stream = "0.1.14" tracing = { workspace = true } void = "1.0.2" +dashmap = "5.5.3" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } +tokio-stream = "0.1.14" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] libp2p = { workspace = true, features = ["async-std"] } async-std = { workspace = true } @@ -74,5 +75,3 @@ netlink-sys = { git = "https://github.com/espressosystems/netlink.git", version ], optional = true } netlink-packet-generic = { git = "https://github.com/espressosystems/netlink.git", version = "0.2.0", optional = true } -[dev-dependencies] -clap = { version = "4.4", features = ["derive", "env"] } diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs new file mode 100644 index 0000000000..602bb41e16 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/cache.rs @@ -0,0 +1,325 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::{Duration, SystemTime}, +}; + +use async_compatibility_layer::art::async_block_on; +use async_lock::RwLock; +use bincode::Options; +use dashmap::{mapref::one::Ref, DashMap}; +use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; +use hotshot_utils::bincode::bincode_opts; +use snafu::{ResultExt, Snafu}; + +/// Error wrapper type for cache +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum CacheError { + /// Failed to read or write from disk + Disk { + /// source of error + source: std::io::Error, + }, + + /// Failure to serialize the cache + Serialization { + /// source of error + source: Box, + }, + + /// Failure to deserialize the cache + Deserialization { + /// source of error + source: Box, + }, + + /// General cache error + GeneralCache { + /// source of error + source: Box, + }, +} + +#[derive(Clone, derive_builder::Builder, custom_debug::Debug, Default)] +pub struct Config { + #[builder(default = "Some(\"dht.cache\".to_string())")] + pub filename: Option, + #[builder(default = "Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 16)")] + pub expiry: Duration, + #[builder(default = "4")] + pub max_disk_parity_delta: u32, +} + +impl Default for Cache { + fn default() -> Self { + async_block_on(Self::new(Config::default())) + } +} + +pub struct Cache { + /// the cache's config + config: Config, + + /// the cache for records (key -> value) + cache: Arc, Vec>>, + /// the expiries for the dht cache, in order (expiry time -> key) + expiries: Arc>>>, + + /// number of inserts since the last save + disk_parity_delta: Arc, +} + +impl Cache { + pub async fn new(config: Config) -> Self { + let cache = Self { + cache: Arc::new(DashMap::new()), + expiries: Arc::new(RwLock::new(BTreeMap::new())), + config, + disk_parity_delta: Arc::new(AtomicU32::new(0)), + }; + + // try loading from file + if let Err(err) = cache.load().await { + tracing::warn!("failed to load cache from file: {}", err); + }; + + cache + } + + pub async fn load(&self) -> Result<(), CacheError> { + if let Some(filename) = &self.config.filename { + let encoded = std::fs::read(filename).context(DiskSnafu)?; + + let cache: HashMap, Vec)> = bincode_opts() + .deserialize(&encoded) + .context(DeserializationSnafu)?; + + // inline prune and insert + let now = SystemTime::now(); + for (expiry, (key, value)) in cache { + if now < expiry { + self.cache.insert(key.clone(), value); + self.expiries.write().await.insert(expiry, key); + } + } + } + + Ok(()) + } + + pub async fn save(&self) -> Result<(), CacheError> { + if let Some(filename) = &self.config.filename { + // prune first + self.prune().await; + + // serialize + let mut cache_to_write = HashMap::new(); + let expiries = self.expiries.read().await; + for (expiry, key) in &*expiries { + if let Some(entry) = self.cache.get(key) { + cache_to_write.insert(expiry, (key, entry.value().clone())); + } else { + tracing::warn!("key not found in cache: {:?}", key); + Err(CacheError::GeneralCache { + source: Box::new(bincode::ErrorKind::Custom( + "key not found in cache".to_string(), + )), + })?; + }; + } + + let encoded = bincode_opts() + .serialize(&cache_to_write) + .context(SerializationSnafu)?; + + std::fs::write(filename, encoded).context(DiskSnafu)?; + } + + Ok(()) + } + + async fn prune(&self) { + let now = SystemTime::now(); + let mut expiries = self.expiries.write().await; + let mut removed: u32 = 0; + + while let Some((expires, key)) = expiries.pop_first() { + if now > expires { + self.cache.remove(&key); + removed += 1; + } else { + expiries.insert(expires, key); + break; + } + } + + if removed > 0 { + self.disk_parity_delta.fetch_add(removed, Ordering::Relaxed); + } + } + + pub async fn get(&self, key: &Vec) -> Option, Vec>> { + // prune, save if necessary + self.prune().await; + self.save_if_necessary().await; + + // get + self.cache.get(key) + } + + pub async fn insert(&self, key: Vec, value: Vec) { + // insert into cache and expiries + self.cache.insert(key.clone(), value); + self.expiries + .write() + .await + .insert(SystemTime::now() + self.config.expiry, key); + + // save if reached max disk parity delta + self.disk_parity_delta.fetch_add(1, Ordering::Relaxed); + self.save_if_necessary().await; + } + + async fn save_if_necessary(&self) { + let cur_disk_parity_delta = self.disk_parity_delta.load(Ordering::Relaxed); + if cur_disk_parity_delta >= self.config.max_disk_parity_delta { + if let Err(err) = self.save().await { + tracing::error!("failed to save cache to file: {}", err); + }; + } + } +} + +#[cfg(test)] +mod test { + + use super::*; + use async_compatibility_layer::art::async_sleep; + use libp2p_identity::PeerId; + use tracing::instrument; + + /// cache eviction test + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_dht_cache_eviction() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // cache with 1s eviction + let cache = Cache::new(Config { + filename: None, + expiry: Duration::from_secs(1), + max_disk_parity_delta: 4, + }) + .await; + + let (key, value) = (PeerId::random(), PeerId::random()); + + // insert + cache.insert(key.to_bytes(), value.to_bytes()).await; + + // check that it is in the cache and expiries + assert_eq!( + cache.get(&key.to_bytes()).await.unwrap().value(), + &value.to_bytes() + ); + assert_eq!(cache.expiries.read().await.len(), 1); + + // sleep for 1s + async_sleep(Duration::from_secs(1)).await; + + // check that now is evicted + assert!(cache.get(&key.to_bytes()).await.is_none()); + + // check that the cache and expiries are empty + assert!(cache.expiries.read().await.is_empty()); + assert!(cache.cache.is_empty()); + } + + /// cache add test + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_dht_cache_save_load() { + let _ = std::fs::remove_file("test.cache"); + + let cache = Cache::new(Config { + filename: Some("test.cache".to_string()), + expiry: Duration::from_secs(600), + max_disk_parity_delta: 4, + }) + .await; + + // add 10 key-value pairs to the cache + for i in 0u8..10u8 { + let (key, value) = (vec![i; 1], vec![i + 1; 1]); + cache.insert(key, value).await; + } + + // save the cache + cache.save().await.unwrap(); + + // load the cache + let cache = Cache::new(Config { + filename: Some("test.cache".to_string()), + expiry: Duration::from_secs(600), + max_disk_parity_delta: 4, + }) + .await; + + // check that the cache has the 10 key-value pairs + for i in 0u8..10u8 { + let (key, value) = (vec![i; 1], vec![i + 1; 1]); + assert_eq!(cache.get(&key).await.unwrap().value(), &value); + } + + // delete the cache file + let _ = std::fs::remove_file("test.cache"); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_dht_disk_parity() { + let _ = std::fs::remove_file("test.cache"); + + let cache = Cache::new(Config { + // tests run sequentially, shouldn't matter + filename: Some("test.cache".to_string()), + expiry: Duration::from_secs(600), + max_disk_parity_delta: 4, + }) + .await; + + // insert into cache + for i in 0..3 { + cache.insert(vec![i; 1], vec![i + 1; 1]).await; + } + + // check that file is not saved + assert!(!std::path::Path::new("test.cache").exists()); + + // insert into cache + cache.insert(vec![0; 1], vec![1; 1]).await; + + // check that file is saved + assert!(std::path::Path::new("test.cache").exists()); + + // delete the cache file + _ = std::fs::remove_file("test.cache"); + } +} diff --git a/libp2p-networking/src/network/behaviours/dht.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs similarity index 94% rename from libp2p-networking/src/network/behaviours/dht.rs rename to libp2p-networking/src/network/behaviours/dht/mod.rs index 46adb05d0c..7086b6dab1 100644 --- a/libp2p-networking/src/network/behaviours/dht.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -5,6 +5,9 @@ use std::{ time::Duration, }; +mod cache; + +use async_compatibility_layer::art::async_block_on; use futures::channel::oneshot::Sender; use libp2p::{ kad::{ @@ -21,6 +24,8 @@ use tracing::{error, info, warn}; pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; const MAX_DHT_QUERY_SIZE: usize = 5; +use self::cache::Cache; + use super::exponential_backoff::ExponentialBackoff; /// Behaviour wrapping libp2p's kademlia @@ -56,6 +61,8 @@ pub struct DHTBehaviour { pub peer_id: PeerId, /// replication factor pub replication_factor: NonZeroUsize, + /// kademlia cache + cache: Cache, } /// State of bootstrapping @@ -106,10 +113,11 @@ impl DHTBehaviour { /// Create a new DHT behaviour #[must_use] - pub fn new( + pub async fn new( mut kadem: Kademlia, pid: PeerId, replication_factor: NonZeroUsize, + cache_location: Option, ) -> Self { // needed because otherwise we stay in client mode when testing locally // and don't publish keys stuff @@ -138,6 +146,13 @@ impl DHTBehaviour { }, in_progress_get_closest_peers: HashMap::default(), replication_factor, + cache: Cache::new( + cache::ConfigBuilder::default() + .filename(cache_location) + .build() + .unwrap_or_default(), + ) + .await, } } @@ -223,17 +238,26 @@ impl DHTBehaviour { return; } - let qid = self.kadem.get_record(key.clone().into()); - let query = KadGetQuery { - backoff, - progress: DHTProgress::InProgress(qid), - notify: chan, - num_replicas: factor, - key, - retry_count: retry_count - 1, - records: HashMap::default(), - }; - self.in_progress_get_record_queries.insert(qid, query); + // check cache before making the request + if let Some(entry) = async_block_on(self.cache.get(&key)) { + // exists in cache + if chan.send(entry.value().clone()).is_err() { + warn!("Get DHT: channel closed before get record request result could be sent"); + } + } else { + // doesn't exist in cache, actually propagate request + let qid = self.kadem.get_record(key.clone().into()); + let query = KadGetQuery { + backoff, + progress: DHTProgress::InProgress(qid), + notify: chan, + num_replicas: factor, + key, + retry_count: retry_count - 1, + records: HashMap::default(), + }; + self.in_progress_get_record_queries.insert(qid, query); + } } /// update state based on recv-ed get query @@ -279,6 +303,10 @@ impl DHTBehaviour { .into_iter() .find(|(_, v)| *v >= NUM_REPLICATED_TO_TRUST) { + // insert into cache + async_block_on(self.cache.insert(key, r.clone())); + + // return value if notify.send(r).is_err() { warn!("Get DHT: channel closed before get record request result could be sent"); } diff --git a/libp2p-networking/src/network/behaviours/gossip.rs b/libp2p-networking/src/network/behaviours/gossip.rs index 1119ad27f3..bfc946c9b5 100644 --- a/libp2p-networking/src/network/behaviours/gossip.rs +++ b/libp2p-networking/src/network/behaviours/gossip.rs @@ -4,13 +4,13 @@ use std::{ }; use libp2p::{ - gossipsub::{Behaviour, Event, IdentTopic, TopicHash}, + gossipsub::{Behaviour, Event, IdentTopic, PublishError::Duplicate, TopicHash}, swarm::{NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, ToSwarm}, Multiaddr, }; use libp2p_identity::PeerId; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use super::exponential_backoff::ExponentialBackoff; @@ -214,8 +214,12 @@ impl GossipBehaviour { /// Publish a given gossip pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { let res = self.gossipsub.publish(topic.clone(), contents.clone()); - if res.is_err() { - error!("error publishing gossip message {:?}", res); + if let Err(e) = res { + if matches!(e, Duplicate) { + debug!("duplicate gossip message"); + } else { + error!("error publishing gossip message {:?}", e); + } self.in_progress_gossip.push_back((topic, contents)); } } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 37c9683428..835058bf84 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -31,6 +31,7 @@ use async_compatibility_layer::{ }; use either::Either; use futures::{select, FutureExt, StreamExt}; +use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::{ core::{muxing::StreamMuxerBox, transport::Boxed}, gossipsub::{ @@ -239,7 +240,7 @@ impl NetworkNode { // 8 hours by default let record_republication_interval = config .republication_interval - .unwrap_or(Duration::from_secs(28800)); + .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC)); let ttl = Some(config.ttl.unwrap_or(16 * record_republication_interval)); kconfig .set_parallelism(NonZeroUsize::new(1).unwrap()) @@ -268,7 +269,9 @@ impl NetworkNode { config .replication_factor .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), - ), + config.dht_cache_location.clone(), + ) + .await, identify, DMBehaviour::new(request_response), ); diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 4d19b6f516..d97097e8e1 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -24,6 +24,10 @@ pub struct NetworkNodeConfig { #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, + /// location of the dht cache, default is None + #[builder(default = "None")] + pub dht_cache_location: Option, + #[builder(default)] /// parameters for gossipsub mesh network pub mesh_params: Option, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 3a7236c353..c4e6460666 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -112,7 +112,6 @@ impl NetworkNodeHandle { let kill_switch = Mutex::new(Some(kill_switch)); let recv_kill = Mutex::new(Some(recv_kill)); - Ok(NetworkNodeHandle { network_config: config, state: std::sync::Arc::default(), @@ -283,6 +282,23 @@ impl NetworkNodeHandle { r.await.map_err(|_| NetworkNodeHandleError::RecvError) } + /// Looks up a node's `PeerId` and attempts to validate routing + /// # Errors + /// if the peer was unable to be looked up (did not provide a response, DNE) + pub async fn lookup_node Deserialize<'a> + Serialize>( + &self, + key: V, + dht_timeout: Duration, + ) -> Result { + // get record (from DHT) + let pid = self.get_record_timeout::(&key, dht_timeout).await?; + + // pid lookup for routing + self.lookup_pid(pid).await?; + + Ok(pid) + } + /// Insert a record into the kademlia DHT /// # Errors /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index ae3277b29f..eefbdcf37b 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -12,7 +12,7 @@ use libp2p_networking::network::{ use serde::{Deserialize, Serialize}; use snafu::ResultExt; use std::{fmt::Debug, sync::Arc, time::Duration}; -use tracing::{error, info, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; @@ -304,7 +304,7 @@ async fn run_dht_rounds( ) { let mut rng = rand::thread_rng(); for i in 0..num_rounds { - error!("round: {:?}", i); + debug!("begin round {}", i); let msg_handle = get_random_handle(handles, &mut rng); let mut key = vec![0; DHT_KV_PADDING]; key.push((starting_val + i) as u8); diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 3febbbce65..4a1cfad69c 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -16,14 +16,14 @@ blake3 = { workspace = true, features = ["traits-preview"] } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } hotshot-utils = { path = "../utils" } libp2p-networking = { workspace = true } -nll = { workspace = true } tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } serde_json = "1.0.96" snafu = { workspace = true } -toml = "0.5.9" # TODO GG upgrade to toml = { workspace = true } +# TODO upgrade to toml = { workspace = true } https://github.com/EspressoSystems/HotShot/issues/1698 +toml = "0.5.9" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/default-libp2p-run-config.toml index 5d43c940fa..a353ed06f5 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/default-libp2p-run-config.toml @@ -1,5 +1,5 @@ -rounds = 100 -transactions_per_round = 10 +rounds = 10 +transactions_per_round = 12 node_index = 0 seed = [ 0, @@ -39,7 +39,6 @@ padding = 10 start_delay_seconds = 60 [libp2p_config] -num_bootstrap_nodes = 5 index_ports = true bootstrap_mesh_n_high = 4 bootstrap_mesh_n_low = 4 @@ -49,18 +48,15 @@ mesh_n_high = 4 mesh_n_low = 4 mesh_outbound_min = 2 mesh_n = 4 -next_view_timeout = 10 -propose_min_round_time = 0 -propose_max_round_time = 10 online_time = 10 -num_txn_per_round = 10 base_port = 9000 [config] -total_nodes = 5 +total_nodes = 10 +committee_nodes = 5 max_transactions = 100 min_transactions = 0 -next_view_timeout = 10000 +next_view_timeout = 30000 timeout_ratio = [ 11, 10, @@ -73,6 +69,8 @@ num_bootstrap = 5 secs = 0 nanos = 0 +# TODO (Keyao) Clean up configuration parameters. +# [config.propose_max_round_time] -secs = 1 +secs = 2 nanos = 0 diff --git a/orchestrator/default-run-config.toml b/orchestrator/default-run-config.toml index fe34d75811..ee8333f80a 100644 --- a/orchestrator/default-run-config.toml +++ b/orchestrator/default-run-config.toml @@ -55,8 +55,10 @@ num_bootstrap = 4 secs = 0 nanos = 0 +# TODO (Keyao) Clean up configuration parameters. +# [config.propose_max_round_time] -secs = 1 +secs = 2 nanos = 0 [web_server_config] diff --git a/orchestrator/default-web-server-run-config.toml b/orchestrator/default-web-server-run-config.toml index 0ea0f86ccf..c5d0bd0253 100644 --- a/orchestrator/default-web-server-run-config.toml +++ b/orchestrator/default-web-server-run-config.toml @@ -56,8 +56,10 @@ num_bootstrap = 4 secs = 0 nanos = 0 +# TODO (Keyao) Clean up configuration parameters. +# [config.propose_max_round_time] -secs = 1 +secs = 2 nanos = 0 [web_server_config] diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 061ab256d5..f61911d944 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,4 +1,5 @@ use hotshot_types::{ExecutionType, HotShotConfig}; +use std::marker::PhantomData; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, @@ -7,7 +8,7 @@ use std::{ #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, - pub num_bootstrap_nodes: u64, + pub num_bootstrap_nodes: usize, pub public_ip: IpAddr, pub base_port: u16, pub node_index: u64, @@ -21,15 +22,14 @@ pub struct Libp2pConfig { pub mesh_outbound_min: usize, pub mesh_n: usize, pub next_view_timeout: u64, - pub propose_min_round_time: u64, - pub propose_max_round_time: u64, + pub propose_min_round_time: Duration, + pub propose_max_round_time: Duration, pub online_time: u64, - pub num_txn_per_round: u64, + pub num_txn_per_round: usize, } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfigFile { - pub num_bootstrap_nodes: u64, pub index_ports: bool, pub bootstrap_mesh_n_high: usize, pub bootstrap_mesh_n_low: usize, @@ -39,11 +39,7 @@ pub struct Libp2pConfigFile { pub mesh_n_low: usize, pub mesh_outbound_min: usize, pub mesh_n: usize, - pub next_view_timeout: u64, - pub propose_min_round_time: u64, - pub propose_max_round_time: u64, pub online_time: u64, - pub num_txn_per_round: u64, pub base_port: u16, } @@ -58,6 +54,10 @@ pub struct WebServerConfig { pub struct NetworkConfig { pub rounds: usize, pub transactions_per_round: usize, + pub num_bootrap: usize, + pub next_view_timeout: u64, + pub propose_min_round_time: Duration, + pub propose_max_round_time: Duration, pub node_index: u64, pub seed: [u8; 32], pub padding: usize, @@ -65,9 +65,10 @@ pub struct NetworkConfig { pub key_type_name: String, pub election_config_type_name: String, pub libp2p_config: Option, - pub config: HotShotConfig, + pub config: HotShotConfig, pub web_server_config: Option, pub da_web_server_config: Option, + _key_type_phantom: PhantomData, } impl Default for NetworkConfig { @@ -85,6 +86,11 @@ impl Default for NetworkConfig { election_config_type_name: std::any::type_name::().to_string(), web_server_config: None, da_web_server_config: None, + _key_type_phantom: PhantomData, + next_view_timeout: 10, + num_bootrap: 5, + propose_min_round_time: Duration::from_secs(0), + propose_max_round_time: Duration::from_secs(10), } } } @@ -123,10 +129,14 @@ impl From for NetworkConfig { rounds: val.rounds, transactions_per_round: val.transactions_per_round, node_index: 0, + num_bootrap: val.config.num_bootstrap, + next_view_timeout: val.config.next_view_timeout, + propose_max_round_time: val.config.propose_max_round_time, + propose_min_round_time: val.config.propose_min_round_time, seed: val.seed, padding: val.padding, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { - num_bootstrap_nodes: libp2p_config.num_bootstrap_nodes, + num_bootstrap_nodes: val.config.num_bootstrap, index_ports: libp2p_config.index_ports, bootstrap_nodes: Vec::new(), public_ip: IpAddr::V4(Ipv4Addr::UNSPECIFIED), @@ -140,11 +150,11 @@ impl From for NetworkConfig { mesh_n_low: libp2p_config.mesh_n_low, mesh_outbound_min: libp2p_config.mesh_outbound_min, mesh_n: libp2p_config.mesh_n, - next_view_timeout: libp2p_config.next_view_timeout, - propose_min_round_time: libp2p_config.propose_min_round_time, - propose_max_round_time: libp2p_config.propose_max_round_time, + next_view_timeout: val.config.next_view_timeout, + propose_min_round_time: val.config.propose_min_round_time, + propose_max_round_time: val.config.propose_max_round_time, online_time: libp2p_config.online_time, - num_txn_per_round: libp2p_config.num_txn_per_round, + num_txn_per_round: val.transactions_per_round, }), config: val.config.into(), key_type_name: std::any::type_name::().to_string(), @@ -152,6 +162,7 @@ impl From for NetworkConfig { start_delay_seconds: val.start_delay_seconds, web_server_config: val.web_server_config, da_web_server_config: val.da_web_server_config, + _key_type_phantom: PhantomData, } } } @@ -183,14 +194,13 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, } -impl From for HotShotConfig { +impl From for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, total_nodes: val.total_nodes, max_transactions: val.max_transactions, min_transactions: val.min_transactions, - known_nodes: Vec::new(), known_nodes_with_stake: Vec::new(), da_committee_size: val.committee_nodes, next_view_timeout: val.next_view_timeout, @@ -220,7 +230,7 @@ fn default_config() -> HotShotConfigFile { total_nodes: NonZeroUsize::new(10).unwrap(), committee_nodes: 5, max_transactions: NonZeroUsize::new(100).unwrap(), - min_transactions: 0, + min_transactions: 1, next_view_timeout: 10000, timeout_ratio: (11, 10), round_start_delay: 1, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 4391fc43e6..d3faa7d410 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -26,7 +26,6 @@ use libp2p::identity::{ Keypair, }; -/// yeesh maybe we should just implement SignatureKey for this... pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); @@ -117,9 +116,7 @@ where if self.config.libp2p_config.clone().is_some() { let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); // Designate node as bootstrap node and store its identity information - if libp2p_config_clone.bootstrap_nodes.len() - < libp2p_config_clone.num_bootstrap_nodes.try_into().unwrap() - { + if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { let port_index = match libp2p_config_clone.index_ports { true => node_index, false => 0, @@ -146,9 +143,7 @@ where ) -> Result, ServerError> { if self.config.libp2p_config.is_some() { let libp2p_config = self.config.clone().libp2p_config.unwrap(); - if libp2p_config.bootstrap_nodes.len() - < libp2p_config.num_bootstrap_nodes.try_into().unwrap() - { + if libp2p_config.bootstrap_nodes.len() < libp2p_config.num_bootstrap_nodes { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, message: "Not enough bootstrap nodes have registered".to_string(), @@ -174,7 +169,15 @@ where fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; println!("Nodes connected: {}", self.nodes_connected); - if self.nodes_connected >= self.config.config.known_nodes.len().try_into().unwrap() { + if self.nodes_connected + >= self + .config + .config + .known_nodes_with_stake + .len() + .try_into() + .unwrap() + { self.start = true; } Ok(()) diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 71b7e467d8..08c900b53b 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -10,14 +10,13 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } either = { workspace = true } futures = { workspace = true } -nll = { workspace = true } serde = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } atomic_enum = "0.2.0" pin-project = "1.1.3" -async-stream = "0.3.5" +hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-task = { path = "../task", default-features = false } time = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 12d7313075..11a98de85c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,23 +1,21 @@ use crate::events::SequencingHotShotEvent; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - async_primitives::subscribable_rwlock::ReadView, -}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use bincode::Options; use bitvec::prelude::*; -use commit::Committable; +use commit::{Commitment, Committable}; use core::time::Duration; use either::{Either, Left, Right}; use futures::FutureExt; +use hotshot_constants::LOOK_AHEAD; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::vote::QuorumVoteAccumulator; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, consensus::{Consensus, View}, @@ -31,12 +29,12 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType, SequencingQuorumEx}, signature_key::SignatureKey, state::ConsensusTime, - Block, + BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{QuorumVote, VoteAccumulator, VoteType}, + vote::{QuorumVote, VoteType}, }; -use hotshot_utils::bincode::bincode_opts; + use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, @@ -66,14 +64,14 @@ pub struct SequencingConsensusTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// The global task registry @@ -128,7 +126,7 @@ pub struct SequencingConsensusTaskState< pub id: u64, /// The most Recent QC we've formed from votes, if we've formed it. - pub qc: Option>, + pub qc: Option>>, } /// State for the vote collection task. This handles the building of a QC from a votes received @@ -140,16 +138,23 @@ pub struct VoteCollectionTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, { /// the quorum exchange pub quorum_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulator for votes - pub accumulator: - Either, QuorumCertificate>, + pub accumulator: Either< + >> as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Commitment>, + >>::VoteAccumulator, + QuorumCertificate>>, + >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, /// The event stream shared by all tasks @@ -165,8 +170,8 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, { } @@ -185,37 +190,32 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, { - // TODO ED Emit a view change event upon new proposal? match event { - SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote { - QuorumVote::Yes(vote) => { + SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { + QuorumVote::Yes(vote_internal) => { // For the case where we receive votes after we've made a certificate if state.accumulator.is_right() { return (None, state); } - if vote.current_view != state.cur_view { + if vote_internal.current_view != state.cur_view { error!( "Vote view does not match! vote view is {} current view is {}", - *vote.current_view, *state.cur_view + *vote_internal.current_view, *state.cur_view ); return (None, state); } let accumulator = state.accumulator.left().unwrap(); - match state.quorum_exchange.accumulate_vote( - &vote.signature.0, - &vote.signature.1, - vote.leaf_commitment, - vote.vote_data, - vote.vote_token.clone(), - state.cur_view, + + match state.quorum_exchange.accumulate_vote_2( accumulator, - None, + &vote, + &vote_internal.leaf_commitment, ) { Either::Left(acc) => { state.accumulator = Either::Left(acc); @@ -272,14 +272,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -367,10 +367,7 @@ where ); if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.current_view() - ); + debug!("Sending vote to next quorum leader {:?}", vote.get_view()); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) .await; @@ -449,12 +446,8 @@ where }; - // TODO ED Only publish event in vote if able if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.current_view() - ); + debug!("Sending vote to next quorum leader {:?}", vote.get_view()); self.event_stream .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) .await; @@ -495,6 +488,25 @@ where self.cur_view = new_view; self.current_proposal = None; + if new_view == TYPES::Time::new(1) { + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .await; + } + + // Poll the future leader for lookahead + let lookahead_view = new_view + LOOK_AHEAD; + if !self.quorum_exchange.is_leader(lookahead_view) { + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( + *lookahead_view, + self.quorum_exchange.get_leader(lookahead_view), + )) + .await; + } + // Start polling for proposals for the new view self.quorum_exchange .network() @@ -550,7 +562,7 @@ where let view = proposal.data.get_view_number(); if view < self.cur_view { - error!("view too high {:?}", proposal.data.clone()); + debug!("Proposal is from an older view {:?}", proposal.data.clone()); return; } @@ -855,46 +867,10 @@ where } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - let mut included_txn_size = 0; - let mut included_txn_count = 0; - let txns = consensus.transactions.cloned().await; - // store transactions in this block we never added to our transactions. - let _ = included_txns_set.iter().map(|hash| { - if !txns.contains_key(hash) { - consensus.seen_transactions.insert(*hash); - } - }); - drop(txns); - consensus - .transactions - .modify(|txns| { - *txns = txns - .drain() - .filter(|(txn_hash, txn)| { - if included_txns_set.contains(txn_hash) { - included_txn_count += 1; - included_txn_size += bincode_opts() - .serialized_size(txn) - .unwrap_or_default(); - false - } else { - true - } - }) - .collect(); - }) - .await; - - consensus - .metrics - .outstanding_transactions - .update(-included_txn_count); - consensus - .metrics - .outstanding_transactions_memory_size - .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); - debug!("about to publish decide"); + self.event_stream + .publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())) + .await; let decide_sent = self.output_event_stream.publish(Event { view_number: consensus.last_decided_view, event: EventType::Decide { @@ -955,26 +931,26 @@ where } } SequencingHotShotEvent::QuorumVoteRecv(vote) => { - debug!("Received quroum vote: {:?}", vote.current_view()); + debug!("Received quroum vote: {:?}", vote.get_view()); - if !self.quorum_exchange.is_leader(vote.current_view() + 1) { + if !self.quorum_exchange.is_leader(vote.get_view() + 1) { error!( "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.current_view() + 1, - self.quorum_exchange.is_leader(vote.current_view() + 2) + *vote.get_view() + 1, + self.quorum_exchange.is_leader(vote.get_view() + 2) ); return; } - match vote { - QuorumVote::Yes(vote) => { + match vote.clone() { + QuorumVote::Yes(vote_internal) => { let handle_event = HandleEvent(Arc::new(move |event, state| { async move { vote_handle(state, event).await }.boxed() })); let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { - if vote.current_view > *collection_view { + if vote_internal.current_view > *collection_view { // ED I think we'd want to let that task timeout to avoid a griefing vector self.registry.shutdown_task(*collection_task).await; } @@ -983,37 +959,31 @@ where TYPES::Time::new(0) }; - let acc = VoteAccumulator { + // Todo check if we are the leader + let new_accumulator = QuorumVoteAccumulator { total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), yes_vote_outcomes: HashMap::new(), no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), + success_threshold: self.quorum_exchange.success_threshold(), failure_threshold: self.quorum_exchange.failure_threshold(), + sig_lists: Vec::new(), signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, }; - // Todo check if we are the leader - let accumulator = self.quorum_exchange.accumulate_vote( - &vote.clone().signature.0, - &vote.clone().signature.1, - vote.clone().leaf_commitment, - vote.clone().vote_data.clone(), - vote.clone().vote_token.clone(), - vote.clone().current_view, - acc, - None, + let accumulator = self.quorum_exchange.accumulate_vote_2( + new_accumulator, + &vote, + &vote_internal.clone().leaf_commitment, ); - if vote.current_view > collection_view { + if vote_internal.current_view > collection_view { let state = VoteCollectionTaskState { quorum_exchange: self.quorum_exchange.clone(), accumulator, - cur_view: vote.current_view, + cur_view: vote_internal.current_view, event_stream: self.event_stream.clone(), id: self.id, }; @@ -1033,17 +1003,22 @@ where let id = builder.get_task_id().unwrap(); let stream_id = builder.get_stream_id().unwrap(); - self.vote_collector = Some((vote.current_view, id, stream_id)); + self.vote_collector = Some((vote_internal.current_view, id, stream_id)); let _task = async_spawn(async move { VoteCollectionTypes::build(builder).launch().await; }); - debug!("Starting vote handle for view {:?}", vote.current_view); + debug!( + "Starting vote handle for view {:?}", + vote_internal.current_view + ); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream .direct_message( stream_id, - SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote)), + SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes( + vote_internal, + )), ) .await; } @@ -1102,7 +1077,17 @@ where self.update_view(view + 1).await; } } + SequencingHotShotEvent::VidCertRecv(cert) => { + debug!("VID cert received for view ! {}", *cert.view_number); + let view = cert.view_number; + self.certs.insert(view, cert); // TODO new cert type for VID https://github.com/EspressoSystems/HotShot/issues/1701 + + // TODO Make sure we aren't voting for an arbitrarily old round for no reason + if self.vote_if_able().await { + self.update_view(view + 1).await; + } + } SequencingHotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {}", *new_view); @@ -1167,10 +1152,9 @@ where /// Sends a proposal if possible from the high qc we have pub async fn publish_proposal_if_able( &mut self, - _qc: QuorumCertificate, + _qc: QuorumCertificate>, view: TYPES::Time, ) -> bool { - // TODO ED This should not be qc view number + 1 if !self.quorum_exchange.is_leader(view) { error!( "Somehow we formed a QC but are not the leader for the next view {:?}", @@ -1231,11 +1215,9 @@ where // TODO do some sort of sanity check on the view number that it matches decided } + // let block_commitment = Some(self.block.commit()); if let Some(block) = &self.block { let block_commitment = block.commit(); - if block_commitment == TYPES::BlockType::new().commit() { - debug!("Block is generic block! {:?}", self.cur_view); - } let leaf = SequencingLeaf { view_number: view, @@ -1298,14 +1280,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } @@ -1347,14 +1329,14 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>, - Commitment = SequencingLeaf, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { if let SequencingHotShotEvent::Shutdown = event { @@ -1375,6 +1357,7 @@ pub fn consensus_event_filter>( | SequencingHotShotEvent::QuorumVoteRecv(_) | SequencingHotShotEvent::QCFormed(_) | SequencingHotShotEvent::DACRecv(_) + | SequencingHotShotEvent::VidCertRecv(_) | SequencingHotShotEvent::ViewChange(_) | SequencingHotShotEvent::SendDABlockData(_) | SequencingHotShotEvent::Timeout(_) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b6244c972a..1100f4e078 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,12 +1,9 @@ use crate::events::SequencingHotShotEvent; -use async_compatibility_layer::{ - art::{async_spawn, async_timeout}, - async_primitives::subscribable_rwlock::ReadView, -}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use bincode::config::Options; + use bitvec::prelude::*; -use commit::Committable; +use commit::{Commitment, Committable}; use either::{Either, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -15,11 +12,14 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::traits::election::SignedCertificate; +use hotshot_types::vote::DAVoteAccumulator; +use hotshot_types::vote::VoteType; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, data::{DAProposal, ProposalType, SequencingLeaf}, - message::{CommitteeConsensusMessage, Message, Proposal, SequencingMessage}, + message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership}, @@ -27,18 +27,14 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, - Block, State, + BlockPayload, }, utils::ViewInner, - vote::VoteAccumulator, }; -use hotshot_utils::bincode::bincode_opts; + use snafu::Snafu; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::Instant, -}; +use std::marker::PhantomData; +use std::{collections::HashMap, sync::Arc}; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] @@ -59,7 +55,7 @@ pub struct DATaskState< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// The state's api @@ -70,7 +66,6 @@ pub struct DATaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - // pub transactions: Arc>>, /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>>, @@ -96,15 +91,22 @@ pub struct DAVoteCollectionTaskState< TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// the committee exchange pub committee_exchange: Arc>, - /// the vote accumulator - pub accumulator: - Either, DACertificate>, - // TODO ED Make this just "view" since it is only for this task + #[allow(clippy::type_complexity)] + /// Accumulates DA votes + pub accumulator: Either< + as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Commitment, + >>::VoteAccumulator, + DACertificate, + >, /// the current view pub cur_view: TYPES::Time, /// event stream for channel events @@ -120,7 +122,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } @@ -138,7 +140,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { match event { @@ -153,21 +155,16 @@ where } let accumulator = state.accumulator.left().unwrap(); - match state.committee_exchange.accumulate_vote( - &vote.signature.0, - &vote.signature.1, - vote.block_commitment, - vote.vote_data, - vote.vote_token.clone(), - state.cur_view, + + match state.committee_exchange.accumulate_vote_2( accumulator, - None, + &vote, + &vote.block_commitment, ) { - Left(acc) => { - state.accumulator = Either::Left(acc); - // debug!("Not enough DA votes! "); - return (None, state); + Left(new_accumulator) => { + state.accumulator = either::Left(new_accumulator); } + Right(dac) => { debug!("Sending DAC! {:?}", dac.view_number); state @@ -192,8 +189,51 @@ where } } } + SequencingHotShotEvent::VidVoteRecv(vote) => { + // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + + debug!("VID vote recv, collection task {:?}", vote.get_view()); + // panic!("Vote handle received DA vote for view {}", *vote.current_view); + + let accumulator = state.accumulator.left().unwrap(); + + match state.committee_exchange.accumulate_vote_2( + accumulator, + &vote, + &vote.block_commitment, + ) { + Left(new_accumulator) => { + state.accumulator = either::Left(new_accumulator); + } + + Right(vid_cert) => { + debug!("Sending VID cert! {:?}", vid_cert.view_number); + state + .event_stream + .publish(SequencingHotShotEvent::VidCertSend( + vid_cert.clone(), + state.committee_exchange.public_key().clone(), + )) + .await; + + state.accumulator = Right(vid_cert.clone()); + state + .committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *vid_cert.view_number, + )) + .await; + + // Return completed at this point + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } + } SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), - _ => {} + _ => { + error!("unexpected event {:?}", event); + } } (None, state) } @@ -212,46 +252,16 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] - pub async fn handle_event( &mut self, event: SequencingHotShotEvent, ) -> Option { match event { - SequencingHotShotEvent::TransactionsRecv(transactions) => { - // TODO ED Add validation checks - - let mut consensus = self.consensus.write().await; - consensus - .get_transactions() - .modify(|txns| { - for transaction in transactions { - let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); - - // If we didn't already know about this transaction, update our mempool metrics. - if !consensus.seen_transactions.remove(&transaction.commit()) - && txns.insert(transaction.commit(), transaction).is_none() - { - consensus.metrics.outstanding_transactions.update(1); - consensus - .metrics - .outstanding_transactions_memory_size - .update(i64::try_from(size).unwrap_or_else(|e| { - warn!("Conversion failed: {e}. Using the max value."); - i64::MAX - })); - } - } - }) - .await; - - return None; - } SequencingHotShotEvent::DAProposalRecv(proposal, sender) => { debug!( "DA proposal received for view: {:?}", @@ -298,7 +308,7 @@ where } Ok(Some(vote_token)) => { // Generate and send vote - let message = self.committee_exchange.create_da_message( + let vote = self.committee_exchange.create_da_message( block_commitment, view, vote_token, @@ -307,12 +317,10 @@ where // ED Don't think this is necessary? // self.cur_view = view; - if let CommitteeConsensusMessage::DAVote(vote) = message { - debug!("Sending vote to the DA leader {:?}", vote.current_view); - self.event_stream - .publish(SequencingHotShotEvent::DAVoteSend(vote)) - .await; - } + debug!("Sending vote to the DA leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::DAVoteSend(vote)) + .await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -356,32 +364,25 @@ where } else { TYPES::Time::new(0) }; - let acc = VoteAccumulator { - total_vote_outcomes: HashMap::new(), + + let new_accumulator = DAVoteAccumulator { da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), success_threshold: self.committee_exchange.success_threshold(), - failure_threshold: self.committee_exchange.failure_threshold(), sig_lists: Vec::new(), signers: bitvec![0; self.committee_exchange.total_nodes()], + phantom: PhantomData, }; - let accumulator = self.committee_exchange.accumulate_vote( - &vote.clone().signature.0, - &vote.clone().signature.1, - vote.clone().block_commitment, - vote.clone().vote_data.clone(), - vote.clone().vote_token.clone(), - vote.clone().current_view, - acc, - None, + + let accumulator = self.committee_exchange.accumulate_vote_2( + new_accumulator, + &vote, + &vote.clone().block_commitment, ); + if view > collection_view { let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), + accumulator, cur_view: view, event_stream: self.event_stream.clone(), @@ -412,7 +413,163 @@ where .await; }; } - // TODO ED Update high QC through QCFormed event + SequencingHotShotEvent::VidVoteRecv(vote) => { + // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + + // warn!( + // "VID vote recv, Main Task {:?}, key: {:?}", + // vote.current_view, + // self.committee_exchange.public_key() + // ); + // Check if we are the leader and the vote is from the sender. + let view = vote.current_view; + if !self.committee_exchange.is_leader(view) { + error!( + "We are not the VID leader for view {} are we leader for next view? {}", + *view, + self.committee_exchange.is_leader(view + 1) + ); + return None; + } + + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = + if let Some((collection_view, collection_id, _)) = &self.vote_collector { + // TODO: Is this correct for consecutive leaders? + if view > *collection_view { + // warn!("shutting down for view {:?}", collection_view); + self.registry.shutdown_task(*collection_id).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; + + let new_accumulator = DAVoteAccumulator { + da_vote_outcomes: HashMap::new(), + success_threshold: self.committee_exchange.success_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.committee_exchange.total_nodes()], + phantom: PhantomData, + }; + + let accumulator = self.committee_exchange.accumulate_vote_2( + new_accumulator, + &vote, + &vote.clone().block_commitment, + ); + + if view > collection_view { + let state = DAVoteCollectionTaskState { + committee_exchange: self.committee_exchange.clone(), + + accumulator, + cur_view: view, + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "VID Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!(event, SequencingHotShotEvent::VidVoteRecv(_)) + })); + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + let _task = + async_spawn( + async move { DAVoteCollectionTypes::build(builder).launch().await }, + ); + self.vote_collector = Some((view, id, stream_id)); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message(stream_id, SequencingHotShotEvent::VidVoteRecv(vote)) + .await; + }; + } + SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { + // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 + debug!( + "VID disperse received for view: {:?}", + disperse.data.get_view_number() + ); + + // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view + let view = disperse.data.get_view_number(); + + // Allow a DA proposal that is one view older, in case we have voted on a quorum + // proposal and updated the view. + // `self.cur_view` should be at least 1 since there is a view change before getting + // the `DAProposalRecv` event. Otherewise, the view number subtraction below will + // cause an overflow error. + if view < self.cur_view - 1 { + warn!("Throwing away VID disperse data that is more than one view older"); + return None; + } + + debug!("VID disperse data is fresh."); + let block_commitment = disperse.data.commitment; + + // ED Is this the right leader? + let view_leader_key = self.committee_exchange.get_leader(view); + if view_leader_key != sender { + error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); + return None; + } + + if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { + error!("Could not verify VID proposal sig."); + return None; + } + + let vote_token = self.committee_exchange.make_vote_token(view); + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for VID quorum on {:?}", view); + } + Ok(Some(vote_token)) => { + // Generate and send vote + let vote = self.committee_exchange.create_vid_message( + block_commitment, + view, + vote_token, + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + debug!("Sending vote to the VID leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::VidVoteSend(vote)) + .await; + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: block_commitment, + }, + }); + + // Record the block we have promised to make available. + // TODO https://github.com/EspressoSystems/HotShot/issues/1692 + // consensus.saved_blocks.insert(proposal.data.deltas); + } + } + } SequencingHotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; @@ -425,7 +582,6 @@ where // Inject view info into network // ED I think it is possible that you receive a quorum proposal, vote on it and update your view before the da leader has sent their proposal, and therefore you skip polling for this view? - // TODO ED Only poll if you are on the committee let is_da = self .committee_exchange .membership() @@ -451,8 +607,6 @@ where .await; } - // TODO ED Make this a new task so it doesn't block main DA task - // If we are not the next leader (DA leader for this view) immediately exit if !self.committee_exchange.is_leader(self.cur_view + 1) { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); @@ -466,64 +620,19 @@ where .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view + 1)) .await; - // ED Copy of parent_leaf() function from sequencing leader - - let consensus = self.consensus.read().await; - let parent_view_number = &consensus.high_qc.view_number; - - let Some(parent_view) = consensus.state_map.get(parent_view_number) else { - error!( - "Couldn't find high QC parent in state map. Parent view {:?}", - parent_view_number - ); - return None; - }; - let Some(leaf) = parent_view.get_leaf_commitment() else { - error!( - ?parent_view_number, - ?parent_view, - "Parent of high QC points to a view without a proposal" - ); - return None; - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - error!("Failed to find high QC parent."); - return None; - }; - let parent_leaf = leaf.clone(); - - // Prepare the DA Proposal - // let Some(parent_leaf) = self.parent_leaf().await else { - // warn!("Couldn't find high QC parent in state map."); - // return None; - // }; - - drop(consensus); - - // ED This is taking a really long time to return, since is based on application - // - let mut block = ::StateType::next_block(None); - let txns = self.wait_for_transactions(parent_leaf).await?; - + return None; + } + SequencingHotShotEvent::BlockReady(block, view) => { self.committee_exchange .network() - .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions( - *self.cur_view + 1, - )) + .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - for txn in txns { - if let Ok(new_block) = block.add_transaction_raw(&txn) { - block = new_block; - continue; - } - } - let signature = self.committee_exchange.sign_da_proposal(&block.commit()); let data: DAProposal = DAProposal { deltas: block.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? - view_number: self.cur_view + 1, + view_number: view, }; debug!("Sending DA proposal for view {:?}", data.view_number); @@ -537,20 +646,13 @@ where self.event_stream .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) .await; - // if let Err(e) = self.api.send_da_broadcast(message.clone()).await { - // consensus.metrics.failed_to_send_messages.add(1); - // warn!(?message, ?e, "Could not broadcast leader proposal"); - // } else { - // consensus.metrics.outgoing_broadcast_messages.add(1); - // } + self.event_stream .publish(SequencingHotShotEvent::DAProposalSend( - message, + message.clone(), self.committee_exchange.public_key().clone(), )) .await; - - return None; } SequencingHotShotEvent::Timeout(view) => { @@ -563,73 +665,11 @@ where SequencingHotShotEvent::Shutdown => { return Some(HotShotTaskCompleted::ShutDown); } - _ => {} - } - None - } - - /// return None if we can't get transactions - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Vote Collection Task", level = "error")] - - async fn wait_for_transactions( - &self, - parent_leaf: SequencingLeaf, - ) -> Option> { - let task_start_time = Instant::now(); - - // let parent_leaf = self.parent_leaf().await?; - let previous_used_txns = match parent_leaf.deltas { - Either::Left(block) => block.contained_transactions(), - Either::Right(_commitment) => HashSet::new(), - }; - - let consensus = self.consensus.read().await; - - let receiver = consensus.transactions.subscribe().await; - - loop { - let all_txns = consensus.transactions.cloned().await; - debug!("Size of transactions: {}", all_txns.len()); - let unclaimed_txns: Vec<_> = all_txns - .iter() - .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) - .collect(); - - let time_past = task_start_time.elapsed(); - if unclaimed_txns.len() < self.api.min_transactions() - && (time_past < self.api.propose_max_round_time()) - { - let duration = self.api.propose_max_round_time() - time_past; - let result = async_timeout(duration, receiver.recv()).await; - match result { - Err(_) => { - // Fall through below to updating new block - debug!( - "propose_max_round_time passed, sending transactions we have so far" - ); - } - Ok(Err(e)) => { - // Something unprecedented is wrong, and `transactions` has been dropped - error!("Channel receiver error for SubscribableRwLock {:?}", e); - return None; - } - Ok(Ok(_)) => continue, - } + _ => { + error!("unexpected event {:?}", event); } - break; } - let all_txns = consensus.transactions.cloned().await; - let txns: Vec = all_txns - .iter() - .filter_map(|(txn_hash, txn)| { - if previous_used_txns.contains(txn_hash) { - None - } else { - Some(txn.clone()) - } - }) - .collect(); - Some(txns) + None } /// Filter the DA event. @@ -639,8 +679,10 @@ where SequencingHotShotEvent::DAProposalRecv(_, _) | SequencingHotShotEvent::DAVoteRecv(_) | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::TransactionsRecv(_) + | SequencingHotShotEvent::BlockReady(_, _) | SequencingHotShotEvent::Timeout(_) + | SequencingHotShotEvent::VidDisperseRecv(_, _) + | SequencingHotShotEvent::VidVoteRecv(_) | SequencingHotShotEvent::ViewChange(_) ) } @@ -661,7 +703,7 @@ where TYPES, Message, Certificate = DACertificate, - Commitment = TYPES::BlockType, + Commitment = Commitment, >, { } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 98819108a4..f9fc7f8ba3 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,6 +1,8 @@ +use crate::view_sync::ViewSyncPhase; +use commit::Commitment; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, - data::DAProposal, + data::{DAProposal, VidDisperse}, message::Proposal, traits::node_implementation::{ NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, @@ -8,8 +10,6 @@ use hotshot_types::{ vote::{DAVote, QuorumVote, ViewSyncVote}, }; -use crate::view_sync::ViewSyncPhase; - /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub enum SequencingHotShotEvent> { @@ -18,7 +18,7 @@ pub enum SequencingHotShotEvent> { /// A quorum proposal has been received from the network; handled by the consensus task QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(QuorumVote), + QuorumVoteRecv(QuorumVote>), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task @@ -28,13 +28,13 @@ pub enum SequencingHotShotEvent> { /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(QuorumVote), + QuorumVoteSend(QuorumVote>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(QuorumCertificate), + QCFormed(QuorumCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks @@ -62,4 +62,32 @@ pub enum SequencingHotShotEvent> { TransactionSend(TYPES::Transaction, TYPES::SignatureKey), /// Event to send DA block data from DA leader to next quorum leader (which should always be the same node); internal event only SendDABlockData(TYPES::BlockType), + /// Event when the transactions task has a block formed + BlockReady(TYPES::BlockType, TYPES::Time), + /// Event when consensus decided on a leaf + LeafDecided(Vec), + /// Send VID shares to VID storage nodes; emitted by the DA leader + /// + /// Like [`DAProposalSend`]. + VidDisperseSend(Proposal>, TYPES::SignatureKey), + /// Vid disperse data has been received from the network; handled by the DA task + /// + /// Like [`DAProposalRecv`]. + VidDisperseRecv(Proposal>, TYPES::SignatureKey), + /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal + /// + /// Like [`DAVoteSend`] + VidVoteSend(DAVote), + /// A VID vote has been received by the network; handled by the DA task + /// + /// Like [`DAVoteRecv`] + VidVoteRecv(DAVote), + /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task + /// + /// Like [`DACSend`] + VidCertSend(DACertificate, TYPES::SignatureKey), + /// A VID cert has been recieved by the network; handled by the consensus task + /// + /// Like [`DACRecv`] + VidCertRecv(DACertificate), } diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index aca2cafdea..5e7492d84a 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -17,6 +17,9 @@ pub mod consensus; /// The task which implements the main parts of data availability. pub mod da; +/// The task which implements all transaction handling +pub mod transactions; + /// Defines the events passed between tasks pub mod events; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a4b083ba37..5fbada8539 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -7,7 +7,7 @@ use hotshot_task::{ GeneratedStream, Merge, }; use hotshot_types::{ - data::{ProposalType, SequencingLeaf}, + data::SequencingLeaf, message::{ CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, SequencingMessage, @@ -106,6 +106,15 @@ impl< // panic!("Recevid DA C! "); SequencingHotShotEvent::DACRecv(cert) } + CommitteeConsensusMessage::VidDisperseMsg(proposal) => { + SequencingHotShotEvent::VidDisperseRecv(proposal, sender) + } + CommitteeConsensusMessage::VidVote(vote) => { + SequencingHotShotEvent::VidVoteRecv(vote.clone()) + } + CommitteeConsensusMessage::VidCertificate(cert) => { + SequencingHotShotEvent::VidCertRecv(cert) + } }, }; // TODO (Keyao benchmarking) Update these event variants (similar to the @@ -137,10 +146,8 @@ pub struct NetworkEventTaskState< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, + COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > { /// comm channel pub channel: COMMCHANNEL, @@ -149,7 +156,7 @@ pub struct NetworkEventTaskState< /// view number pub view: TYPES::Time, /// phantom data - pub phantom: PhantomData<(PROPOSAL, VOTE, MEMBERSHIP)>, + pub phantom: PhantomData, // TODO ED Need to add exchange so we can get the recipient key and our own key? } @@ -160,11 +167,9 @@ impl< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, - > TS for NetworkEventTaskState + COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, + > TS for NetworkEventTaskState { } @@ -175,17 +180,16 @@ impl< Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, PROPOSAL, VOTE, MEMBERSHIP>, - > NetworkEventTaskState + COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, + > NetworkEventTaskState { /// Handle the given event. /// /// Returns the completion status. /// # Panics /// Panic sif a direct message event is received with no recipient + #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 pub async fn handle_event( &mut self, event: SequencingHotShotEvent, @@ -208,9 +212,16 @@ impl< GeneralConsensusMessage::Vote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.current_view() + 1)), + Some(membership.get_leader(vote.get_view() + 1)), + ), + SequencingHotShotEvent::VidDisperseSend(proposal, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidDisperseMsg(proposal), + ))), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 + None, ), - SequencingHotShotEvent::DAProposalSend(proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Right( @@ -219,6 +230,14 @@ impl< TransmitType::Broadcast, None, ), + SequencingHotShotEvent::VidVoteSend(vote) => ( + vote.signature_key(), + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.current_view)), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 + ), SequencingHotShotEvent::DAVoteSend(vote) => ( vote.signature_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( @@ -227,6 +246,14 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.current_view)), ), + SequencingHotShotEvent::VidCertSend(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidCertificate(certificate), + ))), + TransmitType::Broadcast, + None, + ), // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee SequencingHotShotEvent::DACSend(certificate, sender) => ( sender, @@ -307,6 +334,7 @@ impl< | SequencingHotShotEvent::QuorumVoteSend(_) | SequencingHotShotEvent::Shutdown | SequencingHotShotEvent::DACSend(_, _) + | SequencingHotShotEvent::VidCertSend(_, _) | SequencingHotShotEvent::ViewChange(_) ) } @@ -318,6 +346,8 @@ impl< SequencingHotShotEvent::DAProposalSend(_, _) | SequencingHotShotEvent::DAVoteSend(_) | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::VidDisperseSend(_, _) + | SequencingHotShotEvent::VidVoteSend(_) | SequencingHotShotEvent::ViewChange(_) ) } @@ -348,9 +378,9 @@ pub type NetworkMessageTaskTypes = HSTWithMessage< >; /// network event task types -pub type NetworkEventTaskTypes = HSTWithEvent< +pub type NetworkEventTaskTypes = HSTWithEvent< NetworkTaskError, SequencingHotShotEvent, ChannelStream>, - NetworkEventTaskState, + NetworkEventTaskState, >; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs new file mode 100644 index 0000000000..c682f5f38d --- /dev/null +++ b/task-impls/src/transactions.rs @@ -0,0 +1,417 @@ +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; +use async_compatibility_layer::{ + art::async_timeout, async_primitives::subscribable_rwlock::ReadView, +}; +use async_lock::RwLock; +use bincode::config::Options; +use commit::{Commitment, Committable}; +use either::{Left, Right}; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, +}; +use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction, NUM_CHUNKS, NUM_STORAGE_NODES}, + certificate::DACertificate, + consensus::Consensus, + data::{SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, + message::{Message, Proposal, SequencingMessage}, + traits::{ + consensus_api::SequencingConsensusApi, + election::{CommitteeExchangeType, ConsensusExchange}, + node_implementation::{CommitteeEx, NodeImplementation, NodeType}, + BlockPayload, + }, +}; +use hotshot_utils::bincode::bincode_opts; +use snafu::Snafu; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Instant, +}; +use tracing::{debug, error, instrument, warn}; + +/// A type alias for `HashMap, T>` +type CommitmentMap = HashMap, T>; + +#[derive(Snafu, Debug)] +/// Error type for consensus tasks +pub struct ConsensusTaskError {} + +/// Tracks state of a Transaction task +pub struct TransactionTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +> where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = Commitment, + >, +{ + /// The state's api + pub api: A, + /// Global registry task for the state + pub registry: GlobalRegistry, + + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + + /// A list of undecided transactions + pub transactions: Arc>>, + + /// A list of transactions we've seen decided, but didn't receive + pub seen_transactions: HashSet>, + + /// the committee exchange + pub committee_exchange: Arc>, + + /// Global events stream to publish events + pub event_stream: ChannelStream>, + + /// This state's ID + pub id: u64, +} + +// We have two `TransactionTaskState` implementations with different bounds. The implementation +// here requires `TYPES: NodeType`, +// whereas it's just `TYPES: NodeType` in the second implementation. +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TransactionTaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = Commitment, + >, +{ + /// main task event handler + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] + + pub async fn handle_event( + &mut self, + event: SequencingHotShotEvent, + ) -> Option { + match event { + SequencingHotShotEvent::TransactionsRecv(transactions) => { + let consensus = self.consensus.read().await; + self.transactions + .modify(|txns| { + for transaction in transactions { + let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); + + // If we didn't already know about this transaction, update our mempool metrics. + if !self.seen_transactions.remove(&transaction.commit()) + && txns.insert(transaction.commit(), transaction).is_none() + { + consensus.metrics.outstanding_transactions.update(1); + consensus + .metrics + .outstanding_transactions_memory_size + .update(i64::try_from(size).unwrap_or_else(|e| { + warn!("Conversion failed: {e}. Using the max value."); + i64::MAX + })); + } + } + }) + .await; + + return None; + } + SequencingHotShotEvent::LeafDecided(leaf_chain) => { + let mut included_txns = HashSet::new(); + let mut included_txn_size = 0; + let mut included_txn_count = 0; + for leaf in leaf_chain { + match &leaf.deltas { + Left(block) => { + let txns = block.contained_transactions(); + for txn in txns { + included_txns.insert(txn); + } + } + Right(_) => {} + } + } + let consensus = self.consensus.read().await; + let txns = self.transactions.cloned().await; + + let _ = included_txns.iter().map(|hash| { + if !txns.contains_key(hash) { + self.seen_transactions.insert(*hash); + } + }); + drop(txns); + self.transactions + .modify(|txns| { + *txns = txns + .drain() + .filter(|(txn_hash, txn)| { + if included_txns.contains(txn_hash) { + included_txn_count += 1; + included_txn_size += + bincode_opts().serialized_size(txn).unwrap_or_default(); + false + } else { + true + } + }) + .collect(); + }) + .await; + + consensus + .metrics + .outstanding_transactions + .update(-included_txn_count); + consensus + .metrics + .outstanding_transactions_memory_size + .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); + return None; + } + SequencingHotShotEvent::ViewChange(view) => { + if *self.cur_view >= *view { + return None; + } + + if *view - *self.cur_view > 1 { + error!("View changed by more than 1 going to view {:?}", view); + } + self.cur_view = view; + + // If we are not the next leader (DA leader for this view) immediately exit + if !self.committee_exchange.is_leader(self.cur_view + 1) { + // panic!("We are not the DA leader for view {}", *self.cur_view + 1); + return None; + } + + // ED Copy of parent_leaf() function from sequencing leader + + let consensus = self.consensus.read().await; + let parent_view_number = &consensus.high_qc.view_number; + + let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + error!( + "Couldn't find high QC parent in state map. Parent view {:?}", + parent_view_number + ); + return None; + }; + let Some(leaf) = parent_view.get_leaf_commitment() else { + error!( + ?parent_view_number, + ?parent_view, + "Parent of high QC points to a view without a proposal" + ); + return None; + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + error!("Failed to find high QC parent."); + return None; + }; + let parent_leaf = leaf.clone(); + + drop(consensus); + + let txns = self.wait_for_transactions(parent_leaf).await?; + // TODO (Keyao) Determine whether to allow empty transaction when proposing a block. + // + + debug!("Prepare VID shares"); + // TODO https://github.com/EspressoSystems/HotShot/issues/1686 + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + // TODO https://github.com/EspressoSystems/jellyfish/issues/375 + let mut txns_flatten = Vec::new(); + for txn in &txns { + txns_flatten.extend(txn.0.clone()); + } + let vid_disperse = vid.disperse(&txns_flatten).unwrap(); + let block = VIDBlockPayload { + transactions: txns, + commitment: vid_disperse.commit, + }; + + self.event_stream + .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) + .await; + + // TODO (Keyao) Determine and update where to publish VidDisperseSend. + // + self.event_stream + .publish(SequencingHotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: view + 1, + commitment: block.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + // TODO (Keyao) This is also signed in DA task. + signature: self.committee_exchange.sign_da_proposal(&block.commit()), + }, + // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 + self.committee_exchange.public_key().clone(), + )) + .await; + return None; + } + SequencingHotShotEvent::Shutdown => { + return Some(HotShotTaskCompleted::ShutDown); + } + _ => {} + } + None + } +} + +// We have two `TransactionTaskState` implementations with different bounds. The implementation +// above requires `TYPES: NodeType`, +// whereas here it's just `TYPES: NodeType`. +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TransactionTaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = Commitment, + >, +{ + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] + async fn wait_for_transactions( + &self, + _parent_leaf: SequencingLeaf, + ) -> Option> { + let task_start_time = Instant::now(); + + // TODO (Keyao) Investigate the use of transaction hash + // + // let parent_leaf = self.parent_leaf().await?; + // let previous_used_txns = match parent_leaf.deltas { + // Either::Left(block) => block.contained_transactions(), + // Either::Right(_commitment) => HashSet::new(), + // }; + + let receiver = self.transactions.subscribe().await; + + loop { + let all_txns = self.transactions.cloned().await; + debug!("Size of transactions: {}", all_txns.len()); + // TODO (Keyao) Investigate the use of transaction hash + // + // let unclaimed_txns: Vec<_> = all_txns + // .iter() + // .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) + // .collect(); + let unclaimed_txns = all_txns; + + let time_past = task_start_time.elapsed(); + if unclaimed_txns.len() < self.api.min_transactions() + && (time_past < self.api.propose_max_round_time()) + { + let duration = self.api.propose_max_round_time() - time_past; + let result = async_timeout(duration, receiver.recv()).await; + match result { + Err(_) => { + // Fall through below to updating new block + error!( + "propose_max_round_time passed, sending transactions we have so far" + ); + } + Ok(Err(e)) => { + // Something unprecedented is wrong, and `transactions` has been dropped + error!("Channel receiver error for SubscribableRwLock {:?}", e); + return None; + } + Ok(Ok(_)) => continue, + } + } + break; + } + let all_txns = self.transactions.cloned().await; + // TODO (Keyao) Investigate the use of transaction hash + // + let txns: Vec = all_txns.values().cloned().collect(); + // let txns: Vec = all_txns + // .iter() + // .filter_map(|(txn_hash, txn)| { + // if previous_used_txns.contains(txn_hash) { + // None + // } else { + // Some(txn.clone()) + // } + // }) + // .collect(); + Some(txns) + } + + /// Event filter for the transaction task + pub fn filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::TransactionsRecv(_) + | SequencingHotShotEvent::LeafDecided(_) + | SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::ViewChange(_) + ) + } +} + +/// task state implementation for Transactions Task +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TS for TransactionTaskState +where + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Certificate = DACertificate, + Commitment = Commitment, + >, +{ +} + +/// Type alias for DA Task Types +pub type TransactionsTaskTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + TransactionTaskState, +>; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index da52ceaf82..4b241c37cd 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,7 +1,7 @@ #![allow(clippy::module_name_repetitions)] use crate::events::SequencingHotShotEvent; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use commit::Committable; +use commit::{Commitment, Committable}; use either::Either::{self, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -9,7 +9,13 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::{election::Membership, network::ConsensusIntentEvent}; +use hotshot_types::{ + traits::{ + election::{Membership, SignedCertificate}, + network::ConsensusIntentEvent, + }, + vote::ViewSyncVoteAccumulator, +}; use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; @@ -25,10 +31,10 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, }, - vote::{ViewSyncData, ViewSyncVote, VoteAccumulator}, + vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use tracing::{debug, error, instrument}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync @@ -69,7 +75,7 @@ pub struct ViewSyncTaskState< Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { /// Registry to register sub tasks @@ -118,7 +124,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { } @@ -146,7 +152,7 @@ pub struct ViewSyncReplicaTaskState< Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { /// Timeout for view sync rounds @@ -189,7 +195,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { } @@ -216,8 +222,14 @@ pub struct ViewSyncRelayTaskState< /// View sync exchange pub exchange: Arc>, /// Vote accumulator + #[allow(clippy::type_complexity)] pub accumulator: Either< - VoteAccumulator>, + as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Commitment>, + >>::VoteAccumulator, ViewSyncCertificate, >, /// Our node id; for logging @@ -258,7 +270,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] @@ -381,24 +393,23 @@ where return; } - let accumulator = VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), + let new_accumulator = ViewSyncVoteAccumulator { + pre_commit_vote_outcomes: HashMap::new(), + commit_vote_outcomes: HashMap::new(), + finalize_vote_outcomes: HashMap::new(), + success_threshold: self.exchange.success_threshold(), failure_threshold: self.exchange.failure_threshold(), + sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], + phantom: PhantomData, }; let mut relay_state = ViewSyncRelayTaskState { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), - accumulator: either::Left(accumulator), + accumulator: either::Left(new_accumulator), id: self.id, }; @@ -637,7 +648,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] @@ -971,7 +982,7 @@ where Message, Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, - Commitment = ViewSyncData, + Commitment = Commitment>, >, { /// Handles incoming events for the view sync relay task @@ -989,7 +1000,7 @@ where return (Some(HotShotTaskCompleted::ShutDown), self); } - let (vote_internal, phase) = match vote { + let (vote_internal, phase) = match vote.clone() { ViewSyncVote::PreCommit(vote_internal) => { (vote_internal, ViewSyncPhase::PreCommit) } @@ -1024,15 +1035,10 @@ where *vote_internal.round, vote_internal.relay ); - let accumulator = self.exchange.accumulate_vote( - &vote_internal.signature.0, - &vote_internal.signature.1, - view_sync_data, - vote_internal.vote_data, - vote_internal.vote_token.clone(), - vote_internal.round, + let accumulator = self.exchange.accumulate_vote_2( self.accumulator.left().unwrap(), - Some(vote_internal.relay), + &vote, + &view_sync_data, ); self.accumulator = match accumulator { @@ -1044,7 +1050,6 @@ where data: certificate.clone(), signature, }; - // error!("Sending view sync cert {:?}", message.clone()); self.event_stream .publish(SequencingHotShotEvent::ViewSyncCertificateSend( message, @@ -1053,19 +1058,19 @@ where .await; // Reset accumulator for new certificate - either::Left(VoteAccumulator { - total_vote_outcomes: HashMap::new(), - da_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - viewsync_precommit_vote_outcomes: HashMap::new(), - viewsync_commit_vote_outcomes: HashMap::new(), - viewsync_finalize_vote_outcomes: HashMap::new(), + let new_accumulator = ViewSyncVoteAccumulator { + pre_commit_vote_outcomes: HashMap::new(), + commit_vote_outcomes: HashMap::new(), + finalize_vote_outcomes: HashMap::new(), + success_threshold: self.exchange.success_threshold(), failure_threshold: self.exchange.failure_threshold(), + sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], - }) + phantom: PhantomData, + }; + either::Left(new_accumulator) } }; diff --git a/task/Cargo.toml b/task/Cargo.toml index 720a7aca6a..cb1703831f 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -10,7 +10,6 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } either = { workspace = true } futures = { workspace = true } -nll = { workspace = true } serde = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index e75c328a5d..935f9e4342 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -34,13 +34,13 @@ rand = { workspace = true } rand_chacha = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } -nll = { workspace = true } serde = { workspace = true } ethereum-types = { workspace = true } bitvec = { workspace = true } [dev-dependencies] async-lock = { workspace = true } +bincode = { workspace = true } # GG any better options for serialization? [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/README.md b/testing/README.md index a477450092..59b281f421 100644 --- a/testing/README.md +++ b/testing/README.md @@ -69,4 +69,4 @@ async { }; ``` -See TODO for examples. +See TODO for examples. \ No newline at end of file diff --git a/testing/src/network_reliability.rs b/testing/src/network_reliability.rs deleted file mode 100644 index 97b22b533b..0000000000 --- a/testing/src/network_reliability.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::time::Duration; - -use hotshot::traits::NetworkReliability; -use rand::{ - distributions::{Bernoulli, Uniform}, - prelude::Distribution, -}; - -/// A synchronous network. Packets may be delayed, but are guaranteed -/// to arrive within `timeout` ns -#[derive(Clone, Copy, Debug, Default)] -pub struct SynchronousNetwork { - /// Max delay of packet before arrival - timeout_ms: u64, - /// Lowest value in milliseconds that a packet may be delayed - delay_low_ms: u64, -} - -impl NetworkReliability for SynchronousNetwork { - /// never drop a packet - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.timeout_ms) - .sample(&mut rand::thread_rng()), - ) - } -} - -/// An asynchronous network. Packets may be dropped entirely -/// or delayed for arbitrarily long periods -/// probability that packet is kept = `keep_numerator` / `keep_denominator` -/// packet delay is obtained by sampling from a uniform distribution -/// between `delay_low_ms` and `delay_high_ms`, inclusive -#[derive(Debug, Clone, Copy)] -pub struct AsynchronousNetwork { - /// numerator for probability of keeping packets - keep_numerator: u32, - /// denominator for probability of keeping packets - keep_denominator: u32, - /// lowest value in milliseconds that a packet may be delayed - delay_low_ms: u64, - /// highest value in milliseconds that a packet may be delayed - delay_high_ms: u64, -} - -impl NetworkReliability for AsynchronousNetwork { - fn sample_keep(&self) -> bool { - Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) - .unwrap() - .sample(&mut rand::thread_rng()) - } - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) - .sample(&mut rand::thread_rng()), - ) - } -} - -/// An partially synchronous network. Behaves asynchronously -/// until some arbitrary time bound, GST, -/// then synchronously after GST -#[derive(Debug, Clone, Copy)] -pub struct PartiallySynchronousNetwork { - /// asynchronous portion of network - asynchronous: AsynchronousNetwork, - /// synchronous portion of network - synchronous: SynchronousNetwork, - /// time when GST occurs - gst: std::time::Duration, - /// when the network was started - start: std::time::Instant, -} - -impl NetworkReliability for PartiallySynchronousNetwork { - /// never drop a packet - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> Duration { - // act asyncronous before gst - if self.start.elapsed() < self.gst { - if self.asynchronous.sample_keep() { - self.asynchronous.sample_delay() - } else { - // assume packet was "dropped" and will arrive after gst - self.synchronous.sample_delay() + self.gst - } - } else { - // act syncronous after gst - self.synchronous.sample_delay() - } - } -} - -impl Default for AsynchronousNetwork { - // disable all chance of failure - fn default() -> Self { - AsynchronousNetwork { - keep_numerator: 1, - keep_denominator: 1, - delay_low_ms: 0, - delay_high_ms: 0, - } - } -} - -impl Default for PartiallySynchronousNetwork { - fn default() -> Self { - PartiallySynchronousNetwork { - synchronous: SynchronousNetwork::default(), - asynchronous: AsynchronousNetwork::default(), - gst: std::time::Duration::new(0, 0), - start: std::time::Instant::now(), - } - } -} - -impl SynchronousNetwork { - /// create new `SynchronousNetwork` - pub fn new(timeout: u64, delay_low_ms: u64) -> Self { - SynchronousNetwork { - timeout_ms: timeout, - delay_low_ms, - } - } -} - -impl AsynchronousNetwork { - /// create new `AsynchronousNetwork` - pub fn new( - keep_numerator: u32, - keep_denominator: u32, - delay_low_ms: u64, - delay_high_ms: u64, - ) -> Self { - AsynchronousNetwork { - keep_numerator, - keep_denominator, - delay_low_ms, - delay_high_ms, - } - } -} - -impl PartiallySynchronousNetwork { - /// create new `PartiallySynchronousNetwork` - pub fn new( - asynchronous: AsynchronousNetwork, - synchronous: SynchronousNetwork, - gst: std::time::Duration, - ) -> Self { - PartiallySynchronousNetwork { - asynchronous, - synchronous, - gst, - start: std::time::Instant::now(), - } - } -} diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 1d2985c56d..4ffd74e2bf 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -2,7 +2,7 @@ use hotshot::traits::implementations::CombinedNetworks; use std::{marker::PhantomData, sync::Arc}; use hotshot::{ - demos::sdemo::{SDemoBlock, SDemoState, SDemoTransaction}, + demo::SDemoState, traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig, StaticVoteToken}, implementations::{ @@ -11,18 +11,18 @@ use hotshot::{ }, NodeImplementation, }, - types::bn254::BN254Pub, + types::bn254::BLSPubKey, }; use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal, SequencingLeaf, ViewNumber}, + data::{QuorumProposal, SequencingLeaf, ViewNumber}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, network::{TestableChannelImplementation, TestableNetworkingImplementation}, node_implementation::{ChannelMaps, NodeType, SequencingExchanges, TestableExchange}, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, }; use serde::{Deserialize, Serialize}; @@ -42,10 +42,10 @@ use serde::{Deserialize, Serialize}; pub struct SequencingTestTypes; impl NodeType for SequencingTestTypes { type Time = ViewNumber; - type BlockType = SDemoBlock; - type SignatureKey = BN254Pub; + type BlockType = VIDBlockPayload; + type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; - type Transaction = SDemoTransaction; + type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = SDemoState; } @@ -65,80 +65,33 @@ pub struct StaticFallbackImpl; pub type StaticMembership = StaticCommittee>; -pub type StaticMemoryDAComm = MemoryCommChannel< - SequencingTestTypes, - SequencingMemoryImpl, - DAProposal, - DAVote, - StaticMembership, ->; +pub type StaticMemoryDAComm = + MemoryCommChannel; -type StaticLibp2pDAComm = Libp2pCommChannel< - SequencingTestTypes, - SequencingLibp2pImpl, - DAProposal, - DAVote, - StaticMembership, ->; +type StaticLibp2pDAComm = + Libp2pCommChannel; -type StaticWebDAComm = WebCommChannel< - SequencingTestTypes, - SequencingWebImpl, - DAProposal, - DAVote, - StaticMembership, ->; +type StaticWebDAComm = WebCommChannel; type StaticFallbackComm = WebServerWithFallbackCommChannel; -pub type StaticMemoryQuorumComm = MemoryCommChannel< - SequencingTestTypes, - SequencingMemoryImpl, - QuorumProposal>, - QuorumVote>, - StaticMembership, ->; +pub type StaticMemoryQuorumComm = + MemoryCommChannel; -type StaticLibp2pQuorumComm = Libp2pCommChannel< - SequencingTestTypes, - SequencingLibp2pImpl, - QuorumProposal>, - QuorumVote>, - StaticMembership, ->; +type StaticLibp2pQuorumComm = + Libp2pCommChannel; -type StaticWebQuorumComm = WebCommChannel< - SequencingTestTypes, - SequencingWebImpl, - QuorumProposal>, - QuorumVote>, - StaticMembership, ->; +type StaticWebQuorumComm = WebCommChannel; -pub type StaticMemoryViewSyncComm = MemoryCommChannel< - SequencingTestTypes, - SequencingMemoryImpl, - ViewSyncCertificate, - ViewSyncVote, - StaticMembership, ->; +pub type StaticMemoryViewSyncComm = + MemoryCommChannel; -type StaticLibp2pViewSyncComm = Libp2pCommChannel< - SequencingTestTypes, - SequencingLibp2pImpl, - ViewSyncCertificate, - ViewSyncVote, - StaticMembership, ->; +type StaticLibp2pViewSyncComm = + Libp2pCommChannel; -type StaticWebViewSyncComm = WebCommChannel< - SequencingTestTypes, - SequencingWebImpl, - ViewSyncCertificate, - ViewSyncVote, - StaticMembership, ->; +type StaticWebViewSyncComm = + WebCommChannel; pub type SequencingLibp2pExchange = SequencingExchanges< SequencingTestTypes, @@ -231,9 +184,24 @@ impl Box::new(move |id| { let network = Arc::new(network_generator(id)); - let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) @@ -325,9 +293,24 @@ impl Box::new(move |id| { let network = Arc::new(network_generator(id)); let network_da = Arc::new(network_da_generator(id)); - let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network_da); - let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) @@ -445,9 +428,24 @@ impl Box::new(move |id| { let network = Arc::new(network_generator(id)); let network_da = Arc::new(network_da_generator(id)); - let quorum_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network.clone()); - let committee_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network_da); - let view_sync_chan = <>>::Networking as TestableChannelImplementation<_, _, _, _, _, _>>::generate_network()(network); + let quorum_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let committee_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); + let view_sync_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) @@ -584,44 +582,20 @@ impl <, - >>::Networking as TestableChannelImplementation< - _, - _, - QuorumProposal< - SequencingTestTypes, - >::Leaf, - >, - QuorumVote< - SequencingTestTypes, - >::Leaf, - >, - _, - _, - >>::generate_network()(network.clone()); + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); let committee_chan = <, - >>::Networking as TestableChannelImplementation< - _, - _, - DAProposal, - DAVote, - _, - _, - >>::generate_network()(network_da); + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); let view_sync_chan = <, - >>::Networking as TestableChannelImplementation< - _, - _, - ViewSyncCertificate, - ViewSyncVote, - _, - _, - >>::generate_network()(network); + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network); (quorum_chan, committee_chan, view_sync_chan) }) } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 4bda48adcc..b2a9a88451 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -90,9 +90,13 @@ impl> TS for OverallSafety pub struct RoundResult> { /// Transactions that were submitted // pub txns: Vec, + /// Nodes that committed this round /// id -> (leaf, qc) - pub success_nodes: HashMap, QuorumCertificate)>, + // TODO GG: isn't it infeasible to store a Vec? + #[allow(clippy::type_complexity)] + success_nodes: HashMap, QuorumCertificate>)>, + /// Nodes that failed to commit this round pub failed_nodes: HashMap>>>, @@ -185,7 +189,7 @@ impl> RoundResult pub fn insert_into_result( &mut self, idx: usize, - result: (Vec, QuorumCertificate), + result: (Vec, QuorumCertificate>), maybe_block_size: Option, ) -> Option { self.success_nodes.insert(idx as u64, result.clone()); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index c8ef07d51c..a9d9e5d586 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -113,7 +113,8 @@ impl SpinningTaskDescription { state.late_start.remove(&idx.try_into().unwrap()) { tracing::error!("Spinning up node late"); - node.run_tasks().await; + let handle = node.run_tasks().await; + handle.hotshot.start_consensus().await; } } UpDown::Down => { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 13868721d4..f4e7fe49a8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -6,14 +6,15 @@ use commit::Committable; use either::Right; use hotshot::{ certificate::QuorumCertificate, - traits::{Block, NodeImplementation, TestableNodeImplementation}, - types::{bn254::BN254Pub, SignatureKey, SystemContextHandle}, + traits::{NodeImplementation, TestableNodeImplementation}, + types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, HotShotInitializer, HotShotSequencingConsensusApi, SystemContext, }; use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ - data::{QuorumProposal, SequencingLeaf, ViewNumber}, + block_impl::{VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, + data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ consensus_api::ConsensusSharedApi, @@ -21,7 +22,7 @@ use hotshot_types::{ metrics::NoMetrics, node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, signature_key::EncodedSignature, - state::ConsensusTime, + state::{ConsensusTime, TestableBlock}, }, }; @@ -47,9 +48,9 @@ pub async fn build_system_handle( >>::block_genesis()) .unwrap(); - let known_nodes = config.known_nodes.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; + let private_key = + ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< @@ -67,7 +68,6 @@ pub async fn build_system_handle( let exchanges = >::Exchanges::create( known_nodes_with_stake.clone(), - known_nodes.clone(), (quorum_election_config, committee_election_config), networks, public_key, @@ -90,7 +90,7 @@ pub async fn build_system_handle( async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, - private_key: &::PrivateKey, + private_key: &::PrivateKey, view: u64, ) -> ( QuorumProposal>, @@ -117,8 +117,7 @@ async fn build_quorum_proposal_and_signature( let parent_leaf = leaf.clone(); // every event input is seen on the event stream in the output. - - let block_commitment = ::BlockType::new().commit(); + let block = ::genesis(); let leaf = SequencingLeaf { view_number: ViewNumber::new(view), height: parent_leaf.height + 1, @@ -126,14 +125,14 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), // Use the block commitment rather than the block, so that the replica can construct // the same leaf with the commitment. - deltas: Right(block_commitment), + deltas: Right(block.commit()), rejected: vec![], timestamp: 0, proposer_id: api.public_key().to_bytes(), }; - let signature = ::sign(private_key, leaf.commit().as_ref()); + let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal::> { - block_commitment, + block_commitment: block.commit(), view_number: ViewNumber::new(view), height: 1, justify_qc: QuorumCertificate::genesis(), @@ -147,7 +146,7 @@ async fn build_quorum_proposal_and_signature( pub async fn build_quorum_proposal( handle: &SystemContextHandle, - private_key: &::PrivateKey, + private_key: &::PrivateKey, view: u64, ) -> Proposal>> { let (proposal, signature) = @@ -158,8 +157,14 @@ pub async fn build_quorum_proposal( } } -pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BN254Pub) { - let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; +pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { + let private_key = + ::generated_from_seed_indexed([0u8; 32], node_id).1; let public_key = ::SignatureKey::from_private(&private_key); (private_key, public_key) } + +pub fn vid_init() -> VidScheme { + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap() +} diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f1db2a9320..3515f04368 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -124,7 +124,7 @@ impl TestMetadata { } } - /// Default setting with 20 nodes and 10 views of successful views. + /// Default setting with 20 nodes and 8 views of successful views. pub fn default_more_nodes_less_success() -> TestMetadata { TestMetadata { total_nodes: 20, @@ -139,11 +139,11 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(40, 0), + duration: Duration::new(200, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 10, + num_successful_views: 8, ..Default::default() }, ..TestMetadata::default() @@ -219,7 +219,6 @@ impl TestMetadata { num_bootstrap: num_bootstrap_nodes, min_transactions, max_transactions: NonZeroUsize::new(99999).unwrap(), - known_nodes, known_nodes_with_stake, da_committee_size, next_view_timeout: 500, @@ -247,7 +246,7 @@ impl TestMetadata { } = timing_data; let mod_config = // TODO this should really be using the timing config struct - |a: &mut HotShotConfig::StakeTableEntry, TYPES::ElectionConfigType>| { + |a: &mut HotShotConfig<::StakeTableEntry, TYPES::ElectionConfigType>| { a.next_view_timeout = next_view_timeout; a.timeout_ratio = timeout_ratio; a.round_start_delay = round_start_delay; diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 4a7e6bd9b1..4fb230b315 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -83,8 +83,6 @@ where QuorumCommChannel: CommunicationChannel< TYPES, Message, - as ConsensusExchange>>::Proposal, - as ConsensusExchange>>::Vote, as ConsensusExchange>>::Membership, >, { @@ -94,7 +92,6 @@ where pub storage: Generator<>::Storage>, /// configuration used to generate each hotshot node pub config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -198,7 +195,6 @@ impl> TestLauncher::StakeTableEntry, TYPES::ElectionConfigType, >, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 57c82d24e9..056f0f8a26 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -41,8 +41,6 @@ where QuorumCommChannel: CommunicationChannel< TYPES, Message, - as ConsensusExchange>>::Proposal, - as ConsensusExchange>>::Vote, as ConsensusExchange>>::Membership, >, { @@ -59,8 +57,6 @@ where QuorumCommChannel: CommunicationChannel< TYPES, Message, - as ConsensusExchange>>::Proposal, - as ConsensusExchange>>::Vote, as ConsensusExchange>>::Membership, >, { @@ -202,8 +198,8 @@ where >, { let mut results = vec![]; - for _i in 0..total { - tracing::error!("running node{}", _i); + for i in 0..total { + tracing::debug!("launch node {}", i); let node_id = self.next_node_id; let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); @@ -234,7 +230,6 @@ where storage: I::Storage, initializer: HotShotInitializer, config: HotShotConfig< - TYPES::SignatureKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -250,7 +245,6 @@ where let node_id = self.next_node_id; self.next_node_id += 1; - let known_nodes = config.known_nodes.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); // Generate key pair for certificate aggregation let private_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; @@ -265,7 +259,6 @@ where let committee_election_config = I::committee_election_config_generator(); let exchanges = I::Exchanges::create( known_nodes_with_stake.clone(), - known_nodes.clone(), ( quorum_election_config, committee_election_config(config.da_committee_size as u64), diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index 3b97a60c9c..381db09d87 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -6,83 +6,13 @@ use hotshot::{ random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, VDemoState, }, - traits::{Block, State, Storage}, + traits::{BlockPayload, State, Storage}, }; use hotshot_types::{data::ViewNumber, traits::state::TestableState}; use rand::thread_rng; type AtomicStorage = hotshot::traits::implementations::AtomicStorage; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_happy_path_blocks() { - // This folder will be destroyed when the last handle to it closes - let file = tempfile::tempdir().expect("Could not create temp dir"); - let path = file.path(); - println!("Using store in {:?}", path); - let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); - - let block = VDEntryBlock::default(); - let hash = block.hash(); - store - .update(|mut m| { - let block = block.clone(); - async move { m.insert_block(hash, block).await } - }) - .await - .unwrap(); - - // Make sure the data is still there after re-opening - drop(store); - store = AtomicStorage::open(path).expect("Could not open atomic store"); - assert_eq!( - store.get_block(&hash).await.unwrap(), - Some(DEntryBlock::default()) - ); - - // Add some transactions - let mut rng = thread_rng(); - let state = >::get_starting_state(); - let mut hashes = Vec::new(); - let mut block = block; - for _ in 0..10 { - let new = block - .add_transaction_raw(&random_transaction(&state, &mut rng)) - .expect("Could not add transaction"); - println!("Inserting {:?}: {:?}", new.hash(), new); - store - .update(|mut m| { - let new = new.clone(); - async move { m.insert_block(new.hash(), new.clone()).await } - }) - .await - .unwrap(); - hashes.push(new.hash()); - block = new; - } - - // read them all back 3 times - // 1st time: normal readback - // 2nd: after dropping and re-opening the store - for i in 0..3 { - if i == 1 { - drop(store); - store = AtomicStorage::open(path).expect("Could not open atomic store"); - } - - // read them all back - for (idx, hash) in hashes.iter().enumerate() { - match store.get_block(hash).await.expect("Could not read hash") { - Some(block) => println!("read {:?}", block), - None => panic!("Could not read hash {} {:?}", idx, hash), - } - } - } -} - #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index fc4b6017f7..35e75fffa7 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -6,13 +6,23 @@ #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_success() { use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{SequencingMemoryImpl, SequencingTestTypes}, test_builder::TestMetadata, }; + use std::time::Duration; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata::default(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(1_200_000), + }, + ), + ..TestMetadata::default() + }; metadata .gen_launcher::() .launch() diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 2cbc6c8a16..01610f38f2 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -59,6 +59,61 @@ async fn test_catchup() { .await; } +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_catchup_web() { + use std::time::Duration; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingTestTypes, SequencingWebImpl}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let catchup_nodes = vec![ChangeNode { + idx: 18, + updown: UpDown::Up, + }]; + + metadata.timing_data = timing_data; + metadata.start_nodes = 19; + metadata.total_nodes = 20; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::from_millis(400), catchup_nodes)], + }; + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + /// Test that one node catches up and has sucessful views after coming back #[cfg(test)] #[cfg_attr( @@ -66,6 +121,7 @@ async fn test_catchup() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_catchup_one_node() { use std::time::Duration; @@ -94,13 +150,13 @@ async fn test_catchup_one_node() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), catchup_nodes)], + node_changes: vec![(Duration::from_millis(400), catchup_nodes)], }; metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), + duration: Duration::from_millis(20000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { @@ -108,7 +164,7 @@ async fn test_catchup_one_node() { ..Default::default() }; // only alow for the view which the catchup node hasn't started to fail - metadata.overall_safety_properties.num_failed_views = 1; + metadata.overall_safety_properties.num_failed_views = 5; metadata .gen_launcher::() diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 0322909203..e31c34e6aa 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,9 +1,13 @@ use commit::Committable; use hotshot::HotShotSequencingConsensusApi; use hotshot_task_impls::events::SequencingHotShotEvent; -use hotshot_testing::node_types::{SequencingMemoryImpl, SequencingTestTypes}; +use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + task_helpers::vid_init, +}; use hotshot_types::{ - data::{DAProposal, ViewNumber}, + block_impl::VIDTransaction, + data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, @@ -17,15 +21,11 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot::{ - demos::sdemo::{SDemoBlock, SDemoNormalBlock}, - tasks::add_da_task, - }; + use hotshot::tasks::add_da_task; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{ - message::{CommitteeConsensusMessage, Proposal}, - traits::election::CommitteeExchangeType, + block_impl::VIDBlockPayload, message::Proposal, traits::election::CommitteeExchangeType, }; async_compatibility_layer::logging::setup_logging(); @@ -39,12 +39,16 @@ async fn test_da_task() { }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); - let block = SDemoBlock::Normal(SDemoNormalBlock { - previous_state: (), - transactions: Vec::new(), - }); - let block_commitment = block.commit(); - let signature = committee_exchange.sign_da_proposal(&block_commitment); + let vid = vid_init(); + let txn = vec![0u8]; + let vid_disperse = vid.disperse(&txn).unwrap(); + let block_commitment = vid_disperse.commit; + let block = VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: block_commitment, + }; + + let signature = committee_exchange.sign_da_proposal(&block.commit()); let proposal = DAProposal { deltas: block.clone(), view_number: ViewNumber::new(2), @@ -53,6 +57,17 @@ async fn test_da_task() { data: proposal, signature, }; + let vid_proposal = Proposal { + data: VidDisperse { + view_number: message.data.view_number, + commitment: block.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + signature: message.signature.clone(), + }; + // TODO for now reuse the same block commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 // Every event input is seen on the event stream in the output. let mut input = Vec::new(); @@ -61,26 +76,51 @@ async fn test_da_task() { // In view 1, node 2 is the next leader. input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::BlockReady( + block.clone(), + ViewNumber::new(2), + )); input.push(SequencingHotShotEvent::DAProposalRecv( message.clone(), pub_key, )); + input.push(SequencingHotShotEvent::VidDisperseRecv( + vid_proposal.clone(), + pub_key, + )); input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); + output.insert( + SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + 1, + ); + output.insert(SequencingHotShotEvent::SendDABlockData(block.clone()), 1); output.insert( SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), 1, ); - if let Ok(Some(vote_token)) = committee_exchange.make_vote_token(ViewNumber::new(2)) { - let da_message = - committee_exchange.create_da_message(block_commitment, ViewNumber::new(2), vote_token); - if let CommitteeConsensusMessage::DAVote(vote) = da_message { - output.insert(SequencingHotShotEvent::DAVoteSend(vote), 1); - } - } + let vote_token = committee_exchange + .make_vote_token(ViewNumber::new(2)) + .unwrap() + .unwrap(); + let da_vote = + committee_exchange.create_da_message(block.commit(), ViewNumber::new(2), vote_token); + output.insert(SequencingHotShotEvent::DAVoteSend(da_vote), 1); + + let vote_token = committee_exchange + .make_vote_token(ViewNumber::new(2)) + .unwrap() + .unwrap(); + let vid_vote = + committee_exchange.create_vid_message(block.commit(), ViewNumber::new(2), vote_token); + output.insert(SequencingHotShotEvent::VidVoteSend(vid_vote), 1); + output.insert(SequencingHotShotEvent::DAProposalRecv(message, pub_key), 1); + output.insert( + SequencingHotShotEvent::VidDisperseRecv(vid_proposal, pub_key), + 1, + ); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(SequencingHotShotEvent::Shutdown, 1); diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs new file mode 100644 index 0000000000..27f6ae6531 --- /dev/null +++ b/testing/tests/memory_network.rs @@ -0,0 +1,371 @@ +use std::collections::BTreeSet; +use std::marker::PhantomData; +use std::sync::Arc; + +use async_compatibility_layer::logging::setup_logging; +use hotshot::demo::SDemoState; +use hotshot::traits::election::static_committee::{ + GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, +}; +use hotshot::traits::implementations::{ + MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, +}; +use hotshot::traits::NodeImplementation; +use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; +use hotshot::types::SignatureKey; +use hotshot_types::block_impl::{VIDBlockPayload, VIDTransaction}; +use hotshot_types::certificate::ViewSyncCertificate; +use hotshot_types::data::{DAProposal, QuorumProposal, SequencingLeaf}; +use hotshot_types::message::{Message, SequencingMessage}; +use hotshot_types::traits::election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}; +use hotshot_types::traits::metrics::NoMetrics; +use hotshot_types::traits::network::TestableNetworkingImplementation; +use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; +use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType, SequencingExchanges}; +use hotshot_types::vote::{DAVote, ViewSyncVote}; +use hotshot_types::{ + data::ViewNumber, + message::{DataMessage, MessageKind}, + traits::state::ConsensusTime, + vote::QuorumVote, +}; +use rand::rngs::StdRng; +use rand::{RngCore, SeedableRng}; +use serde::{Deserialize, Serialize}; +use tracing::instrument; +use tracing::trace; + +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +pub struct Test; + +impl NodeType for Test { + type Time = ViewNumber; + type BlockType = VIDBlockPayload; + type SignatureKey = BLSPubKey; + type VoteTokenType = StaticVoteToken; + type Transaction = VIDTransaction; + type ElectionConfigType = StaticElectionConfig; + type StateType = SDemoState; +} + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct TestImpl {} + +pub type ThisLeaf = SequencingLeaf; +pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; +pub type DANetwork = MemoryCommChannel; +pub type QuorumNetwork = MemoryCommChannel; +pub type ViewSyncNetwork = MemoryCommChannel; + +pub type ThisDAProposal = DAProposal; +pub type ThisDAVote = DAVote; + +pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumVote = QuorumVote; + +pub type ThisViewSyncProposal = ViewSyncCertificate; +pub type ThisViewSyncVote = ViewSyncVote; + +impl NodeImplementation for TestImpl { + type Storage = MemoryStorage; + type Leaf = SequencingLeaf; + type Exchanges = SequencingExchanges< + Test, + Message, + QuorumExchange< + Test, + Self::Leaf, + ThisQuorumProposal, + ThisMembership, + QuorumNetwork, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + Test, + ThisViewSyncProposal, + ThisMembership, + ViewSyncNetwork, + Message, + >, + >; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> (ChannelMaps, Option>) { + (ChannelMaps::new(start_view), None) + } +} + +/// fake Eq +/// we can't compare the votetokentype for equality, so we can't +/// derive EQ on `VoteType` and thereby message +/// we are only sending data messages, though so we compare key and +/// data message +fn fake_message_eq(message_1: Message, message_2: Message) { + assert_eq!(message_1.sender, message_2.sender); + if let MessageKind::Data(DataMessage::SubmitTransaction(d_1, _)) = message_1.kind { + if let MessageKind::Data(DataMessage::SubmitTransaction(d_2, _)) = message_2.kind { + assert_eq!(d_1, d_2); + } + } else { + panic!("Got unexpected message type in memory test!"); + } +} + +#[instrument] +fn get_pubkey() -> BLSPubKey { + // random 32 bytes + let mut bytes = [0; 32]; + rand::thread_rng().fill_bytes(&mut bytes); + BLSPubKey::from_private(&BLSPrivKey::generate_from_seed(bytes)) +} + +/// create a message +fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec> { + let mut messages = Vec::new(); + for _ in 0..num_messages { + // create a random transaction from seed + let mut bytes = [0u8; 8]; + let mut rng = StdRng::seed_from_u64(seed); + rng.fill_bytes(&mut bytes); + + let message = Message { + sender: pk, + kind: MessageKind::Data(DataMessage::SubmitTransaction( + VIDTransaction(bytes.to_vec()), + ::new(0), + )), + _phantom: PhantomData, + }; + messages.push(message); + } + messages +} + +// Spawning a single MemoryNetwork should produce no errors +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_spawn_single() { + setup_logging(); + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key = get_pubkey(); + let _network = MemoryNetwork::new(pub_key, NoMetrics::boxed(), group, Option::None); +} + +// // Spawning a two MemoryNetworks and connecting them should produce no errors +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_spawn_double() { + setup_logging(); + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key_1 = get_pubkey(); + let _network_1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let pub_key_2 = get_pubkey(); + let _network_2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); +} + +// Check to make sure direct queue works +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_direct_queue() { + setup_logging(); + // Create some dummy messages + + // Make and connect the networking instances + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + + let pub_key_1 = get_pubkey(); + let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + + let pub_key_2 = get_pubkey(); + let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + + let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + + // Test 1 -> 2 + // Send messages + for sent_message in first_messages { + network1 + .direct_message(sent_message.clone(), pub_key_2) + .await + .expect("Failed to message node"); + let mut recv_messages = network2 + .recv_msgs(TransmitType::Direct) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } + + let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + + // Test 2 -> 1 + // Send messages + for sent_message in second_messages { + network2 + .direct_message(sent_message.clone(), pub_key_1) + .await + .expect("Failed to message node"); + let mut recv_messages = network1 + .recv_msgs(TransmitType::Direct) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } +} + +// Check to make sure direct queue works +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_network_broadcast_queue() { + setup_logging(); + // Make and connect the networking instances + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key_1 = get_pubkey(); + let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let pub_key_2 = get_pubkey(); + let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + + let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + + // Test 1 -> 2 + // Send messages + for sent_message in first_messages { + network1 + .broadcast_message( + sent_message.clone(), + vec![pub_key_2].into_iter().collect::>(), + ) + .await + .expect("Failed to message node"); + let mut recv_messages = network2 + .recv_msgs(TransmitType::Broadcast) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } + + let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + + // Test 2 -> 1 + // Send messages + for sent_message in second_messages { + network2 + .broadcast_message( + sent_message.clone(), + vec![pub_key_1].into_iter().collect::>(), + ) + .await + .expect("Failed to message node"); + let mut recv_messages = network1 + .recv_msgs(TransmitType::Broadcast) + .await + .expect("Failed to receive message"); + let recv_message = recv_messages.pop().unwrap(); + assert!(recv_messages.is_empty()); + fake_message_eq(sent_message, recv_message); + } +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +#[allow(deprecated)] +async fn memory_network_test_in_flight_message_count() { + setup_logging(); + + let group: Arc, ::SignatureKey>> = + MasterMap::new(); + trace!(?group); + let pub_key_1 = get_pubkey(); + let network1 = MemoryNetwork::new(pub_key_1, NoMetrics::boxed(), group.clone(), Option::None); + let pub_key_2 = get_pubkey(); + let network2 = MemoryNetwork::new(pub_key_2, NoMetrics::boxed(), group, Option::None); + + // Create some dummy messages + let messages: Vec> = gen_messages(5, 100, pub_key_1); + let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); + + assert_eq!(network1.in_flight_message_count(), Some(0)); + assert_eq!(network2.in_flight_message_count(), Some(0)); + + for (count, message) in messages.iter().enumerate() { + network1 + .direct_message(message.clone(), pub_key_2) + .await + .unwrap(); + // network 2 has received `count` broadcast messages and `count + 1` direct messages + assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); + + network2 + .broadcast_message(message.clone(), broadcast_recipients.clone()) + .await + .unwrap(); + // network 1 has received `count` broadcast messages + assert_eq!(network1.in_flight_message_count(), Some(count + 1)); + + // network 2 has received `count + 1` broadcast messages and `count + 1` direct messages + assert_eq!(network2.in_flight_message_count(), Some((count + 1) * 2)); + } + + while network1.in_flight_message_count().unwrap() > 0 { + network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); + } + + while network2.in_flight_message_count().unwrap() > messages.len() { + network2.recv_msgs(TransmitType::Direct).await.unwrap(); + } + + while network2.in_flight_message_count().unwrap() > 0 { + network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); + } + + assert_eq!(network1.in_flight_message_count(), Some(0)); + assert_eq!(network2.in_flight_message_count(), Some(0)); +} diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index aa0c80024f..c4165a8c31 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -3,10 +3,10 @@ use hotshot::HotShotSequencingConsensusApi; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_testing::{ node_types::{SequencingMemoryImpl, SequencingTestTypes}, - task_helpers::build_quorum_proposal, + task_helpers::{build_quorum_proposal, vid_init}, }; use hotshot_types::{ - data::{DAProposal, ViewNumber}, + data::{DAProposal, VidSchemeTrait, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, node_implementation::ExchangesType, state::ConsensusTime, }, @@ -21,10 +21,14 @@ use std::collections::HashMap; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_network_task() { - use hotshot::demos::sdemo::{SDemoBlock, SDemoNormalBlock}; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{message::Proposal, traits::election::CommitteeExchangeType}; + use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, + data::VidDisperse, + message::Proposal, + traits::election::CommitteeExchangeType, + }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -38,12 +42,15 @@ async fn test_network_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let priv_key = api.private_key(); - let block = SDemoBlock::Normal(SDemoNormalBlock { - previous_state: (), - transactions: Vec::new(), - }); - let block_commitment = block.commit(); - let signature = committee_exchange.sign_da_proposal(&block_commitment); + let vid = vid_init(); + let txn = vec![0u8]; + let vid_disperse = vid.disperse(&txn).unwrap(); + let block_commitment = vid_disperse.commit; + let block = VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: block_commitment, + }; + let signature = committee_exchange.sign_da_proposal(&block.commit()); let da_proposal = Proposal { data: DAProposal { deltas: block.clone(), @@ -52,16 +59,35 @@ async fn test_network_task() { signature, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; + // TODO for now reuse the same block commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 + let da_vid_disperse = Proposal { + data: VidDisperse { + view_number: da_proposal.data.view_number, + commitment: block.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + signature: da_proposal.signature.clone(), + }; // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(SequencingHotShotEvent::BlockReady( + block.clone(), + ViewNumber::new(2), + )); input.push(SequencingHotShotEvent::DAProposalSend( da_proposal.clone(), pub_key, )); + input.push(SequencingHotShotEvent::VidDisperseSend( + da_vid_disperse.clone(), + pub_key, + )); input.push(SequencingHotShotEvent::QuorumProposalSend( quorum_proposal.clone(), pub_key, @@ -70,11 +96,25 @@ async fn test_network_task() { input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); - // One output from the input, the other from the DA task. output.insert( SequencingHotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), + 2, // 2 occurrences: 1 from `input`, 1 from the DA task + ); + output.insert( + SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 2, ); + output.insert( + SequencingHotShotEvent::VidDisperseRecv(da_vid_disperse.clone(), pub_key), + 1, + ); + output.insert( + SequencingHotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), + 2, // 2 occurrences: 1 from `input`, 1 from the DA task + ); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(1)), 1); + output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); + // Only one output from the input. // The consensus task will fail to send a second proposal, like the DA task does, due to the // view number check in `publish_proposal_if_able` in consensus.rs, and we will see an error in diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 7ebeddd577..f8963c9d52 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -30,7 +30,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), dead_nodes)], + node_changes: vec![(Duration::new(0, 5000), dead_nodes)], }; // TODO ED Add safety task, etc to confirm TCs are being formed diff --git a/types/Cargo.toml b/types/Cargo.toml index f65eac0983..c0eb7a11e0 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -8,16 +8,17 @@ version = "0.1.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -demo = ["ed25519-compact"] +demo = [] [dependencies] arbitrary = { version = "1.3", features = ["derive"] } +ark-bls12-381 = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } ark-serialize = { version = "0.3", features = [ "derive", -] } # TODO GG upgrade to 0.4 and inherit this dep from workspace +] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } @@ -27,30 +28,31 @@ custom_debug = { workspace = true } derivative = "2.2.0" digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } -ed25519-compact = { version = "2.0.4", optional = true } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } futures = { workspace = true } -generic-array = "0.14.7" +generic-array = { workspace = true } hex_fmt = "0.3.0" +hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", default-features = false } -jf-primitives = { workspace = true } -nll = { workspace = true } +jf-primitives = { workspace = true, features = ["test-srs"] } +jf-utils = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } +sha2 = { workspace = true } +sha3 = "^0.10" snafu = { workspace = true } tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } time = { workspace = true } tracing = { workspace = true } ethereum-types = { workspace = true } -bit-vec = "0.6.3" typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.106" +serde_json = "1.0.107" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs new file mode 100644 index 0000000000..2360cb3a24 --- /dev/null +++ b/types/src/block_impl.rs @@ -0,0 +1,127 @@ +//! This module provides an implementation of the `HotShot` suite of traits. +use std::{ + collections::HashSet, + fmt::{Debug, Display}, +}; + +use crate::{ + data::{test_srs, VidScheme, VidSchemeTrait}, + traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}, +}; +use commit::{Commitment, Committable}; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Keccak256}; +use snafu::Snafu; + +// TODO +/// Number of storage nodes for VID initiation. +pub const NUM_STORAGE_NODES: usize = 10; +// TODO +/// Number of chunks for VID initiation. +pub const NUM_CHUNKS: usize = 5; + +/// The transaction in a [`VIDBlockPayload`]. +#[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct VIDTransaction(pub Vec); + +impl Committable for VIDTransaction { + fn commit(&self) -> Commitment { + let builder = commit::RawCommitmentBuilder::new("Txn Comm"); + let mut hasher = Keccak256::new(); + hasher.update(&self.0); + let generic_array = hasher.finalize(); + builder.generic_byte_array(&generic_array).finalize() + } + + fn tag() -> String { + "SEQUENCING_TXN".to_string() + } +} + +impl Transaction for VIDTransaction {} + +/// The error type for block payload. +#[derive(Snafu, Debug)] +pub enum BlockPayloadError { + /// Previous state commitment does not match + PreviousStateMismatch, + /// Nonce was reused + ReusedTxn, + /// Genesis failure + GenesisFailed, + /// Genesis reencountered after initialization + GenesisAfterStart, + /// invalid block + InvalidBlock, +} + +/// A [`BlockPayload`] that contains a list of `VIDTransaction`. +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct VIDBlockPayload { + /// List of transactions. + pub transactions: Vec, + /// VID commitment. + pub commitment: ::Commit, +} + +impl VIDBlockPayload { + /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for + /// consensus task initiation. + /// # Panics + /// If the `VidScheme` construction fails. + #[must_use] + pub fn genesis() -> Self { + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let txn = vec![0]; + let vid_disperse = vid.disperse(&txn).unwrap(); + VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: vid_disperse.commit, + } + } +} + +impl Committable for VIDBlockPayload { + fn commit(&self) -> Commitment { + let builder = commit::RawCommitmentBuilder::new("BlockPayload Comm"); + builder.generic_byte_array(&self.commitment).finalize() + } + + fn tag() -> String { + "VID_BLOCK_PAYLOAD".to_string() + } +} + +impl Display for VIDBlockPayload { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "BlockPayload #txns={}", self.transactions.len()) + } +} + +impl TestableBlock for VIDBlockPayload { + fn genesis() -> Self { + Self::genesis() + } + + fn txn_count(&self) -> u64 { + self.transactions.len() as u64 + } +} + +impl BlockPayload for VIDBlockPayload { + type Error = BlockPayloadError; + + type Transaction = VIDTransaction; + + fn contained_transactions(&self) -> HashSet> { + self.transactions + .iter() + .map(commit::Committable::commit) + .collect() + } +} diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 47720eba75..09c3235744 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -1,22 +1,29 @@ //! Provides two types of cerrtificates and their accumulators. +use crate::vote::DAVoteAccumulator; +use crate::vote::QuorumVote; +use crate::vote::QuorumVoteAccumulator; +use crate::vote::ViewSyncVoteAccumulator; +use crate::vote::VoteType; use crate::{ - data::{fake_commitment, serialize_signature, LeafType}, + data::serialize_signature, traits::{ election::{SignedCertificate, VoteData, VoteToken}, node_implementation::NodeType, signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, state::ConsensusTime, }, - vote::ViewSyncData, + vote::{DAVote, ViewSyncData, ViewSyncVote}, }; use bincode::Options; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds, Committable}; + use espresso_systems_common::hotshot::tag; use hotshot_utils::bincode::bincode_opts; use serde::{Deserialize, Serialize}; use std::{ fmt::{self, Debug, Display, Formatter}, + hash::Hash, ops::Deref, }; use tracing::debug; @@ -43,12 +50,12 @@ pub struct DACertificate { /// /// A Quorum Certificate is a threshold signature of the `Leaf` being proposed, as well as some /// metadata, such as the `Stage` of consensus the quorum certificate was generated during. -#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] +#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash, Eq)] #[serde(bound(deserialize = ""))] -pub struct QuorumCertificate> { +pub struct QuorumCertificate { /// commitment to previous leaf #[debug(skip)] - pub leaf_commitment: Commitment, + pub leaf_commitment: COMMITMENT, /// Which view this QC relates to pub view_number: TYPES::Time, /// assembled signature for certificate aggregation @@ -57,7 +64,9 @@ pub struct QuorumCertificate> pub is_genesis: bool, } -impl> Display for QuorumCertificate { +impl Display + for QuorumCertificate +{ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, @@ -132,15 +141,15 @@ pub enum AssembledSignature { } /// Data from a vote needed to accumulate into a `SignedCertificate` -pub struct VoteMetaData { +pub struct VoteMetaData { /// Voter's public key pub encoded_key: EncodedPublicKey, /// Votes signature pub encoded_signature: EncodedSignature, /// Commitment to what's voted on. E.g. the leaf for a `QuorumCertificate` - pub commitment: Commitment, + pub commitment: COMMITMENT, /// Data of the vote, yes, no, timeout, or DA - pub data: VoteData, + pub data: VoteData, /// The votes's token pub vote_token: T, /// View number for the vote @@ -150,19 +159,26 @@ pub struct VoteMetaData, } -impl> - SignedCertificate - for QuorumCertificate +impl + SignedCertificate + for QuorumCertificate { + type Vote = QuorumVote; + type VoteAccumulator = QuorumVoteAccumulator; + fn from_signatures_and_commitment( - view_number: TYPES::Time, signatures: AssembledSignature, - commit: Commitment, - _relay: Option, + vote: Self::Vote, ) -> Self { + let leaf_commitment = match vote.clone() { + QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { + vote_internal.leaf_commitment + } + QuorumVote::Timeout(_) => unimplemented!(), + }; let qc = QuorumCertificate { - leaf_commitment: commit, - view_number, + leaf_commitment, + view_number: vote.get_view(), signatures, is_genesis: false, }; @@ -178,11 +194,11 @@ impl> self.signatures.clone() } - fn leaf_commitment(&self) -> Commitment { + fn leaf_commitment(&self) -> COMMITMENT { self.leaf_commitment } - fn set_leaf_commitment(&mut self, commitment: Commitment) { + fn set_leaf_commitment(&mut self, commitment: COMMITMENT) { self.leaf_commitment = commitment; } @@ -191,8 +207,9 @@ impl> } fn genesis() -> Self { + // TODO GG need a new way to get fake commit now that we don't have Committable Self { - leaf_commitment: fake_commitment::(), + leaf_commitment: COMMITMENT::default_commitment_no_preimage(), view_number: ::genesis(), signatures: AssembledSignature::Genesis(), is_genesis: true, @@ -200,16 +217,14 @@ impl> } } -impl> Eq for QuorumCertificate {} - -impl> Committable - for QuorumCertificate +impl Committable + for QuorumCertificate { fn commit(&self) -> Commitment { let signatures_bytes = serialize_signature(&self.signatures); commit::RawCommitmentBuilder::new("Quorum Certificate Commitment") - .field("leaf commitment", self.leaf_commitment) + .var_size_field("leaf commitment", self.leaf_commitment.as_ref()) .u64_field("view number", *self.view_number.deref()) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) @@ -221,19 +236,21 @@ impl> Committable } } -impl SignedCertificate +impl + SignedCertificate> for DACertificate { + type Vote = DAVote; + type VoteAccumulator = DAVoteAccumulator, Self::Vote>; + fn from_signatures_and_commitment( - view_number: TYPES::Time, signatures: AssembledSignature, - commit: Commitment, - _relay: Option, + vote: Self::Vote, ) -> Self { DACertificate { - view_number, + view_number: vote.get_view(), signatures, - block_commitment: commit, + block_commitment: vote.block_commitment, } } @@ -309,19 +326,20 @@ impl Committable for ViewSyncCertificate { } impl - SignedCertificate> + SignedCertificate>> for ViewSyncCertificate { + type Vote = ViewSyncVote; + type VoteAccumulator = + ViewSyncVoteAccumulator>, Self::Vote>; /// Build a QC from the threshold signature and commitment fn from_signatures_and_commitment( - view_number: TYPES::Time, signatures: AssembledSignature, - _commit: Commitment>, - relay: Option, + vote: Self::Vote, ) -> Self { let certificate_internal = ViewSyncCertificateInternal { - round: view_number, - relay: relay.unwrap(), + round: vote.get_view(), + relay: vote.relay(), signatures: signatures.clone(), }; match signatures { diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a29c840056..e10900aa7e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -2,8 +2,6 @@ pub use crate::traits::node_implementation::ViewQueue; pub use crate::utils::{View, ViewInner}; -use async_compatibility_layer::async_primitives::subscribable_rwlock::SubscribableRwLock; -use std::collections::HashSet; use crate::utils::Terminator; use crate::{ @@ -41,12 +39,6 @@ pub struct Consensus> { /// last view had a successful decide event pub last_decided_view: TYPES::Time, - /// A list of undecided transactions - pub transactions: Arc>>, - - /// A list of transactions we've seen decided, but didn't receive - pub seen_transactions: HashSet>, - /// Map of leaf hash -> leaf /// - contains undecided leaves /// - includes the MOST RECENT decided leaf @@ -61,7 +53,7 @@ pub struct Consensus> { pub locked_view: TYPES::Time, /// the highqc per spec - pub high_qc: QuorumCertificate, + pub high_qc: QuorumCertificate>, /// A reference to the metrics trait #[debug(skip)] @@ -264,12 +256,6 @@ impl> Consensus { self.state_map = self.state_map.split_off(&new_anchor_view); } - /// return a clone of the internal storage of unclaimed transactions - #[must_use] - pub fn get_transactions(&self) -> Arc>> { - self.transactions.clone() - } - /// Gets the last decided state /// # Panics /// if the last decided view's state does not exist in the state map diff --git a/types/src/constants.rs b/types/src/constants.rs deleted file mode 100644 index 26acfb1abc..0000000000 --- a/types/src/constants.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! configurable constants for hotshot - -use crate::traits::signature_key::EncodedPublicKey; - -/// the number of views to gather information for ahead of time -pub const LOOK_AHEAD: u64 = 5; - -/// the genesis proposer pk -/// unfortunately need to allocate on the heap (for vec), so this ends up as a function instead of a -/// const -#[must_use] -pub fn genesis_proposer_id() -> EncodedPublicKey { - EncodedPublicKey(vec![4, 2]) -} diff --git a/types/src/data.rs b/types/src/data.rs index 68fc0ec064..44a1937161 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,23 +8,24 @@ use crate::{ AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCertificate, }, - constants::genesis_proposer_id, traits::{ - election::SignedCertificate, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, storage::StoredView, - Block, State, + BlockPayload, State, }, }; +use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; use bincode::Options; use commit::{Commitment, Committable}; use derivative::Derivative; use either::Either; use espresso_systems_common::hotshot::tag; +use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; +use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::{ensure, Snafu}; @@ -100,8 +101,14 @@ impl std::ops::Sub for ViewNumber { } } +/// Generate the genesis block proposer ID from the defined constant +#[must_use] +pub fn genesis_proposer_id() -> EncodedPublicKey { + EncodedPublicKey(GENESIS_PROPOSER_ID.to_vec()) +} + /// The `Transaction` type associated with a `State`, as a syntactic shortcut -pub type Transaction = <::BlockType as Block>::Transaction; +pub type Transaction = <::BlockType as BlockPayload>::Transaction; /// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut pub type TxnCommitment = Commitment>; @@ -124,21 +131,21 @@ where pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The hash of the parent `Leaf` /// So we can ask if it extends #[debug(skip)] pub parent_commitment: Commitment, - /// Block leaf wants to apply + /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, /// What the state should be after applying `self.deltas` pub state_commitment: Commitment, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the propser id pub proposer_id: EncodedPublicKey, @@ -147,12 +154,50 @@ where /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { - /// Block leaf wants to apply + /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, /// View this proposal applies to pub view_number: TYPES::Time, } +/// The VID scheme type used in `HotShot`. +pub type VidScheme = jf_primitives::vid::advz::Advz; +pub use jf_primitives::vid::VidScheme as VidSchemeTrait; + +/// VID dispersal data +/// +/// Like [`DAProposal`]. +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct VidDisperse { + /// The view number for which this VID data is intended + pub view_number: TYPES::Time, + /// Block commitment + pub commitment: Commitment, + /// VID shares dispersed among storage nodes + pub shares: Vec<::Share>, + /// VID common data sent to all storage nodes + pub common: ::Common, +} + +/// Trusted KZG setup for VID. +/// +/// TESTING ONLY: don't use this in production +/// TODO +/// +/// # Panics +/// ...because this is only for tests. This comment exists to pacify clippy. +#[must_use] +pub fn test_srs( + num_storage_nodes: usize, +) -> as PolynomialCommitmentScheme>::SRS { + let mut rng = jf_utils::test_rng(); + UnivariateKzgPCS::::gen_srs_for_testing( + &mut rng, + checked_fft_size(num_storage_nodes).unwrap(), + ) + .unwrap() +} + /// Proposal to append a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] @@ -167,7 +212,7 @@ pub struct QuorumProposal> { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view pub timeout_certificate: Option>, @@ -196,6 +241,13 @@ impl ProposalType for DAProposal { } } +impl ProposalType for VidDisperse { + type NodeType = TYPES; + fn get_view_number(&self) -> ::Time { + self.view_number + } +} + impl> ProposalType for QuorumProposal { @@ -233,14 +285,14 @@ pub trait ProposalType: /// full. It is guaranteed to contain, at least, a cryptographic commitment to the block, and it /// provides an interface for resolving the commitment to a full block if the full block is /// available. -pub trait DeltasType: +pub trait DeltasType: Clone + Debug + for<'a> Deserialize<'a> + PartialEq + Eq + std::hash::Hash + Send + Serialize + Sync { /// Errors reported by this type. type Error: std::error::Error; /// Get a cryptographic commitment to the block represented by this delta. - fn block_commitment(&self) -> Commitment; + fn block_commitment(&self) -> Commitment; /// Get the full block if it is available, otherwise return this object unchanged. /// @@ -248,7 +300,7 @@ pub trait DeltasType: /// /// Returns the original [`DeltasType`], unchanged, in an [`Err`] variant in the case where the /// full block is not currently available. - fn try_resolve(self) -> Result; + fn try_resolve(self) -> Result; /// Fill this [`DeltasType`] by providing a complete block. /// @@ -259,7 +311,7 @@ pub trait DeltasType: /// /// Fails if `block` does not match `self.block_commitment()`, or if the block is not able to be /// stored for some implementation-defined reason. - fn fill(&mut self, block: Block) -> Result<(), Self::Error>; + fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; } /// Error which occurs when [`DeltasType::fill`] is called with a block that does not match the @@ -389,7 +441,7 @@ pub trait LeafType: /// Create a new leaf from its components. fn new( view_number: LeafTime, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, deltas: LeafBlock, state: LeafState, ) -> Self; @@ -402,7 +454,7 @@ pub trait LeafType: /// Change the height of this leaf. fn set_height(&mut self, height: u64); /// The QC linking this leaf to its parent in the chain. - fn get_justify_qc(&self) -> QuorumCertificate; + fn get_justify_qc(&self) -> QuorumCertificate>; /// Commitment to this leaf's parent. fn get_parent_commitment(&self) -> Commitment; /// The block contained in this leaf. @@ -442,10 +494,10 @@ pub type LeafDeltasError = as DeltasType pub type LeafNode = ::NodeType; /// The [`StateType`] in a [`LeafType`]. pub type LeafState = as NodeType>::StateType; -/// The [`Block`] in a [`LeafType`]. +/// The [`BlockPayload`] in a [`LeafType`]. pub type LeafBlock = as NodeType>::BlockType; /// The [`Transaction`] in a [`LeafType`]. -pub type LeafTransaction = as Block>::Transaction; +pub type LeafTransaction = as BlockPayload>::Transaction; /// The [`ConsensusTime`] used by a [`LeafType`]. pub type LeafTime = as NodeType>::Time; @@ -459,12 +511,12 @@ pub trait TestableLeaf { &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as Block>::Transaction; + ) -> <::BlockType as BlockPayload>::Transaction; } /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. -/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::Block` +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` #[derive(Serialize, Deserialize, Clone, Debug, Derivative)] #[serde(bound(deserialize = ""))] #[derivative(Hash, PartialEq, Eq)] @@ -476,20 +528,20 @@ pub struct ValidatingLeaf { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The hash of the parent `Leaf` /// So we can ask if it extends - pub parent_commitment: Commitment>, + pub parent_commitment: Commitment, - /// Block leaf wants to apply + /// BlockPayload leaf wants to apply pub deltas: TYPES::BlockType, /// What the state should be AFTER applying `self.deltas` pub state: TYPES::StateType, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats #[derivative(PartialEq = "ignore")] @@ -504,7 +556,7 @@ pub struct ValidatingLeaf { /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. -/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::Block` +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` #[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] #[serde(bound(deserialize = ""))] pub struct SequencingLeaf { @@ -515,17 +567,17 @@ pub struct SequencingLeaf { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The hash of the parent `SequencingLeaf` /// So we can ask if it extends - pub parent_commitment: Commitment>, + pub parent_commitment: Commitment, /// The block or block commitment to be applied pub deltas: Either>, /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, + pub rejected: Vec<::Transaction>, /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats pub timestamp: i128, @@ -589,7 +641,7 @@ impl LeafType for ValidatingLeaf { fn new( view_number: ::Time, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, deltas: ::BlockType, state: ::StateType, ) -> Self { @@ -618,7 +670,7 @@ impl LeafType for ValidatingLeaf { self.height = height; } - fn get_justify_qc(&self) -> QuorumCertificate { + fn get_justify_qc(&self) -> QuorumCertificate> { self.justify_qc.clone() } @@ -642,7 +694,7 @@ impl LeafType for ValidatingLeaf { self.state.clone() } - fn get_rejected(&self) -> Vec<::Transaction> { + fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } @@ -680,7 +732,7 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as Block>::Transaction { + ) -> <::BlockType as BlockPayload>::Transaction { ::create_random_transaction( Some(&self.state), rng, @@ -706,7 +758,7 @@ impl LeafType for SequencingLeaf { fn new( view_number: ::Time, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, deltas: ::BlockType, _state: ::StateType, ) -> Self { @@ -734,7 +786,7 @@ impl LeafType for SequencingLeaf { self.height = height; } - fn get_justify_qc(&self) -> QuorumCertificate { + fn get_justify_qc(&self) -> QuorumCertificate> { self.justify_qc.clone() } @@ -757,7 +809,7 @@ impl LeafType for SequencingLeaf { // The Sequencing Leaf doesn't have a state. fn get_state(&self) -> Self::MaybeState {} - fn get_rejected(&self) -> Vec<::Transaction> { + fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } @@ -794,7 +846,7 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockType as Block>::Transaction { + ) -> <::BlockType as BlockPayload>::Transaction { TYPES::StateType::create_random_transaction(None, rng, padding) } } @@ -879,7 +931,7 @@ impl Committable for ValidatingLeaf { .u64(*self.justify_qc.view_number) .field( "justify_qc leaf commitment", - self.justify_qc.leaf_commitment(), + self.justify_qc.leaf_commitment, ) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) @@ -911,7 +963,7 @@ impl Committable for SequencingLeaf { .u64(*self.justify_qc.view_number) .field( "justify_qc leaf commitment", - self.justify_qc.leaf_commitment(), + self.justify_qc.leaf_commitment, ) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) diff --git a/types/src/event.rs b/types/src/event.rs index f33a39f8cd..5d091cd400 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -4,6 +4,7 @@ use crate::{ certificate::QuorumCertificate, data::LeafType, error::HotShotError, traits::node_implementation::NodeType, }; +use commit::Commitment; use std::sync::Arc; /// A status event emitted by a `HotShot` instance /// @@ -42,7 +43,7 @@ pub enum EventType> { /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf /// before it using - qc: Arc>, + qc: Arc>>, /// Optional information of the number of transactions in the block, for logging purposes. block_size: Option, }, diff --git a/types/src/lib.rs b/types/src/lib.rs index e574bef876..54739106c3 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -11,9 +11,9 @@ use std::{num::NonZeroUsize, time::Duration}; +pub mod block_impl; pub mod certificate; pub mod consensus; -pub mod constants; pub mod data; pub mod error; pub mod event; @@ -36,7 +36,7 @@ pub enum ExecutionType { /// Holds configuration for a `HotShot` #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] -pub struct HotShotConfig { +pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, /// Total number of nodes in the network @@ -45,8 +45,6 @@ pub struct HotShotConfig { pub min_transactions: usize, /// Maximum transactions per block pub max_transactions: NonZeroUsize, - /// List of known node's public keys, including own, sorted by nonce () - pub known_nodes: Vec, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec, /// List of DA committee nodes for static DA committe diff --git a/types/src/message.rs b/types/src/message.rs index 89d2864aa9..c26de8bcef 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -5,7 +5,7 @@ use crate::{ certificate::DACertificate, - data::{DAProposal, ProposalType}, + data::{DAProposal, ProposalType, VidDisperse}, traits::{ network::{NetworkMsg, ViewMessage}, node_implementation::{ @@ -15,6 +15,7 @@ use crate::{ }, vote::{DAVote, QuorumVote, ViewSyncVote, VoteType}, }; +use commit::Commitment; use derivative::Derivative; use either::Either::{self, Left, Right}; use serde::{Deserialize, Serialize}; @@ -52,11 +53,13 @@ impl> ViewMessage for Messa #[derive(Clone, Debug)] pub struct Messages>(pub Vec>); -/// A message type agnostic description of a messages purpose +/// A message type agnostic description of a message's purpose #[derive(PartialEq, Copy, Clone)] pub enum MessagePurpose { /// Message with a quorum proposal. Proposal, + /// Message with most recent proposal the server has + CurrentProposal, /// Message with a quorum vote. Vote, /// Message with a view sync vote. @@ -69,6 +72,12 @@ pub enum MessagePurpose { Internal, /// Data message Data, + /// VID disperse, like [`Proposal`]. + VidDisperse, + /// VID vote, like [`Vote`]. + VidVote, + /// VID certificate, like [`DAC`]. + VidCert, } // TODO (da) make it more customized to the consensus layer, maybe separating the specific message @@ -141,7 +150,7 @@ where /// Message with a quorum proposal. Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. - Vote(QuorumVote, TYPES::SignatureKey), + Vote(QuorumVote>, TYPES::SignatureKey), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. @@ -207,6 +216,12 @@ pub enum ProcessedCommitteeConsensusMessage { DAVote(DAVote, TYPES::SignatureKey), /// Certificate for the DA. DACertificate(DACertificate, TYPES::SignatureKey), + /// VID dispersal data. Like [`DAProposal`] + VidDisperseMsg(Proposal>, TYPES::SignatureKey), + /// Vote from VID storage node. Like [`DAVote`] + VidVote(DAVote, TYPES::SignatureKey), + /// Certificate for VID. Like [`DACertificate`] + VidCertificate(DACertificate, TYPES::SignatureKey), } impl From> @@ -223,6 +238,15 @@ impl From> ProcessedCommitteeConsensusMessage::DACertificate(cert, _) => { CommitteeConsensusMessage::DACertificate(cert) } + ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, _) => { + CommitteeConsensusMessage::VidDisperseMsg(disperse) + } + ProcessedCommitteeConsensusMessage::VidVote(v, _) => { + CommitteeConsensusMessage::VidVote(v) + } + ProcessedCommitteeConsensusMessage::VidCertificate(cert, _) => { + CommitteeConsensusMessage::VidCertificate(cert) + } } } } @@ -240,6 +264,15 @@ impl ProcessedCommitteeConsensusMessage { CommitteeConsensusMessage::DACertificate(cert) => { ProcessedCommitteeConsensusMessage::DACertificate(cert, sender) } + CommitteeConsensusMessage::VidDisperseMsg(disperse) => { + ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, sender) + } + CommitteeConsensusMessage::VidVote(v) => { + ProcessedCommitteeConsensusMessage::VidVote(v, sender) + } + CommitteeConsensusMessage::VidCertificate(cert) => { + ProcessedCommitteeConsensusMessage::VidCertificate(cert, sender) + } } } } @@ -282,7 +315,7 @@ where Proposal(Proposal>), /// Message with a quorum vote. - Vote(QuorumVote), + Vote(QuorumVote>), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), @@ -307,6 +340,23 @@ pub enum CommitteeConsensusMessage { /// Certificate data is available DACertificate(DACertificate), + + /// Initiate VID dispersal. + /// + /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. + /// TODO this variant should not be a [`CommitteeConsensusMessage`] because + VidDisperseMsg(Proposal>), + + /// Vote for VID disperse data + /// + /// Like [`DAVote`]. + /// TODO currently re-using [`DAVote`]; do we need a separate VID vote? + VidVote(DAVote), + /// VID certificate data is available + /// + /// Like [`DACertificate`] + /// TODO currently re-using [`DACertificate`]; do we need a separate VID cert? + VidCertificate(DACertificate), } /// Messages related to the consensus protocol. @@ -359,7 +409,7 @@ impl< // this should match replica upon receipt p.data.get_view_number() } - GeneralConsensusMessage::Vote(vote_message) => vote_message.current_view(), + GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view(), GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { InternalTrigger::Timeout(time) => *time, }, @@ -376,8 +426,13 @@ impl< // this should match replica upon receipt p.data.get_view_number() } - CommitteeConsensusMessage::DAVote(vote_message) => vote_message.current_view(), - CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, + CommitteeConsensusMessage::DAVote(vote_message) => vote_message.get_view(), + CommitteeConsensusMessage::DACertificate(cert) + | CommitteeConsensusMessage::VidCertificate(cert) => cert.view_number, + CommitteeConsensusMessage::VidDisperseMsg(disperse) => { + disperse.data.get_view_number() + } + CommitteeConsensusMessage::VidVote(vote) => vote.get_view(), } } } @@ -397,7 +452,10 @@ impl< Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, + CommitteeConsensusMessage::VidVote(_) => MessagePurpose::VidVote, CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, + CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, + CommitteeConsensusMessage::VidCertificate(_) => todo!(), }, } } diff --git a/types/src/traits.rs b/types/src/traits.rs index c6a76acea3..9c25e5fcb8 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -11,5 +11,5 @@ pub mod stake_table; pub mod state; pub mod storage; -pub use block_contents::Block; +pub use block_contents::BlockPayload; pub use state::State; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index b7d9b4da44..2fc3383a14 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -1,10 +1,10 @@ //! Abstraction over the contents of a block //! -//! This module provides the [`Block`] trait, which describes the behaviors that a block is -//! expected to have. +//! This module provides the [`BlockPayload`] and [`BlockHeader`] traits, which describe the +//! behaviors that a block is expected to have. use commit::{Commitment, Committable}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Serialize}; use std::{ collections::HashSet, @@ -13,16 +13,17 @@ use std::{ hash::Hash, }; +// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. +// /// Abstraction over the full contents of a block /// -/// This trait encapsulates the behaviors that a block must have in order to be used by consensus: -/// * Must have a predefined error type ([`Block::Error`]) +/// This trait encapsulates the behaviors that the transactions of a block must have in order to be +/// used by consensus +/// * Must have a predefined error type ([`BlockPayload::Error`]) /// * Must have a transaction type that can be compared for equality, serialized and serialized, /// sent between threads, and can have a hash produced of it -/// * Must be able to be produced incrementally by appending transactions -/// ([`add_transaction_raw`](Block::add_transaction_raw)) /// * Must be hashable -pub trait Block: +pub trait BlockPayload: Serialize + Clone + Debug @@ -41,45 +42,32 @@ pub trait Block: /// The type of the transitions we are applying type Transaction: Transaction; - /// Construct an empty or genesis block. - fn new() -> Self; - - /// Attempts to add a transaction, returning an Error if it would result in a structurally - /// invalid block - /// - /// # Errors - /// - /// Should return an error if this transaction leads to an invalid block - fn add_transaction_raw(&self, tx: &Self::Transaction) - -> std::result::Result; - /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec fn contained_transactions(&self) -> HashSet>; } -/// Commitment to a block, used by data availibity -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] -#[serde(bound(deserialize = ""), transparent)] -pub struct BlockCommitment(pub Commitment); - -/// Abstraction over any type of transaction. Used by [`Block`]. +// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. +// +/// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { } -/// Dummy implementation of `BlockContents` for unit tests +/// Dummy implementation of `BlockPayload` for unit tests pub mod dummy { use std::fmt::Display; - use super::{Block, Commitment, Committable, Debug, Hash, HashSet, Serialize}; + use super::{BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize}; use rand::Rng; use serde::Deserialize; pub use crate::traits::state::dummy::DummyState; use crate::traits::state::TestableBlock; + // TODO (Keyao) Investigate the use of DummyBlock. + // /// The dummy block #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)] pub struct DummyBlock { @@ -107,7 +95,7 @@ pub mod dummy { impl Committable for DummyTransaction { fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy Block Comm") + commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") .u64_field("Dummy Field", 0) .finalize() } @@ -132,24 +120,11 @@ pub mod dummy { } } - impl Block for DummyBlock { + impl BlockPayload for DummyBlock { type Error = DummyError; type Transaction = DummyTransaction; - fn new() -> Self { - ::genesis() - } - - fn add_transaction_raw( - &self, - _tx: &Self::Transaction, - ) -> std::result::Result { - Ok(Self { - nonce: self.nonce + 1, - }) - } - fn contained_transactions(&self) -> HashSet> { HashSet::new() } @@ -167,7 +142,7 @@ pub mod dummy { impl Committable for DummyBlock { fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy Block Comm") + commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") .u64_field("Nonce", self.nonce) .finalize() } diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 2687107886..08716a3d73 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -2,7 +2,7 @@ use crate::{ certificate::QuorumCertificate, - data::{LeafType, ProposalType}, + data::LeafType, error::HotShotError, event::{Event, EventType}, message::{DataMessage, SequencingMessage}, @@ -12,10 +12,9 @@ use crate::{ signature_key::SignatureKey, storage::StorageError, }, - vote::VoteType, }; use async_trait::async_trait; - +use commit::Commitment; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; /// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and @@ -98,7 +97,7 @@ pub trait ConsensusSharedApi< &self, view_number: TYPES::Time, leaf_views: Vec, - decide_qc: QuorumCertificate, + decide_qc: QuorumCertificate>, ) { self.send_event(Event { view_number, @@ -130,27 +129,21 @@ pub trait SequencingConsensusApi< >: ConsensusSharedApi { /// Send a direct message to the given recipient - async fn send_direct_message, VOTE: VoteType>( + async fn send_direct_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// send a direct message using the DA communication channel - async fn send_direct_da_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_direct_da_message( &self, recipient: TYPES::SignatureKey, message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// Send a broadcast message to the entire network. - async fn send_broadcast_message< - PROPOSAL: ProposalType, - VOTE: VoteType, - >( + async fn send_broadcast_message( &self, message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fdc435163b..0a881cd093 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -15,10 +15,11 @@ use crate::{ }; use crate::{ - message::{CommitteeConsensusMessage, GeneralConsensusMessage, Message}, + message::{GeneralConsensusMessage, Message}, vote::ViewSyncVoteInternal, }; +use crate::vote::Accumulator2; use crate::{ data::LeafType, traits::{ @@ -28,12 +29,12 @@ use crate::{ state::ConsensusTime, }, vote::{ - Accumulator, DAVote, QuorumVote, TimeoutVote, ViewSyncData, ViewSyncVote, VoteAccumulator, - VoteType, YesOrNoVote, + DAVote, QuorumVote, TimeoutVote, ViewSyncData, ViewSyncVote, VoteAccumulator, VoteType, + YesOrNoVote, }, }; use bincode::Options; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds, Committable}; use derivative::Derivative; use either::Either; use ethereum_types::U256; @@ -60,6 +61,7 @@ pub enum ElectionError { /// the outcome is already knowable. /// /// This would be a useful general utility. +#[derive(Clone)] pub enum Checked { /// This item has been checked, and is valid Valid(T), @@ -72,65 +74,46 @@ pub enum Checked { /// Data to vote on for different types of votes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub enum VoteData { +pub enum VoteData +where + COMMITMENT: CommitmentBounds, +{ /// Vote to provide availability for a block. - DA(Commitment), + DA(COMMITMENT), /// Vote to append a leaf to the log. - Yes(Commitment), + Yes(COMMITMENT), /// Vote to reject a leaf from the log. - No(Commitment), + No(COMMITMENT), /// Vote to time out and proceed to the next view. - Timeout(Commitment), + Timeout(COMMITMENT), /// Vote to pre-commit the view sync. - ViewSyncPreCommit(Commitment), + ViewSyncPreCommit(COMMITMENT), /// Vote to commit the view sync. - ViewSyncCommit(Commitment), + ViewSyncCommit(COMMITMENT), /// Vote to finalize the view sync. - ViewSyncFinalize(Commitment), + ViewSyncFinalize(COMMITMENT), } -/// Make different types of `VoteData` committable -impl Committable for VoteData { - fn commit(&self) -> Commitment { +impl VoteData +where + COMMITMENT: CommitmentBounds, +{ + /// Return the underlying commitment. + #[must_use] + pub fn get_commit(&self) -> COMMITMENT { + #[allow(clippy::enum_glob_use)] + use VoteData::*; match self { - VoteData::DA(block_commitment) => commit::RawCommitmentBuilder::new("DA Block Commit") - .field("block_commitment", *block_commitment) - .finalize(), - VoteData::Yes(leaf_commitment) => commit::RawCommitmentBuilder::new("Yes Vote Commit") - .field("leaf_commitment", *leaf_commitment) - .finalize(), - VoteData::No(leaf_commitment) => commit::RawCommitmentBuilder::new("No Vote Commit") - .field("leaf_commitment", *leaf_commitment) - .finalize(), - VoteData::Timeout(view_number_commitment) => { - commit::RawCommitmentBuilder::new("Timeout View Number Commit") - .field("view_number_commitment", *view_number_commitment) - .finalize() - } - VoteData::ViewSyncPreCommit(commitment) => { - commit::RawCommitmentBuilder::new("ViewSyncPreCommit") - .field("commitment", *commitment) - .finalize() - } - VoteData::ViewSyncCommit(commitment) => { - commit::RawCommitmentBuilder::new("ViewSyncCommit") - .field("commitment", *commitment) - .finalize() - } - VoteData::ViewSyncFinalize(commitment) => { - commit::RawCommitmentBuilder::new("ViewSyncFinalize") - .field("commitment", *commitment) - .finalize() - } + DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) + | ViewSyncFinalize(c) => *c, } } - - fn tag() -> String { - ("VOTE_DATA_COMMIT").to_string() - } } -impl VoteData { +impl VoteData +where + COMMITMENT: CommitmentBounds, +{ #[must_use] /// Convert vote data into bytes. /// @@ -152,7 +135,6 @@ pub trait VoteToken: + PartialEq + Hash + Eq - + Committable { // type StakeTable; // type KeyPair: SignatureKey; @@ -175,18 +157,25 @@ pub trait ElectionConfig: } /// A certificate of some property which has been signed by a quroum of nodes. -pub trait SignedCertificate +pub trait SignedCertificate where Self: Send + Sync + Clone + Serialize + for<'a> Deserialize<'a>, - COMMITTABLE: Committable + Serialize + Clone, + COMMITMENT: CommitmentBounds, TOKEN: VoteToken, { + /// `VoteType` that is used in this certificate + type Vote: VoteType; + + /// `Accumulator` type to accumulate votes. + type VoteAccumulator: Accumulator2; + /// Build a QC from the threshold signature and commitment + // TODO ED Rename this function and rework this function parameters + // Assumes last vote was valid since it caused a QC to form. + // Removes need for relay on other cert specific fields fn from_signatures_and_commitment( - view_number: TIME, signatures: AssembledSignature, - commit: Commitment, - relay: Option, + vote: Self::Vote, ) -> Self; /// Get the view number. @@ -196,12 +185,13 @@ where fn signatures(&self) -> AssembledSignature; // TODO (da) the following functions should be refactored into a QC-specific trait. + // TODO ED Make an issue for this /// Get the leaf commitment. - fn leaf_commitment(&self) -> Commitment; + fn leaf_commitment(&self) -> COMMITMENT; /// Set the leaf commitment. - fn set_leaf_commitment(&mut self, commitment: Commitment); + fn set_leaf_commitment(&mut self, commitment: COMMITMENT); /// Get whether the certificate is for the genesis block. fn is_genesis(&self) -> bool; @@ -221,7 +211,6 @@ pub trait Membership: /// TODO may want to move this to a testableelection trait fn create_election( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, ) -> Self; @@ -276,7 +265,8 @@ pub trait ConsensusExchange: Send + Sync { /// A proposal for participants to vote on. type Proposal: ProposalType; /// A vote on a [`Proposal`](Self::Proposal). - type Vote: VoteType; + // TODO ED Make this equal Certificate vote (if possible?) + type Vote: VoteType; /// A [`SignedCertificate`] attesting to a decision taken by the committee. type Certificate: SignedCertificate + Hash @@ -284,14 +274,13 @@ pub trait ConsensusExchange: Send + Sync { /// The committee eligible to make decisions. type Membership: Membership; /// Network used by [`Membership`](Self::Membership) to communicate. - type Networking: CommunicationChannel; + type Networking: CommunicationChannel; /// Commitments to items which are the subject of proposals and decisions. - type Commitment: Committable + Serialize + Clone; + type Commitment: CommitmentBounds; /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, @@ -340,7 +329,7 @@ pub trait ConsensusExchange: Send + Sync { } /// The contents of a vote on `commit`. - fn vote_data(&self, commit: Commitment) -> VoteData; + fn vote_data(&self, commit: Self::Commitment) -> VoteData; /// Validate a certificate. fn is_valid_cert(&self, qc: &Self::Certificate) -> bool { @@ -351,7 +340,7 @@ pub trait ConsensusExchange: Send + Sync { match qc.signatures() { AssembledSignature::DA(qc) => { - let real_commit = VoteData::DA(leaf_commitment).commit(); + let real_commit = VoteData::DA(leaf_commitment).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -359,7 +348,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::Yes(qc) => { - let real_commit = VoteData::Yes(leaf_commitment).commit(); + let real_commit = VoteData::Yes(leaf_commitment).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -367,7 +356,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::No(qc) => { - let real_commit = VoteData::No(leaf_commitment).commit(); + let real_commit = VoteData::No(leaf_commitment).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -395,7 +384,7 @@ pub trait ConsensusExchange: Send + Sync { let mut is_valid_vote_token = false; let mut is_valid_signature = false; if let Some(key) = ::from_bytes(encoded_key) { - is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); let valid_vote_token = self.membership().validate_vote_token(key, vote_token); is_valid_vote_token = match valid_vote_token { Err(_) => { @@ -409,59 +398,39 @@ pub trait ConsensusExchange: Send + Sync { is_valid_signature && is_valid_vote_token } + /// Validate a vote by checking its signature and token. + fn is_valid_vote_2( + &self, + key: &TYPES::SignatureKey, + encoded_signature: &EncodedSignature, + data: &VoteData, + vote_token: &Checked, + ) -> bool { + let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); + let valid_vote_token = self + .membership() + .validate_vote_token(key.clone(), vote_token.clone()); + let is_valid_vote_token = match valid_vote_token { + Err(_) => { + error!("Vote token was invalid"); + false + } + Ok(Checked::Valid(_)) => true, + Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, + }; + + is_valid_signature && is_valid_vote_token + } + #[doc(hidden)] + fn accumulate_internal( &self, - vota_meta: VoteMetaData, - accumulator: VoteAccumulator, - ) -> Either, Self::Certificate> { - if !self.is_valid_vote( - &vota_meta.encoded_key, - &vota_meta.encoded_signature, - vota_meta.data.clone(), - // Ignoring deserialization errors below since we are getting rid of it soon - Checked::Unchecked(vota_meta.vote_token.clone()), - ) { - error!("Invalid vote!"); - return Either::Left(accumulator); - } - - if let Some(key) = ::from_bytes(&vota_meta.encoded_key) - { - let stake_table_entry = key.get_stake_table_entry(1u64); - let append_node_id = self - .membership() - .get_committee_qc_stake_table() - .iter() - .position(|x| *x == stake_table_entry.clone()) - .unwrap(); - - match accumulator.append(( - vota_meta.commitment, - ( - vota_meta.encoded_key.clone(), - ( - vota_meta.encoded_signature.clone(), - self.membership().get_committee_qc_stake_table(), - append_node_id, - vota_meta.data, - vota_meta.vote_token, - ), - ), - )) { - Either::Left(accumulator) => Either::Left(accumulator), - Either::Right(signatures) => { - Either::Right(Self::Certificate::from_signatures_and_commitment( - vota_meta.view_number, - signatures, - vota_meta.commitment, - vota_meta.relay, - )) - } - } - } else { - Either::Left(accumulator) - } + _vota_meta: VoteMetaData, + _accumulator: VoteAccumulator, + ) -> Either, Self::Certificate> + { + todo!() // TODO ED Remove this function } /// Add a vote to the accumulating signature. Return The certificate if the vote @@ -471,13 +440,79 @@ pub trait ConsensusExchange: Send + Sync { &self, encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, + leaf_commitment: Self::Commitment, vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, relay: Option, - ) -> Either, Self::Certificate>; + ) -> Either, Self::Certificate>; + + // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` + // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. + /// Accumulate vote + /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached + #[allow(clippy::type_complexity)] + fn accumulate_vote_2( + &self, + accumulator: <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::VoteAccumulator, + vote: &<>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::Vote, + _commit: &Self::Commitment, + ) -> Either< + <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::VoteAccumulator, + Self::Certificate, + > { + if !self.is_valid_vote_2( + &vote.get_key(), + &vote.get_signature(), + &vote.get_data(), + // TODO ED We've had this comment for a while: Ignoring deserialization errors below since we are getting rid of it soon + &Checked::Unchecked(vote.get_vote_token()), + ) { + error!("Invalid vote!"); + return Either::Left(accumulator); + } + + let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); + // TODO ED Could we make this part of the vote in the future? It's only a usize. + let append_node_id = self + .membership() + .get_committee_qc_stake_table() + .iter() + .position(|x| *x == stake_table_entry.clone()) + .unwrap(); + + // TODO ED Should make append function take a reference to vote + match accumulator.append( + vote.clone(), + append_node_id, + self.membership().get_committee_qc_stake_table(), + ) { + Either::Left(accumulator) => Either::Left(accumulator), + Either::Right(signatures) => { + // TODO ED Update this function to just take in the signatures and most recent vote + Either::Right(Self::Certificate::from_signatures_and_commitment( + signatures, + vote.clone(), + )) + } + } + } /// The committee which votes on proposals. fn membership(&self) -> &Self::Membership; @@ -512,7 +547,23 @@ pub trait CommitteeExchangeType: block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, - ) -> CommitteeConsensusMessage; + ) -> DAVote; + + // TODO temporary vid methods, move to quorum https://github.com/EspressoSystems/HotShot/issues/1696 + + /// Create a message with a vote on VID disperse data. + fn create_vid_message( + &self, + block_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> DAVote; + + /// Sign a vote on VID proposal. + fn sign_vid_vote( + &self, + block_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature); } /// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. @@ -521,7 +572,7 @@ pub trait CommitteeExchangeType: pub struct CommitteeExchange< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -542,7 +593,7 @@ pub struct CommitteeExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > CommitteeExchangeType for CommitteeExchange { @@ -564,9 +615,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::DA(block_commitment) - .commit() - .as_ref(), + VoteData::DA(block_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -576,22 +625,49 @@ impl< block_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, - ) -> CommitteeConsensusMessage { + ) -> DAVote { let signature = self.sign_da_vote(block_commitment); - CommitteeConsensusMessage::::DAVote(DAVote { + DAVote { signature, block_commitment, current_view, vote_token, vote_data: VoteData::DA(block_commitment), - }) + } + } + + fn create_vid_message( + &self, + block_commitment: Commitment, + current_view: ::Time, + vote_token: ::VoteTokenType, + ) -> DAVote { + let signature = self.sign_vid_vote(block_commitment); + DAVote { + signature, + block_commitment, + current_view, + vote_token, + vote_data: VoteData::DA(block_commitment), + } + } + + fn sign_vid_vote( + &self, + block_commitment: Commitment<::BlockType>, + ) -> (EncodedPublicKey, EncodedSignature) { + let signature = TYPES::SignatureKey::sign( + &self.private_key, + VoteData::DA(block_commitment).get_commit().as_ref(), + ); + (self.public_key.to_bytes(), signature) } } impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, DAVote, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for CommitteeExchange { @@ -600,20 +676,18 @@ impl< type Certificate = DACertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = TYPES::BlockType; + type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = >::Membership::create_election( - entries, keys, config, - ); + let membership = + >::Membership::create_election(entries, config); Self { network, membership, @@ -634,7 +708,7 @@ impl< .make_vote_token(view_number, &self.private_key) } - fn vote_data(&self, commit: Commitment) -> VoteData { + fn vote_data(&self, commit: Self::Commitment) -> VoteData { VoteData::DA(commit) } @@ -644,13 +718,14 @@ impl< &self, encoded_key: &EncodedPublicKey, encoded_signature: &EncodedSignature, - leaf_commitment: Commitment, + leaf_commitment: Self::Commitment, vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, _relay: Option, - ) -> Either, Self::Certificate> { + ) -> Either, Self::Certificate> + { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), @@ -728,7 +803,7 @@ pub trait QuorumExchangeType, /// Create a message with a negative vote on validating or commitment proposal. fn create_no_message>( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -739,7 +814,7 @@ pub trait QuorumExchangeType, /// Create a message with a timeout vote on validating or commitment proposal. fn create_timeout_message>( &self, - justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate>, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> GeneralConsensusMessage @@ -755,7 +830,7 @@ pub struct QuorumExchange< LEAF: LeafType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -778,7 +853,7 @@ impl< LEAF: LeafType, MEMBERSHIP: Membership, PROPOSAL: ProposalType, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > QuorumExchangeType for QuorumExchange @@ -786,7 +861,7 @@ impl< /// Create a message with a positive vote on validating or commitment proposal. fn create_yes_message>( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -825,7 +900,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::Yes(leaf_commitment).commit().as_ref(), + VoteData::Yes(leaf_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -841,7 +916,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::No(leaf_commitment).commit().as_ref(), + VoteData::No(leaf_commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -856,8 +931,8 @@ impl< fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::::Timeout(view_number.commit()) - .commit() + VoteData::Timeout(view_number.commit()) + .get_commit() .as_ref(), ); (self.public_key.to_bytes(), signature) @@ -865,7 +940,7 @@ impl< /// Create a message with a negative vote on validating or commitment proposal. fn create_no_message>( &self, - justify_qc_commitment: Commitment>, + justify_qc_commitment: Commitment>>, leaf_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, @@ -887,7 +962,7 @@ impl< /// Create a message with a timeout vote on validating or commitment proposal. fn create_timeout_message>( &self, - high_qc: QuorumCertificate, + high_qc: QuorumCertificate>, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> GeneralConsensusMessage @@ -910,30 +985,28 @@ impl< LEAF: LeafType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for QuorumExchange { type Proposal = PROPOSAL; - type Vote = QuorumVote; - type Certificate = QuorumCertificate; + type Vote = QuorumVote>; + type Certificate = QuorumCertificate>; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = LEAF; + type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = >::Membership::create_election( - entries, keys, config, - ); + let membership = + >::Membership::create_election(entries, config); Self { network, membership, @@ -948,7 +1021,7 @@ impl< &self.network } - fn vote_data(&self, commit: Commitment) -> VoteData { + fn vote_data(&self, commit: Self::Commitment) -> VoteData { VoteData::Yes(commit) } @@ -962,9 +1035,10 @@ impl< vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator, + accumlator: VoteAccumulator, TYPES>, _relay: Option, - ) -> Either, Self::Certificate> { + ) -> Either, TYPES>, Self::Certificate> + { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), @@ -1047,7 +1121,7 @@ pub struct ViewSyncExchange< TYPES: NodeType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -1069,7 +1143,7 @@ impl< TYPES: NodeType, MEMBERSHIP: Membership, PROPOSAL: ProposalType, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ViewSyncExchangeType for ViewSyncExchange { @@ -1108,7 +1182,9 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), + VoteData::ViewSyncPreCommit(commitment) + .get_commit() + .as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1149,7 +1225,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncCommit(commitment).commit().as_ref(), + VoteData::ViewSyncCommit(commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1190,7 +1266,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncFinalize(commitment).commit().as_ref(), + VoteData::ViewSyncFinalize(commitment).get_commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1221,7 +1297,7 @@ impl< }; match certificate_internal.signatures { AssembledSignature::ViewSyncPreCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); + let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().failure_threshold().get()), @@ -1233,7 +1309,7 @@ impl< ) } AssembledSignature::ViewSyncCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); + let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -1245,7 +1321,7 @@ impl< ) } AssembledSignature::ViewSyncFinalize(raw_signatures) => { - let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); + let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).get_commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -1270,7 +1346,7 @@ impl< TYPES: NodeType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, MEMBERSHIP>, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for ViewSyncExchange { @@ -1279,20 +1355,18 @@ impl< type Certificate = ViewSyncCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = ViewSyncData; + type Commitment = Commitment>; fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, config: TYPES::ElectionConfigType, network: Self::Networking, pk: TYPES::SignatureKey, entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = >::Membership::create_election( - entries, keys, config, - ); + let membership = + >::Membership::create_election(entries, config); Self { network, membership, @@ -1307,7 +1381,7 @@ impl< &self.network } - fn vote_data(&self, _commit: Commitment) -> VoteData { + fn vote_data(&self, _commit: Self::Commitment) -> VoteData { unimplemented!() } @@ -1319,9 +1393,12 @@ impl< vote_data: VoteData, vote_token: TYPES::VoteTokenType, view_number: TYPES::Time, - accumlator: VoteAccumulator>, + accumlator: VoteAccumulator>, TYPES>, relay: Option, - ) -> Either>, Self::Certificate> { + ) -> Either< + VoteAccumulator>, TYPES>, + Self::Certificate, + > { let meta = VoteMetaData { encoded_key: encoded_key.clone(), encoded_signature: encoded_signature.clone(), diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index ee2d09672d..1729636836 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -2,17 +2,23 @@ //! //! Contains types and traits used by `HotShot` to abstract over network access +use async_compatibility_layer::art::async_sleep; #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; -use hotshot_task::BoxSyncFuture; +use hotshot_task::{boxed_sync, BoxSyncFuture}; use libp2p_networking::network::NetworkNodeHandleError; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use super::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ProposalType, message::MessagePurpose, vote::VoteType}; +use crate::{data::ViewNumber, message::MessagePurpose}; +use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; +use rand::{ + distributions::{Bernoulli, Uniform}, + prelude::Distribution, +}; use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, sync::Arc, time::Duration}; @@ -130,11 +136,13 @@ pub enum NetworkError { #[derive(Clone, Debug)] // Storing view number as a u64 to avoid the need TYPES generic /// Events to poll or cancel consensus processes. -pub enum ConsensusIntentEvent { +pub enum ConsensusIntentEvent { /// Poll for votes for a particular view PollForVotes(u64), /// Poll for a proposal for a particular view PollForProposal(u64), + /// Poll for the most recent proposal the webserver has + PollForCurrentProposal, /// Poll for a DAC for a particular view PollForDAC(u64), /// Poll for view sync votes starting at a particular view @@ -143,6 +151,8 @@ pub enum ConsensusIntentEvent { PollForViewSyncCertificate(u64), /// Poll for new transactions PollForTransactions(u64), + /// Poll for future leader + PollFutureLeader(u64, K), /// Cancel polling for votes CancelPollForVotes(u64), /// Cancel polling for view sync votes. @@ -157,7 +167,7 @@ pub enum ConsensusIntentEvent { CancelPollForTransactions(u64), } -impl ConsensusIntentEvent { +impl ConsensusIntentEvent { /// Get the view number of the event. #[must_use] pub fn view_number(&self) -> u64 { @@ -173,7 +183,9 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForTransactions(view_number) - | ConsensusIntentEvent::CancelPollForTransactions(view_number) => *view_number, + | ConsensusIntentEvent::CancelPollForTransactions(view_number) + | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, + ConsensusIntentEvent::PollForCurrentProposal => 1, } } } @@ -196,13 +208,8 @@ pub trait ViewMessage { /// API for interacting directly with a consensus committee /// intended to be implemented for both DA and for validating consensus committees #[async_trait] -pub trait CommunicationChannel< - TYPES: NodeType, - M: NetworkMsg, - PROPOSAL: ProposalType, - VOTE: VoteType, - MEMBERSHIP: Membership, ->: Clone + Debug + Send + Sync + 'static +pub trait CommunicationChannel>: + Clone + Debug + Send + Sync + 'static { /// Underlying Network implementation's type type NETWORK; @@ -250,13 +257,18 @@ pub trait CommunicationChannel< 'a: 'b, Self: 'b; - /// look up a node - /// blocking - async fn lookup_node(&self, pk: TYPES::SignatureKey) -> Result<(), NetworkError>; + /// queues looking up a node + async fn queue_node_lookup( + &self, + _view_number: ViewNumber, + _pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + Ok(()) + } /// Injects consensus data such as view number into the networking implementation /// blocking - async fn inject_consensus_info(&self, event: ConsensusIntentEvent); + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} } /// represents a networking implmentration @@ -305,14 +317,19 @@ pub trait ConnectedNetwork: 'a: 'b, Self: 'b; - /// look up a node - /// blocking - async fn lookup_node(&self, pk: K) -> Result<(), NetworkError>; + /// queues lookup of a node + async fn queue_node_lookup( + &self, + _view_number: ViewNumber, + _pk: K, + ) -> Result<(), UnboundedSendError>> { + Ok(()) + } /// Injects consensus data such as view number into the networking implementation /// blocking /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType - async fn inject_consensus_info(&self, event: ConsensusIntentEvent); + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} } /// Describes additional functionality needed by the test network implementation @@ -335,11 +352,9 @@ pub trait TestableNetworkingImplementation { pub trait TestableChannelImplementation< TYPES: NodeType, M: NetworkMsg, - PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, NETWORK, ->: CommunicationChannel +>: CommunicationChannel { /// generates the `CommunicationChannel` given it's associated network type fn generate_network() -> Box) -> Self + 'static>; @@ -356,6 +371,7 @@ pub enum NetworkChange { } /// interface describing how reliable the network is +#[async_trait] pub trait NetworkReliability: Debug + Sync + std::marker::Send { /// Sample from bernoulli distribution to decide whether /// or not to keep a packet @@ -363,8 +379,225 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send { /// /// Panics if `self.keep_numerator > self.keep_denominator` /// - fn sample_keep(&self) -> bool; + fn sample_keep(&self) -> bool { + true + } + /// sample from uniform distribution to decide whether /// or not to keep a packet - fn sample_delay(&self) -> Duration; + fn sample_delay(&self) -> Duration { + std::time::Duration::ZERO + } + + /// scramble the packet + fn scramble(&self, msg: Vec) -> Vec { + msg + } + + /// number of times to repeat the packet + fn sample_repeat(&self) -> usize { + 1 + } + + /// given a message and a way to send the message, + /// decide whether or not to send the message + /// how long to delay the message + /// whether or not to send duplicates + /// and whether or not to include noise with the message + /// then send the message + fn chaos_send_msg( + &self, + msg: Vec, + send_fn: Arc) -> BoxSyncFuture<'static, ()>>, + ) -> BoxSyncFuture<'static, ()> { + let sample_keep = self.sample_keep(); + let delay = self.sample_delay(); + let repeats = self.sample_repeat(); + let mut msgs = Vec::new(); + for _idx in 0..repeats { + let scrambled = self.scramble(msg.clone()); + msgs.push(scrambled); + } + let closure = async move { + if sample_keep { + async_sleep(delay).await; + for msg in msgs { + send_fn(msg).await; + } + } + }; + boxed_sync(closure) + } +} + +/// ideal network +#[derive(Clone, Copy, Debug, Default)] +pub struct PerfectNetwork {} + +impl NetworkReliability for PerfectNetwork {} + +/// A synchronous network. Packets may be delayed, but are guaranteed +/// to arrive within `timeout` ns +#[derive(Clone, Copy, Debug, Default)] +pub struct SynchronousNetwork { + /// Max delay of packet before arrival + timeout_ms: u64, + /// Lowest value in milliseconds that a packet may be delayed + delay_low_ms: u64, +} + +impl NetworkReliability for SynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.timeout_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An asynchronous network. Packets may be dropped entirely +/// or delayed for arbitrarily long periods +/// probability that packet is kept = `keep_numerator` / `keep_denominator` +/// packet delay is obtained by sampling from a uniform distribution +/// between `delay_low_ms` and `delay_high_ms`, inclusive +#[derive(Debug, Clone, Copy)] +pub struct AsynchronousNetwork { + /// numerator for probability of keeping packets + keep_numerator: u32, + /// denominator for probability of keeping packets + keep_denominator: u32, + /// lowest value in milliseconds that a packet may be delayed + delay_low_ms: u64, + /// highest value in milliseconds that a packet may be delayed + delay_high_ms: u64, +} + +impl NetworkReliability for AsynchronousNetwork { + fn sample_keep(&self) -> bool { + Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) + .unwrap() + .sample(&mut rand::thread_rng()) + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An partially synchronous network. Behaves asynchronously +/// until some arbitrary time bound, GST, +/// then synchronously after GST +#[allow(clippy::similar_names)] +#[derive(Debug, Clone, Copy)] +pub struct PartiallySynchronousNetwork { + /// asynchronous portion of network + asynchronous: AsynchronousNetwork, + /// synchronous portion of network + synchronous: SynchronousNetwork, + /// time when GST occurs + gst: std::time::Duration, + /// when the network was started + start: std::time::Instant, +} + +impl NetworkReliability for PartiallySynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + // act asyncronous before gst + if self.start.elapsed() < self.gst { + if self.asynchronous.sample_keep() { + self.asynchronous.sample_delay() + } else { + // assume packet was "dropped" and will arrive after gst + self.synchronous.sample_delay() + self.gst + } + } else { + // act syncronous after gst + self.synchronous.sample_delay() + } + } +} + +impl Default for AsynchronousNetwork { + // disable all chance of failure + fn default() -> Self { + AsynchronousNetwork { + keep_numerator: 1, + keep_denominator: 1, + delay_low_ms: 0, + delay_high_ms: 0, + } + } +} + +impl Default for PartiallySynchronousNetwork { + fn default() -> Self { + PartiallySynchronousNetwork { + synchronous: SynchronousNetwork::default(), + asynchronous: AsynchronousNetwork::default(), + gst: std::time::Duration::new(0, 0), + start: std::time::Instant::now(), + } + } +} + +impl SynchronousNetwork { + /// create new `SynchronousNetwork` + #[must_use] + pub fn new(timeout: u64, delay_low_ms: u64) -> Self { + SynchronousNetwork { + timeout_ms: timeout, + delay_low_ms, + } + } +} + +impl AsynchronousNetwork { + /// create new `AsynchronousNetwork` + #[must_use] + pub fn new( + keep_numerator: u32, + keep_denominator: u32, + delay_low_ms: u64, + delay_high_ms: u64, + ) -> Self { + AsynchronousNetwork { + keep_numerator, + keep_denominator, + delay_low_ms, + delay_high_ms, + } + } +} + +impl PartiallySynchronousNetwork { + /// create new `PartiallySynchronousNetwork` + #[allow(clippy::similar_names)] + #[must_use] + pub fn new( + asynchronous: AsynchronousNetwork, + synchronous: SynchronousNetwork, + gst: std::time::Duration, + ) -> Self { + PartiallySynchronousNetwork { + asynchronous, + synchronous, + gst, + start: std::time::Instant::now(), + } + } +} + +/// A chaotic network using all the networking calls +pub struct ChaosNetwork { + // TODO } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ba5abdebb..49d9184bc4 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -19,7 +19,7 @@ use crate::{ message::{ConsensusMessageType, Message, SequencingMessage}, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, - storage::Storage, Block, + storage::Storage, BlockPayload, }, }; use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; @@ -166,7 +166,6 @@ pub trait ExchangesType, MESSA /// Create all exchanges. fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, configs: Self::ElectionConfigs, networks: ( >::Networking, @@ -184,7 +183,7 @@ pub trait ExchangesType, MESSA /// Get the view sync exchange. fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; - /// Block the underlying networking interfaces until node is successfully initialized into the + /// BlockPayload the underlying networking interfaces until node is successfully initialized into the /// networks. async fn wait_for_networks_ready(&self); @@ -257,7 +256,6 @@ where fn create( entries: Vec<::StakeTableEntry>, - keys: Vec, configs: Self::ElectionConfigs, networks: ( >::Networking, @@ -270,7 +268,6 @@ where ) -> Self { let quorum_exchange = QUORUMEXCHANGE::create( entries.clone(), - keys.clone(), configs.0.clone(), networks.0, pk.clone(), @@ -279,7 +276,6 @@ where ); let view_sync_exchange = VIEWSYNCEXCHANGE::create( entries.clone(), - keys.clone(), configs.0, networks.2, pk.clone(), @@ -287,7 +283,7 @@ where sk.clone(), ); let committee_exchange = - COMMITTEEEXCHANGE::create(entries, keys, configs.1, networks.1, pk, entry, sk); + COMMITTEEEXCHANGE::create(entries, configs.1, networks.1, pk, entry, sk); Self { quorum_exchange, @@ -363,7 +359,7 @@ pub trait TestableNodeImplementation: NodeImplementation state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; /// Creates random transaction if possible /// otherwise panics @@ -372,7 +368,7 @@ pub trait TestableNodeImplementation: NodeImplementation leaf: &Self::Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; /// generate a genesis block fn block_genesis() -> TYPES::BlockType; @@ -401,24 +397,18 @@ where QuorumCommChannel: TestableChannelImplementation< TYPES, Message, - QuorumProposalType, - QuorumVoteType, QuorumMembership, QuorumNetwork, >, CommitteeCommChannel: TestableChannelImplementation< TYPES, Message, - CommitteeProposalType, - CommitteeVote, CommitteeMembership, QuorumNetwork, >, ViewSyncCommChannel: TestableChannelImplementation< TYPES, Message, - ViewSyncProposalType, - ViewSyncVoteType, ViewSyncMembership, QuorumNetwork, >, @@ -438,7 +428,7 @@ where state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { ::create_random_transaction(state, rng, padding) } @@ -446,7 +436,7 @@ where leaf: &Self::Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { ::create_random_transaction(leaf, rng, padding) } @@ -518,8 +508,6 @@ pub type ViewSyncMembership = QuorumMembership; pub type QuorumNetwork = as CommunicationChannel< TYPES, Message, - QuorumProposalType, - QuorumVoteType, QuorumMembership, >>::NETWORK; @@ -527,8 +515,6 @@ pub type QuorumNetwork = as Communication pub type CommitteeNetwork = as CommunicationChannel< TYPES, Message, - CommitteeProposalType, - CommitteeVote, CommitteeMembership, >>::NETWORK; @@ -536,8 +522,6 @@ pub type CommitteeNetwork = as Communi pub type ViewSyncNetwork = as CommunicationChannel< TYPES, Message, - ViewSyncProposalType, - ViewSyncVoteType, ViewSyncMembership, >>::NETWORK; @@ -565,14 +549,14 @@ pub trait NodeType: /// The block type that this hotshot setup is using. /// /// This should be the same block that `StateType::BlockType` is using. - type BlockType: Block; + type BlockType: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; /// The vote token that this hotshot setup is using. type VoteTokenType: VoteToken; /// The transaction type that this hotshot setup is using. /// - /// This should be equal to `Block::Transaction` + /// This should be equal to `BlockPayload::Transaction` type Transaction: Transaction; /// The election config type that this hotshot setup is using. type ElectionConfigType: ElectionConfig; diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 5daed175bd..d5141816f5 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -3,9 +3,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Serializatio use bitvec::prelude::*; use espresso_systems_common::hotshot::tag; use ethereum_types::U256; -use jf_primitives::signatures::{ - bls_over_bn254::BLSOverBN254CurveSignatureScheme, SignatureScheme, -}; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, hash::Hash}; use tagged_base64::tagged; @@ -23,17 +20,13 @@ use tagged_base64::tagged; PartialOrd, Ord, )] -pub struct EncodedPublicKey( - #[debug(with = "custom_debug::hexbuf")] pub Vec, // pub ::VerificationKey -); +pub struct EncodedPublicKey(#[debug(with = "custom_debug::hexbuf")] pub Vec); /// Type saftey wrapper for byte encoded signature #[derive( Clone, custom_debug::Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, )] -pub struct EncodedSignature( - #[debug(with = "custom_debug::hexbuf")] pub Vec, // pub ::Signature -); +pub struct EncodedSignature(#[debug(with = "custom_debug::hexbuf")] pub Vec); impl AsRef<[u8]> for EncodedSignature { fn as_ref(&self) -> &[u8] { @@ -70,6 +63,17 @@ pub trait SignatureKey: + for<'a> Deserialize<'a>; /// The type of the quorum certificate parameters used for assembled signature type QCParams: Send + Sync + Sized + Clone + Debug + Hash; + /// The type of the assembled signature, without `BitVec` + type PureAssembledSignatureType: Send + + Sync + + Sized + + Clone + + Debug + + Hash + + PartialEq + + Eq + + Serialize + + for<'a> Deserialize<'a>; /// The type of the assembled qc: assembled signature + `BitVec` type QCType: Send + Sync @@ -101,6 +105,9 @@ pub trait SignatureKey: /// get the stake table entry from the public key and stake value fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; + /// only get the public key from the stake table entry + fn get_public_key(entry: &Self::StakeTableEntry) -> Self; + /// get the public parameter for the assembled signature checking fn get_public_parameter( stake_entries: Vec, @@ -111,17 +118,12 @@ pub trait SignatureKey: fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; /// get the assembled signature and the `BitVec` separately from the assembled signature - fn get_sig_proof( - signature: &Self::QCType, - ) -> ( - ::Signature, - BitVec, - ); + fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); /// assemble the signature from the partial signature and the indication of signers in `BitVec` fn assemble( real_qc_pp: &Self::QCParams, signers: &BitSlice, - sigs: &[::Signature], + sigs: &[Self::PureAssembledSignatureType], ) -> Self::QCType; } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 83fb250442..bd8fe7d2ed 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -3,7 +3,7 @@ //! This module provides the [`State`] trait, which serves as an compatibility over the current //! network state, which is modified by the transactions contained within blocks. -use crate::traits::Block; +use crate::traits::BlockPayload; use commit::Committable; use espresso_systems_common::hotshot::tag; use serde::{de::DeserializeOwned, Serialize}; @@ -19,9 +19,7 @@ use std::{ /// /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](State::Error)) -/// * The type of block that modifies this type of state ([`Block`](State::BlockType)) -/// * A method to get a template (empty) next block from the current state -/// ([`next_block`](State::next_block)) +/// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockType)) /// * The ability to validate that a block is actually a valid extension of this state /// ([`validate_block`](State::validate_block)) /// * The ability to produce a new state, with the modifications from the block applied @@ -42,13 +40,10 @@ pub trait State: /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; /// The type of block this state is associated with - type BlockType: Block; + type BlockType: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Returns an empty, template next block given this current state - fn next_block(prev_commitment: Option) -> Self::BlockType; - /// Returns true if and only if the provided block is valid and can extend this state fn validate_block(&self, block: &Self::BlockType, view_number: &Self::Time) -> bool; @@ -109,11 +104,11 @@ where state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; } /// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: Block + Debug { +pub trait TestableBlock: BlockPayload + Debug { /// generate a genesis block fn genesis() -> Self; @@ -165,13 +160,6 @@ pub mod dummy { type BlockType = DummyBlock; type Time = ViewNumber; - fn next_block(state: Option) -> Self::BlockType { - match state { - Some(state) => DummyBlock { nonce: state.nonce }, - None => unimplemented!(), - } - } - fn validate_block(&self, _block: &Self::BlockType, _view_number: &Self::Time) -> bool { false } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 41b7cc225a..122698486c 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -4,7 +4,7 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ certificate::QuorumCertificate, data::LeafType, - traits::{election::SignedCertificate, Block}, + traits::{election::SignedCertificate, BlockPayload}, }; use async_trait::async_trait; use commit::Commitment; @@ -132,7 +132,7 @@ pub struct StoredView> { /// The parent of this view pub parent: Commitment, /// The justify QC of this view. See the hotstuff paper for more information on this. - pub justify_qc: QuorumCertificate, + pub justify_qc: QuorumCertificate>, /// The state of this view pub state: LEAF::MaybeState, /// The deltas of this view @@ -152,16 +152,16 @@ where TYPES: NodeType, LEAF: LeafType, { - /// Create a new `StoredView` from the given QC, Block and State. + /// Create a new `StoredView` from the given QC, `BlockPayload` and State. /// /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. pub fn from_qc_block_and_state( - qc: QuorumCertificate, + qc: QuorumCertificate>, deltas: LEAF::DeltasType, state: LEAF::MaybeState, height: u64, parent_commitment: Commitment, - rejected: Vec<::Transaction>, + rejected: Vec<::Transaction>, proposer_id: EncodedPublicKey, ) -> Self { Self { diff --git a/types/src/vote.rs b/types/src/vote.rs index cdbb8cb03f..9e232b755a 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -5,7 +5,6 @@ use crate::{ certificate::{AssembledSignature, QuorumCertificate}, - data::LeafType, traits::{ election::{VoteData, VoteToken}, node_implementation::NodeType, @@ -14,27 +13,34 @@ use crate::{ }; use bincode::Options; use bitvec::prelude::*; -use commit::{Commitment, Committable}; +use commit::{Commitment, CommitmentBounds, Committable}; use either::Either; use ethereum_types::U256; use hotshot_utils::bincode::bincode_opts; -use jf_primitives::signatures::{ - bls_over_bn254::BLSOverBN254CurveSignatureScheme, SignatureScheme, -}; use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, + hash::Hash, + marker::PhantomData, num::NonZeroU64, }; use tracing::error; /// The vote sent by consensus messages. -pub trait VoteType: +pub trait VoteType: Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq { - /// The view this vote was cast for. - fn current_view(&self) -> TYPES::Time; + /// Get the view this vote was cast for + fn get_view(&self) -> TYPES::Time; + /// Get the signature key associated with this vote + fn get_key(&self) -> TYPES::SignatureKey; + /// Get the signature associated with this vote + fn get_signature(&self) -> EncodedSignature; + /// Get the data this vote was signed over + fn get_data(&self) -> VoteData; + /// Get the vote token of this vote + fn get_vote_token(&self) -> TYPES::VoteTokenType; } /// A vote on DA proposal. @@ -50,35 +56,35 @@ pub struct DAVote { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData, + pub vote_data: VoteData>, } /// A positive or negative vote on validating or commitment proposal. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub struct YesOrNoVote> { +pub struct YesOrNoVote { /// TODO we should remove this /// this is correct, but highly inefficient /// we should check a cache, and if that fails request the qc - pub justify_qc_commitment: Commitment>, + pub justify_qc_commitment: Commitment>, /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The leaf commitment being voted on. - pub leaf_commitment: Commitment, + pub leaf_commitment: COMMITMENT, /// The view this vote was cast for pub current_view: TYPES::Time, /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData, + pub vote_data: VoteData, } /// A timeout vote. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub struct TimeoutVote> { +pub struct TimeoutVote { /// The highest valid QC this node knows about - pub high_qc: QuorumCertificate, + pub high_qc: QuorumCertificate, /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The view this vote was cast for @@ -86,7 +92,7 @@ pub struct TimeoutVote> { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData, + pub vote_data: VoteData>, } /// The internals of a view sync vote @@ -104,7 +110,7 @@ pub struct ViewSyncVoteInternal { /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData>, + pub vote_data: VoteData>>, } /// The data View Sync votes are signed over @@ -181,19 +187,31 @@ impl ViewSyncVote { /// Votes on validating or commitment proposal. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] -pub enum QuorumVote> { +pub enum QuorumVote { /// Posivite vote. - Yes(YesOrNoVote), + Yes(YesOrNoVote), /// Negative vote. - No(YesOrNoVote), + No(YesOrNoVote), /// Timeout vote. - Timeout(TimeoutVote), + Timeout(TimeoutVote), } -impl VoteType for DAVote { - fn current_view(&self) -> TYPES::Time { +impl VoteType> for DAVote { + fn get_view(&self) -> TYPES::Time { self.current_view } + fn get_key(&self) -> ::SignatureKey { + self.signature_key() + } + fn get_signature(&self) -> EncodedSignature { + self.signature.1.clone() + } + fn get_data(&self) -> VoteData> { + self.vote_data.clone() + } + fn get_vote_token(&self) -> ::VoteTokenType { + self.vote_token.clone() + } } impl DAVote { @@ -205,19 +223,39 @@ impl DAVote { } } -impl> VoteType - for QuorumVote +impl VoteType + for QuorumVote { - fn current_view(&self) -> TYPES::Time { + fn get_view(&self) -> TYPES::Time { match self { QuorumVote::Yes(v) | QuorumVote::No(v) => v.current_view, QuorumVote::Timeout(v) => v.current_view, } } + + fn get_key(&self) -> ::SignatureKey { + self.signature_key() + } + fn get_signature(&self) -> EncodedSignature { + self.signature() + } + fn get_data(&self) -> VoteData { + match self { + QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), + QuorumVote::Timeout(_) => unimplemented!(), + } + } + fn get_vote_token(&self) -> ::VoteTokenType { + match self { + QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token.clone(), + QuorumVote::Timeout(_) => unimplemented!(), + } + } } -impl> QuorumVote { +impl QuorumVote { /// Get the encoded signature. + pub fn signature(&self) -> EncodedSignature { match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), @@ -227,6 +265,7 @@ impl> QuorumVote /// Get the signature key. /// # Panics /// If the deserialization fails. + pub fn signature_key(&self) -> TYPES::SignatureKey { let encoded = match &self { Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), @@ -236,14 +275,36 @@ impl> QuorumVote } } -impl VoteType for ViewSyncVote { - fn current_view(&self) -> TYPES::Time { +impl VoteType>> for ViewSyncVote { + fn get_view(&self) -> TYPES::Time { match self { ViewSyncVote::PreCommit(v) | ViewSyncVote::Commit(v) | ViewSyncVote::Finalize(v) => { v.round } } } + fn get_key(&self) -> ::SignatureKey { + self.signature_key() + } + + fn get_signature(&self) -> EncodedSignature { + self.signature() + } + fn get_data(&self) -> VoteData>> { + match self { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.vote_data.clone(), + } + } + + fn get_vote_token(&self) -> ::VoteTokenType { + match self { + ViewSyncVote::PreCommit(vote_internal) + | ViewSyncVote::Commit(vote_internal) + | ViewSyncVote::Finalize(vote_internal) => vote_internal.vote_token.clone(), + } + } } /// The aggreation of votes, implemented by `VoteAccumulator`. @@ -255,18 +316,449 @@ pub trait Accumulator: Sized { fn append(self, val: T) -> Either; } +/// Accumulator trait used to accumulate votes into an `AssembledSignature` +pub trait Accumulator2< + TYPES: NodeType, + COMMITMENT: CommitmentBounds, + VOTE: VoteType, +>: Sized +{ + /// Append 1 vote to the accumulator. If the threshold is not reached, return + /// the accumulator, else return the `AssembledSignature` + /// Only called from inside `accumulate_internal` + fn append( + self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either>; +} + +/// Accumulates DA votes +pub struct DAVoteAccumulator< + TYPES: NodeType, + COMMITMENT: CommitmentBounds, + VOTE: VoteType, +> { + /// Map of all da signatures accumlated so far + pub da_vote_outcomes: VoteMap, + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::PureAssembledSignatureType>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + /// Phantom data to specify the vote this accumulator is for + pub phantom: PhantomData, +} + +impl> + Accumulator2 for DAVoteAccumulator +{ + fn append( + mut self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + let VoteData::DA(vote_commitment) = vote.get_data() else { + return Either::Left(self); + }; + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (da_stake_casted, da_vote_map) = self + .da_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if da_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // Already checked that vote data was for a DA vote above + *da_stake_casted += u64::from(vote.get_vote_token().vote_count()); + da_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + + if *da_stake_casted >= u64::from(self.success_threshold) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + // TODO ED Something about stake table entries. Might be easier to just pass in membership? + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + self.da_vote_outcomes.remove(&vote_commitment); + + return Either::Right(AssembledSignature::DA(real_qc_sig)); + } + Either::Left(self) + } +} + +/// Accumulate quorum votes +pub struct QuorumVoteAccumulator< + TYPES: NodeType, + COMMITMENT: CommitmentBounds, + VOTE: VoteType, +> { + /// Map of all signatures accumlated so far + pub total_vote_outcomes: VoteMap, + /// Map of all yes signatures accumlated so far + pub yes_vote_outcomes: VoteMap, + /// Map of all no signatures accumlated so far + pub no_vote_outcomes: VoteMap, + + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A failure threshold, generally f + 1 + pub failure_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::PureAssembledSignatureType>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + /// Phantom data to ensure this struct is over a specific `VoteType` implementation + pub phantom: PhantomData, +} + +impl> + Accumulator2 for QuorumVoteAccumulator +{ + fn append( + mut self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + let (VoteData::Yes(vote_commitment) | VoteData::No(vote_commitment)) = vote.get_data() + else { + return Either::Left(self); + }; + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (total_stake_casted, total_vote_map) = self + .total_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (yes_stake_casted, yes_vote_map) = self + .yes_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + let (no_stake_casted, no_vote_map) = self + .no_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if total_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // TODO ED Make all these get calls as local variables to avoid constantly calling them + *total_stake_casted += u64::from(vote.get_vote_token().vote_count()); + total_vote_map.insert( + encoded_key.clone(), + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + + match vote.get_data() { + VoteData::Yes(_) => { + *yes_stake_casted += u64::from(vote.get_vote_token().vote_count()); + yes_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + VoteData::No(_) => { + *no_stake_casted += u64::from(vote.get_vote_token().vote_count()); + no_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + _ => return Either::Left(self), + } + + if *total_stake_casted >= u64::from(self.success_threshold) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + // TODO ED Something about stake table entries. Might be easier to just pass in membership? + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + if *yes_stake_casted >= u64::from(self.success_threshold) { + self.yes_vote_outcomes.remove(&vote_commitment); + return Either::Right(AssembledSignature::Yes(real_qc_sig)); + } else if *no_stake_casted >= u64::from(self.failure_threshold) { + self.total_vote_outcomes.remove(&vote_commitment); + return Either::Right(AssembledSignature::No(real_qc_sig)); + } + } + Either::Left(self) + } +} + +/// Accumulates view sync votes +pub struct ViewSyncVoteAccumulator< + TYPES: NodeType, + COMMITMENT: CommitmentBounds, + VOTE: VoteType, +> { + /// Map of all pre_commit signatures accumlated so far + pub pre_commit_vote_outcomes: VoteMap, + /// Map of all ommit signatures accumlated so far + pub commit_vote_outcomes: VoteMap, + /// Map of all finalize signatures accumlated so far + pub finalize_vote_outcomes: VoteMap, + + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A quorum's failure threshold, generally f + 1 + pub failure_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::PureAssembledSignatureType>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + /// Phantom data since we want the accumulator to be attached to a single `VoteType` + pub phantom: PhantomData, +} + +impl> + Accumulator2 for ViewSyncVoteAccumulator +{ + #[allow(clippy::too_many_lines)] + fn append( + mut self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + let (VoteData::ViewSyncPreCommit(vote_commitment) + | VoteData::ViewSyncCommit(vote_commitment) + | VoteData::ViewSyncFinalize(vote_commitment)) = vote.get_data() + else { + return Either::Left(self); + }; + + // error!("Vote is {:?}", vote.clone()); + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (pre_commit_stake_casted, pre_commit_vote_map) = self + .pre_commit_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + if pre_commit_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + let (commit_stake_casted, commit_vote_map) = self + .commit_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + if commit_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + let (finalize_stake_casted, finalize_vote_map) = self + .finalize_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + if finalize_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + // update the active_keys and sig_lists + // TODO ED Possible bug where a node sends precommit vote and then commit vote after + // precommit cert is formed, their commit vote won't be counted because of this check + // Probably need separate signers vecs. + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("node id already in signers"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + match vote.get_data() { + VoteData::ViewSyncPreCommit(_) => { + *pre_commit_stake_casted += u64::from(vote.get_vote_token().vote_count()); + pre_commit_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + VoteData::ViewSyncCommit(_) => { + *commit_stake_casted += u64::from(vote.get_vote_token().vote_count()); + commit_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + VoteData::ViewSyncFinalize(_) => { + *finalize_stake_casted += u64::from(vote.get_vote_token().vote_count()); + finalize_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + } + _ => unimplemented!(), + } + + if *pre_commit_stake_casted >= u64::from(self.failure_threshold) { + let real_qc_pp = ::get_public_parameter( + stake_table_entries, + U256::from(self.failure_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + self.pre_commit_vote_outcomes + .remove(&vote_commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncPreCommit(real_qc_sig)); + } + + if *commit_stake_casted >= u64::from(self.success_threshold) { + let real_qc_pp = ::get_public_parameter( + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + self.commit_vote_outcomes.remove(&vote_commitment).unwrap(); + return Either::Right(AssembledSignature::ViewSyncCommit(real_qc_sig)); + } + + if *finalize_stake_casted >= u64::from(self.success_threshold) { + let real_qc_pp = ::get_public_parameter( + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + self.finalize_vote_outcomes + .remove(&vote_commitment) + .unwrap(); + return Either::Right(AssembledSignature::ViewSyncFinalize(real_qc_sig)); + } + + Either::Left(self) + } +} + +/// Placeholder accumulator; will be replaced by accumulator for each certificate type +pub struct AccumulatorPlaceholder< + TYPES: NodeType, + COMMITMENT: CommitmentBounds, + VOTE: VoteType, +> { + /// Phantom data to make compiler happy + pub phantom: PhantomData<(TYPES, VOTE, COMMITMENT)>, +} + +impl> + Accumulator2 for AccumulatorPlaceholder +{ + fn append( + self, + _vote: VOTE, + _vote_node_id: usize, + _stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + either::Left(self) + } +} + /// Mapping of commitments to vote tokens by key. -type VoteMap = HashMap< - Commitment, +// TODO ED Remove this whole token generic +type VoteMap = HashMap< + COMMITMENT, ( u64, - BTreeMap, TOKEN)>, + BTreeMap, TOKEN)>, ), >; /// Describe the process of collecting signatures on block or leaf commitment, to form a DAC or QC, /// respectively. -pub struct VoteAccumulator { +/// +/// TODO GG used only in election.rs; move this to there and make it private? +pub struct VoteAccumulator { /// Map of all signatures accumlated so far pub total_vote_outcomes: VoteMap, /// Map of all da signatures accumlated so far @@ -286,28 +778,28 @@ pub struct VoteAccumulator { /// Enough stake to know that we cannot possibly get a quorum, generally f + 1 pub failure_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::Signature>, + pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, } -impl +impl Accumulator< ( - Commitment, + COMMITMENT, ( EncodedPublicKey, ( EncodedSignature, Vec<::StakeTableEntry>, usize, - VoteData, + VoteData, TOKEN, ), ), ), AssembledSignature, - > for VoteAccumulator + > for VoteAccumulator where TOKEN: Clone + VoteToken, { @@ -315,14 +807,14 @@ where fn append( mut self, val: ( - Commitment, + COMMITMENT, ( EncodedPublicKey, ( EncodedSignature, Vec<::StakeTableEntry>, usize, - VoteData, + VoteData, TOKEN, ), ), @@ -331,7 +823,7 @@ where let (commitment, (key, (sig, entries, node_id, vote_data, token))) = val; // Desereialize the sig so that it can be assembeld into a QC - let origianl_sig: ::Signature = + let original_signature: ::PureAssembledSignatureType = bincode_opts() .deserialize(&sig.0) .expect("Deserialization on the signature shouldn't be able to fail."); @@ -385,7 +877,7 @@ where return Either::Left(self); } self.signers.set(node_id, true); - self.sig_lists.push(origianl_sig); + self.sig_lists.push(original_signature); *total_stake_casted += u64::from(token.vote_count()); total_vote_map.insert(key.clone(), (sig.clone(), vote_data.clone(), token.clone())); diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 827d5a5258..bc2520065a 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -22,7 +22,6 @@ hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } -nll = { workspace = true } tracing = { workspace = true } rand = { workspace = true } serde = { workspace = true } @@ -30,8 +29,6 @@ serde_json = "1.0.96" snafu = { workspace = true } tide = { version = "0.16.0", default-features = false } toml = { workspace = true } -portpicker = "0.1" -surf-disco = { workspace = true } [dev-dependencies] hotshot-types = { path = "../types", default-features = false } diff --git a/web_server/api.toml b/web_server/api.toml index 5f749bcdb0..cc610fc9c9 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -11,6 +11,13 @@ DOC = """ Return the proposal for a given view number """ +# GET the proposal for a view, where the view is passed as an argument +[route.getrecentproposal] +PATH = ["proposal/"] +DOC = """ +Return the proposal for the most recent view the server has +""" + # POST a proposal, where the view is passed as an argument [route.postproposal] PATH = ["proposal/:view_number"] diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 1da2781c55..f9d0e7c0c7 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -17,6 +17,10 @@ pub fn post_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } +pub fn get_recent_proposal_route() -> String { + "api/proposal".to_string() +} + pub fn get_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } @@ -33,6 +37,30 @@ pub fn post_vote_route(view_number: u64) -> String { format!("api/votes/{view_number}") } +pub fn get_vid_disperse_route(view_number: u64) -> String { + format!("api/vid/disperse/{view_number}") +} + +pub fn post_vid_disperse_route(view_number: u64) -> String { + format!("api/vid/disperse/{view_number}") +} + +pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { + format!("api/vid/votes/{view_number}/{index}") +} + +pub fn post_vid_vote_route(view_number: u64) -> String { + format!("api/vid/votes/{view_number}") +} + +pub fn get_vid_cert_route(view_number: u64) -> String { + format!("api/vid/cert/{view_number}") +} + +pub fn post_vid_cert_route(view_number: u64) -> String { + format!("api/vid/cert/{view_number}") +} + pub fn get_transactions_route(index: u64) -> String { format!("api/transactions/{index}") } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index ac2730b448..e29ce036e8 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -33,6 +33,8 @@ struct WebServerState { da_certificates: HashMap)>, /// view for oldest proposals in memory oldest_proposal: u64, + /// view for the most recent proposal to help nodes catchup + recent_proposal: u64, /// view for teh oldest DA certificate oldest_certificate: u64, @@ -74,6 +76,7 @@ impl WebServerState { num_txns: 0, oldest_vote: 0, oldest_proposal: 0, + recent_proposal: 0, oldest_certificate: 0, shutdown: None, stake_table: Vec::new(), @@ -101,6 +104,7 @@ impl WebServerState { /// Trait defining methods needed for the `WebServerState` pub trait WebServerDataSource { fn get_proposal(&self, view_number: u64) -> Result>>, Error>; + fn get_recent_proposal(&self) -> Result>>, Error>; fn get_view_sync_proposal( &self, view_number: u64, @@ -156,6 +160,10 @@ impl WebServerDataSource for WebServerState { } } + fn get_recent_proposal(&self) -> Result>>, Error> { + self.get_proposal(self.recent_proposal) + } + fn get_view_sync_proposal( &self, view_number: u64, @@ -316,6 +324,10 @@ impl WebServerDataSource for WebServerState { fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { debug!("Received proposal for view {}", view_number); + if view_number > self.recent_proposal { + self.recent_proposal = view_number; + } + // Only keep proposal history for MAX_VIEWS number of view if self.proposals.len() >= MAX_VIEWS { self.proposals.remove(&self.oldest_proposal); @@ -495,6 +507,9 @@ where } .boxed() })? + .get("getrecentproposal", |_req, state| { + async move { state.get_recent_proposal() }.boxed() + })? .get("getviewsyncproposal", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; From 756649f0a9afbac566c77aeb24e2790a32614079 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 9 Oct 2023 11:55:04 -0400 Subject: [PATCH 0198/1393] Add tracing to network task --- task-impls/src/network.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 5fbada8539..605239c748 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -22,6 +22,7 @@ use hotshot_types::{ use snafu::Snafu; use std::{marker::PhantomData, sync::Arc}; use tracing::error; +use tracing::instrument; /// the type of network task #[derive(Clone, Copy, Debug)] @@ -190,6 +191,8 @@ impl< /// # Panics /// Panic sif a direct message event is received with no recipient #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 + #[instrument(skip_all, fields(view = *self.view), name = "Newtork Task", level = "error")] + pub async fn handle_event( &mut self, event: SequencingHotShotEvent, From de30b23ac127e09ab978a61da4065c4f57318500 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Sep 2023 00:10:27 -0400 Subject: [PATCH 0199/1393] add endpoint to get a recent proposal to catchup w/ webserver --- .../traits/networking/web_server_network.rs | 30 ++++++++++ task-impls/src/consensus.rs | 7 ++- testing/src/spinning_task.rs | 3 +- testing/tests/catchup.rs | 60 ++++++++++++++++++- testing/tests/timeout.rs | 2 +- types/src/message.rs | 2 + types/src/traits/network.rs | 3 + web_server/api.toml | 7 +++ web_server/src/config.rs | 4 ++ web_server/src/lib.rs | 15 +++++ 10 files changed, 128 insertions(+), 5 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index e0001b94ab..f2c6c1c842 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -161,6 +161,7 @@ impl Inner { while self.running.load(Ordering::Relaxed) { let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), + MessagePurpose::CurrentProposal => config::get_recent_proposal_route(), MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), MessagePurpose::Data => config::get_transactions_route(tx_index), MessagePurpose::Internal => unimplemented!(), @@ -221,6 +222,15 @@ impl Inner { // } // } } + MessagePurpose::CurrentProposal => { + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + } MessagePurpose::Vote => { // error!( // "Received {} votes from web server for view {} is da {}", @@ -498,6 +508,7 @@ impl< let endpoint = match &message.purpose() { MessagePurpose::Proposal => config::post_proposal_route(*view_number), + MessagePurpose::CurrentProposal => return Err(WebServerNetworkError::EndpointError), MessagePurpose::Vote => config::post_vote_route(*view_number), MessagePurpose::Data => config::post_transactions_route(), MessagePurpose::Internal => return Err(WebServerNetworkError::EndpointError), @@ -783,6 +794,25 @@ impl< .await; } } + ConsensusIntentEvent::PollForCurrentProposal => { + // create new task + let (_, receiver) = unbounded(); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } ConsensusIntentEvent::PollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 12d7313075..cea584f915 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -494,7 +494,12 @@ where // } self.cur_view = new_view; self.current_proposal = None; - + if new_view == TYPES::Time::new(1) { + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .await; + } // Start polling for proposals for the new view self.quorum_exchange .network() diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index c8ef07d51c..a9d9e5d586 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -113,7 +113,8 @@ impl SpinningTaskDescription { state.late_start.remove(&idx.try_into().unwrap()) { tracing::error!("Spinning up node late"); - node.run_tasks().await; + let handle = node.run_tasks().await; + handle.hotshot.start_consensus().await; } } UpDown::Down => { diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 2cbc6c8a16..9c6bd0bb38 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -59,6 +59,61 @@ async fn test_catchup() { .await; } +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_catchup_web() { + use std::time::Duration; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingTestTypes, SequencingWebImpl}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let catchup_nodes = vec![ChangeNode { + idx: 18, + updown: UpDown::Up, + }]; + + metadata.timing_data = timing_data; + metadata.start_nodes = 19; + metadata.total_nodes = 20; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::from_millis(400), catchup_nodes)], + }; + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + /// Test that one node catches up and has sucessful views after coming back #[cfg(test)] #[cfg_attr( @@ -66,6 +121,7 @@ async fn test_catchup() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_catchup_one_node() { use std::time::Duration; @@ -94,13 +150,13 @@ async fn test_catchup_one_node() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), catchup_nodes)], + node_changes: vec![(Duration::from_millis(400), catchup_nodes)], }; metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), + duration: Duration::from_millis(20000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 7ebeddd577..f8963c9d52 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -30,7 +30,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), dead_nodes)], + node_changes: vec![(Duration::new(0, 5000), dead_nodes)], }; // TODO ED Add safety task, etc to confirm TCs are being formed diff --git a/types/src/message.rs b/types/src/message.rs index 89d2864aa9..c499309a43 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -57,6 +57,8 @@ pub struct Messages>(pub Vec *view_number, + ConsensusIntentEvent::PollForCurrentProposal => 1, } } } diff --git a/web_server/api.toml b/web_server/api.toml index 5f749bcdb0..cc610fc9c9 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -11,6 +11,13 @@ DOC = """ Return the proposal for a given view number """ +# GET the proposal for a view, where the view is passed as an argument +[route.getrecentproposal] +PATH = ["proposal/"] +DOC = """ +Return the proposal for the most recent view the server has +""" + # POST a proposal, where the view is passed as an argument [route.postproposal] PATH = ["proposal/:view_number"] diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 1da2781c55..c06f365bd3 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -17,6 +17,10 @@ pub fn post_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } +pub fn get_recent_proposal_route() -> String { + "api/proposal".to_string() +} + pub fn get_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index ac2730b448..e29ce036e8 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -33,6 +33,8 @@ struct WebServerState { da_certificates: HashMap)>, /// view for oldest proposals in memory oldest_proposal: u64, + /// view for the most recent proposal to help nodes catchup + recent_proposal: u64, /// view for teh oldest DA certificate oldest_certificate: u64, @@ -74,6 +76,7 @@ impl WebServerState { num_txns: 0, oldest_vote: 0, oldest_proposal: 0, + recent_proposal: 0, oldest_certificate: 0, shutdown: None, stake_table: Vec::new(), @@ -101,6 +104,7 @@ impl WebServerState { /// Trait defining methods needed for the `WebServerState` pub trait WebServerDataSource { fn get_proposal(&self, view_number: u64) -> Result>>, Error>; + fn get_recent_proposal(&self) -> Result>>, Error>; fn get_view_sync_proposal( &self, view_number: u64, @@ -156,6 +160,10 @@ impl WebServerDataSource for WebServerState { } } + fn get_recent_proposal(&self) -> Result>>, Error> { + self.get_proposal(self.recent_proposal) + } + fn get_view_sync_proposal( &self, view_number: u64, @@ -316,6 +324,10 @@ impl WebServerDataSource for WebServerState { fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { debug!("Received proposal for view {}", view_number); + if view_number > self.recent_proposal { + self.recent_proposal = view_number; + } + // Only keep proposal history for MAX_VIEWS number of view if self.proposals.len() >= MAX_VIEWS { self.proposals.remove(&self.oldest_proposal); @@ -495,6 +507,9 @@ where } .boxed() })? + .get("getrecentproposal", |_req, state| { + async move { state.get_recent_proposal() }.boxed() + })? .get("getviewsyncproposal", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; From 413b4d2578dcd72051195d8c3bc93efb349852da Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:53:30 -0400 Subject: [PATCH 0200/1393] Update combined network (#1863) * combined network updates * merge with develop --- constants/src/lib.rs | 9 + hotshot/src/traits.rs | 2 +- hotshot/src/traits/networking.rs | 2 +- .../src/traits/networking/combined_network.rs | 475 ++++++++++++++++++ .../networking/web_server_libp2p_fallback.rs | 300 ----------- testing/src/node_types.rs | 141 +++--- ...allback_network.rs => combined_network.rs} | 26 +- types/src/message.rs | 16 +- types/src/traits/node_implementation.rs | 2 + 9 files changed, 591 insertions(+), 382 deletions(-) create mode 100644 hotshot/src/traits/networking/combined_network.rs delete mode 100644 hotshot/src/traits/networking/web_server_libp2p_fallback.rs rename testing/tests/{fallback_network.rs => combined_network.rs} (67%) diff --git a/constants/src/lib.rs b/constants/src/lib.rs index 8a4d9d5d46..0b1b769650 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -8,3 +8,12 @@ pub const LOOK_AHEAD: u64 = 5; /// the default kademlia record republication interval (in seconds) pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; + +/// the number of messages to cache in the combined network +pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; + +/// the number of messages to attempt to send over the primary network before switching to prefer the secondary network +pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 10; + +/// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network +pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 10; diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 370dc47839..e29c33a999 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,9 +13,9 @@ pub use storage::{Result as StorageResult, Storage}; pub mod implementations { pub use super::{ networking::{ + combined_network::{CombinedCommChannel, CombinedNetworks}, libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, - web_server_libp2p_fallback::{CombinedNetworks, WebServerWithFallbackCommChannel}, web_server_network::{WebCommChannel, WebServerNetwork}, }, storage::memory_storage::MemoryStorage, // atomic_storage::AtomicStorage, diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index e48e71d29b..357e7a4da3 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -5,9 +5,9 @@ //! - [`MemoryNetwork`](memory_network::MemoryNetwork), an in memory testing-only implementation //! - [`Libp2pNetwork`](libp2p_network::Libp2pNetwork), a production-ready networking impelmentation built on top of libp2p-rs. +pub mod combined_network; pub mod libp2p_network; pub mod memory_network; -pub mod web_server_libp2p_fallback; pub mod web_server_network; pub use hotshot_types::traits::network::{ diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs new file mode 100644 index 0000000000..63fdb6df88 --- /dev/null +++ b/hotshot/src/traits/networking/combined_network.rs @@ -0,0 +1,475 @@ +//! Networking Implementation that has a primary and a fallback newtork. If the primary +//! Errors we will use the backup to send or receive +use super::NetworkError; +use crate::{ + traits::implementations::{Libp2pNetwork, WebServerNetwork}, + NodeImplementation, +}; +use async_lock::RwLock; +use hotshot_constants::{ + COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, + COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, +}; +use std::{ + collections::HashSet, + hash::Hasher, + sync::atomic::{AtomicU64, Ordering}, +}; +use tracing::error; + +use async_trait::async_trait; + +use futures::join; + +use async_compatibility_layer::channel::UnboundedSendError; +use hotshot_task::{boxed_sync, BoxSyncFuture}; +use hotshot_types::{ + data::ViewNumber, + message::Message, + traits::{ + election::Membership, + network::{ + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, + TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, + ViewMessage, + }, + node_implementation::NodeType, + }, +}; +use std::{collections::hash_map::DefaultHasher, marker::PhantomData, sync::Arc}; + +use std::hash::Hash; + +/// A cache to keep track of the last n messages we've seen, avoids reprocessing duplicates +/// from multiple networks +#[derive(Clone, Debug)] +struct Cache { + /// The maximum number of items to store in the cache + capacity: usize, + /// The cache itself + cache: HashSet, + /// The hashes of the messages in the cache, in order of insertion + hashes: Vec, +} + +impl Cache { + /// Create a new cache with the given capacity + fn new(capacity: usize) -> Self { + Self { + capacity, + cache: HashSet::with_capacity(capacity), + hashes: Vec::with_capacity(capacity), + } + } + + /// Insert a hash into the cache + fn insert(&mut self, hash: u64) { + if self.cache.contains(&hash) { + return; + } + + // calculate how much we are over and remove that many elements from the cache. deal with overflow + let over = (self.hashes.len() + 1).saturating_sub(self.capacity); + if over > 0 { + for _ in 0..over { + let hash = self.hashes.remove(0); + self.cache.remove(&hash); + } + } + + self.cache.insert(hash); + self.hashes.push(hash); + } + + /// Check if the cache contains a hash + fn contains(&self, hash: u64) -> bool { + self.cache.contains(&hash) + } + + /// Get the number of items in the cache + #[cfg(test)] + fn len(&self) -> usize { + self.cache.len() + } +} + +/// Helper function to calculate a hash of a type that implements Hash +fn calculate_hash_of(t: &T) -> u64 { + let mut s = DefaultHasher::new(); + t.hash(&mut s); + s.finish() +} + +/// A communication channel with 2 networks, where we can fall back to the slower network if the +/// primary fails +#[derive(Clone, Debug)] +pub struct CombinedCommChannel< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +> { + /// The two networks we'll use for send/recv + networks: Arc>, + + /// Last n seen messages to prevent processing duplicates + message_cache: Arc>, + + /// If the primary network is down (0) or not, and for how many messages + primary_down: Arc, +} + +impl, MEMBERSHIP: Membership> + CombinedCommChannel +{ + /// Constructor + #[must_use] + pub fn new(networks: Arc>) -> Self { + Self { + networks, + message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + primary_down: Arc::new(AtomicU64::new(0)), + } + } + + /// Get a ref to the primary network + #[must_use] + pub fn primary(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { + &self.networks.0 + } + + /// Get a ref to the backup network + #[must_use] + pub fn secondary(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { + &self.networks.1 + } +} + +/// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` +/// We need this so we can impl `TestableNetworkingImplementation` +/// on the tuple +#[derive(Debug, Clone)] +pub struct CombinedNetworks< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +>( + pub WebServerNetwork, TYPES::SignatureKey, TYPES>, + pub Libp2pNetwork, TYPES::SignatureKey>, + pub PhantomData, +); + +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for CombinedNetworks +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generators = ( + , + TYPES::SignatureKey, + TYPES, + > as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ), + , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ) + ); + Box::new(move |node_id| { + CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +impl, MEMBERSHIP: Membership> + TestableNetworkingImplementation> + for CombinedCommChannel +{ + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + ) -> Box Self + 'static> { + let generator = as TestableNetworkingImplementation<_, _>>::generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + is_da + ); + Box::new(move |node_id| Self { + networks: generator(node_id).into(), + message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + primary_down: Arc::new(AtomicU64::new(0)), + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +#[async_trait] +impl, MEMBERSHIP: Membership> + CommunicationChannel, MEMBERSHIP> + for CombinedCommChannel +{ + type NETWORK = CombinedNetworks; + + async fn wait_for_ready(&self) { + join!( + self.primary().wait_for_ready(), + self.secondary().wait_for_ready() + ); + } + + async fn is_ready(&self) -> bool { + self.primary().is_ready().await && self.secondary().is_ready().await + } + + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + let closure = async move { + join!(self.primary().shut_down(), self.secondary().shut_down()); + }; + boxed_sync(closure) + } + + async fn broadcast_message( + &self, + message: Message, + election: &MEMBERSHIP, + ) -> Result<(), NetworkError> { + let recipients = + >::get_committee(election, message.get_view_number()); + + // broadcast optimistically on both networks, but if the primary network is down, skip it + if self.primary_down.load(Ordering::Relaxed) < COMBINED_NETWORK_MIN_PRIMARY_FAILURES + || self.primary_down.load(Ordering::Relaxed) % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL + == 0 + { + // broadcast on the primary network as it is not down, or we are checking if it is back up + match self + .primary() + .broadcast_message(message.clone(), recipients.clone()) + .await + { + Ok(_) => { + self.primary_down.store(0, Ordering::Relaxed); + } + Err(e) => { + error!("Error on primary network: {}", e); + self.primary_down.fetch_add(1, Ordering::Relaxed); + } + }; + } + + self.secondary() + .broadcast_message(message, recipients) + .await + } + + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + // DM optimistically on both networks, but if the primary network is down, skip it + if self.primary_down.load(Ordering::Relaxed) < COMBINED_NETWORK_MIN_PRIMARY_FAILURES + || self.primary_down.load(Ordering::Relaxed) % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL + == 0 + { + // message on the primary network as it is not down, or we are checking if it is back up + match self + .primary() + .direct_message(message.clone(), recipient.clone()) + .await + { + Ok(_) => { + self.primary_down.store(0, Ordering::Relaxed); + } + Err(e) => { + error!("Error on primary network: {}", e); + self.primary_down.fetch_add(1, Ordering::Relaxed); + } + }; + } + + self.secondary().direct_message(message, recipient).await + } + + fn recv_msgs<'a, 'b>( + &'a self, + transmit_type: TransmitType, + ) -> BoxSyncFuture<'b, Result>, NetworkError>> + where + 'a: 'b, + Self: 'b, + { + // recv on both networks because nodes may be accessible only on either. discard duplicates + let closure = async move { + let mut primary_msgs = self.primary().recv_msgs(transmit_type).await?; + let mut secondary_msgs = self.secondary().recv_msgs(transmit_type).await?; + + primary_msgs.append(secondary_msgs.as_mut()); + + let mut filtered_msgs = Vec::with_capacity(primary_msgs.len()); + for msg in primary_msgs { + if !self + .message_cache + .read() + .await + .contains(calculate_hash_of(&msg)) + { + filtered_msgs.push(msg.clone()); + self.message_cache + .write() + .await + .insert(calculate_hash_of(&msg)); + } + } + + Ok(filtered_msgs) + }; + + boxed_sync(closure) + } + + async fn queue_node_lookup( + &self, + view_number: ViewNumber, + pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + self.primary() + .queue_node_lookup(view_number, pk.clone()) + .await?; + self.secondary().queue_node_lookup(view_number, pk).await + } + + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + as ConnectedNetwork,TYPES::SignatureKey>>:: + inject_consensus_info(self.primary(), event.clone()).await; + + as ConnectedNetwork,TYPES::SignatureKey>>:: + inject_consensus_info(self.secondary(), event).await; + } +} + +impl, MEMBERSHIP: Membership> + TestableChannelImplementation< + TYPES, + Message, + MEMBERSHIP, + CombinedNetworks, + > for CombinedCommChannel +{ + fn generate_network() -> Box) -> Self + 'static> { + Box::new(move |network| CombinedCommChannel::new(network)) + } +} + +#[cfg(test)] +mod test { + use hotshot_types::block_impl::VIDTransaction; + + use super::*; + use tracing::instrument; + + /// cache eviction test + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_cache_eviction() { + let mut cache = Cache::new(3); + cache.insert(1); + cache.insert(2); + cache.insert(3); + cache.insert(4); + assert_eq!(cache.cache.len(), 3); + assert_eq!(cache.hashes.len(), 3); + assert!(!cache.cache.contains(&1)); + assert!(cache.cache.contains(&2)); + assert!(cache.cache.contains(&3)); + assert!(cache.cache.contains(&4)); + assert!(!cache.hashes.contains(&1)); + assert!(cache.hashes.contains(&2)); + assert!(cache.hashes.contains(&3)); + assert!(cache.hashes.contains(&4)); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_hash_calculation() { + let message1 = VIDTransaction(vec![0; 32]); + let message2 = VIDTransaction(vec![1; 32]); + + assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); + assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_cache_integrity() { + let message1 = VIDTransaction(vec![0; 32]); + let message2 = VIDTransaction(vec![1; 32]); + + let mut cache = Cache::new(3); + + // test insertion integrity + cache.insert(calculate_hash_of(&message1)); + cache.insert(calculate_hash_of(&message2)); + + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + + // check that the cache is not modified on duplicate entries + cache.insert(calculate_hash_of(&message1)); + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + assert_eq!(cache.len(), 2); + } +} diff --git a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs b/hotshot/src/traits/networking/web_server_libp2p_fallback.rs deleted file mode 100644 index b3324681a5..0000000000 --- a/hotshot/src/traits/networking/web_server_libp2p_fallback.rs +++ /dev/null @@ -1,300 +0,0 @@ -//! Networking Implementation that has a primary and a fallback newtork. If the primary -//! Errors we will use the backup to send or receive -use super::NetworkError; -use crate::{ - traits::implementations::{Libp2pNetwork, WebServerNetwork}, - NodeImplementation, -}; - -use async_trait::async_trait; - -use futures::join; - -use async_compatibility_layer::channel::UnboundedSendError; -use hotshot_task::{boxed_sync, BoxSyncFuture}; -use hotshot_types::{ - data::ViewNumber, - message::Message, - traits::{ - election::Membership, - network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - ViewMessage, - }, - node_implementation::NodeType, - }, -}; -use std::{marker::PhantomData, sync::Arc}; -use tracing::error; -/// A communication channel with 2 networks, where we can fall back to the slower network if the -/// primary fails -#[derive(Clone, Debug)] -pub struct WebServerWithFallbackCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { - /// The two networks we'll use for send/recv - networks: Arc>, -} - -impl, MEMBERSHIP: Membership> - WebServerWithFallbackCommChannel -{ - /// Constructor - #[must_use] - pub fn new(networks: Arc>) -> Self { - Self { networks } - } - - /// Get a ref to the primary network - #[must_use] - pub fn network(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { - &self.networks.0 - } - - /// Get a ref to the backup network - #[must_use] - pub fn fallback(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { - &self.networks.1 - } -} - -/// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` -/// We need this so we can impl `TestableNetworkingImplementation` -/// on the tuple -#[derive(Debug, Clone)] -pub struct CombinedNetworks< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, ->( - pub WebServerNetwork, TYPES::SignatureKey, TYPES>, - pub Libp2pNetwork, TYPES::SignatureKey>, - pub PhantomData, -); - -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for CombinedNetworks -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - ) -> Box Self + 'static> { - let generators = ( - , - TYPES::SignatureKey, - TYPES, - > as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ), - , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ) - ); - Box::new(move |node_id| { - CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) - }) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for WebServerWithFallbackCommChannel -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - ) -> Box Self + 'static> { - let generator = as TestableNetworkingImplementation<_, _>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da - ); - Box::new(move |node_id| Self { - networks: generator(node_id).into(), - }) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -#[async_trait] -impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> - for WebServerWithFallbackCommChannel -{ - type NETWORK = CombinedNetworks; - - async fn wait_for_ready(&self) { - join!( - self.network().wait_for_ready(), - self.fallback().wait_for_ready() - ); - } - - async fn is_ready(&self) -> bool { - self.network().is_ready().await && self.fallback().is_ready().await - } - - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - join!(self.network().shut_down(), self.fallback().shut_down()); - }; - boxed_sync(closure) - } - - async fn broadcast_message( - &self, - message: Message, - election: &MEMBERSHIP, - ) -> Result<(), NetworkError> { - let recipients = - >::get_committee(election, message.get_view_number()); - let fallback = self - .fallback() - .broadcast_message(message.clone(), recipients.clone()); - let network = self.network().broadcast_message(message, recipients); - match join!(fallback, network) { - (Err(e1), Err(e2)) => { - error!( - "Both network broadcasts failed primary error: {}, fallback error: {}", - e1, e2 - ); - Err(e1) - } - (Err(e), _) => { - error!("Failed primary broadcast with error: {}", e); - Ok(()) - } - (_, Err(e)) => { - error!("Failed backup broadcast with error: {}", e); - Ok(()) - } - _ => Ok(()), - } - } - - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { - match self - .network() - .direct_message(message.clone(), recipient.clone()) - .await - { - Ok(_) => Ok(()), - Err(e) => { - error!( - "Falling back on direct message, error on primary network: {}", - e - ); - self.fallback().direct_message(message, recipient).await - } - } - } - - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - match self.network().recv_msgs(transmit_type).await { - Ok(msgs) => Ok(msgs), - Err(e) => { - error!( - "Falling back on recv message, error on primary network: {}", - e - ); - self.fallback().recv_msgs(transmit_type).await - } - } - }; - boxed_sync(closure) - } - - async fn queue_node_lookup( - &self, - view_number: ViewNumber, - pk: TYPES::SignatureKey, - ) -> Result<(), UnboundedSendError>> { - self.network() - .queue_node_lookup(view_number, pk.clone()) - .await?; - self.fallback().queue_node_lookup(view_number, pk).await?; - - Ok(()) - } - - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(self.network(), event.clone()) - .await; - - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(self.fallback(), event) - .await; - } -} - -impl, MEMBERSHIP: Membership> - TestableChannelImplementation< - TYPES, - Message, - MEMBERSHIP, - CombinedNetworks, - > for WebServerWithFallbackCommChannel -{ - fn generate_network() -> Box) -> Self + 'static> { - Box::new(move |network| WebServerWithFallbackCommChannel::new(network)) - } -} diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 4ffd74e2bf..7e5308b35f 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -6,8 +6,8 @@ use hotshot::{ traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig, StaticVoteToken}, implementations::{ - Libp2pCommChannel, Libp2pNetwork, MemoryCommChannel, MemoryNetwork, MemoryStorage, - WebCommChannel, WebServerNetwork, WebServerWithFallbackCommChannel, + CombinedCommChannel, Libp2pCommChannel, Libp2pNetwork, MemoryCommChannel, + MemoryNetwork, MemoryStorage, WebCommChannel, WebServerNetwork, }, NodeImplementation, }, @@ -60,7 +60,7 @@ pub struct SequencingLibp2pImpl; pub struct SequencingWebImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] -pub struct StaticFallbackImpl; +pub struct SequencingCombinedImpl; pub type StaticMembership = StaticCommittee>; @@ -73,8 +73,8 @@ type StaticLibp2pDAComm = type StaticWebDAComm = WebCommChannel; -type StaticFallbackComm = - WebServerWithFallbackCommChannel; +type StaticCombinedDAComm = + CombinedCommChannel; pub type StaticMemoryQuorumComm = MemoryCommChannel; @@ -84,6 +84,9 @@ type StaticLibp2pQuorumComm = type StaticWebQuorumComm = WebCommChannel; +type StaticCombinedQuorumComm = + CombinedCommChannel; + pub type StaticMemoryViewSyncComm = MemoryCommChannel; @@ -93,6 +96,9 @@ type StaticLibp2pViewSyncComm = type StaticWebViewSyncComm = WebCommChannel; +type StaticCombinedViewSyncComm = + CombinedCommChannel; + pub type SequencingLibp2pExchange = SequencingExchanges< SequencingTestTypes, Message, @@ -471,38 +477,57 @@ impl NodeImplementation for SequencingWebImpl { } } -pub type SequencingFallbackExchange = SequencingExchanges< +pub type SequencingCombinedExchange = SequencingExchanges< SequencingTestTypes, - Message, + Message, QuorumExchange< SequencingTestTypes, - >::Leaf, + >::Leaf, QuorumProposal>, StaticMembership, - StaticFallbackComm, - Message, + StaticCombinedQuorumComm, + Message, >, CommitteeExchange< SequencingTestTypes, StaticMembership, - StaticFallbackComm, - Message, + StaticCombinedDAComm, + Message, >, ViewSyncExchange< SequencingTestTypes, ViewSyncCertificate, StaticMembership, - StaticFallbackComm, - Message, + StaticCombinedViewSyncComm, + Message, >, >; +impl NodeImplementation for SequencingCombinedImpl { + type Storage = MemoryStorage>; + type Leaf = SequencingLeaf; + type Exchanges = SequencingCombinedExchange; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + ( + ChannelMaps::new(start_view), + Some(ChannelMaps::new(start_view)), + ) + } +} + impl TestableExchange< SequencingTestTypes, - >::Leaf, - Message, - > for SequencingFallbackExchange + >::Leaf, + Message, + > for SequencingCombinedExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( @@ -515,52 +540,54 @@ impl ) -> ( , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, ) + 'static, > { - let libp2p_generator = Arc::new(, + let web_server_network_generator = Arc::new(, ::SignatureKey, + _, > as TestableNetworkingImplementation< SequencingTestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, 0, da_committee_size, - true, + false, )); - let ws_generator = Arc::new(, + + let web_server_network_da_generator = Arc::new(, ::SignatureKey, - _, + SequencingTestTypes, > as TestableNetworkingImplementation< SequencingTestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, 1, da_committee_size, - false, + true, )); - let ws_da_generator = Arc::new(, + + let libp2p_network_generator = Arc::new(, ::SignatureKey, - SequencingTestTypes, > as TestableNetworkingImplementation< SequencingTestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -569,53 +596,43 @@ impl true, )); + // libp2p Box::new(move |id| { - let libp2p_network = libp2p_generator(id); - let ws = ws_generator(id); - let ws_da = ws_da_generator(id); - - // TODO make a proper constructor - let network = Arc::new(CombinedNetworks(ws, libp2p_network.clone(), PhantomData)); - let network_da = Arc::new(CombinedNetworks(ws_da, libp2p_network, PhantomData)); + let web_server_network = web_server_network_generator(id); + let web_server_network_da = web_server_network_da_generator(id); + + let libp2p_network = libp2p_network_generator(id); + + let network = Arc::new(CombinedNetworks( + web_server_network, + libp2p_network.clone(), + PhantomData, + )); + let network_da = Arc::new(CombinedNetworks( + web_server_network_da, + libp2p_network, + PhantomData, + )); let quorum_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); let view_sync_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); (quorum_chan, committee_chan, view_sync_chan) }) } } - -impl NodeImplementation for StaticFallbackImpl { - type Storage = MemoryStorage>; - type Leaf = SequencingLeaf; - type Exchanges = SequencingFallbackExchange; - type ConsensusMessage = SequencingMessage; - - fn new_channel_maps( - start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } -} diff --git a/testing/tests/fallback_network.rs b/testing/tests/combined_network.rs similarity index 67% rename from testing/tests/fallback_network.rs rename to testing/tests/combined_network.rs index 7a66e653c6..30a7f5e277 100644 --- a/testing/tests/fallback_network.rs +++ b/testing/tests/combined_network.rs @@ -2,9 +2,9 @@ use std::time::Duration; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingLibp2pImpl, SequencingTestTypes}, + node_types::{SequencingCombinedImpl, SequencingTestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, - test_builder::TestMetadata, + test_builder::{TestMetadata, TimingData}, }; use tracing::instrument; @@ -15,24 +15,32 @@ use tracing::instrument; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] -async fn webserver_libp2p_network() { +async fn test_combined_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata: TestMetadata = TestMetadata { + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10000, + start_delay: 120000, + + ..Default::default() + }, overall_safety_properties: OverallSafetyPropertiesDescription { - check_leaf: true, + num_successful_views: 35, ..Default::default() }, + // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::new(240, 0), + duration: Duration::from_millis(1_200_000), }, ), ..TestMetadata::default_multiple_rounds() }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await @@ -46,12 +54,12 @@ async fn webserver_libp2p_network() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] -async fn test_stress_webserver_libp2p_network() { +async fn test_stress_combined_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata::default_stress(); metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await diff --git a/types/src/message.rs b/types/src/message.rs index c26de8bcef..2823c788e1 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -22,15 +22,13 @@ use serde::{Deserialize, Serialize}; use std::{fmt::Debug, marker::PhantomData}; /// Incoming message -#[derive(Serialize, Deserialize, Clone, Debug, Derivative)] +#[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] -#[derivative(PartialEq)] pub struct Message> { /// The sender of this message pub sender: TYPES::SignatureKey, /// The message kind - #[derivative(PartialEq = "ignore")] pub kind: MessageKind, /// Phantom data. @@ -83,7 +81,7 @@ pub enum MessagePurpose { // TODO (da) make it more customized to the consensus layer, maybe separating the specific message // data from the kind enum. /// Enum representation of any message type -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] pub enum MessageKind> { /// Messages related to the consensus protocol @@ -132,7 +130,7 @@ impl> ViewMessage for Messa } /// Internal triggers sent by consensus messages. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] pub enum InternalTrigger { // May add other triggers if necessary. @@ -304,7 +302,7 @@ impl< } } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to both validating and sequencing consensus. pub enum GeneralConsensusMessage> @@ -328,7 +326,7 @@ where InternalTrigger(InternalTrigger), } -#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to the sequencing consensus protocol for the DA committee. pub enum CommitteeConsensusMessage { @@ -383,7 +381,7 @@ pub trait SequencingMessageType>: } /// Messages for sequencing consensus. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] #[serde(bound(deserialize = "", serialize = ""))] pub struct SequencingMessage< TYPES: NodeType, @@ -455,7 +453,7 @@ impl< CommitteeConsensusMessage::VidVote(_) => MessagePurpose::VidVote, CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, - CommitteeConsensusMessage::VidCertificate(_) => todo!(), + CommitteeConsensusMessage::VidCertificate(_) => MessagePurpose::VidCert, }, } } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 49d9184bc4..f229a9af9b 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -128,6 +128,8 @@ pub trait NodeImplementation: + Sync + 'static + for<'a> Deserialize<'a> + + Hash + + Eq + Serialize; /// Consensus type selected exchanges. From e86fbc49c6778f665818513ff5753edceba82976 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Tue, 10 Oct 2023 11:10:19 -0400 Subject: [PATCH 0201/1393] fix: Transaction task use quorum exchange instead of committee exchange (#1864) * tidy code TODO comments * comments * don't re-hash block commitment * more comments * add sign_block_commitment method to QuorumExchangeType trait * TransactionTaskState change committee_exchange -> quorum_exchange then fix all the build breaks * add test_memory_network * get num_storage_nodes from quorum exchange membership * update comment github issue link * lint * address https://github.com/EspressoSystems/HotShot/pull/1864#pullrequestreview-1660806880 --- hotshot/src/lib.rs | 4 +- hotshot/src/tasks/mod.rs | 11 +++-- task-impls/src/transactions.rs | 66 +++++++++++++++------------ types/src/block_impl.rs | 5 ++- types/src/certificate.rs | 1 - types/src/traits/election.rs | 82 +++++++++++++++++++++------------- 6 files changed, 100 insertions(+), 69 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b7872ce59e..4c59597bdd 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -713,7 +713,7 @@ where let task_runner = add_network_event_task( task_runner, internal_event_stream.clone(), - quorum_exchange, + quorum_exchange.clone(), NetworkTaskKind::Quorum, ) .await; @@ -748,7 +748,7 @@ where let task_runner = add_transaction_task( task_runner, internal_event_stream.clone(), - committee_exchange.clone(), + quorum_exchange, handle.clone(), ) .await; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bc95f8dbb6..01a9d06030 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -36,7 +36,7 @@ use hotshot_types::{ election::{ConsensusExchange, Membership}, network::{CommunicationChannel, TransmitType}, node_implementation::{ - CommitteeEx, ExchangesType, NodeImplementation, NodeType, ViewSyncEx, + CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, ViewSyncEx, }, state::ConsensusTime, }, @@ -425,15 +425,14 @@ pub async fn add_transaction_task< >( task_runner: TaskRunner, event_stream: ChannelStream>, - committee_exchange: CommitteeEx, + quorum_exchange: QuorumEx, handle: SystemContextHandle, ) -> TaskRunner where - CommitteeEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = DACertificate, - Commitment = Commitment, + Certificate = QuorumCertificate>, >, { // build the transactions task @@ -448,7 +447,7 @@ where transactions: Arc::default(), seen_transactions: HashSet::new(), cur_view: TYPES::Time::new(0), - committee_exchange: committee_exchange.into(), + quorum_exchange: quorum_exchange.into(), event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index c682f5f38d..a47a05e349 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -14,15 +14,15 @@ use hotshot_task::{ task_impls::HSTWithEvent, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction, NUM_CHUNKS, NUM_STORAGE_NODES}, - certificate::DACertificate, + block_impl::{VIDBlockPayload, VIDTransaction}, + certificate::QuorumCertificate, consensus::Consensus, data::{SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::SequencingConsensusApi, - election::{CommitteeExchangeType, ConsensusExchange}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType}, + election::{ConsensusExchange, Membership, QuorumExchangeType}, + node_implementation::{NodeImplementation, NodeType, QuorumEx}, BlockPayload, }, }; @@ -52,11 +52,10 @@ pub struct TransactionTaskState< >, A: SequencingConsensusApi, I> + 'static, > where - CommitteeEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = DACertificate, - Commitment = Commitment, + Certificate = QuorumCertificate>, >, { /// The state's api @@ -77,7 +76,7 @@ pub struct TransactionTaskState< pub seen_transactions: HashSet>, /// the committee exchange - pub committee_exchange: Arc>, + pub quorum_exchange: Arc>, /// Global events stream to publish events pub event_stream: ChannelStream>, @@ -99,11 +98,10 @@ impl< A: SequencingConsensusApi, I> + 'static, > TransactionTaskState where - CommitteeEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = DACertificate, - Commitment = Commitment, + Certificate = QuorumCertificate>, >, { /// main task event handler @@ -203,7 +201,7 @@ where self.cur_view = view; // If we are not the next leader (DA leader for this view) immediately exit - if !self.committee_exchange.is_leader(self.cur_view + 1) { + if !self.quorum_exchange.is_leader(self.cur_view + 1) { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); return None; } @@ -236,25 +234,40 @@ where drop(consensus); - let txns = self.wait_for_transactions(parent_leaf).await?; - // TODO (Keyao) Determine whether to allow empty transaction when proposing a block. + // TODO (Keyao) Determine whether to allow empty blocks. // + let txns = self.wait_for_transactions(parent_leaf).await?; + + // TODO move all VID stuff to a new VID task + // details here: https://github.com/EspressoSystems/HotShot/issues/1817#issuecomment-1747143528 + let num_storage_nodes = self.quorum_exchange.membership().total_nodes(); + debug!("Prepare VID shares for {} storage nodes", num_storage_nodes); + + // TODO Secure SRS for VID + // https://github.com/EspressoSystems/HotShot/issues/1686 + let srs = hotshot_types::data::test_srs(num_storage_nodes); - debug!("Prepare VID shares"); - // TODO https://github.com/EspressoSystems/HotShot/issues/1686 - let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - // TODO https://github.com/EspressoSystems/jellyfish/issues/375 + // TODO proper source for VID erasure code rate + // https://github.com/EspressoSystems/HotShot/issues/1734 + let num_chunks = num_storage_nodes / 2; + + let vid = VidScheme::new(num_chunks, num_storage_nodes, &srs).unwrap(); + + // TODO Wasteful flattening of tx bytes to accommodate VID API + // https://github.com/EspressoSystems/jellyfish/issues/375 let mut txns_flatten = Vec::new(); for txn in &txns { txns_flatten.extend(txn.0.clone()); } + let vid_disperse = vid.disperse(&txns_flatten).unwrap(); let block = VIDBlockPayload { transactions: txns, commitment: vid_disperse.commit, }; + // TODO never clone a block + // https://github.com/EspressoSystems/HotShot/issues/1858 self.event_stream .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) .await; @@ -271,10 +284,9 @@ where common: vid_disperse.common, }, // TODO (Keyao) This is also signed in DA task. - signature: self.committee_exchange.sign_da_proposal(&block.commit()), + signature: self.quorum_exchange.sign_block_commitment(block.commit()), }, - // TODO don't send to committee, send to quorum (consensus.rs) https://github.com/EspressoSystems/HotShot/issues/1696 - self.committee_exchange.public_key().clone(), + self.quorum_exchange.public_key().clone(), )) .await; return None; @@ -301,11 +313,10 @@ impl< A: SequencingConsensusApi, I> + 'static, > TransactionTaskState where - CommitteeEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = DACertificate, - Commitment = Commitment, + Certificate = QuorumCertificate>, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] @@ -399,11 +410,10 @@ impl< A: SequencingConsensusApi, I> + 'static, > TS for TransactionTaskState where - CommitteeEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Certificate = DACertificate, - Commitment = Commitment, + Certificate = QuorumCertificate>, >, { } diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 2360cb3a24..c4a08e345d 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -8,6 +8,7 @@ use crate::{ data::{test_srs, VidScheme, VidSchemeTrait}, traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}, }; +use ark_serialize::CanonicalDeserialize; use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; @@ -88,8 +89,8 @@ impl VIDBlockPayload { impl Committable for VIDBlockPayload { fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("BlockPayload Comm"); - builder.generic_byte_array(&self.commitment).finalize() + as CanonicalDeserialize>::deserialize(&*self.commitment) + .expect("conversion from VidScheme::Commit to Commitment should succeed") } fn tag() -> String { diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 09c3235744..2e55334f56 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -207,7 +207,6 @@ impl } fn genesis() -> Self { - // TODO GG need a new way to get fake commit now that we don't have Committable Self { leaf_commitment: COMMITMENT::default_commitment_no_preimage(), view_number: ::genesis(), diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 0a881cd093..3961187fae 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -94,19 +94,28 @@ where ViewSyncFinalize(COMMITMENT), } -impl VoteData +/// Make different types of `VoteData` committable +impl Committable for VoteData where COMMITMENT: CommitmentBounds, { - /// Return the underlying commitment. - #[must_use] - pub fn get_commit(&self) -> COMMITMENT { - #[allow(clippy::enum_glob_use)] - use VoteData::*; - match self { - DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) - | ViewSyncFinalize(c) => *c, - } + fn commit(&self) -> Commitment { + let (tag, commit) = match self { + VoteData::DA(c) => ("DA BlockPayload Commit", c), + VoteData::Yes(c) => ("Yes Vote Commit", c), + VoteData::No(c) => ("No Vote Commit", c), + VoteData::Timeout(c) => ("Timeout View Number Commit", c), + VoteData::ViewSyncPreCommit(c) => ("ViewSyncPreCommit", c), + VoteData::ViewSyncCommit(c) => ("ViewSyncCommit", c), + VoteData::ViewSyncFinalize(c) => ("ViewSyncFinalize", c), + }; + commit::RawCommitmentBuilder::new(tag) + .var_size_bytes(commit.as_ref()) + .finalize() + } + + fn tag() -> String { + ("VOTE_DATA_COMMIT").to_string() } } @@ -340,7 +349,7 @@ pub trait ConsensusExchange: Send + Sync { match qc.signatures() { AssembledSignature::DA(qc) => { - let real_commit = VoteData::DA(leaf_commitment).get_commit(); + let real_commit = VoteData::DA(leaf_commitment).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -348,7 +357,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::Yes(qc) => { - let real_commit = VoteData::Yes(leaf_commitment).get_commit(); + let real_commit = VoteData::Yes(leaf_commitment).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -356,7 +365,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::No(qc) => { - let real_commit = VoteData::No(leaf_commitment).get_commit(); + let real_commit = VoteData::No(leaf_commitment).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -384,7 +393,7 @@ pub trait ConsensusExchange: Send + Sync { let mut is_valid_vote_token = false; let mut is_valid_signature = false; if let Some(key) = ::from_bytes(encoded_key) { - is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); + is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); let valid_vote_token = self.membership().validate_vote_token(key, vote_token); is_valid_vote_token = match valid_vote_token { Err(_) => { @@ -406,7 +415,7 @@ pub trait ConsensusExchange: Send + Sync { data: &VoteData, vote_token: &Checked, ) -> bool { - let is_valid_signature = key.validate(encoded_signature, data.get_commit().as_ref()); + let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); let valid_vote_token = self .membership() .validate_vote_token(key.clone(), vote_token.clone()); @@ -615,7 +624,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::DA(block_commitment).get_commit().as_ref(), + VoteData::DA(block_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -658,7 +667,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::DA(block_commitment).get_commit().as_ref(), + VoteData::DA(block_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -770,6 +779,12 @@ pub trait QuorumExchangeType, leaf_commitment: &Commitment, ) -> EncodedSignature; + /// Sign a block commitment. + fn sign_block_commitment( + &self, + block_commitment: Commitment, + ) -> EncodedSignature; + /// Sign a positive vote on validating or commitment proposal. /// /// The leaf commitment and the type of the vote (yes) are signed, which is the minimum amount @@ -888,19 +903,28 @@ impl< signature } + fn sign_block_commitment( + &self, + block_commitment: Commitment<::BlockType>, + ) -> EncodedSignature { + TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()) + } + /// Sign a positive vote on validating or commitment proposal. /// /// The leaf commitment and the type of the vote (yes) are signed, which is the minimum amount /// of information necessary for any user of the subsequently constructed QC to check that this /// node voted `Yes` on that leaf. The leaf is expected to be reconstructed based on other /// information in the yes vote. + /// + /// TODO GG: why return the pubkey? Some other `sign_xxx` methods do not return the pubkey. fn sign_yes_vote( &self, leaf_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::Yes(leaf_commitment).get_commit().as_ref(), + VoteData::Yes(leaf_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -910,13 +934,14 @@ impl< /// The leaf commitment and the type of the vote (no) are signed, which is the minimum amount /// of information necessary for any user of the subsequently constructed QC to check that this /// node voted `No` on that leaf. + /// TODO GG: why return the pubkey? Some other `sign_xxx` methods do not return the pubkey. fn sign_no_vote( &self, leaf_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::No(leaf_commitment).get_commit().as_ref(), + VoteData::No(leaf_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -928,12 +953,11 @@ impl< /// /// This also allows for the high QC included with the vote to be spoofed in a MITM scenario, /// but it is outside our threat model. + /// TODO GG: why return the pubkey? Some other `sign_xxx` methods do not return the pubkey. fn sign_timeout_vote(&self, view_number: TYPES::Time) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::Timeout(view_number.commit()) - .get_commit() - .as_ref(), + VoteData::Timeout(view_number.commit()).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -1182,9 +1206,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncPreCommit(commitment) - .get_commit() - .as_ref(), + VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1225,7 +1247,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncCommit(commitment).get_commit().as_ref(), + VoteData::ViewSyncCommit(commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1266,7 +1288,7 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::ViewSyncFinalize(commitment).get_commit().as_ref(), + VoteData::ViewSyncFinalize(commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) @@ -1297,7 +1319,7 @@ impl< }; match certificate_internal.signatures { AssembledSignature::ViewSyncPreCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).get_commit(); + let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().failure_threshold().get()), @@ -1309,7 +1331,7 @@ impl< ) } AssembledSignature::ViewSyncCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).get_commit(); + let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -1321,7 +1343,7 @@ impl< ) } AssembledSignature::ViewSyncFinalize(raw_signatures) => { - let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).get_commit(); + let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), From 812f551179f59dfba80ab5ea86e6dfcb1280462a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 10 Oct 2023 15:05:15 -0700 Subject: [PATCH 0202/1393] remove the metric rejected txs and some clean up --- task-impls/src/transactions.rs | 3 --- types/src/consensus.rs | 19 +------------------ 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index b4936bc001..c682f5f38d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -133,9 +133,6 @@ where warn!("Conversion failed: {e}. Using the max value."); i64::MAX })); - } else { - // it's more like the calculation of duplicate transactions - consensus.metrics.rejected_transactions.add(1); } } }) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 24a51e5b6c..896c49ae93 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -64,7 +64,6 @@ pub struct Consensus> { /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces #[derive(Clone, Debug)] pub struct ConsensusMetricsValue { - #[allow(dead_code)] /// The values that are being tracked pub values: Arc>, /// The number of last synced synced block height @@ -73,26 +72,12 @@ pub struct ConsensusMetricsValue { pub last_decided_view: Box, /// The current view pub current_view: Box, - // The duration to collect votes in a view (only applies when this insance is the leader) - // pub vote_validate_duration: Box, - // The duration we waited for txns before building the proposal - // pub proposal_wait_duration: Box, - // The duration to build the proposal - // pub proposal_build_duration: Box, - // The duration of each view, in seconds - // pub view_duration: Box, /// Number of views that are in-flight since the last decided view pub number_of_views_since_last_decide: Box, /// Number of views that are in-flight since the last anchor view pub number_of_views_per_decide_event: Box, /// Number of invalid QCs we've seen since the last commit. pub invalid_qc: Box, - // Number of views that were discarded since from one anchor to the next - // pub discarded_views_per_decide_event: Box, - // Views where no proposal was seen from one anchor to the next - // pub empty_views_per_decide_event: Box, - /// Number of rejected transactions, it's more like duplicated transactions in current implementation - pub rejected_transactions: Box, /// Number of outstanding transactions pub outstanding_transactions: Box, /// Memory size in bytes of the serialized transactions still outstanding @@ -235,14 +220,12 @@ impl ConsensusMetricsValue { number_of_views_per_decide_event: metrics .create_histogram(String::from("number_of_views_per_decide_event"), None), invalid_qc: metrics.create_gauge(String::from("invalid_qc"), None), - rejected_transactions: metrics - .create_counter(String::from("rejected_transactions"), None), outstanding_transactions: metrics .create_gauge(String::from("outstanding_transactions"), None), outstanding_transactions_memory_size: metrics .create_gauge(String::from("outstanding_transactions_memory_size"), None), number_of_timeouts: metrics - .create_counter(String::from("number_of_views_timed_out"), None), + .create_counter(String::from("number_of_timeouts"), None), } } } From b9802f01a130f68e4a6de80836bc06dc16a6b536 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 11:51:32 -0400 Subject: [PATCH 0203/1393] basic tests pass --- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 18 ++++++++++-------- types/src/traits/election.rs | 14 +++++--------- types/src/vote.rs | 2 +- 4 files changed, 17 insertions(+), 19 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ff1c92395d..77ac7dfb33 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -550,7 +550,7 @@ where cert.view_number, vote_token) } else { - error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert.view_number, self.cur_view ); + error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert, self.cur_view ); return false; }; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 04db0b25fa..767e149289 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -272,10 +272,11 @@ where // `self.cur_view` should be at least 1 since there is a view change before getting // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. - if view < self.cur_view - 1 { - warn!("Throwing away DA proposal that is more than one view older"); - return None; - } + // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block + // if view < self.cur_view - 1 { + // warn!("Throwing away DA proposal that is more than one view older"); + // return None; + // } debug!( "Got a DA block with {} transactions!", @@ -507,10 +508,11 @@ where // `self.cur_view` should be at least 1 since there is a view change before getting // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. - if view < self.cur_view - 1 { - warn!("Throwing away VID disperse data that is more than one view older"); - return None; - } + // TODO ED Revisit this + // if view < self.cur_view - 1 { + // warn!("Throwing away VID disperse data that is more than one view older"); + // return None; + // } debug!("VID disperse data is fresh."); let block_commitment = disperse.data.commitment; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 282d85ba52..465d1aa03a 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -354,7 +354,11 @@ pub trait ConsensusExchange: Send + Sync { self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) + ::check( + &real_qc_pp, + real_commit.as_ref(), + &qc, + ) } AssembledSignature::Yes(qc) => { let real_commit = VoteData::Yes(leaf_commitment).commit(); @@ -380,14 +384,6 @@ pub trait ConsensusExchange: Send + Sync { ); ::check(&real_qc_pp, real_commit.as_ref(), &qc) } - AssembledSignature::Timeout(qc) => { - let real_commit = VoteData::Timeout(leaf_commitment).get_commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } AssembledSignature::Genesis() => true, AssembledSignature::ViewSyncPreCommit(_) | AssembledSignature::ViewSyncCommit(_) diff --git a/types/src/vote.rs b/types/src/vote.rs index 4fea6ab472..bb4542ebe3 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -421,7 +421,7 @@ impl< self.da_vote_outcomes.remove(&vote_commitment); - return Either::Right(AssembledSignature::Timeout(real_qc_sig)); + return Either::Right(AssembledSignature::DA(real_qc_sig)); } Either::Left(self) } From 643f3d9d407af00d1d78185930ba28001dfd77e8 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:05:47 -0400 Subject: [PATCH 0204/1393] Tests passing? --- task-impls/src/consensus.rs | 74 +++++++++++++++--------------------- task-impls/src/view_sync.rs | 2 +- testing/tests/timeout.rs | 4 +- types/src/traits/election.rs | 6 ++- 4 files changed, 38 insertions(+), 48 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 77ac7dfb33..7e7adad4c8 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -83,7 +83,6 @@ pub struct SequencingConsensusTaskState< Certificate = TimeoutCertificate, Commitment = Commitment, >, - { /// The global task registry pub registry: GlobalRegistry, @@ -178,7 +177,7 @@ pub struct VoteCollectionTaskState< Commitment>, >>::VoteAccumulator, QuorumCertificate>>, - >, + >, /// Accumulator for votes #[allow(clippy::type_complexity)] @@ -216,7 +215,6 @@ where Certificate = TimeoutCertificate, Commitment = Commitment, >, - { } @@ -244,7 +242,6 @@ where Certificate = TimeoutCertificate, Commitment = Commitment, >, - { match event { SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { @@ -302,6 +299,7 @@ where // during exchange refactor // https://github.com/EspressoSystems/HotShot/issues/1799 SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + debug!("Received timeout vote for view {}", *vote.get_view()); if state.timeout_accumulator.is_right() { return (None, state); } @@ -688,11 +686,8 @@ where return; } - // TODO ED Do we need to check that the commit in the cert is in fact a commit to the correct view? I think we do. - if !self - .timeout_exchange - .is_valid_cert(&timeout_cert.clone()) - { + // TODO ED Do we need to check that the commit in the cert is in fact a commit to the correct view? I think we do. + if !self.timeout_exchange.is_valid_cert(&timeout_cert.clone()) { warn!("Timeout certificate for view {} was invalid", *view); return; } @@ -700,10 +695,7 @@ where let justify_qc = proposal.data.justify_qc.clone(); - if !self - .quorum_exchange - .is_valid_cert(&justify_qc) - { + if !self.quorum_exchange.is_valid_cert(&justify_qc) { error!("Invalid justify_qc in proposal for view {}", *view); return; } @@ -1173,11 +1165,7 @@ where let high_qc = self.consensus.read().await.high_qc.clone(); if self - .publish_proposal_if_able( - high_qc, - view, - Some(qc.clone()), - ) + .publish_proposal_if_able(high_qc, view, Some(qc.clone())) .await { } else { @@ -1194,7 +1182,7 @@ where *qc.view_number ); - if self + if !self .publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await { @@ -1246,7 +1234,7 @@ where } SequencingHotShotEvent::Timeout(view) => { // NOTE: We may optionally have the timeout task listen for view change events - if self.cur_view > view { + if self.cur_view >= view { return; } let vote_token = self.timeout_exchange.make_vote_token(view); @@ -1317,6 +1305,7 @@ where return false; }; if leaf_commitment != consensus.high_qc.leaf_commitment() { + // TODO ED This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, @@ -1367,28 +1356,28 @@ where proposer_id: self.api.public_key().to_bytes(), }; - let signature = self - .quorum_exchange - .sign_validating_or_commitment_proposal::(&leaf.commit()); - // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. - let proposal = QuorumProposal { - block_commitment, - view_number: leaf.view_number, - height: leaf.height, - justify_qc: consensus.high_qc.clone(), - timeout_certificate: timeout_certificate.or_else(|| None), - proposer_id: leaf.proposer_id, - dac: None, - }; + let signature = self + .quorum_exchange + .sign_validating_or_commitment_proposal::(&leaf.commit()); + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. + let proposal = QuorumProposal { + block_commitment, + view_number: leaf.view_number, + height: leaf.height, + justify_qc: consensus.high_qc.clone(), + timeout_certificate: timeout_certificate.or_else(|| None), + proposer_id: leaf.proposer_id, + dac: None, + }; - let message = Proposal { - data: proposal, - signature, - }; - debug!( - "Sending proposal for view {:?} \n {:?}", - leaf.view_number, "" - ); + let message = Proposal { + data: proposal, + signature, + }; + debug!( + "Sending proposal for view {:?} \n {:?}", + leaf.view_number, "" + ); self.event_stream .publish(SequencingHotShotEvent::QuorumProposalSend( @@ -1399,6 +1388,7 @@ where self.block = None; return true; } + debug!("Self block was None"); false } } @@ -1433,7 +1423,6 @@ where Certificate = TimeoutCertificate, Commitment = Commitment, >, - { } @@ -1490,7 +1479,6 @@ where Certificate = TimeoutCertificate, Commitment = Commitment, >, - { if let SequencingHotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c44dfb328f..c7efa7e212 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -490,7 +490,7 @@ where } &SequencingHotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number < TYPES::Time::new(*self.current_view) { + if view_number <= TYPES::Time::new(*self.current_view) { return; } diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 6988c78a28..913a529462 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -7,7 +7,7 @@ async fn test_timeout() { use std::time::Duration; - use hotshot_testing::node_types::SequencingWebImpl; + use hotshot_testing::node_types::SequencingLibp2pImpl; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -57,7 +57,7 @@ async fn test_timeout() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 465d1aa03a..2879f89414 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -377,7 +377,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::Timeout(qc) => { - let real_commit = VoteData::Timeout(leaf_commitment).get_commit(); + let real_commit = VoteData::Timeout(leaf_commitment).commit(); let real_qc_pp = ::get_public_parameter( self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), @@ -453,6 +453,8 @@ pub trait ConsensusExchange: Send + Sync { &vote.get_data(), &Checked::Unchecked(vote.get_vote_token()), ) { + error!("Vote data is {:?}", vote.get_data()); + error!("Invalid vote!"); return Either::Left(accumulator); } @@ -1310,7 +1312,7 @@ pub trait TimeoutExchangeType: ConsensusExchange let signature = TYPES::SignatureKey::sign( self.private_key(), VoteData::>::Timeout(view.commit()) - .get_commit() + .commit() .as_ref(), ); From 08ab30a581bd0f22e238834aa0aea8cd4d7c0c75 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:20:02 -0400 Subject: [PATCH 0205/1393] lints --- hotshot-signature-key/src/bn254/bn254_priv.rs | 1 + hotshot-signature-key/src/bn254/bn254_pub.rs | 1 + hotshot/src/tasks/mod.rs | 4 ++-- hotshot/src/traits/networking/combined_network.rs | 4 ++-- hotshot/src/traits/networking/memory_network.rs | 4 ++-- libp2p-networking/src/network/node/handle.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/harness.rs | 6 +++--- task-impls/src/transactions.rs | 2 +- task/src/event_stream.rs | 4 ++-- task/src/task.rs | 2 +- testing/tests/consensus_task.rs | 8 +++++--- types/src/certificate.rs | 4 +--- types/src/traits/election.rs | 8 ++------ 14 files changed, 25 insertions(+), 27 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 439b52ff39..87d80c8dfe 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -54,6 +54,7 @@ impl BLSPrivKey { } } +#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.priv_key.to_string(); diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 025d455129..410a0e12fb 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -27,6 +27,7 @@ pub struct BLSPubKey { pub_key: VerKey, } +#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.pub_key.to_string(); diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 48a33574f8..b1ee195e63 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -36,8 +36,8 @@ use hotshot_types::{ election::{ConsensusExchange, Membership}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ - CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, SequencingTimeoutEx, - ViewSyncEx, + CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, + SequencingTimeoutEx, ViewSyncEx, }, state::ConsensusTime, }, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 63fdb6df88..e717c40b1b 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -287,7 +287,7 @@ impl, MEMBERSHIP: Membership { + Ok(()) => { self.primary_down.store(0, Ordering::Relaxed); } Err(e) => { @@ -318,7 +318,7 @@ impl, MEMBERSHIP: Membership { + Ok(()) => { self.primary_down.store(0, Ordering::Relaxed); } Err(e) => { diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 9d9e751ee5..9b68c805e6 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -329,7 +329,7 @@ impl ConnectedNetwork for Memory } else { let res = node.broadcast_input(vec.clone()).await; match res { - Ok(_) => { + Ok(()) => { self.inner.metrics.outgoing_message_count.add(1); trace!(?key, "Delivered message to remote"); } @@ -373,7 +373,7 @@ impl ConnectedNetwork for Memory } else { let res = node.direct_input(vec).await; match res { - Ok(_) => { + Ok(()) => { self.inner.metrics.outgoing_message_count.add(1); trace!(?recipient, "Delivered message to remote"); Ok(()) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index c4e6460666..4c66d1e840 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -188,7 +188,7 @@ impl NetworkNodeHandle { } } } - }).map(|_| ()) + }).map(|()| ()) } /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 767e149289..1915704e08 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -32,7 +32,7 @@ use hotshot_types::{ use snafu::Snafu; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, instrument}; #[derive(Snafu, Debug)] /// Error type for consensus tasks diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 136093bc3e..c2e2f0be50 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::art::async_spawn; use futures::FutureExt; use hotshot_task::{ - event_stream::{self, ChannelStream, EventStream}, + event_stream::{ChannelStream, EventStream}, task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, task_launcher::TaskRunner, @@ -52,7 +52,7 @@ pub async fn run_harness( { let task_runner = TaskRunner::new(); let registry = task_runner.registry.clone(); - let event_stream = event_stream.unwrap_or(event_stream::ChannelStream::new()); + let event_stream = event_stream.unwrap_or_default(); let state = TestHarnessState { expected_output }; let handler = HandleEvent(Arc::new(move |event, state| { async move { handle_event(event, state) }.boxed() @@ -76,7 +76,7 @@ pub async fn run_harness( let runner = async_spawn(async move { task_runner.launch().await }); for event in input { - let _ = event_stream.publish(event).await; + let () = event_stream.publish(event).await; } let _ = runner.await; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 88ccd973a8..cb939d0ca7 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -6,7 +6,7 @@ use async_compatibility_layer::{ use async_lock::RwLock; use bincode::config::Options; use commit::{Commitment, Committable}; -use either::{Either, Left, Right}; +use either::{Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, diff --git a/task/src/event_stream.rs b/task/src/event_stream.rs index 875d045994..5248fe4373 100644 --- a/task/src/event_stream.rs +++ b/task/src/event_stream.rs @@ -129,7 +129,7 @@ impl EventStream for ChannelStream { Some((filter, sender)) => { if filter(&event) { match sender.send(event.clone()).await { - Ok(_) => (), + Ok(()) => (), // error sending => stream is closed so remove it Err(_) => self.unsubscribe(id).await, } @@ -147,7 +147,7 @@ impl EventStream for ChannelStream { for (uid, (filter, sender)) in &inner.subscribers { if filter(&event) { match sender.send(event.clone()).await { - Ok(_) => (), + Ok(()) => (), // error sending => stream is closed so remove it Err(_) => { self.unsubscribe(*uid).await; diff --git a/task/src/task.rs b/task/src/task.rs index 37b6f2d56f..8435ff0fcf 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -381,7 +381,7 @@ impl<'pin, HSTT: HotShotTaskTypes> ProjectedHST<'pin, HSTT> { cx: &mut Context<'_>, ) -> Poll { match fut.as_mut().poll(cx) { - Poll::Ready(_) => Poll::Ready( + Poll::Ready(()) => Poll::Ready( self.r_val .take() .unwrap_or_else(|| HotShotTaskCompleted::LostReturnValue), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 9c9059e380..dc8439a85d 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,5 +1,5 @@ -use commit::Committable; use commit::Commitment; +use commit::Committable; use either::Right; use hotshot::{ tasks::add_consensus_task, @@ -102,8 +102,10 @@ async fn test_consensus_task() { let mut output = HashMap::new(); // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = - QuorumCertificate::>>::genesis(); + let qc = QuorumCertificate::< + SequencingTestTypes, + Commitment>, + >::genesis(); let proposal = build_quorum_proposal(&handle, &private_key, 1).await; input.push(SequencingHotShotEvent::QCFormed(either::Left(qc.clone()))); diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 1601f01587..bf02d2f871 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -20,7 +20,6 @@ use serde::{Deserialize, Serialize}; use std::{ fmt::{self, Debug, Display, Formatter}, hash::Hash, - ops::Deref, }; use tracing::debug; @@ -209,7 +208,6 @@ impl self.leaf_commitment } - fn is_genesis(&self) -> bool { self.is_genesis } @@ -232,7 +230,7 @@ impl Committable commit::RawCommitmentBuilder::new("Quorum Certificate Commitment") .var_size_field("leaf commitment", self.leaf_commitment.as_ref()) - .u64_field("view number", *self.view_number.deref()) + .u64_field("view number", *self.view_number) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) .finalize() diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 2879f89414..8d3b795267 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -128,7 +128,7 @@ where use VoteData::*; match self { DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) - | ViewSyncFinalize(c) => c.clone(), + | ViewSyncFinalize(c) => *c, } } @@ -354,11 +354,7 @@ pub trait ConsensusExchange: Send + Sync { self.membership().get_committee_qc_stake_table(), U256::from(self.membership().success_threshold().get()), ); - ::check( - &real_qc_pp, - real_commit.as_ref(), - &qc, - ) + ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::Yes(qc) => { let real_commit = VoteData::Yes(leaf_commitment).commit(); From 822611a12cd2ec8da50dae6cbbf5cad900bd7da4 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 11 Oct 2023 11:27:18 -0700 Subject: [PATCH 0206/1393] solve conflict --- hotshot/src/traits/networking/memory_network.rs | 6 +++--- task-impls/src/consensus.rs | 8 +++----- types/src/consensus.rs | 3 +-- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 7fd22aa274..c67d90147e 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -92,7 +92,7 @@ struct MemoryNetworkInner { /// The networking metrics we're keeping track of metrics: NetworkingMetricsValue, - + /// config to introduce unreliability to the network reliability_config: Option>>, } @@ -330,7 +330,7 @@ impl ConnectedNetwork for Memory let res = node.broadcast_input(vec.clone()).await; match res { Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_broadcast_message_count.add(1); trace!(?key, "Delivered message to remote"); } Err(e) => { @@ -374,7 +374,7 @@ impl ConnectedNetwork for Memory let res = node.direct_input(vec).await; match res { Ok(_) => { - self.inner.metrics.outgoing_message_count.add(1); + self.inner.metrics.outgoing_direct_message_count.add(1); trace!(?recipient, "Delivered message to remote"); Ok(()) } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5f4455ddc4..cac7d4d1f2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -816,12 +816,10 @@ where // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above if new_decide_reached { let mut leaf = leaf.clone(); - consensus - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.height).unwrap_or(0)); - + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height).unwrap_or(0)); // If the full block is available for this leaf, include it in the leaf // chain that we send to the client. if let Some(block) = diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 896c49ae93..6574a73f8a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -224,8 +224,7 @@ impl ConsensusMetricsValue { .create_gauge(String::from("outstanding_transactions"), None), outstanding_transactions_memory_size: metrics .create_gauge(String::from("outstanding_transactions_memory_size"), None), - number_of_timeouts: metrics - .create_counter(String::from("number_of_timeouts"), None), + number_of_timeouts: metrics.create_counter(String::from("number_of_timeouts"), None), } } } From 620a95fc5ffd0afd855cb3c9ecfaed430d088386 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:28:05 -0400 Subject: [PATCH 0207/1393] lints --- libp2p-networking/src/network/node/handle.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 4c66d1e840..ad91d8096a 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -136,6 +136,9 @@ impl NetworkNodeHandle { /// /// Will panic if a handler is already spawned #[allow(clippy::unused_async)] + // Tokio and async_std disagree how this function should be linted + #[allow(clippy::ignored_unit_patterns)] + pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, @@ -188,7 +191,7 @@ impl NetworkNodeHandle { } } } - }).map(|()| ()) + }).map(|_| ()) } /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. From fffe7f7f52b76df7ea03c54cd94307151faf8d2c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:31:41 -0400 Subject: [PATCH 0208/1393] lints --- libp2p-networking/src/network/node/handle.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index ad91d8096a..fe68d679ce 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -136,8 +136,8 @@ impl NetworkNodeHandle { /// /// Will panic if a handler is already spawned #[allow(clippy::unused_async)] - // Tokio and async_std disagree how this function should be linted - #[allow(clippy::ignored_unit_patterns)] + // // Tokio and async_std disagree how this function should be linted + // #[allow(clippy::ignored_unit_patterns)] pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where From 1459a23a6175a7020743e5d99cceba0f7a308ffb Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:33:37 -0400 Subject: [PATCH 0209/1393] lints --- hotshot-signature-key/src/bn254/bn254_priv.rs | 2 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 87d80c8dfe..5b16c7a93d 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -54,7 +54,7 @@ impl BLSPrivKey { } } -#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] +// #[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.priv_key.to_string(); diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 410a0e12fb..43fc3a6c43 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -27,7 +27,7 @@ pub struct BLSPubKey { pub_key: VerKey, } -#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] +// #[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.pub_key.to_string(); From 61e4474a6cca286ea36376d46201b94c0550856b Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:38:44 -0400 Subject: [PATCH 0210/1393] remove get_commit func --- types/src/traits/election.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 8d3b795267..b44ea5082c 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -121,17 +121,6 @@ impl VoteData where COMMITMENT: CommitmentBounds, { - /// Return the underlying commitment. - #[must_use] - pub fn get_commit(&self) -> COMMITMENT { - #[allow(clippy::enum_glob_use)] - use VoteData::*; - match self { - DA(c) | Yes(c) | No(c) | Timeout(c) | ViewSyncPreCommit(c) | ViewSyncCommit(c) - | ViewSyncFinalize(c) => *c, - } - } - #[must_use] /// Convert vote data into bytes. /// From b1bf7fd172ce743520f6ba6d3202766eaf16c4e4 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 17:37:37 -0400 Subject: [PATCH 0211/1393] Fix subtraction overflow --- task-impls/src/consensus.rs | 1 + task-impls/src/da.rs | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7e7adad4c8..bad34b19cf 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -723,6 +723,7 @@ where "Proposal's parent missing from storage with commitment: {:?}", justify_qc.leaf_commitment() ); + // TODO ED Remove this return return; }; let parent_commitment = parent.commit(); diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 1915704e08..8d686d1685 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -32,7 +32,7 @@ use hotshot_types::{ use snafu::Snafu; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] /// Error type for consensus tasks @@ -273,10 +273,11 @@ where // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block - // if view < self.cur_view - 1 { - // warn!("Throwing away DA proposal that is more than one view older"); - // return None; - // } + + if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { + warn!("Throwing away DA proposal that is more than one view older"); + return None; + } debug!( "Got a DA block with {} transactions!", @@ -509,10 +510,11 @@ where // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. // TODO ED Revisit this - // if view < self.cur_view - 1 { - // warn!("Throwing away VID disperse data that is more than one view older"); - // return None; - // } + + if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { + warn!("Throwing away VID disperse data that is more than one view older"); + return None; + } debug!("VID disperse data is fresh."); let block_commitment = disperse.data.commitment; From 84463c81b2f342db40497f8ddef181221b6af588 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 19:54:22 -0400 Subject: [PATCH 0212/1393] Update state map even if no parent --- task-impls/src/consensus.rs | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 0833eaa874..c2d359ec5c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -726,13 +726,37 @@ where .cloned() }; + // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { + // If no parent then just update our state map and return. We will not vote. error!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.leaf_commitment() ); - // TODO ED Remove this return + let leaf = SequencingLeaf { + view_number: view, + height: proposal.data.height, + justify_qc: justify_qc.clone(), + parent_commitment: justify_qc.leaf_commitment(), + deltas: Right(proposal.data.block_commitment), + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: sender.to_bytes(), + }; + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + + return; }; let parent_commitment = parent.commit(); @@ -750,7 +774,6 @@ where // consensus.metrics.invalid_qc.update(1); - // Validate the `height` // TODO Remove height from proposal validation; view number is sufficient // https://github.com/EspressoSystems/HotShot/issues/1796 @@ -913,9 +936,10 @@ where .await; consensus.last_decided_view = new_anchor_view; consensus.metrics.invalid_qc.set(0); - consensus.metrics.last_decided_view.set( - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), - ); + consensus + .metrics + .last_decided_view + .set(usize::try_from(consensus.last_decided_view.get_u64()).unwrap()); // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { From ce104b03a3f32e2cd0f7a50d0f72c25a1868c17c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:03:42 -0400 Subject: [PATCH 0213/1393] Test timeout on both libp2p and web --- testing/tests/timeout.rs | 67 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 913a529462..9ffaa21009 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -4,7 +4,72 @@ tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_timeout() { +async fn test_timeout_web() { + use std::time::Duration; + + use hotshot_testing::node_types::SequencingWebImpl; + + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::SequencingTestTypes, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 1000, + ..Default::default() + }; + + // TODO ED Reduce down to 5 nodes once memory network issues is resolved + // https://github.com/EspressoSystems/HotShot/issues/1790 + let mut metadata = TestMetadata { + total_nodes: 10, + start_nodes: 10, + ..Default::default() + }; + let dead_nodes = vec![ChangeNode { + idx: 0, + updown: UpDown::Down, + }]; + + metadata.timing_data = timing_data; + + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_successful_views: 25, + ..Default::default() + }; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::from_millis(500), dead_nodes)], + }; + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(30), + }, + ); + + // TODO ED Test with memory network once issue is resolved + // https://github.com/EspressoSystems/HotShot/issues/1790 + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_timeout_libp2p() { use std::time::Duration; use hotshot_testing::node_types::SequencingLibp2pImpl; From b78a82eab1512516451fef9d5b9b6d855570e739 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:04:47 -0400 Subject: [PATCH 0214/1393] lint --- task-impls/src/consensus.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c2d359ec5c..eb4a8355b0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -726,10 +726,10 @@ where .cloned() }; - // + // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { - // If no parent then just update our state map and return. We will not vote. + // If no parent then just update our state map and return. We will not vote. error!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.leaf_commitment() @@ -755,7 +755,6 @@ where }, ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - return; }; From 006cb09a36e0f1322a418b3bc67b234078a4ad86 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:24:23 -0400 Subject: [PATCH 0215/1393] comments --- testing/tests/timeout.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 9ffaa21009..10409d2519 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -4,6 +4,8 @@ tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// TODO Add memory network tests after this issue is finished: +// https://github.com/EspressoSystems/HotShot/issues/1790 async fn test_timeout_web() { use std::time::Duration; @@ -24,8 +26,6 @@ async fn test_timeout_web() { ..Default::default() }; - // TODO ED Reduce down to 5 nodes once memory network issues is resolved - // https://github.com/EspressoSystems/HotShot/issues/1790 let mut metadata = TestMetadata { total_nodes: 10, start_nodes: 10, @@ -89,8 +89,6 @@ async fn test_timeout_libp2p() { ..Default::default() }; - // TODO ED Reduce down to 5 nodes once memory network issues is resolved - // https://github.com/EspressoSystems/HotShot/issues/1790 let mut metadata = TestMetadata { total_nodes: 10, start_nodes: 10, From 4bee20b65093bfea99335d50744dd6ec3747e97d Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:27:58 -0400 Subject: [PATCH 0216/1393] update metrics --- task-impls/src/consensus.rs | 4 ++-- testing/tests/timeout.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index eb4a8355b0..9a8bb5b40f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -706,6 +706,8 @@ where if !self.quorum_exchange.is_valid_cert(&justify_qc) { error!("Invalid justify_qc in proposal for view {}", *view); + let consensus = self.consensus.write().await; + consensus.metrics.invalid_qc.update(1); return; } @@ -771,8 +773,6 @@ where }; let leaf_commitment = leaf.commit(); - // consensus.metrics.invalid_qc.update(1); - // Validate the `height` // TODO Remove height from proposal validation; view number is sufficient // https://github.com/EspressoSystems/HotShot/issues/1796 diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 10409d2519..e8e4278195 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -4,7 +4,7 @@ tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// TODO Add memory network tests after this issue is finished: +// TODO Add memory network tests after this issue is finished: // https://github.com/EspressoSystems/HotShot/issues/1790 async fn test_timeout_web() { use std::time::Duration; From 8c25c94f1b3f4ff5d526632fc7dab481b917696b Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:31:11 -0400 Subject: [PATCH 0217/1393] metric updates --- task-impls/src/consensus.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9a8bb5b40f..c359d4b9f6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -858,6 +858,10 @@ where // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above if new_decide_reached { let mut leaf = leaf.clone(); + consensus + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height).unwrap_or(0)); // If the full block is available for this leaf, include it in the leaf // chain that we send to the client. @@ -939,6 +943,12 @@ where .metrics .last_decided_view .set(usize::try_from(consensus.last_decided_view.get_u64()).unwrap()); + let cur_number_of_views_per_decide_event = + *self.cur_view - consensus.last_decided_view.get_u64(); + consensus + .metrics + .number_of_views_per_decide_event + .add_point(cur_number_of_views_per_decide_event as f64); // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { From 383ca897136854df4612f6de124f79b1ee9d627c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:43:14 -0400 Subject: [PATCH 0218/1393] Create own timeout cert validation function --- task-impls/src/consensus.rs | 8 +++++--- types/src/traits/election.rs | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c359d4b9f6..011eeec800 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -695,8 +695,10 @@ where return; } - // TODO ED Do we need to check that the commit in the cert is in fact a commit to the correct view? I think we do. - if !self.timeout_exchange.is_valid_cert(&timeout_cert.clone()) { + if !self + .timeout_exchange + .is_valid_timeout_cert(&timeout_cert.clone(), view - 1) + { warn!("Timeout certificate for view {} was invalid", *view); return; } @@ -1356,7 +1358,7 @@ where return false; }; if leaf_commitment != consensus.high_qc.leaf_commitment() { - // TODO ED This happens on the genesis block + // NOTE: This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index b44ea5082c..14affde19b 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1307,6 +1307,28 @@ pub trait TimeoutExchangeType: ConsensusExchange vote_token, }) } + + /// Validate a timeout certificate. + /// This is separate from other certificate verification functions because we also need to + /// verify the certificate is signed over the view we expect + fn is_valid_timeout_cert(&self, qc: &Self::Certificate, view_number: TYPES::Time) -> bool { + let comparison_commitment = view_number.commit(); + + match qc.signatures() { + AssembledSignature::Timeout(qc) => { + let real_commit = VoteData::Timeout(comparison_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } + _ => { + error!("Expected TimeoutCertificate, received another certificate variant"); + false + } + } + } } impl< From a761e7cf362f3a3d99082c9dab03878648b93fc1 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:45:38 -0400 Subject: [PATCH 0219/1393] lints: --- types/src/traits/election.rs | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 14affde19b..f4c8126715 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1314,19 +1314,16 @@ pub trait TimeoutExchangeType: ConsensusExchange fn is_valid_timeout_cert(&self, qc: &Self::Certificate, view_number: TYPES::Time) -> bool { let comparison_commitment = view_number.commit(); - match qc.signatures() { - AssembledSignature::Timeout(qc) => { - let real_commit = VoteData::Timeout(comparison_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } - _ => { - error!("Expected TimeoutCertificate, received another certificate variant"); - false - } + if let AssembledSignature::Timeout(qc) = qc.signatures() { + let real_commit = VoteData::Timeout(comparison_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } else { + error!("Expected TimeoutCertificate, received another certificate variant"); + false } } } From 9a4bb58c73f89944be8591d15fe67dca64b44a62 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:53:20 -0400 Subject: [PATCH 0220/1393] Comments --- types/src/traits/election.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index f4c8126715..9f5c6f4e38 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -362,12 +362,7 @@ pub trait ConsensusExchange: Send + Sync { ::check(&real_qc_pp, real_commit.as_ref(), &qc) } AssembledSignature::Timeout(qc) => { - let real_commit = VoteData::Timeout(leaf_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) + error!("QC type should not be timeout here"); } AssembledSignature::Genesis() => true, AssembledSignature::ViewSyncPreCommit(_) From 886f0de273b7789fae6e735543af00d43327d821 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:53:57 -0400 Subject: [PATCH 0221/1393] Comments --- types/src/traits/election.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 9f5c6f4e38..0383c83bcd 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -363,6 +363,7 @@ pub trait ConsensusExchange: Send + Sync { } AssembledSignature::Timeout(qc) => { error!("QC type should not be timeout here"); + false } AssembledSignature::Genesis() => true, AssembledSignature::ViewSyncPreCommit(_) From c8323511a7ca2370297a8a93db1d95ae6cd23683 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Wed, 11 Oct 2023 20:55:54 -0400 Subject: [PATCH 0222/1393] lints --- types/src/traits/election.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 0383c83bcd..0b660a4221 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -361,7 +361,7 @@ pub trait ConsensusExchange: Send + Sync { ); ::check(&real_qc_pp, real_commit.as_ref(), &qc) } - AssembledSignature::Timeout(qc) => { + AssembledSignature::Timeout(_) => { error!("QC type should not be timeout here"); false } From 8975608da06a54161076d6faac78d651fb766606 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 12 Oct 2023 10:00:45 -0400 Subject: [PATCH 0223/1393] Add return if proposal is invliad --- task-impls/src/consensus.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 011eeec800..7887b7e245 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -786,9 +786,10 @@ where ); return; } - // Validate the signature. + // Validate the signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment else if !view_leader_key.validate(&proposal.signature, leaf_commitment.as_ref()) { error!(?proposal.signature, "Could not verify proposal."); + return; } // Create a positive vote if either liveness or safety check // passes. @@ -811,11 +812,13 @@ where let safety_check = outcome.is_ok(); if let Err(e) = outcome { self.api.send_view_error(view, Arc::new(e)).await; + return; } // Skip if both saftey and liveness checks fail. if !safety_check && !liveness_check { error!("Failed safety check and liveness check"); + return; } } From 9a51100dc41c137d60982c0ba2e8ececa4bcd049 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 12 Oct 2023 10:03:26 -0400 Subject: [PATCH 0224/1393] lints --- task-impls/src/consensus.rs | 51 ++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7887b7e245..748e6e9f1d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -793,33 +793,32 @@ where } // Create a positive vote if either liveness or safety check // passes. - else { - // Liveness check. - let liveness_check = justify_qc.view_number > consensus.locked_view; - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.view_number, - Terminator::Inclusive(consensus.locked_view), - false, - |leaf| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - if let Err(e) = outcome { - self.api.send_view_error(view, Arc::new(e)).await; - return; - } - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - error!("Failed safety check and liveness check"); - return; - } + // Liveness check. + let liveness_check = justify_qc.view_number > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.view_number, + Terminator::Inclusive(consensus.locked_view), + false, + |leaf| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + if let Err(e) = outcome { + self.api.send_view_error(view, Arc::new(e)).await; + return; + } + + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + error!("Failed safety check and liveness check"); + return; } let high_qc = leaf.justify_qc.clone(); From a19fc32fe20ce8377dc484be9e23005f3718ff51 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 12 Oct 2023 11:13:20 -0400 Subject: [PATCH 0225/1393] fix catchup web and make more strict --- hotshot/src/traits/networking/web_server_network.rs | 5 +++-- testing/src/test_runner.rs | 5 ----- testing/tests/catchup.rs | 12 ++++++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index f2c6c1c842..54c8fa158b 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -508,10 +508,11 @@ impl< let endpoint = match &message.purpose() { MessagePurpose::Proposal => config::post_proposal_route(*view_number), - MessagePurpose::CurrentProposal => return Err(WebServerNetworkError::EndpointError), MessagePurpose::Vote => config::post_vote_route(*view_number), MessagePurpose::Data => config::post_transactions_route(), - MessagePurpose::Internal => return Err(WebServerNetworkError::EndpointError), + MessagePurpose::Internal | MessagePurpose::CurrentProposal => { + return Err(WebServerNetworkError::EndpointError) + } MessagePurpose::ViewSyncProposal => { // error!("Posting view sync proposal route is: {}", config::post_view_sync_proposal_route(*view_number)); config::post_view_sync_proposal_route(*view_number) diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 57c82d24e9..8c57183eaa 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -89,11 +89,6 @@ where } } } - assert!( - late_start_nodes.len() - <= self.launcher.metadata.total_nodes - self.launcher.metadata.start_nodes, - "Test wants to late start too many nodes." - ); self.add_nodes(self.launcher.metadata.total_nodes, &late_start_nodes) .await; diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 9c6bd0bb38..d70d4fc63c 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -38,19 +38,20 @@ async fn test_catchup() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), catchup_nodes)], + node_changes: vec![(Duration::from_millis(1400), catchup_nodes)], }; metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), + duration: Duration::from_millis(10000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() }; + metadata.overall_safety_properties.num_failed_views = 2; metadata .gen_launcher::() @@ -89,11 +90,11 @@ async fn test_catchup_web() { }]; metadata.timing_data = timing_data; - metadata.start_nodes = 19; + metadata.start_nodes = 20; metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::from_millis(400), catchup_nodes)], + node_changes: vec![(Duration::from_millis(2500), catchup_nodes)], }; metadata.completion_task_description = @@ -107,6 +108,9 @@ async fn test_catchup_web() { ..Default::default() }; + // only alow for the view which the catchup node hasn't started to fail + metadata.overall_safety_properties.num_failed_views = 1; + metadata .gen_launcher::() .launch() From 2158753c1be383aba1c4580e4b99c29edd927133 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 12 Oct 2023 11:40:18 -0400 Subject: [PATCH 0226/1393] re-enable test --- testing/tests/catchup.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index d70d4fc63c..1c568629ff 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -125,7 +125,6 @@ async fn test_catchup_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_catchup_one_node() { use std::time::Duration; From 7d1f020854731985067de3cc445d7062d89f37c0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 12 Oct 2023 13:08:15 -0400 Subject: [PATCH 0227/1393] increase timeout of catchup back to previous --- testing/tests/catchup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 1c568629ff..2415f4318c 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -44,7 +44,7 @@ async fn test_catchup() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(10000), + duration: Duration::from_millis(100000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { From 6c8f1b9f54d45af797765ea83124d0484695f045 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 15:47:49 -0700 Subject: [PATCH 0228/1393] remove some generated_from_seed_indexed() --- hotshot/examples/infra/modDA.rs | 3 +-- testing/src/task_helpers.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index ef188a7849..730846625b 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -680,8 +680,7 @@ where let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); for i in 0..config.config.total_nodes.get() as u64 { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); + let pubkey = TYPES::SignatureKey::get_public_key(config.config.known_nodes_with_stake.get(&i).unwrap()); if i < config.config.da_committee_size as u64 { da_keys.insert(pubkey.clone()); } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index f4e7fe49a8..92de2ecf3e 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -51,7 +51,7 @@ pub async fn build_system_handle( let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = ::SignatureKey::from_private(&private_key); + let public_key = ::get_public_key(config.known_nodes_with_stake.get(node_id as usize).unwrap()); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< SequencingTestTypes, From 16aaf8b1fbb15fb4676b1d23cd731e98bd3fa728 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 15:47:49 -0700 Subject: [PATCH 0229/1393] remove some generated_from_seed_indexed() --- hotshot/examples/infra/modDA.rs | 3 +-- testing/src/task_helpers.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index a06f023159..82cc2ad75d 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -681,8 +681,7 @@ where let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); for i in 0..config.config.total_nodes.get() as u64 { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); + let pubkey = TYPES::SignatureKey::get_public_key(config.config.known_nodes_with_stake.get(&i).unwrap()); if i < config.config.da_committee_size as u64 { da_keys.insert(pubkey.clone()); } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 17be6f7dfc..b17c876697 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -51,7 +51,7 @@ pub async fn build_system_handle( let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = ::SignatureKey::from_private(&private_key); + let public_key = ::get_public_key(config.known_nodes_with_stake.get(node_id as usize).unwrap()); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< SequencingTestTypes, From 19f254ed2f64e74cfda164afa3d2065b82b422e1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 16:03:37 -0700 Subject: [PATCH 0230/1393] fix lint --- hotshot/examples/infra/modDA.rs | 8 +++++++- testing/src/task_helpers.rs | 4 +++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 82cc2ad75d..a9f9a9dc2e 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -681,7 +681,13 @@ where let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); for i in 0..config.config.total_nodes.get() as u64 { - let pubkey = TYPES::SignatureKey::get_public_key(config.config.known_nodes_with_stake.get(&i).unwrap()); + let pubkey = TYPES::SignatureKey::get_public_key( + config + .config + .known_nodes_with_stake + .get(i as usize) + .unwrap(), + ); if i < config.config.da_committee_size as u64 { da_keys.insert(pubkey.clone()); } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index b17c876697..faa1cf4067 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -51,7 +51,9 @@ pub async fn build_system_handle( let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = ::get_public_key(config.known_nodes_with_stake.get(node_id as usize).unwrap()); + let public_key = ::get_public_key( + config.known_nodes_with_stake.get(node_id as usize).unwrap(), + ); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< SequencingTestTypes, From 3e0b2954822b32aa9bc76df3d363b054b99db7ac Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 18:39:29 -0700 Subject: [PATCH 0231/1393] make the key pair configurable --- hotshot-signature-key/src/bn254/bn254_priv.rs | 2 +- hotshot/examples/infra/mod.rs | 2 ++ hotshot/examples/infra/modDA.rs | 9 ++++++ hotshot/src/lib.rs | 3 ++ orchestrator/src/client.rs | 2 ++ orchestrator/src/config.rs | 11 +++---- orchestrator/src/lib.rs | 29 ++++++++++--------- testing/src/test_builder.rs | 6 +++- testing/src/test_launcher.rs | 2 ++ testing/src/test_runner.rs | 1 + types/src/lib.rs | 4 ++- types/src/traits/signature_key.rs | 10 ++++++- 12 files changed, 58 insertions(+), 23 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 5b16c7a93d..dcd9129c09 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use std::cmp::Ordering; /// Private key type for a bn254 keypair -#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Hash)] pub struct BLSPrivKey { /// The private key for this keypair pub(super) priv_key: QCSignKey, diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 06d7d9deb3..9fb2490a39 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -37,6 +37,7 @@ pub fn load_config_from_file( ) -> NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, > { let config_file_as_string: String = fs::read_to_string(config_file.as_str()) @@ -48,6 +49,7 @@ pub fn load_config_from_file( let mut config: NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, > = config_toml.into(); diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index a9f9a9dc2e..50418ee45e 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -186,6 +186,7 @@ pub trait RunDA< config: NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >, ) -> Self; @@ -208,6 +209,7 @@ pub trait RunDA< let (pk, sk) = TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); + let known_nodes_sk = config.config.known_nodes_sk.clone(); let entry = pk.get_stake_table_entry(1u64); let da_network = self.get_da_network(); @@ -373,6 +375,7 @@ pub trait RunDA< ) -> NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >; } @@ -388,6 +391,7 @@ pub struct WebServerDARun< config: NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >, quorum_network: WebCommChannel, @@ -449,6 +453,7 @@ where config: NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >, ) -> WebServerDARun { @@ -518,6 +523,7 @@ where ) -> NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, > { self.config.clone() @@ -532,6 +538,7 @@ pub struct Libp2pDARun, MEMBERSHIP config: NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >, quorum_network: Libp2pCommChannel, @@ -593,6 +600,7 @@ where config: NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >, ) -> Libp2pDARun { @@ -752,6 +760,7 @@ where ) -> NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, > { self.config.clone() diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index dcf6670dc7..9c70d10362 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -115,6 +115,7 @@ pub struct SystemContextInner> { /// Configuration items for this hotshot instance config: HotShotConfig< + ::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -170,6 +171,7 @@ impl> SystemContext { private_key: ::PrivateKey, nonce: u64, config: HotShotConfig< + ::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -378,6 +380,7 @@ impl> SystemContext { private_key: ::PrivateKey, node_id: u64, config: HotShotConfig< + ::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 57522da4fd..432cff8d2d 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -68,6 +68,7 @@ impl OrchestratorClient { ) -> NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, > { let f = |client: Client| { @@ -76,6 +77,7 @@ impl OrchestratorClient { NetworkConfig< TYPES::SignatureKey, ::StakeTableEntry, + ::PrivateKey, TYPES::ElectionConfigType, >, ClientError, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index ad937e7754..a8e7617f27 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -51,7 +51,7 @@ pub struct WebServerConfig { } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct NetworkConfig { +pub struct NetworkConfig { pub rounds: usize, pub transactions_per_round: usize, pub num_bootrap: usize, @@ -65,13 +65,13 @@ pub struct NetworkConfig { pub key_type_name: String, pub election_config_type_name: String, pub libp2p_config: Option, - pub config: HotShotConfig, + pub config: HotShotConfig, pub web_server_config: Option, pub da_web_server_config: Option, _key_type_phantom: PhantomData, } -impl Default for NetworkConfig { +impl Default for NetworkConfig { fn default() -> Self { Self { rounds: default_rounds(), @@ -123,7 +123,7 @@ fn default_web_server_config() -> Option { None } -impl From for NetworkConfig { +impl From for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { rounds: val.rounds, @@ -194,7 +194,7 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, } -impl From for HotShotConfig { +impl From for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, @@ -202,6 +202,7 @@ impl From for HotShotConfig { max_transactions: val.max_transactions, min_transactions: val.min_transactions, known_nodes_with_stake: Vec::new(), + known_nodes_sk: Vec::new(), da_committee_size: val.committee_nodes, next_view_timeout: val.next_view_timeout, timeout_ratio: val.timeout_ratio, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index d3faa7d410..ff5977f455 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -36,11 +36,11 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { } #[derive(Default, Clone)] -struct OrchestratorState { +struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, /// The network configuration - config: NetworkConfig, + config: NetworkConfig, /// Whether nodes should start their HotShot instances /// Will be set to true once all nodes post they are ready to start start: bool, @@ -51,9 +51,9 @@ struct OrchestratorState { } impl - OrchestratorState + OrchestratorState { - pub fn new(network_config: NetworkConfig) -> Self { + pub fn new(network_config: NetworkConfig) -> Self { let mut web_client = None; if network_config.web_server_config.is_some() { let base_url = "http://0.0.0.0/9000".to_string().parse().unwrap(); @@ -69,19 +69,19 @@ impl } } -pub trait OrchestratorApi { +pub trait OrchestratorApi { fn post_identity(&mut self, identity: IpAddr) -> Result; fn post_getconfig( &mut self, node_index: u16, - ) -> Result, ServerError>; + ) -> Result, ServerError>; fn get_start(&self) -> Result; fn post_ready(&mut self) -> Result<(), ServerError>; fn post_run_results(&mut self) -> Result<(), ServerError>; } -impl OrchestratorApi - for OrchestratorState +impl OrchestratorApi + for OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey, ELECTION: serde::Serialize + Clone + Send, @@ -140,7 +140,7 @@ where fn post_getconfig( &mut self, _node_index: u16, - ) -> Result, ServerError> { + ) -> Result, ServerError> { if self.config.libp2p_config.is_some() { let libp2p_config = self.config.clone().libp2p_config.unwrap(); if libp2p_config.bootstrap_nodes.len() < libp2p_config.num_bootstrap_nodes { @@ -189,12 +189,13 @@ where } /// Sets up all API routes -fn define_api() -> Result, ApiError> +fn define_api() -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, - ::State: Send + Sync + OrchestratorApi, + ::State: Send + Sync + OrchestratorApi, KEY: serde::Serialize, ENTRY: serde::Serialize, + PRIVATEKEY: serde::Serialize, ELECTION: serde::Serialize, { let api_toml = toml::from_str::(include_str!(concat!( @@ -237,7 +238,7 @@ where /// Runs the orchestrator pub async fn run_orchestrator( - network_config: NetworkConfig, + network_config: NetworkConfig, host: IpAddr, port: u16, ) -> io::Result<()> @@ -247,10 +248,10 @@ where { let api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); - let state: RwLock> = + let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); - let mut app = App::>, ServerError>::with_state(state); + let mut app = App::>, ServerError>::with_state(state); app.register_module("api", api.unwrap()) .expect("Error registering api"); tracing::error!("lisening on {:?}:{:?}", host, port); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 3515f04368..a167d12957 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -200,10 +200,12 @@ impl TestMetadata { .. } = self.clone(); + let mut known_nodes_sk: Vec<::PrivateKey> = Vec::new(); let known_nodes: Vec<::SignatureKey> = (0..total_nodes) .map(|id| { let priv_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id as u64).1; + known_nodes_sk.push(priv_key.clone()); TYPES::SignatureKey::from_private(&priv_key) }) .collect(); @@ -211,6 +213,7 @@ impl TestMetadata { (0..total_nodes) .map(|id| known_nodes[id].get_stake_table_entry(1u64)) .collect(); + // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { // TODO this doesn't exist anymore @@ -220,6 +223,7 @@ impl TestMetadata { min_transactions, max_transactions: NonZeroUsize::new(99999).unwrap(), known_nodes_with_stake, + known_nodes_sk, da_committee_size, next_view_timeout: 500, timeout_ratio: (11, 10), @@ -246,7 +250,7 @@ impl TestMetadata { } = timing_data; let mod_config = // TODO this should really be using the timing config struct - |a: &mut HotShotConfig<::StakeTableEntry, TYPES::ElectionConfigType>| { + |a: &mut HotShotConfig<::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType>| { a.next_view_timeout = next_view_timeout; a.timeout_ratio = timeout_ratio; a.round_start_delay = round_start_delay; diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 4fb230b315..a5a9625edc 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -92,6 +92,7 @@ where pub storage: Generator<>::Storage>, /// configuration used to generate each hotshot node pub config: HotShotConfig< + ::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, @@ -195,6 +196,7 @@ impl> TestLauncher::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0f9429c22c..fb16293ce6 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -231,6 +231,7 @@ where storage: I::Storage, initializer: HotShotInitializer, config: HotShotConfig< + ::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType, >, diff --git a/types/src/lib.rs b/types/src/lib.rs index 54739106c3..cdb74bf156 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -36,7 +36,7 @@ pub enum ExecutionType { /// Holds configuration for a `HotShot` #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] -pub struct HotShotConfig { +pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, /// Total number of nodes in the network @@ -47,6 +47,8 @@ pub struct HotShotConfig { pub max_transactions: NonZeroUsize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec, + /// List of known node's private keys for network initialization, cannot be public + pub known_nodes_sk: Vec, /// List of DA committee nodes for static DA committe pub da_committee_size: usize, /// Base duration for next-view timeout, in milliseconds diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index d5141816f5..ee6904e5e3 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -50,7 +50,15 @@ pub trait SignatureKey: + Ord { /// The private key type for this signature algorithm - type PrivateKey: Send + Sync + Sized + Clone; + type PrivateKey: Send + + Sync + + Sized + + Clone + + Debug + + Eq + + Serialize + + for<'a> Deserialize<'a> + + Hash; /// The type of the entry that contain both public key and stake value type StakeTableEntry: Send + Sync From f146c0789546d7777cbaf093293f325dfdfffbec Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 18:48:51 -0700 Subject: [PATCH 0232/1393] get keypair and stakevalue from config in modDA.rs --- hotshot/examples/infra/modDA.rs | 6 +++--- orchestrator/src/lib.rs | 12 +++++++++--- testing/src/test_builder.rs | 2 +- types/src/traits/signature_key.rs | 6 +++--- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 50418ee45e..e7214b8d6e 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -206,11 +206,11 @@ pub trait RunDA< let config = self.get_config(); // Get KeyPair for certificate Aggregation - let (pk, sk) = - TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let known_nodes_sk = config.config.known_nodes_sk.clone(); - let entry = pk.get_stake_table_entry(1u64); + let entry = known_nodes_with_stake.get(config.node_index as usize).unwrap(); + let pk = TYPES::SignatureKey::get_public_key(entry); + let sk = known_nodes_sk.get(config.node_index as usize).unwrap(); let da_network = self.get_da_network(); let quorum_network = self.get_quorum_network(); diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index ff5977f455..fef9b22b03 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -53,7 +53,9 @@ struct OrchestratorState { impl OrchestratorState { - pub fn new(network_config: NetworkConfig) -> Self { + pub fn new( + network_config: NetworkConfig, + ) -> Self { let mut web_client = None; if network_config.web_server_config.is_some() { let base_url = "http://0.0.0.0/9000".to_string().parse().unwrap(); @@ -140,7 +142,8 @@ where fn post_getconfig( &mut self, _node_index: u16, - ) -> Result, ServerError> { + ) -> Result, ServerError> + { if self.config.libp2p_config.is_some() { let libp2p_config = self.config.clone().libp2p_config.unwrap(); if libp2p_config.bootstrap_nodes.len() < libp2p_config.num_bootstrap_nodes { @@ -251,7 +254,10 @@ where let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); - let mut app = App::>, ServerError>::with_state(state); + let mut app = App::< + RwLock>, + ServerError, + >::with_state(state); app.register_module("api", api.unwrap()) .expect("Error registering api"); tracing::error!("lisening on {:?}:{:?}", host, port); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index a167d12957..ce36363f08 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -213,7 +213,7 @@ impl TestMetadata { (0..total_nodes) .map(|id| known_nodes[id].get_stake_table_entry(1u64)) .collect(); - + // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { // TODO this doesn't exist anymore diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index ee6904e5e3..49396322ef 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -50,10 +50,10 @@ pub trait SignatureKey: + Ord { /// The private key type for this signature algorithm - type PrivateKey: Send + type PrivateKey: Send + Sync - + Sized - + Clone + + Sized + + Clone + Debug + Eq + Serialize From aaf094ded131db699a3d254b7765f06d7fbf0175 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 18:55:25 -0700 Subject: [PATCH 0233/1393] initialize known_nodes_sk --- hotshot/examples/infra/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 9fb2490a39..da44fb2249 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -54,20 +54,22 @@ pub fn load_config_from_file( > = config_toml.into(); // Generate network's public keys + let known_nodes_sk = Vec::new(); let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { - TYPES::SignatureKey::generated_from_seed_indexed( + let (key_pair, sk) = TYPES::SignatureKey::generated_from_seed_indexed( config.seed, node_id.try_into().unwrap(), - ) - .0 + ); + known_nodes_sk.push(sk); + key_pair }) .collect(); config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) .collect(); - + config.config.known_nodes_sk = known_nodes_sk; config } From ec4fc68032b047c65a0a2300941647af0a16a035 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 23:07:17 -0700 Subject: [PATCH 0234/1393] generated_from_seed_indexed only appear in initialization or generator process --- hotshot/examples/infra/mod.rs | 2 +- hotshot/examples/infra/modDA.rs | 18 ++++++++---------- orchestrator/src/lib.rs | 2 +- testing/src/task_helpers.rs | 5 ++--- testing/src/test_runner.rs | 7 ++++--- 5 files changed, 16 insertions(+), 18 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index da44fb2249..9a979e0d83 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -54,7 +54,7 @@ pub fn load_config_from_file( > = config_toml.into(); // Generate network's public keys - let known_nodes_sk = Vec::new(); + let mut known_nodes_sk = Vec::new(); let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { let (key_pair, sk) = TYPES::SignatureKey::generated_from_seed_indexed( diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index e7214b8d6e..5b6e78acfc 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -458,11 +458,10 @@ where >, ) -> WebServerDARun { // Generate our own key - let (pub_key, _priv_key) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); + let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); + let pub_key = <::SignatureKey as SignatureKey>::get_public_key( + known_nodes_with_stake.get(config.node_index as usize).expect("node_id should be within the range of known_nodes") + ); // Get the configuration for the web server let WebServerConfig { @@ -604,11 +603,10 @@ where TYPES::ElectionConfigType, >, ) -> Libp2pDARun { - let (pubkey, _privkey) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); + let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); + let pub_key = <::SignatureKey as SignatureKey>::get_public_key( + known_nodes_with_stake.get(config.node_index as usize).unwrap() + ); let mut config = config; let libp2p_config = config .libp2p_config diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index fef9b22b03..3d2d3793f6 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -102,7 +102,7 @@ where //add new node's key to stake table if self.config.web_server_config.clone().is_some() { - let new_key = KEY::generated_from_seed_indexed(self.config.seed, node_index.into()).0; + let new_key = KEY::get_public_key(self.config.config.known_nodes_with_stake.get(node_index as usize).expect("node_id should be within the range of known_nodes")); let client_clone = self.client.clone().unwrap(); async move { client_clone diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index faa1cf4067..8604d18a81 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -49,10 +49,9 @@ pub async fn build_system_handle( .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let private_key = - ::generated_from_seed_indexed([0u8; 32], node_id).1; + let private_key = config.known_nodes_sk.get(node_id as usize).expect("node_id should be within the range of known_nodes").clone(); let public_key = ::get_public_key( - config.known_nodes_with_stake.get(node_id as usize).unwrap(), + known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes") ); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index fb16293ce6..09ddf4bc6e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -246,11 +246,12 @@ where { let node_id = self.next_node_id; self.next_node_id += 1; - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); // Generate key pair for certificate aggregation - let private_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = TYPES::SignatureKey::from_private(&private_key); + let private_key = config.known_nodes_sk.get(node_id as usize).expect("node_id should be within the range of known_nodes").clone(); + let public_key = <::SignatureKey as SignatureKey>::get_public_key( + known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes") + ); let entry = public_key.get_stake_table_entry(1u64); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< From 1270eb89fc80e8bd73544375bbbcf28ea4594b02 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 23:13:06 -0700 Subject: [PATCH 0235/1393] let the stake value be extractable if needed rather than assign 1u64 manually --- testing/src/test_runner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 09ddf4bc6e..6e32f51cc1 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -252,7 +252,7 @@ where let public_key = <::SignatureKey as SignatureKey>::get_public_key( known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes") ); - let entry = public_key.get_stake_table_entry(1u64); + let entry = known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes"); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TYPES, From d651ef277a5c42f6f829392d7fdecc0dec4f71d5 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 23:18:45 -0700 Subject: [PATCH 0236/1393] unwrap() to expect() --- hotshot/examples/infra/modDA.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 5b6e78acfc..e72ab0653a 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -208,9 +208,9 @@ pub trait RunDA< // Get KeyPair for certificate Aggregation let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let known_nodes_sk = config.config.known_nodes_sk.clone(); - let entry = known_nodes_with_stake.get(config.node_index as usize).unwrap(); + let entry = known_nodes_with_stake.get(config.node_index as usize).expect("node_id should be within the range of known_nodes"); let pk = TYPES::SignatureKey::get_public_key(entry); - let sk = known_nodes_sk.get(config.node_index as usize).unwrap(); + let sk = known_nodes_sk.get(config.node_index as usize).expect("node_id should be within the range of known_nodes"); let da_network = self.get_da_network(); let quorum_network = self.get_quorum_network(); @@ -605,7 +605,7 @@ where ) -> Libp2pDARun { let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let pub_key = <::SignatureKey as SignatureKey>::get_public_key( - known_nodes_with_stake.get(config.node_index as usize).unwrap() + known_nodes_with_stake.get(config.node_index as usize).expect("node_id should be within the range of known_nodes") ); let mut config = config; let libp2p_config = config @@ -692,7 +692,7 @@ where .config .known_nodes_with_stake .get(i as usize) - .unwrap(), + .expect("node_id should be within the range of known_nodes"), ); if i < config.config.da_committee_size as u64 { da_keys.insert(pubkey.clone()); From 26082d7b27e809bb38cb2e1c59386e1d7325337c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 12 Oct 2023 23:50:27 -0700 Subject: [PATCH 0237/1393] avoid too many generic type for OrchestratorState --- hotshot/examples/infra/modDA.rs | 26 +++++++++++++++++--------- orchestrator/src/lib.rs | 20 +++++++++++++------- testing/src/task_helpers.rs | 10 ++++++++-- testing/src/test_runner.rs | 14 +++++++++++--- 4 files changed, 49 insertions(+), 21 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index e72ab0653a..77a850d788 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -208,9 +208,13 @@ pub trait RunDA< // Get KeyPair for certificate Aggregation let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let known_nodes_sk = config.config.known_nodes_sk.clone(); - let entry = known_nodes_with_stake.get(config.node_index as usize).expect("node_id should be within the range of known_nodes"); + let entry = known_nodes_with_stake + .get(config.node_index as usize) + .expect("node_id should be within the range of known_nodes"); let pk = TYPES::SignatureKey::get_public_key(entry); - let sk = known_nodes_sk.get(config.node_index as usize).expect("node_id should be within the range of known_nodes"); + let sk = known_nodes_sk + .get(config.node_index as usize) + .expect("node_id should be within the range of known_nodes"); let da_network = self.get_da_network(); let quorum_network = self.get_quorum_network(); @@ -246,7 +250,7 @@ pub trait RunDA< SystemContext::init( pk, - sk, + sk.clone(), config.node_index, config.config, MemoryStorage::empty(), @@ -459,8 +463,10 @@ where ) -> WebServerDARun { // Generate our own key let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let pub_key = <::SignatureKey as SignatureKey>::get_public_key( - known_nodes_with_stake.get(config.node_index as usize).expect("node_id should be within the range of known_nodes") + let pubkey = <::SignatureKey as SignatureKey>::get_public_key( + known_nodes_with_stake + .get(config.node_index as usize) + .expect("node_id should be within the range of known_nodes"), ); // Get the configuration for the web server @@ -474,7 +480,7 @@ where &host.to_string(), port, wait_between_polls, - pub_key.clone(), + pubkey.clone(), false, ); @@ -493,7 +499,7 @@ where // Each node runs the DA network so that leaders have access to transactions and DA votes let da_network: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) + WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pubkey, true) .into(), ); @@ -604,8 +610,10 @@ where >, ) -> Libp2pDARun { let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let pub_key = <::SignatureKey as SignatureKey>::get_public_key( - known_nodes_with_stake.get(config.node_index as usize).expect("node_id should be within the range of known_nodes") + let pubkey = <::SignatureKey as SignatureKey>::get_public_key( + known_nodes_with_stake + .get(config.node_index as usize) + .expect("node_id should be within the range of known_nodes"), ); let mut config = config; let libp2p_config = config diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 3d2d3793f6..8df771ca93 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -36,11 +36,11 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { } #[derive(Default, Clone)] -struct OrchestratorState { +struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, /// The network configuration - config: NetworkConfig, + config: NetworkConfig, /// Whether nodes should start their HotShot instances /// Will be set to true once all nodes post they are ready to start start: bool, @@ -51,7 +51,7 @@ struct OrchestratorState { } impl - OrchestratorState + OrchestratorState { pub fn new( network_config: NetworkConfig, @@ -83,7 +83,7 @@ pub trait OrchestratorApi { } impl OrchestratorApi - for OrchestratorState + for OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey, ELECTION: serde::Serialize + Clone + Send, @@ -102,7 +102,13 @@ where //add new node's key to stake table if self.config.web_server_config.clone().is_some() { - let new_key = KEY::get_public_key(self.config.config.known_nodes_with_stake.get(node_index as usize).expect("node_id should be within the range of known_nodes")); + let new_key = KEY::get_public_key( + self.config + .config + .known_nodes_with_stake + .get(node_index as usize) + .expect("node_id should be within the range of known_nodes"), + ); let client_clone = self.client.clone().unwrap(); async move { client_clone @@ -251,11 +257,11 @@ where { let api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); - let state: RwLock> = + let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); let mut app = App::< - RwLock>, + RwLock>, ServerError, >::with_state(state); app.register_module("api", api.unwrap()) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8604d18a81..7b4a1daca3 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -49,9 +49,15 @@ pub async fn build_system_handle( .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let private_key = config.known_nodes_sk.get(node_id as usize).expect("node_id should be within the range of known_nodes").clone(); + let private_key = config + .known_nodes_sk + .get(node_id as usize) + .expect("node_id should be within the range of known_nodes") + .clone(); let public_key = ::get_public_key( - known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes") + known_nodes_with_stake + .get(node_id as usize) + .expect("node_id should be within the range of known_nodes"), ); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 6e32f51cc1..31fa25b491 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -248,11 +248,19 @@ where self.next_node_id += 1; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); // Generate key pair for certificate aggregation - let private_key = config.known_nodes_sk.get(node_id as usize).expect("node_id should be within the range of known_nodes").clone(); + let private_key = config + .known_nodes_sk + .get(node_id as usize) + .expect("node_id should be within the range of known_nodes") + .clone(); let public_key = <::SignatureKey as SignatureKey>::get_public_key( - known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes") + known_nodes_with_stake + .get(node_id as usize) + .expect("node_id should be within the range of known_nodes"), ); - let entry = known_nodes_with_stake.get(node_id as usize).expect("node_id should be within the range of known_nodes"); + let entry = known_nodes_with_stake + .get(node_id as usize) + .expect("node_id should be within the range of known_nodes"); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TYPES, From 254b0dc5a0168311f24a7eb57b53a76c3797f31c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 13 Oct 2023 00:00:24 -0700 Subject: [PATCH 0238/1393] avoid too many generic type for NetworkConfig --- hotshot/examples/infra/mod.rs | 4 ---- hotshot/examples/infra/modDA.rs | 16 ---------------- orchestrator/src/client.rs | 6 +----- orchestrator/src/config.rs | 10 +++++----- orchestrator/src/lib.rs | 19 ++++++++----------- 5 files changed, 14 insertions(+), 41 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 9a979e0d83..345010891e 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -36,8 +36,6 @@ pub fn load_config_from_file( config_file: String, ) -> NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, > { let config_file_as_string: String = fs::read_to_string(config_file.as_str()) @@ -48,8 +46,6 @@ pub fn load_config_from_file( let mut config: NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, > = config_toml.into(); diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 77a850d788..0842f695eb 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -185,8 +185,6 @@ pub trait RunDA< async fn initialize_networking( config: NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >, ) -> Self; @@ -378,8 +376,6 @@ pub trait RunDA< &self, ) -> NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >; } @@ -394,8 +390,6 @@ pub struct WebServerDARun< > { config: NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >, quorum_network: WebCommChannel, @@ -456,8 +450,6 @@ where async fn initialize_networking( config: NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >, ) -> WebServerDARun { @@ -527,8 +519,6 @@ where &self, ) -> NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, > { self.config.clone() @@ -542,8 +532,6 @@ pub struct Libp2pDARun, MEMBERSHIP { config: NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >, quorum_network: Libp2pCommChannel, @@ -604,8 +592,6 @@ where async fn initialize_networking( config: NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >, ) -> Libp2pDARun { @@ -765,8 +751,6 @@ where &self, ) -> NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, > { self.config.clone() diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 432cff8d2d..d615312c90 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -5,7 +5,7 @@ use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use hotshot_types::traits::node_implementation::NodeType; use surf_disco::{error::ClientError, Client}; /// Holds the client connection to the orchestrator @@ -67,8 +67,6 @@ impl OrchestratorClient { node_index: u16, ) -> NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, > { let f = |client: Client| { @@ -76,8 +74,6 @@ impl OrchestratorClient { let config: Result< NetworkConfig< TYPES::SignatureKey, - ::StakeTableEntry, - ::PrivateKey, TYPES::ElectionConfigType, >, ClientError, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index a8e7617f27..2ecb92dc8d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,4 +1,4 @@ -use hotshot_types::{ExecutionType, HotShotConfig}; +use hotshot_types::{ExecutionType, HotShotConfig, traits::signature_key::SignatureKey}; use std::{ marker::PhantomData, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -51,7 +51,7 @@ pub struct WebServerConfig { } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct NetworkConfig { +pub struct NetworkConfig { pub rounds: usize, pub transactions_per_round: usize, pub num_bootrap: usize, @@ -65,13 +65,13 @@ pub struct NetworkConfig { pub key_type_name: String, pub election_config_type_name: String, pub libp2p_config: Option, - pub config: HotShotConfig, + pub config: HotShotConfig, pub web_server_config: Option, pub da_web_server_config: Option, _key_type_phantom: PhantomData, } -impl Default for NetworkConfig { +impl Default for NetworkConfig { fn default() -> Self { Self { rounds: default_rounds(), @@ -123,7 +123,7 @@ fn default_web_server_config() -> Option { None } -impl From for NetworkConfig { +impl From for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { rounds: val.rounds, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 8df771ca93..864f501fa5 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -40,7 +40,7 @@ struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, /// The network configuration - config: NetworkConfig, + config: NetworkConfig, /// Whether nodes should start their HotShot instances /// Will be set to true once all nodes post they are ready to start start: bool, @@ -54,7 +54,7 @@ impl OrchestratorState { pub fn new( - network_config: NetworkConfig, + network_config: NetworkConfig, ) -> Self { let mut web_client = None; if network_config.web_server_config.is_some() { @@ -71,12 +71,12 @@ impl } } -pub trait OrchestratorApi { +pub trait OrchestratorApi { fn post_identity(&mut self, identity: IpAddr) -> Result; fn post_getconfig( &mut self, node_index: u16, - ) -> Result, ServerError>; + ) -> Result, ServerError>; fn get_start(&self) -> Result; fn post_ready(&mut self) -> Result<(), ServerError>; fn post_run_results(&mut self) -> Result<(), ServerError>; @@ -148,7 +148,7 @@ where fn post_getconfig( &mut self, _node_index: u16, - ) -> Result, ServerError> + ) -> Result, ServerError> { if self.config.libp2p_config.is_some() { let libp2p_config = self.config.clone().libp2p_config.unwrap(); @@ -198,7 +198,7 @@ where } /// Sets up all API routes -fn define_api() -> Result, ApiError> +fn define_api() -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + OrchestratorApi, @@ -247,7 +247,7 @@ where /// Runs the orchestrator pub async fn run_orchestrator( - network_config: NetworkConfig, + network_config: NetworkConfig, host: IpAddr, port: u16, ) -> io::Result<()> @@ -260,10 +260,7 @@ where let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); - let mut app = App::< - RwLock>, - ServerError, - >::with_state(state); + let mut app = App::>, ServerError>::with_state(state); app.register_module("api", api.unwrap()) .expect("Error registering api"); tracing::error!("lisening on {:?}:{:?}", host, port); From 60961dcdd714238ad82e51754b14c5e0c12b540f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 13 Oct 2023 00:03:25 -0700 Subject: [PATCH 0239/1393] avoid too many generic type for OrchestratorApi --- orchestrator/src/lib.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 864f501fa5..3c11bacc63 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -71,7 +71,7 @@ impl } } -pub trait OrchestratorApi { +pub trait OrchestratorApi { fn post_identity(&mut self, identity: IpAddr) -> Result; fn post_getconfig( &mut self, @@ -82,7 +82,7 @@ pub trait OrchestratorApi { fn post_run_results(&mut self) -> Result<(), ServerError>; } -impl OrchestratorApi +impl OrchestratorApi for OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey, @@ -198,13 +198,11 @@ where } /// Sets up all API routes -fn define_api() -> Result, ApiError> +fn define_api() -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, - ::State: Send + Sync + OrchestratorApi, + ::State: Send + Sync + OrchestratorApi, KEY: serde::Serialize, - ENTRY: serde::Serialize, - PRIVATEKEY: serde::Serialize, ELECTION: serde::Serialize, { let api_toml = toml::from_str::(include_str!(concat!( From 307cc687d3e1ea875143b3c80597239082bd11bd Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 13 Oct 2023 00:04:03 -0700 Subject: [PATCH 0240/1393] fix lint --- hotshot/examples/infra/mod.rs | 11 +++----- hotshot/examples/infra/modDA.rs | 46 ++++++--------------------------- orchestrator/src/client.rs | 10 ++----- orchestrator/src/config.rs | 2 +- orchestrator/src/lib.rs | 10 +++---- 5 files changed, 17 insertions(+), 62 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 345010891e..7f2bf3ffa3 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -34,20 +34,15 @@ pub struct OrchestratorArgs { /// Reads a network configuration from a given filepath pub fn load_config_from_file( config_file: String, -) -> NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, -> { +) -> NetworkConfig { let config_file_as_string: String = fs::read_to_string(config_file.as_str()) .unwrap_or_else(|_| panic!("Could not read config file located at {config_file}")); let config_toml: NetworkConfigFile = toml::from_str::(&config_file_as_string) .expect("Unable to convert config file to TOML"); - let mut config: NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - > = config_toml.into(); + let mut config: NetworkConfig = + config_toml.into(); // Generate network's public keys let mut known_nodes_sk = Vec::new(); diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 0842f695eb..bb5c544a80 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -183,10 +183,7 @@ pub trait RunDA< { /// Initializes networking, returns self async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, ) -> Self; /// Initializes the genesis state and HotShot instance; does not start HotShot consensus @@ -372,12 +369,7 @@ pub trait RunDA< fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; /// Returns the config for this run - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >; + fn get_config(&self) -> NetworkConfig; } // WEB SERVER @@ -388,10 +380,7 @@ pub struct WebServerDARun< I: NodeImplementation, MEMBERSHIP: Membership, > { - config: NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, quorum_network: WebCommChannel, da_network: WebCommChannel, view_sync_network: WebCommChannel, @@ -448,10 +437,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, ) -> WebServerDARun { // Generate our own key let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); @@ -515,12 +501,7 @@ where self.view_sync_network.clone() } - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - > { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } @@ -530,10 +511,7 @@ where /// Represents a libp2p-based run pub struct Libp2pDARun, MEMBERSHIP: Membership> { - config: NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, quorum_network: Libp2pCommChannel, da_network: Libp2pCommChannel, view_sync_network: Libp2pCommChannel, @@ -590,10 +568,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, ) -> Libp2pDARun { let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let pubkey = <::SignatureKey as SignatureKey>::get_public_key( @@ -747,12 +722,7 @@ where self.view_sync_network.clone() } - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - > { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index d615312c90..d612479fb4 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -65,17 +65,11 @@ impl OrchestratorClient { pub async fn get_config_from_orchestrator( &self, node_index: u16, - ) -> NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - > { + ) -> NetworkConfig { let f = |client: Client| { async move { let config: Result< - NetworkConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + NetworkConfig, ClientError, > = client .post(&format!("api/config/{node_index}")) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 2ecb92dc8d..a3d15e350f 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,4 +1,4 @@ -use hotshot_types::{ExecutionType, HotShotConfig, traits::signature_key::SignatureKey}; +use hotshot_types::{traits::signature_key::SignatureKey, ExecutionType, HotShotConfig}; use std::{ marker::PhantomData, net::{IpAddr, Ipv4Addr, SocketAddr}, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 3c11bacc63..0c90fdf2d8 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -53,9 +53,7 @@ struct OrchestratorState { impl OrchestratorState { - pub fn new( - network_config: NetworkConfig, - ) -> Self { + pub fn new(network_config: NetworkConfig) -> Self { let mut web_client = None; if network_config.web_server_config.is_some() { let base_url = "http://0.0.0.0/9000".to_string().parse().unwrap(); @@ -82,8 +80,7 @@ pub trait OrchestratorApi { fn post_run_results(&mut self) -> Result<(), ServerError>; } -impl OrchestratorApi - for OrchestratorState +impl OrchestratorApi for OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey, ELECTION: serde::Serialize + Clone + Send, @@ -148,8 +145,7 @@ where fn post_getconfig( &mut self, _node_index: u16, - ) -> Result, ServerError> - { + ) -> Result, ServerError> { if self.config.libp2p_config.is_some() { let libp2p_config = self.config.clone().libp2p_config.unwrap(); if libp2p_config.bootstrap_nodes.len() < libp2p_config.num_bootstrap_nodes { From ea330af9b24d89d65e712188e1c36de2ab66d82b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 13 Oct 2023 11:54:51 -0400 Subject: [PATCH 0241/1393] Expect no failures by default, tighten up default timing --- testing/src/overall_safety_task.rs | 2 +- testing/src/test_builder.rs | 19 ++++++++++--------- testing/tests/basic.rs | 17 +++++++++++------ 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 4bda48adcc..26b21130ee 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -367,7 +367,7 @@ impl Default for OverallSafetyPropertiesDescription { check_leaf: false, check_state: true, check_block: true, - num_failed_views: 10, + num_failed_views: 0, transaction_threshold: 0, // very strict threshold_calculator: Arc::new(|_num_live, num_total| 2 * num_total / 3 + 1), diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f1db2a9320..a56c9330fc 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -66,12 +66,12 @@ pub struct TestMetadata { impl Default for TimingData { fn default() -> Self { Self { - next_view_timeout: 10000, + next_view_timeout: 1000, timeout_ratio: (11, 10), - round_start_delay: 1, - start_delay: 1, + round_start_delay: 100, + start_delay: 100, propose_min_round_time: Duration::new(0, 0), - propose_max_round_time: Duration::new(5, 0), + propose_max_round_time: Duration::from_millis(100), } } } @@ -125,7 +125,7 @@ impl TestMetadata { } /// Default setting with 20 nodes and 10 views of successful views. - pub fn default_more_nodes_less_success() -> TestMetadata { + pub fn default_more_nodes() -> TestMetadata { TestMetadata { total_nodes: 20, start_nodes: 20, @@ -139,12 +139,13 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(40, 0), + duration: Duration::new(140, 0), }, ), - overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 10, - ..Default::default() + overall_safety_properties: Default::default(), + timing_data: TimingData { + next_view_timeout: 1000, + ..TimingData::default() }, ..TestMetadata::default() } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index fc4b6017f7..116598c459 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -38,7 +38,7 @@ async fn test_with_failures_one() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes_less_success(); + let mut metadata = TestMetadata::default_more_nodes(); // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -50,8 +50,9 @@ async fn test_with_failures_one() { }]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(4, 0), dead_nodes)], + node_changes: vec![(Duration::new(1, 0), dead_nodes)], }; + metadata.overall_safety_properties.num_failed_views = 2; metadata .gen_launcher::() .launch() @@ -77,7 +78,7 @@ async fn test_with_failures_half_f() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes_less_success(); + let mut metadata = TestMetadata::default_more_nodes(); // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -99,8 +100,9 @@ async fn test_with_failures_half_f() { ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(4, 0), dead_nodes)], + node_changes: vec![(Duration::new(1, 0), dead_nodes)], }; + metadata.overall_safety_properties.num_failed_views = 6; metadata .gen_launcher::() .launch() @@ -126,7 +128,10 @@ async fn test_with_failures_f() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes_less_success(); + let mut metadata = TestMetadata::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -160,7 +165,7 @@ async fn test_with_failures_f() { ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(4, 0), dead_nodes)], + node_changes: vec![(Duration::new(1, 0), dead_nodes)], }; metadata .gen_launcher::() From 195de91dd99fce9f38a7a5ec2513857c11e8157b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:30:23 -0700 Subject: [PATCH 0242/1393] Bump libp2p-noise from 0.43.1 to 0.43.2 (#1926) Bumps [libp2p-noise](https://github.com/libp2p/rust-libp2p) from 0.43.1 to 0.43.2. - [Release notes](https://github.com/libp2p/rust-libp2p/releases) - [Changelog](https://github.com/libp2p/rust-libp2p/blob/master/CHANGELOG.md) - [Commits](https://github.com/libp2p/rust-libp2p/compare/libp2p-noise-v0.43.1...libp2p-noise-0.43.2) --- updated-dependencies: - dependency-name: libp2p-noise dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index dfdb02020e..f955ab1cca 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -38,7 +38,7 @@ hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } -libp2p-noise = { version = "0.43.0", default-features = false } +libp2p-noise = { version = "0.43.2", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } From de344d919671c7b5cdb228761fb36ee30fba2a4e Mon Sep 17 00:00:00 2001 From: MRain Date: Tue, 17 Oct 2023 15:59:42 -0700 Subject: [PATCH 0243/1393] a vector based stake table --- hotshot-stake-table/src/lib.rs | 2 + hotshot-stake-table/src/mt_based/config.rs | 24 +- hotshot-stake-table/src/mt_based/internal.rs | 43 +-- hotshot-stake-table/src/utils.rs | 20 ++ hotshot-stake-table/src/vec_based.rs | 309 +++++++++++++++++++ 5 files changed, 361 insertions(+), 37 deletions(-) create mode 100644 hotshot-stake-table/src/utils.rs create mode 100644 hotshot-stake-table/src/vec_based.rs diff --git a/hotshot-stake-table/src/lib.rs b/hotshot-stake-table/src/lib.rs index 6ba4d305fb..fecb3a06d9 100644 --- a/hotshot-stake-table/src/lib.rs +++ b/hotshot-stake-table/src/lib.rs @@ -3,3 +3,5 @@ #![deny(missing_docs)] pub mod mt_based; +pub mod utils; +pub mod vec_based; diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs index eacc80a6d5..9b2b455b84 100644 --- a/hotshot-stake-table/src/mt_based/config.rs +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -1,8 +1,8 @@ //! Config file for stake table +use crate::utils::ToFields; use ark_ff::PrimeField; use ark_std::vec; -use ethereum_types::U256; -use jf_primitives::crhf::FixedLengthRescueCRHF; +use jf_primitives::{crhf::FixedLengthRescueCRHF, signatures::bls_over_bn254}; /// Branch of merkle tree. /// Set to 3 because we are currently using RATE-3 rescue hash function @@ -13,9 +13,19 @@ pub(crate) type FieldType = ark_bn254::Fq; /// Hash algorithm used in Merkle tree, using a RATE-3 rescue pub(crate) type Digest = FixedLengthRescueCRHF; -/// convert a U256 to a field element. -pub(crate) fn u256_to_field(v: &U256) -> F { - let mut bytes = vec![0u8; 32]; - v.to_little_endian(&mut bytes); - F::from_le_bytes_mod_order(&bytes) +impl ToFields for FieldType { + const SIZE: usize = 1; + fn to_fields(&self) -> Vec { + vec![FieldType::default()] + } +} + +impl ToFields for bls_over_bn254::VerKey { + const SIZE: usize = 2; + fn to_fields(&self) -> Vec { + let bytes = jf_utils::to_bytes!(&self.to_affine()).unwrap(); + let x = ::from_le_bytes_mod_order(&bytes[..32]); + let y = ::from_le_bytes_mod_order(&bytes[32..]); + vec![x, y] + } } diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 4fe8e7b02d..3389ca35c0 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -1,19 +1,19 @@ //! Utilities and internals for maintaining a local stake table -use super::config::{u256_to_field, Digest, FieldType, TREE_BRANCH}; -use ark_ff::{Field, PrimeField}; +use super::config::{Digest, FieldType, TREE_BRANCH}; +use crate::utils::{u256_to_field, ToFields}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{hash::Hash, sync::Arc, vec, vec::Vec}; use ethereum_types::U256; use hotshot_types::traits::stake_table::StakeTableError; -use jf_primitives::{crhf::CRHF, signatures::bls_over_bn254}; +use jf_primitives::crhf::CRHF; use jf_utils::canonical; use serde::{Deserialize, Serialize}; use tagged_base64::tagged; /// Common trait bounds for generic key type `K` for [`PersistentMerkleNode`] pub trait Key: - Clone + CanonicalSerialize + CanonicalDeserialize + PartialEq + Eq + IntoFields + Hash + Clone + CanonicalSerialize + CanonicalDeserialize + PartialEq + Eq + ToFields + Hash { } impl Key for T where @@ -22,32 +22,11 @@ impl Key for T where + CanonicalDeserialize + PartialEq + Eq - + IntoFields + + ToFields + Hash { } -/// A trait that converts into a field element. -/// Help avoid "cannot impl foreign traits on foreign types" problem -pub trait IntoFields { - fn into_fields(self) -> [F; 2]; -} - -impl IntoFields for FieldType { - fn into_fields(self) -> [FieldType; 2] { - [FieldType::default(), self] - } -} - -impl IntoFields for bls_over_bn254::VerKey { - fn into_fields(self) -> [FieldType; 2] { - let bytes = jf_utils::to_bytes!(&self.to_affine()).unwrap(); - let x = ::from_le_bytes_mod_order(&bytes[..32]); - let y = ::from_le_bytes_mod_order(&bytes[32..]); - [x, y] - } -} - /// A persistent merkle tree tailored for the stake table. /// Generic over the key type `K` #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] @@ -129,7 +108,8 @@ impl MerkleProof { match self.path.first() { Some(MerklePathEntry::Leaf { key, value }) => { let mut input = [FieldType::default(); 3]; - input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[..>::SIZE] + .copy_from_slice(&(*key).clone().to_fields()[..]); input[2] = u256_to_field(value); let init = Digest::evaluate(input).map_err(|_| StakeTableError::RescueError)?[0]; self.path @@ -344,7 +324,8 @@ impl PersistentMerkleNode { if height == 0 { if matches!(self, PersistentMerkleNode::Empty) { let mut input = [FieldType::default(); 3]; - input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[..>::SIZE] + .copy_from_slice(&(*key).clone().to_fields()[..]); input[2] = u256_to_field(&value); Ok(Arc::new(PersistentMerkleNode::Leaf { comm: Digest::evaluate(input).map_err(|_| StakeTableError::RescueError)?[0], @@ -439,7 +420,8 @@ impl PersistentMerkleNode { .ok_or(StakeTableError::StakeOverflow) }?; let mut input = [FieldType::default(); 3]; - input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[..>::SIZE] + .copy_from_slice(&(*key).clone().to_fields()[..]); input[2] = u256_to_field(&value); Ok(( Arc::new(PersistentMerkleNode::Leaf { @@ -506,7 +488,8 @@ impl PersistentMerkleNode { } => { if key == cur_key { let mut input = [FieldType::default(); 3]; - input[..2].copy_from_slice(&(*key).clone().into_fields()[..]); + input[..>::SIZE] + .copy_from_slice(&(*key).clone().to_fields()[..]); input[2] = u256_to_field(&value); Ok(( Arc::new(PersistentMerkleNode::Leaf { diff --git a/hotshot-stake-table/src/utils.rs b/hotshot-stake-table/src/utils.rs new file mode 100644 index 0000000000..e4382109fc --- /dev/null +++ b/hotshot-stake-table/src/utils.rs @@ -0,0 +1,20 @@ +//! Utilities to help building a stake table. + +use ark_ff::{Field, PrimeField}; +use ethereum_types::U256; + +/// A trait that converts into a field element. +pub trait ToFields { + /// The number of field elements needed to represent the given struct. + const SIZE: usize; + + /// Convert the given struct into a list of field elements. + fn to_fields(&self) -> Vec; +} + +/// convert a U256 to a field element. +pub(crate) fn u256_to_field(v: &U256) -> F { + let mut bytes = vec![0u8; 32]; + v.to_little_endian(&mut bytes); + F::from_le_bytes_mod_order(&bytes) +} diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs new file mode 100644 index 0000000000..e0d72664ef --- /dev/null +++ b/hotshot-stake-table/src/vec_based.rs @@ -0,0 +1,309 @@ +//! A vector based stake table implementation. The commitment is the rescue hash of the list of (key, amount) pairs; + +use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; +use digest::crypto_common::rand_core::CryptoRngCore; +use ethereum_types::{U256, U512}; +use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; +use jf_primitives::rescue::{sponge::RescueCRHF, RescueParameter}; +use serde::{Deserialize, Serialize}; + +use crate::utils::{u256_to_field, ToFields}; + +/// Locally maintained stake table, generic over public key type `K`. +/// Whose commitment is a rescue hash over field `F`. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct StakeTable, F: RescueParameter> { + /// The most up-to-date stake table, where the incoming transactions shall be performed on. + head: Vec<(K, U256)>, + /// The snapshot of stake table at the beginning of the current epoch + epoch_start: Vec<(K, U256)>, + /// The stake table used for leader election. + last_epoch_start: Vec<(K, U256)>, + + /// Total stakes for different versions + head_total_stake: U256, + epoch_start_total_stake: U256, + last_epoch_start_total_stake: U256, + + /// Commitment for finalized versions + epoch_start_comm: F, + last_epoch_start_comm: F, + + /// The mapping from public keys to their location in the Merkle tree. + #[serde(skip)] + mapping: HashMap, +} + +impl StakeTableScheme for StakeTable +where + K: Eq + Hash + Clone + ToFields, + F: RescueParameter, +{ + type Key = K; + type Amount = U256; + type Commitment = F; + type LookupProof = (); + type IntoIter = as ark_std::iter::IntoIterator>::IntoIter; + // type IntoIter = ark_std::slice::Iter<'a, &'a (K, U256)>; + + fn register( + &mut self, + new_key: Self::Key, + amount: Self::Amount, + ) -> Result<(), StakeTableError> { + match self.mapping.get(&new_key) { + Some(_) => Err(StakeTableError::ExistingKey), + None => { + let pos = self.mapping.len(); + self.head.push((new_key.clone(), amount)); + self.mapping.insert(new_key, pos); + Ok(()) + } + } + } + + fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError> { + match self.mapping.get(existing_key) { + Some(pos) => { + self.head_total_stake -= self.head[*pos].1; + self.head[*pos].1 = U256::zero(); + Ok(()) + } + None => Err(StakeTableError::KeyNotFound), + } + } + + fn commitment(&self, version: SnapshotVersion) -> Result { + match version { + // IMPORTANT: we don't support committing the head version b/c it's not finalized. + SnapshotVersion::Head => Err(StakeTableError::SnapshotUnsupported), + SnapshotVersion::EpochStart => Ok(self.epoch_start_comm), + SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start_comm), + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + } + } + + fn total_stake(&self, version: SnapshotVersion) -> Result { + match version { + SnapshotVersion::Head => Ok(self.head_total_stake), + SnapshotVersion::EpochStart => Ok(self.epoch_start_total_stake), + SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start_total_stake), + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + } + } + + fn len(&self, version: SnapshotVersion) -> Result { + match version { + SnapshotVersion::Head => Ok(self.head.len()), + SnapshotVersion::EpochStart => Ok(self.epoch_start.len()), + SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start.len()), + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + } + } + + fn contains_key(&self, key: &Self::Key) -> bool { + self.mapping.contains_key(key) + } + + fn lookup( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError> { + match self.mapping.get(key) { + Some(&pos) => match version { + SnapshotVersion::Head => { + if pos >= self.head.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok((self.head[pos].1, ())) + } + } + SnapshotVersion::EpochStart => { + if pos >= self.epoch_start.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok((self.epoch_start[pos].1, ())) + } + } + SnapshotVersion::LastEpochStart => { + if pos >= self.last_epoch_start.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok((self.last_epoch_start[pos].1, ())) + } + } + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + }, + None => Err(StakeTableError::KeyNotFound), + } + } + + fn simple_lookup( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result { + match self.mapping.get(key) { + Some(&pos) => match version { + SnapshotVersion::Head => { + if pos >= self.head.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok(self.head[pos].1) + } + } + SnapshotVersion::EpochStart => { + if pos >= self.epoch_start.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok(self.epoch_start[pos].1) + } + } + SnapshotVersion::LastEpochStart => { + if pos >= self.last_epoch_start.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok(self.last_epoch_start[pos].1) + } + } + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + }, + None => Err(StakeTableError::KeyNotFound), + } + } + + fn update( + &mut self, + key: &Self::Key, + delta: Self::Amount, + negative: bool, + ) -> Result { + match self.mapping.get(key) { + Some(&pos) => { + let old_amount = self.head[pos].1; + if negative { + if delta > self.head[pos].1 { + return Err(StakeTableError::InsufficientFund); + } + self.head_total_stake -= delta; + self.head[pos].1 -= delta; + } + self.head_total_stake += delta; + self.head[pos].1 += delta; + Ok(old_amount) + } + None => Err(StakeTableError::KeyNotFound), + } + } + + fn sample( + &self, + rng: &mut (impl SeedableRng + CryptoRngCore), + ) -> Option<(&Self::Key, &Self::Amount)> { + let mut bytes = [0u8; 64]; + rng.fill_bytes(&mut bytes); + let r = U512::from_big_endian(&bytes); + let m = U512::from(self.last_epoch_start_total_stake); + let mut pos: U256 = (r % m).try_into().unwrap(); // won't fail + let idx = 0; + while pos > self.last_epoch_start[idx].1 { + pos -= self.last_epoch_start[idx].1; + } + Some((&self.last_epoch_start[idx].0, &self.last_epoch_start[idx].1)) + } + + fn try_iter(&self, version: SnapshotVersion) -> Result { + match version { + SnapshotVersion::Head => Ok(self.head.clone().into_iter()), + SnapshotVersion::EpochStart => Ok(self.epoch_start.clone().into_iter()), + SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start.clone().into_iter()), + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + } + } +} + +impl StakeTable +where + K: Eq + Hash + Clone + ToFields, + F: RescueParameter, +{ + /// Initiating an empty stake table. + /// Overall capacity is `TREE_BRANCH.pow(height)`. + pub fn new() -> Self { + let comm = RescueCRHF::sponge_with_zero_padding(&[], 1)[0]; + Self { + head: vec![], + epoch_start: vec![], + last_epoch_start: vec![], + head_total_stake: U256::zero(), + epoch_start_total_stake: U256::zero(), + last_epoch_start_total_stake: U256::zero(), + mapping: HashMap::new(), + epoch_start_comm: comm, + last_epoch_start_comm: comm, + } + } + + /// Update the stake table when the epoch number advances, should be manually called. + pub fn advance(&mut self) { + // Could we avoid this `clone()`? + self.last_epoch_start = self.epoch_start.clone(); + self.last_epoch_start_total_stake = self.epoch_start_total_stake; + self.last_epoch_start_comm = self.epoch_start_comm; + self.epoch_start = self.head.clone(); + self.epoch_start_total_stake = self.head_total_stake; + self.epoch_start_comm = self.compute_head_comm(); + } + + /// Set the stake withheld by `key` to be `value`. + /// Return the previous stake if succeed. + pub fn set_value(&mut self, key: &K, value: U256) -> Result { + match self.mapping.get(key) { + Some(pos) => { + let old_value = self.head[*pos].1; + self.head[*pos].1 = value; + Ok(old_value) + } + None => Err(StakeTableError::KeyNotFound), + } + } + + /// Helper function to recompute the stake table commitment for head version + fn compute_head_comm(&mut self) -> F { + if self.head.is_empty() { + return RescueCRHF::sponge_with_zero_padding(&[], 1)[0]; + } + let mut to_be_hashed = vec![]; + self.head.iter().for_each(|(key, amount)| { + to_be_hashed.extend(key.to_fields()); + to_be_hashed.push(u256_to_field(amount)); + }); + let mut comm = to_be_hashed[0]; + for i in (1..self.head.len()).step_by(2) { + comm = RescueCRHF::sponge_with_zero_padding( + &[ + comm, + to_be_hashed[i], + if i + 1 < to_be_hashed.len() { + to_be_hashed[i + 1] + } else { + F::zero() + }, + ], + 1, + )[0]; + } + comm + } +} + +impl Default for StakeTable +where + K: Eq + Hash + Clone + ToFields, + F: RescueParameter, +{ + fn default() -> Self { + Self::new() + } +} From 6165277de703e892c95a6898f5da365122480c78 Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 19 Oct 2023 10:06:28 -0700 Subject: [PATCH 0244/1393] test & interface fixes --- hotshot-qc/src/bit_vector.rs | 6 +- hotshot-stake-table/Cargo.toml | 1 + hotshot-stake-table/src/mt_based.rs | 18 +-- hotshot-stake-table/src/vec_based.rs | 160 +++++++++++++++++++++++++-- types/src/traits/stake_table.rs | 9 +- 5 files changed, 172 insertions(+), 22 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index df05b64de4..b348480d06 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -219,9 +219,9 @@ mod tests { let key_pair3 = KeyPair::generate(&mut rng); let mut st = ST::new(3); - st.register(key_pair1.ver_key(), U256::from(3u8)).unwrap(); - st.register(key_pair2.ver_key(), U256::from(5u8)).unwrap(); - st.register(key_pair3.ver_key(), U256::from(7u8)).unwrap(); + st.register(&key_pair1.ver_key(), U256::from(3u8)).unwrap(); + st.register(&key_pair2.ver_key(), U256::from(5u8)).unwrap(); + st.register(&key_pair3.ver_key(), U256::from(7u8)).unwrap(); st.advance(); st.advance(); diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 4059589a85..40961c1498 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -26,6 +26,7 @@ tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag typenum = { workspace = true } [dev-dependencies] +ark-ed-on-bn254 = "0.4.0" rand_chacha = { workspace = true } [features] diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 643ccc8302..74ff77dcf6 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -39,20 +39,20 @@ impl StakeTableScheme for StakeTable { fn register( &mut self, - new_key: Self::Key, + new_key: &Self::Key, amount: Self::Amount, ) -> Result<(), StakeTableError> { - match self.mapping.get(&new_key) { + match self.mapping.get(new_key) { Some(_) => Err(StakeTableError::ExistingKey), None => { let pos = self.mapping.len(); self.head = self.head.register( self.height, &to_merkle_path(pos, self.height), - &new_key, + new_key, amount, )?; - self.mapping.insert(new_key, pos); + self.mapping.insert(new_key.clone(), pos); Ok(()) } } @@ -224,14 +224,14 @@ mod tests { #[test] fn test_stake_table() -> Result<(), StakeTableError> { - let mut st = StakeTable::new(3); + let mut st = StakeTable::::new(3); let keys = (0..10).map(Key::from).collect::>(); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(0)); // Registering keys keys.iter() .take(4) - .for_each(|&key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(key, U256::from(100)).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(400)); assert_eq!(st.total_stake(SnapshotVersion::EpochStart)?, U256::from(0)); assert_eq!( @@ -247,7 +247,7 @@ mod tests { keys.iter() .skip(4) .take(3) - .for_each(|&key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(key, U256::from(100)).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(600)); assert_eq!( st.total_stake(SnapshotVersion::EpochStart)?, @@ -260,7 +260,7 @@ mod tests { st.advance(); keys.iter() .skip(7) - .for_each(|&key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(key, U256::from(100)).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(900)); assert_eq!( st.total_stake(SnapshotVersion::EpochStart)?, @@ -272,7 +272,7 @@ mod tests { ); // No duplicate register - assert!(st.register(keys[0], U256::from(100)).is_err()); + assert!(st.register(&keys[0], U256::from(100)).is_err()); // The 9-th key is still in head stake table assert!(st.lookup(SnapshotVersion::EpochStart, &keys[9]).is_err()); assert!(st.lookup(SnapshotVersion::EpochStart, &keys[5]).is_ok()); diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index e0d72664ef..8a04f842fc 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -43,20 +43,22 @@ where type Amount = U256; type Commitment = F; type LookupProof = (); + // TODO(Chengyu): Can we make it references? type IntoIter = as ark_std::iter::IntoIterator>::IntoIter; // type IntoIter = ark_std::slice::Iter<'a, &'a (K, U256)>; fn register( &mut self, - new_key: Self::Key, + new_key: &Self::Key, amount: Self::Amount, ) -> Result<(), StakeTableError> { - match self.mapping.get(&new_key) { + match self.mapping.get(new_key) { Some(_) => Err(StakeTableError::ExistingKey), None => { let pos = self.mapping.len(); self.head.push((new_key.clone(), amount)); - self.mapping.insert(new_key, pos); + self.head_total_stake += amount; + self.mapping.insert(new_key.clone(), pos); Ok(()) } } @@ -181,17 +183,17 @@ where ) -> Result { match self.mapping.get(key) { Some(&pos) => { - let old_amount = self.head[pos].1; if negative { if delta > self.head[pos].1 { return Err(StakeTableError::InsufficientFund); } self.head_total_stake -= delta; self.head[pos].1 -= delta; + } else { + self.head_total_stake += delta; + self.head[pos].1 += delta; } - self.head_total_stake += delta; - self.head[pos].1 += delta; - Ok(old_amount) + Ok(self.head[pos].1) } None => Err(StakeTableError::KeyNotFound), } @@ -263,6 +265,8 @@ where Some(pos) => { let old_value = self.head[*pos].1; self.head[*pos].1 = value; + self.head_total_stake -= old_value; + self.head_total_stake += value; Ok(old_value) } None => Err(StakeTableError::KeyNotFound), @@ -307,3 +311,145 @@ where Self::new() } } + +#[cfg(test)] +mod tests { + use crate::utils::ToFields; + + use super::StakeTable; + use ark_std::{rand::SeedableRng, vec::Vec}; + use ethereum_types::U256; + use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; + use jf_primitives::signatures::bls_over_bn254::{ + BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey, + }; + use jf_primitives::signatures::schnorr::VerKey as SchnorrVerKey; + use jf_primitives::signatures::{SchnorrSignatureScheme, SignatureScheme}; + use jf_utils::bytes_to_field_elements; + + // KeyType is a pair of BLS verfication key and Schnorr verification key + type Key = (BLSVerKey, SchnorrVerKey); + type F = ark_bn254::Fr; + + impl ToFields for Key { + const SIZE: usize = 6; + + fn to_fields(&self) -> Vec { + let bytes = jf_utils::to_bytes!(&self.0.to_affine()).unwrap(); // won't fail + let mut v = bytes_to_field_elements(bytes); + let p = self.1.to_affine(); + v.push(p.x); + v.push(p.y); + v + } + } + + #[test] + fn test_stake_table() -> Result<(), StakeTableError> { + let mut st = StakeTable::::new(); + let mut prng = jf_utils::test_rng(); + let keys = (0..10) + .map(|_| { + ( + BLSOverBN254CurveSignatureScheme::key_gen(&(), &mut prng) + .unwrap() + .1, + SchnorrSignatureScheme::key_gen(&(), &mut prng).unwrap().1, + ) + }) + .collect::>(); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(0)); + + // Registering keys + keys.iter() + .take(4) + .for_each(|key| st.register(key, U256::from(100)).unwrap()); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(400)); + assert_eq!(st.total_stake(SnapshotVersion::EpochStart)?, U256::from(0)); + assert_eq!( + st.total_stake(SnapshotVersion::LastEpochStart)?, + U256::from(0) + ); + // set to zero for futher sampling test + assert_eq!( + st.set_value(&keys[1], U256::from(0)).unwrap(), + U256::from(100) + ); + st.advance(); + keys.iter() + .skip(4) + .take(3) + .for_each(|key| st.register(key, U256::from(100)).unwrap()); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(600)); + assert_eq!( + st.total_stake(SnapshotVersion::EpochStart)?, + U256::from(300) + ); + assert_eq!( + st.total_stake(SnapshotVersion::LastEpochStart)?, + U256::from(0) + ); + st.advance(); + keys.iter() + .skip(7) + .for_each(|key| st.register(key, U256::from(100)).unwrap()); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(900)); + assert_eq!( + st.total_stake(SnapshotVersion::EpochStart)?, + U256::from(600) + ); + assert_eq!( + st.total_stake(SnapshotVersion::LastEpochStart)?, + U256::from(300) + ); + + // No duplicate register + assert!(st.register(&keys[0], U256::from(100)).is_err()); + // The 9-th key is still in head stake table + assert!(st.lookup(SnapshotVersion::EpochStart, &keys[9]).is_err()); + assert!(st.lookup(SnapshotVersion::EpochStart, &keys[5]).is_ok()); + // The 6-th key is still frozen + assert!(st + .lookup(SnapshotVersion::LastEpochStart, &keys[6]) + .is_err()); + assert!(st.lookup(SnapshotVersion::LastEpochStart, &keys[2]).is_ok()); + + // Set value shall return the old value + assert_eq!( + st.set_value(&keys[0], U256::from(101)).unwrap(), + U256::from(100) + ); + assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(901)); + assert_eq!( + st.total_stake(SnapshotVersion::EpochStart)?, + U256::from(600) + ); + + // Update that results in a negative stake + assert!(st.update(&keys[0], U256::from(1000), true).is_err()); + // Update should return the updated stake + assert_eq!( + st.update(&keys[0], U256::from(1), true).unwrap(), + U256::from(100) + ); + assert_eq!( + st.update(&keys[0], U256::from(100), false).unwrap(), + U256::from(200) + ); + + // Commitment test + assert!(st.commitment(SnapshotVersion::Head).is_err()); + assert!(st.commitment(SnapshotVersion::EpochStart).is_ok()); + assert!(st.commitment(SnapshotVersion::LastEpochStart).is_ok()); + + // Random test for sampling keys + let mut rng = rand_chacha::ChaCha20Rng::seed_from_u64(41u64); + for _ in 0..100 { + let (_key, value) = st.sample(&mut rng).unwrap(); + // Sampled keys should have positive stake + assert!(value > &U256::from(0)); + } + + Ok(()) + } +} diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index 49bcfd150a..cb085f2a50 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -36,8 +36,11 @@ pub trait StakeTableScheme { /// # Errors /// /// Return err if key is already registered. - fn register(&mut self, new_key: Self::Key, amount: Self::Amount) - -> Result<(), StakeTableError>; + fn register( + &mut self, + new_key: &Self::Key, + amount: Self::Amount, + ) -> Result<(), StakeTableError>; /// Batch register a list of new keys. A default implementation is provided /// w/o batch optimization. @@ -53,7 +56,7 @@ pub trait StakeTableScheme { let _ = new_keys .into_iter() .zip(amounts) - .try_for_each(|(key, amount)| Self::register(self, key, amount)); + .try_for_each(|(key, amount)| Self::register(self, &key, amount)); Ok(()) } From eb0d5654515bbe73e11d8161a3da2ec8f5b4ce35 Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 19 Oct 2023 11:18:02 -0700 Subject: [PATCH 0245/1393] update commitment hash --- hotshot-stake-table/src/vec_based.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 8a04f842fc..c0371a558b 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -325,22 +325,18 @@ mod tests { }; use jf_primitives::signatures::schnorr::VerKey as SchnorrVerKey; use jf_primitives::signatures::{SchnorrSignatureScheme, SignatureScheme}; - use jf_utils::bytes_to_field_elements; // KeyType is a pair of BLS verfication key and Schnorr verification key type Key = (BLSVerKey, SchnorrVerKey); type F = ark_bn254::Fr; impl ToFields for Key { - const SIZE: usize = 6; + const SIZE: usize = 2; fn to_fields(&self) -> Vec { - let bytes = jf_utils::to_bytes!(&self.0.to_affine()).unwrap(); // won't fail - let mut v = bytes_to_field_elements(bytes); + // For light client contract, we only have to hash the Schnorr key let p = self.1.to_affine(); - v.push(p.x); - v.push(p.y); - v + vec![p.x, p.y] } } From fc3118b0562fbdb367ebab63dd95214af8e2ed62 Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 19 Oct 2023 12:10:07 -0700 Subject: [PATCH 0246/1393] config for vec_stake_table; Better documentation --- hotshot-stake-table/Cargo.toml | 2 +- hotshot-stake-table/src/vec_based.rs | 29 +++++---------------- hotshot-stake-table/src/vec_based/config.rs | 22 ++++++++++++++++ 3 files changed, 30 insertions(+), 23 deletions(-) create mode 100644 hotshot-stake-table/src/vec_based/config.rs diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 40961c1498..97756ac736 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -8,6 +8,7 @@ rust-version = { workspace = true } [dependencies] ark-bn254 = "0.4.0" +ark-ed-on-bn254 = "0.4.0" ark-ff = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } @@ -26,7 +27,6 @@ tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag typenum = { workspace = true } [dev-dependencies] -ark-ed-on-bn254 = "0.4.0" rand_chacha = { workspace = true } [features] diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index c0371a558b..f3cf51b577 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -1,5 +1,6 @@ //! A vector based stake table implementation. The commitment is the rescue hash of the list of (key, amount) pairs; +use crate::utils::{u256_to_field, ToFields}; use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; use digest::crypto_common::rand_core::CryptoRngCore; use ethereum_types::{U256, U512}; @@ -7,10 +8,12 @@ use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, Stake use jf_primitives::rescue::{sponge::RescueCRHF, RescueParameter}; use serde::{Deserialize, Serialize}; -use crate::utils::{u256_to_field, ToFields}; +pub mod config; /// Locally maintained stake table, generic over public key type `K`. -/// Whose commitment is a rescue hash over field `F`. +/// Whose commitment is a rescue hash of all key-value pairs over field `F`. +/// NOTE: the commitment is only available for the finalized versions, and is +/// computed only once when it's finalized. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StakeTable, F: RescueParameter> { /// The most up-to-date stake table, where the incoming transactions shall be performed on. @@ -314,32 +317,14 @@ where #[cfg(test)] mod tests { - use crate::utils::ToFields; - + use super::config::{FieldType as F, KeyType as Key}; use super::StakeTable; use ark_std::{rand::SeedableRng, vec::Vec}; use ethereum_types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; - use jf_primitives::signatures::bls_over_bn254::{ - BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey, - }; - use jf_primitives::signatures::schnorr::VerKey as SchnorrVerKey; + use jf_primitives::signatures::bls_over_bn254::BLSOverBN254CurveSignatureScheme; use jf_primitives::signatures::{SchnorrSignatureScheme, SignatureScheme}; - // KeyType is a pair of BLS verfication key and Schnorr verification key - type Key = (BLSVerKey, SchnorrVerKey); - type F = ark_bn254::Fr; - - impl ToFields for Key { - const SIZE: usize = 2; - - fn to_fields(&self) -> Vec { - // For light client contract, we only have to hash the Schnorr key - let p = self.1.to_affine(); - vec![p.x, p.y] - } - } - #[test] fn test_stake_table() -> Result<(), StakeTableError> { let mut st = StakeTable::::new(); diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs new file mode 100644 index 0000000000..f5a449eeb3 --- /dev/null +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -0,0 +1,22 @@ +//! Config file for stake table +use crate::utils::ToFields; +use ark_std::vec; +use jf_primitives::signatures::bls_over_bn254::VerKey as BLSVerKey; +use jf_primitives::signatures::schnorr::VerKey as SchnorrVerKey; + +/// Key type +pub type KeyType = (BLSVerKey, SchnorrVerKey); +/// Type for commitment +pub type FieldType = ark_ed_on_bn254::Fq; + +/// Hashable representation of a key +/// NOTE: commitment is only used in light client contract. +/// For this application, we needs only hash the Schnorr verfication key. +impl ToFields for KeyType { + const SIZE: usize = 2; + + fn to_fields(&self) -> Vec { + let p = self.1.to_affine(); + vec![p.x, p.y] + } +} From db4efa99024431d1ac240201f454e990d97badb5 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Sat, 21 Oct 2023 07:35:26 -0700 Subject: [PATCH 0247/1393] fix lints (#1933) --- hotshot-signature-key/src/bn254/bn254_priv.rs | 2 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 2 +- hotshot/src/traits/networking/memory_network.rs | 4 ++-- libp2p-networking/src/network/node/handle.rs | 16 ++++++++-------- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 5b16c7a93d..87d80c8dfe 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -54,7 +54,7 @@ impl BLSPrivKey { } } -// #[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] +#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.priv_key.to_string(); diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 43fc3a6c43..410a0e12fb 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -27,7 +27,7 @@ pub struct BLSPubKey { pub_key: VerKey, } -// #[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] +#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.pub_key.to_string(); diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index c67d90147e..3ea28fe7e6 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -329,7 +329,7 @@ impl ConnectedNetwork for Memory } else { let res = node.broadcast_input(vec.clone()).await; match res { - Ok(_) => { + Ok(()) => { self.inner.metrics.outgoing_broadcast_message_count.add(1); trace!(?key, "Delivered message to remote"); } @@ -373,7 +373,7 @@ impl ConnectedNetwork for Memory } else { let res = node.direct_input(vec).await; match res { - Ok(_) => { + Ok(()) => { self.inner.metrics.outgoing_direct_message_count.add(1); trace!(?recipient, "Delivered message to remote"); Ok(()) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index fe68d679ce..39c763ac31 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -139,7 +139,7 @@ impl NetworkNodeHandle { // // Tokio and async_std disagree how this function should be linted // #[allow(clippy::ignored_unit_patterns)] - pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future + pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, RET: Future> + Send + 'static, @@ -153,12 +153,12 @@ impl NetworkNodeHandle { let handle = Arc::clone(self); async_spawn(async move { let receiver = handle.receiver.receiver.lock().await; - let Some(kill_switch) = handle.receiver.recv_kill.lock().await.take() else { - tracing::error!( - "`spawn_handle` was called on a network handle that was already closed" - ); - return; - }; + let Some(kill_switch) = handle.receiver.recv_kill.lock().await.take() else { + tracing::error!( + "`spawn_handle` was called on a network handle that was already closed" + ); + return; + }; let mut next_msg = receiver.recv().boxed(); let mut kill_switch = kill_switch.recv().boxed(); loop { @@ -191,7 +191,7 @@ impl NetworkNodeHandle { } } } - }).map(|_| ()) + }) } /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. From d0de51d2e4d89eddce822149bc350191f501e25f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:52:41 -0700 Subject: [PATCH 0248/1393] VID task and exchange (#1903) * separate out vid task * almost done :) * fix vid test * remove polling from vid * lint * fix merge conflicts * Update crates/testing/tests/vid_task.rs Co-authored-by: Gus Gutoski * Update crates/task-impls/src/vid.rs Co-authored-by: Gus Gutoski * Update crates/task-impls/src/vid.rs Co-authored-by: Gus Gutoski * pr comments * clippy Signed-off-by: Rob * separate out VID certificate * add function for signing VID proposal --------- Signed-off-by: Rob Co-authored-by: Gus Gutoski --- hotshot/examples/infra/modDA.rs | 54 ++- hotshot/examples/libp2p/multi-validator.rs | 2 + hotshot/examples/libp2p/orchestrator.rs | 3 +- hotshot/examples/libp2p/types.rs | 4 +- hotshot/examples/libp2p/validator.rs | 2 + .../examples/web-server-da/multi-validator.rs | 2 + .../examples/web-server-da/orchestrator.rs | 3 +- hotshot/examples/web-server-da/types.rs | 4 +- hotshot/examples/web-server-da/validator.rs | 2 + hotshot/src/lib.rs | 38 +- hotshot/src/tasks/mod.rs | 77 ++- task-impls/src/consensus.rs | 17 +- task-impls/src/da.rs | 204 +------- task-impls/src/events.rs | 12 +- task-impls/src/lib.rs | 3 + task-impls/src/network.rs | 14 + task-impls/src/vid.rs | 437 ++++++++++++++++++ testing/src/node_types.rs | 92 +++- testing/src/test_builder.rs | 18 +- testing/src/test_launcher.rs | 5 + testing/tests/da_task.rs | 30 +- testing/tests/memory_network.rs | 6 +- testing/tests/vid_task.rs | 111 +++++ types/src/certificate.rs | 60 ++- types/src/data.rs | 4 + types/src/message.rs | 18 +- types/src/traits/election.rs | 164 ++++++- types/src/traits/node_implementation.rs | 67 ++- types/src/vote.rs | 133 ++++++ 29 files changed, 1275 insertions(+), 311 deletions(-) create mode 100644 task-impls/src/vid.rs create mode 100644 testing/tests/vid_task.rs diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index a06f023159..5e50c1d44f 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -21,6 +21,7 @@ use hotshot_orchestrator::{ config::{NetworkConfig, WebServerConfig}, }; use hotshot_task::task::FilterEvent; +use hotshot_types::traits::election::VIDExchange; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, @@ -85,6 +86,7 @@ pub async fn run_orchestrator_da< DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -107,6 +109,7 @@ pub async fn run_orchestrator_da< VIEWSYNCNETWORK, Message, >, + VIDExchange>, >, Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, @@ -148,6 +151,7 @@ pub trait RunDA< DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -170,6 +174,7 @@ pub trait RunDA< VIEWSYNCNETWORK, Message, >, + VIDExchange>, >, Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, @@ -213,6 +218,7 @@ pub trait RunDA< let da_network = self.get_da_network(); let quorum_network = self.get_quorum_network(); let view_sync_network = self.get_view_sync_network(); + let vid_network = self.get_vid_network(); // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { @@ -236,6 +242,7 @@ pub trait RunDA< quorum_network.clone(), da_network.clone(), view_sync_network.clone(), + vid_network.clone(), ), pk.clone(), entry.clone(), @@ -367,6 +374,9 @@ pub trait RunDA< ///Returns view sync network for this run fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; + ///Returns VID network for this run + fn get_vid_network(&self) -> VIDNETWORK; + /// Returns the config for this run fn get_config( &self, @@ -393,6 +403,7 @@ pub struct WebServerDARun< quorum_network: WebCommChannel, da_network: WebCommChannel, view_sync_network: WebCommChannel, + vid_network: WebCommChannel, } #[async_trait] @@ -426,6 +437,12 @@ impl< WebCommChannel, Message, >, + VIDExchange< + TYPES, + MEMBERSHIP, + WebCommChannel, + Message, + >, >, Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, @@ -437,6 +454,7 @@ impl< WebCommChannel, WebCommChannel, WebCommChannel, + WebCommChannel, NODE, > for WebServerDARun where @@ -489,6 +507,17 @@ where // Each node runs the DA network so that leaders have access to transactions and DA votes let da_network: WebCommChannel = WebCommChannel::new( + WebServerNetwork::create( + &host.to_string(), + port, + wait_between_polls, + pub_key.clone(), + true, + ) + .into(), + ); + + let vid_network: WebCommChannel = WebCommChannel::new( WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) .into(), ); @@ -498,6 +527,7 @@ where quorum_network, da_network, view_sync_network, + vid_network, } } @@ -513,6 +543,10 @@ where self.view_sync_network.clone() } + fn get_vid_network(&self) -> WebCommChannel { + self.vid_network.clone() + } + fn get_config( &self, ) -> NetworkConfig< @@ -537,6 +571,7 @@ pub struct Libp2pDARun, MEMBERSHIP quorum_network: Libp2pCommChannel, da_network: Libp2pCommChannel, view_sync_network: Libp2pCommChannel, + vid_network: Libp2pCommChannel, } #[async_trait] @@ -570,6 +605,12 @@ impl< Libp2pCommChannel, Message, >, + VIDExchange< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, >, Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, @@ -581,6 +622,7 @@ impl< Libp2pCommChannel, Libp2pCommChannel, Libp2pCommChannel, + Libp2pCommChannel, NODE, > for Libp2pDARun where @@ -722,11 +764,15 @@ where let da_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + let vid_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + Libp2pDARun { config, quorum_network, da_network, view_sync_network, + vid_network, } } @@ -742,6 +788,10 @@ where self.view_sync_network.clone() } + fn get_vid_network(&self) -> Libp2pCommChannel { + self.vid_network.clone() + } + fn get_config( &self, ) -> NetworkConfig< @@ -760,6 +810,7 @@ pub async fn main_entry_point< DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -782,11 +833,12 @@ pub async fn main_entry_point< VIEWSYNCNETWORK, Message, >, + VIDExchange>, >, Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, >, - RUNDA: RunDA, + RUNDA: RunDA, >( args: ValidatorArgs, ) where diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index 3ed46fa979..b45c706304 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -7,6 +7,7 @@ use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; +use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -54,6 +55,7 @@ async fn main() { DANetwork, QuorumNetwork, ViewSyncNetwork, + VIDNetwork, NodeImpl, ThisRun, >(ValidatorArgs { diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 594d004a93..79e0b01560 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -9,7 +9,7 @@ use types::ThisMembership; use crate::{ infra::OrchestratorArgs, infra_da::run_orchestrator_da, - types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, + types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, }; #[path = "../infra/mod.rs"] @@ -34,6 +34,7 @@ async fn main() { DANetwork, QuorumNetwork, ViewSyncNetwork, + VIDNetwork, NodeImpl, >(args) .await; diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 79b1ea1419..d42acfacdd 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -11,7 +11,7 @@ use hotshot_types::{ data::{DAProposal, QuorumProposal, SequencingLeaf}, message::{Message, SequencingMessage}, traits::{ - election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, }, vote::{DAVote, QuorumVote, ViewSyncVote}, @@ -26,6 +26,7 @@ pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = Libp2pCommChannel; +pub type VIDNetwork = Libp2pCommChannel; pub type QuorumNetwork = Libp2pCommChannel; pub type ViewSyncNetwork = Libp2pCommChannel; @@ -60,6 +61,7 @@ impl NodeImplementation for NodeImpl { ViewSyncNetwork, Message, >, + VIDExchange>, >; type ConsensusMessage = SequencingMessage; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index ab44e02991..8f6084ddd5 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -2,6 +2,7 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot::demo::DemoTypes; use tracing::{info, instrument}; +use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -34,6 +35,7 @@ async fn main() { DANetwork, QuorumNetwork, ViewSyncNetwork, + VIDNetwork, NodeImpl, ThisRun, >(args) diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index 3ed46fa979..b45c706304 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -7,6 +7,7 @@ use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; +use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -54,6 +55,7 @@ async fn main() { DANetwork, QuorumNetwork, ViewSyncNetwork, + VIDNetwork, NodeImpl, ThisRun, >(ValidatorArgs { diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 594d004a93..0853caaef5 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -4,7 +4,7 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot::demo::DemoTypes; use tracing::instrument; -use types::ThisMembership; +use types::{ThisMembership, VIDNetwork}; use crate::{ infra::OrchestratorArgs, @@ -34,6 +34,7 @@ async fn main() { DANetwork, QuorumNetwork, ViewSyncNetwork, + VIDNetwork, NodeImpl, >(args) .await; diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 017784b354..3de7a1531f 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -11,7 +11,7 @@ use hotshot_types::{ data::{DAProposal, QuorumProposal, SequencingLeaf}, message::{Message, SequencingMessage}, traits::{ - election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, }, vote::{DAVote, QuorumVote, ViewSyncVote}, @@ -26,6 +26,7 @@ pub type ThisLeaf = SequencingLeaf; pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = WebCommChannel; +pub type VIDNetwork = WebCommChannel; pub type QuorumNetwork = WebCommChannel; pub type ViewSyncNetwork = WebCommChannel; @@ -60,6 +61,7 @@ impl NodeImplementation for NodeImpl { ViewSyncNetwork, Message, >, + VIDExchange>, >; type ConsensusMessage = SequencingMessage; diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index ab44e02991..8f6084ddd5 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -2,6 +2,7 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot::demo::DemoTypes; use tracing::{info, instrument}; +use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; @@ -34,6 +35,7 @@ async fn main() { DANetwork, QuorumNetwork, ViewSyncNetwork, + VIDNetwork, NodeImpl, ThisRun, >(args) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index dcf6670dc7..d105b9ca68 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -54,7 +54,9 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - certificate::TimeoutCertificate, traits::node_implementation::SequencingTimeoutEx, + certificate::{TimeoutCertificate, VIDCertificate}, + data::VidDisperse, + traits::node_implementation::SequencingTimeoutEx, }; use hotshot_types::{ @@ -73,7 +75,7 @@ use hotshot_types::{ network::{CommunicationChannel, NetworkError}, node_implementation::{ ChannelMaps, CommitteeEx, ExchangesType, NodeType, SendToTasks, SequencingQuorumEx, - ViewSyncEx, + VIDEx, ViewSyncEx, }, signature_key::SignatureKey, state::ConsensusTime, @@ -91,6 +93,7 @@ use std::{ sync::Arc, time::Duration, }; +use tasks::add_vid_task; use tracing::{debug, error, info, instrument, trace, warn}; // -- Rexports // External @@ -658,6 +661,14 @@ where Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, + VIDEx: ConsensusExchange< + TYPES, + Message, + Proposal = VidDisperse, + Certificate = VIDCertificate, + Commitment = Commitment, + Membership = MEMBERSHIP, + > + 'static, SequencingTimeoutEx: ConsensusExchange< TYPES, Message, @@ -671,6 +682,7 @@ where &self.inner.consensus } + #[allow(clippy::too_many_lines)] async fn run_tasks(self) -> SystemContextHandle { // ED Need to set first first number to 1, or properly trigger the change upon start let task_runner = TaskRunner::new(); @@ -682,6 +694,7 @@ where let quorum_exchange = self.inner.exchanges.quorum_exchange().clone(); let committee_exchange = self.inner.exchanges.committee_exchange().clone(); let view_sync_exchange = self.inner.exchanges.view_sync_exchange().clone(); + let vid_exchange = self.inner.exchanges.vid_exchange().clone(); let handle = SystemContextHandle { registry, @@ -709,6 +722,12 @@ where view_sync_exchange.clone(), ) .await; + let task_runner = add_network_message_task( + task_runner, + internal_event_stream.clone(), + vid_exchange.clone(), + ) + .await; let task_runner = add_network_event_task( task_runner, internal_event_stream.clone(), @@ -730,6 +749,13 @@ where NetworkTaskKind::ViewSync, ) .await; + let task_runner = add_network_event_task( + task_runner, + internal_event_stream.clone(), + vid_exchange.clone(), + NetworkTaskKind::VID, + ) + .await; let task_runner = add_consensus_task( task_runner, internal_event_stream.clone(), @@ -744,6 +770,13 @@ where handle.clone(), ) .await; + let task_runner = add_vid_task( + task_runner, + internal_event_stream.clone(), + vid_exchange.clone(), + handle.clone(), + ) + .await; let task_runner = add_transaction_task( task_runner, internal_event_stream.clone(), @@ -761,7 +794,6 @@ where task_runner.launch().await; info!("Task runner exited!"); }); - handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b1ee195e63..7207a273a9 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -24,11 +24,12 @@ use hotshot_task_impls::{ NetworkMessageTaskTypes, NetworkTaskKind, }, transactions::{TransactionTaskState, TransactionsTaskTypes}, + vid::{VIDTaskState, VIDTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::{TimeoutCertificate, ViewSyncCertificate}, + certificate::{TimeoutCertificate, VIDCertificate, ViewSyncCertificate}, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, message::{Message, Messages, SequencingMessage}, @@ -37,7 +38,7 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, - SequencingTimeoutEx, ViewSyncEx, + SequencingTimeoutEx, VIDEx, ViewSyncEx, }, state::ConsensusTime, }, @@ -306,7 +307,8 @@ where timeout_task: async_spawn(async move {}), event_stream: event_stream.clone(), output_event_stream: output_stream, - certs: HashMap::new(), + da_certs: HashMap::new(), + vid_certs: HashMap::new(), current_proposal: None, id: handle.hotshot.inner.id, qc: None, @@ -362,6 +364,75 @@ where ) } +/// add the VID task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_vid_task< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, +>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + vid_exchange: VIDEx, + handle: SystemContextHandle, +) -> TaskRunner +where + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ + // build the vid task + let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let registry = task_runner.registry.clone(); + let vid_state = VIDTaskState { + registry: registry.clone(), + api: c_api.clone(), + consensus: handle.hotshot.get_consensus(), + cur_view: TYPES::Time::new(0), + vid_exchange: vid_exchange.into(), + vote_collector: None, + event_stream: event_stream.clone(), + id: handle.hotshot.inner.id, + }; + let vid_event_handler = HandleEvent(Arc::new( + move |event, mut state: VIDTaskState>| { + async move { + let completion_status = state.handle_event(event).await; + (completion_status, state) + } + .boxed() + }, + )); + let vid_name = "VID Task"; + let vid_event_filter = FilterEvent(Arc::new( + VIDTaskState::>::filter, + )); + + let vid_task_builder = TaskBuilder::< + VIDTaskTypes>, + >::new(vid_name.to_string()) + .register_event_stream(event_stream.clone(), vid_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(vid_state) + .register_event_handler(vid_event_handler); + // impossible for unwrap to fail + // we *just* registered + let vid_task_id = vid_task_builder.get_task_id().unwrap(); + let vid_task = VIDTaskTypes::build(vid_task_builder).launch(); + task_runner.add_task(vid_task_id, vid_name.to_string(), vid_task) +} + /// add the Data Availability task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 748e6e9f1d..13c1390b3e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -16,7 +16,7 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, + certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, consensus::{Consensus, View}, data::{LeafType, ProposalType, QuorumProposal, SequencingLeaf}, event::{Event, EventType}, @@ -128,7 +128,10 @@ pub struct SequencingConsensusTaskState< pub output_event_stream: ChannelStream>, /// All the DA certs we've received for current and future views. - pub certs: HashMap>, + pub da_certs: HashMap>, + + /// All the VID certs we've received for current and future views. + pub vid_certs: HashMap>, /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync @@ -486,7 +489,7 @@ where // Only vote if you have the DA cert // ED Need to update the view number this is stored under? - if let Some(cert) = self.certs.get(&(proposal.get_view_number())) { + if let Some(cert) = self.da_certs.get(&(proposal.get_view_number())) { let view = cert.view_number; let vote_token = self.quorum_exchange.make_vote_token(view); // TODO: do some of this logic without the vote token check, only do that when voting. @@ -589,7 +592,7 @@ where // Remove old certs, we won't vote on past views for view in *self.cur_view..*new_view - 1 { let v = TYPES::Time::new(view); - self.certs.remove(&v); + self.da_certs.remove(&v); } self.cur_view = new_view; @@ -989,7 +992,7 @@ where for v in (*self.cur_view)..=(*view) { let time = TYPES::Time::new(v); - self.certs.remove(&time); + self.da_certs.remove(&time); } } SequencingHotShotEvent::QuorumVoteRecv(vote) => { @@ -1247,7 +1250,7 @@ where debug!("DAC Recved for view ! {}", *cert.view_number); let view = cert.view_number; - self.certs.insert(view, cert); + self.da_certs.insert(view, cert); if self.vote_if_able().await { self.current_proposal = None; @@ -1257,7 +1260,7 @@ where debug!("VID cert received for view ! {}", *cert.view_number); let view = cert.view_number; - self.certs.insert(view, cert); // TODO new cert type for VID https://github.com/EspressoSystems/HotShot/issues/1701 + self.vid_certs.insert(view, cert); // TODO Make sure we aren't voting for an arbitrarily old round for no reason if self.vote_if_able().await { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b7d5ea78ad..9492f3fc6e 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -12,6 +12,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, @@ -27,7 +28,6 @@ use hotshot_types::{ BlockPayload, }, utils::ViewInner, - vote::{DAVoteAccumulator, VoteType}, }; use snafu::Snafu; @@ -186,47 +186,6 @@ where } } } - SequencingHotShotEvent::VidVoteRecv(vote) => { - // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 - - debug!("VID vote recv, collection task {:?}", vote.get_view()); - // panic!("Vote handle received DA vote for view {}", *vote.current_view); - - let accumulator = state.accumulator.left().unwrap(); - - match state.committee_exchange.accumulate_vote( - accumulator, - &vote, - &vote.block_commitment, - ) { - Left(new_accumulator) => { - state.accumulator = either::Left(new_accumulator); - } - - Right(vid_cert) => { - debug!("Sending VID cert! {:?}", vid_cert.view_number); - state - .event_stream - .publish(SequencingHotShotEvent::VidCertSend( - vid_cert.clone(), - state.committee_exchange.public_key().clone(), - )) - .await; - - state.accumulator = Right(vid_cert.clone()); - state - .committee_exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *vid_cert.view_number, - )) - .await; - - // Return completed at this point - return (Some(HotShotTaskCompleted::ShutDown), state); - } - } - } SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), _ => { error!("unexpected event {:?}", event); @@ -412,165 +371,6 @@ where .await; }; } - SequencingHotShotEvent::VidVoteRecv(vote) => { - // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 - - // warn!( - // "VID vote recv, Main Task {:?}, key: {:?}", - // vote.current_view, - // self.committee_exchange.public_key() - // ); - // Check if we are the leader and the vote is from the sender. - let view = vote.current_view; - if !self.committee_exchange.is_leader(view) { - error!( - "We are not the VID leader for view {} are we leader for next view? {}", - *view, - self.committee_exchange.is_leader(view + 1) - ); - return None; - } - - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); - let collection_view = - if let Some((collection_view, collection_id, _)) = &self.vote_collector { - // TODO: Is this correct for consecutive leaders? - if view > *collection_view { - // warn!("shutting down for view {:?}", collection_view); - self.registry.shutdown_task(*collection_id).await; - } - *collection_view - } else { - TYPES::Time::new(0) - }; - - let new_accumulator = DAVoteAccumulator { - da_vote_outcomes: HashMap::new(), - success_threshold: self.committee_exchange.success_threshold(), - sig_lists: Vec::new(), - signers: bitvec![0; self.committee_exchange.total_nodes()], - phantom: PhantomData, - }; - - let accumulator = self.committee_exchange.accumulate_vote( - new_accumulator, - &vote, - &vote.clone().block_commitment, - ); - - if view > collection_view { - let state = DAVoteCollectionTaskState { - committee_exchange: self.committee_exchange.clone(), - - accumulator, - cur_view: view, - event_stream: self.event_stream.clone(), - id: self.id, - }; - let name = "VID Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::VidVoteRecv(_)) - })); - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - let _task = - async_spawn( - async move { DAVoteCollectionTypes::build(builder).launch().await }, - ); - self.vote_collector = Some((view, id, stream_id)); - } else if let Some((_, _, stream_id)) = self.vote_collector { - self.event_stream - .direct_message(stream_id, SequencingHotShotEvent::VidVoteRecv(vote)) - .await; - }; - } - SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { - // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 - debug!( - "VID disperse received for view: {:?}", - disperse.data.get_view_number() - ); - - // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view - let view = disperse.data.get_view_number(); - - // Allow a DA proposal that is one view older, in case we have voted on a quorum - // proposal and updated the view. - // `self.cur_view` should be at least 1 since there is a view change before getting - // the `DAProposalRecv` event. Otherewise, the view number subtraction below will - // cause an overflow error. - // TODO ED Revisit this - - if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { - warn!("Throwing away VID disperse data that is more than one view older"); - return None; - } - - debug!("VID disperse data is fresh."); - let block_commitment = disperse.data.commitment; - - // ED Is this the right leader? - let view_leader_key = self.committee_exchange.get_leader(view); - if view_leader_key != sender { - error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); - return None; - } - - if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { - error!("Could not verify VID proposal sig."); - return None; - } - - let vote_token = self.committee_exchange.make_vote_token(view); - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for VID quorum on {:?}", view); - } - Ok(Some(vote_token)) => { - // Generate and send vote - let vote = self.committee_exchange.create_vid_message( - block_commitment, - view, - vote_token, - ); - - // ED Don't think this is necessary? - // self.cur_view = view; - - debug!("Sending vote to the VID leader {:?}", vote.current_view); - self.event_stream - .publish(SequencingHotShotEvent::VidVoteSend(vote)) - .await; - let mut consensus = self.consensus.write().await; - - // Ensure this view is in the view map for garbage collection, but do not overwrite if - // there is already a view there: the replica task may have inserted a `Leaf` view which - // contains strictly more information. - consensus.state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { - block: block_commitment, - }, - }); - - // Record the block we have promised to make available. - // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_blocks.insert(proposal.data.deltas); - } - } - } SequencingHotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; @@ -676,8 +476,6 @@ where | SequencingHotShotEvent::Shutdown | SequencingHotShotEvent::BlockReady(_, _) | SequencingHotShotEvent::Timeout(_) - | SequencingHotShotEvent::VidDisperseRecv(_, _) - | SequencingHotShotEvent::VidVoteRecv(_) | SequencingHotShotEvent::ViewChange(_) ) } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 4672e791ff..9ccdee9883 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -2,13 +2,13 @@ use crate::view_sync::ViewSyncPhase; use commit::Commitment; use either::Either; use hotshot_types::{ - certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, + certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, data::{DAProposal, VidDisperse}, message::Proposal, traits::node_implementation::{ NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, }, - vote::{DAVote, QuorumVote, TimeoutVote, ViewSyncVote}, + vote::{DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncVote}, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks @@ -82,17 +82,17 @@ pub enum SequencingHotShotEvent> { /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal /// /// Like [`DAVoteSend`] - VidVoteSend(DAVote), + VidVoteSend(VIDVote), /// A VID vote has been received by the network; handled by the DA task /// /// Like [`DAVoteRecv`] - VidVoteRecv(DAVote), + VidVoteRecv(VIDVote), /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task /// /// Like [`DACSend`] - VidCertSend(DACertificate, TYPES::SignatureKey), + VidCertSend(VIDCertificate, TYPES::SignatureKey), /// A VID cert has been recieved by the network; handled by the consensus task /// /// Like [`DACRecv`] - VidCertRecv(DACertificate), + VidCertRecv(VIDCertificate), } diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 5e7492d84a..01dcd1da6f 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -31,3 +31,6 @@ pub mod harness; /// The task which implements view synchronization pub mod view_sync; + +/// The task which implements verifiable information dispersal +pub mod vid; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 62c67757d8..106805cf3e 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -33,6 +33,8 @@ pub enum NetworkTaskKind { Committee, /// view sync ViewSync, + /// vid + VID, } /// the network message task state @@ -337,6 +339,7 @@ impl< NetworkTaskKind::Quorum => FilterEvent(Arc::new(Self::quorum_filter)), NetworkTaskKind::Committee => FilterEvent(Arc::new(Self::committee_filter)), NetworkTaskKind::ViewSync => FilterEvent(Arc::new(Self::view_sync_filter)), + NetworkTaskKind::VID => FilterEvent(Arc::new(Self::vid_filter)), } } @@ -367,6 +370,17 @@ impl< ) } + /// vid filter + fn vid_filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::VidDisperseSend(_, _) + | SequencingHotShotEvent::VidVoteSend(_) + | SequencingHotShotEvent::ViewChange(_) + ) + } + /// view sync filter fn view_sync_filter(event: &SequencingHotShotEvent) -> bool { matches!( diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs new file mode 100644 index 0000000000..bde44d7781 --- /dev/null +++ b/task-impls/src/vid.rs @@ -0,0 +1,437 @@ +use crate::events::SequencingHotShotEvent; +use async_compatibility_layer::art::async_spawn; +use async_lock::RwLock; + +use bitvec::prelude::*; +use commit::Commitment; +use either::{Either, Left, Right}; +use futures::FutureExt; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, +}; +use hotshot_types::vote::VoteType; +use hotshot_types::{ + certificate::VIDCertificate, traits::election::SignedCertificate, vote::VIDVoteAccumulator, +}; +use hotshot_types::{ + consensus::{Consensus, View}, + data::{ProposalType, SequencingLeaf}, + message::{Message, SequencingMessage}, + traits::{ + consensus_api::SequencingConsensusApi, + election::{ConsensusExchange, VIDExchangeType}, + node_implementation::{NodeImplementation, NodeType, VIDEx}, + signature_key::SignatureKey, + state::ConsensusTime, + }, + utils::ViewInner, +}; + +use snafu::Snafu; +use std::marker::PhantomData; +use std::{collections::HashMap, sync::Arc}; +use tracing::{debug, error, instrument, warn}; + +#[derive(Snafu, Debug)] +/// Error type for consensus tasks +pub struct ConsensusTaskError {} + +/// Tracks state of a DA task +pub struct VIDTaskState< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, +> where + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ + /// The state's api + pub api: A, + /// Global registry task for the state + pub registry: GlobalRegistry, + + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + /// Reference to consensus. Leader will require a read lock on this. + pub consensus: Arc>>>, + + /// the VID exchange + pub vid_exchange: Arc>, + + /// The view and ID of the current vote collection task, if there is one. + pub vote_collector: Option<(TYPES::Time, usize, usize)>, + + /// Global events stream to publish events + pub event_stream: ChannelStream>, + + /// This state's ID + pub id: u64, +} + +/// Struct to maintain DA Vote Collection task state +pub struct VIDVoteCollectionTaskState< + TYPES: NodeType, + I: NodeImplementation>, +> where + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ + /// the vid exchange + pub vid_exchange: Arc>, + #[allow(clippy::type_complexity)] + /// Accumulates VID votes + pub accumulator: Either< + as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Commitment, + >>::VoteAccumulator, + VIDCertificate, + >, + /// the current view + pub cur_view: TYPES::Time, + /// event stream for channel events + pub event_stream: ChannelStream>, + /// the id of this task state + pub id: u64, +} + +impl>> TS + for VIDVoteCollectionTaskState +where + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ +} + +#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "VID Vote Collection Task", level = "error")] +async fn vote_handle( + mut state: VIDVoteCollectionTaskState, + event: SequencingHotShotEvent, +) -> ( + Option, + VIDVoteCollectionTaskState, +) +where + TYPES: NodeType, + I: NodeImplementation>, + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ + match event { + SequencingHotShotEvent::VidVoteRecv(vote) => { + // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + + debug!("VID vote recv, collection task {:?}", vote.get_view()); + // panic!("Vote handle received DA vote for view {}", *vote.current_view); + + let accumulator = state.accumulator.left().unwrap(); + + match state + .vid_exchange + .accumulate_vote(accumulator, &vote, &vote.block_commitment) + { + Left(new_accumulator) => { + state.accumulator = either::Left(new_accumulator); + } + + Right(vid_cert) => { + debug!("Sending VID cert! {:?}", vid_cert.view_number); + state + .event_stream + .publish(SequencingHotShotEvent::VidCertSend( + vid_cert.clone(), + state.vid_exchange.public_key().clone(), + )) + .await; + + state.accumulator = Right(vid_cert.clone()); + + // Return completed at this point + return (Some(HotShotTaskCompleted::ShutDown), state); + } + } + } + SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), + _ => { + error!("unexpected event {:?}", event); + } + } + (None, state) +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > VIDTaskState +where + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ + /// main task event handler + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] + pub async fn handle_event( + &mut self, + event: SequencingHotShotEvent, + ) -> Option { + match event { + SequencingHotShotEvent::VidVoteRecv(vote) => { + // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + + // warn!( + // "VID vote recv, Main Task {:?}, key: {:?}", + // vote.current_view, + // self.vid_exchange.public_key() + // ); + // Check if we are the leader and the vote is from the sender. + let view = vote.current_view; + if !self.vid_exchange.is_leader(view) { + error!( + "We are not the VID leader for view {} are we leader for next view? {}", + *view, + self.vid_exchange.is_leader(view + 1) + ); + return None; + } + + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = + if let Some((collection_view, collection_id, _)) = &self.vote_collector { + // TODO: Is this correct for consecutive leaders? + if view > *collection_view { + // warn!("shutting down for view {:?}", collection_view); + self.registry.shutdown_task(*collection_id).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; + + let new_accumulator = VIDVoteAccumulator { + vid_vote_outcomes: HashMap::new(), + success_threshold: self.vid_exchange.success_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.vid_exchange.total_nodes()], + phantom: PhantomData, + }; + + let accumulator = self.vid_exchange.accumulate_vote( + new_accumulator, + &vote, + &vote.clone().block_commitment, + ); + + if view > collection_view { + let state = VIDVoteCollectionTaskState { + vid_exchange: self.vid_exchange.clone(), + + accumulator, + cur_view: view, + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "VID Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!(event, SequencingHotShotEvent::VidVoteRecv(_)) + })); + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + let _task = async_spawn(async move { + VIDVoteCollectionTypes::build(builder).launch().await + }); + self.vote_collector = Some((view, id, stream_id)); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message(stream_id, SequencingHotShotEvent::VidVoteRecv(vote)) + .await; + }; + } + SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { + // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 + debug!( + "VID disperse received for view: {:?}", + disperse.data.get_view_number() + ); + + // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view + let view = disperse.data.get_view_number(); + + // Allow a DA proposal that is one view older, in case we have voted on a quorum + // proposal and updated the view. + // `self.cur_view` should be at least 1 since there is a view change before getting + // the `DAProposalRecv` event. Otherewise, the view number subtraction below will + // cause an overflow error. + if view < self.cur_view - 1 { + warn!("Throwing away VID disperse data that is more than one view older"); + return None; + } + + debug!("VID disperse data is fresh."); + let block_commitment = disperse.data.commitment; + + // ED Is this the right leader? + let view_leader_key = self.vid_exchange.get_leader(view); + if view_leader_key != sender { + error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); + return None; + } + + if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { + error!("Could not verify VID proposal sig."); + return None; + } + + let vote_token = self.vid_exchange.make_vote_token(view); + match vote_token { + Err(e) => { + error!("Failed to generate vote token for {:?} {:?}", view, e); + } + Ok(None) => { + debug!("We were not chosen for VID quorum on {:?}", view); + } + Ok(Some(vote_token)) => { + // Generate and send vote + let vote = self.vid_exchange.create_vid_message( + block_commitment, + view, + vote_token, + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + debug!("Sending vote to the VID leader {:?}", vote.current_view); + self.event_stream + .publish(SequencingHotShotEvent::VidVoteSend(vote)) + .await; + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: block_commitment, + }, + }); + + // Record the block we have promised to make available. + // TODO https://github.com/EspressoSystems/HotShot/issues/1692 + // consensus.saved_blocks.insert(proposal.data.deltas); + } + } + } + SequencingHotShotEvent::ViewChange(view) => { + if *self.cur_view >= *view { + return None; + } + + if *view - *self.cur_view > 1 { + error!("View changed by more than 1 going to view {:?}", view); + } + self.cur_view = view; + + return None; + } + + SequencingHotShotEvent::Shutdown => { + return Some(HotShotTaskCompleted::ShutDown); + } + _ => { + error!("unexpected event {:?}", event); + } + } + None + } + + /// Filter the DA event. + pub fn filter(event: &SequencingHotShotEvent) -> bool { + matches!( + event, + SequencingHotShotEvent::Shutdown + | SequencingHotShotEvent::VidDisperseRecv(_, _) + | SequencingHotShotEvent::VidVoteRecv(_) + | SequencingHotShotEvent::ViewChange(_) + ) + } +} + +/// task state implementation for VID Task +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + ConsensusMessage = SequencingMessage, + >, + A: SequencingConsensusApi, I> + 'static, + > TS for VIDTaskState +where + VIDEx: ConsensusExchange< + TYPES, + Message, + Certificate = VIDCertificate, + Commitment = Commitment, + >, +{ +} + +/// Type alias for VID Vote Collection Types +pub type VIDVoteCollectionTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + VIDVoteCollectionTaskState, +>; + +/// Type alias for VID Task Types +pub type VIDTaskTypes = HSTWithEvent< + ConsensusTaskError, + SequencingHotShotEvent, + ChannelStream>, + VIDTaskState, +>; diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 7e5308b35f..1b8e4df7e5 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -19,7 +19,7 @@ use hotshot_types::{ data::{QuorumProposal, SequencingLeaf, ViewNumber}, message::{Message, SequencingMessage}, traits::{ - election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}, + election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, network::{TestableChannelImplementation, TestableNetworkingImplementation}, node_implementation::{ChannelMaps, NodeType, SequencingExchanges, TestableExchange}, }, @@ -99,6 +99,17 @@ type StaticWebViewSyncComm = type StaticCombinedViewSyncComm = CombinedCommChannel; +pub type StaticMemoryVIDComm = + MemoryCommChannel; + +type StaticLibp2pVIDComm = + Libp2pCommChannel; + +type StaticWebVIDComm = WebCommChannel; + +type StaticCombinedVIDComm = + CombinedCommChannel; + pub type SequencingLibp2pExchange = SequencingExchanges< SequencingTestTypes, Message, @@ -123,6 +134,12 @@ pub type SequencingLibp2pExchange = SequencingExchanges< StaticLibp2pViewSyncComm, Message, >, + VIDExchange< + SequencingTestTypes, + StaticMembership, + StaticLibp2pVIDComm, + Message, + >, >; impl NodeImplementation for SequencingLibp2pImpl { @@ -172,6 +189,10 @@ impl SequencingTestTypes, Message, >>::Networking, + , + >>::Networking, ) + 'static, > { let network_generator = Arc::new(, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network.clone()); + let vid_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); - (quorum_chan, committee_chan, view_sync_chan) + (quorum_chan, committee_chan, view_sync_chan, vid_chan) }) } } @@ -238,6 +265,12 @@ pub type SequencingMemoryExchange = SequencingExchanges< StaticMemoryViewSyncComm, Message, >, + VIDExchange< + SequencingTestTypes, + StaticMembership, + StaticMemoryVIDComm, + Message, + >, >; impl @@ -268,6 +301,10 @@ impl SequencingTestTypes, Message, >>::Networking, + , + >>::Networking, ) + 'static, > { let network_generator = Arc::new(, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( - )(network_da); + )(network_da.clone()); let view_sync_chan = <, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); + let vid_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); - (quorum_chan, committee_chan, view_sync_chan) + (quorum_chan, committee_chan, view_sync_chan, vid_chan) }) } } @@ -371,6 +414,12 @@ pub type SequencingWebExchanges = SequencingExchanges< StaticWebViewSyncComm, Message, >, + VIDExchange< + SequencingTestTypes, + StaticMembership, + StaticWebVIDComm, + Message, + >, >; impl @@ -401,6 +450,10 @@ impl SequencingTestTypes, Message, >>::Networking, + , + >>::Networking, ) + 'static, > { let network_generator = Arc::new(, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( - )(network_da); + )(network_da.clone()); let view_sync_chan = <, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); + let vid_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); - (quorum_chan, committee_chan, view_sync_chan) + (quorum_chan, committee_chan, view_sync_chan, vid_chan) }) } } @@ -501,6 +560,12 @@ pub type SequencingCombinedExchange = SequencingExchanges< StaticCombinedViewSyncComm, Message, >, + VIDExchange< + SequencingTestTypes, + StaticMembership, + StaticCombinedVIDComm, + Message, + >, >; impl NodeImplementation for SequencingCombinedImpl { @@ -550,6 +615,10 @@ impl SequencingTestTypes, Message, >>::Networking, + , + >>::Networking, ) + 'static, > { let web_server_network_generator = Arc::new(, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( - )(network_da); + )(network_da.clone()); let view_sync_chan = <, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); - (quorum_chan, committee_chan, view_sync_chan) + + let vid_chan = + <, + >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + )(network_da); + (quorum_chan, committee_chan, view_sync_chan, vid_chan) }) } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 3515f04368..1bf5ace197 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -260,11 +260,19 @@ impl TestMetadata { let overall_safety_task_generator = overall_safety_properties.build(); let spinning_task_generator = spinning_properties.build(); TestLauncher { - resource_generator: ResourceGenerators { - channel_generator: <>::Exchanges as TestableExchange<_, _, _>>::gen_comm_channels(total_nodes, num_bootstrap_nodes, da_committee_size), - storage: Box::new(|_| I::construct_tmp_storage().unwrap()), - config, - }, + resource_generator: + ResourceGenerators { + channel_generator: + <>::Exchanges as TestableExchange< + _, + _, + _, + >>::gen_comm_channels( + total_nodes, num_bootstrap_nodes, da_committee_size + ), + storage: Box::new(|_| I::construct_tmp_storage().unwrap()), + config, + }, metadata: self, txn_task_generator, overall_safety_task_generator, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 4fb230b315..25c6b14459 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -44,6 +44,11 @@ pub type Networks = ( >::Leaf, Message, >>::ViewSyncExchange as ConsensusExchange>>::Networking, + <<>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, + >>::VIDExchange as ConsensusExchange>>::Networking, ); /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index e31c34e6aa..0c32e724cc 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -7,7 +7,7 @@ use hotshot_testing::{ }; use hotshot_types::{ block_impl::VIDTransaction, - data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, + data::{DAProposal, VidSchemeTrait, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, @@ -57,15 +57,7 @@ async fn test_da_task() { data: proposal, signature, }; - let vid_proposal = Proposal { - data: VidDisperse { - view_number: message.data.view_number, - commitment: block.commit(), - shares: vid_disperse.shares, - common: vid_disperse.common, - }, - signature: message.signature.clone(), - }; + // TODO for now reuse the same block commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 @@ -84,10 +76,7 @@ async fn test_da_task() { message.clone(), pub_key, )); - input.push(SequencingHotShotEvent::VidDisperseRecv( - vid_proposal.clone(), - pub_key, - )); + input.push(SequencingHotShotEvent::Shutdown); output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); @@ -108,19 +97,8 @@ async fn test_da_task() { committee_exchange.create_da_message(block.commit(), ViewNumber::new(2), vote_token); output.insert(SequencingHotShotEvent::DAVoteSend(da_vote), 1); - let vote_token = committee_exchange - .make_vote_token(ViewNumber::new(2)) - .unwrap() - .unwrap(); - let vid_vote = - committee_exchange.create_vid_message(block.commit(), ViewNumber::new(2), vote_token); - output.insert(SequencingHotShotEvent::VidVoteSend(vid_vote), 1); - output.insert(SequencingHotShotEvent::DAProposalRecv(message, pub_key), 1); - output.insert( - SequencingHotShotEvent::VidDisperseRecv(vid_proposal, pub_key), - 1, - ); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(SequencingHotShotEvent::Shutdown, 1); diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index a4ee4282e7..3ad84cf6e6 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -17,7 +17,9 @@ use hotshot_types::block_impl::{VIDBlockPayload, VIDTransaction}; use hotshot_types::certificate::ViewSyncCertificate; use hotshot_types::data::{DAProposal, QuorumProposal, SequencingLeaf}; use hotshot_types::message::{Message, SequencingMessage}; -use hotshot_types::traits::election::{CommitteeExchange, QuorumExchange, ViewSyncExchange}; +use hotshot_types::traits::election::{ + CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange, +}; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType, SequencingExchanges}; @@ -67,6 +69,7 @@ pub type ThisMembership = GeneralStaticCommittee; pub type QuorumNetwork = MemoryCommChannel; pub type ViewSyncNetwork = MemoryCommChannel; +pub type VIDNetwork = MemoryCommChannel; pub type ThisDAProposal = DAProposal; pub type ThisDAVote = DAVote; @@ -99,6 +102,7 @@ impl NodeImplementation for TestImpl { ViewSyncNetwork, Message, >, + VIDExchange>, >; type ConsensusMessage = SequencingMessage; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs new file mode 100644 index 0000000000..cf19740e1d --- /dev/null +++ b/testing/tests/vid_task.rs @@ -0,0 +1,111 @@ +use commit::Committable; +use hotshot::{tasks::add_vid_task, HotShotSequencingConsensusApi}; +use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_testing::{ + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + task_helpers::vid_init, +}; +use hotshot_types::traits::election::VIDExchangeType; +use hotshot_types::{ + block_impl::VIDTransaction, + data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, + traits::{ + consensus_api::ConsensusSharedApi, election::ConsensusExchange, + node_implementation::ExchangesType, state::ConsensusTime, + }, +}; +use std::collections::HashMap; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_vid_task() { + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + use hotshot_types::{block_impl::VIDBlockPayload, message::Proposal}; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // Build the API for node 2. + let handle = build_system_handle(2).await.0; + let api: HotShotSequencingConsensusApi = + HotShotSequencingConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let vid_exchange = api.inner.exchanges.vid_exchange().clone(); + let pub_key = *api.public_key(); + + let vid = vid_init(); + let txn = vec![0u8]; + let vid_disperse = vid.disperse(&txn).unwrap(); + let block_commitment = vid_disperse.commit; + let block = VIDBlockPayload { + transactions: vec![VIDTransaction(txn)], + commitment: block_commitment, + }; + + let signature = vid_exchange.sign_vid_proposal(&block.commit()); + let proposal: DAProposal = DAProposal { + deltas: block.clone(), + view_number: ViewNumber::new(2), + }; + let message = Proposal { + data: proposal, + signature, + }; + let vid_proposal = Proposal { + data: VidDisperse { + view_number: message.data.view_number, + commitment: block.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + signature: message.signature.clone(), + }; + + // Every event input is seen on the event stream in the output. + let mut input = Vec::new(); + let mut output = HashMap::new(); + + // In view 1, node 2 is the next leader. + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(SequencingHotShotEvent::BlockReady( + block.clone(), + ViewNumber::new(2), + )); + + input.push(SequencingHotShotEvent::VidDisperseRecv( + vid_proposal.clone(), + pub_key, + )); + input.push(SequencingHotShotEvent::Shutdown); + + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert( + SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + 1, + ); + + let vote_token = vid_exchange + .make_vote_token(ViewNumber::new(2)) + .unwrap() + .unwrap(); + let vid_vote = vid_exchange.create_vid_message(block.commit(), ViewNumber::new(2), vote_token); + output.insert(SequencingHotShotEvent::VidVoteSend(vid_vote), 1); + + output.insert( + SequencingHotShotEvent::VidDisperseRecv(vid_proposal, pub_key), + 1, + ); + output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(SequencingHotShotEvent::Shutdown, 1); + + let build_fn = + |task_runner, event_stream| add_vid_task(task_runner, event_stream, vid_exchange, handle); + + run_harness(input, output, None, build_fn).await; +} diff --git a/types/src/certificate.rs b/types/src/certificate.rs index bf02d2f871..8995e948a2 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -8,7 +8,8 @@ use crate::{ }, vote::{ DAVote, DAVoteAccumulator, QuorumVote, QuorumVoteAccumulator, TimeoutVote, - TimeoutVoteAccumulator, ViewSyncData, ViewSyncVote, ViewSyncVoteAccumulator, VoteType, + TimeoutVoteAccumulator, VIDVote, VIDVoteAccumulator, ViewSyncData, ViewSyncVote, + ViewSyncVoteAccumulator, VoteType, }, }; use bincode::Options; @@ -41,6 +42,21 @@ pub struct DACertificate { pub signatures: AssembledSignature, } +/// A `VIDCertificate` is a threshold signature that some data is available. +/// It is signed by the whole quorum. +#[derive(Clone, PartialEq, custom_debug::Debug, serde::Serialize, serde::Deserialize, Hash)] +#[serde(bound(deserialize = ""))] +pub struct VIDCertificate { + /// The view number this VID certificate was generated during + pub view_number: TYPES::Time, + + /// committment to the block + pub block_commitment: Commitment, + + /// Assembled signature for certificate aggregation + pub signatures: AssembledSignature, +} + /// The type used for Quorum Certificates /// /// A Quorum Certificate is a threshold signature of the `Leaf` being proposed, as well as some @@ -161,6 +177,8 @@ pub enum AssembledSignature { No(::QCType), /// These signatures are for a 'DA' certificate DA(::QCType), + /// These signatures are for a 'VID' certificate + VID(::QCType), /// These signatures are for a `Timeout` certificate Timeout(::QCType), /// These signatures are for genesis certificate @@ -279,8 +297,48 @@ impl } } +impl + SignedCertificate> + for VIDCertificate +{ + type Vote = VIDVote; + type VoteAccumulator = VIDVoteAccumulator, Self::Vote>; + + fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { + VIDCertificate { + view_number: vote.get_view(), + signatures, + block_commitment: vote.block_commitment, + } + } + + fn view_number(&self) -> TYPES::Time { + self.view_number + } + + fn signatures(&self) -> AssembledSignature { + self.signatures.clone() + } + + fn leaf_commitment(&self) -> Commitment { + self.block_commitment + } + + fn is_genesis(&self) -> bool { + // This function is only useful for QC. Will be removed after we have separated cert traits. + false + } + + fn genesis() -> Self { + // This function is only useful for QC. Will be removed after we have separated cert traits. + unimplemented!() + } +} + impl Eq for DACertificate {} +impl Eq for VIDCertificate {} + impl Committable for ViewSyncCertificate { fn commit(&self) -> Commitment { let signatures_bytes = serialize_signature(&self.signatures()); diff --git a/types/src/data.rs b/types/src/data.rs index e20a7da9de..356b7a60fa 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -880,6 +880,10 @@ pub fn serialize_signature(signature: &AssembledSignature { + signatures_bytes.extend("VID".as_bytes()); + Some(signatures.clone()) + } AssembledSignature::Yes(signatures) => { signatures_bytes.extend("Yes".as_bytes()); Some(signatures.clone()) diff --git a/types/src/message.rs b/types/src/message.rs index a44722d8ed..e85c9c51df 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -4,7 +4,7 @@ //! `HotShot` nodes can send among themselves. use crate::{ - certificate::DACertificate, + certificate::{DACertificate, VIDCertificate}, data::{DAProposal, ProposalType, VidDisperse}, traits::{ network::{NetworkMsg, ViewMessage}, @@ -13,7 +13,7 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{DAVote, QuorumVote, TimeoutVote, ViewSyncVote, VoteType}, + vote::{DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncVote, VoteType}, }; use commit::Commitment; use derivative::Derivative; @@ -218,9 +218,9 @@ pub enum ProcessedCommitteeConsensusMessage { /// VID dispersal data. Like [`DAProposal`] VidDisperseMsg(Proposal>, TYPES::SignatureKey), /// Vote from VID storage node. Like [`DAVote`] - VidVote(DAVote, TYPES::SignatureKey), + VidVote(VIDVote, TYPES::SignatureKey), /// Certificate for VID. Like [`DACertificate`] - VidCertificate(DACertificate, TYPES::SignatureKey), + VidCertificate(VIDCertificate, TYPES::SignatureKey), } impl From> @@ -352,13 +352,11 @@ pub enum CommitteeConsensusMessage { /// Vote for VID disperse data /// /// Like [`DAVote`]. - /// TODO currently re-using [`DAVote`]; do we need a separate VID vote? - VidVote(DAVote), + VidVote(VIDVote), /// VID certificate data is available /// /// Like [`DACertificate`] - /// TODO currently re-using [`DACertificate`]; do we need a separate VID cert? - VidCertificate(DACertificate), + VidCertificate(VIDCertificate), } /// Messages related to the consensus protocol. @@ -430,8 +428,8 @@ impl< p.data.get_view_number() } CommitteeConsensusMessage::DAVote(vote_message) => vote_message.get_view(), - CommitteeConsensusMessage::DACertificate(cert) - | CommitteeConsensusMessage::VidCertificate(cert) => cert.view_number, + CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, + CommitteeConsensusMessage::VidCertificate(cert) => cert.view_number, CommitteeConsensusMessage::VidDisperseMsg(disperse) => { disperse.data.get_view_number() } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 0b660a4221..fa191d32b5 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,11 +9,11 @@ use super::{ }; use crate::{ certificate::{ - AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, + AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, ViewSyncCertificate, }, - data::{DAProposal, ProposalType}, - vote::TimeoutVote, + data::{DAProposal, ProposalType, VidDisperse}, + vote::{TimeoutVote, VIDVote}, }; use crate::{ @@ -84,6 +84,8 @@ where No(COMMITMENT), /// Vote to time out and proceed to the next view. Timeout(COMMITMENT), + /// Vote for VID proposal + VID(COMMITMENT), /// Vote to pre-commit the view sync. ViewSyncPreCommit(COMMITMENT), /// Vote to commit the view sync. @@ -100,6 +102,7 @@ where fn commit(&self) -> Commitment { let (tag, commit) = match self { VoteData::DA(c) => ("DA BlockPayload Commit", c), + VoteData::VID(c) => ("VID Proposal Commit", c), VoteData::Yes(c) => ("Yes Vote Commit", c), VoteData::No(c) => ("No Vote Commit", c), VoteData::Timeout(c) => ("Timeout View Number Commit", c), @@ -345,6 +348,14 @@ pub trait ConsensusExchange: Send + Sync { ); ::check(&real_qc_pp, real_commit.as_ref(), &qc) } + AssembledSignature::VID(qc) => { + let real_commit = VoteData::VID(leaf_commitment).commit(); + let real_qc_pp = ::get_public_parameter( + self.membership().get_committee_qc_stake_table(), + U256::from(self.membership().success_threshold().get()), + ); + ::check(&real_qc_pp, real_commit.as_ref(), &qc) + } AssembledSignature::Yes(qc) => { let real_commit = VoteData::Yes(leaf_commitment).commit(); let real_qc_pp = ::get_public_parameter( @@ -497,22 +508,6 @@ pub trait CommitteeExchangeType: current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> DAVote; - - // TODO temporary vid methods, move to quorum https://github.com/EspressoSystems/HotShot/issues/1696 - - /// Create a message with a vote on VID disperse data. - fn create_vid_message( - &self, - block_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> DAVote; - - /// Sign a vote on VID proposal. - fn sign_vid_vote( - &self, - block_commitment: Commitment, - ) -> (EncodedPublicKey, EncodedSignature); } /// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. @@ -584,15 +579,125 @@ impl< vote_data: VoteData::DA(block_commitment), } } +} +impl< + TYPES: NodeType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, + > ConsensusExchange for CommitteeExchange +{ + type Proposal = DAProposal; + type Vote = DAVote; + type Certificate = DACertificate; + type Membership = MEMBERSHIP; + type Networking = NETWORK; + type Commitment = Commitment; + + fn create( + entries: Vec<::StakeTableEntry>, + config: TYPES::ElectionConfigType, + network: Self::Networking, + pk: TYPES::SignatureKey, + entry: ::StakeTableEntry, + sk: ::PrivateKey, + ) -> Self { + let membership = + >::Membership::create_election(entries, config); + Self { + network, + membership, + public_key: pk, + entry, + private_key: sk, + _pd: PhantomData, + } + } + fn network(&self) -> &NETWORK { + &self.network + } + fn make_vote_token( + &self, + view_number: TYPES::Time, + ) -> std::result::Result, ElectionError> { + self.membership + .make_vote_token(view_number, &self.private_key) + } + + fn membership(&self) -> &Self::Membership { + &self.membership + } + fn public_key(&self) -> &TYPES::SignatureKey { + &self.public_key + } + fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { + &self.private_key + } +} + +/// A [`ConsensusExchange`] where participants vote to provide availability for blobs of data. +pub trait VIDExchangeType: ConsensusExchange { + /// Create a message with a vote on VID disperse data. + fn create_vid_message( + &self, + block_commitment: Commitment, + current_view: TYPES::Time, + vote_token: TYPES::VoteTokenType, + ) -> VIDVote; + + /// Sign a vote on VID proposal. + fn sign_vid_vote( + &self, + block_commitment: Commitment, + ) -> (EncodedPublicKey, EncodedSignature); + + /// Sign a VID proposal. + fn sign_vid_proposal( + &self, + block_commitment: &Commitment, + ) -> EncodedSignature; +} + +/// Standard implementation of [`VIDExchangeType`] +#[derive(Derivative)] +#[derivative(Clone, Debug)] +pub struct VIDExchange< + TYPES: NodeType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, +> { + /// The network being used by this exchange. + network: NETWORK, + /// The committee which votes on proposals. + membership: MEMBERSHIP, + /// This participant's public key. + public_key: TYPES::SignatureKey, + /// Entry with public key and staking value for certificate aggregation + entry: ::StakeTableEntry, + /// This participant's private key. + #[derivative(Debug = "ignore")] + private_key: ::PrivateKey, + #[doc(hidden)] + _pd: PhantomData<(TYPES, MEMBERSHIP, M)>, +} + +impl< + TYPES: NodeType, + MEMBERSHIP: Membership, + NETWORK: CommunicationChannel, + M: NetworkMsg, + > VIDExchangeType for VIDExchange +{ fn create_vid_message( &self, block_commitment: Commitment, current_view: ::Time, vote_token: ::VoteTokenType, - ) -> DAVote { + ) -> VIDVote { let signature = self.sign_vid_vote(block_commitment); - DAVote { + VIDVote { signature, block_commitment, current_view, @@ -611,6 +716,15 @@ impl< ); (self.public_key.to_bytes(), signature) } + + /// Sign a VID proposal. + fn sign_vid_proposal( + &self, + block_commitment: &Commitment, + ) -> EncodedSignature { + let signature = TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()); + signature + } } impl< @@ -618,11 +732,11 @@ impl< MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > ConsensusExchange for CommitteeExchange + > ConsensusExchange for VIDExchange { - type Proposal = DAProposal; - type Vote = DAVote; - type Certificate = DACertificate; + type Proposal = VidDisperse; + type Vote = VIDVote; + type Certificate = VIDCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 6d27d70103..90d7912539 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -7,7 +7,7 @@ use super::{ block_contents::Transaction, election::{ CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, - TimeoutExchange, TimeoutExchangeType, ViewSyncExchangeType, VoteToken, + TimeoutExchange, TimeoutExchangeType, VIDExchangeType, ViewSyncExchangeType, VoteToken, }, network::{CommunicationChannel, NetworkMsg, TestableNetworkingImplementation}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -153,6 +153,9 @@ pub trait ExchangesType, MESSA /// Protocol for exchanging data availability proposals and votes. type CommitteeExchange: CommitteeExchangeType + Clone + Debug; + /// Protocol for exchanging VID proposals and votes + type VIDExchange: VIDExchangeType + Clone + Debug; + /// Get the committee exchange fn committee_exchange(&self) -> &Self::CommitteeExchange; @@ -179,6 +182,7 @@ pub trait ExchangesType, MESSA >::Networking, >::Networking, >::Networking, + >::Networking, ), pk: TYPES::SignatureKey, entry: ::StakeTableEntry, @@ -191,6 +195,9 @@ pub trait ExchangesType, MESSA /// Get the view sync exchange. fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; + /// Get the VID exchange + fn vid_exchange(&self) -> &Self::VIDExchange; + /// BlockPayload the underlying networking interfaces until node is successfully initialized into the /// networks. async fn wait_for_networks_ready(&self); @@ -216,6 +223,7 @@ pub trait TestableExchange, ME >::Networking, >::Networking, >::Networking, + >::Networking, ) + 'static, >; } @@ -228,6 +236,7 @@ pub struct SequencingExchanges< QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, + VIDEXCHANGE: VIDExchangeType + Clone + Debug, > { /// Quorum exchange. quorum_exchange: QUORUMEXCHANGE, @@ -238,34 +247,52 @@ pub struct SequencingExchanges< /// Committee exchange. committee_exchange: COMMITTEEEXCHANGE, + /// VID exchange + vid_exchange: VIDEXCHANGE, + /// Timeout exchange // This type can be simplified once we rework the exchanges trait // It is here to avoid needing to instantiate it where all the other exchanges are instantiated // https://github.com/EspressoSystems/HotShot/issues/1799 #[allow(clippy::type_complexity)] - pub timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>, + pub timeout_exchange: TimeoutExchange + as ExchangesType, MESSAGE>>::QuorumExchange + as ConsensusExchange>::Proposal, < + as ExchangesType, MESSAGE>>::QuorumExchange + as ConsensusExchange>::Membership, >::Networking, MESSAGE>, /// Phantom data _phantom: PhantomData<(TYPES, MESSAGE)>, } #[async_trait] -impl +impl ExchangesType, MESSAGE> - for SequencingExchanges + for SequencingExchanges< + TYPES, + MESSAGE, + QUORUMEXCHANGE, + COMMITTEEEXCHANGE, + VIEWSYNCEXCHANGE, + VIDEXCHANGE, + > where TYPES: NodeType, MESSAGE: NetworkMsg, QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, + VIDEXCHANGE: VIDExchangeType + Clone + Debug, { type CommitteeExchange = COMMITTEEEXCHANGE; type QuorumExchange = QUORUMEXCHANGE; type ViewSyncExchange = VIEWSYNCEXCHANGE; + type VIDExchange = VIDEXCHANGE; #[allow(clippy::type_complexity)] - type TimeoutExchange = TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>; + type TimeoutExchange = TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>; + type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); fn committee_exchange(&self) -> &COMMITTEEEXCHANGE { @@ -283,6 +310,7 @@ where >::Networking, >::Networking, >::Networking, + >::Networking, ), pk: TYPES::SignatureKey, entry: ::StakeTableEntry, @@ -297,7 +325,7 @@ where sk.clone(), ); #[allow(clippy::type_complexity)] - let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( + let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( entries.clone(), configs.0.clone(), networks.0, @@ -314,13 +342,25 @@ where entry.clone(), sk.clone(), ); - let committee_exchange = - COMMITTEEEXCHANGE::create(entries, configs.1, networks.1, pk, entry, sk); + + let committee_exchange = COMMITTEEEXCHANGE::create( + entries.clone(), + configs.1.clone(), + networks.1, + pk.clone(), + entry.clone(), + sk.clone(), + ); + + // RM TODO: figure out if this is the proper config + // issue: https://github.com/EspressoSystems/HotShot/issues/1918 + let vid_exchange = VIDEXCHANGE::create(entries, configs.1, networks.3, pk, entry, sk); Self { quorum_exchange, committee_exchange, view_sync_exchange, + vid_exchange, timeout_exchange, _phantom: PhantomData, } @@ -334,6 +374,10 @@ where &self.view_sync_exchange } + fn vid_exchange(&self) -> &Self::VIDExchange { + &self.vid_exchange + } + async fn wait_for_networks_ready(&self) { self.quorum_exchange.network().wait_for_ready().await; self.committee_exchange.network().wait_for_ready().await; @@ -375,6 +419,13 @@ pub type CommitteeEx = <>::Exchanges as Message, >>::CommitteeExchange; +/// Alias for the [`VIDExchange`] type. +pub type VIDEx = <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, +>>::VIDExchange; + /// Alias for the [`ViewSyncExchange`] type. pub type ViewSyncEx = <>::Exchanges as ExchangesType< TYPES, diff --git a/types/src/vote.rs b/types/src/vote.rs index bb4542ebe3..e5e3e15ba5 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -59,6 +59,22 @@ pub struct DAVote { pub vote_data: VoteData>, } +/// A vote on VID proposal. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[serde(bound(deserialize = ""))] +pub struct VIDVote { + /// The signature share associated with this vote + pub signature: (EncodedPublicKey, EncodedSignature), + /// The block commitment being voted on. + pub block_commitment: Commitment, + /// The view this vote was cast for + pub current_view: TYPES::Time, + /// The vote token generated by this replica + pub vote_token: TYPES::VoteTokenType, + /// The vote data this vote is signed over + pub vote_data: VoteData>, +} + /// A positive or negative vote on validating or commitment proposal. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] @@ -239,6 +255,33 @@ impl DAVote { } } +impl VoteType> for VIDVote { + fn get_view(&self) -> TYPES::Time { + self.current_view + } + fn get_key(&self) -> ::SignatureKey { + self.signature_key() + } + fn get_signature(&self) -> EncodedSignature { + self.signature.1.clone() + } + fn get_data(&self) -> VoteData> { + self.vote_data.clone() + } + fn get_vote_token(&self) -> ::VoteTokenType { + self.vote_token.clone() + } +} + +impl VIDVote { + /// Get the signature key. + /// # Panics + /// If the deserialization fails. + pub fn signature_key(&self) -> TYPES::SignatureKey { + ::from_bytes(&self.signature.0).unwrap() + } +} + impl VoteType for QuorumVote { @@ -499,6 +542,78 @@ impl< } } +impl< + TYPES: NodeType, + COMMITMENT: CommitmentBounds + Clone + Copy + PartialEq + Eq + Hash, + VOTE: VoteType, + > Accumulator for VIDVoteAccumulator +{ + fn append( + mut self, + vote: VOTE, + vote_node_id: usize, + stake_table_entries: Vec<::StakeTableEntry>, + ) -> Either> { + let VoteData::VID(vote_commitment) = vote.get_data() else { + return Either::Left(self); + }; + + let encoded_key = vote.get_key().to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (vid_stake_casted, vid_vote_map) = self + .vid_vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if vid_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // Already checked that vote data was for a VID vote above + *vid_stake_casted += u64::from(vote.get_vote_token().vote_count()); + vid_vote_map.insert( + encoded_key, + (vote.get_signature(), vote.get_data(), vote.get_vote_token()), + ); + + if *vid_stake_casted >= u64::from(self.success_threshold) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + stake_table_entries.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + self.vid_vote_outcomes.remove(&vote_commitment); + + return Either::Right(AssembledSignature::VID(real_qc_sig)); + } + Either::Left(self) + } +} + /// Accumulates DA votes pub struct DAVoteAccumulator< TYPES: NodeType, @@ -517,6 +632,24 @@ pub struct DAVoteAccumulator< pub phantom: PhantomData, } +/// Accumulates VID votes +pub struct VIDVoteAccumulator< + TYPES: NodeType, + COMMITMENT: CommitmentBounds + Clone, + VOTE: VoteType, +> { + /// Map of all VID signatures accumlated so far + pub vid_vote_outcomes: VoteMap, + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::PureAssembledSignatureType>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + /// Phantom data to specify the vote this accumulator is for + pub phantom: PhantomData, +} + /// Accumulate quorum votes pub struct QuorumVoteAccumulator< TYPES: NodeType, From 8506e5639fc6bdc01fbc3fafe41f6f0bd4227c18 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 23 Oct 2023 17:50:09 -0700 Subject: [PATCH 0249/1393] Fix build --- hotshot/src/demo.rs | 30 ++------------ hotshot/src/lib.rs | 8 +++- hotshot/src/tasks/mod.rs | 8 ++-- hotshot/src/traits/storage/memory_storage.rs | 1 + task-impls/src/consensus.rs | 43 ++++++++++---------- task-impls/src/da.rs | 7 +++- task-impls/src/events.rs | 4 +- testing/src/node_types.rs | 3 +- testing/src/task_helpers.rs | 13 +++--- testing/tests/consensus_task.rs | 4 +- testing/tests/da_task.rs | 5 ++- testing/tests/memory_network.rs | 5 ++- testing/tests/network_task.rs | 5 ++- types/src/block_impl.rs | 28 +++++++++---- types/src/data.rs | 7 +--- types/src/traits/block_contents.rs | 21 ++++++++-- types/src/traits/election.rs | 6 ++- types/src/traits/node_implementation.rs | 4 +- 18 files changed, 111 insertions(+), 91 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 33ac15dd28..0d1b3993ad 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -8,15 +8,11 @@ use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use commit::{Commitment, Committable}; use derivative::Derivative; -use either::Either; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ - block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, + block_impl::{BlockPayloadError, VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::{AssembledSignature, QuorumCertificate}, - data::{ - fake_commitment, genesis_proposer_id, random_commitment, LeafType, SequencingLeaf, - ViewNumber, - }, + data::{fake_commitment, random_commitment, LeafType, ViewNumber}, traits::{ election::Membership, node_implementation::NodeType, @@ -126,6 +122,7 @@ pub struct DemoTypes; impl NodeType for DemoTypes { type Time = ViewNumber; + type BlockHeader = VIDBlockHeader; type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; @@ -184,24 +181,3 @@ pub fn random_quorum_certificate( - deltas: Either>, - rng: &mut dyn rand::RngCore, -) -> SequencingLeaf { - let justify_qc = random_quorum_certificate(rng); - // let state = TYPES::StateType::default() - // .append(&deltas, &TYPES::Time::new(42)) - // .unwrap_or_default(); - SequencingLeaf { - view_number: justify_qc.view_number, - height: rng.next_u64(), - justify_qc, - parent_commitment: random_commitment(rng), - deltas, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: genesis_proposer_id(), - } -} diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 2281a1af08..444538c3ad 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -55,7 +55,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, ViewSyncCertificate}, consensus::{BlockStore, Consensus, ConsensusMetrics, View, ViewInner, ViewQueue}, data::{DAProposal, DeltasType, LeafType, QuorumProposal, SequencingLeaf}, @@ -634,7 +634,11 @@ pub trait HotShotType> { #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType< + BlockHeader = VIDBlockHeader, + BlockPayload = VIDBlockPayload, + Transaction = VIDTransaction, + >, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 1a0fc8ad4e..77e230ec55 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,7 +5,7 @@ use crate::{ QuorumCertificate, SequencingQuorumEx, }; use async_compatibility_layer::art::async_sleep; -use commit::{Commitment, CommitmentBounds}; +use commit::{Commitment, CommitmentBounds, Committable}; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -27,7 +27,7 @@ use hotshot_task_impls::{ view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, @@ -250,7 +250,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -288,7 +288,7 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - block: VIDBlockPayload::genesis(), + block_commitment: VIDBlockPayload::genesis().commit(), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), api: c_api.clone(), committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 73d86fc1d9..c57363bca7 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -148,6 +148,7 @@ mod test { impl NodeType for DummyTypes { type Time = ViewNumber; + type BlockHeader = DummyBlock; type BlockPayload = DummyBlock; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1fb8d383e8..581348e8b0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -15,7 +15,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::vote::QuorumVoteAccumulator; +use hotshot_types::{block_impl::VIDBlockPayload, vote::QuorumVoteAccumulator}; use hotshot_types::{ certificate::{DACertificate, QuorumCertificate}, consensus::{Consensus, View}, @@ -23,6 +23,7 @@ use hotshot_types::{ event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ + block_contents::BlockHeader, consensus_api::SequencingConsensusApi, election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, network::{CommunicationChannel, ConsensusIntentEvent}, @@ -83,8 +84,8 @@ pub struct SequencingConsensusTaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - /// Current block submitted to DA - pub block: TYPES::BlockPayload, + /// The commitment to the current block submitted to DA + pub block_commitment: Commitment, /// the quorum exchange pub quorum_exchange: Arc>, @@ -259,7 +260,7 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -348,10 +349,10 @@ where let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.height, + height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_commitment), + deltas: Right(self.block_commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -414,10 +415,10 @@ where let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.height, + height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_commitment), + deltas: Right(proposal.block_header.commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -426,7 +427,7 @@ where // Validate the DAC. if self .committee_exchange - .is_valid_cert(cert, proposal.block_commitment) + .is_valid_cert(cert, proposal.block_header.commitment) { self.quorum_exchange.create_yes_message( proposal.justify_qc.commit(), @@ -605,10 +606,10 @@ where let parent_commitment = parent.commit(); let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.data.height, + height: proposal.data.block_header.block_number, justify_qc: justify_qc.clone(), parent_commitment, - deltas: Right(proposal.data.block_commitment), + deltas: Right(proposal.data.block_header.commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -1076,9 +1077,9 @@ where *view ); } - SequencingHotShotEvent::SendDABlockData(block) => { + SequencingHotShotEvent::SendBlockCommitment(block_commitment) => { // ED TODO Should make sure this is actually the most recent block - self.block = block; + self.block_commitment = block_commitment; } _ => {} } @@ -1150,16 +1151,12 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - let block_commitment = self.block.commit(); - let leaf = SequencingLeaf { view_number: view, height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - // Use the block commitment rather than the block, so that the replica can construct - // the same leaf with the commitment. - deltas: Right(block_commitment), + deltas: Right(self.block_commitment), rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.api.public_key().to_bytes(), @@ -1170,9 +1167,11 @@ where .sign_validating_or_commitment_proposal::(&leaf.commit()); // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_commitment, + block_header: VIDBlockHeader { + block_number: parent_leaf.height + 1, + commitment: self.block_commitment, + }, view_number: leaf.view_number, - height: leaf.height, justify_qc: consensus.high_qc.clone(), // TODO ED Update this to be the actual TC if there is one timeout_certificate: None, @@ -1240,7 +1239,7 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -1289,7 +1288,7 @@ pub fn consensus_event_filter>( | SequencingHotShotEvent::DACRecv(_) | SequencingHotShotEvent::VidCertRecv(_) | SequencingHotShotEvent::ViewChange(_) - | SequencingHotShotEvent::SendDABlockData(_) + | SequencingHotShotEvent::SendBlockCommitment(_) | SequencingHotShotEvent::Timeout(_) | SequencingHotShotEvent::Shutdown, ) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index fbe156aec6..83d27874ba 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -628,7 +628,8 @@ where .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - let signature = self.committee_exchange.sign_da_proposal(&block.commit()); + let block_commitment = block.commit(); + let signature = self.committee_exchange.sign_da_proposal(&block_commitment); let data: DAProposal = DAProposal { deltas: block.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? @@ -644,7 +645,9 @@ where // TODO ED We should send an event to do this, but just getting it to work for now self.event_stream - .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) + .publish(SequencingHotShotEvent::SendBlockCommitment( + block_commitment, + )) .await; self.event_stream diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index fbb7e227b0..001bd60c12 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -60,8 +60,8 @@ pub enum SequencingHotShotEvent> { TransactionsRecv(Vec), /// Send transactions to the network TransactionSend(TYPES::Transaction, TYPES::SignatureKey), - /// Event to send DA block data from DA leader to next quorum leader (which should always be the same node); internal event only - SendDABlockData(TYPES::BlockPayload), + /// Event to send block commitment from DA leader to the quorum; internal event only + SendBlockCommitment(Commitment), /// Event when the transactions task has a block formed BlockReady(TYPES::BlockPayload, TYPES::Time), /// Event when consensus decided on a leaf diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 3cfb013851..2970819d42 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -14,7 +14,7 @@ use hotshot::{ types::bn254::BLSPubKey, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{QuorumProposal, SequencingLeaf, ViewNumber}, message::{Message, SequencingMessage}, @@ -42,6 +42,7 @@ use serde::{Deserialize, Serialize}; pub struct SequencingTestTypes; impl NodeType for SequencingTestTypes { type Time = ViewNumber; + type BlockHeader = VIDBlockHeader; type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index f4e7fe49a8..8d41a8e11f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,7 +13,7 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::SequencingHotShotEvent; use hotshot_types::{ - block_impl::{VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, + block_impl::{VIDBlockHeader, VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ @@ -118,23 +118,24 @@ async fn build_quorum_proposal_and_signature( // every event input is seen on the event stream in the output. let block = ::genesis(); + let block_commitment = block.commit(); let leaf = SequencingLeaf { view_number: ViewNumber::new(view), height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - // Use the block commitment rather than the block, so that the replica can construct - // the same leaf with the commitment. - deltas: Right(block.commit()), + deltas: Right(block_commitment), rejected: vec![], timestamp: 0, proposer_id: api.public_key().to_bytes(), }; let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal::> { - block_commitment: block.commit(), + block_header: VIDBlockHeader { + block_number: 1, + commitment: block_commitment, + }, view_number: ViewNumber::new(view), - height: 1, justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, proposer_id: leaf.proposer_id, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ec1bd7b05c..8c4bd866e9 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -62,10 +62,10 @@ async fn build_vote( let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.height, + height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_commitment), + deltas: Right(proposal.block_header.commitment), rejected: Vec::new(), timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index e31c34e6aa..4f43f6beb4 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -95,7 +95,10 @@ async fn test_da_task() { SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 1, ); - output.insert(SequencingHotShotEvent::SendDABlockData(block.clone()), 1); + output.insert( + SequencingHotShotEvent::SendBlockCommitment(block.commit()), + 1, + ); output.insert( SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), 1, diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 27f6ae6531..31f25f3dbb 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -13,7 +13,7 @@ use hotshot::traits::implementations::{ use hotshot::traits::NodeImplementation; use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; -use hotshot_types::block_impl::{VIDBlockPayload, VIDTransaction}; +use hotshot_types::block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}; use hotshot_types::certificate::ViewSyncCertificate; use hotshot_types::data::{DAProposal, QuorumProposal, SequencingLeaf}; use hotshot_types::message::{Message, SequencingMessage}; @@ -52,7 +52,8 @@ pub struct Test; impl NodeType for Test { type Time = ViewNumber; - type BlockType = VIDBlockPayload; + type BlockHeader = VIDBlockHeader; + type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index c4165a8c31..fe50497132 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -124,7 +124,10 @@ async fn test_network_task() { SequencingHotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, ); - output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); + output.insert( + SequencingHotShotEvent::SendBlockCommitment(block.commit()), + 1, + ); output.insert( SequencingHotShotEvent::DAProposalRecv(da_proposal, pub_key), 1, diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 2e9f75cd09..6501732dd4 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -5,8 +5,12 @@ use std::{ }; use crate::{ - data::{test_srs, VidScheme, VidSchemeTrait, SequencingLeaf}, - traits::{block_contents::Transaction, state::TestableBlock, BlockPayload}, + data::{test_srs, VidScheme, VidSchemeTrait}, + traits::{ + block_contents::{BlockHeader, Transaction}, + state::TestableBlock, + BlockPayload, + }, }; use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; @@ -127,10 +131,18 @@ impl BlockPayload for VIDBlockPayload { } /// A [`BlockHeader`] that commits to [`VIDBlockPayload`]. -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct VIDBlockHeader { - /// Previous leaf. - pub previous_leaf: SequencingLeaf, +#[derive(PartialEq, Eq, Hash, Clone, Debug, Deserialize, Serialize)] +pub struct VIDBlockHeader { + /// Block number. + pub block_number: u64, /// VID commitment. - pub commitment: ::Commit, -} \ No newline at end of file + pub commitment: Commitment, +} + +impl BlockHeader for VIDBlockHeader { + type Payload = VIDBlockPayload; + + fn commitment(&self) -> Commitment { + self.commitment + } +} diff --git a/types/src/data.rs b/types/src/data.rs index 66c5ebfe31..50550d685e 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -202,15 +202,12 @@ pub fn test_srs( #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] pub struct QuorumProposal> { - /// The commitment to append. - pub block_commitment: Commitment, + /// The block header to append + pub block_header: TYPES::BlockHeader, /// CurView from leader when proposing leaf pub view_number: TYPES::Time, - /// Height from leader when proposing leaf - pub height: u64, - /// Per spec, justification pub justify_qc: QuorumCertificate>, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index ab851745d4..9d8fd8196f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -50,24 +50,27 @@ pub trait BlockPayload: /// The type of the transitions we are applying type Transaction: Transaction; + // type Header: BlockHeader; + /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec fn contained_transactions(&self) -> HashSet>; } +/// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: Serialize + Clone + Debug - + Display + // + Display + Hash + PartialEq + Eq + Send + Sync - + Committable + DeserializeOwned { + /// Block payload associated with the commitment. type Payload: BlockPayload; /// Get the payload commitment. @@ -78,7 +81,9 @@ pub trait BlockHeader: pub mod dummy { use std::fmt::Display; - use super::{BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize}; + use super::{ + BlockHeader, BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize, + }; use rand::Rng; use serde::Deserialize; @@ -139,6 +144,16 @@ pub mod dummy { } } + impl BlockHeader for DummyBlock { + type Payload = Self; + + fn commitment(&self) -> commit::Commitment { + commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") + .u64_field("Nonce", self.nonce) + .finalize() + } + } + impl BlockPayload for DummyBlock { type Error = DummyError; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 7d9c0d0c2d..ceef4f1f8c 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -534,8 +534,10 @@ pub trait CommitteeExchangeType: ConsensusExchange { /// Sign a DA proposal. - fn sign_da_proposal(&self, block_commitment: &Commitment) - -> EncodedSignature; + fn sign_da_proposal( + &self, + block_commitment: &Commitment, + ) -> EncodedSignature; /// Sign a vote on DA proposal. /// diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index a48f3152d3..df322948d2 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -4,7 +4,7 @@ //! describing the overall behavior of a node, as a composition of implementations of the node trait. use super::{ - block_contents::Transaction, + block_contents::{BlockHeader, Transaction}, election::{ CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, ViewSyncExchangeType, VoteToken, @@ -546,6 +546,8 @@ pub trait NodeType: /// /// This should be the same `Time` that `StateType::Time` is using. type Time: ConsensusTime; + /// The block header type that this hotshot setup is using. + type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// /// This should be the same block that `StateType::BlockPayload` is using. From e60deb15239bb21229bca1bef5f0cf295b77401f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 23 Oct 2023 18:40:12 -0700 Subject: [PATCH 0250/1393] add validator structure in config.rs --- orchestrator/src/config.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index a3d15e350f..d2839133e1 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -50,6 +50,20 @@ pub struct WebServerConfig { pub wait_between_polls: Duration, } +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct ValidatorConfig { + /// The validator's public key and stake value + pub public_key: KEY, + /// The validator's private key, should be in the mempool, not public + pub private_key: PRIVATEKEY, + /// The validator's stake + pub stake: u64, + /// The validator's index + pub node_id: u64, + /// The validator's public_key together with its stake value, which can be served as public parameter for key aggregation + pub entry: STAKETABLEENTRY, +} + #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct NetworkConfig { pub rounds: usize, From c83ee243416fbb19fec96d1c7016bab607ebc5dd Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 24 Oct 2023 09:21:03 -0700 Subject: [PATCH 0251/1393] Fix build after merge --- hotshot/src/lib.rs | 2 +- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/consensus.rs | 28 +++++++++++----------------- task-impls/src/vid.rs | 14 +++++++------- types/src/certificate.rs | 8 ++++---- types/src/traits/election.rs | 8 ++++---- types/src/vote.rs | 8 ++++---- 7 files changed, 32 insertions(+), 38 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 7490527571..a970c4f72b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -670,7 +670,7 @@ where Message, Proposal = VidDisperse, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, SequencingTimeoutEx: ConsensusExchange< diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index ffa5eb132e..0420e793ab 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -385,7 +385,7 @@ where TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { // build the vid task diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 022a26dce4..d670c7f097 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -16,14 +16,13 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - block_impl::VIDBlockPayload + block_impl::{VIDBlockHeader, VIDBlockPayload}, certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, consensus::{Consensus, View}, data::{LeafType, ProposalType, QuorumProposal, SequencingLeaf}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ - block_contents::BlockHeader, consensus_api::SequencingConsensusApi, election::{ConsensusExchange, QuorumExchangeType, SignedCertificate, TimeoutExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, @@ -461,7 +460,7 @@ where height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(self.block_commitment), + deltas: Right(proposal.block_header.commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -543,7 +542,7 @@ where .is_valid_cert(cert) { // Validate the block commitment for non-genesis DAC. - if !cert.is_genesis() && cert.leaf_commitment() != proposal.block_commitment { + if !cert.is_genesis() && cert.leaf_commitment() != proposal.block_header.commitment { error!("Block commitment does not equal parent commitment"); return false; } @@ -745,10 +744,10 @@ where ); let leaf = SequencingLeaf { view_number: view, - height: proposal.data.height, + height: proposal.data.block_header.block_number, justify_qc: justify_qc.clone(), parent_commitment: justify_qc.leaf_commitment(), - deltas: Right(proposal.data.block_commitment), + deltas: Right(proposal.data.block_header.commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -770,10 +769,10 @@ where let parent_commitment = parent.commit(); let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.data.height, + height: proposal.data.block_header.block_number, justify_qc: justify_qc.clone(), parent_commitment, - deltas: Right(proposal.data.block_commitment), + deltas: Right(proposal.data.block_header.commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -1399,10 +1398,7 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - // let block_commitment = Some(self.block.commit()); - if let Some(block) = &self.block { - let block_commitment = block.commit(); - + if let Some(block_commitment) = &self.block_commitment { let leaf = SequencingLeaf { view_number: view, height: parent_leaf.height + 1, @@ -1410,7 +1406,7 @@ where parent_commitment: parent_leaf.commit(), // Use the block commitment rather than the block, so that the replica can construct // the same leaf with the commitment. - deltas: Right(block_commitment), + deltas: Right(*block_commitment), rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.api.public_key().to_bytes(), @@ -1423,9 +1419,8 @@ where let proposal = QuorumProposal { block_header: VIDBlockHeader { block_number: leaf.height, - commitment: block_commitment, + commitment: *block_commitment, }, - block_commitment, view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), @@ -1441,14 +1436,13 @@ where "Sending proposal for view {:?} \n {:?}", leaf.view_number, "" ); ->>>>>>> develop self.event_stream .publish(SequencingHotShotEvent::QuorumProposalSend( message, self.quorum_exchange.public_key().clone(), )) .await; - self.block = None; + self.block_commitment = None; return true; } debug!("Self block was None"); diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index bde44d7781..cf3589192d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -53,7 +53,7 @@ pub struct VIDTaskState< TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// The state's api @@ -89,7 +89,7 @@ pub struct VIDVoteCollectionTaskState< TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// the vid exchange @@ -101,7 +101,7 @@ pub struct VIDVoteCollectionTaskState< TYPES, TYPES::Time, TYPES::VoteTokenType, - Commitment, + Commitment, >>::VoteAccumulator, VIDCertificate, >, @@ -120,7 +120,7 @@ where TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { } @@ -140,7 +140,7 @@ where TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { match event { @@ -199,7 +199,7 @@ where TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { /// main task event handler @@ -415,7 +415,7 @@ where TYPES, Message, Certificate = VIDCertificate, - Commitment = Commitment, + Commitment = Commitment, >, { } diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 9f9c36b697..1c415ce5f8 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -51,7 +51,7 @@ pub struct VIDCertificate { pub view_number: TYPES::Time, /// committment to the block - pub block_commitment: Commitment, + pub block_commitment: Commitment, /// Assembled signature for certificate aggregation pub signatures: AssembledSignature, @@ -298,11 +298,11 @@ impl } impl - SignedCertificate> + SignedCertificate> for VIDCertificate { type Vote = VIDVote; - type VoteAccumulator = VIDVoteAccumulator, Self::Vote>; + type VoteAccumulator = VIDVoteAccumulator, Self::Vote>; fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { VIDCertificate { @@ -320,7 +320,7 @@ impl self.signatures.clone() } - fn leaf_commitment(&self) -> Commitment { + fn leaf_commitment(&self) -> Commitment { self.block_commitment } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 5c41378edf..4718e2d033 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -595,7 +595,7 @@ impl< type Certificate = DACertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; + type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, @@ -722,7 +722,7 @@ impl< /// Sign a VID proposal. fn sign_vid_proposal( &self, - block_commitment: &Commitment, + block_commitment: &Commitment, ) -> EncodedSignature { let signature = TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()); signature @@ -810,7 +810,7 @@ pub trait QuorumExchangeType, /// Sign a block commitment. fn sign_block_commitment( &self, - block_commitment: Commitment, + block_commitment: Commitment, ) -> EncodedSignature; /// Sign a positive vote on validating or commitment proposal. @@ -914,7 +914,7 @@ impl< fn sign_block_commitment( &self, - block_commitment: Commitment<::BlockType>, + block_commitment: Commitment<::BlockPayload>, ) -> EncodedSignature { TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()) } diff --git a/types/src/vote.rs b/types/src/vote.rs index 6014ce7ea4..2b721819bb 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -66,13 +66,13 @@ pub struct VIDVote { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The block commitment being voted on. - pub block_commitment: Commitment, + pub block_commitment: Commitment, /// The view this vote was cast for pub current_view: TYPES::Time, /// The vote token generated by this replica pub vote_token: TYPES::VoteTokenType, /// The vote data this vote is signed over - pub vote_data: VoteData>, + pub vote_data: VoteData>, } /// A positive or negative vote on validating or commitment proposal. @@ -255,7 +255,7 @@ impl DAVote { } } -impl VoteType> for VIDVote { +impl VoteType> for VIDVote { fn get_view(&self) -> TYPES::Time { self.current_view } @@ -265,7 +265,7 @@ impl VoteType> for VIDVote< fn get_signature(&self) -> EncodedSignature { self.signature.1.clone() } - fn get_data(&self) -> VoteData> { + fn get_data(&self) -> VoteData> { self.vote_data.clone() } fn get_vote_token(&self) -> ::VoteTokenType { From b3df7b4f4230f6d967cd62a695115e34a05f5a89 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 24 Oct 2023 09:26:25 -0700 Subject: [PATCH 0252/1393] Fix clippy --- hotshot/examples/infra/modDA.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 2a273f22e4..fdec0f8a43 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -21,7 +21,7 @@ use hotshot_orchestrator::{ config::{NetworkConfig, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_types::traits::election::VIDExchange; +use hotshot_types::{block_impl::VIDBlockHeader, traits::election::VIDExchange}; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, @@ -408,7 +408,11 @@ pub struct WebServerDARun< #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -576,7 +580,11 @@ pub struct Libp2pDARun, MEMBERSHIP #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -805,7 +813,11 @@ where /// Main entry point for validators pub async fn main_entry_point< - TYPES: NodeType, + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, MEMBERSHIP: Membership + Debug, DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, From 8b876a089ddfa0d0bca8b9c8e5414d6160b9a922 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 24 Oct 2023 11:13:28 -0700 Subject: [PATCH 0253/1393] Remove a commented-out line --- types/src/traits/block_contents.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 9d8fd8196f..59fd565140 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -59,16 +59,7 @@ pub trait BlockPayload: /// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: - Serialize - + Clone - + Debug - // + Display - + Hash - + PartialEq - + Eq - + Send - + Sync - + DeserializeOwned + Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned { /// Block payload associated with the commitment. type Payload: BlockPayload; From fe7243cf898b39584b89ee580d6a0b44f4bf4dc7 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 24 Oct 2023 16:53:30 -0700 Subject: [PATCH 0254/1393] add validator config with default value --- hotshot-signature-key/src/bn254/bn254_pub.rs | 1 - orchestrator/src/config.rs | 44 +++++++++++++++----- orchestrator/src/lib.rs | 8 ++-- 3 files changed, 38 insertions(+), 15 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 43fc3a6c43..025d455129 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -27,7 +27,6 @@ pub struct BLSPubKey { pub_key: VerKey, } -// #[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.pub_key.to_string(); diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index d2839133e1..37d17117c7 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,4 +1,4 @@ -use hotshot_types::{traits::signature_key::SignatureKey, ExecutionType, HotShotConfig}; +use hotshot_types::{traits::{signature_key::SignatureKey, election::ElectionConfig,}, ExecutionType, HotShotConfig}; use std::{ marker::PhantomData, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -51,21 +51,42 @@ pub struct WebServerConfig { } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct ValidatorConfig { +#[serde(bound(deserialize = ""))] +pub struct ValidatorConfig { /// The validator's public key and stake value pub public_key: KEY, /// The validator's private key, should be in the mempool, not public - pub private_key: PRIVATEKEY, + pub private_key: KEY::PrivateKey, /// The validator's stake - pub stake: u64, - /// The validator's index - pub node_id: u64, + pub stake_value: u64, /// The validator's public_key together with its stake value, which can be served as public parameter for key aggregation - pub entry: STAKETABLEENTRY, + pub entry: KEY::StakeTableEntry, +} + +impl ValidatorConfig { + fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { + let (public_key, private_key) = KEY::generated_from_seed_indexed( + seed, + index, + ); + Self { + public_key: public_key.clone(), + private_key: private_key, + stake_value: stake_value, + entry: public_key.get_stake_table_entry(stake_value), + } + } +} + +impl Default for ValidatorConfig { + fn default() -> Self { + Self::generated_from_seed_indexed([0u8; 32], 0, 1) + } } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct NetworkConfig { +#[serde(bound(deserialize = ""))] +pub struct NetworkConfig { pub rounds: usize, pub transactions_per_round: usize, pub num_bootrap: usize, @@ -74,6 +95,7 @@ pub struct NetworkConfig { pub propose_max_round_time: Duration, pub node_index: u64, pub seed: [u8; 32], + pub validator_config: ValidatorConfig, pub padding: usize, pub start_delay_seconds: u64, pub key_type_name: String, @@ -85,13 +107,14 @@ pub struct NetworkConfig { _key_type_phantom: PhantomData, } -impl Default for NetworkConfig { +impl Default for NetworkConfig { fn default() -> Self { Self { rounds: default_rounds(), transactions_per_round: default_transactions_per_round(), node_index: 0, seed: [0u8; 32], + validator_config: ValidatorConfig::default(), padding: default_padding(), libp2p_config: None, config: default_config().into(), @@ -137,7 +160,7 @@ fn default_web_server_config() -> Option { None } -impl From for NetworkConfig { +impl From for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { rounds: val.rounds, @@ -148,6 +171,7 @@ impl From for NetworkConfig { propose_max_round_time: val.config.propose_max_round_time, propose_min_round_time: val.config.propose_min_round_time, seed: val.seed, + validator_config: ValidatorConfig::generated_from_seed_indexed(val.seed, 0, 1), padding: val.padding, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { num_bootstrap_nodes: val.config.num_bootstrap, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 0c90fdf2d8..728966a42e 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -36,7 +36,7 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { } #[derive(Default, Clone)] -struct OrchestratorState { +struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, /// The network configuration @@ -69,7 +69,7 @@ impl } } -pub trait OrchestratorApi { +pub trait OrchestratorApi { fn post_identity(&mut self, identity: IpAddr) -> Result; fn post_getconfig( &mut self, @@ -83,7 +83,7 @@ pub trait OrchestratorApi { impl OrchestratorApi for OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey, - ELECTION: serde::Serialize + Clone + Send, + ELECTION: serde::Serialize + Clone + Send + ElectionConfig, { fn post_identity(&mut self, identity: IpAddr) -> Result { let node_index = self.latest_index; @@ -194,7 +194,7 @@ where } /// Sets up all API routes -fn define_api() -> Result, ApiError> +fn define_api() -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + OrchestratorApi, From a20ec529542e593652e3131971b11f81fcb910bd Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Wed, 25 Oct 2023 08:15:27 -0400 Subject: [PATCH 0255/1393] feat: libp2p byzantine completed --- hotshot/examples/infra/modDA.rs | 1 + .../src/traits/networking/libp2p_network.rs | 71 ++++++++++++++++++- .../src/traits/networking/memory_network.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 18 ++++- types/src/traits/network.rs | 3 + 5 files changed, 88 insertions(+), 7 deletions(-) diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index a06f023159..dfeec6e5b9 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -706,6 +706,7 @@ where // function all_keys, da_keys, + None, ) .await .unwrap(); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 20e6718955..1c21eca294 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -21,7 +21,7 @@ use hotshot_types::{ network::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, NetworkMsg, TestableChannelImplementation, - TestableNetworkingImplementation, TransmitType, ViewMessage, + TestableNetworkingImplementation, TransmitType, ViewMessage, NetworkReliability, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -113,6 +113,8 @@ struct Libp2pNetworkInner { /// NOTE: supposed to represent a ViewNumber but we /// haven't made that atomic yet and we prefer lock-free latest_seen_view: Arc, + /// reliability_config + reliability_config: Option>> } /// Networking implementation that uses libp2p @@ -233,6 +235,7 @@ where node_id as usize, keys, da, + None, ) .await .unwrap() @@ -281,6 +284,7 @@ impl Libp2pNetwork { // HACK committee_pks: BTreeSet, da_pks: BTreeSet, + reliability_config: Option>>, ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); let network_handle = Arc::new( @@ -336,6 +340,8 @@ impl Libp2pNetwork { // proposals on". We need this because to have consensus info injected we need a working // network already. In the worst case, we send a few lookups we don't need. latest_seen_view: Arc::new(AtomicU64::new(0)), + reliability_config + }), }; @@ -596,6 +602,37 @@ impl ConnectedNetwork for Libp2p .map_err(|_| NetworkError::ShutDown)?; } + // TODO maybe we should lift the metrics mutex up a level and copy the inner pattern + // ask during pair programming + // or maybe channels would be better? + let metrics = self.inner.metrics.clone(); + if let Some(config) = &self.inner.reliability_config { + let handle = self.inner.handle.clone(); + + let serialized_msg = bincode_opts().serialize(&message).context(FailedToSerializeSnafu)?; + let fut = config.read().await.chaos_send_msg( + serialized_msg, + Arc::new(move |msg: Vec| { + let topic_2 = topic.clone(); + let handle_2 = handle.clone(); + let metrics_2 = metrics.clone(); + boxed_sync(async move { + match handle_2.gossip_no_serialize(topic_2, msg).await { + Err(e) => { + metrics_2.message_failed_to_send.add(1); + warn!("Failed to broadcast to libp2p: {:?}", e) + }, + Ok(_) => { + metrics_2.outgoing_direct_message_count.add(1); + + } + } + }) + })); + async_spawn(async move {fut.await}); + return Ok(()); + } + match self.inner.handle.gossip(topic, &message).await { Ok(()) => { self.inner.metrics.outgoing_broadcast_message_count.add(1); @@ -644,13 +681,41 @@ impl ConnectedNetwork for Libp2p } }; + // TODO maybe we should lift the metrics mutex up a level and copy the inner pattern + // ask during pair programming + // or maybe channels would be better? + let metrics = self.inner.metrics.clone(); + if let Some(config) = &self.inner.reliability_config { + let handle = self.inner.handle.clone(); + + let serialized_msg = bincode_opts().serialize(&message).context(FailedToSerializeSnafu)?; + let fut = config.read().await.chaos_send_msg( + serialized_msg, + Arc::new(move |msg: Vec| { + let handle_2 = handle.clone(); + let metrics_2 = metrics.clone(); + boxed_sync(async move { + match handle_2.direct_request_no_serialize(pid, msg).await { + Err(e) => { + metrics_2.message_failed_to_send.add(1); + warn!("Failed to broadcast to libp2p: {:?}", e) + }, + Ok(_) => { + metrics_2.outgoing_direct_message_count.add(1); + + } + } + }) + })); + async_spawn(async move {fut.await}); + return Ok(()); + } + match self.inner.handle.direct_request(pid, &message).await { Ok(()) => { - self.inner.metrics.outgoing_direct_message_count.add(1); Ok(()) } Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); Err(e.into()) } } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 3ea28fe7e6..47edc766ea 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -361,7 +361,7 @@ impl ConnectedNetwork for Memory Arc::new(move |msg: Vec| { let node2 = node.clone(); boxed_sync(async move { - let _res = node2.broadcast_input(msg).await; + let _res = node2.direct_input(msg).await; // NOTE we're dropping metrics here but this is only for testing // purposes. I think that should be okay }) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 39c763ac31..76bdfdab21 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -452,9 +452,17 @@ impl NetworkNodeHandle { msg: &impl Serialize, ) -> Result<(), NetworkNodeHandleError> { let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + self.direct_request_no_serialize(pid, serialized_msg).await + } + + pub async fn direct_request_no_serialize( + &self, + pid: PeerId, + contents: Vec, + ) -> Result<(), NetworkNodeHandleError> { let req = ClientRequest::DirectRequest { pid, - contents: serialized_msg, + contents, retry_count: 1, }; self.send_request(req).await @@ -474,7 +482,7 @@ impl NetworkNodeHandle { self.send_request(req).await } - /// Forcefully disconnet from a peer + /// Forcefully disconnect from a peer /// # Errors /// If the channel is closed somehow /// Shouldnt' happen. @@ -496,7 +504,11 @@ impl NetworkNodeHandle { msg: &impl Serialize, ) -> Result<(), NetworkNodeHandleError> { let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; - let req = ClientRequest::GossipMsg(topic, serialized_msg); + self.gossip_no_serialize(topic, serialized_msg).await + } + + pub async fn gossip_no_serialize(&self, topic: String, msg: Vec) -> Result<(), NetworkNodeHandleError>{ + let req = ClientRequest::GossipMsg(topic, msg); self.send_request(req).await } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 1729636836..1a6414fd67 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -405,6 +405,9 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send { /// whether or not to send duplicates /// and whether or not to include noise with the message /// then send the message + /// note: usually self is stored in a rwlock + /// so instead of doing the sending part, we just fiddle with the message + /// then return a future that does the sending and delaying fn chaos_send_msg( &self, msg: Vec, From bfb81654ba3c96b820072ef4170e44b33346ca32 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 25 Oct 2023 20:53:55 -0400 Subject: [PATCH 0256/1393] remove modDA and simplify (#1940) --- hotshot/examples/infra/mod.rs | 914 +++++++++++++++++- hotshot/examples/infra/modDA.rs | 899 ----------------- hotshot/examples/libp2p/multi-validator.rs | 4 +- hotshot/examples/libp2p/orchestrator.rs | 12 +- hotshot/examples/libp2p/types.rs | 2 +- hotshot/examples/libp2p/validator.rs | 4 +- .../examples/web-server-da/multi-validator.rs | 4 +- .../examples/web-server-da/orchestrator.rs | 12 +- hotshot/examples/web-server-da/types.rs | 2 +- hotshot/examples/web-server-da/validator.rs | 4 +- 10 files changed, 898 insertions(+), 959 deletions(-) delete mode 100644 hotshot/examples/infra/modDA.rs diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 06d7d9deb3..ba774d52e8 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -1,20 +1,82 @@ +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use async_lock::RwLock; +use async_trait::async_trait; use clap::Parser; +use futures::StreamExt; +use hotshot::{ + traits::{ + implementations::{ + Libp2pCommChannel, Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, + WebCommChannel, WebServerNetwork, + }, + NodeImplementation, + }, + types::{SignatureKey, SystemContextHandle}, + HotShotType, SystemContext, +}; use hotshot_orchestrator::{ self, - config::{NetworkConfig, NetworkConfigFile}, + client::{OrchestratorClient, ValidatorArgs}, + config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; -use libp2p::{ - identity::{ - ed25519::{Keypair as EdKeypair, SecretKey}, - Keypair, +use hotshot_task::task::FilterEvent; +use hotshot_types::traits::election::VIDExchange; +use hotshot_types::{ + block_impl::{VIDBlockPayload, VIDTransaction}, + certificate::ViewSyncCertificate, + consensus::ConsensusMetricsValue, + data::{QuorumProposal, SequencingLeaf, TestableLeaf}, + event::{Event, EventType}, + message::{Message, SequencingMessage}, + traits::{ + election::{ + CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, + }, + network::CommunicationChannel, + node_implementation::{ + CommitteeEx, ExchangesType, NodeType, QuorumEx, SequencingExchanges, + }, + state::{ConsensusTime, TestableBlock, TestableState}, }, - multiaddr::{self}, - Multiaddr, + HotShotConfig, }; -use std::{fmt::Debug, fs, net::IpAddr, str::FromStr}; - -// ORCHESTRATOR +use libp2p_identity::{ + ed25519::{self, SecretKey}, + Keypair, +}; +use libp2p_networking::{ + network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, + reexport::Multiaddr, +}; +use rand::rngs::StdRng; +use rand::SeedableRng; +use std::{collections::BTreeSet, sync::Arc}; +use std::{num::NonZeroUsize, str::FromStr}; +// use libp2p::{ +// identity::{ +// ed25519::{Keypair as EdKeypair, SecretKey}, +// Keypair, +// }, +// multiaddr::{self, Protocol}, +// Multiaddr, +// }; +use libp2p_identity::PeerId; +// use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; +use std::{fmt::Debug, net::Ipv4Addr}; +use std::{ + //collections::{BTreeSet, VecDeque}, + fs, + mem, + net::IpAddr, + //num::NonZeroUsize, + //str::FromStr, + //sync::Arc, + //time::{Duration, Instant}, + time::Instant, +}; +//use surf_disco::error::ClientError; +//use surf_disco::Client; +use tracing::{debug, error, info, warn}; #[derive(Parser, Debug, Clone)] #[command( @@ -69,29 +131,821 @@ pub fn load_config_from_file( config } +/// Runs the orchestrator +pub async fn run_orchestrator< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + QUORUMNETWORK, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + VIEWSYNCNETWORK, + Message, + >, + VIDExchange>, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, +>( + OrchestratorArgs { + host, + port, + config_file, + }: OrchestratorArgs, +) { + error!("Starting orchestrator",); + let run_config = load_config_from_file::(config_file); + let _result = hotshot_orchestrator::run_orchestrator::< + TYPES::SignatureKey, + TYPES::ElectionConfigType, + >(run_config, host, port) + .await; +} + +/// Helper function to calculate the nuymber of transactions to send per node per round +fn calculate_num_tx_per_round( + node_index: u64, + total_num_nodes: usize, + transactions_per_round: usize, +) -> usize { + if node_index == 0 { + transactions_per_round / total_num_nodes + transactions_per_round % total_num_nodes + } else { + transactions_per_round / total_num_nodes + } +} + +/// Defines the behavior of a "run" of the network with a given configuration +#[async_trait] +pub trait RunDA< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + QUORUMNETWORK, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + VIEWSYNCNETWORK, + Message, + >, + VIDExchange>, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, +> where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, + SystemContext: HotShotType, +{ + /// Initializes networking, returns self + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> Self; + + /// Initializes the genesis state and HotShot instance; does not start HotShot consensus + /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot + /// get the anchored view + /// Note: sequencing leaf does not have state, so does not return state + async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { + let genesis_block = TYPES::BlockType::genesis(); + let initializer = + hotshot::HotShotInitializer::>::from_genesis( + genesis_block, + ) + .expect("Couldn't generate genesis block"); + + let config = self.get_config(); + + // Get KeyPair for certificate Aggregation + let (pk, sk) = + TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); + let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); + let entry = pk.get_stake_table_entry(1u64); + + let da_network = self.get_da_network(); + let quorum_network = self.get_quorum_network(); + let view_sync_network = self.get_view_sync_network(); + let vid_network = self.get_vid_network(); + + // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config + let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { + as ConsensusExchange< + TYPES, + Message, + >>::Membership::default_election_config(config.config.total_nodes.get() as u64) + }); + + let committee_election_config = as ConsensusExchange< + TYPES, + Message, + >>::Membership::default_election_config( + config.config.da_committee_size.try_into().unwrap(), + ); + + let exchanges = NODE::Exchanges::create( + known_nodes_with_stake.clone(), + (quorum_election_config, committee_election_config), + ( + quorum_network.clone(), + da_network.clone(), + view_sync_network.clone(), + vid_network.clone(), + ), + pk.clone(), + entry.clone(), + sk.clone(), + ); + + SystemContext::init( + pk, + sk, + config.node_index, + config.config, + MemoryStorage::empty(), + exchanges, + initializer, + ConsensusMetricsValue::new(), + ) + .await + .expect("Could not init hotshot") + .0 + } + + /// Starts HotShot consensus, returns when consensus has finished + async fn run_hotshot(&self, mut context: SystemContextHandle) { + let NetworkConfig { + padding, + rounds, + transactions_per_round, + node_index, + config: HotShotConfig { total_nodes, .. }, + .. + } = self.get_config(); + + let size = mem::size_of::(); + let padding = padding.saturating_sub(size); + let mut txn_rng = StdRng::seed_from_u64(node_index); + + debug!("Adjusted padding size is {:?} bytes", padding); + + let mut total_transactions_committed = 0; + let mut total_transactions_sent = 0; + let transactions_to_send_per_round = + calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); + + info!("Starting hotshot!"); + let start = Instant::now(); + + let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; + let mut anchor_view: TYPES::Time = ::genesis(); + let mut num_successful_commits = 0; + + context.hotshot.start_consensus().await; + + loop { + match event_stream.next().await { + None => { + panic!("Error! Event stream completed before consensus ended."); + } + Some(Event { event, .. }) => { + match event { + EventType::Error { error } => { + error!("Error in consensus: {:?}", error); + // TODO what to do here + } + EventType::Decide { + leaf_chain, + qc: _, + block_size, + } => { + // this might be a obob + if let Some(leaf) = leaf_chain.get(0) { + info!("Decide event for leaf: {}", *leaf.view_number); + + let new_anchor = leaf.view_number; + if new_anchor >= anchor_view { + anchor_view = leaf.view_number; + } + } + + // send transactions + for _ in 0..transactions_to_send_per_round { + let txn = + <::StateType as TestableState>::create_random_transaction( + None, + &mut txn_rng, + padding as u64, + ); + _ = context.submit_transaction(txn).await.unwrap(); + total_transactions_sent += 1; + } + + if let Some(size) = block_size { + total_transactions_committed += size; + } + + num_successful_commits += leaf_chain.len(); + if num_successful_commits >= rounds { + break; + } + + if leaf_chain.len() > 1 { + warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); + } + // when we make progress, submit new events + } + EventType::ReplicaViewTimeout { view_number } => { + warn!("Timed out as a replicas in view {:?}", view_number); + } + EventType::NextLeaderViewTimeout { view_number } => { + warn!("Timed out as the next leader in view {:?}", view_number); + } + EventType::ViewFinished { view_number: _ } => {} + _ => unimplemented!(), + } + } + } + } + + // Output run results + let total_time_elapsed = start.elapsed(); + error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); + } + + /// Returns the da network for this run + fn get_da_network(&self) -> DANETWORK; + + /// Returns the quorum network for this run + fn get_quorum_network(&self) -> QUORUMNETWORK; + + ///Returns view sync network for this run + fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; + + ///Returns VID network for this run + fn get_vid_network(&self) -> VIDNETWORK; + + /// Returns the config for this run + fn get_config( + &self, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >; +} + +// WEB SERVER + +/// Represents a web server-based run +pub struct WebServerDARun< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +> { + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + quorum_network: WebCommChannel, + da_network: WebCommChannel, + view_sync_network: WebCommChannel, + vid_network: WebCommChannel, +} + +#[async_trait] +impl< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + WebCommChannel, + Message, + >, + CommitteeExchange< + TYPES, + MEMBERSHIP, + WebCommChannel, + Message, + >, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + WebCommChannel, + Message, + >, + VIDExchange< + TYPES, + MEMBERSHIP, + WebCommChannel, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + > + RunDA< + TYPES, + MEMBERSHIP, + WebCommChannel, + WebCommChannel, + WebCommChannel, + WebCommChannel, + NODE, + > for WebServerDARun +where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> WebServerDARun { + // Generate our own key + let (pub_key, _priv_key) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + config.seed, + config.node_index, + ); + + // Get the configuration for the web server + let WebServerConfig { + host, + port, + wait_between_polls, + }: WebServerConfig = config.clone().web_server_config.unwrap(); + + let underlying_quorum_network = WebServerNetwork::create( + &host.to_string(), + port, + wait_between_polls, + pub_key.clone(), + false, + ); + + // Create the network + let quorum_network: WebCommChannel = + WebCommChannel::new(underlying_quorum_network.clone().into()); + + let view_sync_network: WebCommChannel = + WebCommChannel::new(underlying_quorum_network.into()); + + let WebServerConfig { + host, + port, + wait_between_polls, + }: WebServerConfig = config.clone().da_web_server_config.unwrap(); + + // Each node runs the DA network so that leaders have access to transactions and DA votes + let da_network: WebCommChannel = WebCommChannel::new( + WebServerNetwork::create( + &host.to_string(), + port, + wait_between_polls, + pub_key.clone(), + true, + ) + .into(), + ); + + let vid_network: WebCommChannel = WebCommChannel::new( + WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) + .into(), + ); + + WebServerDARun { + config, + quorum_network, + da_network, + view_sync_network, + vid_network, + } + } + + fn get_da_network(&self) -> WebCommChannel { + self.da_network.clone() + } + + fn get_quorum_network(&self) -> WebCommChannel { + self.quorum_network.clone() + } + + fn get_view_sync_network(&self) -> WebCommChannel { + self.view_sync_network.clone() + } + + fn get_vid_network(&self) -> WebCommChannel { + self.vid_network.clone() + } + + fn get_config( + &self, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > { + self.config.clone() + } +} + +// Libp2p + +/// Represents a libp2p-based run +pub struct Libp2pDARun, MEMBERSHIP: Membership> +{ + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + quorum_network: Libp2pCommChannel, + da_network: Libp2pCommChannel, + view_sync_network: Libp2pCommChannel, + vid_network: Libp2pCommChannel, +} + +#[async_trait] +impl< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + CommitteeExchange< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + VIDExchange< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Message, + >, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + > + RunDA< + TYPES, + MEMBERSHIP, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + NODE, + > for Libp2pDARun +where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + >, + ) -> Libp2pDARun { + let (pubkey, _privkey) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + config.seed, + config.node_index, + ); + let mut config = config; + let libp2p_config = config + .libp2p_config + .take() + .expect("Configuration is not for a Libp2p network"); + let bs_len = libp2p_config.bootstrap_nodes.len(); + let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config + .bootstrap_nodes + .iter() + .map(|(addr, pair)| { + let kp = Keypair::from_protobuf_encoding(pair).unwrap(); + let peer_id = PeerId::from_public_key(&kp.public()); + let multiaddr = + Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) + .unwrap(); + (peer_id, multiaddr) + }) + .collect(); + let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); + let node_type = if (config.node_index as usize) < bs_len { + NetworkNodeType::Bootstrap + } else { + NetworkNodeType::Regular + }; + let node_index = config.node_index; + let port_index = match libp2p_config.index_ports { + true => node_index, + false => 0, + }; + let bound_addr: Multiaddr = format!( + "/{}/{}/udp/{}/quic-v1", + if libp2p_config.public_ip.is_ipv4() { + "ip4" + } else { + "ip6" + }, + libp2p_config.public_ip, + libp2p_config.base_port as u64 + port_index + ) + .parse() + .unwrap(); + + // generate network + let mut config_builder = NetworkNodeConfigBuilder::default(); + assert!(config.config.total_nodes.get() > 2); + let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); + config_builder.replication_factor(replicated_nodes); + config_builder.identity(identity.clone()); + + config_builder.bound_addr(Some(bound_addr.clone())); + + let to_connect_addrs = bootstrap_nodes + .iter() + .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) + .collect(); + + config_builder.to_connect_addrs(to_connect_addrs); + + let mesh_params = + // NOTE I'm arbitrarily choosing these. + match node_type { + NetworkNodeType::Bootstrap => MeshParams { + mesh_n_high: libp2p_config.bootstrap_mesh_n_high, + mesh_n_low: libp2p_config.bootstrap_mesh_n_low, + mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, + mesh_n: libp2p_config.bootstrap_mesh_n, + }, + NetworkNodeType::Regular => MeshParams { + mesh_n_high: libp2p_config.mesh_n_high, + mesh_n_low: libp2p_config.mesh_n_low, + mesh_outbound_min: libp2p_config.mesh_outbound_min, + mesh_n: libp2p_config.mesh_n, + }, + NetworkNodeType::Conductor => unreachable!(), + }; + config_builder.mesh_params(Some(mesh_params)); + + let mut all_keys = BTreeSet::new(); + let mut da_keys = BTreeSet::new(); + for i in 0..config.config.total_nodes.get() as u64 { + let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; + let pubkey = TYPES::SignatureKey::from_private(&privkey); + if i < config.config.da_committee_size as u64 { + da_keys.insert(pubkey.clone()); + } + all_keys.insert(pubkey); + } + + let node_config = config_builder.build().unwrap(); + let underlying_quorum_network = Libp2pNetwork::new( + NetworkingMetricsValue::new(), + node_config, + pubkey.clone(), + Arc::new(RwLock::new( + bootstrap_nodes + .iter() + .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) + .collect(), + )), + bs_len, + config.node_index as usize, + // NOTE: this introduces an invariant that the keys are assigned using this indexed + // function + all_keys, + da_keys, + ) + .await + .unwrap(); + + underlying_quorum_network.wait_for_ready().await; + + // Create the network + let quorum_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let view_sync_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let da_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + let vid_network: Libp2pCommChannel = + Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + + Libp2pDARun { + config, + quorum_network, + da_network, + view_sync_network, + vid_network, + } + } + + fn get_da_network(&self) -> Libp2pCommChannel { + self.da_network.clone() + } + + fn get_quorum_network(&self) -> Libp2pCommChannel { + self.quorum_network.clone() + } + + fn get_view_sync_network(&self) -> Libp2pCommChannel { + self.view_sync_network.clone() + } + + fn get_vid_network(&self) -> Libp2pCommChannel { + self.vid_network.clone() + } + + fn get_config( + &self, + ) -> NetworkConfig< + TYPES::SignatureKey, + ::StakeTableEntry, + TYPES::ElectionConfigType, + > { + self.config.clone() + } +} + +/// Main entry point for validators +pub async fn main_entry_point< + TYPES: NodeType, + MEMBERSHIP: Membership + Debug, + DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + NODE: NodeImplementation< + TYPES, + Leaf = SequencingLeaf, + Exchanges = SequencingExchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + SequencingLeaf, + QuorumProposal>, + MEMBERSHIP, + QUORUMNETWORK, + Message, + >, + CommitteeExchange>, + ViewSyncExchange< + TYPES, + ViewSyncCertificate, + MEMBERSHIP, + VIEWSYNCNETWORK, + Message, + >, + VIDExchange>, + >, + Storage = MemoryStorage>, + ConsensusMessage = SequencingMessage, + >, + RUNDA: RunDA, +>( + args: ValidatorArgs, +) where + ::StateType: TestableState, + ::BlockType: TestableBlock, + SequencingLeaf: TestableLeaf, +{ + setup_logging(); + setup_backtrace(); + + info!("Starting validator"); + + let orchestrator_client: OrchestratorClient = + OrchestratorClient::connect_to_orchestrator(args.clone()).await; + + // Identify with the orchestrator + let public_ip = match args.public_ip { + Some(ip) => ip, + None => IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + }; + info!( + "Identifying with orchestrator using IP address {}", + public_ip.to_string() + ); + let node_index: u16 = orchestrator_client + .identify_with_orchestrator(public_ip.to_string()) + .await; + info!("Finished identifying; our node index is {node_index}"); + info!("Getting config from orchestrator"); + + let mut run_config = orchestrator_client + .get_config_from_orchestrator::(node_index) + .await; + + run_config.node_index = node_index.into(); + //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); + + info!("Initializing networking"); + let run = RUNDA::initialize_networking(run_config.clone()).await; + let hotshot = run.initialize_state_and_hotshot().await; + + info!("Waiting for start command from orchestrator"); + orchestrator_client + .wait_for_all_nodes_ready(run_config.clone().node_index) + .await; + + info!("All nodes are ready! Starting HotShot"); + run.run_hotshot(hotshot).await; +} + pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); hasher.update(&index.to_le_bytes()); let new_seed = *hasher.finalize().as_bytes(); let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); - >::from(sk_bytes).into() -} - -/// libp2p helper function -/// convert node string into multi addr -/// node string of the form: "$IP:$PORT" -pub fn parse_dns(s: &str) -> Result { - let mut i = s.split(':'); - let ip = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; - let port = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; - Multiaddr::from_str(&format!("/dns/{ip}/udp/{port}/quic-v1")) -} - -/// libp2p helper function -pub fn parse_ip(s: &str) -> Result { - let mut i = s.split(':'); - let ip = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; - let port = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; - Multiaddr::from_str(&format!("/ip4/{ip}/udp/{port}/quic-v1")) + >::from(sk_bytes).into() } diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs deleted file mode 100644 index 5e50c1d44f..0000000000 --- a/hotshot/examples/infra/modDA.rs +++ /dev/null @@ -1,899 +0,0 @@ -use crate::infra::{load_config_from_file, OrchestratorArgs}; - -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use async_lock::RwLock; -use async_trait::async_trait; -use futures::StreamExt; -use hotshot::{ - traits::{ - implementations::{ - Libp2pCommChannel, Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, - WebCommChannel, WebServerNetwork, - }, - NodeImplementation, - }, - types::{SignatureKey, SystemContextHandle}, - HotShotType, SystemContext, -}; -use hotshot_orchestrator::{ - self, - client::{OrchestratorClient, ValidatorArgs}, - config::{NetworkConfig, WebServerConfig}, -}; -use hotshot_task::task::FilterEvent; -use hotshot_types::traits::election::VIDExchange; -use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, - consensus::ConsensusMetricsValue, - data::{QuorumProposal, SequencingLeaf, TestableLeaf}, - event::{Event, EventType}, - message::{Message, SequencingMessage}, - traits::{ - election::{ - CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, - }, - network::CommunicationChannel, - node_implementation::{ - CommitteeEx, ExchangesType, NodeType, QuorumEx, SequencingExchanges, - }, - state::{ConsensusTime, TestableBlock, TestableState}, - }, - HotShotConfig, -}; -use libp2p_identity::{ - ed25519::{self, SecretKey}, - Keypair, -}; -use libp2p_networking::{ - network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, - reexport::Multiaddr, -}; -use rand::rngs::StdRng; -use rand::SeedableRng; -use std::{collections::BTreeSet, sync::Arc}; -use std::{num::NonZeroUsize, str::FromStr}; -// use libp2p::{ -// identity::{ -// ed25519::{Keypair as EdKeypair, SecretKey}, -// Keypair, -// }, -// multiaddr::{self, Protocol}, -// Multiaddr, -// }; -use libp2p_identity::PeerId; -// use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; -use std::{fmt::Debug, net::Ipv4Addr}; -use std::{ - //collections::{BTreeSet, VecDeque}, - //fs, - mem, - net::IpAddr, - //num::NonZeroUsize, - //str::FromStr, - //sync::Arc, - //time::{Duration, Instant}, - time::Instant, -}; -//use surf_disco::error::ClientError; -//use surf_disco::Client; -use tracing::{debug, error, info, warn}; - -/// Runs the orchestrator -pub async fn run_orchestrator_da< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, - CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, - VIDExchange>, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, ->( - OrchestratorArgs { - host, - port, - config_file, - }: OrchestratorArgs, -) { - error!("Starting orchestrator",); - let run_config = load_config_from_file::(config_file); - let _result = hotshot_orchestrator::run_orchestrator::< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >(run_config, host, port) - .await; -} - -/// Helper function to calculate the nuymber of transactions to send per node per round -fn calculate_num_tx_per_round( - node_index: u64, - total_num_nodes: usize, - transactions_per_round: usize, -) -> usize { - if node_index == 0 { - transactions_per_round / total_num_nodes + transactions_per_round % total_num_nodes - } else { - transactions_per_round / total_num_nodes - } -} - -/// Defines the behavior of a "run" of the network with a given configuration -#[async_trait] -pub trait RunDA< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, - CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, - VIDExchange>, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, -> where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, - Self: Sync, - SystemContext: HotShotType, -{ - /// Initializes networking, returns self - async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - ) -> Self; - - /// Initializes the genesis state and HotShot instance; does not start HotShot consensus - /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot - /// get the anchored view - /// Note: sequencing leaf does not have state, so does not return state - async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let genesis_block = TYPES::BlockType::genesis(); - let initializer = - hotshot::HotShotInitializer::>::from_genesis( - genesis_block, - ) - .expect("Couldn't generate genesis block"); - - let config = self.get_config(); - - // Get KeyPair for certificate Aggregation - let (pk, sk) = - TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); - let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let entry = pk.get_stake_table_entry(1u64); - - let da_network = self.get_da_network(); - let quorum_network = self.get_quorum_network(); - let view_sync_network = self.get_view_sync_network(); - let vid_network = self.get_vid_network(); - - // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config - let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config(config.config.total_nodes.get() as u64) - }); - - let committee_election_config = as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config( - config.config.da_committee_size.try_into().unwrap(), - ); - - let exchanges = NODE::Exchanges::create( - known_nodes_with_stake.clone(), - (quorum_election_config, committee_election_config), - ( - quorum_network.clone(), - da_network.clone(), - view_sync_network.clone(), - vid_network.clone(), - ), - pk.clone(), - entry.clone(), - sk.clone(), - ); - - SystemContext::init( - pk, - sk, - config.node_index, - config.config, - MemoryStorage::empty(), - exchanges, - initializer, - ConsensusMetricsValue::new(), - ) - .await - .expect("Could not init hotshot") - .0 - } - - /// Starts HotShot consensus, returns when consensus has finished - async fn run_hotshot(&self, mut context: SystemContextHandle) { - let NetworkConfig { - padding, - rounds, - transactions_per_round, - node_index, - config: HotShotConfig { total_nodes, .. }, - .. - } = self.get_config(); - - let size = mem::size_of::(); - let padding = padding.saturating_sub(size); - let mut txn_rng = StdRng::seed_from_u64(node_index); - - debug!("Adjusted padding size is {:?} bytes", padding); - - let mut total_transactions_committed = 0; - let mut total_transactions_sent = 0; - let transactions_to_send_per_round = - calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); - - info!("Starting hotshot!"); - let start = Instant::now(); - - let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; - let mut anchor_view: TYPES::Time = ::genesis(); - let mut num_successful_commits = 0; - - context.hotshot.start_consensus().await; - - loop { - match event_stream.next().await { - None => { - panic!("Error! Event stream completed before consensus ended."); - } - Some(Event { event, .. }) => { - match event { - EventType::Error { error } => { - error!("Error in consensus: {:?}", error); - // TODO what to do here - } - EventType::Decide { - leaf_chain, - qc: _, - block_size, - } => { - // this might be a obob - if let Some(leaf) = leaf_chain.get(0) { - info!("Decide event for leaf: {}", *leaf.view_number); - - let new_anchor = leaf.view_number; - if new_anchor >= anchor_view { - anchor_view = leaf.view_number; - } - } - - // send transactions - for _ in 0..transactions_to_send_per_round { - let txn = - <::StateType as TestableState>::create_random_transaction( - None, - &mut txn_rng, - padding as u64, - ); - _ = context.submit_transaction(txn).await.unwrap(); - total_transactions_sent += 1; - } - - if let Some(size) = block_size { - total_transactions_committed += size; - } - - num_successful_commits += leaf_chain.len(); - if num_successful_commits >= rounds { - break; - } - - if leaf_chain.len() > 1 { - warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); - } - // when we make progress, submit new events - } - EventType::ReplicaViewTimeout { view_number } => { - warn!("Timed out as a replicas in view {:?}", view_number); - } - EventType::NextLeaderViewTimeout { view_number } => { - warn!("Timed out as the next leader in view {:?}", view_number); - } - EventType::ViewFinished { view_number: _ } => {} - _ => unimplemented!(), - } - } - } - } - - // Output run results - let total_time_elapsed = start.elapsed(); - error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); - } - - /// Returns the da network for this run - fn get_da_network(&self) -> DANETWORK; - - /// Returns the quorum network for this run - fn get_quorum_network(&self) -> QUORUMNETWORK; - - ///Returns view sync network for this run - fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; - - ///Returns VID network for this run - fn get_vid_network(&self) -> VIDNETWORK; - - /// Returns the config for this run - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >; -} - -// WEB SERVER - -/// Represents a web server-based run -pub struct WebServerDARun< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - quorum_network: WebCommChannel, - da_network: WebCommChannel, - view_sync_network: WebCommChannel, - vid_network: WebCommChannel, -} - -#[async_trait] -impl< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - WebCommChannel, - Message, - >, - CommitteeExchange< - TYPES, - MEMBERSHIP, - WebCommChannel, - Message, - >, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - WebCommChannel, - Message, - >, - VIDExchange< - TYPES, - MEMBERSHIP, - WebCommChannel, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, - > - RunDA< - TYPES, - MEMBERSHIP, - WebCommChannel, - WebCommChannel, - WebCommChannel, - WebCommChannel, - NODE, - > for WebServerDARun -where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, - Self: Sync, -{ - async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - ) -> WebServerDARun { - // Generate our own key - let (pub_key, _priv_key) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); - - // Get the configuration for the web server - let WebServerConfig { - host, - port, - wait_between_polls, - }: WebServerConfig = config.clone().web_server_config.unwrap(); - - let underlying_quorum_network = WebServerNetwork::create( - &host.to_string(), - port, - wait_between_polls, - pub_key.clone(), - false, - ); - - // Create the network - let quorum_network: WebCommChannel = - WebCommChannel::new(underlying_quorum_network.clone().into()); - - let view_sync_network: WebCommChannel = - WebCommChannel::new(underlying_quorum_network.into()); - - let WebServerConfig { - host, - port, - wait_between_polls, - }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - - // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create( - &host.to_string(), - port, - wait_between_polls, - pub_key.clone(), - true, - ) - .into(), - ); - - let vid_network: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) - .into(), - ); - - WebServerDARun { - config, - quorum_network, - da_network, - view_sync_network, - vid_network, - } - } - - fn get_da_network(&self) -> WebCommChannel { - self.da_network.clone() - } - - fn get_quorum_network(&self) -> WebCommChannel { - self.quorum_network.clone() - } - - fn get_view_sync_network(&self) -> WebCommChannel { - self.view_sync_network.clone() - } - - fn get_vid_network(&self) -> WebCommChannel { - self.vid_network.clone() - } - - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - > { - self.config.clone() - } -} - -// Libp2p - -/// Represents a libp2p-based run -pub struct Libp2pDARun, MEMBERSHIP: Membership> -{ - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - quorum_network: Libp2pCommChannel, - da_network: Libp2pCommChannel, - view_sync_network: Libp2pCommChannel, - vid_network: Libp2pCommChannel, -} - -#[async_trait] -impl< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - CommitteeExchange< - TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - VIDExchange< - TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, - > - RunDA< - TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, - NODE, - > for Libp2pDARun -where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, - Self: Sync, -{ - async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - ) -> Libp2pDARun { - let (pubkey, _privkey) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); - let mut config = config; - let libp2p_config = config - .libp2p_config - .take() - .expect("Configuration is not for a Libp2p network"); - let bs_len = libp2p_config.bootstrap_nodes.len(); - let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config - .bootstrap_nodes - .iter() - .map(|(addr, pair)| { - let kp = Keypair::from_protobuf_encoding(pair).unwrap(); - let peer_id = PeerId::from_public_key(&kp.public()); - let multiaddr = - Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) - .unwrap(); - (peer_id, multiaddr) - }) - .collect(); - let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); - let node_type = if (config.node_index as usize) < bs_len { - NetworkNodeType::Bootstrap - } else { - NetworkNodeType::Regular - }; - let node_index = config.node_index; - let port_index = match libp2p_config.index_ports { - true => node_index, - false => 0, - }; - let bound_addr: Multiaddr = format!( - "/{}/{}/udp/{}/quic-v1", - if libp2p_config.public_ip.is_ipv4() { - "ip4" - } else { - "ip6" - }, - libp2p_config.public_ip, - libp2p_config.base_port as u64 + port_index - ) - .parse() - .unwrap(); - - // generate network - let mut config_builder = NetworkNodeConfigBuilder::default(); - assert!(config.config.total_nodes.get() > 2); - let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); - config_builder.replication_factor(replicated_nodes); - config_builder.identity(identity.clone()); - - config_builder.bound_addr(Some(bound_addr.clone())); - - let to_connect_addrs = bootstrap_nodes - .iter() - .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) - .collect(); - - config_builder.to_connect_addrs(to_connect_addrs); - - let mesh_params = - // NOTE I'm arbitrarily choosing these. - match node_type { - NetworkNodeType::Bootstrap => MeshParams { - mesh_n_high: libp2p_config.bootstrap_mesh_n_high, - mesh_n_low: libp2p_config.bootstrap_mesh_n_low, - mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, - mesh_n: libp2p_config.bootstrap_mesh_n, - }, - NetworkNodeType::Regular => MeshParams { - mesh_n_high: libp2p_config.mesh_n_high, - mesh_n_low: libp2p_config.mesh_n_low, - mesh_outbound_min: libp2p_config.mesh_outbound_min, - mesh_n: libp2p_config.mesh_n, - }, - NetworkNodeType::Conductor => unreachable!(), - }; - config_builder.mesh_params(Some(mesh_params)); - - let mut all_keys = BTreeSet::new(); - let mut da_keys = BTreeSet::new(); - for i in 0..config.config.total_nodes.get() as u64 { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); - if i < config.config.da_committee_size as u64 { - da_keys.insert(pubkey.clone()); - } - all_keys.insert(pubkey); - } - - let node_config = config_builder.build().unwrap(); - let underlying_quorum_network = Libp2pNetwork::new( - NetworkingMetricsValue::new(), - node_config, - pubkey.clone(), - Arc::new(RwLock::new( - bootstrap_nodes - .iter() - .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) - .collect(), - )), - bs_len, - config.node_index as usize, - // NOTE: this introduces an invariant that the keys are assigned using this indexed - // function - all_keys, - da_keys, - ) - .await - .unwrap(); - - underlying_quorum_network.wait_for_ready().await; - - // Create the network - let quorum_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let view_sync_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let da_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let vid_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - Libp2pDARun { - config, - quorum_network, - da_network, - view_sync_network, - vid_network, - } - } - - fn get_da_network(&self) -> Libp2pCommChannel { - self.da_network.clone() - } - - fn get_quorum_network(&self) -> Libp2pCommChannel { - self.quorum_network.clone() - } - - fn get_view_sync_network(&self) -> Libp2pCommChannel { - self.view_sync_network.clone() - } - - fn get_vid_network(&self) -> Libp2pCommChannel { - self.vid_network.clone() - } - - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - > { - self.config.clone() - } -} - -/// Main entry point for validators -pub async fn main_entry_point< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, - CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, - VIDExchange>, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, - RUNDA: RunDA, ->( - args: ValidatorArgs, -) where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, -{ - setup_logging(); - setup_backtrace(); - - info!("Starting validator"); - - let orchestrator_client: OrchestratorClient = - OrchestratorClient::connect_to_orchestrator(args.clone()).await; - - // Identify with the orchestrator - let public_ip = match args.public_ip { - Some(ip) => ip, - None => IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - }; - info!( - "Identifying with orchestrator using IP address {}", - public_ip.to_string() - ); - let node_index: u16 = orchestrator_client - .identify_with_orchestrator(public_ip.to_string()) - .await; - info!("Finished identifying; our node index is {node_index}"); - info!("Getting config from orchestrator"); - - let mut run_config = orchestrator_client - .get_config_from_orchestrator::(node_index) - .await; - - run_config.node_index = node_index.into(); - //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); - - info!("Initializing networking"); - let run = RUNDA::initialize_networking(run_config.clone()).await; - let hotshot = run.initialize_state_and_hotshot().await; - - info!("Waiting for start command from orchestrator"); - orchestrator_client - .wait_for_all_nodes_ready(run_config.clone().node_index) - .await; - - info!("All nodes are ready! Starting HotShot"); - run.run_hotshot(hotshot).await; -} - -pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { - let mut hasher = blake3::Hasher::new(); - hasher.update(&seed); - hasher.update(&index.to_le_bytes()); - let new_seed = *hasher.finalize().as_bytes(); - let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); - >::from(sk_bytes).into() -} diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index b45c706304..f6e955795d 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -15,8 +15,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[path = "../infra/modDA.rs"] -pub mod infra_da; #[derive(Parser, Debug, Clone)] struct MultiValidatorArgs { @@ -49,7 +47,7 @@ async fn main() { let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let node = async_spawn(async move { - infra_da::main_entry_point::< + infra::main_entry_point::< DemoTypes, ThisMembership, DANetwork, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 79e0b01560..98a958887c 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -6,16 +6,12 @@ use hotshot::demo::DemoTypes; use tracing::instrument; use types::ThisMembership; -use crate::{ - infra::OrchestratorArgs, - infra_da::run_orchestrator_da, - types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, -}; +use crate::infra::run_orchestrator; +use crate::infra::OrchestratorArgs; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}; #[path = "../infra/mod.rs"] pub mod infra; -#[path = "../infra/modDA.rs"] -pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -28,7 +24,7 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator_da::< + run_orchestrator::< DemoTypes, ThisMembership, DANetwork, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index d42acfacdd..cc474d62b8 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,4 +1,4 @@ -use crate::infra_da::Libp2pDARun; +use crate::infra::Libp2pDARun; use hotshot::{ demo::DemoTypes, traits::{ diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 8f6084ddd5..3165e8f902 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -12,8 +12,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[path = "../infra/modDA.rs"] -pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -29,7 +27,7 @@ async fn main() { "connecting to orchestrator at {:?}:{:?}", args.host, args.port ); - infra_da::main_entry_point::< + infra::main_entry_point::< DemoTypes, ThisMembership, DANetwork, diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index b45c706304..f6e955795d 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -15,8 +15,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[path = "../infra/modDA.rs"] -pub mod infra_da; #[derive(Parser, Debug, Clone)] struct MultiValidatorArgs { @@ -49,7 +47,7 @@ async fn main() { let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let node = async_spawn(async move { - infra_da::main_entry_point::< + infra::main_entry_point::< DemoTypes, ThisMembership, DANetwork, diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 0853caaef5..84cd6d325b 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -6,16 +6,12 @@ use hotshot::demo::DemoTypes; use tracing::instrument; use types::{ThisMembership, VIDNetwork}; -use crate::{ - infra::OrchestratorArgs, - infra_da::run_orchestrator_da, - types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, -}; +use crate::infra::run_orchestrator; +use crate::infra::OrchestratorArgs; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}; #[path = "../infra/mod.rs"] pub mod infra; -#[path = "../infra/modDA.rs"] -pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -28,7 +24,7 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator_da::< + run_orchestrator::< DemoTypes, ThisMembership, DANetwork, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 3de7a1531f..6fe7b1b087 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -1,4 +1,4 @@ -use crate::infra_da::WebServerDARun; +use crate::infra::WebServerDARun; use hotshot::{ demo::DemoTypes, traits::{ diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index 8f6084ddd5..3165e8f902 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -12,8 +12,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[path = "../infra/modDA.rs"] -pub mod infra_da; #[cfg_attr( async_executor_impl = "tokio", @@ -29,7 +27,7 @@ async fn main() { "connecting to orchestrator at {:?}:{:?}", args.host, args.port ); - infra_da::main_entry_point::< + infra::main_entry_point::< DemoTypes, ThisMembership, DANetwork, From 7a685488e48f532f85fbc1f36661b50802d95b79 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 25 Oct 2023 19:41:59 -0700 Subject: [PATCH 0257/1393] simplify hotshot and add node's validator info to hotshotconfig --- hotshot/examples/infra/mod.rs | 1 + hotshot/examples/infra/modDA.rs | 1 + hotshot/src/lib.rs | 9 ++---- orchestrator/src/config.rs | 46 +++---------------------------- testing/Cargo.toml | 1 + testing/src/task_helpers.rs | 15 +++------- testing/src/test_builder.rs | 15 ++++++---- testing/src/test_launcher.rs | 7 ++--- testing/src/test_runner.rs | 28 +++++++------------ testing/tests/basic.rs | 8 +++--- testing/tests/catchup.rs | 8 +++--- testing/tests/combined_network.rs | 4 +-- testing/tests/libp2p.rs | 4 +-- testing/tests/timeout.rs | 4 +-- testing/tests/web_server.rs | 2 +- types/src/consensus.rs | 1 - types/src/lib.rs | 44 ++++++++++++++++++++++++++--- 17 files changed, 90 insertions(+), 108 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 7f2bf3ffa3..b5802e569d 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -45,6 +45,7 @@ pub fn load_config_from_file( config_toml.into(); // Generate network's public keys + // Sishan NOTE: Looks like this file is not used anymore, so ignore the following initialization on keys. let mut known_nodes_sk = Vec::new(); let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index bb5c544a80..ae383e878b 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -201,6 +201,7 @@ pub trait RunDA< let config = self.get_config(); // Get KeyPair for certificate Aggregation + // Sishan NOTE: Looks like this file is not used anymore, so ignore the following initialization on keys. let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let known_nodes_sk = config.config.known_nodes_sk.clone(); let entry = known_nodes_with_stake diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 9c70d10362..1ed39c8496 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -115,8 +115,7 @@ pub struct SystemContextInner> { /// Configuration items for this hotshot instance config: HotShotConfig< - ::PrivateKey, - ::StakeTableEntry, + TYPES::SignatureKey, TYPES::ElectionConfigType, >, @@ -171,8 +170,7 @@ impl> SystemContext { private_key: ::PrivateKey, nonce: u64, config: HotShotConfig< - ::PrivateKey, - ::StakeTableEntry, + TYPES::SignatureKey, TYPES::ElectionConfigType, >, storage: I::Storage, @@ -380,8 +378,7 @@ impl> SystemContext { private_key: ::PrivateKey, node_id: u64, config: HotShotConfig< - ::PrivateKey, - ::StakeTableEntry, + TYPES::SignatureKey, TYPES::ElectionConfigType, >, storage: I::Storage, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 37d17117c7..272c6e2b42 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,6 +1,5 @@ -use hotshot_types::{traits::{signature_key::SignatureKey, election::ElectionConfig,}, ExecutionType, HotShotConfig}; +use hotshot_types::{traits::{signature_key::SignatureKey, election::ElectionConfig,}, ExecutionType, HotShotConfig, ValidatorConfig}; use std::{ - marker::PhantomData, net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, time::Duration, @@ -50,40 +49,6 @@ pub struct WebServerConfig { pub wait_between_polls: Duration, } -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -#[serde(bound(deserialize = ""))] -pub struct ValidatorConfig { - /// The validator's public key and stake value - pub public_key: KEY, - /// The validator's private key, should be in the mempool, not public - pub private_key: KEY::PrivateKey, - /// The validator's stake - pub stake_value: u64, - /// The validator's public_key together with its stake value, which can be served as public parameter for key aggregation - pub entry: KEY::StakeTableEntry, -} - -impl ValidatorConfig { - fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { - let (public_key, private_key) = KEY::generated_from_seed_indexed( - seed, - index, - ); - Self { - public_key: public_key.clone(), - private_key: private_key, - stake_value: stake_value, - entry: public_key.get_stake_table_entry(stake_value), - } - } -} - -impl Default for ValidatorConfig { - fn default() -> Self { - Self::generated_from_seed_indexed([0u8; 32], 0, 1) - } -} - #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] #[serde(bound(deserialize = ""))] pub struct NetworkConfig { @@ -101,10 +66,9 @@ pub struct NetworkConfig { pub key_type_name: String, pub election_config_type_name: String, pub libp2p_config: Option, - pub config: HotShotConfig, + pub config: HotShotConfig, pub web_server_config: Option, pub da_web_server_config: Option, - _key_type_phantom: PhantomData, } impl Default for NetworkConfig { @@ -123,7 +87,6 @@ impl Default for NetworkConfig { election_config_type_name: std::any::type_name::().to_string(), web_server_config: None, da_web_server_config: None, - _key_type_phantom: PhantomData, next_view_timeout: 10, num_bootrap: 5, propose_min_round_time: Duration::from_secs(0), @@ -200,7 +163,6 @@ impl From for NetworkConf start_delay_seconds: val.start_delay_seconds, web_server_config: val.web_server_config, da_web_server_config: val.da_web_server_config, - _key_type_phantom: PhantomData, } } } @@ -232,7 +194,7 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, } -impl From for HotShotConfig { +impl From for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, @@ -240,7 +202,7 @@ impl From for HotShotConfig(); + let launcher = builder.gen_launcher::(node_id); let networks = (launcher.resource_generator.channel_generator)(node_id); let storage = (launcher.resource_generator.storage)(node_id); @@ -49,16 +50,8 @@ pub async fn build_system_handle( .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let private_key = config - .known_nodes_sk - .get(node_id as usize) - .expect("node_id should be within the range of known_nodes") - .clone(); - let public_key = ::get_public_key( - known_nodes_with_stake - .get(node_id as usize) - .expect("node_id should be within the range of known_nodes"), - ); + let private_key = config.my_own_validator_config.private_key.clone(); + let public_key = config.my_own_validator_config.public_key.clone(); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< SequencingTestTypes, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index ce36363f08..0d7ce23df8 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -7,7 +7,7 @@ use hotshot_types::message::{Message, SequencingMessage}; use hotshot_types::{ traits::node_implementation::{NodeType, QuorumEx, TestableExchange}, - ExecutionType, HotShotConfig, + ExecutionType, HotShotConfig, ValidatorConfig, }; use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; @@ -180,6 +180,7 @@ impl Default for TestMetadata { impl TestMetadata { pub fn gen_launcher>( self, + node_id: u64, ) -> TestLauncher where I: NodeImplementation>, @@ -200,12 +201,10 @@ impl TestMetadata { .. } = self.clone(); - let mut known_nodes_sk: Vec<::PrivateKey> = Vec::new(); let known_nodes: Vec<::SignatureKey> = (0..total_nodes) .map(|id| { let priv_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id as u64).1; - known_nodes_sk.push(priv_key.clone()); TYPES::SignatureKey::from_private(&priv_key) }) .collect(); @@ -213,7 +212,11 @@ impl TestMetadata { (0..total_nodes) .map(|id| known_nodes[id].get_stake_table_entry(1u64)) .collect(); - + let my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( + [0u8; 32], + node_id, + 1, + ); // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { // TODO this doesn't exist anymore @@ -223,7 +226,7 @@ impl TestMetadata { min_transactions, max_transactions: NonZeroUsize::new(99999).unwrap(), known_nodes_with_stake, - known_nodes_sk, + my_own_validator_config, da_committee_size, next_view_timeout: 500, timeout_ratio: (11, 10), @@ -250,7 +253,7 @@ impl TestMetadata { } = timing_data; let mod_config = // TODO this should really be using the timing config struct - |a: &mut HotShotConfig<::PrivateKey, ::StakeTableEntry, TYPES::ElectionConfigType>| { + |a: &mut HotShotConfig| { a.next_view_timeout = next_view_timeout; a.timeout_ratio = timeout_ratio; a.round_start_delay = round_start_delay; diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index a5a9625edc..a4c7cbb1bb 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -16,7 +16,6 @@ use hotshot_types::{ node_implementation::{ ExchangesType, NodeType, QuorumCommChannel, QuorumEx, QuorumNetwork, }, - signature_key::SignatureKey, }, HotShotConfig, }; @@ -92,8 +91,7 @@ where pub storage: Generator<>::Storage>, /// configuration used to generate each hotshot node pub config: HotShotConfig< - ::PrivateKey, - ::StakeTableEntry, + TYPES::SignatureKey, TYPES::ElectionConfigType, >, } @@ -196,8 +194,7 @@ impl> TestLauncher::PrivateKey, - ::StakeTableEntry, + TYPES::SignatureKey, TYPES::ElectionConfigType, >, ), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 31fa25b491..a82e557f88 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -22,7 +22,7 @@ use hotshot_types::{ node_implementation::{ExchangesType, NodeType, QuorumCommChannel, QuorumEx}, signature_key::SignatureKey, }, - HotShotConfig, + HotShotConfig, ValidatorConfig, }; use std::collections::{HashMap, HashSet}; @@ -207,8 +207,9 @@ where let initializer = HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); let networks = (self.launcher.resource_generator.channel_generator)(node_id); + let validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); let hotshot = self - .add_node_with_config(networks, storage, initializer, config) + .add_node_with_config(networks, storage, initializer, config, validator_config) .await; if late_start.contains(&node_id) { self.late_start.insert(node_id, hotshot); @@ -231,10 +232,10 @@ where storage: I::Storage, initializer: HotShotInitializer, config: HotShotConfig< - ::PrivateKey, - ::StakeTableEntry, + TYPES::SignatureKey, TYPES::ElectionConfigType, >, + validator_config: ValidatorConfig, ) -> SystemContext where I::Exchanges: ExchangesType< @@ -247,20 +248,11 @@ where let node_id = self.next_node_id; self.next_node_id += 1; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - // Generate key pair for certificate aggregation - let private_key = config - .known_nodes_sk - .get(node_id as usize) - .expect("node_id should be within the range of known_nodes") - .clone(); - let public_key = <::SignatureKey as SignatureKey>::get_public_key( - known_nodes_with_stake - .get(node_id as usize) - .expect("node_id should be within the range of known_nodes"), - ); - let entry = known_nodes_with_stake - .get(node_id as usize) - .expect("node_id should be within the range of known_nodes"); + // Get key pair for certificate aggregation + let private_key = validator_config.private_key.clone(); + let public_key = validator_config.public_key.clone(); + let entry = + public_key.get_stake_table_entry(validator_config.stake_value); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TYPES, diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index cbc2c149dc..c08645542e 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -24,7 +24,7 @@ async fn test_success() { ..TestMetadata::default() }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -63,7 +63,7 @@ async fn test_with_failures_one() { node_changes: vec![(Duration::new(4, 0), dead_nodes)], }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -112,7 +112,7 @@ async fn test_with_failures_half_f() { node_changes: vec![(Duration::new(4, 0), dead_nodes)], }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -173,7 +173,7 @@ async fn test_with_failures_f() { node_changes: vec![(Duration::new(4, 0), dead_nodes)], }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 01610f38f2..7ca2d738be 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -53,7 +53,7 @@ async fn test_catchup() { }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -108,7 +108,7 @@ async fn test_catchup_web() { }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -167,7 +167,7 @@ async fn test_catchup_one_node() { metadata.overall_safety_properties.num_failed_views = 5; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -231,7 +231,7 @@ async fn test_catchup_in_view_sync() { }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 30a7f5e277..58b5b1318b 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -40,7 +40,7 @@ async fn test_combined_network() { }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await @@ -59,7 +59,7 @@ async fn test_stress_combined_network() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata::default_stress(); metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 26bdf11200..c7d3ac5428 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -32,7 +32,7 @@ async fn libp2p_network() { }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await @@ -51,7 +51,7 @@ async fn test_stress_libp2p_network() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata::default_stress(); metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index e8e4278195..e85a13e6bc 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -57,7 +57,7 @@ async fn test_timeout_web() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; @@ -120,7 +120,7 @@ async fn test_timeout_libp2p() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 9b19532902..b182d12b53 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -39,7 +39,7 @@ async fn web_server_network() { ..TestMetadata::default() }; metadata - .gen_launcher::() + .gen_launcher::(0) .launch() .run_test() .await; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 427fd5e587..8f9f7a11ef 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -59,7 +59,6 @@ pub struct Consensus> { pub high_qc: QuorumCertificate>, /// A reference to the metrics trait - #[debug(skip)] pub metrics: Arc, } diff --git a/types/src/lib.rs b/types/src/lib.rs index cdb74bf156..e436729013 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -11,6 +11,8 @@ use std::{num::NonZeroUsize, time::Duration}; +use traits::{signature_key::SignatureKey, election::ElectionConfig}; + pub mod block_impl; pub mod certificate; pub mod consensus; @@ -34,9 +36,43 @@ pub enum ExecutionType { Incremental, } +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +#[serde(bound(deserialize = ""))] +/// config for validator, including public key, private key, stake value +pub struct ValidatorConfig { + /// The validator's public key and stake value + pub public_key: KEY, + /// The validator's private key, should be in the mempool, not public + pub private_key: KEY::PrivateKey, + /// The validator's stake + pub stake_value: u64, +} + +impl ValidatorConfig { + /// generate validator config from input seed, index and stake value + pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { + let (public_key, private_key) = KEY::generated_from_seed_indexed( + seed, + index, + ); + Self { + public_key: public_key.clone(), + private_key: private_key, + stake_value: stake_value, + } + } +} + +impl Default for ValidatorConfig { + fn default() -> Self { + Self::generated_from_seed_indexed([0u8; 32], 0, 1) + } +} + /// Holds configuration for a `HotShot` #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] -pub struct HotShotConfig { +#[serde(bound(deserialize = ""))] +pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, /// Total number of nodes in the network @@ -46,9 +82,9 @@ pub struct HotShotConfig { /// Maximum transactions per block pub max_transactions: NonZeroUsize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter - pub known_nodes_with_stake: Vec, - /// List of known node's private keys for network initialization, cannot be public - pub known_nodes_sk: Vec, + pub known_nodes_with_stake: Vec, + /// My own validator config, including my public key, private key, stake value, serving as private parameter + pub my_own_validator_config: ValidatorConfig, /// List of DA committee nodes for static DA committe pub da_committee_size: usize, /// Base duration for next-view timeout, in milliseconds From 1eb8b339f97edf1dbca9fff8fce5b70f53a2d597 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 00:04:38 -0700 Subject: [PATCH 0258/1393] move validator config from NetworkConfig to HotshotConfigFile --- orchestrator/Cargo.toml | 1 + orchestrator/src/config.rs | 57 ++++++++++++++++++++----------------- testing/src/task_helpers.rs | 1 - 3 files changed, 32 insertions(+), 27 deletions(-) diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 4a1cfad69c..b4f093972d 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -15,6 +15,7 @@ libp2p = { workspace = true } blake3 = { workspace = true, features = ["traits-preview"] } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } hotshot-utils = { path = "../utils" } +hotshot-signature-key = { path = "../hotshot-signature-key" } libp2p-networking = { workspace = true } tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } surf-disco = { workspace = true } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 272c6e2b42..c1d4980047 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -60,7 +60,6 @@ pub struct NetworkConfig { pub propose_max_round_time: Duration, pub node_index: u64, pub seed: [u8; 32], - pub validator_config: ValidatorConfig, pub padding: usize, pub start_delay_seconds: u64, pub key_type_name: String, @@ -78,10 +77,9 @@ impl Default for NetworkConfig { transactions_per_round: default_transactions_per_round(), node_index: 0, seed: [0u8; 32], - validator_config: ValidatorConfig::default(), padding: default_padding(), libp2p_config: None, - config: default_config().into(), + config: HotShotConfigFile::default().into(), start_delay_seconds: 60, key_type_name: std::any::type_name::().to_string(), election_config_type_name: std::any::type_name::().to_string(), @@ -96,7 +94,8 @@ impl Default for NetworkConfig { } #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] -pub struct NetworkConfigFile { +#[serde(bound(deserialize = ""))] +pub struct NetworkConfigFile { #[serde(default = "default_rounds")] pub rounds: usize, #[serde(default = "default_transactions_per_round")] @@ -111,8 +110,8 @@ pub struct NetworkConfigFile { pub start_delay_seconds: u64, #[serde(default)] pub libp2p_config: Option, - #[serde(default = "default_config")] - pub config: HotShotConfigFile, + #[serde(default)] + pub config: HotShotConfigFile, #[serde(default = "default_web_server_config")] pub web_server_config: Option, #[serde(default = "default_web_server_config")] @@ -123,8 +122,8 @@ fn default_web_server_config() -> Option { None } -impl From for NetworkConfig { - fn from(val: NetworkConfigFile) -> Self { +impl From> for NetworkConfig { + fn from(val: NetworkConfigFile) -> Self { NetworkConfig { rounds: val.rounds, transactions_per_round: val.transactions_per_round, @@ -134,7 +133,6 @@ impl From for NetworkConf propose_max_round_time: val.config.propose_max_round_time, propose_min_round_time: val.config.propose_min_round_time, seed: val.seed, - validator_config: ValidatorConfig::generated_from_seed_indexed(val.seed, 0, 1), padding: val.padding, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { num_bootstrap_nodes: val.config.num_bootstrap, @@ -169,9 +167,12 @@ impl From for NetworkConf /// Holds configuration for a `HotShot` #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct HotShotConfigFile { +#[serde(bound(deserialize = ""))] +pub struct HotShotConfigFile { /// Total number of nodes in the network pub total_nodes: NonZeroUsize, + /// My own public key, secret key, stake value + pub my_own_validator_config: ValidatorConfig, /// Number of committee nodes pub committee_nodes: usize, /// Maximum transactions per block @@ -194,15 +195,15 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, } -impl From for HotShotConfig { - fn from(val: HotShotConfigFile) -> Self { +impl From> for HotShotConfig { + fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, total_nodes: val.total_nodes, max_transactions: val.max_transactions, min_transactions: val.min_transactions, known_nodes_with_stake: Vec::new(), - my_own_validator_config: ValidatorConfig::default(), + my_own_validator_config: val.my_own_validator_config, da_committee_size: val.committee_nodes, next_view_timeout: val.next_view_timeout, timeout_ratio: val.timeout_ratio, @@ -226,19 +227,23 @@ fn default_transactions_per_round() -> usize { fn default_padding() -> usize { 100 } -fn default_config() -> HotShotConfigFile { - HotShotConfigFile { - total_nodes: NonZeroUsize::new(10).unwrap(), - committee_nodes: 5, - max_transactions: NonZeroUsize::new(100).unwrap(), - min_transactions: 1, - next_view_timeout: 10000, - timeout_ratio: (11, 10), - round_start_delay: 1, - start_delay: 1, - propose_min_round_time: Duration::from_secs(0), - propose_max_round_time: Duration::from_secs(10), - num_bootstrap: 5, + +impl Default for HotShotConfigFile { + fn default() -> Self { + Self { + total_nodes: NonZeroUsize::new(10).unwrap(), + my_own_validator_config: ValidatorConfig::default(), + committee_nodes: 5, + max_transactions: NonZeroUsize::new(100).unwrap(), + min_transactions: 1, + next_view_timeout: 10000, + timeout_ratio: (11, 10), + round_start_delay: 1, + start_delay: 1, + propose_min_round_time: Duration::from_secs(0), + propose_max_round_time: Duration::from_secs(10), + num_bootstrap: 5, + } } } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 3e609689da..ce5c3b14c7 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -25,7 +25,6 @@ use hotshot_types::{ state::{ConsensusTime, TestableBlock}, }, }; -use tracing::error; pub async fn build_system_handle( node_id: u64, From 51f31ce9c9f8801ae6cc4708fbe7c52a43616e06 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 00:10:55 -0700 Subject: [PATCH 0259/1393] fix lint --- hotshot/examples/infra/mod.rs | 5 ++--- hotshot/examples/infra/modDA.rs | 15 +++++++-------- hotshot/src/lib.rs | 15 +++------------ orchestrator/src/config.rs | 5 ++++- orchestrator/src/lib.rs | 3 ++- testing/src/task_helpers.rs | 2 +- testing/src/test_builder.rs | 7 ++----- testing/src/test_launcher.rs | 12 ++---------- testing/src/test_runner.rs | 11 ++++------- types/src/lib.rs | 14 ++++++-------- 10 files changed, 33 insertions(+), 56 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index b5802e569d..359bab3960 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -37,8 +37,8 @@ pub fn load_config_from_file( ) -> NetworkConfig { let config_file_as_string: String = fs::read_to_string(config_file.as_str()) .unwrap_or_else(|_| panic!("Could not read config file located at {config_file}")); - let config_toml: NetworkConfigFile = - toml::from_str::(&config_file_as_string) + let config_toml: NetworkConfigFile = + toml::from_str::>(&config_file_as_string) .expect("Unable to convert config file to TOML"); let mut config: NetworkConfig = @@ -61,7 +61,6 @@ pub fn load_config_from_file( config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) .collect(); - config.config.known_nodes_sk = known_nodes_sk; config } diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index ae383e878b..8ff1bc6d40 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -203,14 +203,13 @@ pub trait RunDA< // Get KeyPair for certificate Aggregation // Sishan NOTE: Looks like this file is not used anymore, so ignore the following initialization on keys. let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let known_nodes_sk = config.config.known_nodes_sk.clone(); - let entry = known_nodes_with_stake - .get(config.node_index as usize) - .expect("node_id should be within the range of known_nodes"); - let pk = TYPES::SignatureKey::get_public_key(entry); - let sk = known_nodes_sk - .get(config.node_index as usize) - .expect("node_id should be within the range of known_nodes"); + let entry = config + .config + .my_own_validator_config + .public_key + .get_stake_table_entry(config.config.my_own_validator_config.stake_value); + let pk = config.config.my_own_validator_config.public_key.clone(); + let sk = config.config.my_own_validator_config.private_key.clone(); let da_network = self.get_da_network(); let quorum_network = self.get_quorum_network(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 1ed39c8496..12df0be72a 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -114,10 +114,7 @@ pub struct SystemContextInner> { private_key: ::PrivateKey, /// Configuration items for this hotshot instance - config: HotShotConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: HotShotConfig, /// Networking interface for this hotshot instance // networking: I::Networking, @@ -169,10 +166,7 @@ impl> SystemContext { public_key: TYPES::SignatureKey, private_key: ::PrivateKey, nonce: u64, - config: HotShotConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: HotShotConfig, storage: I::Storage, exchanges: I::Exchanges, initializer: HotShotInitializer, @@ -377,10 +371,7 @@ impl> SystemContext { public_key: TYPES::SignatureKey, private_key: ::PrivateKey, node_id: u64, - config: HotShotConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: HotShotConfig, storage: I::Storage, exchanges: I::Exchanges, initializer: HotShotInitializer, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index c1d4980047..79ea891bed 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,4 +1,7 @@ -use hotshot_types::{traits::{signature_key::SignatureKey, election::ElectionConfig,}, ExecutionType, HotShotConfig, ValidatorConfig}; +use hotshot_types::{ + traits::{election::ElectionConfig, signature_key::SignatureKey}, + ExecutionType, HotShotConfig, ValidatorConfig, +}; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 728966a42e..f506eabba3 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -194,7 +194,8 @@ where } /// Sets up all API routes -fn define_api() -> Result, ApiError> +fn define_api( +) -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + OrchestratorApi, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ce5c3b14c7..f720f3dd2c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -50,7 +50,7 @@ pub async fn build_system_handle( let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); - let public_key = config.my_own_validator_config.public_key.clone(); + let public_key = config.my_own_validator_config.public_key; let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< SequencingTestTypes, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 0d7ce23df8..926dd1f550 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -212,11 +212,8 @@ impl TestMetadata { (0..total_nodes) .map(|id| known_nodes[id].get_stake_table_entry(1u64)) .collect(); - let my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( - [0u8; 32], - node_id, - 1, - ); + let my_own_validator_config = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { // TODO this doesn't exist anymore diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index a4c7cbb1bb..5fafdfc90f 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -90,10 +90,7 @@ where /// generate a new storage for each node pub storage: Generator<>::Storage>, /// configuration used to generate each hotshot node - pub config: HotShotConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + pub config: HotShotConfig, } /// test launcher @@ -192,12 +189,7 @@ impl> TestLauncher, - ), + mut f: impl FnMut(&mut HotShotConfig), ) -> Self { f(&mut self.resource_generator.config); self diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index a82e557f88..78f624d06c 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -207,7 +207,8 @@ where let initializer = HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); let networks = (self.launcher.resource_generator.channel_generator)(node_id); - let validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + let validator_config = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); let hotshot = self .add_node_with_config(networks, storage, initializer, config, validator_config) .await; @@ -231,10 +232,7 @@ where networks: Networks, storage: I::Storage, initializer: HotShotInitializer, - config: HotShotConfig< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >, + config: HotShotConfig, validator_config: ValidatorConfig, ) -> SystemContext where @@ -251,8 +249,7 @@ where // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); - let entry = - public_key.get_stake_table_entry(validator_config.stake_value); + let entry = public_key.get_stake_table_entry(validator_config.stake_value); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TYPES, diff --git a/types/src/lib.rs b/types/src/lib.rs index e436729013..d75d95389d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -11,7 +11,7 @@ use std::{num::NonZeroUsize, time::Duration}; -use traits::{signature_key::SignatureKey, election::ElectionConfig}; +use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod block_impl; pub mod certificate; @@ -50,15 +50,13 @@ pub struct ValidatorConfig { impl ValidatorConfig { /// generate validator config from input seed, index and stake value + #[must_use] pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { - let (public_key, private_key) = KEY::generated_from_seed_indexed( - seed, - index, - ); + let (public_key, private_key) = KEY::generated_from_seed_indexed(seed, index); Self { - public_key: public_key.clone(), - private_key: private_key, - stake_value: stake_value, + public_key, + private_key, + stake_value, } } } From 0f1010a627dbb8309bb8016cb04264faf89f9457 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 00:13:52 -0700 Subject: [PATCH 0260/1393] remove useless comment --- hotshot/examples/infra/mod.rs | 1 - hotshot/examples/infra/modDA.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 359bab3960..ebf35e6b6e 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -45,7 +45,6 @@ pub fn load_config_from_file( config_toml.into(); // Generate network's public keys - // Sishan NOTE: Looks like this file is not used anymore, so ignore the following initialization on keys. let mut known_nodes_sk = Vec::new(); let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs index 8ff1bc6d40..c39d7b5115 100644 --- a/hotshot/examples/infra/modDA.rs +++ b/hotshot/examples/infra/modDA.rs @@ -201,7 +201,6 @@ pub trait RunDA< let config = self.get_config(); // Get KeyPair for certificate Aggregation - // Sishan NOTE: Looks like this file is not used anymore, so ignore the following initialization on keys. let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let entry = config .config From db77776e98cdeaa8584212917cae825b4a869953 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 09:20:18 -0700 Subject: [PATCH 0261/1393] cargo update but now the issue is in block_impl.rs called unwrap() on an Err --- types/src/data.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/types/src/data.rs b/types/src/data.rs index 2d68e47809..038cff2813 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -102,7 +102,6 @@ impl std::ops::Deref for ViewNumber { impl std::ops::Sub for ViewNumber { type Output = ViewNumber; fn sub(self, rhs: u64) -> Self::Output { - error!("self.0 = {}, rhs = {}", self.0, rhs); Self(self.0 - rhs) } } From ef26d75c403ac2a9e7249fe92c463e2b72894a12 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 10:58:45 -0700 Subject: [PATCH 0262/1393] solve view number overflow and downgrade to original cargo.lock --- task-impls/src/network.rs | 1 - task-impls/src/vid.rs | 3 ++- types/src/data.rs | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 106805cf3e..0cabc1637c 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -108,7 +108,6 @@ impl< SequencingHotShotEvent::DAVoteRecv(vote.clone()) } CommitteeConsensusMessage::DACertificate(cert) => { - // panic!("Recevid DA C! "); SequencingHotShotEvent::DACRecv(cert) } CommitteeConsensusMessage::VidDisperseMsg(proposal) => { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index bde44d7781..0661308558 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -305,7 +305,8 @@ where // `self.cur_view` should be at least 1 since there is a view change before getting // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. - if view < self.cur_view - 1 { + // Sishan NOTE: Looks like for this `VidDisperseRecv` task we do not have a view change + if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { warn!("Throwing away VID disperse data that is more than one view older"); return None; } diff --git a/types/src/data.rs b/types/src/data.rs index 038cff2813..356b7a60fa 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -14,7 +14,7 @@ use crate::{ state::{ConsensusTime, TestableBlock, TestableState}, storage::StoredView, BlockPayload, State, - }, error, + }, }; use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; @@ -33,7 +33,6 @@ use std::{ fmt::{Debug, Display}, hash::Hash, }; -use tracing::error; /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. #[derive( From a635e11132af70989a855bc309077723781fe46d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 11:07:12 -0700 Subject: [PATCH 0263/1393] fix lint --- hotshot-signature-key/src/bn254/bn254_priv.rs | 1 - hotshot/examples/infra/mod.rs | 54 +++---------------- 2 files changed, 8 insertions(+), 47 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 55fcb6e22d..09ab2e4299 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -54,7 +54,6 @@ impl BLSPrivKey { } } -#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)] impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { let self_bytes = &self.priv_key.to_string(); diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index f9e59afb2d..738b849402 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -234,11 +234,7 @@ pub trait RunDA< { /// Initializes networking, returns self async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, ) -> Self; /// Initializes the genesis state and HotShot instance; does not start HotShot consensus @@ -424,13 +420,7 @@ pub trait RunDA< fn get_vid_network(&self) -> VIDNETWORK; /// Returns the config for this run - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >; + fn get_config(&self) -> NetworkConfig; } // WEB SERVER @@ -441,11 +431,7 @@ pub struct WebServerDARun< I: NodeImplementation, MEMBERSHIP: Membership, > { - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, quorum_network: WebCommChannel, da_network: WebCommChannel, view_sync_network: WebCommChannel, @@ -510,11 +496,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, ) -> WebServerDARun { // Generate our own key let (pub_key, _priv_key) = @@ -593,13 +575,7 @@ where self.vid_network.clone() } - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - > { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } @@ -609,11 +585,7 @@ where /// Represents a libp2p-based run pub struct Libp2pDARun, MEMBERSHIP: Membership> { - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, quorum_network: Libp2pCommChannel, da_network: Libp2pCommChannel, view_sync_network: Libp2pCommChannel, @@ -678,11 +650,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, + config: NetworkConfig, ) -> Libp2pDARun { let (pubkey, _privkey) = <::SignatureKey as SignatureKey>::generated_from_seed_indexed( @@ -838,13 +806,7 @@ where self.vid_network.clone() } - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - > { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } From 0e3d4a78f6ef527cc3b3745b5d8bbc7f569c1d20 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 11:13:17 -0700 Subject: [PATCH 0264/1393] fix lint, PartialOrd for Key --- hotshot-signature-key/src/bn254/bn254_priv.rs | 4 +--- hotshot-signature-key/src/bn254/bn254_pub.rs | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 09ab2e4299..0c9b0cd6f5 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -56,9 +56,7 @@ impl BLSPrivKey { impl PartialOrd for BLSPrivKey { fn partial_cmp(&self, other: &Self) -> Option { - let self_bytes = &self.priv_key.to_string(); - let other_bytes = &other.priv_key.to_string(); - self_bytes.partial_cmp(other_bytes) + Some(self.cmp(other)) } } diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 025d455129..9b31fc91a2 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -29,9 +29,7 @@ pub struct BLSPubKey { impl PartialOrd for BLSPubKey { fn partial_cmp(&self, other: &Self) -> Option { - let self_bytes = &self.pub_key.to_string(); - let other_bytes = &other.pub_key.to_string(); - self_bytes.partial_cmp(other_bytes) + Some(self.cmp(other)) } } From a9ed7767185640595d477af46894c20bb69183a3 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 26 Oct 2023 11:28:50 -0700 Subject: [PATCH 0265/1393] Address comments --- hotshot/src/demo.rs | 15 +- hotshot/src/lib.rs | 10 +- hotshot/src/tasks/mod.rs | 6 +- .../atomic_storage/dual_key_value_store.rs | 6 +- hotshot/src/traits/storage/memory_storage.rs | 1 - task-impls/src/consensus.rs | 83 +++++----- task-impls/src/da.rs | 22 +-- task-impls/src/events.rs | 4 +- task-impls/src/transactions.rs | 8 +- task-impls/src/vid.rs | 12 +- testing/src/overall_safety_task.rs | 8 +- testing/src/task_helpers.rs | 21 ++- testing/tests/consensus_task.rs | 3 +- testing/tests/da_task.rs | 8 +- testing/tests/network_task.rs | 10 +- testing/tests/vid_task.rs | 6 +- types/src/block_impl.rs | 34 +++- types/src/certificate.rs | 16 +- types/src/consensus.rs | 4 +- types/src/data.rs | 156 +++++++++++------- types/src/traits/block_contents.rs | 25 ++- types/src/traits/election.rs | 60 +++---- types/src/traits/state.rs | 35 +++- types/src/utils.rs | 8 +- types/src/vote.rs | 8 +- 25 files changed, 332 insertions(+), 237 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 0d1b3993ad..2a86f960b6 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -62,11 +62,13 @@ impl Default for SDemoState { impl State for SDemoState { type Error = BlockPayloadError; + type BlockHeader = VIDBlockHeader; + type BlockPayload = VIDBlockPayload; type Time = ViewNumber; - fn validate_block(&self, _block: &Self::BlockPayload, view_number: &Self::Time) -> bool { + fn validate_block(&self, _block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool { if view_number == &ViewNumber::genesis() { &self.view_number == view_number } else { @@ -74,12 +76,18 @@ impl State for SDemoState { } } + fn initialize() -> Self { + let mut state = Self::default(); + state.block_height += 1; + state + } + fn append( &self, - block: &Self::BlockPayload, + block_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result { - if !self.validate_block(block, view_number) { + if !self.validate_block(block_header, view_number) { return Err(BlockPayloadError::InvalidBlock); } @@ -174,7 +182,6 @@ pub fn random_quorum_certificate QuorumCertificate> { QuorumCertificate { - // block_commitment: random_commitment(rng), leaf_commitment: random_commitment(rng), view_number: TYPES::Time::new(rng.gen()), signatures: AssembledSignature::Genesis(), diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a970c4f72b..67d94b02e7 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -1087,17 +1087,13 @@ impl> HotShotInitializer Result> { - let state = TYPES::StateType::default() - .append(&genesis_block, &TYPES::Time::new(0)) - .map_err(|err| HotShotError::Misc { - context: err.to_string(), - })?; + pub fn from_genesis(genesis_payload: TYPES::BlockPayload) -> Result> { + let state = TYPES::StateType::initialize(); let time = TYPES::Time::genesis(); let justify_qc = QuorumCertificate::>::genesis(); Ok(Self { - inner: LEAF::new(time, justify_qc, genesis_block, state), + inner: LEAF::new(time, justify_qc, genesis_payload, state), }) } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 0420e793ab..37fc59cd52 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -28,7 +28,7 @@ use hotshot_task_impls::{ view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::{TimeoutCertificate, VIDCertificate, ViewSyncCertificate}, data::{ProposalType, QuorumProposal, SequencingLeaf}, event::Event, @@ -252,7 +252,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, @@ -297,7 +297,7 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - block_commitment: Some(VIDBlockPayload::genesis().commit()), + payload_commitment: Some(VIDBlockPayload::genesis().commit()), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), api: c_api.clone(), diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs index 84cf8c76a2..c890099a1d 100644 --- a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs +++ b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs @@ -184,11 +184,11 @@ impl DualKeyValue for QuorumCertificate { type Key1 = Commitment; type Key2 = ViewNumber; - const KEY_1_NAME: &'static str = "block_commitment"; + const KEY_1_NAME: &'static str = "payload_commitment"; const KEY_2_NAME: &'static str = "view_number"; fn key_1(&self) -> Self::Key1 { - self.block_commitment + self.payload_commitment } fn key_2(&self) -> Self::Key2 { self.view_number @@ -203,7 +203,7 @@ where type Key2 = Commitment; const KEY_1_NAME: &'static str = "leaf_commitment"; - const KEY_2_NAME: &'static str = "block_commitment"; + const KEY_2_NAME: &'static str = "payload_commitment"; fn key_1(&self) -> Self::Key1 { self.commit() diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index c57363bca7..45c02bf6fe 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -167,7 +167,6 @@ mod test { let dummy_leaf_commit = fake_commitment::>(); StoredView::from_qc_block_and_state( QuorumCertificate { - // block_commitment: dummy_block_commit, is_genesis: view_number == ::Time::genesis(), leaf_commitment: dummy_leaf_commit, signatures: AssembledSignature::Genesis(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d670c7f097..08e75c6323 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -16,13 +16,14 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload}, + block_impl::VIDBlockPayload, certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, consensus::{Consensus, View}, data::{LeafType, ProposalType, QuorumProposal, SequencingLeaf}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ + block_contents::BlockHeader, consensus_api::SequencingConsensusApi, election::{ConsensusExchange, QuorumExchangeType, SignedCertificate, TimeoutExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, @@ -94,8 +95,8 @@ pub struct SequencingConsensusTaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - /// The commitment to the current block submitted to DA - pub block_commitment: Option>, + /// The commitment to the current block payload submitted to DA + pub payload_commitment: Option>, /// the quorum exchange pub quorum_exchange: Arc>, @@ -360,13 +361,14 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, A: SequencingConsensusApi, I> + 'static, + H: BlockHeader, > SequencingConsensusTaskState where SequencingQuorumEx: ConsensusExchange< @@ -457,10 +459,9 @@ where let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_header.commitment), + deltas: Right(proposal.block_header.clone()), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -527,10 +528,9 @@ where let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_header.commitment), + deltas: Right(proposal.block_header.clone()), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -541,9 +541,9 @@ where .committee_exchange .is_valid_cert(cert) { - // Validate the block commitment for non-genesis DAC. - if !cert.is_genesis() && cert.leaf_commitment() != proposal.block_header.commitment { - error!("Block commitment does not equal parent commitment"); + // Validate the block payload commitment for non-genesis DAC. + if !cert.is_genesis() && cert.leaf_commitment() != proposal.block_header.payload_commitment() { + error!("Block payload commitment does not equal parent commitment"); return false; } self.quorum_exchange.create_yes_message( @@ -744,10 +744,9 @@ where ); let leaf = SequencingLeaf { view_number: view, - height: proposal.data.block_header.block_number, justify_qc: justify_qc.clone(), parent_commitment: justify_qc.leaf_commitment(), - deltas: Right(proposal.data.block_header.commitment), + deltas: Right(proposal.data.block_header), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -769,29 +768,17 @@ where let parent_commitment = parent.commit(); let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.data.block_header.block_number, justify_qc: justify_qc.clone(), parent_commitment, - deltas: Right(proposal.data.block_header.commitment), + deltas: Right(proposal.data.block_header), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), }; let leaf_commitment = leaf.commit(); - // Validate the `height` - // TODO Remove height from proposal validation; view number is sufficient - // https://github.com/EspressoSystems/HotShot/issues/1796 - if leaf.height != parent.height + 1 { - error!( - "Incorrect height in proposal (expected {}, got {})", - parent.height + 1, - leaf.height - ); - return; - } // Validate the signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - else if !view_leader_key.validate(&proposal.signature, leaf_commitment.as_ref()) { + if !view_leader_key.validate(&proposal.signature, leaf_commitment.as_ref()) { error!(?proposal.signature, "Could not verify proposal."); return; } @@ -869,7 +856,7 @@ where consensus .metrics .last_synced_block_height - .set(usize::try_from(leaf.height).unwrap_or(0)); + .set(usize::try_from(leaf.get_height()).unwrap_or(0)); // If the full block is available for this leaf, include it in the leaf // chain that we send to the client. @@ -884,7 +871,7 @@ where leaf_views.push(leaf.clone()); match &leaf.deltas { - Left(block) => { + Left((_,block)) => { let txns = block.contained_transactions(); for txn in txns { included_txns.insert(txn); @@ -1323,8 +1310,8 @@ where let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } - SequencingHotShotEvent::SendBlockCommitment(block_commitment) => { - self.block_commitment = Some(block_commitment); + SequencingHotShotEvent::SendPayloadCommitment(payload_commitment) => { + self.payload_commitment = Some(payload_commitment); } _ => {} } @@ -1380,6 +1367,16 @@ where } let parent_leaf = leaf.clone(); + let parent_header = match parent_leaf.deltas { + Left((_, ref payload)) => { + if parent_leaf.view_number != TYPES::Time::new(0) { + error!("Non-genesis parent leaf should contain the block header rather than payload."); + return false; + } + TYPES::BlockHeader::genesis(payload.clone()) + } + Right(ref header) => header.clone(), + }; let original_parent_hash = parent_leaf.commit(); @@ -1398,15 +1395,17 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - if let Some(block_commitment) = &self.block_commitment { + if let Some(payload_commitment) = &self.payload_commitment { let leaf = SequencingLeaf { view_number: view, - height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - // Use the block commitment rather than the block, so that the replica can construct - // the same leaf with the commitment. - deltas: Right(*block_commitment), + // Use the payload commitment rather than the payload, so that the replica can + // construct the same leaf with the commitment. + deltas: Right(TYPES::BlockHeader::new( + *payload_commitment, + parent_header.clone(), + )), rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.api.public_key().to_bytes(), @@ -1417,10 +1416,7 @@ where .sign_validating_or_commitment_proposal::(&leaf.commit()); // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_header: VIDBlockHeader { - block_number: leaf.height, - commitment: *block_commitment, - }, + block_header: TYPES::BlockHeader::new(*payload_commitment, parent_header.clone()), view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), @@ -1442,7 +1438,7 @@ where self.quorum_exchange.public_key().clone(), )) .await; - self.block_commitment = None; + self.payload_commitment = None; return true; } debug!("Self block was None"); @@ -1501,13 +1497,14 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, A: SequencingConsensusApi, I> + 'static, + H: BlockHeader, >( event: SequencingHotShotEvent, mut state: SequencingConsensusTaskState, @@ -1557,7 +1554,7 @@ pub fn consensus_event_filter>( | SequencingHotShotEvent::DACRecv(_) | SequencingHotShotEvent::VidCertRecv(_) | SequencingHotShotEvent::ViewChange(_) - | SequencingHotShotEvent::SendBlockCommitment(_) + | SequencingHotShotEvent::SendPayloadCommitment(_) | SequencingHotShotEvent::Timeout(_) | SequencingHotShotEvent::TimeoutVoteRecv(_) | SequencingHotShotEvent::Shutdown, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index dc4b8280f7..d23aadfa5f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -156,7 +156,7 @@ where match state.committee_exchange.accumulate_vote( accumulator, &vote, - &vote.block_commitment, + &vote.payload_commitment, ) { Left(new_accumulator) => { state.accumulator = either::Left(new_accumulator); @@ -242,7 +242,7 @@ where "Got a DA block with {} transactions!", proposal.data.deltas.contained_transactions().len() ); - let block_commitment = proposal.data.deltas.commit(); + let payload_commitment = proposal.data.deltas.commit(); // ED Is this the right leader? let view_leader_key = self.committee_exchange.get_leader(view); @@ -251,7 +251,7 @@ where return None; } - if !view_leader_key.validate(&proposal.signature, block_commitment.as_ref()) { + if !view_leader_key.validate(&proposal.signature, payload_commitment.as_ref()) { error!("Could not verify proposal."); return None; } @@ -267,7 +267,7 @@ where Ok(Some(vote_token)) => { // Generate and send vote let vote = self.committee_exchange.create_da_message( - block_commitment, + payload_commitment, view, vote_token, ); @@ -286,7 +286,7 @@ where // contains strictly more information. consensus.state_map.entry(view).or_insert(View { view_inner: ViewInner::DA { - block: block_commitment, + block: payload_commitment, }, }); @@ -334,7 +334,7 @@ where let accumulator = self.committee_exchange.accumulate_vote( new_accumulator, &vote, - &vote.clone().block_commitment, + &vote.clone().payload_commitment, ); if view > collection_view { @@ -428,8 +428,10 @@ where .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - let block_commitment = block.commit(); - let signature = self.committee_exchange.sign_da_proposal(&block_commitment); + let payload_commitment = block.commit(); + let signature = self + .committee_exchange + .sign_da_proposal(&payload_commitment); let data: DAProposal = DAProposal { deltas: block.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? @@ -440,8 +442,8 @@ where let message = Proposal { data, signature }; self.event_stream - .publish(SequencingHotShotEvent::SendBlockCommitment( - block_commitment, + .publish(SequencingHotShotEvent::SendPayloadCommitment( + payload_commitment, )) .await; self.event_stream diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f4666d8748..a2c2db415d 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -65,8 +65,8 @@ pub enum SequencingHotShotEvent> { TransactionsRecv(Vec), /// Send transactions to the network TransactionSend(TYPES::Transaction, TYPES::SignatureKey), - /// Event to send block commitment from DA leader to the quorum; internal event only - SendBlockCommitment(Commitment), + /// Event to send block payload commitment from DA leader to the quorum; internal event only + SendPayloadCommitment(Commitment), /// Event when the transactions task has a block formed BlockReady(TYPES::BlockPayload, TYPES::Time), /// Event when consensus decided on a leaf diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index cd390d4702..bc8d7798ed 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -144,7 +144,7 @@ where let mut included_txn_count = 0; for leaf in leaf_chain { match &leaf.deltas { - Left(block) => { + Left((_, block)) => { let txns = block.contained_transactions(); for txn in txns { included_txns.insert(txn); @@ -263,7 +263,7 @@ where let vid_disperse = vid.disperse(&txns_flatten).unwrap(); let block = VIDBlockPayload { transactions: txns, - commitment: vid_disperse.commit, + payload_commitment: vid_disperse.commit, }; // TODO never clone a block @@ -279,12 +279,12 @@ where Proposal { data: VidDisperse { view_number: view + 1, - commitment: block.commit(), + payload_commitment: block.commit(), shares: vid_disperse.shares, common: vid_disperse.common, }, // TODO (Keyao) This is also signed in DA task. - signature: self.quorum_exchange.sign_block_commitment(block.commit()), + signature: self.quorum_exchange.sign_payload_commitment(block.commit()), }, self.quorum_exchange.public_key().clone(), )) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index cf3589192d..5e59c6f7b4 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -154,7 +154,7 @@ where match state .vid_exchange - .accumulate_vote(accumulator, &vote, &vote.block_commitment) + .accumulate_vote(accumulator, &vote, &vote.payload_commitment) { Left(new_accumulator) => { state.accumulator = either::Left(new_accumulator); @@ -254,7 +254,7 @@ where let accumulator = self.vid_exchange.accumulate_vote( new_accumulator, &vote, - &vote.clone().block_commitment, + &vote.clone().payload_commitment, ); if view > collection_view { @@ -311,7 +311,7 @@ where } debug!("VID disperse data is fresh."); - let block_commitment = disperse.data.commitment; + let payload_commitment = disperse.data.payload_commitment; // ED Is this the right leader? let view_leader_key = self.vid_exchange.get_leader(view); @@ -320,7 +320,7 @@ where return None; } - if !view_leader_key.validate(&disperse.signature, block_commitment.as_ref()) { + if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { error!("Could not verify VID proposal sig."); return None; } @@ -336,7 +336,7 @@ where Ok(Some(vote_token)) => { // Generate and send vote let vote = self.vid_exchange.create_vid_message( - block_commitment, + payload_commitment, view, vote_token, ); @@ -355,7 +355,7 @@ where // contains strictly more information. consensus.state_map.entry(view).or_insert(View { view_inner: ViewInner::DA { - block: block_commitment, + block: payload_commitment, }, }); diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index b2a9a88451..c10d7391ca 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -21,7 +21,7 @@ use hotshot_task::{ }; use hotshot_types::{ certificate::QuorumCertificate, - data::{DeltasType, LeafBlock, LeafType}, + data::{DeltasType, LeafBlockPayload, LeafType}, error::RoundTimedoutState, event::{Event, EventType}, traits::node_implementation::NodeType, @@ -109,7 +109,7 @@ pub struct RoundResult> { pub leaf_map: HashMap, /// block -> # entries decided on that block - pub block_map: HashMap>, usize>, + pub block_map: HashMap>, usize>, /// state -> # entries decided on that state pub state_map: HashMap<::MaybeState, usize>, @@ -215,7 +215,7 @@ impl> RoundResult v.insert(1); } } - match self.block_map.entry(block.clone().block_commitment()) { + match self.block_map.entry(block.clone().payload_commitment()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; } @@ -291,7 +291,7 @@ impl> RoundResult // if neither, continue through let state_key = key.get_state(); - let block_key = key.get_deltas().block_commitment(); + let block_key = key.get_deltas().payload_commitment(); if *self.block_map.get(&block_key).unwrap() == threshold && *self.state_map.get(&state_key).unwrap() == threshold diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8d34440500..9a20f6fe1e 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -3,7 +3,7 @@ use crate::{ test_builder::TestMetadata, }; use commit::Committable; -use either::Right; +use either::{Either::Left, Right}; use hotshot::{ certificate::QuorumCertificate, traits::{NodeImplementation, TestableNodeImplementation}, @@ -18,6 +18,7 @@ use hotshot_types::{ data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ + block_contents::BlockHeader, consensus_api::ConsensusSharedApi, election::{ConsensusExchange, Membership, SignedCertificate}, node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, @@ -115,26 +116,30 @@ async fn build_quorum_proposal_and_signature( panic!("Failed to find high QC parent."); }; let parent_leaf = leaf.clone(); + let parent_header = match parent_leaf.deltas { + Left((block_number, ref payload)) => VIDBlockHeader { + block_number, + payload_commitment: payload.commit(), + }, + Right(ref header) => header.clone(), + }; // every event input is seen on the event stream in the output. let block = ::genesis(); - let block_commitment = block.commit(); + let payload_commitment = block.commit(); + let block_header = VIDBlockHeader::new(payload_commitment, parent_header); let leaf = SequencingLeaf { view_number: ViewNumber::new(view), - height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - deltas: Right(block_commitment), + deltas: Right(block_header.clone()), rejected: vec![], timestamp: 0, proposer_id: api.public_key().to_bytes(), }; let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal::> { - block_header: VIDBlockHeader { - block_number: 1, - commitment: block_commitment, - }, + block_header, view_number: ViewNumber::new(view), justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index f4beb9d563..726b4ed0e9 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -63,10 +63,9 @@ async fn build_vote( let leaf: SequencingLeaf<_> = SequencingLeaf { view_number: view, - height: proposal.block_header.block_number, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_header.commitment), + deltas: Right(proposal.block_header), rejected: Vec::new(), timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index f9380b6991..8732a93fbb 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -42,10 +42,10 @@ async fn test_da_task() { let vid = vid_init(); let txn = vec![0u8]; let vid_disperse = vid.disperse(&txn).unwrap(); - let block_commitment = vid_disperse.commit; + let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { transactions: vec![VIDTransaction(txn)], - commitment: block_commitment, + payload_commitment, }; let signature = committee_exchange.sign_da_proposal(&block.commit()); @@ -58,7 +58,7 @@ async fn test_da_task() { signature, }; - // TODO for now reuse the same block commitment and signature as DA committee + // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 // Every event input is seen on the event stream in the output. @@ -85,7 +85,7 @@ async fn test_da_task() { 1, ); output.insert( - SequencingHotShotEvent::SendBlockCommitment(block.commit()), + SequencingHotShotEvent::SendPayloadCommitment(block.commit()), 1, ); output.insert( diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index fe50497132..03c0b13d63 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -45,10 +45,10 @@ async fn test_network_task() { let vid = vid_init(); let txn = vec![0u8]; let vid_disperse = vid.disperse(&txn).unwrap(); - let block_commitment = vid_disperse.commit; + let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { transactions: vec![VIDTransaction(txn)], - commitment: block_commitment, + payload_commitment, }; let signature = committee_exchange.sign_da_proposal(&block.commit()); let da_proposal = Proposal { @@ -59,12 +59,12 @@ async fn test_network_task() { signature, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - // TODO for now reuse the same block commitment and signature as DA committee + // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { data: VidDisperse { view_number: da_proposal.data.view_number, - commitment: block.commit(), + payload_commitment: block.commit(), shares: vid_disperse.shares, common: vid_disperse.common, }, @@ -125,7 +125,7 @@ async fn test_network_task() { 1, ); output.insert( - SequencingHotShotEvent::SendBlockCommitment(block.commit()), + SequencingHotShotEvent::SendPayloadCommitment(block.commit()), 1, ); output.insert( diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index cf19740e1d..d4edf7b6ea 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -41,10 +41,10 @@ async fn test_vid_task() { let vid = vid_init(); let txn = vec![0u8]; let vid_disperse = vid.disperse(&txn).unwrap(); - let block_commitment = vid_disperse.commit; + let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { transactions: vec![VIDTransaction(txn)], - commitment: block_commitment, + payload_commitment, }; let signature = vid_exchange.sign_vid_proposal(&block.commit()); @@ -59,7 +59,7 @@ async fn test_vid_task() { let vid_proposal = Proposal { data: VidDisperse { view_number: message.data.view_number, - commitment: block.commit(), + payload_commitment: block.commit(), shares: vid_disperse.shares, common: vid_disperse.common, }, diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 0de9365e79..90aa210b6f 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -65,8 +65,8 @@ pub enum BlockPayloadError { pub struct VIDBlockPayload { /// List of transactions. pub transactions: Vec, - /// VID commitment. - pub commitment: ::Commit, + /// VID commitment to the block payload. + pub payload_commitment: ::Commit, } impl VIDBlockPayload { @@ -86,14 +86,14 @@ impl VIDBlockPayload { let vid_disperse = vid.disperse(&txn).unwrap(); VIDBlockPayload { transactions: vec![VIDTransaction(txn)], - commitment: vid_disperse.commit, + payload_commitment: vid_disperse.commit, } } } impl Committable for VIDBlockPayload { fn commit(&self) -> Commitment { - as CanonicalDeserialize>::deserialize(&*self.commitment) + as CanonicalDeserialize>::deserialize(&*self.payload_commitment) .expect("conversion from VidScheme::Commit to Commitment should succeed") } @@ -136,14 +136,32 @@ impl BlockPayload for VIDBlockPayload { pub struct VIDBlockHeader { /// Block number. pub block_number: u64, - /// VID commitment. - pub commitment: Commitment, + /// VID commitment to the payload. + pub payload_commitment: Commitment, } impl BlockHeader for VIDBlockHeader { type Payload = VIDBlockPayload; - fn commitment(&self) -> Commitment { - self.commitment + fn new(payload_commitment: Commitment, parent_header: Self) -> Self { + Self { + block_number: parent_header.block_number + 1, + payload_commitment, + } + } + + fn genesis(payload: Self::Payload) -> Self { + Self { + block_number: 0, + payload_commitment: payload.commit(), + } + } + + fn block_number(&self) -> u64 { + self.block_number + } + + fn payload_commitment(&self) -> Commitment { + self.payload_commitment } } diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 1c415ce5f8..adb37fa56e 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -35,8 +35,8 @@ pub struct DACertificate { /// This value is covered by the threshold signature. pub view_number: TYPES::Time, - /// committment to the block - pub block_commitment: Commitment, + /// committment to the block payload + pub payload_commitment: Commitment, /// Assembled signature for certificate aggregation pub signatures: AssembledSignature, @@ -50,8 +50,8 @@ pub struct VIDCertificate { /// The view number this VID certificate was generated during pub view_number: TYPES::Time, - /// committment to the block - pub block_commitment: Commitment, + /// Committment to the block payload + pub payload_commitment: Commitment, /// Assembled signature for certificate aggregation pub signatures: AssembledSignature, @@ -270,7 +270,7 @@ impl DACertificate { view_number: vote.get_view(), signatures, - block_commitment: vote.block_commitment, + payload_commitment: vote.payload_commitment, } } @@ -283,7 +283,7 @@ impl } fn leaf_commitment(&self) -> Commitment { - self.block_commitment + self.payload_commitment } fn is_genesis(&self) -> bool { @@ -308,7 +308,7 @@ impl VIDCertificate { view_number: vote.get_view(), signatures, - block_commitment: vote.block_commitment, + payload_commitment: vote.payload_commitment, } } @@ -321,7 +321,7 @@ impl } fn leaf_commitment(&self) -> Commitment { - self.block_commitment + self.payload_commitment } fn is_genesis(&self) -> bool { diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 516b8879d6..fad59f0508 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -321,7 +321,7 @@ impl> Consensus { // perform gc self.state_map .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_block_commitment()) + .filter_map(|(_view_number, view)| view.get_payload_commitment()) .for_each(|block| { self.saved_blocks.remove(block); }); @@ -351,7 +351,7 @@ impl> Consensus { } } -/// Mapping from block commitments to full blocks. +/// Mapping from block payload commitments to full blocks. /// /// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the /// same block, and the block will only be deleted after _all_ such objects are garbage collected. diff --git a/types/src/data.rs b/types/src/data.rs index 4a4f604ae6..5f1cef024d 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -9,6 +9,7 @@ use crate::{ ViewSyncCertificate, }, traits::{ + block_contents::BlockHeader, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -21,7 +22,7 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Serializatio use bincode::Options; use commit::{Commitment, Committable}; use derivative::Derivative; -use either::Either; +use either::{Either, Left, Right}; use espresso_systems_common::hotshot::tag; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; @@ -125,8 +126,8 @@ pub struct ValidatingProposal> where LEAF: Committable, { - /// current view's block commitment - pub block_commitment: Commitment, + /// Current view's block payload commitment + pub payload_commitment: Commitment, /// CurView from leader when proposing leaf pub view_number: TYPES::Time, @@ -175,8 +176,8 @@ pub use jf_primitives::vid::VidScheme as VidSchemeTrait; pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::Time, - /// Block commitment - pub commitment: Commitment, + /// Block payload commitment + pub payload_commitment: Commitment, /// VID shares dispersed among storage nodes pub shares: Vec<::Share>, /// VID common data sent to all storage nodes @@ -293,7 +294,7 @@ pub trait DeltasType: type Error: std::error::Error; /// Get a cryptographic commitment to the block represented by this delta. - fn block_commitment(&self) -> Commitment; + fn payload_commitment(&self) -> Commitment; /// Get the full block if it is available, otherwise return this object unchanged. /// @@ -310,13 +311,13 @@ pub trait DeltasType: /// /// # Errors /// - /// Fails if `block` does not match `self.block_commitment()`, or if the block is not able to be + /// Fails if `block` does not match `self.payload_commitment()`, or if the block is not able to be /// stored for some implementation-defined reason. fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; } /// Error which occurs when [`DeltasType::fill`] is called with a block that does not match the -/// deltas' internal block commitment. +/// deltas' internal block payload commitment. #[derive(Clone, Copy, Debug, Snafu)] #[snafu(display("the block {:?} has commitment {} (expected {})", block, block.commit(), commitment))] pub struct InconsistentDeltasError { @@ -341,7 +342,7 @@ where { type Error = InconsistentDeltasError; - fn block_commitment(&self) -> Commitment { + fn payload_commitment(&self) -> Commitment { self.commit() } @@ -378,7 +379,7 @@ where { type Error = InconsistentDeltasError; - fn block_commitment(&self) -> Commitment { + fn payload_commitment(&self) -> Commitment { match self { Either::Left(block) => block.commit(), Either::Right(comm) => *comm, @@ -410,6 +411,45 @@ where } } +impl DeltasType for Either<(u64, PAYLOAD), HEADER> +where + HEADER: BlockHeader, + PAYLOAD: BlockPayload, +{ + type Error = InconsistentDeltasError; + + fn payload_commitment(&self) -> Commitment { + match self { + Either::Left((_, block)) => block.commit(), + Either::Right(header) => header.payload_commitment(), + } + } + + fn try_resolve(self) -> Result { + match self { + Either::Left((_, block)) => Ok(block), + Either::Right(_) => Err(self), + } + } + + fn fill(&mut self, block: PAYLOAD) -> Result<(), Self::Error> { + match self { + Either::Left((_, curr)) => curr.fill(block), + Either::Right(header) => { + ensure!( + header.payload_commitment() == block.commit(), + InconsistentDeltasSnafu { + block, + commitment: header.payload_commitment() + } + ); + *self = Either::Left((header.block_number(), block)); + Ok(()) + } + } + } +} + /// An item which is appended to a blockchain. pub trait LeafType: Debug @@ -427,7 +467,7 @@ pub trait LeafType: /// Type of nodes participating in the network. type NodeType: NodeType; /// Type of block contained by this leaf. - type DeltasType: DeltasType>; + type DeltasType: DeltasType>; /// Either state or empty type MaybeState: Clone + Debug @@ -443,7 +483,7 @@ pub trait LeafType: fn new( view_number: LeafTime, justify_qc: QuorumCertificate>, - deltas: LeafBlock, + deltas: LeafBlockPayload, state: LeafState, ) -> Self; /// Time when this leaf was created. @@ -452,8 +492,6 @@ pub trait LeafType: /// /// Equivalently, this is the number of leaves before this one in the chain. fn get_height(&self) -> u64; - /// Change the height of this leaf. - fn set_height(&mut self, height: u64); /// The QC linking this leaf to its parent in the chain. fn get_justify_qc(&self) -> QuorumCertificate>; /// Commitment to this leaf's parent. @@ -469,7 +507,7 @@ pub trait LeafType: /// /// Fails if `block` does not match `self.get_deltas_commitment()`, or if the block is not able /// to be stored for some implementation-defined reason. - fn fill_deltas(&mut self, block: LeafBlock) -> Result<(), LeafDeltasError>; + fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError>; /// The blockchain state after appending this leaf. fn get_state(&self) -> Self::MaybeState; /// Transactions rejected or invalidated by the application of this leaf. @@ -481,24 +519,26 @@ pub trait LeafType: /// Create a leaf from information stored about a view. fn from_stored_view(stored_view: StoredView) -> Self; - /// A commitment to the block contained in this leaf. - fn get_deltas_commitment(&self) -> Commitment> { - self.get_deltas().block_commitment() + /// A commitment to the block payload contained in this leaf. + fn get_deltas_commitment(&self) -> Commitment> { + self.get_deltas().payload_commitment() } } /// The [`DeltasType`] in a [`LeafType`]. pub type LeafDeltas = ::DeltasType; /// Errors reported by the [`DeltasType`] in a [`LeafType`]. -pub type LeafDeltasError = as DeltasType>>::Error; +pub type LeafDeltasError = as DeltasType>>::Error; /// The [`NodeType`] in a [`LeafType`]. pub type LeafNode = ::NodeType; /// The [`StateType`] in a [`LeafType`]. pub type LeafState = as NodeType>::StateType; +/// The [`BlockHeader`] in a [`LeafType`]. +pub type LeafBlockHeader = as NodeType>::BlockHeader; /// The [`BlockPayload`] in a [`LeafType`]. -pub type LeafBlock = as NodeType>::BlockPayload; +pub type LeafBlockPayload = as NodeType>::BlockPayload; /// The [`Transaction`] in a [`LeafType`]. -pub type LeafTransaction = as BlockPayload>::Transaction; +pub type LeafTransaction = as BlockPayload>::Transaction; /// The [`ConsensusTime`] used by a [`LeafType`]. pub type LeafTime = as NodeType>::Time; @@ -564,9 +604,6 @@ pub struct SequencingLeaf { /// CurView from leader when proposing leaf pub view_number: TYPES::Time, - /// Number of leaves before this one in the chain - pub height: u64, - /// Per spec, justification pub justify_qc: QuorumCertificate>, @@ -574,8 +611,8 @@ pub struct SequencingLeaf { /// So we can ask if it extends pub parent_commitment: Commitment, - /// The block or block commitment to be applied - pub deltas: Either>, + /// Either the block number and payload, or the block header. + pub deltas: Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>, /// Transactions that were marked for rejection while collecting deltas pub rejected: Vec<::Transaction>, @@ -591,15 +628,15 @@ pub struct SequencingLeaf { impl PartialEq for SequencingLeaf { fn eq(&self, other: &Self) -> bool { let delta_left = match &self.deltas { - Either::Left(deltas) => deltas.commit(), - Either::Right(deltas) => *deltas, + Either::Left(deltas) => (deltas.0, deltas.1.commit()), + Either::Right(deltas) => (deltas.block_number(), deltas.payload_commitment()), }; let delta_right = match &other.deltas { - Either::Left(deltas) => deltas.commit(), - Either::Right(deltas) => *deltas, + Either::Left(deltas) => (deltas.0, deltas.1.commit()), + Either::Right(deltas) => (deltas.block_number(), deltas.payload_commitment()), }; self.view_number == other.view_number - && self.height == other.height + // && self.height == other.height && self.justify_qc == other.justify_qc && self.parent_commitment == other.parent_commitment && delta_left == delta_right @@ -610,15 +647,17 @@ impl PartialEq for SequencingLeaf { impl Hash for SequencingLeaf { fn hash(&self, state: &mut H) { self.view_number.hash(state); - self.height.hash(state); + // self.height.hash(state); self.justify_qc.hash(state); self.parent_commitment.hash(state); match &self.deltas { Either::Left(deltas) => { - deltas.commit().hash(state); + deltas.0.hash(state); + deltas.1.commit().hash(state); } - Either::Right(commitment) => { - commitment.hash(state); + Either::Right(header) => { + header.block_number().hash(state); + header.payload_commitment().hash(state); } } // self.deltas.hash(state.commit()); @@ -668,10 +707,6 @@ impl LeafType for ValidatingLeaf { self.height } - fn set_height(&mut self, height: u64) { - self.height = height; - } - fn get_justify_qc(&self) -> QuorumCertificate> { self.justify_qc.clone() } @@ -685,10 +720,10 @@ impl LeafType for ValidatingLeaf { } fn get_deltas_commitment(&self) -> Commitment<::BlockPayload> { - self.deltas.block_commitment() + self.deltas.payload_commitment() } - fn fill_deltas(&mut self, block: LeafBlock) -> Result<(), LeafDeltasError> { + fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError> { self.deltas.fill(block) } @@ -748,14 +783,16 @@ impl Display for SequencingLeaf { write!( f, "view: {:?}, height: {:?}, justify: {}", - self.view_number, self.height, self.justify_qc + self.view_number, + self.get_height(), + self.justify_qc ) } } impl LeafType for SequencingLeaf { type NodeType = TYPES; - type DeltasType = Either>; + type DeltasType = Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>; type MaybeState = (); fn new( @@ -766,10 +803,9 @@ impl LeafType for SequencingLeaf { ) -> Self { Self { view_number, - height: 0, justify_qc, parent_commitment: fake_commitment(), - deltas: Either::Left(deltas), + deltas: Either::Left((0, deltas)), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: genesis_proposer_id(), @@ -781,11 +817,10 @@ impl LeafType for SequencingLeaf { } fn get_height(&self) -> u64 { - self.height - } - - fn set_height(&mut self, height: u64) { - self.height = height; + match &self.deltas { + Left((height, _)) => *height, + Right(header) => header.block_number(), + } } fn get_justify_qc(&self) -> QuorumCertificate> { @@ -801,10 +836,10 @@ impl LeafType for SequencingLeaf { } fn get_deltas_commitment(&self) -> Commitment<::BlockPayload> { - self.deltas.block_commitment() + self.deltas.payload_commitment() } - fn fill_deltas(&mut self, block: LeafBlock) -> Result<(), LeafDeltasError> { + fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError> { self.deltas.fill(block) } @@ -826,7 +861,6 @@ impl LeafType for SequencingLeaf { fn from_stored_view(stored_view: StoredView) -> Self { Self { view_number: stored_view.view_number, - height: 0, justify_qc: stored_view.justify_qc, parent_commitment: stored_view.parent, deltas: stored_view.deltas, @@ -935,7 +969,7 @@ impl Committable for ValidatingLeaf { .u64_field("view number", *self.view_number) .u64_field("height", self.height) .field("parent Leaf commitment", self.parent_commitment) - .field("block commitment", self.deltas.commit()) + .field("block payload commitment", self.deltas.commit()) .field("state commitment", self.state.commit()) .constant_str("justify_qc view number") .u64(*self.justify_qc.view_number) @@ -955,20 +989,20 @@ impl Committable for ValidatingLeaf { impl Committable for SequencingLeaf { fn commit(&self) -> commit::Commitment { - // Commit the block commitment, rather than the block, so that the replicas can reconstruct + // Commit the block payload, rather than the block, so that the replicas can reconstruct // the leaf. - let block_commitment = match &self.deltas { - Either::Left(block) => block.commit(), - Either::Right(commitment) => *commitment, + let (height, payload_commitment) = match &self.deltas { + Either::Left((height, payload)) => (*height, payload.commit()), + Either::Right(header) => (header.block_number(), header.payload_commitment()), }; let signatures_bytes = serialize_signature(&self.justify_qc.signatures); commit::RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) - .u64_field("height", self.height) + .u64_field("height", height) .field("parent Leaf commitment", self.parent_commitment) - .field("block commitment", block_commitment) + .field("block payload commitment", payload_commitment) .constant_str("justify_qc view number") .u64(*self.justify_qc.view_number) .field( @@ -994,7 +1028,7 @@ impl From> state_commitment: leaf.state.commit(), rejected: leaf.rejected, proposer_id: leaf.proposer_id, - block_commitment: leaf.deltas.commit(), + payload_commitment: leaf.deltas.commit(), } } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 59fd565140..6922b5c011 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -64,8 +64,17 @@ pub trait BlockHeader: /// Block payload associated with the commitment. type Payload: BlockPayload; + /// Build a header with the payload commitment and parent header. + fn new(payload_commitment: Commitment, parent_header: Self) -> Self; + + /// Build a genesis header with the genesis payload. + fn genesis(payload: Self::Payload) -> Self; + + /// Get the block number. + fn block_number(&self) -> u64; + /// Get the payload commitment. - fn commitment(&self) -> Commitment; + fn payload_commitment(&self) -> Commitment; } /// Dummy implementation of `BlockPayload` for unit tests @@ -138,7 +147,19 @@ pub mod dummy { impl BlockHeader for DummyBlock { type Payload = Self; - fn commitment(&self) -> commit::Commitment { + fn new(_payload_commitment: Commitment, _parent_header: Self) -> Self { + Self { nonce: 0 } + } + + fn genesis(_payload: Self::Payload) -> Self { + Self { nonce: 0 } + } + + fn block_number(&self) -> u64 { + 0 + } + + fn payload_commitment(&self) -> commit::Commitment { commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") .u64_field("Nonce", self.nonce) .finalize() diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 4718e2d033..0254dfbb53 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -491,22 +491,22 @@ pub trait CommitteeExchangeType: /// Sign a DA proposal. fn sign_da_proposal( &self, - block_commitment: &Commitment, + payload_commitment: &Commitment, ) -> EncodedSignature; /// Sign a vote on DA proposal. /// - /// The block commitment and the type of the vote (DA) are signed, which is the minimum amount + /// The block payload commitment and the type of the vote (DA) are signed, which is the minimum amount /// of information necessary for checking that this node voted on that block. fn sign_da_vote( &self, - block_commitment: Commitment, + payload_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); /// Create a message with a vote on DA proposal. fn create_da_message( &self, - block_commitment: Commitment, + payload_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> DAVote; @@ -546,39 +546,39 @@ impl< /// Sign a DA proposal. fn sign_da_proposal( &self, - block_commitment: &Commitment, + payload_commitment: &Commitment, ) -> EncodedSignature { - let signature = TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()); + let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); signature } /// Sign a vote on DA proposal. /// - /// The block commitment and the type of the vote (DA) are signed, which is the minimum amount + /// The block payload commitment and the type of the vote (DA) are signed, which is the minimum amount /// of information necessary for checking that this node voted on that block. fn sign_da_vote( &self, - block_commitment: Commitment, + payload_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::DA(block_commitment).commit().as_ref(), + VoteData::DA(payload_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } /// Create a message with a vote on DA proposal. fn create_da_message( &self, - block_commitment: Commitment, + payload_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> DAVote { - let signature = self.sign_da_vote(block_commitment); + let signature = self.sign_da_vote(payload_commitment); DAVote { signature, - block_commitment, + payload_commitment, current_view, vote_token, - vote_data: VoteData::DA(block_commitment), + vote_data: VoteData::DA(payload_commitment), } } } @@ -643,7 +643,7 @@ pub trait VIDExchangeType: ConsensusExchange, + payload_commitment: Commitment, current_view: TYPES::Time, vote_token: TYPES::VoteTokenType, ) -> VIDVote; @@ -651,13 +651,13 @@ pub trait VIDExchangeType: ConsensusExchange, + payload_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); /// Sign a VID proposal. fn sign_vid_proposal( &self, - block_commitment: &Commitment, + payload_commitment: &Commitment, ) -> EncodedSignature; } @@ -694,27 +694,27 @@ impl< { fn create_vid_message( &self, - block_commitment: Commitment, + payload_commitment: Commitment, current_view: ::Time, vote_token: ::VoteTokenType, ) -> VIDVote { - let signature = self.sign_vid_vote(block_commitment); + let signature = self.sign_vid_vote(payload_commitment); VIDVote { signature, - block_commitment, + payload_commitment, current_view, vote_token, - vote_data: VoteData::DA(block_commitment), + vote_data: VoteData::DA(payload_commitment), } } fn sign_vid_vote( &self, - block_commitment: Commitment<::BlockPayload>, + payload_commitment: Commitment<::BlockPayload>, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::DA(block_commitment).commit().as_ref(), + VoteData::DA(payload_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } @@ -722,9 +722,9 @@ impl< /// Sign a VID proposal. fn sign_vid_proposal( &self, - block_commitment: &Commitment, + payload_commitment: &Commitment, ) -> EncodedSignature { - let signature = TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()); + let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); signature } } @@ -807,10 +807,10 @@ pub trait QuorumExchangeType, leaf_commitment: &Commitment, ) -> EncodedSignature; - /// Sign a block commitment. - fn sign_block_commitment( + /// Sign a block payload commitment. + fn sign_payload_commitment( &self, - block_commitment: Commitment, + payload_commitment: Commitment, ) -> EncodedSignature; /// Sign a positive vote on validating or commitment proposal. @@ -912,11 +912,11 @@ impl< signature } - fn sign_block_commitment( + fn sign_payload_commitment( &self, - block_commitment: Commitment<::BlockPayload>, + payload_commitment: Commitment<::BlockPayload>, ) -> EncodedSignature { - TYPES::SignatureKey::sign(&self.private_key, block_commitment.as_ref()) + TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()) } /// Sign a positive vote on validating or commitment proposal. diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 8c7efadcfc..3d6b8f6d29 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -15,6 +15,8 @@ use std::{ ops::{Deref, Sub}, }; +use super::block_contents::BlockHeader; + /// Abstraction over the state that blocks modify /// /// This trait represents the behaviors that the 'global' ledger state must have: @@ -39,22 +41,27 @@ pub trait State: { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; - /// The type of block this state is associated with + /// The type of block header this state is associated with + type BlockHeader: BlockHeader; + /// The type of block payload this state is associated with type BlockPayload: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Returns true if and only if the provided block is valid and can extend this state - fn validate_block(&self, block: &Self::BlockPayload, view_number: &Self::Time) -> bool; + /// Returns true if and only if the provided block header is valid and can extend this state + fn validate_block(&self, block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool; + + /// Initialize the state. + fn initialize() -> Self; - /// Appends the given block to this state, returning an new state + /// Appends the given block header to this state, returning an new state /// /// # Errors /// - /// Should produce and error if appending this block would lead to an invalid state + /// Should produce and error if appending this block header would lead to an invalid state fn append( &self, - block: &Self::BlockPayload, + block_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result; @@ -158,17 +165,27 @@ pub mod dummy { impl State for DummyState { type Error = DummyError; - + type BlockHeader = DummyBlock; type BlockPayload = DummyBlock; type Time = ViewNumber; - fn validate_block(&self, _block: &Self::BlockPayload, _view_number: &Self::Time) -> bool { + fn validate_block( + &self, + _block_header: &Self::BlockHeader, + _view_number: &Self::Time, + ) -> bool { false } + fn initialize() -> Self { + let mut state = Self::default(); + state.nonce += 1; + state + } + fn append( &self, - _block: &Self::BlockPayload, + _block_header: &Self::BlockHeader, _view_number: &Self::Time, ) -> Result { Ok(Self { diff --git a/types/src/utils.rs b/types/src/utils.rs index 38c4b5a852..cb06abf8d0 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -1,7 +1,7 @@ //! Utility functions, type aliases, helper structs and enum definitions. use crate::{ - data::{LeafBlock, LeafType}, + data::{LeafBlockPayload, LeafType}, traits::node_implementation::NodeType, }; use commit::Commitment; @@ -17,7 +17,7 @@ pub enum ViewInner> { /// leaders repeatedly request availability for blocks that they never propose. DA { /// Available block. - block: Commitment>, + block: Commitment>, }, /// Undecided view Leaf { @@ -39,9 +39,9 @@ impl> ViewInner { } } - /// return the underlying block hash if it exists + /// return the underlying block paylod commitment if it exists #[must_use] - pub fn get_block_commitment(&self) -> Option>> { + pub fn get_payload_commitment(&self) -> Option>> { if let Self::DA { block } = self { Some(*block) } else { diff --git a/types/src/vote.rs b/types/src/vote.rs index 2b721819bb..d63f8cc0fb 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -49,8 +49,8 @@ pub trait VoteType: pub struct DAVote { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), - /// The block commitment being voted on. - pub block_commitment: Commitment, + /// The block payload commitment being voted on. + pub payload_commitment: Commitment, /// The view this vote was cast for pub current_view: TYPES::Time, /// The vote token generated by this replica @@ -65,8 +65,8 @@ pub struct DAVote { pub struct VIDVote { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), - /// The block commitment being voted on. - pub block_commitment: Commitment, + /// The block payload commitment being voted on. + pub payload_commitment: Commitment, /// The view this vote was cast for pub current_view: TYPES::Time, /// The vote token generated by this replica From 6d599e3942547a2d5b1c2f2bee30839b0b9a09bb Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 26 Oct 2023 11:33:48 -0700 Subject: [PATCH 0266/1393] Fix after merge --- hotshot/examples/infra/mod.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index ba774d52e8..66b311f2be 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -233,7 +233,7 @@ pub trait RunDA< >, > where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, Self: Sync, SystemContext: HotShotType, @@ -252,7 +252,7 @@ pub trait RunDA< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let genesis_block = TYPES::BlockType::genesis(); + let genesis_block = TYPES::BlockPayload::genesis(); let initializer = hotshot::HotShotInitializer::>::from_genesis( genesis_block, @@ -460,7 +460,7 @@ pub struct WebServerDARun< #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -511,7 +511,7 @@ impl< > for WebServerDARun where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, Self: Sync, { @@ -628,7 +628,7 @@ pub struct Libp2pDARun, MEMBERSHIP #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -679,7 +679,7 @@ impl< > for Libp2pDARun where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, Self: Sync, { @@ -857,7 +857,7 @@ where /// Main entry point for validators pub async fn main_entry_point< - TYPES: NodeType, + TYPES: NodeType, MEMBERSHIP: Membership + Debug, DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, @@ -895,7 +895,7 @@ pub async fn main_entry_point< args: ValidatorArgs, ) where ::StateType: TestableState, - ::BlockType: TestableBlock, + ::BlockPayload: TestableBlock, SequencingLeaf: TestableLeaf, { setup_logging(); From 5867fc8a619ec7397a4c980cd429cfc6ef97a2c0 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 26 Oct 2023 15:42:11 -0400 Subject: [PATCH 0267/1393] DA messages only gossip to DA nodes (#1946) --- hotshot/examples/infra/mod.rs | 4 ++-- hotshot/src/traits/networking/libp2p_network.rs | 17 ++++++++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index ba774d52e8..623f906a87 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -782,7 +782,6 @@ where } all_keys.insert(pubkey); } - let node_config = config_builder.build().unwrap(); let underlying_quorum_network = Libp2pNetwork::new( NetworkingMetricsValue::new(), @@ -799,7 +798,8 @@ where // NOTE: this introduces an invariant that the keys are assigned using this indexed // function all_keys, - da_keys, + da_keys.clone(), + da_keys.contains(&pubkey), ) .await .unwrap(); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 20e6718955..31e1f6f531 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -113,6 +113,8 @@ struct Libp2pNetworkInner { /// NOTE: supposed to represent a ViewNumber but we /// haven't made that atomic yet and we prefer lock-free latest_seen_view: Arc, + /// if we're a member of the DA committee or not + is_da: bool, } /// Networking implementation that uses libp2p @@ -227,12 +229,13 @@ where Libp2pNetwork::new( NetworkingMetricsValue::new(), config, - pubkey, + pubkey.clone(), bootstrap_addrs_ref, num_bootstrap, node_id as usize, keys, - da, + da.clone(), + da.contains(&pubkey), ) .await .unwrap() @@ -281,6 +284,7 @@ impl Libp2pNetwork { // HACK committee_pks: BTreeSet, da_pks: BTreeSet, + is_da: bool, ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); let network_handle = Arc::new( @@ -336,6 +340,7 @@ impl Libp2pNetwork { // proposals on". We need this because to have consensus info injected we need a working // network already. In the worst case, we send a few lookups we don't need. latest_seen_view: Arc::new(AtomicU64::new(0)), + is_da, }), }; @@ -383,6 +388,7 @@ impl Libp2pNetwork { let is_bootstrapped = self.inner.is_bootstrapped.clone(); let node_type = self.inner.handle.config().node_type; let metrics_connected_peers = self.inner.clone(); + let is_da = self.inner.is_da; async_spawn({ let is_ready = self.inner.is_ready.clone(); async move { @@ -422,7 +428,12 @@ impl Libp2pNetwork { } handle.subscribe(QC_TOPIC.to_string()).await.unwrap(); - handle.subscribe("DA".to_string()).await.unwrap(); + + // only subscribe to DA events if we are DA + if is_da { + handle.subscribe("DA".to_string()).await.unwrap(); + } + // TODO figure out some way of passing in ALL keypairs. That way we can add the // global topic to the topic map // NOTE this wont' work without this change From 4dff26218f8c559200f98b6efd5cdf354df03d1a Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 26 Oct 2023 14:39:31 -0700 Subject: [PATCH 0268/1393] Fix clippy --- task-impls/src/consensus.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 08e75c6323..1d2ca253ab 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1318,6 +1318,7 @@ where } /// Sends a proposal if possible from the high qc we have + #[allow(clippy::too_many_lines)] pub async fn publish_proposal_if_able( &mut self, _qc: QuorumCertificate>, From 640f241cd28df027eb530ba1a19eaf5e714936f0 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 14:42:30 -0700 Subject: [PATCH 0269/1393] get other nodes' info during initialization --- hotshot/examples/infra/mod.rs | 2 -- orchestrator/src/config.rs | 13 ++++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 738b849402..6a0b54d3dd 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -107,14 +107,12 @@ pub fn load_config_from_file( config_toml.into(); // Generate network's public keys - let mut known_nodes_sk = Vec::new(); let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) .map(|node_id| { let (key_pair, sk) = TYPES::SignatureKey::generated_from_seed_indexed( config.seed, node_id.try_into().unwrap(), ); - known_nodes_sk.push(sk); key_pair }) .collect(); diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 79ea891bed..a35e413383 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -176,6 +176,8 @@ pub struct HotShotConfigFile { pub total_nodes: NonZeroUsize, /// My own public key, secret key, stake value pub my_own_validator_config: ValidatorConfig, + /// The known nodes' public key and stake value + pub known_nodes_with_stake: Vec, /// Number of committee nodes pub committee_nodes: usize, /// Maximum transactions per block @@ -205,7 +207,7 @@ impl From> for HotS total_nodes: val.total_nodes, max_transactions: val.max_transactions, min_transactions: val.min_transactions, - known_nodes_with_stake: Vec::new(), + known_nodes_with_stake: val.known_nodes_with_stake, my_own_validator_config: val.my_own_validator_config, da_committee_size: val.committee_nodes, next_view_timeout: val.next_view_timeout, @@ -233,9 +235,18 @@ fn default_padding() -> usize { impl Default for HotShotConfigFile { fn default() -> Self { + let gen_known_nodes_with_stake = (0..10) + .map(|node_id| { + let cur_validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + + cur_validator_config.public_key.get_stake_table_entry(cur_validator_config.stake_value) + } ) + .collect(); Self { total_nodes: NonZeroUsize::new(10).unwrap(), my_own_validator_config: ValidatorConfig::default(), + known_nodes_with_stake: gen_known_nodes_with_stake, committee_nodes: 5, max_transactions: NonZeroUsize::new(100).unwrap(), min_transactions: 1, From 1fc2210cb59711f9aaa55e3b59739833c4f4474c Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 26 Oct 2023 14:58:53 -0700 Subject: [PATCH 0270/1393] More fixes after merge --- hotshot/examples/infra/mod.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 66b311f2be..1dcc401698 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -20,7 +20,7 @@ use hotshot_orchestrator::{ config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_types::traits::election::VIDExchange; +use hotshot_types::{block_impl::VIDBlockHeader, traits::election::VIDExchange}; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, @@ -460,7 +460,11 @@ pub struct WebServerDARun< #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -628,7 +632,11 @@ pub struct Libp2pDARun, MEMBERSHIP #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, @@ -857,7 +865,11 @@ where /// Main entry point for validators pub async fn main_entry_point< - TYPES: NodeType, + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, MEMBERSHIP: Membership + Debug, DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, From 94bcebf8cfda421690ac5b4868b99ecc238e58ef Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 15:07:11 -0700 Subject: [PATCH 0271/1393] fix lint --- hotshot/examples/infra/mod.rs | 18 +----------------- orchestrator/src/config.rs | 18 ++++++++++-------- orchestrator/src/lib.rs | 13 ++++++------- testing/src/test_builder.rs | 1 + 4 files changed, 18 insertions(+), 32 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 6a0b54d3dd..355f1957ac 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -103,23 +103,7 @@ pub fn load_config_from_file( toml::from_str::>(&config_file_as_string) .expect("Unable to convert config file to TOML"); - let mut config: NetworkConfig = - config_toml.into(); - - // Generate network's public keys - let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) - .map(|node_id| { - let (key_pair, sk) = TYPES::SignatureKey::generated_from_seed_indexed( - config.seed, - node_id.try_into().unwrap(), - ); - key_pair - }) - .collect(); - - config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) - .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) - .collect(); + let config: NetworkConfig = config_toml.into(); config } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index a35e413383..e4cca05230 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -235,14 +235,16 @@ fn default_padding() -> usize { impl Default for HotShotConfigFile { fn default() -> Self { - let gen_known_nodes_with_stake = (0..10) - .map(|node_id| { - let cur_validator_config: ValidatorConfig = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); - - cur_validator_config.public_key.get_stake_table_entry(cur_validator_config.stake_value) - } ) - .collect(); + let gen_known_nodes_with_stake = (0..10) + .map(|node_id| { + let cur_validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + + cur_validator_config + .public_key + .get_stake_table_entry(cur_validator_config.stake_value) + }) + .collect(); Self { total_nodes: NonZeroUsize::new(10).unwrap(), my_own_validator_config: ValidatorConfig::default(), diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index f506eabba3..c4c665c0dc 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -99,13 +99,12 @@ where //add new node's key to stake table if self.config.web_server_config.clone().is_some() { - let new_key = KEY::get_public_key( - self.config - .config - .known_nodes_with_stake - .get(node_index as usize) - .expect("node_id should be within the range of known_nodes"), - ); + let new_key = self + .config + .config + .my_own_validator_config + .public_key + .clone(); let client_clone = self.client.clone().unwrap(); async move { client_clone diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 45fbd686cf..0904678f0c 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -201,6 +201,7 @@ impl TestMetadata { .. } = self.clone(); + // We will assign known_nodes' public key and stake value rather than read from config file since it's a test let known_nodes: Vec<::SignatureKey> = (0..total_nodes) .map(|id| { let priv_key = From d4004f1777832a4752e72664e2fb1909e30981e7 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 26 Oct 2023 16:15:39 -0700 Subject: [PATCH 0272/1393] add some comments and make sure key generation is only in system initialization or test initialization --- hotshot/examples/infra/mod.rs | 27 +++++++++---------- .../src/traits/networking/libp2p_network.rs | 2 ++ .../src/traits/networking/memory_network.rs | 1 + .../traits/networking/web_server_network.rs | 1 + testing/src/test_builder.rs | 2 +- testing/src/test_runner.rs | 1 + testing/tests/consensus_task.rs | 2 ++ 7 files changed, 20 insertions(+), 16 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 355f1957ac..e591e4bd2c 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -234,8 +234,8 @@ pub trait RunDA< let config = self.get_config(); // Get KeyPair for certificate Aggregation - let (pk, sk) = - TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); + let pk = config.config.my_own_validator_config.public_key.clone(); + let sk = config.config.my_own_validator_config.private_key.clone(); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let entry = pk.get_stake_table_entry(1u64); @@ -480,12 +480,8 @@ where async fn initialize_networking( config: NetworkConfig, ) -> WebServerDARun { - // Generate our own key - let (pub_key, _priv_key) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); + // Get our own key + let pub_key = config.config.my_own_validator_config.public_key.clone(); // Get the configuration for the web server let WebServerConfig { @@ -634,11 +630,7 @@ where async fn initialize_networking( config: NetworkConfig, ) -> Libp2pDARun { - let (pubkey, _privkey) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); + let pubkey = config.config.my_own_validator_config.public_key.clone(); let mut config = config; let libp2p_config = config .libp2p_config @@ -719,8 +711,13 @@ where let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); for i in 0..config.config.total_nodes.get() as u64 { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); + let pubkey = <::SignatureKey>::get_public_key( + config + .config + .known_nodes_with_stake + .get(i as usize) + .expect("node_id should be within the range of known_nodes"), + ); if i < config.config.da_committee_size as u64 { da_keys.insert(pubkey.clone()); } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 20e6718955..86f6f02930 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -150,6 +150,7 @@ where "DA committee size must be less than or equal to total # nodes" ); let bootstrap_addrs: PeerInfoVec = Arc::default(); + // We assign known_nodes' public key and stake value rather than read from config file since it's a test let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); @@ -174,6 +175,7 @@ where let addr = // Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/0/quic-v1")).unwrap(); Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}{}/quic-v1", 5000 + node_id, network_id)).unwrap(); + // We assign node's public key and stake value rather than read from config file since it's a test let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 3ea28fe7e6..41f153120f 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -254,6 +254,7 @@ impl> _is_da: bool, ) -> Box Self + 'static> { let master: Arc<_> = MasterMap::new(); + // We assign known_nodes' public key and stake value rather than read from config file since it's a test Box::new(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 5a67880e9f..27ec7831fd 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -1082,6 +1082,7 @@ impl> port, )); + // We assign known_nodes' public key and stake value rather than read from config file since it's a test let known_nodes = (0..expected_node_count as u64) .map(|id| { TYPES::SignatureKey::from_private( diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 0904678f0c..b506403f42 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -201,7 +201,7 @@ impl TestMetadata { .. } = self.clone(); - // We will assign known_nodes' public key and stake value rather than read from config file since it's a test + // We assign known_nodes' public key and stake value rather than read from config file since it's a test let known_nodes: Vec<::SignatureKey> = (0..total_nodes) .map(|id| { let priv_key = diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 78f624d06c..1988a38da5 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -207,6 +207,7 @@ where let initializer = HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); let networks = (self.launcher.resource_generator.channel_generator)(node_id); + // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); let hotshot = self diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index dc8439a85d..cc3e77a3f1 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -96,6 +96,7 @@ async fn test_consensus_task() { async_compatibility_layer::logging::setup_backtrace(); let handle = build_system_handle(1).await.0; + // We assign node's key pair rather than read from config file since it's a test let (private_key, public_key) = key_pair_for_id(1); let mut input = Vec::new(); @@ -148,6 +149,7 @@ async fn test_consensus_vote() { async_compatibility_layer::logging::setup_backtrace(); let handle = build_system_handle(2).await.0; + // We assign node's key pair rather than read from config file since it's a test let (private_key, public_key) = key_pair_for_id(1); let mut input = Vec::new(); From 2612c2aad7b05f2f1cf563c40bf3de5d4e522aa2 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 27 Oct 2023 15:14:10 -0400 Subject: [PATCH 0273/1393] Remove validating/sequencing ambiguity for readability (#1950) * remove validating/sequencing ambiguity for readability * remove everything but message --- hotshot/examples/infra/mod.rs | 70 +- hotshot/examples/libp2p/types.rs | 10 +- hotshot/examples/web-server-da/types.rs | 10 +- hotshot/src/demo.rs | 43 +- hotshot/src/lib.rs | 108 +- hotshot/src/tasks/mod.rs | 182 ++- hotshot/src/traits/election/vrf.rs | 1024 ----------------- hotshot/src/types/handle.rs | 8 +- task-impls/src/consensus.rs | 263 ++--- task-impls/src/da.rs | 88 +- task-impls/src/events.rs | 2 +- task-impls/src/harness.rs | 18 +- task-impls/src/network.rs | 136 ++- task-impls/src/transactions.rs | 60 +- task-impls/src/vid.rs | 76 +- task-impls/src/view_sync.rs | 130 +-- testing/src/node_types.rs | 472 ++++---- testing/src/overall_safety_task.rs | 9 +- testing/src/task_helpers.rs | 79 +- testing/tests/basic.rs | 16 +- testing/tests/catchup.rs | 16 +- testing/tests/combined_network.rs | 6 +- testing/tests/consensus_task.rs | 60 +- testing/tests/da_task.rs | 48 +- testing/tests/libp2p.rs | 6 +- testing/tests/memory_network.rs | 14 +- testing/tests/network_task.rs | 62 +- testing/tests/timeout.rs | 12 +- testing/tests/vid_task.rs | 46 +- testing/tests/view_sync_task.rs | 37 +- testing/tests/web_server.rs | 4 +- types/src/data.rs | 16 +- types/src/traits/consensus_api.rs | 4 +- .../consensus_type/sequencing_consensus.rs | 0 .../consensus_type/validating_consensus.rs | 16 - types/src/traits/node_implementation.rs | 106 +- 36 files changed, 1000 insertions(+), 2257 deletions(-) delete mode 100644 hotshot/src/traits/election/vrf.rs delete mode 100644 types/src/traits/consensus_type/sequencing_consensus.rs delete mode 100644 types/src/traits/consensus_type/validating_consensus.rs diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 623f906a87..92593b0df6 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -25,7 +25,7 @@ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, consensus::ConsensusMetricsValue, - data::{QuorumProposal, SequencingLeaf, TestableLeaf}, + data::{Leaf, QuorumProposal, TestableLeaf}, event::{Event, EventType}, message::{Message, SequencingMessage}, traits::{ @@ -33,9 +33,7 @@ use hotshot_types::{ CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, }, network::CommunicationChannel, - node_implementation::{ - CommitteeEx, ExchangesType, NodeType, QuorumEx, SequencingExchanges, - }, + node_implementation::{CommitteeEx, Exchanges, ExchangesType, NodeType, QuorumEx}, state::{ConsensusTime, TestableBlock, TestableState}, }, HotShotConfig, @@ -141,14 +139,14 @@ pub async fn run_orchestrator< VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< + Leaf = Leaf, + Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, - SequencingLeaf, - QuorumProposal>, + Leaf, + QuorumProposal>, MEMBERSHIP, QUORUMNETWORK, Message, @@ -163,7 +161,7 @@ pub async fn run_orchestrator< >, VIDExchange>, >, - Storage = MemoryStorage>, + Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, >, >( @@ -206,14 +204,14 @@ pub trait RunDA< VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< + Leaf = Leaf, + Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, - SequencingLeaf, - QuorumProposal>, + Leaf, + QuorumProposal>, MEMBERSHIP, QUORUMNETWORK, Message, @@ -228,13 +226,13 @@ pub trait RunDA< >, VIDExchange>, >, - Storage = MemoryStorage>, + Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, >, > where ::StateType: TestableState, ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, + Leaf: TestableLeaf, Self: Sync, SystemContext: HotShotType, { @@ -254,10 +252,8 @@ pub trait RunDA< async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { let genesis_block = TYPES::BlockType::genesis(); let initializer = - hotshot::HotShotInitializer::>::from_genesis( - genesis_block, - ) - .expect("Couldn't generate genesis block"); + hotshot::HotShotInitializer::>::from_genesis(genesis_block) + .expect("Couldn't generate genesis block"); let config = self.get_config(); @@ -464,14 +460,14 @@ impl< MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< + Leaf = Leaf, + Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, - SequencingLeaf, - QuorumProposal>, + Leaf, + QuorumProposal>, MEMBERSHIP, WebCommChannel, Message, @@ -496,7 +492,7 @@ impl< Message, >, >, - Storage = MemoryStorage>, + Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, >, > @@ -512,7 +508,7 @@ impl< where ::StateType: TestableState, ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, + Leaf: TestableLeaf, Self: Sync, { async fn initialize_networking( @@ -632,14 +628,14 @@ impl< MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< + Leaf = Leaf, + Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, - SequencingLeaf, - QuorumProposal>, + Leaf, + QuorumProposal>, MEMBERSHIP, Libp2pCommChannel, Message, @@ -664,7 +660,7 @@ impl< Message, >, >, - Storage = MemoryStorage>, + Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, >, > @@ -680,7 +676,7 @@ impl< where ::StateType: TestableState, ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, + Leaf: TestableLeaf, Self: Sync, { async fn initialize_networking( @@ -865,14 +861,14 @@ pub async fn main_entry_point< VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< + Leaf = Leaf, + Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, - SequencingLeaf, - QuorumProposal>, + Leaf, + QuorumProposal>, MEMBERSHIP, QUORUMNETWORK, Message, @@ -887,7 +883,7 @@ pub async fn main_entry_point< >, VIDExchange>, >, - Storage = MemoryStorage>, + Storage = MemoryStorage>, ConsensusMessage = SequencingMessage, >, RUNDA: RunDA, @@ -896,7 +892,7 @@ pub async fn main_entry_point< ) where ::StateType: TestableState, ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, + Leaf: TestableLeaf, { setup_logging(); setup_backtrace(); diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index cc474d62b8..2c49b76401 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -8,11 +8,11 @@ use hotshot::{ }; use hotshot_types::{ certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal, SequencingLeaf}, + data::{DAProposal, Leaf, QuorumProposal}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, - node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, + node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, vote::{DAVote, QuorumVote, ViewSyncVote}, }; @@ -22,7 +22,7 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = SequencingLeaf; +pub type ThisLeaf = Leaf; pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = Libp2pCommChannel; @@ -41,8 +41,8 @@ pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; - type Leaf = SequencingLeaf; - type Exchanges = SequencingExchanges< + type Leaf = Leaf; + type Exchanges = Exchanges< DemoTypes, Message, QuorumExchange< diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 6fe7b1b087..cb3b453d6a 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -8,11 +8,11 @@ use hotshot::{ }; use hotshot_types::{ certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal, SequencingLeaf}, + data::{DAProposal, Leaf, QuorumProposal}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, - node_implementation::{ChannelMaps, NodeImplementation, NodeType, SequencingExchanges}, + node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, vote::{DAVote, QuorumVote, ViewSyncVote}, }; @@ -22,7 +22,7 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = SequencingLeaf; +pub type ThisLeaf = Leaf; pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = WebCommChannel; @@ -41,8 +41,8 @@ pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; - type Leaf = SequencingLeaf; - type Exchanges = SequencingExchanges< + type Leaf = Leaf; + type Exchanges = Exchanges< DemoTypes, Message, QuorumExchange< diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 81cac76f4b..afebd291a0 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -13,10 +13,7 @@ use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, certificate::{AssembledSignature, QuorumCertificate}, - data::{ - fake_commitment, genesis_proposer_id, random_commitment, LeafType, SequencingLeaf, - ViewNumber, - }, + data::{fake_commitment, genesis_proposer_id, random_commitment, Leaf, LeafType, ViewNumber}, traits::{ election::Membership, node_implementation::NodeType, @@ -30,7 +27,7 @@ use std::{fmt::Debug, marker::PhantomData}; /// sequencing demo entry state #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct SDemoState { +pub struct DemoState { /// the block height block_height: u64, /// the view number @@ -39,9 +36,9 @@ pub struct SDemoState { prev_state_commitment: Commitment, } -impl Committable for SDemoState { +impl Committable for DemoState { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("SDemo State Commit") + commit::RawCommitmentBuilder::new("Demo State Commit") .u64_field("block_height", self.block_height) .u64_field("view_number", *self.view_number) .field("prev_state_commitment", self.prev_state_commitment) @@ -53,7 +50,7 @@ impl Committable for SDemoState { } } -impl Default for SDemoState { +impl Default for DemoState { fn default() -> Self { Self { block_height: 0, @@ -63,7 +60,7 @@ impl Default for SDemoState { } } -impl State for SDemoState { +impl State for DemoState { type Error = BlockPayloadError; type BlockType = VIDBlockPayload; @@ -87,7 +84,7 @@ impl State for SDemoState { return Err(BlockPayloadError::InvalidBlock); } - Ok(SDemoState { + Ok(DemoState { block_height: self.block_height + 1, view_number: *view_number, prev_state_commitment: self.commit(), @@ -97,7 +94,7 @@ impl State for SDemoState { fn on_commit(&self) {} } -impl TestableState for SDemoState { +impl TestableState for DemoState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, @@ -131,39 +128,39 @@ impl NodeType for DemoTypes { type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = SDemoState; + type StateType = DemoState; } /// The node implementation for the sequencing demo #[derive(Derivative)] #[derivative(Clone(bound = ""))] -pub struct SDemoNode(PhantomData) +pub struct DemoNode(PhantomData) where MEMBERSHIP: Membership + std::fmt::Debug; -impl SDemoNode +impl DemoNode where MEMBERSHIP: Membership + std::fmt::Debug, { - /// Create a new `SDemoNode` + /// Create a new `DemoNode` #[must_use] pub fn new() -> Self { - SDemoNode(PhantomData) + DemoNode(PhantomData) } } -impl Debug for SDemoNode +impl Debug for DemoNode where MEMBERSHIP: Membership + std::fmt::Debug, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SDemoNode") + f.debug_struct("DemoNode") .field("_phantom", &"phantom") .finish() } } -impl Default for SDemoNode +impl Default for DemoNode where MEMBERSHIP: Membership + std::fmt::Debug, { @@ -185,16 +182,16 @@ pub fn random_quorum_certificate( +/// Provides a random [`Leaf`] +pub fn random_leaf( deltas: Either>, rng: &mut dyn rand::RngCore, -) -> SequencingLeaf { +) -> Leaf { let justify_qc = random_quorum_certificate(rng); // let state = TYPES::StateType::default() // .append(&deltas, &TYPES::Time::new(42)) // .unwrap_or_default(); - SequencingLeaf { + Leaf { view_number: justify_qc.view_number, height: rng.next_u64(), justify_qc, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index d105b9ca68..d178619ea7 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -52,30 +52,30 @@ use hotshot_task::{ event_stream::{ChannelStream, EventStream}, task_launcher::TaskRunner, }; -use hotshot_task_impls::{events::SequencingHotShotEvent, network::NetworkTaskKind}; +use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ certificate::{TimeoutCertificate, VIDCertificate}, data::VidDisperse, - traits::node_implementation::SequencingTimeoutEx, + traits::node_implementation::TimeoutEx, }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, ViewSyncCertificate}, consensus::{BlockStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, - data::{DAProposal, DeltasType, LeafType, QuorumProposal, SequencingLeaf}, + data::{DAProposal, DeltasType, Leaf, LeafType, QuorumProposal}, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, SequencingMessage, }, traits::{ - consensus_api::{ConsensusSharedApi, SequencingConsensusApi}, + consensus_api::{ConsensusApi, ConsensusSharedApi}, election::{ConsensusExchange, Membership, SignedCertificate}, network::{CommunicationChannel, NetworkError}, node_implementation::{ - ChannelMaps, CommitteeEx, ExchangesType, NodeType, SendToTasks, SequencingQuorumEx, - VIDEx, ViewSyncEx, + ChannelMaps, CommitteeEx, ExchangesType, NodeType, QuorumEx, SendToTasks, VIDEx, + ViewSyncEx, }, signature_key::SignatureKey, state::ConsensusTime, @@ -149,7 +149,7 @@ pub struct SystemContextInner> { output_event_stream: ChannelStream>, /// access to the internal event stream, in case we need to, say, shut something down - internal_event_stream: ChannelStream>, + internal_event_stream: ChannelStream>, /// uid for instrumentation id: u64, @@ -248,7 +248,7 @@ impl> SystemContext { pub async fn start_consensus(&self) { self.inner .internal_event_stream - .publish(SequencingHotShotEvent::QCFormed(either::Left( + .publish(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), ))) .await; @@ -391,7 +391,7 @@ impl> SystemContext { ) -> Result< ( SystemContextHandle, - ChannelStream>, + ChannelStream>, ), HotShotError, > @@ -631,18 +631,18 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, MEMBERSHIP: Membership, > HotShotType for SystemContext where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, CommitteeEx: ConsensusExchange< @@ -669,10 +669,10 @@ where Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, Membership = MEMBERSHIP, @@ -798,86 +798,16 @@ where } } -/// A handle that exposes the interface that hotstuff needs to interact with [`HotShot`] -#[derive(Clone)] -struct HotShotValidatingConsensusApi> { - /// Reference to the [`SystemContextInner`] - inner: Arc>, -} - -#[async_trait] -impl> ConsensusSharedApi - for HotShotValidatingConsensusApi -{ - fn total_nodes(&self) -> NonZeroUsize { - self.inner.config.total_nodes - } - - fn propose_min_round_time(&self) -> Duration { - self.inner.config.propose_min_round_time - } - - fn propose_max_round_time(&self) -> Duration { - self.inner.config.propose_max_round_time - } - - fn max_transactions(&self) -> NonZeroUsize { - self.inner.config.max_transactions - } - - fn min_transactions(&self) -> usize { - self.inner.config.min_transactions - } - - /// Generates and encodes a vote token - - async fn should_start_round(&self, _: TYPES::Time) -> bool { - false - } - - async fn send_event(&self, event: Event) { - debug!(?event, "send_event"); - let mut event_sender = self.inner.event_sender.write().await; - if let Some(sender) = &*event_sender { - if let Err(e) = sender.send_async(event).await { - error!(?e, "Could not send event to event_sender"); - *event_sender = None; - } - } - } - - fn public_key(&self) -> &TYPES::SignatureKey { - &self.inner.public_key - } - - fn private_key(&self) -> &::PrivateKey { - &self.inner.private_key - } - - async fn store_leaf( - &self, - old_anchor_view: TYPES::Time, - leaf: I::Leaf, - ) -> std::result::Result<(), hotshot_types::traits::storage::StorageError> { - let view_to_insert = StoredView::from(leaf); - let storage = &self.inner.storage; - storage.append_single_view(view_to_insert).await?; - storage.cleanup_storage_up_to_view(old_anchor_view).await?; - storage.commit().await?; - Ok(()) - } -} - /// A handle that exposes the interface that hotstuff needs to interact with [`HotShot`] #[derive(Clone, Debug)] -pub struct HotShotSequencingConsensusApi> { +pub struct HotShotConsensusApi> { /// Reference to the [`SystemContextInner`] pub inner: Arc>, } #[async_trait] impl> ConsensusSharedApi - for HotShotSequencingConsensusApi + for HotShotConsensusApi { fn total_nodes(&self) -> NonZeroUsize { self.inner.config.total_nodes @@ -942,7 +872,7 @@ impl> ConsensusSharedApi>, - > SequencingConsensusApi for HotShotSequencingConsensusApi + > ConsensusApi for HotShotConsensusApi { async fn send_direct_message( &self, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 7207a273a9..67247f4e19 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,8 +1,7 @@ //! Provides a number of tasks that run continuously on a [`HotShot`] use crate::{ - async_spawn, types::SystemContextHandle, DACertificate, HotShotSequencingConsensusApi, - QuorumCertificate, SequencingQuorumEx, + async_spawn, types::SystemContextHandle, DACertificate, HotShotConsensusApi, QuorumCertificate, }; use async_compatibility_layer::art::async_sleep; use commit::{Commitment, CommitmentBounds}; @@ -16,9 +15,9 @@ use hotshot_task::{ GeneratedStream, Merge, }; use hotshot_task_impls::{ - consensus::{consensus_event_filter, ConsensusTaskTypes, SequencingConsensusTaskState}, + consensus::{consensus_event_filter, ConsensusTaskState, ConsensusTaskTypes}, da::{DATaskState, DATaskTypes}, - events::SequencingHotShotEvent, + events::HotShotEvent, network::{ NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, NetworkMessageTaskTypes, NetworkTaskKind, @@ -30,15 +29,15 @@ use hotshot_task_impls::{ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::{TimeoutCertificate, VIDCertificate, ViewSyncCertificate}, - data::{ProposalType, QuorumProposal, SequencingLeaf}, + data::{Leaf, ProposalType, QuorumProposal}, event::Event, message::{Message, Messages, SequencingMessage}, traits::{ election::{ConsensusExchange, Membership}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ - CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, - SequencingTimeoutEx, VIDEx, ViewSyncEx, + CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, TimeoutEx, VIDEx, + ViewSyncEx, }, state::ConsensusTime, }, @@ -65,11 +64,7 @@ pub enum GlobalEvent { /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_message_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, VOTE: VoteType, @@ -83,7 +78,7 @@ pub async fn add_network_message_task< > + 'static, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, exchange: EXCHANGE, ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. @@ -175,11 +170,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_event_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, VOTE: VoteType, @@ -193,7 +184,7 @@ pub async fn add_network_event_task< > + 'static, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, exchange: EXCHANGE, task_kind: NetworkTaskKind, ) -> TaskRunner @@ -253,24 +244,20 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, output_stream: ChannelStream>, handle: SystemContextHandle, ) -> TaskRunner where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, @@ -278,21 +265,21 @@ where Certificate = DACertificate, Commitment = Commitment, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, { let consensus = handle.hotshot.get_consensus(); - let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; let registry = task_runner.registry.clone(); // build the consensus task - let consensus_state = SequencingConsensusTaskState { + let consensus_state = ConsensusTaskState { registry: registry.clone(), consensus, timeout: handle.hotshot.inner.config.next_view_timeout, @@ -326,14 +313,9 @@ where let filter = FilterEvent(Arc::new(consensus_event_filter)); let consensus_name = "Consensus Task"; let consensus_event_handler = HandleEvent(Arc::new( - move |event, - mut state: SequencingConsensusTaskState< - TYPES, - I, - HotShotSequencingConsensusApi, - >| { + move |event, mut state: ConsensusTaskState>| { async move { - if let SequencingHotShotEvent::Shutdown = event { + if let HotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) } else { state.handle_event(event).await; @@ -344,7 +326,7 @@ where }, )); let consensus_task_builder = TaskBuilder::< - ConsensusTaskTypes>, + ConsensusTaskTypes>, >::new(consensus_name.to_string()) .register_event_stream(event_stream.clone(), filter) .await @@ -369,14 +351,10 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_vid_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, vid_exchange: VIDEx, handle: SystemContextHandle, ) -> TaskRunner @@ -389,7 +367,7 @@ where >, { // build the vid task - let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; let registry = task_runner.registry.clone(); @@ -404,7 +382,7 @@ where id: handle.hotshot.inner.id, }; let vid_event_handler = HandleEvent(Arc::new( - move |event, mut state: VIDTaskState>| { + move |event, mut state: VIDTaskState>| { async move { let completion_status = state.handle_event(event).await; (completion_status, state) @@ -414,18 +392,19 @@ where )); let vid_name = "VID Task"; let vid_event_filter = FilterEvent(Arc::new( - VIDTaskState::>::filter, + VIDTaskState::>::filter, )); - let vid_task_builder = TaskBuilder::< - VIDTaskTypes>, - >::new(vid_name.to_string()) - .register_event_stream(event_stream.clone(), vid_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(vid_state) - .register_event_handler(vid_event_handler); + let vid_task_builder = + TaskBuilder::>>::new( + vid_name.to_string(), + ) + .register_event_stream(event_stream.clone(), vid_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(vid_state) + .register_event_handler(vid_event_handler); // impossible for unwrap to fail // we *just* registered let vid_task_id = vid_task_builder.get_task_id().unwrap(); @@ -438,14 +417,10 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_da_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, committee_exchange: CommitteeEx, handle: SystemContextHandle, ) -> TaskRunner @@ -458,7 +433,7 @@ where >, { // build the da task - let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; let registry = task_runner.registry.clone(); @@ -473,7 +448,7 @@ where id: handle.hotshot.inner.id, }; let da_event_handler = HandleEvent(Arc::new( - move |event, mut state: DATaskState>| { + move |event, mut state: DATaskState>| { async move { let completion_status = state.handle_event(event).await; (completion_status, state) @@ -483,12 +458,12 @@ where )); let da_name = "DA Task"; let da_event_filter = FilterEvent(Arc::new( - DATaskState::>::filter, + DATaskState::>::filter, )); - let da_task_builder = TaskBuilder::< - DATaskTypes>, - >::new(da_name.to_string()) + let da_task_builder = TaskBuilder::>>::new( + da_name.to_string(), + ) .register_event_stream(event_stream.clone(), da_event_filter) .await .register_registry(&mut registry.clone()) @@ -507,14 +482,10 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, quorum_exchange: QuorumEx, handle: SystemContextHandle, ) -> TaskRunner @@ -526,7 +497,7 @@ where >, { // build the transactions task - let c_api: HotShotSequencingConsensusApi = HotShotSequencingConsensusApi { + let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; let registry = task_runner.registry.clone(); @@ -542,12 +513,7 @@ where id: handle.hotshot.inner.id, }; let transactions_event_handler = HandleEvent(Arc::new( - move |event, - mut state: TransactionTaskState< - TYPES, - I, - HotShotSequencingConsensusApi, - >| { + move |event, mut state: TransactionTaskState>| { async move { let completion_status = state.handle_event(event).await; (completion_status, state) @@ -557,11 +523,11 @@ where )); let transactions_name = "Transactions Task"; let transactions_event_filter = FilterEvent(Arc::new( - TransactionTaskState::>::filter, + TransactionTaskState::>::filter, )); let transactions_task_builder = TaskBuilder::< - TransactionsTaskTypes>, + TransactionsTaskTypes>, >::new(transactions_name.to_string()) .register_event_stream(event_stream.clone(), transactions_event_filter) .await @@ -580,14 +546,10 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_view_sync_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, handle: SystemContextHandle, ) -> TaskRunner where @@ -599,7 +561,7 @@ where Commitment = Commitment>, >, { - let api = HotShotSequencingConsensusApi { + let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; // build the view sync task @@ -618,32 +580,26 @@ where last_garbage_collected_view: TYPES::Time::new(0), }; let registry = task_runner.registry.clone(); - let view_sync_event_handler = - HandleEvent(Arc::new( - move |event, - mut state: ViewSyncTaskState< - TYPES, - I, - HotShotSequencingConsensusApi, - >| { - async move { - if let SequencingHotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - state.handle_event(event).await; - (None, state) - } + let view_sync_event_handler = HandleEvent(Arc::new( + move |event, mut state: ViewSyncTaskState>| { + async move { + if let HotShotEvent::Shutdown = event { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + state.handle_event(event).await; + (None, state) } - .boxed() - }, - )); + } + .boxed() + }, + )); let view_sync_name = "ViewSync Task"; let view_sync_event_filter = FilterEvent(Arc::new( - ViewSyncTaskState::>::filter, + ViewSyncTaskState::>::filter, )); let view_sync_task_builder = TaskBuilder::< - ViewSyncTaskStateTypes>, + ViewSyncTaskStateTypes>, >::new(view_sync_name.to_string()) .register_event_stream(event_stream.clone(), view_sync_event_filter) .await diff --git a/hotshot/src/traits/election/vrf.rs b/hotshot/src/traits/election/vrf.rs deleted file mode 100644 index d1151e0ab2..0000000000 --- a/hotshot/src/traits/election/vrf.rs +++ /dev/null @@ -1,1024 +0,0 @@ -use hotshot_types::traits::signature_key::EncodedPublicKey; - -#[allow(deprecated)] -use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt::Debug, marker::PhantomData, num::NonZeroU64}; - -// TODO wrong palce for this -/// the sortition committee size parameter -pub const SORTITION_PARAMETER: u64 = 100; - -// TODO compatibility this function's impl into a trait -// TODO do we necessariy want the units of stake to be a u64? or generics -/// The stake table for VRFs -#[derive(Serialize, Deserialize, Debug)] -pub struct VRFStakeTable { - /// the mapping of id -> stake - mapping: BTreeMap, - /// total stake present - total_stake: NonZeroU64, - /// PhantomData - _pd: PhantomData<(VRF, VRFHASHER, VRFPARAMS)>, -} - -impl Clone for VRFStakeTable { - fn clone(&self) -> Self { - Self { - mapping: self.mapping.clone(), - total_stake: self.total_stake, - _pd: PhantomData, - } - } -} - -// impl VRFStakeTable { -// /// get total stake -// #[must_use] -// pub fn get_all_stake(&self) -> NonZeroU64 { -// self.total_stake -// } -// } - -// impl VRFStakeTable -// where -// VRF: Vrf, -// VRFPARAMS: Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// VRF::PublicKey: Clone, -// { -// /// get total stake -// /// # Panics -// /// If converting non-zero stake into `NonZeroU64` fails -// pub fn get_stake(&self, pk: &JfPubKey) -> Option -// where -// SIGSCHEME: SignatureScheme< -// VerificationKey = VRF::PublicKey, -// PublicParameter = (), -// MessageUnit = u8, -// >, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// { -// let encoded = pk.to_bytes(); -// let stake = self.mapping.get(&encoded).map(|val| val.get()); -// stake.and_then(NonZeroU64::new) -// } -// } - -// /// the vrf implementation -// #[derive(Derivative)] -// #[derivative(Debug, Eq, PartialEq)] -// pub struct VrfImpl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> -// where -// VRF: Vrf + Sync + Send, -// TYPES: NodeType, -// { -// /// the stake table -// #[derivative(Debug = "ignore", PartialEq = "ignore")] -// stake_table: VRFStakeTable, -// /// the proof params -// #[derivative(Debug = "ignore", PartialEq = "ignore")] -// proof_parameters: VRF::PublicParameter, -// /// the rng -// #[derivative(PartialEq = "ignore")] -// prng: std::sync::Arc>, -// /// the committee parameter -// sortition_parameter: NonZeroU64, -// /// the chain commitment seed -// chain_seed: [u8; 32], -// /// pdf cache -// #[derivative(PartialEq = "ignore")] -// _sortition_cache: std::sync::Arc>>>, - -// /// phantom data -// _pd: PhantomData<(TYPES, LEAF, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS)>, -// } - -// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> Clone -// for VrfImpl -// where -// VRF: Vrf + Sync + Send, -// TYPES: NodeType, -// { -// fn clone(&self) -> Self { -// Self { -// stake_table: self.stake_table.clone(), -// proof_parameters: (), -// prng: self.prng.clone(), -// sortition_parameter: self.sortition_parameter, -// chain_seed: self.chain_seed, -// _sortition_cache: Arc::default(), -// _pd: PhantomData, -// } -// } -// } - -// /// TODO doc me -// #[derive(Serialize, Deserialize, Clone)] -// pub struct VRFVoteToken { -// /// The public key assocaited with this token -// pub pub_key: PUBKEY, -// /// The list of signatures -// pub proof: PROOF, -// /// The number of signatures that are valid -// /// TODO (ct) this should be the sorition outbput -// pub count: NonZeroU64, -// } - -// impl Hash for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn hash(&self, state: &mut H) { -// bincode_opts().serialize(&self.pub_key).unwrap().hash(state); -// bincode_opts().serialize(&self.proof).unwrap().hash(state); -// self.count.hash(state); -// } -// } - -// impl Debug for VRFVoteToken { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// f.debug_struct("VRFVoteToken") -// .field("pub_key", &std::any::type_name::()) -// .field("proof", &std::any::type_name::()) -// .field("count", &self.count) -// .finish() -// } -// } - -// impl PartialEq for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn eq(&self, other: &Self) -> bool { -// self.count == other.count -// && bincode_opts().serialize(&self.pub_key).unwrap() -// == bincode_opts().serialize(&other.pub_key).unwrap() -// && bincode_opts().serialize(&self.proof).unwrap() -// == bincode_opts().serialize(&other.proof).unwrap() -// } -// } - -// impl VoteToken for VRFVoteToken -// where -// PUBKEY: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, -// PROOF: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, -// { -// fn vote_count(&self) -> NonZeroU64 { -// self.count -// } -// } - -// impl Committable for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn commit(&self) -> Commitment { -// RawCommitmentBuilder::new(std::any::type_name::()) -// .u64(self.count.get()) -// .var_size_bytes(bincode_opts().serialize(&self.pub_key).unwrap().as_slice()) -// .var_size_bytes(bincode_opts().serialize(&self.proof).unwrap().as_slice()) -// .finalize() -// } - -// fn tag() -> String { -// tag::VRF_VOTE_TOKEN.to_string() -// } -// } - -// // KEY is VRFPubKey -// impl> -// Membership for VrfImpl -// where -// SIGSCHEME: SignatureScheme + Sync + Send + 'static, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// VRF: Vrf< -// PublicParameter = (), -// Input = Vec, -// Output = Vec, -// PublicKey = SIGSCHEME::VerificationKey, -// SecretKey = SIGSCHEME::SigningKey, -// > + Sync -// + Send -// + 'static, -// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, -// VRF::PublicParameter: Sync + Send, -// VRFHASHER: digest::Digest + Clone + Sync + Send + 'static, -// VRFPARAMS: Sync + Send + Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// TYPES: NodeType< -// VoteTokenType = VRFVoteToken, -// ElectionConfigType = VRFStakeTableConfig, -// SignatureKey = JfPubKey, -// >, -// { -// // pubkey -> unit of stake -// type StakeTable = VRFStakeTable; - -// // FIXED STAKE -// // just return the state -// fn get_stake_table( -// &self, -// _view_number: TYPES::Time, -// _state: &TYPES::StateType, -// ) -> Self::StakeTable { -// self.stake_table.clone() -// } - -// fn get_leader(&self, view_number: TYPES::Time) -> JfPubKey { -// // TODO fst2 (ct) this is round robin, we should make this dependent on -// // the VRF + some source of randomness - -// // TODO for now do by stake table of how much stake each -// // participant has -// let mapping = &self.stake_table.mapping; -// let index = ((*view_number) as usize) % mapping.len(); -// let encoded = mapping.keys().nth(index).unwrap(); -// SignatureKey::from_bytes(encoded).unwrap() -// } - -// // what this is doing: -// // - -// fn make_vote_token( -// // TODO see if we can make this take &mut self -// // because we're using a mutable prng -// &self, -// view_number: TYPES::Time, -// private_key: &(SIGSCHEME::SigningKey, SIGSCHEME::VerificationKey), -// ) -> Result, ElectionError> { -// let pub_key = JfPubKey::::from_native(private_key.1.clone()); -// let Some(replicas_stake) = self.stake_table.get_stake(&pub_key) else { return Ok(None) }; - -// let view_seed = generate_view_seed::(view_number, &self.chain_seed); - -// let proof = Self::internal_get_vrf_proof( -// &private_key.0, -// &self.proof_parameters, -// &mut self.prng.lock().unwrap(), -// &view_seed, -// )?; - -// let selected_stake = Self::internal_get_sortition_for_proof( -// &self.proof_parameters, -// &proof, -// self.stake_table.get_all_stake(), -// replicas_stake, -// self.sortition_parameter, -// ); - -// match selected_stake { -// Some(count) => { -// // TODO (ct) this can fail, return Result::Err -// let proof = VRF::prove( -// &self.proof_parameters, -// &private_key.0, -// &view_seed, -// &mut *self.prng.lock().unwrap(), -// ) -// .unwrap(); - -// Ok(Some(VRFVoteToken { -// pub_key: private_key.1.clone(), -// proof, -// count, -// })) -// } -// None => Ok(None), -// } -// } - -// fn validate_vote_token( -// &self, -// view_number: TYPES::Time, -// pub_key: JfPubKey, -// token: Checked, -// ) -> Result, ElectionError> { -// match token { -// Checked::Unchecked(token) => { -// let stake: Option = self.stake_table.get_stake(&pub_key); -// let view_seed = -// generate_view_seed::(view_number, &self.chain_seed); -// if let Some(stake) = stake { -// Self::internal_check_sortition( -// &pub_key.pk, -// &self.proof_parameters, -// &token.proof, -// self.stake_table.get_all_stake(), -// stake, -// self.sortition_parameter, -// token.count, -// &view_seed, -// ) -// .map(|c| match c { -// Checked::Inval(_) => Checked::Inval(token), -// Checked::Valid(_) => Checked::Valid(token), -// Checked::Unchecked(_) => Checked::Unchecked(token), -// }) -// } else { -// // TODO better error -// Err(ElectionError::StubError) -// } -// } -// already_checked => Ok(already_checked), -// } -// } - -// fn create_election(keys: Vec>, config: TYPES::ElectionConfigType) -> Self { -// // This all needs to be refactored. For one thing, having the stake table - even an initial -// // stake table - hardcoded like this is flat-out broken. This is, obviously, an artifact -// let genesis_seed = [0u8; 32]; -// VrfImpl::with_initial_stake(keys, &config, genesis_seed) -// } - -// fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { -// let mut stake = Vec::new(); -// let units_of_stake_per_node = NonZeroU64::new(100).unwrap(); -// for _ in 0..num_nodes { -// stake.push(units_of_stake_per_node); -// } -// VRFStakeTableConfig { -// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: stake, -// } -// } - -// fn success_threshold(&self) -> NonZeroU64 { -// NonZeroU64::new(((u64::from(self.sortition_parameter) * 2) / 3) + 1).unwrap() -// } - -// fn failure_threshold(&self) -> NonZeroU64 { -// NonZeroU64::new(((u64::from(self.sortition_parameter)) / 3) + 1).unwrap() -// } -// /// TODO if we ever come back to using this, we'll need to change this -// /// this stub is incorrect as it stands right now -// fn get_committee( -// &self, -// _view_number: ::Time, -// ) -> std::collections::BTreeSet<::SignatureKey> { -// self.stake_table -// .mapping -// .keys() -// .clone() -// .filter_map(::SignatureKey::from_bytes) -// .collect() -// } -// } - -// /// checks that the expected aomunt of stake matches the VRF output -// /// TODO this can be optimized most likely -// fn check_bin_idx( -// expected_amount_of_stake: u64, -// replicas_stake: u64, -// total_stake: u64, -// sortition_parameter: u64, -// unnormalized_seed: &[u8; 32], -// cache: &mut HashMap>, -// ) -> Option { -// let bin_idx = find_bin_idx( -// replicas_stake, -// total_stake, -// sortition_parameter, -// unnormalized_seed, -// cache, -// ); -// bin_idx.map(|idx| idx == NonZeroU64::new(expected_amount_of_stake).unwrap()) -// } - -// /// generates the seed from algorand paper -// /// baseed on `view_number` and a constant as of now, but in the future will be other things -// /// this is a stop-gap -// fn generate_view_seed( -// view_number: TYPES::Time, -// vrf_seed: &[u8; 32], -// ) -> [u8; 32] { -// let mut hasher = HASHER::new(); -// hasher.update(vrf_seed); -// hasher.update(view_number.deref().to_le_bytes()); -// let mut output = [0u8; 32]; -// output.copy_from_slice(hasher.finalize().as_ref()); -// output -// } - -// /// represents a binomial query made by sortition -// /// `B(stake_attempt; replicas_stake; sortition_parameter / total_stake)` -// #[derive(Hash, Eq, PartialEq, Clone, Debug)] -// pub struct BinomialQuery { -// /// the number of heads -// stake_attempt: u64, -// /// the total number of coin flips -// replicas_stake: u64, -// /// the total amount of stake -// total_stake: u64, -// /// the sortition parameter -// sortition_parameter: u64, -// } - -// impl BinomialQuery { -// /// get the committee parameter -// /// for this query -// #[must_use] -// pub fn get_p(&self) -> Ratio { -// let sortition_parameter_big: BigUint = BigUint::from(self.sortition_parameter); -// let total_stake_big: BigUint = BigUint::from(self.total_stake); -// Ratio::new(sortition_parameter_big, total_stake_big) -// } -// } - -// #[instrument] -// fn calculate_threshold_from_cache( -// previous_calculation: Option<(BinomialQuery, Ratio)>, -// query: BinomialQuery, -// ) -> Option> { -// if let Some((previous_query, previous_result)) = previous_calculation { -// let expected_previous_query = BinomialQuery { -// stake_attempt: query.stake_attempt - 1, -// ..query -// }; -// if previous_query == expected_previous_query { -// let permutation = Ratio::new( -// BigUint::from(query.replicas_stake - query.stake_attempt + 1), -// BigUint::from(query.stake_attempt), -// ); -// let p = query.get_p(); -// assert!(p.numer() < p.denom()); -// let reciprocal = Ratio::recip(&(Ratio::from_integer(BigUint::from(1_u32)) - p.clone())); -// let result = previous_result * p * reciprocal * permutation; -// assert!(result.numer() < result.denom()); - -// return Some(result); -// } -// } -// calculate_threshold(query) -// } - -// // Calculates B(j; w; p) where B means bernoulli distribution. -// // That is: run w trials, with p probability of success for each trial, and return the probability -// // of j successes. -// // p = tau / W, where tau is the sortition parameter (controlling committee size) -// // this is the only usage of W and tau -// // -// // Translation: -// // stake_attempt: our guess at what the stake might be. This is j -// // replicas_stake: the units of stake owned by the replica. This is w -// // total_stake: the units of stake owned in total. This is W -// // sorition_parameter: the parameter controlling the committee size. This is tau -// // -// // TODO (ct) better error handling -// // returns none if one of our calculations fails -// // -// // TODO keep data around from last iteration so less calculation is needed -// // TODO test this "correct/simple" implementation against any optimized version -// #[instrument] -// // fn calculate_threshold(stake_attempt: u32, replicas_stake: u64, total_stake: u64, sortition_parameter: u64) -> Option> { -// fn calculate_threshold(query: BinomialQuery) -> Option> { -// let stake_attempt = query.stake_attempt; -// tracing::info!("Running calculate threshold"); -// // TODO (ct) better error handling -// if stake_attempt > query.replicas_stake { -// error!("j is larger than amount of stake we are allowed"); -// return None; -// } - -// let sortition_parameter_big: BigUint = BigUint::from(query.sortition_parameter); -// let total_stake_big: BigUint = BigUint::from(query.total_stake); -// let one_big = BigUint::from(1_u32); - -// // this is the p parameter for the bernoulli distribution -// let p = Ratio::new(sortition_parameter_big, total_stake_big); - -// assert!(p.numer() <= p.denom()); - -// info!("p is {p:?}"); - -// // number of tails in bernoulli -// let failed_num = query.replicas_stake - stake_attempt; - -// // TODO cancel things out (avoid calculating factorial) -// // TODO can just do division -// let num_permutations = Ratio::new( -// factorial(query.replicas_stake), -// factorial(stake_attempt) * factorial(failed_num), -// ); - -// info!("num permutations is {num_permutations:?}, failed_num is {failed_num:?}"); - -// let one = Ratio::from_integer(one_big); - -// // TODO can keep results from last try -// let result = num_permutations -// * (p.pow(i32::try_from(stake_attempt).ok()?) -// * (one - p).pow(i32::try_from(failed_num).ok()?)); - -// assert!(result.numer() < result.denom()); - -// info!("result is is {result:?}"); - -// Some(result) -// } - -// /// compute i! as a biguint -// fn factorial(mut i: u64) -> BigUint { -// if i == 0 { -// return BigUint::from(1u32); -// } - -// let mut result = BigUint::from(1u32); -// while i > 0 { -// result *= i; -// i -= 1; -// } -// result -// } - -// /// find the amount of stake we rolled. -// /// NOTE: in the future this requires a view numb -// /// Returns None if zero stake was rolled -// #[instrument] -// fn find_bin_idx( -// replicas_stake: u64, -// total_stake: u64, -// sortition_parameter: u64, -// unnormalized_seed: &[u8; 32], -// cache: &mut HashMap>, -// ) -> Option { -// let unnormalized_seed = BigUint::from_bytes_le(unnormalized_seed); -// let normalized_seed = Ratio::new(unnormalized_seed, BigUint::from(2_u32).pow(256)); -// assert!(normalized_seed.numer() < normalized_seed.denom()); -// let mut j: u64 = 0; - -// // [j, j+1) -// // [cdf(j),cdf(j+1)) - -// // left_threshold corresponds to the sum of all bernoulli distributions -// // from i in 0 to j: B(i; replicas_stake; p). Where p is calculated later and corresponds to -// // algorands paper -// let mut left_threshold = Ratio::from_integer(BigUint::from(0u32)); - -// loop { -// // check cache - -// // if cache miss, feed in with previous val from cache -// // that *probably* exists - -// assert!(left_threshold.numer() < left_threshold.denom()); -// let query = BinomialQuery { -// stake_attempt: j + 1, -// replicas_stake, -// total_stake, -// sortition_parameter, -// }; - -// let bin_val = { -// // we already computed this value -// if let Some(result) = cache.get(&query) { -// result.clone() -// } else { -// // we haven't computed this value, but maybe -// // we already computed the previous value - -// let mut maybe_old_query = query.clone(); -// maybe_old_query.stake_attempt -= 1; -// let old_result = cache -// .get(&maybe_old_query) -// .map(|x| (maybe_old_query, x.clone())); -// let result = calculate_threshold_from_cache(old_result, query.clone())?; -// cache.insert(query, result.clone()); -// result -// } -// }; - -// // corresponds to right range from apper -// let right_threshold = left_threshold + bin_val.clone(); - -// // debugging info. Unnecessary -// { -// let right_threshold_float = ToPrimitive::to_f64(&right_threshold.clone()); -// let bin_val_float = ToPrimitive::to_f64(&bin_val.clone()); -// let normalized_seed_float = ToPrimitive::to_f64(&normalized_seed.clone()); -// info!("rightthreshold: {right_threshold_float:?}, bin: {bin_val_float:?}, seed: {normalized_seed_float:?}"); -// } - -// // from i in 0 to j + 1: B(i; replicas_stake; p) -// if normalized_seed < right_threshold { -// match j { -// 0 => return None, -// _ => return Some(NonZeroU64::new(j).unwrap()), -// } -// } -// left_threshold = right_threshold; -// j += 1; -// } -// } - -// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> -// VrfImpl -// where -// SIGSCHEME: SignatureScheme + Sync + Send, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// VRF: Vrf< -// PublicParameter = (), -// Input = [u8; 32], -// Output = [u8; 32], -// PublicKey = SIGSCHEME::VerificationKey, -// SecretKey = SIGSCHEME::SigningKey, -// > + Sync -// + Send, -// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, -// VRF::PublicParameter: Sync + Send, -// VRFHASHER: digest::Digest + Clone + Sync + Send, -// VRFPARAMS: Sync + Send + Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// TYPES: NodeType, -// { -// /// create stake table with this initial stake -// /// # Panics -// /// TODO -// #[must_use] -// pub fn with_initial_stake( -// known_nodes: Vec>, -// config: &VRFStakeTableConfig, -// genesis_seed: [u8; 32], -// ) -> Self { -// assert_eq!(known_nodes.iter().len(), config.distribution.len()); -// let key_with_stake = known_nodes -// .into_iter() -// .map(|x| x.to_bytes()) -// .zip(config.distribution.clone()) -// .collect(); -// VrfImpl { -// stake_table: { -// let st = VRFStakeTable { -// mapping: key_with_stake, -// total_stake: NonZeroU64::new(config.distribution.iter().map(|x| x.get()).sum()) -// .unwrap(), -// _pd: PhantomData, -// }; -// st -// }, -// proof_parameters: (), -// chain_seed: genesis_seed, -// prng: Arc::new(Mutex::new(ChaChaRng::from_seed(Default::default()))), -// _pd: PhantomData, -// sortition_parameter: config.sortition_parameter, -// _sortition_cache: Arc::default(), -// } -// } - -// /// stateless delegate for VRF proof generation -// /// # Errors -// /// - -// fn internal_get_vrf_proof( -// private_key: &SIGSCHEME::SigningKey, -// proof_param: &VRF::PublicParameter, -// to_refactor: &mut rand_chacha::ChaChaRng, -// vrf_in_seed: &VRF::Input, -// ) -> Result { -// VRF::prove(proof_param, private_key, vrf_in_seed, to_refactor) -// .map_err(|_| ElectionError::StubError) -// } - -// /// stateless delegate for VRF sortition generation -// fn internal_get_sortition_for_proof( -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// ) -> Option { -// // TODO (ct) this can fail, return result::err -// let hash = VRF::evaluate(proof_param, proof).unwrap(); -// let mut cache: HashMap> = HashMap::new(); - -// find_bin_idx( -// u64::from(voter_stake), -// u64::from(total_stake), -// sortition_parameter.into(), -// &hash, -// &mut cache, -// ) -// } - -// /// stateless delegate for VRF sortition confirmation -// /// # Errors -// /// if the proof is malformed -// #[allow(clippy::too_many_arguments)] -// fn internal_check_sortition( -// public_key: &SIGSCHEME::VerificationKey, -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// sortition_claim: NonZeroU64, -// vrf_in_seed: &VRF::Input, -// ) -> Result, hotshot_types::traits::election::ElectionError> { -// if let Ok(true) = VRF::verify(proof_param, proof, public_key, vrf_in_seed) { -// let seed = VRF::evaluate(proof_param, proof).map_err(|_| ElectionError::StubError)?; -// if let Some(res) = check_bin_idx( -// u64::from(sortition_claim), -// u64::from(voter_stake), -// u64::from(total_stake), -// u64::from(sortition_parameter), -// &seed, -// &mut HashMap::new(), -// ) { -// if res { -// Ok(Checked::Valid(())) -// } else { -// Ok(Checked::Inval(())) -// } -// } else { -// Ok(Checked::Unchecked(())) -// } -// } else { -// Ok(Checked::Inval(())) -// } -// } - -// /// Stateless method to produce VRF proof and sortition for a given view number -// /// # Errors -// /// -// pub fn get_sortition_proof( -// private_key: &SIGSCHEME::SigningKey, -// proof_param: &VRF::PublicParameter, -// chain_seed: &VRF::Input, -// view_number: TYPES::Time, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// ) -> Result<(VRF::Proof, Option), hotshot_types::traits::election::ElectionError> -// { -// let mut rng = ChaChaRng::from_seed(Default::default()); // maybe use something else that isn't deterministic? -// let view_seed = generate_view_seed::(view_number, chain_seed); -// let proof = Self::internal_get_vrf_proof(private_key, proof_param, &mut rng, &view_seed)?; -// let sortition = Self::internal_get_sortition_for_proof( -// proof_param, -// &proof, -// total_stake, -// voter_stake, -// sortition_parameter, -// ); -// Ok((proof, sortition)) -// } - -// /// Stateless method to verify VRF proof and sortition for a given view number -// /// # Errors -// /// -// #[allow(clippy::too_many_arguments)] -// pub fn check_sortition_proof( -// public_key: &JfPubKey, -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// sortition_claim: NonZeroU64, -// chain_seed: &VRF::Input, -// view_number: TYPES::Time, -// ) -> Result { -// let view_seed = generate_view_seed::(view_number, chain_seed); -// Self::internal_check_sortition( -// &public_key.pk, -// proof_param, -// proof, -// total_stake, -// voter_stake, -// sortition_parameter, -// sortition_claim, -// &view_seed, -// ) -// .map(|c| matches!(c, Checked::Valid(_))) -// } -// } - -// impl> TestableElection -// for VrfImpl -// where -// TYPES: NodeType< -// VoteTokenType = VRFVoteToken< -// BLSVerKey, -// BLSSignature, -// >, -// ElectionConfigType = VRFStakeTableConfig, -// SignatureKey = JfPubKey, -// >, -// { -// fn generate_test_vote_token() -> TYPES::VoteTokenType { -// VRFVoteToken { -// count: NonZeroU64::new(1234).unwrap(), -// proof: BLSSignature::default(), -// pub_key: BLSVerKey::default(), -// } -// } -// } - -// /// configuration specifying the stake table -// #[derive(Clone, Serialize, Deserialize, core::fmt::Debug)] -// pub struct VRFStakeTableConfig { -// /// the committee size parameter -// pub sortition_parameter: NonZeroU64, -// /// the ordered distribution of stake across nodes -// pub distribution: Vec, -// } - -// impl Default for VRFStakeTableConfig { -// fn default() -> Self { -// VRFStakeTableConfig { -// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: Vec::new(), -// } -// } -// } - -// impl ElectionConfig for VRFStakeTableConfig {} - -// Tests have been commented out, so `mod tests` isn't used. -// #[cfg(test)] -// mod tests { -// use super::*; -// use ark_bls12_381::Parameters as Param381; -// use ark_std::test_rng; - -// use blake3::Hasher; -// use hotshot_types::{ -// data::ViewNumber, -// traits::{ -// block_contents::dummy::{DummyBlock, DummyTransaction}, -// consensus_type::validating_consensus::ValidatingConsensus, -// state::dummy::DummyState, -// }, -// }; -// use jf_primitives::{ -// signatures::{ -// bls::{BLSSignature, BLSVerKey}, -// BLSSignatureScheme, -// }, -// vrf::blsvrf::BLSVRFScheme, -// }; -// use std::{num::NonZeroUsize, time::Duration}; - -// #[derive( -// Copy, -// Clone, -// Debug, -// Default, -// Hash, -// PartialEq, -// Eq, -// PartialOrd, -// Ord, -// serde::Serialize, -// serde::Deserialize, -// )] -// struct TestTypes; -// impl NodeType for TestTypes { -// // TODO (da) can this be SequencingConsensus? -// type ConsensusType = ValidatingConsensus; -// type Time = ViewNumber; -// type BlockType = DummyBlock; -// type SignatureKey = JfPubKey; -// type VoteTokenType = VRFVoteToken< -// BLSVerKey, -// BLSSignature, -// >; -// type Transaction = DummyTransaction; -// type ElectionConfigType = VRFStakeTableConfig; -// type StateType = DummyState; -// } - -// fn gen_vrf_impl>( -// num_nodes: usize, -// ) -> ( -// VrfImpl< -// TestTypes, -// LEAF, -// BLSSignatureScheme, -// BLSVRFScheme, -// Hasher, -// Param381, -// >, -// Vec<( -// jf_primitives::signatures::bls::BLSSignKey, -// jf_primitives::signatures::bls::BLSVerKey, -// )>, -// ) { -// let mut known_nodes = Vec::new(); -// let mut keys = Vec::new(); -// let rng = &mut test_rng(); -// let mut stake_distribution = Vec::new(); -// let stake_per_node = NonZeroU64::new(100).unwrap(); -// let genesis_seed = [0u8; 32]; -// for _i in 0..num_nodes { -// let (sk, pk) = BLSSignatureScheme::::key_gen(&(), rng).unwrap(); -// keys.push((sk.clone(), pk.clone())); -// known_nodes.push(JfPubKey::from_native(pk.clone())); -// stake_distribution.push(stake_per_node); -// } -// let stake_table = VrfImpl::with_initial_stake( -// known_nodes, -// &VRFStakeTableConfig { -// sortition_parameter: std::num::NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: stake_distribution, -// }, -// genesis_seed, -// ); -// (stake_table, keys) -// } - -// pub fn check_if_valid(token: &Checked) -> bool { -// match token { -// Checked::Valid(_) => true, -// Checked::Inval(_) | Checked::Unchecked(_) => false, -// } -// } - -// // #[test] -// // pub fn test_sortition() { -// // setup_logging(); -// // let (vrf_impl, keys) = gen_vrf_impl::>(10); -// // let views = 100; - -// // for view in 0..views { -// // for (node_idx, (sk, pk)) in keys.iter().enumerate() { -// // let token_result = vrf_impl -// // .make_vote_token(ViewNumber::new(view), &(sk.clone(), pk.clone())) -// // .unwrap(); -// // match token_result { -// // Some(token) => { -// // let count = token.count; -// // let result = vrf_impl -// // .validate_vote_token( -// // ViewNumber::new(view), -// // JfPubKey::from_native(pk.clone()), -// // Checked::Unchecked(token), -// // ) -// // .unwrap(); -// // let result_is_valid = check_if_valid(&result); -// // error!("view {view:?}, node_idx {node_idx:?}, stake {count:?} "); -// // assert!(result_is_valid); -// // } -// // _ => continue, -// // } -// // } -// // } -// // } - -// #[test] -// pub fn test_factorial() { -// assert_eq!(factorial(0), BigUint::from(1u32)); -// assert_eq!(factorial(1), BigUint::from(1u32)); -// assert_eq!(factorial(2), BigUint::from(2u32)); -// assert_eq!(factorial(3), BigUint::from(6u32)); -// assert_eq!(factorial(4), BigUint::from(24u32)); -// assert_eq!(factorial(5), BigUint::from(120u32)); -// } - -// // TODO add failure case - -// #[test] -// fn network_config_is_serializable() { -// // validate that `RunResults` can be serialized -// // Note that there is currently an issue with `VRFPubKey` where it can't be serialized with toml -// // so instead we only test with serde_json -// let key = -// as TestableSignatureKey>::generate_test_key(1); -// let pub_key = JfPubKey::::from_private(&key); -// let mut config = hotshot_centralized_server::NetworkConfig { -// config: hotshot_types::HotShotConfig { -// election_config: Some(super::VRFStakeTableConfig { -// distribution: vec![NonZeroU64::new(1).unwrap()], -// sortition_parameter: NonZeroU64::new(1).unwrap(), -// }), -// known_nodes: vec![pub_key], -// execution_type: hotshot_types::ExecutionType::Incremental, -// total_nodes: NonZeroUsize::new(1).unwrap(), -// min_transactions: 1, -// max_transactions: NonZeroUsize::new(1).unwrap(), -// next_view_timeout: 1, -// timeout_ratio: (1, 1), -// round_start_delay: 1, -// start_delay: 1, -// num_bootstrap: 1, -// propose_min_round_time: Duration::from_secs(1), -// propose_max_round_time: Duration::from_secs(1), -// }, -// ..Default::default() -// }; -// serde_json::to_string(&config).unwrap(); -// assert!(toml::to_string(&config).is_err()); - -// // validate that this is indeed a `pub_key` issue -// config.config.known_nodes.clear(); -// serde_json::to_string(&config).unwrap(); -// toml::to_string(&config).unwrap(); -// } -// } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 7c54b46964..16ca56d549 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -12,7 +12,7 @@ use hotshot_task::{ task::FilterEvent, BoxSyncFuture, }; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::Consensus, data::LeafType, @@ -47,7 +47,7 @@ pub struct SystemContextHandle> { /// method is needed to generate new receivers to expose to the user pub(crate) output_event_stream: ChannelStream>, /// access to the internal ev ent stream, in case we need to, say, shut something down - pub(crate) internal_event_stream: ChannelStream>, + pub(crate) internal_event_stream: ChannelStream>, /// registry for controlling tasks pub(crate) registry: GlobalRegistry, @@ -147,8 +147,8 @@ impl + 'static> SystemContextHandl /// NOTE: this is only used for sanity checks in our tests pub async fn get_internal_event_stream_known_impl( &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { + filter: FilterEvent>, + ) -> (UnboundedStream>, StreamId) { self.internal_event_stream.subscribe(filter).await } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 13c1390b3e..5139519813 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,4 +1,4 @@ -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] @@ -18,16 +18,14 @@ use hotshot_task::{ use hotshot_types::{ certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, consensus::{Consensus, View}, - data::{LeafType, ProposalType, QuorumProposal, SequencingLeaf}, + data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ - consensus_api::SequencingConsensusApi, + consensus_api::ConsensusApi, election::{ConsensusExchange, QuorumExchangeType, SignedCertificate, TimeoutExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{ - CommitteeEx, NodeImplementation, NodeType, SequencingQuorumEx, SequencingTimeoutEx, - }, + node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, @@ -54,21 +52,17 @@ pub struct ConsensusTaskError {} /// The state for the consensus task. Contains all of the information for the implementation /// of consensus -pub struct SequencingConsensusTaskState< +pub struct ConsensusTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static, > where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, @@ -76,10 +70,10 @@ pub struct SequencingConsensusTaskState< Certificate = DACertificate, Commitment = Commitment, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, @@ -87,7 +81,7 @@ pub struct SequencingConsensusTaskState< /// The global task registry pub registry: GlobalRegistry, /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>>, /// View timeout from config. pub timeout: u64, /// View number this view is executing in. @@ -97,10 +91,10 @@ pub struct SequencingConsensusTaskState< pub block: Option, /// the quorum exchange - pub quorum_exchange: Arc>, + pub quorum_exchange: Arc>, /// The timeout exchange - pub timeout_exchange: Arc>, + pub timeout_exchange: Arc>, /// Consensus api pub api: A, @@ -122,7 +116,7 @@ pub struct SequencingConsensusTaskState< pub timeout_task: JoinHandle<()>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// Event stream to publish events to the application layer pub output_event_stream: ChannelStream>, @@ -148,38 +142,38 @@ pub struct SequencingConsensusTaskState< /// State for the vote collection task. This handles the building of a QC from a votes received pub struct VoteCollectionTaskState< TYPES: NodeType, - I: NodeImplementation>, + I: NodeImplementation>, > where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, { /// the quorum exchange - pub quorum_exchange: Arc>, + pub quorum_exchange: Arc>, /// the timeout exchange - pub timeout_exchange: Arc>, + pub timeout_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< - >> as SignedCertificate< + >> as SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, - Commitment>, + Commitment>, >>::VoteAccumulator, - QuorumCertificate>>, + QuorumCertificate>>, >, /// Accumulator for votes @@ -196,25 +190,25 @@ pub struct VoteCollectionTaskState< /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, /// The event stream shared by all tasks - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// Node id pub id: u64, } -impl>> TS +impl>> TS for VoteCollectionTaskState where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, @@ -223,31 +217,31 @@ where #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] -async fn vote_handle>>( +async fn vote_handle>>( mut state: VoteCollectionTaskState, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, VoteCollectionTaskState, ) where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, { match event { - SequencingHotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { + HotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { QuorumVote::Yes(vote_internal) => { // For the case where we receive votes after we've made a certificate if state.accumulator.is_right() { @@ -277,7 +271,7 @@ where debug!("QCFormed! {:?}", qc.view_number); state .event_stream - .publish(SequencingHotShotEvent::QCFormed(either::Left(qc.clone()))) + .publish(HotShotEvent::QCFormed(either::Left(qc.clone()))) .await; state.accumulator = Either::Right(qc.clone()); @@ -301,7 +295,7 @@ where // TODO: Code below is redundant of code above; can be fixed // during exchange refactor // https://github.com/EspressoSystems/HotShot/issues/1799 - SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + HotShotEvent::TimeoutVoteRecv(vote) => { debug!("Received timeout vote for view {}", *vote.get_view()); if state.timeout_accumulator.is_right() { return (None, state); @@ -331,7 +325,7 @@ where debug!("QCFormed! {:?}", qc.view_number); state .event_stream - .publish(SequencingHotShotEvent::QCFormed(either::Right(qc.clone()))) + .publish(HotShotEvent::QCFormed(either::Right(qc.clone()))) .await; state.timeout_accumulator = Either::Right(qc.clone()); @@ -348,7 +342,7 @@ where } } } - SequencingHotShotEvent::Shutdown => { + HotShotEvent::Shutdown => { return (Some(HotShotTaskCompleted::ShutDown), state); } _ => { @@ -362,18 +356,18 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, - > SequencingConsensusTaskState + A: ConsensusApi, I> + 'static, + > ConsensusTaskState where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, @@ -381,17 +375,17 @@ where Certificate = DACertificate, Commitment = Commitment, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] - async fn genesis_leaf(&self) -> Option> { + async fn genesis_leaf(&self) -> Option> { let consensus = self.consensus.read().await; let Some(genesis_view) = consensus.state_map.get(&TYPES::Time::genesis()) else { @@ -454,7 +448,7 @@ where }; let parent_commitment = parent.commit(); - let leaf: SequencingLeaf<_> = SequencingLeaf { + let leaf: Leaf<_> = Leaf { view_number: view, height: proposal.height, justify_qc: proposal.justify_qc.clone(), @@ -479,7 +473,7 @@ where vote.get_view() + 1 ); self.event_stream - .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) + .publish(HotShotEvent::QuorumVoteSend(vote)) .await; return true; } @@ -524,7 +518,7 @@ where }; let parent_commitment = parent.commit(); - let leaf: SequencingLeaf<_> = SequencingLeaf { + let leaf: Leaf<_> = Leaf { view_number: view, height: proposal.height, justify_qc: proposal.justify_qc.clone(), @@ -559,7 +553,7 @@ where if let GeneralConsensusMessage::Vote(vote) = message { debug!("Sending vote to next quorum leader {:?}", vote.get_view()); self.event_stream - .publish(SequencingHotShotEvent::QuorumVoteSend(vote)) + .publish(HotShotEvent::QuorumVoteSend(vote)) .await; return true; } @@ -628,7 +622,7 @@ where } self.event_stream - .publish(SequencingHotShotEvent::ViewChange(new_view)) + .publish(HotShotEvent::ViewChange(new_view)) .await; // Spawn a timeout task if we did actually update view @@ -641,9 +635,7 @@ where async move { async_sleep(Duration::from_millis(timeout)).await; stream - .publish(SequencingHotShotEvent::Timeout(TYPES::Time::new( - *view_number, - ))) + .publish(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) .await; } }); @@ -664,9 +656,9 @@ where /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] - pub async fn handle_event(&mut self, event: SequencingHotShotEvent) { + pub async fn handle_event(&mut self, event: HotShotEvent) { match event { - SequencingHotShotEvent::QuorumProposalRecv(proposal, sender) => { + HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( "Receved Quorum Propsoal for view {}", *proposal.data.view_number @@ -741,7 +733,7 @@ where "Proposal's parent missing from storage with commitment: {:?}", justify_qc.leaf_commitment() ); - let leaf = SequencingLeaf { + let leaf = Leaf { view_number: view, height: proposal.data.height, justify_qc: justify_qc.clone(), @@ -766,7 +758,7 @@ where return; }; let parent_commitment = parent.commit(); - let leaf: SequencingLeaf<_> = SequencingLeaf { + let leaf: Leaf<_> = Leaf { view_number: view, height: proposal.data.height, justify_qc: justify_qc.clone(), @@ -930,7 +922,7 @@ where if new_decide_reached { debug!("about to publish decide"); self.event_stream - .publish(SequencingHotShotEvent::LeafDecided(leaf_views.clone())) + .publish(HotShotEvent::LeafDecided(leaf_views.clone())) .await; let decide_sent = self.output_event_stream.publish(Event { view_number: consensus.last_decided_view, @@ -995,7 +987,7 @@ where self.da_certs.remove(&time); } } - SequencingHotShotEvent::QuorumVoteRecv(vote) => { + HotShotEvent::QuorumVoteRecv(vote) => { debug!("Received quroum vote: {:?}", vote.get_view()); if !self.quorum_exchange.is_leader(vote.get_view() + 1) { @@ -1068,8 +1060,8 @@ where let filter = FilterEvent(Arc::new(|event| { matches!( event, - SequencingHotShotEvent::QuorumVoteRecv(_) - | SequencingHotShotEvent::TimeoutVoteRecv(_) + HotShotEvent::QuorumVoteRecv(_) + | HotShotEvent::TimeoutVoteRecv(_) ) })); @@ -1097,9 +1089,7 @@ where self.event_stream .direct_message( stream_id, - SequencingHotShotEvent::QuorumVoteRecv(QuorumVote::Yes( - vote_internal, - )), + HotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote_internal)), ) .await; } @@ -1109,7 +1099,7 @@ where } } } - SequencingHotShotEvent::TimeoutVoteRecv(vote) => { + HotShotEvent::TimeoutVoteRecv(vote) => { if !self.timeout_exchange.is_leader(vote.get_view() + 1) { error!( "We are not the leader for view {} are we the leader for view + 1? {}", @@ -1179,8 +1169,7 @@ where let filter = FilterEvent(Arc::new(|event| { matches!( event, - SequencingHotShotEvent::QuorumVoteRecv(_) - | SequencingHotShotEvent::TimeoutVoteRecv(_) + HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::TimeoutVoteRecv(_) ) })); @@ -1203,11 +1192,11 @@ where debug!("Starting vote handle for view {:?}", vote.get_view()); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, SequencingHotShotEvent::TimeoutVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::TimeoutVoteRecv(vote)) .await; } } - SequencingHotShotEvent::QCFormed(cert) => { + HotShotEvent::QCFormed(cert) => { debug!("QC Formed event happened!"); if let either::Right(qc) = cert.clone() { @@ -1246,7 +1235,7 @@ where } } } - SequencingHotShotEvent::DACRecv(cert) => { + HotShotEvent::DACRecv(cert) => { debug!("DAC Recved for view ! {}", *cert.view_number); let view = cert.view_number; @@ -1256,7 +1245,7 @@ where self.current_proposal = None; } } - SequencingHotShotEvent::VidCertRecv(cert) => { + HotShotEvent::VidCertRecv(cert) => { debug!("VID cert received for view ! {}", *cert.view_number); let view = cert.view_number; @@ -1267,7 +1256,7 @@ where self.current_proposal = None; } } - SequencingHotShotEvent::ViewChange(new_view) => { + HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {}", *new_view); let old_view_number = self.cur_view; @@ -1288,7 +1277,7 @@ where }) .await; } - SequencingHotShotEvent::Timeout(view) => { + HotShotEvent::Timeout(view) => { // NOTE: We may optionally have the timeout task listen for view change events if self.cur_view >= view { return; @@ -1310,7 +1299,7 @@ where debug!("Sending timeout vote for view {}", *view); if let GeneralConsensusMessage::TimeoutVote(vote) = message { self.event_stream - .publish(SequencingHotShotEvent::TimeoutVoteSend(vote)) + .publish(HotShotEvent::TimeoutVoteSend(vote)) .await; } } @@ -1322,7 +1311,7 @@ where let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } - SequencingHotShotEvent::SendDABlockData(block) => { + HotShotEvent::SendDABlockData(block) => { self.block = Some(block); } _ => {} @@ -1401,7 +1390,7 @@ where if let Some(block) = &self.block { let block_commitment = block.commit(); - let leaf = SequencingLeaf { + let leaf = Leaf { view_number: view, height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), @@ -1438,7 +1427,7 @@ where ); self.event_stream - .publish(SequencingHotShotEvent::QuorumProposalSend( + .publish(HotShotEvent::QuorumProposalSend( message, self.quorum_exchange.public_key().clone(), )) @@ -1455,18 +1444,18 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I>, - > TS for SequencingConsensusTaskState + A: ConsensusApi, I>, + > TS for ConsensusTaskState where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, @@ -1474,10 +1463,10 @@ where Certificate = DACertificate, Commitment = Commitment, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, @@ -1487,42 +1476,38 @@ where /// Type allias for consensus' vote collection task pub type VoteCollectionTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, VoteCollectionTaskState, >; /// Type alias for Consensus task pub type ConsensusTaskTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, - SequencingConsensusTaskState, + HotShotEvent, + ChannelStream>, + ConsensusTaskState, >; /// Event handle for consensus pub async fn sequencing_consensus_handle< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static, >( - event: SequencingHotShotEvent, - mut state: SequencingConsensusTaskState, + event: HotShotEvent, + mut state: ConsensusTaskState, ) -> ( std::option::Option, - SequencingConsensusTaskState, + ConsensusTaskState, ) where - SequencingQuorumEx: ConsensusExchange< + QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, - Commitment = Commitment>, + Proposal = QuorumProposal>, + Certificate = QuorumCertificate>>, + Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< TYPES, @@ -1530,15 +1515,15 @@ where Certificate = DACertificate, Commitment = Commitment, >, - SequencingTimeoutEx: ConsensusExchange< + TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal>, Certificate = TimeoutCertificate, Commitment = Commitment, >, { - if let SequencingHotShotEvent::Shutdown = event { + if let HotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) } else { state.handle_event(event).await; @@ -1548,19 +1533,19 @@ where /// Filter for consensus, returns true for event types the consensus task subscribes to. pub fn consensus_event_filter>( - event: &SequencingHotShotEvent, + event: &HotShotEvent, ) -> bool { matches!( event, - SequencingHotShotEvent::QuorumProposalRecv(_, _) - | SequencingHotShotEvent::QuorumVoteRecv(_) - | SequencingHotShotEvent::QCFormed(_) - | SequencingHotShotEvent::DACRecv(_) - | SequencingHotShotEvent::VidCertRecv(_) - | SequencingHotShotEvent::ViewChange(_) - | SequencingHotShotEvent::SendDABlockData(_) - | SequencingHotShotEvent::Timeout(_) - | SequencingHotShotEvent::TimeoutVoteRecv(_) - | SequencingHotShotEvent::Shutdown, + HotShotEvent::QuorumProposalRecv(_, _) + | HotShotEvent::QuorumVoteRecv(_) + | HotShotEvent::QCFormed(_) + | HotShotEvent::DACRecv(_) + | HotShotEvent::VidCertRecv(_) + | HotShotEvent::ViewChange(_) + | HotShotEvent::SendDABlockData(_) + | HotShotEvent::Timeout(_) + | HotShotEvent::TimeoutVoteRecv(_) + | HotShotEvent::Shutdown, ) } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9492f3fc6e..fc7a8bd55c 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,4 +1,4 @@ -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -16,10 +16,10 @@ use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, - data::{DAProposal, ProposalType, SequencingLeaf}, + data::{DAProposal, Leaf, ProposalType}, message::{Message, Proposal, SequencingMessage}, traits::{ - consensus_api::SequencingConsensusApi, + consensus_api::ConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership, SignedCertificate}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, @@ -41,12 +41,8 @@ pub struct ConsensusTaskError {} /// Tracks state of a DA task pub struct DATaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static, > where CommitteeEx: ConsensusExchange< TYPES, @@ -64,7 +60,7 @@ pub struct DATaskState< pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>>, /// the committee exchange pub committee_exchange: Arc>, @@ -73,7 +69,7 @@ pub struct DATaskState< pub vote_collector: Option<(TYPES::Time, usize, usize)>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// This state's ID pub id: u64, @@ -82,7 +78,7 @@ pub struct DATaskState< /// Struct to maintain DA Vote Collection task state pub struct DAVoteCollectionTaskState< TYPES: NodeType, - I: NodeImplementation>, + I: NodeImplementation>, > where CommitteeEx: ConsensusExchange< TYPES, @@ -107,12 +103,12 @@ pub struct DAVoteCollectionTaskState< /// the current view pub cur_view: TYPES::Time, /// event stream for channel events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// the id of this task state pub id: u64, } -impl>> TS +impl>> TS for DAVoteCollectionTaskState where CommitteeEx: ConsensusExchange< @@ -125,9 +121,9 @@ where } #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "DA Vote Collection Task", level = "error")] -async fn vote_handle>>( +async fn vote_handle>>( mut state: DAVoteCollectionTaskState, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, DAVoteCollectionTaskState, @@ -141,7 +137,7 @@ where >, { match event { - SequencingHotShotEvent::DAVoteRecv(vote) => { + HotShotEvent::DAVoteRecv(vote) => { debug!("DA vote recv, collection task {:?}", vote.current_view); // panic!("Vote handle received DA vote for view {}", *vote.current_view); @@ -166,7 +162,7 @@ where debug!("Sending DAC! {:?}", dac.view_number); state .event_stream - .publish(SequencingHotShotEvent::DACSend( + .publish(HotShotEvent::DACSend( dac.clone(), state.committee_exchange.public_key().clone(), )) @@ -186,7 +182,7 @@ where } } } - SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), + HotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), _ => { error!("unexpected event {:?}", event); } @@ -198,10 +194,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > DATaskState where CommitteeEx: ConsensusExchange< @@ -215,10 +211,10 @@ where #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] pub async fn handle_event( &mut self, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> Option { match event { - SequencingHotShotEvent::DAProposalRecv(proposal, sender) => { + HotShotEvent::DAProposalRecv(proposal, sender) => { debug!( "DA proposal received for view: {:?}", proposal.data.get_view_number() @@ -277,7 +273,7 @@ where debug!("Sending vote to the DA leader {:?}", vote.current_view); self.event_stream - .publish(SequencingHotShotEvent::DAVoteSend(vote)) + .publish(HotShotEvent::DAVoteSend(vote)) .await; let mut consensus = self.consensus.write().await; @@ -295,7 +291,7 @@ where } } } - SequencingHotShotEvent::DAVoteRecv(vote) => { + HotShotEvent::DAVoteRecv(vote) => { // warn!( // "DA vote recv, Main Task {:?}, key: {:?}", // vote.current_view, @@ -348,7 +344,7 @@ where }; let name = "DA Vote Collection"; let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::DAVoteRecv(_)) + matches!(event, HotShotEvent::DAVoteRecv(_)) })); let builder = TaskBuilder::>::new(name.to_string()) @@ -367,11 +363,11 @@ where self.vote_collector = Some((view, id, stream_id)); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, SequencingHotShotEvent::DAVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::DAVoteRecv(vote)) .await; }; } - SequencingHotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; } @@ -422,7 +418,7 @@ where return None; } - SequencingHotShotEvent::BlockReady(block, view) => { + HotShotEvent::BlockReady(block, view) => { self.committee_exchange .network() .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) @@ -439,24 +435,24 @@ where let message = Proposal { data, signature }; self.event_stream - .publish(SequencingHotShotEvent::SendDABlockData(block.clone())) + .publish(HotShotEvent::SendDABlockData(block.clone())) .await; self.event_stream - .publish(SequencingHotShotEvent::DAProposalSend( + .publish(HotShotEvent::DAProposalSend( message.clone(), self.committee_exchange.public_key().clone(), )) .await; } - SequencingHotShotEvent::Timeout(view) => { + HotShotEvent::Timeout(view) => { self.committee_exchange .network() .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) .await; } - SequencingHotShotEvent::Shutdown => { + HotShotEvent::Shutdown => { error!("Shutting down because of shutdown signal!"); return Some(HotShotTaskCompleted::ShutDown); } @@ -468,15 +464,15 @@ where } /// Filter the DA event. - pub fn filter(event: &SequencingHotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::DAProposalRecv(_, _) - | SequencingHotShotEvent::DAVoteRecv(_) - | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::BlockReady(_, _) - | SequencingHotShotEvent::Timeout(_) - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::DAProposalRecv(_, _) + | HotShotEvent::DAVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::BlockReady(_, _) + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewChange(_) ) } } @@ -486,10 +482,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > TS for DATaskState where CommitteeEx: ConsensusExchange< @@ -504,15 +500,15 @@ where /// Type alias for DA Vote Collection Types pub type DAVoteCollectionTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, DAVoteCollectionTaskState, >; /// Type alias for DA Task Types pub type DATaskTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, DATaskState, >; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9ccdee9883..ae06b1acda 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -13,7 +13,7 @@ use hotshot_types::{ /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub enum SequencingHotShotEvent> { +pub enum HotShotEvent> { /// Shutdown the task Shutdown, /// A quorum proposal has been received from the network; handled by the consensus task diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index c2e2f0be50..227c3b7c89 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -1,4 +1,4 @@ -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use async_compatibility_layer::art::async_spawn; use futures::FutureExt; @@ -15,7 +15,7 @@ use std::{collections::HashMap, future::Future, sync::Arc}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState> { /// The expected events we get from the test. Maps an event to the number of times we expect to see it - expected_output: HashMap, usize>, + expected_output: HashMap, usize>, } impl> TS for TestHarnessState {} @@ -27,8 +27,8 @@ pub struct TestHarnessTaskError {} /// Type alias for the Test Harness Task pub type TestHarnessTaskTypes = HSTWithEvent< TestHarnessTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, TestHarnessState, >; @@ -41,10 +41,10 @@ pub type TestHarnessTaskTypes = HSTWithEvent< /// Panics if any state the test expects is not set. Panicing causes a test failure #[allow(clippy::implicit_hasher)] pub async fn run_harness( - input: Vec>, - expected_output: HashMap, usize>, - event_stream: Option>>, - build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, + input: Vec>, + expected_output: HashMap, usize>, + event_stream: Option>>, + build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, ) where TYPES: NodeType, I: NodeImplementation, @@ -89,7 +89,7 @@ pub async fn run_harness( /// Will panic to fail the test when it receives and unexpected event #[allow(clippy::needless_pass_by_value)] pub fn handle_event>( - event: SequencingHotShotEvent, + event: HotShotEvent, mut state: TestHarnessState, ) -> ( std::option::Option, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 106805cf3e..770b7726b1 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,4 +1,4 @@ -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use either::Either::{self, Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -7,7 +7,7 @@ use hotshot_task::{ GeneratedStream, Merge, }; use hotshot_types::{ - data::SequencingLeaf, + data::Leaf, message::{ CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, SequencingMessage, @@ -40,21 +40,17 @@ pub enum NetworkTaskKind { /// the network message task state pub struct NetworkMessageTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, > { /// event stream (used for publishing) - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, } impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, > TS for NetworkMessageTaskState @@ -65,7 +61,7 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, > NetworkMessageTaskState @@ -81,19 +77,19 @@ impl< let event = match consensus_message.0 { Either::Left(general_message) => match general_message { GeneralConsensusMessage::Proposal(proposal) => { - SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), sender) + HotShotEvent::QuorumProposalRecv(proposal.clone(), sender) } GeneralConsensusMessage::Vote(vote) => { - SequencingHotShotEvent::QuorumVoteRecv(vote.clone()) + HotShotEvent::QuorumVoteRecv(vote.clone()) } GeneralConsensusMessage::ViewSyncVote(view_sync_message) => { - SequencingHotShotEvent::ViewSyncVoteRecv(view_sync_message) + HotShotEvent::ViewSyncVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncCertificate(view_sync_message) => { - SequencingHotShotEvent::ViewSyncCertificateRecv(view_sync_message) + HotShotEvent::ViewSyncCertificateRecv(view_sync_message) } GeneralConsensusMessage::TimeoutVote(message) => { - SequencingHotShotEvent::TimeoutVoteRecv(message) + HotShotEvent::TimeoutVoteRecv(message) } GeneralConsensusMessage::InternalTrigger(_) => { error!("Got unexpected message type in network task!"); @@ -102,23 +98,23 @@ impl< }, Either::Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(proposal) => { - SequencingHotShotEvent::DAProposalRecv(proposal.clone(), sender) + HotShotEvent::DAProposalRecv(proposal.clone(), sender) } CommitteeConsensusMessage::DAVote(vote) => { - SequencingHotShotEvent::DAVoteRecv(vote.clone()) + HotShotEvent::DAVoteRecv(vote.clone()) } CommitteeConsensusMessage::DACertificate(cert) => { // panic!("Recevid DA C! "); - SequencingHotShotEvent::DACRecv(cert) + HotShotEvent::DACRecv(cert) } CommitteeConsensusMessage::VidDisperseMsg(proposal) => { - SequencingHotShotEvent::VidDisperseRecv(proposal, sender) + HotShotEvent::VidDisperseRecv(proposal, sender) } CommitteeConsensusMessage::VidVote(vote) => { - SequencingHotShotEvent::VidVoteRecv(vote.clone()) + HotShotEvent::VidVoteRecv(vote.clone()) } CommitteeConsensusMessage::VidCertificate(cert) => { - SequencingHotShotEvent::VidCertRecv(cert) + HotShotEvent::VidCertRecv(cert) } }, }; @@ -137,7 +133,7 @@ impl< } if !transactions.is_empty() { self.event_stream - .publish(SequencingHotShotEvent::TransactionsRecv(transactions)) + .publish(HotShotEvent::TransactionsRecv(transactions)) .await; } } @@ -146,18 +142,14 @@ impl< /// network event task state pub struct NetworkEventTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > { /// comm channel pub channel: COMMCHANNEL, /// event stream - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// view number pub view: TYPES::Time, /// phantom data @@ -169,7 +161,7 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, MEMBERSHIP: Membership, @@ -182,7 +174,7 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, MEMBERSHIP: Membership, @@ -199,11 +191,11 @@ impl< pub async fn handle_event( &mut self, - event: SequencingHotShotEvent, + event: HotShotEvent, membership: &MEMBERSHIP, ) -> Option { let (sender, message_kind, transmit_type, recipient) = match event.clone() { - SequencingHotShotEvent::QuorumProposalSend(proposal, sender) => ( + HotShotEvent::QuorumProposalSend(proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::Proposal(proposal), @@ -213,7 +205,7 @@ impl< ), // ED Each network task is subscribed to all these message types. Need filters per network task - SequencingHotShotEvent::QuorumVoteSend(vote) => ( + HotShotEvent::QuorumVoteSend(vote) => ( vote.signature_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::Vote(vote.clone()), @@ -221,7 +213,7 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.get_view() + 1)), ), - SequencingHotShotEvent::VidDisperseSend(proposal, sender) => ( + HotShotEvent::VidDisperseSend(proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidDisperseMsg(proposal), @@ -229,7 +221,7 @@ impl< TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 None, ), - SequencingHotShotEvent::DAProposalSend(proposal, sender) => ( + HotShotEvent::DAProposalSend(proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DAProposal(proposal), @@ -237,7 +229,7 @@ impl< TransmitType::Broadcast, None, ), - SequencingHotShotEvent::VidVoteSend(vote) => ( + HotShotEvent::VidVoteSend(vote) => ( vote.signature_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidVote(vote.clone()), @@ -245,7 +237,7 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.get_view())), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 ), - SequencingHotShotEvent::DAVoteSend(vote) => ( + HotShotEvent::DAVoteSend(vote) => ( vote.signature_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DAVote(vote.clone()), @@ -253,7 +245,7 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.get_view())), ), - SequencingHotShotEvent::VidCertSend(certificate, sender) => ( + HotShotEvent::VidCertSend(certificate, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidCertificate(certificate), @@ -262,7 +254,7 @@ impl< None, ), // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee - SequencingHotShotEvent::DACSend(certificate, sender) => ( + HotShotEvent::DACSend(certificate, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DACertificate(certificate), @@ -270,7 +262,7 @@ impl< TransmitType::Broadcast, None, ), - SequencingHotShotEvent::ViewSyncCertificateSend(certificate_proposal, sender) => ( + HotShotEvent::ViewSyncCertificateSend(certificate_proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncCertificate(certificate_proposal), @@ -278,7 +270,7 @@ impl< TransmitType::Broadcast, None, ), - SequencingHotShotEvent::ViewSyncVoteSend(vote) => { + HotShotEvent::ViewSyncVoteSend(vote) => { // error!("Sending view sync vote in network task to relay with index: {:?}", vote.round() + vote.relay()); ( vote.signature_key(), @@ -289,7 +281,7 @@ impl< Some(membership.get_leader(vote.round() + vote.relay())), ) } - SequencingHotShotEvent::TimeoutVoteSend(vote) => ( + HotShotEvent::TimeoutVoteSend(vote) => ( vote.get_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::TimeoutVote(vote.clone()), @@ -297,11 +289,11 @@ impl< TransmitType::Direct, Some(membership.get_leader(vote.get_view() + 1)), ), - SequencingHotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view) => { self.view = view; return None; } - SequencingHotShotEvent::Shutdown => { + HotShotEvent::Shutdown => { error!("Networking task shutting down"); return Some(HotShotTaskCompleted::ShutDown); } @@ -334,7 +326,7 @@ impl< } /// network filter - pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { + pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { match task_kind { NetworkTaskKind::Quorum => FilterEvent(Arc::new(Self::quorum_filter)), NetworkTaskKind::Committee => FilterEvent(Arc::new(Self::committee_filter)), @@ -344,51 +336,51 @@ impl< } /// quorum filter - fn quorum_filter(event: &SequencingHotShotEvent) -> bool { + fn quorum_filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::QuorumProposalSend(_, _) - | SequencingHotShotEvent::QuorumVoteSend(_) - | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::DACSend(_, _) - | SequencingHotShotEvent::VidCertSend(_, _) - | SequencingHotShotEvent::ViewChange(_) - | SequencingHotShotEvent::TimeoutVoteSend(_) + HotShotEvent::QuorumProposalSend(_, _) + | HotShotEvent::QuorumVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::DACSend(_, _) + | HotShotEvent::VidCertSend(_, _) + | HotShotEvent::ViewChange(_) + | HotShotEvent::TimeoutVoteSend(_) ) } /// committee filter - fn committee_filter(event: &SequencingHotShotEvent) -> bool { + fn committee_filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::DAProposalSend(_, _) - | SequencingHotShotEvent::DAVoteSend(_) - | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::VidDisperseSend(_, _) - | SequencingHotShotEvent::VidVoteSend(_) - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::DAProposalSend(_, _) + | HotShotEvent::DAVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::VidDisperseSend(_, _) + | HotShotEvent::VidVoteSend(_) + | HotShotEvent::ViewChange(_) ) } /// vid filter - fn vid_filter(event: &SequencingHotShotEvent) -> bool { + fn vid_filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::VidDisperseSend(_, _) - | SequencingHotShotEvent::VidVoteSend(_) - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::Shutdown + | HotShotEvent::VidDisperseSend(_, _) + | HotShotEvent::VidVoteSend(_) + | HotShotEvent::ViewChange(_) ) } /// view sync filter - fn view_sync_filter(event: &SequencingHotShotEvent) -> bool { + fn view_sync_filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::ViewSyncVoteSend(_) - | SequencingHotShotEvent::ViewSyncCertificateSend(_, _) - | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::ViewSyncVoteSend(_) + | HotShotEvent::ViewSyncCertificateSend(_, _) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) ) } } @@ -409,7 +401,7 @@ pub type NetworkMessageTaskTypes = HSTWithMessage< /// network event task types pub type NetworkEventTaskTypes = HSTWithEvent< NetworkTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, NetworkEventTaskState, >; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index cb939d0ca7..45cea93f2d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,4 +1,4 @@ -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use async_compatibility_layer::{ art::async_timeout, async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, @@ -17,10 +17,10 @@ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::QuorumCertificate, consensus::Consensus, - data::{SequencingLeaf, VidDisperse, VidScheme, VidSchemeTrait}, + data::{Leaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, traits::{ - consensus_api::SequencingConsensusApi, + consensus_api::ConsensusApi, election::{ConsensusExchange, Membership, QuorumExchangeType}, node_implementation::{NodeImplementation, NodeType, QuorumEx}, BlockPayload, @@ -45,12 +45,8 @@ pub struct ConsensusTaskError {} /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static, > where QuorumEx: ConsensusExchange< TYPES, @@ -67,7 +63,7 @@ pub struct TransactionTaskState< pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>>, /// A list of undecided transactions pub transactions: Arc>>, @@ -79,7 +75,7 @@ pub struct TransactionTaskState< pub quorum_exchange: Arc>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// This state's ID pub id: u64, @@ -92,10 +88,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > TransactionTaskState where QuorumEx: ConsensusExchange< @@ -109,10 +105,10 @@ where pub async fn handle_event( &mut self, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> Option { match event { - SequencingHotShotEvent::TransactionsRecv(transactions) => { + HotShotEvent::TransactionsRecv(transactions) => { let consensus = self.consensus.read().await; self.transactions .modify(|txns| { @@ -138,7 +134,7 @@ where return None; } - SequencingHotShotEvent::LeafDecided(leaf_chain) => { + HotShotEvent::LeafDecided(leaf_chain) => { let mut included_txns = HashSet::new(); let mut included_txn_size = 0; let mut included_txn_count = 0; @@ -190,7 +186,7 @@ where .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); return None; } - SequencingHotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; } @@ -269,13 +265,13 @@ where // TODO never clone a block // https://github.com/EspressoSystems/HotShot/issues/1858 self.event_stream - .publish(SequencingHotShotEvent::BlockReady(block.clone(), view + 1)) + .publish(HotShotEvent::BlockReady(block.clone(), view + 1)) .await; // TODO (Keyao) Determine and update where to publish VidDisperseSend. // self.event_stream - .publish(SequencingHotShotEvent::VidDisperseSend( + .publish(HotShotEvent::VidDisperseSend( Proposal { data: VidDisperse { view_number: view + 1, @@ -291,7 +287,7 @@ where .await; return None; } - SequencingHotShotEvent::Shutdown => { + HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted::ShutDown); } _ => {} @@ -307,10 +303,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > TransactionTaskState where QuorumEx: ConsensusExchange< @@ -322,7 +318,7 @@ where #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( &self, - _parent_leaf: SequencingLeaf, + _parent_leaf: Leaf, ) -> Option> { let task_start_time = Instant::now(); @@ -388,13 +384,13 @@ where } /// Event filter for the transaction task - pub fn filter(event: &SequencingHotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::TransactionsRecv(_) - | SequencingHotShotEvent::LeafDecided(_) - | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::TransactionsRecv(_) + | HotShotEvent::LeafDecided(_) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) ) } } @@ -404,10 +400,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > TS for TransactionTaskState where QuorumEx: ConsensusExchange< @@ -421,7 +417,7 @@ where /// Type alias for DA Task Types pub type TransactionsTaskTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, TransactionTaskState, >; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index bde44d7781..29a4a32f54 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,4 +1,4 @@ -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -18,10 +18,10 @@ use hotshot_types::{ }; use hotshot_types::{ consensus::{Consensus, View}, - data::{ProposalType, SequencingLeaf}, + data::{Leaf, ProposalType}, message::{Message, SequencingMessage}, traits::{ - consensus_api::SequencingConsensusApi, + consensus_api::ConsensusApi, election::{ConsensusExchange, VIDExchangeType}, node_implementation::{NodeImplementation, NodeType, VIDEx}, signature_key::SignatureKey, @@ -42,12 +42,8 @@ pub struct ConsensusTaskError {} /// Tracks state of a DA task pub struct VIDTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static, > where VIDEx: ConsensusExchange< TYPES, @@ -65,7 +61,7 @@ pub struct VIDTaskState< pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>>, /// the VID exchange pub vid_exchange: Arc>, @@ -74,7 +70,7 @@ pub struct VIDTaskState< pub vote_collector: Option<(TYPES::Time, usize, usize)>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// This state's ID pub id: u64, @@ -83,7 +79,7 @@ pub struct VIDTaskState< /// Struct to maintain DA Vote Collection task state pub struct VIDVoteCollectionTaskState< TYPES: NodeType, - I: NodeImplementation>, + I: NodeImplementation>, > where VIDEx: ConsensusExchange< TYPES, @@ -108,12 +104,12 @@ pub struct VIDVoteCollectionTaskState< /// the current view pub cur_view: TYPES::Time, /// event stream for channel events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// the id of this task state pub id: u64, } -impl>> TS +impl>> TS for VIDVoteCollectionTaskState where VIDEx: ConsensusExchange< @@ -128,14 +124,14 @@ where #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "VID Vote Collection Task", level = "error")] async fn vote_handle( mut state: VIDVoteCollectionTaskState, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> ( Option, VIDVoteCollectionTaskState, ) where TYPES: NodeType, - I: NodeImplementation>, + I: NodeImplementation>, VIDEx: ConsensusExchange< TYPES, Message, @@ -144,7 +140,7 @@ where >, { match event { - SequencingHotShotEvent::VidVoteRecv(vote) => { + HotShotEvent::VidVoteRecv(vote) => { // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 debug!("VID vote recv, collection task {:?}", vote.get_view()); @@ -164,7 +160,7 @@ where debug!("Sending VID cert! {:?}", vid_cert.view_number); state .event_stream - .publish(SequencingHotShotEvent::VidCertSend( + .publish(HotShotEvent::VidCertSend( vid_cert.clone(), state.vid_exchange.public_key().clone(), )) @@ -177,7 +173,7 @@ where } } } - SequencingHotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), + HotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), _ => { error!("unexpected event {:?}", event); } @@ -189,10 +185,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > VIDTaskState where VIDEx: ConsensusExchange< @@ -206,10 +202,10 @@ where #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] pub async fn handle_event( &mut self, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> Option { match event { - SequencingHotShotEvent::VidVoteRecv(vote) => { + HotShotEvent::VidVoteRecv(vote) => { // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 // warn!( @@ -268,7 +264,7 @@ where }; let name = "VID Vote Collection"; let filter = FilterEvent(Arc::new(|event| { - matches!(event, SequencingHotShotEvent::VidVoteRecv(_)) + matches!(event, HotShotEvent::VidVoteRecv(_)) })); let builder = TaskBuilder::>::new(name.to_string()) @@ -286,11 +282,11 @@ where self.vote_collector = Some((view, id, stream_id)); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, SequencingHotShotEvent::VidVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::VidVoteRecv(vote)) .await; }; } - SequencingHotShotEvent::VidDisperseRecv(disperse, sender) => { + HotShotEvent::VidDisperseRecv(disperse, sender) => { // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 debug!( "VID disperse received for view: {:?}", @@ -346,7 +342,7 @@ where debug!("Sending vote to the VID leader {:?}", vote.current_view); self.event_stream - .publish(SequencingHotShotEvent::VidVoteSend(vote)) + .publish(HotShotEvent::VidVoteSend(vote)) .await; let mut consensus = self.consensus.write().await; @@ -365,7 +361,7 @@ where } } } - SequencingHotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; } @@ -378,7 +374,7 @@ where return None; } - SequencingHotShotEvent::Shutdown => { + HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted::ShutDown); } _ => { @@ -389,13 +385,13 @@ where } /// Filter the DA event. - pub fn filter(event: &SequencingHotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::VidDisperseRecv(_, _) - | SequencingHotShotEvent::VidVoteRecv(_) - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::Shutdown + | HotShotEvent::VidDisperseRecv(_, _) + | HotShotEvent::VidVoteRecv(_) + | HotShotEvent::ViewChange(_) ) } } @@ -405,10 +401,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > TS for VIDTaskState where VIDEx: ConsensusExchange< @@ -423,15 +419,15 @@ where /// Type alias for VID Vote Collection Types pub type VIDVoteCollectionTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, VIDVoteCollectionTaskState, >; /// Type alias for VID Task Types pub type VIDTaskTypes = HSTWithEvent< ConsensusTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, VIDTaskState, >; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c7efa7e212..7e5034baa5 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,5 +1,5 @@ #![allow(clippy::module_name_repetitions)] -use crate::events::SequencingHotShotEvent; +use crate::events::HotShotEvent; use async_compatibility_layer::art::{async_sleep, async_spawn}; use commit::{Commitment, Committable}; use either::Either::{self, Left, Right}; @@ -21,10 +21,10 @@ use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ certificate::ViewSyncCertificate, - data::SequencingLeaf, + data::Leaf, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ - consensus_api::SequencingConsensusApi, + consensus_api::ConsensusApi, election::{ConsensusExchange, ViewSyncExchangeType}, network::CommunicationChannel, node_implementation::{NodeImplementation, NodeType, ViewSyncEx}, @@ -63,12 +63,8 @@ pub struct ViewSyncTaskError {} /// Main view sync task state pub struct ViewSyncTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static + std::clone::Clone, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static + std::clone::Clone, > where ViewSyncEx: ConsensusExchange< TYPES, @@ -81,7 +77,7 @@ pub struct ViewSyncTaskState< /// Registry to register sub tasks pub registry: GlobalRegistry, /// Event stream to publish events to - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// View HotShot is currently in pub current_view: TYPES::Time, /// View HotShot wishes to be in @@ -113,10 +109,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static + std::clone::Clone, + A: ConsensusApi, I> + 'static + std::clone::Clone, > TS for ViewSyncTaskState where ViewSyncEx: ConsensusExchange< @@ -132,20 +128,16 @@ where /// Types for the main view sync task pub type ViewSyncTaskStateTypes = HSTWithEvent< ViewSyncTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ViewSyncTaskState, >; /// State of a view sync replica task pub struct ViewSyncReplicaTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, - A: SequencingConsensusApi, I> + 'static, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + A: ConsensusApi, I> + 'static, > where ViewSyncEx: ConsensusExchange< TYPES, @@ -177,17 +169,17 @@ pub struct ViewSyncReplicaTaskState< /// HotShot consensus API pub api: A, /// Event stream to publish events to - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, } impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > TS for ViewSyncReplicaTaskState where ViewSyncEx: ConsensusExchange< @@ -203,22 +195,18 @@ where /// Types for view sync replica state pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< ViewSyncTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ViewSyncReplicaTaskState, >; /// State of a view sync relay task pub struct ViewSyncRelayTaskState< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, > { /// Event stream to publish events to - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// View sync exchange pub exchange: Arc>, /// Vote accumulator @@ -240,7 +228,7 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, > TS for ViewSyncRelayTaskState @@ -250,8 +238,8 @@ impl< /// Types used by the view sync relay task pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< ViewSyncTaskError, - SequencingHotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ViewSyncRelayTaskState, >; @@ -259,10 +247,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static + std::clone::Clone, + A: ConsensusApi, I> + 'static + std::clone::Clone, > ViewSyncTaskState where ViewSyncEx: ConsensusExchange< @@ -275,9 +263,9 @@ where { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] /// Handles incoming events for the main view sync task - pub async fn handle_event(&mut self, event: SequencingHotShotEvent) { + pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { - SequencingHotShotEvent::ViewSyncCertificateRecv(message) => { + HotShotEvent::ViewSyncCertificateRecv(message) => { let (certificate_internal, last_seen_certificate) = match &message.data { ViewSyncCertificate::PreCommit(certificate_internal) => { (certificate_internal, ViewSyncPhase::PreCommit) @@ -367,7 +355,7 @@ where }); } - SequencingHotShotEvent::ViewSyncVoteRecv(vote) => { + HotShotEvent::ViewSyncVoteRecv(vote) => { let vote_internal = match vote { ViewSyncVote::PreCommit(vote_internal) | ViewSyncVote::Commit(vote_internal) @@ -448,7 +436,7 @@ where }); } - &SequencingHotShotEvent::ViewChange(new_view) => { + &HotShotEvent::ViewChange(new_view) => { let new_view = TYPES::Time::new(*new_view); if self.current_view < new_view { debug!( @@ -469,7 +457,7 @@ where self.event_stream .direct_message( replica_task_info.event_stream_id, - SequencingHotShotEvent::Shutdown, + HotShotEvent::Shutdown, ) .await; } @@ -479,7 +467,7 @@ where self.event_stream .direct_message( relay_task_info.event_stream_id, - SequencingHotShotEvent::Shutdown, + HotShotEvent::Shutdown, ) .await; } @@ -488,7 +476,7 @@ where self.last_garbage_collected_view = self.current_view - 1; } } - &SequencingHotShotEvent::Timeout(view_number) => { + &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it if view_number <= TYPES::Time::new(*self.current_view) { return; @@ -558,7 +546,7 @@ where // TODO ED Make all these view numbers into a single variable to avoid errors let result = replica_state - .handle_event(SequencingHotShotEvent::ViewSyncTrigger(view_number + 1)) + .handle_event(HotShotEvent::ViewSyncTrigger(view_number + 1)) .await; if result.0 == Some(HotShotTaskCompleted::ShutDown) { @@ -604,7 +592,7 @@ where // If this is the first timeout we've seen advance to the next view self.current_view = view_number; self.event_stream - .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( + .publish(HotShotEvent::ViewChange(TYPES::Time::new( *self.current_view, ))) .await; @@ -616,15 +604,15 @@ where } /// Filter view sync related events. - pub fn filter(event: &SequencingHotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, - SequencingHotShotEvent::ViewSyncCertificateRecv(_) - | SequencingHotShotEvent::ViewSyncVoteRecv(_) - | SequencingHotShotEvent::Shutdown - | SequencingHotShotEvent::Timeout(_) - | SequencingHotShotEvent::ViewSyncTimeout(_, _, _) - | SequencingHotShotEvent::ViewChange(_) + HotShotEvent::ViewSyncCertificateRecv(_) + | HotShotEvent::ViewSyncVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewChange(_) ) } } @@ -633,10 +621,10 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - A: SequencingConsensusApi, I> + 'static, + A: ConsensusApi, I> + 'static, > ViewSyncReplicaTaskState where ViewSyncEx: ConsensusExchange< @@ -651,13 +639,13 @@ where /// Handle incoming events for the view sync replica task pub async fn handle_event( mut self, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, ViewSyncReplicaTaskState, ) { match event { - SequencingHotShotEvent::ViewSyncCertificateRecv(message) => { + HotShotEvent::ViewSyncCertificateRecv(message) => { let (certificate_internal, last_seen_certificate) = match message.data.clone() { ViewSyncCertificate::PreCommit(certificate_internal) => { (certificate_internal, ViewSyncPhase::PreCommit) @@ -715,9 +703,7 @@ where if self.phase >= ViewSyncPhase::Commit && !self.sent_view_change_event { error!("VIEW SYNC UPDATING VIEW TO {}", *self.next_view); self.event_stream - .publish(SequencingHotShotEvent::ViewChange(TYPES::Time::new( - *self.next_view, - ))) + .publish(HotShotEvent::ViewChange(TYPES::Time::new(*self.next_view))) .await; self.sent_view_change_event = true; } @@ -771,7 +757,7 @@ where // error!("Sending vs vote {:?}", vote.clone()); self.event_stream - .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .publish(HotShotEvent::ViewSyncVoteSend(vote)) .await; } @@ -798,7 +784,7 @@ where // error!("Sending vs vote {:?}", vote.clone()); self.event_stream - .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .publish(HotShotEvent::ViewSyncVoteSend(vote)) .await; } } @@ -811,7 +797,7 @@ where async_sleep(self.view_sync_timeout).await; error!("Vote sending timed out in ViewSyncCertificateRecv"); stream - .publish(SequencingHotShotEvent::ViewSyncTimeout( + .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), self.relay, phase, @@ -836,7 +822,7 @@ where } } - SequencingHotShotEvent::ViewSyncTrigger(view_number) => { + HotShotEvent::ViewSyncTrigger(view_number) => { if self.next_view != TYPES::Time::new(*view_number) { error!("Unexpected view number to triger view sync"); return (None, self); @@ -862,7 +848,7 @@ where // error!("Sending vs vote {:?}", vote.clone()); self.event_stream - .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .publish(HotShotEvent::ViewSyncVoteSend(vote)) .await; } @@ -873,7 +859,7 @@ where async_sleep(self.view_sync_timeout).await; error!("Vote sending timed out in ViewSyncTrigger"); stream - .publish(SequencingHotShotEvent::ViewSyncTimeout( + .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), self.relay, ViewSyncPhase::None, @@ -894,7 +880,7 @@ where } } - SequencingHotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { + HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { // Shouldn't ever receive a timeout for a relay higher than ours if TYPES::Time::new(*round) == self.next_view && relay == self.relay @@ -933,7 +919,7 @@ where if let GeneralConsensusMessage::ViewSyncVote(vote) = message { self.event_stream - .publish(SequencingHotShotEvent::ViewSyncVoteSend(vote)) + .publish(HotShotEvent::ViewSyncVoteSend(vote)) .await; } @@ -944,7 +930,7 @@ where async_sleep(self.view_sync_timeout).await; error!("Vote sending timed out in ViewSyncTimeout"); stream - .publish(SequencingHotShotEvent::ViewSyncTimeout( + .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), self.relay, last_seen_certificate, @@ -968,7 +954,7 @@ impl< TYPES: NodeType, I: NodeImplementation< TYPES, - Leaf = SequencingLeaf, + Leaf = Leaf, ConsensusMessage = SequencingMessage, >, > ViewSyncRelayTaskState @@ -985,13 +971,13 @@ where #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] pub async fn handle_event( mut self, - event: SequencingHotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, ViewSyncRelayTaskState, ) { match event { - SequencingHotShotEvent::ViewSyncVoteRecv(vote) => { + HotShotEvent::ViewSyncVoteRecv(vote) => { if self.accumulator.is_right() { return (Some(HotShotTaskCompleted::ShutDown), self); } @@ -1047,7 +1033,7 @@ where signature, }; self.event_stream - .publish(SequencingHotShotEvent::ViewSyncCertificateSend( + .publish(HotShotEvent::ViewSyncCertificateSend( message, self.exchange.public_key().clone(), )) diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 1b8e4df7e5..3e04ec9013 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -2,7 +2,7 @@ use hotshot::traits::implementations::CombinedNetworks; use std::{marker::PhantomData, sync::Arc}; use hotshot::{ - demo::SDemoState, + demo::DemoState, traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig, StaticVoteToken}, implementations::{ @@ -16,12 +16,12 @@ use hotshot::{ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - data::{QuorumProposal, SequencingLeaf, ViewNumber}, + data::{Leaf, QuorumProposal, ViewNumber}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, network::{TestableChannelImplementation, TestableNetworkingImplementation}, - node_implementation::{ChannelMaps, NodeType, SequencingExchanges, TestableExchange}, + node_implementation::{ChannelMaps, Exchanges, NodeType, TestableExchange}, }, }; use serde::{Deserialize, Serialize}; @@ -39,120 +39,101 @@ use serde::{Deserialize, Serialize}; serde::Serialize, serde::Deserialize, )] -pub struct SequencingTestTypes; -impl NodeType for SequencingTestTypes { +pub struct TestTypes; +impl NodeType for TestTypes { type Time = ViewNumber; type BlockType = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = SDemoState; + type StateType = DemoState; } #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] -pub struct SequencingMemoryImpl; +pub struct MemoryImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] -pub struct SequencingLibp2pImpl; +pub struct Libp2pImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] -pub struct SequencingWebImpl; +pub struct WebImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] -pub struct SequencingCombinedImpl; +pub struct CombinedImpl; -pub type StaticMembership = - StaticCommittee>; +pub type StaticMembership = StaticCommittee>; -pub type StaticMemoryDAComm = - MemoryCommChannel; +pub type StaticMemoryDAComm = MemoryCommChannel; -type StaticLibp2pDAComm = - Libp2pCommChannel; +type StaticLibp2pDAComm = Libp2pCommChannel; -type StaticWebDAComm = WebCommChannel; +type StaticWebDAComm = WebCommChannel; -type StaticCombinedDAComm = - CombinedCommChannel; +type StaticCombinedDAComm = CombinedCommChannel; -pub type StaticMemoryQuorumComm = - MemoryCommChannel; +pub type StaticMemoryQuorumComm = MemoryCommChannel; -type StaticLibp2pQuorumComm = - Libp2pCommChannel; +type StaticLibp2pQuorumComm = Libp2pCommChannel; -type StaticWebQuorumComm = WebCommChannel; +type StaticWebQuorumComm = WebCommChannel; -type StaticCombinedQuorumComm = - CombinedCommChannel; +type StaticCombinedQuorumComm = CombinedCommChannel; -pub type StaticMemoryViewSyncComm = - MemoryCommChannel; +pub type StaticMemoryViewSyncComm = MemoryCommChannel; -type StaticLibp2pViewSyncComm = - Libp2pCommChannel; +type StaticLibp2pViewSyncComm = Libp2pCommChannel; -type StaticWebViewSyncComm = - WebCommChannel; +type StaticWebViewSyncComm = WebCommChannel; -type StaticCombinedViewSyncComm = - CombinedCommChannel; +type StaticCombinedViewSyncComm = CombinedCommChannel; -pub type StaticMemoryVIDComm = - MemoryCommChannel; +pub type StaticMemoryVIDComm = MemoryCommChannel; -type StaticLibp2pVIDComm = - Libp2pCommChannel; +type StaticLibp2pVIDComm = Libp2pCommChannel; -type StaticWebVIDComm = WebCommChannel; +type StaticWebVIDComm = WebCommChannel; -type StaticCombinedVIDComm = - CombinedCommChannel; +type StaticCombinedVIDComm = CombinedCommChannel; -pub type SequencingLibp2pExchange = SequencingExchanges< - SequencingTestTypes, - Message, +pub type SequencingLibp2pExchange = Exchanges< + TestTypes, + Message, QuorumExchange< - SequencingTestTypes, - >::Leaf, - QuorumProposal>, + TestTypes, + >::Leaf, + QuorumProposal>, StaticMembership, StaticLibp2pQuorumComm, - Message, + Message, >, CommitteeExchange< - SequencingTestTypes, + TestTypes, StaticMembership, StaticLibp2pDAComm, - Message, + Message, >, ViewSyncExchange< - SequencingTestTypes, - ViewSyncCertificate, + TestTypes, + ViewSyncCertificate, StaticMembership, StaticLibp2pViewSyncComm, - Message, - >, - VIDExchange< - SequencingTestTypes, - StaticMembership, - StaticLibp2pVIDComm, - Message, + Message, >, + VIDExchange>, >; -impl NodeImplementation for SequencingLibp2pImpl { - type Storage = MemoryStorage>; - type Leaf = SequencingLeaf; +impl NodeImplementation for Libp2pImpl { + type Storage = MemoryStorage>; + type Leaf = Leaf; type Exchanges = SequencingLibp2pExchange; - type ConsensusMessage = SequencingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { ( ChannelMaps::new(start_view), @@ -163,9 +144,9 @@ impl NodeImplementation for SequencingLibp2pImpl { impl TestableExchange< - SequencingTestTypes, - >::Leaf, - Message, + TestTypes, + >::Leaf, + Message, > for SequencingLibp2pExchange { #[allow(clippy::arc_with_non_send_sync)] @@ -178,29 +159,29 @@ impl u64, ) -> ( , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, ) + 'static, > { let network_generator = Arc::new(, - ::SignatureKey, + Message, + ::SignatureKey, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -213,26 +194,26 @@ impl let network = Arc::new(network_generator(id)); let quorum_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let view_sync_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let vid_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); @@ -241,43 +222,38 @@ impl } } -pub type SequencingMemoryExchange = SequencingExchanges< - SequencingTestTypes, - Message, +pub type SequencingMemoryExchange = Exchanges< + TestTypes, + Message, QuorumExchange< - SequencingTestTypes, - >::Leaf, - QuorumProposal>, + TestTypes, + >::Leaf, + QuorumProposal>, StaticMembership, StaticMemoryQuorumComm, - Message, + Message, >, CommitteeExchange< - SequencingTestTypes, + TestTypes, StaticMembership, StaticMemoryDAComm, - Message, + Message, >, ViewSyncExchange< - SequencingTestTypes, - ViewSyncCertificate, + TestTypes, + ViewSyncCertificate, StaticMembership, StaticMemoryViewSyncComm, - Message, - >, - VIDExchange< - SequencingTestTypes, - StaticMembership, - StaticMemoryVIDComm, - Message, + Message, >, + VIDExchange>, >; impl TestableExchange< - SequencingTestTypes, - >::Leaf, - Message, + TestTypes, + >::Leaf, + Message, > for SequencingMemoryExchange { #[allow(clippy::arc_with_non_send_sync)] @@ -290,29 +266,29 @@ impl u64, ) -> ( , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, ) + 'static, > { let network_generator = Arc::new(, - ::SignatureKey, + Message, + ::SignatureKey, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -321,11 +297,11 @@ impl false, )); let network_da_generator = Arc::new(, - ::SignatureKey, + Message, + ::SignatureKey, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -338,26 +314,26 @@ impl let network_da = Arc::new(network_da_generator(id)); let quorum_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); let vid_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); @@ -366,17 +342,17 @@ impl } } -impl NodeImplementation for SequencingMemoryImpl { - type Storage = MemoryStorage>; - type Leaf = SequencingLeaf; +impl NodeImplementation for MemoryImpl { + type Storage = MemoryStorage>; + type Leaf = Leaf; type Exchanges = SequencingMemoryExchange; - type ConsensusMessage = SequencingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { ( ChannelMaps::new(start_view), @@ -390,43 +366,33 @@ impl NodeImplementation for SequencingMemoryImpl { // when are we getting HKT for rust // smh my head -pub type SequencingWebExchanges = SequencingExchanges< - SequencingTestTypes, - Message, +pub type SequencingWebExchanges = Exchanges< + TestTypes, + Message, QuorumExchange< - SequencingTestTypes, - >::Leaf, - QuorumProposal>, + TestTypes, + >::Leaf, + QuorumProposal>, StaticMembership, StaticWebQuorumComm, - Message, - >, - CommitteeExchange< - SequencingTestTypes, - StaticMembership, - StaticWebDAComm, - Message, + Message, >, + CommitteeExchange>, ViewSyncExchange< - SequencingTestTypes, - ViewSyncCertificate, + TestTypes, + ViewSyncCertificate, StaticMembership, StaticWebViewSyncComm, - Message, - >, - VIDExchange< - SequencingTestTypes, - StaticMembership, - StaticWebVIDComm, - Message, + Message, >, + VIDExchange>, >; impl TestableExchange< - SequencingTestTypes, - >::Leaf, - Message, + TestTypes, + >::Leaf, + Message, > for SequencingWebExchanges { #[allow(clippy::arc_with_non_send_sync)] @@ -439,30 +405,30 @@ impl u64, ) -> ( , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, ) + 'static, > { let network_generator = Arc::new(, - ::SignatureKey, + Message, + ::SignatureKey, _, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -471,12 +437,12 @@ impl false, )); let network_da_generator = Arc::new(, - ::SignatureKey, - SequencingTestTypes, + Message, + ::SignatureKey, + TestTypes, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -489,26 +455,26 @@ impl let network_da = Arc::new(network_da_generator(id)); let quorum_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); let vid_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); @@ -517,17 +483,17 @@ impl } } -impl NodeImplementation for SequencingWebImpl { - type Storage = MemoryStorage>; - type Leaf = SequencingLeaf; +impl NodeImplementation for WebImpl { + type Storage = MemoryStorage>; + type Leaf = Leaf; type Exchanges = SequencingWebExchanges; - type ConsensusMessage = SequencingMessage; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { ( ChannelMaps::new(start_view), @@ -536,49 +502,49 @@ impl NodeImplementation for SequencingWebImpl { } } -pub type SequencingCombinedExchange = SequencingExchanges< - SequencingTestTypes, - Message, +pub type CombinedExchange = Exchanges< + TestTypes, + Message, QuorumExchange< - SequencingTestTypes, - >::Leaf, - QuorumProposal>, + TestTypes, + >::Leaf, + QuorumProposal>, StaticMembership, StaticCombinedQuorumComm, - Message, + Message, >, CommitteeExchange< - SequencingTestTypes, + TestTypes, StaticMembership, StaticCombinedDAComm, - Message, + Message, >, ViewSyncExchange< - SequencingTestTypes, - ViewSyncCertificate, + TestTypes, + ViewSyncCertificate, StaticMembership, StaticCombinedViewSyncComm, - Message, + Message, >, VIDExchange< - SequencingTestTypes, + TestTypes, StaticMembership, StaticCombinedVIDComm, - Message, + Message, >, >; -impl NodeImplementation for SequencingCombinedImpl { - type Storage = MemoryStorage>; - type Leaf = SequencingLeaf; - type Exchanges = SequencingCombinedExchange; - type ConsensusMessage = SequencingMessage; +impl NodeImplementation for CombinedImpl { + type Storage = MemoryStorage>; + type Leaf = Leaf; + type Exchanges = CombinedExchange; + type ConsensusMessage = SequencingMessage; fn new_channel_maps( - start_view: ::Time, + start_view: ::Time, ) -> ( - ChannelMaps, - Option>, + ChannelMaps, + Option>, ) { ( ChannelMaps::new(start_view), @@ -589,10 +555,10 @@ impl NodeImplementation for SequencingCombinedImpl { impl TestableExchange< - SequencingTestTypes, - >::Leaf, - Message, - > for SequencingCombinedExchange + TestTypes, + >::Leaf, + Message, + > for CombinedExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( @@ -604,30 +570,30 @@ impl u64, ) -> ( , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, , + TestTypes, + Message, >>::Networking, ) + 'static, > { let web_server_network_generator = Arc::new(, - ::SignatureKey, + Message, + ::SignatureKey, _, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -637,12 +603,12 @@ impl )); let web_server_network_da_generator = Arc::new(, - ::SignatureKey, - SequencingTestTypes, + Message, + ::SignatureKey, + TestTypes, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -652,11 +618,11 @@ impl )); let libp2p_network_generator = Arc::new(, - ::SignatureKey, + Message, + ::SignatureKey, > as TestableNetworkingImplementation< - SequencingTestTypes, - Message, + TestTypes, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -685,27 +651,27 @@ impl let quorum_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); let vid_chan = <, + TestTypes, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); (quorum_chan, committee_chan, view_sync_chan, vid_chan) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index b2a9a88451..68df883ed1 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -1,7 +1,7 @@ use commit::Commitment; use either::Either; use hotshot_task::{event_stream::EventStream, Merge}; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_task_impls::events::HotShotEvent; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, @@ -600,15 +600,12 @@ pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< ChannelStream, ( usize, - Either< - Event>::Leaf>, - SequencingHotShotEvent, - >, + Either>::Leaf>, HotShotEvent>, ), MergeN< Merge< UnboundedStream>::Leaf>>, - UnboundedStream>, + UnboundedStream>, >, >, OverallSafetyTask, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 17be6f7dfc..846e67ad5b 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,5 +1,5 @@ use crate::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; use commit::Committable; @@ -8,14 +8,14 @@ use hotshot::{ certificate::QuorumCertificate, traits::{NodeImplementation, TestableNodeImplementation}, types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, - HotShotInitializer, HotShotSequencingConsensusApi, SystemContext, + HotShotConsensusApi, HotShotInitializer, SystemContext, }; use hotshot_task::event_stream::ChannelStream; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ block_impl::{VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, consensus::ConsensusMetricsValue, - data::{QuorumProposal, SequencingLeaf, VidScheme, ViewNumber}, + data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::{Message, Proposal}, traits::{ consensus_api::ConsensusSharedApi, @@ -29,51 +29,50 @@ use hotshot_types::{ pub async fn build_system_handle( node_id: u64, ) -> ( - SystemContextHandle, - ChannelStream>, + SystemContextHandle, + ChannelStream>, ) { let builder = TestMetadata::default_multiple_rounds(); - let launcher = builder.gen_launcher::(); + let launcher = builder.gen_launcher::(); let networks = (launcher.resource_generator.channel_generator)(node_id); let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::< - SequencingTestTypes, - >::Leaf, - >::from_genesis(>::block_genesis()) + TestTypes, + >::Leaf, + >::from_genesis( + >::block_genesis() + ) .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = ::SignatureKey::from_private(&private_key); + let public_key = ::SignatureKey::from_private(&private_key); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - SequencingTestTypes, - Message, + as ConsensusExchange< + TestTypes, + Message, >>::Membership::default_election_config(config.total_nodes.get() as u64) }); let committee_election_config = config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - SequencingTestTypes, - Message, + as ConsensusExchange< + TestTypes, + Message, >>::Membership::default_election_config(config.total_nodes.get() as u64) }); - let exchanges = - >::Exchanges::create( - known_nodes_with_stake.clone(), - (quorum_election_config, committee_election_config), - networks, - public_key, - public_key.get_stake_table_entry(1u64), - private_key.clone(), - ); + let exchanges = >::Exchanges::create( + known_nodes_with_stake.clone(), + (quorum_election_config, committee_election_config), + networks, + public_key, + public_key.get_stake_table_entry(1u64), + private_key.clone(), + ); SystemContext::init( public_key, private_key, @@ -89,19 +88,15 @@ pub async fn build_system_handle( } async fn build_quorum_proposal_and_signature( - handle: &SystemContextHandle, + handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> ( - QuorumProposal>, - EncodedSignature, -) { +) -> (QuorumProposal>, EncodedSignature) { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; - let api: HotShotSequencingConsensusApi = - HotShotSequencingConsensusApi { - inner: handle.hotshot.inner.clone(), - }; + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; let _quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); let parent_view_number = &consensus.high_qc.view_number(); @@ -118,7 +113,7 @@ async fn build_quorum_proposal_and_signature( // every event input is seen on the event stream in the output. let block = ::genesis(); - let leaf = SequencingLeaf { + let leaf = Leaf { view_number: ViewNumber::new(view), height: parent_leaf.height + 1, justify_qc: consensus.high_qc.clone(), @@ -131,7 +126,7 @@ async fn build_quorum_proposal_and_signature( proposer_id: api.public_key().to_bytes(), }; let signature = ::sign(private_key, leaf.commit().as_ref()); - let proposal = QuorumProposal::> { + let proposal = QuorumProposal::> { block_commitment: block.commit(), view_number: ViewNumber::new(view), height: 1, @@ -145,10 +140,10 @@ async fn build_quorum_proposal_and_signature( } pub async fn build_quorum_proposal( - handle: &SystemContextHandle, + handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> Proposal>> { +) -> Proposal>> { let (proposal, signature) = build_quorum_proposal_and_signature(handle, private_key, view).await; Proposal { @@ -160,7 +155,7 @@ pub async fn build_quorum_proposal( pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = ::SignatureKey::from_private(&private_key); + let public_key = ::SignatureKey::from_private(&private_key); (private_key, public_key) } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index cbc2c149dc..c1d9b81c1e 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -7,7 +7,7 @@ async fn test_success() { use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; use std::time::Duration; @@ -24,7 +24,7 @@ async fn test_success() { ..TestMetadata::default() }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -41,7 +41,7 @@ async fn test_with_failures_one() { use std::time::Duration; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; @@ -63,7 +63,7 @@ async fn test_with_failures_one() { node_changes: vec![(Duration::new(4, 0), dead_nodes)], }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -80,7 +80,7 @@ async fn test_with_failures_half_f() { use std::time::Duration; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; @@ -112,7 +112,7 @@ async fn test_with_failures_half_f() { node_changes: vec![(Duration::new(4, 0), dead_nodes)], }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -129,7 +129,7 @@ async fn test_with_failures_f() { use std::time::Duration; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; @@ -173,7 +173,7 @@ async fn test_with_failures_f() { node_changes: vec![(Duration::new(4, 0), dead_nodes)], }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 01610f38f2..971bc7f8c0 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -9,7 +9,7 @@ async fn test_catchup() { use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -53,7 +53,7 @@ async fn test_catchup() { }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -70,7 +70,7 @@ async fn test_catchup_web() { use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingTestTypes, SequencingWebImpl}, + node_types::{TestTypes, WebImpl}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -108,7 +108,7 @@ async fn test_catchup_web() { }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -127,7 +127,7 @@ async fn test_catchup_one_node() { use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -167,7 +167,7 @@ async fn test_catchup_one_node() { metadata.overall_safety_properties.num_failed_views = 5; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -187,7 +187,7 @@ async fn test_catchup_in_view_sync() { use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -231,7 +231,7 @@ async fn test_catchup_in_view_sync() { }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 30a7f5e277..cfe2ba8462 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -2,7 +2,7 @@ use std::time::Duration; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingCombinedImpl, SequencingTestTypes}, + node_types::{CombinedImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, }; @@ -40,7 +40,7 @@ async fn test_combined_network() { }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await @@ -59,7 +59,7 @@ async fn test_stress_combined_network() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata::default_stress(); metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index dc8439a85d..cc268740d7 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -4,16 +4,16 @@ use either::Right; use hotshot::{ tasks::add_consensus_task, types::{SignatureKey, SystemContextHandle}, - HotShotSequencingConsensusApi, + HotShotConsensusApi, }; use hotshot_task::event_stream::ChannelStream; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, task_helpers::{build_quorum_proposal, key_pair_for_id}, }; use hotshot_types::{ - data::{QuorumProposal, SequencingLeaf, ViewNumber}, + data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, traits::{ election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, @@ -25,16 +25,15 @@ use hotshot_types::{ use std::collections::HashMap; async fn build_vote( - handle: &SystemContextHandle, - proposal: QuorumProposal>, + handle: &SystemContextHandle, + proposal: QuorumProposal>, view: ViewNumber, -) -> GeneralConsensusMessage { +) -> GeneralConsensusMessage { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; - let api: HotShotSequencingConsensusApi = - HotShotSequencingConsensusApi { - inner: handle.hotshot.inner.clone(), - }; + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; let quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); let vote_token = quorum_exchange.make_vote_token(view).unwrap().unwrap(); @@ -61,7 +60,7 @@ async fn build_vote( let parent_commitment = parent.commit(); - let leaf: SequencingLeaf<_> = SequencingLeaf { + let leaf: Leaf<_> = Leaf { view_number: view, height: proposal.height, justify_qc: proposal.justify_qc.clone(), @@ -102,30 +101,27 @@ async fn test_consensus_task() { let mut output = HashMap::new(); // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = QuorumCertificate::< - SequencingTestTypes, - Commitment>, - >::genesis(); + let qc = QuorumCertificate::>>::genesis(); let proposal = build_quorum_proposal(&handle, &private_key, 1).await; - input.push(SequencingHotShotEvent::QCFormed(either::Left(qc.clone()))); - input.push(SequencingHotShotEvent::QuorumProposalRecv( + input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); + input.push(HotShotEvent::QuorumProposalRecv( proposal.clone(), public_key, )); - input.push(SequencingHotShotEvent::Shutdown); + input.push(HotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::QCFormed(either::Left(qc)), 1); + output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); output.insert( - SequencingHotShotEvent::QuorumProposalSend(proposal.clone(), public_key), + HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), 1, ); output.insert( - SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), 1, ); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert(SequencingHotShotEvent::Shutdown, 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) @@ -156,27 +152,27 @@ async fn test_consensus_vote() { let proposal = build_quorum_proposal(&handle, &private_key, 1).await; // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - input.push(SequencingHotShotEvent::QuorumProposalRecv( + input.push(HotShotEvent::QuorumProposalRecv( proposal.clone(), public_key, )); output.insert( - SequencingHotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), 1, ); let proposal = proposal.data; if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal, ViewNumber::new(1)).await { - output.insert(SequencingHotShotEvent::QuorumVoteSend(vote.clone()), 1); - input.push(SequencingHotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(SequencingHotShotEvent::QuorumVoteRecv(vote), 1); + output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); + output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - input.push(SequencingHotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::Shutdown, 1); + input.push(HotShotEvent::Shutdown); + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 0c32e724cc..223765a015 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,8 +1,8 @@ use commit::Committable; -use hotshot::HotShotSequencingConsensusApi; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot::HotShotConsensusApi; +use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; use hotshot_types::{ @@ -33,10 +33,9 @@ async fn test_da_task() { // Build the API for node 2. let handle = build_system_handle(2).await.0; - let api: HotShotSequencingConsensusApi = - HotShotSequencingConsensusApi { - inner: handle.hotshot.inner.clone(), - }; + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let vid = vid_init(); @@ -66,41 +65,32 @@ async fn test_da_task() { let mut output = HashMap::new(); // In view 1, node 2 is the next leader. - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(SequencingHotShotEvent::BlockReady( - block.clone(), - ViewNumber::new(2), - )); - input.push(SequencingHotShotEvent::DAProposalRecv( - message.clone(), - pub_key, - )); + input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2))); + input.push(HotShotEvent::DAProposalRecv(message.clone(), pub_key)); - input.push(SequencingHotShotEvent::Shutdown); + input.push(HotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert( - SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), - 1, - ); - output.insert(SequencingHotShotEvent::SendDABlockData(block.clone()), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - SequencingHotShotEvent::DAProposalSend(message.clone(), pub_key), + HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 1, ); + output.insert(HotShotEvent::SendDABlockData(block.clone()), 1); + output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); let vote_token = committee_exchange .make_vote_token(ViewNumber::new(2)) .unwrap() .unwrap(); let da_vote = committee_exchange.create_da_message(block.commit(), ViewNumber::new(2), vote_token); - output.insert(SequencingHotShotEvent::DAVoteSend(da_vote), 1); + output.insert(HotShotEvent::DAVoteSend(da_vote), 1); - output.insert(SequencingHotShotEvent::DAProposalRecv(message, pub_key), 1); + output.insert(HotShotEvent::DAProposalRecv(message, pub_key), 1); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(SequencingHotShotEvent::Shutdown, 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { add_da_task(task_runner, event_stream, committee_exchange, handle) diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 26bdf11200..cac73b3554 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -2,7 +2,7 @@ use std::time::Duration; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingLibp2pImpl, SequencingTestTypes}, + node_types::{Libp2pImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::TestMetadata, }; @@ -32,7 +32,7 @@ async fn libp2p_network() { }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await @@ -51,7 +51,7 @@ async fn test_stress_libp2p_network() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata::default_stress(); metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 3ad84cf6e6..ec5a4910d2 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; -use hotshot::demo::SDemoState; +use hotshot::demo::DemoState; use hotshot::traits::election::static_committee::{ GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, }; @@ -15,14 +15,14 @@ use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; use hotshot_types::block_impl::{VIDBlockPayload, VIDTransaction}; use hotshot_types::certificate::ViewSyncCertificate; -use hotshot_types::data::{DAProposal, QuorumProposal, SequencingLeaf}; +use hotshot_types::data::{DAProposal, Leaf, QuorumProposal}; use hotshot_types::message::{Message, SequencingMessage}; use hotshot_types::traits::election::{ CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange, }; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType, SequencingExchanges}; +use hotshot_types::traits::node_implementation::{ChannelMaps, Exchanges, NodeType}; use hotshot_types::vote::{DAVote, ViewSyncVote}; use hotshot_types::{ data::ViewNumber, @@ -58,13 +58,13 @@ impl NodeType for Test { type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = SDemoState; + type StateType = DemoState; } #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct TestImpl {} -pub type ThisLeaf = SequencingLeaf; +pub type ThisLeaf = Leaf; pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = MemoryCommChannel; pub type QuorumNetwork = MemoryCommChannel; @@ -82,8 +82,8 @@ pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for TestImpl { type Storage = MemoryStorage; - type Leaf = SequencingLeaf; - type Exchanges = SequencingExchanges< + type Leaf = Leaf; + type Exchanges = Exchanges< Test, Message, QuorumExchange< diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index c4165a8c31..a8b75813d2 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,8 +1,8 @@ use commit::Committable; -use hotshot::HotShotSequencingConsensusApi; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot::HotShotConsensusApi; +use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, task_helpers::{build_quorum_proposal, vid_init}, }; use hotshot_types::{ @@ -35,10 +35,9 @@ async fn test_network_task() { // Build the API for node 2. let (handle, event_stream) = build_system_handle(2).await; - let api: HotShotSequencingConsensusApi = - HotShotSequencingConsensusApi { - inner: handle.hotshot.inner.clone(), - }; + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let priv_key = api.private_key(); @@ -75,45 +74,39 @@ async fn test_network_task() { let mut input = Vec::new(); let mut output = HashMap::new(); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(SequencingHotShotEvent::BlockReady( - block.clone(), - ViewNumber::new(2), - )); - input.push(SequencingHotShotEvent::DAProposalSend( - da_proposal.clone(), - pub_key, - )); - input.push(SequencingHotShotEvent::VidDisperseSend( + input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2))); + input.push(HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key)); + input.push(HotShotEvent::VidDisperseSend( da_vid_disperse.clone(), pub_key, )); - input.push(SequencingHotShotEvent::QuorumProposalSend( + input.push(HotShotEvent::QuorumProposalSend( quorum_proposal.clone(), pub_key, )); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(SequencingHotShotEvent::Shutdown); + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(HotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 2); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 2); output.insert( - SequencingHotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), + HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); output.insert( - SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 2, ); output.insert( - SequencingHotShotEvent::VidDisperseRecv(da_vid_disperse.clone(), pub_key), + HotShotEvent::VidDisperseRecv(da_vid_disperse.clone(), pub_key), 1, ); output.insert( - SequencingHotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), + HotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(1)), 1); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); + output.insert(HotShotEvent::Timeout(ViewNumber::new(1)), 1); + output.insert(HotShotEvent::Timeout(ViewNumber::new(2)), 1); // Only one output from the input. // The consensus task will fail to send a second proposal, like the DA task does, due to the @@ -121,20 +114,17 @@ async fn test_network_task() { // logging, but that is fine for testing as long as the network task is correctly handling // events. output.insert( - SequencingHotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), - 1, - ); - output.insert(SequencingHotShotEvent::SendDABlockData(block), 1); - output.insert( - SequencingHotShotEvent::DAProposalRecv(da_proposal, pub_key), + HotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, ); + output.insert(HotShotEvent::SendDABlockData(block), 1); + output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); output.insert( - SequencingHotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), + HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), 1, ); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 2); - output.insert(SequencingHotShotEvent::Shutdown, 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 2); + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, _| async { task_runner }; run_harness(input, output, Some(event_stream), build_fn).await; diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index e8e4278195..3b7e2e2bfb 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -9,11 +9,11 @@ async fn test_timeout_web() { use std::time::Duration; - use hotshot_testing::node_types::SequencingWebImpl; + use hotshot_testing::node_types::WebImpl; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::SequencingTestTypes, + node_types::TestTypes, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -57,7 +57,7 @@ async fn test_timeout_web() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; @@ -72,11 +72,11 @@ async fn test_timeout_web() { async fn test_timeout_libp2p() { use std::time::Duration; - use hotshot_testing::node_types::SequencingLibp2pImpl; + use hotshot_testing::node_types::Libp2pImpl; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::SequencingTestTypes, + node_types::TestTypes, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -120,7 +120,7 @@ async fn test_timeout_libp2p() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index cf19740e1d..3a3464f386 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,8 +1,8 @@ use commit::Committable; -use hotshot::{tasks::add_vid_task, HotShotSequencingConsensusApi}; -use hotshot_task_impls::events::SequencingHotShotEvent; +use hotshot::{tasks::add_vid_task, HotShotConsensusApi}; +use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ - node_types::{SequencingMemoryImpl, SequencingTestTypes}, + node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; use hotshot_types::traits::election::VIDExchangeType; @@ -31,10 +31,9 @@ async fn test_vid_task() { // Build the API for node 2. let handle = build_system_handle(2).await.0; - let api: HotShotSequencingConsensusApi = - HotShotSequencingConsensusApi { - inner: handle.hotshot.inner.clone(), - }; + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; let vid_exchange = api.inner.exchanges.vid_exchange().clone(); let pub_key = *api.public_key(); @@ -48,7 +47,7 @@ async fn test_vid_task() { }; let signature = vid_exchange.sign_vid_proposal(&block.commit()); - let proposal: DAProposal = DAProposal { + let proposal: DAProposal = DAProposal { deltas: block.clone(), view_number: ViewNumber::new(2), }; @@ -71,22 +70,16 @@ async fn test_vid_task() { let mut output = HashMap::new(); // In view 1, node 2 is the next leader. - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(SequencingHotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(SequencingHotShotEvent::BlockReady( - block.clone(), - ViewNumber::new(2), - )); + input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + input.push(HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2))); - input.push(SequencingHotShotEvent::VidDisperseRecv( - vid_proposal.clone(), - pub_key, - )); - input.push(SequencingHotShotEvent::Shutdown); + input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); + input.push(HotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - SequencingHotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 1, ); @@ -95,14 +88,11 @@ async fn test_vid_task() { .unwrap() .unwrap(); let vid_vote = vid_exchange.create_vid_message(block.commit(), ViewNumber::new(2), vote_token); - output.insert(SequencingHotShotEvent::VidVoteSend(vid_vote), 1); + output.insert(HotShotEvent::VidVoteSend(vid_vote), 1); - output.insert( - SequencingHotShotEvent::VidDisperseRecv(vid_proposal, pub_key), - 1, - ); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(SequencingHotShotEvent::Shutdown, 1); + output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, vid_exchange, handle); diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 8116dcf2a2..2e131c8703 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,7 +1,7 @@ use commit::Committable; -use hotshot::{types::SignatureKey, HotShotSequencingConsensusApi}; -use hotshot_task_impls::events::SequencingHotShotEvent; -use hotshot_testing::node_types::{SequencingMemoryImpl, SequencingTestTypes}; +use hotshot::{types::SignatureKey, HotShotConsensusApi}; +use hotshot_task_impls::events::HotShotEvent; +use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ data::ViewNumber, traits::{ @@ -35,17 +35,16 @@ async fn test_view_sync_task() { // Build the API for node 3. let handle = build_system_handle(5).await.0; - let api: HotShotSequencingConsensusApi = - HotShotSequencingConsensusApi { - inner: handle.hotshot.inner.clone(), - }; + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); let relay_pub_key = api.public_key().to_bytes(); let vote_token = view_sync_exchange .make_vote_token(ViewNumber::new(5)) .unwrap_or_else(|_| panic!("Error making vote token")) .unwrap_or_else(|| panic!("Not chosen for the committee")); - let vote_data_internal: ViewSyncData = ViewSyncData { + let vote_data_internal: ViewSyncData = ViewSyncData { relay: relay_pub_key.clone(), round: ViewNumber::new(5), }; @@ -64,21 +63,21 @@ async fn test_view_sync_task() { let mut input = Vec::new(); let mut output = HashMap::new(); - input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(2))); - input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(3))); - input.push(SequencingHotShotEvent::Timeout(ViewNumber::new(4))); + input.push(HotShotEvent::Timeout(ViewNumber::new(2))); + input.push(HotShotEvent::Timeout(ViewNumber::new(3))); + input.push(HotShotEvent::Timeout(ViewNumber::new(4))); - input.push(SequencingHotShotEvent::Shutdown); + input.push(HotShotEvent::Shutdown); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(2)), 1); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(3)), 1); - output.insert(SequencingHotShotEvent::Timeout(ViewNumber::new(4)), 1); + output.insert(HotShotEvent::Timeout(ViewNumber::new(2)), 1); + output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); + output.insert(HotShotEvent::Timeout(ViewNumber::new(4)), 1); - output.insert(SequencingHotShotEvent::ViewSyncVoteSend(vote.clone()), 1); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(SequencingHotShotEvent::ViewChange(ViewNumber::new(3)), 1); + output.insert(HotShotEvent::ViewSyncVoteSend(vote.clone()), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(3)), 1); - output.insert(SequencingHotShotEvent::Shutdown, 1); + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| add_view_sync_task(task_runner, event_stream, handle); diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 9b19532902..233c4adda2 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -3,7 +3,7 @@ use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingTestTypes, SequencingWebImpl}, + node_types::{TestTypes, WebImpl}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, }; @@ -39,7 +39,7 @@ async fn web_server_network() { ..TestMetadata::default() }; metadata - .gen_launcher::() + .gen_launcher::() .launch() .run_test() .await; diff --git a/types/src/data.rs b/types/src/data.rs index 356b7a60fa..e958092cb1 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -563,7 +563,7 @@ pub struct ValidatingLeaf { /// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` #[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] #[serde(bound(deserialize = ""))] -pub struct SequencingLeaf { +pub struct Leaf { /// CurView from leader when proposing leaf pub view_number: TYPES::Time, @@ -573,7 +573,7 @@ pub struct SequencingLeaf { /// Per spec, justification pub justify_qc: QuorumCertificate>, - /// The hash of the parent `SequencingLeaf` + /// The hash of the parent `Leaf` /// So we can ask if it extends pub parent_commitment: Commitment, @@ -590,7 +590,7 @@ pub struct SequencingLeaf { pub proposer_id: EncodedPublicKey, } -impl PartialEq for SequencingLeaf { +impl PartialEq for Leaf { fn eq(&self, other: &Self) -> bool { let delta_left = match &self.deltas { Either::Left(deltas) => deltas.commit(), @@ -609,7 +609,7 @@ impl PartialEq for SequencingLeaf { } } -impl Hash for SequencingLeaf { +impl Hash for Leaf { fn hash(&self, state: &mut H) { self.view_number.hash(state); self.height.hash(state); @@ -745,7 +745,7 @@ where } } -impl Display for SequencingLeaf { +impl Display for Leaf { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, @@ -755,7 +755,7 @@ impl Display for SequencingLeaf { } } -impl LeafType for SequencingLeaf { +impl LeafType for Leaf { type NodeType = TYPES; type DeltasType = Either>; type MaybeState = (); @@ -839,7 +839,7 @@ impl LeafType for SequencingLeaf { } } -impl TestableLeaf for SequencingLeaf +impl TestableLeaf for Leaf where TYPES::StateType: TestableState, TYPES::BlockType: TestableBlock, @@ -955,7 +955,7 @@ impl Committable for ValidatingLeaf { } } -impl Committable for SequencingLeaf { +impl Committable for Leaf { fn commit(&self) -> commit::Commitment { // Commit the block commitment, rather than the block, so that the replicas can reconstruct // the leaf. diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 08716a3d73..a2ee27c3b1 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -1,4 +1,4 @@ -//! Contains the [`SequencingConsensusApi`] and [`ValidatingConsensusApi`] traits. +//! Contains the [`ConsensusApi`] trait. use crate::{ certificate::QuorumCertificate, @@ -122,7 +122,7 @@ pub trait ConsensusSharedApi< /// The API that [`HotStuff`] needs to talk to the system, for sequencing consensus. #[async_trait] -pub trait SequencingConsensusApi< +pub trait ConsensusApi< TYPES: NodeType, LEAF: LeafType, I: NodeImplementation>, diff --git a/types/src/traits/consensus_type/sequencing_consensus.rs b/types/src/traits/consensus_type/sequencing_consensus.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/types/src/traits/consensus_type/validating_consensus.rs b/types/src/traits/consensus_type/validating_consensus.rs deleted file mode 100644 index 21b6630301..0000000000 --- a/types/src/traits/consensus_type/validating_consensus.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! The [`ValidatingConsensusType`] trait allows consensus-specific customization points. - -use crate::traits::consensus_type::ConsensusType; - -/// Marker trait for consensus which provides ordering and execution. -pub trait ValidatingConsensusType -where - Self: ConsensusType, -{ -} - -/// Consensus which provides ordering and execution. -#[derive(Clone, Debug)] -pub struct ValidatingConsensus; -impl ConsensusType for ValidatingConsensus {} -impl ValidatingConsensusType for ValidatingConsensus {} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 90d7912539..42662cf2e9 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -15,7 +15,7 @@ use super::{ State, }; use crate::{ - data::{LeafType, SequencingLeaf, TestableLeaf}, + data::{Leaf, LeafType, TestableLeaf}, message::{ConsensusMessageType, Message, SequencingMessage}, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, @@ -134,7 +134,7 @@ pub trait NodeImplementation: /// Consensus type selected exchanges. /// - /// Implements either `ValidatingExchangesType` or `SequencingExchangesType`. + /// Implements either `ValidatingExchangesType` or `ExchangesType`. type Exchanges: ExchangesType>; /// Create channels for sending/recv-ing proposals and votes for quorum and committee @@ -228,12 +228,12 @@ pub trait TestableExchange, ME >; } -/// Implementes [`SequencingExchangesType`]. +/// Implementes [`ExchangesType`]. #[derive(Clone, Debug)] -pub struct SequencingExchanges< +pub struct Exchanges< TYPES: NodeType, MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, + QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, VIDEXCHANGE: VIDExchangeType + Clone + Debug, @@ -255,13 +255,33 @@ pub struct SequencingExchanges< // It is here to avoid needing to instantiate it where all the other exchanges are instantiated // https://github.com/EspressoSystems/HotShot/issues/1799 #[allow(clippy::type_complexity)] - - pub timeout_exchange: TimeoutExchange - as ExchangesType, MESSAGE>>::QuorumExchange - as ConsensusExchange>::Proposal, < - as ExchangesType, MESSAGE>>::QuorumExchange - as ConsensusExchange>::Membership, >::Networking, MESSAGE>, + pub timeout_exchange: TimeoutExchange< + TYPES, + < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Proposal, + < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Membership, + >::Networking, + MESSAGE, + >, /// Phantom data _phantom: PhantomData<(TYPES, MESSAGE)>, @@ -269,19 +289,12 @@ pub struct SequencingExchanges< #[async_trait] impl - ExchangesType, MESSAGE> - for SequencingExchanges< - TYPES, - MESSAGE, - QUORUMEXCHANGE, - COMMITTEEEXCHANGE, - VIEWSYNCEXCHANGE, - VIDEXCHANGE, - > + ExchangesType, MESSAGE> + for Exchanges where TYPES: NodeType, MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, + QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, VIDEXCHANGE: VIDExchangeType + Clone + Debug, @@ -291,7 +304,33 @@ where type ViewSyncExchange = VIEWSYNCEXCHANGE; type VIDExchange = VIDEXCHANGE; #[allow(clippy::type_complexity)] - type TimeoutExchange = TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE>; + type TimeoutExchange = TimeoutExchange< + TYPES, + < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Proposal, + < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Membership, + >::Networking, + MESSAGE, + >; type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); @@ -325,7 +364,7 @@ where sk.clone(), ); #[allow(clippy::type_complexity)] - let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( + let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( entries.clone(), configs.0.clone(), networks.0, @@ -396,21 +435,12 @@ pub type QuorumEx = <>::Exchanges as Ex Message, >>::QuorumExchange; -/// Alias for the [`CommitteeExchange`] type for sequencing consensus. -pub type SequencingQuorumEx = - <>::Exchanges as ExchangesType< - TYPES, - >::Leaf, - Message, - >>::QuorumExchange; - /// Alias for `TimeoutExchange` type -pub type SequencingTimeoutEx = - <>::Exchanges as ExchangesType< - TYPES, - >::Leaf, - Message, - >>::TimeoutExchange; +pub type TimeoutEx = <>::Exchanges as ExchangesType< + TYPES, + >::Leaf, + Message, +>>::TimeoutExchange; /// Alias for the [`CommitteeExchange`] type. pub type CommitteeEx = <>::Exchanges as ExchangesType< From c6bdd0dd7ddb05110d0dfec8f62deba91981e770 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 27 Oct 2023 12:17:46 -0700 Subject: [PATCH 0274/1393] Remove header bound, refactor leaf WIP --- task-impls/src/consensus.rs | 47 ++++++++--------------- task-impls/src/transactions.rs | 16 +++----- testing/src/task_helpers.rs | 11 ++---- testing/tests/consensus_task.rs | 3 +- types/src/data.rs | 66 ++++++++++++--------------------- types/src/traits/storage.rs | 21 +++++++---- 6 files changed, 63 insertions(+), 101 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1d2ca253ab..0d64aa6eec 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -361,16 +361,16 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, A: SequencingConsensusApi, I> + 'static, - H: BlockHeader, > SequencingConsensusTaskState where + TYPES::BlockHeader: BlockHeader, SequencingQuorumEx: ConsensusExchange< TYPES, Message, @@ -461,7 +461,8 @@ where view_number: view, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_header.clone()), + block_header: proposal.block_header.clone(), + transaction_commitments: HashSet::new(), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -530,7 +531,8 @@ where view_number: view, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_header.clone()), + block_header: proposal.block_header.clone(), + transaction_commitments: HashSet::new(), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -746,7 +748,8 @@ where view_number: view, justify_qc: justify_qc.clone(), parent_commitment: justify_qc.leaf_commitment(), - deltas: Right(proposal.data.block_header), + block_header: proposal.data.block_header, + transaction_commitments: HashSet::new(), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -770,7 +773,8 @@ where view_number: view, justify_qc: justify_qc.clone(), parent_commitment, - deltas: Right(proposal.data.block_header), + block_header: proposal.data.block_header, + transaction_commitments: HashSet::new(), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -870,15 +874,9 @@ where } leaf_views.push(leaf.clone()); - match &leaf.deltas { - Left((_,block)) => { - let txns = block.contained_transactions(); - for txn in txns { + for txn in leaf.transaction_commitments { included_txns.insert(txn); } - } - Right(_) => {} - } } true }, @@ -1368,16 +1366,7 @@ where } let parent_leaf = leaf.clone(); - let parent_header = match parent_leaf.deltas { - Left((_, ref payload)) => { - if parent_leaf.view_number != TYPES::Time::new(0) { - error!("Non-genesis parent leaf should contain the block header rather than payload."); - return false; - } - TYPES::BlockHeader::genesis(payload.clone()) - } - Right(ref header) => header.clone(), - }; + let parent_header = parent_leaf.block_header; let original_parent_hash = parent_leaf.commit(); @@ -1401,12 +1390,8 @@ where view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - // Use the payload commitment rather than the payload, so that the replica can - // construct the same leaf with the commitment. - deltas: Right(TYPES::BlockHeader::new( - *payload_commitment, - parent_header.clone(), - )), + block_header: TYPES::BlockHeader::new(*payload_commitment, parent_header.clone()), + transaction_commitments: HashSet::new(), rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.api.public_key().to_bytes(), @@ -1498,14 +1483,13 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = SequencingLeaf, ConsensusMessage = SequencingMessage, >, A: SequencingConsensusApi, I> + 'static, - H: BlockHeader, >( event: SequencingHotShotEvent, mut state: SequencingConsensusTaskState, @@ -1514,6 +1498,7 @@ pub async fn sequencing_consensus_handle< SequencingConsensusTaskState, ) where + TYPES::BlockHeader: BlockHeader, SequencingQuorumEx: ConsensusExchange< TYPES, Message, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index bc8d7798ed..72171de679 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -143,14 +143,8 @@ where let mut included_txn_size = 0; let mut included_txn_count = 0; for leaf in leaf_chain { - match &leaf.deltas { - Left((_, block)) => { - let txns = block.contained_transactions(); - for txn in txns { - included_txns.insert(txn); - } - } - Right(_) => {} + for txn in leaf.transaction_commitments { + included_txns.insert(txn); } } let consensus = self.consensus.read().await; @@ -329,9 +323,9 @@ where // TODO (Keyao) Investigate the use of transaction hash // // let parent_leaf = self.parent_leaf().await?; - // let previous_used_txns = match parent_leaf.deltas { - // Either::Left(block) => block.contained_transactions(), - // Either::Right(_commitment) => HashSet::new(), + // let previous_used_txns = match parent_leaf.tarnsaction_commitments { + // Some(txns) => txns, + // None => HashSet::new(), // }; let receiver = self.transactions.subscribe().await; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 9a20f6fe1e..f929eb4fee 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -116,13 +116,7 @@ async fn build_quorum_proposal_and_signature( panic!("Failed to find high QC parent."); }; let parent_leaf = leaf.clone(); - let parent_header = match parent_leaf.deltas { - Left((block_number, ref payload)) => VIDBlockHeader { - block_number, - payload_commitment: payload.commit(), - }, - Right(ref header) => header.clone(), - }; + let parent_header = parent_leaf.block_header; // every event input is seen on the event stream in the output. let block = ::genesis(); @@ -132,7 +126,8 @@ async fn build_quorum_proposal_and_signature( view_number: ViewNumber::new(view), justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - deltas: Right(block_header.clone()), + block_header: block_header.clone(), + transaction_commitments: HashSet::new(), rejected: vec![], timestamp: 0, proposer_id: api.public_key().to_bytes(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 726b4ed0e9..38163b6de5 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -65,7 +65,8 @@ async fn build_vote( view_number: view, justify_qc: proposal.justify_qc.clone(), parent_commitment, - deltas: Right(proposal.block_header), + block_header: proposal.block_header, + transaction_commitments: HashSet::new(), rejected: Vec::new(), timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), diff --git a/types/src/data.rs b/types/src/data.rs index 5f1cef024d..9f18e0745c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -31,6 +31,7 @@ use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::{ensure, Snafu}; use std::{ + collections::HashSet, fmt::{Debug, Display}, hash::Hash, }; @@ -611,10 +612,16 @@ pub struct SequencingLeaf { /// So we can ask if it extends pub parent_commitment: Commitment, - /// Either the block number and payload, or the block header. - pub deltas: Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>, + /// Block header. + pub block_header: TYPES::BlockHeader, - /// Transactions that were marked for rejection while collecting deltas + /// Set of commitments to the contained transactions. + /// + /// It may be empty for nodes not in the DA committee. + pub transaction_commitments: + HashSet::Transaction>>, + + /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, // TODO (Keyao) Remove. @@ -627,19 +634,11 @@ pub struct SequencingLeaf { impl PartialEq for SequencingLeaf { fn eq(&self, other: &Self) -> bool { - let delta_left = match &self.deltas { - Either::Left(deltas) => (deltas.0, deltas.1.commit()), - Either::Right(deltas) => (deltas.block_number(), deltas.payload_commitment()), - }; - let delta_right = match &other.deltas { - Either::Left(deltas) => (deltas.0, deltas.1.commit()), - Either::Right(deltas) => (deltas.block_number(), deltas.payload_commitment()), - }; self.view_number == other.view_number - // && self.height == other.height && self.justify_qc == other.justify_qc && self.parent_commitment == other.parent_commitment - && delta_left == delta_right + && self.block_header == other.block_header + && self.transaction_commitments == other.transaction_commitments && self.rejected == other.rejected } } @@ -647,20 +646,10 @@ impl PartialEq for SequencingLeaf { impl Hash for SequencingLeaf { fn hash(&self, state: &mut H) { self.view_number.hash(state); - // self.height.hash(state); self.justify_qc.hash(state); self.parent_commitment.hash(state); - match &self.deltas { - Either::Left(deltas) => { - deltas.0.hash(state); - deltas.1.commit().hash(state); - } - Either::Right(header) => { - header.block_number().hash(state); - header.payload_commitment().hash(state); - } - } - // self.deltas.hash(state.commit()); + self.block_header.hasher(state); + self.transaction_commitments.hash(state); self.rejected.hash(state); } } @@ -749,7 +738,8 @@ impl LeafType for ValidatingLeaf { height: 0, justify_qc: stored_view.justify_qc, parent_commitment: stored_view.parent, - deltas: stored_view.deltas, + block_header: stored_view.block_header, + transaction_commitments: stored_view.transaction_commitments, state: stored_view.state, rejected: stored_view.rejected, timestamp: stored_view.timestamp, @@ -817,10 +807,7 @@ impl LeafType for SequencingLeaf { } fn get_height(&self) -> u64 { - match &self.deltas { - Left((height, _)) => *height, - Right(header) => header.block_number(), - } + self.block_header.block_number() } fn get_justify_qc(&self) -> QuorumCertificate> { @@ -831,12 +818,12 @@ impl LeafType for SequencingLeaf { self.parent_commitment } - fn get_deltas(&self) -> Self::DeltasType { - self.deltas.clone() + fn get_block_header(&self) -> ::BlockHeader { + self.block_header.clone() } - fn get_deltas_commitment(&self) -> Commitment<::BlockPayload> { - self.deltas.payload_commitment() + fn get_block_commitment(&self) -> Commitment<::BlockPayload> { + self.block_header.payload_commitment() } fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError> { @@ -989,20 +976,13 @@ impl Committable for ValidatingLeaf { impl Committable for SequencingLeaf { fn commit(&self) -> commit::Commitment { - // Commit the block payload, rather than the block, so that the replicas can reconstruct - // the leaf. - let (height, payload_commitment) = match &self.deltas { - Either::Left((height, payload)) => (*height, payload.commit()), - Either::Right(header) => (header.block_number(), header.payload_commitment()), - }; - let signatures_bytes = serialize_signature(&self.justify_qc.signatures); + // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. commit::RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) - .u64_field("height", height) .field("parent Leaf commitment", self.parent_commitment) - .field("block payload commitment", payload_commitment) + .field("block header", self.block_header) .constant_str("justify_qc view number") .u64(*self.justify_qc.view_number) .field( diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 996ea53e02..7da4c0b59f 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -10,7 +10,7 @@ use async_trait::async_trait; use commit::Commitment; use derivative::Derivative; use snafu::Snafu; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; /// Errors that can occur in the storage layer. #[derive(Clone, Debug, Snafu)] #[snafu(visibility(pub))] @@ -127,16 +127,19 @@ where pub struct StoredView> { /// The view number of this view pub view_number: TYPES::Time, - /// The index of `parent` in the chain - pub height: u64, /// The parent of this view pub parent: Commitment, /// The justify QC of this view. See the hotstuff paper for more information on this. pub justify_qc: QuorumCertificate>, /// The state of this view pub state: LEAF::MaybeState, - /// The deltas of this view - pub deltas: LEAF::DeltasType, + /// Block header. + pub block_header: TYPES::BlockHeader, + /// Set of commitments to the contained transactions. + /// + /// It may be empty for nodes not in the DA committee. + pub transaction_commitments: + HashSet::Transaction>>, /// transactions rejected in this view pub rejected: Vec, /// the timestamp this view was recv-ed in nanonseconds @@ -157,7 +160,10 @@ where /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. pub fn from_qc_block_and_state( qc: QuorumCertificate>, - deltas: LEAF::DeltasType, + block_header: TYPES::BlockHeader, + transaction_commitments: HashSet< + Commitment<::Transaction>, + >, state: LEAF::MaybeState, height: u64, parent_commitment: Commitment, @@ -165,12 +171,13 @@ where proposer_id: EncodedPublicKey, ) -> Self { Self { - deltas, view_number: qc.view_number(), height, parent: parent_commitment, justify_qc: qc, state, + block_header, + transaction_commitments, rejected, timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id, From cf5fe427c98a34729b4d28813fc8d1824450fe60 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 27 Oct 2023 12:25:42 -0700 Subject: [PATCH 0275/1393] Fmt after merge --- hotshot/src/tasks/mod.rs | 12 ++---------- task-impls/src/consensus.rs | 8 ++------ task-impls/src/da.rs | 4 +--- 3 files changed, 5 insertions(+), 19 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9bbc8c8488..4c0917f565 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -244,11 +244,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -486,11 +482,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, event_stream: ChannelStream>, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d21d8ea5c7..b609d6717b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1297,7 +1297,7 @@ where let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } -HotShotEvent::SendPayloadCommitment(payload_commitment) => { + HotShotEvent::SendPayloadCommitment(payload_commitment) => { self.payload_commitment = Some(payload_commitment); } _ => {} @@ -1473,11 +1473,7 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, >( event: HotShotEvent, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 46235a321b..5271c4da90 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -438,9 +438,7 @@ where let message = Proposal { data, signature }; self.event_stream - .publish(HotShotEvent::SendPayloadCommitment( - payload_commitment, - )) + .publish(HotShotEvent::SendPayloadCommitment(payload_commitment)) .await; self.event_stream .publish(HotShotEvent::DAProposalSend( From b940bf83bf2bc97eeafe140e7bcdf51bc0b2ab12 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 27 Oct 2023 16:18:07 -0400 Subject: [PATCH 0276/1393] New Cert, Vote, and Acummulator --- hotshot/src/traits/election/vrf.rs | 992 ----------------------------- types/src/lib.rs | 1 + 2 files changed, 1 insertion(+), 992 deletions(-) diff --git a/hotshot/src/traits/election/vrf.rs b/hotshot/src/traits/election/vrf.rs index d1151e0ab2..0a87034fe7 100644 --- a/hotshot/src/traits/election/vrf.rs +++ b/hotshot/src/traits/election/vrf.rs @@ -30,995 +30,3 @@ impl Clone for VRFStakeTable VRFStakeTable { -// /// get total stake -// #[must_use] -// pub fn get_all_stake(&self) -> NonZeroU64 { -// self.total_stake -// } -// } - -// impl VRFStakeTable -// where -// VRF: Vrf, -// VRFPARAMS: Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// VRF::PublicKey: Clone, -// { -// /// get total stake -// /// # Panics -// /// If converting non-zero stake into `NonZeroU64` fails -// pub fn get_stake(&self, pk: &JfPubKey) -> Option -// where -// SIGSCHEME: SignatureScheme< -// VerificationKey = VRF::PublicKey, -// PublicParameter = (), -// MessageUnit = u8, -// >, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// { -// let encoded = pk.to_bytes(); -// let stake = self.mapping.get(&encoded).map(|val| val.get()); -// stake.and_then(NonZeroU64::new) -// } -// } - -// /// the vrf implementation -// #[derive(Derivative)] -// #[derivative(Debug, Eq, PartialEq)] -// pub struct VrfImpl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> -// where -// VRF: Vrf + Sync + Send, -// TYPES: NodeType, -// { -// /// the stake table -// #[derivative(Debug = "ignore", PartialEq = "ignore")] -// stake_table: VRFStakeTable, -// /// the proof params -// #[derivative(Debug = "ignore", PartialEq = "ignore")] -// proof_parameters: VRF::PublicParameter, -// /// the rng -// #[derivative(PartialEq = "ignore")] -// prng: std::sync::Arc>, -// /// the committee parameter -// sortition_parameter: NonZeroU64, -// /// the chain commitment seed -// chain_seed: [u8; 32], -// /// pdf cache -// #[derivative(PartialEq = "ignore")] -// _sortition_cache: std::sync::Arc>>>, - -// /// phantom data -// _pd: PhantomData<(TYPES, LEAF, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS)>, -// } - -// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> Clone -// for VrfImpl -// where -// VRF: Vrf + Sync + Send, -// TYPES: NodeType, -// { -// fn clone(&self) -> Self { -// Self { -// stake_table: self.stake_table.clone(), -// proof_parameters: (), -// prng: self.prng.clone(), -// sortition_parameter: self.sortition_parameter, -// chain_seed: self.chain_seed, -// _sortition_cache: Arc::default(), -// _pd: PhantomData, -// } -// } -// } - -// /// TODO doc me -// #[derive(Serialize, Deserialize, Clone)] -// pub struct VRFVoteToken { -// /// The public key assocaited with this token -// pub pub_key: PUBKEY, -// /// The list of signatures -// pub proof: PROOF, -// /// The number of signatures that are valid -// /// TODO (ct) this should be the sorition outbput -// pub count: NonZeroU64, -// } - -// impl Hash for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn hash(&self, state: &mut H) { -// bincode_opts().serialize(&self.pub_key).unwrap().hash(state); -// bincode_opts().serialize(&self.proof).unwrap().hash(state); -// self.count.hash(state); -// } -// } - -// impl Debug for VRFVoteToken { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// f.debug_struct("VRFVoteToken") -// .field("pub_key", &std::any::type_name::()) -// .field("proof", &std::any::type_name::()) -// .field("count", &self.count) -// .finish() -// } -// } - -// impl PartialEq for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn eq(&self, other: &Self) -> bool { -// self.count == other.count -// && bincode_opts().serialize(&self.pub_key).unwrap() -// == bincode_opts().serialize(&other.pub_key).unwrap() -// && bincode_opts().serialize(&self.proof).unwrap() -// == bincode_opts().serialize(&other.proof).unwrap() -// } -// } - -// impl VoteToken for VRFVoteToken -// where -// PUBKEY: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, -// PROOF: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, -// { -// fn vote_count(&self) -> NonZeroU64 { -// self.count -// } -// } - -// impl Committable for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn commit(&self) -> Commitment { -// RawCommitmentBuilder::new(std::any::type_name::()) -// .u64(self.count.get()) -// .var_size_bytes(bincode_opts().serialize(&self.pub_key).unwrap().as_slice()) -// .var_size_bytes(bincode_opts().serialize(&self.proof).unwrap().as_slice()) -// .finalize() -// } - -// fn tag() -> String { -// tag::VRF_VOTE_TOKEN.to_string() -// } -// } - -// // KEY is VRFPubKey -// impl> -// Membership for VrfImpl -// where -// SIGSCHEME: SignatureScheme + Sync + Send + 'static, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// VRF: Vrf< -// PublicParameter = (), -// Input = Vec, -// Output = Vec, -// PublicKey = SIGSCHEME::VerificationKey, -// SecretKey = SIGSCHEME::SigningKey, -// > + Sync -// + Send -// + 'static, -// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, -// VRF::PublicParameter: Sync + Send, -// VRFHASHER: digest::Digest + Clone + Sync + Send + 'static, -// VRFPARAMS: Sync + Send + Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// TYPES: NodeType< -// VoteTokenType = VRFVoteToken, -// ElectionConfigType = VRFStakeTableConfig, -// SignatureKey = JfPubKey, -// >, -// { -// // pubkey -> unit of stake -// type StakeTable = VRFStakeTable; - -// // FIXED STAKE -// // just return the state -// fn get_stake_table( -// &self, -// _view_number: TYPES::Time, -// _state: &TYPES::StateType, -// ) -> Self::StakeTable { -// self.stake_table.clone() -// } - -// fn get_leader(&self, view_number: TYPES::Time) -> JfPubKey { -// // TODO fst2 (ct) this is round robin, we should make this dependent on -// // the VRF + some source of randomness - -// // TODO for now do by stake table of how much stake each -// // participant has -// let mapping = &self.stake_table.mapping; -// let index = ((*view_number) as usize) % mapping.len(); -// let encoded = mapping.keys().nth(index).unwrap(); -// SignatureKey::from_bytes(encoded).unwrap() -// } - -// // what this is doing: -// // - -// fn make_vote_token( -// // TODO see if we can make this take &mut self -// // because we're using a mutable prng -// &self, -// view_number: TYPES::Time, -// private_key: &(SIGSCHEME::SigningKey, SIGSCHEME::VerificationKey), -// ) -> Result, ElectionError> { -// let pub_key = JfPubKey::::from_native(private_key.1.clone()); -// let Some(replicas_stake) = self.stake_table.get_stake(&pub_key) else { return Ok(None) }; - -// let view_seed = generate_view_seed::(view_number, &self.chain_seed); - -// let proof = Self::internal_get_vrf_proof( -// &private_key.0, -// &self.proof_parameters, -// &mut self.prng.lock().unwrap(), -// &view_seed, -// )?; - -// let selected_stake = Self::internal_get_sortition_for_proof( -// &self.proof_parameters, -// &proof, -// self.stake_table.get_all_stake(), -// replicas_stake, -// self.sortition_parameter, -// ); - -// match selected_stake { -// Some(count) => { -// // TODO (ct) this can fail, return Result::Err -// let proof = VRF::prove( -// &self.proof_parameters, -// &private_key.0, -// &view_seed, -// &mut *self.prng.lock().unwrap(), -// ) -// .unwrap(); - -// Ok(Some(VRFVoteToken { -// pub_key: private_key.1.clone(), -// proof, -// count, -// })) -// } -// None => Ok(None), -// } -// } - -// fn validate_vote_token( -// &self, -// view_number: TYPES::Time, -// pub_key: JfPubKey, -// token: Checked, -// ) -> Result, ElectionError> { -// match token { -// Checked::Unchecked(token) => { -// let stake: Option = self.stake_table.get_stake(&pub_key); -// let view_seed = -// generate_view_seed::(view_number, &self.chain_seed); -// if let Some(stake) = stake { -// Self::internal_check_sortition( -// &pub_key.pk, -// &self.proof_parameters, -// &token.proof, -// self.stake_table.get_all_stake(), -// stake, -// self.sortition_parameter, -// token.count, -// &view_seed, -// ) -// .map(|c| match c { -// Checked::Inval(_) => Checked::Inval(token), -// Checked::Valid(_) => Checked::Valid(token), -// Checked::Unchecked(_) => Checked::Unchecked(token), -// }) -// } else { -// // TODO better error -// Err(ElectionError::StubError) -// } -// } -// already_checked => Ok(already_checked), -// } -// } - -// fn create_election(keys: Vec>, config: TYPES::ElectionConfigType) -> Self { -// // This all needs to be refactored. For one thing, having the stake table - even an initial -// // stake table - hardcoded like this is flat-out broken. This is, obviously, an artifact -// let genesis_seed = [0u8; 32]; -// VrfImpl::with_initial_stake(keys, &config, genesis_seed) -// } - -// fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { -// let mut stake = Vec::new(); -// let units_of_stake_per_node = NonZeroU64::new(100).unwrap(); -// for _ in 0..num_nodes { -// stake.push(units_of_stake_per_node); -// } -// VRFStakeTableConfig { -// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: stake, -// } -// } - -// fn success_threshold(&self) -> NonZeroU64 { -// NonZeroU64::new(((u64::from(self.sortition_parameter) * 2) / 3) + 1).unwrap() -// } - -// fn failure_threshold(&self) -> NonZeroU64 { -// NonZeroU64::new(((u64::from(self.sortition_parameter)) / 3) + 1).unwrap() -// } -// /// TODO if we ever come back to using this, we'll need to change this -// /// this stub is incorrect as it stands right now -// fn get_committee( -// &self, -// _view_number: ::Time, -// ) -> std::collections::BTreeSet<::SignatureKey> { -// self.stake_table -// .mapping -// .keys() -// .clone() -// .filter_map(::SignatureKey::from_bytes) -// .collect() -// } -// } - -// /// checks that the expected aomunt of stake matches the VRF output -// /// TODO this can be optimized most likely -// fn check_bin_idx( -// expected_amount_of_stake: u64, -// replicas_stake: u64, -// total_stake: u64, -// sortition_parameter: u64, -// unnormalized_seed: &[u8; 32], -// cache: &mut HashMap>, -// ) -> Option { -// let bin_idx = find_bin_idx( -// replicas_stake, -// total_stake, -// sortition_parameter, -// unnormalized_seed, -// cache, -// ); -// bin_idx.map(|idx| idx == NonZeroU64::new(expected_amount_of_stake).unwrap()) -// } - -// /// generates the seed from algorand paper -// /// baseed on `view_number` and a constant as of now, but in the future will be other things -// /// this is a stop-gap -// fn generate_view_seed( -// view_number: TYPES::Time, -// vrf_seed: &[u8; 32], -// ) -> [u8; 32] { -// let mut hasher = HASHER::new(); -// hasher.update(vrf_seed); -// hasher.update(view_number.deref().to_le_bytes()); -// let mut output = [0u8; 32]; -// output.copy_from_slice(hasher.finalize().as_ref()); -// output -// } - -// /// represents a binomial query made by sortition -// /// `B(stake_attempt; replicas_stake; sortition_parameter / total_stake)` -// #[derive(Hash, Eq, PartialEq, Clone, Debug)] -// pub struct BinomialQuery { -// /// the number of heads -// stake_attempt: u64, -// /// the total number of coin flips -// replicas_stake: u64, -// /// the total amount of stake -// total_stake: u64, -// /// the sortition parameter -// sortition_parameter: u64, -// } - -// impl BinomialQuery { -// /// get the committee parameter -// /// for this query -// #[must_use] -// pub fn get_p(&self) -> Ratio { -// let sortition_parameter_big: BigUint = BigUint::from(self.sortition_parameter); -// let total_stake_big: BigUint = BigUint::from(self.total_stake); -// Ratio::new(sortition_parameter_big, total_stake_big) -// } -// } - -// #[instrument] -// fn calculate_threshold_from_cache( -// previous_calculation: Option<(BinomialQuery, Ratio)>, -// query: BinomialQuery, -// ) -> Option> { -// if let Some((previous_query, previous_result)) = previous_calculation { -// let expected_previous_query = BinomialQuery { -// stake_attempt: query.stake_attempt - 1, -// ..query -// }; -// if previous_query == expected_previous_query { -// let permutation = Ratio::new( -// BigUint::from(query.replicas_stake - query.stake_attempt + 1), -// BigUint::from(query.stake_attempt), -// ); -// let p = query.get_p(); -// assert!(p.numer() < p.denom()); -// let reciprocal = Ratio::recip(&(Ratio::from_integer(BigUint::from(1_u32)) - p.clone())); -// let result = previous_result * p * reciprocal * permutation; -// assert!(result.numer() < result.denom()); - -// return Some(result); -// } -// } -// calculate_threshold(query) -// } - -// // Calculates B(j; w; p) where B means bernoulli distribution. -// // That is: run w trials, with p probability of success for each trial, and return the probability -// // of j successes. -// // p = tau / W, where tau is the sortition parameter (controlling committee size) -// // this is the only usage of W and tau -// // -// // Translation: -// // stake_attempt: our guess at what the stake might be. This is j -// // replicas_stake: the units of stake owned by the replica. This is w -// // total_stake: the units of stake owned in total. This is W -// // sorition_parameter: the parameter controlling the committee size. This is tau -// // -// // TODO (ct) better error handling -// // returns none if one of our calculations fails -// // -// // TODO keep data around from last iteration so less calculation is needed -// // TODO test this "correct/simple" implementation against any optimized version -// #[instrument] -// // fn calculate_threshold(stake_attempt: u32, replicas_stake: u64, total_stake: u64, sortition_parameter: u64) -> Option> { -// fn calculate_threshold(query: BinomialQuery) -> Option> { -// let stake_attempt = query.stake_attempt; -// tracing::info!("Running calculate threshold"); -// // TODO (ct) better error handling -// if stake_attempt > query.replicas_stake { -// error!("j is larger than amount of stake we are allowed"); -// return None; -// } - -// let sortition_parameter_big: BigUint = BigUint::from(query.sortition_parameter); -// let total_stake_big: BigUint = BigUint::from(query.total_stake); -// let one_big = BigUint::from(1_u32); - -// // this is the p parameter for the bernoulli distribution -// let p = Ratio::new(sortition_parameter_big, total_stake_big); - -// assert!(p.numer() <= p.denom()); - -// info!("p is {p:?}"); - -// // number of tails in bernoulli -// let failed_num = query.replicas_stake - stake_attempt; - -// // TODO cancel things out (avoid calculating factorial) -// // TODO can just do division -// let num_permutations = Ratio::new( -// factorial(query.replicas_stake), -// factorial(stake_attempt) * factorial(failed_num), -// ); - -// info!("num permutations is {num_permutations:?}, failed_num is {failed_num:?}"); - -// let one = Ratio::from_integer(one_big); - -// // TODO can keep results from last try -// let result = num_permutations -// * (p.pow(i32::try_from(stake_attempt).ok()?) -// * (one - p).pow(i32::try_from(failed_num).ok()?)); - -// assert!(result.numer() < result.denom()); - -// info!("result is is {result:?}"); - -// Some(result) -// } - -// /// compute i! as a biguint -// fn factorial(mut i: u64) -> BigUint { -// if i == 0 { -// return BigUint::from(1u32); -// } - -// let mut result = BigUint::from(1u32); -// while i > 0 { -// result *= i; -// i -= 1; -// } -// result -// } - -// /// find the amount of stake we rolled. -// /// NOTE: in the future this requires a view numb -// /// Returns None if zero stake was rolled -// #[instrument] -// fn find_bin_idx( -// replicas_stake: u64, -// total_stake: u64, -// sortition_parameter: u64, -// unnormalized_seed: &[u8; 32], -// cache: &mut HashMap>, -// ) -> Option { -// let unnormalized_seed = BigUint::from_bytes_le(unnormalized_seed); -// let normalized_seed = Ratio::new(unnormalized_seed, BigUint::from(2_u32).pow(256)); -// assert!(normalized_seed.numer() < normalized_seed.denom()); -// let mut j: u64 = 0; - -// // [j, j+1) -// // [cdf(j),cdf(j+1)) - -// // left_threshold corresponds to the sum of all bernoulli distributions -// // from i in 0 to j: B(i; replicas_stake; p). Where p is calculated later and corresponds to -// // algorands paper -// let mut left_threshold = Ratio::from_integer(BigUint::from(0u32)); - -// loop { -// // check cache - -// // if cache miss, feed in with previous val from cache -// // that *probably* exists - -// assert!(left_threshold.numer() < left_threshold.denom()); -// let query = BinomialQuery { -// stake_attempt: j + 1, -// replicas_stake, -// total_stake, -// sortition_parameter, -// }; - -// let bin_val = { -// // we already computed this value -// if let Some(result) = cache.get(&query) { -// result.clone() -// } else { -// // we haven't computed this value, but maybe -// // we already computed the previous value - -// let mut maybe_old_query = query.clone(); -// maybe_old_query.stake_attempt -= 1; -// let old_result = cache -// .get(&maybe_old_query) -// .map(|x| (maybe_old_query, x.clone())); -// let result = calculate_threshold_from_cache(old_result, query.clone())?; -// cache.insert(query, result.clone()); -// result -// } -// }; - -// // corresponds to right range from apper -// let right_threshold = left_threshold + bin_val.clone(); - -// // debugging info. Unnecessary -// { -// let right_threshold_float = ToPrimitive::to_f64(&right_threshold.clone()); -// let bin_val_float = ToPrimitive::to_f64(&bin_val.clone()); -// let normalized_seed_float = ToPrimitive::to_f64(&normalized_seed.clone()); -// info!("rightthreshold: {right_threshold_float:?}, bin: {bin_val_float:?}, seed: {normalized_seed_float:?}"); -// } - -// // from i in 0 to j + 1: B(i; replicas_stake; p) -// if normalized_seed < right_threshold { -// match j { -// 0 => return None, -// _ => return Some(NonZeroU64::new(j).unwrap()), -// } -// } -// left_threshold = right_threshold; -// j += 1; -// } -// } - -// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> -// VrfImpl -// where -// SIGSCHEME: SignatureScheme + Sync + Send, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// VRF: Vrf< -// PublicParameter = (), -// Input = [u8; 32], -// Output = [u8; 32], -// PublicKey = SIGSCHEME::VerificationKey, -// SecretKey = SIGSCHEME::SigningKey, -// > + Sync -// + Send, -// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, -// VRF::PublicParameter: Sync + Send, -// VRFHASHER: digest::Digest + Clone + Sync + Send, -// VRFPARAMS: Sync + Send + Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// TYPES: NodeType, -// { -// /// create stake table with this initial stake -// /// # Panics -// /// TODO -// #[must_use] -// pub fn with_initial_stake( -// known_nodes: Vec>, -// config: &VRFStakeTableConfig, -// genesis_seed: [u8; 32], -// ) -> Self { -// assert_eq!(known_nodes.iter().len(), config.distribution.len()); -// let key_with_stake = known_nodes -// .into_iter() -// .map(|x| x.to_bytes()) -// .zip(config.distribution.clone()) -// .collect(); -// VrfImpl { -// stake_table: { -// let st = VRFStakeTable { -// mapping: key_with_stake, -// total_stake: NonZeroU64::new(config.distribution.iter().map(|x| x.get()).sum()) -// .unwrap(), -// _pd: PhantomData, -// }; -// st -// }, -// proof_parameters: (), -// chain_seed: genesis_seed, -// prng: Arc::new(Mutex::new(ChaChaRng::from_seed(Default::default()))), -// _pd: PhantomData, -// sortition_parameter: config.sortition_parameter, -// _sortition_cache: Arc::default(), -// } -// } - -// /// stateless delegate for VRF proof generation -// /// # Errors -// /// - -// fn internal_get_vrf_proof( -// private_key: &SIGSCHEME::SigningKey, -// proof_param: &VRF::PublicParameter, -// to_refactor: &mut rand_chacha::ChaChaRng, -// vrf_in_seed: &VRF::Input, -// ) -> Result { -// VRF::prove(proof_param, private_key, vrf_in_seed, to_refactor) -// .map_err(|_| ElectionError::StubError) -// } - -// /// stateless delegate for VRF sortition generation -// fn internal_get_sortition_for_proof( -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// ) -> Option { -// // TODO (ct) this can fail, return result::err -// let hash = VRF::evaluate(proof_param, proof).unwrap(); -// let mut cache: HashMap> = HashMap::new(); - -// find_bin_idx( -// u64::from(voter_stake), -// u64::from(total_stake), -// sortition_parameter.into(), -// &hash, -// &mut cache, -// ) -// } - -// /// stateless delegate for VRF sortition confirmation -// /// # Errors -// /// if the proof is malformed -// #[allow(clippy::too_many_arguments)] -// fn internal_check_sortition( -// public_key: &SIGSCHEME::VerificationKey, -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// sortition_claim: NonZeroU64, -// vrf_in_seed: &VRF::Input, -// ) -> Result, hotshot_types::traits::election::ElectionError> { -// if let Ok(true) = VRF::verify(proof_param, proof, public_key, vrf_in_seed) { -// let seed = VRF::evaluate(proof_param, proof).map_err(|_| ElectionError::StubError)?; -// if let Some(res) = check_bin_idx( -// u64::from(sortition_claim), -// u64::from(voter_stake), -// u64::from(total_stake), -// u64::from(sortition_parameter), -// &seed, -// &mut HashMap::new(), -// ) { -// if res { -// Ok(Checked::Valid(())) -// } else { -// Ok(Checked::Inval(())) -// } -// } else { -// Ok(Checked::Unchecked(())) -// } -// } else { -// Ok(Checked::Inval(())) -// } -// } - -// /// Stateless method to produce VRF proof and sortition for a given view number -// /// # Errors -// /// -// pub fn get_sortition_proof( -// private_key: &SIGSCHEME::SigningKey, -// proof_param: &VRF::PublicParameter, -// chain_seed: &VRF::Input, -// view_number: TYPES::Time, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// ) -> Result<(VRF::Proof, Option), hotshot_types::traits::election::ElectionError> -// { -// let mut rng = ChaChaRng::from_seed(Default::default()); // maybe use something else that isn't deterministic? -// let view_seed = generate_view_seed::(view_number, chain_seed); -// let proof = Self::internal_get_vrf_proof(private_key, proof_param, &mut rng, &view_seed)?; -// let sortition = Self::internal_get_sortition_for_proof( -// proof_param, -// &proof, -// total_stake, -// voter_stake, -// sortition_parameter, -// ); -// Ok((proof, sortition)) -// } - -// /// Stateless method to verify VRF proof and sortition for a given view number -// /// # Errors -// /// -// #[allow(clippy::too_many_arguments)] -// pub fn check_sortition_proof( -// public_key: &JfPubKey, -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// sortition_claim: NonZeroU64, -// chain_seed: &VRF::Input, -// view_number: TYPES::Time, -// ) -> Result { -// let view_seed = generate_view_seed::(view_number, chain_seed); -// Self::internal_check_sortition( -// &public_key.pk, -// proof_param, -// proof, -// total_stake, -// voter_stake, -// sortition_parameter, -// sortition_claim, -// &view_seed, -// ) -// .map(|c| matches!(c, Checked::Valid(_))) -// } -// } - -// impl> TestableElection -// for VrfImpl -// where -// TYPES: NodeType< -// VoteTokenType = VRFVoteToken< -// BLSVerKey, -// BLSSignature, -// >, -// ElectionConfigType = VRFStakeTableConfig, -// SignatureKey = JfPubKey, -// >, -// { -// fn generate_test_vote_token() -> TYPES::VoteTokenType { -// VRFVoteToken { -// count: NonZeroU64::new(1234).unwrap(), -// proof: BLSSignature::default(), -// pub_key: BLSVerKey::default(), -// } -// } -// } - -// /// configuration specifying the stake table -// #[derive(Clone, Serialize, Deserialize, core::fmt::Debug)] -// pub struct VRFStakeTableConfig { -// /// the committee size parameter -// pub sortition_parameter: NonZeroU64, -// /// the ordered distribution of stake across nodes -// pub distribution: Vec, -// } - -// impl Default for VRFStakeTableConfig { -// fn default() -> Self { -// VRFStakeTableConfig { -// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: Vec::new(), -// } -// } -// } - -// impl ElectionConfig for VRFStakeTableConfig {} - -// Tests have been commented out, so `mod tests` isn't used. -// #[cfg(test)] -// mod tests { -// use super::*; -// use ark_bls12_381::Parameters as Param381; -// use ark_std::test_rng; - -// use blake3::Hasher; -// use hotshot_types::{ -// data::ViewNumber, -// traits::{ -// block_contents::dummy::{DummyBlock, DummyTransaction}, -// consensus_type::validating_consensus::ValidatingConsensus, -// state::dummy::DummyState, -// }, -// }; -// use jf_primitives::{ -// signatures::{ -// bls::{BLSSignature, BLSVerKey}, -// BLSSignatureScheme, -// }, -// vrf::blsvrf::BLSVRFScheme, -// }; -// use std::{num::NonZeroUsize, time::Duration}; - -// #[derive( -// Copy, -// Clone, -// Debug, -// Default, -// Hash, -// PartialEq, -// Eq, -// PartialOrd, -// Ord, -// serde::Serialize, -// serde::Deserialize, -// )] -// struct TestTypes; -// impl NodeType for TestTypes { -// // TODO (da) can this be SequencingConsensus? -// type ConsensusType = ValidatingConsensus; -// type Time = ViewNumber; -// type BlockType = DummyBlock; -// type SignatureKey = JfPubKey; -// type VoteTokenType = VRFVoteToken< -// BLSVerKey, -// BLSSignature, -// >; -// type Transaction = DummyTransaction; -// type ElectionConfigType = VRFStakeTableConfig; -// type StateType = DummyState; -// } - -// fn gen_vrf_impl>( -// num_nodes: usize, -// ) -> ( -// VrfImpl< -// TestTypes, -// LEAF, -// BLSSignatureScheme, -// BLSVRFScheme, -// Hasher, -// Param381, -// >, -// Vec<( -// jf_primitives::signatures::bls::BLSSignKey, -// jf_primitives::signatures::bls::BLSVerKey, -// )>, -// ) { -// let mut known_nodes = Vec::new(); -// let mut keys = Vec::new(); -// let rng = &mut test_rng(); -// let mut stake_distribution = Vec::new(); -// let stake_per_node = NonZeroU64::new(100).unwrap(); -// let genesis_seed = [0u8; 32]; -// for _i in 0..num_nodes { -// let (sk, pk) = BLSSignatureScheme::::key_gen(&(), rng).unwrap(); -// keys.push((sk.clone(), pk.clone())); -// known_nodes.push(JfPubKey::from_native(pk.clone())); -// stake_distribution.push(stake_per_node); -// } -// let stake_table = VrfImpl::with_initial_stake( -// known_nodes, -// &VRFStakeTableConfig { -// sortition_parameter: std::num::NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: stake_distribution, -// }, -// genesis_seed, -// ); -// (stake_table, keys) -// } - -// pub fn check_if_valid(token: &Checked) -> bool { -// match token { -// Checked::Valid(_) => true, -// Checked::Inval(_) | Checked::Unchecked(_) => false, -// } -// } - -// // #[test] -// // pub fn test_sortition() { -// // setup_logging(); -// // let (vrf_impl, keys) = gen_vrf_impl::>(10); -// // let views = 100; - -// // for view in 0..views { -// // for (node_idx, (sk, pk)) in keys.iter().enumerate() { -// // let token_result = vrf_impl -// // .make_vote_token(ViewNumber::new(view), &(sk.clone(), pk.clone())) -// // .unwrap(); -// // match token_result { -// // Some(token) => { -// // let count = token.count; -// // let result = vrf_impl -// // .validate_vote_token( -// // ViewNumber::new(view), -// // JfPubKey::from_native(pk.clone()), -// // Checked::Unchecked(token), -// // ) -// // .unwrap(); -// // let result_is_valid = check_if_valid(&result); -// // error!("view {view:?}, node_idx {node_idx:?}, stake {count:?} "); -// // assert!(result_is_valid); -// // } -// // _ => continue, -// // } -// // } -// // } -// // } - -// #[test] -// pub fn test_factorial() { -// assert_eq!(factorial(0), BigUint::from(1u32)); -// assert_eq!(factorial(1), BigUint::from(1u32)); -// assert_eq!(factorial(2), BigUint::from(2u32)); -// assert_eq!(factorial(3), BigUint::from(6u32)); -// assert_eq!(factorial(4), BigUint::from(24u32)); -// assert_eq!(factorial(5), BigUint::from(120u32)); -// } - -// // TODO add failure case - -// #[test] -// fn network_config_is_serializable() { -// // validate that `RunResults` can be serialized -// // Note that there is currently an issue with `VRFPubKey` where it can't be serialized with toml -// // so instead we only test with serde_json -// let key = -// as TestableSignatureKey>::generate_test_key(1); -// let pub_key = JfPubKey::::from_private(&key); -// let mut config = hotshot_centralized_server::NetworkConfig { -// config: hotshot_types::HotShotConfig { -// election_config: Some(super::VRFStakeTableConfig { -// distribution: vec![NonZeroU64::new(1).unwrap()], -// sortition_parameter: NonZeroU64::new(1).unwrap(), -// }), -// known_nodes: vec![pub_key], -// execution_type: hotshot_types::ExecutionType::Incremental, -// total_nodes: NonZeroUsize::new(1).unwrap(), -// min_transactions: 1, -// max_transactions: NonZeroUsize::new(1).unwrap(), -// next_view_timeout: 1, -// timeout_ratio: (1, 1), -// round_start_delay: 1, -// start_delay: 1, -// num_bootstrap: 1, -// propose_min_round_time: Duration::from_secs(1), -// propose_max_round_time: Duration::from_secs(1), -// }, -// ..Default::default() -// }; -// serde_json::to_string(&config).unwrap(); -// assert!(toml::to_string(&config).is_err()); - -// // validate that this is indeed a `pub_key` issue -// config.config.known_nodes.clear(); -// serde_json::to_string(&config).unwrap(); -// toml::to_string(&config).unwrap(); -// } -// } diff --git a/types/src/lib.rs b/types/src/lib.rs index 54739106c3..ff296cf28d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -21,6 +21,7 @@ pub mod message; pub mod traits; pub mod utils; pub mod vote; +pub mod vote2; /// the type of consensus to run. Either: /// wait for a signal to start a view, /// or constantly run From 134d595a83fa0faf9094ecc4ba927c6fe46d0677 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 27 Oct 2023 16:18:39 -0400 Subject: [PATCH 0277/1393] Add files --- types/src/vote2.rs | 156 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 types/src/vote2.rs diff --git a/types/src/vote2.rs b/types/src/vote2.rs new file mode 100644 index 0000000000..a408284043 --- /dev/null +++ b/types/src/vote2.rs @@ -0,0 +1,156 @@ +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + num::NonZeroU64, +}; + +use bincode::Options; +use bitvec::vec::BitVec; +use commit::CommitmentBounds; +use either::Either; +use ethereum_types::U256; +use hotshot_utils::bincode::bincode_opts; +use tracing::error; + +use crate::{ + traits::{ + election::Membership, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + }, +}; + +pub trait Vote2 { + type Membership: Membership; + type Commitment: CommitmentBounds; + + fn get_signature(&self) -> EncodedSignature; + fn get_data_commitment(&self) -> Self::Commitment; + fn get_signing_key(&self) -> TYPES::SignatureKey; + // fn create_signed_vote(Self::Commitment, Self::Membership) ?? +} + +pub trait ViewNumber { + fn get_view_number(&self) -> TYPES::Time; +} + +pub trait Certificate2 { + type Membership: Membership; + type Commitment: CommitmentBounds; + + fn create_signed_certificate( + data_commitment: Self::Commitment, + sig: ::QCType, + ) -> Self; + fn is_valid_cert(&self) -> bool; + fn threshold() -> u64; + fn get_data_commitment(&self) -> Self::Commitment; +} + +pub struct VoteAccumulator2< + TYPES: NodeType, + VOTE: Vote2, + CERT: Certificate2, +> { + /// Map of all signatures accumlated so far + pub vote_outcomes: VoteMap2, + /// A quorum's worth of stake, generally 2f + 1 + pub success_threshold: NonZeroU64, + /// A list of valid signatures for certificate aggregation + pub sig_lists: Vec<::PureAssembledSignatureType>, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + pub signers: BitVec, + /// Phantom data to specify the vote this accumulator is for + pub phantom: PhantomData<(TYPES, VOTE, CERT)>, +} + +impl< + TYPES: NodeType, + VOTE: Vote2, + CERT: Certificate2, + > VoteAccumulator2 +{ + fn accumulate(mut self, vote: &VOTE, membership: &VOTE::Membership) -> Either { + let key = vote.get_signing_key(); + + let vote_commitment = vote.get_data_commitment(); + if !key.validate( + &vote.get_signature(), + &bincode_opts().serialize(&vote_commitment).unwrap(), + ) { + error!("Vote data is {:?}", vote.get_data_commitment()); + error!("Invalid vote! Data"); + return Either::Left(self); + } + + // TODO: Lookup the actual stake + let stake_table_entry: <::SignatureKey as SignatureKey>::StakeTableEntry = key.get_stake_table_entry(1u64); + let stake_table = membership.get_committee_qc_stake_table(); + let vote_node_id = stake_table + .iter() + .position(|x| *x == stake_table_entry.clone()) + .unwrap(); + + let encoded_key = key.to_bytes(); + + // Deserialize the signature so that it can be assembeld into a QC + // TODO ED Update this once we've gotten rid of EncodedSignature + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + + let (total_stake_casted, total_vote_map) = self + .vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (0, BTreeMap::new())); + + // Check for duplicate vote + // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey + // Have to do this because SignatureKey is not hashable + if total_vote_map.contains_key(&encoded_key) { + return Either::Left(self); + } + + if self.signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(self); + } + self.signers.set(vote_node_id, true); + self.sig_lists.push(original_signature); + + // TODO: Get the stake from the stake table entry. + *total_stake_casted += 1; + total_vote_map.insert( + encoded_key.clone(), + (vote.get_signature(), vote.get_data_commitment()), + ); + + if *total_stake_casted >= u64::from(CERT::threshold()) { + // Assemble QC + let real_qc_pp = ::get_public_parameter( + stake_table.clone(), + U256::from(self.success_threshold.get()), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + self.signers.as_bitslice(), + &self.sig_lists[..], + ); + + let cert = CERT::create_signed_certificate(vote.get_data_commitment(), real_qc_sig); + return Either::Right(cert); + } + Either::Left(self) + } +} + +/// Mapping of commitments to vote tokens by key. +type VoteMap2 = HashMap< + COMMITMENT, + ( + u64, + BTreeMap, + ), +>; From a8059f30c28d89d3f4e9b7d0c68689b1aed8dde3 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 27 Oct 2023 16:50:49 -0400 Subject: [PATCH 0278/1393] Documatation and linting --- types/src/traits/election.rs | 2 +- types/src/vote2.rs | 45 ++++++++++++++++++++++++++---------- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fa191d32b5..cff1f06dca 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -204,7 +204,7 @@ where fn genesis() -> Self; } -/// A protocol for determining membership in and participating in a ccommittee. +/// A protocol for determining membership in and participating in a committee. pub trait Membership: Clone + Debug + Eq + PartialEq + Send + Sync + 'static { diff --git a/types/src/vote2.rs b/types/src/vote2.rs index a408284043..3e452c102d 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -1,7 +1,8 @@ +//! Vote, Accumulator, and Certificate Types + use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, - num::NonZeroU64, }; use bincode::Options; @@ -12,41 +13,60 @@ use ethereum_types::U256; use hotshot_utils::bincode::bincode_opts; use tracing::error; -use crate::{ - traits::{ - election::Membership, - node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, - }, +use crate::traits::{ + election::Membership, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, }; +/// A simple vote that has a signer and commitment to the data voted on. pub trait Vote2 { + /// The membership of those that send this vote type type Membership: Membership; + /// Type of data commitment this vote uses. type Commitment: CommitmentBounds; + /// Get the signature of the vote sender fn get_signature(&self) -> EncodedSignature; + /// Gets the Data commitment the vote references fn get_data_commitment(&self) -> Self::Commitment; + + /// Gets the public signature key of the votes creator/sender fn get_signing_key(&self) -> TYPES::SignatureKey; // fn create_signed_vote(Self::Commitment, Self::Membership) ?? } +/// Any type that is associated with a view pub trait ViewNumber { + /// Returns the view number the type refers to. fn get_view_number(&self) -> TYPES::Time; } +/// The certificate formed from the collection of signatures a committee. +/// The committee is defined by the `Membership` associated type. +/// The votes all must be over the `Commitment` associated type. pub trait Certificate2 { + /// Type that defines membership for voters on the certificate type Membership: Membership; + /// The data commitment this certificate certifies. type Commitment: CommitmentBounds; + /// Build a certificate from the data commitment and the quorum of signers fn create_signed_certificate( data_commitment: Self::Commitment, sig: ::QCType, ) -> Self; + + /// Checks if the cert is valid fn is_valid_cert(&self) -> bool; + /// Returns the amount of stake needed to create this certificate + // TODO: Make this a static ratio of the total stake of `Membership` fn threshold() -> u64; + /// Get the data commitment the certificate is referencing fn get_data_commitment(&self) -> Self::Commitment; } +/// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs pub struct VoteAccumulator2< TYPES: NodeType, VOTE: Vote2, @@ -54,13 +74,11 @@ pub struct VoteAccumulator2< > { /// Map of all signatures accumlated so far pub vote_outcomes: VoteMap2, - /// A quorum's worth of stake, generally 2f + 1 - pub success_threshold: NonZeroU64, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, - /// Phantom data to specify the vote this accumulator is for + /// Phantom data to specify the types this accumulator is for pub phantom: PhantomData<(TYPES, VOTE, CERT)>, } @@ -70,6 +88,9 @@ impl< CERT: Certificate2, > VoteAccumulator2 { + /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we + /// have accumulated enough votes to exceed the threshold for creating a certificate. + #[allow(dead_code)] fn accumulate(mut self, vote: &VOTE, membership: &VOTE::Membership) -> Either { let key = vote.get_signing_key(); @@ -126,11 +147,11 @@ impl< (vote.get_signature(), vote.get_data_commitment()), ); - if *total_stake_casted >= u64::from(CERT::threshold()) { + if *total_stake_casted >= CERT::threshold() { // Assemble QC let real_qc_pp = ::get_public_parameter( stake_table.clone(), - U256::from(self.success_threshold.get()), + U256::from(CERT::threshold()), ); let real_qc_sig = ::assemble( From 3e49db503628c0275b46d563ffea8eba5f384674 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 27 Oct 2023 17:53:29 -0700 Subject: [PATCH 0279/1393] Refactor leaf --- hotshot/src/demo.rs | 2 +- hotshot/src/lib.rs | 15 +- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 2 +- task-impls/src/consensus.rs | 113 +++++---- task-impls/src/da.rs | 11 +- task-impls/src/transactions.rs | 2 - task-impls/src/vid.rs | 2 +- testing/src/overall_safety_task.rs | 8 +- testing/src/task_helpers.rs | 7 +- testing/tests/consensus_task.rs | 2 +- testing/tests/da_task.rs | 4 +- testing/tests/network_task.rs | 4 +- testing/tests/vid_task.rs | 2 +- types/src/block_impl.rs | 4 +- types/src/consensus.rs | 86 ++++--- types/src/data.rs | 248 ++++--------------- types/src/traits/block_contents.rs | 8 +- types/src/traits/storage.rs | 2 - 19 files changed, 204 insertions(+), 320 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index f70c060701..000deb351b 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -12,7 +12,7 @@ use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ block_impl::{BlockPayloadError, VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::{AssembledSignature, QuorumCertificate}, - data::{fake_commitment, random_commitment, Leaf, LeafType, ViewNumber}, + data::{fake_commitment, random_commitment, LeafType, ViewNumber}, traits::{ election::Membership, node_implementation::NodeType, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ab753eaa08..7ebc80dafb 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -62,8 +62,8 @@ use hotshot_types::{ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, ViewSyncCertificate}, - consensus::{BlockStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, - data::{DAProposal, DeltasType, Leaf, LeafType, QuorumProposal}, + consensus::{Consensus, ConsensusMetricsValue, TransactionStore, View, ViewInner, ViewQueue}, + data::{DAProposal, Leaf, LeafType, QuorumProposal}, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, @@ -204,10 +204,13 @@ impl> SystemContext { ); let mut saved_leaves = HashMap::new(); - let mut saved_blocks = BlockStore::default(); + let mut saved_transaction_commitments = TransactionStore::default(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); - if let Ok(block) = anchored_leaf.get_deltas().try_resolve() { - saved_blocks.insert(block); + if !anchored_leaf.get_transanction_commitments().is_empty() { + saved_transaction_commitments.insert( + anchored_leaf.get_payload_commitment(), + anchored_leaf.get_transanction_commitments(), + ); } let start_view = anchored_leaf.get_view_number(); @@ -217,7 +220,7 @@ impl> SystemContext { cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), saved_leaves, - saved_blocks, + saved_transaction_commitments, // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4c0917f565..bbabfb1ca8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -243,7 +243,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 45c02bf6fe..5511e4d4cd 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -173,8 +173,8 @@ mod test { view_number, }, DummyBlock::random(rng), + DummyBlock::random(rng).transaction_commitments(), DummyState::random(rng), - rng.next_u64(), dummy_leaf_commit, Vec::new(), genesis_proposer_id(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b609d6717b..7d588c7a8d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -6,7 +6,7 @@ use async_std::task::JoinHandle; use bitvec::prelude::*; use commit::{Commitment, Committable}; use core::time::Duration; -use either::{Either, Left, Right}; +use either::Either; use futures::FutureExt; use hotshot_constants::LOOK_AHEAD; use hotshot_task::{ @@ -16,7 +16,7 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - block_impl::VIDBlockPayload, + block_impl::{VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, consensus::{Consensus, View}, data::{Leaf, LeafType, ProposalType, QuorumProposal}, @@ -30,7 +30,6 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, signature_key::SignatureKey, state::ConsensusTime, - BlockPayload, }, utils::{Terminator, ViewInner}, vote::{QuorumVote, QuorumVoteAccumulator, TimeoutVoteAccumulator, VoteType}, @@ -55,7 +54,7 @@ pub struct ConsensusTaskError {} /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where @@ -355,7 +354,7 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = Leaf, @@ -823,62 +822,62 @@ where if parent_view + 1 == view { current_chain_length += 1; if let Err(e) = consensus.visit_leaf_ancestors( - parent_view, - Terminator::Exclusive(old_anchor_view), - true, - |leaf| { - if !new_decide_reached { - if last_view_number_visited == leaf.view_number + 1 { - last_view_number_visited = leaf.view_number; - current_chain_length += 1; - if current_chain_length == 2 { - new_locked_view = leaf.view_number; - new_commit_reached = true; - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - new_decide_qc = Some(leaf.justify_qc.clone()); - } else if current_chain_length == 3 { - new_anchor_view = leaf.view_number; - new_decide_reached = true; - } - } else { - // nothing more to do here... we don't have a new chain extension - return false; + parent_view, + Terminator::Exclusive(old_anchor_view), + true, + |leaf| { + if !new_decide_reached { + if last_view_number_visited == leaf.view_number + 1 { + last_view_number_visited = leaf.view_number; + current_chain_length += 1; + if current_chain_length == 2 { + new_locked_view = leaf.view_number; + new_commit_reached = true; + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + new_decide_qc = Some(leaf.justify_qc.clone()); + } else if current_chain_length == 3 { + new_anchor_view = leaf.view_number; + new_decide_reached = true; } + } else { + // nothing more to do here... we don't have a new chain extension + return false; } - // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above - if new_decide_reached { - let mut leaf = leaf.clone(); - consensus + } + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above + if new_decide_reached { + let mut leaf = leaf.clone(); + consensus .metrics .last_synced_block_height .set(usize::try_from(leaf.get_height()).unwrap_or(0)); - // If the full block is available for this leaf, include it in the leaf - // chain that we send to the client. - if let Some(block) = - consensus.saved_blocks.get(leaf.get_deltas_commitment()) - { - if let Err(err) = leaf.fill_deltas(block.clone()) { - error!("unable to fill leaf {} with block {}, block will not be available: {}", - leaf.commit(), block.commit(), err); - } - } - - leaf_views.push(leaf.clone()); - for txn in leaf.transaction_commitments { - included_txns.insert(txn); - } + // If the full block is available for this leaf, include it in the leaf + // chain that we send to the client. + if let Some(comm) = consensus + .saved_transaction_commitments + .get(leaf.get_payload_commitment()) + { + leaf.fill_transaction_commitments(comm.clone()); + } + + leaf_views.push(leaf.clone()); + for txn in leaf.transaction_commitments { + included_txns.insert(txn); + } } - true - }, - ) { - error!("publishing view error"); - self.output_event_stream.publish(Event { + true + }, + ) { + error!("publishing view error"); + self.output_event_stream + .publish(Event { view_number: view, event: EventType::Error { error: e.into() }, - }).await; - } + }) + .await; + } } let included_txns_set: HashSet<_> = if new_decide_reached { @@ -1355,7 +1354,7 @@ where } let parent_leaf = leaf.clone(); - let parent_header = parent_leaf.block_header; + let parent_header = parent_leaf.block_header.clone(); let original_parent_hash = parent_leaf.commit(); @@ -1379,7 +1378,7 @@ where view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - block_header: TYPES::BlockHeader::new(*payload_commitment, parent_header.clone()), + block_header: TYPES::BlockHeader::new(*payload_commitment, &parent_header), transaction_commitments: HashSet::new(), rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), @@ -1391,7 +1390,7 @@ where .sign_validating_or_commitment_proposal::(&leaf.commit()); // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_header: TYPES::BlockHeader::new(*payload_commitment, parent_header.clone()), + block_header: TYPES::BlockHeader::new(*payload_commitment, &parent_header), view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), @@ -1422,7 +1421,7 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = Leaf, @@ -1472,7 +1471,7 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, >( diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 5271c4da90..f2f8237c2f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -236,9 +236,9 @@ where debug!( "Got a DA block with {} transactions!", - proposal.data.deltas.contained_transactions().len() + proposal.data.block_payload.transaction_commitments().len() ); - let payload_commitment = proposal.data.deltas.commit(); + let payload_commitment = proposal.data.block_payload.commit(); // ED Is this the right leader? let view_leader_key = self.committee_exchange.get_leader(view); @@ -287,7 +287,10 @@ where }); // Record the block we have promised to make available. - consensus.saved_blocks.insert(proposal.data.deltas); + consensus.saved_transaction_commitments.insert( + proposal.data.block_payload.commit(), + proposal.data.block_payload.transaction_commitments(), + ); } } } @@ -429,7 +432,7 @@ where .committee_exchange .sign_da_proposal(&payload_commitment); let data: DAProposal = DAProposal { - deltas: block.clone(), + block_payload: block.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number: view, }; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index cce883f9bd..4b5b356d4a 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -6,7 +6,6 @@ use async_compatibility_layer::{ use async_lock::RwLock; use bincode::config::Options; use commit::{Commitment, Committable}; -use either::{Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, @@ -23,7 +22,6 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::{ConsensusExchange, Membership, QuorumExchangeType}, node_implementation::{NodeImplementation, NodeType, QuorumEx}, - BlockPayload, }, }; use hotshot_utils::bincode::bincode_opts; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 5a602dbc88..63883ca897 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -357,7 +357,7 @@ where // Record the block we have promised to make available. // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_blocks.insert(proposal.data.deltas); + // consensus.saved_transaction_commitments.insert(proposal.data.block_payload); } } } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 535b5c1832..7bc4e83bdc 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -21,7 +21,7 @@ use hotshot_task::{ }; use hotshot_types::{ certificate::QuorumCertificate, - data::{DeltasType, LeafBlockPayload, LeafType}, + data::{LeafBlockPayload, LeafType}, error::RoundTimedoutState, event::{Event, EventType}, traits::node_implementation::NodeType, @@ -205,7 +205,7 @@ impl> RoundResult } } - let (state, block) = (leaf.get_state(), leaf.get_deltas()); + let (state, payload_commitment) = (leaf.get_state(), leaf.get_payload_commitment()); match self.state_map.entry(state.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { @@ -215,7 +215,7 @@ impl> RoundResult v.insert(1); } } - match self.block_map.entry(block.clone().payload_commitment()) { + match self.block_map.entry(payload_commitment) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; } @@ -291,7 +291,7 @@ impl> RoundResult // if neither, continue through let state_key = key.get_state(); - let block_key = key.get_deltas().payload_commitment(); + let block_key = key.get_payload_commitment(); if *self.block_map.get(&block_key).unwrap() == threshold && *self.state_map.get(&state_key).unwrap() == threshold diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 2077f2ab34..32a3d5f2cd 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,9 +1,10 @@ +use std::collections::HashSet; + use crate::{ node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; use commit::Committable; -use either::{Either::Left, Right}; use hotshot::{ certificate::QuorumCertificate, traits::{NodeImplementation, TestableNodeImplementation}, @@ -111,12 +112,12 @@ async fn build_quorum_proposal_and_signature( panic!("Failed to find high QC parent."); }; let parent_leaf = leaf.clone(); - let parent_header = parent_leaf.block_header; + let parent_header = parent_leaf.block_header.clone(); // every event input is seen on the event stream in the output. let block = ::genesis(); let payload_commitment = block.commit(); - let block_header = VIDBlockHeader::new(payload_commitment, parent_header); + let block_header = VIDBlockHeader::new(payload_commitment, &parent_header); let leaf = Leaf { view_number: ViewNumber::new(view), justify_qc: consensus.high_qc.clone(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 9526b89df2..7ddc824c19 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,6 +1,5 @@ use commit::Commitment; use commit::Committable; -use either::Right; use hotshot::{ tasks::add_consensus_task, types::{SignatureKey, SystemContextHandle}, @@ -23,6 +22,7 @@ use hotshot_types::{ }; use std::collections::HashMap; +use std::collections::HashSet; async fn build_vote( handle: &SystemContextHandle, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index fc91642c7c..3994c14b19 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -49,7 +49,7 @@ async fn test_da_task() { let signature = committee_exchange.sign_da_proposal(&block.commit()); let proposal = DAProposal { - deltas: block.clone(), + block_payload: block.clone(), view_number: ViewNumber::new(2), }; let message = Proposal { @@ -77,7 +77,7 @@ async fn test_da_task() { HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), 1, ); - output.insert(HotShotEvent::SendDABlockData(block.clone()), 1); + output.insert(HotShotEvent::SendPayloadCommitment(block.commit()), 1); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); let vote_token = committee_exchange .make_vote_token(ViewNumber::new(2)) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index d88ce95ba2..e9409bc69b 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -52,7 +52,7 @@ async fn test_network_task() { let signature = committee_exchange.sign_da_proposal(&block.commit()); let da_proposal = Proposal { data: DAProposal { - deltas: block.clone(), + block_payload: block.clone(), view_number: ViewNumber::new(2), }, signature, @@ -117,7 +117,7 @@ async fn test_network_task() { HotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, ); - output.insert(HotShotEvent::SendDABlockData(block), 1); + output.insert(HotShotEvent::SendPayloadCommitment(block.commit()), 1); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); output.insert( HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index d97f158c4f..3070f05cf8 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -48,7 +48,7 @@ async fn test_vid_task() { let signature = vid_exchange.sign_vid_proposal(&block.commit()); let proposal: DAProposal = DAProposal { - deltas: block.clone(), + block_payload: block.clone(), view_number: ViewNumber::new(2), }; let message = Proposal { diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 90aa210b6f..def1698078 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -123,7 +123,7 @@ impl BlockPayload for VIDBlockPayload { type Transaction = VIDTransaction; - fn contained_transactions(&self) -> HashSet> { + fn transaction_commitments(&self) -> HashSet> { self.transactions .iter() .map(commit::Committable::commit) @@ -143,7 +143,7 @@ pub struct VIDBlockHeader { impl BlockHeader for VIDBlockHeader { type Payload = VIDBlockPayload; - fn new(payload_commitment: Commitment, parent_header: Self) -> Self { + fn new(payload_commitment: Commitment, parent_header: &Self) -> Self { Self { block_number: parent_header.block_number + 1, payload_commitment, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index fad59f0508..e61685ad69 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -13,13 +13,14 @@ use crate::{ traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, + BlockPayload, }, utils::Terminator, }; -use commit::{Commitment, Committable}; +use commit::Commitment; use derivative::Derivative; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, sync::{Arc, Mutex}, }; use tracing::error; @@ -47,10 +48,11 @@ pub struct Consensus> { /// - includes the MOST RECENT decided leaf pub saved_leaves: CommitmentMap, - /// Saved blocks + /// Saved transaction commitments /// - /// Contains the full block for every leaf in `saved_leaves` if that block is available. - pub saved_blocks: BlockStore, + /// Contains the transaction commitments of the block for every leaf in `saved_leaves` if that + /// commitment set is available. + pub saved_transaction_commitments: TransactionStore, /// The `locked_qc` view number pub locked_view: TYPES::Time, @@ -297,7 +299,7 @@ impl> Consensus { } /// garbage collects based on state change - /// right now, this removes from both the `saved_blocks` + /// right now, this removes from both the `saved_transaction_commitments` /// and `state_map` fields of `Consensus` /// # Panics /// On inconsistent stored entries @@ -323,14 +325,15 @@ impl> Consensus { .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_payload_commitment()) .for_each(|block| { - self.saved_blocks.remove(block); + self.saved_transaction_commitments.remove(block); }); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_blocks.remove(removed.get_deltas_commitment()); + self.saved_transaction_commitments + .remove(removed.get_payload_commitment()); } }); self.state_map = self.state_map.split_off(&new_anchor_view); @@ -351,7 +354,10 @@ impl> Consensus { } } -/// Mapping from block payload commitments to full blocks. +/// Alias for the set of transaction commitments. +type TransactionCommitments = HashSet::Transaction>>; + +/// Mapping from block payload commitments to the set of transaction commitents. /// /// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the /// same block, and the block will only be deleted after _all_ such objects are garbage collected. @@ -359,46 +365,60 @@ impl> Consensus { /// before all but one branch are ultimately garbage collected. #[derive(Clone, Debug, Derivative)] #[derivative(Default(bound = ""))] -pub struct BlockStore(HashMap, (BLOCK, u64)>); +pub struct TransactionStore( + HashMap, (TransactionCommitments, u64)>, +); -impl BlockStore { - /// Save `block` for later retrieval. +impl TransactionStore { + /// Save payload commitment for later retrieval. /// /// After calling this function, and before the corresponding call to [`remove`](Self::remove), - /// `self.get(block.commit())` will return `Some(block)`. + /// `self.get(payload_commitment)` will return `Some(transaction_commitments)`. /// - /// This function will increment a reference count on the saved block, so that multiple calls to - /// [`insert`](Self::insert) for the same block result in multiple owning references to the - /// block. [`remove`](Self::remove) must be called once for each reference before the block will - /// be deallocated. - pub fn insert(&mut self, block: BLOCK) { + /// This function will increment a reference count on the saved payload commitment, so that + /// multiple calls to [`insert`](Self::insert) for the same payload commitment result in + /// multiple owning references to the payload commitment. [`remove`](Self::remove) must be + /// called once for each reference before the payload commitment will be deallocated. + pub fn insert( + &mut self, + payload_commitment: Commitment, + transaction_commitments: TransactionCommitments, + ) { self.0 - .entry(block.commit()) + .entry(payload_commitment) .and_modify(|(_, refcount)| *refcount += 1) - .or_insert((block, 1)); + .or_insert((transaction_commitments, 1)); } - /// Get a saved block, if available. + /// Get a saved set of transaction commitments, if available. /// - /// If a block has been saved with [`insert`](Self::insert), this function will retrieve it. It - /// may return [`None`] if a block with the given commitment has not been saved or if the block - /// has been dropped with [`remove`](Self::remove). + /// If a set of transaction commitments has been saved with [`insert`](Self::insert), this + /// function will retrieve it. It may return [`None`] if a block with the given commitment has + /// not been saved or if the block has been dropped with [`remove`](Self::remove). #[must_use] - pub fn get(&self, block: Commitment) -> Option<&BLOCK> { - self.0.get(&block).map(|(block, _)| block) + pub fn get( + &self, + payload_commitment: Commitment, + ) -> Option<&HashSet::Transaction>>> { + self.0 + .get(&payload_commitment) + .map(|(txn_comm, _)| txn_comm) } - /// Drop a reference to a saved block. + /// Drop a reference to a saved set of transaction commitments. /// - /// If the block exists and this call drops the last reference to it, the block will be - /// returned. Otherwise, the return value is [`None`]. - pub fn remove(&mut self, block: Commitment) -> Option { - if let Entry::Occupied(mut e) = self.0.entry(block) { + /// If the set exists and this call drops the last reference to it, the set will be returned, + /// Otherwise, the return value is [`None`]. + pub fn remove( + &mut self, + payload_commitment: Commitment, + ) -> Option::Transaction>>> { + if let Entry::Occupied(mut e) = self.0.entry(payload_commitment) { let (_, refcount) = e.get_mut(); *refcount -= 1; if *refcount == 0 { - let (block, _) = e.remove(); - return Some(block); + let (txn_comm, _) = e.remove(); + return Some(txn_comm); } } None diff --git a/types/src/data.rs b/types/src/data.rs index 0495ae1805..418fbd64b9 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -22,14 +22,12 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Serializatio use bincode::Options; use commit::{Commitment, Committable}; use derivative::Derivative; -use either::{Either, Left, Right}; use espresso_systems_common::hotshot::tag; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; -use snafu::{ensure, Snafu}; use std::{ collections::HashSet, fmt::{Debug, Display}, @@ -161,7 +159,7 @@ where #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { /// BlockPayload leaf wants to apply - pub deltas: TYPES::BlockPayload, + pub block_payload: TYPES::BlockPayload, /// View this proposal applies to pub view_number: TYPES::Time, } @@ -317,140 +315,6 @@ pub trait DeltasType: fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; } -/// Error which occurs when [`DeltasType::fill`] is called with a block that does not match the -/// deltas' internal block payload commitment. -#[derive(Clone, Copy, Debug, Snafu)] -#[snafu(display("the block {:?} has commitment {} (expected {})", block, block.commit(), commitment))] -pub struct InconsistentDeltasError { - /// The block with the wrong commitment. - block: BLOCK, - /// The expected commitment. - commitment: Commitment, -} - -impl DeltasType for BLOCK -where - BLOCK: Committable - + Clone - + Debug - + for<'a> Deserialize<'a> - + PartialEq - + Eq - + std::hash::Hash - + Send - + Serialize - + Sync, -{ - type Error = InconsistentDeltasError; - - fn payload_commitment(&self) -> Commitment { - self.commit() - } - - fn try_resolve(self) -> Result { - Ok(self) - } - - fn fill(&mut self, block: BLOCK) -> Result<(), Self::Error> { - ensure!( - block.commit() == self.commit(), - InconsistentDeltasSnafu { - block, - commitment: self.commit() - } - ); - // If the commitments are equal the blocks are equal, and we already have the block, so we - // don't have to do anything. - Ok(()) - } -} - -impl DeltasType for Either> -where - BLOCK: Committable - + Clone - + Debug - + for<'a> Deserialize<'a> - + PartialEq - + Eq - + std::hash::Hash - + Send - + Serialize - + Sync, -{ - type Error = InconsistentDeltasError; - - fn payload_commitment(&self) -> Commitment { - match self { - Either::Left(block) => block.commit(), - Either::Right(comm) => *comm, - } - } - - fn try_resolve(self) -> Result { - match self { - Either::Left(block) => Ok(block), - Either::Right(_) => Err(self), - } - } - - fn fill(&mut self, block: BLOCK) -> Result<(), Self::Error> { - match self { - Either::Left(curr) => curr.fill(block), - Either::Right(comm) => { - ensure!( - *comm == block.commit(), - InconsistentDeltasSnafu { - block, - commitment: *comm - } - ); - *self = Either::Left(block); - Ok(()) - } - } - } -} - -impl DeltasType for Either<(u64, PAYLOAD), HEADER> -where - HEADER: BlockHeader, - PAYLOAD: BlockPayload, -{ - type Error = InconsistentDeltasError; - - fn payload_commitment(&self) -> Commitment { - match self { - Either::Left((_, block)) => block.commit(), - Either::Right(header) => header.payload_commitment(), - } - } - - fn try_resolve(self) -> Result { - match self { - Either::Left((_, block)) => Ok(block), - Either::Right(_) => Err(self), - } - } - - fn fill(&mut self, block: PAYLOAD) -> Result<(), Self::Error> { - match self { - Either::Left((_, curr)) => curr.fill(block), - Either::Right(header) => { - ensure!( - header.payload_commitment() == block.commit(), - InconsistentDeltasSnafu { - block, - commitment: header.payload_commitment() - } - ); - *self = Either::Left((header.block_number(), block)); - Ok(()) - } - } - } -} - /// An item which is appended to a blockchain. pub trait LeafType: Debug @@ -467,8 +331,8 @@ pub trait LeafType: { /// Type of nodes participating in the network. type NodeType: NodeType; - /// Type of block contained by this leaf. - type DeltasType: DeltasType>; + // /// Type of block contained by this leaf. + // type DeltasType: DeltasType>; /// Either state or empty type MaybeState: Clone + Debug @@ -497,18 +361,21 @@ pub trait LeafType: fn get_justify_qc(&self) -> QuorumCertificate>; /// Commitment to this leaf's parent. fn get_parent_commitment(&self) -> Commitment; - /// The block contained in this leaf. - fn get_deltas(&self) -> Self::DeltasType; - /// Fill this leaf with the entire corresponding block. - /// - /// After this function succeeds, `self.get_deltas().try_resolve()` is guaranteed to return - /// `Ok(block)`. - /// - /// # Errors - /// - /// Fails if `block` does not match `self.get_deltas_commitment()`, or if the block is not able - /// to be stored for some implementation-defined reason. - fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError>; + /// The block header contained in this leaf. + fn get_block_header(&self) -> ::BlockHeader; + /// A commitment to the block payload contained in this leaf. + fn get_payload_commitment(&self) -> Commitment> { + self.get_block_header().payload_commitment() + } + /// Fill the transaciton commitments of this leaf with the corresponding block payload. + fn fill_transaction_commitments( + &mut self, + transaction_commitments: HashSet::Transaction>>, + ); + /// Optional set of commitments to the transactions. + fn get_transanction_commitments( + &self, + ) -> HashSet::Transaction>>; /// The blockchain state after appending this leaf. fn get_state(&self) -> Self::MaybeState; /// Transactions rejected or invalidated by the application of this leaf. @@ -519,17 +386,8 @@ pub trait LeafType: fn get_proposer_id(&self) -> EncodedPublicKey; /// Create a leaf from information stored about a view. fn from_stored_view(stored_view: StoredView) -> Self; - - /// A commitment to the block payload contained in this leaf. - fn get_deltas_commitment(&self) -> Commitment> { - self.get_deltas().payload_commitment() - } } -/// The [`DeltasType`] in a [`LeafType`]. -pub type LeafDeltas = ::DeltasType; -/// Errors reported by the [`DeltasType`] in a [`LeafType`]. -pub type LeafDeltasError = as DeltasType>>::Error; /// The [`NodeType`] in a [`LeafType`]. pub type LeafNode = ::NodeType; /// The [`StateType`] in a [`LeafType`]. @@ -648,8 +506,10 @@ impl Hash for Leaf { self.view_number.hash(state); self.justify_qc.hash(state); self.parent_commitment.hash(state); - self.block_header.hasher(state); - self.transaction_commitments.hash(state); + self.block_header.hash(state); + for com in &self.transaction_commitments { + com.hash(state); + } self.rejected.hash(state); } } @@ -666,7 +526,7 @@ impl Display for ValidatingLeaf { impl LeafType for ValidatingLeaf { type NodeType = TYPES; - type DeltasType = TYPES::BlockPayload; + // type DeltasType = TYPES::BlockPayload; type MaybeState = TYPES::StateType; fn new( @@ -704,16 +564,21 @@ impl LeafType for ValidatingLeaf { self.parent_commitment } - fn get_deltas(&self) -> Self::DeltasType { - self.deltas.clone() + fn get_block_header(&self) -> ::BlockHeader { + unimplemented!("Unimplemented for validating consensus which will be removed."); } - fn get_deltas_commitment(&self) -> Commitment<::BlockPayload> { - self.deltas.payload_commitment() + fn fill_transaction_commitments( + &mut self, + _transaction_commitments: HashSet::Transaction>>, + ) { + unimplemented!("Unimplemented for validating consensus which will be removed."); } - fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError> { - self.deltas.fill(block) + fn get_transanction_commitments( + &self, + ) -> HashSet::Transaction>> { + unimplemented!("Unimplemented for validating consensus which will be removed."); } fn get_state(&self) -> Self::MaybeState { @@ -732,19 +597,8 @@ impl LeafType for ValidatingLeaf { self.proposer_id.clone() } - fn from_stored_view(stored_view: StoredView) -> Self { - Self { - view_number: stored_view.view_number, - height: 0, - justify_qc: stored_view.justify_qc, - parent_commitment: stored_view.parent, - block_header: stored_view.block_header, - transaction_commitments: stored_view.transaction_commitments, - state: stored_view.state, - rejected: stored_view.rejected, - timestamp: stored_view.timestamp, - proposer_id: stored_view.proposer_id, - } + fn from_stored_view(_stored_view: StoredView) -> Self { + unimplemented!("Unimplemented for validating consensus which will be removed."); } } @@ -782,20 +636,21 @@ impl Display for Leaf { impl LeafType for Leaf { type NodeType = TYPES; - type DeltasType = Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>; + // type DeltasType = Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>; type MaybeState = (); fn new( view_number: ::Time, justify_qc: QuorumCertificate>, - deltas: ::BlockPayload, + payload: ::BlockPayload, _state: ::StateType, ) -> Self { Self { view_number, justify_qc, parent_commitment: fake_commitment(), - deltas: Either::Left((0, deltas)), + block_header: TYPES::BlockHeader::genesis(payload.clone()), + transaction_commitments: payload.transaction_commitments(), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: genesis_proposer_id(), @@ -822,12 +677,17 @@ impl LeafType for Leaf { self.block_header.clone() } - fn get_block_commitment(&self) -> Commitment<::BlockPayload> { - self.block_header.payload_commitment() + fn fill_transaction_commitments( + &mut self, + transaction_commitments: HashSet::Transaction>>, + ) { + self.transaction_commitments = transaction_commitments; } - fn fill_deltas(&mut self, block: LeafBlockPayload) -> Result<(), LeafDeltasError> { - self.deltas.fill(block) + fn get_transanction_commitments( + &self, + ) -> HashSet::Transaction>> { + self.transaction_commitments.clone() } // The Sequencing Leaf doesn't have a state. @@ -850,7 +710,8 @@ impl LeafType for Leaf { view_number: stored_view.view_number, justify_qc: stored_view.justify_qc, parent_commitment: stored_view.parent, - deltas: stored_view.deltas, + block_header: stored_view.block_header, + transaction_commitments: stored_view.transaction_commitments, rejected: stored_view.rejected, timestamp: stored_view.timestamp, proposer_id: stored_view.proposer_id, @@ -981,8 +842,9 @@ impl Committable for Leaf { // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. commit::RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) + .u64_field("block number", self.get_height()) .field("parent Leaf commitment", self.parent_commitment) - .field("block header", self.block_header) + .field("block payload commitment", self.get_payload_commitment()) .constant_str("justify_qc view number") .u64(*self.justify_qc.view_number) .field( @@ -1021,11 +883,11 @@ where fn from(leaf: LEAF) -> Self { StoredView { view_number: leaf.get_view_number(), - height: leaf.get_height(), parent: leaf.get_parent_commitment(), justify_qc: leaf.get_justify_qc(), state: leaf.get_state(), - deltas: leaf.get_deltas(), + block_header: leaf.get_block_header(), + transaction_commitments: leaf.get_transanction_commitments(), rejected: leaf.get_rejected(), timestamp: leaf.get_timestamp(), proposer_id: leaf.get_proposer_id(), diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 6922b5c011..a61a2613d9 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -54,7 +54,7 @@ pub trait BlockPayload: /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec - fn contained_transactions(&self) -> HashSet>; + fn transaction_commitments(&self) -> HashSet>; } /// Header of a block, which commits to a [`BlockPayload`]. @@ -65,7 +65,7 @@ pub trait BlockHeader: type Payload: BlockPayload; /// Build a header with the payload commitment and parent header. - fn new(payload_commitment: Commitment, parent_header: Self) -> Self; + fn new(payload_commitment: Commitment, parent_header: &Self) -> Self; /// Build a genesis header with the genesis payload. fn genesis(payload: Self::Payload) -> Self; @@ -147,7 +147,7 @@ pub mod dummy { impl BlockHeader for DummyBlock { type Payload = Self; - fn new(_payload_commitment: Commitment, _parent_header: Self) -> Self { + fn new(_payload_commitment: Commitment, _parent_header: &Self) -> Self { Self { nonce: 0 } } @@ -171,7 +171,7 @@ pub mod dummy { type Transaction = DummyTransaction; - fn contained_transactions(&self) -> HashSet> { + fn transaction_commitments(&self) -> HashSet> { HashSet::new() } } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 7da4c0b59f..80df3b9841 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -165,14 +165,12 @@ where Commitment<::Transaction>, >, state: LEAF::MaybeState, - height: u64, parent_commitment: Commitment, rejected: Vec<::Transaction>, proposer_id: EncodedPublicKey, ) -> Self { Self { view_number: qc.view_number(), - height, parent: parent_commitment, justify_qc: qc, state, From 7e10599cb5842ee9f6df62f320ca16ffcb5bf710 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 07:39:25 -0400 Subject: [PATCH 0280/1393] Bump libp2p from 0.52.3 to 0.52.4 (#1930) * Bump libp2p from 0.52.3 to 0.52.4 Bumps [libp2p](https://github.com/libp2p/rust-libp2p) from 0.52.3 to 0.52.4. - [Release notes](https://github.com/libp2p/rust-libp2p/releases) - [Changelog](https://github.com/libp2p/rust-libp2p/blob/master/CHANGELOG.md) - [Commits](https://github.com/libp2p/rust-libp2p/compare/libp2p-v0.52.3...libp2p-v0.52.4) --- updated-dependencies: - dependency-name: libp2p dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * fix some of the old code * finish building swarm --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Rob --- .../src/network/behaviours/dht/mod.rs | 13 +++++---- libp2p-networking/src/network/mod.rs | 8 +++--- libp2p-networking/src/network/node.rs | 27 ++++++++++++------- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 7086b6dab1..a22056c907 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -9,11 +9,13 @@ mod cache; use async_compatibility_layer::art::async_block_on; use futures::channel::oneshot::Sender; +use libp2p::kad::Behaviour as KademliaBehaviour; +use libp2p::kad::Event as KademliaEvent; use libp2p::{ kad::{ /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapError, BootstrapOk, - GetClosestPeersOk, GetRecordOk, GetRecordResult, Kademlia, KademliaEvent, Mode, - ProgressStep, PutRecordResult, QueryId, QueryResult, Quorum, Record, + GetClosestPeersOk, GetRecordOk, GetRecordResult, Mode, ProgressStep, PutRecordResult, + QueryId, QueryResult, Quorum, Record, }, swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, Multiaddr, @@ -52,7 +54,7 @@ pub struct DHTBehaviour { /// List of previously failled put requests queued_put_record_queries: VecDeque, /// Kademlia behaviour - pub kadem: Kademlia, + pub kadem: KademliaBehaviour, /// State of bootstrapping pub bootstrap_state: Bootstrap, /// State of last random walk @@ -114,7 +116,7 @@ impl DHTBehaviour { /// Create a new DHT behaviour #[must_use] pub async fn new( - mut kadem: Kademlia, + mut kadem: KademliaBehaviour, pid: PeerId, replication_factor: NonZeroUsize, cache_location: Option, @@ -540,7 +542,8 @@ pub enum DHTProgress { // 1. use of deprecated associated function `libp2p::libp2p_swarm::NetworkBehaviour::inject_event`: Implement `NetworkBehaviour::on_connection_handler_event` instead. The default implementation of this `inject_*` method delegates to it. impl NetworkBehaviour for DHTBehaviour { - type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; + type ConnectionHandler = + as NetworkBehaviour>::ConnectionHandler; type ToSwarm = DHTEvent; diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 3ec1c07b73..2d35fdca5e 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -38,9 +38,9 @@ use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc, time::Durat use tracing::{info, instrument}; #[cfg(async_executor_impl = "async-std")] -use libp2p::dns::DnsConfig; +use libp2p::dns::async_std::Transport as DnsTransport; #[cfg(async_executor_impl = "tokio")] -use libp2p::dns::TokioDnsConfig as DnsConfig; +use libp2p::dns::tokio::Transport as DnsTransport; #[cfg(async_executor_impl = "async-std")] use quic::async_std::Transport as QuicTransport; #[cfg(async_executor_impl = "tokio")] @@ -211,12 +211,12 @@ pub async fn gen_transport( let dns_quic = { #[cfg(async_executor_impl = "async-std")] { - DnsConfig::system(quic_transport).await + DnsTransport::system(quic_transport).await } #[cfg(async_executor_impl = "tokio")] { - DnsConfig::system(quic_transport) + DnsTransport::system(quic_transport) } } .map_err(|e| NetworkError::TransportLaunch { source: e })?; diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 835058bf84..543bd24d71 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -43,12 +43,12 @@ use libp2p::{ Info as IdentifyInfo, }, identity::Keypair, - kad::{store::MemoryStore, Kademlia, KademliaConfig}, + kad::{store::MemoryStore, Behaviour, Config}, request_response::{ Behaviour as RequestResponse, Config as RequestResponseConfig, ProtocolSupport, }, - swarm::{SwarmBuilder, SwarmEvent}, - Multiaddr, Swarm, + swarm::SwarmEvent, + Multiaddr, Swarm, SwarmBuilder, }; use libp2p_identity::PeerId; use rand::{prelude::SliceRandom, thread_rng}; @@ -236,7 +236,7 @@ impl NetworkNode { let identify = IdentifyBehaviour::new(identify_cfg); // - Build DHT needed for peer discovery - let mut kconfig = KademliaConfig::default(); + let mut kconfig = Config::default(); // 8 hours by default let record_republication_interval = config .republication_interval @@ -252,7 +252,7 @@ impl NetworkNode { kconfig.set_replication_factor(factor); } - let kadem = Kademlia::with_config(peer_id, MemoryStore::new(peer_id), kconfig); + let kadem = Behaviour::with_config(peer_id, MemoryStore::new(peer_id), kconfig); let rrconfig = RequestResponseConfig::default(); @@ -275,12 +275,19 @@ impl NetworkNode { identify, DMBehaviour::new(request_response), ); - let executor = Box::new(|fut| { - async_spawn(fut); - }); - SwarmBuilder::with_executor(transport, network, peer_id, executor) - .dial_concurrency_factor(std::num::NonZeroU8::new(1).unwrap()) + // build swarm + let swarm = SwarmBuilder::with_existing_identity(identity.clone()); + #[cfg(async_executor_impl = "async-std")] + let swarm = swarm.with_async_std(); + #[cfg(async_executor_impl = "tokio")] + let swarm = swarm.with_tokio(); + + swarm + .with_other_transport(|_| transport) + .unwrap() + .with_behaviour(|_| network) + .unwrap() .build() }; for (peer, addr) in &config.to_connect_addrs { From 8bdae2502d42efb122fc9b9b96fa72a327f26841 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 30 Oct 2023 14:42:04 -0400 Subject: [PATCH 0281/1393] Fix committee filter by removing VID-bound events (#1953) * fix committee filter by removing VID-bound events * fix vid filter --- task-impls/src/network.rs | 4 +--- task-impls/src/vid.rs | 3 ++- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 770b7726b1..523dbd1f70 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -343,7 +343,6 @@ impl< | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::Shutdown | HotShotEvent::DACSend(_, _) - | HotShotEvent::VidCertSend(_, _) | HotShotEvent::ViewChange(_) | HotShotEvent::TimeoutVoteSend(_) ) @@ -356,8 +355,6 @@ impl< HotShotEvent::DAProposalSend(_, _) | HotShotEvent::DAVoteSend(_) | HotShotEvent::Shutdown - | HotShotEvent::VidDisperseSend(_, _) - | HotShotEvent::VidVoteSend(_) | HotShotEvent::ViewChange(_) ) } @@ -368,6 +365,7 @@ impl< event, HotShotEvent::Shutdown | HotShotEvent::VidDisperseSend(_, _) + | HotShotEvent::VidCertSend(_, _) | HotShotEvent::VidVoteSend(_) | HotShotEvent::ViewChange(_) ) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 29a4a32f54..e0d5d3b90d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -384,13 +384,14 @@ where None } - /// Filter the DA event. + /// Filter the VID event. pub fn filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::Shutdown | HotShotEvent::VidDisperseRecv(_, _) | HotShotEvent::VidVoteRecv(_) + | HotShotEvent::VidCertRecv(_) | HotShotEvent::ViewChange(_) ) } From e2f08be1a27e9b18e31aa4f7d844f29889ce547f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 30 Oct 2023 14:59:36 -0400 Subject: [PATCH 0282/1393] Add VID to webserver (#1954) * add vid to webserver * minor fixes * fix certificates/votes/tasks * increase next view timeout for tests * remove commented code --- .../traits/networking/web_server_network.rs | 152 ++++++++++++- task-impls/src/consensus.rs | 7 +- task-impls/src/da.rs | 6 +- task-impls/src/transactions.rs | 1 + task-impls/src/vid.rs | 58 ++++- testing/tests/catchup.rs | 8 +- testing/tests/vid_task.rs | 2 +- types/src/traits/election.rs | 13 +- types/src/traits/network.rs | 18 ++ web_server/api.toml | 53 +++++ web_server/src/config.rs | 16 +- web_server/src/lib.rs | 202 ++++++++++++++++++ 12 files changed, 493 insertions(+), 43 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 5a67880e9f..a9a44930e9 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -122,6 +122,15 @@ struct Inner { /// Task map for quorum votes. vote_task_map: Arc>>>>, + /// Task map for vid votes + vid_vote_task_map: + Arc>>>>, + /// Task map for VID certs + vid_cert_task_map: + Arc>>>>, + /// Task map for VID disperse data + vid_disperse_task_map: + Arc>>>>, /// Task map for DACs. dac_task_map: Arc>>>>, @@ -169,7 +178,7 @@ impl Inner { MessagePurpose::DAC => config::get_da_certificate_route(view_number), MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` MessagePurpose::VidVote => config::get_vid_vote_route(view_number, vote_index), // like `Vote` - MessagePurpose::VidCert => config::get_vid_cert_route(view_number), // like `DAC` + MessagePurpose::VidCert => config::get_vid_certificate_route(view_number), // like `DAC` }; if message_purpose == MessagePurpose::Data { @@ -351,8 +360,10 @@ impl Inner { // TODO ED Should add extra error checking here to make sure we are intending to cancel a task ConsensusIntentEvent::CancelPollForVotes(event_view) | ConsensusIntentEvent::CancelPollForProposal(event_view) + | ConsensusIntentEvent::CancelPollForVIDVotes(event_view) + | ConsensusIntentEvent::CancelPollForVIDCertificate(event_view) | ConsensusIntentEvent::CancelPollForDAC(event_view) - | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) + | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { if view_number == event_view { debug!("Shutting down polling task for view {}", event_view); @@ -371,7 +382,9 @@ impl Inner { } } - _ => unimplemented!(), + _ => { + unimplemented!() + } } } // Nothing on receiving channel @@ -528,6 +541,9 @@ impl< tx_index: Arc::default(), proposal_task_map: Arc::default(), vote_task_map: Arc::default(), + vid_vote_task_map: Arc::default(), + vid_cert_task_map: Arc::default(), + vid_disperse_task_map: Arc::default(), dac_task_map: Arc::default(), view_sync_cert_task_map: Arc::default(), view_sync_vote_task_map: Arc::default(), @@ -562,7 +578,7 @@ impl< MessagePurpose::DAC => config::post_da_certificate_route(*view_number), MessagePurpose::VidVote => config::post_vid_vote_route(*view_number), MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), - MessagePurpose::VidCert => config::post_vid_cert_route(*view_number), + MessagePurpose::VidCert => config::post_vid_certificate_route(*view_number), }; let network_msg: SendMsg = SendMsg { @@ -822,6 +838,46 @@ impl< .await; } } + ConsensusIntentEvent::PollForVIDDisperse(view_number) => { + // Check if we already have a task for this (we shouldn't) + + // Going to do a write lock since mostly likely we will need it - can change to upgradable read in the future + let mut task_map = self.inner.vid_disperse_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::VidDisperse, view_number) + .await + { + error!( + "Background receive VID disperse polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // GC proposal collection if we are two views in the future + if let Some((_, sender)) = task_map.remove_entry(&view_number.wrapping_sub(2)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForVIDDisperse( + view_number.wrapping_sub(2), + )) + .await; + } + } ConsensusIntentEvent::PollForCurrentProposal => { // create new task let (_, receiver) = unbounded(); @@ -878,6 +934,44 @@ impl< .await; } } + ConsensusIntentEvent::PollForVIDVotes(view_number) => { + let mut task_map = self.inner.vid_vote_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::VidVote, view_number) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // GC proposal collection if we are two views in the future + // TODO ED This won't work for vote collection, last task is more than 2 view ago depending on size of network, will need to rely on cancel task from consensus + if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForVIDVotes( + view_number.wrapping_sub(2), + )) + .await; + } + } + ConsensusIntentEvent::PollForDAC(view_number) => { let mut task_map = self.inner.dac_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { @@ -914,6 +1008,43 @@ impl< .await; } } + + ConsensusIntentEvent::PollForVIDCertificate(view_number) => { + let mut task_map = self.inner.vid_cert_task_map.write().await; + if let Entry::Vacant(e) = task_map.entry(view_number) { + // create new task + let (sender, receiver) = unbounded(); + e.insert(sender); + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::VidCert, view_number) + .await + { + error!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + } + }); + } else { + error!("Somehow task already existed!"); + } + + // GC proposal collection if we are two views in the future + if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForVIDCertificate( + view_number.wrapping_sub(2), + )) + .await; + } + } ConsensusIntentEvent::CancelPollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; @@ -927,6 +1058,19 @@ impl< } } + ConsensusIntentEvent::CancelPollForVIDVotes(view_number) => { + let mut task_map = self.inner.vid_vote_task_map.write().await; + + if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { + // Send task cancel message to old task + + // If task already exited we expect an error + let _res = sender + .send(ConsensusIntentEvent::CancelPollForVIDVotes(view_number)) + .await; + } + } + ConsensusIntentEvent::PollForViewSyncCertificate(view_number) => { let mut task_map = self.inner.view_sync_cert_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5139519813..94209fcf39 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1251,11 +1251,9 @@ where let view = cert.view_number; self.vid_certs.insert(view, cert); - // TODO Make sure we aren't voting for an arbitrarily old round for no reason - if self.vote_if_able().await { - self.current_proposal = None; - } + // RM TODO: VOTING } + HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {}", *new_view); @@ -1541,7 +1539,6 @@ pub fn consensus_event_filter>( | HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::QCFormed(_) | HotShotEvent::DACRecv(_) - | HotShotEvent::VidCertRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::SendDABlockData(_) | HotShotEvent::Timeout(_) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index fc7a8bd55c..0b77c3ca54 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -292,11 +292,7 @@ where } } HotShotEvent::DAVoteRecv(vote) => { - // warn!( - // "DA vote recv, Main Task {:?}, key: {:?}", - // vote.current_view, - // self.committee_exchange.public_key() - // ); + debug!("DA vote recv, Main Task {:?}", vote.current_view,); // Check if we are the leader and the vote is from the sender. let view = vote.current_view; if !self.committee_exchange.is_leader(view) { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 45cea93f2d..40040e7613 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -270,6 +270,7 @@ where // TODO (Keyao) Determine and update where to publish VidDisperseSend. // + debug!("publishing VID disperse for view {}", *view + 1); self.event_stream .publish(HotShotEvent::VidDisperseSend( Proposal { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index e0d5d3b90d..f43064b924 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -12,7 +12,8 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::vote::VoteType; +use hotshot_types::traits::network::CommunicationChannel; +use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ certificate::VIDCertificate, traits::election::SignedCertificate, vote::VIDVoteAccumulator, }; @@ -141,13 +142,16 @@ where { match event { HotShotEvent::VidVoteRecv(vote) => { - // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 + debug!("VID vote recv, collection task {:?}", vote.current_view); + // panic!("Vote handle received VID vote for view {}", *vote.current_view); - debug!("VID vote recv, collection task {:?}", vote.get_view()); - // panic!("Vote handle received DA vote for view {}", *vote.current_view); + // For the case where we receive votes after we've made a certificate + if state.accumulator.is_right() { + debug!("VID accumulator finished view: {:?}", state.cur_view); + return (None, state); + } let accumulator = state.accumulator.left().unwrap(); - match state .vid_exchange .accumulate_vote(accumulator, &vote, &vote.block_commitment) @@ -167,13 +171,19 @@ where .await; state.accumulator = Right(vid_cert.clone()); + state + .vid_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDVotes( + *vid_cert.view_number, + )) + .await; // Return completed at this point return (Some(HotShotTaskCompleted::ShutDown), state); } } } - HotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), _ => { error!("unexpected event {:?}", event); } @@ -206,12 +216,10 @@ where ) -> Option { match event { HotShotEvent::VidVoteRecv(vote) => { - // TODO copy-pasted from DAVoteRecv https://github.com/EspressoSystems/HotShot/issues/1690 - // warn!( // "VID vote recv, Main Task {:?}, key: {:?}", // vote.current_view, - // self.vid_exchange.public_key() + // self.committee_exchange.public_key() // ); // Check if we are the leader and the vote is from the sender. let view = vote.current_view; @@ -361,6 +369,9 @@ where } } } + HotShotEvent::VidCertRecv(_) => { + // RM TODO + } HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; @@ -371,6 +382,35 @@ where } self.cur_view = view; + // Start polling for VID disperse for the new view + self.vid_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( + *self.cur_view + 1, + )) + .await; + + self.vid_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForVIDCertificate( + *self.cur_view + 1, + )) + .await; + + // If we are not the next leader, we should exit + if !self.vid_exchange.is_leader(self.cur_view + 1) { + // panic!("We are not the DA leader for view {}", *self.cur_view + 1); + return None; + } + + // Start polling for VID votes for the "next view" + self.vid_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::PollForVIDVotes( + *self.cur_view + 1, + )) + .await; + return None; } diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 971bc7f8c0..0672681fdb 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -18,7 +18,7 @@ async fn test_catchup() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { - next_view_timeout: 1000, + next_view_timeout: 2000, ..Default::default() }; let mut metadata = TestMetadata::default(); @@ -79,7 +79,7 @@ async fn test_catchup_web() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { - next_view_timeout: 1000, + next_view_timeout: 2000, ..Default::default() }; let mut metadata = TestMetadata::default(); @@ -136,7 +136,7 @@ async fn test_catchup_one_node() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { - next_view_timeout: 1000, + next_view_timeout: 2000, ..Default::default() }; let mut metadata = TestMetadata::default(); @@ -196,7 +196,7 @@ async fn test_catchup_in_view_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { - next_view_timeout: 1000, + next_view_timeout: 2000, ..Default::default() }; let mut metadata = TestMetadata::default(); diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 3a3464f386..4b52a7c376 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -46,7 +46,7 @@ async fn test_vid_task() { commitment: block_commitment, }; - let signature = vid_exchange.sign_vid_proposal(&block.commit()); + let signature = vid_exchange.sign_vid_disperse(&block.commit()); let proposal: DAProposal = DAProposal { deltas: block.clone(), view_number: ViewNumber::new(2), diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index cff1f06dca..2257e7b795 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -446,7 +446,6 @@ pub trait ConsensusExchange: Send + Sync { &Checked::Unchecked(vote.get_vote_token()), ) { error!("Vote data is {:?}", vote.get_data()); - error!("Invalid vote!"); return Either::Left(accumulator); } @@ -646,14 +645,14 @@ pub trait VIDExchangeType: ConsensusExchange VIDVote; - /// Sign a vote on VID proposal. + /// Sign a vote on VID disperse fn sign_vid_vote( &self, block_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); - /// Sign a VID proposal. - fn sign_vid_proposal( + /// Sign a VID disperse + fn sign_vid_disperse( &self, block_commitment: &Commitment, ) -> EncodedSignature; @@ -702,7 +701,7 @@ impl< block_commitment, current_view, vote_token, - vote_data: VoteData::DA(block_commitment), + vote_data: VoteData::VID(block_commitment), } } @@ -712,13 +711,13 @@ impl< ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, - VoteData::DA(block_commitment).commit().as_ref(), + VoteData::VID(block_commitment).commit().as_ref(), ); (self.public_key.to_bytes(), signature) } /// Sign a VID proposal. - fn sign_vid_proposal( + fn sign_vid_disperse( &self, block_commitment: &Commitment, ) -> EncodedSignature { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 1729636836..1a037ec89a 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -139,12 +139,18 @@ pub enum NetworkError { pub enum ConsensusIntentEvent { /// Poll for votes for a particular view PollForVotes(u64), + /// Poll for VID votes for a particular view + PollForVIDVotes(u64), /// Poll for a proposal for a particular view PollForProposal(u64), + /// Poll for VID disperse data for a particular view + PollForVIDDisperse(u64), /// Poll for the most recent proposal the webserver has PollForCurrentProposal, /// Poll for a DAC for a particular view PollForDAC(u64), + /// Poll for a VID certificate for a certain view + PollForVIDCertificate(u64), /// Poll for view sync votes starting at a particular view PollForViewSyncVotes(u64), /// Poll for view sync proposals (certificates) for a particular view @@ -155,14 +161,20 @@ pub enum ConsensusIntentEvent { PollFutureLeader(u64, K), /// Cancel polling for votes CancelPollForVotes(u64), + /// Cancel polling for VID votes for a particular view + CancelPollForVIDVotes(u64), /// Cancel polling for view sync votes. CancelPollForViewSyncVotes(u64), /// Cancel polling for proposals. CancelPollForProposal(u64), /// Cancal polling for DAC. CancelPollForDAC(u64), + /// Cancel polling for VID certificate + CancelPollForVIDCertificate(u64), /// Cancel polling for view sync certificate. CancelPollForViewSyncCertificate(u64), + /// Cancel polling for VID disperse data + CancelPollForVIDDisperse(u64), /// Cancel polling for transactions CancelPollForTransactions(u64), } @@ -179,7 +191,13 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) | ConsensusIntentEvent::CancelPollForVotes(view_number) | ConsensusIntentEvent::CancelPollForProposal(view_number) + | ConsensusIntentEvent::PollForVIDCertificate(view_number) + | ConsensusIntentEvent::PollForVIDVotes(view_number) + | ConsensusIntentEvent::PollForVIDDisperse(view_number) + | ConsensusIntentEvent::CancelPollForVIDDisperse(view_number) | ConsensusIntentEvent::CancelPollForDAC(view_number) + | ConsensusIntentEvent::CancelPollForVIDCertificate(view_number) + | ConsensusIntentEvent::CancelPollForVIDVotes(view_number) | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForTransactions(view_number) diff --git a/web_server/api.toml b/web_server/api.toml index cc610fc9c9..64c843d3a7 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -11,6 +11,14 @@ DOC = """ Return the proposal for a given view number """ +# POST a VID disperse, where the view is passed as an argument +[route.getviddisperse] +PATH = ["vid_disperse/:view_number"] +":view_number" = "Integer" +DOC = """ +Return the VID disperse data for a given view number +""" + # GET the proposal for a view, where the view is passed as an argument [route.getrecentproposal] PATH = ["proposal/"] @@ -27,6 +35,15 @@ DOC = """ Post the proposal for a given view_number """ +# POST a VID disperse, where the view is passed as an argument +[route.postviddisperse] +PATH = ["vid_disperse/:view_number"] +METHOD = "POST" +":view_number" = "Integer" +DOC = """ +Post the VID disperse data for a given view number +""" + # GET the DA certificate for a view, where the view is passed as an argument [route.getcertificate] PATH = ["certificate/:view_number"] @@ -35,6 +52,14 @@ DOC = """ Return the DA certificate for a given view number """ +# GET the VID certificate for a view, where the view is passed as an argument +[route.getvidcertificate] +PATH = ["vid_certificate/:view_number"] +":view_number" = "Integer" +DOC = """ +Return the VID certificate for a given view number +""" + # POST a DA certificate, where the view is passed as an argument [route.postcertificate] PATH = ["certificate/:view_number"] @@ -44,6 +69,15 @@ DOC = """ Post the DA certificate for a given view_number """ +# POST a VID certificate, where the view is passed as an argument +[route.postvidcertificate] +PATH = ["vid_certificate/:view_number"] +METHOD = "POST" +":view_number" = "Integer" +DOC = """ +Post the VID certificate for a given view_number +""" + # GET all the votes from a given index for a given view number [route.getvotes] PATH = ["votes/:view_number/:index"] @@ -54,6 +88,16 @@ DOC = """ Get all votes for a view number """ +# GET all the VID votes from a given index for a given view number +[route.getvidvotes] +PATH = ["vid_votes/:view_number/:index"] +":view_number" = "Integer" +":index" = "Integer" +METHOD = "GET" +DOC = """ +Get all VID votes for a view number +""" + # POST a vote, where the view number is passed as an argument [route.postvote] PATH = ["votes/:view_number"] @@ -63,6 +107,15 @@ DOC = """ Send a vote """ +# POST a VID vote, where the view number is passed as an argument +[route.postvidvote] +PATH = ["vid_votes/:view_number"] +":view_number" = "Integer" +METHOD = "POST" +DOC = """ +Send a VID vote +""" + # GET all transactions starting at :index [route.gettransactions] PATH = ["transactions/:index"] diff --git a/web_server/src/config.rs b/web_server/src/config.rs index f9d0e7c0c7..70d57b726d 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -38,27 +38,27 @@ pub fn post_vote_route(view_number: u64) -> String { } pub fn get_vid_disperse_route(view_number: u64) -> String { - format!("api/vid/disperse/{view_number}") + format!("api/vid_disperse/{view_number}") } pub fn post_vid_disperse_route(view_number: u64) -> String { - format!("api/vid/disperse/{view_number}") + format!("api/vid_disperse/{view_number}") } pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { - format!("api/vid/votes/{view_number}/{index}") + format!("api/vid_votes/{view_number}/{index}") } pub fn post_vid_vote_route(view_number: u64) -> String { - format!("api/vid/votes/{view_number}") + format!("api/vid_votes/{view_number}") } -pub fn get_vid_cert_route(view_number: u64) -> String { - format!("api/vid/cert/{view_number}") +pub fn get_vid_certificate_route(view_number: u64) -> String { + format!("api/vid_certificate/{view_number}") } -pub fn post_vid_cert_route(view_number: u64) -> String { - format!("api/vid/cert/{view_number}") +pub fn post_vid_certificate_route(view_number: u64) -> String { + format!("api/vid_certificate/{view_number}") } pub fn get_transactions_route(index: u64) -> String { diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index c295993c68..b1e267d982 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -53,6 +53,18 @@ struct WebServerState { oldest_view_sync_vote: u64, + vid_disperses: HashMap)>, + oldest_vid_disperse: u64, + recent_vid_disperse: u64, + + vid_votes: HashMap)>>, + oldest_vid_vote: u64, + // recent_vid_vote: u64, + vid_certificates: HashMap)>, + oldest_vid_certificate: u64, + // recent_vid_certificate: u64, + vid_vote_index: HashMap, + /// index -> transaction // TODO ED Make indexable by hash of tx transactions: HashMap>, @@ -88,6 +100,19 @@ impl WebServerState { view_sync_proposals: HashMap::new(), view_sync_votes: HashMap::new(), view_sync_vote_index: HashMap::new(), + + vid_disperses: HashMap::new(), + oldest_vid_disperse: 0, + recent_vid_disperse: 0, + + vid_votes: HashMap::new(), + oldest_vid_vote: 0, + // recent_vid_vote: 0, + vid_certificates: HashMap::new(), + oldest_vid_certificate: 0, + // recent_vid_certificate: 0, + vid_vote_index: HashMap::new(), + oldest_view_sync_vote: 0, oldest_view_sync_proposal: 0, view_sync_proposal_index: HashMap::new(), @@ -135,6 +160,15 @@ pub trait WebServerDataSource { fn post_completed_transaction(&mut self, block: Vec) -> Result<(), Error>; fn post_secret_proposal(&mut self, _view_number: u64, _proposal: Vec) -> Result<(), Error>; fn proposal(&self, view_number: u64) -> Option<(String, Vec)>; + + fn post_vid_disperse(&mut self, view_number: u64, disperse: Vec) -> Result<(), Error>; + fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + fn post_vid_certificate(&mut self, view_number: u64, certificate: Vec) + -> Result<(), Error>; + + fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error>; + fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; + fn get_vid_certificate(&self, index: u64) -> Result>>, Error>; } impl WebServerDataSource for WebServerState { @@ -161,6 +195,26 @@ impl WebServerDataSource for WebServerState { } } + /// Return the VID disperse data that the server has received for a particular view + fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error> { + match self.vid_disperses.get(&view_number) { + Some(disperse) => { + if disperse.1.is_empty() { + Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("VID disperse not found for view {view_number}"), + }) + } else { + Ok(Some(vec![disperse.1.clone()])) + } + } + None => Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("VID disperse not found for view {view_number}"), + }), + } + } + fn get_recent_proposal(&self) -> Result>>, Error> { self.get_proposal(self.recent_proposal) } @@ -200,6 +254,22 @@ impl WebServerDataSource for WebServerState { } } + /// Return all VID votes the server has received for a particular view from provided index to most recent + fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error> { + let vid_votes = self.vid_votes.get(&view_number); + let mut ret_votes = vec![]; + if let Some(vid_votes) = vid_votes { + for i in index..*self.vid_vote_index.get(&view_number).unwrap() { + ret_votes.push(vid_votes[i as usize].1.clone()); + } + } + if !ret_votes.is_empty() { + Ok(Some(ret_votes)) + } else { + Ok(None) + } + } + fn get_view_sync_votes( &self, view_number: u64, @@ -280,6 +350,26 @@ impl WebServerDataSource for WebServerState { } } + /// Return the VID certificate the server has received for a particular view + fn get_vid_certificate(&self, index: u64) -> Result>>, Error> { + match self.vid_certificates.get(&index) { + Some(vid_cert) => { + if vid_cert.1.is_empty() { + Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("VID Certificate not found for view {index}"), + }) + } else { + Ok(Some(vec![vid_cert.1.clone()])) + } + } + None => Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("VID certificate not found for view {index}"), + }), + } + } + /// Stores a received vote in the `WebServerState` fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { // Only keep vote history for MAX_VIEWS number of views @@ -300,6 +390,26 @@ impl WebServerDataSource for WebServerState { Ok(()) } + /// Stores a received VID vote in the `WebServerState` + fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { + // Only keep vote history for MAX_VIEWS number of views + if self.vid_votes.len() >= MAX_VIEWS { + self.vid_votes.remove(&self.oldest_vote); + while !self.vid_votes.contains_key(&self.oldest_vid_vote) { + self.oldest_vid_vote += 1; + } + } + let next_index = self.vid_vote_index.entry(view_number).or_insert(0); + self.vid_votes + .entry(view_number) + .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) + .or_insert_with(|| vec![(*next_index, vote)]); + self.vid_vote_index + .entry(view_number) + .and_modify(|index| *index += 1); + Ok(()) + } + fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { // Only keep vote history for MAX_VIEWS number of views if self.view_sync_votes.len() >= MAX_VIEWS { @@ -343,6 +453,26 @@ impl WebServerDataSource for WebServerState { Ok(()) } + fn post_vid_disperse(&mut self, view_number: u64, mut disperse: Vec) -> Result<(), Error> { + error!("Received VID disperse for view {}", view_number); + if view_number > self.recent_vid_disperse { + self.recent_vid_disperse = view_number; + } + + // Only keep proposal history for MAX_VIEWS number of view + if self.vid_disperses.len() >= MAX_VIEWS { + self.vid_disperses.remove(&self.oldest_vid_disperse); + while !self.vid_disperses.contains_key(&self.oldest_vid_disperse) { + self.oldest_vid_disperse += 1; + } + } + self.vid_disperses + .entry(view_number) + .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut disperse)) + .or_insert_with(|| (String::new(), disperse)); + Ok(()) + } + fn post_view_sync_proposal( &mut self, view_number: u64, @@ -390,6 +520,31 @@ impl WebServerDataSource for WebServerState { .or_insert_with(|| (String::new(), cert)); Ok(()) } + + fn post_vid_certificate( + &mut self, + view_number: u64, + mut certificate: Vec, + ) -> Result<(), Error> { + error!("Received VID Certificate for view {}", view_number); + + // Only keep proposal history for MAX_VIEWS number of view + if self.vid_certificates.len() >= MAX_VIEWS { + self.vid_certificates.remove(&self.oldest_vid_certificate); + while !self + .vid_certificates + .contains_key(&self.oldest_vid_certificate) + { + self.oldest_vid_certificate += 1; + } + } + self.vid_certificates + .entry(view_number) + .and_modify(|(_, empty_cert)| empty_cert.append(&mut certificate)) + .or_insert_with(|| (String::new(), certificate)); + Ok(()) + } + /// Stores a received group of transactions in the `WebServerState` fn post_transaction(&mut self, txn: Vec) -> Result<(), Error> { if self.transactions.len() >= MAX_TXNS { @@ -508,6 +663,13 @@ where } .boxed() })? + .get("getviddisperse", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + state.get_vid_disperse(view_number) + } + .boxed() + })? .get("getrecentproposal", |_req, state| { async move { state.get_recent_proposal() }.boxed() })? @@ -526,6 +688,13 @@ where } .boxed() })? + .get("getvidcertificate", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + state.get_vid_certificate(view_number) + } + .boxed() + })? .get("getvotes", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -534,6 +703,14 @@ where } .boxed() })? + .get("getvidvotes", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let index: u64 = req.integer_param("index")?; + state.get_vid_votes(view_number, index) + } + .boxed() + })? .get("getviewsyncvotes", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -558,6 +735,15 @@ where } .boxed() })? + .post("postvidvote", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically + let vote = req.body_bytes(); + state.post_vid_vote(view_number, vote) + } + .boxed() + })? .post("postviewsyncvote", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -575,6 +761,14 @@ where } .boxed() })? + .post("postviddisperse", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let disperse = req.body_bytes(); + state.post_vid_disperse(view_number, disperse) + } + .boxed() + })? .post("postviewsyncproposal", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -591,6 +785,14 @@ where } .boxed() })? + .post("postvidcertificate", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let cert = req.body_bytes(); + state.post_vid_certificate(view_number, cert) + } + .boxed() + })? .post("posttransaction", |req, state| { async move { let txns = req.body_bytes(); From 1d0ca37dd8882ea7ea08cadbb683fb7862b581fe Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 30 Oct 2023 17:52:16 -0400 Subject: [PATCH 0283/1393] first pass at generic vote impl --- types/src/lib.rs | 1 + types/src/vote2.rs | 7 ++----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/types/src/lib.rs b/types/src/lib.rs index ff296cf28d..620d49a17b 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -18,6 +18,7 @@ pub mod data; pub mod error; pub mod event; pub mod message; +pub mod quorum_vote; pub mod traits; pub mod utils; pub mod vote; diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 3e452c102d..a7ca2c96af 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -20,7 +20,7 @@ use crate::traits::{ }; /// A simple vote that has a signer and commitment to the data voted on. -pub trait Vote2 { +pub trait Vote2: 'static { /// The membership of those that send this vote type type Membership: Membership; /// Type of data commitment this vote uses. @@ -95,10 +95,7 @@ impl< let key = vote.get_signing_key(); let vote_commitment = vote.get_data_commitment(); - if !key.validate( - &vote.get_signature(), - &bincode_opts().serialize(&vote_commitment).unwrap(), - ) { + if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { error!("Vote data is {:?}", vote.get_data_commitment()); error!("Invalid vote! Data"); return Either::Left(self); From 7e28e971d88b872bf01c9eade29da87bca07d539 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 30 Oct 2023 16:02:58 -0700 Subject: [PATCH 0284/1393] Replace txn set with payload in leaf, return a reference for get_block_header. --- hotshot/src/lib.rs | 13 +- hotshot/src/traits/election/vrf.rs | 1024 ------------------ hotshot/src/traits/storage/memory_storage.rs | 2 +- task-impls/src/consensus.rs | 32 +- task-impls/src/da.rs | 9 +- task-impls/src/transactions.rs | 7 +- task-impls/src/vid.rs | 2 +- testing/src/task_helpers.rs | 4 +- testing/tests/consensus_task.rs | 3 +- types/src/consensus.rs | 56 +- types/src/data.rs | 91 +- types/src/traits/storage.rs | 18 +- 12 files changed, 116 insertions(+), 1145 deletions(-) delete mode 100644 hotshot/src/traits/election/vrf.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 7ebc80dafb..3d5c770ea9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -62,7 +62,7 @@ use hotshot_types::{ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::{DACertificate, ViewSyncCertificate}, - consensus::{Consensus, ConsensusMetricsValue, TransactionStore, View, ViewInner, ViewQueue}, + consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::{DAProposal, Leaf, LeafType, QuorumProposal}, error::StorageSnafu, message::{ @@ -204,13 +204,10 @@ impl> SystemContext { ); let mut saved_leaves = HashMap::new(); - let mut saved_transaction_commitments = TransactionStore::default(); + let mut saved_block_payloads = BlockPayloadStore::default(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); - if !anchored_leaf.get_transanction_commitments().is_empty() { - saved_transaction_commitments.insert( - anchored_leaf.get_payload_commitment(), - anchored_leaf.get_transanction_commitments(), - ); + if let Some(payload) = anchored_leaf.get_block_payload() { + saved_block_payloads.insert(payload); } let start_view = anchored_leaf.get_view_number(); @@ -220,7 +217,7 @@ impl> SystemContext { cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), saved_leaves, - saved_transaction_commitments, + saved_block_payloads, // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/hotshot/src/traits/election/vrf.rs b/hotshot/src/traits/election/vrf.rs deleted file mode 100644 index f99dabd7e8..0000000000 --- a/hotshot/src/traits/election/vrf.rs +++ /dev/null @@ -1,1024 +0,0 @@ -use hotshot_types::traits::signature_key::EncodedPublicKey; - -#[allow(deprecated)] -use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt::Debug, marker::PhantomData, num::NonZeroU64}; - -// TODO wrong palce for this -/// the sortition committee size parameter -pub const SORTITION_PARAMETER: u64 = 100; - -// TODO compatibility this function's impl into a trait -// TODO do we necessariy want the units of stake to be a u64? or generics -/// The stake table for VRFs -#[derive(Serialize, Deserialize, Debug)] -pub struct VRFStakeTable { - /// the mapping of id -> stake - mapping: BTreeMap, - /// total stake present - total_stake: NonZeroU64, - /// PhantomData - _pd: PhantomData<(VRF, VRFHASHER, VRFPARAMS)>, -} - -impl Clone for VRFStakeTable { - fn clone(&self) -> Self { - Self { - mapping: self.mapping.clone(), - total_stake: self.total_stake, - _pd: PhantomData, - } - } -} - -// impl VRFStakeTable { -// /// get total stake -// #[must_use] -// pub fn get_all_stake(&self) -> NonZeroU64 { -// self.total_stake -// } -// } - -// impl VRFStakeTable -// where -// VRF: Vrf, -// VRFPARAMS: Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// VRF::PublicKey: Clone, -// { -// /// get total stake -// /// # Panics -// /// If converting non-zero stake into `NonZeroU64` fails -// pub fn get_stake(&self, pk: &JfPubKey) -> Option -// where -// SIGSCHEME: SignatureScheme< -// VerificationKey = VRF::PublicKey, -// PublicParameter = (), -// MessageUnit = u8, -// >, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// { -// let encoded = pk.to_bytes(); -// let stake = self.mapping.get(&encoded).map(|val| val.get()); -// stake.and_then(NonZeroU64::new) -// } -// } - -// /// the vrf implementation -// #[derive(Derivative)] -// #[derivative(Debug, Eq, PartialEq)] -// pub struct VrfImpl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> -// where -// VRF: Vrf + Sync + Send, -// TYPES: NodeType, -// { -// /// the stake table -// #[derivative(Debug = "ignore", PartialEq = "ignore")] -// stake_table: VRFStakeTable, -// /// the proof params -// #[derivative(Debug = "ignore", PartialEq = "ignore")] -// proof_parameters: VRF::PublicParameter, -// /// the rng -// #[derivative(PartialEq = "ignore")] -// prng: std::sync::Arc>, -// /// the committee parameter -// sortition_parameter: NonZeroU64, -// /// the chain commitment seed -// chain_seed: [u8; 32], -// /// pdf cache -// #[derivative(PartialEq = "ignore")] -// _sortition_cache: std::sync::Arc>>>, - -// /// phantom data -// _pd: PhantomData<(TYPES, LEAF, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS)>, -// } - -// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> Clone -// for VrfImpl -// where -// VRF: Vrf + Sync + Send, -// TYPES: NodeType, -// { -// fn clone(&self) -> Self { -// Self { -// stake_table: self.stake_table.clone(), -// proof_parameters: (), -// prng: self.prng.clone(), -// sortition_parameter: self.sortition_parameter, -// chain_seed: self.chain_seed, -// _sortition_cache: Arc::default(), -// _pd: PhantomData, -// } -// } -// } - -// /// TODO doc me -// #[derive(Serialize, Deserialize, Clone)] -// pub struct VRFVoteToken { -// /// The public key assocaited with this token -// pub pub_key: PUBKEY, -// /// The list of signatures -// pub proof: PROOF, -// /// The number of signatures that are valid -// /// TODO (ct) this should be the sorition outbput -// pub count: NonZeroU64, -// } - -// impl Hash for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn hash(&self, state: &mut H) { -// bincode_opts().serialize(&self.pub_key).unwrap().hash(state); -// bincode_opts().serialize(&self.proof).unwrap().hash(state); -// self.count.hash(state); -// } -// } - -// impl Debug for VRFVoteToken { -// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// f.debug_struct("VRFVoteToken") -// .field("pub_key", &std::any::type_name::()) -// .field("proof", &std::any::type_name::()) -// .field("count", &self.count) -// .finish() -// } -// } - -// impl PartialEq for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn eq(&self, other: &Self) -> bool { -// self.count == other.count -// && bincode_opts().serialize(&self.pub_key).unwrap() -// == bincode_opts().serialize(&other.pub_key).unwrap() -// && bincode_opts().serialize(&self.proof).unwrap() -// == bincode_opts().serialize(&other.proof).unwrap() -// } -// } - -// impl VoteToken for VRFVoteToken -// where -// PUBKEY: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, -// PROOF: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static, -// { -// fn vote_count(&self) -> NonZeroU64 { -// self.count -// } -// } - -// impl Committable for VRFVoteToken -// where -// PUBKEY: serde::Serialize, -// PROOF: serde::Serialize, -// { -// fn commit(&self) -> Commitment { -// RawCommitmentBuilder::new(std::any::type_name::()) -// .u64(self.count.get()) -// .var_size_bytes(bincode_opts().serialize(&self.pub_key).unwrap().as_slice()) -// .var_size_bytes(bincode_opts().serialize(&self.proof).unwrap().as_slice()) -// .finalize() -// } - -// fn tag() -> String { -// tag::VRF_VOTE_TOKEN.to_string() -// } -// } - -// // KEY is VRFPubKey -// impl> -// Membership for VrfImpl -// where -// SIGSCHEME: SignatureScheme + Sync + Send + 'static, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// VRF: Vrf< -// PublicParameter = (), -// Input = Vec, -// Output = Vec, -// PublicKey = SIGSCHEME::VerificationKey, -// SecretKey = SIGSCHEME::SigningKey, -// > + Sync -// + Send -// + 'static, -// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, -// VRF::PublicParameter: Sync + Send, -// VRFHASHER: digest::Digest + Clone + Sync + Send + 'static, -// VRFPARAMS: Sync + Send + Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// TYPES: NodeType< -// VoteTokenType = VRFVoteToken, -// ElectionConfigType = VRFStakeTableConfig, -// SignatureKey = JfPubKey, -// >, -// { -// // pubkey -> unit of stake -// type StakeTable = VRFStakeTable; - -// // FIXED STAKE -// // just return the state -// fn get_stake_table( -// &self, -// _view_number: TYPES::Time, -// _state: &TYPES::StateType, -// ) -> Self::StakeTable { -// self.stake_table.clone() -// } - -// fn get_leader(&self, view_number: TYPES::Time) -> JfPubKey { -// // TODO fst2 (ct) this is round robin, we should make this dependent on -// // the VRF + some source of randomness - -// // TODO for now do by stake table of how much stake each -// // participant has -// let mapping = &self.stake_table.mapping; -// let index = ((*view_number) as usize) % mapping.len(); -// let encoded = mapping.keys().nth(index).unwrap(); -// SignatureKey::from_bytes(encoded).unwrap() -// } - -// // what this is doing: -// // - -// fn make_vote_token( -// // TODO see if we can make this take &mut self -// // because we're using a mutable prng -// &self, -// view_number: TYPES::Time, -// private_key: &(SIGSCHEME::SigningKey, SIGSCHEME::VerificationKey), -// ) -> Result, ElectionError> { -// let pub_key = JfPubKey::::from_native(private_key.1.clone()); -// let Some(replicas_stake) = self.stake_table.get_stake(&pub_key) else { return Ok(None) }; - -// let view_seed = generate_view_seed::(view_number, &self.chain_seed); - -// let proof = Self::internal_get_vrf_proof( -// &private_key.0, -// &self.proof_parameters, -// &mut self.prng.lock().unwrap(), -// &view_seed, -// )?; - -// let selected_stake = Self::internal_get_sortition_for_proof( -// &self.proof_parameters, -// &proof, -// self.stake_table.get_all_stake(), -// replicas_stake, -// self.sortition_parameter, -// ); - -// match selected_stake { -// Some(count) => { -// // TODO (ct) this can fail, return Result::Err -// let proof = VRF::prove( -// &self.proof_parameters, -// &private_key.0, -// &view_seed, -// &mut *self.prng.lock().unwrap(), -// ) -// .unwrap(); - -// Ok(Some(VRFVoteToken { -// pub_key: private_key.1.clone(), -// proof, -// count, -// })) -// } -// None => Ok(None), -// } -// } - -// fn validate_vote_token( -// &self, -// view_number: TYPES::Time, -// pub_key: JfPubKey, -// token: Checked, -// ) -> Result, ElectionError> { -// match token { -// Checked::Unchecked(token) => { -// let stake: Option = self.stake_table.get_stake(&pub_key); -// let view_seed = -// generate_view_seed::(view_number, &self.chain_seed); -// if let Some(stake) = stake { -// Self::internal_check_sortition( -// &pub_key.pk, -// &self.proof_parameters, -// &token.proof, -// self.stake_table.get_all_stake(), -// stake, -// self.sortition_parameter, -// token.count, -// &view_seed, -// ) -// .map(|c| match c { -// Checked::Inval(_) => Checked::Inval(token), -// Checked::Valid(_) => Checked::Valid(token), -// Checked::Unchecked(_) => Checked::Unchecked(token), -// }) -// } else { -// // TODO better error -// Err(ElectionError::StubError) -// } -// } -// already_checked => Ok(already_checked), -// } -// } - -// fn create_election(keys: Vec>, config: TYPES::ElectionConfigType) -> Self { -// // This all needs to be refactored. For one thing, having the stake table - even an initial -// // stake table - hardcoded like this is flat-out broken. This is, obviously, an artifact -// let genesis_seed = [0u8; 32]; -// VrfImpl::with_initial_stake(keys, &config, genesis_seed) -// } - -// fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { -// let mut stake = Vec::new(); -// let units_of_stake_per_node = NonZeroU64::new(100).unwrap(); -// for _ in 0..num_nodes { -// stake.push(units_of_stake_per_node); -// } -// VRFStakeTableConfig { -// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: stake, -// } -// } - -// fn success_threshold(&self) -> NonZeroU64 { -// NonZeroU64::new(((u64::from(self.sortition_parameter) * 2) / 3) + 1).unwrap() -// } - -// fn failure_threshold(&self) -> NonZeroU64 { -// NonZeroU64::new(((u64::from(self.sortition_parameter)) / 3) + 1).unwrap() -// } -// /// TODO if we ever come back to using this, we'll need to change this -// /// this stub is incorrect as it stands right now -// fn get_committee( -// &self, -// _view_number: ::Time, -// ) -> std::collections::BTreeSet<::SignatureKey> { -// self.stake_table -// .mapping -// .keys() -// .clone() -// .filter_map(::SignatureKey::from_bytes) -// .collect() -// } -// } - -// /// checks that the expected aomunt of stake matches the VRF output -// /// TODO this can be optimized most likely -// fn check_bin_idx( -// expected_amount_of_stake: u64, -// replicas_stake: u64, -// total_stake: u64, -// sortition_parameter: u64, -// unnormalized_seed: &[u8; 32], -// cache: &mut HashMap>, -// ) -> Option { -// let bin_idx = find_bin_idx( -// replicas_stake, -// total_stake, -// sortition_parameter, -// unnormalized_seed, -// cache, -// ); -// bin_idx.map(|idx| idx == NonZeroU64::new(expected_amount_of_stake).unwrap()) -// } - -// /// generates the seed from algorand paper -// /// baseed on `view_number` and a constant as of now, but in the future will be other things -// /// this is a stop-gap -// fn generate_view_seed( -// view_number: TYPES::Time, -// vrf_seed: &[u8; 32], -// ) -> [u8; 32] { -// let mut hasher = HASHER::new(); -// hasher.update(vrf_seed); -// hasher.update(view_number.deref().to_le_bytes()); -// let mut output = [0u8; 32]; -// output.copy_from_slice(hasher.finalize().as_ref()); -// output -// } - -// /// represents a binomial query made by sortition -// /// `B(stake_attempt; replicas_stake; sortition_parameter / total_stake)` -// #[derive(Hash, Eq, PartialEq, Clone, Debug)] -// pub struct BinomialQuery { -// /// the number of heads -// stake_attempt: u64, -// /// the total number of coin flips -// replicas_stake: u64, -// /// the total amount of stake -// total_stake: u64, -// /// the sortition parameter -// sortition_parameter: u64, -// } - -// impl BinomialQuery { -// /// get the committee parameter -// /// for this query -// #[must_use] -// pub fn get_p(&self) -> Ratio { -// let sortition_parameter_big: BigUint = BigUint::from(self.sortition_parameter); -// let total_stake_big: BigUint = BigUint::from(self.total_stake); -// Ratio::new(sortition_parameter_big, total_stake_big) -// } -// } - -// #[instrument] -// fn calculate_threshold_from_cache( -// previous_calculation: Option<(BinomialQuery, Ratio)>, -// query: BinomialQuery, -// ) -> Option> { -// if let Some((previous_query, previous_result)) = previous_calculation { -// let expected_previous_query = BinomialQuery { -// stake_attempt: query.stake_attempt - 1, -// ..query -// }; -// if previous_query == expected_previous_query { -// let permutation = Ratio::new( -// BigUint::from(query.replicas_stake - query.stake_attempt + 1), -// BigUint::from(query.stake_attempt), -// ); -// let p = query.get_p(); -// assert!(p.numer() < p.denom()); -// let reciprocal = Ratio::recip(&(Ratio::from_integer(BigUint::from(1_u32)) - p.clone())); -// let result = previous_result * p * reciprocal * permutation; -// assert!(result.numer() < result.denom()); - -// return Some(result); -// } -// } -// calculate_threshold(query) -// } - -// // Calculates B(j; w; p) where B means bernoulli distribution. -// // That is: run w trials, with p probability of success for each trial, and return the probability -// // of j successes. -// // p = tau / W, where tau is the sortition parameter (controlling committee size) -// // this is the only usage of W and tau -// // -// // Translation: -// // stake_attempt: our guess at what the stake might be. This is j -// // replicas_stake: the units of stake owned by the replica. This is w -// // total_stake: the units of stake owned in total. This is W -// // sorition_parameter: the parameter controlling the committee size. This is tau -// // -// // TODO (ct) better error handling -// // returns none if one of our calculations fails -// // -// // TODO keep data around from last iteration so less calculation is needed -// // TODO test this "correct/simple" implementation against any optimized version -// #[instrument] -// // fn calculate_threshold(stake_attempt: u32, replicas_stake: u64, total_stake: u64, sortition_parameter: u64) -> Option> { -// fn calculate_threshold(query: BinomialQuery) -> Option> { -// let stake_attempt = query.stake_attempt; -// tracing::info!("Running calculate threshold"); -// // TODO (ct) better error handling -// if stake_attempt > query.replicas_stake { -// error!("j is larger than amount of stake we are allowed"); -// return None; -// } - -// let sortition_parameter_big: BigUint = BigUint::from(query.sortition_parameter); -// let total_stake_big: BigUint = BigUint::from(query.total_stake); -// let one_big = BigUint::from(1_u32); - -// // this is the p parameter for the bernoulli distribution -// let p = Ratio::new(sortition_parameter_big, total_stake_big); - -// assert!(p.numer() <= p.denom()); - -// info!("p is {p:?}"); - -// // number of tails in bernoulli -// let failed_num = query.replicas_stake - stake_attempt; - -// // TODO cancel things out (avoid calculating factorial) -// // TODO can just do division -// let num_permutations = Ratio::new( -// factorial(query.replicas_stake), -// factorial(stake_attempt) * factorial(failed_num), -// ); - -// info!("num permutations is {num_permutations:?}, failed_num is {failed_num:?}"); - -// let one = Ratio::from_integer(one_big); - -// // TODO can keep results from last try -// let result = num_permutations -// * (p.pow(i32::try_from(stake_attempt).ok()?) -// * (one - p).pow(i32::try_from(failed_num).ok()?)); - -// assert!(result.numer() < result.denom()); - -// info!("result is is {result:?}"); - -// Some(result) -// } - -// /// compute i! as a biguint -// fn factorial(mut i: u64) -> BigUint { -// if i == 0 { -// return BigUint::from(1u32); -// } - -// let mut result = BigUint::from(1u32); -// while i > 0 { -// result *= i; -// i -= 1; -// } -// result -// } - -// /// find the amount of stake we rolled. -// /// NOTE: in the future this requires a view numb -// /// Returns None if zero stake was rolled -// #[instrument] -// fn find_bin_idx( -// replicas_stake: u64, -// total_stake: u64, -// sortition_parameter: u64, -// unnormalized_seed: &[u8; 32], -// cache: &mut HashMap>, -// ) -> Option { -// let unnormalized_seed = BigUint::from_bytes_le(unnormalized_seed); -// let normalized_seed = Ratio::new(unnormalized_seed, BigUint::from(2_u32).pow(256)); -// assert!(normalized_seed.numer() < normalized_seed.denom()); -// let mut j: u64 = 0; - -// // [j, j+1) -// // [cdf(j),cdf(j+1)) - -// // left_threshold corresponds to the sum of all bernoulli distributions -// // from i in 0 to j: B(i; replicas_stake; p). Where p is calculated later and corresponds to -// // algorands paper -// let mut left_threshold = Ratio::from_integer(BigUint::from(0u32)); - -// loop { -// // check cache - -// // if cache miss, feed in with previous val from cache -// // that *probably* exists - -// assert!(left_threshold.numer() < left_threshold.denom()); -// let query = BinomialQuery { -// stake_attempt: j + 1, -// replicas_stake, -// total_stake, -// sortition_parameter, -// }; - -// let bin_val = { -// // we already computed this value -// if let Some(result) = cache.get(&query) { -// result.clone() -// } else { -// // we haven't computed this value, but maybe -// // we already computed the previous value - -// let mut maybe_old_query = query.clone(); -// maybe_old_query.stake_attempt -= 1; -// let old_result = cache -// .get(&maybe_old_query) -// .map(|x| (maybe_old_query, x.clone())); -// let result = calculate_threshold_from_cache(old_result, query.clone())?; -// cache.insert(query, result.clone()); -// result -// } -// }; - -// // corresponds to right range from apper -// let right_threshold = left_threshold + bin_val.clone(); - -// // debugging info. Unnecessary -// { -// let right_threshold_float = ToPrimitive::to_f64(&right_threshold.clone()); -// let bin_val_float = ToPrimitive::to_f64(&bin_val.clone()); -// let normalized_seed_float = ToPrimitive::to_f64(&normalized_seed.clone()); -// info!("rightthreshold: {right_threshold_float:?}, bin: {bin_val_float:?}, seed: {normalized_seed_float:?}"); -// } - -// // from i in 0 to j + 1: B(i; replicas_stake; p) -// if normalized_seed < right_threshold { -// match j { -// 0 => return None, -// _ => return Some(NonZeroU64::new(j).unwrap()), -// } -// } -// left_threshold = right_threshold; -// j += 1; -// } -// } - -// impl, SIGSCHEME, VRF, VRFHASHER, VRFPARAMS> -// VrfImpl -// where -// SIGSCHEME: SignatureScheme + Sync + Send, -// SIGSCHEME::VerificationKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::SigningKey: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// SIGSCHEME::Signature: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, -// VRF: Vrf< -// PublicParameter = (), -// Input = [u8; 32], -// Output = [u8; 32], -// PublicKey = SIGSCHEME::VerificationKey, -// SecretKey = SIGSCHEME::SigningKey, -// > + Sync -// + Send, -// VRF::Proof: Clone + Sync + Send + Serialize + for<'a> Deserialize<'a>, -// VRF::PublicParameter: Sync + Send, -// VRFHASHER: digest::Digest + Clone + Sync + Send, -// VRFPARAMS: Sync + Send + Bls12Parameters, -// ::G1Parameters: TEHashToGroup, -// TYPES: NodeType, -// { -// /// create stake table with this initial stake -// /// # Panics -// /// TODO -// #[must_use] -// pub fn with_initial_stake( -// known_nodes: Vec>, -// config: &VRFStakeTableConfig, -// genesis_seed: [u8; 32], -// ) -> Self { -// assert_eq!(known_nodes.iter().len(), config.distribution.len()); -// let key_with_stake = known_nodes -// .into_iter() -// .map(|x| x.to_bytes()) -// .zip(config.distribution.clone()) -// .collect(); -// VrfImpl { -// stake_table: { -// let st = VRFStakeTable { -// mapping: key_with_stake, -// total_stake: NonZeroU64::new(config.distribution.iter().map(|x| x.get()).sum()) -// .unwrap(), -// _pd: PhantomData, -// }; -// st -// }, -// proof_parameters: (), -// chain_seed: genesis_seed, -// prng: Arc::new(Mutex::new(ChaChaRng::from_seed(Default::default()))), -// _pd: PhantomData, -// sortition_parameter: config.sortition_parameter, -// _sortition_cache: Arc::default(), -// } -// } - -// /// stateless delegate for VRF proof generation -// /// # Errors -// /// - -// fn internal_get_vrf_proof( -// private_key: &SIGSCHEME::SigningKey, -// proof_param: &VRF::PublicParameter, -// to_refactor: &mut rand_chacha::ChaChaRng, -// vrf_in_seed: &VRF::Input, -// ) -> Result { -// VRF::prove(proof_param, private_key, vrf_in_seed, to_refactor) -// .map_err(|_| ElectionError::StubError) -// } - -// /// stateless delegate for VRF sortition generation -// fn internal_get_sortition_for_proof( -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// ) -> Option { -// // TODO (ct) this can fail, return result::err -// let hash = VRF::evaluate(proof_param, proof).unwrap(); -// let mut cache: HashMap> = HashMap::new(); - -// find_bin_idx( -// u64::from(voter_stake), -// u64::from(total_stake), -// sortition_parameter.into(), -// &hash, -// &mut cache, -// ) -// } - -// /// stateless delegate for VRF sortition confirmation -// /// # Errors -// /// if the proof is malformed -// #[allow(clippy::too_many_arguments)] -// fn internal_check_sortition( -// public_key: &SIGSCHEME::VerificationKey, -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// sortition_claim: NonZeroU64, -// vrf_in_seed: &VRF::Input, -// ) -> Result, hotshot_types::traits::election::ElectionError> { -// if let Ok(true) = VRF::verify(proof_param, proof, public_key, vrf_in_seed) { -// let seed = VRF::evaluate(proof_param, proof).map_err(|_| ElectionError::StubError)?; -// if let Some(res) = check_bin_idx( -// u64::from(sortition_claim), -// u64::from(voter_stake), -// u64::from(total_stake), -// u64::from(sortition_parameter), -// &seed, -// &mut HashMap::new(), -// ) { -// if res { -// Ok(Checked::Valid(())) -// } else { -// Ok(Checked::Inval(())) -// } -// } else { -// Ok(Checked::Unchecked(())) -// } -// } else { -// Ok(Checked::Inval(())) -// } -// } - -// /// Stateless method to produce VRF proof and sortition for a given view number -// /// # Errors -// /// -// pub fn get_sortition_proof( -// private_key: &SIGSCHEME::SigningKey, -// proof_param: &VRF::PublicParameter, -// chain_seed: &VRF::Input, -// view_number: TYPES::Time, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// ) -> Result<(VRF::Proof, Option), hotshot_types::traits::election::ElectionError> -// { -// let mut rng = ChaChaRng::from_seed(Default::default()); // maybe use something else that isn't deterministic? -// let view_seed = generate_view_seed::(view_number, chain_seed); -// let proof = Self::internal_get_vrf_proof(private_key, proof_param, &mut rng, &view_seed)?; -// let sortition = Self::internal_get_sortition_for_proof( -// proof_param, -// &proof, -// total_stake, -// voter_stake, -// sortition_parameter, -// ); -// Ok((proof, sortition)) -// } - -// /// Stateless method to verify VRF proof and sortition for a given view number -// /// # Errors -// /// -// #[allow(clippy::too_many_arguments)] -// pub fn check_sortition_proof( -// public_key: &JfPubKey, -// proof_param: &VRF::PublicParameter, -// proof: &VRF::Proof, -// total_stake: NonZeroU64, -// voter_stake: NonZeroU64, -// sortition_parameter: NonZeroU64, -// sortition_claim: NonZeroU64, -// chain_seed: &VRF::Input, -// view_number: TYPES::Time, -// ) -> Result { -// let view_seed = generate_view_seed::(view_number, chain_seed); -// Self::internal_check_sortition( -// &public_key.pk, -// proof_param, -// proof, -// total_stake, -// voter_stake, -// sortition_parameter, -// sortition_claim, -// &view_seed, -// ) -// .map(|c| matches!(c, Checked::Valid(_))) -// } -// } - -// impl> TestableElection -// for VrfImpl -// where -// TYPES: NodeType< -// VoteTokenType = VRFVoteToken< -// BLSVerKey, -// BLSSignature, -// >, -// ElectionConfigType = VRFStakeTableConfig, -// SignatureKey = JfPubKey, -// >, -// { -// fn generate_test_vote_token() -> TYPES::VoteTokenType { -// VRFVoteToken { -// count: NonZeroU64::new(1234).unwrap(), -// proof: BLSSignature::default(), -// pub_key: BLSVerKey::default(), -// } -// } -// } - -// /// configuration specifying the stake table -// #[derive(Clone, Serialize, Deserialize, core::fmt::Debug)] -// pub struct VRFStakeTableConfig { -// /// the committee size parameter -// pub sortition_parameter: NonZeroU64, -// /// the ordered distribution of stake across nodes -// pub distribution: Vec, -// } - -// impl Default for VRFStakeTableConfig { -// fn default() -> Self { -// VRFStakeTableConfig { -// sortition_parameter: NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: Vec::new(), -// } -// } -// } - -// impl ElectionConfig for VRFStakeTableConfig {} - -// Tests have been commented out, so `mod tests` isn't used. -// #[cfg(test)] -// mod tests { -// use super::*; -// use ark_bls12_381::Parameters as Param381; -// use ark_std::test_rng; - -// use blake3::Hasher; -// use hotshot_types::{ -// data::ViewNumber, -// traits::{ -// block_contents::dummy::{DummyBlock, DummyTransaction}, -// consensus_type::validating_consensus::ValidatingConsensus, -// state::dummy::DummyState, -// }, -// }; -// use jf_primitives::{ -// signatures::{ -// bls::{BLSSignature, BLSVerKey}, -// BLSSignatureScheme, -// }, -// vrf::blsvrf::BLSVRFScheme, -// }; -// use std::{num::NonZeroUsize, time::Duration}; - -// #[derive( -// Copy, -// Clone, -// Debug, -// Default, -// Hash, -// PartialEq, -// Eq, -// PartialOrd, -// Ord, -// serde::Serialize, -// serde::Deserialize, -// )] -// struct TestTypes; -// impl NodeType for TestTypes { -// // TODO (da) can this be SequencingConsensus? -// type ConsensusType = ValidatingConsensus; -// type Time = ViewNumber; -// type BlockPayload = DummyBlock; -// type SignatureKey = JfPubKey; -// type VoteTokenType = VRFVoteToken< -// BLSVerKey, -// BLSSignature, -// >; -// type Transaction = DummyTransaction; -// type ElectionConfigType = VRFStakeTableConfig; -// type StateType = DummyState; -// } - -// fn gen_vrf_impl>( -// num_nodes: usize, -// ) -> ( -// VrfImpl< -// TestTypes, -// LEAF, -// BLSSignatureScheme, -// BLSVRFScheme, -// Hasher, -// Param381, -// >, -// Vec<( -// jf_primitives::signatures::bls::BLSSignKey, -// jf_primitives::signatures::bls::BLSVerKey, -// )>, -// ) { -// let mut known_nodes = Vec::new(); -// let mut keys = Vec::new(); -// let rng = &mut test_rng(); -// let mut stake_distribution = Vec::new(); -// let stake_per_node = NonZeroU64::new(100).unwrap(); -// let genesis_seed = [0u8; 32]; -// for _i in 0..num_nodes { -// let (sk, pk) = BLSSignatureScheme::::key_gen(&(), rng).unwrap(); -// keys.push((sk.clone(), pk.clone())); -// known_nodes.push(JfPubKey::from_native(pk.clone())); -// stake_distribution.push(stake_per_node); -// } -// let stake_table = VrfImpl::with_initial_stake( -// known_nodes, -// &VRFStakeTableConfig { -// sortition_parameter: std::num::NonZeroU64::new(SORTITION_PARAMETER).unwrap(), -// distribution: stake_distribution, -// }, -// genesis_seed, -// ); -// (stake_table, keys) -// } - -// pub fn check_if_valid(token: &Checked) -> bool { -// match token { -// Checked::Valid(_) => true, -// Checked::Inval(_) | Checked::Unchecked(_) => false, -// } -// } - -// // #[test] -// // pub fn test_sortition() { -// // setup_logging(); -// // let (vrf_impl, keys) = gen_vrf_impl::>(10); -// // let views = 100; - -// // for view in 0..views { -// // for (node_idx, (sk, pk)) in keys.iter().enumerate() { -// // let token_result = vrf_impl -// // .make_vote_token(ViewNumber::new(view), &(sk.clone(), pk.clone())) -// // .unwrap(); -// // match token_result { -// // Some(token) => { -// // let count = token.count; -// // let result = vrf_impl -// // .validate_vote_token( -// // ViewNumber::new(view), -// // JfPubKey::from_native(pk.clone()), -// // Checked::Unchecked(token), -// // ) -// // .unwrap(); -// // let result_is_valid = check_if_valid(&result); -// // error!("view {view:?}, node_idx {node_idx:?}, stake {count:?} "); -// // assert!(result_is_valid); -// // } -// // _ => continue, -// // } -// // } -// // } -// // } - -// #[test] -// pub fn test_factorial() { -// assert_eq!(factorial(0), BigUint::from(1u32)); -// assert_eq!(factorial(1), BigUint::from(1u32)); -// assert_eq!(factorial(2), BigUint::from(2u32)); -// assert_eq!(factorial(3), BigUint::from(6u32)); -// assert_eq!(factorial(4), BigUint::from(24u32)); -// assert_eq!(factorial(5), BigUint::from(120u32)); -// } - -// // TODO add failure case - -// #[test] -// fn network_config_is_serializable() { -// // validate that `RunResults` can be serialized -// // Note that there is currently an issue with `VRFPubKey` where it can't be serialized with toml -// // so instead we only test with serde_json -// let key = -// as TestableSignatureKey>::generate_test_key(1); -// let pub_key = JfPubKey::::from_private(&key); -// let mut config = hotshot_centralized_server::NetworkConfig { -// config: hotshot_types::HotShotConfig { -// election_config: Some(super::VRFStakeTableConfig { -// distribution: vec![NonZeroU64::new(1).unwrap()], -// sortition_parameter: NonZeroU64::new(1).unwrap(), -// }), -// known_nodes: vec![pub_key], -// execution_type: hotshot_types::ExecutionType::Incremental, -// total_nodes: NonZeroUsize::new(1).unwrap(), -// min_transactions: 1, -// max_transactions: NonZeroUsize::new(1).unwrap(), -// next_view_timeout: 1, -// timeout_ratio: (1, 1), -// round_start_delay: 1, -// start_delay: 1, -// num_bootstrap: 1, -// propose_min_round_time: Duration::from_secs(1), -// propose_max_round_time: Duration::from_secs(1), -// }, -// ..Default::default() -// }; -// serde_json::to_string(&config).unwrap(); -// assert!(toml::to_string(&config).is_err()); - -// // validate that this is indeed a `pub_key` issue -// config.config.known_nodes.clear(); -// serde_json::to_string(&config).unwrap(); -// toml::to_string(&config).unwrap(); -// } -// } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 5511e4d4cd..12e4d0ddde 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -173,7 +173,7 @@ mod test { view_number, }, DummyBlock::random(rng), - DummyBlock::random(rng).transaction_commitments(), + Some(DummyBlock::random(rng)), DummyState::random(rng), dummy_leaf_commit, Vec::new(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7d588c7a8d..2b1355f29d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -30,6 +30,7 @@ use hotshot_types::{ node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, signature_key::SignatureKey, state::ConsensusTime, + BlockPayload, }, utils::{Terminator, ViewInner}, vote::{QuorumVote, QuorumVoteAccumulator, TimeoutVoteAccumulator, VoteType}, @@ -455,7 +456,7 @@ where justify_qc: proposal.justify_qc.clone(), parent_commitment, block_header: proposal.block_header.clone(), - transaction_commitments: HashSet::new(), + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -525,7 +526,7 @@ where justify_qc: proposal.justify_qc.clone(), parent_commitment, block_header: proposal.block_header.clone(), - transaction_commitments: HashSet::new(), + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), @@ -740,7 +741,7 @@ where justify_qc: justify_qc.clone(), parent_commitment: justify_qc.leaf_commitment(), block_header: proposal.data.block_header, - transaction_commitments: HashSet::new(), + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -765,7 +766,7 @@ where justify_qc: justify_qc.clone(), parent_commitment, block_header: proposal.data.block_header, - transaction_commitments: HashSet::new(), + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender.to_bytes(), @@ -853,18 +854,25 @@ where .last_synced_block_height .set(usize::try_from(leaf.get_height()).unwrap_or(0)); - // If the full block is available for this leaf, include it in the leaf - // chain that we send to the client. - if let Some(comm) = consensus - .saved_transaction_commitments + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(payload) = consensus + .saved_block_payloads .get(leaf.get_payload_commitment()) { - leaf.fill_transaction_commitments(comm.clone()); + if let Err(e) = leaf.fill_block_payload(payload.clone()) { + error!( + "Saved block payload and commitment don't match: {:?}", + e + ); + } } leaf_views.push(leaf.clone()); - for txn in leaf.transaction_commitments { - included_txns.insert(txn); + if let Some(payload) = leaf.block_payload { + for txn in payload.transaction_commitments() { + included_txns.insert(txn); + } } } true @@ -1379,7 +1387,7 @@ where justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: TYPES::BlockHeader::new(*payload_commitment, &parent_header), - transaction_commitments: HashSet::new(), + block_payload: None, rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.api.public_key().to_bytes(), diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index f2f8237c2f..6d62b11a77 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -286,11 +286,10 @@ where }, }); - // Record the block we have promised to make available. - consensus.saved_transaction_commitments.insert( - proposal.data.block_payload.commit(), - proposal.data.block_payload.transaction_commitments(), - ); + // Record the block payload we have promised to make available. + consensus + .saved_block_payloads + .insert(proposal.data.block_payload); } } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4b5b356d4a..509a8b45fa 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -22,6 +22,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::{ConsensusExchange, Membership, QuorumExchangeType}, node_implementation::{NodeImplementation, NodeType, QuorumEx}, + BlockPayload, }, }; use hotshot_utils::bincode::bincode_opts; @@ -137,8 +138,10 @@ where let mut included_txn_size = 0; let mut included_txn_count = 0; for leaf in leaf_chain { - for txn in leaf.transaction_commitments { - included_txns.insert(txn); + if let Some(payload) = leaf.block_payload { + for txn in payload.transaction_commitments() { + included_txns.insert(txn); + } } } let consensus = self.consensus.read().await; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 63883ca897..dda95a09af 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -357,7 +357,7 @@ where // Record the block we have promised to make available. // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_transaction_commitments.insert(proposal.data.block_payload); + // consensus.saved_block_payloads.insert(proposal.data.block_payload); } } } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 32a3d5f2cd..3aa8b87ffb 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,5 +1,3 @@ -use std::collections::HashSet; - use crate::{ node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, @@ -123,7 +121,7 @@ async fn build_quorum_proposal_and_signature( justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), - transaction_commitments: HashSet::new(), + block_payload: None, rejected: vec![], timestamp: 0, proposer_id: api.public_key().to_bytes(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 7ddc824c19..12feb914a1 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -22,7 +22,6 @@ use hotshot_types::{ }; use std::collections::HashMap; -use std::collections::HashSet; async fn build_vote( handle: &SystemContextHandle, @@ -65,7 +64,7 @@ async fn build_vote( justify_qc: proposal.justify_qc.clone(), parent_commitment, block_header: proposal.block_header, - transaction_commitments: HashSet::new(), + block_payload: None, rejected: Vec::new(), timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e61685ad69..99a8b00c51 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -20,7 +20,7 @@ use crate::{ use commit::Commitment; use derivative::Derivative; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap}, sync::{Arc, Mutex}, }; use tracing::error; @@ -48,11 +48,10 @@ pub struct Consensus> { /// - includes the MOST RECENT decided leaf pub saved_leaves: CommitmentMap, - /// Saved transaction commitments + /// Saved block payloads /// - /// Contains the transaction commitments of the block for every leaf in `saved_leaves` if that - /// commitment set is available. - pub saved_transaction_commitments: TransactionStore, + /// Contains the block payload for every leaf in `saved_leaves` if that payload is available. + pub saved_block_payloads: BlockPayloadStore, /// The `locked_qc` view number pub locked_view: TYPES::Time, @@ -299,7 +298,7 @@ impl> Consensus { } /// garbage collects based on state change - /// right now, this removes from both the `saved_transaction_commitments` + /// right now, this removes from both the `saved_block_payloads` /// and `state_map` fields of `Consensus` /// # Panics /// On inconsistent stored entries @@ -325,14 +324,14 @@ impl> Consensus { .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_payload_commitment()) .for_each(|block| { - self.saved_transaction_commitments.remove(block); + self.saved_block_payloads.remove(block); }); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_transaction_commitments + self.saved_block_payloads .remove(removed.get_payload_commitment()); } }); @@ -354,10 +353,7 @@ impl> Consensus { } } -/// Alias for the set of transaction commitments. -type TransactionCommitments = HashSet::Transaction>>; - -/// Mapping from block payload commitments to the set of transaction commitents. +/// Mapping from block payload commitments to the payloads. /// /// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the /// same block, and the block will only be deleted after _all_ such objects are garbage collected. @@ -365,29 +361,23 @@ type TransactionCommitments = HashSet( - HashMap, (TransactionCommitments, u64)>, -); +pub struct BlockPayloadStore(HashMap, (PAYLOAD, u64)>); -impl TransactionStore { +impl BlockPayloadStore { /// Save payload commitment for later retrieval. /// /// After calling this function, and before the corresponding call to [`remove`](Self::remove), - /// `self.get(payload_commitment)` will return `Some(transaction_commitments)`. + /// `self.get(payload_commitment)` will return `Some(payload)`. /// /// This function will increment a reference count on the saved payload commitment, so that /// multiple calls to [`insert`](Self::insert) for the same payload commitment result in /// multiple owning references to the payload commitment. [`remove`](Self::remove) must be /// called once for each reference before the payload commitment will be deallocated. - pub fn insert( - &mut self, - payload_commitment: Commitment, - transaction_commitments: TransactionCommitments, - ) { + pub fn insert(&mut self, payload: PAYLOAD) { self.0 - .entry(payload_commitment) + .entry(payload.commit()) .and_modify(|(_, refcount)| *refcount += 1) - .or_insert((transaction_commitments, 1)); + .or_insert((payload, 1)); } /// Get a saved set of transaction commitments, if available. @@ -396,29 +386,21 @@ impl TransactionStore { /// function will retrieve it. It may return [`None`] if a block with the given commitment has /// not been saved or if the block has been dropped with [`remove`](Self::remove). #[must_use] - pub fn get( - &self, - payload_commitment: Commitment, - ) -> Option<&HashSet::Transaction>>> { - self.0 - .get(&payload_commitment) - .map(|(txn_comm, _)| txn_comm) + pub fn get(&self, payload_commitment: Commitment) -> Option<&PAYLOAD> { + self.0.get(&payload_commitment).map(|(payload, _)| payload) } /// Drop a reference to a saved set of transaction commitments. /// /// If the set exists and this call drops the last reference to it, the set will be returned, /// Otherwise, the return value is [`None`]. - pub fn remove( - &mut self, - payload_commitment: Commitment, - ) -> Option::Transaction>>> { + pub fn remove(&mut self, payload_commitment: Commitment) -> Option { if let Entry::Occupied(mut e) = self.0.entry(payload_commitment) { let (_, refcount) = e.get_mut(); *refcount -= 1; if *refcount == 0 { - let (txn_comm, _) = e.remove(); - return Some(txn_comm); + let (payload, _) = e.remove(); + return Some(payload); } } None diff --git a/types/src/data.rs b/types/src/data.rs index 418fbd64b9..7a00900131 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -28,8 +28,8 @@ use hotshot_utils::bincode::bincode_opts; use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; +use snafu::Snafu; use std::{ - collections::HashSet, fmt::{Debug, Display}, hash::Hash, }; @@ -315,6 +315,17 @@ pub trait DeltasType: fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; } +/// Error which occurs when [`LeafType::fill_block_payload`] is called with a payload commitment +/// that does not match the internal payload commitment of the leaf. +#[derive(Clone, Copy, Debug, Snafu)] +#[snafu(display("the block payload {:?} has commitment {} (expected {})", payload, payload.commit(), commitment))] +pub struct InconsistentPayloadCommitmentError { + /// The block payload with the wrong commitment. + payload: PAYLOAD, + /// The expected commitment. + commitment: Commitment, +} + /// An item which is appended to a blockchain. pub trait LeafType: Debug @@ -362,20 +373,22 @@ pub trait LeafType: /// Commitment to this leaf's parent. fn get_parent_commitment(&self) -> Commitment; /// The block header contained in this leaf. - fn get_block_header(&self) -> ::BlockHeader; + fn get_block_header(&self) -> &::BlockHeader; /// A commitment to the block payload contained in this leaf. fn get_payload_commitment(&self) -> Commitment> { self.get_block_header().payload_commitment() } - /// Fill the transaciton commitments of this leaf with the corresponding block payload. - fn fill_transaction_commitments( + /// Fill this leaf with the block payload. + /// + /// # Errors + /// + /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()`. + fn fill_block_payload( &mut self, - transaction_commitments: HashSet::Transaction>>, - ); - /// Optional set of commitments to the transactions. - fn get_transanction_commitments( - &self, - ) -> HashSet::Transaction>>; + block_payload: ::BlockPayload, + ) -> Result<(), InconsistentPayloadCommitmentError<::BlockPayload>>; + /// Optional block payload. + fn get_block_payload(&self) -> Option<::BlockPayload>; /// The blockchain state after appending this leaf. fn get_state(&self) -> Self::MaybeState; /// Transactions rejected or invalidated by the application of this leaf. @@ -473,11 +486,10 @@ pub struct Leaf { /// Block header. pub block_header: TYPES::BlockHeader, - /// Set of commitments to the contained transactions. + /// Optional block payload. /// /// It may be empty for nodes not in the DA committee. - pub transaction_commitments: - HashSet::Transaction>>, + pub block_payload: Option, /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, @@ -496,7 +508,6 @@ impl PartialEq for Leaf { && self.justify_qc == other.justify_qc && self.parent_commitment == other.parent_commitment && self.block_header == other.block_header - && self.transaction_commitments == other.transaction_commitments && self.rejected == other.rejected } } @@ -507,13 +518,9 @@ impl Hash for Leaf { self.justify_qc.hash(state); self.parent_commitment.hash(state); self.block_header.hash(state); - for com in &self.transaction_commitments { - com.hash(state); - } self.rejected.hash(state); } } - impl Display for ValidatingLeaf { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( @@ -526,7 +533,6 @@ impl Display for ValidatingLeaf { impl LeafType for ValidatingLeaf { type NodeType = TYPES; - // type DeltasType = TYPES::BlockPayload; type MaybeState = TYPES::StateType; fn new( @@ -564,20 +570,19 @@ impl LeafType for ValidatingLeaf { self.parent_commitment } - fn get_block_header(&self) -> ::BlockHeader { + fn get_block_header(&self) -> &::BlockHeader { unimplemented!("Unimplemented for validating consensus which will be removed."); } - fn fill_transaction_commitments( + fn fill_block_payload( &mut self, - _transaction_commitments: HashSet::Transaction>>, - ) { + _block_payload: ::BlockPayload, + ) -> Result<(), InconsistentPayloadCommitmentError<::BlockPayload>> + { unimplemented!("Unimplemented for validating consensus which will be removed."); } - fn get_transanction_commitments( - &self, - ) -> HashSet::Transaction>> { + fn get_block_payload(&self) -> Option<::BlockPayload> { unimplemented!("Unimplemented for validating consensus which will be removed."); } @@ -650,7 +655,7 @@ impl LeafType for Leaf { justify_qc, parent_commitment: fake_commitment(), block_header: TYPES::BlockHeader::genesis(payload.clone()), - transaction_commitments: payload.transaction_commitments(), + block_payload: Some(payload), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: genesis_proposer_id(), @@ -673,21 +678,27 @@ impl LeafType for Leaf { self.parent_commitment } - fn get_block_header(&self) -> ::BlockHeader { - self.block_header.clone() + fn get_block_header(&self) -> &::BlockHeader { + &self.block_header } - fn fill_transaction_commitments( + fn fill_block_payload( &mut self, - transaction_commitments: HashSet::Transaction>>, - ) { - self.transaction_commitments = transaction_commitments; + block_payload: ::BlockPayload, + ) -> Result<(), InconsistentPayloadCommitmentError<::BlockPayload>> + { + if block_payload.commit() != self.block_header.payload_commitment() { + return Err(InconsistentPayloadCommitmentError { + payload: block_payload, + commitment: self.block_header.payload_commitment(), + }); + } + self.block_payload = Some(block_payload); + Ok(()) } - fn get_transanction_commitments( - &self, - ) -> HashSet::Transaction>> { - self.transaction_commitments.clone() + fn get_block_payload(&self) -> Option<::BlockPayload> { + self.block_payload.clone() } // The Sequencing Leaf doesn't have a state. @@ -711,7 +722,7 @@ impl LeafType for Leaf { justify_qc: stored_view.justify_qc, parent_commitment: stored_view.parent, block_header: stored_view.block_header, - transaction_commitments: stored_view.transaction_commitments, + block_payload: stored_view.block_payload, rejected: stored_view.rejected, timestamp: stored_view.timestamp, proposer_id: stored_view.proposer_id, @@ -886,8 +897,8 @@ where parent: leaf.get_parent_commitment(), justify_qc: leaf.get_justify_qc(), state: leaf.get_state(), - block_header: leaf.get_block_header(), - transaction_commitments: leaf.get_transanction_commitments(), + block_header: leaf.get_block_header().clone(), + block_payload: leaf.get_block_payload(), rejected: leaf.get_rejected(), timestamp: leaf.get_timestamp(), proposer_id: leaf.get_proposer_id(), diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 80df3b9841..24c7e0459f 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -10,7 +10,7 @@ use async_trait::async_trait; use commit::Commitment; use derivative::Derivative; use snafu::Snafu; -use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; /// Errors that can occur in the storage layer. #[derive(Clone, Debug, Snafu)] #[snafu(visibility(pub))] @@ -135,11 +135,10 @@ pub struct StoredView> { pub state: LEAF::MaybeState, /// Block header. pub block_header: TYPES::BlockHeader, - /// Set of commitments to the contained transactions. + /// Optional block payload. /// /// It may be empty for nodes not in the DA committee. - pub transaction_commitments: - HashSet::Transaction>>, + pub block_payload: Option, /// transactions rejected in this view pub rejected: Vec, /// the timestamp this view was recv-ed in nanonseconds @@ -155,15 +154,14 @@ where TYPES: NodeType, LEAF: LeafType, { - /// Create a new `StoredView` from the given QC, `BlockPayload` and State. + /// Create a new `StoredView` from the given QC, `BlockHeader`, `BlockPayload` and State. /// - /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. + /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a + /// parent. pub fn from_qc_block_and_state( qc: QuorumCertificate>, block_header: TYPES::BlockHeader, - transaction_commitments: HashSet< - Commitment<::Transaction>, - >, + block_payload: Option, state: LEAF::MaybeState, parent_commitment: Commitment, rejected: Vec<::Transaction>, @@ -175,7 +173,7 @@ where justify_qc: qc, state, block_header, - transaction_commitments, + block_payload, rejected, timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id, From 44718bf1fceff8c71ef06712c467f959e34c9a37 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 30 Oct 2023 22:28:02 -0700 Subject: [PATCH 0285/1393] add struct ValidatorConfigFile --- orchestrator/src/config.rs | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index e4cca05230..b091e7c5e0 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -200,6 +200,18 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, } +/// Holds configuration for a validator node +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(bound(deserialize = ""))] +pub struct ValidatorConfigFile { + /// The validator's public key and stake value + pub public_key: KEY, + /// The validator's private key, should be in the mempool, not public + pub private_key: KEY::PrivateKey, + /// The validator's stake + pub stake_value: u64, +} + impl From> for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { @@ -233,6 +245,23 @@ fn default_padding() -> usize { 100 } +impl From> for ValidatorConfig { + fn from(val: ValidatorConfigFile) -> Self { + ValidatorConfig { + public_key: val.public_key, + private_key: val.private_key, + stake_value: val.stake_value, + } + } +} +impl From> for HotShotConfig { + fn from(value: ValidatorConfigFile) -> Self { + let mut config: HotShotConfig = HotShotConfigFile::default().into(); + config.my_own_validator_config = value.into(); + config + } +} + impl Default for HotShotConfigFile { fn default() -> Self { let gen_known_nodes_with_stake = (0..10) From 2f102f5801b4a0d4a6b46270eb2179f9df548c37 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 31 Oct 2023 16:12:37 -0400 Subject: [PATCH 0286/1393] Adding a simple certificate type --- types/src/lib.rs | 3 +- types/src/simple_certificate.rs | 86 ++++++++++++++++ types/src/simple_vote.rs | 176 ++++++++++++++++++++++++++++++++ types/src/vote2.rs | 68 +++++++----- 4 files changed, 308 insertions(+), 25 deletions(-) create mode 100644 types/src/simple_certificate.rs create mode 100644 types/src/simple_vote.rs diff --git a/types/src/lib.rs b/types/src/lib.rs index 620d49a17b..8021122547 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -18,7 +18,8 @@ pub mod data; pub mod error; pub mod event; pub mod message; -pub mod quorum_vote; +pub mod simple_certificate; +pub mod simple_vote; pub mod traits; pub mod utils; pub mod vote; diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs new file mode 100644 index 0000000000..2046852097 --- /dev/null +++ b/types/src/simple_certificate.rs @@ -0,0 +1,86 @@ +#![allow(dead_code)] +#![allow(clippy::missing_docs_in_private_items)] +#![allow(missing_docs)] + +use std::marker::PhantomData; + +use commit::Commitment; +use ethereum_types::U256; + +use crate::{ + simple_vote::Voteable, + traits::{ + election::Membership, node_implementation::NodeType, signature_key::SignatureKey, + state::ConsensusTime, + }, + vote2::Certificate2, +}; + +/// A certificate which can be created by aggregating many simple votes on the commitment. +pub struct SimpleCertificate> { + /// commitment to previous leaf + pub leaf_commitment: VOTEABLE, + /// commitment of all the voetes this should be signed over + pub vote_commitment: Commitment, + /// Which view this QC relates to + pub view_number: TYPES::Time, + /// assembled signature for certificate aggregation + pub signatures: ::QCType, + /// If this QC is for the genesis block + pub is_genesis: bool, + /// phantom data for `MEMBERSHIP` and `TYPES` + _pd: PhantomData<(TYPES, MEMBERSHIP)>, +} + +impl> + Certificate2 for SimpleCertificate +{ + type Commitment = VOTEABLE; + type Membership = MEMBERSHIP; + + fn create_signed_certificate( + vote_commitment: Commitment, + data: Self::Commitment, + sig: ::QCType, + view: TYPES::Time, + ) -> Self { + SimpleCertificate { + leaf_commitment: data, + vote_commitment, + view_number: view, + signatures: sig, + is_genesis: false, + _pd: PhantomData, + } + } + fn is_valid_cert( + &self, + vote_commitment: Commitment, + membership: &MEMBERSHIP, + ) -> bool { + if vote_commitment != self.vote_commitment { + return false; + } + if self.is_genesis && self.view_number == TYPES::Time::genesis() { + return true; + } + let real_qc_pp = ::get_public_parameter( + membership.get_committee_qc_stake_table(), + U256::from(membership.success_threshold().get()), + ); + ::check( + &real_qc_pp, + vote_commitment.as_ref(), + &self.signatures, + ) + } + fn threshold(membership: &MEMBERSHIP) -> u64 { + membership.success_threshold().into() + } + fn get_data(&self) -> &Self::Commitment { + &self.leaf_commitment + } + fn get_data_commitment(&self) -> Commitment { + self.vote_commitment + } +} diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs new file mode 100644 index 0000000000..410ac2028d --- /dev/null +++ b/types/src/simple_vote.rs @@ -0,0 +1,176 @@ +#![allow(dead_code)] +#![allow(clippy::missing_docs_in_private_items)] +#![allow(missing_docs)] + +use std::marker::PhantomData; + +use commit::{Commitment, Committable}; + +use crate::{ + traits::{ + election::Membership, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + }, + vote2::{HasViewNumber, Vote2}, +}; + +pub struct YesData { + pub leaf_commit: Commitment, +} +pub struct NoData { + pub leaf_commit: Commitment, +} +pub struct DAData { + pub block_commit: Commitment, +} +pub struct TimeoutData { + pub view: TYPES::Time, +} +pub struct VIDData { + pub block_commit: Commitment, +} +pub struct ViewSyncPreCommitData { + /// The relay this vote is intended for + pub relay: EncodedPublicKey, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} +pub struct ViewSyncCommitData { + /// The relay this vote is intended for + pub relay: EncodedPublicKey, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} +pub struct ViewSyncFinalizeData { + /// The relay this vote is intended for + pub relay: EncodedPublicKey, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} + +/// Marker trait for data or commitments that can be voted on. +pub trait Voteable: sealed::Sealed + Committable + Clone {} + +/// Sealed is used to make sure no other files can implement the Voteable trait. +/// All simple voteable types should be implemented here. This prevents us from +/// creating/using improper types when using the vote types. +mod sealed { + use commit::Committable; + + pub trait Sealed {} + + // TODO: Does the implement for things outside this file that are commitable? + impl Sealed for C {} +} + +/// A simple yes vote over some votable type. +pub struct SimpleVote> { + /// The signature share associated with this vote + pub signature: (EncodedPublicKey, EncodedSignature), + /// The leaf commitment being voted on. + pub data: DATA, + /// The view this vote was cast for + pub current_view: TYPES::Time, + /// phantom data for `MEMBERSHIP` + _pd: PhantomData, +} + +impl> HasViewNumber + for SimpleVote +{ + fn get_view_number(&self) -> ::Time { + self.current_view + } +} + +impl> Vote2 + for SimpleVote +{ + type Commitment = DATA; + type Membership = MEMBERSHIP; + + fn get_signing_key(&self) -> ::SignatureKey { + ::from_bytes(&self.signature.0).unwrap() + } + + fn get_signature(&self) -> EncodedSignature { + self.signature.1.clone() + } + + fn get_data(&self) -> &DATA { + &self.data + } + + fn get_data_commitment(&self) -> Commitment { + self.data.commit() + } +} + +impl Committable for YesData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("Yes Vote") + .var_size_bytes(self.leaf_commit.as_ref()) + .finalize() + } +} +impl Committable for NoData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("No Vote") + .var_size_bytes(self.leaf_commit.as_ref()) + .finalize() + } +} +impl Committable for DAData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("DA Vote") + .var_size_bytes(self.block_commit.as_ref()) + .finalize() + } +} +impl Committable for VIDData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("DA Vote") + .var_size_bytes(self.block_commit.as_ref()) + .finalize() + } +} + +fn view_and_relay_commit( + view: TYPES::Time, + relay: &EncodedPublicKey, + tag: &str, +) -> Commitment { + let builder = commit::RawCommitmentBuilder::new(tag); + builder + .var_size_field("Relay public key", &relay.0) + .u64(*view) + .finalize() +} + +impl Committable for ViewSyncPreCommitData { + fn commit(&self) -> Commitment { + view_and_relay_commit::(self.round, &self.relay, "View Sync Precommit") + } +} + +impl Committable for ViewSyncFinalizeData { + fn commit(&self) -> Commitment { + view_and_relay_commit::(self.round, &self.relay, "View Sync Finalize") + } +} +impl Committable for ViewSyncCommitData { + fn commit(&self) -> Commitment { + view_and_relay_commit::(self.round, &self.relay, "View Sync Commit") + } +} + +// Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file +pub type YesVote = SimpleVote, M>; +pub type NoVote = SimpleVote, M>; +pub type DAVote = SimpleVote, M>; +pub type VIDVote = SimpleVote, M>; +pub type TimeoutVote = SimpleVote, M>; +pub type ViewSyncCommitVote = SimpleVote, M>; +pub type ViewSyncPreCommitVote = SimpleVote, M>; +pub type ViewSyncFinalizeVote = SimpleVote, M>; diff --git a/types/src/vote2.rs b/types/src/vote2.rs index a7ca2c96af..175845a2c1 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -7,29 +7,34 @@ use std::{ use bincode::Options; use bitvec::vec::BitVec; -use commit::CommitmentBounds; +use commit::Commitment; use either::Either; use ethereum_types::U256; use hotshot_utils::bincode::bincode_opts; use tracing::error; -use crate::traits::{ - election::Membership, - node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, +use crate::{ + simple_vote::Voteable, + traits::{ + election::Membership, + node_implementation::NodeType, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + }, }; /// A simple vote that has a signer and commitment to the data voted on. -pub trait Vote2: 'static { +pub trait Vote2: HasViewNumber { /// The membership of those that send this vote type type Membership: Membership; /// Type of data commitment this vote uses. - type Commitment: CommitmentBounds; + type Commitment: Voteable; /// Get the signature of the vote sender fn get_signature(&self) -> EncodedSignature; - /// Gets the Data commitment the vote references - fn get_data_commitment(&self) -> Self::Commitment; + /// Gets the data which was voted on by this vote + fn get_data(&self) -> &Self::Commitment; + /// Gets the Data commitment of the vote + fn get_data_commitment(&self) -> Commitment; /// Gets the public signature key of the votes creator/sender fn get_signing_key(&self) -> TYPES::SignatureKey; @@ -37,33 +42,43 @@ pub trait Vote2: 'static { } /// Any type that is associated with a view -pub trait ViewNumber { +pub trait HasViewNumber { /// Returns the view number the type refers to. fn get_view_number(&self) -> TYPES::Time; } -/// The certificate formed from the collection of signatures a committee. -/// The committee is defined by the `Membership` associated type. -/// The votes all must be over the `Commitment` associated type. +/** +The certificate formed from the collection of signatures a committee. +The committee is defined by the `Membership` associated type. +The votes all must be over the `Commitment` associated type. +*/ pub trait Certificate2 { /// Type that defines membership for voters on the certificate type Membership: Membership; /// The data commitment this certificate certifies. - type Commitment: CommitmentBounds; + type Commitment: Voteable; /// Build a certificate from the data commitment and the quorum of signers fn create_signed_certificate( - data_commitment: Self::Commitment, + vote_commitment: Commitment, + data: Self::Commitment, sig: ::QCType, + view: TYPES::Time, ) -> Self; /// Checks if the cert is valid - fn is_valid_cert(&self) -> bool; + fn is_valid_cert( + &self, + vote_commitment: Commitment, + membership: &Self::Membership, + ) -> bool; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold() -> u64; - /// Get the data commitment the certificate is referencing - fn get_data_commitment(&self) -> Self::Commitment; + fn threshold(membership: &Self::Membership) -> u64; + /// Get the commitment which was voted on + fn get_data(&self) -> &Self::Commitment; + /// Get the vote commitment which the votes commit to + fn get_data_commitment(&self) -> Commitment; } /// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs @@ -73,7 +88,7 @@ pub struct VoteAccumulator2< CERT: Certificate2, > { /// Map of all signatures accumlated so far - pub vote_outcomes: VoteMap2, + pub vote_outcomes: VoteMap2>, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check @@ -85,7 +100,7 @@ pub struct VoteAccumulator2< impl< TYPES: NodeType, VOTE: Vote2, - CERT: Certificate2, + CERT: Certificate2, > VoteAccumulator2 { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we @@ -144,11 +159,11 @@ impl< (vote.get_signature(), vote.get_data_commitment()), ); - if *total_stake_casted >= CERT::threshold() { + if *total_stake_casted >= CERT::threshold(membership) { // Assemble QC let real_qc_pp = ::get_public_parameter( stake_table.clone(), - U256::from(CERT::threshold()), + U256::from(CERT::threshold(membership)), ); let real_qc_sig = ::assemble( @@ -157,7 +172,12 @@ impl< &self.sig_lists[..], ); - let cert = CERT::create_signed_certificate(vote.get_data_commitment(), real_qc_sig); + let cert = CERT::create_signed_certificate( + vote.get_data_commitment(), + vote.get_data().clone(), + real_qc_sig, + vote.get_view_number(), + ); return Either::Right(cert); } Either::Left(self) From 4b9d4842c669f215e6a4a8062b9688e640b876da Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 31 Oct 2023 14:52:14 -0700 Subject: [PATCH 0287/1393] Remove DummyBlock --- hotshot/src/traits/storage/memory_storage.rs | 26 ++-- types/src/traits/block_contents.rs | 122 ------------------- types/src/traits/state.rs | 22 +++- 3 files changed, 30 insertions(+), 140 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 12e4d0ddde..7b7b1ccd01 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -117,16 +117,13 @@ mod test { use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use super::*; + use commit::Committable; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ + block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::{AssembledSignature, QuorumCertificate}, data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, - traits::{ - block_contents::dummy::{DummyBlock, DummyState}, - node_implementation::NodeType, - state::ConsensusTime, - BlockPayload, - }, + traits::{node_implementation::NodeType, state::dummy::DummyState, state::ConsensusTime}, }; use std::{fmt::Debug, hash::Hash}; use tracing::instrument; @@ -148,11 +145,11 @@ mod test { impl NodeType for DummyTypes { type Time = ViewNumber; - type BlockHeader = DummyBlock; - type BlockPayload = DummyBlock; + type BlockHeader = VIDBlockHeader; + type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; type VoteTokenType = StaticVoteToken; - type Transaction = ::Transaction; + type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DummyState; } @@ -162,8 +159,11 @@ mod test { rng: &mut dyn rand::RngCore, view_number: ::Time, ) -> StoredView> { - // TODO is it okay to be using genesis here? - let _dummy_block_commit = fake_commitment::(); + let payload = VIDBlockPayload::genesis(); + let header = VIDBlockHeader { + block_number: 0, + payload_commitment: payload.commit(), + }; let dummy_leaf_commit = fake_commitment::>(); StoredView::from_qc_block_and_state( QuorumCertificate { @@ -172,8 +172,8 @@ mod test { signatures: AssembledSignature::Genesis(), view_number, }, - DummyBlock::random(rng), - Some(DummyBlock::random(rng)), + header, + Some(payload), DummyState::random(rng), dummy_leaf_commit, Vec::new(), diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index a61a2613d9..0e753e082c 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -76,125 +76,3 @@ pub trait BlockHeader: /// Get the payload commitment. fn payload_commitment(&self) -> Commitment; } - -/// Dummy implementation of `BlockPayload` for unit tests -pub mod dummy { - use std::fmt::Display; - - use super::{ - BlockHeader, BlockPayload, Commitment, Committable, Debug, Hash, HashSet, Serialize, - }; - use rand::Rng; - use serde::Deserialize; - - pub use crate::traits::state::dummy::DummyState; - use crate::traits::state::TestableBlock; - - // TODO (Keyao) Investigate the use of DummyBlock. - // - /// The dummy block - #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)] - pub struct DummyBlock { - /// Some dummy data - pub nonce: u64, - } - - impl DummyBlock { - /// Generate a random `DummyBlock` - pub fn random(rng: &mut dyn rand::RngCore) -> Self { - Self { nonce: rng.gen() } - } - } - - /// Dummy error - #[derive(Debug)] - pub struct DummyError; - - /// dummy transaction. No functionality - #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] - pub enum DummyTransaction { - /// the only variant. Dummy. - Dummy, - } - - impl Committable for DummyTransaction { - fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") - .u64_field("Dummy Field", 0) - .finalize() - } - - fn tag() -> String { - "DUMMY_TXN".to_string() - } - } - impl super::Transaction for DummyTransaction {} - - impl std::error::Error for DummyError {} - - impl std::fmt::Display for DummyError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("A bad thing happened") - } - } - - impl Display for DummyBlock { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:#?}") - } - } - - impl BlockHeader for DummyBlock { - type Payload = Self; - - fn new(_payload_commitment: Commitment, _parent_header: &Self) -> Self { - Self { nonce: 0 } - } - - fn genesis(_payload: Self::Payload) -> Self { - Self { nonce: 0 } - } - - fn block_number(&self) -> u64 { - 0 - } - - fn payload_commitment(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") - .u64_field("Nonce", self.nonce) - .finalize() - } - } - - impl BlockPayload for DummyBlock { - type Error = DummyError; - - type Transaction = DummyTransaction; - - fn transaction_commitments(&self) -> HashSet> { - HashSet::new() - } - } - - impl TestableBlock for DummyBlock { - fn genesis() -> Self { - Self { nonce: 0 } - } - - fn txn_count(&self) -> u64 { - 1 - } - } - - impl Committable for DummyBlock { - fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy BlockPayload Comm") - .u64_field("Nonce", self.nonce) - .finalize() - } - - fn tag() -> String { - "DUMMY_BLOCK".to_string() - } - } -} diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 3d6b8f6d29..4899e9976a 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -129,12 +129,24 @@ pub trait TestableBlock: BlockPayload + Debug { pub mod dummy { use super::{tag, Committable, Debug, Hash, Serialize, State, TestableState}; use crate::{ + block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, data::ViewNumber, - traits::block_contents::dummy::{DummyBlock, DummyError, DummyTransaction}, }; use rand::Rng; use serde::Deserialize; + /// Dummy error + #[derive(Debug)] + pub struct DummyError; + + impl std::error::Error for DummyError {} + + impl std::fmt::Display for DummyError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("A bad thing happened") + } + } + /// The dummy state #[derive(Clone, Debug, Default, Hash, PartialEq, Eq, Serialize, Deserialize)] pub struct DummyState { @@ -165,8 +177,8 @@ pub mod dummy { impl State for DummyState { type Error = DummyError; - type BlockHeader = DummyBlock; - type BlockPayload = DummyBlock; + type BlockHeader = VIDBlockHeader; + type BlockPayload = VIDBlockPayload; type Time = ViewNumber; fn validate_block( @@ -201,8 +213,8 @@ pub mod dummy { _state: Option<&Self>, _: &mut dyn rand::RngCore, _: u64, - ) -> DummyTransaction { - DummyTransaction::Dummy + ) -> VIDTransaction { + VIDTransaction(vec![0u8]) } } } From c6fefe20d6bc000279e2c3a80d87a921f507d5cf Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 1 Nov 2023 00:38:19 -0700 Subject: [PATCH 0288/1393] add validator config file --- orchestrator/src/config.rs | 85 +++++++++++++++++++++++++++++++------ testing/src/test_builder.rs | 28 ++++++------ types/src/lib.rs | 2 - 3 files changed, 88 insertions(+), 27 deletions(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index b091e7c5e0..f9a08711ac 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -6,7 +6,12 @@ use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, time::Duration, + env, + path::PathBuf, }; +use tracing::error; +use toml; +use std::fs; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, @@ -203,15 +208,66 @@ pub struct HotShotConfigFile { /// Holds configuration for a validator node #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] -pub struct ValidatorConfigFile { - /// The validator's public key and stake value - pub public_key: KEY, - /// The validator's private key, should be in the mempool, not public - pub private_key: KEY::PrivateKey, +pub struct ValidatorConfigFile { + /// The validator's seed + pub seed: [u8; 32], + /// The validator's index, which can be treated as another input to the seed + pub node_id: u64, /// The validator's stake pub stake_value: u64, } +impl Default for ValidatorConfigFile { + fn default() -> Self { + Self { + seed: [0u8; 32], + node_id: 0, + stake_value: 1, + } + } +} + +fn get_current_working_dir() -> std::io::Result { + env::current_dir() +} + +impl ValidatorConfigFile { + pub fn from_file() -> Self { + let current_working_dir = match get_current_working_dir() { + Ok(dir) => dir, + Err(e) => { + error!("get_current_working_dir error: {:?}", e); + PathBuf::from("") + } + }; + let filename = current_working_dir.into_os_string().into_string().unwrap() + + "/../../config/ValidatorConfigFile.toml"; + match fs::read_to_string(filename.clone()) { + // If successful return the files text as `contents`. + Ok(contents) => { + let data: ValidatorConfigFile = match toml::from_str(&contents) { + // If successful, return data as `Data` struct. + // `d` is a local variable. + Ok(d) => d, + // Handle the `error` case. + Err(e) => { + // Write `msg` to `stderr`. + error!("Unable to load data from `{}`: {}", filename, e); + ValidatorConfigFile::default() + } + }; + data + } + // Handle the `error` case. + Err(e) => { + // Write `msg` to `stderr`. + error!("Could not read file `{}`: {}", filename, e); + ValidatorConfigFile::default() + } + } + } +} + impl From> for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { @@ -245,17 +301,22 @@ fn default_padding() -> usize { 100 } -impl From> for ValidatorConfig { - fn from(val: ValidatorConfigFile) -> Self { +impl From for ValidatorConfig { + fn from(val: ValidatorConfigFile) -> Self { + let validator_config = ValidatorConfig::generated_from_seed_indexed( + val.seed, + val.node_id, + val.stake_value, + ); ValidatorConfig { - public_key: val.public_key, - private_key: val.private_key, - stake_value: val.stake_value, + public_key: validator_config.public_key, + private_key: validator_config.private_key, + stake_value: validator_config.stake_value, } } } -impl From> for HotShotConfig { - fn from(value: ValidatorConfigFile) -> Self { +impl From for HotShotConfig { + fn from(value: ValidatorConfigFile) -> Self { let mut config: HotShotConfig = HotShotConfigFile::default().into(); config.my_own_validator_config = value.into(); config diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index b506403f42..9d3b3889b2 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,4 +1,5 @@ use hotshot::types::SignatureKey; +use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::traits::election::{ConsensusExchange, Membership}; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; @@ -20,7 +21,6 @@ use super::{ overall_safety_task::OverallSafetyPropertiesDescription, txn_task::TxnTaskDescription, }; use hotshot::{HotShotType, SystemContext}; - /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { @@ -201,20 +201,22 @@ impl TestMetadata { .. } = self.clone(); - // We assign known_nodes' public key and stake value rather than read from config file since it's a test - let known_nodes: Vec<::SignatureKey> = (0..total_nodes) - .map(|id| { - let priv_key = - TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id as u64).1; - TYPES::SignatureKey::from_private(&priv_key) - }) + // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. + let known_nodes_with_stake = (0..total_nodes) + .map(|node_id| { + let cur_validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id as u64, 1); + + cur_validator_config.public_key.get_stake_table_entry(cur_validator_config.stake_value) + } ) .collect(); - let known_nodes_with_stake: Vec<::StakeTableEntry> = - (0..total_nodes) - .map(|id| known_nodes[id].get_stake_table_entry(1u64)) - .collect(); - let my_own_validator_config = + // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. + let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + if node_id == 0 { + my_own_validator_config = + ValidatorConfig::from(ValidatorConfigFile::from_file()); + } // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { // TODO this doesn't exist anymore diff --git a/types/src/lib.rs b/types/src/lib.rs index d75d95389d..4a74e0767a 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -10,9 +10,7 @@ #![allow(clippy::module_name_repetitions)] use std::{num::NonZeroUsize, time::Duration}; - use traits::{election::ElectionConfig, signature_key::SignatureKey}; - pub mod block_impl; pub mod certificate; pub mod consensus; From 43bd6397217850ec97bdc02912506fb1bde69ab3 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 1 Nov 2023 00:45:18 -0700 Subject: [PATCH 0289/1393] fix lint --- orchestrator/src/config.rs | 17 +++++++---------- testing/src/test_builder.rs | 13 +++++++------ 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index f9a08711ac..8783010dda 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -2,16 +2,16 @@ use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, ExecutionType, HotShotConfig, ValidatorConfig, }; +use std::fs; use std::{ + env, net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, - time::Duration, - env, path::PathBuf, + time::Duration, }; -use tracing::error; use toml; -use std::fs; +use tracing::error; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, @@ -241,7 +241,7 @@ impl ValidatorConfigFile { } }; let filename = current_working_dir.into_os_string().into_string().unwrap() - + "/../../config/ValidatorConfigFile.toml"; + + "/../../config/ValidatorConfigFile.toml"; match fs::read_to_string(filename.clone()) { // If successful return the files text as `contents`. Ok(contents) => { @@ -303,11 +303,8 @@ fn default_padding() -> usize { impl From for ValidatorConfig { fn from(val: ValidatorConfigFile) -> Self { - let validator_config = ValidatorConfig::generated_from_seed_indexed( - val.seed, - val.node_id, - val.stake_value, - ); + let validator_config = + ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, val.stake_value); ValidatorConfig { public_key: validator_config.public_key, private_key: validator_config.private_key, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 9d3b3889b2..aa25ade1e7 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -204,18 +204,19 @@ impl TestMetadata { // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. let known_nodes_with_stake = (0..total_nodes) .map(|node_id| { - let cur_validator_config: ValidatorConfig = + let cur_validator_config: ValidatorConfig = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id as u64, 1); - - cur_validator_config.public_key.get_stake_table_entry(cur_validator_config.stake_value) - } ) + + cur_validator_config + .public_key + .get_stake_table_entry(cur_validator_config.stake_value) + }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); if node_id == 0 { - my_own_validator_config = - ValidatorConfig::from(ValidatorConfigFile::from_file()); + my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file()); } // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { From 9b4149af0d0eb2d09ffdf2611d215e4c442890c7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 1 Nov 2023 13:44:25 -0400 Subject: [PATCH 0290/1393] integrating votes; partially integrating certs/accumulator --- .../src/traits/election/static_committee.rs | 2 +- hotshot/src/types/handle.rs | 46 +-- task-impls/src/consensus.rs | 318 +++++++++--------- task-impls/src/events.rs | 17 +- types/src/message.rs | 18 +- types/src/simple_certificate.rs | 18 +- types/src/simple_vote.rs | 44 ++- types/src/traits/election.rs | 103 +----- types/src/vote2.rs | 19 +- 9 files changed, 270 insertions(+), 315 deletions(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index ccdcfb722f..8252ac2562 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -17,7 +17,7 @@ use tracing::debug; /// Dummy implementation of [`Membership`] -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct GeneralStaticCommittee, PUBKEY: SignatureKey> { /// All the nodes participating and their stake nodes_with_stake: Vec, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 16ca56d549..88cfcf95dd 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -330,29 +330,29 @@ impl + 'static> SystemContextHandl } /// create a yes message - #[cfg(feature = "hotshot-testing")] - pub fn create_yes_message( - &self, - justify_qc_commitment: Commitment>>, - leaf_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - >, - { - let inner = self.hotshot.inner.clone(); - inner.exchanges.quorum_exchange().create_yes_message( - justify_qc_commitment, - leaf_commitment, - current_view, - vote_token, - ) - } + // #[cfg(feature = "hotshot-testing")] + // pub fn create_yes_message( + // &self, + // justify_qc_commitment: Commitment>>, + // leaf_commitment: Commitment, + // current_view: TYPES::Time, + // vote_token: TYPES::VoteTokenType, + // ) -> GeneralConsensusMessage + // where + // QuorumEx: ConsensusExchange< + // TYPES, + // Message, + // Certificate = QuorumCertificate>, + // >, + // { + // let inner = self.hotshot.inner.clone(); + // inner.exchanges.quorum_exchange().create_yes_message( + // justify_qc_commitment, + // leaf_commitment, + // current_view, + // vote_token, + // ) + // } /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function #[cfg(feature = "hotshot-testing")] diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5139519813..e614161177 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -21,17 +21,22 @@ use hotshot_types::{ data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, + simple_certificate::QuorumCertificate2, + simple_vote::{YesData, YesVote}, traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, QuorumExchangeType, SignedCertificate, TimeoutExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, + node_implementation::{ + CommitteeEx, NodeImplementation, NodeType, QuorumEx, QuorumMembership, TimeoutEx, + }, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, }, utils::{Terminator, ViewInner}, vote::{QuorumVote, QuorumVoteAccumulator, TimeoutVoteAccumulator, VoteType}, + vote2::{HasViewNumber, VoteAccumulator2}, }; use tracing::warn; @@ -167,13 +172,12 @@ pub struct VoteCollectionTaskState< #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< - >> as SignedCertificate< + VoteAccumulator2< TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Commitment>, - >>::VoteAccumulator, - QuorumCertificate>>, + YesVote>, + QuorumCertificate2>, + >, + QuorumCertificate2>, >, /// Accumulator for votes @@ -241,57 +245,49 @@ where >, { match event { - HotShotEvent::QuorumVoteRecv(vote) => match vote.clone() { - QuorumVote::Yes(vote_internal) => { - // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { - return (None, state); - } + HotShotEvent::QuorumVoteRecv(vote) => { + // For the case where we receive votes after we've made a certificate + if state.accumulator.is_right() { + return (None, state); + } - if vote_internal.current_view != state.cur_view { - error!( - "Vote view does not match! vote view is {} current view is {}", - *vote_internal.current_view, *state.cur_view - ); + if vote.get_view_number() != state.cur_view { + error!( + "Vote view does not match! vote view is {} current view is {}", + *vote.get_view_number(), + *state.cur_view + ); + return (None, state); + } + + let accumulator = state.accumulator.left().unwrap(); + + match accumulator.accumulate(&vote, state.quorum_exchange.membership()) { + Either::Left(acc) => { + state.accumulator = Either::Left(acc); return (None, state); } + Either::Right(qc) => { + debug!("QCFormed! {:?}", qc.view_number); + state + .event_stream + .publish(HotShotEvent::QCFormed(either::Left(qc.clone()))) + .await; + state.accumulator = Either::Right(qc.clone()); - let accumulator = state.accumulator.left().unwrap(); + // No longer need to poll for votes + state + .quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; - match state.quorum_exchange.accumulate_vote( - accumulator, - &vote, - &vote_internal.leaf_commitment, - ) { - Either::Left(acc) => { - state.accumulator = Either::Left(acc); - return (None, state); - } - Either::Right(qc) => { - debug!("QCFormed! {:?}", qc.view_number); - state - .event_stream - .publish(HotShotEvent::QCFormed(either::Left(qc.clone()))) - .await; - state.accumulator = Either::Right(qc.clone()); - - // No longer need to poll for votes - state - .quorum_exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; - - return (Some(HotShotTaskCompleted::ShutDown), state); - } + return (Some(HotShotTaskCompleted::ShutDown), state); } } - QuorumVote::No(_) => { - error!("The next leader has received an unexpected vote!"); - } - }, + } // TODO: Code below is redundant of code above; can be fixed // during exchange refactor // https://github.com/EspressoSystems/HotShot/issues/1799 @@ -458,19 +454,19 @@ where timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), }; - - let message: GeneralConsensusMessage = - self.quorum_exchange.create_yes_message( - proposal.justify_qc.commit(), - leaf.commit(), + let vote = + YesVote::>::create_signed_vote( + YesData { leaf_commit: leaf.commit() }, view, - vote_token, + &self.quorum_exchange.public_key(), + &self.quorum_exchange.private_key(), ); + let message = GeneralConsensusMessage::::Vote(vote); if let GeneralConsensusMessage::Vote(vote) = message { debug!( "Sending vote to next quorum leader {:?}", - vote.get_view() + 1 + vote.get_view_number() + 1 ); self.event_stream .publish(HotShotEvent::QuorumVoteSend(vote)) @@ -528,30 +524,34 @@ where timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), }; - let message: GeneralConsensusMessage= + // Validate the DAC. - if self - .committee_exchange - .is_valid_cert(cert) - { + let message = if self.committee_exchange.is_valid_cert(cert) { // Validate the block commitment for non-genesis DAC. - if !cert.is_genesis() && cert.leaf_commitment() != proposal.block_commitment { + if !cert.is_genesis() + && cert.leaf_commitment() != proposal.block_commitment + { error!("Block commitment does not equal parent commitment"); return false; } - self.quorum_exchange.create_yes_message( - proposal.justify_qc.commit(), - leaf.commit(), - cert.view_number, - vote_token) + let vote = + YesVote::>::create_signed_vote( + YesData { leaf_commit: leaf.commit() }, + view, + &self.quorum_exchange.public_key(), + &self.quorum_exchange.private_key(), + ); + GeneralConsensusMessage::::Vote(vote) } else { error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert, self.cur_view ); return false; - }; if let GeneralConsensusMessage::Vote(vote) = message { - debug!("Sending vote to next quorum leader {:?}", vote.get_view()); + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + ); self.event_stream .publish(HotShotEvent::QuorumVoteSend(vote)) .await; @@ -988,115 +988,97 @@ where } } HotShotEvent::QuorumVoteRecv(vote) => { - debug!("Received quroum vote: {:?}", vote.get_view()); + debug!("Received quroum vote: {:?}", vote.get_view_number()); - if !self.quorum_exchange.is_leader(vote.get_view() + 1) { + if !self.quorum_exchange.is_leader(vote.get_view_number() + 1) { error!( "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.get_view() + 1, - self.quorum_exchange.is_leader(vote.get_view() + 2) + *vote.get_view_number() + 1, + self.quorum_exchange.is_leader(vote.get_view_number() + 2) ); return; } - match vote.clone() { - QuorumVote::Yes(vote_internal) => { - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); - let collection_view = if let Some((collection_view, collection_task, _)) = - &self.vote_collector - { - if vote_internal.current_view > *collection_view { - // ED I think we'd want to let that task timeout to avoid a griefing vector - self.registry.shutdown_task(*collection_task).await; - } - *collection_view - } else { - TYPES::Time::new(0) - }; + let handle_event = HandleEvent(Arc::new(move |event, state| { + async move { vote_handle(state, event).await }.boxed() + })); + let collection_view = + if let Some((collection_view, collection_task, _)) = &self.vote_collector { + if vote.get_view_number() > *collection_view { + // ED I think we'd want to let that task timeout to avoid a griefing vector + self.registry.shutdown_task(*collection_task).await; + } + *collection_view + } else { + TYPES::Time::new(0) + }; - // Todo check if we are the leader - let new_accumulator = QuorumVoteAccumulator { - total_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), + // Todo check if we are the leader + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, + }; - success_threshold: self.quorum_exchange.success_threshold(), - failure_threshold: self.quorum_exchange.failure_threshold(), + let accumulator = + new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); - sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], - phantom: PhantomData, - }; + // TODO Create default functions for accumulators + // https://github.com/EspressoSystems/HotShot/issues/1797 + let timeout_accumulator = TimeoutVoteAccumulator { + da_vote_outcomes: HashMap::new(), + success_threshold: self.timeout_exchange.success_threshold(), + sig_lists: Vec::new(), + signers: bitvec![0; self.timeout_exchange.total_nodes()], + phantom: PhantomData, + }; - let accumulator = self.quorum_exchange.accumulate_vote( - new_accumulator, - &vote, - &vote_internal.clone().leaf_commitment, - ); - - // TODO Create default functions for accumulators - // https://github.com/EspressoSystems/HotShot/issues/1797 - let timeout_accumulator = TimeoutVoteAccumulator { - da_vote_outcomes: HashMap::new(), - success_threshold: self.timeout_exchange.success_threshold(), - sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_exchange.total_nodes()], - phantom: PhantomData, - }; + if vote.get_view_number() > collection_view { + let state = VoteCollectionTaskState { + quorum_exchange: self.quorum_exchange.clone(), + timeout_exchange: self.timeout_exchange.clone(), + accumulator, + timeout_accumulator: either::Left(timeout_accumulator), + cur_view: vote.get_view_number(), + event_stream: self.event_stream.clone(), + id: self.id, + }; + let name = "Quorum Vote Collection"; + let filter = FilterEvent(Arc::new(|event| { + matches!( + event, + HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::TimeoutVoteRecv(_) + ) + })); - if vote_internal.current_view > collection_view { - let state = VoteCollectionTaskState { - quorum_exchange: self.quorum_exchange.clone(), - timeout_exchange: self.timeout_exchange.clone(), - accumulator, - timeout_accumulator: either::Left(timeout_accumulator), - cur_view: vote_internal.current_view, - event_stream: self.event_stream.clone(), - id: self.id, - }; - let name = "Quorum Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!( - event, - HotShotEvent::QuorumVoteRecv(_) - | HotShotEvent::TimeoutVoteRecv(_) - ) - })); - - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - - self.vote_collector = Some((vote_internal.current_view, id, stream_id)); - - let _task = async_spawn(async move { - VoteCollectionTypes::build(builder).launch().await; - }); - debug!( - "Starting vote handle for view {:?}", - vote_internal.current_view - ); - } else if let Some((_, _, stream_id)) = self.vote_collector { - self.event_stream - .direct_message( - stream_id, - HotShotEvent::QuorumVoteRecv(QuorumVote::Yes(vote_internal)), - ) - .await; - } - } - QuorumVote::No(_) => { - error!("The next leader has received an unexpected vote!"); - } + let builder = + TaskBuilder::>::new(name.to_string()) + .register_event_stream(self.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(state) + .register_event_handler(handle_event); + let id = builder.get_task_id().unwrap(); + let stream_id = builder.get_stream_id().unwrap(); + + self.vote_collector = Some((vote.get_view_number(), id, stream_id)); + + let _task = async_spawn(async move { + VoteCollectionTypes::build(builder).launch().await; + }); + debug!( + "Starting vote handle for view {:?}", + vote.get_view_number() + ); + } else if let Some((_, _, stream_id)) = self.vote_collector { + self.event_stream + .direct_message( + stream_id, + HotShotEvent::QuorumVoteRecv(vote), + ) + .await; } } HotShotEvent::TimeoutVoteRecv(vote) => { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index ae06b1acda..4b75d2a6ac 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -5,10 +5,12 @@ use hotshot_types::{ certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, data::{DAProposal, VidDisperse}, message::Proposal, + simple_certificate::QuorumCertificate2, + simple_vote::YesVote, traits::node_implementation::{ - NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, + NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, ViewSyncProposalType, }, - vote::{DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncVote}, + vote::{DAVote, TimeoutVote, VIDVote, ViewSyncVote}, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks @@ -19,7 +21,7 @@ pub enum HotShotEvent> { /// A quorum proposal has been received from the network; handled by the consensus task QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(QuorumVote>), + QuorumVoteRecv(YesVote>), /// A timeout vote recevied from the network; handled by consensus task TimeoutVoteRecv(TimeoutVote), /// Send a timeout vote to the network; emitted by consensus task replicas @@ -33,13 +35,18 @@ pub enum HotShotEvent> { /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(QuorumVote>), + QuorumVoteSend(YesVote>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(Either>, TimeoutCertificate>), + QCFormed( + Either< + QuorumCertificate2>, + TimeoutCertificate, + >, + ), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks diff --git a/types/src/message.rs b/types/src/message.rs index e85c9c51df..f9d159b9e0 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,13 +3,16 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. +use crate::vote2::HasViewNumber; use crate::{ certificate::{DACertificate, VIDCertificate}, data::{DAProposal, ProposalType, VidDisperse}, + simple_vote::YesVote, traits::{ network::{NetworkMsg, ViewMessage}, node_implementation::{ - ExchangesType, NodeImplementation, NodeType, QuorumProposalType, ViewSyncProposalType, + ExchangesType, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, + ViewSyncProposalType, }, signature_key::EncodedSignature, }, @@ -148,7 +151,10 @@ where /// Message with a quorum proposal. Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. - Vote(QuorumVote>, TYPES::SignatureKey), + Vote( + YesVote>, + TYPES::SignatureKey, + ), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. @@ -303,7 +309,7 @@ impl< } } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to both validating and sequencing consensus. pub enum GeneralConsensusMessage> @@ -314,7 +320,7 @@ where Proposal(Proposal>), /// Message with a quorum vote. - Vote(QuorumVote>), + Vote(YesVote>), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), @@ -383,7 +389,7 @@ pub trait SequencingMessageType>: } /// Messages for sequencing consensus. -#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(bound(deserialize = "", serialize = ""))] pub struct SequencingMessage< TYPES: NodeType, @@ -409,7 +415,7 @@ impl< // this should match replica upon receipt p.data.get_view_number() } - GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view(), + GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view_number(), GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { InternalTrigger::Timeout(time) => *time, }, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 2046852097..83c80a911c 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -8,7 +8,7 @@ use commit::Commitment; use ethereum_types::U256; use crate::{ - simple_vote::Voteable, + simple_vote::{Voteable, YesData}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -17,10 +17,11 @@ use crate::{ }; /// A certificate which can be created by aggregating many simple votes on the commitment. +#[derive(Eq, Hash, PartialEq, Debug, Clone)] pub struct SimpleCertificate> { - /// commitment to previous leaf + /// commitment to previous leaf which all the votes in this certificate are voting on pub leaf_commitment: VOTEABLE, - /// commitment of all the voetes this should be signed over + /// commitment of all the votes this cert should be signed over pub vote_commitment: Commitment, /// Which view this QC relates to pub view_number: TYPES::Time, @@ -35,12 +36,12 @@ pub struct SimpleCertificate> Certificate2 for SimpleCertificate { - type Commitment = VOTEABLE; + type Voteable = VOTEABLE; type Membership = MEMBERSHIP; fn create_signed_certificate( vote_commitment: Commitment, - data: Self::Commitment, + data: Self::Voteable, sig: ::QCType, view: TYPES::Time, ) -> Self { @@ -77,10 +78,13 @@ impl u64 { membership.success_threshold().into() } - fn get_data(&self) -> &Self::Commitment { + fn get_data(&self) -> &Self::Voteable { &self.leaf_commitment } - fn get_data_commitment(&self) -> Commitment { + fn get_data_commitment(&self) -> Commitment { self.vote_commitment } } + +// Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file +pub type QuorumCertificate2 = SimpleCertificate, M>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 410ac2028d..5a31e6aba5 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -2,9 +2,10 @@ #![allow(clippy::missing_docs_in_private_items)] #![allow(missing_docs)] -use std::marker::PhantomData; +use std::{clone, fmt::Debug, hash::Hash, marker::PhantomData}; use commit::{Commitment, Committable}; +use serde::{Deserialize, Serialize}; use crate::{ traits::{ @@ -14,34 +15,41 @@ use crate::{ }, vote2::{HasViewNumber, Vote2}, }; - +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct YesData { pub leaf_commit: Commitment, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct NoData { pub leaf_commit: Commitment, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct DAData { pub block_commit: Commitment, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct TimeoutData { pub view: TYPES::Time, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct VIDData { pub block_commit: Commitment, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct ViewSyncPreCommitData { /// The relay this vote is intended for pub relay: EncodedPublicKey, /// The view number we are trying to sync on pub round: TYPES::Time, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct ViewSyncCommitData { /// The relay this vote is intended for pub relay: EncodedPublicKey, /// The view number we are trying to sync on pub round: TYPES::Time, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct ViewSyncFinalizeData { /// The relay this vote is intended for pub relay: EncodedPublicKey, @@ -50,7 +58,10 @@ pub struct ViewSyncFinalizeData { } /// Marker trait for data or commitments that can be voted on. -pub trait Voteable: sealed::Sealed + Committable + Clone {} +pub trait Voteable: + sealed::Sealed + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq +{ +} /// Sealed is used to make sure no other files can implement the Voteable trait. /// All simple voteable types should be implemented here. This prevents us from @@ -65,6 +76,7 @@ mod sealed { } /// A simple yes vote over some votable type. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct SimpleVote> { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), @@ -107,6 +119,25 @@ impl> V } } +impl> + SimpleVote +{ + pub fn create_signed_vote( + data: DATA, + view: TYPES::Time, + pub_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + ) -> Self { + let signature = TYPES::SignatureKey::sign(private_key, data.commit().as_ref()); + Self { + signature: (pub_key.to_bytes(), signature), + data, + current_view: view, + _pd: PhantomData, + } + } +} + impl Committable for YesData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Yes Vote") @@ -165,6 +196,13 @@ impl Committable for ViewSyncCommitData { } } +// impl votable for all the data types in this file sealed marker should ensure nothing is accidently +// implemented for structs that aren't "voteable" +impl Voteable + for V +{ +} + // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file pub type YesVote = SimpleVote, M>; pub type NoVote = SimpleVote, M>; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index cff1f06dca..e790d74a8d 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -206,7 +206,7 @@ where /// A protocol for determining membership in and participating in a committee. pub trait Membership: - Clone + Debug + Eq + PartialEq + Send + Sync + 'static + Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static { /// generate a default election configuration fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType; @@ -788,16 +788,16 @@ pub trait QuorumExchangeType, { /// Create a message with a positive vote on validating or commitment proposal. // TODO ED This returns just a general message type, it's not even bound to a proposal, and this is just a function on the QC. Make proprosal doesn't really apply to all cert types. - fn create_yes_message>( - &self, - justify_qc_commitment: Commitment, - leaf_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - >::Certificate: commit::Committable, - I::Exchanges: ExchangesType>; + // fn create_yes_message>( + // &self, + // justify_qc_commitment: Commitment, + // leaf_commitment: Commitment, + // current_view: TYPES::Time, + // vote_token: TYPES::VoteTokenType, + // ) -> GeneralConsensusMessage + // where + // >::Certificate: commit::Committable, + // I::Exchanges: ExchangesType>; /// Sign a validating or commitment proposal. fn sign_validating_or_commitment_proposal>( @@ -821,27 +821,6 @@ pub trait QuorumExchangeType, &self, leaf_commitment: Commitment, ) -> (EncodedPublicKey, EncodedSignature); - - /// Sign a neagtive vote on validating or commitment proposal. - /// - /// The leaf commitment and the type of the vote (no) are signed, which is the minimum amount - /// of information necessary for any user of the subsequently constructed QC to check that this - /// node voted `No` on that leaf. - fn sign_no_vote( - &self, - leaf_commitment: Commitment, - ) -> (EncodedPublicKey, EncodedSignature); - - /// Create a message with a negative vote on validating or commitment proposal. - fn create_no_message>( - &self, - justify_qc_commitment: Commitment>>, - leaf_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - I::Exchanges: ExchangesType>; } /// Standard implementation of [`QuroumExchangeType`] based on Hot Stuff consensus. @@ -880,27 +859,6 @@ impl< > QuorumExchangeType for QuorumExchange { - /// Create a message with a positive vote on validating or commitment proposal. - fn create_yes_message>( - &self, - justify_qc_commitment: Commitment>>, - leaf_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - I::Exchanges: ExchangesType>, - { - let signature = self.sign_yes_vote(leaf_commitment); - GeneralConsensusMessage::::Vote(QuorumVote::Yes(YesOrNoVote { - justify_qc_commitment, - signature, - leaf_commitment, - current_view, - vote_token, - vote_data: VoteData::Yes(leaf_commitment), - })) - } /// Sign a validating or commitment proposal. fn sign_validating_or_commitment_proposal>( &self, @@ -935,45 +893,6 @@ impl< ); (self.public_key.to_bytes(), signature) } - - /// Sign a neagtive vote on validating or commitment proposal. - /// - /// The leaf commitment and the type of the vote (no) are signed, which is the minimum amount - /// of information necessary for any user of the subsequently constructed QC to check that this - /// node voted `No` on that leaf. - /// TODO GG: why return the pubkey? Some other `sign_xxx` methods do not return the pubkey. - fn sign_no_vote( - &self, - leaf_commitment: Commitment, - ) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::No(leaf_commitment).commit().as_ref(), - ); - (self.public_key.to_bytes(), signature) - } - - /// Create a message with a negative vote on validating or commitment proposal. - fn create_no_message>( - &self, - justify_qc_commitment: Commitment>>, - leaf_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - I::Exchanges: ExchangesType>, - { - let signature = self.sign_no_vote(leaf_commitment); - GeneralConsensusMessage::::Vote(QuorumVote::No(YesOrNoVote { - justify_qc_commitment, - signature, - leaf_commitment, - current_view, - vote_token, - vote_data: VoteData::No(leaf_commitment), - })) - } } impl< diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 175845a2c1..0c15ee3982 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -56,12 +56,12 @@ pub trait Certificate2 { /// Type that defines membership for voters on the certificate type Membership: Membership; /// The data commitment this certificate certifies. - type Commitment: Voteable; + type Voteable: Voteable; /// Build a certificate from the data commitment and the quorum of signers fn create_signed_certificate( - vote_commitment: Commitment, - data: Self::Commitment, + vote_commitment: Commitment, + data: Self::Voteable, sig: ::QCType, view: TYPES::Time, ) -> Self; @@ -69,23 +69,23 @@ pub trait Certificate2 { /// Checks if the cert is valid fn is_valid_cert( &self, - vote_commitment: Commitment, + vote_commitment: Commitment, membership: &Self::Membership, ) -> bool; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` fn threshold(membership: &Self::Membership) -> u64; /// Get the commitment which was voted on - fn get_data(&self) -> &Self::Commitment; + fn get_data(&self) -> &Self::Voteable; /// Get the vote commitment which the votes commit to - fn get_data_commitment(&self) -> Commitment; + fn get_data_commitment(&self) -> Commitment; } /// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs pub struct VoteAccumulator2< TYPES: NodeType, VOTE: Vote2, - CERT: Certificate2, + CERT: Certificate2, > { /// Map of all signatures accumlated so far pub vote_outcomes: VoteMap2>, @@ -100,13 +100,12 @@ pub struct VoteAccumulator2< impl< TYPES: NodeType, VOTE: Vote2, - CERT: Certificate2, + CERT: Certificate2, > VoteAccumulator2 { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. - #[allow(dead_code)] - fn accumulate(mut self, vote: &VOTE, membership: &VOTE::Membership) -> Either { + pub fn accumulate(mut self, vote: &VOTE, membership: &VOTE::Membership) -> Either { let key = vote.get_signing_key(); let vote_commitment = vote.get_data_commitment(); From 1377599ce2701aea78b447ce1d69e7627f708203 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 11:26:54 -0400 Subject: [PATCH 0291/1393] builds except tests --- hotshot/src/demo.rs | 66 +++++----- hotshot/src/lib.rs | 5 +- hotshot/src/traits/storage/memory_storage.rs | 16 ++- hotshot/src/types/handle.rs | 9 +- task-impls/src/consensus.rs | 70 +++++----- task-impls/src/da.rs | 2 +- task-impls/src/events.rs | 7 +- task-impls/src/network.rs | 5 +- task-impls/src/transactions.rs | 2 +- task-impls/src/vid.rs | 2 +- testing/src/task_helpers.rs | 5 +- types/src/consensus.rs | 5 +- types/src/data.rs | 130 ++++++++----------- types/src/event.rs | 4 +- types/src/message.rs | 4 +- types/src/simple_certificate.rs | 85 ++++++++---- types/src/simple_vote.rs | 2 +- types/src/traits/consensus_api.rs | 3 +- types/src/traits/storage.rs | 9 +- types/src/vote2.rs | 23 ++-- 20 files changed, 231 insertions(+), 223 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index afebd291a0..0695ccc3d5 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -169,36 +169,36 @@ where } } -/// Provides a random [`QuorumCertificate`] -pub fn random_quorum_certificate>( - rng: &mut dyn rand::RngCore, -) -> QuorumCertificate> { - QuorumCertificate { - // block_commitment: random_commitment(rng), - leaf_commitment: random_commitment(rng), - view_number: TYPES::Time::new(rng.gen()), - signatures: AssembledSignature::Genesis(), - is_genesis: rng.gen(), - } -} - -/// Provides a random [`Leaf`] -pub fn random_leaf( - deltas: Either>, - rng: &mut dyn rand::RngCore, -) -> Leaf { - let justify_qc = random_quorum_certificate(rng); - // let state = TYPES::StateType::default() - // .append(&deltas, &TYPES::Time::new(42)) - // .unwrap_or_default(); - Leaf { - view_number: justify_qc.view_number, - height: rng.next_u64(), - justify_qc, - parent_commitment: random_commitment(rng), - deltas, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: genesis_proposer_id(), - } -} +// /// Provides a random [`QuorumCertificate`] +// pub fn random_quorum_certificate>( +// rng: &mut dyn rand::RngCore, +// ) -> QuorumCertificate2> { +// QuorumCertificate { +// // block_commitment: random_commitment(rng), +// leaf_commitment: random_commitment(rng), +// view_number: TYPES::Time::new(rng.gen()), +// signatures: None, +// is_genesis: rng.gen(), +// } +// } + +// /// Provides a random [`Leaf`] +// pub fn random_leaf( +// deltas: Either>, +// rng: &mut dyn rand::RngCore, +// ) -> Leaf { +// let justify_qc = random_quorum_certificate(rng); +// // let state = TYPES::StateType::default() +// // .append(&deltas, &TYPES::Time::new(42)) +// // .unwrap_or_default(); +// Leaf { +// view_number: justify_qc.view_number, +// height: rng.next_u64(), +// justify_qc, +// parent_commitment: random_commitment(rng), +// deltas, +// rejected: Vec::new(), +// timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), +// proposer_id: genesis_proposer_id(), +// } +// } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index d178619ea7..2fe6d10ba3 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -56,6 +56,7 @@ use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ certificate::{TimeoutCertificate, VIDCertificate}, data::VidDisperse, + simple_certificate::QuorumCertificate2, traits::node_implementation::TimeoutEx, }; @@ -249,7 +250,7 @@ impl> SystemContext { self.inner .internal_event_stream .publish(HotShotEvent::QCFormed(either::Left( - QuorumCertificate::genesis(), + QuorumCertificate2::genesis(), ))) .await; } @@ -1020,7 +1021,7 @@ impl> HotShotInitializer>::genesis(); + let justify_qc = QuorumCertificate2::::genesis(); Ok(Self { inner: LEAF::new(time, justify_qc, genesis_block, state), diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index dd6e0217d5..ad488e18ed 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -117,10 +117,12 @@ mod test { use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use super::*; + use commit::Committable; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ certificate::{AssembledSignature, QuorumCertificate}, data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, + simple_certificate::QuorumCertificate2, traits::{ block_contents::dummy::{DummyBlock, DummyState}, node_implementation::NodeType, @@ -128,7 +130,7 @@ mod test { BlockPayload, }, }; - use std::{fmt::Debug, hash::Hash}; + use std::{fmt::Debug, hash::Hash, marker::PhantomData}; use tracing::instrument; #[derive( @@ -164,13 +166,19 @@ mod test { // TODO is it okay to be using genesis here? let _dummy_block_commit = fake_commitment::(); let dummy_leaf_commit = fake_commitment::>(); + let data = hotshot_types::simple_vote::YesData { + leaf_commit: dummy_leaf_commit, + }; + let commit = data.commit(); StoredView::from_qc_block_and_state( - QuorumCertificate { + QuorumCertificate2 { // block_commitment: dummy_block_commit, is_genesis: view_number == ::Time::genesis(), - leaf_commitment: dummy_leaf_commit, - signatures: AssembledSignature::Genesis(), + leaf_commitment: data, + vote_commitment: commit, + signatures: None, view_number, + _pd: PhantomData, }, DummyBlock::random(rng), DummyState::random(rng), diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 88cfcf95dd..b0ef126200 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,5 +1,6 @@ //! Provides an event-streaming handle for a [`HotShot`] running in the background +use crate::QuorumCertificate2; use crate::{traits::NodeImplementation, types::Event, Message, QuorumCertificate, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; @@ -13,6 +14,7 @@ use hotshot_task::{ BoxSyncFuture, }; use hotshot_task_impls::events::HotShotEvent; +use hotshot_types::simple_vote::YesData; use hotshot_types::{ consensus::Consensus, data::LeafType, @@ -26,7 +28,6 @@ use hotshot_types::{ storage::Storage, }, }; - use std::sync::Arc; use tracing::error; @@ -190,8 +191,10 @@ impl + 'static> SystemContextHandl if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { if anchor_leaf.view_number == TYPES::Time::genesis() { let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); - let mut qc = QuorumCertificate::>::genesis(); - qc.leaf_commitment = leaf.commit(); + let mut qc = QuorumCertificate2::::genesis(); + qc.leaf_commitment = YesData { + leaf_commit: leaf.commit(), + }; let event = Event { view_number: TYPES::Time::genesis(), event: EventType::Decide { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e614161177..b8395705ab 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -35,8 +35,8 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{QuorumVote, QuorumVoteAccumulator, TimeoutVoteAccumulator, VoteType}, - vote2::{HasViewNumber, VoteAccumulator2}, + vote::{TimeoutVoteAccumulator, VoteType}, + vote2::{Certificate2, HasViewNumber, VoteAccumulator2}, }; use tracing::warn; @@ -175,9 +175,9 @@ pub struct VoteCollectionTaskState< VoteAccumulator2< TYPES, YesVote>, - QuorumCertificate2>, + QuorumCertificate2, >, - QuorumCertificate2>, + QuorumCertificate2, >, /// Accumulator for votes @@ -407,7 +407,7 @@ where async fn vote_if_able(&self) -> bool { if let Some(proposal) = &self.current_proposal { // ED Need to account for the genesis DA cert - if proposal.justify_qc.is_genesis() && proposal.view_number == TYPES::Time::new(1) { + if proposal.justify_qc.is_genesis && proposal.view_number == TYPES::Time::new(1) { // warn!("Proposal is genesis!"); let view = TYPES::Time::new(*proposal.view_number); @@ -420,16 +420,16 @@ where Ok(None) => { debug!("We were not chosen for consensus committee on {:?}", view); } - Ok(Some(vote_token)) => { + Ok(Some(_vote_token)) => { let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis() { + let parent = if justify_qc.is_genesis { self.genesis_leaf().await } else { self.consensus .read() .await .saved_leaves - .get(&justify_qc.leaf_commitment()) + .get(&justify_qc.get_data().leaf_commit) .cloned() }; @@ -437,7 +437,7 @@ where let Some(parent) = parent else { error!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.leaf_commitment(), + justify_qc.get_data().leaf_commit, proposal.view_number, ); return false; @@ -492,14 +492,14 @@ where } Ok(Some(vote_token)) => { let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis() { + let parent = if justify_qc.is_genesis { self.genesis_leaf().await } else { self.consensus .read() .await .saved_leaves - .get(&justify_qc.leaf_commitment()) + .get(&justify_qc.get_data().leaf_commit) .cloned() }; @@ -507,7 +507,7 @@ where let Some(parent) = parent else { error!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.leaf_commitment(), + justify_qc.get_data().leaf_commit, proposal.view_number, ); return false; @@ -677,7 +677,7 @@ where } // Verify a timeout certificate exists and is valid - if proposal.data.justify_qc.view_number() != view - 1 { + if proposal.data.justify_qc.get_view_number() != view - 1 { let Some(timeout_cert) = proposal.data.timeout_certificate.clone() else { warn!( "Quorum proposal for view {} needed a timeout certificate but did not have one", @@ -701,7 +701,7 @@ where let justify_qc = proposal.data.justify_qc.clone(); - if !self.quorum_exchange.is_valid_cert(&justify_qc) { + if !justify_qc.is_valid_cert(self.quorum_exchange.membership()) { error!("Invalid justify_qc in proposal for view {}", *view); let consensus = self.consensus.write().await; consensus.metrics.invalid_qc.update(1); @@ -716,12 +716,12 @@ where let consensus = self.consensus.upgradable_read().await; // Construct the leaf. - let parent = if justify_qc.is_genesis() { + let parent = if justify_qc.is_genesis { self.genesis_leaf().await } else { consensus .saved_leaves - .get(&justify_qc.leaf_commitment()) + .get(&justify_qc.get_data().leaf_commit) .cloned() }; @@ -731,13 +731,13 @@ where // If no parent then just update our state map and return. We will not vote. error!( "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.leaf_commitment() + justify_qc.get_data().leaf_commit ); let leaf = Leaf { view_number: view, height: proposal.data.height, justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.leaf_commitment(), + parent_commitment: justify_qc.get_data().leaf_commit, deltas: Right(proposal.data.block_commitment), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), @@ -790,12 +790,12 @@ where // passes. // Liveness check. - let liveness_check = justify_qc.view_number > consensus.locked_view; + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; // Safety check. // Check if proposal extends from the locked leaf. let outcome = consensus.visit_leaf_ancestors( - justify_qc.view_number, + justify_qc.get_view_number(), Terminator::Inclusive(consensus.locked_view), false, |leaf| { @@ -826,7 +826,7 @@ where let mut leaf_views = Vec::new(); let mut included_txns = HashSet::new(); let old_anchor_view = consensus.last_decided_view; - let parent_view = leaf.justify_qc.view_number; + let parent_view = leaf.justify_qc.get_view_number(); let mut current_chain_length = 0usize; if parent_view + 1 == view { current_chain_length += 1; @@ -1068,16 +1068,10 @@ where let _task = async_spawn(async move { VoteCollectionTypes::build(builder).launch().await; }); - debug!( - "Starting vote handle for view {:?}", - vote.get_view_number() - ); + debug!("Starting vote handle for view {:?}", vote.get_view_number()); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message( - stream_id, - HotShotEvent::QuorumVoteRecv(vote), - ) + .direct_message(stream_id, HotShotEvent::QuorumVoteRecv(vote)) .await; } } @@ -1122,14 +1116,8 @@ where &vote.get_view().commit(), ); - let quorum_accumulator = QuorumVoteAccumulator { - total_vote_outcomes: HashMap::new(), - yes_vote_outcomes: HashMap::new(), - no_vote_outcomes: HashMap::new(), - - success_threshold: self.quorum_exchange.success_threshold(), - failure_threshold: self.quorum_exchange.failure_threshold(), - + let quorum_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.quorum_exchange.total_nodes()], phantom: PhantomData, @@ -1303,7 +1291,7 @@ where /// Sends a proposal if possible from the high qc we have pub async fn publish_proposal_if_able( &mut self, - _qc: QuorumCertificate>, + _qc: QuorumCertificate2, view: TYPES::Time, timeout_certificate: Option>, ) -> bool { @@ -1316,7 +1304,7 @@ where } let consensus = self.consensus.read().await; - let parent_view_number = &consensus.high_qc.view_number(); + let parent_view_number = &consensus.high_qc.get_view_number(); let mut reached_decided = false; let Some(parent_view) = consensus.state_map.get(parent_view_number) else { @@ -1333,12 +1321,12 @@ where ); return false; }; - if leaf_commitment != consensus.high_qc.leaf_commitment() { + if leaf_commitment != consensus.high_qc.get_data().leaf_commit { // NOTE: This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, - consensus.high_qc.leaf_commitment() + consensus.high_qc.get_data().leaf_commit ); } let Some(leaf) = consensus.saved_leaves.get(&leaf_commitment) else { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index fc7a8bd55c..d0fafed10f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -22,7 +22,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership, SignedCertificate}, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType}, + node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumMembership}, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 4b75d2a6ac..7bd092cf96 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -41,12 +41,7 @@ pub enum HotShotEvent> { /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed( - Either< - QuorumCertificate2>, - TimeoutCertificate, - >, - ), + QCFormed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 770b7726b1..5f033180b6 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -18,6 +18,7 @@ use hotshot_types::{ node_implementation::{NodeImplementation, NodeType}, }, vote::VoteType, + vote2::{HasViewNumber, Vote2}, }; use snafu::Snafu; use std::{marker::PhantomData, sync::Arc}; @@ -206,12 +207,12 @@ impl< // ED Each network task is subscribed to all these message types. Need filters per network task HotShotEvent::QuorumVoteSend(vote) => ( - vote.signature_key(), + vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::Vote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.get_view() + 1)), + Some(membership.get_leader(vote.get_view_number() + 1)), ), HotShotEvent::VidDisperseSend(proposal, sender) => ( sender, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 45cea93f2d..1106fd2e7d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -22,7 +22,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, Membership, QuorumExchangeType}, - node_implementation::{NodeImplementation, NodeType, QuorumEx}, + node_implementation::{NodeImplementation, NodeType, QuorumEx, QuorumMembership}, BlockPayload, }, }; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 29a4a32f54..3252890216 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -23,7 +23,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, VIDExchangeType}, - node_implementation::{NodeImplementation, NodeType, VIDEx}, + node_implementation::{NodeImplementation, NodeType, QuorumMembership, VIDEx}, signature_key::SignatureKey, state::ConsensusTime, }, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 846e67ad5b..67b2e4af12 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -24,6 +24,7 @@ use hotshot_types::{ signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, }, + vote2::HasViewNumber, simple_certificate::QuorumCertificate2, }; pub async fn build_system_handle( @@ -99,7 +100,7 @@ async fn build_quorum_proposal_and_signature( }; let _quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); - let parent_view_number = &consensus.high_qc.view_number(); + let parent_view_number = &consensus.high_qc.get_view_number(); let Some(parent_view) = consensus.state_map.get(parent_view_number) else { panic!("Couldn't find high QC parent in state map."); }; @@ -130,7 +131,7 @@ async fn build_quorum_proposal_and_signature( block_commitment: block.commit(), view_number: ViewNumber::new(view), height: 1, - justify_qc: QuorumCertificate::genesis(), + justify_qc: QuorumCertificate2::genesis(), timeout_certificate: None, proposer_id: leaf.proposer_id, dac: None, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 427fd5e587..ece2ea0b95 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -7,10 +7,11 @@ pub use crate::{ use displaydoc::Display; use crate::{ - certificate::QuorumCertificate, data::LeafType, error::HotShotError, + simple_certificate::QuorumCertificate2, traits::{ + election::Membership, metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, }, @@ -56,7 +57,7 @@ pub struct Consensus> { pub locked_view: TYPES::Time, /// the highqc per spec - pub high_qc: QuorumCertificate>, + pub high_qc: QuorumCertificate2, /// A reference to the metrics trait #[debug(skip)] diff --git a/types/src/data.rs b/types/src/data.rs index e958092cb1..327170de9e 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,13 +8,16 @@ use crate::{ AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCertificate, }, + simple_certificate::QuorumCertificate2, traits::{ + election::Membership, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, storage::StoredView, BlockPayload, State, }, + vote2::Certificate2, }; use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; @@ -116,45 +119,6 @@ pub type Transaction = <::BlockType as BlockPayload>::Tra /// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut pub type TxnCommitment = Commitment>; -/// subset of state that we stick into a leaf. -/// original hotstuff proposal -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Derivative, Eq)] -#[serde(bound(deserialize = ""))] -#[derivative(PartialEq, Hash)] -pub struct ValidatingProposal> -where - LEAF: Committable, -{ - /// current view's block commitment - pub block_commitment: Commitment, - - /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, - - /// Height from leader when proposing leaf - pub height: u64, - - /// Per spec, justification - pub justify_qc: QuorumCertificate>, - - /// The hash of the parent `Leaf` - /// So we can ask if it extends - #[debug(skip)] - pub parent_commitment: Commitment, - - /// BlockPayload leaf wants to apply - pub deltas: TYPES::BlockType, - - /// What the state should be after applying `self.deltas` - pub state_commitment: Commitment, - - /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, - - /// the propser id - pub proposer_id: EncodedPublicKey, -} - /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { @@ -216,7 +180,7 @@ pub struct QuorumProposal> { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate>, + pub justify_qc: QuorumCertificate2, /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view pub timeout_certificate: Option>, @@ -229,15 +193,6 @@ pub struct QuorumProposal> { pub dac: Option>, } -impl> ProposalType - for ValidatingProposal -{ - type NodeType = TYPES; - fn get_view_number(&self) -> ::Time { - self.view_number - } -} - impl ProposalType for DAProposal { type NodeType = TYPES; fn get_view_number(&self) -> ::Time { @@ -445,7 +400,7 @@ pub trait LeafType: /// Create a new leaf from its components. fn new( view_number: LeafTime, - justify_qc: QuorumCertificate>, + justify_qc: QuorumCertificate2, deltas: LeafBlock, state: LeafState, ) -> Self; @@ -458,7 +413,7 @@ pub trait LeafType: /// Change the height of this leaf. fn set_height(&mut self, height: u64); /// The QC linking this leaf to its parent in the chain. - fn get_justify_qc(&self) -> QuorumCertificate>; + fn get_justify_qc(&self) -> QuorumCertificate2; /// Commitment to this leaf's parent. fn get_parent_commitment(&self) -> Commitment; /// The block contained in this leaf. @@ -532,7 +487,7 @@ pub struct ValidatingLeaf { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate>, + pub justify_qc: QuorumCertificate2, /// The hash of the parent `Leaf` /// So we can ask if it extends @@ -571,7 +526,7 @@ pub struct Leaf { pub height: u64, /// Per spec, justification - pub justify_qc: QuorumCertificate>, + pub justify_qc: QuorumCertificate2, /// The hash of the parent `Leaf` /// So we can ask if it extends @@ -645,7 +600,7 @@ impl LeafType for ValidatingLeaf { fn new( view_number: ::Time, - justify_qc: QuorumCertificate>, + justify_qc: QuorumCertificate2, deltas: ::BlockType, state: ::StateType, ) -> Self { @@ -674,7 +629,7 @@ impl LeafType for ValidatingLeaf { self.height = height; } - fn get_justify_qc(&self) -> QuorumCertificate> { + fn get_justify_qc(&self) -> QuorumCertificate2 { self.justify_qc.clone() } @@ -762,7 +717,7 @@ impl LeafType for Leaf { fn new( view_number: ::Time, - justify_qc: QuorumCertificate>, + justify_qc: QuorumCertificate2, deltas: ::BlockType, _state: ::StateType, ) -> Self { @@ -790,7 +745,7 @@ impl LeafType for Leaf { self.height = height; } - fn get_justify_qc(&self) -> QuorumCertificate> { + fn get_justify_qc(&self) -> QuorumCertificate2 { self.justify_qc.clone() } @@ -873,6 +828,7 @@ pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitm /// Serialization for the QC assembled signature /// # Panics /// if serialization fails +// TODO: Remove after new QC is integrated pub fn serialize_signature(signature: &AssembledSignature) -> Vec { let mut signatures_bytes = vec![]; let signatures: Option<::QCType> = match &signature { @@ -929,9 +885,37 @@ pub fn serialize_signature(signature: &AssembledSignature( + signatures: &::QCType, +) -> Vec { + let mut signatures_bytes = vec![]; + signatures_bytes.extend("Yes".as_bytes()); + + let (sig, proof) = TYPES::SignatureKey::get_sig_proof(&signatures); + let proof_bytes = bincode_opts() + .serialize(&proof.as_bitslice()) + .expect("This serialization shouldn't be able to fail"); + signatures_bytes.extend("bitvec proof".as_bytes()); + signatures_bytes.extend(proof_bytes.as_slice()); + let sig_bytes = bincode_opts() + .serialize(&sig) + .expect("This serialization shouldn't be able to fail"); + signatures_bytes.extend("aggregated signature".as_bytes()); + signatures_bytes.extend(sig_bytes.as_slice()); + signatures_bytes +} impl Committable for ValidatingLeaf { fn commit(&self) -> commit::Commitment { - let signatures_bytes = serialize_signature(&self.justify_qc.signatures); + let signatures_bytes = if self.justify_qc.is_genesis { + let mut bytes = vec![]; + bytes.extend("genesis".as_bytes()); + bytes + } else { + serialize_signature2::(self.justify_qc.signatures.as_ref().unwrap()) + }; commit::RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) @@ -943,7 +927,7 @@ impl Committable for ValidatingLeaf { .u64(*self.justify_qc.view_number) .field( "justify_qc leaf commitment", - self.justify_qc.leaf_commitment, + self.justify_qc.get_data().leaf_commit, ) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) @@ -964,7 +948,13 @@ impl Committable for Leaf { Either::Right(commitment) => *commitment, }; - let signatures_bytes = serialize_signature(&self.justify_qc.signatures); + let signatures_bytes = if self.justify_qc.is_genesis { + let mut bytes = vec![]; + bytes.extend("genesis".as_bytes()); + bytes + } else { + serialize_signature2::(self.justify_qc.signatures.as_ref().unwrap()) + }; commit::RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) @@ -975,7 +965,7 @@ impl Committable for Leaf { .u64(*self.justify_qc.view_number) .field( "justify_qc leaf commitment", - self.justify_qc.leaf_commitment, + self.justify_qc.get_data().leaf_commit, ) .constant_str("justify_qc signatures") .var_size_bytes(&signatures_bytes) @@ -983,24 +973,6 @@ impl Committable for Leaf { } } -impl From> - for ValidatingProposal> -{ - fn from(leaf: ValidatingLeaf) -> Self { - Self { - view_number: leaf.view_number, - height: leaf.height, - justify_qc: leaf.justify_qc, - parent_commitment: leaf.parent_commitment, - deltas: leaf.deltas.clone(), - state_commitment: leaf.state.commit(), - rejected: leaf.rejected, - proposer_id: leaf.proposer_id, - block_commitment: leaf.deltas.commit(), - } - } -} - impl From for StoredView where TYPES: NodeType, diff --git a/types/src/event.rs b/types/src/event.rs index 5d091cd400..c390a1bc63 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -2,7 +2,7 @@ use crate::{ certificate::QuorumCertificate, data::LeafType, error::HotShotError, - traits::node_implementation::NodeType, + simple_certificate::QuorumCertificate2, traits::node_implementation::NodeType, }; use commit::Commitment; use std::sync::Arc; @@ -43,7 +43,7 @@ pub enum EventType> { /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf /// before it using - qc: Arc>>, + qc: Arc>, /// Optional information of the number of transactions in the block, for logging purposes. block_size: Option, }, diff --git a/types/src/message.rs b/types/src/message.rs index f9d159b9e0..07ee231f97 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -309,7 +309,7 @@ impl< } } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to both validating and sequencing consensus. pub enum GeneralConsensusMessage> @@ -389,7 +389,7 @@ pub trait SequencingMessageType>: } /// Messages for sequencing consensus. -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] pub struct SequencingMessage< TYPES: NodeType, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 83c80a911c..6daf9254c6 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -2,9 +2,13 @@ #![allow(clippy::missing_docs_in_private_items)] #![allow(missing_docs)] -use std::marker::PhantomData; +use std::{ + fmt::{self, Debug, Display, Formatter}, + hash::Hash, + marker::PhantomData, +}; -use commit::Commitment; +use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ @@ -13,12 +17,14 @@ use crate::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, }, - vote2::Certificate2, + vote2::{Certificate2, HasViewNumber}, }; +use serde::{Deserialize, Serialize}; + /// A certificate which can be created by aggregating many simple votes on the commitment. -#[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub struct SimpleCertificate> { +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct SimpleCertificate { /// commitment to previous leaf which all the votes in this certificate are voting on pub leaf_commitment: VOTEABLE, /// commitment of all the votes this cert should be signed over @@ -26,18 +32,18 @@ pub struct SimpleCertificate::QCType, + pub signatures: Option<::QCType>, /// If this QC is for the genesis block pub is_genesis: bool, /// phantom data for `MEMBERSHIP` and `TYPES` - _pd: PhantomData<(TYPES, MEMBERSHIP)>, + pub _pd: PhantomData, } -impl> - Certificate2 for SimpleCertificate +impl Certificate2 + for SimpleCertificate { type Voteable = VOTEABLE; - type Membership = MEMBERSHIP; + // type Membership = MEMBERSHIP; fn create_signed_certificate( vote_commitment: Commitment, @@ -49,19 +55,12 @@ impl, - membership: &MEMBERSHIP, - ) -> bool { - if vote_commitment != self.vote_commitment { - return false; - } + fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { if self.is_genesis && self.view_number == TYPES::Time::genesis() { return true; } @@ -71,11 +70,11 @@ impl::check( &real_qc_pp, - vote_commitment.as_ref(), - &self.signatures, + self.vote_commitment.as_ref(), + self.signatures.as_ref().unwrap(), ) } - fn threshold(membership: &MEMBERSHIP) -> u64 { + fn threshold>(membership: &MEMBERSHIP) -> u64 { membership.success_threshold().into() } fn get_data(&self) -> &Self::Voteable { @@ -86,5 +85,45 @@ impl HasViewNumber + for SimpleCertificate +{ + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} +impl Display + for QuorumCertificate2 +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "view: {:?}, is_genesis: {:?}", + self.view_number, self.is_genesis + ) + } +} + +impl< + TYPES: NodeType, + LEAF: Committable + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq + 'static, + > SimpleCertificate> +{ + pub fn genesis() -> Self { + let data = YesData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + let commit = data.commit(); + Self { + leaf_commitment: data, + vote_commitment: commit, + view_number: ::genesis(), + signatures: None, + is_genesis: true, + _pd: PhantomData, + } + } +} + // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file -pub type QuorumCertificate2 = SimpleCertificate, M>; +pub type QuorumCertificate2 = SimpleCertificate>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 5a31e6aba5..70c2823e92 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -2,7 +2,7 @@ #![allow(clippy::missing_docs_in_private_items)] #![allow(missing_docs)] -use std::{clone, fmt::Debug, hash::Hash, marker::PhantomData}; +use std::{fmt::Debug, hash::Hash, marker::PhantomData}; use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index a2ee27c3b1..f1caade541 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -6,6 +6,7 @@ use crate::{ error::HotShotError, event::{Event, EventType}, message::{DataMessage, SequencingMessage}, + simple_certificate::QuorumCertificate2, traits::{ network::NetworkError, node_implementation::{NodeImplementation, NodeType}, @@ -97,7 +98,7 @@ pub trait ConsensusSharedApi< &self, view_number: TYPES::Time, leaf_views: Vec, - decide_qc: QuorumCertificate>, + decide_qc: QuorumCertificate2, ) { self.send_event(Event { view_number, diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 122698486c..9ca3b0c21b 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -2,9 +2,10 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ - certificate::QuorumCertificate, data::LeafType, + simple_certificate::QuorumCertificate2, traits::{election::SignedCertificate, BlockPayload}, + vote2::HasViewNumber, }; use async_trait::async_trait; use commit::Commitment; @@ -132,7 +133,7 @@ pub struct StoredView> { /// The parent of this view pub parent: Commitment, /// The justify QC of this view. See the hotstuff paper for more information on this. - pub justify_qc: QuorumCertificate>, + pub justify_qc: QuorumCertificate2, /// The state of this view pub state: LEAF::MaybeState, /// The deltas of this view @@ -156,7 +157,7 @@ where /// /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a parent. pub fn from_qc_block_and_state( - qc: QuorumCertificate>, + qc: QuorumCertificate2, deltas: LEAF::DeltasType, state: LEAF::MaybeState, height: u64, @@ -166,7 +167,7 @@ where ) -> Self { Self { deltas, - view_number: qc.view_number(), + view_number: qc.get_view_number(), height, parent: parent_commitment, justify_qc: qc, diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 0c15ee3982..6d80017655 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -52,9 +52,9 @@ The certificate formed from the collection of signatures a committee. The committee is defined by the `Membership` associated type. The votes all must be over the `Commitment` associated type. */ -pub trait Certificate2 { +pub trait Certificate2: HasViewNumber { /// Type that defines membership for voters on the certificate - type Membership: Membership; + // type Membership: Membership; /// The data commitment this certificate certifies. type Voteable: Voteable; @@ -67,14 +67,10 @@ pub trait Certificate2 { ) -> Self; /// Checks if the cert is valid - fn is_valid_cert( - &self, - vote_commitment: Commitment, - membership: &Self::Membership, - ) -> bool; + fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold(membership: &Self::Membership) -> u64; + fn threshold>(membership: &MEMBERSHIP) -> u64; /// Get the commitment which was voted on fn get_data(&self) -> &Self::Voteable; /// Get the vote commitment which the votes commit to @@ -100,7 +96,7 @@ pub struct VoteAccumulator2< impl< TYPES: NodeType, VOTE: Vote2, - CERT: Certificate2, + CERT: Certificate2, > VoteAccumulator2 { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we @@ -160,10 +156,11 @@ impl< if *total_stake_casted >= CERT::threshold(membership) { // Assemble QC - let real_qc_pp = ::get_public_parameter( - stake_table.clone(), - U256::from(CERT::threshold(membership)), - ); + let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = + ::get_public_parameter( + stake_table.clone(), + U256::from(CERT::threshold(membership)), + ); let real_qc_sig = ::assemble( &real_qc_pp, From 455183784e7af99bde78ea43e2c1dd8ed833f8ca Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 11:32:12 -0400 Subject: [PATCH 0292/1393] fix and remove NoVote --- hotshot/src/demo.rs | 7 +++---- hotshot/src/traits/storage/memory_storage.rs | 1 - hotshot/src/types/handle.rs | 6 +++--- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/events.rs | 4 ++-- task-impls/src/transactions.rs | 2 +- task-impls/src/vid.rs | 2 +- testing/src/task_helpers.rs | 3 ++- types/src/consensus.rs | 1 - types/src/data.rs | 6 +----- types/src/event.rs | 6 +++--- types/src/message.rs | 4 ++-- types/src/simple_vote.rs | 5 ----- types/src/traits/consensus_api.rs | 3 +-- types/src/traits/election.rs | 2 +- 16 files changed, 22 insertions(+), 34 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 0695ccc3d5..09d4a9e947 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -8,12 +8,11 @@ use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; use commit::{Commitment, Committable}; use derivative::Derivative; -use either::Either; + use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ block_impl::{BlockPayloadError, VIDBlockPayload, VIDTransaction}, - certificate::{AssembledSignature, QuorumCertificate}, - data::{fake_commitment, genesis_proposer_id, random_commitment, Leaf, LeafType, ViewNumber}, + data::{fake_commitment, LeafType, ViewNumber}, traits::{ election::Membership, node_implementation::NodeType, @@ -21,7 +20,7 @@ use hotshot_types::{ BlockPayload, State, }, }; -use rand::Rng; + use serde::{Deserialize, Serialize}; use std::{fmt::Debug, marker::PhantomData}; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index ad488e18ed..7977940c41 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -120,7 +120,6 @@ mod test { use commit::Committable; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ - certificate::{AssembledSignature, QuorumCertificate}, data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, simple_certificate::QuorumCertificate2, traits::{ diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index b0ef126200..6cca7cb62a 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,7 +1,7 @@ //! Provides an event-streaming handle for a [`HotShot`] running in the background use crate::QuorumCertificate2; -use crate::{traits::NodeImplementation, types::Event, Message, QuorumCertificate, SystemContext}; +use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; use commit::Committable; @@ -20,10 +20,10 @@ use hotshot_types::{ data::LeafType, error::HotShotError, event::EventType, - message::{GeneralConsensusMessage, MessageKind}, + message::MessageKind, traits::{ election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, - node_implementation::{ExchangesType, NodeType, QuorumEx}, + node_implementation::{ExchangesType, NodeType}, state::ConsensusTime, storage::Storage, }, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b8395705ab..f52c3ceb86 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -490,7 +490,7 @@ where Ok(None) => { debug!("We were not chosen for consensus committee on {:?}", view); } - Ok(Some(vote_token)) => { + Ok(Some(_vote_token)) => { let justify_qc = proposal.justify_qc.clone(); let parent = if justify_qc.is_genesis { self.genesis_leaf().await diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index d0fafed10f..fc7a8bd55c 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -22,7 +22,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership, SignedCertificate}, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumMembership}, + node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 7bd092cf96..9e9090b2bc 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,8 +1,8 @@ use crate::view_sync::ViewSyncPhase; -use commit::Commitment; + use either::Either; use hotshot_types::{ - certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, + certificate::{DACertificate, TimeoutCertificate, VIDCertificate}, data::{DAProposal, VidDisperse}, message::Proposal, simple_certificate::QuorumCertificate2, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 1106fd2e7d..45cea93f2d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -22,7 +22,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, Membership, QuorumExchangeType}, - node_implementation::{NodeImplementation, NodeType, QuorumEx, QuorumMembership}, + node_implementation::{NodeImplementation, NodeType, QuorumEx}, BlockPayload, }, }; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3252890216..29a4a32f54 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -23,7 +23,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, VIDExchangeType}, - node_implementation::{NodeImplementation, NodeType, QuorumMembership, VIDEx}, + node_implementation::{NodeImplementation, NodeType, VIDEx}, signature_key::SignatureKey, state::ConsensusTime, }, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 67b2e4af12..6917b79478 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -17,6 +17,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::{Message, Proposal}, + simple_certificate::QuorumCertificate2, traits::{ consensus_api::ConsensusSharedApi, election::{ConsensusExchange, Membership, SignedCertificate}, @@ -24,7 +25,7 @@ use hotshot_types::{ signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, }, - vote2::HasViewNumber, simple_certificate::QuorumCertificate2, + vote2::HasViewNumber, }; pub async fn build_system_handle( diff --git a/types/src/consensus.rs b/types/src/consensus.rs index ece2ea0b95..fa13bc017d 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,7 +11,6 @@ use crate::{ error::HotShotError, simple_certificate::QuorumCertificate2, traits::{ - election::Membership, metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, }, diff --git a/types/src/data.rs b/types/src/data.rs index 327170de9e..658317f2f6 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,13 +4,9 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ - certificate::{ - AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, - ViewSyncCertificate, - }, + certificate::{AssembledSignature, DACertificate, TimeoutCertificate, ViewSyncCertificate}, simple_certificate::QuorumCertificate2, traits::{ - election::Membership, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, diff --git a/types/src/event.rs b/types/src/event.rs index c390a1bc63..c70bb2a8e7 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,10 +1,10 @@ //! Events that a `HotShot` instance can emit use crate::{ - certificate::QuorumCertificate, data::LeafType, error::HotShotError, - simple_certificate::QuorumCertificate2, traits::node_implementation::NodeType, + data::LeafType, error::HotShotError, simple_certificate::QuorumCertificate2, + traits::node_implementation::NodeType, }; -use commit::Commitment; + use std::sync::Arc; /// A status event emitted by a `HotShot` instance /// diff --git a/types/src/message.rs b/types/src/message.rs index 07ee231f97..c40e6f2e84 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -16,9 +16,9 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncVote, VoteType}, + vote::{DAVote, TimeoutVote, VIDVote, ViewSyncVote, VoteType}, }; -use commit::Commitment; + use derivative::Derivative; use either::Either::{self, Left, Right}; use serde::{Deserialize, Serialize}; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 70c2823e92..4749bf064e 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -20,10 +20,6 @@ pub struct YesData { pub leaf_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -pub struct NoData { - pub leaf_commit: Commitment, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct DAData { pub block_commit: Commitment, } @@ -205,7 +201,6 @@ impl = SimpleVote, M>; -pub type NoVote = SimpleVote, M>; pub type DAVote = SimpleVote, M>; pub type VIDVote = SimpleVote, M>; pub type TimeoutVote = SimpleVote, M>; diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index f1caade541..ac48fc593f 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -1,7 +1,6 @@ //! Contains the [`ConsensusApi`] trait. use crate::{ - certificate::QuorumCertificate, data::LeafType, error::HotShotError, event::{Event, EventType}, @@ -15,7 +14,7 @@ use crate::{ }, }; use async_trait::async_trait; -use commit::Commitment; + use std::{num::NonZeroUsize, sync::Arc, time::Duration}; /// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index e790d74a8d..66918f5363 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -29,7 +29,7 @@ use crate::{ signature_key::SignatureKey, state::ConsensusTime, }, - vote::{Accumulator, DAVote, QuorumVote, ViewSyncData, ViewSyncVote, VoteType, YesOrNoVote}, + vote::{Accumulator, DAVote, QuorumVote, ViewSyncData, ViewSyncVote, VoteType}, }; use bincode::Options; use commit::{Commitment, CommitmentBounds, Committable}; From 8e7fff845c8dce6a9e2f3bd420b1b1dde5816dc6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 12:02:29 -0400 Subject: [PATCH 0293/1393] just build works --- testing/src/overall_safety_task.rs | 5 +++-- testing/tests/consensus_task.rs | 25 +++++++++++++++---------- types/src/simple_vote.rs | 8 +------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 68df883ed1..8716142945 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -24,6 +24,7 @@ use hotshot_types::{ data::{DeltasType, LeafBlock, LeafType}, error::RoundTimedoutState, event::{Event, EventType}, + simple_certificate::QuorumCertificate2, traits::node_implementation::NodeType, }; use snafu::Snafu; @@ -95,7 +96,7 @@ pub struct RoundResult> { /// id -> (leaf, qc) // TODO GG: isn't it infeasible to store a Vec? #[allow(clippy::type_complexity)] - success_nodes: HashMap, QuorumCertificate>)>, + success_nodes: HashMap, QuorumCertificate2)>, /// Nodes that failed to commit this round pub failed_nodes: HashMap>>>, @@ -189,7 +190,7 @@ impl> RoundResult pub fn insert_into_result( &mut self, idx: usize, - result: (Vec, QuorumCertificate>), + result: (Vec, QuorumCertificate2), maybe_block_size: Option, ) -> Option { self.success_nodes.insert(idx as u64, result.clone()); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index cc268740d7..ddc83f37a1 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -12,6 +12,10 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::{build_quorum_proposal, key_pair_for_id}, }; +use hotshot_types::simple_vote::YesData; +use hotshot_types::simple_vote::YesVote; +use hotshot_types::traits::node_implementation::QuorumMembership; +use hotshot_types::vote2::Certificate2; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, @@ -39,7 +43,7 @@ async fn build_vote( let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); - let parent = if justify_qc.is_genesis() { + let parent = if justify_qc.is_genesis { let Some(genesis_view) = consensus.state_map.get(&ViewNumber::new(0)) else { panic!("Couldn't find genesis view in state map."); }; @@ -53,7 +57,7 @@ async fn build_vote( } else { consensus .saved_leaves - .get(&justify_qc.leaf_commitment()) + .get(&justify_qc.get_data().leaf_commit) .cloned() .unwrap() }; @@ -70,13 +74,14 @@ async fn build_vote( timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), }; - - quorum_exchange.create_yes_message( - proposal.justify_qc.commit(), - leaf.commit(), + let vote = + YesVote::, QuorumMembership>::create_signed_vote( + YesData { leaf_commit: leaf.commit() }, view, - vote_token, - ) + &quorum_exchange.public_key(), + &quorum_exchange.private_key(), + ); + GeneralConsensusMessage::::Vote(vote) } #[cfg(test)] @@ -89,7 +94,7 @@ async fn build_vote( async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::certificate::QuorumCertificate; + use hotshot_types::{certificate::QuorumCertificate, simple_certificate::QuorumCertificate2}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -101,7 +106,7 @@ async fn test_consensus_task() { let mut output = HashMap::new(); // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = QuorumCertificate::>>::genesis(); + let qc = QuorumCertificate2::>::genesis(); let proposal = build_quorum_proposal(&handle, &private_key, 1).await; input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 4749bf064e..a036c241d8 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -141,13 +141,7 @@ impl Committable for YesData { .finalize() } } -impl Committable for NoData { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("No Vote") - .var_size_bytes(self.leaf_commit.as_ref()) - .finalize() - } -} + impl Committable for DAData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("DA Vote") From 24ed16e57cd049be95c27f89fd52e278398db82b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 12:15:48 -0400 Subject: [PATCH 0294/1393] tests build and I think pass --- task-impls/src/vid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 29a4a32f54..d57f02f603 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -301,7 +301,7 @@ where // `self.cur_view` should be at least 1 since there is a view change before getting // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. - if view < self.cur_view - 1 { + if *self.cur_view == 0 || view < self.cur_view - 1 { warn!("Throwing away VID disperse data that is more than one view older"); return None; } From d04c56569c0aa03dba93b2951fe37a1d816e30e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:23:21 -0400 Subject: [PATCH 0295/1393] Bump jf-utils from `c0b8842` to `d99c22d` (#1970) * Bump jf-utils from `c0b8842` to `d99c22d` Bumps [jf-utils](https://github.com/espressosystems/jellyfish) from `c0b8842` to `d99c22d`. - [Release notes](https://github.com/espressosystems/jellyfish/releases) - [Commits](https://github.com/espressosystems/jellyfish/compare/c0b88424ac1c362c2066971d0da5c43e8832ed5f...d99c22da6b733131c665b8bf8edcc33258d4f41d) --- updated-dependencies: - dependency-name: jf-utils dependency-type: direct:production ... Signed-off-by: dependabot[bot] * hardcode to 8 chunks --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Rob --- task-impls/src/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4ed9a9f091..9038bb6b14 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -240,7 +240,7 @@ where // TODO proper source for VID erasure code rate // https://github.com/EspressoSystems/HotShot/issues/1734 - let num_chunks = num_storage_nodes / 2; + let num_chunks = 8; let vid = VidScheme::new(num_chunks, num_storage_nodes, &srs).unwrap(); From 7d75553a30fe2e54706c6944590d106c4023afab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:41:59 -0400 Subject: [PATCH 0296/1393] Bump jf-relation from `c0b8842` to `d99c22d` (#1969) * Bump jf-relation from `c0b8842` to `d99c22d` Bumps [jf-relation](https://github.com/EspressoSystems/jellyfish) from `c0b8842` to `d99c22d`. - [Release notes](https://github.com/EspressoSystems/jellyfish/releases) - [Commits](https://github.com/EspressoSystems/jellyfish/compare/c0b88424ac1c362c2066971d0da5c43e8832ed5f...d99c22da6b733131c665b8bf8edcc33258d4f41d) --- updated-dependencies: - dependency-name: jf-relation dependency-type: direct:production ... Signed-off-by: dependabot[bot] * num_storage_nodes to 8 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Rob --- task-impls/src/transactions.rs | 2 +- types/src/block_impl.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 9038bb6b14..1815eb62ab 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -231,7 +231,7 @@ where // TODO move all VID stuff to a new VID task // details here: https://github.com/EspressoSystems/HotShot/issues/1817#issuecomment-1747143528 - let num_storage_nodes = self.quorum_exchange.membership().total_nodes(); + let num_storage_nodes = 8; debug!("Prepare VID shares for {} storage nodes", num_storage_nodes); // TODO Secure SRS for VID diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index def1698078..af664791bd 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -20,10 +20,10 @@ use snafu::Snafu; // TODO /// Number of storage nodes for VID initiation. -pub const NUM_STORAGE_NODES: usize = 10; +pub const NUM_STORAGE_NODES: usize = 8; // TODO /// Number of chunks for VID initiation. -pub const NUM_CHUNKS: usize = 5; +pub const NUM_CHUNKS: usize = 8; /// The transaction in a [`VIDBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] From 452dd5bd71a5636f7ef1efb5e35eb54532908b9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:46:02 -0400 Subject: [PATCH 0297/1393] Bump serde_json from 1.0.107 to 1.0.108 (#1963) * Bump serde_json from 1.0.107 to 1.0.108 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.107 to 1.0.108. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.107...v1.0.108) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * fix lint --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Rob --- hotshot/Cargo.toml | 2 +- libp2p-networking/Cargo.toml | 2 +- task-impls/src/transactions.rs | 2 +- types/Cargo.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 0c26d2ade1..c89e0df218 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -122,6 +122,6 @@ async-std = { workspace = true } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.107" +serde_json = "1.0.108" toml = { workspace = true } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index f955ab1cca..c9dd3b56f2 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -42,7 +42,7 @@ libp2p-noise = { version = "0.43.2", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.107" +serde_json = "1.0.108" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 1815eb62ab..2fe424657c 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -20,7 +20,7 @@ use hotshot_types::{ message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::ConsensusApi, - election::{ConsensusExchange, Membership, QuorumExchangeType}, + election::{ConsensusExchange, QuorumExchangeType}, node_implementation::{NodeImplementation, NodeType, QuorumEx}, BlockPayload, }, diff --git a/types/Cargo.toml b/types/Cargo.toml index e0532c2e66..ab3be882eb 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -53,7 +53,7 @@ typenum = { workspace = true } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.14" } [dev-dependencies] -serde_json = "1.0.107" +serde_json = "1.0.108" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From b161d058155b43f6ec04e1248eebe07e119fbf2e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 13:51:53 -0400 Subject: [PATCH 0298/1393] cargo fix --- testing/src/overall_safety_task.rs | 1 - testing/src/task_helpers.rs | 1 - testing/tests/consensus_task.rs | 8 ++++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 8cd0f99bbb..058d4768c1 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -20,7 +20,6 @@ use hotshot_task::{ MergeN, }; use hotshot_types::{ - certificate::QuorumCertificate, data::{LeafBlockPayload, LeafType}, error::RoundTimedoutState, event::{Event, EventType}, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4e73eb3829..622e8ab66a 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -4,7 +4,6 @@ use crate::{ }; use commit::Committable; use hotshot::{ - certificate::QuorumCertificate, traits::{NodeImplementation, TestableNodeImplementation}, types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, HotShotConsensusApi, HotShotInitializer, SystemContext, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 2b95397455..f9c2dadf5d 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,4 +1,4 @@ -use commit::Commitment; + use commit::Committable; use hotshot::{ tasks::add_consensus_task, @@ -19,7 +19,7 @@ use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, traits::{ - election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + election::{ConsensusExchange, SignedCertificate}, node_implementation::ExchangesType, state::ConsensusTime, }, @@ -38,7 +38,7 @@ async fn build_vote( inner: handle.hotshot.inner.clone(), }; let quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); - let vote_token = quorum_exchange.make_vote_token(view).unwrap().unwrap(); + let _vote_token = quorum_exchange.make_vote_token(view).unwrap().unwrap(); let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); @@ -93,7 +93,7 @@ async fn build_vote( async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{certificate::QuorumCertificate, simple_certificate::QuorumCertificate2}; + use hotshot_types::{simple_certificate::QuorumCertificate2}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); From 5dca75df52db82a4f87ca19a588bac5d76392359 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 14:24:09 -0400 Subject: [PATCH 0299/1393] doc and lint --- hotshot/src/demo.rs | 2 +- hotshot/src/lib.rs | 2 +- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 8 ++++---- testing/src/task_helpers.rs | 2 +- testing/tests/consensus_task.rs | 11 ++++------- types/src/data.rs | 2 +- types/src/simple_certificate.rs | 8 ++++---- types/src/simple_vote.rs | 30 ++++++++++++++++++++++++++---- types/src/traits/storage.rs | 4 +--- types/src/vote2.rs | 3 +++ 11 files changed, 47 insertions(+), 27 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index d327ab4e0b..a558922ec5 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -12,7 +12,7 @@ use derivative::Derivative; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ block_impl::{BlockPayloadError, VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::{fake_commitment, LeafType, ViewNumber}, + data::{fake_commitment, ViewNumber}, traits::{ election::Membership, node_implementation::NodeType, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index c2c01d36ac..48cb49a0db 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -72,7 +72,7 @@ use hotshot_types::{ }, traits::{ consensus_api::{ConsensusApi, ConsensusSharedApi}, - election::{ConsensusExchange, Membership, SignedCertificate}, + election::{ConsensusExchange, Membership}, network::{CommunicationChannel, NetworkError}, node_implementation::{ ChannelMaps, CommitteeEx, ExchangesType, NodeType, QuorumEx, SendToTasks, VIDEx, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 6cca7cb62a..91cadbace6 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,7 +22,7 @@ use hotshot_types::{ event::EventType, message::MessageKind, traits::{ - election::{ConsensusExchange, QuorumExchangeType, SignedCertificate}, + election::{ConsensusExchange, QuorumExchangeType}, node_implementation::{ExchangesType, NodeType}, state::ConsensusTime, storage::Storage, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 3f5a3a4bcc..7b1da0860f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -461,8 +461,8 @@ where YesVote::>::create_signed_vote( YesData { leaf_commit: leaf.commit() }, view, - &self.quorum_exchange.public_key(), - &self.quorum_exchange.private_key(), + self.quorum_exchange.public_key(), + self.quorum_exchange.private_key(), ); let message = GeneralConsensusMessage::::Vote(vote); @@ -542,8 +542,8 @@ where YesVote::>::create_signed_vote( YesData { leaf_commit: leaf.commit() }, view, - &self.quorum_exchange.public_key(), - &self.quorum_exchange.private_key(), + self.quorum_exchange.public_key(), + self.quorum_exchange.private_key(), ); GeneralConsensusMessage::::Vote(vote) } else { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 622e8ab66a..719e2203d4 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -19,7 +19,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, consensus_api::ConsensusSharedApi, - election::{ConsensusExchange, Membership, SignedCertificate}, + election::{ConsensusExchange, Membership}, node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index f9c2dadf5d..483614e81d 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,4 +1,3 @@ - use commit::Committable; use hotshot::{ tasks::add_consensus_task, @@ -19,9 +18,7 @@ use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, traits::{ - election::{ConsensusExchange, SignedCertificate}, - node_implementation::ExchangesType, - state::ConsensusTime, + election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, }, }; @@ -77,8 +74,8 @@ async fn build_vote( YesVote::, QuorumMembership>::create_signed_vote( YesData { leaf_commit: leaf.commit() }, view, - &quorum_exchange.public_key(), - &quorum_exchange.private_key(), + quorum_exchange.public_key(), + quorum_exchange.private_key(), ); GeneralConsensusMessage::::Vote(vote) } @@ -93,7 +90,7 @@ async fn build_vote( async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{simple_certificate::QuorumCertificate2}; + use hotshot_types::simple_certificate::QuorumCertificate2; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); diff --git a/types/src/data.rs b/types/src/data.rs index fdd4a73130..824d965422 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -781,7 +781,7 @@ pub fn serialize_signature2( let mut signatures_bytes = vec![]; signatures_bytes.extend("Yes".as_bytes()); - let (sig, proof) = TYPES::SignatureKey::get_sig_proof(&signatures); + let (sig, proof) = TYPES::SignatureKey::get_sig_proof(signatures); let proof_bytes = bincode_opts() .serialize(&proof.as_bitslice()) .expect("This serialization shouldn't be able to fail"); diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 6daf9254c6..c412eb5efe 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -1,6 +1,4 @@ -#![allow(dead_code)] -#![allow(clippy::missing_docs_in_private_items)] -#![allow(missing_docs)] +//! Implementations of the simple certificate type. Used for Quorum, DA, and Timeout Certificates use std::{ fmt::{self, Debug, Display, Formatter}, @@ -109,6 +107,8 @@ impl< LEAF: Committable + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq + 'static, > SimpleCertificate> { + #[must_use] + /// Creat the Genisis certificate pub fn genesis() -> Self { let data = YesData { leaf_commit: Commitment::::default_commitment_no_preimage(), @@ -125,5 +125,5 @@ impl< } } -// Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file +/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `YesVotes` pub type QuorumCertificate2 = SimpleCertificate>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index a036c241d8..9a316735b0 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -1,6 +1,4 @@ -#![allow(dead_code)] -#![allow(clippy::missing_docs_in_private_items)] -#![allow(missing_docs)] +//! Implementations of the simple vote types. use std::{fmt::Debug, hash::Hash, marker::PhantomData}; @@ -15,23 +13,33 @@ use crate::{ }, vote2::{HasViewNumber, Vote2}, }; + #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a yes vote. pub struct YesData { + /// Commitment to the leaf pub leaf_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a DA vote. pub struct DAData { + /// Commitment to a block pub block_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a timeout vote. pub struct TimeoutData { + /// View the timeout is for pub view: TYPES::Time, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a VID vote. pub struct VIDData { + /// Commitment to the block the VID vote is on. pub block_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Pre Commit vote. pub struct ViewSyncPreCommitData { /// The relay this vote is intended for pub relay: EncodedPublicKey, @@ -39,6 +47,7 @@ pub struct ViewSyncPreCommitData { pub round: TYPES::Time, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Commit vote. pub struct ViewSyncCommitData { /// The relay this vote is intended for pub relay: EncodedPublicKey, @@ -46,6 +55,7 @@ pub struct ViewSyncCommitData { pub round: TYPES::Time, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Finalize vote. pub struct ViewSyncFinalizeData { /// The relay this vote is intended for pub relay: EncodedPublicKey, @@ -53,7 +63,9 @@ pub struct ViewSyncFinalizeData { pub round: TYPES::Time, } -/// Marker trait for data or commitments that can be voted on. +/// Marker trait for data or commitments that can be voted on. +/// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait +/// Sealing this trait prevents creating new vote types outside this file. pub trait Voteable: sealed::Sealed + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq { @@ -65,6 +77,7 @@ pub trait Voteable: mod sealed { use commit::Committable; + /// Only structs in this file can impl `Sealed` pub trait Sealed {} // TODO: Does the implement for things outside this file that are commitable? @@ -118,6 +131,7 @@ impl> V impl> SimpleVote { + /// Creates and signs a simple vote pub fn create_signed_vote( data: DATA, view: TYPES::Time, @@ -157,6 +171,7 @@ impl Committable for VIDData { } } +/// This implements commit for all the types which contain a view and relay public key. fn view_and_relay_commit( view: TYPES::Time, relay: &EncodedPublicKey, @@ -194,10 +209,17 @@ impl = SimpleVote, M>; +/// DA vote type alias pub type DAVote = SimpleVote, M>; +/// VID vote type alias pub type VIDVote = SimpleVote, M>; +/// Timeout Vote type alias pub type TimeoutVote = SimpleVote, M>; +/// View Sync Commit Vote type alias pub type ViewSyncCommitVote = SimpleVote, M>; +/// View Sync Pre Commit Vote type alias pub type ViewSyncPreCommitVote = SimpleVote, M>; +/// View Sync Finalize Vote type alias pub type ViewSyncFinalizeVote = SimpleVote, M>; diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 8cf477876f..b7078b151c 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -2,9 +2,7 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ - data::LeafType, - simple_certificate::QuorumCertificate2, - traits::{election::SignedCertificate, BlockPayload}, + data::LeafType, simple_certificate::QuorumCertificate2, traits::BlockPayload, vote2::HasViewNumber, }; use async_trait::async_trait; diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 6d80017655..2feed67a43 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -101,6 +101,9 @@ impl< { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. + /// + /// # Panics + /// Panics if the vote comes from a node not in the stake table pub fn accumulate(mut self, vote: &VOTE, membership: &VOTE::Membership) -> Either { let key = vote.get_signing_key(); From 5b14c2bf02e4dcaa8fa8cc69a18f5de6477d6450 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 2 Nov 2023 15:30:15 -0400 Subject: [PATCH 0300/1393] increase timeout test timeout --- hotshot/src/types/handle.rs | 25 ------------------------- testing/tests/timeout.rs | 8 ++++---- types/src/traits/election.rs | 13 ------------- 3 files changed, 4 insertions(+), 42 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 91cadbace6..daae23225b 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -332,31 +332,6 @@ impl + 'static> SystemContextHandl .sign_validating_or_commitment_proposal::(leaf_commitment) } - /// create a yes message - // #[cfg(feature = "hotshot-testing")] - // pub fn create_yes_message( - // &self, - // justify_qc_commitment: Commitment>>, - // leaf_commitment: Commitment, - // current_view: TYPES::Time, - // vote_token: TYPES::VoteTokenType, - // ) -> GeneralConsensusMessage - // where - // QuorumEx: ConsensusExchange< - // TYPES, - // Message, - // Certificate = QuorumCertificate>, - // >, - // { - // let inner = self.hotshot.inner.clone(); - // inner.exchanges.quorum_exchange().create_yes_message( - // justify_qc_commitment, - // leaf_commitment, - // current_view, - // vote_token, - // ) - // } - /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function #[cfg(feature = "hotshot-testing")] pub async fn send_broadcast_consensus_message(&self, msg: I::ConsensusMessage) { diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 3b7e2e2bfb..98c275868c 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -22,7 +22,7 @@ async fn test_timeout_web() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { - next_view_timeout: 1000, + next_view_timeout: 2000, ..Default::default() }; @@ -50,7 +50,7 @@ async fn test_timeout_web() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(30), + duration: Duration::from_secs(60), }, ); @@ -85,7 +85,7 @@ async fn test_timeout_libp2p() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { - next_view_timeout: 1000, + next_view_timeout: 2000, ..Default::default() }; @@ -113,7 +113,7 @@ async fn test_timeout_libp2p() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(30), + duration: Duration::from_secs(60), }, ); diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 8f39168382..d2872cc2f5 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -787,19 +787,6 @@ impl< pub trait QuorumExchangeType, M: NetworkMsg>: ConsensusExchange { - /// Create a message with a positive vote on validating or commitment proposal. - // TODO ED This returns just a general message type, it's not even bound to a proposal, and this is just a function on the QC. Make proprosal doesn't really apply to all cert types. - // fn create_yes_message>( - // &self, - // justify_qc_commitment: Commitment, - // leaf_commitment: Commitment, - // current_view: TYPES::Time, - // vote_token: TYPES::VoteTokenType, - // ) -> GeneralConsensusMessage - // where - // >::Certificate: commit::Committable, - // I::Exchanges: ExchangesType>; - /// Sign a validating or commitment proposal. fn sign_validating_or_commitment_proposal>( &self, From 80f7d8d77796a023ad4f121f9ad8fabe00ccf670 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 2 Nov 2023 12:52:17 -0700 Subject: [PATCH 0301/1393] Fix overflow, update some comments. --- task-impls/src/vid.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 0cd8da17a4..dd95ba3096 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -40,7 +40,7 @@ use tracing::{debug, error, instrument, warn}; /// Error type for consensus tasks pub struct ConsensusTaskError {} -/// Tracks state of a DA task +/// Tracks state of a VID task pub struct VIDTaskState< TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, @@ -77,7 +77,7 @@ pub struct VIDTaskState< pub id: u64, } -/// Struct to maintain DA Vote Collection task state +/// Struct to maintain VID Vote Collection task state pub struct VIDVoteCollectionTaskState< TYPES: NodeType, I: NodeImplementation>, @@ -209,7 +209,7 @@ where >, { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] pub async fn handle_event( &mut self, event: HotShotEvent, @@ -304,12 +304,11 @@ where // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view let view = disperse.data.get_view_number(); - // Allow a DA proposal that is one view older, in case we have voted on a quorum - // proposal and updated the view. - // `self.cur_view` should be at least 1 since there is a view change before getting - // the `DAProposalRecv` event. Otherewise, the view number subtraction below will - // cause an overflow error. - if view < self.cur_view - 1 { + // Allow VID disperse date that is one view older, in case we have updated the + // view. + // Adding `+ 1` on the LHS rather tahn `- 1` on the RHS, to avoid the overflow + // error due to subtracting the genesis view number. + if view + 1 < self.cur_view { warn!("Throwing away VID disperse data that is more than one view older"); return None; } From 0dad186c0f73dc89f9329a62612bf144dcac4415 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 2 Nov 2023 16:31:09 -0700 Subject: [PATCH 0302/1393] make config file path configurable --- orchestrator/src/config.rs | 4 ++-- testing/src/test_builder.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 8783010dda..e0abbbf5de 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -232,7 +232,7 @@ fn get_current_working_dir() -> std::io::Result { } impl ValidatorConfigFile { - pub fn from_file() -> Self { + pub fn from_file(dir_str: &str) -> Self { let current_working_dir = match get_current_working_dir() { Ok(dir) => dir, Err(e) => { @@ -241,7 +241,7 @@ impl ValidatorConfigFile { } }; let filename = current_working_dir.into_os_string().into_string().unwrap() - + "/../../config/ValidatorConfigFile.toml"; + + "/../../" + dir_str; match fs::read_to_string(filename.clone()) { // If successful return the files text as `contents`. Ok(contents) => { diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index aa25ade1e7..bf555dc021 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -216,7 +216,7 @@ impl TestMetadata { let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); if node_id == 0 { - my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file()); + my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file("/config/ValidatorConfigFile.toml")); } // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { From c7635429dc5cd11f4eb586d517aa7279d8e636d3 Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 2 Nov 2023 23:42:48 -0400 Subject: [PATCH 0303/1393] Refactor --- hotshot-qc/src/bit_vector.rs | 6 +- hotshot-stake-table/src/mt_based.rs | 49 ++- hotshot-stake-table/src/mt_based/config.rs | 2 +- hotshot-stake-table/src/mt_based/internal.rs | 26 +- hotshot-stake-table/src/vec_based.rs | 380 +++++++++++-------- hotshot-stake-table/src/vec_based/config.rs | 29 +- types/src/traits/stake_table.rs | 43 ++- 7 files changed, 316 insertions(+), 219 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index b348480d06..df05b64de4 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -219,9 +219,9 @@ mod tests { let key_pair3 = KeyPair::generate(&mut rng); let mut st = ST::new(3); - st.register(&key_pair1.ver_key(), U256::from(3u8)).unwrap(); - st.register(&key_pair2.ver_key(), U256::from(5u8)).unwrap(); - st.register(&key_pair3.ver_key(), U256::from(7u8)).unwrap(); + st.register(key_pair1.ver_key(), U256::from(3u8)).unwrap(); + st.register(key_pair2.ver_key(), U256::from(5u8)).unwrap(); + st.register(key_pair3.ver_key(), U256::from(7u8)).unwrap(); st.advance(); st.advance(); diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 74ff77dcf6..e1318ebd40 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -36,20 +36,22 @@ impl StakeTableScheme for StakeTable { type Commitment = MerkleCommitment; type LookupProof = MerkleProof; type IntoIter = internal::IntoIter; + type Aux = (); fn register( &mut self, - new_key: &Self::Key, + new_key: Self::Key, amount: Self::Amount, + _: Self::Aux, ) -> Result<(), StakeTableError> { - match self.mapping.get(new_key) { + match self.mapping.get(&new_key) { Some(_) => Err(StakeTableError::ExistingKey), None => { let pos = self.mapping.len(); self.head = self.head.register( self.height, &to_merkle_path(pos, self.height), - new_key, + &new_key, amount, )?; self.mapping.insert(new_key.clone(), pos); @@ -86,7 +88,18 @@ impl StakeTableScheme for StakeTable { self.mapping.contains_key(key) } - fn lookup( + fn lookup(&self, version: SnapshotVersion, key: &K) -> Result { + let root = Self::get_root(self, version)?; + match self.mapping.get(key) { + Some(index) => { + let branches = to_merkle_path(*index, self.height); + root.simple_lookup(self.height, &branches) + } + None => Err(StakeTableError::KeyNotFound), + } + } + + fn lookup_with_proof( &self, version: SnapshotVersion, key: &Self::Key, @@ -104,19 +117,13 @@ impl StakeTableScheme for StakeTable { Ok((amount, proof)) } - fn simple_lookup( + fn lookup_with_aux_and_proof( &self, version: SnapshotVersion, - key: &K, - ) -> Result { - let root = Self::get_root(self, version)?; - match self.mapping.get(key) { - Some(index) => { - let branches = to_merkle_path(*index, self.height); - root.simple_lookup(self.height, &branches) - } - None => Err(StakeTableError::KeyNotFound), - } + key: &Self::Key, + ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError> { + let (amount, proof) = self.lookup_with_proof(version, key)?; + Ok((amount, (), proof)) } fn update( @@ -231,7 +238,7 @@ mod tests { // Registering keys keys.iter() .take(4) - .for_each(|key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(*key, U256::from(100), ()).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(400)); assert_eq!(st.total_stake(SnapshotVersion::EpochStart)?, U256::from(0)); assert_eq!( @@ -247,7 +254,7 @@ mod tests { keys.iter() .skip(4) .take(3) - .for_each(|key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(*key, U256::from(100), ()).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(600)); assert_eq!( st.total_stake(SnapshotVersion::EpochStart)?, @@ -260,7 +267,7 @@ mod tests { st.advance(); keys.iter() .skip(7) - .for_each(|key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(*key, U256::from(100), ()).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(900)); assert_eq!( st.total_stake(SnapshotVersion::EpochStart)?, @@ -272,7 +279,7 @@ mod tests { ); // No duplicate register - assert!(st.register(&keys[0], U256::from(100)).is_err()); + assert!(st.register(keys[0], U256::from(100), ()).is_err()); // The 9-th key is still in head stake table assert!(st.lookup(SnapshotVersion::EpochStart, &keys[9]).is_err()); assert!(st.lookup(SnapshotVersion::EpochStart, &keys[5]).is_ok()); @@ -306,7 +313,9 @@ mod tests { ); // Testing membership proof - let proof = st.lookup(SnapshotVersion::EpochStart, &keys[5])?.1; + let proof = st + .lookup_with_proof(SnapshotVersion::EpochStart, &keys[5])? + .1; assert!(proof .verify(&st.commitment(SnapshotVersion::EpochStart)?) .is_ok()); diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs index 9b2b455b84..5d733bc725 100644 --- a/hotshot-stake-table/src/mt_based/config.rs +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -16,7 +16,7 @@ pub(crate) type Digest = FixedLengthRescueCRHF; impl ToFields for FieldType { const SIZE: usize = 1; fn to_fields(&self) -> Vec { - vec![FieldType::default()] + vec![*self] } } diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 3389ca35c0..5ab25b855c 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -512,7 +512,6 @@ impl PersistentMerkleNode { /// Traverse using post-order: children from left to right, finally visit the current. pub struct IntoIter { unvisited: Vec>>, - num_visited: usize, } impl IntoIter { @@ -522,47 +521,42 @@ impl IntoIter { pub(crate) fn new(root: Arc>) -> Self { Self { unvisited: vec![root], - num_visited: 0, } } } impl Iterator for IntoIter { - type Item = (K, U256); + type Item = (K, U256, ()); fn next(&mut self) -> Option { if self.unvisited.is_empty() { return None; } - let visiting = (**self.unvisited.last()?).clone(); + // This unwrap always succeed because `unvisited` is nonempty + let visiting = (*self.unvisited.pop().unwrap()).clone(); match visiting { PersistentMerkleNode::Empty => None, - PersistentMerkleNode::Leaf { - comm: _, - key, - value, - } => { - self.unvisited.pop(); - self.num_visited += 1; - Some((key, value)) - } PersistentMerkleNode::Branch { comm: _, children, num_keys: _, total_stakes: _, } => { - self.unvisited.pop(); // put the left-most child to the last, so it is visited first. self.unvisited.extend(children.into_iter().rev()); self.next() } + PersistentMerkleNode::Leaf { + comm: _, + key, + value, + } => Some((key, value, ())), } } } impl IntoIterator for PersistentMerkleNode { - type Item = (K, U256); + type Item = (K, U256, ()); type IntoIter = self::IntoIter; fn into_iter(self) -> Self::IntoIter { @@ -738,7 +732,7 @@ mod tests { .register(height as usize, &paths[i], &keys[i], amounts[i]) .unwrap(); } - for (i, (k, v)) in (*root).clone().into_iter().enumerate() { + for (i, (k, v, _)) in (*root).clone().into_iter().enumerate() { assert_eq!((k, v), (keys[i], amounts[i])); } } diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index f3cf51b577..2b9e330e18 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -4,74 +4,111 @@ use crate::utils::{u256_to_field, ToFields}; use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; use digest::crypto_common::rand_core::CryptoRngCore; use ethereum_types::{U256, U512}; -use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; -use jf_primitives::rescue::{sponge::RescueCRHF, RescueParameter}; +use hotshot_types::traits::stake_table::{ + SnapshotVersion, StakeTableError, StakeTableScheme, STAKE_TABLE_CAPACITY, +}; +use jf_primitives::{ + crhf::{VariableLengthRescueCRHF, CRHF}, + rescue::RescueParameter, +}; use serde::{Deserialize, Serialize}; pub mod config; +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct StakeTableSnapshot { + pub bls_keys: Vec, + pub schnorr_keys: Vec, + pub stake_amount: Vec, +} + +impl Default for StakeTableSnapshot { + fn default() -> Self { + Self { + bls_keys: vec![], + schnorr_keys: vec![], + stake_amount: vec![], + } + } +} + /// Locally maintained stake table, generic over public key type `K`. /// Whose commitment is a rescue hash of all key-value pairs over field `F`. /// NOTE: the commitment is only available for the finalized versions, and is /// computed only once when it's finalized. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct StakeTable, F: RescueParameter> { +pub struct StakeTable +where + K1: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + ToFields, + F: RescueParameter, +{ /// The most up-to-date stake table, where the incoming transactions shall be performed on. - head: Vec<(K, U256)>, + head: StakeTableSnapshot, /// The snapshot of stake table at the beginning of the current epoch - epoch_start: Vec<(K, U256)>, + epoch_start: StakeTableSnapshot, /// The stake table used for leader election. - last_epoch_start: Vec<(K, U256)>, + last_epoch_start: StakeTableSnapshot, /// Total stakes for different versions head_total_stake: U256, epoch_start_total_stake: U256, last_epoch_start_total_stake: U256, - /// Commitment for finalized versions - epoch_start_comm: F, - last_epoch_start_comm: F, + /// We only support committing the finalized versions. + /// Commitment for a finalized version is a triple where + /// - First item is the rescue hash of the bls keys + /// - Second item is the rescue hash of the Schnorr keys + /// - Third item is the rescue hash of all the stake amounts + epoch_start_comm: (F, F, F), + last_epoch_start_comm: (F, F, F), /// The mapping from public keys to their location in the Merkle tree. #[serde(skip)] - mapping: HashMap, + bls_mapping: HashMap, } -impl StakeTableScheme for StakeTable +impl StakeTableScheme for StakeTable where - K: Eq + Hash + Clone + ToFields, + K1: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + ToFields, F: RescueParameter, { - type Key = K; + /// The stake table is indexed by BLS key + type Key = K1; + /// The auxiliary information is the associated Schnorr key + type Aux = K2; type Amount = U256; - type Commitment = F; + type Commitment = (F, F, F); type LookupProof = (); // TODO(Chengyu): Can we make it references? - type IntoIter = as ark_std::iter::IntoIterator>::IntoIter; - // type IntoIter = ark_std::slice::Iter<'a, &'a (K, U256)>; + type IntoIter = as ark_std::iter::IntoIterator>::IntoIter; fn register( &mut self, - new_key: &Self::Key, + new_key: Self::Key, amount: Self::Amount, + aux: Self::Aux, ) -> Result<(), StakeTableError> { - match self.mapping.get(new_key) { + match self.bls_mapping.get(&new_key) { Some(_) => Err(StakeTableError::ExistingKey), None => { - let pos = self.mapping.len(); - self.head.push((new_key.clone(), amount)); + let pos = self.bls_mapping.len(); + self.head.bls_keys.push(new_key.clone()); + self.head.schnorr_keys.push(aux); + self.head.stake_amount.push(amount); self.head_total_stake += amount; - self.mapping.insert(new_key.clone(), pos); + self.bls_mapping.insert(new_key, pos); Ok(()) } } } fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError> { - match self.mapping.get(existing_key) { + match self.bls_mapping.get(existing_key) { Some(pos) => { - self.head_total_stake -= self.head[*pos].1; - self.head[*pos].1 = U256::zero(); + self.head_total_stake -= self.head.stake_amount[*pos]; + self.head.stake_amount[*pos] = U256::zero(); Ok(()) } None => Err(StakeTableError::KeyNotFound), @@ -98,83 +135,47 @@ where } fn len(&self, version: SnapshotVersion) -> Result { - match version { - SnapshotVersion::Head => Ok(self.head.len()), - SnapshotVersion::EpochStart => Ok(self.epoch_start.len()), - SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start.len()), - SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), - } + Ok(self.get_version(version)?.bls_keys.len()) } fn contains_key(&self, key: &Self::Key) -> bool { - self.mapping.contains_key(key) + self.bls_mapping.contains_key(key) } fn lookup( &self, version: SnapshotVersion, key: &Self::Key, - ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError> { - match self.mapping.get(key) { - Some(&pos) => match version { - SnapshotVersion::Head => { - if pos >= self.head.len() { - Err(StakeTableError::KeyNotFound) - } else { - Ok((self.head[pos].1, ())) - } - } - SnapshotVersion::EpochStart => { - if pos >= self.epoch_start.len() { - Err(StakeTableError::KeyNotFound) - } else { - Ok((self.epoch_start[pos].1, ())) - } - } - SnapshotVersion::LastEpochStart => { - if pos >= self.last_epoch_start.len() { - Err(StakeTableError::KeyNotFound) - } else { - Ok((self.last_epoch_start[pos].1, ())) - } - } - SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), - }, - None => Err(StakeTableError::KeyNotFound), + ) -> Result { + let table = self.get_version(version)?; + let pos = self.lookup_pos(key)?; + if pos >= table.bls_keys.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok(table.stake_amount[pos]) } } - fn simple_lookup( + fn lookup_with_proof( &self, version: SnapshotVersion, key: &Self::Key, - ) -> Result { - match self.mapping.get(key) { - Some(&pos) => match version { - SnapshotVersion::Head => { - if pos >= self.head.len() { - Err(StakeTableError::KeyNotFound) - } else { - Ok(self.head[pos].1) - } - } - SnapshotVersion::EpochStart => { - if pos >= self.epoch_start.len() { - Err(StakeTableError::KeyNotFound) - } else { - Ok(self.epoch_start[pos].1) - } - } - SnapshotVersion::LastEpochStart => { - if pos >= self.last_epoch_start.len() { - Err(StakeTableError::KeyNotFound) - } else { - Ok(self.last_epoch_start[pos].1) - } - } - SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), - }, - None => Err(StakeTableError::KeyNotFound), + ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError> { + let amount = self.lookup(version, key)?; + Ok((amount, ())) + } + + fn lookup_with_aux_and_proof( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError> { + let table = self.get_version(version)?; + let pos = self.lookup_pos(key)?; + if pos >= table.bls_keys.len() { + Err(StakeTableError::KeyNotFound) + } else { + Ok((table.stake_amount[pos], table.schnorr_keys[pos].clone(), ())) } } @@ -184,22 +185,18 @@ where delta: Self::Amount, negative: bool, ) -> Result { - match self.mapping.get(key) { - Some(&pos) => { - if negative { - if delta > self.head[pos].1 { - return Err(StakeTableError::InsufficientFund); - } - self.head_total_stake -= delta; - self.head[pos].1 -= delta; - } else { - self.head_total_stake += delta; - self.head[pos].1 += delta; - } - Ok(self.head[pos].1) + let pos = self.lookup_pos(key)?; + if negative { + if delta > self.head.stake_amount[pos] { + return Err(StakeTableError::InsufficientFund); } - None => Err(StakeTableError::KeyNotFound), + self.head_total_stake -= delta; + self.head.stake_amount[pos] -= delta; + } else { + self.head_total_stake += delta; + self.head.stake_amount[pos] += delta; } + Ok(self.head.stake_amount[pos]) } fn sample( @@ -212,41 +209,58 @@ where let m = U512::from(self.last_epoch_start_total_stake); let mut pos: U256 = (r % m).try_into().unwrap(); // won't fail let idx = 0; - while pos > self.last_epoch_start[idx].1 { - pos -= self.last_epoch_start[idx].1; + while pos > self.last_epoch_start.stake_amount[idx] { + pos -= self.last_epoch_start.stake_amount[idx]; } - Some((&self.last_epoch_start[idx].0, &self.last_epoch_start[idx].1)) + Some(( + &self.last_epoch_start.bls_keys[idx], + &self.last_epoch_start.stake_amount[idx], + )) } fn try_iter(&self, version: SnapshotVersion) -> Result { - match version { - SnapshotVersion::Head => Ok(self.head.clone().into_iter()), - SnapshotVersion::EpochStart => Ok(self.epoch_start.clone().into_iter()), - SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start.clone().into_iter()), - SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), - } + let table = self.get_version(version)?; + let owned = (0..table.bls_keys.len()) + .map(|i| { + ( + table.bls_keys[i].clone(), + table.stake_amount[i], + table.schnorr_keys[i].clone(), + ) + }) + .collect::>(); + Ok(owned.into_iter()) } } -impl StakeTable +impl StakeTable where - K: Eq + Hash + Clone + ToFields, + K1: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + ToFields, F: RescueParameter, { /// Initiating an empty stake table. - /// Overall capacity is `TREE_BRANCH.pow(height)`. pub fn new() -> Self { - let comm = RescueCRHF::sponge_with_zero_padding(&[], 1)[0]; + let to_be_hashed = vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; + let default_bls_comm = + VariableLengthRescueCRHF::::evaluate(&to_be_hashed).unwrap()[0]; + let to_be_hashed = vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; + let default_schnorr_comm = + VariableLengthRescueCRHF::::evaluate(&to_be_hashed).unwrap()[0]; + let to_be_hashed = vec![F::default(); STAKE_TABLE_CAPACITY]; + let default_stake_comm = + VariableLengthRescueCRHF::::evaluate(&to_be_hashed).unwrap()[0]; + let default_comm = (default_bls_comm, default_schnorr_comm, default_stake_comm); Self { - head: vec![], - epoch_start: vec![], - last_epoch_start: vec![], + head: StakeTableSnapshot::default(), + epoch_start: StakeTableSnapshot::default(), + last_epoch_start: StakeTableSnapshot::default(), head_total_stake: U256::zero(), epoch_start_total_stake: U256::zero(), last_epoch_start_total_stake: U256::zero(), - mapping: HashMap::new(), - epoch_start_comm: comm, - last_epoch_start_comm: comm, + bls_mapping: HashMap::new(), + epoch_start_comm: default_comm, + last_epoch_start_comm: default_comm, } } @@ -263,11 +277,11 @@ where /// Set the stake withheld by `key` to be `value`. /// Return the previous stake if succeed. - pub fn set_value(&mut self, key: &K, value: U256) -> Result { - match self.mapping.get(key) { + pub fn set_value(&mut self, key: &K1, value: U256) -> Result { + match self.bls_mapping.get(key) { Some(pos) => { - let old_value = self.head[*pos].1; - self.head[*pos].1 = value; + let old_value = self.head.stake_amount[*pos]; + self.head.stake_amount[*pos] = value; self.head_total_stake -= old_value; self.head_total_stake += value; Ok(old_value) @@ -277,37 +291,73 @@ where } /// Helper function to recompute the stake table commitment for head version - fn compute_head_comm(&mut self) -> F { - if self.head.is_empty() { - return RescueCRHF::sponge_with_zero_padding(&[], 1)[0]; + fn compute_head_comm(&mut self) -> (F, F, F) { + // Compute rescue hash for bls keys + let mut to_be_hashed = self + .head + .bls_keys + .iter() + .map(|key| key.to_fields()) + .collect::>() + .concat(); + to_be_hashed.resize( + STAKE_TABLE_CAPACITY * >::SIZE, + F::default(), + ); + let bls_comm = VariableLengthRescueCRHF::::evaluate(to_be_hashed).unwrap()[0]; + + // Compute rescue hash for Schnorr keys + let mut to_be_hashed = self + .head + .schnorr_keys + .iter() + .map(|key| key.to_fields()) + .collect::>() + .concat(); + to_be_hashed.resize( + STAKE_TABLE_CAPACITY * >::SIZE, + F::default(), + ); + let schnorr_comm = VariableLengthRescueCRHF::::evaluate(to_be_hashed).unwrap()[0]; + + // Compute rescue hash for stake amounts + let mut to_be_hashed = self + .head + .stake_amount + .iter() + .map(|x| u256_to_field(x)) + .collect::>(); + to_be_hashed.resize(STAKE_TABLE_CAPACITY, F::default()); + let stake_comm = VariableLengthRescueCRHF::::evaluate(to_be_hashed).unwrap()[0]; + (bls_comm, schnorr_comm, stake_comm) + } + + /// Return the index of a given key. + /// Err if the key doesn't exists + fn lookup_pos(&self, key: &K1) -> Result { + match self.bls_mapping.get(key) { + Some(pos) => Ok(*pos), + None => Err(StakeTableError::KeyNotFound), } - let mut to_be_hashed = vec![]; - self.head.iter().for_each(|(key, amount)| { - to_be_hashed.extend(key.to_fields()); - to_be_hashed.push(u256_to_field(amount)); - }); - let mut comm = to_be_hashed[0]; - for i in (1..self.head.len()).step_by(2) { - comm = RescueCRHF::sponge_with_zero_padding( - &[ - comm, - to_be_hashed[i], - if i + 1 < to_be_hashed.len() { - to_be_hashed[i + 1] - } else { - F::zero() - }, - ], - 1, - )[0]; + } + + fn get_version( + &self, + version: SnapshotVersion, + ) -> Result<&StakeTableSnapshot, StakeTableError> { + match version { + SnapshotVersion::Head => Ok(&self.head), + SnapshotVersion::EpochStart => Ok(&self.epoch_start), + SnapshotVersion::LastEpochStart => Ok(&self.last_epoch_start), + SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), } - comm } } -impl Default for StakeTable +impl Default for StakeTable where - K: Eq + Hash + Clone + ToFields, + K1: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + ToFields, F: RescueParameter, { fn default() -> Self { @@ -317,7 +367,7 @@ where #[cfg(test)] mod tests { - use super::config::{FieldType as F, KeyType as Key}; + use super::config::{BLSVerKey, FieldType as F, SchnorrVerKey}; use super::StakeTable; use ark_std::{rand::SeedableRng, vec::Vec}; use ethereum_types::U256; @@ -327,7 +377,7 @@ mod tests { #[test] fn test_stake_table() -> Result<(), StakeTableError> { - let mut st = StakeTable::::new(); + let mut st = StakeTable::::new(); let mut prng = jf_utils::test_rng(); let keys = (0..10) .map(|_| { @@ -344,7 +394,7 @@ mod tests { // Registering keys keys.iter() .take(4) - .for_each(|key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(key.0, U256::from(100), key.1.clone()).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(400)); assert_eq!(st.total_stake(SnapshotVersion::EpochStart)?, U256::from(0)); assert_eq!( @@ -353,14 +403,14 @@ mod tests { ); // set to zero for futher sampling test assert_eq!( - st.set_value(&keys[1], U256::from(0)).unwrap(), + st.set_value(&keys[1].0, U256::from(0)).unwrap(), U256::from(100) ); st.advance(); keys.iter() .skip(4) .take(3) - .for_each(|key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(key.0, U256::from(100), key.1.clone()).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(600)); assert_eq!( st.total_stake(SnapshotVersion::EpochStart)?, @@ -373,7 +423,7 @@ mod tests { st.advance(); keys.iter() .skip(7) - .for_each(|key| st.register(key, U256::from(100)).unwrap()); + .for_each(|key| st.register(key.0, U256::from(100), key.1.clone()).unwrap()); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(900)); assert_eq!( st.total_stake(SnapshotVersion::EpochStart)?, @@ -385,19 +435,23 @@ mod tests { ); // No duplicate register - assert!(st.register(&keys[0], U256::from(100)).is_err()); + assert!(st + .register(keys[0].0, U256::from(100), keys[0].1.clone()) + .is_err()); // The 9-th key is still in head stake table - assert!(st.lookup(SnapshotVersion::EpochStart, &keys[9]).is_err()); - assert!(st.lookup(SnapshotVersion::EpochStart, &keys[5]).is_ok()); + assert!(st.lookup(SnapshotVersion::EpochStart, &keys[9].0).is_err()); + assert!(st.lookup(SnapshotVersion::EpochStart, &keys[5].0).is_ok()); // The 6-th key is still frozen assert!(st - .lookup(SnapshotVersion::LastEpochStart, &keys[6]) + .lookup(SnapshotVersion::LastEpochStart, &keys[6].0) .is_err()); - assert!(st.lookup(SnapshotVersion::LastEpochStart, &keys[2]).is_ok()); + assert!(st + .lookup(SnapshotVersion::LastEpochStart, &keys[2].0) + .is_ok()); // Set value shall return the old value assert_eq!( - st.set_value(&keys[0], U256::from(101)).unwrap(), + st.set_value(&keys[0].0, U256::from(101)).unwrap(), U256::from(100) ); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(901)); @@ -407,14 +461,14 @@ mod tests { ); // Update that results in a negative stake - assert!(st.update(&keys[0], U256::from(1000), true).is_err()); + assert!(st.update(&keys[0].0, U256::from(1000), true).is_err()); // Update should return the updated stake assert_eq!( - st.update(&keys[0], U256::from(1), true).unwrap(), + st.update(&keys[0].0, U256::from(1), true).unwrap(), U256::from(100) ); assert_eq!( - st.update(&keys[0], U256::from(100), false).unwrap(), + st.update(&keys[0].0, U256::from(100), false).unwrap(), U256::from(200) ); diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index f5a449eeb3..751e1ceb76 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -1,22 +1,39 @@ //! Config file for stake table use crate::utils::ToFields; +use ark_ff::PrimeField; use ark_std::vec; -use jf_primitives::signatures::bls_over_bn254::VerKey as BLSVerKey; -use jf_primitives::signatures::schnorr::VerKey as SchnorrVerKey; +use jf_utils::to_bytes; -/// Key type -pub type KeyType = (BLSVerKey, SchnorrVerKey); +/// BLS verification key as indexing key +pub use jf_primitives::signatures::bls_over_bn254::VerKey as BLSVerKey; +/// Schnorr verification key as auxiliary information +pub type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; /// Type for commitment pub type FieldType = ark_ed_on_bn254::Fq; /// Hashable representation of a key /// NOTE: commitment is only used in light client contract. /// For this application, we needs only hash the Schnorr verfication key. -impl ToFields for KeyType { +impl ToFields for SchnorrVerKey { const SIZE: usize = 2; fn to_fields(&self) -> Vec { - let p = self.1.to_affine(); + let p = self.to_affine(); vec![p.x, p.y] } } + +impl ToFields for BLSVerKey { + const SIZE: usize = 5; + + fn to_fields(&self) -> Vec { + let bytes = to_bytes!(&self.to_affine()).unwrap(); + vec![ + FieldType::from_le_bytes_mod_order(&bytes[..31]), + FieldType::from_le_bytes_mod_order(&bytes[31..62]), + FieldType::from_le_bytes_mod_order(&bytes[62..93]), + FieldType::from_le_bytes_mod_order(&bytes[93..124]), + FieldType::from_le_bytes_mod_order(&bytes[124..]), + ] + } +} diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index cb085f2a50..b7d608ba36 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -5,6 +5,9 @@ use digest::crypto_common::rand_core::CryptoRngCore; use displaydoc::Display; use jf_primitives::errors::PrimitivesError; +/// Capacity of a stake table +pub const STAKE_TABLE_CAPACITY: usize = 1000; + /// Snapshots of the stake table pub enum SnapshotVersion { /// the latest "Head" where all new changes are applied to @@ -29,7 +32,9 @@ pub trait StakeTableScheme { /// type for the proof associated with the lookup result (if any) type LookupProof; /// type for the iterator over (key, value) entries - type IntoIter: Iterator; + type IntoIter: Iterator; + /// Auxiliary information associated with the key + type Aux: Clone; /// Register a new key into the stake table. /// @@ -38,8 +43,9 @@ pub trait StakeTableScheme { /// Return err if key is already registered. fn register( &mut self, - new_key: &Self::Key, + new_key: Self::Key, amount: Self::Amount, + aux: Self::Aux, ) -> Result<(), StakeTableError>; /// Batch register a list of new keys. A default implementation is provided @@ -48,15 +54,22 @@ pub trait StakeTableScheme { /// # Errors /// /// Return err if any of `new_keys` fails to register. - fn batch_register(&mut self, new_keys: I, amounts: J) -> Result<(), StakeTableError> + fn batch_register( + &mut self, + new_keys: I, + amounts: J, + auxs: K, + ) -> Result<(), StakeTableError> where I: IntoIterator, J: IntoIterator, + K: IntoIterator, { let _ = new_keys .into_iter() .zip(amounts) - .try_for_each(|(key, amount)| Self::register(self, &key, amount)); + .zip(auxs) + .try_for_each(|((key, amount), aux)| Self::register(self, key, amount, aux)); Ok(()) } @@ -105,8 +118,7 @@ pub trait StakeTableScheme { /// Returns true if `key` is currently registered, else returns false. fn contains_key(&self, key: &Self::Key) -> bool; - /// Lookup the stake under a key against a specific historical `version`, - /// returns error if keys unregistered. + /// Returns the stakes withhelded by a public key. /// /// # Errors /// Return err if the `version` is not supported or `key` doesn't exist. @@ -114,18 +126,29 @@ pub trait StakeTableScheme { &self, version: SnapshotVersion, key: &Self::Key, + ) -> Result; + + /// Returns the stakes withhelded by a public key along with a membership proof. + /// + /// # Errors + /// Return err if the `version` is not supported or `key` doesn't exist. + fn lookup_with_proof( + &self, + version: SnapshotVersion, + key: &Self::Key, ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError>; - /// Returns the stakes withhelded by a public key, None if the key is not registered. - /// If you need a lookup proof, use [`Self::lookup()`] instead (which is usually more expensive). + /// Return the associated stake amount and auxiliary information of a public key, + /// along with a membership proof. /// /// # Errors /// Return err if the `version` is not supported or `key` doesn't exist. - fn simple_lookup( + #[allow(clippy::type_complexity)] + fn lookup_with_aux_and_proof( &self, version: SnapshotVersion, key: &Self::Key, - ) -> Result; + ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError>; /// Update the stake of the `key` with `(negative ? -1 : 1) * delta`. /// Return the updated stake or error. From f727d4b507dc4e0ead83197d4e4db30bfb3377bf Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 2 Nov 2023 23:48:46 -0400 Subject: [PATCH 0304/1393] Fix serialization and test for iter --- hotshot-stake-table/src/vec_based.rs | 5 +++++ hotshot-stake-table/src/vec_based/config.rs | 6 ++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 2b9e330e18..10ee587da5 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -485,6 +485,11 @@ mod tests { assert!(value > &U256::from(0)); } + // Test for try_iter + for (i, (k1, _, k2)) in st.try_iter(SnapshotVersion::Head).unwrap().enumerate() { + assert_eq!((k1, k2), keys[i]); + } + Ok(()) } } diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 751e1ceb76..338aa05080 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -24,16 +24,14 @@ impl ToFields for SchnorrVerKey { } impl ToFields for BLSVerKey { - const SIZE: usize = 5; + const SIZE: usize = 2; fn to_fields(&self) -> Vec { let bytes = to_bytes!(&self.to_affine()).unwrap(); vec![ FieldType::from_le_bytes_mod_order(&bytes[..31]), FieldType::from_le_bytes_mod_order(&bytes[31..62]), - FieldType::from_le_bytes_mod_order(&bytes[62..93]), - FieldType::from_le_bytes_mod_order(&bytes[93..124]), - FieldType::from_le_bytes_mod_order(&bytes[124..]), + FieldType::from_le_bytes_mod_order(&bytes[62..]), ] } } From 70b9e9c17d4a8c10da72e1709906902ed3e7a4de Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 2 Nov 2023 23:55:06 -0400 Subject: [PATCH 0305/1393] fix build in other crates --- hotshot-qc/src/bit_vector.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index df05b64de4..833d62c673 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -219,9 +219,12 @@ mod tests { let key_pair3 = KeyPair::generate(&mut rng); let mut st = ST::new(3); - st.register(key_pair1.ver_key(), U256::from(3u8)).unwrap(); - st.register(key_pair2.ver_key(), U256::from(5u8)).unwrap(); - st.register(key_pair3.ver_key(), U256::from(7u8)).unwrap(); + st.register(key_pair1.ver_key(), U256::from(3u8), ()) + .unwrap(); + st.register(key_pair2.ver_key(), U256::from(5u8), ()) + .unwrap(); + st.register(key_pair3.ver_key(), U256::from(7u8), ()) + .unwrap(); st.advance(); st.advance(); From 97f4b45daf941346b2fafb1e492de69ab7809efe Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 3 Nov 2023 01:28:08 -0700 Subject: [PATCH 0306/1393] scripts for generating key pairs added --- orchestrator/src/config.rs | 14 +++++--------- testing/src/test_builder.rs | 2 +- types/src/lib.rs | 3 ++- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index e0abbbf5de..a42be2aaf0 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -213,8 +213,8 @@ pub struct ValidatorConfigFile { pub seed: [u8; 32], /// The validator's index, which can be treated as another input to the seed pub node_id: u64, - /// The validator's stake - pub stake_value: u64, + // The validator's stake, commented for now + // pub stake_value: u64, } impl Default for ValidatorConfigFile { @@ -222,18 +222,13 @@ impl Default for ValidatorConfigFile { Self { seed: [0u8; 32], node_id: 0, - stake_value: 1, } } } -fn get_current_working_dir() -> std::io::Result { - env::current_dir() -} - impl ValidatorConfigFile { pub fn from_file(dir_str: &str) -> Self { - let current_working_dir = match get_current_working_dir() { + let current_working_dir = match env::current_dir() { Ok(dir) => dir, Err(e) => { error!("get_current_working_dir error: {:?}", e); @@ -303,8 +298,9 @@ fn default_padding() -> usize { impl From for ValidatorConfig { fn from(val: ValidatorConfigFile) -> Self { + // here stake_value is set to 1, since we don't input stake_value from ValidatorConfigFile for now let validator_config = - ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, val.stake_value); + ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1); ValidatorConfig { public_key: validator_config.public_key, private_key: validator_config.private_key, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index bf555dc021..69204eaafc 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -216,7 +216,7 @@ impl TestMetadata { let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); if node_id == 0 { - my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file("/config/ValidatorConfigFile.toml")); + my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file("config/ValidatorConfigFile.toml")); } // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { diff --git a/types/src/lib.rs b/types/src/lib.rs index 4a74e0767a..1833add6d9 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -10,6 +10,7 @@ #![allow(clippy::module_name_repetitions)] use std::{num::NonZeroUsize, time::Duration}; +use displaydoc::Display; use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod block_impl; pub mod certificate; @@ -34,7 +35,7 @@ pub enum ExecutionType { Incremental, } -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] #[serde(bound(deserialize = ""))] /// config for validator, including public key, private key, stake value pub struct ValidatorConfig { From 3f88ee3b0bead696bee49f8ce43eef2e9280222e Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 3 Nov 2023 01:31:42 -0700 Subject: [PATCH 0307/1393] fix lint --- orchestrator/src/config.rs | 15 +++------------ testing/src/test_builder.rs | 4 +++- types/src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index a42be2aaf0..cec5deaf3d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -206,7 +206,7 @@ pub struct HotShotConfigFile { } /// Holds configuration for a validator node -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Default)] #[serde(bound(deserialize = ""))] pub struct ValidatorConfigFile { /// The validator's seed @@ -217,15 +217,6 @@ pub struct ValidatorConfigFile { // pub stake_value: u64, } -impl Default for ValidatorConfigFile { - fn default() -> Self { - Self { - seed: [0u8; 32], - node_id: 0, - } - } -} - impl ValidatorConfigFile { pub fn from_file(dir_str: &str) -> Self { let current_working_dir = match env::current_dir() { @@ -235,8 +226,8 @@ impl ValidatorConfigFile { PathBuf::from("") } }; - let filename = current_working_dir.into_os_string().into_string().unwrap() - + "/../../" + dir_str; + let filename = + current_working_dir.into_os_string().into_string().unwrap() + "/../../" + dir_str; match fs::read_to_string(filename.clone()) { // If successful return the files text as `contents`. Ok(contents) => { diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 69204eaafc..14eab3a556 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -216,7 +216,9 @@ impl TestMetadata { let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); if node_id == 0 { - my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file("config/ValidatorConfigFile.toml")); + my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file( + "config/ValidatorConfigFile.toml", + )); } // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { diff --git a/types/src/lib.rs b/types/src/lib.rs index 1833add6d9..bac574d6aa 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -9,8 +9,8 @@ )] #![allow(clippy::module_name_repetitions)] -use std::{num::NonZeroUsize, time::Duration}; use displaydoc::Display; +use std::{num::NonZeroUsize, time::Duration}; use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod block_impl; pub mod certificate; From 4200da752ac3c6752da61063034c2879ca47f810 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 3 Nov 2023 01:33:12 -0700 Subject: [PATCH 0308/1393] forget to add new file for validator config --- testing/tests/gen_key_pair.rs | 37 +++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 testing/tests/gen_key_pair.rs diff --git a/testing/tests/gen_key_pair.rs b/testing/tests/gen_key_pair.rs new file mode 100644 index 0000000000..ab7cbcb1aa --- /dev/null +++ b/testing/tests/gen_key_pair.rs @@ -0,0 +1,37 @@ +#[cfg(test)] +mod tests { + use core::panic; + use hotshot::types::{bn254::BLSPubKey, SignatureKey}; + use hotshot_orchestrator::config::ValidatorConfigFile; + use hotshot_types::ValidatorConfig; + use std::env; + use std::fs::File; + use std::io::prelude::*; + #[test] + fn gen_key_pair_gen_from_config_file() { + let config_file = ValidatorConfigFile::from_file("config/ValidatorConfigFile.toml"); + let my_own_validator_config = ValidatorConfig::::from(config_file.clone()); + if config_file.seed == [0u8; 32] && config_file.node_id == 0 { + assert_eq!( + my_own_validator_config.public_key, + ::from_private(&my_own_validator_config.private_key) + ); + } + + let current_working_dir = match env::current_dir() { + Ok(dir) => dir, + Err(e) => { + panic!("get_current_working_dir error: {:?}", e); + } + }; + let filename = current_working_dir.into_os_string().into_string().unwrap() + + "/../../config/ValidatorConfigOutput"; + match File::create(filename) { + Err(why) => panic!("couldn't create file for output key pairs: {}", why), + Ok(mut file) => match write!(file, "{:?}", my_own_validator_config) { + Err(why) => panic!("couldn't generate key pairs and write to the file: {}", why), + Ok(_) => println!("successfully wrote to file for output key pairs"), + }, + } + } +} From 0d1f0abdf0b90420bb9ea346b1a02bb27cd190ea Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 10:49:42 -0400 Subject: [PATCH 0309/1393] Addressing comments, renaming Yes -> Quorum for Vote/Data --- hotshot/src/traits/storage/memory_storage.rs | 4 +-- hotshot/src/types/handle.rs | 4 +-- task-impls/src/consensus.rs | 12 +++---- task-impls/src/events.rs | 6 ++-- testing/tests/consensus_task.rs | 8 ++--- types/src/message.rs | 6 ++-- types/src/simple_certificate.rs | 20 +++++------ types/src/simple_vote.rs | 38 ++++++++++---------- types/src/vote2.rs | 3 +- 9 files changed, 50 insertions(+), 51 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 34e5c38a0c..f06c0e5dc2 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -166,14 +166,14 @@ mod test { // TODO is it okay to be using genesis here? let _dummy_block_commit = fake_commitment::(); let dummy_leaf_commit = fake_commitment::>(); - let data = hotshot_types::simple_vote::YesData { + let data = hotshot_types::simple_vote::QuorumData { leaf_commit: dummy_leaf_commit, }; let commit = data.commit(); StoredView::from_qc_block_and_state( QuorumCertificate2 { is_genesis: view_number == ::Time::genesis(), - leaf_commitment: data, + data, vote_commitment: commit, signatures: None, view_number, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index daae23225b..9500698bbc 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -14,7 +14,7 @@ use hotshot_task::{ BoxSyncFuture, }; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::simple_vote::YesData; +use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ consensus::Consensus, data::LeafType, @@ -192,7 +192,7 @@ impl + 'static> SystemContextHandl if anchor_leaf.view_number == TYPES::Time::genesis() { let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); let mut qc = QuorumCertificate2::::genesis(); - qc.leaf_commitment = YesData { + qc.data = QuorumData { leaf_commit: leaf.commit(), }; let event = Event { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7b1da0860f..f5b755cd8a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -23,7 +23,7 @@ use hotshot_types::{ event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, simple_certificate::QuorumCertificate2, - simple_vote::{YesData, YesVote}, + simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -176,7 +176,7 @@ pub struct VoteCollectionTaskState< pub accumulator: Either< VoteAccumulator2< TYPES, - YesVote>, + QuorumVote>, QuorumCertificate2, >, QuorumCertificate2, @@ -458,8 +458,8 @@ where proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), }; let vote = - YesVote::>::create_signed_vote( - YesData { leaf_commit: leaf.commit() }, + QuorumVote::>::create_signed_vote( + QuorumData { leaf_commit: leaf.commit() }, view, self.quorum_exchange.public_key(), self.quorum_exchange.private_key(), @@ -539,8 +539,8 @@ where return false; } let vote = - YesVote::>::create_signed_vote( - YesData { leaf_commit: leaf.commit() }, + QuorumVote::>::create_signed_vote( + QuorumData { leaf_commit: leaf.commit() }, view, self.quorum_exchange.public_key(), self.quorum_exchange.private_key(), diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 12f7f3b571..8bf8db5de2 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -7,7 +7,7 @@ use hotshot_types::{ data::{DAProposal, VidDisperse}, message::Proposal, simple_certificate::QuorumCertificate2, - simple_vote::YesVote, + simple_vote::QuorumVote, traits::node_implementation::{ NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, ViewSyncProposalType, }, @@ -22,7 +22,7 @@ pub enum HotShotEvent> { /// A quorum proposal has been received from the network; handled by the consensus task QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(YesVote>), + QuorumVoteRecv(QuorumVote>), /// A timeout vote recevied from the network; handled by consensus task TimeoutVoteRecv(TimeoutVote), /// Send a timeout vote to the network; emitted by consensus task replicas @@ -36,7 +36,7 @@ pub enum HotShotEvent> { /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(YesVote>), + QuorumVoteSend(QuorumVote>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 483614e81d..968fba31eb 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -10,8 +10,8 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::{build_quorum_proposal, key_pair_for_id}, }; -use hotshot_types::simple_vote::YesData; -use hotshot_types::simple_vote::YesVote; +use hotshot_types::simple_vote::QuorumData; +use hotshot_types::simple_vote::QuorumVote; use hotshot_types::traits::node_implementation::QuorumMembership; use hotshot_types::vote2::Certificate2; use hotshot_types::{ @@ -71,8 +71,8 @@ async fn build_vote( proposer_id: quorum_exchange.get_leader(view).to_bytes(), }; let vote = - YesVote::, QuorumMembership>::create_signed_vote( - YesData { leaf_commit: leaf.commit() }, + QuorumVote::, QuorumMembership>::create_signed_vote( + QuorumData { leaf_commit: leaf.commit() }, view, quorum_exchange.public_key(), quorum_exchange.private_key(), diff --git a/types/src/message.rs b/types/src/message.rs index c40e6f2e84..cc1acc80f7 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -7,7 +7,7 @@ use crate::vote2::HasViewNumber; use crate::{ certificate::{DACertificate, VIDCertificate}, data::{DAProposal, ProposalType, VidDisperse}, - simple_vote::YesVote, + simple_vote::QuorumVote, traits::{ network::{NetworkMsg, ViewMessage}, node_implementation::{ @@ -152,7 +152,7 @@ where Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. Vote( - YesVote>, + QuorumVote>, TYPES::SignatureKey, ), /// Message with a view sync vote. @@ -320,7 +320,7 @@ where Proposal(Proposal>), /// Message with a quorum vote. - Vote(YesVote>), + Vote(QuorumVote>), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index c412eb5efe..18bf1d0259 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - simple_vote::{Voteable, YesData}, + simple_vote::{QuorumData, Voteable}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -23,8 +23,8 @@ use serde::{Deserialize, Serialize}; /// A certificate which can be created by aggregating many simple votes on the commitment. #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] pub struct SimpleCertificate { - /// commitment to previous leaf which all the votes in this certificate are voting on - pub leaf_commitment: VOTEABLE, + /// The data this certificate is for. I.e the thing that was voted on to create this Certificate + pub data: VOTEABLE, /// commitment of all the votes this cert should be signed over pub vote_commitment: Commitment, /// Which view this QC relates to @@ -50,7 +50,7 @@ impl Certificate2 view: TYPES::Time, ) -> Self { SimpleCertificate { - leaf_commitment: data, + data, vote_commitment, view_number: view, signatures: Some(sig), @@ -76,7 +76,7 @@ impl Certificate2 membership.success_threshold().into() } fn get_data(&self) -> &Self::Voteable { - &self.leaf_commitment + &self.data } fn get_data_commitment(&self) -> Commitment { self.vote_commitment @@ -105,17 +105,17 @@ impl Display impl< TYPES: NodeType, LEAF: Committable + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq + 'static, - > SimpleCertificate> + > QuorumCertificate2 { #[must_use] /// Creat the Genisis certificate pub fn genesis() -> Self { - let data = YesData { + let data = QuorumData { leaf_commit: Commitment::::default_commitment_no_preimage(), }; let commit = data.commit(); Self { - leaf_commitment: data, + data, vote_commitment: commit, view_number: ::genesis(), signatures: None, @@ -125,5 +125,5 @@ impl< } } -/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `YesVotes` -pub type QuorumCertificate2 = SimpleCertificate>; +/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` +pub type QuorumCertificate2 = SimpleCertificate>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 9a316735b0..62438ea4e9 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -16,15 +16,15 @@ use crate::{ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. -pub struct YesData { +pub struct QuorumData { /// Commitment to the leaf pub leaf_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. -pub struct DAData { - /// Commitment to a block - pub block_commit: Commitment, +pub struct DAData { + /// Commitment to a block payload + pub payload_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a timeout vote. @@ -34,9 +34,9 @@ pub struct TimeoutData { } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a VID vote. -pub struct VIDData { - /// Commitment to the block the VID vote is on. - pub block_commit: Commitment, +pub struct VIDData { + /// Commitment to the block payload the VID vote is on. + pub payload_commit: Commitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Pre Commit vote. @@ -92,7 +92,7 @@ pub struct SimpleVote, } @@ -101,7 +101,7 @@ impl> H for SimpleVote { fn get_view_number(&self) -> ::Time { - self.current_view + self.view_number } } @@ -142,13 +142,13 @@ impl> Self { signature: (pub_key.to_bytes(), signature), data, - current_view: view, + view_number: view, _pd: PhantomData, } } } -impl Committable for YesData { +impl Committable for QuorumData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Yes Vote") .var_size_bytes(self.leaf_commit.as_ref()) @@ -156,17 +156,17 @@ impl Committable for YesData { } } -impl Committable for DAData { +impl Committable for DAData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("DA Vote") - .var_size_bytes(self.block_commit.as_ref()) + .var_size_bytes(self.payload_commit.as_ref()) .finalize() } } -impl Committable for VIDData { +impl Committable for VIDData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("DA Vote") - .var_size_bytes(self.block_commit.as_ref()) + commit::RawCommitmentBuilder::new("VID Vote") + .var_size_bytes(self.payload_commit.as_ref()) .finalize() } } @@ -210,11 +210,11 @@ impl = SimpleVote, M>; +pub type QuorumVote = SimpleVote, M>; /// DA vote type alias -pub type DAVote = SimpleVote, M>; +pub type DAVote = SimpleVote, M>; /// VID vote type alias -pub type VIDVote = SimpleVote, M>; +pub type VIDVote = SimpleVote, M>; /// Timeout Vote type alias pub type TimeoutVote = SimpleVote, M>; /// View Sync Commit Vote type alias diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 2feed67a43..0c926b8f5f 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -109,8 +109,7 @@ impl< let vote_commitment = vote.get_data_commitment(); if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { - error!("Vote data is {:?}", vote.get_data_commitment()); - error!("Invalid vote! Data"); + error!("Invalid vote! Vote Data {:?}", vote.get_data()); return Either::Left(self); } From e0dc533cc2057ed67b0c384c3829fd90dd5a90fc Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 10:51:43 -0400 Subject: [PATCH 0310/1393] better view check in VID --- task-impls/src/vid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 502948e7d0..70cec7fc5d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -309,7 +309,7 @@ where // `self.cur_view` should be at least 1 since there is a view change before getting // the `DAProposalRecv` event. Otherewise, the view number subtraction below will // cause an overflow error. - if *self.cur_view == 0 || view < self.cur_view - 1 { + if view + 1 < self.cur_view { warn!("Throwing away VID disperse data that is more than one view older"); return None; } From 3278d917a68e9f53ea446ece7f394266eeca9d79 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 13:53:19 -0400 Subject: [PATCH 0311/1393] Integrate new DAVote and Certificate --- task-impls/src/consensus.rs | 11 +++--- task-impls/src/da.rs | 59 ++++++++++++++++----------------- task-impls/src/events.rs | 19 ++++++----- task-impls/src/network.rs | 4 +-- testing/src/task_helpers.rs | 1 - testing/tests/da_task.rs | 15 +++++---- types/src/data.rs | 6 +--- types/src/message.rs | 46 +++++++++++++++---------- types/src/simple_certificate.rs | 6 ++-- types/src/simple_vote.rs | 4 +-- types/src/traits/election.rs | 47 -------------------------- 11 files changed, 90 insertions(+), 128 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f5b755cd8a..803b952a14 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -22,7 +22,7 @@ use hotshot_types::{ data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, - simple_certificate::QuorumCertificate2, + simple_certificate::{DACertificate2, QuorumCertificate2}, simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, @@ -129,7 +129,7 @@ pub struct ConsensusTaskState< pub output_event_stream: ChannelStream>, /// All the DA certs we've received for current and future views. - pub da_certs: HashMap>, + pub da_certs: HashMap>, /// All the VID certs we've received for current and future views. pub vid_certs: HashMap>, @@ -529,10 +529,10 @@ where }; // Validate the DAC. - let message = if self.committee_exchange.is_valid_cert(cert) { + let message = if cert.is_valid_cert(self.committee_exchange.membership()) { // Validate the block payload commitment for non-genesis DAC. - if !cert.is_genesis() - && cert.leaf_commitment() + if !cert.is_genesis + && cert.get_data().payload_commit != proposal.block_header.payload_commitment() { error!("Block payload commitment does not equal parent commitment"); @@ -1372,7 +1372,6 @@ where justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), proposer_id: leaf.proposer_id, - dac: None, }; let message = Proposal { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b027d532a9..086f353e5f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -12,15 +12,15 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::vote::DAVoteAccumulator; use hotshot_types::{ certificate::DACertificate, consensus::{Consensus, View}, data::{DAProposal, Leaf, ProposalType}, message::{Message, Proposal, SequencingMessage}, + simple_vote::{DAData, DAVote2}, traits::{ consensus_api::ConsensusApi, - election::{CommitteeExchangeType, ConsensusExchange, Membership, SignedCertificate}, + election::{CommitteeExchangeType, ConsensusExchange, Membership}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -28,6 +28,11 @@ use hotshot_types::{ BlockPayload, }, utils::ViewInner, + vote2::HasViewNumber, + vote2::VoteAccumulator2, +}; +use hotshot_types::{ + simple_certificate::DACertificate2, traits::node_implementation::CommitteeMembership, }; use snafu::Snafu; @@ -92,13 +97,12 @@ pub struct DAVoteCollectionTaskState< #[allow(clippy::type_complexity)] /// Accumulates DA votes pub accumulator: Either< - as SignedCertificate< + VoteAccumulator2< TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Commitment, - >>::VoteAccumulator, - DACertificate, + DAVote2>, + DACertificate2, + >, + DACertificate2, >, /// the current view pub cur_view: TYPES::Time, @@ -138,8 +142,8 @@ where { match event { HotShotEvent::DAVoteRecv(vote) => { - debug!("DA vote recv, collection task {:?}", vote.current_view); - // panic!("Vote handle received DA vote for view {}", *vote.current_view); + debug!("DA vote recv, collection task {:?}", vote.get_view_number()); + // panic!("Vote handle received DA vote for view {}", *vote.get_view_number()); // For the case where we receive votes after we've made a certificate if state.accumulator.is_right() { @@ -149,11 +153,7 @@ where let accumulator = state.accumulator.left().unwrap(); - match state.committee_exchange.accumulate_vote( - accumulator, - &vote, - &vote.payload_commitment, - ) { + match accumulator.accumulate(&vote, state.committee_exchange.membership()) { Left(new_accumulator) => { state.accumulator = either::Left(new_accumulator); } @@ -260,18 +260,21 @@ where Ok(None) => { debug!("We were not chosen for DA committee on {:?}", view); } - Ok(Some(vote_token)) => { + Ok(Some(_vote_token)) => { // Generate and send vote - let vote = self.committee_exchange.create_da_message( - payload_commitment, + let vote = DAVote2::create_signed_vote( + DAData { + payload_commit: payload_commitment, + }, view, - vote_token, + self.committee_exchange.public_key(), + self.committee_exchange.private_key(), ); // ED Don't think this is necessary? // self.cur_view = view; - debug!("Sending vote to the DA leader {:?}", vote.current_view); + debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); self.event_stream .publish(HotShotEvent::DAVoteSend(vote)) .await; @@ -294,9 +297,9 @@ where } } HotShotEvent::DAVoteRecv(vote) => { - debug!("DA vote recv, Main Task {:?}", vote.current_view,); + debug!("DA vote recv, Main Task {:?}", vote.get_view_number(),); // Check if we are the leader and the vote is from the sender. - let view = vote.current_view; + let view = vote.get_view_number(); if !self.committee_exchange.is_leader(view) { error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.committee_exchange.is_leader(view + 1)); return None; @@ -317,19 +320,15 @@ where TYPES::Time::new(0) }; - let new_accumulator = DAVoteAccumulator { - da_vote_outcomes: HashMap::new(), - success_threshold: self.committee_exchange.success_threshold(), + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.committee_exchange.total_nodes()], phantom: PhantomData, }; - let accumulator = self.committee_exchange.accumulate_vote( - new_accumulator, - &vote, - &vote.clone().payload_commitment, - ); + let accumulator = + new_accumulator.accumulate(&vote, self.committee_exchange.membership()); if view > collection_view { let state = DAVoteCollectionTaskState { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8bf8db5de2..6b502afabb 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -3,15 +3,16 @@ use crate::view_sync::ViewSyncPhase; use commit::Commitment; use either::Either; use hotshot_types::{ - certificate::{DACertificate, TimeoutCertificate, VIDCertificate}, + certificate::{TimeoutCertificate, VIDCertificate}, data::{DAProposal, VidDisperse}, message::Proposal, - simple_certificate::QuorumCertificate2, - simple_vote::QuorumVote, + simple_certificate::{DACertificate2, QuorumCertificate2}, + simple_vote::{DAVote2, QuorumVote}, traits::node_implementation::{ - NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, ViewSyncProposalType, + CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, + ViewSyncProposalType, }, - vote::{DAVote, TimeoutVote, VIDVote, ViewSyncVote}, + vote::{TimeoutVote, VIDVote, ViewSyncVote}, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks @@ -30,9 +31,9 @@ pub enum HotShotEvent> { /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task - DAVoteRecv(DAVote), + DAVoteRecv(DAVote2>), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task - DACRecv(DACertificate), + DACRecv(DACertificate2), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal @@ -40,11 +41,11 @@ pub enum HotShotEvent> { /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal - DAVoteSend(DAVote), + DAVoteSend(DAVote2>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QCFormed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task - DACSend(DACertificate, TYPES::SignatureKey), + DACSend(DACertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks ViewChange(TYPES::Time), /// Timeout for the view sync protocol; emitted by a replica in the view sync task diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 217030cfd6..8ddbe5ede1 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -239,12 +239,12 @@ impl< Some(membership.get_leader(vote.get_view())), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 ), HotShotEvent::DAVoteSend(vote) => ( - vote.signature_key(), + vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DAVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.get_view())), + Some(membership.get_leader(vote.get_view_number())), ), HotShotEvent::VidCertSend(certificate, sender) => ( sender, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 719e2203d4..4ba8243908 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -134,7 +134,6 @@ async fn build_quorum_proposal_and_signature( justify_qc: QuorumCertificate2::genesis(), timeout_certificate: None, proposer_id: leaf.proposer_id, - dac: None, }; (proposal, signature) diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 3994c14b19..ff755286e0 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -8,6 +8,7 @@ use hotshot_testing::{ use hotshot_types::{ block_impl::VIDTransaction, data::{DAProposal, VidSchemeTrait, ViewNumber}, + simple_vote::{DAData, DAVote2}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, @@ -79,12 +80,14 @@ async fn test_da_task() { ); output.insert(HotShotEvent::SendPayloadCommitment(block.commit()), 1); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); - let vote_token = committee_exchange - .make_vote_token(ViewNumber::new(2)) - .unwrap() - .unwrap(); - let da_vote = - committee_exchange.create_da_message(block.commit(), ViewNumber::new(2), vote_token); + let da_vote = DAVote2::create_signed_vote( + DAData { + payload_commit: block.commit(), + }, + ViewNumber::new(2), + committee_exchange.public_key(), + committee_exchange.private_key(), + ); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); output.insert(HotShotEvent::DAProposalRecv(message, pub_key), 1); diff --git a/types/src/data.rs b/types/src/data.rs index 824d965422..af6e9dc8f3 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,7 +4,7 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ - certificate::{AssembledSignature, DACertificate, TimeoutCertificate, ViewSyncCertificate}, + certificate::{AssembledSignature, TimeoutCertificate, ViewSyncCertificate}, simple_certificate::QuorumCertificate2, traits::{ block_contents::BlockHeader, @@ -180,10 +180,6 @@ pub struct QuorumProposal> { /// the propser id pub proposer_id: EncodedPublicKey, - - /// Data availibity certificate - // TODO We should be able to remove this - pub dac: Option>, } impl ProposalType for DAProposal { diff --git a/types/src/message.rs b/types/src/message.rs index cc1acc80f7..fe207befc9 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,9 +3,12 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. +use crate::simple_certificate::DACertificate2; +use crate::simple_vote::DAVote2; +use crate::traits::node_implementation::CommitteeMembership; use crate::vote2::HasViewNumber; use crate::{ - certificate::{DACertificate, VIDCertificate}, + certificate::VIDCertificate, data::{DAProposal, ProposalType, VidDisperse}, simple_vote::QuorumVote, traits::{ @@ -16,7 +19,7 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{DAVote, TimeoutVote, VIDVote, ViewSyncVote, VoteType}, + vote::{TimeoutVote, VIDVote, ViewSyncVote, VoteType}, }; use derivative::Derivative; @@ -214,13 +217,16 @@ where /// A processed consensus message for the DA committee in sequencing consensus. #[derive(Serialize, Clone, Debug, PartialEq)] #[serde(bound(deserialize = ""))] -pub enum ProcessedCommitteeConsensusMessage { +pub enum ProcessedCommitteeConsensusMessage> { /// Proposal for the DA committee. DAProposal(Proposal>, TYPES::SignatureKey), /// Vote from the DA committee. - DAVote(DAVote, TYPES::SignatureKey), + DAVote( + DAVote2>, + TYPES::SignatureKey, + ), /// Certificate for the DA. - DACertificate(DACertificate, TYPES::SignatureKey), + DACertificate(DACertificate2, TYPES::SignatureKey), /// VID dispersal data. Like [`DAProposal`] VidDisperseMsg(Proposal>, TYPES::SignatureKey), /// Vote from VID storage node. Like [`DAVote`] @@ -229,10 +235,10 @@ pub enum ProcessedCommitteeConsensusMessage { VidCertificate(VIDCertificate, TYPES::SignatureKey), } -impl From> - for CommitteeConsensusMessage +impl> + From> for CommitteeConsensusMessage { - fn from(value: ProcessedCommitteeConsensusMessage) -> Self { + fn from(value: ProcessedCommitteeConsensusMessage) -> Self { match value { ProcessedCommitteeConsensusMessage::DAProposal(p, _) => { CommitteeConsensusMessage::DAProposal(p) @@ -256,9 +262,9 @@ impl From> } } -impl ProcessedCommitteeConsensusMessage { +impl> ProcessedCommitteeConsensusMessage { /// Create a [`ProcessedCommitteeConsensusMessage`] from a [`CommitteeConsensusMessage`]. - pub fn new(value: CommitteeConsensusMessage, sender: TYPES::SignatureKey) -> Self { + pub fn new(value: CommitteeConsensusMessage, sender: TYPES::SignatureKey) -> Self { match value { CommitteeConsensusMessage::DAProposal(p) => { ProcessedCommitteeConsensusMessage::DAProposal(p, sender) @@ -283,8 +289,10 @@ impl ProcessedCommitteeConsensusMessage { } /// A processed consensus message for sequencing consensus. -pub type ProcessedSequencingMessage = - Either, ProcessedCommitteeConsensusMessage>; +pub type ProcessedSequencingMessage = Either< + ProcessedGeneralConsensusMessage, + ProcessedCommitteeConsensusMessage, +>; impl< TYPES: NodeType, @@ -339,15 +347,15 @@ where #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to the sequencing consensus protocol for the DA committee. -pub enum CommitteeConsensusMessage { +pub enum CommitteeConsensusMessage> { /// Proposal for data availability committee DAProposal(Proposal>), /// vote for data availability committee - DAVote(DAVote), + DAVote(DAVote2>), /// Certificate data is available - DACertificate(DACertificate), + DACertificate(DACertificate2), /// Initiate VID dispersal. /// @@ -394,7 +402,7 @@ pub trait SequencingMessageType>: pub struct SequencingMessage< TYPES: NodeType, I: NodeImplementation>, ->(pub Either, CommitteeConsensusMessage>); +>(pub Either, CommitteeConsensusMessage>); impl< TYPES: NodeType, @@ -433,7 +441,9 @@ impl< // this should match replica upon receipt p.data.get_view_number() } - CommitteeConsensusMessage::DAVote(vote_message) => vote_message.get_view(), + CommitteeConsensusMessage::DAVote(vote_message) => { + vote_message.get_view_number() + } CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, CommitteeConsensusMessage::VidCertificate(cert) => cert.view_number, CommitteeConsensusMessage::VidDisperseMsg(disperse) => { @@ -475,7 +485,7 @@ impl< I: NodeImplementation>, > SequencingMessageType for SequencingMessage { - type CommitteeConsensusMessage = CommitteeConsensusMessage; + type CommitteeConsensusMessage = CommitteeConsensusMessage; } #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 18bf1d0259..eb834e00c4 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - simple_vote::{QuorumData, Voteable}, + simple_vote::{DAData, QuorumData, Voteable}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -41,7 +41,6 @@ impl Certificate2 for SimpleCertificate { type Voteable = VOTEABLE; - // type Membership = MEMBERSHIP; fn create_signed_certificate( vote_commitment: Commitment, @@ -127,3 +126,6 @@ impl< /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` pub type QuorumCertificate2 = SimpleCertificate>; +/// Type alias for a DA certificate over `DAData` +pub type DACertificate2 = + SimpleCertificate::BlockPayload>>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 62438ea4e9..b0285cf0bf 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -212,9 +212,9 @@ impl = SimpleVote, M>; /// DA vote type alias -pub type DAVote = SimpleVote, M>; +pub type DAVote2 = SimpleVote::BlockPayload>, M>; /// VID vote type alias -pub type VIDVote = SimpleVote, M>; +pub type VIDVote = SimpleVote::BlockPayload>, M>; /// Timeout Vote type alias pub type TimeoutVote = SimpleVote, M>; /// View Sync Commit Vote type alias diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d2872cc2f5..fa8415acb2 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -492,23 +492,6 @@ pub trait CommitteeExchangeType: &self, payload_commitment: &Commitment, ) -> EncodedSignature; - - /// Sign a vote on DA proposal. - /// - /// The block payload commitment and the type of the vote (DA) are signed, which is the minimum amount - /// of information necessary for checking that this node voted on that block. - fn sign_da_vote( - &self, - payload_commitment: Commitment, - ) -> (EncodedPublicKey, EncodedSignature); - - /// Create a message with a vote on DA proposal. - fn create_da_message( - &self, - payload_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> DAVote; } /// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. @@ -550,36 +533,6 @@ impl< let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); signature } - /// Sign a vote on DA proposal. - /// - /// The block payload commitment and the type of the vote (DA) are signed, which is the minimum amount - /// of information necessary for checking that this node voted on that block. - fn sign_da_vote( - &self, - payload_commitment: Commitment, - ) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::DA(payload_commitment).commit().as_ref(), - ); - (self.public_key.to_bytes(), signature) - } - /// Create a message with a vote on DA proposal. - fn create_da_message( - &self, - payload_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> DAVote { - let signature = self.sign_da_vote(payload_commitment); - DAVote { - signature, - payload_commitment, - current_view, - vote_token, - vote_data: VoteData::DA(payload_commitment), - } - } } impl< From 27a0b2225ad6856a21607d0f21b7b959af6a09ab Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 13:59:19 -0400 Subject: [PATCH 0312/1393] remove one unused line --- testing/tests/consensus_task.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 968fba31eb..ed8d549f42 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -35,7 +35,6 @@ async fn build_vote( inner: handle.hotshot.inner.clone(), }; let quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); - let _vote_token = quorum_exchange.make_vote_token(view).unwrap().unwrap(); let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); From 11c71b94798d8d04c3287b4ad3c78df09b106494 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 14:01:44 -0400 Subject: [PATCH 0313/1393] remove unused line --- testing/tests/consensus_task.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 968fba31eb..ed8d549f42 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -35,7 +35,6 @@ async fn build_vote( inner: handle.hotshot.inner.clone(), }; let quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); - let _vote_token = quorum_exchange.make_vote_token(view).unwrap().unwrap(); let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); From 382902379828939ec57fd8410da78268c5b8267f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 14:11:35 -0400 Subject: [PATCH 0314/1393] lint --- testing/tests/consensus_task.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ed8d549f42..f68b224929 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -27,7 +27,6 @@ use std::collections::HashMap; async fn build_vote( handle: &SystemContextHandle, proposal: QuorumProposal>, - view: ViewNumber, ) -> GeneralConsensusMessage { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; @@ -161,9 +160,7 @@ async fn test_consensus_vote() { 1, ); let proposal = proposal.data; - if let GeneralConsensusMessage::Vote(vote) = - build_vote(&handle, proposal, ViewNumber::new(1)).await - { + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); From ecfd2764107319507e38198cef20c95895e50361 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 14:33:39 -0400 Subject: [PATCH 0315/1393] remove membership associated type from certificate --- types/src/simple_certificate.rs | 1 - types/src/vote2.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 18bf1d0259..766c4eaa69 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -41,7 +41,6 @@ impl Certificate2 for SimpleCertificate { type Voteable = VOTEABLE; - // type Membership = MEMBERSHIP; fn create_signed_certificate( vote_commitment: Commitment, diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 0c926b8f5f..e510bef4c6 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -53,8 +53,6 @@ The committee is defined by the `Membership` associated type. The votes all must be over the `Commitment` associated type. */ pub trait Certificate2: HasViewNumber { - /// Type that defines membership for voters on the certificate - // type Membership: Membership; /// The data commitment this certificate certifies. type Voteable: Voteable; From 7b98d5d8110f8c5d5803fbc1e6e2ea103758f646 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 21:36:54 -0400 Subject: [PATCH 0316/1393] Replace TimeoutVote and TimeoutCertificate with new types --- task-impls/src/consensus.rs | 96 ++++++++++++++------------------- task-impls/src/events.rs | 14 ++--- task-impls/src/network.rs | 4 +- types/src/data.rs | 6 +-- types/src/message.rs | 8 +-- types/src/simple_certificate.rs | 4 +- types/src/simple_vote.rs | 12 ++++- types/src/traits/election.rs | 50 +---------------- 8 files changed, 71 insertions(+), 123 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 803b952a14..48bc8fb402 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -22,12 +22,12 @@ use hotshot_types::{ data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, - simple_certificate::{DACertificate2, QuorumCertificate2}, - simple_vote::{QuorumData, QuorumVote}, + simple_certificate::{DACertificate2, QuorumCertificate2, TimeoutCertificate2}, + simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote2}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, - election::{ConsensusExchange, QuorumExchangeType, SignedCertificate, TimeoutExchangeType}, + election::{ConsensusExchange, QuorumExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{ CommitteeEx, NodeImplementation, NodeType, QuorumEx, QuorumMembership, TimeoutEx, @@ -37,7 +37,6 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{TimeoutVoteAccumulator, VoteType}, vote2::{Certificate2, HasViewNumber, VoteAccumulator2}, }; @@ -185,13 +184,12 @@ pub struct VoteCollectionTaskState< /// Accumulator for votes #[allow(clippy::type_complexity)] pub timeout_accumulator: Either< - as SignedCertificate< + VoteAccumulator2< TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Commitment, - >>::VoteAccumulator, - TimeoutCertificate, + TimeoutVote2>, + TimeoutCertificate2, + >, + TimeoutCertificate2, >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, @@ -294,15 +292,15 @@ where // during exchange refactor // https://github.com/EspressoSystems/HotShot/issues/1799 HotShotEvent::TimeoutVoteRecv(vote) => { - debug!("Received timeout vote for view {}", *vote.get_view()); + debug!("Received timeout vote for view {}", *vote.get_view_number()); if state.timeout_accumulator.is_right() { return (None, state); } - if vote.get_view() != state.cur_view { + if vote.get_view_number() != state.cur_view { error!( "Vote view does not match! vote view is {} current view is {}", - *vote.get_view(), + *vote.get_view_number(), *state.cur_view ); return (None, state); @@ -310,11 +308,7 @@ where let accumulator = state.timeout_accumulator.left().unwrap(); - match state.timeout_exchange.accumulate_vote( - accumulator, - &vote, - &vote.get_view().commit(), - ) { + match accumulator.accumulate(&vote, state.quorum_exchange.membership()) { Either::Left(acc) => { state.timeout_accumulator = Either::Left(acc); return (None, state); @@ -689,15 +683,12 @@ where return; }; - if timeout_cert.view_number != view - 1 { + if timeout_cert.get_data().view != view - 1 { warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); return; } - if !self - .timeout_exchange - .is_valid_timeout_cert(&timeout_cert.clone(), view - 1) - { + if !timeout_cert.is_valid_cert(self.timeout_exchange.membership()) { warn!("Timeout certificate for view {} was invalid", *view); return; } @@ -1020,9 +1011,8 @@ where // TODO Create default functions for accumulators // https://github.com/EspressoSystems/HotShot/issues/1797 - let timeout_accumulator = TimeoutVoteAccumulator { - da_vote_outcomes: HashMap::new(), - success_threshold: self.timeout_exchange.success_threshold(), + let timeout_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.timeout_exchange.total_nodes()], phantom: PhantomData, @@ -1070,11 +1060,11 @@ where } } HotShotEvent::TimeoutVoteRecv(vote) => { - if !self.timeout_exchange.is_leader(vote.get_view() + 1) { + if !self.timeout_exchange.is_leader(vote.get_view_number() + 1) { error!( "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.get_view() + 1, - self.timeout_exchange.is_leader(vote.get_view() + 2) + *vote.get_view_number() + 1, + self.timeout_exchange.is_leader(vote.get_view_number() + 2) ); return; } @@ -1084,7 +1074,7 @@ where })); let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { - if vote.get_view() > *collection_view { + if vote.get_view_number() > *collection_view { // ED I think we'd want to let that task timeout to avoid a griefing vector self.registry.shutdown_task(*collection_task).await; } @@ -1094,21 +1084,15 @@ where }; // // Todo check if we are the leader - let new_accumulator = TimeoutVoteAccumulator { - da_vote_outcomes: HashMap::new(), - - success_threshold: self.timeout_exchange.success_threshold(), - + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.timeout_exchange.total_nodes()], phantom: PhantomData, }; - let timeout_accumulator = self.timeout_exchange.accumulate_vote( - new_accumulator, - &vote, - &vote.get_view().commit(), - ); + let timeout_accumulator = + new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); let quorum_accumulator = VoteAccumulator2 { vote_outcomes: HashMap::new(), @@ -1119,13 +1103,13 @@ where // self.timeout_accumulator = accumulator; - if vote.get_view() > collection_view { + if vote.get_view_number() > collection_view { let state = VoteCollectionTaskState { quorum_exchange: self.quorum_exchange.clone(), timeout_exchange: self.timeout_exchange.clone(), accumulator: either::Left(quorum_accumulator), timeout_accumulator, - cur_view: vote.get_view(), + cur_view: vote.get_view_number(), event_stream: self.event_stream.clone(), id: self.id, }; @@ -1148,12 +1132,12 @@ where let id = builder.get_task_id().unwrap(); let stream_id = builder.get_stream_id().unwrap(); - self.vote_collector = Some((vote.get_view(), id, stream_id)); + self.vote_collector = Some((vote.get_view_number(), id, stream_id)); let _task = async_spawn(async move { VoteCollectionTypes::build(builder).launch().await; }); - debug!("Starting vote handle for view {:?}", vote.get_view()); + debug!("Starting vote handle for view {:?}", vote.get_view_number()); } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream .direct_message(stream_id, HotShotEvent::TimeoutVoteRecv(vote)) @@ -1253,17 +1237,17 @@ where Ok(None) => { debug!("We were not chosen for consensus committee on {:?}", view); } - Ok(Some(vote_token)) => { - let message = self - .timeout_exchange - .create_timeout_message::(view, vote_token); - - debug!("Sending timeout vote for view {}", *view); - if let GeneralConsensusMessage::TimeoutVote(vote) = message { - self.event_stream - .publish(HotShotEvent::TimeoutVoteSend(vote)) - .await; - } + Ok(Some(_vote_token)) => { + let vote = TimeoutVote2::create_signed_vote( + TimeoutData { view }, + view, + self.timeout_exchange.public_key(), + self.timeout_exchange.private_key(), + ); + + self.event_stream + .publish(HotShotEvent::TimeoutVoteSend(vote)) + .await; } } debug!( @@ -1286,7 +1270,7 @@ where &mut self, _qc: QuorumCertificate2, view: TYPES::Time, - timeout_certificate: Option>, + timeout_certificate: Option>, ) -> bool { if !self.quorum_exchange.is_leader(view) { error!( diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 6b502afabb..82e4330bd0 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -3,16 +3,16 @@ use crate::view_sync::ViewSyncPhase; use commit::Commitment; use either::Either; use hotshot_types::{ - certificate::{TimeoutCertificate, VIDCertificate}, + certificate::VIDCertificate, data::{DAProposal, VidDisperse}, message::Proposal, - simple_certificate::{DACertificate2, QuorumCertificate2}, - simple_vote::{DAVote2, QuorumVote}, + simple_certificate::{DACertificate2, QuorumCertificate2, TimeoutCertificate2}, + simple_vote::{DAVote2, QuorumVote, TimeoutVote2}, traits::node_implementation::{ CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, ViewSyncProposalType, }, - vote::{TimeoutVote, VIDVote, ViewSyncVote}, + vote::{VIDVote, ViewSyncVote}, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks @@ -25,9 +25,9 @@ pub enum HotShotEvent> { /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote>), /// A timeout vote recevied from the network; handled by consensus task - TimeoutVoteRecv(TimeoutVote), + TimeoutVoteRecv(TimeoutVote2>), /// Send a timeout vote to the network; emitted by consensus task replicas - TimeoutVoteSend(TimeoutVote), + TimeoutVoteSend(TimeoutVote2>), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task @@ -43,7 +43,7 @@ pub enum HotShotEvent> { /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote2>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(Either, TimeoutCertificate>), + QCFormed(Either, TimeoutCertificate2>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 8ddbe5ede1..531f37c802 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -283,12 +283,12 @@ impl< ) } HotShotEvent::TimeoutVoteSend(vote) => ( - vote.get_key(), + vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::TimeoutVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.get_view() + 1)), + Some(membership.get_leader(vote.get_view_number() + 1)), ), HotShotEvent::ViewChange(view) => { self.view = view; diff --git a/types/src/data.rs b/types/src/data.rs index af6e9dc8f3..a6c5cf05fa 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,8 +4,8 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ - certificate::{AssembledSignature, TimeoutCertificate, ViewSyncCertificate}, - simple_certificate::QuorumCertificate2, + certificate::{AssembledSignature, ViewSyncCertificate}, + simple_certificate::{QuorumCertificate2, TimeoutCertificate2}, traits::{ block_contents::BlockHeader, node_implementation::NodeType, @@ -176,7 +176,7 @@ pub struct QuorumProposal> { pub justify_qc: QuorumCertificate2, /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view - pub timeout_certificate: Option>, + pub timeout_certificate: Option>, /// the propser id pub proposer_id: EncodedPublicKey, diff --git a/types/src/message.rs b/types/src/message.rs index fe207befc9..4e64076aaf 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -4,7 +4,7 @@ //! `HotShot` nodes can send among themselves. use crate::simple_certificate::DACertificate2; -use crate::simple_vote::DAVote2; +use crate::simple_vote::{DAVote2, TimeoutVote2}; use crate::traits::node_implementation::CommitteeMembership; use crate::vote2::HasViewNumber; use crate::{ @@ -19,7 +19,7 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{TimeoutVote, VIDVote, ViewSyncVote, VoteType}, + vote::{VIDVote, ViewSyncVote, VoteType}, }; use derivative::Derivative; @@ -337,7 +337,7 @@ where ViewSyncCertificate(Proposal>), /// Message with a Timeout vote - TimeoutVote(TimeoutVote), + TimeoutVote(TimeoutVote2>), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] @@ -431,7 +431,7 @@ impl< GeneralConsensusMessage::ViewSyncCertificate(message) => { message.data.get_view_number() } - GeneralConsensusMessage::TimeoutVote(message) => message.get_view(), + GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), } } Right(committee_message) => { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index eb834e00c4..5274d357ed 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - simple_vote::{DAData, QuorumData, Voteable}, + simple_vote::{DAData, QuorumData, TimeoutData, Voteable}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -129,3 +129,5 @@ pub type QuorumCertificate2 = SimpleCertificate = SimpleCertificate::BlockPayload>>; +/// Type alias for a Timeout certificate over a view number +pub type TimeoutCertificate2 = SimpleCertificate>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index b0285cf0bf..f4f1177686 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -156,6 +156,14 @@ impl Committable for QuorumData { } } +impl Committable for TimeoutData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("Timeout Vote") + .u64(*self.view) + .finalize() + } +} + impl Committable for DAData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("DA Vote") @@ -214,9 +222,9 @@ pub type QuorumVote = SimpleVote, M>; /// DA vote type alias pub type DAVote2 = SimpleVote::BlockPayload>, M>; /// VID vote type alias -pub type VIDVote = SimpleVote::BlockPayload>, M>; +pub type VIDVote2 = SimpleVote::BlockPayload>, M>; /// Timeout Vote type alias -pub type TimeoutVote = SimpleVote, M>; +pub type TimeoutVote2 = SimpleVote, M>; /// View Sync Commit Vote type alias pub type ViewSyncCommitVote = SimpleVote, M>; /// View Sync Pre Commit Vote type alias diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fa8415acb2..bc0bd0fe45 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -16,16 +16,12 @@ use crate::{ vote::{TimeoutVote, VIDVote}, }; -use crate::{ - message::{GeneralConsensusMessage, Message}, - vote::ViewSyncVoteInternal, -}; +use crate::{message::GeneralConsensusMessage, vote::ViewSyncVoteInternal}; use crate::{ data::LeafType, traits::{ network::{CommunicationChannel, NetworkMsg}, - node_implementation::ExchangesType, signature_key::SignatureKey, state::ConsensusTime, }, @@ -1254,49 +1250,7 @@ impl< } /// Trait defining functiosn for a `TimeoutExchange` -pub trait TimeoutExchangeType: ConsensusExchange { - /// Create and sign a timeout message - fn create_timeout_message>( - &self, - view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage - where - I::Exchanges: ExchangesType>, - { - let signature = TYPES::SignatureKey::sign( - self.private_key(), - VoteData::>::Timeout(view.commit()) - .commit() - .as_ref(), - ); - - GeneralConsensusMessage::::TimeoutVote(TimeoutVote { - signature: (self.public_key().to_bytes(), signature), - current_view: view, - vote_token, - }) - } - - /// Validate a timeout certificate. - /// This is separate from other certificate verification functions because we also need to - /// verify the certificate is signed over the view we expect - fn is_valid_timeout_cert(&self, qc: &Self::Certificate, view_number: TYPES::Time) -> bool { - let comparison_commitment = view_number.commit(); - - if let AssembledSignature::Timeout(qc) = qc.signatures() { - let real_commit = VoteData::Timeout(comparison_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } else { - error!("Expected TimeoutCertificate, received another certificate variant"); - false - } - } -} +pub trait TimeoutExchangeType: ConsensusExchange {} impl< TYPES: NodeType, From fa01a0a64e221765f95f0b19f1d4c5150af61666 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 22:19:02 -0400 Subject: [PATCH 0317/1393] remove old QC almost entirely --- hotshot/src/lib.rs | 2 - hotshot/src/tasks/mod.rs | 12 +--- hotshot/src/traits/storage/atomic_storage.rs | 20 +++--- .../atomic_storage/dual_key_value_store.rs | 4 +- hotshot/src/types/handle.rs | 43 ------------ task-impls/src/consensus.rs | 12 +--- task-impls/src/transactions.rs | 25 ++----- types/src/certificate.rs | 67 +++++-------------- types/src/traits/election.rs | 4 +- types/src/vote.rs | 6 +- 10 files changed, 36 insertions(+), 159 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 48cb49a0db..fc831ff5e9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -31,7 +31,6 @@ pub mod types; pub mod tasks; use crate::{ - certificate::QuorumCertificate, tasks::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, add_transaction_task, add_view_sync_task, @@ -646,7 +645,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bbabfb1ca8..953b99b1fa 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,8 +1,6 @@ //! Provides a number of tasks that run continuously on a [`HotShot`] -use crate::{ - async_spawn, types::SystemContextHandle, DACertificate, HotShotConsensusApi, QuorumCertificate, -}; +use crate::{async_spawn, types::SystemContextHandle, DACertificate, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; use commit::{Commitment, CommitmentBounds, Committable}; use futures::FutureExt; @@ -256,7 +254,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< @@ -298,7 +295,6 @@ where vid_certs: HashMap::new(), current_proposal: None, id: handle.hotshot.inner.id, - qc: None, }; consensus_state .quorum_exchange @@ -490,11 +486,7 @@ pub async fn add_transaction_task< handle: SystemContextHandle, ) -> TaskRunner where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - >, + QuorumEx: ConsensusExchange>, { // build the transactions task let c_api: HotShotConsensusApi = HotShotConsensusApi { diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs index 4654d65ca7..2f141b8f61 100644 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -4,17 +4,15 @@ mod dual_key_value_store; mod hash_map_store; use self::{dual_key_value_store::DualKeyValueStore, hash_map_store::HashMapStore}; -use crate::{data::Leaf, traits::StateContents, QuorumCertificate}; +use crate::{data::Leaf, traits::StateContents}; use async_std::sync::Mutex; use async_trait::async_trait; use atomic_store::{AtomicStore, AtomicStoreLoader}; use commit::Commitment; -use hotshot_types::{ - traits::storage::{ +use hotshot_types::traits::storage::{ AtomicStoreSnafu, Storage, StorageError, StorageResult, StorageState, StorageUpdater, TestableStorage, - }, -}; + }; use serde::{de::DeserializeOwned, Serialize}; use snafu::ResultExt; use std::{path::Path, sync::Arc}; @@ -36,7 +34,7 @@ where blocks: HashMapStore, STATE::BlockPayload>, /// The [`QuorumCertificate`]s stored by this [`AtomicStorage`] - qcs: DualKeyValueStore>, + qcs: DualKeyValueStore>, /// The [`Leaf`s stored by this [`AtomicStorage`] /// @@ -151,20 +149,20 @@ impl Storage for AtomicStorage { async fn get_qc( &self, hash: &Commitment, - ) -> StorageResult>> { + ) -> StorageResult>> { Ok(self.inner.qcs.load_by_key_1_ref(hash).await) } #[instrument(name = "AtomicStorage::get_newest_qc", skip_all)] - async fn get_newest_qc(&self) -> StorageResult>> { - Ok(self.inner.qcs.load_latest(|qc| qc.view_number()).await) + async fn get_newest_qc(&self) -> StorageResult>> { + Ok(self.inner.qcs.load_latest(|qc| qc.get_view_number()).await) } #[instrument(name = "AtomicStorage::get_qc_for_view", skip_all)] async fn get_qc_for_view( &self, view: TYPES::Time, - ) -> StorageResult>> { + ) -> StorageResult>> { Ok(self.inner.qcs.load_by_key_2(view).await) } @@ -244,7 +242,7 @@ impl<'a, STATE: StateContents + 'static> StorageUpdater<'a, STATE> } #[instrument(name = "AtomicStorage::insert_qc", skip_all)] - async fn insert_qc(&mut self, qc: QuorumCertificate) -> StorageResult { + async fn insert_qc(&mut self, qc: QuorumCertificate2) -> StorageResult { self.inner.qcs.insert(qc).await } diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs index c890099a1d..9fa25095d2 100644 --- a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs +++ b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs @@ -6,7 +6,7 @@ use async_std::sync::RwLock; use atomic_store::{load_store::BincodeLoadStore, AppendLog, AtomicStoreLoader}; use commit::{Commitment, Committable}; use hotshot_types::{ - data::{Leaf, QuorumCertificate, ViewNumber}, + data::{Leaf, ViewNumber}, traits::{ storage::{AtomicStoreSnafu, InconsistencySnafu, StorageError}, StateContents, @@ -180,7 +180,7 @@ pub trait DualKeyValue: Serialize + DeserializeOwned + Clone { fn key_2(&self) -> Self::Key2; } -impl DualKeyValue for QuorumCertificate { +impl DualKeyValue for QuorumCertificate2 { type Key1 = Commitment; type Key2 = ViewNumber; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 9500698bbc..56ad4e8e50 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -218,49 +218,6 @@ impl + 'static> SystemContextHandl self.maybe_do_genesis_init().await; } - /// iterate through all events on a [`NodeImplementation`] and determine if the node finished - /// successfully - /// # Errors - /// Errors if unable to obtain storage - /// # Panics - /// Panics if the event stream is shut down while this is running - // pub async fn collect_round_events( - // &mut self, - // ) -> Result< - // ( - // Vec<>::Leaf>, - // QuorumCertificate>::Leaf>>, - // ), - // HotShotError, - // > { - // // TODO we should probably do a view check - // // but we can do that later. It's non-obvious how to get the view number out - // // to check against - // - // // drain all events from this node - // let mut results = Ok((vec![], QuorumCertificate::genesis())); - // loop { - // // unwrap is fine here since the thing hasn't been shut down - // let event = self.next_event().await.unwrap(); - // match event.event { - // EventType::ReplicaViewTimeout { view_number: time } => { - // error!(?event, "Replica timed out!"); - // results = Err(HotShotError::ViewTimeoutError { - // view_number: time, - // state: RoundTimedoutState::TestCollectRoundEventsTimedOut, - // }); - // } - // EventType::Decide { leaf_chain, qc } => { - // results = Ok((leaf_chain.to_vec(), (*qc).clone())); - // } - // EventType::ViewFinished { view_number: _ } => return results, - // event => { - // debug!("recv-ed event {:?}", event); - // } - // } - // } - // } - /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data pub fn storage(&self) -> &I::Storage { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f5b755cd8a..e984372aa1 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -17,7 +17,7 @@ use hotshot_task::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, + certificate::{DACertificate, TimeoutCertificate, VIDCertificate}, consensus::{Consensus, View}, data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, @@ -68,7 +68,6 @@ pub struct ConsensusTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< @@ -141,9 +140,6 @@ pub struct ConsensusTaskState< // ED Should replace this with config information since we need it anyway /// The node's id pub id: u64, - - /// The most Recent QC we've formed from votes, if we've formed it. - pub qc: Option>>, } /// State for the vote collection task. This handles the building of a QC from a votes received @@ -155,7 +151,6 @@ pub struct VoteCollectionTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, TimeoutEx: ConsensusExchange< @@ -208,7 +203,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, TimeoutEx: ConsensusExchange< @@ -235,7 +229,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, TimeoutEx: ConsensusExchange< @@ -365,7 +358,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< @@ -1411,7 +1403,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< @@ -1464,7 +1455,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = QuorumCertificate>>, Commitment = Commitment>, >, CommitteeEx: ConsensusExchange< diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2fe424657c..9aacf68090 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -14,7 +14,6 @@ use hotshot_task::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::QuorumCertificate, consensus::Consensus, data::{Leaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, @@ -47,11 +46,7 @@ pub struct TransactionTaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - >, + QuorumEx: ConsensusExchange>, { /// The state's api pub api: A, @@ -93,11 +88,7 @@ impl< A: ConsensusApi, I> + 'static, > TransactionTaskState where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - >, + QuorumEx: ConsensusExchange>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] @@ -305,11 +296,7 @@ impl< A: ConsensusApi, I> + 'static, > TransactionTaskState where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - >, + QuorumEx: ConsensusExchange>, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( @@ -402,11 +389,7 @@ impl< A: ConsensusApi, I> + 'static, > TS for TransactionTaskState where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Certificate = QuorumCertificate>, - >, + QuorumEx: ConsensusExchange>, { } diff --git a/types/src/certificate.rs b/types/src/certificate.rs index adb37fa56e..4657d5e68d 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -21,6 +21,7 @@ use serde::{Deserialize, Serialize}; use std::{ fmt::{self, Debug, Display, Formatter}, hash::Hash, + marker::PhantomData, }; use tracing::debug; @@ -57,33 +58,21 @@ pub struct VIDCertificate { pub signatures: AssembledSignature, } -/// The type used for Quorum Certificates -/// -/// A Quorum Certificate is a threshold signature of the `Leaf` being proposed, as well as some -/// metadata, such as the `Stage` of consensus the quorum certificate was generated during. +/// Depricated type for QC + +// TODO:remove this struct https://github.com/EspressoSystems/HotShot/issues/1995 #[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash, Eq)] #[serde(bound(deserialize = ""))] pub struct QuorumCertificate { - /// commitment to previous leaf - #[debug(skip)] - pub leaf_commitment: COMMITMENT, - /// Which view this QC relates to - pub view_number: TYPES::Time, - /// assembled signature for certificate aggregation - pub signatures: AssembledSignature, - /// If this QC is for the genesis block - pub is_genesis: bool, + /// phantom data + _pd: PhantomData<(TYPES, COMMITMENT)>, } impl Display for QuorumCertificate { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "view: {:?}, is_genesis: {:?}", - self.view_number, self.is_genesis - ) + write!(f, "",) } } @@ -198,45 +187,28 @@ impl type Vote = QuorumVote; type VoteAccumulator = QuorumVoteAccumulator; - fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { - let leaf_commitment = match vote.clone() { - QuorumVote::Yes(vote_internal) | QuorumVote::No(vote_internal) => { - vote_internal.leaf_commitment - } - }; - let qc = QuorumCertificate { - leaf_commitment, - view_number: vote.get_view(), - signatures, - is_genesis: false, - }; - debug!("QC commitment when formed is {:?}", qc.leaf_commitment); - qc + fn create_certificate(_signatures: AssembledSignature, _vote: Self::Vote) -> Self { + Self { _pd: PhantomData } } fn view_number(&self) -> TYPES::Time { - self.view_number + TYPES::Time::new(1) } fn signatures(&self) -> AssembledSignature { - self.signatures.clone() + AssembledSignature::Genesis() } fn leaf_commitment(&self) -> COMMITMENT { - self.leaf_commitment + COMMITMENT::default_commitment_no_preimage() } fn is_genesis(&self) -> bool { - self.is_genesis + true } fn genesis() -> Self { - Self { - leaf_commitment: COMMITMENT::default_commitment_no_preimage(), - view_number: ::genesis(), - signatures: AssembledSignature::Genesis(), - is_genesis: true, - } + Self { _pd: PhantomData } } } @@ -244,19 +216,10 @@ impl Committable for QuorumCertificate { fn commit(&self) -> Commitment { - let signatures_bytes = serialize_signature(&self.signatures); - commit::RawCommitmentBuilder::new("Quorum Certificate Commitment") - .var_size_field("leaf commitment", self.leaf_commitment.as_ref()) - .u64_field("view number", *self.view_number) - .constant_str("justify_qc signatures") - .var_size_bytes(&signatures_bytes) + .u64_field("view number", 1) .finalize() } - - fn tag() -> String { - tag::QC.to_string() - } } impl diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d2872cc2f5..288dafd6fc 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,8 +9,7 @@ use super::{ }; use crate::{ certificate::{ - AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, - ViewSyncCertificate, + AssembledSignature, DACertificate, TimeoutCertificate, VIDCertificate, ViewSyncCertificate, QuorumCertificate, }, data::{DAProposal, ProposalType, VidDisperse}, vote::{TimeoutVote, VIDVote}, @@ -895,6 +894,7 @@ impl< { type Proposal = PROPOSAL; type Vote = QuorumVote>; + // TODO: remove this https://github.com/EspressoSystems/HotShot/issues/1995 type Certificate = QuorumCertificate>; type Membership = MEMBERSHIP; type Networking = NETWORK; diff --git a/types/src/vote.rs b/types/src/vote.rs index d63f8cc0fb..97a51b0ae0 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -4,7 +4,7 @@ //! can send, and vote accumulator that converts votes into certificates. use crate::{ - certificate::{AssembledSignature, QuorumCertificate}, + certificate::AssembledSignature, traits::{ election::{VoteData, VoteToken}, node_implementation::NodeType, @@ -79,10 +79,6 @@ pub struct VIDVote { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] pub struct YesOrNoVote { - /// TODO we should remove this - /// this is correct, but highly inefficient - /// we should check a cache, and if that fails request the qc - pub justify_qc_commitment: Commitment>, /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The leaf commitment being voted on. From 895fbfa6979e91d381f42260ffb926216f7119a6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 3 Nov 2023 22:24:28 -0400 Subject: [PATCH 0318/1393] lint --- types/src/certificate.rs | 1 - types/src/traits/election.rs | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 4657d5e68d..266290425e 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -23,7 +23,6 @@ use std::{ hash::Hash, marker::PhantomData, }; -use tracing::debug; /// A `DACertificate` is a threshold signature that some data is available. /// It is signed by the members of the DA committee, not the entire network. It is used diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 288dafd6fc..5eb9b87c2e 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,7 +9,8 @@ use super::{ }; use crate::{ certificate::{ - AssembledSignature, DACertificate, TimeoutCertificate, VIDCertificate, ViewSyncCertificate, QuorumCertificate, + AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, + ViewSyncCertificate, }, data::{DAProposal, ProposalType, VidDisperse}, vote::{TimeoutVote, VIDVote}, From f8261362754d3a2dc34022b8cbbea1f01d39b7a5 Mon Sep 17 00:00:00 2001 From: shuoer86 <129674997+shuoer86@users.noreply.github.com> Date: Sun, 5 Nov 2023 19:10:30 +0800 Subject: [PATCH 0319/1393] Fix typos --- hotshot-signature-key/src/bn254/bn254_priv.rs | 2 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 2 +- hotshot/src/traits/networking.rs | 2 +- hotshot/src/traits/storage/atomic_storage/hash_map_store.rs | 2 +- hotshot/src/types/handle.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs index 87d80c8dfe..cefc36cde4 100644 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ b/hotshot-signature-key/src/bn254/bn254_priv.rs @@ -24,7 +24,7 @@ impl BLSPrivKey { } #[must_use] - /// Get real seed used for random key generation funtion + /// Get real seed used for random key generation function pub fn get_seed_from_seed_indexed(seed: [u8; 32], index: u64) -> [u8; 32] { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 410a0e12fb..ff4764cec9 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -19,7 +19,7 @@ use typenum::U32; /// Public key type for an bn254 [`SignatureKey`] pair /// -/// This type makes use of noise for non-determinisitc signatures. +/// This type makes use of noise for non-deterministic signatures. #[derive(Clone, PartialEq, Eq, Hash, Copy, Serialize, Deserialize, Debug)] pub struct BLSPubKey { diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index a14e4ead54..0cfeacc779 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -3,7 +3,7 @@ //! This module contains a trait abstracting over network access, as well as implementations of that //! trait. Currently this includes //! - [`MemoryNetwork`](memory_network::MemoryNetwork), an in memory testing-only implementation -//! - [`Libp2pNetwork`](libp2p_network::Libp2pNetwork), a production-ready networking impelmentation built on top of libp2p-rs. +//! - [`Libp2pNetwork`](libp2p_network::Libp2pNetwork), a production-ready networking implementation built on top of libp2p-rs. pub mod combined_network; pub mod libp2p_network; diff --git a/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs b/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs index 305b9ce650..dbe2fdbcef 100644 --- a/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs +++ b/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs @@ -52,7 +52,7 @@ where read.data.get(hash).cloned() } - /// Insert a new key-value entry into the store. This won't be committed untill `commit` is called. + /// Insert a new key-value entry into the store. This won't be committed until `commit` is called. pub async fn insert(&self, key: K, val: V) -> atomic_store::Result<()> { let mut lock = self.inner.write().await; // Make sure to commit the store first before updating the internal value diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 9500698bbc..ee19ffaa87 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -267,7 +267,7 @@ impl + 'static> SystemContextHandl &self.storage } - /// Get the underyling consensus state for this [`SystemContext`] + /// Get the underlying consensus state for this [`SystemContext`] pub fn get_consensus(&self) -> Arc>> { self.hotshot.get_consensus() } From fcdf6b63497c5f6695fc410c342a04dccc674fb0 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 6 Nov 2023 08:34:20 -0500 Subject: [PATCH 0320/1393] feat: byzantine network --- .../src/traits/networking/combined_network.rs | 13 +- .../src/traits/networking/libp2p_network.rs | 75 ++-- .../src/traits/networking/memory_network.rs | 22 +- .../traits/networking/web_server_network.rs | 6 +- libp2p-networking/src/network/node/handle.rs | 14 +- testing/Cargo.toml | 2 +- testing/src/node_types.rs | 15 +- testing/src/test_builder.rs | 8 +- testing/tests/byzantine.rs | 322 ++++++++++++++++++ types/src/traits/network.rs | 59 +++- types/src/traits/node_implementation.rs | 5 +- 11 files changed, 476 insertions(+), 65 deletions(-) create mode 100644 testing/tests/byzantine.rs diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index e717c40b1b..023d4b1a33 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -29,7 +29,7 @@ use hotshot_types::{ traits::{ election::Membership, network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkReliability, TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, ViewMessage, }, @@ -168,6 +168,7 @@ impl, MEMBERSHIP: Membership>, ) -> Box Self + 'static> { let generators = ( , MEMBERSHIP: Membership, TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( expected_node_count, num_bootstrap, network_id, da_committee_size, - is_da + is_da, + reliability_config, ) ); Box::new(move |node_id| { @@ -212,6 +215,7 @@ impl, MEMBERSHIP: Membership>, ) -> Box Self + 'static> { let generator = , MEMBERSHIP: Membership { /// haven't made that atomic yet and we prefer lock-free latest_seen_view: Arc, /// reliability_config - reliability_config: Option>> + reliability_config: Option>, } /// Networking implementation that uses libp2p @@ -146,6 +146,7 @@ where network_id: usize, da_committee_size: usize, _is_da: bool, + reliability_config: Option>, ) -> Box Self + 'static> { assert!( da_committee_size <= expected_node_count, @@ -225,6 +226,11 @@ where let bootstrap_addrs_ref = bootstrap_addrs.clone(); let keys = all_keys.clone(); let da = da_keys.clone(); + // let reliability_config_dup = match &reliability_config { + // Some(ref config) => Some(config), + // None => todo!(), + // }; + let relaibility_config_dup = reliability_config.clone(); async_block_on(async move { Libp2pNetwork::new( NetworkingMetricsValue::new(), @@ -235,7 +241,7 @@ where node_id as usize, keys, da, - None, + relaibility_config_dup, ) .await .unwrap() @@ -284,7 +290,7 @@ impl Libp2pNetwork { // HACK committee_pks: BTreeSet, da_pks: BTreeSet, - reliability_config: Option>>, + reliability_config: Option>, ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); let network_handle = Arc::new( @@ -340,8 +346,7 @@ impl Libp2pNetwork { // proposals on". We need this because to have consensus info injected we need a working // network already. In the worst case, we send a few lookups we don't need. latest_seen_view: Arc::new(AtomicU64::new(0)), - reliability_config - + reliability_config, }), }; @@ -606,11 +611,13 @@ impl ConnectedNetwork for Libp2p // ask during pair programming // or maybe channels would be better? let metrics = self.inner.metrics.clone(); - if let Some(config) = &self.inner.reliability_config { + if let Some(ref config) = &self.inner.reliability_config { let handle = self.inner.handle.clone(); - let serialized_msg = bincode_opts().serialize(&message).context(FailedToSerializeSnafu)?; - let fut = config.read().await.chaos_send_msg( + let serialized_msg = bincode_opts() + .serialize(&message) + .context(FailedToSerializeSnafu)?; + let fut = config.clone().chaos_send_msg( serialized_msg, Arc::new(move |msg: Vec| { let topic_2 = topic.clone(); @@ -620,16 +627,16 @@ impl ConnectedNetwork for Libp2p match handle_2.gossip_no_serialize(topic_2, msg).await { Err(e) => { metrics_2.message_failed_to_send.add(1); - warn!("Failed to broadcast to libp2p: {:?}", e) - }, - Ok(_) => { - metrics_2.outgoing_direct_message_count.add(1); - + warn!("Failed to broadcast to libp2p: {:?}", e); + } + Ok(()) => { + metrics_2.outgoing_direct_message_count.add(1); } } }) - })); - async_spawn(async move {fut.await}); + }), + ); + async_spawn(fut); return Ok(()); } @@ -685,11 +692,13 @@ impl ConnectedNetwork for Libp2p // ask during pair programming // or maybe channels would be better? let metrics = self.inner.metrics.clone(); - if let Some(config) = &self.inner.reliability_config { + if let Some(ref config) = &self.inner.reliability_config { let handle = self.inner.handle.clone(); - let serialized_msg = bincode_opts().serialize(&message).context(FailedToSerializeSnafu)?; - let fut = config.read().await.chaos_send_msg( + let serialized_msg = bincode_opts() + .serialize(&message) + .context(FailedToSerializeSnafu)?; + let fut = config.clone().chaos_send_msg( serialized_msg, Arc::new(move |msg: Vec| { let handle_2 = handle.clone(); @@ -698,26 +707,22 @@ impl ConnectedNetwork for Libp2p match handle_2.direct_request_no_serialize(pid, msg).await { Err(e) => { metrics_2.message_failed_to_send.add(1); - warn!("Failed to broadcast to libp2p: {:?}", e) - }, - Ok(_) => { - metrics_2.outgoing_direct_message_count.add(1); - + warn!("Failed to broadcast to libp2p: {:?}", e); + } + Ok(()) => { + metrics_2.outgoing_direct_message_count.add(1); } } }) - })); - async_spawn(async move {fut.await}); + }), + ); + async_spawn(fut); return Ok(()); } match self.inner.handle.direct_request(pid, &message).await { - Ok(()) => { - Ok(()) - } - Err(e) => { - Err(e.into()) - } + Ok(()) => Ok(()), + Err(e) => Err(e.into()), } } @@ -843,6 +848,7 @@ where network_id: usize, da_committee_size: usize, is_da: bool, + reliability_config: Option>, ) -> Box Self + 'static> { let generator = , @@ -852,7 +858,8 @@ where num_bootstrap, network_id, da_committee_size, - is_da + is_da, + reliability_config ); Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 47edc766ea..75d9e9c693 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -94,7 +94,7 @@ struct MemoryNetworkInner { metrics: NetworkingMetricsValue, /// config to introduce unreliability to the network - reliability_config: Option>>, + reliability_config: Option>, } /// In memory only network simulator. @@ -125,7 +125,7 @@ impl MemoryNetwork { pub_key: K, metrics: NetworkingMetricsValue, master_map: Arc>, - reliability_config: Option>>, + reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); let (broadcast_input, broadcast_task_recv) = bounded(128); @@ -252,12 +252,18 @@ impl> _network_id: usize, _da_committee_size: usize, _is_da: bool, + reliability_config: Option>, ) -> Box Self + 'static> { let master: Arc<_> = MasterMap::new(); Box::new(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - MemoryNetwork::new(pubkey, NetworkingMetricsValue::new(), master.clone(), None) + MemoryNetwork::new( + pubkey, + NetworkingMetricsValue::new(), + master.clone(), + reliability_config.clone(), + ) }) } @@ -309,8 +315,7 @@ impl ConnectedNetwork for Memory continue; } trace!(?key, "Sending message to node"); - if let Some(r) = &self.inner.reliability_config { - let config = r.read().await; + if let Some(ref config) = &self.inner.reliability_config { { let node2 = node.clone(); let fut = config.chaos_send_msg( @@ -353,8 +358,7 @@ impl ConnectedNetwork for Memory trace!("Message bincoded, finding recipient"); if let Some(node) = self.inner.master_map.map.get(&recipient) { let node = node.value().clone(); - if let Some(r) = &self.inner.reliability_config { - let config = r.read().await; + if let Some(ref config) = &self.inner.reliability_config { { let fut = config.chaos_send_msg( vec.clone(), @@ -481,6 +485,7 @@ where network_id: usize, da_committee_size: usize, is_da: bool, + reliability_config: Option>, ) -> Box Self + 'static> { let generator = , @@ -490,7 +495,8 @@ where num_bootstrap, network_id, da_committee_size, - is_da + is_da, + reliability_config, ); Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 5a67880e9f..06b21693a5 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -29,7 +29,7 @@ use hotshot_web_server::{self, config}; use rand::random; use serde::{Deserialize, Serialize}; -use hotshot_types::traits::network::ViewMessage; +use hotshot_types::traits::network::{NetworkReliability, ViewMessage}; use std::{ collections::{hash_map::Entry, BTreeSet, HashMap}, marker::PhantomData, @@ -1070,6 +1070,7 @@ impl> _network_id: usize, _da_committee_size: usize, is_da: bool, + _reliability_config: Option>, ) -> Box Self + 'static> { let (server_shutdown_sender, server_shutdown) = oneshot(); let sender = Arc::new(server_shutdown_sender); @@ -1120,6 +1121,7 @@ impl, MEMBERSHIP: Membership>, ) -> Box Self + 'static> { let generator = , @@ -1131,6 +1133,8 @@ impl, MEMBERSHIP: Membership NetworkNodeHandle { self.direct_request_no_serialize(pid, serialized_msg).await } + /// Make a direct request to `peer_id` containing `msg` without serializing + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` pub async fn direct_request_no_serialize( &self, pid: PeerId, @@ -507,7 +511,15 @@ impl NetworkNodeHandle { self.gossip_no_serialize(topic, serialized_msg).await } - pub async fn gossip_no_serialize(&self, topic: String, msg: Vec) -> Result<(), NetworkNodeHandleError>{ + /// Gossip a message to peers without serializing + /// # Errors + /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + pub async fn gossip_no_serialize( + &self, + topic: String, + msg: Vec, + ) -> Result<(), NetworkNodeHandleError> { let req = ClientRequest::GossipMsg(topic, msg); self.send_request(req).await } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 935f9e4342..7e8a33a134 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -37,9 +37,9 @@ tracing = { workspace = true } serde = { workspace = true } ethereum-types = { workspace = true } bitvec = { workspace = true } +async-lock = { workspace = true } [dev-dependencies] -async-lock = { workspace = true } bincode = { workspace = true } # GG any better options for serialization? [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 7e5308b35f..dbf141b4f2 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,4 +1,4 @@ -use hotshot::traits::implementations::CombinedNetworks; +use hotshot::traits::{implementations::CombinedNetworks, NetworkReliability}; use std::{marker::PhantomData, sync::Arc}; use hotshot::{ @@ -156,6 +156,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, + byzantine_metadata: Option>, ) -> Box< dyn Fn( u64, @@ -186,6 +187,7 @@ impl 0, da_committee_size, false, + byzantine_metadata, )); Box::new(move |id| { @@ -252,6 +254,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, + byzantine_metadata: Option>, ) -> Box< dyn Fn( u64, @@ -282,6 +285,7 @@ impl 0, da_committee_size, false, + byzantine_metadata.clone(), )); let network_da_generator = Arc::new(, @@ -295,6 +299,7 @@ impl 1, da_committee_size, true, + byzantine_metadata, )); Box::new(move |id| { let network = Arc::new(network_generator(id)); @@ -385,6 +390,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, + byzantine_metadata: Option>, ) -> Box< dyn Fn( u64, @@ -403,6 +409,7 @@ impl >>::Networking, ) + 'static, > { + // this is unsupported currently let network_generator = Arc::new(, ::SignatureKey, @@ -416,6 +423,7 @@ impl 0, da_committee_size, false, + byzantine_metadata.clone(), )); let network_da_generator = Arc::new(, @@ -430,6 +438,7 @@ impl 1, da_committee_size, true, + byzantine_metadata.clone(), )); Box::new(move |id| { let network = Arc::new(network_generator(id)); @@ -534,6 +543,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, + byzantine_metadata: Option>, ) -> Box< dyn Fn( u64, @@ -565,6 +575,7 @@ impl 0, da_committee_size, false, + byzantine_metadata.clone(), )); let web_server_network_da_generator = Arc::new(>, } impl Default for TimingData { @@ -173,6 +175,7 @@ impl Default for TestMetadata { duration: Duration::from_millis(10000), }, ), + byzantine_metadata: None, } } } @@ -197,6 +200,7 @@ impl TestMetadata { completion_task_description, overall_safety_properties, spinning_properties, + byzantine_metadata, .. } = self.clone(); @@ -261,7 +265,7 @@ impl TestMetadata { let spinning_task_generator = spinning_properties.build(); TestLauncher { resource_generator: ResourceGenerators { - channel_generator: <>::Exchanges as TestableExchange<_, _, _>>::gen_comm_channels(total_nodes, num_bootstrap_nodes, da_committee_size), + channel_generator: <>::Exchanges as TestableExchange<_, _, _>>::gen_comm_channels(total_nodes, num_bootstrap_nodes, da_committee_size, byzantine_metadata), storage: Box::new(|_| I::construct_tmp_storage().unwrap()), config, }, diff --git a/testing/tests/byzantine.rs b/testing/tests/byzantine.rs new file mode 100644 index 0000000000..4558f1ffa6 --- /dev/null +++ b/testing/tests/byzantine.rs @@ -0,0 +1,322 @@ +use hotshot_types::traits::network::AsynchronousNetwork; +use hotshot_types::traits::network::ChaosNetwork; +use hotshot_types::traits::network::PartiallySynchronousNetwork; +use hotshot_types::traits::network::SynchronousNetwork; +use std::time::Duration; +use std::time::Instant; + +use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingLibp2pImpl, SequencingTestTypes}, + overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::TestMetadata, +}; +use tracing::instrument; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn libp2p_network_sync() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + byzantine_metadata: Some(Box::new(SynchronousNetwork { + timeout_ms: 30, + delay_low_ms: 4, + })), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_memory_network_sync() { + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + test_builder::TestMetadata, + }; + use std::time::Duration; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ), + byzantine_metadata: Some(Box::new(SynchronousNetwork { + timeout_ms: 30, + delay_low_ms: 4, + })), + ..TestMetadata::default() + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn libp2p_network_async() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + byzantine_metadata: Some(Box::new(AsynchronousNetwork { + keep_numerator: 8, + keep_denominator: 10, + delay_low_ms: 4, + delay_high_ms: 30, + })), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_memory_network_async() { + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + test_builder::TestMetadata, + }; + use std::time::Duration; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ), + byzantine_metadata: Some(Box::new(AsynchronousNetwork { + keep_numerator: 8, + keep_denominator: 10, + delay_low_ms: 4, + delay_high_ms: 30, + })), + ..TestMetadata::default() + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_memory_network_partially_sync() { + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + test_builder::TestMetadata, + }; + use std::time::Duration; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ), + byzantine_metadata: Some(Box::new(PartiallySynchronousNetwork { + asynchronous: AsynchronousNetwork { + keep_numerator: 8, + keep_denominator: 10, + delay_low_ms: 4, + delay_high_ms: 30, + }, + synchronous: SynchronousNetwork { + timeout_ms: 30, + delay_low_ms: 4, + }, + gst: std::time::Duration::from_millis(1000), + start: Instant::now(), + })), + ..TestMetadata::default() + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn libp2p_network_partially_sync() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + byzantine_metadata: Some(Box::new(PartiallySynchronousNetwork { + asynchronous: AsynchronousNetwork { + keep_numerator: 8, + keep_denominator: 10, + delay_low_ms: 4, + delay_high_ms: 30, + }, + synchronous: SynchronousNetwork { + timeout_ms: 30, + delay_low_ms: 4, + }, + gst: std::time::Duration::from_millis(1000), + start: Instant::now(), + })), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_memory_network_chaos() { + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{SequencingMemoryImpl, SequencingTestTypes}, + test_builder::TestMetadata, + }; + use std::time::Duration; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ), + byzantine_metadata: Some(Box::new(ChaosNetwork { + keep_numerator: 8, + keep_denominator: 10, + delay_low_ms: 4, + delay_high_ms: 30, + repeat_low: 1, + repeat_high: 5, + })), + ..TestMetadata::default() + }; + metadata + .gen_launcher::() + .launch() + .run_test() + .await; +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn libp2p_network_chaos() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + byzantine_metadata: Some(Box::new(ChaosNetwork { + keep_numerator: 8, + keep_denominator: 10, + delay_low_ms: 4, + delay_high_ms: 30, + repeat_low: 1, + repeat_high: 5, + })), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::() + .launch() + .run_test() + .await +} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 1a6414fd67..4b9ce1f83b 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -5,6 +5,7 @@ use async_compatibility_layer::art::async_sleep; #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; +use dyn_clone::DynClone; use hotshot_task::{boxed_sync, BoxSyncFuture}; use libp2p_networking::network::NetworkNodeHandleError; #[cfg(async_executor_impl = "tokio")] @@ -341,6 +342,7 @@ pub trait TestableNetworkingImplementation { network_id: usize, da_committee_size: usize, is_da: bool, + reliability_config: Option>, ) -> Box Self + 'static>; /// Get the number of messages in-flight. @@ -372,7 +374,7 @@ pub enum NetworkChange { /// interface describing how reliable the network is #[async_trait] -pub trait NetworkReliability: Debug + Sync + std::marker::Send { +pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'static { /// Sample from bernoulli distribution to decide whether /// or not to keep a packet /// # Panics @@ -433,6 +435,8 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send { } } +dyn_clone::clone_trait_object!(NetworkReliability); + /// ideal network #[derive(Clone, Copy, Debug, Default)] pub struct PerfectNetwork {} @@ -444,9 +448,9 @@ impl NetworkReliability for PerfectNetwork {} #[derive(Clone, Copy, Debug, Default)] pub struct SynchronousNetwork { /// Max delay of packet before arrival - timeout_ms: u64, + pub timeout_ms: u64, /// Lowest value in milliseconds that a packet may be delayed - delay_low_ms: u64, + pub delay_low_ms: u64, } impl NetworkReliability for SynchronousNetwork { @@ -470,13 +474,13 @@ impl NetworkReliability for SynchronousNetwork { #[derive(Debug, Clone, Copy)] pub struct AsynchronousNetwork { /// numerator for probability of keeping packets - keep_numerator: u32, + pub keep_numerator: u32, /// denominator for probability of keeping packets - keep_denominator: u32, + pub keep_denominator: u32, /// lowest value in milliseconds that a packet may be delayed - delay_low_ms: u64, + pub delay_low_ms: u64, /// highest value in milliseconds that a packet may be delayed - delay_high_ms: u64, + pub delay_high_ms: u64, } impl NetworkReliability for AsynchronousNetwork { @@ -500,13 +504,13 @@ impl NetworkReliability for AsynchronousNetwork { #[derive(Debug, Clone, Copy)] pub struct PartiallySynchronousNetwork { /// asynchronous portion of network - asynchronous: AsynchronousNetwork, + pub asynchronous: AsynchronousNetwork, /// synchronous portion of network - synchronous: SynchronousNetwork, + pub synchronous: SynchronousNetwork, /// time when GST occurs - gst: std::time::Duration, + pub gst: std::time::Duration, /// when the network was started - start: std::time::Instant, + pub start: std::time::Instant, } impl NetworkReliability for PartiallySynchronousNetwork { @@ -601,6 +605,37 @@ impl PartiallySynchronousNetwork { } /// A chaotic network using all the networking calls +#[derive(Debug, Clone)] pub struct ChaosNetwork { - // TODO + /// numerator for probability of keeping packets + pub keep_numerator: u32, + /// denominator for probability of keeping packets + pub keep_denominator: u32, + /// lowest value in milliseconds that a packet may be delayed + pub delay_low_ms: u64, + /// highest value in milliseconds that a packet may be delayed + pub delay_high_ms: u64, + /// lowest value of repeats for a message + pub repeat_low: usize, + /// highest value of repeats for a message + pub repeat_high: usize, +} + +impl NetworkReliability for ChaosNetwork { + fn sample_keep(&self) -> bool { + Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) + .unwrap() + .sample(&mut rand::thread_rng()) + } + + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } + + fn sample_repeat(&self) -> usize { + Uniform::new_inclusive(self.repeat_low, self.repeat_high).sample(&mut rand::thread_rng()) + } } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 6d27d70103..f7b5845330 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -9,7 +9,9 @@ use super::{ CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, TimeoutExchange, TimeoutExchangeType, ViewSyncExchangeType, VoteToken, }, - network::{CommunicationChannel, NetworkMsg, TestableNetworkingImplementation}, + network::{ + CommunicationChannel, NetworkMsg, NetworkReliability, TestableNetworkingImplementation, + }, state::{ConsensusTime, TestableBlock, TestableState}, storage::{StorageError, StorageState, TestableStorage}, State, @@ -209,6 +211,7 @@ pub trait TestableExchange, ME expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, + byzantine_metadata: Option>, ) -> Box< dyn Fn( u64, From a14af08a6f013481231099e3f59424f66a82e276 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 6 Nov 2023 08:50:34 -0500 Subject: [PATCH 0321/1393] fix: compilation error --- hotshot/examples/infra/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 1309525923..755758a9db 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -802,6 +802,7 @@ where // NOTE: this introduces an invariant that the keys are assigned using this indexed // function all_keys, + None, da_keys.clone(), da_keys.contains(&pubkey), ) From d33628f7d097aba6e8fff14e493b172ff301b1c0 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 6 Nov 2023 08:54:49 -0500 Subject: [PATCH 0322/1393] fix: minor renaming --- testing/src/node_types.rs | 24 +++++++++---------- testing/src/test_builder.rs | 10 ++++---- .../{byzantine.rs => unreliable_network.rs} | 16 ++++++------- types/src/traits/node_implementation.rs | 2 +- 4 files changed, 26 insertions(+), 26 deletions(-) rename testing/tests/{byzantine.rs => unreliable_network.rs} (95%) diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 4c79cb65b4..fe1826599e 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -155,7 +155,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, - byzantine_metadata: Option>, + unreliable_network: Option>, ) -> Box< dyn Fn( u64, @@ -190,7 +190,7 @@ impl 0, da_committee_size, false, - byzantine_metadata, + unreliable_network, )); Box::new(move |id| { @@ -264,7 +264,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, - byzantine_metadata: Option>, + unreliable_network: Option>, ) -> Box< dyn Fn( u64, @@ -299,7 +299,7 @@ impl 0, da_committee_size, false, - byzantine_metadata.clone(), + unreliable_network.clone(), )); let network_da_generator = Arc::new(, @@ -313,7 +313,7 @@ impl 1, da_committee_size, true, - byzantine_metadata, + unreliable_network, )); Box::new(move |id| { let network = Arc::new(network_generator(id)); @@ -406,7 +406,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, - byzantine_metadata: Option>, + unreliable_network: Option>, ) -> Box< dyn Fn( u64, @@ -443,7 +443,7 @@ impl 0, da_committee_size, false, - byzantine_metadata.clone(), + unreliable_network.clone(), )); let network_da_generator = Arc::new(, @@ -458,7 +458,7 @@ impl 1, da_committee_size, true, - byzantine_metadata.clone(), + unreliable_network.clone(), )); Box::new(move |id| { let network = Arc::new(network_generator(id)); @@ -575,7 +575,7 @@ impl expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, - byzantine_metadata: Option>, + unreliable_network: Option>, ) -> Box< dyn Fn( u64, @@ -611,7 +611,7 @@ impl 0, da_committee_size, false, - byzantine_metadata.clone(), + unreliable_network.clone(), )); let web_server_network_da_generator = Arc::new(>, + /// unrelabile networking metadata + pub unreliable_network: Option>, } impl Default for TimingData { @@ -175,7 +175,7 @@ impl Default for TestMetadata { duration: Duration::from_millis(10000), }, ), - byzantine_metadata: None, + unreliable_network: None, } } } @@ -200,7 +200,7 @@ impl TestMetadata { completion_task_description, overall_safety_properties, spinning_properties, - byzantine_metadata, + unreliable_network, .. } = self.clone(); @@ -272,7 +272,7 @@ impl TestMetadata { _, _, >>::gen_comm_channels( - total_nodes, num_bootstrap_nodes, da_committee_size, byzantine_metadata + total_nodes, num_bootstrap_nodes, da_committee_size, unreliable_network ), storage: Box::new(|_| I::construct_tmp_storage().unwrap()), config, diff --git a/testing/tests/byzantine.rs b/testing/tests/unreliable_network.rs similarity index 95% rename from testing/tests/byzantine.rs rename to testing/tests/unreliable_network.rs index adaa3e847b..9462b72dec 100644 --- a/testing/tests/byzantine.rs +++ b/testing/tests/unreliable_network.rs @@ -32,7 +32,7 @@ async fn libp2p_network_sync() { duration: Duration::new(240, 0), }, ), - byzantine_metadata: Some(Box::new(SynchronousNetwork { + unreliable_network: Some(Box::new(SynchronousNetwork { timeout_ms: 30, delay_low_ms: 4, })), @@ -69,7 +69,7 @@ async fn test_memory_network_sync() { duration: Duration::from_secs(240), }, ), - byzantine_metadata: Some(Box::new(SynchronousNetwork { + unreliable_network: Some(Box::new(SynchronousNetwork { timeout_ms: 30, delay_low_ms: 4, })), @@ -101,7 +101,7 @@ async fn libp2p_network_async() { duration: Duration::new(240, 0), }, ), - byzantine_metadata: Some(Box::new(AsynchronousNetwork { + unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 8, keep_denominator: 10, delay_low_ms: 4, @@ -140,7 +140,7 @@ async fn test_memory_network_async() { duration: Duration::from_secs(240), }, ), - byzantine_metadata: Some(Box::new(AsynchronousNetwork { + unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 8, keep_denominator: 10, delay_low_ms: 4, @@ -178,7 +178,7 @@ async fn test_memory_network_partially_sync() { duration: Duration::from_secs(240), }, ), - byzantine_metadata: Some(Box::new(PartiallySynchronousNetwork { + unreliable_network: Some(Box::new(PartiallySynchronousNetwork { asynchronous: AsynchronousNetwork { keep_numerator: 8, keep_denominator: 10, @@ -220,7 +220,7 @@ async fn libp2p_network_partially_sync() { duration: Duration::new(240, 0), }, ), - byzantine_metadata: Some(Box::new(PartiallySynchronousNetwork { + unreliable_network: Some(Box::new(PartiallySynchronousNetwork { asynchronous: AsynchronousNetwork { keep_numerator: 8, keep_denominator: 10, @@ -267,7 +267,7 @@ async fn test_memory_network_chaos() { duration: Duration::from_secs(240), }, ), - byzantine_metadata: Some(Box::new(ChaosNetwork { + unreliable_network: Some(Box::new(ChaosNetwork { keep_numerator: 8, keep_denominator: 10, delay_low_ms: 4, @@ -303,7 +303,7 @@ async fn libp2p_network_chaos() { duration: Duration::new(240, 0), }, ), - byzantine_metadata: Some(Box::new(ChaosNetwork { + unreliable_network: Some(Box::new(ChaosNetwork { keep_numerator: 8, keep_denominator: 10, delay_low_ms: 4, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index b4afd3eb79..bd61e6329c 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -218,7 +218,7 @@ pub trait TestableExchange, ME expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, - byzantine_metadata: Option>, + unreliable_network: Option>, ) -> Box< dyn Fn( u64, From 26b80119ad03341fe12000be66da7db0e49d5c9f Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 6 Nov 2023 09:00:14 -0500 Subject: [PATCH 0323/1393] fix: unstage modDA, not sure how that got there --- hotshot/examples/infra/modDA.rs | 848 -------------------------------- 1 file changed, 848 deletions(-) delete mode 100644 hotshot/examples/infra/modDA.rs diff --git a/hotshot/examples/infra/modDA.rs b/hotshot/examples/infra/modDA.rs deleted file mode 100644 index dfeec6e5b9..0000000000 --- a/hotshot/examples/infra/modDA.rs +++ /dev/null @@ -1,848 +0,0 @@ -use crate::infra::{load_config_from_file, OrchestratorArgs}; - -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use async_lock::RwLock; -use async_trait::async_trait; -use futures::StreamExt; -use hotshot::{ - traits::{ - implementations::{ - Libp2pCommChannel, Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, - WebCommChannel, WebServerNetwork, - }, - NodeImplementation, - }, - types::{SignatureKey, SystemContextHandle}, - HotShotType, SystemContext, -}; -use hotshot_orchestrator::{ - self, - client::{OrchestratorClient, ValidatorArgs}, - config::{NetworkConfig, WebServerConfig}, -}; -use hotshot_task::task::FilterEvent; -use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, - consensus::ConsensusMetricsValue, - data::{QuorumProposal, SequencingLeaf, TestableLeaf}, - event::{Event, EventType}, - message::{Message, SequencingMessage}, - traits::{ - election::{ - CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, - }, - network::CommunicationChannel, - node_implementation::{ - CommitteeEx, ExchangesType, NodeType, QuorumEx, SequencingExchanges, - }, - state::{ConsensusTime, TestableBlock, TestableState}, - }, - HotShotConfig, -}; -use libp2p_identity::{ - ed25519::{self, SecretKey}, - Keypair, -}; -use libp2p_networking::{ - network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, - reexport::Multiaddr, -}; -use rand::rngs::StdRng; -use rand::SeedableRng; -use std::{collections::BTreeSet, sync::Arc}; -use std::{num::NonZeroUsize, str::FromStr}; -// use libp2p::{ -// identity::{ -// ed25519::{Keypair as EdKeypair, SecretKey}, -// Keypair, -// }, -// multiaddr::{self, Protocol}, -// Multiaddr, -// }; -use libp2p_identity::PeerId; -// use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; -use std::{fmt::Debug, net::Ipv4Addr}; -use std::{ - //collections::{BTreeSet, VecDeque}, - //fs, - mem, - net::IpAddr, - //num::NonZeroUsize, - //str::FromStr, - //sync::Arc, - //time::{Duration, Instant}, - time::Instant, -}; -//use surf_disco::error::ClientError; -//use surf_disco::Client; -use tracing::{debug, error, info, warn}; - -/// Runs the orchestrator -pub async fn run_orchestrator_da< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, - CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, ->( - OrchestratorArgs { - host, - port, - config_file, - }: OrchestratorArgs, -) { - error!("Starting orchestrator",); - let run_config = load_config_from_file::(config_file); - let _result = hotshot_orchestrator::run_orchestrator::< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >(run_config, host, port) - .await; -} - -/// Helper function to calculate the nuymber of transactions to send per node per round -fn calculate_num_tx_per_round( - node_index: u64, - total_num_nodes: usize, - transactions_per_round: usize, -) -> usize { - if node_index == 0 { - transactions_per_round / total_num_nodes + transactions_per_round % total_num_nodes - } else { - transactions_per_round / total_num_nodes - } -} - -/// Defines the behavior of a "run" of the network with a given configuration -#[async_trait] -pub trait RunDA< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, - CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, -> where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, - Self: Sync, - SystemContext: HotShotType, -{ - /// Initializes networking, returns self - async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - ) -> Self; - - /// Initializes the genesis state and HotShot instance; does not start HotShot consensus - /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot - /// get the anchored view - /// Note: sequencing leaf does not have state, so does not return state - async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let genesis_block = TYPES::BlockType::genesis(); - let initializer = - hotshot::HotShotInitializer::>::from_genesis( - genesis_block, - ) - .expect("Couldn't generate genesis block"); - - let config = self.get_config(); - - // Get KeyPair for certificate Aggregation - let (pk, sk) = - TYPES::SignatureKey::generated_from_seed_indexed(config.seed, config.node_index); - let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let entry = pk.get_stake_table_entry(1u64); - - let da_network = self.get_da_network(); - let quorum_network = self.get_quorum_network(); - let view_sync_network = self.get_view_sync_network(); - - // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config - let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config(config.config.total_nodes.get() as u64) - }); - - let committee_election_config = as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config( - config.config.da_committee_size.try_into().unwrap(), - ); - - let exchanges = NODE::Exchanges::create( - known_nodes_with_stake.clone(), - (quorum_election_config, committee_election_config), - ( - quorum_network.clone(), - da_network.clone(), - view_sync_network.clone(), - ), - pk.clone(), - entry.clone(), - sk.clone(), - ); - - SystemContext::init( - pk, - sk, - config.node_index, - config.config, - MemoryStorage::empty(), - exchanges, - initializer, - ConsensusMetricsValue::new(), - ) - .await - .expect("Could not init hotshot") - .0 - } - - /// Starts HotShot consensus, returns when consensus has finished - async fn run_hotshot(&self, mut context: SystemContextHandle) { - let NetworkConfig { - padding, - rounds, - transactions_per_round, - node_index, - config: HotShotConfig { total_nodes, .. }, - .. - } = self.get_config(); - - let size = mem::size_of::(); - let padding = padding.saturating_sub(size); - let mut txn_rng = StdRng::seed_from_u64(node_index); - - debug!("Adjusted padding size is {:?} bytes", padding); - - let mut total_transactions_committed = 0; - let mut total_transactions_sent = 0; - let transactions_to_send_per_round = - calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); - - info!("Starting hotshot!"); - let start = Instant::now(); - - let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; - let mut anchor_view: TYPES::Time = ::genesis(); - let mut num_successful_commits = 0; - - context.hotshot.start_consensus().await; - - loop { - match event_stream.next().await { - None => { - panic!("Error! Event stream completed before consensus ended."); - } - Some(Event { event, .. }) => { - match event { - EventType::Error { error } => { - error!("Error in consensus: {:?}", error); - // TODO what to do here - } - EventType::Decide { - leaf_chain, - qc: _, - block_size, - } => { - // this might be a obob - if let Some(leaf) = leaf_chain.get(0) { - info!("Decide event for leaf: {}", *leaf.view_number); - - let new_anchor = leaf.view_number; - if new_anchor >= anchor_view { - anchor_view = leaf.view_number; - } - } - - // send transactions - for _ in 0..transactions_to_send_per_round { - let txn = - <::StateType as TestableState>::create_random_transaction( - None, - &mut txn_rng, - padding as u64, - ); - _ = context.submit_transaction(txn).await.unwrap(); - total_transactions_sent += 1; - } - - if let Some(size) = block_size { - total_transactions_committed += size; - } - - num_successful_commits += leaf_chain.len(); - if num_successful_commits >= rounds { - break; - } - - if leaf_chain.len() > 1 { - warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); - } - // when we make progress, submit new events - } - EventType::ReplicaViewTimeout { view_number } => { - warn!("Timed out as a replicas in view {:?}", view_number); - } - EventType::NextLeaderViewTimeout { view_number } => { - warn!("Timed out as the next leader in view {:?}", view_number); - } - EventType::ViewFinished { view_number: _ } => {} - _ => unimplemented!(), - } - } - } - } - - // Output run results - let total_time_elapsed = start.elapsed(); - error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); - } - - /// Returns the da network for this run - fn get_da_network(&self) -> DANETWORK; - - /// Returns the quorum network for this run - fn get_quorum_network(&self) -> QUORUMNETWORK; - - ///Returns view sync network for this run - fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; - - /// Returns the config for this run - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >; -} - -// WEB SERVER - -/// Represents a web server-based run -pub struct WebServerDARun< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - quorum_network: WebCommChannel, - da_network: WebCommChannel, - view_sync_network: WebCommChannel, -} - -#[async_trait] -impl< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - WebCommChannel, - Message, - >, - CommitteeExchange< - TYPES, - MEMBERSHIP, - WebCommChannel, - Message, - >, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - WebCommChannel, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, - > - RunDA< - TYPES, - MEMBERSHIP, - WebCommChannel, - WebCommChannel, - WebCommChannel, - NODE, - > for WebServerDARun -where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, - Self: Sync, -{ - async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - ) -> WebServerDARun { - // Generate our own key - let (pub_key, _priv_key) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); - - // Get the configuration for the web server - let WebServerConfig { - host, - port, - wait_between_polls, - }: WebServerConfig = config.clone().web_server_config.unwrap(); - - let underlying_quorum_network = WebServerNetwork::create( - &host.to_string(), - port, - wait_between_polls, - pub_key.clone(), - false, - ); - - // Create the network - let quorum_network: WebCommChannel = - WebCommChannel::new(underlying_quorum_network.clone().into()); - - let view_sync_network: WebCommChannel = - WebCommChannel::new(underlying_quorum_network.into()); - - let WebServerConfig { - host, - port, - wait_between_polls, - }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - - // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) - .into(), - ); - - WebServerDARun { - config, - quorum_network, - da_network, - view_sync_network, - } - } - - fn get_da_network(&self) -> WebCommChannel { - self.da_network.clone() - } - - fn get_quorum_network(&self) -> WebCommChannel { - self.quorum_network.clone() - } - - fn get_view_sync_network(&self) -> WebCommChannel { - self.view_sync_network.clone() - } - - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - > { - self.config.clone() - } -} - -// Libp2p - -/// Represents a libp2p-based run -pub struct Libp2pDARun, MEMBERSHIP: Membership> -{ - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - quorum_network: Libp2pCommChannel, - da_network: Libp2pCommChannel, - view_sync_network: Libp2pCommChannel, -} - -#[async_trait] -impl< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - CommitteeExchange< - TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - Libp2pCommChannel, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, - > - RunDA< - TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, - NODE, - > for Libp2pDARun -where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, - Self: Sync, -{ - async fn initialize_networking( - config: NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - >, - ) -> Libp2pDARun { - let (pubkey, _privkey) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); - let mut config = config; - let libp2p_config = config - .libp2p_config - .take() - .expect("Configuration is not for a Libp2p network"); - let bs_len = libp2p_config.bootstrap_nodes.len(); - let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config - .bootstrap_nodes - .iter() - .map(|(addr, pair)| { - let kp = Keypair::from_protobuf_encoding(pair).unwrap(); - let peer_id = PeerId::from_public_key(&kp.public()); - let multiaddr = - Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) - .unwrap(); - (peer_id, multiaddr) - }) - .collect(); - let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); - let node_type = if (config.node_index as usize) < bs_len { - NetworkNodeType::Bootstrap - } else { - NetworkNodeType::Regular - }; - let node_index = config.node_index; - let port_index = match libp2p_config.index_ports { - true => node_index, - false => 0, - }; - let bound_addr: Multiaddr = format!( - "/{}/{}/udp/{}/quic-v1", - if libp2p_config.public_ip.is_ipv4() { - "ip4" - } else { - "ip6" - }, - libp2p_config.public_ip, - libp2p_config.base_port as u64 + port_index - ) - .parse() - .unwrap(); - - // generate network - let mut config_builder = NetworkNodeConfigBuilder::default(); - assert!(config.config.total_nodes.get() > 2); - let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); - config_builder.replication_factor(replicated_nodes); - config_builder.identity(identity.clone()); - - config_builder.bound_addr(Some(bound_addr.clone())); - - let to_connect_addrs = bootstrap_nodes - .iter() - .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) - .collect(); - - config_builder.to_connect_addrs(to_connect_addrs); - - let mesh_params = - // NOTE I'm arbitrarily choosing these. - match node_type { - NetworkNodeType::Bootstrap => MeshParams { - mesh_n_high: libp2p_config.bootstrap_mesh_n_high, - mesh_n_low: libp2p_config.bootstrap_mesh_n_low, - mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, - mesh_n: libp2p_config.bootstrap_mesh_n, - }, - NetworkNodeType::Regular => MeshParams { - mesh_n_high: libp2p_config.mesh_n_high, - mesh_n_low: libp2p_config.mesh_n_low, - mesh_outbound_min: libp2p_config.mesh_outbound_min, - mesh_n: libp2p_config.mesh_n, - }, - NetworkNodeType::Conductor => unreachable!(), - }; - config_builder.mesh_params(Some(mesh_params)); - - let mut all_keys = BTreeSet::new(); - let mut da_keys = BTreeSet::new(); - for i in 0..config.config.total_nodes.get() as u64 { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); - if i < config.config.da_committee_size as u64 { - da_keys.insert(pubkey.clone()); - } - all_keys.insert(pubkey); - } - - let node_config = config_builder.build().unwrap(); - let underlying_quorum_network = Libp2pNetwork::new( - NetworkingMetricsValue::new(), - node_config, - pubkey.clone(), - Arc::new(RwLock::new( - bootstrap_nodes - .iter() - .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) - .collect(), - )), - bs_len, - config.node_index as usize, - // NOTE: this introduces an invariant that the keys are assigned using this indexed - // function - all_keys, - da_keys, - None, - ) - .await - .unwrap(); - - underlying_quorum_network.wait_for_ready().await; - - // Create the network - let quorum_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let view_sync_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let da_network: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - Libp2pDARun { - config, - quorum_network, - da_network, - view_sync_network, - } - } - - fn get_da_network(&self) -> Libp2pCommChannel { - self.da_network.clone() - } - - fn get_quorum_network(&self) -> Libp2pCommChannel { - self.quorum_network.clone() - } - - fn get_view_sync_network(&self) -> Libp2pCommChannel { - self.view_sync_network.clone() - } - - fn get_config( - &self, - ) -> NetworkConfig< - TYPES::SignatureKey, - ::StakeTableEntry, - TYPES::ElectionConfigType, - > { - self.config.clone() - } -} - -/// Main entry point for validators -pub async fn main_entry_point< - TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - NODE: NodeImplementation< - TYPES, - Leaf = SequencingLeaf, - Exchanges = SequencingExchanges< - TYPES, - Message, - QuorumExchange< - TYPES, - SequencingLeaf, - QuorumProposal>, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, - CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, - >, - Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, - >, - RUNDA: RunDA, ->( - args: ValidatorArgs, -) where - ::StateType: TestableState, - ::BlockType: TestableBlock, - SequencingLeaf: TestableLeaf, -{ - setup_logging(); - setup_backtrace(); - - info!("Starting validator"); - - let orchestrator_client: OrchestratorClient = - OrchestratorClient::connect_to_orchestrator(args.clone()).await; - - // Identify with the orchestrator - let public_ip = match args.public_ip { - Some(ip) => ip, - None => IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - }; - info!( - "Identifying with orchestrator using IP address {}", - public_ip.to_string() - ); - let node_index: u16 = orchestrator_client - .identify_with_orchestrator(public_ip.to_string()) - .await; - info!("Finished identifying; our node index is {node_index}"); - info!("Getting config from orchestrator"); - - let mut run_config = orchestrator_client - .get_config_from_orchestrator::(node_index) - .await; - - run_config.node_index = node_index.into(); - //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); - - info!("Initializing networking"); - let run = RUNDA::initialize_networking(run_config.clone()).await; - let hotshot = run.initialize_state_and_hotshot().await; - - info!("Waiting for start command from orchestrator"); - orchestrator_client - .wait_for_all_nodes_ready(run_config.clone().node_index) - .await; - - info!("All nodes are ready! Starting HotShot"); - run.run_hotshot(hotshot).await; -} - -pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { - let mut hasher = blake3::Hasher::new(); - hasher.update(&seed); - hasher.update(&index.to_le_bytes()); - let new_seed = *hasher.finalize().as_bytes(); - let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); - >::from(sk_bytes).into() -} From 12d1d1d47c3c6849d5d45651ec59087e1a18b276 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 6 Nov 2023 10:59:25 -0500 Subject: [PATCH 0324/1393] Integrate new VID vote and cert --- hotshot/src/lib.rs | 5 +- hotshot/src/tasks/mod.rs | 10 +-- task-impls/src/consensus.rs | 8 +- task-impls/src/events.rs | 18 ++-- task-impls/src/network.rs | 4 +- task-impls/src/vid.rs | 110 +++++++++--------------- testing/tests/vid_task.rs | 15 ++-- types/src/message.rs | 22 ++--- types/src/simple_certificate.rs | 5 +- types/src/simple_vote.rs | 2 +- types/src/traits/election.rs | 41 --------- types/src/traits/node_implementation.rs | 4 + 12 files changed, 91 insertions(+), 153 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 48cb49a0db..6c08c76e95 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -54,9 +54,7 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - certificate::{TimeoutCertificate, VIDCertificate}, - data::VidDisperse, - simple_certificate::QuorumCertificate2, + certificate::TimeoutCertificate, data::VidDisperse, simple_certificate::QuorumCertificate2, traits::node_implementation::TimeoutEx, }; @@ -670,7 +668,6 @@ where TYPES, Message, Proposal = VidDisperse, - Certificate = VIDCertificate, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bbabfb1ca8..8a6f17e376 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -28,7 +28,7 @@ use hotshot_task_impls::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::{TimeoutCertificate, VIDCertificate, ViewSyncCertificate}, + certificate::{TimeoutCertificate, ViewSyncCertificate}, data::{Leaf, ProposalType, QuorumProposal}, event::Event, message::{Message, Messages, SequencingMessage}, @@ -359,12 +359,8 @@ pub async fn add_vid_task< handle: SystemContextHandle, ) -> TaskRunner where - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 803b952a14..e0198cb8f2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -17,12 +17,12 @@ use hotshot_task::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, + certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, consensus::{Consensus, View}, data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, - simple_certificate::{DACertificate2, QuorumCertificate2}, + simple_certificate::{DACertificate2, QuorumCertificate2, VIDCertificate2}, simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, @@ -132,7 +132,7 @@ pub struct ConsensusTaskState< pub da_certs: HashMap>, /// All the VID certs we've received for current and future views. - pub vid_certs: HashMap>, + pub vid_certs: HashMap>, /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync @@ -1212,7 +1212,7 @@ where HotShotEvent::VidCertRecv(cert) => { debug!("VID cert received for view ! {}", *cert.view_number); - let view = cert.view_number; + let view = cert.get_view_number(); self.vid_certs.insert(view, cert); // RM TODO: VOTING diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 6b502afabb..16db44efb1 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -3,16 +3,16 @@ use crate::view_sync::ViewSyncPhase; use commit::Commitment; use either::Either; use hotshot_types::{ - certificate::{TimeoutCertificate, VIDCertificate}, + certificate::TimeoutCertificate, data::{DAProposal, VidDisperse}, message::Proposal, - simple_certificate::{DACertificate2, QuorumCertificate2}, - simple_vote::{DAVote2, QuorumVote}, + simple_certificate::{DACertificate2, QuorumCertificate2, VIDCertificate2}, + simple_vote::{DAVote2, QuorumVote, VIDVote2}, traits::node_implementation::{ CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - ViewSyncProposalType, + VIDMembership, ViewSyncProposalType, }, - vote::{TimeoutVote, VIDVote, ViewSyncVote}, + vote::{TimeoutVote, ViewSyncVote}, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks @@ -86,17 +86,17 @@ pub enum HotShotEvent> { /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal /// /// Like [`DAVoteSend`] - VidVoteSend(VIDVote), + VidVoteSend(VIDVote2>), /// A VID vote has been received by the network; handled by the DA task /// /// Like [`DAVoteRecv`] - VidVoteRecv(VIDVote), + VidVoteRecv(VIDVote2>), /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task /// /// Like [`DACSend`] - VidCertSend(VIDCertificate, TYPES::SignatureKey), + VidCertSend(VIDCertificate2, TYPES::SignatureKey), /// A VID cert has been recieved by the network; handled by the consensus task /// /// Like [`DACRecv`] - VidCertRecv(VIDCertificate), + VidCertRecv(VIDCertificate2), } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 8ddbe5ede1..17b75271aa 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -231,12 +231,12 @@ impl< None, ), HotShotEvent::VidVoteSend(vote) => ( - vote.signature_key(), + vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.get_view())), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 + Some(membership.get_leader(vote.get_view_number())), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 ), HotShotEvent::DAVoteSend(vote) => ( vote.get_signing_key(), diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index dd95ba3096..56e829c33f 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -12,24 +12,26 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::network::CommunicationChannel; -use hotshot_types::traits::network::ConsensusIntentEvent; -use hotshot_types::{ - certificate::VIDCertificate, traits::election::SignedCertificate, vote::VIDVoteAccumulator, -}; +use hotshot_types::traits::{network::ConsensusIntentEvent, node_implementation::VIDMembership}; use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, ProposalType}, message::{Message, SequencingMessage}, traits::{ consensus_api::ConsensusApi, - election::{ConsensusExchange, VIDExchangeType}, + election::ConsensusExchange, node_implementation::{NodeImplementation, NodeType, VIDEx}, signature_key::SignatureKey, state::ConsensusTime, }, utils::ViewInner, }; +use hotshot_types::{ + simple_certificate::VIDCertificate2, + simple_vote::{VIDData, VIDVote2}, + traits::network::CommunicationChannel, + vote2::{HasViewNumber, VoteAccumulator2}, +}; use snafu::Snafu; use std::marker::PhantomData; @@ -46,12 +48,8 @@ pub struct VIDTaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { /// The state's api pub api: A, @@ -82,25 +80,16 @@ pub struct VIDVoteCollectionTaskState< TYPES: NodeType, I: NodeImplementation>, > where - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { /// the vid exchange pub vid_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulates VID votes pub accumulator: Either< - as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Commitment, - >>::VoteAccumulator, - VIDCertificate, + VoteAccumulator2>, VIDCertificate2>, + VIDCertificate2, >, /// the current view pub cur_view: TYPES::Time, @@ -113,12 +102,8 @@ pub struct VIDVoteCollectionTaskState< impl>> TS for VIDVoteCollectionTaskState where - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { } @@ -133,16 +118,15 @@ async fn vote_handle( where TYPES: NodeType, I: NodeImplementation>, - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { match event { HotShotEvent::VidVoteRecv(vote) => { - debug!("VID vote recv, collection task {:?}", vote.current_view); + debug!( + "VID vote recv, collection task {:?}", + vote.get_view_number() + ); // panic!("Vote handle received VID vote for view {}", *vote.current_view); // For the case where we receive votes after we've made a certificate @@ -152,10 +136,7 @@ where } let accumulator = state.accumulator.left().unwrap(); - match state - .vid_exchange - .accumulate_vote(accumulator, &vote, &vote.payload_commitment) - { + match accumulator.accumulate(&vote, state.vid_exchange.membership()) { Left(new_accumulator) => { state.accumulator = either::Left(new_accumulator); } @@ -201,12 +182,8 @@ impl< A: ConsensusApi, I> + 'static, > VIDTaskState where - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] @@ -222,7 +199,7 @@ where // self.committee_exchange.public_key() // ); // Check if we are the leader and the vote is from the sender. - let view = vote.current_view; + let view = vote.get_view_number(); if !self.vid_exchange.is_leader(view) { error!( "We are not the VID leader for view {} are we leader for next view? {}", @@ -247,19 +224,14 @@ where TYPES::Time::new(0) }; - let new_accumulator = VIDVoteAccumulator { - vid_vote_outcomes: HashMap::new(), - success_threshold: self.vid_exchange.success_threshold(), + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.vid_exchange.total_nodes()], phantom: PhantomData, }; - let accumulator = self.vid_exchange.accumulate_vote( - new_accumulator, - &vote, - &vote.clone().payload_commitment, - ); + let accumulator = new_accumulator.accumulate(&vote, self.vid_exchange.membership()); if view > collection_view { let state = VIDVoteCollectionTaskState { @@ -336,18 +308,24 @@ where Ok(None) => { debug!("We were not chosen for VID quorum on {:?}", view); } - Ok(Some(vote_token)) => { + Ok(Some(_vote_token)) => { // Generate and send vote - let vote = self.vid_exchange.create_vid_message( - payload_commitment, + let vote = VIDVote2::create_signed_vote( + VIDData { + payload_commit: payload_commitment, + }, view, - vote_token, + self.vid_exchange.public_key(), + self.vid_exchange.private_key(), ); // ED Don't think this is necessary? // self.cur_view = view; - debug!("Sending vote to the VID leader {:?}", vote.current_view); + debug!( + "Sending vote to the VID leader {:?}", + vote.get_view_number() + ); self.event_stream .publish(HotShotEvent::VidVoteSend(vote)) .await; @@ -447,12 +425,8 @@ impl< A: ConsensusApi, I> + 'static, > TS for VIDTaskState where - VIDEx: ConsensusExchange< - TYPES, - Message, - Certificate = VIDCertificate, - Commitment = Commitment, - >, + VIDEx: + ConsensusExchange, Commitment = Commitment>, { } diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 3395bbbc43..edb54a0d9a 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -5,7 +5,6 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; -use hotshot_types::traits::election::VIDExchangeType; use hotshot_types::{ block_impl::VIDTransaction, data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, @@ -14,6 +13,7 @@ use hotshot_types::{ node_implementation::ExchangesType, state::ConsensusTime, }, }; +use hotshot_types::{simple_vote::VIDVote2, traits::election::VIDExchangeType}; use std::collections::HashMap; #[cfg_attr( @@ -83,11 +83,14 @@ async fn test_vid_task() { 1, ); - let vote_token = vid_exchange - .make_vote_token(ViewNumber::new(2)) - .unwrap() - .unwrap(); - let vid_vote = vid_exchange.create_vid_message(block.commit(), ViewNumber::new(2), vote_token); + let vid_vote = VIDVote2::create_signed_vote( + hotshot_types::simple_vote::VIDData { + payload_commit: block.commit(), + }, + ViewNumber::new(2), + vid_exchange.public_key(), + vid_exchange.private_key(), + ); output.insert(HotShotEvent::VidVoteSend(vid_vote), 1); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); diff --git a/types/src/message.rs b/types/src/message.rs index fe207befc9..92a29ad93c 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,12 +3,11 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use crate::simple_certificate::DACertificate2; -use crate::simple_vote::DAVote2; -use crate::traits::node_implementation::CommitteeMembership; +use crate::simple_certificate::{DACertificate2, VIDCertificate2}; +use crate::simple_vote::{DAVote2, VIDVote2}; +use crate::traits::node_implementation::{CommitteeMembership, VIDMembership}; use crate::vote2::HasViewNumber; use crate::{ - certificate::VIDCertificate, data::{DAProposal, ProposalType, VidDisperse}, simple_vote::QuorumVote, traits::{ @@ -19,7 +18,7 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::{TimeoutVote, VIDVote, ViewSyncVote, VoteType}, + vote::{TimeoutVote, ViewSyncVote, VoteType}, }; use derivative::Derivative; @@ -230,9 +229,12 @@ pub enum ProcessedCommitteeConsensusMessage>, TYPES::SignatureKey), /// Vote from VID storage node. Like [`DAVote`] - VidVote(VIDVote, TYPES::SignatureKey), + VidVote( + VIDVote2>, + TYPES::SignatureKey, + ), /// Certificate for VID. Like [`DACertificate`] - VidCertificate(VIDCertificate, TYPES::SignatureKey), + VidCertificate(VIDCertificate2, TYPES::SignatureKey), } impl> @@ -366,11 +368,11 @@ pub enum CommitteeConsensusMessage /// Vote for VID disperse data /// /// Like [`DAVote`]. - VidVote(VIDVote), + VidVote(VIDVote2>), /// VID certificate data is available /// /// Like [`DACertificate`] - VidCertificate(VIDCertificate), + VidCertificate(VIDCertificate2), } /// Messages related to the consensus protocol. @@ -449,7 +451,7 @@ impl< CommitteeConsensusMessage::VidDisperseMsg(disperse) => { disperse.data.get_view_number() } - CommitteeConsensusMessage::VidVote(vote) => vote.get_view(), + CommitteeConsensusMessage::VidVote(vote) => vote.get_view_number(), } } } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index eb834e00c4..2ef87f6734 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - simple_vote::{DAData, QuorumData, Voteable}, + simple_vote::{DAData, QuorumData, VIDData, Voteable}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -129,3 +129,6 @@ pub type QuorumCertificate2 = SimpleCertificate = SimpleCertificate::BlockPayload>>; +/// type alias for a VID certificate +pub type VIDCertificate2 = + SimpleCertificate::BlockPayload>>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index b0285cf0bf..d6b0e6be7b 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -214,7 +214,7 @@ pub type QuorumVote = SimpleVote, M>; /// DA vote type alias pub type DAVote2 = SimpleVote::BlockPayload>, M>; /// VID vote type alias -pub type VIDVote = SimpleVote::BlockPayload>, M>; +pub type VIDVote2 = SimpleVote::BlockPayload>, M>; /// Timeout Vote type alias pub type TimeoutVote = SimpleVote, M>; /// View Sync Commit Vote type alias diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fa8415acb2..9f4b8ef949 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -592,20 +592,6 @@ impl< /// A [`ConsensusExchange`] where participants vote to provide availability for blobs of data. pub trait VIDExchangeType: ConsensusExchange { - /// Create a message with a vote on VID disperse data. - fn create_vid_message( - &self, - payload_commitment: Commitment, - current_view: TYPES::Time, - vote_token: TYPES::VoteTokenType, - ) -> VIDVote; - - /// Sign a vote on VID disperse - fn sign_vid_vote( - &self, - payload_commitment: Commitment, - ) -> (EncodedPublicKey, EncodedSignature); - /// Sign a VID disperse fn sign_vid_disperse( &self, @@ -644,33 +630,6 @@ impl< M: NetworkMsg, > VIDExchangeType for VIDExchange { - fn create_vid_message( - &self, - payload_commitment: Commitment, - current_view: ::Time, - vote_token: ::VoteTokenType, - ) -> VIDVote { - let signature = self.sign_vid_vote(payload_commitment); - VIDVote { - signature, - payload_commitment, - current_view, - vote_token, - vote_data: VoteData::VID(payload_commitment), - } - } - - fn sign_vid_vote( - &self, - payload_commitment: Commitment<::BlockPayload>, - ) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::VID(payload_commitment).commit().as_ref(), - ); - (self.public_key.to_bytes(), signature) - } - /// Sign a VID proposal. fn sign_vid_disperse( &self, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index e89e6b0625..4323f6bae3 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -619,6 +619,10 @@ pub type CommitteeCommChannel = pub type QuorumMembership = as ConsensusExchange>>::Membership; +/// TYPE aliase for the membership of VID exchange +pub type VIDMembership = + as ConsensusExchange>>::Membership; + /// Protocol for determining membership in a DA committee. pub type CommitteeMembership = as ConsensusExchange>>::Membership; From dfad195b0805da261b47ace4f2fea5566f02a489 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 7 Nov 2023 09:09:32 -0500 Subject: [PATCH 0325/1393] start removing old vote references --- hotshot/examples/libp2p/types.rs | 3 +-- hotshot/examples/web-server-da/types.rs | 3 +-- testing/tests/memory_network.rs | 2 -- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 2c49b76401..b3f2773fc5 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -14,7 +14,7 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, + vote::{DAVote, ViewSyncVote}, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -34,7 +34,6 @@ pub type ThisDAProposal = DAProposal; pub type ThisDAVote = DAVote; pub type ThisQuorumProposal = QuorumProposal; -pub type ThisQuorumVote = QuorumVote; pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index cb3b453d6a..0d456e131d 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -14,7 +14,7 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::{DAVote, QuorumVote, ViewSyncVote}, + vote::{DAVote, ViewSyncVote}, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -34,7 +34,6 @@ pub type ThisDAProposal = DAProposal; pub type ThisDAVote = DAVote; pub type ThisQuorumProposal = QuorumProposal; -pub type ThisQuorumVote = QuorumVote; pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 60e49f9546..d384281f66 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -28,7 +28,6 @@ use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, traits::state::ConsensusTime, - vote::QuorumVote, }; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; @@ -76,7 +75,6 @@ pub type ThisDAProposal = DAProposal; pub type ThisDAVote = DAVote; pub type ThisQuorumProposal = QuorumProposal; -pub type ThisQuorumVote = QuorumVote; pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; From b78803f32ff30873d32c3d13c3429590de837f26 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 7 Nov 2023 10:21:05 -0500 Subject: [PATCH 0326/1393] remove vote and certificate associated types from ConsensusExchange --- hotshot/src/certificate.rs | 2 +- hotshot/src/lib.rs | 11 +- hotshot/src/tasks/mod.rs | 51 ++--- task-impls/src/consensus.rs | 45 ++-- task-impls/src/da.rs | 49 ++--- task-impls/src/network.rs | 1 - task-impls/src/view_sync.rs | 14 +- types/src/certificate.rs | 234 +-------------------- types/src/traits/election.rs | 267 +++++++++--------------- types/src/traits/node_implementation.rs | 10 +- 10 files changed, 156 insertions(+), 528 deletions(-) diff --git a/hotshot/src/certificate.rs b/hotshot/src/certificate.rs index c1b18b078b..8b13789179 100644 --- a/hotshot/src/certificate.rs +++ b/hotshot/src/certificate.rs @@ -1 +1 @@ -pub use hotshot_types::certificate::QuorumCertificate; + diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 00c7b30bd3..8318217f42 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -53,13 +53,14 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - certificate::TimeoutCertificate, data::VidDisperse, simple_certificate::QuorumCertificate2, - traits::node_implementation::TimeoutEx, + data::VidDisperse, + simple_certificate::QuorumCertificate2, + traits::{election::ViewSyncExchangeType, node_implementation::TimeoutEx}, }; use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - certificate::{DACertificate, ViewSyncCertificate}, + certificate::ViewSyncCertificate, consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::{DAProposal, Leaf, LeafType, QuorumProposal}, error::StorageSnafu, @@ -650,11 +651,10 @@ where TYPES, Message, Proposal = DAProposal, - Certificate = DACertificate, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -673,7 +673,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 63c0574435..b8eba956f8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,8 +1,8 @@ //! Provides a number of tasks that run continuously on a [`HotShot`] -use crate::{async_spawn, types::SystemContextHandle, DACertificate, HotShotConsensusApi}; +use crate::{async_spawn, types::SystemContextHandle, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; -use commit::{Commitment, CommitmentBounds, Committable}; +use commit::{Commitment, Committable}; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -26,12 +26,12 @@ use hotshot_task_impls::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::{TimeoutCertificate, ViewSyncCertificate}, + certificate::ViewSyncCertificate, data::{Leaf, ProposalType, QuorumProposal}, event::Event, message::{Message, Messages, SequencingMessage}, traits::{ - election::{ConsensusExchange, Membership}, + election::{ConsensusExchange, Membership, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, TimeoutEx, VIDEx, @@ -39,7 +39,7 @@ use hotshot_types::{ }, state::ConsensusTime, }, - vote::{ViewSyncData, VoteType}, + vote::ViewSyncData, }; use std::{ collections::{HashMap, HashSet}, @@ -63,17 +63,10 @@ pub enum GlobalEvent { pub async fn add_network_message_task< TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, - COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange< - TYPES, - Message, - Proposal = PROPOSAL, - Vote = VOTE, - Membership = MEMBERSHIP, - > + 'static, + EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> + + 'static, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -169,17 +162,10 @@ where pub async fn add_network_event_task< TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, - COMMITMENT: CommitmentBounds, PROPOSAL: ProposalType, - VOTE: VoteType, MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange< - TYPES, - Message, - Proposal = PROPOSAL, - Vote = VOTE, - Membership = MEMBERSHIP, - > + 'static, + EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> + + 'static, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -256,17 +242,12 @@ where Proposal = QuorumProposal>, Commitment = Commitment>, >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, TimeoutEx: ConsensusExchange< TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -417,12 +398,8 @@ pub async fn add_da_task< handle: SystemContextHandle, ) -> TaskRunner where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -541,7 +518,7 @@ pub async fn add_view_sync_task< handle: SystemContextHandle, ) -> TaskRunner where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 0b01a29339..efb43212cf 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -21,8 +21,10 @@ use hotshot_types::{ data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, - simple_certificate::{DACertificate2, QuorumCertificate2, TimeoutCertificate2}, - simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote2, VIDData, VIDVote2}, + simple_certificate::{ + DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, + }, + simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote2}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -68,17 +70,12 @@ pub struct ConsensusTaskState< Proposal = QuorumProposal>, Commitment = Commitment>, >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, TimeoutEx: ConsensusExchange< TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -155,7 +152,6 @@ pub struct VoteCollectionTaskState< TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -206,7 +202,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -232,7 +227,6 @@ where TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -353,17 +347,12 @@ where Proposal = QuorumProposal>, Commitment = Commitment>, >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, TimeoutEx: ConsensusExchange< TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -1387,17 +1376,12 @@ where Proposal = QuorumProposal>, Commitment = Commitment>, >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, TimeoutEx: ConsensusExchange< TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { @@ -1439,17 +1423,12 @@ where Proposal = QuorumProposal>, Commitment = Commitment>, >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, TimeoutEx: ConsensusExchange< TYPES, Message, Proposal = QuorumProposal>, - Certificate = TimeoutCertificate, Commitment = Commitment, >, { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 086f353e5f..0679540b29 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -13,7 +13,6 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - certificate::DACertificate, consensus::{Consensus, View}, data::{DAProposal, Leaf, ProposalType}, message::{Message, Proposal, SequencingMessage}, @@ -49,12 +48,8 @@ pub struct DATaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { /// The state's api pub api: A, @@ -85,12 +80,8 @@ pub struct DAVoteCollectionTaskState< TYPES: NodeType, I: NodeImplementation>, > where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { /// the committee exchange pub committee_exchange: Arc>, @@ -115,12 +106,8 @@ pub struct DAVoteCollectionTaskState< impl>> TS for DAVoteCollectionTaskState where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { } @@ -133,12 +120,8 @@ async fn vote_handle, ) where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { match event { HotShotEvent::DAVoteRecv(vote) => { @@ -200,12 +183,8 @@ impl< A: ConsensusApi, I> + 'static, > DATaskState where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] @@ -488,12 +467,8 @@ impl< A: ConsensusApi, I> + 'static, > TS for DATaskState where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Certificate = DACertificate, - Commitment = Commitment, - >, + CommitteeEx: + ConsensusExchange, Commitment = Commitment>, { } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a4c6786a7e..dcbe837ee3 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -17,7 +17,6 @@ use hotshot_types::{ network::{CommunicationChannel, TransmitType}, node_implementation::{NodeImplementation, NodeType}, }, - vote::VoteType, vote2::{HasViewNumber, Vote2}, }; use snafu::Snafu; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 7e5034baa5..3f35d00e3a 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -66,7 +66,7 @@ pub struct ViewSyncTaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static + std::clone::Clone, > where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -115,7 +115,7 @@ impl< A: ConsensusApi, I> + 'static + std::clone::Clone, > TS for ViewSyncTaskState where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -139,7 +139,7 @@ pub struct ViewSyncReplicaTaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -182,7 +182,7 @@ impl< A: ConsensusApi, I> + 'static, > TS for ViewSyncReplicaTaskState where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -253,7 +253,7 @@ impl< A: ConsensusApi, I> + 'static + std::clone::Clone, > ViewSyncTaskState where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -627,7 +627,7 @@ impl< A: ConsensusApi, I> + 'static, > ViewSyncReplicaTaskState where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, @@ -959,7 +959,7 @@ impl< >, > ViewSyncRelayTaskState where - ViewSyncEx: ConsensusExchange< + ViewSyncEx: ViewSyncExchangeType< TYPES, Message, Proposal = ViewSyncCertificate, diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 266290425e..20e7c888b6 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -4,122 +4,16 @@ use crate::{ data::serialize_signature, traits::{ election::SignedCertificate, node_implementation::NodeType, signature_key::SignatureKey, - state::ConsensusTime, - }, - vote::{ - DAVote, DAVoteAccumulator, QuorumVote, QuorumVoteAccumulator, TimeoutVote, - TimeoutVoteAccumulator, VIDVote, VIDVoteAccumulator, ViewSyncData, ViewSyncVote, - ViewSyncVoteAccumulator, VoteType, }, + vote::{ViewSyncData, ViewSyncVote, ViewSyncVoteAccumulator, VoteType}, }; use bincode::Options; -use commit::{Commitment, CommitmentBounds, Committable}; +use commit::{Commitment, Committable}; use espresso_systems_common::hotshot::tag; use hotshot_utils::bincode::bincode_opts; use serde::{Deserialize, Serialize}; -use std::{ - fmt::{self, Debug, Display, Formatter}, - hash::Hash, - marker::PhantomData, -}; - -/// A `DACertificate` is a threshold signature that some data is available. -/// It is signed by the members of the DA committee, not the entire network. It is used -/// to prove that the data will be made available to those outside of the DA committee. -#[derive(Clone, PartialEq, custom_debug::Debug, serde::Serialize, serde::Deserialize, Hash)] -#[serde(bound(deserialize = ""))] -pub struct DACertificate { - /// The view number this quorum certificate was generated during - /// - /// This value is covered by the threshold signature. - pub view_number: TYPES::Time, - - /// committment to the block payload - pub payload_commitment: Commitment, - - /// Assembled signature for certificate aggregation - pub signatures: AssembledSignature, -} - -/// A `VIDCertificate` is a threshold signature that some data is available. -/// It is signed by the whole quorum. -#[derive(Clone, PartialEq, custom_debug::Debug, serde::Serialize, serde::Deserialize, Hash)] -#[serde(bound(deserialize = ""))] -pub struct VIDCertificate { - /// The view number this VID certificate was generated during - pub view_number: TYPES::Time, - - /// Committment to the block payload - pub payload_commitment: Commitment, - - /// Assembled signature for certificate aggregation - pub signatures: AssembledSignature, -} - -/// Depricated type for QC - -// TODO:remove this struct https://github.com/EspressoSystems/HotShot/issues/1995 -#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash, Eq)] -#[serde(bound(deserialize = ""))] -pub struct QuorumCertificate { - /// phantom data - _pd: PhantomData<(TYPES, COMMITMENT)>, -} - -impl Display - for QuorumCertificate -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "",) - } -} - -/// Timeout Certificate -#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct TimeoutCertificate { - /// View that timed out - pub view_number: TYPES::Time, - /// assembled signature for certificate aggregation - pub signatures: AssembledSignature, -} - -impl - SignedCertificate> - for TimeoutCertificate -{ - type Vote = TimeoutVote; - - type VoteAccumulator = TimeoutVoteAccumulator, Self::Vote>; - - fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { - TimeoutCertificate { - view_number: vote.get_view(), - signatures, - } - } - - fn view_number(&self) -> TYPES::Time { - self.view_number - } - - fn signatures(&self) -> AssembledSignature { - self.signatures.clone() - } - - fn leaf_commitment(&self) -> Commitment { - self.view_number.commit() - } - - fn is_genesis(&self) -> bool { - false - } - - fn genesis() -> Self { - unimplemented!() - } -} +use std::{fmt::Debug, hash::Hash}; /// Certificate for view sync. #[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] @@ -179,128 +73,6 @@ pub enum AssembledSignature { ViewSyncFinalize(::QCType), } -impl - SignedCertificate - for QuorumCertificate -{ - type Vote = QuorumVote; - type VoteAccumulator = QuorumVoteAccumulator; - - fn create_certificate(_signatures: AssembledSignature, _vote: Self::Vote) -> Self { - Self { _pd: PhantomData } - } - - fn view_number(&self) -> TYPES::Time { - TYPES::Time::new(1) - } - - fn signatures(&self) -> AssembledSignature { - AssembledSignature::Genesis() - } - - fn leaf_commitment(&self) -> COMMITMENT { - COMMITMENT::default_commitment_no_preimage() - } - - fn is_genesis(&self) -> bool { - true - } - - fn genesis() -> Self { - Self { _pd: PhantomData } - } -} - -impl Committable - for QuorumCertificate -{ - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Quorum Certificate Commitment") - .u64_field("view number", 1) - .finalize() - } -} - -impl - SignedCertificate> - for DACertificate -{ - type Vote = DAVote; - type VoteAccumulator = DAVoteAccumulator, Self::Vote>; - - fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { - DACertificate { - view_number: vote.get_view(), - signatures, - payload_commitment: vote.payload_commitment, - } - } - - fn view_number(&self) -> TYPES::Time { - self.view_number - } - - fn signatures(&self) -> AssembledSignature { - self.signatures.clone() - } - - fn leaf_commitment(&self) -> Commitment { - self.payload_commitment - } - - fn is_genesis(&self) -> bool { - // This function is only useful for QC. Will be removed after we have separated cert traits. - false - } - - fn genesis() -> Self { - // This function is only useful for QC. Will be removed after we have separated cert traits. - unimplemented!() - } -} - -impl - SignedCertificate> - for VIDCertificate -{ - type Vote = VIDVote; - type VoteAccumulator = VIDVoteAccumulator, Self::Vote>; - - fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { - VIDCertificate { - view_number: vote.get_view(), - signatures, - payload_commitment: vote.payload_commitment, - } - } - - fn view_number(&self) -> TYPES::Time { - self.view_number - } - - fn signatures(&self) -> AssembledSignature { - self.signatures.clone() - } - - fn leaf_commitment(&self) -> Commitment { - self.payload_commitment - } - - fn is_genesis(&self) -> bool { - // This function is only useful for QC. Will be removed after we have separated cert traits. - false - } - - fn genesis() -> Self { - // This function is only useful for QC. Will be removed after we have separated cert traits. - unimplemented!() - } -} - -impl Eq for DACertificate {} - -impl Eq for VIDCertificate {} - impl Committable for ViewSyncCertificate { fn commit(&self) -> Commitment { let signatures_bytes = serialize_signature(&self.signatures()); diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 20f9612eb5..d01a77f966 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -8,12 +8,8 @@ use super::{ signature_key::{EncodedPublicKey, EncodedSignature}, }; use crate::{ - certificate::{ - AssembledSignature, DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, - ViewSyncCertificate, - }, + certificate::{AssembledSignature, ViewSyncCertificate}, data::{DAProposal, ProposalType, VidDisperse}, - vote::{TimeoutVote, VIDVote}, }; use crate::{message::GeneralConsensusMessage, vote::ViewSyncVoteInternal}; @@ -23,9 +19,8 @@ use crate::{ traits::{ network::{CommunicationChannel, NetworkMsg}, signature_key::SignatureKey, - state::ConsensusTime, }, - vote::{Accumulator, DAVote, QuorumVote, ViewSyncData, ViewSyncVote, VoteType}, + vote::{Accumulator, ViewSyncData, ViewSyncVote, VoteType}, }; use bincode::Options; use commit::{Commitment, CommitmentBounds, Committable}; @@ -264,13 +259,7 @@ pub trait Membership: pub trait ConsensusExchange: Send + Sync { /// A proposal for participants to vote on. type Proposal: ProposalType; - /// A vote on a [`Proposal`](Self::Proposal). - // TODO ED Make this equal Certificate vote (if possible?) - type Vote: VoteType; - /// A [`SignedCertificate`] attesting to a decision taken by the committee. - type Certificate: SignedCertificate - + Hash - + Eq; + /// The committee eligible to make decisions. type Membership: Membership; /// Network used by [`Membership`](Self::Membership) to communicate. @@ -328,147 +317,6 @@ pub trait ConsensusExchange: Send + Sync { .make_vote_token(view_number, self.private_key()) } - /// Validate a certificate. - fn is_valid_cert(&self, qc: &Self::Certificate) -> bool { - if qc.is_genesis() && qc.view_number() == TYPES::Time::genesis() { - return true; - } - let leaf_commitment = qc.leaf_commitment(); - - match qc.signatures() { - AssembledSignature::DA(qc) => { - let real_commit = VoteData::DA(leaf_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } - AssembledSignature::VID(qc) => { - let real_commit = VoteData::VID(leaf_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } - AssembledSignature::Yes(qc) => { - let real_commit = VoteData::Yes(leaf_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } - AssembledSignature::No(qc) => { - let real_commit = VoteData::No(leaf_commitment).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check(&real_qc_pp, real_commit.as_ref(), &qc) - } - AssembledSignature::Timeout(_) => { - error!("QC type should not be timeout here"); - false - } - AssembledSignature::Genesis() => true, - AssembledSignature::ViewSyncPreCommit(_) - | AssembledSignature::ViewSyncCommit(_) - | AssembledSignature::ViewSyncFinalize(_) => { - error!("QC should not be ViewSync type here"); - false - } - } - } - - /// Validate a vote by checking its signature and token. - fn is_valid_vote( - &self, - key: &TYPES::SignatureKey, - encoded_signature: &EncodedSignature, - data: &VoteData, - vote_token: &Checked, - ) -> bool { - let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); - let valid_vote_token = self - .membership() - .validate_vote_token(key.clone(), vote_token.clone()); - let is_valid_vote_token = match valid_vote_token { - Err(_) => { - error!("Vote token was invalid"); - false - } - Ok(Checked::Valid(_)) => true, - Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, - }; - - is_valid_signature && is_valid_vote_token - } - - // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` - // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. - /// Accumulate vote - /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached - #[allow(clippy::type_complexity)] - fn accumulate_vote( - &self, - accumulator: <>::Certificate as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Self::Commitment, - >>::VoteAccumulator, - vote: &<>::Certificate as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Self::Commitment, - >>::Vote, - _commit: &Self::Commitment, - ) -> Either< - <>::Certificate as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Self::Commitment, - >>::VoteAccumulator, - Self::Certificate, - > { - if !self.is_valid_vote( - &vote.get_key(), - &vote.get_signature(), - &vote.get_data(), - &Checked::Unchecked(vote.get_vote_token()), - ) { - error!("Vote data is {:?}", vote.get_data()); - error!("Invalid vote!"); - return Either::Left(accumulator); - } - - let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); - // TODO ED Could we make this part of the vote in the future? It's only a usize. - let append_node_id = self - .membership() - .get_committee_qc_stake_table() - .iter() - .position(|x| *x == stake_table_entry.clone()) - .unwrap(); - - // TODO ED Should make append function take a reference to vote - match accumulator.append( - vote.clone(), - append_node_id, - self.membership().get_committee_qc_stake_table(), - ) { - Either::Left(accumulator) => Either::Left(accumulator), - Either::Right(signatures) => Either::Right(Self::Certificate::create_certificate( - signatures, - vote.clone(), - )), - } - } - /// The committee which votes on proposals. fn membership(&self) -> &Self::Membership; @@ -539,8 +387,6 @@ impl< > ConsensusExchange for CommitteeExchange { type Proposal = DAProposal; - type Vote = DAVote; - type Certificate = DACertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; @@ -644,8 +490,6 @@ impl< > ConsensusExchange for VIDExchange { type Proposal = VidDisperse; - type Vote = VIDVote; - type Certificate = VIDCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; @@ -802,9 +646,6 @@ impl< for QuorumExchange { type Proposal = PROPOSAL; - type Vote = QuorumVote>; - // TODO: remove this https://github.com/EspressoSystems/HotShot/issues/1995 - type Certificate = QuorumCertificate>; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; @@ -848,6 +689,13 @@ impl< pub trait ViewSyncExchangeType: ConsensusExchange { + /// A vote on a [`Proposal`](Self::Proposal). + // TODO ED Make this equal Certificate vote (if possible?) + type Vote: VoteType; + /// A [`SignedCertificate`] attesting to a decision taken by the committee. + type Certificate: SignedCertificate + + Hash + + Eq; /// Creates a precommit vote fn create_precommit_message>( &self, @@ -895,6 +743,93 @@ pub trait ViewSyncExchangeType: /// Sign a certificate. fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; + + // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` + // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. + /// Accumulate vote + /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached + #[allow(clippy::type_complexity)] + fn accumulate_vote( + &self, + accumulator: <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::VoteAccumulator, + vote: &<>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::Vote, + _commit: &Self::Commitment, + ) -> Either< + <>::Certificate as SignedCertificate< + TYPES, + TYPES::Time, + TYPES::VoteTokenType, + Self::Commitment, + >>::VoteAccumulator, + Self::Certificate, + > { + if !self.is_valid_vote( + &vote.get_key(), + &vote.get_signature(), + &vote.get_data(), + &Checked::Unchecked(vote.get_vote_token()), + ) { + error!("Vote data is {:?}", vote.get_data()); + error!("Invalid vote!"); + return Either::Left(accumulator); + } + + let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); + // TODO ED Could we make this part of the vote in the future? It's only a usize. + let append_node_id = self + .membership() + .get_committee_qc_stake_table() + .iter() + .position(|x| *x == stake_table_entry.clone()) + .unwrap(); + + // TODO ED Should make append function take a reference to vote + match accumulator.append( + vote.clone(), + append_node_id, + self.membership().get_committee_qc_stake_table(), + ) { + Either::Left(accumulator) => Either::Left(accumulator), + Either::Right(signatures) => Either::Right(Self::Certificate::create_certificate( + signatures, + vote.clone(), + )), + } + } + + /// Validate a vote by checking its signature and token. + fn is_valid_vote( + &self, + key: &TYPES::SignatureKey, + encoded_signature: &EncodedSignature, + data: &VoteData, + vote_token: &Checked, + ) -> bool { + let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); + let valid_vote_token = self + .membership() + .validate_vote_token(key.clone(), vote_token.clone()); + let is_valid_vote_token = match valid_vote_token { + Err(_) => { + error!("Vote token was invalid"); + false + } + Ok(Checked::Valid(_)) => true, + Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, + }; + + is_valid_signature && is_valid_vote_token + } } /// Standard implementation of [`ViewSyncExchangeType`] based on Hot Stuff consensus. @@ -930,6 +865,10 @@ impl< M: NetworkMsg, > ViewSyncExchangeType for ViewSyncExchange { + type Vote = ViewSyncVote; + + type Certificate = ViewSyncCertificate; + fn create_precommit_message>( &self, round: TYPES::Time, @@ -1132,8 +1071,6 @@ impl< > ConsensusExchange for ViewSyncExchange { type Proposal = PROPOSAL; - type Vote = ViewSyncVote; - type Certificate = ViewSyncCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment>; @@ -1232,8 +1169,6 @@ impl< > ConsensusExchange for TimeoutExchange { type Proposal = PROPOSAL; - type Vote = TimeoutVote; - type Certificate = TimeoutCertificate; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 4323f6bae3..8ce6a517cc 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -591,17 +591,9 @@ pub type CommitteeProposalType = pub type ViewSyncProposalType = as ConsensusExchange>>::Proposal; -/// A vote on a [`QuorumProposalType`]. -pub type QuorumVoteType = - as ConsensusExchange>>::Vote; - -/// A vote on a [`ComitteeProposal`]. -pub type CommitteeVote = - as ConsensusExchange>>::Vote; - /// A vote on a [`ViewSyncProposal`]. pub type ViewSyncVoteType = - as ConsensusExchange>>::Vote; + as ViewSyncExchangeType>>::Vote; /// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. pub type QuorumCommChannel = From 2af94e50e2692f35f719b5d509b217459033e53c Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 7 Nov 2023 11:05:49 -0500 Subject: [PATCH 0327/1393] removing the vote types --- hotshot/examples/libp2p/types.rs | 3 +- hotshot/examples/web-server-da/types.rs | 3 +- task-impls/src/view_sync.rs | 19 +- testing/tests/memory_network.rs | 3 +- types/src/certificate.rs | 5 +- types/src/traits/election.rs | 28 +- types/src/vote.rs | 640 +----------------------- 7 files changed, 29 insertions(+), 672 deletions(-) diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index b3f2773fc5..542e406f0e 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -14,7 +14,7 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::{DAVote, ViewSyncVote}, + vote::ViewSyncVote, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -31,7 +31,6 @@ pub type QuorumNetwork = Libp2pCommChannel; pub type ViewSyncNetwork = Libp2pCommChannel; pub type ThisDAProposal = DAProposal; -pub type ThisDAVote = DAVote; pub type ThisQuorumProposal = QuorumProposal; diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 0d456e131d..d5de57386e 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -14,7 +14,7 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::{DAVote, ViewSyncVote}, + vote::ViewSyncVote, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -31,7 +31,6 @@ pub type QuorumNetwork = WebCommChannel; pub type ViewSyncNetwork = WebCommChannel; pub type ThisDAProposal = DAProposal; -pub type ThisDAVote = DAVote; pub type ThisQuorumProposal = QuorumProposal; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 3f35d00e3a..11cba581f0 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -10,10 +10,7 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - traits::{ - election::{Membership, SignedCertificate}, - network::ConsensusIntentEvent, - }, + traits::{election::Membership, network::ConsensusIntentEvent}, vote::ViewSyncVoteAccumulator, }; @@ -34,7 +31,7 @@ use hotshot_types::{ vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; -use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use tracing::{debug, error, instrument}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync @@ -211,15 +208,7 @@ pub struct ViewSyncRelayTaskState< pub exchange: Arc>, /// Vote accumulator #[allow(clippy::type_complexity)] - pub accumulator: Either< - as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Commitment>, - >>::VoteAccumulator, - ViewSyncCertificate, - >, + pub accumulator: Either, ViewSyncCertificate>, /// Our node id; for logging pub id: u64, } @@ -391,7 +380,6 @@ where sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], - phantom: PhantomData, }; let mut relay_state = ViewSyncRelayTaskState { @@ -1050,7 +1038,6 @@ where sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], - phantom: PhantomData, }; either::Left(new_accumulator) } diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index d384281f66..511cb4867e 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -23,7 +23,7 @@ use hotshot_types::traits::election::{ use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; use hotshot_types::traits::node_implementation::{ChannelMaps, Exchanges, NodeType}; -use hotshot_types::vote::{DAVote, ViewSyncVote}; +use hotshot_types::vote::ViewSyncVote; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, @@ -72,7 +72,6 @@ pub type ViewSyncNetwork = MemoryCommChannel; pub type VIDNetwork = MemoryCommChannel; pub type ThisDAProposal = DAProposal; -pub type ThisDAVote = DAVote; pub type ThisQuorumProposal = QuorumProposal; diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 20e7c888b6..96b351631b 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -5,7 +5,7 @@ use crate::{ traits::{ election::SignedCertificate, node_implementation::NodeType, signature_key::SignatureKey, }, - vote::{ViewSyncData, ViewSyncVote, ViewSyncVoteAccumulator, VoteType}, + vote::{ViewSyncData, ViewSyncVote, VoteType}, }; use bincode::Options; use commit::{Commitment, Committable}; @@ -113,8 +113,7 @@ impl for ViewSyncCertificate { type Vote = ViewSyncVote; - type VoteAccumulator = - ViewSyncVoteAccumulator>, Self::Vote>; + /// Build a QC from the threshold signature and commitment fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { let certificate_internal = ViewSyncCertificateInternal { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d01a77f966..642673b571 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -10,6 +10,7 @@ use super::{ use crate::{ certificate::{AssembledSignature, ViewSyncCertificate}, data::{DAProposal, ProposalType, VidDisperse}, + vote::ViewSyncVoteAccumulator, }; use crate::{message::GeneralConsensusMessage, vote::ViewSyncVoteInternal}; @@ -20,7 +21,7 @@ use crate::{ network::{CommunicationChannel, NetworkMsg}, signature_key::SignatureKey, }, - vote::{Accumulator, ViewSyncData, ViewSyncVote, VoteType}, + vote::{ViewSyncData, ViewSyncVote, VoteType}, }; use bincode::Options; use commit::{Commitment, CommitmentBounds, Committable}; @@ -167,9 +168,6 @@ where /// `VoteType` that is used in this certificate type Vote: VoteType; - /// `Accumulator` type to accumulate votes. - type VoteAccumulator: Accumulator; - /// Build a QC from the threshold signature and commitment // TODO ED Rename this function and rework this function parameters // Assumes last vote was valid since it caused a QC to form. @@ -751,12 +749,7 @@ pub trait ViewSyncExchangeType: #[allow(clippy::type_complexity)] fn accumulate_vote( &self, - accumulator: <>::Certificate as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Self::Commitment, - >>::VoteAccumulator, + accumulator: ViewSyncVoteAccumulator, vote: &<>::Certificate as SignedCertificate< TYPES, TYPES::Time, @@ -764,15 +757,16 @@ pub trait ViewSyncExchangeType: Self::Commitment, >>::Vote, _commit: &Self::Commitment, - ) -> Either< - <>::Certificate as SignedCertificate< + ) -> Either, Self::Certificate> + where + >::Certificate: SignedCertificate< TYPES, TYPES::Time, TYPES::VoteTokenType, Self::Commitment, - >>::VoteAccumulator, - Self::Certificate, - > { + Vote = ViewSyncVote, + >, + { if !self.is_valid_vote( &vote.get_key(), &vote.get_signature(), @@ -795,7 +789,7 @@ pub trait ViewSyncExchangeType: // TODO ED Should make append function take a reference to vote match accumulator.append( - vote.clone(), + vote, append_node_id, self.membership().get_committee_qc_stake_table(), ) { @@ -812,7 +806,7 @@ pub trait ViewSyncExchangeType: &self, key: &TYPES::SignatureKey, encoded_signature: &EncodedSignature, - data: &VoteData, + data: &VoteData>>, vote_token: &Checked, ) -> bool { let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); diff --git a/types/src/vote.rs b/types/src/vote.rs index 97a51b0ae0..3f7befe196 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -22,7 +22,6 @@ use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, hash::Hash, - marker::PhantomData, num::NonZeroU64, }; use tracing::error; @@ -43,88 +42,6 @@ pub trait VoteType: fn get_vote_token(&self) -> TYPES::VoteTokenType; } -/// A vote on DA proposal. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -#[serde(bound(deserialize = ""))] -pub struct DAVote { - /// The signature share associated with this vote - pub signature: (EncodedPublicKey, EncodedSignature), - /// The block payload commitment being voted on. - pub payload_commitment: Commitment, - /// The view this vote was cast for - pub current_view: TYPES::Time, - /// The vote token generated by this replica - pub vote_token: TYPES::VoteTokenType, - /// The vote data this vote is signed over - pub vote_data: VoteData>, -} - -/// A vote on VID proposal. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -#[serde(bound(deserialize = ""))] -pub struct VIDVote { - /// The signature share associated with this vote - pub signature: (EncodedPublicKey, EncodedSignature), - /// The block payload commitment being voted on. - pub payload_commitment: Commitment, - /// The view this vote was cast for - pub current_view: TYPES::Time, - /// The vote token generated by this replica - pub vote_token: TYPES::VoteTokenType, - /// The vote data this vote is signed over - pub vote_data: VoteData>, -} - -/// A positive or negative vote on validating or commitment proposal. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct YesOrNoVote { - /// The signature share associated with this vote - pub signature: (EncodedPublicKey, EncodedSignature), - /// The leaf commitment being voted on. - pub leaf_commitment: COMMITMENT, - /// The view this vote was cast for - pub current_view: TYPES::Time, - /// The vote token generated by this replica - pub vote_token: TYPES::VoteTokenType, - /// The vote data this vote is signed over - pub vote_data: VoteData, -} - -/// A timeout vote -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct TimeoutVote { - /// The signature share associated with this vote - pub signature: (EncodedPublicKey, EncodedSignature), - /// The view this vote was cast for - pub current_view: TYPES::Time, - /// The vote token generated by this replica - pub vote_token: TYPES::VoteTokenType, -} - -impl VoteType> for TimeoutVote { - fn get_view(&self) -> ::Time { - self.current_view - } - - fn get_key(&self) -> ::SignatureKey { - ::from_bytes(&self.signature.0).unwrap() - } - - fn get_signature(&self) -> EncodedSignature { - self.signature.1.clone() - } - - fn get_data(&self) -> VoteData> { - VoteData::Timeout(self.get_view().commit()) - } - - fn get_vote_token(&self) -> ::VoteTokenType { - self.vote_token.clone() - } -} - /// The internals of a view sync vote #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] @@ -214,116 +131,6 @@ impl ViewSyncVote { } } -/// Votes on validating or commitment proposal. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub enum QuorumVote { - /// Posivite vote. - Yes(YesOrNoVote), - /// Negative vote. - No(YesOrNoVote), -} - -impl VoteType> for DAVote { - fn get_view(&self) -> TYPES::Time { - self.current_view - } - fn get_key(&self) -> ::SignatureKey { - self.signature_key() - } - fn get_signature(&self) -> EncodedSignature { - self.signature.1.clone() - } - fn get_data(&self) -> VoteData> { - self.vote_data.clone() - } - fn get_vote_token(&self) -> ::VoteTokenType { - self.vote_token.clone() - } -} - -impl DAVote { - /// Get the signature key. - /// # Panics - /// If the deserialization fails. - pub fn signature_key(&self) -> TYPES::SignatureKey { - ::from_bytes(&self.signature.0).unwrap() - } -} - -impl VoteType> for VIDVote { - fn get_view(&self) -> TYPES::Time { - self.current_view - } - fn get_key(&self) -> ::SignatureKey { - self.signature_key() - } - fn get_signature(&self) -> EncodedSignature { - self.signature.1.clone() - } - fn get_data(&self) -> VoteData> { - self.vote_data.clone() - } - fn get_vote_token(&self) -> ::VoteTokenType { - self.vote_token.clone() - } -} - -impl VIDVote { - /// Get the signature key. - /// # Panics - /// If the deserialization fails. - pub fn signature_key(&self) -> TYPES::SignatureKey { - ::from_bytes(&self.signature.0).unwrap() - } -} - -impl VoteType - for QuorumVote -{ - fn get_view(&self) -> TYPES::Time { - match self { - QuorumVote::Yes(v) | QuorumVote::No(v) => v.current_view, - } - } - - fn get_key(&self) -> ::SignatureKey { - self.signature_key() - } - fn get_signature(&self) -> EncodedSignature { - self.signature() - } - fn get_data(&self) -> VoteData { - match self { - QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_data.clone(), - } - } - fn get_vote_token(&self) -> ::VoteTokenType { - match self { - QuorumVote::Yes(v) | QuorumVote::No(v) => v.vote_token.clone(), - } - } -} - -impl QuorumVote { - /// Get the encoded signature. - - pub fn signature(&self) -> EncodedSignature { - match &self { - Self::Yes(vote) | Self::No(vote) => vote.signature.1.clone(), - } - } - /// Get the signature key. - /// # Panics - /// If the deserialization fails. - pub fn signature_key(&self) -> TYPES::SignatureKey { - let encoded = match &self { - Self::Yes(vote) | Self::No(vote) => vote.signature.0.clone(), - }; - ::from_bytes(&encoded).unwrap() - } -} - impl VoteType>> for ViewSyncVote { fn get_view(&self) -> TYPES::Time { match self { @@ -356,437 +163,14 @@ impl VoteType>> for ViewS } } -/// Accumulator trait used to accumulate votes into an `AssembledSignature` -pub trait Accumulator< - TYPES: NodeType, - COMMITMENT: CommitmentBounds, - VOTE: VoteType, ->: Sized -{ - /// Append 1 vote to the accumulator. If the threshold is not reached, return - /// the accumulator, else return the `AssembledSignature` - /// Only called from inside `accumulate_internal` - fn append( - self, - vote: VOTE, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either>; -} - -// TODO Make a default accumulator -// https://github.com/EspressoSystems/HotShot/issues/1797 -/// Accumulator for `TimeoutVote`s -pub struct TimeoutVoteAccumulator< - TYPES: NodeType, - COMMITMENT: CommitmentBounds, - VOTE: VoteType, -> { - /// Map of all da signatures accumlated so far - pub da_vote_outcomes: VoteMap, - /// A quorum's worth of stake, generally 2f + 1 - pub success_threshold: NonZeroU64, - /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, - /// Phantom data to specify the vote this accumulator is for - pub phantom: PhantomData, -} - -impl< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator for DAVoteAccumulator -{ - fn append( - mut self, - vote: VOTE, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either> { - let VoteData::DA(vote_commitment) = vote.get_data() else { - return Either::Left(self); - }; - - let encoded_key = vote.get_key().to_bytes(); - - // Deserialize the signature so that it can be assembeld into a QC - // TODO ED Update this once we've gotten rid of EncodedSignature - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); - - let (da_stake_casted, da_vote_map) = self - .da_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Check for duplicate vote - // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey - // Have to do this because SignatureKey is not hashable - if da_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - if self.signers.get(vote_node_id).as_deref() == Some(&true) { - error!("Node id is already in signers list"); - return Either::Left(self); - } - self.signers.set(vote_node_id, true); - self.sig_lists.push(original_signature); - - // Already checked that vote data was for a DA vote above - *da_stake_casted += u64::from(vote.get_vote_token().vote_count()); - da_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - - if *da_stake_casted >= u64::from(self.success_threshold) { - // Assemble QC - let real_qc_pp = ::get_public_parameter( - stake_table_entries.clone(), - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - self.da_vote_outcomes.remove(&vote_commitment); - - return Either::Right(AssembledSignature::DA(real_qc_sig)); - } - Either::Left(self) - } -} - -impl< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator for TimeoutVoteAccumulator -{ - fn append( - mut self, - vote: VOTE, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either> { - let VoteData::Timeout(vote_commitment) = vote.get_data() else { - return Either::Left(self); - }; - - let encoded_key = vote.get_key().to_bytes(); - - // Deserialize the signature so that it can be assembeld into a QC - // TODO ED Update this once we've gotten rid of EncodedSignature - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); - - let (da_stake_casted, da_vote_map) = self - .da_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Check for duplicate vote - // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey - // Have to do this because SignatureKey is not hashable - if da_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - if self.signers.get(vote_node_id).as_deref() == Some(&true) { - error!("Node id is already in signers list"); - return Either::Left(self); - } - self.signers.set(vote_node_id, true); - self.sig_lists.push(original_signature); - - // Already checked that vote data was for a DA vote above - *da_stake_casted += u64::from(vote.get_vote_token().vote_count()); - da_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - - if *da_stake_casted >= u64::from(self.success_threshold) { - // Assemble QC - let real_qc_pp = ::get_public_parameter( - stake_table_entries.clone(), - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - self.da_vote_outcomes.remove(&vote_commitment); - - return Either::Right(AssembledSignature::Timeout(real_qc_sig)); - } - Either::Left(self) - } -} - -impl< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator for VIDVoteAccumulator -{ - fn append( - mut self, - vote: VOTE, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either> { - let VoteData::VID(vote_commitment) = vote.get_data() else { - return Either::Left(self); - }; - - let encoded_key = vote.get_key().to_bytes(); - - // Deserialize the signature so that it can be assembeld into a QC - // TODO ED Update this once we've gotten rid of EncodedSignature - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); - - let (vid_stake_casted, vid_vote_map) = self - .vid_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Check for duplicate vote - // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey - // Have to do this because SignatureKey is not hashable - if vid_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - if self.signers.get(vote_node_id).as_deref() == Some(&true) { - error!("Node id is already in signers list"); - return Either::Left(self); - } - self.signers.set(vote_node_id, true); - self.sig_lists.push(original_signature); - - // Already checked that vote data was for a VID vote above - *vid_stake_casted += u64::from(vote.get_vote_token().vote_count()); - vid_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - - if *vid_stake_casted >= u64::from(self.success_threshold) { - // Assemble QC - let real_qc_pp = ::get_public_parameter( - stake_table_entries.clone(), - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - self.vid_vote_outcomes.remove(&vote_commitment); - - return Either::Right(AssembledSignature::VID(real_qc_sig)); - } - Either::Left(self) - } -} - -/// Accumulates DA votes -pub struct DAVoteAccumulator< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone, - VOTE: VoteType, -> { - /// Map of all da signatures accumlated so far - pub da_vote_outcomes: VoteMap, - /// A quorum's worth of stake, generally 2f + 1 - pub success_threshold: NonZeroU64, - /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, - /// Phantom data to specify the vote this accumulator is for - pub phantom: PhantomData, -} - -/// Accumulates VID votes -pub struct VIDVoteAccumulator< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone, - VOTE: VoteType, -> { - /// Map of all VID signatures accumlated so far - pub vid_vote_outcomes: VoteMap, - /// A quorum's worth of stake, generally 2f + 1 - pub success_threshold: NonZeroU64, - /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, - /// Phantom data to specify the vote this accumulator is for - pub phantom: PhantomData, -} - -/// Accumulate quorum votes -pub struct QuorumVoteAccumulator< - TYPES: NodeType, - COMMITMENT: CommitmentBounds, - VOTE: VoteType, -> { - /// Map of all signatures accumlated so far - pub total_vote_outcomes: VoteMap, - /// Map of all yes signatures accumlated so far - pub yes_vote_outcomes: VoteMap, - /// Map of all no signatures accumlated so far - pub no_vote_outcomes: VoteMap, - - /// A quorum's worth of stake, generally 2f + 1 - pub success_threshold: NonZeroU64, - /// A failure threshold, generally f + 1 - pub failure_threshold: NonZeroU64, - /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, - /// Phantom data to ensure this struct is over a specific `VoteType` implementation - pub phantom: PhantomData, -} - -impl< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator for QuorumVoteAccumulator -{ - fn append( - mut self, - vote: VOTE, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either> { - let (VoteData::Yes(vote_commitment) | VoteData::No(vote_commitment)) = vote.get_data() - else { - return Either::Left(self); - }; - - let encoded_key = vote.get_key().to_bytes(); - - // Deserialize the signature so that it can be assembeld into a QC - // TODO ED Update this once we've gotten rid of EncodedSignature - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); - - let (total_stake_casted, total_vote_map) = self - .total_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (yes_stake_casted, yes_vote_map) = self - .yes_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - let (no_stake_casted, no_vote_map) = self - .no_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Check for duplicate vote - // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey - // Have to do this because SignatureKey is not hashable - if total_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - if self.signers.get(vote_node_id).as_deref() == Some(&true) { - error!("Node id is already in signers list"); - return Either::Left(self); - } - self.signers.set(vote_node_id, true); - self.sig_lists.push(original_signature); - - *total_stake_casted += u64::from(vote.get_vote_token().vote_count()); - total_vote_map.insert( - encoded_key.clone(), - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - - match vote.get_data() { - VoteData::Yes(_) => { - *yes_stake_casted += u64::from(vote.get_vote_token().vote_count()); - yes_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - } - VoteData::No(_) => { - *no_stake_casted += u64::from(vote.get_vote_token().vote_count()); - no_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - } - _ => return Either::Left(self), - } - - if *total_stake_casted >= u64::from(self.success_threshold) { - // Assemble QC - let real_qc_pp = ::get_public_parameter( - stake_table_entries.clone(), - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - if *yes_stake_casted >= u64::from(self.success_threshold) { - self.yes_vote_outcomes.remove(&vote_commitment); - return Either::Right(AssembledSignature::Yes(real_qc_sig)); - } else if *no_stake_casted >= u64::from(self.failure_threshold) { - self.total_vote_outcomes.remove(&vote_commitment); - return Either::Right(AssembledSignature::No(real_qc_sig)); - } - } - Either::Left(self) - } -} - /// Accumulates view sync votes -pub struct ViewSyncVoteAccumulator< - TYPES: NodeType, - COMMITMENT: CommitmentBounds, - VOTE: VoteType, -> { +pub struct ViewSyncVoteAccumulator { /// Map of all pre_commit signatures accumlated so far - pub pre_commit_vote_outcomes: VoteMap, + pub pre_commit_vote_outcomes: VoteMap>, TYPES::VoteTokenType>, /// Map of all ommit signatures accumlated so far - pub commit_vote_outcomes: VoteMap, + pub commit_vote_outcomes: VoteMap>, TYPES::VoteTokenType>, /// Map of all finalize signatures accumlated so far - pub finalize_vote_outcomes: VoteMap, + pub finalize_vote_outcomes: VoteMap>, TYPES::VoteTokenType>, /// A quorum's worth of stake, generally 2f + 1 pub success_threshold: NonZeroU64, @@ -796,20 +180,16 @@ pub struct ViewSyncVoteAccumulator< pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check pub signers: BitVec, - /// Phantom data since we want the accumulator to be attached to a single `VoteType` - pub phantom: PhantomData, } -impl< - TYPES: NodeType, - COMMITMENT: CommitmentBounds + Clone + Copy + PartialEq + Eq + Hash, - VOTE: VoteType, - > Accumulator for ViewSyncVoteAccumulator -{ +impl ViewSyncVoteAccumulator { + /// append a vote + /// # Panics + /// if serialize fails #[allow(clippy::too_many_lines)] - fn append( + pub fn append( mut self, - vote: VOTE, + vote: &ViewSyncVote, vote_node_id: usize, stake_table_entries: Vec<::StakeTableEntry>, ) -> Either> { From d49d6be83aa2cb24b33bf1b1972e5573b096cba6 Mon Sep 17 00:00:00 2001 From: MRain Date: Tue, 7 Nov 2023 11:09:01 -0500 Subject: [PATCH 0328/1393] remove redundant clone --- hotshot-stake-table/src/mt_based.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index e1318ebd40..ecdb15776c 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -54,7 +54,7 @@ impl StakeTableScheme for StakeTable { &new_key, amount, )?; - self.mapping.insert(new_key.clone(), pos); + self.mapping.insert(new_key, pos); Ok(()) } } From 1a944732ef1866773e79c068e1031ded9915bb15 Mon Sep 17 00:00:00 2001 From: MRain Date: Tue, 7 Nov 2023 13:24:46 -0500 Subject: [PATCH 0329/1393] addressing comments --- hotshot-stake-table/src/vec_based.rs | 37 +++++++++++++++------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 10ee587da5..427d09401e 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -118,10 +118,9 @@ where fn commitment(&self, version: SnapshotVersion) -> Result { match version { // IMPORTANT: we don't support committing the head version b/c it's not finalized. - SnapshotVersion::Head => Err(StakeTableError::SnapshotUnsupported), SnapshotVersion::EpochStart => Ok(self.epoch_start_comm), SnapshotVersion::LastEpochStart => Ok(self.last_epoch_start_comm), - SnapshotVersion::BlockNum(_) => Err(StakeTableError::SnapshotUnsupported), + _ => Err(StakeTableError::SnapshotUnsupported), } } @@ -241,15 +240,17 @@ where { /// Initiating an empty stake table. pub fn new() -> Self { - let to_be_hashed = vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; + let bls_comm_preimage = + vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; let default_bls_comm = - VariableLengthRescueCRHF::::evaluate(&to_be_hashed).unwrap()[0]; - let to_be_hashed = vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; + VariableLengthRescueCRHF::::evaluate(&bls_comm_preimage).unwrap()[0]; + let schnorr_comm_preimage = + vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; let default_schnorr_comm = - VariableLengthRescueCRHF::::evaluate(&to_be_hashed).unwrap()[0]; - let to_be_hashed = vec![F::default(); STAKE_TABLE_CAPACITY]; + VariableLengthRescueCRHF::::evaluate(&schnorr_comm_preimage).unwrap()[0]; + let stake_comm_preimage = vec![F::default(); STAKE_TABLE_CAPACITY]; let default_stake_comm = - VariableLengthRescueCRHF::::evaluate(&to_be_hashed).unwrap()[0]; + VariableLengthRescueCRHF::::evaluate(&stake_comm_preimage).unwrap()[0]; let default_comm = (default_bls_comm, default_schnorr_comm, default_stake_comm); Self { head: StakeTableSnapshot::default(), @@ -293,42 +294,44 @@ where /// Helper function to recompute the stake table commitment for head version fn compute_head_comm(&mut self) -> (F, F, F) { // Compute rescue hash for bls keys - let mut to_be_hashed = self + let mut bls_comm_preimage = self .head .bls_keys .iter() .map(|key| key.to_fields()) .collect::>() .concat(); - to_be_hashed.resize( + bls_comm_preimage.resize( STAKE_TABLE_CAPACITY * >::SIZE, F::default(), ); - let bls_comm = VariableLengthRescueCRHF::::evaluate(to_be_hashed).unwrap()[0]; + let bls_comm = VariableLengthRescueCRHF::::evaluate(bls_comm_preimage).unwrap()[0]; // Compute rescue hash for Schnorr keys - let mut to_be_hashed = self + let mut schnorr_comm_preimage = self .head .schnorr_keys .iter() .map(|key| key.to_fields()) .collect::>() .concat(); - to_be_hashed.resize( + schnorr_comm_preimage.resize( STAKE_TABLE_CAPACITY * >::SIZE, F::default(), ); - let schnorr_comm = VariableLengthRescueCRHF::::evaluate(to_be_hashed).unwrap()[0]; + let schnorr_comm = + VariableLengthRescueCRHF::::evaluate(schnorr_comm_preimage).unwrap()[0]; // Compute rescue hash for stake amounts - let mut to_be_hashed = self + let mut stake_comm_preimage = self .head .stake_amount .iter() .map(|x| u256_to_field(x)) .collect::>(); - to_be_hashed.resize(STAKE_TABLE_CAPACITY, F::default()); - let stake_comm = VariableLengthRescueCRHF::::evaluate(to_be_hashed).unwrap()[0]; + stake_comm_preimage.resize(STAKE_TABLE_CAPACITY, F::default()); + let stake_comm = + VariableLengthRescueCRHF::::evaluate(stake_comm_preimage).unwrap()[0]; (bls_comm, schnorr_comm, stake_comm) } From a5c370d4765fb531e99a17380fdffcee5c56b58f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 7 Nov 2023 16:43:17 -0500 Subject: [PATCH 0330/1393] remove old todo --- task-impls/src/network.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 17b75271aa..f13a510fd7 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -236,8 +236,7 @@ impl< CommitteeConsensusMessage::VidVote(vote.clone()), ))), TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number())), // TODO who is VID leader? https://github.com/EspressoSystems/HotShot/issues/1699 - ), + Some(membership.get_leader(vote.get_view_number())), HotShotEvent::DAVoteSend(vote) => ( vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( From 0c3782881b0e731b7121678d0c7e4a26810cd5c9 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 8 Nov 2023 10:22:30 -0500 Subject: [PATCH 0331/1393] fix build --- task-impls/src/network.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index f13a510fd7..8386c1e442 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -237,6 +237,7 @@ impl< ))), TransmitType::Direct, Some(membership.get_leader(vote.get_view_number())), + ), HotShotEvent::DAVoteSend(vote) => ( vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( From 04aefe94f15b3e234cf0abbf102f09ceb81bee5e Mon Sep 17 00:00:00 2001 From: MRain Date: Wed, 8 Nov 2023 11:08:11 -0500 Subject: [PATCH 0332/1393] move `capacity` out of traits --- hotshot-stake-table/src/config.rs | 4 ++++ hotshot-stake-table/src/lib.rs | 1 + hotshot-stake-table/src/vec_based.rs | 9 +++++---- types/src/traits/stake_table.rs | 3 --- 4 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 hotshot-stake-table/src/config.rs diff --git a/hotshot-stake-table/src/config.rs b/hotshot-stake-table/src/config.rs new file mode 100644 index 0000000000..41c27d73b9 --- /dev/null +++ b/hotshot-stake-table/src/config.rs @@ -0,0 +1,4 @@ +//! Configuration file for stake table + +/// Capacity of a stake table +pub const STAKE_TABLE_CAPACITY: usize = 1000; diff --git a/hotshot-stake-table/src/lib.rs b/hotshot-stake-table/src/lib.rs index fecb3a06d9..e2a6e80d36 100644 --- a/hotshot-stake-table/src/lib.rs +++ b/hotshot-stake-table/src/lib.rs @@ -2,6 +2,7 @@ #![deny(warnings)] #![deny(missing_docs)] +pub mod config; pub mod mt_based; pub mod utils; pub mod vec_based; diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 427d09401e..da3175bde4 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -1,12 +1,13 @@ //! A vector based stake table implementation. The commitment is the rescue hash of the list of (key, amount) pairs; -use crate::utils::{u256_to_field, ToFields}; +use crate::{ + config::STAKE_TABLE_CAPACITY, + utils::{u256_to_field, ToFields}, +}; use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; use digest::crypto_common::rand_core::CryptoRngCore; use ethereum_types::{U256, U512}; -use hotshot_types::traits::stake_table::{ - SnapshotVersion, StakeTableError, StakeTableScheme, STAKE_TABLE_CAPACITY, -}; +use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; use jf_primitives::{ crhf::{VariableLengthRescueCRHF, CRHF}, rescue::RescueParameter, diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index b7d608ba36..4da7f374cc 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -5,9 +5,6 @@ use digest::crypto_common::rand_core::CryptoRngCore; use displaydoc::Display; use jf_primitives::errors::PrimitivesError; -/// Capacity of a stake table -pub const STAKE_TABLE_CAPACITY: usize = 1000; - /// Snapshots of the stake table pub enum SnapshotVersion { /// the latest "Head" where all new changes are applied to From 152e0e3408f21b620d4e64a16d03999bac4899e9 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 8 Nov 2023 23:49:33 -0800 Subject: [PATCH 0333/1393] Update mod.rs for larger timeout duration --- hotshot/src/tasks/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bbabfb1ca8..b89a188c0f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -575,7 +575,7 @@ where num_timeouts_tracked: 0, replica_task_map: HashMap::default(), relay_task_map: HashMap::default(), - view_sync_timeout: Duration::new(5, 0), + view_sync_timeout: Duration::new(10, 0), id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; From 1760133ac79f46dfbf4dedfac68502f8aafd2fde Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 9 Nov 2023 00:33:43 -0800 Subject: [PATCH 0334/1393] Revert "Update mod.rs for larger timeout duration" This reverts commit 0ca0a4f552e54e84bcd7b14334a8f7390a4e65a8. --- hotshot/src/tasks/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b89a188c0f..bbabfb1ca8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -575,7 +575,7 @@ where num_timeouts_tracked: 0, replica_task_map: HashMap::default(), relay_task_map: HashMap::default(), - view_sync_timeout: Duration::new(10, 0), + view_sync_timeout: Duration::new(5, 0), id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; From eb5c7cda84b43ecb9254bbc1e5c8d2fee0f96b56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Nov 2023 10:10:45 -0500 Subject: [PATCH 0335/1393] Bump libp2p-noise from 0.43.2 to 0.44.0 (#2027) Bumps [libp2p-noise](https://github.com/libp2p/rust-libp2p) from 0.43.2 to 0.44.0. - [Release notes](https://github.com/libp2p/rust-libp2p/releases) - [Changelog](https://github.com/libp2p/rust-libp2p/blob/master/CHANGELOG.md) - [Commits](https://github.com/libp2p/rust-libp2p/compare/libp2p-noise-0.43.2...libp2p-noise-v0.44.0) --- updated-dependencies: - dependency-name: libp2p-noise dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index c9dd3b56f2..ed86459d3d 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -38,7 +38,7 @@ hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } -libp2p-noise = { version = "0.43.2", default-features = false } +libp2p-noise = { version = "0.44.0", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } From 1987940be800eea1f521bc41d5dcdff6de01148f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 9 Nov 2023 10:12:39 -0500 Subject: [PATCH 0336/1393] remove empty file --- hotshot/src/certificate.rs | 1 - hotshot/src/lib.rs | 2 -- 2 files changed, 3 deletions(-) delete mode 100644 hotshot/src/certificate.rs diff --git a/hotshot/src/certificate.rs b/hotshot/src/certificate.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/hotshot/src/certificate.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 8318217f42..bb7b491b99 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -19,8 +19,6 @@ #[cfg(feature = "docs")] pub mod documentation; -/// Contains structures and functions for committee election -pub mod certificate; #[cfg(feature = "demo")] pub mod demo; /// Contains traits consumed by [`HotShot`] From 463aba909f3a17edd77a634c0d8aaf12e9516a14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Nov 2023 10:36:47 -0500 Subject: [PATCH 0337/1393] Bump dyn-clone from 1.0.14 to 1.0.16 (#1976) Bumps [dyn-clone](https://github.com/dtolnay/dyn-clone) from 1.0.14 to 1.0.16. - [Release notes](https://github.com/dtolnay/dyn-clone/releases) - [Commits](https://github.com/dtolnay/dyn-clone/compare/cee99471c46f9f512640aa03c680a547ac72c22c...f2f0a02f1f7190048153e5ea8f554db7377a50a9) --- updated-dependencies: - dependency-name: dyn-clone dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- hotshot/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index c89e0df218..f85eafdb5c 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -110,7 +110,7 @@ serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.14" } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } tracing = { workspace = true } typenum = { workspace = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index ab3be882eb..3b63fabaad 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -50,7 +50,7 @@ time = { workspace = true } tracing = { workspace = true } ethereum-types = { workspace = true } typenum = { workspace = true } -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.14" } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } [dev-dependencies] serde_json = "1.0.108" From 17b8a8350d9617bde896151608da23642744b863 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Thu, 9 Nov 2023 12:28:52 -0500 Subject: [PATCH 0338/1393] threshold checking for hotshot state prover (#1901) * threshold checking circuit * change some variables to public * comments & tests(TBD) * doing test * test * Adding back the bls_comm * small refactor for test * use `STAKE_TABLE_CAPACITY` * Move state definition to another crate * Formatting * merge imports * augment test & comments * Addressing comments * duplicated items --- hotshot-state-prover/Cargo.toml | 35 +++++ hotshot-state-prover/src/circuit.rs | 234 ++++++++++++++++++++++++++++ hotshot-state-prover/src/lib.rs | 3 + types/Cargo.toml | 14 +- types/src/traits/stake_table.rs | 7 + types/src/traits/state.rs | 16 ++ 6 files changed, 303 insertions(+), 6 deletions(-) create mode 100644 hotshot-state-prover/Cargo.toml create mode 100644 hotshot-state-prover/src/circuit.rs create mode 100644 hotshot-state-prover/src/lib.rs diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml new file mode 100644 index 0000000000..8b2ecfa928 --- /dev/null +++ b/hotshot-state-prover/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "hotshot-state-prover" +description = "Generate state update proof for HotShot light client" +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +ark-bn254 = "0.4.0" +ark-ec = "0.4.0" +ark-ed-on-bn254 = "0.4.0" +ark-ff = "0.4.0" +ark-serialize = { workspace = true } +ark-std = { workspace = true } +bincode = { workspace = true } +bitvec = { workspace = true } +digest = { workspace = true } +displaydoc = { version = "0.2.3", default-features = false } +ethereum-types = { workspace = true } +generic-array = "0.14.7" +hotshot-types = { path = "../types" } +jf-plonk = { workspace = true } +jf-primitives = { workspace = true } +jf-relation = { workspace = true } +jf-utils = { workspace = true } +serde = { workspace = true, features = ["rc"] } +tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } +typenum = { workspace = true } +hotshot-stake-table = { path = "../hotshot-stake-table" } + +[features] +default = ["parallel"] +std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] +parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs new file mode 100644 index 0000000000..b96284d595 --- /dev/null +++ b/hotshot-state-prover/src/circuit.rs @@ -0,0 +1,234 @@ +//! Circuit implementation for verifying light client state update + +use std::marker::PhantomData; + +use ark_ec::twisted_edwards::TECurveConfig; +use ark_ff::PrimeField; +use ethereum_types::U256; +use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; +use hotshot_types::traits::{ + stake_table::{SnapshotVersion, StakeTableScheme}, + state::LightClientState, +}; +use jf_plonk::errors::PlonkError; +use jf_primitives::{ + circuit::signature::schnorr::VerKeyVar, + rescue::RescueParameter, + signatures::{ + bls_over_bn254::VerKey as BLSVerKey, + schnorr::{Signature, VerKey as SchnorrVerKey}, + }, +}; +use jf_relation::{ + errors::CircuitError, gadgets::ecc::TEPoint, BoolVar, Circuit, PlonkCircuit, Variable, +}; + +/// Lossy conversion of a U256 into a field element. +pub(crate) fn u256_to_field(v: &U256) -> F { + let mut bytes = vec![0u8; 32]; + v.to_little_endian(&mut bytes); + F::from_le_bytes_mod_order(&bytes) +} + +/// Variable for stake table entry +#[derive(Clone, Debug)] +pub struct StakeTableEntryVar { + pub schnorr_ver_key: VerKeyVar, + pub stake_amount: Variable, +} + +/// HotShot state Variable +/// The stake table commitment is a triple (bls_keys_comm, stake_amount_comm, schnorr_keys_comm). +#[derive(Clone, Debug)] +pub struct LightClientStateVar { + pub view_number_var: Variable, + pub block_height_var: Variable, + pub block_comm_var: Variable, + pub fee_ledger_comm_var: Variable, + pub stake_table_comm_var: (Variable, Variable, Variable), +} + +#[derive(Clone, Debug)] +pub struct StateUpdateBuilder(PhantomData); + +impl StateUpdateBuilder +where + F: RescueParameter, +{ + /// A function that takes as input: + /// - stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) + /// - schnorr signatures of the updated states (`Vec`) + /// - updated hotshot state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) + /// - signer bit vector + /// - quorum threshold + /// checks that + /// - the signer's accumulated weight exceeds the quorum threshold + /// - the commitment of the stake table + /// - all schnorr signatures are valid + pub fn build( + stake_table: &ST, + _sigs: &[Signature

], + _hotshot_state: &LightClientState, + signer_bit_vec: &[bool], + threshold: &U256, + ) -> Result<(PlonkCircuit, Vec), PlonkError> + where + ST: StakeTableScheme>, + P: TECurveConfig, + { + let mut circuit = PlonkCircuit::new_turbo_plonk(); + + // Dummy circuit implementation, fill in the details later + // TODO(Chengyu): + // - [DONE] the signer's accumulated weight exceeds the quorum threshold + // - The commitment of the stake table as [https://www.notion.so/espressosys/Light-Client-Contract-a416ebbfa9f342d79fccbf90de9706ef?pvs=4#6c0e26d753cd42e9bb0f22db1c519f45] + // - Batch Schnorr signature verification + + // creating variables for stake table entries + let mut stake_table_var = stake_table + .try_iter(SnapshotVersion::LastEpochStart)? + .map(|(_bls_ver_key, amount, schnorr_ver_key)| { + let schnorr_ver_key = + VerKeyVar(circuit.create_point_variable(schnorr_ver_key.to_affine().into())?); + let stake_amount = circuit.create_variable(u256_to_field::(&amount))?; + Ok(StakeTableEntryVar { + schnorr_ver_key, + stake_amount, + }) + }) + .collect::, CircuitError>>()?; + let dummy_ver_key_var = + VerKeyVar(circuit.create_constant_point_variable(TEPoint::default())?); + stake_table_var.resize( + STAKE_TABLE_CAPACITY, + StakeTableEntryVar { + schnorr_ver_key: dummy_ver_key_var, + stake_amount: 0, + }, + ); + + let mut signer_bit_vec_var = signer_bit_vec + .iter() + .map(|&b| circuit.create_boolean_variable(b)) + .collect::, CircuitError>>()?; + signer_bit_vec_var.resize(STAKE_TABLE_CAPACITY, BoolVar(circuit.zero())); + + let threshold = u256_to_field::(threshold); + let threshold_var = circuit.create_public_variable(threshold)?; + + // TODO(Chengyu): put in the hotshot state + let public_inputs = vec![threshold]; + + // Checking whether the accumulated weight exceeds the quorum threshold + let mut signed_amount_var = (0..STAKE_TABLE_CAPACITY / 2) + .map(|i| { + circuit.mul_add( + &[ + stake_table_var[2 * i].stake_amount, + signer_bit_vec_var[2 * i].0, + stake_table_var[2 * i + 1].stake_amount, + signer_bit_vec_var[2 * i + 1].0, + ], + &[F::one(), F::one()], + ) + }) + .collect::, CircuitError>>()?; + // Adding the last if STAKE_TABLE_CAPACITY is not a multiple of 2 + if STAKE_TABLE_CAPACITY % 2 == 1 { + signed_amount_var.push(circuit.mul( + stake_table_var[STAKE_TABLE_CAPACITY - 1].stake_amount, + signer_bit_vec_var[STAKE_TABLE_CAPACITY - 1].0, + )?); + } + let acc_amount_var = circuit.sum(&signed_amount_var)?; + circuit.enforce_leq(threshold_var, acc_amount_var)?; + + // circuit.mul_add(wires_in, q_muls) + circuit.finalize_for_arithmetization()?; + Ok((circuit, public_inputs)) + } +} + +#[cfg(test)] +mod tests { + use super::{LightClientState, StateUpdateBuilder}; + use ark_ed_on_bn254::EdwardsConfig as Config; + use ethereum_types::U256; + use hotshot_stake_table::vec_based::StakeTable; + use hotshot_types::traits::stake_table::StakeTableScheme; + use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, + SchnorrSignatureScheme, SignatureScheme, + }; + use jf_relation::Circuit; + + type F = ark_ed_on_bn254::Fq; + type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; + + fn key_pairs_for_testing() -> Vec<(BLSVerKey, SchnorrVerKey)> { + let mut prng = jf_utils::test_rng(); + (0..10) + .map(|_| { + ( + BLSOverBN254CurveSignatureScheme::key_gen(&(), &mut prng) + .unwrap() + .1, + SchnorrSignatureScheme::key_gen(&(), &mut prng).unwrap().1, + ) + }) + .collect::>() + } + + fn stake_table_for_testing( + keys: &[(BLSVerKey, SchnorrVerKey)], + ) -> StakeTable { + let mut st = StakeTable::::new(); + // Registering keys + keys.iter().enumerate().for_each(|(i, key)| { + st.register(key.0, U256::from((i + 1) as u32), key.1.clone()) + .unwrap() + }); + // Freeze the stake table + st.advance(); + st.advance(); + st + } + + #[test] + fn test_circuit_building() { + let keys = key_pairs_for_testing(); + let st = stake_table_for_testing(&keys); + + // bit vector with total weight 26 + let bit_vec = [ + true, true, true, false, true, true, false, false, true, false, + ]; + // good path + let (circuit, public_inputs) = StateUpdateBuilder::::build( + &st, + &[], + &LightClientState::default(), + &bit_vec, + &U256::from(25u32), + ) + .unwrap(); + assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); + + // bad path: total weight doesn't meet the threshold + // bit vector with total weight 23 + let bad_bit_vec = [ + true, true, true, true, true, false, false, true, false, false, + ]; + let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( + &st, + &[], + &LightClientState::default(), + &bad_bit_vec, + &U256::from(25u32), + ) + .unwrap(); + assert!(bad_circuit + .check_circuit_satisfiability(&public_inputs) + .is_err()); + } +} diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs new file mode 100644 index 0000000000..13feae9401 --- /dev/null +++ b/hotshot-state-prover/src/lib.rs @@ -0,0 +1,3 @@ +//! SNARK-assisted light client state update verification in HotShot + +pub mod circuit; diff --git a/types/Cargo.toml b/types/Cargo.toml index 3b63fabaad..601b03b8f5 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -13,13 +13,14 @@ demo = [] [dependencies] arbitrary = { version = "1.3", features = ["derive"] } ark-bls12-381 = { workspace = true } -async-compatibility-layer = { workspace = true } -async-lock = { workspace = true } -async-trait = { workspace = true } +ark-ff = "0.4.0" ark-serialize = { version = "0.3", features = [ "derive", ] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 ark-std = { workspace = true } +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } blake3 = { workspace = true } @@ -28,14 +29,17 @@ custom_debug = { workspace = true } derivative = "2.2.0" digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } +ethereum-types = { workspace = true } futures = { workspace = true } generic-array = { workspace = true } hex_fmt = "0.3.0" hotshot-constants = { path = "../constants" } -hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", default-features = false } +hotshot-utils = { path = "../utils" } +jf-plonk = { workspace = true } jf-primitives = { workspace = true, features = ["test-srs"] } jf-utils = { workspace = true } libp2p-networking = { workspace = true } @@ -48,9 +52,7 @@ snafu = { workspace = true } tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } time = { workspace = true } tracing = { workspace = true } -ethereum-types = { workspace = true } typenum = { workspace = true } -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } [dev-dependencies] serde_json = "1.0.108" diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index 4da7f374cc..598a662650 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -3,6 +3,7 @@ use ark_std::{rand::SeedableRng, string::ToString, vec::Vec}; use digest::crypto_common::rand_core::CryptoRngCore; use displaydoc::Display; +use jf_plonk::errors::PlonkError; use jf_primitives::errors::PrimitivesError; /// Snapshots of the stake table @@ -226,3 +227,9 @@ impl From for PrimitivesError { Self::ParameterError(value.to_string()) } } + +impl From for PlonkError { + fn from(value: StakeTableError) -> Self { + Self::PrimitiveError(PrimitivesError::ParameterError(value.to_string())) + } +} diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 4899e9976a..6a845088f2 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -4,6 +4,7 @@ //! network state, which is modified by the transactions contained within blocks. use crate::traits::BlockPayload; +use ark_ff::PrimeField; use commit::Committable; use espresso_systems_common::hotshot::tag; use serde::{de::DeserializeOwned, Serialize}; @@ -218,3 +219,18 @@ pub mod dummy { } } } + +/// A serialized consensus state for proof generation +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)] +pub struct LightClientState { + /// Current view number + pub view_number: usize, + /// Current block height + pub block_height: usize, + /// Block commitment + pub block_comm: F, + /// Commitment for fee ledger + pub fee_ledger_comm: F, + /// Commitment for the stake table + pub stake_table_comm: (F, F, F), +} From e21669ad37a1068da287932663f50362de35f271 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 9 Nov 2023 16:02:55 -0800 Subject: [PATCH 0339/1393] Associate payload with metadata, fix txn flattening --- hotshot/src/demo.rs | 6 +- task-impls/src/consensus.rs | 4 +- task-impls/src/transactions.rs | 35 +++------ testing/src/task_helpers.rs | 2 +- testing/tests/da_task.rs | 16 ++--- testing/tests/network_task.rs | 7 +- testing/tests/vid_task.rs | 7 +- types/src/block_impl.rs | 110 ++++++++++++++++++++++------- types/src/traits/block_contents.rs | 24 +++++-- 9 files changed, 130 insertions(+), 81 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index a558922ec5..d682857474 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -11,7 +11,7 @@ use derivative::Derivative; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ - block_impl::{BlockPayloadError, VIDBlockHeader, VIDBlockPayload, VIDTransaction}, + block_impl::{BlockError, VIDBlockHeader, VIDBlockPayload, VIDTransaction}, data::{fake_commitment, ViewNumber}, traits::{ election::Membership, @@ -60,7 +60,7 @@ impl Default for DemoState { } impl State for DemoState { - type Error = BlockPayloadError; + type Error = BlockError; type BlockHeader = VIDBlockHeader; @@ -88,7 +88,7 @@ impl State for DemoState { view_number: &Self::Time, ) -> Result { if !self.validate_block(block_header, view_number) { - return Err(BlockPayloadError::InvalidBlock); + return Err(BlockError::InvalidBlockHeader); } Ok(DemoState { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 803b952a14..33a01227b0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1355,7 +1355,7 @@ where view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - block_header: TYPES::BlockHeader::new(*payload_commitment, &parent_header), + block_header: TYPES::BlockHeader::new(*payload_commitment, (), &parent_header), block_payload: None, rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), @@ -1367,7 +1367,7 @@ where .sign_validating_or_commitment_proposal::(&leaf.commit()); // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_header: TYPES::BlockHeader::new(*payload_commitment, &parent_header), + block_header: TYPES::BlockHeader::new(*payload_commitment, (), &parent_header), view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2fe424657c..7e4f8af709 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,10 +13,10 @@ use hotshot_task::{ task_impls::HSTWithEvent, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockPayload, VIDTransaction, NUM_CHUNKS, NUM_STORAGE_NODES}, certificate::QuorumCertificate, consensus::Consensus, - data::{Leaf, VidDisperse, VidScheme, VidSchemeTrait}, + data::{test_srs, Leaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, traits::{ consensus_api::ConsensusApi, @@ -228,30 +228,15 @@ where // TODO (Keyao) Determine whether to allow empty blocks. // let txns = self.wait_for_transactions(parent_leaf).await?; + let encoded_txns = VIDTransaction::encode(txns.clone()); + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let vid_disperse = vid.disperse(encoded_txns.clone()).unwrap(); - // TODO move all VID stuff to a new VID task - // details here: https://github.com/EspressoSystems/HotShot/issues/1817#issuecomment-1747143528 - let num_storage_nodes = 8; - debug!("Prepare VID shares for {} storage nodes", num_storage_nodes); - - // TODO Secure SRS for VID - // https://github.com/EspressoSystems/HotShot/issues/1686 - let srs = hotshot_types::data::test_srs(num_storage_nodes); - - // TODO proper source for VID erasure code rate - // https://github.com/EspressoSystems/HotShot/issues/1734 - let num_chunks = 8; - - let vid = VidScheme::new(num_chunks, num_storage_nodes, &srs).unwrap(); - - // TODO Wasteful flattening of tx bytes to accommodate VID API - // https://github.com/EspressoSystems/jellyfish/issues/375 - let mut txns_flatten = Vec::new(); - for txn in &txns { - txns_flatten.extend(txn.0.clone()); - } - - let vid_disperse = vid.disperse(&txns_flatten).unwrap(); let block = VIDBlockPayload { transactions: txns, payload_commitment: vid_disperse.commit, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4ba8243908..bd69bbf47b 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -116,7 +116,7 @@ async fn build_quorum_proposal_and_signature( // every event input is seen on the event stream in the output. let block = ::genesis(); let payload_commitment = block.commit(); - let block_header = VIDBlockHeader::new(payload_commitment, &parent_header); + let block_header = VIDBlockHeader::new(payload_commitment, (), &parent_header); let leaf = Leaf { view_number: ViewNumber::new(view), justify_qc: consensus.high_qc.clone(), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index ff755286e0..79f04c7122 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,13 +1,10 @@ use commit::Committable; use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - task_helpers::vid_init, -}; +use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ block_impl::VIDTransaction, - data::{DAProposal, VidSchemeTrait, ViewNumber}, + data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote2}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, @@ -39,12 +36,11 @@ async fn test_da_task() { }; let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); - let vid = vid_init(); - let txn = vec![0u8]; - let vid_disperse = vid.disperse(&txn).unwrap(); - let payload_commitment = vid_disperse.commit; + let transactions = vec![VIDTransaction(vec![0])]; + let encoded_txns = vec![1, 0]; + let payload_commitment = VIDBlockPayload::vid_commitment(&encoded_txns); let block = VIDBlockPayload { - transactions: vec![VIDTransaction(txn)], + transactions, payload_commitment, }; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index e9409bc69b..ac1368863f 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -42,11 +42,12 @@ async fn test_network_task() { let pub_key = *api.public_key(); let priv_key = api.private_key(); let vid = vid_init(); - let txn = vec![0u8]; - let vid_disperse = vid.disperse(&txn).unwrap(); + let transactions = vec![VIDTransaction(vec![0])]; + let encoded_txns = vec![1, 0]; + let vid_disperse = vid.disperse(&encoded_txns).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { - transactions: vec![VIDTransaction(txn)], + transactions, payload_commitment, }; let signature = committee_exchange.sign_da_proposal(&block.commit()); diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 3395bbbc43..64dc05cc83 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -38,11 +38,12 @@ async fn test_vid_task() { let pub_key = *api.public_key(); let vid = vid_init(); - let txn = vec![0u8]; - let vid_disperse = vid.disperse(&txn).unwrap(); + let transactions = vec![VIDTransaction(vec![0])]; + let encoded_txns = vec![1, 0]; + let vid_disperse = vid.disperse(&encoded_txns).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { - transactions: vec![VIDTransaction(txn)], + transactions, payload_commitment, }; diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index af664791bd..375f917d70 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -25,10 +25,35 @@ pub const NUM_STORAGE_NODES: usize = 8; /// Number of chunks for VID initiation. pub const NUM_CHUNKS: usize = 8; +/// The error type for block and its transactions. +#[derive(Snafu, Debug)] +pub enum BlockError { + /// Invalid block header. + InvalidBlockHeader, +} + /// The transaction in a [`VIDBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct VIDTransaction(pub Vec); +impl VIDTransaction { + #[must_use] + /// Encode a list of transactions into bytes. + pub fn encode(transactions: Vec) -> Vec { + let mut encoded = Vec::new(); + + for txn in transactions { + // Encode the length of the inner transaction and the transaction bytes. + if let Ok(len) = txn.0.len().try_into() { + encoded.extend::>(vec![len]); + encoded.extend(txn.0); + } + } + + encoded + } +} + impl Committable for VIDTransaction { fn commit(&self) -> Commitment { let builder = commit::RawCommitmentBuilder::new("Txn Comm"); @@ -45,21 +70,6 @@ impl Committable for VIDTransaction { impl Transaction for VIDTransaction {} -/// The error type for block payload. -#[derive(Snafu, Debug)] -pub enum BlockPayloadError { - /// Previous state commitment does not match - PreviousStateMismatch, - /// Nonce was reused - ReusedTxn, - /// Genesis failure - GenesisFailed, - /// Genesis reencountered after initialization - GenesisAfterStart, - /// invalid block - InvalidBlock, -} - /// A [`BlockPayload`] that contains a list of `VIDTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct VIDBlockPayload { @@ -70,23 +80,31 @@ pub struct VIDBlockPayload { } impl VIDBlockPayload { - /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for - /// consensus task initiation. - /// # Panics - /// If the `VidScheme` construction fails. #[must_use] - pub fn genesis() -> Self { + /// Compute the VID payload commitment. + /// # Panics + /// If the VID computation fails. + pub fn vid_commitment(encoded_transactions: &[u8]) -> ::Commit { // TODO let srs = test_srs(NUM_STORAGE_NODES); // TODO We are using constant numbers for now, but they will change as the quorum size // changes. // TODO let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let txn = vec![0]; - let vid_disperse = vid.disperse(&txn).unwrap(); + vid.disperse(encoded_transactions.to_vec()).unwrap().commit + } + + /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for + /// consensus task initiation. + /// # Panics + /// If the `VidScheme` construction fails. + #[must_use] + pub fn genesis() -> Self { + let txns: Vec = vec![0]; + let encoded = VIDTransaction::encode(vec![VIDTransaction(txns.clone())]); VIDBlockPayload { - transactions: vec![VIDTransaction(txn)], - payload_commitment: vid_disperse.commit, + transactions: vec![VIDTransaction(txns)], + payload_commitment: Self::vid_commitment(&encoded), } } } @@ -119,9 +137,43 @@ impl TestableBlock for VIDBlockPayload { } impl BlockPayload for VIDBlockPayload { - type Error = BlockPayloadError; - + type Error = BlockError; type Transaction = VIDTransaction; + type Metadata = (); + + fn build(transactions: impl IntoIterator) -> (Self, Self::Metadata) { + let txns_vec: Vec = transactions.into_iter().collect(); + let encoded = VIDTransaction::encode(txns_vec.clone()); + ( + Self { + transactions: txns_vec, + payload_commitment: Self::vid_commitment(&encoded), + }, + (), + ) + } + + fn encode(&self) -> Vec { + VIDTransaction::encode(self.transactions.clone()) + } + + fn decode(encoded_transactions: &[u8]) -> Self { + let mut transactions = Vec::new(); + let mut current_index = 0; + while current_index < encoded_transactions.len() { + let txn_len = encoded_transactions[current_index] as usize; + let next_index = current_index + txn_len; + transactions.push(VIDTransaction( + encoded_transactions[current_index..next_index].to_vec(), + )); + current_index = next_index; + } + + Self { + transactions, + payload_commitment: Self::vid_commitment(encoded_transactions), + } + } fn transaction_commitments(&self) -> HashSet> { self.transactions @@ -143,7 +195,11 @@ pub struct VIDBlockHeader { impl BlockHeader for VIDBlockHeader { type Payload = VIDBlockPayload; - fn new(payload_commitment: Commitment, parent_header: &Self) -> Self { + fn new( + payload_commitment: Commitment, + _metadata: ::Metadata, + parent_header: &Self, + ) -> Self { Self { block_number: parent_header.block_number + 1, payload_commitment, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 0e753e082c..9ee9755a1a 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -13,16 +13,12 @@ use std::{ hash::Hash, }; -// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. -// /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { } -// TODO (Keyao) Determine whether we can refactor BlockPayload and Transaction from traits to structs. -// /// Abstraction over the full contents of a block /// /// This trait encapsulates the behaviors that the transactions of a block must have in order to be @@ -50,7 +46,17 @@ pub trait BlockPayload: /// The type of the transitions we are applying type Transaction: Transaction; - // type Header: BlockHeader; + /// Data created during block building which feeds into the block header + type Metadata; + + /// Build a payload and associated metadata with the transactions. + fn build(transactions: impl IntoIterator) -> (Self, Self::Metadata); + + /// Encode the payload + fn encode(&self) -> Vec; + + /// Decode the payload + fn decode(encoded_transactions: &[u8]) -> Self; /// returns hashes of all the transactions in this block /// TODO make this ordered with a vec @@ -64,8 +70,12 @@ pub trait BlockHeader: /// Block payload associated with the commitment. type Payload: BlockPayload; - /// Build a header with the payload commitment and parent header. - fn new(payload_commitment: Commitment, parent_header: &Self) -> Self; + /// Build a header with the payload commitment, metadata, and parent header. + fn new( + payload_commitment: Commitment, + metadata: ::Metadata, + parent_header: &Self, + ) -> Self; /// Build a genesis header with the genesis payload. fn genesis(payload: Self::Payload) -> Self; From a6984b0c2d5c986174b23ddbccd93652498d0a77 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 9 Nov 2023 19:12:39 -0500 Subject: [PATCH 0340/1393] fix hanging votes (#2030) --- task-impls/src/consensus.rs | 70 ++++++++++++++++++------------------- task-impls/src/da.rs | 18 +++++----- task-impls/src/vid.rs | 17 ++++----- 3 files changed, 53 insertions(+), 52 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index efb43212cf..abcbfe43a0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -978,27 +978,27 @@ where TYPES::Time::new(0) }; - // Todo check if we are the leader - let new_accumulator = VoteAccumulator2 { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], - phantom: PhantomData, - }; + if vote.get_view_number() > collection_view { + // Todo check if we are the leader + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, + }; - let accumulator = - new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); + let accumulator = + new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); - // TODO Create default functions for accumulators - // https://github.com/EspressoSystems/HotShot/issues/1797 - let timeout_accumulator = VoteAccumulator2 { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_exchange.total_nodes()], - phantom: PhantomData, - }; + // TODO Create default functions for accumulators + // https://github.com/EspressoSystems/HotShot/issues/1797 + let timeout_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.timeout_exchange.total_nodes()], + phantom: PhantomData, + }; - if vote.get_view_number() > collection_view { let state = VoteCollectionTaskState { quorum_exchange: self.quorum_exchange.clone(), timeout_exchange: self.timeout_exchange.clone(), @@ -1063,27 +1063,27 @@ where TYPES::Time::new(0) }; - // // Todo check if we are the leader - let new_accumulator = VoteAccumulator2 { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_exchange.total_nodes()], - phantom: PhantomData, - }; + if vote.get_view_number() > collection_view { + // Todo check if we are the leader + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.timeout_exchange.total_nodes()], + phantom: PhantomData, + }; - let timeout_accumulator = - new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); + let timeout_accumulator = + new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); - let quorum_accumulator = VoteAccumulator2 { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], - phantom: PhantomData, - }; + let quorum_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.quorum_exchange.total_nodes()], + phantom: PhantomData, + }; - // self.timeout_accumulator = accumulator; + // self.timeout_accumulator = accumulator; - if vote.get_view_number() > collection_view { let state = VoteCollectionTaskState { quorum_exchange: self.quorum_exchange.clone(), timeout_exchange: self.timeout_exchange.clone(), diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0679540b29..b29dab50d5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -299,17 +299,17 @@ where TYPES::Time::new(0) }; - let new_accumulator = VoteAccumulator2 { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.committee_exchange.total_nodes()], - phantom: PhantomData, - }; + if view > collection_view { + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.committee_exchange.total_nodes()], + phantom: PhantomData, + }; - let accumulator = - new_accumulator.accumulate(&vote, self.committee_exchange.membership()); + let accumulator = + new_accumulator.accumulate(&vote, self.committee_exchange.membership()); - if view > collection_view { let state = DAVoteCollectionTaskState { committee_exchange: self.committee_exchange.clone(), diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 56e829c33f..b92b50d8e8 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -224,16 +224,17 @@ where TYPES::Time::new(0) }; - let new_accumulator = VoteAccumulator2 { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.vid_exchange.total_nodes()], - phantom: PhantomData, - }; + if view > collection_view { + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.vid_exchange.total_nodes()], + phantom: PhantomData, + }; - let accumulator = new_accumulator.accumulate(&vote, self.vid_exchange.membership()); + let accumulator = + new_accumulator.accumulate(&vote, self.vid_exchange.membership()); - if view > collection_view { let state = VIDVoteCollectionTaskState { vid_exchange: self.vid_exchange.clone(), From 122f841f753d082e166a4915b7d0ec33b7fe3191 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 8 Nov 2023 19:33:27 -0800 Subject: [PATCH 0341/1393] update confusing timeout tracking message --- task-impls/src/view_sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 11cba581f0..ef5536dc4e 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -472,7 +472,7 @@ where self.num_timeouts_tracked += 1; error!( - "Num timeouts tracked is {}. View {} timed out", + "Num timeouts tracked since last view change is {}. View {} timed out", self.num_timeouts_tracked, *view_number ); From a6975c904efe363b3d5ef8f43a790f48faed9509 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 8 Nov 2023 19:34:13 -0800 Subject: [PATCH 0342/1393] update confusing timeout tracking message --- task-impls/src/view_sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index ef5536dc4e..68df3f5f9e 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -477,7 +477,7 @@ where ); if self.num_timeouts_tracked > 3 { - error!("Too many timeouts! This shouldn't happen"); + error!("Too many consecutive timeouts! This shouldn't happen"); } // TODO ED Make this a configurable variable From 592e8e94259d0e7f4710be2a7785d775b7fb01aa Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 9 Nov 2023 00:31:15 -0800 Subject: [PATCH 0343/1393] some tries for CI timeout issue --- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/consensus.rs | 1 + testing/tests/web_server.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b8eba956f8..6f57378a14 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -540,7 +540,7 @@ where num_timeouts_tracked: 0, replica_task_map: HashMap::default(), relay_task_map: HashMap::default(), - view_sync_timeout: Duration::new(5, 0), + view_sync_timeout: Duration::new(10, 0), id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index abcbfe43a0..8d047d5bf0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -610,6 +610,7 @@ where // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = self.cur_view + 1; + error!("Timeout task spawned for view {}", *view_number); async move { async_sleep(Duration::from_millis(timeout)).await; stream diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index c815800919..664ebfbe87 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -28,7 +28,7 @@ async fn web_server_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 35, + num_successful_views: 30, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( From 66d8c4f8918ccb2dc3fd0204b97d15e0a9b1524a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 9 Nov 2023 20:12:39 -0800 Subject: [PATCH 0344/1393] Remove println error --- task-impls/src/consensus.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 8d047d5bf0..abcbfe43a0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -610,7 +610,6 @@ where // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = self.cur_view + 1; - error!("Timeout task spawned for view {}", *view_number); async move { async_sleep(Duration::from_millis(timeout)).await; stream From 3acc9c39e220a4d409cc0641bcaa97569b029ba6 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 9 Nov 2023 20:22:44 -0800 Subject: [PATCH 0345/1393] re-change num_successful_views for web_server_network --- testing/tests/web_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 664ebfbe87..c815800919 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -28,7 +28,7 @@ async fn web_server_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 30, + num_successful_views: 35, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( From 7e853910b47a091c3ad9b28a899cc2001f99e622 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 10 Nov 2023 09:03:11 -0500 Subject: [PATCH 0346/1393] Removing single view sync certificate types in favor of 3 separate certificate types --- task-impls/src/events.rs | 10 ++++++++++ task-impls/src/view_sync.rs | 9 +-------- types/src/simple_certificate.rs | 10 +++++++++- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index b4be7769be..16a17a0169 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -7,6 +7,7 @@ use hotshot_types::{ message::Proposal, simple_certificate::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, + ViewSyncPreCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncCommitCertificate2, }, simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2}, traits::node_implementation::{ @@ -61,7 +62,16 @@ pub enum HotShotEvent> { /// Receive a view sync vote from the network; received by a relay in the view sync task ViewSyncVoteRecv(ViewSyncVote), /// Receive a view sync certificate from the network; received by a replica in the view sync task + // TODO ED Remove this event in favor of separate events depending on which certificate type it is. ViewSyncCertificateRecv(Proposal>), + + /// Receive a `ViewSyncPreCommitCertificate2` from the network; received by a replica in the view sync task + ViewSyncPreCommitCertificate2Recv(ViewSyncPreCommitCertificate2), + /// Receive a `ViewSyncCommitCertificate2` from the network; received by a replica in the view sync task + ViewSyncCommitCertificate2Recv(ViewSyncCommitCertificate2), + /// Receive a `ViewSyncFinalizeCertificate2` from the network; received by a replica in the view sync task + ViewSyncFinalizeCertificate2Recv(ViewSyncFinalizeCertificate2), + /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::Time), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 11cba581f0..267b14bee5 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -66,8 +66,7 @@ pub struct ViewSyncTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, + // TODO ED Remove this when exchanges is done, but we don't actually use this commitment type anymore. Commitment = Commitment>, >, { @@ -115,8 +114,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, Commitment = Commitment>, >, { @@ -139,8 +136,6 @@ pub struct ViewSyncReplicaTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, Commitment = Commitment>, >, { @@ -182,8 +177,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, Commitment = Commitment>, >, { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 220683c7bc..a17a5c2a94 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - simple_vote::{DAData, QuorumData, TimeoutData, VIDData, Voteable}, + simple_vote::{DAData, QuorumData, TimeoutData, VIDData, Voteable, ViewSyncPreCommitData, ViewSyncCommitData, ViewSyncFinalizeData}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -134,3 +134,11 @@ pub type TimeoutCertificate2 = SimpleCertificate = SimpleCertificate::BlockPayload>>; + +// TODO ED Update this to use the correct threshold instead of the default `success_threshold` +/// Type alias for a ViewSyncPreCommit certificate over a view number +pub type ViewSyncPreCommitCertificate2 = SimpleCertificate>; +/// Type alias for a ViewSyncCommit certificate over a view number +pub type ViewSyncCommitCertificate2 = SimpleCertificate>; +/// Type alias for a ViewSyncFinalize certificate over a view number +pub type ViewSyncFinalizeCertificate2 = SimpleCertificate>; From 0266fc69eb69fdc9652ce0259a1a8c6059e2caac Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 10 Nov 2023 09:31:06 -0500 Subject: [PATCH 0347/1393] Naive replacement of old ViewSyncCertificate --- task-impls/src/events.rs | 11 ++++++++++- task-impls/src/view_sync.rs | 31 ++++++++++--------------------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 16a17a0169..b2e574180e 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -7,7 +7,7 @@ use hotshot_types::{ message::Proposal, simple_certificate::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, - ViewSyncPreCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncCommitCertificate2, + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2}, traits::node_implementation::{ @@ -54,6 +54,8 @@ pub enum HotShotEvent> { ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), /// Send a view sync vote to the network; emitted by a replica in the view sync task ViewSyncVoteSend(ViewSyncVote), + + // TODO ED Remove this event /// Send a view sync certificate to the network; emitted by a relay in the view sync task ViewSyncCertificateSend( Proposal>, @@ -72,6 +74,13 @@ pub enum HotShotEvent> { /// Receive a `ViewSyncFinalizeCertificate2` from the network; received by a replica in the view sync task ViewSyncFinalizeCertificate2Recv(ViewSyncFinalizeCertificate2), + /// Send a `ViewSyncPreCommitCertificate2` from the network; emitted by a relay in the view sync task + ViewSyncPreCommitCertificate2Send(ViewSyncPreCommitCertificate2), + /// Send a `ViewSyncCommitCertificate2` from the network; emitted by a relay in the view sync task + ViewSyncCommitCertificate2Send(ViewSyncCommitCertificate2), + /// Send a `ViewSyncFinalizeCertificate2` from the network; emitted by a relay in the view sync task + ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2), + /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::Time), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 267b14bee5..4dfe99e8fb 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -11,7 +11,7 @@ use hotshot_task::{ }; use hotshot_types::{ traits::{election::Membership, network::ConsensusIntentEvent}, - vote::ViewSyncVoteAccumulator, + vote::ViewSyncVoteAccumulator, vote2::HasViewNumber, }; use bitvec::prelude::*; @@ -32,7 +32,7 @@ use hotshot_types::{ }; use snafu::Snafu; use std::{collections::HashMap, sync::Arc, time::Duration}; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, info}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync pub enum ViewSyncPhase { @@ -247,31 +247,20 @@ where /// Handles incoming events for the main view sync task pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { - HotShotEvent::ViewSyncCertificateRecv(message) => { - let (certificate_internal, last_seen_certificate) = match &message.data { - ViewSyncCertificate::PreCommit(certificate_internal) => { - (certificate_internal, ViewSyncPhase::PreCommit) - } - ViewSyncCertificate::Commit(certificate_internal) => { - (certificate_internal, ViewSyncPhase::Commit) - } - ViewSyncCertificate::Finalize(certificate_internal) => { - (certificate_internal, ViewSyncPhase::Finalize) - } - }; - error!( + HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + info!( "Received view sync cert for phase {:?}", - last_seen_certificate + certificate ); // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it - if self.current_view > certificate_internal.round { + if self.current_view > certificate.get_view_number() { debug!("Already in a higher view than the view sync message"); return; } - if let Some(replica_task) = self.replica_task_map.get(&certificate_internal.round) { + if let Some(replica_task) = self.replica_task_map.get(&certificate.get_view_number()) { // Forward event then return debug!("Forwarding message"); self.event_stream @@ -283,8 +272,8 @@ where // We do not have a replica task already running, so start one let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { - current_view: certificate_internal.round, - next_view: certificate_internal.round, + current_view: certificate.get_view_number(), + next_view: certificate.get_view_number(), relay: 0, finalized: false, sent_view_change_event: false, @@ -328,7 +317,7 @@ where let event_stream_id = builder.get_stream_id().unwrap(); self.replica_task_map.insert( - certificate_internal.round, + certificate.get_view_number(), ViewSyncTaskInfo { event_stream_id }, ); From e6afabc576ea8f9583a5744a183b5ee32079c00f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 10 Nov 2023 10:25:00 -0500 Subject: [PATCH 0348/1393] Update view sync vote data to include relay index --- task-impls/src/events.rs | 23 +++++++++++++-- task-impls/src/view_sync.rs | 57 ++++++++++++++----------------------- types/src/simple_vote.rs | 6 ++-- 3 files changed, 45 insertions(+), 41 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index b2e574180e..45ff234f86 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -9,10 +9,10 @@ use hotshot_types::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, - simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2}, + simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2, ViewSyncPreCommitVote, ViewSyncCommitVote, ViewSyncFinalizeVote}, traits::node_implementation::{ CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncProposalType, + VIDMembership, ViewSyncProposalType, ViewSyncMembership, }, vote::ViewSyncVote, }; @@ -61,12 +61,31 @@ pub enum HotShotEvent> { Proposal>, TYPES::SignatureKey, ), + // TODO ED Remove this in favor of separate votes for each view sync vote type /// Receive a view sync vote from the network; received by a relay in the view sync task ViewSyncVoteRecv(ViewSyncVote), /// Receive a view sync certificate from the network; received by a replica in the view sync task // TODO ED Remove this event in favor of separate events depending on which certificate type it is. ViewSyncCertificateRecv(Proposal>), + /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task + ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote>), + /// Receive a `ViewSyncCommitVote` from the network; received by a relay in the view sync task + ViewSyncCommitVoteRecv(ViewSyncCommitVote>), + /// Receive a `ViewSyncFinalizeVote` from the network; received by a relay in the view sync task + ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote>), + + + + /// Send a `ViewSyncPreCommitVote` from the network; emitted by a replica in the view sync task + ViewSyncPreCommitVoteSend(ViewSyncPreCommitVote>), + /// Send a `ViewSyncCommitVote` from the network; emitted by a replica in the view sync task + ViewSyncCommitVoteSend(ViewSyncCommitVote>), + /// Send a `ViewSyncFinalizeVote` from the network; emitted by a replica in the view sync task + ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote>), + + + /// Receive a `ViewSyncPreCommitCertificate2` from the network; received by a replica in the view sync task ViewSyncPreCommitCertificate2Recv(ViewSyncPreCommitCertificate2), /// Receive a `ViewSyncCommitCertificate2` from the network; received by a replica in the view sync task diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 4dfe99e8fb..e094bb333c 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -10,8 +10,8 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - traits::{election::Membership, network::ConsensusIntentEvent}, - vote::ViewSyncVoteAccumulator, vote2::HasViewNumber, + traits::{election::Membership, network::ConsensusIntentEvent, node_implementation::{QuorumMembership, ViewSyncMembership}}, + vote::ViewSyncVoteAccumulator, vote2::{HasViewNumber, VoteAccumulator2, Vote2}, simple_vote::ViewSyncPreCommitVote, simple_certificate::ViewSyncPreCommitCertificate2, }; use bitvec::prelude::*; @@ -31,7 +31,7 @@ use hotshot_types::{ vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration, marker::PhantomData}; use tracing::{debug, error, instrument, info}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync @@ -201,8 +201,14 @@ pub struct ViewSyncRelayTaskState< pub exchange: Arc>, /// Vote accumulator #[allow(clippy::type_complexity)] - pub accumulator: Either, ViewSyncCertificate>, - /// Our node id; for logging + pub accumulator: Either< + VoteAccumulator2< + TYPES, + ViewSyncPreCommitVote>, + ViewSyncPreCommitCertificate2, + >, + ViewSyncPreCommitCertificate2, + >, /// Our node id; for logging pub id: u64, } @@ -247,7 +253,7 @@ where /// Handles incoming events for the main view sync task pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { info!( "Received view sync cert for phase {:?}", certificate @@ -352,16 +358,11 @@ where return; } - let new_accumulator = ViewSyncVoteAccumulator { - pre_commit_vote_outcomes: HashMap::new(), - commit_vote_outcomes: HashMap::new(), - finalize_vote_outcomes: HashMap::new(), - - success_threshold: self.exchange.success_threshold(), - failure_threshold: self.exchange.failure_threshold(), - + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], + phantom: PhantomData, }; let mut relay_state = ViewSyncRelayTaskState { @@ -947,50 +948,34 @@ where ViewSyncRelayTaskState, ) { match event { - HotShotEvent::ViewSyncVoteRecv(vote) => { + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { if self.accumulator.is_right() { return (Some(HotShotTaskCompleted::ShutDown), self); } - let (vote_internal, phase) = match vote.clone() { - ViewSyncVote::PreCommit(vote_internal) => { - (vote_internal, ViewSyncPhase::PreCommit) - } - ViewSyncVote::Commit(vote_internal) => (vote_internal, ViewSyncPhase::Commit), - ViewSyncVote::Finalize(vote_internal) => { - (vote_internal, ViewSyncPhase::Finalize) - } - }; - - debug!( - "Recved vote for next view {}, and relay {}, and phase {:?}", - *vote_internal.round, vote_internal.relay, phase - ); - // Ignore this vote if we are not the correct relay if !self .exchange - .is_leader(vote_internal.round + vote_internal.relay) + .is_leader(vote.get_data().round + vote.get_data().relay) { debug!("We are not the correct relay"); return (None, self); } let view_sync_data = ViewSyncData:: { - round: vote_internal.round, + round: vote.get_data().round, relay: self.exchange.public_key().to_bytes(), } .commit(); debug!( "Accumulating view sync vote {} relay {}", - *vote_internal.round, vote_internal.relay + *vote.get_data().round, vote.get_data().relay ); - let accumulator = self.exchange.accumulate_vote( - self.accumulator.left().unwrap(), + let accumulator = self.accumulator.left().unwrap().accumulate( &vote, - &view_sync_data, + self.exchange.membership() ); self.accumulator = match accumulator { diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index f4f1177686..d523bfca31 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -42,7 +42,7 @@ pub struct VIDData { /// Data used for a Pre Commit vote. pub struct ViewSyncPreCommitData { /// The relay this vote is intended for - pub relay: EncodedPublicKey, + pub relay: u64, /// The view number we are trying to sync on pub round: TYPES::Time, } @@ -50,7 +50,7 @@ pub struct ViewSyncPreCommitData { /// Data used for a Commit vote. pub struct ViewSyncCommitData { /// The relay this vote is intended for - pub relay: EncodedPublicKey, + pub relay: u64, /// The view number we are trying to sync on pub round: TYPES::Time, } @@ -58,7 +58,7 @@ pub struct ViewSyncCommitData { /// Data used for a Finalize vote. pub struct ViewSyncFinalizeData { /// The relay this vote is intended for - pub relay: EncodedPublicKey, + pub relay: u64, /// The view number we are trying to sync on pub round: TYPES::Time, } From 572f8ccb86cf7a6514944d44222225676111aec8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 10:25:10 -0500 Subject: [PATCH 0349/1393] remove vote token except for view sync --- hotshot-qc/src/bit_vector_old.rs | 8 +- .../src/traits/election/static_committee.rs | 17 +- task-impls/src/consensus.rs | 286 +++++++++--------- task-impls/src/da.rs | 84 ++--- task-impls/src/vid.rs | 91 +++--- testing/tests/view_sync_task.rs | 5 +- types/src/traits/election.rs | 36 +-- types/src/traits/signature_key.rs | 8 +- types/src/vote2.rs | 14 +- 9 files changed, 278 insertions(+), 271 deletions(-) diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 36ba155da8..9fda83cbd7 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -12,7 +12,7 @@ use ark_std::{ use bitvec::prelude::*; use ethereum_types::U256; use generic_array::GenericArray; -use hotshot_types::traits::qc::QuorumCertificate; +use hotshot_types::traits::{qc::QuorumCertificate, signature_key::StakeTableEntryType}; use jf_primitives::{ errors::{PrimitivesError, PrimitivesError::ParameterError}, signatures::AggregateableSignatureSchemes, @@ -35,6 +35,12 @@ pub struct StakeTableEntry { pub stake_amount: U256, } +impl StakeTableEntryType for StakeTableEntry { + fn get_stake(&self) -> U256 { + self.stake_amount + } +} + /// Public parameters of [`BitVectorQC`] #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] pub struct QCParams { diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 8252ac2562..d581ea9775 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -105,7 +105,6 @@ where let res = self.nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } - /// Simply make the partial signature fn make_vote_token( &self, @@ -124,6 +123,22 @@ where let signature = PUBKEY::sign(private_key, &message); Ok(Some(StaticVoteToken { signature, pub_key })) } + fn has_stake(&self, pub_key: &PUBKEY) -> bool { + let entry = pub_key.get_stake_table_entry(1u64); + self.committee_nodes_with_stake.contains(&entry) + } + + fn get_stake( + &self, + pub_key: &::SignatureKey, + ) -> Option<::StakeTableEntry> { + let entry = pub_key.get_stake_table_entry(1u64); + if self.committee_nodes_with_stake.contains(&entry) { + Some(entry) + } else { + None + } + } fn validate_vote_token( &self, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index efb43212cf..3b39de4a6a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -28,7 +28,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, - election::{ConsensusExchange, QuorumExchangeType}, + election::{ConsensusExchange, Membership, QuorumExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{ CommitteeEx, NodeImplementation, NodeType, QuorumEx, QuorumMembership, TimeoutEx, @@ -40,7 +40,6 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote2::{Certificate2, HasViewNumber, VoteAccumulator2}, }; - use tracing::warn; use snafu::Snafu; @@ -382,75 +381,76 @@ where #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] async fn vote_if_able(&self) -> bool { + if !self + .quorum_exchange + .membership() + .has_stake(self.quorum_exchange.public_key()) + { + debug!( + "We were not chosen for consensus committee on {:?}", + self.cur_view + ); + return false; + } if let Some(proposal) = &self.current_proposal { // ED Need to account for the genesis DA cert if proposal.justify_qc.is_genesis && proposal.view_number == TYPES::Time::new(1) { // warn!("Proposal is genesis!"); let view = TYPES::Time::new(*proposal.view_number); - let vote_token = self.quorum_exchange.make_vote_token(view); + let justify_qc = proposal.justify_qc.clone(); + let parent = if justify_qc.is_genesis { + self.genesis_leaf().await + } else { + self.consensus + .read() + .await + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + }; - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for consensus committee on {:?}", view); - } - Ok(Some(_vote_token)) => { - let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis { - self.genesis_leaf().await - } else { - self.consensus - .read() - .await - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - }; - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", justify_qc.get_data().leaf_commit, proposal.view_number, ); - return false; - }; - let parent_commitment = parent.commit(); - - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: proposal.justify_qc.clone(), - parent_commitment, - block_header: proposal.block_header.clone(), - block_payload: None, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), - }; - let vote = - QuorumVote::>::create_signed_vote( - QuorumData { leaf_commit: leaf.commit() }, - view, - self.quorum_exchange.public_key(), - self.quorum_exchange.private_key(), - ); - let message = GeneralConsensusMessage::::Vote(vote); + return false; + }; + let parent_commitment = parent.commit(); - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view_number() + 1 - ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) - .await; - return true; - } - } + let leaf: Leaf<_> = Leaf { + view_number: view, + justify_qc: proposal.justify_qc.clone(), + parent_commitment, + block_header: proposal.block_header.clone(), + block_payload: None, + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), + }; + let vote = + QuorumVote::>::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + self.quorum_exchange.public_key(), + self.quorum_exchange.private_key(), + ); + let message = GeneralConsensusMessage::::Vote(vote); + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + 1 + ); + self.event_stream + .publish(HotShotEvent::QuorumVoteSend(vote)) + .await; + return true; } } @@ -458,84 +458,76 @@ where // ED Need to update the view number this is stored under? if let Some(cert) = self.da_certs.get(&(proposal.get_view_number())) { let view = cert.view_number; - let vote_token = self.quorum_exchange.make_vote_token(view); // TODO: do some of this logic without the vote token check, only do that when voting. - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for consensus committee on {:?}", view); - } - Ok(Some(_vote_token)) => { - let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis { - self.genesis_leaf().await - } else { - self.consensus - .read() - .await - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - }; - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( + let justify_qc = proposal.justify_qc.clone(); + let parent = if justify_qc.is_genesis { + self.genesis_leaf().await + } else { + self.consensus + .read() + .await + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + }; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", justify_qc.get_data().leaf_commit, proposal.view_number, ); - return false; - }; - let parent_commitment = parent.commit(); - - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: proposal.justify_qc.clone(), - parent_commitment, - block_header: proposal.block_header.clone(), - block_payload: None, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), - }; - - // Validate the DAC. - let message = if cert.is_valid_cert(self.committee_exchange.membership()) { - // Validate the block payload commitment for non-genesis DAC. - if !cert.is_genesis - && cert.get_data().payload_commit - != proposal.block_header.payload_commitment() - { - error!("Block payload commitment does not equal parent commitment"); - return false; - } - let vote = + return false; + }; + let parent_commitment = parent.commit(); + + let leaf: Leaf<_> = Leaf { + view_number: view, + justify_qc: proposal.justify_qc.clone(), + parent_commitment, + block_header: proposal.block_header.clone(), + block_payload: None, + rejected: Vec::new(), + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), + proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), + }; + + // Validate the DAC. + let message = if cert.is_valid_cert(self.committee_exchange.membership()) { + // Validate the block payload commitment for non-genesis DAC. + if !cert.is_genesis + && cert.get_data().payload_commit + != proposal.block_header.payload_commitment() + { + error!("Block payload commitment does not equal parent commitment"); + return false; + } + let vote = QuorumVote::>::create_signed_vote( QuorumData { leaf_commit: leaf.commit() }, view, self.quorum_exchange.public_key(), self.quorum_exchange.private_key(), ); - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert, self.cur_view ); - return false; - }; - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view_number() - ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) - .await; - return true; - } - } + GeneralConsensusMessage::::Vote(vote) + } else { + error!( + "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", + cert, self.cur_view + ); + return false; + }; + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + ); + self.event_stream + .publish(HotShotEvent::QuorumVoteSend(vote)) + .await; + return true; } } info!( @@ -1208,28 +1200,28 @@ where if self.cur_view >= view { return; } - let vote_token = self.timeout_exchange.make_vote_token(view); - - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for consensus committee on {:?}", view); - } - Ok(Some(_vote_token)) => { - let vote = TimeoutVote2::create_signed_vote( - TimeoutData { view }, - view, - self.timeout_exchange.public_key(), - self.timeout_exchange.private_key(), - ); - - self.event_stream - .publish(HotShotEvent::TimeoutVoteSend(vote)) - .await; - } + if !self + .timeout_exchange + .membership() + .has_stake(self.timeout_exchange.public_key()) + { + debug!( + "We were not chosen for consensus committee on {:?}", + self.cur_view + ); + return; } + + let vote = TimeoutVote2::create_signed_vote( + TimeoutData { view }, + view, + self.timeout_exchange.public_key(), + self.timeout_exchange.private_key(), + ); + + self.event_stream + .publish(HotShotEvent::TimeoutVoteSend(vote)) + .await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0679540b29..cee575ffb1 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -231,49 +231,49 @@ where return None; } - let vote_token = self.committee_exchange.make_vote_token(view); - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for DA committee on {:?}", view); - } - Ok(Some(_vote_token)) => { - // Generate and send vote - let vote = DAVote2::create_signed_vote( - DAData { - payload_commit: payload_commitment, - }, - view, - self.committee_exchange.public_key(), - self.committee_exchange.private_key(), - ); - - // ED Don't think this is necessary? - // self.cur_view = view; - - debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - self.event_stream - .publish(HotShotEvent::DAVoteSend(vote)) - .await; - let mut consensus = self.consensus.write().await; - - // Ensure this view is in the view map for garbage collection, but do not overwrite if - // there is already a view there: the replica task may have inserted a `Leaf` view which - // contains strictly more information. - consensus.state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { - block: payload_commitment, - }, - }); - - // Record the block payload we have promised to make available. - consensus - .saved_block_payloads - .insert(proposal.data.block_payload); - } + if !self + .committee_exchange + .membership() + .has_stake(self.committee_exchange.public_key()) + { + debug!( + "We were not chosen for consensus committee on {:?}", + self.cur_view + ); + return None; } + // Generate and send vote + let vote = DAVote2::create_signed_vote( + DAData { + payload_commit: payload_commitment, + }, + view, + self.committee_exchange.public_key(), + self.committee_exchange.private_key(), + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); + self.event_stream + .publish(HotShotEvent::DAVoteSend(vote)) + .await; + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: payload_commitment, + }, + }); + + // Record the block payload we have promised to make available. + consensus + .saved_block_payloads + .insert(proposal.data.block_payload); } HotShotEvent::DAVoteRecv(vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number(),); diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 56e829c33f..28142cdf6d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -19,7 +19,7 @@ use hotshot_types::{ message::{Message, SequencingMessage}, traits::{ consensus_api::ConsensusApi, - election::ConsensusExchange, + election::{ConsensusExchange, Membership}, node_implementation::{NodeImplementation, NodeType, VIDEx}, signature_key::SignatureKey, state::ConsensusTime, @@ -300,51 +300,52 @@ where return None; } - let vote_token = self.vid_exchange.make_vote_token(view); - match vote_token { - Err(e) => { - error!("Failed to generate vote token for {:?} {:?}", view, e); - } - Ok(None) => { - debug!("We were not chosen for VID quorum on {:?}", view); - } - Ok(Some(_vote_token)) => { - // Generate and send vote - let vote = VIDVote2::create_signed_vote( - VIDData { - payload_commit: payload_commitment, - }, - view, - self.vid_exchange.public_key(), - self.vid_exchange.private_key(), - ); - - // ED Don't think this is necessary? - // self.cur_view = view; - - debug!( - "Sending vote to the VID leader {:?}", - vote.get_view_number() - ); - self.event_stream - .publish(HotShotEvent::VidVoteSend(vote)) - .await; - let mut consensus = self.consensus.write().await; - - // Ensure this view is in the view map for garbage collection, but do not overwrite if - // there is already a view there: the replica task may have inserted a `Leaf` view which - // contains strictly more information. - consensus.state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { - block: payload_commitment, - }, - }); - - // Record the block we have promised to make available. - // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_block_payloads.insert(proposal.data.block_payload); - } + if !self + .vid_exchange + .membership() + .has_stake(self.vid_exchange.public_key()) + { + debug!( + "We were not chosen for consensus committee on {:?}", + self.cur_view + ); + return None; } + + // Generate and send vote + let vote = VIDVote2::create_signed_vote( + VIDData { + payload_commit: payload_commitment, + }, + view, + self.vid_exchange.public_key(), + self.vid_exchange.private_key(), + ); + + // ED Don't think this is necessary? + // self.cur_view = view; + + debug!( + "Sending vote to the VID leader {:?}", + vote.get_view_number() + ); + self.event_stream + .publish(HotShotEvent::VidVoteSend(vote)) + .await; + let mut consensus = self.consensus.write().await; + + // Ensure this view is in the view map for garbage collection, but do not overwrite if + // there is already a view there: the replica task may have inserted a `Leaf` view which + // contains strictly more information. + consensus.state_map.entry(view).or_insert(View { + view_inner: ViewInner::DA { + block: payload_commitment, + }, + }); + + // Record the block we have promised to make available. + // TODO https://github.com/EspressoSystems/HotShot/issues/1692 + // consensus.saved_block_payloads.insert(proposal.data.block_payload); } HotShotEvent::VidCertRecv(_) => { // RM TODO diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 2e131c8703..c419153bd8 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -6,7 +6,7 @@ use hotshot_types::{ data::ViewNumber, traits::{ consensus_api::ConsensusSharedApi, - election::{ConsensusExchange, ViewSyncExchangeType}, + election::{ConsensusExchange, Membership, ViewSyncExchangeType}, node_implementation::ExchangesType, state::ConsensusTime, }, @@ -41,7 +41,8 @@ async fn test_view_sync_task() { let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); let relay_pub_key = api.public_key().to_bytes(); let vote_token = view_sync_exchange - .make_vote_token(ViewNumber::new(5)) + .membership() + .make_vote_token(ViewNumber::new(5), api.private_key()) .unwrap_or_else(|_| panic!("Error making vote token")) .unwrap_or_else(|| panic!("Not chosen for the committee")); let vote_data_internal: ViewSyncData = ViewSyncData { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 642673b571..b8ebb06d98 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -229,6 +229,16 @@ pub trait Membership: priv_key: &::PrivateKey, ) -> Result, ElectionError>; + /// Check if a key has stake + fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; + + /// Get the stake table entry for a public key, returns `None` if the + /// key is not in the table + fn get_stake( + &self, + pub_key: &TYPES::SignatureKey, + ) -> Option<::StakeTableEntry>; + /// Checks the claims of a received vote token /// /// # Errors @@ -303,18 +313,6 @@ pub trait ConsensusExchange: Send + Sync { self.membership().total_nodes() } - /// Attempts to generate a vote token for participation at time `view_number`. - /// - /// # Errors - /// When unable to make a vote token because not part of the committee - fn make_vote_token( - &self, - view_number: TYPES::Time, - ) -> std::result::Result, ElectionError> { - self.membership() - .make_vote_token(view_number, self.private_key()) - } - /// The committee which votes on proposals. fn membership(&self) -> &Self::Membership; @@ -411,13 +409,6 @@ impl< fn network(&self) -> &NETWORK { &self.network } - fn make_vote_token( - &self, - view_number: TYPES::Time, - ) -> std::result::Result, ElectionError> { - self.membership - .make_vote_token(view_number, &self.private_key) - } fn membership(&self) -> &Self::Membership { &self.membership @@ -514,13 +505,6 @@ impl< fn network(&self) -> &NETWORK { &self.network } - fn make_vote_token( - &self, - view_number: TYPES::Time, - ) -> std::result::Result, ElectionError> { - self.membership - .make_vote_token(view_number, &self.private_key) - } fn membership(&self) -> &Self::Membership { &self.membership diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index d5141816f5..08958ed3c1 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -33,6 +33,11 @@ impl AsRef<[u8]> for EncodedSignature { self.0.as_slice() } } +/// Type representing stake table entries in a `StakeTable` +pub trait StakeTableEntryType { + /// Get the stake value + fn get_stake(&self) -> U256; +} /// Trait for abstracting public key signatures pub trait SignatureKey: @@ -52,7 +57,8 @@ pub trait SignatureKey: /// The private key type for this signature algorithm type PrivateKey: Send + Sync + Sized + Clone; /// The type of the entry that contain both public key and stake value - type StakeTableEntry: Send + type StakeTableEntry: StakeTableEntryType + + Send + Sync + Sized + Clone diff --git a/types/src/vote2.rs b/types/src/vote2.rs index e510bef4c6..1c0cc98023 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -18,7 +18,7 @@ use crate::{ traits::{ election::Membership, node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, + signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey, StakeTableEntryType}, }, }; @@ -112,7 +112,9 @@ impl< } // TODO: Lookup the actual stake - let stake_table_entry: <::SignatureKey as SignatureKey>::StakeTableEntry = key.get_stake_table_entry(1u64); + let Some(stake_table_entry) = membership.get_stake(&key) else { + return Either::Left(self); + }; let stake_table = membership.get_committee_qc_stake_table(); let vote_node_id = stake_table .iter() @@ -131,7 +133,7 @@ impl< let (total_stake_casted, total_vote_map) = self .vote_outcomes .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); + .or_insert_with(|| (U256::from(0), BTreeMap::new())); // Check for duplicate vote // TODO ED Re-encoding signature key to bytes until we get rid of EncodedKey @@ -148,13 +150,13 @@ impl< self.sig_lists.push(original_signature); // TODO: Get the stake from the stake table entry. - *total_stake_casted += 1; + *total_stake_casted += stake_table_entry.get_stake(); total_vote_map.insert( encoded_key.clone(), (vote.get_signature(), vote.get_data_commitment()), ); - if *total_stake_casted >= CERT::threshold(membership) { + if *total_stake_casted >= CERT::threshold(membership).into() { // Assemble QC let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = ::get_public_parameter( @@ -184,7 +186,7 @@ impl< type VoteMap2 = HashMap< COMMITMENT, ( - u64, + U256, BTreeMap, ), >; From d411fe9febd9c802971a5327a8fbce3c84439ff7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 10:39:38 -0500 Subject: [PATCH 0350/1393] remove TODO --- types/src/vote2.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 1c0cc98023..886809c2fc 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -111,7 +111,6 @@ impl< return Either::Left(self); } - // TODO: Lookup the actual stake let Some(stake_table_entry) = membership.get_stake(&key) else { return Either::Left(self); }; From 6722cc41ad50e30aebc9eb772fb64ce612064e0b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 10:57:24 -0500 Subject: [PATCH 0351/1393] remove ValidatingLeaf --- hotshot/src/traits/storage/memory_storage.rs | 13 +- types/src/data.rs | 177 ------------------- 2 files changed, 5 insertions(+), 185 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 58d56a6c55..baf1b83118 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -121,7 +121,7 @@ mod test { use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::{fake_commitment, genesis_proposer_id, ValidatingLeaf, ViewNumber}, + data::{fake_commitment, genesis_proposer_id, Leaf, ViewNumber}, simple_certificate::QuorumCertificate2, traits::{node_implementation::NodeType, state::dummy::DummyState, state::ConsensusTime}, }; @@ -154,17 +154,15 @@ mod test { type StateType = DummyState; } - #[instrument(skip(rng))] fn random_stored_view( - rng: &mut dyn rand::RngCore, view_number: ::Time, - ) -> StoredView> { + ) -> StoredView> { let payload = VIDBlockPayload::genesis(); let header = VIDBlockHeader { block_number: 0, payload_commitment: payload.commit(), }; - let dummy_leaf_commit = fake_commitment::>(); + let dummy_leaf_commit = fake_commitment::>(); let data = hotshot_types::simple_vote::QuorumData { leaf_commit: dummy_leaf_commit, }; @@ -180,7 +178,7 @@ mod test { }, header, Some(payload), - DummyState::random(rng), + (), dummy_leaf_commit, Vec::new(), genesis_proposer_id(), @@ -194,9 +192,8 @@ mod test { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn memory_storage() { - let mut rng = rand::thread_rng(); let storage = MemoryStorage::construct_tmp_storage().unwrap(); - let genesis = random_stored_view(&mut rng, ::Time::genesis()); + let genesis = random_stored_view(::Time::genesis()); storage .append_single_view(genesis.clone()) .await diff --git a/types/src/data.rs b/types/src/data.rs index a6c5cf05fa..5115e6ba8e 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -21,7 +21,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Serializatio use bincode::Options; use commit::{Commitment, Committable}; use derivative::Derivative; -use espresso_systems_common::hotshot::tag; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; @@ -374,46 +373,6 @@ pub trait TestableLeaf { ) -> <::BlockPayload as BlockPayload>::Transaction; } -/// This is the consensus-internal analogous concept to a block, and it contains the block proper, -/// as well as the hash of its parent `Leaf`. -/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` -#[derive(Serialize, Deserialize, Clone, Debug, Derivative)] -#[serde(bound(deserialize = ""))] -#[derivative(Hash, PartialEq, Eq)] -pub struct ValidatingLeaf { - /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, - - /// Number of leaves before this one in the chain - pub height: u64, - - /// Per spec, justification - pub justify_qc: QuorumCertificate2, - - /// The hash of the parent `Leaf` - /// So we can ask if it extends - pub parent_commitment: Commitment, - - /// BlockPayload leaf wants to apply - pub deltas: TYPES::BlockPayload, - - /// What the state should be AFTER applying `self.deltas` - pub state: TYPES::StateType, - - /// Transactions that were marked for rejection while collecting deltas - pub rejected: Vec<::Transaction>, - - /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats - #[derivative(PartialEq = "ignore")] - #[derivative(Hash = "ignore")] - pub timestamp: i128, - - /// the proposer id of the leaf - #[derivative(PartialEq = "ignore")] - #[derivative(Hash = "ignore")] - pub proposer_id: EncodedPublicKey, -} - /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. /// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` @@ -468,111 +427,6 @@ impl Hash for Leaf { self.rejected.hash(state); } } -impl Display for ValidatingLeaf { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "view: {:?}, height: {:?}, justify: {}", - self.view_number, self.height, self.justify_qc - ) - } -} - -impl LeafType for ValidatingLeaf { - type NodeType = TYPES; - type MaybeState = TYPES::StateType; - - fn new( - view_number: ::Time, - justify_qc: QuorumCertificate2, - deltas: ::BlockPayload, - state: ::StateType, - ) -> Self { - Self { - view_number, - height: 0, - justify_qc, - parent_commitment: fake_commitment(), - deltas, - state, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: genesis_proposer_id(), - } - } - - fn get_view_number(&self) -> TYPES::Time { - self.view_number - } - - fn get_height(&self) -> u64 { - self.height - } - - fn get_justify_qc(&self) -> QuorumCertificate2 { - self.justify_qc.clone() - } - - fn get_parent_commitment(&self) -> Commitment { - self.parent_commitment - } - - fn get_block_header(&self) -> &::BlockHeader { - unimplemented!("Unimplemented for validating consensus which will be removed."); - } - - fn fill_block_payload( - &mut self, - _block_payload: ::BlockPayload, - ) -> Result<(), InconsistentPayloadCommitmentError<::BlockPayload>> - { - unimplemented!("Unimplemented for validating consensus which will be removed."); - } - - fn get_block_payload(&self) -> Option<::BlockPayload> { - unimplemented!("Unimplemented for validating consensus which will be removed."); - } - - fn get_state(&self) -> Self::MaybeState { - self.state.clone() - } - - fn get_rejected(&self) -> Vec<::Transaction> { - self.rejected.clone() - } - - fn get_timestamp(&self) -> i128 { - self.timestamp - } - - fn get_proposer_id(&self) -> EncodedPublicKey { - self.proposer_id.clone() - } - - fn from_stored_view(_stored_view: StoredView) -> Self { - unimplemented!("Unimplemented for validating consensus which will be removed."); - } -} - -impl TestableLeaf for ValidatingLeaf -where - TYPES::StateType: TestableState, - TYPES::BlockPayload: TestableBlock, -{ - type NodeType = TYPES; - - fn create_random_transaction( - &self, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> <::BlockPayload as BlockPayload>::Transaction { - ::create_random_transaction( - Some(&self.state), - rng, - padding, - ) - } -} impl Display for Leaf { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -790,37 +644,6 @@ pub fn serialize_signature2( signatures_bytes.extend(sig_bytes.as_slice()); signatures_bytes } -impl Committable for ValidatingLeaf { - fn commit(&self) -> commit::Commitment { - let signatures_bytes = if self.justify_qc.is_genesis { - let mut bytes = vec![]; - bytes.extend("genesis".as_bytes()); - bytes - } else { - serialize_signature2::(self.justify_qc.signatures.as_ref().unwrap()) - }; - - commit::RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", *self.view_number) - .u64_field("height", self.height) - .field("parent Leaf commitment", self.parent_commitment) - .field("block payload commitment", self.deltas.commit()) - .field("state commitment", self.state.commit()) - .constant_str("justify_qc view number") - .u64(*self.justify_qc.view_number) - .field( - "justify_qc leaf commitment", - self.justify_qc.get_data().leaf_commit, - ) - .constant_str("justify_qc signatures") - .var_size_bytes(&signatures_bytes) - .finalize() - } - - fn tag() -> String { - tag::LEAF.to_string() - } -} impl Committable for Leaf { fn commit(&self) -> commit::Commitment { From b7b0b6793efc454bbe1f69f433f621946c18d632 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Fri, 10 Nov 2023 11:41:19 -0500 Subject: [PATCH 0352/1393] [hotshot-state-prover] circuit for verification stake table commitment (#2022) * threshold checking circuit * change some variables to public * comments & tests(TBD) * doing test * test * Adding back the bls_comm * small refactor for test * use `STAKE_TABLE_CAPACITY` * commitment computation * Move state definition to another crate * Formatting * merge imports * test; bug fixes * augment test & comments * Addressing comments * duplicated items * final conflict resolve * consistent naming * one more good path test --- hotshot-stake-table/src/vec_based.rs | 28 +++---- hotshot-state-prover/src/circuit.rs | 121 +++++++++++++++++++++++---- types/src/traits/state.rs | 2 +- 3 files changed, 117 insertions(+), 34 deletions(-) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index da3175bde4..c3212ca955 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -41,7 +41,7 @@ impl Default for StakeTableSnapshot { pub struct StakeTable where K1: Eq + Hash + Clone + ToFields, - K2: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + Default + ToFields, F: RescueParameter, { /// The most up-to-date stake table, where the incoming transactions shall be performed on. @@ -72,7 +72,7 @@ where impl StakeTableScheme for StakeTable where K1: Eq + Hash + Clone + ToFields, - K2: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + Default + ToFields, F: RescueParameter, { /// The stake table is indexed by BLS key @@ -236,7 +236,7 @@ where impl StakeTable where K1: Eq + Hash + Clone + ToFields, - K2: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + Default + ToFields, F: RescueParameter, { /// Initiating an empty stake table. @@ -293,15 +293,17 @@ where } /// Helper function to recompute the stake table commitment for head version + /// Commitment of a stake table is a triple (bls_keys_comm, schnorr_keys_comm, stake_amount_comm) + /// TODO(Chengyu): The BLS verification keys doesn't implement Default. Thus we directly pad with `F::default()`. fn compute_head_comm(&mut self) -> (F, F, F) { + let padding_len = STAKE_TABLE_CAPACITY - self.head.bls_keys.len(); // Compute rescue hash for bls keys let mut bls_comm_preimage = self .head .bls_keys .iter() - .map(|key| key.to_fields()) - .collect::>() - .concat(); + .flat_map(|key| key.to_fields()) + .collect::>(); bls_comm_preimage.resize( STAKE_TABLE_CAPACITY * >::SIZE, F::default(), @@ -309,17 +311,13 @@ where let bls_comm = VariableLengthRescueCRHF::::evaluate(bls_comm_preimage).unwrap()[0]; // Compute rescue hash for Schnorr keys - let mut schnorr_comm_preimage = self + let schnorr_comm_preimage = self .head .schnorr_keys .iter() - .map(|key| key.to_fields()) - .collect::>() - .concat(); - schnorr_comm_preimage.resize( - STAKE_TABLE_CAPACITY * >::SIZE, - F::default(), - ); + .chain(ark_std::iter::repeat(&K2::default()).take(padding_len)) + .flat_map(|key| key.to_fields()) + .collect::>(); let schnorr_comm = VariableLengthRescueCRHF::::evaluate(schnorr_comm_preimage).unwrap()[0]; @@ -361,7 +359,7 @@ where impl Default for StakeTable where K1: Eq + Hash + Clone + ToFields, - K2: Eq + Hash + Clone + ToFields, + K2: Eq + Hash + Clone + Default + ToFields, F: RescueParameter, { fn default() -> Self { diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index b96284d595..d116dc378b 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -12,16 +12,14 @@ use hotshot_types::traits::{ }; use jf_plonk::errors::PlonkError; use jf_primitives::{ - circuit::signature::schnorr::VerKeyVar, + circuit::{rescue::RescueNativeGadget, signature::schnorr::VerKeyVar}, rescue::RescueParameter, signatures::{ bls_over_bn254::VerKey as BLSVerKey, schnorr::{Signature, VerKey as SchnorrVerKey}, }, }; -use jf_relation::{ - errors::CircuitError, gadgets::ecc::TEPoint, BoolVar, Circuit, PlonkCircuit, Variable, -}; +use jf_relation::{errors::CircuitError, BoolVar, Circuit, PlonkCircuit, Variable}; /// Lossy conversion of a U256 into a field element. pub(crate) fn u256_to_field(v: &U256) -> F { @@ -37,8 +35,8 @@ pub struct StakeTableEntryVar { pub stake_amount: Variable, } -/// HotShot state Variable -/// The stake table commitment is a triple (bls_keys_comm, stake_amount_comm, schnorr_keys_comm). +/// Light client state Variable +/// The stake table commitment is a triple (bls_keys_comm, schnorr_keys_comm, stake_amount_comm). #[derive(Clone, Debug)] pub struct LightClientStateVar { pub view_number_var: Variable, @@ -58,7 +56,7 @@ where /// A function that takes as input: /// - stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) /// - schnorr signatures of the updated states (`Vec`) - /// - updated hotshot state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) + /// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) /// - signer bit vector /// - quorum threshold /// checks that @@ -68,7 +66,7 @@ where pub fn build( stake_table: &ST, _sigs: &[Signature

], - _hotshot_state: &LightClientState, + lightclient_state: &LightClientState, signer_bit_vec: &[bool], threshold: &U256, ) -> Result<(PlonkCircuit, Vec), PlonkError> @@ -81,7 +79,7 @@ where // Dummy circuit implementation, fill in the details later // TODO(Chengyu): // - [DONE] the signer's accumulated weight exceeds the quorum threshold - // - The commitment of the stake table as [https://www.notion.so/espressosys/Light-Client-Contract-a416ebbfa9f342d79fccbf90de9706ef?pvs=4#6c0e26d753cd42e9bb0f22db1c519f45] + // - [DONE] The commitment of the stake table as [https://www.notion.so/espressosys/Light-Client-Contract-a416ebbfa9f342d79fccbf90de9706ef?pvs=4#6c0e26d753cd42e9bb0f22db1c519f45] // - Batch Schnorr signature verification // creating variables for stake table entries @@ -97,8 +95,7 @@ where }) }) .collect::, CircuitError>>()?; - let dummy_ver_key_var = - VerKeyVar(circuit.create_constant_point_variable(TEPoint::default())?); + let dummy_ver_key_var = VerKeyVar(circuit.neutral_point_variable()); stake_table_var.resize( STAKE_TABLE_CAPACITY, StakeTableEntryVar { @@ -116,8 +113,31 @@ where let threshold = u256_to_field::(threshold); let threshold_var = circuit.create_public_variable(threshold)?; - // TODO(Chengyu): put in the hotshot state - let public_inputs = vec![threshold]; + let view_number_f = F::from(lightclient_state.view_number as u64); + let block_height_f = F::from(lightclient_state.block_height as u64); + let lightclient_state_var = LightClientStateVar { + view_number_var: circuit.create_public_variable(view_number_f)?, + block_height_var: circuit.create_public_variable(block_height_f)?, + block_comm_var: circuit.create_public_variable(lightclient_state.block_comm)?, + fee_ledger_comm_var: circuit + .create_public_variable(lightclient_state.fee_ledger_comm)?, + stake_table_comm_var: ( + circuit.create_public_variable(lightclient_state.stake_table_comm.0)?, + circuit.create_public_variable(lightclient_state.stake_table_comm.1)?, + circuit.create_public_variable(lightclient_state.stake_table_comm.2)?, + ), + }; + + let public_inputs = vec![ + threshold, + view_number_f, + block_height_f, + lightclient_state.block_comm, + lightclient_state.fee_ledger_comm, + lightclient_state.stake_table_comm.0, + lightclient_state.stake_table_comm.1, + lightclient_state.stake_table_comm.2, + ]; // Checking whether the accumulated weight exceeds the quorum threshold let mut signed_amount_var = (0..STAKE_TABLE_CAPACITY / 2) @@ -143,7 +163,34 @@ where let acc_amount_var = circuit.sum(&signed_amount_var)?; circuit.enforce_leq(threshold_var, acc_amount_var)?; - // circuit.mul_add(wires_in, q_muls) + let schnorr_ver_key_preimage_vars = stake_table_var + .iter() + .flat_map(|var| [var.schnorr_ver_key.0.get_x(), var.schnorr_ver_key.0.get_y()]) + .collect::>(); + let schnorr_ver_key_comm = RescueNativeGadget::::rescue_sponge_with_padding( + &mut circuit, + &schnorr_ver_key_preimage_vars, + 1, + )?[0]; + circuit.enforce_equal( + schnorr_ver_key_comm, + lightclient_state_var.stake_table_comm_var.1, + )?; + + let stake_amount_preimage_vars = stake_table_var + .iter() + .map(|var| var.stake_amount) + .collect::>(); + let stake_amount_comm = RescueNativeGadget::::rescue_sponge_with_padding( + &mut circuit, + &stake_amount_preimage_vars, + 1, + )?[0]; + circuit.enforce_equal( + stake_amount_comm, + lightclient_state_var.stake_table_comm_var.2, + )?; + circuit.finalize_for_arithmetization()?; Ok((circuit, public_inputs)) } @@ -155,7 +202,7 @@ mod tests { use ark_ed_on_bn254::EdwardsConfig as Config; use ethereum_types::U256; use hotshot_stake_table::vec_based::StakeTable; - use hotshot_types::traits::stake_table::StakeTableScheme; + use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; use jf_primitives::signatures::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, SchnorrSignatureScheme, SignatureScheme, @@ -199,6 +246,14 @@ mod tests { let keys = key_pairs_for_testing(); let st = stake_table_for_testing(&keys); + let lightclient_state = LightClientState { + view_number: 0, + block_height: 0, + block_comm: F::default(), + fee_ledger_comm: F::default(), + stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), + }; + // bit vector with total weight 26 let bit_vec = [ true, true, true, false, true, true, false, false, true, false, @@ -207,9 +262,19 @@ mod tests { let (circuit, public_inputs) = StateUpdateBuilder::::build( &st, &[], - &LightClientState::default(), + &lightclient_state, &bit_vec, - &U256::from(25u32), + &U256::from(26u32), + ) + .unwrap(); + assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); + + let (circuit, public_inputs) = StateUpdateBuilder::::build( + &st, + &[], + &lightclient_state, + &bit_vec, + &U256::from(10u32), ) .unwrap(); assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); @@ -222,7 +287,7 @@ mod tests { let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( &st, &[], - &LightClientState::default(), + &lightclient_state, &bad_bit_vec, &U256::from(25u32), ) @@ -230,5 +295,25 @@ mod tests { assert!(bad_circuit .check_circuit_satisfiability(&public_inputs) .is_err()); + + // bad path: bad stake table commitment + let bad_lightclient_state = LightClientState { + view_number: 0, + block_height: 0, + block_comm: F::default(), + fee_ledger_comm: F::default(), + stake_table_comm: (F::default(), F::default(), F::default()), + }; + let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( + &st, + &[], + &bad_lightclient_state, + &bit_vec, + &U256::from(26u32), + ) + .unwrap(); + assert!(bad_circuit + .check_circuit_satisfiability(&public_inputs) + .is_err()); } } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 6a845088f2..d0b2c437f8 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -220,7 +220,7 @@ pub mod dummy { } } -/// A serialized consensus state for proof generation +/// A serialized light client state for proof generation #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)] pub struct LightClientState { /// Current view number From 70ac0cbe83ffeaccbbebb2e4ee7609d69959014c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:54:53 -0500 Subject: [PATCH 0353/1393] Compiling with new PreCommit logic --- task-impls/src/events.rs | 6 +- task-impls/src/view_sync.rs | 151 ++++++++++++++---------- types/src/simple_vote.rs | 12 +- types/src/traits/node_implementation.rs | 3 +- 4 files changed, 98 insertions(+), 74 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 45ff234f86..d679ae6f5d 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -69,11 +69,11 @@ pub enum HotShotEvent> { ViewSyncCertificateRecv(Proposal>), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task - ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote>), + ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote>), /// Receive a `ViewSyncCommitVote` from the network; received by a relay in the view sync task - ViewSyncCommitVoteRecv(ViewSyncCommitVote>), + ViewSyncCommitVoteRecv(ViewSyncCommitVote>), /// Receive a `ViewSyncFinalizeVote` from the network; received by a relay in the view sync task - ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote>), + ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote>), diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index e094bb333c..2e56ee9048 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -10,8 +10,14 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - traits::{election::Membership, network::ConsensusIntentEvent, node_implementation::{QuorumMembership, ViewSyncMembership}}, - vote::ViewSyncVoteAccumulator, vote2::{HasViewNumber, VoteAccumulator2, Vote2}, simple_vote::ViewSyncPreCommitVote, simple_certificate::ViewSyncPreCommitCertificate2, + simple_certificate::ViewSyncPreCommitCertificate2, + simple_vote::ViewSyncPreCommitVote, + traits::{ + election::Membership, network::ConsensusIntentEvent, + node_implementation::ViewSyncMembership, + }, + vote::ViewSyncVoteAccumulator, + vote2::{HasViewNumber, Vote2, VoteAccumulator2}, }; use bitvec::prelude::*; @@ -31,8 +37,8 @@ use hotshot_types::{ vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; -use std::{collections::HashMap, sync::Arc, time::Duration, marker::PhantomData}; -use tracing::{debug, error, instrument, info}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +use tracing::{debug, error, info, instrument}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync pub enum ViewSyncPhase { @@ -66,8 +72,9 @@ pub struct ViewSyncTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - // TODO ED Remove this when exchanges is done, but we don't actually use this commitment type anymore. + // TODO ED Remove this when exchanges is done, but we don't actually use this commitment type anymore. Commitment = Commitment>, + // Membership = ViewSyncMembership >, { /// Registry to register sub tasks @@ -199,6 +206,8 @@ pub struct ViewSyncRelayTaskState< pub event_stream: ChannelStream>, /// View sync exchange pub exchange: Arc>, + + pub membership: ViewSyncMembership, /// Vote accumulator #[allow(clippy::type_complexity)] pub accumulator: Either< @@ -208,7 +217,8 @@ pub struct ViewSyncRelayTaskState< ViewSyncPreCommitCertificate2, >, ViewSyncPreCommitCertificate2, - >, /// Our node id; for logging + >, + /// Our node id; for logging pub id: u64, } @@ -253,11 +263,8 @@ where /// Handles incoming events for the main view sync task pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { - info!( - "Received view sync cert for phase {:?}", - certificate - ); + HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + info!("Received view sync cert for phase {:?}", certificate); // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it @@ -266,7 +273,9 @@ where return; } - if let Some(replica_task) = self.replica_task_map.get(&certificate.get_view_number()) { + if let Some(replica_task) = + self.replica_task_map.get(&certificate.get_view_number()) + { // Forward event then return debug!("Forwarding message"); self.event_stream @@ -368,6 +377,7 @@ where let mut relay_state = ViewSyncRelayTaskState { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), + membership: self.exchange.membership().clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -949,72 +959,85 @@ where ) { match event { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - if self.accumulator.is_right() { - return (Some(HotShotTaskCompleted::ShutDown), self); - } - // Ignore this vote if we are not the correct relay + // TODO ED Replace exchange with membership if !self .exchange .is_leader(vote.get_data().round + vote.get_data().relay) { - debug!("We are not the correct relay"); + info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); return (None, self); } - let view_sync_data = ViewSyncData:: { - round: vote.get_data().round, - relay: self.exchange.public_key().to_bytes(), - } - .commit(); - debug!( - "Accumulating view sync vote {} relay {}", - *vote.get_data().round, vote.get_data().relay + "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + *vote.get_data().round, + vote.get_data().relay ); - let accumulator = self.accumulator.left().unwrap().accumulate( - &vote, - self.exchange.membership() - ); + match self.accumulator { + Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), + Left(accumulator) => { + match accumulator.accumulate(&vote, self.exchange.membership()) { + Left(new_accumulator) => self.accumulator = Either::Left(new_accumulator), + Right(certificate) => { + self.event_stream + .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( + certificate.clone(), + )) + .await; + self.accumulator = Right(certificate); - self.accumulator = match accumulator { - Left(new_accumulator) => Either::Left(new_accumulator), - Right(certificate) => { - let signature = - self.exchange.sign_certificate_proposal(certificate.clone()); - let message = Proposal { - data: certificate.clone(), - signature, - }; - self.event_stream - .publish(HotShotEvent::ViewSyncCertificateSend( - message, - self.exchange.public_key().clone(), - )) - .await; - - // Reset accumulator for new certificate - let new_accumulator = ViewSyncVoteAccumulator { - pre_commit_vote_outcomes: HashMap::new(), - commit_vote_outcomes: HashMap::new(), - finalize_vote_outcomes: HashMap::new(), - - success_threshold: self.exchange.success_threshold(), - failure_threshold: self.exchange.failure_threshold(), - - sig_lists: Vec::new(), - signers: bitvec![0; self.exchange.total_nodes()], - }; - either::Left(new_accumulator) + return (Some(HotShotTaskCompleted::ShutDown), self); + } + } } }; - - if phase == ViewSyncPhase::Finalize { - (Some(HotShotTaskCompleted::ShutDown), self) - } else { - (None, self) - } + (None, self) + + // let accumulator = self + // .accumulator + // .left() + // .unwrap() + // .accumulate(&vote, self.exchange.membership()); + + // self.accumulator = match accumulator { + // Left(new_accumulator) => Either::Left(new_accumulator), + // Right(certificate) => { + // let signature = + // self.exchange.sign_certificate_proposal(certificate.clone()); + // let message = Proposal { + // data: certificate.clone(), + // signature, + // }; + // self.event_stream + // .publish(HotShotEvent::ViewSyncCertificateSend( + // message, + // self.exchange.public_key().clone(), + // )) + // .await; + + // // Reset accumulator for new certificate + // let new_accumulator = ViewSyncVoteAccumulator { + // pre_commit_vote_outcomes: HashMap::new(), + // commit_vote_outcomes: HashMap::new(), + // finalize_vote_outcomes: HashMap::new(), + + // success_threshold: self.exchange.success_threshold(), + // failure_threshold: self.exchange.failure_threshold(), + + // sig_lists: Vec::new(), + // signers: bitvec![0; self.exchange.total_nodes()], + // }; + // either::Left(new_accumulator) + // } + // }; + + // if phase == ViewSyncPhase::Finalize { + // (Some(HotShotTaskCompleted::ShutDown), self) + // } else { + // (None, self) + // } } _ => (None, self), } diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index d523bfca31..32e9aebf8c 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -182,30 +182,30 @@ impl Committable for VIDData { /// This implements commit for all the types which contain a view and relay public key. fn view_and_relay_commit( view: TYPES::Time, - relay: &EncodedPublicKey, + relay: u64, tag: &str, ) -> Commitment { let builder = commit::RawCommitmentBuilder::new(tag); builder - .var_size_field("Relay public key", &relay.0) .u64(*view) + .u64(relay) .finalize() } impl Committable for ViewSyncPreCommitData { fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, &self.relay, "View Sync Precommit") + view_and_relay_commit::(self.round, self.relay, "View Sync Precommit") } } impl Committable for ViewSyncFinalizeData { fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, &self.relay, "View Sync Finalize") + view_and_relay_commit::(self.round, self.relay, "View Sync Finalize") } } impl Committable for ViewSyncCommitData { fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, &self.relay, "View Sync Commit") + view_and_relay_commit::(self.round, self.relay, "View Sync Commit") } } @@ -217,7 +217,7 @@ impl = SimpleVote, M>; /// DA vote type alias pub type DAVote2 = SimpleVote::BlockPayload>, M>; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ce6a517cc..042c1a6a7f 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -620,7 +620,8 @@ pub type CommitteeMembership = as ConsensusExchange>>::Membership; /// Protocol for determining membership in a view sync committee. -pub type ViewSyncMembership = QuorumMembership; +pub type ViewSyncMembership = + as ConsensusExchange>>::Membership; /// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels pub type QuorumNetwork = as CommunicationChannel< From eb2c4dac6a02fb4a5d5085b689c6d0e525e4c7df Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 13:39:12 -0500 Subject: [PATCH 0354/1393] lint passing except for test stuff --- hotshot/examples/infra/mod.rs | 30 ++- hotshot/examples/libp2p/types.rs | 8 +- hotshot/examples/web-server-da/types.rs | 10 +- hotshot/src/lib.rs | 56 +++--- hotshot/src/tasks/mod.rs | 20 +- .../src/traits/election/static_committee.rs | 27 +-- hotshot/src/traits/storage/memory_storage.rs | 40 ++-- hotshot/src/types/handle.rs | 36 ++-- task-impls/src/consensus.rs | 99 +++++---- task-impls/src/da.rs | 38 ++-- task-impls/src/events.rs | 10 +- task-impls/src/network.rs | 29 +-- task-impls/src/transactions.rs | 30 +-- task-impls/src/vid.rs | 38 ++-- task-impls/src/view_sync.rs | 55 ++--- testing/src/node_ctx.rs | 12 +- testing/src/node_types.rs | 17 +- testing/src/overall_safety_task.rs | 31 +-- testing/src/per_node_safety_task.rs | 6 +- testing/src/test_runner.rs | 7 +- testing/tests/atomic_storage.rs | 1 - testing/tests/memory_network.rs | 6 +- types/src/consensus.rs | 16 +- types/src/data.rs | 188 +++++------------- types/src/event.rs | 12 +- types/src/message.rs | 12 +- types/src/simple_certificate.rs | 15 +- types/src/simple_vote.rs | 10 +- types/src/traits/consensus_api.rs | 20 +- types/src/traits/election.rs | 28 +-- types/src/traits/node_implementation.rs | 177 +++++++++-------- types/src/traits/storage.rs | 53 ++--- types/src/utils.rs | 25 +-- 33 files changed, 460 insertions(+), 702 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 321684fa22..7fbf6a85a3 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -115,14 +115,13 @@ pub async fn run_orchestrator< VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, - Leaf = Leaf, Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, Leaf, - QuorumProposal>, + QuorumProposal, MEMBERSHIP, QUORUMNETWORK, Message, @@ -137,7 +136,7 @@ pub async fn run_orchestrator< >, VIDExchange>, >, - Storage = MemoryStorage>, + Storage = MemoryStorage, ConsensusMessage = SequencingMessage, >, >( @@ -180,14 +179,13 @@ pub trait RunDA< VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, - Leaf = Leaf, Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, Leaf, - QuorumProposal>, + QuorumProposal, MEMBERSHIP, QUORUMNETWORK, Message, @@ -202,7 +200,7 @@ pub trait RunDA< >, VIDExchange>, >, - Storage = MemoryStorage>, + Storage = MemoryStorage, ConsensusMessage = SequencingMessage, >, > where @@ -223,9 +221,8 @@ pub trait RunDA< /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { let genesis_block = TYPES::BlockPayload::genesis(); - let initializer = - hotshot::HotShotInitializer::>::from_genesis(genesis_block) - .expect("Couldn't generate genesis block"); + let initializer = hotshot::HotShotInitializer::::from_genesis(genesis_block) + .expect("Couldn't generate genesis block"); let config = self.get_config(); @@ -426,14 +423,13 @@ impl< MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, - Leaf = Leaf, Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, Leaf, - QuorumProposal>, + QuorumProposal, MEMBERSHIP, WebCommChannel, Message, @@ -458,7 +454,7 @@ impl< Message, >, >, - Storage = MemoryStorage>, + Storage = MemoryStorage, ConsensusMessage = SequencingMessage, >, > @@ -580,14 +576,13 @@ impl< MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, - Leaf = Leaf, Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, Leaf, - QuorumProposal>, + QuorumProposal, MEMBERSHIP, Libp2pCommChannel, Message, @@ -612,7 +607,7 @@ impl< Message, >, >, - Storage = MemoryStorage>, + Storage = MemoryStorage, ConsensusMessage = SequencingMessage, >, > @@ -808,14 +803,13 @@ pub async fn main_entry_point< VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, - Leaf = Leaf, Exchanges = Exchanges< TYPES, Message, QuorumExchange< TYPES, Leaf, - QuorumProposal>, + QuorumProposal, MEMBERSHIP, QUORUMNETWORK, Message, @@ -830,7 +824,7 @@ pub async fn main_entry_point< >, VIDExchange>, >, - Storage = MemoryStorage>, + Storage = MemoryStorage, ConsensusMessage = SequencingMessage, >, RUNDA: RunDA, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 542e406f0e..a5ef290f5c 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -22,9 +22,7 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = Leaf; -pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; +pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = Libp2pCommChannel; pub type VIDNetwork = Libp2pCommChannel; pub type QuorumNetwork = Libp2pCommChannel; @@ -32,20 +30,18 @@ pub type ViewSyncNetwork = Libp2pCommChannel; -pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumProposal = QuorumProposal; pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; - type Leaf = Leaf; type Exchanges = Exchanges< DemoTypes, Message, QuorumExchange< DemoTypes, - Self::Leaf, ThisQuorumProposal, ThisMembership, QuorumNetwork, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index d5de57386e..dfcd6a54b1 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -22,9 +22,7 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisLeaf = Leaf; -pub type ThisMembership = - GeneralStaticCommittee::SignatureKey>; +pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = WebCommChannel; pub type VIDNetwork = WebCommChannel; pub type QuorumNetwork = WebCommChannel; @@ -32,20 +30,18 @@ pub type ViewSyncNetwork = WebCommChannel; pub type ThisDAProposal = DAProposal; -pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumProposal = QuorumProposal; pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; - type Leaf = Leaf; + type Storage = MemoryStorage; type Exchanges = Exchanges< DemoTypes, Message, QuorumExchange< DemoTypes, - Self::Leaf, ThisQuorumProposal, ThisMembership, QuorumNetwork, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 89203d9074..2ca2553ec1 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -60,7 +60,7 @@ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, - data::{DAProposal, Leaf, LeafType, QuorumProposal}, + data::{DAProposal, Leaf, QuorumProposal}, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, @@ -77,7 +77,6 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, - State, }, vote::ViewSyncData, HotShotConfig, @@ -126,13 +125,13 @@ pub struct SystemContextInner> { pub exchanges: Arc, /// Sender for [`Event`]s - event_sender: RwLock>>>, + event_sender: RwLock>>>, /// the metrics that the implementor is using. _metrics: Arc, /// The hotstuff implementation - consensus: Arc>>, + consensus: Arc>>, /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the /// latter of which is only applicable for sequencing consensus. @@ -140,7 +139,7 @@ pub struct SystemContextInner> { // global_registry: GlobalRegistry, /// Access to the output event stream. - output_event_stream: ChannelStream>, + output_event_stream: ChannelStream>, /// access to the internal event stream, in case we need to, say, shut something down internal_event_stream: ChannelStream>, @@ -169,7 +168,7 @@ impl> SystemContext { config: HotShotConfig, storage: I::Storage, exchanges: I::Exchanges, - initializer: HotShotInitializer, + initializer: HotShotInitializer, metrics: ConsensusMetricsValue, ) -> Result> { debug!("Creating a new hotshot"); @@ -332,25 +331,25 @@ impl> SystemContext { /// # Panics /// /// Panics if internal state for consensus is inconsistent - pub async fn get_state(&self) -> ::MaybeState { + pub async fn get_state(&self) { self.inner .consensus .read() .await .get_decided_leaf() - .get_state() + .get_state(); } /// Returns a copy of the consensus struct #[must_use] - pub fn get_consensus(&self) -> Arc>> { + pub fn get_consensus(&self) -> Arc>> { self.inner.consensus.clone() } /// Returns a copy of the last decided leaf /// # Panics /// Panics if internal state for consensus is inconsistent - pub async fn get_decided_leaf(&self) -> I::Leaf { + pub async fn get_decided_leaf(&self) -> Leaf { self.inner.consensus.read().await.get_decided_leaf() } @@ -374,7 +373,7 @@ impl> SystemContext { config: HotShotConfig, storage: I::Storage, exchanges: I::Exchanges, - initializer: HotShotInitializer, + initializer: HotShotInitializer, metrics: ConsensusMetricsValue, ) -> Result< ( @@ -518,7 +517,7 @@ impl> SystemContext { #[async_trait] pub trait HotShotType> { /// Get the [`hotstuff`] field of [`HotShot`]. - fn consensus(&self) -> &Arc>>; + fn consensus(&self) -> &Arc>>; /// Spawn all tasks that operate on the given [`HotShot`]. /// @@ -621,18 +620,14 @@ impl< BlockPayload = VIDBlockPayload, Transaction = VIDTransaction, >, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, MEMBERSHIP: Membership, > HotShotType for SystemContext where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, @@ -661,12 +656,12 @@ where TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, { - fn consensus(&self) -> &Arc>> { + fn consensus(&self) -> &Arc>> { &self.inner.consensus } @@ -794,7 +789,7 @@ pub struct HotShotConsensusApi> { } #[async_trait] -impl> ConsensusSharedApi +impl> ConsensusSharedApi for HotShotConsensusApi { fn total_nodes(&self) -> NonZeroUsize { @@ -823,7 +818,7 @@ impl> ConsensusSharedApi) { + async fn send_event(&self, event: Event) { debug!(?event, "send_event"); let mut event_sender = self.inner.event_sender.write().await; if let Some(sender) = &*event_sender { @@ -845,7 +840,7 @@ impl> ConsensusSharedApi, ) -> std::result::Result<(), hotshot_types::traits::storage::StorageError> { let view_to_insert = StoredView::from(leaf); let storage = &self.inner.storage; @@ -860,7 +855,7 @@ impl> ConsensusSharedApi>, - > ConsensusApi for HotShotConsensusApi + > ConsensusApi for HotShotConsensusApi { async fn send_direct_message( &self, @@ -992,27 +987,26 @@ impl< } /// initializer struct for creating starting block -pub struct HotShotInitializer> { +pub struct HotShotInitializer { /// the leaf specified initialization - inner: LEAF, + inner: Leaf, } -impl> HotShotInitializer { +impl HotShotInitializer { /// initialize from genesis /// # Errors /// If we are unable to apply the genesis block to the default state pub fn from_genesis(genesis_payload: TYPES::BlockPayload) -> Result> { - let state = TYPES::StateType::initialize(); let time = TYPES::Time::genesis(); - let justify_qc = QuorumCertificate2::::genesis(); + let justify_qc = QuorumCertificate2::::genesis(); Ok(Self { - inner: LEAF::new(time, justify_qc, genesis_payload, state), + inner: Leaf::new(time, justify_qc, genesis_payload), }) } /// reload previous state based on most recent leaf - pub fn from_reload(anchor_leaf: LEAF) -> Self { + pub fn from_reload(anchor_leaf: Leaf) -> Self { Self { inner: anchor_leaf } } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b8eba956f8..120294e8b1 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -62,7 +62,7 @@ pub enum GlobalEvent { /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_message_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, PROPOSAL: ProposalType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> @@ -161,7 +161,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_event_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, PROPOSAL: ProposalType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> @@ -228,18 +228,18 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, - output_stream: ChannelStream>, + output_stream: ChannelStream>, handle: SystemContextHandle, ) -> TaskRunner where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, CommitteeEx: @@ -247,7 +247,7 @@ where TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { @@ -328,7 +328,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_vid_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -390,7 +390,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_da_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -451,7 +451,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -511,7 +511,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_view_sync_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 8252ac2562..291ca93121 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -2,13 +2,10 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use espresso_systems_common::hotshot::tag; use hotshot_signature_key::bn254::BLSPubKey; -use hotshot_types::{ - data::LeafType, - traits::{ - election::{Checked, ElectionConfig, ElectionError, Membership, VoteToken}, - node_implementation::NodeType, - signature_key::{EncodedSignature, SignatureKey}, - }, +use hotshot_types::traits::{ + election::{Checked, ElectionConfig, ElectionError, Membership, VoteToken}, + node_implementation::NodeType, + signature_key::{EncodedSignature, SignatureKey}, }; #[allow(deprecated)] use serde::{Deserialize, Serialize}; @@ -18,23 +15,19 @@ use tracing::debug; /// Dummy implementation of [`Membership`] #[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub struct GeneralStaticCommittee, PUBKEY: SignatureKey> { +pub struct GeneralStaticCommittee { /// All the nodes participating and their stake nodes_with_stake: Vec, /// The nodes on the static committee and their stake committee_nodes_with_stake: Vec, /// Node type phantom _type_phantom: PhantomData, - /// Leaf phantom - _leaf_phantom: PhantomData, } /// static committee using a vrf kp -pub type StaticCommittee = GeneralStaticCommittee; +pub type StaticCommittee = GeneralStaticCommittee; -impl, PUBKEY: SignatureKey> - GeneralStaticCommittee -{ +impl GeneralStaticCommittee { /// Creates a new dummy elector #[must_use] pub fn new(_nodes: &[PUBKEY], nodes_with_stake: Vec) -> Self { @@ -42,7 +35,6 @@ impl, PUBKEY: SignatureKey> nodes_with_stake: nodes_with_stake.clone(), committee_nodes_with_stake: nodes_with_stake, _type_phantom: PhantomData, - _leaf_phantom: PhantomData, } } } @@ -85,8 +77,8 @@ pub struct StaticElectionConfig { impl ElectionConfig for StaticElectionConfig {} -impl, PUBKEY: SignatureKey + 'static> Membership - for GeneralStaticCommittee +impl Membership + for GeneralStaticCommittee where TYPES: NodeType< SignatureKey = PUBKEY, @@ -158,7 +150,6 @@ where nodes_with_stake: keys_qc, committee_nodes_with_stake, _type_phantom: PhantomData, - _leaf_phantom: PhantomData, } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index baf1b83118..9c7fd14d4c 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -3,13 +3,10 @@ //! This module provides a non-persisting, dummy adapter for the [`Storage`] trait use async_lock::RwLock; use async_trait::async_trait; -use hotshot_types::{ - data::LeafType, - traits::{ - node_implementation::NodeType, - storage::{ - Result, Storage, StorageError, StorageState, StoredView, TestableStorage, ViewEntry, - }, +use hotshot_types::traits::{ + node_implementation::NodeType, + storage::{ + Result, Storage, StorageError, StorageState, StoredView, TestableStorage, ViewEntry, }, }; use std::{ @@ -18,21 +15,21 @@ use std::{ }; /// Internal state for a [`MemoryStorage`] -struct MemoryStorageInternal> { +struct MemoryStorageInternal { /// The views that have been stored - stored: BTreeMap>, + stored: BTreeMap>, /// The views that have failed failed: BTreeSet, } /// In memory, ephemeral, storage for a [`HotShot`](crate::HotShot) instance #[derive(Clone)] -pub struct MemoryStorage> { +pub struct MemoryStorage { /// The inner state of this [`MemoryStorage`] - inner: Arc>>, + inner: Arc>>, } -impl> MemoryStorage { +impl MemoryStorage { /// Create a new instance of the memory storage with the given block and state #[must_use] pub fn empty() -> Self { @@ -47,14 +44,12 @@ impl> MemoryStorage> TestableStorage - for MemoryStorage -{ +impl TestableStorage for MemoryStorage { fn construct_tmp_storage() -> Result { Ok(Self::empty()) } - async fn get_full_state(&self) -> StorageState { + async fn get_full_state(&self) -> StorageState { let inner = self.inner.read().await; StorageState { stored: inner.stored.clone(), @@ -64,10 +59,8 @@ impl> TestableStorage> Storage - for MemoryStorage -{ - async fn append(&self, views: Vec>) -> Result { +impl Storage for MemoryStorage { + async fn append(&self, views: Vec>) -> Result { let mut inner = self.inner.write().await; for view in views { match view { @@ -97,7 +90,7 @@ impl> Storage Ok(old_stored.len() + old_failed.len()) } - async fn get_anchored_view(&self) -> Result> { + async fn get_anchored_view(&self) -> Result> { let inner = self.inner.read().await; let last = inner .stored @@ -154,9 +147,7 @@ mod test { type StateType = DummyState; } - fn random_stored_view( - view_number: ::Time, - ) -> StoredView> { + fn random_stored_view(view_number: ::Time) -> StoredView { let payload = VIDBlockPayload::genesis(); let header = VIDBlockHeader { block_number: 0, @@ -178,7 +169,6 @@ mod test { }, header, Some(payload), - (), dummy_leaf_commit, Vec::new(), genesis_proposer_id(), diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 56ad4e8e50..82f7449869 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -14,10 +14,10 @@ use hotshot_task::{ BoxSyncFuture, }; use hotshot_task_impls::events::HotShotEvent; +use hotshot_types::data::Leaf; use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ consensus::Consensus, - data::LeafType, error::HotShotError, event::EventType, message::MessageKind, @@ -46,7 +46,7 @@ pub struct SystemContextHandle> { /// /// This is kept around as an implementation detail, as the [`BroadcastSender::handle_async`] /// method is needed to generate new receivers to expose to the user - pub(crate) output_event_stream: ChannelStream>, + pub(crate) output_event_stream: ChannelStream>, /// access to the internal ev ent stream, in case we need to, say, shut something down pub(crate) internal_event_stream: ChannelStream>, /// registry for controlling tasks @@ -79,7 +79,7 @@ impl + 'static> SystemContextHandl // /// # Errors // /// // /// Will return [`HotShotError::NetworkFault`] if the underlying [`SystemContext`] has been closed. - // pub async fn next_event(&mut self) -> Result, HotShotError> { + // pub async fn next_event(&mut self) -> Result, HotShotError> { // let result = self.stream_output.recv_async().await; // match result { // Ok(result) => Ok(result), @@ -91,7 +91,7 @@ impl + 'static> SystemContextHandl // /// # Errors // /// // /// Will return [`HotShotError::NetworkFault`] if the underlying [`HotShot`] instance has shut down - // pub fn try_next_event(&mut self) -> Result>, HotShotError> { + // pub fn try_next_event(&mut self) -> Result>, HotShotError> { // self.stream.await // // let result = self.stream_output.try_recv(); // // Ok(result) @@ -103,13 +103,13 @@ impl + 'static> SystemContextHandl /// /// Will return [`HotShotError::NetworkFault`] if the underlying [`HotShot`] instance has been shut /// down. - // pub async fn available_events(&mut self) -> Result>, HotShotError> { + // pub async fn available_events(&mut self) -> Result>, HotShotError> { // let mut stream = self.output_stream; - // let _ = > as StreamExt/* :: */>::next(&mut *stream); + // let _ = > as StreamExt/* :: */>::next(&mut *stream); // let mut output = vec![]; // Loop to pull out all the outputs // loop { - // let _ = > as StreamExt/* :: */>::next(stream); + // let _ = > as StreamExt/* :: */>::next(stream); // let _ = FutureExt::::next(*self.output_stream).await; // match FutureExt output.push(x), @@ -125,8 +125,8 @@ impl + 'static> SystemContextHandl /// obtains a stream to expose to the user pub async fn get_event_stream( &mut self, - filter: FilterEvent>, - ) -> (impl Stream>, StreamId) { + filter: FilterEvent>, + ) -> (impl Stream>, StreamId) { self.output_event_stream.subscribe(filter).await } @@ -136,8 +136,8 @@ impl + 'static> SystemContextHandl /// - type wrapper pub async fn get_event_stream_known_impl( &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { + filter: FilterEvent>, + ) -> (UnboundedStream>, StreamId) { self.output_event_stream.subscribe(filter).await } @@ -158,15 +158,15 @@ impl + 'static> SystemContextHandl /// # Errors /// /// Returns an error if the underlying `Storage` returns an error - pub async fn get_state(&self) -> ::MaybeState { - self.hotshot.get_state().await + pub async fn get_state(&self) { + self.hotshot.get_state().await; } /// Gets most recent decided leaf /// # Panics /// /// Panics if internal consensus is in an inconsistent state. - pub async fn get_decided_leaf(&self) -> I::Leaf { + pub async fn get_decided_leaf(&self) -> Leaf { self.hotshot.get_decided_leaf().await } @@ -190,8 +190,8 @@ impl + 'static> SystemContextHandl let _anchor = self.storage(); if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { if anchor_leaf.view_number == TYPES::Time::genesis() { - let leaf: I::Leaf = I::Leaf::from_stored_view(anchor_leaf); - let mut qc = QuorumCertificate2::::genesis(); + let leaf = Leaf::from_stored_view(anchor_leaf); + let mut qc = QuorumCertificate2::::genesis(); qc.data = QuorumData { leaf_commit: leaf.commit(), }; @@ -225,7 +225,7 @@ impl + 'static> SystemContextHandl } /// Get the underyling consensus state for this [`SystemContext`] - pub fn get_consensus(&self) -> Arc>> { + pub fn get_consensus(&self) -> Arc>> { self.hotshot.get_consensus() } @@ -280,7 +280,7 @@ impl + 'static> SystemContextHandl #[cfg(feature = "hotshot-testing")] pub fn sign_validating_or_commitment_proposal( &self, - leaf_commitment: &Commitment, + leaf_commitment: &Commitment>, ) -> EncodedSignature { let inner = self.hotshot.inner.clone(); inner diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index abcbfe43a0..15a8b48ee3 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -18,7 +18,7 @@ use hotshot_task::{ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, consensus::{Consensus, View}, - data::{Leaf, LeafType, ProposalType, QuorumProposal}, + data::{Leaf, ProposalType, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, simple_certificate::{ @@ -61,13 +61,13 @@ pub struct ConsensusTaskError {} /// of consensus pub struct ConsensusTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, CommitteeEx: @@ -75,14 +75,14 @@ pub struct ConsensusTaskState< TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { /// The global task registry pub registry: GlobalRegistry, /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>, /// View timeout from config. pub timeout: u64, /// View number this view is executing in. @@ -120,7 +120,7 @@ pub struct ConsensusTaskState< pub event_stream: ChannelStream>, /// Event stream to publish events to the application layer - pub output_event_stream: ChannelStream>, + pub output_event_stream: ChannelStream>, /// All the DA certs we've received for current and future views. pub da_certs: HashMap>, @@ -130,7 +130,7 @@ pub struct ConsensusTaskState< /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync - pub current_proposal: Option>, + pub current_proposal: Option>, // ED Should replace this with config information since we need it anyway /// The node's id @@ -138,20 +138,18 @@ pub struct ConsensusTaskState< } /// State for the vote collection task. This handles the building of a QC from a votes received -pub struct VoteCollectionTaskState< - TYPES: NodeType, - I: NodeImplementation>, -> where +pub struct VoteCollectionTaskState> +where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { @@ -165,10 +163,10 @@ pub struct VoteCollectionTaskState< pub accumulator: Either< VoteAccumulator2< TYPES, - QuorumVote>, - QuorumCertificate2, + QuorumVote>, + QuorumCertificate2, >, - QuorumCertificate2, + QuorumCertificate2, >, /// Accumulator for votes @@ -189,19 +187,18 @@ pub struct VoteCollectionTaskState< pub id: u64, } -impl>> TS - for VoteCollectionTaskState +impl> TS for VoteCollectionTaskState where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { @@ -209,7 +206,7 @@ where #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] -async fn vote_handle>>( +async fn vote_handle>( mut state: VoteCollectionTaskState, event: HotShotEvent, ) -> ( @@ -220,13 +217,13 @@ where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { @@ -332,19 +329,15 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > ConsensusTaskState where TYPES::BlockHeader: BlockHeader, QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, CommitteeEx: @@ -352,7 +345,7 @@ where TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { @@ -432,8 +425,10 @@ where proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), }; let vote = - QuorumVote::>::create_signed_vote( - QuorumData { leaf_commit: leaf.commit() }, + QuorumVote::>::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, view, self.quorum_exchange.public_key(), self.quorum_exchange.private_key(), @@ -513,12 +508,14 @@ where return false; } let vote = - QuorumVote::>::create_signed_vote( - QuorumData { leaf_commit: leaf.commit() }, - view, - self.quorum_exchange.public_key(), - self.quorum_exchange.private_key(), - ); + QuorumVote::>::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + self.quorum_exchange.public_key(), + self.quorum_exchange.private_key(), + ); GeneralConsensusMessage::::Vote(vote) } else { error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert, self.cur_view ); @@ -1248,7 +1245,7 @@ where #[allow(clippy::too_many_lines)] pub async fn publish_proposal_if_able( &mut self, - _qc: QuorumCertificate2, + _qc: QuorumCertificate2, view: TYPES::Time, timeout_certificate: Option>, ) -> bool { @@ -1362,18 +1359,14 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I>, + I: NodeImplementation>, + A: ConsensusApi, > TS for ConsensusTaskState where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, CommitteeEx: @@ -1381,7 +1374,7 @@ where TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { @@ -1406,8 +1399,8 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, >( event: HotShotEvent, mut state: ConsensusTaskState, @@ -1420,7 +1413,7 @@ where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment>, >, CommitteeEx: @@ -1428,7 +1421,7 @@ where TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal>, + Proposal = QuorumProposal, Commitment = Commitment, >, { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b29dab50d5..1944920d54 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -14,7 +14,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{Consensus, View}, - data::{DAProposal, Leaf, ProposalType}, + data::{DAProposal, ProposalType}, message::{Message, Proposal, SequencingMessage}, simple_vote::{DAData, DAVote2}, traits::{ @@ -45,8 +45,8 @@ pub struct ConsensusTaskError {} /// Tracks state of a DA task pub struct DATaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > where CommitteeEx: ConsensusExchange, Commitment = Commitment>, @@ -60,7 +60,7 @@ pub struct DATaskState< pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>, /// the committee exchange pub committee_exchange: Arc>, @@ -76,10 +76,8 @@ pub struct DATaskState< } /// Struct to maintain DA Vote Collection task state -pub struct DAVoteCollectionTaskState< - TYPES: NodeType, - I: NodeImplementation>, -> where +pub struct DAVoteCollectionTaskState> +where CommitteeEx: ConsensusExchange, Commitment = Commitment>, { @@ -103,16 +101,14 @@ pub struct DAVoteCollectionTaskState< pub id: u64, } -impl>> TS - for DAVoteCollectionTaskState -where +impl> TS for DAVoteCollectionTaskState where CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment> { } #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "DA Vote Collection Task", level = "error")] -async fn vote_handle>>( +async fn vote_handle>( mut state: DAVoteCollectionTaskState, event: HotShotEvent, ) -> ( @@ -175,12 +171,8 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > DATaskState where CommitteeEx: @@ -459,12 +451,8 @@ where /// task state implementation for DA Task impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > TS for DATaskState where CommitteeEx: diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index b4be7769be..3c9f11f367 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -3,7 +3,7 @@ use crate::view_sync::ViewSyncPhase; use commit::Commitment; use either::Either; use hotshot_types::{ - data::{DAProposal, VidDisperse}, + data::{DAProposal, Leaf, VidDisperse}, message::Proposal, simple_certificate::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, @@ -24,7 +24,7 @@ pub enum HotShotEvent> { /// A quorum proposal has been received from the network; handled by the consensus task QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(QuorumVote>), + QuorumVoteRecv(QuorumVote>), /// A timeout vote recevied from the network; handled by consensus task TimeoutVoteRecv(TimeoutVote2>), /// Send a timeout vote to the network; emitted by consensus task replicas @@ -38,13 +38,13 @@ pub enum HotShotEvent> { /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(QuorumVote>), + QuorumVoteSend(QuorumVote>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote2>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(Either, TimeoutCertificate2>), + QCFormed(Either, TimeoutCertificate2>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DACSend(DACertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks @@ -75,7 +75,7 @@ pub enum HotShotEvent> { /// Event when the transactions task has a block formed BlockReady(TYPES::BlockPayload, TYPES::Time), /// Event when consensus decided on a leaf - LeafDecided(Vec), + LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader /// /// Like [`DAProposalSend`]. diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 80d1c7d341..5f2a8ba95e 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -7,7 +7,6 @@ use hotshot_task::{ GeneratedStream, Merge, }; use hotshot_types::{ - data::Leaf, message::{ CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, SequencingMessage, @@ -40,7 +39,7 @@ pub enum NetworkTaskKind { /// the network message task state pub struct NetworkMessageTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, > { /// event stream (used for publishing) pub event_stream: ChannelStream>, @@ -48,22 +47,14 @@ pub struct NetworkMessageTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, > TS for NetworkMessageTaskState { } impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, > NetworkMessageTaskState { /// Handle the message. @@ -142,7 +133,7 @@ impl< /// network event task state pub struct NetworkEventTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > { @@ -159,11 +150,7 @@ pub struct NetworkEventTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > TS for NetworkEventTaskState @@ -172,11 +159,7 @@ impl< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > NetworkEventTaskState diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 9aacf68090..7dec0b91b2 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -43,8 +43,8 @@ pub struct ConsensusTaskError {} /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > where QuorumEx: ConsensusExchange>, { @@ -57,7 +57,7 @@ pub struct TransactionTaskState< pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>, /// A list of undecided transactions pub transactions: Arc>>, @@ -80,12 +80,8 @@ pub struct TransactionTaskState< // whereas it's just `TYPES: NodeType` in the second implementation. impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > TransactionTaskState where QuorumEx: ConsensusExchange>, @@ -288,12 +284,8 @@ where // whereas here it's just `TYPES: NodeType`. impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > TransactionTaskState where QuorumEx: ConsensusExchange>, @@ -381,12 +373,8 @@ where /// task state implementation for Transactions Task impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > TS for TransactionTaskState where QuorumEx: ConsensusExchange>, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index b92b50d8e8..06048d4bea 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -15,7 +15,7 @@ use hotshot_task::{ use hotshot_types::traits::{network::ConsensusIntentEvent, node_implementation::VIDMembership}; use hotshot_types::{ consensus::{Consensus, View}, - data::{Leaf, ProposalType}, + data::ProposalType, message::{Message, SequencingMessage}, traits::{ consensus_api::ConsensusApi, @@ -45,8 +45,8 @@ pub struct ConsensusTaskError {} /// Tracks state of a VID task pub struct VIDTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > where VIDEx: ConsensusExchange, Commitment = Commitment>, @@ -60,7 +60,7 @@ pub struct VIDTaskState< pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>>, + pub consensus: Arc>>, /// the VID exchange pub vid_exchange: Arc>, @@ -76,10 +76,8 @@ pub struct VIDTaskState< } /// Struct to maintain VID Vote Collection task state -pub struct VIDVoteCollectionTaskState< - TYPES: NodeType, - I: NodeImplementation>, -> where +pub struct VIDVoteCollectionTaskState> +where VIDEx: ConsensusExchange, Commitment = Commitment>, { @@ -99,11 +97,9 @@ pub struct VIDVoteCollectionTaskState< pub id: u64, } -impl>> TS - for VIDVoteCollectionTaskState -where +impl> TS for VIDVoteCollectionTaskState where VIDEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment> { } @@ -117,7 +113,7 @@ async fn vote_handle( ) where TYPES: NodeType, - I: NodeImplementation>, + I: NodeImplementation, VIDEx: ConsensusExchange, Commitment = Commitment>, { @@ -174,12 +170,8 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > VIDTaskState where VIDEx: @@ -418,12 +410,8 @@ where /// task state implementation for VID Task impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > TS for VIDTaskState where VIDEx: diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 11cba581f0..035dfc72c2 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -18,7 +18,6 @@ use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ certificate::ViewSyncCertificate, - data::Leaf, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, traits::{ consensus_api::ConsensusApi, @@ -60,8 +59,8 @@ pub struct ViewSyncTaskError {} /// Main view sync task state pub struct ViewSyncTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static + std::clone::Clone, + I: NodeImplementation>, + A: ConsensusApi + 'static + std::clone::Clone, > where ViewSyncEx: ViewSyncExchangeType< TYPES, @@ -104,12 +103,8 @@ pub struct ViewSyncTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static + std::clone::Clone, + I: NodeImplementation>, + A: ConsensusApi + 'static + std::clone::Clone, > TS for ViewSyncTaskState where ViewSyncEx: ViewSyncExchangeType< @@ -133,8 +128,8 @@ pub type ViewSyncTaskStateTypes = HSTWithEvent< /// State of a view sync replica task pub struct ViewSyncReplicaTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > where ViewSyncEx: ViewSyncExchangeType< TYPES, @@ -171,12 +166,8 @@ pub struct ViewSyncReplicaTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > TS for ViewSyncReplicaTaskState where ViewSyncEx: ViewSyncExchangeType< @@ -200,7 +191,7 @@ pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< /// State of a view sync relay task pub struct ViewSyncRelayTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, > { /// Event stream to publish events to pub event_stream: ChannelStream>, @@ -215,11 +206,7 @@ pub struct ViewSyncRelayTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, > TS for ViewSyncRelayTaskState { } @@ -234,12 +221,8 @@ pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static + std::clone::Clone, + I: NodeImplementation>, + A: ConsensusApi + 'static + std::clone::Clone, > ViewSyncTaskState where ViewSyncEx: ViewSyncExchangeType< @@ -607,12 +590,8 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, + I: NodeImplementation>, + A: ConsensusApi + 'static, > ViewSyncReplicaTaskState where ViewSyncEx: ViewSyncExchangeType< @@ -940,11 +919,7 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, > ViewSyncRelayTaskState where ViewSyncEx: ViewSyncExchangeType< diff --git a/testing/src/node_ctx.rs b/testing/src/node_ctx.rs index 57a50d6a53..00e886b479 100644 --- a/testing/src/node_ctx.rs +++ b/testing/src/node_ctx.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, sync::Arc}; use hotshot::{traits::TestableNodeImplementation, HotShotError}; -use hotshot_types::{data::LeafType, traits::node_implementation::NodeType}; +use hotshot_types::traits::node_implementation::NodeType; /// context for a round // TODO eventually we want these to just be futures @@ -31,7 +31,7 @@ pub enum ViewStatus), /// The view is a success. - ViewSuccess(ViewSuccess), + ViewSuccess(ViewSuccess), } /// In-progress status of a view. @@ -44,13 +44,13 @@ pub struct ViewFailed(pub Arc>); /// Success status of a view. #[derive(Debug, Clone)] -pub struct ViewSuccess> { +pub struct ViewSuccess { /// state after decide event - pub agreed_state: LEAF::MaybeState, + pub agreed_state: (), /// block after decide event - pub agreed_block: LEAF::DeltasType, + pub agreed_block: LeafBlockPayload>, /// leaf after decide event - pub agreed_leaf: LEAF, + pub agreed_leaf: Leaf, } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index ca065bec14..8a923edcf5 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -126,7 +126,6 @@ pub type SequencingLibp2pExchange = Exchanges< impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage>; - type Leaf = Leaf; type Exchanges = SequencingLibp2pExchange; type ConsensusMessage = SequencingMessage; @@ -345,7 +344,6 @@ impl impl NodeImplementation for MemoryImpl { type Storage = MemoryStorage>; - type Leaf = Leaf; type Exchanges = SequencingMemoryExchange; type ConsensusMessage = SequencingMessage; @@ -486,7 +484,6 @@ impl impl NodeImplementation for WebImpl { type Storage = MemoryStorage>; - type Leaf = Leaf; type Exchanges = SequencingWebExchanges; type ConsensusMessage = SequencingMessage; @@ -508,8 +505,7 @@ pub type CombinedExchange = Exchanges< Message, QuorumExchange< TestTypes, - >::Leaf, - QuorumProposal>, + QuorumProposal, StaticMembership, StaticCombinedQuorumComm, Message, @@ -536,8 +532,7 @@ pub type CombinedExchange = Exchanges< >; impl NodeImplementation for CombinedImpl { - type Storage = MemoryStorage>; - type Leaf = Leaf; + type Storage = MemoryStorage; type Exchanges = CombinedExchange; type ConsensusMessage = SequencingMessage; @@ -554,13 +549,7 @@ impl NodeImplementation for CombinedImpl { } } -impl - TestableExchange< - TestTypes, - >::Leaf, - Message, - > for CombinedExchange -{ +impl TestableExchange> for CombinedExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 058d4768c1..24ef4311ce 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -20,7 +20,8 @@ use hotshot_task::{ MergeN, }; use hotshot_types::{ - data::{LeafBlockPayload, LeafType}, + block_impl::VIDBlockPayload, + data::{Leaf, LeafBlockPayload}, error::RoundTimedoutState, event::{Event, EventType}, simple_certificate::QuorumCertificate2, @@ -87,15 +88,15 @@ impl> TS for OverallSafety /// Result of running a round of consensus #[derive(Debug)] -pub struct RoundResult> { +pub struct RoundResult { /// Transactions that were submitted // pub txns: Vec, /// Nodes that committed this round /// id -> (leaf, qc) - // TODO GG: isn't it infeasible to store a Vec? + // TODO GG: isn't it infeasible to store a Vec>? #[allow(clippy::type_complexity)] - success_nodes: HashMap, QuorumCertificate2)>, + success_nodes: HashMap>, QuorumCertificate2)>, /// Nodes that failed to commit this round pub failed_nodes: HashMap>>>, @@ -106,18 +107,18 @@ pub struct RoundResult> { /// NOTE: technically a map is not needed /// left one anyway for ease of viewing /// leaf -> # entries decided on that leaf - pub leaf_map: HashMap, + pub leaf_map: HashMap, usize>, /// block -> # entries decided on that block - pub block_map: HashMap>, usize>, + pub block_map: HashMap, usize>, /// state -> # entries decided on that state - pub state_map: HashMap<::MaybeState, usize>, + pub state_map: HashMap<(), usize>, pub num_txns_map: HashMap, } -impl> Default for RoundResult { +impl Default for RoundResult { fn default() -> Self { Self { success_nodes: Default::default(), @@ -184,17 +185,17 @@ impl> RoundCtx { } } -impl> RoundResult { +impl RoundResult { /// insert into round result pub fn insert_into_result( &mut self, idx: usize, - result: (Vec, QuorumCertificate2), + result: (Vec>, QuorumCertificate2), maybe_block_size: Option, - ) -> Option { + ) -> Option> { self.success_nodes.insert(idx as u64, result.clone()); - let maybe_leaf: Option = result.0.into_iter().last(); + let maybe_leaf: Option> = result.0.into_iter().last(); if let Some(leaf) = maybe_leaf.clone() { match self.leaf_map.entry(leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { @@ -245,7 +246,7 @@ impl> RoundResult &mut self, threshold: usize, total_num_nodes: usize, - key: LEAF, + key: Leaf, check_leaf: bool, check_state: bool, check_block: bool, @@ -309,8 +310,8 @@ impl> RoundResult } /// generate leaves - pub fn gen_leaves(&self) -> HashMap { - let mut leaves = HashMap::::new(); + pub fn gen_leaves(&self) -> HashMap, usize> { + let mut leaves = HashMap::, usize>::new(); for (leaf_vec, _) in self.success_nodes.values() { let most_recent_leaf = leaf_vec.iter().last(); diff --git a/testing/src/per_node_safety_task.rs b/testing/src/per_node_safety_task.rs index 82acef4042..af20f00b79 100644 --- a/testing/src/per_node_safety_task.rs +++ b/testing/src/per_node_safety_task.rs @@ -160,7 +160,7 @@ // self, // // registry: &mut GlobalRegistry, // // test_event_stream: ChannelStream, -// // hotshot_event_stream: UnboundedStream>, +// // hotshot_event_stream: UnboundedStream>, // ) -> TaskGenerator< // PerNodeSafetyTask // > { @@ -252,7 +252,7 @@ // PerNodeSafetyTaskErr, // GlobalTestEvent, // ChannelStream, -// Event, -// UnboundedStream>, +// Event, +// UnboundedStream>, // PerNodeSafetyTask, // >; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 1988a38da5..439e5c88eb 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -66,7 +66,6 @@ where where I::Exchanges: ExchangesType< TYPES, - I::Leaf, Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, @@ -193,7 +192,6 @@ where where I::Exchanges: ExchangesType< TYPES, - I::Leaf, Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, @@ -205,7 +203,7 @@ where let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); let initializer = - HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); + HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); let networks = (self.launcher.resource_generator.channel_generator)(node_id); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = @@ -232,14 +230,13 @@ where &mut self, networks: Networks, storage: I::Storage, - initializer: HotShotInitializer, + initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, ) -> SystemContext where I::Exchanges: ExchangesType< TYPES, - I::Leaf, Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index 381db09d87..a10bee76ed 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -1,7 +1,6 @@ #![cfg(foo)] use hotshot::{ certificate::QuorumCertificate, - data::LeafType, demos::vdemo::{ random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, VDemoState, diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 511cb4867e..c646c193d6 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -64,8 +64,7 @@ impl NodeType for Test { #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct TestImpl {} -pub type ThisLeaf = Leaf; -pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; +pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = MemoryCommChannel; pub type QuorumNetwork = MemoryCommChannel; pub type ViewSyncNetwork = MemoryCommChannel; @@ -73,14 +72,13 @@ pub type VIDNetwork = MemoryCommChannel; pub type ThisDAProposal = DAProposal; -pub type ThisQuorumProposal = QuorumProposal; +pub type ThisQuorumProposal = QuorumProposal; pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for TestImpl { type Storage = MemoryStorage; - type Leaf = Leaf; type Exchanges = Exchanges< Test, Message, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 92659d2328..6af690228d 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -7,7 +7,7 @@ pub use crate::{ use displaydoc::Display; use crate::{ - data::LeafType, + data::Leaf, error::HotShotError, simple_certificate::QuorumCertificate2, traits::{ @@ -32,10 +32,10 @@ type CommitmentMap = HashMap, T>; /// /// This will contain the state of all rounds. #[derive(custom_debug::Debug)] -pub struct Consensus> { +pub struct Consensus { /// The phases that are currently loaded in memory // TODO(https://github.com/EspressoSystems/hotshot/issues/153): Allow this to be loaded from `Storage`? - pub state_map: BTreeMap>, + pub state_map: BTreeMap>, /// cur_view from pseudocode pub cur_view: TYPES::Time, @@ -46,7 +46,7 @@ pub struct Consensus> { /// Map of leaf hash -> leaf /// - contains undecided leaves /// - includes the MOST RECENT decided leaf - pub saved_leaves: CommitmentMap, + pub saved_leaves: CommitmentMap>, /// Saved block payloads /// @@ -57,7 +57,7 @@ pub struct Consensus> { pub locked_view: TYPES::Time, /// the highqc per spec - pub high_qc: QuorumCertificate2, + pub high_qc: QuorumCertificate2, /// A reference to the metrics trait pub metrics: Arc, @@ -237,7 +237,7 @@ impl Default for ConsensusMetricsValue { } } -impl> Consensus { +impl Consensus { /// increment the current view /// NOTE may need to do gc here pub fn increment_view(&mut self) -> TYPES::Time { @@ -256,7 +256,7 @@ impl> Consensus { mut f: F, ) -> Result<(), HotShotError> where - F: FnMut(&LEAF) -> bool, + F: FnMut(&Leaf) -> bool, { let mut next_leaf = if let Some(view) = self.state_map.get(&start_from) { view.get_leaf_commitment() @@ -342,7 +342,7 @@ impl> Consensus { /// if the last decided view's state does not exist in the state map /// this should never happen. #[must_use] - pub fn get_decided_leaf(&self) -> LEAF { + pub fn get_decided_leaf(&self) -> Leaf { let decided_view_num = self.last_decided_view; let view = self.state_map.get(&decided_view_num).unwrap(); let leaf = view diff --git a/types/src/data.rs b/types/src/data.rs index 5115e6ba8e..60d59f5deb 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -43,9 +43,6 @@ use std::{ Ord, Hash, Serialize, - // std::ops::Add, - // std::ops::Div, - // std::ops::Rem, Deserialize, CanonicalSerialize, CanonicalDeserialize, @@ -164,7 +161,7 @@ pub fn test_srs( /// Proposal to append a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] -pub struct QuorumProposal> { +pub struct QuorumProposal { /// The block header to append pub block_header: TYPES::BlockHeader, @@ -172,7 +169,7 @@ pub struct QuorumProposal> { pub view_number: TYPES::Time, /// Per spec, justification - pub justify_qc: QuorumCertificate2, + pub justify_qc: QuorumCertificate2, /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view pub timeout_certificate: Option>, @@ -195,9 +192,7 @@ impl ProposalType for VidDisperse { } } -impl> ProposalType - for QuorumProposal -{ +impl ProposalType for QuorumProposal { type NodeType = TYPES; fn get_view_number(&self) -> ::Time { self.view_number @@ -261,7 +256,7 @@ pub trait DeltasType: fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; } -/// Error which occurs when [`LeafType::fill_block_payload`] is called with a payload commitment +/// Error which occurs when [`Leaf::fill_block_payload`] is called with a payload commitment /// that does not match the internal payload commitment of the leaf. #[derive(Clone, Copy, Debug, Snafu)] #[snafu(display("the block payload {:?} has commitment {} (expected {})", payload, payload.commit(), commitment))] @@ -272,95 +267,7 @@ pub struct InconsistentPayloadCommitmentError { commitment: Commitment, } -/// An item which is appended to a blockchain. -pub trait LeafType: - Debug - + Display - + Clone - + 'static - + Committable - + Serialize - + for<'a> Deserialize<'a> - + Send - + Sync - + Eq - + std::hash::Hash -{ - /// Type of nodes participating in the network. - type NodeType: NodeType; - // /// Type of block contained by this leaf. - // type DeltasType: DeltasType>; - /// Either state or empty - type MaybeState: Clone - + Debug - + for<'a> Deserialize<'a> - + PartialEq - + Eq - + std::hash::Hash - + Send - + Serialize - + Sync; - - /// Create a new leaf from its components. - fn new( - view_number: LeafTime, - justify_qc: QuorumCertificate2, - deltas: LeafBlockPayload, - state: LeafState, - ) -> Self; - /// Time when this leaf was created. - fn get_view_number(&self) -> LeafTime; - /// Height of this leaf in the chain. - /// - /// Equivalently, this is the number of leaves before this one in the chain. - fn get_height(&self) -> u64; - /// The QC linking this leaf to its parent in the chain. - fn get_justify_qc(&self) -> QuorumCertificate2; - /// Commitment to this leaf's parent. - fn get_parent_commitment(&self) -> Commitment; - /// The block header contained in this leaf. - fn get_block_header(&self) -> &::BlockHeader; - /// A commitment to the block payload contained in this leaf. - fn get_payload_commitment(&self) -> Commitment> { - self.get_block_header().payload_commitment() - } - /// Fill this leaf with the block payload. - /// - /// # Errors - /// - /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()`. - fn fill_block_payload( - &mut self, - block_payload: ::BlockPayload, - ) -> Result<(), InconsistentPayloadCommitmentError<::BlockPayload>>; - /// Optional block payload. - fn get_block_payload(&self) -> Option<::BlockPayload>; - /// The blockchain state after appending this leaf. - fn get_state(&self) -> Self::MaybeState; - /// Transactions rejected or invalidated by the application of this leaf. - fn get_rejected(&self) -> Vec>; - /// Real-world time when this leaf was created. - fn get_timestamp(&self) -> i128; - /// Identity of the network participant who proposed this leaf. - fn get_proposer_id(&self) -> EncodedPublicKey; - /// Create a leaf from information stored about a view. - fn from_stored_view(stored_view: StoredView) -> Self; -} - -/// The [`NodeType`] in a [`LeafType`]. -pub type LeafNode = ::NodeType; -/// The [`StateType`] in a [`LeafType`]. -pub type LeafState = as NodeType>::StateType; -/// The [`BlockHeader`] in a [`LeafType`]. -pub type LeafBlockHeader = as NodeType>::BlockHeader; -/// The [`BlockPayload`] in a [`LeafType`]. -pub type LeafBlockPayload = as NodeType>::BlockPayload; -/// The [`Transaction`] in a [`LeafType`]. -pub type LeafTransaction = as BlockPayload>::Transaction; -/// The [`ConsensusTime`] used by a [`LeafType`]. -pub type LeafTime = as NodeType>::Time; - -/// Additional functions required to use a [`LeafType`] with hotshot-testing. +/// Additional functions required to use a [`Leaf`] with hotshot-testing. pub trait TestableLeaf { /// Type of nodes participating in the network. type NodeType: NodeType; @@ -383,7 +290,7 @@ pub struct Leaf { pub view_number: TYPES::Time, /// Per spec, justification - pub justify_qc: QuorumCertificate2, + pub justify_qc: QuorumCertificate2, /// The hash of the parent `Leaf` /// So we can ask if it extends @@ -440,16 +347,12 @@ impl Display for Leaf { } } -impl LeafType for Leaf { - type NodeType = TYPES; - // type DeltasType = Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>; - type MaybeState = (); - - fn new( - view_number: ::Time, - justify_qc: QuorumCertificate2, - payload: ::BlockPayload, - _state: ::StateType, +impl Leaf { + /// Create a new leaf from its components. + pub fn new( + view_number: TYPES::Time, + justify_qc: QuorumCertificate2, + payload: TYPES::BlockPayload, ) -> Self { Self { view_number, @@ -463,31 +366,37 @@ impl LeafType for Leaf { } } - fn get_view_number(&self) -> TYPES::Time { + /// Time when this leaf was created. + pub fn get_view_number(&self) -> TYPES::Time { self.view_number } - - fn get_height(&self) -> u64 { + /// Height of this leaf in the chain. + /// + /// Equivalently, this is the number of leaves before this one in the chain. + pub fn get_height(&self) -> u64 { self.block_header.block_number() } - - fn get_justify_qc(&self) -> QuorumCertificate2 { + /// The QC linking this leaf to its parent in the chain. + pub fn get_justify_qc(&self) -> QuorumCertificate2 { self.justify_qc.clone() } - - fn get_parent_commitment(&self) -> Commitment { + /// Commitment to this leaf's parent. + pub fn get_parent_commitment(&self) -> Commitment { self.parent_commitment } - - fn get_block_header(&self) -> &::BlockHeader { + /// The block header contained in this leaf. + pub fn get_block_header(&self) -> &::BlockHeader { &self.block_header } - - fn fill_block_payload( + /// Fill this leaf with the block payload. + /// + /// # Errors + /// + /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()`. + pub fn fill_block_payload( &mut self, - block_payload: ::BlockPayload, - ) -> Result<(), InconsistentPayloadCommitmentError<::BlockPayload>> - { + block_payload: TYPES::BlockPayload, + ) -> Result<(), InconsistentPayloadCommitmentError> { if block_payload.commit() != self.block_header.payload_commitment() { return Err(InconsistentPayloadCommitmentError { payload: block_payload, @@ -497,27 +406,32 @@ impl LeafType for Leaf { self.block_payload = Some(block_payload); Ok(()) } - - fn get_block_payload(&self) -> Option<::BlockPayload> { + /// Optional block payload. + pub fn get_block_payload(&self) -> Option { self.block_payload.clone() } + /// A commitment to the block payload contained in this leaf. + pub fn get_payload_commitment(&self) -> Commitment { + self.get_block_header().payload_commitment() + } + /// The blockchain state after appending this leaf. // The Sequencing Leaf doesn't have a state. - fn get_state(&self) -> Self::MaybeState {} - - fn get_rejected(&self) -> Vec<::Transaction> { + pub fn get_state(&self) {} + /// Transactions rejected or invalidated by the application of this leaf. + pub fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } - - fn get_timestamp(&self) -> i128 { + /// Real-world time when this leaf was created. + pub fn get_timestamp(&self) -> i128 { self.timestamp } - - fn get_proposer_id(&self) -> EncodedPublicKey { + /// Identity of the network participant who proposed this leaf. + pub fn get_proposer_id(&self) -> EncodedPublicKey { self.proposer_id.clone() } - - fn from_stored_view(stored_view: StoredView) -> Self { + /// Create a leaf from information stored about a view. + pub fn from_stored_view(stored_view: StoredView) -> Self { Self { view_number: stored_view.view_number, justify_qc: stored_view.justify_qc, @@ -673,17 +587,15 @@ impl Committable for Leaf { } } -impl From for StoredView +impl From> for StoredView where TYPES: NodeType, - LEAF: LeafType, { - fn from(leaf: LEAF) -> Self { + fn from(leaf: Leaf) -> Self { StoredView { view_number: leaf.get_view_number(), parent: leaf.get_parent_commitment(), justify_qc: leaf.get_justify_qc(), - state: leaf.get_state(), block_header: leaf.get_block_header().clone(), block_payload: leaf.get_block_payload(), rejected: leaf.get_rejected(), diff --git a/types/src/event.rs b/types/src/event.rs index c70bb2a8e7..6a8e535c27 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,7 +1,7 @@ //! Events that a `HotShot` instance can emit use crate::{ - data::LeafType, error::HotShotError, simple_certificate::QuorumCertificate2, + data::Leaf, error::HotShotError, simple_certificate::QuorumCertificate2, traits::node_implementation::NodeType, }; @@ -11,11 +11,11 @@ use std::sync::Arc; /// This includes some metadata, such as the stage and view number that the event was generated in, /// as well as an inner [`EventType`] describing the event proper. #[derive(Clone, Debug)] -pub struct Event> { +pub struct Event { /// The view number that this event originates from pub view_number: TYPES::Time, /// The underlying event - pub event: EventType, + pub event: EventType, } /// The type and contents of a status event emitted by a `HotShot` instance @@ -24,7 +24,7 @@ pub struct Event> { /// number, and is thus always returned wrapped in an [`Event`]. #[non_exhaustive] #[derive(Clone, Debug)] -pub enum EventType> { +pub enum EventType { /// A view encountered an error and was interrupted Error { /// The underlying error @@ -38,12 +38,12 @@ pub enum EventType> { /// block first in the list. /// /// This list may be incomplete if the node is currently performing catchup. - leaf_chain: Arc>, + leaf_chain: Arc>>, /// The QC signing the most recent leaf in `leaf_chain`. /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf /// before it using - qc: Arc>, + qc: Arc>, /// Optional information of the number of transactions in the block, for logging purposes. block_size: Option, }, diff --git a/types/src/message.rs b/types/src/message.rs index 0ef9ed8590..8a80e27933 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -148,13 +148,13 @@ pub enum InternalTrigger { #[serde(bound(deserialize = ""))] pub enum ProcessedGeneralConsensusMessage> where - I::Exchanges: ExchangesType>, + I::Exchanges: ExchangesType>, { /// Message with a quorum proposal. Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. Vote( - QuorumVote>, + QuorumVote>, TYPES::SignatureKey, ), /// Message with a view sync vote. @@ -169,7 +169,7 @@ where impl> From> for GeneralConsensusMessage where - I::Exchanges: ExchangesType>, + I::Exchanges: ExchangesType>, { fn from(value: ProcessedGeneralConsensusMessage) -> Self { match value { @@ -192,7 +192,7 @@ where impl> ProcessedGeneralConsensusMessage where - I::Exchanges: ExchangesType>, + I::Exchanges: ExchangesType>, { /// Create a [`ProcessedGeneralConsensusMessage`] from a [`GeneralConsensusMessage`]. /// # Panics @@ -324,13 +324,13 @@ impl< /// Messages related to both validating and sequencing consensus. pub enum GeneralConsensusMessage> where - I::Exchanges: ExchangesType>, + I::Exchanges: ExchangesType>, { /// Message with a quorum proposal. Proposal(Proposal>), /// Message with a quorum vote. - Vote(QuorumVote>), + Vote(QuorumVote>), /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 220683c7bc..a7e48fa384 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,6 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ + data::Leaf, simple_vote::{DAData, QuorumData, TimeoutData, VIDData, Voteable}, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, @@ -89,9 +90,7 @@ impl HasViewNumber self.view_number } } -impl Display - for QuorumCertificate2 -{ +impl Display for QuorumCertificate2 { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, @@ -101,16 +100,12 @@ impl Display } } -impl< - TYPES: NodeType, - LEAF: Committable + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq + 'static, - > QuorumCertificate2 -{ +impl QuorumCertificate2 { #[must_use] /// Creat the Genisis certificate pub fn genesis() -> Self { let data = QuorumData { - leaf_commit: Commitment::::default_commitment_no_preimage(), + leaf_commit: Commitment::>::default_commitment_no_preimage(), }; let commit = data.commit(); Self { @@ -125,7 +120,7 @@ impl< } /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` -pub type QuorumCertificate2 = SimpleCertificate>; +pub type QuorumCertificate2 = SimpleCertificate>; /// Type alias for a DA certificate over `DAData` pub type DACertificate2 = SimpleCertificate::BlockPayload>>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index f4f1177686..7a747ebd18 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -6,6 +6,7 @@ use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; use crate::{ + data::Leaf, traits::{ election::Membership, node_implementation::NodeType, @@ -16,9 +17,10 @@ use crate::{ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. -pub struct QuorumData { +#[serde(bound(deserialize = ""))] +pub struct QuorumData { /// Commitment to the leaf - pub leaf_commit: Commitment, + pub leaf_commit: Commitment>, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. @@ -148,7 +150,7 @@ impl> } } -impl Committable for QuorumData { +impl Committable for QuorumData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Yes Vote") .var_size_bytes(self.leaf_commit.as_ref()) @@ -218,7 +220,7 @@ impl = SimpleVote, M>; +pub type QuorumVote = SimpleVote, M>; /// DA vote type alias pub type DAVote2 = SimpleVote::BlockPayload>, M>; /// VID vote type alias diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index ac48fc593f..a14b7eac7c 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -1,7 +1,7 @@ //! Contains the [`ConsensusApi`] trait. use crate::{ - data::LeafType, + data::Leaf, error::HotShotError, event::{Event, EventType}, message::{DataMessage, SequencingMessage}, @@ -20,12 +20,7 @@ use std::{num::NonZeroUsize, sync::Arc, time::Duration}; /// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and /// sequencing consensus. #[async_trait] -pub trait ConsensusSharedApi< - TYPES: NodeType, - LEAF: LeafType, - I: NodeImplementation, ->: Send + Sync -{ +pub trait ConsensusSharedApi>: Send + Sync { /// Total number of nodes in the network. Also known as `n`. fn total_nodes(&self) -> NonZeroUsize; @@ -40,7 +35,7 @@ pub trait ConsensusSharedApi< async fn store_leaf( &self, old_anchor_view: TYPES::Time, - leaf: LEAF, + leaf: Leaf, ) -> Result<(), StorageError>; /// Retuns the maximum transactions allowed in a block @@ -55,7 +50,7 @@ pub trait ConsensusSharedApi< async fn should_start_round(&self, view_number: TYPES::Time) -> bool; /// Notify the system of an event within `hotshot-consensus`. - async fn send_event(&self, event: Event); + async fn send_event(&self, event: Event); /// Get a reference to the public key. fn public_key(&self) -> &TYPES::SignatureKey; @@ -96,8 +91,8 @@ pub trait ConsensusSharedApi< async fn send_decide( &self, view_number: TYPES::Time, - leaf_views: Vec, - decide_qc: QuorumCertificate2, + leaf_views: Vec>, + decide_qc: QuorumCertificate2, ) { self.send_event(Event { view_number, @@ -124,9 +119,8 @@ pub trait ConsensusSharedApi< #[async_trait] pub trait ConsensusApi< TYPES: NodeType, - LEAF: LeafType, I: NodeImplementation>, ->: ConsensusSharedApi +>: ConsensusSharedApi { /// Send a direct message to the given recipient async fn send_direct_message( diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 642673b571..a9070a3a9d 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,14 +9,13 @@ use super::{ }; use crate::{ certificate::{AssembledSignature, ViewSyncCertificate}, - data::{DAProposal, ProposalType, VidDisperse}, + data::{DAProposal, Leaf, ProposalType, VidDisperse}, vote::ViewSyncVoteAccumulator, }; use crate::{message::GeneralConsensusMessage, vote::ViewSyncVoteInternal}; use crate::{ - data::LeafType, traits::{ network::{CommunicationChannel, NetworkMsg}, signature_key::SignatureKey, @@ -534,13 +533,11 @@ impl< } /// A [`ConsensusExchange`] where participants vote to append items to a log. -pub trait QuorumExchangeType, M: NetworkMsg>: - ConsensusExchange -{ +pub trait QuorumExchangeType: ConsensusExchange { /// Sign a validating or commitment proposal. fn sign_validating_or_commitment_proposal>( &self, - leaf_commitment: &Commitment, + leaf_commitment: &Commitment>, ) -> EncodedSignature; /// Sign a block payload commitment. @@ -557,7 +554,7 @@ pub trait QuorumExchangeType, /// information in the yes vote. fn sign_yes_vote( &self, - leaf_commitment: Commitment, + leaf_commitment: Commitment>, ) -> (EncodedPublicKey, EncodedSignature); } @@ -566,7 +563,6 @@ pub trait QuorumExchangeType, #[derivative(Clone, Debug)] pub struct QuorumExchange< TYPES: NodeType, - LEAF: LeafType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, @@ -584,23 +580,21 @@ pub struct QuorumExchange< #[derivative(Debug = "ignore")] private_key: ::PrivateKey, #[doc(hidden)] - _pd: PhantomData<(LEAF, PROPOSAL, MEMBERSHIP, M)>, + _pd: PhantomData<(Leaf, PROPOSAL, MEMBERSHIP, M)>, } impl< TYPES: NodeType, - LEAF: LeafType, MEMBERSHIP: Membership, PROPOSAL: ProposalType, NETWORK: CommunicationChannel, M: NetworkMsg, - > QuorumExchangeType - for QuorumExchange + > QuorumExchangeType for QuorumExchange { /// Sign a validating or commitment proposal. fn sign_validating_or_commitment_proposal>( &self, - leaf_commitment: &Commitment, + leaf_commitment: &Commitment>, ) -> EncodedSignature { let signature = TYPES::SignatureKey::sign(&self.private_key, leaf_commitment.as_ref()); signature @@ -623,7 +617,7 @@ impl< /// TODO GG: why return the pubkey? Some other `sign_xxx` methods do not return the pubkey. fn sign_yes_vote( &self, - leaf_commitment: Commitment, + leaf_commitment: Commitment>, ) -> (EncodedPublicKey, EncodedSignature) { let signature = TYPES::SignatureKey::sign( &self.private_key, @@ -635,18 +629,16 @@ impl< impl< TYPES: NodeType, - LEAF: LeafType, PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > ConsensusExchange - for QuorumExchange + > ConsensusExchange for QuorumExchange { type Proposal = PROPOSAL; type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; + type Commitment = Commitment>; fn create( entries: Vec<::StakeTableEntry>, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ce6a517cc..2462bc95e0 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -15,7 +15,7 @@ use super::{ State, }; use crate::{ - data::{Leaf, LeafType, TestableLeaf}, + data::{Leaf, TestableLeaf}, message::{ConsensusMessageType, Message, SequencingMessage}, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, @@ -114,11 +114,8 @@ impl> ChannelMaps { pub trait NodeImplementation: Send + Sync + Debug + Clone + Eq + Hash + 'static + Serialize + for<'de> Deserialize<'de> { - /// Leaf type for this consensus implementation - type Leaf: LeafType; - /// Storage type for this consensus implementation - type Storage: Storage + Clone; + type Storage: Storage + Clone; /// Consensus message type. type ConsensusMessage: ConsensusMessageType @@ -135,7 +132,7 @@ pub trait NodeImplementation: /// Consensus type selected exchanges. /// /// Implements either `ValidatingExchangesType` or `ExchangesType`. - type Exchanges: ExchangesType>; + type Exchanges: ExchangesType>; /// Create channels for sending/recv-ing proposals and votes for quorum and committee /// exchanges, the latter of which is only applicable for sequencing consensus. @@ -147,9 +144,7 @@ pub trait NodeImplementation: /// Contains the protocols for exchanging proposals and votes. #[allow(clippy::type_complexity)] #[async_trait] -pub trait ExchangesType, MESSAGE: NetworkMsg>: - Send + Sync -{ +pub trait ExchangesType: Send + Sync { /// Protocol for exchanging data availability proposals and votes. type CommitteeExchange: CommitteeExchangeType + Clone + Debug; @@ -163,7 +158,7 @@ pub trait ExchangesType, MESSA fn timeout_exchange(&self) -> &Self::TimeoutExchange; /// Protocol for exchanging quorum proposals and votes. - type QuorumExchange: QuorumExchangeType + Clone + Debug; + type QuorumExchange: QuorumExchangeType + Clone + Debug; /// Protocol for exchanging view sync proposals and votes. type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; @@ -207,8 +202,8 @@ pub trait ExchangesType, MESSA } /// an exchange that is testable -pub trait TestableExchange, MESSAGE: NetworkMsg>: - ExchangesType +pub trait TestableExchange: + ExchangesType { /// generate communication channels #[allow(clippy::type_complexity)] @@ -233,7 +228,7 @@ pub trait TestableExchange, ME pub struct Exchanges< TYPES: NodeType, MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, + QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, VIDEXCHANGE: VIDExchangeType + Clone + Debug, @@ -255,33 +250,34 @@ pub struct Exchanges< // It is here to avoid needing to instantiate it where all the other exchanges are instantiated // https://github.com/EspressoSystems/HotShot/issues/1799 #[allow(clippy::type_complexity)] - pub timeout_exchange: TimeoutExchange< - TYPES, - < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< + pub timeout_exchange: + TimeoutExchange< TYPES, + < as ExchangesType>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Proposal, + < as ExchangesType>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Membership, + >::Networking, MESSAGE, - >>::Proposal, - < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Membership, - >::Networking, - MESSAGE, - >, + >, /// Phantom data _phantom: PhantomData<(TYPES, MESSAGE)>, @@ -289,12 +285,12 @@ pub struct Exchanges< #[async_trait] impl - ExchangesType, MESSAGE> + ExchangesType for Exchanges where TYPES: NodeType, MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType, MESSAGE> + Clone + Debug, + QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, VIDEXCHANGE: VIDExchangeType + Clone + Debug, @@ -304,33 +300,34 @@ where type ViewSyncExchange = VIEWSYNCEXCHANGE; type VIDExchange = VIDEXCHANGE; #[allow(clippy::type_complexity)] - type TimeoutExchange = TimeoutExchange< - TYPES, - < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Proposal, - < as ExchangesType>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Proposal, + < as ExchangesType>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Membership, + >::Networking, MESSAGE, - QUORUMEXCHANGE, - COMMITTEEEXCHANGE, - VIEWSYNCEXCHANGE, - VIDEXCHANGE, - > as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Membership, - >::Networking, - MESSAGE, - >; + >; type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); @@ -364,7 +361,33 @@ where sk.clone(), ); #[allow(clippy::type_complexity)] - let timeout_exchange: TimeoutExchange as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Proposal, < as ExchangesType, MESSAGE>>::QuorumExchange as ConsensusExchange>::Membership, >::Networking, MESSAGE> = TimeoutExchange::create( + let timeout_exchange: TimeoutExchange< + TYPES, + < as ExchangesType>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Proposal, + < as ExchangesType>::QuorumExchange as ConsensusExchange< + TYPES, + MESSAGE, + >>::Membership, + >::Networking, + MESSAGE, + > = TimeoutExchange::create( entries.clone(), configs.0.clone(), networks.0, @@ -431,35 +454,30 @@ where /// Alias for the [`QuorumExchange`] type. pub type QuorumEx = <>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::QuorumExchange; /// Alias for `TimeoutExchange` type pub type TimeoutEx = <>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::TimeoutExchange; /// Alias for the [`CommitteeExchange`] type. pub type CommitteeEx = <>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::CommitteeExchange; /// Alias for the [`VIDExchange`] type. pub type VIDEx = <>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::VIDExchange; /// Alias for the [`ViewSyncExchange`] type. pub type ViewSyncEx = <>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::ViewSyncExchange; @@ -487,7 +505,7 @@ pub trait TestableNodeImplementation: NodeImplementation /// otherwise panics /// `padding` is the bytes of padding to add to the transaction fn leaf_create_random_transaction( - leaf: &Self::Leaf, + leaf: &Leaf, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction; @@ -505,7 +523,7 @@ pub trait TestableNodeImplementation: NodeImplementation fn construct_tmp_storage() -> Result; /// Return the full internal state. This is useful for debugging. - async fn get_full_state(storage: &Self::Storage) -> StorageState; + async fn get_full_state(storage: &Self::Storage) -> StorageState; } #[async_trait] @@ -536,8 +554,7 @@ where >, TYPES::StateType: TestableState, TYPES::BlockPayload: TestableBlock, - I::Storage: TestableStorage, - I::Leaf: TestableLeaf, + I::Storage: TestableStorage, { type CommitteeElectionConfig = TYPES::ElectionConfigType; @@ -555,11 +572,11 @@ where } fn leaf_create_random_transaction( - leaf: &Self::Leaf, + leaf: &Leaf, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - ::create_random_transaction(leaf, rng, padding) + Leaf::create_random_transaction(leaf, rng, padding) } fn block_genesis() -> TYPES::BlockPayload { @@ -571,11 +588,11 @@ where } fn construct_tmp_storage() -> Result { - >::construct_tmp_storage() + >::construct_tmp_storage() } - async fn get_full_state(storage: &Self::Storage) -> StorageState { - >::get_full_state(storage).await + async fn get_full_state(storage: &Self::Storage) -> StorageState { + >::get_full_state(storage).await } } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index b7078b151c..8adfbaf6ef 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -2,8 +2,7 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ - data::LeafType, simple_certificate::QuorumCertificate2, traits::BlockPayload, - vote2::HasViewNumber, + data::Leaf, simple_certificate::QuorumCertificate2, traits::BlockPayload, vote2::HasViewNumber, }; use async_trait::async_trait; use commit::Commitment; @@ -28,17 +27,16 @@ pub type Result = std::result::Result; /// /// This trait has been constructed for object saftey over convenience. #[async_trait] -pub trait Storage: Clone + Send + Sync + Sized + 'static +pub trait Storage: Clone + Send + Sync + Sized + 'static where TYPES: NodeType + 'static, - LEAF: LeafType + 'static, { /// Append the list of views to this storage - async fn append(&self, views: Vec>) -> Result; + async fn append(&self, views: Vec>) -> Result; /// Cleans up the storage up to the given view. The given view number will still persist in this storage afterwards. async fn cleanup_storage_up_to_view(&self, view: TYPES::Time) -> Result; /// Get the latest anchored view - async fn get_anchored_view(&self) -> Result>; + async fn get_anchored_view(&self) -> Result>; /// Commit this storage. async fn commit(&self) -> Result; @@ -46,7 +44,7 @@ where /// ```rust,ignore /// storage.append(vec![ViewEntry::Success(view)]).await /// ``` - async fn append_single_view(&self, view: StoredView) -> Result { + async fn append_single_view(&self, view: StoredView) -> Result { self.append(vec![ViewEntry::Success(view)]).await } // future improvement: @@ -59,8 +57,7 @@ where /// Extra requirements on Storage implementations required for testing #[async_trait] -pub trait TestableStorage>: - Clone + Send + Sync + Storage +pub trait TestableStorage: Clone + Send + Sync + Storage where TYPES: NodeType + 'static, { @@ -71,51 +68,48 @@ where fn construct_tmp_storage() -> Result; /// Return the full internal state. This is useful for debugging. - async fn get_full_state(&self) -> StorageState; + async fn get_full_state(&self) -> StorageState; } /// An internal representation of the data stored in a [`Storage`]. /// /// This should only be used for testing, never in production code. #[derive(Debug, PartialEq)] -pub struct StorageState> { +pub struct StorageState { /// The views that have been successful - pub stored: BTreeMap>, + pub stored: BTreeMap>, /// The views that have failed pub failed: BTreeSet, } /// An entry to `Storage::append`. This makes it possible to commit both succeeded and failed views at the same time #[derive(Debug, PartialEq)] -pub enum ViewEntry +pub enum ViewEntry where TYPES: NodeType, - LEAF: LeafType, { /// A succeeded view - Success(StoredView), + Success(StoredView), /// A failed view Failed(TYPES::Time), // future improvement: // InProgress(InProgressView), } -impl From> for ViewEntry +impl From> for ViewEntry where TYPES: NodeType, - LEAF: LeafType, { - fn from(view: StoredView) -> Self { + fn from(view: StoredView) -> Self { Self::Success(view) } } -impl From for ViewEntry +impl From> for ViewEntry where TYPES: NodeType, - LEAF: LeafType, { - fn from(leaf: LEAF) -> Self { + fn from(leaf: Leaf) -> Self { Self::Success(StoredView::from(leaf)) } } @@ -123,15 +117,13 @@ where /// A view stored in the [`Storage`] #[derive(Clone, Debug, Derivative)] #[derivative(PartialEq)] -pub struct StoredView> { +pub struct StoredView { /// The view number of this view pub view_number: TYPES::Time, /// The parent of this view - pub parent: Commitment, + pub parent: Commitment>, /// The justify QC of this view. See the hotstuff paper for more information on this. - pub justify_qc: QuorumCertificate2, - /// The state of this view - pub state: LEAF::MaybeState, + pub justify_qc: QuorumCertificate2, /// Block header. pub block_header: TYPES::BlockHeader, /// Optional block payload. @@ -148,21 +140,19 @@ pub struct StoredView> { pub proposer_id: EncodedPublicKey, } -impl StoredView +impl StoredView where TYPES: NodeType, - LEAF: LeafType, { /// Create a new `StoredView` from the given QC, `BlockHeader`, `BlockPayload` and State. /// /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a /// parent. pub fn from_qc_block_and_state( - qc: QuorumCertificate2, + qc: QuorumCertificate2, block_header: TYPES::BlockHeader, block_payload: Option, - state: LEAF::MaybeState, - parent_commitment: Commitment, + parent_commitment: Commitment>, rejected: Vec<::Transaction>, proposer_id: EncodedPublicKey, ) -> Self { @@ -170,7 +160,6 @@ where view_number: qc.get_view_number(), parent: parent_commitment, justify_qc: qc, - state, block_header, block_payload, rejected, diff --git a/types/src/utils.rs b/types/src/utils.rs index cb06abf8d0..64d6aae159 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -1,15 +1,12 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use crate::{ - data::{LeafBlockPayload, LeafType}, - traits::node_implementation::NodeType, -}; +use crate::{data::Leaf, traits::node_implementation::NodeType}; use commit::Commitment; use std::ops::Deref; /// A view's state #[derive(Debug)] -pub enum ViewInner> { +pub enum ViewInner { /// A pending view with an available block but not leaf proposal yet. /// /// Storing this state allows us to garbage collect blocks for views where a proposal is never @@ -17,21 +14,21 @@ pub enum ViewInner> { /// leaders repeatedly request availability for blocks that they never propose. DA { /// Available block. - block: Commitment>, + block: Commitment, }, /// Undecided view Leaf { /// Proposed leaf - leaf: Commitment, + leaf: Commitment>, }, /// Leaf has failed Failed, } -impl> ViewInner { +impl ViewInner { /// return the underlying leaf hash if it exists #[must_use] - pub fn get_leaf_commitment(&self) -> Option> { + pub fn get_leaf_commitment(&self) -> Option>> { if let Self::Leaf { leaf } = self { Some(*leaf) } else { @@ -41,7 +38,7 @@ impl> ViewInner { /// return the underlying block paylod commitment if it exists #[must_use] - pub fn get_payload_commitment(&self) -> Option>> { + pub fn get_payload_commitment(&self) -> Option> { if let Self::DA { block } = self { Some(*block) } else { @@ -50,8 +47,8 @@ impl> ViewInner { } } -impl> Deref for View { - type Target = ViewInner; +impl Deref for View { + type Target = ViewInner; fn deref(&self) -> &Self::Target { &self.view_inner @@ -60,9 +57,9 @@ impl> Deref for View> { +pub struct View { /// The view data. Wrapped in a struct so we can mutate - pub view_inner: ViewInner, + pub view_inner: ViewInner, } /// A struct containing information about a finished round. From dfce4b72e5abee796f8209fa4bc1b620bdf32066 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 14:01:35 -0500 Subject: [PATCH 0355/1393] lint passing --- hotshot/examples/infra/mod.rs | 5 --- hotshot/examples/libp2p/types.rs | 4 +-- hotshot/examples/web-server-da/types.rs | 2 +- testing/src/node_types.rs | 43 +++++++------------------ testing/src/overall_safety_task.rs | 38 ++++++++-------------- testing/src/task_helpers.rs | 18 +++++------ testing/src/test_builder.rs | 4 +-- testing/src/test_launcher.rs | 4 --- testing/tests/consensus_task.rs | 11 ++++--- testing/tests/memory_network.rs | 5 ++- 10 files changed, 44 insertions(+), 90 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 7fbf6a85a3..1622770c94 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -120,7 +120,6 @@ pub async fn run_orchestrator< Message, QuorumExchange< TYPES, - Leaf, QuorumProposal, MEMBERSHIP, QUORUMNETWORK, @@ -184,7 +183,6 @@ pub trait RunDA< Message, QuorumExchange< TYPES, - Leaf, QuorumProposal, MEMBERSHIP, QUORUMNETWORK, @@ -428,7 +426,6 @@ impl< Message, QuorumExchange< TYPES, - Leaf, QuorumProposal, MEMBERSHIP, WebCommChannel, @@ -581,7 +578,6 @@ impl< Message, QuorumExchange< TYPES, - Leaf, QuorumProposal, MEMBERSHIP, Libp2pCommChannel, @@ -808,7 +804,6 @@ pub async fn main_entry_point< Message, QuorumExchange< TYPES, - Leaf, QuorumProposal, MEMBERSHIP, QUORUMNETWORK, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index a5ef290f5c..4278792004 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -8,7 +8,7 @@ use hotshot::{ }; use hotshot_types::{ certificate::ViewSyncCertificate, - data::{DAProposal, Leaf, QuorumProposal}, + data::{DAProposal, QuorumProposal}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, @@ -36,7 +36,7 @@ pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; + type Storage = MemoryStorage; type Exchanges = Exchanges< DemoTypes, Message, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index dfcd6a54b1..b424fe0e98 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -8,7 +8,7 @@ use hotshot::{ }; use hotshot_types::{ certificate::ViewSyncCertificate, - data::{DAProposal, Leaf, QuorumProposal}, + data::{DAProposal, QuorumProposal}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 8a923edcf5..7af2da9332 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -16,7 +16,7 @@ use hotshot::{ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - data::{Leaf, QuorumProposal, ViewNumber}, + data::{QuorumProposal, ViewNumber}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, @@ -63,7 +63,7 @@ pub struct WebImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct CombinedImpl; -pub type StaticMembership = StaticCommittee>; +pub type StaticMembership = StaticCommittee; pub type StaticMemoryDAComm = MemoryCommChannel; @@ -102,8 +102,7 @@ pub type SequencingLibp2pExchange = Exchanges< Message, QuorumExchange< TestTypes, - >::Leaf, - QuorumProposal>, + QuorumProposal, StaticMembership, StaticLibp2pQuorumComm, Message, @@ -125,7 +124,7 @@ pub type SequencingLibp2pExchange = Exchanges< >; impl NodeImplementation for Libp2pImpl { - type Storage = MemoryStorage>; + type Storage = MemoryStorage; type Exchanges = SequencingLibp2pExchange; type ConsensusMessage = SequencingMessage; @@ -142,13 +141,7 @@ impl NodeImplementation for Libp2pImpl { } } -impl - TestableExchange< - TestTypes, - >::Leaf, - Message, - > for SequencingLibp2pExchange -{ +impl TestableExchange> for SequencingLibp2pExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -227,8 +220,7 @@ pub type SequencingMemoryExchange = Exchanges< Message, QuorumExchange< TestTypes, - >::Leaf, - QuorumProposal>, + QuorumProposal, StaticMembership, StaticMemoryQuorumComm, Message, @@ -249,13 +241,7 @@ pub type SequencingMemoryExchange = Exchanges< VIDExchange>, >; -impl - TestableExchange< - TestTypes, - >::Leaf, - Message, - > for SequencingMemoryExchange -{ +impl TestableExchange> for SequencingMemoryExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -343,7 +329,7 @@ impl } impl NodeImplementation for MemoryImpl { - type Storage = MemoryStorage>; + type Storage = MemoryStorage; type Exchanges = SequencingMemoryExchange; type ConsensusMessage = SequencingMessage; @@ -370,8 +356,7 @@ pub type SequencingWebExchanges = Exchanges< Message, QuorumExchange< TestTypes, - >::Leaf, - QuorumProposal>, + QuorumProposal, StaticMembership, StaticWebQuorumComm, Message, @@ -387,13 +372,7 @@ pub type SequencingWebExchanges = Exchanges< VIDExchange>, >; -impl - TestableExchange< - TestTypes, - >::Leaf, - Message, - > for SequencingWebExchanges -{ +impl TestableExchange> for SequencingWebExchanges { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -483,7 +462,7 @@ impl } impl NodeImplementation for WebImpl { - type Storage = MemoryStorage>; + type Storage = MemoryStorage; type Exchanges = SequencingWebExchanges; type ConsensusMessage = SequencingMessage; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 24ef4311ce..27091c5493 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -9,10 +9,7 @@ use std::{ use async_compatibility_layer::channel::UnboundedStream; use futures::FutureExt; -use hotshot::{ - traits::{NodeImplementation, TestableNodeImplementation}, - HotShotError, -}; +use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_task::{ event_stream::ChannelStream, task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, @@ -20,8 +17,7 @@ use hotshot_task::{ MergeN, }; use hotshot_types::{ - block_impl::VIDBlockPayload, - data::{Leaf, LeafBlockPayload}, + data::Leaf, error::RoundTimedoutState, event::{Event, EventType}, simple_certificate::QuorumCertificate2, @@ -79,7 +75,7 @@ pub struct OverallSafetyTask>, /// ctx - pub ctx: RoundCtx, + pub ctx: RoundCtx, /// event stream for publishing safety violations pub test_event_stream: ChannelStream, } @@ -110,7 +106,7 @@ pub struct RoundResult { pub leaf_map: HashMap, usize>, /// block -> # entries decided on that block - pub block_map: HashMap, usize>, + pub block_map: HashMap, usize>, /// state -> # entries decided on that state pub state_map: HashMap<(), usize>, @@ -134,7 +130,7 @@ impl Default for RoundResult { /// smh my head I shouldn't need to implement this /// Rust doesn't realize I doesn't need to implement default -impl> Default for RoundCtx { +impl Default for RoundCtx { fn default() -> Self { Self { round_results: Default::default(), @@ -149,18 +145,17 @@ impl> Default for RoundCtx /// that we poll when things are event driven /// this context will be passed around #[derive(Debug)] -pub struct RoundCtx> { +pub struct RoundCtx { /// results from previous rounds /// view number -> round result - pub round_results: - HashMap>::Leaf>>, + pub round_results: HashMap>, /// during the run view refactor pub failed_views: HashSet, /// successful views pub successful_views: HashSet, } -impl> RoundCtx { +impl RoundCtx { /// inserts an error into the context pub fn insert_error_to_context( &mut self, @@ -187,6 +182,7 @@ impl> RoundCtx { impl RoundResult { /// insert into round result + #[allow(clippy::unit_arg)] pub fn insert_into_result( &mut self, idx: usize, @@ -208,7 +204,7 @@ impl RoundResult { let (state, payload_commitment) = (leaf.get_state(), leaf.get_payload_commitment()); - match self.state_map.entry(state.clone()) { + match self.state_map.entry(state) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; } @@ -241,7 +237,7 @@ impl RoundResult { /// determines whether or not the round passes /// also do a safety check - #[allow(clippy::too_many_arguments)] + #[allow(clippy::too_many_arguments, clippy::let_unit_value)] pub fn update_status( &mut self, threshold: usize, @@ -599,15 +595,7 @@ pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< OverallSafetyTaskErr, GlobalTestEvent, ChannelStream, - ( - usize, - Either>::Leaf>, HotShotEvent>, - ), - MergeN< - Merge< - UnboundedStream>::Leaf>>, - UnboundedStream>, - >, - >, + (usize, Either, HotShotEvent>), + MergeN>, UnboundedStream>>>, OverallSafetyTask, >; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index acd5844557..ab5c48bec1 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -41,13 +41,11 @@ pub async fn build_system_handle( let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::< - TestTypes, - >::Leaf, - >::from_genesis( - >::block_genesis() - ) - .unwrap(); + let initializer = + HotShotInitializer::::from_genesis(>::block_genesis()) + .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); @@ -91,7 +89,7 @@ async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> (QuorumProposal>, EncodedSignature) { +) -> (QuorumProposal, EncodedSignature) { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; let api: HotShotConsensusApi = HotShotConsensusApi { @@ -127,7 +125,7 @@ async fn build_quorum_proposal_and_signature( proposer_id: api.public_key().to_bytes(), }; let signature = ::sign(private_key, leaf.commit().as_ref()); - let proposal = QuorumProposal::> { + let proposal = QuorumProposal:: { block_header, view_number: ViewNumber::new(view), justify_qc: QuorumCertificate2::genesis(), @@ -142,7 +140,7 @@ pub async fn build_quorum_proposal( handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> Proposal>> { +) -> Proposal> { let (proposal, signature) = build_quorum_proposal_and_signature(handle, private_key, view).await; Proposal { diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 14eab3a556..2ab68282b6 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -184,8 +184,7 @@ impl TestMetadata { ) -> TestLauncher where I: NodeImplementation>, - >::Exchanges: - TestableExchange>::Leaf, Message>, + >::Exchanges: TestableExchange>, SystemContext: HotShotType, { let TestMetadata { @@ -276,7 +275,6 @@ impl TestMetadata { <>::Exchanges as TestableExchange< _, _, - _, >>::gen_comm_channels( total_nodes, num_bootstrap_nodes, da_committee_size ), diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 66262f1649..d6320bdbb1 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -30,22 +30,18 @@ use super::{ pub type Networks = ( <<>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::QuorumExchange as ConsensusExchange>>::Networking, <<>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::CommitteeExchange as ConsensusExchange>>::Networking, <<>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::ViewSyncExchange as ConsensusExchange>>::Networking, <<>::Exchanges as ExchangesType< TYPES, - >::Leaf, Message, >>::VIDExchange as ConsensusExchange>>::Networking, ); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index c8bb007dc5..0a3df35cf0 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -26,7 +26,7 @@ use std::collections::HashMap; async fn build_vote( handle: &SystemContextHandle, - proposal: QuorumProposal>, + proposal: QuorumProposal, ) -> GeneralConsensusMessage { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; @@ -68,9 +68,10 @@ async fn build_vote( timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), }; - let vote = - QuorumVote::, QuorumMembership>::create_signed_vote( - QuorumData { leaf_commit: leaf.commit() }, + let vote = QuorumVote::>::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, view, quorum_exchange.public_key(), quorum_exchange.private_key(), @@ -101,7 +102,7 @@ async fn test_consensus_task() { let mut output = HashMap::new(); // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = QuorumCertificate2::>::genesis(); + let qc = QuorumCertificate2::::genesis(); let proposal = build_quorum_proposal(&handle, &private_key, 1).await; input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index c646c193d6..b9b545d8cc 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -15,7 +15,7 @@ use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; use hotshot_types::block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}; use hotshot_types::certificate::ViewSyncCertificate; -use hotshot_types::data::{DAProposal, Leaf, QuorumProposal}; +use hotshot_types::data::{DAProposal, QuorumProposal}; use hotshot_types::message::{Message, SequencingMessage}; use hotshot_types::traits::election::{ CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange, @@ -78,13 +78,12 @@ pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for TestImpl { - type Storage = MemoryStorage; + type Storage = MemoryStorage; type Exchanges = Exchanges< Test, Message, QuorumExchange< Test, - Self::Leaf, ThisQuorumProposal, ThisMembership, QuorumNetwork, From 4351acade31b0afee342dae07cbe7f631e13360e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 10 Nov 2023 11:41:53 -0800 Subject: [PATCH 0356/1393] Fix lint --- types/src/block_impl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 375f917d70..6c51094d84 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -91,7 +91,7 @@ impl VIDBlockPayload { // changes. // TODO let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - vid.disperse(encoded_transactions.to_vec()).unwrap().commit + vid.disperse(encoded_transactions).unwrap().commit } /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for From cf942d5f2d041e897afb14b0f0a068b260b8f499 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 10 Nov 2023 14:41:55 -0500 Subject: [PATCH 0357/1393] Update jellyfish (#2044) * update jf * update jellyfish, pacify clippy --- testing/src/task_helpers.rs | 2 +- types/src/block_impl.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index acd5844557..ccaf327891 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -160,5 +160,5 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey pub fn vid_init() -> VidScheme { let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap() + VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap() } diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index af664791bd..2c1ecc2983 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -81,7 +81,7 @@ impl VIDBlockPayload { // TODO We are using constant numbers for now, but they will change as the quorum size // changes. // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap(); let txn = vec![0]; let vid_disperse = vid.disperse(&txn).unwrap(); VIDBlockPayload { From b6f65a0e56fae07cf964827ee54ea8709e3dffc4 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 16:57:53 -0500 Subject: [PATCH 0358/1393] Starting to remove the ProposalType trait --- hotshot/src/tasks/mod.rs | 8 +++----- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/vid.rs | 1 - types/src/data.rs | 33 ++++++++--------------------- types/src/message.rs | 20 +++++++++--------- types/src/traits/election.rs | 40 ++++++++++-------------------------- 7 files changed, 35 insertions(+), 71 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 120294e8b1..9024491603 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -27,7 +27,7 @@ use hotshot_task_impls::{ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - data::{Leaf, ProposalType, QuorumProposal}, + data::{Leaf, QuorumProposal}, event::Event, message::{Message, Messages, SequencingMessage}, traits::{ @@ -63,9 +63,8 @@ pub enum GlobalEvent { pub async fn add_network_message_task< TYPES: NodeType, I: NodeImplementation>, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> + EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> + 'static, >( task_runner: TaskRunner, @@ -162,9 +161,8 @@ where pub async fn add_network_event_task< TYPES: NodeType, I: NodeImplementation>, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> + EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> + 'static, >( task_runner: TaskRunner, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 15a8b48ee3..f706c993f9 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -18,7 +18,7 @@ use hotshot_task::{ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, consensus::{Consensus, View}, - data::{Leaf, ProposalType, QuorumProposal}, + data::{Leaf, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, simple_certificate::{ diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 1944920d54..057598d0ea 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -14,7 +14,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{Consensus, View}, - data::{DAProposal, ProposalType}, + data::DAProposal, message::{Message, Proposal, SequencingMessage}, simple_vote::{DAData, DAVote2}, traits::{ diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 06048d4bea..750ed5fed8 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -15,7 +15,6 @@ use hotshot_task::{ use hotshot_types::traits::{network::ConsensusIntentEvent, node_implementation::VIDMembership}; use hotshot_types::{ consensus::{Consensus, View}, - data::ProposalType, message::{Message, SequencingMessage}, traits::{ consensus_api::ConsensusApi, diff --git a/types/src/data.rs b/types/src/data.rs index 60d59f5deb..075b7bce4c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -14,7 +14,7 @@ use crate::{ storage::StoredView, BlockPayload, State, }, - vote2::Certificate2, + vote2::{Certificate2, HasViewNumber}, }; use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; @@ -178,30 +178,26 @@ pub struct QuorumProposal { pub proposer_id: EncodedPublicKey, } -impl ProposalType for DAProposal { - type NodeType = TYPES; - fn get_view_number(&self) -> ::Time { +impl HasViewNumber for DAProposal { + fn get_view_number(&self) -> TYPES::Time { self.view_number } } -impl ProposalType for VidDisperse { - type NodeType = TYPES; - fn get_view_number(&self) -> ::Time { +impl HasViewNumber for VidDisperse { + fn get_view_number(&self) -> TYPES::Time { self.view_number } } -impl ProposalType for QuorumProposal { - type NodeType = TYPES; - fn get_view_number(&self) -> ::Time { +impl HasViewNumber for QuorumProposal { + fn get_view_number(&self) -> TYPES::Time { self.view_number } } -impl ProposalType for ViewSyncCertificate { - type NodeType = TYPES; - fn get_view_number(&self) -> ::Time { +impl HasViewNumber for ViewSyncCertificate { + fn get_view_number(&self) -> TYPES::Time { match self { ViewSyncCertificate::PreCommit(certificate_internal) | ViewSyncCertificate::Commit(certificate_internal) @@ -210,17 +206,6 @@ impl ProposalType for ViewSyncCertificate { } } -/// A proposal to a network of voting nodes. -pub trait ProposalType: - Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq + Eq + Hash -{ - /// Type of nodes that can vote on this proposal. - type NodeType: NodeType; - - /// Time at which this proposal is valid. - fn get_view_number(&self) -> ::Time; -} - /// A state change encoded in a leaf. /// /// [`DeltasType`] represents a [block](NodeType::BlockPayload), but it may not contain the block in diff --git a/types/src/message.rs b/types/src/message.rs index 8a80e27933..fc6799ba38 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -8,7 +8,7 @@ use crate::simple_vote::{DAVote2, TimeoutVote2, VIDVote2}; use crate::traits::node_implementation::CommitteeMembership; use crate::vote2::HasViewNumber; use crate::{ - data::{DAProposal, ProposalType, VidDisperse}, + data::{DAProposal, VidDisperse}, simple_vote::QuorumVote, traits::{ network::{NetworkMsg, ViewMessage}, @@ -151,7 +151,7 @@ where I::Exchanges: ExchangesType>, { /// Message with a quorum proposal. - Proposal(Proposal>, TYPES::SignatureKey), + Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. Vote( QuorumVote>, @@ -160,7 +160,7 @@ where /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. - ViewSyncCertificate(Proposal>), + ViewSyncCertificate(Proposal>), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), @@ -218,7 +218,7 @@ where #[serde(bound(deserialize = ""))] pub enum ProcessedCommitteeConsensusMessage> { /// Proposal for the DA committee. - DAProposal(Proposal>, TYPES::SignatureKey), + DAProposal(Proposal>, TYPES::SignatureKey), /// Vote from the DA committee. DAVote( DAVote2>, @@ -227,7 +227,7 @@ pub enum ProcessedCommitteeConsensusMessage, TYPES::SignatureKey), /// VID dispersal data. Like [`DAProposal`] - VidDisperseMsg(Proposal>, TYPES::SignatureKey), + VidDisperseMsg(Proposal>, TYPES::SignatureKey), /// Vote from VID storage node. Like [`DAVote`] VidVote( VIDVote2>, @@ -327,7 +327,7 @@ where I::Exchanges: ExchangesType>, { /// Message with a quorum proposal. - Proposal(Proposal>), + Proposal(Proposal>), /// Message with a quorum vote. Vote(QuorumVote>), @@ -336,7 +336,7 @@ where ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. - ViewSyncCertificate(Proposal>), + ViewSyncCertificate(Proposal>), /// Message with a Timeout vote TimeoutVote(TimeoutVote2>), @@ -351,7 +351,7 @@ where /// Messages related to the sequencing consensus protocol for the DA committee. pub enum CommitteeConsensusMessage> { /// Proposal for data availability committee - DAProposal(Proposal>), + DAProposal(Proposal>), /// vote for data availability committee DAVote(DAVote2>), @@ -363,7 +363,7 @@ pub enum CommitteeConsensusMessage /// /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. /// TODO this variant should not be a [`CommitteeConsensusMessage`] because - VidDisperseMsg(Proposal>), + VidDisperseMsg(Proposal>), /// Vote for VID disperse data /// @@ -503,7 +503,7 @@ pub enum DataMessage { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Prepare qc from the leader -pub struct Proposal { +pub struct Proposal> { // NOTE: optimization could include view number to help look up parent leaf // could even do 16 bit numbers if we want /// The data being proposed. diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index a9070a3a9d..ce8fc70c4d 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,7 +9,7 @@ use super::{ }; use crate::{ certificate::{AssembledSignature, ViewSyncCertificate}, - data::{DAProposal, Leaf, ProposalType, VidDisperse}, + data::{DAProposal, Leaf, VidDisperse}, vote::ViewSyncVoteAccumulator, }; @@ -254,8 +254,6 @@ pub trait Membership: /// allowing them to vote and query information about the overall state of the protocol (such as /// membership and leader status). pub trait ConsensusExchange: Send + Sync { - /// A proposal for participants to vote on. - type Proposal: ProposalType; /// The committee eligible to make decisions. type Membership: Membership; @@ -383,7 +381,6 @@ impl< M: NetworkMsg, > ConsensusExchange for CommitteeExchange { - type Proposal = DAProposal; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; @@ -486,7 +483,6 @@ impl< M: NetworkMsg, > ConsensusExchange for VIDExchange { - type Proposal = VidDisperse; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; @@ -563,7 +559,6 @@ pub trait QuorumExchangeType: ConsensusExchange< #[derivative(Clone, Debug)] pub struct QuorumExchange< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, @@ -580,16 +575,15 @@ pub struct QuorumExchange< #[derivative(Debug = "ignore")] private_key: ::PrivateKey, #[doc(hidden)] - _pd: PhantomData<(Leaf, PROPOSAL, MEMBERSHIP, M)>, + _pd: PhantomData<(Leaf, MEMBERSHIP, M)>, } impl< TYPES: NodeType, MEMBERSHIP: Membership, - PROPOSAL: ProposalType, NETWORK: CommunicationChannel, M: NetworkMsg, - > QuorumExchangeType for QuorumExchange + > QuorumExchangeType for QuorumExchange { /// Sign a validating or commitment proposal. fn sign_validating_or_commitment_proposal>( @@ -629,13 +623,11 @@ impl< impl< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > ConsensusExchange for QuorumExchange + > ConsensusExchange for QuorumExchange { - type Proposal = PROPOSAL; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment>; @@ -823,7 +815,6 @@ pub trait ViewSyncExchangeType: #[derivative(Clone, Debug)] pub struct ViewSyncExchange< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, @@ -840,16 +831,15 @@ pub struct ViewSyncExchange< #[derivative(Debug = "ignore")] private_key: ::PrivateKey, #[doc(hidden)] - _pd: PhantomData<(PROPOSAL, MEMBERSHIP, M)>, + _pd: PhantomData<(MEMBERSHIP, M)>, } impl< TYPES: NodeType, MEMBERSHIP: Membership, - PROPOSAL: ProposalType, NETWORK: CommunicationChannel, M: NetworkMsg, - > ViewSyncExchangeType for ViewSyncExchange + > ViewSyncExchangeType for ViewSyncExchange { type Vote = ViewSyncVote; @@ -1050,13 +1040,11 @@ impl< impl< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > ConsensusExchange for ViewSyncExchange + > ConsensusExchange for ViewSyncExchange { - type Proposal = PROPOSAL; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment>; @@ -1102,7 +1090,6 @@ impl< #[derivative(Clone, Debug)] pub struct TimeoutExchange< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, @@ -1119,16 +1106,15 @@ pub struct TimeoutExchange< #[derivative(Debug = "ignore")] private_key: ::PrivateKey, #[doc(hidden)] - _pd: PhantomData<(PROPOSAL, MEMBERSHIP, M)>, + _pd: PhantomData<(MEMBERSHIP, M)>, } impl< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > TimeoutExchange + > TimeoutExchange { } @@ -1137,24 +1123,20 @@ pub trait TimeoutExchangeType: ConsensusExchange impl< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > TimeoutExchangeType for TimeoutExchange + > TimeoutExchangeType for TimeoutExchange { } -// TODO ED Get rid of ProposalType as generic, is debt left over from Validating Consensus impl< TYPES: NodeType, - PROPOSAL: ProposalType, MEMBERSHIP: Membership, NETWORK: CommunicationChannel, M: NetworkMsg, - > ConsensusExchange for TimeoutExchange + > ConsensusExchange for TimeoutExchange { - type Proposal = PROPOSAL; type Membership = MEMBERSHIP; type Networking = NETWORK; type Commitment = Commitment; From ac6890996aee7caabecc995a884904447edfcf4a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 Nov 2023 17:56:13 -0500 Subject: [PATCH 0359/1393] Completed removed ProposalType --- hotshot/examples/infra/mod.rs | 55 ++---------- hotshot/examples/libp2p/types.rs | 23 +---- hotshot/examples/web-server-da/types.rs | 23 +---- hotshot/src/lib.rs | 18 +--- hotshot/src/tasks/mod.rs | 25 ++---- task-impls/src/consensus.rs | 113 ++++++------------------ task-impls/src/da.rs | 6 +- task-impls/src/events.rs | 22 ++--- task-impls/src/transactions.rs | 2 + task-impls/src/view_sync.rs | 10 +-- testing/src/node_types.rs | 18 +--- testing/src/task_helpers.rs | 5 +- testing/tests/da_task.rs | 3 +- testing/tests/memory_network.rs | 23 +---- testing/tests/network_task.rs | 4 +- testing/tests/vid_task.rs | 3 + types/src/message.rs | 18 ++-- types/src/traits/election.rs | 3 +- types/src/traits/node_implementation.rs | 45 ---------- 19 files changed, 99 insertions(+), 320 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 1622770c94..2565327d7b 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -23,9 +23,8 @@ use hotshot_task::task::FilterEvent; use hotshot_types::{block_impl::VIDBlockHeader, traits::election::VIDExchange}; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, TestableLeaf}, + data::{Leaf, TestableLeaf}, event::{Event, EventType}, message::{Message, SequencingMessage}, traits::{ @@ -118,21 +117,9 @@ pub async fn run_orchestrator< Exchanges = Exchanges< TYPES, Message, - QuorumExchange< - TYPES, - QuorumProposal, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, + QuorumExchange>, CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, + ViewSyncExchange>, VIDExchange>, >, Storage = MemoryStorage, @@ -181,21 +168,9 @@ pub trait RunDA< Exchanges = Exchanges< TYPES, Message, - QuorumExchange< - TYPES, - QuorumProposal, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, + QuorumExchange>, CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, + ViewSyncExchange>, VIDExchange>, >, Storage = MemoryStorage, @@ -426,7 +401,6 @@ impl< Message, QuorumExchange< TYPES, - QuorumProposal, MEMBERSHIP, WebCommChannel, Message, @@ -439,7 +413,6 @@ impl< >, ViewSyncExchange< TYPES, - ViewSyncCertificate, MEMBERSHIP, WebCommChannel, Message, @@ -578,7 +551,6 @@ impl< Message, QuorumExchange< TYPES, - QuorumProposal, MEMBERSHIP, Libp2pCommChannel, Message, @@ -591,7 +563,6 @@ impl< >, ViewSyncExchange< TYPES, - ViewSyncCertificate, MEMBERSHIP, Libp2pCommChannel, Message, @@ -802,21 +773,9 @@ pub async fn main_entry_point< Exchanges = Exchanges< TYPES, Message, - QuorumExchange< - TYPES, - QuorumProposal, - MEMBERSHIP, - QUORUMNETWORK, - Message, - >, + QuorumExchange>, CommitteeExchange>, - ViewSyncExchange< - TYPES, - ViewSyncCertificate, - MEMBERSHIP, - VIEWSYNCNETWORK, - Message, - >, + ViewSyncExchange>, VIDExchange>, >, Storage = MemoryStorage, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 4278792004..550249db47 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -7,8 +7,6 @@ use hotshot::{ }, }; use hotshot_types::{ - certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, @@ -28,11 +26,6 @@ pub type VIDNetwork = Libp2pCommChannel; pub type QuorumNetwork = Libp2pCommChannel; pub type ViewSyncNetwork = Libp2pCommChannel; -pub type ThisDAProposal = DAProposal; - -pub type ThisQuorumProposal = QuorumProposal; - -pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { @@ -40,21 +33,9 @@ impl NodeImplementation for NodeImpl { type Exchanges = Exchanges< DemoTypes, Message, - QuorumExchange< - DemoTypes, - ThisQuorumProposal, - ThisMembership, - QuorumNetwork, - Message, - >, + QuorumExchange>, CommitteeExchange>, - ViewSyncExchange< - DemoTypes, - ThisViewSyncProposal, - ThisMembership, - ViewSyncNetwork, - Message, - >, + ViewSyncExchange>, VIDExchange>, >; type ConsensusMessage = SequencingMessage; diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index b424fe0e98..ecfb2c405c 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -7,8 +7,6 @@ use hotshot::{ }, }; use hotshot_types::{ - certificate::ViewSyncCertificate, - data::{DAProposal, QuorumProposal}, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, @@ -28,11 +26,6 @@ pub type VIDNetwork = WebCommChannel; pub type QuorumNetwork = WebCommChannel; pub type ViewSyncNetwork = WebCommChannel; -pub type ThisDAProposal = DAProposal; - -pub type ThisQuorumProposal = QuorumProposal; - -pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for NodeImpl { @@ -40,21 +33,9 @@ impl NodeImplementation for NodeImpl { type Exchanges = Exchanges< DemoTypes, Message, - QuorumExchange< - DemoTypes, - ThisQuorumProposal, - ThisMembership, - QuorumNetwork, - Message, - >, + QuorumExchange>, CommitteeExchange>, - ViewSyncExchange< - DemoTypes, - ThisViewSyncProposal, - ThisMembership, - ViewSyncNetwork, - Message, - >, + ViewSyncExchange>, VIDExchange>, >; type ConsensusMessage = SequencingMessage; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 2ca2553ec1..b29331455a 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -51,16 +51,15 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - data::VidDisperse, + certificate::ViewSyncCertificate, simple_certificate::QuorumCertificate2, traits::{election::ViewSyncExchangeType, node_implementation::TimeoutEx}, }; use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, - data::{DAProposal, Leaf, QuorumProposal}, + data::Leaf, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, @@ -627,21 +626,18 @@ where QuorumEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal, Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, CommitteeEx: ConsensusExchange< TYPES, Message, - Proposal = DAProposal, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, Membership = MEMBERSHIP, @@ -649,14 +645,12 @@ where VIDEx: ConsensusExchange< TYPES, Message, - Proposal = VidDisperse, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, TimeoutEx: ConsensusExchange< TYPES, Message, - Proposal = QuorumProposal, Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, @@ -767,12 +761,8 @@ where handle.clone(), ) .await; - let task_runner = add_view_sync_task::( - task_runner, - internal_event_stream.clone(), - handle.clone(), - ) - .await; + let task_runner = + add_view_sync_task(task_runner, internal_event_stream.clone(), handle.clone()).await; async_spawn(async move { task_runner.launch().await; info!("Task runner exited!"); diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9024491603..71395f36ad 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -27,7 +27,7 @@ use hotshot_task_impls::{ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, - data::{Leaf, QuorumProposal}, + data::Leaf, event::Event, message::{Message, Messages, SequencingMessage}, traits::{ @@ -64,8 +64,7 @@ pub async fn add_network_message_task< TYPES: NodeType, I: NodeImplementation>, MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> - + 'static, + EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> + 'static, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -162,8 +161,7 @@ pub async fn add_network_event_task< TYPES: NodeType, I: NodeImplementation>, MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> - + 'static, + EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> + 'static, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -234,20 +232,12 @@ pub async fn add_consensus_task< handle: SystemContextHandle, ) -> TaskRunner where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: ConsensusExchange, Commitment = Commitment>, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -519,7 +509,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f706c993f9..c89c1ddd77 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -64,20 +64,12 @@ pub struct ConsensusTaskState< I: NodeImplementation>, A: ConsensusApi + 'static, > where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: ConsensusExchange, Commitment = Commitment>, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { /// The global task registry pub registry: GlobalRegistry, @@ -140,18 +132,10 @@ pub struct ConsensusTaskState< /// State for the vote collection task. This handles the building of a QC from a votes received pub struct VoteCollectionTaskState> where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { /// the quorum exchange pub quorum_exchange: Arc>, @@ -189,18 +173,10 @@ where impl> TS for VoteCollectionTaskState where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { } @@ -214,18 +190,10 @@ async fn vote_handle>( VoteCollectionTaskState, ) where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { match event { HotShotEvent::QuorumVoteRecv(vote) => { @@ -334,20 +302,12 @@ impl< > ConsensusTaskState where TYPES::BlockHeader: BlockHeader, - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: ConsensusExchange, Commitment = Commitment>, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -1338,6 +1298,7 @@ where let message = Proposal { data: proposal, signature, + _pd: PhantomData, }; debug!( "Sending proposal for view {:?} \n {:?}", @@ -1363,20 +1324,12 @@ impl< A: ConsensusApi, > TS for ConsensusTaskState where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: ConsensusExchange, Commitment = Commitment>, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { } @@ -1410,20 +1363,12 @@ pub async fn sequencing_consensus_handle< ) where TYPES::BlockHeader: BlockHeader, - QuorumEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment>, - >, + QuorumEx: + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: ConsensusExchange, Commitment = Commitment>, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Proposal = QuorumProposal, - Commitment = Commitment, - >, + TimeoutEx: + ConsensusExchange, Commitment = Commitment>, { if let HotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 057598d0ea..5d258e96f8 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -403,7 +403,11 @@ where }; debug!("Sending DA proposal for view {:?}", data.view_number); - let message = Proposal { data, signature }; + let message = Proposal { + data, + signature, + _pd: PhantomData, + }; self.event_stream .publish(HotShotEvent::SendPayloadCommitment(payload_commitment)) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 3c9f11f367..9d9ccfa103 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -3,15 +3,15 @@ use crate::view_sync::ViewSyncPhase; use commit::Commitment; use either::Either; use hotshot_types::{ - data::{DAProposal, Leaf, VidDisperse}, + certificate::ViewSyncCertificate, + data::{DAProposal, Leaf, QuorumProposal, VidDisperse}, message::Proposal, simple_certificate::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, }, simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2}, traits::node_implementation::{ - CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncProposalType, + CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, VIDMembership, }, vote::ViewSyncVote, }; @@ -22,7 +22,7 @@ pub enum HotShotEvent> { /// Shutdown the task Shutdown, /// A quorum proposal has been received from the network; handled by the consensus task - QuorumProposalRecv(Proposal>, TYPES::SignatureKey), + QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote>), /// A timeout vote recevied from the network; handled by consensus task @@ -30,17 +30,17 @@ pub enum HotShotEvent> { /// Send a timeout vote to the network; emitted by consensus task replicas TimeoutVoteSend(TimeoutVote2>), /// A DA proposal has been received from the network; handled by the DA task - DAProposalRecv(Proposal>, TYPES::SignatureKey), + DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task DAVoteRecv(DAVote2>), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task DACRecv(DACertificate2), /// Send a quorum proposal to the network; emitted by the leader in the consensus task - QuorumProposalSend(Proposal>, TYPES::SignatureKey), + QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task - DAProposalSend(Proposal>, TYPES::SignatureKey), + DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DAVoteSend(DAVote2>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only @@ -55,13 +55,13 @@ pub enum HotShotEvent> { ViewSyncVoteSend(ViewSyncVote), /// Send a view sync certificate to the network; emitted by a relay in the view sync task ViewSyncCertificateSend( - Proposal>, + Proposal>, TYPES::SignatureKey, ), /// Receive a view sync vote from the network; received by a relay in the view sync task ViewSyncVoteRecv(ViewSyncVote), /// Receive a view sync certificate from the network; received by a replica in the view sync task - ViewSyncCertificateRecv(Proposal>), + ViewSyncCertificateRecv(Proposal>), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::Time), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only @@ -79,11 +79,11 @@ pub enum HotShotEvent> { /// Send VID shares to VID storage nodes; emitted by the DA leader /// /// Like [`DAProposalSend`]. - VidDisperseSend(Proposal>, TYPES::SignatureKey), + VidDisperseSend(Proposal>, TYPES::SignatureKey), /// Vid disperse data has been received from the network; handled by the DA task /// /// Like [`DAProposalRecv`]. - VidDisperseRecv(Proposal>, TYPES::SignatureKey), + VidDisperseRecv(Proposal>, TYPES::SignatureKey), /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal /// /// Like [`DAVoteSend`] diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7dec0b91b2..13e6dd75b7 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -28,6 +28,7 @@ use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, + marker::PhantomData, sync::Arc, time::Instant, }; @@ -264,6 +265,7 @@ where }, // TODO (Keyao) This is also signed in DA task. signature: self.quorum_exchange.sign_payload_commitment(block.commit()), + _pd: PhantomData, }, self.quorum_exchange.public_key().clone(), )) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 035dfc72c2..789f132e4c 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -30,7 +30,7 @@ use hotshot_types::{ vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use tracing::{debug, error, instrument}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync @@ -65,7 +65,6 @@ pub struct ViewSyncTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -110,7 +109,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -134,7 +132,6 @@ pub struct ViewSyncReplicaTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -173,7 +170,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -228,7 +224,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -597,7 +592,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -925,7 +919,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, Certificate = ViewSyncCertificate, Commitment = Commitment>, >, @@ -994,6 +987,7 @@ where let message = Proposal { data: certificate.clone(), signature, + _pd: PhantomData, }; self.event_stream .publish(HotShotEvent::ViewSyncCertificateSend( diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 7af2da9332..9fbc87df98 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -15,8 +15,7 @@ use hotshot::{ }; use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, - data::{QuorumProposal, ViewNumber}, + data::ViewNumber, message::{Message, SequencingMessage}, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, @@ -102,7 +101,6 @@ pub type SequencingLibp2pExchange = Exchanges< Message, QuorumExchange< TestTypes, - QuorumProposal, StaticMembership, StaticLibp2pQuorumComm, Message, @@ -115,7 +113,6 @@ pub type SequencingLibp2pExchange = Exchanges< >, ViewSyncExchange< TestTypes, - ViewSyncCertificate, StaticMembership, StaticLibp2pViewSyncComm, Message, @@ -220,7 +217,6 @@ pub type SequencingMemoryExchange = Exchanges< Message, QuorumExchange< TestTypes, - QuorumProposal, StaticMembership, StaticMemoryQuorumComm, Message, @@ -233,7 +229,6 @@ pub type SequencingMemoryExchange = Exchanges< >, ViewSyncExchange< TestTypes, - ViewSyncCertificate, StaticMembership, StaticMemoryViewSyncComm, Message, @@ -354,17 +349,10 @@ impl NodeImplementation for MemoryImpl { pub type SequencingWebExchanges = Exchanges< TestTypes, Message, - QuorumExchange< - TestTypes, - QuorumProposal, - StaticMembership, - StaticWebQuorumComm, - Message, - >, + QuorumExchange>, CommitteeExchange>, ViewSyncExchange< TestTypes, - ViewSyncCertificate, StaticMembership, StaticWebViewSyncComm, Message, @@ -484,7 +472,6 @@ pub type CombinedExchange = Exchanges< Message, QuorumExchange< TestTypes, - QuorumProposal, StaticMembership, StaticCombinedQuorumComm, Message, @@ -497,7 +484,6 @@ pub type CombinedExchange = Exchanges< >, ViewSyncExchange< TestTypes, - ViewSyncCertificate, StaticMembership, StaticCombinedViewSyncComm, Message, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ab5c48bec1..5d90959950 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,3 +1,5 @@ +use std::marker::PhantomData; + use crate::{ node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, @@ -140,12 +142,13 @@ pub async fn build_quorum_proposal( handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> Proposal> { +) -> Proposal> { let (proposal, signature) = build_quorum_proposal_and_signature(handle, private_key, view).await; Proposal { data: proposal, signature, + _pd: PhantomData, } } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index ff755286e0..1c4fa63f91 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -14,7 +14,7 @@ use hotshot_types::{ node_implementation::ExchangesType, state::ConsensusTime, }, }; -use std::collections::HashMap; +use std::{collections::HashMap, marker::PhantomData}; #[cfg_attr( async_executor_impl = "tokio", @@ -56,6 +56,7 @@ async fn test_da_task() { let message = Proposal { data: proposal, signature, + _pd: PhantomData, }; // TODO for now reuse the same block payload commitment and signature as DA committee diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index b9b545d8cc..b9c33c4291 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -14,8 +14,6 @@ use hotshot::traits::NodeImplementation; use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; use hotshot_types::block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}; -use hotshot_types::certificate::ViewSyncCertificate; -use hotshot_types::data::{DAProposal, QuorumProposal}; use hotshot_types::message::{Message, SequencingMessage}; use hotshot_types::traits::election::{ CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange, @@ -70,11 +68,6 @@ pub type QuorumNetwork = MemoryCommChannel; pub type ViewSyncNetwork = MemoryCommChannel; pub type VIDNetwork = MemoryCommChannel; -pub type ThisDAProposal = DAProposal; - -pub type ThisQuorumProposal = QuorumProposal; - -pub type ThisViewSyncProposal = ViewSyncCertificate; pub type ThisViewSyncVote = ViewSyncVote; impl NodeImplementation for TestImpl { @@ -82,21 +75,9 @@ impl NodeImplementation for TestImpl { type Exchanges = Exchanges< Test, Message, - QuorumExchange< - Test, - ThisQuorumProposal, - ThisMembership, - QuorumNetwork, - Message, - >, + QuorumExchange>, CommitteeExchange>, - ViewSyncExchange< - Test, - ThisViewSyncProposal, - ThisMembership, - ViewSyncNetwork, - Message, - >, + ViewSyncExchange>, VIDExchange>, >; type ConsensusMessage = SequencingMessage; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index e9409bc69b..d0b93126cc 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -11,7 +11,7 @@ use hotshot_types::{ consensus_api::ConsensusSharedApi, node_implementation::ExchangesType, state::ConsensusTime, }, }; -use std::collections::HashMap; +use std::{collections::HashMap, marker::PhantomData}; #[cfg(test)] #[cfg_attr( @@ -56,6 +56,7 @@ async fn test_network_task() { view_number: ViewNumber::new(2), }, signature, + _pd: PhantomData, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; // TODO for now reuse the same block payload commitment and signature as DA committee @@ -68,6 +69,7 @@ async fn test_network_task() { common: vid_disperse.common, }, signature: da_proposal.signature.clone(), + _pd: PhantomData, }; // Every event input is seen on the event stream in the output. diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index edb54a0d9a..67a3bc7b2e 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -15,6 +15,7 @@ use hotshot_types::{ }; use hotshot_types::{simple_vote::VIDVote2, traits::election::VIDExchangeType}; use std::collections::HashMap; +use std::marker::PhantomData; #[cfg_attr( async_executor_impl = "tokio", @@ -54,6 +55,7 @@ async fn test_vid_task() { let message = Proposal { data: proposal, signature, + _pd: PhantomData, }; let vid_proposal = Proposal { data: VidDisperse { @@ -63,6 +65,7 @@ async fn test_vid_task() { common: vid_disperse.common, }, signature: message.signature.clone(), + _pd: PhantomData, }; // Every event input is seen on the event stream in the output. diff --git a/types/src/message.rs b/types/src/message.rs index fc6799ba38..58160dbbee 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,6 +3,8 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. +use crate::certificate::ViewSyncCertificate; +use crate::data::QuorumProposal; use crate::simple_certificate::{DACertificate2, VIDCertificate2}; use crate::simple_vote::{DAVote2, TimeoutVote2, VIDVote2}; use crate::traits::node_implementation::CommitteeMembership; @@ -13,8 +15,7 @@ use crate::{ traits::{ network::{NetworkMsg, ViewMessage}, node_implementation::{ - ExchangesType, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncProposalType, + ExchangesType, NodeImplementation, NodeType, QuorumMembership, VIDMembership, }, signature_key::EncodedSignature, }, @@ -23,6 +24,7 @@ use crate::{ use derivative::Derivative; use either::Either::{self, Left, Right}; +use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, marker::PhantomData}; @@ -151,7 +153,7 @@ where I::Exchanges: ExchangesType>, { /// Message with a quorum proposal. - Proposal(Proposal>, TYPES::SignatureKey), + Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. Vote( QuorumVote>, @@ -160,7 +162,7 @@ where /// Message with a view sync vote. ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. - ViewSyncCertificate(Proposal>), + ViewSyncCertificate(Proposal>), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), @@ -327,7 +329,7 @@ where I::Exchanges: ExchangesType>, { /// Message with a quorum proposal. - Proposal(Proposal>), + Proposal(Proposal>), /// Message with a quorum vote. Vote(QuorumVote>), @@ -336,7 +338,7 @@ where ViewSyncVote(ViewSyncVote), /// Message with a view sync certificate. - ViewSyncCertificate(Proposal>), + ViewSyncCertificate(Proposal>), /// Message with a Timeout vote TimeoutVote(TimeoutVote2>), @@ -503,11 +505,13 @@ pub enum DataMessage { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Prepare qc from the leader -pub struct Proposal> { +pub struct Proposal + DeserializeOwned> { // NOTE: optimization could include view number to help look up parent leaf // could even do 16 bit numbers if we want /// The data being proposed. pub data: PROPOSAL, /// The proposal must be signed by the view leader pub signature: EncodedSignature, + /// Phantom for TYPES + pub _pd: PhantomData, } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index ce8fc70c4d..9feffcc3b6 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,7 +9,7 @@ use super::{ }; use crate::{ certificate::{AssembledSignature, ViewSyncCertificate}, - data::{DAProposal, Leaf, VidDisperse}, + data::Leaf, vote::ViewSyncVoteAccumulator, }; @@ -254,7 +254,6 @@ pub trait Membership: /// allowing them to vote and query information about the overall state of the protocol (such as /// membership and leader status). pub trait ConsensusExchange: Send + Sync { - /// The committee eligible to make decisions. type Membership: Membership; /// Network used by [`Membership`](Self::Membership) to communicate. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 2462bc95e0..15fbba8b53 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -253,17 +253,6 @@ pub struct Exchanges< pub timeout_exchange: TimeoutExchange< TYPES, - < as ExchangesType>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Proposal, < as ExchangesType>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Proposal, < as ExchangesType>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Proposal, < = - as ConsensusExchange>>::Proposal; - -/// A proposal to provide data availability for a new leaf. -pub type CommitteeProposalType = - as ConsensusExchange>>::Proposal; - -/// A proposal to sync the view. -pub type ViewSyncProposalType = - as ConsensusExchange>>::Proposal; - /// A vote on a [`ViewSyncProposal`]. pub type ViewSyncVoteType = as ViewSyncExchangeType>>::Vote; From 344e25e79c2e0abacf1858bed56779b2c474481c Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Mon, 13 Nov 2023 11:03:40 -0500 Subject: [PATCH 0360/1393] [hotshot-state-prover] Schnorr signature verification (#2033) * threshold checking circuit * change some variables to public * comments & tests(TBD) * doing test * test * Adding back the bls_comm * small refactor for test * use `STAKE_TABLE_CAPACITY` * commitment computation * Move state definition to another crate * Formatting * merge imports * test; bug fixes * augment test & comments * Addressing comments * duplicated items * final conflict resolve * consistent naming * one more good path test * signature verification * change the capacity & address comments * address comments * code fix * fix test * Fix merge issue * remove duplicated doc * better test --- hotshot-stake-table/src/config.rs | 2 +- hotshot-state-prover/src/circuit.rs | 326 ++++++++++++++++++++++------ 2 files changed, 262 insertions(+), 66 deletions(-) diff --git a/hotshot-stake-table/src/config.rs b/hotshot-stake-table/src/config.rs index 41c27d73b9..12f69b2e85 100644 --- a/hotshot-stake-table/src/config.rs +++ b/hotshot-stake-table/src/config.rs @@ -1,4 +1,4 @@ //! Configuration file for stake table /// Capacity of a stake table -pub const STAKE_TABLE_CAPACITY: usize = 1000; +pub const STAKE_TABLE_CAPACITY: usize = 200; diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index d116dc378b..cc81f8b37f 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -12,7 +12,10 @@ use hotshot_types::traits::{ }; use jf_plonk::errors::PlonkError; use jf_primitives::{ - circuit::{rescue::RescueNativeGadget, signature::schnorr::VerKeyVar}, + circuit::{ + rescue::RescueNativeGadget, + signature::schnorr::{SignatureGadget, SignatureVar, VerKeyVar}, + }, rescue::RescueParameter, signatures::{ bls_over_bn254::VerKey as BLSVerKey, @@ -31,19 +34,86 @@ pub(crate) fn u256_to_field(v: &U256) -> F { /// Variable for stake table entry #[derive(Clone, Debug)] pub struct StakeTableEntryVar { + /// Schnorr verification keys pub schnorr_ver_key: VerKeyVar, + /// Stake amount pub stake_amount: Variable, } /// Light client state Variable /// The stake table commitment is a triple (bls_keys_comm, schnorr_keys_comm, stake_amount_comm). +/// Variable for a stake table commitment +#[derive(Clone, Debug)] +pub struct StakeTableCommVar { + /// Commitment for BLS keys + pub bls_keys_comm: Variable, + /// Commitment for Schnorr keys + pub schnorr_keys_comm: Variable, + /// Commitment for stake amount + pub stake_amount_comm: Variable, +} + +/// Light client state Variable #[derive(Clone, Debug)] pub struct LightClientStateVar { - pub view_number_var: Variable, - pub block_height_var: Variable, - pub block_comm_var: Variable, - pub fee_ledger_comm_var: Variable, - pub stake_table_comm_var: (Variable, Variable, Variable), + /// Private list holding all variables + /// vars[0]: view number + /// vars[1]: block height + /// vars[2]: block commitment + /// vars[3]: fee ledger commitment + /// vars[4-6]: stake table commitment + vars: [Variable; 7], +} + +impl LightClientStateVar { + pub fn new( + circuit: &mut PlonkCircuit, + state: &LightClientState, + ) -> Result { + let view_number_f = F::from(state.view_number as u64); + let block_height_f = F::from(state.block_height as u64); + Ok(Self { + vars: [ + circuit.create_public_variable(view_number_f)?, + circuit.create_public_variable(block_height_f)?, + circuit.create_public_variable(state.block_comm)?, + circuit.create_public_variable(state.fee_ledger_comm)?, + circuit.create_public_variable(state.stake_table_comm.0)?, + circuit.create_public_variable(state.stake_table_comm.1)?, + circuit.create_public_variable(state.stake_table_comm.2)?, + ], + }) + } + + pub fn view_number(&self) -> Variable { + self.vars[0] + } + + pub fn block_height(&self) -> Variable { + self.vars[1] + } + + pub fn block_comm(&self) -> Variable { + self.vars[2] + } + + pub fn fee_ledger_comm(&self) -> Variable { + self.vars[3] + } + + pub fn stake_table_comm(&self) -> StakeTableCommVar { + StakeTableCommVar { + bls_keys_comm: self.vars[4], + schnorr_keys_comm: self.vars[5], + stake_amount_comm: self.vars[6], + } + } +} + +impl AsRef<[Variable]> for LightClientStateVar { + fn as_ref(&self) -> &[Variable] { + &self.vars + } } #[derive(Clone, Debug)] @@ -65,7 +135,7 @@ where /// - all schnorr signatures are valid pub fn build( stake_table: &ST, - _sigs: &[Signature

], + sigs: &[Signature

], lightclient_state: &LightClientState, signer_bit_vec: &[bool], threshold: &U256, @@ -76,18 +146,11 @@ where { let mut circuit = PlonkCircuit::new_turbo_plonk(); - // Dummy circuit implementation, fill in the details later - // TODO(Chengyu): - // - [DONE] the signer's accumulated weight exceeds the quorum threshold - // - [DONE] The commitment of the stake table as [https://www.notion.so/espressosys/Light-Client-Contract-a416ebbfa9f342d79fccbf90de9706ef?pvs=4#6c0e26d753cd42e9bb0f22db1c519f45] - // - Batch Schnorr signature verification - // creating variables for stake table entries let mut stake_table_var = stake_table .try_iter(SnapshotVersion::LastEpochStart)? .map(|(_bls_ver_key, amount, schnorr_ver_key)| { - let schnorr_ver_key = - VerKeyVar(circuit.create_point_variable(schnorr_ver_key.to_affine().into())?); + let schnorr_ver_key = circuit.create_signature_vk_variable(&schnorr_ver_key)?; let stake_amount = circuit.create_variable(u256_to_field::(&amount))?; Ok(StakeTableEntryVar { schnorr_ver_key, @@ -104,6 +167,20 @@ where }, ); + // creating variables for signatures + let mut sig_vars = sigs + .iter() + .map(|sig| circuit.create_signature_variable(sig)) + .collect::, CircuitError>>()?; + sig_vars.resize( + STAKE_TABLE_CAPACITY, + SignatureVar { + s: circuit.zero(), + R: circuit.neutral_point_variable(), + }, + ); + + // creating Boolean variables for the bit vector let mut signer_bit_vec_var = signer_bit_vec .iter() .map(|&b| circuit.create_boolean_variable(b)) @@ -113,21 +190,10 @@ where let threshold = u256_to_field::(threshold); let threshold_var = circuit.create_public_variable(threshold)?; + let lightclient_state_var = LightClientStateVar::new(&mut circuit, lightclient_state)?; + let view_number_f = F::from(lightclient_state.view_number as u64); let block_height_f = F::from(lightclient_state.block_height as u64); - let lightclient_state_var = LightClientStateVar { - view_number_var: circuit.create_public_variable(view_number_f)?, - block_height_var: circuit.create_public_variable(block_height_f)?, - block_comm_var: circuit.create_public_variable(lightclient_state.block_comm)?, - fee_ledger_comm_var: circuit - .create_public_variable(lightclient_state.fee_ledger_comm)?, - stake_table_comm_var: ( - circuit.create_public_variable(lightclient_state.stake_table_comm.0)?, - circuit.create_public_variable(lightclient_state.stake_table_comm.1)?, - circuit.create_public_variable(lightclient_state.stake_table_comm.2)?, - ), - }; - let public_inputs = vec![ threshold, view_number_f, @@ -163,6 +229,7 @@ where let acc_amount_var = circuit.sum(&signed_amount_var)?; circuit.enforce_leq(threshold_var, acc_amount_var)?; + // checking the commitment for the list of schnorr keys let schnorr_ver_key_preimage_vars = stake_table_var .iter() .flat_map(|var| [var.schnorr_ver_key.0.get_x(), var.schnorr_ver_key.0.get_y()]) @@ -174,9 +241,10 @@ where )?[0]; circuit.enforce_equal( schnorr_ver_key_comm, - lightclient_state_var.stake_table_comm_var.1, + lightclient_state_var.stake_table_comm().schnorr_keys_comm, )?; + // checking the commitment for the list of stake amounts let stake_amount_preimage_vars = stake_table_var .iter() .map(|var| var.stake_amount) @@ -188,9 +256,33 @@ where )?[0]; circuit.enforce_equal( stake_amount_comm, - lightclient_state_var.stake_table_comm_var.2, + lightclient_state_var.stake_table_comm().stake_amount_comm, )?; + // checking all signatures + let verification_result_vars = stake_table_var + .iter() + .zip(sig_vars) + .map(|(entry, sig)| { + SignatureGadget::<_, P>::check_signature_validity( + &mut circuit, + &entry.schnorr_ver_key, + lightclient_state_var.as_ref(), + &sig, + ) + }) + .collect::, CircuitError>>()?; + let bit_x_result_vars = signer_bit_vec_var + .iter() + .zip(verification_result_vars) + .map(|(&bit, result)| { + let neg_bit = circuit.logic_neg(bit)?; + circuit.logic_or(neg_bit, result) + }) + .collect::, CircuitError>>()?; + let sig_ver_result = circuit.logic_and_all(&bit_x_result_vars)?; + circuit.enforce_true(sig_ver_result.0)?; + circuit.finalize_for_arithmetization()?; Ok((circuit, public_inputs)) } @@ -200,41 +292,55 @@ where mod tests { use super::{LightClientState, StateUpdateBuilder}; use ark_ed_on_bn254::EdwardsConfig as Config; + use ark_std::rand::{CryptoRng, RngCore}; use ethereum_types::U256; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; - use jf_primitives::signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, - SchnorrSignatureScheme, SignatureScheme, + use jf_primitives::{ + crhf::{VariableLengthRescueCRHF, CRHF}, + errors::PrimitivesError, + signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, + schnorr::Signature, + SchnorrSignatureScheme, SignatureScheme, + }, }; use jf_relation::Circuit; + use jf_utils::test_rng; type F = ark_ed_on_bn254::Fq; type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; + type SchnorrSignKey = jf_primitives::signatures::schnorr::SignKey; - fn key_pairs_for_testing() -> Vec<(BLSVerKey, SchnorrVerKey)> { - let mut prng = jf_utils::test_rng(); - (0..10) + fn key_pairs_for_testing( + num_validators: usize, + prng: &mut R, + ) -> (Vec, Vec<(SchnorrSignKey, SchnorrVerKey)>) { + let bls_keys = (0..num_validators) .map(|_| { - ( - BLSOverBN254CurveSignatureScheme::key_gen(&(), &mut prng) - .unwrap() - .1, - SchnorrSignatureScheme::key_gen(&(), &mut prng).unwrap().1, - ) + BLSOverBN254CurveSignatureScheme::key_gen(&(), prng) + .unwrap() + .1 }) - .collect::>() + .collect::>(); + let schnorr_keys = (0..num_validators) + .map(|_| SchnorrSignatureScheme::key_gen(&(), prng).unwrap()) + .collect::>(); + (bls_keys, schnorr_keys) } fn stake_table_for_testing( - keys: &[(BLSVerKey, SchnorrVerKey)], + bls_keys: &[BLSVerKey], + schnorr_keys: &[(SchnorrSignKey, SchnorrVerKey)], ) -> StakeTable { let mut st = StakeTable::::new(); // Registering keys - keys.iter().enumerate().for_each(|(i, key)| { - st.register(key.0, U256::from((i + 1) as u32), key.1.clone()) - .unwrap() - }); + bls_keys.iter().enumerate().zip(schnorr_keys).for_each( + |((i, bls_key), (_, schnorr_key))| { + st.register(*bls_key, U256::from((i + 1) as u32), schnorr_key.clone()) + .unwrap() + }, + ); // Freeze the stake table st.advance(); st.advance(); @@ -243,25 +349,61 @@ mod tests { #[test] fn test_circuit_building() { - let keys = key_pairs_for_testing(); - let st = stake_table_for_testing(&keys); + let num_validators = 10; + let mut prng = test_rng(); + + let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); + let st = stake_table_for_testing(&bls_keys, &schnorr_keys); + + let block_comm = + VariableLengthRescueCRHF::::evaluate(vec![F::from(1u32), F::from(2u32)]).unwrap() + [0]; + let fee_ledger_comm = + VariableLengthRescueCRHF::::evaluate(vec![F::from(3u32), F::from(5u32)]).unwrap() + [0]; let lightclient_state = LightClientState { - view_number: 0, - block_height: 0, - block_comm: F::default(), - fee_ledger_comm: F::default(), + view_number: 100, + block_height: 73, + block_comm, + fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; + let state_msg = [ + F::from(lightclient_state.view_number as u64), + F::from(lightclient_state.block_height as u64), + lightclient_state.block_comm, + lightclient_state.fee_ledger_comm, + lightclient_state.stake_table_comm.0, + lightclient_state.stake_table_comm.1, + lightclient_state.stake_table_comm.2, + ]; + + let sigs = schnorr_keys + .iter() + .map(|(key, _)| SchnorrSignatureScheme::::sign(&(), key, state_msg, &mut prng)) + .collect::, PrimitivesError>>() + .unwrap(); // bit vector with total weight 26 let bit_vec = [ true, true, true, false, true, true, false, false, true, false, ]; + let bit_masked_sigs = bit_vec + .iter() + .zip(sigs.iter()) + .map(|(bit, sig)| { + if *bit { + sig.clone() + } else { + Signature::::default() + } + }) + .collect::>(); // good path let (circuit, public_inputs) = StateUpdateBuilder::::build( &st, - &[], + &bit_masked_sigs, &lightclient_state, &bit_vec, &U256::from(26u32), @@ -271,7 +413,7 @@ mod tests { let (circuit, public_inputs) = StateUpdateBuilder::::build( &st, - &[], + &bit_masked_sigs, &lightclient_state, &bit_vec, &U256::from(10u32), @@ -284,9 +426,20 @@ mod tests { let bad_bit_vec = [ true, true, true, true, true, false, false, true, false, false, ]; + let bad_bit_masked_sigs = bad_bit_vec + .iter() + .zip(sigs.iter()) + .map(|(bit, sig)| { + if *bit { + sig.clone() + } else { + Signature::::default() + } + }) + .collect::>(); let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( &st, - &[], + &bad_bit_masked_sigs, &lightclient_state, &bad_bit_vec, &U256::from(25u32), @@ -297,16 +450,27 @@ mod tests { .is_err()); // bad path: bad stake table commitment - let bad_lightclient_state = LightClientState { - view_number: 0, - block_height: 0, - block_comm: F::default(), - fee_ledger_comm: F::default(), - stake_table_comm: (F::default(), F::default(), F::default()), - }; + let mut bad_lightclient_state = lightclient_state.clone(); + bad_lightclient_state.stake_table_comm.1 = F::default(); + let bad_state_msg = [ + F::from(bad_lightclient_state.view_number as u64), + F::from(bad_lightclient_state.block_height as u64), + bad_lightclient_state.block_comm, + bad_lightclient_state.fee_ledger_comm, + bad_lightclient_state.stake_table_comm.0, + bad_lightclient_state.stake_table_comm.1, + bad_lightclient_state.stake_table_comm.2, + ]; + let sig_for_bad_state = schnorr_keys + .iter() + .map(|(key, _)| { + SchnorrSignatureScheme::::sign(&(), key, bad_state_msg, &mut prng) + }) + .collect::, PrimitivesError>>() + .unwrap(); let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( &st, - &[], + &sig_for_bad_state, &bad_lightclient_state, &bit_vec, &U256::from(26u32), @@ -315,5 +479,37 @@ mod tests { assert!(bad_circuit .check_circuit_satisfiability(&public_inputs) .is_err()); + + // bad path: incorrect signatures + let mut wrong_light_client_state = lightclient_state.clone(); + // state with a different bls key commitment + wrong_light_client_state.stake_table_comm.0 = F::default(); + let wrong_state_msg = [ + F::from(wrong_light_client_state.view_number as u64), + F::from(wrong_light_client_state.block_height as u64), + wrong_light_client_state.block_comm, + wrong_light_client_state.fee_ledger_comm, + wrong_light_client_state.stake_table_comm.0, + wrong_light_client_state.stake_table_comm.1, + wrong_light_client_state.stake_table_comm.2, + ]; + let wrong_sigs = schnorr_keys + .iter() + .map(|(key, _)| { + SchnorrSignatureScheme::::sign(&(), key, wrong_state_msg, &mut prng) + }) + .collect::, PrimitivesError>>() + .unwrap(); + let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( + &st, + &wrong_sigs, + &lightclient_state, + &bit_vec, + &U256::from(26u32), + ) + .unwrap(); + assert!(bad_circuit + .check_circuit_satisfiability(&public_inputs) + .is_err()); } } From fc274c48987b8fcf970890c05189228ab0bfd48c Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 13 Nov 2023 13:31:11 -0500 Subject: [PATCH 0361/1393] remove some redundant clones (#2042) --- hotshot/src/traits/networking/libp2p_network.rs | 3 +-- testing/src/overall_safety_task.rs | 2 +- testing/src/task_helpers.rs | 3 +-- types/src/traits/election.rs | 2 +- types/src/vote.rs | 4 ++-- types/src/vote2.rs | 4 ++-- 6 files changed, 8 insertions(+), 10 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 0c6a4ba446..f445e357b4 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -361,7 +361,6 @@ impl Libp2pNetwork { let latest_seen_view = self.inner.latest_seen_view.clone(); // deals with handling lookup queue. should be infallible - let handle_ = handle.clone(); async_spawn(async move { // cancels on shutdown while let Ok(Some((view_number, pk))) = node_lookup_recv.recv().await { @@ -373,7 +372,7 @@ impl Libp2pNetwork { // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { // look up - if let Err(err) = handle_.lookup_node::(pk.clone(), dht_timeout).await { + if let Err(err) = handle.lookup_node::(pk.clone(), dht_timeout).await { error!("Failed to perform lookup for key {:?}: {}", pk, err); }; } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 058d4768c1..1ee21613ae 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -207,7 +207,7 @@ impl> RoundResult let (state, payload_commitment) = (leaf.get_state(), leaf.get_payload_commitment()); - match self.state_map.entry(state.clone()) { + match self.state_map.entry(state) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ccaf327891..230082f6d5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -110,12 +110,11 @@ async fn build_quorum_proposal_and_signature( panic!("Failed to find high QC parent."); }; let parent_leaf = leaf.clone(); - let parent_header = parent_leaf.block_header.clone(); // every event input is seen on the event stream in the output. let block = ::genesis(); let payload_commitment = block.commit(); - let block_header = VIDBlockHeader::new(payload_commitment, &parent_header); + let block_header = VIDBlockHeader::new(payload_commitment, &parent_leaf.block_header); let leaf = Leaf { view_number: ViewNumber::new(view), justify_qc: consensus.high_qc.clone(), diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 642673b571..4df825a4c6 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -988,7 +988,7 @@ impl< fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool { // Sishan NOTE TODO: would be better to test this, looks like this func is never called. - let (certificate_internal, _threshold, vote_data) = match certificate.clone() { + let (certificate_internal, _threshold, vote_data) = match certificate { ViewSyncCertificate::PreCommit(certificate_internal) => { let vote_data = ViewSyncData:: { relay: self diff --git a/types/src/vote.rs b/types/src/vote.rs index 3f7befe196..a7af6b6a08 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -295,7 +295,7 @@ impl ViewSyncVoteAccumulator { if *commit_stake_casted >= u64::from(self.success_threshold) { let real_qc_pp = ::get_public_parameter( - stake_table_entries.clone(), + stake_table_entries, U256::from(self.success_threshold.get()), ); @@ -310,7 +310,7 @@ impl ViewSyncVoteAccumulator { if *finalize_stake_casted >= u64::from(self.success_threshold) { let real_qc_pp = ::get_public_parameter( - stake_table_entries.clone(), + stake_table_entries, U256::from(self.success_threshold.get()), ); diff --git a/types/src/vote2.rs b/types/src/vote2.rs index e510bef4c6..6efb06b252 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -150,7 +150,7 @@ impl< // TODO: Get the stake from the stake table entry. *total_stake_casted += 1; total_vote_map.insert( - encoded_key.clone(), + encoded_key, (vote.get_signature(), vote.get_data_commitment()), ); @@ -158,7 +158,7 @@ impl< // Assemble QC let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = ::get_public_parameter( - stake_table.clone(), + stake_table, U256::from(CERT::threshold(membership)), ); From 38a66fe81818e37c441a0a31fc95d4360e601480 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 13 Nov 2023 22:07:39 -0500 Subject: [PATCH 0362/1393] Reworking main view sync task to separate votes into 3 --- task-impls/src/view_sync.rs | 145 +++++++++++++++++++++++------------- 1 file changed, 92 insertions(+), 53 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 2e56ee9048..9928295983 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -17,7 +17,7 @@ use hotshot_types::{ node_implementation::ViewSyncMembership, }, vote::ViewSyncVoteAccumulator, - vote2::{HasViewNumber, Vote2, VoteAccumulator2}, + vote2::{HasViewNumber, Vote2, VoteAccumulator2, Certificate2}, }; use bitvec::prelude::*; @@ -200,7 +200,9 @@ pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< /// State of a view sync relay task pub struct ViewSyncRelayTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + VOTE: Vote2, + CERTIFICATE: Certificate2, > { /// Event stream to publish events to pub event_stream: ChannelStream>, @@ -213,10 +215,10 @@ pub struct ViewSyncRelayTaskState< pub accumulator: Either< VoteAccumulator2< TYPES, - ViewSyncPreCommitVote>, - ViewSyncPreCommitCertificate2, + VOTE, + CERTIFICATE >, - ViewSyncPreCommitCertificate2, + CERTIFICATE, >, /// Our node id; for logging pub id: u64, @@ -228,17 +230,20 @@ impl< TYPES, Leaf = Leaf, ConsensusMessage = SequencingMessage, + >, - > TS for ViewSyncRelayTaskState + VOTE: Vote2 + std::marker::Send + std::marker::Sync + 'static, + CERTIFICATE: Certificate2 + std::marker::Send + std::marker::Sync + 'static, + > TS for ViewSyncRelayTaskState { } /// Types used by the view sync relay task -pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< +pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< ViewSyncTaskError, HotShotEvent, ChannelStream>, - ViewSyncRelayTaskState, + ViewSyncRelayTaskState, >; impl< @@ -938,7 +943,7 @@ impl< Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - > ViewSyncRelayTaskState + > ViewSyncRelayTaskState>, ViewSyncPreCommitCertificate2> where ViewSyncEx: ViewSyncExchangeType< TYPES, @@ -947,6 +952,7 @@ where Certificate = ViewSyncCertificate, Commitment = Commitment>, >, + { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -955,7 +961,7 @@ where event: HotShotEvent, ) -> ( std::option::Option, - ViewSyncRelayTaskState, + ViewSyncRelayTaskState>, ViewSyncPreCommitCertificate2> ) { match event { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { @@ -995,50 +1001,83 @@ where }; (None, self) - // let accumulator = self - // .accumulator - // .left() - // .unwrap() - // .accumulate(&vote, self.exchange.membership()); - - // self.accumulator = match accumulator { - // Left(new_accumulator) => Either::Left(new_accumulator), - // Right(certificate) => { - // let signature = - // self.exchange.sign_certificate_proposal(certificate.clone()); - // let message = Proposal { - // data: certificate.clone(), - // signature, - // }; - // self.event_stream - // .publish(HotShotEvent::ViewSyncCertificateSend( - // message, - // self.exchange.public_key().clone(), - // )) - // .await; - - // // Reset accumulator for new certificate - // let new_accumulator = ViewSyncVoteAccumulator { - // pre_commit_vote_outcomes: HashMap::new(), - // commit_vote_outcomes: HashMap::new(), - // finalize_vote_outcomes: HashMap::new(), - - // success_threshold: self.exchange.success_threshold(), - // failure_threshold: self.exchange.failure_threshold(), - - // sig_lists: Vec::new(), - // signers: bitvec![0; self.exchange.total_nodes()], - // }; - // either::Left(new_accumulator) - // } - // }; - - // if phase == ViewSyncPhase::Finalize { - // (Some(HotShotTaskCompleted::ShutDown), self) - // } else { - // (None, self) - // } } + HotShotEvent::ViewSyncCommitVoteRecv(vote) => { + // Ignore this vote if we are not the correct relay + if !self + .exchange + .is_leader(vote.get_data().round + vote.get_data().relay) + { + info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); + return (None, self); + } + + debug!( + "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + *vote.get_data().round, + vote.get_data().relay + ); + + match self.accumulator { + Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), + Left(accumulator) => { + match accumulator.accumulate(&vote, self.exchange.membership()) { + Left(new_accumulator) => self.accumulator = Either::Left(new_accumulator), + Right(certificate) => { + self.event_stream + .publish(HotShotEvent::ViewSyncCommitCertificate2Send( + certificate.clone(), + )) + .await; + self.accumulator = Right(certificate); + + return (Some(HotShotTaskCompleted::ShutDown), self); + } + } + } + }; + (None, self) + + } + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { + // Ignore this vote if we are not the correct relay + // TODO ED Replace exchange with membership + if !self + .exchange + .is_leader(vote.get_data().round + vote.get_data().relay) + { + info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); + return (None, self); + } + + debug!( + "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + *vote.get_data().round, + vote.get_data().relay + ); + + match self.accumulator { + Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), + Left(accumulator) => { + match accumulator.accumulate(&vote, self.exchange.membership()) { + Left(new_accumulator) => self.accumulator = Either::Left(new_accumulator), + Right(certificate) => { + self.event_stream + .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( + certificate.clone(), + )) + .await; + self.accumulator = Right(certificate); + + return (Some(HotShotTaskCompleted::ShutDown), self); + } + } + } + }; + (None, self) + + } + _ => (None, self), } } From fab02e5a7d97b9228d7a077fe9604f4f658d3f7f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 13 Nov 2023 22:17:11 -0500 Subject: [PATCH 0363/1393] PreCommit vote and accumulator compiling now --- task-impls/src/view_sync.rs | 166 +++++++++++------------------------- 1 file changed, 52 insertions(+), 114 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 9928295983..a4ce8e3ddf 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -17,7 +17,7 @@ use hotshot_types::{ node_implementation::ViewSyncMembership, }, vote::ViewSyncVoteAccumulator, - vote2::{HasViewNumber, Vote2, VoteAccumulator2, Certificate2}, + vote2::{Certificate2, HasViewNumber, Vote2, VoteAccumulator2}, }; use bitvec::prelude::*; @@ -200,8 +200,8 @@ pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< /// State of a view sync relay task pub struct ViewSyncRelayTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, - VOTE: Vote2, + I: NodeImplementation, ConsensusMessage = SequencingMessage>, + VOTE: Vote2, CERTIFICATE: Certificate2, > { /// Event stream to publish events to @@ -212,14 +212,7 @@ pub struct ViewSyncRelayTaskState< pub membership: ViewSyncMembership, /// Vote accumulator #[allow(clippy::type_complexity)] - pub accumulator: Either< - VoteAccumulator2< - TYPES, - VOTE, - CERTIFICATE - >, - CERTIFICATE, - >, + pub accumulator: Either, CERTIFICATE>, /// Our node id; for logging pub id: u64, } @@ -230,10 +223,12 @@ impl< TYPES, Leaf = Leaf, ConsensusMessage = SequencingMessage, - >, - VOTE: Vote2 + std::marker::Send + std::marker::Sync + 'static, - CERTIFICATE: Certificate2 + std::marker::Send + std::marker::Sync + 'static, + VOTE: Vote2 + std::marker::Send + std::marker::Sync + 'static, + CERTIFICATE: Certificate2 + + std::marker::Send + + std::marker::Sync + + 'static, > TS for ViewSyncRelayTaskState { } @@ -346,14 +341,8 @@ where }); } - HotShotEvent::ViewSyncVoteRecv(vote) => { - let vote_internal = match vote { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal, - }; - - if let Some(relay_task) = self.relay_task_map.get(&vote_internal.round) { + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { + if let Some(relay_task) = self.relay_task_map.get(&vote.get_view_number()) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -365,7 +354,7 @@ where if !self .exchange - .is_leader(vote_internal.round + vote_internal.relay) + .is_leader(vote.get_view_number() + vote.get_data().relay) { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); @@ -382,7 +371,7 @@ where let mut relay_state = ViewSyncRelayTaskState { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), - membership: self.exchange.membership().clone(), + membership: self.exchange.membership().clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -396,27 +385,40 @@ where relay_state = result.1; - let name = format!("View Sync Relay Task for view {:?}", vote_internal.round); + let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); let relay_handle_event = HandleEvent(Arc::new( - move |event, state: ViewSyncRelayTaskState| { + move |event, + state: ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncPreCommitVote>, + ViewSyncPreCommitCertificate2, + >| { async move { state.handle_event(event).await }.boxed() }, )); let filter = FilterEvent::default(); - let builder = TaskBuilder::>::new(name) - .register_event_stream(relay_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(relay_state) - .register_event_handler(relay_handle_event); + let builder = TaskBuilder::< + ViewSyncRelayTaskStateTypes< + TYPES, + I, + ViewSyncPreCommitVote>, + ViewSyncPreCommitCertificate2, + >, + >::new(name) + .register_event_stream(relay_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(relay_state) + .register_event_handler(relay_handle_event); let event_stream_id = builder.get_stream_id().unwrap(); self.relay_task_map - .insert(vote_internal.round, ViewSyncTaskInfo { event_stream_id }); + .insert(vote.get_view_number(), ViewSyncTaskInfo { event_stream_id }); let _view_sync_relay_task = async_spawn(async move { ViewSyncRelayTaskStateTypes::build(builder).launch().await }); @@ -943,7 +945,13 @@ impl< Leaf = Leaf, ConsensusMessage = SequencingMessage, >, - > ViewSyncRelayTaskState>, ViewSyncPreCommitCertificate2> + > + ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncPreCommitVote>, + ViewSyncPreCommitCertificate2, + > where ViewSyncEx: ViewSyncExchangeType< TYPES, @@ -952,7 +960,6 @@ where Certificate = ViewSyncCertificate, Commitment = Commitment>, >, - { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -961,7 +968,12 @@ where event: HotShotEvent, ) -> ( std::option::Option, - ViewSyncRelayTaskState>, ViewSyncPreCommitCertificate2> + ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncPreCommitVote>, + ViewSyncPreCommitCertificate2, + >, ) { match event { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { @@ -985,47 +997,12 @@ where Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), Left(accumulator) => { match accumulator.accumulate(&vote, self.exchange.membership()) { - Left(new_accumulator) => self.accumulator = Either::Left(new_accumulator), - Right(certificate) => { - self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( - certificate.clone(), - )) - .await; - self.accumulator = Right(certificate); - - return (Some(HotShotTaskCompleted::ShutDown), self); + Left(new_accumulator) => { + self.accumulator = Either::Left(new_accumulator) } - } - } - }; - (None, self) - - } - HotShotEvent::ViewSyncCommitVoteRecv(vote) => { - // Ignore this vote if we are not the correct relay - if !self - .exchange - .is_leader(vote.get_data().round + vote.get_data().relay) - { - info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); - return (None, self); - } - - debug!( - "Accumulating ViewSyncPreCommitVote for round {} and relay {}", - *vote.get_data().round, - vote.get_data().relay - ); - - match self.accumulator { - Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), - Left(accumulator) => { - match accumulator.accumulate(&vote, self.exchange.membership()) { - Left(new_accumulator) => self.accumulator = Either::Left(new_accumulator), Right(certificate) => { self.event_stream - .publish(HotShotEvent::ViewSyncCommitCertificate2Send( + .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( certificate.clone(), )) .await; @@ -1037,47 +1014,8 @@ where } }; (None, self) - } - HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - // Ignore this vote if we are not the correct relay - // TODO ED Replace exchange with membership - if !self - .exchange - .is_leader(vote.get_data().round + vote.get_data().relay) - { - info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); - return (None, self); - } - debug!( - "Accumulating ViewSyncPreCommitVote for round {} and relay {}", - *vote.get_data().round, - vote.get_data().relay - ); - - match self.accumulator { - Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), - Left(accumulator) => { - match accumulator.accumulate(&vote, self.exchange.membership()) { - Left(new_accumulator) => self.accumulator = Either::Left(new_accumulator), - Right(certificate) => { - self.event_stream - .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( - certificate.clone(), - )) - .await; - self.accumulator = Right(certificate); - - return (Some(HotShotTaskCompleted::ShutDown), self); - } - } - } - }; - (None, self) - - } - _ => (None, self), } } From f455d2e84c97ff47b53fcb4db873bfa18d41929b Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 13 Nov 2023 22:35:37 -0500 Subject: [PATCH 0364/1393] Compile with commit and finalize impl tasks --- task-impls/src/view_sync.rs | 174 +++++++++++++++++++++++++++++++++++- 1 file changed, 171 insertions(+), 3 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index a4ce8e3ddf..64d7808fdd 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -10,8 +10,8 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - simple_certificate::ViewSyncPreCommitCertificate2, - simple_vote::ViewSyncPreCommitVote, + simple_certificate::{ViewSyncPreCommitCertificate2, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2}, + simple_vote::{ViewSyncPreCommitVote, ViewSyncCommitVote, ViewSyncFinalizeVote}, traits::{ election::Membership, network::ConsensusIntentEvent, node_implementation::ViewSyncMembership, @@ -368,7 +368,8 @@ where phantom: PhantomData, }; - let mut relay_state = ViewSyncRelayTaskState { + let mut relay_state = ViewSyncRelayTaskState::>, ViewSyncPreCommitCertificate2> + { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), membership: self.exchange.membership().clone(), @@ -1020,3 +1021,170 @@ where } } } + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = Leaf, + ConsensusMessage = SequencingMessage, + >, + > + ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncCommitVote>, + ViewSyncCommitCertificate2, + > +where + ViewSyncEx: ViewSyncExchangeType< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = Commitment>, + >, +{ + /// Handles incoming events for the view sync relay task + #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] + pub async fn handle_event( + mut self, + event: HotShotEvent, + ) -> ( + std::option::Option, + ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncCommitVote>, + ViewSyncCommitCertificate2, + >, + ) { + match event { + HotShotEvent::ViewSyncCommitVoteRecv(vote) => { + // Ignore this vote if we are not the correct relay + // TODO ED Replace exchange with membership + if !self + .exchange + .is_leader(vote.get_data().round + vote.get_data().relay) + { + info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); + return (None, self); + } + + debug!( + "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + *vote.get_data().round, + vote.get_data().relay + ); + + match self.accumulator { + Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), + Left(accumulator) => { + match accumulator.accumulate(&vote, self.exchange.membership()) { + Left(new_accumulator) => { + self.accumulator = Either::Left(new_accumulator) + } + Right(certificate) => { + self.event_stream + .publish(HotShotEvent::ViewSyncCommitCertificate2Send( + certificate.clone(), + )) + .await; + self.accumulator = Right(certificate); + + return (Some(HotShotTaskCompleted::ShutDown), self); + } + } + } + }; + (None, self) + } + + _ => (None, self), + } + } +} + +impl< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Leaf = Leaf, + ConsensusMessage = SequencingMessage, + >, + > + ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncFinalizeVote>, + ViewSyncFinalizeCertificate2, + > +where + ViewSyncEx: ViewSyncExchangeType< + TYPES, + Message, + Proposal = ViewSyncCertificate, + Certificate = ViewSyncCertificate, + Commitment = Commitment>, + >, +{ + /// Handles incoming events for the view sync relay task + #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] + pub async fn handle_event( + mut self, + event: HotShotEvent, + ) -> ( + std::option::Option, + ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncFinalizeVote>, + ViewSyncFinalizeCertificate2, + >, + ) { + match event { + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { + // Ignore this vote if we are not the correct relay + // TODO ED Replace exchange with membership + if !self + .exchange + .is_leader(vote.get_data().round + vote.get_data().relay) + { + info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); + return (None, self); + } + + debug!( + "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + *vote.get_data().round, + vote.get_data().relay + ); + + match self.accumulator { + Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), + Left(accumulator) => { + match accumulator.accumulate(&vote, self.exchange.membership()) { + Left(new_accumulator) => { + self.accumulator = Either::Left(new_accumulator) + } + Right(certificate) => { + self.event_stream + .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( + certificate.clone(), + )) + .await; + self.accumulator = Right(certificate); + + return (Some(HotShotTaskCompleted::ShutDown), self); + } + } + } + }; + (None, self) + } + + _ => (None, self), + } + } +} + From 63ce3f661f99dfff499b44e394160e62c50ed456 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 13 Nov 2023 22:40:35 -0500 Subject: [PATCH 0365/1393] Added task spawning for commit and finalize tasks --- task-impls/src/view_sync.rs | 168 ++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 64d7808fdd..5fd7f99a91 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -425,6 +425,174 @@ where }); } + HotShotEvent::ViewSyncCommitVoteRecv(vote) => { + if let Some(relay_task) = self.relay_task_map.get(&vote.get_view_number()) { + // Forward event then return + self.event_stream + .direct_message(relay_task.event_stream_id, event) + .await; + return; + } + + // We do not have a relay task already running, so start one + + if !self + .exchange + .is_leader(vote.get_view_number() + vote.get_data().relay) + { + // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` + debug!("View sync vote sent to wrong leader"); + return; + } + + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.exchange.total_nodes()], + phantom: PhantomData, + }; + + let mut relay_state = ViewSyncRelayTaskState::>, ViewSyncCommitCertificate2> + { + event_stream: self.event_stream.clone(), + exchange: self.exchange.clone(), + membership: self.exchange.membership().clone(), + accumulator: either::Left(new_accumulator), + id: self.id, + }; + + let result = relay_state.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + relay_state = result.1; + + let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); + + let relay_handle_event = HandleEvent(Arc::new( + move |event, + state: ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncCommitVote>, + ViewSyncCommitCertificate2, + >| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent::default(); + let builder = TaskBuilder::< + ViewSyncRelayTaskStateTypes< + TYPES, + I, + ViewSyncCommitVote>, + ViewSyncCommitCertificate2, + >, + >::new(name) + .register_event_stream(relay_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(relay_state) + .register_event_handler(relay_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + self.relay_task_map + .insert(vote.get_view_number(), ViewSyncTaskInfo { event_stream_id }); + let _view_sync_relay_task = async_spawn(async move { + ViewSyncRelayTaskStateTypes::build(builder).launch().await + }); + } + + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { + if let Some(relay_task) = self.relay_task_map.get(&vote.get_view_number()) { + // Forward event then return + self.event_stream + .direct_message(relay_task.event_stream_id, event) + .await; + return; + } + + // We do not have a relay task already running, so start one + + if !self + .exchange + .is_leader(vote.get_view_number() + vote.get_data().relay) + { + // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` + debug!("View sync vote sent to wrong leader"); + return; + } + + let new_accumulator = VoteAccumulator2 { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0; self.exchange.total_nodes()], + phantom: PhantomData, + }; + + let mut relay_state = ViewSyncRelayTaskState::>, ViewSyncFinalizeCertificate2> + { + event_stream: self.event_stream.clone(), + exchange: self.exchange.clone(), + membership: self.exchange.membership().clone(), + accumulator: either::Left(new_accumulator), + id: self.id, + }; + + let result = relay_state.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + relay_state = result.1; + + let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); + + let relay_handle_event = HandleEvent(Arc::new( + move |event, + state: ViewSyncRelayTaskState< + TYPES, + I, + ViewSyncFinalizeVote>, + ViewSyncFinalizeCertificate2, + >| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent::default(); + let builder = TaskBuilder::< + ViewSyncRelayTaskStateTypes< + TYPES, + I, + ViewSyncFinalizeVote>, + ViewSyncFinalizeCertificate2, + >, + >::new(name) + .register_event_stream(relay_state.event_stream.clone(), filter) + .await + .register_registry(&mut self.registry.clone()) + .await + .register_state(relay_state) + .register_event_handler(relay_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + self.relay_task_map + .insert(vote.get_view_number(), ViewSyncTaskInfo { event_stream_id }); + let _view_sync_relay_task = async_spawn(async move { + ViewSyncRelayTaskStateTypes::build(builder).launch().await + }); + } + &HotShotEvent::ViewChange(new_view) => { let new_view = TYPES::Time::new(*new_view); if self.current_view < new_view { From 9996221be358ff688387ed3bea44b6789c96dd4f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 13 Nov 2023 23:58:16 -0500 Subject: [PATCH 0366/1393] Compiling, need to replace commented out logic --- hotshot/src/lib.rs | 3 - hotshot/src/tasks/mod.rs | 3 - task-impls/src/events.rs | 20 +- task-impls/src/network.rs | 104 +++- task-impls/src/view_sync.rs | 495 +++++++++---------- testing/tests/view_sync_task.rs | 40 +- types/src/message.rs | 101 +++- types/src/traits/election.rs | 620 +++++++++++------------- types/src/traits/node_implementation.rs | 8 +- 9 files changed, 701 insertions(+), 693 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 89203d9074..347613d21d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -646,9 +646,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, Membership = MEMBERSHIP, > + 'static, VIDEx: ConsensusExchange< diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b8eba956f8..f7147039a1 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -521,9 +521,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, >, { let api = HotShotConsensusApi { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index d679ae6f5d..8d0f8afdd3 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -12,7 +12,7 @@ use hotshot_types::{ simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2, ViewSyncPreCommitVote, ViewSyncCommitVote, ViewSyncFinalizeVote}, traits::node_implementation::{ CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncProposalType, ViewSyncMembership, + VIDMembership, ViewSyncMembership, }, vote::ViewSyncVote, }; @@ -53,20 +53,20 @@ pub enum HotShotEvent> { /// Timeout for the view sync protocol; emitted by a replica in the view sync task ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), /// Send a view sync vote to the network; emitted by a replica in the view sync task - ViewSyncVoteSend(ViewSyncVote), + // ViewSyncVoteSend(ViewSyncVote), // TODO ED Remove this event /// Send a view sync certificate to the network; emitted by a relay in the view sync task - ViewSyncCertificateSend( - Proposal>, - TYPES::SignatureKey, - ), + // ViewSyncCertificateSend( + // Proposal>, + // TYPES::SignatureKey, + // ), // TODO ED Remove this in favor of separate votes for each view sync vote type /// Receive a view sync vote from the network; received by a relay in the view sync task ViewSyncVoteRecv(ViewSyncVote), /// Receive a view sync certificate from the network; received by a replica in the view sync task // TODO ED Remove this event in favor of separate events depending on which certificate type it is. - ViewSyncCertificateRecv(Proposal>), + // ViewSyncCertificateRecv(Proposal>), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote>), @@ -94,11 +94,11 @@ pub enum HotShotEvent> { ViewSyncFinalizeCertificate2Recv(ViewSyncFinalizeCertificate2), /// Send a `ViewSyncPreCommitCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncPreCommitCertificate2Send(ViewSyncPreCommitCertificate2), + ViewSyncPreCommitCertificate2Send(ViewSyncPreCommitCertificate2, TYPES::SignatureKey), /// Send a `ViewSyncCommitCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncCommitCertificate2Send(ViewSyncCommitCertificate2), + ViewSyncCommitCertificate2Send(ViewSyncCommitCertificate2, TYPES::SignatureKey), /// Send a `ViewSyncFinalizeCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2), + ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::Time), diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 80d1c7d341..87e73e860a 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -82,12 +82,27 @@ impl< GeneralConsensusMessage::Vote(vote) => { HotShotEvent::QuorumVoteRecv(vote.clone()) } - GeneralConsensusMessage::ViewSyncVote(view_sync_message) => { - HotShotEvent::ViewSyncVoteRecv(view_sync_message) + GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { + HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) } - GeneralConsensusMessage::ViewSyncCertificate(view_sync_message) => { - HotShotEvent::ViewSyncCertificateRecv(view_sync_message) + GeneralConsensusMessage::ViewSyncPreCommitCertificate( + view_sync_message, + ) => HotShotEvent::ViewSyncPreCommitCertificate2Recv(view_sync_message), + + GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { + HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) + } + GeneralConsensusMessage::ViewSyncCommitCertificate( + view_sync_message, + ) => HotShotEvent::ViewSyncCommitCertificate2Recv(view_sync_message), + + GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { + HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) } + GeneralConsensusMessage::ViewSyncFinalizeCertificate( + view_sync_message, + ) => HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_message), + GeneralConsensusMessage::TimeoutVote(message) => { HotShotEvent::TimeoutVoteRecv(message) } @@ -262,25 +277,75 @@ impl< TransmitType::Broadcast, None, ), - HotShotEvent::ViewSyncCertificateSend(certificate_proposal, sender) => ( + HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view_number() + vote.get_data().relay)), + ), + HotShotEvent::ViewSyncCommitVoteSend(vote) => ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view_number() + vote.get_data().relay)), + ), + HotShotEvent::ViewSyncFinalizeVoteSend(vote) => ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view_number() + vote.get_data().relay)), + ), + HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate.clone()), + ))), + TransmitType::Broadcast, + None, + ), + HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncCertificate(certificate_proposal), + GeneralConsensusMessage::ViewSyncCommitCertificate(certificate.clone()), ))), TransmitType::Broadcast, None, ), - HotShotEvent::ViewSyncVoteSend(vote) => { - // error!("Sending view sync vote in network task to relay with index: {:?}", vote.round() + vote.relay()); - ( - vote.signature_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.round() + vote.relay())), - ) - } + + HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate.clone()), + ))), + TransmitType::Broadcast, + None, + ), + + // HotShotEvent::ViewSyncCertificateSend(certificate_proposal, sender) => ( + // sender, + // MessageKind::::from_consensus_message(SequencingMessage(Left( + // GeneralConsensusMessage::ViewSyncCertificate(certificate_proposal), + // ))), + // TransmitType::Broadcast, + // None, + // ), + // HotShotEvent::ViewSyncVoteSend(vote) => { + // // error!("Sending view sync vote in network task to relay with index: {:?}", vote.round() + vote.relay()); + // ( + // vote.signature_key(), + // MessageKind::::from_consensus_message(SequencingMessage(Left( + // GeneralConsensusMessage::ViewSyncVote(vote.clone()), + // ))), + // TransmitType::Direct, + // Some(membership.get_leader(vote.round() + vote.relay())), + // ) + // } HotShotEvent::TimeoutVoteSend(vote) => ( vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( @@ -372,11 +437,12 @@ impl< } /// view sync filter + // TODO ED Add new events here fn view_sync_filter(event: &HotShotEvent) -> bool { matches!( event, - HotShotEvent::ViewSyncVoteSend(_) - | HotShotEvent::ViewSyncCertificateSend(_, _) + // HotShotEvent::ViewSyncVoteSend(_) + // | HotShotEvent::ViewSyncCertificateSend(_, _) | HotShotEvent::Shutdown | HotShotEvent::ViewChange(_) ) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 5fd7f99a91..534fd87a05 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -10,8 +10,12 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - simple_certificate::{ViewSyncPreCommitCertificate2, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2}, - simple_vote::{ViewSyncPreCommitVote, ViewSyncCommitVote, ViewSyncFinalizeVote}, + simple_certificate::{ + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + }, + simple_vote::{ + ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, ViewSyncPreCommitVote, + }, traits::{ election::Membership, network::ConsensusIntentEvent, node_implementation::ViewSyncMembership, @@ -73,7 +77,7 @@ pub struct ViewSyncTaskState< TYPES, Message, // TODO ED Remove this when exchanges is done, but we don't actually use this commitment type anymore. - Commitment = Commitment>, + // Commitment = Commitment>, // Membership = ViewSyncMembership >, { @@ -121,7 +125,7 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Commitment = Commitment>, + // Commitment = Commitment>, >, { } @@ -143,7 +147,7 @@ pub struct ViewSyncReplicaTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Commitment = Commitment>, + // Commitment = Commitment>, >, { /// Timeout for view sync rounds @@ -184,7 +188,7 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - Commitment = Commitment>, + // Commitment = Commitment>, >, { } @@ -251,13 +255,7 @@ impl< A: ConsensusApi, I> + 'static + std::clone::Clone, > ViewSyncTaskState where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] /// Handles incoming events for the main view sync task @@ -368,8 +366,12 @@ where phantom: PhantomData, }; - let mut relay_state = ViewSyncRelayTaskState::>, ViewSyncPreCommitCertificate2> - { + let mut relay_state = ViewSyncRelayTaskState::< + TYPES, + I, + ViewSyncPreCommitVote>, + ViewSyncPreCommitCertificate2, + > { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), membership: self.exchange.membership().clone(), @@ -452,8 +454,12 @@ where phantom: PhantomData, }; - let mut relay_state = ViewSyncRelayTaskState::>, ViewSyncCommitCertificate2> - { + let mut relay_state = ViewSyncRelayTaskState::< + TYPES, + I, + ViewSyncCommitVote>, + ViewSyncCommitCertificate2, + > { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), membership: self.exchange.membership().clone(), @@ -536,8 +542,12 @@ where phantom: PhantomData, }; - let mut relay_state = ViewSyncRelayTaskState::>, ViewSyncFinalizeCertificate2> - { + let mut relay_state = ViewSyncRelayTaskState::< + TYPES, + I, + ViewSyncFinalizeVote>, + ViewSyncFinalizeCertificate2, + > { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), membership: self.exchange.membership().clone(), @@ -762,10 +772,10 @@ where /// Filter view sync related events. pub fn filter(event: &HotShotEvent) -> bool { + // TODO ED Add new events matches!( event, - HotShotEvent::ViewSyncCertificateRecv(_) - | HotShotEvent::ViewSyncVoteRecv(_) + | HotShotEvent::Shutdown | HotShotEvent::Timeout(_) | HotShotEvent::ViewSyncTimeout(_, _, _) @@ -784,13 +794,7 @@ impl< A: ConsensusApi, I> + 'static, > ViewSyncReplicaTaskState where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task @@ -802,21 +806,23 @@ where ViewSyncReplicaTaskState, ) { match event { - HotShotEvent::ViewSyncCertificateRecv(message) => { - let (certificate_internal, last_seen_certificate) = match message.data.clone() { - ViewSyncCertificate::PreCommit(certificate_internal) => { - (certificate_internal, ViewSyncPhase::PreCommit) - } - ViewSyncCertificate::Commit(certificate_internal) => { - (certificate_internal, ViewSyncPhase::Commit) - } - ViewSyncCertificate::Finalize(certificate_internal) => { - (certificate_internal, ViewSyncPhase::Finalize) - } - }; + HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + // let (certificate_internal, last_seen_certificate) = match message.data.clone() { + // ViewSyncCertificate::PreCommit(certificate_internal) => { + // (certificate_internal, ViewSyncPhase::PreCommit) + // } + // ViewSyncCertificate::Commit(certificate_internal) => { + // (certificate_internal, ViewSyncPhase::Commit) + // } + // ViewSyncCertificate::Finalize(certificate_internal) => { + // (certificate_internal, ViewSyncPhase::Finalize) + // } + // }; + + let last_seen_certificate = ViewSyncPhase::PreCommit; // Ignore certificate if it is for an older round - if certificate_internal.round < self.next_view { + if certificate.get_view_number() < self.next_view { error!("We're already in a higher round"); return (None, self); @@ -824,19 +830,19 @@ where let relay_key = self .exchange - .get_leader(certificate_internal.round + certificate_internal.relay); + .get_leader(certificate.get_view_number() + certificate.get_data().relay); - if !relay_key.validate(&message.signature, message.data.commit().as_ref()) { - error!("Key does not validate for certificate sender"); - return (None, self); - } + // if !relay_key.validate( + // &certificate.signature, + // certificate.get_data_commitment().as_ref(), + // ) { + // error!("Key does not validate for certificate sender"); + // return (None, self); + // } // If certificate is not valid, return current state - if !self - .exchange - .is_valid_view_sync_cert(message.data.clone(), certificate_internal.round) - { - error!("Not valid view sync cert! {:?}", message.data); + if !certificate.is_valid_cert(self.exchange.membership()) { + error!("Not valid view sync cert! {:?}", certificate.get_data()); return (None, self); } @@ -845,7 +851,7 @@ where // since another task should have been started for the higher round // TODO ED Perhaps in the future this should return an error giving more // context - if certificate_internal.round > self.next_view { + if certificate.get_view_number() > self.next_view { return (Some(HotShotTaskCompleted::ShutDown), self); } @@ -882,101 +888,52 @@ where return ((Some(HotShotTaskCompleted::ShutDown)), self); } - if certificate_internal.relay > self.relay { - self.relay = certificate_internal.relay; + if certificate.get_data().relay > self.relay { + self.relay = certificate.get_data().relay; } // TODO ED Assuming that nodes must have stake for the view they are voting to enter - let maybe_vote_token = self - .exchange - .membership() - .make_vote_token(self.next_view, self.exchange.private_key()); - - match maybe_vote_token { - Ok(Some(vote_token)) => { - let message = match self.phase { - ViewSyncPhase::None => unimplemented!(), - ViewSyncPhase::PreCommit => self.exchange.create_commit_message::( - self.next_view, - self.relay, - vote_token.clone(), - ), - ViewSyncPhase::Commit => self.exchange.create_finalize_message::( - self.next_view, - self.relay, - vote_token.clone(), - ), - // Should never hit this - ViewSyncPhase::Finalize => unimplemented!(), - }; - - if let GeneralConsensusMessage::ViewSyncVote(vote) = message { - // error!("Sending vs vote {:?}", vote.clone()); - - self.event_stream - .publish(HotShotEvent::ViewSyncVoteSend(vote)) - .await; - } - - // Send to the first relay after sending to k_th relay - if self.relay > 0 { - let message = match self.phase { - ViewSyncPhase::None => unimplemented!(), - ViewSyncPhase::PreCommit => { - self.exchange.create_precommit_message::( - self.next_view, - 0, - vote_token.clone(), - ) - } - ViewSyncPhase::Commit => self.exchange.create_commit_message::( - self.next_view, - 0, - vote_token.clone(), - ), - ViewSyncPhase::Finalize => unimplemented!(), - }; - // error!("Sending vs vote {:?}", message.clone()); - if let GeneralConsensusMessage::ViewSyncVote(vote) = message { - // error!("Sending vs vote {:?}", vote.clone()); - - self.event_stream - .publish(HotShotEvent::ViewSyncVoteSend(vote)) - .await; - } - } - - // TODO ED Add event to shutdown this task if a view is completed - async_spawn({ - let stream = self.event_stream.clone(); - let phase = self.phase.clone(); - async move { - async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncCertificateRecv"); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, - phase, - )) - .await; - } - }); + // let maybe_vote_token = self + // .exchange + // .membership() + // .make_vote_token(self.next_view, self.exchange.private_key()); + + let vote = + ViewSyncPreCommitVote::>::create_signed_vote( + ViewSyncPreCommitData { relay: certificate.get_data().relay, round: self.next_view}, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); - return (None, self); - } - Ok(None) => { - debug!( - "We were not chosen for committee on view {}", - *self.next_view - ); - return (None, self); - } - Err(_) => { - error!("Problem generating vote token"); - return (None, self); - } + if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + 1 + ); + self.event_stream + .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + .await; } + // TODO ED Send to first relay + + // TODO ED Add event to shutdown this task if a view is completed + async_spawn({ + let stream = self.event_stream.clone(); + let phase = self.phase.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncCertificateRecv"); + stream + .publish(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + phase, + )) + .await; + } + }); } HotShotEvent::ViewSyncTrigger(view_number) => { @@ -984,57 +941,59 @@ where error!("Unexpected view number to triger view sync"); return (None, self); } - let maybe_vote_token = self - .exchange - .membership() - .make_vote_token(self.next_view, self.exchange.private_key()); - - match maybe_vote_token { - Ok(Some(vote_token)) => { - let message = self.exchange.create_precommit_message::( - self.next_view, - self.relay, - vote_token.clone(), - ); - - if let GeneralConsensusMessage::ViewSyncVote(vote) = message { - debug!( - "Sending precommit vote to start protocol for next view = {}", - *vote.round() - ); - // error!("Sending vs vote {:?}", vote.clone()); - - self.event_stream - .publish(HotShotEvent::ViewSyncVoteSend(vote)) - .await; - } - - // TODO ED Add event to shutdown this task - async_spawn({ - let stream = self.event_stream.clone(); - async move { - async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncTrigger"); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, - ViewSyncPhase::None, - )) - .await; - } - }); - return (None, self); - } - Ok(None) => { - debug!("We were not chosen for committee on view {}", *view_number); - return (None, self); - } - Err(_) => { - error!("Problem generating vote token"); - return (None, self); - } - } + // let maybe_vote_token = self + // .exchange + // .membership() + // .make_vote_token(self.next_view, self.exchange.private_key()); + + // match maybe_vote_token { + // Ok(Some(vote_token)) => { + // let message = self.exchange.create_precommit_message::( + // self.next_view, + // self.relay, + // vote_token.clone(), + // ); + + // if let GeneralConsensusMessage::ViewSyncVote(vote) = message { + // debug!( + // "Sending precommit vote to start protocol for next view = {}", + // *vote.round() + // ); + // // error!("Sending vs vote {:?}", vote.clone()); + + // self.event_stream + // .publish(HotShotEvent::ViewSyncVoteSend(vote)) + // .await; + // } + + // // TODO ED Add event to shutdown this task + // async_spawn({ + // let stream = self.event_stream.clone(); + // async move { + // async_sleep(self.view_sync_timeout).await; + // error!("Vote sending timed out in ViewSyncTrigger"); + // stream + // .publish(HotShotEvent::ViewSyncTimeout( + // TYPES::Time::new(*self.next_view), + // self.relay, + // ViewSyncPhase::None, + // )) + // .await; + // } + // }); + // return (None, self); + // } + // Ok(None) => { + // debug!("We were not chosen for committee on view {}", *view_number); + // return (None, self); + // } + // Err(_) => { + // error!("Problem generating vote token"); + // return (None, self); + // } + // } + // TODO ED + return (None, self); } HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { @@ -1043,62 +1002,64 @@ where && relay == self.relay && last_seen_certificate == self.phase { - let maybe_vote_token = self - .exchange - .membership() - .make_vote_token(self.next_view, self.exchange.private_key()); - - match maybe_vote_token { - Ok(Some(vote_token)) => { - self.relay += 1; - let message = match self.phase { - ViewSyncPhase::None => self.exchange.create_precommit_message::( - self.next_view, - self.relay, - vote_token.clone(), - ), - ViewSyncPhase::PreCommit => { - self.exchange.create_commit_message::( - self.next_view, - self.relay, - vote_token.clone(), - ) - } - ViewSyncPhase::Commit => { - self.exchange.create_finalize_message::( - self.next_view, - self.relay, - vote_token.clone(), - ) - } - ViewSyncPhase::Finalize => unimplemented!(), - }; - - if let GeneralConsensusMessage::ViewSyncVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncVoteSend(vote)) - .await; - } - - // TODO ED Add event to shutdown this task - async_spawn({ - let stream = self.event_stream.clone(); - async move { - async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncTimeout"); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, - last_seen_certificate, - )) - .await; - } - }); - return (None, self); - } - Ok(None) | Err(_) => return (None, self), - } + // let maybe_vote_token = self + // .exchange + // .membership() + // .make_vote_token(self.next_view, self.exchange.private_key()); + + // match maybe_vote_token { + // Ok(Some(vote_token)) => { + // self.relay += 1; + // let message = match self.phase { + // ViewSyncPhase::None => self.exchange.create_precommit_message::( + // self.next_view, + // self.relay, + // vote_token.clone(), + // ), + // ViewSyncPhase::PreCommit => { + // self.exchange.create_commit_message::( + // self.next_view, + // self.relay, + // vote_token.clone(), + // ) + // } + // ViewSyncPhase::Commit => { + // self.exchange.create_finalize_message::( + // self.next_view, + // self.relay, + // vote_token.clone(), + // ) + // } + // ViewSyncPhase::Finalize => unimplemented!(), + // }; + + // if let GeneralConsensusMessage::ViewSyncVote(vote) = message { + // self.event_stream + // .publish(HotShotEvent::ViewSyncVoteSend(vote)) + // .await; + // } + + // // TODO ED Add event to shutdown this task + // async_spawn({ + // let stream = self.event_stream.clone(); + // async move { + // async_sleep(self.view_sync_timeout).await; + // error!("Vote sending timed out in ViewSyncTimeout"); + // stream + // .publish(HotShotEvent::ViewSyncTimeout( + // TYPES::Time::new(*self.next_view), + // self.relay, + // last_seen_certificate, + // )) + // .await; + // } + // }); + // return (None, self); + // } + // Ok(None) | Err(_) => return (None, self), + // } + // TODO ED + return (None, self); } } _ => return (None, self), @@ -1122,13 +1083,7 @@ impl< ViewSyncPreCommitCertificate2, > where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -1172,7 +1127,7 @@ where Right(certificate) => { self.event_stream .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( - certificate.clone(), + certificate.clone(), self.exchange.public_key().clone() )) .await; self.accumulator = Right(certificate); @@ -1205,13 +1160,7 @@ impl< ViewSyncCommitCertificate2, > where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -1230,7 +1179,6 @@ where match event { HotShotEvent::ViewSyncCommitVoteRecv(vote) => { // Ignore this vote if we are not the correct relay - // TODO ED Replace exchange with membership if !self .exchange .is_leader(vote.get_data().round + vote.get_data().relay) @@ -1240,7 +1188,7 @@ where } debug!( - "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + "Accumulating ViewSyncCommitVote for round {} and relay {}", *vote.get_data().round, vote.get_data().relay ); @@ -1255,7 +1203,7 @@ where Right(certificate) => { self.event_stream .publish(HotShotEvent::ViewSyncCommitCertificate2Send( - certificate.clone(), + certificate.clone(), self.exchange.public_key().clone() )) .await; self.accumulator = Right(certificate); @@ -1288,13 +1236,7 @@ impl< ViewSyncFinalizeCertificate2, > where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - Proposal = ViewSyncCertificate, - Certificate = ViewSyncCertificate, - Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -1323,7 +1265,7 @@ where } debug!( - "Accumulating ViewSyncPreCommitVote for round {} and relay {}", + "Accumulating ViewSyncFinalizetVote for round {} and relay {}", *vote.get_data().round, vote.get_data().relay ); @@ -1338,7 +1280,7 @@ where Right(certificate) => { self.event_stream .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( - certificate.clone(), + certificate.clone(), self.exchange.public_key().clone() )) .await; self.accumulator = Right(certificate); @@ -1355,4 +1297,3 @@ where } } } - diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 2e131c8703..0cfd6a2c2b 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -33,31 +33,31 @@ async fn test_view_sync_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - // Build the API for node 3. + // Build the API for node 5. let handle = build_system_handle(5).await.0; let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); let relay_pub_key = api.public_key().to_bytes(); - let vote_token = view_sync_exchange - .make_vote_token(ViewNumber::new(5)) - .unwrap_or_else(|_| panic!("Error making vote token")) - .unwrap_or_else(|| panic!("Not chosen for the committee")); - let vote_data_internal: ViewSyncData = ViewSyncData { - relay: relay_pub_key.clone(), - round: ViewNumber::new(5), - }; - let vote_data_internal_commitment = vote_data_internal.commit(); - let signature = view_sync_exchange.sign_precommit_message(vote_data_internal_commitment); - let vote = ViewSyncVote::PreCommit(ViewSyncVoteInternal { - relay_pub_key, - relay: 0, - round: ViewNumber::new(5), - signature, - vote_token, - vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), - }); + // let vote_token = view_sync_exchange + // .make_vote_token(ViewNumber::new(5)) + // .unwrap_or_else(|_| panic!("Error making vote token")) + // .unwrap_or_else(|| panic!("Not chosen for the committee")); + // let vote_data_internal: ViewSyncData = ViewSyncData { + // relay: relay_pub_key.clone(), + // round: ViewNumber::new(5), + // }; + // let vote_data_internal_commitment = vote_data_internal.commit(); + // let signature = view_sync_exchange.sign_precommit_message(vote_data_internal_commitment); + // let vote = ViewSyncVote::PreCommit(ViewSyncVoteInternal { + // relay_pub_key, + // relay: 0, + // round: ViewNumber::new(5), + // signature, + // vote_token, + // vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), + // }); // Every event input is seen on the event stream in the output. let mut input = Vec::new(); @@ -73,7 +73,7 @@ async fn test_view_sync_task() { output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); output.insert(HotShotEvent::Timeout(ViewNumber::new(4)), 1); - output.insert(HotShotEvent::ViewSyncVoteSend(vote.clone()), 1); + // output.insert(HotShotEvent::ViewSyncVoteSend(vote.clone()), 1); output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::ViewChange(ViewNumber::new(3)), 1); diff --git a/types/src/message.rs b/types/src/message.rs index 0ef9ed8590..9a9b767e3c 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,9 +3,15 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use crate::simple_certificate::{DACertificate2, VIDCertificate2}; -use crate::simple_vote::{DAVote2, TimeoutVote2, VIDVote2}; -use crate::traits::node_implementation::CommitteeMembership; +use crate::simple_certificate::{ + DACertificate2, VIDCertificate2, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + ViewSyncPreCommitCertificate2, +}; +use crate::simple_vote::{ + DAVote2, TimeoutVote2, VIDVote2, ViewSyncCommitVote, ViewSyncFinalizeVote, + ViewSyncPreCommitVote, +}; +use crate::traits::node_implementation::{CommitteeMembership, ViewSyncMembership}; use crate::vote2::HasViewNumber; use crate::{ data::{DAProposal, ProposalType, VidDisperse}, @@ -14,7 +20,7 @@ use crate::{ network::{NetworkMsg, ViewMessage}, node_implementation::{ ExchangesType, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncProposalType, + VIDMembership, }, signature_key::EncodedSignature, }, @@ -158,9 +164,9 @@ where TYPES::SignatureKey, ), /// Message with a view sync vote. - ViewSyncVote(ViewSyncVote), - /// Message with a view sync certificate. - ViewSyncCertificate(Proposal>), + // ViewSyncVote(ViewSyncVote), + // /// Message with a view sync certificate. + // ViewSyncCertificate(Proposal>), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), @@ -179,13 +185,12 @@ where ProcessedGeneralConsensusMessage::Vote(v, _) => GeneralConsensusMessage::Vote(v), ProcessedGeneralConsensusMessage::InternalTrigger(a) => { GeneralConsensusMessage::InternalTrigger(a) - } - ProcessedGeneralConsensusMessage::ViewSyncCertificate(certificate) => { - GeneralConsensusMessage::ViewSyncCertificate(certificate) - } - ProcessedGeneralConsensusMessage::ViewSyncVote(vote) => { - GeneralConsensusMessage::ViewSyncVote(vote) - } + } // ProcessedGeneralConsensusMessage::ViewSyncCertificate(certificate) => { + // GeneralConsensusMessage::ViewSyncCertificate(certificate) + // } + // ProcessedGeneralConsensusMessage::ViewSyncVote(vote) => { + // GeneralConsensusMessage::ViewSyncVote(vote) + // } } } } @@ -206,9 +211,16 @@ where GeneralConsensusMessage::InternalTrigger(a) => { ProcessedGeneralConsensusMessage::InternalTrigger(a) } - GeneralConsensusMessage::ViewSyncVote(_) - | GeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + // GeneralConsensusMessage::ViewSyncVote(_) + // | GeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), + // ED NOTE These are deprecated anyway GeneralConsensusMessage::TimeoutVote(_) => todo!(), + GeneralConsensusMessage::ViewSyncPreCommitVote(_) => todo!(), + GeneralConsensusMessage::ViewSyncCommitVote(_) => todo!(), + GeneralConsensusMessage::ViewSyncFinalizeVote(_) => todo!(), + GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) => todo!(), + GeneralConsensusMessage::ViewSyncCommitCertificate(_) => todo!(), + GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => todo!(), } } } @@ -332,12 +344,22 @@ where /// Message with a quorum vote. Vote(QuorumVote>), - /// Message with a view sync vote. - ViewSyncVote(ViewSyncVote), + ViewSyncPreCommitVote(ViewSyncPreCommitVote>), + ViewSyncCommitVote(ViewSyncCommitVote>), + + ViewSyncFinalizeVote(ViewSyncFinalizeVote>), + + ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), + + ViewSyncCommitCertificate(ViewSyncCommitCertificate2), - /// Message with a view sync certificate. - ViewSyncCertificate(Proposal>), + ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), + // /// Message with a view sync vote. + // ViewSyncVote(ViewSyncVote), + + // /// Message with a view sync certificate. + // ViewSyncCertificate(Proposal>), /// Message with a Timeout vote TimeoutVote(TimeoutVote2>), @@ -429,11 +451,29 @@ impl< GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { InternalTrigger::Timeout(time) => *time, }, - GeneralConsensusMessage::ViewSyncVote(message) => message.round(), - GeneralConsensusMessage::ViewSyncCertificate(message) => { - message.data.get_view_number() - } + // GeneralConsensusMessage::ViewSyncVote(message) => message.round(), + // GeneralConsensusMessage::ViewSyncCertificate(message) => { + // message.data.get_view_number() + // } GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), + GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncCommitVote(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncFinalizeVote(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { + message.get_view_number() + } } } Right(committee_message) => { @@ -467,8 +507,17 @@ impl< MessagePurpose::Vote } GeneralConsensusMessage::InternalTrigger(_) => MessagePurpose::Internal, - GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, - GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, + GeneralConsensusMessage::ViewSyncPreCommitVote(_) + | GeneralConsensusMessage::ViewSyncCommitVote(_) + | GeneralConsensusMessage::ViewSyncFinalizeVote(_) => MessagePurpose::ViewSyncVote, + + GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) + | GeneralConsensusMessage::ViewSyncCommitCertificate(_) + | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { + MessagePurpose::ViewSyncProposal + } + // GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, + // GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, }, Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 642673b571..2a5d09f9ee 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -687,143 +687,100 @@ impl< pub trait ViewSyncExchangeType: ConsensusExchange { - /// A vote on a [`Proposal`](Self::Proposal). - // TODO ED Make this equal Certificate vote (if possible?) - type Vote: VoteType; - /// A [`SignedCertificate`] attesting to a decision taken by the committee. - type Certificate: SignedCertificate - + Hash - + Eq; - /// Creates a precommit vote - fn create_precommit_message>( - &self, - round: TYPES::Time, - relay: u64, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage; - - /// Signs a precommit vote - fn sign_precommit_message( - &self, - commitment: Commitment>, - ) -> (EncodedPublicKey, EncodedSignature); - - /// Creates a commit vote - fn create_commit_message>( - &self, - round: TYPES::Time, - relay: u64, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage; - - /// Signs a commit vote - fn sign_commit_message( - &self, - commitment: Commitment>, - ) -> (EncodedPublicKey, EncodedSignature); - - /// Creates a finalize vote - fn create_finalize_message>( - &self, - round: TYPES::Time, - relay: u64, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage; - - /// Sings a finalize vote - fn sign_finalize_message( - &self, - commitment: Commitment>, - ) -> (EncodedPublicKey, EncodedSignature); - - /// Validate a certificate. - fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool; - - /// Sign a certificate. - fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; - - // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` - // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. - /// Accumulate vote - /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached - #[allow(clippy::type_complexity)] - fn accumulate_vote( - &self, - accumulator: ViewSyncVoteAccumulator, - vote: &<>::Certificate as SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Self::Commitment, - >>::Vote, - _commit: &Self::Commitment, - ) -> Either, Self::Certificate> - where - >::Certificate: SignedCertificate< - TYPES, - TYPES::Time, - TYPES::VoteTokenType, - Self::Commitment, - Vote = ViewSyncVote, - >, - { - if !self.is_valid_vote( - &vote.get_key(), - &vote.get_signature(), - &vote.get_data(), - &Checked::Unchecked(vote.get_vote_token()), - ) { - error!("Vote data is {:?}", vote.get_data()); - error!("Invalid vote!"); - return Either::Left(accumulator); - } - - let stake_table_entry = vote.get_key().get_stake_table_entry(1u64); - // TODO ED Could we make this part of the vote in the future? It's only a usize. - let append_node_id = self - .membership() - .get_committee_qc_stake_table() - .iter() - .position(|x| *x == stake_table_entry.clone()) - .unwrap(); - - // TODO ED Should make append function take a reference to vote - match accumulator.append( - vote, - append_node_id, - self.membership().get_committee_qc_stake_table(), - ) { - Either::Left(accumulator) => Either::Left(accumulator), - Either::Right(signatures) => Either::Right(Self::Certificate::create_certificate( - signatures, - vote.clone(), - )), - } - } - - /// Validate a vote by checking its signature and token. - fn is_valid_vote( - &self, - key: &TYPES::SignatureKey, - encoded_signature: &EncodedSignature, - data: &VoteData>>, - vote_token: &Checked, - ) -> bool { - let is_valid_signature = key.validate(encoded_signature, data.commit().as_ref()); - let valid_vote_token = self - .membership() - .validate_vote_token(key.clone(), vote_token.clone()); - let is_valid_vote_token = match valid_vote_token { - Err(_) => { - error!("Vote token was invalid"); - false - } - Ok(Checked::Valid(_)) => true, - Ok(Checked::Inval(_) | Checked::Unchecked(_)) => false, - }; - - is_valid_signature && is_valid_vote_token - } + + // /// A vote on a [`Proposal`](Self::Proposal). + // // TODO ED Make this equal Certificate vote (if possible?) + // type Vote: VoteType; + // /// A [`SignedCertificate`] attesting to a decision taken by the committee. + // type Certificate: SignedCertificate + // + Hash + // + Eq; +// /// Creates a precommit vote +// fn create_precommit_message>( +// &self, +// round: TYPES::Time, +// relay: u64, +// vote_token: TYPES::VoteTokenType, +// ) -> GeneralConsensusMessage; + +// /// Signs a precommit vote +// fn sign_precommit_message( +// &self, +// commitment: Commitment>, +// ) -> (EncodedPublicKey, EncodedSignature); + +// /// Creates a commit vote +// fn create_commit_message>( +// &self, +// round: TYPES::Time, +// relay: u64, +// vote_token: TYPES::VoteTokenType, +// ) -> GeneralConsensusMessage; + +// /// Signs a commit vote +// fn sign_commit_message( +// &self, +// commitment: Commitment>, +// ) -> (EncodedPublicKey, EncodedSignature); + +// /// Creates a finalize vote +// fn create_finalize_message>( +// &self, +// round: TYPES::Time, +// relay: u64, +// vote_token: TYPES::VoteTokenType, +// ) -> GeneralConsensusMessage; + +// /// Sings a finalize vote +// fn sign_finalize_message( +// &self, +// commitment: Commitment>, +// ) -> (EncodedPublicKey, EncodedSignature); + +// /// Validate a certificate. +// fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool; + +// /// Sign a certificate. +// fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; + +// // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` +// // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. +// /// Accumulate vote +// /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached +// #[allow(clippy::type_complexity)] +// fn accumulate_vote( +// &self, +// accumulator: ViewSyncVoteAccumulator, +// vote: &<>::Certificate as SignedCertificate< +// TYPES, +// TYPES::Time, +// TYPES::VoteTokenType, +// Self::Commitment, +// >>::Vote, +// _commit: &Self::Commitment, +// ) -> Either, Self::Certificate> +// where +// >::Certificate: SignedCertificate< +// TYPES, +// TYPES::Time, +// TYPES::VoteTokenType, +// Self::Commitment, +// Vote = ViewSyncVote, +// >, +// { + +// } + +// /// Validate a vote by checking its signature and token. +// fn is_valid_vote( +// &self, +// key: &TYPES::SignatureKey, +// encoded_signature: &EncodedSignature, +// data: &VoteData>>, +// vote_token: &Checked, +// ) -> bool { +// false +// } } /// Standard implementation of [`ViewSyncExchangeType`] based on Hot Stuff consensus. @@ -859,203 +816,204 @@ impl< M: NetworkMsg, > ViewSyncExchangeType for ViewSyncExchange { - type Vote = ViewSyncVote; - - type Certificate = ViewSyncCertificate; - - fn create_precommit_message>( - &self, - round: TYPES::Time, - relay: u64, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage { - let relay_pub_key = self.get_leader(round + relay).to_bytes(); - - let vote_data_internal: ViewSyncData = ViewSyncData { - relay: relay_pub_key.clone(), - round, - }; - - let vote_data_internal_commitment = vote_data_internal.commit(); + // type Vote = ViewSyncVote; - let signature = self.sign_precommit_message(vote_data_internal_commitment); - - GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::PreCommit( - ViewSyncVoteInternal { - relay_pub_key, - relay, - round, - signature, - vote_token, - vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), - }, - )) - } - - fn sign_precommit_message( - &self, - commitment: Commitment>, - ) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), - ); - - (self.public_key.to_bytes(), signature) - } - - fn create_commit_message>( - &self, - round: TYPES::Time, - relay: u64, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage { - let relay_pub_key = self.get_leader(round + relay).to_bytes(); - - let vote_data_internal: ViewSyncData = ViewSyncData { - relay: relay_pub_key.clone(), - round, - }; - - let vote_data_internal_commitment = vote_data_internal.commit(); - - let signature = self.sign_commit_message(vote_data_internal_commitment); - - GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Commit( - ViewSyncVoteInternal { - relay_pub_key, - relay, - round, - signature, - vote_token, - vote_data: VoteData::ViewSyncCommit(vote_data_internal_commitment), - }, - )) - } - - fn sign_commit_message( - &self, - commitment: Commitment>, - ) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::ViewSyncCommit(commitment).commit().as_ref(), - ); - - (self.public_key.to_bytes(), signature) - } - - fn create_finalize_message>( - &self, - round: TYPES::Time, - relay: u64, - vote_token: TYPES::VoteTokenType, - ) -> GeneralConsensusMessage { - let relay_pub_key = self.get_leader(round + relay).to_bytes(); - - let vote_data_internal: ViewSyncData = ViewSyncData { - relay: relay_pub_key.clone(), - round, - }; - - let vote_data_internal_commitment = vote_data_internal.commit(); - - let signature = self.sign_finalize_message(vote_data_internal_commitment); - - GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Finalize( - ViewSyncVoteInternal { - relay_pub_key, - relay, - round, - signature, - vote_token, - vote_data: VoteData::ViewSyncFinalize(vote_data_internal_commitment), - }, - )) - } - - fn sign_finalize_message( - &self, - commitment: Commitment>, - ) -> (EncodedPublicKey, EncodedSignature) { - let signature = TYPES::SignatureKey::sign( - &self.private_key, - VoteData::ViewSyncFinalize(commitment).commit().as_ref(), - ); - - (self.public_key.to_bytes(), signature) - } - - fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool { - // Sishan NOTE TODO: would be better to test this, looks like this func is never called. - let (certificate_internal, _threshold, vote_data) = match certificate.clone() { - ViewSyncCertificate::PreCommit(certificate_internal) => { - let vote_data = ViewSyncData:: { - relay: self - .get_leader(round + certificate_internal.relay) - .to_bytes(), - round, - }; - (certificate_internal, self.failure_threshold(), vote_data) - } - ViewSyncCertificate::Commit(certificate_internal) - | ViewSyncCertificate::Finalize(certificate_internal) => { - let vote_data = ViewSyncData:: { - relay: self - .get_leader(round + certificate_internal.relay) - .to_bytes(), - round, - }; - (certificate_internal, self.success_threshold(), vote_data) - } - }; - match certificate_internal.signatures { - AssembledSignature::ViewSyncPreCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().failure_threshold().get()), - ); - ::check( - &real_qc_pp, - real_commit.as_ref(), - &raw_signatures, - ) - } - AssembledSignature::ViewSyncCommit(raw_signatures) => { - let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check( - &real_qc_pp, - real_commit.as_ref(), - &raw_signatures, - ) - } - AssembledSignature::ViewSyncFinalize(raw_signatures) => { - let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); - let real_qc_pp = ::get_public_parameter( - self.membership().get_committee_qc_stake_table(), - U256::from(self.membership().success_threshold().get()), - ); - ::check( - &real_qc_pp, - real_commit.as_ref(), - &raw_signatures, - ) - } - _ => true, - } - } - - fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature { - let signature = TYPES::SignatureKey::sign(&self.private_key, certificate.commit().as_ref()); - signature - } + // type Certificate = ViewSyncCertificate; } +// fn create_precommit_message>( +// &self, +// round: TYPES::Time, +// relay: u64, +// vote_token: TYPES::VoteTokenType, +// ) -> GeneralConsensusMessage { +// let relay_pub_key = self.get_leader(round + relay).to_bytes(); + +// let vote_data_internal: ViewSyncData = ViewSyncData { +// relay: relay_pub_key.clone(), +// round, +// }; + +// let vote_data_internal_commitment = vote_data_internal.commit(); + +// let signature = self.sign_precommit_message(vote_data_internal_commitment); + +// GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::PreCommit( +// ViewSyncVoteInternal { +// relay_pub_key, +// relay, +// round, +// signature, +// vote_token, +// vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), +// }, +// )) +// } + +// fn sign_precommit_message( +// &self, +// commitment: Commitment>, +// ) -> (EncodedPublicKey, EncodedSignature) { +// let signature = TYPES::SignatureKey::sign( +// &self.private_key, +// VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), +// ); + +// (self.public_key.to_bytes(), signature) +// } + +// fn create_commit_message>( +// &self, +// round: TYPES::Time, +// relay: u64, +// vote_token: TYPES::VoteTokenType, +// ) -> GeneralConsensusMessage { +// let relay_pub_key = self.get_leader(round + relay).to_bytes(); + +// let vote_data_internal: ViewSyncData = ViewSyncData { +// relay: relay_pub_key.clone(), +// round, +// }; + +// let vote_data_internal_commitment = vote_data_internal.commit(); + +// let signature = self.sign_commit_message(vote_data_internal_commitment); + +// GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Commit( +// ViewSyncVoteInternal { +// relay_pub_key, +// relay, +// round, +// signature, +// vote_token, +// vote_data: VoteData::ViewSyncCommit(vote_data_internal_commitment), +// }, +// )) +// } + +// fn sign_commit_message( +// &self, +// commitment: Commitment>, +// ) -> (EncodedPublicKey, EncodedSignature) { +// let signature = TYPES::SignatureKey::sign( +// &self.private_key, +// VoteData::ViewSyncCommit(commitment).commit().as_ref(), +// ); + +// (self.public_key.to_bytes(), signature) +// } + +// fn create_finalize_message>( +// &self, +// round: TYPES::Time, +// relay: u64, +// vote_token: TYPES::VoteTokenType, +// ) -> GeneralConsensusMessage { +// let relay_pub_key = self.get_leader(round + relay).to_bytes(); + +// let vote_data_internal: ViewSyncData = ViewSyncData { +// relay: relay_pub_key.clone(), +// round, +// }; + +// let vote_data_internal_commitment = vote_data_internal.commit(); + +// let signature = self.sign_finalize_message(vote_data_internal_commitment); + +// GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Finalize( +// ViewSyncVoteInternal { +// relay_pub_key, +// relay, +// round, +// signature, +// vote_token, +// vote_data: VoteData::ViewSyncFinalize(vote_data_internal_commitment), +// }, +// )) +// } + +// fn sign_finalize_message( +// &self, +// commitment: Commitment>, +// ) -> (EncodedPublicKey, EncodedSignature) { +// let signature = TYPES::SignatureKey::sign( +// &self.private_key, +// VoteData::ViewSyncFinalize(commitment).commit().as_ref(), +// ); + +// (self.public_key.to_bytes(), signature) +// } + +// fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool { +// // Sishan NOTE TODO: would be better to test this, looks like this func is never called. +// let (certificate_internal, _threshold, vote_data) = match certificate.clone() { +// ViewSyncCertificate::PreCommit(certificate_internal) => { +// let vote_data = ViewSyncData:: { +// relay: self +// .get_leader(round + certificate_internal.relay) +// .to_bytes(), +// round, +// }; +// (certificate_internal, self.failure_threshold(), vote_data) +// } +// ViewSyncCertificate::Commit(certificate_internal) +// | ViewSyncCertificate::Finalize(certificate_internal) => { +// let vote_data = ViewSyncData:: { +// relay: self +// .get_leader(round + certificate_internal.relay) +// .to_bytes(), +// round, +// }; +// (certificate_internal, self.success_threshold(), vote_data) +// } +// }; +// match certificate_internal.signatures { +// AssembledSignature::ViewSyncPreCommit(raw_signatures) => { +// let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); +// let real_qc_pp = ::get_public_parameter( +// self.membership().get_committee_qc_stake_table(), +// U256::from(self.membership().failure_threshold().get()), +// ); +// ::check( +// &real_qc_pp, +// real_commit.as_ref(), +// &raw_signatures, +// ) +// } +// AssembledSignature::ViewSyncCommit(raw_signatures) => { +// let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); +// let real_qc_pp = ::get_public_parameter( +// self.membership().get_committee_qc_stake_table(), +// U256::from(self.membership().success_threshold().get()), +// ); +// ::check( +// &real_qc_pp, +// real_commit.as_ref(), +// &raw_signatures, +// ) +// } +// AssembledSignature::ViewSyncFinalize(raw_signatures) => { +// let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); +// let real_qc_pp = ::get_public_parameter( +// self.membership().get_committee_qc_stake_table(), +// U256::from(self.membership().success_threshold().get()), +// ); +// ::check( +// &real_qc_pp, +// real_commit.as_ref(), +// &raw_signatures, +// ) +// } +// _ => true, +// } +// } + +// fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature { +// let signature = TYPES::SignatureKey::sign(&self.private_key, certificate.commit().as_ref()); +// signature +// } +// } + impl< TYPES: NodeType, PROPOSAL: ProposalType, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 042c1a6a7f..439316987c 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -588,12 +588,12 @@ pub type CommitteeProposalType = as ConsensusExchange>>::Proposal; /// A proposal to sync the view. -pub type ViewSyncProposalType = - as ConsensusExchange>>::Proposal; +// pub type ViewSyncProposalType = +// as ConsensusExchange>>::Proposal; /// A vote on a [`ViewSyncProposal`]. -pub type ViewSyncVoteType = - as ViewSyncExchangeType>>::Vote; +// pub type ViewSyncVoteType = +// as ViewSyncExchangeType>>::Vote; /// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. pub type QuorumCommChannel = From 9123c91c255f530b4bc21adc2a302fede242404b Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 00:04:58 -0500 Subject: [PATCH 0367/1393] view sync task test compiles --- testing/tests/view_sync_task.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 0cfd6a2c2b..a7d4ef508d 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -59,6 +59,11 @@ async fn test_view_sync_task() { // vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), // }); + // let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote:: { signature: todo!(), data: todo!(), view_number: todo!(), _pd: std::marker::PhantomData }; + + + + let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote(todo!(), todo!(), todo!(), todo!()); // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); From 52fdf728c9d1fc0b8a3b55054f677c330631c4dd Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 07:52:41 -0500 Subject: [PATCH 0368/1393] updated view sync unit test --- testing/tests/view_sync_task.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index a7d4ef508d..5f766a5fe1 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -27,7 +27,7 @@ async fn test_view_sync_task() { use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{ traits::election::VoteData, - vote::{ViewSyncData, ViewSyncVote, ViewSyncVoteInternal}, + vote::{ViewSyncData, ViewSyncVote, ViewSyncVoteInternal}, simple_vote::ViewSyncPreCommitData, }; async_compatibility_layer::logging::setup_logging(); @@ -62,8 +62,11 @@ async fn test_view_sync_task() { // let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote:: { signature: todo!(), data: todo!(), view_number: todo!(), _pd: std::marker::PhantomData }; - - let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote(todo!(), todo!(), todo!(), todo!()); + let vote_data = ViewSyncPreCommitData { + relay: 0, + round: ::Time::new(5) + }; + let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote(vote_data, ::Time::new(5), view_sync_exchange.public_key(), view_sync_exchange.private_key()); // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); From 51caf9787651352ecf0c4acf1e17c5b70606747b Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 08:40:54 -0500 Subject: [PATCH 0369/1393] View sync unit tests work --- task-impls/src/network.rs | 20 +++++++++------ task-impls/src/view_sync.rs | 44 +++++++++++++++++++++++++++------ testing/tests/view_sync_task.rs | 9 ++++++- 3 files changed, 56 insertions(+), 17 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 87e73e860a..a434153b08 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -437,15 +437,19 @@ impl< } /// view sync filter - // TODO ED Add new events here fn view_sync_filter(event: &HotShotEvent) -> bool { - matches!( - event, - // HotShotEvent::ViewSyncVoteSend(_) - // | HotShotEvent::ViewSyncCertificateSend(_, _) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - ) + // matches!( + // event, + // HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + // | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + // | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + // | HotShotEvent::ViewSyncPreCommitVoteSend(_) + // | HotShotEvent::ViewSyncCommitVoteSend(_) + // | HotShotEvent::ViewSyncFinalizeVoteSend(_) + // | HotShotEvent::Shutdown + // | HotShotEvent::ViewChange(_) + // ) + true } } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 534fd87a05..18be22f5ce 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -14,7 +14,7 @@ use hotshot_types::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, ViewSyncPreCommitVote, + ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, ViewSyncPreCommitVote, ViewSyncCommitData, }, traits::{ election::Membership, network::ConsensusIntentEvent, @@ -735,7 +735,7 @@ where }, )); - let filter = FilterEvent(Arc::new(Self::filter)); + let filter = FilterEvent::default(); let builder = TaskBuilder::>::new(name) .register_event_stream(replica_state.event_stream.clone(), filter) @@ -772,10 +772,16 @@ where /// Filter view sync related events. pub fn filter(event: &HotShotEvent) -> bool { - // TODO ED Add new events matches!( event, + HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) | HotShotEvent::Shutdown | HotShotEvent::Timeout(_) | HotShotEvent::ViewSyncTimeout(_, _, _) @@ -899,21 +905,21 @@ where // .make_vote_token(self.next_view, self.exchange.private_key()); let vote = - ViewSyncPreCommitVote::>::create_signed_vote( - ViewSyncPreCommitData { relay: certificate.get_data().relay, round: self.next_view}, + ViewSyncCommitVote::>::create_signed_vote( + ViewSyncCommitData { relay: certificate.get_data().relay, round: self.next_view}, self.next_view, self.exchange.public_key(), self.exchange.private_key(), ); - let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); + let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); - if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { + if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { debug!( "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) .await; } // TODO ED Send to first relay @@ -937,10 +943,32 @@ where } HotShotEvent::ViewSyncTrigger(view_number) => { + // TODO ED Check self . next view for correctness if self.next_view != TYPES::Time::new(*view_number) { error!("Unexpected view number to triger view sync"); return (None, self); } + + + let vote = + ViewSyncPreCommitVote::>::create_signed_vote( + ViewSyncPreCommitData { relay: 0, round: view_number}, + view_number, + self.exchange.public_key(), + self.exchange.private_key(), + ); + error!("Vote in task is {:?}", vote.clone()); + let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); + + if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { + error!("Triggering next view! {}", *view_number); + + self.event_stream + .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + .await; + } + error!("Triggering next view! {}", *view_number); + // let maybe_vote_token = self // .exchange // .membership() diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 5f766a5fe1..bdef870d7b 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -67,6 +67,11 @@ async fn test_view_sync_task() { round: ::Time::new(5) }; let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote(vote_data, ::Time::new(5), view_sync_exchange.public_key(), view_sync_exchange.private_key()); + + tracing::error!("Vote in test is {:?}", vote.clone()); + + + // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); @@ -81,9 +86,11 @@ async fn test_view_sync_task() { output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); output.insert(HotShotEvent::Timeout(ViewNumber::new(4)), 1); - // output.insert(HotShotEvent::ViewSyncVoteSend(vote.clone()), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::ViewChange(ViewNumber::new(3)), 1); + output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); + output.insert(HotShotEvent::Shutdown, 1); From 0c9b6fde043f020dea9f77a6d3f7cebf9b535207 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 09:40:30 -0500 Subject: [PATCH 0370/1393] Fix lints --- hotshot/src/lib.rs | 9 +- hotshot/src/tasks/mod.rs | 7 +- task-impls/src/events.rs | 11 +- task-impls/src/network.rs | 2 +- task-impls/src/view_sync.rs | 462 +++++++++++++++++--------------- testing/tests/view_sync_task.rs | 55 ++-- types/src/message.rs | 13 +- types/src/simple_certificate.rs | 17 +- types/src/simple_vote.rs | 5 +- types/src/traits/election.rs | 184 ++++++------- 10 files changed, 371 insertions(+), 394 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 347613d21d..c233dd8972 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -58,7 +58,6 @@ use hotshot_types::{ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::{DAProposal, Leaf, LeafType, QuorumProposal}, error::StorageSnafu, @@ -79,7 +78,6 @@ use hotshot_types::{ storage::StoredView, State, }, - vote::ViewSyncData, HotShotConfig, }; use snafu::ResultExt; @@ -643,11 +641,8 @@ where Commitment = Commitment, Membership = MEMBERSHIP, > + 'static, - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - Membership = MEMBERSHIP, - > + 'static, + ViewSyncEx: + ViewSyncExchangeType, Membership = MEMBERSHIP> + 'static, VIDEx: ConsensusExchange< TYPES, Message, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f7147039a1..947254635d 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -26,7 +26,6 @@ use hotshot_task_impls::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - certificate::ViewSyncCertificate, data::{Leaf, ProposalType, QuorumProposal}, event::Event, message::{Message, Messages, SequencingMessage}, @@ -39,7 +38,6 @@ use hotshot_types::{ }, state::ConsensusTime, }, - vote::ViewSyncData, }; use std::{ collections::{HashMap, HashSet}, @@ -518,10 +516,7 @@ pub async fn add_view_sync_task< handle: SystemContextHandle, ) -> TaskRunner where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - >, + ViewSyncEx: ViewSyncExchangeType>, { let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8d0f8afdd3..f7a5186f6a 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -9,10 +9,13 @@ use hotshot_types::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, - simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2, ViewSyncPreCommitVote, ViewSyncCommitVote, ViewSyncFinalizeVote}, + simple_vote::{ + DAVote2, QuorumVote, TimeoutVote2, VIDVote2, ViewSyncCommitVote, ViewSyncFinalizeVote, + ViewSyncPreCommitVote, + }, traits::node_implementation::{ CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncMembership, + VIDMembership, ViewSyncMembership, }, vote::ViewSyncVote, }; @@ -75,8 +78,6 @@ pub enum HotShotEvent> { /// Receive a `ViewSyncFinalizeVote` from the network; received by a relay in the view sync task ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote>), - - /// Send a `ViewSyncPreCommitVote` from the network; emitted by a replica in the view sync task ViewSyncPreCommitVoteSend(ViewSyncPreCommitVote>), /// Send a `ViewSyncCommitVote` from the network; emitted by a replica in the view sync task @@ -84,8 +85,6 @@ pub enum HotShotEvent> { /// Send a `ViewSyncFinalizeVote` from the network; emitted by a replica in the view sync task ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote>), - - /// Receive a `ViewSyncPreCommitCertificate2` from the network; received by a replica in the view sync task ViewSyncPreCommitCertificate2Recv(ViewSyncPreCommitCertificate2), /// Receive a `ViewSyncCommitCertificate2` from the network; received by a replica in the view sync task diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a434153b08..307d533c35 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -437,7 +437,7 @@ impl< } /// view sync filter - fn view_sync_filter(event: &HotShotEvent) -> bool { + fn view_sync_filter(_event: &HotShotEvent) -> bool { // matches!( // event, // HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 18be22f5ce..3669dfe857 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,7 +1,7 @@ #![allow(clippy::module_name_repetitions)] use crate::events::HotShotEvent; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use commit::{Commitment, Committable}; + use either::Either::{self, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -9,36 +9,31 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::simple_vote::ViewSyncFinalizeData; use hotshot_types::{ simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, ViewSyncPreCommitVote, ViewSyncCommitData, - }, - traits::{ - election::Membership, network::ConsensusIntentEvent, - node_implementation::ViewSyncMembership, + ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, + ViewSyncPreCommitVote, }, - vote::ViewSyncVoteAccumulator, + traits::{network::ConsensusIntentEvent, node_implementation::ViewSyncMembership}, vote2::{Certificate2, HasViewNumber, Vote2, VoteAccumulator2}, }; use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ - certificate::ViewSyncCertificate, data::Leaf, - message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, + message::{GeneralConsensusMessage, Message, SequencingMessage}, traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, ViewSyncExchangeType}, network::CommunicationChannel, node_implementation::{NodeImplementation, NodeType, ViewSyncEx}, - signature_key::SignatureKey, state::ConsensusTime, }, - vote::{ViewSyncData, ViewSyncVote}, }; use snafu::Snafu; use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; @@ -144,11 +139,7 @@ pub struct ViewSyncReplicaTaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - // Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { /// Timeout for view sync rounds pub view_sync_timeout: Duration, @@ -185,11 +176,7 @@ impl< A: ConsensusApi, I> + 'static, > TS for ViewSyncReplicaTaskState where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - // Commitment = Commitment>, - >, + ViewSyncEx: ViewSyncExchangeType>, { } @@ -213,7 +200,6 @@ pub struct ViewSyncRelayTaskState< /// View sync exchange pub exchange: Arc>, - pub membership: ViewSyncMembership, /// Vote accumulator #[allow(clippy::type_complexity)] pub accumulator: Either, CERTIFICATE>, @@ -258,6 +244,7 @@ where ViewSyncEx: ViewSyncExchangeType>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] + #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { @@ -349,7 +336,6 @@ where } // We do not have a relay task already running, so start one - if !self .exchange .is_leader(vote.get_view_number() + vote.get_data().relay) @@ -374,7 +360,6 @@ where > { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), - membership: self.exchange.membership().clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -437,7 +422,6 @@ where } // We do not have a relay task already running, so start one - if !self .exchange .is_leader(vote.get_view_number() + vote.get_data().relay) @@ -462,7 +446,6 @@ where > { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), - membership: self.exchange.membership().clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -525,7 +508,6 @@ where } // We do not have a relay task already running, so start one - if !self .exchange .is_leader(vote.get_view_number() + vote.get_data().relay) @@ -550,7 +532,6 @@ where > { event_stream: self.event_stream.clone(), exchange: self.exchange.clone(), - membership: self.exchange.membership().clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -659,7 +640,6 @@ where error!("Too many timeouts! This shouldn't happen"); } - // TODO ED Make this a configurable variable if self.num_timeouts_tracked > 2 { // Start polling for view sync certificates self.exchange @@ -675,7 +655,6 @@ where *view_number + 1, )) .await; - // panic!("Starting view sync!"); // Spawn replica task let next_view = *view_number + 1; // Subscribe to the view after we are leader since we know we won't propose in the next view if we are leader. @@ -711,7 +690,6 @@ where id: self.id, }; - // TODO ED Make all these view numbers into a single variable to avoid errors let result = replica_state .handle_event(HotShotEvent::ViewSyncTrigger(view_number + 1)) .await; @@ -774,11 +752,9 @@ where pub fn filter(event: &HotShotEvent) -> bool { matches!( event, - HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) | HotShotEvent::ViewSyncCommitCertificate2Recv(_) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) | HotShotEvent::ViewSyncCommitVoteRecv(_) | HotShotEvent::ViewSyncFinalizeVoteRecv(_) @@ -813,18 +789,6 @@ where ) { match event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { - // let (certificate_internal, last_seen_certificate) = match message.data.clone() { - // ViewSyncCertificate::PreCommit(certificate_internal) => { - // (certificate_internal, ViewSyncPhase::PreCommit) - // } - // ViewSyncCertificate::Commit(certificate_internal) => { - // (certificate_internal, ViewSyncPhase::Commit) - // } - // ViewSyncCertificate::Finalize(certificate_internal) => { - // (certificate_internal, ViewSyncPhase::Finalize) - // } - // }; - let last_seen_certificate = ViewSyncPhase::PreCommit; // Ignore certificate if it is for an older round @@ -834,18 +798,6 @@ where return (None, self); } - let relay_key = self - .exchange - .get_leader(certificate.get_view_number() + certificate.get_data().relay); - - // if !relay_key.validate( - // &certificate.signature, - // certificate.get_data_commitment().as_ref(), - // ) { - // error!("Key does not validate for certificate sender"); - // return (None, self); - // } - // If certificate is not valid, return current state if !certificate.is_valid_cert(self.exchange.membership()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); @@ -855,8 +807,6 @@ where // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round - // TODO ED Perhaps in the future this should return an error giving more - // context if certificate.get_view_number() > self.next_view { return (Some(HotShotTaskCompleted::ShutDown), self); } @@ -868,63 +818,100 @@ where self.phase = last_seen_certificate; - // Send ViewChange event if necessary - if self.phase >= ViewSyncPhase::Commit && !self.sent_view_change_event { - error!("VIEW SYNC UPDATING VIEW TO {}", *self.next_view); + if certificate.get_data().relay > self.relay { + self.relay = certificate.get_data().relay; + } + + let vote = + ViewSyncCommitVote::>::create_signed_vote( + ViewSyncCommitData { + relay: certificate.get_data().relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); + + if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { self.event_stream - .publish(HotShotEvent::ViewChange(TYPES::Time::new(*self.next_view))) + .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) .await; - self.sent_view_change_event = true; } - // The protocol has ended - if self.phase == ViewSyncPhase::Finalize { - self.exchange - .network() - .inject_consensus_info( - ConsensusIntentEvent::CancelPollForViewSyncCertificate(*self.next_view), - ) - .await; - self.exchange - .network() - .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncVotes( - *self.next_view, - )) - .await; - return ((Some(HotShotTaskCompleted::ShutDown)), self); + async_spawn({ + let stream = self.event_stream.clone(); + let phase = self.phase.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncCertificateRecv"); + stream + .publish(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + phase, + )) + .await; + } + }); + } + + HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { + let last_seen_certificate = ViewSyncPhase::Commit; + + // Ignore certificate if it is for an older round + if certificate.get_view_number() < self.next_view { + error!("We're already in a higher round"); + + return (None, self); + } + + // If certificate is not valid, return current state + if !certificate.is_valid_cert(self.exchange.membership()) { + error!("Not valid view sync cert! {:?}", certificate.get_data()); + + return (None, self); + } + + // If certificate is for a higher round shutdown this task + // since another task should have been started for the higher round + if certificate.get_view_number() > self.next_view { + return (Some(HotShotTaskCompleted::ShutDown), self); + } + + // Ignore if the certificate is for an already seen phase + if last_seen_certificate <= self.phase { + return (None, self); } + self.phase = last_seen_certificate; + if certificate.get_data().relay > self.relay { self.relay = certificate.get_data().relay; } - // TODO ED Assuming that nodes must have stake for the view they are voting to enter - // let maybe_vote_token = self - // .exchange - // .membership() - // .make_vote_token(self.next_view, self.exchange.private_key()); - let vote = - ViewSyncCommitVote::>::create_signed_vote( - ViewSyncCommitData { relay: certificate.get_data().relay, round: self.next_view}, - self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), - ); - let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); - - if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view_number() + 1 + ViewSyncFinalizeVote::>::create_signed_vote( + ViewSyncFinalizeData { + relay: certificate.get_data().relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), ); + let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); + + if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { self.event_stream - .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) + .publish(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) .await; } - // TODO ED Send to first relay + self.event_stream + .publish(HotShotEvent::ViewChange(self.next_view)) + .await; - // TODO ED Add event to shutdown this task if a view is completed async_spawn({ let stream = self.event_stream.clone(); let phase = self.phase.clone(); @@ -942,13 +929,51 @@ where }); } + HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + let last_seen_certificate = ViewSyncPhase::Finalize; + + // Ignore certificate if it is for an older round + if certificate.get_view_number() < self.next_view { + error!("We're already in a higher round"); + + return (None, self); + } + + // If certificate is not valid, return current state + if !certificate.is_valid_cert(self.exchange.membership()) { + error!("Not valid view sync cert! {:?}", certificate.get_data()); + + return (None, self); + } + + // If certificate is for a higher round shutdown this task + // since another task should have been started for the higher round + if certificate.get_view_number() > self.next_view { + return (Some(HotShotTaskCompleted::ShutDown), self); + } + + // Ignore if the certificate is for an already seen phase + if last_seen_certificate <= self.phase { + return (None, self); + } + + self.phase = last_seen_certificate; + + if certificate.get_data().relay > self.relay { + self.relay = certificate.get_data().relay; + } + + self.event_stream + .publish(HotShotEvent::ViewChange(self.next_view)) + .await; + return (Some(HotShotTaskCompleted::ShutDown), self); + } + HotShotEvent::ViewSyncTrigger(view_number) => { - // TODO ED Check self . next view for correctness if self.next_view != TYPES::Time::new(*view_number) { error!("Unexpected view number to triger view sync"); return (None, self); } - let vote = ViewSyncPreCommitVote::>::create_signed_vote( @@ -957,70 +982,29 @@ where self.exchange.public_key(), self.exchange.private_key(), ); - error!("Vote in task is {:?}", vote.clone()); let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - error!("Triggering next view! {}", *view_number); - self.event_stream .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) .await; } - error!("Triggering next view! {}", *view_number); - - // let maybe_vote_token = self - // .exchange - // .membership() - // .make_vote_token(self.next_view, self.exchange.private_key()); - - // match maybe_vote_token { - // Ok(Some(vote_token)) => { - // let message = self.exchange.create_precommit_message::( - // self.next_view, - // self.relay, - // vote_token.clone(), - // ); - - // if let GeneralConsensusMessage::ViewSyncVote(vote) = message { - // debug!( - // "Sending precommit vote to start protocol for next view = {}", - // *vote.round() - // ); - // // error!("Sending vs vote {:?}", vote.clone()); - - // self.event_stream - // .publish(HotShotEvent::ViewSyncVoteSend(vote)) - // .await; - // } - - // // TODO ED Add event to shutdown this task - // async_spawn({ - // let stream = self.event_stream.clone(); - // async move { - // async_sleep(self.view_sync_timeout).await; - // error!("Vote sending timed out in ViewSyncTrigger"); - // stream - // .publish(HotShotEvent::ViewSyncTimeout( - // TYPES::Time::new(*self.next_view), - // self.relay, - // ViewSyncPhase::None, - // )) - // .await; - // } - // }); - // return (None, self); - // } - // Ok(None) => { - // debug!("We were not chosen for committee on view {}", *view_number); - // return (None, self); - // } - // Err(_) => { - // error!("Problem generating vote token"); - // return (None, self); - // } - // } - // TODO ED + + async_spawn({ + let stream = self.event_stream.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncTrigger"); + stream + .publish(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + ViewSyncPhase::None, + )) + .await; + } + }); + return (None, self); } @@ -1030,63 +1014,89 @@ where && relay == self.relay && last_seen_certificate == self.phase { - // let maybe_vote_token = self - // .exchange - // .membership() - // .make_vote_token(self.next_view, self.exchange.private_key()); - - // match maybe_vote_token { - // Ok(Some(vote_token)) => { - // self.relay += 1; - // let message = match self.phase { - // ViewSyncPhase::None => self.exchange.create_precommit_message::( - // self.next_view, - // self.relay, - // vote_token.clone(), - // ), - // ViewSyncPhase::PreCommit => { - // self.exchange.create_commit_message::( - // self.next_view, - // self.relay, - // vote_token.clone(), - // ) - // } - // ViewSyncPhase::Commit => { - // self.exchange.create_finalize_message::( - // self.next_view, - // self.relay, - // vote_token.clone(), - // ) - // } - // ViewSyncPhase::Finalize => unimplemented!(), - // }; - - // if let GeneralConsensusMessage::ViewSyncVote(vote) = message { - // self.event_stream - // .publish(HotShotEvent::ViewSyncVoteSend(vote)) - // .await; - // } - - // // TODO ED Add event to shutdown this task - // async_spawn({ - // let stream = self.event_stream.clone(); - // async move { - // async_sleep(self.view_sync_timeout).await; - // error!("Vote sending timed out in ViewSyncTimeout"); - // stream - // .publish(HotShotEvent::ViewSyncTimeout( - // TYPES::Time::new(*self.next_view), - // self.relay, - // last_seen_certificate, - // )) - // .await; - // } - // }); - // return (None, self); - // } - // Ok(None) | Err(_) => return (None, self), - // } - // TODO ED + self.relay += 1; + match self.phase { + ViewSyncPhase::None => { + let vote = + ViewSyncPreCommitVote::>::create_signed_vote( + ViewSyncPreCommitData { + relay: self.relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = + GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); + + if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { + self.event_stream + .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + .await; + } + } + ViewSyncPhase::PreCommit => { + let vote = + ViewSyncCommitVote::>::create_signed_vote( + ViewSyncCommitData { + relay: self.relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = + GeneralConsensusMessage::::ViewSyncCommitVote(vote); + + if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { + self.event_stream + .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) + .await; + } + } + ViewSyncPhase::Commit => { + let vote = + ViewSyncFinalizeVote::>::create_signed_vote( + ViewSyncFinalizeData { + relay: self.relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = + GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); + + if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { + self.event_stream + .publish(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) + .await; + } + } + ViewSyncPhase::Finalize => { + // This should never occur + unimplemented!() + } + } + + async_spawn({ + let stream = self.event_stream.clone(); + async move { + async_sleep(self.view_sync_timeout).await; + error!("Vote sending timed out in ViewSyncTimeout"); + stream + .publish(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*self.next_view), + self.relay, + last_seen_certificate, + )) + .await; + } + }); + return (None, self); } } @@ -1115,6 +1125,7 @@ where { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] + #[allow(clippy::type_complexity)] pub async fn handle_event( mut self, event: HotShotEvent, @@ -1150,12 +1161,13 @@ where Left(accumulator) => { match accumulator.accumulate(&vote, self.exchange.membership()) { Left(new_accumulator) => { - self.accumulator = Either::Left(new_accumulator) + self.accumulator = Either::Left(new_accumulator); } Right(certificate) => { self.event_stream .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( - certificate.clone(), self.exchange.public_key().clone() + certificate.clone(), + self.exchange.public_key().clone(), )) .await; self.accumulator = Right(certificate); @@ -1192,6 +1204,7 @@ where { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] + #[allow(clippy::type_complexity)] pub async fn handle_event( mut self, event: HotShotEvent, @@ -1226,12 +1239,13 @@ where Left(accumulator) => { match accumulator.accumulate(&vote, self.exchange.membership()) { Left(new_accumulator) => { - self.accumulator = Either::Left(new_accumulator) + self.accumulator = Either::Left(new_accumulator); } Right(certificate) => { self.event_stream .publish(HotShotEvent::ViewSyncCommitCertificate2Send( - certificate.clone(), self.exchange.public_key().clone() + certificate.clone(), + self.exchange.public_key().clone(), )) .await; self.accumulator = Right(certificate); @@ -1268,6 +1282,7 @@ where { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] + #[allow(clippy::type_complexity)] pub async fn handle_event( mut self, event: HotShotEvent, @@ -1303,12 +1318,13 @@ where Left(accumulator) => { match accumulator.accumulate(&vote, self.exchange.membership()) { Left(new_accumulator) => { - self.accumulator = Either::Left(new_accumulator) + self.accumulator = Either::Left(new_accumulator); } Right(certificate) => { self.event_stream .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( - certificate.clone(), self.exchange.public_key().clone() + certificate.clone(), + self.exchange.public_key().clone(), )) .await; self.accumulator = Right(certificate); diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index bdef870d7b..6979d8b7e3 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,14 +1,11 @@ -use commit::Committable; use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ data::ViewNumber, traits::{ - consensus_api::ConsensusSharedApi, - election::{ConsensusExchange, ViewSyncExchangeType}, - node_implementation::ExchangesType, - state::ConsensusTime, + consensus_api::ConsensusSharedApi, election::ConsensusExchange, + node_implementation::ExchangesType, state::ConsensusTime, }, }; use std::collections::HashMap; @@ -20,15 +17,10 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { - use core::panic; - use hotshot::tasks::add_view_sync_task; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{ - traits::election::VoteData, - vote::{ViewSyncData, ViewSyncVote, ViewSyncVoteInternal}, simple_vote::ViewSyncPreCommitData, - }; + use hotshot_types::simple_vote::ViewSyncPreCommitData; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -39,39 +31,24 @@ async fn test_view_sync_task() { inner: handle.hotshot.inner.clone(), }; let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); - let relay_pub_key = api.public_key().to_bytes(); - // let vote_token = view_sync_exchange - // .make_vote_token(ViewNumber::new(5)) - // .unwrap_or_else(|_| panic!("Error making vote token")) - // .unwrap_or_else(|| panic!("Not chosen for the committee")); - // let vote_data_internal: ViewSyncData = ViewSyncData { - // relay: relay_pub_key.clone(), - // round: ViewNumber::new(5), - // }; - // let vote_data_internal_commitment = vote_data_internal.commit(); - // let signature = view_sync_exchange.sign_precommit_message(vote_data_internal_commitment); - // let vote = ViewSyncVote::PreCommit(ViewSyncVoteInternal { - // relay_pub_key, - // relay: 0, - // round: ViewNumber::new(5), - // signature, - // vote_token, - // vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), - // }); - - // let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote:: { signature: todo!(), data: todo!(), view_number: todo!(), _pd: std::marker::PhantomData }; - + let _relay_pub_key = api.public_key().to_bytes(); let vote_data = ViewSyncPreCommitData { - relay: 0, - round: ::Time::new(5) + relay: 0, + round: ::Time::new(5), }; - let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote(vote_data, ::Time::new(5), view_sync_exchange.public_key(), view_sync_exchange.private_key()); + let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::< + TestTypes, + hotshot_testing::node_types::StaticMembership, + >::create_signed_vote( + vote_data, + ::Time::new(5), + view_sync_exchange.public_key(), + view_sync_exchange.private_key(), + ); tracing::error!("Vote in test is {:?}", vote.clone()); - - // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); @@ -86,12 +63,10 @@ async fn test_view_sync_task() { output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); output.insert(HotShotEvent::Timeout(ViewNumber::new(4)), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::ViewChange(ViewNumber::new(3)), 1); output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); - output.insert(HotShotEvent::Shutdown, 1); let build_fn = diff --git a/types/src/message.rs b/types/src/message.rs index 9a9b767e3c..8ee43fd23b 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -24,7 +24,6 @@ use crate::{ }, signature_key::EncodedSignature, }, - vote::ViewSyncVote, }; use derivative::Derivative; @@ -344,22 +343,24 @@ where /// Message with a quorum vote. Vote(QuorumVote>), + /// Message with a view sync pre-commit vote ViewSyncPreCommitVote(ViewSyncPreCommitVote>), + + /// Message with a view sync commit vote ViewSyncCommitVote(ViewSyncCommitVote>), + /// Message with a view sync finalize vote ViewSyncFinalizeVote(ViewSyncFinalizeVote>), + /// Message with a view sync pre-commit certificate ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), + /// Message with a view sync commit certificate ViewSyncCommitCertificate(ViewSyncCommitCertificate2), + /// Message with a view sync finalize certificate ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), - // /// Message with a view sync vote. - // ViewSyncVote(ViewSyncVote), - - // /// Message with a view sync certificate. - // ViewSyncCertificate(Proposal>), /// Message with a Timeout vote TimeoutVote(TimeoutVote2>), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index a17a5c2a94..413d494ec5 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,10 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - simple_vote::{DAData, QuorumData, TimeoutData, VIDData, Voteable, ViewSyncPreCommitData, ViewSyncCommitData, ViewSyncFinalizeData}, + simple_vote::{ + DAData, QuorumData, TimeoutData, VIDData, ViewSyncCommitData, ViewSyncFinalizeData, + ViewSyncPreCommitData, Voteable, + }, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, @@ -136,9 +139,11 @@ pub type VIDCertificate2 = SimpleCertificate::BlockPayload>>; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` -/// Type alias for a ViewSyncPreCommit certificate over a view number -pub type ViewSyncPreCommitCertificate2 = SimpleCertificate>; -/// Type alias for a ViewSyncCommit certificate over a view number +/// Type alias for a `ViewSyncPreCommit` certificate over a view number +pub type ViewSyncPreCommitCertificate2 = + SimpleCertificate>; +/// Type alias for a `ViewSyncCommit` certificate over a view number pub type ViewSyncCommitCertificate2 = SimpleCertificate>; -/// Type alias for a ViewSyncFinalize certificate over a view number -pub type ViewSyncFinalizeCertificate2 = SimpleCertificate>; +/// Type alias for a `ViewSyncFinalize` certificate over a view number +pub type ViewSyncFinalizeCertificate2 = + SimpleCertificate>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 32e9aebf8c..0708423840 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -186,10 +186,7 @@ fn view_and_relay_commit( tag: &str, ) -> Commitment { let builder = commit::RawCommitmentBuilder::new(tag); - builder - .u64(*view) - .u64(relay) - .finalize() + builder.u64(*view).u64(relay).finalize() } impl Committable for ViewSyncPreCommitData { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 2a5d09f9ee..47c5df4f4a 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -8,31 +8,26 @@ use super::{ signature_key::{EncodedPublicKey, EncodedSignature}, }; use crate::{ - certificate::{AssembledSignature, ViewSyncCertificate}, + certificate::AssembledSignature, data::{DAProposal, ProposalType, VidDisperse}, - vote::ViewSyncVoteAccumulator, }; -use crate::{message::GeneralConsensusMessage, vote::ViewSyncVoteInternal}; - use crate::{ data::LeafType, traits::{ network::{CommunicationChannel, NetworkMsg}, signature_key::SignatureKey, }, - vote::{ViewSyncData, ViewSyncVote, VoteType}, + vote::{ViewSyncData, VoteType}, }; use bincode::Options; use commit::{Commitment, CommitmentBounds, Committable}; use derivative::Derivative; -use either::Either; -use ethereum_types::U256; + use hotshot_utils::bincode::bincode_opts; use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, hash::Hash, marker::PhantomData, num::NonZeroU64}; -use tracing::error; /// Error for election problems #[derive(Snafu, Debug)] @@ -687,7 +682,6 @@ impl< pub trait ViewSyncExchangeType: ConsensusExchange { - // /// A vote on a [`Proposal`](Self::Proposal). // // TODO ED Make this equal Certificate vote (if possible?) // type Vote: VoteType; @@ -695,92 +689,92 @@ pub trait ViewSyncExchangeType: // type Certificate: SignedCertificate // + Hash // + Eq; -// /// Creates a precommit vote -// fn create_precommit_message>( -// &self, -// round: TYPES::Time, -// relay: u64, -// vote_token: TYPES::VoteTokenType, -// ) -> GeneralConsensusMessage; - -// /// Signs a precommit vote -// fn sign_precommit_message( -// &self, -// commitment: Commitment>, -// ) -> (EncodedPublicKey, EncodedSignature); - -// /// Creates a commit vote -// fn create_commit_message>( -// &self, -// round: TYPES::Time, -// relay: u64, -// vote_token: TYPES::VoteTokenType, -// ) -> GeneralConsensusMessage; - -// /// Signs a commit vote -// fn sign_commit_message( -// &self, -// commitment: Commitment>, -// ) -> (EncodedPublicKey, EncodedSignature); - -// /// Creates a finalize vote -// fn create_finalize_message>( -// &self, -// round: TYPES::Time, -// relay: u64, -// vote_token: TYPES::VoteTokenType, -// ) -> GeneralConsensusMessage; - -// /// Sings a finalize vote -// fn sign_finalize_message( -// &self, -// commitment: Commitment>, -// ) -> (EncodedPublicKey, EncodedSignature); - -// /// Validate a certificate. -// fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool; - -// /// Sign a certificate. -// fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; - -// // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` -// // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. -// /// Accumulate vote -// /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached -// #[allow(clippy::type_complexity)] -// fn accumulate_vote( -// &self, -// accumulator: ViewSyncVoteAccumulator, -// vote: &<>::Certificate as SignedCertificate< -// TYPES, -// TYPES::Time, -// TYPES::VoteTokenType, -// Self::Commitment, -// >>::Vote, -// _commit: &Self::Commitment, -// ) -> Either, Self::Certificate> -// where -// >::Certificate: SignedCertificate< -// TYPES, -// TYPES::Time, -// TYPES::VoteTokenType, -// Self::Commitment, -// Vote = ViewSyncVote, -// >, -// { - -// } - -// /// Validate a vote by checking its signature and token. -// fn is_valid_vote( -// &self, -// key: &TYPES::SignatureKey, -// encoded_signature: &EncodedSignature, -// data: &VoteData>>, -// vote_token: &Checked, -// ) -> bool { -// false -// } + // /// Creates a precommit vote + // fn create_precommit_message>( + // &self, + // round: TYPES::Time, + // relay: u64, + // vote_token: TYPES::VoteTokenType, + // ) -> GeneralConsensusMessage; + + // /// Signs a precommit vote + // fn sign_precommit_message( + // &self, + // commitment: Commitment>, + // ) -> (EncodedPublicKey, EncodedSignature); + + // /// Creates a commit vote + // fn create_commit_message>( + // &self, + // round: TYPES::Time, + // relay: u64, + // vote_token: TYPES::VoteTokenType, + // ) -> GeneralConsensusMessage; + + // /// Signs a commit vote + // fn sign_commit_message( + // &self, + // commitment: Commitment>, + // ) -> (EncodedPublicKey, EncodedSignature); + + // /// Creates a finalize vote + // fn create_finalize_message>( + // &self, + // round: TYPES::Time, + // relay: u64, + // vote_token: TYPES::VoteTokenType, + // ) -> GeneralConsensusMessage; + + // /// Sings a finalize vote + // fn sign_finalize_message( + // &self, + // commitment: Commitment>, + // ) -> (EncodedPublicKey, EncodedSignature); + + // /// Validate a certificate. + // fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool; + + // /// Sign a certificate. + // fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; + + // // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` + // // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. + // /// Accumulate vote + // /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached + // #[allow(clippy::type_complexity)] + // fn accumulate_vote( + // &self, + // accumulator: ViewSyncVoteAccumulator, + // vote: &<>::Certificate as SignedCertificate< + // TYPES, + // TYPES::Time, + // TYPES::VoteTokenType, + // Self::Commitment, + // >>::Vote, + // _commit: &Self::Commitment, + // ) -> Either, Self::Certificate> + // where + // >::Certificate: SignedCertificate< + // TYPES, + // TYPES::Time, + // TYPES::VoteTokenType, + // Self::Commitment, + // Vote = ViewSyncVote, + // >, + // { + + // } + + // /// Validate a vote by checking its signature and token. + // fn is_valid_vote( + // &self, + // key: &TYPES::SignatureKey, + // encoded_signature: &EncodedSignature, + // data: &VoteData>>, + // vote_token: &Checked, + // ) -> bool { + // false + // } } /// Standard implementation of [`ViewSyncExchangeType`] based on Hot Stuff consensus. From 523e65db5493d41f9622b2947ebb86cc463ecf73 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 09:43:13 -0500 Subject: [PATCH 0371/1393] Remove commented out code --- task-impls/src/events.rs | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f7a5186f6a..3159bf28c1 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -17,7 +17,6 @@ use hotshot_types::{ CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, VIDMembership, ViewSyncMembership, }, - vote::ViewSyncVote, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks @@ -55,21 +54,6 @@ pub enum HotShotEvent> { ViewChange(TYPES::Time), /// Timeout for the view sync protocol; emitted by a replica in the view sync task ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), - /// Send a view sync vote to the network; emitted by a replica in the view sync task - // ViewSyncVoteSend(ViewSyncVote), - - // TODO ED Remove this event - /// Send a view sync certificate to the network; emitted by a relay in the view sync task - // ViewSyncCertificateSend( - // Proposal>, - // TYPES::SignatureKey, - // ), - // TODO ED Remove this in favor of separate votes for each view sync vote type - /// Receive a view sync vote from the network; received by a relay in the view sync task - ViewSyncVoteRecv(ViewSyncVote), - /// Receive a view sync certificate from the network; received by a replica in the view sync task - // TODO ED Remove this event in favor of separate events depending on which certificate type it is. - // ViewSyncCertificateRecv(Proposal>), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote>), From d1c9e7e3b0f1ef4b87e9d6285785a75a61155d9d Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 09:58:07 -0500 Subject: [PATCH 0372/1393] Remove commented out code --- task-impls/src/network.rs | 45 ++++++++++--------------------------- task-impls/src/view_sync.rs | 4 ---- 2 files changed, 12 insertions(+), 37 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 307d533c35..e883f8fd19 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -326,26 +326,6 @@ impl< TransmitType::Broadcast, None, ), - - // HotShotEvent::ViewSyncCertificateSend(certificate_proposal, sender) => ( - // sender, - // MessageKind::::from_consensus_message(SequencingMessage(Left( - // GeneralConsensusMessage::ViewSyncCertificate(certificate_proposal), - // ))), - // TransmitType::Broadcast, - // None, - // ), - // HotShotEvent::ViewSyncVoteSend(vote) => { - // // error!("Sending view sync vote in network task to relay with index: {:?}", vote.round() + vote.relay()); - // ( - // vote.signature_key(), - // MessageKind::::from_consensus_message(SequencingMessage(Left( - // GeneralConsensusMessage::ViewSyncVote(vote.clone()), - // ))), - // TransmitType::Direct, - // Some(membership.get_leader(vote.round() + vote.relay())), - // ) - // } HotShotEvent::TimeoutVoteSend(vote) => ( vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( @@ -437,19 +417,18 @@ impl< } /// view sync filter - fn view_sync_filter(_event: &HotShotEvent) -> bool { - // matches!( - // event, - // HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - // | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - // | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) - // | HotShotEvent::ViewSyncPreCommitVoteSend(_) - // | HotShotEvent::ViewSyncCommitVoteSend(_) - // | HotShotEvent::ViewSyncFinalizeVoteSend(_) - // | HotShotEvent::Shutdown - // | HotShotEvent::ViewChange(_) - // ) - true + fn view_sync_filter(event: &HotShotEvent) -> bool { + matches!( + event, + HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncPreCommitVoteSend(_) + | HotShotEvent::ViewSyncCommitVoteSend(_) + | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) + ) } } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 3669dfe857..a673ca5611 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -71,9 +71,6 @@ pub struct ViewSyncTaskState< ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - // TODO ED Remove this when exchanges is done, but we don't actually use this commitment type anymore. - // Commitment = Commitment>, - // Membership = ViewSyncMembership >, { /// Registry to register sub tasks @@ -120,7 +117,6 @@ where ViewSyncEx: ViewSyncExchangeType< TYPES, Message, - // Commitment = Commitment>, >, { } From 815527f73337606725e0b73dda5af4eee7a1f897 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 10:01:55 -0500 Subject: [PATCH 0373/1393] Remove commented out code --- task-impls/src/view_sync.rs | 10 +- types/src/message.rs | 36 ++--- types/src/traits/election.rs | 197 ------------------------ types/src/traits/node_implementation.rs | 8 - 4 files changed, 12 insertions(+), 239 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index a673ca5611..66d55653d8 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -68,10 +68,7 @@ pub struct ViewSyncTaskState< I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static + std::clone::Clone, > where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - >, + ViewSyncEx: ViewSyncExchangeType>, { /// Registry to register sub tasks pub registry: GlobalRegistry, @@ -114,10 +111,7 @@ impl< A: ConsensusApi, I> + 'static + std::clone::Clone, > TS for ViewSyncTaskState where - ViewSyncEx: ViewSyncExchangeType< - TYPES, - Message, - >, + ViewSyncEx: ViewSyncExchangeType>, { } diff --git a/types/src/message.rs b/types/src/message.rs index 8ee43fd23b..a8df326392 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -162,10 +162,6 @@ where QuorumVote>, TYPES::SignatureKey, ), - /// Message with a view sync vote. - // ViewSyncVote(ViewSyncVote), - // /// Message with a view sync certificate. - // ViewSyncCertificate(Proposal>), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), @@ -184,12 +180,7 @@ where ProcessedGeneralConsensusMessage::Vote(v, _) => GeneralConsensusMessage::Vote(v), ProcessedGeneralConsensusMessage::InternalTrigger(a) => { GeneralConsensusMessage::InternalTrigger(a) - } // ProcessedGeneralConsensusMessage::ViewSyncCertificate(certificate) => { - // GeneralConsensusMessage::ViewSyncCertificate(certificate) - // } - // ProcessedGeneralConsensusMessage::ViewSyncVote(vote) => { - // GeneralConsensusMessage::ViewSyncVote(vote) - // } + } } } } @@ -210,16 +201,14 @@ where GeneralConsensusMessage::InternalTrigger(a) => { ProcessedGeneralConsensusMessage::InternalTrigger(a) } - // GeneralConsensusMessage::ViewSyncVote(_) - // | GeneralConsensusMessage::ViewSyncCertificate(_) => todo!(), - // ED NOTE These are deprecated anyway - GeneralConsensusMessage::TimeoutVote(_) => todo!(), - GeneralConsensusMessage::ViewSyncPreCommitVote(_) => todo!(), - GeneralConsensusMessage::ViewSyncCommitVote(_) => todo!(), - GeneralConsensusMessage::ViewSyncFinalizeVote(_) => todo!(), - GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) => todo!(), - GeneralConsensusMessage::ViewSyncCommitCertificate(_) => todo!(), - GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => todo!(), + // ED NOTE These are deprecated + GeneralConsensusMessage::TimeoutVote(_) => unimplemented!(), + GeneralConsensusMessage::ViewSyncPreCommitVote(_) => unimplemented!(), + GeneralConsensusMessage::ViewSyncCommitVote(_) => unimplemented!(), + GeneralConsensusMessage::ViewSyncFinalizeVote(_) => unimplemented!(), + GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) => unimplemented!(), + GeneralConsensusMessage::ViewSyncCommitCertificate(_) => unimplemented!(), + GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => unimplemented!(), } } } @@ -452,10 +441,7 @@ impl< GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { InternalTrigger::Timeout(time) => *time, }, - // GeneralConsensusMessage::ViewSyncVote(message) => message.round(), - // GeneralConsensusMessage::ViewSyncCertificate(message) => { - // message.data.get_view_number() - // } + GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { message.get_view_number() @@ -517,8 +503,6 @@ impl< | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { MessagePurpose::ViewSyncProposal } - // GeneralConsensusMessage::ViewSyncVote(_) => MessagePurpose::ViewSyncVote, - // GeneralConsensusMessage::ViewSyncCertificate(_) => MessagePurpose::ViewSyncProposal, }, Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 47c5df4f4a..7ed8087dcc 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -810,204 +810,8 @@ impl< M: NetworkMsg, > ViewSyncExchangeType for ViewSyncExchange { - // type Vote = ViewSyncVote; - - // type Certificate = ViewSyncCertificate; } -// fn create_precommit_message>( -// &self, -// round: TYPES::Time, -// relay: u64, -// vote_token: TYPES::VoteTokenType, -// ) -> GeneralConsensusMessage { -// let relay_pub_key = self.get_leader(round + relay).to_bytes(); - -// let vote_data_internal: ViewSyncData = ViewSyncData { -// relay: relay_pub_key.clone(), -// round, -// }; - -// let vote_data_internal_commitment = vote_data_internal.commit(); - -// let signature = self.sign_precommit_message(vote_data_internal_commitment); - -// GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::PreCommit( -// ViewSyncVoteInternal { -// relay_pub_key, -// relay, -// round, -// signature, -// vote_token, -// vote_data: VoteData::ViewSyncPreCommit(vote_data_internal_commitment), -// }, -// )) -// } - -// fn sign_precommit_message( -// &self, -// commitment: Commitment>, -// ) -> (EncodedPublicKey, EncodedSignature) { -// let signature = TYPES::SignatureKey::sign( -// &self.private_key, -// VoteData::ViewSyncPreCommit(commitment).commit().as_ref(), -// ); - -// (self.public_key.to_bytes(), signature) -// } - -// fn create_commit_message>( -// &self, -// round: TYPES::Time, -// relay: u64, -// vote_token: TYPES::VoteTokenType, -// ) -> GeneralConsensusMessage { -// let relay_pub_key = self.get_leader(round + relay).to_bytes(); - -// let vote_data_internal: ViewSyncData = ViewSyncData { -// relay: relay_pub_key.clone(), -// round, -// }; - -// let vote_data_internal_commitment = vote_data_internal.commit(); - -// let signature = self.sign_commit_message(vote_data_internal_commitment); - -// GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Commit( -// ViewSyncVoteInternal { -// relay_pub_key, -// relay, -// round, -// signature, -// vote_token, -// vote_data: VoteData::ViewSyncCommit(vote_data_internal_commitment), -// }, -// )) -// } - -// fn sign_commit_message( -// &self, -// commitment: Commitment>, -// ) -> (EncodedPublicKey, EncodedSignature) { -// let signature = TYPES::SignatureKey::sign( -// &self.private_key, -// VoteData::ViewSyncCommit(commitment).commit().as_ref(), -// ); - -// (self.public_key.to_bytes(), signature) -// } - -// fn create_finalize_message>( -// &self, -// round: TYPES::Time, -// relay: u64, -// vote_token: TYPES::VoteTokenType, -// ) -> GeneralConsensusMessage { -// let relay_pub_key = self.get_leader(round + relay).to_bytes(); - -// let vote_data_internal: ViewSyncData = ViewSyncData { -// relay: relay_pub_key.clone(), -// round, -// }; - -// let vote_data_internal_commitment = vote_data_internal.commit(); - -// let signature = self.sign_finalize_message(vote_data_internal_commitment); - -// GeneralConsensusMessage::::ViewSyncVote(ViewSyncVote::Finalize( -// ViewSyncVoteInternal { -// relay_pub_key, -// relay, -// round, -// signature, -// vote_token, -// vote_data: VoteData::ViewSyncFinalize(vote_data_internal_commitment), -// }, -// )) -// } - -// fn sign_finalize_message( -// &self, -// commitment: Commitment>, -// ) -> (EncodedPublicKey, EncodedSignature) { -// let signature = TYPES::SignatureKey::sign( -// &self.private_key, -// VoteData::ViewSyncFinalize(commitment).commit().as_ref(), -// ); - -// (self.public_key.to_bytes(), signature) -// } - -// fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool { -// // Sishan NOTE TODO: would be better to test this, looks like this func is never called. -// let (certificate_internal, _threshold, vote_data) = match certificate.clone() { -// ViewSyncCertificate::PreCommit(certificate_internal) => { -// let vote_data = ViewSyncData:: { -// relay: self -// .get_leader(round + certificate_internal.relay) -// .to_bytes(), -// round, -// }; -// (certificate_internal, self.failure_threshold(), vote_data) -// } -// ViewSyncCertificate::Commit(certificate_internal) -// | ViewSyncCertificate::Finalize(certificate_internal) => { -// let vote_data = ViewSyncData:: { -// relay: self -// .get_leader(round + certificate_internal.relay) -// .to_bytes(), -// round, -// }; -// (certificate_internal, self.success_threshold(), vote_data) -// } -// }; -// match certificate_internal.signatures { -// AssembledSignature::ViewSyncPreCommit(raw_signatures) => { -// let real_commit = VoteData::ViewSyncPreCommit(vote_data.commit()).commit(); -// let real_qc_pp = ::get_public_parameter( -// self.membership().get_committee_qc_stake_table(), -// U256::from(self.membership().failure_threshold().get()), -// ); -// ::check( -// &real_qc_pp, -// real_commit.as_ref(), -// &raw_signatures, -// ) -// } -// AssembledSignature::ViewSyncCommit(raw_signatures) => { -// let real_commit = VoteData::ViewSyncCommit(vote_data.commit()).commit(); -// let real_qc_pp = ::get_public_parameter( -// self.membership().get_committee_qc_stake_table(), -// U256::from(self.membership().success_threshold().get()), -// ); -// ::check( -// &real_qc_pp, -// real_commit.as_ref(), -// &raw_signatures, -// ) -// } -// AssembledSignature::ViewSyncFinalize(raw_signatures) => { -// let real_commit = VoteData::ViewSyncFinalize(vote_data.commit()).commit(); -// let real_qc_pp = ::get_public_parameter( -// self.membership().get_committee_qc_stake_table(), -// U256::from(self.membership().success_threshold().get()), -// ); -// ::check( -// &real_qc_pp, -// real_commit.as_ref(), -// &raw_signatures, -// ) -// } -// _ => true, -// } -// } - -// fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature { -// let signature = TYPES::SignatureKey::sign(&self.private_key, certificate.commit().as_ref()); -// signature -// } -// } - impl< TYPES: NodeType, PROPOSAL: ProposalType, @@ -1105,7 +909,6 @@ impl< { } -// TODO ED Get rid of ProposalType as generic, is debt left over from Validating Consensus impl< TYPES: NodeType, PROPOSAL: ProposalType, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 439316987c..fd2a58d4db 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -587,14 +587,6 @@ pub type QuorumProposalType = pub type CommitteeProposalType = as ConsensusExchange>>::Proposal; -/// A proposal to sync the view. -// pub type ViewSyncProposalType = -// as ConsensusExchange>>::Proposal; - -/// A vote on a [`ViewSyncProposal`]. -// pub type ViewSyncVoteType = -// as ViewSyncExchangeType>>::Vote; - /// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. pub type QuorumCommChannel = as ConsensusExchange>>::Networking; From 045bcefcf4dd62f9bfcd61c386a0b38100c9fa16 Mon Sep 17 00:00:00 2001 From: MRain Date: Tue, 14 Nov 2023 10:57:38 -0500 Subject: [PATCH 0374/1393] Proof generation --- hotshot-state-prover/src/circuit.rs | 383 ++++++++++++---------------- hotshot-state-prover/src/lib.rs | 76 ++++++ hotshot-state-prover/src/utils.rs | 54 ++++ 3 files changed, 296 insertions(+), 217 deletions(-) create mode 100644 hotshot-state-prover/src/utils.rs diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index cc81f8b37f..3bba623f87 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -1,7 +1,5 @@ //! Circuit implementation for verifying light client state update -use std::marker::PhantomData; - use ark_ec::twisted_edwards::TECurveConfig; use ark_ff::PrimeField; use ethereum_types::U256; @@ -116,236 +114,187 @@ impl AsRef<[Variable]> for LightClientStateVar { } } -#[derive(Clone, Debug)] -pub struct StateUpdateBuilder(PhantomData); - -impl StateUpdateBuilder +/// A function that takes as input: +/// - stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) +/// - schnorr signatures of the updated states (`Vec`) +/// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) +/// - signer bit vector +/// - quorum threshold +/// checks that +/// - the signer's accumulated weight exceeds the quorum threshold +/// - the commitment of the stake table +/// - all schnorr signatures are valid +pub(crate) fn build_state_verifier_circuit( + stake_table: &ST, + sigs: &[Signature

], + lightclient_state: &LightClientState, + signer_bit_vec: &[bool], + threshold: &U256, +) -> Result<(PlonkCircuit, Vec), PlonkError> where F: RescueParameter, + ST: StakeTableScheme>, + P: TECurveConfig, { - /// A function that takes as input: - /// - stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) - /// - schnorr signatures of the updated states (`Vec`) - /// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) - /// - signer bit vector - /// - quorum threshold - /// checks that - /// - the signer's accumulated weight exceeds the quorum threshold - /// - the commitment of the stake table - /// - all schnorr signatures are valid - pub fn build( - stake_table: &ST, - sigs: &[Signature

], - lightclient_state: &LightClientState, - signer_bit_vec: &[bool], - threshold: &U256, - ) -> Result<(PlonkCircuit, Vec), PlonkError> - where - ST: StakeTableScheme>, - P: TECurveConfig, - { - let mut circuit = PlonkCircuit::new_turbo_plonk(); - - // creating variables for stake table entries - let mut stake_table_var = stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .map(|(_bls_ver_key, amount, schnorr_ver_key)| { - let schnorr_ver_key = circuit.create_signature_vk_variable(&schnorr_ver_key)?; - let stake_amount = circuit.create_variable(u256_to_field::(&amount))?; - Ok(StakeTableEntryVar { - schnorr_ver_key, - stake_amount, - }) + let mut circuit = PlonkCircuit::new_turbo_plonk(); + + // creating variables for stake table entries + let mut stake_table_var = stake_table + .try_iter(SnapshotVersion::LastEpochStart)? + .map(|(_bls_ver_key, amount, schnorr_ver_key)| { + let schnorr_ver_key = circuit.create_signature_vk_variable(&schnorr_ver_key)?; + let stake_amount = circuit.create_variable(u256_to_field::(&amount))?; + Ok(StakeTableEntryVar { + schnorr_ver_key, + stake_amount, }) - .collect::, CircuitError>>()?; - let dummy_ver_key_var = VerKeyVar(circuit.neutral_point_variable()); - stake_table_var.resize( - STAKE_TABLE_CAPACITY, - StakeTableEntryVar { - schnorr_ver_key: dummy_ver_key_var, - stake_amount: 0, - }, - ); - - // creating variables for signatures - let mut sig_vars = sigs - .iter() - .map(|sig| circuit.create_signature_variable(sig)) - .collect::, CircuitError>>()?; - sig_vars.resize( - STAKE_TABLE_CAPACITY, - SignatureVar { - s: circuit.zero(), - R: circuit.neutral_point_variable(), - }, - ); - - // creating Boolean variables for the bit vector - let mut signer_bit_vec_var = signer_bit_vec - .iter() - .map(|&b| circuit.create_boolean_variable(b)) - .collect::, CircuitError>>()?; - signer_bit_vec_var.resize(STAKE_TABLE_CAPACITY, BoolVar(circuit.zero())); - - let threshold = u256_to_field::(threshold); - let threshold_var = circuit.create_public_variable(threshold)?; - - let lightclient_state_var = LightClientStateVar::new(&mut circuit, lightclient_state)?; - - let view_number_f = F::from(lightclient_state.view_number as u64); - let block_height_f = F::from(lightclient_state.block_height as u64); - let public_inputs = vec![ - threshold, - view_number_f, - block_height_f, - lightclient_state.block_comm, - lightclient_state.fee_ledger_comm, - lightclient_state.stake_table_comm.0, - lightclient_state.stake_table_comm.1, - lightclient_state.stake_table_comm.2, - ]; - - // Checking whether the accumulated weight exceeds the quorum threshold - let mut signed_amount_var = (0..STAKE_TABLE_CAPACITY / 2) - .map(|i| { - circuit.mul_add( - &[ - stake_table_var[2 * i].stake_amount, - signer_bit_vec_var[2 * i].0, - stake_table_var[2 * i + 1].stake_amount, - signer_bit_vec_var[2 * i + 1].0, - ], - &[F::one(), F::one()], - ) - }) - .collect::, CircuitError>>()?; - // Adding the last if STAKE_TABLE_CAPACITY is not a multiple of 2 - if STAKE_TABLE_CAPACITY % 2 == 1 { - signed_amount_var.push(circuit.mul( - stake_table_var[STAKE_TABLE_CAPACITY - 1].stake_amount, - signer_bit_vec_var[STAKE_TABLE_CAPACITY - 1].0, - )?); - } - let acc_amount_var = circuit.sum(&signed_amount_var)?; - circuit.enforce_leq(threshold_var, acc_amount_var)?; - - // checking the commitment for the list of schnorr keys - let schnorr_ver_key_preimage_vars = stake_table_var - .iter() - .flat_map(|var| [var.schnorr_ver_key.0.get_x(), var.schnorr_ver_key.0.get_y()]) - .collect::>(); - let schnorr_ver_key_comm = RescueNativeGadget::::rescue_sponge_with_padding( - &mut circuit, - &schnorr_ver_key_preimage_vars, - 1, - )?[0]; - circuit.enforce_equal( - schnorr_ver_key_comm, - lightclient_state_var.stake_table_comm().schnorr_keys_comm, - )?; - - // checking the commitment for the list of stake amounts - let stake_amount_preimage_vars = stake_table_var - .iter() - .map(|var| var.stake_amount) - .collect::>(); - let stake_amount_comm = RescueNativeGadget::::rescue_sponge_with_padding( - &mut circuit, - &stake_amount_preimage_vars, - 1, - )?[0]; - circuit.enforce_equal( - stake_amount_comm, - lightclient_state_var.stake_table_comm().stake_amount_comm, - )?; - - // checking all signatures - let verification_result_vars = stake_table_var - .iter() - .zip(sig_vars) - .map(|(entry, sig)| { - SignatureGadget::<_, P>::check_signature_validity( - &mut circuit, - &entry.schnorr_ver_key, - lightclient_state_var.as_ref(), - &sig, - ) - }) - .collect::, CircuitError>>()?; - let bit_x_result_vars = signer_bit_vec_var - .iter() - .zip(verification_result_vars) - .map(|(&bit, result)| { - let neg_bit = circuit.logic_neg(bit)?; - circuit.logic_or(neg_bit, result) - }) - .collect::, CircuitError>>()?; - let sig_ver_result = circuit.logic_and_all(&bit_x_result_vars)?; - circuit.enforce_true(sig_ver_result.0)?; - - circuit.finalize_for_arithmetization()?; - Ok((circuit, public_inputs)) + }) + .collect::, CircuitError>>()?; + let dummy_ver_key_var = VerKeyVar(circuit.neutral_point_variable()); + stake_table_var.resize( + STAKE_TABLE_CAPACITY, + StakeTableEntryVar { + schnorr_ver_key: dummy_ver_key_var, + stake_amount: 0, + }, + ); + + // creating variables for signatures + let mut sig_vars = sigs + .iter() + .map(|sig| circuit.create_signature_variable(sig)) + .collect::, CircuitError>>()?; + sig_vars.resize( + STAKE_TABLE_CAPACITY, + SignatureVar { + s: circuit.zero(), + R: circuit.neutral_point_variable(), + }, + ); + + // creating Boolean variables for the bit vector + let mut signer_bit_vec_var = signer_bit_vec + .iter() + .map(|&b| circuit.create_boolean_variable(b)) + .collect::, CircuitError>>()?; + signer_bit_vec_var.resize(STAKE_TABLE_CAPACITY, BoolVar(circuit.zero())); + + let threshold = u256_to_field::(threshold); + let threshold_var = circuit.create_public_variable(threshold)?; + + let lightclient_state_var = LightClientStateVar::new(&mut circuit, lightclient_state)?; + + let view_number_f = F::from(lightclient_state.view_number as u64); + let block_height_f = F::from(lightclient_state.block_height as u64); + let public_inputs = vec![ + threshold, + view_number_f, + block_height_f, + lightclient_state.block_comm, + lightclient_state.fee_ledger_comm, + lightclient_state.stake_table_comm.0, + lightclient_state.stake_table_comm.1, + lightclient_state.stake_table_comm.2, + ]; + + // Checking whether the accumulated weight exceeds the quorum threshold + let mut signed_amount_var = (0..STAKE_TABLE_CAPACITY / 2) + .map(|i| { + circuit.mul_add( + &[ + stake_table_var[2 * i].stake_amount, + signer_bit_vec_var[2 * i].0, + stake_table_var[2 * i + 1].stake_amount, + signer_bit_vec_var[2 * i + 1].0, + ], + &[F::one(), F::one()], + ) + }) + .collect::, CircuitError>>()?; + // Adding the last if STAKE_TABLE_CAPACITY is not a multiple of 2 + if STAKE_TABLE_CAPACITY % 2 == 1 { + signed_amount_var.push(circuit.mul( + stake_table_var[STAKE_TABLE_CAPACITY - 1].stake_amount, + signer_bit_vec_var[STAKE_TABLE_CAPACITY - 1].0, + )?); } + let acc_amount_var = circuit.sum(&signed_amount_var)?; + circuit.enforce_leq(threshold_var, acc_amount_var)?; + + // checking the commitment for the list of schnorr keys + let schnorr_ver_key_preimage_vars = stake_table_var + .iter() + .flat_map(|var| [var.schnorr_ver_key.0.get_x(), var.schnorr_ver_key.0.get_y()]) + .collect::>(); + let schnorr_ver_key_comm = RescueNativeGadget::::rescue_sponge_with_padding( + &mut circuit, + &schnorr_ver_key_preimage_vars, + 1, + )?[0]; + circuit.enforce_equal( + schnorr_ver_key_comm, + lightclient_state_var.stake_table_comm().schnorr_keys_comm, + )?; + + // checking the commitment for the list of stake amounts + let stake_amount_preimage_vars = stake_table_var + .iter() + .map(|var| var.stake_amount) + .collect::>(); + let stake_amount_comm = RescueNativeGadget::::rescue_sponge_with_padding( + &mut circuit, + &stake_amount_preimage_vars, + 1, + )?[0]; + circuit.enforce_equal( + stake_amount_comm, + lightclient_state_var.stake_table_comm().stake_amount_comm, + )?; + + // checking all signatures + let verification_result_vars = stake_table_var + .iter() + .zip(sig_vars) + .map(|(entry, sig)| { + SignatureGadget::<_, P>::check_signature_validity( + &mut circuit, + &entry.schnorr_ver_key, + lightclient_state_var.as_ref(), + &sig, + ) + }) + .collect::, CircuitError>>()?; + let bit_x_result_vars = signer_bit_vec_var + .iter() + .zip(verification_result_vars) + .map(|(&bit, result)| { + let neg_bit = circuit.logic_neg(bit)?; + circuit.logic_or(neg_bit, result) + }) + .collect::, CircuitError>>()?; + let sig_ver_result = circuit.logic_and_all(&bit_x_result_vars)?; + circuit.enforce_true(sig_ver_result.0)?; + + circuit.finalize_for_arithmetization()?; + Ok((circuit, public_inputs)) } #[cfg(test)] mod tests { - use super::{LightClientState, StateUpdateBuilder}; + use super::{build_state_verifier_circuit, LightClientState}; + use crate::utils::{key_pairs_for_testing, stake_table_for_testing}; use ark_ed_on_bn254::EdwardsConfig as Config; - use ark_std::rand::{CryptoRng, RngCore}; use ethereum_types::U256; - use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; use jf_primitives::{ crhf::{VariableLengthRescueCRHF, CRHF}, errors::PrimitivesError, - signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, - schnorr::Signature, - SchnorrSignatureScheme, SignatureScheme, - }, + signatures::{schnorr::Signature, SchnorrSignatureScheme, SignatureScheme}, }; use jf_relation::Circuit; use jf_utils::test_rng; type F = ark_ed_on_bn254::Fq; - type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; - type SchnorrSignKey = jf_primitives::signatures::schnorr::SignKey; - - fn key_pairs_for_testing( - num_validators: usize, - prng: &mut R, - ) -> (Vec, Vec<(SchnorrSignKey, SchnorrVerKey)>) { - let bls_keys = (0..num_validators) - .map(|_| { - BLSOverBN254CurveSignatureScheme::key_gen(&(), prng) - .unwrap() - .1 - }) - .collect::>(); - let schnorr_keys = (0..num_validators) - .map(|_| SchnorrSignatureScheme::key_gen(&(), prng).unwrap()) - .collect::>(); - (bls_keys, schnorr_keys) - } - - fn stake_table_for_testing( - bls_keys: &[BLSVerKey], - schnorr_keys: &[(SchnorrSignKey, SchnorrVerKey)], - ) -> StakeTable { - let mut st = StakeTable::::new(); - // Registering keys - bls_keys.iter().enumerate().zip(schnorr_keys).for_each( - |((i, bls_key), (_, schnorr_key))| { - st.register(*bls_key, U256::from((i + 1) as u32), schnorr_key.clone()) - .unwrap() - }, - ); - // Freeze the stake table - st.advance(); - st.advance(); - st - } #[test] fn test_circuit_building() { @@ -401,7 +350,7 @@ mod tests { }) .collect::>(); // good path - let (circuit, public_inputs) = StateUpdateBuilder::::build( + let (circuit, public_inputs) = build_state_verifier_circuit( &st, &bit_masked_sigs, &lightclient_state, @@ -411,7 +360,7 @@ mod tests { .unwrap(); assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); - let (circuit, public_inputs) = StateUpdateBuilder::::build( + let (circuit, public_inputs) = build_state_verifier_circuit( &st, &bit_masked_sigs, &lightclient_state, @@ -437,7 +386,7 @@ mod tests { } }) .collect::>(); - let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( + let (bad_circuit, public_inputs) = build_state_verifier_circuit( &st, &bad_bit_masked_sigs, &lightclient_state, @@ -468,7 +417,7 @@ mod tests { }) .collect::, PrimitivesError>>() .unwrap(); - let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( + let (bad_circuit, public_inputs) = build_state_verifier_circuit( &st, &sig_for_bad_state, &bad_lightclient_state, @@ -500,7 +449,7 @@ mod tests { }) .collect::, PrimitivesError>>() .unwrap(); - let (bad_circuit, public_inputs) = StateUpdateBuilder::::build( + let (bad_circuit, public_inputs) = build_state_verifier_circuit( &st, &wrong_sigs, &lightclient_state, diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 13feae9401..ca0e7e3892 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -1,3 +1,79 @@ //! SNARK-assisted light client state update verification in HotShot +/// State verifier circuit builder pub mod circuit; +mod utils; + +use ark_bn254::Bn254; +use ark_std::rand::{CryptoRng, RngCore}; +use circuit::build_state_verifier_circuit; +use ethereum_types::U256; +use hotshot_stake_table::vec_based::StakeTable; +use hotshot_types::traits::{ + stake_table::{SnapshotVersion, StakeTableScheme}, + state::LightClientState, +}; +use jf_plonk::{ + errors::PlonkError, + proof_system::{PlonkKzgSnark, UniversalSNARK}, + transcript::StandardTranscript, +}; +use jf_primitives::signatures::schnorr::Signature; +use jf_relation::PlonkCircuit; + +/// BLS verification key, base field and Schnorr verification key +pub use hotshot_stake_table::vec_based::config::{ + BLSVerKey, FieldType as BaseField, SchnorrVerKey, +}; +/// Proving key +pub type ProvingKey = jf_plonk::proof_system::structs::ProvingKey; +/// Verifying key +pub type VerifyingKey = jf_plonk::proof_system::structs::VerifyingKey; +/// Proof +pub type Proof = jf_plonk::proof_system::structs::Proof; +/// Universal SRS +pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; +/// Curve config for Schnorr signatures +pub use ark_ed_on_bn254::EdwardsConfig; + +pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), PlonkError> { + let (circuit, _) = build_dummy_circuit_for_preprocessing()?; + PlonkKzgSnark::preprocess(srs, &circuit) +} + +pub fn generate_state_update_proof( + rng: &mut R, + pk: &ProvingKey, + stake_table: &ST, + sigs: &[Signature], + lightclient_state: &LightClientState, + signer_bit_vec: &[bool], + threshold: &U256, +) -> Result<(Proof, Vec), PlonkError> +where + ST: StakeTableScheme, + R: CryptoRng + RngCore, +{ + let (circuit, public_inputs) = build_state_verifier_circuit( + stake_table, + sigs, + lightclient_state, + signer_bit_vec, + threshold, + )?; + let proof = PlonkKzgSnark::::prove::<_, _, StandardTranscript>(rng, &circuit, pk, None)?; + Ok((proof, public_inputs)) +} + +fn build_dummy_circuit_for_preprocessing( +) -> Result<(PlonkCircuit, Vec), PlonkError> { + let st = StakeTable::::new(); + let lightclient_state = LightClientState { + view_number: 0, + block_height: 0, + block_comm: BaseField::default(), + fee_ledger_comm: BaseField::default(), + stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), + }; + build_state_verifier_circuit(&st, &[], &lightclient_state, &[], &U256::zero()) +} diff --git a/hotshot-state-prover/src/utils.rs b/hotshot-state-prover/src/utils.rs new file mode 100644 index 0000000000..2dac55e75c --- /dev/null +++ b/hotshot-state-prover/src/utils.rs @@ -0,0 +1,54 @@ +use ark_ed_on_bn254::EdwardsConfig; +use ark_std::rand::{CryptoRng, RngCore}; +use ethereum_types::U256; +use hotshot_stake_table::vec_based::StakeTable; +use hotshot_types::traits::stake_table::StakeTableScheme; +use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, + SchnorrSignatureScheme, SignatureScheme, +}; + +type F = ark_ed_on_bn254::Fq; +type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; +type SchnorrSignKey = jf_primitives::signatures::schnorr::SignKey; + +/// Helper function for test +#[allow(dead_code)] +pub(crate) fn key_pairs_for_testing( + num_validators: usize, + prng: &mut R, +) -> (Vec, Vec<(SchnorrSignKey, SchnorrVerKey)>) { + let bls_keys = (0..num_validators) + .map(|_| { + BLSOverBN254CurveSignatureScheme::key_gen(&(), prng) + .unwrap() + .1 + }) + .collect::>(); + let schnorr_keys = (0..num_validators) + .map(|_| SchnorrSignatureScheme::key_gen(&(), prng).unwrap()) + .collect::>(); + (bls_keys, schnorr_keys) +} + +/// Helper function for test +#[allow(dead_code)] +pub(crate) fn stake_table_for_testing( + bls_keys: &[BLSVerKey], + schnorr_keys: &[(SchnorrSignKey, SchnorrVerKey)], +) -> StakeTable { + let mut st = StakeTable::::new(); + // Registering keys + bls_keys + .iter() + .enumerate() + .zip(schnorr_keys) + .for_each(|((i, bls_key), (_, schnorr_key))| { + st.register(*bls_key, U256::from((i + 1) as u32), schnorr_key.clone()) + .unwrap() + }); + // Freeze the stake table + st.advance(); + st.advance(); + st +} From c3e8db1f91121129f6551ab4a37dd2ae7d62fca8 Mon Sep 17 00:00:00 2001 From: MRain Date: Tue, 14 Nov 2023 11:26:24 -0500 Subject: [PATCH 0375/1393] better documentation --- hotshot-state-prover/src/circuit.rs | 16 ++++++++++------ hotshot-state-prover/src/lib.rs | 12 ++++++++++++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 3bba623f87..27b51ef437 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -115,15 +115,19 @@ impl AsRef<[Variable]> for LightClientStateVar { } /// A function that takes as input: -/// - stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) -/// - schnorr signatures of the updated states (`Vec`) +/// - a list of stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) +/// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state /// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) -/// - signer bit vector -/// - quorum threshold +/// - a bit vector indicates the signers +/// - a quorum threshold /// checks that /// - the signer's accumulated weight exceeds the quorum threshold -/// - the commitment of the stake table -/// - all schnorr signatures are valid +/// - the stake table corresponds to the one committed in the light client state +/// - all signed schnorr signatures are valid +/// returns +/// - A circuit for proof generation +/// - A list of public inputs for verification +/// - A PlonkError if any error happens when building the circuit pub(crate) fn build_state_verifier_circuit( stake_table: &ST, sigs: &[Signature

], diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index ca0e7e3892..140754d7e6 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -36,11 +36,22 @@ pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; /// Curve config for Schnorr signatures pub use ark_ed_on_bn254::EdwardsConfig; +/// Given a SRS, returns the proving key and verifying key for state update pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), PlonkError> { let (circuit, _) = build_dummy_circuit_for_preprocessing()?; PlonkKzgSnark::preprocess(srs, &circuit) } +/// Given a proving key and +/// - a list of stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) +/// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state +/// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) +/// - a bit vector indicates the signers +/// - a quorum threshold +/// Returns error or a pair (proof, public_inputs) asserting that +/// - the signer's accumulated weight exceeds the quorum threshold +/// - the stake table corresponds to the one committed in the light client state +/// - all signed schnorr signatures are valid pub fn generate_state_update_proof( rng: &mut R, pk: &ProvingKey, @@ -65,6 +76,7 @@ where Ok((proof, public_inputs)) } +/// Internal function for helping generate the proving/verifying key fn build_dummy_circuit_for_preprocessing( ) -> Result<(PlonkCircuit, Vec), PlonkError> { let st = StakeTable::::new(); From 254a1c242eb857434f16a512ddc2eba2e26acba6 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 14 Nov 2023 11:28:49 -0500 Subject: [PATCH 0376/1393] Update tagged-base64 --- hotshot-stake-table/Cargo.toml | 2 +- orchestrator/Cargo.toml | 4 ++-- types/Cargo.toml | 6 ++---- types/src/data.rs | 2 +- types/src/traits/signature_key.rs | 2 +- web_server/Cargo.toml | 2 +- web_server/src/lib.rs | 2 +- 7 files changed, 9 insertions(+), 11 deletions(-) diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 8f11e4d66f..917f85eb92 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -22,7 +22,7 @@ jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } -tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } +tagged-base64 = { workspace = true } typenum = { workspace = true } [dev-dependencies] diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 3febbbce65..1d0d6f789f 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -17,13 +17,13 @@ hotshot-types = { version = "0.1.0", path = "../types", default-features = false hotshot-utils = { path = "../utils" } libp2p-networking = { workspace = true } nll = { workspace = true } -tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } +tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } serde_json = "1.0.96" snafu = { workspace = true } -toml = "0.5.9" # TODO GG upgrade to toml = { workspace = true } +toml = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index f65eac0983..a7e4a00c18 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -15,9 +15,7 @@ arbitrary = { version = "1.3", features = ["derive"] } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } -ark-serialize = { version = "0.3", features = [ - "derive", -] } # TODO GG upgrade to 0.4 and inherit this dep from workspace +ark-serialize = { workspace = true, features = ["derive"] } ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } @@ -42,7 +40,7 @@ rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } snafu = { workspace = true } -tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } +tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } ethereum-types = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index 68fc0ec064..ce07facb55 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -18,7 +18,7 @@ use crate::{ Block, State, }, }; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; use commit::{Commitment, Committable}; use derivative::Derivative; diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 5daed175bd..c84f7d95f0 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -1,5 +1,5 @@ //! Minimal compatibility over public key signatures -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bitvec::prelude::*; use espresso_systems_common::hotshot::tag; use ethereum_types::U256; diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 827d5a5258..a758d4ab37 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -21,7 +21,7 @@ libp2p-core = { version = "0.40.0", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } -tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } +tide-disco = { workspace = true } nll = { workspace = true } tracing = { workspace = true } rand = { workspace = true } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index e29ce036e8..90e94f8aaa 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -492,7 +492,7 @@ where let mut api = match &options.api_path { Some(path) => Api::::from_file(path)?, None => { - let toml = toml::from_str(include_str!("../api.toml")).map_err(|err| { + let toml: toml::Value = toml::from_str(include_str!("../api.toml")).map_err(|err| { ApiError::CannotReadToml { reason: err.to_string(), } From 8fd36fcedd4e5375828c0f19ca1e9d03c3ef2a2f Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 14 Nov 2023 09:17:38 -0800 Subject: [PATCH 0377/1393] Update encode and decode functions, replace test types with app-specific types, update return types --- hotshot/examples/infra/mod.rs | 6 +-- hotshot/src/lib.rs | 16 ++------ hotshot/src/tasks/mod.rs | 9 +++-- task-impls/src/consensus.rs | 36 +++++++++-------- task-impls/src/da.rs | 13 +++--- task-impls/src/events.rs | 22 +++++++--- task-impls/src/transactions.rs | 53 ++++++++++--------------- testing/src/task_helpers.rs | 6 +-- testing/src/test_runner.rs | 3 +- testing/tests/da_task.rs | 13 ++++-- testing/tests/network_task.rs | 13 ++++-- testing/tests/vid_task.rs | 8 +++- types/src/block_impl.rs | 64 ++++++++++++++++++++---------- types/src/data.rs | 45 +++++---------------- types/src/traits/block_contents.rs | 34 ++++++++++------ 15 files changed, 179 insertions(+), 162 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 321684fa22..7c4012c579 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -222,10 +222,8 @@ pub trait RunDA< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let genesis_block = TYPES::BlockPayload::genesis(); - let initializer = - hotshot::HotShotInitializer::>::from_genesis(genesis_block) - .expect("Couldn't generate genesis block"); + let initializer = hotshot::HotShotInitializer::>::from_genesis() + .expect("Couldn't generate genesis block"); let config = self.get_config(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 89203d9074..727bea95b3 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -57,7 +57,6 @@ use hotshot_types::{ }; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::{DAProposal, Leaf, LeafType, QuorumProposal}, @@ -77,7 +76,6 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, - State, }, vote::ViewSyncData, HotShotConfig, @@ -616,11 +614,7 @@ pub trait HotShotType> { #[async_trait] impl< - TYPES: NodeType< - BlockHeader = VIDBlockHeader, - BlockPayload = VIDBlockPayload, - Transaction = VIDTransaction, - >, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = Leaf, @@ -1001,13 +995,9 @@ impl> HotShotInitializer Result> { - let state = TYPES::StateType::initialize(); - let time = TYPES::Time::genesis(); - let justify_qc = QuorumCertificate2::::genesis(); - + pub fn from_genesis() -> Result> { Ok(Self { - inner: LEAF::new(time, justify_qc, genesis_payload, state), + inner: LEAF::genesis(), }) } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b8eba956f8..3ac963a0f6 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -25,7 +25,6 @@ use hotshot_task_impls::{ view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{Leaf, ProposalType, QuorumProposal}, event::Event, @@ -38,6 +37,7 @@ use hotshot_types::{ ViewSyncEx, }, state::ConsensusTime, + BlockPayload, }, vote::ViewSyncData, }; @@ -227,7 +227,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, @@ -256,13 +256,14 @@ where inner: handle.hotshot.inner.clone(), }; let registry = task_runner.registry.clone(); + let (payload, metadata) = ::genesis(); // build the consensus task let consensus_state = ConsensusTaskState { registry: registry.clone(), consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - payload_commitment: Some(VIDBlockPayload::genesis().commit()), + payload_commitment_and_metadata: Some((payload.commit(), metadata)), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), api: c_api.clone(), @@ -450,7 +451,7 @@ where /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, >( task_runner: TaskRunner, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 60d755bf58..62753bc67f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -16,7 +16,6 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, consensus::{Consensus, View}, data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, @@ -57,10 +56,13 @@ use tracing::{debug, error, info, instrument}; #[derive(Snafu, Debug)] pub struct ConsensusTaskError {} +/// Alias for the block payload commitment and the associated metadata. +type CommitmentAndMetadata = (Commitment, ::Metadata); + /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, > where @@ -88,8 +90,8 @@ pub struct ConsensusTaskState< /// View number this view is executing in. pub cur_view: TYPES::Time, - /// The commitment to the current block payload submitted to DA - pub payload_commitment: Option>, + /// The commitment to the current block payload and its metadata submitted to DA. + pub payload_commitment_and_metadata: Option>, /// the quorum exchange pub quorum_exchange: Arc>, @@ -331,7 +333,7 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = Leaf, @@ -340,7 +342,6 @@ impl< A: ConsensusApi, I> + 'static, > ConsensusTaskState where - TYPES::BlockHeader: BlockHeader, QuorumEx: ConsensusExchange< TYPES, Message, @@ -1237,8 +1238,8 @@ where let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } - HotShotEvent::SendPayloadCommitment(payload_commitment) => { - self.payload_commitment = Some(payload_commitment); + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata) => { + self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); } _ => {} } @@ -1314,12 +1315,16 @@ where // TODO do some sort of sanity check on the view number that it matches decided } - if let Some(payload_commitment) = &self.payload_commitment { + if let Some((payload_commitment, metadata)) = &self.payload_commitment_and_metadata { let leaf = Leaf { view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - block_header: TYPES::BlockHeader::new(*payload_commitment, (), &parent_header), + block_header: TYPES::BlockHeader::new( + *payload_commitment, + metadata.clone(), + &parent_header, + ), block_payload: None, rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), @@ -1331,7 +1336,7 @@ where .sign_validating_or_commitment_proposal::(&leaf.commit()); // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_header: TYPES::BlockHeader::new(*payload_commitment, (), &parent_header), + block_header: leaf.block_header.clone(), view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), @@ -1352,7 +1357,7 @@ where self.quorum_exchange.public_key().clone(), )) .await; - self.payload_commitment = None; + self.payload_commitment_and_metadata = None; return true; } debug!("Self block was None"); @@ -1361,7 +1366,7 @@ where } impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = Leaf, @@ -1405,7 +1410,7 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation, ConsensusMessage = SequencingMessage>, A: ConsensusApi, I> + 'static, >( @@ -1416,7 +1421,6 @@ pub async fn sequencing_consensus_handle< ConsensusTaskState, ) where - TYPES::BlockHeader: BlockHeader, QuorumEx: ConsensusExchange< TYPES, Message, @@ -1451,7 +1455,7 @@ pub fn consensus_event_filter>( | HotShotEvent::QCFormed(_) | HotShotEvent::DACRecv(_) | HotShotEvent::ViewChange(_) - | HotShotEvent::SendPayloadCommitment(_) + | HotShotEvent::SendPayloadCommitmentAndMetadata(_, _) | HotShotEvent::Timeout(_) | HotShotEvent::TimeoutVoteRecv(_) | HotShotEvent::Shutdown, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b29dab50d5..304aa0fc18 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -394,18 +394,18 @@ where return None; } - HotShotEvent::BlockReady(block, view) => { + HotShotEvent::BlockReady(payload, metadata, view) => { self.committee_exchange .network() .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - let payload_commitment = block.commit(); + let payload_commitment = payload.commit(); let signature = self .committee_exchange .sign_da_proposal(&payload_commitment); let data: DAProposal = DAProposal { - block_payload: block.clone(), + block_payload: payload.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number: view, }; @@ -414,7 +414,10 @@ where let message = Proposal { data, signature }; self.event_stream - .publish(HotShotEvent::SendPayloadCommitment(payload_commitment)) + .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( + payload_commitment, + metadata, + )) .await; self.event_stream .publish(HotShotEvent::DAProposalSend( @@ -449,7 +452,7 @@ where HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown - | HotShotEvent::BlockReady(_, _) + | HotShotEvent::BlockReady(_, _, _) | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) ) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index b4be7769be..a08932bb05 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -9,9 +9,12 @@ use hotshot_types::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, }, simple_vote::{DAVote2, QuorumVote, TimeoutVote2, VIDVote2}, - traits::node_implementation::{ - CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, QuorumProposalType, - VIDMembership, ViewSyncProposalType, + traits::{ + node_implementation::{ + CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, + QuorumProposalType, VIDMembership, ViewSyncProposalType, + }, + BlockPayload, }, vote::ViewSyncVote, }; @@ -70,10 +73,17 @@ pub enum HotShotEvent> { TransactionsRecv(Vec), /// Send transactions to the network TransactionSend(TYPES::Transaction, TYPES::SignatureKey), - /// Event to send block payload commitment from DA leader to the quorum; internal event only - SendPayloadCommitment(Commitment), + /// Event to send block payload commitment and metadata from DA leader to the quorum; internal event only + SendPayloadCommitmentAndMetadata( + Commitment, + ::Metadata, + ), /// Event when the transactions task has a block formed - BlockReady(TYPES::BlockPayload, TYPES::Time), + BlockReady( + TYPES::BlockPayload, + ::Metadata, + TYPES::Time, + ), /// Event when consensus decided on a leaf LeafDecided(Vec), /// Send VID shares to VID storage nodes; emitted by the DA leader diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 63605b167c..572f2d02a1 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,7 +13,7 @@ use hotshot_task::{ task_impls::HSTWithEvent, }; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction, NUM_CHUNKS, NUM_STORAGE_NODES}, + block_impl::{NUM_CHUNKS, NUM_STORAGE_NODES}, consensus::Consensus, data::{test_srs, Leaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, @@ -75,11 +75,8 @@ pub struct TransactionTaskState< pub id: u64, } -// We have two `TransactionTaskState` implementations with different bounds. The implementation -// here requires `TYPES: NodeType`, -// whereas it's just `TYPES: NodeType` in the second implementation. impl< - TYPES: NodeType, + TYPES: NodeType, I: NodeImplementation< TYPES, Leaf = Leaf, @@ -219,24 +216,32 @@ where // TODO (Keyao) Determine whether to allow empty blocks. // let txns = self.wait_for_transactions(parent_leaf).await?; - let encoded_txns = VIDTransaction::encode(txns.clone()); + // TODO (Keyao) We encode the transactions and compute the VID disperse data twice, + // through `from_transactions` once, then `encode` and `disperse` again. This is + // because we need `disperse` for the `VidDisperseSend` event, which can't be + // retrieved by `from_transactions` directly. + // This duplication will be fixed when updating the data sent to the DA task, i.e., + // in the `BlockReady` event. + // Relevant issue: https://github.com/EspressoSystems/HotShot/issues/2026. + let (payload, metadata) = + ::from_transactions(txns); + let encoded_txns = ::encode(&payload); // TODO let srs = test_srs(NUM_STORAGE_NODES); // TODO We are using constant numbers for now, but they will change as the quorum size // changes. // TODO let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let vid_disperse = vid.disperse(encoded_txns.clone()).unwrap(); - - let block = VIDBlockPayload { - transactions: txns, - payload_commitment: vid_disperse.commit, - }; + let vid_disperse = vid.disperse(encoded_txns).unwrap(); // TODO never clone a block // https://github.com/EspressoSystems/HotShot/issues/1858 self.event_stream - .publish(HotShotEvent::BlockReady(block.clone(), view + 1)) + .publish(HotShotEvent::BlockReady( + payload.clone(), + metadata, + view + 1, + )) .await; // TODO (Keyao) Determine and update where to publish VidDisperseSend. @@ -247,12 +252,14 @@ where Proposal { data: VidDisperse { view_number: view + 1, - payload_commitment: block.commit(), + payload_commitment: payload.commit(), shares: vid_disperse.shares, common: vid_disperse.common, }, // TODO (Keyao) This is also signed in DA task. - signature: self.quorum_exchange.sign_payload_commitment(block.commit()), + signature: self + .quorum_exchange + .sign_payload_commitment(payload.commit()), }, self.quorum_exchange.public_key().clone(), )) @@ -266,23 +273,7 @@ where } None } -} -// We have two `TransactionTaskState` implementations with different bounds. The implementation -// above requires `TYPES: NodeType`, -// whereas here it's just `TYPES: NodeType`. -impl< - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - A: ConsensusApi, I> + 'static, - > TransactionTaskState -where - QuorumEx: ConsensusExchange>, -{ #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( &self, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index cba704b745..b84604e2b3 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -4,7 +4,7 @@ use crate::{ }; use commit::Committable; use hotshot::{ - traits::{NodeImplementation, TestableNodeImplementation}, + traits::NodeImplementation, types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, HotShotConsensusApi, HotShotInitializer, SystemContext, }; @@ -44,9 +44,7 @@ pub async fn build_system_handle( let initializer = HotShotInitializer::< TestTypes, >::Leaf, - >::from_genesis( - >::block_genesis() - ) + >::from_genesis() .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 1988a38da5..b1766ee356 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -204,8 +204,7 @@ where let node_id = self.next_node_id; let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); - let initializer = - HotShotInitializer::::from_genesis(I::block_genesis()).unwrap(); + let initializer = HotShotInitializer::::from_genesis().unwrap(); let networks = (self.launcher.resource_generator.channel_generator)(node_id); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 79f04c7122..e9ca5c8888 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -64,17 +64,24 @@ async fn test_da_task() { // In view 1, node 2 is the next leader. input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2))); + input.push(HotShotEvent::BlockReady( + block.clone(), + (), + ViewNumber::new(2), + )); input.push(HotShotEvent::DAProposalRecv(message.clone(), pub_key)); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + HotShotEvent::BlockReady(block.clone(), (), ViewNumber::new(2)), + 1, + ); + output.insert( + HotShotEvent::SendPayloadCommitmentAndMetadata(block.commit(), ()), 1, ); - output.insert(HotShotEvent::SendPayloadCommitment(block.commit()), 1); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); let da_vote = DAVote2::create_signed_vote( DAData { diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index ac1368863f..bbb6eb7164 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -76,7 +76,11 @@ async fn test_network_task() { let mut output = HashMap::new(); input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2))); + input.push(HotShotEvent::BlockReady( + block.clone(), + (), + ViewNumber::new(2), + )); input.push(HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key)); input.push(HotShotEvent::VidDisperseSend( da_vid_disperse.clone(), @@ -95,7 +99,7 @@ async fn test_network_task() { 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); output.insert( - HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + HotShotEvent::BlockReady(block.clone(), (), ViewNumber::new(2)), 2, ); output.insert( @@ -118,7 +122,10 @@ async fn test_network_task() { HotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, ); - output.insert(HotShotEvent::SendPayloadCommitment(block.commit()), 1); + output.insert( + HotShotEvent::SendPayloadCommitmentAndMetadata(block.commit(), ()), + 1, + ); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); output.insert( HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 6813f20dde..e342facfa5 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -73,14 +73,18 @@ async fn test_vid_task() { // In view 1, node 2 is the next leader. input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2))); + input.push(HotShotEvent::BlockReady( + block.clone(), + (), + ViewNumber::new(2), + )); input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - HotShotEvent::BlockReady(block.clone(), ViewNumber::new(2)), + HotShotEvent::BlockReady(block.clone(), (), ViewNumber::new(2)), 1, ); diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index e0542a7f55..07b26c465e 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -1,7 +1,7 @@ //! This module provides an implementation of the `HotShot` suite of traits. use std::{ - collections::HashSet, fmt::{Debug, Display}, + mem::size_of, }; use crate::{ @@ -44,10 +44,9 @@ impl VIDTransaction { for txn in transactions { // Encode the length of the inner transaction and the transaction bytes. - if let Ok(len) = txn.0.len().try_into() { - encoded.extend::>(vec![len]); - encoded.extend(txn.0); - } + let txn_size = txn.0.len().to_le_bytes(); + encoded.extend(txn_size); + encoded.extend(txn.0); } encoded @@ -140,8 +139,11 @@ impl BlockPayload for VIDBlockPayload { type Error = BlockError; type Transaction = VIDTransaction; type Metadata = (); + type Encode<'a> = as IntoIterator>::IntoIter; - fn build(transactions: impl IntoIterator) -> (Self, Self::Metadata) { + fn from_transactions( + transactions: impl IntoIterator, + ) -> (Self, Self::Metadata) { let txns_vec: Vec = transactions.into_iter().collect(); let encoded = VIDTransaction::encode(txns_vec.clone()); ( @@ -153,29 +155,38 @@ impl BlockPayload for VIDBlockPayload { ) } - fn encode(&self) -> Vec { - VIDTransaction::encode(self.transactions.clone()) - } - - fn decode(encoded_transactions: &[u8]) -> Self { + fn from_bytes(encoded_transactions: Self::Encode<'_>, _metadata: Self::Metadata) -> Self { + let encoded_vec: Vec = encoded_transactions.collect(); let mut transactions = Vec::new(); let mut current_index = 0; - while current_index < encoded_transactions.len() { - let txn_len = encoded_transactions[current_index] as usize; - let next_index = current_index + txn_len; + while current_index < encoded_vec.len() { + // Decode the length of the transaction and the transaction bytes. + let txn_start_index = current_index + size_of::(); + let mut txn_len_bytes = [0; size_of::()]; + txn_len_bytes.copy_from_slice(&encoded_vec[current_index..txn_start_index]); + let txn_len: usize = usize::from_le_bytes(txn_len_bytes); + let next_index = txn_start_index + txn_len; transactions.push(VIDTransaction( - encoded_transactions[current_index..next_index].to_vec(), + encoded_vec[txn_start_index..next_index].to_vec(), )); current_index = next_index; } Self { transactions, - payload_commitment: Self::vid_commitment(encoded_transactions), + payload_commitment: Self::vid_commitment(&encoded_vec), } } - fn transaction_commitments(&self) -> HashSet> { + fn genesis() -> (Self, Self::Metadata) { + (Self::genesis(), ()) + } + + fn encode(&self) -> Self::Encode<'_> { + VIDTransaction::encode(self.transactions.clone()).into_iter() + } + + fn transaction_commitments(&self) -> Vec> { self.transactions .iter() .map(commit::Committable::commit) @@ -206,11 +217,20 @@ impl BlockHeader for VIDBlockHeader { } } - fn genesis(payload: Self::Payload) -> Self { - Self { - block_number: 0, - payload_commitment: payload.commit(), - } + fn genesis() -> ( + Self, + Self::Payload, + ::Metadata, + ) { + let (payload, metadata) = ::genesis(); + ( + Self { + block_number: 0, + payload_commitment: payload.commit(), + }, + payload, + metadata, + ) } fn block_number(&self) -> u64 { diff --git a/types/src/data.rs b/types/src/data.rs index a6c5cf05fa..da76373924 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -302,13 +302,8 @@ pub trait LeafType: + Serialize + Sync; - /// Create a new leaf from its components. - fn new( - view_number: LeafTime, - justify_qc: QuorumCertificate2, - deltas: LeafBlockPayload, - state: LeafState, - ) -> Self; + /// Create a genesis leaf. + fn genesis() -> Self; /// Time when this leaf was created. fn get_view_number(&self) -> LeafTime; /// Height of this leaf in the chain. @@ -482,23 +477,8 @@ impl LeafType for ValidatingLeaf { type NodeType = TYPES; type MaybeState = TYPES::StateType; - fn new( - view_number: ::Time, - justify_qc: QuorumCertificate2, - deltas: ::BlockPayload, - state: ::StateType, - ) -> Self { - Self { - view_number, - height: 0, - justify_qc, - parent_commitment: fake_commitment(), - deltas, - state, - rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: genesis_proposer_id(), - } + fn genesis() -> Self { + unimplemented!(); } fn get_view_number(&self) -> TYPES::Time { @@ -588,21 +568,16 @@ impl Display for Leaf { impl LeafType for Leaf { type NodeType = TYPES; - // type DeltasType = Either<(u64, TYPES::BlockPayload), TYPES::BlockHeader>; type MaybeState = (); - fn new( - view_number: ::Time, - justify_qc: QuorumCertificate2, - payload: ::BlockPayload, - _state: ::StateType, - ) -> Self { + fn genesis() -> Self { + let (block_header, block_payload, _) = TYPES::BlockHeader::genesis(); Self { - view_number, - justify_qc, + view_number: TYPES::Time::genesis(), + justify_qc: QuorumCertificate2::::genesis(), parent_commitment: fake_commitment(), - block_header: TYPES::BlockHeader::genesis(payload.clone()), - block_payload: Some(payload), + block_header, + block_payload: Some(block_payload), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: genesis_proposer_id(), diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 9ee9755a1a..015c1a96fa 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -7,7 +7,6 @@ use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; use std::{ - collections::HashSet, error::Error, fmt::{Debug, Display}, hash::Hash, @@ -47,20 +46,27 @@ pub trait BlockPayload: type Transaction: Transaction; /// Data created during block building which feeds into the block header - type Metadata; + type Metadata: Clone + Debug + Eq + Hash + Send + Sync; + + /// Encoded payload. + type Encode<'a>: 'a + Iterator + AsRef<[u8]> + Send; /// Build a payload and associated metadata with the transactions. - fn build(transactions: impl IntoIterator) -> (Self, Self::Metadata); + fn from_transactions( + transactions: impl IntoIterator, + ) -> (Self, Self::Metadata); - /// Encode the payload - fn encode(&self) -> Vec; + /// Build a payload with the encoded transaction bytes and metadata. + fn from_bytes(encoded_transactions: Self::Encode<'_>, metadata: Self::Metadata) -> Self; - /// Decode the payload - fn decode(encoded_transactions: &[u8]) -> Self; + /// Build the genesis payload and metadata. + fn genesis() -> (Self, Self::Metadata); + + /// Encode the payload + fn encode(&self) -> Self::Encode<'_>; - /// returns hashes of all the transactions in this block - /// TODO make this ordered with a vec - fn transaction_commitments(&self) -> HashSet>; + /// List of transaction commitments. + fn transaction_commitments(&self) -> Vec>; } /// Header of a block, which commits to a [`BlockPayload`]. @@ -77,8 +83,12 @@ pub trait BlockHeader: parent_header: &Self, ) -> Self; - /// Build a genesis header with the genesis payload. - fn genesis(payload: Self::Payload) -> Self; + /// Build the genesis header, payload, and metadata. + fn genesis() -> ( + Self, + Self::Payload, + ::Metadata, + ); /// Get the block number. fn block_number(&self) -> u64; From f3c720f9bdb4d5ad75085e0203312a007049d74b Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 14 Nov 2023 13:01:40 -0500 Subject: [PATCH 0378/1393] Change AssembledSignature variant from Genesis() to Genesis serde_json appears not to handle empty tuple variants correctly, but it does handle unit variants: https://github.com/serde-rs/json/issues/1084 --- hotshot/src/demos/sdemo.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 2 +- types/src/certificate.rs | 4 ++-- types/src/data.rs | 2 +- types/src/traits/election.rs | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hotshot/src/demos/sdemo.rs b/hotshot/src/demos/sdemo.rs index 97117347a9..3669fcf109 100644 --- a/hotshot/src/demos/sdemo.rs +++ b/hotshot/src/demos/sdemo.rs @@ -357,7 +357,7 @@ pub fn random_quorum_certificate::Time::genesis(), leaf_commitment: dummy_leaf_commit, - signatures: AssembledSignature::Genesis(), + signatures: AssembledSignature::Genesis, view_number, }, DummyBlock::random(rng), diff --git a/types/src/certificate.rs b/types/src/certificate.rs index 47720eba75..6ee49ec8ac 100644 --- a/types/src/certificate.rs +++ b/types/src/certificate.rs @@ -122,7 +122,7 @@ pub enum AssembledSignature { /// These signatures are for a 'DA' certificate DA(::QCType), /// These signatures are for genesis certificate - Genesis(), + Genesis, /// These signatures are for ViewSyncPreCommit ViewSyncPreCommit(::QCType), /// These signatures are for ViewSyncCommit @@ -194,7 +194,7 @@ impl> Self { leaf_commitment: fake_commitment::(), view_number: ::genesis(), - signatures: AssembledSignature::Genesis(), + signatures: AssembledSignature::Genesis, is_genesis: true, } } diff --git a/types/src/data.rs b/types/src/data.rs index 68fc0ec064..e8944751a4 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -844,7 +844,7 @@ pub fn serialize_signature(signature: &AssembledSignature None, + AssembledSignature::Genesis => None, }; if let Some(sig) = signatures { let (sig, proof) = TYPES::SignatureKey::get_sig_proof(&sig); diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fdc435163b..37a9140563 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -374,7 +374,7 @@ pub trait ConsensusExchange: Send + Sync { ); ::check(&real_qc_pp, real_commit.as_ref(), &qc) } - AssembledSignature::Genesis() => true, + AssembledSignature::Genesis => true, AssembledSignature::ViewSyncPreCommit(_) | AssembledSignature::ViewSyncCommit(_) | AssembledSignature::ViewSyncFinalize(_) => { From 01cdb2d1df1c88795b286f5f2d65021562acaa07 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 14 Nov 2023 13:26:41 -0500 Subject: [PATCH 0379/1393] Cancel polling for retrieved items (#2031) * cancel polling for retrieved items * move DAC cancel --- task-impls/src/consensus.rs | 15 ++++++++++++++- task-impls/src/da.rs | 8 ++++++++ task-impls/src/vid.rs | 17 +++++++++++++++-- 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index abcbfe43a0..7c7b3da855 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -642,6 +642,14 @@ where *proposal.data.view_number ); + // stop polling for the received proposal + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( + *proposal.data.view_number, + )) + .await; + let view = proposal.data.get_view_number(); if view < self.cur_view { debug!("Proposal is from an older view {:?}", proposal.data.clone()); @@ -1165,8 +1173,13 @@ where } HotShotEvent::DACRecv(cert) => { debug!("DAC Recved for view ! {}", *cert.view_number); - let view = cert.view_number; + + self.quorum_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForDAC(*view)) + .await; + self.da_certs.insert(view, cert); if self.vote_if_able().await { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b29dab50d5..0213c108b5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -208,6 +208,14 @@ where // cause an overflow error. // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block + // stop polling for the received proposal + self.committee_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( + *proposal.data.view_number, + )) + .await; + if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { warn!("Throwing away DA proposal that is more than one view older"); return None; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index b92b50d8e8..deb0d2bb3f 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -274,6 +274,14 @@ where disperse.data.get_view_number() ); + // stop polling for the received disperse + self.vid_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( + *disperse.data.view_number, + )) + .await; + // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view let view = disperse.data.get_view_number(); @@ -347,8 +355,13 @@ where } } } - HotShotEvent::VidCertRecv(_) => { - // RM TODO + HotShotEvent::VidCertRecv(cert) => { + self.vid_exchange + .network() + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDCertificate( + *cert.view_number, + )) + .await; } HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { From 960b5e7c99d865f627c8b2349f8d7ce32316dbd5 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 14:16:48 -0500 Subject: [PATCH 0380/1393] Address comments --- testing/tests/view_sync_task.rs | 1 - types/src/traits/election.rs | 93 --------------------------------- 2 files changed, 94 deletions(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 6979d8b7e3..ee6e45d528 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -31,7 +31,6 @@ async fn test_view_sync_task() { inner: handle.hotshot.inner.clone(), }; let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); - let _relay_pub_key = api.public_key().to_bytes(); let vote_data = ViewSyncPreCommitData { relay: 0, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 7ed8087dcc..cf52a46dcb 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -682,99 +682,6 @@ impl< pub trait ViewSyncExchangeType: ConsensusExchange { - // /// A vote on a [`Proposal`](Self::Proposal). - // // TODO ED Make this equal Certificate vote (if possible?) - // type Vote: VoteType; - // /// A [`SignedCertificate`] attesting to a decision taken by the committee. - // type Certificate: SignedCertificate - // + Hash - // + Eq; - // /// Creates a precommit vote - // fn create_precommit_message>( - // &self, - // round: TYPES::Time, - // relay: u64, - // vote_token: TYPES::VoteTokenType, - // ) -> GeneralConsensusMessage; - - // /// Signs a precommit vote - // fn sign_precommit_message( - // &self, - // commitment: Commitment>, - // ) -> (EncodedPublicKey, EncodedSignature); - - // /// Creates a commit vote - // fn create_commit_message>( - // &self, - // round: TYPES::Time, - // relay: u64, - // vote_token: TYPES::VoteTokenType, - // ) -> GeneralConsensusMessage; - - // /// Signs a commit vote - // fn sign_commit_message( - // &self, - // commitment: Commitment>, - // ) -> (EncodedPublicKey, EncodedSignature); - - // /// Creates a finalize vote - // fn create_finalize_message>( - // &self, - // round: TYPES::Time, - // relay: u64, - // vote_token: TYPES::VoteTokenType, - // ) -> GeneralConsensusMessage; - - // /// Sings a finalize vote - // fn sign_finalize_message( - // &self, - // commitment: Commitment>, - // ) -> (EncodedPublicKey, EncodedSignature); - - // /// Validate a certificate. - // fn is_valid_view_sync_cert(&self, certificate: Self::Certificate, round: TYPES::Time) -> bool; - - // /// Sign a certificate. - // fn sign_certificate_proposal(&self, certificate: Self::Certificate) -> EncodedSignature; - - // // TODO ED Depending on what we do in the future with the exchanges trait, we can move the accumulator out of the `SignedCertificate` - // // trait. Logically, I feel it makes sense to accumulate on the certificate rather than the exchange, however. - // /// Accumulate vote - // /// Returns either the accumulate if no threshold was reached, or a `SignedCertificate` if the threshold was reached - // #[allow(clippy::type_complexity)] - // fn accumulate_vote( - // &self, - // accumulator: ViewSyncVoteAccumulator, - // vote: &<>::Certificate as SignedCertificate< - // TYPES, - // TYPES::Time, - // TYPES::VoteTokenType, - // Self::Commitment, - // >>::Vote, - // _commit: &Self::Commitment, - // ) -> Either, Self::Certificate> - // where - // >::Certificate: SignedCertificate< - // TYPES, - // TYPES::Time, - // TYPES::VoteTokenType, - // Self::Commitment, - // Vote = ViewSyncVote, - // >, - // { - - // } - - // /// Validate a vote by checking its signature and token. - // fn is_valid_vote( - // &self, - // key: &TYPES::SignatureKey, - // encoded_signature: &EncodedSignature, - // data: &VoteData>>, - // vote_token: &Checked, - // ) -> bool { - // false - // } } /// Standard implementation of [`ViewSyncExchangeType`] based on Hot Stuff consensus. From 0174e8e19eb9143fb74e83778fb71b6503c61a2a Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 14 Nov 2023 14:54:09 -0500 Subject: [PATCH 0381/1393] Fix lints --- testing/tests/view_sync_task.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index ee6e45d528..8940abda26 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,11 +1,10 @@ -use hotshot::{types::SignatureKey, HotShotConsensusApi}; +use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ data::ViewNumber, traits::{ - consensus_api::ConsensusSharedApi, election::ConsensusExchange, - node_implementation::ExchangesType, state::ConsensusTime, + election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, }, }; use std::collections::HashMap; From b4e1385165615a7f8a2187415540ed1cc15ca5df Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 14 Nov 2023 12:29:00 -0800 Subject: [PATCH 0382/1393] poll accidentally stashed commit: fd9e3b1, 0e83a2c, eb4e31b and fix lint --- hotshot/src/traits/networking/libp2p_network.rs | 10 ++++++++-- libp2p-networking/tests/counter.rs | 10 ++++++++-- testing/src/overall_safety_task.rs | 2 +- testing/src/test_builder.rs | 15 +++++++++------ testing/tests/basic.rs | 16 ++++++++++------ 5 files changed, 36 insertions(+), 17 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 0c6a4ba446..b484fa114d 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -140,6 +140,7 @@ where /// - An invalid configuration /// (probably an issue with the defaults of this function) /// - An inability to spin up the replica's network + #[allow(clippy::panic)] fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -228,7 +229,7 @@ where let keys = all_keys.clone(); let da = da_keys.clone(); async_block_on(async move { - Libp2pNetwork::new( + match Libp2pNetwork::new( NetworkingMetricsValue::new(), config, pubkey.clone(), @@ -240,7 +241,12 @@ where da.contains(&pubkey), ) .await - .unwrap() + { + Ok(network) => network, + Err(err) => { + panic!("Failed to create network: {err}"); + } + } }) } }) diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index eefbdcf37b..d0cbd88569 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -142,12 +142,18 @@ async fn run_request_response_increment<'a>( let requestee_pid = requestee_handle.peer_id(); - stream.next().await.unwrap().unwrap(); + match stream.next().await.unwrap() { + Ok(()) => {} + Err(e) => panic!("timeout : {e:?} waiting handle {requestee_pid:?} to update state"), + } requester_handle .direct_request(requestee_pid, &CounterMessage::AskForCounter) .await .context(HandleSnafu)?; - stream.next().await.unwrap().unwrap(); + match stream.next().await.unwrap() { + Ok(()) => {} + Err(e) => panic!("timeout : {e:?} waiting handle {requestee_pid:?} to update state"), + } let s1 = requester_handle.state().await; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 058d4768c1..1a41b798f8 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -371,7 +371,7 @@ impl Default for OverallSafetyPropertiesDescription { check_leaf: false, check_state: true, check_block: true, - num_failed_views: 10, + num_failed_views: 0, transaction_threshold: 0, // very strict threshold_calculator: Arc::new(|_num_live, num_total| 2 * num_total / 3 + 1), diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 14eab3a556..475514c7fd 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -66,12 +66,12 @@ pub struct TestMetadata { impl Default for TimingData { fn default() -> Self { Self { - next_view_timeout: 10000, + next_view_timeout: 1000, timeout_ratio: (11, 10), - round_start_delay: 1, - start_delay: 1, + round_start_delay: 100, + start_delay: 100, propose_min_round_time: Duration::new(0, 0), - propose_max_round_time: Duration::new(5, 0), + propose_max_round_time: Duration::from_millis(100), } } } @@ -125,7 +125,7 @@ impl TestMetadata { } /// Default setting with 20 nodes and 8 views of successful views. - pub fn default_more_nodes_less_success() -> TestMetadata { + pub fn default_more_nodes() -> TestMetadata { TestMetadata { total_nodes: 20, start_nodes: 20, @@ -143,9 +143,12 @@ impl TestMetadata { }, ), overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 8, ..Default::default() }, + timing_data: TimingData { + next_view_timeout: 1000, + ..TimingData::default() + }, ..TestMetadata::default() } } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index ca22e681ea..a1acf9fa04 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -48,7 +48,7 @@ async fn test_with_failures_one() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes_less_success(); + let mut metadata = TestMetadata::default_more_nodes(); // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -60,7 +60,7 @@ async fn test_with_failures_one() { }]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(4, 0), dead_nodes)], + node_changes: vec![(Duration::new(1, 0), dead_nodes)], }; metadata .gen_launcher::(0) @@ -87,7 +87,7 @@ async fn test_with_failures_half_f() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes_less_success(); + let mut metadata = TestMetadata::default_more_nodes(); // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -109,8 +109,9 @@ async fn test_with_failures_half_f() { ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(4, 0), dead_nodes)], + node_changes: vec![(Duration::new(1, 0), dead_nodes)], }; + metadata.overall_safety_properties.num_failed_views = 6; metadata .gen_launcher::(0) .launch() @@ -136,7 +137,10 @@ async fn test_with_failures_f() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes_less_success(); + let mut metadata = TestMetadata::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -170,7 +174,7 @@ async fn test_with_failures_f() { ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(4, 0), dead_nodes)], + node_changes: vec![(Duration::new(1, 0), dead_nodes)], }; metadata .gen_launcher::(0) From 38f42b81cf8319ee5c7aa9b56480b6edbd8ee572 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 14 Nov 2023 15:56:16 -0500 Subject: [PATCH 0383/1393] address comments --- hotshot/examples/libp2p/multi-validator.rs | 6 +-- hotshot/examples/libp2p/orchestrator.rs | 4 +- hotshot/examples/libp2p/types.rs | 26 +++++----- hotshot/examples/libp2p/validator.rs | 6 +-- .../examples/web-server-da/multi-validator.rs | 6 +-- .../examples/web-server-da/orchestrator.rs | 6 +-- hotshot/examples/web-server-da/types.rs | 26 +++++----- hotshot/examples/web-server-da/validator.rs | 6 +-- hotshot/src/demo.rs | 7 ++- hotshot/src/types/handle.rs | 48 ------------------- 10 files changed, 45 insertions(+), 96 deletions(-) diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index f6e955795d..bb6937e8ad 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -3,13 +3,13 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot::demo::{DemoMembership, DemoTypes}; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; pub mod types; @@ -49,7 +49,7 @@ async fn main() { let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, - ThisMembership, + DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 98a958887c..01c729b6b9 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -2,9 +2,9 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::demo::DemoMembership; use hotshot::demo::DemoTypes; use tracing::instrument; -use types::ThisMembership; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; @@ -26,7 +26,7 @@ async fn main() { run_orchestrator::< DemoTypes, - ThisMembership, + DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 550249db47..37094fca32 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,10 +1,7 @@ use crate::infra::Libp2pDARun; use hotshot::{ - demo::DemoTypes, - traits::{ - election::static_committee::GeneralStaticCommittee, - implementations::{Libp2pCommChannel, MemoryStorage}, - }, + demo::{DemoMembership, DemoTypes}, + traits::implementations::{Libp2pCommChannel, MemoryStorage}, }; use hotshot_types::{ message::{Message, SequencingMessage}, @@ -20,11 +17,10 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = Libp2pCommChannel; -pub type VIDNetwork = Libp2pCommChannel; -pub type QuorumNetwork = Libp2pCommChannel; -pub type ViewSyncNetwork = Libp2pCommChannel; +pub type DANetwork = Libp2pCommChannel; +pub type VIDNetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pCommChannel; +pub type ViewSyncNetwork = Libp2pCommChannel; pub type ThisViewSyncVote = ViewSyncVote; @@ -33,10 +29,10 @@ impl NodeImplementation for NodeImpl { type Exchanges = Exchanges< DemoTypes, Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; type ConsensusMessage = SequencingMessage; @@ -49,4 +45,4 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pDARun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 3165e8f902..176f5e805f 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,10 +1,10 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot::demo::{DemoMembership, DemoTypes}; use tracing::{info, instrument}; use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; use hotshot_orchestrator::client::ValidatorArgs; @@ -29,7 +29,7 @@ async fn main() { ); infra::main_entry_point::< DemoTypes, - ThisMembership, + DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index f6e955795d..bb6937e8ad 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -3,13 +3,13 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot::demo::{DemoMembership, DemoTypes}; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; pub mod types; @@ -49,7 +49,7 @@ async fn main() { let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, - ThisMembership, + DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 84cd6d325b..7185c42e53 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -2,9 +2,9 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot::demo::{DemoMembership, DemoTypes}; use tracing::instrument; -use types::{ThisMembership, VIDNetwork}; +use types::VIDNetwork; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; @@ -26,7 +26,7 @@ async fn main() { run_orchestrator::< DemoTypes, - ThisMembership, + DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index ecfb2c405c..c7f097c7ce 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -1,10 +1,7 @@ use crate::infra::WebServerDARun; use hotshot::{ - demo::DemoTypes, - traits::{ - election::static_committee::GeneralStaticCommittee, - implementations::{MemoryStorage, WebCommChannel}, - }, + demo::{DemoMembership, DemoTypes}, + traits::implementations::{MemoryStorage, WebCommChannel}, }; use hotshot_types::{ message::{Message, SequencingMessage}, @@ -20,11 +17,10 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = WebCommChannel; -pub type VIDNetwork = WebCommChannel; -pub type QuorumNetwork = WebCommChannel; -pub type ViewSyncNetwork = WebCommChannel; +pub type DANetwork = WebCommChannel; +pub type VIDNetwork = WebCommChannel; +pub type QuorumNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebCommChannel; pub type ThisViewSyncVote = ViewSyncVote; @@ -33,10 +29,10 @@ impl NodeImplementation for NodeImpl { type Exchanges = Exchanges< DemoTypes, Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; type ConsensusMessage = SequencingMessage; @@ -49,4 +45,4 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index 3165e8f902..176f5e805f 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -1,10 +1,10 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot::demo::{DemoMembership, DemoTypes}; use tracing::{info, instrument}; use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisMembership, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; use hotshot_orchestrator::client::ValidatorArgs; @@ -29,7 +29,7 @@ async fn main() { ); infra::main_entry_point::< DemoTypes, - ThisMembership, + DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index a558922ec5..9f10c04648 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -5,7 +5,9 @@ //! //! These implementations are useful in examples and integration testing, but are not suitable for //! production use. -use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; +use crate::traits::election::static_committee::{ + GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, +}; use commit::{Commitment, Committable}; use derivative::Derivative; @@ -139,6 +141,9 @@ impl NodeType for DemoTypes { type StateType = DemoState; } +/// Alias for the static committee used in the Demo apps +pub type DemoMembership = GeneralStaticCommittee::SignatureKey>; + /// The node implementation for the sequencing demo #[derive(Derivative)] #[derivative(Clone(bound = ""))] diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 3d890200ee..4d43e6c27e 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -74,54 +74,6 @@ impl + 'static> Clone } impl + 'static> SystemContextHandle { - // /// Will return the next event in the queue - // /// - // /// # Errors - // /// - // /// Will return [`HotShotError::NetworkFault`] if the underlying [`SystemContext`] has been closed. - // pub async fn next_event(&mut self) -> Result, HotShotError> { - // let result = self.stream_output.recv_async().await; - // match result { - // Ok(result) => Ok(result), - // Err(_) => Err(NetworkFault { source: ShutDown }), - // } - // } - // /// Will attempt to immediately pull an event out of the queue - // /// - // /// # Errors - // /// - // /// Will return [`HotShotError::NetworkFault`] if the underlying [`HotShot`] instance has shut down - // pub fn try_next_event(&mut self) -> Result>, HotShotError> { - // self.stream.await - // // let result = self.stream_output.try_recv(); - // // Ok(result) - // } - - /// Will pull all the currently available events out of the event queue. - /// - /// # Errors - /// - /// Will return [`HotShotError::NetworkFault`] if the underlying [`HotShot`] instance has been shut - /// down. - // pub async fn available_events(&mut self) -> Result>, HotShotError> { - // let mut stream = self.output_stream; - // let _ = > as StreamExt/* :: */>::next(&mut *stream); - // let mut output = vec![]; - // Loop to pull out all the outputs - // loop { - // let _ = > as StreamExt/* :: */>::next(stream); - // let _ = FutureExt::::next(*self.output_stream).await; - // match FutureExt output.push(x), - // Ok(None) => break, - // // try_next event can only return HotShotError { source: NetworkError::ShutDown } - // Err(x) => return Err(x), - // } - // } - // Ok(output) - // nll_todo() - // } - /// obtains a stream to expose to the user pub async fn get_event_stream( &mut self, From d1a56818150d468ab755dd2e3d063357d871693f Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 14 Nov 2023 15:15:38 -0800 Subject: [PATCH 0384/1393] Fix size conversion, remove AsRef, update from_bytes, add issue link, remove comment --- task-impls/src/da.rs | 2 ++ task-impls/src/transactions.rs | 11 +++-------- types/src/block_impl.rs | 20 ++++++++++++++------ types/src/traits/block_contents.rs | 10 ++++++++-- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 304aa0fc18..561f440be6 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -404,6 +404,8 @@ where let signature = self .committee_exchange .sign_da_proposal(&payload_commitment); + // TODO (Keyao) Fix the payload sending and receiving for the DA proposal. + // let data: DAProposal = DAProposal { block_payload: payload.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 572f2d02a1..0d762a1c5b 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -216,13 +216,6 @@ where // TODO (Keyao) Determine whether to allow empty blocks. // let txns = self.wait_for_transactions(parent_leaf).await?; - // TODO (Keyao) We encode the transactions and compute the VID disperse data twice, - // through `from_transactions` once, then `encode` and `disperse` again. This is - // because we need `disperse` for the `VidDisperseSend` event, which can't be - // retrieved by `from_transactions` directly. - // This duplication will be fixed when updating the data sent to the DA task, i.e., - // in the `BlockReady` event. - // Relevant issue: https://github.com/EspressoSystems/HotShot/issues/2026. let (payload, metadata) = ::from_transactions(txns); let encoded_txns = ::encode(&payload); @@ -232,7 +225,9 @@ where // changes. // TODO let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let vid_disperse = vid.disperse(encoded_txns).unwrap(); + let vid_disperse = vid + .disperse(encoded_txns.into_iter().collect::>()) + .unwrap(); // TODO never clone a block // https://github.com/EspressoSystems/HotShot/issues/1858 diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 07b26c465e..a5825b313e 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -43,8 +43,11 @@ impl VIDTransaction { let mut encoded = Vec::new(); for txn in transactions { - // Encode the length of the inner transaction and the transaction bytes. - let txn_size = txn.0.len().to_le_bytes(); + // The transaction length is converted from `usize` to `u64` to ensure consistent + // number of bytes on different platforms. + let txn_size = (txn.0.len() as u64).to_le_bytes(); + + // Concatenate the bytes of the transaction size and the transaction itself. encoded.extend(txn_size); encoded.extend(txn.0); } @@ -155,16 +158,21 @@ impl BlockPayload for VIDBlockPayload { ) } - fn from_bytes(encoded_transactions: Self::Encode<'_>, _metadata: Self::Metadata) -> Self { + fn from_bytes(encoded_transactions: E, _metadata: Self::Metadata) -> Self + where + E: Iterator, + { let encoded_vec: Vec = encoded_transactions.collect(); let mut transactions = Vec::new(); let mut current_index = 0; while current_index < encoded_vec.len() { - // Decode the length of the transaction and the transaction bytes. - let txn_start_index = current_index + size_of::(); - let mut txn_len_bytes = [0; size_of::()]; + // Decode the transaction length. + let txn_start_index = current_index + size_of::(); + let mut txn_len_bytes = [0; size_of::()]; txn_len_bytes.copy_from_slice(&encoded_vec[current_index..txn_start_index]); let txn_len: usize = usize::from_le_bytes(txn_len_bytes); + + // Get the transaction. let next_index = txn_start_index + txn_len; transactions.push(VIDTransaction( encoded_vec[txn_start_index..next_index].to_vec(), diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 015c1a96fa..20218b9fee 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -49,7 +49,9 @@ pub trait BlockPayload: type Metadata: Clone + Debug + Eq + Hash + Send + Sync; /// Encoded payload. - type Encode<'a>: 'a + Iterator + AsRef<[u8]> + Send; + type Encode<'a>: 'a + Iterator + Send + where + Self: 'a; /// Build a payload and associated metadata with the transactions. fn from_transactions( @@ -57,7 +59,11 @@ pub trait BlockPayload: ) -> (Self, Self::Metadata); /// Build a payload with the encoded transaction bytes and metadata. - fn from_bytes(encoded_transactions: Self::Encode<'_>, metadata: Self::Metadata) -> Self; + /// + /// `I` may be, but not necessarily is, the `Encode` type directly from `fn encode`. + fn from_bytes(encoded_transactions: I, metadata: Self::Metadata) -> Self + where + I: Iterator; /// Build the genesis payload and metadata. fn genesis() -> (Self, Self::Metadata); From 3a73957dfe31e674f644e36c698ef06a37fa97c2 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 14 Nov 2023 15:46:44 -0800 Subject: [PATCH 0385/1393] add more time for web_server_network test --- testing/tests/web_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index c815800919..037cddcb3b 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -33,7 +33,7 @@ async fn web_server_network() { }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(20), + duration: Duration::from_secs(60), }, ), ..TestMetadata::default() From 81eccd86ab929a62e4aa55600db04c829801cea9 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 14 Nov 2023 16:09:49 -0800 Subject: [PATCH 0386/1393] Update encode conversion and tests --- testing/tests/da_task.rs | 2 +- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- types/src/block_impl.rs | 19 +++++++++++++------ 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index e9ca5c8888..cd6ebec74e 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -37,7 +37,7 @@ async fn test_da_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = vec![1, 0]; + let encoded_txns = VIDTransaction::encode(transactions.clone()); let payload_commitment = VIDBlockPayload::vid_commitment(&encoded_txns); let block = VIDBlockPayload { transactions, diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index bbb6eb7164..7e87f55d9c 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -43,7 +43,7 @@ async fn test_network_task() { let priv_key = api.private_key(); let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = vec![1, 0]; + let encoded_txns = VIDTransaction::encode(transactions.clone()); let vid_disperse = vid.disperse(&encoded_txns).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index e342facfa5..bbf64034e2 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -39,7 +39,7 @@ async fn test_vid_task() { let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = vec![1, 0]; + let encoded_txns = VIDTransaction::encode(transactions.clone()); let vid_disperse = vid.disperse(&encoded_txns).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index a5825b313e..d3aa41daee 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -1,4 +1,6 @@ -//! This module provides an implementation of the `HotShot` suite of traits. +//! This module provides implementations of block traits for examples and tests only. +//! TODO (Keyao) Organize non-production code. +//! use std::{ fmt::{Debug, Display}, mem::size_of, @@ -39,13 +41,18 @@ pub struct VIDTransaction(pub Vec); impl VIDTransaction { #[must_use] /// Encode a list of transactions into bytes. + /// + /// # Panics + /// If the conversion from the transaction length to `u32` fails. pub fn encode(transactions: Vec) -> Vec { let mut encoded = Vec::new(); for txn in transactions { - // The transaction length is converted from `usize` to `u64` to ensure consistent + // The transaction length is converted from `usize` to `u32` to ensure consistent // number of bytes on different platforms. - let txn_size = (txn.0.len() as u64).to_le_bytes(); + let txn_size = u32::try_from(txn.0.len()) + .expect("Conversion fails") + .to_le_bytes(); // Concatenate the bytes of the transaction size and the transaction itself. encoded.extend(txn_size); @@ -167,10 +174,10 @@ impl BlockPayload for VIDBlockPayload { let mut current_index = 0; while current_index < encoded_vec.len() { // Decode the transaction length. - let txn_start_index = current_index + size_of::(); - let mut txn_len_bytes = [0; size_of::()]; + let txn_start_index = current_index + size_of::(); + let mut txn_len_bytes = [0; size_of::()]; txn_len_bytes.copy_from_slice(&encoded_vec[current_index..txn_start_index]); - let txn_len: usize = usize::from_le_bytes(txn_len_bytes); + let txn_len: usize = u32::from_le_bytes(txn_len_bytes) as usize; // Get the transaction. let next_index = txn_start_index + txn_len; From 0e92e019803dbce1df4be8c7922328cf4422021c Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 15 Nov 2023 10:13:13 -0800 Subject: [PATCH 0387/1393] Make encode and from_txns return Result --- hotshot/src/demo.rs | 3 +- task-impls/src/transactions.rs | 98 +++++++++++++++++------------- testing/tests/da_task.rs | 2 +- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- types/src/block_impl.rs | 43 ++++++------- types/src/traits/block_contents.rs | 20 +++++- 7 files changed, 97 insertions(+), 73 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index d682857474..4302b431a7 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -11,9 +11,10 @@ use derivative::Derivative; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ - block_impl::{BlockError, VIDBlockHeader, VIDBlockPayload, VIDTransaction}, + block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, data::{fake_commitment, ViewNumber}, traits::{ + block_contents::BlockError, election::Membership, node_implementation::NodeType, state::{ConsensusTime, TestableState}, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 0d762a1c5b..b1acded348 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -216,49 +216,61 @@ where // TODO (Keyao) Determine whether to allow empty blocks. // let txns = self.wait_for_transactions(parent_leaf).await?; - let (payload, metadata) = - ::from_transactions(txns); - let encoded_txns = ::encode(&payload); - // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let vid_disperse = vid - .disperse(encoded_txns.into_iter().collect::>()) - .unwrap(); - - // TODO never clone a block - // https://github.com/EspressoSystems/HotShot/issues/1858 - self.event_stream - .publish(HotShotEvent::BlockReady( - payload.clone(), - metadata, - view + 1, - )) - .await; - - // TODO (Keyao) Determine and update where to publish VidDisperseSend. - // - debug!("publishing VID disperse for view {}", *view + 1); - self.event_stream - .publish(HotShotEvent::VidDisperseSend( - Proposal { - data: VidDisperse { - view_number: view + 1, - payload_commitment: payload.commit(), - shares: vid_disperse.shares, - common: vid_disperse.common, - }, - // TODO (Keyao) This is also signed in DA task. - signature: self - .quorum_exchange - .sign_payload_commitment(payload.commit()), - }, - self.quorum_exchange.public_key().clone(), - )) - .await; + match ::from_transactions(txns) { + Ok((payload, metadata)) => { + match ::encode(&payload) { + Ok(encoded_txns) => { + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = + VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let vid_disperse = vid + .disperse(encoded_txns.into_iter().collect::>()) + .unwrap(); + + // TODO never clone a block + // https://github.com/EspressoSystems/HotShot/issues/1858 + self.event_stream + .publish(HotShotEvent::BlockReady( + payload.clone(), + metadata, + view + 1, + )) + .await; + + // TODO (Keyao) Determine and update where to publish VidDisperseSend. + // + debug!("publishing VID disperse for view {}", *view + 1); + self.event_stream + .publish(HotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: view + 1, + payload_commitment: payload.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + // TODO (Keyao) This is also signed in DA task. + signature: self + .quorum_exchange + .sign_payload_commitment(payload.commit()), + }, + self.quorum_exchange.public_key().clone(), + )) + .await; + } + Err(e) => { + error!("Failed to encode the block payload: {:?}.", e); + } + } + } + Err(e) => { + error!("Failed to build the block payload: {:?}.", e); + } + } return None; } HotShotEvent::Shutdown => { diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index cd6ebec74e..3dd70a47b1 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -37,7 +37,7 @@ async fn test_da_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = VIDTransaction::encode(transactions.clone()); + let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); let payload_commitment = VIDBlockPayload::vid_commitment(&encoded_txns); let block = VIDBlockPayload { transactions, diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 7e87f55d9c..2ce882c058 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -43,7 +43,7 @@ async fn test_network_task() { let priv_key = api.private_key(); let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = VIDTransaction::encode(transactions.clone()); + let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_txns).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index bbf64034e2..31284975b7 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -39,7 +39,7 @@ async fn test_vid_task() { let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = VIDTransaction::encode(transactions.clone()); + let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_txns).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index d3aa41daee..0b65e3fddc 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -9,7 +9,7 @@ use std::{ use crate::{ data::{test_srs, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{BlockHeader, Transaction}, + block_contents::{BlockError, BlockHeader, Transaction}, state::TestableBlock, BlockPayload, }, @@ -18,7 +18,6 @@ use ark_serialize::CanonicalDeserialize; use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; -use snafu::Snafu; // TODO /// Number of storage nodes for VID initiation. @@ -27,39 +26,34 @@ pub const NUM_STORAGE_NODES: usize = 8; /// Number of chunks for VID initiation. pub const NUM_CHUNKS: usize = 8; -/// The error type for block and its transactions. -#[derive(Snafu, Debug)] -pub enum BlockError { - /// Invalid block header. - InvalidBlockHeader, -} - /// The transaction in a [`VIDBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct VIDTransaction(pub Vec); impl VIDTransaction { - #[must_use] /// Encode a list of transactions into bytes. /// - /// # Panics - /// If the conversion from the transaction length to `u32` fails. - pub fn encode(transactions: Vec) -> Vec { + /// # Errors + /// If the transaction length conversion fails. + pub fn encode(transactions: Vec) -> Result, BlockError> { let mut encoded = Vec::new(); for txn in transactions { // The transaction length is converted from `usize` to `u32` to ensure consistent // number of bytes on different platforms. - let txn_size = u32::try_from(txn.0.len()) - .expect("Conversion fails") - .to_le_bytes(); + let txn_size = match u32::try_from(txn.0.len()) { + Ok(len) => len.to_le_bytes(), + Err(_) => { + return Err(BlockError::InvalidTransactionLength); + } + }; // Concatenate the bytes of the transaction size and the transaction itself. encoded.extend(txn_size); encoded.extend(txn.0); } - encoded + Ok(encoded) } } @@ -110,7 +104,8 @@ impl VIDBlockPayload { #[must_use] pub fn genesis() -> Self { let txns: Vec = vec![0]; - let encoded = VIDTransaction::encode(vec![VIDTransaction(txns.clone())]); + // It's impossible for `encode` to fail because the transaciton length is very small. + let encoded = VIDTransaction::encode(vec![VIDTransaction(txns.clone())]).unwrap(); VIDBlockPayload { transactions: vec![VIDTransaction(txns)], payload_commitment: Self::vid_commitment(&encoded), @@ -153,16 +148,16 @@ impl BlockPayload for VIDBlockPayload { fn from_transactions( transactions: impl IntoIterator, - ) -> (Self, Self::Metadata) { + ) -> Result<(Self, Self::Metadata), BlockError> { let txns_vec: Vec = transactions.into_iter().collect(); - let encoded = VIDTransaction::encode(txns_vec.clone()); - ( + let encoded = VIDTransaction::encode(txns_vec.clone())?; + Ok(( Self { transactions: txns_vec, payload_commitment: Self::vid_commitment(&encoded), }, (), - ) + )) } fn from_bytes(encoded_transactions: E, _metadata: Self::Metadata) -> Self @@ -197,8 +192,8 @@ impl BlockPayload for VIDBlockPayload { (Self::genesis(), ()) } - fn encode(&self) -> Self::Encode<'_> { - VIDTransaction::encode(self.transactions.clone()).into_iter() + fn encode(&self) -> Result, BlockError> { + Ok(VIDTransaction::encode(self.transactions.clone())?.into_iter()) } fn transaction_commitments(&self) -> Vec> { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 20218b9fee..6a3ef30f49 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -5,6 +5,7 @@ use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; +use snafu::Snafu; use std::{ error::Error, @@ -12,6 +13,15 @@ use std::{ hash::Hash, }; +/// The error type for block and its transactions. +#[derive(Snafu, Debug)] +pub enum BlockError { + /// Invalid block header. + InvalidBlockHeader, + /// Invalid transaction length. + InvalidTransactionLength, +} + /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash @@ -54,9 +64,12 @@ pub trait BlockPayload: Self: 'a; /// Build a payload and associated metadata with the transactions. + /// + /// # Errors + /// If the transaction length conversion fails. fn from_transactions( transactions: impl IntoIterator, - ) -> (Self, Self::Metadata); + ) -> Result<(Self, Self::Metadata), BlockError>; /// Build a payload with the encoded transaction bytes and metadata. /// @@ -69,7 +82,10 @@ pub trait BlockPayload: fn genesis() -> (Self, Self::Metadata); /// Encode the payload - fn encode(&self) -> Self::Encode<'_>; + /// + /// # Errors + /// If the transaction length conversion fails. + fn encode(&self) -> Result, BlockError>; /// List of transaction commitments. fn transaction_commitments(&self) -> Vec>; From 42319094a5d4cf264bf606b0ebca6e06791ea770 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 15 Nov 2023 14:22:01 -0500 Subject: [PATCH 0388/1393] first pass remove message traits --- hotshot/examples/infra/mod.rs | 7 +-- hotshot/examples/libp2p/types.rs | 3 +- hotshot/examples/web-server-da/types.rs | 3 +- hotshot/src/lib.rs | 35 ++++-------- hotshot/src/tasks/mod.rs | 25 +++----- hotshot/src/types/handle.rs | 6 +- task-impls/src/consensus.rs | 18 ++---- task-impls/src/da.rs | 16 ++---- task-impls/src/network.rs | 36 +++--------- task-impls/src/transactions.rs | 22 ++----- task-impls/src/vid.rs | 16 ++---- task-impls/src/view_sync.rs | 56 +++++------------- testing/src/node_types.rs | 6 +- testing/src/test_builder.rs | 4 +- testing/src/txn_task.rs | 7 +-- testing/tests/memory_network.rs | 3 +- types/src/message.rs | 76 ++++++++++--------------- types/src/traits/consensus_api.rs | 2 +- types/src/traits/node_implementation.rs | 34 +++++------ 19 files changed, 114 insertions(+), 261 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 321684fa22..cf9826257f 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -27,7 +27,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, TestableLeaf}, event::{Event, EventType}, - message::{Message, SequencingMessage}, + message::Message, traits::{ election::{ CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, @@ -138,7 +138,6 @@ pub async fn run_orchestrator< VIDExchange>, >, Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, >, >( OrchestratorArgs { @@ -203,7 +202,6 @@ pub trait RunDA< VIDExchange>, >, Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, >, > where ::StateType: TestableState, @@ -459,7 +457,6 @@ impl< >, >, Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, >, > RunDA< @@ -613,7 +610,6 @@ impl< >, >, Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, >, > RunDA< @@ -831,7 +827,6 @@ pub async fn main_entry_point< VIDExchange>, >, Storage = MemoryStorage>, - ConsensusMessage = SequencingMessage, >, RUNDA: RunDA, >( diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 542e406f0e..28306610f0 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -9,7 +9,7 @@ use hotshot::{ use hotshot_types::{ certificate::ViewSyncCertificate, data::{DAProposal, Leaf, QuorumProposal}, - message::{Message, SequencingMessage}, + message::Message, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, @@ -61,7 +61,6 @@ impl NodeImplementation for NodeImpl { >, VIDExchange>, >; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index d5de57386e..9b5087b31e 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -9,7 +9,7 @@ use hotshot::{ use hotshot_types::{ certificate::ViewSyncCertificate, data::{DAProposal, Leaf, QuorumProposal}, - message::{Message, SequencingMessage}, + message::Message, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, @@ -61,7 +61,6 @@ impl NodeImplementation for NodeImpl { >, VIDExchange>, >; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 89203d9074..55ae36deef 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -63,8 +63,8 @@ use hotshot_types::{ data::{DAProposal, Leaf, LeafType, QuorumProposal}, error::StorageSnafu, message::{ - ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, - ProcessedGeneralConsensusMessage, SequencingMessage, + DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, + SequencingMessage, }, traits::{ consensus_api::{ConsensusApi, ConsensusSharedApi}, @@ -259,28 +259,19 @@ impl> SystemContext { pub async fn timeout_view( &self, current_view: TYPES::Time, - send_replica: UnboundedSender< - >::ProcessedConsensusMessage, - >, - send_next_leader: Option< - UnboundedSender< - >::ProcessedConsensusMessage, - >, - >, - ) where - >::ProcessedConsensusMessage: - From>, - { + send_replica: UnboundedSender>, + send_next_leader: Option>>, + ) { let msg = ProcessedGeneralConsensusMessage::::InternalTrigger( InternalTrigger::Timeout(current_view), ); if let Some(chan) = send_next_leader { - if chan.send(msg.clone().into()).await.is_err() { + if chan.send(msg.clone()).await.is_err() { debug!("Error timing out next leader task"); } }; // NOTE this should always exist - if send_replica.send(msg.into()).await.is_err() { + if send_replica.send(msg).await.is_err() { debug!("Error timing out replica task"); }; } @@ -621,11 +612,7 @@ impl< BlockPayload = VIDBlockPayload, Transaction = VIDTransaction, >, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, MEMBERSHIP: Membership, > HotShotType for SystemContext where @@ -857,10 +844,8 @@ impl> ConsensusSharedApi>, - > ConsensusApi for HotShotConsensusApi +impl> ConsensusApi + for HotShotConsensusApi { async fn send_direct_message( &self, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b8eba956f8..2e1dbeeca7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -29,7 +29,7 @@ use hotshot_types::{ certificate::ViewSyncCertificate, data::{Leaf, ProposalType, QuorumProposal}, event::Event, - message::{Message, Messages, SequencingMessage}, + message::{Message, Messages}, traits::{ election::{ConsensusExchange, Membership, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, @@ -62,7 +62,7 @@ pub enum GlobalEvent { /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_message_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, PROPOSAL: ProposalType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> @@ -161,7 +161,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_event_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, PROPOSAL: ProposalType, MEMBERSHIP: Membership, EXCHANGE: ConsensusExchange, Proposal = PROPOSAL, Membership = MEMBERSHIP> @@ -228,7 +228,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_consensus_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -326,10 +326,7 @@ where /// add the VID task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy -pub async fn add_vid_task< - TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, ->( +pub async fn add_vid_task>>( task_runner: TaskRunner, event_stream: ChannelStream>, vid_exchange: VIDEx, @@ -388,10 +385,7 @@ where /// add the Data Availability task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy -pub async fn add_da_task< - TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, ->( +pub async fn add_da_task>>( task_runner: TaskRunner, event_stream: ChannelStream>, committee_exchange: CommitteeEx, @@ -451,7 +445,7 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, >( task_runner: TaskRunner, event_stream: ChannelStream>, @@ -509,10 +503,7 @@ where /// add the view sync task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy -pub async fn add_view_sync_task< - TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, ->( +pub async fn add_view_sync_task>>( task_runner: TaskRunner, event_stream: ChannelStream>, handle: SystemContextHandle, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 2610630393..9db0ca9ed5 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_types::{ data::LeafType, error::HotShotError, event::EventType, - message::MessageKind, + message::{MessageKind, SequencingMessage}, traits::{ election::{ConsensusExchange, QuorumExchangeType}, node_implementation::{ExchangesType, NodeType}, @@ -291,7 +291,7 @@ impl + 'static> SystemContextHandl /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function #[cfg(feature = "hotshot-testing")] - pub async fn send_broadcast_consensus_message(&self, msg: I::ConsensusMessage) { + pub async fn send_broadcast_consensus_message(&self, msg: SequencingMessage) { let _result = self .hotshot .send_broadcast_message(MessageKind::from_consensus_message(msg)) @@ -302,7 +302,7 @@ impl + 'static> SystemContextHandl #[cfg(feature = "hotshot-testing")] pub async fn send_direct_consensus_message( &self, - msg: I::ConsensusMessage, + msg: SequencingMessage, recipient: TYPES::SignatureKey, ) { let _result = self diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7c7b3da855..1f53d09c75 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -20,7 +20,7 @@ use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, LeafType, ProposalType, QuorumProposal}, event::{Event, EventType}, - message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, + message::{GeneralConsensusMessage, Message, Proposal}, simple_certificate::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, }, @@ -61,7 +61,7 @@ pub struct ConsensusTaskError {} /// of consensus pub struct ConsensusTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > where QuorumEx: ConsensusExchange< @@ -332,11 +332,7 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > ConsensusTaskState where @@ -1375,11 +1371,7 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I>, > TS for ConsensusTaskState where @@ -1419,7 +1411,7 @@ pub type ConsensusTaskTypes = HSTWithEvent< /// Event handle for consensus pub async fn sequencing_consensus_handle< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, >( event: HotShotEvent, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0213c108b5..bf7629bf60 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -15,7 +15,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::{Consensus, View}, data::{DAProposal, Leaf, ProposalType}, - message::{Message, Proposal, SequencingMessage}, + message::{Message, Proposal}, simple_vote::{DAData, DAVote2}, traits::{ consensus_api::ConsensusApi, @@ -45,7 +45,7 @@ pub struct ConsensusTaskError {} /// Tracks state of a DA task pub struct DATaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > where CommitteeEx: @@ -175,11 +175,7 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > DATaskState where @@ -467,11 +463,7 @@ where /// task state implementation for DA Task impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > TS for DATaskState where diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 80d1c7d341..024ed50c96 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -40,31 +40,19 @@ pub enum NetworkTaskKind { /// the network message task state pub struct NetworkMessageTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, > { /// event stream (used for publishing) pub event_stream: ChannelStream>, } -impl< - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - > TS for NetworkMessageTaskState +impl>> TS + for NetworkMessageTaskState { } -impl< - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - > NetworkMessageTaskState +impl>> + NetworkMessageTaskState { /// Handle the message. pub async fn handle_messages(&mut self, messages: Vec>) { @@ -142,7 +130,7 @@ impl< /// network event task state pub struct NetworkEventTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > { @@ -159,11 +147,7 @@ pub struct NetworkEventTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > TS for NetworkEventTaskState @@ -172,11 +156,7 @@ impl< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, MEMBERSHIP: Membership, COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, > NetworkEventTaskState diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 9aacf68090..e1d224327d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -16,7 +16,7 @@ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, consensus::Consensus, data::{Leaf, VidDisperse, VidScheme, VidSchemeTrait}, - message::{Message, Proposal, SequencingMessage}, + message::{Message, Proposal}, traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, QuorumExchangeType}, @@ -43,7 +43,7 @@ pub struct ConsensusTaskError {} /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > where QuorumEx: ConsensusExchange>, @@ -80,11 +80,7 @@ pub struct TransactionTaskState< // whereas it's just `TYPES: NodeType` in the second implementation. impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > TransactionTaskState where @@ -288,11 +284,7 @@ where // whereas here it's just `TYPES: NodeType`. impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > TransactionTaskState where @@ -381,11 +373,7 @@ where /// task state implementation for Transactions Task impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > TS for TransactionTaskState where diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index deb0d2bb3f..798c0d2229 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -16,7 +16,7 @@ use hotshot_types::traits::{network::ConsensusIntentEvent, node_implementation:: use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, ProposalType}, - message::{Message, SequencingMessage}, + message::Message, traits::{ consensus_api::ConsensusApi, election::ConsensusExchange, @@ -45,7 +45,7 @@ pub struct ConsensusTaskError {} /// Tracks state of a VID task pub struct VIDTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > where VIDEx: @@ -174,11 +174,7 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > VIDTaskState where @@ -431,11 +427,7 @@ where /// task state implementation for VID Task impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > TS for VIDTaskState where diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 11cba581f0..c73b1eb57e 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -19,7 +19,7 @@ use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ certificate::ViewSyncCertificate, data::Leaf, - message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, + message::{GeneralConsensusMessage, Message, Proposal}, traits::{ consensus_api::ConsensusApi, election::{ConsensusExchange, ViewSyncExchangeType}, @@ -60,7 +60,7 @@ pub struct ViewSyncTaskError {} /// Main view sync task state pub struct ViewSyncTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static + std::clone::Clone, > where ViewSyncEx: ViewSyncExchangeType< @@ -104,11 +104,7 @@ pub struct ViewSyncTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static + std::clone::Clone, > TS for ViewSyncTaskState where @@ -133,7 +129,7 @@ pub type ViewSyncTaskStateTypes = HSTWithEvent< /// State of a view sync replica task pub struct ViewSyncReplicaTaskState< TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > where ViewSyncEx: ViewSyncExchangeType< @@ -171,11 +167,7 @@ pub struct ViewSyncReplicaTaskState< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > TS for ViewSyncReplicaTaskState where @@ -198,10 +190,8 @@ pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< >; /// State of a view sync relay task -pub struct ViewSyncRelayTaskState< - TYPES: NodeType, - I: NodeImplementation, ConsensusMessage = SequencingMessage>, -> { +pub struct ViewSyncRelayTaskState>> +{ /// Event stream to publish events to pub event_stream: ChannelStream>, /// View sync exchange @@ -213,14 +203,8 @@ pub struct ViewSyncRelayTaskState< pub id: u64, } -impl< - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - > TS for ViewSyncRelayTaskState +impl>> TS + for ViewSyncRelayTaskState { } @@ -234,11 +218,7 @@ pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static + std::clone::Clone, > ViewSyncTaskState where @@ -607,11 +587,7 @@ where impl< TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, + I: NodeImplementation>, A: ConsensusApi, I> + 'static, > ViewSyncReplicaTaskState where @@ -938,14 +914,8 @@ where } } -impl< - TYPES: NodeType, - I: NodeImplementation< - TYPES, - Leaf = Leaf, - ConsensusMessage = SequencingMessage, - >, - > ViewSyncRelayTaskState +impl>> + ViewSyncRelayTaskState where ViewSyncEx: ViewSyncExchangeType< TYPES, diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index ca065bec14..700a68617a 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -17,7 +17,7 @@ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, certificate::ViewSyncCertificate, data::{Leaf, QuorumProposal, ViewNumber}, - message::{Message, SequencingMessage}, + message::Message, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, network::{TestableChannelImplementation, TestableNetworkingImplementation}, @@ -128,7 +128,6 @@ impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage>; type Leaf = Leaf; type Exchanges = SequencingLibp2pExchange; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, @@ -347,7 +346,6 @@ impl NodeImplementation for MemoryImpl { type Storage = MemoryStorage>; type Leaf = Leaf; type Exchanges = SequencingMemoryExchange; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, @@ -488,7 +486,6 @@ impl NodeImplementation for WebImpl { type Storage = MemoryStorage>; type Leaf = Leaf; type Exchanges = SequencingWebExchanges; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, @@ -539,7 +536,6 @@ impl NodeImplementation for CombinedImpl { type Storage = MemoryStorage>; type Leaf = Leaf; type Exchanges = CombinedExchange; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 14eab3a556..d3841aabfc 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -4,7 +4,7 @@ use hotshot_types::traits::election::{ConsensusExchange, Membership}; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; -use hotshot_types::message::{Message, SequencingMessage}; +use hotshot_types::message::Message; use hotshot_types::{ traits::node_implementation::{NodeType, QuorumEx, TestableExchange}, @@ -183,7 +183,7 @@ impl TestMetadata { node_id: u64, ) -> TestLauncher where - I: NodeImplementation>, + I: NodeImplementation, >::Exchanges: TestableExchange>::Leaf, Message>, SystemContext: HotShotType, diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 4d61b36a20..90e9a53f4d 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -9,10 +9,7 @@ use hotshot_task::{ task_impls::{HSTWithEventAndMessage, TaskBuilder}, GeneratedStream, }; -use hotshot_types::{ - message::SequencingMessage, - traits::node_implementation::{NodeImplementation, NodeType}, -}; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use rand::thread_rng; use snafu::Snafu; use std::{sync::Arc, time::Duration}; @@ -64,7 +61,7 @@ impl TxnTaskDescription { ) -> TaskGenerator> where TYPES: NodeType, - I: NodeImplementation>, + I: NodeImplementation, { Box::new(move |state, mut registry, test_event_stream| { async move { diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 511cb4867e..d02f2db96c 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -16,7 +16,7 @@ use hotshot::types::SignatureKey; use hotshot_types::block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}; use hotshot_types::certificate::ViewSyncCertificate; use hotshot_types::data::{DAProposal, Leaf, QuorumProposal}; -use hotshot_types::message::{Message, SequencingMessage}; +use hotshot_types::message::Message; use hotshot_types::traits::election::{ CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange, }; @@ -102,7 +102,6 @@ impl NodeImplementation for TestImpl { >, VIDExchange>, >; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, diff --git a/types/src/message.rs b/types/src/message.rs index 0ef9ed8590..e1578fe798 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -90,7 +90,7 @@ pub enum MessagePurpose { #[serde(bound(deserialize = "", serialize = ""))] pub enum MessageKind> { /// Messages related to the consensus protocol - Consensus(I::ConsensusMessage), + Consensus(SequencingMessage), /// Messages relating to sharing data between nodes Data(DataMessage), /// Phantom data. @@ -101,7 +101,7 @@ impl> MessageKind { // Can't implement `From` directly due to potential conflict with // `From`. /// Construct a [`MessageKind`] from [`I::ConsensusMessage`]. - pub fn from_consensus_message(m: I::ConsensusMessage) -> Self { + pub fn from_consensus_message(m: SequencingMessage) -> Self { Self::Consensus(m) } } @@ -296,10 +296,8 @@ pub type ProcessedSequencingMessage = Either< ProcessedCommitteeConsensusMessage, >; -impl< - TYPES: NodeType, - I: NodeImplementation>, - > From> for SequencingMessage +impl> From> + for SequencingMessage { fn from(value: ProcessedSequencingMessage) -> Self { match value { @@ -309,10 +307,8 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation>, - > From> for ProcessedSequencingMessage +impl> From> + for ProcessedSequencingMessage { fn from(value: ProcessedGeneralConsensusMessage) -> Self { Left(value) @@ -375,46 +371,39 @@ pub enum CommitteeConsensusMessage VidCertificate(VIDCertificate2), } -/// Messages related to the consensus protocol. -pub trait ConsensusMessageType> { - /// The type of messages for both validating and sequencing consensus. - type GeneralConsensusMessage; +// /// Messages related to the consensus protocol. +// pub trait ConsensusMessageType> { +// /// The type of messages for both validating and sequencing consensus. +// type GeneralConsensusMessage; - /// The type of processed consensus messages. - type ProcessedConsensusMessage: Send; +// /// The type of processed consensus messages. +// type ProcessedConsensusMessage: Send; - /// Get the view number when the message was sent or the view of the timeout. - fn view_number(&self) -> TYPES::Time; +// /// Get the view number when the message was sent or the view of the timeout. +// fn view_number(&self) -> TYPES::Time; - /// Get the message purpose. - fn purpose(&self) -> MessagePurpose; -} +// /// Get the message purpose. +// fn purpose(&self) -> MessagePurpose; +// } /// Messages related to the sequencing consensus protocol. -pub trait SequencingMessageType>: - ConsensusMessageType -{ - /// Messages for DA committee only. - type CommitteeConsensusMessage; -} +// pub trait SequencingMessageType>: +// ConsensusMessageType +// { +// /// Messages for DA committee only. +// type CommitteeConsensusMessage; +// } /// Messages for sequencing consensus. #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] -pub struct SequencingMessage< - TYPES: NodeType, - I: NodeImplementation>, ->(pub Either, CommitteeConsensusMessage>); - -impl< - TYPES: NodeType, - I: NodeImplementation>, - > ConsensusMessageType for SequencingMessage -{ - type GeneralConsensusMessage = GeneralConsensusMessage; - type ProcessedConsensusMessage = ProcessedSequencingMessage; +pub struct SequencingMessage>( + pub Either, CommitteeConsensusMessage>, +); +impl> SequencingMessage { // TODO: Disable panic after the `ViewSync` case is implemented. + /// Get the view number this message relates to #[allow(clippy::panic)] fn view_number(&self) -> TYPES::Time { match &self.0 { @@ -458,6 +447,7 @@ impl< } // TODO: Disable panic after the `ViewSync` case is implemented. + /// Get the message purpos #[allow(clippy::panic)] fn purpose(&self) -> MessagePurpose { match &self.0 { @@ -482,14 +472,6 @@ impl< } } -impl< - TYPES: NodeType, - I: NodeImplementation>, - > SequencingMessageType for SequencingMessage -{ - type CommitteeConsensusMessage = CommitteeConsensusMessage; -} - #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Messages related to sending data between nodes diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index ac48fc593f..70abff0520 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -125,7 +125,7 @@ pub trait ConsensusSharedApi< pub trait ConsensusApi< TYPES: NodeType, LEAF: LeafType, - I: NodeImplementation>, + I: NodeImplementation, >: ConsensusSharedApi { /// Send a direct message to the given recipient diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8ce6a517cc..e3199da3a8 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -16,7 +16,7 @@ use super::{ }; use crate::{ data::{Leaf, LeafType, TestableLeaf}, - message::{ConsensusMessageType, Message, SequencingMessage}, + message::{Message, ProcessedSequencingMessage}, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, storage::Storage, BlockPayload, @@ -34,16 +34,15 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; /// Alias for the [`ProcessedConsensusMessage`] type of a [`NodeImplementation`]. -type ProcessedConsensusMessageType = <>::ConsensusMessage as ConsensusMessageType>::ProcessedConsensusMessage; /// struct containing messages for a view to send to a replica or DA committee member. #[derive(Clone)] pub struct ViewQueue> { /// to send networking events to a replica or DA committee member. - pub sender_chan: UnboundedSender>, + pub sender_chan: UnboundedSender>, /// to recv networking events for a replica or DA committee member. - pub receiver_chan: Arc>>>, + pub receiver_chan: Arc>>>, /// `true` if this queue has already received a proposal pub has_received_proposal: Arc, @@ -120,17 +119,17 @@ pub trait NodeImplementation: /// Storage type for this consensus implementation type Storage: Storage + Clone; - /// Consensus message type. - type ConsensusMessage: ConsensusMessageType - + Clone - + Debug - + Send - + Sync - + 'static - + for<'a> Deserialize<'a> - + Hash - + Eq - + Serialize; + // /// Consensus message type. + // type ConsensusMessage: ConsensusMessageType + // + Clone + // + Debug + // + Send + // + Sync + // + 'static + // + for<'a> Deserialize<'a> + // + Hash + // + Eq + // + Serialize; /// Consensus type selected exchanges. /// @@ -509,10 +508,7 @@ pub trait TestableNodeImplementation: NodeImplementation } #[async_trait] -impl< - TYPES: NodeType, - I: NodeImplementation>, - > TestableNodeImplementation for I +impl> TestableNodeImplementation for I where CommitteeNetwork: TestableNetworkingImplementation>, QuorumNetwork: TestableNetworkingImplementation>, From fa00066e19dd886f2ee0f3d93df6e07cfb712f06 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 15 Nov 2023 12:14:13 -0800 Subject: [PATCH 0389/1393] Update error types, nit fixes --- task-impls/src/transactions.rs | 104 ++++++++++++++--------------- types/src/block_impl.rs | 4 +- types/src/traits/block_contents.rs | 4 +- 3 files changed, 56 insertions(+), 56 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index b1acded348..1ab70c4529 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -216,61 +216,61 @@ where // TODO (Keyao) Determine whether to allow empty blocks. // let txns = self.wait_for_transactions(parent_leaf).await?; - match ::from_transactions(txns) { - Ok((payload, metadata)) => { - match ::encode(&payload) { - Ok(encoded_txns) => { - // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = - VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let vid_disperse = vid - .disperse(encoded_txns.into_iter().collect::>()) - .unwrap(); - - // TODO never clone a block - // https://github.com/EspressoSystems/HotShot/issues/1858 - self.event_stream - .publish(HotShotEvent::BlockReady( - payload.clone(), - metadata, - view + 1, - )) - .await; - - // TODO (Keyao) Determine and update where to publish VidDisperseSend. - // - debug!("publishing VID disperse for view {}", *view + 1); - self.event_stream - .publish(HotShotEvent::VidDisperseSend( - Proposal { - data: VidDisperse { - view_number: view + 1, - payload_commitment: payload.commit(), - shares: vid_disperse.shares, - common: vid_disperse.common, - }, - // TODO (Keyao) This is also signed in DA task. - signature: self - .quorum_exchange - .sign_payload_commitment(payload.commit()), - }, - self.quorum_exchange.public_key().clone(), - )) - .await; - } - Err(e) => { - error!("Failed to encode the block payload: {:?}.", e); - } + let (payload, metadata) = + match ::from_transactions(txns) { + Ok((payload, metadata)) => (payload, metadata), + Err(e) => { + error!("Failed to build the block payload: {:?}.", e); + return None; } - } + }; + let encoded_txns = match payload.encode() { + Ok(encoded) => encoded, Err(e) => { - error!("Failed to build the block payload: {:?}.", e); + error!("Failed to encode the block payload: {:?}.", e); + return None; } - } + }; + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let vid_disperse = vid + .disperse(encoded_txns.into_iter().collect::>()) + .unwrap(); + + // TODO never clone a block + // https://github.com/EspressoSystems/HotShot/issues/1858 + self.event_stream + .publish(HotShotEvent::BlockReady( + payload.clone(), + metadata, + view + 1, + )) + .await; + + // TODO (Keyao) Determine and update where to publish VidDisperseSend. + // + debug!("publishing VID disperse for view {}", *view + 1); + self.event_stream + .publish(HotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: view + 1, + payload_commitment: payload.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + // TODO (Keyao) This is also signed in DA task. + signature: self + .quorum_exchange + .sign_payload_commitment(payload.commit()), + }, + self.quorum_exchange.public_key().clone(), + )) + .await; return None; } HotShotEvent::Shutdown => { diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 0b65e3fddc..8cca78fa1c 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -148,7 +148,7 @@ impl BlockPayload for VIDBlockPayload { fn from_transactions( transactions: impl IntoIterator, - ) -> Result<(Self, Self::Metadata), BlockError> { + ) -> Result<(Self, Self::Metadata), Self::Error> { let txns_vec: Vec = transactions.into_iter().collect(); let encoded = VIDTransaction::encode(txns_vec.clone())?; Ok(( @@ -192,7 +192,7 @@ impl BlockPayload for VIDBlockPayload { (Self::genesis(), ()) } - fn encode(&self) -> Result, BlockError> { + fn encode(&self) -> Result, Self::Error> { Ok(VIDTransaction::encode(self.transactions.clone())?.into_iter()) } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 6a3ef30f49..647359157e 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -69,7 +69,7 @@ pub trait BlockPayload: /// If the transaction length conversion fails. fn from_transactions( transactions: impl IntoIterator, - ) -> Result<(Self, Self::Metadata), BlockError>; + ) -> Result<(Self, Self::Metadata), Self::Error>; /// Build a payload with the encoded transaction bytes and metadata. /// @@ -85,7 +85,7 @@ pub trait BlockPayload: /// /// # Errors /// If the transaction length conversion fails. - fn encode(&self) -> Result, BlockError>; + fn encode(&self) -> Result, Self::Error>; /// List of transaction commitments. fn transaction_commitments(&self) -> Vec>; From 81c807a6b9e188d5dbbcd8a5b94d19a9be8ae695 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 15 Nov 2023 16:29:48 -0500 Subject: [PATCH 0390/1393] remove some commented code --- types/src/message.rs | 23 ----------------------- types/src/traits/node_implementation.rs | 12 ------------ 2 files changed, 35 deletions(-) diff --git a/types/src/message.rs b/types/src/message.rs index 04db77d88e..c5c9ed7fa9 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -384,29 +384,6 @@ pub enum CommitteeConsensusMessage VidCertificate(VIDCertificate2), } -// /// Messages related to the consensus protocol. -// pub trait ConsensusMessageType> { -// /// The type of messages for both validating and sequencing consensus. -// type GeneralConsensusMessage; - -// /// The type of processed consensus messages. -// type ProcessedConsensusMessage: Send; - -// /// Get the view number when the message was sent or the view of the timeout. -// fn view_number(&self) -> TYPES::Time; - -// /// Get the message purpose. -// fn purpose(&self) -> MessagePurpose; -// } - -/// Messages related to the sequencing consensus protocol. -// pub trait SequencingMessageType>: -// ConsensusMessageType -// { -// /// Messages for DA committee only. -// type CommitteeConsensusMessage; -// } - /// Messages for sequencing consensus. #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 7490ee79a2..890e35ee57 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -116,18 +116,6 @@ pub trait NodeImplementation: /// Storage type for this consensus implementation type Storage: Storage + Clone; - // /// Consensus message type. - // type ConsensusMessage: ConsensusMessageType - // + Clone - // + Debug - // + Send - // + Sync - // + 'static - // + for<'a> Deserialize<'a> - // + Hash - // + Eq - // + Serialize; - /// Consensus type selected exchanges. /// /// Implements either `ValidatingExchangesType` or `ExchangesType`. From efc40520a15dab772478c5331f91b02a756e7cba Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 15 Nov 2023 17:16:18 -0500 Subject: [PATCH 0391/1393] Add Membership to TYPES --- hotshot/src/demo.rs | 1 + hotshot/src/traits/storage/memory_storage.rs | 5 ++++- testing/src/node_types.rs | 5 ++++- testing/tests/memory_network.rs | 1 + types/src/traits/node_implementation.rs | 3 +++ 5 files changed, 13 insertions(+), 2 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 9f10c04648..f345b30973 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -139,6 +139,7 @@ impl NodeType for DemoTypes { type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DemoState; + type Membership = DemoMembership; } /// Alias for the static committee used in the Demo apps diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 9c7fd14d4c..4667cf1282 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -107,7 +107,9 @@ impl Storage for MemoryStorage { #[cfg(test)] mod test { - use crate::traits::election::static_committee::{StaticElectionConfig, StaticVoteToken}; + use crate::traits::election::static_committee::{ + GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, + }; use super::*; use commit::Committable; @@ -145,6 +147,7 @@ mod test { type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DummyState; + type Membership = GeneralStaticCommittee; } fn random_stored_view(view_number: ::Time) -> StoredView { diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index a5e8b3f621..f5162be0be 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,4 +1,6 @@ -use hotshot::traits::implementations::CombinedNetworks; +use hotshot::traits::{ + election::static_committee::GeneralStaticCommittee, implementations::CombinedNetworks, +}; use std::{marker::PhantomData, sync::Arc}; use hotshot::{ @@ -48,6 +50,7 @@ impl NodeType for TestTypes { type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DemoState; + type Membership = GeneralStaticCommittee; } #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 0f39e11cb9..9c93307e4c 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -57,6 +57,7 @@ impl NodeType for Test { type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DemoState; + type Membership = GeneralStaticCommittee; } #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 890e35ee57..c0746ba8fb 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -636,4 +636,7 @@ pub trait NodeType: /// The state type that this hotshot setup is using. type StateType: State; + + /// Membership used for this implementation + type Membership: Membership; } From 606c136979d84ace51941144e13aa66aaeca428c Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 15 Nov 2023 14:35:30 -0800 Subject: [PATCH 0392/1393] Fix build after merge --- hotshot/examples/infra/mod.rs | 1 - types/src/data.rs | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 31e539ea90..fda8cf1ec2 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -193,7 +193,6 @@ pub trait RunDA< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let genesis_block = TYPES::BlockPayload::genesis(); let initializer = hotshot::HotShotInitializer::::from_genesis() .expect("Couldn't generate genesis block"); diff --git a/types/src/data.rs b/types/src/data.rs index 61d132c1eb..fa49893b10 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -334,11 +334,12 @@ impl Display for Leaf { impl Leaf { /// Create a new leaf from its components. + #[must_use] pub fn genesis() -> Self { let (block_header, block_payload, _) = TYPES::BlockHeader::genesis(); Self { view_number: TYPES::Time::genesis(), - justify_qc: QuorumCertificate2::::genesis(), + justify_qc: QuorumCertificate2::::genesis(), parent_commitment: fake_commitment(), block_header, block_payload: Some(block_payload), From aa5637f1c122b826e2d006a4dd8f5fe2a3cf4893 Mon Sep 17 00:00:00 2001 From: MRain Date: Wed, 15 Nov 2023 20:52:00 -0500 Subject: [PATCH 0393/1393] resolve the padding bug --- hotshot-state-prover/src/circuit.rs | 156 ++++++++++++++++++---------- hotshot-state-prover/src/lib.rs | 36 +++++-- 2 files changed, 131 insertions(+), 61 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 27b51ef437..ac52a077e3 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -2,25 +2,20 @@ use ark_ec::twisted_edwards::TECurveConfig; use ark_ff::PrimeField; +use ark_std::borrow::Borrow; use ethereum_types::U256; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; -use hotshot_types::traits::{ - stake_table::{SnapshotVersion, StakeTableScheme}, - state::LightClientState, -}; +use hotshot_types::traits::state::LightClientState; use jf_plonk::errors::PlonkError; use jf_primitives::{ circuit::{ rescue::RescueNativeGadget, - signature::schnorr::{SignatureGadget, SignatureVar, VerKeyVar}, + signature::schnorr::{SignatureGadget, VerKeyVar}, }, rescue::RescueParameter, - signatures::{ - bls_over_bn254::VerKey as BLSVerKey, - schnorr::{Signature, VerKey as SchnorrVerKey}, - }, + signatures::schnorr::{Signature, VerKey as SchnorrVerKey}, }; -use jf_relation::{errors::CircuitError, BoolVar, Circuit, PlonkCircuit, Variable}; +use jf_relation::{errors::CircuitError, Circuit, PlonkCircuit, Variable}; /// Lossy conversion of a U256 into a field element. pub(crate) fn u256_to_field(v: &U256) -> F { @@ -115,73 +110,122 @@ impl AsRef<[Variable]> for LightClientStateVar { } /// A function that takes as input: -/// - a list of stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) +/// - a list of stake table entries (`Vec<(SchnorrVerKey, Amount)>`) +/// - a bit vector indicates the signers /// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state /// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) -/// - a bit vector indicates the signers /// - a quorum threshold -/// checks that +/// Lengths of input vectors should not exceed the `STAKE_TABLE_CAPACITY`. +/// The list of stake table entries, bit indicators and signatures will be padded to the `STAKE_TABLE_CAPACITY`. +/// It checks that /// - the signer's accumulated weight exceeds the quorum threshold /// - the stake table corresponds to the one committed in the light client state /// - all signed schnorr signatures are valid -/// returns +/// and returns /// - A circuit for proof generation /// - A list of public inputs for verification /// - A PlonkError if any error happens when building the circuit -pub(crate) fn build_state_verifier_circuit( - stake_table: &ST, - sigs: &[Signature

], +pub(crate) fn build_state_verifier_circuit( + stake_table_entries: STIter, + signer_bit_vec: BitIter, + signatures: SigIter, lightclient_state: &LightClientState, - signer_bit_vec: &[bool], threshold: &U256, ) -> Result<(PlonkCircuit, Vec), PlonkError> where F: RescueParameter, - ST: StakeTableScheme>, P: TECurveConfig, + STIter: IntoIterator, + STIter::Item: Borrow<(SchnorrVerKey

, U256)>, + STIter::IntoIter: ExactSizeIterator, + BitIter: IntoIterator, + BitIter::Item: Borrow, + BitIter::IntoIter: ExactSizeIterator, + SigIter: IntoIterator, + SigIter::Item: Borrow>, + SigIter::IntoIter: ExactSizeIterator, { + let stake_table_entries = stake_table_entries.into_iter(); + let signer_bit_vec = signer_bit_vec.into_iter(); + let signatures = signatures.into_iter(); + if stake_table_entries.len() > STAKE_TABLE_CAPACITY { + return Err(PlonkError::CircuitError(CircuitError::ParameterError( + format!( + "Number of input stake table entries {} exceeds the capacity {}", + stake_table_entries.len(), + STAKE_TABLE_CAPACITY, + ), + ))); + } + if signer_bit_vec.len() != STAKE_TABLE_CAPACITY { + return Err(PlonkError::CircuitError(CircuitError::ParameterError( + format!( + "Length of input bit vector {} exceeds the capacity {}", + signer_bit_vec.len(), + STAKE_TABLE_CAPACITY, + ), + ))); + } + if signatures.len() != STAKE_TABLE_CAPACITY { + return Err(PlonkError::CircuitError(CircuitError::ParameterError( + format!( + "Number of input signatures {} exceeds the capacity {}", + signatures.len(), + STAKE_TABLE_CAPACITY, + ), + ))); + } + let mut circuit = PlonkCircuit::new_turbo_plonk(); // creating variables for stake table entries - let mut stake_table_var = stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .map(|(_bls_ver_key, amount, schnorr_ver_key)| { - let schnorr_ver_key = circuit.create_signature_vk_variable(&schnorr_ver_key)?; - let stake_amount = circuit.create_variable(u256_to_field::(&amount))?; + let stake_table_entries_pad_len = STAKE_TABLE_CAPACITY - stake_table_entries.len(); + let mut stake_table_var = stake_table_entries + .map(|item| { + let item = item.borrow(); + let schnorr_ver_key = circuit.create_signature_vk_variable(&item.0)?; + let stake_amount = circuit.create_variable(u256_to_field::(&item.1))?; Ok(StakeTableEntryVar { schnorr_ver_key, stake_amount, }) }) .collect::, CircuitError>>()?; - let dummy_ver_key_var = VerKeyVar(circuit.neutral_point_variable()); - stake_table_var.resize( - STAKE_TABLE_CAPACITY, - StakeTableEntryVar { - schnorr_ver_key: dummy_ver_key_var, - stake_amount: 0, - }, + stake_table_var.extend( + (0..stake_table_entries_pad_len) + .map(|_| { + let schnorr_ver_key = + circuit.create_signature_vk_variable(&SchnorrVerKey::

::default())?; + let stake_amount = circuit.create_variable(F::default())?; + Ok(StakeTableEntryVar { + schnorr_ver_key, + stake_amount, + }) + }) + .collect::, CircuitError>>()?, ); // creating variables for signatures - let mut sig_vars = sigs - .iter() - .map(|sig| circuit.create_signature_variable(sig)) + let sig_pad_len = STAKE_TABLE_CAPACITY - signatures.len(); + let mut sig_vars = signatures + .map(|sig| circuit.create_signature_variable(sig.borrow())) .collect::, CircuitError>>()?; - sig_vars.resize( - STAKE_TABLE_CAPACITY, - SignatureVar { - s: circuit.zero(), - R: circuit.neutral_point_variable(), - }, + sig_vars.extend( + (0..sig_pad_len) + .map(|_| circuit.create_signature_variable(&Signature::

::default())) + .collect::, CircuitError>>()?, ); // creating Boolean variables for the bit vector + let bit_vec_pad_len = STAKE_TABLE_CAPACITY - signer_bit_vec.len(); let mut signer_bit_vec_var = signer_bit_vec - .iter() - .map(|&b| circuit.create_boolean_variable(b)) + .map(|b| circuit.create_boolean_variable(*b.borrow())) .collect::, CircuitError>>()?; - signer_bit_vec_var.resize(STAKE_TABLE_CAPACITY, BoolVar(circuit.zero())); + signer_bit_vec_var.extend( + (0..bit_vec_pad_len) + .map(|_| circuit.create_boolean_variable(false)) + .collect::, CircuitError>>()?, + ); let threshold = u256_to_field::(threshold); let threshold_var = circuit.create_public_variable(threshold)?; @@ -308,6 +352,12 @@ mod tests { let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); let st = stake_table_for_testing(&bls_keys, &schnorr_keys); + let entries = st + .try_iter(SnapshotVersion::LastEpochStart) + .unwrap() + .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)) + .collect::>(); + let block_comm = VariableLengthRescueCRHF::::evaluate(vec![F::from(1u32), F::from(2u32)]).unwrap() [0]; @@ -355,20 +405,20 @@ mod tests { .collect::>(); // good path let (circuit, public_inputs) = build_state_verifier_circuit( - &st, + &entries, + &bit_vec, &bit_masked_sigs, &lightclient_state, - &bit_vec, &U256::from(26u32), ) .unwrap(); assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); let (circuit, public_inputs) = build_state_verifier_circuit( - &st, + &entries, + &bit_vec, &bit_masked_sigs, &lightclient_state, - &bit_vec, &U256::from(10u32), ) .unwrap(); @@ -391,10 +441,10 @@ mod tests { }) .collect::>(); let (bad_circuit, public_inputs) = build_state_verifier_circuit( - &st, + &entries, + &bad_bit_vec, &bad_bit_masked_sigs, &lightclient_state, - &bad_bit_vec, &U256::from(25u32), ) .unwrap(); @@ -422,10 +472,10 @@ mod tests { .collect::, PrimitivesError>>() .unwrap(); let (bad_circuit, public_inputs) = build_state_verifier_circuit( - &st, + &entries, + &bit_vec, &sig_for_bad_state, &bad_lightclient_state, - &bit_vec, &U256::from(26u32), ) .unwrap(); @@ -454,10 +504,10 @@ mod tests { .collect::, PrimitivesError>>() .unwrap(); let (bad_circuit, public_inputs) = build_state_verifier_circuit( - &st, + &entries, + &bit_vec, &wrong_sigs, &lightclient_state, - &bit_vec, &U256::from(26u32), ) .unwrap(); diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 140754d7e6..9c9d2656ee 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -5,7 +5,10 @@ pub mod circuit; mod utils; use ark_bn254::Bn254; -use ark_std::rand::{CryptoRng, RngCore}; +use ark_std::{ + borrow::Borrow, + rand::{CryptoRng, RngCore}, +}; use circuit::build_state_verifier_circuit; use ethereum_types::U256; use hotshot_stake_table::vec_based::StakeTable; @@ -52,24 +55,35 @@ pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), Plon /// - the signer's accumulated weight exceeds the quorum threshold /// - the stake table corresponds to the one committed in the light client state /// - all signed schnorr signatures are valid -pub fn generate_state_update_proof( +pub fn generate_state_update_proof( rng: &mut R, pk: &ProvingKey, stake_table: &ST, - sigs: &[Signature], + signer_bit_vec: BitIter, + signatures: SigIter, lightclient_state: &LightClientState, - signer_bit_vec: &[bool], threshold: &U256, ) -> Result<(Proof, Vec), PlonkError> where ST: StakeTableScheme, R: CryptoRng + RngCore, + BitIter: IntoIterator, + BitIter::Item: Borrow, + BitIter::IntoIter: ExactSizeIterator, + SigIter: IntoIterator, + SigIter::Item: Borrow>, + SigIter::IntoIter: ExactSizeIterator, { + let stake_table_entries = stake_table + .try_iter(SnapshotVersion::LastEpochStart) + .unwrap() + .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)) + .collect::>(); let (circuit, public_inputs) = build_state_verifier_circuit( - stake_table, - sigs, - lightclient_state, + stake_table_entries, signer_bit_vec, + signatures, + lightclient_state, threshold, )?; let proof = PlonkKzgSnark::::prove::<_, _, StandardTranscript>(rng, &circuit, pk, None)?; @@ -87,5 +101,11 @@ fn build_dummy_circuit_for_preprocessing( fee_ledger_comm: BaseField::default(), stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - build_state_verifier_circuit(&st, &[], &lightclient_state, &[], &U256::zero()) + build_state_verifier_circuit::( + &[], + &[], + &[], + &lightclient_state, + &U256::zero(), + ) } From c2b7ab3f6bdd4d7844410b2135fe2a4c56c847f7 Mon Sep 17 00:00:00 2001 From: MRain Date: Wed, 15 Nov 2023 21:34:00 -0500 Subject: [PATCH 0394/1393] test; and bug fixes --- hotshot-state-prover/src/circuit.rs | 4 +- hotshot-state-prover/src/lib.rs | 211 ++++++++++++++++++++++++++++ 2 files changed, 213 insertions(+), 2 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index ac52a077e3..9387829bc6 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -157,7 +157,7 @@ where ), ))); } - if signer_bit_vec.len() != STAKE_TABLE_CAPACITY { + if signer_bit_vec.len() > STAKE_TABLE_CAPACITY { return Err(PlonkError::CircuitError(CircuitError::ParameterError( format!( "Length of input bit vector {} exceeds the capacity {}", @@ -166,7 +166,7 @@ where ), ))); } - if signatures.len() != STAKE_TABLE_CAPACITY { + if signatures.len() > STAKE_TABLE_CAPACITY { return Err(PlonkError::CircuitError(CircuitError::ParameterError( format!( "Number of input signatures {} exceeds the capacity {}", diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 9c9d2656ee..1f70317154 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -109,3 +109,214 @@ fn build_dummy_circuit_for_preprocessing( &U256::zero(), ) } + +#[cfg(test)] +mod tests { + use super::{ + utils::{key_pairs_for_testing, stake_table_for_testing}, + BLSVerKey, BaseField, SchnorrVerKey, UniversalSrs, + }; + use crate::{circuit::build_state_verifier_circuit, generate_state_update_proof, preprocess}; + use ark_bn254::Bn254; + use ark_ec::pairing::Pairing; + use ark_ed_on_bn254::EdwardsConfig as Config; + use ark_std::{ + rand::{CryptoRng, RngCore}, + One, + }; + use ethereum_types::U256; + use hotshot_stake_table::vec_based::StakeTable; + use hotshot_types::traits::{ + stake_table::{SnapshotVersion, StakeTableScheme}, + state::LightClientState, + }; + use jf_plonk::{ + errors::PlonkError, + proof_system::{PlonkKzgSnark, UniversalSNARK}, + transcript::StandardTranscript, + }; + use jf_primitives::{ + crhf::{VariableLengthRescueCRHF, CRHF}, + errors::PrimitivesError, + signatures::{schnorr::Signature, SchnorrSignatureScheme, SignatureScheme}, + }; + use jf_utils::test_rng; + + // FIXME(Chengyu): see + fn universal_setup_for_testing( + max_degree: usize, + rng: &mut R, + ) -> Result + where + R: RngCore + CryptoRng, + { + use ark_ec::{scalar_mul::fixed_base::FixedBase, CurveGroup}; + use ark_ff::PrimeField; + use ark_std::{end_timer, start_timer, UniformRand}; + + let setup_time = start_timer!(|| format!("KZG10::Setup with degree {}", max_degree)); + let beta = ::ScalarField::rand(rng); + let g = ::G1::rand(rng); + let h = ::G2::rand(rng); + + let mut powers_of_beta = vec![::ScalarField::one()]; + + let mut cur = beta; + for _ in 0..max_degree { + powers_of_beta.push(cur); + cur *= β + } + + let window_size = FixedBase::get_mul_window_size(max_degree + 1); + + let scalar_bits = ::ScalarField::MODULUS_BIT_SIZE as usize; + let g_time = start_timer!(|| "Generating powers of G"); + // TODO: parallelization + let g_table = FixedBase::get_window_table(scalar_bits, window_size, g); + let powers_of_g = FixedBase::msm::<::G1>( + scalar_bits, + window_size, + &g_table, + &powers_of_beta, + ); + end_timer!(g_time); + + let powers_of_g = ::G1::normalize_batch(&powers_of_g); + + let h = h.into_affine(); + let beta_h = (h * beta).into_affine(); + + let pp = UniversalSrs { + powers_of_g, + h, + beta_h, + }; + end_timer!(setup_time); + Ok(pp) + } + + /// Internal function for helping generate the proving/verifying key + fn get_num_of_gates() -> Result { + use ark_ed_on_bn254::EdwardsConfig; + use jf_relation::Circuit; + let st = StakeTable::::new(); + let lightclient_state = LightClientState { + view_number: 0, + block_height: 0, + block_comm: BaseField::default(), + fee_ledger_comm: BaseField::default(), + stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), + }; + Ok( + build_state_verifier_circuit::( + &[], + &[], + &[], + &lightclient_state, + &U256::zero(), + )? + .0 + .num_gates(), + ) + } + + #[test] + fn test_proof_generation() { + let num_validators = 10; + let mut prng = test_rng(); + + let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); + let st = stake_table_for_testing(&bls_keys, &schnorr_keys); + + let block_comm = VariableLengthRescueCRHF::::evaluate(vec![ + BaseField::from(1u32), + BaseField::from(2u32), + ]) + .unwrap()[0]; + let fee_ledger_comm = VariableLengthRescueCRHF::::evaluate(vec![ + BaseField::from(3u32), + BaseField::from(5u32), + ]) + .unwrap()[0]; + + let lightclient_state = LightClientState { + view_number: 100, + block_height: 73, + block_comm, + fee_ledger_comm, + stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), + }; + let state_msg = [ + BaseField::from(lightclient_state.view_number as u64), + BaseField::from(lightclient_state.block_height as u64), + lightclient_state.block_comm, + lightclient_state.fee_ledger_comm, + lightclient_state.stake_table_comm.0, + lightclient_state.stake_table_comm.1, + lightclient_state.stake_table_comm.2, + ]; + + let sigs = schnorr_keys + .iter() + .map(|(key, _)| SchnorrSignatureScheme::::sign(&(), key, state_msg, &mut prng)) + .collect::, PrimitivesError>>() + .unwrap(); + + // bit vector with total weight 26 + let bit_vec = [ + true, true, true, false, true, true, false, false, true, false, + ]; + let bit_masked_sigs = bit_vec + .iter() + .zip(sigs.iter()) + .map(|(bit, sig)| { + if *bit { + sig.clone() + } else { + Signature::::default() + } + }) + .collect::>(); + + // good path + let num_gates = get_num_of_gates().unwrap(); + let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); + ark_std::println!("Number of constraint in the circuit: {}", num_gates); + + let result = preprocess(&test_srs); + assert!(result.is_ok()); + let (pk, vk) = result.unwrap(); + + let result = generate_state_update_proof( + &mut prng, + &pk, + &st, + &bit_vec, + &bit_masked_sigs, + &lightclient_state, + &U256::from(26u32), + ); + assert!(result.is_ok()); + + let (proof, public_inputs) = result.unwrap(); + assert!(PlonkKzgSnark::::verify::( + &vk, + &public_inputs, + &proof, + None + ) + .is_ok()); + + // minimum bad path, other bad cases are checked inside `circuit.rs` + let result = generate_state_update_proof( + &mut prng, + &pk, + &st, + &bit_vec, + &bit_masked_sigs, + &lightclient_state, + &U256::from(100u32), + ); + assert!(result.is_err()); + } +} From 023764126f7de277f4978a13ea36bf93557f3f7b Mon Sep 17 00:00:00 2001 From: MRain Date: Wed, 15 Nov 2023 21:36:46 -0500 Subject: [PATCH 0395/1393] renaming variables --- hotshot-state-prover/src/circuit.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 9387829bc6..03c7f0b921 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -228,9 +228,9 @@ where ); let threshold = u256_to_field::(threshold); - let threshold_var = circuit.create_public_variable(threshold)?; + let threshold_pub_var = circuit.create_public_variable(threshold)?; - let lightclient_state_var = LightClientStateVar::new(&mut circuit, lightclient_state)?; + let lightclient_state_pub_var = LightClientStateVar::new(&mut circuit, lightclient_state)?; let view_number_f = F::from(lightclient_state.view_number as u64); let block_height_f = F::from(lightclient_state.block_height as u64); @@ -267,7 +267,7 @@ where )?); } let acc_amount_var = circuit.sum(&signed_amount_var)?; - circuit.enforce_leq(threshold_var, acc_amount_var)?; + circuit.enforce_leq(threshold_pub_var, acc_amount_var)?; // checking the commitment for the list of schnorr keys let schnorr_ver_key_preimage_vars = stake_table_var @@ -281,7 +281,9 @@ where )?[0]; circuit.enforce_equal( schnorr_ver_key_comm, - lightclient_state_var.stake_table_comm().schnorr_keys_comm, + lightclient_state_pub_var + .stake_table_comm() + .schnorr_keys_comm, )?; // checking the commitment for the list of stake amounts @@ -296,7 +298,9 @@ where )?[0]; circuit.enforce_equal( stake_amount_comm, - lightclient_state_var.stake_table_comm().stake_amount_comm, + lightclient_state_pub_var + .stake_table_comm() + .stake_amount_comm, )?; // checking all signatures @@ -307,7 +311,7 @@ where SignatureGadget::<_, P>::check_signature_validity( &mut circuit, &entry.schnorr_ver_key, - lightclient_state_var.as_ref(), + lightclient_state_pub_var.as_ref(), &sig, ) }) From 3db1eb7d45fcafadd93727e73022f4fd1bf44130 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 15 Nov 2023 22:36:50 -0500 Subject: [PATCH 0396/1393] remove NodeImplementation Generic from net layer --- hotshot/examples/infra/mod.rs | 200 ++++++++------- hotshot/examples/libp2p/multi-validator.rs | 3 +- hotshot/examples/libp2p/orchestrator.rs | 13 +- hotshot/examples/libp2p/types.rs | 17 +- hotshot/examples/libp2p/validator.rs | 3 +- .../examples/web-server-da/multi-validator.rs | 3 +- .../examples/web-server-da/orchestrator.rs | 14 +- hotshot/examples/web-server-da/types.rs | 17 +- hotshot/examples/web-server-da/validator.rs | 3 +- hotshot/src/lib.rs | 71 +++--- hotshot/src/tasks/mod.rs | 102 ++++---- .../src/traits/networking/combined_network.rs | 90 +++---- .../src/traits/networking/libp2p_network.rs | 41 ++-- .../src/traits/networking/memory_network.rs | 37 ++- .../traits/networking/web_server_network.rs | 44 ++-- hotshot/src/types/handle.rs | 10 +- task-impls/src/consensus.rs | 154 ++++++------ task-impls/src/da.rs | 52 ++-- task-impls/src/events.rs | 35 ++- task-impls/src/harness.rs | 37 ++- task-impls/src/network.rs | 96 ++++---- task-impls/src/transactions.rs | 18 +- task-impls/src/vid.rs | 46 ++-- task-impls/src/view_sync.rs | 158 ++++++------ testing/src/node_types.rs | 227 ++++++------------ testing/src/overall_safety_task.rs | 4 +- testing/src/task_helpers.rs | 6 +- testing/src/test_builder.rs | 4 +- testing/src/test_launcher.rs | 20 +- testing/src/test_runner.rs | 16 +- testing/tests/consensus_task.rs | 7 +- testing/tests/memory_network.rs | 43 ++-- testing/tests/view_sync_task.rs | 5 +- types/src/message.rs | 128 ++++------ types/src/simple_vote.rs | 35 +-- types/src/traits/consensus_api.rs | 8 +- types/src/traits/election.rs | 39 ++- types/src/traits/network.rs | 2 +- types/src/traits/node_implementation.rs | 66 ++--- types/src/vote2.rs | 4 +- 40 files changed, 850 insertions(+), 1028 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index c9c45517a7..8c60ffa953 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -107,20 +107,19 @@ pub fn load_config_from_file( /// Runs the orchestrator pub async fn run_orchestrator< TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + DANETWORK: CommunicationChannel, TYPES::Membership> + Debug, + QUORUMNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + VIDNETWORK: CommunicationChannel, TYPES::Membership> + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >, Storage = MemoryStorage, >, @@ -157,20 +156,19 @@ fn calculate_num_tx_per_round( #[async_trait] pub trait RunDA< TYPES: NodeType, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + DANETWORK: CommunicationChannel, TYPES::Membership> + Debug, + QUORUMNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + VIDNETWORK: CommunicationChannel, TYPES::Membership> + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >, Storage = MemoryStorage, >, @@ -212,13 +210,13 @@ pub trait RunDA< let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TYPES, - Message, + Message, >>::Membership::default_election_config(config.config.total_nodes.get() as u64) }); let committee_election_config = as ConsensusExchange< TYPES, - Message, + Message, >>::Membership::default_election_config( config.config.da_committee_size.try_into().unwrap(), ); @@ -372,16 +370,12 @@ pub trait RunDA< // WEB SERVER /// Represents a web server-based run -pub struct WebServerDARun< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { +pub struct WebServerDARun> { config: NetworkConfig, - quorum_network: WebCommChannel, - da_network: WebCommChannel, - view_sync_network: WebCommChannel, - vid_network: WebCommChannel, + quorum_network: WebCommChannel, + da_network: WebCommChannel, + view_sync_network: WebCommChannel, + vid_network: WebCommChannel, } #[async_trait] @@ -391,35 +385,34 @@ impl< BlockPayload = VIDBlockPayload, BlockHeader = VIDBlockHeader, >, - MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, - Message, + Message, QuorumExchange< TYPES, - MEMBERSHIP, - WebCommChannel, - Message, + TYPES::Membership, + WebCommChannel, + Message, >, CommitteeExchange< TYPES, - MEMBERSHIP, - WebCommChannel, - Message, + TYPES::Membership, + WebCommChannel, + Message, >, ViewSyncExchange< TYPES, - MEMBERSHIP, - WebCommChannel, - Message, + TYPES::Membership, + WebCommChannel, + Message, >, VIDExchange< TYPES, - MEMBERSHIP, - WebCommChannel, - Message, + TYPES::Membership, + WebCommChannel, + Message, >, >, Storage = MemoryStorage, @@ -427,13 +420,12 @@ impl< > RunDA< TYPES, - MEMBERSHIP, - WebCommChannel, - WebCommChannel, - WebCommChannel, - WebCommChannel, + WebCommChannel, + WebCommChannel, + WebCommChannel, + WebCommChannel, NODE, - > for WebServerDARun + > for WebServerDARun where ::StateType: TestableState, ::BlockPayload: TestableBlock, @@ -442,7 +434,7 @@ where { async fn initialize_networking( config: NetworkConfig, - ) -> WebServerDARun { + ) -> WebServerDARun { // Get our own key let pub_key = config.config.my_own_validator_config.public_key.clone(); @@ -462,10 +454,10 @@ where ); // Create the network - let quorum_network: WebCommChannel = + let quorum_network: WebCommChannel = WebCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: WebCommChannel = + let view_sync_network: WebCommChannel = WebCommChannel::new(underlying_quorum_network.into()); let WebServerConfig { @@ -475,7 +467,7 @@ where }: WebServerConfig = config.clone().da_web_server_config.unwrap(); // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel = WebCommChannel::new( + let da_network: WebCommChannel = WebCommChannel::new( WebServerNetwork::create( &host.to_string(), port, @@ -486,7 +478,7 @@ where .into(), ); - let vid_network: WebCommChannel = WebCommChannel::new( + let vid_network: WebCommChannel = WebCommChannel::new( WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) .into(), ); @@ -500,19 +492,19 @@ where } } - fn get_da_network(&self) -> WebCommChannel { + fn get_da_network(&self) -> WebCommChannel { self.da_network.clone() } - fn get_quorum_network(&self) -> WebCommChannel { + fn get_quorum_network(&self) -> WebCommChannel { self.quorum_network.clone() } - fn get_view_sync_network(&self) -> WebCommChannel { + fn get_view_sync_network(&self) -> WebCommChannel { self.view_sync_network.clone() } - fn get_vid_network(&self) -> WebCommChannel { + fn get_vid_network(&self) -> WebCommChannel { self.vid_network.clone() } @@ -524,13 +516,12 @@ where // Libp2p /// Represents a libp2p-based run -pub struct Libp2pDARun, MEMBERSHIP: Membership> -{ +pub struct Libp2pDARun> { config: NetworkConfig, - quorum_network: Libp2pCommChannel, - da_network: Libp2pCommChannel, - view_sync_network: Libp2pCommChannel, - vid_network: Libp2pCommChannel, + quorum_network: Libp2pCommChannel, + da_network: Libp2pCommChannel, + view_sync_network: Libp2pCommChannel, + vid_network: Libp2pCommChannel, } #[async_trait] @@ -540,35 +531,34 @@ impl< BlockPayload = VIDBlockPayload, BlockHeader = VIDBlockHeader, >, - MEMBERSHIP: Membership + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, - Message, + Message, QuorumExchange< TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, + TYPES::Membership, + Libp2pCommChannel, + Message, >, CommitteeExchange< TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, + TYPES::Membership, + Libp2pCommChannel, + Message, >, ViewSyncExchange< TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, + TYPES::Membership, + Libp2pCommChannel, + Message, >, VIDExchange< TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Message, + TYPES::Membership, + Libp2pCommChannel, + Message, >, >, Storage = MemoryStorage, @@ -576,13 +566,12 @@ impl< > RunDA< TYPES, - MEMBERSHIP, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, NODE, - > for Libp2pDARun + > for Libp2pDARun where ::StateType: TestableState, ::BlockPayload: TestableBlock, @@ -591,7 +580,7 @@ where { async fn initialize_networking( config: NetworkConfig, - ) -> Libp2pDARun { + ) -> Libp2pDARun { let pubkey = config.config.my_own_validator_config.public_key.clone(); let mut config = config; let libp2p_config = config @@ -710,16 +699,16 @@ where underlying_quorum_network.wait_for_ready().await; // Create the network - let quorum_network: Libp2pCommChannel = + let quorum_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: Libp2pCommChannel = + let view_sync_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let da_network: Libp2pCommChannel = + let da_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let vid_network: Libp2pCommChannel = + let vid_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); Libp2pDARun { @@ -731,19 +720,19 @@ where } } - fn get_da_network(&self) -> Libp2pCommChannel { + fn get_da_network(&self) -> Libp2pCommChannel { self.da_network.clone() } - fn get_quorum_network(&self) -> Libp2pCommChannel { + fn get_quorum_network(&self) -> Libp2pCommChannel { self.quorum_network.clone() } - fn get_view_sync_network(&self) -> Libp2pCommChannel { + fn get_view_sync_network(&self) -> Libp2pCommChannel { self.view_sync_network.clone() } - fn get_vid_network(&self) -> Libp2pCommChannel { + fn get_vid_network(&self) -> Libp2pCommChannel { self.vid_network.clone() } @@ -759,24 +748,23 @@ pub async fn main_entry_point< BlockPayload = VIDBlockPayload, BlockHeader = VIDBlockHeader, >, - MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + DANETWORK: CommunicationChannel, TYPES::Membership> + Debug, + QUORUMNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + VIEWSYNCNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + VIDNETWORK: CommunicationChannel, TYPES::Membership> + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >, Storage = MemoryStorage, >, - RUNDA: RunDA, + RUNDA: RunDA, >( args: ValidatorArgs, ) where diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index bb6937e8ad..c227ee1818 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::{DemoMembership, DemoTypes}; +use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; @@ -49,7 +49,6 @@ async fn main() { let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, - DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 01c729b6b9..54cf3550ae 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -2,7 +2,6 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoMembership; use hotshot::demo::DemoTypes; use tracing::instrument; @@ -24,14 +23,8 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::< - DemoTypes, - DemoMembership, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - >(args) + run_orchestrator::( + args, + ) .await; } diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 517b6f5152..8bf819d2d5 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -28,20 +28,17 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< DemoTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; fn new_channel_maps( start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { + ) -> (ChannelMaps, Option>) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pDARun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 176f5e805f..5e6c91cbca 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::{DemoMembership, DemoTypes}; +use hotshot::demo::DemoTypes; use tracing::{info, instrument}; use types::VIDNetwork; @@ -29,7 +29,6 @@ async fn main() { ); infra::main_entry_point::< DemoTypes, - DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/web-server-da/multi-validator.rs index bb6937e8ad..c227ee1818 100644 --- a/hotshot/examples/web-server-da/multi-validator.rs +++ b/hotshot/examples/web-server-da/multi-validator.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::{DemoMembership, DemoTypes}; +use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use std::net::IpAddr; use tracing::instrument; @@ -49,7 +49,6 @@ async fn main() { let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, - DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/web-server-da/orchestrator.rs index 7185c42e53..f21ee05552 100644 --- a/hotshot/examples/web-server-da/orchestrator.rs +++ b/hotshot/examples/web-server-da/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::{DemoMembership, DemoTypes}; +use hotshot::demo::DemoTypes; use tracing::instrument; use types::VIDNetwork; @@ -24,14 +24,8 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::< - DemoTypes, - DemoMembership, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - >(args) + run_orchestrator::( + args, + ) .await; } diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 2241d89d6d..e843662714 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -28,20 +28,17 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< DemoTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; fn new_channel_maps( start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { + ) -> (ChannelMaps, Option>) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/web-server-da/validator.rs index 176f5e805f..5e6c91cbca 100644 --- a/hotshot/examples/web-server-da/validator.rs +++ b/hotshot/examples/web-server-da/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::{DemoMembership, DemoTypes}; +use hotshot::demo::DemoTypes; use tracing::{info, instrument}; use types::VIDNetwork; @@ -29,7 +29,6 @@ async fn main() { ); infra::main_entry_point::< DemoTypes, - DemoMembership, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5ebefb6c8e..38393715e8 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -66,7 +66,7 @@ use hotshot_types::{ }, traits::{ consensus_api::{ConsensusApi, ConsensusSharedApi}, - election::{ConsensusExchange, Membership}, + election::ConsensusExchange, network::{CommunicationChannel, NetworkError}, node_implementation::{ ChannelMaps, CommitteeEx, ExchangesType, NodeType, QuorumEx, SendToTasks, VIDEx, @@ -81,7 +81,6 @@ use hotshot_types::{ use snafu::ResultExt; use std::{ collections::{BTreeMap, HashMap}, - marker::PhantomData, num::NonZeroUsize, sync::Arc, time::Duration, @@ -132,14 +131,14 @@ pub struct SystemContextInner> { /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the /// latter of which is only applicable for sequencing consensus. - channel_maps: (ChannelMaps, Option>), + channel_maps: (ChannelMaps, Option>), // global_registry: GlobalRegistry, /// Access to the output event stream. output_event_stream: ChannelStream>, /// access to the internal event stream, in case we need to, say, shut something down - internal_event_stream: ChannelStream>, + internal_event_stream: ChannelStream>, /// uid for instrumentation id: u64, @@ -255,10 +254,10 @@ impl> SystemContext { pub async fn timeout_view( &self, current_view: TYPES::Time, - send_replica: UnboundedSender>, - send_next_leader: Option>>, + send_replica: UnboundedSender>, + send_next_leader: Option>>, ) { - let msg = ProcessedGeneralConsensusMessage::::InternalTrigger( + let msg = ProcessedGeneralConsensusMessage::::InternalTrigger( InternalTrigger::Timeout(current_view), ); if let Some(chan) = send_next_leader { @@ -301,7 +300,6 @@ impl> SystemContext { Message { sender: api.inner.public_key.clone(), kind: MessageKind::from(message), - _phantom: PhantomData, }, &api.inner .exchanges @@ -366,7 +364,7 @@ impl> SystemContext { ) -> Result< ( SystemContextHandle, - ChannelStream>, + ChannelStream>, ), HotShotError, > @@ -402,7 +400,7 @@ impl> SystemContext { #[allow(clippy::unused_async)] pub async fn send_broadcast_message( &self, - kind: impl Into>, + kind: impl Into>, ) -> std::result::Result<(), NetworkError> { let inner = self.inner.clone(); let pk = self.inner.public_key.clone(); @@ -414,11 +412,7 @@ impl> SystemContext { .quorum_exchange() .network() .broadcast_message( - Message { - sender: pk, - kind, - _phantom: PhantomData, - }, + Message { sender: pk, kind }, // TODO this is morally wrong &inner.exchanges.quorum_exchange().membership().clone(), ) @@ -440,7 +434,7 @@ impl> SystemContext { /// Will return any errors that the underlying `message_node` can return. pub async fn send_direct_message( &self, - kind: impl Into>, + kind: impl Into>, recipient: TYPES::SignatureKey, ) -> std::result::Result<(), NetworkError> { self.inner @@ -451,7 +445,6 @@ impl> SystemContext { Message { sender: self.inner.public_key.clone(), kind: kind.into(), - _phantom: PhantomData, }, recipient, ) @@ -469,15 +462,15 @@ impl> SystemContext { /// doesn't exist, or creates entry. Then returns a clone of the entry pub async fn create_or_obtain_chan_from_read( view_num: TYPES::Time, - channel_map: RwLockUpgradableReadGuard<'_, SendToTasks>, - ) -> ViewQueue { + channel_map: RwLockUpgradableReadGuard<'_, SendToTasks>, + ) -> ViewQueue { // check if we have the entry // if we don't, insert if let Some(vq) = channel_map.channel_map.get(&view_num) { vq.clone() } else { let mut channel_map = - RwLockUpgradableReadGuard::<'_, SendToTasks>::upgrade(channel_map).await; + RwLockUpgradableReadGuard::<'_, SendToTasks>::upgrade(channel_map).await; let new_view_queue = ViewQueue::default(); let vq = new_view_queue.clone(); // NOTE: the read lock is held until all other read locks are DROPPED and @@ -495,8 +488,8 @@ impl> SystemContext { #[allow(clippy::unused_async)] // async for API compatibility reasons pub async fn create_or_obtain_chan_from_write( view_num: TYPES::Time, - mut channel_map: RwLockWriteGuard<'_, SendToTasks>, - ) -> ViewQueue { + mut channel_map: RwLockWriteGuard<'_, SendToTasks>, + ) -> ViewQueue { channel_map.channel_map.entry(view_num).or_default().clone() } } @@ -513,7 +506,7 @@ pub trait HotShotType> { async fn run_tasks(self) -> SystemContextHandle; // decide which handler to call based on the message variant and `transmit_type` - // async fn handle_message(&self, item: Message, transmit_type: TransmitType) { + // async fn handle_message(&self, item: Message, transmit_type: TransmitType) { // match (item.kind, transmit_type) { // (MessageKind::Consensus(msg), TransmitType::Broadcast) => { // self.handle_broadcast_consensus_message(msg, item.sender) @@ -609,34 +602,33 @@ impl< Transaction = VIDTransaction, >, I: NodeImplementation, - MEMBERSHIP: Membership, > HotShotType for SystemContext where QuorumEx: ConsensusExchange< TYPES, - Message, + Message, Commitment = Commitment>, - Membership = MEMBERSHIP, + Membership = TYPES::Membership, > + 'static, CommitteeEx: ConsensusExchange< TYPES, - Message, + Message, Commitment = Commitment, - Membership = MEMBERSHIP, + Membership = TYPES::Membership, > + 'static, ViewSyncEx: - ViewSyncExchangeType, Membership = MEMBERSHIP> + 'static, + ViewSyncExchangeType, Membership = TYPES::Membership> + 'static, VIDEx: ConsensusExchange< TYPES, - Message, + Message, Commitment = Commitment, - Membership = MEMBERSHIP, + Membership = TYPES::Membership, > + 'static, TimeoutEx: ConsensusExchange< TYPES, - Message, + Message, Commitment = Commitment, - Membership = MEMBERSHIP, + Membership = TYPES::Membership, > + 'static, { fn consensus(&self) -> &Arc>> { @@ -832,7 +824,7 @@ impl> ConsensusApi async fn send_direct_message( &self, recipient: TYPES::SignatureKey, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError> { let inner = self.inner.clone(); debug!(?message, ?recipient, "send_direct_message"); @@ -845,7 +837,6 @@ impl> ConsensusApi Message { sender: inner.public_key.clone(), kind: MessageKind::from_consensus_message(message), - _phantom: PhantomData, }, recipient, ) @@ -857,7 +848,7 @@ impl> ConsensusApi async fn send_direct_da_message( &self, recipient: TYPES::SignatureKey, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError> { let inner = self.inner.clone(); debug!(?message, ?recipient, "send_direct_message"); @@ -870,7 +861,6 @@ impl> ConsensusApi Message { sender: inner.public_key.clone(), kind: MessageKind::from_consensus_message(message), - _phantom: PhantomData, }, recipient, ) @@ -883,7 +873,7 @@ impl> ConsensusApi // async fn send_broadcast_message( &self, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError> { debug!(?message, "send_broadcast_message"); self.inner @@ -894,7 +884,6 @@ impl> ConsensusApi Message { sender: self.inner.public_key.clone(), kind: MessageKind::from_consensus_message(message), - _phantom: PhantomData, }, &self.inner.exchanges.quorum_exchange().membership().clone(), ) @@ -904,7 +893,7 @@ impl> ConsensusApi async fn send_da_broadcast( &self, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError> { debug!(?message, "send_da_broadcast_message"); self.inner @@ -915,7 +904,6 @@ impl> ConsensusApi Message { sender: self.inner.public_key.clone(), kind: MessageKind::from_consensus_message(message), - _phantom: PhantomData, }, &self .inner @@ -944,7 +932,6 @@ impl> ConsensusApi Message { sender: api.inner.public_key.clone(), kind: MessageKind::from(message), - _phantom: PhantomData, }, &api.inner .exchanges diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index da45f0d41c..858158280c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -30,7 +30,7 @@ use hotshot_types::{ event::Event, message::{Message, Messages}, traits::{ - election::{ConsensusExchange, Membership, ViewSyncExchangeType}, + election::{ConsensusExchange, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, TimeoutEx, VIDEx, @@ -60,20 +60,18 @@ pub enum GlobalEvent { /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_message_task< TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> + 'static, + EXCHANGE: ConsensusExchange, Membership = TYPES::Membership> + 'static, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, exchange: EXCHANGE, ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: CommunicationChannel, MEMBERSHIP>, + EXCHANGE::Networking: CommunicationChannel, TYPES::Membership>, { let channel = exchange.network().clone(); - let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { + let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { let network = channel.clone(); let closure = async move { loop { @@ -93,7 +91,7 @@ where Some(boxed_sync(closure)) })); let channel = exchange.network().clone(); - let direct_stream = GeneratedStream::>::new(Arc::new(move || { + let direct_stream = GeneratedStream::>::new(Arc::new(move || { let network = channel.clone(); let closure = async move { loop { @@ -113,13 +111,13 @@ where Some(boxed_sync(closure)) })); let message_stream = Merge::new(broadcast_stream, direct_stream); - let network_state: NetworkMessageTaskState<_, _> = NetworkMessageTaskState { + let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { event_stream: event_stream.clone(), }; let registry = task_runner.registry.clone(); let network_message_handler = HandleMessage(Arc::new( - move |messages: either::Either, Messages>, - mut state: NetworkMessageTaskState| { + move |messages: either::Either, Messages>, + mut state: NetworkMessageTaskState| { let messages = match messages { either::Either::Left(messages) | either::Either::Right(messages) => messages, }; @@ -133,7 +131,7 @@ where let networking_name = "Networking Task"; let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) + TaskBuilder::>::new(networking_name.to_string()) .register_message_stream(message_stream) .register_registry(&mut registry.clone()) .await @@ -157,35 +155,30 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_event_task< TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, - EXCHANGE: ConsensusExchange, Membership = MEMBERSHIP> + 'static, + EXCHANGE: ConsensusExchange, Membership = TYPES::Membership> + 'static, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, exchange: EXCHANGE, task_kind: NetworkTaskKind, ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: CommunicationChannel, MEMBERSHIP>, + EXCHANGE::Networking: CommunicationChannel, TYPES::Membership>, { - let filter = NetworkEventTaskState::< - TYPES, - I, - MEMBERSHIP, - >::Networking, - >::filter(task_kind); + let filter = + NetworkEventTaskState::>::Networking>::filter( + task_kind, + ); let channel = exchange.network().clone(); - let network_state: NetworkEventTaskState<_, _, _, _> = NetworkEventTaskState { + let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { channel, event_stream: event_stream.clone(), view: TYPES::Time::genesis(), - phantom: PhantomData, }; let registry = task_runner.registry.clone(); let network_event_handler = HandleEvent(Arc::new( - move |event, mut state: NetworkEventTaskState<_, _, MEMBERSHIP, _>| { + move |event, mut state: NetworkEventTaskState<_, _>| { let membership = exchange.membership().clone(); async move { let completion_status = state.handle_event(event, &membership).await; @@ -197,7 +190,7 @@ where let networking_name = "Networking Task"; let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) + TaskBuilder::>::new(networking_name.to_string()) .register_event_stream(event_stream.clone(), filter) .await .register_registry(&mut registry.clone()) @@ -225,17 +218,29 @@ pub async fn add_consensus_task< I: NodeImplementation, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, output_stream: ChannelStream>, handle: SystemContextHandle, ) -> TaskRunner where - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment>, + Membership = TYPES::Membership, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, + TimeoutEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -314,13 +319,17 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_vid_task>( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, vid_exchange: VIDEx, handle: SystemContextHandle, ) -> TaskRunner where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -373,13 +382,17 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_da_task>( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, committee_exchange: CommitteeEx, handle: SystemContextHandle, ) -> TaskRunner where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -434,12 +447,12 @@ pub async fn add_transaction_task< I: NodeImplementation, >( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, quorum_exchange: QuorumEx, handle: SystemContextHandle, ) -> TaskRunner where - QuorumEx: ConsensusExchange>, + QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, { // build the transactions task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -491,11 +504,12 @@ where /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_view_sync_task>( task_runner: TaskRunner, - event_stream: ChannelStream>, + event_stream: ChannelStream>, handle: SystemContextHandle, ) -> TaskRunner where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index e717c40b1b..52b17b17bc 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -1,10 +1,7 @@ //! Networking Implementation that has a primary and a fallback newtork. If the primary //! Errors we will use the backup to send or receive use super::NetworkError; -use crate::{ - traits::implementations::{Libp2pNetwork, WebServerNetwork}, - NodeImplementation, -}; +use crate::traits::implementations::{Libp2pNetwork, WebServerNetwork}; use async_lock::RwLock; use hotshot_constants::{ COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, @@ -103,13 +100,9 @@ fn calculate_hash_of(t: &T) -> u64 { /// A communication channel with 2 networks, where we can fall back to the slower network if the /// primary fails #[derive(Clone, Debug)] -pub struct CombinedCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, -> { +pub struct CombinedCommChannel> { /// The two networks we'll use for send/recv - networks: Arc>, + networks: Arc>, /// Last n seen messages to prevent processing duplicates message_cache: Arc>, @@ -118,12 +111,10 @@ pub struct CombinedCommChannel< primary_down: Arc, } -impl, MEMBERSHIP: Membership> - CombinedCommChannel -{ +impl> CombinedCommChannel { /// Constructor #[must_use] - pub fn new(networks: Arc>) -> Self { + pub fn new(networks: Arc>) -> Self { Self { networks, message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), @@ -133,13 +124,13 @@ impl, MEMBERSHIP: Membership &WebServerNetwork, TYPES::SignatureKey, TYPES> { + pub fn primary(&self) -> &WebServerNetwork, TYPES::SignatureKey, TYPES> { &self.networks.0 } /// Get a ref to the backup network #[must_use] - pub fn secondary(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { + pub fn secondary(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { &self.networks.1 } } @@ -148,19 +139,15 @@ impl, MEMBERSHIP: Membership, - MEMBERSHIP: Membership, ->( - pub WebServerNetwork, TYPES::SignatureKey, TYPES>, - pub Libp2pNetwork, TYPES::SignatureKey>, +pub struct CombinedNetworks>( + pub WebServerNetwork, TYPES::SignatureKey, TYPES>, + pub Libp2pNetwork, TYPES::SignatureKey>, pub PhantomData, ); -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for CombinedNetworks +impl> + TestableNetworkingImplementation> + for CombinedNetworks { fn generator( expected_node_count: usize, @@ -171,7 +158,7 @@ impl, MEMBERSHIP: Membership Box Self + 'static> { let generators = ( , + Message, TYPES::SignatureKey, TYPES, > as TestableNetworkingImplementation<_, _>>::generator( @@ -181,7 +168,7 @@ impl, MEMBERSHIP: Membership, TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( + , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( expected_node_count, num_bootstrap, network_id, @@ -202,9 +189,9 @@ impl, MEMBERSHIP: Membership, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for CombinedCommChannel +impl> + TestableNetworkingImplementation> + for CombinedCommChannel { fn generator( expected_node_count: usize, @@ -213,16 +200,15 @@ impl, MEMBERSHIP: Membership Box Self + 'static> { - let generator = as TestableNetworkingImplementation<_, _>>::generator( + let generator = as TestableNetworkingImplementation< + _, + _, + >>::generator( expected_node_count, num_bootstrap, network_id, da_committee_size, - is_da + is_da, ); Box::new(move |node_id| Self { networks: generator(node_id).into(), @@ -240,11 +226,11 @@ impl, MEMBERSHIP: Membership, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> - for CombinedCommChannel +impl> + CommunicationChannel, MEMBERSHIP> + for CombinedCommChannel { - type NETWORK = CombinedNetworks; + type NETWORK = CombinedNetworks; async fn wait_for_ready(&self) { join!( @@ -270,11 +256,11 @@ impl, MEMBERSHIP: Membership, - election: &MEMBERSHIP, + message: Message, + election: &TYPES::Membership, ) -> Result<(), NetworkError> { let recipients = - >::get_committee(election, message.get_view_number()); + ::Membership::get_committee(election, message.get_view_number()); // broadcast optimistically on both networks, but if the primary network is down, skip it if self.primary_down.load(Ordering::Relaxed) < COMBINED_NETWORK_MIN_PRIMARY_FAILURES @@ -304,7 +290,7 @@ impl, MEMBERSHIP: Membership, + message: Message, recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { // DM optimistically on both networks, but if the primary network is down, skip it @@ -334,7 +320,7 @@ impl, MEMBERSHIP: Membership( &'a self, transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> + ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, @@ -380,21 +366,21 @@ impl, MEMBERSHIP: Membership) { - as ConnectedNetwork,TYPES::SignatureKey>>:: + as ConnectedNetwork,TYPES::SignatureKey>>:: inject_consensus_info(self.primary(), event.clone()).await; - as ConnectedNetwork,TYPES::SignatureKey>>:: + as ConnectedNetwork,TYPES::SignatureKey>>:: inject_consensus_info(self.secondary(), event).await; } } -impl, MEMBERSHIP: Membership> +impl> TestableChannelImplementation< TYPES, - Message, + Message, MEMBERSHIP, - CombinedNetworks, - > for CombinedCommChannel + CombinedNetworks, + > for CombinedCommChannel { fn generate_network() -> Box) -> Self + 'static> { Box::new(move |network| CombinedCommChannel::new(network)) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index f445e357b4..61c7fe874b 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -125,11 +125,10 @@ pub struct Libp2pNetwork { inner: Arc>, } -impl> - TestableNetworkingImplementation> - for Libp2pNetwork, TYPES::SignatureKey> +impl TestableNetworkingImplementation> + for Libp2pNetwork, TYPES::SignatureKey> where - MessageKind: ViewMessage, + MessageKind: ViewMessage, { /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` /// with the purpose of generating libp2p networks. @@ -755,7 +754,7 @@ pub struct Libp2pCommChannel< I: NodeImplementation, MEMBERSHIP: Membership, >( - Arc, TYPES::SignatureKey>>, + Arc, TYPES::SignatureKey>>, PhantomData<(TYPES, I, MEMBERSHIP)>, ); @@ -764,16 +763,16 @@ impl, MEMBERSHIP: Membership, TYPES::SignatureKey>>) -> Self { + pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { Self(network, PhantomData) } } impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> + TestableNetworkingImplementation> for Libp2pCommChannel where - MessageKind: ViewMessage, + MessageKind: ViewMessage, { /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` /// with the purpose of generating libp2p networks. @@ -792,7 +791,7 @@ where is_da: bool, ) -> Box Self + 'static> { let generator = , + Message, TYPES::SignatureKey, > as TestableNetworkingImplementation<_, _>>::generator( expected_node_count, @@ -814,12 +813,12 @@ where // we don't really want to make this the default implementation because that forces it to require ConnectedNetwork to be implemented. The struct we implement over might use multiple ConnectedNetworks #[async_trait] impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> + CommunicationChannel, MEMBERSHIP> for Libp2pCommChannel where - MessageKind: ViewMessage, + MessageKind: ViewMessage, { - type NETWORK = Libp2pNetwork, TYPES::SignatureKey>; + type NETWORK = Libp2pNetwork, TYPES::SignatureKey>; async fn wait_for_ready(&self) { self.0.wait_for_ready().await; @@ -842,10 +841,10 @@ where async fn broadcast_message( &self, - message: Message, - membership: &MEMBERSHIP, + message: Message, + membership: &TYPES::Membership, ) -> Result<(), NetworkError> { - let recipients = >::get_committee( + let recipients = ::Membership::get_committee( membership, message.kind.get_view_number(), ); @@ -854,7 +853,7 @@ where async fn direct_message( &self, - message: Message, + message: Message, recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { self.0.direct_message(message, recipient).await @@ -863,7 +862,7 @@ where fn recv_msgs<'a, 'b>( &'a self, transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> + ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, @@ -882,7 +881,7 @@ where async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { as ConnectedNetwork< - Message, + Message, TYPES::SignatureKey, >>::inject_consensus_info(&self.0, event) .await; @@ -892,13 +891,13 @@ where impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, - Message, + Message, MEMBERSHIP, - Libp2pNetwork, TYPES::SignatureKey>, + Libp2pNetwork, TYPES::SignatureKey>, > for Libp2pCommChannel { fn generate_network( - ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> + ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> { Box::new(move |network| Libp2pCommChannel::new(network)) } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 41f153120f..ca2202b2a5 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -242,9 +242,8 @@ impl MemoryNetwork { } } -impl> - TestableNetworkingImplementation> - for MemoryNetwork, TYPES::SignatureKey> +impl TestableNetworkingImplementation> + for MemoryNetwork, TYPES::SignatureKey> { fn generator( _expected_node_count: usize, @@ -456,7 +455,7 @@ pub struct MemoryCommChannel< I: NodeImplementation, MEMBERSHIP: Membership, >( - Arc, TYPES::SignatureKey>>, + Arc, TYPES::SignatureKey>>, PhantomData<(I, MEMBERSHIP)>, ); @@ -465,16 +464,16 @@ impl, MEMBERSHIP: Membership, TYPES::SignatureKey>>) -> Self { + pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { Self(network, PhantomData) } } impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> + TestableNetworkingImplementation> for MemoryCommChannel where - MessageKind: ViewMessage, + MessageKind: ViewMessage, { fn generator( expected_node_count: usize, @@ -484,7 +483,7 @@ where is_da: bool, ) -> Box Self + 'static> { let generator = , + Message, TYPES::SignatureKey, > as TestableNetworkingImplementation<_, _>>::generator( expected_node_count, @@ -503,12 +502,12 @@ where #[async_trait] impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> + CommunicationChannel, MEMBERSHIP> for MemoryCommChannel where - MessageKind: ViewMessage, + MessageKind: ViewMessage, { - type NETWORK = MemoryNetwork, TYPES::SignatureKey>; + type NETWORK = MemoryNetwork, TYPES::SignatureKey>; async fn wait_for_ready(&self) { self.0.wait_for_ready().await; @@ -531,10 +530,10 @@ where async fn broadcast_message( &self, - message: Message, - election: &MEMBERSHIP, + message: Message, + election: &TYPES::Membership, ) -> Result<(), NetworkError> { - let recipients = >::get_committee( + let recipients = ::Membership::get_committee( election, message.kind.get_view_number(), ); @@ -543,7 +542,7 @@ where async fn direct_message( &self, - message: Message, + message: Message, recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { self.0.direct_message(message, recipient).await @@ -552,7 +551,7 @@ where fn recv_msgs<'a, 'b>( &'a self, transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> + ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, @@ -565,13 +564,13 @@ where impl, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, - Message, + Message, MEMBERSHIP, - MemoryNetwork, TYPES::SignatureKey>, + MemoryNetwork, TYPES::SignatureKey>, > for MemoryCommChannel { fn generate_network( - ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> + ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> { Box::new(move |network| MemoryCommChannel::new(network)) } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index c1d044c2be..0f1fbb9fe4 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -48,7 +48,7 @@ pub struct WebCommChannel< I: NodeImplementation, MEMBERSHIP: Membership, >( - Arc, TYPES::SignatureKey, TYPES>>, + Arc, TYPES::SignatureKey, TYPES>>, PhantomData<(MEMBERSHIP, I)>, ); @@ -57,9 +57,7 @@ impl, MEMBERSHIP: Membership, TYPES::SignatureKey, TYPES>>, - ) -> Self { + pub fn new(network: Arc, TYPES::SignatureKey, TYPES>>) -> Self { Self(network, PhantomData) } } @@ -591,15 +589,15 @@ impl< #[async_trait] impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> + CommunicationChannel, MEMBERSHIP> for WebCommChannel { - type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; + type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; /// Blocks until node is successfully initialized /// into the network async fn wait_for_ready(&self) { as ConnectedNetwork< - Message, + Message, TYPES::SignatureKey, >>::wait_for_ready(&self.0) .await; @@ -609,7 +607,7 @@ impl, MEMBERSHIP: Membership bool { as ConnectedNetwork< - Message, + Message, TYPES::SignatureKey, >>::is_ready(&self.0) .await @@ -625,7 +623,7 @@ impl, MEMBERSHIP: Membership as ConnectedNetwork< - Message, + Message, TYPES::SignatureKey, >>::shut_down(&self.0) .await; @@ -637,8 +635,8 @@ impl, MEMBERSHIP: Membership, - _election: &MEMBERSHIP, + message: Message, + _election: &TYPES::Membership, ) -> Result<(), NetworkError> { self.0.broadcast_message(message, BTreeSet::new()).await } @@ -647,7 +645,7 @@ impl, MEMBERSHIP: Membership, + message: Message, recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { self.0.direct_message(message, recipient).await @@ -660,14 +658,14 @@ impl, MEMBERSHIP: Membership( &'a self, transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> + ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, { let closure = async move { as ConnectedNetwork< - Message, + Message, TYPES::SignatureKey, >>::recv_msgs(&self.0, transmit_type) .await @@ -677,7 +675,7 @@ impl, MEMBERSHIP: Membership) { as ConnectedNetwork< - Message, + Message, TYPES::SignatureKey, >>::inject_consensus_info(&self.0, event) .await; @@ -1204,9 +1202,8 @@ impl< } } -impl> - TestableNetworkingImplementation> - for WebServerNetwork, TYPES::SignatureKey, TYPES> +impl TestableNetworkingImplementation> + for WebServerNetwork, TYPES::SignatureKey, TYPES> { fn generator( expected_node_count: usize, @@ -1256,7 +1253,7 @@ impl> } impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> + TestableNetworkingImplementation> for WebCommChannel { fn generator( @@ -1267,7 +1264,7 @@ impl, MEMBERSHIP: Membership Box Self + 'static> { let generator = , + Message, TYPES::SignatureKey, TYPES, > as TestableNetworkingImplementation<_, _>>::generator( @@ -1288,14 +1285,13 @@ impl, MEMBERSHIP: Membership, MEMBERSHIP: Membership> TestableChannelImplementation< TYPES, - Message, + Message, MEMBERSHIP, - WebServerNetwork, TYPES::SignatureKey, TYPES>, + WebServerNetwork, TYPES::SignatureKey, TYPES>, > for WebCommChannel { fn generate_network() -> Box< - dyn Fn(Arc, TYPES::SignatureKey, TYPES>>) -> Self - + 'static, + dyn Fn(Arc, TYPES::SignatureKey, TYPES>>) -> Self + 'static, > { Box::new(move |network| WebCommChannel::new(network)) } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index a7a1d223f1..3483a07b69 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -48,7 +48,7 @@ pub struct SystemContextHandle> { /// method is needed to generate new receivers to expose to the user pub(crate) output_event_stream: ChannelStream>, /// access to the internal ev ent stream, in case we need to, say, shut something down - pub(crate) internal_event_stream: ChannelStream>, + pub(crate) internal_event_stream: ChannelStream>, /// registry for controlling tasks pub(crate) registry: GlobalRegistry, @@ -100,8 +100,8 @@ impl + 'static> SystemContextHandl /// NOTE: this is only used for sanity checks in our tests pub async fn get_internal_event_stream_known_impl( &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { + filter: FilterEvent>, + ) -> (UnboundedStream>, StreamId) { self.internal_event_stream.subscribe(filter).await } @@ -243,7 +243,7 @@ impl + 'static> SystemContextHandl /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function #[cfg(feature = "hotshot-testing")] - pub async fn send_broadcast_consensus_message(&self, msg: SequencingMessage) { + pub async fn send_broadcast_consensus_message(&self, msg: SequencingMessage) { let _result = self .hotshot .send_broadcast_message(MessageKind::from_consensus_message(msg)) @@ -254,7 +254,7 @@ impl + 'static> SystemContextHandl #[cfg(feature = "hotshot-testing")] pub async fn send_direct_consensus_message( &self, - msg: SequencingMessage, + msg: SequencingMessage, recipient: TYPES::SignatureKey, ) { let _result = self diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a31f09efb3..c1b8eebf2c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -30,9 +30,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::{ConsensusExchange, QuorumExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{ - CommitteeEx, NodeImplementation, NodeType, QuorumEx, QuorumMembership, TimeoutEx, - }, + node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, @@ -65,11 +63,11 @@ pub struct ConsensusTaskState< A: ConsensusApi + 'static, > where QuorumEx: - ConsensusExchange, Commitment = Commitment>>, + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { /// The global task registry pub registry: GlobalRegistry, @@ -109,7 +107,7 @@ pub struct ConsensusTaskState< pub timeout_task: JoinHandle<()>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// Event stream to publish events to the application layer pub output_event_stream: ChannelStream>, @@ -133,9 +131,9 @@ pub struct ConsensusTaskState< pub struct VoteCollectionTaskState> where QuorumEx: - ConsensusExchange, Commitment = Commitment>>, + ConsensusExchange, Commitment = Commitment>>, TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { /// the quorum exchange pub quorum_exchange: Arc>, @@ -145,28 +143,20 @@ where #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< - VoteAccumulator2< - TYPES, - QuorumVote>, - QuorumCertificate2, - >, + VoteAccumulator2, QuorumCertificate2>, QuorumCertificate2, >, /// Accumulator for votes #[allow(clippy::type_complexity)] pub timeout_accumulator: Either< - VoteAccumulator2< - TYPES, - TimeoutVote2>, - TimeoutCertificate2, - >, + VoteAccumulator2, TimeoutCertificate2>, TimeoutCertificate2, >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, /// The event stream shared by all tasks - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// Node id pub id: u64, } @@ -174,9 +164,9 @@ where impl> TS for VoteCollectionTaskState where QuorumEx: - ConsensusExchange, Commitment = Commitment>>, + ConsensusExchange, Commitment = Commitment>>, TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { } @@ -184,16 +174,20 @@ where async fn vote_handle>( mut state: VoteCollectionTaskState, - event: HotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, VoteCollectionTaskState, ) where - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, + QuorumEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment>, + Membership = TYPES::Membership, + >, TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { match event { HotShotEvent::QuorumVoteRecv(vote) => { @@ -302,12 +296,24 @@ impl< > ConsensusTaskState where TYPES::BlockHeader: BlockHeader, - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment>, + Membership = TYPES::Membership, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, + TimeoutEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -384,16 +390,15 @@ where timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), }; - let vote = - QuorumVote::>::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(), - }, - view, - self.quorum_exchange.public_key(), - self.quorum_exchange.private_key(), - ); - let message = GeneralConsensusMessage::::Vote(vote); + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + self.quorum_exchange.public_key(), + self.quorum_exchange.private_key(), + ); + let message = GeneralConsensusMessage::::Vote(vote); if let GeneralConsensusMessage::Vote(vote) = message { debug!( @@ -467,16 +472,15 @@ where error!("Block payload commitment does not equal parent commitment"); return false; } - let vote = - QuorumVote::>::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(), - }, - view, - self.quorum_exchange.public_key(), - self.quorum_exchange.private_key(), - ); - GeneralConsensusMessage::::Vote(vote) + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + self.quorum_exchange.public_key(), + self.quorum_exchange.private_key(), + ); + GeneralConsensusMessage::::Vote(vote) } else { error!("Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", cert, self.cur_view ); return false; @@ -591,7 +595,7 @@ where /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] - pub async fn handle_event(&mut self, event: HotShotEvent) { + pub async fn handle_event(&mut self, event: HotShotEvent) { match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( @@ -1338,27 +1342,27 @@ impl< > TS for ConsensusTaskState where QuorumEx: - ConsensusExchange, Commitment = Commitment>>, + ConsensusExchange, Commitment = Commitment>>, CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { } /// Type allias for consensus' vote collection task pub type VoteCollectionTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, VoteCollectionTaskState, >; /// Type alias for Consensus task pub type ConsensusTaskTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ConsensusTaskState, >; @@ -1368,7 +1372,7 @@ pub async fn sequencing_consensus_handle< I: NodeImplementation, A: ConsensusApi + 'static, >( - event: HotShotEvent, + event: HotShotEvent, mut state: ConsensusTaskState, ) -> ( std::option::Option, @@ -1376,12 +1380,24 @@ pub async fn sequencing_consensus_handle< ) where TYPES::BlockHeader: BlockHeader, - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment>, + Membership = TYPES::Membership, + >, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, + TimeoutEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { if let HotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) @@ -1392,9 +1408,7 @@ where } /// Filter for consensus, returns true for event types the consensus task subscribes to. -pub fn consensus_event_filter>( - event: &HotShotEvent, -) -> bool { +pub fn consensus_event_filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::QuorumProposalRecv(_, _) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c244bc8cab..098bf6e40e 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -12,6 +12,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::simple_certificate::DACertificate2; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, @@ -30,9 +31,6 @@ use hotshot_types::{ vote2::HasViewNumber, vote2::VoteAccumulator2, }; -use hotshot_types::{ - simple_certificate::DACertificate2, traits::node_implementation::CommitteeMembership, -}; use snafu::Snafu; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; @@ -49,7 +47,7 @@ pub struct DATaskState< A: ConsensusApi + 'static, > where CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { /// The state's api pub api: A, @@ -69,7 +67,7 @@ pub struct DATaskState< pub vote_collector: Option<(TYPES::Time, usize, usize)>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// This state's ID pub id: u64, @@ -79,45 +77,45 @@ pub struct DATaskState< pub struct DAVoteCollectionTaskState> where CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { /// the committee exchange pub committee_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulates DA votes pub accumulator: Either< - VoteAccumulator2< - TYPES, - DAVote2>, - DACertificate2, - >, + VoteAccumulator2, DACertificate2>, DACertificate2, >, /// the current view pub cur_view: TYPES::Time, /// event stream for channel events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// the id of this task state pub id: u64, } impl> TS for DAVoteCollectionTaskState where CommitteeEx: - ConsensusExchange, Commitment = Commitment> + ConsensusExchange, Commitment = Commitment> { } #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "DA Vote Collection Task", level = "error")] async fn vote_handle>( mut state: DAVoteCollectionTaskState, - event: HotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, DAVoteCollectionTaskState, ) where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { match event { HotShotEvent::DAVoteRecv(vote) => { @@ -172,14 +170,18 @@ where impl, A: ConsensusApi + 'static> DATaskState where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] pub async fn handle_event( &mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> Option { match event { HotShotEvent::DAProposalRecv(proposal, sender) => { @@ -444,7 +446,7 @@ where } /// Filter the DA event. - pub fn filter(event: &HotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::DAProposalRecv(_, _) @@ -462,22 +464,22 @@ impl, A: ConsensusApi + for DATaskState where CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { } /// Type alias for DA Vote Collection Types pub type DAVoteCollectionTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, DAVoteCollectionTaskState, >; /// Type alias for DA Task Types pub type DATaskTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, DATaskState, >; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f575aedca2..9c68f75a93 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -13,39 +13,36 @@ use hotshot_types::{ DAVote2, QuorumVote, TimeoutVote2, VIDVote2, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, - traits::node_implementation::{ - CommitteeMembership, NodeImplementation, NodeType, QuorumMembership, VIDMembership, - ViewSyncMembership, - }, + traits::node_implementation::NodeType, }; /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub enum HotShotEvent> { +pub enum HotShotEvent { /// Shutdown the task Shutdown, /// A quorum proposal has been received from the network; handled by the consensus task QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(QuorumVote>), + QuorumVoteRecv(QuorumVote), /// A timeout vote recevied from the network; handled by consensus task - TimeoutVoteRecv(TimeoutVote2>), + TimeoutVoteRecv(TimeoutVote2), /// Send a timeout vote to the network; emitted by consensus task replicas - TimeoutVoteSend(TimeoutVote2>), + TimeoutVoteSend(TimeoutVote2), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task - DAVoteRecv(DAVote2>), + DAVoteRecv(DAVote2), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task DACRecv(DACertificate2), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(QuorumVote>), + QuorumVoteSend(QuorumVote), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal - DAVoteSend(DAVote2>), + DAVoteSend(DAVote2), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QCFormed(Either, TimeoutCertificate2>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task @@ -56,18 +53,18 @@ pub enum HotShotEvent> { ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task - ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote>), + ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote), /// Receive a `ViewSyncCommitVote` from the network; received by a relay in the view sync task - ViewSyncCommitVoteRecv(ViewSyncCommitVote>), + ViewSyncCommitVoteRecv(ViewSyncCommitVote), /// Receive a `ViewSyncFinalizeVote` from the network; received by a relay in the view sync task - ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote>), + ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote), /// Send a `ViewSyncPreCommitVote` from the network; emitted by a replica in the view sync task - ViewSyncPreCommitVoteSend(ViewSyncPreCommitVote>), + ViewSyncPreCommitVoteSend(ViewSyncPreCommitVote), /// Send a `ViewSyncCommitVote` from the network; emitted by a replica in the view sync task - ViewSyncCommitVoteSend(ViewSyncCommitVote>), + ViewSyncCommitVoteSend(ViewSyncCommitVote), /// Send a `ViewSyncFinalizeVote` from the network; emitted by a replica in the view sync task - ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote>), + ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote), /// Receive a `ViewSyncPreCommitCertificate2` from the network; received by a replica in the view sync task ViewSyncPreCommitCertificate2Recv(ViewSyncPreCommitCertificate2), @@ -108,11 +105,11 @@ pub enum HotShotEvent> { /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal /// /// Like [`DAVoteSend`] - VidVoteSend(VIDVote2>), + VidVoteSend(VIDVote2), /// A VID vote has been received by the network; handled by the DA task /// /// Like [`DAVoteRecv`] - VidVoteRecv(VIDVote2>), + VidVoteRecv(VIDVote2), /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task /// /// Like [`DACSend`] diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 227c3b7c89..277f878312 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -8,28 +8,28 @@ use hotshot_task::{ task_impls::{HSTWithEvent, TaskBuilder}, task_launcher::TaskRunner, }; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; use std::{collections::HashMap, future::Future, sync::Arc}; /// The state for the test harness task. Keeps track of which events and how many we expect to get -pub struct TestHarnessState> { +pub struct TestHarnessState { /// The expected events we get from the test. Maps an event to the number of times we expect to see it - expected_output: HashMap, usize>, + expected_output: HashMap, usize>, } -impl> TS for TestHarnessState {} +impl TS for TestHarnessState {} /// Error emitted if the test harness task fails #[derive(Snafu, Debug)] pub struct TestHarnessTaskError {} /// Type alias for the Test Harness Task -pub type TestHarnessTaskTypes = HSTWithEvent< +pub type TestHarnessTaskTypes = HSTWithEvent< TestHarnessTaskError, - HotShotEvent, - ChannelStream>, - TestHarnessState, + HotShotEvent, + ChannelStream>, + TestHarnessState, >; /// Runs a test by building the task using `build_fn` and then passing it the `input` events @@ -40,14 +40,13 @@ pub type TestHarnessTaskTypes = HSTWithEvent< /// # Panics /// Panics if any state the test expects is not set. Panicing causes a test failure #[allow(clippy::implicit_hasher)] -pub async fn run_harness( - input: Vec>, - expected_output: HashMap, usize>, - event_stream: Option>>, - build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, +pub async fn run_harness( + input: Vec>, + expected_output: HashMap, usize>, + event_stream: Option>>, + build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, ) where TYPES: NodeType, - I: NodeImplementation, Fut: Future, { let task_runner = TaskRunner::new(); @@ -58,7 +57,7 @@ pub async fn run_harness( async move { handle_event(event, state) }.boxed() })); let filter = FilterEvent::default(); - let builder = TaskBuilder::>::new("test_harness".to_string()) + let builder = TaskBuilder::>::new("test_harness".to_string()) .register_event_stream(event_stream.clone(), filter) .await .register_registry(&mut registry.clone()) @@ -88,12 +87,12 @@ pub async fn run_harness( /// # Panics /// Will panic to fail the test when it receives and unexpected event #[allow(clippy::needless_pass_by_value)] -pub fn handle_event>( - event: HotShotEvent, - mut state: TestHarnessState, +pub fn handle_event( + event: HotShotEvent, + mut state: TestHarnessState, ) -> ( std::option::Option, - TestHarnessState, + TestHarnessState, ) { assert!( state.expected_output.contains_key(&event), diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 9be3f8b304..a684843c77 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -14,12 +14,12 @@ use hotshot_types::{ traits::{ election::Membership, network::{CommunicationChannel, TransmitType}, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::NodeType, }, vote2::{HasViewNumber, Vote2}, }; use snafu::Snafu; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; use tracing::error; use tracing::instrument; @@ -37,16 +37,16 @@ pub enum NetworkTaskKind { } /// the network message task state -pub struct NetworkMessageTaskState> { +pub struct NetworkMessageTaskState { /// event stream (used for publishing) - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, } -impl> TS for NetworkMessageTaskState {} +impl TS for NetworkMessageTaskState {} -impl> NetworkMessageTaskState { +impl NetworkMessageTaskState { /// Handle the message. - pub async fn handle_messages(&mut self, messages: Vec>) { + pub async fn handle_messages(&mut self, messages: Vec>) { // We will send only one event for a vector of transactions. let mut transactions = Vec::new(); for message in messages { @@ -122,7 +122,6 @@ impl> NetworkMessageTaskState unimplemented!(), }; } if !transactions.is_empty() { @@ -136,36 +135,28 @@ impl> NetworkMessageTaskState, - MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, + COMMCHANNEL: CommunicationChannel, TYPES::Membership>, > { /// comm channel pub channel: COMMCHANNEL, /// event stream - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// view number pub view: TYPES::Time, - /// phantom data - pub phantom: PhantomData, // TODO ED Need to add exchange so we can get the recipient key and our own key? } impl< TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, - > TS for NetworkEventTaskState + COMMCHANNEL: CommunicationChannel, TYPES::Membership>, + > TS for NetworkEventTaskState { } impl< TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, - COMMCHANNEL: CommunicationChannel, MEMBERSHIP>, - > NetworkEventTaskState + COMMCHANNEL: CommunicationChannel, TYPES::Membership>, + > NetworkEventTaskState { /// Handle the given event. /// @@ -177,13 +168,13 @@ impl< pub async fn handle_event( &mut self, - event: HotShotEvent, - membership: &MEMBERSHIP, + event: HotShotEvent, + membership: &TYPES::Membership, ) -> Option { let (sender, message_kind, transmit_type, recipient) = match event.clone() { HotShotEvent::QuorumProposalSend(proposal, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::Proposal(proposal), ))), TransmitType::Broadcast, @@ -193,7 +184,7 @@ impl< // ED Each network task is subscribed to all these message types. Need filters per network task HotShotEvent::QuorumVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::Vote(vote.clone()), ))), TransmitType::Direct, @@ -201,7 +192,7 @@ impl< ), HotShotEvent::VidDisperseSend(proposal, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( + MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidDisperseMsg(proposal), ))), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 @@ -209,7 +200,7 @@ impl< ), HotShotEvent::DAProposalSend(proposal, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( + MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DAProposal(proposal), ))), TransmitType::Broadcast, @@ -217,7 +208,7 @@ impl< ), HotShotEvent::VidVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Right( + MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidVote(vote.clone()), ))), TransmitType::Direct, @@ -225,7 +216,7 @@ impl< ), HotShotEvent::DAVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Right( + MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DAVote(vote.clone()), ))), TransmitType::Direct, @@ -233,7 +224,7 @@ impl< ), HotShotEvent::VidCertSend(certificate, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( + MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::VidCertificate(certificate), ))), TransmitType::Broadcast, @@ -242,7 +233,7 @@ impl< // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee HotShotEvent::DACSend(certificate, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( + MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DACertificate(certificate), ))), TransmitType::Broadcast, @@ -250,7 +241,7 @@ impl< ), HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), ))), TransmitType::Direct, @@ -258,7 +249,7 @@ impl< ), HotShotEvent::ViewSyncCommitVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), ))), TransmitType::Direct, @@ -266,7 +257,7 @@ impl< ), HotShotEvent::ViewSyncFinalizeVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), ))), TransmitType::Direct, @@ -274,7 +265,7 @@ impl< ), HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate.clone()), ))), TransmitType::Broadcast, @@ -282,7 +273,7 @@ impl< ), HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncCommitCertificate(certificate.clone()), ))), TransmitType::Broadcast, @@ -291,7 +282,7 @@ impl< HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate.clone()), ))), TransmitType::Broadcast, @@ -299,7 +290,7 @@ impl< ), HotShotEvent::TimeoutVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( + MessageKind::::from_consensus_message(SequencingMessage(Left( GeneralConsensusMessage::TimeoutVote(vote.clone()), ))), TransmitType::Direct, @@ -322,7 +313,6 @@ impl< let message = Message { sender, kind: message_kind, - _phantom: PhantomData, }; let transmit_result = match transmit_type { TransmitType::Direct => { @@ -342,7 +332,7 @@ impl< } /// network filter - pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { + pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { match task_kind { NetworkTaskKind::Quorum => FilterEvent(Arc::new(Self::quorum_filter)), NetworkTaskKind::Committee => FilterEvent(Arc::new(Self::committee_filter)), @@ -352,7 +342,7 @@ impl< } /// quorum filter - fn quorum_filter(event: &HotShotEvent) -> bool { + fn quorum_filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::QuorumProposalSend(_, _) @@ -365,7 +355,7 @@ impl< } /// committee filter - fn committee_filter(event: &HotShotEvent) -> bool { + fn committee_filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::DAProposalSend(_, _) @@ -376,7 +366,7 @@ impl< } /// vid filter - fn vid_filter(event: &HotShotEvent) -> bool { + fn vid_filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::Shutdown @@ -388,7 +378,7 @@ impl< } /// view sync filter - fn view_sync_filter(event: &HotShotEvent) -> bool { + fn view_sync_filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) @@ -408,18 +398,18 @@ impl< pub struct NetworkTaskError {} /// networking message task types -pub type NetworkMessageTaskTypes = HSTWithMessage< +pub type NetworkMessageTaskTypes = HSTWithMessage< NetworkTaskError, - Either, Messages>, + Either, Messages>, // A combination of broadcast and direct streams. - Merge>, GeneratedStream>>, - NetworkMessageTaskState, + Merge>, GeneratedStream>>, + NetworkMessageTaskState, >; /// network event task types -pub type NetworkEventTaskTypes = HSTWithEvent< +pub type NetworkEventTaskTypes = HSTWithEvent< NetworkTaskError, - HotShotEvent, - ChannelStream>, - NetworkEventTaskState, + HotShotEvent, + ChannelStream>, + NetworkEventTaskState, >; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 953b579825..86cc1856f7 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -47,7 +47,7 @@ pub struct TransactionTaskState< I: NodeImplementation, A: ConsensusApi + 'static, > where - QuorumEx: ConsensusExchange>, + QuorumEx: ConsensusExchange>, { /// The state's api pub api: A, @@ -70,7 +70,7 @@ pub struct TransactionTaskState< pub quorum_exchange: Arc>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// This state's ID pub id: u64, @@ -85,14 +85,14 @@ impl< A: ConsensusApi + 'static, > TransactionTaskState where - QuorumEx: ConsensusExchange>, + QuorumEx: ConsensusExchange>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] pub async fn handle_event( &mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> Option { match event { HotShotEvent::TransactionsRecv(transactions) => { @@ -287,7 +287,7 @@ where impl, A: ConsensusApi + 'static> TransactionTaskState where - QuorumEx: ConsensusExchange>, + QuorumEx: ConsensusExchange>, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( @@ -358,7 +358,7 @@ where } /// Event filter for the transaction task - pub fn filter(event: &HotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::TransactionsRecv(_) @@ -373,14 +373,14 @@ where impl, A: ConsensusApi + 'static> TS for TransactionTaskState where - QuorumEx: ConsensusExchange>, + QuorumEx: ConsensusExchange>, { } /// Type alias for DA Task Types pub type TransactionsTaskTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, TransactionTaskState, >; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 65b63f8bcc..5d8b1698cc 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -12,7 +12,7 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::{network::ConsensusIntentEvent, node_implementation::VIDMembership}; +use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ consensus::{Consensus, View}, message::Message, @@ -48,7 +48,7 @@ pub struct VIDTaskState< A: ConsensusApi + 'static, > where VIDEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { /// The state's api pub api: A, @@ -68,7 +68,7 @@ pub struct VIDTaskState< pub vote_collector: Option<(TYPES::Time, usize, usize)>, /// Global events stream to publish events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// This state's ID pub id: u64, @@ -78,34 +78,34 @@ pub struct VIDTaskState< pub struct VIDVoteCollectionTaskState> where VIDEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { /// the vid exchange pub vid_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulates VID votes pub accumulator: Either< - VoteAccumulator2>, VIDCertificate2>, + VoteAccumulator2, VIDCertificate2>, VIDCertificate2, >, /// the current view pub cur_view: TYPES::Time, /// event stream for channel events - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// the id of this task state pub id: u64, } impl> TS for VIDVoteCollectionTaskState where VIDEx: - ConsensusExchange, Commitment = Commitment> + ConsensusExchange, Commitment = Commitment> { } #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "VID Vote Collection Task", level = "error")] async fn vote_handle( mut state: VIDVoteCollectionTaskState, - event: HotShotEvent, + event: HotShotEvent, ) -> ( Option, VIDVoteCollectionTaskState, @@ -113,8 +113,12 @@ async fn vote_handle( where TYPES: NodeType, I: NodeImplementation, - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { match event { HotShotEvent::VidVoteRecv(vote) => { @@ -170,14 +174,18 @@ where impl, A: ConsensusApi + 'static> VIDTaskState where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange< + TYPES, + Message, + Commitment = Commitment, + Membership = TYPES::Membership, + >, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] pub async fn handle_event( &mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> Option { match event { HotShotEvent::VidVoteRecv(vote) => { @@ -404,7 +412,7 @@ where } /// Filter the VID event. - pub fn filter(event: &HotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::Shutdown @@ -421,22 +429,22 @@ impl, A: ConsensusApi + for VIDTaskState where VIDEx: - ConsensusExchange, Commitment = Commitment>, + ConsensusExchange, Commitment = Commitment>, { } /// Type alias for VID Vote Collection Types pub type VIDVoteCollectionTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, VIDVoteCollectionTaskState, >; /// Type alias for VID Task Types pub type VIDTaskTypes = HSTWithEvent< ConsensusTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, VIDTaskState, >; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 5d493cd833..b8e147ed3a 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -18,7 +18,7 @@ use hotshot_types::{ ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, ViewSyncPreCommitVote, }, - traits::{network::ConsensusIntentEvent, node_implementation::ViewSyncMembership}, + traits::network::ConsensusIntentEvent, vote2::{Certificate2, HasViewNumber, Vote2, VoteAccumulator2}, }; @@ -67,12 +67,12 @@ pub struct ViewSyncTaskState< I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, > where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: ViewSyncExchangeType>, { /// Registry to register sub tasks pub registry: GlobalRegistry, /// Event stream to publish events to - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// View HotShot is currently in pub current_view: TYPES::Time, /// View HotShot wishes to be in @@ -106,15 +106,15 @@ impl< A: ConsensusApi + 'static + std::clone::Clone, > TS for ViewSyncTaskState where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: ViewSyncExchangeType>, { } /// Types for the main view sync task pub type ViewSyncTaskStateTypes = HSTWithEvent< ViewSyncTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ViewSyncTaskState, >; @@ -124,7 +124,7 @@ pub struct ViewSyncReplicaTaskState< I: NodeImplementation, A: ConsensusApi + 'static, > where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: ViewSyncExchangeType>, { /// Timeout for view sync rounds pub view_sync_timeout: Duration, @@ -148,21 +148,22 @@ pub struct ViewSyncReplicaTaskState< /// HotShot consensus API pub api: A, /// Event stream to publish events to - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, } impl, A: ConsensusApi + 'static> TS for ViewSyncReplicaTaskState where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { } /// Types for view sync replica state pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< ViewSyncTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ViewSyncReplicaTaskState, >; @@ -174,7 +175,7 @@ pub struct ViewSyncRelayTaskState< CERTIFICATE: Certificate2, > { /// Event stream to publish events to - pub event_stream: ChannelStream>, + pub event_stream: ChannelStream>, /// View sync exchange pub exchange: Arc>, @@ -200,8 +201,8 @@ impl< /// Types used by the view sync relay task pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< ViewSyncTaskError, - HotShotEvent, - ChannelStream>, + HotShotEvent, + ChannelStream>, ViewSyncRelayTaskState, >; @@ -211,12 +212,13 @@ impl< A: ConsensusApi + 'static + std::clone::Clone, > ViewSyncTaskState where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task - pub async fn handle_event(&mut self, event: HotShotEvent) { + pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { info!("Received view sync cert for phase {:?}", certificate); @@ -325,7 +327,7 @@ where let mut relay_state = ViewSyncRelayTaskState::< TYPES, I, - ViewSyncPreCommitVote>, + ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, > { event_stream: self.event_stream.clone(), @@ -350,7 +352,7 @@ where state: ViewSyncRelayTaskState< TYPES, I, - ViewSyncPreCommitVote>, + ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, >| { async move { state.handle_event(event).await }.boxed() @@ -362,7 +364,7 @@ where ViewSyncRelayTaskStateTypes< TYPES, I, - ViewSyncPreCommitVote>, + ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, >, >::new(name) @@ -411,7 +413,7 @@ where let mut relay_state = ViewSyncRelayTaskState::< TYPES, I, - ViewSyncCommitVote>, + ViewSyncCommitVote, ViewSyncCommitCertificate2, > { event_stream: self.event_stream.clone(), @@ -436,7 +438,7 @@ where state: ViewSyncRelayTaskState< TYPES, I, - ViewSyncCommitVote>, + ViewSyncCommitVote, ViewSyncCommitCertificate2, >| { async move { state.handle_event(event).await }.boxed() @@ -448,7 +450,7 @@ where ViewSyncRelayTaskStateTypes< TYPES, I, - ViewSyncCommitVote>, + ViewSyncCommitVote, ViewSyncCommitCertificate2, >, >::new(name) @@ -497,7 +499,7 @@ where let mut relay_state = ViewSyncRelayTaskState::< TYPES, I, - ViewSyncFinalizeVote>, + ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, > { event_stream: self.event_stream.clone(), @@ -522,7 +524,7 @@ where state: ViewSyncRelayTaskState< TYPES, I, - ViewSyncFinalizeVote>, + ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, >| { async move { state.handle_event(event).await }.boxed() @@ -534,7 +536,7 @@ where ViewSyncRelayTaskStateTypes< TYPES, I, - ViewSyncFinalizeVote>, + ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, >, >::new(name) @@ -719,7 +721,7 @@ where } /// Filter view sync related events. - pub fn filter(event: &HotShotEvent) -> bool { + pub fn filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) @@ -739,13 +741,14 @@ where impl, A: ConsensusApi + 'static> ViewSyncReplicaTaskState where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle_event( mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, ViewSyncReplicaTaskState, @@ -785,17 +788,16 @@ where self.relay = certificate.get_data().relay; } - let vote = - ViewSyncCommitVote::>::create_signed_vote( - ViewSyncCommitData { - relay: certificate.get_data().relay, - round: self.next_view, - }, - self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), - ); - let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); + let vote = ViewSyncCommitVote::::create_signed_vote( + ViewSyncCommitData { + relay: certificate.get_data().relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { self.event_stream @@ -854,17 +856,16 @@ where self.relay = certificate.get_data().relay; } - let vote = - ViewSyncFinalizeVote::>::create_signed_vote( - ViewSyncFinalizeData { - relay: certificate.get_data().relay, - round: self.next_view, - }, - self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), - ); - let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); + let vote = ViewSyncFinalizeVote::::create_signed_vote( + ViewSyncFinalizeData { + relay: certificate.get_data().relay, + round: self.next_view, + }, + self.next_view, + self.exchange.public_key(), + self.exchange.private_key(), + ); + let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { self.event_stream @@ -938,14 +939,16 @@ where return (None, self); } - let vote = - ViewSyncPreCommitVote::>::create_signed_vote( - ViewSyncPreCommitData { relay: 0, round: view_number}, + let vote = ViewSyncPreCommitVote::::create_signed_vote( + ViewSyncPreCommitData { + relay: 0, + round: view_number, + }, view_number, self.exchange.public_key(), self.exchange.private_key(), ); - let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); + let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { self.event_stream @@ -980,8 +983,7 @@ where self.relay += 1; match self.phase { ViewSyncPhase::None => { - let vote = - ViewSyncPreCommitVote::>::create_signed_vote( + let vote = ViewSyncPreCommitVote::::create_signed_vote( ViewSyncPreCommitData { relay: self.relay, round: self.next_view, @@ -991,7 +993,7 @@ where self.exchange.private_key(), ); let message = - GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); + GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { self.event_stream @@ -1000,8 +1002,7 @@ where } } ViewSyncPhase::PreCommit => { - let vote = - ViewSyncCommitVote::>::create_signed_vote( + let vote = ViewSyncCommitVote::::create_signed_vote( ViewSyncCommitData { relay: self.relay, round: self.next_view, @@ -1011,7 +1012,7 @@ where self.exchange.private_key(), ); let message = - GeneralConsensusMessage::::ViewSyncCommitVote(vote); + GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { self.event_stream @@ -1020,8 +1021,7 @@ where } } ViewSyncPhase::Commit => { - let vote = - ViewSyncFinalizeVote::>::create_signed_vote( + let vote = ViewSyncFinalizeVote::::create_signed_vote( ViewSyncFinalizeData { relay: self.relay, round: self.next_view, @@ -1031,7 +1031,7 @@ where self.exchange.private_key(), ); let message = - GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); + GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { self.event_stream @@ -1073,24 +1073,25 @@ impl> ViewSyncRelayTaskState< TYPES, I, - ViewSyncPreCommitVote>, + ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, > where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] #[allow(clippy::type_complexity)] pub async fn handle_event( mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, ViewSyncRelayTaskState< TYPES, I, - ViewSyncPreCommitVote>, + ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, >, ) { @@ -1142,27 +1143,23 @@ where } impl> - ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncCommitVote>, - ViewSyncCommitCertificate2, - > + ViewSyncRelayTaskState, ViewSyncCommitCertificate2> where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] #[allow(clippy::type_complexity)] pub async fn handle_event( mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, ViewSyncRelayTaskState< TYPES, I, - ViewSyncCommitVote>, + ViewSyncCommitVote, ViewSyncCommitCertificate2, >, ) { @@ -1216,24 +1213,25 @@ impl> ViewSyncRelayTaskState< TYPES, I, - ViewSyncFinalizeVote>, + ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, > where - ViewSyncEx: ViewSyncExchangeType>, + ViewSyncEx: + ViewSyncExchangeType, Membership = TYPES::Membership>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] #[allow(clippy::type_complexity)] pub async fn handle_event( mut self, - event: HotShotEvent, + event: HotShotEvent, ) -> ( std::option::Option, ViewSyncRelayTaskState< TYPES, I, - ViewSyncFinalizeVote>, + ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, >, ) { diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index f5162be0be..6b5b027ba0 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -73,7 +73,7 @@ type StaticLibp2pDAComm = Libp2pCommChannel; -type StaticCombinedDAComm = CombinedCommChannel; +type StaticCombinedDAComm = CombinedCommChannel; pub type StaticMemoryQuorumComm = MemoryCommChannel; @@ -81,7 +81,7 @@ type StaticLibp2pQuorumComm = Libp2pCommChannel; -type StaticCombinedQuorumComm = CombinedCommChannel; +type StaticCombinedQuorumComm = CombinedCommChannel; pub type StaticMemoryViewSyncComm = MemoryCommChannel; @@ -89,7 +89,7 @@ type StaticLibp2pViewSyncComm = Libp2pCommChannel; -type StaticCombinedViewSyncComm = CombinedCommChannel; +type StaticCombinedViewSyncComm = CombinedCommChannel; pub type StaticMemoryVIDComm = MemoryCommChannel; @@ -97,30 +97,15 @@ type StaticLibp2pVIDComm = Libp2pCommChannel; -type StaticCombinedVIDComm = CombinedCommChannel; +type StaticCombinedVIDComm = CombinedCommChannel; pub type SequencingLibp2pExchange = Exchanges< TestTypes, - Message, - QuorumExchange< - TestTypes, - StaticMembership, - StaticLibp2pQuorumComm, - Message, - >, - CommitteeExchange< - TestTypes, - StaticMembership, - StaticLibp2pDAComm, - Message, - >, - ViewSyncExchange< - TestTypes, - StaticMembership, - StaticLibp2pViewSyncComm, - Message, - >, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; impl NodeImplementation for Libp2pImpl { @@ -129,10 +114,7 @@ impl NodeImplementation for Libp2pImpl { fn new_channel_maps( start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { + ) -> (ChannelMaps, Option>) { ( ChannelMaps::new(start_view), Some(ChannelMaps::new(start_view)), @@ -140,7 +122,7 @@ impl NodeImplementation for Libp2pImpl { } } -impl TestableExchange> for SequencingLibp2pExchange { +impl TestableExchange> for SequencingLibp2pExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -152,28 +134,28 @@ impl TestableExchange> for SequencingL ) -> ( , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, ) + 'static, > { let network_generator = Arc::new(, + Message, ::SignatureKey, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -187,25 +169,25 @@ impl TestableExchange> for SequencingL let quorum_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let view_sync_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let vid_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); @@ -216,29 +198,14 @@ impl TestableExchange> for SequencingL pub type SequencingMemoryExchange = Exchanges< TestTypes, - Message, - QuorumExchange< - TestTypes, - StaticMembership, - StaticMemoryQuorumComm, - Message, - >, - CommitteeExchange< - TestTypes, - StaticMembership, - StaticMemoryDAComm, - Message, - >, - ViewSyncExchange< - TestTypes, - StaticMembership, - StaticMemoryViewSyncComm, - Message, - >, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; -impl TestableExchange> for SequencingMemoryExchange { +impl TestableExchange> for SequencingMemoryExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -250,28 +217,28 @@ impl TestableExchange> for SequencingM ) -> ( , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, ) + 'static, > { let network_generator = Arc::new(, + Message, ::SignatureKey, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -280,11 +247,11 @@ impl TestableExchange> for SequencingM false, )); let network_da_generator = Arc::new(, + Message, ::SignatureKey, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -298,25 +265,25 @@ impl TestableExchange> for SequencingM let quorum_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); let vid_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); @@ -331,10 +298,7 @@ impl NodeImplementation for MemoryImpl { fn new_channel_maps( start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { + ) -> (ChannelMaps, Option>) { ( ChannelMaps::new(start_view), Some(ChannelMaps::new(start_view)), @@ -349,19 +313,14 @@ impl NodeImplementation for MemoryImpl { pub type SequencingWebExchanges = Exchanges< TestTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange< - TestTypes, - StaticMembership, - StaticWebViewSyncComm, - Message, - >, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; -impl TestableExchange> for SequencingWebExchanges { +impl TestableExchange> for SequencingWebExchanges { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -373,29 +332,29 @@ impl TestableExchange> for SequencingWebE ) -> ( , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, ) + 'static, > { let network_generator = Arc::new(, + Message, ::SignatureKey, _, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -404,12 +363,12 @@ impl TestableExchange> for SequencingWebE false, )); let network_da_generator = Arc::new(, + Message, ::SignatureKey, TestTypes, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -423,25 +382,25 @@ impl TestableExchange> for SequencingWebE let quorum_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); let vid_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); @@ -456,10 +415,7 @@ impl NodeImplementation for WebImpl { fn new_channel_maps( start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { + ) -> (ChannelMaps, Option>) { ( ChannelMaps::new(start_view), Some(ChannelMaps::new(start_view)), @@ -469,31 +425,11 @@ impl NodeImplementation for WebImpl { pub type CombinedExchange = Exchanges< TestTypes, - Message, - QuorumExchange< - TestTypes, - StaticMembership, - StaticCombinedQuorumComm, - Message, - >, - CommitteeExchange< - TestTypes, - StaticMembership, - StaticCombinedDAComm, - Message, - >, - ViewSyncExchange< - TestTypes, - StaticMembership, - StaticCombinedViewSyncComm, - Message, - >, - VIDExchange< - TestTypes, - StaticMembership, - StaticCombinedVIDComm, - Message, - >, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; impl NodeImplementation for CombinedImpl { @@ -502,10 +438,7 @@ impl NodeImplementation for CombinedImpl { fn new_channel_maps( start_view: ::Time, - ) -> ( - ChannelMaps, - Option>, - ) { + ) -> (ChannelMaps, Option>) { ( ChannelMaps::new(start_view), Some(ChannelMaps::new(start_view)), @@ -513,7 +446,7 @@ impl NodeImplementation for CombinedImpl { } } -impl TestableExchange> for CombinedExchange { +impl TestableExchange> for CombinedExchange { #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, @@ -525,29 +458,29 @@ impl TestableExchange> for CombinedE ) -> ( , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, , + Message, >>::Networking, ) + 'static, > { let web_server_network_generator = Arc::new(, + Message, ::SignatureKey, _, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -557,12 +490,12 @@ impl TestableExchange> for CombinedE )); let web_server_network_da_generator = Arc::new(, + Message, ::SignatureKey, TestTypes, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -572,11 +505,11 @@ impl TestableExchange> for CombinedE )); let libp2p_network_generator = Arc::new(, + Message, ::SignatureKey, > as TestableNetworkingImplementation< TestTypes, - Message, + Message, >>::generator( expected_node_count, num_bootstrap, @@ -606,26 +539,26 @@ impl TestableExchange> for CombinedE let quorum_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network.clone()); let committee_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network); let vid_chan = <, + Message, >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( )(network_da); (quorum_chan, committee_chan, view_sync_chan, vid_chan) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 27091c5493..0c7d86c1fd 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -595,7 +595,7 @@ pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< OverallSafetyTaskErr, GlobalTestEvent, ChannelStream, - (usize, Either, HotShotEvent>), - MergeN>, UnboundedStream>>>, + (usize, Either, HotShotEvent>), + MergeN>, UnboundedStream>>>, OverallSafetyTask, >; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index c564c0441f..6a0ea334b8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -33,7 +33,7 @@ pub async fn build_system_handle( node_id: u64, ) -> ( SystemContextHandle, - ChannelStream>, + ChannelStream>, ) { let builder = TestMetadata::default_multiple_rounds(); @@ -55,14 +55,14 @@ pub async fn build_system_handle( let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TestTypes, - Message, + Message, >>::Membership::default_election_config(config.total_nodes.get() as u64) }); let committee_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TestTypes, - Message, + Message, >>::Membership::default_election_config(config.total_nodes.get() as u64) }); let exchanges = >::Exchanges::create( diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 481f6d935d..165dd21c17 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -184,7 +184,7 @@ impl TestMetadata { ) -> TestLauncher where I: NodeImplementation, - >::Exchanges: TestableExchange>, + >::Exchanges: TestableExchange>, SystemContext: HotShotType, { let TestMetadata { @@ -240,7 +240,7 @@ impl TestMetadata { // TODO what's the difference between this and the second config? election_config: Some( as ConsensusExchange< TYPES, - Message, + Message, >>::Membership::default_election_config( total_nodes as u64 )), diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index d6320bdbb1..49ade61612 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -30,20 +30,20 @@ use super::{ pub type Networks = ( <<>::Exchanges as ExchangesType< TYPES, - Message, - >>::QuorumExchange as ConsensusExchange>>::Networking, + Message, + >>::QuorumExchange as ConsensusExchange>>::Networking, <<>::Exchanges as ExchangesType< TYPES, - Message, - >>::CommitteeExchange as ConsensusExchange>>::Networking, + Message, + >>::CommitteeExchange as ConsensusExchange>>::Networking, <<>::Exchanges as ExchangesType< TYPES, - Message, - >>::ViewSyncExchange as ConsensusExchange>>::Networking, + Message, + >>::ViewSyncExchange as ConsensusExchange>>::Networking, <<>::Exchanges as ExchangesType< TYPES, - Message, - >>::VIDExchange as ConsensusExchange>>::Networking, + Message, + >>::VIDExchange as ConsensusExchange>>::Networking, ); /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. @@ -82,8 +82,8 @@ pub struct ResourceGenerators: CommunicationChannel< TYPES, - Message, - as ConsensusExchange>>::Membership, + Message, + as ConsensusExchange>>::Membership, >, { // generate channels diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 439e5c88eb..cbfc805101 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -41,8 +41,8 @@ pub struct TestRunner> where QuorumCommChannel: CommunicationChannel< TYPES, - Message, - as ConsensusExchange>>::Membership, + Message, + as ConsensusExchange>>::Membership, >, { pub(crate) launcher: TestLauncher, @@ -57,8 +57,8 @@ where SystemContext: HotShotType, QuorumCommChannel: CommunicationChannel< TYPES, - Message, - as ConsensusExchange>>::Membership, + Message, + as ConsensusExchange>>::Membership, >, { /// excecute test @@ -66,7 +66,7 @@ where where I::Exchanges: ExchangesType< TYPES, - Message, + Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, { @@ -192,7 +192,7 @@ where where I::Exchanges: ExchangesType< TYPES, - Message, + Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, { @@ -237,7 +237,7 @@ where where I::Exchanges: ExchangesType< TYPES, - Message, + Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, { @@ -251,7 +251,7 @@ where let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { as ConsensusExchange< TYPES, - Message, + Message, >>::Membership::default_election_config(config.total_nodes.get() as u64) }); let committee_election_config = I::committee_election_config_generator(); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 0a3df35cf0..201b1008d2 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -12,7 +12,6 @@ use hotshot_testing::{ }; use hotshot_types::simple_vote::QuorumData; use hotshot_types::simple_vote::QuorumVote; -use hotshot_types::traits::node_implementation::QuorumMembership; use hotshot_types::vote2::Certificate2; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, @@ -27,7 +26,7 @@ use std::collections::HashMap; async fn build_vote( handle: &SystemContextHandle, proposal: QuorumProposal, -) -> GeneralConsensusMessage { +) -> GeneralConsensusMessage { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; let api: HotShotConsensusApi = HotShotConsensusApi { @@ -68,7 +67,7 @@ async fn build_vote( timestamp: 0, proposer_id: quorum_exchange.get_leader(view).to_bytes(), }; - let vote = QuorumVote::>::create_signed_vote( + let vote = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), }, @@ -76,7 +75,7 @@ async fn build_vote( quorum_exchange.public_key(), quorum_exchange.private_key(), ); - GeneralConsensusMessage::::Vote(vote) + GeneralConsensusMessage::::Vote(vote) } #[cfg(test)] diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 9c93307e4c..54c76ad0f7 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -1,5 +1,4 @@ use std::collections::BTreeSet; -use std::marker::PhantomData; use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; @@ -75,16 +74,16 @@ impl NodeImplementation for TestImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< Test, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >; fn new_channel_maps( start_view: ::Time, - ) -> (ChannelMaps, Option>) { + ) -> (ChannelMaps, Option>) { (ChannelMaps::new(start_view), None) } } @@ -94,7 +93,7 @@ impl NodeImplementation for TestImpl { /// derive EQ on `VoteType` and thereby message /// we are only sending data messages, though so we compare key and /// data message -fn fake_message_eq(message_1: Message, message_2: Message) { +fn fake_message_eq(message_1: Message, message_2: Message) { assert_eq!(message_1.sender, message_2.sender); if let MessageKind::Data(DataMessage::SubmitTransaction(d_1, _)) = message_1.kind { if let MessageKind::Data(DataMessage::SubmitTransaction(d_2, _)) = message_2.kind { @@ -114,7 +113,7 @@ fn get_pubkey() -> BLSPubKey { } /// create a message -fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec> { +fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec> { let mut messages = Vec::new(); for _ in 0..num_messages { // create a random transaction from seed @@ -128,7 +127,6 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec::new(0), )), - _phantom: PhantomData, }; messages.push(message); } @@ -144,8 +142,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec, ::SignatureKey>> = - MasterMap::new(); + let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key = get_pubkey(); } @@ -159,8 +156,7 @@ async fn memory_network_spawn_single() { #[instrument] async fn memory_network_spawn_double() { setup_logging(); - let group: Arc, ::SignatureKey>> = - MasterMap::new(); + let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key_1 = get_pubkey(); let _pub_key_2 = get_pubkey(); @@ -178,8 +174,7 @@ async fn memory_network_direct_queue() { // Create some dummy messages // Make and connect the networking instances - let group: Arc, ::SignatureKey>> = - MasterMap::new(); + let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = get_pubkey(); @@ -198,7 +193,7 @@ async fn memory_network_direct_queue() { Option::None, ); - let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + let first_messages: Vec> = gen_messages(5, 100, pub_key_1); // Test 1 -> 2 // Send messages @@ -216,7 +211,7 @@ async fn memory_network_direct_queue() { fake_message_eq(sent_message, recv_message); } - let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + let second_messages: Vec> = gen_messages(5, 200, pub_key_2); // Test 2 -> 1 // Send messages @@ -245,8 +240,7 @@ async fn memory_network_direct_queue() { async fn memory_network_broadcast_queue() { setup_logging(); // Make and connect the networking instances - let group: Arc, ::SignatureKey>> = - MasterMap::new(); + let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = get_pubkey(); let network1 = MemoryNetwork::new( @@ -263,7 +257,7 @@ async fn memory_network_broadcast_queue() { Option::None, ); - let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + let first_messages: Vec> = gen_messages(5, 100, pub_key_1); // Test 1 -> 2 // Send messages @@ -284,7 +278,7 @@ async fn memory_network_broadcast_queue() { fake_message_eq(sent_message, recv_message); } - let second_messages: Vec> = gen_messages(5, 200, pub_key_2); + let second_messages: Vec> = gen_messages(5, 200, pub_key_2); // Test 2 -> 1 // Send messages @@ -316,8 +310,7 @@ async fn memory_network_broadcast_queue() { async fn memory_network_test_in_flight_message_count() { setup_logging(); - let group: Arc, ::SignatureKey>> = - MasterMap::new(); + let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = get_pubkey(); let network1 = MemoryNetwork::new( @@ -335,7 +328,7 @@ async fn memory_network_test_in_flight_message_count() { ); // Create some dummy messages - let messages: Vec> = gen_messages(5, 100, pub_key_1); + let messages: Vec> = gen_messages(5, 100, pub_key_1); let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); assert_eq!(network1.in_flight_message_count(), Some(0)); diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 8940abda26..19abb33c7d 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -35,10 +35,7 @@ async fn test_view_sync_task() { relay: 0, round: ::Time::new(5), }; - let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::< - TestTypes, - hotshot_testing::node_types::StaticMembership, - >::create_signed_vote( + let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, ::Time::new(5), view_sync_exchange.public_key(), diff --git a/types/src/message.rs b/types/src/message.rs index c5c9ed7fa9..c9fa478272 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -12,16 +12,13 @@ use crate::simple_vote::{ DAVote2, TimeoutVote2, VIDVote2, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; -use crate::traits::node_implementation::{CommitteeMembership, ViewSyncMembership}; use crate::vote2::HasViewNumber; use crate::{ data::{DAProposal, VidDisperse}, simple_vote::QuorumVote, traits::{ network::{NetworkMsg, ViewMessage}, - node_implementation::{ - ExchangesType, NodeImplementation, NodeType, QuorumMembership, VIDMembership, - }, + node_implementation::NodeType, signature_key::EncodedSignature, }, }; @@ -35,20 +32,17 @@ use std::{fmt::Debug, marker::PhantomData}; /// Incoming message #[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] -pub struct Message> { +pub struct Message { /// The sender of this message pub sender: TYPES::SignatureKey, /// The message kind - pub kind: MessageKind, - - /// Phantom data. - pub _phantom: PhantomData, + pub kind: MessageKind, } -impl> NetworkMsg for Message {} +impl NetworkMsg for Message {} -impl> ViewMessage for Message { +impl ViewMessage for Message { /// get the view number out of a message fn get_view_number(&self) -> TYPES::Time { self.kind.get_view_number() @@ -60,7 +54,7 @@ impl> ViewMessage for Messa /// A wrapper type for implementing `PassType` on a vector of `Message`. #[derive(Clone, Debug)] -pub struct Messages>(pub Vec>); +pub struct Messages(pub Vec>); /// A message type agnostic description of a message's purpose #[derive(PartialEq, Copy, Clone)] @@ -94,38 +88,33 @@ pub enum MessagePurpose { /// Enum representation of any message type #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] -pub enum MessageKind> { +pub enum MessageKind { /// Messages related to the consensus protocol - Consensus(SequencingMessage), + Consensus(SequencingMessage), /// Messages relating to sharing data between nodes Data(DataMessage), - /// Phantom data. - _Unreachable(PhantomData), } -impl> MessageKind { +impl MessageKind { // Can't implement `From` directly due to potential conflict with // `From`. /// Construct a [`MessageKind`] from [`I::ConsensusMessage`]. - pub fn from_consensus_message(m: SequencingMessage) -> Self { + pub fn from_consensus_message(m: SequencingMessage) -> Self { Self::Consensus(m) } } -impl> From> - for MessageKind -{ +impl From> for MessageKind { fn from(m: DataMessage) -> Self { Self::Data(m) } } -impl> ViewMessage for MessageKind { +impl ViewMessage for MessageKind { fn get_view_number(&self) -> TYPES::Time { match &self { MessageKind::Consensus(message) => message.view_number(), MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, - MessageKind::_Unreachable(_) => unimplemented!(), } } @@ -135,7 +124,6 @@ impl> ViewMessage for Messa MessageKind::Data(message) => match message { DataMessage::SubmitTransaction(_, _) => MessagePurpose::Data, }, - MessageKind::_Unreachable(_) => unimplemented!(), } } } @@ -152,28 +140,20 @@ pub enum InternalTrigger { /// A processed consensus message for both validating and sequencing consensus. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[serde(bound(deserialize = ""))] -pub enum ProcessedGeneralConsensusMessage> -where - I::Exchanges: ExchangesType>, -{ +pub enum ProcessedGeneralConsensusMessage { /// Message with a quorum proposal. Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. - Vote( - QuorumVote>, - TYPES::SignatureKey, - ), + Vote(QuorumVote, TYPES::SignatureKey), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), } -impl> From> - for GeneralConsensusMessage -where - I::Exchanges: ExchangesType>, +impl From> + for GeneralConsensusMessage { - fn from(value: ProcessedGeneralConsensusMessage) -> Self { + fn from(value: ProcessedGeneralConsensusMessage) -> Self { match value { ProcessedGeneralConsensusMessage::Proposal(p, _) => { GeneralConsensusMessage::Proposal(p) @@ -186,14 +166,11 @@ where } } -impl> ProcessedGeneralConsensusMessage -where - I::Exchanges: ExchangesType>, -{ +impl ProcessedGeneralConsensusMessage { /// Create a [`ProcessedGeneralConsensusMessage`] from a [`GeneralConsensusMessage`]. /// # Panics /// if reaching the unimplemented `ViewSync` case. - pub fn new(value: GeneralConsensusMessage, sender: TYPES::SignatureKey) -> Self { + pub fn new(value: GeneralConsensusMessage, sender: TYPES::SignatureKey) -> Self { match value { GeneralConsensusMessage::Proposal(p) => { ProcessedGeneralConsensusMessage::Proposal(p, sender) @@ -217,31 +194,25 @@ where /// A processed consensus message for the DA committee in sequencing consensus. #[derive(Serialize, Clone, Debug, PartialEq)] #[serde(bound(deserialize = ""))] -pub enum ProcessedCommitteeConsensusMessage> { +pub enum ProcessedCommitteeConsensusMessage { /// Proposal for the DA committee. DAProposal(Proposal>, TYPES::SignatureKey), /// Vote from the DA committee. - DAVote( - DAVote2>, - TYPES::SignatureKey, - ), + DAVote(DAVote2, TYPES::SignatureKey), /// Certificate for the DA. DACertificate(DACertificate2, TYPES::SignatureKey), /// VID dispersal data. Like [`DAProposal`] VidDisperseMsg(Proposal>, TYPES::SignatureKey), /// Vote from VID storage node. Like [`DAVote`] - VidVote( - VIDVote2>, - TYPES::SignatureKey, - ), + VidVote(VIDVote2, TYPES::SignatureKey), /// Certificate for VID. Like [`DACertificate`] VidCertificate(VIDCertificate2, TYPES::SignatureKey), } -impl> - From> for CommitteeConsensusMessage +impl From> + for CommitteeConsensusMessage { - fn from(value: ProcessedCommitteeConsensusMessage) -> Self { + fn from(value: ProcessedCommitteeConsensusMessage) -> Self { match value { ProcessedCommitteeConsensusMessage::DAProposal(p, _) => { CommitteeConsensusMessage::DAProposal(p) @@ -265,9 +236,9 @@ impl> } } -impl> ProcessedCommitteeConsensusMessage { +impl ProcessedCommitteeConsensusMessage { /// Create a [`ProcessedCommitteeConsensusMessage`] from a [`CommitteeConsensusMessage`]. - pub fn new(value: CommitteeConsensusMessage, sender: TYPES::SignatureKey) -> Self { + pub fn new(value: CommitteeConsensusMessage, sender: TYPES::SignatureKey) -> Self { match value { CommitteeConsensusMessage::DAProposal(p) => { ProcessedCommitteeConsensusMessage::DAProposal(p, sender) @@ -292,15 +263,11 @@ impl> ProcessedCommitteeConsensusM } /// A processed consensus message for sequencing consensus. -pub type ProcessedSequencingMessage = Either< - ProcessedGeneralConsensusMessage, - ProcessedCommitteeConsensusMessage, ->; +pub type ProcessedSequencingMessage = + Either, ProcessedCommitteeConsensusMessage>; -impl> From> - for SequencingMessage -{ - fn from(value: ProcessedSequencingMessage) -> Self { +impl From> for SequencingMessage { + fn from(value: ProcessedSequencingMessage) -> Self { match value { Left(message) => SequencingMessage(Left(message.into())), Right(message) => SequencingMessage(Right(message.into())), @@ -308,10 +275,10 @@ impl> From> From> - for ProcessedSequencingMessage +impl From> + for ProcessedSequencingMessage { - fn from(value: ProcessedGeneralConsensusMessage) -> Self { + fn from(value: ProcessedGeneralConsensusMessage) -> Self { Left(value) } } @@ -319,24 +286,21 @@ impl> From> -where - I::Exchanges: ExchangesType>, -{ +pub enum GeneralConsensusMessage { /// Message with a quorum proposal. Proposal(Proposal>), /// Message with a quorum vote. - Vote(QuorumVote>), + Vote(QuorumVote), /// Message with a view sync pre-commit vote - ViewSyncPreCommitVote(ViewSyncPreCommitVote>), + ViewSyncPreCommitVote(ViewSyncPreCommitVote), /// Message with a view sync commit vote - ViewSyncCommitVote(ViewSyncCommitVote>), + ViewSyncCommitVote(ViewSyncCommitVote), /// Message with a view sync finalize vote - ViewSyncFinalizeVote(ViewSyncFinalizeVote>), + ViewSyncFinalizeVote(ViewSyncFinalizeVote), /// Message with a view sync pre-commit certificate ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), @@ -348,7 +312,7 @@ where ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), /// Message with a Timeout vote - TimeoutVote(TimeoutVote2>), + TimeoutVote(TimeoutVote2), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] @@ -358,12 +322,12 @@ where #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to the sequencing consensus protocol for the DA committee. -pub enum CommitteeConsensusMessage> { +pub enum CommitteeConsensusMessage { /// Proposal for data availability committee DAProposal(Proposal>), /// vote for data availability committee - DAVote(DAVote2>), + DAVote(DAVote2), /// Certificate data is available DACertificate(DACertificate2), @@ -377,7 +341,7 @@ pub enum CommitteeConsensusMessage /// Vote for VID disperse data /// /// Like [`DAVote`]. - VidVote(VIDVote2>), + VidVote(VIDVote2), /// VID certificate data is available /// /// Like [`DACertificate`] @@ -387,11 +351,11 @@ pub enum CommitteeConsensusMessage /// Messages for sequencing consensus. #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] -pub struct SequencingMessage>( - pub Either, CommitteeConsensusMessage>, +pub struct SequencingMessage( + pub Either, CommitteeConsensusMessage>, ); -impl> SequencingMessage { +impl SequencingMessage { // TODO: Disable panic after the `ViewSync` case is implemented. /// Get the view number this message relates to #[allow(clippy::panic)] diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 6c88625704..1565a08ea9 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -1,6 +1,6 @@ //! Implementations of the simple vote types. -use std::{fmt::Debug, hash::Hash, marker::PhantomData}; +use std::{fmt::Debug, hash::Hash}; use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; @@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ data::Leaf, traits::{ - election::Membership, node_implementation::NodeType, signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, }, @@ -88,30 +87,23 @@ mod sealed { /// A simple yes vote over some votable type. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -pub struct SimpleVote> { +pub struct SimpleVote { /// The signature share associated with this vote pub signature: (EncodedPublicKey, EncodedSignature), /// The leaf commitment being voted on. pub data: DATA, /// The view this vote was cast for pub view_number: TYPES::Time, - /// phantom data for `MEMBERSHIP` - _pd: PhantomData, } -impl> HasViewNumber - for SimpleVote -{ +impl HasViewNumber for SimpleVote { fn get_view_number(&self) -> ::Time { self.view_number } } -impl> Vote2 - for SimpleVote -{ +impl Vote2 for SimpleVote { type Commitment = DATA; - type Membership = MEMBERSHIP; fn get_signing_key(&self) -> ::SignatureKey { ::from_bytes(&self.signature.0).unwrap() @@ -130,9 +122,7 @@ impl> V } } -impl> - SimpleVote -{ +impl SimpleVote { /// Creates and signs a simple vote pub fn create_signed_vote( data: DATA, @@ -145,7 +135,6 @@ impl> signature: (pub_key.to_bytes(), signature), data, view_number: view, - _pd: PhantomData, } } } @@ -217,16 +206,16 @@ impl = SimpleVote, M>; +pub type QuorumVote = SimpleVote>; /// DA vote type alias -pub type DAVote2 = SimpleVote::BlockPayload>, M>; +pub type DAVote2 = SimpleVote::BlockPayload>>; /// VID vote type alias -pub type VIDVote2 = SimpleVote::BlockPayload>, M>; +pub type VIDVote2 = SimpleVote::BlockPayload>>; /// Timeout Vote type alias -pub type TimeoutVote2 = SimpleVote, M>; +pub type TimeoutVote2 = SimpleVote>; /// View Sync Commit Vote type alias -pub type ViewSyncCommitVote = SimpleVote, M>; +pub type ViewSyncCommitVote = SimpleVote>; /// View Sync Pre Commit Vote type alias -pub type ViewSyncPreCommitVote = SimpleVote, M>; +pub type ViewSyncPreCommitVote = SimpleVote>; /// View Sync Finalize Vote type alias -pub type ViewSyncFinalizeVote = SimpleVote, M>; +pub type ViewSyncFinalizeVote = SimpleVote>; diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 33ce04f57f..508c5cec6e 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -124,26 +124,26 @@ pub trait ConsensusApi>: async fn send_direct_message( &self, recipient: TYPES::SignatureKey, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// send a direct message using the DA communication channel async fn send_direct_da_message( &self, recipient: TYPES::SignatureKey, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// Send a broadcast message to the entire network. async fn send_broadcast_message( &self, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// Send a broadcast to the DA comitee, stub for now async fn send_da_broadcast( &self, - message: SequencingMessage, + message: SequencingMessage, ) -> std::result::Result<(), NetworkError>; /// Send a message with a transaction. diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index fe0775121a..87e71cc033 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -304,7 +304,7 @@ pub trait ConsensusExchange: Send + Sync { } /// The committee which votes on proposals. - fn membership(&self) -> &Self::Membership; + fn membership(&self) -> &TYPES::Membership; /// This participant's public key. fn public_key(&self) -> &TYPES::SignatureKey; @@ -336,7 +336,7 @@ pub struct CommitteeExchange< /// The network being used by this exchange. network: NETWORK, /// The committee which votes on proposals. - membership: MEMBERSHIP, + membership: TYPES::Membership, /// This participant's public key. public_key: TYPES::SignatureKey, /// Entry with public key and staking value for certificate aggregation @@ -355,7 +355,7 @@ impl< M: NetworkMsg, > CommitteeExchangeType for CommitteeExchange { - /// Sign a DA proposal. + /// Sign a DA proposal.Self as ConsensusExchange fn sign_da_proposal( &self, payload_commitment: &Commitment, @@ -384,8 +384,7 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = - >::Membership::create_election(entries, config); + let membership = ::Membership::create_election(entries, config); Self { network, membership, @@ -406,7 +405,7 @@ impl< .make_vote_token(view_number, &self.private_key) } - fn membership(&self) -> &Self::Membership { + fn membership(&self) -> &TYPES::Membership { &self.membership } fn public_key(&self) -> &TYPES::SignatureKey { @@ -438,7 +437,7 @@ pub struct VIDExchange< /// The network being used by this exchange. network: NETWORK, /// The committee which votes on proposals. - membership: MEMBERSHIP, + membership: TYPES::Membership, /// This participant's public key. public_key: TYPES::SignatureKey, /// Entry with public key and staking value for certificate aggregation @@ -486,8 +485,7 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = - >::Membership::create_election(entries, config); + let membership = ::Membership::create_election(entries, config); Self { network, membership, @@ -508,7 +506,7 @@ impl< .make_vote_token(view_number, &self.private_key) } - fn membership(&self) -> &Self::Membership { + fn membership(&self) -> &TYPES::Membership { &self.membership } fn public_key(&self) -> &TYPES::SignatureKey { @@ -546,7 +544,7 @@ pub struct QuorumExchange< /// The network being used by this exchange. network: NETWORK, /// The committee which votes on proposals. - membership: MEMBERSHIP, + membership: TYPES::Membership, /// This participant's public key. public_key: TYPES::SignatureKey, /// Entry with public key and staking value for certificate aggregation @@ -601,8 +599,7 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = - >::Membership::create_election(entries, config); + let membership = ::Membership::create_election(entries, config); Self { network, membership, @@ -617,7 +614,7 @@ impl< &self.network } - fn membership(&self) -> &Self::Membership { + fn membership(&self) -> &TYPES::Membership { &self.membership } fn public_key(&self) -> &TYPES::SignatureKey { @@ -646,7 +643,7 @@ pub struct ViewSyncExchange< /// The network being used by this exchange. network: NETWORK, /// The committee which votes on proposals. - membership: MEMBERSHIP, + membership: TYPES::Membership, /// This participant's public key. public_key: TYPES::SignatureKey, /// Entry with public key and staking value for certificate aggregation in the stake table. @@ -686,8 +683,7 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = - >::Membership::create_election(entries, config); + let membership = ::Membership::create_election(entries, config); Self { network, membership, @@ -702,7 +698,7 @@ impl< &self.network } - fn membership(&self) -> &Self::Membership { + fn membership(&self) -> &TYPES::Membership { &self.membership } fn public_key(&self) -> &TYPES::SignatureKey { @@ -726,7 +722,7 @@ pub struct TimeoutExchange< /// The network being used by this exchange. network: NETWORK, /// The committee which votes on proposals. - membership: MEMBERSHIP, + membership: TYPES::Membership, /// This participant's public key. public_key: TYPES::SignatureKey, /// Entry with public key and staking value for certificate aggregation in the stake table. @@ -778,8 +774,7 @@ impl< entry: ::StakeTableEntry, sk: ::PrivateKey, ) -> Self { - let membership = - >::Membership::create_election(entries, config); + let membership = ::Membership::create_election(entries, config); Self { network, membership, @@ -794,7 +789,7 @@ impl< &self.network } - fn membership(&self) -> &Self::Membership { + fn membership(&self) -> &TYPES::Membership { &self.membership } fn public_key(&self) -> &TYPES::SignatureKey { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 1a037ec89a..13e0d7f86b 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -252,7 +252,7 @@ pub trait CommunicationChannel Result<(), NetworkError>; /// Sends a direct message to a specific node diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index c0746ba8fb..0968c02040 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -37,18 +37,18 @@ use std::{ /// struct containing messages for a view to send to a replica or DA committee member. #[derive(Clone)] -pub struct ViewQueue> { +pub struct ViewQueue { /// to send networking events to a replica or DA committee member. - pub sender_chan: UnboundedSender>, + pub sender_chan: UnboundedSender>, /// to recv networking events for a replica or DA committee member. - pub receiver_chan: Arc>>>, + pub receiver_chan: Arc>>>, /// `true` if this queue has already received a proposal pub has_received_proposal: Arc, } -impl> Default for ViewQueue { +impl Default for ViewQueue { /// create new view queue fn default() -> Self { let (s, r) = unbounded(); @@ -61,17 +61,17 @@ impl> Default for ViewQueue> { +pub struct SendToTasks { /// the current view number /// this should always be in sync with `Consensus` pub cur_view: TYPES::Time, /// a map from view number to ViewQueue /// one of (replica|next leader)'s' task for view i will be listening on the channel in here - pub channel_map: BTreeMap>, + pub channel_map: BTreeMap>, } -impl> SendToTasks { +impl SendToTasks { /// create new sendtosasks #[must_use] pub fn new(view_num: TYPES::Time) -> Self { @@ -84,15 +84,15 @@ impl> SendToTasks { /// Channels for sending/recv-ing proposals and votes. #[derive(Clone)] -pub struct ChannelMaps> { +pub struct ChannelMaps { /// Channel for the next consensus leader or DA leader. - pub proposal_channel: Arc>>, + pub proposal_channel: Arc>>, /// Channel for the replica or DA committee member. - pub vote_channel: Arc>>, + pub vote_channel: Arc>>, } -impl> ChannelMaps { +impl ChannelMaps { /// Create channels starting from a given view. pub fn new(start_view: TYPES::Time) -> Self { Self { @@ -119,13 +119,13 @@ pub trait NodeImplementation: /// Consensus type selected exchanges. /// /// Implements either `ValidatingExchangesType` or `ExchangesType`. - type Exchanges: ExchangesType>; + type Exchanges: ExchangesType>; /// Create channels for sending/recv-ing proposals and votes for quorum and committee /// exchanges, the latter of which is only applicable for sequencing consensus. fn new_channel_maps( start_view: TYPES::Time, - ) -> (ChannelMaps, Option>); + ) -> (ChannelMaps, Option>); } /// Contains the protocols for exchanging proposals and votes. @@ -408,31 +408,31 @@ where /// Alias for the [`QuorumExchange`] type. pub type QuorumEx = <>::Exchanges as ExchangesType< TYPES, - Message, + Message, >>::QuorumExchange; /// Alias for `TimeoutExchange` type pub type TimeoutEx = <>::Exchanges as ExchangesType< TYPES, - Message, + Message, >>::TimeoutExchange; /// Alias for the [`CommitteeExchange`] type. pub type CommitteeEx = <>::Exchanges as ExchangesType< TYPES, - Message, + Message, >>::CommitteeExchange; /// Alias for the [`VIDExchange`] type. pub type VIDEx = <>::Exchanges as ExchangesType< TYPES, - Message, + Message, >>::VIDExchange; /// Alias for the [`ViewSyncExchange`] type. pub type ViewSyncEx = <>::Exchanges as ExchangesType< TYPES, - Message, + Message, >>::ViewSyncExchange; /// extra functions required on a node implementation to be usable by hotshot-testing @@ -483,23 +483,23 @@ pub trait TestableNodeImplementation: NodeImplementation #[async_trait] impl> TestableNodeImplementation for I where - CommitteeNetwork: TestableNetworkingImplementation>, - QuorumNetwork: TestableNetworkingImplementation>, + CommitteeNetwork: TestableNetworkingImplementation>, + QuorumNetwork: TestableNetworkingImplementation>, QuorumCommChannel: TestableChannelImplementation< TYPES, - Message, + Message, QuorumMembership, QuorumNetwork, >, CommitteeCommChannel: TestableChannelImplementation< TYPES, - Message, + Message, CommitteeMembership, QuorumNetwork, >, ViewSyncCommChannel: TestableChannelImplementation< TYPES, - Message, + Message, ViewSyncMembership, QuorumNetwork, >, @@ -549,50 +549,50 @@ where /// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. pub type QuorumCommChannel = - as ConsensusExchange>>::Networking; + as ConsensusExchange>>::Networking; /// Communication channel for [`ViewSyncProposalType`] and [`ViewSyncVote`]. pub type ViewSyncCommChannel = - as ConsensusExchange>>::Networking; + as ConsensusExchange>>::Networking; /// Communication channel for [`CommitteeProposalType`] and [`DAVote`]. pub type CommitteeCommChannel = - as ConsensusExchange>>::Networking; + as ConsensusExchange>>::Networking; /// Protocol for determining membership in a consensus committee. pub type QuorumMembership = - as ConsensusExchange>>::Membership; + as ConsensusExchange>>::Membership; /// TYPE aliase for the membership of VID exchange pub type VIDMembership = - as ConsensusExchange>>::Membership; + as ConsensusExchange>>::Membership; /// Protocol for determining membership in a DA committee. pub type CommitteeMembership = - as ConsensusExchange>>::Membership; + as ConsensusExchange>>::Membership; /// Protocol for determining membership in a view sync committee. pub type ViewSyncMembership = - as ConsensusExchange>>::Membership; + as ConsensusExchange>>::Membership; /// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels pub type QuorumNetwork = as CommunicationChannel< TYPES, - Message, + Message, QuorumMembership, >>::NETWORK; /// Type for the underlying committee `ConnectedNetwork` that will be shared (for now) b/t Communication Channels pub type CommitteeNetwork = as CommunicationChannel< TYPES, - Message, + Message, CommitteeMembership, >>::NETWORK; /// Type for the underlying view sync `ConnectedNetwork` that will be shared (for now) b/t Communication Channels pub type ViewSyncNetwork = as CommunicationChannel< TYPES, - Message, + Message, ViewSyncMembership, >>::NETWORK; diff --git a/types/src/vote2.rs b/types/src/vote2.rs index 6efb06b252..5ef2058cdc 100644 --- a/types/src/vote2.rs +++ b/types/src/vote2.rs @@ -24,8 +24,6 @@ use crate::{ /// A simple vote that has a signer and commitment to the data voted on. pub trait Vote2: HasViewNumber { - /// The membership of those that send this vote type - type Membership: Membership; /// Type of data commitment this vote uses. type Commitment: Voteable; @@ -102,7 +100,7 @@ impl< /// /// # Panics /// Panics if the vote comes from a node not in the stake table - pub fn accumulate(mut self, vote: &VOTE, membership: &VOTE::Membership) -> Either { + pub fn accumulate(mut self, vote: &VOTE, membership: &TYPES::Membership) -> Either { let key = vote.get_signing_key(); let vote_commitment = vote.get_data_commitment(); From 015634938e50ac90b2a7a5c734fee4fb45542f3c Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 16 Nov 2023 09:52:34 -0500 Subject: [PATCH 0397/1393] address comments --- hotshot-state-prover/src/circuit.rs | 47 ++++++++++++++++++++--------- hotshot-state-prover/src/lib.rs | 38 +++++++++++------------ 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 03c7f0b921..6be5ee2150 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -58,6 +58,21 @@ pub struct LightClientStateVar { vars: [Variable; 7], } +#[derive(Clone, Debug)] +pub struct PublicInputs(Vec); + +impl AsRef<[F]> for PublicInputs { + fn as_ref(&self) -> &[F] { + &self.0 + } +} + +impl From> for PublicInputs { + fn from(v: Vec) -> Self { + Self(v) + } +} + impl LightClientStateVar { pub fn new( circuit: &mut PlonkCircuit, @@ -125,13 +140,13 @@ impl AsRef<[Variable]> for LightClientStateVar { /// - A circuit for proof generation /// - A list of public inputs for verification /// - A PlonkError if any error happens when building the circuit -pub(crate) fn build_state_verifier_circuit( +pub(crate) fn build( stake_table_entries: STIter, signer_bit_vec: BitIter, signatures: SigIter, lightclient_state: &LightClientState, threshold: &U256, -) -> Result<(PlonkCircuit, Vec), PlonkError> +) -> Result<(PlonkCircuit, PublicInputs), PlonkError> where F: RescueParameter, P: TECurveConfig, @@ -328,12 +343,12 @@ where circuit.enforce_true(sig_ver_result.0)?; circuit.finalize_for_arithmetization()?; - Ok((circuit, public_inputs)) + Ok((circuit, public_inputs.into())) } #[cfg(test)] mod tests { - use super::{build_state_verifier_circuit, LightClientState}; + use super::{build, LightClientState}; use crate::utils::{key_pairs_for_testing, stake_table_for_testing}; use ark_ed_on_bn254::EdwardsConfig as Config; use ethereum_types::U256; @@ -408,7 +423,7 @@ mod tests { }) .collect::>(); // good path - let (circuit, public_inputs) = build_state_verifier_circuit( + let (circuit, public_inputs) = build( &entries, &bit_vec, &bit_masked_sigs, @@ -416,9 +431,11 @@ mod tests { &U256::from(26u32), ) .unwrap(); - assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); + assert!(circuit + .check_circuit_satisfiability(public_inputs.as_ref()) + .is_ok()); - let (circuit, public_inputs) = build_state_verifier_circuit( + let (circuit, public_inputs) = build( &entries, &bit_vec, &bit_masked_sigs, @@ -426,7 +443,9 @@ mod tests { &U256::from(10u32), ) .unwrap(); - assert!(circuit.check_circuit_satisfiability(&public_inputs).is_ok()); + assert!(circuit + .check_circuit_satisfiability(public_inputs.as_ref()) + .is_ok()); // bad path: total weight doesn't meet the threshold // bit vector with total weight 23 @@ -444,7 +463,7 @@ mod tests { } }) .collect::>(); - let (bad_circuit, public_inputs) = build_state_verifier_circuit( + let (bad_circuit, public_inputs) = build( &entries, &bad_bit_vec, &bad_bit_masked_sigs, @@ -453,7 +472,7 @@ mod tests { ) .unwrap(); assert!(bad_circuit - .check_circuit_satisfiability(&public_inputs) + .check_circuit_satisfiability(public_inputs.as_ref()) .is_err()); // bad path: bad stake table commitment @@ -475,7 +494,7 @@ mod tests { }) .collect::, PrimitivesError>>() .unwrap(); - let (bad_circuit, public_inputs) = build_state_verifier_circuit( + let (bad_circuit, public_inputs) = build( &entries, &bit_vec, &sig_for_bad_state, @@ -484,7 +503,7 @@ mod tests { ) .unwrap(); assert!(bad_circuit - .check_circuit_satisfiability(&public_inputs) + .check_circuit_satisfiability(public_inputs.as_ref()) .is_err()); // bad path: incorrect signatures @@ -507,7 +526,7 @@ mod tests { }) .collect::, PrimitivesError>>() .unwrap(); - let (bad_circuit, public_inputs) = build_state_verifier_circuit( + let (bad_circuit, public_inputs) = build( &entries, &bit_vec, &wrong_sigs, @@ -516,7 +535,7 @@ mod tests { ) .unwrap(); assert!(bad_circuit - .check_circuit_satisfiability(&public_inputs) + .check_circuit_satisfiability(public_inputs.as_ref()) .is_err()); } } diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 1f70317154..d70b48dfce 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -9,7 +9,7 @@ use ark_std::{ borrow::Borrow, rand::{CryptoRng, RngCore}, }; -use circuit::build_state_verifier_circuit; +use circuit::PublicInputs; use ethereum_types::U256; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::traits::{ @@ -63,9 +63,10 @@ pub fn generate_state_update_proof( signatures: SigIter, lightclient_state: &LightClientState, threshold: &U256, -) -> Result<(Proof, Vec), PlonkError> +) -> Result<(Proof, PublicInputs), PlonkError> where ST: StakeTableScheme, + ST::IntoIter: ExactSizeIterator, R: CryptoRng + RngCore, BitIter: IntoIterator, BitIter::Item: Borrow, @@ -77,9 +78,8 @@ where let stake_table_entries = stake_table .try_iter(SnapshotVersion::LastEpochStart) .unwrap() - .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)) - .collect::>(); - let (circuit, public_inputs) = build_state_verifier_circuit( + .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)); + let (circuit, public_inputs) = circuit::build( stake_table_entries, signer_bit_vec, signatures, @@ -92,7 +92,7 @@ where /// Internal function for helping generate the proving/verifying key fn build_dummy_circuit_for_preprocessing( -) -> Result<(PlonkCircuit, Vec), PlonkError> { +) -> Result<(PlonkCircuit, PublicInputs), PlonkError> { let st = StakeTable::::new(); let lightclient_state = LightClientState { view_number: 0, @@ -101,7 +101,7 @@ fn build_dummy_circuit_for_preprocessing( fee_ledger_comm: BaseField::default(), stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - build_state_verifier_circuit::( + circuit::build::( &[], &[], &[], @@ -116,7 +116,7 @@ mod tests { utils::{key_pairs_for_testing, stake_table_for_testing}, BLSVerKey, BaseField, SchnorrVerKey, UniversalSrs, }; - use crate::{circuit::build_state_verifier_circuit, generate_state_update_proof, preprocess}; + use crate::{circuit::build, generate_state_update_proof, preprocess}; use ark_bn254::Bn254; use ark_ec::pairing::Pairing; use ark_ed_on_bn254::EdwardsConfig as Config; @@ -207,17 +207,15 @@ mod tests { fee_ledger_comm: BaseField::default(), stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - Ok( - build_state_verifier_circuit::( - &[], - &[], - &[], - &lightclient_state, - &U256::zero(), - )? - .0 - .num_gates(), - ) + Ok(build::( + &[], + &[], + &[], + &lightclient_state, + &U256::zero(), + )? + .0 + .num_gates()) } #[test] @@ -301,7 +299,7 @@ mod tests { let (proof, public_inputs) = result.unwrap(); assert!(PlonkKzgSnark::::verify::( &vk, - &public_inputs, + public_inputs.as_ref(), &proof, None ) From e2b36fb5640f617cdeb57a61fc6886fce3860500 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 10:01:55 -0500 Subject: [PATCH 0398/1393] Remove VoteToken Completely + remove old types --- hotshot/examples/libp2p/types.rs | 3 - hotshot/examples/web-server-da/types.rs | 3 - hotshot/src/demo.rs | 5 +- hotshot/src/lib.rs | 34 +- hotshot/src/tasks/mod.rs | 38 +- .../src/traits/election/static_committee.rs | 78 +--- hotshot/src/traits/storage/memory_storage.rs | 5 +- hotshot/src/types.rs | 2 +- task-impls/src/consensus.rs | 82 +---- task-impls/src/da.rs | 28 +- task-impls/src/vid.rs | 27 +- testing/src/node_types.rs | 3 +- testing/tests/memory_network.rs | 8 +- types/src/certificate.rs | 174 --------- types/src/data.rs | 71 ---- types/src/lib.rs | 2 - types/src/traits/election.rs | 82 +---- types/src/traits/node_implementation.rs | 4 +- types/src/vote.rs | 339 ------------------ 19 files changed, 61 insertions(+), 927 deletions(-) delete mode 100644 types/src/certificate.rs delete mode 100644 types/src/vote.rs diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 8bf819d2d5..0f989c6c35 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -9,7 +9,6 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::ViewSyncVote, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,8 +21,6 @@ pub type VIDNetwork = Libp2pCommChannel; pub type QuorumNetwork = Libp2pCommChannel; pub type ViewSyncNetwork = Libp2pCommChannel; -pub type ThisViewSyncVote = ViewSyncVote; - impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index e843662714..87e0666208 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -9,7 +9,6 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::ViewSyncVote, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,8 +21,6 @@ pub type VIDNetwork = WebCommChannel; pub type QuorumNetwork = WebCommChannel; pub type ViewSyncNetwork = WebCommChannel; -pub type ThisViewSyncVote = ViewSyncVote; - impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index f345b30973..a7356e0822 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -5,9 +5,7 @@ //! //! These implementations are useful in examples and integration testing, but are not suitable for //! production use. -use crate::traits::election::static_committee::{ - GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, -}; +use crate::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; use commit::{Commitment, Committable}; use derivative::Derivative; @@ -135,7 +133,6 @@ impl NodeType for DemoTypes { type BlockHeader = VIDBlockHeader; type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; - type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DemoState; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 38393715e8..6c885ab5bd 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -43,7 +43,7 @@ use async_compatibility_layer::{ }; use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; -use commit::{Commitment, Committable}; +use commit::Committable; use custom_debug::Debug; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -604,32 +604,16 @@ impl< I: NodeImplementation, > HotShotType for SystemContext where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment>, - Membership = TYPES::Membership, - > + 'static, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - > + 'static, + QuorumEx: + ConsensusExchange, Membership = TYPES::Membership> + 'static, + CommitteeEx: + ConsensusExchange, Membership = TYPES::Membership> + 'static, ViewSyncEx: ViewSyncExchangeType, Membership = TYPES::Membership> + 'static, - VIDEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - > + 'static, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - > + 'static, + VIDEx: + ConsensusExchange, Membership = TYPES::Membership> + 'static, + TimeoutEx: + ConsensusExchange, Membership = TYPES::Membership> + 'static, { fn consensus(&self) -> &Arc>> { &self.inner.consensus diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 858158280c..a636225c8f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -2,7 +2,7 @@ use crate::{async_spawn, types::SystemContextHandle, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; -use commit::{Commitment, Committable}; +use commit::Committable; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -26,7 +26,6 @@ use hotshot_task_impls::{ }; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, - data::Leaf, event::Event, message::{Message, Messages}, traits::{ @@ -223,24 +222,9 @@ pub async fn add_consensus_task< handle: SystemContextHandle, ) -> TaskRunner where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment>, - Membership = TYPES::Membership, - >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, + CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, + TimeoutEx: ConsensusExchange, Membership = TYPES::Membership>, { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -324,12 +308,7 @@ pub async fn add_vid_task>( handle: SystemContextHandle, ) -> TaskRunner where - VIDEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + VIDEx: ConsensusExchange, Membership = TYPES::Membership>, { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -387,12 +366,7 @@ pub async fn add_da_task>( handle: SystemContextHandle, ) -> TaskRunner where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index e09820ed17..d3bddaf4de 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,11 +1,9 @@ // use ark_bls12_381::Parameters as Param381; -use commit::{Commitment, Committable, RawCommitmentBuilder}; -use espresso_systems_common::hotshot::tag; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::traits::{ - election::{Checked, ElectionConfig, ElectionError, Membership, VoteToken}, + election::{ElectionConfig, Membership}, node_implementation::NodeType, - signature_key::{EncodedSignature, SignatureKey}, + signature_key::SignatureKey, }; #[allow(deprecated)] use serde::{Deserialize, Serialize}; @@ -39,35 +37,6 @@ impl GeneralStaticCommittee { } } -#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)] -#[serde(bound(deserialize = ""))] -/// Vote token for a static committee -pub struct StaticVoteToken { - /// signature - signature: EncodedSignature, - /// public key - pub_key: K, -} - -impl VoteToken for StaticVoteToken { - fn vote_count(&self) -> NonZeroU64 { - NonZeroU64::new(1).unwrap() - } -} - -impl Committable for StaticVoteToken { - fn commit(&self) -> Commitment { - RawCommitmentBuilder::new("StaticVoteToken") - .var_size_field("signature", &self.signature.0) - .var_size_field("pub_key", &self.pub_key.to_bytes().0) - .finalize() - } - - fn tag() -> String { - tag::STATIC_VOTE_TOKEN.to_string() - } -} - /// configuration for static committee. stub for now #[derive(Default, Clone, Serialize, Deserialize, core::fmt::Debug)] pub struct StaticElectionConfig { @@ -80,11 +49,7 @@ impl ElectionConfig for StaticElectionConfig {} impl Membership for GeneralStaticCommittee where - TYPES: NodeType< - SignatureKey = PUBKEY, - VoteTokenType = StaticVoteToken, - ElectionConfigType = StaticElectionConfig, - >, + TYPES: NodeType, { /// Clone the public key and corresponding stake table for current elected committee fn get_committee_qc_stake_table(&self) -> Vec { @@ -97,24 +62,7 @@ where let res = self.nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } - /// Simply make the partial signature - fn make_vote_token( - &self, - view_number: TYPES::Time, - private_key: &::PrivateKey, - ) -> std::result::Result>, ElectionError> { - let pub_key = PUBKEY::from_private(private_key); - let entry = pub_key.get_stake_table_entry(1u64); - if !self.committee_nodes_with_stake.contains(&entry) { - return Ok(None); - } - let mut message: Vec = vec![]; - message.extend(view_number.to_le_bytes()); - // Change the length from 8 to 32 to make it consistent with other commitments, use defined constant? instead of 32. - message.extend_from_slice(&[0u8; 32 - 8]); - let signature = PUBKEY::sign(private_key, &message); - Ok(Some(StaticVoteToken { signature, pub_key })) - } + fn has_stake(&self, pub_key: &PUBKEY) -> bool { let entry = pub_key.get_stake_table_entry(1u64); self.committee_nodes_with_stake.contains(&entry) @@ -132,24 +80,6 @@ where } } - fn validate_vote_token( - &self, - pub_key: PUBKEY, - token: Checked, - ) -> Result, ElectionError> { - match token { - Checked::Valid(t) | Checked::Unchecked(t) => { - let entry = pub_key.get_stake_table_entry(1u64); - if self.committee_nodes_with_stake.contains(&entry) { - Ok(Checked::Valid(t)) - } else { - Ok(Checked::Inval(t)) - } - } - Checked::Inval(t) => Ok(Checked::Inval(t)), - } - } - fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { StaticElectionConfig { num_nodes } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 4667cf1282..04dda39a75 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -107,9 +107,7 @@ impl Storage for MemoryStorage { #[cfg(test)] mod test { - use crate::traits::election::static_committee::{ - GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, - }; + use crate::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; use super::*; use commit::Committable; @@ -143,7 +141,6 @@ mod test { type BlockHeader = VIDBlockHeader; type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; - type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DummyState; diff --git a/hotshot/src/types.rs b/hotshot/src/types.rs index 6956d8a795..8a9c8b94bf 100644 --- a/hotshot/src/types.rs +++ b/hotshot/src/types.rs @@ -4,4 +4,4 @@ mod handle; pub use event::{Event, EventType}; pub use handle::SystemContextHandle; pub use hotshot_signature_key::bn254; -pub use hotshot_types::{message::Message, traits::signature_key::SignatureKey, vote::VoteType}; +pub use hotshot_types::{message::Message, traits::signature_key::SignatureKey}; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 44c0d98304..39adc48084 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -61,12 +61,9 @@ pub struct ConsensusTaskState< I: NodeImplementation, A: ConsensusApi + 'static, > where - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange>, + CommitteeEx: ConsensusExchange>, + TimeoutEx: ConsensusExchange>, { /// The global task registry pub registry: GlobalRegistry, @@ -129,10 +126,8 @@ pub struct ConsensusTaskState< /// State for the vote collection task. This handles the building of a QC from a votes received pub struct VoteCollectionTaskState> where - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange>, + TimeoutEx: ConsensusExchange>, { /// the quorum exchange pub quorum_exchange: Arc>, @@ -162,10 +157,8 @@ where impl> TS for VoteCollectionTaskState where - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange>, + TimeoutEx: ConsensusExchange>, { } @@ -179,14 +172,8 @@ async fn vote_handle>( VoteCollectionTaskState, ) where - QuorumEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment>, - Membership = TYPES::Membership, - >, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, + TimeoutEx: ConsensusExchange>, { match event { HotShotEvent::QuorumVoteRecv(vote) => { @@ -295,24 +282,9 @@ impl< > ConsensusTaskState where TYPES::BlockHeader: BlockHeader, - QuorumEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment>, - Membership = TYPES::Membership, - >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, + CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, + TimeoutEx: ConsensusExchange, Membership = TYPES::Membership>, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -1331,12 +1303,9 @@ impl< A: ConsensusApi, > TS for ConsensusTaskState where - QuorumEx: - ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, - TimeoutEx: - ConsensusExchange, Commitment = Commitment>, + QuorumEx: ConsensusExchange>, + CommitteeEx: ConsensusExchange>, + TimeoutEx: ConsensusExchange>, { } @@ -1370,24 +1339,9 @@ pub async fn sequencing_consensus_handle< ) where TYPES::BlockHeader: BlockHeader, - QuorumEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment>, - Membership = TYPES::Membership, - >, - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, - TimeoutEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, + CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, + TimeoutEx: ConsensusExchange, Membership = TYPES::Membership>, { if let HotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0d43fbf62c..412e5d56b4 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -3,7 +3,7 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use bitvec::prelude::*; -use commit::{Commitment, Committable}; +use commit::Committable; use either::{Either, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -46,8 +46,7 @@ pub struct DATaskState< I: NodeImplementation, A: ConsensusApi + 'static, > where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange>, { /// The state's api pub api: A, @@ -76,8 +75,7 @@ pub struct DATaskState< /// Struct to maintain DA Vote Collection task state pub struct DAVoteCollectionTaskState> where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange>, { /// the committee exchange pub committee_exchange: Arc>, @@ -96,8 +94,7 @@ where } impl> TS for DAVoteCollectionTaskState where - CommitteeEx: - ConsensusExchange, Commitment = Commitment> + CommitteeEx: ConsensusExchange> { } @@ -110,12 +107,7 @@ async fn vote_handle>( DAVoteCollectionTaskState, ) where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, { match event { HotShotEvent::DAVoteRecv(vote) => { @@ -170,12 +162,7 @@ where impl, A: ConsensusApi + 'static> DATaskState where - CommitteeEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] @@ -463,8 +450,7 @@ where impl, A: ConsensusApi + 'static> TS for DATaskState where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange>, { } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 92439cb4fc..e39f2803db 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -3,7 +3,6 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use bitvec::prelude::*; -use commit::Commitment; use either::{Either, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -47,8 +46,7 @@ pub struct VIDTaskState< I: NodeImplementation, A: ConsensusApi + 'static, > where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange>, { /// The state's api pub api: A, @@ -77,8 +75,7 @@ pub struct VIDTaskState< /// Struct to maintain VID Vote Collection task state pub struct VIDVoteCollectionTaskState> where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange>, { /// the vid exchange pub vid_exchange: Arc>, @@ -97,8 +94,7 @@ where } impl> TS for VIDVoteCollectionTaskState where - VIDEx: - ConsensusExchange, Commitment = Commitment> + VIDEx: ConsensusExchange> { } @@ -113,12 +109,7 @@ async fn vote_handle( where TYPES: NodeType, I: NodeImplementation, - VIDEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + VIDEx: ConsensusExchange, Membership = TYPES::Membership>, { match event { HotShotEvent::VidVoteRecv(vote) => { @@ -174,12 +165,7 @@ where impl, A: ConsensusApi + 'static> VIDTaskState where - VIDEx: ConsensusExchange< - TYPES, - Message, - Commitment = Commitment, - Membership = TYPES::Membership, - >, + VIDEx: ConsensusExchange, Membership = TYPES::Membership>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] @@ -429,8 +415,7 @@ where impl, A: ConsensusApi + 'static> TS for VIDTaskState where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange>, { } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 6b5b027ba0..5c15bd40a9 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -6,7 +6,7 @@ use std::{marker::PhantomData, sync::Arc}; use hotshot::{ demo::DemoState, traits::{ - election::static_committee::{StaticCommittee, StaticElectionConfig, StaticVoteToken}, + election::static_committee::{StaticCommittee, StaticElectionConfig}, implementations::{ CombinedCommChannel, Libp2pCommChannel, Libp2pNetwork, MemoryCommChannel, MemoryNetwork, MemoryStorage, WebCommChannel, WebServerNetwork, @@ -46,7 +46,6 @@ impl NodeType for TestTypes { type BlockHeader = VIDBlockHeader; type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; - type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DemoState; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 54c76ad0f7..da09153c7c 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -3,9 +3,7 @@ use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; use hotshot::demo::DemoState; -use hotshot::traits::election::static_committee::{ - GeneralStaticCommittee, StaticElectionConfig, StaticVoteToken, -}; +use hotshot::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; use hotshot::traits::implementations::{ MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, }; @@ -20,7 +18,6 @@ use hotshot_types::traits::election::{ use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; use hotshot_types::traits::node_implementation::{ChannelMaps, Exchanges, NodeType}; -use hotshot_types::vote::ViewSyncVote; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, @@ -52,7 +49,6 @@ impl NodeType for Test { type BlockHeader = VIDBlockHeader; type BlockPayload = VIDBlockPayload; type SignatureKey = BLSPubKey; - type VoteTokenType = StaticVoteToken; type Transaction = VIDTransaction; type ElectionConfigType = StaticElectionConfig; type StateType = DemoState; @@ -68,8 +64,6 @@ pub type QuorumNetwork = MemoryCommChannel; pub type ViewSyncNetwork = MemoryCommChannel; pub type VIDNetwork = MemoryCommChannel; -pub type ThisViewSyncVote = ViewSyncVote; - impl NodeImplementation for TestImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< diff --git a/types/src/certificate.rs b/types/src/certificate.rs deleted file mode 100644 index 96b351631b..0000000000 --- a/types/src/certificate.rs +++ /dev/null @@ -1,174 +0,0 @@ -//! Provides two types of cerrtificates and their accumulators. - -use crate::{ - data::serialize_signature, - traits::{ - election::SignedCertificate, node_implementation::NodeType, signature_key::SignatureKey, - }, - vote::{ViewSyncData, ViewSyncVote, VoteType}, -}; -use bincode::Options; -use commit::{Commitment, Committable}; - -use espresso_systems_common::hotshot::tag; -use hotshot_utils::bincode::bincode_opts; -use serde::{Deserialize, Serialize}; -use std::{fmt::Debug, hash::Hash}; - -/// Certificate for view sync. -#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] -#[serde(bound(deserialize = ""))] -pub enum ViewSyncCertificate { - /// Pre-commit phase. - PreCommit(ViewSyncCertificateInternal), - /// Commit phase. - Commit(ViewSyncCertificateInternal), - /// Finalize phase. - Finalize(ViewSyncCertificateInternal), -} - -impl ViewSyncCertificate { - /// Serialize the certificate into bytes. - /// # Panics - /// If the serialization fails. - pub fn as_bytes(&self) -> Vec { - bincode_opts().serialize(&self).unwrap() - } -} - -/// A view sync certificate representing a quorum of votes for a particular view sync phase -#[derive(custom_debug::Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct ViewSyncCertificateInternal { - /// Relay the votes are intended for - pub relay: u64, - /// View number the network is attempting to synchronize on - pub round: TYPES::Time, - /// Aggregated QC - pub signatures: AssembledSignature, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -/// Enum representing whether a signatures is for a 'Yes' or 'No' or 'DA' or 'Genesis' certificate -pub enum AssembledSignature { - // (enum, signature) - /// These signatures are for a 'Yes' certificate - Yes(::QCType), - /// These signatures are for a 'No' certificate - No(::QCType), - /// These signatures are for a 'DA' certificate - DA(::QCType), - /// These signatures are for a 'VID' certificate - VID(::QCType), - /// These signatures are for a `Timeout` certificate - Timeout(::QCType), - /// These signatures are for genesis certificate - Genesis(), - /// These signatures are for ViewSyncPreCommit - ViewSyncPreCommit(::QCType), - /// These signatures are for ViewSyncCommit - ViewSyncCommit(::QCType), - /// These signatures are for ViewSyncFinalize - ViewSyncFinalize(::QCType), -} - -impl Committable for ViewSyncCertificate { - fn commit(&self) -> Commitment { - let signatures_bytes = serialize_signature(&self.signatures()); - - let mut builder = commit::RawCommitmentBuilder::new("View Sync Certificate Commitment") - .constant_str("justify_qc signatures") - .var_size_bytes(&signatures_bytes); - - let certificate_internal = match &self { - ViewSyncCertificate::PreCommit(certificate_internal) => { - builder = builder.var_size_field("View Sync Phase", "PreCommit".as_bytes()); - certificate_internal - } - ViewSyncCertificate::Commit(certificate_internal) => { - builder = builder.var_size_field("View Sync Phase", "Commit".as_bytes()); - certificate_internal - } - ViewSyncCertificate::Finalize(certificate_internal) => { - builder = builder.var_size_field("View Sync Phase", "Finalize".as_bytes()); - certificate_internal - } - }; - - builder = builder - .u64_field("Relay", certificate_internal.relay) - .u64_field("Round", *certificate_internal.round); - builder.finalize() - } - - fn tag() -> String { - // TODO ED Update this repo with a view sync tag - tag::QC.to_string() - } -} - -impl - SignedCertificate>> - for ViewSyncCertificate -{ - type Vote = ViewSyncVote; - - /// Build a QC from the threshold signature and commitment - fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self { - let certificate_internal = ViewSyncCertificateInternal { - round: vote.get_view(), - relay: vote.relay(), - signatures: signatures.clone(), - }; - match signatures { - AssembledSignature::ViewSyncPreCommit(_) => { - ViewSyncCertificate::PreCommit(certificate_internal) - } - AssembledSignature::ViewSyncCommit(_) => { - ViewSyncCertificate::Commit(certificate_internal) - } - AssembledSignature::ViewSyncFinalize(_) => { - ViewSyncCertificate::Finalize(certificate_internal) - } - _ => unimplemented!(), - } - } - - /// Get the view number. - fn view_number(&self) -> TYPES::Time { - match self.clone() { - ViewSyncCertificate::PreCommit(certificate_internal) - | ViewSyncCertificate::Commit(certificate_internal) - | ViewSyncCertificate::Finalize(certificate_internal) => certificate_internal.round, - } - } - - /// Get signatures. - fn signatures(&self) -> AssembledSignature { - match self.clone() { - ViewSyncCertificate::PreCommit(certificate_internal) - | ViewSyncCertificate::Commit(certificate_internal) - | ViewSyncCertificate::Finalize(certificate_internal) => { - certificate_internal.signatures - } - } - } - - // TODO (da) the following functions should be refactored into a QC-specific trait. - /// Get the leaf commitment. - fn leaf_commitment(&self) -> Commitment> { - todo!() - } - - /// Get whether the certificate is for the genesis block. - fn is_genesis(&self) -> bool { - todo!() - } - - /// To be used only for generating the genesis quorum certificate; will fail if used anywhere else - fn genesis() -> Self { - todo!() - } -} -impl Eq for ViewSyncCertificate {} diff --git a/types/src/data.rs b/types/src/data.rs index 075b7bce4c..4415a8a8c5 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,7 +4,6 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ - certificate::{AssembledSignature, ViewSyncCertificate}, simple_certificate::{QuorumCertificate2, TimeoutCertificate2}, traits::{ block_contents::BlockHeader, @@ -196,16 +195,6 @@ impl HasViewNumber for QuorumProposal { } } -impl HasViewNumber for ViewSyncCertificate { - fn get_view_number(&self) -> TYPES::Time { - match self { - ViewSyncCertificate::PreCommit(certificate_internal) - | ViewSyncCertificate::Commit(certificate_internal) - | ViewSyncCertificate::Finalize(certificate_internal) => certificate_internal.round, - } - } -} - /// A state change encoded in a leaf. /// /// [`DeltasType`] represents a [block](NodeType::BlockPayload), but it may not contain the block in @@ -461,66 +450,6 @@ pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitm .finalize() } -/// Serialization for the QC assembled signature -/// # Panics -/// if serialization fails -// TODO: Remove after new QC is integrated -pub fn serialize_signature(signature: &AssembledSignature) -> Vec { - let mut signatures_bytes = vec![]; - let signatures: Option<::QCType> = match &signature { - AssembledSignature::DA(signatures) => { - signatures_bytes.extend("DA".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::VID(signatures) => { - signatures_bytes.extend("VID".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::Yes(signatures) => { - signatures_bytes.extend("Yes".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::No(signatures) => { - signatures_bytes.extend("No".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::Timeout(signatures) => { - signatures_bytes.extend("Timeout".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::ViewSyncPreCommit(signatures) => { - signatures_bytes.extend("ViewSyncPreCommit".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::ViewSyncCommit(signatures) => { - signatures_bytes.extend("ViewSyncCommit".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::ViewSyncFinalize(signatures) => { - signatures_bytes.extend("ViewSyncFinalize".as_bytes()); - Some(signatures.clone()) - } - AssembledSignature::Genesis() => None, - }; - if let Some(sig) = signatures { - let (sig, proof) = TYPES::SignatureKey::get_sig_proof(&sig); - let proof_bytes = bincode_opts() - .serialize(&proof.as_bitslice()) - .expect("This serialization shouldn't be able to fail"); - signatures_bytes.extend("bitvec proof".as_bytes()); - signatures_bytes.extend(proof_bytes.as_slice()); - let sig_bytes = bincode_opts() - .serialize(&sig) - .expect("This serialization shouldn't be able to fail"); - signatures_bytes.extend("aggregated signature".as_bytes()); - signatures_bytes.extend(sig_bytes.as_slice()); - } else { - signatures_bytes.extend("genesis".as_bytes()); - } - - signatures_bytes -} - /// Serialization for the QC assembled signature /// # Panics /// if serialization fails diff --git a/types/src/lib.rs b/types/src/lib.rs index c6f8b9b080..991ae75a51 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -13,7 +13,6 @@ use displaydoc::Display; use std::{num::NonZeroUsize, time::Duration}; use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod block_impl; -pub mod certificate; pub mod consensus; pub mod data; pub mod error; @@ -23,7 +22,6 @@ pub mod simple_certificate; pub mod simple_vote; pub mod traits; pub mod utils; -pub mod vote; pub mod vote2; /// the type of consensus to run. Either: /// wait for a signal to start a view, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 758e06bfc6..e814bbf77b 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,14 +7,11 @@ use super::{ node_implementation::{NodeImplementation, NodeType}, signature_key::EncodedSignature, }; -use crate::{certificate::AssembledSignature, data::Leaf}; - -use crate::{ - traits::{ - network::{CommunicationChannel, NetworkMsg}, - signature_key::SignatureKey, - }, - vote::{ViewSyncData, VoteType}, +use crate::data::Leaf; + +use crate::traits::{ + network::{CommunicationChannel, NetworkMsg}, + signature_key::SignatureKey, }; use bincode::Options; use commit::{Commitment, CommitmentBounds, Committable}; @@ -149,41 +146,6 @@ pub trait ElectionConfig: { } -/// A certificate of some property which has been signed by a quroum of nodes. -pub trait SignedCertificate -where - Self: Send + Sync + Clone + Serialize + for<'a> Deserialize<'a>, - COMMITMENT: CommitmentBounds, - TOKEN: VoteToken, -{ - /// `VoteType` that is used in this certificate - type Vote: VoteType; - - /// Build a QC from the threshold signature and commitment - // TODO ED Rename this function and rework this function parameters - // Assumes last vote was valid since it caused a QC to form. - // Removes need for relay on other cert specific fields - fn create_certificate(signatures: AssembledSignature, vote: Self::Vote) -> Self; - - /// Get the view number. - fn view_number(&self) -> TIME; - - /// Get signatures. - fn signatures(&self) -> AssembledSignature; - - // TODO (da) the following functions should be refactored into a QC-specific trait. - // TODO ED Make an issue for this - - /// Get the leaf commitment. - fn leaf_commitment(&self) -> COMMITMENT; - - /// Get whether the certificate is for the genesis block. - fn is_genesis(&self) -> bool; - - /// To be used only for generating the genesis quorum certificate; will fail if used anywhere else - fn genesis() -> Self; -} - /// A protocol for determining membership in and participating in a committee. pub trait Membership: Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static @@ -209,17 +171,6 @@ pub trait Membership: /// The members of the committee for view `view_number`. fn get_committee(&self, view_number: TYPES::Time) -> BTreeSet; - /// Attempts to generate a vote token for self - /// - /// Returns `None` if the number of seats would be zero - /// # Errors - /// TODO tbd - fn make_vote_token( - &self, - view_number: TYPES::Time, - priv_key: &::PrivateKey, - ) -> Result, ElectionError>; - /// Check if a key has stake fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; @@ -230,16 +181,6 @@ pub trait Membership: pub_key: &TYPES::SignatureKey, ) -> Option<::StakeTableEntry>; - /// Checks the claims of a received vote token - /// - /// # Errors - /// TODO tbd - fn validate_vote_token( - &self, - pub_key: TYPES::SignatureKey, - token: Checked, - ) -> Result, ElectionError>; - /// Returns the number of total nodes in the committee fn total_nodes(&self) -> usize; @@ -260,8 +201,6 @@ pub trait ConsensusExchange: Send + Sync { type Membership: Membership; /// Network used by [`Membership`](Self::Membership) to communicate. type Networking: CommunicationChannel; - /// Commitments to items which are the subject of proposals and decisions. - type Commitment: CommitmentBounds; /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). fn create( @@ -372,7 +311,6 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, @@ -466,7 +404,6 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, @@ -573,7 +510,6 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment>; fn create( entries: Vec<::StakeTableEntry>, @@ -657,7 +593,6 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment>; fn create( entries: Vec<::StakeTableEntry>, @@ -748,7 +683,6 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; fn create( entries: Vec<::StakeTableEntry>, @@ -783,9 +717,3 @@ impl< &self.private_key } } - -/// Testable implementation of a [`Membership`]. Will expose a method to generate a vote token used for testing. -pub trait TestableElection: Membership { - /// Generate a vote token used for testing. - fn generate_test_vote_token() -> TYPES::VoteTokenType; -} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 0968c02040..0f3d8e1e8e 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -7,7 +7,7 @@ use super::{ block_contents::{BlockHeader, Transaction}, election::{ CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, - TimeoutExchange, TimeoutExchangeType, VIDExchangeType, ViewSyncExchangeType, VoteToken, + TimeoutExchange, TimeoutExchangeType, VIDExchangeType, ViewSyncExchangeType, }, network::{CommunicationChannel, NetworkMsg, TestableNetworkingImplementation}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -625,8 +625,6 @@ pub trait NodeType: type BlockPayload: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; - /// The vote token that this hotshot setup is using. - type VoteTokenType: VoteToken; /// The transaction type that this hotshot setup is using. /// /// This should be equal to `BlockPayload::Transaction` diff --git a/types/src/vote.rs b/types/src/vote.rs deleted file mode 100644 index a7af6b6a08..0000000000 --- a/types/src/vote.rs +++ /dev/null @@ -1,339 +0,0 @@ -//! Vote and vote accumulator types -//! -//! This module contains types used to represent the various types of votes that `HotShot` nodes -//! can send, and vote accumulator that converts votes into certificates. - -use crate::{ - certificate::AssembledSignature, - traits::{ - election::{VoteData, VoteToken}, - node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, - }, -}; -use bincode::Options; -use bitvec::prelude::*; -use commit::{Commitment, CommitmentBounds, Committable}; -use either::Either; -use ethereum_types::U256; -use hotshot_utils::bincode::bincode_opts; -use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeMap, HashMap}, - fmt::Debug, - hash::Hash, - num::NonZeroU64, -}; -use tracing::error; - -/// The vote sent by consensus messages. -pub trait VoteType: - Debug + Clone + 'static + Serialize + for<'a> Deserialize<'a> + Send + Sync + PartialEq -{ - /// Get the view this vote was cast for - fn get_view(&self) -> TYPES::Time; - /// Get the signature key associated with this vote - fn get_key(&self) -> TYPES::SignatureKey; - /// Get the signature associated with this vote - fn get_signature(&self) -> EncodedSignature; - /// Get the data this vote was signed over - fn get_data(&self) -> VoteData; - /// Get the vote token of this vote - fn get_vote_token(&self) -> TYPES::VoteTokenType; -} - -/// The internals of a view sync vote -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct ViewSyncVoteInternal { - /// The public key associated with the relay. - pub relay_pub_key: EncodedPublicKey, - /// The relay this vote is intended for - pub relay: u64, - /// The view number we are trying to sync on - pub round: TYPES::Time, - /// This node's signature over the VoteData - pub signature: (EncodedPublicKey, EncodedSignature), - /// The vote token generated by this replica - pub vote_token: TYPES::VoteTokenType, - /// The vote data this vote is signed over - pub vote_data: VoteData>>, -} - -/// The data View Sync votes are signed over -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -#[serde(bound(deserialize = ""))] -pub struct ViewSyncData { - /// The relay this vote is intended for - pub relay: EncodedPublicKey, - /// The view number we are trying to sync on - pub round: TYPES::Time, -} - -impl Committable for ViewSyncData { - fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("Quorum Certificate Commitment"); - - builder - .var_size_field("Relay public key", &self.relay.0) - .u64(*self.round) - .finalize() - } -} - -/// Votes to synchronize the network on a single view -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub enum ViewSyncVote { - /// PreCommit vote - PreCommit(ViewSyncVoteInternal), - /// Commit vote - Commit(ViewSyncVoteInternal), - /// Finalize vote - Finalize(ViewSyncVoteInternal), -} - -impl ViewSyncVote { - /// Get the encoded signature. - pub fn signature(&self) -> EncodedSignature { - match &self { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal.signature.1.clone(), - } - } - /// Get the signature key. - /// # Panics - /// If the deserialization fails. - pub fn signature_key(&self) -> TYPES::SignatureKey { - let encoded = match &self { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal.signature.0.clone(), - }; - ::from_bytes(&encoded).unwrap() - } - /// Get the relay. - pub fn relay(&self) -> u64 { - match &self { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal.relay, - } - } - /// Get the round number. - pub fn round(&self) -> TYPES::Time { - match &self { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal.round, - } - } -} - -impl VoteType>> for ViewSyncVote { - fn get_view(&self) -> TYPES::Time { - match self { - ViewSyncVote::PreCommit(v) | ViewSyncVote::Commit(v) | ViewSyncVote::Finalize(v) => { - v.round - } - } - } - fn get_key(&self) -> ::SignatureKey { - self.signature_key() - } - - fn get_signature(&self) -> EncodedSignature { - self.signature() - } - fn get_data(&self) -> VoteData>> { - match self { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal.vote_data.clone(), - } - } - - fn get_vote_token(&self) -> ::VoteTokenType { - match self { - ViewSyncVote::PreCommit(vote_internal) - | ViewSyncVote::Commit(vote_internal) - | ViewSyncVote::Finalize(vote_internal) => vote_internal.vote_token.clone(), - } - } -} - -/// Accumulates view sync votes -pub struct ViewSyncVoteAccumulator { - /// Map of all pre_commit signatures accumlated so far - pub pre_commit_vote_outcomes: VoteMap>, TYPES::VoteTokenType>, - /// Map of all ommit signatures accumlated so far - pub commit_vote_outcomes: VoteMap>, TYPES::VoteTokenType>, - /// Map of all finalize signatures accumlated so far - pub finalize_vote_outcomes: VoteMap>, TYPES::VoteTokenType>, - - /// A quorum's worth of stake, generally 2f + 1 - pub success_threshold: NonZeroU64, - /// A quorum's failure threshold, generally f + 1 - pub failure_threshold: NonZeroU64, - /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, -} - -impl ViewSyncVoteAccumulator { - /// append a vote - /// # Panics - /// if serialize fails - #[allow(clippy::too_many_lines)] - pub fn append( - mut self, - vote: &ViewSyncVote, - vote_node_id: usize, - stake_table_entries: Vec<::StakeTableEntry>, - ) -> Either> { - let (VoteData::ViewSyncPreCommit(vote_commitment) - | VoteData::ViewSyncCommit(vote_commitment) - | VoteData::ViewSyncFinalize(vote_commitment)) = vote.get_data() - else { - return Either::Left(self); - }; - - // error!("Vote is {:?}", vote.clone()); - - let encoded_key = vote.get_key().to_bytes(); - - // Deserialize the signature so that it can be assembeld into a QC - // TODO ED Update this once we've gotten rid of EncodedSignature - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); - - let (pre_commit_stake_casted, pre_commit_vote_map) = self - .pre_commit_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - // Check for duplicate vote - if pre_commit_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - let (commit_stake_casted, commit_vote_map) = self - .commit_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - if commit_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - let (finalize_stake_casted, finalize_vote_map) = self - .finalize_vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (0, BTreeMap::new())); - - if finalize_vote_map.contains_key(&encoded_key) { - return Either::Left(self); - } - - // update the active_keys and sig_lists - // TODO ED Possible bug where a node sends precommit vote and then commit vote after - // precommit cert is formed, their commit vote won't be counted because of this check - // Probably need separate signers vecs. - if self.signers.get(vote_node_id).as_deref() == Some(&true) { - error!("node id already in signers"); - return Either::Left(self); - } - self.signers.set(vote_node_id, true); - self.sig_lists.push(original_signature); - - match vote.get_data() { - VoteData::ViewSyncPreCommit(_) => { - *pre_commit_stake_casted += u64::from(vote.get_vote_token().vote_count()); - pre_commit_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - } - VoteData::ViewSyncCommit(_) => { - *commit_stake_casted += u64::from(vote.get_vote_token().vote_count()); - commit_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - } - VoteData::ViewSyncFinalize(_) => { - *finalize_stake_casted += u64::from(vote.get_vote_token().vote_count()); - finalize_vote_map.insert( - encoded_key, - (vote.get_signature(), vote.get_data(), vote.get_vote_token()), - ); - } - _ => unimplemented!(), - } - - if *pre_commit_stake_casted >= u64::from(self.failure_threshold) { - let real_qc_pp = ::get_public_parameter( - stake_table_entries, - U256::from(self.failure_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - - self.pre_commit_vote_outcomes - .remove(&vote_commitment) - .unwrap(); - return Either::Right(AssembledSignature::ViewSyncPreCommit(real_qc_sig)); - } - - if *commit_stake_casted >= u64::from(self.success_threshold) { - let real_qc_pp = ::get_public_parameter( - stake_table_entries, - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - self.commit_vote_outcomes.remove(&vote_commitment).unwrap(); - return Either::Right(AssembledSignature::ViewSyncCommit(real_qc_sig)); - } - - if *finalize_stake_casted >= u64::from(self.success_threshold) { - let real_qc_pp = ::get_public_parameter( - stake_table_entries, - U256::from(self.success_threshold.get()), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], - ); - self.finalize_vote_outcomes - .remove(&vote_commitment) - .unwrap(); - return Either::Right(AssembledSignature::ViewSyncFinalize(real_qc_sig)); - } - - Either::Left(self) - } -} - -/// Mapping of commitments to vote tokens by key. -type VoteMap = HashMap< - COMMITMENT, - ( - u64, - BTreeMap, TOKEN)>, - ), ->; From d312b81367c51e3c51597806a3a098d5e7119070 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 10:04:29 -0500 Subject: [PATCH 0399/1393] remove 2 from names --- hotshot/src/lib.rs | 6 ++--- hotshot/src/traits/storage/atomic_storage.rs | 10 +++---- .../atomic_storage/dual_key_value_store.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 4 +-- hotshot/src/types/handle.rs | 4 +-- task-impls/src/consensus.rs | 24 ++++++++--------- task-impls/src/da.rs | 12 ++++----- task-impls/src/events.rs | 26 +++++++++---------- task-impls/src/vid.rs | 10 +++---- testing/src/overall_safety_task.rs | 6 ++--- testing/src/task_helpers.rs | 4 +-- testing/tests/consensus_task.rs | 4 +-- testing/tests/da_task.rs | 4 +-- testing/tests/vid_task.rs | 4 +-- types/src/consensus.rs | 4 +-- types/src/data.rs | 12 ++++----- types/src/event.rs | 4 +-- types/src/message.rs | 23 ++++++++-------- types/src/simple_certificate.rs | 13 +++++----- types/src/simple_vote.rs | 6 ++--- types/src/traits/consensus_api.rs | 4 +-- types/src/traits/storage.rs | 6 ++--- 22 files changed, 93 insertions(+), 99 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6c885ab5bd..f4fe0d1b95 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -51,7 +51,7 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - simple_certificate::QuorumCertificate2, + simple_certificate::QuorumCertificate, traits::{election::ViewSyncExchangeType, node_implementation::TimeoutEx}, }; @@ -235,7 +235,7 @@ impl> SystemContext { self.inner .internal_event_stream .publish(HotShotEvent::QCFormed(either::Left( - QuorumCertificate2::genesis(), + QuorumCertificate::genesis(), ))) .await; } @@ -941,7 +941,7 @@ impl HotShotInitializer { /// If we are unable to apply the genesis block to the default state pub fn from_genesis(genesis_payload: TYPES::BlockPayload) -> Result> { let time = TYPES::Time::genesis(); - let justify_qc = QuorumCertificate2::::genesis(); + let justify_qc = QuorumCertificate::::genesis(); Ok(Self { inner: Leaf::new(time, justify_qc, genesis_payload), diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs index 2f141b8f61..e3903fbf32 100644 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -34,7 +34,7 @@ where blocks: HashMapStore, STATE::BlockPayload>, /// The [`QuorumCertificate`]s stored by this [`AtomicStorage`] - qcs: DualKeyValueStore>, + qcs: DualKeyValueStore>, /// The [`Leaf`s stored by this [`AtomicStorage`] /// @@ -149,12 +149,12 @@ impl Storage for AtomicStorage { async fn get_qc( &self, hash: &Commitment, - ) -> StorageResult>> { + ) -> StorageResult>> { Ok(self.inner.qcs.load_by_key_1_ref(hash).await) } #[instrument(name = "AtomicStorage::get_newest_qc", skip_all)] - async fn get_newest_qc(&self) -> StorageResult>> { + async fn get_newest_qc(&self) -> StorageResult>> { Ok(self.inner.qcs.load_latest(|qc| qc.get_view_number()).await) } @@ -162,7 +162,7 @@ impl Storage for AtomicStorage { async fn get_qc_for_view( &self, view: TYPES::Time, - ) -> StorageResult>> { + ) -> StorageResult>> { Ok(self.inner.qcs.load_by_key_2(view).await) } @@ -242,7 +242,7 @@ impl<'a, STATE: StateContents + 'static> StorageUpdater<'a, STATE> } #[instrument(name = "AtomicStorage::insert_qc", skip_all)] - async fn insert_qc(&mut self, qc: QuorumCertificate2) -> StorageResult { + async fn insert_qc(&mut self, qc: QuorumCertificate) -> StorageResult { self.inner.qcs.insert(qc).await } diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs index 9fa25095d2..e947b70127 100644 --- a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs +++ b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs @@ -180,7 +180,7 @@ pub trait DualKeyValue: Serialize + DeserializeOwned + Clone { fn key_2(&self) -> Self::Key2; } -impl DualKeyValue for QuorumCertificate2 { +impl DualKeyValue for QuorumCertificate { type Key1 = Commitment; type Key2 = ViewNumber; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 04dda39a75..db59363198 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -115,7 +115,7 @@ mod test { use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, data::{fake_commitment, genesis_proposer_id, Leaf, ViewNumber}, - simple_certificate::QuorumCertificate2, + simple_certificate::QuorumCertificate, traits::{node_implementation::NodeType, state::dummy::DummyState, state::ConsensusTime}, }; use std::{fmt::Debug, hash::Hash, marker::PhantomData}; @@ -159,7 +159,7 @@ mod test { }; let commit = data.commit(); StoredView::from_qc_block_and_state( - QuorumCertificate2 { + QuorumCertificate { is_genesis: view_number == ::Time::genesis(), data, vote_commitment: commit, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 3483a07b69..138dc659eb 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,6 +1,6 @@ //! Provides an event-streaming handle for a [`HotShot`] running in the background -use crate::QuorumCertificate2; +use crate::QuorumCertificate; use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; @@ -143,7 +143,7 @@ impl + 'static> SystemContextHandl if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { if anchor_leaf.view_number == TYPES::Time::genesis() { let leaf = Leaf::from_stored_view(anchor_leaf); - let mut qc = QuorumCertificate2::::genesis(); + let mut qc = QuorumCertificate::::genesis(); qc.data = QuorumData { leaf_commit: leaf.commit(), }; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 39adc48084..d9d9bf6cb2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -21,10 +21,8 @@ use hotshot_types::{ data::{Leaf, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal}, - simple_certificate::{ - DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, - }, - simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote2}, + simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, + simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -109,10 +107,10 @@ pub struct ConsensusTaskState< pub output_event_stream: ChannelStream>, /// All the DA certs we've received for current and future views. - pub da_certs: HashMap>, + pub da_certs: HashMap>, /// All the VID certs we've received for current and future views. - pub vid_certs: HashMap>, + pub vid_certs: HashMap>, /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync @@ -137,15 +135,15 @@ where #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< - VoteAccumulator2, QuorumCertificate2>, - QuorumCertificate2, + VoteAccumulator2, QuorumCertificate>, + QuorumCertificate, >, /// Accumulator for votes #[allow(clippy::type_complexity)] pub timeout_accumulator: Either< - VoteAccumulator2, TimeoutCertificate2>, - TimeoutCertificate2, + VoteAccumulator2, TimeoutCertificate>, + TimeoutCertificate, >, /// View which this vote collection task is collecting votes in pub cur_view: TYPES::Time, @@ -1156,7 +1154,7 @@ where return; } - let vote = TimeoutVote2::create_signed_vote( + let vote = TimeoutVote::create_signed_vote( TimeoutData { view }, view, self.timeout_exchange.public_key(), @@ -1184,9 +1182,9 @@ where #[allow(clippy::too_many_lines)] pub async fn publish_proposal_if_able( &mut self, - _qc: QuorumCertificate2, + _qc: QuorumCertificate, view: TYPES::Time, - timeout_certificate: Option>, + timeout_certificate: Option>, ) -> bool { if !self.quorum_exchange.is_leader(view) { error!( diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 412e5d56b4..c49b03ca14 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -12,12 +12,12 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::simple_certificate::DACertificate2; +use hotshot_types::simple_certificate::DACertificate; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, message::{Message, Proposal}, - simple_vote::{DAData, DAVote2}, + simple_vote::{DAData, DAVote}, traits::{ consensus_api::ConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership}, @@ -81,10 +81,8 @@ where pub committee_exchange: Arc>, #[allow(clippy::type_complexity)] /// Accumulates DA votes - pub accumulator: Either< - VoteAccumulator2, DACertificate2>, - DACertificate2, - >, + pub accumulator: + Either, DACertificate>, DACertificate>, /// the current view pub cur_view: TYPES::Time, /// event stream for channel events @@ -229,7 +227,7 @@ where return None; } // Generate and send vote - let vote = DAVote2::create_signed_vote( + let vote = DAVote::create_signed_vote( DAData { payload_commit: payload_commitment, }, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9c68f75a93..1f1216adf8 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -6,11 +6,11 @@ use hotshot_types::{ data::{DAProposal, Leaf, QuorumProposal, VidDisperse}, message::Proposal, simple_certificate::{ - DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote2, QuorumVote, TimeoutVote2, VIDVote2, ViewSyncCommitVote, ViewSyncFinalizeVote, + DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::node_implementation::NodeType, @@ -26,15 +26,15 @@ pub enum HotShotEvent { /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote), /// A timeout vote recevied from the network; handled by consensus task - TimeoutVoteRecv(TimeoutVote2), + TimeoutVoteRecv(TimeoutVote), /// Send a timeout vote to the network; emitted by consensus task replicas - TimeoutVoteSend(TimeoutVote2), + TimeoutVoteSend(TimeoutVote), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task - DAVoteRecv(DAVote2), + DAVoteRecv(DAVote), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task - DACRecv(DACertificate2), + DACRecv(DACertificate), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal @@ -42,11 +42,11 @@ pub enum HotShotEvent { /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal - DAVoteSend(DAVote2), + DAVoteSend(DAVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(Either, TimeoutCertificate2>), + QCFormed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task - DACSend(DACertificate2, TYPES::SignatureKey), + DACSend(DACertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks ViewChange(TYPES::Time), /// Timeout for the view sync protocol; emitted by a replica in the view sync task @@ -105,17 +105,17 @@ pub enum HotShotEvent { /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal /// /// Like [`DAVoteSend`] - VidVoteSend(VIDVote2), + VidVoteSend(VIDVote), /// A VID vote has been received by the network; handled by the DA task /// /// Like [`DAVoteRecv`] - VidVoteRecv(VIDVote2), + VidVoteRecv(VIDVote), /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task /// /// Like [`DACSend`] - VidCertSend(VIDCertificate2, TYPES::SignatureKey), + VidCertSend(VIDCertificate, TYPES::SignatureKey), /// A VID cert has been recieved by the network; handled by the consensus task /// /// Like [`DACRecv`] - VidCertRecv(VIDCertificate2), + VidCertRecv(VIDCertificate), } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index e39f2803db..8a4133027b 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -25,8 +25,8 @@ use hotshot_types::{ utils::ViewInner, }; use hotshot_types::{ - simple_certificate::VIDCertificate2, - simple_vote::{VIDData, VIDVote2}, + simple_certificate::VIDCertificate, + simple_vote::{VIDData, VIDVote}, traits::network::CommunicationChannel, vote2::{HasViewNumber, VoteAccumulator2}, }; @@ -82,8 +82,8 @@ where #[allow(clippy::type_complexity)] /// Accumulates VID votes pub accumulator: Either< - VoteAccumulator2, VIDCertificate2>, - VIDCertificate2, + VoteAccumulator2, VIDCertificate>, + VIDCertificate, >, /// the current view pub cur_view: TYPES::Time, @@ -304,7 +304,7 @@ where } // Generate and send vote - let vote = VIDVote2::create_signed_vote( + let vote = VIDVote::create_signed_vote( VIDData { payload_commit: payload_commitment, }, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 0c7d86c1fd..1449edf8a5 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -20,7 +20,7 @@ use hotshot_types::{ data::Leaf, error::RoundTimedoutState, event::{Event, EventType}, - simple_certificate::QuorumCertificate2, + simple_certificate::QuorumCertificate, traits::node_implementation::NodeType, }; use snafu::Snafu; @@ -92,7 +92,7 @@ pub struct RoundResult { /// id -> (leaf, qc) // TODO GG: isn't it infeasible to store a Vec>? #[allow(clippy::type_complexity)] - success_nodes: HashMap>, QuorumCertificate2)>, + success_nodes: HashMap>, QuorumCertificate)>, /// Nodes that failed to commit this round pub failed_nodes: HashMap>>>, @@ -186,7 +186,7 @@ impl RoundResult { pub fn insert_into_result( &mut self, idx: usize, - result: (Vec>, QuorumCertificate2), + result: (Vec>, QuorumCertificate), maybe_block_size: Option, ) -> Option> { self.success_nodes.insert(idx as u64, result.clone()); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 6a0ea334b8..ec72a3dd00 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -17,7 +17,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::{Message, Proposal}, - simple_certificate::QuorumCertificate2, + simple_certificate::QuorumCertificate, traits::{ block_contents::BlockHeader, consensus_api::ConsensusSharedApi, @@ -129,7 +129,7 @@ async fn build_quorum_proposal_and_signature( let proposal = QuorumProposal:: { block_header, view_number: ViewNumber::new(view), - justify_qc: QuorumCertificate2::genesis(), + justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, proposer_id: leaf.proposer_id, }; diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 201b1008d2..050455903b 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -88,7 +88,7 @@ async fn build_vote( async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::simple_certificate::QuorumCertificate2; + use hotshot_types::simple_certificate::QuorumCertificate; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -101,7 +101,7 @@ async fn test_consensus_task() { let mut output = HashMap::new(); // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = QuorumCertificate2::::genesis(); + let qc = QuorumCertificate::::genesis(); let proposal = build_quorum_proposal(&handle, &private_key, 1).await; input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 1c4fa63f91..7d3ac6734d 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -8,7 +8,7 @@ use hotshot_testing::{ use hotshot_types::{ block_impl::VIDTransaction, data::{DAProposal, VidSchemeTrait, ViewNumber}, - simple_vote::{DAData, DAVote2}, + simple_vote::{DAData, DAVote}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, @@ -81,7 +81,7 @@ async fn test_da_task() { ); output.insert(HotShotEvent::SendPayloadCommitment(block.commit()), 1); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); - let da_vote = DAVote2::create_signed_vote( + let da_vote = DAVote::create_signed_vote( DAData { payload_commit: block.commit(), }, diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 67a3bc7b2e..f8c81c9217 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -13,7 +13,7 @@ use hotshot_types::{ node_implementation::ExchangesType, state::ConsensusTime, }, }; -use hotshot_types::{simple_vote::VIDVote2, traits::election::VIDExchangeType}; +use hotshot_types::{simple_vote::VIDVote, traits::election::VIDExchangeType}; use std::collections::HashMap; use std::marker::PhantomData; @@ -86,7 +86,7 @@ async fn test_vid_task() { 1, ); - let vid_vote = VIDVote2::create_signed_vote( + let vid_vote = VIDVote::create_signed_vote( hotshot_types::simple_vote::VIDData { payload_commit: block.commit(), }, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 6af690228d..6e47632ff8 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -9,7 +9,7 @@ use displaydoc::Display; use crate::{ data::Leaf, error::HotShotError, - simple_certificate::QuorumCertificate2, + simple_certificate::QuorumCertificate, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, @@ -57,7 +57,7 @@ pub struct Consensus { pub locked_view: TYPES::Time, /// the highqc per spec - pub high_qc: QuorumCertificate2, + pub high_qc: QuorumCertificate, /// A reference to the metrics trait pub metrics: Arc, diff --git a/types/src/data.rs b/types/src/data.rs index 4415a8a8c5..f4efaeaa2c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,7 +4,7 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ - simple_certificate::{QuorumCertificate2, TimeoutCertificate2}, + simple_certificate::{QuorumCertificate, TimeoutCertificate}, traits::{ block_contents::BlockHeader, node_implementation::NodeType, @@ -168,10 +168,10 @@ pub struct QuorumProposal { pub view_number: TYPES::Time, /// Per spec, justification - pub justify_qc: QuorumCertificate2, + pub justify_qc: QuorumCertificate, /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view - pub timeout_certificate: Option>, + pub timeout_certificate: Option>, /// the propser id pub proposer_id: EncodedPublicKey, @@ -264,7 +264,7 @@ pub struct Leaf { pub view_number: TYPES::Time, /// Per spec, justification - pub justify_qc: QuorumCertificate2, + pub justify_qc: QuorumCertificate, /// The hash of the parent `Leaf` /// So we can ask if it extends @@ -325,7 +325,7 @@ impl Leaf { /// Create a new leaf from its components. pub fn new( view_number: TYPES::Time, - justify_qc: QuorumCertificate2, + justify_qc: QuorumCertificate, payload: TYPES::BlockPayload, ) -> Self { Self { @@ -351,7 +351,7 @@ impl Leaf { self.block_header.block_number() } /// The QC linking this leaf to its parent in the chain. - pub fn get_justify_qc(&self) -> QuorumCertificate2 { + pub fn get_justify_qc(&self) -> QuorumCertificate { self.justify_qc.clone() } /// Commitment to this leaf's parent. diff --git a/types/src/event.rs b/types/src/event.rs index 6a8e535c27..666a45086d 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,7 +1,7 @@ //! Events that a `HotShot` instance can emit use crate::{ - data::Leaf, error::HotShotError, simple_certificate::QuorumCertificate2, + data::Leaf, error::HotShotError, simple_certificate::QuorumCertificate, traits::node_implementation::NodeType, }; @@ -43,7 +43,7 @@ pub enum EventType { /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf /// before it using - qc: Arc>, + qc: Arc>, /// Optional information of the number of transactions in the block, for logging purposes. block_size: Option, }, diff --git a/types/src/message.rs b/types/src/message.rs index c9fa478272..15193e9d6b 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -5,12 +5,11 @@ use crate::data::QuorumProposal; use crate::simple_certificate::{ - DACertificate2, VIDCertificate2, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + DACertificate, VIDCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }; use crate::simple_vote::{ - DAVote2, TimeoutVote2, VIDVote2, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, + DAVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; use crate::vote2::HasViewNumber; use crate::{ @@ -198,15 +197,15 @@ pub enum ProcessedCommitteeConsensusMessage { /// Proposal for the DA committee. DAProposal(Proposal>, TYPES::SignatureKey), /// Vote from the DA committee. - DAVote(DAVote2, TYPES::SignatureKey), + DAVote(DAVote, TYPES::SignatureKey), /// Certificate for the DA. - DACertificate(DACertificate2, TYPES::SignatureKey), + DACertificate(DACertificate, TYPES::SignatureKey), /// VID dispersal data. Like [`DAProposal`] VidDisperseMsg(Proposal>, TYPES::SignatureKey), /// Vote from VID storage node. Like [`DAVote`] - VidVote(VIDVote2, TYPES::SignatureKey), + VidVote(VIDVote, TYPES::SignatureKey), /// Certificate for VID. Like [`DACertificate`] - VidCertificate(VIDCertificate2, TYPES::SignatureKey), + VidCertificate(VIDCertificate, TYPES::SignatureKey), } impl From> @@ -312,7 +311,7 @@ pub enum GeneralConsensusMessage { ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), /// Message with a Timeout vote - TimeoutVote(TimeoutVote2), + TimeoutVote(TimeoutVote), /// Internal ONLY message indicating a view interrupt. #[serde(skip)] @@ -327,10 +326,10 @@ pub enum CommitteeConsensusMessage { DAProposal(Proposal>), /// vote for data availability committee - DAVote(DAVote2), + DAVote(DAVote), /// Certificate data is available - DACertificate(DACertificate2), + DACertificate(DACertificate), /// Initiate VID dispersal. /// @@ -341,11 +340,11 @@ pub enum CommitteeConsensusMessage { /// Vote for VID disperse data /// /// Like [`DAVote`]. - VidVote(VIDVote2), + VidVote(VIDVote), /// VID certificate data is available /// /// Like [`DACertificate`] - VidCertificate(VIDCertificate2), + VidCertificate(VIDCertificate), } /// Messages for sequencing consensus. diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 8827cbbbd5..9f5321b174 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -93,7 +93,7 @@ impl HasViewNumber self.view_number } } -impl Display for QuorumCertificate2 { +impl Display for QuorumCertificate { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, @@ -103,7 +103,7 @@ impl Display for QuorumCertificate2 { } } -impl QuorumCertificate2 { +impl QuorumCertificate { #[must_use] /// Creat the Genisis certificate pub fn genesis() -> Self { @@ -123,14 +123,13 @@ impl QuorumCertificate2 { } /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` -pub type QuorumCertificate2 = SimpleCertificate>; +pub type QuorumCertificate = SimpleCertificate>; /// Type alias for a DA certificate over `DAData` -pub type DACertificate2 = - SimpleCertificate::BlockPayload>>; +pub type DACertificate = SimpleCertificate::BlockPayload>>; /// Type alias for a Timeout certificate over a view number -pub type TimeoutCertificate2 = SimpleCertificate>; +pub type TimeoutCertificate = SimpleCertificate>; /// type alias for a VID certificate -pub type VIDCertificate2 = +pub type VIDCertificate = SimpleCertificate::BlockPayload>>; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 1565a08ea9..46c50a1814 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -208,11 +208,11 @@ impl = SimpleVote>; /// DA vote type alias -pub type DAVote2 = SimpleVote::BlockPayload>>; +pub type DAVote = SimpleVote::BlockPayload>>; /// VID vote type alias -pub type VIDVote2 = SimpleVote::BlockPayload>>; +pub type VIDVote = SimpleVote::BlockPayload>>; /// Timeout Vote type alias -pub type TimeoutVote2 = SimpleVote>; +pub type TimeoutVote = SimpleVote>; /// View Sync Commit Vote type alias pub type ViewSyncCommitVote = SimpleVote>; /// View Sync Pre Commit Vote type alias diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 508c5cec6e..a05f01f297 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -5,7 +5,7 @@ use crate::{ error::HotShotError, event::{Event, EventType}, message::{DataMessage, SequencingMessage}, - simple_certificate::QuorumCertificate2, + simple_certificate::QuorumCertificate, traits::{ network::NetworkError, node_implementation::{NodeImplementation, NodeType}, @@ -92,7 +92,7 @@ pub trait ConsensusSharedApi>: Sen &self, view_number: TYPES::Time, leaf_views: Vec>, - decide_qc: QuorumCertificate2, + decide_qc: QuorumCertificate, ) { self.send_event(Event { view_number, diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 8adfbaf6ef..207c4cc43b 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -2,7 +2,7 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ - data::Leaf, simple_certificate::QuorumCertificate2, traits::BlockPayload, vote2::HasViewNumber, + data::Leaf, simple_certificate::QuorumCertificate, traits::BlockPayload, vote2::HasViewNumber, }; use async_trait::async_trait; use commit::Commitment; @@ -123,7 +123,7 @@ pub struct StoredView { /// The parent of this view pub parent: Commitment>, /// The justify QC of this view. See the hotstuff paper for more information on this. - pub justify_qc: QuorumCertificate2, + pub justify_qc: QuorumCertificate, /// Block header. pub block_header: TYPES::BlockHeader, /// Optional block payload. @@ -149,7 +149,7 @@ where /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a /// parent. pub fn from_qc_block_and_state( - qc: QuorumCertificate2, + qc: QuorumCertificate, block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, From 704624bb97561a14f98ac035bacad7fa555a7ded Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 10:06:28 -0500 Subject: [PATCH 0400/1393] rename vote2 -> vote --- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 4 ++-- task-impls/src/network.rs | 2 +- task-impls/src/vid.rs | 2 +- task-impls/src/view_sync.rs | 10 +++++----- testing/src/task_helpers.rs | 2 +- testing/tests/consensus_task.rs | 2 +- types/src/data.rs | 2 +- types/src/lib.rs | 2 +- types/src/message.rs | 2 +- types/src/simple_certificate.rs | 4 ++-- types/src/simple_vote.rs | 4 ++-- types/src/traits/storage.rs | 2 +- types/src/{vote2.rs => vote.rs} | 15 ++++++--------- 14 files changed, 26 insertions(+), 29 deletions(-) rename types/src/{vote2.rs => vote.rs} (94%) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d9d9bf6cb2..c6b002e8a3 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -34,7 +34,7 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vote2::{Certificate2, HasViewNumber, VoteAccumulator2}, + vote::{Certificate, HasViewNumber, VoteAccumulator2}, }; use tracing::warn; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c49b03ca14..ce3a7fcda3 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -28,8 +28,8 @@ use hotshot_types::{ BlockPayload, }, utils::ViewInner, - vote2::HasViewNumber, - vote2::VoteAccumulator2, + vote::HasViewNumber, + vote::VoteAccumulator2, }; use snafu::Snafu; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a684843c77..f68d94b1e4 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -16,7 +16,7 @@ use hotshot_types::{ network::{CommunicationChannel, TransmitType}, node_implementation::NodeType, }, - vote2::{HasViewNumber, Vote2}, + vote::{HasViewNumber, Vote}, }; use snafu::Snafu; use std::sync::Arc; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 8a4133027b..5fe7ff42ff 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -28,7 +28,7 @@ use hotshot_types::{ simple_certificate::VIDCertificate, simple_vote::{VIDData, VIDVote}, traits::network::CommunicationChannel, - vote2::{HasViewNumber, VoteAccumulator2}, + vote::{HasViewNumber, VoteAccumulator2}, }; use snafu::Snafu; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index b8e147ed3a..638910e7c0 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -19,7 +19,7 @@ use hotshot_types::{ ViewSyncPreCommitVote, }, traits::network::ConsensusIntentEvent, - vote2::{Certificate2, HasViewNumber, Vote2, VoteAccumulator2}, + vote::{Certificate, HasViewNumber, Vote, VoteAccumulator2}, }; use bitvec::prelude::*; @@ -171,8 +171,8 @@ pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< pub struct ViewSyncRelayTaskState< TYPES: NodeType, I: NodeImplementation, - VOTE: Vote2, - CERTIFICATE: Certificate2, + VOTE: Vote, + CERTIFICATE: Certificate, > { /// Event stream to publish events to pub event_stream: ChannelStream>, @@ -189,8 +189,8 @@ pub struct ViewSyncRelayTaskState< impl< TYPES: NodeType, I: NodeImplementation, - VOTE: Vote2 + std::marker::Send + std::marker::Sync + 'static, - CERTIFICATE: Certificate2 + VOTE: Vote + std::marker::Send + std::marker::Sync + 'static, + CERTIFICATE: Certificate + std::marker::Send + std::marker::Sync + 'static, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ec72a3dd00..810544f7e5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -26,7 +26,7 @@ use hotshot_types::{ signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, }, - vote2::HasViewNumber, + vote::HasViewNumber, }; pub async fn build_system_handle( diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 050455903b..974dc7fb36 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -12,7 +12,7 @@ use hotshot_testing::{ }; use hotshot_types::simple_vote::QuorumData; use hotshot_types::simple_vote::QuorumVote; -use hotshot_types::vote2::Certificate2; +use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, diff --git a/types/src/data.rs b/types/src/data.rs index f4efaeaa2c..dc665549e8 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -13,7 +13,7 @@ use crate::{ storage::StoredView, BlockPayload, State, }, - vote2::{Certificate2, HasViewNumber}, + vote::{Certificate, HasViewNumber}, }; use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; diff --git a/types/src/lib.rs b/types/src/lib.rs index 991ae75a51..b024eac08c 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -22,7 +22,7 @@ pub mod simple_certificate; pub mod simple_vote; pub mod traits; pub mod utils; -pub mod vote2; +pub mod vote; /// the type of consensus to run. Either: /// wait for a signal to start a view, /// or constantly run diff --git a/types/src/message.rs b/types/src/message.rs index 15193e9d6b..44cd8ec351 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -11,7 +11,7 @@ use crate::simple_certificate::{ use crate::simple_vote::{ DAVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; -use crate::vote2::HasViewNumber; +use crate::vote::HasViewNumber; use crate::{ data::{DAProposal, VidDisperse}, simple_vote::QuorumVote, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 9f5321b174..0b19c83a8f 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -19,7 +19,7 @@ use crate::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, }, - vote2::{Certificate2, HasViewNumber}, + vote::{Certificate, HasViewNumber}, }; use serde::{Deserialize, Serialize}; @@ -41,7 +41,7 @@ pub struct SimpleCertificate { pub _pd: PhantomData, } -impl Certificate2 +impl Certificate for SimpleCertificate { type Voteable = VOTEABLE; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 46c50a1814..af2588c1f3 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -11,7 +11,7 @@ use crate::{ node_implementation::NodeType, signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, }, - vote2::{HasViewNumber, Vote2}, + vote::{HasViewNumber, Vote}, }; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -102,7 +102,7 @@ impl HasViewNumber for SimpleV } } -impl Vote2 for SimpleVote { +impl Vote for SimpleVote { type Commitment = DATA; fn get_signing_key(&self) -> ::SignatureKey { diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 207c4cc43b..9f831a672e 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -2,7 +2,7 @@ use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; use crate::{ - data::Leaf, simple_certificate::QuorumCertificate, traits::BlockPayload, vote2::HasViewNumber, + data::Leaf, simple_certificate::QuorumCertificate, traits::BlockPayload, vote::HasViewNumber, }; use async_trait::async_trait; use commit::Commitment; diff --git a/types/src/vote2.rs b/types/src/vote.rs similarity index 94% rename from types/src/vote2.rs rename to types/src/vote.rs index 575b7ef69f..52c8989c2e 100644 --- a/types/src/vote2.rs +++ b/types/src/vote.rs @@ -23,7 +23,7 @@ use crate::{ }; /// A simple vote that has a signer and commitment to the data voted on. -pub trait Vote2: HasViewNumber { +pub trait Vote: HasViewNumber { /// Type of data commitment this vote uses. type Commitment: Voteable; @@ -50,7 +50,7 @@ The certificate formed from the collection of signatures a committee. The committee is defined by the `Membership` associated type. The votes all must be over the `Commitment` associated type. */ -pub trait Certificate2: HasViewNumber { +pub trait Certificate: HasViewNumber { /// The data commitment this certificate certifies. type Voteable: Voteable; @@ -76,8 +76,8 @@ pub trait Certificate2: HasViewNumber { /// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs pub struct VoteAccumulator2< TYPES: NodeType, - VOTE: Vote2, - CERT: Certificate2, + VOTE: Vote, + CERT: Certificate, > { /// Map of all signatures accumlated so far pub vote_outcomes: VoteMap2>, @@ -89,11 +89,8 @@ pub struct VoteAccumulator2< pub phantom: PhantomData<(TYPES, VOTE, CERT)>, } -impl< - TYPES: NodeType, - VOTE: Vote2, - CERT: Certificate2, - > VoteAccumulator2 +impl, CERT: Certificate> + VoteAccumulator2 { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. From 3dd6ad56346c220f19a72d4f3d8055ec7b70fb95 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 10:07:12 -0500 Subject: [PATCH 0401/1393] remame VoteAccumulator2 -> VoteAccumulator --- task-impls/src/consensus.rs | 14 +++++++------- task-impls/src/da.rs | 6 +++--- task-impls/src/vid.rs | 6 +++--- task-impls/src/view_sync.rs | 10 +++++----- types/src/vote.rs | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c6b002e8a3..20c41fcf81 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -34,7 +34,7 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{Certificate, HasViewNumber, VoteAccumulator2}, + vote::{Certificate, HasViewNumber, VoteAccumulator}, }; use tracing::warn; @@ -135,14 +135,14 @@ where #[allow(clippy::type_complexity)] /// Accumulator for votes pub accumulator: Either< - VoteAccumulator2, QuorumCertificate>, + VoteAccumulator, QuorumCertificate>, QuorumCertificate, >, /// Accumulator for votes #[allow(clippy::type_complexity)] pub timeout_accumulator: Either< - VoteAccumulator2, TimeoutCertificate>, + VoteAccumulator, TimeoutCertificate>, TimeoutCertificate, >, /// View which this vote collection task is collecting votes in @@ -909,7 +909,7 @@ where if vote.get_view_number() > collection_view { // Todo check if we are the leader - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.quorum_exchange.total_nodes()], @@ -921,7 +921,7 @@ where // TODO Create default functions for accumulators // https://github.com/EspressoSystems/HotShot/issues/1797 - let timeout_accumulator = VoteAccumulator2 { + let timeout_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.timeout_exchange.total_nodes()], @@ -994,7 +994,7 @@ where if vote.get_view_number() > collection_view { // Todo check if we are the leader - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.timeout_exchange.total_nodes()], @@ -1004,7 +1004,7 @@ where let timeout_accumulator = new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); - let quorum_accumulator = VoteAccumulator2 { + let quorum_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.quorum_exchange.total_nodes()], diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ce3a7fcda3..20141cf776 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -29,7 +29,7 @@ use hotshot_types::{ }, utils::ViewInner, vote::HasViewNumber, - vote::VoteAccumulator2, + vote::VoteAccumulator, }; use snafu::Snafu; @@ -82,7 +82,7 @@ where #[allow(clippy::type_complexity)] /// Accumulates DA votes pub accumulator: - Either, DACertificate>, DACertificate>, + Either, DACertificate>, DACertificate>, /// the current view pub cur_view: TYPES::Time, /// event stream for channel events @@ -284,7 +284,7 @@ where }; if view > collection_view { - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.committee_exchange.total_nodes()], diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 5fe7ff42ff..86eac2dd0d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -28,7 +28,7 @@ use hotshot_types::{ simple_certificate::VIDCertificate, simple_vote::{VIDData, VIDVote}, traits::network::CommunicationChannel, - vote::{HasViewNumber, VoteAccumulator2}, + vote::{HasViewNumber, VoteAccumulator}, }; use snafu::Snafu; @@ -82,7 +82,7 @@ where #[allow(clippy::type_complexity)] /// Accumulates VID votes pub accumulator: Either< - VoteAccumulator2, VIDCertificate>, + VoteAccumulator, VIDCertificate>, VIDCertificate, >, /// the current view @@ -207,7 +207,7 @@ where }; if view > collection_view { - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.vid_exchange.total_nodes()], diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 638910e7c0..58119f6ce4 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -19,7 +19,7 @@ use hotshot_types::{ ViewSyncPreCommitVote, }, traits::network::ConsensusIntentEvent, - vote::{Certificate, HasViewNumber, Vote, VoteAccumulator2}, + vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; use bitvec::prelude::*; @@ -181,7 +181,7 @@ pub struct ViewSyncRelayTaskState< /// Vote accumulator #[allow(clippy::type_complexity)] - pub accumulator: Either, CERTIFICATE>, + pub accumulator: Either, CERTIFICATE>, /// Our node id; for logging pub id: u64, } @@ -317,7 +317,7 @@ where return; } - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], @@ -403,7 +403,7 @@ where return; } - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], @@ -489,7 +489,7 @@ where return; } - let new_accumulator = VoteAccumulator2 { + let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), signers: bitvec![0; self.exchange.total_nodes()], diff --git a/types/src/vote.rs b/types/src/vote.rs index 52c8989c2e..b37045f824 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -74,7 +74,7 @@ pub trait Certificate: HasViewNumber { } /// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs -pub struct VoteAccumulator2< +pub struct VoteAccumulator< TYPES: NodeType, VOTE: Vote, CERT: Certificate, @@ -90,7 +90,7 @@ pub struct VoteAccumulator2< } impl, CERT: Certificate> - VoteAccumulator2 + VoteAccumulator { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. From b0fc622aba51b3975225477c3addcf29031c9e7d Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 16 Nov 2023 10:07:13 -0500 Subject: [PATCH 0402/1393] address comments --- hotshot-state-prover/src/circuit.rs | 30 +++-------------------------- hotshot-state-prover/src/lib.rs | 18 +++++------------ types/src/traits/state.rs | 15 +++++++++++++++ 3 files changed, 23 insertions(+), 40 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 6be5ee2150..0d77d5dd2a 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -391,15 +391,7 @@ mod tests { fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - let state_msg = [ - F::from(lightclient_state.view_number as u64), - F::from(lightclient_state.block_height as u64), - lightclient_state.block_comm, - lightclient_state.fee_ledger_comm, - lightclient_state.stake_table_comm.0, - lightclient_state.stake_table_comm.1, - lightclient_state.stake_table_comm.2, - ]; + let state_msg = lightclient_state.to_array(); let sigs = schnorr_keys .iter() @@ -478,15 +470,7 @@ mod tests { // bad path: bad stake table commitment let mut bad_lightclient_state = lightclient_state.clone(); bad_lightclient_state.stake_table_comm.1 = F::default(); - let bad_state_msg = [ - F::from(bad_lightclient_state.view_number as u64), - F::from(bad_lightclient_state.block_height as u64), - bad_lightclient_state.block_comm, - bad_lightclient_state.fee_ledger_comm, - bad_lightclient_state.stake_table_comm.0, - bad_lightclient_state.stake_table_comm.1, - bad_lightclient_state.stake_table_comm.2, - ]; + let bad_state_msg = bad_lightclient_state.to_array let sig_for_bad_state = schnorr_keys .iter() .map(|(key, _)| { @@ -510,15 +494,7 @@ mod tests { let mut wrong_light_client_state = lightclient_state.clone(); // state with a different bls key commitment wrong_light_client_state.stake_table_comm.0 = F::default(); - let wrong_state_msg = [ - F::from(wrong_light_client_state.view_number as u64), - F::from(wrong_light_client_state.block_height as u64), - wrong_light_client_state.block_comm, - wrong_light_client_state.fee_ledger_comm, - wrong_light_client_state.stake_table_comm.0, - wrong_light_client_state.stake_table_comm.1, - wrong_light_client_state.stake_table_comm.2, - ]; + let wrong_state_msg = wrong_light_client_state.to_array(); let wrong_sigs = schnorr_keys .iter() .map(|(key, _)| { diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index d70b48dfce..0ae763278e 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -19,7 +19,7 @@ use hotshot_types::traits::{ use jf_plonk::{ errors::PlonkError, proof_system::{PlonkKzgSnark, UniversalSNARK}, - transcript::StandardTranscript, + transcript::SolidityTranscript, }; use jf_primitives::signatures::schnorr::Signature; use jf_relation::PlonkCircuit; @@ -86,7 +86,7 @@ where lightclient_state, threshold, )?; - let proof = PlonkKzgSnark::::prove::<_, _, StandardTranscript>(rng, &circuit, pk, None)?; + let proof = PlonkKzgSnark::::prove::<_, _, SolidityTranscript>(rng, &circuit, pk, None)?; Ok((proof, public_inputs)) } @@ -133,7 +133,7 @@ mod tests { use jf_plonk::{ errors::PlonkError, proof_system::{PlonkKzgSnark, UniversalSNARK}, - transcript::StandardTranscript, + transcript::SolidityTranscript, }; use jf_primitives::{ crhf::{VariableLengthRescueCRHF, CRHF}, @@ -244,15 +244,7 @@ mod tests { fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - let state_msg = [ - BaseField::from(lightclient_state.view_number as u64), - BaseField::from(lightclient_state.block_height as u64), - lightclient_state.block_comm, - lightclient_state.fee_ledger_comm, - lightclient_state.stake_table_comm.0, - lightclient_state.stake_table_comm.1, - lightclient_state.stake_table_comm.2, - ]; + let state_msg = lightclient_state.to_array(); let sigs = schnorr_keys .iter() @@ -297,7 +289,7 @@ mod tests { assert!(result.is_ok()); let (proof, public_inputs) = result.unwrap(); - assert!(PlonkKzgSnark::::verify::( + assert!(PlonkKzgSnark::::verify::( &vk, public_inputs.as_ref(), &proof, diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index d0b2c437f8..d951671ce8 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -234,3 +234,18 @@ pub struct LightClientState { /// Commitment for the stake table pub stake_table_comm: (F, F, F), } + +impl LightClientState { + /// Return an array of field elements + pub fn to_array(&self) -> [F; 7] { + [ + F::from(self.view_number as u64), + F::from(self.block_height as u64), + self.block_comm, + self.fee_ledger_comm, + self.stake_table_comm.0, + self.stake_table_comm.1, + self.stake_table_comm.2, + ] + } +} From e215d8ee46c10c3a73c9a90ad982b5d1273ab5fc Mon Sep 17 00:00:00 2001 From: MRain Date: Thu, 16 Nov 2023 10:09:09 -0500 Subject: [PATCH 0403/1393] address comments --- hotshot-state-prover/src/circuit.rs | 2 +- hotshot-state-prover/src/lib.rs | 35 ++++++----------------------- 2 files changed, 8 insertions(+), 29 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 0d77d5dd2a..d9b47ac422 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -470,7 +470,7 @@ mod tests { // bad path: bad stake table commitment let mut bad_lightclient_state = lightclient_state.clone(); bad_lightclient_state.stake_table_comm.1 = F::default(); - let bad_state_msg = bad_lightclient_state.to_array + let bad_state_msg = bad_lightclient_state.to_array(); let sig_for_bad_state = schnorr_keys .iter() .map(|(key, _)| { diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 0ae763278e..d50d273914 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -114,9 +114,9 @@ fn build_dummy_circuit_for_preprocessing( mod tests { use super::{ utils::{key_pairs_for_testing, stake_table_for_testing}, - BLSVerKey, BaseField, SchnorrVerKey, UniversalSrs, + BaseField, UniversalSrs, }; - use crate::{circuit::build, generate_state_update_proof, preprocess}; + use crate::{build_dummy_circuit_for_preprocessing, generate_state_update_proof, preprocess}; use ark_bn254::Bn254; use ark_ec::pairing::Pairing; use ark_ed_on_bn254::EdwardsConfig as Config; @@ -125,13 +125,11 @@ mod tests { One, }; use ethereum_types::U256; - use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::traits::{ stake_table::{SnapshotVersion, StakeTableScheme}, state::LightClientState, }; use jf_plonk::{ - errors::PlonkError, proof_system::{PlonkKzgSnark, UniversalSNARK}, transcript::SolidityTranscript, }; @@ -140,6 +138,7 @@ mod tests { errors::PrimitivesError, signatures::{schnorr::Signature, SchnorrSignatureScheme, SignatureScheme}, }; + use jf_relation::Circuit; use jf_utils::test_rng; // FIXME(Chengyu): see @@ -195,29 +194,6 @@ mod tests { Ok(pp) } - /// Internal function for helping generate the proving/verifying key - fn get_num_of_gates() -> Result { - use ark_ed_on_bn254::EdwardsConfig; - use jf_relation::Circuit; - let st = StakeTable::::new(); - let lightclient_state = LightClientState { - view_number: 0, - block_height: 0, - block_comm: BaseField::default(), - fee_ledger_comm: BaseField::default(), - stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), - }; - Ok(build::( - &[], - &[], - &[], - &lightclient_state, - &U256::zero(), - )? - .0 - .num_gates()) - } - #[test] fn test_proof_generation() { let num_validators = 10; @@ -269,7 +245,10 @@ mod tests { .collect::>(); // good path - let num_gates = get_num_of_gates().unwrap(); + let num_gates = build_dummy_circuit_for_preprocessing() + .unwrap() + .0 + .num_gates(); let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); ark_std::println!("Number of constraint in the circuit: {}", num_gates); From f582f8c54c1060e69db757107304cb25631a2076 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 13:27:31 -0500 Subject: [PATCH 0404/1393] remove some unused types --- types/src/traits/election.rs | 106 +---------------------------------- 1 file changed, 1 insertion(+), 105 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index e814bbf77b..3726ce3dec 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -13,12 +13,9 @@ use crate::traits::{ network::{CommunicationChannel, NetworkMsg}, signature_key::SignatureKey, }; -use bincode::Options; -use commit::{Commitment, CommitmentBounds, Committable}; +use commit::Commitment; use derivative::Derivative; -use hotshot_utils::bincode::bincode_opts; -use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, hash::Hash, marker::PhantomData, num::NonZeroU64}; @@ -33,107 +30,6 @@ pub enum ElectionError { MathError, } -/// For items that will always have the same validity outcome on a successful check, -/// allows for the case of "not yet possible to check" where the check might be -/// attempted again at a later point in time, but saves on repeated checking when -/// the outcome is already knowable. -/// -/// This would be a useful general utility. -#[derive(Clone)] -pub enum Checked { - /// This item has been checked, and is valid - Valid(T), - /// This item has been checked, and is not valid - Inval(T), - /// This item has not been checked - Unchecked(T), -} - -/// Data to vote on for different types of votes. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub enum VoteData -where - COMMITMENT: CommitmentBounds, -{ - /// Vote to provide availability for a block. - DA(COMMITMENT), - /// Vote to append a leaf to the log. - Yes(COMMITMENT), - /// Vote to reject a leaf from the log. - No(COMMITMENT), - /// Vote to time out and proceed to the next view. - Timeout(COMMITMENT), - /// Vote for VID proposal - VID(COMMITMENT), - /// Vote to pre-commit the view sync. - ViewSyncPreCommit(COMMITMENT), - /// Vote to commit the view sync. - ViewSyncCommit(COMMITMENT), - /// Vote to finalize the view sync. - ViewSyncFinalize(COMMITMENT), -} - -/// Make different types of `VoteData` committable -impl Committable for VoteData -where - COMMITMENT: CommitmentBounds, -{ - fn commit(&self) -> Commitment { - let (tag, commit) = match self { - VoteData::DA(c) => ("DA BlockPayload Commit", c), - VoteData::VID(c) => ("VID Proposal Commit", c), - VoteData::Yes(c) => ("Yes Vote Commit", c), - VoteData::No(c) => ("No Vote Commit", c), - VoteData::Timeout(c) => ("Timeout View Number Commit", c), - VoteData::ViewSyncPreCommit(c) => ("ViewSyncPreCommit", c), - VoteData::ViewSyncCommit(c) => ("ViewSyncCommit", c), - VoteData::ViewSyncFinalize(c) => ("ViewSyncFinalize", c), - }; - commit::RawCommitmentBuilder::new(tag) - .var_size_bytes(commit.as_ref()) - .finalize() - } - - fn tag() -> String { - ("VOTE_DATA_COMMIT").to_string() - } -} - -impl VoteData -where - COMMITMENT: CommitmentBounds, -{ - #[must_use] - /// Convert vote data into bytes. - /// - /// # Panics - /// Panics if the serialization fails. - pub fn as_bytes(&self) -> Vec { - bincode_opts().serialize(&self).unwrap() - } -} - -/// Proof of this entity's right to vote, and of the weight of those votes -pub trait VoteToken: - Clone - + Debug - + Send - + Sync - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + PartialEq - + Hash - + Eq -{ - // type StakeTable; - // type KeyPair: SignatureKey; - // type ConsensusTime: ConsensusTime; - - /// the count, which validation will confirm - fn vote_count(&self) -> NonZeroU64; -} - /// election config pub trait ElectionConfig: Default From 933028a5779e2c2e6449cc71169db3d96d187e38 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 14:25:22 -0500 Subject: [PATCH 0405/1393] Removing more I and Message generics in networking stack --- hotshot/examples/infra/mod.rs | 146 ++++++------- hotshot/examples/libp2p/types.rs | 10 +- hotshot/examples/web-server-da/types.rs | 10 +- hotshot/src/tasks/mod.rs | 4 +- .../src/traits/networking/combined_network.rs | 60 ++---- .../src/traits/networking/libp2p_network.rs | 35 +--- .../src/traits/networking/memory_network.rs | 39 +--- .../traits/networking/web_server_network.rs | 135 +++++------- task-impls/src/network.rs | 17 +- testing/src/node_types.rs | 192 +++++++----------- testing/src/test_launcher.rs | 10 +- testing/src/test_runner.rs | 12 +- testing/tests/memory_network.rs | 8 +- types/src/traits/election.rs | 34 ++-- types/src/traits/network.rs | 27 ++- types/src/traits/node_implementation.rs | 49 ++--- 16 files changed, 294 insertions(+), 494 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 8c60ffa953..1211f1c643 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -107,10 +107,10 @@ pub fn load_config_from_file( /// Runs the orchestrator pub async fn run_orchestrator< TYPES: NodeType, - DANETWORK: CommunicationChannel, TYPES::Membership> + Debug, - QUORUMNETWORK: CommunicationChannel, TYPES::Membership> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, TYPES::Membership> + Debug, - VIDNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + DANETWORK: CommunicationChannel + Debug, + QUORUMNETWORK: CommunicationChannel + Debug, + VIEWSYNCNETWORK: CommunicationChannel + Debug, + VIDNETWORK: CommunicationChannel + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< @@ -156,10 +156,10 @@ fn calculate_num_tx_per_round( #[async_trait] pub trait RunDA< TYPES: NodeType, - DANETWORK: CommunicationChannel, TYPES::Membership> + Debug, - QUORUMNETWORK: CommunicationChannel, TYPES::Membership> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, TYPES::Membership> + Debug, - VIDNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + DANETWORK: CommunicationChannel + Debug, + QUORUMNETWORK: CommunicationChannel + Debug, + VIEWSYNCNETWORK: CommunicationChannel + Debug, + VIDNETWORK: CommunicationChannel + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< @@ -370,12 +370,12 @@ pub trait RunDA< // WEB SERVER /// Represents a web server-based run -pub struct WebServerDARun> { +pub struct WebServerDARun { config: NetworkConfig, - quorum_network: WebCommChannel, - da_network: WebCommChannel, - view_sync_network: WebCommChannel, - vid_network: WebCommChannel, + quorum_network: WebCommChannel, + da_network: WebCommChannel, + view_sync_network: WebCommChannel, + vid_network: WebCommChannel, } #[async_trait] @@ -390,42 +390,22 @@ impl< Exchanges = Exchanges< TYPES, Message, - QuorumExchange< - TYPES, - TYPES::Membership, - WebCommChannel, - Message, - >, - CommitteeExchange< - TYPES, - TYPES::Membership, - WebCommChannel, - Message, - >, - ViewSyncExchange< - TYPES, - TYPES::Membership, - WebCommChannel, - Message, - >, - VIDExchange< - TYPES, - TYPES::Membership, - WebCommChannel, - Message, - >, + QuorumExchange, Message>, + CommitteeExchange, Message>, + ViewSyncExchange, Message>, + VIDExchange, Message>, >, Storage = MemoryStorage, >, > RunDA< TYPES, - WebCommChannel, - WebCommChannel, - WebCommChannel, - WebCommChannel, + WebCommChannel, + WebCommChannel, + WebCommChannel, + WebCommChannel, NODE, - > for WebServerDARun + > for WebServerDARun where ::StateType: TestableState, ::BlockPayload: TestableBlock, @@ -434,7 +414,7 @@ where { async fn initialize_networking( config: NetworkConfig, - ) -> WebServerDARun { + ) -> WebServerDARun { // Get our own key let pub_key = config.config.my_own_validator_config.public_key.clone(); @@ -454,10 +434,10 @@ where ); // Create the network - let quorum_network: WebCommChannel = + let quorum_network: WebCommChannel = WebCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: WebCommChannel = + let view_sync_network: WebCommChannel = WebCommChannel::new(underlying_quorum_network.into()); let WebServerConfig { @@ -467,7 +447,7 @@ where }: WebServerConfig = config.clone().da_web_server_config.unwrap(); // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel = WebCommChannel::new( + let da_network: WebCommChannel = WebCommChannel::new( WebServerNetwork::create( &host.to_string(), port, @@ -478,7 +458,7 @@ where .into(), ); - let vid_network: WebCommChannel = WebCommChannel::new( + let vid_network: WebCommChannel = WebCommChannel::new( WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) .into(), ); @@ -492,19 +472,19 @@ where } } - fn get_da_network(&self) -> WebCommChannel { + fn get_da_network(&self) -> WebCommChannel { self.da_network.clone() } - fn get_quorum_network(&self) -> WebCommChannel { + fn get_quorum_network(&self) -> WebCommChannel { self.quorum_network.clone() } - fn get_view_sync_network(&self) -> WebCommChannel { + fn get_view_sync_network(&self) -> WebCommChannel { self.view_sync_network.clone() } - fn get_vid_network(&self) -> WebCommChannel { + fn get_vid_network(&self) -> WebCommChannel { self.vid_network.clone() } @@ -516,12 +496,12 @@ where // Libp2p /// Represents a libp2p-based run -pub struct Libp2pDARun> { +pub struct Libp2pDARun { config: NetworkConfig, - quorum_network: Libp2pCommChannel, - da_network: Libp2pCommChannel, - view_sync_network: Libp2pCommChannel, - vid_network: Libp2pCommChannel, + quorum_network: Libp2pCommChannel, + da_network: Libp2pCommChannel, + view_sync_network: Libp2pCommChannel, + vid_network: Libp2pCommChannel, } #[async_trait] @@ -536,42 +516,32 @@ impl< Exchanges = Exchanges< TYPES, Message, - QuorumExchange< - TYPES, - TYPES::Membership, - Libp2pCommChannel, - Message, - >, + QuorumExchange, Message>, CommitteeExchange< TYPES, TYPES::Membership, - Libp2pCommChannel, + Libp2pCommChannel, Message, >, ViewSyncExchange< TYPES, TYPES::Membership, - Libp2pCommChannel, - Message, - >, - VIDExchange< - TYPES, - TYPES::Membership, - Libp2pCommChannel, + Libp2pCommChannel, Message, >, + VIDExchange, Message>, >, Storage = MemoryStorage, >, > RunDA< TYPES, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, + Libp2pCommChannel, NODE, - > for Libp2pDARun + > for Libp2pDARun where ::StateType: TestableState, ::BlockPayload: TestableBlock, @@ -580,7 +550,7 @@ where { async fn initialize_networking( config: NetworkConfig, - ) -> Libp2pDARun { + ) -> Libp2pDARun { let pubkey = config.config.my_own_validator_config.public_key.clone(); let mut config = config; let libp2p_config = config @@ -699,16 +669,16 @@ where underlying_quorum_network.wait_for_ready().await; // Create the network - let quorum_network: Libp2pCommChannel = + let quorum_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: Libp2pCommChannel = + let view_sync_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let da_network: Libp2pCommChannel = + let da_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let vid_network: Libp2pCommChannel = + let vid_network: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); Libp2pDARun { @@ -720,19 +690,19 @@ where } } - fn get_da_network(&self) -> Libp2pCommChannel { + fn get_da_network(&self) -> Libp2pCommChannel { self.da_network.clone() } - fn get_quorum_network(&self) -> Libp2pCommChannel { + fn get_quorum_network(&self) -> Libp2pCommChannel { self.quorum_network.clone() } - fn get_view_sync_network(&self) -> Libp2pCommChannel { + fn get_view_sync_network(&self) -> Libp2pCommChannel { self.view_sync_network.clone() } - fn get_vid_network(&self) -> Libp2pCommChannel { + fn get_vid_network(&self) -> Libp2pCommChannel { self.vid_network.clone() } @@ -748,10 +718,10 @@ pub async fn main_entry_point< BlockPayload = VIDBlockPayload, BlockHeader = VIDBlockHeader, >, - DANETWORK: CommunicationChannel, TYPES::Membership> + Debug, - QUORUMNETWORK: CommunicationChannel, TYPES::Membership> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, TYPES::Membership> + Debug, - VIDNETWORK: CommunicationChannel, TYPES::Membership> + Debug, + DANETWORK: CommunicationChannel + Debug, + QUORUMNETWORK: CommunicationChannel + Debug, + VIEWSYNCNETWORK: CommunicationChannel + Debug, + VIDNETWORK: CommunicationChannel + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 0f989c6c35..e70b0dcf60 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -16,10 +16,10 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type DANetwork = Libp2pCommChannel; -pub type VIDNetwork = Libp2pCommChannel; -pub type QuorumNetwork = Libp2pCommChannel; -pub type ViewSyncNetwork = Libp2pCommChannel; +pub type DANetwork = Libp2pCommChannel; +pub type VIDNetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pCommChannel; +pub type ViewSyncNetwork = Libp2pCommChannel; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; @@ -38,4 +38,4 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pDARun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 87e0666208..916d9cb0b6 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -16,10 +16,10 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type DANetwork = WebCommChannel; -pub type VIDNetwork = WebCommChannel; -pub type QuorumNetwork = WebCommChannel; -pub type ViewSyncNetwork = WebCommChannel; +pub type DANetwork = WebCommChannel; +pub type VIDNetwork = WebCommChannel; +pub type QuorumNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebCommChannel; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; @@ -38,4 +38,4 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index a636225c8f..e3ceda2906 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -67,7 +67,7 @@ pub async fn add_network_message_task< ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: CommunicationChannel, TYPES::Membership>, + EXCHANGE::Networking: CommunicationChannel, { let channel = exchange.network().clone(); let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { @@ -163,7 +163,7 @@ pub async fn add_network_event_task< ) -> TaskRunner // This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. where - EXCHANGE::Networking: CommunicationChannel, TYPES::Membership>, + EXCHANGE::Networking: CommunicationChannel, { let filter = NetworkEventTaskState::>::Networking>::filter( diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 52b17b17bc..50a4d9820d 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -33,7 +33,7 @@ use hotshot_types::{ node_implementation::NodeType, }, }; -use std::{collections::hash_map::DefaultHasher, marker::PhantomData, sync::Arc}; +use std::{collections::hash_map::DefaultHasher, sync::Arc}; use std::hash::Hash; @@ -100,9 +100,9 @@ fn calculate_hash_of(t: &T) -> u64 { /// A communication channel with 2 networks, where we can fall back to the slower network if the /// primary fails #[derive(Clone, Debug)] -pub struct CombinedCommChannel> { +pub struct CombinedCommChannel { /// The two networks we'll use for send/recv - networks: Arc>, + networks: Arc>, /// Last n seen messages to prevent processing duplicates message_cache: Arc>, @@ -111,10 +111,10 @@ pub struct CombinedCommChannel> { primary_down: Arc, } -impl> CombinedCommChannel { +impl CombinedCommChannel { /// Constructor #[must_use] - pub fn new(networks: Arc>) -> Self { + pub fn new(networks: Arc>) -> Self { Self { networks, message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), @@ -124,7 +124,7 @@ impl> CombinedCommChannel &WebServerNetwork, TYPES::SignatureKey, TYPES> { + pub fn primary(&self) -> &WebServerNetwork { &self.networks.0 } @@ -139,16 +139,12 @@ impl> CombinedCommChannel>( - pub WebServerNetwork, TYPES::SignatureKey, TYPES>, +pub struct CombinedNetworks( + pub WebServerNetwork, pub Libp2pNetwork, TYPES::SignatureKey>, - pub PhantomData, ); -impl> - TestableNetworkingImplementation> - for CombinedNetworks -{ +impl TestableNetworkingImplementation for CombinedNetworks { fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -158,17 +154,15 @@ impl> ) -> Box Self + 'static> { let generators = ( , - TYPES::SignatureKey, TYPES, - > as TestableNetworkingImplementation<_, _>>::generator( + > as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, da_committee_size, is_da ), - , TYPES::SignatureKey> as TestableNetworkingImplementation<_, _>>::generator( + , TYPES::SignatureKey> as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, @@ -176,9 +170,7 @@ impl> is_da ) ); - Box::new(move |node_id| { - CombinedNetworks(generators.0(node_id), generators.1(node_id), PhantomData) - }) + Box::new(move |node_id| CombinedNetworks(generators.0(node_id), generators.1(node_id))) } /// Get the number of messages in-flight. @@ -189,10 +181,7 @@ impl> } } -impl> - TestableNetworkingImplementation> - for CombinedCommChannel -{ +impl TestableNetworkingImplementation for CombinedCommChannel { fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -200,10 +189,7 @@ impl> da_committee_size: usize, is_da: bool, ) -> Box Self + 'static> { - let generator = as TestableNetworkingImplementation< - _, - _, - >>::generator( + let generator = as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, @@ -226,11 +212,8 @@ impl> } #[async_trait] -impl> - CommunicationChannel, MEMBERSHIP> - for CombinedCommChannel -{ - type NETWORK = CombinedNetworks; +impl CommunicationChannel for CombinedCommChannel { + type NETWORK = CombinedNetworks; async fn wait_for_ready(&self) { join!( @@ -366,7 +349,7 @@ impl> } async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork,TYPES::SignatureKey>>:: + as ConnectedNetwork,TYPES::SignatureKey>>:: inject_consensus_info(self.primary(), event.clone()).await; as ConnectedNetwork,TYPES::SignatureKey>>:: @@ -374,13 +357,8 @@ impl> } } -impl> - TestableChannelImplementation< - TYPES, - Message, - MEMBERSHIP, - CombinedNetworks, - > for CombinedCommChannel +impl TestableChannelImplementation> + for CombinedCommChannel { fn generate_network() -> Box) -> Self + 'static> { Box::new(move |network| CombinedCommChannel::new(network)) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 61c7fe874b..ffd83e8a5b 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -2,7 +2,6 @@ //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network use super::NetworkingMetricsValue; -use crate::NodeImplementation; use async_compatibility_layer::{ art::{async_block_on, async_sleep, async_spawn}, channel::{unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, @@ -125,7 +124,7 @@ pub struct Libp2pNetwork { inner: Arc>, } -impl TestableNetworkingImplementation> +impl TestableNetworkingImplementation for Libp2pNetwork, TYPES::SignatureKey> where MessageKind: ViewMessage, @@ -749,18 +748,12 @@ impl ConnectedNetwork for Libp2p /// libp2p identity communication channel #[derive(Clone, Debug)] -pub struct Libp2pCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, ->( +pub struct Libp2pCommChannel( Arc, TYPES::SignatureKey>>, - PhantomData<(TYPES, I, MEMBERSHIP)>, + PhantomData, ); -impl, MEMBERSHIP: Membership> - Libp2pCommChannel -{ +impl Libp2pCommChannel { /// create a new libp2p communication channel #[must_use] pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { @@ -768,9 +761,7 @@ impl, MEMBERSHIP: Membership, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for Libp2pCommChannel +impl TestableNetworkingImplementation for Libp2pCommChannel where MessageKind: ViewMessage, { @@ -793,7 +784,7 @@ where let generator = , TYPES::SignatureKey, - > as TestableNetworkingImplementation<_, _>>::generator( + > as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, @@ -812,9 +803,7 @@ where // top // we don't really want to make this the default implementation because that forces it to require ConnectedNetwork to be implemented. The struct we implement over might use multiple ConnectedNetworks #[async_trait] -impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> - for Libp2pCommChannel +impl CommunicationChannel for Libp2pCommChannel where MessageKind: ViewMessage, { @@ -888,13 +877,9 @@ where } } -impl, MEMBERSHIP: Membership> - TestableChannelImplementation< - TYPES, - Message, - MEMBERSHIP, - Libp2pNetwork, TYPES::SignatureKey>, - > for Libp2pCommChannel +impl + TestableChannelImplementation, TYPES::SignatureKey>> + for Libp2pCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index ca2202b2a5..d2e34edb90 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -4,7 +4,6 @@ //! integration tests. use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; -use crate::NodeImplementation; use async_compatibility_layer::{ art::async_spawn, channel::{bounded, Receiver, SendError, Sender}, @@ -33,7 +32,6 @@ use snafu::ResultExt; use std::{ collections::BTreeSet, fmt::Debug, - marker::PhantomData, sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -242,7 +240,7 @@ impl MemoryNetwork { } } -impl TestableNetworkingImplementation> +impl TestableNetworkingImplementation for MemoryNetwork, TYPES::SignatureKey> { fn generator( @@ -450,28 +448,19 @@ impl ConnectedNetwork for Memory /// memory identity communication channel #[derive(Clone, Debug)] -pub struct MemoryCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, ->( +pub struct MemoryCommChannel( Arc, TYPES::SignatureKey>>, - PhantomData<(I, MEMBERSHIP)>, ); -impl, MEMBERSHIP: Membership> - MemoryCommChannel -{ +impl MemoryCommChannel { /// create new communication channel #[must_use] pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { - Self(network, PhantomData) + Self(network) } } -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for MemoryCommChannel +impl TestableNetworkingImplementation for MemoryCommChannel where MessageKind: ViewMessage, { @@ -485,14 +474,14 @@ where let generator = , TYPES::SignatureKey, - > as TestableNetworkingImplementation<_, _>>::generator( + > as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, da_committee_size, is_da ); - Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) + Box::new(move |node_id| Self(generator(node_id).into())) } fn in_flight_message_count(&self) -> Option { @@ -501,9 +490,7 @@ where } #[async_trait] -impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> - for MemoryCommChannel +impl CommunicationChannel for MemoryCommChannel where MessageKind: ViewMessage, { @@ -561,13 +548,9 @@ where } } -impl, MEMBERSHIP: Membership> - TestableChannelImplementation< - TYPES, - Message, - MEMBERSHIP, - MemoryNetwork, TYPES::SignatureKey>, - > for MemoryCommChannel +impl + TestableChannelImplementation, TYPES::SignatureKey>> + for MemoryCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 0f1fbb9fe4..39da919655 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -15,13 +15,12 @@ use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ message::{Message, MessagePurpose}, traits::{ - election::Membership, network::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, WebServerNetworkError, }, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::NodeType, signature_key::SignatureKey, }, }; @@ -32,7 +31,6 @@ use serde::{Deserialize, Serialize}; use hotshot_types::traits::network::ViewMessage; use std::{ collections::{hash_map::Entry, BTreeSet, HashMap}, - marker::PhantomData, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -43,37 +41,31 @@ use surf_disco::error::ClientError; use tracing::{debug, error, info}; /// Represents the communication channel abstraction for the web server #[derive(Clone, Debug)] -pub struct WebCommChannel< - TYPES: NodeType, - I: NodeImplementation, - MEMBERSHIP: Membership, ->( - Arc, TYPES::SignatureKey, TYPES>>, - PhantomData<(MEMBERSHIP, I)>, -); - -impl, MEMBERSHIP: Membership> - WebCommChannel -{ +pub struct WebCommChannel(Arc>); + +impl WebCommChannel { /// Create new communication channel #[must_use] - pub fn new(network: Arc, TYPES::SignatureKey, TYPES>>) -> Self { - Self(network, PhantomData) + pub fn new(network: Arc>) -> Self { + Self(network) } } /// The web server network state #[derive(Clone, Debug)] -pub struct WebServerNetwork { +pub struct WebServerNetwork { /// The inner, core state of the web server network - inner: Arc>, + inner: Arc>, /// An optional shutdown signal. This is only used when this connection is created through the `TestableNetworkingImplementation` API. server_shutdown_signal: Option>>, } -impl WebServerNetwork { +impl WebServerNetwork { /// Post a message to the web server and return the result - async fn post_message_to_web_server(&self, message: SendMsg) -> Result<(), NetworkError> { + async fn post_message_to_web_server( + &self, + message: SendMsg>, + ) -> Result<(), NetworkError> { let result: Result<(), ClientError> = self .inner .client @@ -91,15 +83,13 @@ impl WebServerNetwork { - /// Phantom data for generic types - phantom: PhantomData<(KEY, TYPES::ElectionConfigType)>, +struct Inner { /// Our own key _own_key: TYPES::SignatureKey, /// Queue for broadcasted messages - broadcast_poll_queue: Arc>>>, + broadcast_poll_queue: Arc>>>>, /// Queue for direct messages - direct_poll_queue: Arc>>>, + direct_poll_queue: Arc>>>>, /// Client is running running: AtomicBool, /// The web server connection is ready @@ -143,7 +133,7 @@ struct Inner { Arc>>>>, } -impl Inner { +impl Inner { #![allow(clippy::too_many_lines)] /// Pull a web server. async fn poll_web_server( @@ -398,7 +388,7 @@ impl Inner { async fn get_txs_from_web_server( &self, endpoint: String, - ) -> Result>)>, NetworkError> { + ) -> Result>>)>, NetworkError> { let result: Result>)>, ClientError> = self.client.get(&endpoint).send().await; match result { @@ -425,7 +415,7 @@ impl Inner { async fn get_message_from_web_server( &self, endpoint: String, - ) -> Result>>, NetworkError> { + ) -> Result>>>, NetworkError> { let result: Result>>, ClientError> = self.client.get(&endpoint).send().await; match result { @@ -499,12 +489,7 @@ impl RecvMsgTrait for RecvMsg { impl NetworkMsg for SendMsg {} impl NetworkMsg for RecvMsg {} -impl< - M: NetworkMsg + 'static + ViewMessage, - K: SignatureKey + 'static, - TYPES: NodeType + 'static, - > WebServerNetwork -{ +impl WebServerNetwork { /// Creates a new instance of the `WebServerNetwork` /// # Panics /// if the web server url is malformed @@ -527,7 +512,6 @@ impl< let client = surf_disco::Client::::new(base_url.unwrap()); let inner = Arc::new(Inner { - phantom: PhantomData, broadcast_poll_queue: Arc::default(), direct_poll_queue: Arc::default(), running: AtomicBool::new(true), @@ -558,7 +542,9 @@ impl< /// Parses a message to find the appropriate endpoint /// Returns a `SendMsg` containing the endpoint - fn parse_post_message(message: M) -> Result, WebServerNetworkError> { + fn parse_post_message( + message: Message, + ) -> Result>, WebServerNetworkError> { let view_number: TYPES::Time = message.get_view_number(); let endpoint = match &message.purpose() { @@ -579,7 +565,7 @@ impl< MessagePurpose::VidCert => config::post_vid_certificate_route(*view_number), }; - let network_msg: SendMsg = SendMsg { + let network_msg: SendMsg> = SendMsg { message: Some(message), endpoint, }; @@ -588,15 +574,12 @@ impl< } #[async_trait] -impl, MEMBERSHIP: Membership> - CommunicationChannel, MEMBERSHIP> - for WebCommChannel -{ - type NETWORK = WebServerNetwork, TYPES::SignatureKey, TYPES>; +impl CommunicationChannel for WebCommChannel { + type NETWORK = WebServerNetwork; /// Blocks until node is successfully initialized /// into the network async fn wait_for_ready(&self) { - as ConnectedNetwork< + as ConnectedNetwork< Message, TYPES::SignatureKey, >>::wait_for_ready(&self.0) @@ -606,10 +589,9 @@ impl, MEMBERSHIP: Membership bool { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::is_ready(&self.0) + as ConnectedNetwork, TYPES::SignatureKey>>::is_ready( + &self.0, + ) .await } @@ -622,7 +604,7 @@ impl, MEMBERSHIP: Membership as ConnectedNetwork< + as ConnectedNetwork< Message, TYPES::SignatureKey, >>::shut_down(&self.0) @@ -664,7 +646,7 @@ impl, MEMBERSHIP: Membership as ConnectedNetwork< + as ConnectedNetwork< Message, TYPES::SignatureKey, >>::recv_msgs(&self.0, transmit_type) @@ -674,7 +656,7 @@ impl, MEMBERSHIP: Membership) { - as ConnectedNetwork< + as ConnectedNetwork< Message, TYPES::SignatureKey, >>::inject_consensus_info(&self.0, event) @@ -683,11 +665,8 @@ impl, MEMBERSHIP: Membership, - K: SignatureKey + 'static, - TYPES: NodeType + 'static, - > ConnectedNetwork for WebServerNetwork +impl ConnectedNetwork, TYPES::SignatureKey> + for WebServerNetwork { /// Blocks until the network is successfully initialized async fn wait_for_ready(&self) { @@ -719,8 +698,8 @@ impl< /// blocking async fn broadcast_message( &self, - message: M, - _recipients: BTreeSet, + message: Message, + _recipients: BTreeSet, ) -> Result<(), NetworkError> { let network_msg = Self::parse_post_message(message); match network_msg { @@ -733,7 +712,11 @@ impl< /// Sends a direct message to a specific node /// blocking - async fn direct_message(&self, message: M, _recipient: K) -> Result<(), NetworkError> { + async fn direct_message( + &self, + message: Message, + _recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { let network_msg = Self::parse_post_message(message); match network_msg { Ok(network_msg) => { @@ -754,7 +737,7 @@ impl< fn recv_msgs<'a, 'b>( &'a self, transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result, NetworkError>> + ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, @@ -785,7 +768,7 @@ impl< } #[allow(clippy::too_many_lines)] - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { debug!( "Injecting event: {:?} is da {}", event.clone(), @@ -1202,9 +1185,7 @@ impl< } } -impl TestableNetworkingImplementation> - for WebServerNetwork, TYPES::SignatureKey, TYPES> -{ +impl TestableNetworkingImplementation for WebServerNetwork { fn generator( expected_node_count: usize, _num_bootstrap: usize, @@ -1252,10 +1233,7 @@ impl TestableNetworkingImplementation> } } -impl, MEMBERSHIP: Membership> - TestableNetworkingImplementation> - for WebCommChannel -{ +impl TestableNetworkingImplementation for WebCommChannel { fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -1263,18 +1241,14 @@ impl, MEMBERSHIP: Membership Box Self + 'static> { - let generator = , - TYPES::SignatureKey, - TYPES, - > as TestableNetworkingImplementation<_, _>>::generator( + let generator = as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, da_committee_size, is_da, ); - Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) + Box::new(move |node_id| Self(generator(node_id).into())) } fn in_flight_message_count(&self) -> Option { @@ -1282,17 +1256,10 @@ impl, MEMBERSHIP: Membership, MEMBERSHIP: Membership> - TestableChannelImplementation< - TYPES, - Message, - MEMBERSHIP, - WebServerNetwork, TYPES::SignatureKey, TYPES>, - > for WebCommChannel +impl TestableChannelImplementation> + for WebCommChannel { - fn generate_network() -> Box< - dyn Fn(Arc, TYPES::SignatureKey, TYPES>>) -> Self + 'static, - > { + fn generate_network() -> Box>) -> Self + 'static> { Box::new(move |network| WebCommChannel::new(network)) } } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index f68d94b1e4..78f0beaab8 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -133,10 +133,7 @@ impl NetworkMessageTaskState { } /// network event task state -pub struct NetworkEventTaskState< - TYPES: NodeType, - COMMCHANNEL: CommunicationChannel, TYPES::Membership>, -> { +pub struct NetworkEventTaskState> { /// comm channel pub channel: COMMCHANNEL, /// event stream @@ -146,17 +143,13 @@ pub struct NetworkEventTaskState< // TODO ED Need to add exchange so we can get the recipient key and our own key? } -impl< - TYPES: NodeType, - COMMCHANNEL: CommunicationChannel, TYPES::Membership>, - > TS for NetworkEventTaskState +impl> TS + for NetworkEventTaskState { } -impl< - TYPES: NodeType, - COMMCHANNEL: CommunicationChannel, TYPES::Membership>, - > NetworkEventTaskState +impl> + NetworkEventTaskState { /// Handle the given event. /// diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 5c15bd40a9..830a01e771 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,7 +1,7 @@ use hotshot::traits::{ election::static_committee::GeneralStaticCommittee, implementations::CombinedNetworks, }; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; use hotshot::{ demo::DemoState, @@ -66,37 +66,37 @@ pub struct CombinedImpl; pub type StaticMembership = StaticCommittee; -pub type StaticMemoryDAComm = MemoryCommChannel; +pub type StaticMemoryDAComm = MemoryCommChannel; -type StaticLibp2pDAComm = Libp2pCommChannel; +type StaticLibp2pDAComm = Libp2pCommChannel; -type StaticWebDAComm = WebCommChannel; +type StaticWebDAComm = WebCommChannel; -type StaticCombinedDAComm = CombinedCommChannel; +type StaticCombinedDAComm = CombinedCommChannel; -pub type StaticMemoryQuorumComm = MemoryCommChannel; +pub type StaticMemoryQuorumComm = MemoryCommChannel; -type StaticLibp2pQuorumComm = Libp2pCommChannel; +type StaticLibp2pQuorumComm = Libp2pCommChannel; -type StaticWebQuorumComm = WebCommChannel; +type StaticWebQuorumComm = WebCommChannel; -type StaticCombinedQuorumComm = CombinedCommChannel; +type StaticCombinedQuorumComm = CombinedCommChannel; -pub type StaticMemoryViewSyncComm = MemoryCommChannel; +pub type StaticMemoryViewSyncComm = MemoryCommChannel; -type StaticLibp2pViewSyncComm = Libp2pCommChannel; +type StaticLibp2pViewSyncComm = Libp2pCommChannel; -type StaticWebViewSyncComm = WebCommChannel; +type StaticWebViewSyncComm = WebCommChannel; -type StaticCombinedViewSyncComm = CombinedCommChannel; +type StaticCombinedViewSyncComm = CombinedCommChannel; -pub type StaticMemoryVIDComm = MemoryCommChannel; +pub type StaticMemoryVIDComm = MemoryCommChannel; -type StaticLibp2pVIDComm = Libp2pCommChannel; +type StaticLibp2pVIDComm = Libp2pCommChannel; -type StaticWebVIDComm = WebCommChannel; +type StaticWebVIDComm = WebCommChannel; -type StaticCombinedVIDComm = CombinedCommChannel; +type StaticCombinedVIDComm = CombinedCommChannel; pub type SequencingLibp2pExchange = Exchanges< TestTypes, @@ -152,10 +152,7 @@ impl TestableExchange> for SequencingLibp2pExchang let network_generator = Arc::new(, ::SignatureKey, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( + > as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, 0, @@ -169,25 +166,25 @@ impl TestableExchange> for SequencingLibp2pExchang <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network.clone()); let committee_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network.clone()); let view_sync_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network.clone()); let vid_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network); (quorum_chan, committee_chan, view_sync_chan, vid_chan) @@ -235,10 +232,7 @@ impl TestableExchange> for SequencingMemoryExchang let network_generator = Arc::new(, ::SignatureKey, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( + > as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, 0, @@ -248,10 +242,7 @@ impl TestableExchange> for SequencingMemoryExchang let network_da_generator = Arc::new(, ::SignatureKey, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( + > as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, 1, @@ -265,25 +256,25 @@ impl TestableExchange> for SequencingMemoryExchang <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network.clone()); let committee_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network_da); let vid_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network); (quorum_chan, committee_chan, view_sync_chan, vid_chan) @@ -347,34 +338,25 @@ impl TestableExchange> for SequencingWebExchanges >>::Networking, ) + 'static, > { - let network_generator = Arc::new(, - ::SignatureKey, - _, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - )); - let network_da_generator = Arc::new(, - ::SignatureKey, - TestTypes, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( - expected_node_count, - num_bootstrap, - 1, - da_committee_size, - true, - )); + let network_generator = + Arc::new( as TestableNetworkingImplementation< + TestTypes, + >>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + )); + let network_da_generator = Arc::new( + as TestableNetworkingImplementation>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + true, + ), + ); Box::new(move |id| { let network = Arc::new(network_generator(id)); let network_da = Arc::new(network_da_generator(id)); @@ -382,25 +364,25 @@ impl TestableExchange> for SequencingWebExchanges <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network.clone()); let committee_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network); let vid_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network_da); (quorum_chan, committee_chan, view_sync_chan, vid_chan) @@ -473,43 +455,31 @@ impl TestableExchange> for CombinedExchange { >>::Networking, ) + 'static, > { - let web_server_network_generator = Arc::new(, - ::SignatureKey, - _, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - )); + let web_server_network_generator = + Arc::new( as TestableNetworkingImplementation< + TestTypes, + >>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + )); - let web_server_network_da_generator = Arc::new(, - ::SignatureKey, - TestTypes, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( - expected_node_count, - num_bootstrap, - 1, - da_committee_size, - true, - )); + let web_server_network_da_generator = Arc::new( + as TestableNetworkingImplementation>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + true, + ), + ); let libp2p_network_generator = Arc::new(, ::SignatureKey, - > as TestableNetworkingImplementation< - TestTypes, - Message, - >>::generator( + > as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, 2, @@ -524,41 +494,33 @@ impl TestableExchange> for CombinedExchange { let libp2p_network = libp2p_network_generator(id); - let network = Arc::new(CombinedNetworks( - web_server_network, - libp2p_network.clone(), - PhantomData, - )); - let network_da = Arc::new(CombinedNetworks( - web_server_network_da, - libp2p_network, - PhantomData, - )); + let network = Arc::new(CombinedNetworks(web_server_network, libp2p_network.clone())); + let network_da = Arc::new(CombinedNetworks(web_server_network_da, libp2p_network)); let quorum_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network.clone()); let committee_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network_da.clone()); let view_sync_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network); let vid_chan = <, - >>::Networking as TestableChannelImplementation<_, _, _, _>>::generate_network( + >>::Networking as TestableChannelImplementation<_, _>>::generate_network( )(network_da); (quorum_chan, committee_chan, view_sync_chan, vid_chan) }) diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 49ade61612..76b9cee98e 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -13,9 +13,7 @@ use hotshot_types::{ traits::{ election::ConsensusExchange, network::CommunicationChannel, - node_implementation::{ - ExchangesType, NodeType, QuorumCommChannel, QuorumEx, QuorumNetwork, - }, + node_implementation::{ExchangesType, NodeType, QuorumCommChannel, QuorumNetwork}, }, HotShotConfig, }; @@ -80,11 +78,7 @@ pub type Hook = Box< /// generators for resources used by each node pub struct ResourceGenerators> where - QuorumCommChannel: CommunicationChannel< - TYPES, - Message, - as ConsensusExchange>>::Membership, - >, + QuorumCommChannel: CommunicationChannel, { // generate channels pub channel_generator: Generator>, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index cbfc805101..9faec4aa86 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -39,11 +39,7 @@ pub struct Node> { /// spin up and down nodes, execute rounds pub struct TestRunner> where - QuorumCommChannel: CommunicationChannel< - TYPES, - Message, - as ConsensusExchange>>::Membership, - >, + QuorumCommChannel: CommunicationChannel, { pub(crate) launcher: TestLauncher, pub(crate) nodes: Vec>, @@ -55,11 +51,7 @@ where impl> TestRunner where SystemContext: HotShotType, - QuorumCommChannel: CommunicationChannel< - TYPES, - Message, - as ConsensusExchange>>::Membership, - >, + QuorumCommChannel: CommunicationChannel, { /// excecute test pub async fn run_test(mut self) diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index da09153c7c..66a0a0890b 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -59,10 +59,10 @@ impl NodeType for Test { pub struct TestImpl {} pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = MemoryCommChannel; -pub type QuorumNetwork = MemoryCommChannel; -pub type ViewSyncNetwork = MemoryCommChannel; -pub type VIDNetwork = MemoryCommChannel; +pub type DANetwork = MemoryCommChannel; +pub type QuorumNetwork = MemoryCommChannel; +pub type ViewSyncNetwork = MemoryCommChannel; +pub type VIDNetwork = MemoryCommChannel; impl NodeImplementation for TestImpl { type Storage = MemoryStorage; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 3726ce3dec..35b43536f9 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -96,7 +96,7 @@ pub trait ConsensusExchange: Send + Sync { /// The committee eligible to make decisions. type Membership: Membership; /// Network used by [`Membership`](Self::Membership) to communicate. - type Networking: CommunicationChannel; + type Networking: CommunicationChannel; /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). fn create( @@ -163,7 +163,7 @@ pub trait CommitteeExchangeType: pub struct CommitteeExchange< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -184,7 +184,7 @@ pub struct CommitteeExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > CommitteeExchangeType for CommitteeExchange { @@ -201,7 +201,7 @@ impl< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for CommitteeExchange { @@ -256,7 +256,7 @@ pub trait VIDExchangeType: ConsensusExchange, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -277,7 +277,7 @@ pub struct VIDExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > VIDExchangeType for VIDExchange { @@ -294,7 +294,7 @@ impl< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for VIDExchange { @@ -355,7 +355,7 @@ pub trait QuorumExchangeType: ConsensusExchange< pub struct QuorumExchange< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -376,7 +376,7 @@ pub struct QuorumExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > QuorumExchangeType for QuorumExchange { @@ -400,7 +400,7 @@ impl< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for QuorumExchange { @@ -453,7 +453,7 @@ pub trait ViewSyncExchangeType: pub struct ViewSyncExchange< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -474,7 +474,7 @@ pub struct ViewSyncExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > ViewSyncExchangeType for ViewSyncExchange { @@ -483,7 +483,7 @@ impl< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for ViewSyncExchange { @@ -531,7 +531,7 @@ impl< pub struct TimeoutExchange< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > { /// The network being used by this exchange. @@ -552,7 +552,7 @@ pub struct TimeoutExchange< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > TimeoutExchange { @@ -564,7 +564,7 @@ pub trait TimeoutExchangeType: ConsensusExchange impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > TimeoutExchangeType for TimeoutExchange { @@ -573,7 +573,7 @@ impl< impl< TYPES: NodeType, MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, + NETWORK: CommunicationChannel, M: NetworkMsg, > ConsensusExchange for TimeoutExchange { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 13e0d7f86b..9c6b6bb137 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -11,8 +11,11 @@ use libp2p_networking::network::NetworkNodeHandleError; use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} -use super::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ViewNumber, message::MessagePurpose}; +use super::{node_implementation::NodeType, signature_key::SignatureKey}; +use crate::{ + data::ViewNumber, + message::{Message, MessagePurpose}, +}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use rand::{ @@ -226,9 +229,7 @@ pub trait ViewMessage { /// API for interacting directly with a consensus committee /// intended to be implemented for both DA and for validating consensus committees #[async_trait] -pub trait CommunicationChannel>: - Clone + Debug + Send + Sync + 'static -{ +pub trait CommunicationChannel: Clone + Debug + Send + Sync + 'static { /// Underlying Network implementation's type type NETWORK; /// Blocks until node is successfully initialized @@ -251,7 +252,7 @@ pub trait CommunicationChannel, election: &TYPES::Membership, ) -> Result<(), NetworkError>; @@ -259,7 +260,7 @@ pub trait CommunicationChannel, recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError>; @@ -270,7 +271,7 @@ pub trait CommunicationChannel( &'a self, transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result, NetworkError>> + ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b; @@ -351,7 +352,7 @@ pub trait ConnectedNetwork: } /// Describes additional functionality needed by the test network implementation -pub trait TestableNetworkingImplementation { +pub trait TestableNetworkingImplementation { /// generates a network given an expected node count fn generator( expected_node_count: usize, @@ -367,12 +368,8 @@ pub trait TestableNetworkingImplementation { fn in_flight_message_count(&self) -> Option; } /// Describes additional functionality needed by the test communication channel -pub trait TestableChannelImplementation< - TYPES: NodeType, - M: NetworkMsg, - MEMBERSHIP: Membership, - NETWORK, ->: CommunicationChannel +pub trait TestableChannelImplementation: + CommunicationChannel { /// generates the `CommunicationChannel` given it's associated network type fn generate_network() -> Box) -> Self + 'static>; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 0f3d8e1e8e..6c26af0e37 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -144,6 +144,9 @@ pub trait ExchangesType: Send + Sync { /// Get the timeout exchange fn timeout_exchange(&self) -> &Self::TimeoutExchange; + // type QuorumNetwork: CommunicationChannel; + // type CommitteeNetwork: CommunicationChannel; + /// Protocol for exchanging quorum proposals and votes. type QuorumExchange: QuorumExchangeType + Clone + Debug; @@ -483,26 +486,11 @@ pub trait TestableNodeImplementation: NodeImplementation #[async_trait] impl> TestableNodeImplementation for I where - CommitteeNetwork: TestableNetworkingImplementation>, - QuorumNetwork: TestableNetworkingImplementation>, - QuorumCommChannel: TestableChannelImplementation< - TYPES, - Message, - QuorumMembership, - QuorumNetwork, - >, - CommitteeCommChannel: TestableChannelImplementation< - TYPES, - Message, - CommitteeMembership, - QuorumNetwork, - >, - ViewSyncCommChannel: TestableChannelImplementation< - TYPES, - Message, - ViewSyncMembership, - QuorumNetwork, - >, + CommitteeNetwork: TestableNetworkingImplementation, + QuorumNetwork: TestableNetworkingImplementation, + QuorumCommChannel: TestableChannelImplementation>, + CommitteeCommChannel: TestableChannelImplementation>, + ViewSyncCommChannel: TestableChannelImplementation>, TYPES::StateType: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, @@ -576,25 +564,16 @@ pub type ViewSyncMembership = as ConsensusExchange>>::Membership; /// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -pub type QuorumNetwork = as CommunicationChannel< - TYPES, - Message, - QuorumMembership, ->>::NETWORK; +pub type QuorumNetwork = + as CommunicationChannel>::NETWORK; /// Type for the underlying committee `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -pub type CommitteeNetwork = as CommunicationChannel< - TYPES, - Message, - CommitteeMembership, ->>::NETWORK; +pub type CommitteeNetwork = + as CommunicationChannel>::NETWORK; /// Type for the underlying view sync `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -pub type ViewSyncNetwork = as CommunicationChannel< - TYPES, - Message, - ViewSyncMembership, ->>::NETWORK; +pub type ViewSyncNetwork = + as CommunicationChannel>::NETWORK; /// Trait with all the type definitions that are used in the current hotshot setup. pub trait NodeType: From 78bd41728b90d6f1c9a9b5473434d2c74a251158 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 15:01:46 -0500 Subject: [PATCH 0406/1393] add newtork types to NodeImplementation trait --- hotshot/examples/libp2p/types.rs | 2 ++ hotshot/examples/web-server-da/types.rs | 2 ++ hotshot/src/lib.rs | 2 ++ testing/src/node_types.rs | 8 ++++++++ testing/tests/memory_network.rs | 2 ++ types/src/traits/node_implementation.rs | 5 +++++ 6 files changed, 21 insertions(+) diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index e70b0dcf60..c583796f47 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -23,6 +23,8 @@ pub type ViewSyncNetwork = Libp2pCommChannel; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; + type QuorumNetwork = QuorumNetwork; + type CommitteeNetwork = DANetwork; type Exchanges = Exchanges< DemoTypes, Message, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index 916d9cb0b6..af5735b5ec 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -23,6 +23,8 @@ pub type ViewSyncNetwork = WebCommChannel; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; + type CommitteeNetwork = DANetwork; + type QuorumNetwork = QuorumNetwork; type Exchanges = Exchanges< DemoTypes, Message, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f4fe0d1b95..6008d51948 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -120,6 +120,8 @@ pub struct SystemContextInner> { /// This `HotShot` instance's way to interact with the nodes needed to form a quorum and/or DA certificate. pub exchanges: Arc, + // pub quorum_network: Arc; + // pub committee_network: Arc; /// Sender for [`Event`]s event_sender: RwLock>>>, diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 830a01e771..a78d4d23f3 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -109,6 +109,8 @@ pub type SequencingLibp2pExchange = Exchanges< impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage; + type QuorumNetwork = StaticLibp2pQuorumComm; + type CommitteeNetwork = StaticLibp2pDAComm; type Exchanges = SequencingLibp2pExchange; fn new_channel_maps( @@ -284,6 +286,8 @@ impl TestableExchange> for SequencingMemoryExchang impl NodeImplementation for MemoryImpl { type Storage = MemoryStorage; + type QuorumNetwork = StaticMemoryQuorumComm; + type CommitteeNetwork = StaticMemoryDAComm; type Exchanges = SequencingMemoryExchange; fn new_channel_maps( @@ -392,6 +396,8 @@ impl TestableExchange> for SequencingWebExchanges impl NodeImplementation for WebImpl { type Storage = MemoryStorage; + type QuorumNetwork = StaticWebQuorumComm; + type CommitteeNetwork = StaticWebDAComm; type Exchanges = SequencingWebExchanges; fn new_channel_maps( @@ -415,6 +421,8 @@ pub type CombinedExchange = Exchanges< impl NodeImplementation for CombinedImpl { type Storage = MemoryStorage; + type QuorumNetwork = StaticCombinedQuorumComm; + type CommitteeNetwork = StaticCombinedDAComm; type Exchanges = CombinedExchange; fn new_channel_maps( diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 66a0a0890b..151e912647 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -66,6 +66,8 @@ pub type VIDNetwork = MemoryCommChannel; impl NodeImplementation for TestImpl { type Storage = MemoryStorage; + type QuorumNetwork = QuorumNetwork; + type CommitteeNetwork = DANetwork; type Exchanges = Exchanges< Test, Message, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 6c26af0e37..98b5bf15a0 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -121,6 +121,11 @@ pub trait NodeImplementation: /// Implements either `ValidatingExchangesType` or `ExchangesType`. type Exchanges: ExchangesType>; + /// Network for all nodes + type QuorumNetwork: CommunicationChannel; + /// Network for those in the DA committee + type CommitteeNetwork: CommunicationChannel; + /// Create channels for sending/recv-ing proposals and votes for quorum and committee /// exchanges, the latter of which is only applicable for sequencing consensus. fn new_channel_maps( From a4ebc7653e0644fe9d2817dcc766b1227359400c Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 15:52:55 -0500 Subject: [PATCH 0407/1393] Pass networks to SystemContext --- hotshot/examples/infra/mod.rs | 42 +++++++++++++++-------------------- hotshot/src/lib.rs | 25 +++++++++++++++++---- testing/src/task_helpers.rs | 9 +++++++- testing/src/test_runner.rs | 32 +++++++++++++++++++++++--- 4 files changed, 76 insertions(+), 32 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 1211f1c643..83f56934bf 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -12,7 +12,7 @@ use hotshot::{ NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, - HotShotType, SystemContext, + HotShotType, Networks, SystemContext, }; use hotshot_orchestrator::{ self, @@ -47,32 +47,13 @@ use libp2p_networking::{ }; use rand::rngs::StdRng; use rand::SeedableRng; +use std::marker::PhantomData; use std::{collections::BTreeSet, sync::Arc}; use std::{num::NonZeroUsize, str::FromStr}; -// use libp2p::{ -// identity::{ -// ed25519::{Keypair as EdKeypair, SecretKey}, -// Keypair, -// }, -// multiaddr::{self, Protocol}, -// Multiaddr, -// }; + use libp2p_identity::PeerId; -// use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; use std::{fmt::Debug, net::Ipv4Addr}; -use std::{ - //collections::{BTreeSet, VecDeque}, - fs, - mem, - net::IpAddr, - //num::NonZeroUsize, - //str::FromStr, - //sync::Arc, - //time::{Duration, Instant}, - time::Instant, -}; -//use surf_disco::error::ClientError; -//use surf_disco::Client; +use std::{fs, mem, net::IpAddr, time::Instant}; use tracing::{debug, error, info, warn}; #[derive(Parser, Debug, Clone)] @@ -162,6 +143,8 @@ pub trait RunDA< VIDNETWORK: CommunicationChannel + Debug, NODE: NodeImplementation< TYPES, + QuorumNetwork = QUORUMNETWORK, + CommitteeNetwork = DANETWORK, Exchanges = Exchanges< TYPES, Message, @@ -220,7 +203,11 @@ pub trait RunDA< >>::Membership::default_election_config( config.config.da_committee_size.try_into().unwrap(), ); - + let networks_bundle = Networks { + quorum_netowrk: quorum_network.clone(), + da_network: da_network.clone(), + _pd: PhantomData, + }; let exchanges = NODE::Exchanges::create( known_nodes_with_stake.clone(), (quorum_election_config, committee_election_config), @@ -242,6 +229,7 @@ pub trait RunDA< config.config, MemoryStorage::empty(), exchanges, + networks_bundle, initializer, ConsensusMetricsValue::new(), ) @@ -387,6 +375,8 @@ impl< >, NODE: NodeImplementation< TYPES, + QuorumNetwork = WebCommChannel, + CommitteeNetwork = WebCommChannel, Exchanges = Exchanges< TYPES, Message, @@ -513,6 +503,8 @@ impl< >, NODE: NodeImplementation< TYPES, + QuorumNetwork = Libp2pCommChannel, + CommitteeNetwork = Libp2pCommChannel, Exchanges = Exchanges< TYPES, Message, @@ -724,6 +716,8 @@ pub async fn main_entry_point< VIDNETWORK: CommunicationChannel + Debug, NODE: NodeImplementation< TYPES, + QuorumNetwork = QUORUMNETWORK, + CommitteeNetwork = DANETWORK, Exchanges = Exchanges< TYPES, Message, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6008d51948..6df45e7edb 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -81,6 +81,7 @@ use hotshot_types::{ use snafu::ResultExt; use std::{ collections::{BTreeMap, HashMap}, + marker::PhantomData, num::NonZeroUsize, sync::Arc, time::Duration, @@ -100,6 +101,18 @@ pub const H_512: usize = 64; /// Length, in bytes, of a 256 bit hash pub const H_256: usize = 32; +/// Bundle of the networks used in consensus +pub struct Networks> { + /// Newtork for reaching all nodes + pub quorum_netowrk: I::QuorumNetwork, + + /// Network for reaching the DA committee + pub da_network: I::CommitteeNetwork, + + /// Phantom for TYPES and I + pub _pd: PhantomData<(TYPES, I)>, //TODO: Do we need seperate networks for Viewsync/VID? +} + /// Holds the state needed to participate in `HotShot` consensus pub struct SystemContextInner> { /// The public key of this node @@ -111,15 +124,15 @@ pub struct SystemContextInner> { /// Configuration items for this hotshot instance config: HotShotConfig, - /// Networking interface for this hotshot instance - // networking: I::Networking, - /// This `HotShot` instance's storage backend storage: I::Storage, /// This `HotShot` instance's way to interact with the nodes needed to form a quorum and/or DA certificate. pub exchanges: Arc, + /// Networks used by the instance of hotshot + pub networks: Arc>, + // pub quorum_network: Arc; // pub committee_network: Arc; /// Sender for [`Event`]s @@ -158,7 +171,7 @@ impl> SystemContext { /// Creates a new hotshot with the given configuration options and sets it up with the given /// genesis block #[allow(clippy::too_many_arguments)] - #[instrument(skip(private_key, storage, exchanges, initializer, metrics))] + #[instrument(skip(private_key, storage, exchanges, networks, initializer, metrics))] pub async fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -166,6 +179,7 @@ impl> SystemContext { config: HotShotConfig, storage: I::Storage, exchanges: I::Exchanges, + networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, ) -> Result> { @@ -223,6 +237,7 @@ impl> SystemContext { config, storage, exchanges: Arc::new(exchanges), + networks: Arc::new(networks), event_sender: RwLock::default(), _metrics: consensus_metrics.clone(), internal_event_stream: ChannelStream::new(), @@ -361,6 +376,7 @@ impl> SystemContext { config: HotShotConfig, storage: I::Storage, exchanges: I::Exchanges, + networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, ) -> Result< @@ -381,6 +397,7 @@ impl> SystemContext { config, storage, exchanges, + networks, initializer, metrics, ) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 810544f7e5..3aca7eeaf4 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -8,7 +8,7 @@ use commit::Committable; use hotshot::{ traits::{NodeImplementation, TestableNodeImplementation}, types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, - HotShotConsensusApi, HotShotInitializer, SystemContext, + HotShotConsensusApi, HotShotInitializer, Networks, SystemContext, }; use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; @@ -65,6 +65,11 @@ pub async fn build_system_handle( Message, >>::Membership::default_election_config(config.total_nodes.get() as u64) }); + let networks_bundle = Networks { + quorum_netowrk: networks.0.clone(), + da_network: networks.1.clone(), + _pd: PhantomData, + }; let exchanges = >::Exchanges::create( known_nodes_with_stake.clone(), (quorum_election_config, committee_election_config), @@ -73,6 +78,7 @@ pub async fn build_system_handle( public_key.get_stake_table_entry(1u64), private_key.clone(), ); + SystemContext::init( public_key, private_key, @@ -80,6 +86,7 @@ pub async fn build_system_handle( config, storage, exchanges, + networks_bundle, initializer, ConsensusMetricsValue::new(), ) diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 9faec4aa86..d48ecc9305 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -7,7 +7,7 @@ use crate::{ spinning_task::UpDown, test_launcher::{Networks, TestLauncher}, }; -use hotshot::types::SystemContextHandle; +use hotshot::{traits::NodeImplementation, types::SystemContextHandle}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, HotShotType, SystemContext}; use hotshot_task::{ @@ -19,12 +19,17 @@ use hotshot_types::{ traits::{ election::{ConsensusExchange, Membership}, network::CommunicationChannel, - node_implementation::{ExchangesType, NodeType, QuorumCommChannel, QuorumEx}, + node_implementation::{ + CommitteeCommChannel, ExchangesType, NodeType, QuorumCommChannel, QuorumEx, + }, signature_key::SignatureKey, }, HotShotConfig, ValidatorConfig, }; -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, +}; #[allow(deprecated)] use tracing::info; @@ -52,6 +57,11 @@ impl> TestRunner where SystemContext: HotShotType, QuorumCommChannel: CommunicationChannel, + I: NodeImplementation< + TYPES, + QuorumNetwork = QuorumCommChannel, + CommitteeNetwork = CommitteeCommChannel, + >, { /// excecute test pub async fn run_test(mut self) @@ -187,6 +197,11 @@ where Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, + I: NodeImplementation< + TYPES, + QuorumNetwork = QuorumCommChannel, + CommitteeNetwork = CommitteeCommChannel, + >, { let mut results = vec![]; for i in 0..total { @@ -232,6 +247,11 @@ where Message, ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), >, + I: NodeImplementation< + TYPES, + QuorumNetwork = QuorumCommChannel, + CommitteeNetwork = CommitteeCommChannel, + >, { let node_id = self.next_node_id; self.next_node_id += 1; @@ -247,6 +267,11 @@ where >>::Membership::default_election_config(config.total_nodes.get() as u64) }); let committee_election_config = I::committee_election_config_generator(); + let network_bundle = hotshot::Networks { + quorum_netowrk: networks.0.clone(), + da_network: networks.1.clone(), + _pd: PhantomData, + }; let exchanges = I::Exchanges::create( known_nodes_with_stake.clone(), ( @@ -265,6 +290,7 @@ where config, storage, exchanges, + network_bundle, initializer, ConsensusMetricsValue::new(), ) From 44b126ecee4aa5dc171f496798259d4202cb099f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 16:10:16 -0500 Subject: [PATCH 0408/1393] add memberships to SystemContext --- hotshot/src/lib.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6df45e7edb..0203996e4f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -113,6 +113,18 @@ pub struct Networks> { pub _pd: PhantomData<(TYPES, I)>, //TODO: Do we need seperate networks for Viewsync/VID? } +/// Bundle of all the memberships a consensus instance uses +pub struct Memberships { + /// Quorum Membership + pub quorum_membership: TYPES::Membership, + /// DA + pub da_membership: TYPES::Membership, + /// VID + pub vid_membership: TYPES::Membership, + /// View Sync + pub view_sync_membership: TYPES::Membership, +} + /// Holds the state needed to participate in `HotShot` consensus pub struct SystemContextInner> { /// The public key of this node @@ -133,6 +145,9 @@ pub struct SystemContextInner> { /// Networks used by the instance of hotshot pub networks: Arc>, + /// Memberships used by consensus + pub memberships: Arc>, + // pub quorum_network: Arc; // pub committee_network: Arc; /// Sender for [`Event`]s @@ -228,6 +243,13 @@ impl> SystemContext { }; let consensus = Arc::new(RwLock::new(consensus)); + let memberships = Memberships { + quorum_membership: exchanges.quorum_exchange().membership().clone(), + da_membership: exchanges.committee_exchange().membership().clone(), + vid_membership: exchanges.vid_exchange().membership().clone(), + view_sync_membership: exchanges.view_sync_exchange().membership().clone(), + }; + let inner: Arc> = Arc::new(SystemContextInner { id: nonce, channel_maps: I::new_channel_maps(start_view), @@ -238,6 +260,7 @@ impl> SystemContext { storage, exchanges: Arc::new(exchanges), networks: Arc::new(networks), + memberships: Arc::new(memberships), event_sender: RwLock::default(), _metrics: consensus_metrics.clone(), internal_event_stream: ChannelStream::new(), From 5be629ed37698ce8495e4df343103b7a64bfa7c0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 17:01:07 -0500 Subject: [PATCH 0409/1393] consensus task doesn't have exchanges --- hotshot/src/tasks/mod.rs | 19 +++-- task-impls/src/consensus.rs | 158 +++++++++++++++++++----------------- 2 files changed, 95 insertions(+), 82 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index e3ceda2906..b12f1066b1 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -238,10 +238,10 @@ where timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), payload_commitment: Some(VIDBlockPayload::genesis().commit()), - quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), - timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), + // quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), + // timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), api: c_api.clone(), - committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), + // committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), _pd: PhantomData, vote_collector: None, timeout_task: async_spawn(async move {}), @@ -251,15 +251,20 @@ where vid_certs: HashMap::new(), current_proposal: None, id: handle.hotshot.inner.id, + public_key: todo!(), + private_key: todo!(), + quorum_network: todo!(), + committee_network: todo!(), + timeout_membership: todo!(), + quorum_membership: todo!(), + committee_membership: todo!(), }; consensus_state - .quorum_exchange - .network() + .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) .await; consensus_state - .quorum_exchange - .network() + .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) .await; let filter = FilterEvent(Arc::new(consensus_event_filter)); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 20c41fcf81..88afb8c724 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -26,7 +26,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, - election::{ConsensusExchange, Membership, QuorumExchangeType}, + election::{ConsensusExchange, Membership}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, signature_key::SignatureKey, @@ -63,6 +63,10 @@ pub struct ConsensusTaskState< CommitteeEx: ConsensusExchange>, TimeoutEx: ConsensusExchange>, { + /// Our public key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, /// The global task registry pub registry: GlobalRegistry, /// Reference to consensus. The replica will require a write lock on this. @@ -75,18 +79,24 @@ pub struct ConsensusTaskState< /// The commitment to the current block payload submitted to DA pub payload_commitment: Option>, - /// the quorum exchange - pub quorum_exchange: Arc>, + /// Network for all nodes + pub quorum_network: Arc, - /// The timeout exchange - pub timeout_exchange: Arc>, + /// Network for DA committee + pub committee_network: Arc, + + /// Membership for Timeout votes/certs + pub timeout_membership: Arc, + + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + + /// Membership for DA committee Votes/certs + pub committee_membership: Arc, /// Consensus api pub api: A, - /// the committee exchange - pub committee_exchange: Arc>, - /// needed to typecheck pub _pd: PhantomData, @@ -127,10 +137,12 @@ where QuorumEx: ConsensusExchange>, TimeoutEx: ConsensusExchange>, { - /// the quorum exchange - pub quorum_exchange: Arc>, - /// the timeout exchange - pub timeout_exchange: Arc>, + /// Network for all nodes + pub quorum_network: Arc, + /// Membership for Timeout votes/certs + pub timeout_membership: Arc, + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, #[allow(clippy::type_complexity)] /// Accumulator for votes @@ -191,7 +203,7 @@ where let accumulator = state.accumulator.left().unwrap(); - match accumulator.accumulate(&vote, state.quorum_exchange.membership()) { + match accumulator.accumulate(&vote, &state.quorum_membership) { Either::Left(acc) => { state.accumulator = Either::Left(acc); return (None, state); @@ -206,8 +218,7 @@ where // No longer need to poll for votes state - .quorum_exchange - .network() + .quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( *qc.view_number, )) @@ -237,7 +248,7 @@ where let accumulator = state.timeout_accumulator.left().unwrap(); - match accumulator.accumulate(&vote, state.quorum_exchange.membership()) { + match accumulator.accumulate(&vote, &state.timeout_membership) { Either::Left(acc) => { state.timeout_accumulator = Either::Left(acc); return (None, state); @@ -252,8 +263,7 @@ where // No longer need to poll for votes state - .quorum_exchange - .network() + .quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( *qc.view_number, )) @@ -310,11 +320,7 @@ where #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] async fn vote_if_able(&self) -> bool { - if !self - .quorum_exchange - .membership() - .has_stake(self.quorum_exchange.public_key()) - { + if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", self.cur_view @@ -358,15 +364,15 @@ where block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), + proposer_id: self.quorum_membership.get_leader(view).to_bytes(), }; let vote = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), }, view, - self.quorum_exchange.public_key(), - self.quorum_exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::Vote(vote); @@ -418,11 +424,11 @@ where block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.quorum_exchange.get_leader(view).to_bytes(), + proposer_id: self.quorum_membership.get_leader(view).to_bytes(), }; // Validate the DAC. - let message = if cert.is_valid_cert(self.committee_exchange.membership()) { + let message = if cert.is_valid_cert(self.committee_membership.as_ref()) { // Validate the block payload commitment for non-genesis DAC. if !cert.is_genesis && cert.get_data().payload_commit @@ -436,8 +442,8 @@ where leaf_commit: leaf.commit(), }, view, - self.quorum_exchange.public_key(), - self.quorum_exchange.private_key(), + &self.public_key, + &self.private_key, ); GeneralConsensusMessage::::Vote(vote) } else { @@ -491,31 +497,27 @@ where // Poll the future leader for lookahead let lookahead_view = new_view + LOOK_AHEAD; - if !self.quorum_exchange.is_leader(lookahead_view) { - self.quorum_exchange - .network() + if self.quorum_membership.get_leader(lookahead_view) != self.public_key { + self.quorum_network .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( *lookahead_view, - self.quorum_exchange.get_leader(lookahead_view), + self.quorum_membership.get_leader(lookahead_view), )) .await; } // Start polling for proposals for the new view - self.quorum_exchange - .network() + self.quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view + 1)) .await; - self.quorum_exchange - .network() + self.quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view + 1)) .await; - if self.quorum_exchange.is_leader(self.cur_view + 1) { + if self.quorum_membership.get_leader(self.cur_view + 1) == self.public_key { debug!("Polling for quorum votes for view {}", *self.cur_view); - self.quorum_exchange - .network() + self.quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view)) .await; } @@ -564,8 +566,7 @@ where ); // stop polling for the received proposal - self.quorum_exchange - .network() + self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( *proposal.data.view_number, )) @@ -577,7 +578,7 @@ where return; } - let view_leader_key = self.quorum_exchange.get_leader(view); + let view_leader_key = self.quorum_membership.get_leader(view); if view_leader_key != sender { error!("Leader key does not match key in proposal"); return; @@ -597,7 +598,7 @@ where return; } - if !timeout_cert.is_valid_cert(self.timeout_exchange.membership()) { + if !timeout_cert.is_valid_cert(self.timeout_membership.as_ref()) { warn!("Timeout certificate for view {} was invalid", *view); return; } @@ -605,7 +606,7 @@ where let justify_qc = proposal.data.justify_qc.clone(); - if !justify_qc.is_valid_cert(self.quorum_exchange.membership()) { + if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { error!("Invalid justify_qc in proposal for view {}", *view); let consensus = self.consensus.write().await; consensus.metrics.invalid_qc.update(1); @@ -856,7 +857,7 @@ where let new_view = self.current_proposal.clone().unwrap().view_number + 1; // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_exchange.is_leader(new_view) + let should_propose = self.quorum_membership.get_leader(new_view) == self.public_key && consensus.high_qc.view_number == self.current_proposal.clone().unwrap().view_number; // todo get rid of this clone @@ -884,11 +885,17 @@ where HotShotEvent::QuorumVoteRecv(vote) => { debug!("Received quroum vote: {:?}", vote.get_view_number()); - if !self.quorum_exchange.is_leader(vote.get_view_number() + 1) { + if self + .quorum_membership + .get_leader(vote.get_view_number() + 1) + != self.public_key + { error!( "We are not the leader for view {} are we the leader for view + 1? {}", *vote.get_view_number() + 1, - self.quorum_exchange.is_leader(vote.get_view_number() + 2) + self.quorum_membership + .get_leader(vote.get_view_number() + 2) + == self.public_key ); return; } @@ -912,25 +919,26 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], + signers: bitvec![0; self.quorum_membership.total_nodes()], phantom: PhantomData, }; let accumulator = - new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); + new_accumulator.accumulate(&vote, self.quorum_membership.as_ref()); // TODO Create default functions for accumulators // https://github.com/EspressoSystems/HotShot/issues/1797 let timeout_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_exchange.total_nodes()], + signers: bitvec![0; self.timeout_membership.total_nodes()], phantom: PhantomData, }; let state = VoteCollectionTaskState { - quorum_exchange: self.quorum_exchange.clone(), - timeout_exchange: self.timeout_exchange.clone(), + quorum_network: self.quorum_network.clone(), + quorum_membership: self.quorum_membership.clone(), + timeout_membership: self.timeout_membership.clone(), accumulator, timeout_accumulator: either::Left(timeout_accumulator), cur_view: vote.get_view_number(), @@ -969,11 +977,17 @@ where } } HotShotEvent::TimeoutVoteRecv(vote) => { - if !self.timeout_exchange.is_leader(vote.get_view_number() + 1) { + if self + .timeout_membership + .get_leader(vote.get_view_number() + 1) + != self.public_key + { error!( "We are not the leader for view {} are we the leader for view + 1? {}", *vote.get_view_number() + 1, - self.timeout_exchange.is_leader(vote.get_view_number() + 2) + self.timeout_membership + .get_leader(vote.get_view_number() + 2) + == self.public_key ); return; } @@ -997,25 +1011,26 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_exchange.total_nodes()], + signers: bitvec![0; self.timeout_membership.total_nodes()], phantom: PhantomData, }; let timeout_accumulator = - new_accumulator.accumulate(&vote, self.quorum_exchange.membership()); + new_accumulator.accumulate(&vote, self.quorum_membership.as_ref()); let quorum_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_exchange.total_nodes()], + signers: bitvec![0; self.quorum_membership.total_nodes()], phantom: PhantomData, }; // self.timeout_accumulator = accumulator; let state = VoteCollectionTaskState { - quorum_exchange: self.quorum_exchange.clone(), - timeout_exchange: self.timeout_exchange.clone(), + quorum_network: self.quorum_network.clone(), + quorum_membership: self.quorum_membership.clone(), + timeout_membership: self.timeout_membership.clone(), accumulator: either::Left(quorum_accumulator), timeout_accumulator, cur_view: vote.get_view_number(), @@ -1096,8 +1111,7 @@ where debug!("DAC Recved for view ! {}", *cert.view_number); let view = cert.view_number; - self.quorum_exchange - .network() + self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForDAC(*view)) .await; @@ -1142,11 +1156,7 @@ where if self.cur_view >= view { return; } - if !self - .timeout_exchange - .membership() - .has_stake(self.timeout_exchange.public_key()) - { + if !self.timeout_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", self.cur_view @@ -1157,8 +1167,8 @@ where let vote = TimeoutVote::create_signed_vote( TimeoutData { view }, view, - self.timeout_exchange.public_key(), - self.timeout_exchange.private_key(), + &self.public_key, + &self.private_key, ); self.event_stream @@ -1186,7 +1196,7 @@ where view: TYPES::Time, timeout_certificate: Option>, ) -> bool { - if !self.quorum_exchange.is_leader(view) { + if self.quorum_membership.get_leader(view) != self.public_key { error!( "Somehow we formed a QC but are not the leader for the next view {:?}", view @@ -1260,9 +1270,7 @@ where proposer_id: self.api.public_key().to_bytes(), }; - let signature = self - .quorum_exchange - .sign_validating_or_commitment_proposal::(&leaf.commit()); + let signature = TYPES::SignatureKey::sign(&self.private_key, leaf.commit().as_ref()); // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { block_header: TYPES::BlockHeader::new(*payload_commitment, &parent_header), @@ -1284,7 +1292,7 @@ where self.event_stream .publish(HotShotEvent::QuorumProposalSend( message, - self.quorum_exchange.public_key().clone(), + self.public_key.clone(), )) .await; self.payload_commitment = None; From 2dca592163728d454dadaa00b5e95c862c90a43c Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 17:46:16 -0500 Subject: [PATCH 0410/1393] add consensus w/o exchagnes --- hotshot/examples/infra/mod.rs | 2 +- hotshot/src/lib.rs | 2 +- hotshot/src/tasks/mod.rs | 28 ++++++++++------------------ task-impls/src/consensus.rs | 30 +++--------------------------- testing/src/task_helpers.rs | 2 +- testing/src/test_runner.rs | 2 +- 6 files changed, 17 insertions(+), 49 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 83f56934bf..05f817d889 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -204,7 +204,7 @@ pub trait RunDA< config.config.da_committee_size.try_into().unwrap(), ); let networks_bundle = Networks { - quorum_netowrk: quorum_network.clone(), + quorum_network: quorum_network.clone(), da_network: da_network.clone(), _pd: PhantomData, }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0203996e4f..1ce315003e 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -104,7 +104,7 @@ pub const H_256: usize = 32; /// Bundle of the networks used in consensus pub struct Networks> { /// Newtork for reaching all nodes - pub quorum_netowrk: I::QuorumNetwork, + pub quorum_network: I::QuorumNetwork, /// Network for reaching the DA committee pub da_network: I::CommitteeNetwork, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b12f1066b1..b9edfe53a2 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -29,11 +29,11 @@ use hotshot_types::{ event::Event, message::{Message, Messages}, traits::{ + consensus_api::ConsensusSharedApi, election::{ConsensusExchange, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ - CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, TimeoutEx, VIDEx, - ViewSyncEx, + CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, VIDEx, ViewSyncEx, }, state::ConsensusTime, }, @@ -220,12 +220,7 @@ pub async fn add_consensus_task< event_stream: ChannelStream>, output_stream: ChannelStream>, handle: SystemContextHandle, -) -> TaskRunner -where - QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, - CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, - TimeoutEx: ConsensusExchange, Membership = TYPES::Membership>, -{ +) -> TaskRunner { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), @@ -238,10 +233,7 @@ where timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), payload_commitment: Some(VIDBlockPayload::genesis().commit()), - // quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), - // timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), api: c_api.clone(), - // committee_exchange: c_api.inner.exchanges.committee_exchange().clone().into(), _pd: PhantomData, vote_collector: None, timeout_task: async_spawn(async move {}), @@ -251,13 +243,13 @@ where vid_certs: HashMap::new(), current_proposal: None, id: handle.hotshot.inner.id, - public_key: todo!(), - private_key: todo!(), - quorum_network: todo!(), - committee_network: todo!(), - timeout_membership: todo!(), - quorum_membership: todo!(), - committee_membership: todo!(), + public_key: c_api.public_key().clone(), + private_key: c_api.private_key().clone(), + quorum_network: c_api.inner.networks.quorum_network.clone().into(), + committee_network: c_api.inner.networks.da_network.clone().into(), + timeout_membership: c_api.inner.memberships.quorum_membership.clone().into(), + quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), + committee_membership: c_api.inner.memberships.da_membership.clone().into(), }; consensus_state .quorum_network diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 88afb8c724..78f3c85207 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -165,12 +165,7 @@ where pub id: u64, } -impl> TS for VoteCollectionTaskState -where - QuorumEx: ConsensusExchange>, - TimeoutEx: ConsensusExchange>, -{ -} +impl> TS for VoteCollectionTaskState {} #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] @@ -180,11 +175,7 @@ async fn vote_handle>( ) -> ( std::option::Option, VoteCollectionTaskState, -) -where - QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, - TimeoutEx: ConsensusExchange>, -{ +) { match event { HotShotEvent::QuorumVoteRecv(vote) => { // For the case where we receive votes after we've made a certificate @@ -288,11 +279,6 @@ impl< I: NodeImplementation, A: ConsensusApi + 'static, > ConsensusTaskState -where - TYPES::BlockHeader: BlockHeader, - QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, - CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, - TimeoutEx: ConsensusExchange, Membership = TYPES::Membership>, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] @@ -1308,10 +1294,6 @@ impl< I: NodeImplementation, A: ConsensusApi, > TS for ConsensusTaskState -where - QuorumEx: ConsensusExchange>, - CommitteeEx: ConsensusExchange>, - TimeoutEx: ConsensusExchange>, { } @@ -1342,13 +1324,7 @@ pub async fn sequencing_consensus_handle< ) -> ( std::option::Option, ConsensusTaskState, -) -where - TYPES::BlockHeader: BlockHeader, - QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, - CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, - TimeoutEx: ConsensusExchange, Membership = TYPES::Membership>, -{ +) { if let HotShotEvent::Shutdown = event { (Some(HotShotTaskCompleted::ShutDown), state) } else { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 3aca7eeaf4..cef5921227 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -66,7 +66,7 @@ pub async fn build_system_handle( >>::Membership::default_election_config(config.total_nodes.get() as u64) }); let networks_bundle = Networks { - quorum_netowrk: networks.0.clone(), + quorum_network: networks.0.clone(), da_network: networks.1.clone(), _pd: PhantomData, }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index d48ecc9305..964cc0e5c1 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -268,7 +268,7 @@ where }); let committee_election_config = I::committee_election_config_generator(); let network_bundle = hotshot::Networks { - quorum_netowrk: networks.0.clone(), + quorum_network: networks.0.clone(), da_network: networks.1.clone(), _pd: PhantomData, }; From d6080bf2a6f97e8663794fef086facdaab60e0ef Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 21:48:58 -0500 Subject: [PATCH 0411/1393] remove exchange from DA task --- hotshot/src/lib.rs | 9 +--- hotshot/src/tasks/mod.rs | 6 ++- task-impls/src/da.rs | 99 +++++++++++++++++++++------------------- testing/tests/da_task.rs | 4 +- 4 files changed, 58 insertions(+), 60 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 1ce315003e..5777f200c2 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -742,13 +742,8 @@ where handle.clone(), ) .await; - let task_runner = add_da_task( - task_runner, - internal_event_stream.clone(), - committee_exchange.clone(), - handle.clone(), - ) - .await; + let task_runner = + add_da_task(task_runner, internal_event_stream.clone(), handle.clone()).await; let task_runner = add_vid_task( task_runner, internal_event_stream.clone(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b9edfe53a2..f72892f963 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -359,7 +359,6 @@ where pub async fn add_da_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - committee_exchange: CommitteeEx, handle: SystemContextHandle, ) -> TaskRunner where @@ -374,10 +373,13 @@ where registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), + da_membership: c_api.inner.memberships.da_membership.clone().into(), + da_network: c_api.inner.networks.da_network.clone().into(), cur_view: TYPES::Time::new(0), - committee_exchange: committee_exchange.into(), vote_collector: None, event_stream: event_stream.clone(), + public_key: c_api.public_key().clone(), + private_key: c_api.private_key().clone(), id: handle.hotshot.inner.id, }; let da_event_handler = HandleEvent(Arc::new( diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 20141cf776..68e967c1d5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -20,7 +20,7 @@ use hotshot_types::{ simple_vote::{DAData, DAVote}, traits::{ consensus_api::ConsensusApi, - election::{CommitteeExchangeType, ConsensusExchange, Membership}, + election::{ConsensusExchange, Membership}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -59,8 +59,11 @@ pub struct DATaskState< /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>, - /// the committee exchange - pub committee_exchange: Arc>, + /// Membership for the DA committee + pub da_membership: Arc, + + /// Network for DA + pub da_network: Arc, /// The view and ID of the current vote collection task, if there is one. pub vote_collector: Option<(TYPES::Time, usize, usize)>, @@ -68,6 +71,12 @@ pub struct DATaskState< /// Global events stream to publish events pub event_stream: ChannelStream>, + /// This Nodes public key + pub public_key: TYPES::SignatureKey, + + /// This Nodes private key + pub private_key: ::PrivateKey, + /// This state's ID pub id: u64, } @@ -77,9 +86,12 @@ pub struct DAVoteCollectionTaskState: ConsensusExchange>, { - /// the committee exchange - pub committee_exchange: Arc>, - #[allow(clippy::type_complexity)] + /// Membership for the DA committee + pub da_membership: Arc, + + /// Network for DA + pub da_network: Arc, + // #[allow(clippy::type_complexity)] /// Accumulates DA votes pub accumulator: Either, DACertificate>, DACertificate>, @@ -87,6 +99,11 @@ where pub cur_view: TYPES::Time, /// event stream for channel events pub event_stream: ChannelStream>, + /// This Nodes public key + pub public_key: TYPES::SignatureKey, + + /// This Nodes private key + pub private_key: ::PrivateKey, /// the id of this task state pub id: u64, } @@ -120,7 +137,7 @@ where let accumulator = state.accumulator.left().unwrap(); - match accumulator.accumulate(&vote, state.committee_exchange.membership()) { + match accumulator.accumulate(&vote, state.da_membership.as_ref()) { Left(new_accumulator) => { state.accumulator = either::Left(new_accumulator); } @@ -129,16 +146,12 @@ where debug!("Sending DAC! {:?}", dac.view_number); state .event_stream - .publish(HotShotEvent::DACSend( - dac.clone(), - state.committee_exchange.public_key().clone(), - )) + .publish(HotShotEvent::DACSend(dac.clone(), state.public_key.clone())) .await; state.accumulator = Right(dac.clone()); state - .committee_exchange - .network() + .da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( *dac.view_number, )) @@ -185,8 +198,7 @@ where // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block // stop polling for the received proposal - self.committee_exchange - .network() + self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( *proposal.data.view_number, )) @@ -204,7 +216,7 @@ where let payload_commitment = proposal.data.block_payload.commit(); // ED Is this the right leader? - let view_leader_key = self.committee_exchange.get_leader(view); + let view_leader_key = self.da_membership.get_leader(view); if view_leader_key != sender { error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); return None; @@ -215,11 +227,7 @@ where return None; } - if !self - .committee_exchange - .membership() - .has_stake(self.committee_exchange.public_key()) - { + if !self.da_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", self.cur_view @@ -232,8 +240,8 @@ where payload_commit: payload_commitment, }, view, - self.committee_exchange.public_key(), - self.committee_exchange.private_key(), + &self.public_key, + &self.private_key, ); // ED Don't think this is necessary? @@ -263,8 +271,8 @@ where debug!("DA vote recv, Main Task {:?}", vote.get_view_number(),); // Check if we are the leader and the vote is from the sender. let view = vote.get_view_number(); - if !self.committee_exchange.is_leader(view) { - error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.committee_exchange.is_leader(view + 1)); + if self.da_membership.get_leader(view) != self.public_key { + error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.da_membership.get_leader(view + 1) == self.public_key); return None; } @@ -287,20 +295,22 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.committee_exchange.total_nodes()], + signers: bitvec![0; self.da_membership.total_nodes()], phantom: PhantomData, }; let accumulator = - new_accumulator.accumulate(&vote, self.committee_exchange.membership()); + new_accumulator.accumulate(&vote, self.da_membership.as_ref()); let state = DAVoteCollectionTaskState { - committee_exchange: self.committee_exchange.clone(), - accumulator, cur_view: view, event_stream: self.event_stream.clone(), id: self.id, + da_membership: self.da_membership.clone(), + da_network: self.da_network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), }; let name = "DA Vote Collection"; let filter = FilterEvent(Arc::new(|event| { @@ -339,24 +349,21 @@ where // Inject view info into network let is_da = self - .committee_exchange - .membership() + .da_membership .get_committee(self.cur_view + 1) - .contains(self.committee_exchange.public_key()); + .contains(&self.public_key); if is_da { debug!("Polling for DA proposals for view {}", *self.cur_view + 1); - self.committee_exchange - .network() + self.da_network .inject_consensus_info(ConsensusIntentEvent::PollForProposal( *self.cur_view + 1, )) .await; } - if self.committee_exchange.is_leader(self.cur_view + 3) { + if self.da_membership.get_leader(self.cur_view + 3) == self.public_key { debug!("Polling for transactions for view {}", *self.cur_view + 3); - self.committee_exchange - .network() + self.da_network .inject_consensus_info(ConsensusIntentEvent::PollForTransactions( *self.cur_view + 3, )) @@ -364,30 +371,27 @@ where } // If we are not the next leader (DA leader for this view) immediately exit - if !self.committee_exchange.is_leader(self.cur_view + 1) { + if self.da_membership.get_leader(self.cur_view + 1) != self.public_key { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); return None; } debug!("Polling for DA votes for view {}", *self.cur_view + 1); // Start polling for DA votes for the "next view" - self.committee_exchange - .network() + self.da_network .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view + 1)) .await; return None; } HotShotEvent::BlockReady(block, view) => { - self.committee_exchange - .network() + self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; let payload_commitment = block.commit(); - let signature = self - .committee_exchange - .sign_da_proposal(&payload_commitment); + let signature = + TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); let data: DAProposal = DAProposal { block_payload: block.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? @@ -407,14 +411,13 @@ where self.event_stream .publish(HotShotEvent::DAProposalSend( message.clone(), - self.committee_exchange.public_key().clone(), + self.public_key.clone(), )) .await; } HotShotEvent::Timeout(view) => { - self.committee_exchange - .network() + self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) .await; } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 7d3ac6734d..debfcb7d0e 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -96,9 +96,7 @@ async fn test_da_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| { - add_da_task(task_runner, event_stream, committee_exchange, handle) - }; + let build_fn = |task_runner, event_stream| add_da_task(task_runner, event_stream, handle); run_harness(input, output, None, build_fn).await; } From 3713d9ea686233f792215e16e2d8cc1f423f8b4a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 22:01:55 -0500 Subject: [PATCH 0412/1393] remove exchagnes from transactions task --- hotshot/src/lib.rs | 9 ++------ hotshot/src/tasks/mod.rs | 6 ++++-- task-impls/src/da.rs | 29 ++++++------------------- task-impls/src/transactions.rs | 39 +++++++++++++++++----------------- 4 files changed, 33 insertions(+), 50 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5777f200c2..a92d499e35 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -751,13 +751,8 @@ where handle.clone(), ) .await; - let task_runner = add_transaction_task( - task_runner, - internal_event_stream.clone(), - quorum_exchange, - handle.clone(), - ) - .await; + let task_runner = + add_transaction_task(task_runner, internal_event_stream.clone(), handle.clone()).await; let task_runner = add_view_sync_task(task_runner, internal_event_stream.clone(), handle.clone()).await; async_spawn(async move { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f72892f963..9217543fe7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -421,7 +421,6 @@ pub async fn add_transaction_task< >( task_runner: TaskRunner, event_stream: ChannelStream>, - quorum_exchange: QuorumEx, handle: SystemContextHandle, ) -> TaskRunner where @@ -439,7 +438,10 @@ where transactions: Arc::default(), seen_transactions: HashSet::new(), cur_view: TYPES::Time::new(0), - quorum_exchange: quorum_exchange.into(), + network: c_api.inner.networks.quorum_network.clone().into(), + membership: c_api.inner.memberships.quorum_membership.clone().into(), + public_key: c_api.public_key().clone(), + private_key: c_api.private_key().clone(), event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 68e967c1d5..a3a69996cd 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -16,13 +16,13 @@ use hotshot_types::simple_certificate::DACertificate; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, - message::{Message, Proposal}, + message::Proposal, simple_vote::{DAData, DAVote}, traits::{ consensus_api::ConsensusApi, - election::{ConsensusExchange, Membership}, + election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, @@ -45,9 +45,7 @@ pub struct DATaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, -> where - CommitteeEx: ConsensusExchange>, -{ +> { /// The state's api pub api: A, /// Global registry task for the state @@ -82,10 +80,7 @@ pub struct DATaskState< } /// Struct to maintain DA Vote Collection task state -pub struct DAVoteCollectionTaskState> -where - CommitteeEx: ConsensusExchange>, -{ +pub struct DAVoteCollectionTaskState> { /// Membership for the DA committee pub da_membership: Arc, @@ -108,10 +103,7 @@ where pub id: u64, } -impl> TS for DAVoteCollectionTaskState where - CommitteeEx: ConsensusExchange> -{ -} +impl> TS for DAVoteCollectionTaskState {} #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "DA Vote Collection Task", level = "error")] async fn vote_handle>( @@ -120,10 +112,7 @@ async fn vote_handle>( ) -> ( std::option::Option, DAVoteCollectionTaskState, -) -where - CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, -{ +) { match event { HotShotEvent::DAVoteRecv(vote) => { debug!("DA vote recv, collection task {:?}", vote.get_view_number()); @@ -172,8 +161,6 @@ where impl, A: ConsensusApi + 'static> DATaskState -where - CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] @@ -450,8 +437,6 @@ where /// task state implementation for DA Task impl, A: ConsensusApi + 'static> TS for DATaskState -where - CommitteeEx: ConsensusExchange>, { } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 86cc1856f7..80f6404b10 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -16,11 +16,12 @@ use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, consensus::Consensus, data::{Leaf, VidDisperse, VidScheme, VidSchemeTrait}, - message::{Message, Proposal}, + message::Proposal, traits::{ consensus_api::ConsensusApi, - election::{ConsensusExchange, QuorumExchangeType}, - node_implementation::{NodeImplementation, NodeType, QuorumEx}, + election::Membership, + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, BlockPayload, }, }; @@ -46,9 +47,7 @@ pub struct TransactionTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, -> where - QuorumEx: ConsensusExchange>, -{ +> { /// The state's api pub api: A, /// Global registry task for the state @@ -66,12 +65,19 @@ pub struct TransactionTaskState< /// A list of transactions we've seen decided, but didn't receive pub seen_transactions: HashSet>, - /// the committee exchange - pub quorum_exchange: Arc>, + /// Network for all nodes + pub network: Arc, + + /// Membership for teh quorum + pub membership: Arc, /// Global events stream to publish events pub event_stream: ChannelStream>, + /// This Nodes Public Key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, /// This state's ID pub id: u64, } @@ -84,8 +90,6 @@ impl< I: NodeImplementation, A: ConsensusApi + 'static, > TransactionTaskState -where - QuorumEx: ConsensusExchange>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] @@ -179,9 +183,7 @@ where } self.cur_view = view; - // If we are not the next leader (DA leader for this view) immediately exit - if !self.quorum_exchange.is_leader(self.cur_view + 1) { - // panic!("We are not the DA leader for view {}", *self.cur_view + 1); + if self.membership.get_leader(self.cur_view + 1) != self.public_key { return None; } @@ -264,10 +266,13 @@ where common: vid_disperse.common, }, // TODO (Keyao) This is also signed in DA task. - signature: self.quorum_exchange.sign_payload_commitment(block.commit()), + signature: TYPES::SignatureKey::sign( + &self.private_key, + block.commit().as_ref(), + ), _pd: PhantomData, }, - self.quorum_exchange.public_key().clone(), + self.public_key.clone(), )) .await; return None; @@ -286,8 +291,6 @@ where // whereas here it's just `TYPES: NodeType`. impl, A: ConsensusApi + 'static> TransactionTaskState -where - QuorumEx: ConsensusExchange>, { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_transactions( @@ -372,8 +375,6 @@ where /// task state implementation for Transactions Task impl, A: ConsensusApi + 'static> TS for TransactionTaskState -where - QuorumEx: ConsensusExchange>, { } From ab7e8f25ea38989c366eebb786a18ab5de9f6f29 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 22:18:37 -0500 Subject: [PATCH 0413/1393] remove exchanges from view sync --- hotshot/src/tasks/mod.rs | 7 +- task-impls/src/view_sync.rs | 202 +++++++++++++++++++----------------- 2 files changed, 112 insertions(+), 97 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9217543fe7..9b85712fe7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -33,7 +33,7 @@ use hotshot_types::{ election::{ConsensusExchange, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ - CommitteeEx, ExchangesType, NodeImplementation, NodeType, QuorumEx, VIDEx, ViewSyncEx, + CommitteeEx, NodeImplementation, NodeType, QuorumEx, VIDEx, ViewSyncEx, }, state::ConsensusTime, }, @@ -495,7 +495,10 @@ where event_stream: event_stream.clone(), current_view: TYPES::Time::new(0), next_view: TYPES::Time::new(0), - exchange: (*api.inner.exchanges.view_sync_exchange()).clone().into(), + network: api.inner.networks.quorum_network.clone().into(), + membership: api.inner.memberships.view_sync_membership.clone().into(), + public_key: api.public_key().clone(), + private_key: api.private_key().clone(), api, num_timeouts_tracked: 0, replica_task_map: HashMap::default(), diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 58119f6ce4..045b7c71d0 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -9,7 +9,6 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::simple_vote::ViewSyncFinalizeData; use hotshot_types::{ simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -21,16 +20,17 @@ use hotshot_types::{ traits::network::ConsensusIntentEvent, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; +use hotshot_types::{simple_vote::ViewSyncFinalizeData, traits::signature_key::SignatureKey}; use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ - message::{GeneralConsensusMessage, Message}, + message::GeneralConsensusMessage, traits::{ consensus_api::ConsensusApi, - election::{ConsensusExchange, ViewSyncExchangeType}, + election::Membership, network::CommunicationChannel, - node_implementation::{NodeImplementation, NodeType, ViewSyncEx}, + node_implementation::{NodeImplementation, NodeType}, state::ConsensusTime, }, }; @@ -66,9 +66,7 @@ pub struct ViewSyncTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, -> where - ViewSyncEx: ViewSyncExchangeType>, -{ +> { /// Registry to register sub tasks pub registry: GlobalRegistry, /// Event stream to publish events to @@ -77,8 +75,14 @@ pub struct ViewSyncTaskState< pub current_view: TYPES::Time, /// View HotShot wishes to be in pub next_view: TYPES::Time, - /// View sync exchange - pub exchange: Arc>, + /// Network for all nodes + pub network: Arc, + /// Membership for teh quorum + pub membership: Arc, + /// This Nodes Public Key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, /// HotShot consensus API pub api: A, /// Our node id; for logging @@ -105,8 +109,6 @@ impl< I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, > TS for ViewSyncTaskState -where - ViewSyncEx: ViewSyncExchangeType>, { } @@ -123,9 +125,7 @@ pub struct ViewSyncReplicaTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, -> where - ViewSyncEx: ViewSyncExchangeType>, -{ +> { /// Timeout for view sync rounds pub view_sync_timeout: Duration, /// Current round HotShot is in @@ -143,8 +143,14 @@ pub struct ViewSyncReplicaTaskState< /// Our node id; for logging pub id: u64, - /// View sync exchange - pub exchange: Arc>, + /// Network for all nodes + pub network: Arc, + /// Membership for teh quorum + pub membership: Arc, + /// This Nodes Public Key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, /// HotShot consensus API pub api: A, /// Event stream to publish events to @@ -153,9 +159,6 @@ pub struct ViewSyncReplicaTaskState< impl, A: ConsensusApi + 'static> TS for ViewSyncReplicaTaskState -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, { } @@ -176,8 +179,14 @@ pub struct ViewSyncRelayTaskState< > { /// Event stream to publish events to pub event_stream: ChannelStream>, - /// View sync exchange - pub exchange: Arc>, + /// Network for all nodes + pub network: Arc, + /// Membership for teh quorum + pub membership: Arc, + /// This Nodes Public Key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, /// Vote accumulator #[allow(clippy::type_complexity)] @@ -211,9 +220,6 @@ impl< I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, > ViewSyncTaskState -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] @@ -250,7 +256,10 @@ where finalized: false, sent_view_change_event: false, phase: ViewSyncPhase::None, - exchange: self.exchange.clone(), + membership: self.membership.clone(), + network: self.network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), api: self.api.clone(), event_stream: self.event_stream.clone(), view_sync_timeout: self.view_sync_timeout, @@ -308,9 +317,10 @@ where } // We do not have a relay task already running, so start one - if !self - .exchange - .is_leader(vote.get_view_number() + vote.get_data().relay) + if self + .membership + .get_leader(vote.get_view_number() + vote.get_data().relay) + != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); @@ -320,7 +330,7 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.exchange.total_nodes()], + signers: bitvec![0; self.membership.total_nodes()], phantom: PhantomData, }; @@ -331,7 +341,10 @@ where ViewSyncPreCommitCertificate2, > { event_stream: self.event_stream.clone(), - exchange: self.exchange.clone(), + membership: self.membership.clone(), + network: self.network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -394,9 +407,10 @@ where } // We do not have a relay task already running, so start one - if !self - .exchange - .is_leader(vote.get_view_number() + vote.get_data().relay) + if self + .membership + .get_leader(vote.get_view_number() + vote.get_data().relay) + != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); @@ -406,7 +420,7 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.exchange.total_nodes()], + signers: bitvec![0; self.membership.total_nodes()], phantom: PhantomData, }; @@ -417,7 +431,10 @@ where ViewSyncCommitCertificate2, > { event_stream: self.event_stream.clone(), - exchange: self.exchange.clone(), + membership: self.membership.clone(), + network: self.network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -480,9 +497,10 @@ where } // We do not have a relay task already running, so start one - if !self - .exchange - .is_leader(vote.get_view_number() + vote.get_data().relay) + if self + .membership + .get_leader(vote.get_view_number() + vote.get_data().relay) + != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); @@ -492,7 +510,7 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.exchange.total_nodes()], + signers: bitvec![0; self.membership.total_nodes()], phantom: PhantomData, }; @@ -503,7 +521,10 @@ where ViewSyncFinalizeCertificate2, > { event_stream: self.event_stream.clone(), - exchange: self.exchange.clone(), + membership: self.membership.clone(), + network: self.network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), accumulator: either::Left(new_accumulator), id: self.id, }; @@ -614,15 +635,13 @@ where if self.num_timeouts_tracked > 2 { // Start polling for view sync certificates - self.exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( *view_number + 1, )) .await; - self.exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( *view_number + 1, )) @@ -630,21 +649,21 @@ where // Spawn replica task let next_view = *view_number + 1; // Subscribe to the view after we are leader since we know we won't propose in the next view if we are leader. - let subscribe_view = if self.exchange.is_leader(TYPES::Time::new(next_view)) { - next_view + 1 - } else { + let subscribe_view = if self.membership.get_leader(TYPES::Time::new(next_view)) + == self.public_key + { next_view + } else { + next_view + 1 }; // Subscribe to the next view just in case there is progress being made - self.exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForProposal( subscribe_view, )) .await; - self.exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; @@ -655,7 +674,10 @@ where finalized: false, sent_view_change_event: false, phase: ViewSyncPhase::None, - exchange: self.exchange.clone(), + membership: self.membership.clone(), + network: self.network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), api: self.api.clone(), event_stream: self.event_stream.clone(), view_sync_timeout: self.view_sync_timeout, @@ -740,9 +762,6 @@ where impl, A: ConsensusApi + 'static> ViewSyncReplicaTaskState -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task @@ -765,7 +784,7 @@ where } // If certificate is not valid, return current state - if !certificate.is_valid_cert(self.exchange.membership()) { + if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); return (None, self); @@ -794,8 +813,8 @@ where round: self.next_view, }, self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); @@ -833,7 +852,7 @@ where } // If certificate is not valid, return current state - if !certificate.is_valid_cert(self.exchange.membership()) { + if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); return (None, self); @@ -862,8 +881,8 @@ where round: self.next_view, }, self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); @@ -904,7 +923,7 @@ where } // If certificate is not valid, return current state - if !certificate.is_valid_cert(self.exchange.membership()) { + if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); return (None, self); @@ -945,8 +964,8 @@ where round: view_number, }, view_number, - self.exchange.public_key(), - self.exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); @@ -989,8 +1008,8 @@ where round: self.next_view, }, self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); @@ -1008,8 +1027,8 @@ where round: self.next_view, }, self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); @@ -1027,8 +1046,8 @@ where round: self.next_view, }, self.next_view, - self.exchange.public_key(), - self.exchange.private_key(), + &self.public_key, + &self.private_key, ); let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); @@ -1076,9 +1095,6 @@ impl> ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, > -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -1099,9 +1115,10 @@ where HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { // Ignore this vote if we are not the correct relay // TODO ED Replace exchange with membership - if !self - .exchange - .is_leader(vote.get_data().round + vote.get_data().relay) + if self + .membership + .get_leader(vote.get_data().round + vote.get_data().relay) + != self.public_key { info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); return (None, self); @@ -1116,7 +1133,7 @@ where match self.accumulator { Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), Left(accumulator) => { - match accumulator.accumulate(&vote, self.exchange.membership()) { + match accumulator.accumulate(&vote, self.membership.as_ref()) { Left(new_accumulator) => { self.accumulator = Either::Left(new_accumulator); } @@ -1124,7 +1141,7 @@ where self.event_stream .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( certificate.clone(), - self.exchange.public_key().clone(), + self.public_key.clone(), )) .await; self.accumulator = Right(certificate); @@ -1144,9 +1161,6 @@ where impl> ViewSyncRelayTaskState, ViewSyncCommitCertificate2> -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -1166,9 +1180,10 @@ where match event { HotShotEvent::ViewSyncCommitVoteRecv(vote) => { // Ignore this vote if we are not the correct relay - if !self - .exchange - .is_leader(vote.get_data().round + vote.get_data().relay) + if self + .membership + .get_leader(vote.get_data().round + vote.get_data().relay) + != self.public_key { info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); return (None, self); @@ -1183,7 +1198,7 @@ where match self.accumulator { Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), Left(accumulator) => { - match accumulator.accumulate(&vote, self.exchange.membership()) { + match accumulator.accumulate(&vote, self.membership.as_ref()) { Left(new_accumulator) => { self.accumulator = Either::Left(new_accumulator); } @@ -1191,7 +1206,7 @@ where self.event_stream .publish(HotShotEvent::ViewSyncCommitCertificate2Send( certificate.clone(), - self.exchange.public_key().clone(), + self.public_key.clone(), )) .await; self.accumulator = Right(certificate); @@ -1216,9 +1231,6 @@ impl> ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, > -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, { /// Handles incoming events for the view sync relay task #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] @@ -1238,10 +1250,10 @@ where match event { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { // Ignore this vote if we are not the correct relay - // TODO ED Replace exchange with membership - if !self - .exchange - .is_leader(vote.get_data().round + vote.get_data().relay) + if self + .membership + .get_leader(vote.get_data().round + vote.get_data().relay) + != self.public_key { info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); return (None, self); @@ -1256,7 +1268,7 @@ where match self.accumulator { Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), Left(accumulator) => { - match accumulator.accumulate(&vote, self.exchange.membership()) { + match accumulator.accumulate(&vote, self.membership.as_ref()) { Left(new_accumulator) => { self.accumulator = Either::Left(new_accumulator); } @@ -1264,7 +1276,7 @@ where self.event_stream .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( certificate.clone(), - self.exchange.public_key().clone(), + self.public_key.clone(), )) .await; self.accumulator = Right(certificate); From 2da2ff39ecbb85aa1529f3ea88a365650a986cf9 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 16 Nov 2023 22:26:40 -0500 Subject: [PATCH 0414/1393] remove exchanges from vid task --- hotshot/src/lib.rs | 9 +--- hotshot/src/tasks/mod.rs | 6 ++- task-impls/src/vid.rs | 97 +++++++++++++++++---------------------- testing/tests/vid_task.rs | 3 +- 4 files changed, 49 insertions(+), 66 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a92d499e35..b05b45f6d7 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -744,13 +744,8 @@ where .await; let task_runner = add_da_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = add_vid_task( - task_runner, - internal_event_stream.clone(), - vid_exchange.clone(), - handle.clone(), - ) - .await; + let task_runner = + add_vid_task(task_runner, internal_event_stream.clone(), handle.clone()).await; let task_runner = add_transaction_task(task_runner, internal_event_stream.clone(), handle.clone()).await; let task_runner = diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9b85712fe7..b7903c9665 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -301,7 +301,6 @@ pub async fn add_consensus_task< pub async fn add_vid_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - vid_exchange: VIDEx, handle: SystemContextHandle, ) -> TaskRunner where @@ -317,8 +316,11 @@ where api: c_api.clone(), consensus: handle.hotshot.get_consensus(), cur_view: TYPES::Time::new(0), - vid_exchange: vid_exchange.into(), vote_collector: None, + network: c_api.inner.networks.quorum_network.clone().into(), + membership: c_api.inner.memberships.vid_membership.clone().into(), + public_key: c_api.public_key().clone(), + private_key: c_api.private_key().clone(), event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 86eac2dd0d..4628fc1a67 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -14,11 +14,10 @@ use hotshot_task::{ use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ consensus::{Consensus, View}, - message::Message, traits::{ consensus_api::ConsensusApi, - election::{ConsensusExchange, Membership}, - node_implementation::{NodeImplementation, NodeType, VIDEx}, + election::Membership, + node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, }, @@ -45,9 +44,7 @@ pub struct VIDTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, -> where - VIDEx: ConsensusExchange>, -{ +> { /// The state's api pub api: A, /// Global registry task for the state @@ -58,10 +55,14 @@ pub struct VIDTaskState< /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>, - - /// the VID exchange - pub vid_exchange: Arc>, - + /// Network for all nodes + pub network: Arc, + /// Membership for teh quorum + pub membership: Arc, + /// This Nodes Public Key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, /// The view and ID of the current vote collection task, if there is one. pub vote_collector: Option<(TYPES::Time, usize, usize)>, @@ -73,12 +74,15 @@ pub struct VIDTaskState< } /// Struct to maintain VID Vote Collection task state -pub struct VIDVoteCollectionTaskState> -where - VIDEx: ConsensusExchange>, -{ - /// the vid exchange - pub vid_exchange: Arc>, +pub struct VIDVoteCollectionTaskState> { + /// Network for all nodes + pub network: Arc, + /// Membership for teh quorum + pub membership: Arc, + /// This Nodes Public Key + pub public_key: TYPES::SignatureKey, + /// Our Private Key + pub private_key: ::PrivateKey, #[allow(clippy::type_complexity)] /// Accumulates VID votes pub accumulator: Either< @@ -93,10 +97,7 @@ where pub id: u64, } -impl> TS for VIDVoteCollectionTaskState where - VIDEx: ConsensusExchange> -{ -} +impl> TS for VIDVoteCollectionTaskState {} #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "VID Vote Collection Task", level = "error")] async fn vote_handle( @@ -109,7 +110,6 @@ async fn vote_handle( where TYPES: NodeType, I: NodeImplementation, - VIDEx: ConsensusExchange, Membership = TYPES::Membership>, { match event { HotShotEvent::VidVoteRecv(vote) => { @@ -126,7 +126,7 @@ where } let accumulator = state.accumulator.left().unwrap(); - match accumulator.accumulate(&vote, state.vid_exchange.membership()) { + match accumulator.accumulate(&vote, state.membership.as_ref()) { Left(new_accumulator) => { state.accumulator = either::Left(new_accumulator); } @@ -137,14 +137,13 @@ where .event_stream .publish(HotShotEvent::VidCertSend( vid_cert.clone(), - state.vid_exchange.public_key().clone(), + state.public_key.clone(), )) .await; state.accumulator = Right(vid_cert.clone()); state - .vid_exchange - .network() + .network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDVotes( *vid_cert.view_number, )) @@ -164,8 +163,6 @@ where impl, A: ConsensusApi + 'static> VIDTaskState -where - VIDEx: ConsensusExchange, Membership = TYPES::Membership>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] @@ -182,11 +179,11 @@ where // ); // Check if we are the leader and the vote is from the sender. let view = vote.get_view_number(); - if !self.vid_exchange.is_leader(view) { + if self.membership.get_leader(view) != self.public_key { error!( "We are not the VID leader for view {} are we leader for next view? {}", *view, - self.vid_exchange.is_leader(view + 1) + self.membership.get_leader(view + 1) == self.public_key ); return None; } @@ -210,16 +207,17 @@ where let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), sig_lists: Vec::new(), - signers: bitvec![0; self.vid_exchange.total_nodes()], + signers: bitvec![0; self.membership.total_nodes()], phantom: PhantomData, }; - let accumulator = - new_accumulator.accumulate(&vote, self.vid_exchange.membership()); + let accumulator = new_accumulator.accumulate(&vote, self.membership.as_ref()); let state = VIDVoteCollectionTaskState { - vid_exchange: self.vid_exchange.clone(), - + network: self.network.clone(), + membership: self.membership.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), accumulator, cur_view: view, event_stream: self.event_stream.clone(), @@ -257,8 +255,7 @@ where ); // stop polling for the received disperse - self.vid_exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( *disperse.data.view_number, )) @@ -280,7 +277,7 @@ where let payload_commitment = disperse.data.payload_commitment; // ED Is this the right leader? - let view_leader_key = self.vid_exchange.get_leader(view); + let view_leader_key = self.membership.get_leader(view); if view_leader_key != sender { error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); return None; @@ -291,11 +288,7 @@ where return None; } - if !self - .vid_exchange - .membership() - .has_stake(self.vid_exchange.public_key()) - { + if !self.membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", self.cur_view @@ -309,8 +302,8 @@ where payload_commit: payload_commitment, }, view, - self.vid_exchange.public_key(), - self.vid_exchange.private_key(), + &self.public_key, + &self.private_key, ); // ED Don't think this is necessary? @@ -339,8 +332,7 @@ where // consensus.saved_block_payloads.insert(proposal.data.block_payload); } HotShotEvent::VidCertRecv(cert) => { - self.vid_exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDCertificate( *cert.view_number, )) @@ -357,29 +349,26 @@ where self.cur_view = view; // Start polling for VID disperse for the new view - self.vid_exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( *self.cur_view + 1, )) .await; - self.vid_exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForVIDCertificate( *self.cur_view + 1, )) .await; // If we are not the next leader, we should exit - if !self.vid_exchange.is_leader(self.cur_view + 1) { + if self.membership.get_leader(self.cur_view + 1) != self.public_key { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); return None; } // Start polling for VID votes for the "next view" - self.vid_exchange - .network() + self.network .inject_consensus_info(ConsensusIntentEvent::PollForVIDVotes( *self.cur_view + 1, )) @@ -414,8 +403,6 @@ where /// task state implementation for VID Task impl, A: ConsensusApi + 'static> TS for VIDTaskState -where - VIDEx: ConsensusExchange>, { } diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index f8c81c9217..37fe2798c8 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -100,8 +100,7 @@ async fn test_vid_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = - |task_runner, event_stream| add_vid_task(task_runner, event_stream, vid_exchange, handle); + let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, handle); run_harness(input, output, None, build_fn).await; } From 00ec33754f7e7550bfbfcad72655d9da83388237 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 17 Nov 2023 10:27:51 -0500 Subject: [PATCH 0415/1393] address comments --- hotshot-state-prover/src/circuit.rs | 61 +++++++++++++++++++++++++---- hotshot-state-prover/src/lib.rs | 21 +++++----- hotshot-state-prover/src/utils.rs | 2 - types/src/traits/state.rs | 19 +++++---- 4 files changed, 73 insertions(+), 30 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index d9b47ac422..24bdf26b94 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -59,20 +59,67 @@ pub struct LightClientStateVar { } #[derive(Clone, Debug)] -pub struct PublicInputs(Vec); +pub struct PublicInput(Vec); -impl AsRef<[F]> for PublicInputs { +impl AsRef<[F]> for PublicInput { fn as_ref(&self) -> &[F] { &self.0 } } -impl From> for PublicInputs { +impl From> for PublicInput { fn from(v: Vec) -> Self { Self(v) } } +impl PublicInput { + /// Return the threshold + pub fn threshold(&self) -> F { + self.0[0] + } + + /// Return the view number of the light client state + pub fn view_number(&self) -> F { + self.0[1] + } + + /// Return the block height of the light client state + pub fn block_height(&self) -> F { + self.0[2] + } + + /// Return the block commitment of the light client state + pub fn block_comm(&self) -> F { + self.0[3] + } + + /// Return the fee ledger commitment of the light client state + pub fn fee_ledger_comm(&self) -> F { + self.0[4] + } + + /// Return the stake table commitment of the light client state + pub fn stake_table_comm(&self) -> (F, F, F) { + (self.0[5], self.0[6], self.0[7]) + } + + /// Return the bls key commitment of the light client state + pub fn bls_key_comm(&self) -> F { + self.0[5] + } + + /// Return the schnorr key commitment of the light client state + pub fn schnorr_key_comm(&self) -> F { + self.0[6] + } + + /// Return the stake amount commitment of the light client state + pub fn stake_amount_comm(&self) -> F { + self.0[7] + } +} + impl LightClientStateVar { pub fn new( circuit: &mut PlonkCircuit, @@ -146,7 +193,7 @@ pub(crate) fn build( signatures: SigIter, lightclient_state: &LightClientState, threshold: &U256, -) -> Result<(PlonkCircuit, PublicInputs), PlonkError> +) -> Result<(PlonkCircuit, PublicInput), PlonkError> where F: RescueParameter, P: TECurveConfig, @@ -391,7 +438,7 @@ mod tests { fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - let state_msg = lightclient_state.to_array(); + let state_msg: [F; 7] = lightclient_state.clone().into(); let sigs = schnorr_keys .iter() @@ -470,7 +517,7 @@ mod tests { // bad path: bad stake table commitment let mut bad_lightclient_state = lightclient_state.clone(); bad_lightclient_state.stake_table_comm.1 = F::default(); - let bad_state_msg = bad_lightclient_state.to_array(); + let bad_state_msg: [F; 7] = bad_lightclient_state.clone().into(); let sig_for_bad_state = schnorr_keys .iter() .map(|(key, _)| { @@ -494,7 +541,7 @@ mod tests { let mut wrong_light_client_state = lightclient_state.clone(); // state with a different bls key commitment wrong_light_client_state.stake_table_comm.0 = F::default(); - let wrong_state_msg = wrong_light_client_state.to_array(); + let wrong_state_msg: [F; 7] = wrong_light_client_state.into(); let wrong_sigs = schnorr_keys .iter() .map(|(key, _)| { diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index d50d273914..84ec5bfc41 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -2,6 +2,8 @@ /// State verifier circuit builder pub mod circuit; +/// Utilities for test +#[cfg(test)] mod utils; use ark_bn254::Bn254; @@ -9,7 +11,7 @@ use ark_std::{ borrow::Borrow, rand::{CryptoRng, RngCore}, }; -use circuit::PublicInputs; +use circuit::PublicInput; use ethereum_types::U256; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::traits::{ @@ -41,7 +43,7 @@ pub use ark_ed_on_bn254::EdwardsConfig; /// Given a SRS, returns the proving key and verifying key for state update pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), PlonkError> { - let (circuit, _) = build_dummy_circuit_for_preprocessing()?; + let (circuit, _) = build_for_preprocessing()?; PlonkKzgSnark::preprocess(srs, &circuit) } @@ -63,7 +65,7 @@ pub fn generate_state_update_proof( signatures: SigIter, lightclient_state: &LightClientState, threshold: &U256, -) -> Result<(Proof, PublicInputs), PlonkError> +) -> Result<(Proof, PublicInput), PlonkError> where ST: StakeTableScheme, ST::IntoIter: ExactSizeIterator, @@ -91,8 +93,8 @@ where } /// Internal function for helping generate the proving/verifying key -fn build_dummy_circuit_for_preprocessing( -) -> Result<(PlonkCircuit, PublicInputs), PlonkError> { +fn build_for_preprocessing() -> Result<(PlonkCircuit, PublicInput), PlonkError> +{ let st = StakeTable::::new(); let lightclient_state = LightClientState { view_number: 0, @@ -116,7 +118,7 @@ mod tests { utils::{key_pairs_for_testing, stake_table_for_testing}, BaseField, UniversalSrs, }; - use crate::{build_dummy_circuit_for_preprocessing, generate_state_update_proof, preprocess}; + use crate::{build_for_preprocessing, generate_state_update_proof, preprocess}; use ark_bn254::Bn254; use ark_ec::pairing::Pairing; use ark_ed_on_bn254::EdwardsConfig as Config; @@ -220,7 +222,7 @@ mod tests { fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; - let state_msg = lightclient_state.to_array(); + let state_msg: [BaseField; 7] = lightclient_state.clone().into(); let sigs = schnorr_keys .iter() @@ -245,10 +247,7 @@ mod tests { .collect::>(); // good path - let num_gates = build_dummy_circuit_for_preprocessing() - .unwrap() - .0 - .num_gates(); + let num_gates = build_for_preprocessing().unwrap().0.num_gates(); let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); ark_std::println!("Number of constraint in the circuit: {}", num_gates); diff --git a/hotshot-state-prover/src/utils.rs b/hotshot-state-prover/src/utils.rs index 2dac55e75c..2bbc18e053 100644 --- a/hotshot-state-prover/src/utils.rs +++ b/hotshot-state-prover/src/utils.rs @@ -13,7 +13,6 @@ type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; type SchnorrSignKey = jf_primitives::signatures::schnorr::SignKey; /// Helper function for test -#[allow(dead_code)] pub(crate) fn key_pairs_for_testing( num_validators: usize, prng: &mut R, @@ -32,7 +31,6 @@ pub(crate) fn key_pairs_for_testing( } /// Helper function for test -#[allow(dead_code)] pub(crate) fn stake_table_for_testing( bls_keys: &[BLSVerKey], schnorr_keys: &[(SchnorrSignKey, SchnorrVerKey)], diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index d951671ce8..816664f63b 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -235,17 +235,16 @@ pub struct LightClientState { pub stake_table_comm: (F, F, F), } -impl LightClientState { - /// Return an array of field elements - pub fn to_array(&self) -> [F; 7] { +impl From> for [F; 7] { + fn from(state: LightClientState) -> Self { [ - F::from(self.view_number as u64), - F::from(self.block_height as u64), - self.block_comm, - self.fee_ledger_comm, - self.stake_table_comm.0, - self.stake_table_comm.1, - self.stake_table_comm.2, + F::from(state.view_number as u64), + F::from(state.block_height as u64), + state.block_comm, + state.fee_ledger_comm, + state.stake_table_comm.0, + state.stake_table_comm.1, + state.stake_table_comm.2, ] } } From 5da7ecbc5ef1922618c19315e277cb92b38383c4 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 17 Nov 2023 10:34:57 -0500 Subject: [PATCH 0416/1393] address comment: move preprocessing inside --- hotshot-state-prover/src/circuit.rs | 17 ++++++++++++++++ hotshot-state-prover/src/lib.rs | 31 ++++++----------------------- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 24bdf26b94..59ef07a002 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -393,6 +393,23 @@ where Ok((circuit, public_inputs.into())) } +/// Internal function to build a dummy circuit +pub(crate) fn build_for_preprocessing( +) -> Result<(PlonkCircuit, PublicInput), PlonkError> +where + F: RescueParameter, + P: TECurveConfig, +{ + let lightclient_state = LightClientState { + view_number: 0, + block_height: 0, + block_comm: F::default(), + fee_ledger_comm: F::default(), + stake_table_comm: (F::default(), F::default(), F::default()), + }; + build::(&[], &[], &[], &lightclient_state, &U256::zero()) +} + #[cfg(test)] mod tests { use super::{build, LightClientState}; diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 84ec5bfc41..d4984de75e 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -13,7 +13,6 @@ use ark_std::{ }; use circuit::PublicInput; use ethereum_types::U256; -use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::traits::{ stake_table::{SnapshotVersion, StakeTableScheme}, state::LightClientState, @@ -24,7 +23,6 @@ use jf_plonk::{ transcript::SolidityTranscript, }; use jf_primitives::signatures::schnorr::Signature; -use jf_relation::PlonkCircuit; /// BLS verification key, base field and Schnorr verification key pub use hotshot_stake_table::vec_based::config::{ @@ -43,7 +41,7 @@ pub use ark_ed_on_bn254::EdwardsConfig; /// Given a SRS, returns the proving key and verifying key for state update pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), PlonkError> { - let (circuit, _) = build_for_preprocessing()?; + let (circuit, _) = circuit::build_for_preprocessing::()?; PlonkKzgSnark::preprocess(srs, &circuit) } @@ -92,33 +90,13 @@ where Ok((proof, public_inputs)) } -/// Internal function for helping generate the proving/verifying key -fn build_for_preprocessing() -> Result<(PlonkCircuit, PublicInput), PlonkError> -{ - let st = StakeTable::::new(); - let lightclient_state = LightClientState { - view_number: 0, - block_height: 0, - block_comm: BaseField::default(), - fee_ledger_comm: BaseField::default(), - stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), - }; - circuit::build::( - &[], - &[], - &[], - &lightclient_state, - &U256::zero(), - ) -} - #[cfg(test)] mod tests { use super::{ utils::{key_pairs_for_testing, stake_table_for_testing}, BaseField, UniversalSrs, }; - use crate::{build_for_preprocessing, generate_state_update_proof, preprocess}; + use crate::{circuit::build_for_preprocessing, generate_state_update_proof, preprocess}; use ark_bn254::Bn254; use ark_ec::pairing::Pairing; use ark_ed_on_bn254::EdwardsConfig as Config; @@ -247,7 +225,10 @@ mod tests { .collect::>(); // good path - let num_gates = build_for_preprocessing().unwrap().0.num_gates(); + let num_gates = build_for_preprocessing::() + .unwrap() + .0 + .num_gates(); let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); ark_std::println!("Number of constraint in the circuit: {}", num_gates); From ec462c049d83bfd4f682f456f538ac55d3f8eb69 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 17 Nov 2023 10:44:21 -0500 Subject: [PATCH 0417/1393] use proper vid config (#2032) --- types/src/traits/node_implementation.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 5b67bd478c..89907ffb1b 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -365,7 +365,7 @@ where let view_sync_exchange = VIEWSYNCEXCHANGE::create( entries.clone(), - configs.0, + configs.0.clone(), networks.2, pk.clone(), entry.clone(), @@ -374,16 +374,14 @@ where let committee_exchange = COMMITTEEEXCHANGE::create( entries.clone(), - configs.1.clone(), + configs.1, networks.1, pk.clone(), entry.clone(), sk.clone(), ); - // RM TODO: figure out if this is the proper config - // issue: https://github.com/EspressoSystems/HotShot/issues/1918 - let vid_exchange = VIDEXCHANGE::create(entries, configs.1, networks.3, pk, entry, sk); + let vid_exchange = VIDEXCHANGE::create(entries, configs.0, networks.3, pk, entry, sk); Self { quorum_exchange, From bce5086faa21ccdbf8f92c596ec699592ca11c0d Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 17 Nov 2023 10:55:43 -0500 Subject: [PATCH 0418/1393] add more type conversion --- types/src/traits/state.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 816664f63b..bed7fed42b 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -248,3 +248,16 @@ impl From> for [F; 7] { ] } } +impl From<&LightClientState> for [F; 7] { + fn from(state: &LightClientState) -> Self { + [ + F::from(state.view_number as u64), + F::from(state.block_height as u64), + state.block_comm, + state.fee_ledger_comm, + state.stake_table_comm.0, + state.stake_table_comm.1, + state.stake_table_comm.2, + ] + } +} From 04607e7dc1be2ba32419ac41113acf0f222280c4 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 17 Nov 2023 11:19:14 -0500 Subject: [PATCH 0419/1393] Update examples, add combined network examples (#2023) * update examples, add combined network * add orchestrator, validators * update dockerfiles * update example names * update docs * merge --- hotshot/Cargo.toml | 71 +- hotshot/examples/combined/all.rs | 107 +++ hotshot/examples/combined/multi-validator.rs | 70 ++ hotshot/examples/combined/orchestrator.rs | 38 + hotshot/examples/combined/types.rs | 48 ++ hotshot/examples/combined/validator.rs | 42 + hotshot/examples/infra/mod.rs | 740 ++++++++++++------ hotshot/examples/libp2p/all.rs | 80 ++ hotshot/examples/web-server-da/README.md | 29 - hotshot/examples/webserver/README.md | 29 + hotshot/examples/webserver/all.rs | 98 +++ .../multi-validator.rs | 0 .../multi-webserver.rs} | 32 +- .../orchestrator.rs | 0 .../{web-server-da => webserver}/types.rs | 0 .../{web-server-da => webserver}/validator.rs | 0 .../web-server.rs => webserver/webserver.rs} | 6 +- orchestrator/README.md | 4 +- orchestrator/default-run-config.toml | 70 -- .../default-web-server-run-config.toml | 79 -- ...libp2p-run-config.toml => run-config.toml} | 50 +- orchestrator/src/config.rs | 14 +- orchestrator/src/lib.rs | 7 +- 23 files changed, 1112 insertions(+), 502 deletions(-) create mode 100644 hotshot/examples/combined/all.rs create mode 100644 hotshot/examples/combined/multi-validator.rs create mode 100644 hotshot/examples/combined/orchestrator.rs create mode 100644 hotshot/examples/combined/types.rs create mode 100644 hotshot/examples/combined/validator.rs create mode 100644 hotshot/examples/libp2p/all.rs delete mode 100644 hotshot/examples/web-server-da/README.md create mode 100644 hotshot/examples/webserver/README.md create mode 100644 hotshot/examples/webserver/all.rs rename hotshot/examples/{web-server-da => webserver}/multi-validator.rs (100%) rename hotshot/examples/{web-server-da/multi-web-server.rs => webserver/multi-webserver.rs} (63%) rename hotshot/examples/{web-server-da => webserver}/orchestrator.rs (100%) rename hotshot/examples/{web-server-da => webserver}/types.rs (100%) rename hotshot/examples/{web-server-da => webserver}/validator.rs (100%) rename hotshot/examples/{web-server-da/web-server.rs => webserver/webserver.rs} (86%) delete mode 100644 orchestrator/default-run-config.toml delete mode 100644 orchestrator/default-web-server-run-config.toml rename orchestrator/{default-libp2p-run-config.toml => run-config.toml} (72%) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index f85eafdb5c..f0a62be572 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -25,55 +25,78 @@ docs = [] doc-images = [] hotshot-testing = [] +# libp2p [[example]] -name = "libp2p-validator" +name = "validator-libp2p" required-features = ["demo", "libp2p/rsa"] path = "examples/libp2p/validator.rs" [[example]] -name = "libp2p-multi-validator" +name = "multi-validator-libp2p" required-features = ["demo", "libp2p/rsa"] path = "examples/libp2p/multi-validator.rs" [[example]] -name = "libp2p-orchestrator" +name = "orchestrator-libp2p" required-features = ["demo", "libp2p/rsa"] path = "examples/libp2p/orchestrator.rs" -# [[example]] -# name = "web-server-orchestrator" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/web-server/orchestrator.rs" -# -# [[example]] -# name = "web-server-validator" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/web-server/validator.rs" +[[example]] +name = "all-libp2p" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/all.rs" + +# webserver +[[example]] +name = "webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/webserver.rs" + +[[example]] +name = "orchestrator-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/orchestrator.rs" + +[[example]] +name = "validator-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/validator.rs" + +[[example]] +name = "multi-validator-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/multi-validator.rs" [[example]] -name = "web-server" +name = "multi-webserver" required-features = ["demo", "libp2p/rsa"] -path = "examples/web-server-da/web-server.rs" +path = "examples/webserver/multi-webserver.rs" [[example]] -name = "web-server-da-orchestrator" +name = "all-webserver" required-features = ["demo", "libp2p/rsa"] -path = "examples/web-server-da/orchestrator.rs" +path = "examples/webserver/all.rs" +# combined [[example]] -name = "web-server-da-validator" +name = "all-combined" required-features = ["demo", "libp2p/rsa"] -path = "examples/web-server-da/validator.rs" +path = "examples/combined/all.rs" [[example]] -name = "multi-validator" +name = "multi-validator-combined" required-features = ["demo", "libp2p/rsa"] -path = "examples/web-server-da/multi-validator.rs" +path = "examples/combined/multi-validator.rs" [[example]] -name = "multi-web-server" +name = "validator-combined" required-features = ["demo", "libp2p/rsa"] -path = "examples/web-server-da/multi-web-server.rs" +path = "examples/combined/validator.rs" + +[[example]] +name = "orchestrator-combined" +required-features = ["demo", "libp2p/rsa"] +path = "examples/combined/orchestrator.rs" [dependencies] async-compatibility-layer = { workspace = true } @@ -110,6 +133,7 @@ serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } +local-ip-address = "0.5.6" dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } tracing = { workspace = true } @@ -123,5 +147,4 @@ async-std = { workspace = true } blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } serde_json = "1.0.108" -toml = { workspace = true } - +toml = { workspace = true } \ No newline at end of file diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs new file mode 100644 index 0000000000..e50757df80 --- /dev/null +++ b/hotshot/examples/combined/all.rs @@ -0,0 +1,107 @@ +pub mod types; + +use crate::infra::load_config_from_file; +use crate::types::ThisRun; +use async_compatibility_layer::art::async_spawn; +use async_compatibility_layer::channel::oneshot; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot::demo::DemoMembership; +use hotshot::demo::DemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::config::NetworkConfig; +use hotshot_types::traits::node_implementation::NodeType; +use std::net::{IpAddr, Ipv4Addr}; +use std::sync::Arc; +use tracing::{error, instrument}; + +use crate::{ + infra::run_orchestrator, + infra::{ConfigArgs, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, +}; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + + // use configfile args + let args = ConfigArgs::parse(); + + // spawn web servers + let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); + let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); + let _sender = Arc::new(server_shutdown_sender_cdn); + let _sender = Arc::new(server_shutdown_sender_da); + + async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_cdn), 9000) + .await + { + error!("Problem starting cdn web server: {:?}", e); + } + }); + async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_da), 9001) + .await + { + error!("Problem starting da web server: {:?}", e); + } + }); + + // orchestrator + async_spawn(run_orchestrator::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + >(OrchestratorArgs { + host: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + port: 4444, + config_file: args.config_file.clone(), + })); + + // nodes + let config: NetworkConfig< + ::SignatureKey, + ::ElectionConfigType, + > = load_config_from_file::(args.config_file); + let mut nodes = Vec::new(); + for _ in 0..config.config.total_nodes.into() { + let node = async_spawn(async move { + infra::main_entry_point::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: "127.0.0.1".to_string(), + port: 4444, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + }) + .await + }); + nodes.push(node); + } + futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs new file mode 100644 index 0000000000..6a39e52802 --- /dev/null +++ b/hotshot/examples/combined/multi-validator.rs @@ -0,0 +1,70 @@ +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, +}; +use clap::Parser; +use hotshot::demo::DemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use std::net::IpAddr; +use tracing::instrument; +use types::VIDNetwork; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use hotshot::demo::DemoMembership; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[derive(Parser, Debug, Clone)] +struct MultiValidatorArgs { + /// Number of validators to run + pub num_nodes: u16, + /// The address the orchestrator runs on + pub host: IpAddr, + /// The port the orchestrator runs on + pub port: u16, + /// This node's public IP address, for libp2p + /// If no IP address is passed in, it will default to 127.0.0.1 + pub public_ip: Option, +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = MultiValidatorArgs::parse(); + tracing::error!( + "connecting to orchestrator at {:?}:{:?}", + args.host, + args.port + ); + let mut nodes = Vec::new(); + for _ in 0..args.num_nodes { + let node = async_spawn(async move { + infra::main_entry_point::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: args.host.to_string(), + port: args.port, + public_ip: args.public_ip, + }) + .await + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/combined/orchestrator.rs b/hotshot/examples/combined/orchestrator.rs new file mode 100644 index 0000000000..5e45139292 --- /dev/null +++ b/hotshot/examples/combined/orchestrator.rs @@ -0,0 +1,38 @@ +pub mod types; + +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot::demo::DemoTypes; +use tracing::instrument; +use types::VIDNetwork; + +use crate::infra::run_orchestrator; +use crate::infra::OrchestratorArgs; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}; +use hotshot::demo::DemoMembership; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = OrchestratorArgs::parse(); + + run_orchestrator::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + >(args) + .await; +} diff --git a/hotshot/examples/combined/types.rs b/hotshot/examples/combined/types.rs new file mode 100644 index 0000000000..f08b47c9c2 --- /dev/null +++ b/hotshot/examples/combined/types.rs @@ -0,0 +1,48 @@ +use crate::infra::CombinedDARun; +use hotshot::{ + demo::{DemoMembership, DemoTypes}, + traits::implementations::{CombinedCommChannel, MemoryStorage}, +}; +use hotshot_types::{ + message::{Message, SequencingMessage}, + traits::{ + election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, + node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, + }, + vote::ViewSyncVote, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct NodeImpl {} + +pub type DANetwork = CombinedCommChannel; +pub type VIDNetwork = CombinedCommChannel; +pub type QuorumNetwork = CombinedCommChannel; +pub type ViewSyncNetwork = CombinedCommChannel; + +pub type ThisViewSyncVote = ViewSyncVote; + +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type Exchanges = Exchanges< + DemoTypes, + Message, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, + >; + type ConsensusMessage = SequencingMessage; + + fn new_channel_maps( + start_view: ::Time, + ) -> ( + ChannelMaps, + Option>, + ) { + (ChannelMaps::new(start_view), None) + } +} +pub type ThisRun = CombinedDARun; diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs new file mode 100644 index 0000000000..4af836c459 --- /dev/null +++ b/hotshot/examples/combined/validator.rs @@ -0,0 +1,42 @@ +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot::demo::DemoTypes; +use tracing::{info, instrument}; +use types::VIDNetwork; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use hotshot::demo::DemoMembership; + +use hotshot_orchestrator::client::ValidatorArgs; + +pub mod types; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = ValidatorArgs::parse(); + info!( + "connecting to orchestrator at {:?}:{:?}", + args.host, args.port + ); + infra::main_entry_point::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + ThisRun, + >(args) + .await; +} diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index fda8cf1ec2..c47ca90550 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -1,8 +1,11 @@ +use async_compatibility_layer::art::async_sleep; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_lock::RwLock; use async_trait::async_trait; use clap::Parser; +use core::marker::PhantomData; use futures::StreamExt; +use hotshot::traits::implementations::{CombinedCommChannel, CombinedNetworks}; use hotshot::{ traits::{ implementations::{ @@ -20,6 +23,8 @@ use hotshot_orchestrator::{ config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_task::task::FilterEvent; +use hotshot_types::traits::network::ConnectedNetwork; +use hotshot_types::ValidatorConfig; use hotshot_types::{block_impl::VIDBlockHeader, traits::election::VIDExchange}; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, @@ -47,6 +52,7 @@ use libp2p_networking::{ }; use rand::rngs::StdRng; use rand::SeedableRng; +use std::time::Duration; use std::{collections::BTreeSet, sync::Arc}; use std::{num::NonZeroUsize, str::FromStr}; // use libp2p::{ @@ -59,11 +65,10 @@ use std::{num::NonZeroUsize, str::FromStr}; // }; use libp2p_identity::PeerId; // use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; -use std::{fmt::Debug, net::Ipv4Addr}; +use std::fmt::Debug; use std::{ //collections::{BTreeSet, VecDeque}, fs, - mem, net::IpAddr, //num::NonZeroUsize, //str::FromStr, @@ -73,7 +78,7 @@ use std::{ }; //use surf_disco::error::ClientError; //use surf_disco::Client; -use tracing::{debug, error, info, warn}; +use tracing::{error, info, warn}; #[derive(Parser, Debug, Clone)] #[command( @@ -90,6 +95,17 @@ pub struct OrchestratorArgs { pub config_file: String, } +#[derive(Parser, Debug, Clone)] +#[command( + name = "Multi-machine consensus", + about = "Simulates consensus among multiple machines" +)] +/// The configuration file to be used for this run +pub struct ConfigArgs { + /// The configuration file to be used for this run + pub config_file: String, +} + /// Reads a network configuration from a given filepath pub fn load_config_from_file( config_file: String, @@ -100,7 +116,24 @@ pub fn load_config_from_file( toml::from_str::>(&config_file_as_string) .expect("Unable to convert config file to TOML"); - let config: NetworkConfig = config_toml.into(); + let mut config: NetworkConfig = + config_toml.into(); + + // Generate network's public keys + let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) + .map(|node_id| { + TYPES::SignatureKey::generated_from_seed_indexed( + config.seed, + node_id.try_into().unwrap(), + ) + .0 + }) + .collect(); + + config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) + .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) + .collect(); + config } @@ -108,19 +141,19 @@ pub fn load_config_from_file( pub async fn run_orchestrator< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + DACHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + VIDCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >, Storage = MemoryStorage, ConsensusMessage = SequencingMessage, @@ -147,11 +180,144 @@ fn calculate_num_tx_per_round( total_num_nodes: usize, transactions_per_round: usize, ) -> usize { - if node_index == 0 { - transactions_per_round / total_num_nodes + transactions_per_round % total_num_nodes + transactions_per_round / total_num_nodes + + ((total_num_nodes - 1 - node_index as usize) < (transactions_per_round % total_num_nodes)) + as usize +} + +async fn webserver_network_from_config>( + config: NetworkConfig, + pub_key: TYPES::SignatureKey, +) -> WebServerNetwork, TYPES::SignatureKey, TYPES> { + // Get the configuration for the web server + let WebServerConfig { + host, + port, + wait_between_polls, + }: WebServerConfig = config.clone().web_server_config.unwrap(); + + WebServerNetwork::create( + &host.to_string(), + port, + wait_between_polls, + pub_key.clone(), + false, + ) +} + +async fn libp2p_network_from_config>( + config: NetworkConfig, + pub_key: TYPES::SignatureKey, +) -> Libp2pNetwork, TYPES::SignatureKey> { + let mut config = config; + let libp2p_config = config + .libp2p_config + .take() + .expect("Configuration is not for a Libp2p network"); + let bs_len = libp2p_config.bootstrap_nodes.len(); + let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config + .bootstrap_nodes + .iter() + .map(|(addr, pair)| { + let kp = Keypair::from_protobuf_encoding(pair).unwrap(); + let peer_id = PeerId::from_public_key(&kp.public()); + let multiaddr = + Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) + .unwrap(); + (peer_id, multiaddr) + }) + .collect(); + let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); + let node_type = if (config.node_index as usize) < bs_len { + NetworkNodeType::Bootstrap } else { - transactions_per_round / total_num_nodes + NetworkNodeType::Regular + }; + let node_index = config.node_index; + let port_index = match libp2p_config.index_ports { + true => node_index, + false => 0, + }; + let bound_addr: Multiaddr = format!( + "/{}/{}/udp/{}/quic-v1", + if libp2p_config.public_ip.is_ipv4() { + "ip4" + } else { + "ip6" + }, + libp2p_config.public_ip, + libp2p_config.base_port as u64 + port_index + ) + .parse() + .unwrap(); + + // generate network + let mut config_builder = NetworkNodeConfigBuilder::default(); + assert!(config.config.total_nodes.get() > 2); + let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); + config_builder.replication_factor(replicated_nodes); + config_builder.identity(identity.clone()); + + config_builder.bound_addr(Some(bound_addr.clone())); + + let to_connect_addrs = bootstrap_nodes + .iter() + .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) + .collect(); + + config_builder.to_connect_addrs(to_connect_addrs); + + let mesh_params = +// NOTE I'm arbitrarily choosing these. +match node_type { + NetworkNodeType::Bootstrap => MeshParams { + mesh_n_high: libp2p_config.bootstrap_mesh_n_high, + mesh_n_low: libp2p_config.bootstrap_mesh_n_low, + mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, + mesh_n: libp2p_config.bootstrap_mesh_n, + }, + NetworkNodeType::Regular => MeshParams { + mesh_n_high: libp2p_config.mesh_n_high, + mesh_n_low: libp2p_config.mesh_n_low, + mesh_outbound_min: libp2p_config.mesh_outbound_min, + mesh_n: libp2p_config.mesh_n, + }, + NetworkNodeType::Conductor => unreachable!(), +}; + config_builder.mesh_params(Some(mesh_params)); + + let mut all_keys = BTreeSet::new(); + let mut da_keys = BTreeSet::new(); + for i in 0..config.config.total_nodes.get() as u64 { + let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; + let pub_key = TYPES::SignatureKey::from_private(&privkey); + if i < config.config.da_committee_size as u64 { + da_keys.insert(pub_key.clone()); + } + all_keys.insert(pub_key); } + let node_config = config_builder.build().unwrap(); + + Libp2pNetwork::new( + NetworkingMetricsValue::new(), + node_config, + pub_key.clone(), + Arc::new(RwLock::new( + bootstrap_nodes + .iter() + .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) + .collect(), + )), + bs_len, + config.node_index as usize, + // NOTE: this introduces an invariant that the keys are assigned using this indexed + // function + all_keys, + da_keys.clone(), + da_keys.contains(&pub_key), + ) + .await + .unwrap() } /// Defines the behavior of a "run" of the network with a given configuration @@ -159,19 +325,19 @@ fn calculate_num_tx_per_round( pub trait RunDA< TYPES: NodeType, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + DACHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + VIDCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >, Storage = MemoryStorage, ConsensusMessage = SequencingMessage, @@ -179,6 +345,7 @@ pub trait RunDA< > where ::StateType: TestableState, ::BlockPayload: TestableBlock, + TYPES: NodeType, Leaf: TestableLeaf, Self: Sync, SystemContext: HotShotType, @@ -204,10 +371,10 @@ pub trait RunDA< let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let entry = pk.get_stake_table_entry(1u64); - let da_network = self.get_da_network(); - let quorum_network = self.get_quorum_network(); - let view_sync_network = self.get_view_sync_network(); - let vid_network = self.get_vid_network(); + let da_network = self.get_da_channel(); + let quorum_network = self.get_quorum_channel(); + let view_sync_network = self.get_view_sync_channel(); + let vid_network = self.get_vid_channel(); // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { @@ -254,28 +421,26 @@ pub trait RunDA< } /// Starts HotShot consensus, returns when consensus has finished - async fn run_hotshot(&self, mut context: SystemContextHandle) { + async fn run_hotshot( + &self, + mut context: SystemContextHandle, + transactions: &mut Vec, + transactions_to_send_per_round: u64, + ) { let NetworkConfig { - padding, rounds, - transactions_per_round, node_index, - config: HotShotConfig { total_nodes, .. }, + start_delay_seconds, .. } = self.get_config(); - let size = mem::size_of::(); - let padding = padding.saturating_sub(size); - let mut txn_rng = StdRng::seed_from_u64(node_index); - - debug!("Adjusted padding size is {:?} bytes", padding); - let mut total_transactions_committed = 0; let mut total_transactions_sent = 0; - let transactions_to_send_per_round = - calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); - info!("Starting hotshot!"); + error!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); + async_sleep(Duration::from_secs(start_delay_seconds)).await; + + error!("Starting hotshot!"); let start = Instant::now(); let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; @@ -308,18 +473,14 @@ pub trait RunDA< if new_anchor >= anchor_view { anchor_view = leaf.view_number; } - } - // send transactions - for _ in 0..transactions_to_send_per_round { - let txn = - <::StateType as TestableState>::create_random_transaction( - None, - &mut txn_rng, - padding as u64, - ); - _ = context.submit_transaction(txn).await.unwrap(); - total_transactions_sent += 1; + // send transactions + for _ in 0..transactions_to_send_per_round { + let tx = transactions.remove(0); + + _ = context.submit_transaction(tx).await.unwrap(); + total_transactions_sent += 1; + } } if let Some(size) = block_size { @@ -355,16 +516,16 @@ pub trait RunDA< } /// Returns the da network for this run - fn get_da_network(&self) -> DANETWORK; + fn get_da_channel(&self) -> DACHANNEL; /// Returns the quorum network for this run - fn get_quorum_network(&self) -> QUORUMNETWORK; + fn get_quorum_channel(&self) -> QUORUMCHANNEL; ///Returns view sync network for this run - fn get_view_sync_network(&self) -> VIEWSYNCNETWORK; + fn get_view_sync_channel(&self) -> VIEWSYNCCHANNEL; ///Returns VID network for this run - fn get_vid_network(&self) -> VIDNETWORK; + fn get_vid_channel(&self) -> VIDCHANNEL; /// Returns the config for this run fn get_config(&self) -> NetworkConfig; @@ -379,10 +540,10 @@ pub struct WebServerDARun< MEMBERSHIP: Membership, > { config: NetworkConfig, - quorum_network: WebCommChannel, - da_network: WebCommChannel, - view_sync_network: WebCommChannel, - vid_network: WebCommChannel, + quorum_channel: WebCommChannel, + da_channel: WebCommChannel, + view_sync_channel: WebCommChannel, + vid_channel: WebCommChannel, } #[async_trait] @@ -448,36 +609,27 @@ where // Get our own key let pub_key = config.config.my_own_validator_config.public_key.clone(); - // Get the configuration for the web server + // extract values from config (for DA network) let WebServerConfig { host, port, wait_between_polls, - }: WebServerConfig = config.clone().web_server_config.unwrap(); + }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - let underlying_quorum_network = WebServerNetwork::create( - &host.to_string(), - port, - wait_between_polls, - pub_key.clone(), - false, - ); + // create and wait for underlying network + let underlying_quorum_network = + webserver_network_from_config::(config.clone(), pub_key.clone()).await; - // Create the network - let quorum_network: WebCommChannel = + underlying_quorum_network.wait_for_ready().await; + + // create communication channels + let quorum_channel: WebCommChannel = WebCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: WebCommChannel = + let view_sync_channel: WebCommChannel = WebCommChannel::new(underlying_quorum_network.into()); - let WebServerConfig { - host, - port, - wait_between_polls, - }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - - // Each node runs the DA network so that leaders have access to transactions and DA votes - let da_network: WebCommChannel = WebCommChannel::new( + let da_channel: WebCommChannel = WebCommChannel::new( WebServerNetwork::create( &host.to_string(), port, @@ -488,34 +640,34 @@ where .into(), ); - let vid_network: WebCommChannel = WebCommChannel::new( + let vid_channel: WebCommChannel = WebCommChannel::new( WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) .into(), ); WebServerDARun { config, - quorum_network, - da_network, - view_sync_network, - vid_network, + quorum_channel, + da_channel, + view_sync_channel, + vid_channel, } } - fn get_da_network(&self) -> WebCommChannel { - self.da_network.clone() + fn get_da_channel(&self) -> WebCommChannel { + self.da_channel.clone() } - fn get_quorum_network(&self) -> WebCommChannel { - self.quorum_network.clone() + fn get_quorum_channel(&self) -> WebCommChannel { + self.quorum_channel.clone() } - fn get_view_sync_network(&self) -> WebCommChannel { - self.view_sync_network.clone() + fn get_view_sync_channel(&self) -> WebCommChannel { + self.view_sync_channel.clone() } - fn get_vid_network(&self) -> WebCommChannel { - self.vid_network.clone() + fn get_vid_channel(&self) -> WebCommChannel { + self.vid_channel.clone() } fn get_config(&self) -> NetworkConfig { @@ -529,10 +681,10 @@ where pub struct Libp2pDARun, MEMBERSHIP: Membership> { config: NetworkConfig, - quorum_network: Libp2pCommChannel, - da_network: Libp2pCommChannel, - view_sync_network: Libp2pCommChannel, - vid_network: Libp2pCommChannel, + quorum_channel: Libp2pCommChannel, + da_channel: Libp2pCommChannel, + view_sync_channel: Libp2pCommChannel, + vid_channel: Libp2pCommChannel, } #[async_trait] @@ -595,159 +747,210 @@ where async fn initialize_networking( config: NetworkConfig, ) -> Libp2pDARun { - let pubkey = config.config.my_own_validator_config.public_key.clone(); - let mut config = config; - let libp2p_config = config - .libp2p_config - .take() - .expect("Configuration is not for a Libp2p network"); - let bs_len = libp2p_config.bootstrap_nodes.len(); - let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config - .bootstrap_nodes - .iter() - .map(|(addr, pair)| { - let kp = Keypair::from_protobuf_encoding(pair).unwrap(); - let peer_id = PeerId::from_public_key(&kp.public()); - let multiaddr = - Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) - .unwrap(); - (peer_id, multiaddr) - }) - .collect(); - let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); - let node_type = if (config.node_index as usize) < bs_len { - NetworkNodeType::Bootstrap - } else { - NetworkNodeType::Regular - }; - let node_index = config.node_index; - let port_index = match libp2p_config.index_ports { - true => node_index, - false => 0, - }; - let bound_addr: Multiaddr = format!( - "/{}/{}/udp/{}/quic-v1", - if libp2p_config.public_ip.is_ipv4() { - "ip4" - } else { - "ip6" - }, - libp2p_config.public_ip, - libp2p_config.base_port as u64 + port_index - ) - .parse() - .unwrap(); - - // generate network - let mut config_builder = NetworkNodeConfigBuilder::default(); - assert!(config.config.total_nodes.get() > 2); - let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); - config_builder.replication_factor(replicated_nodes); - config_builder.identity(identity.clone()); - - config_builder.bound_addr(Some(bound_addr.clone())); - - let to_connect_addrs = bootstrap_nodes - .iter() - .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) - .collect(); - - config_builder.to_connect_addrs(to_connect_addrs); - - let mesh_params = - // NOTE I'm arbitrarily choosing these. - match node_type { - NetworkNodeType::Bootstrap => MeshParams { - mesh_n_high: libp2p_config.bootstrap_mesh_n_high, - mesh_n_low: libp2p_config.bootstrap_mesh_n_low, - mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, - mesh_n: libp2p_config.bootstrap_mesh_n, - }, - NetworkNodeType::Regular => MeshParams { - mesh_n_high: libp2p_config.mesh_n_high, - mesh_n_low: libp2p_config.mesh_n_low, - mesh_outbound_min: libp2p_config.mesh_outbound_min, - mesh_n: libp2p_config.mesh_n, - }, - NetworkNodeType::Conductor => unreachable!(), - }; - config_builder.mesh_params(Some(mesh_params)); - - let mut all_keys = BTreeSet::new(); - let mut da_keys = BTreeSet::new(); - for i in 0..config.config.total_nodes.get() as u64 { - let pubkey = <::SignatureKey>::get_public_key( - config - .config - .known_nodes_with_stake - .get(i as usize) - .expect("node_id should be within the range of known_nodes"), - ); - if i < config.config.da_committee_size as u64 { - da_keys.insert(pubkey.clone()); - } - all_keys.insert(pubkey); - } - let node_config = config_builder.build().unwrap(); - let underlying_quorum_network = Libp2pNetwork::new( - NetworkingMetricsValue::new(), - node_config, - pubkey.clone(), - Arc::new(RwLock::new( - bootstrap_nodes - .iter() - .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) - .collect(), - )), - bs_len, - config.node_index as usize, - // NOTE: this introduces an invariant that the keys are assigned using this indexed - // function - all_keys, - da_keys.clone(), - da_keys.contains(&pubkey), - ) - .await - .unwrap(); + let pub_key = config.config.my_own_validator_config.public_key.clone(); + + // create and wait for underlying network + let underlying_quorum_network = + libp2p_network_from_config::(config.clone(), pub_key).await; underlying_quorum_network.wait_for_ready().await; - // Create the network - let quorum_network: Libp2pCommChannel = + // create communication channels + let quorum_channel: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let view_sync_network: Libp2pCommChannel = + let view_sync_channel: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let da_network: Libp2pCommChannel = + let da_channel: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - let vid_network: Libp2pCommChannel = + let vid_channel: Libp2pCommChannel = Libp2pCommChannel::new(underlying_quorum_network.clone().into()); Libp2pDARun { config, - quorum_network, - da_network, - view_sync_network, - vid_network, + quorum_channel, + da_channel, + view_sync_channel, + vid_channel, } } - fn get_da_network(&self) -> Libp2pCommChannel { - self.da_network.clone() + fn get_da_channel(&self) -> Libp2pCommChannel { + self.da_channel.clone() } - fn get_quorum_network(&self) -> Libp2pCommChannel { - self.quorum_network.clone() + fn get_quorum_channel(&self) -> Libp2pCommChannel { + self.quorum_channel.clone() } - fn get_view_sync_network(&self) -> Libp2pCommChannel { - self.view_sync_network.clone() + fn get_view_sync_channel(&self) -> Libp2pCommChannel { + self.view_sync_channel.clone() } - fn get_vid_network(&self) -> Libp2pCommChannel { - self.vid_network.clone() + fn get_vid_channel(&self) -> Libp2pCommChannel { + self.vid_channel.clone() + } + + fn get_config(&self) -> NetworkConfig { + self.config.clone() + } +} + +// Combined network + +/// Represents a combined-network-based run +pub struct CombinedDARun< + TYPES: NodeType, + I: NodeImplementation, + MEMBERSHIP: Membership, +> { + config: NetworkConfig, + quorum_channel: CombinedCommChannel, + da_channel: CombinedCommChannel, + view_sync_channel: CombinedCommChannel, + vid_channel: CombinedCommChannel, +} + +#[async_trait] +impl< + TYPES: NodeType< + Transaction = VIDTransaction, + BlockPayload = VIDBlockPayload, + BlockHeader = VIDBlockHeader, + >, + MEMBERSHIP: Membership + Debug, + NODE: NodeImplementation< + TYPES, + Exchanges = Exchanges< + TYPES, + Message, + QuorumExchange< + TYPES, + MEMBERSHIP, + CombinedCommChannel, + Message, + >, + CommitteeExchange< + TYPES, + MEMBERSHIP, + CombinedCommChannel, + Message, + >, + ViewSyncExchange< + TYPES, + MEMBERSHIP, + CombinedCommChannel, + Message, + >, + VIDExchange< + TYPES, + MEMBERSHIP, + CombinedCommChannel, + Message, + >, + >, + Storage = MemoryStorage, + ConsensusMessage = SequencingMessage, + >, + > + RunDA< + TYPES, + MEMBERSHIP, + CombinedCommChannel, + CombinedCommChannel, + CombinedCommChannel, + CombinedCommChannel, + NODE, + > for CombinedDARun +where + ::StateType: TestableState, + ::BlockPayload: TestableBlock, + Leaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig, + ) -> CombinedDARun { + // generate our own key + let (pub_key, _privkey) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + config.seed, + config.node_index, + ); + + // create and wait for libp2p network + let libp2p_underlying_quorum_network = + libp2p_network_from_config::(config.clone(), pub_key.clone()).await; + + libp2p_underlying_quorum_network.wait_for_ready().await; + + // extract values from config (for webserver DA network) + let WebServerConfig { + host, + port, + wait_between_polls, + }: WebServerConfig = config.clone().da_web_server_config.unwrap(); + + // create and wait for underlying webserver network + let webserver_underlying_quorum_network = + webserver_network_from_config::(config.clone(), pub_key.clone()).await; + + let webserver_underlying_da_network = + WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true); + + webserver_underlying_quorum_network.wait_for_ready().await; + + // combine the two communication channels + let quorum_channel = CombinedCommChannel::new(Arc::new(CombinedNetworks( + webserver_underlying_quorum_network.clone(), + libp2p_underlying_quorum_network.clone(), + PhantomData, + ))); + + let view_sync_channel = CombinedCommChannel::new(Arc::new(CombinedNetworks( + webserver_underlying_quorum_network.clone(), + libp2p_underlying_quorum_network.clone(), + PhantomData, + ))); + + let da_channel: CombinedCommChannel = + CombinedCommChannel::new(Arc::new(CombinedNetworks( + webserver_underlying_da_network, + libp2p_underlying_quorum_network.clone(), + PhantomData, + ))); + + let vid_channel = CombinedCommChannel::new(Arc::new(CombinedNetworks( + webserver_underlying_quorum_network, + libp2p_underlying_quorum_network, + PhantomData, + ))); + + CombinedDARun { + config, + quorum_channel, + da_channel, + view_sync_channel, + vid_channel, + } + } + + fn get_da_channel(&self) -> CombinedCommChannel { + self.da_channel.clone() + } + + fn get_quorum_channel(&self) -> CombinedCommChannel { + self.quorum_channel.clone() + } + + fn get_view_sync_channel(&self) -> CombinedCommChannel { + self.view_sync_channel.clone() + } + + fn get_vid_channel(&self) -> CombinedCommChannel { + self.vid_channel.clone() } fn get_config(&self) -> NetworkConfig { @@ -763,24 +966,24 @@ pub async fn main_entry_point< BlockHeader = VIDBlockHeader, >, MEMBERSHIP: Membership + Debug, - DANETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - QUORUMNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIEWSYNCNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, - VIDNETWORK: CommunicationChannel, MEMBERSHIP> + Debug, + DACHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + QUORUMCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + VIEWSYNCCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, + VIDCHANNEL: CommunicationChannel, MEMBERSHIP> + Debug, NODE: NodeImplementation< TYPES, Exchanges = Exchanges< TYPES, Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, + QuorumExchange>, + CommitteeExchange>, + ViewSyncExchange>, + VIDExchange>, >, Storage = MemoryStorage, ConsensusMessage = SequencingMessage, >, - RUNDA: RunDA, + RUNDA: RunDA, >( args: ValidatorArgs, ) where @@ -791,7 +994,7 @@ pub async fn main_entry_point< setup_logging(); setup_backtrace(); - info!("Starting validator"); + error!("Starting validator"); let orchestrator_client: OrchestratorClient = OrchestratorClient::connect_to_orchestrator(args.clone()).await; @@ -799,36 +1002,83 @@ pub async fn main_entry_point< // Identify with the orchestrator let public_ip = match args.public_ip { Some(ip) => ip, - None => IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + None => local_ip_address::local_ip().unwrap(), }; - info!( + error!( "Identifying with orchestrator using IP address {}", public_ip.to_string() ); let node_index: u16 = orchestrator_client .identify_with_orchestrator(public_ip.to_string()) .await; - info!("Finished identifying; our node index is {node_index}"); - info!("Getting config from orchestrator"); + error!("Finished identifying; our node index is {node_index}"); + error!("Getting config from orchestrator"); let mut run_config = orchestrator_client .get_config_from_orchestrator::(node_index) .await; run_config.node_index = node_index.into(); + + let (public_key, private_key) = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + run_config.seed, + node_index.into(), + ); + run_config.config.my_own_validator_config = ValidatorConfig { + public_key, + private_key, + stake_value: 1, + }; //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); - info!("Initializing networking"); + error!("Initializing networking"); let run = RUNDA::initialize_networking(run_config.clone()).await; let hotshot = run.initialize_state_and_hotshot().await; - info!("Waiting for start command from orchestrator"); + // pre-generate transactions + let NetworkConfig { + transaction_size, + rounds, + transactions_per_round, + node_index, + config: HotShotConfig { total_nodes, .. }, + .. + } = run_config; + + let mut txn_rng = StdRng::seed_from_u64(node_index); + let transactions_to_send_per_round = + calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); + let mut transactions = Vec::new(); + + for round in 0..rounds { + for _ in 0..transactions_to_send_per_round { + let mut txn = ::create_random_transaction( + None, + &mut txn_rng, + transaction_size as u64, + ); + + // prepend destined view number to transaction + let view_execute_number: u64 = round as u64 + 4; + txn.0[0..8].copy_from_slice(&view_execute_number.to_be_bytes()); + + transactions.push(txn); + } + } + + error!("Waiting for start command from orchestrator"); orchestrator_client .wait_for_all_nodes_ready(run_config.clone().node_index) .await; - info!("All nodes are ready! Starting HotShot"); - run.run_hotshot(hotshot).await; + error!("All nodes are ready! Starting HotShot"); + run.run_hotshot( + hotshot, + &mut transactions, + transactions_to_send_per_round as u64, + ) + .await; } pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs new file mode 100644 index 0000000000..6610c09a60 --- /dev/null +++ b/hotshot/examples/libp2p/all.rs @@ -0,0 +1,80 @@ +pub mod types; + +use crate::infra::load_config_from_file; +use crate::types::ThisRun; +use async_compatibility_layer::art::async_spawn; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot::demo::DemoMembership; +use hotshot::demo::DemoTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::config::NetworkConfig; +use hotshot_types::traits::node_implementation::NodeType; +use std::net::{IpAddr, Ipv4Addr}; +use tracing::instrument; + +use crate::{ + infra::run_orchestrator, + infra::{ConfigArgs, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, +}; + +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::main(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + + // use configfile args + let args = ConfigArgs::parse(); + + // orchestrator + async_spawn(run_orchestrator::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + >(OrchestratorArgs { + host: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + port: 4444, + config_file: args.config_file.clone(), + })); + + // nodes + let config: NetworkConfig< + ::SignatureKey, + ::ElectionConfigType, + > = load_config_from_file::(args.config_file); + let mut nodes = Vec::new(); + for _ in 0..config.config.total_nodes.into() { + let node = async_spawn(async move { + infra::main_entry_point::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: "127.0.0.1".to_string(), + port: 4444, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + }) + .await + }); + nodes.push(node); + } + futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/web-server-da/README.md b/hotshot/examples/web-server-da/README.md deleted file mode 100644 index a65ec0bb92..0000000000 --- a/hotshot/examples/web-server-da/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Commands to run da examples: -1a)Start web servers by either running 3 servers: -just async_std example web-server -- -just async_std example web-server -- -just async_std example web-server -- - -1b)Or use multi-web-server to spin up all three: -just async_std example multi-web-server -- - -2) Start orchestrator: -just async_std example web-server-da-orchestrator -- - -3a) Start validator: -just async_std example web-server-da-validator -- - -3b) Or start multiple validators: -just async_std example multi-validator -- - -I.e. -just async_std example web-server -- 9000 -just async_std example web-server -- 9001 -just async_std example web-server -- 9002 -just async_std example web-server-da-orchestrator -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml -just async_std example web-server-da-validator -- 2 0.0.0.0 4444 - -OR: -just async_std example multi-web-server -- 9000 9001 9002 -just async_std example web-server-da-orchestrator -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml -just async_std example multi-validator -- 10 0.0.0.0 4444 \ No newline at end of file diff --git a/hotshot/examples/webserver/README.md b/hotshot/examples/webserver/README.md new file mode 100644 index 0000000000..ebe9571b5f --- /dev/null +++ b/hotshot/examples/webserver/README.md @@ -0,0 +1,29 @@ +Commands to run da examples: +1a)Start web servers by either running 3 servers: +just async_std example webserver -- +just async_std example webserver -- +just async_std example webserver -- + +1b)Or use multi-webserver to spin up all three: +just async_std example multi-webserver -- + +2) Start orchestrator: +just async_std example orchestrator-webserver -- + +3a) Start validator: +just async_std example validator-webserver -- + +3b) Or start multiple validators: +just async_std example multi-validator-webserver -- + +I.e. +just async_std example webserver -- 9000 +just async_std example webserver -- 9001 +just async_std example webserver -- 9002 +just async_std example orchestrator-webserver -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml +just async_std example validator-webserver -- 2 0.0.0.0 4444 + +OR: +just async_std example multi-webserver -- 9000 9001 9002 +just async_std example orchestrator-webserver -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml +just async_std example multi-validator-webserver -- 10 0.0.0.0 4444 \ No newline at end of file diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs new file mode 100644 index 0000000000..005d02b7aa --- /dev/null +++ b/hotshot/examples/webserver/all.rs @@ -0,0 +1,98 @@ +pub mod types; + +use crate::infra::load_config_from_file; +use crate::infra::{ConfigArgs, OrchestratorArgs}; +use crate::types::ThisRun; +use crate::{ + infra::run_orchestrator, + types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, +}; +use std::net::{IpAddr, Ipv4Addr}; +use std::sync::Arc; + +#[path = "../infra/mod.rs"] +pub mod infra; + +use async_compatibility_layer::{art::async_spawn, channel::oneshot}; +use clap::Parser; +use hotshot::demo::{DemoMembership, DemoTypes}; +use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::config::NetworkConfig; +use hotshot_types::traits::node_implementation::NodeType; +use tracing::error; +use types::VIDNetwork; + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() { + // use configfile args + let args = ConfigArgs::parse(); + + // spawn web servers + let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); + let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); + let _sender = Arc::new(server_shutdown_sender_cdn); + let _sender = Arc::new(server_shutdown_sender_da); + + async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_cdn), 9000) + .await + { + error!("Problem starting cdn web server: {:?}", e); + } + }); + async_spawn(async move { + if let Err(e) = hotshot_web_server::run_web_server::< + ::SignatureKey, + >(Some(server_shutdown_da), 9001) + .await + { + error!("Problem starting da web server: {:?}", e); + } + }); + + // web server orchestrator + async_spawn(run_orchestrator::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + >(OrchestratorArgs { + host: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + port: 4444, + config_file: args.config_file.clone(), + })); + + // multi validator run + let config: NetworkConfig< + ::SignatureKey, + ::ElectionConfigType, + > = load_config_from_file::(args.config_file); + let mut nodes = Vec::new(); + for _ in 0..(config.config.total_nodes.get()) { + let node = async_spawn(async move { + infra::main_entry_point::< + DemoTypes, + DemoMembership, + DANetwork, + QuorumNetwork, + ViewSyncNetwork, + VIDNetwork, + NodeImpl, + ThisRun, + >(ValidatorArgs { + host: "127.0.0.1".to_string(), + port: 4444, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + }) + .await + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/hotshot/examples/web-server-da/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs similarity index 100% rename from hotshot/examples/web-server-da/multi-validator.rs rename to hotshot/examples/webserver/multi-validator.rs diff --git a/hotshot/examples/web-server-da/multi-web-server.rs b/hotshot/examples/webserver/multi-webserver.rs similarity index 63% rename from hotshot/examples/web-server-da/multi-web-server.rs rename to hotshot/examples/webserver/multi-webserver.rs index f954050ad0..4cf6078b50 100644 --- a/hotshot/examples/web-server-da/multi-web-server.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -1,6 +1,10 @@ use std::sync::Arc; -use async_compatibility_layer::{art::async_spawn, channel::oneshot}; +use async_compatibility_layer::{ + art::async_spawn, + channel::oneshot, + logging::{setup_backtrace, setup_logging}, +}; use clap::Parser; use hotshot::demo::DemoTypes; use tracing::error; @@ -11,19 +15,18 @@ struct MultiWebServerArgs { da_port: u16, view_sync_port: u16, } -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] async fn main() { + setup_backtrace(); + setup_logging(); + let args = MultiWebServerArgs::parse(); let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); - let (server_shutdown_sender_view_sync, server_shutdown_view_sync) = oneshot(); let _sender = Arc::new(server_shutdown_sender_cdn); let _sender = Arc::new(server_shutdown_sender_da); - let _sender = Arc::new(server_shutdown_sender_view_sync); let cdn_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< @@ -33,7 +36,6 @@ async fn main() { { error!("Problem starting cdn web server: {:?}", e); } - error!("cdn"); }); let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< @@ -43,17 +45,7 @@ async fn main() { { error!("Problem starting da web server: {:?}", e); } - error!("da"); }); - let vs_server = async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - >(Some(server_shutdown_view_sync), args.view_sync_port) - .await - { - error!("Problem starting view sync web server: {:?}", e); - } - error!("vs"); - }); - let _result = futures::future::join_all(vec![cdn_server, da_server, vs_server]).await; + + let _result = futures::future::join_all(vec![cdn_server, da_server]).await; } diff --git a/hotshot/examples/web-server-da/orchestrator.rs b/hotshot/examples/webserver/orchestrator.rs similarity index 100% rename from hotshot/examples/web-server-da/orchestrator.rs rename to hotshot/examples/webserver/orchestrator.rs diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/webserver/types.rs similarity index 100% rename from hotshot/examples/web-server-da/types.rs rename to hotshot/examples/webserver/types.rs diff --git a/hotshot/examples/web-server-da/validator.rs b/hotshot/examples/webserver/validator.rs similarity index 100% rename from hotshot/examples/web-server-da/validator.rs rename to hotshot/examples/webserver/validator.rs diff --git a/hotshot/examples/web-server-da/web-server.rs b/hotshot/examples/webserver/webserver.rs similarity index 86% rename from hotshot/examples/web-server-da/web-server.rs rename to hotshot/examples/webserver/webserver.rs index 9c4912c6a3..391fc60028 100644 --- a/hotshot/examples/web-server-da/web-server.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -11,10 +11,8 @@ use clap::Parser; struct WebServerArgs { port: u16, } -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] async fn main() { setup_backtrace(); diff --git a/orchestrator/README.md b/orchestrator/README.md index 43e8a6e28c..2e0b6f0443 100644 --- a/orchestrator/README.md +++ b/orchestrator/README.md @@ -2,6 +2,6 @@ This crate implements an orchestrator that coordinates starting the network with a particular configuration. It is useful for testing and benchmarking. Like the web server, the orchestrator is built using [Tide Disco](https://github.com/EspressoSystems/tide-disco). -To run the orchestrator for a libp2p network: `cargo run --example libp2p-orchestrator --features="full-ci,channel-async-std" 0.0.0.0 3333 ./orchestrator/default-libp2p-run-config.toml ` +To run the orchestrator for a libp2p network: `just async_std example orchestrator-libp2p 0.0.0.0 3333 ./crates/orchestrator/run-config` -To run the orchestrator for a libp2p network: `cargo run --example web-server-orchestrator --features="full-ci,channel-async-std" 0.0.0.0 3333 ./orchestrator/default-web-server-run-config.toml ` \ No newline at end of file +To run the orchestrator for a libp2p network: `just async_std example orchestrator-webserver 0.0.0.0 3333 ./crates/orchestrator/run-config.toml ` \ No newline at end of file diff --git a/orchestrator/default-run-config.toml b/orchestrator/default-run-config.toml deleted file mode 100644 index ee8333f80a..0000000000 --- a/orchestrator/default-run-config.toml +++ /dev/null @@ -1,70 +0,0 @@ -rounds = 10 -transactions_per_round = 1 -node_index = 0 -seed = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -] -padding = 100 -start_delay_seconds = 30 - -[config] -total_nodes = 1 -max_transactions = 100 -min_transactions = 0 -next_view_timeout = 30000 -timeout_ratio = [ - 11, - 10, -] -round_start_delay = 1 -start_delay = 1 -num_bootstrap = 4 - -[config.propose_min_round_time] -secs = 0 -nanos = 0 - -# TODO (Keyao) Clean up configuration parameters. -# -[config.propose_max_round_time] -secs = 2 -nanos = 0 - -[web_server_config] -host = "0.0.0.0" -port = 9000 - -[web_server_config.wait_between_polls] -secs = 0 -nanos = 100000000 # 100 ms diff --git a/orchestrator/default-web-server-run-config.toml b/orchestrator/default-web-server-run-config.toml deleted file mode 100644 index c5d0bd0253..0000000000 --- a/orchestrator/default-web-server-run-config.toml +++ /dev/null @@ -1,79 +0,0 @@ -rounds = 10 -transactions_per_round = 1 -node_index = 0 -seed = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -] -padding = 100 -start_delay_seconds = 30 - -[config] -total_nodes = 10 -committee_nodes = 5 -max_transactions = 100 -min_transactions = 0 -next_view_timeout = 30000 -timeout_ratio = [ - 11, - 10, -] -round_start_delay = 1 -start_delay = 1 -num_bootstrap = 4 - -[config.propose_min_round_time] -secs = 0 -nanos = 0 - -# TODO (Keyao) Clean up configuration parameters. -# -[config.propose_max_round_time] -secs = 2 -nanos = 0 - -[web_server_config] -host = "127.0.0.1" -port = 9000 - -[da_web_server_config] -host = "127.0.0.1" -port = 9001 - -[web_server_config.wait_between_polls] -secs = 0 -nanos = 100000000 # 100 ms - -[da_web_server_config.wait_between_polls] -secs = 0 -nanos = 100000000 # 100 ms diff --git a/orchestrator/default-libp2p-run-config.toml b/orchestrator/run-config.toml similarity index 72% rename from orchestrator/default-libp2p-run-config.toml rename to orchestrator/run-config.toml index a353ed06f5..f15a45fae1 100644 --- a/orchestrator/default-libp2p-run-config.toml +++ b/orchestrator/run-config.toml @@ -1,5 +1,6 @@ -rounds = 10 -transactions_per_round = 12 +rounds = 100 +transactions_per_round = 1 +transaction_size = 1749 node_index = 0 seed = [ 0, @@ -35,21 +36,7 @@ seed = [ 0, 0, ] -padding = 10 -start_delay_seconds = 60 - -[libp2p_config] -index_ports = true -bootstrap_mesh_n_high = 4 -bootstrap_mesh_n_low = 4 -bootstrap_mesh_outbound_min = 2 -bootstrap_mesh_n = 4 -mesh_n_high = 4 -mesh_n_low = 4 -mesh_outbound_min = 2 -mesh_n = 4 -online_time = 10 -base_port = 9000 +start_delay_seconds = 0 [config] total_nodes = 10 @@ -65,6 +52,35 @@ round_start_delay = 1 start_delay = 1 num_bootstrap = 5 +[libp2p_config] +index_ports = true +bootstrap_mesh_n_high = 4 +bootstrap_mesh_n_low = 4 +bootstrap_mesh_outbound_min = 2 +bootstrap_mesh_n = 4 +mesh_n_high = 4 +mesh_n_low = 4 +mesh_outbound_min = 2 +mesh_n = 4 +online_time = 10 +base_port = 9000 + +[web_server_config] +host = "127.0.0.1" +port = 9000 + +[da_web_server_config] +host = "127.0.0.1" +port = 9001 + +[web_server_config.wait_between_polls] +secs = 0 +nanos = 100000000 # 10 ms + +[da_web_server_config.wait_between_polls] +secs = 0 +nanos = 100000000 # 10 ms + [config.propose_min_round_time] secs = 0 nanos = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index cec5deaf3d..7593bd180a 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -68,7 +68,7 @@ pub struct NetworkConfig { pub propose_max_round_time: Duration, pub node_index: u64, pub seed: [u8; 32], - pub padding: usize, + pub transaction_size: usize, pub start_delay_seconds: u64, pub key_type_name: String, pub election_config_type_name: String, @@ -85,7 +85,7 @@ impl Default for NetworkConfig { transactions_per_round: default_transactions_per_round(), node_index: 0, seed: [0u8; 32], - padding: default_padding(), + transaction_size: default_transaction_size(), libp2p_config: None, config: HotShotConfigFile::default().into(), start_delay_seconds: 60, @@ -112,8 +112,8 @@ pub struct NetworkConfigFile { pub node_index: u64, #[serde(default)] pub seed: [u8; 32], - #[serde(default = "default_padding")] - pub padding: usize, + #[serde(default = "default_transaction_size")] + pub transaction_size: usize, #[serde(default = "default_start_delay_seconds")] pub start_delay_seconds: u64, #[serde(default)] @@ -141,7 +141,7 @@ impl From> for NetworkC propose_max_round_time: val.config.propose_max_round_time, propose_min_round_time: val.config.propose_min_round_time, seed: val.seed, - padding: val.padding, + transaction_size: val.transaction_size, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { num_bootstrap_nodes: val.config.num_bootstrap, index_ports: libp2p_config.index_ports, @@ -179,8 +179,10 @@ impl From> for NetworkC pub struct HotShotConfigFile { /// Total number of nodes in the network pub total_nodes: NonZeroUsize, + #[serde(skip)] /// My own public key, secret key, stake value pub my_own_validator_config: ValidatorConfig, + #[serde(skip)] /// The known nodes' public key and stake value pub known_nodes_with_stake: Vec, /// Number of committee nodes @@ -283,7 +285,7 @@ fn default_rounds() -> usize { fn default_transactions_per_round() -> usize { 10 } -fn default_padding() -> usize { +fn default_transaction_size() -> usize { 100 } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index c4c665c0dc..09447973a6 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -99,12 +99,7 @@ where //add new node's key to stake table if self.config.web_server_config.clone().is_some() { - let new_key = self - .config - .config - .my_own_validator_config - .public_key - .clone(); + let new_key = &self.config.config.my_own_validator_config.public_key; let client_clone = self.client.clone().unwrap(); async move { client_clone From d37595e538a2c2d0ce45270a3f91c705fa9462cb Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 17 Nov 2023 12:07:55 -0500 Subject: [PATCH 0420/1393] renaming --- hotshot-state-prover/src/circuit.rs | 20 ++++++++++---------- hotshot-state-prover/src/lib.rs | 6 +++--- types/src/traits/state.rs | 8 ++++---- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 59ef07a002..355a4c028f 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -52,7 +52,7 @@ pub struct LightClientStateVar { /// Private list holding all variables /// vars[0]: view number /// vars[1]: block height - /// vars[2]: block commitment + /// vars[2]: block commitment root /// vars[3]: fee ledger commitment /// vars[4-6]: stake table commitment vars: [Variable; 7], @@ -89,8 +89,8 @@ impl PublicInput { self.0[2] } - /// Return the block commitment of the light client state - pub fn block_comm(&self) -> F { + /// Return the block commitment root of the light client state + pub fn block_comm_root(&self) -> F { self.0[3] } @@ -131,7 +131,7 @@ impl LightClientStateVar { vars: [ circuit.create_public_variable(view_number_f)?, circuit.create_public_variable(block_height_f)?, - circuit.create_public_variable(state.block_comm)?, + circuit.create_public_variable(state.block_comm_root)?, circuit.create_public_variable(state.fee_ledger_comm)?, circuit.create_public_variable(state.stake_table_comm.0)?, circuit.create_public_variable(state.stake_table_comm.1)?, @@ -148,7 +148,7 @@ impl LightClientStateVar { self.vars[1] } - pub fn block_comm(&self) -> Variable { + pub fn block_comm_root(&self) -> Variable { self.vars[2] } @@ -175,7 +175,7 @@ impl AsRef<[Variable]> for LightClientStateVar { /// - a list of stake table entries (`Vec<(SchnorrVerKey, Amount)>`) /// - a bit vector indicates the signers /// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state -/// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) +/// - updated light client state (`(view_number, block_height, block_comm_root, fee_ledger_comm, stake_table_comm)`) /// - a quorum threshold /// Lengths of input vectors should not exceed the `STAKE_TABLE_CAPACITY`. /// The list of stake table entries, bit indicators and signatures will be padded to the `STAKE_TABLE_CAPACITY`. @@ -300,7 +300,7 @@ where threshold, view_number_f, block_height_f, - lightclient_state.block_comm, + lightclient_state.block_comm_root, lightclient_state.fee_ledger_comm, lightclient_state.stake_table_comm.0, lightclient_state.stake_table_comm.1, @@ -403,7 +403,7 @@ where let lightclient_state = LightClientState { view_number: 0, block_height: 0, - block_comm: F::default(), + block_comm_root: F::default(), fee_ledger_comm: F::default(), stake_table_comm: (F::default(), F::default(), F::default()), }; @@ -441,7 +441,7 @@ mod tests { .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)) .collect::>(); - let block_comm = + let block_comm_root = VariableLengthRescueCRHF::::evaluate(vec![F::from(1u32), F::from(2u32)]).unwrap() [0]; let fee_ledger_comm = @@ -451,7 +451,7 @@ mod tests { let lightclient_state = LightClientState { view_number: 100, block_height: 73, - block_comm, + block_comm_root, fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index d4984de75e..484ced9500 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -48,7 +48,7 @@ pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), Plon /// Given a proving key and /// - a list of stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) /// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state -/// - updated light client state (`(view_number, block_height, block_comm, fee_ledger_comm, stake_table_comm)`) +/// - updated light client state (`(view_number, block_height, block_comm_root, fee_ledger_comm, stake_table_comm)`) /// - a bit vector indicates the signers /// - a quorum threshold /// Returns error or a pair (proof, public_inputs) asserting that @@ -182,7 +182,7 @@ mod tests { let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); let st = stake_table_for_testing(&bls_keys, &schnorr_keys); - let block_comm = VariableLengthRescueCRHF::::evaluate(vec![ + let block_comm_root = VariableLengthRescueCRHF::::evaluate(vec![ BaseField::from(1u32), BaseField::from(2u32), ]) @@ -196,7 +196,7 @@ mod tests { let lightclient_state = LightClientState { view_number: 100, block_height: 73, - block_comm, + block_comm_root, fee_ledger_comm, stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), }; diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index bed7fed42b..d691b1d9b6 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -227,8 +227,8 @@ pub struct LightClientState { pub view_number: usize, /// Current block height pub block_height: usize, - /// Block commitment - pub block_comm: F, + /// Root of the block commitment tree + pub block_comm_root: F, /// Commitment for fee ledger pub fee_ledger_comm: F, /// Commitment for the stake table @@ -240,7 +240,7 @@ impl From> for [F; 7] { [ F::from(state.view_number as u64), F::from(state.block_height as u64), - state.block_comm, + state.block_comm_root, state.fee_ledger_comm, state.stake_table_comm.0, state.stake_table_comm.1, @@ -253,7 +253,7 @@ impl From<&LightClientState> for [F; 7] { [ F::from(state.view_number as u64), F::from(state.block_height as u64), - state.block_comm, + state.block_comm_root, state.fee_ledger_comm, state.stake_table_comm.0, state.stake_table_comm.1, From bc78dafa376d176d0177771fcc77310e798bf40e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 17 Nov 2023 13:18:40 -0500 Subject: [PATCH 0421/1393] Completely remove Exchange and Exchanges traits --- hotshot/examples/infra/mod.rs | 111 +-- hotshot/examples/libp2p/types.rs | 18 +- hotshot/examples/web-server-da/types.rs | 18 +- hotshot/src/lib.rs | 143 ++-- hotshot/src/tasks/mod.rs | 74 +- .../src/traits/networking/combined_network.rs | 4 +- .../src/traits/networking/libp2p_network.rs | 5 +- .../src/traits/networking/memory_network.rs | 5 +- .../traits/networking/web_server_network.rs | 4 +- hotshot/src/types/handle.rs | 44 +- task-impls/src/consensus.rs | 18 +- testing/src/node_types.rs | 405 +--------- testing/src/task_helpers.rs | 64 +- testing/src/test_builder.rs | 35 +- testing/src/test_launcher.rs | 37 +- testing/src/test_runner.rs | 96 +-- testing/tests/consensus_task.rs | 17 +- testing/tests/da_task.rs | 17 +- testing/tests/memory_network.rs | 13 +- testing/tests/network_task.rs | 14 +- testing/tests/vid_task.rs | 17 +- testing/tests/view_sync_task.rs | 12 +- types/src/traits/election.rs | 542 +------------ types/src/traits/network.rs | 8 +- types/src/traits/node_implementation.rs | 748 +++++++++--------- 25 files changed, 637 insertions(+), 1832 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 85ac52f3a5..643789fcc2 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -12,7 +12,7 @@ use hotshot::{ NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, - HotShotType, Networks, SystemContext, + HotShotType, Memberships, Networks, SystemContext, }; use hotshot_orchestrator::{ self, @@ -20,19 +20,16 @@ use hotshot_orchestrator::{ config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_types::{block_impl::VIDBlockHeader, traits::election::VIDExchange}; +use hotshot_types::block_impl::VIDBlockHeader; use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, event::{Event, EventType}, - message::Message, traits::{ - election::{ - CommitteeExchange, ConsensusExchange, Membership, QuorumExchange, ViewSyncExchange, - }, + election::Membership, network::CommunicationChannel, - node_implementation::{CommitteeEx, Exchanges, ExchangesType, NodeType, QuorumEx}, + node_implementation::NodeType, state::{ConsensusTime, TestableBlock, TestableState}, }, HotShotConfig, @@ -92,18 +89,7 @@ pub async fn run_orchestrator< QUORUMNETWORK: CommunicationChannel + Debug, VIEWSYNCNETWORK: CommunicationChannel + Debug, VIDNETWORK: CommunicationChannel + Debug, - NODE: NodeImplementation< - TYPES, - Exchanges = Exchanges< - TYPES, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, - >, - Storage = MemoryStorage, - >, + NODE: NodeImplementation>, >( OrchestratorArgs { host, @@ -145,14 +131,6 @@ pub trait RunDA< TYPES, QuorumNetwork = QUORUMNETWORK, CommitteeNetwork = DANETWORK, - Exchanges = Exchanges< - TYPES, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, - >, Storage = MemoryStorage, >, > where @@ -181,25 +159,16 @@ pub trait RunDA< let pk = config.config.my_own_validator_config.public_key.clone(); let sk = config.config.my_own_validator_config.private_key.clone(); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let entry = pk.get_stake_table_entry(1u64); let da_network = self.get_da_network(); let quorum_network = self.get_quorum_network(); - let view_sync_network = self.get_view_sync_network(); - let vid_network = self.get_vid_network(); // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config(config.config.total_nodes.get() as u64) + TYPES::Membership::default_election_config(config.config.total_nodes.get() as u64) }); - let committee_election_config = as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config( + let committee_election_config = TYPES::Membership::default_election_config( config.config.da_committee_size.try_into().unwrap(), ); let networks_bundle = Networks { @@ -207,19 +176,25 @@ pub trait RunDA< da_network: da_network.clone(), _pd: PhantomData, }; - let exchanges = NODE::Exchanges::create( - known_nodes_with_stake.clone(), - (quorum_election_config, committee_election_config), - ( - quorum_network.clone(), - da_network.clone(), - view_sync_network.clone(), - vid_network.clone(), + + let memberships = Memberships { + quorum_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), ), - pk.clone(), - entry.clone(), - sk.clone(), - ); + da_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + committee_election_config, + ), + vid_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + view_sync_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config, + ), + }; SystemContext::init( pk, @@ -227,7 +202,7 @@ pub trait RunDA< config.node_index, config.config, MemoryStorage::empty(), - exchanges, + memberships, networks_bundle, initializer, ConsensusMetricsValue::new(), @@ -376,14 +351,6 @@ impl< TYPES, QuorumNetwork = WebCommChannel, CommitteeNetwork = WebCommChannel, - Exchanges = Exchanges< - TYPES, - Message, - QuorumExchange, Message>, - CommitteeExchange, Message>, - ViewSyncExchange, Message>, - VIDExchange, Message>, - >, Storage = MemoryStorage, >, > @@ -504,24 +471,6 @@ impl< TYPES, QuorumNetwork = Libp2pCommChannel, CommitteeNetwork = Libp2pCommChannel, - Exchanges = Exchanges< - TYPES, - Message, - QuorumExchange, Message>, - CommitteeExchange< - TYPES, - TYPES::Membership, - Libp2pCommChannel, - Message, - >, - ViewSyncExchange< - TYPES, - TYPES::Membership, - Libp2pCommChannel, - Message, - >, - VIDExchange, Message>, - >, Storage = MemoryStorage, >, > @@ -717,14 +666,6 @@ pub async fn main_entry_point< TYPES, QuorumNetwork = QUORUMNETWORK, CommitteeNetwork = DANETWORK, - Exchanges = Exchanges< - TYPES, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, - >, Storage = MemoryStorage, >, RUNDA: RunDA, diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index c583796f47..b75fafbe7f 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,15 +1,9 @@ use crate::infra::Libp2pDARun; use hotshot::{ - demo::{DemoMembership, DemoTypes}, + demo::DemoTypes, traits::implementations::{Libp2pCommChannel, MemoryStorage}, }; -use hotshot_types::{ - message::Message, - traits::{ - election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, - node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, - }, -}; +use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -25,14 +19,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - type Exchanges = Exchanges< - DemoTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, - >; fn new_channel_maps( start_view: ::Time, diff --git a/hotshot/examples/web-server-da/types.rs b/hotshot/examples/web-server-da/types.rs index af5735b5ec..60c86f90d1 100644 --- a/hotshot/examples/web-server-da/types.rs +++ b/hotshot/examples/web-server-da/types.rs @@ -1,15 +1,9 @@ use crate::infra::WebServerDARun; use hotshot::{ - demo::{DemoMembership, DemoTypes}, + demo::DemoTypes, traits::implementations::{MemoryStorage, WebCommChannel}, }; -use hotshot_types::{ - message::Message, - traits::{ - election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, - node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, - }, -}; +use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -25,14 +19,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; - type Exchanges = Exchanges< - DemoTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, - >; fn new_channel_maps( start_view: ::Time, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 455887b91f..02ee34731d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -50,10 +50,6 @@ use hotshot_task::{ task_launcher::TaskRunner, }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; -use hotshot_types::{ - simple_certificate::QuorumCertificate, - traits::{election::ViewSyncExchangeType, node_implementation::TimeoutEx}, -}; use hotshot_types::{ consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, @@ -63,14 +59,11 @@ use hotshot_types::{ DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, SequencingMessage, }, + simple_certificate::QuorumCertificate, traits::{ consensus_api::{ConsensusApi, ConsensusSharedApi}, - election::ConsensusExchange, network::{CommunicationChannel, NetworkError}, - node_implementation::{ - ChannelMaps, CommitteeEx, ExchangesType, NodeType, QuorumEx, SendToTasks, VIDEx, - ViewSyncEx, - }, + node_implementation::{ChannelMaps, NodeType, SendToTasks}, signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, @@ -138,9 +131,6 @@ pub struct SystemContextInner> { /// This `HotShot` instance's storage backend storage: I::Storage, - /// This `HotShot` instance's way to interact with the nodes needed to form a quorum and/or DA certificate. - pub exchanges: Arc, - /// Networks used by the instance of hotshot pub networks: Arc>, @@ -185,14 +175,14 @@ impl> SystemContext { /// Creates a new hotshot with the given configuration options and sets it up with the given /// genesis block #[allow(clippy::too_many_arguments)] - #[instrument(skip(private_key, storage, exchanges, networks, initializer, metrics))] + #[instrument(skip(private_key, storage, memberships, networks, initializer, metrics))] pub async fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, storage: I::Storage, - exchanges: I::Exchanges, + memberships: Memberships, networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -242,13 +232,6 @@ impl> SystemContext { }; let consensus = Arc::new(RwLock::new(consensus)); - let memberships = Memberships { - quorum_membership: exchanges.quorum_exchange().membership().clone(), - da_membership: exchanges.committee_exchange().membership().clone(), - vid_membership: exchanges.vid_exchange().membership().clone(), - view_sync_membership: exchanges.view_sync_exchange().membership().clone(), - }; - let inner: Arc> = Arc::new(SystemContextInner { id: nonce, channel_maps: I::new_channel_maps(start_view), @@ -257,7 +240,6 @@ impl> SystemContext { private_key, config, storage, - exchanges: Arc::new(exchanges), networks: Arc::new(networks), memberships: Arc::new(memberships), event_sender: RwLock::default(), @@ -332,19 +314,14 @@ impl> SystemContext { async_spawn(async move { let _result = api .inner - .exchanges - .committee_exchange() - .network() + .networks + .da_network .broadcast_message( Message { sender: api.inner.public_key.clone(), kind: MessageKind::from(message), }, - &api.inner - .exchanges - .committee_exchange() - .membership() - .clone(), + &api.inner.memberships.da_membership.clone(), ) .await; }); @@ -397,7 +374,7 @@ impl> SystemContext { node_id: u64, config: HotShotConfig, storage: I::Storage, - exchanges: I::Exchanges, + memberships: Memberships, networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -418,7 +395,7 @@ impl> SystemContext { node_id, config, storage, - exchanges, + memberships, networks, initializer, metrics, @@ -449,13 +426,12 @@ impl> SystemContext { async_spawn_local(async move { if inner - .exchanges - .quorum_exchange() - .network() + .networks + .quorum_network .broadcast_message( Message { sender: pk, kind }, // TODO this is morally wrong - &inner.exchanges.quorum_exchange().membership().clone(), + &inner.memberships.quorum_membership.clone(), ) .await .is_err() @@ -479,9 +455,8 @@ impl> SystemContext { recipient: TYPES::SignatureKey, ) -> std::result::Result<(), NetworkError> { self.inner - .exchanges - .quorum_exchange() - .network() + .networks + .quorum_network .direct_message( Message { sender: self.inner.public_key.clone(), @@ -638,17 +613,6 @@ pub trait HotShotType> { #[async_trait] impl> HotShotType for SystemContext -where - QuorumEx: - ConsensusExchange, Membership = TYPES::Membership> + 'static, - CommitteeEx: - ConsensusExchange, Membership = TYPES::Membership> + 'static, - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership> + 'static, - VIDEx: - ConsensusExchange, Membership = TYPES::Membership> + 'static, - TimeoutEx: - ConsensusExchange, Membership = TYPES::Membership> + 'static, { fn consensus(&self) -> &Arc>> { &self.inner.consensus @@ -663,10 +627,12 @@ where let output_event_stream = self.inner.output_event_stream.clone(); let internal_event_stream = self.inner.internal_event_stream.clone(); - let quorum_exchange = self.inner.exchanges.quorum_exchange().clone(); - let committee_exchange = self.inner.exchanges.committee_exchange().clone(); - let view_sync_exchange = self.inner.exchanges.view_sync_exchange().clone(); - let vid_exchange = self.inner.exchanges.vid_exchange().clone(); + let quorum_network = self.inner.networks.quorum_network.clone(); + let da_network = self.inner.networks.da_network.clone(); + let quorum_membership = self.inner.memberships.quorum_membership.clone(); + let da_membership = self.inner.memberships.da_membership.clone(); + let vid_membership = self.inner.memberships.vid_membership.clone(); + let view_sync_membership = self.inner.memberships.view_sync_membership.clone(); let handle = SystemContextHandle { registry, @@ -679,52 +645,45 @@ where let task_runner = add_network_message_task( task_runner, internal_event_stream.clone(), - quorum_exchange.clone(), - ) - .await; - let task_runner = add_network_message_task( - task_runner, - internal_event_stream.clone(), - committee_exchange.clone(), + quorum_network.clone(), ) .await; let task_runner = add_network_message_task( task_runner, internal_event_stream.clone(), - view_sync_exchange.clone(), - ) - .await; - let task_runner = add_network_message_task( - task_runner, - internal_event_stream.clone(), - vid_exchange.clone(), + da_network.clone(), ) .await; + let task_runner = add_network_event_task( task_runner, internal_event_stream.clone(), - quorum_exchange.clone(), + quorum_network.clone(), + quorum_membership, NetworkTaskKind::Quorum, ) .await; let task_runner = add_network_event_task( task_runner, internal_event_stream.clone(), - committee_exchange.clone(), + da_network.clone(), + da_membership, NetworkTaskKind::Committee, ) .await; let task_runner = add_network_event_task( task_runner, internal_event_stream.clone(), - view_sync_exchange.clone(), + quorum_network.clone(), + view_sync_membership, NetworkTaskKind::ViewSync, ) .await; let task_runner = add_network_event_task( task_runner, internal_event_stream.clone(), - vid_exchange.clone(), + quorum_network.clone(), + vid_membership, NetworkTaskKind::VID, ) .await; @@ -834,9 +793,8 @@ impl> ConsensusApi debug!(?message, ?recipient, "send_direct_message"); async_spawn_local(async move { inner - .exchanges - .quorum_exchange() - .network() + .networks + .quorum_network .direct_message( Message { sender: inner.public_key.clone(), @@ -858,9 +816,8 @@ impl> ConsensusApi debug!(?message, ?recipient, "send_direct_message"); async_spawn_local(async move { inner - .exchanges - .committee_exchange() - .network() + .networks + .da_network .direct_message( Message { sender: inner.public_key.clone(), @@ -881,15 +838,14 @@ impl> ConsensusApi ) -> std::result::Result<(), NetworkError> { debug!(?message, "send_broadcast_message"); self.inner - .exchanges - .quorum_exchange() - .network() + .networks + .quorum_network .broadcast_message( Message { sender: self.inner.public_key.clone(), kind: MessageKind::from_consensus_message(message), }, - &self.inner.exchanges.quorum_exchange().membership().clone(), + &self.inner.memberships.quorum_membership.clone(), ) .await?; Ok(()) @@ -901,20 +857,14 @@ impl> ConsensusApi ) -> std::result::Result<(), NetworkError> { debug!(?message, "send_da_broadcast_message"); self.inner - .exchanges - .committee_exchange() - .network() + .networks + .da_network .broadcast_message( Message { sender: self.inner.public_key.clone(), kind: MessageKind::from_consensus_message(message), }, - &self - .inner - .exchanges - .committee_exchange() - .membership() - .clone(), + &self.inner.memberships.da_membership.clone(), ) .await?; Ok(()) @@ -929,19 +879,14 @@ impl> ConsensusApi async_spawn(async move { let _result = api .inner - .exchanges - .committee_exchange() - .network() + .networks + .da_network .broadcast_message( Message { sender: api.inner.public_key.clone(), kind: MessageKind::from(message), }, - &api.inner - .exchanges - .committee_exchange() - .membership() - .clone(), + &api.inner.memberships.da_membership.clone(), ) .await; }); diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 1ef7bfc201..9d7ab1ab1b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -26,14 +26,11 @@ use hotshot_task_impls::{ }; use hotshot_types::{ event::Event, - message::{Message, Messages}, + message::Messages, traits::{ consensus_api::ConsensusSharedApi, - election::{ConsensusExchange, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, - node_implementation::{ - CommitteeEx, NodeImplementation, NodeType, QuorumEx, VIDEx, ViewSyncEx, - }, + node_implementation::{NodeImplementation, NodeType}, state::ConsensusTime, BlockPayload, }, @@ -57,21 +54,14 @@ pub enum GlobalEvent { /// Add the network task to handle messages and publish events. /// # Panics /// Is unable to panic. This section here is just to satisfy clippy -pub async fn add_network_message_task< - TYPES: NodeType, - EXCHANGE: ConsensusExchange, Membership = TYPES::Membership> + 'static, ->( +pub async fn add_network_message_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - exchange: EXCHANGE, -) -> TaskRunner -// This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. -where - EXCHANGE::Networking: CommunicationChannel, -{ - let channel = exchange.network().clone(); + channel: NET, +) -> TaskRunner { + let net = channel.clone(); let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { - let network = channel.clone(); + let network = net.clone(); let closure = async move { loop { let msgs = Messages( @@ -89,9 +79,9 @@ where }; Some(boxed_sync(closure)) })); - let channel = exchange.network().clone(); + let net = channel.clone(); let direct_stream = GeneratedStream::>::new(Arc::new(move || { - let network = channel.clone(); + let network = net.clone(); let closure = async move { loop { let msgs = Messages( @@ -152,24 +142,14 @@ where /// Add the network task to handle events and send messages. /// # Panics /// Is unable to panic. This section here is just to satisfy clippy -pub async fn add_network_event_task< - TYPES: NodeType, - EXCHANGE: ConsensusExchange, Membership = TYPES::Membership> + 'static, ->( +pub async fn add_network_event_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - exchange: EXCHANGE, + channel: NET, + membership: TYPES::Membership, task_kind: NetworkTaskKind, -) -> TaskRunner -// This bound is required so that we can call the `recv_msgs` function of `CommunicationChannel`. -where - EXCHANGE::Networking: CommunicationChannel, -{ - let filter = - NetworkEventTaskState::>::Networking>::filter( - task_kind, - ); - let channel = exchange.network().clone(); +) -> TaskRunner { + let filter = NetworkEventTaskState::::filter(task_kind); let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { channel, event_stream: event_stream.clone(), @@ -178,9 +158,10 @@ where let registry = task_runner.registry.clone(); let network_event_handler = HandleEvent(Arc::new( move |event, mut state: NetworkEventTaskState<_, _>| { - let membership = exchange.membership().clone(); + let mem = membership.clone(); + async move { - let completion_status = state.handle_event(event, &membership).await; + let completion_status = state.handle_event(event, &mem).await; (completion_status, state) } .boxed() @@ -300,10 +281,7 @@ pub async fn add_vid_task>( task_runner: TaskRunner, event_stream: ChannelStream>, handle: SystemContextHandle, -) -> TaskRunner -where - VIDEx: ConsensusExchange, Membership = TYPES::Membership>, -{ +) -> TaskRunner { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), @@ -360,10 +338,7 @@ pub async fn add_da_task>( task_runner: TaskRunner, event_stream: ChannelStream>, handle: SystemContextHandle, -) -> TaskRunner -where - CommitteeEx: ConsensusExchange, Membership = TYPES::Membership>, -{ +) -> TaskRunner { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), @@ -419,10 +394,7 @@ pub async fn add_transaction_task> task_runner: TaskRunner, event_stream: ChannelStream>, handle: SystemContextHandle, -) -> TaskRunner -where - QuorumEx: ConsensusExchange, Membership = TYPES::Membership>, -{ +) -> TaskRunner { // build the transactions task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), @@ -478,11 +450,7 @@ pub async fn add_view_sync_task>( task_runner: TaskRunner, event_stream: ChannelStream>, handle: SystemContextHandle, -) -> TaskRunner -where - ViewSyncEx: - ViewSyncExchangeType, Membership = TYPES::Membership>, -{ +) -> TaskRunner { let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 50a4d9820d..b653197374 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -357,9 +357,7 @@ impl CommunicationChannel for CombinedCommChannel } } -impl TestableChannelImplementation> - for CombinedCommChannel -{ +impl TestableChannelImplementation for CombinedCommChannel { fn generate_network() -> Box) -> Self + 'static> { Box::new(move |network| CombinedCommChannel::new(network)) } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a655314736..81b8f6ab27 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -883,10 +883,7 @@ where } } -impl - TestableChannelImplementation, TYPES::SignatureKey>> - for Libp2pCommChannel -{ +impl TestableChannelImplementation for Libp2pCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> { diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d2e34edb90..4c67e1d6f7 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -548,10 +548,7 @@ where } } -impl - TestableChannelImplementation, TYPES::SignatureKey>> - for MemoryCommChannel -{ +impl TestableChannelImplementation for MemoryCommChannel { fn generate_network( ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> { diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 39da919655..48dc1913a1 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -1256,9 +1256,7 @@ impl TestableNetworkingImplementation for WebCommChannel } } -impl TestableChannelImplementation> - for WebCommChannel -{ +impl TestableChannelImplementation for WebCommChannel { fn generate_network() -> Box>) -> Self + 'static> { Box::new(move |network| WebCommChannel::new(network)) } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 138dc659eb..f0ddeefc3c 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,6 +1,5 @@ //! Provides an event-streaming handle for a [`HotShot`] running in the background -use crate::QuorumCertificate; use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; @@ -14,7 +13,6 @@ use hotshot_task::{ BoxSyncFuture, }; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::data::Leaf; use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ consensus::Consensus, @@ -22,20 +20,14 @@ use hotshot_types::{ event::EventType, message::{MessageKind, SequencingMessage}, traits::{ - election::{ConsensusExchange, QuorumExchangeType}, - node_implementation::{ExchangesType, NodeType}, - state::ConsensusTime, - storage::Storage, + election::Membership, network::CommunicationChannel, node_implementation::NodeType, + state::ConsensusTime, storage::Storage, }, }; +use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; use std::sync::Arc; use tracing::error; -#[cfg(feature = "hotshot-testing")] -use commit::Commitment; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::signature_key::EncodedSignature; - /// Event streaming handle for a [`SystemContext`] instance running in the background /// /// This type provides the means to message and interact with a background [`SystemContext`] instance, @@ -184,7 +176,18 @@ impl + 'static> SystemContextHandl /// Block the underlying quorum (and committee) networking interfaces until node is /// successfully initialized into the networks. pub async fn wait_for_networks_ready(&self) { - self.hotshot.inner.exchanges.wait_for_networks_ready().await; + self.hotshot + .inner + .networks + .quorum_network + .wait_for_ready() + .await; + self.hotshot + .inner + .networks + .da_network + .wait_for_ready() + .await; } /// Shut down the the inner hotshot and wait until all background threads are closed. @@ -211,8 +214,8 @@ impl + 'static> SystemContextHandl pub async fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { self.hotshot .inner - .exchanges - .quorum_exchange() + .memberships + .quorum_membership .get_leader(view_number) } @@ -228,19 +231,6 @@ impl + 'static> SystemContextHandl self.hotshot.inner.consensus.read().await.cur_view } - /// Wrapper around `HotShotConsensusApi`'s `sign_validating_or_commitment_proposal` function - #[cfg(feature = "hotshot-testing")] - pub fn sign_validating_or_commitment_proposal( - &self, - leaf_commitment: &Commitment>, - ) -> EncodedSignature { - let inner = self.hotshot.inner.clone(); - inner - .exchanges - .quorum_exchange() - .sign_validating_or_commitment_proposal::(leaf_commitment) - } - /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function #[cfg(feature = "hotshot-testing")] pub async fn send_broadcast_consensus_message(&self, msg: SequencingMessage) { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 170417c087..22d6b26c1d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -19,15 +19,15 @@ use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, QuorumProposal}, event::{Event, EventType}, - message::{GeneralConsensusMessage, Message, Proposal}, + message::{GeneralConsensusMessage, Proposal}, simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, - election::{ConsensusExchange, Membership}, + election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{CommitteeEx, NodeImplementation, NodeType, QuorumEx, TimeoutEx}, + node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, BlockPayload, @@ -60,11 +60,7 @@ pub struct ConsensusTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, -> where - QuorumEx: ConsensusExchange>, - CommitteeEx: ConsensusExchange>, - TimeoutEx: ConsensusExchange>, -{ +> { /// Our public key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -134,11 +130,7 @@ pub struct ConsensusTaskState< } /// State for the vote collection task. This handles the building of a QC from a votes received -pub struct VoteCollectionTaskState> -where - QuorumEx: ConsensusExchange>, - TimeoutEx: ConsensusExchange>, -{ +pub struct VoteCollectionTaskState> { /// Network for all nodes pub quorum_network: Arc, /// Membership for Timeout votes/certs diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index a78d4d23f3..945bc5feea 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,15 +1,12 @@ -use hotshot::traits::{ - election::static_committee::GeneralStaticCommittee, implementations::CombinedNetworks, -}; -use std::sync::Arc; +use hotshot::traits::election::static_committee::GeneralStaticCommittee; use hotshot::{ demo::DemoState, traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig}, implementations::{ - CombinedCommChannel, Libp2pCommChannel, Libp2pNetwork, MemoryCommChannel, - MemoryNetwork, MemoryStorage, WebCommChannel, WebServerNetwork, + CombinedCommChannel, Libp2pCommChannel, MemoryCommChannel, MemoryStorage, + WebCommChannel, }, NodeImplementation, }, @@ -18,12 +15,7 @@ use hotshot::{ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, data::ViewNumber, - message::Message, - traits::{ - election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, - network::{TestableChannelImplementation, TestableNetworkingImplementation}, - node_implementation::{ChannelMaps, Exchanges, NodeType, TestableExchange}, - }, + traits::node_implementation::{ChannelMaps, NodeType}, }; use serde::{Deserialize, Serialize}; @@ -84,34 +76,12 @@ type StaticCombinedQuorumComm = CombinedCommChannel; pub type StaticMemoryViewSyncComm = MemoryCommChannel; -type StaticLibp2pViewSyncComm = Libp2pCommChannel; - -type StaticWebViewSyncComm = WebCommChannel; - -type StaticCombinedViewSyncComm = CombinedCommChannel; - pub type StaticMemoryVIDComm = MemoryCommChannel; -type StaticLibp2pVIDComm = Libp2pCommChannel; - -type StaticWebVIDComm = WebCommChannel; - -type StaticCombinedVIDComm = CombinedCommChannel; - -pub type SequencingLibp2pExchange = Exchanges< - TestTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, ->; - impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticLibp2pQuorumComm; type CommitteeNetwork = StaticLibp2pDAComm; - type Exchanges = SequencingLibp2pExchange; fn new_channel_maps( start_view: ::Time, @@ -123,172 +93,10 @@ impl NodeImplementation for Libp2pImpl { } } -impl TestableExchange> for SequencingLibp2pExchange { - #[allow(clippy::arc_with_non_send_sync)] - fn gen_comm_channels( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - ) -> Box< - dyn Fn( - u64, - ) -> ( - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - ) + 'static, - > { - let network_generator = Arc::new(, - ::SignatureKey, - > as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - )); - - Box::new(move |id| { - let network = Arc::new(network_generator(id)); - let quorum_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network.clone()); - let committee_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network.clone()); - let view_sync_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network.clone()); - let vid_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network); - - (quorum_chan, committee_chan, view_sync_chan, vid_chan) - }) - } -} - -pub type SequencingMemoryExchange = Exchanges< - TestTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, ->; - -impl TestableExchange> for SequencingMemoryExchange { - #[allow(clippy::arc_with_non_send_sync)] - fn gen_comm_channels( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - ) -> Box< - dyn Fn( - u64, - ) -> ( - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - ) + 'static, - > { - let network_generator = Arc::new(, - ::SignatureKey, - > as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - )); - let network_da_generator = Arc::new(, - ::SignatureKey, - > as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 1, - da_committee_size, - true, - )); - Box::new(move |id| { - let network = Arc::new(network_generator(id)); - let network_da = Arc::new(network_da_generator(id)); - let quorum_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network.clone()); - let committee_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network_da.clone()); - let view_sync_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network_da); - let vid_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network); - - (quorum_chan, committee_chan, view_sync_chan, vid_chan) - }) - } -} - impl NodeImplementation for MemoryImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticMemoryQuorumComm; type CommitteeNetwork = StaticMemoryDAComm; - type Exchanges = SequencingMemoryExchange; fn new_channel_maps( start_view: ::Time, @@ -300,105 +108,10 @@ impl NodeImplementation for MemoryImpl { } } -// man these generics are big oof -// they're a LOT -// when are we getting HKT for rust -// smh my head - -pub type SequencingWebExchanges = Exchanges< - TestTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, ->; - -impl TestableExchange> for SequencingWebExchanges { - #[allow(clippy::arc_with_non_send_sync)] - fn gen_comm_channels( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - ) -> Box< - dyn Fn( - u64, - ) -> ( - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - ) + 'static, - > { - let network_generator = - Arc::new( as TestableNetworkingImplementation< - TestTypes, - >>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - )); - let network_da_generator = Arc::new( - as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 1, - da_committee_size, - true, - ), - ); - Box::new(move |id| { - let network = Arc::new(network_generator(id)); - let network_da = Arc::new(network_da_generator(id)); - let quorum_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network.clone()); - let committee_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network_da.clone()); - let view_sync_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network); - let vid_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network_da); - - (quorum_chan, committee_chan, view_sync_chan, vid_chan) - }) - } -} - impl NodeImplementation for WebImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticWebQuorumComm; type CommitteeNetwork = StaticWebDAComm; - type Exchanges = SequencingWebExchanges; fn new_channel_maps( start_view: ::Time, @@ -410,20 +123,10 @@ impl NodeImplementation for WebImpl { } } -pub type CombinedExchange = Exchanges< - TestTypes, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, ->; - impl NodeImplementation for CombinedImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticCombinedQuorumComm; type CommitteeNetwork = StaticCombinedDAComm; - type Exchanges = CombinedExchange; fn new_channel_maps( start_view: ::Time, @@ -434,103 +137,3 @@ impl NodeImplementation for CombinedImpl { ) } } - -impl TestableExchange> for CombinedExchange { - #[allow(clippy::arc_with_non_send_sync)] - fn gen_comm_channels( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - ) -> Box< - dyn Fn( - u64, - ) -> ( - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - , - >>::Networking, - ) + 'static, - > { - let web_server_network_generator = - Arc::new( as TestableNetworkingImplementation< - TestTypes, - >>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - )); - - let web_server_network_da_generator = Arc::new( - as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 1, - da_committee_size, - true, - ), - ); - - let libp2p_network_generator = Arc::new(, - ::SignatureKey, - > as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 2, - da_committee_size, - true, - )); - - // libp2p - Box::new(move |id| { - let web_server_network = web_server_network_generator(id); - let web_server_network_da = web_server_network_da_generator(id); - - let libp2p_network = libp2p_network_generator(id); - - let network = Arc::new(CombinedNetworks(web_server_network, libp2p_network.clone())); - let network_da = Arc::new(CombinedNetworks(web_server_network_da, libp2p_network)); - - let quorum_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network.clone()); - let committee_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network_da.clone()); - let view_sync_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network); - - let vid_chan = - <, - >>::Networking as TestableChannelImplementation<_, _>>::generate_network( - )(network_da); - (quorum_chan, committee_chan, view_sync_chan, vid_chan) - }) - } -} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 643604a0d7..915007f879 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -6,9 +6,8 @@ use crate::{ }; use commit::Committable; use hotshot::{ - traits::NodeImplementation, types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, - HotShotConsensusApi, HotShotInitializer, Networks, SystemContext, + HotShotConsensusApi, HotShotInitializer, Memberships, Networks, SystemContext, }; use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; @@ -16,13 +15,13 @@ use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, - message::{Message, Proposal}, + message::Proposal, simple_certificate::QuorumCertificate, traits::{ block_contents::BlockHeader, consensus_api::ConsensusSharedApi, - election::{ConsensusExchange, Membership}, - node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, + election::Membership, + node_implementation::NodeType, signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, }, @@ -48,32 +47,43 @@ pub async fn build_system_handle( let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); let public_key = config.my_own_validator_config.public_key; - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - TestTypes, - Message, - >>::Membership::default_election_config(config.total_nodes.get() as u64) - }); + let quorum_election_config = + config.election_config.clone().unwrap_or_else(|| { + ::Membership::default_election_config( + config.total_nodes.get() as u64 + ) + }); - let committee_election_config = config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - TestTypes, - Message, - >>::Membership::default_election_config(config.total_nodes.get() as u64) - }); + let committee_election_config = + config.election_config.clone().unwrap_or_else(|| { + ::Membership::default_election_config( + config.total_nodes.get() as u64 + ) + }); let networks_bundle = Networks { quorum_network: networks.0.clone(), da_network: networks.1.clone(), _pd: PhantomData, }; - let exchanges = >::Exchanges::create( - known_nodes_with_stake.clone(), - (quorum_election_config, committee_election_config), - networks, - public_key, - public_key.get_stake_table_entry(1u64), - private_key.clone(), - ); + + let memberships = Memberships { + quorum_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + da_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + committee_election_config, + ), + vid_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + view_sync_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config, + ), + }; SystemContext::init( public_key, @@ -81,7 +91,7 @@ pub async fn build_system_handle( node_id, config, storage, - exchanges, + memberships, networks_bundle, initializer, ConsensusMetricsValue::new(), @@ -100,8 +110,6 @@ async fn build_quorum_proposal_and_signature( let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let _quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); - let parent_view_number = &consensus.high_qc.get_view_number(); let Some(parent_view) = consensus.state_map.get(parent_view_number) else { panic!("Couldn't find high QC parent in state map."); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index dec8e71c3a..b31657c15d 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,14 +1,12 @@ use hotshot::types::SignatureKey; use hotshot_orchestrator::config::ValidatorConfigFile; -use hotshot_types::traits::election::{ConsensusExchange, Membership}; +use hotshot_types::traits::election::Membership; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; -use hotshot_types::message::Message; use hotshot_types::{ - traits::node_implementation::{NodeType, QuorumEx, TestableExchange}, - ExecutionType, HotShotConfig, ValidatorConfig, + traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, }; use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; @@ -187,7 +185,6 @@ impl TestMetadata { ) -> TestLauncher where I: NodeImplementation, - >::Exchanges: TestableExchange>, SystemContext: HotShotType, { let TestMetadata { @@ -241,11 +238,8 @@ impl TestMetadata { propose_min_round_time: Duration::from_millis(0), propose_max_round_time: Duration::from_millis(1000), // TODO what's the difference between this and the second config? - election_config: Some( as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config( - total_nodes as u64 + election_config: Some(TYPES::Membership::default_election_config( + total_nodes as u64, )), }; let TimingData { @@ -272,18 +266,15 @@ impl TestMetadata { let overall_safety_task_generator = overall_safety_properties.build(); let spinning_task_generator = spinning_properties.build(); TestLauncher { - resource_generator: - ResourceGenerators { - channel_generator: - <>::Exchanges as TestableExchange< - _, - _, - >>::gen_comm_channels( - total_nodes, num_bootstrap_nodes, da_committee_size - ), - storage: Box::new(|_| I::construct_tmp_storage().unwrap()), - config, - }, + resource_generator: ResourceGenerators { + channel_generator: >::gen_comm_channels( + total_nodes, + num_bootstrap_nodes, + da_committee_size, + ), + storage: Box::new(|_| I::construct_tmp_storage().unwrap()), + config, + }, metadata: self, txn_task_generator, overall_safety_task_generator, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 76b9cee98e..3771eb841f 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -8,15 +8,7 @@ use hotshot_task::{ task::HotShotTaskCompleted, task_launcher::TaskRunner, }; -use hotshot_types::{ - message::Message, - traits::{ - election::ConsensusExchange, - network::CommunicationChannel, - node_implementation::{ExchangesType, NodeType, QuorumCommChannel, QuorumNetwork}, - }, - HotShotConfig, -}; +use hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; use crate::spinning_task::SpinningTask; @@ -26,31 +18,13 @@ use super::{ }; pub type Networks = ( - <<>::Exchanges as ExchangesType< - TYPES, - Message, - >>::QuorumExchange as ConsensusExchange>>::Networking, - <<>::Exchanges as ExchangesType< - TYPES, - Message, - >>::CommitteeExchange as ConsensusExchange>>::Networking, - <<>::Exchanges as ExchangesType< - TYPES, - Message, - >>::ViewSyncExchange as ConsensusExchange>>::Networking, - <<>::Exchanges as ExchangesType< - TYPES, - Message, - >>::VIDExchange as ConsensusExchange>>::Networking, + >::QuorumNetwork, + >::CommitteeNetwork, ); /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. pub type Generator = Box T + 'static>; -/// Wrapper Type for quorum function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` -pub type QuorumNetworkGenerator = - Box>) -> T + 'static>; - /// Wrapper Type for committee function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` pub type CommitteeNetworkGenerator = Box) -> T + 'static>; @@ -76,10 +50,7 @@ pub type Hook = Box< >; /// generators for resources used by each node -pub struct ResourceGenerators> -where - QuorumCommChannel: CommunicationChannel, -{ +pub struct ResourceGenerators> { // generate channels pub channel_generator: Generator>, /// generate a new storage for each node diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index c8bdf99508..5ee9c54d59 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -7,7 +7,7 @@ use crate::{ spinning_task::UpDown, test_launcher::{Networks, TestLauncher}, }; -use hotshot::{traits::NodeImplementation, types::SystemContextHandle}; +use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, HotShotType, SystemContext}; use hotshot_task::{ @@ -15,15 +15,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::ConsensusMetricsValue, - message::Message, - traits::{ - election::{ConsensusExchange, Membership}, - network::CommunicationChannel, - node_implementation::{ - CommitteeCommChannel, ExchangesType, NodeType, QuorumCommChannel, QuorumEx, - }, - signature_key::SignatureKey, - }, + traits::{election::Membership, node_implementation::NodeType}, HotShotConfig, ValidatorConfig, }; use std::{ @@ -42,10 +34,7 @@ pub struct Node> { /// The runner of a test network /// spin up and down nodes, execute rounds -pub struct TestRunner> -where - QuorumCommChannel: CommunicationChannel, -{ +pub struct TestRunner> { pub(crate) launcher: TestLauncher, pub(crate) nodes: Vec>, pub(crate) late_start: HashMap>, @@ -56,22 +45,10 @@ where impl> TestRunner where SystemContext: HotShotType, - QuorumCommChannel: CommunicationChannel, - I: NodeImplementation< - TYPES, - QuorumNetwork = QuorumCommChannel, - CommitteeNetwork = CommitteeCommChannel, - >, + I: TestableNodeImplementation, { /// excecute test - pub async fn run_test(mut self) - where - I::Exchanges: ExchangesType< - TYPES, - Message, - ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), - >, - { + pub async fn run_test(mut self) { let spinning_changes = self .launcher .metadata @@ -190,19 +167,7 @@ where } /// add nodes - pub async fn add_nodes(&mut self, total: usize, late_start: &HashSet) -> Vec - where - I::Exchanges: ExchangesType< - TYPES, - Message, - ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), - >, - I: NodeImplementation< - TYPES, - QuorumNetwork = QuorumCommChannel, - CommitteeNetwork = CommitteeCommChannel, - >, - { + pub async fn add_nodes(&mut self, total: usize, late_start: &HashSet) -> Vec { let mut results = vec![]; for i in 0..total { tracing::debug!("launch node {}", i); @@ -239,31 +204,15 @@ where initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, - ) -> SystemContext - where - I::Exchanges: ExchangesType< - TYPES, - Message, - ElectionConfigs = (TYPES::ElectionConfigType, I::CommitteeElectionConfig), - >, - I: NodeImplementation< - TYPES, - QuorumNetwork = QuorumCommChannel, - CommitteeNetwork = CommitteeCommChannel, - >, - { + ) -> SystemContext { let node_id = self.next_node_id; self.next_node_id += 1; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); - let entry = public_key.get_stake_table_entry(validator_config.stake_value); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - as ConsensusExchange< - TYPES, - Message, - >>::Membership::default_election_config(config.total_nodes.get() as u64) + TYPES::Membership::default_election_config(config.total_nodes.get() as u64) }); let committee_election_config = I::committee_election_config_generator(); let network_bundle = hotshot::Networks { @@ -271,24 +220,33 @@ where da_network: networks.1.clone(), _pd: PhantomData, }; - let exchanges = I::Exchanges::create( - known_nodes_with_stake.clone(), - ( - quorum_election_config, + + let memberships = Memberships { + quorum_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + da_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), committee_election_config(config.da_committee_size as u64), ), - networks, - public_key.clone(), - entry.clone(), - private_key.clone(), - ); + vid_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + view_sync_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config, + ), + }; + SystemContext::new( public_key, private_key, node_id, config, storage, - exchanges, + memberships, network_bundle, initializer, ConsensusMetricsValue::new(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 974dc7fb36..5f53cfa4e4 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -10,15 +10,16 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::{build_quorum_proposal, key_pair_for_id}, }; -use hotshot_types::simple_vote::QuorumData; use hotshot_types::simple_vote::QuorumVote; use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::{ - election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, - }, + traits::state::ConsensusTime, +}; +use hotshot_types::{ + simple_vote::QuorumData, + traits::{consensus_api::ConsensusSharedApi, election::Membership}, }; use std::collections::HashMap; @@ -32,7 +33,7 @@ async fn build_vote( let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let quorum_exchange = api.inner.exchanges.quorum_exchange().clone(); + let membership = api.inner.memberships.quorum_membership.clone(); let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); @@ -65,15 +66,15 @@ async fn build_vote( block_payload: None, rejected: Vec::new(), timestamp: 0, - proposer_id: quorum_exchange.get_leader(view).to_bytes(), + proposer_id: membership.get_leader(view).to_bytes(), }; let vote = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), }, view, - quorum_exchange.public_key(), - quorum_exchange.private_key(), + api.public_key(), + api.private_key(), ); GeneralConsensusMessage::::Vote(vote) } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 28fa3988f3..571c3ef2c7 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,5 +1,5 @@ use commit::Committable; -use hotshot::HotShotConsensusApi; +use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ @@ -7,8 +7,7 @@ use hotshot_types::{ data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, traits::{ - consensus_api::ConsensusSharedApi, election::ConsensusExchange, - node_implementation::ExchangesType, state::ConsensusTime, + consensus_api::ConsensusSharedApi, node_implementation::NodeType, state::ConsensusTime, }, }; use std::{collections::HashMap, marker::PhantomData}; @@ -22,9 +21,7 @@ async fn test_da_task() { use hotshot::tasks::add_da_task; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{ - block_impl::VIDBlockPayload, message::Proposal, traits::election::CommitteeExchangeType, - }; + use hotshot_types::{block_impl::VIDBlockPayload, message::Proposal}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -34,7 +31,6 @@ async fn test_da_task() { let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let transactions = vec![VIDTransaction(vec![0])]; let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); @@ -44,7 +40,8 @@ async fn test_da_task() { payload_commitment, }; - let signature = committee_exchange.sign_da_proposal(&block.commit()); + let signature = + ::SignatureKey::sign(api.private_key(), block.commit().as_ref()); let proposal = DAProposal { block_payload: block.clone(), view_number: ViewNumber::new(2), @@ -89,8 +86,8 @@ async fn test_da_task() { payload_commit: block.commit(), }, ViewNumber::new(2), - committee_exchange.public_key(), - committee_exchange.private_key(), + api.public_key(), + api.private_key(), ); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 151e912647..3c99b4e2bc 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -12,12 +12,9 @@ use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; use hotshot_types::block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}; use hotshot_types::message::Message; -use hotshot_types::traits::election::{ - CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange, -}; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; -use hotshot_types::traits::node_implementation::{ChannelMaps, Exchanges, NodeType}; +use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, @@ -68,14 +65,6 @@ impl NodeImplementation for TestImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - type Exchanges = Exchanges< - Test, - Message, - QuorumExchange>, - CommitteeExchange>, - ViewSyncExchange>, - VIDExchange>, - >; fn new_channel_maps( start_view: ::Time, diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f374be0649..d625265926 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,5 +1,5 @@ use commit::Committable; -use hotshot::HotShotConsensusApi; +use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, @@ -7,9 +7,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, - traits::{ - consensus_api::ConsensusSharedApi, node_implementation::ExchangesType, state::ConsensusTime, - }, + traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, }; use std::{collections::HashMap, marker::PhantomData}; @@ -27,7 +25,6 @@ async fn test_network_task() { block_impl::{VIDBlockPayload, VIDTransaction}, data::VidDisperse, message::Proposal, - traits::election::CommitteeExchangeType, }; async_compatibility_layer::logging::setup_logging(); @@ -38,7 +35,6 @@ async fn test_network_task() { let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let priv_key = api.private_key(); let vid = vid_init(); @@ -50,7 +46,11 @@ async fn test_network_task() { transactions, payload_commitment, }; - let signature = committee_exchange.sign_da_proposal(&block.commit()); + let signature = + ::SignatureKey::sign( + api.private_key(), + block.commit().as_ref(), + ); let da_proposal = Proposal { data: DAProposal { block_payload: block.clone(), diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 6b71a210c1..0e0f15465d 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,5 +1,5 @@ use commit::Committable; -use hotshot::{tasks::add_vid_task, HotShotConsensusApi}; +use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, @@ -8,12 +8,9 @@ use hotshot_testing::{ use hotshot_types::{ block_impl::VIDTransaction, data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{ - consensus_api::ConsensusSharedApi, election::ConsensusExchange, - node_implementation::ExchangesType, state::ConsensusTime, - }, + traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, }; -use hotshot_types::{simple_vote::VIDVote, traits::election::VIDExchangeType}; +use hotshot_types::{simple_vote::VIDVote, traits::node_implementation::NodeType}; use std::collections::HashMap; use std::marker::PhantomData; @@ -35,7 +32,6 @@ async fn test_vid_task() { let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let vid_exchange = api.inner.exchanges.vid_exchange().clone(); let pub_key = *api.public_key(); let vid = vid_init(); @@ -48,7 +44,8 @@ async fn test_vid_task() { payload_commitment, }; - let signature = vid_exchange.sign_vid_disperse(&block.commit()); + let signature = + ::SignatureKey::sign(api.private_key(), block.commit().as_ref()); let proposal: DAProposal = DAProposal { block_payload: block.clone(), view_number: ViewNumber::new(2), @@ -96,8 +93,8 @@ async fn test_vid_task() { payload_commit: block.commit(), }, ViewNumber::new(2), - vid_exchange.public_key(), - vid_exchange.private_key(), + api.public_key(), + api.private_key(), ); output.insert(HotShotEvent::VidVoteSend(vid_vote), 1); diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 19abb33c7d..46ed361c03 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,12 +1,7 @@ use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; -use hotshot_types::{ - data::ViewNumber, - traits::{ - election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, - }, -}; +use hotshot_types::{data::ViewNumber, traits::state::ConsensusTime}; use std::collections::HashMap; #[cfg(test)] @@ -29,7 +24,6 @@ async fn test_view_sync_task() { let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let view_sync_exchange = api.inner.exchanges.view_sync_exchange().clone(); let vote_data = ViewSyncPreCommitData { relay: 0, @@ -38,8 +32,8 @@ async fn test_view_sync_task() { let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, ::Time::new(5), - view_sync_exchange.public_key(), - view_sync_exchange.private_key(), + hotshot_types::traits::consensus_api::ConsensusSharedApi::public_key(&api), + hotshot_types::traits::consensus_api::ConsensusSharedApi::private_key(&api), ); tracing::error!("Vote in test is {:?}", vote.clone()); diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 35b43536f9..d8cfeba6e2 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -3,21 +3,12 @@ // Needed to avoid the non-biding `let` warning. #![allow(clippy::let_underscore_untyped)] -use super::{ - node_implementation::{NodeImplementation, NodeType}, - signature_key::EncodedSignature, -}; -use crate::data::Leaf; +use super::node_implementation::NodeType; -use crate::traits::{ - network::{CommunicationChannel, NetworkMsg}, - signature_key::SignatureKey, -}; -use commit::Commitment; -use derivative::Derivative; +use crate::traits::signature_key::SignatureKey; use snafu::Snafu; -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, marker::PhantomData, num::NonZeroU64}; +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; /// Error for election problems #[derive(Snafu, Debug)] @@ -86,530 +77,3 @@ pub trait Membership: /// Returns the threshold for a specific `Membership` implementation fn failure_threshold(&self) -> NonZeroU64; } - -/// Protocol for exchanging proposals and votes to make decisions in a distributed network. -/// -/// An instance of [`ConsensusExchange`] represents the state of one participant in the protocol, -/// allowing them to vote and query information about the overall state of the protocol (such as -/// membership and leader status). -pub trait ConsensusExchange: Send + Sync { - /// The committee eligible to make decisions. - type Membership: Membership; - /// Network used by [`Membership`](Self::Membership) to communicate. - type Networking: CommunicationChannel; - - /// Join a [`ConsensusExchange`] with the given identity (`pk` and `sk`). - fn create( - entries: Vec<::StakeTableEntry>, - config: TYPES::ElectionConfigType, - network: Self::Networking, - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self; - - /// The network being used by this exchange. - fn network(&self) -> &Self::Networking; - - /// The leader of the [`Membership`](Self::Membership) at time `view_number`. - fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { - self.membership().get_leader(view_number) - } - - /// Whether this participant is leader at time `view_number`. - fn is_leader(&self, view_number: TYPES::Time) -> bool { - &self.get_leader(view_number) == self.public_key() - } - - /// Threshold required to approve a [`Proposal`](Self::Proposal). - fn success_threshold(&self) -> NonZeroU64 { - self.membership().success_threshold() - } - - /// Threshold required to know a success threshold will not be reached - fn failure_threshold(&self) -> NonZeroU64 { - self.membership().failure_threshold() - } - - /// The total number of nodes in the committee. - fn total_nodes(&self) -> usize { - self.membership().total_nodes() - } - - /// The committee which votes on proposals. - fn membership(&self) -> &TYPES::Membership; - - /// This participant's public key. - fn public_key(&self) -> &TYPES::SignatureKey; - - /// This participant's private key. - fn private_key(&self) -> &::PrivateKey; -} - -/// A [`ConsensusExchange`] where participants vote to provide availability for blobs of data. -pub trait CommitteeExchangeType: - ConsensusExchange -{ - /// Sign a DA proposal. - fn sign_da_proposal( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature; -} - -/// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. -#[derive(Derivative)] -#[derivative(Clone, Debug)] -pub struct CommitteeExchange< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, -> { - /// The network being used by this exchange. - network: NETWORK, - /// The committee which votes on proposals. - membership: TYPES::Membership, - /// This participant's public key. - public_key: TYPES::SignatureKey, - /// Entry with public key and staking value for certificate aggregation - entry: ::StakeTableEntry, - /// This participant's private key. - #[derivative(Debug = "ignore")] - private_key: ::PrivateKey, - #[doc(hidden)] - _pd: PhantomData<(TYPES, MEMBERSHIP, M)>, -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > CommitteeExchangeType for CommitteeExchange -{ - /// Sign a DA proposal.Self as ConsensusExchange - fn sign_da_proposal( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature { - let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); - signature - } -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > ConsensusExchange for CommitteeExchange -{ - type Membership = MEMBERSHIP; - type Networking = NETWORK; - - fn create( - entries: Vec<::StakeTableEntry>, - config: TYPES::ElectionConfigType, - network: Self::Networking, - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self { - let membership = ::Membership::create_election(entries, config); - Self { - network, - membership, - public_key: pk, - entry, - private_key: sk, - _pd: PhantomData, - } - } - fn network(&self) -> &NETWORK { - &self.network - } - - fn membership(&self) -> &TYPES::Membership { - &self.membership - } - fn public_key(&self) -> &TYPES::SignatureKey { - &self.public_key - } - fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { - &self.private_key - } -} - -/// A [`ConsensusExchange`] where participants vote to provide availability for blobs of data. -pub trait VIDExchangeType: ConsensusExchange { - /// Sign a VID disperse - fn sign_vid_disperse( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature; -} - -/// Standard implementation of [`VIDExchangeType`] -#[derive(Derivative)] -#[derivative(Clone, Debug)] -pub struct VIDExchange< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, -> { - /// The network being used by this exchange. - network: NETWORK, - /// The committee which votes on proposals. - membership: TYPES::Membership, - /// This participant's public key. - public_key: TYPES::SignatureKey, - /// Entry with public key and staking value for certificate aggregation - entry: ::StakeTableEntry, - /// This participant's private key. - #[derivative(Debug = "ignore")] - private_key: ::PrivateKey, - #[doc(hidden)] - _pd: PhantomData<(TYPES, MEMBERSHIP, M)>, -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > VIDExchangeType for VIDExchange -{ - /// Sign a VID proposal. - fn sign_vid_disperse( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature { - let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); - signature - } -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > ConsensusExchange for VIDExchange -{ - type Membership = MEMBERSHIP; - type Networking = NETWORK; - - fn create( - entries: Vec<::StakeTableEntry>, - config: TYPES::ElectionConfigType, - network: Self::Networking, - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self { - let membership = ::Membership::create_election(entries, config); - Self { - network, - membership, - public_key: pk, - entry, - private_key: sk, - _pd: PhantomData, - } - } - fn network(&self) -> &NETWORK { - &self.network - } - - fn membership(&self) -> &TYPES::Membership { - &self.membership - } - fn public_key(&self) -> &TYPES::SignatureKey { - &self.public_key - } - fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { - &self.private_key - } -} - -/// A [`ConsensusExchange`] where participants vote to append items to a log. -pub trait QuorumExchangeType: ConsensusExchange { - /// Sign a validating or commitment proposal. - fn sign_validating_or_commitment_proposal>( - &self, - leaf_commitment: &Commitment>, - ) -> EncodedSignature; - - /// Sign a block payload commitment. - fn sign_payload_commitment( - &self, - payload_commitment: Commitment, - ) -> EncodedSignature; -} - -/// Standard implementation of [`QuroumExchangeType`] based on Hot Stuff consensus. -#[derive(Derivative)] -#[derivative(Clone, Debug)] -pub struct QuorumExchange< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, -> { - /// The network being used by this exchange. - network: NETWORK, - /// The committee which votes on proposals. - membership: TYPES::Membership, - /// This participant's public key. - public_key: TYPES::SignatureKey, - /// Entry with public key and staking value for certificate aggregation - entry: ::StakeTableEntry, - /// This participant's private key. - #[derivative(Debug = "ignore")] - private_key: ::PrivateKey, - #[doc(hidden)] - _pd: PhantomData<(Leaf, MEMBERSHIP, M)>, -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > QuorumExchangeType for QuorumExchange -{ - /// Sign a validating or commitment proposal. - fn sign_validating_or_commitment_proposal>( - &self, - leaf_commitment: &Commitment>, - ) -> EncodedSignature { - let signature = TYPES::SignatureKey::sign(&self.private_key, leaf_commitment.as_ref()); - signature - } - - fn sign_payload_commitment( - &self, - payload_commitment: Commitment<::BlockPayload>, - ) -> EncodedSignature { - TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()) - } -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > ConsensusExchange for QuorumExchange -{ - type Membership = MEMBERSHIP; - type Networking = NETWORK; - - fn create( - entries: Vec<::StakeTableEntry>, - config: TYPES::ElectionConfigType, - network: Self::Networking, - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self { - let membership = ::Membership::create_election(entries, config); - Self { - network, - membership, - public_key: pk, - entry, - private_key: sk, - _pd: PhantomData, - } - } - - fn network(&self) -> &NETWORK { - &self.network - } - - fn membership(&self) -> &TYPES::Membership { - &self.membership - } - fn public_key(&self) -> &TYPES::SignatureKey { - &self.public_key - } - fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { - &self.private_key - } -} - -/// A [`ConsensusExchange`] where participants synchronize which view the network should be in. -pub trait ViewSyncExchangeType: - ConsensusExchange -{ -} - -/// Standard implementation of [`ViewSyncExchangeType`] based on Hot Stuff consensus. -#[derive(Derivative)] -#[derivative(Clone, Debug)] -pub struct ViewSyncExchange< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, -> { - /// The network being used by this exchange. - network: NETWORK, - /// The committee which votes on proposals. - membership: TYPES::Membership, - /// This participant's public key. - public_key: TYPES::SignatureKey, - /// Entry with public key and staking value for certificate aggregation in the stake table. - entry: ::StakeTableEntry, - /// This participant's private key. - #[derivative(Debug = "ignore")] - private_key: ::PrivateKey, - #[doc(hidden)] - _pd: PhantomData<(MEMBERSHIP, M)>, -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > ViewSyncExchangeType for ViewSyncExchange -{ -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > ConsensusExchange for ViewSyncExchange -{ - type Membership = MEMBERSHIP; - type Networking = NETWORK; - - fn create( - entries: Vec<::StakeTableEntry>, - config: TYPES::ElectionConfigType, - network: Self::Networking, - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self { - let membership = ::Membership::create_election(entries, config); - Self { - network, - membership, - public_key: pk, - entry, - private_key: sk, - _pd: PhantomData, - } - } - - fn network(&self) -> &NETWORK { - &self.network - } - - fn membership(&self) -> &TYPES::Membership { - &self.membership - } - fn public_key(&self) -> &TYPES::SignatureKey { - &self.public_key - } - fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { - &self.private_key - } -} - -// TODO ED All the exchange structs are the same. We could just considate them into one struct -/// Standard implementation of a Timeout Exchange based on Hot Stuff consensus. -#[derive(Derivative)] -#[derivative(Clone, Debug)] -pub struct TimeoutExchange< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, -> { - /// The network being used by this exchange. - network: NETWORK, - /// The committee which votes on proposals. - membership: TYPES::Membership, - /// This participant's public key. - public_key: TYPES::SignatureKey, - /// Entry with public key and staking value for certificate aggregation in the stake table. - entry: ::StakeTableEntry, - /// This participant's private key. - #[derivative(Debug = "ignore")] - private_key: ::PrivateKey, - #[doc(hidden)] - _pd: PhantomData<(MEMBERSHIP, M)>, -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > TimeoutExchange -{ -} - -/// Trait defining functiosn for a `TimeoutExchange` -pub trait TimeoutExchangeType: ConsensusExchange {} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > TimeoutExchangeType for TimeoutExchange -{ -} - -impl< - TYPES: NodeType, - MEMBERSHIP: Membership, - NETWORK: CommunicationChannel, - M: NetworkMsg, - > ConsensusExchange for TimeoutExchange -{ - type Membership = MEMBERSHIP; - type Networking = NETWORK; - - fn create( - entries: Vec<::StakeTableEntry>, - config: TYPES::ElectionConfigType, - network: Self::Networking, - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self { - let membership = ::Membership::create_election(entries, config); - Self { - network, - membership, - public_key: pk, - entry, - private_key: sk, - _pd: PhantomData, - } - } - - fn network(&self) -> &NETWORK { - &self.network - } - - fn membership(&self) -> &TYPES::Membership { - &self.membership - } - fn public_key(&self) -> &TYPES::SignatureKey { - &self.public_key - } - fn private_key(&self) -> &<::SignatureKey as SignatureKey>::PrivateKey { - &self.private_key - } -} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 9c6b6bb137..72ead0d2db 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -368,11 +368,11 @@ pub trait TestableNetworkingImplementation { fn in_flight_message_count(&self) -> Option; } /// Describes additional functionality needed by the test communication channel -pub trait TestableChannelImplementation: - CommunicationChannel -{ +pub trait TestableChannelImplementation: CommunicationChannel { /// generates the `CommunicationChannel` given it's associated network type - fn generate_network() -> Box) -> Self + 'static>; + #[allow(clippy::type_complexity)] + fn generate_network( + ) -> Box>::NETWORK>) -> Self + 'static>; } /// Changes that can occur in the network diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 98b5bf15a0..c65168cc73 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -5,18 +5,15 @@ use super::{ block_contents::{BlockHeader, Transaction}, - election::{ - CommitteeExchangeType, ConsensusExchange, ElectionConfig, QuorumExchangeType, - TimeoutExchange, TimeoutExchangeType, VIDExchangeType, ViewSyncExchangeType, - }, - network::{CommunicationChannel, NetworkMsg, TestableNetworkingImplementation}, + election::ElectionConfig, + network::{CommunicationChannel, TestableNetworkingImplementation}, state::{ConsensusTime, TestableBlock, TestableState}, storage::{StorageError, StorageState, TestableStorage}, State, }; use crate::{ data::{Leaf, TestableLeaf}, - message::{Message, ProcessedSequencingMessage}, + message::ProcessedSequencingMessage, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, storage::Storage, BlockPayload, @@ -30,7 +27,6 @@ use std::{ collections::BTreeMap, fmt::Debug, hash::Hash, - marker::PhantomData, sync::{atomic::AtomicBool, Arc}, }; /// Alias for the [`ProcessedConsensusMessage`] type of a [`NodeImplementation`]. @@ -116,11 +112,6 @@ pub trait NodeImplementation: /// Storage type for this consensus implementation type Storage: Storage + Clone; - /// Consensus type selected exchanges. - /// - /// Implements either `ValidatingExchangesType` or `ExchangesType`. - type Exchanges: ExchangesType>; - /// Network for all nodes type QuorumNetwork: CommunicationChannel; /// Network for those in the DA committee @@ -133,315 +124,315 @@ pub trait NodeImplementation: ) -> (ChannelMaps, Option>); } -/// Contains the protocols for exchanging proposals and votes. -#[allow(clippy::type_complexity)] -#[async_trait] -pub trait ExchangesType: Send + Sync { - /// Protocol for exchanging data availability proposals and votes. - type CommitteeExchange: CommitteeExchangeType + Clone + Debug; - - /// Protocol for exchanging VID proposals and votes - type VIDExchange: VIDExchangeType + Clone + Debug; - - /// Get the committee exchange - fn committee_exchange(&self) -> &Self::CommitteeExchange; - - /// Get the timeout exchange - fn timeout_exchange(&self) -> &Self::TimeoutExchange; - - // type QuorumNetwork: CommunicationChannel; - // type CommitteeNetwork: CommunicationChannel; - - /// Protocol for exchanging quorum proposals and votes. - type QuorumExchange: QuorumExchangeType + Clone + Debug; - - /// Protocol for exchanging view sync proposals and votes. - type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; - - /// Protocol for receiving timeout votes - type TimeoutExchange: TimeoutExchangeType + Clone + Debug; - - /// Election configurations for exchanges - type ElectionConfigs; - - /// Create all exchanges. - fn create( - entries: Vec<::StakeTableEntry>, - configs: Self::ElectionConfigs, - networks: ( - >::Networking, - >::Networking, - >::Networking, - >::Networking, - ), - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self; - - /// Get the quorum exchange. - fn quorum_exchange(&self) -> &Self::QuorumExchange; - - /// Get the view sync exchange. - fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; - - /// Get the VID exchange - fn vid_exchange(&self) -> &Self::VIDExchange; - - /// BlockPayload the underlying networking interfaces until node is successfully initialized into the - /// networks. - async fn wait_for_networks_ready(&self); - - /// Shut down the the underlying networking interfaces. - async fn shut_down_networks(&self); -} - -/// an exchange that is testable -pub trait TestableExchange: - ExchangesType -{ - /// generate communication channels - #[allow(clippy::type_complexity)] - fn gen_comm_channels( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - ) -> Box< - dyn Fn( - u64, - ) -> ( - >::Networking, - >::Networking, - >::Networking, - >::Networking, - ) + 'static, - >; -} - -/// Implementes [`ExchangesType`]. -#[derive(Clone, Debug)] -pub struct Exchanges< - TYPES: NodeType, - MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, - COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, - VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, - VIDEXCHANGE: VIDExchangeType + Clone + Debug, -> { - /// Quorum exchange. - quorum_exchange: QUORUMEXCHANGE, - - /// View sync exchange. - view_sync_exchange: VIEWSYNCEXCHANGE, - - /// Committee exchange. - committee_exchange: COMMITTEEEXCHANGE, - - /// VID exchange - vid_exchange: VIDEXCHANGE, - - /// Timeout exchange - // This type can be simplified once we rework the exchanges trait - // It is here to avoid needing to instantiate it where all the other exchanges are instantiated - // https://github.com/EspressoSystems/HotShot/issues/1799 - #[allow(clippy::type_complexity)] - pub timeout_exchange: - TimeoutExchange< - TYPES, - < as ExchangesType>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Membership, - >::Networking, - MESSAGE, - >, - - /// Phantom data - _phantom: PhantomData<(TYPES, MESSAGE)>, -} - -#[async_trait] -impl - ExchangesType - for Exchanges -where - TYPES: NodeType, - MESSAGE: NetworkMsg, - QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, - COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, - VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, - VIDEXCHANGE: VIDExchangeType + Clone + Debug, -{ - type CommitteeExchange = COMMITTEEEXCHANGE; - type QuorumExchange = QUORUMEXCHANGE; - type ViewSyncExchange = VIEWSYNCEXCHANGE; - type VIDExchange = VIDEXCHANGE; - #[allow(clippy::type_complexity)] - type TimeoutExchange = - TimeoutExchange< - TYPES, - < as ExchangesType>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Membership, - >::Networking, - MESSAGE, - >; - - type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); - - fn committee_exchange(&self) -> &COMMITTEEEXCHANGE { - &self.committee_exchange - } - - fn timeout_exchange(&self) -> &Self::TimeoutExchange { - &self.timeout_exchange - } - - fn create( - entries: Vec<::StakeTableEntry>, - configs: Self::ElectionConfigs, - networks: ( - >::Networking, - >::Networking, - >::Networking, - >::Networking, - ), - pk: TYPES::SignatureKey, - entry: ::StakeTableEntry, - sk: ::PrivateKey, - ) -> Self { - let quorum_exchange = QUORUMEXCHANGE::create( - entries.clone(), - configs.0.clone(), - networks.0.clone(), - pk.clone(), - entry.clone(), - sk.clone(), - ); - #[allow(clippy::type_complexity)] - let timeout_exchange: TimeoutExchange< - TYPES, - < as ExchangesType>::QuorumExchange as ConsensusExchange< - TYPES, - MESSAGE, - >>::Membership, - >::Networking, - MESSAGE, - > = TimeoutExchange::create( - entries.clone(), - configs.0.clone(), - networks.0, - pk.clone(), - entry.clone(), - sk.clone(), - ); - - let view_sync_exchange = VIEWSYNCEXCHANGE::create( - entries.clone(), - configs.0, - networks.2, - pk.clone(), - entry.clone(), - sk.clone(), - ); - - let committee_exchange = COMMITTEEEXCHANGE::create( - entries.clone(), - configs.1.clone(), - networks.1, - pk.clone(), - entry.clone(), - sk.clone(), - ); - - // RM TODO: figure out if this is the proper config - // issue: https://github.com/EspressoSystems/HotShot/issues/1918 - let vid_exchange = VIDEXCHANGE::create(entries, configs.1, networks.3, pk, entry, sk); - - Self { - quorum_exchange, - committee_exchange, - view_sync_exchange, - vid_exchange, - timeout_exchange, - _phantom: PhantomData, - } - } - - fn quorum_exchange(&self) -> &Self::QuorumExchange { - &self.quorum_exchange - } - - fn view_sync_exchange(&self) -> &Self::ViewSyncExchange { - &self.view_sync_exchange - } - - fn vid_exchange(&self) -> &Self::VIDExchange { - &self.vid_exchange - } - - async fn wait_for_networks_ready(&self) { - self.quorum_exchange.network().wait_for_ready().await; - self.committee_exchange.network().wait_for_ready().await; - } - - async fn shut_down_networks(&self) { - self.quorum_exchange.network().shut_down().await; - self.committee_exchange.network().shut_down().await; - } -} - -/// Alias for the [`QuorumExchange`] type. -pub type QuorumEx = <>::Exchanges as ExchangesType< - TYPES, - Message, ->>::QuorumExchange; - -/// Alias for `TimeoutExchange` type -pub type TimeoutEx = <>::Exchanges as ExchangesType< - TYPES, - Message, ->>::TimeoutExchange; - -/// Alias for the [`CommitteeExchange`] type. -pub type CommitteeEx = <>::Exchanges as ExchangesType< - TYPES, - Message, ->>::CommitteeExchange; - -/// Alias for the [`VIDExchange`] type. -pub type VIDEx = <>::Exchanges as ExchangesType< - TYPES, - Message, ->>::VIDExchange; - -/// Alias for the [`ViewSyncExchange`] type. -pub type ViewSyncEx = <>::Exchanges as ExchangesType< - TYPES, - Message, ->>::ViewSyncExchange; +// /// Contains the protocols for exchanging proposals and votes. +// #[allow(clippy::type_complexity)] +// #[async_trait] +// pub trait ExchangesType: Send + Sync { +// /// Protocol for exchanging data availability proposals and votes. +// type CommitteeExchange: CommitteeExchangeType + Clone + Debug; + +// /// Protocol for exchanging VID proposals and votes +// type VIDExchange: VIDExchangeType + Clone + Debug; + +// /// Get the committee exchange +// fn committee_exchange(&self) -> &Self::CommitteeExchange; + +// /// Get the timeout exchange +// fn timeout_exchange(&self) -> &Self::TimeoutExchange; + +// // type QuorumNetwork: CommunicationChannel; +// // type CommitteeNetwork: CommunicationChannel; + +// /// Protocol for exchanging quorum proposals and votes. +// type QuorumExchange: QuorumExchangeType + Clone + Debug; + +// /// Protocol for exchanging view sync proposals and votes. +// type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; + +// /// Protocol for receiving timeout votes +// type TimeoutExchange: TimeoutExchangeType + Clone + Debug; + +// /// Election configurations for exchanges +// type ElectionConfigs; + +// /// Create all exchanges. +// fn create( +// entries: Vec<::StakeTableEntry>, +// configs: Self::ElectionConfigs, +// networks: ( +// >::Networking, +// >::Networking, +// >::Networking, +// >::Networking, +// ), +// pk: TYPES::SignatureKey, +// entry: ::StakeTableEntry, +// sk: ::PrivateKey, +// ) -> Self; + +// /// Get the quorum exchange. +// fn quorum_exchange(&self) -> &Self::QuorumExchange; + +// /// Get the view sync exchange. +// fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; + +// /// Get the VID exchange +// fn vid_exchange(&self) -> &Self::VIDExchange; + +// /// BlockPayload the underlying networking interfaces until node is successfully initialized into the +// /// networks. +// async fn wait_for_networks_ready(&self); + +// /// Shut down the the underlying networking interfaces. +// async fn shut_down_networks(&self); +// } + +// /// an exchange that is testable +// pub trait TestableExchange: +// ExchangesType +// { +// /// generate communication channels +// #[allow(clippy::type_complexity)] +// fn gen_comm_channels( +// expected_node_count: usize, +// num_bootstrap: usize, +// da_committee_size: usize, +// ) -> Box< +// dyn Fn( +// u64, +// ) -> ( +// >::Networking, +// >::Networking, +// >::Networking, +// >::Networking, +// ) + 'static, +// >; +// } + +// /// Implementes [`ExchangesType`]. +// #[derive(Clone, Debug)] +// pub struct Exchanges< +// TYPES: NodeType, +// MESSAGE: NetworkMsg, +// QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, +// COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, +// VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, +// VIDEXCHANGE: VIDExchangeType + Clone + Debug, +// > { +// /// Quorum exchange. +// quorum_exchange: QUORUMEXCHANGE, + +// /// View sync exchange. +// view_sync_exchange: VIEWSYNCEXCHANGE, + +// /// Committee exchange. +// committee_exchange: COMMITTEEEXCHANGE, + +// /// VID exchange +// vid_exchange: VIDEXCHANGE, + +// /// Timeout exchange +// // This type can be simplified once we rework the exchanges trait +// // It is here to avoid needing to instantiate it where all the other exchanges are instantiated +// // https://github.com/EspressoSystems/HotShot/issues/1799 +// #[allow(clippy::type_complexity)] +// pub timeout_exchange: +// TimeoutExchange< +// TYPES, +// < as ExchangesType>::QuorumExchange as ConsensusExchange< +// TYPES, +// MESSAGE, +// >>::Membership, +// >::Networking, +// MESSAGE, +// >, + +// /// Phantom data +// _phantom: PhantomData<(TYPES, MESSAGE)>, +// } + +// #[async_trait] +// impl +// ExchangesType +// for Exchanges +// where +// TYPES: NodeType, +// MESSAGE: NetworkMsg, +// QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, +// COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, +// VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, +// VIDEXCHANGE: VIDExchangeType + Clone + Debug, +// { +// type CommitteeExchange = COMMITTEEEXCHANGE; +// type QuorumExchange = QUORUMEXCHANGE; +// type ViewSyncExchange = VIEWSYNCEXCHANGE; +// type VIDExchange = VIDEXCHANGE; +// #[allow(clippy::type_complexity)] +// type TimeoutExchange = +// TimeoutExchange< +// TYPES, +// < as ExchangesType>::QuorumExchange as ConsensusExchange< +// TYPES, +// MESSAGE, +// >>::Membership, +// >::Networking, +// MESSAGE, +// >; + +// type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); + +// fn committee_exchange(&self) -> &COMMITTEEEXCHANGE { +// &self.committee_exchange +// } + +// fn timeout_exchange(&self) -> &Self::TimeoutExchange { +// &self.timeout_exchange +// } + +// fn create( +// entries: Vec<::StakeTableEntry>, +// configs: Self::ElectionConfigs, +// networks: ( +// >::Networking, +// >::Networking, +// >::Networking, +// >::Networking, +// ), +// pk: TYPES::SignatureKey, +// entry: ::StakeTableEntry, +// sk: ::PrivateKey, +// ) -> Self { +// let quorum_exchange = QUORUMEXCHANGE::create( +// entries.clone(), +// configs.0.clone(), +// networks.0.clone(), +// pk.clone(), +// entry.clone(), +// sk.clone(), +// ); +// #[allow(clippy::type_complexity)] +// let timeout_exchange: TimeoutExchange< +// TYPES, +// < as ExchangesType>::QuorumExchange as ConsensusExchange< +// TYPES, +// MESSAGE, +// >>::Membership, +// >::Networking, +// MESSAGE, +// > = TimeoutExchange::create( +// entries.clone(), +// configs.0.clone(), +// networks.0, +// pk.clone(), +// entry.clone(), +// sk.clone(), +// ); + +// let view_sync_exchange = VIEWSYNCEXCHANGE::create( +// entries.clone(), +// configs.0, +// networks.2, +// pk.clone(), +// entry.clone(), +// sk.clone(), +// ); + +// let committee_exchange = COMMITTEEEXCHANGE::create( +// entries.clone(), +// configs.1.clone(), +// networks.1, +// pk.clone(), +// entry.clone(), +// sk.clone(), +// ); + +// // RM TODO: figure out if this is the proper config +// // issue: https://github.com/EspressoSystems/HotShot/issues/1918 +// let vid_exchange = VIDEXCHANGE::create(entries, configs.1, networks.3, pk, entry, sk); + +// Self { +// quorum_exchange, +// committee_exchange, +// view_sync_exchange, +// vid_exchange, +// timeout_exchange, +// _phantom: PhantomData, +// } +// } + +// fn quorum_exchange(&self) -> &Self::QuorumExchange { +// &self.quorum_exchange +// } + +// fn view_sync_exchange(&self) -> &Self::ViewSyncExchange { +// &self.view_sync_exchange +// } + +// fn vid_exchange(&self) -> &Self::VIDExchange { +// &self.vid_exchange +// } + +// async fn wait_for_networks_ready(&self) { +// self.quorum_exchange.network().wait_for_ready().await; +// self.committee_exchange.network().wait_for_ready().await; +// } + +// async fn shut_down_networks(&self) { +// self.quorum_exchange.network().shut_down().await; +// self.committee_exchange.network().shut_down().await; +// } +// } + +// /// Alias for the [`QuorumExchange`] type. +// pub type QuorumEx = <>::Exchanges as ExchangesType< +// TYPES, +// Message, +// >>::QuorumExchange; + +// /// Alias for `TimeoutExchange` type +// pub type TimeoutEx = <>::Exchanges as ExchangesType< +// TYPES, +// Message, +// >>::TimeoutExchange; + +// /// Alias for the [`CommitteeExchange`] type. +// pub type CommitteeEx = <>::Exchanges as ExchangesType< +// TYPES, +// Message, +// >>::CommitteeExchange; + +// /// Alias for the [`VIDExchange`] type. +// pub type VIDEx = <>::Exchanges as ExchangesType< +// TYPES, +// Message, +// >>::VIDExchange; + +// /// Alias for the [`ViewSyncExchange`] type. +// pub type ViewSyncEx = <>::Exchanges as ExchangesType< +// TYPES, +// Message, +// >>::ViewSyncExchange; /// extra functions required on a node implementation to be usable by hotshot-testing #[allow(clippy::type_complexity)] @@ -486,25 +477,33 @@ pub trait TestableNodeImplementation: NodeImplementation /// Return the full internal state. This is useful for debugging. async fn get_full_state(storage: &Self::Storage) -> StorageState; + + /// Generate the communication channels for testing + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box (Self::QuorumNetwork, Self::CommitteeNetwork)>; } #[async_trait] impl> TestableNodeImplementation for I where - CommitteeNetwork: TestableNetworkingImplementation, - QuorumNetwork: TestableNetworkingImplementation, - QuorumCommChannel: TestableChannelImplementation>, - CommitteeCommChannel: TestableChannelImplementation>, - ViewSyncCommChannel: TestableChannelImplementation>, TYPES::StateType: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, + I::QuorumNetwork: TestableChannelImplementation, + I::CommitteeNetwork: TestableChannelImplementation, + <>::QuorumNetwork as CommunicationChannel>::NETWORK: + TestableNetworkingImplementation, + <>::CommitteeNetwork as CommunicationChannel>::NETWORK: + TestableNetworkingImplementation, { type CommitteeElectionConfig = TYPES::ElectionConfigType; fn committee_election_config_generator( ) -> Box Self::CommitteeElectionConfig + 'static> { - Box::new(|num_nodes| >::default_election_config(num_nodes)) + Box::new(|num_nodes| ::Membership::default_election_config(num_nodes)) } fn state_create_random_transaction( @@ -538,47 +537,82 @@ where async fn get_full_state(storage: &Self::Storage) -> StorageState { >::get_full_state(storage).await } + #[allow(clippy::arc_with_non_send_sync)] + fn gen_comm_channels( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + ) -> Box (Self::QuorumNetwork, Self::CommitteeNetwork)> { + let network_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + ); + let da_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + true, + ); + + Box::new(move |id| { + let network = Arc::new(network_generator(id)); + let network_da = Arc::new(da_generator(id)); + let quorum_chan = + >::generate_network()( + network.clone(), + ); + let committee_chan = + >::generate_network()( + network_da, + ); + (quorum_chan, committee_chan) + }) + } } -/// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. -pub type QuorumCommChannel = - as ConsensusExchange>>::Networking; +// /// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. +// pub type QuorumCommChannel = +// as ConsensusExchange>>::Networking; -/// Communication channel for [`ViewSyncProposalType`] and [`ViewSyncVote`]. -pub type ViewSyncCommChannel = - as ConsensusExchange>>::Networking; +// /// Communication channel for [`ViewSyncProposalType`] and [`ViewSyncVote`]. +// pub type ViewSyncCommChannel = +// as ConsensusExchange>>::Networking; -/// Communication channel for [`CommitteeProposalType`] and [`DAVote`]. -pub type CommitteeCommChannel = - as ConsensusExchange>>::Networking; +// /// Communication channel for [`CommitteeProposalType`] and [`DAVote`]. +// pub type CommitteeCommChannel = +// as ConsensusExchange>>::Networking; -/// Protocol for determining membership in a consensus committee. -pub type QuorumMembership = - as ConsensusExchange>>::Membership; +// /// Protocol for determining membership in a consensus committee. +// pub type QuorumMembership = +// as ConsensusExchange>>::Membership; -/// TYPE aliase for the membership of VID exchange -pub type VIDMembership = - as ConsensusExchange>>::Membership; +// /// TYPE aliase for the membership of VID exchange +// pub type VIDMembership = +// as ConsensusExchange>>::Membership; -/// Protocol for determining membership in a DA committee. -pub type CommitteeMembership = - as ConsensusExchange>>::Membership; +// /// Protocol for determining membership in a DA committee. +// pub type CommitteeMembership = +// as ConsensusExchange>>::Membership; -/// Protocol for determining membership in a view sync committee. -pub type ViewSyncMembership = - as ConsensusExchange>>::Membership; +// /// Protocol for determining membership in a view sync committee. +// pub type ViewSyncMembership = +// as ConsensusExchange>>::Membership; -/// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -pub type QuorumNetwork = - as CommunicationChannel>::NETWORK; +// /// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels +// pub type QuorumNetwork = +// as CommunicationChannel>::NETWORK; -/// Type for the underlying committee `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -pub type CommitteeNetwork = - as CommunicationChannel>::NETWORK; +// /// Type for the underlying committee `ConnectedNetwork` that will be shared (for now) b/t Communication Channels +// pub type CommitteeNetwork = +// as CommunicationChannel>::NETWORK; -/// Type for the underlying view sync `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -pub type ViewSyncNetwork = - as CommunicationChannel>::NETWORK; +// /// Type for the underlying view sync `ConnectedNetwork` that will be shared (for now) b/t Communication Channels +// pub type ViewSyncNetwork = +// as CommunicationChannel>::NETWORK; /// Trait with all the type definitions that are used in the current hotshot setup. pub trait NodeType: From 6d1017f003eb382e7ea5445dfd4a8531c18389ee Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 17 Nov 2023 13:21:07 -0500 Subject: [PATCH 0422/1393] remove more commented code --- types/src/traits/node_implementation.rs | 351 ------------------------ 1 file changed, 351 deletions(-) diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index c65168cc73..fe3b211796 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -124,316 +124,6 @@ pub trait NodeImplementation: ) -> (ChannelMaps, Option>); } -// /// Contains the protocols for exchanging proposals and votes. -// #[allow(clippy::type_complexity)] -// #[async_trait] -// pub trait ExchangesType: Send + Sync { -// /// Protocol for exchanging data availability proposals and votes. -// type CommitteeExchange: CommitteeExchangeType + Clone + Debug; - -// /// Protocol for exchanging VID proposals and votes -// type VIDExchange: VIDExchangeType + Clone + Debug; - -// /// Get the committee exchange -// fn committee_exchange(&self) -> &Self::CommitteeExchange; - -// /// Get the timeout exchange -// fn timeout_exchange(&self) -> &Self::TimeoutExchange; - -// // type QuorumNetwork: CommunicationChannel; -// // type CommitteeNetwork: CommunicationChannel; - -// /// Protocol for exchanging quorum proposals and votes. -// type QuorumExchange: QuorumExchangeType + Clone + Debug; - -// /// Protocol for exchanging view sync proposals and votes. -// type ViewSyncExchange: ViewSyncExchangeType + Clone + Debug; - -// /// Protocol for receiving timeout votes -// type TimeoutExchange: TimeoutExchangeType + Clone + Debug; - -// /// Election configurations for exchanges -// type ElectionConfigs; - -// /// Create all exchanges. -// fn create( -// entries: Vec<::StakeTableEntry>, -// configs: Self::ElectionConfigs, -// networks: ( -// >::Networking, -// >::Networking, -// >::Networking, -// >::Networking, -// ), -// pk: TYPES::SignatureKey, -// entry: ::StakeTableEntry, -// sk: ::PrivateKey, -// ) -> Self; - -// /// Get the quorum exchange. -// fn quorum_exchange(&self) -> &Self::QuorumExchange; - -// /// Get the view sync exchange. -// fn view_sync_exchange(&self) -> &Self::ViewSyncExchange; - -// /// Get the VID exchange -// fn vid_exchange(&self) -> &Self::VIDExchange; - -// /// BlockPayload the underlying networking interfaces until node is successfully initialized into the -// /// networks. -// async fn wait_for_networks_ready(&self); - -// /// Shut down the the underlying networking interfaces. -// async fn shut_down_networks(&self); -// } - -// /// an exchange that is testable -// pub trait TestableExchange: -// ExchangesType -// { -// /// generate communication channels -// #[allow(clippy::type_complexity)] -// fn gen_comm_channels( -// expected_node_count: usize, -// num_bootstrap: usize, -// da_committee_size: usize, -// ) -> Box< -// dyn Fn( -// u64, -// ) -> ( -// >::Networking, -// >::Networking, -// >::Networking, -// >::Networking, -// ) + 'static, -// >; -// } - -// /// Implementes [`ExchangesType`]. -// #[derive(Clone, Debug)] -// pub struct Exchanges< -// TYPES: NodeType, -// MESSAGE: NetworkMsg, -// QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, -// COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, -// VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, -// VIDEXCHANGE: VIDExchangeType + Clone + Debug, -// > { -// /// Quorum exchange. -// quorum_exchange: QUORUMEXCHANGE, - -// /// View sync exchange. -// view_sync_exchange: VIEWSYNCEXCHANGE, - -// /// Committee exchange. -// committee_exchange: COMMITTEEEXCHANGE, - -// /// VID exchange -// vid_exchange: VIDEXCHANGE, - -// /// Timeout exchange -// // This type can be simplified once we rework the exchanges trait -// // It is here to avoid needing to instantiate it where all the other exchanges are instantiated -// // https://github.com/EspressoSystems/HotShot/issues/1799 -// #[allow(clippy::type_complexity)] -// pub timeout_exchange: -// TimeoutExchange< -// TYPES, -// < as ExchangesType>::QuorumExchange as ConsensusExchange< -// TYPES, -// MESSAGE, -// >>::Membership, -// >::Networking, -// MESSAGE, -// >, - -// /// Phantom data -// _phantom: PhantomData<(TYPES, MESSAGE)>, -// } - -// #[async_trait] -// impl -// ExchangesType -// for Exchanges -// where -// TYPES: NodeType, -// MESSAGE: NetworkMsg, -// QUORUMEXCHANGE: QuorumExchangeType + Clone + Debug, -// COMMITTEEEXCHANGE: CommitteeExchangeType + Clone + Debug, -// VIEWSYNCEXCHANGE: ViewSyncExchangeType + Clone + Debug, -// VIDEXCHANGE: VIDExchangeType + Clone + Debug, -// { -// type CommitteeExchange = COMMITTEEEXCHANGE; -// type QuorumExchange = QUORUMEXCHANGE; -// type ViewSyncExchange = VIEWSYNCEXCHANGE; -// type VIDExchange = VIDEXCHANGE; -// #[allow(clippy::type_complexity)] -// type TimeoutExchange = -// TimeoutExchange< -// TYPES, -// < as ExchangesType>::QuorumExchange as ConsensusExchange< -// TYPES, -// MESSAGE, -// >>::Membership, -// >::Networking, -// MESSAGE, -// >; - -// type ElectionConfigs = (TYPES::ElectionConfigType, TYPES::ElectionConfigType); - -// fn committee_exchange(&self) -> &COMMITTEEEXCHANGE { -// &self.committee_exchange -// } - -// fn timeout_exchange(&self) -> &Self::TimeoutExchange { -// &self.timeout_exchange -// } - -// fn create( -// entries: Vec<::StakeTableEntry>, -// configs: Self::ElectionConfigs, -// networks: ( -// >::Networking, -// >::Networking, -// >::Networking, -// >::Networking, -// ), -// pk: TYPES::SignatureKey, -// entry: ::StakeTableEntry, -// sk: ::PrivateKey, -// ) -> Self { -// let quorum_exchange = QUORUMEXCHANGE::create( -// entries.clone(), -// configs.0.clone(), -// networks.0.clone(), -// pk.clone(), -// entry.clone(), -// sk.clone(), -// ); -// #[allow(clippy::type_complexity)] -// let timeout_exchange: TimeoutExchange< -// TYPES, -// < as ExchangesType>::QuorumExchange as ConsensusExchange< -// TYPES, -// MESSAGE, -// >>::Membership, -// >::Networking, -// MESSAGE, -// > = TimeoutExchange::create( -// entries.clone(), -// configs.0.clone(), -// networks.0, -// pk.clone(), -// entry.clone(), -// sk.clone(), -// ); - -// let view_sync_exchange = VIEWSYNCEXCHANGE::create( -// entries.clone(), -// configs.0, -// networks.2, -// pk.clone(), -// entry.clone(), -// sk.clone(), -// ); - -// let committee_exchange = COMMITTEEEXCHANGE::create( -// entries.clone(), -// configs.1.clone(), -// networks.1, -// pk.clone(), -// entry.clone(), -// sk.clone(), -// ); - -// // RM TODO: figure out if this is the proper config -// // issue: https://github.com/EspressoSystems/HotShot/issues/1918 -// let vid_exchange = VIDEXCHANGE::create(entries, configs.1, networks.3, pk, entry, sk); - -// Self { -// quorum_exchange, -// committee_exchange, -// view_sync_exchange, -// vid_exchange, -// timeout_exchange, -// _phantom: PhantomData, -// } -// } - -// fn quorum_exchange(&self) -> &Self::QuorumExchange { -// &self.quorum_exchange -// } - -// fn view_sync_exchange(&self) -> &Self::ViewSyncExchange { -// &self.view_sync_exchange -// } - -// fn vid_exchange(&self) -> &Self::VIDExchange { -// &self.vid_exchange -// } - -// async fn wait_for_networks_ready(&self) { -// self.quorum_exchange.network().wait_for_ready().await; -// self.committee_exchange.network().wait_for_ready().await; -// } - -// async fn shut_down_networks(&self) { -// self.quorum_exchange.network().shut_down().await; -// self.committee_exchange.network().shut_down().await; -// } -// } - -// /// Alias for the [`QuorumExchange`] type. -// pub type QuorumEx = <>::Exchanges as ExchangesType< -// TYPES, -// Message, -// >>::QuorumExchange; - -// /// Alias for `TimeoutExchange` type -// pub type TimeoutEx = <>::Exchanges as ExchangesType< -// TYPES, -// Message, -// >>::TimeoutExchange; - -// /// Alias for the [`CommitteeExchange`] type. -// pub type CommitteeEx = <>::Exchanges as ExchangesType< -// TYPES, -// Message, -// >>::CommitteeExchange; - -// /// Alias for the [`VIDExchange`] type. -// pub type VIDEx = <>::Exchanges as ExchangesType< -// TYPES, -// Message, -// >>::VIDExchange; - -// /// Alias for the [`ViewSyncExchange`] type. -// pub type ViewSyncEx = <>::Exchanges as ExchangesType< -// TYPES, -// Message, -// >>::ViewSyncExchange; - /// extra functions required on a node implementation to be usable by hotshot-testing #[allow(clippy::type_complexity)] #[async_trait] @@ -537,7 +227,6 @@ where async fn get_full_state(storage: &Self::Storage) -> StorageState { >::get_full_state(storage).await } - #[allow(clippy::arc_with_non_send_sync)] fn gen_comm_channels( expected_node_count: usize, num_bootstrap: usize, @@ -574,46 +263,6 @@ where } } -// /// Communication channel for [`QuorumProposalType`] and [`QuorumVote`]. -// pub type QuorumCommChannel = -// as ConsensusExchange>>::Networking; - -// /// Communication channel for [`ViewSyncProposalType`] and [`ViewSyncVote`]. -// pub type ViewSyncCommChannel = -// as ConsensusExchange>>::Networking; - -// /// Communication channel for [`CommitteeProposalType`] and [`DAVote`]. -// pub type CommitteeCommChannel = -// as ConsensusExchange>>::Networking; - -// /// Protocol for determining membership in a consensus committee. -// pub type QuorumMembership = -// as ConsensusExchange>>::Membership; - -// /// TYPE aliase for the membership of VID exchange -// pub type VIDMembership = -// as ConsensusExchange>>::Membership; - -// /// Protocol for determining membership in a DA committee. -// pub type CommitteeMembership = -// as ConsensusExchange>>::Membership; - -// /// Protocol for determining membership in a view sync committee. -// pub type ViewSyncMembership = -// as ConsensusExchange>>::Membership; - -// /// Type for the underlying quorum `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -// pub type QuorumNetwork = -// as CommunicationChannel>::NETWORK; - -// /// Type for the underlying committee `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -// pub type CommitteeNetwork = -// as CommunicationChannel>::NETWORK; - -// /// Type for the underlying view sync `ConnectedNetwork` that will be shared (for now) b/t Communication Channels -// pub type ViewSyncNetwork = -// as CommunicationChannel>::NETWORK; - /// Trait with all the type definitions that are used in the current hotshot setup. pub trait NodeType: Clone From 3cd80ad964a6a6e72dc61398b9864d96ff376888 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 17 Nov 2023 10:25:07 -0800 Subject: [PATCH 0423/1393] increase view_sync_timeout --- hotshot/src/tasks/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 8db56a4f9f..b7c8c00f5f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -520,7 +520,7 @@ where num_timeouts_tracked: 0, replica_task_map: HashMap::default(), relay_task_map: HashMap::default(), - view_sync_timeout: Duration::new(5, 0), + view_sync_timeout: Duration::new(30, 0), id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; From 5b07c4d146d0f6bb493b0287a436e326140cdd71 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 17 Nov 2023 14:15:25 -0500 Subject: [PATCH 0424/1393] remove commented code line --- types/src/vote.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/types/src/vote.rs b/types/src/vote.rs index b37045f824..0d71f7f145 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -36,7 +36,6 @@ pub trait Vote: HasViewNumber { /// Gets the public signature key of the votes creator/sender fn get_signing_key(&self) -> TYPES::SignatureKey; - // fn create_signed_vote(Self::Commitment, Self::Membership) ?? } /// Any type that is associated with a view From dac1157cb96147dbf72dde39054eadca7b89a812 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 17 Nov 2023 14:34:34 -0500 Subject: [PATCH 0425/1393] fix lint/build --- hotshot/examples/combined/types.rs | 3 +-- hotshot/examples/infra/mod.rs | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/hotshot/examples/combined/types.rs b/hotshot/examples/combined/types.rs index f08b47c9c2..c68d60396b 100644 --- a/hotshot/examples/combined/types.rs +++ b/hotshot/examples/combined/types.rs @@ -4,7 +4,7 @@ use hotshot::{ traits::implementations::{CombinedCommChannel, MemoryStorage}, }; use hotshot_types::{ - message::{Message, SequencingMessage}, + message::Message, traits::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, @@ -34,7 +34,6 @@ impl NodeImplementation for NodeImpl { ViewSyncExchange>, VIDExchange>, >; - type ConsensusMessage = SequencingMessage; fn new_channel_maps( start_view: ::Time, diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 6172d0e7a7..d58c484b09 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -848,7 +848,6 @@ impl< >, >, Storage = MemoryStorage, - ConsensusMessage = SequencingMessage, >, > RunDA< From 1f3f59d43a4bf91df116bdddfae2f54cb3cd5ab6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 17 Nov 2023 15:13:54 -0500 Subject: [PATCH 0426/1393] fix build after merge --- hotshot/examples/combined/types.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/hotshot/examples/combined/types.rs b/hotshot/examples/combined/types.rs index 64f41e28de..fdad905045 100644 --- a/hotshot/examples/combined/types.rs +++ b/hotshot/examples/combined/types.rs @@ -9,7 +9,6 @@ use hotshot_types::{ election::{CommitteeExchange, QuorumExchange, VIDExchange, ViewSyncExchange}, node_implementation::{ChannelMaps, Exchanges, NodeImplementation, NodeType}, }, - vote::ViewSyncVote, }; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,8 +21,6 @@ pub type VIDNetwork = CombinedCommChannel; pub type QuorumNetwork = CombinedCommChannel; pub type ViewSyncNetwork = CombinedCommChannel; -pub type ThisViewSyncVote = ViewSyncVote; - impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type Exchanges = Exchanges< From afc2ab4fe63dd42a68e0fd157ac23091d7cb6184 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 17 Nov 2023 14:28:20 -0800 Subject: [PATCH 0427/1393] Increase the duration to get the expected number of successful views. --- testing/src/test_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 5b4f33b63d..3745e6c012 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -139,7 +139,7 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(200, 0), + duration: Duration::new(250, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { From 4c27ea0d9ae60e29345dde737502adb7c715785f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 17 Nov 2023 15:24:36 -0800 Subject: [PATCH 0428/1393] try more completion time --- testing/src/test_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 3745e6c012..ec52a53df6 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -139,7 +139,7 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(250, 0), + duration: Duration::new(300, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { From 91ef1e520d959b4a78b4f155ca26b80bff0d7ec4 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 17 Nov 2023 16:09:05 -0800 Subject: [PATCH 0429/1393] Replace payload commitment with VID commitment, update DA proposal --- hotshot/src/lib.rs | 10 ++- hotshot/src/tasks/mod.rs | 20 +++-- hotshot/src/traits/storage/memory_storage.rs | 2 +- task-impls/src/consensus.rs | 18 ++-- task-impls/src/da.rs | 58 ++++++------- task-impls/src/events.rs | 7 +- task-impls/src/transactions.rs | 16 ++-- task-impls/src/vid.rs | 24 ++---- testing/src/overall_safety_task.rs | 20 ++--- testing/src/task_helpers.rs | 3 +- testing/tests/da_task.rs | 11 +-- testing/tests/network_task.rs | 12 +-- testing/tests/vid_task.rs | 12 +-- types/src/block_impl.rs | 51 ++---------- types/src/consensus.rs | 29 ++++--- types/src/data.rs | 87 ++++++++++++++------ types/src/error.rs | 16 ++-- types/src/simple_certificate.rs | 6 +- types/src/simple_vote.rs | 18 ++-- types/src/traits/block_contents.rs | 40 +++++---- types/src/traits/election.rs | 39 +++------ types/src/utils.rs | 15 ++-- 22 files changed, 255 insertions(+), 259 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 180714119a..41433c73be 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -57,7 +57,7 @@ use hotshot_types::{ use hotshot_types::{ consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, - data::Leaf, + data::{Leaf, VidCommitment}, error::StorageSnafu, message::{ ConsensusMessageType, DataMessage, InternalTrigger, Message, MessageKind, @@ -193,7 +193,9 @@ impl> SystemContext { let mut saved_block_payloads = BlockPayloadStore::default(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); if let Some(payload) = anchored_leaf.get_block_payload() { - saved_block_payloads.insert(payload); + if let Err(e) = saved_block_payloads.insert(payload) { + return Err(HotShotError::BlockError { source: e }); + } } let start_view = anchored_leaf.get_view_number(); @@ -625,7 +627,7 @@ where CommitteeEx: ConsensusExchange< TYPES, Message, - Commitment = Commitment, + Commitment = VidCommitment, Membership = MEMBERSHIP, > + 'static, ViewSyncEx: @@ -633,7 +635,7 @@ where VIDEx: ConsensusExchange< TYPES, Message, - Commitment = Commitment, + Commitment = VidCommitment, Membership = MEMBERSHIP, > + 'static, TimeoutEx: ConsensusExchange< diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 092032068a..23a6601920 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -2,7 +2,7 @@ use crate::{async_spawn, types::SystemContextHandle, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; -use commit::{Commitment, Committable}; +use commit::Commitment; use futures::FutureExt; use hotshot_task::{ boxed_sync, @@ -25,10 +25,11 @@ use hotshot_task_impls::{ view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::{ - data::Leaf, + data::{Leaf, VidCommitment}, event::Event, message::{Message, Messages, SequencingMessage}, traits::{ + block_contents::vid_commitment, election::{ConsensusExchange, Membership, ViewSyncExchangeType}, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{ @@ -232,8 +233,7 @@ pub async fn add_consensus_task< where QuorumEx: ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, TimeoutEx: ConsensusExchange, Commitment = Commitment>, { @@ -249,7 +249,11 @@ where consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - payload_commitment_and_metadata: Some((payload.commit(), metadata)), + payload_commitment_and_metadata: Some(( + // Encoding a genesis payload should not fail. + vid_commitment(payload.encode().unwrap().collect()), + metadata, + )), quorum_exchange: c_api.inner.exchanges.quorum_exchange().clone().into(), timeout_exchange: c_api.inner.exchanges.timeout_exchange().clone().into(), api: c_api.clone(), @@ -323,8 +327,7 @@ pub async fn add_vid_task< handle: SystemContextHandle, ) -> TaskRunner where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange, Commitment = VidCommitment>, { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -385,8 +388,7 @@ pub async fn add_da_task< handle: SystemContextHandle, ) -> TaskRunner where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 9c7fd14d4c..95a5fc8fcf 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -151,7 +151,7 @@ mod test { let payload = VIDBlockPayload::genesis(); let header = VIDBlockHeader { block_number: 0, - payload_commitment: payload.commit(), + payload_commitment: payload.payload_commitment, }; let dummy_leaf_commit = fake_commitment::>(); let data = hotshot_types::simple_vote::QuorumData { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 4be8ec398e..7f4460e9e2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -17,7 +17,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{Consensus, View}, - data::{Leaf, QuorumProposal}, + data::{Leaf, QuorumProposal, VidCommitment}, event::{Event, EventType}, message::{GeneralConsensusMessage, Message, Proposal, SequencingMessage}, simple_certificate::{ @@ -56,7 +56,7 @@ use tracing::{debug, error, info, instrument}; pub struct ConsensusTaskError {} /// Alias for the block payload commitment and the associated metadata. -type CommitmentAndMetadata = (Commitment, ::Metadata); +type CommitmentAndMetadata = (VidCommitment, ::Metadata); /// The state for the consensus task. Contains all of the information for the implementation /// of consensus @@ -67,8 +67,7 @@ pub struct ConsensusTaskState< > where QuorumEx: ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, TimeoutEx: ConsensusExchange, Commitment = Commitment>, { @@ -304,8 +303,7 @@ impl< where QuorumEx: ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, TimeoutEx: ConsensusExchange, Commitment = Commitment>, { @@ -786,7 +784,7 @@ where .saved_block_payloads .get(leaf.get_payload_commitment()) { - if let Err(e) = leaf.fill_block_payload(payload.clone()) { + if let Err(e) = leaf.fill_block_payload(payload) { error!( "Saved block payload and commitment don't match: {:?}", e @@ -1332,8 +1330,7 @@ impl< where QuorumEx: ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, TimeoutEx: ConsensusExchange, Commitment = Commitment>, { @@ -1370,8 +1367,7 @@ pub async fn sequencing_consensus_handle< where QuorumEx: ConsensusExchange, Commitment = Commitment>>, - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, TimeoutEx: ConsensusExchange, Commitment = Commitment>, { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 78f2cdc79b..78bab6240f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -3,7 +3,6 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use bitvec::prelude::*; -use commit::{Commitment, Committable}; use either::{Either, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -14,17 +13,17 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{Consensus, View}, - data::DAProposal, + data::{DAProposal, VidCommitment}, message::{Message, Proposal, SequencingMessage}, simple_vote::{DAData, DAVote2}, traits::{ + block_contents::{vid_commitment, BlockPayload}, consensus_api::ConsensusApi, election::{CommitteeExchangeType, ConsensusExchange, Membership}, network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{CommitteeEx, NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, - BlockPayload, }, utils::ViewInner, vote2::HasViewNumber, @@ -48,8 +47,7 @@ pub struct DATaskState< I: NodeImplementation>, A: ConsensusApi + 'static, > where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, { /// The state's api pub api: A, @@ -78,8 +76,7 @@ pub struct DATaskState< /// Struct to maintain DA Vote Collection task state pub struct DAVoteCollectionTaskState> where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, { /// the committee exchange pub committee_exchange: Arc>, @@ -102,8 +99,7 @@ where } impl> TS for DAVoteCollectionTaskState where - CommitteeEx: - ConsensusExchange, Commitment = Commitment> + CommitteeEx: ConsensusExchange, Commitment = VidCommitment> { } @@ -116,8 +112,7 @@ async fn vote_handle>( DAVoteCollectionTaskState, ) where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, { match event { HotShotEvent::DAVoteRecv(vote) => { @@ -175,8 +170,7 @@ impl< A: ConsensusApi + 'static, > DATaskState where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] @@ -213,16 +207,18 @@ where return None; } - debug!( - "Got a DA block with {} transactions!", - proposal.data.block_payload.transaction_commitments().len() - ); - let payload_commitment = proposal.data.block_payload.commit(); + // TODO (Keyao) + // debug!( + // "Got a DA block with {} transactions!", + // proposal.data.block_payload.transaction_commitments().len() + // ); + let payload_commitment = vid_commitment(proposal.data.encoded_transactions.clone()); // ED Is this the right leader? let view_leader_key = self.committee_exchange.get_leader(view); if view_leader_key != sender { - error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); + // TODO (Keyao) + // error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); return None; } @@ -265,15 +261,19 @@ where // there is already a view there: the replica task may have inserted a `Leaf` view which // contains strictly more information. consensus.state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { - block: payload_commitment, - }, + view_inner: ViewInner::DA { payload_commitment }, }); // Record the block payload we have promised to make available. - consensus + if let Err(e) = consensus .saved_block_payloads - .insert(proposal.data.block_payload); + .insert(BlockPayload::from_bytes( + proposal.data.encoded_transactions.into_iter(), + proposal.data.metadata, + )) + { + error!("Failed to build the block payload: {:?}.", e); + } } HotShotEvent::DAVoteRecv(vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number(),); @@ -394,20 +394,21 @@ where return None; } - HotShotEvent::BlockReady(payload, metadata, view) => { + HotShotEvent::BlockReady(encoded_transactions, metadata, view) => { self.committee_exchange .network() .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - let payload_commitment = payload.commit(); + let payload_commitment = vid_commitment(encoded_transactions.clone()); let signature = self .committee_exchange .sign_da_proposal(&payload_commitment); // TODO (Keyao) Fix the payload sending and receiving for the DA proposal. // let data: DAProposal = DAProposal { - block_payload: payload.clone(), + encoded_transactions, + metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number: view, }; @@ -472,8 +473,7 @@ impl< A: ConsensusApi + 'static, > TS for DATaskState where - CommitteeEx: - ConsensusExchange, Commitment = Commitment>, + CommitteeEx: ConsensusExchange, Commitment = VidCommitment>, { } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 3a306f63db..181f01a958 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,9 +1,8 @@ use crate::view_sync::ViewSyncPhase; -use commit::Commitment; use either::Either; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, VidDisperse}, + data::{DAProposal, Leaf, QuorumProposal, VidCommitment, VidDisperse}, message::Proposal, simple_certificate::{ DACertificate2, QuorumCertificate2, TimeoutCertificate2, VIDCertificate2, @@ -96,12 +95,12 @@ pub enum HotShotEvent> { TransactionSend(TYPES::Transaction, TYPES::SignatureKey), /// Event to send block payload commitment and metadata from DA leader to the quorum; internal event only SendPayloadCommitmentAndMetadata( - Commitment, + VidCommitment, ::Metadata, ), /// Event when the transactions task has a block formed BlockReady( - TYPES::BlockPayload, + Vec, ::Metadata, TYPES::Time, ), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 6eb195ccad..85d37ada09 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,11 +13,11 @@ use hotshot_task::{ task_impls::HSTWithEvent, }; use hotshot_types::{ - block_impl::{NUM_CHUNKS, NUM_STORAGE_NODES}, consensus::Consensus, data::{test_srs, Leaf, VidDisperse, VidScheme, VidSchemeTrait}, message::{Message, Proposal, SequencingMessage}, traits::{ + block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusApi, election::{ConsensusExchange, QuorumExchangeType}, node_implementation::{NodeImplementation, NodeType, QuorumEx}, @@ -221,8 +221,8 @@ where return None; } }; - let encoded_txns = match payload.encode() { - Ok(encoded) => encoded, + let encoded_transactions = match payload.encode() { + Ok(encoded) => encoded.into_iter().collect::>(), Err(e) => { error!("Failed to encode the block payload: {:?}.", e); return None; @@ -234,15 +234,13 @@ where // changes. // TODO let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let vid_disperse = vid - .disperse(encoded_txns.into_iter().collect::>()) - .unwrap(); + let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); // TODO never clone a block // https://github.com/EspressoSystems/HotShot/issues/1858 self.event_stream .publish(HotShotEvent::BlockReady( - payload.clone(), + encoded_transactions.into_iter().collect::>(), metadata, view + 1, )) @@ -256,14 +254,14 @@ where Proposal { data: VidDisperse { view_number: view + 1, - payload_commitment: payload.commit(), + payload_commitment: vid_disperse.commit, shares: vid_disperse.shares, common: vid_disperse.common, }, // TODO (Keyao) This is also signed in DA task. signature: self .quorum_exchange - .sign_payload_commitment(payload.commit()), + .sign_payload_commitment(vid_disperse.commit), _pd: PhantomData, }, self.quorum_exchange.public_key().clone(), diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 888f131738..3469c322e8 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -3,7 +3,6 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use bitvec::prelude::*; -use commit::Commitment; use either::{Either, Left, Right}; use futures::FutureExt; use hotshot_task::{ @@ -15,6 +14,7 @@ use hotshot_task::{ use hotshot_types::traits::{network::ConsensusIntentEvent, node_implementation::VIDMembership}; use hotshot_types::{ consensus::{Consensus, View}, + data::VidCommitment, message::{Message, SequencingMessage}, traits::{ consensus_api::ConsensusApi, @@ -47,8 +47,7 @@ pub struct VIDTaskState< I: NodeImplementation>, A: ConsensusApi + 'static, > where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange, Commitment = VidCommitment>, { /// The state's api pub api: A, @@ -77,8 +76,7 @@ pub struct VIDTaskState< /// Struct to maintain VID Vote Collection task state pub struct VIDVoteCollectionTaskState> where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange, Commitment = VidCommitment>, { /// the vid exchange pub vid_exchange: Arc>, @@ -97,8 +95,7 @@ where } impl> TS for VIDVoteCollectionTaskState where - VIDEx: - ConsensusExchange, Commitment = Commitment> + VIDEx: ConsensusExchange, Commitment = VidCommitment> { } @@ -113,8 +110,7 @@ async fn vote_handle( where TYPES: NodeType, I: NodeImplementation, - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange, Commitment = VidCommitment>, { match event { HotShotEvent::VidVoteRecv(vote) => { @@ -173,8 +169,7 @@ impl< A: ConsensusApi + 'static, > VIDTaskState where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange, Commitment = VidCommitment>, { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] @@ -338,9 +333,7 @@ where // there is already a view there: the replica task may have inserted a `Leaf` view which // contains strictly more information. consensus.state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { - block: payload_commitment, - }, + view_inner: ViewInner::DA { payload_commitment }, }); // Record the block we have promised to make available. @@ -427,8 +420,7 @@ impl< A: ConsensusApi + 'static, > TS for VIDTaskState where - VIDEx: - ConsensusExchange, Commitment = Commitment>, + VIDEx: ConsensusExchange, Commitment = VidCommitment>, { } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 27091c5493..dd51462207 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -1,13 +1,5 @@ -use commit::Commitment; -use either::Either; -use hotshot_task::{event_stream::EventStream, Merge}; -use hotshot_task_impls::events::HotShotEvent; -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - sync::Arc, -}; - use async_compatibility_layer::channel::UnboundedStream; +use either::Either; use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_task::{ @@ -16,14 +8,20 @@ use hotshot_task::{ task_impls::{HSTWithEventAndMessage, TaskBuilder}, MergeN, }; +use hotshot_task::{event_stream::EventStream, Merge}; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ - data::Leaf, + data::{Leaf, VidCommitment}, error::RoundTimedoutState, event::{Event, EventType}, simple_certificate::QuorumCertificate2, traits::node_implementation::NodeType, }; use snafu::Snafu; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, +}; use crate::{test_launcher::TaskGenerator, test_runner::Node}; pub type StateAndBlock = (Vec, Vec); @@ -106,7 +104,7 @@ pub struct RoundResult { pub leaf_map: HashMap, usize>, /// block -> # entries decided on that block - pub block_map: HashMap, usize>, + pub block_map: HashMap, /// state -> # entries decided on that state pub state_map: HashMap<(), usize>, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 10668afe8d..d13d19dec5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,13 +13,14 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, NUM_CHUNKS, NUM_STORAGE_NODES}, + block_impl::{VIDBlockHeader, VIDBlockPayload}, consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::{Message, Proposal}, simple_certificate::QuorumCertificate2, traits::{ block_contents::BlockHeader, + block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusSharedApi, election::{ConsensusExchange, Membership}, node_implementation::{CommitteeEx, ExchangesType, NodeType, QuorumEx}, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 368b36a317..6d3fb82118 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -3,6 +3,7 @@ use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ + block_impl::vid_commitment, block_impl::VIDTransaction, data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote2}, @@ -37,8 +38,8 @@ async fn test_da_task() { let committee_exchange = api.inner.exchanges.committee_exchange().clone(); let pub_key = *api.public_key(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); - let payload_commitment = VIDBlockPayload::vid_commitment(&encoded_txns); + let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let payload_commitment = vid_commitment(&encoded_transactions); let block = VIDBlockPayload { transactions, payload_commitment, @@ -46,7 +47,7 @@ async fn test_da_task() { let signature = committee_exchange.sign_da_proposal(&block.commit()); let proposal = DAProposal { - block_payload: block.clone(), + encoded_transactions, view_number: ViewNumber::new(2), }; let message = Proposal { @@ -66,7 +67,7 @@ async fn test_da_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::BlockReady( - block.clone(), + encoded_transactions, (), ViewNumber::new(2), )); @@ -76,7 +77,7 @@ async fn test_da_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - HotShotEvent::BlockReady(block.clone(), (), ViewNumber::new(2)), + HotShotEvent::BlockReady(encoded_transactions, (), ViewNumber::new(2)), 1, ); output.insert( diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f374be0649..8b52b7b484 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ task_helpers::{build_quorum_proposal, vid_init}, }; use hotshot_types::{ - data::{DAProposal, VidSchemeTrait, ViewNumber}, + data::{DAProposal, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, node_implementation::ExchangesType, state::ConsensusTime, }, @@ -43,8 +43,8 @@ async fn test_network_task() { let priv_key = api.private_key(); let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); - let vid_disperse = vid.disperse(&encoded_txns).unwrap(); + let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { transactions, @@ -53,7 +53,7 @@ async fn test_network_task() { let signature = committee_exchange.sign_da_proposal(&block.commit()); let da_proposal = Proposal { data: DAProposal { - block_payload: block.clone(), + encoded_transactions, view_number: ViewNumber::new(2), }, signature, @@ -79,7 +79,7 @@ async fn test_network_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::BlockReady( - block.clone(), + encoded_transactions, (), ViewNumber::new(2), )); @@ -101,7 +101,7 @@ async fn test_network_task() { 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); output.insert( - HotShotEvent::BlockReady(block.clone(), (), ViewNumber::new(2)), + HotShotEvent::BlockReady(encoded_transactions, (), ViewNumber::new(2)), 2, ); output.insert( diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 167881f508..7ac0969c7e 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -7,7 +7,7 @@ use hotshot_testing::{ }; use hotshot_types::{ block_impl::VIDTransaction, - data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, + data::{DAProposal, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusSharedApi, election::ConsensusExchange, node_implementation::ExchangesType, state::ConsensusTime, @@ -40,8 +40,8 @@ async fn test_vid_task() { let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; - let encoded_txns = VIDTransaction::encode(transactions.clone()).unwrap(); - let vid_disperse = vid.disperse(&encoded_txns).unwrap(); + let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let block = VIDBlockPayload { transactions, @@ -50,7 +50,7 @@ async fn test_vid_task() { let signature = vid_exchange.sign_vid_disperse(&block.commit()); let proposal: DAProposal = DAProposal { - block_payload: block.clone(), + encoded_transactions, view_number: ViewNumber::new(2), }; let message = Proposal { @@ -77,7 +77,7 @@ async fn test_vid_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::BlockReady( - block.clone(), + encoded_transactions, (), ViewNumber::new(2), )); @@ -87,7 +87,7 @@ async fn test_vid_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - HotShotEvent::BlockReady(block.clone(), (), ViewNumber::new(2)), + HotShotEvent::BlockReady(encoded_transactions, (), ViewNumber::new(2)), 1, ); diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 8cca78fa1c..8c48024914 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -7,25 +7,17 @@ use std::{ }; use crate::{ - data::{test_srs, VidScheme, VidSchemeTrait}, + data::{VidCommitment, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{BlockError, BlockHeader, Transaction}, + block_contents::{vid_commitment, BlockError, BlockHeader, Transaction}, state::TestableBlock, BlockPayload, }, }; -use ark_serialize::CanonicalDeserialize; use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; -// TODO -/// Number of storage nodes for VID initiation. -pub const NUM_STORAGE_NODES: usize = 8; -// TODO -/// Number of chunks for VID initiation. -pub const NUM_CHUNKS: usize = 8; - /// The transaction in a [`VIDBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct VIDTransaction(pub Vec); @@ -83,20 +75,6 @@ pub struct VIDBlockPayload { } impl VIDBlockPayload { - #[must_use] - /// Compute the VID payload commitment. - /// # Panics - /// If the VID computation fails. - pub fn vid_commitment(encoded_transactions: &[u8]) -> ::Commit { - // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap(); - vid.disperse(encoded_transactions).unwrap().commit - } - /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for /// consensus task initiation. /// # Panics @@ -108,22 +86,11 @@ impl VIDBlockPayload { let encoded = VIDTransaction::encode(vec![VIDTransaction(txns.clone())]).unwrap(); VIDBlockPayload { transactions: vec![VIDTransaction(txns)], - payload_commitment: Self::vid_commitment(&encoded), + payload_commitment: vid_commitment(encoded), } } } -impl Committable for VIDBlockPayload { - fn commit(&self) -> Commitment { - as CanonicalDeserialize>::deserialize(&*self.payload_commitment) - .expect("conversion from VidScheme::Commit to Commitment should succeed") - } - - fn tag() -> String { - "VID_BLOCK_PAYLOAD".to_string() - } -} - impl Display for VIDBlockPayload { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "BlockPayload #txns={}", self.transactions.len()) @@ -154,7 +121,7 @@ impl BlockPayload for VIDBlockPayload { Ok(( Self { transactions: txns_vec, - payload_commitment: Self::vid_commitment(&encoded), + payload_commitment: vid_commitment(encoded), }, (), )) @@ -184,7 +151,7 @@ impl BlockPayload for VIDBlockPayload { Self { transactions, - payload_commitment: Self::vid_commitment(&encoded_vec), + payload_commitment: vid_commitment(encoded_vec), } } @@ -210,14 +177,14 @@ pub struct VIDBlockHeader { /// Block number. pub block_number: u64, /// VID commitment to the payload. - pub payload_commitment: Commitment, + pub payload_commitment: VidCommitment, } impl BlockHeader for VIDBlockHeader { type Payload = VIDBlockPayload; fn new( - payload_commitment: Commitment, + payload_commitment: VidCommitment, _metadata: ::Metadata, parent_header: &Self, ) -> Self { @@ -236,7 +203,7 @@ impl BlockHeader for VIDBlockHeader { ( Self { block_number: 0, - payload_commitment: payload.commit(), + payload_commitment: payload.payload_commitment, }, payload, metadata, @@ -247,7 +214,7 @@ impl BlockHeader for VIDBlockHeader { self.block_number } - fn payload_commitment(&self) -> Commitment { + fn payload_commitment(&self) -> VidCommitment { self.payload_commitment } } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 6af690228d..ac5c286f51 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -7,10 +7,11 @@ pub use crate::{ use displaydoc::Display; use crate::{ - data::Leaf, + data::{Leaf, VidCommitment}, error::HotShotError, simple_certificate::QuorumCertificate2, traits::{ + block_contents::vid_commitment, metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, BlockPayload, @@ -360,10 +361,10 @@ impl Consensus { /// before all but one branch are ultimately garbage collected. #[derive(Clone, Debug, Derivative)] #[derivative(Default(bound = ""))] -pub struct BlockPayloadStore(HashMap, (PAYLOAD, u64)>); +pub struct BlockPayloadStore(HashMap); impl BlockPayloadStore { - /// Save payload commitment for later retrieval. + /// Save the payload commitment for later retrieval. /// /// After calling this function, and before the corresponding call to [`remove`](Self::remove), /// `self.get(payload_commitment)` will return `Some(payload)`. @@ -372,28 +373,32 @@ impl BlockPayloadStore { /// multiple calls to [`insert`](Self::insert) for the same payload commitment result in /// multiple owning references to the payload commitment. [`remove`](Self::remove) must be /// called once for each reference before the payload commitment will be deallocated. - pub fn insert(&mut self, payload: PAYLOAD) { + /// + /// # Errors + /// If the transaction length conversion fails. + pub fn insert(&mut self, payload: PAYLOAD) -> Result<(), PAYLOAD::Error> { self.0 - .entry(payload.commit()) + .entry(vid_commitment(payload.clone().encode()?.collect())) .and_modify(|(_, refcount)| *refcount += 1) .or_insert((payload, 1)); + Ok(()) } - /// Get a saved set of transaction commitments, if available. + /// Get a saved block payload, if available. /// - /// If a set of transaction commitments has been saved with [`insert`](Self::insert), this - /// function will retrieve it. It may return [`None`] if a block with the given commitment has - /// not been saved or if the block has been dropped with [`remove`](Self::remove). + /// If a payload has been saved with [`insert`](Self::insert), this function will retrieve it. + /// It may return [`None`] if a block with the given commitment has not been saved or if the + /// block has been dropped with [`remove`](Self::remove). #[must_use] - pub fn get(&self, payload_commitment: Commitment) -> Option<&PAYLOAD> { + pub fn get(&self, payload_commitment: VidCommitment) -> Option<&PAYLOAD> { self.0.get(&payload_commitment).map(|(payload, _)| payload) } - /// Drop a reference to a saved set of transaction commitments. + /// Drop a reference to a saved block payload. /// /// If the set exists and this call drops the last reference to it, the set will be returned, /// Otherwise, the return value is [`None`]. - pub fn remove(&mut self, payload_commitment: Commitment) -> Option { + pub fn remove(&mut self, payload_commitment: VidCommitment) -> Option { if let Entry::Occupied(mut e) = self.0.entry(payload_commitment) { let (_, refcount) = e.get_mut(); *refcount -= 1; diff --git a/types/src/data.rs b/types/src/data.rs index fa49893b10..987a308e85 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -19,10 +19,11 @@ use crate::{ use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; use bincode::Options; -use commit::{Commitment, Committable}; +use commit::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; +// use jf_primitives::pcs::prelude::Commitment; use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; @@ -66,7 +67,7 @@ impl ConsensusTime for ViewNumber { impl Committable for ViewNumber { fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("View Number Commitment"); + let builder = RawCommitmentBuilder::new("View Number Commitment"); builder.u64(self.0).finalize() } } @@ -114,8 +115,10 @@ pub type TxnCommitment = Commitment>; /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { - /// BlockPayload leaf wants to apply - pub block_payload: TYPES::BlockPayload, + /// Encoded transactions in the block to be applied. + pub encoded_transactions: Vec, + /// Metadata of the block to be applied. + pub metadata: ::Metadata, /// View this proposal applies to pub view_number: TYPES::Time, } @@ -123,6 +126,30 @@ pub struct DAProposal { /// The VID scheme type used in `HotShot`. pub type VidScheme = jf_primitives::vid::advz::Advz; pub use jf_primitives::vid::VidScheme as VidSchemeTrait; +/// VID commitment. +pub type VidCommitment = ::Commit; +// pub type VidCommitment = Commitment<::Commit>; +// struct DummyVidCommitment; +// impl Committable for DummyVidCommitment { +// fn commit(&self) -> Commitment { +// Commitment([0u8; 32], PhantomData) +// } + +// fn tag() -> String { +// "DUMMY_TAG".to_string() +// } +// } + +// impl Committable for VidCommit { +// fn commit(&self) -> Commitment { +// as CanonicalDeserialize>::deserialize(self) +// .expect("conversion from VidScheme::Commit to Commitment should succeed") +// } + +// fn tag() -> String { +// "VID_COMMIT".to_string() +// } +// } /// VID dispersal data /// @@ -132,7 +159,7 @@ pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::Time, /// Block payload commitment - pub payload_commitment: Commitment, + pub payload_commitment: VidCommitment, /// VID shares dispersed among storage nodes pub shares: Vec<::Share>, /// VID common data sent to all storage nodes @@ -212,14 +239,14 @@ impl HasViewNumber for ViewSyncCertificate { /// full. It is guaranteed to contain, at least, a cryptographic commitment to the block, and it /// provides an interface for resolving the commitment to a full block if the full block is /// available. -pub trait DeltasType: +pub trait DeltasType: Clone + Debug + for<'a> Deserialize<'a> + PartialEq + Eq + std::hash::Hash + Send + Serialize + Sync { /// Errors reported by this type. type Error: std::error::Error; /// Get a cryptographic commitment to the block represented by this delta. - fn payload_commitment(&self) -> Commitment; + fn payload_commitment(&self) -> VidCommitment; /// Get the full block if it is available, otherwise return this object unchanged. /// @@ -227,7 +254,7 @@ pub trait DeltasType: /// /// Returns the original [`DeltasType`], unchanged, in an [`Err`] variant in the case where the /// full block is not currently available. - fn try_resolve(self) -> Result; + fn try_resolve(self) -> Result; /// Fill this [`DeltasType`] by providing a complete block. /// @@ -238,18 +265,23 @@ pub trait DeltasType: /// /// Fails if `block` does not match `self.payload_commitment()`, or if the block is not able to be /// stored for some implementation-defined reason. - fn fill(&mut self, block: BlockPayload) -> Result<(), Self::Error>; + fn fill(&mut self, block: PAYLOAD) -> Result<(), Self::Error>; } /// Error which occurs when [`Leaf::fill_block_payload`] is called with a payload commitment /// that does not match the internal payload commitment of the leaf. #[derive(Clone, Copy, Debug, Snafu)] -#[snafu(display("the block payload {:?} has commitment {} (expected {})", payload, payload.commit(), commitment))] +#[snafu(display( + "the block payload {:?} has commitment (expected )", + payload, + // vid_commitment(payload), + // commitment +))] pub struct InconsistentPayloadCommitmentError { /// The block payload with the wrong commitment. payload: PAYLOAD, - /// The expected commitment. - commitment: Commitment, + // /// The expected commitment. + // commitment: VidCommitment, } /// Additional functions required to use a [`Leaf`] with hotshot-testing. @@ -378,16 +410,17 @@ impl Leaf { /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()`. pub fn fill_block_payload( &mut self, - block_payload: TYPES::BlockPayload, + _block_payload: &TYPES::BlockPayload, ) -> Result<(), InconsistentPayloadCommitmentError> { - if block_payload.commit() != self.block_header.payload_commitment() { - return Err(InconsistentPayloadCommitmentError { - payload: block_payload, - commitment: self.block_header.payload_commitment(), - }); - } - self.block_payload = Some(block_payload); - Ok(()) + unimplemented!("TODO Keyao"); + // if block_payload.commit() != self.block_header.payload_commitment() { + // return Err(InconsistentPayloadCommitmentError { + // payload: block_payload, + // // commitment: self.block_header.payload_commitment(), + // }); + // } + // self.block_payload = Some(block_payload); + // Ok(()) } /// Optional block payload. pub fn get_block_payload(&self) -> Option { @@ -395,7 +428,7 @@ impl Leaf { } /// A commitment to the block payload contained in this leaf. - pub fn get_payload_commitment(&self) -> Commitment { + pub fn get_payload_commitment(&self) -> VidCommitment { self.get_block_header().payload_commitment() } /// The blockchain state after appending this leaf. @@ -446,14 +479,14 @@ where /// Fake the thing a genesis block points to. Needed to avoid infinite recursion #[must_use] pub fn fake_commitment() -> Commitment { - commit::RawCommitmentBuilder::new("Dummy commitment for arbitrary genesis").finalize() + RawCommitmentBuilder::new("Dummy commitment for arbitrary genesis").finalize() } /// create a random commitment #[must_use] pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitment { let random_array: Vec = (0u8..100u8).map(|_| rng.gen_range(0..255)).collect(); - commit::RawCommitmentBuilder::new("Random Commitment") + RawCommitmentBuilder::new("Random Commitment") .constant_str("Random Field") .var_size_bytes(&random_array) .finalize() @@ -544,6 +577,7 @@ pub fn serialize_signature2( impl Committable for Leaf { fn commit(&self) -> commit::Commitment { + let payload_commitment_bytes: [u8; 32] = self.get_payload_commitment().into(); let signatures_bytes = if self.justify_qc.is_genesis { let mut bytes = vec![]; bytes.extend("genesis".as_bytes()); @@ -553,11 +587,12 @@ impl Committable for Leaf { }; // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. - commit::RawCommitmentBuilder::new("leaf commitment") + RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) .u64_field("block number", self.get_height()) .field("parent Leaf commitment", self.parent_commitment) - .field("block payload commitment", self.get_payload_commitment()) + .constant_str("block payload commitment") + .fixed_size_bytes(&payload_commitment_bytes) .constant_str("justify_qc view number") .u64(*self.justify_qc.view_number) .field( diff --git a/types/src/error.rs b/types/src/error.rs index c6ccc7af1a..a8edd760d2 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -3,7 +3,9 @@ //! This module provides [`HotShotError`], which is an enum representing possible faults that can //! occur while interacting with this crate. -use crate::traits::{node_implementation::NodeType, storage::StorageError}; +use crate::traits::{ + block_contents::BlockPayload, node_implementation::NodeType, storage::StorageError, +}; use snafu::Snafu; use std::num::NonZeroU64; @@ -31,12 +33,12 @@ pub enum HotShotError { /// The underlying network fault source: crate::traits::network::NetworkError, }, - /// A block failed verification - #[snafu(display("Failed verification of block"))] - BadBlock {}, - /// A block was not consistent with the existing state - #[snafu(display("Inconsistent block"))] - InconsistentBlock {}, + /// Failure in the block. + #[snafu(display("Failed to build or verify a block: {source}"))] + BlockError { + /// The underlying block error. + source: ::Error, + }, /// Failure in networking layer #[snafu(display("Failure in networking layer: {source}"))] NetworkFault { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 8827cbbbd5..5e03994aa2 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -125,13 +125,11 @@ impl QuorumCertificate2 { /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` pub type QuorumCertificate2 = SimpleCertificate>; /// Type alias for a DA certificate over `DAData` -pub type DACertificate2 = - SimpleCertificate::BlockPayload>>; +pub type DACertificate2 = SimpleCertificate; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate2 = SimpleCertificate>; /// type alias for a VID certificate -pub type VIDCertificate2 = - SimpleCertificate::BlockPayload>>; +pub type VIDCertificate2 = SimpleCertificate; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` /// Type alias for a `ViewSyncPreCommit` certificate over a view number diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 6c88625704..6b367d67d0 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -6,7 +6,7 @@ use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; use crate::{ - data::Leaf, + data::{Leaf, VidCommitment}, traits::{ election::Membership, node_implementation::NodeType, @@ -24,9 +24,9 @@ pub struct QuorumData { } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. -pub struct DAData { +pub struct DAData { /// Commitment to a block payload - pub payload_commit: Commitment, + pub payload_commit: VidCommitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a timeout vote. @@ -36,9 +36,9 @@ pub struct TimeoutData { } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a VID vote. -pub struct VIDData { +pub struct VIDData { /// Commitment to the block payload the VID vote is on. - pub payload_commit: Commitment, + pub payload_commit: VidCommitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Pre Commit vote. @@ -166,14 +166,14 @@ impl Committable for TimeoutData { } } -impl Committable for DAData { +impl Committable for DAData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("DA Vote") .var_size_bytes(self.payload_commit.as_ref()) .finalize() } } -impl Committable for VIDData { +impl Committable for VIDData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("VID Vote") .var_size_bytes(self.payload_commit.as_ref()) @@ -219,9 +219,9 @@ impl = SimpleVote, M>; /// DA vote type alias -pub type DAVote2 = SimpleVote::BlockPayload>, M>; +pub type DAVote2 = SimpleVote; /// VID vote type alias -pub type VIDVote2 = SimpleVote::BlockPayload>, M>; +pub type VIDVote2 = SimpleVote; /// Timeout Vote type alias pub type TimeoutVote2 = SimpleVote, M>; /// View Sync Commit Vote type alias diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 647359157e..95d600fc46 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -3,6 +3,7 @@ //! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which //! describe the behaviors that a block is expected to have. +use crate::data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}; use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; use snafu::Snafu; @@ -13,6 +14,13 @@ use std::{ hash::Hash, }; +// TODO +/// Number of storage nodes for VID initiation. +pub const NUM_STORAGE_NODES: usize = 8; +// TODO +/// Number of chunks for VID initiation. +pub const NUM_CHUNKS: usize = 8; + /// The error type for block and its transactions. #[derive(Snafu, Debug)] pub enum BlockError { @@ -37,17 +45,7 @@ pub trait Transaction: /// sent between threads, and can have a hash produced of it /// * Must be hashable pub trait BlockPayload: - Serialize - + Clone - + Debug - + Display - + Hash - + PartialEq - + Eq - + Send - + Sync - + Committable - + DeserializeOwned + Serialize + Clone + Debug + Display + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned { /// The error type for this type of block type Error: Error + Debug + Send + Sync; @@ -56,7 +54,7 @@ pub trait BlockPayload: type Transaction: Transaction; /// Data created during block building which feeds into the block header - type Metadata: Clone + Debug + Eq + Hash + Send + Sync; + type Metadata: Clone + Debug + DeserializeOwned + Eq + Hash + Send + Sync + Serialize; /// Encoded payload. type Encode<'a>: 'a + Iterator + Send @@ -91,6 +89,20 @@ pub trait BlockPayload: fn transaction_commitments(&self) -> Vec>; } +/// Compute the VID payload commitment. +/// # Panics +/// If the VID computation fails. +#[must_use] +pub fn vid_commitment(encoded_transactions: Vec) -> ::Commit { + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap(); + vid.disperse(encoded_transactions).unwrap().commit +} + /// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned @@ -100,7 +112,7 @@ pub trait BlockHeader: /// Build a header with the payload commitment, metadata, and parent header. fn new( - payload_commitment: Commitment, + payload_commitment: VidCommitment, metadata: ::Metadata, parent_header: &Self, ) -> Self; @@ -116,5 +128,5 @@ pub trait BlockHeader: fn block_number(&self) -> u64; /// Get the payload commitment. - fn payload_commitment(&self) -> Commitment; + fn payload_commitment(&self) -> VidCommitment; } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 1f4479f075..223fcf654c 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,7 +7,10 @@ use super::{ node_implementation::{NodeImplementation, NodeType}, signature_key::EncodedSignature, }; -use crate::{certificate::AssembledSignature, data::Leaf}; +use crate::{ + certificate::AssembledSignature, + data::{Leaf, VidCommitment}, +}; use crate::{ traits::{ @@ -316,10 +319,7 @@ pub trait CommitteeExchangeType: ConsensusExchange { /// Sign a DA proposal. - fn sign_da_proposal( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature; + fn sign_da_proposal(&self, payload_commitment: &VidCommitment) -> EncodedSignature; } /// Standard implementation of [`CommitteeExchangeType`] utilizing a DA committee. @@ -354,10 +354,7 @@ impl< > CommitteeExchangeType for CommitteeExchange { /// Sign a DA proposal. - fn sign_da_proposal( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature { + fn sign_da_proposal(&self, payload_commitment: &VidCommitment) -> EncodedSignature { let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); signature } @@ -372,7 +369,7 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; + type Commitment = (); fn create( entries: Vec<::StakeTableEntry>, @@ -411,10 +408,7 @@ impl< /// A [`ConsensusExchange`] where participants vote to provide availability for blobs of data. pub trait VIDExchangeType: ConsensusExchange { /// Sign a VID disperse - fn sign_vid_disperse( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature; + fn sign_vid_disperse(&self, payload_commitment: &VidCommitment) -> EncodedSignature; } /// Standard implementation of [`VIDExchangeType`] @@ -449,10 +443,7 @@ impl< > VIDExchangeType for VIDExchange { /// Sign a VID proposal. - fn sign_vid_disperse( - &self, - payload_commitment: &Commitment, - ) -> EncodedSignature { + fn sign_vid_disperse(&self, payload_commitment: &VidCommitment) -> EncodedSignature { let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); signature } @@ -467,7 +458,7 @@ impl< { type Membership = MEMBERSHIP; type Networking = NETWORK; - type Commitment = Commitment; + type Commitment = (); fn create( entries: Vec<::StakeTableEntry>, @@ -512,10 +503,7 @@ pub trait QuorumExchangeType: ConsensusExchange< ) -> EncodedSignature; /// Sign a block payload commitment. - fn sign_payload_commitment( - &self, - payload_commitment: Commitment, - ) -> EncodedSignature; + fn sign_payload_commitment(&self, payload_commitment: VidCommitment) -> EncodedSignature; } /// Standard implementation of [`QuroumExchangeType`] based on Hot Stuff consensus. @@ -558,10 +546,7 @@ impl< signature } - fn sign_payload_commitment( - &self, - payload_commitment: Commitment<::BlockPayload>, - ) -> EncodedSignature { + fn sign_payload_commitment(&self, payload_commitment: VidCommitment) -> EncodedSignature { TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()) } } diff --git a/types/src/utils.rs b/types/src/utils.rs index 64d6aae159..18e45ffdf1 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -1,6 +1,9 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use crate::{data::Leaf, traits::node_implementation::NodeType}; +use crate::{ + data::{Leaf, VidCommitment}, + traits::node_implementation::NodeType, +}; use commit::Commitment; use std::ops::Deref; @@ -13,8 +16,8 @@ pub enum ViewInner { /// made. This saves memory when a leader fails and subverts a DoS attack where malicious /// leaders repeatedly request availability for blocks that they never propose. DA { - /// Available block. - block: Commitment, + /// Payload commitment to the available block. + payload_commitment: VidCommitment, }, /// Undecided view Leaf { @@ -38,9 +41,9 @@ impl ViewInner { /// return the underlying block paylod commitment if it exists #[must_use] - pub fn get_payload_commitment(&self) -> Option> { - if let Self::DA { block } = self { - Some(*block) + pub fn get_payload_commitment(&self) -> Option { + if let Self::DA { payload_commitment } = self { + Some(*payload_commitment) } else { None } From 8b4269809af9898c4b4d5bf8e8b3853dd6ffe6c8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 20 Nov 2023 10:00:06 -0500 Subject: [PATCH 0430/1393] review comments --- hotshot/src/lib.rs | 10 +++++++++- hotshot/src/types/handle.rs | 16 ++-------------- task-impls/src/transactions.rs | 3 --- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 02ee34731d..afbc586e1e 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -102,7 +102,15 @@ pub struct Networks> { pub da_network: I::CommitteeNetwork, /// Phantom for TYPES and I - pub _pd: PhantomData<(TYPES, I)>, //TODO: Do we need seperate networks for Viewsync/VID? + pub _pd: PhantomData<(TYPES, I)>, +} + +impl> Networks { + /// wait for all networks to be ready + pub async fn wait_for_networks_ready(&self) { + self.quorum_network.wait_for_ready().await; + self.da_network.wait_for_ready().await; + } } /// Bundle of all the memberships a consensus instance uses diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index f0ddeefc3c..87975429e4 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,8 +20,7 @@ use hotshot_types::{ event::EventType, message::{MessageKind, SequencingMessage}, traits::{ - election::Membership, network::CommunicationChannel, node_implementation::NodeType, - state::ConsensusTime, storage::Storage, + election::Membership, node_implementation::NodeType, state::ConsensusTime, storage::Storage, }, }; use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; @@ -176,18 +175,7 @@ impl + 'static> SystemContextHandl /// Block the underlying quorum (and committee) networking interfaces until node is /// successfully initialized into the networks. pub async fn wait_for_networks_ready(&self) { - self.hotshot - .inner - .networks - .quorum_network - .wait_for_ready() - .await; - self.hotshot - .inner - .networks - .da_network - .wait_for_ready() - .await; + self.hotshot.inner.networks.wait_for_networks_ready().await; } /// Shut down the the inner hotshot and wait until all background threads are closed. diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index c8f467fd52..73cebe39d6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -82,9 +82,6 @@ pub struct TransactionTaskState< pub id: u64, } -// We have two `TransactionTaskState` implementations with different bounds. The implementation -// here requires `TYPES: NodeType`, -// whereas it's just `TYPES: NodeType` in the second implementation. impl, A: ConsensusApi + 'static> TransactionTaskState { From 008ab93a69dc790a2a58b4ac96cde550a16619bb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 20 Nov 2023 17:33:31 -0500 Subject: [PATCH 0431/1393] initial generics for common vote accumulation --- task-impls/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 01dcd1da6f..5c132ffeb9 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -34,3 +34,6 @@ pub mod view_sync; /// The task which implements verifiable information dispersal pub mod vid; + +/// Generic task for collecting votes +pub mod vote; From 7a120141fd92be32a70a90268fb1f0afae91d6df Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 20 Nov 2023 17:57:31 -0500 Subject: [PATCH 0432/1393] impls for new vote accumulator task --- task-impls/src/vote.rs | 458 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 458 insertions(+) create mode 100644 task-impls/src/vote.rs diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs new file mode 100644 index 0000000000..1354bb70a5 --- /dev/null +++ b/task-impls/src/vote.rs @@ -0,0 +1,458 @@ +use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; + +use crate::events::HotShotEvent; +use async_compatibility_layer::art::async_spawn; +use async_trait::async_trait; +use bitvec::prelude::*; +use either::Either::{self, Left, Right}; +use futures::FutureExt; +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, + task_impls::{HSTWithEvent, TaskBuilder}, +}; +use hotshot_types::{ + simple_certificate::{ + DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + }, + simple_vote::{ + DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + ViewSyncPreCommitVote, + }, + traits::{election::Membership, node_implementation::NodeType}, + vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, +}; +use snafu::Snafu; +use tracing::{debug, error}; + +#[derive(Snafu, Debug)] +/// Stub of a vote error +pub struct VoteTaskError {} + +/// Task state for collecting votes of one type and emiting a certificate +pub struct VoteCollectionTaskState< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate + Debug, +> { + /// Public key for this node. + pub public_key: TYPES::SignatureKey, + + /// Membership for voting + pub membership: TYPES::Membership, + + /// accumulator handles aggregating the votes + pub accumulator: Option>, + + /// The view which we are collecting votes for + pub view: TYPES::Time, + + /// global event stream + pub event_stream: ChannelStream>, + + /// Node id + pub id: u64, +} + +/// Describes the functions a vote must implement for it to be aggregatable by the generic vote collection task +pub trait AggregatableVote< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate, +> +{ + /// return the leader for this votes + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey; + + /// return the Hotshot event for the completion of this CERT + fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; +} + +impl< + TYPES: NodeType, + VOTE: Vote + AggregatableVote, + CERT: Certificate + Debug, + > VoteCollectionTaskState +{ + /// Take one vote and accumultate it. Returns either the cert or the updated state + /// after the vote is accumulated + pub async fn accumulate_vote(mut self, vote: &VOTE) -> (Option, Self) { + if vote.get_leader(&self.membership) != self.public_key { + return (None, self); + } + + if vote.get_view_number() != self.view { + error!( + "Vote view does not match! vote view is {} current view is {}", + *vote.get_view_number(), + *self.view + ); + return (None, self); + } + let Some(accumulator) = self.accumulator else { + return (None, self); + }; + match accumulator.accumulate(vote, &self.membership) { + Either::Left(acc) => { + self.accumulator = Some(acc); + (None, self) + } + Either::Right(qc) => { + debug!("Certificate Formed! {:?}", qc); + self.event_stream + .publish(VOTE::make_cert_event(qc, &self.public_key)) + .await; + self.accumulator = None; + (Some(HotShotTaskCompleted::ShutDown), self) + } + } + } +} + +impl< + TYPES: NodeType, + VOTE: Vote + + AggregatableVote + + std::marker::Send + + std::marker::Sync + + 'static, + CERT: Certificate + + Debug + + std::marker::Send + + std::marker::Sync + + 'static, + > TS for VoteCollectionTaskState +{ +} + +/// Types for a vote accumulator Task +pub type VoteTaskStateTypes = HSTWithEvent< + VoteTaskError, + HotShotEvent, + ChannelStream>, + VoteCollectionTaskState, +>; + +/// Trait for types which will handle a vote event. +#[async_trait] +pub trait HandleVoteEvent +where + TYPES: NodeType, + VOTE: Vote + AggregatableVote, + CERT: Certificate + Debug, +{ + /// Handle a vote event + async fn handle_event( + self, + event: HotShotEvent, + ) -> ( + Option, + VoteCollectionTaskState, + ); +} + +/// Info needed to create a vote accumulator task +#[allow(missing_docs)] +pub struct AccumulatorInfo<'a, TYPES: NodeType> { + pub public_key: TYPES::SignatureKey, + pub membership: &'a TYPES::Membership, + pub view: TYPES::Time, + pub event_stream: ChannelStream>, + pub id: u64, + pub registry: GlobalRegistry, +} + +/// Generic function for spawnnig a vote task. Returns the event stream id of the spawned task if created +/// # Panics +/// Calls unwrap but should never panic. +pub async fn spawn_vote_accumulator( + info: &AccumulatorInfo<'_, TYPES>, + vote: VOTE, + event: HotShotEvent, +) -> Option +where + TYPES: NodeType, + VOTE: Vote + + AggregatableVote + + AggregatableVote + + std::marker::Send + + std::marker::Sync + + 'static, + CERT: Certificate + + Debug + + AggregatableVote + + std::marker::Send + + std::marker::Sync + + 'static, + VoteCollectionTaskState: HandleVoteEvent, +{ + if vote.get_leader(info.membership) != info.public_key { + return None; + } + + if vote.get_view_number() != info.view { + error!( + "Vote view does not match! vote view is {} current view is {}", + *vote.get_view_number(), + *info.view + ); + return None; + } + let new_accumulator = VoteAccumulator { + vote_outcomes: HashMap::new(), + sig_lists: Vec::new(), + signers: bitvec![0;info. membership.total_nodes()], + phantom: PhantomData, + }; + + let mut state = VoteCollectionTaskState:: { + event_stream: info.event_stream.clone(), + membership: info.membership.clone(), + public_key: info.public_key.clone(), + accumulator: Some(new_accumulator), + view: info.view, + id: info.id, + }; + + let result = state.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return None; + } + + state = result.1; + + let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); + + let relay_handle_event = HandleEvent(Arc::new( + move |event, state: VoteCollectionTaskState| { + async move { state.handle_event(event).await }.boxed() + }, + )); + + let filter = FilterEvent::default(); + let builder = TaskBuilder::>::new(name) + .register_event_stream(state.event_stream.clone(), filter) + .await + .register_registry(&mut info.registry.clone()) + .await + .register_state(state) + .register_event_handler(relay_handle_event); + + let event_stream_id = builder.get_stream_id().unwrap(); + + let _task = async_spawn(async move { VoteTaskStateTypes::build(builder).launch().await }); + + Some(event_stream_id) +} + +/// Alias for Quorum vote accumulator +type QuorumVoteState = + VoteCollectionTaskState, QuorumCertificate>; +/// Alias for Quorum vote accumulator +type DAVoteState = VoteCollectionTaskState, DACertificate>; +/// Alias for Quorum vote accumulator +type TimeoutVoteState = + VoteCollectionTaskState, TimeoutCertificate>; +/// Alias for Quorum vote accumulator +type ViewSyncPreCommitState = VoteCollectionTaskState< + TYPES, + ViewSyncPreCommitVote, + ViewSyncPreCommitCertificate2, +>; +/// Alias for Quorum vote accumulator +type ViewSyncCommitVoteState = + VoteCollectionTaskState, ViewSyncCommitCertificate2>; +/// Alias for Quorum vote accumulator +type ViewSyncFinalizeVoteState = VoteCollectionTaskState< + TYPES, + ViewSyncFinalizeVote, + ViewSyncFinalizeCertificate2, +>; + +impl AggregatableVote, QuorumCertificate> + for QuorumVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: QuorumCertificate, + _key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::QCFormed(Left(certificate)) + } +} + +impl AggregatableVote, DACertificate> + for DAVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: DACertificate, + key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::DACSend(certificate, key.clone()) + } +} + +impl AggregatableVote, TimeoutCertificate> + for TimeoutVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: TimeoutCertificate, + _key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::QCFormed(Right(certificate)) + } +} + +impl + AggregatableVote, ViewSyncCommitCertificate2> + for ViewSyncCommitVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: ViewSyncCommitCertificate2, + key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::ViewSyncCommitCertificate2Send(certificate, key.clone()) + } +} + +impl + AggregatableVote, ViewSyncPreCommitCertificate2> + for ViewSyncPreCommitVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: ViewSyncPreCommitCertificate2, + key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, key.clone()) + } +} + +impl + AggregatableVote, ViewSyncFinalizeCertificate2> + for ViewSyncFinalizeVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: ViewSyncFinalizeCertificate2, + key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, key.clone()) + } +} + +// Handlers for all vote accumulators +#[async_trait] +impl HandleVoteEvent, QuorumCertificate> + for QuorumVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, QuorumVoteState) { + match event { + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } +} + +#[async_trait] +impl HandleVoteEvent, DACertificate> + for DAVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, DAVoteState) { + match event { + HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } +} + +#[async_trait] +impl HandleVoteEvent, TimeoutCertificate> + for TimeoutVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, TimeoutVoteState) { + match event { + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } +} + +#[async_trait] +impl + HandleVoteEvent, ViewSyncPreCommitCertificate2> + for ViewSyncPreCommitState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, ViewSyncPreCommitState) { + match event { + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } +} + +#[async_trait] +impl + HandleVoteEvent, ViewSyncCommitCertificate2> + for ViewSyncCommitVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, ViewSyncCommitVoteState) { + match event { + HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } +} + +#[async_trait] +impl + HandleVoteEvent, ViewSyncFinalizeCertificate2> + for ViewSyncFinalizeVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> ( + Option, + ViewSyncFinalizeVoteState, + ) { + match event { + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } +} From 7367fc0105e308ffe49a318bf6c169e47d6947ae Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 20 Nov 2023 15:56:21 -0800 Subject: [PATCH 0433/1393] Fix build --- hotshot/src/demo.rs | 3 +- hotshot/src/lib.rs | 2 +- hotshot/src/tasks/mod.rs | 5 +++- task-impls/src/consensus.rs | 4 +-- task-impls/src/da.rs | 14 ++------- task-impls/src/transactions.rs | 3 +- testing/src/task_helpers.rs | 5 ++-- testing/tests/da_task.rs | 24 +++++++-------- testing/tests/network_task.rs | 22 +++++--------- testing/tests/vid_task.rs | 18 +++++------- types/src/block_impl.rs | 4 +-- types/src/data.rs | 47 ++++++++++++++---------------- types/src/simple_certificate.rs | 5 ++-- types/src/simple_vote.rs | 4 +-- types/src/traits/block_contents.rs | 10 ------- 15 files changed, 67 insertions(+), 103 deletions(-) diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs index 57d3e8a284..3ef2fafc25 100644 --- a/hotshot/src/demo.rs +++ b/hotshot/src/demo.rs @@ -12,9 +12,8 @@ use derivative::Derivative; use hotshot_signature_key::bn254::BLSPubKey; use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::{fake_commitment, ViewNumber}, + data::{fake_commitment, BlockError, ViewNumber}, traits::{ - block_contents::BlockError, election::Membership, node_implementation::NodeType, state::{ConsensusTime, TestableState}, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 546063cd10..4089485786 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -53,7 +53,7 @@ use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, - data::{Leaf, VidCommitment}, + data::Leaf, error::StorageSnafu, message::{ DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9d8d1925ab..26d1d35f88 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -27,6 +27,7 @@ use hotshot_types::{ event::Event, message::Messages, traits::{ + block_contents::vid_commitment, consensus_api::ConsensusSharedApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{NodeImplementation, NodeType}, @@ -204,13 +205,15 @@ pub async fn add_consensus_task>( }; let registry = task_runner.registry.clone(); let (payload, metadata) = ::genesis(); + // Impossible for `unwrap` to fail on the genesis payload. + let payload_commitment = vid_commitment(payload.encode().unwrap().collect()); // build the consensus task let consensus_state = ConsensusTaskState { registry: registry.clone(), consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - payload_commitment_and_metadata: Some((payload.commit(), metadata)), + payload_commitment_and_metadata: Some((payload_commitment, metadata)), api: c_api.clone(), _pd: PhantomData, vote_collector: None, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ab2da40911..36b4ce6d95 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -4,7 +4,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use bitvec::prelude::*; -use commit::{Commitment, Committable}; +use commit::Committable; use core::time::Duration; use either::Either; use futures::FutureExt; @@ -739,7 +739,7 @@ impl, A: ConsensusApi + .saved_block_payloads .get(leaf.get_payload_commitment()) { - if let Err(e) = leaf.fill_block_payload(payload) { + if let Err(e) = leaf.fill_block_payload(payload.clone()) { error!( "Saved block payload and commitment don't match: {:?}", e diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 59ddb89e83..e5a0034f4d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -195,18 +195,12 @@ impl, A: ConsensusApi + return None; } - // TODO (Keyao) - // debug!( - // "Got a DA block with {} transactions!", - // proposal.data.block_payload.transaction_commitments().len() - // ); let payload_commitment = vid_commitment(proposal.data.encoded_transactions.clone()); // ED Is this the right leader? let view_leader_key = self.da_membership.get_leader(view); if view_leader_key != sender { - // TODO (Keyao) - // error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); + error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); return None; } @@ -376,16 +370,14 @@ impl, A: ConsensusApi + return None; } - HotShotEvent::BlockReady(payload, metadata, view) => { + HotShotEvent::BlockReady(encoded_transactions, metadata, view) => { self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - let payload_commitment = payload.commit(); + let payload_commitment = vid_commitment(encoded_transactions.clone()); let signature = TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); - // TODO (Keyao) Fix the payload sending and receiving for the DA proposal. - // let data: DAProposal = DAProposal { encoded_transactions, metadata: metadata.clone(), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4c6b7fd73e..6d5014a023 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -257,10 +257,9 @@ impl, A: ConsensusApi + shares: vid_disperse.shares, common: vid_disperse.common, }, - // TODO (Keyao) This is also signed in DA task. signature: TYPES::SignatureKey::sign( &self.private_key, - payload.commit().as_ref(), + &vid_disperse.commit, ), _pd: PhantomData, }, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ecc9677a07..50204bc6c3 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -19,12 +19,13 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ block_contents::BlockHeader, - block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, + block_contents::{vid_commitment, NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusSharedApi, election::Membership, node_implementation::NodeType, signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, + BlockPayload, }, vote::HasViewNumber, }; @@ -125,7 +126,7 @@ async fn build_quorum_proposal_and_signature( // every event input is seen on the event stream in the output. let block = ::genesis(); - let payload_commitment = block.commit(); + let payload_commitment = vid_commitment(block.encode().unwrap().collect()); let block_header = VIDBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); let leaf = Leaf { view_number: ViewNumber::new(view), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 2441e61f39..1892f8c71e 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,14 +1,13 @@ -use commit::Committable; use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{ - block_impl::vid_commitment, block_impl::VIDTransaction, data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, traits::{ - consensus_api::ConsensusSharedApi, node_implementation::NodeType, state::ConsensusTime, + block_contents::vid_commitment, consensus_api::ConsensusSharedApi, + node_implementation::NodeType, state::ConsensusTime, }, }; use std::{collections::HashMap, marker::PhantomData}; @@ -22,7 +21,7 @@ async fn test_da_task() { use hotshot::tasks::add_da_task; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{block_impl::VIDBlockPayload, message::Proposal}; + use hotshot_types::message::Proposal; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -35,16 +34,13 @@ async fn test_da_task() { let pub_key = *api.public_key(); let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); - let payload_commitment = vid_commitment(&encoded_transactions); - let block = VIDBlockPayload { - transactions, - payload_commitment, - }; + let payload_commitment = vid_commitment(encoded_transactions.clone()); let signature = - ::SignatureKey::sign(api.private_key(), block.commit().as_ref()); + ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); let proposal = DAProposal { - encoded_transactions, + encoded_transactions: encoded_transactions.clone(), + metadata: (), view_number: ViewNumber::new(2), }; let message = Proposal { @@ -64,7 +60,7 @@ async fn test_da_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::BlockReady( - encoded_transactions, + encoded_transactions.clone(), (), ViewNumber::new(2), )); @@ -78,13 +74,13 @@ async fn test_da_task() { 1, ); output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(block.commit(), ()), + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), 1, ); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); let da_vote = DAVote::create_signed_vote( DAData { - payload_commit: block.commit(), + payload_commit: payload_commitment, }, ViewNumber::new(2), api.public_key(), diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index da7d525471..91cbf3a560 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,4 +1,3 @@ -use commit::Committable; use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ @@ -21,11 +20,7 @@ use std::{collections::HashMap, marker::PhantomData}; async fn test_network_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, - data::VidDisperse, - message::Proposal, - }; + use hotshot_types::{block_impl::VIDTransaction, data::VidDisperse, message::Proposal}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -42,18 +37,15 @@ async fn test_network_task() { let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let block = VIDBlockPayload { - transactions, - payload_commitment, - }; let signature = ::SignatureKey::sign( api.private_key(), - block.commit().as_ref(), + payload_commitment.as_ref(), ); let da_proposal = Proposal { data: DAProposal { - encoded_transactions, + encoded_transactions: encoded_transactions.clone(), + metadata: (), view_number: ViewNumber::new(2), }, signature, @@ -65,7 +57,7 @@ async fn test_network_task() { let da_vid_disperse = Proposal { data: VidDisperse { view_number: da_proposal.data.view_number, - payload_commitment: block.commit(), + payload_commitment, shares: vid_disperse.shares, common: vid_disperse.common, }, @@ -79,7 +71,7 @@ async fn test_network_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::BlockReady( - encoded_transactions, + encoded_transactions.clone(), (), ViewNumber::new(2), )); @@ -125,7 +117,7 @@ async fn test_network_task() { 1, ); output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(block.commit(), ()), + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), 1, ); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index aad70254cc..ace93a2115 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,4 +1,3 @@ -use commit::Committable; use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ @@ -22,7 +21,7 @@ use std::marker::PhantomData; async fn test_vid_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{block_impl::VIDBlockPayload, message::Proposal}; + use hotshot_types::message::Proposal; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -39,15 +38,12 @@ async fn test_vid_task() { let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let block = VIDBlockPayload { - transactions, - payload_commitment, - }; let signature = - ::SignatureKey::sign(api.private_key(), block.commit().as_ref()); + ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); let proposal: DAProposal = DAProposal { - encoded_transactions, + encoded_transactions: encoded_transactions.clone(), + metadata: (), view_number: ViewNumber::new(2), }; let message = Proposal { @@ -58,7 +54,7 @@ async fn test_vid_task() { let vid_proposal = Proposal { data: VidDisperse { view_number: message.data.view_number, - payload_commitment: block.commit(), + payload_commitment, shares: vid_disperse.shares, common: vid_disperse.common, }, @@ -74,7 +70,7 @@ async fn test_vid_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::BlockReady( - encoded_transactions, + encoded_transactions.clone(), (), ViewNumber::new(2), )); @@ -90,7 +86,7 @@ async fn test_vid_task() { let vid_vote = VIDVote::create_signed_vote( hotshot_types::simple_vote::VIDData { - payload_commit: block.commit(), + payload_commit: payload_commitment, }, ViewNumber::new(2), api.public_key(), diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 8c48024914..d11b395d87 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -7,9 +7,9 @@ use std::{ }; use crate::{ - data::{VidCommitment, VidScheme, VidSchemeTrait}, + data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{vid_commitment, BlockError, BlockHeader, Transaction}, + block_contents::{vid_commitment, BlockHeader, Transaction}, state::TestableBlock, BlockPayload, }, diff --git a/types/src/data.rs b/types/src/data.rs index 998e353736..a6ee8becc1 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -6,6 +6,7 @@ use crate::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, traits::{ + block_contents::vid_commitment, block_contents::BlockHeader, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, @@ -257,20 +258,15 @@ pub trait DeltasType: fn fill(&mut self, block: PAYLOAD) -> Result<(), Self::Error>; } -/// Error which occurs when [`Leaf::fill_block_payload`] is called with a payload commitment -/// that does not match the internal payload commitment of the leaf. -#[derive(Clone, Copy, Debug, Snafu)] -#[snafu(display( - "the block payload {:?} has commitment (expected )", - payload, - // vid_commitment(payload), - // commitment -))] -pub struct InconsistentPayloadCommitmentError { - /// The block payload with the wrong commitment. - payload: PAYLOAD, - // /// The expected commitment. - // commitment: VidCommitment, +/// The error type for block and its transactions. +#[derive(Snafu, Debug)] +pub enum BlockError { + /// Invalid block header. + InvalidBlockHeader, + /// Invalid transaction length. + InvalidTransactionLength, + /// Inconsistent payload commitment. + InconsistentPayloadCommitment, } /// Additional functions required to use a [`Leaf`] with hotshot-testing. @@ -399,17 +395,18 @@ impl Leaf { /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()`. pub fn fill_block_payload( &mut self, - _block_payload: &TYPES::BlockPayload, - ) -> Result<(), InconsistentPayloadCommitmentError> { - unimplemented!("TODO Keyao"); - // if block_payload.commit() != self.block_header.payload_commitment() { - // return Err(InconsistentPayloadCommitmentError { - // payload: block_payload, - // // commitment: self.block_header.payload_commitment(), - // }); - // } - // self.block_payload = Some(block_payload); - // Ok(()) + block_payload: TYPES::BlockPayload, + ) -> Result<(), BlockError> { + let encoded_txns = match block_payload.encode() { + Ok(encoded) => encoded.into_iter().collect(), + Err(_) => return Err(BlockError::InvalidTransactionLength), + }; + let commitment = vid_commitment(encoded_txns); + if commitment != self.block_header.payload_commitment() { + return Err(BlockError::InconsistentPayloadCommitment); + } + self.block_payload = Some(block_payload); + Ok(()) } /// Optional block payload. pub fn get_block_payload(&self) -> Option { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 0b19c83a8f..638891e3fa 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -125,12 +125,11 @@ impl QuorumCertificate { /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` pub type QuorumCertificate = SimpleCertificate>; /// Type alias for a DA certificate over `DAData` -pub type DACertificate = SimpleCertificate::BlockPayload>>; +pub type DACertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate = SimpleCertificate>; /// type alias for a VID certificate -pub type VIDCertificate = - SimpleCertificate::BlockPayload>>; +pub type VIDCertificate = SimpleCertificate; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` /// Type alias for a `ViewSyncPreCommit` certificate over a view number diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 1ada57c921..94f3f52429 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -208,9 +208,9 @@ impl = SimpleVote>; /// DA vote type alias -pub type DAVote = SimpleVote::BlockPayload>>; +pub type DAVote = SimpleVote; /// VID vote type alias -pub type VIDVote = SimpleVote::BlockPayload>>; +pub type VIDVote = SimpleVote; /// Timeout Vote type alias pub type TimeoutVote = SimpleVote>; /// View Sync Commit Vote type alias diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 95d600fc46..452266e627 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -6,7 +6,6 @@ use crate::data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}; use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; -use snafu::Snafu; use std::{ error::Error, @@ -21,15 +20,6 @@ pub const NUM_STORAGE_NODES: usize = 8; /// Number of chunks for VID initiation. pub const NUM_CHUNKS: usize = 8; -/// The error type for block and its transactions. -#[derive(Snafu, Debug)] -pub enum BlockError { - /// Invalid block header. - InvalidBlockHeader, - /// Invalid transaction length. - InvalidTransactionLength, -} - /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash From 27bc418115636d8627cc7d675986d6f74f9083b0 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 20 Nov 2023 17:57:00 -0800 Subject: [PATCH 0434/1393] Increase next_view_timeout to make tests pass --- testing/src/test_builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index b31657c15d..8667b32acc 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -64,7 +64,7 @@ pub struct TestMetadata { impl Default for TimingData { fn default() -> Self { Self { - next_view_timeout: 1000, + next_view_timeout: 1500, timeout_ratio: (11, 10), round_start_delay: 100, start_delay: 100, @@ -144,7 +144,7 @@ impl TestMetadata { ..Default::default() }, timing_data: TimingData { - next_view_timeout: 1000, + next_view_timeout: 3000, ..TimingData::default() }, ..TestMetadata::default() From 3f74a5f201de50e35d73cf7e6fb0c90c580dceba Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 21 Nov 2023 10:14:56 -0500 Subject: [PATCH 0435/1393] Fix Develop Lints for Clippy Update (#2093) * fix dht lint * Update task_impls panic lint * try installing the latest toolchain --------- Co-authored-by: elliedavidson <118024407+elliedavidson@users.noreply.github.com> --- libp2p-networking/tests/counter.rs | 1 - task/src/task_impls.rs | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index d0cbd88569..bb803cee02 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -330,7 +330,6 @@ async fn run_dht_rounds( } Ok(v) => { assert_eq!(v, value); - break; } } } diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs index 057717057b..768e011775 100644 --- a/task/src/task_impls.rs +++ b/task/src/task_impls.rs @@ -299,6 +299,7 @@ pub mod test { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[allow(clippy::should_panic_without_expect)] #[should_panic] async fn test_init_with_event_stream() { setup_logging(); From 56f0d18ac8300c20e3e840df798c22beaac72d08 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 21 Nov 2023 10:27:35 -0500 Subject: [PATCH 0436/1393] All types needed to replace vote acc tasks --- task-impls/src/vote.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 1354bb70a5..9faf64ea90 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -151,6 +151,9 @@ where Option, VoteCollectionTaskState, ); + + /// Event filter to use for this event + fn filter(event: &HotShotEvent) -> bool; } /// Info needed to create a vote accumulator task @@ -233,7 +236,9 @@ where }, )); - let filter = FilterEvent::default(); + let filter = FilterEvent(Arc::new( + as HandleVoteEvent>::filter, + )); let builder = TaskBuilder::>::new(name) .register_event_stream(state.event_stream.clone(), filter) .await @@ -374,6 +379,9 @@ impl HandleVoteEvent, QuorumCertificat _ => (None, self), } } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::QuorumVoteRecv(_)) + } } #[async_trait] @@ -389,6 +397,9 @@ impl HandleVoteEvent, DACertificate _ => (None, self), } } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::DAVoteRecv(_)) + } } #[async_trait] @@ -404,6 +415,9 @@ impl HandleVoteEvent, TimeoutCertific _ => (None, self), } } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::TimeoutVoteRecv(_)) + } } #[async_trait] @@ -420,6 +434,9 @@ impl _ => (None, self), } } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::ViewSyncPreCommitVoteRecv(_)) + } } #[async_trait] @@ -436,6 +453,9 @@ impl _ => (None, self), } } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::ViewSyncCommitVoteRecv(_)) + } } #[async_trait] @@ -455,4 +475,7 @@ impl _ => (None, self), } } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::ViewSyncFinalizeVoteRecv(_)) + } } From d03d928ab88efcee4b4d6caf4442c58cd1292e03 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 21 Nov 2023 08:04:07 -0800 Subject: [PATCH 0437/1393] Remove unused code --- types/src/data.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/types/src/data.rs b/types/src/data.rs index a6ee8becc1..1d10340162 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -128,28 +128,6 @@ pub type VidScheme = jf_primitives::vid::advz::Advz::Commit; -// pub type VidCommitment = Commitment<::Commit>; -// struct DummyVidCommitment; -// impl Committable for DummyVidCommitment { -// fn commit(&self) -> Commitment { -// Commitment([0u8; 32], PhantomData) -// } - -// fn tag() -> String { -// "DUMMY_TAG".to_string() -// } -// } - -// impl Committable for VidCommit { -// fn commit(&self) -> Commitment { -// as CanonicalDeserialize>::deserialize(self) -// .expect("conversion from VidScheme::Commit to Commitment should succeed") -// } - -// fn tag() -> String { -// "VID_COMMIT".to_string() -// } -// } /// VID dispersal data /// From 8e1e5c1cd89520f3734e462e1c10d6a0acd81390 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 21 Nov 2023 12:42:39 -0500 Subject: [PATCH 0438/1393] starting to integrate to consensus task --- task-impls/src/consensus.rs | 276 ++++++++++++++++++------------------ task-impls/src/vote.rs | 12 +- 2 files changed, 144 insertions(+), 144 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 22d6b26c1d..d5a86712c1 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -129,144 +129,144 @@ pub struct ConsensusTaskState< pub id: u64, } -/// State for the vote collection task. This handles the building of a QC from a votes received -pub struct VoteCollectionTaskState> { - /// Network for all nodes - pub quorum_network: Arc, - /// Membership for Timeout votes/certs - pub timeout_membership: Arc, - /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, - - #[allow(clippy::type_complexity)] - /// Accumulator for votes - pub accumulator: Either< - VoteAccumulator, QuorumCertificate>, - QuorumCertificate, - >, - - /// Accumulator for votes - #[allow(clippy::type_complexity)] - pub timeout_accumulator: Either< - VoteAccumulator, TimeoutCertificate>, - TimeoutCertificate, - >, - /// View which this vote collection task is collecting votes in - pub cur_view: TYPES::Time, - /// The event stream shared by all tasks - pub event_stream: ChannelStream>, - /// Node id - pub id: u64, -} - -impl> TS for VoteCollectionTaskState {} - -#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] - -async fn vote_handle>( - mut state: VoteCollectionTaskState, - event: HotShotEvent, -) -> ( - std::option::Option, - VoteCollectionTaskState, -) { - match event { - HotShotEvent::QuorumVoteRecv(vote) => { - // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { - return (None, state); - } - - if vote.get_view_number() != state.cur_view { - error!( - "Vote view does not match! vote view is {} current view is {}", - *vote.get_view_number(), - *state.cur_view - ); - return (None, state); - } - - let accumulator = state.accumulator.left().unwrap(); - - match accumulator.accumulate(&vote, &state.quorum_membership) { - Either::Left(acc) => { - state.accumulator = Either::Left(acc); - return (None, state); - } - Either::Right(qc) => { - debug!("QCFormed! {:?}", qc.view_number); - state - .event_stream - .publish(HotShotEvent::QCFormed(either::Left(qc.clone()))) - .await; - state.accumulator = Either::Right(qc.clone()); - - // No longer need to poll for votes - state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; - - return (Some(HotShotTaskCompleted::ShutDown), state); - } - } - } - // TODO: Code below is redundant of code above; can be fixed - // during exchange refactor - // https://github.com/EspressoSystems/HotShot/issues/1799 - HotShotEvent::TimeoutVoteRecv(vote) => { - debug!("Received timeout vote for view {}", *vote.get_view_number()); - if state.timeout_accumulator.is_right() { - return (None, state); - } - - if vote.get_view_number() != state.cur_view { - error!( - "Vote view does not match! vote view is {} current view is {}", - *vote.get_view_number(), - *state.cur_view - ); - return (None, state); - } - - let accumulator = state.timeout_accumulator.left().unwrap(); - - match accumulator.accumulate(&vote, &state.timeout_membership) { - Either::Left(acc) => { - state.timeout_accumulator = Either::Left(acc); - return (None, state); - } - Either::Right(qc) => { - debug!("QCFormed! {:?}", qc.view_number); - state - .event_stream - .publish(HotShotEvent::QCFormed(either::Right(qc.clone()))) - .await; - state.timeout_accumulator = Either::Right(qc.clone()); - - // No longer need to poll for votes - state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; - - return (Some(HotShotTaskCompleted::ShutDown), state); - } - } - } - HotShotEvent::Shutdown => { - return (Some(HotShotTaskCompleted::ShutDown), state); - } - _ => { - error!("Unexpected event"); - } - } - (None, state) -} +// /// State for the vote collection task. This handles the building of a QC from a votes received +// pub struct VoteCollectionTaskState> { +// /// Network for all nodes +// pub quorum_network: Arc, +// /// Membership for Timeout votes/certs +// pub timeout_membership: Arc, +// /// Membership for Quorum Certs/votes +// pub quorum_membership: Arc, + +// #[allow(clippy::type_complexity)] +// /// Accumulator for votes +// pub accumulator: Either< +// VoteAccumulator, QuorumCertificate>, +// QuorumCertificate, +// >, + +// /// Accumulator for votes +// #[allow(clippy::type_complexity)] +// pub timeout_accumulator: Either< +// VoteAccumulator, TimeoutCertificate>, +// TimeoutCertificate, +// >, +// /// View which this vote collection task is collecting votes in +// pub cur_view: TYPES::Time, +// /// The event stream shared by all tasks +// pub event_stream: ChannelStream>, +// /// Node id +// pub id: u64, +// } + +// impl> TS for VoteCollectionTaskState {} + +// #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] + +// async fn vote_handle>( +// mut state: VoteCollectionTaskState, +// event: HotShotEvent, +// ) -> ( +// std::option::Option, +// VoteCollectionTaskState, +// ) { +// match event { +// HotShotEvent::QuorumVoteRecv(vote) => { +// // For the case where we receive votes after we've made a certificate +// if state.accumulator.is_right() { +// return (None, state); +// } + +// if vote.get_view_number() != state.cur_view { +// error!( +// "Vote view does not match! vote view is {} current view is {}", +// *vote.get_view_number(), +// *state.cur_view +// ); +// return (None, state); +// } + +// let accumulator = state.accumulator.left().unwrap(); + +// match accumulator.accumulate(&vote, &state.quorum_membership) { +// Either::Left(acc) => { +// state.accumulator = Either::Left(acc); +// return (None, state); +// } +// Either::Right(qc) => { +// debug!("QCFormed! {:?}", qc.view_number); +// state +// .event_stream +// .publish(HotShotEvent::QCFormed(either::Left(qc.clone()))) +// .await; +// state.accumulator = Either::Right(qc.clone()); + +// // No longer need to poll for votes +// state +// .quorum_network +// .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( +// *qc.view_number, +// )) +// .await; + +// return (Some(HotShotTaskCompleted::ShutDown), state); +// } +// } +// } +// // TODO: Code below is redundant of code above; can be fixed +// // during exchange refactor +// // https://github.com/EspressoSystems/HotShot/issues/1799 +// HotShotEvent::TimeoutVoteRecv(vote) => { +// debug!("Received timeout vote for view {}", *vote.get_view_number()); +// if state.timeout_accumulator.is_right() { +// return (None, state); +// } + +// if vote.get_view_number() != state.cur_view { +// error!( +// "Vote view does not match! vote view is {} current view is {}", +// *vote.get_view_number(), +// *state.cur_view +// ); +// return (None, state); +// } + +// let accumulator = state.timeout_accumulator.left().unwrap(); + +// match accumulator.accumulate(&vote, &state.timeout_membership) { +// Either::Left(acc) => { +// state.timeout_accumulator = Either::Left(acc); +// return (None, state); +// } +// Either::Right(qc) => { +// debug!("QCFormed! {:?}", qc.view_number); +// state +// .event_stream +// .publish(HotShotEvent::QCFormed(either::Right(qc.clone()))) +// .await; +// state.timeout_accumulator = Either::Right(qc.clone()); + +// // No longer need to poll for votes +// state +// .quorum_network +// .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( +// *qc.view_number, +// )) +// .await; + +// return (Some(HotShotTaskCompleted::ShutDown), state); +// } +// } +// } +// HotShotEvent::Shutdown => { +// return (Some(HotShotTaskCompleted::ShutDown), state); +// } +// _ => { +// error!("Unexpected event"); +// } +// } +// (None, state) +// } impl, A: ConsensusApi + 'static> ConsensusTaskState diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 9faf64ea90..1700ff26f6 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -174,7 +174,8 @@ pub async fn spawn_vote_accumulator( info: &AccumulatorInfo<'_, TYPES>, vote: VOTE, event: HotShotEvent, -) -> Option + name: String, +) -> Option<(TYPES::Time, usize, usize> where TYPES: NodeType, VOTE: Vote @@ -228,8 +229,6 @@ where state = result.1; - let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); - let relay_handle_event = HandleEvent(Arc::new( move |event, state: VoteCollectionTaskState| { async move { state.handle_event(event).await }.boxed() @@ -248,10 +247,11 @@ where .register_event_handler(relay_handle_event); let event_stream_id = builder.get_stream_id().unwrap(); + let id = builder.get_task_id().unwrap() let _task = async_spawn(async move { VoteTaskStateTypes::build(builder).launch().await }); - Some(event_stream_id) + Some((vote.get_view_number(), id, event_stream_id)) } /// Alias for Quorum vote accumulator @@ -282,7 +282,7 @@ impl AggregatableVote, QuorumCertifica for QuorumVote { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + membership.get_leader(self.get_view_number() + 1) } fn make_cert_event( certificate: QuorumCertificate, @@ -310,7 +310,7 @@ impl AggregatableVote, TimeoutCertifi for TimeoutVote { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + membership.get_leader(self.get_view_number() + 1) } fn make_cert_event( certificate: TimeoutCertificate, From 0a754215accf2d13457440c9e0131eeee4e3d0da Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 22 Nov 2023 13:59:53 -0800 Subject: [PATCH 0439/1393] recheck VidDisperseSend, VidDisperseRecv, VidVoteSend, VidVoteRecv, VidCertSend, and VidCertRecv --- task-impls/src/consensus.rs | 2 +- task-impls/src/network.rs | 1 - task-impls/src/vid.rs | 56 ++++++++++++++----------------------- 3 files changed, 22 insertions(+), 37 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 22d6b26c1d..309509a20f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1103,7 +1103,7 @@ impl, A: ConsensusApi + let view = cert.get_view_number(); self.vid_certs.insert(view, cert); - + // Sishan NOTE TODO // RM TODO: VOTING } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 78f0beaab8..e3d2fef958 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -98,7 +98,6 @@ impl NetworkMessageTaskState { HotShotEvent::DAVoteRecv(vote.clone()) } CommitteeConsensusMessage::DACertificate(cert) => { - // panic!("Recevid DA C! "); HotShotEvent::DACRecv(cert) } CommitteeConsensusMessage::VidDisperseMsg(proposal) => { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 4628fc1a67..e31b89d73c 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -13,7 +13,7 @@ use hotshot_task::{ }; use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ - consensus::{Consensus, View}, + consensus::Consensus, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -21,7 +21,6 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, }, - utils::ViewInner, }; use hotshot_types::{ simple_certificate::VIDCertificate, @@ -117,7 +116,6 @@ where "VID vote recv, collection task {:?}", vote.get_view_number() ); - // panic!("Vote handle received VID vote for view {}", *vote.current_view); // For the case where we receive votes after we've made a certificate if state.accumulator.is_right() { @@ -172,11 +170,6 @@ impl, A: ConsensusApi + ) -> Option { match event { HotShotEvent::VidVoteRecv(vote) => { - // warn!( - // "VID vote recv, Main Task {:?}, key: {:?}", - // vote.current_view, - // self.committee_exchange.public_key() - // ); // Check if we are the leader and the vote is from the sender. let view = vote.get_view_number(); if self.membership.get_leader(view) != self.public_key { @@ -248,11 +241,9 @@ impl, A: ConsensusApi + }; } HotShotEvent::VidDisperseRecv(disperse, sender) => { - // TODO copy-pasted from DAProposalRecv https://github.com/EspressoSystems/HotShot/issues/1690 - debug!( - "VID disperse received for view: {:?}", - disperse.data.get_view_number() - ); + let view = disperse.data.get_view_number(); + + debug!("VID disperse received for view: {:?}", view); // stop polling for the received disperse self.network @@ -261,12 +252,9 @@ impl, A: ConsensusApi + )) .await; - // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view - let view = disperse.data.get_view_number(); - // Allow VID disperse date that is one view older, in case we have updated the // view. - // Adding `+ 1` on the LHS rather tahn `- 1` on the RHS, to avoid the overflow + // Adding `+ 1` on the LHS rather than `- 1` on the RHS, to avoid the overflow // error due to subtracting the genesis view number. if view + 1 < self.cur_view { warn!("Throwing away VID disperse data that is more than one view older"); @@ -276,7 +264,7 @@ impl, A: ConsensusApi + debug!("VID disperse data is fresh."); let payload_commitment = disperse.data.payload_commitment; - // ED Is this the right leader? + // Check whether the sender is the right leader for this view let view_leader_key = self.membership.get_leader(view); if view_leader_key != sender { error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); @@ -287,16 +275,16 @@ impl, A: ConsensusApi + error!("Could not verify VID proposal sig."); return None; } - + // Sishan NOTE TODO: check whether this part is needed? How consensus committee functioned in vid task? if !self.membership.has_stake(&self.public_key) { - debug!( - "We were not chosen for consensus committee on {:?}", + error!( + "We were not chosen for consensus-vid committee on {:?}", self.cur_view ); return None; } - // Generate and send vote + // Generate and send vote after receive and validate disperse (VID share) let vote = VIDVote::create_signed_vote( VIDData { payload_commit: payload_commitment, @@ -306,9 +294,6 @@ impl, A: ConsensusApi + &self.private_key, ); - // ED Don't think this is necessary? - // self.cur_view = view; - debug!( "Sending vote to the VID leader {:?}", vote.get_view_number() @@ -316,16 +301,17 @@ impl, A: ConsensusApi + self.event_stream .publish(HotShotEvent::VidVoteSend(vote)) .await; - let mut consensus = self.consensus.write().await; - - // Ensure this view is in the view map for garbage collection, but do not overwrite if - // there is already a view there: the replica task may have inserted a `Leaf` view which - // contains strictly more information. - consensus.state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { - block: payload_commitment, - }, - }); + + // Sishan NOTE TODO: what is consensus.state_map? + // let mut consensus = self.consensus.write().await; + // // Ensure this view is in the view map for garbage collection, but do not overwrite if + // // there is already a view there: the replica task may have inserted a `Leaf` view which + // // contains strictly more information. + // consensus.state_map.entry(view).or_insert(View { + // view_inner: ViewInner::DA { + // block: payload_commitment, + // }, + // }); // Record the block we have promised to make available. // TODO https://github.com/EspressoSystems/HotShot/issues/1692 From baf577af287b6b401cf6afd6335f7a00ca02beee Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Fri, 24 Nov 2023 15:34:25 -0500 Subject: [PATCH 0440/1393] feat: towards nuking encoded{signature, publickey} --- hotshot-signature-key/src/bn254.rs | 2 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 37 +++++++------------- task-impls/src/consensus.rs | 10 +++--- testing/src/task_helpers.rs | 4 +-- testing/tests/consensus_task.rs | 2 +- testing/tests/network_task.rs | 5 +-- types/src/data.rs | 11 +++--- types/src/message.rs | 3 +- types/src/simple_vote.rs | 10 +++--- types/src/traits/signature_key.rs | 5 +-- types/src/traits/storage.rs | 4 +-- types/src/vote.rs | 23 ++++-------- 12 files changed, 49 insertions(+), 67 deletions(-) diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs index 2414c89c9c..ea0f395fb0 100644 --- a/hotshot-signature-key/src/bn254.rs +++ b/hotshot-signature-key/src/bn254.rs @@ -1,5 +1,5 @@ //! Demonstration implementation of the [`SignatureKey`] trait using BN254 -use hotshot_types::traits::signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}; +use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; /// `BLSPrivKey` implementation mod bn254_priv; /// `BLSPubKey` implementation diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index f08c03e144..3d3d605287 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -1,4 +1,4 @@ -use super::{BLSPrivKey, EncodedPublicKey, EncodedSignature, SignatureKey}; +use super::{BLSPrivKey, EncodedPublicKey, SignatureKey}; use bincode::Options; use bitvec::prelude::*; use blake3::traits::digest::generic_array::GenericArray; @@ -53,21 +53,15 @@ impl SignatureKey for BLSPubKey { type QCType = (Self::PureAssembledSignatureType, BitVec); #[instrument(skip(self))] - fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool { + fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { let ver_key = self.pub_key; - let x: Result<::Signature, _> = - bincode_opts().deserialize(&signature.0); - match x { - Ok(s) => { - // This is the validation for QC partial signature before append(). - let generic_msg: &GenericArray = GenericArray::from_slice(data); - BLSOverBN254CurveSignatureScheme::verify(&(), &ver_key, generic_msg, &s).is_ok() - } - Err(_) => false, - } + + // This is the validation for QC partial signature before append(). + let generic_msg: &GenericArray = GenericArray::from_slice(data); + BLSOverBN254CurveSignatureScheme::verify(&(), &ver_key, generic_msg, &signature).is_ok() } - fn sign(sk: &Self::PrivateKey, data: &[u8]) -> EncodedSignature { + fn sign(sk: &Self::PrivateKey, data: &[u8]) -> Self::PureAssembledSignatureType { let generic_msg = GenericArray::from_slice(data); let agg_signature_wrap = BitVectorQC::::sign( &(), @@ -77,19 +71,12 @@ impl SignatureKey for BLSPubKey { ); match agg_signature_wrap { Ok(agg_signature) => { - // Convert the signature to bytes and return - let bytes = bincode_opts().serialize(&agg_signature); - match bytes { - Ok(bytes) => EncodedSignature(bytes), - Err(e) => { - warn!(?e, "Failed to serialize signature in sign()"); - EncodedSignature(vec![]) - } - } + agg_signature } - Err(e) => { - warn!(?e, "Failed to sign"); - EncodedSignature(vec![]) + Err(_e) => { + unreachable!("TODO is this possible"); + // warn!(?e, "Failed to sign"); + // EncodedSignature(vec![]) } } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 22d6b26c1d..a52e6c7834 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -341,7 +341,7 @@ impl, A: ConsensusApi + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.quorum_membership.get_leader(view).to_bytes(), + proposer_id: self.quorum_membership.get_leader(view), }; let vote = QuorumVote::::create_signed_vote( QuorumData { @@ -401,7 +401,7 @@ impl, A: ConsensusApi + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.quorum_membership.get_leader(view).to_bytes(), + proposer_id: self.quorum_membership.get_leader(view), }; // Validate the DAC. @@ -623,7 +623,7 @@ impl, A: ConsensusApi + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: sender.to_bytes(), + proposer_id: sender, }; let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; @@ -648,7 +648,7 @@ impl, A: ConsensusApi + block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: sender.to_bytes(), + proposer_id: sender, }; let leaf_commitment = leaf.commit(); @@ -1248,7 +1248,7 @@ impl, A: ConsensusApi + block_payload: None, rejected: vec![], timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: self.api.public_key().to_bytes(), + proposer_id: self.api.public_key().clone(), }; let signature = TYPES::SignatureKey::sign(&self.private_key, leaf.commit().as_ref()); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 915007f879..689aad90c4 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -104,7 +104,7 @@ async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> (QuorumProposal, EncodedSignature) { +) -> (QuorumProposal, ::PureAssembledSignatureType) { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; let api: HotShotConsensusApi = HotShotConsensusApi { @@ -134,7 +134,7 @@ async fn build_quorum_proposal_and_signature( block_payload: None, rejected: vec![], timestamp: 0, - proposer_id: api.public_key().to_bytes(), + proposer_id: *api.public_key(), }; let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal:: { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 5f53cfa4e4..3c861b302f 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -66,7 +66,7 @@ async fn build_vote( block_payload: None, rejected: Vec::new(), timestamp: 0, - proposer_id: membership.get_leader(view).to_bytes(), + proposer_id: membership.get_leader(view), }; let vote = QuorumVote::::create_signed_vote( QuorumData { diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index d625265926..1cbb10028d 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -24,7 +24,7 @@ async fn test_network_task() { use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, data::VidDisperse, - message::Proposal, + message::Proposal, traits::node_implementation::NodeType, }; async_compatibility_layer::logging::setup_logging(); @@ -60,6 +60,7 @@ async fn test_network_task() { _pd: PhantomData, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; + let quorum_signature : &<::SignatureKey as SignatureKey>::PureAssembledSignatureType = &da_proposal.signature; // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { @@ -69,7 +70,7 @@ async fn test_network_task() { shares: vid_disperse.shares, common: vid_disperse.common, }, - signature: da_proposal.signature.clone(), + signature: quorum_signature.clone(), _pd: PhantomData, }; diff --git a/types/src/data.rs b/types/src/data.rs index b58ceb183d..4cb7c0cd4c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -101,8 +101,9 @@ impl std::ops::Sub for ViewNumber { /// Generate the genesis block proposer ID from the defined constant #[must_use] -pub fn genesis_proposer_id() -> EncodedPublicKey { - EncodedPublicKey(GENESIS_PROPOSER_ID.to_vec()) +pub fn genesis_proposer_id() -> SIGKEY { + todo!() + // EncodedPublicKey(GENESIS_PROPOSER_ID.to_vec()) } /// The `Transaction` type associated with a `State`, as a syntactic shortcut @@ -174,7 +175,7 @@ pub struct QuorumProposal { pub timeout_certificate: Option>, /// the propser id - pub proposer_id: EncodedPublicKey, + pub proposer_id: TYPES::SignatureKey, } impl HasViewNumber for DAProposal { @@ -286,7 +287,7 @@ pub struct Leaf { pub timestamp: i128, /// the proposer id of the leaf - pub proposer_id: EncodedPublicKey, + pub proposer_id: TYPES::SignatureKey, } impl PartialEq for Leaf { @@ -399,7 +400,7 @@ impl Leaf { self.timestamp } /// Identity of the network participant who proposed this leaf. - pub fn get_proposer_id(&self) -> EncodedPublicKey { + pub fn get_proposer_id(&self) -> TYPES::SignatureKey { self.proposer_id.clone() } /// Create a leaf from information stored about a view. diff --git a/types/src/message.rs b/types/src/message.rs index 44cd8ec351..aa74a2ac47 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -11,6 +11,7 @@ use crate::simple_certificate::{ use crate::simple_vote::{ DAVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; +use crate::traits::signature_key::SignatureKey; use crate::vote::HasViewNumber; use crate::{ data::{DAProposal, VidDisperse}, @@ -466,7 +467,7 @@ pub struct Proposal + Deserializ /// The data being proposed. pub data: PROPOSAL, /// The proposal must be signed by the view leader - pub signature: EncodedSignature, + pub signature: ::PureAssembledSignatureType, /// Phantom for TYPES pub _pd: PhantomData, } diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index af2588c1f3..944972c77f 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -85,11 +85,11 @@ mod sealed { impl Sealed for C {} } -/// A simple yes vote over some votable type. +/// A simple yes vote over some votable type. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct SimpleVote { /// The signature share associated with this vote - pub signature: (EncodedPublicKey, EncodedSignature), + pub signature: (TYPES::SignatureKey, ::PureAssembledSignatureType), /// The leaf commitment being voted on. pub data: DATA, /// The view this vote was cast for @@ -106,10 +106,10 @@ impl Vote for SimpleVote ::SignatureKey { - ::from_bytes(&self.signature.0).unwrap() + self.signature.0.clone() } - fn get_signature(&self) -> EncodedSignature { + fn get_signature(&self) -> ::PureAssembledSignatureType { self.signature.1.clone() } @@ -132,7 +132,7 @@ impl SimpleVote { ) -> Self { let signature = TYPES::SignatureKey::sign(private_key, data.commit().as_ref()); Self { - signature: (pub_key.to_bytes(), signature), + signature: (pub_key.clone(), signature), data, view_number: view, } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index ed1818ca9a..3166d72272 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -40,6 +40,7 @@ pub trait StakeTableEntryType { } /// Trait for abstracting public key signatures +/// Self is the public key type pub trait SignatureKey: Send + Sync @@ -103,9 +104,9 @@ pub trait SignatureKey: // Signature type represented as a vec/slice of bytes to let the implementer handle the nuances // of serialization, to avoid Cryptographic pitfalls /// Validate a signature - fn validate(&self, signature: &EncodedSignature, data: &[u8]) -> bool; + fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool; /// Produce a signature - fn sign(private_key: &Self::PrivateKey, data: &[u8]) -> EncodedSignature; + fn sign(private_key: &Self::PrivateKey, data: &[u8]) -> Self::PureAssembledSignatureType; /// Produce a public key from a private key fn from_private(private_key: &Self::PrivateKey) -> Self; /// Serialize a public key to bytes diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 9f831a672e..c3c57686bc 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -137,7 +137,7 @@ pub struct StoredView { pub timestamp: i128, /// the proposer id #[derivative(PartialEq = "ignore")] - pub proposer_id: EncodedPublicKey, + pub proposer_id: TYPES::SignatureKey, } impl StoredView @@ -154,7 +154,7 @@ where block_payload: Option, parent_commitment: Commitment>, rejected: Vec<::Transaction>, - proposer_id: EncodedPublicKey, + proposer_id: TYPES::SignatureKey, ) -> Self { Self { view_number: qc.get_view_number(), diff --git a/types/src/vote.rs b/types/src/vote.rs index 0d71f7f145..e634a0ff21 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -28,7 +28,7 @@ pub trait Vote: HasViewNumber { type Commitment: Voteable; /// Get the signature of the vote sender - fn get_signature(&self) -> EncodedSignature; + fn get_signature(&self) -> ::PureAssembledSignatureType; /// Gets the data which was voted on by this vote fn get_data(&self) -> &Self::Commitment; /// Gets the Data commitment of the vote @@ -79,7 +79,7 @@ pub struct VoteAccumulator< CERT: Certificate, > { /// Map of all signatures accumlated so far - pub vote_outcomes: VoteMap2>, + pub vote_outcomes: VoteMap2, TYPES::SignatureKey, ::PureAssembledSignatureType>, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check @@ -114,14 +114,7 @@ impl, CERT: Certificate::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); + let original_signature: ::PureAssembledSignatureType = vote.get_signature(); let (total_stake_casted, total_vote_map) = self .vote_outcomes @@ -129,9 +122,7 @@ impl, CERT: Certificate, CERT: Certificate, CERT: Certificate = HashMap< +type VoteMap2 = HashMap< COMMITMENT, ( U256, - BTreeMap, + BTreeMap, ), >; From 005906f395c348fe5948da19a748a0217f39e64d Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sat, 25 Nov 2023 05:33:22 -0500 Subject: [PATCH 0441/1393] feat: sign returns a result --- hotshot-signature-key/src/bn254/bn254_pub.rs | 23 ++++----- task-impls/src/consensus.rs | 54 +++++++++++++------- task-impls/src/da.rs | 17 ++++-- task-impls/src/transactions.rs | 39 +++++++------- task-impls/src/vid.rs | 7 ++- task-impls/src/view_sync.rs | 42 ++++++++++----- testing/src/task_helpers.rs | 9 ++-- testing/tests/consensus_task.rs | 9 ++-- testing/tests/da_task.rs | 6 ++- testing/tests/network_task.rs | 6 ++- testing/tests/vid_task.rs | 6 ++- testing/tests/view_sync_task.rs | 3 +- types/src/data.rs | 3 +- types/src/message.rs | 1 - types/src/simple_vote.rs | 26 ++++++---- types/src/traits/signature_key.rs | 10 +++- types/src/traits/storage.rs | 2 +- types/src/vote.rs | 26 ++++------ 18 files changed, 172 insertions(+), 117 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 3d3d605287..111232baac 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -8,6 +8,7 @@ use hotshot_qc::bit_vector_old::{ }; use hotshot_types::traits::qc::QuorumCertificate; use hotshot_utils::bincode::bincode_opts; +use jf_primitives::errors::PrimitivesError; use jf_primitives::signatures::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey}, SignatureScheme, @@ -51,6 +52,7 @@ impl SignatureKey for BLSPubKey { type PureAssembledSignatureType = ::Signature; type QCType = (Self::PureAssembledSignatureType, BitVec); + type SignError = PrimitivesError; #[instrument(skip(self))] fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { @@ -58,27 +60,20 @@ impl SignatureKey for BLSPubKey { // This is the validation for QC partial signature before append(). let generic_msg: &GenericArray = GenericArray::from_slice(data); - BLSOverBN254CurveSignatureScheme::verify(&(), &ver_key, generic_msg, &signature).is_ok() + BLSOverBN254CurveSignatureScheme::verify(&(), &ver_key, generic_msg, signature).is_ok() } - fn sign(sk: &Self::PrivateKey, data: &[u8]) -> Self::PureAssembledSignatureType { + fn sign( + sk: &Self::PrivateKey, + data: &[u8], + ) -> Result { let generic_msg = GenericArray::from_slice(data); - let agg_signature_wrap = BitVectorQC::::sign( + BitVectorQC::::sign( &(), generic_msg, &sk.priv_key, &mut rand::thread_rng(), - ); - match agg_signature_wrap { - Ok(agg_signature) => { - agg_signature - } - Err(_e) => { - unreachable!("TODO is this possible"); - // warn!(?e, "Failed to sign"); - // EncodedSignature(vec![]) - } - } + ) } fn from_private(private_key: &Self::PrivateKey) -> Self { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a52e6c7834..c696298051 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -343,25 +343,29 @@ impl, A: ConsensusApi + timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_membership.get_leader(view), }; - let vote = QuorumVote::::create_signed_vote( + if let Ok(vote) = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), }, view, &self.public_key, &self.private_key, - ); - let message = GeneralConsensusMessage::::Vote(vote); - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view_number() + 1 - ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) - .await; - return true; + ) { + let message = GeneralConsensusMessage::::Vote(vote); + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + 1 + ); + self.event_stream + .publish(HotShotEvent::QuorumVoteSend(vote)) + .await; + return true; + } + } else { + error!("Unable to sign quorum vote!"); + return false; } } @@ -414,15 +418,19 @@ impl, A: ConsensusApi + error!("Block payload commitment does not equal parent commitment"); return false; } - let vote = QuorumVote::::create_signed_vote( + if let Ok(vote) = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), }, view, &self.public_key, &self.private_key, - ); - GeneralConsensusMessage::::Vote(vote) + ) { + GeneralConsensusMessage::::Vote(vote) + } else { + error!("Unable to sign quorum vote!"); + return false; + } } else { error!( "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", @@ -1141,12 +1149,15 @@ impl, A: ConsensusApi + return; } - let vote = TimeoutVote::create_signed_vote( + let Ok(vote) = TimeoutVote::create_signed_vote( TimeoutData { view }, view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign timeout vote!"); + return; + }; self.event_stream .publish(HotShotEvent::TimeoutVoteSend(vote)) @@ -1251,7 +1262,12 @@ impl, A: ConsensusApi + proposer_id: self.api.public_key().clone(), }; - let signature = TYPES::SignatureKey::sign(&self.private_key, leaf.commit().as_ref()); + let Ok(signature) = + TYPES::SignatureKey::sign(&self.private_key, leaf.commit().as_ref()) + else { + error!("Failed to sign leaf!"); + return false; + }; // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { block_header: leaf.block_header.clone(), diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 776c755925..4206570d18 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -222,14 +222,17 @@ impl, A: ConsensusApi + return None; } // Generate and send vote - let vote = DAVote::create_signed_vote( + let Ok(vote) = DAVote::create_signed_vote( DAData { payload_commit: payload_commitment, }, view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign DA Vote!"); + return None; + }; // ED Don't think this is necessary? // self.cur_view = view; @@ -377,8 +380,14 @@ impl, A: ConsensusApi + .await; let payload_commitment = payload.commit(); - let signature = - TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); + let Ok(signature) = + TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()) + else { + // TODO is this correct? + // Should we be doing more? + error!("Failed to sign block payload!"); + return None; + }; // TODO (Keyao) Fix the payload sending and receiving for the DA proposal. // let data: DAProposal = DAProposal { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 73cebe39d6..e37b4ea222 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -250,25 +250,28 @@ impl, A: ConsensusApi + // TODO (Keyao) Determine and update where to publish VidDisperseSend. // debug!("publishing VID disperse for view {}", *view + 1); - self.event_stream - .publish(HotShotEvent::VidDisperseSend( - Proposal { - data: VidDisperse { - view_number: view + 1, - payload_commitment: payload.commit(), - shares: vid_disperse.shares, - common: vid_disperse.common, + if let Ok(signature) = + TYPES::SignatureKey::sign(&self.private_key, payload.commit().as_ref()) + { + self.event_stream + .publish(HotShotEvent::VidDisperseSend( + Proposal { + data: VidDisperse { + view_number: view + 1, + payload_commitment: payload.commit(), + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + // TODO (Keyao) This is also signed in DA task. + signature, + _pd: PhantomData, }, - // TODO (Keyao) This is also signed in DA task. - signature: TYPES::SignatureKey::sign( - &self.private_key, - payload.commit().as_ref(), - ), - _pd: PhantomData, - }, - self.public_key.clone(), - )) - .await; + self.public_key.clone(), + )) + .await; + } else { + error!("Failed to sign payload for vid disperal!"); + } return None; } HotShotEvent::Shutdown => { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 4628fc1a67..ed1d98cc3d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -297,14 +297,17 @@ impl, A: ConsensusApi + } // Generate and send vote - let vote = VIDVote::create_signed_vote( + let Ok(vote) = VIDVote::create_signed_vote( VIDData { payload_commit: payload_commitment, }, view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign VID Vote"); + return None; + }; // ED Don't think this is necessary? // self.cur_view = view; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c3882111e9..5ec87bb4cb 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -807,7 +807,7 @@ impl, A: ConsensusApi + self.relay = certificate.get_data().relay; } - let vote = ViewSyncCommitVote::::create_signed_vote( + let Ok(vote) = ViewSyncCommitVote::::create_signed_vote( ViewSyncCommitData { relay: certificate.get_data().relay, round: self.next_view, @@ -815,7 +815,10 @@ impl, A: ConsensusApi + self.next_view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign view sync commit vote!"); + return (None, self); + }; let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { @@ -875,7 +878,7 @@ impl, A: ConsensusApi + self.relay = certificate.get_data().relay; } - let vote = ViewSyncFinalizeVote::::create_signed_vote( + let Ok(vote) = ViewSyncFinalizeVote::::create_signed_vote( ViewSyncFinalizeData { relay: certificate.get_data().relay, round: self.next_view, @@ -883,7 +886,10 @@ impl, A: ConsensusApi + self.next_view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign view sync finalized vote!"); + return (None, self); + }; let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { @@ -958,7 +964,7 @@ impl, A: ConsensusApi + return (None, self); } - let vote = ViewSyncPreCommitVote::::create_signed_vote( + let Ok(vote) = ViewSyncPreCommitVote::::create_signed_vote( ViewSyncPreCommitData { relay: 0, round: view_number, @@ -966,7 +972,10 @@ impl, A: ConsensusApi + view_number, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign pre commit vote!"); + return (None, self); + }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { @@ -1002,7 +1011,7 @@ impl, A: ConsensusApi + self.relay += 1; match self.phase { ViewSyncPhase::None => { - let vote = ViewSyncPreCommitVote::::create_signed_vote( + let Ok(vote) = ViewSyncPreCommitVote::::create_signed_vote( ViewSyncPreCommitData { relay: self.relay, round: self.next_view, @@ -1010,7 +1019,10 @@ impl, A: ConsensusApi + self.next_view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign vote!"); + return (None, self); + }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); @@ -1021,7 +1033,7 @@ impl, A: ConsensusApi + } } ViewSyncPhase::PreCommit => { - let vote = ViewSyncCommitVote::::create_signed_vote( + let Ok(vote) = ViewSyncCommitVote::::create_signed_vote( ViewSyncCommitData { relay: self.relay, round: self.next_view, @@ -1029,7 +1041,10 @@ impl, A: ConsensusApi + self.next_view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign vote!"); + return (None, self); + }; let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); @@ -1040,7 +1055,7 @@ impl, A: ConsensusApi + } } ViewSyncPhase::Commit => { - let vote = ViewSyncFinalizeVote::::create_signed_vote( + let Ok(vote) = ViewSyncFinalizeVote::::create_signed_vote( ViewSyncFinalizeData { relay: self.relay, round: self.next_view, @@ -1048,7 +1063,10 @@ impl, A: ConsensusApi + self.next_view, &self.public_key, &self.private_key, - ); + ) else { + error!("Failed to sign vote!"); + return (None, self); + }; let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 689aad90c4..1a66b5469c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -22,7 +22,6 @@ use hotshot_types::{ consensus_api::ConsensusSharedApi, election::Membership, node_implementation::NodeType, - signature_key::EncodedSignature, state::{ConsensusTime, TestableBlock}, }, vote::HasViewNumber, @@ -104,7 +103,10 @@ async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, view: u64, -) -> (QuorumProposal, ::PureAssembledSignatureType) { +) -> ( + QuorumProposal, + ::PureAssembledSignatureType, +) { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; let api: HotShotConsensusApi = HotShotConsensusApi { @@ -136,7 +138,8 @@ async fn build_quorum_proposal_and_signature( timestamp: 0, proposer_id: *api.public_key(), }; - let signature = ::sign(private_key, leaf.commit().as_ref()); + let signature = ::sign(private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment!"); let proposal = QuorumProposal:: { block_header, view_number: ViewNumber::new(view), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 3c861b302f..3787f50cf2 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,9 +1,5 @@ use commit::Committable; -use hotshot::{ - tasks::add_consensus_task, - types::{SignatureKey, SystemContextHandle}, - HotShotConsensusApi, -}; +use hotshot::{tasks::add_consensus_task, types::SystemContextHandle, HotShotConsensusApi}; use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ @@ -75,7 +71,8 @@ async fn build_vote( view, api.public_key(), api.private_key(), - ); + ) + .expect("Failed to create quorum vote"); GeneralConsensusMessage::::Vote(vote) } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 571c3ef2c7..de9e722dd0 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -41,7 +41,8 @@ async fn test_da_task() { }; let signature = - ::SignatureKey::sign(api.private_key(), block.commit().as_ref()); + ::SignatureKey::sign(api.private_key(), block.commit().as_ref()) + .expect("Faild to sign block payload!"); let proposal = DAProposal { block_payload: block.clone(), view_number: ViewNumber::new(2), @@ -88,7 +89,8 @@ async fn test_da_task() { ViewNumber::new(2), api.public_key(), api.private_key(), - ); + ) + .expect("Failed to sign da vote"); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); output.insert(HotShotEvent::DAProposalRecv(message, pub_key), 1); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 1cbb10028d..8d4d147369 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -24,7 +24,8 @@ async fn test_network_task() { use hotshot_types::{ block_impl::{VIDBlockPayload, VIDTransaction}, data::VidDisperse, - message::Proposal, traits::node_implementation::NodeType, + message::Proposal, + traits::node_implementation::NodeType, }; async_compatibility_layer::logging::setup_logging(); @@ -50,7 +51,8 @@ async fn test_network_task() { ::SignatureKey::sign( api.private_key(), block.commit().as_ref(), - ); + ) + .expect("Failed to sign block commitment"); let da_proposal = Proposal { data: DAProposal { block_payload: block.clone(), diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 0e0f15465d..a302be85b8 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -45,7 +45,8 @@ async fn test_vid_task() { }; let signature = - ::SignatureKey::sign(api.private_key(), block.commit().as_ref()); + ::SignatureKey::sign(api.private_key(), block.commit().as_ref()) + .expect("Failed to sign block payload!"); let proposal: DAProposal = DAProposal { block_payload: block.clone(), view_number: ViewNumber::new(2), @@ -95,7 +96,8 @@ async fn test_vid_task() { ViewNumber::new(2), api.public_key(), api.private_key(), - ); + ) + .expect("Failed to sign vid vote"); output.insert(HotShotEvent::VidVoteSend(vid_vote), 1); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 46ed361c03..c740e07600 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -34,7 +34,8 @@ async fn test_view_sync_task() { ::Time::new(5), hotshot_types::traits::consensus_api::ConsensusSharedApi::public_key(&api), hotshot_types::traits::consensus_api::ConsensusSharedApi::private_key(&api), - ); + ) + .unwrap(); tracing::error!("Vote in test is {:?}", vote.clone()); diff --git a/types/src/data.rs b/types/src/data.rs index 4cb7c0cd4c..44e07914f8 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,7 +8,7 @@ use crate::{ traits::{ block_contents::BlockHeader, node_implementation::NodeType, - signature_key::{EncodedPublicKey, SignatureKey}, + signature_key::SignatureKey, state::{ConsensusTime, TestableBlock, TestableState}, storage::StoredView, BlockPayload, State, @@ -20,7 +20,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Serializatio use bincode::Options; use commit::{Commitment, Committable}; use derivative::Derivative; -use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; diff --git a/types/src/message.rs b/types/src/message.rs index aa74a2ac47..c8e76570d2 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -19,7 +19,6 @@ use crate::{ traits::{ network::{NetworkMsg, ViewMessage}, node_implementation::NodeType, - signature_key::EncodedSignature, }, }; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 944972c77f..48b2520b5e 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -7,10 +7,7 @@ use serde::{Deserialize, Serialize}; use crate::{ data::Leaf, - traits::{ - node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey}, - }, + traits::{node_implementation::NodeType, signature_key::SignatureKey}, vote::{HasViewNumber, Vote}, }; @@ -89,7 +86,10 @@ mod sealed { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct SimpleVote { /// The signature share associated with this vote - pub signature: (TYPES::SignatureKey, ::PureAssembledSignatureType), + pub signature: ( + TYPES::SignatureKey, + ::PureAssembledSignatureType, + ), /// The leaf commitment being voted on. pub data: DATA, /// The view this vote was cast for @@ -124,17 +124,21 @@ impl Vote for SimpleVote SimpleVote { /// Creates and signs a simple vote + /// # Errors + /// If we are unable to sign the data pub fn create_signed_vote( data: DATA, view: TYPES::Time, pub_key: &TYPES::SignatureKey, private_key: &::PrivateKey, - ) -> Self { - let signature = TYPES::SignatureKey::sign(private_key, data.commit().as_ref()); - Self { - signature: (pub_key.clone(), signature), - data, - view_number: view, + ) -> Result::SignError> { + match TYPES::SignatureKey::sign(private_key, data.commit().as_ref()) { + Ok(signature) => Ok(Self { + signature: (pub_key.clone(), signature), + data, + view_number: view, + }), + Err(e) => Err(e), } } } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 3166d72272..305445fe27 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -101,12 +101,20 @@ pub trait SignatureKey: + Serialize + for<'a> Deserialize<'a>; + /// Type of error that can occur when signing data + type SignError: std::error::Error + Send + Sync; + // Signature type represented as a vec/slice of bytes to let the implementer handle the nuances // of serialization, to avoid Cryptographic pitfalls /// Validate a signature fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool; /// Produce a signature - fn sign(private_key: &Self::PrivateKey, data: &[u8]) -> Self::PureAssembledSignatureType; + /// # Errors + /// If unable to sign the data with the key + fn sign( + private_key: &Self::PrivateKey, + data: &[u8], + ) -> Result; /// Produce a public key from a private key fn from_private(private_key: &Self::PrivateKey) -> Self; /// Serialize a public key to bytes diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index c3c57686bc..eca798d35f 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -1,6 +1,6 @@ //! Abstraction over on-disk storage of node state -use super::{node_implementation::NodeType, signature_key::EncodedPublicKey}; +use super::node_implementation::NodeType; use crate::{ data::Leaf, simple_certificate::QuorumCertificate, traits::BlockPayload, vote::HasViewNumber, }; diff --git a/types/src/vote.rs b/types/src/vote.rs index e634a0ff21..70b23a8a20 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -5,12 +5,10 @@ use std::{ marker::PhantomData, }; -use bincode::Options; use bitvec::vec::BitVec; use commit::Commitment; use either::Either; use ethereum_types::U256; -use hotshot_utils::bincode::bincode_opts; use tracing::error; use crate::{ @@ -18,7 +16,7 @@ use crate::{ traits::{ election::Membership, node_implementation::NodeType, - signature_key::{EncodedPublicKey, EncodedSignature, SignatureKey, StakeTableEntryType}, + signature_key::{SignatureKey, StakeTableEntryType}, }, }; @@ -79,7 +77,11 @@ pub struct VoteAccumulator< CERT: Certificate, > { /// Map of all signatures accumlated so far - pub vote_outcomes: VoteMap2, TYPES::SignatureKey, ::PureAssembledSignatureType>, + pub vote_outcomes: VoteMap2< + Commitment, + TYPES::SignatureKey, + ::PureAssembledSignatureType, + >, /// A list of valid signatures for certificate aggregation pub sig_lists: Vec<::PureAssembledSignatureType>, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check @@ -114,7 +116,8 @@ impl, CERT: Certificate::PureAssembledSignatureType = vote.get_signature(); + let original_signature: ::PureAssembledSignatureType = + vote.get_signature(); let (total_stake_casted, total_vote_map) = self .vote_outcomes @@ -135,10 +138,7 @@ impl, CERT: Certificate= CERT::threshold(membership).into() { // Assemble QC @@ -167,10 +167,4 @@ impl, CERT: Certificate = HashMap< - COMMITMENT, - ( - U256, - BTreeMap, - ), ->; +type VoteMap2 = HashMap)>; From f943da5c848ab3b2cb3e7100ed864b0d50c9e81e Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sat, 25 Nov 2023 05:45:43 -0500 Subject: [PATCH 0442/1393] feat: move genesis pk into signaturekey --- constants/src/lib.rs | 3 --- hotshot-signature-key/src/bn254/bn254_pub.rs | 6 ++++++ hotshot-signature-key/src/lib.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 5 +++-- types/src/data.rs | 9 +-------- types/src/traits/signature_key.rs | 4 ++++ 6 files changed, 15 insertions(+), 14 deletions(-) diff --git a/constants/src/lib.rs b/constants/src/lib.rs index 0b1b769650..fbc9a00b1d 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -1,8 +1,5 @@ //! configurable constants for hotshot -/// the ID of the genesis block proposer -pub const GENESIS_PROPOSER_ID: [u8; 2] = [4, 2]; - /// the number of views to gather information for ahead of time pub const LOOK_AHEAD: u64 = 5; diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 111232baac..3a6c11ab19 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -146,4 +146,10 @@ impl SignatureKey for BLSPubKey { BitVectorQC::::assemble(real_qc_pp, signers, sigs) .expect("this assembling shouldn't fail") } + + fn genesis_proposer_pk() -> Self { + BLSPubKey { + pub_key: unimplemented!(), + } + } } diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs index 2e8f711bab..08a9c6b33e 100644 --- a/hotshot-signature-key/src/lib.rs +++ b/hotshot-signature-key/src/lib.rs @@ -1,5 +1,5 @@ //! This crates offer implementations of quorum certificates used in HotShot. -#![deny(warnings)] +// #![deny(warnings)] #![deny(missing_docs)] pub mod bn254; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index db59363198..f41bb93d62 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -112,9 +112,10 @@ mod test { use super::*; use commit::Committable; use hotshot_signature_key::bn254::BLSPubKey; + use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::{fake_commitment, genesis_proposer_id, Leaf, ViewNumber}, + data::{fake_commitment, Leaf, ViewNumber}, simple_certificate::QuorumCertificate, traits::{node_implementation::NodeType, state::dummy::DummyState, state::ConsensusTime}, }; @@ -171,7 +172,7 @@ mod test { Some(payload), dummy_leaf_commit, Vec::new(), - genesis_proposer_id(), + <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) } diff --git a/types/src/data.rs b/types/src/data.rs index 44e07914f8..cb674e7353 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -98,13 +98,6 @@ impl std::ops::Sub for ViewNumber { } } -/// Generate the genesis block proposer ID from the defined constant -#[must_use] -pub fn genesis_proposer_id() -> SIGKEY { - todo!() - // EncodedPublicKey(GENESIS_PROPOSER_ID.to_vec()) -} - /// The `Transaction` type associated with a `State`, as a syntactic shortcut pub type Transaction = <::BlockPayload as BlockPayload>::Transaction; /// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut @@ -334,7 +327,7 @@ impl Leaf { block_payload: Some(block_payload), rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), - proposer_id: genesis_proposer_id(), + proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 305445fe27..8945ed4ed5 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -149,4 +149,8 @@ pub trait SignatureKey: signers: &BitSlice, sigs: &[Self::PureAssembledSignatureType], ) -> Self::QCType; + + /// generates the genesis public key. Meant to be dummy/filler + #[must_use] + fn genesis_proposer_pk() -> Self; } From b46f9ac5b490adf7f13add2dcac45b9a7149b940 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 26 Nov 2023 10:33:10 -0500 Subject: [PATCH 0443/1393] feat: incorporating suggested changes --- hotshot/src/traits/networking/libp2p_network.rs | 11 +++++------ hotshot/src/traits/networking/web_server_network.rs | 3 ++- testing/tests/unreliable_network.rs | 8 ++++---- types/src/traits/network.rs | 9 +++++---- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index e4364c97c2..6c00fef7b3 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -113,6 +113,7 @@ struct Libp2pNetworkInner { /// NOTE: supposed to represent a ViewNumber but we /// haven't made that atomic yet and we prefer lock-free latest_seen_view: Arc, + #[cfg(feature = "hotshot-testing")] /// reliability_config reliability_config: Option>, /// if we're a member of the DA committee or not @@ -346,6 +347,7 @@ impl Libp2pNetwork { // proposals on". We need this because to have consensus info injected we need a working // network already. In the worst case, we send a few lookups we don't need. latest_seen_view: Arc::new(AtomicU64::new(0)), + #[cfg(feature = "hotshot-testing")] reliability_config, is_da, }), @@ -614,10 +616,9 @@ impl ConnectedNetwork for Libp2p .map_err(|_| NetworkError::ShutDown)?; } - // TODO maybe we should lift the metrics mutex up a level and copy the inner pattern - // ask during pair programming - // or maybe channels would be better? + // NOTE: metrics is threadsafe, so clone is fine (and lightweight) let metrics = self.inner.metrics.clone(); + #[cfg(feature = "hotshot-testing")] if let Some(ref config) = &self.inner.reliability_config { let handle = self.inner.handle.clone(); @@ -695,10 +696,8 @@ impl ConnectedNetwork for Libp2p } }; - // TODO maybe we should lift the metrics mutex up a level and copy the inner pattern - // ask during pair programming - // or maybe channels would be better? let metrics = self.inner.metrics.clone(); + #[cfg(feature = "hotshot-testing")] if let Some(ref config) = &self.inner.reliability_config { let handle = self.inner.handle.clone(); diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index f1ca099e91..9eb174df03 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -1277,7 +1277,8 @@ impl, MEMBERSHIP: Membership Duration { Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.timeout_ms) + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) .sample(&mut rand::thread_rng()), ) } @@ -580,7 +581,7 @@ impl SynchronousNetwork { #[must_use] pub fn new(timeout: u64, delay_low_ms: u64) -> Self { SynchronousNetwork { - timeout_ms: timeout, + delay_high_ms: timeout, delay_low_ms, } } From 495014d264bbe6aa8ddfc2215c2702e55df2fefb Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 26 Nov 2023 11:10:50 -0500 Subject: [PATCH 0444/1393] chore: fix lints when compiling without hotshot-testing --- hotshot/Cargo.toml | 135 ++++++++--------- hotshot/examples/infra/mod.rs | 1 - hotshot/src/lib.rs | 7 +- .../src/traits/networking/combined_network.rs | 9 +- .../src/traits/networking/libp2p_network.rs | 139 ++++++++++-------- hotshot/src/types/handle.rs | 12 +- 6 files changed, 164 insertions(+), 139 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index f0a62be572..3eb091a32b 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -28,75 +28,76 @@ hotshot-testing = [] # libp2p [[example]] name = "validator-libp2p" -required-features = ["demo", "libp2p/rsa"] +features = ["demo", "libp2p/rsa"] +excluded-features = ["hotshot-testing"] path = "examples/libp2p/validator.rs" -[[example]] -name = "multi-validator-libp2p" -required-features = ["demo", "libp2p/rsa"] -path = "examples/libp2p/multi-validator.rs" - -[[example]] -name = "orchestrator-libp2p" -required-features = ["demo", "libp2p/rsa"] -path = "examples/libp2p/orchestrator.rs" - -[[example]] -name = "all-libp2p" -required-features = ["demo", "libp2p/rsa"] -path = "examples/libp2p/all.rs" - -# webserver -[[example]] -name = "webserver" -required-features = ["demo", "libp2p/rsa"] -path = "examples/webserver/webserver.rs" - -[[example]] -name = "orchestrator-webserver" -required-features = ["demo", "libp2p/rsa"] -path = "examples/webserver/orchestrator.rs" - -[[example]] -name = "validator-webserver" -required-features = ["demo", "libp2p/rsa"] -path = "examples/webserver/validator.rs" - -[[example]] -name = "multi-validator-webserver" -required-features = ["demo", "libp2p/rsa"] -path = "examples/webserver/multi-validator.rs" - -[[example]] -name = "multi-webserver" -required-features = ["demo", "libp2p/rsa"] -path = "examples/webserver/multi-webserver.rs" - -[[example]] -name = "all-webserver" -required-features = ["demo", "libp2p/rsa"] -path = "examples/webserver/all.rs" - -# combined -[[example]] -name = "all-combined" -required-features = ["demo", "libp2p/rsa"] -path = "examples/combined/all.rs" - -[[example]] -name = "multi-validator-combined" -required-features = ["demo", "libp2p/rsa"] -path = "examples/combined/multi-validator.rs" - -[[example]] -name = "validator-combined" -required-features = ["demo", "libp2p/rsa"] -path = "examples/combined/validator.rs" - -[[example]] -name = "orchestrator-combined" -required-features = ["demo", "libp2p/rsa"] -path = "examples/combined/orchestrator.rs" +# [[example]] +# name = "multi-validator-libp2p" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/libp2p/multi-validator.rs" +# +# [[example]] +# name = "orchestrator-libp2p" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/libp2p/orchestrator.rs" +# +# [[example]] +# name = "all-libp2p" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/libp2p/all.rs" +# +# # webserver +# [[example]] +# name = "webserver" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/webserver/webserver.rs" +# +# [[example]] +# name = "orchestrator-webserver" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/webserver/orchestrator.rs" +# +# [[example]] +# name = "validator-webserver" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/webserver/validator.rs" +# +# [[example]] +# name = "multi-validator-webserver" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/webserver/multi-validator.rs" +# +# [[example]] +# name = "multi-webserver" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/webserver/multi-webserver.rs" +# +# [[example]] +# name = "all-webserver" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/webserver/all.rs" +# +# # combined +# [[example]] +# name = "all-combined" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/combined/all.rs" +# +# [[example]] +# name = "multi-validator-combined" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/combined/multi-validator.rs" +# +# [[example]] +# name = "validator-combined" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/combined/validator.rs" +# +# [[example]] +# name = "orchestrator-combined" +# required-features = ["demo", "libp2p/rsa"] +# path = "examples/combined/orchestrator.rs" [dependencies] async-compatibility-layer = { workspace = true } diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 149ebf3848..94013545c0 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -291,7 +291,6 @@ async fn libp2p_network_from_config( // NOTE: this introduces an invariant that the keys are assigned using this indexed // function all_keys, - None, da_keys.clone(), da_keys.contains(&pub_key), ) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index afbc586e1e..5ec6a64be8 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -51,6 +51,9 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::node_implementation::ChannelMaps; + use hotshot_types::{ consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::Leaf, @@ -63,7 +66,7 @@ use hotshot_types::{ traits::{ consensus_api::{ConsensusApi, ConsensusSharedApi}, network::{CommunicationChannel, NetworkError}, - node_implementation::{ChannelMaps, NodeType, SendToTasks}, + node_implementation::{NodeType, SendToTasks}, signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, @@ -158,6 +161,7 @@ pub struct SystemContextInner> { /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the /// latter of which is only applicable for sequencing consensus. + #[cfg(feature = "hotshot-testing")] channel_maps: (ChannelMaps, Option>), // global_registry: GlobalRegistry, @@ -242,6 +246,7 @@ impl> SystemContext { let inner: Arc> = Arc::new(SystemContextInner { id: nonce, + #[cfg(feature = "hotshot-testing")] channel_maps: I::new_channel_maps(start_view), consensus, public_key, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 6b7ab6d4b9..ebe4f2d98e 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -20,15 +20,16 @@ use futures::join; use async_compatibility_layer::channel::UnboundedSendError; use hotshot_task::{boxed_sync, BoxSyncFuture}; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ data::ViewNumber, message::Message, traits::{ election::Membership, network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkReliability, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - ViewMessage, + CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, + TestableChannelImplementation, TransmitType, ViewMessage, }, node_implementation::NodeType, }, @@ -144,6 +145,7 @@ pub struct CombinedNetworks( pub Libp2pNetwork, TYPES::SignatureKey>, ); +#[cfg(feature = "hotshot-testing")] impl TestableNetworkingImplementation for CombinedNetworks { fn generator( expected_node_count: usize, @@ -184,6 +186,7 @@ impl TestableNetworkingImplementation for CombinedNetwor } } +#[cfg(feature = "hotshot-testing")] impl TestableNetworkingImplementation for CombinedCommChannel { fn generator( expected_node_count: usize, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 14ad5d847a..a1de58bf8e 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -2,8 +2,10 @@ //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network use super::NetworkingMetricsValue; +#[cfg(feature = "hotshot-testing")] +use async_compatibility_layer::art::async_block_on; use async_compatibility_layer::{ - art::{async_block_on, async_sleep, async_spawn}, + art::{async_sleep, async_spawn}, channel::{unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, }; use async_lock::RwLock; @@ -12,6 +14,8 @@ use bimap::BiHashMap; use bincode::Options; use hotshot_constants::LOOK_AHEAD; use hotshot_task::{boxed_sync, BoxSyncFuture}; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ data::ViewNumber, message::{Message, MessageKind}, @@ -19,34 +23,36 @@ use hotshot_types::{ election::Membership, network::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, - NetworkError, NetworkMsg, NetworkReliability, TestableChannelImplementation, - TestableNetworkingImplementation, TransmitType, ViewMessage, + NetworkError, NetworkMsg, TestableChannelImplementation, TransmitType, ViewMessage, }, node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, }, }; + use hotshot_utils::bincode::bincode_opts; use libp2p_identity::PeerId; +#[cfg(feature = "hotshot-testing")] +use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder}; + use libp2p_networking::{ network::{ - MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, - NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, - NetworkNodeType, + NetworkNodeConfig, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, }, reexport::Multiaddr, }; use serde::Serialize; use snafu::ResultExt; +#[cfg(feature = "hotshot-testing")] +use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; + use std::{ - collections::{BTreeSet, HashSet}, + collections::BTreeSet, fmt::Debug, marker::PhantomData, - num::NonZeroUsize, - str::FromStr, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, @@ -127,6 +133,7 @@ pub struct Libp2pNetwork { inner: Arc>, } +#[cfg(feature = "hotshot-testing")] impl TestableNetworkingImplementation for Libp2pNetwork, TYPES::SignatureKey> where @@ -240,6 +247,7 @@ where num_bootstrap, node_id as usize, keys, + #[cfg(feature = "hotshot-testing")] reliability_config_dup, da.clone(), da.contains(&pubkey), @@ -295,7 +303,7 @@ impl Libp2pNetwork { id: usize, // HACK committee_pks: BTreeSet, - reliability_config: Option>, + #[cfg(feature = "hotshot-testing")] reliability_config: Option>, da_pks: BTreeSet, is_da: bool, ) -> Result, NetworkError> { @@ -622,35 +630,37 @@ impl ConnectedNetwork for Libp2p } // NOTE: metrics is threadsafe, so clone is fine (and lightweight) - let metrics = self.inner.metrics.clone(); #[cfg(feature = "hotshot-testing")] - if let Some(ref config) = &self.inner.reliability_config { - let handle = self.inner.handle.clone(); - - let serialized_msg = bincode_opts() - .serialize(&message) - .context(FailedToSerializeSnafu)?; - let fut = config.clone().chaos_send_msg( - serialized_msg, - Arc::new(move |msg: Vec| { - let topic_2 = topic.clone(); - let handle_2 = handle.clone(); - let metrics_2 = metrics.clone(); - boxed_sync(async move { - match handle_2.gossip_no_serialize(topic_2, msg).await { - Err(e) => { - metrics_2.message_failed_to_send.add(1); - warn!("Failed to broadcast to libp2p: {:?}", e); - } - Ok(()) => { - metrics_2.outgoing_direct_message_count.add(1); + { + let metrics = self.inner.metrics.clone(); + if let Some(ref config) = &self.inner.reliability_config { + let handle = self.inner.handle.clone(); + + let serialized_msg = bincode_opts() + .serialize(&message) + .context(FailedToSerializeSnafu)?; + let fut = config.clone().chaos_send_msg( + serialized_msg, + Arc::new(move |msg: Vec| { + let topic_2 = topic.clone(); + let handle_2 = handle.clone(); + let metrics_2 = metrics.clone(); + boxed_sync(async move { + match handle_2.gossip_no_serialize(topic_2, msg).await { + Err(e) => { + metrics_2.message_failed_to_send.add(1); + warn!("Failed to broadcast to libp2p: {:?}", e); + } + Ok(()) => { + metrics_2.outgoing_direct_message_count.add(1); + } } - } - }) - }), - ); - async_spawn(fut); - return Ok(()); + }) + }), + ); + async_spawn(fut); + return Ok(()); + } } match self.inner.handle.gossip(topic, &message).await { @@ -701,34 +711,36 @@ impl ConnectedNetwork for Libp2p } }; - let metrics = self.inner.metrics.clone(); #[cfg(feature = "hotshot-testing")] - if let Some(ref config) = &self.inner.reliability_config { - let handle = self.inner.handle.clone(); - - let serialized_msg = bincode_opts() - .serialize(&message) - .context(FailedToSerializeSnafu)?; - let fut = config.clone().chaos_send_msg( - serialized_msg, - Arc::new(move |msg: Vec| { - let handle_2 = handle.clone(); - let metrics_2 = metrics.clone(); - boxed_sync(async move { - match handle_2.direct_request_no_serialize(pid, msg).await { - Err(e) => { - metrics_2.message_failed_to_send.add(1); - warn!("Failed to broadcast to libp2p: {:?}", e); - } - Ok(()) => { - metrics_2.outgoing_direct_message_count.add(1); + { + let metrics = self.inner.metrics.clone(); + if let Some(ref config) = &self.inner.reliability_config { + let handle = self.inner.handle.clone(); + + let serialized_msg = bincode_opts() + .serialize(&message) + .context(FailedToSerializeSnafu)?; + let fut = config.clone().chaos_send_msg( + serialized_msg, + Arc::new(move |msg: Vec| { + let handle_2 = handle.clone(); + let metrics_2 = metrics.clone(); + boxed_sync(async move { + match handle_2.direct_request_no_serialize(pid, msg).await { + Err(e) => { + metrics_2.message_failed_to_send.add(1); + warn!("Failed to broadcast to libp2p: {:?}", e); + } + Ok(()) => { + metrics_2.outgoing_direct_message_count.add(1); + } } - } - }) - }), - ); - async_spawn(fut); - return Ok(()); + }) + }), + ); + async_spawn(fut); + return Ok(()); + } } match self.inner.handle.direct_request(pid, &message).await { @@ -832,6 +844,7 @@ impl Libp2pCommChannel { } } +#[cfg(feature = "hotshot-testing")] impl TestableNetworkingImplementation for Libp2pCommChannel where MessageKind: ViewMessage, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 87975429e4..9c576043ca 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -13,15 +13,19 @@ use hotshot_task::{ BoxSyncFuture, }; use hotshot_task_impls::events::HotShotEvent; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::{ + message::{MessageKind, SequencingMessage}, + traits::election::Membership, +}; + use hotshot_types::simple_vote::QuorumData; + use hotshot_types::{ consensus::Consensus, error::HotShotError, event::EventType, - message::{MessageKind, SequencingMessage}, - traits::{ - election::Membership, node_implementation::NodeType, state::ConsensusTime, storage::Storage, - }, + traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, }; use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; use std::sync::Arc; From 66151e7a2ea425a06575dc6b2dfe7ba2603c02f1 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 26 Nov 2023 11:17:00 -0500 Subject: [PATCH 0445/1393] chore: fix lints when compiling normally --- hotshot/Cargo.toml | 135 ++++++++++++++++++++++----------------------- 1 file changed, 67 insertions(+), 68 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 3eb091a32b..f0a62be572 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -28,76 +28,75 @@ hotshot-testing = [] # libp2p [[example]] name = "validator-libp2p" -features = ["demo", "libp2p/rsa"] -excluded-features = ["hotshot-testing"] +required-features = ["demo", "libp2p/rsa"] path = "examples/libp2p/validator.rs" -# [[example]] -# name = "multi-validator-libp2p" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/multi-validator.rs" -# -# [[example]] -# name = "orchestrator-libp2p" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/orchestrator.rs" -# -# [[example]] -# name = "all-libp2p" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/libp2p/all.rs" -# -# # webserver -# [[example]] -# name = "webserver" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/webserver/webserver.rs" -# -# [[example]] -# name = "orchestrator-webserver" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/webserver/orchestrator.rs" -# -# [[example]] -# name = "validator-webserver" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/webserver/validator.rs" -# -# [[example]] -# name = "multi-validator-webserver" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/webserver/multi-validator.rs" -# -# [[example]] -# name = "multi-webserver" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/webserver/multi-webserver.rs" -# -# [[example]] -# name = "all-webserver" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/webserver/all.rs" -# -# # combined -# [[example]] -# name = "all-combined" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/combined/all.rs" -# -# [[example]] -# name = "multi-validator-combined" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/combined/multi-validator.rs" -# -# [[example]] -# name = "validator-combined" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/combined/validator.rs" -# -# [[example]] -# name = "orchestrator-combined" -# required-features = ["demo", "libp2p/rsa"] -# path = "examples/combined/orchestrator.rs" +[[example]] +name = "multi-validator-libp2p" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/multi-validator.rs" + +[[example]] +name = "orchestrator-libp2p" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/orchestrator.rs" + +[[example]] +name = "all-libp2p" +required-features = ["demo", "libp2p/rsa"] +path = "examples/libp2p/all.rs" + +# webserver +[[example]] +name = "webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/webserver.rs" + +[[example]] +name = "orchestrator-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/orchestrator.rs" + +[[example]] +name = "validator-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/validator.rs" + +[[example]] +name = "multi-validator-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/multi-validator.rs" + +[[example]] +name = "multi-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/multi-webserver.rs" + +[[example]] +name = "all-webserver" +required-features = ["demo", "libp2p/rsa"] +path = "examples/webserver/all.rs" + +# combined +[[example]] +name = "all-combined" +required-features = ["demo", "libp2p/rsa"] +path = "examples/combined/all.rs" + +[[example]] +name = "multi-validator-combined" +required-features = ["demo", "libp2p/rsa"] +path = "examples/combined/multi-validator.rs" + +[[example]] +name = "validator-combined" +required-features = ["demo", "libp2p/rsa"] +path = "examples/combined/validator.rs" + +[[example]] +name = "orchestrator-combined" +required-features = ["demo", "libp2p/rsa"] +path = "examples/combined/orchestrator.rs" [dependencies] async-compatibility-layer = { workspace = true } From 73ba29896aa2a897ed9f296c42c72a422bb1822b Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 27 Nov 2023 07:54:14 -0500 Subject: [PATCH 0446/1393] Combined network tests (#1994) * combined testing harness & initial test * some more tests * use hotshot testing instead of debug assertions * don't filter duplicate transactions * properly shut down between sequential tests * fix timeout tests * fmt * change to 120 second test duration * merge * review changes 1 * pr review changes 2 * tokio lint --- constants/src/lib.rs | 8 +- hotshot/src/lib.rs | 6 + .../src/traits/networking/combined_network.rs | 28 +- .../src/traits/networking/libp2p_network.rs | 18 +- .../src/traits/networking/memory_network.rs | 8 + .../traits/networking/web_server_network.rs | 58 ++++- hotshot/src/types/handle.rs | 5 +- .../src/network/behaviours/dht/mod.rs | 2 - .../src/network/behaviours/direct_message.rs | 12 +- libp2p-networking/src/network/node.rs | 12 +- task-impls/src/consensus.rs | 1 - task-impls/src/transactions.rs | 3 +- testing/Cargo.toml | 1 + testing/src/completion_task.rs | 3 + testing/src/spinning_task.rs | 21 +- testing/src/test_builder.rs | 4 +- testing/src/test_runner.rs | 16 +- testing/tests/combined_network.rs | 245 +++++++++++++++++- testing/tests/timeout.rs | 4 +- types/src/traits/network.rs | 6 + web_server/src/lib.rs | 7 +- 21 files changed, 412 insertions(+), 56 deletions(-) diff --git a/constants/src/lib.rs b/constants/src/lib.rs index 0b1b769650..279ad62890 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -13,7 +13,11 @@ pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; /// the number of messages to attempt to send over the primary network before switching to prefer the secondary network -pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 10; +pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network -pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 10; +pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; + +/// the amount of time to wait for async_std tests to spin down the Libp2p listeners +/// and allow future tests to run +pub const ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME: u64 = 4; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index afbc586e1e..a918cdb85c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -111,6 +111,12 @@ impl> Networks { self.quorum_network.wait_for_ready().await; self.da_network.wait_for_ready().await; } + + /// shut down all networks + pub async fn shut_down_networks(&self) { + self.quorum_network.shut_down().await; + self.da_network.shut_down().await; + } } /// Bundle of all the memberships a consensus instance uses diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index b653197374..6908cd1562 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -12,7 +12,7 @@ use std::{ hash::Hasher, sync::atomic::{AtomicU64, Ordering}, }; -use tracing::error; +use tracing::warn; use async_trait::async_trait; @@ -215,6 +215,14 @@ impl TestableNetworkingImplementation for CombinedCommCh impl CommunicationChannel for CombinedCommChannel { type NETWORK = CombinedNetworks; + fn pause(&self) { + self.networks.0.pause(); + } + + fn resume(&self) { + self.networks.0.resume(); + } + async fn wait_for_ready(&self) { join!( self.primary().wait_for_ready(), @@ -246,9 +254,9 @@ impl CommunicationChannel for CombinedCommChannel ::Membership::get_committee(election, message.get_view_number()); // broadcast optimistically on both networks, but if the primary network is down, skip it - if self.primary_down.load(Ordering::Relaxed) < COMBINED_NETWORK_MIN_PRIMARY_FAILURES - || self.primary_down.load(Ordering::Relaxed) % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL - == 0 + let primary_down = self.primary_down.load(Ordering::Relaxed); + if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES + || primary_down % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL == 0 { // broadcast on the primary network as it is not down, or we are checking if it is back up match self @@ -260,7 +268,7 @@ impl CommunicationChannel for CombinedCommChannel self.primary_down.store(0, Ordering::Relaxed); } Err(e) => { - error!("Error on primary network: {}", e); + warn!("Error on primary network: {}", e); self.primary_down.fetch_add(1, Ordering::Relaxed); } }; @@ -277,9 +285,9 @@ impl CommunicationChannel for CombinedCommChannel recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { // DM optimistically on both networks, but if the primary network is down, skip it - if self.primary_down.load(Ordering::Relaxed) < COMBINED_NETWORK_MIN_PRIMARY_FAILURES - || self.primary_down.load(Ordering::Relaxed) % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL - == 0 + let primary_down = self.primary_down.load(Ordering::Relaxed); + if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES + || primary_down % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL == 0 { // message on the primary network as it is not down, or we are checking if it is back up match self @@ -291,7 +299,7 @@ impl CommunicationChannel for CombinedCommChannel self.primary_down.store(0, Ordering::Relaxed); } Err(e) => { - error!("Error on primary network: {}", e); + warn!("Error on primary network: {}", e); self.primary_down.fetch_add(1, Ordering::Relaxed); } }; @@ -309,6 +317,7 @@ impl CommunicationChannel for CombinedCommChannel Self: 'b, { // recv on both networks because nodes may be accessible only on either. discard duplicates + // TODO: improve this algorithm: https://github.com/EspressoSystems/HotShot/issues/2089 let closure = async move { let mut primary_msgs = self.primary().recv_msgs(transmit_type).await?; let mut secondary_msgs = self.secondary().recv_msgs(transmit_type).await?; @@ -317,6 +326,7 @@ impl CommunicationChannel for CombinedCommChannel let mut filtered_msgs = Vec::with_capacity(primary_msgs.len()); for msg in primary_msgs { + // see if we've already seen this message if !self .message_cache .read() diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 81b8f6ab27..00453289de 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -337,7 +337,9 @@ impl Libp2pNetwork { bootstrap_addrs_len, bootstrap_addrs, is_ready: Arc::new(AtomicBool::new(false)), - dht_timeout: Duration::from_secs(30), + // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples + // https://github.com/EspressoSystems/HotShot/issues/2088 + dht_timeout: Duration::from_secs(8), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, topic_map, @@ -541,7 +543,7 @@ impl Libp2pNetwork { } } } - error!("Network receiever shut down!"); + warn!("Network receiever shut down!"); Ok::<(), NetworkError>(()) }); } @@ -566,11 +568,11 @@ impl ConnectedNetwork for Libp2p Self: 'b, { let closure = async move { - self.inner.node_lookup_send.send(None).await.unwrap(); if self.inner.handle.is_killed() { error!("Called shut down when already shut down! Noop."); } else { - self.inner.handle.shutdown().await.unwrap(); + let _ = self.inner.node_lookup_send.send(None).await; + let _ = self.inner.handle.shutdown().await; } }; boxed_sync(closure) @@ -815,6 +817,14 @@ where { type NETWORK = Libp2pNetwork, TYPES::SignatureKey>; + fn pause(&self) { + unimplemented!("Pausing not implemented for the Libp2p network"); + } + + fn resume(&self) { + unimplemented!("Resuming not implemented for the Libp2p network"); + } + async fn wait_for_ready(&self) { self.0.wait_for_ready().await; } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 4c67e1d6f7..92453920e6 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -496,6 +496,14 @@ where { type NETWORK = MemoryNetwork, TYPES::SignatureKey>; + fn pause(&self) { + unimplemented!("Pausing not implemented for the memory network"); + } + + fn resume(&self) { + unimplemented!("Resuming not implemented for the memory network"); + } + async fn wait_for_ready(&self) { self.0.wait_for_ready().await; } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 48dc1913a1..9594818236 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -38,7 +38,7 @@ use std::{ time::Duration, }; use surf_disco::error::ClientError; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, warn}; /// Represents the communication channel abstraction for the web server #[derive(Clone, Debug)] pub struct WebCommChannel(Arc>); @@ -79,6 +79,18 @@ impl WebServerNetwork { source: WebServerNetworkError::ClientError, }) } + + /// Pauses the underlying network + pub fn pause(&self) { + error!("Pausing CDN network"); + self.inner.running.store(false, Ordering::Relaxed); + } + + /// Resumes the underlying network + pub fn resume(&self) { + error!("Resuming CDN network"); + self.inner.running.store(true, Ordering::Relaxed); + } } /// Represents the core of web server networking @@ -586,6 +598,14 @@ impl CommunicationChannel for WebCommChannel { .await; } + fn pause(&self) { + self.0.pause(); + } + + fn resume(&self) { + self.0.resume(); + } + /// checks if the network is ready /// nonblocking async fn is_ready(&self) -> bool { @@ -701,6 +721,12 @@ impl ConnectedNetwork, TYPES::Signatur message: Message, _recipients: BTreeSet, ) -> Result<(), NetworkError> { + // short circuit if we are shut down + #[cfg(feature = "hotshot-testing")] + if !self.inner.running.load(Ordering::Relaxed) { + return Err(NetworkError::ShutDown); + } + let network_msg = Self::parse_post_message(message); match network_msg { Ok(network_msg) => self.post_message_to_web_server(network_msg).await, @@ -717,6 +743,11 @@ impl ConnectedNetwork, TYPES::Signatur message: Message, _recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { + // short circuit if we are shut down + #[cfg(feature = "hotshot-testing")] + if !self.inner.running.load(Ordering::Relaxed) { + return Err(NetworkError::ShutDown); + } let network_msg = Self::parse_post_message(message); match network_msg { Ok(network_msg) => { @@ -769,6 +800,11 @@ impl ConnectedNetwork, TYPES::Signatur #[allow(clippy::too_many_lines)] async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { + #[cfg(feature = "hotshot-testing")] + if !self.inner.running.load(Ordering::Relaxed) { + return; + } + debug!( "Injecting event: {:?} is da {}", event.clone(), @@ -796,7 +832,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::Proposal, view_number) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -836,7 +872,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::VidDisperse, view_number) .await { - error!( + warn!( "Background receive VID disperse polling encountered an error: {:?}", e ); @@ -870,7 +906,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -891,7 +927,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::Vote, view_number) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -928,7 +964,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::VidVote, view_number) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -966,7 +1002,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::DAC, view_number) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -1003,7 +1039,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::VidCert, view_number) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -1069,7 +1105,7 @@ impl ConnectedNetwork, TYPES::Signatur ) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -1099,7 +1135,7 @@ impl ConnectedNetwork, TYPES::Signatur ) .await { - error!( + warn!( "Background receive proposal polling encountered an error: {:?}", e ); @@ -1152,7 +1188,7 @@ impl ConnectedNetwork, TYPES::Signatur .poll_web_server(receiver, MessagePurpose::Data, view_number) .await { - error!( + warn!( "Background receive transaction polling encountered an error: {:?}", e ); diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 87975429e4..ad4b44f62d 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -186,7 +186,10 @@ impl + 'static> SystemContextHandl 'a: 'b, Self: 'b, { - boxed_sync(async move { self.registry.shutdown_all().await }) + boxed_sync(async move { + self.hotshot.inner.networks.shut_down_networks().await; + self.registry.shutdown_all().await; + }) } /// return the timeout for a view of the underlying `SystemContext` diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index a22056c907..431f86a610 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -452,7 +452,6 @@ impl DHTBehaviour { result: QueryResult::Bootstrap(Err(e)), .. } => { - warn!("DHT: Bootstrap attempt failed. Retrying shortly."); let BootstrapError::Timeout { num_remaining, .. } = e; if num_remaining.is_none() { error!( @@ -562,7 +561,6 @@ impl NetworkBehaviour for DHTBehaviour { { match self.kadem.bootstrap() { Ok(_) => { - info!("started bootstrap for peer {:?}", self.peer_id); self.bootstrap_state.state = State::Started; } Err(e) => { diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index a50538e4ae..9b1fd1a0c0 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -69,17 +69,19 @@ impl DMBehaviour { } Event::OutboundFailure { peer, - request_id, + request_id: _, error, } => { error!( "outbound failure to send message to {:?} with error {:?}", peer, error ); - if let Some(mut req) = self.in_progress_rr.remove(&request_id) { - req.backoff.start_next(false); - self.failed_rr.push_back(req); - } + // RM TODO: make direct messages have n (and not infinite) retries + // issue: https://github.com/EspressoSystems/HotShot/issues/2003 + // if let Some(mut req) = self.in_progress_rr.remove(&request_id) { + // req.backoff.start_next(false); + // self.failed_rr.push_back(req); + // } } Event::Message { message, peer, .. } => match message { Message::Request { diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 543bd24d71..1853781025 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -15,6 +15,7 @@ use super::{ error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, gen_transport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, NetworkNodeType, }; + use crate::network::{ behaviours::{ dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery}, @@ -32,6 +33,7 @@ use async_compatibility_layer::{ use either::Either; use futures::{select, FutureExt, StreamExt}; use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; +use libp2p::core::transport::ListenerId; use libp2p::{ core::{muxing::StreamMuxerBox, transport::Boxed}, gossipsub::{ @@ -80,6 +82,8 @@ pub struct NetworkNode { swarm: Swarm, /// the configuration parameters of the netework config: NetworkNodeConfig, + /// the listener id we are listening on, if it exists + listener_id: Option, } impl NetworkNode { @@ -101,7 +105,7 @@ impl NetworkNode { &mut self, listen_addr: Multiaddr, ) -> Result { - self.swarm.listen_on(listen_addr).context(TransportSnafu)?; + self.listener_id = Some(self.swarm.listen_on(listen_addr).context(TransportSnafu)?); let addr = loop { if let Some(SwarmEvent::NewListenAddr { address, .. }) = self.swarm.next().await { break address; @@ -303,6 +307,7 @@ impl NetworkNode { peer_id, swarm, config, + listener_id: None, }) } @@ -371,7 +376,10 @@ impl NetworkNode { // NOTE used by test with conductor only } ClientRequest::Shutdown => { - warn!("Libp2p listener shutting down"); + if let Some(listener_id) = self.listener_id { + self.swarm.remove_listener(listener_id); + } + return Ok(true); } ClientRequest::GossipMsg(topic, contents) => { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 22d6b26c1d..ffb43943bb 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -792,7 +792,6 @@ impl, A: ConsensusApi + } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - debug!("about to publish decide"); self.event_stream .publish(HotShotEvent::LeafDecided(leaf_views.clone())) .await; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 73cebe39d6..58736ca556 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -33,7 +33,7 @@ use std::{ sync::Arc, time::Instant, }; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; /// A type alias for `HashMap, T>` type CommitmentMap = HashMap, T>; @@ -250,6 +250,7 @@ impl, A: ConsensusApi + // TODO (Keyao) Determine and update where to publish VidDisperseSend. // debug!("publishing VID disperse for view {}", *view + 1); + info!("New view: {}", *view); self.event_stream .publish(HotShotEvent::VidDisperseSend( Proposal { diff --git a/testing/Cargo.toml b/testing/Cargo.toml index d4d1fdf97e..7c28b2366c 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -27,6 +27,7 @@ hotshot = { path = "../hotshot", features = [ ], default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } +hotshot-constants = { path = "../constants" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 9e405519cf..669148682d 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -78,6 +78,9 @@ impl TimeBasedCompletionTaskDescription { async move { match event { GlobalTestEvent::ShutDown => { + for node in &state.handles { + node.handle.clone().shut_down().await; + } (Some(HotShotTaskCompleted::ShutDown), state) } } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 1144dbd2d8..fd00f35bb8 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -15,7 +15,8 @@ use hotshot_task::{ task_impls::{HSTWithEventAndMessage, TaskBuilder}, GeneratedStream, }; -use hotshot_types::traits::node_implementation::NodeType; + +use hotshot_types::traits::{network::CommunicationChannel, node_implementation::NodeType}; use snafu::Snafu; #[derive(Snafu, Debug)] pub struct SpinningTaskErr {} @@ -45,6 +46,10 @@ pub enum UpDown { Up, /// spin the node down Down, + /// spin the node's network up + NetworkUp, + /// spin the node's network down + NetworkDown, } /// denotes a change in node state @@ -52,7 +57,7 @@ pub enum UpDown { pub struct ChangeNode { /// the index of the node pub idx: usize, - /// spin the node up or down + /// spin the node or node's network up or down pub updown: UpDown, } @@ -120,6 +125,18 @@ impl SpinningTaskDescription { node.handle.shut_down().await; } } + UpDown::NetworkUp => { + if let Some(handle) = state.handles.get(idx) { + handle.networks.0.resume(); + handle.networks.1.resume(); + } + } + UpDown::NetworkDown => { + if let Some(handle) = state.handles.get(idx) { + handle.networks.0.pause(); + handle.networks.1.pause(); + } + } } } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index b31657c15d..33c9c04ace 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -202,9 +202,9 @@ impl TestMetadata { // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. let known_nodes_with_stake = (0..total_nodes) - .map(|node_id| { + .map(|node_id_| { let cur_validator_config: ValidatorConfig = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id as u64, 1); + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id_ as u64, 1); cur_validator_config .public_key diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 5ee9c54d59..1f76a13515 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -13,6 +13,7 @@ use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, HotShotTyp use hotshot_task::{ event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, }; +use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, traits::{election::Membership, node_implementation::NodeType}, @@ -29,6 +30,7 @@ use tracing::info; #[derive(Clone)] pub struct Node> { pub node_id: u64, + pub networks: Networks, pub handle: SystemContextHandle, } @@ -140,6 +142,11 @@ where .await; task_runner = task_runner.add_task(id, "Test Overall Safety Task".to_string(), task); + // wait for networks to be ready + for node in &nodes { + node.networks.0.wait_for_ready().await; + } + // Start hotshot for node in nodes { if !late_start_nodes.contains(&node.node_id) { @@ -180,13 +187,20 @@ where let validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); let hotshot = self - .add_node_with_config(networks, storage, initializer, config, validator_config) + .add_node_with_config( + networks.clone(), + storage, + initializer, + config, + validator_config, + ) .await; if late_start.contains(&node_id) { self.late_start.insert(node_id, hotshot); } else { self.nodes.push(Node { node_id, + networks, handle: hotshot.run_tasks().await, }); } diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 77a3243407..ec9b253dc4 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -1,14 +1,19 @@ use std::time::Duration; +#[cfg(async_executor_impl = "async-std")] +use hotshot_constants::ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME; + use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{CombinedImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; +use rand::Rng; use tracing::instrument; -/// web server with libp2p network test +/// A run with both the webserver and libp2p functioning properly #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) @@ -19,6 +24,164 @@ async fn test_combined_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let metadata: TestMetadata = TestMetadata { + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10000, + start_delay: 120000, + + ..Default::default() + }, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 25, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(120), + }, + ), + ..TestMetadata::default_multiple_rounds() + }; + + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; + + // async_std needs time to spin down the handler + #[cfg(async_executor_impl = "async-std")] + async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; +} + +// A run where the webserver crashes part-way through +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_combined_network_webserver_crash() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata: TestMetadata = TestMetadata { + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10000, + start_delay: 120000, + + ..Default::default() + }, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 35, + ..Default::default() + }, + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(120), + }, + ), + ..TestMetadata::default_multiple_rounds() + }; + + let mut all_nodes = vec![]; + for node in 0..metadata.total_nodes { + all_nodes.push(ChangeNode { + idx: node, + updown: UpDown::NetworkDown, + }); + } + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(1, 0), all_nodes)], + }; + + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; + + // async_std needs time to spin down the handler + #[cfg(async_executor_impl = "async-std")] + async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; +} + +// A run where the webserver crashes partway through +// and then comes back up +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_combined_network_reup() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata: TestMetadata = TestMetadata { + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10000, + start_delay: 120000, + + ..Default::default() + }, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 35, + ..Default::default() + }, + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(120), + }, + ), + ..TestMetadata::default_multiple_rounds() + }; + + let mut all_down = vec![]; + let mut all_up = vec![]; + for node in 0..metadata.total_nodes { + all_down.push(ChangeNode { + idx: node, + updown: UpDown::NetworkDown, + }); + all_up.push(ChangeNode { + idx: node, + updown: UpDown::NetworkUp, + }); + } + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![ + (Duration::from_millis(500), all_up), + (Duration::from_millis(500), all_down), + ], + }; + + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; + + // async_std needs time to spin down the handler + #[cfg(async_executor_impl = "async-std")] + async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; +} + +// A run where half of the nodes disconnect from the webserver +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_combined_network_half_dc() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata: TestMetadata = TestMetadata { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10000, @@ -33,20 +196,60 @@ async fn test_combined_network() { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(1_200_000), + duration: Duration::from_secs(120), }, ), ..TestMetadata::default_multiple_rounds() }; + let mut half = vec![]; + for node in 0..metadata.total_nodes / 2 { + half.push(ChangeNode { + idx: node, + updown: UpDown::NetworkDown, + }); + } + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(Duration::new(1, 0), half)], + }; + metadata .gen_launcher::(0) .launch() .run_test() - .await + .await; + + // async_std needs time to spin down the handler + #[cfg(async_executor_impl = "async-std")] + async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } -// stress test for web server with libp2p +fn generate_random_node_changes(total_nodes: usize) -> Vec<(Duration, Vec)> { + let mut rng = rand::thread_rng(); + let mut node_changes = vec![]; + + for _ in 0..total_nodes * 2 { + let updown = if rng.gen::() { + UpDown::NetworkUp + } else { + UpDown::NetworkDown + }; + + let node_change = ChangeNode { + idx: rng.gen_range(0..total_nodes), + updown, + }; + + let duration = Duration::new(rng.gen_range(1..3), 0); + + node_changes.push((duration, vec![node_change])); + } + + node_changes +} + +// A fuzz test, where random network events take place on all nodes #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) @@ -54,13 +257,41 @@ async fn test_combined_network() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] -async fn test_stress_combined_network() { +async fn test_stress_combined_network_fuzzy() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata::default_stress(); + let mut metadata = TestMetadata { + num_bootstrap_nodes: 10, + total_nodes: 20, + start_nodes: 20, + + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10000, + start_delay: 120000, + + ..Default::default() + }, + + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(120), + }, + ), + ..TestMetadata::default_stress() + }; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: generate_random_node_changes(metadata.total_nodes), + }; + metadata .gen_launcher::(0) .launch() .run_test() - .await + .await; + + // async_std needs time to spin down the handler + #[cfg(async_executor_impl = "async-std")] + async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 645ce0dfab..0a82fe3127 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -95,7 +95,7 @@ async fn test_timeout_libp2p() { ..Default::default() }; let dead_nodes = vec![ChangeNode { - idx: 0, + idx: 5, updown: UpDown::Down, }]; @@ -107,7 +107,7 @@ async fn test_timeout_libp2p() { }; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::from_millis(500), dead_nodes)], + node_changes: vec![(Duration::from_millis(2000), dead_nodes)], }; metadata.completion_task_description = diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 72ead0d2db..1ccdee6314 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -236,6 +236,12 @@ pub trait CommunicationChannel: Clone + Debug + Send + Sync + ' /// into the network async fn wait_for_ready(&self); + /// Pauses the underlying network + fn pause(&self); + + /// Resumes the underlying network + fn resume(&self); + /// checks if the network is ready /// nonblocking async fn is_ready(&self) -> bool; diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index b1e267d982..f7d76d014e 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -5,7 +5,6 @@ use async_compatibility_layer::channel::OneShotReceiver; use async_lock::RwLock; use clap::Args; use futures::FutureExt; -use tracing::error; use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; @@ -433,7 +432,7 @@ impl WebServerDataSource for WebServerState { } /// Stores a received proposal in the `WebServerState` fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { - error!("Received proposal for view {}", view_number); + info!("Received proposal for view {}", view_number); if view_number > self.recent_proposal { self.recent_proposal = view_number; @@ -454,7 +453,7 @@ impl WebServerDataSource for WebServerState { } fn post_vid_disperse(&mut self, view_number: u64, mut disperse: Vec) -> Result<(), Error> { - error!("Received VID disperse for view {}", view_number); + info!("Received VID disperse for view {}", view_number); if view_number > self.recent_vid_disperse { self.recent_vid_disperse = view_number; } @@ -526,7 +525,7 @@ impl WebServerDataSource for WebServerState { view_number: u64, mut certificate: Vec, ) -> Result<(), Error> { - error!("Received VID Certificate for view {}", view_number); + info!("Received VID Certificate for view {}", view_number); // Only keep proposal history for MAX_VIEWS number of view if self.vid_certificates.len() >= MAX_VIEWS { From d8c6c11e92c4f3c57259918027657380b4904f42 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 27 Nov 2023 13:23:51 -0800 Subject: [PATCH 0447/1393] Update PayloadCommitmentStore, use commit_only, add issue links, minor fixes --- hotshot/src/lib.rs | 22 ++++++++---- task-impls/src/consensus.rs | 10 ++++-- task-impls/src/da.rs | 16 +++------ task-impls/src/transactions.rs | 2 +- task-impls/src/vid.rs | 2 +- task/src/task_impls.rs | 1 - types/src/block_impl.rs | 2 ++ types/src/consensus.rs | 55 ++++++++++++++---------------- types/src/data.rs | 2 ++ types/src/traits/block_contents.rs | 5 ++- 10 files changed, 63 insertions(+), 54 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4089485786..db5d88e91f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -52,7 +52,9 @@ use hotshot_task::{ use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - consensus::{BlockPayloadStore, Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, + consensus::{ + Consensus, ConsensusMetricsValue, PayloadCommitmentStore, View, ViewInner, ViewQueue, + }, data::Leaf, error::StorageSnafu, message::{ @@ -67,6 +69,7 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, + BlockPayload, }, HotShotConfig, }; @@ -218,12 +221,19 @@ impl> SystemContext { ); let mut saved_leaves = HashMap::new(); - let mut saved_block_payloads = BlockPayloadStore::default(); + let mut saved_payload_commitments = PayloadCommitmentStore::default(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); + let payload_commitment = anchored_leaf.get_payload_commitment(); if let Some(payload) = anchored_leaf.get_block_payload() { - if let Err(e) = saved_block_payloads.insert(payload) { - return Err(HotShotError::BlockError { source: e }); - } + let encoded_txns = match payload.encode() { + // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. + // + Ok(encoded) => encoded.into_iter().collect(), + Err(e) => { + return Err(HotShotError::BlockError { source: e }); + } + }; + saved_payload_commitments.insert(payload_commitment, encoded_txns); } let start_view = anchored_leaf.get_view_number(); @@ -233,7 +243,7 @@ impl> SystemContext { cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), saved_leaves, - saved_block_payloads, + saved_payload_commitments, // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 36b4ce6d95..29e424d94e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -735,11 +735,15 @@ impl, A: ConsensusApi + // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. - if let Some(payload) = consensus - .saved_block_payloads + if let Some(encoded_txns) = consensus + .saved_payload_commitments .get(leaf.get_payload_commitment()) { - if let Err(e) = leaf.fill_block_payload(payload.clone()) { + let payload = BlockPayload::from_bytes( + encoded_txns.clone().into_iter(), + leaf.get_block_header().metadata(), + ); + if let Err(e) = leaf.fill_block_payload(payload) { error!( "Saved block payload and commitment don't match: {:?}", e diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index e5a0034f4d..c31a540974 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -18,7 +18,7 @@ use hotshot_types::{ message::Proposal, simple_vote::{DAData, DAVote}, traits::{ - block_contents::{vid_commitment, BlockPayload}, + block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, @@ -242,16 +242,10 @@ impl, A: ConsensusApi + view_inner: ViewInner::DA { payload_commitment }, }); - // Record the block payload we have promised to make available. - if let Err(e) = consensus - .saved_block_payloads - .insert(BlockPayload::from_bytes( - proposal.data.encoded_transactions.into_iter(), - proposal.data.metadata, - )) - { - error!("Failed to build the block payload: {:?}.", e); - } + // Record the block payload commitment we have promised to make available. + consensus + .saved_payload_commitments + .insert(payload_commitment, proposal.data.encoded_transactions); } HotShotEvent::DAVoteRecv(vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number(),); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 6d5014a023..6fa0d69b85 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -239,7 +239,7 @@ impl, A: ConsensusApi + // https://github.com/EspressoSystems/HotShot/issues/1858 self.event_stream .publish(HotShotEvent::BlockReady( - encoded_transactions.into_iter().collect::>(), + encoded_transactions, metadata, view + 1, )) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 1a2dca56c7..af787c629f 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -327,7 +327,7 @@ impl, A: ConsensusApi + // Record the block we have promised to make available. // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_block_payloads.insert(proposal.data.block_payload); + // consensus.saved_payload_commitments.insert(proposal.data.block_payload); } HotShotEvent::VidCertRecv(cert) => { self.network diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs index 768e011775..057717057b 100644 --- a/task/src/task_impls.rs +++ b/task/src/task_impls.rs @@ -299,7 +299,6 @@ pub mod test { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[allow(clippy::should_panic_without_expect)] #[should_panic] async fn test_init_with_event_stream() { setup_logging(); diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index d11b395d87..03b68ba188 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -217,4 +217,6 @@ impl BlockHeader for VIDBlockHeader { fn payload_commitment(&self) -> VidCommitment { self.payload_commitment } + + fn metadata(&self) -> ::Metadata {} } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 4a2c96396c..85934ddba0 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,10 +11,8 @@ use crate::{ error::HotShotError, simple_certificate::QuorumCertificate, traits::{ - block_contents::vid_commitment, metrics::{Counter, Gauge, Histogram, Label, Metrics}, node_implementation::NodeType, - BlockPayload, }, utils::Terminator, }; @@ -49,10 +47,11 @@ pub struct Consensus { /// - includes the MOST RECENT decided leaf pub saved_leaves: CommitmentMap>, - /// Saved block payloads + /// Saved block payload commitments. /// - /// Contains the block payload for every leaf in `saved_leaves` if that payload is available. - pub saved_block_payloads: BlockPayloadStore, + /// Contains the block payload commitment and encoded transactions for every leaf in + /// `saved_leaves` if that payload is available. + pub saved_payload_commitments: PayloadCommitmentStore, /// The `locked_qc` view number pub locked_view: TYPES::Time, @@ -298,7 +297,7 @@ impl Consensus { } /// garbage collects based on state change - /// right now, this removes from both the `saved_block_payloads` + /// right now, this removes from both the `saved_payload_commitments` /// and `state_map` fields of `Consensus` /// # Panics /// On inconsistent stored entries @@ -323,15 +322,15 @@ impl Consensus { self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_payload_commitment()) - .for_each(|block| { - self.saved_block_payloads.remove(block); + .for_each(|payload_commitment| { + self.saved_payload_commitments.remove(payload_commitment); }); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_block_payloads + self.saved_payload_commitments .remove(removed.get_payload_commitment()); } }); @@ -353,7 +352,7 @@ impl Consensus { } } -/// Mapping from block payload commitments to the payloads. +/// Mapping from block payload commitments to the encoded transactions. /// /// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the /// same block, and the block will only be deleted after _all_ such objects are garbage collected. @@ -361,50 +360,46 @@ impl Consensus { /// before all but one branch are ultimately garbage collected. #[derive(Clone, Debug, Derivative)] #[derivative(Default(bound = ""))] -pub struct BlockPayloadStore(HashMap); +pub struct PayloadCommitmentStore(HashMap, u64)>); -impl BlockPayloadStore { +impl PayloadCommitmentStore { /// Save the payload commitment for later retrieval. /// /// After calling this function, and before the corresponding call to [`remove`](Self::remove), - /// `self.get(payload_commitment)` will return `Some(payload)`. + /// `self.get(payload_commitment)` will return `Some(encoded_transactions)`. /// /// This function will increment a reference count on the saved payload commitment, so that /// multiple calls to [`insert`](Self::insert) for the same payload commitment result in /// multiple owning references to the payload commitment. [`remove`](Self::remove) must be /// called once for each reference before the payload commitment will be deallocated. - /// - /// # Errors - /// If the transaction length conversion fails. - pub fn insert(&mut self, payload: PAYLOAD) -> Result<(), PAYLOAD::Error> { + pub fn insert(&mut self, payload_commitment: VidCommitment, encoded_transactions: Vec) { self.0 - .entry(vid_commitment(payload.clone().encode()?.collect())) + .entry(payload_commitment) .and_modify(|(_, refcount)| *refcount += 1) - .or_insert((payload, 1)); - Ok(()) + .or_insert((encoded_transactions, 1)); } - /// Get a saved block payload, if available. + /// Get the saved encoded transactions, if available. /// - /// If a payload has been saved with [`insert`](Self::insert), this function will retrieve it. - /// It may return [`None`] if a block with the given commitment has not been saved or if the - /// block has been dropped with [`remove`](Self::remove). + /// If the encoded transactions has been saved with [`insert`](Self::insert), this function + /// will retrieve it. It may return [`None`] if a block with the given commitment has not been + /// saved or if the block has been dropped with [`remove`](Self::remove). #[must_use] - pub fn get(&self, payload_commitment: VidCommitment) -> Option<&PAYLOAD> { - self.0.get(&payload_commitment).map(|(payload, _)| payload) + pub fn get(&self, payload_commitment: VidCommitment) -> Option<&Vec> { + self.0.get(&payload_commitment).map(|(encoded, _)| encoded) } - /// Drop a reference to a saved block payload. + /// Drop a reference to the saved encoded transactions. /// /// If the set exists and this call drops the last reference to it, the set will be returned, /// Otherwise, the return value is [`None`]. - pub fn remove(&mut self, payload_commitment: VidCommitment) -> Option { + pub fn remove(&mut self, payload_commitment: VidCommitment) -> Option> { if let Entry::Occupied(mut e) = self.0.entry(payload_commitment) { let (_, refcount) = e.get_mut(); *refcount -= 1; if *refcount == 0 { - let (payload, _) = e.remove(); - return Some(payload); + let (encoded, _) = e.remove(); + return Some(encoded); } } None diff --git a/types/src/data.rs b/types/src/data.rs index 1d10340162..a9069e654a 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -376,6 +376,8 @@ impl Leaf { block_payload: TYPES::BlockPayload, ) -> Result<(), BlockError> { let encoded_txns = match block_payload.encode() { + // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. + // Ok(encoded) => encoded.into_iter().collect(), Err(_) => return Err(BlockError::InvalidTransactionLength), }; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 452266e627..822f0b6010 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -90,7 +90,7 @@ pub fn vid_commitment(encoded_transactions: Vec) -> let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap(); - vid.disperse(encoded_transactions).unwrap().commit + vid.commit_only(encoded_transactions).unwrap() } /// Header of a block, which commits to a [`BlockPayload`]. @@ -119,4 +119,7 @@ pub trait BlockHeader: /// Get the payload commitment. fn payload_commitment(&self) -> VidCommitment; + + /// Get the metadata. + fn metadata(&self) -> ::Metadata; } From e69921bcf2a1b97f5c198d04f787b9eb203838f0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 27 Nov 2023 16:58:15 -0500 Subject: [PATCH 0448/1393] remove vote task from consensus --- task-impls/src/consensus.rs | 329 ++++-------------------------------- task-impls/src/vote.rs | 16 +- 2 files changed, 40 insertions(+), 305 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d5a86712c1..ddeae21e1b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,19 +1,19 @@ -use crate::events::HotShotEvent; +use crate::{ + events::HotShotEvent, + vote::{spawn_vote_accumulator, AccumulatorInfo}, +}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use bitvec::prelude::*; use commit::{Commitment, Committable}; use core::time::Duration; -use either::Either; -use futures::FutureExt; use hotshot_constants::LOOK_AHEAD; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, }; use hotshot_types::{ consensus::{Consensus, View}, @@ -33,7 +33,7 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vote::{Certificate, HasViewNumber, VoteAccumulator}, + vote::{Certificate, HasViewNumber}, }; use tracing::warn; @@ -129,145 +129,6 @@ pub struct ConsensusTaskState< pub id: u64, } -// /// State for the vote collection task. This handles the building of a QC from a votes received -// pub struct VoteCollectionTaskState> { -// /// Network for all nodes -// pub quorum_network: Arc, -// /// Membership for Timeout votes/certs -// pub timeout_membership: Arc, -// /// Membership for Quorum Certs/votes -// pub quorum_membership: Arc, - -// #[allow(clippy::type_complexity)] -// /// Accumulator for votes -// pub accumulator: Either< -// VoteAccumulator, QuorumCertificate>, -// QuorumCertificate, -// >, - -// /// Accumulator for votes -// #[allow(clippy::type_complexity)] -// pub timeout_accumulator: Either< -// VoteAccumulator, TimeoutCertificate>, -// TimeoutCertificate, -// >, -// /// View which this vote collection task is collecting votes in -// pub cur_view: TYPES::Time, -// /// The event stream shared by all tasks -// pub event_stream: ChannelStream>, -// /// Node id -// pub id: u64, -// } - -// impl> TS for VoteCollectionTaskState {} - -// #[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "Quorum Vote Collection Task", level = "error")] - -// async fn vote_handle>( -// mut state: VoteCollectionTaskState, -// event: HotShotEvent, -// ) -> ( -// std::option::Option, -// VoteCollectionTaskState, -// ) { -// match event { -// HotShotEvent::QuorumVoteRecv(vote) => { -// // For the case where we receive votes after we've made a certificate -// if state.accumulator.is_right() { -// return (None, state); -// } - -// if vote.get_view_number() != state.cur_view { -// error!( -// "Vote view does not match! vote view is {} current view is {}", -// *vote.get_view_number(), -// *state.cur_view -// ); -// return (None, state); -// } - -// let accumulator = state.accumulator.left().unwrap(); - -// match accumulator.accumulate(&vote, &state.quorum_membership) { -// Either::Left(acc) => { -// state.accumulator = Either::Left(acc); -// return (None, state); -// } -// Either::Right(qc) => { -// debug!("QCFormed! {:?}", qc.view_number); -// state -// .event_stream -// .publish(HotShotEvent::QCFormed(either::Left(qc.clone()))) -// .await; -// state.accumulator = Either::Right(qc.clone()); - -// // No longer need to poll for votes -// state -// .quorum_network -// .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( -// *qc.view_number, -// )) -// .await; - -// return (Some(HotShotTaskCompleted::ShutDown), state); -// } -// } -// } -// // TODO: Code below is redundant of code above; can be fixed -// // during exchange refactor -// // https://github.com/EspressoSystems/HotShot/issues/1799 -// HotShotEvent::TimeoutVoteRecv(vote) => { -// debug!("Received timeout vote for view {}", *vote.get_view_number()); -// if state.timeout_accumulator.is_right() { -// return (None, state); -// } - -// if vote.get_view_number() != state.cur_view { -// error!( -// "Vote view does not match! vote view is {} current view is {}", -// *vote.get_view_number(), -// *state.cur_view -// ); -// return (None, state); -// } - -// let accumulator = state.timeout_accumulator.left().unwrap(); - -// match accumulator.accumulate(&vote, &state.timeout_membership) { -// Either::Left(acc) => { -// state.timeout_accumulator = Either::Left(acc); -// return (None, state); -// } -// Either::Right(qc) => { -// debug!("QCFormed! {:?}", qc.view_number); -// state -// .event_stream -// .publish(HotShotEvent::QCFormed(either::Right(qc.clone()))) -// .await; -// state.timeout_accumulator = Either::Right(qc.clone()); - -// // No longer need to poll for votes -// state -// .quorum_network -// .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( -// *qc.view_number, -// )) -// .await; - -// return (Some(HotShotTaskCompleted::ShutDown), state); -// } -// } -// } -// HotShotEvent::Shutdown => { -// return (Some(HotShotTaskCompleted::ShutDown), state); -// } -// _ => { -// error!("Unexpected event"); -// } -// } -// (None, state) -// } - impl, A: ConsensusApi + 'static> ConsensusTaskState { @@ -859,27 +720,9 @@ impl, A: ConsensusApi + self.da_certs.remove(&time); } } - HotShotEvent::QuorumVoteRecv(vote) => { + HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quroum vote: {:?}", vote.get_view_number()); - if self - .quorum_membership - .get_leader(vote.get_view_number() + 1) - != self.public_key - { - error!( - "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.get_view_number() + 1, - self.quorum_membership - .get_leader(vote.get_view_number() + 2) - == self.public_key - ); - return; - } - - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { if vote.get_view_number() > *collection_view { @@ -892,86 +735,31 @@ impl, A: ConsensusApi + }; if vote.get_view_number() > collection_view { - // Todo check if we are the leader - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_membership.total_nodes()], - phantom: PhantomData, - }; - - let accumulator = - new_accumulator.accumulate(&vote, self.quorum_membership.as_ref()); - - // TODO Create default functions for accumulators - // https://github.com/EspressoSystems/HotShot/issues/1797 - let timeout_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_membership.total_nodes()], - phantom: PhantomData, - }; - - let state = VoteCollectionTaskState { - quorum_network: self.quorum_network.clone(), - quorum_membership: self.quorum_membership.clone(), - timeout_membership: self.timeout_membership.clone(), - accumulator, - timeout_accumulator: either::Left(timeout_accumulator), - cur_view: vote.get_view_number(), + debug!("Starting vote handle for view {:?}", vote.get_view_number()); + let info = AccumulatorInfo { + public_key: self.public_key.clone(), + membership: self.quorum_membership.clone(), + view: vote.get_view_number(), event_stream: self.event_stream.clone(), id: self.id, + registry: self.registry.clone(), }; let name = "Quorum Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!( - event, - HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::TimeoutVoteRecv(_) - ) - })); - - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - - self.vote_collector = Some((vote.get_view_number(), id, stream_id)); - - let _task = async_spawn(async move { - VoteCollectionTypes::build(builder).launch().await; - }); - debug!("Starting vote handle for view {:?}", vote.get_view_number()); + self.vote_collector = spawn_vote_accumulator::< + TYPES, + QuorumVote, + QuorumCertificate, + >( + &info, vote.clone(), event, name.to_string() + ) + .await; } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, HotShotEvent::QuorumVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::QuorumVoteRecv(vote.clone())) .await; } } - HotShotEvent::TimeoutVoteRecv(vote) => { - if self - .timeout_membership - .get_leader(vote.get_view_number() + 1) - != self.public_key - { - error!( - "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.get_view_number() + 1, - self.timeout_membership - .get_leader(vote.get_view_number() + 2) - == self.public_key - ); - return; - } - - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); + HotShotEvent::TimeoutVoteRecv(ref vote) => { let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { if vote.get_view_number() > *collection_view { @@ -984,64 +772,21 @@ impl, A: ConsensusApi + }; if vote.get_view_number() > collection_view { - // Todo check if we are the leader - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.timeout_membership.total_nodes()], - phantom: PhantomData, - }; - - let timeout_accumulator = - new_accumulator.accumulate(&vote, self.quorum_membership.as_ref()); - - let quorum_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.quorum_membership.total_nodes()], - phantom: PhantomData, - }; - - // self.timeout_accumulator = accumulator; - - let state = VoteCollectionTaskState { - quorum_network: self.quorum_network.clone(), - quorum_membership: self.quorum_membership.clone(), - timeout_membership: self.timeout_membership.clone(), - accumulator: either::Left(quorum_accumulator), - timeout_accumulator, - cur_view: vote.get_view_number(), + debug!("Starting vote handle for view {:?}", vote.get_view_number()); + let info = AccumulatorInfo { + public_key: self.public_key.clone(), + membership: self.timeout_membership.clone(), + view: vote.get_view_number(), event_stream: self.event_stream.clone(), id: self.id, + registry: self.registry.clone(), }; let name = "Quorum Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!( - event, - HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::TimeoutVoteRecv(_) - ) - })); - - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - - self.vote_collector = Some((vote.get_view_number(), id, stream_id)); - - let _task = async_spawn(async move { - VoteCollectionTypes::build(builder).launch().await; - }); - debug!("Starting vote handle for view {:?}", vote.get_view_number()); + self.vote_collector = + spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, HotShotEvent::TimeoutVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::TimeoutVoteRecv(vote.clone())) .await; } } @@ -1289,14 +1034,6 @@ impl, A: ConsensusApi> T { } -/// Type allias for consensus' vote collection task -pub type VoteCollectionTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - VoteCollectionTaskState, ->; - /// Type alias for Consensus task pub type ConsensusTaskTypes = HSTWithEvent< ConsensusTaskError, diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 1700ff26f6..1b5ab7baae 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -41,7 +41,7 @@ pub struct VoteCollectionTaskState< pub public_key: TYPES::SignatureKey, /// Membership for voting - pub membership: TYPES::Membership, + pub membership: Arc, /// accumulator handles aggregating the votes pub accumulator: Option>, @@ -158,9 +158,9 @@ where /// Info needed to create a vote accumulator task #[allow(missing_docs)] -pub struct AccumulatorInfo<'a, TYPES: NodeType> { +pub struct AccumulatorInfo { pub public_key: TYPES::SignatureKey, - pub membership: &'a TYPES::Membership, + pub membership: Arc, pub view: TYPES::Time, pub event_stream: ChannelStream>, pub id: u64, @@ -171,28 +171,26 @@ pub struct AccumulatorInfo<'a, TYPES: NodeType> { /// # Panics /// Calls unwrap but should never panic. pub async fn spawn_vote_accumulator( - info: &AccumulatorInfo<'_, TYPES>, + info: &AccumulatorInfo, vote: VOTE, event: HotShotEvent, name: String, -) -> Option<(TYPES::Time, usize, usize> +) -> Option<(TYPES::Time, usize, usize)> where TYPES: NodeType, VOTE: Vote - + AggregatableVote + AggregatableVote + std::marker::Send + std::marker::Sync + 'static, CERT: Certificate + Debug - + AggregatableVote + std::marker::Send + std::marker::Sync + 'static, VoteCollectionTaskState: HandleVoteEvent, { - if vote.get_leader(info.membership) != info.public_key { + if vote.get_leader(info.membership.as_ref()) != info.public_key { return None; } @@ -247,7 +245,7 @@ where .register_event_handler(relay_handle_event); let event_stream_id = builder.get_stream_id().unwrap(); - let id = builder.get_task_id().unwrap() + let id = builder.get_task_id().unwrap(); let _task = async_spawn(async move { VoteTaskStateTypes::build(builder).launch().await }); From b361fa9e13bb8a5db8cf155d9c5281182b410749 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 27 Nov 2023 15:05:05 -0800 Subject: [PATCH 0449/1393] Fix clippy --- task/src/task_impls.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs index 057717057b..768e011775 100644 --- a/task/src/task_impls.rs +++ b/task/src/task_impls.rs @@ -299,6 +299,7 @@ pub mod test { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[allow(clippy::should_panic_without_expect)] #[should_panic] async fn test_init_with_event_stream() { setup_logging(); From beacacf9606474454523cc8bf54b6c6ea1b9b1c9 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 27 Nov 2023 16:41:57 -0800 Subject: [PATCH 0450/1393] Rename the payload store --- hotshot/src/lib.rs | 10 ++++------ task-impls/src/consensus.rs | 5 ++--- task-impls/src/da.rs | 4 ++-- task-impls/src/vid.rs | 2 +- types/src/consensus.rs | 20 +++++++++----------- 5 files changed, 18 insertions(+), 23 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index db5d88e91f..06d20bed2b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -52,9 +52,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - consensus::{ - Consensus, ConsensusMetricsValue, PayloadCommitmentStore, View, ViewInner, ViewQueue, - }, + consensus::{Consensus, ConsensusMetricsValue, PayloadStore, View, ViewInner, ViewQueue}, data::Leaf, error::StorageSnafu, message::{ @@ -221,7 +219,7 @@ impl> SystemContext { ); let mut saved_leaves = HashMap::new(); - let mut saved_payload_commitments = PayloadCommitmentStore::default(); + let mut saved_payloads = PayloadStore::default(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); let payload_commitment = anchored_leaf.get_payload_commitment(); if let Some(payload) = anchored_leaf.get_block_payload() { @@ -233,7 +231,7 @@ impl> SystemContext { return Err(HotShotError::BlockError { source: e }); } }; - saved_payload_commitments.insert(payload_commitment, encoded_txns); + saved_payloads.insert(payload_commitment, encoded_txns); } let start_view = anchored_leaf.get_view_number(); @@ -243,7 +241,7 @@ impl> SystemContext { cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), saved_leaves, - saved_payload_commitments, + saved_payloads, // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 29e424d94e..b6ead843b6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -735,9 +735,8 @@ impl, A: ConsensusApi + // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. - if let Some(encoded_txns) = consensus - .saved_payload_commitments - .get(leaf.get_payload_commitment()) + if let Some(encoded_txns) = + consensus.saved_payloads.get(leaf.get_payload_commitment()) { let payload = BlockPayload::from_bytes( encoded_txns.clone().into_iter(), diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c31a540974..a1041cd2f8 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -242,9 +242,9 @@ impl, A: ConsensusApi + view_inner: ViewInner::DA { payload_commitment }, }); - // Record the block payload commitment we have promised to make available. + // Record the payload we have promised to make available. consensus - .saved_payload_commitments + .saved_payloads .insert(payload_commitment, proposal.data.encoded_transactions); } HotShotEvent::DAVoteRecv(vote) => { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index af787c629f..774cd331cd 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -327,7 +327,7 @@ impl, A: ConsensusApi + // Record the block we have promised to make available. // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_payload_commitments.insert(proposal.data.block_payload); + // consensus.saved_payloads.insert(proposal.data.block_payload); } HotShotEvent::VidCertRecv(cert) => { self.network diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 85934ddba0..351b834dfb 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -47,11 +47,11 @@ pub struct Consensus { /// - includes the MOST RECENT decided leaf pub saved_leaves: CommitmentMap>, - /// Saved block payload commitments. + /// Saved payloads. /// /// Contains the block payload commitment and encoded transactions for every leaf in /// `saved_leaves` if that payload is available. - pub saved_payload_commitments: PayloadCommitmentStore, + pub saved_payloads: PayloadStore, /// The `locked_qc` view number pub locked_view: TYPES::Time, @@ -296,9 +296,8 @@ impl Consensus { Err(HotShotError::LeafNotFound {}) } - /// garbage collects based on state change - /// right now, this removes from both the `saved_payload_commitments` - /// and `state_map` fields of `Consensus` + /// Garbage collects based on state change right now, this removes from both the + /// `saved_payloads` and `state_map` fields of `Consensus`. /// # Panics /// On inconsistent stored entries #[allow(clippy::unused_async)] // async for API compatibility reasons @@ -323,15 +322,14 @@ impl Consensus { .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_payload_commitment()) .for_each(|payload_commitment| { - self.saved_payload_commitments.remove(payload_commitment); + self.saved_payloads.remove(payload_commitment); }); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_payload_commitments - .remove(removed.get_payload_commitment()); + self.saved_payloads.remove(removed.get_payload_commitment()); } }); self.state_map = self.state_map.split_off(&new_anchor_view); @@ -360,10 +358,10 @@ impl Consensus { /// before all but one branch are ultimately garbage collected. #[derive(Clone, Debug, Derivative)] #[derivative(Default(bound = ""))] -pub struct PayloadCommitmentStore(HashMap, u64)>); +pub struct PayloadStore(HashMap, u64)>); -impl PayloadCommitmentStore { - /// Save the payload commitment for later retrieval. +impl PayloadStore { + /// Save the encoded transactions for later retrieval. /// /// After calling this function, and before the corresponding call to [`remove`](Self::remove), /// `self.get(payload_commitment)` will return `Some(encoded_transactions)`. From af8d4783e6708027c57ab4ce3595764cddffdfb3 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 28 Nov 2023 10:07:19 -0500 Subject: [PATCH 0451/1393] use vote collector in DA task --- hotshot/src/tasks/mod.rs | 1 + task-impls/src/consensus.rs | 5 +- task-impls/src/da.rs | 161 +++++------------------------------- web_server/src/lib.rs | 7 +- 4 files changed, 25 insertions(+), 149 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9d7ab1ab1b..bbf6a9e0cb 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -215,6 +215,7 @@ pub async fn add_consensus_task>( api: c_api.clone(), _pd: PhantomData, vote_collector: None, + timeout_vote_collector: None, timeout_task: async_spawn(async move {}), event_stream: event_stream.clone(), output_event_stream: output_stream, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ddeae21e1b..9935f78989 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -101,9 +101,8 @@ pub struct ConsensusTaskState< /// Current Vote collection task, with it's view. pub vote_collector: Option<(TYPES::Time, usize, usize)>, - /// Have we already sent a proposal for a particular view - /// since proposal can be sent either on QCFormed event or ViewChange event - // pub proposal_sent: HashMap, + /// Current timeout vote collection task with its view + pub timeout_vote_collector: Option<(TYPES::Time, usize, usize)>, /// timeout task handle pub timeout_task: JoinHandle<()>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 776c755925..55828afd04 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,18 +1,16 @@ -use crate::events::HotShotEvent; -use async_compatibility_layer::art::async_spawn; +use crate::{ + events::HotShotEvent, + vote::{spawn_vote_accumulator, AccumulatorInfo}, +}; use async_lock::RwLock; -use bitvec::prelude::*; use commit::Committable; -use either::{Either, Left, Right}; -use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, }; -use hotshot_types::simple_certificate::DACertificate; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, @@ -29,11 +27,10 @@ use hotshot_types::{ }, utils::ViewInner, vote::HasViewNumber, - vote::VoteAccumulator, }; use snafu::Snafu; -use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] @@ -79,86 +76,6 @@ pub struct DATaskState< pub id: u64, } -/// Struct to maintain DA Vote Collection task state -pub struct DAVoteCollectionTaskState> { - /// Membership for the DA committee - pub da_membership: Arc, - - /// Network for DA - pub da_network: Arc, - // #[allow(clippy::type_complexity)] - /// Accumulates DA votes - pub accumulator: - Either, DACertificate>, DACertificate>, - /// the current view - pub cur_view: TYPES::Time, - /// event stream for channel events - pub event_stream: ChannelStream>, - /// This Nodes public key - pub public_key: TYPES::SignatureKey, - - /// This Nodes private key - pub private_key: ::PrivateKey, - /// the id of this task state - pub id: u64, -} - -impl> TS for DAVoteCollectionTaskState {} - -#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "DA Vote Collection Task", level = "error")] -async fn vote_handle>( - mut state: DAVoteCollectionTaskState, - event: HotShotEvent, -) -> ( - std::option::Option, - DAVoteCollectionTaskState, -) { - match event { - HotShotEvent::DAVoteRecv(vote) => { - debug!("DA vote recv, collection task {:?}", vote.get_view_number()); - // panic!("Vote handle received DA vote for view {}", *vote.get_view_number()); - - // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { - debug!("DA accumulator finished view: {:?}", state.cur_view); - return (None, state); - } - - let accumulator = state.accumulator.left().unwrap(); - - match accumulator.accumulate(&vote, state.da_membership.as_ref()) { - Left(new_accumulator) => { - state.accumulator = either::Left(new_accumulator); - } - - Right(dac) => { - debug!("Sending DAC! {:?}", dac.view_number); - state - .event_stream - .publish(HotShotEvent::DACSend(dac.clone(), state.public_key.clone())) - .await; - - state.accumulator = Right(dac.clone()); - state - .da_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *dac.view_number, - )) - .await; - - // Return completed at this point - return (Some(HotShotTaskCompleted::ShutDown), state); - } - } - } - HotShotEvent::Shutdown => return (Some(HotShotTaskCompleted::ShutDown), state), - _ => { - error!("unexpected event {:?}", event); - } - } - (None, state) -} - impl, A: ConsensusApi + 'static> DATaskState { @@ -254,23 +171,18 @@ impl, A: ConsensusApi + .saved_block_payloads .insert(proposal.data.block_payload); } - HotShotEvent::DAVoteRecv(vote) => { - debug!("DA vote recv, Main Task {:?}", vote.get_view_number(),); + HotShotEvent::DAVoteRecv(ref vote) => { + debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); // Check if we are the leader and the vote is from the sender. let view = vote.get_view_number(); if self.da_membership.get_leader(view) != self.public_key { error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.da_membership.get_leader(view + 1) == self.public_key); return None; } - - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); let collection_view = if let Some((collection_view, collection_id, _)) = &self.vote_collector { // TODO: Is this correct for consecutive leaders? if view > *collection_view { - // warn!("shutting down for view {:?}", collection_view); self.registry.shutdown_task(*collection_id).await; } *collection_view @@ -279,48 +191,21 @@ impl, A: ConsensusApi + }; if view > collection_view { - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.da_membership.total_nodes()], - phantom: PhantomData, - }; - - let accumulator = - new_accumulator.accumulate(&vote, self.da_membership.as_ref()); - - let state = DAVoteCollectionTaskState { - accumulator, - cur_view: view, + debug!("Starting vote handle for view {:?}", vote.get_view_number()); + let info = AccumulatorInfo { + public_key: self.public_key.clone(), + membership: self.da_membership.clone(), + view: vote.get_view_number(), event_stream: self.event_stream.clone(), id: self.id, - da_membership: self.da_membership.clone(), - da_network: self.da_network.clone(), - public_key: self.public_key.clone(), - private_key: self.private_key.clone(), + registry: self.registry.clone(), }; - let name = "DA Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!(event, HotShotEvent::DAVoteRecv(_)) - })); - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - let _task = - async_spawn( - async move { DAVoteCollectionTypes::build(builder).launch().await }, - ); - self.vote_collector = Some((view, id, stream_id)); + let name = "Quorum Vote Collection"; + self.vote_collector = + spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, HotShotEvent::DAVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::DAVoteRecv(vote.clone())) .await; }; } @@ -445,14 +330,6 @@ impl, A: ConsensusApi + { } -/// Type alias for DA Vote Collection Types -pub type DAVoteCollectionTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - DAVoteCollectionTaskState, ->; - /// Type alias for DA Task Types pub type DATaskTypes = HSTWithEvent< ConsensusTaskError, diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index b1e267d982..72a6e89e36 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -278,7 +278,6 @@ impl WebServerDataSource for WebServerState { let votes = self.view_sync_votes.get(&view_number); let mut ret_votes = vec![]; if let Some(votes) = votes { - // error!("Passed in index is: {} self index is: {}", index, *self.vote_index.get(&view_number).unwrap()); for i in index..*self.view_sync_vote_index.get(&view_number).unwrap() { ret_votes.push(votes[i as usize].1.clone()); } @@ -433,7 +432,7 @@ impl WebServerDataSource for WebServerState { } /// Stores a received proposal in the `WebServerState` fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { - error!("Received proposal for view {}", view_number); + debug!("Received proposal for view {}", view_number); if view_number > self.recent_proposal { self.recent_proposal = view_number; @@ -454,7 +453,7 @@ impl WebServerDataSource for WebServerState { } fn post_vid_disperse(&mut self, view_number: u64, mut disperse: Vec) -> Result<(), Error> { - error!("Received VID disperse for view {}", view_number); + debug!("Received VID disperse for view {}", view_number); if view_number > self.recent_vid_disperse { self.recent_vid_disperse = view_number; } @@ -526,7 +525,7 @@ impl WebServerDataSource for WebServerState { view_number: u64, mut certificate: Vec, ) -> Result<(), Error> { - error!("Received VID Certificate for view {}", view_number); + debug!("Received VID Certificate for view {}", view_number); // Only keep proposal history for MAX_VIEWS number of view if self.vid_certificates.len() >= MAX_VIEWS { From ab84ee814cbd0d9167f17fdac88d0bf94192784b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 28 Nov 2023 10:18:06 -0500 Subject: [PATCH 0452/1393] use vote accumulator for VID --- task-impls/src/vid.rs | 170 +++++------------------------------------ task-impls/src/vote.rs | 50 ++++++++++-- web_server/src/lib.rs | 1 - 3 files changed, 60 insertions(+), 161 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 4628fc1a67..8ceb28e26a 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,15 +1,14 @@ -use crate::events::HotShotEvent; -use async_compatibility_layer::art::async_spawn; +use crate::{ + events::HotShotEvent, + vote::{spawn_vote_accumulator, AccumulatorInfo}, +}; use async_lock::RwLock; -use bitvec::prelude::*; -use either::{Either, Left, Right}; -use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, }; use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ @@ -24,15 +23,13 @@ use hotshot_types::{ utils::ViewInner, }; use hotshot_types::{ - simple_certificate::VIDCertificate, simple_vote::{VIDData, VIDVote}, traits::network::CommunicationChannel, - vote::{HasViewNumber, VoteAccumulator}, + vote::HasViewNumber, }; use snafu::Snafu; -use std::marker::PhantomData; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] @@ -73,94 +70,6 @@ pub struct VIDTaskState< pub id: u64, } -/// Struct to maintain VID Vote Collection task state -pub struct VIDVoteCollectionTaskState> { - /// Network for all nodes - pub network: Arc, - /// Membership for teh quorum - pub membership: Arc, - /// This Nodes Public Key - pub public_key: TYPES::SignatureKey, - /// Our Private Key - pub private_key: ::PrivateKey, - #[allow(clippy::type_complexity)] - /// Accumulates VID votes - pub accumulator: Either< - VoteAccumulator, VIDCertificate>, - VIDCertificate, - >, - /// the current view - pub cur_view: TYPES::Time, - /// event stream for channel events - pub event_stream: ChannelStream>, - /// the id of this task state - pub id: u64, -} - -impl> TS for VIDVoteCollectionTaskState {} - -#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "VID Vote Collection Task", level = "error")] -async fn vote_handle( - mut state: VIDVoteCollectionTaskState, - event: HotShotEvent, -) -> ( - Option, - VIDVoteCollectionTaskState, -) -where - TYPES: NodeType, - I: NodeImplementation, -{ - match event { - HotShotEvent::VidVoteRecv(vote) => { - debug!( - "VID vote recv, collection task {:?}", - vote.get_view_number() - ); - // panic!("Vote handle received VID vote for view {}", *vote.current_view); - - // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { - debug!("VID accumulator finished view: {:?}", state.cur_view); - return (None, state); - } - - let accumulator = state.accumulator.left().unwrap(); - match accumulator.accumulate(&vote, state.membership.as_ref()) { - Left(new_accumulator) => { - state.accumulator = either::Left(new_accumulator); - } - - Right(vid_cert) => { - debug!("Sending VID cert! {:?}", vid_cert.view_number); - state - .event_stream - .publish(HotShotEvent::VidCertSend( - vid_cert.clone(), - state.public_key.clone(), - )) - .await; - - state.accumulator = Right(vid_cert.clone()); - state - .network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDVotes( - *vid_cert.view_number, - )) - .await; - - // Return completed at this point - return (Some(HotShotTaskCompleted::ShutDown), state); - } - } - } - _ => { - error!("unexpected event {:?}", event); - } - } - (None, state) -} - impl, A: ConsensusApi + 'static> VIDTaskState { @@ -171,13 +80,7 @@ impl, A: ConsensusApi + event: HotShotEvent, ) -> Option { match event { - HotShotEvent::VidVoteRecv(vote) => { - // warn!( - // "VID vote recv, Main Task {:?}, key: {:?}", - // vote.current_view, - // self.committee_exchange.public_key() - // ); - // Check if we are the leader and the vote is from the sender. + HotShotEvent::VidVoteRecv(ref vote) => { let view = vote.get_view_number(); if self.membership.get_leader(view) != self.public_key { error!( @@ -187,10 +90,6 @@ impl, A: ConsensusApi + ); return None; } - - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); let collection_view = if let Some((collection_view, collection_id, _)) = &self.vote_collector { // TODO: Is this correct for consecutive leaders? @@ -204,46 +103,21 @@ impl, A: ConsensusApi + }; if view > collection_view { - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.membership.total_nodes()], - phantom: PhantomData, - }; - - let accumulator = new_accumulator.accumulate(&vote, self.membership.as_ref()); - - let state = VIDVoteCollectionTaskState { - network: self.network.clone(), - membership: self.membership.clone(), + debug!("Starting vote handle for view {:?}", vote.get_view_number()); + let info = AccumulatorInfo { public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - accumulator, - cur_view: view, + membership: self.membership.clone(), + view: vote.get_view_number(), event_stream: self.event_stream.clone(), id: self.id, + registry: self.registry.clone(), }; - let name = "VID Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!(event, HotShotEvent::VidVoteRecv(_)) - })); - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - let _task = async_spawn(async move { - VIDVoteCollectionTypes::build(builder).launch().await - }); - self.vote_collector = Some((view, id, stream_id)); + let name = "Quorum Vote Collection"; + self.vote_collector = + spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; } else if let Some((_, _, stream_id)) = self.vote_collector { self.event_stream - .direct_message(stream_id, HotShotEvent::VidVoteRecv(vote)) + .direct_message(stream_id, HotShotEvent::VidVoteRecv(vote.clone())) .await; }; } @@ -406,14 +280,6 @@ impl, A: ConsensusApi + { } -/// Type alias for VID Vote Collection Types -pub type VIDVoteCollectionTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - VIDVoteCollectionTaskState, ->; - /// Type alias for VID Task Types pub type VIDTaskTypes = HSTWithEvent< ConsensusTaskError, diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 1b5ab7baae..36fe81bff7 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -14,11 +14,11 @@ use hotshot_task::{ }; use hotshot_types::{ simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, - ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{election::Membership, node_implementation::NodeType}, @@ -255,21 +255,23 @@ where /// Alias for Quorum vote accumulator type QuorumVoteState = VoteCollectionTaskState, QuorumCertificate>; -/// Alias for Quorum vote accumulator +/// Alias for DA vote accumulator type DAVoteState = VoteCollectionTaskState, DACertificate>; -/// Alias for Quorum vote accumulator +/// Alias for VID vote accumulator state +type VIDVoteState = VoteCollectionTaskState, VIDCertificate>; +/// Alias for Timeout vote accumulator type TimeoutVoteState = VoteCollectionTaskState, TimeoutCertificate>; -/// Alias for Quorum vote accumulator +/// Alias for View Sync Pre Commit vote accumulator type ViewSyncPreCommitState = VoteCollectionTaskState< TYPES, ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, >; -/// Alias for Quorum vote accumulator +/// Alias for View Sync Commit vote accumulator type ViewSyncCommitVoteState = VoteCollectionTaskState, ViewSyncCommitCertificate2>; -/// Alias for Quorum vote accumulator +/// Alias for View Sync Finalize vote accumulator type ViewSyncFinalizeVoteState = VoteCollectionTaskState< TYPES, ViewSyncFinalizeVote, @@ -318,6 +320,20 @@ impl AggregatableVote, TimeoutCertifi } } +impl AggregatableVote, VIDCertificate> + for VIDVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number() + 1) + } + fn make_cert_event( + certificate: VIDCertificate, + key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::VidCertSend(certificate, key.clone()) + } +} + impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote @@ -418,6 +434,24 @@ impl HandleVoteEvent, TimeoutCertific } } +#[async_trait] +impl HandleVoteEvent, VIDCertificate> + for VIDVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, VIDVoteState) { + match event { + HotShotEvent::VidVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::VidVoteRecv(_)) + } +} + #[async_trait] impl HandleVoteEvent, ViewSyncPreCommitCertificate2> diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 72a6e89e36..5a866f652e 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -5,7 +5,6 @@ use async_compatibility_layer::channel::OneShotReceiver; use async_lock::RwLock; use clap::Args; use futures::FutureExt; -use tracing::error; use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; From 277ce52aebd174bbbc466f75f9d6a20afd7d6dee Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 28 Nov 2023 10:43:02 -0500 Subject: [PATCH 0453/1393] replace view sync vote accumulator --- task-impls/src/view_sync.rs | 468 ++++-------------------------------- 1 file changed, 52 insertions(+), 416 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c3882111e9..ea0b170ced 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,18 +1,18 @@ #![allow(clippy::module_name_repetitions)] -use crate::events::HotShotEvent; +use crate::{ + events::HotShotEvent, + vote::{spawn_vote_accumulator, AccumulatorInfo}, +}; use async_compatibility_layer::art::{async_sleep, async_spawn}; - -use either::Either::{self, Left, Right}; +use either::Either; use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; +use hotshot_types::{simple_vote::ViewSyncFinalizeData, traits::signature_key::SignatureKey}; use hotshot_types::{ - simple_certificate::{ - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, - }, simple_vote::{ ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, ViewSyncPreCommitVote, @@ -20,9 +20,7 @@ use hotshot_types::{ traits::network::ConsensusIntentEvent, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; -use hotshot_types::{simple_vote::ViewSyncFinalizeData, traits::signature_key::SignatureKey}; -use bitvec::prelude::*; use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ message::GeneralConsensusMessage, @@ -35,7 +33,7 @@ use hotshot_types::{ }, }; use snafu::Snafu; -use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use tracing::{debug, error, info, instrument}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync @@ -307,8 +305,9 @@ impl< }); } - HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - if let Some(relay_task) = self.relay_task_map.get(&vote.get_view_number()) { + HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { + let vote_view = vote.get_view_number(); + if let Some(relay_task) = self.relay_task_map.get(&vote_view) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -319,7 +318,7 @@ impl< // We do not have a relay task already running, so start one if self .membership - .get_leader(vote.get_view_number() + vote.get_data().relay) + .get_leader(vote_view + vote.get_data().relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` @@ -327,78 +326,26 @@ impl< return; } - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.membership.total_nodes()], - phantom: PhantomData, - }; - - let mut relay_state = ViewSyncRelayTaskState::< - TYPES, - I, - ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, - > { - event_stream: self.event_stream.clone(), - membership: self.membership.clone(), - network: self.network.clone(), + let name = format!("View Sync Relay Task for view {vote_view:?}"); + let info = AccumulatorInfo { public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - accumulator: either::Left(new_accumulator), + membership: self.membership.clone(), + view: vote_view, + event_stream: self.event_stream.clone(), id: self.id, + registry: self.registry.clone(), }; - - let result = relay_state.handle_event(event.clone()).await; - - if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // The protocol has finished - return; + let vote_collector = + spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; + if let Some((_, _, event_stream_id)) = vote_collector { + self.relay_task_map + .insert(vote_view, ViewSyncTaskInfo { event_stream_id }); } - - relay_state = result.1; - - let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); - - let relay_handle_event = HandleEvent(Arc::new( - move |event, - state: ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, - >| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent::default(); - let builder = TaskBuilder::< - ViewSyncRelayTaskStateTypes< - TYPES, - I, - ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, - >, - >::new(name) - .register_event_stream(relay_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(relay_state) - .register_event_handler(relay_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - - self.relay_task_map - .insert(vote.get_view_number(), ViewSyncTaskInfo { event_stream_id }); - let _view_sync_relay_task = async_spawn(async move { - ViewSyncRelayTaskStateTypes::build(builder).launch().await - }); } - HotShotEvent::ViewSyncCommitVoteRecv(vote) => { - if let Some(relay_task) = self.relay_task_map.get(&vote.get_view_number()) { + HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { + let vote_view = vote.get_view_number(); + if let Some(relay_task) = self.relay_task_map.get(&vote_view) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -409,7 +356,7 @@ impl< // We do not have a relay task already running, so start one if self .membership - .get_leader(vote.get_view_number() + vote.get_data().relay) + .get_leader(vote_view + vote.get_data().relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` @@ -417,78 +364,26 @@ impl< return; } - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.membership.total_nodes()], - phantom: PhantomData, - }; - - let mut relay_state = ViewSyncRelayTaskState::< - TYPES, - I, - ViewSyncCommitVote, - ViewSyncCommitCertificate2, - > { - event_stream: self.event_stream.clone(), - membership: self.membership.clone(), - network: self.network.clone(), + let name = format!("View Sync Relay Task for view {vote_view:?}"); + let info = AccumulatorInfo { public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - accumulator: either::Left(new_accumulator), + membership: self.membership.clone(), + view: vote_view, + event_stream: self.event_stream.clone(), id: self.id, + registry: self.registry.clone(), }; - - let result = relay_state.handle_event(event.clone()).await; - - if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // The protocol has finished - return; + let vote_collector = + spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; + if let Some((_, _, event_stream_id)) = vote_collector { + self.relay_task_map + .insert(vote_view, ViewSyncTaskInfo { event_stream_id }); } - - relay_state = result.1; - - let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); - - let relay_handle_event = HandleEvent(Arc::new( - move |event, - state: ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncCommitVote, - ViewSyncCommitCertificate2, - >| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent::default(); - let builder = TaskBuilder::< - ViewSyncRelayTaskStateTypes< - TYPES, - I, - ViewSyncCommitVote, - ViewSyncCommitCertificate2, - >, - >::new(name) - .register_event_stream(relay_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(relay_state) - .register_event_handler(relay_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - - self.relay_task_map - .insert(vote.get_view_number(), ViewSyncTaskInfo { event_stream_id }); - let _view_sync_relay_task = async_spawn(async move { - ViewSyncRelayTaskStateTypes::build(builder).launch().await - }); } - HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - if let Some(relay_task) = self.relay_task_map.get(&vote.get_view_number()) { + HotShotEvent::ViewSyncFinalizeVoteRecv(ref vote) => { + let vote_view = vote.get_view_number(); + if let Some(relay_task) = self.relay_task_map.get(&vote_view) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -499,7 +394,7 @@ impl< // We do not have a relay task already running, so start one if self .membership - .get_leader(vote.get_view_number() + vote.get_data().relay) + .get_leader(vote_view + vote.get_data().relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` @@ -507,74 +402,21 @@ impl< return; } - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.membership.total_nodes()], - phantom: PhantomData, - }; - - let mut relay_state = ViewSyncRelayTaskState::< - TYPES, - I, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, - > { - event_stream: self.event_stream.clone(), - membership: self.membership.clone(), - network: self.network.clone(), + let name = format!("View Sync Relay Task for view {vote_view:?}"); + let info = AccumulatorInfo { public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - accumulator: either::Left(new_accumulator), + membership: self.membership.clone(), + view: vote.get_view_number(), + event_stream: self.event_stream.clone(), id: self.id, + registry: self.registry.clone(), }; - - let result = relay_state.handle_event(event.clone()).await; - - if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // The protocol has finished - return; + let vote_collector = + spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; + if let Some((_, _, event_stream_id)) = vote_collector { + self.relay_task_map + .insert(vote_view, ViewSyncTaskInfo { event_stream_id }); } - - relay_state = result.1; - - let name = format!("View Sync Relay Task for view {:?}", vote.get_view_number()); - - let relay_handle_event = HandleEvent(Arc::new( - move |event, - state: ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, - >| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent::default(); - let builder = TaskBuilder::< - ViewSyncRelayTaskStateTypes< - TYPES, - I, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, - >, - >::new(name) - .register_event_stream(relay_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(relay_state) - .register_event_handler(relay_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - - self.relay_task_map - .insert(vote.get_view_number(), ViewSyncTaskInfo { event_stream_id }); - let _view_sync_relay_task = async_spawn(async move { - ViewSyncRelayTaskStateTypes::build(builder).launch().await - }); } &HotShotEvent::ViewChange(new_view) => { @@ -1087,209 +929,3 @@ impl, A: ConsensusApi + (None, self) } } - -impl> - ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, - > -{ - /// Handles incoming events for the view sync relay task - #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] - #[allow(clippy::type_complexity)] - pub async fn handle_event( - mut self, - event: HotShotEvent, - ) -> ( - std::option::Option, - ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, - >, - ) { - match event { - HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - // Ignore this vote if we are not the correct relay - // TODO ED Replace exchange with membership - if self - .membership - .get_leader(vote.get_data().round + vote.get_data().relay) - != self.public_key - { - info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); - return (None, self); - } - - debug!( - "Accumulating ViewSyncPreCommitVote for round {} and relay {}", - *vote.get_data().round, - vote.get_data().relay - ); - - match self.accumulator { - Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), - Left(accumulator) => { - match accumulator.accumulate(&vote, self.membership.as_ref()) { - Left(new_accumulator) => { - self.accumulator = Either::Left(new_accumulator); - } - Right(certificate) => { - self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitCertificate2Send( - certificate.clone(), - self.public_key.clone(), - )) - .await; - self.accumulator = Right(certificate); - - return (Some(HotShotTaskCompleted::ShutDown), self); - } - } - } - }; - (None, self) - } - - _ => (None, self), - } - } -} - -impl> - ViewSyncRelayTaskState, ViewSyncCommitCertificate2> -{ - /// Handles incoming events for the view sync relay task - #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] - #[allow(clippy::type_complexity)] - pub async fn handle_event( - mut self, - event: HotShotEvent, - ) -> ( - std::option::Option, - ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncCommitVote, - ViewSyncCommitCertificate2, - >, - ) { - match event { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => { - // Ignore this vote if we are not the correct relay - if self - .membership - .get_leader(vote.get_data().round + vote.get_data().relay) - != self.public_key - { - info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); - return (None, self); - } - - debug!( - "Accumulating ViewSyncCommitVote for round {} and relay {}", - *vote.get_data().round, - vote.get_data().relay - ); - - match self.accumulator { - Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), - Left(accumulator) => { - match accumulator.accumulate(&vote, self.membership.as_ref()) { - Left(new_accumulator) => { - self.accumulator = Either::Left(new_accumulator); - } - Right(certificate) => { - self.event_stream - .publish(HotShotEvent::ViewSyncCommitCertificate2Send( - certificate.clone(), - self.public_key.clone(), - )) - .await; - self.accumulator = Right(certificate); - - return (Some(HotShotTaskCompleted::ShutDown), self); - } - } - } - }; - (None, self) - } - - _ => (None, self), - } - } -} - -impl> - ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, - > -{ - /// Handles incoming events for the view sync relay task - #[instrument(skip_all, fields(id = self.id), name = "View Sync Relay Task", level = "error")] - #[allow(clippy::type_complexity)] - pub async fn handle_event( - mut self, - event: HotShotEvent, - ) -> ( - std::option::Option, - ViewSyncRelayTaskState< - TYPES, - I, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, - >, - ) { - match event { - HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - // Ignore this vote if we are not the correct relay - if self - .membership - .get_leader(vote.get_data().round + vote.get_data().relay) - != self.public_key - { - info!("We are not the correct relay for this vote; vote was intended for relay {}", vote.get_data().relay); - return (None, self); - } - - debug!( - "Accumulating ViewSyncFinalizetVote for round {} and relay {}", - *vote.get_data().round, - vote.get_data().relay - ); - - match self.accumulator { - Right(_) => return (Some(HotShotTaskCompleted::ShutDown), self), - Left(accumulator) => { - match accumulator.accumulate(&vote, self.membership.as_ref()) { - Left(new_accumulator) => { - self.accumulator = Either::Left(new_accumulator); - } - Right(certificate) => { - self.event_stream - .publish(HotShotEvent::ViewSyncFinalizeCertificate2Send( - certificate.clone(), - self.public_key.clone(), - )) - .await; - self.accumulator = Right(certificate); - - return (Some(HotShotTaskCompleted::ShutDown), self); - } - } - } - }; - (None, self) - } - - _ => (None, self), - } - } -} From daf8ebc1564eaaf62f346594b59b18d155316d3a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 28 Nov 2023 11:25:44 -0500 Subject: [PATCH 0454/1393] Fix leader issue with view sync vote tasks --- task-impls/src/vote.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 36fe81bff7..7b4258f45c 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -191,6 +191,7 @@ where VoteCollectionTaskState: HandleVoteEvent, { if vote.get_leader(info.membership.as_ref()) != info.public_key { + error!("Vote is not to the leader"); return None; } @@ -339,7 +340,7 @@ impl for ViewSyncCommitVote { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + membership.get_leader(self.get_data().round + self.get_data().relay) } fn make_cert_event( certificate: ViewSyncCommitCertificate2, @@ -354,7 +355,7 @@ impl for ViewSyncPreCommitVote { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + membership.get_leader(self.get_data().round + self.get_data().relay) } fn make_cert_event( certificate: ViewSyncPreCommitCertificate2, @@ -369,7 +370,7 @@ impl for ViewSyncFinalizeVote { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + membership.get_leader(self.get_data().round + self.get_data().relay) } fn make_cert_event( certificate: ViewSyncFinalizeCertificate2, From f327c2dfb21d2649c97f24e2fe61e9495c494764 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 28 Nov 2023 11:27:38 -0500 Subject: [PATCH 0455/1393] fix task names --- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/vid.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9935f78989..aed8af5fdd 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -780,7 +780,7 @@ impl, A: ConsensusApi + id: self.id, registry: self.registry.clone(), }; - let name = "Quorum Vote Collection"; + let name = "Timeout Vote Collection"; self.vote_collector = spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; } else if let Some((_, _, stream_id)) = self.vote_collector { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 55828afd04..09b2607145 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -200,7 +200,7 @@ impl, A: ConsensusApi + id: self.id, registry: self.registry.clone(), }; - let name = "Quorum Vote Collection"; + let name = "DA Vote Collection"; self.vote_collector = spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; } else if let Some((_, _, stream_id)) = self.vote_collector { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 8ceb28e26a..bae8b8185a 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -112,7 +112,7 @@ impl, A: ConsensusApi + id: self.id, registry: self.registry.clone(), }; - let name = "Quorum Vote Collection"; + let name = "VID Vote Collection"; self.vote_collector = spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; } else if let Some((_, _, stream_id)) = self.vote_collector { From 96c07b8198c8fc0c05ff30f5f91ce1fdce063856 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 28 Nov 2023 11:48:18 -0500 Subject: [PATCH 0456/1393] lower a log level, fix vid leader view num --- task-impls/src/vote.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 7b4258f45c..c75599e33d 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -191,7 +191,7 @@ where VoteCollectionTaskState: HandleVoteEvent, { if vote.get_leader(info.membership.as_ref()) != info.public_key { - error!("Vote is not to the leader"); + debug!("Vote is not to the leader"); return None; } @@ -325,7 +325,7 @@ impl AggregatableVote, VIDCertificate { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number() + 1) + membership.get_leader(self.get_view_number()) } fn make_cert_event( certificate: VIDCertificate, From 3a59d2b75e0ee39268870a6c6a3d6a6ba299cd6c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 28 Nov 2023 13:48:13 -0800 Subject: [PATCH 0457/1393] mark places need to be updated --- task-impls/src/consensus.rs | 5 +++-- task-impls/src/vid.rs | 9 +-------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 309509a20f..7d1b964db1 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -295,7 +295,7 @@ impl, A: ConsensusApi + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] - + // Check if we are able to vote, like whether the proposal is valid, whether we have DAC and VID share, and if so, vote async fn vote_if_able(&self) -> bool { if !self.quorum_membership.has_stake(&self.public_key) { debug!( @@ -367,6 +367,7 @@ impl, A: ConsensusApi + // Only vote if you have the DA cert // ED Need to update the view number this is stored under? + // Sishan NOTE TODO: Add the logic of "it does not vote until it has seen its VID share" if let Some(cert) = self.da_certs.get(&(proposal.get_view_number())) { let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. @@ -1103,7 +1104,7 @@ impl, A: ConsensusApi + let view = cert.get_view_number(); self.vid_certs.insert(view, cert); - // Sishan NOTE TODO + // Sishan NOTE TODO: delete it // RM TODO: VOTING } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index e31b89d73c..dbad980e87 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -263,6 +263,7 @@ impl, A: ConsensusApi + debug!("VID disperse data is fresh."); let payload_commitment = disperse.data.payload_commitment; + // Sishan NOTE TODO: Add to the storage that we have received the VID disperse for a specific view / block // Check whether the sender is the right leader for this view let view_leader_key = self.membership.get_leader(view); @@ -275,14 +276,6 @@ impl, A: ConsensusApi + error!("Could not verify VID proposal sig."); return None; } - // Sishan NOTE TODO: check whether this part is needed? How consensus committee functioned in vid task? - if !self.membership.has_stake(&self.public_key) { - error!( - "We were not chosen for consensus-vid committee on {:?}", - self.cur_view - ); - return None; - } // Generate and send vote after receive and validate disperse (VID share) let vote = VIDVote::create_signed_vote( From e4ad4daa390c66530639f9595af77a91500a9e7a Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 28 Nov 2023 17:55:21 -0500 Subject: [PATCH 0458/1393] VID: Reorganize and create new events (#2114) --- hotshot/src/tasks/mod.rs | 2 +- task-impls/Cargo.toml | 1 + task-impls/src/da.rs | 23 +++++----- task-impls/src/events.rs | 6 ++- task-impls/src/network.rs | 4 +- task-impls/src/transactions.rs | 45 +++--------------- task-impls/src/vid.rs | 61 ++++++++++++++++++++++++- testing/Cargo.toml | 1 + testing/src/task_helpers.rs | 2 +- testing/tests/da_task.rs | 14 +++--- testing/tests/network_task.rs | 26 +++++++---- testing/tests/vid_task.rs | 37 +++++++++++---- types/src/block_impl.rs | 6 +-- types/src/data.rs | 2 +- types/src/traits/block_contents.rs | 2 +- types/src/traits/node_implementation.rs | 4 +- 16 files changed, 146 insertions(+), 90 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 26d1d35f88..41f3131b1c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -206,7 +206,7 @@ pub async fn add_consensus_task>( let registry = task_runner.registry.clone(); let (payload, metadata) = ::genesis(); // Impossible for `unwrap` to fail on the genesis payload. - let payload_commitment = vid_commitment(payload.encode().unwrap().collect()); + let payload_commitment = vid_commitment(&payload.encode().unwrap().collect()); // build the consensus task let consensus_state = ConsensusTaskState { registry: registry.clone(), diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 08c900b53b..f6d9792041 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -26,6 +26,7 @@ rand_chacha = { workspace = true } hotshot-utils = { path = "../utils" } bincode = { workspace = true } bitvec = { workspace = true } +sha2 = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index a1041cd2f8..91b76ff936 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -30,6 +30,7 @@ use hotshot_types::{ vote::HasViewNumber, vote::VoteAccumulator, }; +use sha2::{Digest, Sha256}; use snafu::Snafu; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; @@ -195,7 +196,8 @@ impl, A: ConsensusApi + return None; } - let payload_commitment = vid_commitment(proposal.data.encoded_transactions.clone()); + let payload_commitment = vid_commitment(&proposal.data.encoded_transactions); + let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? let view_leader_key = self.da_membership.get_leader(view); @@ -204,7 +206,7 @@ impl, A: ConsensusApi + return None; } - if !view_leader_key.validate(&proposal.signature, payload_commitment.as_ref()) { + if !view_leader_key.validate(&proposal.signature, &encoded_transactions_hash) { error!("Could not verify proposal."); return None; } @@ -364,14 +366,17 @@ impl, A: ConsensusApi + return None; } - HotShotEvent::BlockReady(encoded_transactions, metadata, view) => { + HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view) => { self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; - let payload_commitment = vid_commitment(encoded_transactions.clone()); + // quick hash the encoded txns with sha256 + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + + // sign the encoded transactions as opposed to the VID commitment let signature = - TYPES::SignatureKey::sign(&self.private_key, payload_commitment.as_ref()); + TYPES::SignatureKey::sign(&self.private_key, &encoded_transactions_hash); let data: DAProposal = DAProposal { encoded_transactions, metadata: metadata.clone(), @@ -386,12 +391,6 @@ impl, A: ConsensusApi + _pd: PhantomData, }; - self.event_stream - .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( - payload_commitment, - metadata, - )) - .await; self.event_stream .publish(HotShotEvent::DAProposalSend( message.clone(), @@ -424,7 +423,7 @@ impl, A: ConsensusApi + HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown - | HotShotEvent::BlockReady(_, _, _) + | HotShotEvent::TransactionsSequenced(_, _, _) | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) ) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 79e1192f4c..12a4d3d0df 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -92,12 +92,14 @@ pub enum HotShotEvent { VidCommitment, ::Metadata, ), - /// Event when the transactions task has a block formed - BlockReady( + /// Event when the transactions task has sequenced transactions. Contains the encoded transactions + TransactionsSequenced( Vec, ::Metadata, TYPES::Time, ), + /// Event when the transactions task has a block formed + BlockReady(VidDisperse, TYPES::Time), /// Event when consensus decided on a leaf LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 78f0beaab8..79979d6470 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -267,7 +267,7 @@ impl> HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncCommitCertificate(certificate.clone()), + GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), ))), TransmitType::Broadcast, None, @@ -276,7 +276,7 @@ impl> HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate.clone()), + GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), ))), TransmitType::Broadcast, None, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 9c7f8d12fa..2693934978 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -14,10 +14,8 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::Consensus, - data::{test_srs, Leaf, VidDisperse, VidScheme, VidSchemeTrait}, - message::Proposal, + data::Leaf, traits::{ - block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusApi, election::Membership, node_implementation::{NodeImplementation, NodeType}, @@ -29,11 +27,10 @@ use hotshot_utils::bincode::bincode_opts; use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, - marker::PhantomData, sync::Arc, time::Instant, }; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, error, instrument, warn}; /// A type alias for `HashMap, T>` type CommitmentMap = HashMap, T>; @@ -220,6 +217,8 @@ impl, A: ConsensusApi + return None; } }; + + // encode the transactions let encoded_transactions = match payload.encode() { Ok(encoded) => encoded.into_iter().collect::>(), Err(e) => { @@ -227,46 +226,16 @@ impl, A: ConsensusApi + return None; } }; - // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); - let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); - - // TODO never clone a block - // https://github.com/EspressoSystems/HotShot/issues/1858 + + // send the sequenced transactions to VID and DA tasks self.event_stream - .publish(HotShotEvent::BlockReady( + .publish(HotShotEvent::TransactionsSequenced( encoded_transactions, metadata, view + 1, )) .await; - // TODO (Keyao) Determine and update where to publish VidDisperseSend. - // - debug!("publishing VID disperse for view {}", *view + 1); - info!("New view: {}", *view); - self.event_stream - .publish(HotShotEvent::VidDisperseSend( - Proposal { - data: VidDisperse { - view_number: view + 1, - payload_commitment: vid_disperse.commit, - shares: vid_disperse.shares, - common: vid_disperse.common, - }, - signature: TYPES::SignatureKey::sign( - &self.private_key, - &vid_disperse.commit, - ), - _pd: PhantomData, - }, - self.public_key.clone(), - )) - .await; return None; } HotShotEvent::Shutdown => { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 774cd331cd..7cf8bf3e23 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -11,9 +11,10 @@ use hotshot_task::{ task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEvent, TaskBuilder}, }; -use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ consensus::{Consensus, View}, + data::VidDisperse, + message::Proposal, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -23,6 +24,13 @@ use hotshot_types::{ }, utils::ViewInner, }; +use hotshot_types::{ + data::{test_srs, VidScheme, VidSchemeTrait}, + traits::{ + block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, + network::ConsensusIntentEvent, + }, +}; use hotshot_types::{ simple_certificate::VIDCertificate, simple_vote::{VIDData, VIDVote}, @@ -336,6 +344,55 @@ impl, A: ConsensusApi + )) .await; } + + HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { + // TODO + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); + + // send the commitment and metadata to consensus for block building + self.event_stream + .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( + vid_disperse.commit, + metadata, + )) + .await; + + // send the block to the VID dispersal function + self.event_stream + .publish(HotShotEvent::BlockReady( + VidDisperse { + view_number, + payload_commitment: vid_disperse.commit, + shares: vid_disperse.shares, + common: vid_disperse.common, + }, + view_number, + )) + .await; + } + + HotShotEvent::BlockReady(vid_disperse, view_number) => { + debug!("publishing VID disperse for view {}", *view_number); + self.event_stream + .publish(HotShotEvent::VidDisperseSend( + Proposal { + signature: TYPES::SignatureKey::sign( + &self.private_key, + &vid_disperse.payload_commitment, + ), + data: vid_disperse, + _pd: PhantomData, + }, + self.public_key.clone(), + )) + .await; + } + HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; @@ -393,6 +450,8 @@ impl, A: ConsensusApi + | HotShotEvent::VidDisperseRecv(_, _) | HotShotEvent::VidVoteRecv(_) | HotShotEvent::VidCertRecv(_) + | HotShotEvent::TransactionsSequenced(_, _, _) + | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) ) } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 7c28b2366c..1ef4a2865b 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -39,6 +39,7 @@ tracing = { workspace = true } serde = { workspace = true } ethereum-types = { workspace = true } bitvec = { workspace = true } +sha2 = { workspace = true } [dev-dependencies] async-lock = { workspace = true } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 50204bc6c3..ebdb53904f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -126,7 +126,7 @@ async fn build_quorum_proposal_and_signature( // every event input is seen on the event stream in the output. let block = ::genesis(); - let payload_commitment = vid_commitment(block.encode().unwrap().collect()); + let payload_commitment = vid_commitment(&block.encode().unwrap().collect()); let block_header = VIDBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); let leaf = Leaf { view_number: ViewNumber::new(view), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 1892f8c71e..d418122f98 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -10,6 +10,7 @@ use hotshot_types::{ node_implementation::NodeType, state::ConsensusTime, }, }; +use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; #[cfg_attr( @@ -34,10 +35,11 @@ async fn test_da_task() { let pub_key = *api.public_key(); let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); - let payload_commitment = vid_commitment(encoded_transactions.clone()); + let payload_commitment = vid_commitment(&encoded_transactions); + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let signature = - ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); + ::SignatureKey::sign(api.private_key(), &encoded_transactions_hash); let proposal = DAProposal { encoded_transactions: encoded_transactions.clone(), metadata: (), @@ -59,7 +61,7 @@ async fn test_da_task() { // In view 1, node 2 is the next leader. input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::BlockReady( + input.push(HotShotEvent::TransactionsSequenced( encoded_transactions.clone(), (), ViewNumber::new(2), @@ -70,11 +72,7 @@ async fn test_da_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - HotShotEvent::BlockReady(encoded_transactions, (), ViewNumber::new(2)), - 1, - ); - output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), + HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), 1, ); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 91cbf3a560..0d0f469864 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -52,15 +52,17 @@ async fn test_network_task() { _pd: PhantomData, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; + + let da_vid_disperse_inner = VidDisperse { + view_number: da_proposal.data.view_number, + payload_commitment, + shares: vid_disperse.shares, + common: vid_disperse.common, + }; // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { - data: VidDisperse { - view_number: da_proposal.data.view_number, - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }, + data: da_vid_disperse_inner.clone(), signature: da_proposal.signature.clone(), _pd: PhantomData, }; @@ -70,11 +72,15 @@ async fn test_network_task() { let mut output = HashMap::new(); input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::BlockReady( + input.push(HotShotEvent::TransactionsSequenced( encoded_transactions.clone(), (), ViewNumber::new(2), )); + input.push(HotShotEvent::BlockReady( + da_vid_disperse_inner.clone(), + ViewNumber::new(2), + )); input.push(HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key)); input.push(HotShotEvent::VidDisperseSend( da_vid_disperse.clone(), @@ -93,7 +99,7 @@ async fn test_network_task() { 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); output.insert( - HotShotEvent::BlockReady(encoded_transactions, (), ViewNumber::new(2)), + HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), 2, ); output.insert( @@ -120,6 +126,10 @@ async fn test_network_task() { HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), 1, ); + output.insert( + HotShotEvent::BlockReady(da_vid_disperse_inner, ViewNumber::new(2)), + 2, + ); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); output.insert( HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index ace93a2115..b99189a572 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -51,13 +51,14 @@ async fn test_vid_task() { signature, _pd: PhantomData, }; + let vid_disperse = VidDisperse { + view_number: message.data.view_number, + payload_commitment, + shares: vid_disperse.shares, + common: vid_disperse.common, + }; let vid_proposal = Proposal { - data: VidDisperse { - view_number: message.data.view_number, - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }, + data: vid_disperse.clone(), signature: message.signature.clone(), _pd: PhantomData, }; @@ -69,20 +70,38 @@ async fn test_vid_task() { // In view 1, node 2 is the next leader. input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::BlockReady( + input.push(HotShotEvent::TransactionsSequenced( encoded_transactions.clone(), (), ViewNumber::new(2), )); - + input.push(HotShotEvent::BlockReady( + vid_disperse.clone(), + ViewNumber::new(2), + )); + input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); output.insert( - HotShotEvent::BlockReady(encoded_transactions, (), ViewNumber::new(2)), + HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), + 1, + ); + + output.insert( + HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), + 2, + ); + + output.insert( + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), 1, ); + output.insert( + HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), + 2, // 2 occurrences: 1 from `input`, 1 from the DA task + ); let vid_vote = VIDVote::create_signed_vote( hotshot_types::simple_vote::VIDData { diff --git a/types/src/block_impl.rs b/types/src/block_impl.rs index 03b68ba188..c24110bc23 100644 --- a/types/src/block_impl.rs +++ b/types/src/block_impl.rs @@ -86,7 +86,7 @@ impl VIDBlockPayload { let encoded = VIDTransaction::encode(vec![VIDTransaction(txns.clone())]).unwrap(); VIDBlockPayload { transactions: vec![VIDTransaction(txns)], - payload_commitment: vid_commitment(encoded), + payload_commitment: vid_commitment(&encoded), } } } @@ -121,7 +121,7 @@ impl BlockPayload for VIDBlockPayload { Ok(( Self { transactions: txns_vec, - payload_commitment: vid_commitment(encoded), + payload_commitment: vid_commitment(&encoded), }, (), )) @@ -151,7 +151,7 @@ impl BlockPayload for VIDBlockPayload { Self { transactions, - payload_commitment: vid_commitment(encoded_vec), + payload_commitment: vid_commitment(&encoded_vec), } } diff --git a/types/src/data.rs b/types/src/data.rs index a9069e654a..624f9fd257 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -381,7 +381,7 @@ impl Leaf { Ok(encoded) => encoded.into_iter().collect(), Err(_) => return Err(BlockError::InvalidTransactionLength), }; - let commitment = vid_commitment(encoded_txns); + let commitment = vid_commitment(&encoded_txns); if commitment != self.block_header.payload_commitment() { return Err(BlockError::InconsistentPayloadCommitment); } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 822f0b6010..7aee100bd3 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -83,7 +83,7 @@ pub trait BlockPayload: /// # Panics /// If the VID computation fails. #[must_use] -pub fn vid_commitment(encoded_transactions: Vec) -> ::Commit { +pub fn vid_commitment(encoded_transactions: &Vec) -> ::Commit { // TODO let srs = test_srs(NUM_STORAGE_NODES); // TODO We are using constant numbers for now, but they will change as the quorum size diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index fe3b211796..a8ceae04c1 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -251,9 +251,7 @@ where let network = Arc::new(network_generator(id)); let network_da = Arc::new(da_generator(id)); let quorum_chan = - >::generate_network()( - network.clone(), - ); + >::generate_network()(network); let committee_chan = >::generate_network()( network_da, From db73644d4b6096a2d2b07c502ecde5dd2ba1435e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 28 Nov 2023 16:44:09 -0800 Subject: [PATCH 0459/1393] Fix build --- hotshot-stake-table/Cargo.toml | 2 +- hotshot-state-prover/Cargo.toml | 2 +- orchestrator/Cargo.toml | 5 ++- task/src/task_impls.rs | 2 +- testing/tests/catchup.rs | 58 --------------------------------- types/Cargo.toml | 6 ++-- types/src/data.rs | 2 +- web_server/Cargo.toml | 2 +- 8 files changed, 9 insertions(+), 70 deletions(-) diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 97756ac736..4cb53eb2a9 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -23,7 +23,7 @@ jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } -tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } +tagged-base64 = { workspace = true } typenum = { workspace = true } [dev-dependencies] diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml index 8b2ecfa928..73f095aef0 100644 --- a/hotshot-state-prover/Cargo.toml +++ b/hotshot-state-prover/Cargo.toml @@ -25,7 +25,7 @@ jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } -tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } +tagged-base64 = { workspace = true } typenum = { workspace = true } hotshot-stake-table = { path = "../hotshot-stake-table" } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index b4f093972d..f445e70c24 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -17,14 +17,13 @@ hotshot-types = { version = "0.1.0", path = "../types", default-features = false hotshot-utils = { path = "../utils" } hotshot-signature-key = { path = "../hotshot-signature-key" } libp2p-networking = { workspace = true } -tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } +tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } serde_json = "1.0.96" snafu = { workspace = true } -# TODO upgrade to toml = { workspace = true } https://github.com/EspressoSystems/HotShot/issues/1698 -toml = "0.5.9" +toml = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs index 768e011775..cb650be9f0 100644 --- a/task/src/task_impls.rs +++ b/task/src/task_impls.rs @@ -299,7 +299,7 @@ pub mod test { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[allow(clippy::should_panic_without_expect)] + // #[allow(clippy::should_panic_without_expect)] #[should_panic] async fn test_init_with_event_stream() { setup_logging(); diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index a52ac0de0a..cf9cd4e43a 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -115,64 +115,6 @@ async fn test_catchup_web() { .await; } -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_catchup_web() { - use std::time::Duration; - - use hotshot_testing::{ - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{SequencingTestTypes, SequencingWebImpl}, - overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let timing_data = TimingData { - next_view_timeout: 1000, - ..Default::default() - }; - let mut metadata = TestMetadata::default(); - let catchup_nodes = vec![ChangeNode { - idx: 18, - updown: UpDown::Up, - }]; - - metadata.timing_data = timing_data; - metadata.start_nodes = 20; - metadata.total_nodes = 20; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::from_millis(2500), catchup_nodes)], - }; - - metadata.completion_task_description = - CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), - }, - ); - metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - check_leaf: true, - ..Default::default() - }; - - // only alow for the view which the catchup node hasn't started to fail - metadata.overall_safety_properties.num_failed_views = 1; - - metadata - .gen_launcher::() - .launch() - .run_test() - .await; -} - /// Test that one node catches up and has sucessful views after coming back #[cfg(test)] #[cfg_attr( diff --git a/types/Cargo.toml b/types/Cargo.toml index e4c5b36bc9..1fa7499124 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -14,9 +14,7 @@ demo = [] arbitrary = { version = "1.3", features = ["derive"] } ark-bls12-381 = { workspace = true } ark-ff = "0.4.0" -ark-serialize = { version = "0.3", features = [ - "derive", -] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 +ark-serialize = { workspace = true, features = ["derive"] } ark-std = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } @@ -49,7 +47,7 @@ serde = { workspace = true } sha2 = { workspace = true } sha3 = "^0.10" snafu = { workspace = true } -tagged-base64 = { git = "https://github.com/espressosystems/tagged-base64", tag = "0.3.0" } +tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index a9069e654a..84fb565659 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -17,7 +17,7 @@ use crate::{ vote::{Certificate, HasViewNumber}, }; use ark_bls12_381::Bls12_381; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; use commit::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index bc2520065a..bb5988339b 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -21,7 +21,7 @@ libp2p-core = { version = "0.40.0", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } -tide-disco = { git = "https://github.com/EspressoSystems/tide-disco.git", tag = "v0.4.1" } +tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } serde = { workspace = true } From 6ed1ddc9fc7717be4767ff3b3557615821f66327 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 28 Nov 2023 16:49:04 -0800 Subject: [PATCH 0460/1393] Fix fmt --- task/src/task_impls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs index cb650be9f0..768e011775 100644 --- a/task/src/task_impls.rs +++ b/task/src/task_impls.rs @@ -299,7 +299,7 @@ pub mod test { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - // #[allow(clippy::should_panic_without_expect)] + #[allow(clippy::should_panic_without_expect)] #[should_panic] async fn test_init_with_event_stream() { setup_logging(); From 487a07fa9fa096caa0d0228e3db35465c83cea7b Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Wed, 29 Nov 2023 14:16:11 -0500 Subject: [PATCH 0461/1393] [light-client-integration] add state signature keys to the config (#2123) * hardcoding schnorr keys inside validator config * fix typo * address comments --- hotshot-stake-table/src/vec_based.rs | 4 +- hotshot-stake-table/src/vec_based/config.rs | 10 +-- hotshot-state-prover/Cargo.toml | 8 +- hotshot-state-prover/src/circuit.rs | 68 +++++++------- hotshot-state-prover/src/lib.rs | 21 ++--- hotshot/examples/infra/mod.rs | 10 +-- orchestrator/src/config.rs | 8 +- types/Cargo.toml | 3 +- types/src/lib.rs | 5 ++ types/src/light_client.rs | 98 +++++++++++++++++++++ types/src/traits/state.rs | 43 --------- 11 files changed, 162 insertions(+), 116 deletions(-) create mode 100644 types/src/light_client.rs diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index c3212ca955..d9945d2de1 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -369,7 +369,7 @@ where #[cfg(test)] mod tests { - use super::config::{BLSVerKey, FieldType as F, SchnorrVerKey}; + use super::config::{FieldType as F, QCVerKey, StateVerKey}; use super::StakeTable; use ark_std::{rand::SeedableRng, vec::Vec}; use ethereum_types::U256; @@ -379,7 +379,7 @@ mod tests { #[test] fn test_stake_table() -> Result<(), StakeTableError> { - let mut st = StakeTable::::new(); + let mut st = StakeTable::::new(); let mut prng = jf_utils::test_rng(); let keys = (0..10) .map(|_| { diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 338aa05080..14cf976bd9 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -4,17 +4,17 @@ use ark_ff::PrimeField; use ark_std::vec; use jf_utils::to_bytes; -/// BLS verification key as indexing key -pub use jf_primitives::signatures::bls_over_bn254::VerKey as BLSVerKey; /// Schnorr verification key as auxiliary information -pub type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; +pub use hotshot_types::light_client::StateVerKey; +/// BLS verification key as indexing key +pub use jf_primitives::signatures::bls_over_bn254::VerKey as QCVerKey; /// Type for commitment pub type FieldType = ark_ed_on_bn254::Fq; /// Hashable representation of a key /// NOTE: commitment is only used in light client contract. /// For this application, we needs only hash the Schnorr verfication key. -impl ToFields for SchnorrVerKey { +impl ToFields for StateVerKey { const SIZE: usize = 2; fn to_fields(&self) -> Vec { @@ -23,7 +23,7 @@ impl ToFields for SchnorrVerKey { } } -impl ToFields for BLSVerKey { +impl ToFields for QCVerKey { const SIZE: usize = 2; fn to_fields(&self) -> Vec { diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml index 8b2ecfa928..b77c76d2be 100644 --- a/hotshot-state-prover/Cargo.toml +++ b/hotshot-state-prover/Cargo.toml @@ -7,10 +7,10 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] -ark-bn254 = "0.4.0" -ark-ec = "0.4.0" -ark-ed-on-bn254 = "0.4.0" -ark-ff = "0.4.0" +ark-bn254 = { workspace = true } +ark-ec = { workspace = true } +ark-ed-on-bn254 = { workspace = true } +ark-ff = { workspace = true } ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { workspace = true } diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 355a4c028f..22967cfe9b 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -5,7 +5,7 @@ use ark_ff::PrimeField; use ark_std::borrow::Borrow; use ethereum_types::U256; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; -use hotshot_types::traits::state::LightClientState; +use hotshot_types::light_client::LightClientState; use jf_plonk::errors::PlonkError; use jf_primitives::{ circuit::{ @@ -27,21 +27,21 @@ pub(crate) fn u256_to_field(v: &U256) -> F { /// Variable for stake table entry #[derive(Clone, Debug)] pub struct StakeTableEntryVar { - /// Schnorr verification keys - pub schnorr_ver_key: VerKeyVar, + /// state verification keys + pub state_ver_key: VerKeyVar, /// Stake amount pub stake_amount: Variable, } /// Light client state Variable -/// The stake table commitment is a triple (bls_keys_comm, schnorr_keys_comm, stake_amount_comm). +/// The stake table commitment is a triple (qc_keys_comm, state_keys_comm, stake_amount_comm). /// Variable for a stake table commitment #[derive(Clone, Debug)] pub struct StakeTableCommVar { - /// Commitment for BLS keys - pub bls_keys_comm: Variable, - /// Commitment for Schnorr keys - pub schnorr_keys_comm: Variable, + /// Commitment for QC verification keys + pub qc_keys_comm: Variable, + /// Commitment for state verification keys + pub state_keys_comm: Variable, /// Commitment for stake amount pub stake_amount_comm: Variable, } @@ -104,13 +104,13 @@ impl PublicInput { (self.0[5], self.0[6], self.0[7]) } - /// Return the bls key commitment of the light client state - pub fn bls_key_comm(&self) -> F { + /// Return the qc key commitment of the light client state + pub fn qc_key_comm(&self) -> F { self.0[5] } - /// Return the schnorr key commitment of the light client state - pub fn schnorr_key_comm(&self) -> F { + /// Return the state key commitment of the light client state + pub fn state_key_comm(&self) -> F { self.0[6] } @@ -158,8 +158,8 @@ impl LightClientStateVar { pub fn stake_table_comm(&self) -> StakeTableCommVar { StakeTableCommVar { - bls_keys_comm: self.vars[4], - schnorr_keys_comm: self.vars[5], + qc_keys_comm: self.vars[4], + state_keys_comm: self.vars[5], stake_amount_comm: self.vars[6], } } @@ -182,7 +182,7 @@ impl AsRef<[Variable]> for LightClientStateVar { /// It checks that /// - the signer's accumulated weight exceeds the quorum threshold /// - the stake table corresponds to the one committed in the light client state -/// - all signed schnorr signatures are valid +/// - all signed Schnorr signatures are valid /// and returns /// - A circuit for proof generation /// - A list of public inputs for verification @@ -245,10 +245,10 @@ where let mut stake_table_var = stake_table_entries .map(|item| { let item = item.borrow(); - let schnorr_ver_key = circuit.create_signature_vk_variable(&item.0)?; + let state_ver_key = circuit.create_signature_vk_variable(&item.0)?; let stake_amount = circuit.create_variable(u256_to_field::(&item.1))?; Ok(StakeTableEntryVar { - schnorr_ver_key, + state_ver_key, stake_amount, }) }) @@ -256,11 +256,11 @@ where stake_table_var.extend( (0..stake_table_entries_pad_len) .map(|_| { - let schnorr_ver_key = + let state_ver_key = circuit.create_signature_vk_variable(&SchnorrVerKey::

::default())?; let stake_amount = circuit.create_variable(F::default())?; Ok(StakeTableEntryVar { - schnorr_ver_key, + state_ver_key, stake_amount, }) }) @@ -332,20 +332,18 @@ where circuit.enforce_leq(threshold_pub_var, acc_amount_var)?; // checking the commitment for the list of schnorr keys - let schnorr_ver_key_preimage_vars = stake_table_var + let state_ver_key_preimage_vars = stake_table_var .iter() - .flat_map(|var| [var.schnorr_ver_key.0.get_x(), var.schnorr_ver_key.0.get_y()]) + .flat_map(|var| [var.state_ver_key.0.get_x(), var.state_ver_key.0.get_y()]) .collect::>(); - let schnorr_ver_key_comm = RescueNativeGadget::::rescue_sponge_with_padding( + let state_ver_key_comm = RescueNativeGadget::::rescue_sponge_with_padding( &mut circuit, - &schnorr_ver_key_preimage_vars, + &state_ver_key_preimage_vars, 1, )?[0]; circuit.enforce_equal( - schnorr_ver_key_comm, - lightclient_state_pub_var - .stake_table_comm() - .schnorr_keys_comm, + state_ver_key_comm, + lightclient_state_pub_var.stake_table_comm().state_keys_comm, )?; // checking the commitment for the list of stake amounts @@ -372,7 +370,7 @@ where .map(|(entry, sig)| { SignatureGadget::<_, P>::check_signature_validity( &mut circuit, - &entry.schnorr_ver_key, + &entry.state_ver_key, lightclient_state_pub_var.as_ref(), &sig, ) @@ -432,13 +430,13 @@ mod tests { let num_validators = 10; let mut prng = test_rng(); - let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); - let st = stake_table_for_testing(&bls_keys, &schnorr_keys); + let (qc_keys, state_keys) = key_pairs_for_testing(num_validators, &mut prng); + let st = stake_table_for_testing(&qc_keys, &state_keys); let entries = st .try_iter(SnapshotVersion::LastEpochStart) .unwrap() - .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)) + .map(|(_, stake_amount, state_key)| (state_key, stake_amount)) .collect::>(); let block_comm_root = @@ -457,7 +455,7 @@ mod tests { }; let state_msg: [F; 7] = lightclient_state.clone().into(); - let sigs = schnorr_keys + let sigs = state_keys .iter() .map(|(key, _)| SchnorrSignatureScheme::::sign(&(), key, state_msg, &mut prng)) .collect::, PrimitivesError>>() @@ -535,7 +533,7 @@ mod tests { let mut bad_lightclient_state = lightclient_state.clone(); bad_lightclient_state.stake_table_comm.1 = F::default(); let bad_state_msg: [F; 7] = bad_lightclient_state.clone().into(); - let sig_for_bad_state = schnorr_keys + let sig_for_bad_state = state_keys .iter() .map(|(key, _)| { SchnorrSignatureScheme::::sign(&(), key, bad_state_msg, &mut prng) @@ -556,10 +554,10 @@ mod tests { // bad path: incorrect signatures let mut wrong_light_client_state = lightclient_state.clone(); - // state with a different bls key commitment + // state with a different qc key commitment wrong_light_client_state.stake_table_comm.0 = F::default(); let wrong_state_msg: [F; 7] = wrong_light_client_state.into(); - let wrong_sigs = schnorr_keys + let wrong_sigs = state_keys .iter() .map(|(key, _)| { SchnorrSignatureScheme::::sign(&(), key, wrong_state_msg, &mut prng) diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 484ced9500..fb848030ef 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -7,15 +7,16 @@ pub mod circuit; mod utils; use ark_bn254::Bn254; +use ark_ed_on_bn254::EdwardsConfig; use ark_std::{ borrow::Borrow, rand::{CryptoRng, RngCore}, }; use circuit::PublicInput; use ethereum_types::U256; -use hotshot_types::traits::{ - stake_table::{SnapshotVersion, StakeTableScheme}, - state::LightClientState, +use hotshot_types::{ + light_client::{LightClientState, StateVerKey}, + traits::stake_table::{SnapshotVersion, StakeTableScheme}, }; use jf_plonk::{ errors::PlonkError, @@ -25,9 +26,7 @@ use jf_plonk::{ use jf_primitives::signatures::schnorr::Signature; /// BLS verification key, base field and Schnorr verification key -pub use hotshot_stake_table::vec_based::config::{ - BLSVerKey, FieldType as BaseField, SchnorrVerKey, -}; +pub use hotshot_stake_table::vec_based::config::{FieldType as BaseField, QCVerKey}; /// Proving key pub type ProvingKey = jf_plonk::proof_system::structs::ProvingKey; /// Verifying key @@ -36,8 +35,6 @@ pub type VerifyingKey = jf_plonk::proof_system::structs::VerifyingKey; pub type Proof = jf_plonk::proof_system::structs::Proof; /// Universal SRS pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; -/// Curve config for Schnorr signatures -pub use ark_ed_on_bn254::EdwardsConfig; /// Given a SRS, returns the proving key and verifying key for state update pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), PlonkError> { @@ -65,7 +62,7 @@ pub fn generate_state_update_proof( threshold: &U256, ) -> Result<(Proof, PublicInput), PlonkError> where - ST: StakeTableScheme, + ST: StakeTableScheme, ST::IntoIter: ExactSizeIterator, R: CryptoRng + RngCore, BitIter: IntoIterator, @@ -105,9 +102,9 @@ mod tests { One, }; use ethereum_types::U256; - use hotshot_types::traits::{ - stake_table::{SnapshotVersion, StakeTableScheme}, - state::LightClientState, + use hotshot_types::{ + light_client::LightClientState, + traits::stake_table::{SnapshotVersion, StakeTableScheme}, }; use jf_plonk::{ proof_system::{PlonkKzgSnark, UniversalSNARK}, diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 040c37571b..ac0475b961 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -884,16 +884,12 @@ pub async fn main_entry_point< run_config.node_index = node_index.into(); - let (public_key, private_key) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + run_config.config.my_own_validator_config = + ValidatorConfig::<::SignatureKey>::generated_from_seed_indexed( run_config.seed, node_index.into(), + 1, ); - run_config.config.my_own_validator_config = ValidatorConfig { - public_key, - private_key, - stake_value: 1, - }; //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); error!("Initializing networking"); diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 7593bd180a..b07a5caa98 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -292,13 +292,7 @@ fn default_transaction_size() -> usize { impl From for ValidatorConfig { fn from(val: ValidatorConfigFile) -> Self { // here stake_value is set to 1, since we don't input stake_value from ValidatorConfigFile for now - let validator_config = - ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1); - ValidatorConfig { - public_key: validator_config.public_key, - private_key: validator_config.private_key, - stake_value: validator_config.stake_value, - } + ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1) } } impl From for HotShotConfig { diff --git a/types/Cargo.toml b/types/Cargo.toml index 601b03b8f5..cd63d62e12 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -13,7 +13,8 @@ demo = [] [dependencies] arbitrary = { version = "1.3", features = ["derive"] } ark-bls12-381 = { workspace = true } -ark-ff = "0.4.0" +ark-ed-on-bn254 = { workspace = true } +ark-ff = { workspace = true } ark-serialize = { version = "0.3", features = [ "derive", ] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 diff --git a/types/src/lib.rs b/types/src/lib.rs index b024eac08c..2abb198e96 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -17,6 +17,7 @@ pub mod consensus; pub mod data; pub mod error; pub mod event; +pub mod light_client; pub mod message; pub mod simple_certificate; pub mod simple_vote; @@ -46,6 +47,8 @@ pub struct ValidatorConfig { pub private_key: KEY::PrivateKey, /// The validator's stake pub stake_value: u64, + /// the validator's key pairs for state signing/verification + pub state_key_pair: light_client::StateKeyPair, } impl ValidatorConfig { @@ -53,10 +56,12 @@ impl ValidatorConfig { #[must_use] pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { let (public_key, private_key) = KEY::generated_from_seed_indexed(seed, index); + let state_key_pairs = light_client::StateKeyPair::generate_from_seed_indexed(seed, index); Self { public_key, private_key, stake_value, + state_key_pair: state_key_pairs, } } } diff --git a/types/src/light_client.rs b/types/src/light_client.rs new file mode 100644 index 0000000000..741e178609 --- /dev/null +++ b/types/src/light_client.rs @@ -0,0 +1,98 @@ +//! Types and structs associated with light client state + +use ark_ff::PrimeField; +use jf_primitives::signatures::schnorr; + +/// A serialized light client state for proof generation +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)] +pub struct LightClientState { + /// Current view number + pub view_number: usize, + /// Current block height + pub block_height: usize, + /// Root of the block commitment tree + pub block_comm_root: F, + /// Commitment for fee ledger + pub fee_ledger_comm: F, + /// Commitment for the stake table + pub stake_table_comm: (F, F, F), +} + +impl From> for [F; 7] { + fn from(state: LightClientState) -> Self { + [ + F::from(state.view_number as u64), + F::from(state.block_height as u64), + state.block_comm_root, + state.fee_ledger_comm, + state.stake_table_comm.0, + state.stake_table_comm.1, + state.stake_table_comm.2, + ] + } +} +impl From<&LightClientState> for [F; 7] { + fn from(state: &LightClientState) -> Self { + [ + F::from(state.view_number as u64), + F::from(state.block_height as u64), + state.block_comm_root, + state.fee_ledger_comm, + state.stake_table_comm.0, + state.stake_table_comm.1, + state.stake_table_comm.2, + ] + } +} + +use ark_ed_on_bn254::EdwardsConfig as Config; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; + +/// Signatures +pub type StateSignature = schnorr::Signature; +/// Verification key for verifying state signatures +pub type StateVerKey = schnorr::VerKey; +/// Signing key for signing a light client state +pub type StateSignKey = schnorr::SignKey; +/// Key pairs for signing/verifying a light client state +#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] +pub struct StateKeyPair(schnorr::KeyPair); + +impl std::ops::Deref for StateKeyPair { + type Target = schnorr::KeyPair; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl StateKeyPair { + /// Generate key pairs from `thread_rng()` + #[must_use] + pub fn generate() -> StateKeyPair { + schnorr::KeyPair::generate(&mut rand::thread_rng()).into() + } + + /// Generate key pairs from seed + #[must_use] + pub fn generate_from_seed(seed: [u8; 32]) -> StateKeyPair { + schnorr::KeyPair::generate(&mut ChaCha20Rng::from_seed(seed)).into() + } + + /// Generate key pairs from an index and a seed + #[must_use] + pub fn generate_from_seed_indexed(seed: [u8; 32], index: u64) -> StateKeyPair { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + Self::generate_from_seed(new_seed) + } +} + +impl From> for StateKeyPair { + fn from(value: schnorr::KeyPair) -> Self { + StateKeyPair(value) + } +} diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index d691b1d9b6..4899e9976a 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -4,7 +4,6 @@ //! network state, which is modified by the transactions contained within blocks. use crate::traits::BlockPayload; -use ark_ff::PrimeField; use commit::Committable; use espresso_systems_common::hotshot::tag; use serde::{de::DeserializeOwned, Serialize}; @@ -219,45 +218,3 @@ pub mod dummy { } } } - -/// A serialized light client state for proof generation -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)] -pub struct LightClientState { - /// Current view number - pub view_number: usize, - /// Current block height - pub block_height: usize, - /// Root of the block commitment tree - pub block_comm_root: F, - /// Commitment for fee ledger - pub fee_ledger_comm: F, - /// Commitment for the stake table - pub stake_table_comm: (F, F, F), -} - -impl From> for [F; 7] { - fn from(state: LightClientState) -> Self { - [ - F::from(state.view_number as u64), - F::from(state.block_height as u64), - state.block_comm_root, - state.fee_ledger_comm, - state.stake_table_comm.0, - state.stake_table_comm.1, - state.stake_table_comm.2, - ] - } -} -impl From<&LightClientState> for [F; 7] { - fn from(state: &LightClientState) -> Self { - [ - F::from(state.view_number as u64), - F::from(state.block_height as u64), - state.block_comm_root, - state.fee_ledger_comm, - state.stake_table_comm.0, - state.stake_table_comm.1, - state.stake_table_comm.2, - ] - } -} From d865eceb00fb246b46d79dad71f624afd6bfd26c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 29 Nov 2023 16:07:33 -0800 Subject: [PATCH 0462/1393] deal with VidDisperseRecv in consensus task, add the condition that only vote after getting shares --- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/consensus.rs | 69 ++++++++++++++++++++++++++++++++----- task-impls/src/vid.rs | 36 +++---------------- 3 files changed, 66 insertions(+), 41 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f5f1208f1a..f21ecb1e4a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -221,7 +221,7 @@ pub async fn add_consensus_task>( event_stream: event_stream.clone(), output_event_stream: output_stream, da_certs: HashMap::new(), - vid_certs: HashMap::new(), + vid_shares: HashMap::new(), current_proposal: None, id: handle.hotshot.inner.id, public_key: c_api.public_key().clone(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5a8c05fd88..76c1136ad3 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -17,10 +17,10 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{Consensus, View}, - data::{Leaf, QuorumProposal, VidCommitment}, + data::{Leaf, QuorumProposal, VidCommitment, VidDisperse}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate}, + simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, @@ -117,8 +117,8 @@ pub struct ConsensusTaskState< /// All the DA certs we've received for current and future views. pub da_certs: HashMap>, - /// All the VID certs we've received for current and future views. - pub vid_certs: HashMap>, + /// All the VID shares we've received for current and future views. + pub vid_shares: HashMap>>, /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync @@ -365,9 +365,15 @@ impl, A: ConsensusApi + } } + // Only vote if you has seen the VID share for this view + if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { + } else { + error!("We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number); + return false; + } + // Only vote if you have the DA cert // ED Need to update the view number this is stored under? - // Sishan NOTE TODO: Add the logic of "it does not vote until it has seen its VID share" if let Some(cert) = self.da_certs.get(&(proposal.get_view_number())) { let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. @@ -1101,20 +1107,66 @@ impl, A: ConsensusApi + self.current_proposal = None; } } + HotShotEvent::VidDisperseRecv(disperse, sender) => { + let view = disperse.data.get_view_number(); + + debug!("VID disperse received for view: {:?} in consensus task", view); + + // stop polling for the received disperse + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( + *disperse.data.view_number, + )) + .await; + + // Allow VID disperse date that is one view older, in case we have updated the + // view. + // Adding `+ 1` on the LHS rather than `- 1` on the RHS, to avoid the overflow + // error due to subtracting the genesis view number. + if view + 1 < self.cur_view { + warn!("Throwing away VID disperse data that is more than one view older"); + return; + } + + debug!("VID disperse data is fresh."); + let payload_commitment = disperse.data.payload_commitment; + + // Check whether the sender is the right leader for this view + let view_leader_key = self.committee_membership.get_leader(view); + if view_leader_key != sender { + error!("VID dispersal/share is not from expected leader key for view {} \n", *view); + return; + } + + if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { + error!("Could not verify VID dispersal/share sig."); + return; + } + + // Add to the storage that we have received the VID disperse for a specific view + self.vid_shares.insert(view, disperse.clone()); + } HotShotEvent::VidCertRecv(cert) => { debug!("VID cert received for view ! {}", *cert.view_number); - let view = cert.get_view_number(); - self.vid_certs.insert(view, cert); + let _view = cert.get_view_number(); // Sishan NOTE TODO: delete it // RM TODO: VOTING } HotShotEvent::ViewChange(new_view) => { - debug!("View Change event for view {}", *new_view); + debug!("View Change event for view {} in consensus task", *new_view); let old_view_number = self.cur_view; + // Start polling for VID disperse for the new view + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( + *old_view_number + 1, + )) + .await; + + // update the view in state to the one in the message // Publish a view change event to the application if !self.update_view(new_view).await { @@ -1340,6 +1392,7 @@ pub fn consensus_event_filter(event: &HotShotEvent) -> b | HotShotEvent::SendPayloadCommitmentAndMetadata(_, _) | HotShotEvent::Timeout(_) | HotShotEvent::TimeoutVoteRecv(_) + | HotShotEvent::VidDisperseRecv(_, _) | HotShotEvent::Shutdown, ) } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 1b4df3376b..9d489eea2d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -24,7 +24,7 @@ use hotshot_types::{ }; use hotshot_types::{ simple_certificate::VIDCertificate, - simple_vote::{VIDData, VIDVote}, + simple_vote::VIDVote, traits::network::CommunicationChannel, vote::{HasViewNumber, VoteAccumulator}, }; @@ -243,7 +243,7 @@ impl, A: ConsensusApi + HotShotEvent::VidDisperseRecv(disperse, sender) => { let view = disperse.data.get_view_number(); - debug!("VID disperse received for view: {:?}", view); + debug!("VID disperse received for view: {:?} in VID task", view); // stop polling for the received disperse self.network @@ -263,47 +263,19 @@ impl, A: ConsensusApi + debug!("VID disperse data is fresh."); let payload_commitment = disperse.data.payload_commitment; - // Sishan NOTE TODO: Add to the storage that we have received the VID disperse for a specific view / block // Check whether the sender is the right leader for this view let view_leader_key = self.membership.get_leader(view); if view_leader_key != sender { - error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view); + error!("VID dispersal/share is not from expected leader key for view {} \n", *view); return None; } if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { - error!("Could not verify VID proposal sig."); + error!("Could not verify VID dispersal/share sig."); return None; } - // Generate and send vote after receive and validate disperse (VID share) - let vote = VIDVote::create_signed_vote( - VIDData { - payload_commit: payload_commitment, - }, - view, - &self.public_key, - &self.private_key, - ); - - debug!( - "Sending vote to the VID leader {:?}", - vote.get_view_number() - ); - self.event_stream - .publish(HotShotEvent::VidVoteSend(vote)) - .await; - - // Sishan NOTE TODO: what is consensus.state_map? - // let mut consensus = self.consensus.write().await; - // // Ensure this view is in the view map for garbage collection, but do not overwrite if - // // there is already a view there: the replica task may have inserted a `Leaf` view which - // // contains strictly more information. - // consensus.state_map.entry(view).or_insert(View { - // view_inner: ViewInner::DA { payload_commitment, }, - // }); - // Record the block we have promised to make available. // TODO https://github.com/EspressoSystems/HotShot/issues/1692 // consensus.saved_payloads.insert(proposal.data.block_payload); From d9a93570808c333b61aa5ec26125360f3adf327a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 29 Nov 2023 16:17:03 -0800 Subject: [PATCH 0463/1393] update test_vid_task --- testing/tests/vid_task.rs | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index ace93a2115..69c0b830c3 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -9,7 +9,7 @@ use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, }; -use hotshot_types::{simple_vote::VIDVote, traits::node_implementation::NodeType}; +use hotshot_types::traits::node_implementation::NodeType; use std::collections::HashMap; use std::marker::PhantomData; @@ -84,16 +84,6 @@ async fn test_vid_task() { 1, ); - let vid_vote = VIDVote::create_signed_vote( - hotshot_types::simple_vote::VIDData { - payload_commit: payload_commitment, - }, - ViewNumber::new(2), - api.public_key(), - api.private_key(), - ); - output.insert(HotShotEvent::VidVoteSend(vid_vote), 1); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); From a84f337251de3a17bd61639de0dfd3a0013bae05 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 29 Nov 2023 16:41:41 -0800 Subject: [PATCH 0464/1393] tune some CI parameters --- hotshot/src/tasks/mod.rs | 2 +- testing/src/overall_safety_task.rs | 2 +- testing/src/test_builder.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f21ecb1e4a..14a1f7ffb6 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -470,7 +470,7 @@ pub async fn add_view_sync_task>( num_timeouts_tracked: 0, replica_task_map: HashMap::default(), relay_task_map: HashMap::default(), - view_sync_timeout: Duration::new(30, 0), + view_sync_timeout: Duration::new(10, 0), id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 0f276bea5c..a27b0a67f2 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -366,7 +366,7 @@ impl Default for OverallSafetyPropertiesDescription { check_leaf: false, check_state: true, check_block: true, - num_failed_views: 0, + num_failed_views: 5, transaction_threshold: 0, // very strict threshold_calculator: Arc::new(|_num_live, num_total| 2 * num_total / 3 + 1), diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 76c1c4838b..f0543f2987 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -137,7 +137,7 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(300, 0), + duration: Duration::new(200, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { From 72427f3c4dd5de1d281f9666481c46ae57c70d0e Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 29 Nov 2023 17:39:25 -0800 Subject: [PATCH 0465/1393] remove useless vid-related structures(vidvote, vidcert, vidvotecollection etc.) --- .../traits/networking/web_server_network.rs | 124 ----------- task-impls/src/consensus.rs | 8 - task-impls/src/events.rs | 20 +- task-impls/src/network.rs | 24 -- task-impls/src/vid.rs | 206 +----------------- types/src/message.rs | 37 +--- types/src/simple_certificate.rs | 4 +- types/src/simple_vote.rs | 2 - types/src/traits/network.rs | 12 - web_server/api.toml | 32 --- web_server/src/lib.rs | 32 --- 11 files changed, 10 insertions(+), 491 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 9594818236..2c9b600f5f 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -122,12 +122,6 @@ struct Inner { /// Task map for quorum votes. vote_task_map: Arc>>>>, - /// Task map for vid votes - vid_vote_task_map: - Arc>>>>, - /// Task map for VID certs - vid_cert_task_map: - Arc>>>>, /// Task map for VID disperse data vid_disperse_task_map: Arc>>>>, @@ -177,8 +171,6 @@ impl Inner { } MessagePurpose::DAC => config::get_da_certificate_route(view_number), MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` - MessagePurpose::VidVote => config::get_vid_vote_route(view_number, vote_index), // like `Vote` - MessagePurpose::VidCert => config::get_vid_certificate_route(view_number), // like `DAC` }; if message_purpose == MessagePurpose::Data { @@ -251,14 +243,6 @@ impl Inner { direct_poll_queue.push(vote.clone()); } } - MessagePurpose::VidVote => { - // TODO copy-pasted from `MessagePurpose::Vote` https://github.com/EspressoSystems/HotShot/issues/1690 - let mut direct_poll_queue = self.direct_poll_queue.write().await; - for vote in &deserialized_messages { - vote_index += 1; - direct_poll_queue.push(vote.clone()); - } - } MessagePurpose::DAC => { debug!( "Received DAC from web server for view {} {}", @@ -274,22 +258,6 @@ impl Inner { // In future we should check to make sure DAC is valid return Ok(()); } - MessagePurpose::VidCert => { - // TODO copy-pasted from `MessagePurpose::DAC` https://github.com/EspressoSystems/HotShot/issues/1690 - debug!( - "Received VID cert from web server for view {} {}", - view_number, self.is_da - ); - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - self.broadcast_poll_queue - .write() - .await - .push(deserialized_messages[0].clone()); - - // return if we found a VID cert, since there will only be 1 per view - // In future we should check to make sure VID cert is valid - return Ok(()); - } MessagePurpose::VidDisperse => { // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 @@ -360,8 +328,6 @@ impl Inner { // TODO ED Should add extra error checking here to make sure we are intending to cancel a task ConsensusIntentEvent::CancelPollForVotes(event_view) | ConsensusIntentEvent::CancelPollForProposal(event_view) - | ConsensusIntentEvent::CancelPollForVIDVotes(event_view) - | ConsensusIntentEvent::CancelPollForVIDCertificate(event_view) | ConsensusIntentEvent::CancelPollForDAC(event_view) | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { @@ -535,8 +501,6 @@ impl WebServerNetwork { tx_index: Arc::default(), proposal_task_map: Arc::default(), vote_task_map: Arc::default(), - vid_vote_task_map: Arc::default(), - vid_cert_task_map: Arc::default(), vid_disperse_task_map: Arc::default(), dac_task_map: Arc::default(), view_sync_cert_task_map: Arc::default(), @@ -572,9 +536,7 @@ impl WebServerNetwork { } MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), MessagePurpose::DAC => config::post_da_certificate_route(*view_number), - MessagePurpose::VidVote => config::post_vid_vote_route(*view_number), MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), - MessagePurpose::VidCert => config::post_vid_certificate_route(*view_number), }; let network_msg: SendMsg> = SendMsg { @@ -951,43 +913,6 @@ impl ConnectedNetwork, TYPES::Signatur .await; } } - ConsensusIntentEvent::PollForVIDVotes(view_number) => { - let mut task_map = self.inner.vid_vote_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = self.inner.clone(); - async move { - if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::VidVote, view_number) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - error!("Somehow task already existed!"); - } - - // GC proposal collection if we are two views in the future - // TODO ED This won't work for vote collection, last task is more than 2 view ago depending on size of network, will need to rely on cancel task from consensus - if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForVIDVotes( - view_number.wrapping_sub(2), - )) - .await; - } - } ConsensusIntentEvent::PollForDAC(view_number) => { let mut task_map = self.inner.dac_task_map.write().await; @@ -1026,42 +951,6 @@ impl ConnectedNetwork, TYPES::Signatur } } - ConsensusIntentEvent::PollForVIDCertificate(view_number) => { - let mut task_map = self.inner.vid_cert_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = self.inner.clone(); - async move { - if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::VidCert, view_number) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - error!("Somehow task already existed!"); - } - - // GC proposal collection if we are two views in the future - if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForVIDCertificate( - view_number.wrapping_sub(2), - )) - .await; - } - } ConsensusIntentEvent::CancelPollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; @@ -1075,19 +964,6 @@ impl ConnectedNetwork, TYPES::Signatur } } - ConsensusIntentEvent::CancelPollForVIDVotes(view_number) => { - let mut task_map = self.inner.vid_vote_task_map.write().await; - - if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForVIDVotes(view_number)) - .await; - } - } - ConsensusIntentEvent::PollForViewSyncCertificate(view_number) => { let mut task_map = self.inner.view_sync_cert_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 76c1136ad3..ca12dc0cf2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1146,14 +1146,6 @@ impl, A: ConsensusApi + // Add to the storage that we have received the VID disperse for a specific view self.vid_shares.insert(view, disperse.clone()); } - HotShotEvent::VidCertRecv(cert) => { - debug!("VID cert received for view ! {}", *cert.view_number); - - let _view = cert.get_view_number(); - // Sishan NOTE TODO: delete it - // RM TODO: VOTING - } - HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 79e1192f4c..f15509d300 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -5,11 +5,11 @@ use hotshot_types::{ data::{DAProposal, Leaf, QuorumProposal, VidCommitment, VidDisperse}, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate, + DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{node_implementation::NodeType, BlockPayload}, @@ -108,20 +108,4 @@ pub enum HotShotEvent { /// /// Like [`DAProposalRecv`]. VidDisperseRecv(Proposal>, TYPES::SignatureKey), - /// Send a VID vote to the VID leader; emitted by VID storage nodes in the DA task after seeing a valid VID dispersal - /// - /// Like [`DAVoteSend`] - VidVoteSend(VIDVote), - /// A VID vote has been received by the network; handled by the DA task - /// - /// Like [`DAVoteRecv`] - VidVoteRecv(VIDVote), - /// The VID leader has collected enough votes to form a VID cert; emitted by the VID leader in the DA task; sent to the entire network via the networking task - /// - /// Like [`DACSend`] - VidCertSend(VIDCertificate, TYPES::SignatureKey), - /// A VID cert has been recieved by the network; handled by the consensus task - /// - /// Like [`DACRecv`] - VidCertRecv(VIDCertificate), } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index e3d2fef958..ed7f0ebdc7 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -103,12 +103,6 @@ impl NetworkMessageTaskState { CommitteeConsensusMessage::VidDisperseMsg(proposal) => { HotShotEvent::VidDisperseRecv(proposal, sender) } - CommitteeConsensusMessage::VidVote(vote) => { - HotShotEvent::VidVoteRecv(vote.clone()) - } - CommitteeConsensusMessage::VidCertificate(cert) => { - HotShotEvent::VidCertRecv(cert) - } }, }; // TODO (Keyao benchmarking) Update these event variants (similar to the @@ -198,14 +192,6 @@ impl> TransmitType::Broadcast, None, ), - HotShotEvent::VidVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::VidVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number())), - ), HotShotEvent::DAVoteSend(vote) => ( vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Right( @@ -214,14 +200,6 @@ impl> TransmitType::Direct, Some(membership.get_leader(vote.get_view_number())), ), - HotShotEvent::VidCertSend(certificate, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::VidCertificate(certificate), - ))), - TransmitType::Broadcast, - None, - ), // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee HotShotEvent::DACSend(certificate, sender) => ( sender, @@ -363,8 +341,6 @@ impl> event, HotShotEvent::Shutdown | HotShotEvent::VidDisperseSend(_, _) - | HotShotEvent::VidCertSend(_, _) - | HotShotEvent::VidVoteSend(_) | HotShotEvent::ViewChange(_) ) } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 9d489eea2d..76a448f8c6 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,15 +1,10 @@ use crate::events::HotShotEvent; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; - -use bitvec::prelude::*; -use either::{Either, Left, Right}; -use futures::FutureExt; use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, + event_stream::ChannelStream, global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, }; use hotshot_types::traits::network::ConsensusIntentEvent; use hotshot_types::{ @@ -19,19 +14,15 @@ use hotshot_types::{ election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - state::ConsensusTime, }, }; use hotshot_types::{ - simple_certificate::VIDCertificate, - simple_vote::VIDVote, traits::network::CommunicationChannel, - vote::{HasViewNumber, VoteAccumulator}, + vote::HasViewNumber, }; use snafu::Snafu; -use std::marker::PhantomData; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] @@ -72,92 +63,6 @@ pub struct VIDTaskState< pub id: u64, } -/// Struct to maintain VID Vote Collection task state -pub struct VIDVoteCollectionTaskState> { - /// Network for all nodes - pub network: Arc, - /// Membership for teh quorum - pub membership: Arc, - /// This Nodes Public Key - pub public_key: TYPES::SignatureKey, - /// Our Private Key - pub private_key: ::PrivateKey, - #[allow(clippy::type_complexity)] - /// Accumulates VID votes - pub accumulator: Either< - VoteAccumulator, VIDCertificate>, - VIDCertificate, - >, - /// the current view - pub cur_view: TYPES::Time, - /// event stream for channel events - pub event_stream: ChannelStream>, - /// the id of this task state - pub id: u64, -} - -impl> TS for VIDVoteCollectionTaskState {} - -#[instrument(skip_all, fields(id = state.id, view = *state.cur_view), name = "VID Vote Collection Task", level = "error")] -async fn vote_handle( - mut state: VIDVoteCollectionTaskState, - event: HotShotEvent, -) -> ( - Option, - VIDVoteCollectionTaskState, -) -where - TYPES: NodeType, - I: NodeImplementation, -{ - match event { - HotShotEvent::VidVoteRecv(vote) => { - debug!( - "VID vote recv, collection task {:?}", - vote.get_view_number() - ); - - // For the case where we receive votes after we've made a certificate - if state.accumulator.is_right() { - debug!("VID accumulator finished view: {:?}", state.cur_view); - return (None, state); - } - - let accumulator = state.accumulator.left().unwrap(); - match accumulator.accumulate(&vote, state.membership.as_ref()) { - Left(new_accumulator) => { - state.accumulator = either::Left(new_accumulator); - } - - Right(vid_cert) => { - debug!("Sending VID cert! {:?}", vid_cert.view_number); - state - .event_stream - .publish(HotShotEvent::VidCertSend( - vid_cert.clone(), - state.public_key.clone(), - )) - .await; - - state.accumulator = Right(vid_cert.clone()); - state - .network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDVotes( - *vid_cert.view_number, - )) - .await; - - // Return completed at this point - return (Some(HotShotTaskCompleted::ShutDown), state); - } - } - } - _ => { - error!("unexpected event {:?}", event); - } - } - (None, state) -} impl, A: ConsensusApi + 'static> VIDTaskState @@ -169,77 +74,6 @@ impl, A: ConsensusApi + event: HotShotEvent, ) -> Option { match event { - HotShotEvent::VidVoteRecv(vote) => { - // Check if we are the leader and the vote is from the sender. - let view = vote.get_view_number(); - if self.membership.get_leader(view) != self.public_key { - error!( - "We are not the VID leader for view {} are we leader for next view? {}", - *view, - self.membership.get_leader(view + 1) == self.public_key - ); - return None; - } - - let handle_event = HandleEvent(Arc::new(move |event, state| { - async move { vote_handle(state, event).await }.boxed() - })); - let collection_view = - if let Some((collection_view, collection_id, _)) = &self.vote_collector { - // TODO: Is this correct for consecutive leaders? - if view > *collection_view { - // warn!("shutting down for view {:?}", collection_view); - self.registry.shutdown_task(*collection_id).await; - } - *collection_view - } else { - TYPES::Time::new(0) - }; - - if view > collection_view { - let new_accumulator = VoteAccumulator { - vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0; self.membership.total_nodes()], - phantom: PhantomData, - }; - - let accumulator = new_accumulator.accumulate(&vote, self.membership.as_ref()); - - let state = VIDVoteCollectionTaskState { - network: self.network.clone(), - membership: self.membership.clone(), - public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - accumulator, - cur_view: view, - event_stream: self.event_stream.clone(), - id: self.id, - }; - let name = "VID Vote Collection"; - let filter = FilterEvent(Arc::new(|event| { - matches!(event, HotShotEvent::VidVoteRecv(_)) - })); - let builder = - TaskBuilder::>::new(name.to_string()) - .register_event_stream(self.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(state) - .register_event_handler(handle_event); - let id = builder.get_task_id().unwrap(); - let stream_id = builder.get_stream_id().unwrap(); - let _task = async_spawn(async move { - VIDVoteCollectionTypes::build(builder).launch().await - }); - self.vote_collector = Some((view, id, stream_id)); - } else if let Some((_, _, stream_id)) = self.vote_collector { - self.event_stream - .direct_message(stream_id, HotShotEvent::VidVoteRecv(vote)) - .await; - }; - } HotShotEvent::VidDisperseRecv(disperse, sender) => { let view = disperse.data.get_view_number(); @@ -280,13 +114,6 @@ impl, A: ConsensusApi + // TODO https://github.com/EspressoSystems/HotShot/issues/1692 // consensus.saved_payloads.insert(proposal.data.block_payload); } - HotShotEvent::VidCertRecv(cert) => { - self.network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDCertificate( - *cert.view_number, - )) - .await; - } HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { return None; @@ -304,25 +131,12 @@ impl, A: ConsensusApi + )) .await; - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForVIDCertificate( - *self.cur_view + 1, - )) - .await; - // If we are not the next leader, we should exit if self.membership.get_leader(self.cur_view + 1) != self.public_key { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); return None; } - // Start polling for VID votes for the "next view" - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForVIDVotes( - *self.cur_view + 1, - )) - .await; - return None; } @@ -342,8 +156,6 @@ impl, A: ConsensusApi + event, HotShotEvent::Shutdown | HotShotEvent::VidDisperseRecv(_, _) - | HotShotEvent::VidVoteRecv(_) - | HotShotEvent::VidCertRecv(_) | HotShotEvent::ViewChange(_) ) } @@ -355,14 +167,6 @@ impl, A: ConsensusApi + { } -/// Type alias for VID Vote Collection Types -pub type VIDVoteCollectionTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - VIDVoteCollectionTaskState, ->; - /// Type alias for VID Task Types pub type VIDTaskTypes = HSTWithEvent< ConsensusTaskError, diff --git a/types/src/message.rs b/types/src/message.rs index 44cd8ec351..6513413358 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -5,11 +5,11 @@ use crate::data::QuorumProposal; use crate::simple_certificate::{ - DACertificate, VIDCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }; use crate::simple_vote::{ - DAVote, TimeoutVote, VIDVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, + DAVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; use crate::vote::HasViewNumber; use crate::{ @@ -76,10 +76,6 @@ pub enum MessagePurpose { Data, /// VID disperse, like [`Proposal`]. VidDisperse, - /// VID vote, like [`Vote`]. - VidVote, - /// VID certificate, like [`DAC`]. - VidCert, } // TODO (da) make it more customized to the consensus layer, maybe separating the specific message @@ -202,10 +198,6 @@ pub enum ProcessedCommitteeConsensusMessage { DACertificate(DACertificate, TYPES::SignatureKey), /// VID dispersal data. Like [`DAProposal`] VidDisperseMsg(Proposal>, TYPES::SignatureKey), - /// Vote from VID storage node. Like [`DAVote`] - VidVote(VIDVote, TYPES::SignatureKey), - /// Certificate for VID. Like [`DACertificate`] - VidCertificate(VIDCertificate, TYPES::SignatureKey), } impl From> @@ -225,12 +217,6 @@ impl From> ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, _) => { CommitteeConsensusMessage::VidDisperseMsg(disperse) } - ProcessedCommitteeConsensusMessage::VidVote(v, _) => { - CommitteeConsensusMessage::VidVote(v) - } - ProcessedCommitteeConsensusMessage::VidCertificate(cert, _) => { - CommitteeConsensusMessage::VidCertificate(cert) - } } } } @@ -251,12 +237,6 @@ impl ProcessedCommitteeConsensusMessage { CommitteeConsensusMessage::VidDisperseMsg(disperse) => { ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, sender) } - CommitteeConsensusMessage::VidVote(v) => { - ProcessedCommitteeConsensusMessage::VidVote(v, sender) - } - CommitteeConsensusMessage::VidCertificate(cert) => { - ProcessedCommitteeConsensusMessage::VidCertificate(cert, sender) - } } } } @@ -336,15 +316,6 @@ pub enum CommitteeConsensusMessage { /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. /// TODO this variant should not be a [`CommitteeConsensusMessage`] because VidDisperseMsg(Proposal>), - - /// Vote for VID disperse data - /// - /// Like [`DAVote`]. - VidVote(VIDVote), - /// VID certificate data is available - /// - /// Like [`DACertificate`] - VidCertificate(VIDCertificate), } /// Messages for sequencing consensus. @@ -404,11 +375,9 @@ impl SequencingMessage { vote_message.get_view_number() } CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, - CommitteeConsensusMessage::VidCertificate(cert) => cert.view_number, CommitteeConsensusMessage::VidDisperseMsg(disperse) => { disperse.data.get_view_number() } - CommitteeConsensusMessage::VidVote(vote) => vote.get_view_number(), } } } @@ -438,10 +407,8 @@ impl SequencingMessage { Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, - CommitteeConsensusMessage::VidVote(_) => MessagePurpose::VidVote, CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, - CommitteeConsensusMessage::VidCertificate(_) => MessagePurpose::VidCert, }, } } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 638891e3fa..9b64a9160d 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -12,7 +12,7 @@ use ethereum_types::U256; use crate::{ data::Leaf, simple_vote::{ - DAData, QuorumData, TimeoutData, VIDData, ViewSyncCommitData, ViewSyncFinalizeData, + DAData, QuorumData, TimeoutData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ @@ -128,8 +128,6 @@ pub type QuorumCertificate = SimpleCertificate>; pub type DACertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate = SimpleCertificate>; -/// type alias for a VID certificate -pub type VIDCertificate = SimpleCertificate; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` /// Type alias for a `ViewSyncPreCommit` certificate over a view number diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 94f3f52429..849aa9c4aa 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -209,8 +209,6 @@ impl = SimpleVote>; /// DA vote type alias pub type DAVote = SimpleVote; -/// VID vote type alias -pub type VIDVote = SimpleVote; /// Timeout Vote type alias pub type TimeoutVote = SimpleVote>; /// View Sync Commit Vote type alias diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 1ccdee6314..3a8e754aae 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -142,8 +142,6 @@ pub enum NetworkError { pub enum ConsensusIntentEvent { /// Poll for votes for a particular view PollForVotes(u64), - /// Poll for VID votes for a particular view - PollForVIDVotes(u64), /// Poll for a proposal for a particular view PollForProposal(u64), /// Poll for VID disperse data for a particular view @@ -152,8 +150,6 @@ pub enum ConsensusIntentEvent { PollForCurrentProposal, /// Poll for a DAC for a particular view PollForDAC(u64), - /// Poll for a VID certificate for a certain view - PollForVIDCertificate(u64), /// Poll for view sync votes starting at a particular view PollForViewSyncVotes(u64), /// Poll for view sync proposals (certificates) for a particular view @@ -164,16 +160,12 @@ pub enum ConsensusIntentEvent { PollFutureLeader(u64, K), /// Cancel polling for votes CancelPollForVotes(u64), - /// Cancel polling for VID votes for a particular view - CancelPollForVIDVotes(u64), /// Cancel polling for view sync votes. CancelPollForViewSyncVotes(u64), /// Cancel polling for proposals. CancelPollForProposal(u64), /// Cancal polling for DAC. CancelPollForDAC(u64), - /// Cancel polling for VID certificate - CancelPollForVIDCertificate(u64), /// Cancel polling for view sync certificate. CancelPollForViewSyncCertificate(u64), /// Cancel polling for VID disperse data @@ -194,13 +186,9 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) | ConsensusIntentEvent::CancelPollForVotes(view_number) | ConsensusIntentEvent::CancelPollForProposal(view_number) - | ConsensusIntentEvent::PollForVIDCertificate(view_number) - | ConsensusIntentEvent::PollForVIDVotes(view_number) | ConsensusIntentEvent::PollForVIDDisperse(view_number) | ConsensusIntentEvent::CancelPollForVIDDisperse(view_number) | ConsensusIntentEvent::CancelPollForDAC(view_number) - | ConsensusIntentEvent::CancelPollForVIDCertificate(view_number) - | ConsensusIntentEvent::CancelPollForVIDVotes(view_number) | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForTransactions(view_number) diff --git a/web_server/api.toml b/web_server/api.toml index 64c843d3a7..6e33d6fdf0 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -52,13 +52,6 @@ DOC = """ Return the DA certificate for a given view number """ -# GET the VID certificate for a view, where the view is passed as an argument -[route.getvidcertificate] -PATH = ["vid_certificate/:view_number"] -":view_number" = "Integer" -DOC = """ -Return the VID certificate for a given view number -""" # POST a DA certificate, where the view is passed as an argument [route.postcertificate] @@ -69,14 +62,6 @@ DOC = """ Post the DA certificate for a given view_number """ -# POST a VID certificate, where the view is passed as an argument -[route.postvidcertificate] -PATH = ["vid_certificate/:view_number"] -METHOD = "POST" -":view_number" = "Integer" -DOC = """ -Post the VID certificate for a given view_number -""" # GET all the votes from a given index for a given view number [route.getvotes] @@ -88,15 +73,6 @@ DOC = """ Get all votes for a view number """ -# GET all the VID votes from a given index for a given view number -[route.getvidvotes] -PATH = ["vid_votes/:view_number/:index"] -":view_number" = "Integer" -":index" = "Integer" -METHOD = "GET" -DOC = """ -Get all VID votes for a view number -""" # POST a vote, where the view number is passed as an argument [route.postvote] @@ -107,14 +83,6 @@ DOC = """ Send a vote """ -# POST a VID vote, where the view number is passed as an argument -[route.postvidvote] -PATH = ["vid_votes/:view_number"] -":view_number" = "Integer" -METHOD = "POST" -DOC = """ -Send a VID vote -""" # GET all transactions starting at :index [route.gettransactions] diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index f7d76d014e..87483366df 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -687,13 +687,6 @@ where } .boxed() })? - .get("getvidcertificate", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - state.get_vid_certificate(view_number) - } - .boxed() - })? .get("getvotes", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -702,14 +695,6 @@ where } .boxed() })? - .get("getvidvotes", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let index: u64 = req.integer_param("index")?; - state.get_vid_votes(view_number, index) - } - .boxed() - })? .get("getviewsyncvotes", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -734,15 +719,6 @@ where } .boxed() })? - .post("postvidvote", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically - let vote = req.body_bytes(); - state.post_vid_vote(view_number, vote) - } - .boxed() - })? .post("postviewsyncvote", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -784,14 +760,6 @@ where } .boxed() })? - .post("postvidcertificate", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let cert = req.body_bytes(); - state.post_vid_certificate(view_number, cert) - } - .boxed() - })? .post("posttransaction", |req, state| { async move { let txns = req.body_bytes(); From 51cc741e5da18ad1d61a866663294728733479f7 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 29 Nov 2023 17:41:42 -0800 Subject: [PATCH 0466/1393] lint --- task-impls/src/consensus.rs | 16 ++++++++++++---- task-impls/src/events.rs | 4 ++-- task-impls/src/vid.rs | 11 +++++------ testing/tests/vid_task.rs | 2 +- 4 files changed, 20 insertions(+), 13 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ca12dc0cf2..223c5c1cce 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -368,7 +368,10 @@ impl, A: ConsensusApi + // Only vote if you has seen the VID share for this view if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - error!("We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number); + error!( + "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + proposal.view_number + ); return false; } @@ -1110,7 +1113,10 @@ impl, A: ConsensusApi + HotShotEvent::VidDisperseRecv(disperse, sender) => { let view = disperse.data.get_view_number(); - debug!("VID disperse received for view: {:?} in consensus task", view); + debug!( + "VID disperse received for view: {:?} in consensus task", + view + ); // stop polling for the received disperse self.quorum_network @@ -1134,7 +1140,10 @@ impl, A: ConsensusApi + // Check whether the sender is the right leader for this view let view_leader_key = self.committee_membership.get_leader(view); if view_leader_key != sender { - error!("VID dispersal/share is not from expected leader key for view {} \n", *view); + error!( + "VID dispersal/share is not from expected leader key for view {} \n", + *view + ); return; } @@ -1158,7 +1167,6 @@ impl, A: ConsensusApi + )) .await; - // update the view in state to the one in the message // Publish a view change event to the application if !self.update_view(new_view).await { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f15509d300..02430290bb 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -5,8 +5,8 @@ use hotshot_types::{ data::{DAProposal, Leaf, QuorumProposal, VidCommitment, VidDisperse}, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 76a448f8c6..08f98da3cd 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -16,10 +16,7 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; -use hotshot_types::{ - traits::network::CommunicationChannel, - vote::HasViewNumber, -}; +use hotshot_types::{traits::network::CommunicationChannel, vote::HasViewNumber}; use snafu::Snafu; use std::sync::Arc; @@ -63,7 +60,6 @@ pub struct VIDTaskState< pub id: u64, } - impl, A: ConsensusApi + 'static> VIDTaskState { @@ -101,7 +97,10 @@ impl, A: ConsensusApi + // Check whether the sender is the right leader for this view let view_leader_key = self.membership.get_leader(view); if view_leader_key != sender { - error!("VID dispersal/share is not from expected leader key for view {} \n", *view); + error!( + "VID dispersal/share is not from expected leader key for view {} \n", + *view + ); return None; } diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 69c0b830c3..44ceebe1ba 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -4,12 +4,12 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; +use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ block_impl::VIDTransaction, data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, }; -use hotshot_types::traits::node_implementation::NodeType; use std::collections::HashMap; use std::marker::PhantomData; From ec0569c2c31c30ae3312ff3f3987185c75577756 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 30 Nov 2023 09:57:35 -0500 Subject: [PATCH 0467/1393] Updated orchestrator and web server to use url instead of hostname with hardcoded http protocol --- hotshot/examples/infra/mod.rs | 22 +++++++++---------- hotshot/examples/webserver/validator.rs | 2 +- hotshot/examples/webserver/webserver.rs | 3 ++- .../traits/networking/web_server_network.rs | 8 ++++--- orchestrator/run-config.toml | 4 ++-- orchestrator/src/client.rs | 5 ++--- orchestrator/src/config.rs | 2 +- orchestrator/src/lib.rs | 6 ++--- web_server/src/lib.rs | 3 ++- 9 files changed, 29 insertions(+), 26 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index ac0475b961..c331e40c3c 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -78,8 +78,8 @@ use tracing::{error, info, warn}; )] /// Arguments passed to the orchestrator pub struct OrchestratorArgs { - /// The address the orchestrator runs on - pub host: IpAddr, + /// The url the orchestrator runs on; this should be in the form of `http://localhost` or `http://0.0.0.0` + pub url: String, /// The port the orchestrator runs on pub port: u16, /// The configuration file to be used for this run @@ -138,7 +138,7 @@ pub async fn run_orchestrator< NODE: NodeImplementation>, >( OrchestratorArgs { - host, + url, port, config_file, }: OrchestratorArgs, @@ -148,7 +148,7 @@ pub async fn run_orchestrator< let _result = hotshot_orchestrator::run_orchestrator::< TYPES::SignatureKey, TYPES::ElectionConfigType, - >(run_config, host, port) + >(run_config, url, port) .await; } @@ -169,13 +169,13 @@ async fn webserver_network_from_config( ) -> WebServerNetwork { // Get the configuration for the web server let WebServerConfig { - host, + url, port, wait_between_polls, }: WebServerConfig = config.clone().web_server_config.unwrap(); WebServerNetwork::create( - &host.to_string(), + url, port, wait_between_polls, pub_key.clone(), @@ -550,7 +550,7 @@ where // extract values from config (for DA network) let WebServerConfig { - host, + url, port, wait_between_polls, }: WebServerConfig = config.clone().da_web_server_config.unwrap(); @@ -570,7 +570,7 @@ where let da_channel: WebCommChannel = WebCommChannel::new( WebServerNetwork::create( - &host.to_string(), + url.clone(), port, wait_between_polls, pub_key.clone(), @@ -580,7 +580,7 @@ where ); let vid_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true) + WebServerNetwork::create(url, port, wait_between_polls, pub_key, true) .into(), ); @@ -764,7 +764,7 @@ where // extract values from config (for webserver DA network) let WebServerConfig { - host, + url, port, wait_between_polls, }: WebServerConfig = config.clone().da_web_server_config.unwrap(); @@ -774,7 +774,7 @@ where webserver_network_from_config::(config.clone(), pub_key.clone()).await; let webserver_underlying_da_network = - WebServerNetwork::create(&host.to_string(), port, wait_between_polls, pub_key, true); + WebServerNetwork::create(url, port, wait_between_polls, pub_key, true); webserver_underlying_quorum_network.wait_for_ready().await; diff --git a/hotshot/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs index 5e6c91cbca..0d7ba8eddf 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/hotshot/examples/webserver/validator.rs @@ -25,7 +25,7 @@ async fn main() { let args = ValidatorArgs::parse(); info!( "connecting to orchestrator at {:?}:{:?}", - args.host, args.port + args.url, args.port ); infra::main_entry_point::< DemoTypes, diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index 391fc60028..2d2822e0be 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -9,6 +9,7 @@ use clap::Parser; #[derive(Parser, Debug)] struct WebServerArgs { + url: String, port: u16, } @@ -22,6 +23,6 @@ async fn main() { let _sender = Arc::new(server_shutdown_sender); let _result = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown), args.port) + >(Some(server_shutdown), args.url, args.port) .await; } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 9594818236..1996e4f36e 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -506,13 +506,13 @@ impl WebServerNetwork { /// # Panics /// if the web server url is malformed pub fn create( - host: &str, + url: String, port: u16, wait_between_polls: Duration, key: TYPES::SignatureKey, is_da_server: bool, ) -> Self { - let base_url_string = format!("http://{host}:{port}"); + let base_url_string = format!("{url}:{port}"); info!("Connecting to web server at {base_url_string:?} is da: {is_da_server}"); let base_url = base_url_string.parse(); @@ -1231,12 +1231,14 @@ impl TestableNetworkingImplementation for WebServerNetwo ) -> Box Self + 'static> { let (server_shutdown_sender, server_shutdown) = oneshot(); let sender = Arc::new(server_shutdown_sender); + let url = "http://localhost"; // TODO ED Restrict this to be an open port using portpicker let port = random::(); info!("Launching web server on port {port}"); // Start web server async_spawn(hotshot_web_server::run_web_server::( Some(server_shutdown), + url.to_owned(), port, )); @@ -1253,7 +1255,7 @@ impl TestableNetworkingImplementation for WebServerNetwo Box::new(move |id| { let sender = Arc::clone(&sender); let mut network = WebServerNetwork::create( - "0.0.0.0", + "http://localhost".to_string(), port, Duration::from_millis(100), known_nodes[id as usize].clone(), diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index f15a45fae1..80ef5aa4f2 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -66,11 +66,11 @@ online_time = 10 base_port = 9000 [web_server_config] -host = "127.0.0.1" +url = "http://localhost" port = 9000 [da_web_server_config] -host = "127.0.0.1" +url = "http://localhost" port = 9001 [web_server_config.wait_between_polls] diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index d612479fb4..6db01f7c9c 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -23,7 +23,7 @@ pub struct OrchestratorClient { /// Arguments passed to the validator pub struct ValidatorArgs { /// The address the orchestrator runs on - pub host: String, + pub url: String, /// The port the orchestrator runs on pub port: u16, /// This node's public IP address, for libp2p @@ -34,8 +34,7 @@ pub struct ValidatorArgs { impl OrchestratorClient { /// Creates the client that connects to the orchestrator pub async fn connect_to_orchestrator(args: ValidatorArgs) -> Self { - let base_url = format!("{0}:{1}", args.host, args.port); - let base_url = format!("http://{base_url}").parse().unwrap(); + let base_url = format!("{0}:{1}", args.url, args.port).parse().unwrap(); let client = surf_disco::Client::::new(base_url); // TODO ED: Add healthcheck wait here OrchestratorClient { client } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index b07a5caa98..7082b318b0 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -52,7 +52,7 @@ pub struct Libp2pConfigFile { #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct WebServerConfig { - pub host: IpAddr, + pub url: String, pub port: u16, pub wait_between_polls: Duration, } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 09447973a6..0280863483 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -237,7 +237,7 @@ where /// Runs the orchestrator pub async fn run_orchestrator( network_config: NetworkConfig, - host: IpAddr, + url: String, port: u16, ) -> io::Result<()> where @@ -252,6 +252,6 @@ where let mut app = App::>, ServerError>::with_state(state); app.register_module("api", api.unwrap()) .expect("Error registering api"); - tracing::error!("lisening on {:?}:{:?}", host, port); - app.serve(format!("http://{host}:{port}")).await + tracing::error!("lisening on {:?}:{:?}", url, port); + app.serve(format!("{url}:{port}")).await } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index f7d76d014e..0da18fdc04 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -854,6 +854,7 @@ where pub async fn run_web_server( shutdown_listener: Option>, + url: String, port: u16, ) -> io::Result<()> { let options = Options::default(); @@ -864,7 +865,7 @@ pub async fn run_web_server( app.register_module("api", api).unwrap(); - let app_future = app.serve(format!("http://0.0.0.0:{port}")); + let app_future = app.serve(format!("{url}:{port}")); info!("Web server started on port {port}"); From f47abaa90e3e1bb7c160f29e94f1b55ec45ce285 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 30 Nov 2023 10:12:27 -0500 Subject: [PATCH 0468/1393] review comments --- task-impls/src/consensus.rs | 28 ++++++++++++++++++++++++++++ task-impls/src/vote.rs | 18 +++++++++--------- 2 files changed, 37 insertions(+), 9 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c6c48e4a14..1509a7896f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -723,6 +723,20 @@ impl, A: ConsensusApi + } HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quroum vote: {:?}", vote.get_view_number()); + if self + .quorum_membership + .get_leader(vote.get_view_number() + 1) + != self.public_key + { + error!( + "We are not the leader for view {} are we the leader for view + 1? {}", + *vote.get_view_number() + 1, + self.quorum_membership + .get_leader(vote.get_view_number() + 2) + == self.public_key + ); + return; + } let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { @@ -761,6 +775,20 @@ impl, A: ConsensusApi + } } HotShotEvent::TimeoutVoteRecv(ref vote) => { + if self + .timeout_membership + .get_leader(vote.get_view_number() + 1) + != self.public_key + { + error!( + "We are not the leader for view {} are we the leader for view + 1? {}", + *vote.get_view_number() + 1, + self.timeout_membership + .get_leader(vote.get_view_number() + 2) + == self.public_key + ); + return; + } let collection_view = if let Some((collection_view, collection_task, _)) = &self.vote_collector { if vote.get_view_number() > *collection_view { diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index c75599e33d..b0ad30b57d 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -99,10 +99,10 @@ impl< self.accumulator = Some(acc); (None, self) } - Either::Right(qc) => { - debug!("Certificate Formed! {:?}", qc); + Either::Right(cert) => { + debug!("Certificate Formed! {:?}", cert); self.event_stream - .publish(VOTE::make_cert_event(qc, &self.public_key)) + .publish(VOTE::make_cert_event(cert, &self.public_key)) .await; self.accumulator = None; (Some(HotShotTaskCompleted::ShutDown), self) @@ -157,13 +157,18 @@ where } /// Info needed to create a vote accumulator task -#[allow(missing_docs)] pub struct AccumulatorInfo { + /// This nodes Pub Key pub public_key: TYPES::SignatureKey, + /// Membership we are accumulation votes for pub membership: Arc, + /// View of the votes we are collecting pub view: TYPES::Time, + /// Global event stream shared by all consensus tasks pub event_stream: ChannelStream>, + /// This nodes id pub id: u64, + /// Task Registry for all tasks used by this node pub registry: GlobalRegistry, } @@ -190,11 +195,6 @@ where + 'static, VoteCollectionTaskState: HandleVoteEvent, { - if vote.get_leader(info.membership.as_ref()) != info.public_key { - debug!("Vote is not to the leader"); - return None; - } - if vote.get_view_number() != info.view { error!( "Vote view does not match! vote view is {} current view is {}", From ba4adb26777d2b00b5a3935ab20e7ef8ce4491bd Mon Sep 17 00:00:00 2001 From: Rob Date: Thu, 30 Nov 2023 10:18:03 -0500 Subject: [PATCH 0469/1393] orchestrator spelling --- orchestrator/src/lib.rs | 2 +- task-impls/src/network.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 0280863483..69bb39e68e 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -252,6 +252,6 @@ where let mut app = App::>, ServerError>::with_state(state); app.register_module("api", api.unwrap()) .expect("Error registering api"); - tracing::error!("lisening on {:?}:{:?}", url, port); + tracing::error!("listening on {:?}:{:?}", url, port); app.serve(format!("{url}:{port}")).await } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 79979d6470..bff11ac6ae 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -157,7 +157,7 @@ impl> /// # Panics /// Panic sif a direct message event is received with no recipient #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 - #[instrument(skip_all, fields(view = *self.view), name = "Newtork Task", level = "error")] + #[instrument(skip_all, fields(view = *self.view), name = "Network Task", level = "error")] pub async fn handle_event( &mut self, From 6ceaa0b804a6e0a75c13a943c3b57f3452bffc80 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 30 Nov 2023 10:38:10 -0500 Subject: [PATCH 0470/1393] Fix parser trait bounds and lints --- hotshot/examples/combined/all.rs | 16 ++++++--- hotshot/examples/combined/multi-validator.rs | 8 +++-- hotshot/examples/combined/validator.rs | 2 +- hotshot/examples/infra/mod.rs | 36 +++---------------- hotshot/examples/libp2p/all.rs | 4 +-- hotshot/examples/libp2p/multi-validator.rs | 8 +++-- hotshot/examples/libp2p/validator.rs | 2 +- hotshot/examples/webserver/all.rs | 16 ++++++--- hotshot/examples/webserver/multi-validator.rs | 7 ++-- hotshot/examples/webserver/multi-webserver.rs | 20 ++++++++--- hotshot/examples/webserver/webserver.rs | 2 +- .../traits/networking/web_server_network.rs | 6 ++-- web_server/src/lib.rs | 2 +- 13 files changed, 67 insertions(+), 62 deletions(-) diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index a9740813ea..48cc1cbb89 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -45,7 +45,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown_cdn), 9000) + >( + Some(server_shutdown_cdn), + "http://localhost".to_string(), + 9000, + ) .await { error!("Problem starting cdn web server: {:?}", e); @@ -54,7 +58,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown_da), 9001) + >( + Some(server_shutdown_da), + "http://localhost".to_string(), + 9001, + ) .await { error!("Problem starting da web server: {:?}", e); @@ -70,7 +78,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - host: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + url: "http://localhost".to_string(), port: 4444, config_file: args.config_file.clone(), })); @@ -92,7 +100,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - host: "127.0.0.1".to_string(), + url: "http://localhost".to_string(), port: 4444, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), }) diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index c227ee1818..c5249f4ba7 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -21,7 +21,7 @@ struct MultiValidatorArgs { /// Number of validators to run pub num_nodes: u16, /// The address the orchestrator runs on - pub host: IpAddr, + pub url: String, /// The port the orchestrator runs on pub port: u16, /// This node's public IP address, for libp2p @@ -41,11 +41,13 @@ async fn main() { let args = MultiValidatorArgs::parse(); tracing::error!( "connecting to orchestrator at {:?}:{:?}", - args.host, + args.url, args.port ); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { + let url: String = args.url.clone(); + let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, @@ -56,7 +58,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - host: args.host.to_string(), + url, port: args.port, public_ip: args.public_ip, }) diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs index 5e6c91cbca..0d7ba8eddf 100644 --- a/hotshot/examples/combined/validator.rs +++ b/hotshot/examples/combined/validator.rs @@ -25,7 +25,7 @@ async fn main() { let args = ValidatorArgs::parse(); info!( "connecting to orchestrator at {:?}:{:?}", - args.host, args.port + args.url, args.port ); infra::main_entry_point::< DemoTypes, diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index c331e40c3c..83b360dbc5 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -55,20 +55,8 @@ use std::{collections::BTreeSet, sync::Arc}; use std::{num::NonZeroUsize, str::FromStr}; use libp2p_identity::PeerId; -// use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}; use std::fmt::Debug; -use std::{ - //collections::{BTreeSet, VecDeque}, - fs, - net::IpAddr, - //num::NonZeroUsize, - //str::FromStr, - //sync::Arc, - //time::{Duration, Instant}, - time::Instant, -}; -//use surf_disco::error::ClientError; -//use surf_disco::Client; +use std::{fs, time::Instant}; use tracing::{error, info, warn}; #[derive(Parser, Debug, Clone)] @@ -174,13 +162,7 @@ async fn webserver_network_from_config( wait_between_polls, }: WebServerConfig = config.clone().web_server_config.unwrap(); - WebServerNetwork::create( - url, - port, - wait_between_polls, - pub_key.clone(), - false, - ) + WebServerNetwork::create(&url, port, wait_between_polls, pub_key.clone(), false) } async fn libp2p_network_from_config( @@ -569,19 +551,11 @@ where WebCommChannel::new(underlying_quorum_network.into()); let da_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create( - url.clone(), - port, - wait_between_polls, - pub_key.clone(), - true, - ) - .into(), + WebServerNetwork::create(&url, port, wait_between_polls, pub_key.clone(), true).into(), ); let vid_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(url, port, wait_between_polls, pub_key, true) - .into(), + WebServerNetwork::create(&url, port, wait_between_polls, pub_key, true).into(), ); WebServerDARun { @@ -774,7 +748,7 @@ where webserver_network_from_config::(config.clone(), pub_key.clone()).await; let webserver_underlying_da_network = - WebServerNetwork::create(url, port, wait_between_polls, pub_key, true); + WebServerNetwork::create(&url, port, wait_between_polls, pub_key, true); webserver_underlying_quorum_network.wait_for_ready().await; diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index c8637fe7a7..cf0f9e69b2 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -43,7 +43,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - host: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + url: "http://localhost".to_string(), port: 4444, config_file: args.config_file.clone(), })); @@ -65,7 +65,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - host: "127.0.0.1".to_string(), + url: "http://localhost".to_string(), port: 4444, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), }) diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index c227ee1818..c5249f4ba7 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -21,7 +21,7 @@ struct MultiValidatorArgs { /// Number of validators to run pub num_nodes: u16, /// The address the orchestrator runs on - pub host: IpAddr, + pub url: String, /// The port the orchestrator runs on pub port: u16, /// This node's public IP address, for libp2p @@ -41,11 +41,13 @@ async fn main() { let args = MultiValidatorArgs::parse(); tracing::error!( "connecting to orchestrator at {:?}:{:?}", - args.host, + args.url, args.port ); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { + let url: String = args.url.clone(); + let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, @@ -56,7 +58,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - host: args.host.to_string(), + url, port: args.port, public_ip: args.public_ip, }) diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 5e6c91cbca..0d7ba8eddf 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -25,7 +25,7 @@ async fn main() { let args = ValidatorArgs::parse(); info!( "connecting to orchestrator at {:?}:{:?}", - args.host, args.port + args.url, args.port ); infra::main_entry_point::< DemoTypes, diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index 2f13786f02..1e53857a6e 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -37,7 +37,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown_cdn), 9000) + >( + Some(server_shutdown_cdn), + "http://localhost".to_string(), + 9000, + ) .await { error!("Problem starting cdn web server: {:?}", e); @@ -46,7 +50,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown_da), 9001) + >( + Some(server_shutdown_da), + "http://localhost".to_string(), + 9001, + ) .await { error!("Problem starting da web server: {:?}", e); @@ -62,7 +70,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - host: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + url: "http://localhost".to_string(), port: 4444, config_file: args.config_file.clone(), })); @@ -84,7 +92,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - host: "127.0.0.1".to_string(), + url: "http://localhost".to_string(), port: 4444, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), }) diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index c227ee1818..a0405a6de4 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -21,7 +21,7 @@ struct MultiValidatorArgs { /// Number of validators to run pub num_nodes: u16, /// The address the orchestrator runs on - pub host: IpAddr, + pub url: String, /// The port the orchestrator runs on pub port: u16, /// This node's public IP address, for libp2p @@ -41,11 +41,12 @@ async fn main() { let args = MultiValidatorArgs::parse(); tracing::error!( "connecting to orchestrator at {:?}:{:?}", - args.host, + args.url, args.port ); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { + let url = args.url.clone(); let node = async_spawn(async move { infra::main_entry_point::< DemoTypes, @@ -56,7 +57,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - host: args.host.to_string(), + url, port: args.port, public_ip: args.public_ip, }) diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index 4cf6078b50..d6f1018a41 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -11,7 +11,9 @@ use tracing::error; #[derive(Parser, Debug)] struct MultiWebServerArgs { - cdn_port: u16, + consensus_url: String, + da_url: String, + consensus_port: u16, da_port: u16, view_sync_port: u16, } @@ -28,10 +30,14 @@ async fn main() { let _sender = Arc::new(server_shutdown_sender_cdn); let _sender = Arc::new(server_shutdown_sender_da); - let cdn_server = async_spawn(async move { + let consensus_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown_cdn), args.cdn_port) + >( + Some(server_shutdown_cdn), + args.consensus_url.to_string(), + args.consensus_port, + ) .await { error!("Problem starting cdn web server: {:?}", e); @@ -40,12 +46,16 @@ async fn main() { let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown_da), args.da_port) + >( + Some(server_shutdown_da), + args.da_url.to_string(), + args.da_port, + ) .await { error!("Problem starting da web server: {:?}", e); } }); - let _result = futures::future::join_all(vec![cdn_server, da_server]).await; + let _result = futures::future::join_all(vec![consensus_server, da_server]).await; } diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index 2d2822e0be..533ffa351e 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -9,7 +9,7 @@ use clap::Parser; #[derive(Parser, Debug)] struct WebServerArgs { - url: String, + url: String, port: u16, } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 1996e4f36e..d70249d457 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -506,7 +506,7 @@ impl WebServerNetwork { /// # Panics /// if the web server url is malformed pub fn create( - url: String, + url: &str, port: u16, wait_between_polls: Duration, key: TYPES::SignatureKey, @@ -1238,7 +1238,7 @@ impl TestableNetworkingImplementation for WebServerNetwo // Start web server async_spawn(hotshot_web_server::run_web_server::( Some(server_shutdown), - url.to_owned(), + url.to_owned(), port, )); @@ -1255,7 +1255,7 @@ impl TestableNetworkingImplementation for WebServerNetwo Box::new(move |id| { let sender = Arc::clone(&sender); let mut network = WebServerNetwork::create( - "http://localhost".to_string(), + "http://localhost", port, Duration::from_millis(100), known_nodes[id as usize].clone(), diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 0da18fdc04..d1b552b9a0 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -854,7 +854,7 @@ where pub async fn run_web_server( shutdown_listener: Option>, - url: String, + url: String, port: u16, ) -> io::Result<()> { let options = Options::default(); From e01ed9faa53dbb6ca37f56682fa2d5599175874a Mon Sep 17 00:00:00 2001 From: Rob Date: Thu, 30 Nov 2023 10:57:29 -0500 Subject: [PATCH 0471/1393] update readme --- hotshot/examples/webserver/README.md | 27 +++++++++---------- hotshot/examples/webserver/multi-webserver.rs | 1 - 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/hotshot/examples/webserver/README.md b/hotshot/examples/webserver/README.md index ebe9571b5f..e7991228f2 100644 --- a/hotshot/examples/webserver/README.md +++ b/hotshot/examples/webserver/README.md @@ -1,29 +1,28 @@ Commands to run da examples: 1a)Start web servers by either running 3 servers: -just async_std example webserver -- -just async_std example webserver -- -just async_std example webserver -- +just async_std example webserver -- +just async_std example webserver -- 1b)Or use multi-webserver to spin up all three: -just async_std example multi-webserver -- +just async_std example multi-webserver -- 2) Start orchestrator: -just async_std example orchestrator-webserver -- +just async_std example orchestrator-webserver -- 3a) Start validator: -just async_std example validator-webserver -- +just async_std example validator-webserver -- 3b) Or start multiple validators: -just async_std example multi-validator-webserver -- +just async_std example multi-validator-webserver -- I.e. -just async_std example webserver -- 9000 -just async_std example webserver -- 9001 -just async_std example webserver -- 9002 -just async_std example orchestrator-webserver -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml -just async_std example validator-webserver -- 2 0.0.0.0 4444 +just async_std example webserver -- http://127.0.0.1 9000 +just async_std example webserver -- http://127.0.0.1 9001 +just async_std example webserver -- http://127.0.0.1 9002 +just async_std example orchestrator-webserver -- http://127.0.0.1 4444 ./orchestrator/default-run-config.toml +just async_std example validator-webserver -- 2 http://127.0.0.1 4444 OR: just async_std example multi-webserver -- 9000 9001 9002 -just async_std example orchestrator-webserver -- 0.0.0.0 4444 ./orchestrator/default-run-config.toml -just async_std example multi-validator-webserver -- 10 0.0.0.0 4444 \ No newline at end of file +just async_std example orchestrator-webserver -- http://127.0.0.1 4444 ./orchestrator/default-run-config.toml +just async_std example multi-validator-webserver -- 10 http://127.0.0.1 4444 \ No newline at end of file diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index d6f1018a41..182080aba4 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -15,7 +15,6 @@ struct MultiWebServerArgs { da_url: String, consensus_port: u16, da_port: u16, - view_sync_port: u16, } #[cfg_attr(async_executor_impl = "tokio", tokio::main)] From 63b29950b82d9b0c9e0f8e2b43d72df06675de56 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 30 Nov 2023 12:49:10 -0500 Subject: [PATCH 0472/1393] Separate out crypto tests (#2117) * separate out crypto tests * ignore libp2p timeout test --- hotshot-qc/src/bit_vector.rs | 2 +- hotshot-qc/src/bit_vector_old.rs | 2 +- hotshot-qc/src/snarked/circuit.rs | 4 ++-- hotshot-stake-table/src/mt_based.rs | 2 +- hotshot-stake-table/src/mt_based/internal.rs | 4 ++-- hotshot-stake-table/src/vec_based.rs | 2 +- hotshot-state-prover/src/circuit.rs | 3 ++- hotshot-state-prover/src/lib.rs | 2 +- testing/tests/timeout.rs | 1 + 9 files changed, 12 insertions(+), 10 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 833d62c673..1ac940c618 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -352,7 +352,7 @@ mod tests { }; } #[test] - fn test_quorum_certificate() { + fn crypto_test_quorum_certificate() { test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); } } diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 9fda83cbd7..4ee2129140 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -319,7 +319,7 @@ mod tests { }; } #[test] - fn test_quorum_certificate() { + fn crypto_test_quorum_certificate() { test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); } } diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index 00a0478afa..96ac750c1b 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -334,7 +334,7 @@ mod tests { }; #[test] - fn test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { + fn crypto_test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { let a_ecc = Fq377::zero(); test_vk_aggregate_sw_circuit_helper::(a_ecc)?; let a_ecc = Fq254::zero(); @@ -440,7 +440,7 @@ mod tests { } #[test] - fn test_vk_aggregate_te_circuit() -> Result<(), CircuitError> { + fn crypto_test_vk_aggregate_te_circuit() -> Result<(), CircuitError> { let d_ecc : Fq377 = MontFp!("122268283598675559488486339158635529096981886914877139579534153582033676785385790730042363341236035746924960903179"); test_vk_aggregate_te_circuit_helper::(d_ecc) } diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index ecdb15776c..93760e349e 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -230,7 +230,7 @@ mod tests { type Key = ark_bn254::Fq; #[test] - fn test_stake_table() -> Result<(), StakeTableError> { + fn crypto_test_stake_table() -> Result<(), StakeTableError> { let mut st = StakeTable::::new(3); let keys = (0..10).map(Key::from).collect::>(); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(0)); diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 5ab25b855c..9325b24378 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -599,7 +599,7 @@ mod tests { type Key = ark_bn254::Fq; #[test] - fn test_persistent_merkle_tree() { + fn crypto_test_persistent_merkle_tree() { let height = 3; let mut roots = vec![Arc::new(PersistentMerkleNode::::Empty)]; let path = (0..10) @@ -712,7 +712,7 @@ mod tests { } #[test] - fn test_mt_iter() { + fn crypto_test_mt_iter() { let height = 3; let capacity = config::TREE_BRANCH.pow(height); let mut rng = test_rng(); diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index d9945d2de1..ec3b537fe0 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -378,7 +378,7 @@ mod tests { use jf_primitives::signatures::{SchnorrSignatureScheme, SignatureScheme}; #[test] - fn test_stake_table() -> Result<(), StakeTableError> { + fn crypto_test_stake_table() -> Result<(), StakeTableError> { let mut st = StakeTable::::new(); let mut prng = jf_utils::test_rng(); let keys = (0..10) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 22967cfe9b..06a3e75143 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -426,7 +426,8 @@ mod tests { type F = ark_ed_on_bn254::Fq; #[test] - fn test_circuit_building() { + + fn crypto_test_circuit_building() { let num_validators = 10; let mut prng = test_rng(); diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index fb848030ef..29f3334cea 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -172,7 +172,7 @@ mod tests { } #[test] - fn test_proof_generation() { + fn crypto_test_proof_generation() { let num_validators = 10; let mut prng = test_rng(); diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 0a82fe3127..a6d2164f33 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -69,6 +69,7 @@ async fn test_timeout_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_timeout_libp2p() { use std::time::Duration; From 5a6e6195ac0f31ec02d6a51cb05a4c6aadcd63cc Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 30 Nov 2023 11:52:37 -0800 Subject: [PATCH 0473/1393] Test CI with increased duration and next_view_timeout --- testing/src/test_builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f0543f2987..d3734bfcb3 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -137,14 +137,14 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(200, 0), + duration: Duration::new(300, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { ..Default::default() }, timing_data: TimingData { - next_view_timeout: 3000, + next_view_timeout: 5000, ..TimingData::default() }, ..TestMetadata::default() From a6903a518dcc629c3d9372de1b83dbaee49dd910 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 13:04:02 -0800 Subject: [PATCH 0474/1393] resolve a part of comments: log level, cancel poll after val, quorum membership --- task-impls/src/consensus.rs | 26 ++++++++-------- task-impls/src/vid.rs | 49 ++---------------------------- testing/src/overall_safety_task.rs | 2 +- 3 files changed, 16 insertions(+), 61 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 223c5c1cce..00f9a1ac4b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -368,7 +368,7 @@ impl, A: ConsensusApi + // Only vote if you has seen the VID share for this view if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - error!( + info!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); @@ -567,7 +567,7 @@ impl, A: ConsensusApi + let view_leader_key = self.quorum_membership.get_leader(view); if view_leader_key != sender { - error!("Leader key does not match key in proposal"); + warn!("Leader key does not match key in proposal"); return; } @@ -1118,13 +1118,6 @@ impl, A: ConsensusApi + view ); - // stop polling for the received disperse - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( - *disperse.data.view_number, - )) - .await; - // Allow VID disperse date that is one view older, in case we have updated the // view. // Adding `+ 1` on the LHS rather than `- 1` on the RHS, to avoid the overflow @@ -1134,13 +1127,13 @@ impl, A: ConsensusApi + return; } - debug!("VID disperse data is fresh."); + info!("VID disperse data is s not more than one view older."); let payload_commitment = disperse.data.payload_commitment; // Check whether the sender is the right leader for this view - let view_leader_key = self.committee_membership.get_leader(view); + let view_leader_key = self.quorum_membership.get_leader(view); if view_leader_key != sender { - error!( + warn!( "VID dispersal/share is not from expected leader key for view {} \n", *view ); @@ -1148,10 +1141,17 @@ impl, A: ConsensusApi + } if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { - error!("Could not verify VID dispersal/share sig."); + warn!("Could not verify VID dispersal/share sig."); return; } + // stop polling for the received disperse after verifying it's valid + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( + *disperse.data.view_number, + )) + .await; + // Add to the storage that we have received the VID disperse for a specific view self.vid_shares.insert(view, disperse.clone()); } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 551bff7e22..9118c9082a 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -6,6 +6,7 @@ use hotshot_task::{ task::{HotShotTaskCompleted, TS}, task_impls::HSTWithEvent, }; +use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::Consensus, data::VidDisperse, @@ -24,13 +25,12 @@ use hotshot_types::{ network::ConsensusIntentEvent, }, }; -use hotshot_types::{traits::network::CommunicationChannel, vote::HasViewNumber}; use hotshot_task::event_stream::EventStream; use snafu::Snafu; use std::marker::PhantomData; use std::sync::Arc; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, instrument}; #[derive(Snafu, Debug)] /// Error type for consensus tasks @@ -80,50 +80,6 @@ impl, A: ConsensusApi + event: HotShotEvent, ) -> Option { match event { - HotShotEvent::VidDisperseRecv(disperse, sender) => { - let view = disperse.data.get_view_number(); - - debug!("VID disperse received for view: {:?} in VID task", view); - - // stop polling for the received disperse - self.network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( - *disperse.data.view_number, - )) - .await; - - // Allow VID disperse date that is one view older, in case we have updated the - // view. - // Adding `+ 1` on the LHS rather than `- 1` on the RHS, to avoid the overflow - // error due to subtracting the genesis view number. - if view + 1 < self.cur_view { - warn!("Throwing away VID disperse data that is more than one view older"); - return None; - } - - debug!("VID disperse data is fresh."); - let payload_commitment = disperse.data.payload_commitment; - - // Check whether the sender is the right leader for this view - let view_leader_key = self.membership.get_leader(view); - if view_leader_key != sender { - error!( - "VID dispersal/share is not from expected leader key for view {} \n", - *view - ); - return None; - } - - if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { - error!("Could not verify VID dispersal/share sig."); - return None; - } - - // Record the block we have promised to make available. - // TODO https://github.com/EspressoSystems/HotShot/issues/1692 - // consensus.saved_payloads.insert(proposal.data.block_payload); - } - HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { // TODO let srs = test_srs(NUM_STORAGE_NODES); @@ -213,7 +169,6 @@ impl, A: ConsensusApi + matches!( event, HotShotEvent::Shutdown - | HotShotEvent::VidDisperseRecv(_, _) | HotShotEvent::TransactionsSequenced(_, _, _) | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index a27b0a67f2..0f276bea5c 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -366,7 +366,7 @@ impl Default for OverallSafetyPropertiesDescription { check_leaf: false, check_state: true, check_block: true, - num_failed_views: 5, + num_failed_views: 0, transaction_threshold: 0, // very strict threshold_calculator: Arc::new(|_num_live, num_total| 2 * num_total / 3 + 1), From f165a39195eae7dfad7ff75e055719096b77bf00 Mon Sep 17 00:00:00 2001 From: Rob Date: Thu, 30 Nov 2023 16:19:16 -0500 Subject: [PATCH 0475/1393] Merge `separate out crypto tests` --- hotshot-qc/src/bit_vector.rs | 2 +- hotshot-qc/src/bit_vector_old.rs | 2 +- hotshot-qc/src/snarked/circuit.rs | 4 ++-- hotshot-stake-table/src/mt_based.rs | 2 +- hotshot-stake-table/src/mt_based/internal.rs | 4 ++-- hotshot-stake-table/src/vec_based.rs | 2 +- hotshot-state-prover/src/circuit.rs | 2 +- hotshot-state-prover/src/lib.rs | 2 +- testing/tests/timeout.rs | 1 + 9 files changed, 11 insertions(+), 10 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 833d62c673..1ac940c618 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -352,7 +352,7 @@ mod tests { }; } #[test] - fn test_quorum_certificate() { + fn crypto_test_quorum_certificate() { test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); } } diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 9fda83cbd7..4ee2129140 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -319,7 +319,7 @@ mod tests { }; } #[test] - fn test_quorum_certificate() { + fn crypto_test_quorum_certificate() { test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); } } diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index 00a0478afa..96ac750c1b 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -334,7 +334,7 @@ mod tests { }; #[test] - fn test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { + fn crypto_test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { let a_ecc = Fq377::zero(); test_vk_aggregate_sw_circuit_helper::(a_ecc)?; let a_ecc = Fq254::zero(); @@ -440,7 +440,7 @@ mod tests { } #[test] - fn test_vk_aggregate_te_circuit() -> Result<(), CircuitError> { + fn crypto_test_vk_aggregate_te_circuit() -> Result<(), CircuitError> { let d_ecc : Fq377 = MontFp!("122268283598675559488486339158635529096981886914877139579534153582033676785385790730042363341236035746924960903179"); test_vk_aggregate_te_circuit_helper::(d_ecc) } diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index ecdb15776c..93760e349e 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -230,7 +230,7 @@ mod tests { type Key = ark_bn254::Fq; #[test] - fn test_stake_table() -> Result<(), StakeTableError> { + fn crypto_test_stake_table() -> Result<(), StakeTableError> { let mut st = StakeTable::::new(3); let keys = (0..10).map(Key::from).collect::>(); assert_eq!(st.total_stake(SnapshotVersion::Head)?, U256::from(0)); diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 5ab25b855c..9325b24378 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -599,7 +599,7 @@ mod tests { type Key = ark_bn254::Fq; #[test] - fn test_persistent_merkle_tree() { + fn crypto_test_persistent_merkle_tree() { let height = 3; let mut roots = vec![Arc::new(PersistentMerkleNode::::Empty)]; let path = (0..10) @@ -712,7 +712,7 @@ mod tests { } #[test] - fn test_mt_iter() { + fn crypto_test_mt_iter() { let height = 3; let capacity = config::TREE_BRANCH.pow(height); let mut rng = test_rng(); diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index c3212ca955..042bb3defe 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -378,7 +378,7 @@ mod tests { use jf_primitives::signatures::{SchnorrSignatureScheme, SignatureScheme}; #[test] - fn test_stake_table() -> Result<(), StakeTableError> { + fn crypto_test_stake_table() -> Result<(), StakeTableError> { let mut st = StakeTable::::new(); let mut prng = jf_utils::test_rng(); let keys = (0..10) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 355a4c028f..22db3fdd9f 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -428,7 +428,7 @@ mod tests { type F = ark_ed_on_bn254::Fq; #[test] - fn test_circuit_building() { + fn crypto_test_circuit_building() { let num_validators = 10; let mut prng = test_rng(); diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 484ced9500..9bf0032559 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -175,7 +175,7 @@ mod tests { } #[test] - fn test_proof_generation() { + fn crypto_test_proof_generation() { let num_validators = 10; let mut prng = test_rng(); diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 0a82fe3127..a6d2164f33 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -69,6 +69,7 @@ async fn test_timeout_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_timeout_libp2p() { use std::time::Duration; From f0b62d0d0e8442f9d27dbd0bac783cb17d9b213b Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 13:25:05 -0800 Subject: [PATCH 0476/1393] log level & typo --- task-impls/src/consensus.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 00f9a1ac4b..562adfc6f8 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -368,7 +368,7 @@ impl, A: ConsensusApi + // Only vote if you has seen the VID share for this view if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - info!( + debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); @@ -452,13 +452,13 @@ impl, A: ConsensusApi + return true; } } - info!( + debug!( "Couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", *proposal.get_view_number(), ); return false; } - info!( + debug!( "Could not vote because we don't have a proposal yet for view {}", *self.cur_view ); @@ -1127,7 +1127,7 @@ impl, A: ConsensusApi + return; } - info!("VID disperse data is s not more than one view older."); + info!("VID disperse data is not more than one view older."); let payload_commitment = disperse.data.payload_commitment; // Check whether the sender is the right leader for this view From 4a4bfc06e433e565bef0509fc6da113bb2f9e9df Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 13:38:41 -0800 Subject: [PATCH 0477/1393] remove redundant clone --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 562adfc6f8..b72fc4769a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1153,7 +1153,7 @@ impl, A: ConsensusApi + .await; // Add to the storage that we have received the VID disperse for a specific view - self.vid_shares.insert(view, disperse.clone()); + self.vid_shares.insert(view, disperse); } HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); From 9fc5170ddb8ee2f65f34cd2e6875e371c68c2b10 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 15:31:24 -0800 Subject: [PATCH 0478/1393] Add issue link for changing VidDisperse structure --- task-impls/src/consensus.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b72fc4769a..ce4fd61cc7 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -118,6 +118,9 @@ pub struct ConsensusTaskState< pub da_certs: HashMap>, /// All the VID shares we've received for current and future views. + /// In the future we will need a different struct similar to VidDisperse except + /// it stores only one share. + /// TODO https://github.com/EspressoSystems/HotShot/issues/2146 pub vid_shares: HashMap>>, /// The most recent proposal we have, will correspond to the current view if Some() From d611eac3b2cb8ad09f6f1bb1cab705236688d54f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 16:19:19 -0800 Subject: [PATCH 0479/1393] add comments for vote_if_able() --- task-impls/src/consensus.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ce4fd61cc7..2e7d5d0e51 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -118,7 +118,7 @@ pub struct ConsensusTaskState< pub da_certs: HashMap>, /// All the VID shares we've received for current and future views. - /// In the future we will need a different struct similar to VidDisperse except + /// In the future we will need a different struct similar to VidDisperse except /// it stores only one share. /// TODO https://github.com/EspressoSystems/HotShot/issues/2146 pub vid_shares: HashMap>>, @@ -298,7 +298,8 @@ impl, A: ConsensusApi + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] - // Check if we are able to vote, like whether the proposal is valid, whether we have DAC and VID share, and if so, vote + // Check if we are able to vote, like whether the proposal is valid, + // whether we have DAC and VID share, and if so, vote. async fn vote_if_able(&self) -> bool { if !self.quorum_membership.has_stake(&self.public_key) { debug!( @@ -309,8 +310,9 @@ impl, A: ConsensusApi + } if let Some(proposal) = &self.current_proposal { // ED Need to account for the genesis DA cert + // No need to check vid share nor da cert for genesis if proposal.justify_qc.is_genesis && proposal.view_number == TYPES::Time::new(1) { - // warn!("Proposal is genesis!"); + info!("Proposal is genesis!"); let view = TYPES::Time::new(*proposal.view_number); let justify_qc = proposal.justify_qc.clone(); From 510ece00e4bd2343d39be0f45d29f875285a247c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 22:41:57 -0800 Subject: [PATCH 0480/1393] add pre-processing for test vid vote logic in consensus task --- testing/tests/consensus_task.rs | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 5f53cfa4e4..632f2efb50 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -87,9 +87,13 @@ async fn build_vote( #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_consensus_task() { + use std::marker::PhantomData; + use hotshot_testing::task_helpers::vid_init; + use hotshot_types::data::VidSchemeTrait; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::{simple_certificate::QuorumCertificate, data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, block_impl::VIDTransaction}; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -98,6 +102,32 @@ async fn test_consensus_task() { // We assign node's key pair rather than read from config file since it's a test let (private_key, public_key) = key_pair_for_id(1); + + + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let _pub_key = *api.public_key(); + let vid = vid_init(); + let transactions = vec![VIDTransaction(vec![0])]; + let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let payload_commitment = vid_disperse.commit; + + let signature = + ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); + let vid_disperse = VidDisperse { + view_number: ViewNumber::new(2), + payload_commitment, + shares: vid_disperse.shares, + common: vid_disperse.common, + }; + let _vid_proposal: Proposal> = Proposal { + data: vid_disperse.clone(), + signature: signature, + _pd: PhantomData, + }; + let mut input = Vec::new(); let mut output = HashMap::new(); From eded7b954b3cffcfeceac500e8ab36e8808e53e0 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 30 Nov 2023 23:11:42 -0800 Subject: [PATCH 0481/1393] add the issue 1732 --- task-impls/src/consensus.rs | 1 + task-impls/src/vote.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index aa630e3d1e..e90ffac24f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -120,6 +120,7 @@ pub struct ConsensusTaskState< /// In the future we will need a different struct similar to VidDisperse except /// it stores only one share. /// TODO https://github.com/EspressoSystems/HotShot/issues/2146 + /// TODO https://github.com/EspressoSystems/HotShot/issues/1732 pub vid_shares: HashMap>>, /// The most recent proposal we have, will correspond to the current view if Some() diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index d43c0198f5..578fcd6b96 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -14,8 +14,8 @@ use hotshot_task::{ }; use hotshot_types::{ simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, From 28946a931afa08cdfb20bf28f2f9897bfde81ff0 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 1 Dec 2023 10:28:17 -0800 Subject: [PATCH 0482/1393] change testing parameters for view timeout --- testing/src/test_builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f0543f2987..d3734bfcb3 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -137,14 +137,14 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(200, 0), + duration: Duration::new(300, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { ..Default::default() }, timing_data: TimingData { - next_view_timeout: 3000, + next_view_timeout: 5000, ..TimingData::default() }, ..TestMetadata::default() From 14626efbd209f3dcd292055e87d6af3da8a7edac Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 1 Dec 2023 13:49:45 -0500 Subject: [PATCH 0483/1393] [Stability] Rework spinning task to be function of view (#2130) * spinning task to be function of view * change number of rounds, fmt * remove internal events --- testing/src/spinning_task.rs | 170 +++++++++++++++++------------- testing/src/test_runner.rs | 16 ++- testing/tests/basic.rs | 12 +-- testing/tests/catchup.rs | 8 +- testing/tests/combined_network.rs | 23 ++-- testing/tests/timeout.rs | 4 +- 6 files changed, 130 insertions(+), 103 deletions(-) diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index fd00f35bb8..84e7eb91d6 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,40 +1,32 @@ -use std::{ - collections::HashMap, - sync::{atomic::AtomicUsize, Arc}, - time::Duration, -}; +use std::{collections::HashMap, sync::Arc}; -use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; -use async_compatibility_layer::art::async_sleep; +use async_compatibility_layer::channel::UnboundedStream; use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, HotShotType, SystemContext}; use hotshot_task::{ - boxed_sync, event_stream::ChannelStream, task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, task_impls::{HSTWithEventAndMessage, TaskBuilder}, - GeneratedStream, + MergeN, }; - -use hotshot_types::traits::{network::CommunicationChannel, node_implementation::NodeType}; +use hotshot_types::traits::network::CommunicationChannel; +use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; + +use crate::{test_launcher::TaskGenerator, test_runner::Node}; +pub type StateAndBlock = (Vec, Vec); + +use super::GlobalTestEvent; + #[derive(Snafu, Debug)] pub struct SpinningTaskErr {} -/// Completion task types -pub type SpinningTaskTypes = HSTWithEventAndMessage< - SpinningTaskErr, - GlobalTestEvent, - ChannelStream, - (), - GeneratedStream<()>, - SpinningTask, ->; - +/// Spinning task state pub struct SpinningTask> { pub(crate) handles: Vec>, pub(crate) late_start: HashMap>, - pub(crate) changes: Vec>, + pub(crate) changes: HashMap>, + pub(crate) latest_view: Option, } impl> TS for SpinningTask {} @@ -63,17 +55,15 @@ pub struct ChangeNode { #[derive(Clone, Debug)] pub struct SpinningTaskDescription { - pub node_changes: Vec<(Duration, Vec)>, + pub node_changes: Vec<(u64, Vec)>, } impl SpinningTaskDescription { + /// build a task pub fn build>( self, - ) -> TaskGenerator> - where - SystemContext: HotShotType, - { - Box::new(move |state, mut registry, test_event_stream| { + ) -> TaskGenerator> { + Box::new(move |mut state, mut registry, test_event_stream| { async move { let event_handler = HandleEvent::>(Arc::new(move |event, state| { @@ -86,65 +76,85 @@ impl SpinningTaskDescription { } .boxed() })); - let atomic_idx = Arc::new(AtomicUsize::new(0)); - let sleep_durations = Arc::new( - self.node_changes - .clone() - .into_iter() - .map(|(d, _)| d) - .collect::>(), - ); - let stream_generator = GeneratedStream::new(Arc::new(move || { - let atomic_idx = atomic_idx.clone(); - let sleep_durations = sleep_durations.clone(); - let atomic_idx = atomic_idx.fetch_add(1, std::sync::atomic::Ordering::SeqCst); - sleep_durations.get(atomic_idx).copied().map(|duration| { - let fut = async move { - async_sleep(duration).await; - }; - boxed_sync(fut) - }) - })); + let message_handler = HandleMessage::>(Arc::new( - move |_msg, mut state| { + move |msg, mut state| { async move { - if let Some(nodes_to_change) = state.changes.pop() { - for ChangeNode { idx, updown } in nodes_to_change { - match updown { - UpDown::Up => { - if let Some(node) = - state.late_start.remove(&idx.try_into().unwrap()) - { - tracing::error!("Spinning up node late"); - let handle = node.run_tasks().await; - handle.hotshot.start_consensus().await; + let Event { + view_number, + event: _, + } = msg.1; + + // if we have not seen this view before + if state.latest_view.is_none() + || view_number > state.latest_view.unwrap() + { + // perform operations on the nodes + if let Some(operations) = state.changes.remove(&view_number) { + for ChangeNode { idx, updown } in operations { + match updown { + UpDown::Up => { + if let Some(node) = state + .late_start + .remove(&idx.try_into().unwrap()) + { + tracing::error!( + "Node {} spinning up late", + idx + ); + let handle = node.run_tasks().await; + handle.hotshot.start_consensus().await; + } } - } - UpDown::Down => { - if let Some(node) = state.handles.get_mut(idx) { - node.handle.shut_down().await; + UpDown::Down => { + if let Some(node) = state.handles.get_mut(idx) { + tracing::error!("Node {} shutting down", idx); + node.handle.shut_down().await; + } } - } - UpDown::NetworkUp => { - if let Some(handle) = state.handles.get(idx) { - handle.networks.0.resume(); - handle.networks.1.resume(); + UpDown::NetworkUp => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!( + "Node {} networks resuming", + idx + ); + handle.networks.0.resume(); + handle.networks.1.resume(); + } } - } - UpDown::NetworkDown => { - if let Some(handle) = state.handles.get(idx) { - handle.networks.0.pause(); - handle.networks.1.pause(); + UpDown::NetworkDown => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!( + "Node {} networks pausing", + idx + ); + handle.networks.0.pause(); + handle.networks.1.pause(); + } } } } } + + // update our latest view + state.latest_view = Some(view_number); } + (None, state) } .boxed() }, )); + + let mut streams = vec![]; + for handle in &mut state.handles { + let s1 = handle + .handle + .get_event_stream_known_impl(FilterEvent::default()) + .await + .0; + streams.push(s1); + } let builder = TaskBuilder::>::new( "Test Spinning Task".to_string(), ) @@ -152,10 +162,10 @@ impl SpinningTaskDescription { .await .register_registry(&mut registry) .await - .register_state(state) - .register_event_handler(event_handler) .register_message_handler(message_handler) - .register_message_stream(stream_generator); + .register_message_stream(MergeN::new(streams)) + .register_event_handler(event_handler) + .register_state(state); let task_id = builder.get_task_id().unwrap(); (task_id, SpinningTaskTypes::build(builder).launch()) } @@ -163,3 +173,13 @@ impl SpinningTaskDescription { }) } } + +/// types for safety task +pub type SpinningTaskTypes = HSTWithEventAndMessage< + SpinningTaskErr, + GlobalTestEvent, + ChannelStream, + (usize, Event), + MergeN>>, + SpinningTask, +>; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 1f76a13515..16704e7bfe 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -4,7 +4,7 @@ use super::{ txn_task::TxnTask, }; use crate::{ - spinning_task::UpDown, + spinning_task::{ChangeNode, UpDown}, test_launcher::{Networks, TestLauncher}, }; use hotshot::{types::SystemContextHandle, Memberships}; @@ -16,7 +16,7 @@ use hotshot_task::{ use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, - traits::{election::Membership, node_implementation::NodeType}, + traits::{election::Membership, node_implementation::NodeType, state::ConsensusTime}, HotShotConfig, ValidatorConfig, }; use std::{ @@ -114,10 +114,20 @@ where task_runner = task_runner.add_task(id, "Test Completion Task".to_string(), task); // add spinning task + // map spinning to view + let mut changes: HashMap> = HashMap::new(); + for (view, mut change) in spinning_changes { + changes + .entry(TYPES::Time::new(view)) + .or_insert_with(Vec::new) + .append(&mut change); + } + let spinning_task_state = crate::spinning_task::SpinningTask { handles: nodes.clone(), late_start, - changes: spinning_changes.into_iter().map(|(_, b)| b).collect(), + latest_view: None, + changes, }; let (id, task) = (launcher.spinning_task_generator)( diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index a1acf9fa04..b259e2f918 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -38,8 +38,6 @@ async fn test_success() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_one() { - use std::time::Duration; - use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -60,7 +58,7 @@ async fn test_with_failures_one() { }]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), dead_nodes)], + node_changes: vec![(5, dead_nodes)], }; metadata .gen_launcher::(0) @@ -77,8 +75,6 @@ async fn test_with_failures_one() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_half_f() { - use std::time::Duration; - use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -109,7 +105,7 @@ async fn test_with_failures_half_f() { ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), dead_nodes)], + node_changes: vec![(5, dead_nodes)], }; metadata.overall_safety_properties.num_failed_views = 6; metadata @@ -127,8 +123,6 @@ async fn test_with_failures_half_f() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_f() { - use std::time::Duration; - use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -174,7 +168,7 @@ async fn test_with_failures_f() { ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), dead_nodes)], + node_changes: vec![(5, dead_nodes)], }; metadata .gen_launcher::(0) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 607b2f764c..47bcba167b 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -38,7 +38,7 @@ async fn test_catchup() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), catchup_nodes)], + node_changes: vec![(25, catchup_nodes)], }; metadata.completion_task_description = @@ -93,7 +93,7 @@ async fn test_catchup_web() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::from_millis(400), catchup_nodes)], + node_changes: vec![(25, catchup_nodes)], }; metadata.completion_task_description = @@ -150,7 +150,7 @@ async fn test_catchup_one_node() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::from_millis(400), catchup_nodes)], + node_changes: vec![(25, catchup_nodes)], }; metadata.completion_task_description = @@ -216,7 +216,7 @@ async fn test_catchup_in_view_sync() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(5, 0), catchup_nodes)], + node_changes: vec![(25, catchup_nodes)], }; metadata.completion_task_description = diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index ec9b253dc4..b78f48a39e 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -94,7 +94,7 @@ async fn test_combined_network_webserver_crash() { } metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), all_nodes)], + node_changes: vec![(5, all_nodes)], }; metadata @@ -154,10 +154,7 @@ async fn test_combined_network_reup() { } metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![ - (Duration::from_millis(500), all_up), - (Duration::from_millis(500), all_down), - ], + node_changes: vec![(13, all_up), (5, all_down)], }; metadata @@ -211,7 +208,7 @@ async fn test_combined_network_half_dc() { } metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(Duration::new(1, 0), half)], + node_changes: vec![(5, half)], }; metadata @@ -225,7 +222,10 @@ async fn test_combined_network_half_dc() { async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } -fn generate_random_node_changes(total_nodes: usize) -> Vec<(Duration, Vec)> { +fn generate_random_node_changes( + total_nodes: usize, + total_num_rounds: usize, +) -> Vec<(u64, Vec)> { let mut rng = rand::thread_rng(); let mut node_changes = vec![]; @@ -241,9 +241,9 @@ fn generate_random_node_changes(total_nodes: usize) -> Vec<(Duration, Vec Date: Fri, 1 Dec 2023 17:14:07 -0800 Subject: [PATCH 0484/1393] revisit metrics and solve typo --- types/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 351b834dfb..557543c10a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -68,7 +68,7 @@ pub struct Consensus { pub struct ConsensusMetricsValue { /// The values that are being tracked pub values: Arc>, - /// The number of last synced synced block height + /// The number of last synced block height pub last_synced_block_height: Box, /// The number of last decided view pub last_decided_view: Box, From 0cbbb8f7f75c06e1c9b76a4e185f03bc14f0fb81 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sat, 2 Dec 2023 11:17:25 -0500 Subject: [PATCH 0485/1393] feat: implement genesis pk generator --- hotshot-signature-key/src/bn254/bn254_pub.rs | 6 +++++- testing/tests/network_task.rs | 9 +++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 3a6c11ab19..3b8183a986 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -148,8 +148,12 @@ impl SignatureKey for BLSPubKey { } fn genesis_proposer_pk() -> Self { + use jf_primitives::signatures::bls_over_bn254::KeyPair; + use rand::rngs::mock::StepRng; + let mut my_rng = StepRng::new(42, 1337); + let kp = KeyPair::generate(&mut my_rng); BLSPubKey { - pub_key: unimplemented!(), + pub_key: kp.ver_key(), } } } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index f34025dc44..927bf85a0d 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -41,12 +41,9 @@ async fn test_network_task() { let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let signature = - ::SignatureKey::sign( - api.private_key(), - payload_commitment.as_ref(), - ) - .expect("Failed to sign block commitment"); - let da_proposal = Proposal { + ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()) + .expect("Failed to sign block commitment"); + let da_proposal = Proposal:: { data: DAProposal { encoded_transactions: encoded_transactions.clone(), metadata: (), From 13a6dc4ca53b915518d8d606832d7d0cbe81f98b Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sat, 2 Dec 2023 11:53:55 -0500 Subject: [PATCH 0486/1393] fix: accidental test breakage --- task/src/task_impls.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs index 4ff8dd1045..768e011775 100644 --- a/task/src/task_impls.rs +++ b/task/src/task_impls.rs @@ -300,6 +300,7 @@ pub mod test { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[allow(clippy::should_panic_without_expect)] + #[should_panic] async fn test_init_with_event_stream() { setup_logging(); let task = TaskBuilder::::new("Test Task".to_string()); From b1070ce937d6da6b21f8c5652ecef551ca5fe91f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Sat, 2 Dec 2023 18:31:47 -0500 Subject: [PATCH 0487/1393] [Stability] Fix DHT timeout (#2150) * separate out crypto tests * spinning task to be function of view * change number of rounds, fmt * ignore libp2p timeout test * dht fixes * down later * test * Revert "test" This reverts commit dc24ecb842494d7e2bc0cf66ee10283cf0ae547d. * Revert "down later" This reverts commit 9e0e25724d43ea8aa28959a8c75933881fae029d. * Revert "dht fixes" This reverts commit 2c7de9db65b2fe98790ddda0515d9a8024ababdd. * push * decrease dht timeout * change delays --- hotshot/src/traits/networking/libp2p_network.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 2 +- testing/tests/timeout.rs | 8 +++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 00453289de..73512d808e 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -339,7 +339,7 @@ impl Libp2pNetwork { is_ready: Arc::new(AtomicBool::new(false)), // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples // https://github.com/EspressoSystems/HotShot/issues/2088 - dht_timeout: Duration::from_secs(8), + dht_timeout: Duration::from_secs(1), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, topic_map, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 39c763ac31..5c50d61cb7 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -297,7 +297,7 @@ impl NetworkNodeHandle { let pid = self.get_record_timeout::(&key, dht_timeout).await?; // pid lookup for routing - self.lookup_pid(pid).await?; + // self.lookup_pid(pid).await?; Ok(pid) } diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 6b84e95b2e..17a109dabd 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -69,7 +69,6 @@ async fn test_timeout_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_timeout_libp2p() { use std::time::Duration; @@ -87,16 +86,19 @@ async fn test_timeout_libp2p() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, + start_delay: 2000, + round_start_delay: 1000, ..Default::default() }; let mut metadata = TestMetadata { total_nodes: 10, start_nodes: 10, + num_bootstrap_nodes: 10, ..Default::default() }; let dead_nodes = vec![ChangeNode { - idx: 5, + idx: 9, updown: UpDown::Down, }]; @@ -108,7 +110,7 @@ async fn test_timeout_libp2p() { }; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(2, dead_nodes)], + node_changes: vec![(5, dead_nodes)], }; metadata.completion_task_description = From 65fbc940890b5f4d1a7997956ab724e16b895de0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 3 Dec 2023 10:52:06 -0500 Subject: [PATCH 0488/1393] Bump jf-plonk from `265eaaa` to `833d7ca` (#2138) * Bump jf-plonk from `265eaaa` to `833d7ca` Bumps [jf-plonk](https://github.com/EspressoSystems/jellyfish) from `265eaaa` to `833d7ca`. - [Release notes](https://github.com/EspressoSystems/jellyfish/releases) - [Commits](https://github.com/EspressoSystems/jellyfish/compare/265eaaa059e0a501824e5aadf7ac5f32d9e4a6b8...833d7ca3acdce33ae29dbd319a86cce9e69e2fd1) --- updated-dependencies: - dependency-name: jf-plonk dependency-type: direct:production ... Signed-off-by: dependabot[bot] * update powers of h --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Rob --- hotshot-state-prover/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 29f3334cea..86eba4420c 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -166,6 +166,7 @@ mod tests { powers_of_g, h, beta_h, + powers_of_h: vec![h, beta_h], }; end_timer!(setup_time); Ok(pp) From 722e5a145358bc2cbece4e3a515622c2328c7dfe Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 4 Dec 2023 10:21:11 -0500 Subject: [PATCH 0489/1393] VID share distribution algorithm (#2149) * vid share distribution algorithm * lint * remove println * update algorithm and issue * update power of two algo * remove casts * update algo * simplify --- task-impls/src/vid.rs | 31 ++++++++++++++--------------- testing/src/task_helpers.rs | 20 +++++++++++++++---- testing/tests/network_task.rs | 19 +++++++++++------- testing/tests/vid_task.rs | 19 +++++++++++------- types/src/data.rs | 37 +++++++++++++++++++++++++++++++---- 5 files changed, 88 insertions(+), 38 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 9118c9082a..17e51629bf 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -20,10 +20,7 @@ use hotshot_types::{ }; use hotshot_types::{ data::{test_srs, VidScheme, VidSchemeTrait}, - traits::{ - block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, - network::ConsensusIntentEvent, - }, + traits::network::ConsensusIntentEvent, }; use hotshot_task::event_stream::EventStream; @@ -54,7 +51,7 @@ pub struct VIDTaskState< pub consensus: Arc>>, /// Network for all nodes pub network: Arc, - /// Membership for teh quorum + /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -81,12 +78,19 @@ impl, A: ConsensusApi + ) -> Option { match event { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { + // get quorum committee for dispersal + let num_quorum_committee = self.membership.get_committee(view_number).len(); + // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let srs = test_srs(num_quorum_committee); + + // calculate the last power of two + // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 + // issue: https://github.com/EspressoSystems/HotShot/issues/2152 + let chunk_size = 1 << num_quorum_committee.ilog2(); + + // calculate vid shares + let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); // send the commitment and metadata to consensus for block building @@ -100,12 +104,7 @@ impl, A: ConsensusApi + // send the block to the VID dispersal function self.event_stream .publish(HotShotEvent::BlockReady( - VidDisperse { - view_number, - payload_commitment: vid_disperse.commit, - shares: vid_disperse.shares, - common: vid_disperse.common, - }, + VidDisperse::from_membership(view_number, vid_disperse, &self.membership), view_number, )) .await; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ebdb53904f..25c4b5575f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,8 +18,8 @@ use hotshot_types::{ message::Proposal, simple_certificate::QuorumCertificate, traits::{ + block_contents::vid_commitment, block_contents::BlockHeader, - block_contents::{vid_commitment, NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusSharedApi, election::Membership, node_implementation::NodeType, @@ -171,7 +171,19 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey (private_key, public_key) } -pub fn vid_init() -> VidScheme { - let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap() +pub fn vid_init( + membership: TYPES::Membership, + view_number: TYPES::Time, +) -> VidScheme { + let num_committee = membership.get_committee(view_number).len(); + + // calculate the last power of two + // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 + // issue: https://github.com/EspressoSystems/HotShot/issues/2152 + let chunk_size = 1 << num_committee.ilog2(); + + // TODO + let srs = hotshot_types::data::test_srs(num_committee); + + VidScheme::new(chunk_size, num_committee, srs).unwrap() } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 0d0f469864..9508ab10dd 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -32,7 +32,12 @@ async fn test_network_task() { }; let pub_key = *api.public_key(); let priv_key = api.private_key(); - let vid = vid_init(); + + // quorum membership for VID share distribution + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); + let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -53,12 +58,12 @@ async fn test_network_task() { }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - let da_vid_disperse_inner = VidDisperse { - view_number: da_proposal.data.view_number, - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }; + let da_vid_disperse_inner = VidDisperse::from_membership( + da_proposal.data.view_number, + vid_disperse, + &quorum_membership.into(), + ); + // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 185c54d03f..0e721892d1 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -33,7 +33,11 @@ async fn test_vid_task() { }; let pub_key = *api.public_key(); - let vid = vid_init(); + // quorum membership for VID share distribution + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); + let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -51,12 +55,13 @@ async fn test_vid_task() { signature, _pd: PhantomData, }; - let vid_disperse = VidDisperse { - view_number: message.data.view_number, - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }; + + let vid_disperse = VidDisperse::from_membership( + message.data.view_number, + vid_disperse, + &quorum_membership.into(), + ); + let vid_proposal = Proposal { data: vid_disperse.clone(), signature: message.signature.clone(), diff --git a/types/src/data.rs b/types/src/data.rs index 624f9fd257..3b8e4c707f 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,6 +8,7 @@ use crate::{ traits::{ block_contents::vid_commitment, block_contents::BlockHeader, + election::Membership, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -23,14 +24,18 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; -// use jf_primitives::pcs::prelude::Commitment; -use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; +use jf_primitives::{ + pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}, + vid::VidDisperse as JfVidDisperse, +}; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{ + collections::BTreeMap, fmt::{Debug, Display}, hash::Hash, + sync::Arc, }; /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. @@ -138,12 +143,36 @@ pub struct VidDisperse { pub view_number: TYPES::Time, /// Block payload commitment pub payload_commitment: VidCommitment, - /// VID shares dispersed among storage nodes - pub shares: Vec<::Share>, + /// A storage node's key and its corresponding VID share + pub shares: BTreeMap::Share>, /// VID common data sent to all storage nodes pub common: ::Common, } +impl VidDisperse { + /// Create VID dispersal from a specified membership + /// Uses the specified function to calculate share dispersal + /// Allows for more complex stake table functionality + pub fn from_membership( + view_number: TYPES::Time, + mut vid_disperse: JfVidDisperse, + membership: &Arc, + ) -> Self { + let shares = membership + .get_committee(view_number) + .iter() + .map(|node| (node.clone(), vid_disperse.shares.remove(0))) + .collect(); + + Self { + view_number, + shares, + common: vid_disperse.common, + payload_commitment: vid_disperse.commit, + } + } +} + /// Trusted KZG setup for VID. /// /// TESTING ONLY: don't use this in production From 1ccd023cffb3a9c3f473508d3e57059f87aa4b90 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Mon, 4 Dec 2023 10:34:50 -0500 Subject: [PATCH 0490/1393] Update web server and orchestrator to use Url library --- hotshot/examples/combined/all.rs | 11 +++--- hotshot/examples/infra/mod.rs | 26 +++++-------- hotshot/examples/libp2p/all.rs | 5 ++- hotshot/examples/webserver/all.rs | 10 ++--- hotshot/examples/webserver/multi-webserver.rs | 19 +++------- hotshot/examples/webserver/webserver.rs | 6 +-- .../traits/networking/web_server_network.rs | 38 ++++++++----------- orchestrator/run-config.toml | 7 ++-- orchestrator/src/config.rs | 4 +- orchestrator/src/lib.rs | 9 ++--- web_server/src/lib.rs | 9 ++--- 11 files changed, 56 insertions(+), 88 deletions(-) diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index 48cc1cbb89..c727bdcbad 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -12,6 +12,7 @@ use hotshot_orchestrator::config::NetworkConfig; use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; +use surf_disco::Url; use tracing::{error, instrument}; use crate::{ @@ -47,8 +48,7 @@ async fn main() { ::SignatureKey, >( Some(server_shutdown_cdn), - "http://localhost".to_string(), - 9000, + Url::parse("http://localhost:9000").unwrap(), ) .await { @@ -60,8 +60,7 @@ async fn main() { ::SignatureKey, >( Some(server_shutdown_da), - "http://localhost".to_string(), - 9001, + Url::parse("http://localhost:9001").unwrap(), ) .await { @@ -78,8 +77,8 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - url: "http://localhost".to_string(), - port: 4444, + url: Url::parse("http://localhost:4444").unwrap(), + config_file: args.config_file.clone(), })); diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 83b360dbc5..77ded04821 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -53,6 +53,7 @@ use std::marker::PhantomData; use std::time::Duration; use std::{collections::BTreeSet, sync::Arc}; use std::{num::NonZeroUsize, str::FromStr}; +use surf_disco::Url; use libp2p_identity::PeerId; use std::fmt::Debug; @@ -66,10 +67,8 @@ use tracing::{error, info, warn}; )] /// Arguments passed to the orchestrator pub struct OrchestratorArgs { - /// The url the orchestrator runs on; this should be in the form of `http://localhost` or `http://0.0.0.0` - pub url: String, - /// The port the orchestrator runs on - pub port: u16, + /// The url the orchestrator runs on; this should be in the form of `http://localhost:5555` or `http://0.0.0.0:5555` + pub url: Url, /// The configuration file to be used for this run pub config_file: String, } @@ -125,18 +124,14 @@ pub async fn run_orchestrator< VIDCHANNEL: CommunicationChannel + Debug, NODE: NodeImplementation>, >( - OrchestratorArgs { - url, - port, - config_file, - }: OrchestratorArgs, + OrchestratorArgs { url, config_file }: OrchestratorArgs, ) { error!("Starting orchestrator",); let run_config = load_config_from_file::(config_file); let _result = hotshot_orchestrator::run_orchestrator::< TYPES::SignatureKey, TYPES::ElectionConfigType, - >(run_config, url, port) + >(run_config, url) .await; } @@ -158,11 +153,10 @@ async fn webserver_network_from_config( // Get the configuration for the web server let WebServerConfig { url, - port, wait_between_polls, }: WebServerConfig = config.clone().web_server_config.unwrap(); - WebServerNetwork::create(&url, port, wait_between_polls, pub_key.clone(), false) + WebServerNetwork::create(url, wait_between_polls, pub_key.clone(), false) } async fn libp2p_network_from_config( @@ -533,7 +527,6 @@ where // extract values from config (for DA network) let WebServerConfig { url, - port, wait_between_polls, }: WebServerConfig = config.clone().da_web_server_config.unwrap(); @@ -551,11 +544,11 @@ where WebCommChannel::new(underlying_quorum_network.into()); let da_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(&url, port, wait_between_polls, pub_key.clone(), true).into(), + WebServerNetwork::create(url.clone(), wait_between_polls, pub_key.clone(), true).into(), ); let vid_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(&url, port, wait_between_polls, pub_key, true).into(), + WebServerNetwork::create(url, wait_between_polls, pub_key, true).into(), ); WebServerDARun { @@ -739,7 +732,6 @@ where // extract values from config (for webserver DA network) let WebServerConfig { url, - port, wait_between_polls, }: WebServerConfig = config.clone().da_web_server_config.unwrap(); @@ -748,7 +740,7 @@ where webserver_network_from_config::(config.clone(), pub_key.clone()).await; let webserver_underlying_da_network = - WebServerNetwork::create(&url, port, wait_between_polls, pub_key, true); + WebServerNetwork::create(url, wait_between_polls, pub_key, true); webserver_underlying_quorum_network.wait_for_ready().await; diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index cf0f9e69b2..cf738c9a71 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -10,6 +10,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; +use surf_disco::Url; use tracing::instrument; use crate::{ @@ -43,8 +44,8 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - url: "http://localhost".to_string(), - port: 4444, + url: Url::parse("http://localhost:4444").unwrap(), + config_file: args.config_file.clone(), })); diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index 1e53857a6e..f0a774e65e 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -19,6 +19,7 @@ use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; use hotshot_types::traits::node_implementation::NodeType; +use surf_disco::Url; use tracing::error; use types::VIDNetwork; @@ -39,8 +40,7 @@ async fn main() { ::SignatureKey, >( Some(server_shutdown_cdn), - "http://localhost".to_string(), - 9000, + Url::parse("http://localhost:9000").unwrap(), ) .await { @@ -52,8 +52,7 @@ async fn main() { ::SignatureKey, >( Some(server_shutdown_da), - "http://localhost".to_string(), - 9001, + Url::parse("http://localhost:9001").unwrap(), ) .await { @@ -70,8 +69,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - url: "http://localhost".to_string(), - port: 4444, + url: Url::parse("http://localhost:4444").unwrap(), config_file: args.config_file.clone(), })); diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index 182080aba4..1b0aa3d069 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -7,14 +7,13 @@ use async_compatibility_layer::{ }; use clap::Parser; use hotshot::demo::DemoTypes; +use surf_disco::Url; use tracing::error; #[derive(Parser, Debug)] struct MultiWebServerArgs { - consensus_url: String, - da_url: String, - consensus_port: u16, - da_port: u16, + consensus_url: Url, + da_url: Url, } #[cfg_attr(async_executor_impl = "tokio", tokio::main)] @@ -32,11 +31,7 @@ async fn main() { let consensus_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >( - Some(server_shutdown_cdn), - args.consensus_url.to_string(), - args.consensus_port, - ) + >(Some(server_shutdown_cdn), args.consensus_url) .await { error!("Problem starting cdn web server: {:?}", e); @@ -45,11 +40,7 @@ async fn main() { let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, - >( - Some(server_shutdown_da), - args.da_url.to_string(), - args.da_port, - ) + >(Some(server_shutdown_da), args.da_url) .await { error!("Problem starting da web server: {:?}", e); diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index 533ffa351e..c9bc3c1279 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -1,5 +1,6 @@ use hotshot::demo::DemoTypes; use std::sync::Arc; +use surf_disco::Url; use async_compatibility_layer::{ channel::oneshot, @@ -9,8 +10,7 @@ use clap::Parser; #[derive(Parser, Debug)] struct WebServerArgs { - url: String, - port: u16, + url: Url, } #[cfg_attr(async_executor_impl = "tokio", tokio::main)] @@ -23,6 +23,6 @@ async fn main() { let _sender = Arc::new(server_shutdown_sender); let _result = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown), args.url, args.port) + >(Some(server_shutdown), args.url) .await; } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 7ba586b256..ce2e60e733 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -27,6 +27,7 @@ use hotshot_types::{ use hotshot_web_server::{self, config}; use rand::random; use serde::{Deserialize, Serialize}; +use surf_disco::Url; use hotshot_types::traits::network::ViewMessage; use std::{ @@ -472,22 +473,15 @@ impl WebServerNetwork { /// # Panics /// if the web server url is malformed pub fn create( - url: &str, - port: u16, + url: Url, wait_between_polls: Duration, key: TYPES::SignatureKey, is_da_server: bool, ) -> Self { - let base_url_string = format!("{url}:{port}"); - info!("Connecting to web server at {base_url_string:?} is da: {is_da_server}"); - - let base_url = base_url_string.parse(); - if base_url.is_err() { - error!("Web server url {:?} is malformed", base_url_string); - } + info!("Connecting to web server at {url:?} is da: {is_da_server}"); // TODO ED Wait for healthcheck - let client = surf_disco::Client::::new(base_url.unwrap()); + let client = surf_disco::Client::::new(url); let inner = Arc::new(Inner { broadcast_poll_queue: Arc::default(), @@ -802,7 +796,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } // GC proposal collection if we are two views in the future @@ -842,7 +836,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } // GC proposal collection if we are two views in the future @@ -897,7 +891,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } // GC proposal collection if we are two views in the future @@ -935,7 +929,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } // GC proposal collection if we are two views in the future @@ -989,7 +983,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } // TODO ED Do we need to GC before returning? Or will view sync task handle that? @@ -1019,7 +1013,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } } @@ -1072,7 +1066,7 @@ impl ConnectedNetwork, TYPES::Signatur } }); } else { - error!("Somehow task already existed!"); + debug!("Somehow task already existed!"); } // TODO ED Do we need to GC before returning? Or will view sync task handle that? @@ -1107,15 +1101,13 @@ impl TestableNetworkingImplementation for WebServerNetwo ) -> Box Self + 'static> { let (server_shutdown_sender, server_shutdown) = oneshot(); let sender = Arc::new(server_shutdown_sender); - let url = "http://localhost"; - // TODO ED Restrict this to be an open port using portpicker let port = random::(); + let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); info!("Launching web server on port {port}"); // Start web server async_spawn(hotshot_web_server::run_web_server::( Some(server_shutdown), - url.to_owned(), - port, + url, )); // We assign known_nodes' public key and stake value rather than read from config file since it's a test @@ -1130,9 +1122,9 @@ impl TestableNetworkingImplementation for WebServerNetwo // Start each node's web server client Box::new(move |id| { let sender = Arc::clone(&sender); + let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); let mut network = WebServerNetwork::create( - "http://localhost", - port, + url, Duration::from_millis(100), known_nodes[id as usize].clone(), is_da, diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 80ef5aa4f2..e3d06f86f8 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -66,12 +66,11 @@ online_time = 10 base_port = 9000 [web_server_config] -url = "http://localhost" -port = 9000 +url = "http://localhost:9000" [da_web_server_config] -url = "http://localhost" -port = 9001 +url = "http://localhost:9001" + [web_server_config.wait_between_polls] secs = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 7082b318b0..9966c8a9fd 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -10,6 +10,7 @@ use std::{ path::PathBuf, time::Duration, }; +use surf_disco::Url; use toml; use tracing::error; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] @@ -52,8 +53,7 @@ pub struct Libp2pConfigFile { #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct WebServerConfig { - pub url: String, - pub port: u16, + pub url: Url, pub wait_between_polls: Duration, } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 69bb39e68e..eda551f924 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -10,7 +10,7 @@ use std::{ }; use tide_disco::{Api, App}; -use surf_disco::error::ClientError; +use surf_disco::{error::ClientError, Url}; use tide_disco::{ api::ApiError, error::ServerError, @@ -237,8 +237,7 @@ where /// Runs the orchestrator pub async fn run_orchestrator( network_config: NetworkConfig, - url: String, - port: u16, + url: Url, ) -> io::Result<()> where KEY: SignatureKey + 'static + serde::Serialize, @@ -252,6 +251,6 @@ where let mut app = App::>, ServerError>::with_state(state); app.register_module("api", api.unwrap()) .expect("Error registering api"); - tracing::error!("listening on {:?}:{:?}", url, port); - app.serve(format!("{url}:{port}")).await + tracing::error!("listening on {:?}", url); + app.serve(url).await } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index b4eefb0144..8b492e21bf 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -13,7 +13,7 @@ use tide_disco::{ api::ApiError, error::ServerError, method::{ReadState, WriteState}, - Api, App, StatusCode, + Api, App, StatusCode, Url, }; use tracing::{debug, info}; @@ -821,8 +821,7 @@ where pub async fn run_web_server( shutdown_listener: Option>, - url: String, - port: u16, + url: Url, ) -> io::Result<()> { let options = Options::default(); @@ -832,9 +831,7 @@ pub async fn run_web_server( app.register_module("api", api).unwrap(); - let app_future = app.serve(format!("{url}:{port}")); - - info!("Web server started on port {port}"); + let app_future = app.serve(url); app_future.await } From 56974a187000c51aa05d1114beef1a0b0b32839f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 4 Dec 2023 12:46:24 -0500 Subject: [PATCH 0491/1393] skip libp2p timeout test (#2157) --- testing/tests/timeout.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 17a109dabd..4c5946d775 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -68,6 +68,7 @@ async fn test_timeout_web() { async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] +#[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_timeout_libp2p() { use std::time::Duration; From d23119ca432ea27d61eb6e32cbbc60a22a6df5a5 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 4 Dec 2023 15:15:24 -0500 Subject: [PATCH 0492/1393] revert vid share distribution algo (#2161) --- task-impls/src/vid.rs | 31 +++++++++++++++-------------- testing/src/task_helpers.rs | 20 ++++--------------- testing/tests/network_task.rs | 19 +++++++----------- testing/tests/vid_task.rs | 19 +++++++----------- types/src/data.rs | 37 ++++------------------------------- 5 files changed, 38 insertions(+), 88 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 17e51629bf..9118c9082a 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -20,7 +20,10 @@ use hotshot_types::{ }; use hotshot_types::{ data::{test_srs, VidScheme, VidSchemeTrait}, - traits::network::ConsensusIntentEvent, + traits::{ + block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, + network::ConsensusIntentEvent, + }, }; use hotshot_task::event_stream::EventStream; @@ -51,7 +54,7 @@ pub struct VIDTaskState< pub consensus: Arc>>, /// Network for all nodes pub network: Arc, - /// Membership for the quorum + /// Membership for teh quorum pub membership: Arc, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -78,19 +81,12 @@ impl, A: ConsensusApi + ) -> Option { match event { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { - // get quorum committee for dispersal - let num_quorum_committee = self.membership.get_committee(view_number).len(); - // TODO - let srs = test_srs(num_quorum_committee); - - // calculate the last power of two - // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 - // issue: https://github.com/EspressoSystems/HotShot/issues/2152 - let chunk_size = 1 << num_quorum_committee.ilog2(); - - // calculate vid shares - let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); + let srs = test_srs(NUM_STORAGE_NODES); + // TODO We are using constant numbers for now, but they will change as the quorum size + // changes. + // TODO + let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); // send the commitment and metadata to consensus for block building @@ -104,7 +100,12 @@ impl, A: ConsensusApi + // send the block to the VID dispersal function self.event_stream .publish(HotShotEvent::BlockReady( - VidDisperse::from_membership(view_number, vid_disperse, &self.membership), + VidDisperse { + view_number, + payload_commitment: vid_disperse.commit, + shares: vid_disperse.shares, + common: vid_disperse.common, + }, view_number, )) .await; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 25c4b5575f..ebdb53904f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,8 +18,8 @@ use hotshot_types::{ message::Proposal, simple_certificate::QuorumCertificate, traits::{ - block_contents::vid_commitment, block_contents::BlockHeader, + block_contents::{vid_commitment, NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusSharedApi, election::Membership, node_implementation::NodeType, @@ -171,19 +171,7 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey (private_key, public_key) } -pub fn vid_init( - membership: TYPES::Membership, - view_number: TYPES::Time, -) -> VidScheme { - let num_committee = membership.get_committee(view_number).len(); - - // calculate the last power of two - // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 - // issue: https://github.com/EspressoSystems/HotShot/issues/2152 - let chunk_size = 1 << num_committee.ilog2(); - - // TODO - let srs = hotshot_types::data::test_srs(num_committee); - - VidScheme::new(chunk_size, num_committee, srs).unwrap() +pub fn vid_init() -> VidScheme { + let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); + VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap() } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 9508ab10dd..0d0f469864 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -32,12 +32,7 @@ async fn test_network_task() { }; let pub_key = *api.public_key(); let priv_key = api.private_key(); - - // quorum membership for VID share distribution - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - - let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); - + let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -58,12 +53,12 @@ async fn test_network_task() { }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - let da_vid_disperse_inner = VidDisperse::from_membership( - da_proposal.data.view_number, - vid_disperse, - &quorum_membership.into(), - ); - + let da_vid_disperse_inner = VidDisperse { + view_number: da_proposal.data.view_number, + payload_commitment, + shares: vid_disperse.shares, + common: vid_disperse.common, + }; // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 0e721892d1..185c54d03f 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -33,11 +33,7 @@ async fn test_vid_task() { }; let pub_key = *api.public_key(); - // quorum membership for VID share distribution - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - - let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); - + let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -55,13 +51,12 @@ async fn test_vid_task() { signature, _pd: PhantomData, }; - - let vid_disperse = VidDisperse::from_membership( - message.data.view_number, - vid_disperse, - &quorum_membership.into(), - ); - + let vid_disperse = VidDisperse { + view_number: message.data.view_number, + payload_commitment, + shares: vid_disperse.shares, + common: vid_disperse.common, + }; let vid_proposal = Proposal { data: vid_disperse.clone(), signature: message.signature.clone(), diff --git a/types/src/data.rs b/types/src/data.rs index 3b8e4c707f..624f9fd257 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,7 +8,6 @@ use crate::{ traits::{ block_contents::vid_commitment, block_contents::BlockHeader, - election::Membership, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -24,18 +23,14 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; -use jf_primitives::{ - pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}, - vid::VidDisperse as JfVidDisperse, -}; +// use jf_primitives::pcs::prelude::Commitment; +use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{ - collections::BTreeMap, fmt::{Debug, Display}, hash::Hash, - sync::Arc, }; /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. @@ -143,36 +138,12 @@ pub struct VidDisperse { pub view_number: TYPES::Time, /// Block payload commitment pub payload_commitment: VidCommitment, - /// A storage node's key and its corresponding VID share - pub shares: BTreeMap::Share>, + /// VID shares dispersed among storage nodes + pub shares: Vec<::Share>, /// VID common data sent to all storage nodes pub common: ::Common, } -impl VidDisperse { - /// Create VID dispersal from a specified membership - /// Uses the specified function to calculate share dispersal - /// Allows for more complex stake table functionality - pub fn from_membership( - view_number: TYPES::Time, - mut vid_disperse: JfVidDisperse, - membership: &Arc, - ) -> Self { - let shares = membership - .get_committee(view_number) - .iter() - .map(|node| (node.clone(), vid_disperse.shares.remove(0))) - .collect(); - - Self { - view_number, - shares, - common: vid_disperse.common, - payload_commitment: vid_disperse.commit, - } - } -} - /// Trusted KZG setup for VID. /// /// TESTING ONLY: don't use this in production From 2ff10f6ab8ff1e31ff8fcc9cef29171ce5027896 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 4 Dec 2023 13:50:34 -0800 Subject: [PATCH 0493/1393] Fix lint --- testing/tests/timeout.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 8aa4f27396..1dda55c9c3 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -68,7 +68,6 @@ async fn test_timeout_web() { async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] -#[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_timeout_libp2p() { From 6bee8df414015bdf6dc8d7174a72c55cc17a25f6 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 5 Dec 2023 12:52:41 +0000 Subject: [PATCH 0494/1393] #2059: Organize non-prod code (#2134) * Move example/test implementations to hotshot_testing Closes #2059 * Warn when using demo impls in production builds * Refactor demo/test types Collapsed three virtually identical `NodeType` implementations into one, split code from `testing::demo` into more descriptive modules and renamed various test types to conform to a single naming scheme. * Remove 'demo' features Those weren't actually gating anything anymore --- hotshot/Cargo.toml | 40 ++-- hotshot/examples/combined/all.rs | 16 +- hotshot/examples/combined/multi-validator.rs | 4 +- hotshot/examples/combined/orchestrator.rs | 4 +- hotshot/examples/combined/types.rs | 24 ++- hotshot/examples/combined/validator.rs | 4 +- hotshot/examples/infra/mod.rs | 31 ++- hotshot/examples/libp2p/all.rs | 12 +- hotshot/examples/libp2p/multi-validator.rs | 4 +- hotshot/examples/libp2p/orchestrator.rs | 4 +- hotshot/examples/libp2p/types.rs | 24 ++- hotshot/examples/libp2p/validator.rs | 4 +- hotshot/examples/webserver/all.rs | 16 +- hotshot/examples/webserver/multi-validator.rs | 4 +- hotshot/examples/webserver/multi-webserver.rs | 6 +- hotshot/examples/webserver/orchestrator.rs | 4 +- hotshot/examples/webserver/types.rs | 24 ++- hotshot/examples/webserver/validator.rs | 4 +- hotshot/examples/webserver/webserver.rs | 4 +- hotshot/src/demo.rs | 181 ------------------ hotshot/src/lib.rs | 2 - hotshot/src/traits.rs | 5 - .../src/traits/networking/combined_network.rs | 10 +- hotshot/src/traits/storage/memory_storage.rs | 51 ++--- testing/Cargo.toml | 11 +- .../src/block_types.rs | 55 +++--- testing/src/lib.rs | 18 +- testing/src/node_types.rs | 15 +- testing/src/state_types.rs | 106 ++++++++++ testing/src/task_helpers.rs | 6 +- testing/tests/da_task.rs | 10 +- testing/tests/memory_network.rs | 16 +- testing/tests/network_task.rs | 8 +- testing/tests/vid_task.rs | 6 +- types/Cargo.toml | 3 - types/src/lib.rs | 2 +- types/src/traits/state.rs | 95 --------- web_server/Cargo.toml | 4 - 38 files changed, 318 insertions(+), 519 deletions(-) delete mode 100644 hotshot/src/demo.rs rename types/src/block_impl.rs => testing/src/block_types.rs (81%) create mode 100644 testing/src/state_types.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index f0a62be572..612872f976 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -8,14 +8,7 @@ version = "0.3.3" rust-version = "1.65.0" [features] -default = ["demo", "docs", "doc-images"] - -# Enable demo/testing logic -demo = [ - "hotshot-types/demo", - "libp2p/rsa", - "dep:derivative", -] +default = ["docs", "doc-images"] # Features required for binaries bin-orchestrator = ["clap"] @@ -28,74 +21,74 @@ hotshot-testing = [] # libp2p [[example]] name = "validator-libp2p" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/libp2p/validator.rs" [[example]] name = "multi-validator-libp2p" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/libp2p/multi-validator.rs" [[example]] name = "orchestrator-libp2p" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/libp2p/orchestrator.rs" [[example]] name = "all-libp2p" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/libp2p/all.rs" # webserver [[example]] name = "webserver" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/webserver/webserver.rs" [[example]] name = "orchestrator-webserver" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/webserver/orchestrator.rs" [[example]] name = "validator-webserver" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/webserver/validator.rs" [[example]] name = "multi-validator-webserver" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/webserver/multi-validator.rs" [[example]] name = "multi-webserver" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/webserver/multi-webserver.rs" [[example]] name = "all-webserver" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/webserver/all.rs" # combined [[example]] name = "all-combined" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/combined/all.rs" [[example]] name = "multi-validator-combined" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/combined/multi-validator.rs" [[example]] name = "validator-combined" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/combined/validator.rs" [[example]] name = "orchestrator-combined" -required-features = ["demo", "libp2p/rsa"] +required-features = ["libp2p/rsa"] path = "examples/combined/orchestrator.rs" [dependencies] @@ -147,4 +140,5 @@ async-std = { workspace = true } blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } serde_json = "1.0.108" -toml = { workspace = true } \ No newline at end of file +toml = { workspace = true } +hotshot-testing = { path = "../testing" } diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index c727bdcbad..b55d0d9374 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -6,9 +6,9 @@ use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::channel::oneshot; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; +use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; @@ -45,7 +45,7 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >( Some(server_shutdown_cdn), Url::parse("http://localhost:9000").unwrap(), @@ -57,7 +57,7 @@ async fn main() { }); async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >( Some(server_shutdown_da), Url::parse("http://localhost:9001").unwrap(), @@ -70,7 +70,7 @@ async fn main() { // orchestrator async_spawn(run_orchestrator::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, @@ -84,14 +84,14 @@ async fn main() { // nodes let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = load_config_from_file::(args.config_file); + ::SignatureKey, + ::ElectionConfigType, + > = load_config_from_file::(args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { let node = async_spawn(async move { infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index c5249f4ba7..ff9b5507bd 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -3,8 +3,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_testing::state_types::TestTypes; use std::net::IpAddr; use tracing::instrument; use types::VIDNetwork; @@ -50,7 +50,7 @@ async fn main() { let node = async_spawn(async move { infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/combined/orchestrator.rs b/hotshot/examples/combined/orchestrator.rs index f21ee05552..fd61f16648 100644 --- a/hotshot/examples/combined/orchestrator.rs +++ b/hotshot/examples/combined/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; @@ -24,7 +24,7 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::( + run_orchestrator::( args, ) .await; diff --git a/hotshot/examples/combined/types.rs b/hotshot/examples/combined/types.rs index e34c6f1b99..36091ac507 100644 --- a/hotshot/examples/combined/types.rs +++ b/hotshot/examples/combined/types.rs @@ -1,8 +1,6 @@ use crate::infra::CombinedDARun; -use hotshot::{ - demo::DemoTypes, - traits::implementations::{CombinedCommChannel, MemoryStorage}, -}; +use hotshot::traits::implementations::{CombinedCommChannel, MemoryStorage}; +use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -10,20 +8,20 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type DANetwork = CombinedCommChannel; -pub type VIDNetwork = CombinedCommChannel; -pub type QuorumNetwork = CombinedCommChannel; -pub type ViewSyncNetwork = CombinedCommChannel; +pub type DANetwork = CombinedCommChannel; +pub type VIDNetwork = CombinedCommChannel; +pub type QuorumNetwork = CombinedCommChannel; +pub type ViewSyncNetwork = CombinedCommChannel; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { + start_view: ::Time, + ) -> (ChannelMaps, Option>) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = CombinedDARun; +pub type ThisRun = CombinedDARun; diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs index 0d7ba8eddf..52cbf7bcb1 100644 --- a/hotshot/examples/combined/validator.rs +++ b/hotshot/examples/combined/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use tracing::{info, instrument}; use types::VIDNetwork; @@ -28,7 +28,7 @@ async fn main() { args.url, args.port ); infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 77ded04821..6d3618999e 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -22,12 +22,11 @@ use hotshot_orchestrator::{ config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_types::block_impl::VIDBlockHeader; +use hotshot_testing::block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}; use hotshot_types::message::Message; use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::ValidatorConfig; use hotshot_types::{ - block_impl::{VIDBlockPayload, VIDTransaction}, consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, event::{Event, EventType}, @@ -291,7 +290,7 @@ pub trait RunDA< > where ::StateType: TestableState, ::BlockPayload: TestableBlock, - TYPES: NodeType, + TYPES: NodeType, Leaf: TestableLeaf, Self: Sync, SystemContext: HotShotType, @@ -372,7 +371,7 @@ pub trait RunDA< async fn run_hotshot( &self, mut context: SystemContextHandle, - transactions: &mut Vec, + transactions: &mut Vec, transactions_to_send_per_round: u64, ) { let NetworkConfig { @@ -493,9 +492,9 @@ pub struct WebServerDARun { #[async_trait] impl< TYPES: NodeType< - Transaction = VIDTransaction, - BlockPayload = VIDBlockPayload, - BlockHeader = VIDBlockHeader, + Transaction = TestTransaction, + BlockPayload = TestBlockPayload, + BlockHeader = TestBlockHeader, >, NODE: NodeImplementation< TYPES, @@ -595,9 +594,9 @@ pub struct Libp2pDARun { #[async_trait] impl< TYPES: NodeType< - Transaction = VIDTransaction, - BlockPayload = VIDBlockPayload, - BlockHeader = VIDBlockHeader, + Transaction = TestTransaction, + BlockPayload = TestBlockPayload, + BlockHeader = TestBlockHeader, >, NODE: NodeImplementation< TYPES, @@ -688,9 +687,9 @@ pub struct CombinedDARun { #[async_trait] impl< TYPES: NodeType< - Transaction = VIDTransaction, - BlockPayload = VIDBlockPayload, - BlockHeader = VIDBlockHeader, + Transaction = TestTransaction, + BlockPayload = TestBlockPayload, + BlockHeader = TestBlockHeader, >, NODE: NodeImplementation< TYPES, @@ -799,9 +798,9 @@ where /// Main entry point for validators pub async fn main_entry_point< TYPES: NodeType< - Transaction = VIDTransaction, - BlockPayload = VIDBlockPayload, - BlockHeader = VIDBlockHeader, + Transaction = TestTransaction, + BlockPayload = TestBlockPayload, + BlockHeader = TestBlockHeader, >, DACHANNEL: CommunicationChannel + Debug, QUORUMCHANNEL: CommunicationChannel + Debug, diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index cf738c9a71..e375269dd2 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -5,9 +5,9 @@ use crate::types::ThisRun; use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; +use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; use surf_disco::Url; @@ -37,7 +37,7 @@ async fn main() { // orchestrator async_spawn(run_orchestrator::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, @@ -51,14 +51,14 @@ async fn main() { // nodes let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = load_config_from_file::(args.config_file); + ::SignatureKey, + ::ElectionConfigType, + > = load_config_from_file::(args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { let node = async_spawn(async move { infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index c5249f4ba7..ff9b5507bd 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -3,8 +3,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_testing::state_types::TestTypes; use std::net::IpAddr; use tracing::instrument; use types::VIDNetwork; @@ -50,7 +50,7 @@ async fn main() { let node = async_spawn(async move { infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 54cf3550ae..4f6bf5f085 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use tracing::instrument; use crate::infra::run_orchestrator; @@ -23,7 +23,7 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::( + run_orchestrator::( args, ) .await; diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index b75fafbe7f..23f75da4ed 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,8 +1,6 @@ use crate::infra::Libp2pDARun; -use hotshot::{ - demo::DemoTypes, - traits::implementations::{Libp2pCommChannel, MemoryStorage}, -}; +use hotshot::traits::implementations::{Libp2pCommChannel, MemoryStorage}; +use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -10,20 +8,20 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type DANetwork = Libp2pCommChannel; -pub type VIDNetwork = Libp2pCommChannel; -pub type QuorumNetwork = Libp2pCommChannel; -pub type ViewSyncNetwork = Libp2pCommChannel; +pub type DANetwork = Libp2pCommChannel; +pub type VIDNetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pCommChannel; +pub type ViewSyncNetwork = Libp2pCommChannel; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { + start_view: ::Time, + ) -> (ChannelMaps, Option>) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = Libp2pDARun; +pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 0d7ba8eddf..52cbf7bcb1 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use tracing::{info, instrument}; use types::VIDNetwork; @@ -28,7 +28,7 @@ async fn main() { args.url, args.port ); infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index f0a774e65e..93cf45d87b 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -15,9 +15,9 @@ pub mod infra; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; use clap::Parser; -use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; +use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use surf_disco::Url; use tracing::error; @@ -37,7 +37,7 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >( Some(server_shutdown_cdn), Url::parse("http://localhost:9000").unwrap(), @@ -49,7 +49,7 @@ async fn main() { }); async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >( Some(server_shutdown_da), Url::parse("http://localhost:9001").unwrap(), @@ -62,7 +62,7 @@ async fn main() { // web server orchestrator async_spawn(run_orchestrator::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, @@ -75,14 +75,14 @@ async fn main() { // multi validator run let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = load_config_from_file::(args.config_file); + ::SignatureKey, + ::ElectionConfigType, + > = load_config_from_file::(args.config_file); let mut nodes = Vec::new(); for _ in 0..(config.config.total_nodes.get()) { let node = async_spawn(async move { infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index a0405a6de4..c9bbebceb6 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -3,8 +3,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::DemoTypes; use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_testing::state_types::TestTypes; use std::net::IpAddr; use tracing::instrument; use types::VIDNetwork; @@ -49,7 +49,7 @@ async fn main() { let url = args.url.clone(); let node = async_spawn(async move { infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index 1b0aa3d069..aba2762bf1 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -6,7 +6,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use surf_disco::Url; use tracing::error; @@ -30,7 +30,7 @@ async fn main() { let consensus_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_cdn), args.consensus_url) .await { @@ -39,7 +39,7 @@ async fn main() { }); let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown_da), args.da_url) .await { diff --git a/hotshot/examples/webserver/orchestrator.rs b/hotshot/examples/webserver/orchestrator.rs index f21ee05552..fd61f16648 100644 --- a/hotshot/examples/webserver/orchestrator.rs +++ b/hotshot/examples/webserver/orchestrator.rs @@ -2,7 +2,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; @@ -24,7 +24,7 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::( + run_orchestrator::( args, ) .await; diff --git a/hotshot/examples/webserver/types.rs b/hotshot/examples/webserver/types.rs index 60c86f90d1..822f27bc44 100644 --- a/hotshot/examples/webserver/types.rs +++ b/hotshot/examples/webserver/types.rs @@ -1,8 +1,6 @@ use crate::infra::WebServerDARun; -use hotshot::{ - demo::DemoTypes, - traits::implementations::{MemoryStorage, WebCommChannel}, -}; +use hotshot::traits::implementations::{MemoryStorage, WebCommChannel}; +use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -10,20 +8,20 @@ use std::fmt::Debug; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -pub type DANetwork = WebCommChannel; -pub type VIDNetwork = WebCommChannel; -pub type QuorumNetwork = WebCommChannel; -pub type ViewSyncNetwork = WebCommChannel; +pub type DANetwork = WebCommChannel; +pub type VIDNetwork = WebCommChannel; +pub type QuorumNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebCommChannel; -impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { + start_view: ::Time, + ) -> (ChannelMaps, Option>) { (ChannelMaps::new(start_view), None) } } -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs index 0d7ba8eddf..52cbf7bcb1 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/hotshot/examples/webserver/validator.rs @@ -1,6 +1,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use tracing::{info, instrument}; use types::VIDNetwork; @@ -28,7 +28,7 @@ async fn main() { args.url, args.port ); infra::main_entry_point::< - DemoTypes, + TestTypes, DANetwork, QuorumNetwork, ViewSyncNetwork, diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index c9bc3c1279..07ffab4f88 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -1,4 +1,4 @@ -use hotshot::demo::DemoTypes; +use hotshot_testing::state_types::TestTypes; use std::sync::Arc; use surf_disco::Url; @@ -22,7 +22,7 @@ async fn main() { let (server_shutdown_sender, server_shutdown) = oneshot(); let _sender = Arc::new(server_shutdown_sender); let _result = hotshot_web_server::run_web_server::< - ::SignatureKey, + ::SignatureKey, >(Some(server_shutdown), args.url) .await; } diff --git a/hotshot/src/demo.rs b/hotshot/src/demo.rs deleted file mode 100644 index 3ef2fafc25..0000000000 --- a/hotshot/src/demo.rs +++ /dev/null @@ -1,181 +0,0 @@ -//! Sequencing consensus demo -//! -//! This module provides an implementation of the `HotShot` suite of traits that implements a -//! basic demonstration of sequencing consensus. -//! -//! These implementations are useful in examples and integration testing, but are not suitable for -//! production use. -use crate::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; -use commit::{Commitment, Committable}; -use derivative::Derivative; - -use hotshot_signature_key::bn254::BLSPubKey; -use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::{fake_commitment, BlockError, ViewNumber}, - traits::{ - election::Membership, - node_implementation::NodeType, - state::{ConsensusTime, TestableState}, - BlockPayload, State, - }, -}; - -use serde::{Deserialize, Serialize}; -use std::{fmt::Debug, marker::PhantomData}; - -/// sequencing demo entry state -#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct DemoState { - /// the block height - block_height: u64, - /// the view number - view_number: ViewNumber, - /// the previous state commitment - prev_state_commitment: Commitment, -} - -impl Committable for DemoState { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Demo State Commit") - .u64_field("block_height", self.block_height) - .u64_field("view_number", *self.view_number) - .field("prev_state_commitment", self.prev_state_commitment) - .finalize() - } - - fn tag() -> String { - "SEQUENCING_DEMO_STATE".to_string() - } -} - -impl Default for DemoState { - fn default() -> Self { - Self { - block_height: 0, - view_number: ViewNumber::genesis(), - prev_state_commitment: fake_commitment(), - } - } -} - -impl State for DemoState { - type Error = BlockError; - - type BlockHeader = VIDBlockHeader; - - type BlockPayload = VIDBlockPayload; - - type Time = ViewNumber; - - fn validate_block(&self, _block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool { - if view_number == &ViewNumber::genesis() { - &self.view_number == view_number - } else { - self.view_number < *view_number - } - } - - fn initialize() -> Self { - let mut state = Self::default(); - state.block_height += 1; - state - } - - fn append( - &self, - block_header: &Self::BlockHeader, - view_number: &Self::Time, - ) -> Result { - if !self.validate_block(block_header, view_number) { - return Err(BlockError::InvalidBlockHeader); - } - - Ok(DemoState { - block_height: self.block_height + 1, - view_number: *view_number, - prev_state_commitment: self.commit(), - }) - } - - fn on_commit(&self) {} -} - -impl TestableState for DemoState { - fn create_random_transaction( - _state: Option<&Self>, - _rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction { - /// clippy appeasement for `RANDOM_TX_BASE_SIZE` - const RANDOM_TX_BASE_SIZE: usize = 8; - VIDTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) - } -} -/// Implementation of [`NodeType`] for [`VDemoNode`] -#[derive( - Copy, - Clone, - Debug, - Default, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - serde::Serialize, - serde::Deserialize, -)] -pub struct DemoTypes; - -impl NodeType for DemoTypes { - type Time = ViewNumber; - type BlockHeader = VIDBlockHeader; - type BlockPayload = VIDBlockPayload; - type SignatureKey = BLSPubKey; - type Transaction = VIDTransaction; - type ElectionConfigType = StaticElectionConfig; - type StateType = DemoState; - type Membership = DemoMembership; -} - -/// Alias for the static committee used in the Demo apps -pub type DemoMembership = GeneralStaticCommittee::SignatureKey>; - -/// The node implementation for the sequencing demo -#[derive(Derivative)] -#[derivative(Clone(bound = ""))] -pub struct DemoNode(PhantomData) -where - MEMBERSHIP: Membership + std::fmt::Debug; - -impl DemoNode -where - MEMBERSHIP: Membership + std::fmt::Debug, -{ - /// Create a new `DemoNode` - #[must_use] - pub fn new() -> Self { - DemoNode(PhantomData) - } -} - -impl Debug for DemoNode -where - MEMBERSHIP: Membership + std::fmt::Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("DemoNode") - .field("_phantom", &"phantom") - .finish() - } -} - -impl Default for DemoNode -where - MEMBERSHIP: Membership + std::fmt::Debug, -{ - fn default() -> Self { - Self::new() - } -} diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 37a6cabc16..4224394a81 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -19,8 +19,6 @@ #[cfg(feature = "docs")] pub mod documentation; -#[cfg(feature = "demo")] -pub mod demo; /// Contains traits consumed by [`HotShot`] pub mod traits; /// Contains types used by the crate diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 4452420213..9684bd61c7 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -22,8 +22,3 @@ pub mod implementations { storage::memory_storage::MemoryStorage, // atomic_storage::AtomicStorage, }; } - -/// Dummy testing implementations -pub mod dummy { - pub use hotshot_types::traits::state::dummy::DummyState; -} diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 6908cd1562..301f2ca89c 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -375,7 +375,7 @@ impl TestableChannelImplementation for CombinedCommChann #[cfg(test)] mod test { - use hotshot_types::block_impl::VIDTransaction; + use hotshot_testing::block_types::TestTransaction; use super::*; use tracing::instrument; @@ -412,8 +412,8 @@ mod test { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_hash_calculation() { - let message1 = VIDTransaction(vec![0; 32]); - let message2 = VIDTransaction(vec![1; 32]); + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); @@ -426,8 +426,8 @@ mod test { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_cache_integrity() { - let message1 = VIDTransaction(vec![0; 32]); - let message2 = VIDTransaction(vec![1; 32]); + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); let mut cache = Cache::new(3); diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 5771e6f471..5b7ddfd25e 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -107,60 +107,35 @@ impl Storage for MemoryStorage { #[cfg(test)] mod test { - use crate::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; use super::*; use commit::Committable; - use hotshot_signature_key::bn254::BLSPubKey; + use hotshot_testing::{ + block_types::{TestBlockHeader, TestBlockPayload}, + node_types::TestTypes, + }; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::{fake_commitment, genesis_proposer_id, Leaf, ViewNumber}, + data::{fake_commitment, genesis_proposer_id, Leaf}, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, state::dummy::DummyState, state::ConsensusTime}, + traits::{node_implementation::NodeType, state::ConsensusTime}, }; - use std::{fmt::Debug, hash::Hash, marker::PhantomData}; + use std::marker::PhantomData; use tracing::instrument; - #[derive( - Copy, - Clone, - Debug, - Default, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - serde::Serialize, - serde::Deserialize, - )] - struct DummyTypes; - - impl NodeType for DummyTypes { - type Time = ViewNumber; - type BlockHeader = VIDBlockHeader; - type BlockPayload = VIDBlockPayload; - type SignatureKey = BLSPubKey; - type Transaction = VIDTransaction; - type ElectionConfigType = StaticElectionConfig; - type StateType = DummyState; - type Membership = GeneralStaticCommittee; - } - - fn random_stored_view(view_number: ::Time) -> StoredView { - let payload = VIDBlockPayload::genesis(); - let header = VIDBlockHeader { + fn random_stored_view(view_number: ::Time) -> StoredView { + let payload = TestBlockPayload::genesis(); + let header = TestBlockHeader { block_number: 0, payload_commitment: payload.payload_commitment, }; - let dummy_leaf_commit = fake_commitment::>(); + let dummy_leaf_commit = fake_commitment::>(); let data = hotshot_types::simple_vote::QuorumData { leaf_commit: dummy_leaf_commit, }; let commit = data.commit(); StoredView::from_qc_block_and_state( QuorumCertificate { - is_genesis: view_number == ::Time::genesis(), + is_genesis: view_number == ::Time::genesis(), data, vote_commitment: commit, signatures: None, @@ -183,7 +158,7 @@ mod test { #[instrument] async fn memory_storage() { let storage = MemoryStorage::construct_tmp_storage().unwrap(); - let genesis = random_stored_view(::Time::genesis()); + let genesis = random_stored_view(::Time::genesis()); storage .append_single_view(genesis.clone()) .await diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 1ef4a2865b..3c4b3dd621 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -6,20 +6,24 @@ description = "Types and traits for the HotShot consesus module" authors = ["Espresso Systems "] [features] -default = ["demo"] -demo = ["hotshot/demo"] - +default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] [dependencies] ark-bls12-381 = { workspace = true } +ark-serialize = { version = "0.3", features = [ + "derive", +] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 +espresso-systems-common = { workspace = true } async-compatibility-layer = { workspace = true } async-trait = { workspace = true } # needed for vrf demo # so non-optional for now blake3 = { workspace = true, features = ["traits-preview"] } +sha3 = "^0.10" commit = { workspace = true } +derivative = "2.2.0" either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ @@ -29,6 +33,7 @@ hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } hotshot-constants = { path = "../constants" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } +hotshot-signature-key = { path = "../hotshot-signature-key" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } jf-primitives = { workspace = true } diff --git a/types/src/block_impl.rs b/testing/src/block_types.rs similarity index 81% rename from types/src/block_impl.rs rename to testing/src/block_types.rs index c24110bc23..35960fd0a7 100644 --- a/types/src/block_impl.rs +++ b/testing/src/block_types.rs @@ -1,12 +1,10 @@ -//! This module provides implementations of block traits for examples and tests only. -//! TODO (Keyao) Organize non-production code. -//! use std::{ fmt::{Debug, Display}, mem::size_of, }; -use crate::{ +use commit::{Commitment, Committable}; +use hotshot_types::{ data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ block_contents::{vid_commitment, BlockHeader, Transaction}, @@ -14,15 +12,14 @@ use crate::{ BlockPayload, }, }; -use commit::{Commitment, Committable}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; -/// The transaction in a [`VIDBlockPayload`]. +/// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct VIDTransaction(pub Vec); +pub struct TestTransaction(pub Vec); -impl VIDTransaction { +impl TestTransaction { /// Encode a list of transactions into bytes. /// /// # Errors @@ -49,7 +46,7 @@ impl VIDTransaction { } } -impl Committable for VIDTransaction { +impl Committable for TestTransaction { fn commit(&self) -> Commitment { let builder = commit::RawCommitmentBuilder::new("Txn Comm"); let mut hasher = Keccak256::new(); @@ -63,18 +60,18 @@ impl Committable for VIDTransaction { } } -impl Transaction for VIDTransaction {} +impl Transaction for TestTransaction {} -/// A [`BlockPayload`] that contains a list of `VIDTransaction`. +/// A [`BlockPayload`] that contains a list of `TestTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct VIDBlockPayload { +pub struct TestBlockPayload { /// List of transactions. - pub transactions: Vec, + pub transactions: Vec, /// VID commitment to the block payload. pub payload_commitment: ::Commit, } -impl VIDBlockPayload { +impl TestBlockPayload { /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for /// consensus task initiation. /// # Panics @@ -83,21 +80,21 @@ impl VIDBlockPayload { pub fn genesis() -> Self { let txns: Vec = vec![0]; // It's impossible for `encode` to fail because the transaciton length is very small. - let encoded = VIDTransaction::encode(vec![VIDTransaction(txns.clone())]).unwrap(); - VIDBlockPayload { - transactions: vec![VIDTransaction(txns)], + let encoded = TestTransaction::encode(vec![TestTransaction(txns.clone())]).unwrap(); + TestBlockPayload { + transactions: vec![TestTransaction(txns)], payload_commitment: vid_commitment(&encoded), } } } -impl Display for VIDBlockPayload { +impl Display for TestBlockPayload { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "BlockPayload #txns={}", self.transactions.len()) } } -impl TestableBlock for VIDBlockPayload { +impl TestableBlock for TestBlockPayload { fn genesis() -> Self { Self::genesis() } @@ -107,17 +104,17 @@ impl TestableBlock for VIDBlockPayload { } } -impl BlockPayload for VIDBlockPayload { +impl BlockPayload for TestBlockPayload { type Error = BlockError; - type Transaction = VIDTransaction; + type Transaction = TestTransaction; type Metadata = (); type Encode<'a> = as IntoIterator>::IntoIter; fn from_transactions( transactions: impl IntoIterator, ) -> Result<(Self, Self::Metadata), Self::Error> { - let txns_vec: Vec = transactions.into_iter().collect(); - let encoded = VIDTransaction::encode(txns_vec.clone())?; + let txns_vec: Vec = transactions.into_iter().collect(); + let encoded = TestTransaction::encode(txns_vec.clone())?; Ok(( Self { transactions: txns_vec, @@ -143,7 +140,7 @@ impl BlockPayload for VIDBlockPayload { // Get the transaction. let next_index = txn_start_index + txn_len; - transactions.push(VIDTransaction( + transactions.push(TestTransaction( encoded_vec[txn_start_index..next_index].to_vec(), )); current_index = next_index; @@ -160,7 +157,7 @@ impl BlockPayload for VIDBlockPayload { } fn encode(&self) -> Result, Self::Error> { - Ok(VIDTransaction::encode(self.transactions.clone())?.into_iter()) + Ok(TestTransaction::encode(self.transactions.clone())?.into_iter()) } fn transaction_commitments(&self) -> Vec> { @@ -171,17 +168,17 @@ impl BlockPayload for VIDBlockPayload { } } -/// A [`BlockHeader`] that commits to [`VIDBlockPayload`]. +/// A [`BlockHeader`] that commits to [`TestBlockPayload`]. #[derive(PartialEq, Eq, Hash, Clone, Debug, Deserialize, Serialize)] -pub struct VIDBlockHeader { +pub struct TestBlockHeader { /// Block number. pub block_number: u64, /// VID commitment to the payload. pub payload_commitment: VidCommitment, } -impl BlockHeader for VIDBlockHeader { - type Payload = VIDBlockPayload; +impl BlockHeader for TestBlockHeader { + type Payload = TestBlockPayload; fn new( payload_commitment: VidCommitment, diff --git a/testing/src/lib.rs b/testing/src/lib.rs index da15e2dfde..7e39871ed1 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -1,3 +1,9 @@ +#![cfg_attr( + // hotshot_example option is set manually in justfile when running examples + not(any(test, debug_assertions, hotshot_example)), + deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" +)] + use hotshot_task::{event_stream::ChannelStream, task_impls::HSTWithEvent}; /// Helpers for initializing system context handle and building tasks. @@ -21,12 +27,18 @@ pub mod txn_task; /// task that decides when things are complete pub mod completion_task; -/// node types -pub mod node_types; - /// task to spin nodes up and down pub mod spinning_task; +/// block types +pub mod block_types; + +/// Implementations for testing/examples +pub mod state_types; + +/// node types +pub mod node_types; + // TODO node changer (spin up and down) #[derive(Clone, Debug)] diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 945bc5feea..1014c434e4 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -1,7 +1,11 @@ use hotshot::traits::election::static_committee::GeneralStaticCommittee; +use crate::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::TestState, +}; + use hotshot::{ - demo::DemoState, traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig}, implementations::{ @@ -13,7 +17,6 @@ use hotshot::{ types::bn254::BLSPubKey, }; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, data::ViewNumber, traits::node_implementation::{ChannelMaps, NodeType}, }; @@ -35,12 +38,12 @@ use serde::{Deserialize, Serialize}; pub struct TestTypes; impl NodeType for TestTypes { type Time = ViewNumber; - type BlockHeader = VIDBlockHeader; - type BlockPayload = VIDBlockPayload; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; - type Transaction = VIDTransaction; + type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = DemoState; + type StateType = TestState; type Membership = GeneralStaticCommittee; } diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs new file mode 100644 index 0000000000..3513aea6d6 --- /dev/null +++ b/testing/src/state_types.rs @@ -0,0 +1,106 @@ +//! Implementations for examples and tests only +use commit::{Commitment, Committable}; + +use hotshot_types::{ + data::{fake_commitment, BlockError, ViewNumber}, + traits::{ + state::{ConsensusTime, TestableState}, + BlockPayload, State, + }, +}; + +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +use crate::block_types::TestTransaction; +use crate::block_types::{TestBlockHeader, TestBlockPayload}; +pub use crate::node_types::TestTypes; + +/// sequencing demo entry state +#[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] +pub struct TestState { + /// the block height + block_height: u64, + /// the view number + view_number: ViewNumber, + /// the previous state commitment + prev_state_commitment: Commitment, +} + +impl Committable for TestState { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("Test State Commit") + .u64_field("block_height", self.block_height) + .u64_field("view_number", *self.view_number) + .field("prev_state_commitment", self.prev_state_commitment) + .finalize() + } + + fn tag() -> String { + "SEQUENCING_TEST_STATE".to_string() + } +} + +impl Default for TestState { + fn default() -> Self { + Self { + block_height: 0, + view_number: ViewNumber::genesis(), + prev_state_commitment: fake_commitment(), + } + } +} + +impl State for TestState { + type Error = BlockError; + + type BlockHeader = TestBlockHeader; + + type BlockPayload = TestBlockPayload; + + type Time = ViewNumber; + + fn validate_block(&self, _block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool { + if view_number == &ViewNumber::genesis() { + &self.view_number == view_number + } else { + self.view_number < *view_number + } + } + + fn initialize() -> Self { + let mut state = Self::default(); + state.block_height += 1; + state + } + + fn append( + &self, + block_header: &Self::BlockHeader, + view_number: &Self::Time, + ) -> Result { + if !self.validate_block(block_header, view_number) { + return Err(BlockError::InvalidBlockHeader); + } + + Ok(TestState { + block_height: self.block_height + 1, + view_number: *view_number, + prev_state_commitment: self.commit(), + }) + } + + fn on_commit(&self) {} +} + +impl TestableState for TestState { + fn create_random_transaction( + _state: Option<&Self>, + _rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + /// clippy appeasement for `RANDOM_TX_BASE_SIZE` + const RANDOM_TX_BASE_SIZE: usize = 8; + TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) + } +} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ebdb53904f..aec6cfadcc 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,6 +1,7 @@ use std::marker::PhantomData; use crate::{ + block_types::{TestBlockHeader, TestBlockPayload}, node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; @@ -12,7 +13,6 @@ use hotshot::{ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ - block_impl::{VIDBlockHeader, VIDBlockPayload}, consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::Proposal, @@ -125,9 +125,9 @@ async fn build_quorum_proposal_and_signature( let parent_leaf = leaf.clone(); // every event input is seen on the event stream in the output. - let block = ::genesis(); + let block = ::genesis(); let payload_commitment = vid_commitment(&block.encode().unwrap().collect()); - let block_header = VIDBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); + let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); let leaf = Leaf { view_number: ViewNumber::new(view), justify_qc: consensus.high_qc.clone(), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index d418122f98..81e01425eb 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,8 +1,10 @@ use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::node_types::{MemoryImpl, TestTypes}; +use hotshot_testing::{ + block_types::TestTransaction, + node_types::{MemoryImpl, TestTypes}, +}; use hotshot_types::{ - block_impl::VIDTransaction, data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, traits::{ @@ -33,8 +35,8 @@ async fn test_da_task() { inner: handle.hotshot.inner.clone(), }; let pub_key = *api.public_key(); - let transactions = vec![VIDTransaction(vec![0])]; - let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let payload_commitment = vid_commitment(&encoded_transactions); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 3c99b4e2bc..5a19a404a0 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -2,7 +2,6 @@ use std::collections::BTreeSet; use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; -use hotshot::demo::DemoState; use hotshot::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; use hotshot::traits::implementations::{ MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, @@ -10,7 +9,10 @@ use hotshot::traits::implementations::{ use hotshot::traits::NodeImplementation; use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; -use hotshot_types::block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}; +use hotshot_testing::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::TestState, +}; use hotshot_types::message::Message; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; @@ -43,12 +45,12 @@ pub struct Test; impl NodeType for Test { type Time = ViewNumber; - type BlockHeader = VIDBlockHeader; - type BlockPayload = VIDBlockPayload; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; - type Transaction = VIDTransaction; + type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = DemoState; + type StateType = TestState; type Membership = GeneralStaticCommittee; } @@ -109,7 +111,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec::new(0), )), }; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 0d0f469864..02188d0bea 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -19,8 +19,8 @@ use std::{collections::HashMap, marker::PhantomData}; #[ignore] async fn test_network_task() { use hotshot_task_impls::harness::run_harness; - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{block_impl::VIDTransaction, data::VidDisperse, message::Proposal}; + use hotshot_testing::{block_types::TestTransaction, task_helpers::build_system_handle}; + use hotshot_types::{data::VidDisperse, message::Proposal}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -33,8 +33,8 @@ async fn test_network_task() { let pub_key = *api.public_key(); let priv_key = api.private_key(); let vid = vid_init(); - let transactions = vec![VIDTransaction(vec![0])]; - let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let signature = diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 185c54d03f..1c224db08d 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,12 +1,12 @@ use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ + block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ - block_impl::VIDTransaction, data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, }; @@ -34,8 +34,8 @@ async fn test_vid_task() { let pub_key = *api.public_key(); let vid = vid_init(); - let transactions = vec![VIDTransaction(vec![0])]; - let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; diff --git a/types/Cargo.toml b/types/Cargo.toml index cd63d62e12..0bb3cd7028 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -7,8 +7,6 @@ readme = "../README.md" version = "0.1.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[features] -demo = [] [dependencies] arbitrary = { version = "1.3", features = ["derive"] } @@ -48,7 +46,6 @@ rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } -sha3 = "^0.10" snafu = { workspace = true } tagged-base64 = { git = "https://github.com/EspressoSystems/tagged-base64", tag = "0.2.4" } time = { workspace = true } diff --git a/types/src/lib.rs b/types/src/lib.rs index 2abb198e96..5467fa6b2b 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -12,7 +12,6 @@ use displaydoc::Display; use std::{num::NonZeroUsize, time::Duration}; use traits::{election::ElectionConfig, signature_key::SignatureKey}; -pub mod block_impl; pub mod consensus; pub mod data; pub mod error; @@ -24,6 +23,7 @@ pub mod simple_vote; pub mod traits; pub mod utils; pub mod vote; + /// the type of consensus to run. Either: /// wait for a signal to start a view, /// or constantly run diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 4899e9976a..1b0a84343c 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -5,7 +5,6 @@ use crate::traits::BlockPayload; use commit::Committable; -use espresso_systems_common::hotshot::tag; use serde::{de::DeserializeOwned, Serialize}; use std::{ error::Error, @@ -124,97 +123,3 @@ pub trait TestableBlock: BlockPayload + Debug { /// the number of transactions in this block fn txn_count(&self) -> u64; } - -/// Dummy implementation of `State` for unit tests -pub mod dummy { - use super::{tag, Committable, Debug, Hash, Serialize, State, TestableState}; - use crate::{ - block_impl::{VIDBlockHeader, VIDBlockPayload, VIDTransaction}, - data::ViewNumber, - }; - use rand::Rng; - use serde::Deserialize; - - /// Dummy error - #[derive(Debug)] - pub struct DummyError; - - impl std::error::Error for DummyError {} - - impl std::fmt::Display for DummyError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("A bad thing happened") - } - } - - /// The dummy state - #[derive(Clone, Debug, Default, Hash, PartialEq, Eq, Serialize, Deserialize)] - pub struct DummyState { - /// Some dummy data - nonce: u64, - } - - impl Committable for DummyState { - fn commit(&self) -> commit::Commitment { - commit::RawCommitmentBuilder::new("Dummy State Comm") - .u64_field("Nonce", self.nonce) - .finalize() - } - - fn tag() -> String { - tag::DUMMY_STATE.to_string() - } - } - - impl DummyState { - /// Generate a random `DummyState` - pub fn random(r: &mut dyn rand::RngCore) -> Self { - Self { - nonce: r.gen_range(1..1_000_000), - } - } - } - - impl State for DummyState { - type Error = DummyError; - type BlockHeader = VIDBlockHeader; - type BlockPayload = VIDBlockPayload; - type Time = ViewNumber; - - fn validate_block( - &self, - _block_header: &Self::BlockHeader, - _view_number: &Self::Time, - ) -> bool { - false - } - - fn initialize() -> Self { - let mut state = Self::default(); - state.nonce += 1; - state - } - - fn append( - &self, - _block_header: &Self::BlockHeader, - _view_number: &Self::Time, - ) -> Result { - Ok(Self { - nonce: self.nonce + 1, - }) - } - - fn on_commit(&self) {} - } - - impl TestableState for DummyState { - fn create_random_transaction( - _state: Option<&Self>, - _: &mut dyn rand::RngCore, - _: u64, - ) -> VIDTransaction { - VIDTransaction(vec![0u8]) - } - } -} diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index bc2520065a..70555de446 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -5,10 +5,6 @@ version = "0.1.1" readme = "README.md" edition = "2021" -[features] -default = ["demo"] -demo = ["hotshot-types/demo"] - [dependencies] ark-bls12-381 = { workspace = true } async-compatibility-layer = { workspace = true } From 03339655636bb530828cba87002416666196c74d Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 5 Dec 2023 11:49:43 -0800 Subject: [PATCH 0495/1393] Make Committable a subtrait of BlockHeader --- testing/src/block_types.rs | 20 ++++++++++++++++++-- testing/src/state_types.rs | 2 +- types/src/traits/block_contents.rs | 2 +- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 35960fd0a7..a49e93c5ed 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -3,7 +3,7 @@ use std::{ mem::size_of, }; -use commit::{Commitment, Committable}; +use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ @@ -56,7 +56,7 @@ impl Committable for TestTransaction { } fn tag() -> String { - "SEQUENCING_TXN".to_string() + "TEST_TXN".to_string() } } @@ -217,3 +217,19 @@ impl BlockHeader for TestBlockHeader { fn metadata(&self) -> ::Metadata {} } + +impl Committable for TestBlockHeader { + fn commit(&self) -> Commitment { + let payload_commitment_bytes: [u8; 32] = self.payload_commitment().into(); + + RawCommitmentBuilder::new("Header Comm") + .u64_field("block number", self.block_number()) + .constant_str("payload commitment") + .fixed_size_bytes(&payload_commitment_bytes) + .finalize() + } + + fn tag() -> String { + "TEST_HEADER".to_string() + } +} diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 3513aea6d6..5047376415 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -37,7 +37,7 @@ impl Committable for TestState { } fn tag() -> String { - "SEQUENCING_TEST_STATE".to_string() + "TEST_STATE".to_string() } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 7aee100bd3..859bf3e173 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -95,7 +95,7 @@ pub fn vid_commitment(encoded_transactions: &Vec) -> Date: Tue, 5 Dec 2023 13:14:45 -0800 Subject: [PATCH 0496/1393] add vote test with vid to test_consensus_task --- testing/tests/consensus_task.rs | 46 ++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 632f2efb50..1f3249dc6e 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -107,7 +107,7 @@ async fn test_consensus_task() { let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let _pub_key = *api.public_key(); + let pub_key = *api.public_key(); let vid = vid_init(); let transactions = vec![VIDTransaction(vec![0])]; let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); @@ -122,7 +122,7 @@ async fn test_consensus_task() { shares: vid_disperse.shares, common: vid_disperse.common, }; - let _vid_proposal: Proposal> = Proposal { + let vid_proposal: Proposal> = Proposal { data: vid_disperse.clone(), signature: signature, _pd: PhantomData, @@ -140,6 +140,19 @@ async fn test_consensus_task() { proposal.clone(), public_key, )); + // followings are for the test of vote logic with vid + input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(HotShotEvent::TransactionsSequenced( + encoded_transactions.clone(), + (), + ViewNumber::new(1), + )); + input.push(HotShotEvent::BlockReady( + vid_disperse.clone(), + ViewNumber::new(1), + )); + input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); + input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); @@ -151,7 +164,34 @@ async fn test_consensus_task() { HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), 1, ); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert( + HotShotEvent::ViewChange(ViewNumber::new(1)), + 2, // 1 from `QuorumProposalRecv`, 1 from input + ); + output.insert( + HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), + 1, + ); + output.insert( + HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), + 2, + ); + output.insert( + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), + 1, + ); + output.insert( + HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), + 2, // 2 occurrences: 1 from `input`, 1 from the DA task + ); + output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { + output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); + output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); + } + output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { From b6f9fa3fec020870e7f8fae6a389b2931ff2b090 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 5 Dec 2023 15:34:36 -0800 Subject: [PATCH 0497/1393] merge develop --- testing/tests/consensus_task.rs | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 1f3249dc6e..70122b7398 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -87,13 +87,16 @@ async fn build_vote( #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_consensus_task() { - use std::marker::PhantomData; - use hotshot_testing::task_helpers::vid_init; - use hotshot_types::data::VidSchemeTrait; use hotshot_task_impls::harness::run_harness; + use hotshot_testing::block_types::TestTransaction; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{simple_certificate::QuorumCertificate, data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, block_impl::VIDTransaction}; - + use hotshot_testing::task_helpers::vid_init; + use hotshot_types::data::VidSchemeTrait; + use hotshot_types::{ + data::VidDisperse, message::Proposal, simple_certificate::QuorumCertificate, + traits::node_implementation::NodeType, + }; + use std::marker::PhantomData; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -102,29 +105,27 @@ async fn test_consensus_task() { // We assign node's key pair rather than read from config file since it's a test let (private_key, public_key) = key_pair_for_id(1); - - let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; let pub_key = *api.public_key(); let vid = vid_init(); - let transactions = vec![VIDTransaction(vec![0])]; - let encoded_transactions = VIDTransaction::encode(transactions.clone()).unwrap(); + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let signature = ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); let vid_disperse = VidDisperse { - view_number: ViewNumber::new(2), + view_number: ViewNumber::new(2), payload_commitment, shares: vid_disperse.shares, common: vid_disperse.common, }; - let vid_proposal: Proposal> = Proposal { + let vid_proposal: Proposal> = Proposal { data: vid_disperse.clone(), - signature: signature, + signature, _pd: PhantomData, }; @@ -165,9 +166,9 @@ async fn test_consensus_task() { 1, ); output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), + HotShotEvent::ViewChange(ViewNumber::new(1)), 2, // 1 from `QuorumProposalRecv`, 1 from input - ); + ); output.insert( HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), 1, From 0cbbbd3bea6340e0b02d45f7f3942b3bc24c7816 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 6 Dec 2023 10:49:56 -0800 Subject: [PATCH 0498/1393] added test_consensus_with_vid_vote(), comments to be deleted, flow to be re-examined --- testing/tests/consensus_task.rs | 172 +++++++++++++++++++++++++++----- 1 file changed, 145 insertions(+), 27 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 70122b7398..aeff90d895 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -23,6 +23,7 @@ use hotshot_types::{ }; use std::collections::HashMap; +use tracing::error; async fn build_vote( handle: &SystemContextHandle, @@ -85,15 +86,77 @@ async fn build_vote( tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; - use hotshot_testing::block_types::TestTransaction; use hotshot_testing::task_helpers::build_system_handle; + use hotshot_types::simple_certificate::QuorumCertificate; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(1).await.0; + // We assign node's key pair rather than read from config file since it's a test + let (private_key, public_key) = key_pair_for_id(1); + + let mut input = Vec::new(); + let mut output = HashMap::new(); + + // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal + let qc = QuorumCertificate::::genesis(); + let proposal = build_quorum_proposal(&handle, &private_key, 1).await; + + input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); + input.push(HotShotEvent::QuorumProposalRecv( + proposal.clone(), + public_key, + )); + + input.push(HotShotEvent::Shutdown); + + output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); + output.insert( + HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), + 1, + ); + output.insert( + HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + 1, + ); + output.insert( + HotShotEvent::ViewChange(ViewNumber::new(1)), + 1, + ); + + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { + output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); + output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); + } + + output.insert(HotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, event_stream| { + add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + }; + + run_harness(input, output, None, build_fn).await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] +async fn test_consensus_with_vid_vote() { + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + use hotshot_testing::block_types::TestTransaction; use hotshot_testing::task_helpers::vid_init; use hotshot_types::data::VidSchemeTrait; use hotshot_types::{ - data::VidDisperse, message::Proposal, simple_certificate::QuorumCertificate, + data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, }; use std::marker::PhantomData; @@ -101,10 +164,13 @@ async fn test_consensus_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(1).await.0; + let handle = build_system_handle(2).await.0; // We assign node's key pair rather than read from config file since it's a test - let (private_key, public_key) = key_pair_for_id(1); + // In view 2, node 2 is the leader. + let (private_key_view1, public_key_view1) = key_pair_for_id(1); + // let (private_key_view2, public_key_view2) = key_pair_for_id(2); + // For the test of vote logic with vid let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; @@ -132,17 +198,16 @@ async fn test_consensus_task() { let mut input = Vec::new(); let mut output = HashMap::new(); - // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = QuorumCertificate::::genesis(); - let proposal = build_quorum_proposal(&handle, &private_key, 1).await; - - input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); + let proposal_view1 = build_quorum_proposal(&handle, &private_key_view1, 1).await; + // Do a view change, so that it's not the genesis view, and vid vote is needed + input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + error!("proposal_view1 = {:?}", proposal_view1); input.push(HotShotEvent::QuorumProposalRecv( - proposal.clone(), - public_key, + proposal_view1.clone(), + public_key_view1, )); + // followings are for the test of vote logic with vid - input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::TransactionsSequenced( encoded_transactions.clone(), (), @@ -154,27 +219,19 @@ async fn test_consensus_task() { )); input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); - output.insert( - HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), - 1, - ); + output.insert( - HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + HotShotEvent::QuorumProposalRecv(proposal_view1.clone(), public_key_view1), 1, ); + output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), - 2, // 1 from `QuorumProposalRecv`, 1 from input - ); - output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), + HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(1)), 1, ); output.insert( - HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), + HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(1)), 2, ); output.insert( @@ -187,12 +244,72 @@ async fn test_consensus_task() { ); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view1.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } + + + // input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + // let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; + // error!("proposal_view2 = {:?}", proposal_view2); + // // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader + // input.push(HotShotEvent::QuorumProposalRecv( + // proposal_view2.clone(), + // public_key_view2, + // )); + // // followings are for the test of vote logic with vid + // input.push(HotShotEvent::TransactionsSequenced( + // encoded_transactions.clone(), + // (), + // ViewNumber::new(2), + // )); + // input.push(HotShotEvent::BlockReady( + // vid_disperse.clone(), + // ViewNumber::new(2), + // )); + // input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); + // input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); + + // output.insert( + // HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), + // 1, + // ); + // output.insert( + // HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), + // 1, + // ); + // output.insert( + // HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), + // 2, + // ); + // output.insert( + // HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), + // 1, + // ); + // output.insert( + // HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), + // 2, // 2 occurrences: 1 from `input`, 1 from the DA task + // ); + // output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { + // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + // input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); + // output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); + // } + + output.insert( + HotShotEvent::ViewChange(ViewNumber::new(1)), + 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input + ); + // output.insert( + // HotShotEvent::ViewChange(ViewNumber::new(2)), + // 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input + // ); + + input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { @@ -251,3 +368,4 @@ async fn test_consensus_vote() { run_harness(input, output, None, build_fn).await; } + From e7e12beaea63f20c07884676aa3e8a4021e41e4d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 6 Dec 2023 11:26:32 -0800 Subject: [PATCH 0499/1393] remove values & take in box for metrics init --- hotshot/examples/infra/mod.rs | 4 ++-- hotshot/src/traits/networking.rs | 15 ++++----------- hotshot/src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/traits/networking/memory_network.rs | 7 ++++++- testing/src/task_helpers.rs | 2 +- testing/src/test_runner.rs | 2 +- testing/tests/memory_network.rs | 12 ++++++------ types/src/consensus.rs | 15 ++++----------- 8 files changed, 25 insertions(+), 34 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 83b360dbc5..257f2c957b 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -259,7 +259,7 @@ match node_type { let node_config = config_builder.build().unwrap(); Libp2pNetwork::new( - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), node_config, pub_key.clone(), Arc::new(RwLock::new( @@ -367,7 +367,7 @@ pub trait RunDA< memberships, networks_bundle, initializer, - ConsensusMetricsValue::new(), + ConsensusMetricsValue::default(), ) .await .expect("Could not init hotshot") diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 0cfeacc779..87851e3454 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -15,7 +15,7 @@ use std::{ }; use custom_debug::Debug; -use hotshot_types::traits::metrics::{Counter, Gauge, Histogram, Label, Metrics}; +use hotshot_types::traits::metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}; pub use hotshot_types::traits::network::{ ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, @@ -25,8 +25,6 @@ pub use hotshot_types::traits::network::{ #[derive(Clone, Debug)] pub struct NetworkingMetricsValue { #[allow(dead_code)] - /// The values that are being tracked - pub values: Arc>, /// A [`Gauge`] which tracks how many peers are connected pub connected_peers: Box, /// A [`Counter`] which tracks how many messages have been received directly @@ -163,14 +161,9 @@ impl Label for NetworkingMetrics { impl NetworkingMetricsValue { /// Create a new instance of this [`NetworkingMetricsValue`] struct, setting all the counters and gauges #[must_use] - pub fn new() -> Self { - let values = Arc::default(); - let metrics: Box = Box::new(NetworkingMetrics { - prefix: String::new(), - values: Arc::clone(&values), - }); + #[allow(clippy::borrowed_box)] + pub fn new(metrics: &Box) -> Self { Self { - values, connected_peers: metrics.create_gauge(String::from("connected_peers"), None), incoming_direct_message_count: metrics .create_counter(String::from("incoming_direct_message_count"), None), @@ -188,6 +181,6 @@ impl NetworkingMetricsValue { impl Default for NetworkingMetricsValue { fn default() -> Self { - Self::new() + Self::new(&NoMetrics::boxed()) } } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 00453289de..fc8992658a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -228,7 +228,7 @@ where let da = da_keys.clone(); async_block_on(async move { match Libp2pNetwork::new( - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), config, pubkey.clone(), bootstrap_addrs_ref, diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 92453920e6..48ebfe1719 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -255,7 +255,12 @@ impl TestableNetworkingImplementation Box::new(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - MemoryNetwork::new(pubkey, NetworkingMetricsValue::new(), master.clone(), None) + MemoryNetwork::new( + pubkey, + NetworkingMetricsValue::default(), + master.clone(), + None, + ) }) } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ebdb53904f..d8d70b3a14 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -96,7 +96,7 @@ pub async fn build_system_handle( memberships, networks_bundle, initializer, - ConsensusMetricsValue::new(), + ConsensusMetricsValue::default(), ) .await .expect("Could not init hotshot") diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 16704e7bfe..d2b62a5419 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -273,7 +273,7 @@ where memberships, network_bundle, initializer, - ConsensusMetricsValue::new(), + ConsensusMetricsValue::default(), ) .await .expect("Could not init hotshot") diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 3c99b4e2bc..0e036ec0a9 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -165,7 +165,7 @@ async fn memory_network_direct_queue() { let pub_key_1 = get_pubkey(); let network1 = MemoryNetwork::new( pub_key_1, - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), group.clone(), Option::None, ); @@ -173,7 +173,7 @@ async fn memory_network_direct_queue() { let pub_key_2 = get_pubkey(); let network2 = MemoryNetwork::new( pub_key_2, - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), group, Option::None, ); @@ -230,14 +230,14 @@ async fn memory_network_broadcast_queue() { let pub_key_1 = get_pubkey(); let network1 = MemoryNetwork::new( pub_key_1, - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), group.clone(), Option::None, ); let pub_key_2 = get_pubkey(); let network2 = MemoryNetwork::new( pub_key_2, - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), group, Option::None, ); @@ -300,14 +300,14 @@ async fn memory_network_test_in_flight_message_count() { let pub_key_1 = get_pubkey(); let network1 = MemoryNetwork::new( pub_key_1, - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), group.clone(), Option::None, ); let pub_key_2 = get_pubkey(); let network2 = MemoryNetwork::new( pub_key_2, - NetworkingMetricsValue::new(), + NetworkingMetricsValue::default(), group, Option::None, ); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 557543c10a..a81d3b3bbd 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,7 +11,7 @@ use crate::{ error::HotShotError, simple_certificate::QuorumCertificate, traits::{ - metrics::{Counter, Gauge, Histogram, Label, Metrics}, + metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, node_implementation::NodeType, }, utils::Terminator, @@ -66,8 +66,6 @@ pub struct Consensus { /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces #[derive(Clone, Debug)] pub struct ConsensusMetricsValue { - /// The values that are being tracked - pub values: Arc>, /// The number of last synced block height pub last_synced_block_height: Box, /// The number of last decided view @@ -205,14 +203,9 @@ impl Label for ConsensusMetrics { impl ConsensusMetricsValue { /// Create a new instance of this [`ConsensusMetricsValue`] struct, setting all the counters and gauges #[must_use] - pub fn new() -> Self { - let values = Arc::default(); - let metrics: Box = Box::new(ConsensusMetrics { - prefix: String::new(), - values: Arc::clone(&values), - }); + #[allow(clippy::borrowed_box)] + pub fn new(metrics: &Box) -> Self { Self { - values, last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), @@ -233,7 +226,7 @@ impl ConsensusMetricsValue { impl Default for ConsensusMetricsValue { fn default() -> Self { - Self::new() + Self::new(&NoMetrics::boxed()) } } From 090afe4fc767803404e8035f9f45ee31d4d0afd2 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 6 Dec 2023 11:48:50 -0800 Subject: [PATCH 0500/1393] nit --- hotshot/src/traits/networking.rs | 5 ++--- types/src/consensus.rs | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 87851e3454..dfc2100b74 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -161,8 +161,7 @@ impl Label for NetworkingMetrics { impl NetworkingMetricsValue { /// Create a new instance of this [`NetworkingMetricsValue`] struct, setting all the counters and gauges #[must_use] - #[allow(clippy::borrowed_box)] - pub fn new(metrics: &Box) -> Self { + pub fn new(metrics: &dyn Metrics) -> Self { Self { connected_peers: metrics.create_gauge(String::from("connected_peers"), None), incoming_direct_message_count: metrics @@ -181,6 +180,6 @@ impl NetworkingMetricsValue { impl Default for NetworkingMetricsValue { fn default() -> Self { - Self::new(&NoMetrics::boxed()) + Self::new(&*NoMetrics::boxed()) } } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a81d3b3bbd..cc98c48d51 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -203,8 +203,7 @@ impl Label for ConsensusMetrics { impl ConsensusMetricsValue { /// Create a new instance of this [`ConsensusMetricsValue`] struct, setting all the counters and gauges #[must_use] - #[allow(clippy::borrowed_box)] - pub fn new(metrics: &Box) -> Self { + pub fn new(metrics: &dyn Metrics) -> Self { Self { last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), @@ -226,7 +225,7 @@ impl ConsensusMetricsValue { impl Default for ConsensusMetricsValue { fn default() -> Self { - Self::new(&NoMetrics::boxed()) + Self::new(&*NoMetrics::boxed()) } } From ea68c602e511157d27d43bf5af7d2d069e613bd3 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 6 Dec 2023 18:23:49 -0800 Subject: [PATCH 0501/1393] add test_consensus_no_vote_without_vid_share, cannot understand why test_consensus_with_vid_vote do not vote in view2 --- testing/tests/consensus_task.rs | 170 +++++++++++++++++--------------- 1 file changed, 91 insertions(+), 79 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index aeff90d895..4ee57204db 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -23,7 +23,6 @@ use hotshot_types::{ }; use std::collections::HashMap; -use tracing::error; async fn build_vote( handle: &SystemContextHandle, @@ -148,7 +147,6 @@ async fn test_consensus_task() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_consensus_with_vid_vote() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; @@ -166,9 +164,9 @@ async fn test_consensus_with_vid_vote() { let handle = build_system_handle(2).await.0; // We assign node's key pair rather than read from config file since it's a test - // In view 2, node 2 is the leader. let (private_key_view1, public_key_view1) = key_pair_for_id(1); - // let (private_key_view2, public_key_view2) = key_pair_for_id(2); + // In view 2, node 2 is the leader. + let (private_key_view2, public_key_view2) = key_pair_for_id(2); // For the test of vote logic with vid let api: HotShotConsensusApi = HotShotConsensusApi { @@ -201,113 +199,127 @@ async fn test_consensus_with_vid_vote() { let proposal_view1 = build_quorum_proposal(&handle, &private_key_view1, 1).await; // Do a view change, so that it's not the genesis view, and vid vote is needed input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - error!("proposal_view1 = {:?}", proposal_view1); input.push(HotShotEvent::QuorumProposalRecv( proposal_view1.clone(), public_key_view1, )); - // followings are for the test of vote logic with vid - input.push(HotShotEvent::TransactionsSequenced( - encoded_transactions.clone(), - (), - ViewNumber::new(1), - )); - input.push(HotShotEvent::BlockReady( - vid_disperse.clone(), - ViewNumber::new(1), - )); - input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); - - output.insert( HotShotEvent::QuorumProposalRecv(proposal_view1.clone(), public_key_view1), 1, ); + // Since it's genesis view, node can vote without dac and vid share + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view1.data).await { + output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); + output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); + } + + + + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; + // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader + input.push(HotShotEvent::QuorumProposalRecv( + proposal_view2.clone(), + public_key_view2, + )); + // followings are for the test of vote logic with vid + input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); + output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(1)), + HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), 1, ); + output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { + // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + // } + output.insert( - HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(1)), - 2, + HotShotEvent::ViewChange(ViewNumber::new(1)), + 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input ); output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), - 1, + HotShotEvent::ViewChange(ViewNumber::new(2)), + 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input ); + + input.push(HotShotEvent::Shutdown); + output.insert(HotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, event_stream| { + add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + }; + + run_harness(input, output, None, build_fn).await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_no_vote_without_vid_share() { + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + // We assign node's key pair rather than read from config file since it's a test + let (private_key_view1, public_key_view1) = key_pair_for_id(1); + // In view 2, node 2 is the leader. + let (private_key_view2, public_key_view2) = key_pair_for_id(2); + + let mut input = Vec::new(); + let mut output = HashMap::new(); + + let proposal_view1 = build_quorum_proposal(&handle, &private_key_view1, 1).await; + // Do a view change, so that it's not the genesis view, and vid vote is needed + input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(HotShotEvent::QuorumProposalRecv( + proposal_view1.clone(), + public_key_view1, + )); + + output.insert( - HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 2, // 2 occurrences: 1 from `input`, 1 from the DA task + HotShotEvent::QuorumProposalRecv(proposal_view1.clone(), public_key_view1), + 1, ); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + // Since it's genesis view, node can vote without dac and vid share if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view1.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; + input.push(HotShotEvent::QuorumProposalRecv( + proposal_view2.clone(), + public_key_view2, + )); - - // input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - // let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - // error!("proposal_view2 = {:?}", proposal_view2); - // // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - // input.push(HotShotEvent::QuorumProposalRecv( - // proposal_view2.clone(), - // public_key_view2, - // )); - // // followings are for the test of vote logic with vid - // input.push(HotShotEvent::TransactionsSequenced( - // encoded_transactions.clone(), - // (), - // ViewNumber::new(2), - // )); - // input.push(HotShotEvent::BlockReady( - // vid_disperse.clone(), - // ViewNumber::new(2), - // )); - // input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - // input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); - - // output.insert( - // HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), - // 1, - // ); - // output.insert( - // HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - // 1, - // ); - // output.insert( - // HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), - // 2, - // ); - // output.insert( - // HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), - // 1, - // ); - // output.insert( - // HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - // 2, // 2 occurrences: 1 from `input`, 1 from the DA task - // ); - // output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { - // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - // input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - // output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); - // } + // Without vid share, there is no HotShotEvent::QuorumVoteSend in the output. + output.insert( + HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), + 1, + ); output.insert( HotShotEvent::ViewChange(ViewNumber::new(1)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input ); - // output.insert( - // HotShotEvent::ViewChange(ViewNumber::new(2)), - // 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input - // ); + output.insert( + HotShotEvent::ViewChange(ViewNumber::new(2)), + 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input + ); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); From 516310f94dd52faae7dc1a47e50d7e38986c3cec Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 6 Dec 2023 22:10:52 -0800 Subject: [PATCH 0502/1393] add todo comments and rework the flow of test_consensus_with_vid_vote --- task-impls/src/consensus.rs | 2 +- testing/tests/consensus_task.rs | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e90ffac24f..5c97bb538f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -875,7 +875,7 @@ impl, A: ConsensusApi + } } HotShotEvent::DACRecv(cert) => { - debug!("DAC Recved for view ! {}", *cert.view_number); + debug!("DAC Received for view ! {}", *cert.view_number); let view = cert.view_number; self.quorum_network diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 4ee57204db..4cc6f1ecbb 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -219,20 +219,26 @@ async fn test_consensus_with_vid_vote() { input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + // Sishan TODO: this proposal_view2's justify_qc does not have correct view number let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; + + // For the test of vote logic with vid + // Sishan TODO: Still need a valid DAC cert + input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); + // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader input.push(HotShotEvent::QuorumProposalRecv( proposal_view2.clone(), public_key_view2, )); - // followings are for the test of vote logic with vid - input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); output.insert( HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), 1, ); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + + // Sishan TODO: Uncomment this after the above TODO is done // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); // } From de77a19c5292e54440312288ccefc939ba91c302 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 6 Dec 2023 22:12:28 -0800 Subject: [PATCH 0503/1393] lint --- testing/tests/consensus_task.rs | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 4cc6f1ecbb..6326ac743a 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -109,7 +109,7 @@ async fn test_consensus_task() { proposal.clone(), public_key, )); - + input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); @@ -121,10 +121,7 @@ async fn test_consensus_task() { HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), 1, ); - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), - 1, - ); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); @@ -149,13 +146,12 @@ async fn test_consensus_task() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_with_vid_vote() { use hotshot_task_impls::harness::run_harness; - use hotshot_testing::task_helpers::build_system_handle; use hotshot_testing::block_types::TestTransaction; + use hotshot_testing::task_helpers::build_system_handle; use hotshot_testing::task_helpers::vid_init; use hotshot_types::data::VidSchemeTrait; use hotshot_types::{ - data::VidDisperse, message::Proposal, - traits::node_implementation::NodeType, + data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, }; use std::marker::PhantomData; @@ -216,12 +212,10 @@ async fn test_consensus_with_vid_vote() { output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } - - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); // Sishan TODO: this proposal_view2's justify_qc does not have correct view number let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - + // For the test of vote logic with vid // Sishan TODO: Still need a valid DAC cert input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); @@ -244,11 +238,11 @@ async fn test_consensus_with_vid_vote() { // } output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), + HotShotEvent::ViewChange(ViewNumber::new(1)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input ); output.insert( - HotShotEvent::ViewChange(ViewNumber::new(2)), + HotShotEvent::ViewChange(ViewNumber::new(2)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input ); @@ -292,7 +286,6 @@ async fn test_consensus_no_vote_without_vid_share() { public_key_view1, )); - output.insert( HotShotEvent::QuorumProposalRecv(proposal_view1.clone(), public_key_view1), 1, @@ -319,11 +312,11 @@ async fn test_consensus_no_vote_without_vid_share() { ); output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), + HotShotEvent::ViewChange(ViewNumber::new(1)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input ); output.insert( - HotShotEvent::ViewChange(ViewNumber::new(2)), + HotShotEvent::ViewChange(ViewNumber::new(2)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input ); @@ -386,4 +379,3 @@ async fn test_consensus_vote() { run_harness(input, output, None, build_fn).await; } - From d20fef72d991ec961a1967ee301ed35a5724215e Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 7 Dec 2023 12:33:41 -0800 Subject: [PATCH 0504/1393] type --- task-impls/src/consensus.rs | 2 +- testing/tests/consensus_task.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5c97bb538f..d8c3512c09 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -414,7 +414,7 @@ impl, A: ConsensusApi + match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( - "Receved Quorum Propsoal for view {}", + "Received Quorum Propsoal for view {}", *proposal.data.view_number ); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 6326ac743a..e7faebe023 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -158,7 +158,7 @@ async fn test_consensus_with_vid_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let (handle, _event_stream) = build_system_handle(2).await; // We assign node's key pair rather than read from config file since it's a test let (private_key_view1, public_key_view1) = key_pair_for_id(1); // In view 2, node 2 is the leader. From 80cb4d36ea737104f702a109447f6e4ddace704b Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 7 Dec 2023 17:03:34 -0800 Subject: [PATCH 0505/1393] assign proposal_view2.justify_qc to bypass check in the test --- testing/tests/consensus_task.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index e7faebe023..c58c595063 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -212,11 +212,11 @@ async fn test_consensus_with_vid_vote() { output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - // Sishan TODO: this proposal_view2's justify_qc does not have correct view number - let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - // For the test of vote logic with vid + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + let mut proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; + // This proposal_view2's justify_qc does not have correct view number, therefore we assign one + proposal_view2.data.justify_qc.view_number = ViewNumber::new(2); // Sishan TODO: Still need a valid DAC cert input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); From 18a9631d105957114b0be87f2db4bb9d2ea67904 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 8 Dec 2023 08:42:34 -0500 Subject: [PATCH 0506/1393] fix orchestrator validator args --- hotshot/examples/combined/multi-validator.rs | 13 ++++--------- hotshot/examples/libp2p/multi-validator.rs | 12 +++--------- hotshot/examples/webserver/multi-validator.rs | 12 +++--------- orchestrator/src/client.rs | 8 +++----- 4 files changed, 13 insertions(+), 32 deletions(-) diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index ff9b5507bd..d399c575ee 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -6,6 +6,7 @@ use clap::Parser; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_testing::state_types::TestTypes; use std::net::IpAddr; +use surf_disco::Url; use tracing::instrument; use types::VIDNetwork; @@ -21,9 +22,7 @@ struct MultiValidatorArgs { /// Number of validators to run pub num_nodes: u16, /// The address the orchestrator runs on - pub url: String, - /// The port the orchestrator runs on - pub port: u16, + pub url: Url, /// This node's public IP address, for libp2p /// If no IP address is passed in, it will default to 127.0.0.1 pub public_ip: Option, @@ -39,11 +38,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!( - "connecting to orchestrator at {:?}:{:?}", - args.url, - args.port - ); + tracing::error!("connecting to orchestrator at {:?}:{:?}", args.url,); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let url: String = args.url.clone(); @@ -59,7 +54,7 @@ async fn main() { ThisRun, >(ValidatorArgs { url, - port: args.port, + public_ip: args.public_ip, }) .await diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index ff9b5507bd..c5875c5dc6 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -6,6 +6,7 @@ use clap::Parser; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_testing::state_types::TestTypes; use std::net::IpAddr; +use surf_disco::Url; use tracing::instrument; use types::VIDNetwork; @@ -21,9 +22,7 @@ struct MultiValidatorArgs { /// Number of validators to run pub num_nodes: u16, /// The address the orchestrator runs on - pub url: String, - /// The port the orchestrator runs on - pub port: u16, + pub url: Url, /// This node's public IP address, for libp2p /// If no IP address is passed in, it will default to 127.0.0.1 pub public_ip: Option, @@ -39,11 +38,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!( - "connecting to orchestrator at {:?}:{:?}", - args.url, - args.port - ); + tracing::error!("connecting to orchestrator at {:?}:{:?}", args.url,); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let url: String = args.url.clone(); @@ -59,7 +54,6 @@ async fn main() { ThisRun, >(ValidatorArgs { url, - port: args.port, public_ip: args.public_ip, }) .await diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index c9bbebceb6..85ba9a9abe 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -6,6 +6,7 @@ use clap::Parser; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_testing::state_types::TestTypes; use std::net::IpAddr; +use surf_disco::Url; use tracing::instrument; use types::VIDNetwork; @@ -21,9 +22,7 @@ struct MultiValidatorArgs { /// Number of validators to run pub num_nodes: u16, /// The address the orchestrator runs on - pub url: String, - /// The port the orchestrator runs on - pub port: u16, + pub url: Url, /// This node's public IP address, for libp2p /// If no IP address is passed in, it will default to 127.0.0.1 pub public_ip: Option, @@ -39,11 +38,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!( - "connecting to orchestrator at {:?}:{:?}", - args.url, - args.port - ); + tracing::error!("connecting to orchestrator at {:?}", args.url,); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let url = args.url.clone(); @@ -58,7 +53,6 @@ async fn main() { ThisRun, >(ValidatorArgs { url, - port: args.port, public_ip: args.public_ip, }) .await diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 6db01f7c9c..ea3b986228 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -7,6 +7,7 @@ use futures::{Future, FutureExt}; use hotshot_types::traits::node_implementation::NodeType; use surf_disco::{error::ClientError, Client}; +use tide_disco::Url; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { @@ -23,9 +24,7 @@ pub struct OrchestratorClient { /// Arguments passed to the validator pub struct ValidatorArgs { /// The address the orchestrator runs on - pub url: String, - /// The port the orchestrator runs on - pub port: u16, + pub url: Url, /// This node's public IP address, for libp2p /// If no IP address is passed in, it will default to 127.0.0.1 pub public_ip: Option, @@ -34,8 +33,7 @@ pub struct ValidatorArgs { impl OrchestratorClient { /// Creates the client that connects to the orchestrator pub async fn connect_to_orchestrator(args: ValidatorArgs) -> Self { - let base_url = format!("{0}:{1}", args.url, args.port).parse().unwrap(); - let client = surf_disco::Client::::new(base_url); + let client = surf_disco::Client::::new(args.url); // TODO ED: Add healthcheck wait here OrchestratorClient { client } } From e31333d74a64cd14c5bf67d428c04ad9bfc527fd Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 8 Dec 2023 08:54:56 -0500 Subject: [PATCH 0507/1393] Update crates/hotshot/examples/combined/multi-validator.rs Co-authored-by: Mathis --- hotshot/examples/combined/multi-validator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index d399c575ee..9ef67056f5 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -38,7 +38,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!("connecting to orchestrator at {:?}:{:?}", args.url,); + tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let url: String = args.url.clone(); From be1c256e2fe2a7d2d090183b7af3d8959c34a01e Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 8 Dec 2023 08:57:56 -0500 Subject: [PATCH 0508/1393] update error logging --- hotshot/examples/combined/validator.rs | 5 +---- hotshot/examples/libp2p/multi-validator.rs | 2 +- hotshot/examples/libp2p/validator.rs | 5 +---- hotshot/examples/webserver/multi-validator.rs | 2 +- hotshot/examples/webserver/validator.rs | 5 +---- 5 files changed, 5 insertions(+), 14 deletions(-) diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs index 52cbf7bcb1..9b0babfd16 100644 --- a/hotshot/examples/combined/validator.rs +++ b/hotshot/examples/combined/validator.rs @@ -23,10 +23,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = ValidatorArgs::parse(); - info!( - "connecting to orchestrator at {:?}:{:?}", - args.url, args.port - ); + info!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::< TestTypes, DANetwork, diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index c5875c5dc6..bf0fbe95a8 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -38,7 +38,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!("connecting to orchestrator at {:?}:{:?}", args.url,); + tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let url: String = args.url.clone(); diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 52cbf7bcb1..9b0babfd16 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -23,10 +23,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = ValidatorArgs::parse(); - info!( - "connecting to orchestrator at {:?}:{:?}", - args.url, args.port - ); + info!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::< TestTypes, DANetwork, diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index 85ba9a9abe..1e20151a6d 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -38,7 +38,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!("connecting to orchestrator at {:?}", args.url,); + tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { let url = args.url.clone(); diff --git a/hotshot/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs index 52cbf7bcb1..9b0babfd16 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/hotshot/examples/webserver/validator.rs @@ -23,10 +23,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = ValidatorArgs::parse(); - info!( - "connecting to orchestrator at {:?}:{:?}", - args.url, args.port - ); + info!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::< TestTypes, DANetwork, From 04759ec8c4afc0c14411adf458d09c6e7b669f24 Mon Sep 17 00:00:00 2001 From: Rob Date: Fri, 8 Dec 2023 09:06:58 -0500 Subject: [PATCH 0509/1393] update 'all' examples --- hotshot/examples/combined/all.rs | 8 +++++--- hotshot/examples/libp2p/all.rs | 8 ++++---- hotshot/examples/webserver/all.rs | 8 +++++--- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index b55d0d9374..3324c583cd 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -43,6 +43,8 @@ async fn main() { let _sender = Arc::new(server_shutdown_sender_cdn); let _sender = Arc::new(server_shutdown_sender_da); + let orchestrator_url = Url::parse("http://localhost:4444").unwrap(); + async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, @@ -77,7 +79,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - url: Url::parse("http://localhost:4444").unwrap(), + url: orchestrator_url.clone(), config_file: args.config_file.clone(), })); @@ -89,6 +91,7 @@ async fn main() { > = load_config_from_file::(args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { + let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::< TestTypes, @@ -99,8 +102,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - url: "http://localhost".to_string(), - port: 4444, + url: orchestrator_url, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), }) .await diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index e375269dd2..f806cf3ee1 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -34,6 +34,7 @@ async fn main() { // use configfile args let args = ConfigArgs::parse(); + let orchestrator_url = Url::parse("http://localhost:4444").unwrap(); // orchestrator async_spawn(run_orchestrator::< @@ -44,8 +45,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - url: Url::parse("http://localhost:4444").unwrap(), - + url: orchestrator_url.clone(), config_file: args.config_file.clone(), })); @@ -56,6 +56,7 @@ async fn main() { > = load_config_from_file::(args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { + let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::< TestTypes, @@ -66,8 +67,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - url: "http://localhost".to_string(), - port: 4444, + url: orchestrator_url, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), }) .await diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index 93cf45d87b..c52f7512d3 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -60,6 +60,8 @@ async fn main() { } }); + let orchestrator_url = Url::parse("http://localhost:4444").unwrap(); + // web server orchestrator async_spawn(run_orchestrator::< TestTypes, @@ -69,7 +71,7 @@ async fn main() { VIDNetwork, NodeImpl, >(OrchestratorArgs { - url: Url::parse("http://localhost:4444").unwrap(), + url: orchestrator_url.clone(), config_file: args.config_file.clone(), })); @@ -80,6 +82,7 @@ async fn main() { > = load_config_from_file::(args.config_file); let mut nodes = Vec::new(); for _ in 0..(config.config.total_nodes.get()) { + let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::< TestTypes, @@ -90,8 +93,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - url: "http://localhost".to_string(), - port: 4444, + url: orchestrator_url, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), }) .await From 3f352f5e23121210fe5a53bb837a06ba4682708e Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 8 Dec 2023 11:53:22 -0800 Subject: [PATCH 0510/1393] oh no assigning manually cannot create justify_qc --- testing/tests/consensus_task.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index c58c595063..dadda5fb80 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -153,6 +153,7 @@ async fn test_consensus_with_vid_vote() { use hotshot_types::{ data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, }; + use tracing::error; use std::marker::PhantomData; async_compatibility_layer::logging::setup_logging(); @@ -214,9 +215,8 @@ async fn test_consensus_with_vid_vote() { // For the test of vote logic with vid input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - let mut proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - // This proposal_view2's justify_qc does not have correct view number, therefore we assign one - proposal_view2.data.justify_qc.view_number = ViewNumber::new(2); + // Sishan TOOD: this proposal on view 2 doesn't have a valid justify QC + let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; // Sishan TODO: Still need a valid DAC cert input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); From f76f42a9698f93f009264c1ec180a112ba090a0f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 8 Dec 2023 16:52:27 -0500 Subject: [PATCH 0511/1393] VID share distribution algorithm (2) (#2163) * original commit * second set of changes * increase test duration * Merge develop into rm/vid-share-distribution-algo-take2 * build, lint, etc examples by default * remove quorum membership from DA * remove commitment calculation from payload * remove quorum membership OTW * merge --- hotshot/Cargo.toml | 14 ------ hotshot/examples/combined/multi-validator.rs | 2 +- hotshot/examples/infra/mod.rs | 4 +- hotshot/examples/libp2p/multi-validator.rs | 2 +- hotshot/src/tasks/mod.rs | 12 ++++- hotshot/src/traits/storage/memory_storage.rs | 7 ++- task-impls/src/consensus.rs | 8 +--- task-impls/src/da.rs | 10 +++- task-impls/src/events.rs | 2 +- task-impls/src/network.rs | 6 +-- task-impls/src/vid.rs | 31 ++++++------ testing/src/block_types.rs | 17 ++----- testing/src/task_helpers.rs | 30 ++++++++++-- testing/src/test_builder.rs | 2 +- testing/tests/da_task.rs | 12 ++++- testing/tests/network_task.rs | 17 ++++--- testing/tests/vid_task.rs | 18 ++++--- types/src/data.rs | 50 +++++++++++++++++--- types/src/traits/block_contents.rs | 24 +++++++--- 19 files changed, 172 insertions(+), 96 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 612872f976..49af631ee4 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -21,74 +21,60 @@ hotshot-testing = [] # libp2p [[example]] name = "validator-libp2p" -required-features = ["libp2p/rsa"] path = "examples/libp2p/validator.rs" [[example]] name = "multi-validator-libp2p" -required-features = ["libp2p/rsa"] path = "examples/libp2p/multi-validator.rs" [[example]] name = "orchestrator-libp2p" -required-features = ["libp2p/rsa"] path = "examples/libp2p/orchestrator.rs" [[example]] name = "all-libp2p" -required-features = ["libp2p/rsa"] path = "examples/libp2p/all.rs" # webserver [[example]] name = "webserver" -required-features = ["libp2p/rsa"] path = "examples/webserver/webserver.rs" [[example]] name = "orchestrator-webserver" -required-features = ["libp2p/rsa"] path = "examples/webserver/orchestrator.rs" [[example]] name = "validator-webserver" -required-features = ["libp2p/rsa"] path = "examples/webserver/validator.rs" [[example]] name = "multi-validator-webserver" -required-features = ["libp2p/rsa"] path = "examples/webserver/multi-validator.rs" [[example]] name = "multi-webserver" -required-features = ["libp2p/rsa"] path = "examples/webserver/multi-webserver.rs" [[example]] name = "all-webserver" -required-features = ["libp2p/rsa"] path = "examples/webserver/all.rs" # combined [[example]] name = "all-combined" -required-features = ["libp2p/rsa"] path = "examples/combined/all.rs" [[example]] name = "multi-validator-combined" -required-features = ["libp2p/rsa"] path = "examples/combined/multi-validator.rs" [[example]] name = "validator-combined" -required-features = ["libp2p/rsa"] path = "examples/combined/validator.rs" [[example]] name = "orchestrator-combined" -required-features = ["libp2p/rsa"] path = "examples/combined/orchestrator.rs" [dependencies] diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index 9ef67056f5..5b550caf22 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -41,7 +41,7 @@ async fn main() { tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { - let url: String = args.url.clone(); + let url = args.url.clone(); let node = async_spawn(async move { infra::main_entry_point::< diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 26d919fc0c..9243eb9cef 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -153,9 +153,9 @@ async fn webserver_network_from_config( let WebServerConfig { url, wait_between_polls, - }: WebServerConfig = config.clone().web_server_config.unwrap(); + }: WebServerConfig = config.web_server_config.unwrap(); - WebServerNetwork::create(url, wait_between_polls, pub_key.clone(), false) + WebServerNetwork::create(url, wait_between_polls, pub_key, false) } async fn libp2p_network_from_config( diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index bf0fbe95a8..9fb1199b88 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -41,7 +41,7 @@ async fn main() { tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for _ in 0..args.num_nodes { - let url: String = args.url.clone(); + let url = args.url.clone(); let node = async_spawn(async move { infra::main_entry_point::< diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 50cd306e86..95679ab3d5 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -23,6 +23,7 @@ use hotshot_task_impls::{ vid::{VIDTaskState, VIDTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; +use hotshot_types::traits::election::Membership; use hotshot_types::{ event::Event, message::Messages, @@ -206,7 +207,15 @@ pub async fn add_consensus_task>( let registry = task_runner.registry.clone(); let (payload, metadata) = ::genesis(); // Impossible for `unwrap` to fail on the genesis payload. - let payload_commitment = vid_commitment(&payload.encode().unwrap().collect()); + let payload_commitment = vid_commitment( + &payload.encode().unwrap().collect(), + handle + .hotshot + .inner + .memberships + .quorum_membership + .total_nodes(), + ); // build the consensus task let consensus_state = ConsensusTaskState { registry: registry.clone(), @@ -353,6 +362,7 @@ pub async fn add_da_task>( consensus: handle.hotshot.get_consensus(), da_membership: c_api.inner.memberships.da_membership.clone().into(), da_network: c_api.inner.networks.da_network.clone().into(), + quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), cur_view: TYPES::Time::new(0), vote_collector: None, event_stream: event_stream.clone(), diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 5b7ddfd25e..94ffdef83d 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -117,7 +117,10 @@ mod test { use hotshot_types::{ data::{fake_commitment, genesis_proposer_id, Leaf}, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, state::ConsensusTime}, + traits::{ + block_contents::genesis_vid_commitment, node_implementation::NodeType, + state::ConsensusTime, + }, }; use std::marker::PhantomData; use tracing::instrument; @@ -126,7 +129,7 @@ mod test { let payload = TestBlockPayload::genesis(); let header = TestBlockHeader { block_number: 0, - payload_commitment: payload.payload_commitment, + payload_commitment: genesis_vid_commitment(), }; let dummy_leaf_commit = fake_commitment::>(); let data = hotshot_types::simple_vote::QuorumData { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e90ffac24f..04e13f9c65 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -618,12 +618,8 @@ impl, A: ConsensusApi + encoded_txns.clone().into_iter(), leaf.get_block_header().metadata(), ); - if let Err(e) = leaf.fill_block_payload(payload) { - error!( - "Saved block payload and commitment don't match: {:?}", - e - ); - } + + leaf.fill_block_payload_unchecked(payload); } leaf_views.push(leaf.clone()); diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index dce9a470c2..cbf438484b 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -57,6 +57,11 @@ pub struct DATaskState< /// Membership for the DA committee pub da_membership: Arc, + /// Membership for the quorum committee + /// We need this only for calculating the proper VID scheme + /// from the number of nodes in the quorum. + pub quorum_membership: Arc, + /// Network for DA pub da_network: Arc, @@ -113,7 +118,10 @@ impl, A: ConsensusApi + return None; } - let payload_commitment = vid_commitment(&proposal.data.encoded_transactions); + let payload_commitment = vid_commitment( + &proposal.data.encoded_transactions, + self.quorum_membership.total_nodes(), + ); let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 08d61bbe76..9409b34067 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -92,7 +92,7 @@ pub enum HotShotEvent { VidCommitment, ::Metadata, ), - /// Event when the transactions task has sequenced transactions. Contains the encoded transactions + /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number TransactionsSequenced( Vec, ::Metadata, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a88da8b3ac..cdef8af4d2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -56,7 +56,7 @@ impl NetworkMessageTaskState { let event = match consensus_message.0 { Either::Left(general_message) => match general_message { GeneralConsensusMessage::Proposal(proposal) => { - HotShotEvent::QuorumProposalRecv(proposal.clone(), sender) + HotShotEvent::QuorumProposalRecv(proposal, sender) } GeneralConsensusMessage::Vote(vote) => { HotShotEvent::QuorumVoteRecv(vote.clone()) @@ -92,7 +92,7 @@ impl NetworkMessageTaskState { }, Either::Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(proposal) => { - HotShotEvent::DAProposalRecv(proposal.clone(), sender) + HotShotEvent::DAProposalRecv(proposal, sender) } CommitteeConsensusMessage::DAVote(vote) => { HotShotEvent::DAVoteRecv(vote.clone()) @@ -236,7 +236,7 @@ impl> HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate.clone()), + GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), ))), TransmitType::Broadcast, None, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 9118c9082a..8e72414743 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -20,10 +20,7 @@ use hotshot_types::{ }; use hotshot_types::{ data::{test_srs, VidScheme, VidSchemeTrait}, - traits::{ - block_contents::{NUM_CHUNKS, NUM_STORAGE_NODES}, - network::ConsensusIntentEvent, - }, + traits::network::ConsensusIntentEvent, }; use hotshot_task::event_stream::EventStream; @@ -54,7 +51,7 @@ pub struct VIDTaskState< pub consensus: Arc>>, /// Network for all nodes pub network: Arc, - /// Membership for teh quorum + /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -81,12 +78,19 @@ impl, A: ConsensusApi + ) -> Option { match event { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { + // get the number of quorum committee members to be used for VID calculation + let num_quorum_committee = self.membership.total_nodes(); + // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, &srs).unwrap(); + let srs = test_srs(num_quorum_committee); + + // calculate the last power of two + // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 + // issue: https://github.com/EspressoSystems/HotShot/issues/2152 + let chunk_size = 1 << num_quorum_committee.ilog2(); + + // calculate vid shares + let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); // send the commitment and metadata to consensus for block building @@ -100,12 +104,7 @@ impl, A: ConsensusApi + // send the block to the VID dispersal function self.event_stream .publish(HotShotEvent::BlockReady( - VidDisperse { - view_number, - payload_commitment: vid_disperse.commit, - shares: vid_disperse.shares, - common: vid_disperse.common, - }, + VidDisperse::from_membership(view_number, vid_disperse, &self.membership), view_number, )) .await; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index a49e93c5ed..39fa591795 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -5,9 +5,9 @@ use std::{ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, + data::{BlockError, VidCommitment}, traits::{ - block_contents::{vid_commitment, BlockHeader, Transaction}, + block_contents::{genesis_vid_commitment, BlockHeader, Transaction}, state::TestableBlock, BlockPayload, }, @@ -67,8 +67,6 @@ impl Transaction for TestTransaction {} pub struct TestBlockPayload { /// List of transactions. pub transactions: Vec, - /// VID commitment to the block payload. - pub payload_commitment: ::Commit, } impl TestBlockPayload { @@ -80,10 +78,8 @@ impl TestBlockPayload { pub fn genesis() -> Self { let txns: Vec = vec![0]; // It's impossible for `encode` to fail because the transaciton length is very small. - let encoded = TestTransaction::encode(vec![TestTransaction(txns.clone())]).unwrap(); TestBlockPayload { transactions: vec![TestTransaction(txns)], - payload_commitment: vid_commitment(&encoded), } } } @@ -114,11 +110,9 @@ impl BlockPayload for TestBlockPayload { transactions: impl IntoIterator, ) -> Result<(Self, Self::Metadata), Self::Error> { let txns_vec: Vec = transactions.into_iter().collect(); - let encoded = TestTransaction::encode(txns_vec.clone())?; Ok(( Self { transactions: txns_vec, - payload_commitment: vid_commitment(&encoded), }, (), )) @@ -146,10 +140,7 @@ impl BlockPayload for TestBlockPayload { current_index = next_index; } - Self { - transactions, - payload_commitment: vid_commitment(&encoded_vec), - } + Self { transactions } } fn genesis() -> (Self, Self::Metadata) { @@ -200,7 +191,7 @@ impl BlockHeader for TestBlockHeader { ( Self { block_number: 0, - payload_commitment: payload.payload_commitment, + payload_commitment: genesis_vid_commitment(), }, payload, metadata, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index e4a66fdcc1..dc7a3b43d0 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,8 +18,8 @@ use hotshot_types::{ message::Proposal, simple_certificate::QuorumCertificate, traits::{ + block_contents::vid_commitment, block_contents::BlockHeader, - block_contents::{vid_commitment, NUM_CHUNKS, NUM_STORAGE_NODES}, consensus_api::ConsensusSharedApi, election::Membership, node_implementation::NodeType, @@ -126,7 +126,15 @@ async fn build_quorum_proposal_and_signature( // every event input is seen on the event stream in the output. let block = ::genesis(); - let payload_commitment = vid_commitment(&block.encode().unwrap().collect()); + let payload_commitment = vid_commitment( + &block.encode().unwrap().collect(), + handle + .hotshot + .inner + .memberships + .quorum_membership + .total_nodes(), + ); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); let leaf = Leaf { view_number: ViewNumber::new(view), @@ -171,7 +179,19 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey (private_key, public_key) } -pub fn vid_init() -> VidScheme { - let srs = hotshot_types::data::test_srs(NUM_STORAGE_NODES); - VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap() +pub fn vid_init( + membership: TYPES::Membership, + view_number: TYPES::Time, +) -> VidScheme { + let num_committee = membership.get_committee(view_number).len(); + + // calculate the last power of two + // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 + // issue: https://github.com/EspressoSystems/HotShot/issues/2152 + let chunk_size = 1 << num_committee.ilog2(); + + // TODO + let srs = hotshot_types::data::test_srs(num_committee); + + VidScheme::new(chunk_size, num_committee, srs).unwrap() } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index d3734bfcb3..4e59dc75d1 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -137,7 +137,7 @@ impl TestMetadata { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. - duration: Duration::new(300, 0), + duration: Duration::new(340, 0), }, ), overall_safety_properties: OverallSafetyPropertiesDescription { diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 81e01425eb..4e786713fd 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -8,7 +8,7 @@ use hotshot_types::{ data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, traits::{ - block_contents::vid_commitment, consensus_api::ConsensusSharedApi, + block_contents::vid_commitment, consensus_api::ConsensusSharedApi, election::Membership, node_implementation::NodeType, state::ConsensusTime, }, }; @@ -37,7 +37,15 @@ async fn test_da_task() { let pub_key = *api.public_key(); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); - let payload_commitment = vid_commitment(&encoded_transactions); + let payload_commitment = vid_commitment( + &encoded_transactions, + handle + .hotshot + .inner + .memberships + .quorum_membership + .total_nodes(), + ); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let signature = diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 02188d0bea..4252ffb770 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -32,7 +32,10 @@ async fn test_network_task() { }; let pub_key = *api.public_key(); let priv_key = api.private_key(); - let vid = vid_init(); + // quorum membership for VID share distribution + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -53,12 +56,12 @@ async fn test_network_task() { }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - let da_vid_disperse_inner = VidDisperse { - view_number: da_proposal.data.view_number, - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }; + let da_vid_disperse_inner = VidDisperse::from_membership( + da_proposal.data.view_number, + vid_disperse, + &quorum_membership.clone().into(), + ); + // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 let da_vid_disperse = Proposal { diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 1c224db08d..8757a3d414 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -33,7 +33,10 @@ async fn test_vid_task() { }; let pub_key = *api.public_key(); - let vid = vid_init(); + // quorum membership for VID share distribution + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -51,12 +54,13 @@ async fn test_vid_task() { signature, _pd: PhantomData, }; - let vid_disperse = VidDisperse { - view_number: message.data.view_number, - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }; + + let vid_disperse = VidDisperse::from_membership( + message.data.view_number, + vid_disperse, + &quorum_membership.clone().into(), + ); + let vid_proposal = Proposal { data: vid_disperse.clone(), signature: message.signature.clone(), diff --git a/types/src/data.rs b/types/src/data.rs index 71a6170df8..fc8797f0db 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,6 +8,7 @@ use crate::{ traits::{ block_contents::vid_commitment, block_contents::BlockHeader, + election::Membership, node_implementation::NodeType, signature_key::{EncodedPublicKey, SignatureKey}, state::{ConsensusTime, TestableBlock, TestableState}, @@ -23,14 +24,18 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use hotshot_constants::GENESIS_PROPOSER_ID; use hotshot_utils::bincode::bincode_opts; -// use jf_primitives::pcs::prelude::Commitment; -use jf_primitives::pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}; +use jf_primitives::{ + pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}, + vid::VidDisperse as JfVidDisperse, +}; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{ + collections::BTreeMap, fmt::{Debug, Display}, hash::Hash, + sync::Arc, }; /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. @@ -138,12 +143,36 @@ pub struct VidDisperse { pub view_number: TYPES::Time, /// Block payload commitment pub payload_commitment: VidCommitment, - /// VID shares dispersed among storage nodes - pub shares: Vec<::Share>, + /// A storage node's key and its corresponding VID share + pub shares: BTreeMap::Share>, /// VID common data sent to all storage nodes pub common: ::Common, } +impl VidDisperse { + /// Create VID dispersal from a specified membership + /// Uses the specified function to calculate share dispersal + /// Allows for more complex stake table functionality + pub fn from_membership( + view_number: TYPES::Time, + mut vid_disperse: JfVidDisperse, + membership: &Arc, + ) -> Self { + let shares = membership + .get_committee(view_number) + .iter() + .map(|node| (node.clone(), vid_disperse.shares.remove(0))) + .collect(); + + Self { + view_number, + shares, + common: vid_disperse.common, + payload_commitment: vid_disperse.commit, + } + } +} + /// Trusted KZG setup for VID. /// /// TESTING ONLY: don't use this in production @@ -370,10 +399,12 @@ impl Leaf { /// /// # Errors /// - /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()`. + /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()` + /// or if the transactions are of invalid length pub fn fill_block_payload( &mut self, block_payload: TYPES::BlockPayload, + num_storage_nodes: usize, ) -> Result<(), BlockError> { let encoded_txns = match block_payload.encode() { // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. @@ -381,13 +412,20 @@ impl Leaf { Ok(encoded) => encoded.into_iter().collect(), Err(_) => return Err(BlockError::InvalidTransactionLength), }; - let commitment = vid_commitment(&encoded_txns); + let commitment = vid_commitment(&encoded_txns, num_storage_nodes); if commitment != self.block_header.payload_commitment() { return Err(BlockError::InconsistentPayloadCommitment); } self.block_payload = Some(block_payload); Ok(()) } + + /// Fill this leaf with the block payload, without checking + /// header and payload consistency + pub fn fill_block_payload_unchecked(&mut self, block_payload: TYPES::BlockPayload) { + self.block_payload = Some(block_payload); + } + /// Optional block payload. pub fn get_block_payload(&self) -> Option { self.block_payload.clone() diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 859bf3e173..c75315fe58 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -59,7 +59,8 @@ pub trait BlockPayload: transactions: impl IntoIterator, ) -> Result<(Self, Self::Metadata), Self::Error>; - /// Build a payload with the encoded transaction bytes and metadata. + /// Build a payload with the encoded transaction bytes, metadata, + /// and the associated number of VID storage nodes /// /// `I` may be, but not necessarily is, the `Encode` type directly from `fn encode`. fn from_bytes(encoded_transactions: I, metadata: Self::Metadata) -> Self @@ -83,16 +84,25 @@ pub trait BlockPayload: /// # Panics /// If the VID computation fails. #[must_use] -pub fn vid_commitment(encoded_transactions: &Vec) -> ::Commit { +pub fn vid_commitment( + encoded_transactions: &Vec, + num_storage_nodes: usize, +) -> ::Commit { + let num_chunks = 1 << num_storage_nodes.ilog2(); + // TODO - let srs = test_srs(NUM_STORAGE_NODES); - // TODO We are using constant numbers for now, but they will change as the quorum size - // changes. - // TODO - let vid = VidScheme::new(NUM_CHUNKS, NUM_STORAGE_NODES, srs).unwrap(); + let srs = test_srs(num_storage_nodes); + + let vid = VidScheme::new(num_chunks, num_storage_nodes, srs).unwrap(); vid.commit_only(encoded_transactions).unwrap() } +/// Computes the (empty) genesis VID commitment +#[must_use] +pub fn genesis_vid_commitment() -> ::Commit { + vid_commitment(&vec![], 8) +} + /// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable From bd808cd582fbd11b656f9cd60f8bcd3e8a32a026 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 8 Dec 2023 21:30:47 -0500 Subject: [PATCH 0512/1393] [Tech Debt] Allow nodes to rejoin by saving index and config (#2168) * allow nodes to rejoin by saving config file and index * refactor * improvements and comments * Merge in main * PR improvements --- hotshot/Cargo.toml | 2 +- hotshot/examples/combined/all.rs | 1 + hotshot/examples/combined/multi-validator.rs | 25 +-- hotshot/examples/infra/mod.rs | 46 +++-- hotshot/examples/libp2p/all.rs | 1 + hotshot/examples/libp2p/multi-validator.rs | 24 +-- hotshot/examples/webserver/all.rs | 1 + hotshot/examples/webserver/multi-validator.rs | 25 +-- orchestrator/Cargo.toml | 1 + orchestrator/src/client.rs | 98 ++++++++--- orchestrator/src/config.rs | 161 ++++++++++++++++++ 11 files changed, 275 insertions(+), 110 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 49af631ee4..74bd1eab9d 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -127,4 +127,4 @@ blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } serde_json = "1.0.108" toml = { workspace = true } -hotshot-testing = { path = "../testing" } +hotshot-testing = { path = "../testing" } \ No newline at end of file diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index 3324c583cd..5f1e5a0408 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -104,6 +104,7 @@ async fn main() { >(ValidatorArgs { url: orchestrator_url, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, }) .await }); diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index 5b550caf22..79c05e5867 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -3,10 +3,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use hotshot_testing::state_types::TestTypes; -use std::net::IpAddr; -use surf_disco::Url; use tracing::instrument; use types::VIDNetwork; @@ -17,17 +15,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[derive(Parser, Debug, Clone)] -struct MultiValidatorArgs { - /// Number of validators to run - pub num_nodes: u16, - /// The address the orchestrator runs on - pub url: Url, - /// This node's public IP address, for libp2p - /// If no IP address is passed in, it will default to 127.0.0.1 - pub public_ip: Option, -} - #[cfg_attr( async_executor_impl = "tokio", tokio::main(flavor = "multi_thread", worker_threads = 2) @@ -40,8 +27,8 @@ async fn main() { let args = MultiValidatorArgs::parse(); tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); - for _ in 0..args.num_nodes { - let url = args.url.clone(); + for node_index in 0..args.num_nodes { + let args = args.clone(); let node = async_spawn(async move { infra::main_entry_point::< @@ -52,11 +39,7 @@ async fn main() { VIDNetwork, NodeImpl, ThisRun, - >(ValidatorArgs { - url, - - public_ip: args.public_ip, - }) + >(ValidatorArgs::from_multi_args(args, node_index)) .await }); nodes.push(node); diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 9243eb9cef..a162d53ef3 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -16,6 +16,7 @@ use hotshot::{ types::{SignatureKey, SystemContextHandle}, HotShotType, Memberships, Networks, SystemContext, }; +use hotshot_orchestrator::config::NetworkConfigSource; use hotshot_orchestrator::{ self, client::{OrchestratorClient, ValidatorArgs}, @@ -825,34 +826,27 @@ pub async fn main_entry_point< error!("Starting validator"); - let orchestrator_client: OrchestratorClient = - OrchestratorClient::connect_to_orchestrator(args.clone()).await; - - // Identify with the orchestrator + // see what our public identity will be let public_ip = match args.public_ip { Some(ip) => ip, None => local_ip_address::local_ip().unwrap(), }; - error!( - "Identifying with orchestrator using IP address {}", - public_ip.to_string() - ); - let node_index: u16 = orchestrator_client - .identify_with_orchestrator(public_ip.to_string()) - .await; - error!("Finished identifying; our node index is {node_index}"); - error!("Getting config from orchestrator"); - - let mut run_config = orchestrator_client - .get_config_from_orchestrator::(node_index) - .await; - - run_config.node_index = node_index.into(); + + let orchestrator_client: OrchestratorClient = + OrchestratorClient::new(args.clone(), public_ip.to_string()).await; + + // conditionally save/load config from file or orchestrator + let (mut run_config, source) = + NetworkConfig::from_file_or_orchestrator(&orchestrator_client, args.network_config_file) + .await; + + let node_index = run_config.node_index; + error!("Retrieved config; our node index is {node_index}"); run_config.config.my_own_validator_config = ValidatorConfig::<::SignatureKey>::generated_from_seed_indexed( run_config.seed, - node_index.into(), + node_index, 1, ); //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); @@ -892,12 +886,14 @@ pub async fn main_entry_point< } } - error!("Waiting for start command from orchestrator"); - orchestrator_client - .wait_for_all_nodes_ready(run_config.clone().node_index) - .await; + if let NetworkConfigSource::Orchestrator = source { + error!("Waiting for the start command from orchestrator"); + orchestrator_client + .wait_for_all_nodes_ready(run_config.clone().node_index) + .await; + } - error!("All nodes are ready! Starting HotShot"); + error!("Starting HotShot"); run.run_hotshot( hotshot, &mut transactions, diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index f806cf3ee1..1b77ed3ba3 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -69,6 +69,7 @@ async fn main() { >(ValidatorArgs { url: orchestrator_url, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, }) .await }); diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index 9fb1199b88..79c05e5867 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -3,10 +3,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use hotshot_testing::state_types::TestTypes; -use std::net::IpAddr; -use surf_disco::Url; use tracing::instrument; use types::VIDNetwork; @@ -17,17 +15,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[derive(Parser, Debug, Clone)] -struct MultiValidatorArgs { - /// Number of validators to run - pub num_nodes: u16, - /// The address the orchestrator runs on - pub url: Url, - /// This node's public IP address, for libp2p - /// If no IP address is passed in, it will default to 127.0.0.1 - pub public_ip: Option, -} - #[cfg_attr( async_executor_impl = "tokio", tokio::main(flavor = "multi_thread", worker_threads = 2) @@ -40,8 +27,8 @@ async fn main() { let args = MultiValidatorArgs::parse(); tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); - for _ in 0..args.num_nodes { - let url = args.url.clone(); + for node_index in 0..args.num_nodes { + let args = args.clone(); let node = async_spawn(async move { infra::main_entry_point::< @@ -52,10 +39,7 @@ async fn main() { VIDNetwork, NodeImpl, ThisRun, - >(ValidatorArgs { - url, - public_ip: args.public_ip, - }) + >(ValidatorArgs::from_multi_args(args, node_index)) .await }); nodes.push(node); diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index c52f7512d3..467b446573 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -95,6 +95,7 @@ async fn main() { >(ValidatorArgs { url: orchestrator_url, public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, }) .await }); diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index 1e20151a6d..79c05e5867 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -3,10 +3,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use hotshot_testing::state_types::TestTypes; -use std::net::IpAddr; -use surf_disco::Url; use tracing::instrument; use types::VIDNetwork; @@ -17,17 +15,6 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[derive(Parser, Debug, Clone)] -struct MultiValidatorArgs { - /// Number of validators to run - pub num_nodes: u16, - /// The address the orchestrator runs on - pub url: Url, - /// This node's public IP address, for libp2p - /// If no IP address is passed in, it will default to 127.0.0.1 - pub public_ip: Option, -} - #[cfg_attr( async_executor_impl = "tokio", tokio::main(flavor = "multi_thread", worker_threads = 2) @@ -40,8 +27,9 @@ async fn main() { let args = MultiValidatorArgs::parse(); tracing::error!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); - for _ in 0..args.num_nodes { - let url = args.url.clone(); + for node_index in 0..args.num_nodes { + let args = args.clone(); + let node = async_spawn(async move { infra::main_entry_point::< TestTypes, @@ -51,10 +39,7 @@ async fn main() { VIDNetwork, NodeImpl, ThisRun, - >(ValidatorArgs { - url, - public_ip: args.public_ip, - }) + >(ValidatorArgs::from_multi_args(args, node_index)) .await }); nodes.push(node); diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index f445e70c24..f9c4467b7d 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -24,6 +24,7 @@ serde = { workspace = true } serde_json = "1.0.96" snafu = { workspace = true } toml = { workspace = true } +thiserror = "1.0.50" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index ea3b986228..2a50915ea6 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -5,13 +5,14 @@ use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { client: surf_disco::Client, + pub identity: String, } // VALIDATOR @@ -28,21 +29,78 @@ pub struct ValidatorArgs { /// This node's public IP address, for libp2p /// If no IP address is passed in, it will default to 127.0.0.1 pub public_ip: Option, + /// An optional network config file to save to/load from + /// Allows for rejoining the network on a complete state loss + #[arg(short, long)] + pub network_config_file: Option, +} + +#[derive(Parser, Debug, Clone)] +pub struct MultiValidatorArgs { + /// Number of validators to run + pub num_nodes: u16, + /// The address the orchestrator runs on + pub url: Url, + /// This node's public IP address, for libp2p + /// If no IP address is passed in, it will default to 127.0.0.1 + pub public_ip: Option, + /// An optional network config file to save to/load from + /// Allows for rejoining the network on a complete state loss + #[arg(short, long)] + pub network_config_file: Option, +} + +impl ValidatorArgs { + /// Constructs `ValidatorArgs` from `MultiValidatorArgs` and a node index. + /// + /// If `network_config_file` is present in `MultiValidatorArgs`, it appends the node index to it to create a unique file name for each node. + /// + /// # Arguments + /// + /// * `multi_args` - A `MultiValidatorArgs` instance containing the base arguments for the construction. + /// * `node_index` - A `u16` representing the index of the node for which the args are being constructed. + /// + /// # Returns + /// + /// This function returns a new instance of `ValidatorArgs`. + /// + /// # Examples + /// + /// ```no_run + /// let multi_args = MultiValidatorArgs::new(); + /// let node_index = 1; + /// let instance = Self::from_multi_args(multi_args, node_index); + /// ``` + pub fn from_multi_args(multi_args: MultiValidatorArgs, node_index: u16) -> Self { + Self { + url: multi_args.url, + public_ip: multi_args.public_ip, + network_config_file: multi_args + .network_config_file + .map(|s| format!("{}-{}", s, node_index)), + } + } } impl OrchestratorClient { - /// Creates the client that connects to the orchestrator - pub async fn connect_to_orchestrator(args: ValidatorArgs) -> Self { + /// Creates the client that will connect to the orchestrator + pub async fn new(args: ValidatorArgs, identity: String) -> Self { let client = surf_disco::Client::::new(args.url); // TODO ED: Add healthcheck wait here - OrchestratorClient { client } + OrchestratorClient { client, identity } } - /// Sends an identify message to the server - /// Returns this validator's node_index in the network - pub async fn identify_with_orchestrator(&self, identity: String) -> u16 { + /// Sends an identify message to the orchestrator and attempts to get its config + /// Returns both the node_index and the run configuration from the orchestrator + /// Will block until both are returned + #[allow(clippy::type_complexity)] + pub async fn get_config( + &self, + identity: String, + ) -> NetworkConfig { + // get the node index let identity = identity.as_str(); - let f = |client: Client| { + let identity = |client: Client| { async move { let node_index: Result = client .post(&format!("api/identity/{identity}")) @@ -52,23 +110,12 @@ impl OrchestratorClient { } .boxed() }; - self.wait_for_fn_from_orchestrator(f).await - } + let node_index = self.wait_for_fn_from_orchestrator(identity).await; - /// Returns the run configuration from the orchestrator - /// Will block until the configuration is returned - #[allow(clippy::type_complexity)] - - pub async fn get_config_from_orchestrator( - &self, - node_index: u16, - ) -> NetworkConfig { + // get the corresponding config let f = |client: Client| { async move { - let config: Result< - NetworkConfig, - ClientError, - > = client + let config: Result, ClientError> = client .post(&format!("api/config/{node_index}")) .send() .await; @@ -76,7 +123,12 @@ impl OrchestratorClient { } .boxed() }; - self.wait_for_fn_from_orchestrator(f).await + + let mut config = self.wait_for_fn_from_orchestrator(f).await; + + config.node_index = node_index as u64; + + config } /// Tells the orchestrator this validator is ready to start diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 9966c8a9fd..8049f4c294 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -11,8 +11,12 @@ use std::{ time::Duration, }; use surf_disco::Url; +use thiserror::Error; use toml; use tracing::error; + +use crate::client::OrchestratorClient; + #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, @@ -57,6 +61,18 @@ pub struct WebServerConfig { pub wait_between_polls: Duration, } +#[derive(Error, Debug)] +pub enum NetworkConfigError { + #[error("Failed to read NetworkConfig from file")] + ReadFromFileError(std::io::Error), + #[error("Failed to deserialize loaded NetworkConfig")] + DeserializeError(bincode::Error), + #[error("Failed to write NetworkConfig to file")] + WriteToFileError(std::io::Error), + #[error("Failed to serialize NetworkConfig")] + SerializeError(bincode::Error), +} + #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] #[serde(bound(deserialize = ""))] pub struct NetworkConfig { @@ -78,6 +94,151 @@ pub struct NetworkConfig { pub da_web_server_config: Option, } +pub enum NetworkConfigSource { + Orchestrator, + File, +} + +impl NetworkConfig { + /// Asynchronously retrieves a `NetworkConfig` either from a file or from an orchestrator. + /// + /// This function takes an `OrchestratorClient`, an identity string, and an optional file path. + /// + /// If a file path is provided, the function will first attempt to load the `NetworkConfig` from the file. + /// If the file does not exist or cannot be read, the function will fall back to retrieving the `NetworkConfig` from the orchestrator. + /// The retrieved `NetworkConfig` is then saved back to the file for future use. + /// + /// If no file path is provided, the function will directly retrieve the `NetworkConfig` from the orchestrator. + /// + /// # Arguments + /// + /// * `client` - An `OrchestratorClient` used to retrieve the `NetworkConfig` from the orchestrator. + /// * `identity` - A string representing the identity for which to retrieve the `NetworkConfig`. + /// * `file` - An optional string representing the path to the file from which to load the `NetworkConfig`. + /// + /// # Returns + /// + /// This function returns a tuple containing a `NetworkConfig` and a `NetworkConfigSource`. The `NetworkConfigSource` indicates whether the `NetworkConfig` was loaded from a file or retrieved from the orchestrator. + /// + /// # Examples + /// + /// ```no_run + /// let client = OrchestratorClient::new(); + /// let identity = "my_identity".to_string(); + /// let file = Some("/path/to/my/config".to_string()); + /// let (config, source) = NetworkConfig::from_file_or_orchestrator(client, file).await; + /// ``` + pub async fn from_file_or_orchestrator( + client: &OrchestratorClient, + file: Option, + ) -> (NetworkConfig, NetworkConfigSource) { + if let Some(file) = file { + // if we pass in file, try there first + match Self::from_file(file.clone()) { + Ok(config) => (config, NetworkConfigSource::File), + Err(e) => { + // fallback to orchestrator + error!("{e}"); + + let config = client.get_config(client.identity.clone()).await; + + // save to file if we fell back + if let Err(e) = config.to_file(file) { + error!("{e}"); + }; + + (config, NetworkConfigSource::Orchestrator) + } + } + } else { + error!("Retrieving config from the orchestrator"); + + // otherwise just get from orchestrator + ( + client.get_config(client.identity.clone()).await, + NetworkConfigSource::Orchestrator, + ) + } + } + + /// Loads a `NetworkConfig` from a file. + /// + /// This function takes a file path as a string, reads the file, and then deserializes the contents into a `NetworkConfig`. + /// + /// # Arguments + /// + /// * `file` - A string representing the path to the file from which to load the `NetworkConfig`. + /// + /// # Returns + /// + /// This function returns a `Result` that contains a `NetworkConfig` if the file was successfully read and deserialized, or a `NetworkConfigError` if an error occurred. + /// + /// # Errors + /// + /// This function will return an error if the file cannot be read or if the contents cannot be deserialized into a `NetworkConfig`. + /// + /// # Examples + /// + /// ```no_run + /// let file = "/path/to/my/config".to_string(); + /// let config = NetworkConfig::from_file(file).unwrap(); + /// ``` + pub fn from_file(file: String) -> Result { + // read from file + let data = match fs::read(file) { + Ok(data) => data, + Err(e) => { + return Err(NetworkConfigError::ReadFromFileError(e)); + } + }; + + // deserialize + match bincode::deserialize(&data) { + Ok(data) => Ok(data), + Err(e) => Err(NetworkConfigError::DeserializeError(e)), + } + } + + /// Serializes the `NetworkConfig` and writes it to a file. + /// + /// This function takes a file path as a string, serializes the `NetworkConfig` into binary format using `bincode`, and then writes the serialized data to the file. + /// + /// # Arguments + /// + /// * `file` - A string representing the path to the file where the `NetworkConfig` should be saved. + /// + /// # Returns + /// + /// This function returns a `Result` that contains `()` if the `NetworkConfig` was successfully serialized and written to the file, or a `NetworkConfigError` if an error occurred. + /// + /// # Errors + /// + /// This function will return an error if the `NetworkConfig` cannot be serialized or if the file cannot be written. + /// + /// # Examples + /// + /// ```no_run + /// let file = "/path/to/my/config".to_string(); + /// let config = NetworkConfig::from_file(file); + /// config.to_file(file).unwrap(); + /// ``` + pub fn to_file(&self, file: String) -> Result<(), NetworkConfigError> { + // serialize + let serialized = match bincode::serialize(self) { + Ok(data) => data, + Err(e) => { + return Err(NetworkConfigError::SerializeError(e)); + } + }; + + // write to file + match fs::write(file, serialized) { + Ok(()) => Ok(()), + Err(e) => Err(NetworkConfigError::WriteToFileError(e)), + } + } +} + impl Default for NetworkConfig { fn default() -> Self { Self { From 14a52ec2211c26f44156f0c15ae7863c653d4c9c Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 11 Dec 2023 18:06:35 +0000 Subject: [PATCH 0513/1393] Remove ConsensusApi & HotShotType (#2166) * Remove ConsensusApi & HotShotType Closes #1194 Those traits had only one implementor each at this point, all used implementations moved to `impl`s for corresponding types. Renamed `ConsensusSharedApi` to `ConsensusApi`. --- hotshot/examples/infra/mod.rs | 3 +- hotshot/src/lib.rs | 242 ++---------------------------- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/consensus.rs | 7 +- testing/src/spinning_task.rs | 2 +- testing/src/task_helpers.rs | 2 +- testing/src/test_builder.rs | 2 - testing/src/test_runner.rs | 3 +- testing/tests/consensus_task.rs | 2 +- testing/tests/da_task.rs | 2 +- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- testing/tests/view_sync_task.rs | 4 +- types/src/traits/consensus_api.rs | 128 ++-------------- 14 files changed, 38 insertions(+), 365 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index a162d53ef3..4b9e7bb2ea 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -14,7 +14,7 @@ use hotshot::{ NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, - HotShotType, Memberships, Networks, SystemContext, + Memberships, Networks, SystemContext, }; use hotshot_orchestrator::config::NetworkConfigSource; use hotshot_orchestrator::{ @@ -294,7 +294,6 @@ pub trait RunDA< TYPES: NodeType, Leaf: TestableLeaf, Self: Sync, - SystemContext: HotShotType, { /// Initializes networking, returns self async fn initialize_networking( diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4224394a81..89e1a24123 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -55,11 +55,10 @@ use hotshot_types::{ error::StorageSnafu, message::{ DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, - SequencingMessage, }, simple_certificate::QuorumCertificate, traits::{ - consensus_api::{ConsensusApi, ConsensusSharedApi}, + consensus_api::ConsensusApi, network::{CommunicationChannel, NetworkError}, node_implementation::{ChannelMaps, NodeType, SendToTasks}, signature_key::SignatureKey, @@ -406,10 +405,7 @@ impl> SystemContext { ChannelStream>, ), HotShotError, - > - where - SystemContext: HotShotType, - { + > { // Save a clone of the storage for the handle let hotshot = Self::new( public_key, @@ -532,116 +528,18 @@ impl> SystemContext { } } -/// [`HotShot`] implementations that depend on [`TYPES::ConsensusType`]. -#[async_trait] -pub trait HotShotType> { +impl> SystemContext { /// Get the [`hotstuff`] field of [`HotShot`]. - fn consensus(&self) -> &Arc>>; - - /// Spawn all tasks that operate on the given [`HotShot`]. - /// - /// For a list of which tasks are being spawned, see this module's documentation. - async fn run_tasks(self) -> SystemContextHandle; - - // decide which handler to call based on the message variant and `transmit_type` - // async fn handle_message(&self, item: Message, transmit_type: TransmitType) { - // match (item.kind, transmit_type) { - // (MessageKind::Consensus(msg), TransmitType::Broadcast) => { - // self.handle_broadcast_consensus_message(msg, item.sender) - // .await; - // } - // (MessageKind::Consensus(msg), TransmitType::Direct) => { - // self.handle_direct_consensus_message(msg, item.sender).await; - // } - // (MessageKind::Data(msg), TransmitType::Broadcast) => { - // self.handle_broadcast_data_message(msg, item.sender).await; - // } - // (MessageKind::Data(msg), TransmitType::Direct) => { - // self.handle_direct_data_message(msg, item.sender).await; - // } - // (MessageKind::_Unreachable(_), _) => unimplemented!(), - // }; - // } - - // Handle an incoming [`ConsensusMessage`] that was broadcasted on the network. - // async fn handle_broadcast_consensus_message( - // &self, - // msg: I::ConsensusMessage, - // sender: TYPES::SignatureKey, - // ); - - // Handle an incoming [`ConsensusMessage`] directed at this node. - // async fn handle_direct_consensus_message( - // &self, - // msg: I::ConsensusMessage, - // sender: TYPES::SignatureKey, - // ); - - // Handle an incoming [`DataMessage`] that was broadcasted on the network - // async fn handle_broadcast_data_message( - // &self, - // msg: DataMessage, - // _sender: TYPES::SignatureKey, - // ) { - // // TODO validate incoming broadcast message based on sender signature key - // match msg { - // DataMessage::SubmitTransaction(transaction, _view_number) => { - // let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); - // - // // The API contract requires the hash to be unique - // // so we can assume entry == incoming txn - // // even if eq not satisfied - // // so insert is an idempotent operation - // let mut new = false; - // self.transactions() - // .modify(|txns| { - // new = txns.insert(transaction.commit(), transaction).is_none(); - // }) - // .await; - // - // if new { - // // If this is a new transaction, update metrics. - // let consensus = self.consensus().read().await; - // consensus.metrics.outstanding_transactions.update(1); - // consensus - // .metrics - // .outstanding_transactions_memory_size - // .update(i64::try_from(size).unwrap_or_else(|e| { - // warn!("Conversion failed: {e}. Using the max value."); - // i64::MAX - // })); - // } - // } - // } - // } - - // Handle an incoming [`DataMessage`] that directed at this node - // #[allow(clippy::unused_async)] // async for API compatibility reasons - // async fn handle_direct_data_message( - // &self, - // msg: DataMessage, - // _sender: TYPES::SignatureKey, - // ) { - // debug!(?msg, "Incoming direct data message"); - // match msg { - // DataMessage::SubmitTransaction(_, _) => { - // // Log exceptional situation and proceed - // warn!(?msg, "Broadcast message received over direct channel"); - // } - // } - // } -} - -#[async_trait] -impl> HotShotType - for SystemContext -{ - fn consensus(&self) -> &Arc>> { + #[must_use] + pub fn consensus(&self) -> &Arc>> { &self.inner.consensus } + /// Spawn all tasks that operate on [`HotShot`]. + /// + /// For a list of which tasks are being spawned, see this module's documentation. #[allow(clippy::too_many_lines)] - async fn run_tasks(self) -> SystemContextHandle { + pub async fn run_tasks(self) -> SystemContextHandle { // ED Need to set first first number to 1, or properly trigger the change upon start let task_runner = TaskRunner::new(); let registry = task_runner.registry.clone(); @@ -740,7 +638,7 @@ pub struct HotShotConsensusApi> { } #[async_trait] -impl> ConsensusSharedApi +impl> ConsensusApi for HotShotConsensusApi { fn total_nodes(&self) -> NonZeroUsize { @@ -763,12 +661,6 @@ impl> ConsensusSharedApi self.inner.config.min_transactions } - /// Generates and encodes a vote token - - async fn should_start_round(&self, _: TYPES::Time) -> bool { - false - } - async fn send_event(&self, event: Event) { debug!(?event, "send_event"); let mut event_sender = self.inner.event_sender.write().await; @@ -802,120 +694,6 @@ impl> ConsensusSharedApi } } -#[async_trait] -impl> ConsensusApi - for HotShotConsensusApi -{ - async fn send_direct_message( - &self, - recipient: TYPES::SignatureKey, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError> { - let inner = self.inner.clone(); - debug!(?message, ?recipient, "send_direct_message"); - async_spawn_local(async move { - inner - .networks - .quorum_network - .direct_message( - Message { - sender: inner.public_key.clone(), - kind: MessageKind::from_consensus_message(message), - }, - recipient, - ) - .await - }); - Ok(()) - } - - async fn send_direct_da_message( - &self, - recipient: TYPES::SignatureKey, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError> { - let inner = self.inner.clone(); - debug!(?message, ?recipient, "send_direct_message"); - async_spawn_local(async move { - inner - .networks - .da_network - .direct_message( - Message { - sender: inner.public_key.clone(), - kind: MessageKind::from_consensus_message(message), - }, - recipient, - ) - .await - }); - Ok(()) - } - - // TODO (DA) Refactor ConsensusApi and HotShot to use SystemContextInner directly. - // - async fn send_broadcast_message( - &self, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError> { - debug!(?message, "send_broadcast_message"); - self.inner - .networks - .quorum_network - .broadcast_message( - Message { - sender: self.inner.public_key.clone(), - kind: MessageKind::from_consensus_message(message), - }, - &self.inner.memberships.quorum_membership.clone(), - ) - .await?; - Ok(()) - } - - async fn send_da_broadcast( - &self, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError> { - debug!(?message, "send_da_broadcast_message"); - self.inner - .networks - .da_network - .broadcast_message( - Message { - sender: self.inner.public_key.clone(), - kind: MessageKind::from_consensus_message(message), - }, - &self.inner.memberships.da_membership.clone(), - ) - .await?; - Ok(()) - } - - async fn send_transaction( - &self, - message: DataMessage, - ) -> std::result::Result<(), NetworkError> { - debug!(?message, "send_broadcast_message"); - let api = self.clone(); - async_spawn(async move { - let _result = api - .inner - .networks - .da_network - .broadcast_message( - Message { - sender: api.inner.public_key.clone(), - kind: MessageKind::from(message), - }, - &api.inner.memberships.da_membership.clone(), - ) - .await; - }); - Ok(()) - } -} - /// initializer struct for creating starting block pub struct HotShotInitializer { /// the leaf specified initialization diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 95679ab3d5..f150319b2b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -29,7 +29,7 @@ use hotshot_types::{ message::Messages, traits::{ block_contents::vid_commitment, - consensus_api::ConsensusSharedApi, + consensus_api::ConsensusApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{NodeImplementation, NodeType}, state::ConsensusTime, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 04e13f9c65..98eaca2369 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -553,7 +553,12 @@ impl, A: ConsensusApi + ); let safety_check = outcome.is_ok(); if let Err(e) = outcome { - self.api.send_view_error(view, Arc::new(e)).await; + self.api + .send_event(Event { + view_number: view, + event: EventType::Error { error: Arc::new(e) }, + }) + .await; return; } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 84e7eb91d6..82cee453c3 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, sync::Arc}; use async_compatibility_layer::channel::UnboundedStream; use futures::FutureExt; -use hotshot::{traits::TestableNodeImplementation, HotShotType, SystemContext}; +use hotshot::{traits::TestableNodeImplementation, SystemContext}; use hotshot_task::{ event_stream::ChannelStream, task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index dc7a3b43d0..0120cc7a33 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -20,7 +20,7 @@ use hotshot_types::{ traits::{ block_contents::vid_commitment, block_contents::BlockHeader, - consensus_api::ConsensusSharedApi, + consensus_api::ConsensusApi, election::Membership, node_implementation::NodeType, signature_key::EncodedSignature, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 4e59dc75d1..e858816e3f 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -18,7 +18,6 @@ use crate::{ use super::{ overall_safety_task::OverallSafetyPropertiesDescription, txn_task::TxnTaskDescription, }; -use hotshot::{HotShotType, SystemContext}; /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { @@ -185,7 +184,6 @@ impl TestMetadata { ) -> TestLauncher where I: NodeImplementation, - SystemContext: HotShotType, { let TestMetadata { total_nodes, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0bd75d1d79..515d4a8787 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -9,7 +9,7 @@ use crate::{ }; use hotshot::{types::SystemContextHandle, Memberships}; -use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, HotShotType, SystemContext}; +use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; use hotshot_task::{ event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, }; @@ -46,7 +46,6 @@ pub struct TestRunner> { impl> TestRunner where - SystemContext: HotShotType, I: TestableNodeImplementation, { /// excecute test diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 5f53cfa4e4..208ed914d1 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -19,7 +19,7 @@ use hotshot_types::{ }; use hotshot_types::{ simple_vote::QuorumData, - traits::{consensus_api::ConsensusSharedApi, election::Membership}, + traits::{consensus_api::ConsensusApi, election::Membership}, }; use std::collections::HashMap; diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 4e786713fd..a9aed44656 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -8,7 +8,7 @@ use hotshot_types::{ data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, traits::{ - block_contents::vid_commitment, consensus_api::ConsensusSharedApi, election::Membership, + block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, node_implementation::NodeType, state::ConsensusTime, }, }; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 4252ffb770..b3c91eea9d 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, + traits::{consensus_api::ConsensusApi, state::ConsensusTime}, }; use std::{collections::HashMap, marker::PhantomData}; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 8757a3d414..9852642048 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -8,7 +8,7 @@ use hotshot_testing::{ use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusSharedApi, state::ConsensusTime}, + traits::{consensus_api::ConsensusApi, state::ConsensusTime}, }; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 46ed361c03..6cd1818cd5 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -32,8 +32,8 @@ async fn test_view_sync_task() { let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, ::Time::new(5), - hotshot_types::traits::consensus_api::ConsensusSharedApi::public_key(&api), - hotshot_types::traits::consensus_api::ConsensusSharedApi::private_key(&api), + hotshot_types::traits::consensus_api::ConsensusApi::public_key(&api), + hotshot_types::traits::consensus_api::ConsensusApi::private_key(&api), ); tracing::error!("Vote in test is {:?}", vote.clone()); diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index a05f01f297..2fa0417c52 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -2,12 +2,8 @@ use crate::{ data::Leaf, - error::HotShotError, - event::{Event, EventType}, - message::{DataMessage, SequencingMessage}, - simple_certificate::QuorumCertificate, + event::Event, traits::{ - network::NetworkError, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, storage::StorageError, @@ -15,12 +11,11 @@ use crate::{ }; use async_trait::async_trait; -use std::{num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{num::NonZeroUsize, time::Duration}; -/// The API that [`HotStuff`] needs to talk to the system, implemented for both validating and -/// sequencing consensus. +/// The API that [`HotStuff`] needs to talk to the system #[async_trait] -pub trait ConsensusSharedApi>: Send + Sync { +pub trait ConsensusApi>: Send + Sync { /// Total number of nodes in the network. Also known as `n`. fn total_nodes(&self) -> NonZeroUsize; @@ -31,126 +26,25 @@ pub trait ConsensusSharedApi>: Sen /// If this time is reached, the leader has to send a propose without transactions. fn propose_max_round_time(&self) -> Duration; - /// Store a leaf in the storage - async fn store_leaf( - &self, - old_anchor_view: TYPES::Time, - leaf: Leaf, - ) -> Result<(), StorageError>; - /// Retuns the maximum transactions allowed in a block fn max_transactions(&self) -> NonZeroUsize; /// Returns the minimum transactions that must be in a block fn min_transactions(&self) -> usize; - /// Returns `true` if hotstuff should start the given round. A round can also be started manually by sending `NewView` to the leader. - /// - /// In production code this should probably always return `true`. - async fn should_start_round(&self, view_number: TYPES::Time) -> bool; - - /// Notify the system of an event within `hotshot-consensus`. - async fn send_event(&self, event: Event); - /// Get a reference to the public key. fn public_key(&self) -> &TYPES::SignatureKey; /// Get a reference to the private key. fn private_key(&self) -> &::PrivateKey; - // Utility functions - - /// notifies client of an error - async fn send_view_error(&self, view_number: TYPES::Time, error: Arc>) { - self.send_event(Event { - view_number, - event: EventType::Error { error }, - }) - .await; - } - - /// notifies client of a replica timeout - async fn send_replica_timeout(&self, view_number: TYPES::Time) { - self.send_event(Event { - view_number, - event: EventType::ReplicaViewTimeout { view_number }, - }) - .await; - } - - /// notifies client of a next leader timeout - async fn send_next_leader_timeout(&self, view_number: TYPES::Time) { - self.send_event(Event { - view_number, - event: EventType::NextLeaderViewTimeout { view_number }, - }) - .await; - } - - /// sends a decide event down the channel - async fn send_decide( - &self, - view_number: TYPES::Time, - leaf_views: Vec>, - decide_qc: QuorumCertificate, - ) { - self.send_event(Event { - view_number, - event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - qc: Arc::new(decide_qc), - block_size: None, - }, - }) - .await; - } - - /// Sends a `ViewFinished` event - async fn send_view_finished(&self, view_number: TYPES::Time) { - self.send_event(Event { - view_number, - event: EventType::ViewFinished { view_number }, - }) - .await; - } -} - -/// The API that [`HotStuff`] needs to talk to the system, for sequencing consensus. -#[async_trait] -pub trait ConsensusApi>: - ConsensusSharedApi -{ - /// Send a direct message to the given recipient - async fn send_direct_message( - &self, - recipient: TYPES::SignatureKey, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// send a direct message using the DA communication channel - async fn send_direct_da_message( - &self, - recipient: TYPES::SignatureKey, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// Send a broadcast message to the entire network. - async fn send_broadcast_message( - &self, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; - - /// Send a broadcast to the DA comitee, stub for now - async fn send_da_broadcast( - &self, - message: SequencingMessage, - ) -> std::result::Result<(), NetworkError>; + /// Notify the system of an event within `hotshot-consensus`. + async fn send_event(&self, event: Event); - /// Send a message with a transaction. - /// This function is deprecated in favor of `submit_transaction` in `handle.rs` - #[deprecated] - async fn send_transaction( + /// Store a leaf in the storage + async fn store_leaf( &self, - message: DataMessage, - ) -> std::result::Result<(), NetworkError>; + old_anchor_view: TYPES::Time, + leaf: Leaf, + ) -> Result<(), StorageError>; } From eb6e899aa931ea608355e791f7aa7272b759f407 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 11 Dec 2023 11:08:21 -0800 Subject: [PATCH 0514/1393] fix metrics: block height --- task-impls/src/consensus.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 98eaca2369..1e88904b94 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -582,6 +582,7 @@ impl, A: ConsensusApi + let mut current_chain_length = 0usize; if parent_view + 1 == view { current_chain_length += 1; + let mut is_ancestor = false; // For metrics use: mark whether we're going to the ancestor of the leaf, only assign block_height for the most recent one. if let Err(e) = consensus.visit_leaf_ancestors( parent_view, Terminator::Exclusive(old_anchor_view), @@ -609,11 +610,13 @@ impl, A: ConsensusApi + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above if new_decide_reached { let mut leaf = leaf.clone(); - consensus - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.get_height()).unwrap_or(0)); - + if !is_ancestor { + consensus + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.get_height()).unwrap_or(0)); + } + is_ancestor = true; // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. if let Some(encoded_txns) = From e8184a7d7b181deb92301bbdb7e41b792021272b Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 11 Dec 2023 11:49:26 -0800 Subject: [PATCH 0515/1393] update if condition --- task-impls/src/consensus.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1e88904b94..77e50ef3ce 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -582,7 +582,6 @@ impl, A: ConsensusApi + let mut current_chain_length = 0usize; if parent_view + 1 == view { current_chain_length += 1; - let mut is_ancestor = false; // For metrics use: mark whether we're going to the ancestor of the leaf, only assign block_height for the most recent one. if let Err(e) = consensus.visit_leaf_ancestors( parent_view, Terminator::Exclusive(old_anchor_view), @@ -610,13 +609,12 @@ impl, A: ConsensusApi + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above if new_decide_reached { let mut leaf = leaf.clone(); - if !is_ancestor { + if leaf.view_number == new_anchor_view { consensus .metrics .last_synced_block_height .set(usize::try_from(leaf.get_height()).unwrap_or(0)); } - is_ancestor = true; // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. if let Some(encoded_txns) = From 939cb35086ed77835d52f6dc595451aff2fdb7fb Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 11 Dec 2023 14:57:39 -0500 Subject: [PATCH 0516/1393] fix catchup route (#2192) --- web_server/api.toml | 2 +- web_server/src/config.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web_server/api.toml b/web_server/api.toml index 6e33d6fdf0..2453ca4d3f 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -21,7 +21,7 @@ Return the VID disperse data for a given view number # GET the proposal for a view, where the view is passed as an argument [route.getrecentproposal] -PATH = ["proposal/"] +PATH = ["proposal/recent"] DOC = """ Return the proposal for the most recent view the server has """ diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 70d57b726d..4e5bf6f1db 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -18,7 +18,7 @@ pub fn post_proposal_route(view_number: u64) -> String { } pub fn get_recent_proposal_route() -> String { - "api/proposal".to_string() + "api/proposal/recent".to_string() } pub fn get_da_certificate_route(view_number: u64) -> String { From 02a7fcaa26ec1c37ef9231c8a46964e37e2d0de1 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 11 Dec 2023 15:45:13 -0500 Subject: [PATCH 0517/1393] fix genesis commitment (#2194) --- testing/src/block_types.rs | 6 ++---- types/src/traits/block_contents.rs | 4 ++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 39fa591795..7c50040583 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -70,16 +70,14 @@ pub struct TestBlockPayload { } impl TestBlockPayload { - /// Create a genesis block payload with transaction bytes `vec![0]`, to be used for + /// Create a genesis block payload with bytes `vec![0]`, to be used for /// consensus task initiation. /// # Panics /// If the `VidScheme` construction fails. #[must_use] pub fn genesis() -> Self { - let txns: Vec = vec![0]; - // It's impossible for `encode` to fail because the transaciton length is very small. TestBlockPayload { - transactions: vec![TestTransaction(txns)], + transactions: vec![], } } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index c75315fe58..4054fdc447 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -98,6 +98,10 @@ pub fn vid_commitment( } /// Computes the (empty) genesis VID commitment +/// The number of storage nodes does not do anything, unless in the future we add fake transactions +/// to the genesis payload. +/// +/// In that case, the payloads may mismatch and cause problems. #[must_use] pub fn genesis_vid_commitment() -> ::Commit { vid_commitment(&vec![], 8) From 3143490430d1dd49329f33493a1e028f6a0f5257 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 11 Dec 2023 16:39:21 -0500 Subject: [PATCH 0518/1393] Cancel consensus timeouts on new view --- hotshot/src/tasks/mod.rs | 4 ++-- task-impls/src/consensus.rs | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f150319b2b..704a62fd35 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,6 +1,6 @@ //! Provides a number of tasks that run continuously on a [`HotShot`] -use crate::{async_spawn, types::SystemContextHandle, HotShotConsensusApi}; +use crate::{types::SystemContextHandle, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; use futures::FutureExt; use hotshot_task::{ @@ -227,7 +227,7 @@ pub async fn add_consensus_task>( _pd: PhantomData, vote_collector: None, timeout_vote_collector: None, - timeout_task: async_spawn(async move {}), + timeout_task: None, event_stream: event_stream.clone(), output_event_stream: output_stream, da_certs: HashMap::new(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 98eaca2369..f776f63a27 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -105,7 +105,7 @@ pub struct ConsensusTaskState< pub timeout_vote_collector: Option<(TYPES::Time, usize, usize)>, /// timeout task handle - pub timeout_task: JoinHandle<()>, + pub timeout_task: Option>, /// Global events stream to publish events pub event_stream: ChannelStream>, @@ -340,6 +340,10 @@ impl, A: ConsensusApi + "Updating view from {} to {} in consensus task", *self.cur_view, *new_view ); + // cancel the old timeout task + if let Some(timeout_task) = self.timeout_task.take() { + timeout_task.cancel().await; + } // Remove old certs, we won't vote on past views for view in *self.cur_view..*new_view - 1 { @@ -381,7 +385,7 @@ impl, A: ConsensusApi + // Spawn a timeout task if we did actually update view let timeout = self.timeout; - self.timeout_task = async_spawn({ + self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view @@ -392,7 +396,7 @@ impl, A: ConsensusApi + .publish(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) .await; } - }); + })); let consensus = self.consensus.read().await; consensus .metrics From 8947ba4a874a29ba207addaedeb9fe45a7bd32ba Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 09:47:44 -0500 Subject: [PATCH 0519/1393] cancel timeouts in view sync --- task-impls/src/consensus.rs | 6 ++++++ task-impls/src/view_sync.rs | 35 +++++++++++++++++++++++++++-------- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f776f63a27..7f2224725f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -993,6 +993,12 @@ impl, A: ConsensusApi + "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); + self.output_event_stream.publish(Event{ + view_number: self.cur_view, + event: EventType::ReplicaViewTimeout { + view_number: self.cur_view, + } + }).await; let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index ea0b170ced..d4d9c82591 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -4,6 +4,7 @@ use crate::{ vote::{spawn_vote_accumulator, AccumulatorInfo}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_std::task::JoinHandle; use either::Either; use futures::FutureExt; use hotshot_task::{ @@ -138,6 +139,8 @@ pub struct ViewSyncReplicaTaskState< pub finalized: bool, /// Whether we have already sent a view change event for `next_view` pub sent_view_change_event: bool, + /// Timeout task handle, when it expires we try the next relay + pub timeout_task: Option>, /// Our node id; for logging pub id: u64, @@ -254,6 +257,7 @@ impl< finalized: false, sent_view_change_event: false, phase: ViewSyncPhase::None, + timeout_task: None, membership: self.membership.clone(), network: self.network.clone(), public_key: self.public_key.clone(), @@ -516,6 +520,7 @@ impl< finalized: false, sent_view_change_event: false, phase: ViewSyncPhase::None, + timeout_task: None, membership: self.membership.clone(), network: self.network.clone(), public_key: self.public_key.clone(), @@ -666,7 +671,11 @@ impl, A: ConsensusApi + .await; } - async_spawn({ + if let Some(timeout_task) = self.timeout_task.take() { + timeout_task.cancel().await; + } + + self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); let phase = self.phase.clone(); async move { @@ -680,7 +689,7 @@ impl, A: ConsensusApi + )) .await; } - }); + })); } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { @@ -737,7 +746,10 @@ impl, A: ConsensusApi + .publish(HotShotEvent::ViewChange(self.next_view)) .await; - async_spawn({ + if let Some(timeout_task) = self.timeout_task.take() { + timeout_task.cancel().await; + } + self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); let phase = self.phase.clone(); async move { @@ -751,7 +763,7 @@ impl, A: ConsensusApi + )) .await; } - }); + })); } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { @@ -788,6 +800,10 @@ impl, A: ConsensusApi + self.relay = certificate.get_data().relay; } + if let Some(timeout_task) = self.timeout_task.take() { + timeout_task.cancel().await; + } + self.event_stream .publish(HotShotEvent::ViewChange(self.next_view)) .await; @@ -817,7 +833,7 @@ impl, A: ConsensusApi + .await; } - async_spawn({ + self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; @@ -830,7 +846,7 @@ impl, A: ConsensusApi + )) .await; } - }); + })); return (None, self); } @@ -841,6 +857,9 @@ impl, A: ConsensusApi + && relay == self.relay && last_seen_certificate == self.phase { + if let Some(timeout_task) = self.timeout_task.take() { + timeout_task.cancel().await; + } self.relay += 1; match self.phase { ViewSyncPhase::None => { @@ -906,7 +925,7 @@ impl, A: ConsensusApi + } } - async_spawn({ + self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; @@ -919,7 +938,7 @@ impl, A: ConsensusApi + )) .await; } - }); + })); return (None, self); } From a505622ce24d66c33fb8f4ea1c46801422578ae7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 11:22:18 -0500 Subject: [PATCH 0520/1393] Timeout now counts as failure in test --- testing/src/overall_safety_task.rs | 47 ++++++++++++++++++++++++------ testing/tests/catchup.rs | 3 +- 2 files changed, 39 insertions(+), 11 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 0f276bea5c..d430624b6c 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -18,6 +18,7 @@ use hotshot_types::{ traits::node_implementation::NodeType, }; use snafu::Snafu; +use tracing::error; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, @@ -93,7 +94,7 @@ pub struct RoundResult { success_nodes: HashMap>, QuorumCertificate)>, /// Nodes that failed to commit this round - pub failed_nodes: HashMap>>>, + pub failed_nodes: HashMap>>, /// whether or not the round succeeded (for a custom defn of succeeded) pub status: ViewStatus, @@ -158,20 +159,21 @@ impl RoundCtx { pub fn insert_error_to_context( &mut self, view_number: TYPES::Time, + idx: usize, error: Arc>, ) { match self.round_results.entry(view_number) { - Entry::Occupied(mut o) => match o.get_mut().failed_nodes.entry(*view_number) { + Entry::Occupied(mut o) => match o.get_mut().failed_nodes.entry(idx as u64) { Entry::Occupied(mut o2) => { - o2.get_mut().push(error); + *o2.get_mut() = error; } Entry::Vacant(v) => { - v.insert(vec![error]); + v.insert(error); } }, Entry::Vacant(v) => { let mut round_result = RoundResult::default(); - round_result.failed_nodes.insert(*view_number, vec![error]); + round_result.failed_nodes.insert(idx as u64, error); v.insert(round_result); } } @@ -233,6 +235,12 @@ impl RoundResult { maybe_leaf } + pub fn check_if_failed(&mut self, threshold: usize, total_num_nodes: usize) -> bool { + let num_decided = self.success_nodes.len(); + let num_failed = self.failed_nodes.len(); + let remaining_nodes = total_num_nodes - (num_decided + num_failed); + remaining_nodes + num_decided >= threshold + } /// determines whether or not the round passes /// also do a safety check #[allow(clippy::too_many_arguments, clippy::let_unit_value)] @@ -443,7 +451,7 @@ impl OverallSafetyPropertiesDescription { if let Either::Left(Event { view_number, event }) = maybe_event { let key = match event { EventType::Error { error } => { - state.ctx.insert_error_to_context(view_number, error); + state.ctx.insert_error_to_context(view_number, idx, error); None } EventType::Decide { @@ -470,13 +478,12 @@ impl OverallSafetyPropertiesDescription { } } } - // TODO Emit this event in the consensus task once canceling the timeout task is implemented EventType::ReplicaViewTimeout { view_number } => { let error = Arc::new(HotShotError::::ViewTimeoutError { view_number, state: RoundTimedoutState::TestCollectRoundEventsTimedOut, }); - state.ctx.insert_error_to_context(view_number, error); + state.ctx.insert_error_to_context(view_number, idx, error); None } _ => return (None, state), @@ -514,7 +521,7 @@ impl OverallSafetyPropertiesDescription { } ViewStatus::Failed => { state.ctx.failed_views.insert(view_number); - if state.ctx.failed_views.len() >= self.num_failed_views { + if state.ctx.failed_views.len() > self.num_failed_views { state .test_event_stream .publish(GlobalTestEvent::ShutDown) @@ -542,6 +549,28 @@ impl OverallSafetyPropertiesDescription { } } } + else { + if view.check_if_failed(threshold, state.handles.len()) { + view.status = ViewStatus::Failed; + state.ctx.failed_views.insert(view_number); + if state.ctx.failed_views.len() > self.num_failed_views { + state + .test_event_stream + .publish(GlobalTestEvent::ShutDown) + .await; + return ( + Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::TooManyFailures { + got: state.ctx.failed_views.len(), + expected: num_failed_rounds_total, + }, + ))), + state, + ); + } + return (None, state); + } + } } diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 07def0a843..870db9da47 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -122,7 +122,6 @@ async fn test_catchup_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_catchup_one_node() { use std::time::Duration; @@ -182,7 +181,6 @@ async fn test_catchup_one_node() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_catchup_in_view_sync() { use std::time::Duration; @@ -228,6 +226,7 @@ async fn test_catchup_in_view_sync() { ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { check_leaf: true, + num_failed_views: 2, ..Default::default() }; From 20d123e8cd74db2f0f36766cc6ed7293167be3f6 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 12 Dec 2023 11:26:29 -0500 Subject: [PATCH 0521/1393] create config path when it DNE (#2196) --- hotshot/examples/infra/mod.rs | 2 +- orchestrator/src/config.rs | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 4b9e7bb2ea..ad3407b951 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -387,7 +387,7 @@ pub trait RunDA< error!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); async_sleep(Duration::from_secs(start_delay_seconds)).await; - error!("Starting hotshot!"); + error!("Starting HotShot example!"); let start = Instant::now(); let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 8049f4c294..ce69bd7d09 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -2,7 +2,6 @@ use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, ExecutionType, HotShotConfig, ValidatorConfig, }; -use std::fs; use std::{ env, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -10,6 +9,7 @@ use std::{ path::PathBuf, time::Duration, }; +use std::{fs, path::Path}; use surf_disco::Url; use thiserror::Error; use toml; @@ -106,6 +106,7 @@ impl NetworkConfig { /// /// If a file path is provided, the function will first attempt to load the `NetworkConfig` from the file. /// If the file does not exist or cannot be read, the function will fall back to retrieving the `NetworkConfig` from the orchestrator. + /// In this case, if the path to the file does not exist, it will be created. /// The retrieved `NetworkConfig` is then saved back to the file for future use. /// /// If no file path is provided, the function will directly retrieve the `NetworkConfig` from the orchestrator. @@ -138,11 +139,18 @@ impl NetworkConfig { Ok(config) => (config, NetworkConfigSource::File), Err(e) => { // fallback to orchestrator - error!("{e}"); + error!("{e}, falling back to orchestrator"); let config = client.get_config(client.identity.clone()).await; // save to file if we fell back + // ensure the directory containing the config file exists + if let Some(dir) = Path::new(&file).parent() { + if let Err(e) = fs::create_dir_all(dir) { + error!("Failed to recursively create path to config file: {e}") + } + } + if let Err(e) = config.to_file(file) { error!("{e}"); }; From fc7ddea8fe26d5a05ea350e5b3a5d35fc572b001 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 12 Dec 2023 12:00:55 -0500 Subject: [PATCH 0522/1393] don't return if failed safety check (#2198) --- task-impls/src/consensus.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 77e50ef3ce..19e6d3b3cf 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -559,7 +559,6 @@ impl, A: ConsensusApi + event: EventType::Error { error: Arc::new(e) }, }) .await; - return; } // Skip if both saftey and liveness checks fail. From f419f9e8801adf1c8eb352cdd6990a1c98479d68 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 12:12:10 -0500 Subject: [PATCH 0523/1393] fixes and slacken test params --- task-impls/src/consensus.rs | 12 +++++----- testing/src/overall_safety_task.rs | 38 ++++++++++++------------------ testing/tests/basic.rs | 3 +++ testing/tests/catchup.rs | 7 +++++- testing/tests/timeout.rs | 2 ++ 5 files changed, 32 insertions(+), 30 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7f2224725f..9db23e20fe 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -993,12 +993,12 @@ impl, A: ConsensusApi + "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); - self.output_event_stream.publish(Event{ - view_number: self.cur_view, - event: EventType::ReplicaViewTimeout { - view_number: self.cur_view, - } - }).await; + self.output_event_stream + .publish(Event { + view_number: view, + event: EventType::ReplicaViewTimeout { view_number: view }, + }) + .await; let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index d430624b6c..21909bc6c6 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -18,7 +18,6 @@ use hotshot_types::{ traits::node_implementation::NodeType, }; use snafu::Snafu; -use tracing::error; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, @@ -31,28 +30,26 @@ use super::GlobalTestEvent; /// the status of a view #[derive(Debug, Clone)] -pub enum ViewStatus { +pub enum ViewStatus { /// success Ok, /// failure Failed, /// safety violation - Err(OverallSafetyTaskErr), + Err(OverallSafetyTaskErr), /// in progress InProgress, } /// possible errors #[derive(Snafu, Debug, Clone)] -pub enum OverallSafetyTaskErr { +pub enum OverallSafetyTaskErr { /// inconsistent txn nums InconsistentTxnsNum { map: HashMap }, /// too many failed views TooManyFailures { - /// expected number of failures - expected: usize, - /// actual number of failures - got: usize, + /// vec of failed views + failed_views: HashSet, }, /// not enough decides NotEnoughDecides { @@ -97,7 +94,7 @@ pub struct RoundResult { pub failed_nodes: HashMap>>, /// whether or not the round succeeded (for a custom defn of succeeded) - pub status: ViewStatus, + pub status: ViewStatus, /// NOTE: technically a map is not needed /// left one anyway for ease of viewing @@ -411,7 +408,7 @@ impl OverallSafetyPropertiesDescription { if state.ctx.successful_views.len() < num_successful_views { return ( Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::NotEnoughDecides { + OverallSafetyTaskErr::::NotEnoughDecides { got: state.ctx.successful_views.len(), expected: num_successful_views, }, @@ -425,9 +422,8 @@ impl OverallSafetyPropertiesDescription { { return ( Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::TooManyFailures { - got: state.ctx.failed_views.len(), - expected: num_failed_rounds_total, + OverallSafetyTaskErr::::TooManyFailures { + failed_views: state.ctx.failed_views.clone(), }, ))), state, @@ -528,9 +524,8 @@ impl OverallSafetyPropertiesDescription { .await; return ( Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::TooManyFailures { - got: state.ctx.failed_views.len(), - expected: num_failed_rounds_total, + OverallSafetyTaskErr::::TooManyFailures { + failed_views: state.ctx.failed_views.clone(), }, ))), state, @@ -549,8 +544,7 @@ impl OverallSafetyPropertiesDescription { } } } - else { - if view.check_if_failed(threshold, state.handles.len()) { + else if view.check_if_failed(threshold, state.handles.len()) { view.status = ViewStatus::Failed; state.ctx.failed_views.insert(view_number); if state.ctx.failed_views.len() > self.num_failed_views { @@ -560,9 +554,8 @@ impl OverallSafetyPropertiesDescription { .await; return ( Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::TooManyFailures { - got: state.ctx.failed_views.len(), - expected: num_failed_rounds_total, + OverallSafetyTaskErr::::TooManyFailures { + failed_views: state.ctx.failed_views.clone(), }, ))), state, @@ -570,7 +563,6 @@ impl OverallSafetyPropertiesDescription { } return (None, state); } - } } @@ -619,7 +611,7 @@ impl OverallSafetyPropertiesDescription { /// overall types for safety task pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< - OverallSafetyTaskErr, + OverallSafetyTaskErr, GlobalTestEvent, ChannelStream, (usize, Either, HotShotEvent>), diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 26a16a20b1..8506891404 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -108,7 +108,10 @@ async fn test_with_failures_half_f() { metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, dead_nodes)], }; + + // TODO: this should only have 3 failures for each down leader, investigate why it fails additional views metadata.overall_safety_properties.num_failed_views = 6; + metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) .launch() diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 870db9da47..b376fee0cd 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -48,6 +48,7 @@ async fn test_catchup() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_failed_views: 25, check_leaf: true, ..Default::default() }; @@ -104,6 +105,7 @@ async fn test_catchup_web() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_failed_views: 25, check_leaf: true, ..Default::default() }; @@ -122,6 +124,7 @@ async fn test_catchup_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_catchup_one_node() { use std::time::Duration; @@ -160,6 +163,7 @@ async fn test_catchup_one_node() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_failed_views: 25, check_leaf: true, ..Default::default() }; @@ -181,6 +185,7 @@ async fn test_catchup_one_node() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_catchup_in_view_sync() { use std::time::Duration; @@ -226,7 +231,7 @@ async fn test_catchup_in_view_sync() { ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { check_leaf: true, - num_failed_views: 2, + num_failed_views: 25, ..Default::default() }; diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 1dda55c9c3..43b1313e9a 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -39,6 +39,7 @@ async fn test_timeout_web() { metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_failed_views: 25, num_successful_views: 25, ..Default::default() }; @@ -106,6 +107,7 @@ async fn test_timeout_libp2p() { metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + num_failed_views: 25, num_successful_views: 25, ..Default::default() }; From a0404b1b67de4293860af49509a567db56dbbc6f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 13:09:29 -0500 Subject: [PATCH 0524/1393] log when we can't propose due to missing vid info --- task-impls/src/consensus.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 19e6d3b3cf..623d1e4205 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1112,6 +1112,8 @@ impl, A: ConsensusApi + .await; self.payload_commitment_and_metadata = None; return true; + } else { + warn!("Cannot propose because we don't have the VID payload commitment and metadata"); } debug!("Self block was None"); false From 81b91ce32c00c38e4ec760d0b04ebdee93c7f56a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 13:58:20 -0500 Subject: [PATCH 0525/1393] fix propose --- task-impls/src/consensus.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 623d1e4205..bc798a7e10 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -994,6 +994,11 @@ impl, A: ConsensusApi + } HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata) => { self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); + // let high_qc = self.consensus.read().await.high_qc.clone(); + // let leader_view = high_qc.get_view_number() + 1; + // if self.quorum_membership.get_leader(leader_view) == self.public_key { + // self.publish_proposal_if_able(high_qc, leader_view, None).await; + // } } _ => {} } From d5479287c25eeee16150c135ba366af4cb56639f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 14:00:09 -0500 Subject: [PATCH 0526/1393] fix lint --- task-impls/src/consensus.rs | 3 +-- types/src/traits/block_contents.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 623d1e4205..aebd4ac8a3 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1112,9 +1112,8 @@ impl, A: ConsensusApi + .await; self.payload_commitment_and_metadata = None; return true; - } else { - warn!("Cannot propose because we don't have the VID payload commitment and metadata"); } + warn!("Cannot propose because we don't have the VID payload commitment and metadata"); debug!("Self block was None"); false } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 4054fdc447..6ed52e0533 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -100,7 +100,7 @@ pub fn vid_commitment( /// Computes the (empty) genesis VID commitment /// The number of storage nodes does not do anything, unless in the future we add fake transactions /// to the genesis payload. -/// +/// /// In that case, the payloads may mismatch and cause problems. #[must_use] pub fn genesis_vid_commitment() -> ::Commit { From 464b56d739e74ec4a0740f15fffa01c223c7abc2 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Dec 2023 14:15:41 -0500 Subject: [PATCH 0527/1393] propose if we can with payload commitment and metadata --- task-impls/src/consensus.rs | 13 ++++++------- types/src/traits/block_contents.rs | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index bc798a7e10..7ebebfb849 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -994,11 +994,11 @@ impl, A: ConsensusApi + } HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata) => { self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); - // let high_qc = self.consensus.read().await.high_qc.clone(); - // let leader_view = high_qc.get_view_number() + 1; - // if self.quorum_membership.get_leader(leader_view) == self.public_key { - // self.publish_proposal_if_able(high_qc, leader_view, None).await; - // } + let high_qc = self.consensus.read().await.high_qc.clone(); + let leader_view = high_qc.get_view_number() + 1; + if self.quorum_membership.get_leader(leader_view) == self.public_key { + self.publish_proposal_if_able(high_qc, leader_view, None).await; + } } _ => {} } @@ -1117,9 +1117,8 @@ impl, A: ConsensusApi + .await; self.payload_commitment_and_metadata = None; return true; - } else { - warn!("Cannot propose because we don't have the VID payload commitment and metadata"); } + warn!("Cannot propose because we don't have the VID payload commitment and metadata"); debug!("Self block was None"); false } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 4054fdc447..6ed52e0533 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -100,7 +100,7 @@ pub fn vid_commitment( /// Computes the (empty) genesis VID commitment /// The number of storage nodes does not do anything, unless in the future we add fake transactions /// to the genesis payload. -/// +/// /// In that case, the payloads may mismatch and cause problems. #[must_use] pub fn genesis_vid_commitment() -> ::Commit { From a3de0d7e77e377be7620bd25222f66a984df5319 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 12 Dec 2023 12:30:01 -0800 Subject: [PATCH 0528/1393] Reduce num_successful_views, modify logging --- task-impls/src/consensus.rs | 11 +++++++---- testing/tests/basic.rs | 2 ++ types/src/traits/block_contents.rs | 2 +- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 77e50ef3ce..1933edcd0b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1009,10 +1009,13 @@ impl, A: ConsensusApi + timeout_certificate: Option>, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { - error!( - "Somehow we formed a QC but are not the leader for the next view {:?}", - view - ); + // This is expected for view 1, so skipping the logging. + if view != TYPES::Time::new(1) { + error!( + "Somehow we formed a QC but are not the leader for the next view {:?}", + view + ); + } return false; } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 26a16a20b1..83292894ee 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -109,6 +109,8 @@ async fn test_with_failures_half_f() { node_changes: vec![(5, dead_nodes)], }; metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) .launch() diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 4054fdc447..6ed52e0533 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -100,7 +100,7 @@ pub fn vid_commitment( /// Computes the (empty) genesis VID commitment /// The number of storage nodes does not do anything, unless in the future we add fake transactions /// to the genesis payload. -/// +/// /// In that case, the payloads may mismatch and cause problems. #[must_use] pub fn genesis_vid_commitment() -> ::Commit { From 1d7841cd20dcc94bb726e901d6018748b3fccdd9 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 12 Dec 2023 15:35:35 -0800 Subject: [PATCH 0529/1393] Remove ignore tag --- testing/tests/consensus_task.rs | 1 - testing/tests/network_task.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 208ed914d1..ca93ab956e 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -85,7 +85,6 @@ async fn build_vote( tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_consensus_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index b3c91eea9d..a6fe1cf322 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -16,7 +16,6 @@ use std::{collections::HashMap, marker::PhantomData}; tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_network_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::{block_types::TestTransaction, task_helpers::build_system_handle}; From bddc07f815a4c5d572ae460201438c5dba3f79bf Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 12 Dec 2023 18:18:28 -0800 Subject: [PATCH 0530/1393] Fix network test --- task-impls/src/consensus.rs | 3 ++- task-impls/src/harness.rs | 30 ++++++++++++--------- testing/tests/network_task.rs | 50 +++++++++++++++-------------------- 3 files changed, 41 insertions(+), 42 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 74037feb6c..98ed988b46 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -997,7 +997,8 @@ impl, A: ConsensusApi + let high_qc = self.consensus.read().await.high_qc.clone(); let leader_view = high_qc.get_view_number() + 1; if self.quorum_membership.get_leader(leader_view) == self.public_key { - self.publish_proposal_if_able(high_qc, leader_view, None).await; + self.publish_proposal_if_able(high_qc, leader_view, None) + .await; } } _ => {} diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 277f878312..eadcd2ca7f 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -94,19 +94,25 @@ pub fn handle_event( std::option::Option, TestHarnessState, ) { - assert!( - state.expected_output.contains_key(&event), - "Got an unexpected event: {event:?}", - ); - let num_expected = state.expected_output.get_mut(&event).unwrap(); - if *num_expected == 1 { - state.expected_output.remove(&event); - } else { - *num_expected -= 1; - } + // Skip the output check if we are not expecting any output, to avoid the failure in case the + // shutdown signal arrives later than the new event. + if !state.expected_output.is_empty() { + assert!( + state.expected_output.contains_key(&event), + "Got an unexpected event: {event:?}", + ); + + let num_expected = state.expected_output.get_mut(&event).unwrap(); + if *num_expected == 1 { + state.expected_output.remove(&event); + } else { + *num_expected -= 1; + } - if state.expected_output.is_empty() { - return (Some(HotShotTaskCompleted::ShutDown), state); + if state.expected_output.is_empty() { + return (Some(HotShotTaskCompleted::ShutDown), state); + } } + (None, state) } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index a6fe1cf322..e2c9ecbbde 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -8,6 +8,7 @@ use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, traits::{consensus_api::ConsensusApi, state::ConsensusTime}, }; +use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; #[cfg(test)] @@ -18,7 +19,7 @@ use std::{collections::HashMap, marker::PhantomData}; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_network_task() { use hotshot_task_impls::harness::run_harness; - use hotshot_testing::{block_types::TestTransaction, task_helpers::build_system_handle}; + use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{data::VidDisperse, message::Proposal}; async_compatibility_layer::logging::setup_logging(); @@ -35,14 +36,14 @@ async fn test_network_task() { let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); - let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = Vec::new(); + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let signature = ::SignatureKey::sign( api.private_key(), - payload_commitment.as_ref(), + &encoded_transactions_hash, ); let da_proposal = Proposal { data: DAProposal { @@ -55,7 +56,7 @@ async fn test_network_task() { }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - let da_vid_disperse_inner = VidDisperse::from_membership( + let vid_disperse_inner = VidDisperse::from_membership( da_proposal.data.view_number, vid_disperse, &quorum_membership.clone().into(), @@ -63,8 +64,8 @@ async fn test_network_task() { // TODO for now reuse the same block payload commitment and signature as DA committee // https://github.com/EspressoSystems/jellyfish/issues/369 - let da_vid_disperse = Proposal { - data: da_vid_disperse_inner.clone(), + let vid_proposal = Proposal { + data: vid_disperse_inner.clone(), signature: da_proposal.signature.clone(), _pd: PhantomData, }; @@ -80,14 +81,11 @@ async fn test_network_task() { ViewNumber::new(2), )); input.push(HotShotEvent::BlockReady( - da_vid_disperse_inner.clone(), + vid_disperse_inner.clone(), ViewNumber::new(2), )); input.push(HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidDisperseSend( - da_vid_disperse.clone(), - pub_key, - )); + input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::QuorumProposalSend( quorum_proposal.clone(), pub_key, @@ -96,25 +94,22 @@ async fn test_network_task() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 2); - output.insert( - HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), - 2, // 2 occurrences: 1 from `input`, 1 from the DA task - ); output.insert( HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - 2, + 2, // 2 occurrences: 1 from `input`, 1 from the transactions task ); output.insert( - HotShotEvent::VidDisperseRecv(da_vid_disperse.clone(), pub_key), - 1, + HotShotEvent::BlockReady(vid_disperse_inner, ViewNumber::new(2)), + 2, // 2 occurrences: 1 from `input`, 1 from the VID task ); output.insert( - HotShotEvent::VidDisperseSend(da_vid_disperse, pub_key), + HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), 2, // 2 occurrences: 1 from `input`, 1 from the DA task ); - output.insert(HotShotEvent::Timeout(ViewNumber::new(1)), 1); - output.insert(HotShotEvent::Timeout(ViewNumber::new(2)), 1); - + output.insert( + HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), + 1, + ); // Only one output from the input. // The consensus task will fail to send a second proposal, like the DA task does, due to the // view number check in `publish_proposal_if_able` in consensus.rs, and we will see an error in @@ -124,20 +119,17 @@ async fn test_network_task() { HotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), 1, ); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 2); output.insert( HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), 1, ); - output.insert( - HotShotEvent::BlockReady(da_vid_disperse_inner, ViewNumber::new(2)), - 2, - ); - output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); output.insert( HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), 1, ); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 2); + output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, _| async { task_runner }; From a14a34e9f17e41bf259db5abe224701716f5735c Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 12 Dec 2023 23:39:10 -0800 Subject: [PATCH 0531/1393] Fix signatures, simplify harness, remove unused consts --- task-impls/src/harness.rs | 32 +++++++++++++----------------- testing/tests/network_task.rs | 20 ++++++++++++------- types/src/traits/block_contents.rs | 7 ------- 3 files changed, 27 insertions(+), 32 deletions(-) diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index eadcd2ca7f..9c85c54a07 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -94,24 +94,20 @@ pub fn handle_event( std::option::Option, TestHarnessState, ) { - // Skip the output check if we are not expecting any output, to avoid the failure in case the - // shutdown signal arrives later than the new event. - if !state.expected_output.is_empty() { - assert!( - state.expected_output.contains_key(&event), - "Got an unexpected event: {event:?}", - ); - - let num_expected = state.expected_output.get_mut(&event).unwrap(); - if *num_expected == 1 { - state.expected_output.remove(&event); - } else { - *num_expected -= 1; - } - - if state.expected_output.is_empty() { - return (Some(HotShotTaskCompleted::ShutDown), state); - } + assert!( + state.expected_output.contains_key(&event), + "Got an unexpected event: {event:?}", + ); + + let num_expected = state.expected_output.get_mut(&event).unwrap(); + if *num_expected == 1 { + state.expected_output.remove(&event); + } else { + *num_expected -= 1; + } + + if state.expected_output.is_empty() { + return (Some(HotShotTaskCompleted::ShutDown), state); } (None, state) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index e2c9ecbbde..40bc8c6760 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -35,23 +35,29 @@ async fn test_network_task() { // quorum membership for VID share distribution let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); let encoded_transactions = Vec::new(); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let da_signature = + ::SignatureKey::sign( + api.private_key(), + &encoded_transactions_hash, + ); + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(2)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let signature = + let vid_signature = ::SignatureKey::sign( api.private_key(), - &encoded_transactions_hash, + payload_commitment.as_ref(), ); + let da_proposal = Proposal { data: DAProposal { encoded_transactions: encoded_transactions.clone(), metadata: (), view_number: ViewNumber::new(2), }, - signature, + signature: da_signature, _pd: PhantomData, }; let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; @@ -66,7 +72,7 @@ async fn test_network_task() { // https://github.com/EspressoSystems/jellyfish/issues/369 let vid_proposal = Proposal { data: vid_disperse_inner.clone(), - signature: da_proposal.signature.clone(), + signature: vid_signature, _pd: PhantomData, }; @@ -75,6 +81,7 @@ async fn test_network_task() { let mut output = HashMap::new(); input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); + input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::TransactionsSequenced( encoded_transactions.clone(), (), @@ -90,7 +97,6 @@ async fn test_network_task() { quorum_proposal.clone(), pub_key, )); - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 2); @@ -108,7 +114,7 @@ async fn test_network_task() { ); output.insert( HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 1, + 2, // 2 occurrences: 1 from `input`, 1 from the VID task ); // Only one output from the input. // The consensus task will fail to send a second proposal, like the DA task does, due to the diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 6ed52e0533..54e8d8b67b 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -13,13 +13,6 @@ use std::{ hash::Hash, }; -// TODO -/// Number of storage nodes for VID initiation. -pub const NUM_STORAGE_NODES: usize = 8; -// TODO -/// Number of chunks for VID initiation. -pub const NUM_CHUNKS: usize = 8; - /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash From 9fee8586932140190c9bcfada3a2ba209a47009d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Dec 2023 11:52:24 -0500 Subject: [PATCH 0532/1393] lint --- task-impls/src/consensus.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dd6ae79f75..900821ece6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1007,7 +1007,8 @@ impl, A: ConsensusApi + let high_qc = self.consensus.read().await.high_qc.clone(); let leader_view = high_qc.get_view_number() + 1; if self.quorum_membership.get_leader(leader_view) == self.public_key { - self.publish_proposal_if_able(high_qc, leader_view, None).await; + self.publish_proposal_if_able(high_qc, leader_view, None) + .await; } } _ => {} From 04cb62e08434b8f40f2b8b396abc79d96a0ba215 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Dec 2023 12:27:26 -0500 Subject: [PATCH 0533/1393] fix tokio --- task-impls/src/consensus.rs | 3 +++ task-impls/src/view_sync.rs | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 900821ece6..471dcc9d5a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -342,7 +342,10 @@ impl, A: ConsensusApi + ); // cancel the old timeout task if let Some(timeout_task) = self.timeout_task.take() { + #[cfg(async_executor_impl = "async-std")] timeout_task.cancel().await; + #[cfg(async_executor_impl = "tokio")] + timeout_task.abort(); } // Remove old certs, we won't vote on past views diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d4d9c82591..306f27ac01 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -4,7 +4,6 @@ use crate::{ vote::{spawn_vote_accumulator, AccumulatorInfo}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_std::task::JoinHandle; use either::Either; use futures::FutureExt; use hotshot_task::{ @@ -22,6 +21,8 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ message::GeneralConsensusMessage, @@ -35,7 +36,10 @@ use hotshot_types::{ }; use snafu::Snafu; use std::{collections::HashMap, sync::Arc, time::Duration}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument}; + #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync pub enum ViewSyncPhase { @@ -672,7 +676,10 @@ impl, A: ConsensusApi + } if let Some(timeout_task) = self.timeout_task.take() { + #[cfg(async_executor_impl = "async-std")] timeout_task.cancel().await; + #[cfg(async_executor_impl = "tokio")] + timeout_task.abort(); } self.timeout_task = Some(async_spawn({ @@ -747,7 +754,10 @@ impl, A: ConsensusApi + .await; if let Some(timeout_task) = self.timeout_task.take() { + #[cfg(async_executor_impl = "async-std")] timeout_task.cancel().await; + #[cfg(async_executor_impl = "tokio")] + timeout_task.abort(); } self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); @@ -801,7 +811,10 @@ impl, A: ConsensusApi + } if let Some(timeout_task) = self.timeout_task.take() { + #[cfg(async_executor_impl = "async-std")] timeout_task.cancel().await; + #[cfg(async_executor_impl = "tokio")] + timeout_task.abort(); } self.event_stream @@ -858,7 +871,10 @@ impl, A: ConsensusApi + && last_seen_certificate == self.phase { if let Some(timeout_task) = self.timeout_task.take() { + #[cfg(async_executor_impl = "async-std")] timeout_task.cancel().await; + #[cfg(async_executor_impl = "tokio")] + timeout_task.abort(); } self.relay += 1; match self.phase { From c4dc2d2787ff9b30b24da76b426f6fe4af2aa497 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Dec 2023 13:15:34 -0500 Subject: [PATCH 0534/1393] increase num failures on many tests --- testing/tests/basic.rs | 1 + testing/tests/combined_network.rs | 4 ++++ testing/tests/web_server.rs | 1 + 3 files changed, 6 insertions(+) diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 8506891404..8a79625ec9 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -61,6 +61,7 @@ async fn test_with_failures_one() { node_changes: vec![(5, dead_nodes)], }; metadata.overall_safety_properties.num_failed_views = 2; + metadata.overall_safety_properties.num_successful_views = 25; metadata .gen_launcher::(0) .launch() diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index b78f48a39e..f392a512c2 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -32,6 +32,7 @@ async fn test_combined_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 33, num_successful_views: 25, ..Default::default() }, @@ -73,6 +74,7 @@ async fn test_combined_network_webserver_crash() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, @@ -128,6 +130,7 @@ async fn test_combined_network_reup() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, @@ -187,6 +190,7 @@ async fn test_combined_network_half_dc() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 037cddcb3b..d99501faec 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -28,6 +28,7 @@ async fn web_server_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, From 0c51b9488fe27bc630d7c18889b4e79b82b7a1e3 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 13 Dec 2023 10:23:16 -0800 Subject: [PATCH 0535/1393] Move genesis_vid_commitment to testing --- hotshot/src/traits/storage/memory_storage.rs | 7 ++----- testing/src/block_types.rs | 14 ++++++++++++-- types/src/traits/block_contents.rs | 10 ---------- 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 94ffdef83d..76d78c72a0 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -111,16 +111,13 @@ mod test { use super::*; use commit::Committable; use hotshot_testing::{ - block_types::{TestBlockHeader, TestBlockPayload}, + block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, node_types::TestTypes, }; use hotshot_types::{ data::{fake_commitment, genesis_proposer_id, Leaf}, simple_certificate::QuorumCertificate, - traits::{ - block_contents::genesis_vid_commitment, node_implementation::NodeType, - state::ConsensusTime, - }, + traits::{node_implementation::NodeType, state::ConsensusTime}, }; use std::marker::PhantomData; use tracing::instrument; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 7c50040583..fe61bb8ccd 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -5,9 +5,9 @@ use std::{ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::{BlockError, VidCommitment}, + data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{genesis_vid_commitment, BlockHeader, Transaction}, + block_contents::{vid_commitment, BlockHeader, Transaction}, state::TestableBlock, BlockPayload, }, @@ -157,6 +157,16 @@ impl BlockPayload for TestBlockPayload { } } +/// Computes the (empty) genesis VID commitment +/// The number of storage nodes does not do anything, unless in the future we add fake transactions +/// to the genesis payload. +/// +/// In that case, the payloads may mismatch and cause problems. +#[must_use] +pub fn genesis_vid_commitment() -> ::Commit { + vid_commitment(&vec![], 8) +} + /// A [`BlockHeader`] that commits to [`TestBlockPayload`]. #[derive(PartialEq, Eq, Hash, Clone, Debug, Deserialize, Serialize)] pub struct TestBlockHeader { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 54e8d8b67b..0c05108b00 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -90,16 +90,6 @@ pub fn vid_commitment( vid.commit_only(encoded_transactions).unwrap() } -/// Computes the (empty) genesis VID commitment -/// The number of storage nodes does not do anything, unless in the future we add fake transactions -/// to the genesis payload. -/// -/// In that case, the payloads may mismatch and cause problems. -#[must_use] -pub fn genesis_vid_commitment() -> ::Commit { - vid_commitment(&vec![], 8) -} - /// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable From 4e0897b0afe5d989a9378ab300f36b492c96abe0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Dec 2023 13:39:23 -0500 Subject: [PATCH 0536/1393] reduce logging level in many places --- task-impls/src/consensus.rs | 12 +++++++----- task-impls/src/da.rs | 2 +- task-impls/src/transactions.rs | 4 ++-- task-impls/src/vid.rs | 4 ++-- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7ebebfb849..0e9a4b12f9 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -871,7 +871,9 @@ impl, A: ConsensusApi + .publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await { - warn!("Wasn't able to publish proposal"); + debug!( + "Wasn't able to publish proposal when QC was formed, still may publish" + ); } } } @@ -906,7 +908,7 @@ impl, A: ConsensusApi + return; } - info!("VID disperse data is not more than one view older."); + debug!("VID disperse data is not more than one view older."); let payload_commitment = disperse.data.payload_commitment; // Check whether the sender is the right leader for this view @@ -997,7 +999,8 @@ impl, A: ConsensusApi + let high_qc = self.consensus.read().await.high_qc.clone(); let leader_view = high_qc.get_view_number() + 1; if self.quorum_membership.get_leader(leader_view) == self.public_key { - self.publish_proposal_if_able(high_qc, leader_view, None).await; + self.publish_proposal_if_able(high_qc, leader_view, None) + .await; } } _ => {} @@ -1118,8 +1121,7 @@ impl, A: ConsensusApi + self.payload_commitment_and_metadata = None; return true; } - warn!("Cannot propose because we don't have the VID payload commitment and metadata"); - debug!("Self block was None"); + debug!("Cannot propose because we don't have the VID payload commitment and metadata"); false } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index cbf438484b..662761b1aa 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -218,7 +218,7 @@ impl, A: ConsensusApi + } if *view - *self.cur_view > 1 { - error!("View changed by more than 1 going to view {:?}", view); + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2693934978..bb64562052 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -170,7 +170,7 @@ impl, A: ConsensusApi + } if *view - *self.cur_view > 1 { - error!("View changed by more than 1 going to view {:?}", view); + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; @@ -283,7 +283,7 @@ impl, A: ConsensusApi + match result { Err(_) => { // Fall through below to updating new block - error!( + debug!( "propose_max_round_time passed, sending transactions we have so far" ); } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 8e72414743..47c3f2e783 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -27,7 +27,7 @@ use hotshot_task::event_stream::EventStream; use snafu::Snafu; use std::marker::PhantomData; use std::sync::Arc; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; #[derive(Snafu, Debug)] /// Error type for consensus tasks @@ -133,7 +133,7 @@ impl, A: ConsensusApi + } if *view - *self.cur_view > 1 { - error!("View changed by more than 1 going to view {:?}", view); + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; From e124010582064ed7713e8e514194d9dbe13442e1 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Dec 2023 14:10:55 -0500 Subject: [PATCH 0537/1393] reduce view_sync logging --- task-impls/src/view_sync.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index ea0b170ced..d003365330 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -34,7 +34,7 @@ use hotshot_types::{ }; use snafu::Snafu; use std::{collections::HashMap, sync::Arc, time::Duration}; -use tracing::{debug, error, info, instrument}; +use tracing::{debug, error, info, instrument, warn}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync pub enum ViewSyncPhase { @@ -620,7 +620,7 @@ impl, A: ConsensusApi + // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { - error!("We're already in a higher round"); + warn!("We're already in a higher round"); return (None, self); } @@ -671,7 +671,7 @@ impl, A: ConsensusApi + let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncCertificateRecv"); + debug!("Vote sending timed out in ViewSyncCertificateRecv"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -688,7 +688,7 @@ impl, A: ConsensusApi + // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { - error!("We're already in a higher round"); + warn!("We're already in a higher round"); return (None, self); } @@ -742,7 +742,7 @@ impl, A: ConsensusApi + let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncCertificateRecv"); + debug!("Vote sending timed out in ViewSyncCertificateRecv"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -759,7 +759,7 @@ impl, A: ConsensusApi + // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { - error!("We're already in a higher round"); + warn!("We're already in a higher round"); return (None, self); } @@ -821,7 +821,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncTrigger"); + debug!("Vote sending timed out in ViewSyncTrigger"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -910,7 +910,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncTimeout"); + debug!("Vote sending timed out in ViewSyncTimeout"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), From 968a22676c04ed4ffd094d38e2bee085b9e8c4f8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Dec 2023 14:12:41 -0500 Subject: [PATCH 0538/1393] info instead of debug --- task-impls/src/view_sync.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d003365330..babf2e0d34 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -671,7 +671,7 @@ impl, A: ConsensusApi + let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; - debug!("Vote sending timed out in ViewSyncCertificateRecv"); + info!("Vote sending timed out in ViewSyncCertificateRecv"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -742,7 +742,7 @@ impl, A: ConsensusApi + let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; - debug!("Vote sending timed out in ViewSyncCertificateRecv"); + info!("Vote sending timed out in ViewSyncCertificateRecv"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -821,7 +821,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - debug!("Vote sending timed out in ViewSyncTrigger"); + info!("Vote sending timed out in ViewSyncTrigger"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -910,7 +910,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - debug!("Vote sending timed out in ViewSyncTimeout"); + info!("Vote sending timed out in ViewSyncTimeout"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), From 1abc4920dd9952a1eee2e1ba2e922db0075726dd Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 13 Dec 2023 20:15:51 -0500 Subject: [PATCH 0539/1393] [Stability] Config file changes (#2219) * move config file creation * update config to JSON --- orchestrator/src/config.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index ce69bd7d09..67ae88e5c3 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -66,11 +66,13 @@ pub enum NetworkConfigError { #[error("Failed to read NetworkConfig from file")] ReadFromFileError(std::io::Error), #[error("Failed to deserialize loaded NetworkConfig")] - DeserializeError(bincode::Error), + DeserializeError(serde_json::Error), #[error("Failed to write NetworkConfig to file")] WriteToFileError(std::io::Error), #[error("Failed to serialize NetworkConfig")] - SerializeError(bincode::Error), + SerializeError(serde_json::Error), + #[error("Failed to recursively create path to NetworkConfig")] + FailedToCreatePath(std::io::Error), } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] @@ -144,13 +146,6 @@ impl NetworkConfig { let config = client.get_config(client.identity.clone()).await; // save to file if we fell back - // ensure the directory containing the config file exists - if let Some(dir) = Path::new(&file).parent() { - if let Err(e) = fs::create_dir_all(dir) { - error!("Failed to recursively create path to config file: {e}") - } - } - if let Err(e) = config.to_file(file) { error!("{e}"); }; @@ -201,7 +196,7 @@ impl NetworkConfig { }; // deserialize - match bincode::deserialize(&data) { + match serde_json::from_slice(&data) { Ok(data) => Ok(data), Err(e) => Err(NetworkConfigError::DeserializeError(e)), } @@ -209,7 +204,7 @@ impl NetworkConfig { /// Serializes the `NetworkConfig` and writes it to a file. /// - /// This function takes a file path as a string, serializes the `NetworkConfig` into binary format using `bincode`, and then writes the serialized data to the file. + /// This function takes a file path as a string, serializes the `NetworkConfig` into JSON format using `serde_json` and then writes the serialized data to the file. /// /// # Arguments /// @@ -231,8 +226,15 @@ impl NetworkConfig { /// config.to_file(file).unwrap(); /// ``` pub fn to_file(&self, file: String) -> Result<(), NetworkConfigError> { + // ensure the directory containing the config file exists + if let Some(dir) = Path::new(&file).parent() { + if let Err(e) = fs::create_dir_all(dir) { + return Err(NetworkConfigError::FailedToCreatePath(e)); + } + } + // serialize - let serialized = match bincode::serialize(self) { + let serialized = match serde_json::to_string_pretty(self) { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::SerializeError(e)); From 5b83cb2cdfbf5605c7441fa48556c46db45062cc Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 13 Dec 2023 20:30:18 -0500 Subject: [PATCH 0540/1393] [Stability] Cancel polling for old votes (#2223) * cancel polling for votes properly * fix view sync cert cancel * revert timeout vote changes * cancel poll for proposal during timeout --- hotshot/src/tasks/mod.rs | 4 ---- task-impls/src/consensus.rs | 30 +++++++++++++++++++++++++++++- task-impls/src/view_sync.rs | 19 +++++++++++++++++++ 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f150319b2b..49d71c0843 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -246,10 +246,6 @@ pub async fn add_consensus_task>( .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) .await; - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) - .await; let filter = FilterEvent(Arc::new(consensus_event_filter)); let consensus_name = "Consensus Task"; let consensus_event_handler = HandleEvent(Arc::new( diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 57f7d04f91..7b06804e53 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -414,7 +414,7 @@ impl, A: ConsensusApi + match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( - "Receved Quorum Propsoal for view {}", + "Receved Quorum Proposal for view {}", *proposal.data.view_number ); @@ -840,6 +840,13 @@ impl, A: ConsensusApi + debug!("QC Formed event happened!"); if let either::Right(qc) = cert.clone() { + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; + debug!( "Attempting to publish proposal after forming a TC for view {}", *qc.view_number @@ -861,6 +868,13 @@ impl, A: ConsensusApi + let mut consensus = self.consensus.write().await; consensus.high_qc = qc.clone(); + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; + drop(consensus); debug!( "Attempting to publish proposal after forming a QC for view {}", @@ -885,6 +899,10 @@ impl, A: ConsensusApi + .inject_consensus_info(ConsensusIntentEvent::CancelPollForDAC(*view)) .await; + self.committee_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .await; + self.da_certs.insert(view, cert); if self.vote_if_able().await { @@ -977,6 +995,16 @@ impl, A: ConsensusApi + return; } + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .await; + + // cancel poll for proposal + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal(*view)) + .await; + let vote = TimeoutVote::create_signed_vote( TimeoutData { view }, view, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index babf2e0d34..58771d4fd3 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -465,6 +465,11 @@ impl< return; } + // cancel poll for votes + self.network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view_number)) + .await; + self.num_timeouts_tracked += 1; error!( "Num timeouts tracked since last view change is {}. View {} timed out", @@ -782,6 +787,20 @@ impl, A: ConsensusApi + return (None, self); } + // cancel poll for votes + self.network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncVotes( + *certificate.view_number, + )) + .await; + + // cancel poll for view sync cert + self.network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncCertificate( + *certificate.view_number, + )) + .await; + self.phase = last_seen_certificate; if certificate.get_data().relay > self.relay { From df20124c0eaf81a3fc4655cec875f15193cc1b1f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 14 Dec 2023 08:47:55 -0500 Subject: [PATCH 0541/1393] [Stability] Garbage collect polls for all views < latest-2 (#2226) --- .../traits/networking/web_server_network.rs | 119 ++++++++++-------- task-impls/src/view_sync.rs | 5 + 2 files changed, 74 insertions(+), 50 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index ce2e60e733..ad0b2dd078 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -30,8 +30,9 @@ use serde::{Deserialize, Serialize}; use surf_disco::Url; use hotshot_types::traits::network::ViewMessage; +use std::collections::BTreeMap; use std::{ - collections::{hash_map::Entry, BTreeSet, HashMap}, + collections::{btree_map::Entry, BTreeSet}, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -119,25 +120,25 @@ struct Inner { /// Task map for quorum proposals. proposal_task_map: - Arc>>>>, + Arc>>>>, /// Task map for quorum votes. vote_task_map: - Arc>>>>, + Arc>>>>, /// Task map for VID disperse data vid_disperse_task_map: - Arc>>>>, + Arc>>>>, /// Task map for DACs. dac_task_map: - Arc>>>>, + Arc>>>>, /// Task map for view sync certificates. view_sync_cert_task_map: - Arc>>>>, + Arc>>>>, /// Task map for view sync votes. view_sync_vote_task_map: - Arc>>>>, + Arc>>>>, /// Task map for transactions txn_task_map: - Arc>>>>, + Arc>>>>, } impl Inner { @@ -330,6 +331,7 @@ impl Inner { ConsensusIntentEvent::CancelPollForVotes(event_view) | ConsensusIntentEvent::CancelPollForProposal(event_view) | ConsensusIntentEvent::CancelPollForDAC(event_view) + | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { if view_number == event_view { @@ -768,8 +770,6 @@ impl ConnectedNetwork, TYPES::Signatur ); // TODO ED Need to handle canceling tasks that don't receive their expected output (such a proposal that never comes) - // TODO ED Need to GC all old views, not just singular views, could lead to a network leak - match event { ConsensusIntentEvent::PollForProposal(view_number) => { // Check if we already have a task for this (we shouldn't) @@ -799,15 +799,13 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // GC proposal collection if we are two views in the future - if let Some((_, sender)) = task_map.remove_entry(&view_number.wrapping_sub(2)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForProposal( - view_number.wrapping_sub(2), - )) + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForProposal(*view)) .await; } } @@ -839,15 +837,13 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // GC proposal collection if we are two views in the future - if let Some((_, sender)) = task_map.remove_entry(&view_number.wrapping_sub(2)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForVIDDisperse( - view_number.wrapping_sub(2), - )) + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForVIDDisperse(*view)) .await; } } @@ -894,16 +890,13 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // GC proposal collection if we are two views in the future - // TODO ED This won't work for vote collection, last task is more than 2 view ago depending on size of network, will need to rely on cancel task from consensus - if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForVotes( - view_number.wrapping_sub(2), - )) + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForVotes(*view)) .await; } } @@ -932,15 +925,13 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // GC proposal collection if we are two views in the future - if let Some((_, sender)) = task_map.remove_entry(&(view_number.wrapping_sub(2))) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForDAC( - view_number.wrapping_sub(2), - )) + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForDAC(*view)) .await; } } @@ -986,7 +977,17 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // TODO ED Do we need to GC before returning? Or will view sync task handle that? + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForViewSyncCertificate( + *view, + )) + .await; + } } ConsensusIntentEvent::PollForViewSyncVotes(view_number) => { let mut task_map = self.inner.view_sync_vote_task_map.write().await; @@ -1015,6 +1016,16 @@ impl ConnectedNetwork, TYPES::Signatur } else { debug!("Somehow task already existed!"); } + + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForViewSyncVotes(*view)) + .await; + } } ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) => { @@ -1047,7 +1058,7 @@ impl ConnectedNetwork, TYPES::Signatur } ConsensusIntentEvent::PollForTransactions(view_number) => { let mut task_map = self.inner.txn_task_map.write().await; - if let std::collections::hash_map::Entry::Vacant(e) = task_map.entry(view_number) { + if let Entry::Vacant(e) = task_map.entry(view_number) { // create new task let (sender, receiver) = unbounded(); e.insert(sender); @@ -1069,7 +1080,15 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // TODO ED Do we need to GC before returning? Or will view sync task handle that? + // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. + let view_minus_2 = view_number.saturating_sub(2); + let range = task_map.range(..view_minus_2); + for (view, task) in range { + // Cancel the old task by sending a message to it. If the task already exited we expect an error + let _res = task + .send(ConsensusIntentEvent::CancelPollForTransactions(*view)) + .await; + } } ConsensusIntentEvent::CancelPollForTransactions(view_number) => { let mut task_map = self.inner.txn_task_map.write().await; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 58771d4fd3..cae79db753 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -509,6 +509,11 @@ impl< subscribe_view, )) .await; + // Also subscribe to the latest view for the same reason. The GC will remove the above poll + // in the case that one doesn't resolve but this one does. + self.network + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .await; self.network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) From 03d2121e0e54aaf081829ed947103bb61b96b27c Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Dec 2023 09:21:42 -0500 Subject: [PATCH 0542/1393] create helper to cancel tasks, simplify math --- task-impls/src/consensus.rs | 6 ++---- task-impls/src/lib.rs | 3 +++ task-impls/src/view_sync.rs | 21 +++++---------------- testing/src/overall_safety_task.rs | 4 +--- 4 files changed, 11 insertions(+), 23 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 471dcc9d5a..cc24fa968d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,5 +1,6 @@ use crate::{ events::HotShotEvent, + helpers::cancel_task, vote::{spawn_vote_accumulator, AccumulatorInfo}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -342,10 +343,7 @@ impl, A: ConsensusApi + ); // cancel the old timeout task if let Some(timeout_task) = self.timeout_task.take() { - #[cfg(async_executor_impl = "async-std")] - timeout_task.cancel().await; - #[cfg(async_executor_impl = "tokio")] - timeout_task.abort(); + cancel_task(timeout_task).await; } // Remove old certs, we won't vote on past views diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 5c132ffeb9..f81bbd9347 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -37,3 +37,6 @@ pub mod vid; /// Generic task for collecting votes pub mod vote; + +/// Helper functions used by any task +mod helpers; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 306f27ac01..36d3c08c6f 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,6 +1,7 @@ #![allow(clippy::module_name_repetitions)] use crate::{ events::HotShotEvent, + helpers::cancel_task, vote::{spawn_vote_accumulator, AccumulatorInfo}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -676,10 +677,7 @@ impl, A: ConsensusApi + } if let Some(timeout_task) = self.timeout_task.take() { - #[cfg(async_executor_impl = "async-std")] - timeout_task.cancel().await; - #[cfg(async_executor_impl = "tokio")] - timeout_task.abort(); + cancel_task(timeout_task).await; } self.timeout_task = Some(async_spawn({ @@ -754,10 +752,7 @@ impl, A: ConsensusApi + .await; if let Some(timeout_task) = self.timeout_task.take() { - #[cfg(async_executor_impl = "async-std")] - timeout_task.cancel().await; - #[cfg(async_executor_impl = "tokio")] - timeout_task.abort(); + cancel_task(timeout_task).await; } self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); @@ -811,10 +806,7 @@ impl, A: ConsensusApi + } if let Some(timeout_task) = self.timeout_task.take() { - #[cfg(async_executor_impl = "async-std")] - timeout_task.cancel().await; - #[cfg(async_executor_impl = "tokio")] - timeout_task.abort(); + cancel_task(timeout_task).await; } self.event_stream @@ -871,10 +863,7 @@ impl, A: ConsensusApi + && last_seen_certificate == self.phase { if let Some(timeout_task) = self.timeout_task.take() { - #[cfg(async_executor_impl = "async-std")] - timeout_task.cancel().await; - #[cfg(async_executor_impl = "tokio")] - timeout_task.abort(); + cancel_task(timeout_task).await; } self.relay += 1; match self.phase { diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 21909bc6c6..f6b6a96c4f 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -233,10 +233,8 @@ impl RoundResult { } pub fn check_if_failed(&mut self, threshold: usize, total_num_nodes: usize) -> bool { - let num_decided = self.success_nodes.len(); let num_failed = self.failed_nodes.len(); - let remaining_nodes = total_num_nodes - (num_decided + num_failed); - remaining_nodes + num_decided >= threshold + total_num_nodes - num_failed >= threshold } /// determines whether or not the round passes /// also do a safety check From 202814ab758169c3c98272f975256e95698cd0eb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Dec 2023 09:31:27 -0500 Subject: [PATCH 0543/1393] add helper fn file --- task-impls/src/helpers.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 task-impls/src/helpers.rs diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs new file mode 100644 index 0000000000..c50f776500 --- /dev/null +++ b/task-impls/src/helpers.rs @@ -0,0 +1,12 @@ +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; + +/// Cancel a task +pub async fn cancel_task(task: JoinHandle) { + #[cfg(async_executor_impl = "async-std")] + task.cancel().await; + #[cfg(async_executor_impl = "tokio")] + task.abort(); +} From 62bf849df0929b5b678c8ac82a623839909ad64d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Dec 2023 10:02:25 -0500 Subject: [PATCH 0544/1393] make parameters more lax --- testing/tests/basic.rs | 2 +- testing/tests/catchup.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 815bc12870..73120c9844 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -111,7 +111,7 @@ async fn test_with_failures_half_f() { }; // TODO: this should only have 3 failures for each down leader, investigate why it fails additional views - metadata.overall_safety_properties.num_failed_views = 6; + metadata.overall_safety_properties.num_failed_views = 8; // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; metadata diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index b376fee0cd..848f62e27e 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -52,7 +52,6 @@ async fn test_catchup() { check_leaf: true, ..Default::default() }; - metadata.overall_safety_properties.num_failed_views = 2; metadata .gen_launcher::(0) From 487ffa46332e788f31c5a7bfebe8c76616f6b86c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Dec 2023 12:23:49 -0500 Subject: [PATCH 0545/1393] Update view sync for 2 timeouts --- task-impls/src/view_sync.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index cae79db753..eec24a3c3d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -476,11 +476,12 @@ impl< self.num_timeouts_tracked, *view_number ); - if self.num_timeouts_tracked > 3 { + if self.num_timeouts_tracked >= 3 { error!("Too many consecutive timeouts! This shouldn't happen"); } - if self.num_timeouts_tracked > 2 { + if self.num_timeouts_tracked >= 2 { + error!("Starting view sync protocol for view {}", *view_number + 1); // Start polling for view sync certificates self.network .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( @@ -743,6 +744,12 @@ impl, A: ConsensusApi + .publish(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) .await; } + + error!( + "View sync protocol has received view sync evidence to update the view to {}", + *self.next_view + ); + self.event_stream .publish(HotShotEvent::ViewChange(self.next_view)) .await; From dc3e1b1edce29739a36b74e3422efa46ecb68dd2 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Dec 2023 12:31:08 -0500 Subject: [PATCH 0546/1393] flip future polling views --- task-impls/src/view_sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index eec24a3c3d..c16bdfb30a 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -500,9 +500,9 @@ impl< let subscribe_view = if self.membership.get_leader(TYPES::Time::new(next_view)) == self.public_key { - next_view - } else { next_view + 1 + } else { + next_view }; // Subscribe to the next view just in case there is progress being made self.network From 997e1004979b98c34efb0dc067d49967fc8cde18 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Dec 2023 12:58:50 -0500 Subject: [PATCH 0547/1393] poll for current proposal on view sync timeout --- .../traits/networking/web_server_network.rs | 36 ++++++++++++------- task-impls/src/view_sync.rs | 4 +++ 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index ad0b2dd078..c7270ac0ae 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -139,6 +139,9 @@ struct Inner { /// Task map for transactions txn_task_map: Arc>>>>, + /// Task polling for current propsal + current_proposal_task: + Arc>>>>, } impl Inner { @@ -502,6 +505,7 @@ impl WebServerNetwork { view_sync_cert_task_map: Arc::default(), view_sync_vote_task_map: Arc::default(), txn_task_map: Arc::default(), + current_proposal_task: Arc::default(), }); inner.connected.store(true, Ordering::Relaxed); @@ -848,23 +852,29 @@ impl ConnectedNetwork, TYPES::Signatur } } ConsensusIntentEvent::PollForCurrentProposal => { - // create new task - let (_, receiver) = unbounded(); - - async_spawn({ - let inner_clone = self.inner.clone(); - async move { - if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) - .await - { - warn!( + let mut proposal_task = self.inner.current_proposal_task.write().await; + if proposal_task.is_none() { + // create new task + let (sender, receiver) = unbounded(); + *proposal_task = Some(sender); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) + .await + { + warn!( "Background receive proposal polling encountered an error: {:?}", e ); + } + let mut proposal_task = inner_clone.current_proposal_task.write().await; + *proposal_task = None; } - } - }); + }); + } } ConsensusIntentEvent::PollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index cae79db753..63763b5cbb 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -865,6 +865,10 @@ impl, A: ConsensusApi + && relay == self.relay && last_seen_certificate == self.phase { + // Keep tyring to get a more recent proposal to catch up to + self.network + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .await; self.relay += 1; match self.phase { ViewSyncPhase::None => { From 1ab2c13591f6ff5db22e8f0232d66b1958331e31 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 14 Dec 2023 13:55:41 -0500 Subject: [PATCH 0548/1393] Update view sync unit tests --- testing/tests/view_sync_task.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 6cd1818cd5..615707888d 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -27,11 +27,11 @@ async fn test_view_sync_task() { let vote_data = ViewSyncPreCommitData { relay: 0, - round: ::Time::new(5), + round: ::Time::new(4), }; let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, - ::Time::new(5), + ::Time::new(4), hotshot_types::traits::consensus_api::ConsensusApi::public_key(&api), hotshot_types::traits::consensus_api::ConsensusApi::private_key(&api), ); @@ -44,16 +44,13 @@ async fn test_view_sync_task() { input.push(HotShotEvent::Timeout(ViewNumber::new(2))); input.push(HotShotEvent::Timeout(ViewNumber::new(3))); - input.push(HotShotEvent::Timeout(ViewNumber::new(4))); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Timeout(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); - output.insert(HotShotEvent::Timeout(ViewNumber::new(4)), 1); output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(3)), 1); output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); output.insert(HotShotEvent::Shutdown, 1); From 2221373e43eec2f6c94f8492db4b7862940f0c2d Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 14 Dec 2023 15:23:33 -0500 Subject: [PATCH 0549/1393] [Stability] Only use open ports for test runs (#2230) * fix combined network test determinism * fix test prerequisites * fix lints --- constants/src/lib.rs | 4 ---- hotshot/Cargo.toml | 1 + .../src/traits/networking/libp2p_network.rs | 10 +++++--- .../traits/networking/web_server_network.rs | 6 +++-- libp2p-networking/src/network/mod.rs | 9 +++++--- libp2p-networking/src/network/node.rs | 8 +++---- libp2p-networking/src/network/node/handle.rs | 2 +- testing/tests/combined_network.rs | 23 ------------------- 8 files changed, 23 insertions(+), 40 deletions(-) diff --git a/constants/src/lib.rs b/constants/src/lib.rs index 279ad62890..6ca29a2de0 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -17,7 +17,3 @@ pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; - -/// the amount of time to wait for async_std tests to spin down the Libp2p listeners -/// and allow future tests to run -pub const ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME: u64 = 4; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 74bd1eab9d..39105e6b64 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -114,6 +114,7 @@ surf-disco = { workspace = true } time = { workspace = true } local-ip-address = "0.5.6" dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } +portpicker = "0.1.1" tracing = { workspace = true } typenum = { workspace = true } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a30ccb577a..caf2d72d3e 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -142,7 +142,7 @@ where fn generator( expected_node_count: usize, num_bootstrap: usize, - network_id: usize, + _network_id: usize, da_committee_size: usize, _is_da: bool, ) -> Box Self + 'static> { @@ -173,9 +173,13 @@ where node_id, node_id < num_bootstrap as u64 ); + + // pick a free, unused UDP port for testing + let port = portpicker::pick_unused_port().expect("Could not find an open port"); + let addr = - // Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/0/quic-v1")).unwrap(); - Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}{}/quic-v1", 5000 + node_id, network_id)).unwrap(); + Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{port}/quic-v1")).unwrap(); + // We assign node's public key and stake value rather than read from config file since it's a test let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index c7270ac0ae..21f9dcdec0 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -25,7 +25,6 @@ use hotshot_types::{ }, }; use hotshot_web_server::{self, config}; -use rand::random; use serde::{Deserialize, Serialize}; use surf_disco::Url; @@ -1130,7 +1129,10 @@ impl TestableNetworkingImplementation for WebServerNetwo ) -> Box Self + 'static> { let (server_shutdown_sender, server_shutdown) = oneshot(); let sender = Arc::new(server_shutdown_sender); - let port = random::(); + + // pick random, unused port + let port = portpicker::pick_unused_port().expect("Could not find an open port"); + let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); info!("Launching web server on port {port}"); // Start web server diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 2d35fdca5e..05134a34bf 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -195,13 +195,16 @@ pub fn gen_multiaddr(port: u16) -> Multiaddr { build_multiaddr!(Ip4([0, 0, 0, 0]), Udp(port), QuicV1) } +/// `BoxedTransport` is a type alias for a boxed tuple containing a `PeerId` and a `StreamMuxerBox`. +/// +/// This type is used to represent a transport in the libp2p network framework. The `PeerId` is a unique identifier for each peer in the network, and the `StreamMuxerBox` is a type of multiplexer that can handle multiple substreams over a single connection. +type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; + /// Generate authenticated transport /// # Errors /// could not sign the quic key with `identity` #[instrument(skip(identity))] -pub async fn gen_transport( - identity: Keypair, -) -> Result, NetworkError> { +pub async fn gen_transport(identity: Keypair) -> Result { let quic_transport = { let mut config = quic::Config::new(&identity); config.handshake_timeout = std::time::Duration::from_secs(20); diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 1853781025..e4c44ca401 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -13,7 +13,8 @@ pub use self::{ use super::{ behaviours::gossip::GossipBehaviour, error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, - gen_transport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, NetworkNodeType, + gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, + NetworkNodeType, }; use crate::network::{ @@ -35,7 +36,6 @@ use futures::{select, FutureExt, StreamExt}; use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::core::transport::ListenerId; use libp2p::{ - core::{muxing::StreamMuxerBox, transport::Boxed}, gossipsub::{ Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, Message as GossipsubMessage, MessageAuthenticity, MessageId, Topic, ValidationMode, @@ -150,7 +150,7 @@ impl NetworkNode { /// Currently: /// * Generates a random key pair and associated [`PeerId`] /// * Launches a hopefully production ready transport: - /// QUIC v1 (RFC 9000) + DNS + Websocket + XX auth + /// QUIC v1 (RFC 9000) + DNS /// * Generates a connection to the "broadcast" topic /// * Creates a swarm to manage peers and events #[instrument] @@ -163,7 +163,7 @@ impl NetworkNode { }; let peer_id = PeerId::from(identity.public()); debug!(?peer_id); - let transport: Boxed<(PeerId, StreamMuxerBox)> = gen_transport(identity.clone()).await?; + let transport: BoxedTransport = gen_transport(identity.clone()).await?; trace!("Launched network transport"); // Generate the swarm let mut swarm: Swarm = { diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 5c50d61cb7..ae5a77a0c5 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -88,7 +88,7 @@ impl NetworkNodeHandle { /// constructs a new node listening on `known_addr` #[instrument] pub async fn new(config: NetworkNodeConfig, id: usize) -> Result { - //`randomly assigned port + // randomly assigned port let listen_addr = config .bound_addr .clone() diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index b78f48a39e..663d6a32b8 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -1,8 +1,5 @@ use std::time::Duration; -#[cfg(async_executor_impl = "async-std")] -use hotshot_constants::ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME; - use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{CombinedImpl, TestTypes}, @@ -48,10 +45,6 @@ async fn test_combined_network() { .launch() .run_test() .await; - - // async_std needs time to spin down the handler - #[cfg(async_executor_impl = "async-std")] - async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } // A run where the webserver crashes part-way through @@ -102,10 +95,6 @@ async fn test_combined_network_webserver_crash() { .launch() .run_test() .await; - - // async_std needs time to spin down the handler - #[cfg(async_executor_impl = "async-std")] - async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } // A run where the webserver crashes partway through @@ -162,10 +151,6 @@ async fn test_combined_network_reup() { .launch() .run_test() .await; - - // async_std needs time to spin down the handler - #[cfg(async_executor_impl = "async-std")] - async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } // A run where half of the nodes disconnect from the webserver @@ -216,10 +201,6 @@ async fn test_combined_network_half_dc() { .launch() .run_test() .await; - - // async_std needs time to spin down the handler - #[cfg(async_executor_impl = "async-std")] - async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } fn generate_random_node_changes( @@ -293,8 +274,4 @@ async fn test_stress_combined_network_fuzzy() { .launch() .run_test() .await; - - // async_std needs time to spin down the handler - #[cfg(async_executor_impl = "async-std")] - async_std::task::sleep(Duration::from_secs(ASYNC_STD_LIBP2P_LISTENER_SPINDOWN_TIME)).await; } From 1e3d8d4433fdd139b1b0dc8259f702dcd551112a Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 14 Dec 2023 16:05:13 -0500 Subject: [PATCH 0550/1393] [Stability] Fix GC for tasks (#2235) * remove cancelled polls from the taskmap * remove debug prints * merge current proposal changes --- hotshot/Cargo.toml | 1 + .../traits/networking/web_server_network.rs | 194 +++++++++++------- 2 files changed, 116 insertions(+), 79 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 39105e6b64..2483dbe8bd 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -114,6 +114,7 @@ surf-disco = { workspace = true } time = { workspace = true } local-ip-address = "0.5.6" dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } +derive_more = "0.99.17" portpicker = "0.1.1" tracing = { workspace = true } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 21f9dcdec0..7d4d1294cf 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -11,6 +11,7 @@ use async_compatibility_layer::{ }; use async_lock::RwLock; use async_trait::async_trait; +use derive_more::{Deref, DerefMut}; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ message::{Message, MessagePurpose}, @@ -94,6 +95,79 @@ impl WebServerNetwork { } } +/// `TaskChannel` is a type alias for an unbounded sender channel that sends `ConsensusIntentEvent`s. +/// +/// This channel is used to send events to a task. The `K` type parameter is the type of the key used in the `ConsensusIntentEvent`. +/// +/// # Examples +/// +/// ``` +/// let (tx, _rx): (TaskChannel, _) = tokio::sync::mpsc::unbounded_channel(); +/// ``` +/// +/// # Note +/// +/// This type alias is used in the context of a `TaskMap`, where each task is represented by a `TaskChannel`. +type TaskChannel = UnboundedSender>; + +/// `TaskMap` is a wrapper around a `BTreeMap` that maps view numbers to tasks. +/// +/// Each task is represented by a `TaskChannel` that can be used to send events to the task. +/// The key `K` is a type that implements the `SignatureKey` trait. +/// +/// # Examples +/// +/// ``` +/// use your_crate::TaskMap; +/// let mut map: TaskMap = TaskMap::default(); +/// ``` +/// +/// # Note +/// +/// This struct is `Clone`, `Deref`, and `DerefMut`, so it can be used just like a `BTreeMap`. +#[derive(Debug, Clone, Deref, DerefMut)] +struct TaskMap(BTreeMap>); + +impl Default for TaskMap { + fn default() -> Self { + Self(BTreeMap::default()) + } +} + +impl TaskMap { + /// Prunes tasks that are polling for a view less than or equal to `current_view - 2`. + /// + /// This method cancels and removes all entries in the task map that are polling for a view less than or equal to `current_view - 2`. + /// The cancellation is performed by sending a `cancel_event` to the task. + /// + /// # Arguments + /// + /// * `current_view` - The current view number. Tasks polling for a view less than or equal to `current_view - 2` will be pruned. + /// * `cancel_event_fn` - A function that takes a view number and returns a `ConsensusIntentEvent` to be sent to the task for cancellation. + /// + /// # Examples + /// + /// ``` + /// let mut map: TaskMap = TaskMap::default(); + /// map.prune_tasks(10, ConsensusIntentEvent::CancelPollForProposal).await; + /// ``` + async fn prune_tasks( + &mut self, + current_view: u64, + cancel_event_fn: fn(u64) -> ConsensusIntentEvent, + ) { + let cutoff_view = current_view.saturating_sub(2); + let views_to_remove: Vec<_> = self.range(..cutoff_view).map(|(key, _)| *key).collect(); + + for view in views_to_remove { + let task = self.remove(&view); + if let Some(task) = task { + let _ = task.send(cancel_event_fn(view)).await; + } + } + } +} + /// Represents the core of web server networking #[derive(Debug)] struct Inner { @@ -118,26 +192,19 @@ struct Inner { tx_index: Arc>, /// Task map for quorum proposals. - proposal_task_map: - Arc>>>>, + proposal_task_map: Arc>>, /// Task map for quorum votes. - vote_task_map: - Arc>>>>, + vote_task_map: Arc>>, /// Task map for VID disperse data - vid_disperse_task_map: - Arc>>>>, + vid_disperse_task_map: Arc>>, /// Task map for DACs. - dac_task_map: - Arc>>>>, + dac_task_map: Arc>>, /// Task map for view sync certificates. - view_sync_cert_task_map: - Arc>>>>, + view_sync_cert_task_map: Arc>>, /// Task map for view sync votes. - view_sync_vote_task_map: - Arc>>>>, + view_sync_vote_task_map: Arc>>, /// Task map for transactions - txn_task_map: - Arc>>>>, + txn_task_map: Arc>>, /// Task polling for current propsal current_proposal_task: Arc>>>>, @@ -802,15 +869,10 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForProposal(*view)) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForProposal) + .await; } ConsensusIntentEvent::PollForVIDDisperse(view_number) => { // Check if we already have a task for this (we shouldn't) @@ -840,15 +902,10 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForVIDDisperse(*view)) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVIDDisperse) + .await; } ConsensusIntentEvent::PollForCurrentProposal => { let mut proposal_task = self.inner.current_proposal_task.write().await; @@ -899,15 +956,10 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForVotes(*view)) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVotes) + .await; } ConsensusIntentEvent::PollForDAC(view_number) => { @@ -934,15 +986,10 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForDAC(*view)) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForDAC) + .await; } ConsensusIntentEvent::CancelPollForVotes(view_number) => { @@ -986,17 +1033,13 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForViewSyncCertificate( - *view, - )) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks( + view_number, + ConsensusIntentEvent::CancelPollForViewSyncCertificate, + ) + .await; } ConsensusIntentEvent::PollForViewSyncVotes(view_number) => { let mut task_map = self.inner.view_sync_vote_task_map.write().await; @@ -1026,15 +1069,13 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForViewSyncVotes(*view)) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks( + view_number, + ConsensusIntentEvent::CancelPollForViewSyncVotes, + ) + .await; } ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) => { @@ -1089,15 +1130,10 @@ impl ConnectedNetwork, TYPES::Signatur debug!("Somehow task already existed!"); } - // Remove all entries in the task map that are polling for a view less than or equal to `view_number - 2`. - let view_minus_2 = view_number.saturating_sub(2); - let range = task_map.range(..view_minus_2); - for (view, task) in range { - // Cancel the old task by sending a message to it. If the task already exited we expect an error - let _res = task - .send(ConsensusIntentEvent::CancelPollForTransactions(*view)) - .await; - } + // Cancel old, stale tasks + task_map + .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForTransactions) + .await; } ConsensusIntentEvent::CancelPollForTransactions(view_number) => { let mut task_map = self.inner.txn_task_map.write().await; From 22fc3374495770b4fc64e2618344b0798b45e2b0 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 14 Dec 2023 18:58:52 -0500 Subject: [PATCH 0551/1393] [Stability] Disable saving of VID shares (for now) (#2238) * disable saving of VID shares (for now) * add to RUSTSEC ignore --- task-impls/src/consensus.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7b06804e53..3f43311c45 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -231,15 +231,17 @@ impl, A: ConsensusApi + } } + // TODO: re-enable this when HotShot/the sequencer needs the shares for something + // issue: https://github.com/EspressoSystems/HotShot/issues/2236 // Only vote if you has seen the VID share for this view - if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { - } else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - proposal.view_number - ); - return false; - } + // if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { + // } else { + // debug!( + // "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + // proposal.view_number + // ); + // return false; + // } // Only vote if you have the DA cert // ED Need to update the view number this is stored under? @@ -952,7 +954,9 @@ impl, A: ConsensusApi + .await; // Add to the storage that we have received the VID disperse for a specific view - self.vid_shares.insert(view, disperse); + // TODO: re-enable this when HotShot/the sequencer needs the shares for something + // issue: https://github.com/EspressoSystems/HotShot/issues/2236 + // self.vid_shares.insert(view, disperse); } HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); From 0d5bbde479683afe4b8273be13fb8cbc4a792cef Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 14 Dec 2023 16:42:42 -0800 Subject: [PATCH 0552/1393] Fix catchup tests --- testing/tests/catchup.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 07def0a843..cdab5d3b6a 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -38,13 +38,14 @@ async fn test_catchup() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(25, catchup_nodes)], + // Start the nodes before their leadership. + node_changes: vec![(15, catchup_nodes)], }; metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), + duration: Duration::from_secs(60), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { @@ -52,6 +53,8 @@ async fn test_catchup() { ..Default::default() }; metadata.overall_safety_properties.num_failed_views = 2; + // Make sure we keep commiting rounds after the catchup, but not the full 50. + metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) @@ -122,7 +125,6 @@ async fn test_catchup_web() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_catchup_one_node() { use std::time::Duration; @@ -151,21 +153,23 @@ async fn test_catchup_one_node() { metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(25, catchup_nodes)], + // Start the node before its leadership. + node_changes: vec![(15, catchup_nodes)], }; metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(20000), + duration: Duration::from_secs(60), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() }; - // only alow for the view which the catchup node hasn't started to fail - metadata.overall_safety_properties.num_failed_views = 5; + metadata.overall_safety_properties.num_failed_views = 1; + // Make sure we keep commiting rounds after the catchup, but not the full 50. + metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) @@ -175,14 +179,12 @@ async fn test_catchup_one_node() { } /// Same as `test_catchup` except we start the nodes after their leadership so they join during view sync -/// This fails for the same reason as the timeout test and should work once that is fixed. #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_catchup_in_view_sync() { use std::time::Duration; @@ -223,7 +225,7 @@ async fn test_catchup_in_view_sync() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(10000), + duration: Duration::from_secs(60), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { From dc29801d4d6330af9657a24e14f11a5cb31f346d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 15 Dec 2023 09:25:50 -0800 Subject: [PATCH 0553/1393] added helper functions for manually creating cert --- hotshot/src/types/handle.rs | 21 +++++- task-impls/src/consensus.rs | 23 +++--- testing/Cargo.toml | 1 + testing/src/task_helpers.rs | 125 ++++++++++++++++++++++++++++++-- testing/tests/consensus_task.rs | 21 +----- 5 files changed, 155 insertions(+), 36 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index ad4b44f62d..e43e568dd8 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,12 +20,15 @@ use hotshot_types::{ event::EventType, message::{MessageKind, SequencingMessage}, traits::{ - election::Membership, node_implementation::NodeType, state::ConsensusTime, storage::Storage, + node_implementation::NodeType, state::ConsensusTime, storage::Storage, }, }; use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; use std::sync::Arc; use tracing::error; +use hotshot_types::traits::election::Membership; +use ethereum_types::U256; +use hotshot_types::traits::signature_key::SignatureKey; /// Event streaming handle for a [`SystemContext`] instance running in the background /// @@ -198,6 +201,22 @@ impl + 'static> SystemContextHandl } // Below is for testing only: + + /// Gets the current membership of the [`HotShot`] instance + #[cfg(feature = "hotshot-testing")] + pub fn get_committee_qc_stake_table(&self) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry>{ + self.hotshot + .inner + .memberships + .quorum_membership.get_committee_qc_stake_table() + } + + /// Gets the threshold of current membership of the [`HotShot`] instance + #[cfg(feature = "hotshot-testing")] + pub fn get_threshold(&self) -> U256 { + U256::from(self.hotshot.inner.memberships.quorum_membership.success_threshold().get()) + } + /// Wrapper for `HotShotConsensusApi`'s `get_leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d8c3512c09..ea559b72ac 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -319,7 +319,7 @@ impl, A: ConsensusApi + } } debug!( - "Couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", + "Received VID share, but couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", *proposal.get_view_number(), ); return false; @@ -413,8 +413,8 @@ impl, A: ConsensusApi + pub async fn handle_event(&mut self, event: HotShotEvent) { match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { - debug!( - "Received Quorum Propsoal for view {}", + error!( + "Received Quorum Proposal for view {}", *proposal.data.view_number ); @@ -424,19 +424,19 @@ impl, A: ConsensusApi + *proposal.data.view_number, )) .await; - + error!("Step 1"); // Sishan TODO: remove these error message let view = proposal.data.get_view_number(); if view < self.cur_view { debug!("Proposal is from an older view {:?}", proposal.data.clone()); return; } - + error!("Step 2"); let view_leader_key = self.quorum_membership.get_leader(view); if view_leader_key != sender { warn!("Leader key does not match key in proposal"); return; } - + error!("Step 3, proposal.data.justify_qc.get_view_number() = {:?}, view - 1 = {:?}", proposal.data.justify_qc.get_view_number(), view - 1); // Verify a timeout certificate exists and is valid if proposal.data.justify_qc.get_view_number() != view - 1 { let Some(timeout_cert) = proposal.data.timeout_certificate.clone() else { @@ -456,7 +456,7 @@ impl, A: ConsensusApi + return; } } - + error!("Step 4"); let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { @@ -465,14 +465,14 @@ impl, A: ConsensusApi + consensus.metrics.invalid_qc.update(1); return; } - + error!("Step 5"); // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view).await; self.current_proposal = Some(proposal.data.clone()); let consensus = self.consensus.upgradable_read().await; - + error!("Step 6"); // Construct the leaf. let parent = if justify_qc.is_genesis { self.genesis_leaf().await @@ -562,7 +562,7 @@ impl, A: ConsensusApi + error!("Failed safety check and liveness check"); return; } - + error!("Step 7"); let high_qc = leaf.justify_qc.clone(); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; @@ -717,7 +717,7 @@ impl, A: ConsensusApi + == self.current_proposal.clone().unwrap().view_number; // todo get rid of this clone let qc = consensus.high_qc.clone(); - + error!("Step 8"); drop(consensus); if should_propose { debug!( @@ -727,6 +727,7 @@ impl, A: ConsensusApi + self.publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await; } + error!("Step 9, plan to go to vote_if_able()"); if !self.vote_if_able().await { return; } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 3c4b3dd621..a45ee2b25e 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -20,6 +20,7 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } # needed for vrf demo # so non-optional for now +bincode = { workspace = true } blake3 = { workspace = true, features = ["traits-preview"] } sha3 = "^0.10" commit = { workspace = true } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index aec6cfadcc..cf8a87eb38 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -30,6 +30,15 @@ use hotshot_types::{ vote::HasViewNumber, }; +use hotshot_types::vote::Certificate; +use bitvec::bitvec; +use hotshot_types::simple_vote::QuorumVote; +use hotshot_types::simple_vote::QuorumData; +use hotshot_types::vote::Vote; +use hotshot_utils::bincode::bincode_opts; +use bincode::Options; +use tracing::error; + pub async fn build_system_handle( node_id: u64, ) -> ( @@ -102,9 +111,86 @@ pub async fn build_system_handle( .expect("Could not init hotshot") } +pub fn build_assembled_sig< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate + >( + leaf: Leaf, + handle: &SystemContextHandle, + view: TYPES::Time +) -> ::QCType { + + // Assemble QC + let stake_table = handle.get_committee_qc_stake_table(); + let real_qc_pp: ::QCParams = + ::get_public_parameter( + stake_table.clone(), + handle.get_threshold(), + ); + let total_nodes = stake_table.len(); + let signers = bitvec![1; total_nodes]; + let mut sig_lists = Vec::new(); + + // calculate vote + for node_id in 0..total_nodes { + let (private_key, public_key) = key_pair_for_id(node_id.try_into().unwrap()); + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + &public_key, + &private_key, + ); + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + sig_lists.push(original_signature); + } + + let real_qc_sig = ::assemble( + &real_qc_pp, + signers.as_bitslice(), + &sig_lists[..], + ); + + real_qc_sig +} + +pub fn build_qc< + TYPES: NodeType, + VOTE: Vote>, + CERT: Certificate + >( + real_qc_sig: ::QCType, + leaf: Leaf, + view: TYPES::Time, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, +) -> CERT { + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + public_key, + private_key, + ); + let cert = CERT::create_signed_certificate( + vote.get_data_commitment(), + vote.get_data().clone(), + real_qc_sig, + vote.get_view_number(), + ); + cert +} + async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, + public_key: &BLSPubKey, view: u64, ) -> (QuorumProposal, EncodedSignature) { let consensus_lock = handle.get_consensus(); @@ -123,13 +209,14 @@ async fn build_quorum_proposal_and_signature( panic!("Failed to find high QC parent."); }; let parent_leaf = leaf.clone(); + error!("parent_leaf's view = {:?}", parent_leaf.view_number); // every event input is seen on the event stream in the output. let block = ::genesis(); let payload_commitment = vid_commitment(&block.encode().unwrap().collect()); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); let leaf = Leaf { - view_number: ViewNumber::new(view), + view_number: ViewNumber::new(1), justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), @@ -140,13 +227,40 @@ async fn build_quorum_proposal_and_signature( }; let signature = ::sign(private_key, leaf.commit().as_ref()); let proposal = QuorumProposal:: { - block_header, - view_number: ViewNumber::new(view), + block_header: block_header.clone(), + view_number: ViewNumber::new(1), justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, - proposer_id: leaf.proposer_id, + proposer_id: leaf.proposer_id.clone(), }; + if view == 2 { + let created_assembled_sig = + build_assembled_sig::, QuorumCertificate>(leaf.clone(), handle, ViewNumber::new(1)); + let created_qc = build_qc::, QuorumCertificate>(created_assembled_sig, leaf.clone(), ViewNumber::new(1), public_key, private_key); + let parent_leaf = leaf.clone(); + let leaf_view2 = Leaf { + view_number: ViewNumber::new(2), + justify_qc: created_qc.clone(), + parent_commitment: parent_leaf.commit(), + block_header: block_header.clone(), + block_payload: None, + rejected: vec![], + timestamp: 0, + proposer_id: api.public_key().to_bytes(), + }; + let signature_view2 = ::sign(private_key, leaf_view2.commit().as_ref()); + let proposal_view2 = QuorumProposal:: { + block_header, + view_number: ViewNumber::new(2), + justify_qc: created_qc, + timeout_certificate: None, + proposer_id: leaf_view2.proposer_id, + }; + error!("Have you really entered view 2?"); + return (proposal_view2, signature_view2) + } + (proposal, signature) } @@ -155,8 +269,9 @@ pub async fn build_quorum_proposal( private_key: &::PrivateKey, view: u64, ) -> Proposal> { + let public_key = &BLSPubKey::from_private(private_key); let (proposal, signature) = - build_quorum_proposal_and_signature(handle, private_key, view).await; + build_quorum_proposal_and_signature(handle, private_key, public_key, view).await; Proposal { data: proposal, signature, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index dadda5fb80..4df643c3b9 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -161,7 +161,6 @@ async fn test_consensus_with_vid_vote() { let (handle, _event_stream) = build_system_handle(2).await; // We assign node's key pair rather than read from config file since it's a test - let (private_key_view1, public_key_view1) = key_pair_for_id(1); // In view 2, node 2 is the leader. let (private_key_view2, public_key_view2) = key_pair_for_id(2); @@ -193,27 +192,10 @@ async fn test_consensus_with_vid_vote() { let mut input = Vec::new(); let mut output = HashMap::new(); - let proposal_view1 = build_quorum_proposal(&handle, &private_key_view1, 1).await; // Do a view change, so that it's not the genesis view, and vid vote is needed input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::QuorumProposalRecv( - proposal_view1.clone(), - public_key_view1, - )); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal_view1.clone(), public_key_view1), - 1, - ); - - // Since it's genesis view, node can vote without dac and vid share - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view1.data).await { - output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); - } - - // For the test of vote logic with vid + // For the test of vote logic with vid, starting view 2 we need vid share input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); // Sishan TOOD: this proposal on view 2 doesn't have a valid justify QC let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; @@ -262,6 +244,7 @@ async fn test_consensus_with_vid_vote() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_consensus_no_vote_without_vid_share() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; From 9fbf395d4b05f4ff52ff5aa37f8dbf62b56b6954 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Dec 2023 14:14:49 -0500 Subject: [PATCH 0554/1393] change theshold for pre-commit cert --- task-impls/src/view_sync.rs | 40 +---------------------- types/src/simple_certificate.rs | 57 +++++++++++++++++++++++++-------- types/src/vote.rs | 4 +++ 3 files changed, 48 insertions(+), 53 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d31e9b6219..16bc153b6a 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -901,7 +901,7 @@ impl, A: ConsensusApi + .await; self.relay += 1; match self.phase { - ViewSyncPhase::None => { + ViewSyncPhase::None | ViewSyncPhase::PreCommit | ViewSyncPhase::Commit => { let vote = ViewSyncPreCommitVote::::create_signed_vote( ViewSyncPreCommitData { relay: self.relay, @@ -920,44 +920,6 @@ impl, A: ConsensusApi + .await; } } - ViewSyncPhase::PreCommit => { - let vote = ViewSyncCommitVote::::create_signed_vote( - ViewSyncCommitData { - relay: self.relay, - round: self.next_view, - }, - self.next_view, - &self.public_key, - &self.private_key, - ); - let message = - GeneralConsensusMessage::::ViewSyncCommitVote(vote); - - if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) - .await; - } - } - ViewSyncPhase::Commit => { - let vote = ViewSyncFinalizeVote::::create_signed_vote( - ViewSyncFinalizeData { - relay: self.relay, - round: self.next_view, - }, - self.next_view, - &self.public_key, - &self.private_key, - ); - let message = - GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); - - if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) - .await; - } - } ViewSyncPhase::Finalize => { // This should never occur unimplemented!() diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 9b64a9160d..893ca7518b 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -24,9 +24,35 @@ use crate::{ use serde::{Deserialize, Serialize}; +/// Trait which allows use to inject different threshold calculations into a Certificate type +pub trait Threshhold { + /// Calculate a threshold based on the membership + fn threshold>(membership: &MEMBERSHIP) -> u64; +} + +/// Defines a threshold which is 2f + 1 (Amount needed for Quorum) +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct SuccessThreshhold {} + +impl Threshhold for SuccessThreshhold { + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.success_threshold().into() + } +} + +/// Defines a threshold which is f + 1 (i.e at least one of the stake is honest) +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct OneHonestThreshhold {} + +impl Threshhold for OneHonestThreshhold { + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.failure_threshold().into() + } +} + /// A certificate which can be created by aggregating many simple votes on the commitment. #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct SimpleCertificate { +pub struct SimpleCertificate> { /// The data this certificate is for. I.e the thing that was voted on to create this Certificate pub data: VOTEABLE, /// commitment of all the votes this cert should be signed over @@ -37,14 +63,15 @@ pub struct SimpleCertificate { pub signatures: Option<::QCType>, /// If this QC is for the genesis block pub is_genesis: bool, - /// phantom data for `MEMBERSHIP` and `TYPES` - pub _pd: PhantomData, + /// phantom data for `THRESHHOLD` and `TYPES` + pub _pd: PhantomData<(TYPES, THRESHHOLD)>, } -impl Certificate - for SimpleCertificate +impl> + Certificate for SimpleCertificate { type Voteable = VOTEABLE; + type Threshhold = THRESHHOLD; fn create_signed_certificate( vote_commitment: Commitment, @@ -76,7 +103,7 @@ impl Certificate ) } fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.success_threshold().into() + THRESHHOLD::threshold(membership) } fn get_data(&self) -> &Self::Voteable { &self.data @@ -86,8 +113,8 @@ impl Certificate } } -impl HasViewNumber - for SimpleCertificate +impl> + HasViewNumber for SimpleCertificate { fn get_view_number(&self) -> TYPES::Time { self.view_number @@ -123,18 +150,20 @@ impl QuorumCertificate { } /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` -pub type QuorumCertificate = SimpleCertificate>; +pub type QuorumCertificate = SimpleCertificate, SuccessThreshhold>; /// Type alias for a DA certificate over `DAData` -pub type DACertificate = SimpleCertificate; +pub type DACertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number -pub type TimeoutCertificate = SimpleCertificate>; +pub type TimeoutCertificate = + SimpleCertificate, SuccessThreshhold>; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` /// Type alias for a `ViewSyncPreCommit` certificate over a view number pub type ViewSyncPreCommitCertificate2 = - SimpleCertificate>; + SimpleCertificate, OneHonestThreshhold>; /// Type alias for a `ViewSyncCommit` certificate over a view number -pub type ViewSyncCommitCertificate2 = SimpleCertificate>; +pub type ViewSyncCommitCertificate2 = + SimpleCertificate, SuccessThreshhold>; /// Type alias for a `ViewSyncFinalize` certificate over a view number pub type ViewSyncFinalizeCertificate2 = - SimpleCertificate>; + SimpleCertificate, SuccessThreshhold>; diff --git a/types/src/vote.rs b/types/src/vote.rs index 0d71f7f145..dec92f6a32 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -14,6 +14,7 @@ use hotshot_utils::bincode::bincode_opts; use tracing::error; use crate::{ + simple_certificate::Threshhold, simple_vote::Voteable, traits::{ election::Membership, @@ -53,6 +54,9 @@ pub trait Certificate: HasViewNumber { /// The data commitment this certificate certifies. type Voteable: Voteable; + /// Threshold Functions + type Threshhold: Threshhold; + /// Build a certificate from the data commitment and the quorum of signers fn create_signed_certificate( vote_commitment: Commitment, From a91af151620e5da9afdcdc90529311875d90afe5 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Dec 2023 15:42:49 -0500 Subject: [PATCH 0555/1393] fix bugs --- task-impls/src/view_sync.rs | 119 ++++++++++++-------------------- types/src/simple_certificate.rs | 2 +- 2 files changed, 46 insertions(+), 75 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 16bc153b6a..e649f1e4f3 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -5,7 +5,6 @@ use crate::{ vote::{spawn_vote_accumulator, AccumulatorInfo}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use either::Either; use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -19,7 +18,7 @@ use hotshot_types::{ ViewSyncPreCommitVote, }, traits::network::ConsensusIntentEvent, - vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, + vote::{Certificate, HasViewNumber, Vote}, }; #[cfg(async_executor_impl = "async-std")] @@ -98,7 +97,7 @@ pub struct ViewSyncTaskState< pub replica_task_map: HashMap, /// Map of running relay tasks - pub relay_task_map: HashMap, + pub relay_task_map: HashMap<(TYPES::Time, ViewSyncPhase), ViewSyncTaskInfo>, /// Timeout duration for view sync rounds pub view_sync_timeout: Duration, @@ -175,51 +174,6 @@ pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< ViewSyncReplicaTaskState, >; -/// State of a view sync relay task -pub struct ViewSyncRelayTaskState< - TYPES: NodeType, - I: NodeImplementation, - VOTE: Vote, - CERTIFICATE: Certificate, -> { - /// Event stream to publish events to - pub event_stream: ChannelStream>, - /// Network for all nodes - pub network: Arc, - /// Membership for teh quorum - pub membership: Arc, - /// This Nodes Public Key - pub public_key: TYPES::SignatureKey, - /// Our Private Key - pub private_key: ::PrivateKey, - - /// Vote accumulator - #[allow(clippy::type_complexity)] - pub accumulator: Either, CERTIFICATE>, - /// Our node id; for logging - pub id: u64, -} - -impl< - TYPES: NodeType, - I: NodeImplementation, - VOTE: Vote + std::marker::Send + std::marker::Sync + 'static, - CERTIFICATE: Certificate - + std::marker::Send - + std::marker::Sync - + 'static, - > TS for ViewSyncRelayTaskState -{ -} - -/// Types used by the view sync relay task -pub type ViewSyncRelayTaskStateTypes = HSTWithEvent< - ViewSyncTaskError, - HotShotEvent, - ChannelStream>, - ViewSyncRelayTaskState, ->; - impl< TYPES: NodeType, I: NodeImplementation, @@ -315,7 +269,8 @@ impl< HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let vote_view = vote.get_view_number(); - if let Some(relay_task) = self.relay_task_map.get(&vote_view) { + let view_phase = (vote_view, ViewSyncPhase::PreCommit); + if let Some(relay_task) = self.relay_task_map.get(&view_phase) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -347,13 +302,14 @@ impl< spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; if let Some((_, _, event_stream_id)) = vote_collector { self.relay_task_map - .insert(vote_view, ViewSyncTaskInfo { event_stream_id }); + .insert(view_phase, ViewSyncTaskInfo { event_stream_id }); } } HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let vote_view = vote.get_view_number(); - if let Some(relay_task) = self.relay_task_map.get(&vote_view) { + let view_phase = (vote_view, ViewSyncPhase::Commit); + if let Some(relay_task) = self.relay_task_map.get(&view_phase) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -385,13 +341,14 @@ impl< spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; if let Some((_, _, event_stream_id)) = vote_collector { self.relay_task_map - .insert(vote_view, ViewSyncTaskInfo { event_stream_id }); + .insert(view_phase, ViewSyncTaskInfo { event_stream_id }); } } HotShotEvent::ViewSyncFinalizeVoteRecv(ref vote) => { let vote_view = vote.get_view_number(); - if let Some(relay_task) = self.relay_task_map.get(&vote_view) { + let view_phase = (vote_view, ViewSyncPhase::Finalize); + if let Some(relay_task) = self.relay_task_map.get(&view_phase) { // Forward event then return self.event_stream .direct_message(relay_task.event_stream_id, event) @@ -423,7 +380,7 @@ impl< spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; if let Some((_, _, event_stream_id)) = vote_collector { self.relay_task_map - .insert(vote_view, ViewSyncTaskInfo { event_stream_id }); + .insert(view_phase, ViewSyncTaskInfo { event_stream_id }); } } @@ -452,8 +409,31 @@ impl< ) .await; } - if let Some((_key, relay_task_info)) = - self.relay_task_map.remove_entry(&TYPES::Time::new(i)) + if let Some((_key, relay_task_info)) = self + .relay_task_map + .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::PreCommit)) + { + self.event_stream + .direct_message( + relay_task_info.event_stream_id, + HotShotEvent::Shutdown, + ) + .await; + } + if let Some((_key, relay_task_info)) = self + .relay_task_map + .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::Commit)) + { + self.event_stream + .direct_message( + relay_task_info.event_stream_id, + HotShotEvent::Shutdown, + ) + .await; + } + if let Some((_key, relay_task_info)) = self + .relay_task_map + .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::Finalize)) { self.event_stream .direct_message( @@ -658,11 +638,6 @@ impl, A: ConsensusApi + return (Some(HotShotTaskCompleted::ShutDown), self); } - // Ignore if the certificate is for an already seen phase - if last_seen_certificate <= self.phase { - return (None, self); - } - self.phase = last_seen_certificate; if certificate.get_data().relay > self.relay { @@ -695,7 +670,7 @@ impl, A: ConsensusApi + let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncCertificateRecv"); + info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -730,11 +705,6 @@ impl, A: ConsensusApi + return (Some(HotShotTaskCompleted::ShutDown), self); } - // Ignore if the certificate is for an already seen phase - if last_seen_certificate <= self.phase { - return (None, self); - } - self.phase = last_seen_certificate; if certificate.get_data().relay > self.relay { @@ -775,7 +745,10 @@ impl, A: ConsensusApi + let phase = self.phase.clone(); async move { async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncCertificateRecv"); + info!( + "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", + self.relay + ); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -810,11 +783,6 @@ impl, A: ConsensusApi + return (Some(HotShotTaskCompleted::ShutDown), self); } - // Ignore if the certificate is for an already seen phase - if last_seen_certificate <= self.phase { - return (None, self); - } - // cancel poll for votes self.network .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncVotes( @@ -930,7 +898,10 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncTimeout"); + info!( + "Vote sending timed out in ViewSyncTimeout relay = {}", + self.relay + ); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 893ca7518b..6afac3ae52 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -94,7 +94,7 @@ impl::get_public_parameter( membership.get_committee_qc_stake_table(), - U256::from(membership.success_threshold().get()), + U256::from(Self::threshold(membership)), ); ::check( &real_qc_pp, From 9aaca5c2a08d940e29d6242be7466576f19620e7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Dec 2023 16:40:07 -0500 Subject: [PATCH 0556/1393] threshhold -> threshold --- types/src/simple_certificate.rs | 41 ++++++++++++++++----------------- types/src/vote.rs | 4 ++-- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 6afac3ae52..00d56c0805 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -25,16 +25,16 @@ use crate::{ use serde::{Deserialize, Serialize}; /// Trait which allows use to inject different threshold calculations into a Certificate type -pub trait Threshhold { +pub trait Threshold { /// Calculate a threshold based on the membership fn threshold>(membership: &MEMBERSHIP) -> u64; } /// Defines a threshold which is 2f + 1 (Amount needed for Quorum) #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct SuccessThreshhold {} +pub struct SuccessThreshold {} -impl Threshhold for SuccessThreshhold { +impl Threshold for SuccessThreshold { fn threshold>(membership: &MEMBERSHIP) -> u64 { membership.success_threshold().into() } @@ -42,9 +42,9 @@ impl Threshhold for SuccessThreshhold { /// Defines a threshold which is f + 1 (i.e at least one of the stake is honest) #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct OneHonestThreshhold {} +pub struct OneHonestThreshold {} -impl Threshhold for OneHonestThreshhold { +impl Threshold for OneHonestThreshold { fn threshold>(membership: &MEMBERSHIP) -> u64 { membership.failure_threshold().into() } @@ -52,7 +52,7 @@ impl Threshhold for OneHonestThreshhold { /// A certificate which can be created by aggregating many simple votes on the commitment. #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct SimpleCertificate> { +pub struct SimpleCertificate> { /// The data this certificate is for. I.e the thing that was voted on to create this Certificate pub data: VOTEABLE, /// commitment of all the votes this cert should be signed over @@ -63,15 +63,15 @@ pub struct SimpleCertificate::QCType>, /// If this QC is for the genesis block pub is_genesis: bool, - /// phantom data for `THRESHHOLD` and `TYPES` - pub _pd: PhantomData<(TYPES, THRESHHOLD)>, + /// phantom data for `THRESHOLD` and `TYPES` + pub _pd: PhantomData<(TYPES, THRESHOLD)>, } -impl> - Certificate for SimpleCertificate +impl> Certificate + for SimpleCertificate { type Voteable = VOTEABLE; - type Threshhold = THRESHHOLD; + type Threshold = THRESHOLD; fn create_signed_certificate( vote_commitment: Commitment, @@ -103,7 +103,7 @@ impl>(membership: &MEMBERSHIP) -> u64 { - THRESHHOLD::threshold(membership) + THRESHOLD::threshold(membership) } fn get_data(&self) -> &Self::Voteable { &self.data @@ -113,8 +113,8 @@ impl> - HasViewNumber for SimpleCertificate +impl> + HasViewNumber for SimpleCertificate { fn get_view_number(&self) -> TYPES::Time { self.view_number @@ -150,20 +150,19 @@ impl QuorumCertificate { } /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` -pub type QuorumCertificate = SimpleCertificate, SuccessThreshhold>; +pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; /// Type alias for a DA certificate over `DAData` -pub type DACertificate = SimpleCertificate; +pub type DACertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number -pub type TimeoutCertificate = - SimpleCertificate, SuccessThreshhold>; +pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; // TODO ED Update this to use the correct threshold instead of the default `success_threshold` /// Type alias for a `ViewSyncPreCommit` certificate over a view number pub type ViewSyncPreCommitCertificate2 = - SimpleCertificate, OneHonestThreshhold>; + SimpleCertificate, OneHonestThreshold>; /// Type alias for a `ViewSyncCommit` certificate over a view number pub type ViewSyncCommitCertificate2 = - SimpleCertificate, SuccessThreshhold>; + SimpleCertificate, SuccessThreshold>; /// Type alias for a `ViewSyncFinalize` certificate over a view number pub type ViewSyncFinalizeCertificate2 = - SimpleCertificate, SuccessThreshhold>; + SimpleCertificate, SuccessThreshold>; diff --git a/types/src/vote.rs b/types/src/vote.rs index dec92f6a32..50aeebd960 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -14,7 +14,7 @@ use hotshot_utils::bincode::bincode_opts; use tracing::error; use crate::{ - simple_certificate::Threshhold, + simple_certificate::Threshold, simple_vote::Voteable, traits::{ election::Membership, @@ -55,7 +55,7 @@ pub trait Certificate: HasViewNumber { type Voteable: Voteable; /// Threshold Functions - type Threshhold: Threshhold; + type Threshold: Threshold; /// Build a certificate from the data commitment and the quorum of signers fn create_signed_certificate( From 444636db37763e369d725aa93c172e1ac765a658 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Dec 2023 16:41:05 -0500 Subject: [PATCH 0557/1393] remove todo --- types/src/simple_certificate.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 00d56c0805..ab1e721a0d 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -155,8 +155,6 @@ pub type QuorumCertificate = SimpleCertificate, pub type DACertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; - -// TODO ED Update this to use the correct threshold instead of the default `success_threshold` /// Type alias for a `ViewSyncPreCommit` certificate over a view number pub type ViewSyncPreCommitCertificate2 = SimpleCertificate, OneHonestThreshold>; From ac9f32f687cc9700fd056728c867cbca8e54bf56 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 18 Dec 2023 07:56:16 -0500 Subject: [PATCH 0558/1393] increase max # of saved views (#2249) --- web_server/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 4e5bf6f1db..9eab8140a4 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -3,7 +3,7 @@ pub const DEFAULT_WEB_SERVER_DA_PORT: u16 = 9001; pub const DEFAULT_WEB_SERVER_VIEW_SYNC_PORT: u16 = 9002; /// How many views to keep in memory -pub const MAX_VIEWS: usize = 25; +pub const MAX_VIEWS: usize = 100; /// How many transactions to keep in memory pub const MAX_TXNS: usize = 500; /// How many transactions to return at once From c94eaf3463f52c7d63574881e23a60534974034f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 18 Dec 2023 08:43:33 -0500 Subject: [PATCH 0559/1393] [Stability] Poll for latest view sync cert (#2246) * add polling for latest view sync cert * update comment * add cancelling to view change event --- hotshot/Cargo.toml | 1 + hotshot/src/tasks/mod.rs | 2 +- .../traits/networking/web_server_network.rs | 111 ++++++++++++++++-- task-impls/src/view_sync.rs | 27 ++++- types/src/message.rs | 6 +- types/src/traits/network.rs | 12 +- web_server/api.toml | 13 +- web_server/src/config.rs | 8 +- web_server/src/lib.rs | 38 ++++-- 9 files changed, 178 insertions(+), 40 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 2483dbe8bd..2cb0b08fc0 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -116,6 +116,7 @@ local-ip-address = "0.5.6" dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } derive_more = "0.99.17" portpicker = "0.1.1" +lru = "0.12.1" tracing = { workspace = true } typenum = { workspace = true } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index dd903d7741..b47dfa99a2 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -244,7 +244,7 @@ pub async fn add_consensus_task>( }; consensus_state .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) .await; let filter = FilterEvent(Arc::new(consensus_event_filter)); let consensus_name = "Consensus Task"; diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 7d4d1294cf..859085b8c6 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -26,7 +26,11 @@ use hotshot_types::{ }, }; use hotshot_web_server::{self, config}; +use lru::LruCache; use serde::{Deserialize, Serialize}; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; +use std::num::NonZeroUsize; use surf_disco::Url; use hotshot_types::traits::network::ViewMessage; @@ -53,6 +57,15 @@ impl WebCommChannel { } } +/// # Note +/// +/// This function uses `DefaultHasher` instead of cryptographic hash functions like SHA-256 because of an `AsRef` requirement. +fn hash(t: &T) -> u64 { + let mut s = DefaultHasher::new(); + t.hash(&mut s); + s.finish() +} + /// The web server network state #[derive(Clone, Debug)] pub struct WebServerNetwork { @@ -205,9 +218,10 @@ struct Inner { view_sync_vote_task_map: Arc>>, /// Task map for transactions txn_task_map: Arc>>, - /// Task polling for current propsal - current_proposal_task: - Arc>>>>, + /// Task polling for latest quorum propsal + latest_quorum_proposal_task: Arc>>>, + /// Task polling for latest view sync proposal + latest_view_sync_proposal_task: Arc>>>, } impl Inner { @@ -221,6 +235,7 @@ impl Inner { ) -> Result<(), NetworkError> { let mut vote_index = 0; let mut tx_index = 0; + let mut seen_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); if message_purpose == MessagePurpose::Data { tx_index = *self.tx_index.read().await; @@ -230,7 +245,10 @@ impl Inner { while self.running.load(Ordering::Relaxed) { let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), - MessagePurpose::CurrentProposal => config::get_recent_proposal_route(), + MessagePurpose::LatestQuorumProposal => config::get_latest_quorum_proposal_route(), + MessagePurpose::LatestViewSyncProposal => { + config::get_latest_view_sync_proposal_route() + } MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), MessagePurpose::Data => config::get_transactions_route(tx_index), MessagePurpose::Internal => unimplemented!(), @@ -292,7 +310,7 @@ impl Inner { // } // } } - MessagePurpose::CurrentProposal => { + MessagePurpose::LatestQuorumProposal => { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view self.broadcast_poll_queue .write() @@ -301,6 +319,20 @@ impl Inner { return Ok(()); } + MessagePurpose::LatestViewSyncProposal => { + let mut broadcast_poll_queue = + self.broadcast_poll_queue.write().await; + + for cert in &deserialized_messages { + let hash = hash(&cert); + if seen_proposals.put(hash, ()).is_none() { + broadcast_poll_queue.push(cert.clone()); + } + } + + // additional sleep to reduce load on web server + async_sleep(Duration::from_millis(300)).await; + } MessagePurpose::Vote => { // error!( // "Received {} votes from web server for view {} is da {}", @@ -374,7 +406,10 @@ impl Inner { // TODO ED Need to add vote indexing to web server for view sync certs for cert in &deserialized_messages { vote_index += 1; - broadcast_poll_queue.push(cert.clone()); + let hash = hash(cert); + if seen_proposals.put(hash, ()).is_none() { + broadcast_poll_queue.push(cert.clone()); + } } } @@ -420,6 +455,10 @@ impl Inner { } } + ConsensusIntentEvent::CancelPollForLatestViewSyncProposal => { + return Ok(()); + } + _ => { unimplemented!() } @@ -499,7 +538,7 @@ pub struct SendMsg { } /// A message being received from the web server -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash)] #[serde(bound(deserialize = ""))] pub struct RecvMsg { /// The optional message being received @@ -571,7 +610,8 @@ impl WebServerNetwork { view_sync_cert_task_map: Arc::default(), view_sync_vote_task_map: Arc::default(), txn_task_map: Arc::default(), - current_proposal_task: Arc::default(), + latest_quorum_proposal_task: Arc::default(), + latest_view_sync_proposal_task: Arc::default(), }); inner.connected.store(true, Ordering::Relaxed); @@ -593,7 +633,9 @@ impl WebServerNetwork { MessagePurpose::Proposal => config::post_proposal_route(*view_number), MessagePurpose::Vote => config::post_vote_route(*view_number), MessagePurpose::Data => config::post_transactions_route(), - MessagePurpose::Internal | MessagePurpose::CurrentProposal => { + MessagePurpose::Internal + | MessagePurpose::LatestQuorumProposal + | MessagePurpose::LatestViewSyncProposal => { return Err(WebServerNetworkError::EndpointError) } MessagePurpose::ViewSyncProposal => { @@ -907,8 +949,8 @@ impl ConnectedNetwork, TYPES::Signatur .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVIDDisperse) .await; } - ConsensusIntentEvent::PollForCurrentProposal => { - let mut proposal_task = self.inner.current_proposal_task.write().await; + ConsensusIntentEvent::PollForLatestQuorumProposal => { + let mut proposal_task = self.inner.latest_quorum_proposal_task.write().await; if proposal_task.is_none() { // create new task let (sender, receiver) = unbounded(); @@ -918,7 +960,7 @@ impl ConnectedNetwork, TYPES::Signatur let inner_clone = self.inner.clone(); async move { if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::CurrentProposal, 1) + .poll_web_server(receiver, MessagePurpose::LatestQuorumProposal, 1) .await { warn!( @@ -926,12 +968,44 @@ impl ConnectedNetwork, TYPES::Signatur e ); } - let mut proposal_task = inner_clone.current_proposal_task.write().await; + let mut proposal_task = + inner_clone.latest_quorum_proposal_task.write().await; *proposal_task = None; } }); } } + ConsensusIntentEvent::PollForLatestViewSyncProposal => { + let mut latest_view_sync_proposal_task = + self.inner.latest_view_sync_proposal_task.write().await; + if latest_view_sync_proposal_task.is_none() { + // create new task + let (sender, receiver) = unbounded(); + *latest_view_sync_proposal_task = Some(sender); + + async_spawn({ + let inner_clone = self.inner.clone(); + async move { + if let Err(e) = inner_clone + .poll_web_server( + receiver, + MessagePurpose::LatestViewSyncProposal, + 1, + ) + .await + { + warn!( + "Background receive proposal polling encountered an error: {:?}", + e + ); + } + let mut latest_view_sync_proposal_task = + inner_clone.latest_view_sync_proposal_task.write().await; + *latest_view_sync_proposal_task = None; + } + }); + } + } ConsensusIntentEvent::PollForVotes(view_number) => { let mut task_map = self.inner.vote_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { @@ -1005,6 +1079,17 @@ impl ConnectedNetwork, TYPES::Signatur } } + ConsensusIntentEvent::CancelPollForLatestViewSyncProposal => { + let mut latest_view_sync_proposal_task = + self.inner.latest_view_sync_proposal_task.write().await; + + if let Some(thing) = latest_view_sync_proposal_task.take() { + let _res = thing + .send(ConsensusIntentEvent::CancelPollForLatestViewSyncProposal) + .await; + } + } + ConsensusIntentEvent::PollForViewSyncCertificate(view_number) => { let mut task_map = self.inner.view_sync_cert_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d31e9b6219..64851cb903 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -441,6 +441,14 @@ impl< // Garbage collect old tasks // We could put this into a separate async task, but that would require making several fields on ViewSyncTaskState thread-safe and harm readability. In the common case this will have zero tasks to clean up. + // cancel poll for votes + self.network + .inject_consensus_info( + ConsensusIntentEvent::CancelPollForLatestViewSyncProposal, + ) + .await; + + // run GC for i in *self.last_garbage_collected_view..*self.current_view { if let Some((_key, replica_task_info)) = self.replica_task_map.remove_entry(&TYPES::Time::new(i)) @@ -502,6 +510,12 @@ impl< *view_number + 1, )) .await; + + // Poll for future view sync certificates + self.network + .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncProposal) + .await; + // Spawn replica task let next_view = *view_number + 1; // Subscribe to the view after we are leader since we know we won't propose in the next view if we are leader. @@ -521,7 +535,7 @@ impl< // Also subscribe to the latest view for the same reason. The GC will remove the above poll // in the case that one doesn't resolve but this one does. self.network - .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) .await; self.network @@ -829,6 +843,13 @@ impl, A: ConsensusApi + )) .await; + // Cancel poll for future view sync certificates + self.network + .inject_consensus_info( + ConsensusIntentEvent::CancelPollForLatestViewSyncProposal, + ) + .await; + self.phase = last_seen_certificate; if certificate.get_data().relay > self.relay { @@ -895,9 +916,9 @@ impl, A: ConsensusApi + if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } - // Keep tyring to get a more recent proposal to catch up to + // Keep trying to get a more recent proposal to catch up to self.network - .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) + .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) .await; self.relay += 1; match self.phase { diff --git a/types/src/message.rs b/types/src/message.rs index 6513413358..a4bb68907f 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -60,8 +60,10 @@ pub struct Messages(pub Vec>); pub enum MessagePurpose { /// Message with a quorum proposal. Proposal, - /// Message with most recent proposal the server has - CurrentProposal, + /// Message with most recent quorum proposal the server has + LatestQuorumProposal, + /// Message with most recent view sync proposal the server has + LatestViewSyncProposal, /// Message with a quorum vote. Vote, /// Message with a view sync vote. diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 3a8e754aae..df1bea4bbd 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -146,8 +146,10 @@ pub enum ConsensusIntentEvent { PollForProposal(u64), /// Poll for VID disperse data for a particular view PollForVIDDisperse(u64), - /// Poll for the most recent proposal the webserver has - PollForCurrentProposal, + /// Poll for the most recent quorum proposal the webserver has + PollForLatestQuorumProposal, + /// Poll for the most recent view sync proposal the webserver has + PollForLatestViewSyncProposal, /// Poll for a DAC for a particular view PollForDAC(u64), /// Poll for view sync votes starting at a particular view @@ -172,6 +174,8 @@ pub enum ConsensusIntentEvent { CancelPollForVIDDisperse(u64), /// Cancel polling for transactions CancelPollForTransactions(u64), + /// Cancel polling for most recent view sync proposal + CancelPollForLatestViewSyncProposal, } impl ConsensusIntentEvent { @@ -194,7 +198,9 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::PollForTransactions(view_number) | ConsensusIntentEvent::CancelPollForTransactions(view_number) | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, - ConsensusIntentEvent::PollForCurrentProposal => 1, + ConsensusIntentEvent::PollForLatestQuorumProposal + | ConsensusIntentEvent::PollForLatestViewSyncProposal + | ConsensusIntentEvent::CancelPollForLatestViewSyncProposal => 1, } } } diff --git a/web_server/api.toml b/web_server/api.toml index 2453ca4d3f..224f786d7d 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -19,9 +19,16 @@ DOC = """ Return the VID disperse data for a given view number """ -# GET the proposal for a view, where the view is passed as an argument -[route.getrecentproposal] -PATH = ["proposal/recent"] +# GET the latest quorum proposal +[route.get_latest_quorum_proposal] +PATH = ["proposal/latest"] +DOC = """ +Return the proposal for the most recent view the server has +""" + +# GET the latest quorum proposal +[route.get_latest_view_sync_proposal] +PATH = ["view_sync_proposal/latest"] DOC = """ Return the proposal for the most recent view the server has """ diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 9eab8140a4..43ed97bb9a 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -17,8 +17,12 @@ pub fn post_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } -pub fn get_recent_proposal_route() -> String { - "api/proposal/recent".to_string() +pub fn get_latest_quorum_proposal_route() -> String { + "api/proposal/latest".to_string() +} + +pub fn get_latest_view_sync_proposal_route() -> String { + "api/view_sync_proposal/latest".to_string() } pub fn get_da_certificate_route(view_number: u64) -> String { diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 636b08c70c..0ca7575c81 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -34,7 +34,10 @@ struct WebServerState { /// view for oldest proposals in memory oldest_proposal: u64, /// view for the most recent proposal to help nodes catchup - recent_proposal: u64, + latest_quorum_proposal: u64, + /// view for the most recent view sync proposal + latest_view_sync_proposal: u64, + /// view for teh oldest DA certificate oldest_certificate: u64, @@ -88,7 +91,8 @@ impl WebServerState { num_txns: 0, oldest_vote: 0, oldest_proposal: 0, - recent_proposal: 0, + latest_quorum_proposal: 0, + latest_view_sync_proposal: 0, oldest_certificate: 0, shutdown: None, stake_table: Vec::new(), @@ -129,7 +133,8 @@ impl WebServerState { /// Trait defining methods needed for the `WebServerState` pub trait WebServerDataSource { fn get_proposal(&self, view_number: u64) -> Result>>, Error>; - fn get_recent_proposal(&self) -> Result>>, Error>; + fn get_latest_quorum_proposal(&self) -> Result>>, Error>; + fn get_latest_view_sync_proposal(&self) -> Result>>, Error>; fn get_view_sync_proposal( &self, view_number: u64, @@ -214,8 +219,12 @@ impl WebServerDataSource for WebServerState { } } - fn get_recent_proposal(&self) -> Result>>, Error> { - self.get_proposal(self.recent_proposal) + fn get_latest_quorum_proposal(&self) -> Result>>, Error> { + self.get_proposal(self.latest_quorum_proposal) + } + + fn get_latest_view_sync_proposal(&self) -> Result>>, Error> { + self.get_view_sync_proposal(self.latest_view_sync_proposal, 0) } fn get_view_sync_proposal( @@ -433,12 +442,8 @@ impl WebServerDataSource for WebServerState { fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { info!("Received proposal for view {}", view_number); - if view_number > self.recent_proposal { - self.recent_proposal = view_number; - } - - if view_number > self.recent_proposal { - self.recent_proposal = view_number; + if view_number > self.latest_quorum_proposal { + self.latest_quorum_proposal = view_number; } // Only keep proposal history for MAX_VIEWS number of view @@ -480,6 +485,10 @@ impl WebServerDataSource for WebServerState { view_number: u64, proposal: Vec, ) -> Result<(), Error> { + if view_number > self.latest_view_sync_proposal { + self.latest_view_sync_proposal = view_number; + } + // Only keep proposal history for MAX_VIEWS number of view if self.view_sync_proposals.len() >= MAX_VIEWS { self.view_sync_proposals @@ -672,8 +681,11 @@ where } .boxed() })? - .get("getrecentproposal", |_req, state| { - async move { state.get_recent_proposal() }.boxed() + .get("get_latest_quorum_proposal", |_req, state| { + async move { state.get_latest_quorum_proposal() }.boxed() + })? + .get("get_latest_view_sync_proposal", |_req, state| { + async move { state.get_latest_view_sync_proposal() }.boxed() })? .get("getviewsyncproposal", |req, state| { async move { From ac74c48eae42e652e88fa38db0f33faceb060255 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 18 Dec 2023 09:46:59 -0500 Subject: [PATCH 0560/1393] get rid of phase in view sync relay --- task-impls/src/view_sync.rs | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index e649f1e4f3..29a57672a3 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -134,8 +134,6 @@ pub struct ViewSyncReplicaTaskState< pub current_view: TYPES::Time, /// Round HotShot wishes to be in pub next_view: TYPES::Time, - /// The last seen phase of the view sync protocol - pub phase: ViewSyncPhase, /// The relay index we are currently on pub relay: u64, /// Whether we have seen a finalized certificate @@ -214,7 +212,6 @@ impl< relay: 0, finalized: false, sent_view_change_event: false, - phase: ViewSyncPhase::None, timeout_task: None, membership: self.membership.clone(), network: self.network.clone(), @@ -514,7 +511,6 @@ impl< relay: 0, finalized: false, sent_view_change_event: false, - phase: ViewSyncPhase::None, timeout_task: None, membership: self.membership.clone(), network: self.network.clone(), @@ -638,8 +634,6 @@ impl, A: ConsensusApi + return (Some(HotShotTaskCompleted::ShutDown), self); } - self.phase = last_seen_certificate; - if certificate.get_data().relay > self.relay { self.relay = certificate.get_data().relay; } @@ -667,7 +661,7 @@ impl, A: ConsensusApi + self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); - let phase = self.phase.clone(); + let phase = last_seen_certificate; async move { async_sleep(self.view_sync_timeout).await; info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); @@ -705,8 +699,6 @@ impl, A: ConsensusApi + return (Some(HotShotTaskCompleted::ShutDown), self); } - self.phase = last_seen_certificate; - if certificate.get_data().relay > self.relay { self.relay = certificate.get_data().relay; } @@ -742,7 +734,7 @@ impl, A: ConsensusApi + } self.timeout_task = Some(async_spawn({ let stream = self.event_stream.clone(); - let phase = self.phase.clone(); + let phase = last_seen_certificate; async move { async_sleep(self.view_sync_timeout).await; info!( @@ -761,8 +753,6 @@ impl, A: ConsensusApi + } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - let last_seen_certificate = ViewSyncPhase::Finalize; - // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); @@ -797,8 +787,6 @@ impl, A: ConsensusApi + )) .await; - self.phase = last_seen_certificate; - if certificate.get_data().relay > self.relay { self.relay = certificate.get_data().relay; } @@ -856,10 +844,7 @@ impl, A: ConsensusApi + HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { // Shouldn't ever receive a timeout for a relay higher than ours - if TYPES::Time::new(*round) == self.next_view - && relay == self.relay - && last_seen_certificate == self.phase - { + if TYPES::Time::new(*round) == self.next_view && relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } @@ -868,7 +853,7 @@ impl, A: ConsensusApi + .inject_consensus_info(ConsensusIntentEvent::PollForCurrentProposal) .await; self.relay += 1; - match self.phase { + match last_seen_certificate { ViewSyncPhase::None | ViewSyncPhase::PreCommit | ViewSyncPhase::Commit => { let vote = ViewSyncPreCommitVote::::create_signed_vote( ViewSyncPreCommitData { From 54d0ccdec174e405cf9c79b1740ec5e71cb75614 Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Tue, 19 Dec 2023 01:42:50 +0800 Subject: [PATCH 0561/1393] refactor: allow configurable stake table capacity (#2250) --- hotshot-stake-table/src/vec_based.rs | 26 +++++++++++------------- hotshot-state-prover/src/circuit.rs | 30 ++++++++++++++++++---------- hotshot-state-prover/src/lib.rs | 30 +++++++++++++++++----------- hotshot-state-prover/src/utils.rs | 3 ++- 4 files changed, 52 insertions(+), 37 deletions(-) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index ec3b537fe0..35ce56de27 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -44,6 +44,8 @@ where K2: Eq + Hash + Clone + Default + ToFields, F: RescueParameter, { + /// upper bound on table size + capacity: usize, /// The most up-to-date stake table, where the incoming transactions shall be performed on. head: StakeTableSnapshot, /// The snapshot of stake table at the beginning of the current epoch @@ -240,20 +242,19 @@ where F: RescueParameter, { /// Initiating an empty stake table. - pub fn new() -> Self { - let bls_comm_preimage = - vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; + pub fn new(capacity: usize) -> Self { + let bls_comm_preimage = vec![F::default(); capacity * >::SIZE]; let default_bls_comm = VariableLengthRescueCRHF::::evaluate(&bls_comm_preimage).unwrap()[0]; - let schnorr_comm_preimage = - vec![F::default(); STAKE_TABLE_CAPACITY * >::SIZE]; + let schnorr_comm_preimage = vec![F::default(); capacity * >::SIZE]; let default_schnorr_comm = VariableLengthRescueCRHF::::evaluate(&schnorr_comm_preimage).unwrap()[0]; - let stake_comm_preimage = vec![F::default(); STAKE_TABLE_CAPACITY]; + let stake_comm_preimage = vec![F::default(); capacity]; let default_stake_comm = VariableLengthRescueCRHF::::evaluate(&stake_comm_preimage).unwrap()[0]; let default_comm = (default_bls_comm, default_schnorr_comm, default_stake_comm); Self { + capacity, head: StakeTableSnapshot::default(), epoch_start: StakeTableSnapshot::default(), last_epoch_start: StakeTableSnapshot::default(), @@ -296,7 +297,7 @@ where /// Commitment of a stake table is a triple (bls_keys_comm, schnorr_keys_comm, stake_amount_comm) /// TODO(Chengyu): The BLS verification keys doesn't implement Default. Thus we directly pad with `F::default()`. fn compute_head_comm(&mut self) -> (F, F, F) { - let padding_len = STAKE_TABLE_CAPACITY - self.head.bls_keys.len(); + let padding_len = self.capacity - self.head.bls_keys.len(); // Compute rescue hash for bls keys let mut bls_comm_preimage = self .head @@ -304,10 +305,7 @@ where .iter() .flat_map(|key| key.to_fields()) .collect::>(); - bls_comm_preimage.resize( - STAKE_TABLE_CAPACITY * >::SIZE, - F::default(), - ); + bls_comm_preimage.resize(self.capacity * >::SIZE, F::default()); let bls_comm = VariableLengthRescueCRHF::::evaluate(bls_comm_preimage).unwrap()[0]; // Compute rescue hash for Schnorr keys @@ -328,7 +326,7 @@ where .iter() .map(|x| u256_to_field(x)) .collect::>(); - stake_comm_preimage.resize(STAKE_TABLE_CAPACITY, F::default()); + stake_comm_preimage.resize(self.capacity, F::default()); let stake_comm = VariableLengthRescueCRHF::::evaluate(stake_comm_preimage).unwrap()[0]; (bls_comm, schnorr_comm, stake_comm) @@ -363,7 +361,7 @@ where F: RescueParameter, { fn default() -> Self { - Self::new() + Self::new(STAKE_TABLE_CAPACITY) } } @@ -379,7 +377,7 @@ mod tests { #[test] fn crypto_test_stake_table() -> Result<(), StakeTableError> { - let mut st = StakeTable::::new(); + let mut st = StakeTable::::default(); let mut prng = jf_utils::test_rng(); let keys = (0..10) .map(|_| { diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index ec3a4251ae..e56818f4fc 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -4,7 +4,6 @@ use ark_ec::twisted_edwards::TECurveConfig; use ark_ff::PrimeField; use ark_std::borrow::Borrow; use ethereum_types::U256; -use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_types::light_client::LightClientState; use jf_plonk::errors::PlonkError; use jf_primitives::{ @@ -187,7 +186,7 @@ impl AsRef<[Variable]> for LightClientStateVar { /// - A circuit for proof generation /// - A list of public inputs for verification /// - A PlonkError if any error happens when building the circuit -pub(crate) fn build( +pub(crate) fn build( stake_table_entries: STIter, signer_bit_vec: BitIter, signatures: SigIter, @@ -392,7 +391,7 @@ where } /// Internal function to build a dummy circuit -pub(crate) fn build_for_preprocessing( +pub(crate) fn build_for_preprocessing( ) -> Result<(PlonkCircuit, PublicInput), PlonkError> where F: RescueParameter, @@ -405,7 +404,7 @@ where fee_ledger_comm: F::default(), stake_table_comm: (F::default(), F::default(), F::default()), }; - build::(&[], &[], &[], &lightclient_state, &U256::zero()) + build::(&[], &[], &[], &lightclient_state, &U256::zero()) } #[cfg(test)] @@ -424,6 +423,7 @@ mod tests { use jf_utils::test_rng; type F = ark_ed_on_bn254::Fq; + const ST_CAPACITY: usize = 20; #[test] fn crypto_test_circuit_building() { @@ -431,7 +431,7 @@ mod tests { let mut prng = test_rng(); let (qc_keys, state_keys) = key_pairs_for_testing(num_validators, &mut prng); - let st = stake_table_for_testing(&qc_keys, &state_keys); + let st = stake_table_for_testing(ST_CAPACITY, &qc_keys, &state_keys); let entries = st .try_iter(SnapshotVersion::LastEpochStart) @@ -477,7 +477,7 @@ mod tests { }) .collect::>(); // good path - let (circuit, public_inputs) = build( + let (circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( &entries, &bit_vec, &bit_masked_sigs, @@ -489,7 +489,7 @@ mod tests { .check_circuit_satisfiability(public_inputs.as_ref()) .is_ok()); - let (circuit, public_inputs) = build( + let (circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( &entries, &bit_vec, &bit_masked_sigs, @@ -517,7 +517,7 @@ mod tests { } }) .collect::>(); - let (bad_circuit, public_inputs) = build( + let (bad_circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( &entries, &bad_bit_vec, &bad_bit_masked_sigs, @@ -540,7 +540,7 @@ mod tests { }) .collect::, PrimitivesError>>() .unwrap(); - let (bad_circuit, public_inputs) = build( + let (bad_circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( &entries, &bit_vec, &sig_for_bad_state, @@ -564,7 +564,7 @@ mod tests { }) .collect::, PrimitivesError>>() .unwrap(); - let (bad_circuit, public_inputs) = build( + let (bad_circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( &entries, &bit_vec, &wrong_sigs, @@ -575,5 +575,15 @@ mod tests { assert!(bad_circuit .check_circuit_satisfiability(public_inputs.as_ref()) .is_err()); + + // bad path: overflowing stake table size + assert!(build::<_, _, _, _, _, 9>( + &entries, + &bit_vec, + &bit_masked_sigs, + &lightclient_state, + &U256::from(26u32), + ) + .is_err()) } } diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 86eba4420c..b5f2349b76 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -37,8 +37,11 @@ pub type Proof = jf_plonk::proof_system::structs::Proof; pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; /// Given a SRS, returns the proving key and verifying key for state update -pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), PlonkError> { - let (circuit, _) = circuit::build_for_preprocessing::()?; +pub fn preprocess( + srs: &UniversalSrs, +) -> Result<(ProvingKey, VerifyingKey), PlonkError> { + let (circuit, _) = + circuit::build_for_preprocessing::()?; PlonkKzgSnark::preprocess(srs, &circuit) } @@ -52,7 +55,7 @@ pub fn preprocess(srs: &UniversalSrs) -> Result<(ProvingKey, VerifyingKey), Plon /// - the signer's accumulated weight exceeds the quorum threshold /// - the stake table corresponds to the one committed in the light client state /// - all signed schnorr signatures are valid -pub fn generate_state_update_proof( +pub fn generate_state_update_proof( rng: &mut R, pk: &ProvingKey, stake_table: &ST, @@ -76,7 +79,7 @@ where .try_iter(SnapshotVersion::LastEpochStart) .unwrap() .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)); - let (circuit, public_inputs) = circuit::build( + let (circuit, public_inputs) = circuit::build::<_, _, _, _, _, STAKE_TABLE_CAPACITY>( stake_table_entries, signer_bit_vec, signatures, @@ -118,6 +121,8 @@ mod tests { use jf_relation::Circuit; use jf_utils::test_rng; + const ST_CAPACITY: usize = 20; + // FIXME(Chengyu): see fn universal_setup_for_testing( max_degree: usize, @@ -178,7 +183,7 @@ mod tests { let mut prng = test_rng(); let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); - let st = stake_table_for_testing(&bls_keys, &schnorr_keys); + let st = stake_table_for_testing(ST_CAPACITY, &bls_keys, &schnorr_keys); let block_comm_root = VariableLengthRescueCRHF::::evaluate(vec![ BaseField::from(1u32), @@ -223,18 +228,19 @@ mod tests { .collect::>(); // good path - let num_gates = build_for_preprocessing::() - .unwrap() - .0 - .num_gates(); + let num_gates = + build_for_preprocessing::() + .unwrap() + .0 + .num_gates(); let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); ark_std::println!("Number of constraint in the circuit: {}", num_gates); - let result = preprocess(&test_srs); + let result = preprocess::(&test_srs); assert!(result.is_ok()); let (pk, vk) = result.unwrap(); - let result = generate_state_update_proof( + let result = generate_state_update_proof::<_, _, _, _, ST_CAPACITY>( &mut prng, &pk, &st, @@ -255,7 +261,7 @@ mod tests { .is_ok()); // minimum bad path, other bad cases are checked inside `circuit.rs` - let result = generate_state_update_proof( + let result = generate_state_update_proof::<_, _, _, _, ST_CAPACITY>( &mut prng, &pk, &st, diff --git a/hotshot-state-prover/src/utils.rs b/hotshot-state-prover/src/utils.rs index 2bbc18e053..f910b121e4 100644 --- a/hotshot-state-prover/src/utils.rs +++ b/hotshot-state-prover/src/utils.rs @@ -32,10 +32,11 @@ pub(crate) fn key_pairs_for_testing( /// Helper function for test pub(crate) fn stake_table_for_testing( + capacity: usize, bls_keys: &[BLSVerKey], schnorr_keys: &[(SchnorrSignKey, SchnorrVerKey)], ) -> StakeTable { - let mut st = StakeTable::::new(); + let mut st = StakeTable::::new(capacity); // Registering keys bls_keys .iter() From 74dd01e3c6c13503c4e05bc761c5ff4dc4606139 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Dec 2023 11:27:29 -0800 Subject: [PATCH 0562/1393] Add wait --- task-impls/src/harness.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 9c85c54a07..88084fd6e8 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -10,7 +10,7 @@ use hotshot_task::{ }; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; -use std::{collections::HashMap, future::Future, sync::Arc}; +use std::{collections::HashMap, future::Future, sync::Arc, thread::sleep, time::Duration}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { @@ -107,6 +107,8 @@ pub fn handle_event( } if state.expected_output.is_empty() { + // Sleep before the shutdown in case other tasks are still running. + sleep(Duration::from_millis(100)); return (Some(HotShotTaskCompleted::ShutDown), state); } From 1023bedafa1081deb6f40e0abb36e0183d17c437 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Dec 2023 11:47:49 -0800 Subject: [PATCH 0563/1393] Fix error after sync --- testing/tests/catchup.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index d60aef767a..132bde4c31 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -53,7 +53,6 @@ async fn test_catchup() { check_leaf: true, ..Default::default() }; - metadata.overall_safety_properties.num_failed_views = 2; // Make sure we keep commiting rounds after the catchup, but not the full 50. metadata.overall_safety_properties.num_successful_views = 22; From 5abbbb4e07cf4d27f4e1df57efc8100981a0d9c1 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Dec 2023 15:33:46 -0800 Subject: [PATCH 0564/1393] Make failure requirement stricter/ --- testing/tests/catchup.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 132bde4c31..92c718436f 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -49,12 +49,12 @@ async fn test_catchup() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 25, + // Make sure we keep commiting rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 5, check_leaf: true, ..Default::default() }; - // Make sure we keep commiting rounds after the catchup, but not the full 50. - metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) @@ -107,7 +107,7 @@ async fn test_catchup_web() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 25, + num_failed_views: 5, check_leaf: true, ..Default::default() }; @@ -165,13 +165,12 @@ async fn test_catchup_one_node() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 25, + // Make sure we keep commiting rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 1, check_leaf: true, ..Default::default() }; - metadata.overall_safety_properties.num_failed_views = 1; - // Make sure we keep commiting rounds after the catchup, but not the full 50. - metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) @@ -232,7 +231,7 @@ async fn test_catchup_in_view_sync() { ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { check_leaf: true, - num_failed_views: 25, + num_failed_views: 5, ..Default::default() }; From 9f7627d7eba8ba2cf8ccf48c21d913d9923da1a5 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 18 Dec 2023 16:55:08 -0800 Subject: [PATCH 0565/1393] Further improve the network test --- task-impls/src/harness.rs | 30 +++++++++++++++++++++--------- testing/tests/consensus_task.rs | 4 ++-- testing/tests/da_task.rs | 2 +- testing/tests/network_task.rs | 15 ++++++++++----- testing/tests/vid_task.rs | 2 +- testing/tests/view_sync_task.rs | 2 +- 6 files changed, 36 insertions(+), 19 deletions(-) diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 88084fd6e8..2ee224b7cd 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -10,7 +10,7 @@ use hotshot_task::{ }; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; -use std::{collections::HashMap, future::Future, sync::Arc, thread::sleep, time::Duration}; +use std::{collections::HashMap, future::Future, sync::Arc}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { @@ -35,7 +35,10 @@ pub type TestHarnessTaskTypes = HSTWithEvent< /// Runs a test by building the task using `build_fn` and then passing it the `input` events /// and testing the make sure all of the `expected_output` events are seen /// -/// `event_stream` - if given, will be used to register the task builder. +/// # Arguments +/// * `event_stream` - if given, will be used to register the task builder. +/// * `allow_extra_output` - whether to allow an extra output after we've seen all expected +/// outputs. Should be `false` in most cases. /// /// # Panics /// Panics if any state the test expects is not set. Panicing causes a test failure @@ -45,6 +48,7 @@ pub async fn run_harness( expected_output: HashMap, usize>, event_stream: Option>>, build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, + allow_extra_output: bool, ) where TYPES: NodeType, Fut: Future, @@ -54,7 +58,7 @@ pub async fn run_harness( let event_stream = event_stream.unwrap_or_default(); let state = TestHarnessState { expected_output }; let handler = HandleEvent(Arc::new(move |event, state| { - async move { handle_event(event, state) }.boxed() + async move { handle_event(event, state, allow_extra_output) }.boxed() })); let filter = FilterEvent::default(); let builder = TaskBuilder::>::new("test_harness".to_string()) @@ -84,20 +88,30 @@ pub async fn run_harness( /// Handles an event for the Test Harness Task. If the event is expected, remove it from /// the `expected_output` in state. If unexpected fail test. /// +/// # Arguments +/// * `allow_extra_output` - whether to allow an extra output after we've seen all expected +/// outputs. Should be `false` in most cases. +/// /// # Panics /// Will panic to fail the test when it receives and unexpected event #[allow(clippy::needless_pass_by_value)] pub fn handle_event( event: HotShotEvent, mut state: TestHarnessState, + allow_extra_output: bool, ) -> ( std::option::Option, TestHarnessState, ) { - assert!( - state.expected_output.contains_key(&event), - "Got an unexpected event: {event:?}", - ); + // Check the output in either case: + // * We allow outputs only in our expected output set. + // * We haven't received all expected outputs yet. + if !allow_extra_output || !state.expected_output.is_empty() { + assert!( + state.expected_output.contains_key(&event), + "Got an unexpected event: {event:?}", + ); + } let num_expected = state.expected_output.get_mut(&event).unwrap(); if *num_expected == 1 { @@ -107,8 +121,6 @@ pub fn handle_event( } if state.expected_output.is_empty() { - // Sleep before the shutdown in case other tasks are still running. - sleep(Duration::from_millis(100)); return (Some(HotShotTaskCompleted::ShutDown), state); } diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ca93ab956e..8ff918a143 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -127,7 +127,7 @@ async fn test_consensus_task() { add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) }; - run_harness(input, output, None, build_fn).await; + run_harness(input, output, None, build_fn, false).await; } #[cfg(test)] @@ -177,5 +177,5 @@ async fn test_consensus_vote() { add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) }; - run_harness(input, output, None, build_fn).await; + run_harness(input, output, None, build_fn, false).await; } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index a9aed44656..6299308118 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -103,5 +103,5 @@ async fn test_da_task() { let build_fn = |task_runner, event_stream| add_da_task(task_runner, event_stream, handle); - run_harness(input, output, None, build_fn).await; + run_harness(input, output, None, build_fn, false).await; } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 40bc8c6760..5fe96444a0 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -97,7 +97,9 @@ async fn test_network_task() { quorum_proposal.clone(), pub_key, )); - input.push(HotShotEvent::Shutdown); + // Don't send `Shutdown` as other task unit tests do, to avoid nondeterministic behaviors due + // to some tasks shut down earlier than the testing harness and we don't get all the expected + // events. output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 2); output.insert( @@ -110,7 +112,7 @@ async fn test_network_task() { ); output.insert( HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), - 2, // 2 occurrences: 1 from `input`, 1 from the DA task + 3, // 2 occurrences: 1 from `input`, 2 from the DA task ); output.insert( HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), @@ -128,7 +130,7 @@ async fn test_network_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 2); output.insert( HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), - 1, + 2, // 2 occurrences: both from the VID task ); output.insert( HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), @@ -136,8 +138,11 @@ async fn test_network_task() { ); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); - output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, _| async { task_runner }; - run_harness(input, output, Some(event_stream), build_fn).await; + // There may be extra outputs not in the expected set, e.g., a second `VidDisperseRecv` if the + // VID task runs fast. All event types we want to test should be seen by this point, so waiting + // for more events will not help us test more cases for now. Therefore, we set + // `allow_extra_output` to `true` for deterministic test result. + run_harness(input, output, Some(event_stream), build_fn, true).await; } diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 9852642048..bdfd595487 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -113,5 +113,5 @@ async fn test_vid_task() { let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, handle); - run_harness(input, output, None, build_fn).await; + run_harness(input, output, None, build_fn, false).await; } diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 615707888d..abbeaa9727 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -58,5 +58,5 @@ async fn test_view_sync_task() { let build_fn = |task_runner, event_stream| add_view_sync_task(task_runner, event_stream, handle); - run_harness(input, output, None, build_fn).await; + run_harness(input, output, None, build_fn, false).await; } From f827de8c5b7af8d58aaeb3d627d27cc56ca0dd5d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 18 Dec 2023 17:19:24 -0800 Subject: [PATCH 0566/1393] finished proposal creation and consensus state map update for manual view 2 --- hotshot/src/types/handle.rs | 33 ++++++++----- task-impls/src/consensus.rs | 13 +++-- testing/src/task_helpers.rs | 86 +++++++++++++++++++++------------ testing/tests/consensus_task.rs | 3 +- 4 files changed, 84 insertions(+), 51 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index e43e568dd8..ca0d907b1b 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -4,6 +4,7 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; use commit::Committable; +use ethereum_types::U256; use futures::Stream; use hotshot_task::{ boxed_sync, @@ -14,21 +15,18 @@ use hotshot_task::{ }; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::simple_vote::QuorumData; +use hotshot_types::traits::election::Membership; +use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ consensus::Consensus, error::HotShotError, event::EventType, message::{MessageKind, SequencingMessage}, - traits::{ - node_implementation::NodeType, state::ConsensusTime, storage::Storage, - }, + traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, }; use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; use std::sync::Arc; use tracing::error; -use hotshot_types::traits::election::Membership; -use ethereum_types::U256; -use hotshot_types::traits::signature_key::SignatureKey; /// Event streaming handle for a [`SystemContext`] instance running in the background /// @@ -201,22 +199,31 @@ impl + 'static> SystemContextHandl } // Below is for testing only: - + /// Gets the current membership of the [`HotShot`] instance #[cfg(feature = "hotshot-testing")] - pub fn get_committee_qc_stake_table(&self) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry>{ + pub fn get_committee_qc_stake_table( + &self, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.hotshot - .inner - .memberships - .quorum_membership.get_committee_qc_stake_table() + .inner + .memberships + .quorum_membership + .get_committee_qc_stake_table() } /// Gets the threshold of current membership of the [`HotShot`] instance #[cfg(feature = "hotshot-testing")] pub fn get_threshold(&self) -> U256 { - U256::from(self.hotshot.inner.memberships.quorum_membership.success_threshold().get()) + U256::from( + self.hotshot + .inner + .memberships + .quorum_membership + .success_threshold() + .get(), + ) } - /// Wrapper for `HotShotConsensusApi`'s `get_leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ea559b72ac..d4284a9402 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -162,6 +162,7 @@ impl, A: ConsensusApi + // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. async fn vote_if_able(&self) -> bool { + error!("Step 1 inside vote_if_able()"); // Sishan TODO: remove these error message if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", @@ -169,6 +170,7 @@ impl, A: ConsensusApi + ); return false; } + error!("Step 2 inside vote_if_able()"); if let Some(proposal) = &self.current_proposal { // ED Need to account for the genesis DA cert // No need to check vid share nor da cert for genesis @@ -230,7 +232,7 @@ impl, A: ConsensusApi + return true; } } - + error!("Step 3 inside vote_if_able(), going to check vid share"); // Only vote if you has seen the VID share for this view if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { @@ -318,7 +320,7 @@ impl, A: ConsensusApi + return true; } } - debug!( + error!( // Sishan TODO: change to debug level "Received VID share, but couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", *proposal.get_view_number(), ); @@ -436,7 +438,11 @@ impl, A: ConsensusApi + warn!("Leader key does not match key in proposal"); return; } - error!("Step 3, proposal.data.justify_qc.get_view_number() = {:?}, view - 1 = {:?}", proposal.data.justify_qc.get_view_number(), view - 1); + error!( + "Step 3, proposal.data.justify_qc.get_view_number() = {:?}, view - 1 = {:?}", + proposal.data.justify_qc.get_view_number(), + view - 1 + ); // Verify a timeout certificate exists and is valid if proposal.data.justify_qc.get_view_number() != view - 1 { let Some(timeout_cert) = proposal.data.timeout_certificate.clone() else { @@ -483,7 +489,6 @@ impl, A: ConsensusApi + .cloned() }; - // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { // If no parent then just update our state map and return. We will not vote. diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index cf8a87eb38..b43a85d457 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -30,13 +30,16 @@ use hotshot_types::{ vote::HasViewNumber, }; -use hotshot_types::vote::Certificate; +use async_std::sync::RwLockUpgradableReadGuard; +use bincode::Options; use bitvec::bitvec; -use hotshot_types::simple_vote::QuorumVote; use hotshot_types::simple_vote::QuorumData; +use hotshot_types::simple_vote::QuorumVote; +use hotshot_types::utils::View; +use hotshot_types::utils::ViewInner; +use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use hotshot_utils::bincode::bincode_opts; -use bincode::Options; use tracing::error; pub async fn build_system_handle( @@ -112,15 +115,14 @@ pub async fn build_system_handle( } pub fn build_assembled_sig< - TYPES: NodeType, - VOTE: Vote, - CERT: Certificate - >( - leaf: Leaf, - handle: &SystemContextHandle, - view: TYPES::Time + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate, +>( + leaf: Leaf, + handle: &SystemContextHandle, + view: TYPES::Time, ) -> ::QCType { - // Assemble QC let stake_table = handle.get_committee_qc_stake_table(); let real_qc_pp: ::QCParams = @@ -144,30 +146,30 @@ pub fn build_assembled_sig< &private_key, ); let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); + bincode_opts() + .deserialize(&vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); sig_lists.push(original_signature); } let real_qc_sig = ::assemble( &real_qc_pp, signers.as_bitslice(), - &sig_lists[..], + &sig_lists[..], ); - real_qc_sig + real_qc_sig } pub fn build_qc< TYPES: NodeType, - VOTE: Vote>, - CERT: Certificate - >( - real_qc_sig: ::QCType, - leaf: Leaf, - view: TYPES::Time, - public_key: &TYPES::SignatureKey, + VOTE: Vote>, + CERT: Certificate, +>( + real_qc_sig: ::QCType, + leaf: Leaf, + view: TYPES::Time, + public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, ) -> CERT { let vote = QuorumVote::::create_signed_vote( @@ -193,8 +195,10 @@ async fn build_quorum_proposal_and_signature( public_key: &BLSPubKey, view: u64, ) -> (QuorumProposal, EncodedSignature) { - let consensus_lock = handle.get_consensus(); - let consensus = consensus_lock.read().await; + let temp_consensus = handle.get_consensus(); + let cur_consensus = temp_consensus.upgradable_read().await; + let mut consensus = RwLockUpgradableReadGuard::upgrade(cur_consensus).await; + let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; @@ -209,7 +213,6 @@ async fn build_quorum_proposal_and_signature( panic!("Failed to find high QC parent."); }; let parent_leaf = leaf.clone(); - error!("parent_leaf's view = {:?}", parent_leaf.view_number); // every event input is seen on the event stream in the output. let block = ::genesis(); @@ -235,13 +238,31 @@ async fn build_quorum_proposal_and_signature( }; if view == 2 { - let created_assembled_sig = - build_assembled_sig::, QuorumCertificate>(leaf.clone(), handle, ViewNumber::new(1)); - let created_qc = build_qc::, QuorumCertificate>(created_assembled_sig, leaf.clone(), ViewNumber::new(1), public_key, private_key); + consensus.state_map.insert( + ViewNumber::new(1), + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + let created_assembled_sig = build_assembled_sig::< + TestTypes, + QuorumVote, + QuorumCertificate, + >(leaf.clone(), handle, ViewNumber::new(1)); + let created_qc = build_qc::, QuorumCertificate>( + created_assembled_sig, + leaf.clone(), + ViewNumber::new(1), + public_key, + private_key, + ); let parent_leaf = leaf.clone(); let leaf_view2 = Leaf { view_number: ViewNumber::new(2), - justify_qc: created_qc.clone(), + justify_qc: created_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, @@ -249,7 +270,8 @@ async fn build_quorum_proposal_and_signature( timestamp: 0, proposer_id: api.public_key().to_bytes(), }; - let signature_view2 = ::sign(private_key, leaf_view2.commit().as_ref()); + let signature_view2 = + ::sign(private_key, leaf_view2.commit().as_ref()); let proposal_view2 = QuorumProposal:: { block_header, view_number: ViewNumber::new(2), @@ -258,7 +280,7 @@ async fn build_quorum_proposal_and_signature( proposer_id: leaf_view2.proposer_id, }; error!("Have you really entered view 2?"); - return (proposal_view2, signature_view2) + return (proposal_view2, signature_view2); } (proposal, signature) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 4df643c3b9..4b557a2cc8 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -153,7 +153,6 @@ async fn test_consensus_with_vid_vote() { use hotshot_types::{ data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, }; - use tracing::error; use std::marker::PhantomData; async_compatibility_layer::logging::setup_logging(); @@ -244,7 +243,7 @@ async fn test_consensus_with_vid_vote() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] +#[ignore] // Sishan TODO: delete this ignore later. async fn test_consensus_no_vote_without_vid_share() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; From 74135e0108b8c2b0f87254d903ad05ce19e74f21 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 18 Dec 2023 18:52:13 -0800 Subject: [PATCH 0567/1393] add test_consensus_no_vote_without_vid_share --- task-impls/src/consensus.rs | 3 +- testing/tests/consensus_task.rs | 69 +++++++++++++++++++++------------ 2 files changed, 47 insertions(+), 25 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d4284a9402..13ef395cac 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -236,7 +236,8 @@ impl, A: ConsensusApi + // Only vote if you has seen the VID share for this view if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - debug!( + error!( + // Sishan TODO: change to debug level "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 4b557a2cc8..11bce6cfc4 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -144,7 +144,7 @@ async fn test_consensus_task() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_with_vid_vote() { +async fn test_consensus_with_vid() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::block_types::TestTransaction; use hotshot_testing::task_helpers::build_system_handle; @@ -196,9 +196,9 @@ async fn test_consensus_with_vid_vote() { // For the test of vote logic with vid, starting view 2 we need vid share input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - // Sishan TOOD: this proposal on view 2 doesn't have a valid justify QC let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; // Sishan TODO: Still need a valid DAC cert + // https://github.com/EspressoSystems/HotShot/issues/2255 input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader @@ -214,10 +214,13 @@ async fn test_consensus_with_vid_vote() { output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); // Sishan TODO: Uncomment this after the above TODO is done + // https://github.com/EspressoSystems/HotShot/issues/2255 // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); // } + // Sishan TODO: track the logging message "Received VID share, but couldn't find DAC cert in certs xxx", + output.insert( HotShotEvent::ViewChange(ViewNumber::new(1)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input @@ -243,55 +246,73 @@ async fn test_consensus_with_vid_vote() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] // Sishan TODO: delete this ignore later. +#[allow(clippy::should_panic_without_expect)] +#[should_panic] async fn test_consensus_no_vote_without_vid_share() { use hotshot_task_impls::harness::run_harness; + use hotshot_testing::block_types::TestTransaction; use hotshot_testing::task_helpers::build_system_handle; + use hotshot_testing::task_helpers::vid_init; + use hotshot_types::data::VidSchemeTrait; + use hotshot_types::{ + data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, + }; + use std::marker::PhantomData; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let (handle, _event_stream) = build_system_handle(2).await; // We assign node's key pair rather than read from config file since it's a test - let (private_key_view1, public_key_view1) = key_pair_for_id(1); // In view 2, node 2 is the leader. let (private_key_view2, public_key_view2) = key_pair_for_id(2); + // For the test of vote logic with vid + let api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let pub_key = *api.public_key(); + let vid = vid_init(); + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let payload_commitment = vid_disperse.commit; + + let signature = + ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); + let vid_disperse = VidDisperse { + view_number: ViewNumber::new(2), + payload_commitment, + shares: vid_disperse.shares, + common: vid_disperse.common, + }; + let vid_proposal: Proposal> = Proposal { + data: vid_disperse.clone(), + signature, + _pd: PhantomData, + }; + let mut input = Vec::new(); let mut output = HashMap::new(); - let proposal_view1 = build_quorum_proposal(&handle, &private_key_view1, 1).await; // Do a view change, so that it's not the genesis view, and vid vote is needed input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::QuorumProposalRecv( - proposal_view1.clone(), - public_key_view1, - )); - - output.insert( - HotShotEvent::QuorumProposalRecv(proposal_view1.clone(), public_key_view1), - 1, - ); - - // Since it's genesis view, node can vote without dac and vid share - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view1.data).await { - output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); - } + // For the test of vote logic with vid, starting view 2 we need vid share input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; + + // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader input.push(HotShotEvent::QuorumProposalRecv( proposal_view2.clone(), public_key_view2, )); - // Without vid share, there is no HotShotEvent::QuorumVoteSend in the output. output.insert( HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), 1, ); + output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); output.insert( HotShotEvent::ViewChange(ViewNumber::new(1)), @@ -301,7 +322,7 @@ async fn test_consensus_no_vote_without_vid_share() { HotShotEvent::ViewChange(ViewNumber::new(2)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input ); - + // Sishan TODO: would better track the log ""We have not seen the VID share for this view ..." input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); From 251b56f30c911cf16c5c11ccfc04ae536937430f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 19 Dec 2023 09:51:39 -0500 Subject: [PATCH 0568/1393] attempt to dedup events --- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/network.rs | 8 +- task-impls/src/view_sync.rs | 240 +++++++++++++----------------------- 3 files changed, 92 insertions(+), 158 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b47dfa99a2..c66eb7b784 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -475,7 +475,7 @@ pub async fn add_view_sync_task>( private_key: api.private_key().clone(), api, num_timeouts_tracked: 0, - replica_task_map: HashMap::default(), + replica_task_map: HashMap::default().into(), relay_task_map: HashMap::default(), view_sync_timeout: Duration::new(10, 0), id: handle.hotshot.inner.id, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index cdef8af4d2..ec326ef54d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -293,10 +293,10 @@ impl> TransmitType::Broadcast => self.channel.broadcast_message(message, membership).await, }; - match transmit_result { - Ok(()) => {} - Err(e) => error!("Failed to send message from network task: {:?}", e), - } + // match transmit_result { + // Ok(()) => {} + // Err(e) => error!("Failed to send message from network task: {:?}", e), + // } None } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 8e893c552e..bab0c93916 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -5,11 +5,11 @@ use crate::{ vote::{spawn_vote_accumulator, AccumulatorInfo}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use futures::FutureExt; +use async_lock::RwLock; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, }; use hotshot_types::{simple_vote::ViewSyncFinalizeData, traits::signature_key::SignatureKey}; use hotshot_types::{ @@ -94,7 +94,7 @@ pub struct ViewSyncTaskState< pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: HashMap, + pub replica_task_map: RwLock>>, /// Map of running relay tasks pub relay_task_map: HashMap<(TYPES::Time, ViewSyncPhase), ViewSyncTaskInfo>, @@ -181,87 +181,86 @@ impl< #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task - pub async fn handle_event(&mut self, event: HotShotEvent) { - match &event { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { - info!("Received view sync cert for phase {:?}", certificate); - - // This certificate is old, we can throw it away - // If next view = cert round, then that means we should already have a task running for it - if self.current_view > certificate.get_view_number() { - debug!("Already in a higher view than the view sync message"); - return; - } - - if let Some(replica_task) = - self.replica_task_map.get(&certificate.get_view_number()) - { - // Forward event then return - debug!("Forwarding message"); - self.event_stream - .direct_message(replica_task.event_stream_id, event) - .await; - return; - } - - // We do not have a replica task already running, so start one - let mut replica_state: ViewSyncReplicaTaskState = - ViewSyncReplicaTaskState { - current_view: certificate.get_view_number(), - next_view: certificate.get_view_number(), - relay: 0, - finalized: false, - sent_view_change_event: false, - timeout_task: None, - membership: self.membership.clone(), - network: self.network.clone(), - public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - api: self.api.clone(), - event_stream: self.event_stream.clone(), - view_sync_timeout: self.view_sync_timeout, - id: self.id, - }; + pub async fn send_to_or_create_replica( + &mut self, + event: HotShotEvent, + view: TYPES::Time, + ) { + // This certificate is old, we can throw it away + // If next view = cert round, then that means we should already have a task running for it + let mut task_map = self.replica_task_map.write().await; + if self.current_view > view { + error!("Already in a higher view than the view sync message"); + return; + } - let result = replica_state.handle_event(event.clone()).await; + if let Some(replica_task) = task_map.remove(&view) { + // Forward event then return + error!("Forwarding message"); + let result = replica_task.handle_event(event.clone()).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // The protocol has finished - return; - } + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } - replica_state = result.1; + task_map.insert(view, result.1); + return; + } - let name = format!( - "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", - self.next_view, self.current_view - ); + // We do not have a replica task already running, so start one + let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { + current_view: view, + next_view: view, + relay: 0, + finalized: false, + sent_view_change_event: false, + timeout_task: None, + membership: self.membership.clone(), + network: self.network.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), + api: self.api.clone(), + event_stream: self.event_stream.clone(), + view_sync_timeout: self.view_sync_timeout, + id: self.id, + }; + + let result = replica_state.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } - let replica_handle_event = HandleEvent(Arc::new( - move |event, state: ViewSyncReplicaTaskState| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent::default(); - let builder = TaskBuilder::>::new(name) - .register_event_stream(replica_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(replica_state) - .register_event_handler(replica_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - - self.replica_task_map.insert( - certificate.get_view_number(), - ViewSyncTaskInfo { event_stream_id }, - ); + replica_state = result.1; - let _view_sync_replica_task = async_spawn(async move { - ViewSyncReplicaTaskStateTypes::build(builder).launch().await - }); + task_map.insert(view, replica_state); + } + #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] + #[allow(clippy::type_complexity)] + /// Handles incoming events for the main view sync task + pub async fn handle_event(&mut self, event: HotShotEvent) { + match &event { + HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + error!("Received view sync cert for phase {:?}", certificate); + let view = certificate.get_view_number(); + self.send_to_or_create_replica(event, view).await; + } + HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { + error!("Received view sync cert for phase {:?}", certificate); + let view = certificate.get_view_number(); + self.send_to_or_create_replica(event, view).await; + } + HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + error!("Received view sync cert for phase {:?}", certificate); + let view = certificate.get_view_number(); + self.send_to_or_create_replica(event, view).await; + } + HotShotEvent::ViewSyncTimeout(view, _, _) => { + error!("view sync timeout in main task {:?}", view); + let view = *view; + self.send_to_or_create_replica(event, view).await; } HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { @@ -404,16 +403,10 @@ impl< // run GC for i in *self.last_garbage_collected_view..*self.current_view { - if let Some((_key, replica_task_info)) = - self.replica_task_map.remove_entry(&TYPES::Time::new(i)) - { - self.event_stream - .direct_message( - replica_task_info.event_stream_id, - HotShotEvent::Shutdown, - ) - .await; - } + self.replica_task_map + .write() + .await + .remove_entry(&TYPES::Time::new(i)); if let Some((_key, relay_task_info)) = self .relay_task_map .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::PreCommit)) @@ -518,67 +511,8 @@ impl< self.network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; - - let mut replica_state = ViewSyncReplicaTaskState { - current_view: self.current_view, - next_view: TYPES::Time::new(next_view), - relay: 0, - finalized: false, - sent_view_change_event: false, - timeout_task: None, - membership: self.membership.clone(), - network: self.network.clone(), - public_key: self.public_key.clone(), - private_key: self.private_key.clone(), - api: self.api.clone(), - event_stream: self.event_stream.clone(), - view_sync_timeout: self.view_sync_timeout, - id: self.id, - }; - - let result = replica_state - .handle_event(HotShotEvent::ViewSyncTrigger(view_number + 1)) + self.send_to_or_create_replica(event, TYPES::Time::new(next_view)) .await; - - if result.0 == Some(HotShotTaskCompleted::ShutDown) { - // The protocol has finished - return; - } - - replica_state = result.1; - - let name = format!( - "View Sync Replica Task: Attempting to enter view {:?} from view {:?}", - *view_number + 1, - *view_number - ); - - let replica_handle_event = HandleEvent(Arc::new( - move |event, state: ViewSyncReplicaTaskState| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent::default(); - let builder = - TaskBuilder::>::new(name) - .register_event_stream(replica_state.event_stream.clone(), filter) - .await - .register_registry(&mut self.registry.clone()) - .await - .register_state(replica_state) - .register_event_handler(replica_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - - self.replica_task_map.insert( - TYPES::Time::new(*view_number + 1), - ViewSyncTaskInfo { event_stream_id }, - ); - - let _view_sync_replica_task = async_spawn(async move { - ViewSyncReplicaTaskStateTypes::build(builder).launch().await - }); } else { // If this is the first timeout we've seen advance to the next view self.current_view = view_number; @@ -678,7 +612,7 @@ impl, A: ConsensusApi + let phase = last_seen_certificate; async move { async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); + error!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -751,7 +685,7 @@ impl, A: ConsensusApi + let phase = last_seen_certificate; async move { async_sleep(self.view_sync_timeout).await; - info!( + error!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", self.relay ); @@ -849,7 +783,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncTrigger"); + error!("Vote sending timed out in ViewSyncTrigger"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -904,7 +838,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - info!( + error!( "Vote sending timed out in ViewSyncTimeout relay = {}", self.relay ); From f7c15c8d796f130fbf98370d2c57598a10fbb0f9 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 19 Dec 2023 11:10:05 -0500 Subject: [PATCH 0569/1393] view sync working --- task-impls/src/view_sync.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index bab0c93916..2fe1e79669 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -190,13 +190,13 @@ impl< // If next view = cert round, then that means we should already have a task running for it let mut task_map = self.replica_task_map.write().await; if self.current_view > view { - error!("Already in a higher view than the view sync message"); + debug!("Already in a higher view than the view sync message"); return; } if let Some(replica_task) = task_map.remove(&view) { // Forward event then return - error!("Forwarding message"); + debug!("Forwarding message"); let result = replica_task.handle_event(event.clone()).await; if result.0 == Some(HotShotTaskCompleted::ShutDown) { @@ -243,22 +243,22 @@ impl< pub async fn handle_event(&mut self, event: HotShotEvent) { match &event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { - error!("Received view sync cert for phase {:?}", certificate); + debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); self.send_to_or_create_replica(event, view).await; } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { - error!("Received view sync cert for phase {:?}", certificate); + debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); self.send_to_or_create_replica(event, view).await; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - error!("Received view sync cert for phase {:?}", certificate); + debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); self.send_to_or_create_replica(event, view).await; } HotShotEvent::ViewSyncTimeout(view, _, _) => { - error!("view sync timeout in main task {:?}", view); + debug!("view sync timeout in main task {:?}", view); let view = *view; self.send_to_or_create_replica(event, view).await; } @@ -511,7 +511,7 @@ impl< self.network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; - self.send_to_or_create_replica(event, TYPES::Time::new(next_view)) + self.send_to_or_create_replica(HotShotEvent::ViewSyncTrigger(view_number + 1), view_number + 1) .await; } else { // If this is the first timeout we've seen advance to the next view From b36ce3a9e0d26383cdadc71282e859ab4198bd50 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 19 Dec 2023 11:42:43 -0500 Subject: [PATCH 0570/1393] [Stability] Disallow old votes (#2252) * don't allow old votes * lint --- web_server/src/lib.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 0ca7575c81..3babb30670 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -386,6 +386,15 @@ impl WebServerDataSource for WebServerState { self.oldest_vote += 1; } } + + // don't accept the vote if it is too old + if self.oldest_vote > view_number { + return Err(ServerError { + status: StatusCode::Gone, + message: "Posted vote is too old".to_string(), + }); + } + let next_index = self.vote_index.entry(view_number).or_insert(0); self.votes .entry(view_number) @@ -406,6 +415,15 @@ impl WebServerDataSource for WebServerState { self.oldest_vid_vote += 1; } } + + // don't accept the vote if it is too old + if self.oldest_vid_vote > view_number { + return Err(ServerError { + status: StatusCode::Gone, + message: "Posted vid vote is too old".to_string(), + }); + } + let next_index = self.vid_vote_index.entry(view_number).or_insert(0); self.vid_votes .entry(view_number) @@ -428,6 +446,15 @@ impl WebServerDataSource for WebServerState { self.oldest_view_sync_vote += 1; } } + + // don't accept the vote if it is too old + if self.oldest_view_sync_vote > view_number { + return Err(ServerError { + status: StatusCode::Gone, + message: "Posted view sync vote is too old".to_string(), + }); + } + let next_index = self.view_sync_vote_index.entry(view_number).or_insert(0); self.view_sync_votes .entry(view_number) From d27a413986389015bacc5bca26191a5ae14e3377 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 09:13:53 -0800 Subject: [PATCH 0571/1393] a little cleanup on comment --- testing/src/task_helpers.rs | 1 - testing/tests/consensus_task.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index b43a85d457..5278bb3b86 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -279,7 +279,6 @@ async fn build_quorum_proposal_and_signature( timeout_certificate: None, proposer_id: leaf_view2.proposer_id, }; - error!("Have you really entered view 2?"); return (proposal_view2, signature_view2); } diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 11bce6cfc4..a0598b2f6b 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -197,7 +197,7 @@ async fn test_consensus_with_vid() { // For the test of vote logic with vid, starting view 2 we need vid share input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - // Sishan TODO: Still need a valid DAC cert + // TODO: Still need a valid DAC cert, now tracking logging instead // https://github.com/EspressoSystems/HotShot/issues/2255 input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); @@ -213,7 +213,7 @@ async fn test_consensus_with_vid() { ); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - // Sishan TODO: Uncomment this after the above TODO is done + // TODO: Uncomment this after the above TODO is done // https://github.com/EspressoSystems/HotShot/issues/2255 // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); From 589e4c7c3363f4a18bf2647308460b31e8490bc1 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 19 Dec 2023 12:49:40 -0500 Subject: [PATCH 0572/1393] [Stability] Quick fix for Libp2p CI (#2261) * increase num failed view cutoff * increase round start delay --- testing/tests/libp2p.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 03ff96fbe3..5ed8fcd315 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -4,7 +4,7 @@ use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{Libp2pImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, - test_builder::TestMetadata, + test_builder::{TestMetadata, TimingData}, }; use tracing::instrument; @@ -28,6 +28,10 @@ async fn libp2p_network() { duration: Duration::new(240, 0), }, ), + timing_data: TimingData { + round_start_delay: 100, + ..Default::default() + }, ..TestMetadata::default_multiple_rounds() }; From 8b9e9a07c9e1b206a4f3d1f2475bbe7835eacaee Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 10:26:54 -0800 Subject: [PATCH 0573/1393] comment clean up --- task-impls/src/consensus.rs | 28 ++++++++++++---------------- testing/tests/consensus_task.rs | 2 +- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 13ef395cac..f850531f33 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -162,7 +162,7 @@ impl, A: ConsensusApi + // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. async fn vote_if_able(&self) -> bool { - error!("Step 1 inside vote_if_able()"); // Sishan TODO: remove these error message + if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", @@ -170,7 +170,7 @@ impl, A: ConsensusApi + ); return false; } - error!("Step 2 inside vote_if_able()"); + if let Some(proposal) = &self.current_proposal { // ED Need to account for the genesis DA cert // No need to check vid share nor da cert for genesis @@ -232,7 +232,7 @@ impl, A: ConsensusApi + return true; } } - error!("Step 3 inside vote_if_able(), going to check vid share"); + // Only vote if you has seen the VID share for this view if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { @@ -427,23 +427,19 @@ impl, A: ConsensusApi + *proposal.data.view_number, )) .await; - error!("Step 1"); // Sishan TODO: remove these error message + let view = proposal.data.get_view_number(); if view < self.cur_view { debug!("Proposal is from an older view {:?}", proposal.data.clone()); return; } - error!("Step 2"); + let view_leader_key = self.quorum_membership.get_leader(view); if view_leader_key != sender { warn!("Leader key does not match key in proposal"); return; } - error!( - "Step 3, proposal.data.justify_qc.get_view_number() = {:?}, view - 1 = {:?}", - proposal.data.justify_qc.get_view_number(), - view - 1 - ); + // Verify a timeout certificate exists and is valid if proposal.data.justify_qc.get_view_number() != view - 1 { let Some(timeout_cert) = proposal.data.timeout_certificate.clone() else { @@ -463,7 +459,7 @@ impl, A: ConsensusApi + return; } } - error!("Step 4"); + let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { @@ -472,14 +468,14 @@ impl, A: ConsensusApi + consensus.metrics.invalid_qc.update(1); return; } - error!("Step 5"); + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view).await; self.current_proposal = Some(proposal.data.clone()); let consensus = self.consensus.upgradable_read().await; - error!("Step 6"); + // Construct the leaf. let parent = if justify_qc.is_genesis { self.genesis_leaf().await @@ -568,7 +564,7 @@ impl, A: ConsensusApi + error!("Failed safety check and liveness check"); return; } - error!("Step 7"); + let high_qc = leaf.justify_qc.clone(); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; @@ -723,7 +719,7 @@ impl, A: ConsensusApi + == self.current_proposal.clone().unwrap().view_number; // todo get rid of this clone let qc = consensus.high_qc.clone(); - error!("Step 8"); + drop(consensus); if should_propose { debug!( @@ -733,7 +729,7 @@ impl, A: ConsensusApi + self.publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) .await; } - error!("Step 9, plan to go to vote_if_able()"); + if !self.vote_if_able().await { return; } diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index a0598b2f6b..a6ded3a4c4 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -322,7 +322,7 @@ async fn test_consensus_no_vote_without_vid_share() { HotShotEvent::ViewChange(ViewNumber::new(2)), 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input ); - // Sishan TODO: would better track the log ""We have not seen the VID share for this view ..." + // Sishan TODO: would better track the log "We have not seen the VID share for this view ..." input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); From 91ad806bfc75878f1d0b6bf90de6379f481bbe80 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 10:43:44 -0800 Subject: [PATCH 0574/1393] cleanup --- task-impls/src/consensus.rs | 1 - testing/src/task_helpers.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f850531f33..fa4fba54ab 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -162,7 +162,6 @@ impl, A: ConsensusApi + // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. async fn vote_if_able(&self) -> bool { - if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 5278bb3b86..a92a313575 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -40,7 +40,6 @@ use hotshot_types::utils::ViewInner; use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use hotshot_utils::bincode::bincode_opts; -use tracing::error; pub async fn build_system_handle( node_id: u64, From 0d76f01c87ad2d18f8e78ec516a38d0748f9686d Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 19 Dec 2023 19:53:59 +0100 Subject: [PATCH 0575/1393] Don't ignore webserver task completing, which it shouldn't (#2260) --- .../src/traits/networking/web_server_network.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 859085b8c6..92745c8500 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -1257,10 +1257,17 @@ impl TestableNetworkingImplementation for WebServerNetwo let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); info!("Launching web server on port {port}"); // Start web server - async_spawn(hotshot_web_server::run_web_server::( - Some(server_shutdown), - url, - )); + async_spawn(async { + match hotshot_web_server::run_web_server::( + Some(server_shutdown), + url, + ) + .await + { + Ok(()) => error!("Web server future finished unexpectedly"), + Err(e) => error!("Web server task failed: {e}"), + } + }); // We assign known_nodes' public key and stake value rather than read from config file since it's a test let known_nodes = (0..expected_node_count as u64) From 2d39db9c9f53ab3c20190efe299d809b31d9015a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 16:25:41 -0800 Subject: [PATCH 0576/1393] solve conflicts --- task-impls/src/consensus.rs | 8 ++++-- testing/tests/consensus_task.rs | 51 +++++++++++++++++++-------------- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index bb55695fde..cc667b26e3 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -236,9 +236,10 @@ impl, A: ConsensusApi + // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 // Only vote if you has seen the VID share for this view + // Sishan TODO: comment this for now if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - debug!( + error!( // Sishan TODO: change to debug level "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); @@ -323,7 +324,7 @@ impl, A: ConsensusApi + } } error!( // Sishan TODO: change to debug level - "Received VID share, but couldn't find DAC cert in certs, meaning we haven't received it yet for view {:?}", + "Received VID share, but couldn't find DAC cert for view {:?}", *proposal.get_view_number(), ); return false; @@ -962,7 +963,8 @@ impl, A: ConsensusApi + // Add to the storage that we have received the VID disperse for a specific view // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 - // self.vid_shares.insert(view, disperse); + // Sishan: comment this for now + self.vid_shares.insert(view, disperse); } HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index c7209d9249..d7f2acaceb 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -168,23 +168,26 @@ async fn test_consensus_with_vid() { inner: handle.hotshot.inner.clone(), }; let pub_key = *api.public_key(); - let vid = vid_init(); + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(2)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let signature = + let vid_signature = ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); - let vid_disperse = VidDisperse { - view_number: ViewNumber::new(2), - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }; - let vid_proposal: Proposal> = Proposal { - data: vid_disperse.clone(), - signature, + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let vid_disperse_inner = VidDisperse::from_membership( + ViewNumber::new(2), + vid_disperse, + &quorum_membership.clone().into(), + ); + // TODO for now reuse the same block payload commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 + let vid_proposal = Proposal { + data: vid_disperse_inner.clone(), + signature: vid_signature, _pd: PhantomData, }; @@ -272,26 +275,30 @@ async fn test_consensus_no_vote_without_vid_share() { inner: handle.hotshot.inner.clone(), }; let pub_key = *api.public_key(); - let vid = vid_init(); + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(2)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let signature = + let vid_signature = ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); - let vid_disperse = VidDisperse { - view_number: ViewNumber::new(2), - payload_commitment, - shares: vid_disperse.shares, - common: vid_disperse.common, - }; - let vid_proposal: Proposal> = Proposal { - data: vid_disperse.clone(), - signature, + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let vid_disperse_inner = VidDisperse::from_membership( + ViewNumber::new(2), + vid_disperse, + &quorum_membership.clone().into(), + ); + // TODO for now reuse the same block payload commitment and signature as DA committee + // https://github.com/EspressoSystems/jellyfish/issues/369 + let vid_proposal = Proposal { + data: vid_disperse_inner.clone(), + signature: vid_signature, _pd: PhantomData, }; + let mut input = Vec::new(); let mut output = HashMap::new(); From ed04378f79b46cfb114170c289b9d579beac06e2 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 16:28:53 -0800 Subject: [PATCH 0577/1393] lint --- task-impls/src/consensus.rs | 6 ++++-- testing/tests/consensus_task.rs | 1 - 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index cc667b26e3..657c745be9 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -239,7 +239,8 @@ impl, A: ConsensusApi + // Sishan TODO: comment this for now if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - error!( // Sishan TODO: change to debug level + error!( + // Sishan TODO: change to debug level "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); @@ -323,7 +324,8 @@ impl, A: ConsensusApi + return true; } } - error!( // Sishan TODO: change to debug level + error!( + // Sishan TODO: change to debug level "Received VID share, but couldn't find DAC cert for view {:?}", *proposal.get_view_number(), ); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index d7f2acaceb..daf9ae6790 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -298,7 +298,6 @@ async fn test_consensus_no_vote_without_vid_share() { _pd: PhantomData, }; - let mut input = Vec::new(); let mut output = HashMap::new(); From 0329614eae846a52730d36b45442afaf7dd1eba1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 16:36:10 -0800 Subject: [PATCH 0578/1393] ci tokio lint --- testing/Cargo.toml | 1 + testing/src/task_helpers.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/testing/Cargo.toml b/testing/Cargo.toml index a45ee2b25e..1651d88985 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -46,6 +46,7 @@ serde = { workspace = true } ethereum-types = { workspace = true } bitvec = { workspace = true } sha2 = { workspace = true } +async-lock = { workspace = true } [dev-dependencies] async-lock = { workspace = true } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 3195b9e305..3201b779b1 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -30,7 +30,7 @@ use hotshot_types::{ vote::HasViewNumber, }; -use async_std::sync::RwLockUpgradableReadGuard; +use async_lock::RwLockUpgradableReadGuard; use bincode::Options; use bitvec::bitvec; use hotshot_types::simple_vote::QuorumData; From e65a90e2ae8a63de5e1290dc81a3574d8a6b3c20 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 22:12:24 -0800 Subject: [PATCH 0579/1393] move test_consensus_vote --- testing/tests/consensus_task.rs | 99 +++++++++++++++++---------------- 1 file changed, 50 insertions(+), 49 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index daf9ae6790..707a8a2f92 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -138,6 +138,56 @@ async fn test_consensus_task() { run_harness(input, output, None, build_fn).await; } +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_vote() { + use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + // We assign node's key pair rather than read from config file since it's a test + let (private_key, public_key) = key_pair_for_id(1); + + let mut input = Vec::new(); + let mut output = HashMap::new(); + + let proposal = build_quorum_proposal(&handle, &private_key, 1).await; + + // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader + input.push(HotShotEvent::QuorumProposalRecv( + proposal.clone(), + public_key, + )); + output.insert( + HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), + 1, + ); + let proposal = proposal.data; + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { + output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); + output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); + } + + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); + + input.push(HotShotEvent::Shutdown); + output.insert(HotShotEvent::Shutdown, 1); + + let build_fn = |task_runner, event_stream| { + add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + }; + + run_harness(input, output, None, build_fn).await; +} + #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", @@ -339,52 +389,3 @@ async fn test_consensus_no_vote_without_vid_share() { run_harness(input, output, None, build_fn).await; } -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_vote() { - use hotshot_task_impls::harness::run_harness; - use hotshot_testing::task_helpers::build_system_handle; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle(2).await.0; - // We assign node's key pair rather than read from config file since it's a test - let (private_key, public_key) = key_pair_for_id(1); - - let mut input = Vec::new(); - let mut output = HashMap::new(); - - let proposal = build_quorum_proposal(&handle, &private_key, 1).await; - - // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - input.push(HotShotEvent::QuorumProposalRecv( - proposal.clone(), - public_key, - )); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), - 1, - ); - let proposal = proposal.data; - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { - output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); - } - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - - input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Shutdown, 1); - - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) - }; - - run_harness(input, output, None, build_fn).await; -} From 0c91a9e38d998eefb39fb09b4ad81bf83d2162ca Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 19 Dec 2023 22:13:57 -0800 Subject: [PATCH 0580/1393] lint --- testing/tests/consensus_task.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 707a8a2f92..d87adf1613 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -388,4 +388,3 @@ async fn test_consensus_no_vote_without_vid_share() { run_harness(input, output, None, build_fn).await; } - From 018b57a480ebbe92803cce479dfce3e2cd5f1634 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 20 Dec 2023 12:59:08 -0500 Subject: [PATCH 0581/1393] [Stability] Fix DA cert leak (#2268) * fix DA cert mem leak * change to hashmap --- hotshot/src/lib.rs | 1 + hotshot/src/tasks/mod.rs | 1 - task-impls/src/consensus.rs | 30 +++++++++++++----------------- types/src/consensus.rs | 8 +++++++- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 89e1a24123..ad2795ab95 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -245,6 +245,7 @@ impl> SystemContext { last_decided_view: anchored_leaf.get_view_number(), saved_leaves, saved_payloads, + saved_da_certs: HashMap::new(), // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b47dfa99a2..9bf6200255 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -230,7 +230,6 @@ pub async fn add_consensus_task>( timeout_task: None, event_stream: event_stream.clone(), output_event_stream: output_stream, - da_certs: HashMap::new(), vid_shares: HashMap::new(), current_proposal: None, id: handle.hotshot.inner.id, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b4ee7cb74d..1e7d6564fb 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -21,7 +21,7 @@ use hotshot_types::{ data::{Leaf, QuorumProposal, VidCommitment, VidDisperse}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate}, + simple_certificate::{QuorumCertificate, TimeoutCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, @@ -114,9 +114,6 @@ pub struct ConsensusTaskState< /// Event stream to publish events to the application layer pub output_event_stream: ChannelStream>, - /// All the DA certs we've received for current and future views. - pub da_certs: HashMap>, - /// All the VID shares we've received for current and future views. /// In the future we will need a different struct similar to VidDisperse except /// it stores only one share. @@ -246,7 +243,13 @@ impl, A: ConsensusApi + // Only vote if you have the DA cert // ED Need to update the view number this is stored under? - if let Some(cert) = self.da_certs.get(&(proposal.get_view_number())) { + if let Some(cert) = self + .consensus + .read() + .await + .saved_da_certs + .get(&(proposal.get_view_number())) + { let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. let justify_qc = proposal.justify_qc.clone(); @@ -347,12 +350,6 @@ impl, A: ConsensusApi + if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } - - // Remove old certs, we won't vote on past views - for view in *self.cur_view..*new_view - 1 { - let v = TYPES::Time::new(view); - self.da_certs.remove(&v); - } self.cur_view = new_view; // Poll the future leader for lookahead @@ -739,11 +736,6 @@ impl, A: ConsensusApi + return; } self.current_proposal = None; - - for v in (*self.cur_view)..=(*view) { - let time = TYPES::Time::new(v); - self.da_certs.remove(&time); - } } HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quroum vote: {:?}", vote.get_view_number()); @@ -910,7 +902,11 @@ impl, A: ConsensusApi + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) .await; - self.da_certs.insert(view, cert); + self.consensus + .write() + .await + .saved_da_certs + .insert(view, cert); if self.vote_if_able().await { self.current_proposal = None; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index cc98c48d51..e86939804e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -9,7 +9,7 @@ use displaydoc::Display; use crate::{ data::{Leaf, VidCommitment}, error::HotShotError, - simple_certificate::QuorumCertificate, + simple_certificate::{DACertificate, QuorumCertificate}, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, node_implementation::NodeType, @@ -36,6 +36,10 @@ pub struct Consensus { // TODO(https://github.com/EspressoSystems/hotshot/issues/153): Allow this to be loaded from `Storage`? pub state_map: BTreeMap>, + /// All the DA certs we've received for current and future views. + /// view -> DA cert + pub saved_da_certs: HashMap>, + /// cur_view from pseudocode pub cur_view: TYPES::Time, @@ -310,6 +314,8 @@ impl Consensus { ); } // perform gc + self.saved_da_certs + .retain(|view_number, _| *view_number >= old_anchor_view); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_payload_commitment()) From e37438de03691910469b50e7e0b43a9ec4fb757f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 10:13:20 -0800 Subject: [PATCH 0582/1393] comment --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 657c745be9..e2aec2a296 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -965,7 +965,7 @@ impl, A: ConsensusApi + // Add to the storage that we have received the VID disperse for a specific view // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 - // Sishan: comment this for now + // Sishan TODO: comment this for now self.vid_shares.insert(view, disperse); } HotShotEvent::ViewChange(new_view) => { From 81edf0c71e2b0b0021f64661c889eb34fa2bf26f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Dec 2023 14:18:24 -0500 Subject: [PATCH 0583/1393] use vote collector state, don't spawn task --- hotshot/src/tasks/mod.rs | 10 +- task-impls/src/consensus.rs | 86 +++++++++--------- task-impls/src/da.rs | 51 ++++++----- task-impls/src/network.rs | 8 +- task-impls/src/view_sync.rs | 177 ++++++++++++++++++------------------ task-impls/src/vote.rs | 36 +------- 6 files changed, 176 insertions(+), 192 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index c66eb7b784..6eeef416a0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -225,8 +225,8 @@ pub async fn add_consensus_task>( payload_commitment_and_metadata: Some((payload_commitment, metadata)), api: c_api.clone(), _pd: PhantomData, - vote_collector: None, - timeout_vote_collector: None, + vote_collector: None.into(), + timeout_vote_collector: None.into(), timeout_task: None, event_stream: event_stream.clone(), output_event_stream: output_stream, @@ -360,7 +360,7 @@ pub async fn add_da_task>( da_network: c_api.inner.networks.da_network.clone().into(), quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), cur_view: TYPES::Time::new(0), - vote_collector: None, + vote_collector: None.into(), event_stream: event_stream.clone(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), @@ -476,7 +476,9 @@ pub async fn add_view_sync_task>( api, num_timeouts_tracked: 0, replica_task_map: HashMap::default().into(), - relay_task_map: HashMap::default(), + pre_commit_relay_map: HashMap::default().into(), + commit_relay_map: HashMap::default().into(), + finalize_relay_map: HashMap::default().into(), view_sync_timeout: Duration::new(10, 0), id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b4ee7cb74d..d586420307 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,7 +1,7 @@ use crate::{ events::HotShotEvent, helpers::cancel_task, - vote::{spawn_vote_accumulator, AccumulatorInfo}, + vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; @@ -38,6 +38,7 @@ use hotshot_types::{ }; use tracing::warn; +use crate::vote::HandleVoteEvent; use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, @@ -55,6 +56,9 @@ pub struct ConsensusTaskError {} /// Alias for the block payload commitment and the associated metadata. type CommitmentAndMetadata = (VidCommitment, ::Metadata); +/// Alias for Optional type for Vote Collectors +type VoteCollectorOption = Option>; + /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState< @@ -100,10 +104,12 @@ pub struct ConsensusTaskState< pub _pd: PhantomData, /// Current Vote collection task, with it's view. - pub vote_collector: Option<(TYPES::Time, usize, usize)>, + pub vote_collector: + RwLock, QuorumCertificate>>, /// Current timeout vote collection task with its view - pub timeout_vote_collector: Option<(TYPES::Time, usize, usize)>, + pub timeout_vote_collector: + RwLock, TimeoutCertificate>>, /// timeout task handle pub timeout_task: Option>, @@ -761,19 +767,13 @@ impl, A: ConsensusApi + ); return; } + let mut collector = self.vote_collector.write().await; - let collection_view = - if let Some((collection_view, collection_task, _)) = &self.vote_collector { - if vote.get_view_number() > *collection_view { - // ED I think we'd want to let that task timeout to avoid a griefing vector - self.registry.shutdown_task(*collection_task).await; - } - *collection_view - } else { - TYPES::Time::new(0) - }; + let maybe_task = collector.take(); - if vote.get_view_number() > collection_view { + if maybe_task.is_none() + || vote.get_view_number() > maybe_task.as_ref().unwrap().view + { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -783,19 +783,20 @@ impl, A: ConsensusApi + id: self.id, registry: self.registry.clone(), }; - let name = "Quorum Vote Collection"; - self.vote_collector = spawn_vote_accumulator::< + *collector = create_vote_accumulator::< TYPES, QuorumVote, QuorumCertificate, - >( - &info, vote.clone(), event, name.to_string() - ) + >(&info, vote.clone(), event) .await; - } else if let Some((_, _, stream_id)) = self.vote_collector { - self.event_stream - .direct_message(stream_id, HotShotEvent::QuorumVoteRecv(vote.clone())) - .await; + } else { + let result = maybe_task.unwrap().handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + *collector = Some(result.1); } } HotShotEvent::TimeoutVoteRecv(ref vote) => { @@ -813,34 +814,35 @@ impl, A: ConsensusApi + ); return; } - let collection_view = - if let Some((collection_view, collection_task, _)) = &self.vote_collector { - if vote.get_view_number() > *collection_view { - // ED I think we'd want to let that task timeout to avoid a griefing vector - self.registry.shutdown_task(*collection_task).await; - } - *collection_view - } else { - TYPES::Time::new(0) - }; + let mut collector = self.timeout_vote_collector.write().await; + let maybe_task = collector.take(); - if vote.get_view_number() > collection_view { + if maybe_task.is_none() + || vote.get_view_number() > maybe_task.as_ref().unwrap().view + { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.timeout_membership.clone(), + membership: self.quorum_membership.clone(), view: vote.get_view_number(), event_stream: self.event_stream.clone(), id: self.id, registry: self.registry.clone(), }; - let name = "Timeout Vote Collection"; - self.vote_collector = - spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; - } else if let Some((_, _, stream_id)) = self.vote_collector { - self.event_stream - .direct_message(stream_id, HotShotEvent::TimeoutVoteRecv(vote.clone())) - .await; + *collector = create_vote_accumulator::< + TYPES, + TimeoutVote, + TimeoutCertificate, + >(&info, vote.clone(), event) + .await; + } else { + let result = maybe_task.unwrap().handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + *collector = Some(result.1); } } HotShotEvent::QCFormed(cert) => { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 662761b1aa..97e530ebfc 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,6 +1,6 @@ use crate::{ events::HotShotEvent, - vote::{spawn_vote_accumulator, AccumulatorInfo}, + vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_lock::RwLock; @@ -14,6 +14,7 @@ use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, message::Proposal, + simple_certificate::DACertificate, simple_vote::{DAData, DAVote}, traits::{ block_contents::vid_commitment, @@ -29,10 +30,14 @@ use hotshot_types::{ }; use sha2::{Digest, Sha256}; +use crate::vote::HandleVoteEvent; use snafu::Snafu; use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, error, instrument, warn}; +/// Alias for Optional type for Vote Collectors +type VoteCollectorOption = Option>; + #[derive(Snafu, Debug)] /// Error type for consensus tasks pub struct ConsensusTaskError {} @@ -65,8 +70,8 @@ pub struct DATaskState< /// Network for DA pub da_network: Arc, - /// The view and ID of the current vote collection task, if there is one. - pub vote_collector: Option<(TYPES::Time, usize, usize)>, + /// The current vote collection task, if there is one. + pub vote_collector: RwLock, DACertificate>>, /// Global events stream to publish events pub event_stream: ChannelStream>, @@ -182,18 +187,13 @@ impl, A: ConsensusApi + error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.da_membership.get_leader(view + 1) == self.public_key); return None; } - let collection_view = - if let Some((collection_view, collection_id, _)) = &self.vote_collector { - // TODO: Is this correct for consecutive leaders? - if view > *collection_view { - self.registry.shutdown_task(*collection_id).await; - } - *collection_view - } else { - TYPES::Time::new(0) - }; + let mut collector = self.vote_collector.write().await; - if view > collection_view { + let maybe_task = collector.take(); + + if maybe_task.is_none() + || vote.get_view_number() > maybe_task.as_ref().unwrap().view + { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -203,14 +203,21 @@ impl, A: ConsensusApi + id: self.id, registry: self.registry.clone(), }; - let name = "DA Vote Collection"; - self.vote_collector = - spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; - } else if let Some((_, _, stream_id)) = self.vote_collector { - self.event_stream - .direct_message(stream_id, HotShotEvent::DAVoteRecv(vote.clone())) - .await; - }; + *collector = create_vote_accumulator::< + TYPES, + DAVote, + DACertificate, + >(&info, vote.clone(), event) + .await; + } else { + let result = maybe_task.unwrap().handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return None; + } + *collector = Some(result.1); + } } HotShotEvent::ViewChange(view) => { if *self.cur_view >= *view { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ec326ef54d..cdef8af4d2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -293,10 +293,10 @@ impl> TransmitType::Broadcast => self.channel.broadcast_message(message, membership).await, }; - // match transmit_result { - // Ok(()) => {} - // Err(e) => error!("Failed to send message from network task: {:?}", e), - // } + match transmit_result { + Ok(()) => {} + Err(e) => error!("Failed to send message from network task: {:?}", e), + } None } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 2fe1e79669..6f88f3e0a0 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -2,7 +2,7 @@ use crate::{ events::HotShotEvent, helpers::cancel_task, - vote::{spawn_vote_accumulator, AccumulatorInfo}, + vote::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -11,7 +11,13 @@ use hotshot_task::{ task::{HotShotTaskCompleted, TS}, task_impls::HSTWithEvent, }; -use hotshot_types::{simple_vote::ViewSyncFinalizeData, traits::signature_key::SignatureKey}; +use hotshot_types::{ + simple_certificate::{ + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + }, + simple_vote::ViewSyncFinalizeData, + traits::signature_key::SignatureKey, +}; use hotshot_types::{ simple_vote::{ ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, @@ -35,7 +41,7 @@ use hotshot_types::{ }, }; use snafu::Snafu; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; @@ -52,17 +58,14 @@ pub enum ViewSyncPhase { Finalize, } -#[derive(Default)] -/// Information about view sync sub-tasks -pub struct ViewSyncTaskInfo { - /// Id of the event stream of a certain task - event_stream_id: usize, -} - #[derive(Snafu, Debug)] /// Stub of a view sync error pub struct ViewSyncTaskError {} +/// Type alias for a map from View Number to Vote Task +type RelayMap = + HashMap<::Time, VoteCollectionTaskState>; + /// Main view sync task state pub struct ViewSyncTaskState< TYPES: NodeType, @@ -96,8 +99,15 @@ pub struct ViewSyncTaskState< /// Map of running replica tasks pub replica_task_map: RwLock>>, - /// Map of running relay tasks - pub relay_task_map: HashMap<(TYPES::Time, ViewSyncPhase), ViewSyncTaskInfo>, + /// Map of pre-commit vote accumulates for the relay + pub pre_commit_relay_map: + RwLock, ViewSyncPreCommitCertificate2>>, + /// Map of commit vote accumulates for the relay + pub commit_relay_map: + RwLock, ViewSyncCommitCertificate2>>, + /// Map of finalize vote accumulates for the relay + pub finalize_relay_map: + RwLock, ViewSyncFinalizeCertificate2>>, /// Timeout duration for view sync rounds pub view_sync_timeout: Duration, @@ -237,6 +247,7 @@ impl< task_map.insert(view, replica_state); } + #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task @@ -264,13 +275,18 @@ impl< } HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { + let mut map = self.pre_commit_relay_map.write().await; let vote_view = vote.get_view_number(); - let view_phase = (vote_view, ViewSyncPhase::PreCommit); - if let Some(relay_task) = self.relay_task_map.get(&view_phase) { - // Forward event then return - self.event_stream - .direct_message(relay_task.event_stream_id, event) - .await; + if let Some(relay_task) = map.remove(&vote_view) { + debug!("Forwarding message"); + let result = relay_task.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + map.insert(vote_view, result.1); return; } @@ -285,7 +301,6 @@ impl< return; } - let name = format!("View Sync Relay Task for view {vote_view:?}"); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.membership.clone(), @@ -294,22 +309,25 @@ impl< id: self.id, registry: self.registry.clone(), }; - let vote_collector = - spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; - if let Some((_, _, event_stream_id)) = vote_collector { - self.relay_task_map - .insert(view_phase, ViewSyncTaskInfo { event_stream_id }); + let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + if let Some(vote_task) = vote_collector { + map.insert(vote_view, vote_task); } } HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { + let mut map = self.commit_relay_map.write().await; let vote_view = vote.get_view_number(); - let view_phase = (vote_view, ViewSyncPhase::Commit); - if let Some(relay_task) = self.relay_task_map.get(&view_phase) { - // Forward event then return - self.event_stream - .direct_message(relay_task.event_stream_id, event) - .await; + if let Some(relay_task) = map.remove(&vote_view) { + debug!("Forwarding message"); + let result = relay_task.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + map.insert(vote_view, result.1); return; } @@ -324,7 +342,6 @@ impl< return; } - let name = format!("View Sync Relay Task for view {vote_view:?}"); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.membership.clone(), @@ -333,22 +350,25 @@ impl< id: self.id, registry: self.registry.clone(), }; - let vote_collector = - spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; - if let Some((_, _, event_stream_id)) = vote_collector { - self.relay_task_map - .insert(view_phase, ViewSyncTaskInfo { event_stream_id }); + let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + if let Some(vote_task) = vote_collector { + map.insert(vote_view, vote_task); } } HotShotEvent::ViewSyncFinalizeVoteRecv(ref vote) => { + let mut map = self.finalize_relay_map.write().await; let vote_view = vote.get_view_number(); - let view_phase = (vote_view, ViewSyncPhase::Finalize); - if let Some(relay_task) = self.relay_task_map.get(&view_phase) { - // Forward event then return - self.event_stream - .direct_message(relay_task.event_stream_id, event) - .await; + if let Some(relay_task) = map.remove(&vote_view) { + debug!("Forwarding message"); + let result = relay_task.handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return; + } + + map.insert(vote_view, result.1); return; } @@ -363,20 +383,17 @@ impl< return; } - let name = format!("View Sync Relay Task for view {vote_view:?}"); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.membership.clone(), - view: vote.get_view_number(), + view: vote_view, event_stream: self.event_stream.clone(), id: self.id, registry: self.registry.clone(), }; - let vote_collector = - spawn_vote_accumulator(&info, vote.clone(), event, name.to_string()).await; - if let Some((_, _, event_stream_id)) = vote_collector { - self.relay_task_map - .insert(view_phase, ViewSyncTaskInfo { event_stream_id }); + let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + if let Some(vote_task) = vote_collector { + map.insert(vote_view, vote_task); } } @@ -407,39 +424,18 @@ impl< .write() .await .remove_entry(&TYPES::Time::new(i)); - if let Some((_key, relay_task_info)) = self - .relay_task_map - .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::PreCommit)) - { - self.event_stream - .direct_message( - relay_task_info.event_stream_id, - HotShotEvent::Shutdown, - ) - .await; - } - if let Some((_key, relay_task_info)) = self - .relay_task_map - .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::Commit)) - { - self.event_stream - .direct_message( - relay_task_info.event_stream_id, - HotShotEvent::Shutdown, - ) - .await; - } - if let Some((_key, relay_task_info)) = self - .relay_task_map - .remove_entry(&(TYPES::Time::new(i), ViewSyncPhase::Finalize)) - { - self.event_stream - .direct_message( - relay_task_info.event_stream_id, - HotShotEvent::Shutdown, - ) - .await; - } + self.pre_commit_relay_map + .write() + .await + .remove_entry(&TYPES::Time::new(i)); + self.commit_relay_map + .write() + .await + .remove_entry(&TYPES::Time::new(i)); + self.finalize_relay_map + .write() + .await + .remove_entry(&TYPES::Time::new(i)); } self.last_garbage_collected_view = self.current_view - 1; @@ -511,8 +507,11 @@ impl< self.network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; - self.send_to_or_create_replica(HotShotEvent::ViewSyncTrigger(view_number + 1), view_number + 1) - .await; + self.send_to_or_create_replica( + HotShotEvent::ViewSyncTrigger(view_number + 1), + view_number + 1, + ) + .await; } else { // If this is the first timeout we've seen advance to the next view self.current_view = view_number; @@ -612,7 +611,7 @@ impl, A: ConsensusApi + let phase = last_seen_certificate; async move { async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); + info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -668,7 +667,7 @@ impl, A: ConsensusApi + .await; } - error!( + info!( "View sync protocol has received view sync evidence to update the view to {}", *self.next_view ); @@ -685,7 +684,7 @@ impl, A: ConsensusApi + let phase = last_seen_certificate; async move { async_sleep(self.view_sync_timeout).await; - error!( + info!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", self.relay ); @@ -783,7 +782,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - error!("Vote sending timed out in ViewSyncTrigger"); + info!("Vote sending timed out in ViewSyncTrigger"); stream .publish(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*self.next_view), @@ -838,7 +837,7 @@ impl, A: ConsensusApi + let stream = self.event_stream.clone(); async move { async_sleep(self.view_sync_timeout).await; - error!( + info!( "Vote sending timed out in ViewSyncTimeout relay = {}", self.relay ); diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 578fcd6b96..bf20a7bad9 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -1,16 +1,14 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; use crate::events::HotShotEvent; -use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use bitvec::prelude::*; use either::Either::{self, Left, Right}; -use futures::FutureExt; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, }; use hotshot_types::{ simple_certificate::{ @@ -175,12 +173,11 @@ pub struct AccumulatorInfo { /// Generic function for spawnnig a vote task. Returns the event stream id of the spawned task if created /// # Panics /// Calls unwrap but should never panic. -pub async fn spawn_vote_accumulator( +pub async fn create_vote_accumulator( info: &AccumulatorInfo, vote: VOTE, event: HotShotEvent, - name: String, -) -> Option<(TYPES::Time, usize, usize)> +) -> Option> where TYPES: NodeType, VOTE: Vote @@ -227,30 +224,7 @@ where } state = result.1; - - let relay_handle_event = HandleEvent(Arc::new( - move |event, state: VoteCollectionTaskState| { - async move { state.handle_event(event).await }.boxed() - }, - )); - - let filter = FilterEvent(Arc::new( - as HandleVoteEvent>::filter, - )); - let builder = TaskBuilder::>::new(name) - .register_event_stream(state.event_stream.clone(), filter) - .await - .register_registry(&mut info.registry.clone()) - .await - .register_state(state) - .register_event_handler(relay_handle_event); - - let event_stream_id = builder.get_stream_id().unwrap(); - let id = builder.get_task_id().unwrap(); - - let _task = async_spawn(async move { VoteTaskStateTypes::build(builder).launch().await }); - - Some((vote.get_view_number(), id, event_stream_id)) + Some(state) } /// Alias for Quorum vote accumulator From f227c6c0d034603c328d2319d1c64788b25ffeb0 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 20 Dec 2023 12:31:23 -0800 Subject: [PATCH 0584/1393] Fix dependency --- testing/Cargo.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 3c4b3dd621..f3133e6335 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -12,9 +12,7 @@ slow-tests = [] [dependencies] ark-bls12-381 = { workspace = true } -ark-serialize = { version = "0.3", features = [ - "derive", -] } # TODO upgrade to 0.4 and inherit this dep from workspace https://github.com/EspressoSystems/HotShot/issues/1700 +ark-serialize = { workspace = true, features = ["derive"] } espresso-systems-common = { workspace = true } async-compatibility-layer = { workspace = true } async-trait = { workspace = true } From 85c826edd43298846d792266e5ab659a439c4dca Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 20 Dec 2023 12:37:22 -0800 Subject: [PATCH 0585/1393] Increase num_failed_views --- testing/tests/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 73120c9844..217f6b91e7 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -60,7 +60,7 @@ async fn test_with_failures_one() { metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, dead_nodes)], }; - metadata.overall_safety_properties.num_failed_views = 2; + metadata.overall_safety_properties.num_failed_views = 3; metadata.overall_safety_properties.num_successful_views = 25; metadata .gen_launcher::(0) From e4557872a9e46d32b2c63726300a5964cb15ad70 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Dec 2023 16:08:33 -0500 Subject: [PATCH 0586/1393] repro --- task-impls/src/network.rs | 8 +- testing/tests/basic.rs | 200 +++++++++++++++++++------------------- 2 files changed, 104 insertions(+), 104 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index cdef8af4d2..ec326ef54d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -293,10 +293,10 @@ impl> TransmitType::Broadcast => self.channel.broadcast_message(message, membership).await, }; - match transmit_result { - Ok(()) => {} - Err(e) => error!("Failed to send message from network task: {:?}", e), - } + // match transmit_result { + // Ok(()) => {} + // Err(e) => error!("Failed to send message from network task: {:?}", e), + // } None } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 73120c9844..ebc101045d 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -31,43 +31,43 @@ async fn test_success() { } /// Test one node leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_one() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; +// #[cfg(test)] +// #[cfg_attr( +// async_executor_impl = "tokio", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// async fn test_with_failures_one() { +// use hotshot_testing::{ +// node_types::{MemoryImpl, TestTypes}, +// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, +// test_builder::TestMetadata, +// }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ChangeNode { - idx: 19, - updown: UpDown::Down, - }]; +// async_compatibility_layer::logging::setup_logging(); +// async_compatibility_layer::logging::setup_backtrace(); +// let mut metadata = TestMetadata::default_more_nodes(); +// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the +// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the +// // following issue. +// // TODO: Update message broadcasting to avoid hanging +// // +// let dead_nodes = vec![ChangeNode { +// idx: 19, +// updown: UpDown::Down, +// }]; - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - metadata.overall_safety_properties.num_failed_views = 2; - metadata.overall_safety_properties.num_successful_views = 25; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} +// metadata.spinning_properties = SpinningTaskDescription { +// node_changes: vec![(5, dead_nodes)], +// }; +// metadata.overall_safety_properties.num_failed_views = 2; +// metadata.overall_safety_properties.num_successful_views = 25; +// metadata +// .gen_launcher::(0) +// .launch() +// .run_test() +// .await; +// } /// Test f/2 nodes leaving the network. #[cfg(test)] @@ -100,10 +100,10 @@ async fn test_with_failures_half_f() { idx: 18, updown: UpDown::Down, }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, + // ChangeNode { + // idx: 19, + // updown: UpDown::Down, + // }, ]; metadata.spinning_properties = SpinningTaskDescription { @@ -111,9 +111,9 @@ async fn test_with_failures_half_f() { }; // TODO: this should only have 3 failures for each down leader, investigate why it fails additional views - metadata.overall_safety_properties.num_failed_views = 8; + metadata.overall_safety_properties.num_failed_views = 2; // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; + metadata.overall_safety_properties.num_successful_views = 25; metadata .gen_launcher::(0) .launch() @@ -121,64 +121,64 @@ async fn test_with_failures_half_f() { .await; } -/// Test f nodes leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_f() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; +// /// Test f nodes leaving the network. +// #[cfg(test)] +// #[cfg_attr( +// async_executor_impl = "tokio", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// async fn test_with_failures_f() { +// use hotshot_testing::{ +// node_types::{MemoryImpl, TestTypes}, +// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, +// test_builder::TestMetadata, +// }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 6; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 14, - updown: UpDown::Down, - }, - ChangeNode { - idx: 15, - updown: UpDown::Down, - }, - ChangeNode { - idx: 16, - updown: UpDown::Down, - }, - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; +// async_compatibility_layer::logging::setup_logging(); +// async_compatibility_layer::logging::setup_backtrace(); +// let mut metadata = TestMetadata::default_more_nodes(); +// metadata.overall_safety_properties.num_failed_views = 6; +// // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts +// metadata.overall_safety_properties.num_successful_views = 27; +// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the +// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the +// // following issue. +// // TODO: Update message broadcasting to avoid hanging +// // +// let dead_nodes = vec![ +// ChangeNode { +// idx: 14, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 15, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 16, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 17, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 18, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 19, +// updown: UpDown::Down, +// }, +// ]; - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} +// metadata.spinning_properties = SpinningTaskDescription { +// node_changes: vec![(5, dead_nodes)], +// }; +// metadata +// .gen_launcher::(0) +// .launch() +// .run_test() +// .await; +// } From 223a3b549592a2072fd253edb5128e58026a662a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Dec 2023 16:09:58 -0500 Subject: [PATCH 0587/1393] Revert "repro" This reverts commit 3b9a5d58352abe3be77848f0384dd7a2e7608376. --- task-impls/src/network.rs | 8 +- testing/tests/basic.rs | 200 +++++++++++++++++++------------------- 2 files changed, 104 insertions(+), 104 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ec326ef54d..cdef8af4d2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -293,10 +293,10 @@ impl> TransmitType::Broadcast => self.channel.broadcast_message(message, membership).await, }; - // match transmit_result { - // Ok(()) => {} - // Err(e) => error!("Failed to send message from network task: {:?}", e), - // } + match transmit_result { + Ok(()) => {} + Err(e) => error!("Failed to send message from network task: {:?}", e), + } None } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index ebc101045d..73120c9844 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -31,43 +31,43 @@ async fn test_success() { } /// Test one node leaving the network. -// #[cfg(test)] -// #[cfg_attr( -// async_executor_impl = "tokio", -// tokio::test(flavor = "multi_thread", worker_threads = 2) -// )] -// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// async fn test_with_failures_one() { -// use hotshot_testing::{ -// node_types::{MemoryImpl, TestTypes}, -// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, -// test_builder::TestMetadata, -// }; +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_one() { + use hotshot_testing::{ + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; -// async_compatibility_layer::logging::setup_logging(); -// async_compatibility_layer::logging::setup_backtrace(); -// let mut metadata = TestMetadata::default_more_nodes(); -// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the -// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the -// // following issue. -// // TODO: Update message broadcasting to avoid hanging -// // -// let dead_nodes = vec![ChangeNode { -// idx: 19, -// updown: UpDown::Down, -// }]; + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ChangeNode { + idx: 19, + updown: UpDown::Down, + }]; -// metadata.spinning_properties = SpinningTaskDescription { -// node_changes: vec![(5, dead_nodes)], -// }; -// metadata.overall_safety_properties.num_failed_views = 2; -// metadata.overall_safety_properties.num_successful_views = 25; -// metadata -// .gen_launcher::(0) -// .launch() -// .run_test() -// .await; -// } + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + metadata.overall_safety_properties.num_failed_views = 2; + metadata.overall_safety_properties.num_successful_views = 25; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} /// Test f/2 nodes leaving the network. #[cfg(test)] @@ -100,10 +100,10 @@ async fn test_with_failures_half_f() { idx: 18, updown: UpDown::Down, }, - // ChangeNode { - // idx: 19, - // updown: UpDown::Down, - // }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, ]; metadata.spinning_properties = SpinningTaskDescription { @@ -111,9 +111,9 @@ async fn test_with_failures_half_f() { }; // TODO: this should only have 3 failures for each down leader, investigate why it fails additional views - metadata.overall_safety_properties.num_failed_views = 2; + metadata.overall_safety_properties.num_failed_views = 8; // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 25; + metadata.overall_safety_properties.num_successful_views = 22; metadata .gen_launcher::(0) .launch() @@ -121,64 +121,64 @@ async fn test_with_failures_half_f() { .await; } -// /// Test f nodes leaving the network. -// #[cfg(test)] -// #[cfg_attr( -// async_executor_impl = "tokio", -// tokio::test(flavor = "multi_thread", worker_threads = 2) -// )] -// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// async fn test_with_failures_f() { -// use hotshot_testing::{ -// node_types::{MemoryImpl, TestTypes}, -// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, -// test_builder::TestMetadata, -// }; +/// Test f nodes leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_f() { + use hotshot_testing::{ + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; -// async_compatibility_layer::logging::setup_logging(); -// async_compatibility_layer::logging::setup_backtrace(); -// let mut metadata = TestMetadata::default_more_nodes(); -// metadata.overall_safety_properties.num_failed_views = 6; -// // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts -// metadata.overall_safety_properties.num_successful_views = 27; -// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the -// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the -// // following issue. -// // TODO: Update message broadcasting to avoid hanging -// // -// let dead_nodes = vec![ -// ChangeNode { -// idx: 14, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 15, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 16, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 17, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 18, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 19, -// updown: UpDown::Down, -// }, -// ]; + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 14, + updown: UpDown::Down, + }, + ChangeNode { + idx: 15, + updown: UpDown::Down, + }, + ChangeNode { + idx: 16, + updown: UpDown::Down, + }, + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; -// metadata.spinning_properties = SpinningTaskDescription { -// node_changes: vec![(5, dead_nodes)], -// }; -// metadata -// .gen_launcher::(0) -// .launch() -// .run_test() -// .await; -// } + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} From dc34145e9a6f3494b8e152d28ed87c45b3ba4410 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 15:58:05 -0800 Subject: [PATCH 0588/1393] added dac formation --- task-impls/src/consensus.rs | 8 ++-- testing/src/task_helpers.rs | 65 ++++++++++++++++++++++++++++++++- testing/tests/consensus_task.rs | 34 ++++++++++++++--- 3 files changed, 96 insertions(+), 11 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e2aec2a296..53e11e05af 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -239,8 +239,7 @@ impl, A: ConsensusApi + // Sishan TODO: comment this for now if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { } else { - error!( - // Sishan TODO: change to debug level + debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); @@ -324,8 +323,7 @@ impl, A: ConsensusApi + return true; } } - error!( - // Sishan TODO: change to debug level + debug!( "Received VID share, but couldn't find DAC cert for view {:?}", *proposal.get_view_number(), ); @@ -903,7 +901,7 @@ impl, A: ConsensusApi + } } HotShotEvent::DACRecv(cert) => { - debug!("DAC Received for view ! {}", *cert.view_number); + debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; self.quorum_network diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 3201b779b1..d473926c83 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -14,9 +14,10 @@ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, + data::{Leaf, QuorumProposal, VidCommitment, VidScheme, ViewNumber}, message::Proposal, simple_certificate::QuorumCertificate, + simple_vote::{DAData, DAVote, SimpleVote}, traits::{ block_contents::vid_commitment, block_contents::BlockHeader, @@ -188,6 +189,68 @@ pub fn build_qc< cert } +pub fn build_dac< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate, +>( + payload_commitment: VidCommitment, + handle: &SystemContextHandle, + view: TYPES::Time, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, +) -> CERT { + let stake_table = handle.get_committee_qc_stake_table(); + let real_qc_pp: ::QCParams = + ::get_public_parameter( + stake_table.clone(), + handle.get_threshold(), + ); + let total_nodes = stake_table.len(); + let signers = bitvec![1; total_nodes]; + let mut sig_lists = Vec::new(); + + // calculate vote + for node_id in 0..total_nodes { + let (private_key_i, public_key_i) = key_pair_for_id(node_id.try_into().unwrap()); + let da_vote: SimpleVote = DAVote::create_signed_vote( + DAData { + payload_commit: payload_commitment, + }, + view, + &public_key_i, + &private_key_i, + ); + let original_signature: ::PureAssembledSignatureType = + bincode_opts() + .deserialize(&da_vote.get_signature().0) + .expect("Deserialization on the signature shouldn't be able to fail."); + sig_lists.push(original_signature); + } + + let real_qc_sig = ::assemble( + &real_qc_pp, + signers.as_bitslice(), + &sig_lists[..], + ); + + let vote: SimpleVote = DAVote::create_signed_vote( + DAData { + payload_commit: payload_commitment, + }, + view, + public_key, + private_key, + ); + let cert = CERT::create_signed_certificate( + vote.get_data_commitment(), + vote.get_data().clone(), + real_qc_sig, + vote.get_view_number(), + ); + cert +} + async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index d87adf1613..7dbf4301c6 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -196,10 +196,17 @@ async fn test_consensus_vote() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_with_vid() { use hotshot_task_impls::harness::run_harness; + use hotshot_testing::block_types::TestBlockPayload; use hotshot_testing::block_types::TestTransaction; + use hotshot_testing::task_helpers::build_dac; use hotshot_testing::task_helpers::build_system_handle; use hotshot_testing::task_helpers::vid_init; use hotshot_types::data::VidSchemeTrait; + use hotshot_types::simple_certificate::DACertificate; + use hotshot_types::simple_vote::DAVote; + use hotshot_types::traits::block_contents::vid_commitment; + use hotshot_types::traits::state::TestableBlock; + use hotshot_types::traits::BlockPayload; use hotshot_types::{ data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, }; @@ -252,6 +259,24 @@ async fn test_consensus_with_vid() { let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; // TODO: Still need a valid DAC cert, now tracking logging instead // https://github.com/EspressoSystems/HotShot/issues/2255 + let block = ::genesis(); + let da_payload_commitment = vid_commitment( + &block.encode().unwrap().collect(), + handle + .hotshot + .inner + .memberships + .quorum_membership + .total_nodes(), + ); + let dac_view2 = build_dac::, DACertificate>( + da_payload_commitment, + &handle, + ViewNumber::new(2), + &public_key_view2, + &private_key_view2, + ); + input.push(HotShotEvent::DACRecv(dac_view2.clone())); input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader @@ -264,15 +289,14 @@ async fn test_consensus_with_vid() { HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), 1, ); + output.insert(HotShotEvent::DACRecv(dac_view2), 1); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); // TODO: Uncomment this after the above TODO is done // https://github.com/EspressoSystems/HotShot/issues/2255 - // if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { - // output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - // } - - // Sishan TODO: track the logging message "Received VID share, but couldn't find DAC cert in certs xxx", + if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { + output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); + } output.insert( HotShotEvent::ViewChange(ViewNumber::new(1)), From ea82cf032a00c96b434c1bf9d17c3a52ee8433ef Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 16:26:56 -0800 Subject: [PATCH 0589/1393] added [ignore] for vid test for now --- testing/tests/consensus_task.rs | 102 +------------------------------- 1 file changed, 3 insertions(+), 99 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 7dbf4301c6..d5a8814bc1 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -194,6 +194,9 @@ async fn test_consensus_vote() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// TODO: re-enable this when HotShot/the sequencer needs the shares for something +// issue: https://github.com/EspressoSystems/HotShot/issues/2236 +// #[ignore] // Sishan TODO: comment this later async fn test_consensus_with_vid() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::block_types::TestBlockPayload; @@ -257,8 +260,6 @@ async fn test_consensus_with_vid() { // For the test of vote logic with vid, starting view 2 we need vid share input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - // TODO: Still need a valid DAC cert, now tracking logging instead - // https://github.com/EspressoSystems/HotShot/issues/2255 let block = ::genesis(); let da_payload_commitment = vid_commitment( &block.encode().unwrap().collect(), @@ -292,8 +293,6 @@ async fn test_consensus_with_vid() { output.insert(HotShotEvent::DACRecv(dac_view2), 1); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - // TODO: Uncomment this after the above TODO is done - // https://github.com/EspressoSystems/HotShot/issues/2255 if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); } @@ -317,98 +316,3 @@ async fn test_consensus_with_vid() { run_harness(input, output, None, build_fn).await; } -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[allow(clippy::should_panic_without_expect)] -#[should_panic] -async fn test_consensus_no_vote_without_vid_share() { - use hotshot_task_impls::harness::run_harness; - use hotshot_testing::block_types::TestTransaction; - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_testing::task_helpers::vid_init; - use hotshot_types::data::VidSchemeTrait; - use hotshot_types::{ - data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, - }; - use std::marker::PhantomData; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let (handle, _event_stream) = build_system_handle(2).await; - // We assign node's key pair rather than read from config file since it's a test - // In view 2, node 2 is the leader. - let (private_key_view2, public_key_view2) = key_pair_for_id(2); - - // For the test of vote logic with vid - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let pub_key = *api.public_key(); - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(2)); - let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - - let vid_signature = - ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let vid_disperse_inner = VidDisperse::from_membership( - ViewNumber::new(2), - vid_disperse, - &quorum_membership.clone().into(), - ); - // TODO for now reuse the same block payload commitment and signature as DA committee - // https://github.com/EspressoSystems/jellyfish/issues/369 - let vid_proposal = Proposal { - data: vid_disperse_inner.clone(), - signature: vid_signature, - _pd: PhantomData, - }; - - let mut input = Vec::new(); - let mut output = HashMap::new(); - - // Do a view change, so that it's not the genesis view, and vid vote is needed - input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - - // For the test of vote logic with vid, starting view 2 we need vid share - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - - // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - input.push(HotShotEvent::QuorumProposalRecv( - proposal_view2.clone(), - public_key_view2, - )); - - output.insert( - HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), - 1, - ); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), - 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input - ); - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(2)), - 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input - ); - // Sishan TODO: would better track the log "We have not seen the VID share for this view ..." - input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Shutdown, 1); - - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) - }; - - run_harness(input, output, None, build_fn).await; -} From c67af2785ff6fa5c0211499656439584ea2a940b Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 17:27:20 -0800 Subject: [PATCH 0590/1393] parameterizing view number in testing proposal creation --- testing/src/task_helpers.rs | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index d473926c83..af480a2599 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -41,6 +41,7 @@ use hotshot_types::utils::ViewInner; use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use hotshot_utils::bincode::bincode_opts; +use tracing::error; pub async fn build_system_handle( node_id: u64, @@ -288,7 +289,7 @@ async fn build_quorum_proposal_and_signature( .total_nodes(), ); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); - let leaf = Leaf { + let mut leaf = Leaf { view_number: ViewNumber::new(1), justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), @@ -298,16 +299,15 @@ async fn build_quorum_proposal_and_signature( timestamp: 0, proposer_id: api.public_key().to_bytes(), }; - let signature = ::sign(private_key, leaf.commit().as_ref()); - let proposal = QuorumProposal:: { + let mut signature = ::sign(private_key, leaf.commit().as_ref()); + let mut proposal = QuorumProposal:: { block_header: block_header.clone(), view_number: ViewNumber::new(1), justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, proposer_id: leaf.proposer_id.clone(), }; - - if view == 2 { + for cur_view in 2..=view { consensus.state_map.insert( ViewNumber::new(1), View { @@ -321,7 +321,7 @@ async fn build_quorum_proposal_and_signature( TestTypes, QuorumVote, QuorumCertificate, - >(leaf.clone(), handle, ViewNumber::new(1)); + >(leaf.clone(), handle, ViewNumber::new(cur_view - 1)); let created_qc = build_qc::, QuorumCertificate>( created_assembled_sig, leaf.clone(), @@ -330,8 +330,8 @@ async fn build_quorum_proposal_and_signature( private_key, ); let parent_leaf = leaf.clone(); - let leaf_view2 = Leaf { - view_number: ViewNumber::new(2), + let leaf_new_view = Leaf { + view_number: ViewNumber::new(cur_view), justify_qc: created_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), @@ -340,16 +340,18 @@ async fn build_quorum_proposal_and_signature( timestamp: 0, proposer_id: api.public_key().to_bytes(), }; - let signature_view2 = - ::sign(private_key, leaf_view2.commit().as_ref()); - let proposal_view2 = QuorumProposal:: { - block_header, - view_number: ViewNumber::new(2), + let signature_new_view = + ::sign(private_key, leaf_new_view.commit().as_ref()); + let proposal_new_view = QuorumProposal:: { + block_header: block_header.clone(), + view_number: ViewNumber::new(cur_view), justify_qc: created_qc, timeout_certificate: None, - proposer_id: leaf_view2.proposer_id, + proposer_id: leaf_new_view.clone().proposer_id, }; - return (proposal_view2, signature_view2); + proposal = proposal_new_view; + signature = signature_new_view; + leaf = leaf_new_view; } (proposal, signature) From 9f9adf19737e898600fe3d4b7a7ea37e2e192fa7 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 18:03:51 -0800 Subject: [PATCH 0591/1393] unify membership in testing --- hotshot/src/types/handle.rs | 27 --------------------------- task-impls/src/consensus.rs | 3 ++- testing/src/task_helpers.rs | 26 ++++++++++++++------------ testing/tests/consensus_task.rs | 11 +++-------- 4 files changed, 19 insertions(+), 48 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index ca0d907b1b..37ff894f76 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -4,7 +4,6 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; use commit::Committable; -use ethereum_types::U256; use futures::Stream; use hotshot_task::{ boxed_sync, @@ -16,7 +15,6 @@ use hotshot_task::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::simple_vote::QuorumData; use hotshot_types::traits::election::Membership; -use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ consensus::Consensus, error::HotShotError, @@ -200,31 +198,6 @@ impl + 'static> SystemContextHandl // Below is for testing only: - /// Gets the current membership of the [`HotShot`] instance - #[cfg(feature = "hotshot-testing")] - pub fn get_committee_qc_stake_table( - &self, - ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { - self.hotshot - .inner - .memberships - .quorum_membership - .get_committee_qc_stake_table() - } - - /// Gets the threshold of current membership of the [`HotShot`] instance - #[cfg(feature = "hotshot-testing")] - pub fn get_threshold(&self) -> U256 { - U256::from( - self.hotshot - .inner - .memberships - .quorum_membership - .success_threshold() - .get(), - ) - } - /// Wrapper for `HotShotConsensusApi`'s `get_leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons #[cfg(feature = "hotshot-testing")] diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 53e11e05af..02f76e5009 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -313,7 +313,8 @@ impl, A: ConsensusApi + }; if let GeneralConsensusMessage::Vote(vote) = message { - debug!( + error!( + // Sishan TODO: change to debug level later "Sending vote to next quorum leader {:?}", vote.get_view_number() ); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index af480a2599..1e7a9c68c9 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -6,6 +6,7 @@ use crate::{ test_builder::TestMetadata, }; use commit::Committable; +use ethereum_types::U256; use hotshot::{ types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, HotShotConsensusApi, HotShotInitializer, Memberships, Networks, SystemContext, @@ -41,7 +42,6 @@ use hotshot_types::utils::ViewInner; use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use hotshot_utils::bincode::bincode_opts; -use tracing::error; pub async fn build_system_handle( node_id: u64, @@ -121,15 +121,15 @@ pub fn build_assembled_sig< CERT: Certificate, >( leaf: Leaf, - handle: &SystemContextHandle, + membership: &TYPES::Membership, view: TYPES::Time, ) -> ::QCType { // Assemble QC - let stake_table = handle.get_committee_qc_stake_table(); + let stake_table = membership.get_committee_qc_stake_table(); let real_qc_pp: ::QCParams = ::get_public_parameter( stake_table.clone(), - handle.get_threshold(), + U256::from(CERT::threshold(membership)), ); let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; @@ -196,16 +196,16 @@ pub fn build_dac< CERT: Certificate, >( payload_commitment: VidCommitment, - handle: &SystemContextHandle, + membership: &TYPES::Membership, view: TYPES::Time, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, ) -> CERT { - let stake_table = handle.get_committee_qc_stake_table(); + let stake_table = membership.get_committee_qc_stake_table(); let real_qc_pp: ::QCParams = ::get_public_parameter( stake_table.clone(), - handle.get_threshold(), + U256::from(CERT::threshold(membership)), ); let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; @@ -317,11 +317,13 @@ async fn build_quorum_proposal_and_signature( }, ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - let created_assembled_sig = build_assembled_sig::< - TestTypes, - QuorumVote, - QuorumCertificate, - >(leaf.clone(), handle, ViewNumber::new(cur_view - 1)); + let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let created_assembled_sig = + build_assembled_sig::, QuorumCertificate>( + leaf.clone(), + &quorum_membership, + ViewNumber::new(cur_view - 1), + ); let created_qc = build_qc::, QuorumCertificate>( created_assembled_sig, leaf.clone(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index d5a8814bc1..87b5ae7eba 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -263,16 +263,12 @@ async fn test_consensus_with_vid() { let block = ::genesis(); let da_payload_commitment = vid_commitment( &block.encode().unwrap().collect(), - handle - .hotshot - .inner - .memberships - .quorum_membership - .total_nodes(), + quorum_membership.total_nodes(), ); + let dac_view2 = build_dac::, DACertificate>( da_payload_commitment, - &handle, + &quorum_membership, ViewNumber::new(2), &public_key_view2, &private_key_view2, @@ -315,4 +311,3 @@ async fn test_consensus_with_vid() { run_harness(input, output, None, build_fn).await; } - From e81949de8355f713b700e6e74f24f1097fe587d0 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 22:54:07 -0800 Subject: [PATCH 0592/1393] trying to unify cert formation in task_helpers --- task-impls/src/consensus.rs | 2 +- testing/src/task_helpers.rs | 54 ++++++++++++++++++++++--------------- 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 02f76e5009..20d4f9cc98 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -316,7 +316,7 @@ impl, A: ConsensusApi + error!( // Sishan TODO: change to debug level later "Sending vote to next quorum leader {:?}", - vote.get_view_number() + vote.get_view_number() + 1 ); self.event_stream .publish(HotShotEvent::QuorumVoteSend(vote)) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 1e7a9c68c9..49bfa1334a 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -5,7 +5,7 @@ use crate::{ node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; -use commit::Committable; +use commit::{Commitment, Committable}; use ethereum_types::U256; use hotshot::{ types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, @@ -120,7 +120,7 @@ pub fn build_assembled_sig< VOTE: Vote, CERT: Certificate, >( - leaf: Leaf, + leaf_commitment: Commitment>, membership: &TYPES::Membership, view: TYPES::Time, ) -> ::QCType { @@ -140,7 +140,7 @@ pub fn build_assembled_sig< let (private_key, public_key) = key_pair_for_id(node_id.try_into().unwrap()); let vote = QuorumVote::::create_signed_vote( QuorumData { - leaf_commit: leaf.commit(), + leaf_commit: leaf_commitment, }, view, &public_key, @@ -163,19 +163,21 @@ pub fn build_assembled_sig< } pub fn build_qc< - TYPES: NodeType, + TYPES: NodeType, VOTE: Vote>, CERT: Certificate, >( - real_qc_sig: ::QCType, - leaf: Leaf, + leaf_commitment: Commitment>, + membership: &TYPES::Membership, view: TYPES::Time, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, ) -> CERT { + let real_qc_sig = build_assembled_sig::(leaf_commitment, membership, view); + let vote = QuorumVote::::create_signed_vote( QuorumData { - leaf_commit: leaf.commit(), + leaf_commit: leaf_commitment, }, view, public_key, @@ -190,7 +192,7 @@ pub fn build_qc< cert } -pub fn build_dac< +pub fn build_assembled_sig_da< TYPES: NodeType, VOTE: Vote, CERT: Certificate, @@ -198,9 +200,7 @@ pub fn build_dac< payload_commitment: VidCommitment, membership: &TYPES::Membership, view: TYPES::Time, - public_key: &TYPES::SignatureKey, - private_key: &::PrivateKey, -) -> CERT { +) -> ::QCType { let stake_table = membership.get_committee_qc_stake_table(); let real_qc_pp: ::QCParams = ::get_public_parameter( @@ -214,7 +214,8 @@ pub fn build_dac< // calculate vote for node_id in 0..total_nodes { let (private_key_i, public_key_i) = key_pair_for_id(node_id.try_into().unwrap()); - let da_vote: SimpleVote = DAVote::create_signed_vote( + // DAVote = SimpleVote + let da_vote: SimpleVote = SimpleVote::::create_signed_vote( DAData { payload_commit: payload_commitment, }, @@ -235,6 +236,23 @@ pub fn build_dac< &sig_lists[..], ); + real_qc_sig +} + +pub fn build_dac< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate, +>( + payload_commitment: VidCommitment, + membership: &TYPES::Membership, + view: TYPES::Time, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, +) -> CERT { + let real_qc_sig = + build_assembled_sig_da::(payload_commitment, membership, view); + let vote: SimpleVote = DAVote::create_signed_vote( DAData { payload_commit: payload_commitment, @@ -318,16 +336,10 @@ async fn build_quorum_proposal_and_signature( ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - let created_assembled_sig = - build_assembled_sig::, QuorumCertificate>( - leaf.clone(), - &quorum_membership, - ViewNumber::new(cur_view - 1), - ); let created_qc = build_qc::, QuorumCertificate>( - created_assembled_sig, - leaf.clone(), - ViewNumber::new(1), + leaf.commit(), + &quorum_membership, + ViewNumber::new(cur_view - 1), public_key, private_key, ); From 3fa2e750d88d5905596e7b2e17c70b385d7589a3 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 23:34:49 -0800 Subject: [PATCH 0593/1393] unify build_assembled_sig for quorumdata and dadata --- testing/src/task_helpers.rs | 84 ++++++++------------------------- testing/tests/consensus_task.rs | 7 ++- 2 files changed, 25 insertions(+), 66 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 49bfa1334a..5aaf88e04d 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -15,7 +15,7 @@ use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, VidCommitment, VidScheme, ViewNumber}, + data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::Proposal, simple_certificate::QuorumCertificate, simple_vote::{DAData, DAVote, SimpleVote}, @@ -43,6 +43,9 @@ use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use hotshot_utils::bincode::bincode_opts; +use std::{fmt::Debug, hash::Hash}; +use serde::Serialize; + pub async fn build_system_handle( node_id: u64, ) -> ( @@ -115,53 +118,6 @@ pub async fn build_system_handle( .expect("Could not init hotshot") } -pub fn build_assembled_sig< - TYPES: NodeType, - VOTE: Vote, - CERT: Certificate, ->( - leaf_commitment: Commitment>, - membership: &TYPES::Membership, - view: TYPES::Time, -) -> ::QCType { - // Assemble QC - let stake_table = membership.get_committee_qc_stake_table(); - let real_qc_pp: ::QCParams = - ::get_public_parameter( - stake_table.clone(), - U256::from(CERT::threshold(membership)), - ); - let total_nodes = stake_table.len(); - let signers = bitvec![1; total_nodes]; - let mut sig_lists = Vec::new(); - - // calculate vote - for node_id in 0..total_nodes { - let (private_key, public_key) = key_pair_for_id(node_id.try_into().unwrap()); - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf_commitment, - }, - view, - &public_key, - &private_key, - ); - let original_signature: ::PureAssembledSignatureType = - bincode_opts() - .deserialize(&vote.get_signature().0) - .expect("Deserialization on the signature shouldn't be able to fail."); - sig_lists.push(original_signature); - } - - let real_qc_sig = ::assemble( - &real_qc_pp, - signers.as_bitslice(), - &sig_lists[..], - ); - - real_qc_sig -} - pub fn build_qc< TYPES: NodeType, VOTE: Vote>, @@ -173,7 +129,11 @@ pub fn build_qc< public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, ) -> CERT { - let real_qc_sig = build_assembled_sig::(leaf_commitment, membership, view); + let quorum_data = QuorumData { + leaf_commit: leaf_commitment, + }; + let real_qc_sig = + build_assembled_sig::>(quorum_data.clone(), membership, view); let vote = QuorumVote::::create_signed_vote( QuorumData { @@ -192,12 +152,13 @@ pub fn build_qc< cert } -pub fn build_assembled_sig_da< +pub fn build_assembled_sig< TYPES: NodeType, - VOTE: Vote, + VOTE: Vote, CERT: Certificate, + DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, >( - payload_commitment: VidCommitment, + data: DATAType, membership: &TYPES::Membership, view: TYPES::Time, ) -> ::QCType { @@ -211,21 +172,18 @@ pub fn build_assembled_sig_da< let signers = bitvec![1; total_nodes]; let mut sig_lists = Vec::new(); - // calculate vote + // assemble the vote for node_id in 0..total_nodes { let (private_key_i, public_key_i) = key_pair_for_id(node_id.try_into().unwrap()); - // DAVote = SimpleVote - let da_vote: SimpleVote = SimpleVote::::create_signed_vote( - DAData { - payload_commit: payload_commitment, - }, + let vote: SimpleVote = SimpleVote::::create_signed_vote( + data.clone(), view, &public_key_i, &private_key_i, ); let original_signature: ::PureAssembledSignatureType = bincode_opts() - .deserialize(&da_vote.get_signature().0) + .deserialize(&vote.get_signature().0) .expect("Deserialization on the signature shouldn't be able to fail."); sig_lists.push(original_signature); } @@ -244,19 +202,17 @@ pub fn build_dac< VOTE: Vote, CERT: Certificate, >( - payload_commitment: VidCommitment, + data: DAData, membership: &TYPES::Membership, view: TYPES::Time, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, ) -> CERT { let real_qc_sig = - build_assembled_sig_da::(payload_commitment, membership, view); + build_assembled_sig::(data.clone(), membership, view); let vote: SimpleVote = DAVote::create_signed_vote( - DAData { - payload_commit: payload_commitment, - }, + data, view, public_key, private_key, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 87b5ae7eba..23dc1d943c 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -206,6 +206,7 @@ async fn test_consensus_with_vid() { use hotshot_testing::task_helpers::vid_init; use hotshot_types::data::VidSchemeTrait; use hotshot_types::simple_certificate::DACertificate; + use hotshot_types::simple_vote::DAData; use hotshot_types::simple_vote::DAVote; use hotshot_types::traits::block_contents::vid_commitment; use hotshot_types::traits::state::TestableBlock; @@ -265,9 +266,11 @@ async fn test_consensus_with_vid() { &block.encode().unwrap().collect(), quorum_membership.total_nodes(), ); - + let da_data = DAData { + payload_commit: da_payload_commitment, + }; let dac_view2 = build_dac::, DACertificate>( - da_payload_commitment, + da_data, &quorum_membership, ViewNumber::new(2), &public_key_view2, From 3ef92b793735b97c098e38dea05c1b28c3349e0d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 23:35:26 -0800 Subject: [PATCH 0594/1393] lint --- testing/src/task_helpers.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 5aaf88e04d..6e1122d848 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -43,8 +43,8 @@ use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use hotshot_utils::bincode::bincode_opts; -use std::{fmt::Debug, hash::Hash}; use serde::Serialize; +use std::{fmt::Debug, hash::Hash}; pub async fn build_system_handle( node_id: u64, @@ -132,8 +132,11 @@ pub fn build_qc< let quorum_data = QuorumData { leaf_commit: leaf_commitment, }; - let real_qc_sig = - build_assembled_sig::>(quorum_data.clone(), membership, view); + let real_qc_sig = build_assembled_sig::>( + quorum_data.clone(), + membership, + view, + ); let vote = QuorumVote::::create_signed_vote( QuorumData { @@ -211,12 +214,8 @@ pub fn build_dac< let real_qc_sig = build_assembled_sig::(data.clone(), membership, view); - let vote: SimpleVote = DAVote::create_signed_vote( - data, - view, - public_key, - private_key, - ); + let vote: SimpleVote = + DAVote::create_signed_vote(data, view, public_key, private_key); let cert = CERT::create_signed_certificate( vote.get_data_commitment(), vote.get_data().clone(), From 6b47fcecd4ed6aba306e006a081cd3be799f6a3b Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 20 Dec 2023 23:54:54 -0800 Subject: [PATCH 0595/1393] unify build_cert in task_helpers.rs --- testing/src/task_helpers.rs | 68 ++++++++++----------------------- testing/tests/consensus_task.rs | 21 +++++----- 2 files changed, 31 insertions(+), 58 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 6e1122d848..c388102ce6 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -5,7 +5,7 @@ use crate::{ node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; -use commit::{Commitment, Committable}; +use commit::Committable; use ethereum_types::U256; use hotshot::{ types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, @@ -18,7 +18,7 @@ use hotshot_types::{ data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, message::Proposal, simple_certificate::QuorumCertificate, - simple_vote::{DAData, DAVote, SimpleVote}, + simple_vote::SimpleVote, traits::{ block_contents::vid_commitment, block_contents::BlockHeader, @@ -118,34 +118,23 @@ pub async fn build_system_handle( .expect("Could not init hotshot") } -pub fn build_qc< +pub fn build_cert< TYPES: NodeType, - VOTE: Vote>, + DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, + VOTE: Vote, CERT: Certificate, >( - leaf_commitment: Commitment>, + data: DATAType, membership: &TYPES::Membership, view: TYPES::Time, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, ) -> CERT { - let quorum_data = QuorumData { - leaf_commit: leaf_commitment, - }; - let real_qc_sig = build_assembled_sig::>( - quorum_data.clone(), - membership, - view, - ); + let real_qc_sig = + build_assembled_sig::(data.clone(), membership, view); - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf_commitment, - }, - view, - public_key, - private_key, - ); + let vote = + SimpleVote::::create_signed_vote(data, view, public_key, private_key); let cert = CERT::create_signed_certificate( vote.get_data_commitment(), vote.get_data().clone(), @@ -200,31 +189,6 @@ pub fn build_assembled_sig< real_qc_sig } -pub fn build_dac< - TYPES: NodeType, - VOTE: Vote, - CERT: Certificate, ->( - data: DAData, - membership: &TYPES::Membership, - view: TYPES::Time, - public_key: &TYPES::SignatureKey, - private_key: &::PrivateKey, -) -> CERT { - let real_qc_sig = - build_assembled_sig::(data.clone(), membership, view); - - let vote: SimpleVote = - DAVote::create_signed_vote(data, view, public_key, private_key); - let cert = CERT::create_signed_certificate( - vote.get_data_commitment(), - vote.get_data().clone(), - real_qc_sig, - vote.get_view_number(), - ); - cert -} - async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, @@ -291,8 +255,16 @@ async fn build_quorum_proposal_and_signature( ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - let created_qc = build_qc::, QuorumCertificate>( - leaf.commit(), + let quorum_data = QuorumData { + leaf_commit: leaf.commit(), + }; + let created_qc = build_cert::< + TestTypes, + QuorumData, + QuorumVote, + QuorumCertificate, + >( + quorum_data, &quorum_membership, ViewNumber::new(cur_view - 1), public_key, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 23dc1d943c..74bed5c249 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -201,7 +201,7 @@ async fn test_consensus_with_vid() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::block_types::TestBlockPayload; use hotshot_testing::block_types::TestTransaction; - use hotshot_testing::task_helpers::build_dac; + use hotshot_testing::task_helpers::build_cert; use hotshot_testing::task_helpers::build_system_handle; use hotshot_testing::task_helpers::vid_init; use hotshot_types::data::VidSchemeTrait; @@ -269,14 +269,15 @@ async fn test_consensus_with_vid() { let da_data = DAData { payload_commit: da_payload_commitment, }; - let dac_view2 = build_dac::, DACertificate>( - da_data, - &quorum_membership, - ViewNumber::new(2), - &public_key_view2, - &private_key_view2, - ); - input.push(HotShotEvent::DACRecv(dac_view2.clone())); + let created_dac_view2 = + build_cert::, DACertificate>( + da_data, + &quorum_membership, + ViewNumber::new(2), + &public_key_view2, + &private_key_view2, + ); + input.push(HotShotEvent::DACRecv(created_dac_view2.clone())); input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader @@ -289,7 +290,7 @@ async fn test_consensus_with_vid() { HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), 1, ); - output.insert(HotShotEvent::DACRecv(dac_view2), 1); + output.insert(HotShotEvent::DACRecv(created_dac_view2), 1); output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { From 32c36404e361c8f03fc26f654c63f6b321af120d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 21 Dec 2023 00:03:37 -0800 Subject: [PATCH 0596/1393] recomment vid-related test and code, until 2236 solved --- task-impls/src/consensus.rs | 25 +++++++++++-------------- testing/tests/consensus_task.rs | 2 +- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 20d4f9cc98..a62416bf5c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -236,15 +236,14 @@ impl, A: ConsensusApi + // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 // Only vote if you has seen the VID share for this view - // Sishan TODO: comment this for now - if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { - } else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - proposal.view_number - ); - return false; - } + // if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { + // } else { + // debug!( + // "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + // proposal.view_number + // ); + // return false; + // } // Only vote if you have the DA cert // ED Need to update the view number this is stored under? @@ -313,8 +312,7 @@ impl, A: ConsensusApi + }; if let GeneralConsensusMessage::Vote(vote) = message { - error!( - // Sishan TODO: change to debug level later + debug!( "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); @@ -424,7 +422,7 @@ impl, A: ConsensusApi + match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( - "Receved Quorum Propsoal for view {}", + "Received Quorum Proposal for view {}", *proposal.data.view_number ); @@ -964,8 +962,7 @@ impl, A: ConsensusApi + // Add to the storage that we have received the VID disperse for a specific view // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 - // Sishan TODO: comment this for now - self.vid_shares.insert(view, disperse); + // self.vid_shares.insert(view, disperse); } HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 74bed5c249..5285bdc699 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -196,7 +196,7 @@ async fn test_consensus_vote() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 -// #[ignore] // Sishan TODO: comment this later +#[ignore] async fn test_consensus_with_vid() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::block_types::TestBlockPayload; From 85fde6a714983f906f3dde23201677a2b3d28d58 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 21 Dec 2023 08:36:19 -0800 Subject: [PATCH 0597/1393] solve merge conflicts --- testing/tests/consensus_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ce1359e381..88997e2e0e 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -313,5 +313,5 @@ async fn test_consensus_with_vid() { add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) }; - run_harness(input, output, None, build_fn).await; + run_harness(input, output, None, build_fn, false).await; } From 49da4971b5e6cfb2bba76c6a4c0fecce0b36b949 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 21 Dec 2023 14:10:23 -0500 Subject: [PATCH 0598/1393] send dac after skipped view --- task-impls/src/transactions.rs | 14 ++++++++-- testing/tests/basic.rs | 51 ++++++++++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 5 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index bb64562052..625c731cad 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -169,12 +169,15 @@ impl, A: ConsensusApi + return None; } + let mut make_block = false; if *view - *self.cur_view > 1 { - warn!("View changed by more than 1 going to view {:?}", view); + error!("View changed by more than 1 going to view {:?}", view); + make_block = self.membership.get_leader(view) == self.public_key } self.cur_view = view; - if self.membership.get_leader(self.cur_view + 1) != self.public_key { + // return if we aren't the next leader or we skipped last view and aren't the current leader. + if !make_block && self.membership.get_leader(self.cur_view + 1) != self.public_key { return None; } @@ -228,11 +231,16 @@ impl, A: ConsensusApi + }; // send the sequenced transactions to VID and DA tasks + let block_view = if make_block { + view + } else { + view + 1 + }; self.event_stream .publish(HotShotEvent::TransactionsSequenced( encoded_transactions, metadata, - view + 1, + block_view, )) .await; diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 217f6b91e7..60a3eba38b 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -110,8 +110,7 @@ async fn test_with_failures_half_f() { node_changes: vec![(5, dead_nodes)], }; - // TODO: this should only have 3 failures for each down leader, investigate why it fails additional views - metadata.overall_safety_properties.num_failed_views = 8; + metadata.overall_safety_properties.num_failed_views = 3; // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; metadata @@ -182,3 +181,51 @@ async fn test_with_failures_f() { .run_test() .await; } + +/// Test that a good leader can succeed in the view directly after view sync +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_2() { + use hotshot_testing::{ + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 2; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 25; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} From d2ea4b0c6f5631b64f31debc1523c56956fbf5b5 Mon Sep 17 00:00:00 2001 From: Rob Date: Thu, 21 Dec 2023 15:34:09 -0500 Subject: [PATCH 0599/1393] fix polling --- task-impls/src/view_sync.rs | 4 ++++ testing/tests/basic.rs | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 8e893c552e..9404ff94ff 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -739,6 +739,10 @@ impl, A: ConsensusApi + *self.next_view ); + self.event_stream + .publish(HotShotEvent::ViewChange(self.next_view-1)) + .await; + self.event_stream .publish(HotShotEvent::ViewChange(self.next_view)) .await; diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 60a3eba38b..78fe3e5c3e 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -191,7 +191,7 @@ async fn test_with_failures_f() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_2() { use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, + node_types::{WebImpl, TestTypes}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; @@ -224,7 +224,7 @@ async fn test_with_failures_2() { // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 25; metadata - .gen_launcher::(0) + .gen_launcher::(0) .launch() .run_test() .await; From a38bd8065682b61738eb8055cc775a4439b9839d Mon Sep 17 00:00:00 2001 From: Rob Date: Thu, 21 Dec 2023 15:37:18 -0500 Subject: [PATCH 0600/1393] lint --- task-impls/src/transactions.rs | 8 ++------ task-impls/src/view_sync.rs | 2 +- testing/tests/basic.rs | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 625c731cad..7b6df7300a 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -172,7 +172,7 @@ impl, A: ConsensusApi + let mut make_block = false; if *view - *self.cur_view > 1 { error!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.get_leader(view) == self.public_key + make_block = self.membership.get_leader(view) == self.public_key; } self.cur_view = view; @@ -231,11 +231,7 @@ impl, A: ConsensusApi + }; // send the sequenced transactions to VID and DA tasks - let block_view = if make_block { - view - } else { - view + 1 - }; + let block_view = if make_block { view } else { view + 1 }; self.event_stream .publish(HotShotEvent::TransactionsSequenced( encoded_transactions, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 9404ff94ff..922d6fa025 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -740,7 +740,7 @@ impl, A: ConsensusApi + ); self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view-1)) + .publish(HotShotEvent::ViewChange(self.next_view - 1)) .await; self.event_stream diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 78fe3e5c3e..ab2c768841 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -191,7 +191,7 @@ async fn test_with_failures_f() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_2() { use hotshot_testing::{ - node_types::{WebImpl, TestTypes}, + node_types::{TestTypes, WebImpl}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; From 259b99bb1fa513a2b644b209c7bfea9710043b62 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 21 Dec 2023 16:53:45 -0500 Subject: [PATCH 0601/1393] wip --- hotshot/src/tasks/mod.rs | 1 + task-impls/src/consensus.rs | 52 ++--- task-impls/src/events.rs | 1 + task-impls/src/transactions.rs | 8 +- task-impls/src/vid.rs | 1 + task-impls/src/view_sync.rs | 2 +- testing/tests/basic.rs | 357 +++++++++++++++++---------------- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- 9 files changed, 215 insertions(+), 211 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9bf6200255..9ab1fb3063 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -228,6 +228,7 @@ pub async fn add_consensus_task>( vote_collector: None, timeout_vote_collector: None, timeout_task: None, + timeout_cert: None, event_stream: event_stream.clone(), output_event_stream: output_stream, vid_shares: HashMap::new(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1e7d6564fb..9e5c5be25d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -108,6 +108,9 @@ pub struct ConsensusTaskState< /// timeout task handle pub timeout_task: Option>, + /// last Timeout Certificate this node formed + pub timeout_cert: Option>, + /// Global events stream to publish events pub event_stream: ChannelStream>, @@ -159,7 +162,7 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. - async fn vote_if_able(&self) -> bool { + async fn vote_if_able(&mut self) -> bool { if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", @@ -215,6 +218,8 @@ impl, A: ConsensusApi + &self.public_key, &self.private_key, ); + + self.payload_commitment_and_metadata = None; let message = GeneralConsensusMessage::::Vote(vote); if let GeneralConsensusMessage::Vote(vote) = message { @@ -293,7 +298,7 @@ impl, A: ConsensusApi + && cert.get_data().payload_commit != proposal.block_header.payload_commitment() { - error!("Block payload commitment does not equal parent commitment"); + error!("Block payload commitment does not equal da cert payload commitment. View = {}", *view); return false; } let vote = QuorumVote::::create_signed_vote( @@ -417,7 +422,7 @@ impl, A: ConsensusApi + pub async fn handle_event(&mut self, event: HotShotEvent) { match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { - debug!( + error!( "Receved Quorum Proposal for view {}", *proposal.data.view_number ); @@ -729,7 +734,7 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) + self.publish_proposal_if_able(qc.view_number + 1, None) .await; } if !self.vote_if_able().await { @@ -839,6 +844,7 @@ impl, A: ConsensusApi + debug!("QC Formed event happened!"); if let either::Right(qc) = cert.clone() { + self.timeout_cert = Some(qc.clone()); // cancel poll for votes self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( @@ -846,21 +852,16 @@ impl, A: ConsensusApi + )) .await; - debug!( + error!( "Attempting to publish proposal after forming a TC for view {}", *qc.view_number ); let view = qc.view_number + 1; - let high_qc = self.consensus.read().await.high_qc.clone(); - - if self - .publish_proposal_if_able(high_qc, view, Some(qc.clone())) - .await - { + if self.publish_proposal_if_able(view, Some(qc.clone())).await { } else { - warn!("Wasn't able to publish proposal"); + error!("Wasn't able to publish proposal"); } } if let either::Left(qc) = cert { @@ -881,7 +882,7 @@ impl, A: ConsensusApi + ); if !self - .publish_proposal_if_able(qc.clone(), qc.view_number + 1, None) + .publish_proposal_if_able(qc.view_number + 1, None) .await { debug!( @@ -891,7 +892,7 @@ impl, A: ConsensusApi + } } HotShotEvent::DACRecv(cert) => { - debug!("DAC Recved for view ! {}", *cert.view_number); + error!("DAC Recved for view ! {}", *cert.view_number); let view = cert.view_number; self.quorum_network @@ -1033,13 +1034,15 @@ impl, A: ConsensusApi + let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata) => { + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata, view) => { + error!("got commit and meta {:?}", payload_commitment); self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); - let high_qc = self.consensus.read().await.high_qc.clone(); - let leader_view = high_qc.get_view_number() + 1; - if self.quorum_membership.get_leader(leader_view) == self.public_key { - self.publish_proposal_if_able(high_qc, leader_view, None) - .await; + if let Some(proposal) = &self.current_proposal { + if self.quorum_membership.get_leader(view) == self.public_key + && proposal.get_view_number() + 1 == view + { + self.publish_proposal_if_able(view, None).await; + } } } _ => {} @@ -1050,7 +1053,6 @@ impl, A: ConsensusApi + #[allow(clippy::too_many_lines)] pub async fn publish_proposal_if_able( &mut self, - _qc: QuorumCertificate, view: TYPES::Time, timeout_certificate: Option>, ) -> bool { @@ -1150,7 +1152,7 @@ impl, A: ConsensusApi + signature, _pd: PhantomData, }; - debug!( + error!( "Sending proposal for view {:?} \n {:?}", leaf.view_number, "" ); @@ -1163,7 +1165,7 @@ impl, A: ConsensusApi + self.payload_commitment_and_metadata = None; return true; } - debug!("Cannot propose because we don't have the VID payload commitment and metadata"); + error!("Cannot propose because we don't have the VID payload commitment and metadata"); false } } @@ -1210,10 +1212,10 @@ pub fn consensus_event_filter(event: &HotShotEvent) -> b | HotShotEvent::QCFormed(_) | HotShotEvent::DACRecv(_) | HotShotEvent::ViewChange(_) - | HotShotEvent::SendPayloadCommitmentAndMetadata(_, _) + | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::Timeout(_) | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::VidDisperseRecv(_, _) + | HotShotEvent::VidDisperseRecv(..) | HotShotEvent::Shutdown, ) } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9409b34067..4c2d23010b 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -91,6 +91,7 @@ pub enum HotShotEvent { SendPayloadCommitmentAndMetadata( VidCommitment, ::Metadata, + TYPES::Time, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number TransactionsSequenced( diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 625c731cad..7b6df7300a 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -172,7 +172,7 @@ impl, A: ConsensusApi + let mut make_block = false; if *view - *self.cur_view > 1 { error!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.get_leader(view) == self.public_key + make_block = self.membership.get_leader(view) == self.public_key; } self.cur_view = view; @@ -231,11 +231,7 @@ impl, A: ConsensusApi + }; // send the sequenced transactions to VID and DA tasks - let block_view = if make_block { - view - } else { - view + 1 - }; + let block_view = if make_block { view } else { view + 1 }; self.event_stream .publish(HotShotEvent::TransactionsSequenced( encoded_transactions, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 47c3f2e783..7607b7af57 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -98,6 +98,7 @@ impl, A: ConsensusApi + .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.commit, metadata, + view_number, )) .await; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 9404ff94ff..922d6fa025 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -740,7 +740,7 @@ impl, A: ConsensusApi + ); self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view-1)) + .publish(HotShotEvent::ViewChange(self.next_view - 1)) .await; self.event_stream diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 78fe3e5c3e..378afb3883 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -1,186 +1,186 @@ -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_success() { - use hotshot_testing::{ - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, - test_builder::TestMetadata, - }; - use std::time::Duration; +// #[cfg(test)] +// #[cfg_attr( +// async_executor_impl = "tokio", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// async fn test_success() { +// use hotshot_testing::{ +// completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, +// node_types::{MemoryImpl, TestTypes}, +// test_builder::TestMetadata, +// }; +// use std::time::Duration; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - ..TestMetadata::default() - }; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} +// async_compatibility_layer::logging::setup_logging(); +// async_compatibility_layer::logging::setup_backtrace(); +// let metadata = TestMetadata { +// // allow more time to pass in CI +// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( +// TimeBasedCompletionTaskDescription { +// duration: Duration::from_secs(60), +// }, +// ), +// ..TestMetadata::default() +// }; +// metadata +// .gen_launcher::(0) +// .launch() +// .run_test() +// .await; +// } -/// Test one node leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_one() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; +// /// Test one node leaving the network. +// #[cfg(test)] +// #[cfg_attr( +// async_executor_impl = "tokio", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// async fn test_with_failures_one() { +// use hotshot_testing::{ +// node_types::{MemoryImpl, TestTypes}, +// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, +// test_builder::TestMetadata, +// }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ChangeNode { - idx: 19, - updown: UpDown::Down, - }]; +// async_compatibility_layer::logging::setup_logging(); +// async_compatibility_layer::logging::setup_backtrace(); +// let mut metadata = TestMetadata::default_more_nodes(); +// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the +// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the +// // following issue. +// // TODO: Update message broadcasting to avoid hanging +// // +// let dead_nodes = vec![ChangeNode { +// idx: 19, +// updown: UpDown::Down, +// }]; - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - metadata.overall_safety_properties.num_failed_views = 3; - metadata.overall_safety_properties.num_successful_views = 25; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} +// metadata.spinning_properties = SpinningTaskDescription { +// node_changes: vec![(5, dead_nodes)], +// }; +// metadata.overall_safety_properties.num_failed_views = 3; +// metadata.overall_safety_properties.num_successful_views = 25; +// metadata +// .gen_launcher::(0) +// .launch() +// .run_test() +// .await; +// } -/// Test f/2 nodes leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_half_f() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; +// /// Test f/2 nodes leaving the network. +// #[cfg(test)] +// #[cfg_attr( +// async_executor_impl = "tokio", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// async fn test_with_failures_half_f() { +// use hotshot_testing::{ +// node_types::{MemoryImpl, TestTypes}, +// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, +// test_builder::TestMetadata, +// }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; +// async_compatibility_layer::logging::setup_logging(); +// async_compatibility_layer::logging::setup_backtrace(); +// let mut metadata = TestMetadata::default_more_nodes(); +// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the +// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the +// // following issue. +// // TODO: Update message broadcasting to avoid hanging +// // +// let dead_nodes = vec![ +// ChangeNode { +// idx: 17, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 18, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 19, +// updown: UpDown::Down, +// }, +// ]; - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; +// metadata.spinning_properties = SpinningTaskDescription { +// node_changes: vec![(5, dead_nodes)], +// }; - metadata.overall_safety_properties.num_failed_views = 3; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} +// metadata.overall_safety_properties.num_failed_views = 3; +// // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts +// metadata.overall_safety_properties.num_successful_views = 22; +// metadata +// .gen_launcher::(0) +// .launch() +// .run_test() +// .await; +// } -/// Test f nodes leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_f() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; +// /// Test f nodes leaving the network. +// #[cfg(test)] +// #[cfg_attr( +// async_executor_impl = "tokio", +// tokio::test(flavor = "multi_thread", worker_threads = 2) +// )] +// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +// async fn test_with_failures_f() { +// use hotshot_testing::{ +// node_types::{MemoryImpl, TestTypes}, +// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, +// test_builder::TestMetadata, +// }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 6; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 14, - updown: UpDown::Down, - }, - ChangeNode { - idx: 15, - updown: UpDown::Down, - }, - ChangeNode { - idx: 16, - updown: UpDown::Down, - }, - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; +// async_compatibility_layer::logging::setup_logging(); +// async_compatibility_layer::logging::setup_backtrace(); +// let mut metadata = TestMetadata::default_more_nodes(); +// metadata.overall_safety_properties.num_failed_views = 6; +// // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts +// metadata.overall_safety_properties.num_successful_views = 22; +// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the +// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the +// // following issue. +// // TODO: Update message broadcasting to avoid hanging +// // +// let dead_nodes = vec![ +// ChangeNode { +// idx: 14, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 15, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 16, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 17, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 18, +// updown: UpDown::Down, +// }, +// ChangeNode { +// idx: 19, +// updown: UpDown::Down, +// }, +// ]; - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} +// metadata.spinning_properties = SpinningTaskDescription { +// node_changes: vec![(5, dead_nodes)], +// }; +// metadata +// .gen_launcher::(0) +// .launch() +// .run_test() +// .await; +// } /// Test that a good leader can succeed in the view directly after view sync #[cfg(test)] @@ -191,7 +191,7 @@ async fn test_with_failures_f() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_2() { use hotshot_testing::{ - node_types::{WebImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, WebImpl}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; @@ -199,6 +199,9 @@ async fn test_with_failures_2() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let mut metadata = TestMetadata::default_more_nodes(); + metadata.total_nodes = 12; + metadata.da_committee_size = 12; + metadata.start_nodes = 12; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -206,11 +209,11 @@ async fn test_with_failures_2() { // let dead_nodes = vec![ ChangeNode { - idx: 17, + idx: 10, updown: UpDown::Down, }, ChangeNode { - idx: 18, + idx: 11, updown: UpDown::Down, }, ]; @@ -222,9 +225,9 @@ async fn test_with_failures_2() { // 2 nodes fail triggering view sync, expect no other timeouts metadata.overall_safety_properties.num_failed_views = 2; // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 25; + metadata.overall_safety_properties.num_successful_views = 15; metadata - .gen_launcher::(0) + .gen_launcher::(0) .launch() .run_test() .await; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 5fe96444a0..20d3b964b0 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -129,7 +129,7 @@ async fn test_network_task() { ); output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 2); output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), 2, // 2 occurrences: both from the VID task ); output.insert( diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index bdfd595487..eb435e69ad 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -99,7 +99,7 @@ async fn test_vid_task() { ); output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, ()), + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), 1, ); output.insert( From cbdc95db9166f444f873085b3b7460c619e261f6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 21 Dec 2023 17:00:43 -0500 Subject: [PATCH 0602/1393] it works? --- task-impls/src/consensus.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9e5c5be25d..7af0770a05 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1044,6 +1044,11 @@ impl, A: ConsensusApi + self.publish_proposal_if_able(view, None).await; } } + if let Some(tc) = &self.timeout_cert { + if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { + self.publish_proposal_if_able(view, self.timeout_cert.clone()).await; + } + } } _ => {} } @@ -1147,6 +1152,7 @@ impl, A: ConsensusApi + proposer_id: leaf.proposer_id, }; + self.timeout_cert = None; let message = Proposal { data: proposal, signature, From 9c930bef898c39607466d419e1c0d33bf5efeed0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 21 Dec 2023 17:06:15 -0500 Subject: [PATCH 0603/1393] now it works? --- task-impls/src/consensus.rs | 10 ++++------ testing/tests/basic.rs | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7af0770a05..b074179a10 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1037,12 +1037,10 @@ impl, A: ConsensusApi + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata, view) => { error!("got commit and meta {:?}", payload_commitment); self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); - if let Some(proposal) = &self.current_proposal { - if self.quorum_membership.get_leader(view) == self.public_key - && proposal.get_view_number() + 1 == view - { - self.publish_proposal_if_able(view, None).await; - } + if self.quorum_membership.get_leader(view) == self.public_key + && self.consensus.read().await.high_qc.get_view_number() == view + { + self.publish_proposal_if_able(view, None).await; } if let Some(tc) = &self.timeout_cert { if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 378afb3883..cac98b9c36 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -227,7 +227,7 @@ async fn test_with_failures_2() { // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 15; metadata - .gen_launcher::(0) + .gen_launcher::(0) .launch() .run_test() .await; From 727cca7d9eadfddbecc4ce350ed7389396654056 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 21 Dec 2023 17:26:28 -0500 Subject: [PATCH 0604/1393] revert logging changes, add memory test --- task-impls/src/consensus.rs | 19 +- testing/tests/basic.rs | 419 ++++++++++++++++++++---------------- 2 files changed, 246 insertions(+), 192 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b074179a10..a5b767d66d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -852,7 +852,7 @@ impl, A: ConsensusApi + )) .await; - error!( + debug!( "Attempting to publish proposal after forming a TC for view {}", *qc.view_number ); @@ -861,7 +861,7 @@ impl, A: ConsensusApi + if self.publish_proposal_if_able(view, Some(qc.clone())).await { } else { - error!("Wasn't able to publish proposal"); + warn!("Wasn't able to publish proposal"); } } if let either::Left(qc) = cert { @@ -892,7 +892,7 @@ impl, A: ConsensusApi + } } HotShotEvent::DACRecv(cert) => { - error!("DAC Recved for view ! {}", *cert.view_number); + debug!("DAC Recved for view ! {}", *cert.view_number); let view = cert.view_number; self.quorum_network @@ -1035,7 +1035,7 @@ impl, A: ConsensusApi + consensus.metrics.number_of_timeouts.add(1); } HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata, view) => { - error!("got commit and meta {:?}", payload_commitment); + debug!("got commit and meta {:?}", payload_commitment); self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() == view @@ -1043,8 +1043,11 @@ impl, A: ConsensusApi + self.publish_proposal_if_able(view, None).await; } if let Some(tc) = &self.timeout_cert { - if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { - self.publish_proposal_if_able(view, self.timeout_cert.clone()).await; + if self.quorum_membership.get_leader(tc.get_view_number() + 1) + == self.public_key + { + self.publish_proposal_if_able(view, self.timeout_cert.clone()) + .await; } } } @@ -1156,7 +1159,7 @@ impl, A: ConsensusApi + signature, _pd: PhantomData, }; - error!( + debug!( "Sending proposal for view {:?} \n {:?}", leaf.view_number, "" ); @@ -1169,7 +1172,7 @@ impl, A: ConsensusApi + self.payload_commitment_and_metadata = None; return true; } - error!("Cannot propose because we don't have the VID payload commitment and metadata"); + debug!("Cannot propose because we don't have the VID payload commitment and metadata"); false } } diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index cac98b9c36..0a1cdae806 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -1,186 +1,186 @@ -// #[cfg(test)] -// #[cfg_attr( -// async_executor_impl = "tokio", -// tokio::test(flavor = "multi_thread", worker_threads = 2) -// )] -// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// async fn test_success() { -// use hotshot_testing::{ -// completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, -// node_types::{MemoryImpl, TestTypes}, -// test_builder::TestMetadata, -// }; -// use std::time::Duration; - -// async_compatibility_layer::logging::setup_logging(); -// async_compatibility_layer::logging::setup_backtrace(); -// let metadata = TestMetadata { -// // allow more time to pass in CI -// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( -// TimeBasedCompletionTaskDescription { -// duration: Duration::from_secs(60), -// }, -// ), -// ..TestMetadata::default() -// }; -// metadata -// .gen_launcher::(0) -// .launch() -// .run_test() -// .await; -// } - -// /// Test one node leaving the network. -// #[cfg(test)] -// #[cfg_attr( -// async_executor_impl = "tokio", -// tokio::test(flavor = "multi_thread", worker_threads = 2) -// )] -// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// async fn test_with_failures_one() { -// use hotshot_testing::{ -// node_types::{MemoryImpl, TestTypes}, -// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, -// test_builder::TestMetadata, -// }; - -// async_compatibility_layer::logging::setup_logging(); -// async_compatibility_layer::logging::setup_backtrace(); -// let mut metadata = TestMetadata::default_more_nodes(); -// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the -// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the -// // following issue. -// // TODO: Update message broadcasting to avoid hanging -// // -// let dead_nodes = vec![ChangeNode { -// idx: 19, -// updown: UpDown::Down, -// }]; - -// metadata.spinning_properties = SpinningTaskDescription { -// node_changes: vec![(5, dead_nodes)], -// }; -// metadata.overall_safety_properties.num_failed_views = 3; -// metadata.overall_safety_properties.num_successful_views = 25; -// metadata -// .gen_launcher::(0) -// .launch() -// .run_test() -// .await; -// } - -// /// Test f/2 nodes leaving the network. -// #[cfg(test)] -// #[cfg_attr( -// async_executor_impl = "tokio", -// tokio::test(flavor = "multi_thread", worker_threads = 2) -// )] -// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// async fn test_with_failures_half_f() { -// use hotshot_testing::{ -// node_types::{MemoryImpl, TestTypes}, -// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, -// test_builder::TestMetadata, -// }; - -// async_compatibility_layer::logging::setup_logging(); -// async_compatibility_layer::logging::setup_backtrace(); -// let mut metadata = TestMetadata::default_more_nodes(); -// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the -// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the -// // following issue. -// // TODO: Update message broadcasting to avoid hanging -// // -// let dead_nodes = vec![ -// ChangeNode { -// idx: 17, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 18, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 19, -// updown: UpDown::Down, -// }, -// ]; - -// metadata.spinning_properties = SpinningTaskDescription { -// node_changes: vec![(5, dead_nodes)], -// }; - -// metadata.overall_safety_properties.num_failed_views = 3; -// // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts -// metadata.overall_safety_properties.num_successful_views = 22; -// metadata -// .gen_launcher::(0) -// .launch() -// .run_test() -// .await; -// } - -// /// Test f nodes leaving the network. -// #[cfg(test)] -// #[cfg_attr( -// async_executor_impl = "tokio", -// tokio::test(flavor = "multi_thread", worker_threads = 2) -// )] -// #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// async fn test_with_failures_f() { -// use hotshot_testing::{ -// node_types::{MemoryImpl, TestTypes}, -// spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, -// test_builder::TestMetadata, -// }; - -// async_compatibility_layer::logging::setup_logging(); -// async_compatibility_layer::logging::setup_backtrace(); -// let mut metadata = TestMetadata::default_more_nodes(); -// metadata.overall_safety_properties.num_failed_views = 6; -// // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts -// metadata.overall_safety_properties.num_successful_views = 22; -// // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the -// // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the -// // following issue. -// // TODO: Update message broadcasting to avoid hanging -// // -// let dead_nodes = vec![ -// ChangeNode { -// idx: 14, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 15, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 16, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 17, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 18, -// updown: UpDown::Down, -// }, -// ChangeNode { -// idx: 19, -// updown: UpDown::Down, -// }, -// ]; - -// metadata.spinning_properties = SpinningTaskDescription { -// node_changes: vec![(5, dead_nodes)], -// }; -// metadata -// .gen_launcher::(0) -// .launch() -// .run_test() -// .await; -// } +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_success() { + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + node_types::{MemoryImpl, TestTypes}, + test_builder::TestMetadata, + }; + use std::time::Duration; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestMetadata::default() + }; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} + +/// Test one node leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_one() { + use hotshot_testing::{ + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ChangeNode { + idx: 19, + updown: UpDown::Down, + }]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + metadata.overall_safety_properties.num_failed_views = 3; + metadata.overall_safety_properties.num_successful_views = 25; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} + +/// Test f/2 nodes leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_half_f() { + use hotshot_testing::{ + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + + metadata.overall_safety_properties.num_failed_views = 3; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} + +/// Test f nodes leaving the network. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_f() { + use hotshot_testing::{ + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 14, + updown: UpDown::Down, + }, + ChangeNode { + idx: 15, + updown: UpDown::Down, + }, + ChangeNode { + idx: 16, + updown: UpDown::Down, + }, + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} /// Test that a good leader can succeed in the view directly after view sync #[cfg(test)] @@ -191,7 +191,58 @@ #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_with_failures_2() { use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes, WebImpl}, + node_types::{MemoryImpl, TestTypes}, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestMetadata, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata::default_more_nodes(); + metadata.total_nodes = 12; + metadata.da_committee_size = 12; + metadata.start_nodes = 12; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 10, + updown: UpDown::Down, + }, + ChangeNode { + idx: 11, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 2; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 15; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} + +/// Test that a good leader can succeed in the view directly after view sync +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_with_failures_2_web() { + use hotshot_testing::{ + node_types::{TestTypes, WebImpl}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestMetadata, }; From 76f234587eb400f7284ab2d924466b452bbb681b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 21 Dec 2023 17:50:26 -0500 Subject: [PATCH 0605/1393] fix logging again --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a5b767d66d..2a2afe834f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -422,7 +422,7 @@ impl, A: ConsensusApi + pub async fn handle_event(&mut self, event: HotShotEvent) { match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { - error!( + debug!( "Receved Quorum Proposal for view {}", *proposal.data.view_number ); From 0c00c53a056f8b2c118e278eb4cbe62951999c82 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 21 Dec 2023 15:27:53 -0800 Subject: [PATCH 0606/1393] wrap() to expect() for vote creation in test_view_sync_task --- testing/tests/view_sync_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index c740e07600..a1ec3c3630 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -35,7 +35,7 @@ async fn test_view_sync_task() { hotshot_types::traits::consensus_api::ConsensusSharedApi::public_key(&api), hotshot_types::traits::consensus_api::ConsensusSharedApi::private_key(&api), ) - .unwrap(); + .expect("Should be able to create a ViewSyncPreCommitVote."); tracing::error!("Vote in test is {:?}", vote.clone()); From 848bd97a273ae3e47f7b9a453475dff8380f92a4 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 21 Dec 2023 16:35:36 -0800 Subject: [PATCH 0607/1393] fully remove EncodedSignature --- types/src/traits/signature_key.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index e21587675f..25ebfdd7d2 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -22,17 +22,6 @@ use tagged_base64::tagged; )] pub struct EncodedPublicKey(#[debug(with = "custom_debug::hexbuf")] pub Vec); -/// Type saftey wrapper for byte encoded signature -#[derive( - Clone, custom_debug::Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, -)] -pub struct EncodedSignature(#[debug(with = "custom_debug::hexbuf")] pub Vec); - -impl AsRef<[u8]> for EncodedSignature { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } -} /// Type representing stake table entries in a `StakeTable` pub trait StakeTableEntryType { /// Get the stake value From cf4f91816b98beea7c2cd34530efae0848b9812d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 22 Dec 2023 10:26:20 -0500 Subject: [PATCH 0608/1393] Check if genesis before clearing payload and metadata --- task-impls/src/consensus.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 2c227164d6..9be47e7280 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -24,7 +24,7 @@ use hotshot_types::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ - block_contents::BlockHeader, + block_contents::{vid_commitment, BlockHeader}, consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, @@ -224,8 +224,17 @@ impl, A: ConsensusApi + &self.public_key, &self.private_key, ); - - self.payload_commitment_and_metadata = None; + if let Some((payload_commit, meta)) = &self.payload_commitment_and_metadata { + let (genesis_payload, genesis_meta) = + ::genesis(); + let genesis_commitment = vid_commitment( + &genesis_payload.encode().unwrap().collect(), + self.quorum_membership.total_nodes(), + ); + if meta == &genesis_meta && payload_commit == &genesis_commitment { + self.payload_commitment_and_metadata = None; + } + } let message = GeneralConsensusMessage::::Vote(vote); if let GeneralConsensusMessage::Vote(vote) = message { From 7a94cf8663690e045cf01d12d3bec63f516d3e49 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 22 Dec 2023 10:52:58 -0500 Subject: [PATCH 0609/1393] move vid calc after vote send --- task-impls/src/consensus.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9be47e7280..aa455154d0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -224,17 +224,7 @@ impl, A: ConsensusApi + &self.public_key, &self.private_key, ); - if let Some((payload_commit, meta)) = &self.payload_commitment_and_metadata { - let (genesis_payload, genesis_meta) = - ::genesis(); - let genesis_commitment = vid_commitment( - &genesis_payload.encode().unwrap().collect(), - self.quorum_membership.total_nodes(), - ); - if meta == &genesis_meta && payload_commit == &genesis_commitment { - self.payload_commitment_and_metadata = None; - } - } + let message = GeneralConsensusMessage::::Vote(vote); if let GeneralConsensusMessage::Vote(vote) = message { @@ -245,6 +235,17 @@ impl, A: ConsensusApi + self.event_stream .publish(HotShotEvent::QuorumVoteSend(vote)) .await; + if let Some((payload_commit, meta)) = &self.payload_commitment_and_metadata { + let (genesis_payload, genesis_meta) = + ::genesis(); + let genesis_commitment = vid_commitment( + &genesis_payload.encode().unwrap().collect(), + self.quorum_membership.total_nodes(), + ); + if meta == &genesis_meta && payload_commit == &genesis_commitment { + self.payload_commitment_and_metadata = None; + } + } return true; } } From c2647b45d1cbc9e8ac91fc9410508bbdedcb6e9f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 22 Dec 2023 11:42:05 -0500 Subject: [PATCH 0610/1393] add is_genesis flag to CommitmentAndMetadata --- hotshot/src/tasks/mod.rs | 10 ++++++++-- task-impls/src/consensus.rs | 33 +++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3c7c43da2f..50f89fd79f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -12,7 +12,9 @@ use hotshot_task::{ GeneratedStream, Merge, }; use hotshot_task_impls::{ - consensus::{consensus_event_filter, ConsensusTaskState, ConsensusTaskTypes}, + consensus::{ + consensus_event_filter, CommitmentAndMetadata, ConsensusTaskState, ConsensusTaskTypes, + }, da::{DATaskState, DATaskTypes}, events::HotShotEvent, network::{ @@ -222,7 +224,11 @@ pub async fn add_consensus_task>( consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), - payload_commitment_and_metadata: Some((payload_commitment, metadata)), + payload_commitment_and_metadata: Some(CommitmentAndMetadata { + commitment: payload_commitment, + metadata, + is_genesis: true, + }), api: c_api.clone(), _pd: PhantomData, vote_collector: None.into(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index aa455154d0..2609e86417 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -24,7 +24,7 @@ use hotshot_types::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ - block_contents::{vid_commitment, BlockHeader}, + block_contents::BlockHeader, consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, @@ -54,7 +54,14 @@ use tracing::{debug, error, info, instrument}; pub struct ConsensusTaskError {} /// Alias for the block payload commitment and the associated metadata. -type CommitmentAndMetadata = (VidCommitment, ::Metadata); +pub struct CommitmentAndMetadata { + /// Vid Commitment + pub commitment: VidCommitment, + /// Metadata for the block payload + pub metadata: ::Metadata, + /// Flag for if this data represents the genesis block + pub is_genesis: bool, +} /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; @@ -235,14 +242,8 @@ impl, A: ConsensusApi + self.event_stream .publish(HotShotEvent::QuorumVoteSend(vote)) .await; - if let Some((payload_commit, meta)) = &self.payload_commitment_and_metadata { - let (genesis_payload, genesis_meta) = - ::genesis(); - let genesis_commitment = vid_commitment( - &genesis_payload.encode().unwrap().collect(), - self.quorum_membership.total_nodes(), - ); - if meta == &genesis_meta && payload_commit == &genesis_commitment { + if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { + if commit_and_metadata.is_genesis { self.payload_commitment_and_metadata = None; } } @@ -1048,7 +1049,11 @@ impl, A: ConsensusApi + } HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata, view) => { debug!("got commit and meta {:?}", payload_commitment); - self.payload_commitment_and_metadata = Some((payload_commitment, metadata)); + self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { + commitment: payload_commitment, + metadata, + is_genesis: false, + }); if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() == view { @@ -1139,14 +1144,14 @@ impl, A: ConsensusApi + // TODO do some sort of sanity check on the view number that it matches decided } - if let Some((payload_commitment, metadata)) = &self.payload_commitment_and_metadata { + if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { let leaf = Leaf { view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: TYPES::BlockHeader::new( - *payload_commitment, - metadata.clone(), + commit_and_metadata.commitment, + commit_and_metadata.metadata.clone(), &parent_header, ), block_payload: None, From d6fb13bbd201f647d0d9682543c4bd396fe6cc53 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 22 Dec 2023 15:17:50 -0500 Subject: [PATCH 0611/1393] fix proposal logic (#2277) --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 2609e86417..05e7b9bf2f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1055,7 +1055,7 @@ impl, A: ConsensusApi + is_genesis: false, }); if self.quorum_membership.get_leader(view) == self.public_key - && self.consensus.read().await.high_qc.get_view_number() == view + && self.consensus.read().await.high_qc.get_view_number() + 1 == view { self.publish_proposal_if_able(view, None).await; } From e58c1c970701b86ad5ee75bf87328733e962c688 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 29 Dec 2023 14:08:06 -0500 Subject: [PATCH 0612/1393] fix clippy lints (#2280) --- hotshot/examples/infra/mod.rs | 2 +- hotshot/src/traits/networking.rs | 3 +-- .../src/traits/networking/combined_network.rs | 24 +++++++++---------- .../src/network/behaviours/dht/cache.rs | 16 ++++++------- libp2p-networking/src/network/node.rs | 4 +--- libp2p-networking/tests/common/mod.rs | 2 +- libp2p-networking/tests/counter.rs | 8 +++---- 7 files changed, 28 insertions(+), 31 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index ad3407b951..4d58de6f57 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -413,7 +413,7 @@ pub trait RunDA< block_size, } => { // this might be a obob - if let Some(leaf) = leaf_chain.get(0) { + if let Some(leaf) = leaf_chain.first() { info!("Decide event for leaf: {}", *leaf.view_number); let new_anchor = leaf.view_number; diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index dfc2100b74..f372f1c111 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -17,8 +17,7 @@ use std::{ use custom_debug::Debug; use hotshot_types::traits::metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}; pub use hotshot_types::traits::network::{ - ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, - NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, + FailedToSerializeSnafu, NetworkError, NetworkReliability, }; /// Contains several `NetworkingMetrics` that we're interested in from the networking interfaces diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 301f2ca89c..d5a0c0ae73 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -44,7 +44,7 @@ struct Cache { /// The maximum number of items to store in the cache capacity: usize, /// The cache itself - cache: HashSet, + inner: HashSet, /// The hashes of the messages in the cache, in order of insertion hashes: Vec, } @@ -54,14 +54,14 @@ impl Cache { fn new(capacity: usize) -> Self { Self { capacity, - cache: HashSet::with_capacity(capacity), + inner: HashSet::with_capacity(capacity), hashes: Vec::with_capacity(capacity), } } /// Insert a hash into the cache fn insert(&mut self, hash: u64) { - if self.cache.contains(&hash) { + if self.inner.contains(&hash) { return; } @@ -70,23 +70,23 @@ impl Cache { if over > 0 { for _ in 0..over { let hash = self.hashes.remove(0); - self.cache.remove(&hash); + self.inner.remove(&hash); } } - self.cache.insert(hash); + self.inner.insert(hash); self.hashes.push(hash); } /// Check if the cache contains a hash fn contains(&self, hash: u64) -> bool { - self.cache.contains(&hash) + self.inner.contains(&hash) } /// Get the number of items in the cache #[cfg(test)] fn len(&self) -> usize { - self.cache.len() + self.inner.len() } } @@ -393,12 +393,12 @@ mod test { cache.insert(2); cache.insert(3); cache.insert(4); - assert_eq!(cache.cache.len(), 3); + assert_eq!(cache.inner.len(), 3); assert_eq!(cache.hashes.len(), 3); - assert!(!cache.cache.contains(&1)); - assert!(cache.cache.contains(&2)); - assert!(cache.cache.contains(&3)); - assert!(cache.cache.contains(&4)); + assert!(!cache.inner.contains(&1)); + assert!(cache.inner.contains(&2)); + assert!(cache.inner.contains(&3)); + assert!(cache.inner.contains(&4)); assert!(!cache.hashes.contains(&1)); assert!(cache.hashes.contains(&2)); assert!(cache.hashes.contains(&3)); diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs index 602bb41e16..a278984222 100644 --- a/libp2p-networking/src/network/behaviours/dht/cache.rs +++ b/libp2p-networking/src/network/behaviours/dht/cache.rs @@ -65,7 +65,7 @@ pub struct Cache { config: Config, /// the cache for records (key -> value) - cache: Arc, Vec>>, + inner: Arc, Vec>>, /// the expiries for the dht cache, in order (expiry time -> key) expiries: Arc>>>, @@ -76,7 +76,7 @@ pub struct Cache { impl Cache { pub async fn new(config: Config) -> Self { let cache = Self { - cache: Arc::new(DashMap::new()), + inner: Arc::new(DashMap::new()), expiries: Arc::new(RwLock::new(BTreeMap::new())), config, disk_parity_delta: Arc::new(AtomicU32::new(0)), @@ -102,7 +102,7 @@ impl Cache { let now = SystemTime::now(); for (expiry, (key, value)) in cache { if now < expiry { - self.cache.insert(key.clone(), value); + self.inner.insert(key.clone(), value); self.expiries.write().await.insert(expiry, key); } } @@ -120,7 +120,7 @@ impl Cache { let mut cache_to_write = HashMap::new(); let expiries = self.expiries.read().await; for (expiry, key) in &*expiries { - if let Some(entry) = self.cache.get(key) { + if let Some(entry) = self.inner.get(key) { cache_to_write.insert(expiry, (key, entry.value().clone())); } else { tracing::warn!("key not found in cache: {:?}", key); @@ -149,7 +149,7 @@ impl Cache { while let Some((expires, key)) = expiries.pop_first() { if now > expires { - self.cache.remove(&key); + self.inner.remove(&key); removed += 1; } else { expiries.insert(expires, key); @@ -168,12 +168,12 @@ impl Cache { self.save_if_necessary().await; // get - self.cache.get(key) + self.inner.get(key) } pub async fn insert(&self, key: Vec, value: Vec) { // insert into cache and expiries - self.cache.insert(key.clone(), value); + self.inner.insert(key.clone(), value); self.expiries .write() .await @@ -241,7 +241,7 @@ mod test { // check that the cache and expiries are empty assert!(cache.expiries.read().await.is_empty()); - assert!(cache.cache.is_empty()); + assert!(cache.inner.is_empty()); } /// cache add test diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index e4c44ca401..c42530a562 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -5,9 +5,7 @@ pub use self::{ config::{ MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, }, - handle::{ - network_node_handle_error, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, - }, + handle::{network_node_handle_error, NetworkNodeHandle, NetworkNodeHandleError}, }; use super::{ diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index ca9e967319..067fa62cbd 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -190,7 +190,7 @@ pub async fn spin_up_swarms( .collect::>() ); - for (_idx, handle) in handles[0..num_of_nodes].iter().enumerate() { + for handle in handles[0..num_of_nodes].iter() { let to_share = bootstrap_addrs.clone(); handle .add_known_peers( diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index bb803cee02..7cfab40473 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -371,15 +371,15 @@ async fn run_request_response_increment_all( requestee_handle.modify_state(|s| *s += 1).await; info!("RR REQUESTEE IS {:?}", requestee_handle.peer_id()); let mut futs = Vec::new(); - for (_i, h) in handles.iter().enumerate() { - if h.lookup_pid(requestee_handle.peer_id()).await.is_err() { + for handle in handles.iter() { + if handle.lookup_pid(requestee_handle.peer_id()).await.is_err() { error!("ERROR LOOKING UP REQUESTEE ADDRS"); } // NOTE uncomment if debugging // let _ = h.print_routing_table().await; // skip `requestee_handle` - if h.peer_id() != requestee_handle.peer_id() { - let requester_handle = h.clone(); + if handle.peer_id() != requestee_handle.peer_id() { + let requester_handle = handle.clone(); futs.push(run_request_response_increment( requester_handle, requestee_handle.clone(), From ac5a7064674233d820f71eb60f09c94f72d7382b Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Fri, 29 Dec 2023 15:58:22 -0500 Subject: [PATCH 0613/1393] feat: move the testing crate --- testing-macros/Cargo.toml | 40 ++ testing-macros/src/lib.rs | 610 ++++++++++++++++++ testing-macros/tests/cross/failures.rs | 69 ++ testing-macros/tests/cross/random_tests.rs | 211 ++++++ testing-macros/tests/cross/smoke.rs | 29 + testing-macros/tests/integration/failures.rs | 75 +++ .../tests/integration/random_tests.rs | 1 + testing-macros/tests/integration/smoke.rs | 39 ++ testing-macros/tests/tests.rs | 11 + 9 files changed, 1085 insertions(+) create mode 100644 testing-macros/Cargo.toml create mode 100644 testing-macros/src/lib.rs create mode 100644 testing-macros/tests/cross/failures.rs create mode 100644 testing-macros/tests/cross/random_tests.rs create mode 100644 testing-macros/tests/cross/smoke.rs create mode 100644 testing-macros/tests/integration/failures.rs create mode 100644 testing-macros/tests/integration/random_tests.rs create mode 100644 testing-macros/tests/integration/smoke.rs create mode 100644 testing-macros/tests/tests.rs diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml new file mode 100644 index 0000000000..21d78db0f8 --- /dev/null +++ b/testing-macros/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "hotshot-testing-macros" +version = "0.1.0" +edition = "2021" +description = "Macros for creating hotshot tests" + +[dependencies] +ark-bls12-381 = { workspace = true } +async-compatibility-layer = { workspace = true } +async-trait = { workspace = true } +# needed for vrf demo +# so non-optional for now +blake3 = { workspace = true, features = ["traits-preview"] } +commit = { workspace = true } +either = { workspace = true } +futures = { workspace = true } +hotshot = { path = "../hotshot", default-features = false } +hotshot-types = { path = "../types", default-features = false } +hotshot-testing = { path = "../testing", default-features = false } +jf-primitives = { workspace = true } +rand = { workspace = true } +snafu = { workspace = true } +tracing = { workspace = true } +serde = { workspace = true } +# proc macro stuff +quote = "1.0.33" +syn = { version = "2.0.43", features = ["full", "extra-traits"] } +proc-macro2 = "1.0.71" +derive_builder = "0.12.0" + +[dev-dependencies] +async-lock = { workspace = true } + +[lib] +proc-macro = true + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } diff --git a/testing-macros/src/lib.rs b/testing-macros/src/lib.rs new file mode 100644 index 0000000000..01056f0520 --- /dev/null +++ b/testing-macros/src/lib.rs @@ -0,0 +1,610 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use syn::parse::Result; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, +}; + +/// Supported consensus types by macro +#[derive(Debug, Clone)] +enum SupportedConsensusTypes { + Consensus, +} + +/// description of a crosstest +#[derive(derive_builder::Builder, Debug, Clone)] +struct CrossTestData { + /// consensus time impls + time_types: ExprArray, + /// demo type list of tuples + demo_types: ExprArray, + /// signature key impls + signature_key_types: ExprArray, + /// communication channel impls + comm_channels: ExprArray, + /// storage impls + storages: ExprArray, + /// name of the test + test_name: Ident, + /// test description/spec + test_builder: Expr, + /// whether or not to hide behind slow feature flag + slow: LitBool, +} + +/// we internally choose types +#[derive(derive_builder::Builder, Debug, Clone)] +struct CrossAllTypesSpec { + /// name of the test + test_name: Ident, + /// test description/spec + test_builder: Expr, + /// whether or not to hide behind slow feature flag + slow: LitBool, +} + +impl Parse for CrossAllTypesSpec { + fn parse(input: ParseStream) -> Result { + let mut description = CrossAllTypesSpecBuilder::create_empty(); + while !description.is_ready() { + if input.peek(keywords::TestName) { + let _ = input.parse::()?; + input.parse::()?; + let test_name = input.parse::()?; + description.test_name(test_name); + } else if input.peek(keywords::TestBuilder) { + let _ = input.parse::()?; + input.parse::()?; + let test_builder = input.parse::()?; + description.test_builder(test_builder); + } else if input.peek(keywords::Slow) { + let _ = input.parse::()?; + input.parse::()?; + let slow = input.parse::()?; + description.slow(slow); + } else { + panic!("Unexpected token. Expected one f: Time, DemoType, SignatureKey, CommChannel, Storage, TestName, TestBuilder, Slow"); + } + if input.peek(Token![,]) { + input.parse::()?; + } + } + description + .build() + .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{}", e))) + } +} + +impl CrossAllTypesSpecBuilder { + fn is_ready(&self) -> bool { + self.test_name.is_some() && self.test_builder.is_some() && self.slow.is_some() + } +} + +impl CrossTestDataBuilder { + fn is_ready(&self) -> bool { + self.time_types.is_some() + && self.demo_types.is_some() + && self.signature_key_types.is_some() + && self.comm_channels.is_some() + && self.storages.is_some() + && self.test_name.is_some() + && self.test_builder.is_some() + && self.slow.is_some() + } +} + +/// requisite data to generate a single test +#[derive(derive_builder::Builder, Debug, Clone)] +struct TestData { + time_type: ExprPath, + demo_types: ExprTuple, + signature_key_type: ExprPath, + comm_channel: ExprPath, + storage: ExprPath, + test_name: Ident, + test_builder: Expr, + slow: LitBool, +} + +/// trait make a string lower and snake case +trait ToLowerSnakeStr { + /// make a lower and snake case string + fn to_lower_snake_str(&self) -> String; +} + +impl ToLowerSnakeStr for ExprPath { + fn to_lower_snake_str(&self) -> String { + self.path + .segments + .iter() + .fold("".to_string(), |mut acc, s| { + acc.push_str(&s.ident.to_string().to_lowercase()); + acc.push('_'); + acc + }) + .to_lowercase() + } +} + +impl ToLowerSnakeStr for ExprTuple { + fn to_lower_snake_str(&self) -> String { + self.elems + .iter() + .map(|x| { + let Expr::Path(expr_path) = x else { + panic!("Expected path expr, got {:?}", x) + }; + expr_path + }) + .fold("".to_string(), |mut acc, s| { + acc.push_str(&s.to_lower_snake_str()); + acc + }) + } +} + +impl TestData { + fn generate_test(&self) -> TokenStream2 { + let TestData { + time_type, + demo_types, + signature_key_type, + test_name, + test_builder, + slow, + comm_channel, + storage, + } = self; + + let (supported_consensus_type, demo_state) = { + let mut tuple = demo_types.elems.iter(); + let first_ele = tuple + .next() + .expect("First element of tuple must be the consensus type."); + + let Expr::Path(expr_path) = first_ele else { + panic!("Expected path expr, got {:?}", first_ele) + }; + let Some(ident) = expr_path.path.get_ident() else { + panic!("Expected ident, got {:?}", expr_path.path) + }; + let consensus_type = if ident == "Consensus" { + SupportedConsensusTypes::Consensus + } else { + panic!("Unsupported consensus type: {ident:?}") + }; + + let demo_state = tuple + .next() + .expect("Seecond element of tuple must state type"); + (consensus_type, demo_state) + }; + + let slow_attribute = if slow.value() { + quote! { #[cfg(feature = "slow-tests")] } + } else { + quote! {} + }; + + let (consensus_type, leaf, vote, proposal, consensus_message, exchanges) = + match supported_consensus_type { + SupportedConsensusTypes::Consensus => { + let consensus_type = quote! { + hotshot_types::traits::consensus_type::sequencing_consensus::Consensus + }; + let leaf = quote! { + hotshot_types::data::SequencingLeaf + }; + let vote = quote! { + hotshot_types::vote::DAVote + }; + let proposal = quote! { + hotshot_types::data::DAProposal + }; + let consensus_message = quote! { + hotshot_types::message::SequencingMessage + }; + let committee_exchange = quote! { + hotshot_types::traits::election::CommitteeExchange< + TestTypes, + CommitteeMembership, + #comm_channel< + TestTypes, + TestNodeImpl, + #proposal, + #vote, + CommitteeMembership, + >, + hotshot_types::message::Message, + > + }; + let exchanges = quote! { + hotshot_types::traits::node_implementation::SequencingExchanges< + TestTypes, + hotshot_types::message::Message, + TestQuorumExchange, + #committee_exchange + > + }; + + ( + consensus_type, + leaf, + vote, + proposal, + consensus_message, + exchanges, + ) + } + }; + + quote! { + + #[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, + )] + struct TestTypes; + + #[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, + )] + struct TestNodeImpl; + + + impl hotshot_types::traits::node_implementation::NodeType for TestTypes { + type ConsensusType = #consensus_type; + type Time = #time_type; + type BlockHeader = <#demo_state as hotshot_types::traits::State>::BlockHeader; + type SignatureKey = #signature_key_type; + type Transaction = <<#demo_state as hotshot_types::traits::State>::BlockPayload as hotshot_types::traits::BlockPayload>::Transaction; + type StateType = #demo_state; + type VoteTokenType = hotshot::traits::election::static_committee::StaticVoteToken; + type ElectionConfigType = hotshot::traits::election::static_committee::StaticElectionConfig; + } + + type CommitteeMembership = hotshot::traits::election::static_committee::GeneralStaticCommittee; + + type TestQuorumExchange = + hotshot_types::traits::election::QuorumExchange< + TestTypes, + #leaf, + #proposal, + CommitteeMembership, + #comm_channel< + TestTypes, + TestNodeImpl, + #proposal, + #vote, + CommitteeMembership, + >, + hotshot_types::message::Message, + >; + + impl hotshot_types::traits::node_implementation::NodeImplementation for TestNodeImpl { + type Leaf = #leaf; + type Storage = #storage; + type Exchanges = #exchanges; + + fn new_channel_maps( + start_view: #time_type + ) -> ( + hotshot_types::traits::node_implementation::ChannelMaps, + Option> + ) { + let committee_channel_maps = if std::any::type_name::<#consensus_type>() == + std::any::type_name::() + { + None + } else { + Some(hotshot_types::traits::node_implementation::ChannelMaps::new(start_view)) + }; + ( + hotshot_types::traits::node_implementation::ChannelMaps::new(start_view), + committee_channel_maps + ) + } + } + + #slow_attribute + #[cfg_attr( + feature = "tokio-executor", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(feature = "async-std-executor", async_std::test)] + #[tracing::instrument] + async fn #test_name() { + hotshot_testing::test_builder::TestBuilder::build::< + TestTypes, + TestNodeImpl + >(#test_builder).launch().run_test().await.unwrap(); + } + } + } +} + +/// macro specific custom keywords +mod keywords { + syn::custom_keyword!(Time); + syn::custom_keyword!(DemoType); + syn::custom_keyword!(SignatureKey); + syn::custom_keyword!(Vote); + syn::custom_keyword!(TestName); + syn::custom_keyword!(TestBuilder); + syn::custom_keyword!(Slow); + syn::custom_keyword!(CommChannel); + syn::custom_keyword!(Storage); +} + +impl Parse for CrossTestData { + fn parse(input: ParseStream) -> Result { + let mut description = CrossTestDataBuilder::create_empty(); + + while !description.is_ready() { + if input.peek(keywords::Time) { + let _ = input.parse::()?; + input.parse::()?; + let time_types = input.parse::()?; + description.time_types(time_types); + } else if input.peek(keywords::DemoType) { + let _ = input.parse::()?; + input.parse::()?; + let demo_types = input.parse::()?; + description.demo_types(demo_types); + } else if input.peek(keywords::SignatureKey) { + let _ = input.parse::()?; + input.parse::()?; + let signature_key_types = input.parse::()?; + description.signature_key_types(signature_key_types); + } else if input.peek(keywords::CommChannel) { + let _ = input.parse::()?; + input.parse::()?; + let comm_channels = input.parse::()?; + description.comm_channels(comm_channels); + } else if input.peek(keywords::Storage) { + let _ = input.parse::()?; + input.parse::()?; + let storages = input.parse::()?; + description.storages(storages); + } else if input.peek(keywords::TestName) { + let _ = input.parse::()?; + input.parse::()?; + let test_name = input.parse::()?; + description.test_name(test_name); + } else if input.peek(keywords::TestBuilder) { + let _ = input.parse::()?; + input.parse::()?; + let test_builder = input.parse::()?; + description.test_builder(test_builder); + } else if input.peek(keywords::Slow) { + let _ = input.parse::()?; + input.parse::()?; + let slow = input.parse::()?; + description.slow(slow); + } else { + panic!("Unexpected token. Expected one f: Time, DemoType, SignatureKey, CommChannel, Storage, TestName, TestBuilder, Slow"); + } + if input.peek(Token![,]) { + input.parse::()?; + } + } + description + .build() + .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{}", e))) + } +} + +fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { + let demo_types = test_spec + .demo_types + .elems + .iter() + .map(|t| { + let Expr::Tuple(p) = t else { + panic!("Expected Tuple! Got {:?}", t) + }; + p + }) + .collect::>(); + + let comm_channels = test_spec.comm_channels.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Comm Channel! Got {:?}", t) + }; + p + }); + + let storages = test_spec.storages.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Storage! Got {:?}", t) + }; + p + }); + + let time_types = test_spec.time_types.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Time Type! Got {:?}", t) + }; + p + }); + + let signature_key_types = test_spec.signature_key_types.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Signature Key Type! Got {:?}", t) + }; + p + }); + + let mut result = quote! {}; + + for demo_type in demo_types.clone() { + let mut demo_mod = quote! {}; + for comm_channel in comm_channels.clone() { + let mut comm_mod = quote! {}; + for storage in storages.clone() { + let mut storage_mod = quote! {}; + for time_type in time_types.clone() { + let mut time_mod = quote! {}; + for signature_key_type in signature_key_types.clone() { + let test_data = TestDataBuilder::create_empty() + .time_type(time_type.clone()) + .demo_types(demo_type.clone()) + .signature_key_type(signature_key_type.clone()) + .comm_channel(comm_channel.clone()) + .storage(storage.clone()) + .test_name(test_spec.test_name.clone()) + .test_builder(test_spec.test_builder.clone()) + .slow(test_spec.slow.clone()) + .build() + .unwrap(); + let test = test_data.generate_test(); + + let signature_key_str = + format_ident!("{}", signature_key_type.to_lower_snake_str()); + let sig_result = quote! { + pub mod #signature_key_str { + use super::*; + #test + } + }; + time_mod.extend(sig_result); + } + + let time_str = format_ident!("{}", time_type.to_lower_snake_str()); + let time_result = quote! { + pub mod #time_str { + use super::*; + #time_mod + } + }; + storage_mod.extend(time_result); + } + let storage_str = format_ident!("{}", storage.to_lower_snake_str()); + let storage_result = quote! { + pub mod #storage_str { + use super::*; + #storage_mod + } + }; + comm_mod.extend(storage_result); + } + let comm_channel_str = format_ident!("{}", comm_channel.to_lower_snake_str()); + let comm_result = quote! { + pub mod #comm_channel_str { + use super::*; + #comm_mod + } + }; + demo_mod.extend(comm_result); + } + let demo_str = format_ident!("{}", demo_type.to_lower_snake_str()); + let demo_result = quote! { + pub mod #demo_str { + use super::*; + #demo_mod + } + }; + result.extend(demo_result); + } + let name = test_spec.test_name; + quote! { + pub mod #name { + use super::*; + #result + } + } + .into() +} + +/// Generate a cartesian product of tests across all types +/// Arguments: +/// - `DemoType: [(ConsensusTypeName1, DemoStateType1), (ConsensusTypeName2, DemoStateType2) ...]` - a list of tuples of cusonsensus state + `State` implementations +/// - `SignatureKey: [SignatureKey1, SignatureKey2, ...]` - a list of `SignatureKey` implementations +/// - `CommChannel: [CommChannel1, CommChannel2, ..]` - a list of `CommunicationChannel` implementations +/// - `Time: [ Time1, Time2, ...]` - a list of `ConsensusTime` implementations]` +/// - `TestName: example_test` - the name of the test +/// - `TestBuilder: { some_test_builder_expression }` - the `TestBuilder` to use +/// - `Storage: Storage1, Storage2, ...` - a list of `Storage` implementations to use +/// - `Slow`: whether or not this set of tests are hidden behind the `slow` feature flag +/// Example usage: +/// ``` +/// hotshot_testing_macros::cross_tests!( +/// DemoType: [(ValidatingConsensus, hotshot::demos::vdemo::VDemoState) ], +/// SignatureKey: [ hotshot_types::traits::signature_key::ed25519::Ed25519Pub ], +/// CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], +/// Storage: [ hotshot::traits::implementations::MemoryStorage ], +/// Time: [ hotshot_types::data::ViewNumber ], +/// TestName: ten_tx_seven_nodes_fast, +/// TestBuilder: hotshot_testing::test_builder::TestBuilder { +/// total_nodes: 7, +/// start_nodes: 7, +/// num_succeeds: 10, +/// txn_ids: either::Either::Right(1), +/// ..hotshot_testing::test_builder::TestBuilder::default() +/// }, +/// Slow: false, +/// ); +/// ``` +#[proc_macro] +pub fn cross_tests(input: TokenStream) -> TokenStream { + let test_spec = parse_macro_input!(input as CrossTestData); + cross_tests_internal(test_spec) +} + +/// Generate a cartesian product of tests across all impls known to `hotshot_types` +/// Arguments: +/// - `TestName: example_test` - the name of the test +/// - `TestBuilder: { some_test_builder_expression }` - the `TestBuilder` to use +/// - `Slow`: whether or not this set of tests are hidden behind the `slow` feature flag +/// Example usage: +/// ``` +/// hotshot_testing_macros::cross_all_types!( +/// TestName: example_test, +/// TestBuilder: hotshot_testing::test_builder::TestBuilder::default(), +/// Slow: false, +/// ); +/// ``` +#[proc_macro] +pub fn cross_all_types(input: TokenStream) -> TokenStream { + let CrossAllTypesSpec { + test_name, + test_builder, + slow, + } = parse_macro_input!(input as CrossAllTypesSpec); + let tokens = quote! { + DemoType: [ /* (Consensus, hotshot_testing::demo::DemoState), */ (hotshot::demos::vdemo::VDemoState) ], + SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], + CommChannel: [ hotshot::traits::implementations::Libp2pCommChannel, hotshot::traits::implementations::CentralizedCommChannel ], + Time: [ hotshot_types::data::ViewNumber ], + Storage: [ hotshot::traits::implementations::MemoryStorage ], + TestName: #test_name, + TestBuilder: #test_builder, + Slow: #slow, + }.into(); + let test_spec = parse_macro_input!(tokens as CrossTestData); + cross_tests_internal(test_spec) +} diff --git a/testing-macros/tests/cross/failures.rs b/testing-macros/tests/cross/failures.rs new file mode 100644 index 0000000000..9159a42be1 --- /dev/null +++ b/testing-macros/tests/cross/failures.rs @@ -0,0 +1,69 @@ +use hotshot_testing_macros::cross_all_types; + +// This test simulates a single permanent failed node +cross_all_types!( + TestName: single_permanent_failure_slow, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 7, + start_nodes: 7, + num_succeeds: 10, + timing_data: hotshot_testing::test_builder::TimingData { + next_view_timeout: 1000, + ..hotshot_testing::test_builder::TimingData::default() + }, + failure_threshold: 20, + ..hotshot_testing::test_builder::TestMetadata::default() + }, + setup: + Some(hotshot_testing::round_builder::RoundSetupBuilder { + scheduled_changes: vec![ + hotshot_testing::round_builder::ChangeNode { + idx: 5, + view: 1, + updown: hotshot_testing::round_builder::UpDown::Down + }, + ], + ..Default::default() + }), + check: None + }, + Slow: true, +); + +// This test simulates two permanent failed nodes +// +// With n=7, this is the maximum failures that the network can tolerate +cross_all_types!( + TestName: double_permanent_failure_slow, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 7, + start_nodes: 7, + num_succeeds: 10, + failure_threshold: 20, + timing_data: hotshot_testing::test_builder::TimingData { + next_view_timeout: 1000, + ..hotshot_testing::test_builder::TimingData::default() + }, + ..hotshot_testing::test_builder::TestMetadata::default() + }, + setup: + Some(hotshot_testing::round_builder::RoundSetupBuilder { + scheduled_changes: vec![ + hotshot_testing::round_builder::ChangeNode { + idx: 5, + view: 1, + updown: hotshot_testing::round_builder::UpDown::Down + }, + hotshot_testing::round_builder::ChangeNode { + idx: 6, + view: 1, + updown: hotshot_testing::round_builder::UpDown::Down }, + ], + ..Default::default() + }), + check: None + } + Slow: true, +); diff --git a/testing-macros/tests/cross/random_tests.rs b/testing-macros/tests/cross/random_tests.rs new file mode 100644 index 0000000000..771ea61b0b --- /dev/null +++ b/testing-macros/tests/cross/random_tests.rs @@ -0,0 +1,211 @@ +// #[cfg(feature = "slow-tests")] +// use either::Either::Right; +// #[cfg(feature = "slow-tests")] +// use hotshot_testing::test_builder::{get_tolerance, TestMetadata}; +// use hotshot_testing_macros::cross_all_types; +// #[cfg(feature = "slow-tests")] +// use std::{collections::HashSet, iter::FromIterator}; + +// TODO these need to be fixed. But the slow runs fail anyway. We should re-enable when we decide +// to debug and fix them. + +// cross_all_types!( +// TestName: test_fail_first_node_regression_small, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 10, +// start_nodes: 10, +// num_succeeds: 40, +// txn_ids: Right(30), +// ids_to_shut_down: vec![vec![0].into_iter().collect::>()], +// failure_threshold: 5, +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_fifty_nodes_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 50, +// start_nodes: 50, +// num_succeeds: 40, +// txn_ids: Right(30), +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_ninety_nodes_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 90, +// start_nodes: 90, +// num_succeeds: 40, +// txn_ids: Right(30), +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_large_num_txns_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 10, +// start_nodes: 10, +// num_succeeds: 40, +// txn_ids: Right(500), +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_fail_last_node_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 53, +// start_nodes: 53, +// num_succeeds: 40, +// txn_ids: Right(30), +// ids_to_shut_down: vec![vec![52].into_iter().collect::>()], +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_fail_first_node_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 76, +// start_nodes: 76, +// num_succeeds: 40, +// txn_ids: Right(30), +// ids_to_shut_down: vec![vec![0].into_iter().collect::>()], +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_fail_last_f_nodes_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 75, +// start_nodes: 75, +// num_succeeds: 40, +// txn_ids: Right(30), +// ids_to_shut_down: vec![HashSet::::from_iter( +// (0..get_tolerance(75)).map(|x| 74 - x), +// )], +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_fail_last_f_plus_one_nodes_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 15, +// start_nodes: 15, +// txn_ids: Right(30), +// ids_to_shut_down: vec![HashSet::::from_iter( +// (0..get_tolerance(15) + 1).map(|x| 14 - x), +// )], +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: test_mul_txns_regression, +// TestDescription: GeneralTestDescriptionBuilder { +// total_nodes: 30, +// start_nodes: 30, +// txn_ids: Right(30), +// ..GeneralTestDescriptionBuilder::default() +// }, +// Slow: true +// ); + +// TODO re-enable these tests if we decide to use proptest +// +// cross_all_types_proptes!( +// test_large_num_nodes_random, +// GeneralTestDescriptionBuilder { +// total_nodes: num_nodes, +// start_nodes: num_nodes, +// ..GeneralTestDescriptionBuilder::default() +// }, +// keep: true, +// slow: true, +// args: num_nodes in 50..100usize +// ); +// +// cross_all_types_proptest!( +// test_fail_last_node_random, +// GeneralTestDescriptionBuilder { +// total_nodes: num_nodes, +// start_nodes: num_nodes, +// txn_ids: Right(30), +// ids_to_shut_down: vec![vec![(num_nodes - 1) as u64].into_iter().collect()], +// ..GeneralTestDescriptionBuilder::default() +// }, +// keep: true, +// slow: true, +// args: num_nodes in 30..100usize +// ); +// +// cross_all_types_proptest!( +// test_fail_first_node_random, +// GeneralTestDescriptionBuilder { +// total_nodes: num_nodes, +// start_nodes: num_nodes, +// txn_ids: Right(30), +// ids_to_shut_down: vec![vec![0].into_iter().collect()], +// ..GeneralTestDescriptionBuilder::default() +// }, +// keep: true, +// slow: true, +// args: num_nodes in 30..100usize +// ); +// +// cross_all_types_proptest!( +// test_fail_last_f_nodes_random, +// GeneralTestDescriptionBuilder { +// total_nodes: num_nodes, +// start_nodes: num_nodes, +// num_succeeds: 40, +// txn_ids: Right(30), +// ids_to_shut_down: vec![HashSet::::from_iter((0..get_tolerance(num_nodes as u64)).map(|x| (num_nodes as u64) - x - 1))], +// ..GeneralTestDescriptionBuilder::default() +// }, +// keep: true, +// slow: true, +// args: num_nodes in 30..100usize +// ); +// +// cross_all_types_proptest!( +// test_fail_first_f_nodes_random, +// GeneralTestDescriptionBuilder { +// total_nodes: num_nodes, +// start_nodes: num_nodes, +// num_succeeds: 40, +// txn_ids: Right(30), +// ids_to_shut_down: vec![HashSet::::from_iter(0..get_tolerance(num_nodes as u64))], +// ..GeneralTestDescriptionBuilder::default() +// }, +// keep: true, +// slow: true, +// args: num_nodes in 30..100usize +// ); +// +// cross_all_types_proptest!( +// test_mul_txns_random, +// GeneralTestDescriptionBuilder { +// total_nodes: 30, +// start_nodes: 30, +// txn_ids: Left(vec![vec![txn_proposer_1, txn_proposer_2]]), +// ..GeneralTestDescriptionBuilder::default() +// }, +// keep: true, +// slow: true, +// args: txn_proposer_1 in 0..15u64, txn_proposer_2 in 15..30u64 +// ); diff --git a/testing-macros/tests/cross/smoke.rs b/testing-macros/tests/cross/smoke.rs new file mode 100644 index 0000000000..87c2aaf41c --- /dev/null +++ b/testing-macros/tests/cross/smoke.rs @@ -0,0 +1,29 @@ +use hotshot_testing_macros::cross_all_types; + +cross_all_types!( + TestName: ten_tx_five_nodes_slow, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 5, + start_nodes: 5, + num_succeeds: 10, + ..Default::default() + }, + ..Default::default() + }, + Slow: true +); + +cross_all_types!( + TestName: ten_tx_seven_nodes_slow, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 7, + start_nodes: 7, + num_succeeds: 10, + ..Default::default() + }, + ..Default::default() + }, + Slow: true +); diff --git a/testing-macros/tests/integration/failures.rs b/testing-macros/tests/integration/failures.rs new file mode 100644 index 0000000000..71a1ba735a --- /dev/null +++ b/testing-macros/tests/integration/failures.rs @@ -0,0 +1,75 @@ +use hotshot_testing_macros::cross_tests; + +cross_tests!( + DemoType: [ (hotshot::demos::vdemo::VDemoState) ], + SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], + CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], + Storage: [ hotshot::traits::implementations::MemoryStorage ], + Time: [ hotshot_types::data::ViewNumber ], + TestName: single_permanent_failure_fast, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 7, + start_nodes: 7, + num_succeeds: 10, + timing_data: hotshot_testing::test_builder::TimingData { + next_view_timeout: 1000, + ..hotshot_testing::test_builder::TimingData::default() + }, + failure_threshold: 20, + ..hotshot_testing::test_builder::TestMetadata::default() + }, + setup: + Some(hotshot_testing::round_builder::RoundSetupBuilder { + scheduled_changes: vec![ + hotshot_testing::round_builder::ChangeNode { + idx: 5, + view: 1, + updown: hotshot_testing::round_builder::UpDown::Down + }, + ], + ..Default::default() + }), + check: None + }, + Slow: false, +); + +cross_tests!( + DemoType: [ (hotshot::demos::vdemo::VDemoState) ], + SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], + CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], + Storage: [ hotshot::traits::implementations::MemoryStorage ], + Time: [ hotshot_types::data::ViewNumber ], + TestName: double_permanent_failure_fast, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 7, + start_nodes: 7, + num_succeeds: 10, + failure_threshold: 20, + timing_data: hotshot_testing::test_builder::TimingData { + next_view_timeout: 1000, + ..hotshot_testing::test_builder::TimingData::default() + }, + ..hotshot_testing::test_builder::TestMetadata::default() + }, + setup: + Some(hotshot_testing::round_builder::RoundSetupBuilder { + scheduled_changes: vec![ + hotshot_testing::round_builder::ChangeNode { + idx: 5, + view: 1, + updown: hotshot_testing::round_builder::UpDown::Down + }, + hotshot_testing::round_builder::ChangeNode { + idx: 6, + view: 1, + updown: hotshot_testing::round_builder::UpDown::Down }, + ], + ..Default::default() + }), + check: None + }, + Slow: false, +); diff --git a/testing-macros/tests/integration/random_tests.rs b/testing-macros/tests/integration/random_tests.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/testing-macros/tests/integration/random_tests.rs @@ -0,0 +1 @@ + diff --git a/testing-macros/tests/integration/smoke.rs b/testing-macros/tests/integration/smoke.rs new file mode 100644 index 0000000000..e784bddf45 --- /dev/null +++ b/testing-macros/tests/integration/smoke.rs @@ -0,0 +1,39 @@ +use hotshot_testing_macros::cross_tests; + +cross_tests!( + DemoType: [(hotshot::demos::vdemo::VDemoState)], + SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], + CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], + Storage: [ hotshot::traits::implementations::MemoryStorage ], + Time: [ hotshot_types::data::ViewNumber ], + TestName: ten_tx_five_nodes_fast, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 5, + start_nodes: 5, + num_succeeds: 10, + ..Default::default() + }, + ..Default::default() + }, + Slow: false, +); + +cross_tests!( + DemoType: [(hotshot::demos::vdemo::VDemoState) ], + SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], + CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], + Storage: [ hotshot::traits::implementations::MemoryStorage ], + Time: [ hotshot_types::data::ViewNumber ], + TestName: ten_tx_seven_nodes_fast, + TestBuilder: hotshot_testing::test_builder::TestBuilder { + metadata: hotshot_testing::test_builder::TestMetadata { + total_nodes: 7, + start_nodes: 7, + num_succeeds: 10, + ..Default::default() + }, + ..Default::default() + }, + Slow: false, +); diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs new file mode 100644 index 0000000000..96b92d7f52 --- /dev/null +++ b/testing-macros/tests/tests.rs @@ -0,0 +1,11 @@ +mod integration { + mod failures; + mod random_tests; + mod smoke; +} + +mod cross { + mod failures; + mod random_tests; + mod smoke; +} From 1864667eeb54bc1891d6e6acbda2dbbeaf948744 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sat, 30 Dec 2023 11:20:25 -0500 Subject: [PATCH 0614/1393] feat: revamped macro --- testing-macros/src/lib.rs | 533 +++---------------- testing-macros/tests/cross/failures.rs | 136 ++--- testing-macros/tests/cross/smoke.rs | 58 +- testing-macros/tests/integration/failures.rs | 92 +--- testing-macros/tests/integration/smoke.rs | 78 +-- 5 files changed, 240 insertions(+), 657 deletions(-) diff --git a/testing-macros/src/lib.rs b/testing-macros/src/lib.rs index 01056f0520..5b7ac6daac 100644 --- a/testing-macros/src/lib.rs +++ b/testing-macros/src/lib.rs @@ -9,106 +9,40 @@ use syn::{ parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, }; -/// Supported consensus types by macro -#[derive(Debug, Clone)] -enum SupportedConsensusTypes { - Consensus, -} - /// description of a crosstest #[derive(derive_builder::Builder, Debug, Clone)] struct CrossTestData { - /// consensus time impls - time_types: ExprArray, - /// demo type list of tuples - demo_types: ExprArray, - /// signature key impls - signature_key_types: ExprArray, - /// communication channel impls - comm_channels: ExprArray, - /// storage impls - storages: ExprArray, - /// name of the test - test_name: Ident, - /// test description/spec - test_builder: Expr, - /// whether or not to hide behind slow feature flag - slow: LitBool, -} - -/// we internally choose types -#[derive(derive_builder::Builder, Debug, Clone)] -struct CrossAllTypesSpec { + /// imlementations + impls: ExprArray, + /// types + types: ExprArray, /// name of the test test_name: Ident, /// test description/spec - test_builder: Expr, - /// whether or not to hide behind slow feature flag - slow: LitBool, -} - -impl Parse for CrossAllTypesSpec { - fn parse(input: ParseStream) -> Result { - let mut description = CrossAllTypesSpecBuilder::create_empty(); - while !description.is_ready() { - if input.peek(keywords::TestName) { - let _ = input.parse::()?; - input.parse::()?; - let test_name = input.parse::()?; - description.test_name(test_name); - } else if input.peek(keywords::TestBuilder) { - let _ = input.parse::()?; - input.parse::()?; - let test_builder = input.parse::()?; - description.test_builder(test_builder); - } else if input.peek(keywords::Slow) { - let _ = input.parse::()?; - input.parse::()?; - let slow = input.parse::()?; - description.slow(slow); - } else { - panic!("Unexpected token. Expected one f: Time, DemoType, SignatureKey, CommChannel, Storage, TestName, TestBuilder, Slow"); - } - if input.peek(Token![,]) { - input.parse::()?; - } - } - description - .build() - .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{}", e))) - } -} - -impl CrossAllTypesSpecBuilder { - fn is_ready(&self) -> bool { - self.test_name.is_some() && self.test_builder.is_some() && self.slow.is_some() - } + metadata: Expr, + /// whether or not to ignore + ignore: LitBool, } impl CrossTestDataBuilder { fn is_ready(&self) -> bool { - self.time_types.is_some() - && self.demo_types.is_some() - && self.signature_key_types.is_some() - && self.comm_channels.is_some() - && self.storages.is_some() + self.impls.is_some() + && self.types.is_some() && self.test_name.is_some() - && self.test_builder.is_some() - && self.slow.is_some() + && self.metadata.is_some() + && self.test_name.is_some() + && self.ignore.is_some() } } /// requisite data to generate a single test #[derive(derive_builder::Builder, Debug, Clone)] struct TestData { - time_type: ExprPath, - demo_types: ExprTuple, - signature_key_type: ExprPath, - comm_channel: ExprPath, - storage: ExprPath, + ty: ExprPath, + imply: ExprPath, test_name: Ident, - test_builder: Expr, - slow: LitBool, + metadata: Expr, + ignore: LitBool, } /// trait make a string lower and snake case @@ -151,185 +85,21 @@ impl ToLowerSnakeStr for ExprTuple { impl TestData { fn generate_test(&self) -> TokenStream2 { let TestData { - time_type, - demo_types, - signature_key_type, + ty, + imply, test_name, - test_builder, - slow, - comm_channel, - storage, + metadata, + ignore, } = self; - let (supported_consensus_type, demo_state) = { - let mut tuple = demo_types.elems.iter(); - let first_ele = tuple - .next() - .expect("First element of tuple must be the consensus type."); - - let Expr::Path(expr_path) = first_ele else { - panic!("Expected path expr, got {:?}", first_ele) - }; - let Some(ident) = expr_path.path.get_ident() else { - panic!("Expected ident, got {:?}", expr_path.path) - }; - let consensus_type = if ident == "Consensus" { - SupportedConsensusTypes::Consensus - } else { - panic!("Unsupported consensus type: {ident:?}") - }; - - let demo_state = tuple - .next() - .expect("Seecond element of tuple must state type"); - (consensus_type, demo_state) - }; - - let slow_attribute = if slow.value() { - quote! { #[cfg(feature = "slow-tests")] } + let slow_attribute = if ignore.value() { + // quote! { #[cfg(feature = "slow-tests")] } + quote! { #[ignore] } } else { quote! {} }; - - let (consensus_type, leaf, vote, proposal, consensus_message, exchanges) = - match supported_consensus_type { - SupportedConsensusTypes::Consensus => { - let consensus_type = quote! { - hotshot_types::traits::consensus_type::sequencing_consensus::Consensus - }; - let leaf = quote! { - hotshot_types::data::SequencingLeaf - }; - let vote = quote! { - hotshot_types::vote::DAVote - }; - let proposal = quote! { - hotshot_types::data::DAProposal - }; - let consensus_message = quote! { - hotshot_types::message::SequencingMessage - }; - let committee_exchange = quote! { - hotshot_types::traits::election::CommitteeExchange< - TestTypes, - CommitteeMembership, - #comm_channel< - TestTypes, - TestNodeImpl, - #proposal, - #vote, - CommitteeMembership, - >, - hotshot_types::message::Message, - > - }; - let exchanges = quote! { - hotshot_types::traits::node_implementation::SequencingExchanges< - TestTypes, - hotshot_types::message::Message, - TestQuorumExchange, - #committee_exchange - > - }; - - ( - consensus_type, - leaf, - vote, - proposal, - consensus_message, - exchanges, - ) - } - }; - quote! { - #[derive( - Copy, - Clone, - Debug, - Default, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - serde::Serialize, - serde::Deserialize, - )] - struct TestTypes; - - #[derive( - Copy, - Clone, - Debug, - Default, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - serde::Serialize, - serde::Deserialize, - )] - struct TestNodeImpl; - - - impl hotshot_types::traits::node_implementation::NodeType for TestTypes { - type ConsensusType = #consensus_type; - type Time = #time_type; - type BlockHeader = <#demo_state as hotshot_types::traits::State>::BlockHeader; - type SignatureKey = #signature_key_type; - type Transaction = <<#demo_state as hotshot_types::traits::State>::BlockPayload as hotshot_types::traits::BlockPayload>::Transaction; - type StateType = #demo_state; - type VoteTokenType = hotshot::traits::election::static_committee::StaticVoteToken; - type ElectionConfigType = hotshot::traits::election::static_committee::StaticElectionConfig; - } - - type CommitteeMembership = hotshot::traits::election::static_committee::GeneralStaticCommittee; - - type TestQuorumExchange = - hotshot_types::traits::election::QuorumExchange< - TestTypes, - #leaf, - #proposal, - CommitteeMembership, - #comm_channel< - TestTypes, - TestNodeImpl, - #proposal, - #vote, - CommitteeMembership, - >, - hotshot_types::message::Message, - >; - - impl hotshot_types::traits::node_implementation::NodeImplementation for TestNodeImpl { - type Leaf = #leaf; - type Storage = #storage; - type Exchanges = #exchanges; - - fn new_channel_maps( - start_view: #time_type - ) -> ( - hotshot_types::traits::node_implementation::ChannelMaps, - Option> - ) { - let committee_channel_maps = if std::any::type_name::<#consensus_type>() == - std::any::type_name::() - { - None - } else { - Some(hotshot_types::traits::node_implementation::ChannelMaps::new(start_view)) - }; - ( - hotshot_types::traits::node_implementation::ChannelMaps::new(start_view), - committee_channel_maps - ) - } - } - #slow_attribute #[cfg_attr( feature = "tokio-executor", @@ -338,10 +108,10 @@ impl TestData { #[cfg_attr(feature = "async-std-executor", async_std::test)] #[tracing::instrument] async fn #test_name() { - hotshot_testing::test_builder::TestBuilder::build::< - TestTypes, - TestNodeImpl - >(#test_builder).launch().run_test().await.unwrap(); + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + // TODO this should be zero, right? I can also provide this as input + (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test().await; } } } @@ -349,15 +119,11 @@ impl TestData { /// macro specific custom keywords mod keywords { - syn::custom_keyword!(Time); - syn::custom_keyword!(DemoType); - syn::custom_keyword!(SignatureKey); - syn::custom_keyword!(Vote); + syn::custom_keyword!(Metadata); + syn::custom_keyword!(Ignore); syn::custom_keyword!(TestName); - syn::custom_keyword!(TestBuilder); - syn::custom_keyword!(Slow); - syn::custom_keyword!(CommChannel); - syn::custom_keyword!(Storage); + syn::custom_keyword!(Types); + syn::custom_keyword!(Impls); } impl Parse for CrossTestData { @@ -365,48 +131,35 @@ impl Parse for CrossTestData { let mut description = CrossTestDataBuilder::create_empty(); while !description.is_ready() { - if input.peek(keywords::Time) { - let _ = input.parse::()?; - input.parse::()?; - let time_types = input.parse::()?; - description.time_types(time_types); - } else if input.peek(keywords::DemoType) { - let _ = input.parse::()?; - input.parse::()?; - let demo_types = input.parse::()?; - description.demo_types(demo_types); - } else if input.peek(keywords::SignatureKey) { - let _ = input.parse::()?; - input.parse::()?; - let signature_key_types = input.parse::()?; - description.signature_key_types(signature_key_types); - } else if input.peek(keywords::CommChannel) { - let _ = input.parse::()?; + if input.peek(keywords::Types) { + let _ = input.parse::()?; input.parse::()?; - let comm_channels = input.parse::()?; - description.comm_channels(comm_channels); - } else if input.peek(keywords::Storage) { - let _ = input.parse::()?; + let types = input.parse::()?; + description.types(types); + } else if input.peek(keywords::Impls) { + let _ = input.parse::()?; input.parse::()?; - let storages = input.parse::()?; - description.storages(storages); + let impls = input.parse::()?; + description.impls(impls); } else if input.peek(keywords::TestName) { let _ = input.parse::()?; input.parse::()?; let test_name = input.parse::()?; description.test_name(test_name); - } else if input.peek(keywords::TestBuilder) { - let _ = input.parse::()?; + } else if input.peek(keywords::Metadata) { + let _ = input.parse::()?; input.parse::()?; - let test_builder = input.parse::()?; - description.test_builder(test_builder); - } else if input.peek(keywords::Slow) { - let _ = input.parse::()?; + let metadata = input.parse::()?; + description.metadata(metadata); + } else if input.peek(keywords::Ignore) { + let _ = input.parse::()?; input.parse::()?; - let slow = input.parse::()?; - description.slow(slow); + let ignore = input.parse::()?; + description.ignore(ignore); } else { - panic!("Unexpected token. Expected one f: Time, DemoType, SignatureKey, CommChannel, Storage, TestName, TestBuilder, Slow"); + panic!( + "Unexpected token. Expected one of: Metadata, Ignore, Impls, Types, Testname" + ); } if input.peek(Token![,]) { input.parse::()?; @@ -419,116 +172,51 @@ impl Parse for CrossTestData { } fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { - let demo_types = test_spec - .demo_types - .elems - .iter() - .map(|t| { - let Expr::Tuple(p) = t else { - panic!("Expected Tuple! Got {:?}", t) - }; - p - }) - .collect::>(); - - let comm_channels = test_spec.comm_channels.elems.iter().map(|t| { - let Expr::Path(p) = t else { - panic!("Expected Path for Comm Channel! Got {:?}", t) - }; - p - }); - - let storages = test_spec.storages.elems.iter().map(|t| { + let impls = test_spec.impls.elems.iter().map(|t| { let Expr::Path(p) = t else { - panic!("Expected Path for Storage! Got {:?}", t) + panic!("Expected Path for Impl! Got {:?}", t) }; p }); - - let time_types = test_spec.time_types.elems.iter().map(|t| { + // + let types = test_spec.types.elems.iter().map(|t| { let Expr::Path(p) = t else { - panic!("Expected Path for Time Type! Got {:?}", t) - }; - p - }); - - let signature_key_types = test_spec.signature_key_types.elems.iter().map(|t| { - let Expr::Path(p) = t else { - panic!("Expected Path for Signature Key Type! Got {:?}", t) + panic!("Expected Path for Type! Got {:?}", t) }; p }); let mut result = quote! {}; - - for demo_type in demo_types.clone() { - let mut demo_mod = quote! {}; - for comm_channel in comm_channels.clone() { - let mut comm_mod = quote! {}; - for storage in storages.clone() { - let mut storage_mod = quote! {}; - for time_type in time_types.clone() { - let mut time_mod = quote! {}; - for signature_key_type in signature_key_types.clone() { - let test_data = TestDataBuilder::create_empty() - .time_type(time_type.clone()) - .demo_types(demo_type.clone()) - .signature_key_type(signature_key_type.clone()) - .comm_channel(comm_channel.clone()) - .storage(storage.clone()) - .test_name(test_spec.test_name.clone()) - .test_builder(test_spec.test_builder.clone()) - .slow(test_spec.slow.clone()) - .build() - .unwrap(); - let test = test_data.generate_test(); - - let signature_key_str = - format_ident!("{}", signature_key_type.to_lower_snake_str()); - let sig_result = quote! { - pub mod #signature_key_str { - use super::*; - #test - } - }; - time_mod.extend(sig_result); - } - - let time_str = format_ident!("{}", time_type.to_lower_snake_str()); - let time_result = quote! { - pub mod #time_str { - use super::*; - #time_mod - } - }; - storage_mod.extend(time_result); - } - let storage_str = format_ident!("{}", storage.to_lower_snake_str()); - let storage_result = quote! { - pub mod #storage_str { - use super::*; - #storage_mod - } - }; - comm_mod.extend(storage_result); - } - let comm_channel_str = format_ident!("{}", comm_channel.to_lower_snake_str()); - let comm_result = quote! { - pub mod #comm_channel_str { + for ty in types.clone() { + let mut type_mod = quote! {}; + for imp in impls.clone() { + let test_data = TestDataBuilder::create_empty() + .test_name(test_spec.test_name.clone()) + .metadata(test_spec.metadata.clone()) + .ignore(test_spec.ignore.clone()) + .imply(imp.clone()) + .ty(ty.clone()) + .build() + .unwrap(); + let test = test_data.generate_test(); + + let impl_str = format_ident!("{}", imp.to_lower_snake_str()); + let impl_result = quote! { + pub mod #impl_str { use super::*; - #comm_mod + #test } }; - demo_mod.extend(comm_result); + type_mod.extend(impl_result); } - let demo_str = format_ident!("{}", demo_type.to_lower_snake_str()); - let demo_result = quote! { - pub mod #demo_str { + let ty_str = format_ident!("{}", ty.to_lower_snake_str()); + let typ_result = quote! { + pub mod #ty_str { use super::*; - #demo_mod + #type_mod } }; - result.extend(demo_result); + result.extend(typ_result); } let name = test_spec.test_name; quote! { @@ -542,69 +230,14 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { /// Generate a cartesian product of tests across all types /// Arguments: -/// - `DemoType: [(ConsensusTypeName1, DemoStateType1), (ConsensusTypeName2, DemoStateType2) ...]` - a list of tuples of cusonsensus state + `State` implementations -/// - `SignatureKey: [SignatureKey1, SignatureKey2, ...]` - a list of `SignatureKey` implementations -/// - `CommChannel: [CommChannel1, CommChannel2, ..]` - a list of `CommunicationChannel` implementations -/// - `Time: [ Time1, Time2, ...]` - a list of `ConsensusTime` implementations]` +/// - `Impls: []` - a list of types that implement nodetype +/// - `Metadata`: TestMetadata::default()` - test metadata +/// - `Types: []` - a list types that implement NodeImplementation over the types in `Impls` /// - `TestName: example_test` - the name of the test -/// - `TestBuilder: { some_test_builder_expression }` - the `TestBuilder` to use -/// - `Storage: Storage1, Storage2, ...` - a list of `Storage` implementations to use -/// - `Slow`: whether or not this set of tests are hidden behind the `slow` feature flag -/// Example usage: -/// ``` -/// hotshot_testing_macros::cross_tests!( -/// DemoType: [(ValidatingConsensus, hotshot::demos::vdemo::VDemoState) ], -/// SignatureKey: [ hotshot_types::traits::signature_key::ed25519::Ed25519Pub ], -/// CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], -/// Storage: [ hotshot::traits::implementations::MemoryStorage ], -/// Time: [ hotshot_types::data::ViewNumber ], -/// TestName: ten_tx_seven_nodes_fast, -/// TestBuilder: hotshot_testing::test_builder::TestBuilder { -/// total_nodes: 7, -/// start_nodes: 7, -/// num_succeeds: 10, -/// txn_ids: either::Either::Right(1), -/// ..hotshot_testing::test_builder::TestBuilder::default() -/// }, -/// Slow: false, -/// ); -/// ``` +/// - `Ignore`: whether or not this set of tests are ignored +/// Example usage: see tests in this module #[proc_macro] pub fn cross_tests(input: TokenStream) -> TokenStream { let test_spec = parse_macro_input!(input as CrossTestData); cross_tests_internal(test_spec) } - -/// Generate a cartesian product of tests across all impls known to `hotshot_types` -/// Arguments: -/// - `TestName: example_test` - the name of the test -/// - `TestBuilder: { some_test_builder_expression }` - the `TestBuilder` to use -/// - `Slow`: whether or not this set of tests are hidden behind the `slow` feature flag -/// Example usage: -/// ``` -/// hotshot_testing_macros::cross_all_types!( -/// TestName: example_test, -/// TestBuilder: hotshot_testing::test_builder::TestBuilder::default(), -/// Slow: false, -/// ); -/// ``` -#[proc_macro] -pub fn cross_all_types(input: TokenStream) -> TokenStream { - let CrossAllTypesSpec { - test_name, - test_builder, - slow, - } = parse_macro_input!(input as CrossAllTypesSpec); - let tokens = quote! { - DemoType: [ /* (Consensus, hotshot_testing::demo::DemoState), */ (hotshot::demos::vdemo::VDemoState) ], - SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], - CommChannel: [ hotshot::traits::implementations::Libp2pCommChannel, hotshot::traits::implementations::CentralizedCommChannel ], - Time: [ hotshot_types::data::ViewNumber ], - Storage: [ hotshot::traits::implementations::MemoryStorage ], - TestName: #test_name, - TestBuilder: #test_builder, - Slow: #slow, - }.into(); - let test_spec = parse_macro_input!(tokens as CrossTestData); - cross_tests_internal(test_spec) -} diff --git a/testing-macros/tests/cross/failures.rs b/testing-macros/tests/cross/failures.rs index 9159a42be1..f7dc473299 100644 --- a/testing-macros/tests/cross/failures.rs +++ b/testing-macros/tests/cross/failures.rs @@ -1,69 +1,69 @@ -use hotshot_testing_macros::cross_all_types; - -// This test simulates a single permanent failed node -cross_all_types!( - TestName: single_permanent_failure_slow, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 7, - start_nodes: 7, - num_succeeds: 10, - timing_data: hotshot_testing::test_builder::TimingData { - next_view_timeout: 1000, - ..hotshot_testing::test_builder::TimingData::default() - }, - failure_threshold: 20, - ..hotshot_testing::test_builder::TestMetadata::default() - }, - setup: - Some(hotshot_testing::round_builder::RoundSetupBuilder { - scheduled_changes: vec![ - hotshot_testing::round_builder::ChangeNode { - idx: 5, - view: 1, - updown: hotshot_testing::round_builder::UpDown::Down - }, - ], - ..Default::default() - }), - check: None - }, - Slow: true, -); - -// This test simulates two permanent failed nodes +// use hotshot_testing_macros::cross_all_types; // -// With n=7, this is the maximum failures that the network can tolerate -cross_all_types!( - TestName: double_permanent_failure_slow, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 7, - start_nodes: 7, - num_succeeds: 10, - failure_threshold: 20, - timing_data: hotshot_testing::test_builder::TimingData { - next_view_timeout: 1000, - ..hotshot_testing::test_builder::TimingData::default() - }, - ..hotshot_testing::test_builder::TestMetadata::default() - }, - setup: - Some(hotshot_testing::round_builder::RoundSetupBuilder { - scheduled_changes: vec![ - hotshot_testing::round_builder::ChangeNode { - idx: 5, - view: 1, - updown: hotshot_testing::round_builder::UpDown::Down - }, - hotshot_testing::round_builder::ChangeNode { - idx: 6, - view: 1, - updown: hotshot_testing::round_builder::UpDown::Down }, - ], - ..Default::default() - }), - check: None - } - Slow: true, -); +// // This test simulates a single permanent failed node +// cross_all_types!( +// TestName: single_permanent_failure_slow, +// TestBuilder: hotshot_testing::test_builder::TestBuilder { +// metadata: hotshot_testing::test_builder::TestMetadata { +// total_nodes: 7, +// start_nodes: 7, +// num_succeeds: 10, +// timing_data: hotshot_testing::test_builder::TimingData { +// next_view_timeout: 1000, +// ..hotshot_testing::test_builder::TimingData::default() +// }, +// failure_threshold: 20, +// ..hotshot_testing::test_builder::TestMetadata::default() +// }, +// setup: +// Some(hotshot_testing::round_builder::RoundSetupBuilder { +// scheduled_changes: vec![ +// hotshot_testing::round_builder::ChangeNode { +// idx: 5, +// view: 1, +// updown: hotshot_testing::round_builder::UpDown::Down +// }, +// ], +// ..Default::default() +// }), +// check: None +// }, +// Slow: true, +// ); +// +// // This test simulates two permanent failed nodes +// // +// // With n=7, this is the maximum failures that the network can tolerate +// cross_all_types!( +// TestName: double_permanent_failure_slow, +// TestBuilder: hotshot_testing::test_builder::TestBuilder { +// metadata: hotshot_testing::test_builder::TestMetadata { +// total_nodes: 7, +// start_nodes: 7, +// num_succeeds: 10, +// failure_threshold: 20, +// timing_data: hotshot_testing::test_builder::TimingData { +// next_view_timeout: 1000, +// ..hotshot_testing::test_builder::TimingData::default() +// }, +// ..hotshot_testing::test_builder::TestMetadata::default() +// }, +// setup: +// Some(hotshot_testing::round_builder::RoundSetupBuilder { +// scheduled_changes: vec![ +// hotshot_testing::round_builder::ChangeNode { +// idx: 5, +// view: 1, +// updown: hotshot_testing::round_builder::UpDown::Down +// }, +// hotshot_testing::round_builder::ChangeNode { +// idx: 6, +// view: 1, +// updown: hotshot_testing::round_builder::UpDown::Down }, +// ], +// ..Default::default() +// }), +// check: None +// } +// Slow: true, +// ); diff --git a/testing-macros/tests/cross/smoke.rs b/testing-macros/tests/cross/smoke.rs index 87c2aaf41c..85238c90f5 100644 --- a/testing-macros/tests/cross/smoke.rs +++ b/testing-macros/tests/cross/smoke.rs @@ -1,29 +1,29 @@ -use hotshot_testing_macros::cross_all_types; - -cross_all_types!( - TestName: ten_tx_five_nodes_slow, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 5, - start_nodes: 5, - num_succeeds: 10, - ..Default::default() - }, - ..Default::default() - }, - Slow: true -); - -cross_all_types!( - TestName: ten_tx_seven_nodes_slow, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 7, - start_nodes: 7, - num_succeeds: 10, - ..Default::default() - }, - ..Default::default() - }, - Slow: true -); +// use hotshot_testing_macros::cross_all_types; +// +// cross_all_types!( +// TestName: ten_tx_five_nodes_slow, +// TestBuilder: hotshot_testing::test_builder::TestBuilder { +// metadata: hotshot_testing::test_builder::TestMetadata { +// total_nodes: 5, +// start_nodes: 5, +// num_succeeds: 10, +// ..Default::default() +// }, +// ..Default::default() +// }, +// Slow: true +// ); +// +// cross_all_types!( +// TestName: ten_tx_seven_nodes_slow, +// TestBuilder: hotshot_testing::test_builder::TestBuilder { +// metadata: hotshot_testing::test_builder::TestMetadata { +// total_nodes: 7, +// start_nodes: 7, +// num_succeeds: 10, +// ..Default::default() +// }, +// ..Default::default() +// }, +// Slow: true +// ); diff --git a/testing-macros/tests/integration/failures.rs b/testing-macros/tests/integration/failures.rs index 71a1ba735a..76f2da60c8 100644 --- a/testing-macros/tests/integration/failures.rs +++ b/testing-macros/tests/integration/failures.rs @@ -1,75 +1,25 @@ +use hotshot_testing::completion_task::CompletionTaskDescription; +use hotshot_testing::completion_task::TimeBasedCompletionTaskDescription; +use hotshot_testing::node_types::TestTypes; +use hotshot_testing::node_types::{Libp2pImpl, MemoryImpl}; +use hotshot_testing::test_builder::TestMetadata; use hotshot_testing_macros::cross_tests; cross_tests!( - DemoType: [ (hotshot::demos::vdemo::VDemoState) ], - SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], - CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], - Storage: [ hotshot::traits::implementations::MemoryStorage ], - Time: [ hotshot_types::data::ViewNumber ], - TestName: single_permanent_failure_fast, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 7, - start_nodes: 7, - num_succeeds: 10, - timing_data: hotshot_testing::test_builder::TimingData { - next_view_timeout: 1000, - ..hotshot_testing::test_builder::TimingData::default() - }, - failure_threshold: 20, - ..hotshot_testing::test_builder::TestMetadata::default() - }, - setup: - Some(hotshot_testing::round_builder::RoundSetupBuilder { - scheduled_changes: vec![ - hotshot_testing::round_builder::ChangeNode { - idx: 5, - view: 1, - updown: hotshot_testing::round_builder::UpDown::Down - }, - ], - ..Default::default() - }), - check: None - }, - Slow: false, -); - -cross_tests!( - DemoType: [ (hotshot::demos::vdemo::VDemoState) ], - SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], - CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], - Storage: [ hotshot::traits::implementations::MemoryStorage ], - Time: [ hotshot_types::data::ViewNumber ], - TestName: double_permanent_failure_fast, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 7, - start_nodes: 7, - num_succeeds: 10, - failure_threshold: 20, - timing_data: hotshot_testing::test_builder::TimingData { - next_view_timeout: 1000, - ..hotshot_testing::test_builder::TimingData::default() - }, - ..hotshot_testing::test_builder::TestMetadata::default() - }, - setup: - Some(hotshot_testing::round_builder::RoundSetupBuilder { - scheduled_changes: vec![ - hotshot_testing::round_builder::ChangeNode { - idx: 5, - view: 1, - updown: hotshot_testing::round_builder::UpDown::Down - }, - hotshot_testing::round_builder::ChangeNode { - idx: 6, - view: 1, - updown: hotshot_testing::round_builder::UpDown::Down }, - ], - ..Default::default() - }), - check: None - }, - Slow: false, + Metadata: + TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: std::time::Duration::from_secs(60), + }, + ), + ..TestMetadata::default() + }, + Ignore: false, + TestName: single_permanent_failure_fast, + // types that implement nodetype + Types: [TestTypes], + // forall impl in Impls, forall type in Types, impl : NodeImplementation + Impls: [MemoryImpl, Libp2pImpl], ); diff --git a/testing-macros/tests/integration/smoke.rs b/testing-macros/tests/integration/smoke.rs index e784bddf45..1f23b24889 100644 --- a/testing-macros/tests/integration/smoke.rs +++ b/testing-macros/tests/integration/smoke.rs @@ -1,39 +1,39 @@ -use hotshot_testing_macros::cross_tests; - -cross_tests!( - DemoType: [(hotshot::demos::vdemo::VDemoState)], - SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], - CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], - Storage: [ hotshot::traits::implementations::MemoryStorage ], - Time: [ hotshot_types::data::ViewNumber ], - TestName: ten_tx_five_nodes_fast, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 5, - start_nodes: 5, - num_succeeds: 10, - ..Default::default() - }, - ..Default::default() - }, - Slow: false, -); - -cross_tests!( - DemoType: [(hotshot::demos::vdemo::VDemoState) ], - SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], - CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], - Storage: [ hotshot::traits::implementations::MemoryStorage ], - Time: [ hotshot_types::data::ViewNumber ], - TestName: ten_tx_seven_nodes_fast, - TestBuilder: hotshot_testing::test_builder::TestBuilder { - metadata: hotshot_testing::test_builder::TestMetadata { - total_nodes: 7, - start_nodes: 7, - num_succeeds: 10, - ..Default::default() - }, - ..Default::default() - }, - Slow: false, -); +// use hotshot_testing_macros::cross_tests; +// +// cross_tests!( +// DemoType: [(hotshot::demos::vdemo::VDemoState)], +// SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], +// CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], +// Storage: [ hotshot::traits::implementations::MemoryStorage ], +// Time: [ hotshot_types::data::ViewNumber ], +// TestName: ten_tx_five_nodes_fast, +// TestBuilder: hotshot_testing::test_builder::TestBuilder { +// metadata: hotshot_testing::test_builder::TestMetadata { +// total_nodes: 5, +// start_nodes: 5, +// num_succeeds: 10, +// ..Default::default() +// }, +// ..Default::default() +// }, +// Slow: false, +// ); +// +// cross_tests!( +// DemoType: [(hotshot::demos::vdemo::VDemoState) ], +// SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], +// CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], +// Storage: [ hotshot::traits::implementations::MemoryStorage ], +// Time: [ hotshot_types::data::ViewNumber ], +// TestName: ten_tx_seven_nodes_fast, +// TestBuilder: hotshot_testing::test_builder::TestBuilder { +// metadata: hotshot_testing::test_builder::TestMetadata { +// total_nodes: 7, +// start_nodes: 7, +// num_succeeds: 10, +// ..Default::default() +// }, +// ..Default::default() +// }, +// Slow: false, +// ); From f565c825c9bf2793a144436ced598893827f2962 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 31 Dec 2023 14:06:20 -0500 Subject: [PATCH 0615/1393] feat: compiling view sync task --- testing/src/completion_task.rs | 4 +- testing/src/lib.rs | 3 + testing/src/test_builder.rs | 31 ++++++--- testing/src/test_launcher.rs | 19 +++++- testing/src/test_runner.rs | 16 ++++- testing/src/view_sync_task.rs | 114 +++++++++++++++++++++++++++++++++ 6 files changed, 172 insertions(+), 15 deletions(-) create mode 100644 testing/src/view_sync_task.rs diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 669148682d..241b84e27b 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -19,11 +19,11 @@ use super::{test_launcher::TaskGenerator, GlobalTestEvent}; /// the idea here is to run as long as we want -/// Data Availability task error +/// Completion Task error #[derive(Snafu, Debug)] pub struct CompletionTaskErr {} -/// Data availability task state +/// Completion task state pub struct CompletionTask> { pub(crate) test_event_stream: ChannelStream, pub(crate) handles: Vec>, diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 7e39871ed1..48426a2f7b 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -30,6 +30,9 @@ pub mod completion_task; /// task to spin nodes up and down pub mod spinning_task; +/// task for checking if view sync got activated +pub mod view_sync_task; + /// block types pub mod block_types; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index e858816e3f..af51ac9c09 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -12,7 +12,7 @@ use hotshot_types::{ use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; use crate::{ spinning_task::SpinningTaskDescription, - test_launcher::{ResourceGenerators, TestLauncher}, + test_launcher::{ResourceGenerators, TestLauncher}, view_sync_task::ViewSyncTaskDescription, }; use super::{ @@ -58,6 +58,8 @@ pub struct TestMetadata { pub min_transactions: usize, /// timing data pub timing_data: TimingData, + /// view sync check task + pub view_sync_properties: ViewSyncTaskDescription } impl Default for TimingData { @@ -75,10 +77,11 @@ impl Default for TimingData { impl TestMetadata { pub fn default_stress() -> Self { + let num_nodes = 100; TestMetadata { num_bootstrap_nodes: 15, - total_nodes: 100, - start_nodes: 100, + total_nodes: num_nodes, + start_nodes: num_nodes, overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 50, check_leaf: true, @@ -95,14 +98,16 @@ impl TestMetadata { round_start_delay: 25, ..TimingData::default() }, + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), ..TestMetadata::default() } } pub fn default_multiple_rounds() -> TestMetadata { + let num_nodes = 10; TestMetadata { - total_nodes: 10, - start_nodes: 10, + total_nodes: num_nodes, + start_nodes: num_nodes, overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 20, check_leaf: true, @@ -117,15 +122,17 @@ impl TestMetadata { round_start_delay: 25, ..TimingData::default() }, + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), ..TestMetadata::default() } } /// Default setting with 20 nodes and 8 views of successful views. pub fn default_more_nodes() -> TestMetadata { + let num_nodes = 20; TestMetadata { - total_nodes: 20, - start_nodes: 20, + total_nodes: num_nodes, + start_nodes: num_nodes, num_bootstrap_nodes: 20, // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the @@ -146,6 +153,7 @@ impl TestMetadata { next_view_timeout: 5000, ..TimingData::default() }, + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), ..TestMetadata::default() } } @@ -154,11 +162,12 @@ impl TestMetadata { impl Default for TestMetadata { /// by default, just a single round fn default() -> Self { + let num_nodes = 5; Self { timing_data: TimingData::default(), min_transactions: 0, - total_nodes: 5, - start_nodes: 5, + total_nodes: num_nodes, + start_nodes: num_nodes, num_bootstrap_nodes: 5, da_committee_size: 5, spinning_properties: SpinningTaskDescription { @@ -173,6 +182,7 @@ impl Default for TestMetadata { duration: Duration::from_millis(10000), }, ), + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes) } } } @@ -195,6 +205,7 @@ impl TestMetadata { completion_task_description, overall_safety_properties, spinning_properties, + view_sync_properties, .. } = self.clone(); @@ -263,6 +274,7 @@ impl TestMetadata { let completion_task_generator = completion_task_description.build_and_launch(); let overall_safety_task_generator = overall_safety_properties.build(); let spinning_task_generator = spinning_properties.build(); + let view_sync_task_generator = view_sync_properties.build(); TestLauncher { resource_generator: ResourceGenerators { channel_generator: >::gen_comm_channels( @@ -278,6 +290,7 @@ impl TestMetadata { overall_safety_task_generator, completion_task_generator, spinning_task_generator, + view_sync_task_generator, hooks: vec![], } .modify_default_config(mod_config) diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 3771eb841f..4a58df5009 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -10,7 +10,7 @@ use hotshot_task::{ }; use hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; -use crate::spinning_task::SpinningTask; +use crate::{spinning_task::SpinningTask, view_sync_task::ViewSyncTask}; use super::{ completion_task::CompletionTask, overall_safety_task::OverallSafetyTask, @@ -71,9 +71,11 @@ pub struct TestLauncher> { pub completion_task_generator: TaskGenerator>, /// overall safety task generator pub overall_safety_task_generator: TaskGenerator>, - + /// task for spinning nodes up/down pub spinning_task_generator: TaskGenerator>, - + /// task for view sync + pub view_sync_task_generator: TaskGenerator>, + /// extra hooks in case we want to check additional things pub hooks: Vec, } @@ -133,6 +135,17 @@ impl> TestLauncher>, + ) -> Self { + Self { + view_sync_task_generator, + ..self + } + } + /// override resource generators pub fn with_resource_generator(self, resource_generator: ResourceGenerators) -> Self { Self { diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 515d4a8787..4d46256971 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -5,7 +5,7 @@ use super::{ }; use crate::{ spinning_task::{ChangeNode, UpDown}, - test_launcher::{Networks, TestLauncher}, + test_launcher::{Networks, TestLauncher}, view_sync_task::ViewSyncTask, }; use hotshot::{types::SystemContextHandle, Memberships}; @@ -146,6 +146,20 @@ where .await; task_runner = task_runner.add_task(id, "Test Overall Safety Task".to_string(), task); + /// add view sync task + let view_sync_task_state = ViewSyncTask { + test_event_stream: test_event_stream.clone(), + handles: nodes.clone(), + hit_view_sync: HashMap::new(), + }; + + let (id, task) = (launcher.view_sync_task_generator)( + view_sync_task_state, + registry.clone(), + test_event_stream.clone(), + ).await; + task_runner = task_runner.add_task(id, "View Sync Task".to_string(), task); + // wait for networks to be ready for node in &nodes { node.networks.0.wait_for_ready().await; diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs new file mode 100644 index 0000000000..e99329e998 --- /dev/null +++ b/testing/src/view_sync_task.rs @@ -0,0 +1,114 @@ +use std::{sync::Arc, collections::{HashSet, HashMap}}; +use futures::FutureExt; +use hotshot_task::task::HotShotTaskTypes; +use async_compatibility_layer::channel::UnboundedStream; +use hotshot_task::{task_impls::{TaskBuilder, HSTWithEventAndMessage}, task::{FilterEvent, HandleMessage, HandleEvent, TS}, MergeN, event_stream::ChannelStream}; +use hotshot_task_impls::events::HotShotEvent; +use hotshot_types::traits::node_implementation::{TestableNodeImplementation, NodeType}; +use snafu::Snafu; + +use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; + +/// ViewSync Task error +#[derive(Snafu, Debug)] +pub struct ViewSyncTaskErr {} + +/// ViewSync task state +pub struct ViewSyncTask> { + /// the global event stream + pub(crate) test_event_stream: ChannelStream, + /// the node handles + pub(crate) handles: Vec>, + /// nodes that hit view sync + pub(crate) hit_view_sync: HashMap::Time> +} + +impl> TS for ViewSyncTask {} + +/// ViewSync task types +pub type ViewSyncTaskTypes = HSTWithEventAndMessage< + ViewSyncTaskErr, + GlobalTestEvent, + ChannelStream, + (usize, HotShotEvent), + MergeN>>, + ViewSyncTask, +>; + +#[derive(Clone, Debug, Copy)] +pub enum ShouldHitViewSync { + /// the node should hit view sync + Yes, + /// the node should not hit view sync + No, + /// don't care if the node should hit view sync + DontCare +} + +/// Description for a view sync task. +#[derive(Clone, Debug)] +pub enum ViewSyncTaskDescription { + /// (min, max) number nodes that may hit view sync, inclusive + Threshold(usize, usize), + /// node idx -> whether or not the node should hit view sync + /// if node not in map, assumed to be `ShouldHItViewSync::DontCare` + Precise(HashMap) +} + +impl ViewSyncTaskDescription { + pub fn build>( + self, + ) -> TaskGenerator> { + Box::new(move |mut state, mut registry, test_event_stream| { + async move { + + let event_handler = HandleEvent::>(Arc::new(move |event, state| { + async move { + match event { + GlobalTestEvent::ShutDown => { + todo!() + // logic checking stuff + } + } + }.boxed() + + })); + + let message_handler = HandleMessage::>(Arc::new( + move |msg, mut state| { + todo!() + } + )); + let mut streams = vec![]; + for handle in &mut state.handles { + let stream = handle.handle.get_internal_event_stream_known_impl(FilterEvent::default()).await.0; + streams.push(stream); + } + + let builder = TaskBuilder::>::new( + "Test Completion Task".to_string(), + ) + .register_event_stream(test_event_stream, FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler) + .register_message_handler(message_handler) + .register_message_stream(MergeN::new(streams)); + let task_id = builder.get_task_id().unwrap(); + (task_id, ViewSyncTaskTypes::build(builder).launch()) + }.boxed() + }) + + // match self { + // ViewSyncTaskDescription::Threshold(threshold) => { + // + // }, + // ViewSyncTaskDescription::Precise(map) => { + // + // } + // } + } + +} From ae8130878a145a3f45822e7bac35242c3e644feb Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 31 Dec 2023 14:52:34 -0500 Subject: [PATCH 0616/1393] feat: view sync check task --- testing/src/test_builder.rs | 7 +- testing/src/test_runner.rs | 11 +-- testing/src/view_sync_task.rs | 162 ++++++++++++++++++++++++++-------- 3 files changed, 135 insertions(+), 45 deletions(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index af51ac9c09..0d0e617513 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -12,7 +12,8 @@ use hotshot_types::{ use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; use crate::{ spinning_task::SpinningTaskDescription, - test_launcher::{ResourceGenerators, TestLauncher}, view_sync_task::ViewSyncTaskDescription, + test_launcher::{ResourceGenerators, TestLauncher}, + view_sync_task::ViewSyncTaskDescription, }; use super::{ @@ -59,7 +60,7 @@ pub struct TestMetadata { /// timing data pub timing_data: TimingData, /// view sync check task - pub view_sync_properties: ViewSyncTaskDescription + pub view_sync_properties: ViewSyncTaskDescription, } impl Default for TimingData { @@ -182,7 +183,7 @@ impl Default for TestMetadata { duration: Duration::from_millis(10000), }, ), - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes) + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), } } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 4d46256971..776f9268e5 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -5,7 +5,8 @@ use super::{ }; use crate::{ spinning_task::{ChangeNode, UpDown}, - test_launcher::{Networks, TestLauncher}, view_sync_task::ViewSyncTask, + test_launcher::{Networks, TestLauncher}, + view_sync_task::ViewSyncTask, }; use hotshot::{types::SystemContextHandle, Memberships}; @@ -146,18 +147,18 @@ where .await; task_runner = task_runner.add_task(id, "Test Overall Safety Task".to_string(), task); - /// add view sync task + // add view sync task let view_sync_task_state = ViewSyncTask { - test_event_stream: test_event_stream.clone(), handles: nodes.clone(), - hit_view_sync: HashMap::new(), + hit_view_sync: HashSet::new(), }; let (id, task) = (launcher.view_sync_task_generator)( view_sync_task_state, registry.clone(), test_event_stream.clone(), - ).await; + ) + .await; task_runner = task_runner.add_task(id, "View Sync Task".to_string(), task); // wait for networks to be ready diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index e99329e998..f3a26ce157 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -1,26 +1,34 @@ -use std::{sync::Arc, collections::{HashSet, HashMap}}; -use futures::FutureExt; -use hotshot_task::task::HotShotTaskTypes; use async_compatibility_layer::channel::UnboundedStream; -use hotshot_task::{task_impls::{TaskBuilder, HSTWithEventAndMessage}, task::{FilterEvent, HandleMessage, HandleEvent, TS}, MergeN, event_stream::ChannelStream}; +use futures::FutureExt; +use hotshot_task::task::{HotShotTaskCompleted, HotShotTaskTypes}; +use hotshot_task::{ + event_stream::ChannelStream, + task::{FilterEvent, HandleEvent, HandleMessage, TS}, + task_impls::{HSTWithEventAndMessage, TaskBuilder}, + MergeN, +}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::traits::node_implementation::{TestableNodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; /// ViewSync Task error -#[derive(Snafu, Debug)] -pub struct ViewSyncTaskErr {} +#[derive(Snafu, Debug, Clone)] +pub struct ViewSyncTaskErr { + hit_view_sync: HashSet, +} /// ViewSync task state pub struct ViewSyncTask> { - /// the global event stream - pub(crate) test_event_stream: ChannelStream, /// the node handles pub(crate) handles: Vec>, /// nodes that hit view sync - pub(crate) hit_view_sync: HashMap::Time> + pub(crate) hit_view_sync: HashSet, } impl> TS for ViewSyncTask {} @@ -42,7 +50,7 @@ pub enum ShouldHitViewSync { /// the node should not hit view sync No, /// don't care if the node should hit view sync - DontCare + Ignore, } /// Description for a view sync task. @@ -52,7 +60,7 @@ pub enum ViewSyncTaskDescription { Threshold(usize, usize), /// node idx -> whether or not the node should hit view sync /// if node not in map, assumed to be `ShouldHItViewSync::DontCare` - Precise(HashMap) + Precise(HashMap), } impl ViewSyncTaskDescription { @@ -61,44 +69,125 @@ impl ViewSyncTaskDescription { ) -> TaskGenerator> { Box::new(move |mut state, mut registry, test_event_stream| { async move { - - let event_handler = HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - todo!() - // logic checking stuff + let event_handler = + HandleEvent::>(Arc::new(move |event, state| { + let self_dup = self.clone(); + async move { + match event { + GlobalTestEvent::ShutDown => match self_dup.clone() { + ViewSyncTaskDescription::Threshold(min, max) => { + let num_hits = state.hit_view_sync.len(); + if min <= num_hits && num_hits <= max { + (Some(HotShotTaskCompleted::ShutDown), state) + } else { + ( + Some(HotShotTaskCompleted::Error(Box::new( + ViewSyncTaskErr { + hit_view_sync: state.hit_view_sync.clone(), + }, + ))), + state, + ) + } + } + ViewSyncTaskDescription::Precise(map) => { + for (id, should_hit) in map { + match should_hit { + ShouldHitViewSync::Yes => { + if !state.hit_view_sync.contains(&id) { + return ( + Some(HotShotTaskCompleted::Error( + Box::new(ViewSyncTaskErr { + hit_view_sync: state + .hit_view_sync + .clone(), + }), + )), + state, + ); + } + } + ShouldHitViewSync::No => { + if state.hit_view_sync.contains(&id) { + return ( + Some(HotShotTaskCompleted::Error( + Box::new(ViewSyncTaskErr { + hit_view_sync: state + .hit_view_sync + .clone(), + }), + )), + state, + ); + } + } + ShouldHitViewSync::Ignore => {} + } + } + (Some(HotShotTaskCompleted::ShutDown), state) + } + }, } } - }.boxed() - - })); + .boxed() + })); let message_handler = HandleMessage::>(Arc::new( - move |msg, mut state| { - todo!() + // NOTE: could short circuit on entering view sync if we're not supposed to + // enter view sync. I opted not to do this just to gather more information + // (since we'll fail the test later anyway) + move |(id, msg), mut state| { + async move { + match msg { + // all the view sync events + HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::ViewSyncPreCommitVoteSend(_) + | HotShotEvent::ViewSyncCommitVoteSend(_) + | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncTrigger(_) => { + state.hit_view_sync.insert(id); + } + _ => (), + } + (None, state) } - )); + .boxed() + }, + )); let mut streams = vec![]; for handle in &mut state.handles { - let stream = handle.handle.get_internal_event_stream_known_impl(FilterEvent::default()).await.0; + let stream = handle + .handle + .get_internal_event_stream_known_impl(FilterEvent::default()) + .await + .0; streams.push(stream); } let builder = TaskBuilder::>::new( "Test Completion Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)); + ) + .register_event_stream(test_event_stream, FilterEvent::default()) + .await + .register_registry(&mut registry) + .await + .register_state(state) + .register_event_handler(event_handler) + .register_message_handler(message_handler) + .register_message_stream(MergeN::new(streams)); let task_id = builder.get_task_id().unwrap(); (task_id, ViewSyncTaskTypes::build(builder).launch()) - }.boxed() + } + .boxed() }) // match self { @@ -110,5 +199,4 @@ impl ViewSyncTaskDescription { // } // } } - } From 3bc74e148fa4ba03d02a9097b9bc7386ff1e8f9a Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 31 Dec 2023 15:11:57 -0500 Subject: [PATCH 0617/1393] fix: catchup test --- testing/tests/catchup.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 92c718436f..078a9aa053 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -218,6 +218,8 @@ async fn test_catchup_in_view_sync() { metadata.timing_data = timing_data; metadata.start_nodes = 18; metadata.total_nodes = 20; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(25, catchup_nodes)], From f9ce1de39287f94ff54b90e0277996284d55eed2 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 31 Dec 2023 19:35:49 -0500 Subject: [PATCH 0618/1393] fix: catchup test --- testing/tests/catchup.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 078a9aa053..642819143c 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -37,6 +37,9 @@ async fn test_catchup() { metadata.start_nodes = 18; metadata.total_nodes = 20; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + metadata.spinning_properties = SpinningTaskDescription { // Start the nodes before their leadership. node_changes: vec![(15, catchup_nodes)], From 69957ca767ec779bd3085e9589eccb8b0770a3d6 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 1 Jan 2024 10:21:30 -0500 Subject: [PATCH 0619/1393] chore: silence libp2p lints --- constants/Cargo.toml | 3 + hotshot-qc/Cargo.toml | 3 + hotshot-qc/src/lib.rs | 3 - hotshot-signature-key/Cargo.toml | 3 + hotshot-signature-key/src/lib.rs | 3 - hotshot-stake-table/Cargo.toml | 3 + hotshot-stake-table/src/lib.rs | 3 - hotshot-stake-table/src/vec_based.rs | 8 +- hotshot-state-prover/Cargo.toml | 3 + hotshot/Cargo.toml | 5 +- hotshot/src/lib.rs | 13 -- libp2p-networking/Cargo.toml | 3 + libp2p-networking/examples/counter.rs | 1 + libp2p-networking/src/lib.rs | 8 -- .../src/network/behaviours/dht/cache.rs | 15 ++- .../src/network/behaviours/dht/mod.rs | 6 + .../src/network/behaviours/direct_message.rs | 1 + .../src/network/behaviours/gossip.rs | 1 + libp2p-networking/src/network/def.rs | 3 + libp2p-networking/src/network/mod.rs | 3 + libp2p-networking/src/network/node.rs | 4 + libp2p-networking/src/network/node/handle.rs | 6 + libp2p-networking/tests/common/mod.rs | 10 +- libp2p-networking/tests/counter.rs | 124 ++++++++++-------- orchestrator/Cargo.toml | 3 + task-impls/Cargo.toml | 3 + task-impls/src/lib.rs | 10 -- task/Cargo.toml | 3 + task/src/lib.rs | 8 -- testing/Cargo.toml | 3 + testing/src/lib.rs | 2 - types/Cargo.toml | 3 + types/src/lib.rs | 10 -- types/src/traits/election.rs | 2 +- utils/Cargo.toml | 3 + utils/src/bincode.rs | 2 +- utils/src/lib.rs | 9 -- web_server/Cargo.toml | 3 + 38 files changed, 162 insertions(+), 137 deletions(-) diff --git a/constants/Cargo.toml b/constants/Cargo.toml index 6f04253d2e..bfa2c2d314 100644 --- a/constants/Cargo.toml +++ b/constants/Cargo.toml @@ -3,3 +3,6 @@ name = "hotshot-constants" version.workspace = true [dependencies] + +[lints] +workspace = true diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 9c53fb335a..96956c6302 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -34,3 +34,6 @@ hotshot-stake-table = { path = "../hotshot-stake-table" } default = ["parallel"] std = ["ark-std/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel"] + +[lints] +workspace = true diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs index 7c56aa6393..5b88c4f0fb 100644 --- a/hotshot-qc/src/lib.rs +++ b/hotshot-qc/src/lib.rs @@ -1,7 +1,4 @@ //! This crates offer implementations of quorum certificates used in HotShot. -#![deny(warnings)] -#![deny(missing_docs)] - pub mod bit_vector; pub mod bit_vector_old; pub mod snarked; diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index a98f1b73f7..2fae2f1706 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -21,3 +21,6 @@ rand_chacha = { workspace = true } serde = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } + +[lints] +workspace = true diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs index 2e8f711bab..6f59cd4b35 100644 --- a/hotshot-signature-key/src/lib.rs +++ b/hotshot-signature-key/src/lib.rs @@ -1,5 +1,2 @@ //! This crates offer implementations of quorum certificates used in HotShot. -#![deny(warnings)] -#![deny(missing_docs)] - pub mod bn254; diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 4cb53eb2a9..0f8a8b709a 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -33,3 +33,6 @@ rand_chacha = { workspace = true } default = ["parallel"] std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] + +[lints] +workspace = true diff --git a/hotshot-stake-table/src/lib.rs b/hotshot-stake-table/src/lib.rs index e2a6e80d36..f3b7b7be39 100644 --- a/hotshot-stake-table/src/lib.rs +++ b/hotshot-stake-table/src/lib.rs @@ -1,7 +1,4 @@ //! This crate contains some stake table implementations for HotShot system. -#![deny(warnings)] -#![deny(missing_docs)] - pub mod config; pub mod mt_based; pub mod utils; diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 35ce56de27..b0faaf8aa0 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -137,7 +137,7 @@ where } fn len(&self, version: SnapshotVersion) -> Result { - Ok(self.get_version(version)?.bls_keys.len()) + Ok(self.get_version(&version)?.bls_keys.len()) } fn contains_key(&self, key: &Self::Key) -> bool { @@ -149,7 +149,7 @@ where version: SnapshotVersion, key: &Self::Key, ) -> Result { - let table = self.get_version(version)?; + let table = self.get_version(&version)?; let pos = self.lookup_pos(key)?; if pos >= table.bls_keys.len() { Err(StakeTableError::KeyNotFound) @@ -172,7 +172,7 @@ where version: SnapshotVersion, key: &Self::Key, ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError> { - let table = self.get_version(version)?; + let table = self.get_version(&version)?; let pos = self.lookup_pos(key)?; if pos >= table.bls_keys.len() { Err(StakeTableError::KeyNotFound) @@ -343,7 +343,7 @@ where fn get_version( &self, - version: SnapshotVersion, + version: &SnapshotVersion, ) -> Result<&StakeTableSnapshot, StakeTableError> { match version { SnapshotVersion::Head => Ok(&self.head), diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml index 826b9c0efd..5f0fc9936d 100644 --- a/hotshot-state-prover/Cargo.toml +++ b/hotshot-state-prover/Cargo.toml @@ -33,3 +33,6 @@ hotshot-stake-table = { path = "../hotshot-stake-table" } default = ["parallel"] std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] + +[lints] +workspace = true diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 2cb0b08fc0..67a1d88d58 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -130,4 +130,7 @@ blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } serde_json = "1.0.108" toml = { workspace = true } -hotshot-testing = { path = "../testing" } \ No newline at end of file +hotshot-testing = { path = "../testing" } + +[lints] +workspace = true \ No newline at end of file diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ad2795ab95..b566743286 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -1,16 +1,3 @@ -#![warn( - clippy::all, - clippy::pedantic, - rust_2018_idioms, - missing_docs, - clippy::missing_docs_in_private_items, - clippy::panic -)] -#![allow(clippy::module_name_repetitions)] -// Temporary -#![allow(clippy::cast_possible_truncation)] -// Temporary, should be disabled after the completion of the NodeImplementation refactor -#![allow(clippy::type_complexity)] //! Provides a generic rust implementation of the `HotShot` BFT protocol //! //! See the [protocol documentation](https://github.com/EspressoSystems/hotshot-spec) for a protocol description. diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ed86459d3d..50625f1f8d 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -75,3 +75,6 @@ netlink-sys = { git = "https://github.com/espressosystems/netlink.git", version ], optional = true } netlink-packet-generic = { git = "https://github.com/espressosystems/netlink.git", version = "0.2.0", optional = true } + +[lints] +workspace = true diff --git a/libp2p-networking/examples/counter.rs b/libp2p-networking/examples/counter.rs index b3c33fdb55..28d18c128b 100644 --- a/libp2p-networking/examples/counter.rs +++ b/libp2p-networking/examples/counter.rs @@ -14,6 +14,7 @@ // #[async_main] // #[instrument] // async fn main() -> Result<()> { +/// this is dead code fn main() -> Result<(), ()> { // let args = CliOpt::parse(); // diff --git a/libp2p-networking/src/lib.rs b/libp2p-networking/src/lib.rs index 210cd22421..c5b2546c4d 100644 --- a/libp2p-networking/src/lib.rs +++ b/libp2p-networking/src/lib.rs @@ -1,11 +1,3 @@ -#![warn( - clippy::all, - clippy::pedantic, - rust_2018_idioms, - missing_docs, - clippy::panic -)] -#![allow(clippy::module_name_repetitions)] //! Library for p2p communication /// Example message used by the UI library diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs index a278984222..f33c857f89 100644 --- a/libp2p-networking/src/network/behaviours/dht/cache.rs +++ b/libp2p-networking/src/network/behaviours/dht/cache.rs @@ -44,12 +44,16 @@ pub enum CacheError { }, } +/// configuration describing the cache #[derive(Clone, derive_builder::Builder, custom_debug::Debug, Default)] pub struct Config { #[builder(default = "Some(\"dht.cache\".to_string())")] + /// filename to save to pub filename: Option, #[builder(default = "Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 16)")] + /// time before entry expires pub expiry: Duration, + /// max differences with disk before write #[builder(default = "4")] pub max_disk_parity_delta: u32, } @@ -60,20 +64,20 @@ impl Default for Cache { } } +/// key value cache pub struct Cache { /// the cache's config config: Config, - /// the cache for records (key -> value) inner: Arc, Vec>>, /// the expiries for the dht cache, in order (expiry time -> key) expiries: Arc>>>, - /// number of inserts since the last save disk_parity_delta: Arc, } impl Cache { + /// create a new cache pub async fn new(config: Config) -> Self { let cache = Self { inner: Arc::new(DashMap::new()), @@ -90,6 +94,7 @@ impl Cache { cache } + /// load from file if configured to do so pub async fn load(&self) -> Result<(), CacheError> { if let Some(filename) = &self.config.filename { let encoded = std::fs::read(filename).context(DiskSnafu)?; @@ -111,6 +116,7 @@ impl Cache { Ok(()) } + /// save to file if configured to do so pub async fn save(&self) -> Result<(), CacheError> { if let Some(filename) = &self.config.filename { // prune first @@ -142,6 +148,7 @@ impl Cache { Ok(()) } + /// prune stale entries async fn prune(&self) { let now = SystemTime::now(); let mut expiries = self.expiries.write().await; @@ -162,6 +169,7 @@ impl Cache { } } + /// get value for `key` if exists pub async fn get(&self, key: &Vec) -> Option, Vec>> { // prune, save if necessary self.prune().await; @@ -171,6 +179,8 @@ impl Cache { self.inner.get(key) } + /// insert key and value into cache and experies, then save to disk if max disk parity delta + /// exceeded pub async fn insert(&self, key: Vec, value: Vec) { // insert into cache and expiries self.inner.insert(key.clone(), value); @@ -184,6 +194,7 @@ impl Cache { self.save_if_necessary().await; } + /// save to disk if differences is over max disk parity delta async fn save_if_necessary(&self) { let cur_disk_parity_delta = self.disk_parity_delta.load(Ordering::Relaxed); if cur_disk_parity_delta >= self.config.max_disk_parity_delta { diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 431f86a610..448922a0c3 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -5,6 +5,7 @@ use std::{ time::Duration, }; +/// a local caching layer for the DHT key value pairs mod cache; use async_compatibility_layer::art::async_block_on; @@ -23,7 +24,11 @@ use libp2p::{ use libp2p_identity::PeerId; use tracing::{error, info, warn}; +/// the number of nodes required to get an answer from +/// in order to trust that the answer is correct when retrieving from the DHT +/// TODO why are tehre two of these? pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; +/// the maximum number of nodes to query in the DHT at any one time const MAX_DHT_QUERY_SIZE: usize = 5; use self::cache::Cache; @@ -374,6 +379,7 @@ impl DHTBehaviour { impl DHTBehaviour { #![allow(clippy::too_many_lines)] + /// handle a DHT event fn dht_handle_event(&mut self, event: KademliaEvent) { match event { KademliaEvent::OutboundQueryProgressed { diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index 9b1fd1a0c0..67a1dad987 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -51,6 +51,7 @@ pub enum DMEvent { } impl DMBehaviour { + /// handle a direct message event fn handle_dm_event(&mut self, event: Event) { match event { Event::InboundFailure { diff --git a/libp2p-networking/src/network/behaviours/gossip.rs b/libp2p-networking/src/network/behaviours/gossip.rs index bfc946c9b5..458e11d9f3 100644 --- a/libp2p-networking/src/network/behaviours/gossip.rs +++ b/libp2p-networking/src/network/behaviours/gossip.rs @@ -36,6 +36,7 @@ pub enum GossipEvent { } impl GossipBehaviour { + /// handle a gossip event fn gossip_handle_event(&mut self, event: Event) { match event { Event::Message { message, .. } => { diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 06f7cb24e5..061e6251a5 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -22,6 +22,9 @@ use super::{ use libp2p_swarm_derive::NetworkBehaviour; +/// the number of nodes required to get an answer from +/// in order to trust that the answer is correct when retrieving from the DHT +/// TODO why are tehre two of these? pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; /// Overarching network behaviour performing: diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 05134a34bf..9359b1f8eb 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -1,7 +1,10 @@ /// networking behaviours wrapping libp2p's behaviours pub mod behaviours; +/// defines the swarm and network definition (internal) mod def; +/// libp2p network errors pub mod error; +/// functionality of a libp2p network node mod node; pub use self::{ diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index c42530a562..0a129cdf91 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -1,4 +1,8 @@ +/// configuration for the libp2p network (e.g. how it should be built) mod config; + +/// libp2p network handle +/// allows for control over the libp2p network mod handle; pub use self::{ diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index ae5a77a0c5..5dce442809 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -37,21 +37,26 @@ use tracing::{debug, info, instrument}; pub struct NetworkNodeHandle { /// network configuration network_config: NetworkNodeConfig, + /// the state of the replica state: Arc>, + /// send an action to the networkbehaviour send_network: UnboundedSender, /// the local address we're listening on listen_addr: Multiaddr, + /// the peer id of the networkbehaviour peer_id: PeerId, + /// human readable id id: usize, /// A list of webui listeners that are listening for changes on this node webui_listeners: Arc>>>, + /// network node receiver receiver: NetworkNodeReceiver, } @@ -75,6 +80,7 @@ pub struct NetworkNodeReceiver { } impl NetworkNodeReceiver { + /// recv a network event pub async fn recv(&self) -> Result { if self.killed.load(Ordering::Relaxed) { return Err(NetworkNodeHandleError::Killed); diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 067fa62cbd..31d8dc020b 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -82,7 +82,7 @@ fn gen_peerid_map(handles: &[Arc>]) -> HashMap(handles: &[Arc>]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); - for handle in handles.iter() { + for handle in handles{ warn!( "peer {}, connected to {:?}", handle.id(), @@ -130,13 +130,13 @@ pub async fn spin_up_swarms( .bound_addr(Some(addr)) .ttl(None) .republication_interval(None); - let node = NetworkNodeHandle::new( + let node = Box::pin(NetworkNodeHandle::new( config .build() .context(NodeConfigSnafu) .context(HandleSnafu)?, i, - ) + )) .await .context(HandleSnafu)?; let node = Arc::new(node); @@ -165,7 +165,7 @@ pub async fn spin_up_swarms( .build() .context(NodeConfigSnafu) .context(HandleSnafu)?; - let node = NetworkNodeHandle::new(regular_node_config.clone(), j + num_bootstrap) + let node = Box::pin(NetworkNodeHandle::new(regular_node_config.clone(), j + num_bootstrap)) .await .context(HandleSnafu)?; let node = Arc::new(node); @@ -190,7 +190,7 @@ pub async fn spin_up_swarms( .collect::>() ); - for handle in handles[0..num_of_nodes].iter() { + for handle in &handles[0..num_of_nodes] { let to_share = bootstrap_addrs.clone(); handle .add_known_peers( diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 7cfab40473..e883936fa3 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -1,5 +1,6 @@ -mod common; +#![allow(clippy::panic)] +mod common; use crate::common::print_connections; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -94,7 +95,7 @@ pub async fn counter_handle_network_event( handle .modify_state(|s| { if *s == from { - *s = to + *s = to; } }) .await; @@ -120,6 +121,8 @@ pub async fn counter_handle_network_event( /// `requester_handle` asks for `requestee_handle`'s state, /// and then `requester_handle` updates its state to equal `requestee_handle`. +/// # Panics +/// on error async fn run_request_response_increment<'a>( requester_handle: Arc>, requestee_handle: Arc>, @@ -158,14 +161,14 @@ async fn run_request_response_increment<'a>( let s1 = requester_handle.state().await; // sanity check - if s1 != new_state { + if s1 == new_state { + Ok(()) + } else { Err(TestError::State { id: requester_handle.id(), expected: new_state, actual: s1, }) - } else { - Ok(()) } } .await @@ -224,7 +227,7 @@ async fn run_gossip_round( } let mut failing = Vec::new(); - for handle in handles.iter() { + for handle in handles { let handle_state = handle.state().await; if handle_state != new_state { failing.push(handle.id()); @@ -243,15 +246,15 @@ async fn run_intersperse_many_rounds( handles: Vec>>, timeout: Duration, ) { - for i in 0..NUM_ROUNDS as u32 { + for i in 0..u32::try_from(NUM_ROUNDS).unwrap(){ if i % 2 == 0 { run_request_response_increment_all(&handles, timeout).await; } else { - run_gossip_rounds(&handles, 1, i, timeout).await + run_gossip_rounds(&handles, 1, i, timeout).await; } } - for h in handles.into_iter() { - assert_eq!(h.state().await, NUM_ROUNDS as u32); + for h in handles { + assert_eq!(h.state().await, u32::try_from(NUM_ROUNDS).unwrap()); } } @@ -273,35 +276,47 @@ async fn run_request_response_many_rounds( for _i in 0..NUM_ROUNDS { run_request_response_increment_all(&handles, timeout).await; } - for h in handles.into_iter() { - assert_eq!(h.state().await, NUM_ROUNDS as u32); + for h in handles { + assert_eq!(h.state().await, u32::try_from(NUM_ROUNDS).unwrap()); } } +/// runs one round of request response +/// # Panics +/// on error pub async fn run_request_response_one_round( handles: Vec>>, timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; - for h in handles.into_iter() { + for h in handles{ assert_eq!(h.state().await, 1); } } +/// runs multiple rounds of gossip +/// # Panics +/// on error pub async fn run_gossip_many_rounds( handles: Vec>>, timeout: Duration, ) { - run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await + run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await; } +/// runs one round of gossip +/// # Panics +/// on error async fn run_gossip_one_round( handles: Vec>>, timeout: Duration, ) { - run_gossip_rounds(&handles, 1, 0, timeout).await + run_gossip_rounds(&handles, 1, 0, timeout).await; } +/// runs many rounds of dht +/// # Panics +/// on error async fn run_dht_rounds( handles: &[Arc>], timeout: Duration, @@ -313,15 +328,16 @@ async fn run_dht_rounds( debug!("begin round {}", i); let msg_handle = get_random_handle(handles, &mut rng); let mut key = vec![0; DHT_KV_PADDING]; - key.push((starting_val + i) as u8); + let inc_val = u8::try_from(starting_val + i).unwrap(); + key.push(inc_val); let mut value = vec![0; DHT_KV_PADDING]; - value.push((starting_val + i) as u8); + value.push(inc_val); // put the key msg_handle.put_record(&key, &value).await.unwrap(); // get the key from the other nodes - for handle in handles.iter() { + for handle in handles { let result: Result, NetworkNodeHandleError> = handle.get_record_timeout(&key, timeout).await; match result { @@ -371,7 +387,7 @@ async fn run_request_response_increment_all( requestee_handle.modify_state(|s| *s += 1).await; info!("RR REQUESTEE IS {:?}", requestee_handle.peer_id()); let mut futs = Vec::new(); - for handle in handles.iter() { + for handle in handles { if handle.lookup_pid(requestee_handle.peer_id()).await.is_err() { error!("ERROR LOOKING UP REQUESTEE ADDRS"); } @@ -411,7 +427,7 @@ async fn run_request_response_increment_all( async_sleep(Duration::from_secs(1)).await; } - if results.read().await.iter().any(|x| x.is_err()) { + if results.read().await.iter().any(Result::is_err) { print_connections(handles).await; let mut states = vec![]; for handle in handles { @@ -429,14 +445,14 @@ async fn run_request_response_increment_all( #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_request_response_one_round() { - test_bed( + Box::pin(test_bed( run_request_response_one_round, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) - .await + )) + .await; } /// stress test of direct messsage @@ -447,14 +463,14 @@ async fn test_coverage_request_response_one_round() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_request_response_many_rounds() { - test_bed( + Box::pin(test_bed( run_request_response_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) - .await + )) + .await; } /// stress test of broadcast + direct message @@ -465,14 +481,14 @@ async fn test_coverage_request_response_many_rounds() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_intersperse_many_rounds() { - test_bed( + Box::pin(test_bed( run_intersperse_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) - .await + )) + .await; } /// stress teset that we can broadcast a message out and get counter increments @@ -483,13 +499,13 @@ async fn test_coverage_intersperse_many_rounds() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_gossip_many_rounds() { - test_bed( + Box::pin(test_bed( run_gossip_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) + )) .await; } @@ -501,13 +517,13 @@ async fn test_coverage_gossip_many_rounds() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_gossip_one_round() { - test_bed( + Box::pin(test_bed( run_gossip_one_round, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) + )) .await; } @@ -520,14 +536,14 @@ async fn test_coverage_gossip_one_round() { #[instrument] #[ignore] async fn test_stress_request_response_one_round() { - test_bed( + Box::pin(test_bed( run_request_response_one_round, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) - .await + )) + .await; } /// stress test of direct messsage @@ -539,14 +555,14 @@ async fn test_stress_request_response_one_round() { #[instrument] #[ignore] async fn test_stress_request_response_many_rounds() { - test_bed( + Box::pin(test_bed( run_request_response_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) - .await + )) + .await; } /// stress test of broadcast + direct message @@ -558,14 +574,14 @@ async fn test_stress_request_response_many_rounds() { #[instrument] #[ignore] async fn test_stress_intersperse_many_rounds() { - test_bed( + Box::pin(test_bed( run_intersperse_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) - .await + )) + .await; } /// stress teset that we can broadcast a message out and get counter increments @@ -577,13 +593,13 @@ async fn test_stress_intersperse_many_rounds() { #[instrument] #[ignore] async fn test_stress_gossip_many_rounds() { - test_bed( + Box::pin(test_bed( run_gossip_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) + )) .await; } @@ -596,13 +612,13 @@ async fn test_stress_gossip_many_rounds() { #[instrument] #[ignore] async fn test_stress_gossip_one_round() { - test_bed( + Box::pin(test_bed( run_gossip_one_round, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) + )) .await; } @@ -615,13 +631,13 @@ async fn test_stress_gossip_one_round() { #[instrument] #[ignore] async fn test_stress_dht_one_round() { - test_bed( + Box::pin(test_bed( run_dht_one_round, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) + )) .await; } @@ -634,13 +650,13 @@ async fn test_stress_dht_one_round() { #[instrument] #[ignore] async fn test_stress_dht_many_rounds() { - test_bed( + Box::pin(test_bed( run_dht_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, - ) + )) .await; } @@ -652,13 +668,13 @@ async fn test_stress_dht_many_rounds() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_dht_one_round() { - test_bed( + Box::pin(test_bed( run_dht_one_round, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) + )) .await; } @@ -670,12 +686,12 @@ async fn test_coverage_dht_one_round() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_dht_many_rounds() { - test_bed( + Box::pin(test_bed( run_dht_many_rounds, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, - ) + )) .await; } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index f9c4467b7d..e1cd2dd43c 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -30,3 +30,6 @@ thiserror = "1.0.50" tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints] +workspace = true diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index f6d9792041..1fb8edb3ca 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -32,3 +32,6 @@ sha2 = { workspace = true } tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints] +workspace = true diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index f81bbd9347..8521b525d4 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -1,16 +1,6 @@ //! The consensus layer for hotshot. This currently implements sequencing //! consensus in an event driven way -#![warn( - clippy::all, - clippy::pedantic, - rust_2018_idioms, - missing_docs, - clippy::missing_docs_in_private_items, - clippy::panic -)] -#![allow(clippy::module_name_repetitions)] - /// the task which implements the main parts of consensus pub mod consensus; diff --git a/task/Cargo.toml b/task/Cargo.toml index cb1703831f..03b65c1934 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -22,3 +22,6 @@ tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints] +workspace = true diff --git a/task/src/lib.rs b/task/src/lib.rs index 1d4c8b602e..3682b2b986 100644 --- a/task/src/lib.rs +++ b/task/src/lib.rs @@ -1,13 +1,5 @@ //! Abstractions meant for usage with long running consensus tasks //! and testing harness -#![warn( - clippy::all, - clippy::pedantic, - rust_2018_idioms, - missing_docs, - clippy::missing_docs_in_private_items, - clippy::panic -)] use crate::task::PassType; use either::Either; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 3c4b3dd621..aaf8e3501e 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -54,3 +54,6 @@ bincode = { workspace = true } # GG any better options for serialization? tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints] +workspace = true diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 7e39871ed1..56be5fff07 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -39,8 +39,6 @@ pub mod state_types; /// node types pub mod node_types; -// TODO node changer (spin up and down) - #[derive(Clone, Debug)] pub enum GlobalTestEvent { ShutDown, diff --git a/types/Cargo.toml b/types/Cargo.toml index 97efbc066f..7277db168f 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -58,3 +58,6 @@ async-std = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } + +[lints] +workspace = true diff --git a/types/src/lib.rs b/types/src/lib.rs index 5467fa6b2b..759c3aa374 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,14 +1,4 @@ //! Types and Traits for the `HotShot` consensus module -#![warn( - clippy::all, - clippy::pedantic, - rust_2018_idioms, - missing_docs, - clippy::missing_docs_in_private_items, - clippy::panic -)] -#![allow(clippy::module_name_repetitions)] - use displaydoc::Display; use std::{num::NonZeroUsize, time::Duration}; use traits::{election::ElectionConfig, signature_key::SignatureKey}; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d8cfeba6e2..8b884b9a33 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1,6 +1,6 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. -// Needed to avoid the non-biding `let` warning. +// Needed to avoid the non-binding `let` warning. #![allow(clippy::let_underscore_untyped)] use super::node_implementation::NodeType; diff --git a/utils/Cargo.toml b/utils/Cargo.toml index c438a33888..c266322f6b 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -8,3 +8,6 @@ version = "0.1.0" [dependencies] bincode = { workspace = true } + +[lints] +workspace = true diff --git a/utils/src/bincode.rs b/utils/src/bincode.rs index 6371179b86..540d893c53 100644 --- a/utils/src/bincode.rs +++ b/utils/src/bincode.rs @@ -1,4 +1,4 @@ -#![allow(clippy::module_name_repetitions, clippy::type_complexity)] +#![allow(clippy::type_complexity)] use bincode::{ config::{ LittleEndian, RejectTrailing, VarintEncoding, WithOtherEndian, WithOtherIntEncoding, diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 325ab683b1..5af39d88b0 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -1,13 +1,4 @@ //! Contains general utility structures and methods -#![warn( - clippy::all, - clippy::pedantic, - rust_2018_idioms, - missing_docs, - clippy::missing_docs_in_private_items, - clippy::panic -)] - /// Provides bincode options pub mod bincode; diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index c58955d583..22c032c631 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -33,3 +33,6 @@ hotshot-types = { path = "../types", default-features = false } tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints] +workspace = true From fc7dc681c792a495e215183f90f1469d20f2bd86 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 1 Jan 2024 10:39:07 -0500 Subject: [PATCH 0620/1393] chore: webserver lints --- hotshot-stake-table/src/mt_based/internal.rs | 2 +- hotshot-stake-table/src/vec_based.rs | 9 ++-- web_server/src/config.rs | 24 +++++++++++ web_server/src/lib.rs | 44 ++++++++++++++++++-- 4 files changed, 70 insertions(+), 9 deletions(-) diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 9325b24378..5b25eb9525 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -732,7 +732,7 @@ mod tests { .register(height as usize, &paths[i], &keys[i], amounts[i]) .unwrap(); } - for (i, (k, v, _)) in (*root).clone().into_iter().enumerate() { + for (i, (k, v, ())) in (*root).clone().into_iter().enumerate() { assert_eq!((k, v), (keys[i], amounts[i])); } } diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index b0faaf8aa0..237b79cfcb 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -221,7 +221,7 @@ where } fn try_iter(&self, version: SnapshotVersion) -> Result { - let table = self.get_version(version)?; + let table = self.get_version(&version)?; let owned = (0..table.bls_keys.len()) .map(|i| { ( @@ -294,7 +294,7 @@ where } /// Helper function to recompute the stake table commitment for head version - /// Commitment of a stake table is a triple (bls_keys_comm, schnorr_keys_comm, stake_amount_comm) + /// Commitment of a stake table is a triple `(bls_keys_comm, schnorr_keys_comm, stake_amount_comm)` /// TODO(Chengyu): The BLS verification keys doesn't implement Default. Thus we directly pad with `F::default()`. fn compute_head_comm(&mut self) -> (F, F, F) { let padding_len = self.capacity - self.head.bls_keys.len(); @@ -303,7 +303,7 @@ where .head .bls_keys .iter() - .flat_map(|key| key.to_fields()) + .flat_map(ToFields::to_fields) .collect::>(); bls_comm_preimage.resize(self.capacity * >::SIZE, F::default()); let bls_comm = VariableLengthRescueCRHF::::evaluate(bls_comm_preimage).unwrap()[0]; @@ -314,7 +314,7 @@ where .schnorr_keys .iter() .chain(ark_std::iter::repeat(&K2::default()).take(padding_len)) - .flat_map(|key| key.to_fields()) + .flat_map(ToFields::to_fields) .collect::>(); let schnorr_comm = VariableLengthRescueCRHF::::evaluate(schnorr_comm_preimage).unwrap()[0]; @@ -341,6 +341,7 @@ where } } + /// returns the snapshot version fn get_version( &self, version: &SnapshotVersion, diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 43ed97bb9a..846ecc70cd 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -1,5 +1,8 @@ +/// the default port on which to run the web server pub const DEFAULT_WEB_SERVER_PORT: u16 = 9000; +/// the default port on which to serve Data availability functionality pub const DEFAULT_WEB_SERVER_DA_PORT: u16 = 9001; +/// the default port on which to serve View Sync functionality pub const DEFAULT_WEB_SERVER_VIEW_SYNC_PORT: u16 = 9002; /// How many views to keep in memory @@ -9,86 +12,107 @@ pub const MAX_TXNS: usize = 500; /// How many transactions to return at once pub const TX_BATCH_SIZE: u64 = 1; +/// get proposal pub fn get_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } +/// post proposal pub fn post_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } +/// get latest qc pub fn get_latest_quorum_proposal_route() -> String { "api/proposal/latest".to_string() } +/// get latest view sync proposal pub fn get_latest_view_sync_proposal_route() -> String { "api/view_sync_proposal/latest".to_string() } +/// get latest certificate pub fn get_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } +/// post data availability certificate pub fn post_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } +/// get vote pub fn get_vote_route(view_number: u64, index: u64) -> String { format!("api/votes/{view_number}/{index}") } +/// post vote pub fn post_vote_route(view_number: u64) -> String { format!("api/votes/{view_number}") } +/// get vid dispersal pub fn get_vid_disperse_route(view_number: u64) -> String { format!("api/vid_disperse/{view_number}") } +/// post vid dispersal pub fn post_vid_disperse_route(view_number: u64) -> String { format!("api/vid_disperse/{view_number}") } +/// get vid vote pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { format!("api/vid_votes/{view_number}/{index}") } +/// post vid vote pub fn post_vid_vote_route(view_number: u64) -> String { format!("api/vid_votes/{view_number}") } +/// get vid certificate pub fn get_vid_certificate_route(view_number: u64) -> String { format!("api/vid_certificate/{view_number}") } +/// post vid certificate pub fn post_vid_certificate_route(view_number: u64) -> String { format!("api/vid_certificate/{view_number}") } +/// get transactions pub fn get_transactions_route(index: u64) -> String { format!("api/transactions/{index}") } +/// post transactions pub fn post_transactions_route() -> String { "api/transactions".to_string() } +/// post stake table pub fn post_staketable_route() -> String { "api/staketable".to_string() } +/// post view sync proposal pub fn post_view_sync_proposal_route(view_number: u64) -> String { format!("api/view_sync_proposal/{view_number}") } +/// get view sync proposal pub fn get_view_sync_proposal_route(view_number: u64, index: u64) -> String { format!("api/view_sync_proposal/{view_number}/{index}") } +/// post view sync vote pub fn post_view_sync_vote_route(view_number: u64) -> String { format!("api/view_sync_vote/{view_number}") } +/// get view sync vote pub fn get_view_sync_vote_route(view_number: u64, index: u64) -> String { format!("api/view_sync_vote/{view_number}/{index}") } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 3babb30670..f885aa0205 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -1,3 +1,6 @@ +//! Web server for HotShot + +/// Configuration for the webserver pub mod config; use crate::config::{MAX_TXNS, MAX_VIEWS, TX_BATCH_SIZE}; @@ -132,16 +135,22 @@ impl WebServerState { /// Trait defining methods needed for the `WebServerState` pub trait WebServerDataSource { + /// get proposal fn get_proposal(&self, view_number: u64) -> Result>>, Error>; + /// get latest quanrum proposal fn get_latest_quorum_proposal(&self) -> Result>>, Error>; + /// get latest view sync proposal fn get_latest_view_sync_proposal(&self) -> Result>>, Error>; + /// get view sync proposal fn get_view_sync_proposal( &self, view_number: u64, index: u64, ) -> Result>>, Error>; + /// get vote fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; + /// get view sync votes fn get_view_sync_votes( &self, view_number: u64, @@ -149,29 +158,47 @@ pub trait WebServerDataSource { ) -> Result>>, Error>; #[allow(clippy::type_complexity)] + /// get transactions fn get_transactions(&self, index: u64) -> Result>)>, Error>; + /// get da certificate fn get_da_certificate(&self, index: u64) -> Result>>, Error>; + /// post vote fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + /// post view sync vote fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + /// post proposal fn post_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; + /// post view sync proposal fn post_view_sync_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; + /// post data avaiability certificate fn post_da_certificate(&mut self, view_number: u64, cert: Vec) -> Result<(), Error>; + /// post transaction fn post_transaction(&mut self, txn: Vec) -> Result<(), Error>; + /// post staketable fn post_staketable(&mut self, key: Vec) -> Result<(), Error>; + /// post completed transaction fn post_completed_transaction(&mut self, block: Vec) -> Result<(), Error>; + /// post secret proposal fn post_secret_proposal(&mut self, _view_number: u64, _proposal: Vec) -> Result<(), Error>; + /// post proposal fn proposal(&self, view_number: u64) -> Option<(String, Vec)>; + /// post vid disperal fn post_vid_disperse(&mut self, view_number: u64, disperse: Vec) -> Result<(), Error>; + /// post vid vote fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + /// post vid certificate fn post_vid_certificate(&mut self, view_number: u64, certificate: Vec) -> Result<(), Error>; + /// get vid dispersal fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error>; + /// get vid votes fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; + /// get vid certificates fn get_vid_certificate(&self, index: u64) -> Result>>, Error>; } @@ -670,9 +697,11 @@ impl WebServerDataSource for WebServerState { } } +/// configurability options for the web server #[derive(Args, Default)] pub struct Options { #[arg(long = "web-server-api-path", env = "WEB_SERVER_API_PATH")] + /// path to API pub api_path: Option, } @@ -839,21 +868,20 @@ where Err(ServerError { status: StatusCode::BadRequest, message: format!( - "Wrong secret value for proposal for view {:?}", - view_number + "Wrong secret value for proposal for view {view_number:?}" ), }) } } else { Err(ServerError { status: StatusCode::BadRequest, - message: format!("Proposal already submitted for view {:?}", view_number), + message: format!("Proposal already submitted for view {view_number:?}"), }) } } else { Err(ServerError { status: StatusCode::BadRequest, - message: format!("No endpoint for view number {} yet", view_number), + message: format!("No endpoint for view number {view_number:?} yet"), }) } } @@ -862,16 +890,24 @@ where Ok(api) } +/// run the web server +/// # Errors +/// TODO +/// this looks like it will panic not error +/// # Panics +/// on error pub async fn run_web_server( shutdown_listener: Option>, url: Url, ) -> io::Result<()> { let options = Options::default(); + // TODO should this be unwrap? let api = define_api(&options).unwrap(); let state = State::new(WebServerState::new().with_shutdown_signal(shutdown_listener)); let mut app = App::, Error>::with_state(state); + // TODO should this be unwrap? app.register_module("api", api).unwrap(); let app_future = app.serve(url); From 0a511da13ba9782019ccb997f00167a19d70eaf8 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 1 Jan 2024 10:48:52 -0500 Subject: [PATCH 0621/1393] chore: more lints --- hotshot-stake-table/src/vec_based.rs | 34 ++++++++++++++++++---------- libp2p-networking/tests/counter.rs | 1 + 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 237b79cfcb..0296f00497 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -16,10 +16,14 @@ use serde::{Deserialize, Serialize}; pub mod config; +/// a snapshot of the stake table #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] struct StakeTableSnapshot { + /// bls keys pub bls_keys: Vec, + /// schnorr pub schnorr_keys: Vec, + /// amount of stake pub stake_amount: Vec, } @@ -55,7 +59,9 @@ where /// Total stakes for different versions head_total_stake: U256, + /// TODO document epoch_start_total_stake: U256, + /// TODO document last_epoch_start_total_stake: U256, /// We only support committing the finalized versions. @@ -64,6 +70,8 @@ where /// - Second item is the rescue hash of the Schnorr keys /// - Third item is the rescue hash of all the stake amounts epoch_start_comm: (F, F, F), + + /// TODO document last_epoch_start_comm: (F, F, F), /// The mapping from public keys to their location in the Merkle tree. @@ -93,18 +101,15 @@ where amount: Self::Amount, aux: Self::Aux, ) -> Result<(), StakeTableError> { - match self.bls_mapping.get(&new_key) { - Some(_) => Err(StakeTableError::ExistingKey), - None => { - let pos = self.bls_mapping.len(); - self.head.bls_keys.push(new_key.clone()); - self.head.schnorr_keys.push(aux); - self.head.stake_amount.push(amount); - self.head_total_stake += amount; - self.bls_mapping.insert(new_key, pos); - Ok(()) - } - } + if self.bls_mapping.get(&new_key).is_some() { Err(StakeTableError::ExistingKey) } else { + let pos = self.bls_mapping.len(); + self.head.bls_keys.push(new_key.clone()); + self.head.schnorr_keys.push(aux); + self.head.stake_amount.push(amount); + self.head_total_stake += amount; + self.bls_mapping.insert(new_key, pos); + Ok(()) + } } fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError> { @@ -242,6 +247,9 @@ where F: RescueParameter, { /// Initiating an empty stake table. + /// # Panics + /// If unable to evaluate a preimage + #[must_use] pub fn new(capacity: usize) -> Self { let bls_comm_preimage = vec![F::default(); capacity * >::SIZE]; let default_bls_comm = @@ -280,6 +288,8 @@ where /// Set the stake withheld by `key` to be `value`. /// Return the previous stake if succeed. + /// # Errors + /// Errors if key is not in the stake table pub fn set_value(&mut self, key: &K1, value: U256) -> Result { match self.bls_mapping.get(key) { Some(pos) => { diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index e883936fa3..c10fa1e536 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -378,6 +378,7 @@ async fn run_gossip_rounds( /// increments its state by 1, /// then has all other peers request its state /// and update their state to the recv'ed state +#[allow(clippy::similar_names)] async fn run_request_response_increment_all( handles: &[Arc>], timeout: Duration, From d68594bc7be7c964cc7025ff61985a5cef7c1139 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 1 Jan 2024 12:15:59 -0500 Subject: [PATCH 0622/1393] doc: fix lints --- hotshot-state-prover/src/circuit.rs | 10 +++++----- hotshot/src/lib.rs | 8 ++++---- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 2 +- hotshot/src/types/event.rs | 2 +- hotshot/src/types/handle.rs | 13 +++++-------- task-impls/src/consensus.rs | 4 ++-- task-impls/src/events.rs | 4 ++-- task/src/lib.rs | 2 +- types/src/message.rs | 2 +- types/src/traits/consensus_api.rs | 3 ++- types/src/traits/node_implementation.rs | 1 - 12 files changed, 25 insertions(+), 28 deletions(-) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index e56818f4fc..1eebd6bcdd 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -49,11 +49,11 @@ pub struct StakeTableCommVar { #[derive(Clone, Debug)] pub struct LightClientStateVar { /// Private list holding all variables - /// vars[0]: view number - /// vars[1]: block height - /// vars[2]: block commitment root - /// vars[3]: fee ledger commitment - /// vars[4-6]: stake table commitment + /// `vars[0]`: view number + /// `vars[1]`: block height + /// `vars[2]`: block commitment root + /// `vars[3]`: fee ledger commitment + /// `vars[4-6]`: stake table commitment vars: [Variable; 7], } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ad2795ab95..cf81d1b6b3 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -19,7 +19,7 @@ #[cfg(feature = "docs")] pub mod documentation; -/// Contains traits consumed by [`HotShot`] +/// Contains traits consumed by [`SystemContext`] pub mod traits; /// Contains types used by the crate pub mod types; @@ -530,13 +530,13 @@ impl> SystemContext { } impl> SystemContext { - /// Get the [`hotstuff`] field of [`HotShot`]. + /// Get access to [`Consensus`] #[must_use] pub fn consensus(&self) -> &Arc>> { &self.inner.consensus } - /// Spawn all tasks that operate on [`HotShot`]. + /// Spawn all tasks that operate on [`SystemContextHandle`]. /// /// For a list of which tasks are being spawned, see this module's documentation. #[allow(clippy::too_many_lines)] @@ -631,7 +631,7 @@ impl> SystemContext { } } -/// A handle that exposes the interface that hotstuff needs to interact with [`HotShot`] +/// A handle that exposes the interface that hotstuff needs to interact with a [`SystemContextInner`] #[derive(Clone, Debug)] pub struct HotShotConsensusApi> { /// Reference to the [`SystemContextInner`] diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 50f89fd79f..563afb53cd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,4 +1,4 @@ -//! Provides a number of tasks that run continuously on a [`HotShot`] +//! Provides a number of tasks that run continuously use crate::{types::SystemContextHandle, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 76d78c72a0..4cc35fea06 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -22,7 +22,7 @@ struct MemoryStorageInternal { failed: BTreeSet, } -/// In memory, ephemeral, storage for a [`HotShot`](crate::HotShot) instance +/// In memory, ephemeral, storage for a [`SystemContext`](crate::SystemContext) instance #[derive(Clone)] pub struct MemoryStorage { /// The inner state of this [`MemoryStorage`] diff --git a/hotshot/src/types/event.rs b/hotshot/src/types/event.rs index a64b1be068..772bd28bf5 100644 --- a/hotshot/src/types/event.rs +++ b/hotshot/src/types/event.rs @@ -1,3 +1,3 @@ -//! Events that a [`HotShot`](crate::HotShot) instance can emit +//! Events that a [`SystemContext`](crate::SystemContext) instance can emit pub use hotshot_types::event::{Event, EventType}; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index ad4b44f62d..0d52a18842 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,4 +1,4 @@ -//! Provides an event-streaming handle for a [`HotShot`] running in the background +//! Provides an event-streaming handle for a [`SystemContext`] running in the background use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; @@ -33,17 +33,14 @@ use tracing::error; /// allowing the ability to receive [`Event`]s from it, send transactions to it, and interact with /// the underlying storage. pub struct SystemContextHandle> { - /// The [sender](BroadcastSender) for the output stream from the background process - /// - /// This is kept around as an implementation detail, as the [`BroadcastSender::handle_async`] - /// method is needed to generate new receivers to expose to the user + /// The [sender](ChannelStream) for the output stream from the background process pub(crate) output_event_stream: ChannelStream>, /// access to the internal ev ent stream, in case we need to, say, shut something down pub(crate) internal_event_stream: ChannelStream>, /// registry for controlling tasks pub(crate) registry: GlobalRegistry, - /// Internal reference to the underlying [`HotShot`] + /// Internal reference to the underlying [`SystemContext`] pub hotshot: SystemContext, /// Our copy of the `Storage` view for a hotshot @@ -96,7 +93,7 @@ impl + 'static> SystemContextHandl self.internal_event_stream.subscribe(filter).await } - /// Gets the current committed state of the [`HotShot`] instance + /// Gets the current committed state of the [`SystemContext`] instance /// /// # Errors /// @@ -113,7 +110,7 @@ impl + 'static> SystemContextHandl self.hotshot.get_decided_leaf().await } - /// Submits a transaction to the backing [`HotShot`] instance. + /// Submits a transaction to the backing [`SystemContext`] instance. /// /// The current node broadcasts the transaction to all nodes on the network. /// diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 05e7b9bf2f..8ecf4c747f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -133,8 +133,8 @@ pub struct ConsensusTaskState< /// All the VID shares we've received for current and future views. /// In the future we will need a different struct similar to VidDisperse except /// it stores only one share. - /// TODO https://github.com/EspressoSystems/HotShot/issues/2146 - /// TODO https://github.com/EspressoSystems/HotShot/issues/1732 + /// TODO + /// TODO pub vid_shares: HashMap>>, /// The most recent proposal we have, will correspond to the current view if Some() diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 4c2d23010b..9bbfc7e832 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -105,10 +105,10 @@ pub enum HotShotEvent { LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader /// - /// Like [`DAProposalSend`]. + /// Like [`HotShotEvent::DAProposalSend`]. VidDisperseSend(Proposal>, TYPES::SignatureKey), /// Vid disperse data has been received from the network; handled by the DA task /// - /// Like [`DAProposalRecv`]. + /// Like [`HotShotEvent::DAProposalRecv`]. VidDisperseRecv(Proposal>, TYPES::SignatureKey), } diff --git a/task/src/lib.rs b/task/src/lib.rs index 1d4c8b602e..375a9e5654 100644 --- a/task/src/lib.rs +++ b/task/src/lib.rs @@ -143,7 +143,7 @@ impl Stream for MergeN { // for usage with `MessageStream` // TODO move this to async-compatibility-layer #[pin_project] -/// Stream returned by the [`merge`](super::StreamExt::merge) method. +/// Stream type that merges two underlying streams pub struct Merge { /// first stream to merge #[pin] diff --git a/types/src/message.rs b/types/src/message.rs index a4bb68907f..ce76b7e141 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -95,7 +95,7 @@ pub enum MessageKind { impl MessageKind { // Can't implement `From` directly due to potential conflict with // `From`. - /// Construct a [`MessageKind`] from [`I::ConsensusMessage`]. + /// Construct a [`MessageKind`] from [`SequencingMessage`]. pub fn from_consensus_message(m: SequencingMessage) -> Self { Self::Consensus(m) } diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 2fa0417c52..9cf6389b01 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -13,7 +13,8 @@ use async_trait::async_trait; use std::{num::NonZeroUsize, time::Duration}; -/// The API that [`HotStuff`] needs to talk to the system +/// The API that tasks use to talk to the system +/// TODO do we plan to drop this? #[async_trait] pub trait ConsensusApi>: Send + Sync { /// Total number of nodes in the network. Also known as `n`. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index a8ceae04c1..7948d75bf1 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -29,7 +29,6 @@ use std::{ hash::Hash, sync::{atomic::AtomicBool, Arc}, }; -/// Alias for the [`ProcessedConsensusMessage`] type of a [`NodeImplementation`]. /// struct containing messages for a view to send to a replica or DA committee member. #[derive(Clone)] From a812615d8fb04a46dccb6d40713d69764adaa1f8 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 1 Jan 2024 12:33:57 -0500 Subject: [PATCH 0623/1393] fix: doc tests by ignoring most of them --- .../src/traits/networking/web_server_network.rs | 15 ++++++++------- orchestrator/src/client.rs | 5 ++++- orchestrator/src/config.rs | 12 ++++++++---- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 92745c8500..fba1f1f22a 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -114,7 +114,7 @@ impl WebServerNetwork { /// /// # Examples /// -/// ``` +/// ```ignore /// let (tx, _rx): (TaskChannel, _) = tokio::sync::mpsc::unbounded_channel(); /// ``` /// @@ -130,9 +130,9 @@ type TaskChannel = UnboundedSender>; /// /// # Examples /// -/// ``` -/// use your_crate::TaskMap; -/// let mut map: TaskMap = TaskMap::default(); +/// ```ignore +/// use crate::TaskMap; +/// let mut map: TaskMap = TaskMap::default(); /// ``` /// /// # Note @@ -160,9 +160,10 @@ impl TaskMap { /// /// # Examples /// - /// ``` - /// let mut map: TaskMap = TaskMap::default(); - /// map.prune_tasks(10, ConsensusIntentEvent::CancelPollForProposal).await; + /// ```ignore + /// use crate::TaskMap; + /// let mut map: TaskMap = TaskMap::default(); + /// map.prune_tasks(10, ConsensusIntentEvent::CancelPollForProposal(5)).await; /// ``` async fn prune_tasks( &mut self, diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 2a50915ea6..bebd5ddb47 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -66,7 +66,10 @@ impl ValidatorArgs { /// /// # Examples /// - /// ```no_run + /// ```ignore + /// // NOTE this is a toy example, + /// // the user will need to construct a multivalidatorargs since `new` does not exist + /// use hotshot_orchestrator::client::MultiValidatorArgs; /// let multi_args = MultiValidatorArgs::new(); /// let node_index = 1; /// let instance = Self::from_multi_args(multi_args, node_index); diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 67ae88e5c3..f062e83db5 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -125,11 +125,13 @@ impl NetworkConfig { /// /// # Examples /// - /// ```no_run + /// ```ignore + /// use hotshot_orchestrator::config::NetworkConfig; + /// use hotshot_orchestrator::client::OrchestratorClient; /// let client = OrchestratorClient::new(); /// let identity = "my_identity".to_string(); /// let file = Some("/path/to/my/config".to_string()); - /// let (config, source) = NetworkConfig::from_file_or_orchestrator(client, file).await; + /// let (config, source) = NetworkConfig::from_file_or_orchestrator(client, file); /// ``` pub async fn from_file_or_orchestrator( client: &OrchestratorClient, @@ -182,7 +184,8 @@ impl NetworkConfig { /// /// # Examples /// - /// ```no_run + /// ```ignore + /// use hotshot_orchestrator::config::NetworkConfig; /// let file = "/path/to/my/config".to_string(); /// let config = NetworkConfig::from_file(file).unwrap(); /// ``` @@ -220,7 +223,8 @@ impl NetworkConfig { /// /// # Examples /// - /// ```no_run + /// ```ignore + /// use hotshot_orchestrator::config::NetworkConfig; /// let file = "/path/to/my/config".to_string(); /// let config = NetworkConfig::from_file(file); /// config.to_file(file).unwrap(); From 1a1950377a609e389ac322b7756233a145c79730 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 11:54:50 -0500 Subject: [PATCH 0624/1393] feat: run doc on PRs --- orchestrator/src/config.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index f062e83db5..29be3ec76d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -186,8 +186,14 @@ impl NetworkConfig { /// /// ```ignore /// use hotshot_orchestrator::config::NetworkConfig; + /// use hotshot_signature_key::bn254::BLSPubKey; + /// use hotshot::traits::election::static_committee::StaticElectionConfig; /// let file = "/path/to/my/config".to_string(); - /// let config = NetworkConfig::from_file(file).unwrap(); + /// // NOTE: broken due to staticelectionconfig not being importable + /// // cannot import staticelectionconfig from hotshot without creating circular dependency + /// // making this work probably involves the `types` crate implementing a dummy + /// // electionconfigtype just ot make this example work + /// let config = NetworkConfig::::from_file(file).unwrap(); /// ``` pub fn from_file(file: String) -> Result { // read from file From 8816543a81e663e7a24d22285b59ba5c25566310 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 12:01:31 -0500 Subject: [PATCH 0625/1393] Bump serde_json from 1.0.108 to 1.0.109 (#2289) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.108 to 1.0.109. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.108...v1.0.109) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- hotshot/Cargo.toml | 2 +- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 2cb0b08fc0..54a99eb52b 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -128,6 +128,6 @@ async-std = { workspace = true } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.108" +serde_json = "1.0.109" toml = { workspace = true } hotshot-testing = { path = "../testing" } \ No newline at end of file diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ed86459d3d..3b5c2be48b 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -42,7 +42,7 @@ libp2p-noise = { version = "0.44.0", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.108" +serde_json = "1.0.109" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 97efbc066f..9c25b8fcf9 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -51,7 +51,7 @@ tracing = { workspace = true } typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.108" +serde_json = "1.0.109" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 8abb514e4ac5407fdf3ccf721010db2abfe190ac Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 12:02:34 -0500 Subject: [PATCH 0626/1393] fix: link issue, remove TODO --- types/src/traits/consensus_api.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 9cf6389b01..aed315dfc7 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -14,7 +14,7 @@ use async_trait::async_trait; use std::{num::NonZeroUsize, time::Duration}; /// The API that tasks use to talk to the system -/// TODO do we plan to drop this? +/// TODO we plan to drop this #[async_trait] pub trait ConsensusApi>: Send + Sync { /// Total number of nodes in the network. Also known as `n`. From 71eb7aab8771225010779f5436b2cda9ecd34111 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 12:20:18 -0500 Subject: [PATCH 0627/1393] feat: hide imports --- .../src/traits/networking/web_server_network.rs | 4 ++-- orchestrator/src/client.rs | 2 +- orchestrator/src/config.rs | 14 +++++++------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index fba1f1f22a..7d7767bdc9 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -131,7 +131,7 @@ type TaskChannel = UnboundedSender>; /// # Examples /// /// ```ignore -/// use crate::TaskMap; +/// # use crate::TaskMap; /// let mut map: TaskMap = TaskMap::default(); /// ``` /// @@ -161,7 +161,7 @@ impl TaskMap { /// # Examples /// /// ```ignore - /// use crate::TaskMap; + /// # use crate::TaskMap; /// let mut map: TaskMap = TaskMap::default(); /// map.prune_tasks(10, ConsensusIntentEvent::CancelPollForProposal(5)).await; /// ``` diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index bebd5ddb47..5498acd711 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -69,7 +69,7 @@ impl ValidatorArgs { /// ```ignore /// // NOTE this is a toy example, /// // the user will need to construct a multivalidatorargs since `new` does not exist - /// use hotshot_orchestrator::client::MultiValidatorArgs; + /// # use hotshot_orchestrator::client::MultiValidatorArgs; /// let multi_args = MultiValidatorArgs::new(); /// let node_index = 1; /// let instance = Self::from_multi_args(multi_args, node_index); diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 29be3ec76d..95ac9bc6d8 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -126,8 +126,8 @@ impl NetworkConfig { /// # Examples /// /// ```ignore - /// use hotshot_orchestrator::config::NetworkConfig; - /// use hotshot_orchestrator::client::OrchestratorClient; + /// # use hotshot_orchestrator::config::NetworkConfig; + /// # use hotshot_orchestrator::client::OrchestratorClient; /// let client = OrchestratorClient::new(); /// let identity = "my_identity".to_string(); /// let file = Some("/path/to/my/config".to_string()); @@ -184,10 +184,10 @@ impl NetworkConfig { /// /// # Examples /// - /// ```ignore - /// use hotshot_orchestrator::config::NetworkConfig; - /// use hotshot_signature_key::bn254::BLSPubKey; - /// use hotshot::traits::election::static_committee::StaticElectionConfig; + /// ```no_run + /// # use hotshot_orchestrator::config::NetworkConfig; + /// # use hotshot_signature_key::bn254::BLSPubKey; + /// // # use hotshot::traits::election::static_committee::StaticElectionConfig; /// let file = "/path/to/my/config".to_string(); /// // NOTE: broken due to staticelectionconfig not being importable /// // cannot import staticelectionconfig from hotshot without creating circular dependency @@ -230,7 +230,7 @@ impl NetworkConfig { /// # Examples /// /// ```ignore - /// use hotshot_orchestrator::config::NetworkConfig; + /// # use hotshot_orchestrator::config::NetworkConfig; /// let file = "/path/to/my/config".to_string(); /// let config = NetworkConfig::from_file(file); /// config.to_file(file).unwrap(); From f761b80083054df07b8860b4c1ab86e10f1228cc Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 12:22:19 -0500 Subject: [PATCH 0628/1393] fix: s/ignore/no_run --- orchestrator/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 95ac9bc6d8..db28af4ed7 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -184,7 +184,7 @@ impl NetworkConfig { /// /// # Examples /// - /// ```no_run + /// ```ignore /// # use hotshot_orchestrator::config::NetworkConfig; /// # use hotshot_signature_key::bn254::BLSPubKey; /// // # use hotshot::traits::election::static_committee::StaticElectionConfig; From 1b2eaebe7cee7572a93033005cb46bc94151a056 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 20:13:31 -0500 Subject: [PATCH 0629/1393] chore: more lints --- hotshot-qc/src/bit_vector.rs | 4 +- hotshot-qc/src/bit_vector_old.rs | 4 +- hotshot-qc/src/lib.rs | 2 +- hotshot-qc/src/snarked.rs | 2 +- hotshot-qc/src/snarked/circuit.rs | 1 + hotshot-signature-key/src/lib.rs | 2 +- hotshot-stake-table/src/lib.rs | 2 +- hotshot-stake-table/src/mt_based.rs | 44 +++---- hotshot-stake-table/src/mt_based/internal.rs | 32 ++++- hotshot-stake-table/src/vec_based/config.rs | 16 ++- hotshot/examples/infra/mod.rs | 2 +- libp2p-networking/tests/counter.rs | 1 + orchestrator/Cargo.toml | 1 + orchestrator/src/client.rs | 18 ++- orchestrator/src/config.rs | 122 ++++++++++++++----- orchestrator/src/lib.rs | 37 +++++- web_server/src/lib.rs | 122 ++++++++++++------- 17 files changed, 294 insertions(+), 118 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 1ac940c618..cd860d18f1 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -1,5 +1,5 @@ -//! Implementation for BitVectorQC that uses BLS signature + Bit vector. -//! See more details in HotShot paper. +//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! See more details in `HotShot` paper. use ark_std::{ fmt::Debug, diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 4ee2129140..fcb12a8398 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -1,5 +1,5 @@ -//! Implementation for BitVectorQC that uses BLS signature + Bit vector. -//! See more details in HotShot paper. +//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! See more details in `HotShot` paper. use ark_std::{ fmt::Debug, diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs index 5b88c4f0fb..c39c2835b1 100644 --- a/hotshot-qc/src/lib.rs +++ b/hotshot-qc/src/lib.rs @@ -1,4 +1,4 @@ -//! This crates offer implementations of quorum certificates used in HotShot. +//! This crates offer implementations of quorum certificates used in `HotShot`. pub mod bit_vector; pub mod bit_vector_old; pub mod snarked; diff --git a/hotshot-qc/src/snarked.rs b/hotshot-qc/src/snarked.rs index 31fa537249..f5b54786b5 100644 --- a/hotshot-qc/src/snarked.rs +++ b/hotshot-qc/src/snarked.rs @@ -1,4 +1,4 @@ -//! This is a SNARKed QC implemenation, see more in HotShot paper. +//! This is a `SNARKed` QC implemenation, see more in the `HotShot` paper. mod circuit; diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index 96ac750c1b..aadddd65d9 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -36,6 +36,7 @@ pub fn compute_stake_table_hash: Sized + Clone { + /// The type of key type KeyType: Default; /// Returns a list of variables associated with this key variable. diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs index 6f59cd4b35..eeda4331a0 100644 --- a/hotshot-signature-key/src/lib.rs +++ b/hotshot-signature-key/src/lib.rs @@ -1,2 +1,2 @@ -//! This crates offer implementations of quorum certificates used in HotShot. +//! This crates offer implementations of quorum certificates used in `HotShot`. pub mod bn254; diff --git a/hotshot-stake-table/src/lib.rs b/hotshot-stake-table/src/lib.rs index f3b7b7be39..fd4f19e9d9 100644 --- a/hotshot-stake-table/src/lib.rs +++ b/hotshot-stake-table/src/lib.rs @@ -1,4 +1,4 @@ -//! This crate contains some stake table implementations for HotShot system. +//! This crate contains some stake table implementations for `HotShot` system. pub mod config; pub mod mt_based; pub mod utils; diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 93760e349e..c4bde25b2c 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -42,22 +42,20 @@ impl StakeTableScheme for StakeTable { &mut self, new_key: Self::Key, amount: Self::Amount, - _: Self::Aux, + (): Self::Aux, ) -> Result<(), StakeTableError> { - match self.mapping.get(&new_key) { - Some(_) => Err(StakeTableError::ExistingKey), - None => { - let pos = self.mapping.len(); - self.head = self.head.register( - self.height, - &to_merkle_path(pos, self.height), - &new_key, - amount, + if self.mapping.get(&new_key).is_some() { Err(StakeTableError::ExistingKey) } else { + let pos = self.mapping.len(); + self.head = self.head.register( + self.height, + &to_merkle_path(pos, self.height), + &new_key, + amount, )?; - self.mapping.insert(new_key, pos); - Ok(()) - } + self.mapping.insert(new_key, pos); + Ok(()) } + } fn deregister(&mut self, _existing_key: &Self::Key) -> Result<(), StakeTableError> { @@ -66,7 +64,7 @@ impl StakeTableScheme for StakeTable { } fn commitment(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, version)?; + let root = Self::get_root(self, &version)?; Ok(MerkleCommitment::new( root.commitment(), self.height, @@ -75,12 +73,12 @@ impl StakeTableScheme for StakeTable { } fn total_stake(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, version)?; + let root = Self::get_root(self, &version)?; Ok(root.total_stakes()) } fn len(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, version)?; + let root = Self::get_root(self, &version)?; Ok(root.num_keys()) } @@ -89,7 +87,7 @@ impl StakeTableScheme for StakeTable { } fn lookup(&self, version: SnapshotVersion, key: &K) -> Result { - let root = Self::get_root(self, version)?; + let root = Self::get_root(self, &version)?; match self.mapping.get(key) { Some(index) => { let branches = to_merkle_path(*index, self.height); @@ -104,7 +102,7 @@ impl StakeTableScheme for StakeTable { version: SnapshotVersion, key: &Self::Key, ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError> { - let root = Self::get_root(self, version)?; + let root = Self::get_root(self, &version)?; let proof = match self.mapping.get(key) { Some(index) => { @@ -149,7 +147,7 @@ impl StakeTableScheme for StakeTable { } /// Almost uniformly samples a key weighted by its stake from the - /// last_epoch_start stake table + /// `last_epoch_start` stake table fn sample( &self, rng: &mut (impl SeedableRng + CryptoRngCore), @@ -163,7 +161,7 @@ impl StakeTableScheme for StakeTable { } fn try_iter(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, version)?; + let root = Self::get_root(self, &version)?; Ok(internal::IntoIter::new(root)) } } @@ -181,10 +179,10 @@ impl StakeTable { } } - // returns the root of stake table at `version` + /// returns the root of stake table at `version` fn get_root( &self, - version: SnapshotVersion, + version: &SnapshotVersion, ) -> Result>, StakeTableError> { match version { SnapshotVersion::Head => Ok(Arc::clone(&self.head)), @@ -202,6 +200,8 @@ impl StakeTable { /// Set the stake withheld by `key` to be `value`. /// Return the previous stake if succeed. + /// # Errors + /// Errors if the key is not found in the staketable pub fn set_value(&mut self, key: &K, value: U256) -> Result { match self.mapping.get(key) { Some(pos) => { diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 5b25eb9525..8ebef5ac3e 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -32,19 +32,29 @@ impl Key for T where #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(bound = "K: Key")] pub(crate) enum PersistentMerkleNode { + /// Empty Empty, + /// A branch Branch { + /// field type #[serde(with = "canonical")] comm: FieldType, + /// children children: [Arc>; TREE_BRANCH], + /// number of keys num_keys: usize, + /// total stake total_stakes: U256, }, + /// A leaf Leaf { + /// field type #[serde(with = "canonical")] comm: FieldType, + /// the key #[serde(with = "canonical")] key: K, + /// the value value: U256, }, } @@ -52,13 +62,19 @@ pub(crate) enum PersistentMerkleNode { /// A compressed Merkle node for Merkle path #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum MerklePathEntry { + /// A branch Branch { + /// Position in tree pos: usize, + /// Siblings #[serde(with = "canonical")] siblings: [FieldType; TREE_BRANCH - 1], }, + /// A leaf Leaf { + /// the key key: K, + /// the value value: U256, }, } @@ -75,14 +91,17 @@ pub struct MerkleProof { } impl MerkleProof { + /// Returns the height of the tree pub fn tree_height(&self) -> usize { self.path.len() - 1 } + /// Returns the index of the given key pub fn index(&self) -> &usize { &self.index } + /// TODO document pub fn get_key(&self) -> Option<&K> { match self.path.first() { Some(MerklePathEntry::Leaf { key, value: _ }) => Some(key), @@ -90,6 +109,7 @@ impl MerkleProof { } } + /// TODO document pub fn get_value(&self) -> Option<&U256> { match self.path.first() { Some(MerklePathEntry::Leaf { key: _, value }) => Some(value), @@ -97,6 +117,7 @@ impl MerkleProof { } } + /// TODO document pub fn get_key_value(&self) -> Option<(&K, &U256)> { match self.path.first() { Some(MerklePathEntry::Leaf { key, value }) => Some((key, value)), @@ -104,6 +125,7 @@ impl MerkleProof { } } + /// TODO document pub fn compute_root(&self) -> Result { match self.path.first() { Some(MerklePathEntry::Leaf { key, value }) => { @@ -125,13 +147,14 @@ impl MerkleProof { .map_err(|_| StakeTableError::RescueError)?[0]; Ok(comm) } - _ => Err(StakeTableError::MalformedProof), + MerklePathEntry::Leaf{ .. } => Err(StakeTableError::MalformedProof), }) } _ => Err(StakeTableError::MalformedProof), } } + /// TODO document pub fn verify(&self, comm: &MerkleCommitment) -> Result<(), StakeTableError> { if self.tree_height() != comm.tree_height() || !self.compute_root()?.eq(comm.digest()) { Err(StakeTableError::VerificationError) @@ -154,18 +177,22 @@ pub struct MerkleCommitment { } impl MerkleCommitment { + /// Creates a new merkle commitment pub fn new(comm: FieldType, height: usize, size: usize) -> Self { Self { comm, height, size } } + /// Returns the digest of the tree pub fn digest(&self) -> &FieldType { &self.comm } + /// Returns the height of the tree pub fn tree_height(&self) -> usize { self.height } + /// Returns the number of leaves pub fn size(&self) -> usize { self.size } @@ -181,7 +208,7 @@ impl PersistentMerkleNode { children: _, num_keys: _, total_stakes: _, - } => *comm, + } | PersistentMerkleNode::Leaf { comm, key: _, @@ -511,6 +538,7 @@ impl PersistentMerkleNode { /// An owning iterator over the (key, value) entries of a `PersistentMerkleNode` /// Traverse using post-order: children from left to right, finally visit the current. pub struct IntoIter { + /// The unvisited key values unvisited: Vec>>, } diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 14cf976bd9..ed1a2370f4 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -27,11 +27,15 @@ impl ToFields for QCVerKey { const SIZE: usize = 2; fn to_fields(&self) -> Vec { - let bytes = to_bytes!(&self.to_affine()).unwrap(); - vec![ - FieldType::from_le_bytes_mod_order(&bytes[..31]), - FieldType::from_le_bytes_mod_order(&bytes[31..62]), - FieldType::from_le_bytes_mod_order(&bytes[62..]), - ] + match to_bytes!(&self.to_affine()) { + Ok(bytes) => { + vec![ + FieldType::from_le_bytes_mod_order(&bytes[..31]), + FieldType::from_le_bytes_mod_order(&bytes[31..62]), + FieldType::from_le_bytes_mod_order(&bytes[62..]), + ] + }, + Err(_) => unreachable!(), + } } } diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 4d58de6f57..0e7c372c15 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -832,7 +832,7 @@ pub async fn main_entry_point< }; let orchestrator_client: OrchestratorClient = - OrchestratorClient::new(args.clone(), public_ip.to_string()).await; + OrchestratorClient::new(args.clone(), public_ip.to_string()); // conditionally save/load config from file or orchestrator let (mut run_config, source) = diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index c10fa1e536..e58a66b609 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -123,6 +123,7 @@ pub async fn counter_handle_network_event( /// and then `requester_handle` updates its state to equal `requestee_handle`. /// # Panics /// on error +#[allow(clippy::similar_names)] async fn run_request_response_increment<'a>( requester_handle: Arc>, requestee_handle: Arc>, diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index e1cd2dd43c..877ae97528 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -25,6 +25,7 @@ serde_json = "1.0.96" snafu = { workspace = true } toml = { workspace = true } thiserror = "1.0.50" +serde-inline-default = "0.1.1" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 2a50915ea6..653e2fdb03 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -11,7 +11,9 @@ use tide_disco::Url; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { + /// the client client: surf_disco::Client, + /// the identity pub identity: String, } @@ -35,6 +37,7 @@ pub struct ValidatorArgs { pub network_config_file: Option, } +/// arguments to run multiple validators #[derive(Parser, Debug, Clone)] pub struct MultiValidatorArgs { /// Number of validators to run @@ -71,28 +74,33 @@ impl ValidatorArgs { /// let node_index = 1; /// let instance = Self::from_multi_args(multi_args, node_index); /// ``` + #[must_use] pub fn from_multi_args(multi_args: MultiValidatorArgs, node_index: u16) -> Self { Self { url: multi_args.url, public_ip: multi_args.public_ip, network_config_file: multi_args .network_config_file - .map(|s| format!("{}-{}", s, node_index)), + .map(|s| format!("{s}-{node_index}")), } } } impl OrchestratorClient { /// Creates the client that will connect to the orchestrator - pub async fn new(args: ValidatorArgs, identity: String) -> Self { + #[must_use] + pub fn new(args: ValidatorArgs, identity: String) -> Self { let client = surf_disco::Client::::new(args.url); // TODO ED: Add healthcheck wait here OrchestratorClient { client, identity } } /// Sends an identify message to the orchestrator and attempts to get its config - /// Returns both the node_index and the run configuration from the orchestrator + /// Returns both the `node_index` and the run configuration from the orchestrator /// Will block until both are returned + /// # Panics + /// if unable to convert the node index from usize into u64 + /// (only applicable on 32 bit systems) #[allow(clippy::type_complexity)] pub async fn get_config( &self, @@ -126,13 +134,15 @@ impl OrchestratorClient { let mut config = self.wait_for_fn_from_orchestrator(f).await; - config.node_index = node_index as u64; + config.node_index = u64::try_from(node_index).unwrap(); config } /// Tells the orchestrator this validator is ready to start /// Blocks until the orchestrator indicates all nodes are ready to start + /// # Panics + /// Panics if unable to post. pub async fn wait_for_all_nodes_ready(&self, node_index: u64) -> bool { let send_ready_f = |client: Client| { async move { diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 67ae88e5c3..e1063283b0 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -2,6 +2,7 @@ use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, ExecutionType, HotShotConfig, ValidatorConfig, }; +use serde_inline_default::serde_inline_default; use std::{ env, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -17,87 +18,148 @@ use tracing::error; use crate::client::OrchestratorClient; +/// Configuration describing a libp2p node #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { + /// bootstrap nodes (socket, serialized public key) pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, + /// number of bootstrap nodes pub num_bootstrap_nodes: usize, + /// public ip of this node pub public_ip: IpAddr, + /// port to run libp2p on pub base_port: u16, + /// global index of node (for testing purposes a uid) pub node_index: u64, + /// whether or not to index ports pub index_ports: bool, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n_high: usize, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n_low: usize, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_outbound_min: usize, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_n_high: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_n_low: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_outbound_min: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_n: usize, + /// timeout before starting the next view pub next_view_timeout: u64, + /// minimum time to wait for a view pub propose_min_round_time: Duration, + /// maximum time to wait for a view pub propose_max_round_time: Duration, + /// time node has been running pub online_time: u64, + /// number of transactions per view pub num_txn_per_round: usize, } +/// configuration serialized into a file #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfigFile { + /// whether or not to index ports pub index_ports: bool, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n_high: usize, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n_low: usize, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_outbound_min: usize, + /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_n_high: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_n_low: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_outbound_min: usize, + /// corresponds to libp2p DHT parameter of the same name pub mesh_n: usize, + /// time node has been running pub online_time: u64, + /// port to run libp2p on pub base_port: u16, } +/// configuration for a web server #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct WebServerConfig { + /// the url to run on pub url: Url, + /// the time to wait between polls pub wait_between_polls: Duration, } +/// a network configuration error #[derive(Error, Debug)] pub enum NetworkConfigError { + /// Failed to read NetworkConfig from file #[error("Failed to read NetworkConfig from file")] ReadFromFileError(std::io::Error), + /// Failed to deserialize loaded NetworkConfig #[error("Failed to deserialize loaded NetworkConfig")] DeserializeError(serde_json::Error), + /// Failed to write NetworkConfig to file #[error("Failed to write NetworkConfig to file")] WriteToFileError(std::io::Error), + /// Failed to serialize NetworkConfig #[error("Failed to serialize NetworkConfig")] SerializeError(serde_json::Error), + /// Failed to recursively create path to NetworkConfig #[error("Failed to recursively create path to NetworkConfig")] FailedToCreatePath(std::io::Error), } +/// a network configuration #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] #[serde(bound(deserialize = ""))] pub struct NetworkConfig { + /// number of views to run pub rounds: usize, + /// number of transactions per view pub transactions_per_round: usize, + /// number of bootstrap nodes pub num_bootrap: usize, + /// timeout before starting the next view pub next_view_timeout: u64, + /// minimum time to wait for a view pub propose_min_round_time: Duration, + /// maximum time to wait for a view pub propose_max_round_time: Duration, + /// global index of node (for testing purposes a uid) pub node_index: u64, + /// unique seed (for randomness? TODO) pub seed: [u8; 32], + /// size of transactions pub transaction_size: usize, + /// delay before beginning consensus pub start_delay_seconds: u64, + /// name of the key type (for debugging) pub key_type_name: String, + /// election config type (for debugging) pub election_config_type_name: String, + /// the libp2p config pub libp2p_config: Option, + /// the hotshot config pub config: HotShotConfig, + /// the webserver config pub web_server_config: Option, + /// the data availability web server config pub da_web_server_config: Option, } +/// the source of the network config pub enum NetworkConfigSource { + /// we source the network configuration from the orchestrator Orchestrator, + /// we source the network configuration from a config file on disk File, } @@ -252,11 +314,11 @@ impl NetworkConfig { impl Default for NetworkConfig { fn default() -> Self { Self { - rounds: default_rounds(), - transactions_per_round: default_transactions_per_round(), + rounds: ORCHESTRATOR_DEFAULT_NUM_ROUNDS, + transactions_per_round: ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, node_index: 0, seed: [0u8; 32], - transaction_size: default_transaction_size(), + transaction_size: ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, libp2p_config: None, config: HotShotConfigFile::default().into(), start_delay_seconds: 60, @@ -272,35 +334,43 @@ impl Default for NetworkConfig { } } +/// a network config stored in a file +#[serde_inline_default] #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] #[serde(bound(deserialize = ""))] pub struct NetworkConfigFile { - #[serde(default = "default_rounds")] + /// number of views to run + #[serde_inline_default(ORCHESTRATOR_DEFAULT_NUM_ROUNDS)] pub rounds: usize, - #[serde(default = "default_transactions_per_round")] + /// number of transactions per view + #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND)] pub transactions_per_round: usize, + /// global index of node (for testing purposes a uid) #[serde(default)] pub node_index: u64, + /// unique seed (for randomness? TODO) #[serde(default)] pub seed: [u8; 32], - #[serde(default = "default_transaction_size")] + /// size of transactions + #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE)] pub transaction_size: usize, - #[serde(default = "default_start_delay_seconds")] + /// delay before beginning consensus + #[serde_inline_default(ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS)] pub start_delay_seconds: u64, + /// the libp2p config #[serde(default)] pub libp2p_config: Option, + /// the hotshot config file #[serde(default)] pub config: HotShotConfigFile, - #[serde(default = "default_web_server_config")] + /// the webserver config + #[serde(default)] pub web_server_config: Option, - #[serde(default = "default_web_server_config")] + /// the data availability web server config + #[serde(default)] pub da_web_server_config: Option, } -fn default_web_server_config() -> Option { - None -} - impl From> for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { @@ -391,6 +461,9 @@ pub struct ValidatorConfigFile { } impl ValidatorConfigFile { + /// read the validator config from a file + /// # Panics + /// Panics if unable to get the current working directory pub fn from_file(dir_str: &str) -> Self { let current_working_dir = match env::current_dir() { Ok(dir) => dir, @@ -448,17 +521,14 @@ impl From> for HotS } } } - -// This is hacky, blame serde for not having something like `default_value = "10"` -fn default_rounds() -> usize { - 10 -} -fn default_transactions_per_round() -> usize { - 10 -} -fn default_transaction_size() -> usize { - 100 -} +/// default number of rounds to run +pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS : usize = 10; +/// default number of transactions per round +pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND : usize = 10; +/// default size of transactions +pub const ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE : usize = 100; +/// default delay before beginning consensus +pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS : u64 = 60; impl From for ValidatorConfig { fn from(val: ValidatorConfigFile) -> Self { @@ -503,7 +573,3 @@ impl Default for HotShotConfigFile { } } } - -fn default_start_delay_seconds() -> u64 { - 60 -} diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index eda551f924..dce4faf508 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -1,4 +1,8 @@ +//! Orchestrator for manipulating nodes and recording results during a run of `HotShot` tests + +/// The orchestrator's clients pub mod client; +/// Configuration for the orchestrator pub mod config; use async_lock::RwLock; @@ -26,6 +30,10 @@ use libp2p::identity::{ Keypair, }; +/// Generate an keypair based on a `seed` and an `index` +/// # Panics +/// This panics if libp2p is unable to generate a secret key from the seed +#[must_use] pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); @@ -35,6 +43,7 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { >::from(sk_bytes).into() } +/// The state of the orchestrator #[derive(Default, Clone)] struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for @@ -53,6 +62,7 @@ struct OrchestratorState { impl OrchestratorState { + /// create a new [`OrchestratorState`] pub fn new(network_config: NetworkConfig) -> Self { let mut web_client = None; if network_config.web_server_config.is_some() { @@ -69,14 +79,30 @@ impl } } +/// An api exposed by the orchestrator pub trait OrchestratorApi { + /// post endpoint for identity + /// # Errors + /// if unable to serve fn post_identity(&mut self, identity: IpAddr) -> Result; + /// post endpoint for each node's config + /// # Errors + /// if unable to serve fn post_getconfig( &mut self, node_index: u16, ) -> Result, ServerError>; + /// get endpoint for whether or not the run has started + /// # Errors + /// if unable to serve fn get_start(&self) -> Result; + /// post endpoint for whether or not all nodes are ready + /// # Errors + /// if unable to serve fn post_ready(&mut self) -> Result<(), ServerError>; + /// post endpoint for the results of the run + /// # Errors + /// if unable to serve fn post_run_results(&mut self) -> Result<(), ServerError>; } @@ -116,10 +142,9 @@ where let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); // Designate node as bootstrap node and store its identity information if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { - let port_index = match libp2p_config_clone.index_ports { - true => node_index, - false => 0, - }; + let port_index = if libp2p_config_clone.index_ports { + node_index + } else {0}; let socketaddr = SocketAddr::new(identity, libp2p_config_clone.base_port + port_index); let keypair = libp2p_generate_indexed_identity(self.config.seed, node_index.into()); @@ -235,6 +260,10 @@ where } /// Runs the orchestrator +/// # Errors +/// This errors if tide disco runs into an issue during serving +/// # Panics +/// This panics if unable to register the api with tide disco pub async fn run_orchestrator( network_config: NetworkConfig, url: Url, diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index f885aa0205..cecd2a44d7 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -124,6 +124,7 @@ impl WebServerState { view_sync_proposal_index: HashMap::new(), } } + /// Provide a shutdown signal to the server pub fn with_shutdown_signal(mut self, shutdown_listener: Option>) -> Self { if self.shutdown.is_some() { panic!("A shutdown signal is already registered and can not be registered twice"); @@ -158,47 +159,81 @@ pub trait WebServerDataSource { ) -> Result>>, Error>; #[allow(clippy::type_complexity)] - /// get transactions + /// Get transactions + /// # Errors + /// Error if unable to serve. fn get_transactions(&self, index: u64) -> Result>)>, Error>; - /// get da certificate + /// Get da certificate + /// # Errors + /// Error if unable to serve. fn get_da_certificate(&self, index: u64) -> Result>>, Error>; - /// post vote + /// Post vote + /// # Errors + /// Error if unable to serve. fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; - /// post view sync vote + /// Post view sync vote + /// # Errors + /// Error if unable to serve. fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; - /// post proposal + /// Post proposal + /// # Errors + /// Error if unable to serve. fn post_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; - /// post view sync proposal + /// Post view sync proposal + /// # Errors + /// Error if unable to serve. fn post_view_sync_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; - /// post data avaiability certificate + /// Post data avaiability certificate + /// # Errors + /// Error if unable to serve. fn post_da_certificate(&mut self, view_number: u64, cert: Vec) -> Result<(), Error>; - /// post transaction + /// Post transaction + /// # Errors + /// Error if unable to serve. fn post_transaction(&mut self, txn: Vec) -> Result<(), Error>; - /// post staketable + /// Post staketable + /// # Errors + /// Error if unable to serve. fn post_staketable(&mut self, key: Vec) -> Result<(), Error>; - /// post completed transaction + /// Post completed transaction + /// # Errors + /// Error if unable to serve. fn post_completed_transaction(&mut self, block: Vec) -> Result<(), Error>; - /// post secret proposal + /// Post secret proposal + /// # Errors + /// Error if unable to serve. fn post_secret_proposal(&mut self, _view_number: u64, _proposal: Vec) -> Result<(), Error>; - /// post proposal + /// Post proposal + /// # Errors + /// Error if unable to serve. fn proposal(&self, view_number: u64) -> Option<(String, Vec)>; - /// post vid disperal + /// Post vid disperal + /// # Errors + /// Error if unable to serve. fn post_vid_disperse(&mut self, view_number: u64, disperse: Vec) -> Result<(), Error>; - /// post vid vote + /// Post vid vote + /// # Errors + /// Error if unable to serve. fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; /// post vid certificate + /// # Errors + /// Error if unable to serve. fn post_vid_certificate(&mut self, view_number: u64, certificate: Vec) -> Result<(), Error>; - /// get vid dispersal + /// Get vid dispersal + /// Error if unable to serve. fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error>; - /// get vid votes + /// Get vid votes + /// # Errors + /// Error if unable to serve. fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; - /// get vid certificates + /// Get vid certificates + /// Error if unable to serve. fn get_vid_certificate(&self, index: u64) -> Result>>, Error>; } @@ -263,13 +298,13 @@ impl WebServerDataSource for WebServerState { let mut ret_proposals = vec![]; if let Some(cert) = proposals { for i in index..*self.view_sync_proposal_index.get(&view_number).unwrap() { - ret_proposals.push(cert[i as usize].1.clone()); + ret_proposals.push(cert[usize::try_from(i).unwrap()].1.clone()); } } - if !ret_proposals.is_empty() { - Ok(Some(ret_proposals)) - } else { + if ret_proposals.is_empty() { Ok(None) + } else { + Ok(Some(ret_proposals)) } } @@ -279,13 +314,13 @@ impl WebServerDataSource for WebServerState { let mut ret_votes = vec![]; if let Some(votes) = votes { for i in index..*self.vote_index.get(&view_number).unwrap() { - ret_votes.push(votes[i as usize].1.clone()); + ret_votes.push(votes[usize::try_from(i).unwrap()].1.clone()); } } - if !ret_votes.is_empty() { - Ok(Some(ret_votes)) - } else { + if ret_votes.is_empty() { Ok(None) + } else { + Ok(Some(ret_votes)) } } @@ -295,13 +330,13 @@ impl WebServerDataSource for WebServerState { let mut ret_votes = vec![]; if let Some(vid_votes) = vid_votes { for i in index..*self.vid_vote_index.get(&view_number).unwrap() { - ret_votes.push(vid_votes[i as usize].1.clone()); + ret_votes.push(vid_votes[usize::try_from(i).unwrap()].1.clone()); } } - if !ret_votes.is_empty() { - Ok(Some(ret_votes)) - } else { + if ret_votes.is_empty() { Ok(None) + } else { + Ok(Some(ret_votes)) } } @@ -314,13 +349,13 @@ impl WebServerDataSource for WebServerState { let mut ret_votes = vec![]; if let Some(votes) = votes { for i in index..*self.view_sync_vote_index.get(&view_number).unwrap() { - ret_votes.push(votes[i as usize].1.clone()); + ret_votes.push(votes[usize::try_from(i).unwrap()].1.clone()); } } - if !ret_votes.is_empty() { - Ok(Some(ret_votes)) - } else { + if ret_votes.is_empty() { Ok(None) + } else { + Ok(Some(ret_votes)) } } @@ -333,34 +368,34 @@ impl WebServerDataSource for WebServerState { let lowest_in_memory_txs = if self.num_txns < MAX_TXNS.try_into().unwrap() { 0 } else { - self.num_txns as usize - MAX_TXNS + usize::try_from(self.num_txns).unwrap() - MAX_TXNS }; - let starting_index = if (index as usize) < lowest_in_memory_txs { + let starting_index = if (usize::try_from(index).unwrap()) < lowest_in_memory_txs { lowest_in_memory_txs } else { - index as usize + usize::try_from(index).unwrap() }; for idx in starting_index..=self.num_txns.try_into().unwrap() { if let Some(txn) = self.transactions.get(&(idx as u64)) { - txns_to_return.push(txn.clone()) + txns_to_return.push(txn.clone()); } - if txns_to_return.len() >= TX_BATCH_SIZE as usize { + if txns_to_return.len() >= usize::try_from(TX_BATCH_SIZE).unwrap() { break; } } - if !txns_to_return.is_empty() { - debug!("Returning this many txs {}", txns_to_return.len()); - //starting_index is the oldest index of the returned txns - Ok(Some((starting_index as u64, txns_to_return))) - } else { + if txns_to_return.is_empty() { Err(ServerError { // TODO ED: Why does NoContent status code cause errors? status: StatusCode::NotImplemented, message: format!("Transaction not found for index {index}"), }) + } else { + debug!("Returning this many txs {}", txns_to_return.len()); + //starting_index is the oldest index of the returned txns + Ok(Some((starting_index as u64, txns_to_return))) } } @@ -706,6 +741,7 @@ pub struct Options { } /// Sets up all API routes +#[allow(clippy::too_many_lines)] fn define_api(options: &Options) -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, From af2925325bd8a9fb3335eed10f27cf45524f7859 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 21:30:23 -0500 Subject: [PATCH 0630/1393] chore: more lints --- hotshot-stake-table/src/mt_based/config.rs | 1 + hotshot-stake-table/src/vec_based.rs | 6 +- hotshot-stake-table/src/vec_based/config.rs | 1 + .../src/traits/election/static_committee.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 9 ++- .../traits/networking/web_server_network.rs | 11 ++- libp2p-networking/examples/counter.rs | 11 ++- orchestrator/src/lib.rs | 4 +- web_server/src/config.rs | 21 +++++ web_server/src/lib.rs | 81 ++++++++++++------- 10 files changed, 101 insertions(+), 46 deletions(-) diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs index 5d733bc725..a41bf5a66b 100644 --- a/hotshot-stake-table/src/mt_based/config.rs +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -23,6 +23,7 @@ impl ToFields for FieldType { impl ToFields for bls_over_bn254::VerKey { const SIZE: usize = 2; fn to_fields(&self) -> Vec { + #[allow(clippy::ignored_unit_patterns)] let bytes = jf_utils::to_bytes!(&self.to_affine()).unwrap(); let x = ::from_le_bytes_mod_order(&bytes[..32]); let y = ::from_le_bytes_mod_order(&bytes[32..]); diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 0296f00497..3048e6cba5 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -389,14 +389,14 @@ mod tests { #[test] fn crypto_test_stake_table() -> Result<(), StakeTableError> { let mut st = StakeTable::::default(); - let mut prng = jf_utils::test_rng(); + let mut pseudo_rng = jf_utils::test_rng(); let keys = (0..10) .map(|_| { ( - BLSOverBN254CurveSignatureScheme::key_gen(&(), &mut prng) + BLSOverBN254CurveSignatureScheme::key_gen(&(), &mut pseudo_rng) .unwrap() .1, - SchnorrSignatureScheme::key_gen(&(), &mut prng).unwrap().1, + SchnorrSignatureScheme::key_gen(&(), &mut pseudo_rng).unwrap().1, ) }) .collect::>(); diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index ed1a2370f4..fe6ab10a48 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -27,6 +27,7 @@ impl ToFields for QCVerKey { const SIZE: usize = 2; fn to_fields(&self) -> Vec { + #[allow(clippy::ignored_unit_patterns)] match to_bytes!(&self.to_affine()) { Ok(bytes) => { vec![ diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index d3bddaf4de..0c18c44bfe 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -58,7 +58,7 @@ where /// Index the vector of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { - let index = (*view_number % self.nodes_with_stake.len() as u64) as usize; + let index = usize::try_from(*view_number % self.nodes_with_stake.len() as u64).unwrap(); let res = self.nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index caf2d72d3e..ea5dd2aa2d 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -55,6 +55,10 @@ use std::{ }; use tracing::{error, info, instrument, warn}; +/// convienence alias for the type for bootstrap addresses +/// concurrency primitives are needed for having tests +pub type BootstrapAddrs = Arc, Multiaddr)>>>; + /// hardcoded topic of QC used pub const QC_TOPIC: &str = "global"; @@ -237,7 +241,7 @@ where pubkey.clone(), bootstrap_addrs_ref, num_bootstrap, - node_id as usize, + usize::try_from(node_id).unwrap(), keys, da.clone(), da.contains(&pubkey), @@ -288,7 +292,7 @@ impl Libp2pNetwork { metrics: NetworkingMetricsValue, config: NetworkNodeConfig, pk: K, - bootstrap_addrs: Arc, Multiaddr)>>>, + bootstrap_addrs: BootstrapAddrs, bootstrap_addrs_len: usize, id: usize, // HACK @@ -375,6 +379,7 @@ impl Libp2pNetwork { // cancels on shutdown while let Ok(Some((view_number, pk))) = node_lookup_recv.recv().await { /// defines lookahead threshold based on the constant + #[allow(clippy::cast_possible_truncation)] const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; info!("Performing lookup for peer {:?}", pk); diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 92745c8500..00a076d20c 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -45,6 +45,10 @@ use std::{ }; use surf_disco::error::ClientError; use tracing::{debug, error, info, warn}; + +/// convenience alias alias for the result of getting transactions from the web server +pub type TxnResult = Result>>)>, NetworkError>; + /// Represents the communication channel abstraction for the web server #[derive(Clone, Debug)] pub struct WebCommChannel(Arc>); @@ -477,9 +481,8 @@ impl Inner { async fn get_txs_from_web_server( &self, endpoint: String, - ) -> Result>>)>, NetworkError> { - let result: Result>)>, ClientError> = - self.client.get(&endpoint).send().await; + ) -> TxnResult { + let result : Result>)>, _> = self.client.get(&endpoint).send().await; match result { Err(_error) => Err(NetworkError::WebServer { source: WebServerNetworkError::ClientError, @@ -1285,7 +1288,7 @@ impl TestableNetworkingImplementation for WebServerNetwo let mut network = WebServerNetwork::create( url, Duration::from_millis(100), - known_nodes[id as usize].clone(), + known_nodes[usize::try_from(id).unwrap()].clone(), is_da, ); network.server_shutdown_signal = Some(sender); diff --git a/libp2p-networking/examples/counter.rs b/libp2p-networking/examples/counter.rs index 28d18c128b..7208eeb719 100644 --- a/libp2p-networking/examples/counter.rs +++ b/libp2p-networking/examples/counter.rs @@ -1,3 +1,4 @@ +//! This is dead code, do not use // pub mod common; // // use async_compatibility_layer::art::async_main; @@ -14,8 +15,7 @@ // #[async_main] // #[instrument] // async fn main() -> Result<()> { -/// this is dead code -fn main() -> Result<(), ()> { +// fn main() -> Result<(), ()> { // let args = CliOpt::parse(); // // #[cfg(all(feature = "lossy_network", target_os = "linux"))] @@ -51,5 +51,8 @@ fn main() -> Result<(), ()> { // network.undo_isolate().await?; // } // - Ok(()) -} +// Ok(()) +// } +// +/// dead code +fn main() {} diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index dce4faf508..fcc961d3d5 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -272,13 +272,13 @@ where KEY: SignatureKey + 'static + serde::Serialize, ELECTION: ElectionConfig + 'static + serde::Serialize, { - let api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); + let web_api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); let mut app = App::>, ServerError>::with_state(state); - app.register_module("api", api.unwrap()) + app.register_module("api", web_api.unwrap()) .expect("Error registering api"); tracing::error!("listening on {:?}", url); app.serve(url).await diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 846ecc70cd..f6847394e8 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -13,106 +13,127 @@ pub const MAX_TXNS: usize = 500; pub const TX_BATCH_SIZE: u64 = 1; /// get proposal +#[must_use] pub fn get_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } /// post proposal +#[must_use] pub fn post_proposal_route(view_number: u64) -> String { format!("api/proposal/{view_number}") } /// get latest qc +#[must_use] pub fn get_latest_quorum_proposal_route() -> String { "api/proposal/latest".to_string() } /// get latest view sync proposal +#[must_use] pub fn get_latest_view_sync_proposal_route() -> String { "api/view_sync_proposal/latest".to_string() } /// get latest certificate +#[must_use] pub fn get_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } /// post data availability certificate +#[must_use] pub fn post_da_certificate_route(view_number: u64) -> String { format!("api/certificate/{view_number}") } /// get vote +#[must_use] pub fn get_vote_route(view_number: u64, index: u64) -> String { format!("api/votes/{view_number}/{index}") } /// post vote +#[must_use] pub fn post_vote_route(view_number: u64) -> String { format!("api/votes/{view_number}") } /// get vid dispersal +#[must_use] pub fn get_vid_disperse_route(view_number: u64) -> String { format!("api/vid_disperse/{view_number}") } /// post vid dispersal +#[must_use] pub fn post_vid_disperse_route(view_number: u64) -> String { format!("api/vid_disperse/{view_number}") } /// get vid vote +#[must_use] pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { format!("api/vid_votes/{view_number}/{index}") } /// post vid vote +#[must_use] pub fn post_vid_vote_route(view_number: u64) -> String { format!("api/vid_votes/{view_number}") } /// get vid certificate +#[must_use] pub fn get_vid_certificate_route(view_number: u64) -> String { format!("api/vid_certificate/{view_number}") } /// post vid certificate +#[must_use] pub fn post_vid_certificate_route(view_number: u64) -> String { format!("api/vid_certificate/{view_number}") } /// get transactions +#[must_use] pub fn get_transactions_route(index: u64) -> String { format!("api/transactions/{index}") } /// post transactions +#[must_use] pub fn post_transactions_route() -> String { "api/transactions".to_string() } /// post stake table +#[must_use] pub fn post_staketable_route() -> String { "api/staketable".to_string() } /// post view sync proposal +#[must_use] pub fn post_view_sync_proposal_route(view_number: u64) -> String { format!("api/view_sync_proposal/{view_number}") } /// get view sync proposal +#[must_use] pub fn get_view_sync_proposal_route(view_number: u64, index: u64) -> String { format!("api/view_sync_proposal/{view_number}/{index}") } /// post view sync vote +#[must_use] pub fn post_view_sync_vote_route(view_number: u64) -> String { format!("api/view_sync_vote/{view_number}") } /// get view sync vote +#[must_use] pub fn get_view_sync_vote_route(view_number: u64, index: u64) -> String { format!("api/view_sync_vote/{view_number}/{index}") } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index cecd2a44d7..b5bd986b3a 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -1,4 +1,4 @@ -//! Web server for HotShot +//! Web server for `HotShot` /// Configuration for the webserver pub mod config; @@ -20,17 +20,22 @@ use tide_disco::{ }; use tracing::{debug, info}; +/// Convience alias for a lock over the state of the app +/// TODO this is used in two places. It might be clearer to just inline type State = RwLock>; +/// Convience alias for errors in this crate type Error = ServerError; /// State that tracks proposals and votes the server receives /// Data is stored as a `Vec` to not incur overhead from deserializing +// TODO should the view numbers be generic over time? struct WebServerState { /// view number -> (secret, proposal) proposals: HashMap)>, - + /// for view sync: view number -> (secret, proposal) + /// TODO guessing here view_sync_proposals: HashMap)>>, - + /// view number -> index view_sync_proposal_index: HashMap, /// view number -> (secret, da_certificates) da_certificates: HashMap)>, @@ -40,39 +45,43 @@ struct WebServerState { latest_quorum_proposal: u64, /// view for the most recent view sync proposal latest_view_sync_proposal: u64, - - /// view for teh oldest DA certificate + /// view for the oldest DA certificate oldest_certificate: u64, - + /// view for the oldest view sync certificate oldest_view_sync_proposal: u64, /// view number -> Vec(index, vote) votes: HashMap)>>, - + /// view sync: view number -> Vec(index, vote) view_sync_votes: HashMap)>>, /// view number -> highest vote index for that view number vote_index: HashMap, - + /// view_sync: view number -> highest vote index for that view number view_sync_vote_index: HashMap, /// view number of oldest votes in memory oldest_vote: u64, - + /// view sync: view number of oldest votes in memory oldest_view_sync_vote: u64, - + /// view number -> (secret, proposal) + /// TODO is this right? vid_disperses: HashMap)>, + /// view for the oldest vid disperal oldest_vid_disperse: u64, + /// view of most recent vid dispersal recent_vid_disperse: u64, - + /// TODO document, not sure what this is vid_votes: HashMap)>>, + /// oldest vid vote view number oldest_vid_vote: u64, - // recent_vid_vote: u64, + /// recent_vid_vote view number vid_certificates: HashMap)>, + /// oldest vid certificate view number oldest_vid_certificate: u64, - // recent_vid_certificate: u64, + /// recent_vid_certificate: u64, vid_vote_index: HashMap, - /// index -> transaction // TODO ED Make indexable by hash of tx transactions: HashMap>, + /// TODO document txn_lookup: HashMap, u64>, /// highest transaction index num_txns: u64, @@ -86,6 +95,7 @@ struct WebServerState { } impl WebServerState { + /// Create new web server state fn new() -> Self { Self { proposals: HashMap::new(), @@ -125,10 +135,11 @@ impl WebServerState { } } /// Provide a shutdown signal to the server + /// # Panics + /// Panics if already shut down + #[allow(clippy::panic)] pub fn with_shutdown_signal(mut self, shutdown_listener: Option>) -> Self { - if self.shutdown.is_some() { - panic!("A shutdown signal is already registered and can not be registered twice"); - } + assert!(self.shutdown.is_none(), "A shutdown signal is already registered and can not be registered twice"); self.shutdown = shutdown_listener; self } @@ -136,22 +147,34 @@ impl WebServerState { /// Trait defining methods needed for the `WebServerState` pub trait WebServerDataSource { - /// get proposal + /// Get proposal + /// # Errors + /// Error if unable to serve. fn get_proposal(&self, view_number: u64) -> Result>>, Error>; - /// get latest quanrum proposal + /// Get latest quanrum proposal + /// # Errors + /// Error if unable to serve. fn get_latest_quorum_proposal(&self) -> Result>>, Error>; - /// get latest view sync proposal + /// Get latest view sync proposal + /// # Errors + /// Error if unable to serve. fn get_latest_view_sync_proposal(&self) -> Result>>, Error>; - /// get view sync proposal + /// Get view sync proposal + /// # Errors + /// Error if unable to serve. fn get_view_sync_proposal( &self, view_number: u64, index: u64, ) -> Result>>, Error>; - /// get vote + /// Get vote + /// # Errors + /// Error if unable to serve. fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; - /// get view sync votes + /// Get view sync votes + /// # Errors + /// Error if unable to serve. fn get_view_sync_votes( &self, view_number: u64, @@ -210,7 +233,6 @@ pub trait WebServerDataSource { /// # Errors /// Error if unable to serve. fn proposal(&self, view_number: u64) -> Option<(String, Vec)>; - /// Post vid disperal /// # Errors /// Error if unable to serve. @@ -224,8 +246,8 @@ pub trait WebServerDataSource { /// Error if unable to serve. fn post_vid_certificate(&mut self, view_number: u64, certificate: Vec) -> Result<(), Error>; - /// Get vid dispersal + /// # Errors /// Error if unable to serve. fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error>; /// Get vid votes @@ -233,6 +255,7 @@ pub trait WebServerDataSource { /// Error if unable to serve. fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; /// Get vid certificates + /// # Errors /// Error if unable to serve. fn get_vid_certificate(&self, index: u64) -> Result>>, Error>; } @@ -931,20 +954,18 @@ where /// TODO /// this looks like it will panic not error /// # Panics -/// on error +/// on errors creating or registering the tide disco api pub async fn run_web_server( shutdown_listener: Option>, url: Url, ) -> io::Result<()> { let options = Options::default(); - // TODO should this be unwrap? - let api = define_api(&options).unwrap(); + let web_api = define_api(&options).unwrap(); let state = State::new(WebServerState::new().with_shutdown_signal(shutdown_listener)); let mut app = App::, Error>::with_state(state); - // TODO should this be unwrap? - app.register_module("api", api).unwrap(); + app.register_module("api", web_api).unwrap(); let app_future = app.serve(url); From 53e7e56e92fcefaa3b5b10b951d6d40ac3e36a57 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 22:28:15 -0500 Subject: [PATCH 0631/1393] chore: more lints --- hotshot-qc/src/snarked/circuit.rs | 2 ++ hotshot-state-prover/src/circuit.rs | 24 +++++++++++++-- hotshot-state-prover/src/lib.rs | 17 +++++++---- hotshot-state-prover/src/utils.rs | 3 +- testing/src/completion_task.rs | 8 ++++- testing/src/lib.rs | 8 +++++ testing/src/node_types.rs | 17 +++++++++++ testing/src/overall_safety_task.rs | 45 ++++++++++++++++++++--------- testing/src/spinning_task.rs | 13 +++++++++ testing/src/state_types.rs | 2 +- testing/src/task_helpers.rs | 15 +++++++++- testing/src/test_builder.rs | 18 +++++++++--- testing/src/test_launcher.rs | 15 +++++++++- testing/src/test_runner.rs | 23 ++++++++++++--- testing/src/txn_task.rs | 7 +++-- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- 17 files changed, 182 insertions(+), 39 deletions(-) diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index aadddd65d9..5ec3ce068a 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -369,6 +369,7 @@ mod tests { ); let agg_vk_point: SWPoint = agg_vk_point.into_affine().into(); let vk_points: Vec> = vk_points.iter().map(|p| p.into_affine().into()).collect(); + #[allow(clippy::cast_sign_loss)] let stake_amts: Vec = (0..5).map(|i| F::from((i + 1) as u32)).collect(); let threshold = F::from(6u8); let digest = compute_stake_table_hash::>(&stake_amts[..], &vk_points[..]); @@ -473,6 +474,7 @@ mod tests { ); let agg_vk_point: TEPoint = agg_vk_point.into_affine().into(); let vk_points: Vec> = vk_points.iter().map(|p| p.into_affine().into()).collect(); + #[allow(clippy::cast_sign_loss)] let stake_amts: Vec = (0..5).map(|i| F::from((i + 1) as u32)).collect(); let threshold = F::from(6u8); let digest = compute_stake_table_hash::>(&stake_amts[..], &vk_points[..]); diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index e56818f4fc..a10520cc1b 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -33,7 +33,7 @@ pub struct StakeTableEntryVar { } /// Light client state Variable -/// The stake table commitment is a triple (qc_keys_comm, state_keys_comm, stake_amount_comm). +/// The stake table commitment is a triple `(qc_keys_comm, state_keys_comm, stake_amount_comm)`. /// Variable for a stake table commitment #[derive(Clone, Debug)] pub struct StakeTableCommVar { @@ -74,52 +74,63 @@ impl From> for PublicInput { impl PublicInput { /// Return the threshold + #[must_use] pub fn threshold(&self) -> F { self.0[0] } /// Return the view number of the light client state + #[must_use] pub fn view_number(&self) -> F { self.0[1] } /// Return the block height of the light client state + #[must_use] pub fn block_height(&self) -> F { self.0[2] } /// Return the block commitment root of the light client state + #[must_use] pub fn block_comm_root(&self) -> F { self.0[3] } /// Return the fee ledger commitment of the light client state + #[must_use] pub fn fee_ledger_comm(&self) -> F { self.0[4] } /// Return the stake table commitment of the light client state + #[must_use] pub fn stake_table_comm(&self) -> (F, F, F) { (self.0[5], self.0[6], self.0[7]) } /// Return the qc key commitment of the light client state + #[must_use] pub fn qc_key_comm(&self) -> F { self.0[5] } /// Return the state key commitment of the light client state + #[must_use] pub fn state_key_comm(&self) -> F { self.0[6] } /// Return the stake amount commitment of the light client state + #[must_use] pub fn stake_amount_comm(&self) -> F { self.0[7] } } impl LightClientStateVar { + /// # Errors + /// if unable to create any of the public variables pub fn new( circuit: &mut PlonkCircuit, state: &LightClientState, @@ -139,22 +150,27 @@ impl LightClientStateVar { }) } + #[must_use] pub fn view_number(&self) -> Variable { self.vars[0] } + #[must_use] pub fn block_height(&self) -> Variable { self.vars[1] } + #[must_use] pub fn block_comm_root(&self) -> Variable { self.vars[2] } + #[must_use] pub fn fee_ledger_comm(&self) -> Variable { self.vars[3] } + #[must_use] pub fn stake_table_comm(&self) -> StakeTableCommVar { StakeTableCommVar { qc_keys_comm: self.vars[4], @@ -185,7 +201,8 @@ impl AsRef<[Variable]> for LightClientStateVar { /// and returns /// - A circuit for proof generation /// - A list of public inputs for verification -/// - A PlonkError if any error happens when building the circuit +/// - A `PlonkError` if any error happens when building the circuit +#[allow(clippy::too_many_lines)] pub(crate) fn build( stake_table_entries: STIter, signer_bit_vec: BitIter, @@ -426,6 +443,7 @@ mod tests { const ST_CAPACITY: usize = 20; #[test] + #[allow(clippy::too_many_lines)] fn crypto_test_circuit_building() { let num_validators = 10; let mut prng = test_rng(); @@ -584,6 +602,6 @@ mod tests { &lightclient_state, &U256::from(26u32), ) - .is_err()) + .is_err()); } } diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index b5f2349b76..486e3003cc 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -1,4 +1,4 @@ -//! SNARK-assisted light client state update verification in HotShot +//! SNARK-assisted light client state update verification in `HotShot` /// State verifier circuit builder pub mod circuit; @@ -37,11 +37,13 @@ pub type Proof = jf_plonk::proof_system::structs::Proof; pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; /// Given a SRS, returns the proving key and verifying key for state update +/// # Errors +/// Errors if unable to preprocess +#[allow(clippy::cast_possible_truncation)] pub fn preprocess( srs: &UniversalSrs, ) -> Result<(ProvingKey, VerifyingKey), PlonkError> { - let (circuit, _) = - circuit::build_for_preprocessing::()?; + let (circuit, _) = circuit::build_for_preprocessing::()?; PlonkKzgSnark::preprocess(srs, &circuit) } @@ -51,10 +53,14 @@ pub fn preprocess( /// - updated light client state (`(view_number, block_height, block_comm_root, fee_ledger_comm, stake_table_comm)`) /// - a bit vector indicates the signers /// - a quorum threshold -/// Returns error or a pair (proof, public_inputs) asserting that +/// Returns error or a pair `(proof, public_inputs)` asserting that /// - the signer's accumulated weight exceeds the quorum threshold /// - the stake table corresponds to the one committed in the light client state /// - all signed schnorr signatures are valid +/// # Errors +/// Errors if unable to generate proof +/// # Panics +/// if the stake table is not up to date pub fn generate_state_update_proof( rng: &mut R, pk: &ProvingKey, @@ -124,6 +130,7 @@ mod tests { const ST_CAPACITY: usize = 20; // FIXME(Chengyu): see + #[allow(clippy::unnecessary_wraps)] fn universal_setup_for_testing( max_degree: usize, rng: &mut R, @@ -234,7 +241,7 @@ mod tests { .0 .num_gates(); let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); - ark_std::println!("Number of constraint in the circuit: {}", num_gates); + ark_std::println!("Number of constraint in the circuit: {num_gates}"); let result = preprocess::(&test_srs); assert!(result.is_ok()); diff --git a/hotshot-state-prover/src/utils.rs b/hotshot-state-prover/src/utils.rs index f910b121e4..90511375db 100644 --- a/hotshot-state-prover/src/utils.rs +++ b/hotshot-state-prover/src/utils.rs @@ -31,6 +31,7 @@ pub(crate) fn key_pairs_for_testing( } /// Helper function for test +#[allow(clippy::cast_possible_truncation)] pub(crate) fn stake_table_for_testing( capacity: usize, bls_keys: &[BLSVerKey], @@ -44,7 +45,7 @@ pub(crate) fn stake_table_for_testing( .zip(schnorr_keys) .for_each(|((i, bls_key), (_, schnorr_key))| { st.register(*bls_key, U256::from((i + 1) as u32), schnorr_key.clone()) - .unwrap() + .unwrap(); }); // Freeze the stake table st.advance(); diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 669148682d..9f63da94e5 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -25,7 +25,9 @@ pub struct CompletionTaskErr {} /// Data availability task state pub struct CompletionTask> { + /// the test level event stream pub(crate) test_event_stream: ChannelStream, + /// handles to the nodes in the test pub(crate) handles: Vec>, } @@ -57,6 +59,7 @@ pub enum CompletionTaskDescription { impl CompletionTaskDescription { /// Build and launch a completion task. + #[must_use] pub fn build_and_launch>( self, ) -> TaskGenerator> { @@ -68,6 +71,9 @@ impl CompletionTaskDescription { impl TimeBasedCompletionTaskDescription { /// create the task and launch it + /// # Panics + /// if cannot obtain task id after launching + #[must_use] pub fn build_and_launch>( self, ) -> TaskGenerator> { @@ -88,7 +94,7 @@ impl TimeBasedCompletionTaskDescription { .boxed() })); let message_handler = - HandleMessage::>(Arc::new(move |_, state| { + HandleMessage::>(Arc::new(move |(), state| { async move { state .test_event_stream diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 56be5fff07..a5639cd410 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -1,3 +1,5 @@ +//! Testing infrastructure for `HotShot` + #![cfg_attr( // hotshot_example option is set manually in justfile when running examples not(any(test, debug_assertions, hotshot_example)), @@ -39,15 +41,21 @@ pub mod state_types; /// node types pub mod node_types; +/// global event at the test level #[derive(Clone, Debug)] pub enum GlobalTestEvent { + /// the test is shutting down ShutDown, } +/// the reason for shutting down the test pub enum ShutDownReason { + /// the test is shutting down because of a safety violation SafetyViolation, + /// the test is shutting down because the test has completed successfully SuccessfullyCompleted, } +/// type alias for the type of tasks created in testing pub type TestTask = HSTWithEvent, STATE>; diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 1014c434e4..c2601354f3 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -35,6 +35,8 @@ use serde::{Deserialize, Serialize}; serde::Serialize, serde::Deserialize, )] +/// filler struct to implement node type and allow us +/// to select our traits pub struct TestTypes; impl NodeType for TestTypes { type Time = ViewNumber; @@ -47,38 +49,53 @@ impl NodeType for TestTypes { type Membership = GeneralStaticCommittee; } +/// Memory network implementation #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct MemoryImpl; +/// Libp2p network implementation #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct Libp2pImpl; +/// Web server network implementation #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct WebImpl; +/// Combined Network implementation (libp2p + web sever) #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct CombinedImpl; +/// static committee type alias pub type StaticMembership = StaticCommittee; +/// memory network pub type StaticMemoryDAComm = MemoryCommChannel; +/// libp2p network type StaticLibp2pDAComm = Libp2pCommChannel; +/// web server network communication channel type StaticWebDAComm = WebCommChannel; +/// combined network type StaticCombinedDAComm = CombinedCommChannel; +/// memory comm channel pub type StaticMemoryQuorumComm = MemoryCommChannel; +/// libp2p comm channel type StaticLibp2pQuorumComm = Libp2pCommChannel; +/// web server comm channel type StaticWebQuorumComm = WebCommChannel; +/// combined network (libp2p + web server) type StaticCombinedQuorumComm = CombinedCommChannel; +/// memory network pub type StaticMemoryViewSyncComm = MemoryCommChannel; +/// memory network pub type StaticMemoryVIDComm = MemoryCommChannel; impl NodeImplementation for Libp2pImpl { diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f6b6a96c4f..1d50724457 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -24,6 +24,7 @@ use std::{ }; use crate::{test_launcher::TaskGenerator, test_runner::Node}; +/// convenience type alias for state and block pub type StateAndBlock = (Vec, Vec); use super::GlobalTestEvent; @@ -45,7 +46,10 @@ pub enum ViewStatus { #[derive(Snafu, Debug, Clone)] pub enum OverallSafetyTaskErr { /// inconsistent txn nums - InconsistentTxnsNum { map: HashMap }, + InconsistentTxnsNum { + /// node idx -> number transactions + map: HashMap + }, /// too many failed views TooManyFailures { /// vec of failed views @@ -104,21 +108,24 @@ pub struct RoundResult { /// block -> # entries decided on that block pub block_map: HashMap, + // TODO should we delete this? + // state is empty now /// state -> # entries decided on that state pub state_map: HashMap<(), usize>, + /// node idx -> number transactions pub num_txns_map: HashMap, } impl Default for RoundResult { fn default() -> Self { Self { - success_nodes: Default::default(), - failed_nodes: Default::default(), - leaf_map: Default::default(), - block_map: Default::default(), - state_map: Default::default(), - num_txns_map: Default::default(), + success_nodes: HashMap::default(), + failed_nodes: HashMap::default(), + leaf_map: HashMap::default(), + block_map: HashMap::default(), + state_map: HashMap::default(), + num_txns_map: HashMap::default(), status: ViewStatus::InProgress, } } @@ -129,9 +136,9 @@ impl Default for RoundResult { impl Default for RoundCtx { fn default() -> Self { Self { - round_results: Default::default(), - failed_views: Default::default(), - successful_views: Default::default(), + round_results: HashMap::default(), + failed_views: HashSet::default(), + successful_views: HashSet::default(), } } } @@ -232,18 +239,22 @@ impl RoundResult { maybe_leaf } + /// check if the test failed due to not enough nodes getting through enough views pub fn check_if_failed(&mut self, threshold: usize, total_num_nodes: usize) -> bool { let num_failed = self.failed_nodes.len(); total_num_nodes - num_failed >= threshold } /// determines whether or not the round passes /// also do a safety check + /// # Panics + /// if the `num_txns_map` is somehow empty + /// This should never happen because this function should never be called in that case #[allow(clippy::too_many_arguments, clippy::let_unit_value)] pub fn update_status( &mut self, threshold: usize, total_num_nodes: usize, - key: Leaf, + key: &Leaf, check_leaf: bool, check_state: bool, check_block: bool, @@ -293,7 +304,7 @@ impl RoundResult { if *self.block_map.get(&block_key).unwrap() == threshold && *self.state_map.get(&state_key).unwrap() == threshold - && *self.leaf_map.get(&key).unwrap() == threshold + && *self.leaf_map.get(key).unwrap() == threshold { self.status = ViewStatus::Ok; return; @@ -307,6 +318,7 @@ impl RoundResult { } /// generate leaves + #[must_use] pub fn gen_leaves(&self) -> HashMap, usize> { let mut leaves = HashMap::, usize>::new(); @@ -358,7 +370,8 @@ impl std::fmt::Debug for OverallSafetyPropertiesDescription { .field("check_state", &self.check_state) .field("check_block", &self.check_block) .field("num_failed_rounds_total", &self.num_failed_views) - .finish() + .field("transaction_threshold", &self.transaction_threshold) + .finish_non_exhaustive() } } @@ -379,6 +392,10 @@ impl Default for OverallSafetyPropertiesDescription { impl OverallSafetyPropertiesDescription { /// build a task + /// # Panics + /// if an internal variant that the prior views are filled is violated + #[must_use] + #[allow(clippy::too_many_lines)] pub fn build>( self, ) -> TaskGenerator> { @@ -493,7 +510,7 @@ impl OverallSafetyPropertiesDescription { view.update_status( threshold, state.handles.len(), - key, + &key, check_leaf, check_state, check_block, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 82cee453c3..d98d1ebebc 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -14,18 +14,24 @@ use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; use crate::{test_launcher::TaskGenerator, test_runner::Node}; +/// convience type for state and block pub type StateAndBlock = (Vec, Vec); use super::GlobalTestEvent; +/// error for the spinning task #[derive(Snafu, Debug)] pub struct SpinningTaskErr {} /// Spinning task state pub struct SpinningTask> { + /// handle to the nodes pub(crate) handles: Vec>, + /// late start nodes pub(crate) late_start: HashMap>, + /// time based changes pub(crate) changes: HashMap>, + /// most recent view seen by spinning task pub(crate) latest_view: Option, } @@ -53,13 +59,20 @@ pub struct ChangeNode { pub updown: UpDown, } +/// description of the spinning task +/// (used to build a spinning task) #[derive(Clone, Debug)] pub struct SpinningTaskDescription { + /// the changes in node status, time -> changes pub node_changes: Vec<(u64, Vec)>, } impl SpinningTaskDescription { /// build a task + /// # Panics + /// If there is no latest view + /// or if the node id is over `u32::MAX` + #[must_use] pub fn build>( self, ) -> TaskGenerator> { diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 5047376415..bb188dd253 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -101,6 +101,6 @@ impl TestableState for TestState { ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; - TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) + TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + usize::try_from(padding).unwrap()]) } } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 0120cc7a33..e7ca9bbf6a 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use std::marker::PhantomData; use crate::{ @@ -30,6 +31,9 @@ use hotshot_types::{ vote::HasViewNumber, }; +/// create the [`SystemContextHandle`] from a node id +/// # Panics +/// if cannot create a [`HotShotInitializer`] pub async fn build_system_handle( node_id: u64, ) -> ( @@ -102,6 +106,8 @@ pub async fn build_system_handle( .expect("Could not init hotshot") } +/// create a quorum proposal and signature +/// used for unit tests async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, @@ -158,6 +164,7 @@ async fn build_quorum_proposal_and_signature( (proposal, signature) } +/// create a quorum proposal pub async fn build_quorum_proposal( handle: &SystemContextHandle, private_key: &::PrivateKey, @@ -172,6 +179,8 @@ pub async fn build_quorum_proposal( } } +/// get the keypair for a node id +#[must_use] pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { let private_key = ::generated_from_seed_indexed([0u8; 32], node_id).1; @@ -179,8 +188,12 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey (private_key, public_key) } +/// initialize VID +/// # Panics +/// if unable to create a [`VidScheme`] +#[must_use] pub fn vid_init( - membership: TYPES::Membership, + membership: &TYPES::Membership, view_number: TYPES::Time, ) -> VidScheme { let num_committee = membership.get_committee(view_number).len(); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index e858816e3f..bb4237773b 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -46,13 +46,13 @@ pub struct TestMetadata { pub num_bootstrap_nodes: usize, /// Size of the DA committee for the test pub da_committee_size: usize, - // overall safety property description + /// overall safety property description pub overall_safety_properties: OverallSafetyPropertiesDescription, /// spinning properties pub spinning_properties: SpinningTaskDescription, - // txns timing + /// txns timing pub txn_description: TxnTaskDescription, - // completion task + /// completion task pub completion_task_description: CompletionTaskDescription, /// Minimum transactions required for a block pub min_transactions: usize, @@ -74,6 +74,8 @@ impl Default for TimingData { } impl TestMetadata { + /// the default metadata for a stress test + #[must_use] pub fn default_stress() -> Self { TestMetadata { num_bootstrap_nodes: 15, @@ -99,6 +101,8 @@ impl TestMetadata { } } + /// the default metadata for multiple rounds + #[must_use] pub fn default_multiple_rounds() -> TestMetadata { TestMetadata { total_nodes: 10, @@ -113,7 +117,7 @@ impl TestMetadata { threshold_calculator: Arc::new(|_active, total| (2 * total / 3 + 1)), }, timing_data: TimingData { - start_delay: 120000, + start_delay: 120_000, round_start_delay: 25, ..TimingData::default() }, @@ -122,6 +126,7 @@ impl TestMetadata { } /// Default setting with 20 nodes and 8 views of successful views. + #[must_use] pub fn default_more_nodes() -> TestMetadata { TestMetadata { total_nodes: 20, @@ -178,6 +183,11 @@ impl Default for TestMetadata { } impl TestMetadata { + /// turn a description of a test (e.g. a [`TestMetadata`]) into + /// a [`TestLauncher`] that can be used to launch the test. + /// # Panics + /// if some of the the configuration values are zero + #[must_use] pub fn gen_launcher>( self, node_id: u64, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 3771eb841f..00ee440837 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -17,6 +17,7 @@ use super::{ test_builder::TestMetadata, test_runner::TestRunner, txn_task::TxnTask, GlobalTestEvent, }; +/// convience type alias for the networks available pub type Networks = ( >::QuorumNetwork, >::CommitteeNetwork, @@ -28,6 +29,7 @@ pub type Generator = Box T + 'static>; /// Wrapper Type for committee function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` pub type CommitteeNetworkGenerator = Box) -> T + 'static>; +/// Wrapper Type for view sync function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` pub type ViewSyncNetworkGenerator = Box) -> T + 'static>; /// Wrapper type for a task generator. @@ -51,7 +53,7 @@ pub type Hook = Box< /// generators for resources used by each node pub struct ResourceGenerators> { - // generate channels + /// generate channels pub channel_generator: Generator>, /// generate a new storage for each node pub storage: Generator<>::Storage>, @@ -72,13 +74,16 @@ pub struct TestLauncher> { /// overall safety task generator pub overall_safety_task_generator: TaskGenerator>, + /// spinning task generator pub spinning_task_generator: TaskGenerator>, + /// additional hooks to add in custom checks pub hooks: Vec, } impl> TestLauncher { /// launch the test + #[must_use] pub fn launch(self) -> TestRunner { TestRunner { launcher: self, @@ -90,6 +95,7 @@ impl> TestLauncher>, @@ -101,6 +107,7 @@ impl> TestLauncher>, @@ -112,6 +119,7 @@ impl> TestLauncher>, @@ -123,6 +131,7 @@ impl> TestLauncher>, @@ -134,6 +143,7 @@ impl> TestLauncher) -> Self { Self { resource_generator, @@ -142,17 +152,20 @@ impl> TestLauncher Self { self.hooks.push(hook); self } /// overwrite hooks with more hooks + #[must_use] pub fn with_hooks(self, hooks: Vec) -> Self { Self { hooks, ..self } } /// Modifies the config used when generating nodes with `f` + #[must_use] pub fn modify_default_config( mut self, mut f: impl FnMut(&mut HotShotConfig), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 515d4a8787..f7d3aa8ebf 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use super::{ completion_task::CompletionTask, overall_safety_task::{OverallSafetyTask, RoundCtx}, @@ -27,20 +28,29 @@ use std::{ #[allow(deprecated)] use tracing::info; +/// a node participating in a test #[derive(Clone)] pub struct Node> { + /// the unique identifier of the node pub node_id: u64, + /// the networks of the node pub networks: Networks, + /// the handle to the node's internals pub handle: SystemContextHandle, } /// The runner of a test network /// spin up and down nodes, execute rounds pub struct TestRunner> { + /// test launcher, contains a bunch of useful metadata and closures pub(crate) launcher: TestLauncher, + /// nodes in the test pub(crate) nodes: Vec>, + /// nodes with a late start pub(crate) late_start: HashMap>, + /// the next node unique identifier pub(crate) next_node_id: u64, + /// overarching test task pub(crate) task_runner: TaskRunner, } @@ -49,6 +59,9 @@ where I: TestableNodeImplementation, { /// excecute test + /// # Panics + /// if the test fails + #[allow(clippy::too_many_lines)] pub async fn run_test(mut self) { let spinning_changes = self .launcher @@ -164,7 +177,7 @@ where for (name, result) in results { match result { hotshot_task::task::HotShotTaskCompleted::ShutDown => { - info!("Task {} shut down successfully", name) + info!("Task {} shut down successfully", name); } hotshot_task::task::HotShotTaskCompleted::Error(e) => error_list.push((name, e)), _ => { @@ -172,12 +185,12 @@ where } } } - if !error_list.is_empty() { - panic!("TEST FAILED! Results: {:?}", error_list); - } + assert!(error_list.is_empty(), "TEST FAILED! Results: {error_list:?}"); } /// add nodes + /// # Panics + /// Panics if unable to create a [`HotShotInitializer`] pub async fn add_nodes(&mut self, total: usize, late_start: &HashSet) -> Vec { let mut results = vec![]; for i in 0..total { @@ -215,6 +228,8 @@ where } /// add a specific node with a config + /// # Panics + /// if unable to initialize the node's `SystemContext` based on the config pub async fn add_node_with_config( &mut self, networks: Networks, diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 90e9a53f4d..1c4f7c2850 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -56,6 +56,9 @@ pub enum TxnTaskDescription { impl TxnTaskDescription { /// build a task + /// # Panics + /// if unable to get task id + #[must_use] pub fn build>( self, ) -> TaskGenerator> @@ -68,7 +71,7 @@ impl TxnTaskDescription { // consistency check match self { TxnTaskDescription::RoundRobinTimeBased(_) => { - assert!(state.next_node_idx.is_some()) + assert!(state.next_node_idx.is_some()); } TxnTaskDescription::DistributionBased => assert!(state.next_node_idx.is_none()), } @@ -88,7 +91,7 @@ impl TxnTaskDescription { .boxed() })); let message_handler = - HandleMessage::>(Arc::new(move |_, mut state| { + HandleMessage::>(Arc::new(move |(), mut state| { async move { if let Some(idx) = state.next_node_idx { // submit to idx handle diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 20d3b964b0..6b56e28ae8 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -42,7 +42,7 @@ async fn test_network_task() { api.private_key(), &encoded_transactions_hash, ); - let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(2)); + let vid = vid_init::(&quorum_membership, ViewNumber::new(2)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let vid_signature = diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index eb435e69ad..9648a81310 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -36,7 +36,7 @@ async fn test_vid_task() { // quorum membership for VID share distribution let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); - let vid = vid_init::(quorum_membership.clone(), ViewNumber::new(0)); + let vid = vid_init::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); From b6b73dc3d4f51054c99b990a714ea9b3d927965b Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 23:01:11 -0500 Subject: [PATCH 0632/1393] chore: more lints --- hotshot/examples/combined/all.rs | 7 ++- hotshot/examples/combined/multi-validator.rs | 5 +- hotshot/examples/combined/orchestrator.rs | 3 + hotshot/examples/combined/types.rs | 6 ++ hotshot/examples/combined/validator.rs | 3 + hotshot/examples/infra/mod.rs | 59 +++++++++++++++---- hotshot/examples/libp2p/all.rs | 7 ++- hotshot/examples/libp2p/multi-validator.rs | 5 +- hotshot/examples/libp2p/orchestrator.rs | 4 ++ hotshot/examples/libp2p/types.rs | 6 ++ hotshot/examples/libp2p/validator.rs | 3 + hotshot/examples/webserver/all.rs | 7 ++- hotshot/examples/webserver/multi-validator.rs | 5 +- hotshot/examples/webserver/orchestrator.rs | 4 ++ hotshot/examples/webserver/types.rs | 6 ++ hotshot/examples/webserver/validator.rs | 3 + testing/tests/memory_network.rs | 1 + 17 files changed, 112 insertions(+), 22 deletions(-) diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index 5f1e5a0408..de602d4bea 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -1,3 +1,5 @@ +//! A example program using both the web server and libp2p +/// types used for this example pub mod types; use crate::infra::load_config_from_file; @@ -21,6 +23,7 @@ use crate::{ types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, }; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -88,7 +91,7 @@ async fn main() { let config: NetworkConfig< ::SignatureKey, ::ElectionConfigType, - > = load_config_from_file::(args.config_file); + > = load_config_from_file::(&args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { let orchestrator_url = orchestrator_url.clone(); @@ -106,7 +109,7 @@ async fn main() { public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), network_config_file: None, }) - .await + .await; }); nodes.push(node); } diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index 79c05e5867..7329986afc 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -1,3 +1,4 @@ +//! A multi-validator using both the web server libp2p use async_compatibility_layer::{ art::async_spawn, logging::{setup_backtrace, setup_logging}, @@ -10,8 +11,10 @@ use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +/// types used for this example pub mod types; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -40,7 +43,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs::from_multi_args(args, node_index)) - .await + .await; }); nodes.push(node); } diff --git a/hotshot/examples/combined/orchestrator.rs b/hotshot/examples/combined/orchestrator.rs index fd61f16648..ba038fabc1 100644 --- a/hotshot/examples/combined/orchestrator.rs +++ b/hotshot/examples/combined/orchestrator.rs @@ -1,3 +1,5 @@ +//! Orchestrator using the web server +/// types used for this example pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; @@ -10,6 +12,7 @@ use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/hotshot/examples/combined/types.rs b/hotshot/examples/combined/types.rs index 36091ac507..4e5d342275 100644 --- a/hotshot/examples/combined/types.rs +++ b/hotshot/examples/combined/types.rs @@ -5,12 +5,17 @@ use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation use serde::{Deserialize, Serialize}; use std::fmt::Debug; +/// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} +/// convenience type alias pub type DANetwork = CombinedCommChannel; +/// convenience type alias pub type VIDNetwork = CombinedCommChannel; +/// convenience type alias pub type QuorumNetwork = CombinedCommChannel; +/// convenience type alias pub type ViewSyncNetwork = CombinedCommChannel; impl NodeImplementation for NodeImpl { @@ -24,4 +29,5 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } +/// convenience type alias pub type ThisRun = CombinedDARun; diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs index 9b0babfd16..04c0aa7cee 100644 --- a/hotshot/examples/combined/validator.rs +++ b/hotshot/examples/combined/validator.rs @@ -1,3 +1,4 @@ +//! A validator using both the web server and libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_testing::state_types::TestTypes; @@ -8,8 +9,10 @@ use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork} use hotshot_orchestrator::client::ValidatorArgs; +/// types used for this example pub mod types; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 0e7c372c15..d562397c58 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use async_compatibility_layer::art::async_sleep; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_lock::RwLock; @@ -85,10 +86,13 @@ pub struct ConfigArgs { } /// Reads a network configuration from a given filepath +/// # Panics +/// if unable to convert the config file into toml +#[must_use] pub fn load_config_from_file( - config_file: String, + config_file: &str, ) -> NetworkConfig { - let config_file_as_string: String = fs::read_to_string(config_file.as_str()) + let config_file_as_string: String = fs::read_to_string(config_file) .unwrap_or_else(|_| panic!("Could not read config file located at {config_file}")); let config_toml: NetworkConfigFile = toml::from_str::>(&config_file_as_string) @@ -127,7 +131,7 @@ pub async fn run_orchestrator< OrchestratorArgs { url, config_file }: OrchestratorArgs, ) { error!("Starting orchestrator",); - let run_config = load_config_from_file::(config_file); + let run_config = load_config_from_file::(&config_file); let _result = hotshot_orchestrator::run_orchestrator::< TYPES::SignatureKey, TYPES::ElectionConfigType, @@ -136,17 +140,20 @@ pub async fn run_orchestrator< } /// Helper function to calculate the nuymber of transactions to send per node per round +#[allow(clippy::cast_possible_truncation)] fn calculate_num_tx_per_round( node_index: u64, total_num_nodes: usize, transactions_per_round: usize, ) -> usize { transactions_per_round / total_num_nodes - + ((total_num_nodes - 1 - node_index as usize) < (transactions_per_round % total_num_nodes)) - as usize + + usize::from((total_num_nodes - 1 - node_index as usize) < (transactions_per_round % total_num_nodes)) } -async fn webserver_network_from_config( +/// create a web server network from a config file + public key +/// # Panics +/// Panics if the web server config doesn't exist in `config` +fn webserver_network_from_config( config: NetworkConfig, pub_key: TYPES::SignatureKey, ) -> WebServerNetwork { @@ -159,6 +166,11 @@ async fn webserver_network_from_config( WebServerNetwork::create(url, wait_between_polls, pub_key, false) } +#[allow(clippy::cast_possible_truncation)] +#[allow(clippy::cast_lossless)] +/// Create a libp2p network from a config file and public key +/// # Panics +/// If unable to create bootstrap nodes multiaddres or the libp2p config is invalid async fn libp2p_network_from_config( config: NetworkConfig, pub_key: TYPES::SignatureKey, @@ -188,10 +200,9 @@ async fn libp2p_network_from_config( NetworkNodeType::Regular }; let node_index = config.node_index; - let port_index = match libp2p_config.index_ports { - true => node_index, - false => 0, - }; + let port_index = if libp2p_config.index_ports { + node_index + } else {0}; let bound_addr: Multiaddr = format!( "/{}/{}/udp/{}/quic-v1", if libp2p_config.public_ip.is_ipv4() { @@ -252,6 +263,7 @@ match node_type { } let node_config = config_builder.build().unwrap(); + #[allow(clippy::cast_possible_truncation)] Libp2pNetwork::new( NetworkingMetricsValue::default(), node_config, @@ -425,7 +437,7 @@ pub trait RunDA< for _ in 0..transactions_to_send_per_round { let tx = transactions.remove(0); - _ = context.submit_transaction(tx).await.unwrap(); + () = context.submit_transaction(tx).await.unwrap(); total_transactions_sent += 1; } } @@ -482,10 +494,15 @@ pub trait RunDA< /// Represents a web server-based run pub struct WebServerDARun { + /// the network configuration config: NetworkConfig, + /// quorum channel quorum_channel: WebCommChannel, + /// data availability channel da_channel: WebCommChannel, + /// view sync channel view_sync_channel: WebCommChannel, + /// vid channel vid_channel: WebCommChannel, } @@ -531,7 +548,7 @@ where // create and wait for underlying network let underlying_quorum_network = - webserver_network_from_config::(config.clone(), pub_key.clone()).await; + webserver_network_from_config::(config.clone(), pub_key.clone()); underlying_quorum_network.wait_for_ready().await; @@ -584,10 +601,15 @@ where /// Represents a libp2p-based run pub struct Libp2pDARun { + /// the network configuration config: NetworkConfig, + /// quorum channel quorum_channel: Libp2pCommChannel, + /// data availability channel da_channel: Libp2pCommChannel, + /// view sync channel view_sync_channel: Libp2pCommChannel, + /// vid channel vid_channel: Libp2pCommChannel, } @@ -677,10 +699,15 @@ where /// Represents a combined-network-based run pub struct CombinedDARun { + /// the network configuration config: NetworkConfig, + /// quorum channel quorum_channel: CombinedCommChannel, + /// data availability channel da_channel: CombinedCommChannel, + /// view sync channel view_sync_channel: CombinedCommChannel, + /// vid channel vid_channel: CombinedCommChannel, } @@ -736,7 +763,7 @@ where // create and wait for underlying webserver network let webserver_underlying_quorum_network = - webserver_network_from_config::(config.clone(), pub_key.clone()).await; + webserver_network_from_config::(config.clone(), pub_key.clone()); let webserver_underlying_da_network = WebServerNetwork::create(url, wait_between_polls, pub_key, true); @@ -796,6 +823,8 @@ where } /// Main entry point for validators +/// # Panics +/// if unable to get the local ip address pub async fn main_entry_point< TYPES: NodeType< Transaction = TestTransaction, @@ -901,6 +930,10 @@ pub async fn main_entry_point< .await; } +/// generate a libp2p identity based on a seed and idx +/// # Panics +/// if unable to create a secret key out of bytes +#[must_use] pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { let mut hasher = blake3::Hasher::new(); hasher.update(&seed); diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index 1b77ed3ba3..3dda9d6007 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -1,3 +1,5 @@ +//! A example program using libp2p +/// types used for this example pub mod types; use crate::infra::load_config_from_file; @@ -19,6 +21,7 @@ use crate::{ types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, }; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -53,7 +56,7 @@ async fn main() { let config: NetworkConfig< ::SignatureKey, ::ElectionConfigType, - > = load_config_from_file::(args.config_file); + > = load_config_from_file::(&args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { let orchestrator_url = orchestrator_url.clone(); @@ -71,7 +74,7 @@ async fn main() { public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), network_config_file: None, }) - .await + .await; }); nodes.push(node); } diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index 79c05e5867..f64d2f25ac 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -1,3 +1,4 @@ +//! A multi-validator using libp2p use async_compatibility_layer::{ art::async_spawn, logging::{setup_backtrace, setup_logging}, @@ -10,8 +11,10 @@ use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +/// types used for this example pub mod types; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -40,7 +43,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs::from_multi_args(args, node_index)) - .await + .await; }); nodes.push(node); } diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 4f6bf5f085..9298b9e372 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -1,3 +1,6 @@ +//! An orchestrator using libp2p + +/// types used for this example pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; @@ -9,6 +12,7 @@ use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 23f75da4ed..449e518f48 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -5,12 +5,17 @@ use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation use serde::{Deserialize, Serialize}; use std::fmt::Debug; +/// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} +/// convenience type alias pub type DANetwork = Libp2pCommChannel; +/// convenience type alias pub type VIDNetwork = Libp2pCommChannel; +/// convenience type alias pub type QuorumNetwork = Libp2pCommChannel; +/// convenience type alias pub type ViewSyncNetwork = Libp2pCommChannel; impl NodeImplementation for NodeImpl { @@ -24,4 +29,5 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } +/// convenience type alias pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 9b0babfd16..d27f7f1ea1 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,3 +1,4 @@ +//! A validator using libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_testing::state_types::TestTypes; @@ -8,8 +9,10 @@ use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork} use hotshot_orchestrator::client::ValidatorArgs; +/// types used for this example pub mod types; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index 467b446573..eb92ee77d4 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -1,3 +1,5 @@ +//! A example program using the web server +/// types used for this example pub mod types; use crate::infra::load_config_from_file; @@ -10,6 +12,7 @@ use crate::{ use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -79,7 +82,7 @@ async fn main() { let config: NetworkConfig< ::SignatureKey, ::ElectionConfigType, - > = load_config_from_file::(args.config_file); + > = load_config_from_file::(&args.config_file); let mut nodes = Vec::new(); for _ in 0..(config.config.total_nodes.get()) { let orchestrator_url = orchestrator_url.clone(); @@ -97,7 +100,7 @@ async fn main() { public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), network_config_file: None, }) - .await + .await; }); nodes.push(node); } diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index 79c05e5867..780aa24207 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -1,3 +1,4 @@ +//! A multi-validator using the web server use async_compatibility_layer::{ art::async_spawn, logging::{setup_backtrace, setup_logging}, @@ -10,8 +11,10 @@ use types::VIDNetwork; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +/// types used for this example pub mod types; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -40,7 +43,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs::from_multi_args(args, node_index)) - .await + .await; }); nodes.push(node); } diff --git a/hotshot/examples/webserver/orchestrator.rs b/hotshot/examples/webserver/orchestrator.rs index fd61f16648..5a39e56471 100644 --- a/hotshot/examples/webserver/orchestrator.rs +++ b/hotshot/examples/webserver/orchestrator.rs @@ -1,3 +1,6 @@ +//! A orchestrator using the web server + +/// types used for this example pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; @@ -10,6 +13,7 @@ use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/hotshot/examples/webserver/types.rs b/hotshot/examples/webserver/types.rs index 822f27bc44..03d8fc36ed 100644 --- a/hotshot/examples/webserver/types.rs +++ b/hotshot/examples/webserver/types.rs @@ -5,12 +5,17 @@ use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation use serde::{Deserialize, Serialize}; use std::fmt::Debug; +/// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} +/// convenience type alias pub type DANetwork = WebCommChannel; +/// convenience type alias pub type VIDNetwork = WebCommChannel; +/// convenience type alias pub type QuorumNetwork = WebCommChannel; +/// convenience type alias pub type ViewSyncNetwork = WebCommChannel; impl NodeImplementation for NodeImpl { @@ -24,4 +29,5 @@ impl NodeImplementation for NodeImpl { (ChannelMaps::new(start_view), None) } } +/// convenience type alias pub type ThisRun = WebServerDARun; diff --git a/hotshot/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs index 9b0babfd16..4bfcd9d78c 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/hotshot/examples/webserver/validator.rs @@ -1,3 +1,4 @@ +//! A validator using the web server use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_testing::state_types::TestTypes; @@ -8,8 +9,10 @@ use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork} use hotshot_orchestrator::client::ValidatorArgs; +/// types used for this example pub mod types; +/// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 6978b28d6d..4eabf8c5b1 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use std::collections::BTreeSet; use std::sync::Arc; From e438016f84048dbd4101204e96eab24aa472444b Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 23:08:29 -0500 Subject: [PATCH 0633/1393] chore: more lints --- hotshot/examples/webserver/multi-webserver.rs | 4 ++++ hotshot/examples/webserver/webserver.rs | 3 +++ testing/tests/catchup.rs | 2 +- testing/tests/combined_network.rs | 20 +++++++++---------- testing/tests/consensus_task.rs | 1 + testing/tests/gen_key_pair.rs | 6 ++++-- testing/tests/libp2p.rs | 4 ++-- testing/tests/web_server.rs | 4 ++-- 8 files changed, 27 insertions(+), 17 deletions(-) diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index aba2762bf1..e338835c07 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -1,3 +1,4 @@ +//! A multi web server use std::sync::Arc; use async_compatibility_layer::{ @@ -10,9 +11,12 @@ use hotshot_testing::state_types::TestTypes; use surf_disco::Url; use tracing::error; +/// Arguments to run multiple web servers #[derive(Parser, Debug)] struct MultiWebServerArgs { + /// consensus url consensus_url: Url, + /// data availability server url da_url: Url, } diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index 07ffab4f88..e6c9a468c8 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -1,3 +1,4 @@ +//! web server example use hotshot_testing::state_types::TestTypes; use std::sync::Arc; use surf_disco::Url; @@ -8,8 +9,10 @@ use async_compatibility_layer::{ }; use clap::Parser; +/// web server arguments #[derive(Parser, Debug)] struct WebServerArgs { + /// url to run on url: Url, } diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 92c718436f..265e4c03a8 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -103,7 +103,7 @@ async fn test_catchup_web() { metadata.completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), + duration: Duration::from_millis(100_000), }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 5aacf2a8ea..9e0befae8c 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -23,8 +23,8 @@ async fn test_combined_network() { let metadata: TestMetadata = TestMetadata { timing_data: TimingData { round_start_delay: 25, - next_view_timeout: 10000, - start_delay: 120000, + next_view_timeout: 10_000, + start_delay: 120_000, ..Default::default() }, @@ -61,8 +61,8 @@ async fn test_combined_network_webserver_crash() { let mut metadata: TestMetadata = TestMetadata { timing_data: TimingData { round_start_delay: 25, - next_view_timeout: 10000, - start_delay: 120000, + next_view_timeout: 10_000, + start_delay: 120_000, ..Default::default() }, @@ -113,8 +113,8 @@ async fn test_combined_network_reup() { let mut metadata: TestMetadata = TestMetadata { timing_data: TimingData { round_start_delay: 25, - next_view_timeout: 10000, - start_delay: 120000, + next_view_timeout: 10_000, + start_delay: 120_000, ..Default::default() }, @@ -169,8 +169,8 @@ async fn test_combined_network_half_dc() { let mut metadata: TestMetadata = TestMetadata { timing_data: TimingData { round_start_delay: 25, - next_view_timeout: 10000, - start_delay: 120000, + next_view_timeout: 10_000, + start_delay: 120_000, ..Default::default() }, @@ -252,8 +252,8 @@ async fn test_stress_combined_network_fuzzy() { timing_data: TimingData { round_start_delay: 25, - next_view_timeout: 10000, - start_delay: 120000, + next_view_timeout: 10_000, + start_delay: 120_000, ..Default::default() }, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 8ff918a143..47e67fa541 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use commit::Committable; use hotshot::{ tasks::add_consensus_task, diff --git a/testing/tests/gen_key_pair.rs b/testing/tests/gen_key_pair.rs index ab7cbcb1aa..5ace645967 100644 --- a/testing/tests/gen_key_pair.rs +++ b/testing/tests/gen_key_pair.rs @@ -1,3 +1,5 @@ +#![allow(clippy::panic)] + #[cfg(test)] mod tests { use core::panic; @@ -28,9 +30,9 @@ mod tests { + "/../../config/ValidatorConfigOutput"; match File::create(filename) { Err(why) => panic!("couldn't create file for output key pairs: {}", why), - Ok(mut file) => match write!(file, "{:?}", my_own_validator_config) { + Ok(mut file) => match write!(file, "{my_own_validator_config:?}", ) { Err(why) => panic!("couldn't generate key pairs and write to the file: {}", why), - Ok(_) => println!("successfully wrote to file for output key pairs"), + Ok(()) => println!("successfully wrote to file for output key pairs"), }, } } diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 5ed8fcd315..8fa03a8179 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -39,7 +39,7 @@ async fn libp2p_network() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } /// stress test for libp2p @@ -58,5 +58,5 @@ async fn test_stress_libp2p_network() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index d99501faec..81277e7821 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -22,8 +22,8 @@ async fn web_server_network() { let metadata = TestMetadata { timing_data: TimingData { round_start_delay: 25, - next_view_timeout: 10000, - start_delay: 120000, + next_view_timeout: 10_000, + start_delay: 120_000, ..Default::default() }, From e3a889c753cb7455fe79e8bdd69932df91eaca6c Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 23:08:43 -0500 Subject: [PATCH 0634/1393] chore: fmt --- hotshot-stake-table/src/mt_based.rs | 7 +- hotshot-stake-table/src/mt_based/internal.rs | 6 +- hotshot-stake-table/src/vec_based.rs | 24 ++++--- hotshot-stake-table/src/vec_based/config.rs | 2 +- hotshot-state-prover/src/lib.rs | 3 +- hotshot/examples/infra/mod.rs | 9 ++- .../traits/networking/web_server_network.rs | 7 +- libp2p-networking/examples/counter.rs | 70 +++++++++---------- libp2p-networking/tests/common/mod.rs | 11 +-- libp2p-networking/tests/counter.rs | 4 +- orchestrator/src/config.rs | 8 +-- orchestrator/src/lib.rs | 7 +- testing/src/overall_safety_task.rs | 2 +- testing/src/state_types.rs | 5 +- testing/src/test_runner.rs | 5 +- testing/tests/gen_key_pair.rs | 2 +- web_server/src/lib.rs | 5 +- 17 files changed, 100 insertions(+), 77 deletions(-) diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index c4bde25b2c..235fbb3d19 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -44,18 +44,19 @@ impl StakeTableScheme for StakeTable { amount: Self::Amount, (): Self::Aux, ) -> Result<(), StakeTableError> { - if self.mapping.get(&new_key).is_some() { Err(StakeTableError::ExistingKey) } else { + if self.mapping.get(&new_key).is_some() { + Err(StakeTableError::ExistingKey) + } else { let pos = self.mapping.len(); self.head = self.head.register( self.height, &to_merkle_path(pos, self.height), &new_key, amount, - )?; + )?; self.mapping.insert(new_key, pos); Ok(()) } - } fn deregister(&mut self, _existing_key: &Self::Key) -> Result<(), StakeTableError> { diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 8ebef5ac3e..a308d235b4 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -147,7 +147,7 @@ impl MerkleProof { .map_err(|_| StakeTableError::RescueError)?[0]; Ok(comm) } - MerklePathEntry::Leaf{ .. } => Err(StakeTableError::MalformedProof), + MerklePathEntry::Leaf { .. } => Err(StakeTableError::MalformedProof), }) } _ => Err(StakeTableError::MalformedProof), @@ -208,8 +208,8 @@ impl PersistentMerkleNode { children: _, num_keys: _, total_stakes: _, - } | - PersistentMerkleNode::Leaf { + } + | PersistentMerkleNode::Leaf { comm, key: _, value: _, diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 3048e6cba5..1221bbe53b 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -101,15 +101,17 @@ where amount: Self::Amount, aux: Self::Aux, ) -> Result<(), StakeTableError> { - if self.bls_mapping.get(&new_key).is_some() { Err(StakeTableError::ExistingKey) } else { - let pos = self.bls_mapping.len(); - self.head.bls_keys.push(new_key.clone()); - self.head.schnorr_keys.push(aux); - self.head.stake_amount.push(amount); - self.head_total_stake += amount; - self.bls_mapping.insert(new_key, pos); - Ok(()) - } + if self.bls_mapping.get(&new_key).is_some() { + Err(StakeTableError::ExistingKey) + } else { + let pos = self.bls_mapping.len(); + self.head.bls_keys.push(new_key.clone()); + self.head.schnorr_keys.push(aux); + self.head.stake_amount.push(amount); + self.head_total_stake += amount; + self.bls_mapping.insert(new_key, pos); + Ok(()) + } } fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError> { @@ -396,7 +398,9 @@ mod tests { BLSOverBN254CurveSignatureScheme::key_gen(&(), &mut pseudo_rng) .unwrap() .1, - SchnorrSignatureScheme::key_gen(&(), &mut pseudo_rng).unwrap().1, + SchnorrSignatureScheme::key_gen(&(), &mut pseudo_rng) + .unwrap() + .1, ) }) .collect::>(); diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index fe6ab10a48..0ba64b7bda 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -35,7 +35,7 @@ impl ToFields for QCVerKey { FieldType::from_le_bytes_mod_order(&bytes[31..62]), FieldType::from_le_bytes_mod_order(&bytes[62..]), ] - }, + } Err(_) => unreachable!(), } } diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs index 486e3003cc..fc17f6d1e0 100644 --- a/hotshot-state-prover/src/lib.rs +++ b/hotshot-state-prover/src/lib.rs @@ -43,7 +43,8 @@ pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; pub fn preprocess( srs: &UniversalSrs, ) -> Result<(ProvingKey, VerifyingKey), PlonkError> { - let (circuit, _) = circuit::build_for_preprocessing::()?; + let (circuit, _) = + circuit::build_for_preprocessing::()?; PlonkKzgSnark::preprocess(srs, &circuit) } diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index d562397c58..9916004e80 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -147,7 +147,10 @@ fn calculate_num_tx_per_round( transactions_per_round: usize, ) -> usize { transactions_per_round / total_num_nodes - + usize::from((total_num_nodes - 1 - node_index as usize) < (transactions_per_round % total_num_nodes)) + + usize::from( + (total_num_nodes - 1 - node_index as usize) + < (transactions_per_round % total_num_nodes), + ) } /// create a web server network from a config file + public key @@ -202,7 +205,9 @@ async fn libp2p_network_from_config( let node_index = config.node_index; let port_index = if libp2p_config.index_ports { node_index - } else {0}; + } else { + 0 + }; let bound_addr: Multiaddr = format!( "/{}/{}/udp/{}/quic-v1", if libp2p_config.public_ip.is_ipv4() { diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 00a076d20c..0696814aa6 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -478,11 +478,8 @@ impl Inner { } /// Fetches transactions from web server - async fn get_txs_from_web_server( - &self, - endpoint: String, - ) -> TxnResult { - let result : Result>)>, _> = self.client.get(&endpoint).send().await; + async fn get_txs_from_web_server(&self, endpoint: String) -> TxnResult { + let result: Result>)>, _> = self.client.get(&endpoint).send().await; match result { Err(_error) => Err(NetworkError::WebServer { source: WebServerNetworkError::ClientError, diff --git a/libp2p-networking/examples/counter.rs b/libp2p-networking/examples/counter.rs index 7208eeb719..54a27b24dd 100644 --- a/libp2p-networking/examples/counter.rs +++ b/libp2p-networking/examples/counter.rs @@ -16,41 +16,41 @@ // #[instrument] // async fn main() -> Result<()> { // fn main() -> Result<(), ()> { - // let args = CliOpt::parse(); - // - // #[cfg(all(feature = "lossy_network", target_os = "linux"))] - // let network = { - // use crate::common::lossy_network::LOSSY_QDISC; - // let mut builder = LossyNetworkBuilder::default(); - // builder - // .env_type(args.env_type_delegate.env_type) - // .netem_config(LOSSY_QDISC); - // match args.env_type_delegate.env_type { - // ExecutionEnvironment::Docker => { - // builder.eth_name("eth0".to_string()).isolation_config(None) - // } - // ExecutionEnvironment::Metal => builder - // .eth_name("ens5".to_string()) - // .isolation_config(Some(IsolationConfig::default())), - // }; - // builder.build() - // }?; - // - // #[cfg(all(feature = "lossy_network", target_os = "linux"))] - // { - // network.isolate().await?; - // network.create_qdisc().await?; - // } - // - // start_main(args).await?; - // - // #[cfg(all(feature = "lossy_network", target_os = "linux"))] - // { - // // implicitly deletes qdisc in the case of metal run - // // leaves qdisc alive in docker run with expectation docker does cleanup - // network.undo_isolate().await?; - // } - // +// let args = CliOpt::parse(); +// +// #[cfg(all(feature = "lossy_network", target_os = "linux"))] +// let network = { +// use crate::common::lossy_network::LOSSY_QDISC; +// let mut builder = LossyNetworkBuilder::default(); +// builder +// .env_type(args.env_type_delegate.env_type) +// .netem_config(LOSSY_QDISC); +// match args.env_type_delegate.env_type { +// ExecutionEnvironment::Docker => { +// builder.eth_name("eth0".to_string()).isolation_config(None) +// } +// ExecutionEnvironment::Metal => builder +// .eth_name("ens5".to_string()) +// .isolation_config(Some(IsolationConfig::default())), +// }; +// builder.build() +// }?; +// +// #[cfg(all(feature = "lossy_network", target_os = "linux"))] +// { +// network.isolate().await?; +// network.create_qdisc().await?; +// } +// +// start_main(args).await?; +// +// #[cfg(all(feature = "lossy_network", target_os = "linux"))] +// { +// // implicitly deletes qdisc in the case of metal run +// // leaves qdisc alive in docker run with expectation docker does cleanup +// network.undo_isolate().await?; +// } +// // Ok(()) // } // diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 31d8dc020b..c23c077c60 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -82,7 +82,7 @@ fn gen_peerid_map(handles: &[Arc>]) -> HashMap(handles: &[Arc>]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); - for handle in handles{ + for handle in handles { warn!( "peer {}, connected to {:?}", handle.id(), @@ -165,9 +165,12 @@ pub async fn spin_up_swarms( .build() .context(NodeConfigSnafu) .context(HandleSnafu)?; - let node = Box::pin(NetworkNodeHandle::new(regular_node_config.clone(), j + num_bootstrap)) - .await - .context(HandleSnafu)?; + let node = Box::pin(NetworkNodeHandle::new( + regular_node_config.clone(), + j + num_bootstrap, + )) + .await + .context(HandleSnafu)?; let node = Arc::new(node); connecting_futs.push({ let node = node.clone(); diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index e58a66b609..93aab7ea26 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -247,7 +247,7 @@ async fn run_intersperse_many_rounds( handles: Vec>>, timeout: Duration, ) { - for i in 0..u32::try_from(NUM_ROUNDS).unwrap(){ + for i in 0..u32::try_from(NUM_ROUNDS).unwrap() { if i % 2 == 0 { run_request_response_increment_all(&handles, timeout).await; } else { @@ -290,7 +290,7 @@ pub async fn run_request_response_one_round( timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; - for h in handles{ + for h in handles { assert_eq!(h.state().await, 1); } } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index e1063283b0..f9f88ab96b 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -522,13 +522,13 @@ impl From> for HotS } } /// default number of rounds to run -pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS : usize = 10; +pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS: usize = 10; /// default number of transactions per round -pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND : usize = 10; +pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND: usize = 10; /// default size of transactions -pub const ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE : usize = 100; +pub const ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE: usize = 100; /// default delay before beginning consensus -pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS : u64 = 60; +pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS: u64 = 60; impl From for ValidatorConfig { fn from(val: ValidatorConfigFile) -> Self { diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index fcc961d3d5..0453fecbd5 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -144,7 +144,9 @@ where if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { let port_index = if libp2p_config_clone.index_ports { node_index - } else {0}; + } else { + 0 + }; let socketaddr = SocketAddr::new(identity, libp2p_config_clone.base_port + port_index); let keypair = libp2p_generate_indexed_identity(self.config.seed, node_index.into()); @@ -272,7 +274,8 @@ where KEY: SignatureKey + 'static + serde::Serialize, ELECTION: ElectionConfig + 'static + serde::Serialize, { - let web_api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); + let web_api = + define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 1d50724457..e8c68c685e 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -48,7 +48,7 @@ pub enum OverallSafetyTaskErr { /// inconsistent txn nums InconsistentTxnsNum { /// node idx -> number transactions - map: HashMap + map: HashMap, }, /// too many failed views TooManyFailures { diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index bb188dd253..990f3ed6e9 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -101,6 +101,9 @@ impl TestableState for TestState { ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; - TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + usize::try_from(padding).unwrap()]) + TestTransaction(vec![ + 0; + RANDOM_TX_BASE_SIZE + usize::try_from(padding).unwrap() + ]) } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index f7d3aa8ebf..7de8902e7e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -185,7 +185,10 @@ where } } } - assert!(error_list.is_empty(), "TEST FAILED! Results: {error_list:?}"); + assert!( + error_list.is_empty(), + "TEST FAILED! Results: {error_list:?}" + ); } /// add nodes diff --git a/testing/tests/gen_key_pair.rs b/testing/tests/gen_key_pair.rs index 5ace645967..d427be8976 100644 --- a/testing/tests/gen_key_pair.rs +++ b/testing/tests/gen_key_pair.rs @@ -30,7 +30,7 @@ mod tests { + "/../../config/ValidatorConfigOutput"; match File::create(filename) { Err(why) => panic!("couldn't create file for output key pairs: {}", why), - Ok(mut file) => match write!(file, "{my_own_validator_config:?}", ) { + Ok(mut file) => match write!(file, "{my_own_validator_config:?}",) { Err(why) => panic!("couldn't generate key pairs and write to the file: {}", why), Ok(()) => println!("successfully wrote to file for output key pairs"), }, diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index b5bd986b3a..80d247e863 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -139,7 +139,10 @@ impl WebServerState { /// Panics if already shut down #[allow(clippy::panic)] pub fn with_shutdown_signal(mut self, shutdown_listener: Option>) -> Self { - assert!(self.shutdown.is_none(), "A shutdown signal is already registered and can not be registered twice"); + assert!( + self.shutdown.is_none(), + "A shutdown signal is already registered and can not be registered twice" + ); self.shutdown = shutdown_listener; self } From af2d62e8c426c8b7c9ba47d22424f46770f46180 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 23:12:08 -0500 Subject: [PATCH 0635/1393] chore: more lints --- hotshot/examples/infra/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 9916004e80..287769a0a9 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -171,6 +171,7 @@ fn webserver_network_from_config( #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_lossless)] +#[allow(clippy::too_many_lines)] /// Create a libp2p network from a config file and public key /// # Panics /// If unable to create bootstrap nodes multiaddres or the libp2p config is invalid From f9cc05b25c464013f46476b488d5f38fe21d7e0d Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 23:18:50 -0500 Subject: [PATCH 0636/1393] chore: more lints --- orchestrator/src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 653e2fdb03..268dc526fa 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -134,7 +134,7 @@ impl OrchestratorClient { let mut config = self.wait_for_fn_from_orchestrator(f).await; - config.node_index = u64::try_from(node_index).unwrap(); + config.node_index = From::::from(node_index); config } From 6253fe45a55ff4ef162ddfd0bcfa401a904b086c Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 2 Jan 2024 23:31:06 -0500 Subject: [PATCH 0637/1393] chore: fix doc --- hotshot-state-prover/src/circuit.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 1d531da59c..497e0bcf11 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -57,6 +57,7 @@ pub struct LightClientStateVar { vars: [Variable; 7], } +/// public input #[derive(Clone, Debug)] pub struct PublicInput(Vec); @@ -150,26 +151,31 @@ impl LightClientStateVar { }) } + /// TODO doc #[must_use] pub fn view_number(&self) -> Variable { self.vars[0] } + /// TODO doc #[must_use] pub fn block_height(&self) -> Variable { self.vars[1] } + /// TODO doc #[must_use] pub fn block_comm_root(&self) -> Variable { self.vars[2] } + /// TODO doc #[must_use] pub fn fee_ledger_comm(&self) -> Variable { self.vars[3] } + /// TODO doc #[must_use] pub fn stake_table_comm(&self) -> StakeTableCommVar { StakeTableCommVar { From debd9fb5bc84b56210440a08013d755ff55793df Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 4 Jan 2024 16:43:23 -0500 Subject: [PATCH 0638/1393] added feature to randomize leader election --- hotshot/Cargo.toml | 3 ++- hotshot/src/traits/election/static_committee.rs | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 54a99eb52b..94db2456ae 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -17,6 +17,7 @@ bin-orchestrator = ["clap"] docs = [] doc-images = [] hotshot-testing = [] +randomized-leader-election = [] # libp2p [[example]] @@ -130,4 +131,4 @@ blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } serde_json = "1.0.109" toml = { workspace = true } -hotshot-testing = { path = "../testing" } \ No newline at end of file +hotshot-testing = { path = "../testing" } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index d3bddaf4de..5da007a778 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -10,6 +10,9 @@ use serde::{Deserialize, Serialize}; use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; +#[cfg(feature = "randomized-leader-election")] +use rand::{Rng, rngs::StdRng}; + /// Dummy implementation of [`Membership`] #[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -56,6 +59,7 @@ where self.committee_nodes_with_stake.clone() } + #[cfg(not(feature = "randomized-leader-election"))] /// Index the vector of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { let index = (*view_number % self.nodes_with_stake.len() as u64) as usize; @@ -63,6 +67,16 @@ where TYPES::SignatureKey::get_public_key(&res) } + #[cfg(feature = "randomized-leader-election")] + /// Index the vector of public keys with a random number generated using the current view number as a seed + fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number as u64); + let randomized_view_number: u64 = rng.gen(); + let index = (randomized_view_number % self.nodes_with_stake.len() as u64) as usize; + let res = self.nodes_with_stake[index].clone(); + TYPES::SignatureKey::get_public_key(&res) + } + fn has_stake(&self, pub_key: &PUBKEY) -> bool { let entry = pub_key.get_stake_table_entry(1u64); self.committee_nodes_with_stake.contains(&entry) From 4eb3df490009dfc7240a2e61de87df09a9734bd5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 4 Jan 2024 16:43:23 -0500 Subject: [PATCH 0639/1393] added feature to randomize leader election --- testing/tests/network_task.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 20d3b964b0..a0a99635ec 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -17,6 +17,7 @@ use std::{collections::HashMap, marker::PhantomData}; tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_network_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; From 56fc39d2fc3c85969fa642ca420340dd8e8e5564 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 5 Jan 2024 13:22:10 -0500 Subject: [PATCH 0640/1393] re-include network_task test --- testing/tests/network_task.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index a0a99635ec..20d3b964b0 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -17,7 +17,6 @@ use std::{collections::HashMap, marker::PhantomData}; tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] async fn test_network_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; From da7719ae1975708a0b634564b13e36c2957063d5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 03:46:43 +0000 Subject: [PATCH 0641/1393] Bump serde_json from 1.0.109 to 1.0.111 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.109 to 1.0.111. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.109...v1.0.111) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- hotshot/Cargo.toml | 2 +- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 94db2456ae..e8588d41b4 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -129,6 +129,6 @@ async-std = { workspace = true } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.109" +serde_json = "1.0.111" toml = { workspace = true } hotshot-testing = { path = "../testing" } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 3b5c2be48b..1b7f90ed06 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -42,7 +42,7 @@ libp2p-noise = { version = "0.44.0", default-features = false } parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.109" +serde_json = "1.0.111" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 9c25b8fcf9..d50f668d3f 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -51,7 +51,7 @@ tracing = { workspace = true } typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.109" +serde_json = "1.0.111" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 3ee1e7c7a1ab91e1542e1f6c68099509acd9ad59 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 9 Jan 2024 14:15:37 +0100 Subject: [PATCH 0642/1393] Log progress every 100 views (#2265) --- task-impls/src/consensus.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 184b8fbedf..4cf73556c1 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -375,6 +375,13 @@ impl, A: ConsensusApi + "Updating view from {} to {} in consensus task", *self.cur_view, *new_view ); + + if *self.cur_view / 100 != *new_view / 100 { + // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): + // switch to info! when INFO logs become less cluttered + error!("Progress: entered view {:>6}", *new_view); + } + // cancel the old timeout task if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; From 380637b5ed64d8d29152e47aa4af66e76255b315 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 9 Jan 2024 14:16:31 +0100 Subject: [PATCH 0643/1393] Remove `timestamp` field from `Leaf` (#2307) --- task-impls/src/consensus.rs | 5 ----- testing/src/task_helpers.rs | 1 - testing/tests/consensus_task.rs | 1 - types/src/data.rs | 11 ----------- types/src/traits/storage.rs | 4 ---- 5 files changed, 22 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 4cf73556c1..b7909dfec2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -220,7 +220,6 @@ impl, A: ConsensusApi + block_header: proposal.block_header.clone(), block_payload: None, rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_membership.get_leader(view), }; let Ok(vote) = QuorumVote::::create_signed_vote( @@ -307,7 +306,6 @@ impl, A: ConsensusApi + block_header: proposal.block_header.clone(), block_payload: None, rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.quorum_membership.get_leader(view), }; @@ -538,7 +536,6 @@ impl, A: ConsensusApi + block_header: proposal.data.block_header, block_payload: None, rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender, }; @@ -563,7 +560,6 @@ impl, A: ConsensusApi + block_header: proposal.data.block_header, block_payload: None, rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: sender, }; let leaf_commitment = leaf.commit(); @@ -1173,7 +1169,6 @@ impl, A: ConsensusApi + ), block_payload: None, rejected: vec![], - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: self.api.public_key().clone(), }; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index ae349b7523..6c852d6160 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -145,7 +145,6 @@ async fn build_quorum_proposal_and_signature( block_header: block_header.clone(), block_payload: None, rejected: vec![], - timestamp: 0, proposer_id: *api.public_key(), }; let signature = ::sign(private_key, leaf.commit().as_ref()) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 9858dc61e2..851391259b 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -61,7 +61,6 @@ async fn build_vote( block_header: proposal.block_header, block_payload: None, rejected: Vec::new(), - timestamp: 0, proposer_id: membership.get_leader(view), }; let vote = QuorumVote::::create_signed_vote( diff --git a/types/src/data.rs b/types/src/data.rs index 040404b0e4..4af670ed67 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -309,10 +309,6 @@ pub struct Leaf { /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, - // TODO (Keyao) Remove. - /// the timestamp the leaf was constructed at, in nanoseconds. Only exposed for dashboard stats - pub timestamp: i128, - /// the proposer id of the leaf pub proposer_id: TYPES::SignatureKey, } @@ -361,7 +357,6 @@ impl Leaf { block_header, block_payload: Some(block_payload), rejected: Vec::new(), - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } } @@ -435,10 +430,6 @@ impl Leaf { pub fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() } - /// Real-world time when this leaf was created. - pub fn get_timestamp(&self) -> i128 { - self.timestamp - } /// Identity of the network participant who proposed this leaf. pub fn get_proposer_id(&self) -> TYPES::SignatureKey { self.proposer_id.clone() @@ -452,7 +443,6 @@ impl Leaf { block_header: stored_view.block_header, block_payload: stored_view.block_payload, rejected: stored_view.rejected, - timestamp: stored_view.timestamp, proposer_id: stored_view.proposer_id, } } @@ -554,7 +544,6 @@ where block_header: leaf.get_block_header().clone(), block_payload: leaf.get_block_payload(), rejected: leaf.get_rejected(), - timestamp: leaf.get_timestamp(), proposer_id: leaf.get_proposer_id(), } } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index eca798d35f..4c6e9fe27f 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -132,9 +132,6 @@ pub struct StoredView { pub block_payload: Option, /// transactions rejected in this view pub rejected: Vec, - /// the timestamp this view was recv-ed in nanonseconds - #[derivative(PartialEq = "ignore")] - pub timestamp: i128, /// the proposer id #[derivative(PartialEq = "ignore")] pub proposer_id: TYPES::SignatureKey, @@ -163,7 +160,6 @@ where block_header, block_payload, rejected, - timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), proposer_id, } } From cbd12cf7c4a8ea080b1a02ec377fdbc23184019c Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 9 Jan 2024 11:39:15 -0500 Subject: [PATCH 0644/1393] feat: parameter tweak for async tests --- testing/src/overall_safety_task.rs | 2 ++ testing/tests/unreliable_network.rs | 27 ++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f6b6a96c4f..5d703d941e 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -22,6 +22,7 @@ use std::{ collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, }; +use tracing::error; use crate::{test_launcher::TaskGenerator, test_runner::Node}; pub type StateAndBlock = (Vec, Vec); @@ -254,6 +255,7 @@ impl RoundResult { let remaining_nodes = total_num_nodes - (num_decided + num_failed); if check_leaf && self.leaf_map.len() != 1 { + error!("LEAF MAP (that is mismatched) IS: {:?}", self.leaf_map); self.status = ViewStatus::Err(OverallSafetyTaskErr::MismatchedLeaf); return; } diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index 7c2457c65c..b959185abc 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -1,3 +1,4 @@ +use hotshot_testing::test_builder::TimingData; use hotshot_types::traits::network::AsynchronousNetwork; use hotshot_types::traits::network::ChaosNetwork; use hotshot_types::traits::network::PartiallySynchronousNetwork; @@ -87,6 +88,7 @@ async fn test_memory_network_sync() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] #[instrument] async fn libp2p_network_async() { async_compatibility_layer::logging::setup_logging(); @@ -94,6 +96,7 @@ async fn libp2p_network_async() { let metadata = TestMetadata { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, + num_failed_views: 50, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( @@ -101,8 +104,13 @@ async fn libp2p_network_async() { duration: Duration::new(240, 0), }, ), + timing_data: TimingData { + timeout_ratio: (1, 1), + next_view_timeout: 1000, + ..TestMetadata::default_multiple_rounds().timing_data + }, unreliable_network: Some(Box::new(AsynchronousNetwork { - keep_numerator: 8, + keep_numerator: 9, keep_denominator: 10, delay_low_ms: 4, delay_high_ms: 30, @@ -122,6 +130,7 @@ async fn libp2p_network_async() { async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] +#[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_async() { use hotshot_testing::{ @@ -134,15 +143,25 @@ async fn test_memory_network_async() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + num_failed_views: 5000, + ..Default::default() + }, // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(240), }, ), + timing_data: TimingData { + timeout_ratio: (1, 1), + next_view_timeout: 1000, + ..TestMetadata::default_multiple_rounds().timing_data + }, unreliable_network: Some(Box::new(AsynchronousNetwork { - keep_numerator: 8, - keep_denominator: 10, + keep_numerator: 95, + keep_denominator: 100, delay_low_ms: 4, delay_high_ms: 30, })), @@ -249,6 +268,7 @@ async fn libp2p_network_partially_sync() { async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] +#[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_chaos() { use hotshot_testing::{ @@ -289,6 +309,7 @@ async fn test_memory_network_chaos() { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] #[instrument] async fn libp2p_network_chaos() { async_compatibility_layer::logging::setup_logging(); From 09bab7fabab1dbad01bc33f72f82a1578909de79 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Tue, 9 Jan 2024 11:46:01 -0500 Subject: [PATCH 0645/1393] feat: brendon's suggested changes --- testing/src/view_sync_task.rs | 53 +---------------------------------- 1 file changed, 1 insertion(+), 52 deletions(-) diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index f3a26ce157..a94fdb7d1c 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -10,10 +10,7 @@ use hotshot_task::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; +use std::{collections::HashSet, sync::Arc}; use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; @@ -58,9 +55,6 @@ pub enum ShouldHitViewSync { pub enum ViewSyncTaskDescription { /// (min, max) number nodes that may hit view sync, inclusive Threshold(usize, usize), - /// node idx -> whether or not the node should hit view sync - /// if node not in map, assumed to be `ShouldHItViewSync::DontCare` - Precise(HashMap), } impl ViewSyncTaskDescription { @@ -90,42 +84,6 @@ impl ViewSyncTaskDescription { ) } } - ViewSyncTaskDescription::Precise(map) => { - for (id, should_hit) in map { - match should_hit { - ShouldHitViewSync::Yes => { - if !state.hit_view_sync.contains(&id) { - return ( - Some(HotShotTaskCompleted::Error( - Box::new(ViewSyncTaskErr { - hit_view_sync: state - .hit_view_sync - .clone(), - }), - )), - state, - ); - } - } - ShouldHitViewSync::No => { - if state.hit_view_sync.contains(&id) { - return ( - Some(HotShotTaskCompleted::Error( - Box::new(ViewSyncTaskErr { - hit_view_sync: state - .hit_view_sync - .clone(), - }), - )), - state, - ); - } - } - ShouldHitViewSync::Ignore => {} - } - } - (Some(HotShotTaskCompleted::ShutDown), state) - } }, } } @@ -189,14 +147,5 @@ impl ViewSyncTaskDescription { } .boxed() }) - - // match self { - // ViewSyncTaskDescription::Threshold(threshold) => { - // - // }, - // ViewSyncTaskDescription::Precise(map) => { - // - // } - // } } } From ee0ea7f57dcd8041b4d8576c06db312545a3e100 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 9 Jan 2024 11:40:38 -0800 Subject: [PATCH 0646/1393] Modify validate function --- hotshot/src/traits/election/static_committee.rs | 2 +- testing/src/state_types.rs | 12 +++++++++--- types/src/traits/state.rs | 12 +++++++++--- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 5da007a778..60175f338d 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -11,7 +11,7 @@ use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; #[cfg(feature = "randomized-leader-election")] -use rand::{Rng, rngs::StdRng}; +use rand::{rngs::StdRng, Rng}; /// Dummy implementation of [`Membership`] diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 5047376415..97652cf751 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -60,7 +60,12 @@ impl State for TestState { type Time = ViewNumber; - fn validate_block(&self, _block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool { + fn validate_block( + &self, + _curr_block_header: &Self::BlockHeader, + _prev_block_header: &Self::BlockHeader, + view_number: &Self::Time, + ) -> bool { if view_number == &ViewNumber::genesis() { &self.view_number == view_number } else { @@ -76,10 +81,11 @@ impl State for TestState { fn append( &self, - block_header: &Self::BlockHeader, + curr_block_header: &Self::BlockHeader, + prev_block_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result { - if !self.validate_block(block_header, view_number) { + if !self.validate_block(curr_block_header, prev_block_header, view_number) { return Err(BlockError::InvalidBlockHeader); } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 1b0a84343c..627cbf520e 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -47,8 +47,13 @@ pub trait State: /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Returns true if and only if the provided block header is valid and can extend this state - fn validate_block(&self, block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool; + /// Returns true if and only if the current block header is valid and can extend this state. + fn validate_block( + &self, + curr_block_header: &Self::BlockHeader, + prev_block_header: &Self::BlockHeader, + view_number: &Self::Time, + ) -> bool; /// Initialize the state. fn initialize() -> Self; @@ -60,7 +65,8 @@ pub trait State: /// Should produce and error if appending this block header would lead to an invalid state fn append( &self, - block_header: &Self::BlockHeader, + curr_block_header: &Self::BlockHeader, + prev_block_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result; From 151b833513aeccb8263364e19093d7a065041121 Mon Sep 17 00:00:00 2001 From: MRain Date: Tue, 9 Jan 2024 15:49:08 -0500 Subject: [PATCH 0647/1393] remove EncodedPublicKey and use ark-serialize --- hotshot-signature-key/Cargo.toml | 1 + hotshot-signature-key/src/bn254.rs | 2 +- hotshot-signature-key/src/bn254/bn254_pub.rs | 29 ++++++----------- types/src/traits/signature_key.rs | 26 ++++----------- web_server/src/lib.rs | 34 +++++++++----------- 5 files changed, 33 insertions(+), 59 deletions(-) diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index a98f1b73f7..efbbee9c08 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] +ark-serialize = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } blake3 = { workspace = true } diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs index ea0f395fb0..25acd2daf1 100644 --- a/hotshot-signature-key/src/bn254.rs +++ b/hotshot-signature-key/src/bn254.rs @@ -1,5 +1,5 @@ //! Demonstration implementation of the [`SignatureKey`] trait using BN254 -use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; +use hotshot_types::traits::signature_key::SignatureKey; /// `BLSPrivKey` implementation mod bn254_priv; /// `BLSPubKey` implementation diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/hotshot-signature-key/src/bn254/bn254_pub.rs index 3b8183a986..64f1901750 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/hotshot-signature-key/src/bn254/bn254_pub.rs @@ -1,5 +1,4 @@ -use super::{BLSPrivKey, EncodedPublicKey, SignatureKey}; -use bincode::Options; +use super::{BLSPrivKey, SignatureKey}; use bitvec::prelude::*; use blake3::traits::digest::generic_array::GenericArray; use ethereum_types::U256; @@ -7,7 +6,6 @@ use hotshot_qc::bit_vector_old::{ BitVectorQC, QCParams as JFQCParams, StakeTableEntry as JFStakeTableEntry, }; use hotshot_types::traits::qc::QuorumCertificate; -use hotshot_utils::bincode::bincode_opts; use jf_primitives::errors::PrimitivesError; use jf_primitives::signatures::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey}, @@ -15,7 +13,7 @@ use jf_primitives::signatures::{ }; use serde::{Deserialize, Serialize}; use std::{cmp::Ordering, fmt::Debug}; -use tracing::{debug, instrument, warn}; +use tracing::{instrument, warn}; use typenum::U32; /// Public key type for an bn254 [`SignatureKey`] pair @@ -81,23 +79,16 @@ impl SignatureKey for BLSPubKey { Self { pub_key } } - fn to_bytes(&self) -> EncodedPublicKey { - let pub_key_bytes = bincode_opts() - .serialize(&self.pub_key) - .expect("This serialization shouldn't be able to fail"); - EncodedPublicKey(pub_key_bytes) + fn to_bytes(&self) -> Vec { + let mut buf = vec![]; + ark_serialize::CanonicalSerialize::serialize_compressed(&self.pub_key, &mut buf) + .expect("Serialization should not fail."); + buf } - #[instrument] - fn from_bytes(bytes: &EncodedPublicKey) -> Option { - let x: Result = bincode_opts().deserialize(&bytes.0); - match x { - Ok(pub_key) => Some(BLSPubKey { pub_key }), - Err(e) => { - debug!(?e, "Failed to deserialize public key"); - None - } - } + fn from_bytes(bytes: &[u8]) -> Result { + let pub_key: VerKey = ark_serialize::CanonicalDeserialize::deserialize_compressed(bytes)?; + Ok(Self { pub_key }) } fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey) { diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 25ebfdd7d2..d6af846f52 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -1,26 +1,9 @@ //! Minimal compatibility over public key signatures -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bitvec::prelude::*; -use espresso_systems_common::hotshot::tag; use ethereum_types::U256; +use jf_primitives::errors::PrimitivesError; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, hash::Hash}; -use tagged_base64::tagged; - -/// Type saftey wrapper for byte encoded keys -#[tagged(tag::ENCODED_PUB_KEY)] -#[derive( - Clone, - custom_debug::Debug, - Hash, - CanonicalSerialize, - CanonicalDeserialize, - PartialEq, - Eq, - PartialOrd, - Ord, -)] -pub struct EncodedPublicKey(#[debug(with = "custom_debug::hexbuf")] pub Vec); /// Type representing stake table entries in a `StakeTable` pub trait StakeTableEntryType { @@ -107,9 +90,12 @@ pub trait SignatureKey: /// Produce a public key from a private key fn from_private(private_key: &Self::PrivateKey) -> Self; /// Serialize a public key to bytes - fn to_bytes(&self) -> EncodedPublicKey; + fn to_bytes(&self) -> Vec; /// Deserialize a public key from bytes - fn from_bytes(bytes: &EncodedPublicKey) -> Option; + /// # Errors + /// + /// Will return `Err` if deserialization fails + fn from_bytes(bytes: &[u8]) -> Result; /// Generate a new key pair fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey); diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 3babb30670..1e00750e9e 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -6,7 +6,7 @@ use async_lock::RwLock; use clap::Args; use futures::FutureExt; -use hotshot_types::traits::signature_key::{EncodedPublicKey, SignatureKey}; +use hotshot_types::traits::signature_key::SignatureKey; use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; use std::{collections::HashMap, io, path::PathBuf}; use tide_disco::{ @@ -605,24 +605,20 @@ impl WebServerDataSource for WebServerState { fn post_staketable(&mut self, key: Vec) -> Result<(), Error> { // KALEY TODO: need security checks here - let new_key = KEY::from_bytes(&(EncodedPublicKey(key))); - if let Some(new_key) = new_key { - let node_index = self.stake_table.len() as u64; - //generate secret for leader's first submission endpoint when key is added - let secret = thread_rng() - .sample_iter(&Alphanumeric) - .take(30) - .map(char::from) - .collect(); - self.proposals.insert(node_index, (secret, Vec::new())); - self.stake_table.push(new_key); - Ok(()) - } else { - Err(ServerError { - status: StatusCode::BadRequest, - message: "Only signature keys can be added to stake table".to_string(), - }) - } + let new_key = KEY::from_bytes(&key).map_err(|_| ServerError { + status: StatusCode::BadRequest, + message: "Only signature keys can be added to stake table".to_string(), + })?; + let node_index = self.stake_table.len() as u64; + //generate secret for leader's first submission endpoint when key is added + let secret = thread_rng() + .sample_iter(&Alphanumeric) + .take(30) + .map(char::from) + .collect(); + self.proposals.insert(node_index, (secret, Vec::new())); + self.stake_table.push(new_key); + Ok(()) } fn post_completed_transaction(&mut self, txn: Vec) -> Result<(), Error> { From da057db698745c59a1085d0a03c1d661727bd389 Mon Sep 17 00:00:00 2001 From: bfish713 Date: Tue, 9 Jan 2024 16:45:34 -0500 Subject: [PATCH 0648/1393] spawn vid in new task --- task-impls/src/vid.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 7607b7af57..6b164a815b 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,5 +1,9 @@ use crate::events::HotShotEvent; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::spawn_blocking; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::spawn_blocking; use hotshot_task::{ event_stream::ChannelStream, global_registry::GlobalRegistry, @@ -90,8 +94,12 @@ impl, A: ConsensusApi + let chunk_size = 1 << num_quorum_committee.ilog2(); // calculate vid shares - let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); - let vid_disperse = vid.disperse(encoded_transactions.clone()).unwrap(); + let vid_disperse = spawn_blocking(move || { + let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); + vid.disperse(encoded_transactions.clone()).unwrap() + }) + .await; + // send the commitment and metadata to consensus for block building self.event_stream From 89e2866e6be85e29381f09ec3f44bd92bbc98345 Mon Sep 17 00:00:00 2001 From: bfish713 Date: Tue, 9 Jan 2024 17:01:56 -0500 Subject: [PATCH 0649/1393] fix tokio --- task-impls/src/vid.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 6b164a815b..af6ada909f 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -2,8 +2,6 @@ use crate::events::HotShotEvent; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::spawn_blocking; use hotshot_task::{ event_stream::ChannelStream, global_registry::GlobalRegistry, @@ -26,6 +24,8 @@ use hotshot_types::{ data::{test_srs, VidScheme, VidSchemeTrait}, traits::network::ConsensusIntentEvent, }; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::spawn_blocking; use hotshot_task::event_stream::EventStream; use snafu::Snafu; @@ -96,11 +96,12 @@ impl, A: ConsensusApi + // calculate vid shares let vid_disperse = spawn_blocking(move || { let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); - vid.disperse(encoded_transactions.clone()).unwrap() + vid.disperse(encoded_transactions.clone()).unwrap() }) .await; - + #[cfg(async_executor_impl = "tokio")] + let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building self.event_stream .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( From 9f61a3d98a762035246f170915b611333726460b Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 9 Jan 2024 17:25:34 -0500 Subject: [PATCH 0650/1393] Pass block metadata by reference instead of value --- hotshot/src/traits/election/static_committee.rs | 2 +- testing/src/block_types.rs | 6 ++++-- types/src/traits/block_contents.rs | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 5da007a778..60175f338d 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -11,7 +11,7 @@ use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; #[cfg(feature = "randomized-leader-election")] -use rand::{Rng, rngs::StdRng}; +use rand::{rngs::StdRng, Rng}; /// Dummy implementation of [`Membership`] diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index fe61bb8ccd..ebddb94fa8 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -116,7 +116,7 @@ impl BlockPayload for TestBlockPayload { )) } - fn from_bytes(encoded_transactions: E, _metadata: Self::Metadata) -> Self + fn from_bytes(encoded_transactions: E, _metadata: &Self::Metadata) -> Self where E: Iterator, { @@ -214,7 +214,9 @@ impl BlockHeader for TestBlockHeader { self.payload_commitment } - fn metadata(&self) -> ::Metadata {} + fn metadata(&self) -> &::Metadata { + &() + } } impl Committable for TestBlockHeader { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 0c05108b00..f3d3769c35 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -56,7 +56,7 @@ pub trait BlockPayload: /// and the associated number of VID storage nodes /// /// `I` may be, but not necessarily is, the `Encode` type directly from `fn encode`. - fn from_bytes(encoded_transactions: I, metadata: Self::Metadata) -> Self + fn from_bytes(encoded_transactions: I, metadata: &Self::Metadata) -> Self where I: Iterator; @@ -118,5 +118,5 @@ pub trait BlockHeader: fn payload_commitment(&self) -> VidCommitment; /// Get the metadata. - fn metadata(&self) -> ::Metadata; + fn metadata(&self) -> &::Metadata; } From baaa05767d4121e1828c6a9e199b43b657cfa12b Mon Sep 17 00:00:00 2001 From: bfish713 Date: Wed, 10 Jan 2024 10:41:09 -0500 Subject: [PATCH 0651/1393] ignore network_task test --- testing/tests/network_task.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 20d3b964b0..a0a99635ec 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -17,6 +17,7 @@ use std::{collections::HashMap, marker::PhantomData}; tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_network_task() { use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; From c8a90dd01a426b4e28b997fc7f21b8eaaa6bce9d Mon Sep 17 00:00:00 2001 From: bfish713 Date: Wed, 10 Jan 2024 13:46:17 -0500 Subject: [PATCH 0652/1393] comment about unwrap --- task-impls/src/vid.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index c1ac1bea58..cabefaaa38 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -101,6 +101,7 @@ impl, A: ConsensusApi + .await; #[cfg(async_executor_impl = "tokio")] + // Unwrap here will just propogate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building self.event_stream From 52827e9c6bb6f9fcf8781823df0a641be7bfc0dc Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 10 Jan 2024 12:54:10 -0800 Subject: [PATCH 0653/1393] Add metadata to state and state to leaf --- hotshot/src/lib.rs | 7 +- .../src/traits/election/static_committee.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 4 +- task-impls/src/consensus.rs | 91 ++++++++++++------- testing/src/overall_safety_task.rs | 2 +- testing/src/state_types.rs | 4 + testing/src/task_helpers.rs | 12 ++- testing/tests/consensus_task.rs | 6 +- types/src/data.rs | 13 ++- types/src/traits/node_implementation.rs | 6 +- types/src/traits/state.rs | 5 + types/src/traits/storage.rs | 4 + types/src/utils.rs | 6 +- 13 files changed, 116 insertions(+), 46 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index cf81d1b6b3..baaabefe01 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -64,7 +64,7 @@ use hotshot_types::{ signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, - BlockPayload, + BlockPayload, State, }, HotShotConfig, }; @@ -217,6 +217,7 @@ impl> SystemContext { View { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), + metadata: anchored_leaf.get_state().metadata(), }, }, ); @@ -355,13 +356,13 @@ impl> SystemContext { /// # Panics /// /// Panics if internal state for consensus is inconsistent - pub async fn get_state(&self) { + pub async fn get_state(&self) -> TYPES::StateType { self.inner .consensus .read() .await .get_decided_leaf() - .get_state(); + .get_state() } /// Returns a copy of the consensus struct diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 5da007a778..60175f338d 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -11,7 +11,7 @@ use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; #[cfg(feature = "randomized-leader-election")] -use rand::{Rng, rngs::StdRng}; +use rand::{rngs::StdRng, Rng}; /// Dummy implementation of [`Membership`] diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index fe672fd558..aa69b886da 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -113,12 +113,13 @@ mod test { use hotshot_testing::{ block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, node_types::TestTypes, + state_types::TestState, }; use hotshot_types::{ data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, + node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, State, }, }; use std::marker::PhantomData; @@ -147,6 +148,7 @@ mod test { header, Some(payload), dummy_leaf_commit, + TestState::initialize(), Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b7909dfec2..5173051dba 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -31,7 +31,7 @@ use hotshot_types::{ node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, state::ConsensusTime, - BlockPayload, + BlockPayload, State, }, utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, @@ -212,13 +212,17 @@ impl, A: ConsensusApi + return false; }; let parent_commitment = parent.commit(); - + let Ok(state) = parent.state.append(&proposal.block_header.clone(), &view) else { + error!("Block header doesn't extend the proposal",); + return false; + }; let leaf: Leaf<_> = Leaf { view_number: view, justify_qc: proposal.justify_qc.clone(), parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, + state, rejected: Vec::new(), proposer_id: self.quorum_membership.get_leader(view), }; @@ -298,6 +302,10 @@ impl, A: ConsensusApi + return false; }; let parent_commitment = parent.commit(); + let Ok(state) = parent.state.append(&proposal.block_header.clone(), &view) else { + error!("Block header doesn't extend the proposal",); + return false; + }; let leaf: Leaf<_> = Leaf { view_number: view, @@ -305,6 +313,7 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, + state, rejected: Vec::new(), proposer_id: self.quorum_membership.get_leader(view), }; @@ -524,41 +533,52 @@ impl, A: ConsensusApi + // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { - // If no parent then just update our state map and return. We will not vote. - error!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.get_data().leaf_commit - ); - let leaf = Leaf { - view_number: view, - justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.get_data().leaf_commit, - block_header: proposal.data.block_header, - block_payload: None, - rejected: Vec::new(), - proposer_id: sender, - }; - - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - }, - }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + // TODO (Keyao) Can we store the state in this case? + // // If no parent then just update our state map and return. We will not vote. + // error!( + // "Proposal's parent missing from storage with commitment: {:?}", + // justify_qc.get_data().leaf_commit + // ); + // let leaf = Leaf { + // view_number: view, + // justify_qc: justify_qc.clone(), + // parent_commitment: justify_qc.get_data().leaf_commit, + // block_header: proposal.data.block_header, + // state: parent.state.append(proposal.block_header.clone(), view), + // block_payload: None, + // rejected: Vec::new(), + // proposer_id: sender, + // }; + + // let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + // consensus.state_map.insert( + // view, + // View { + // view_inner: ViewInner::Leaf { + // leaf: leaf.commit(), + // metadata: leaf.get_state().metadata(), + // }, + // }, + // ); + // consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); return; }; let parent_commitment = parent.commit(); + let Ok(state) = parent + .state + .append(&proposal.data.block_header.clone(), &view) + else { + error!("Block header doesn't extend the proposal",); + return; + }; let leaf: Leaf<_> = Leaf { view_number: view, justify_qc: justify_qc.clone(), parent_commitment, block_header: proposal.data.block_header, block_payload: None, + state, rejected: Vec::new(), proposer_id: sender, }; @@ -699,6 +719,7 @@ impl, A: ConsensusApi + View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), + metadata: leaf.get_state().metadata(), }, }, ); @@ -1158,16 +1179,22 @@ impl, A: ConsensusApi + } if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { + let block_header = TYPES::BlockHeader::new( + commit_and_metadata.commitment, + commit_and_metadata.metadata.clone(), + &parent_header, + ); + let Ok(state) = parent_leaf.state.append(&block_header.clone(), &view) else { + error!("Block header doesn't extend the proposal",); + return false; + }; let leaf = Leaf { view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), - block_header: TYPES::BlockHeader::new( - commit_and_metadata.commitment, - commit_and_metadata.metadata.clone(), - &parent_header, - ), + block_header: block_header.clone(), block_payload: None, + state, rejected: vec![], proposer_id: self.api.public_key().clone(), }; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f6b6a96c4f..f41497febd 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -105,7 +105,7 @@ pub struct RoundResult { pub block_map: HashMap, /// state -> # entries decided on that state - pub state_map: HashMap<(), usize>, + pub state_map: HashMap, pub num_txns_map: HashMap, } diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 5047376415..7ba64cbac0 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -60,6 +60,8 @@ impl State for TestState { type Time = ViewNumber; + type Metadata = (); + fn validate_block(&self, _block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool { if view_number == &ViewNumber::genesis() { &self.view_number == view_number @@ -91,6 +93,8 @@ impl State for TestState { } fn on_commit(&self) {} + + fn metadata(&self) -> Self::Metadata {} } impl TestableState for TestState { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 6c852d6160..9a8713d85f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -24,7 +24,7 @@ use hotshot_types::{ election::Membership, node_implementation::NodeType, state::{ConsensusTime, TestableBlock}, - BlockPayload, + BlockPayload, State, }, vote::HasViewNumber, }; @@ -137,13 +137,21 @@ async fn build_quorum_proposal_and_signature( .quorum_membership .total_nodes(), ); + let view_number = ViewNumber::new(view); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); + let Ok(state) = parent_leaf + .state + .append(&block_header.clone(), &view_number) + else { + panic!("Block header doesn't extend the proposal",); + }; let leaf = Leaf { - view_number: ViewNumber::new(view), + view_number, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, + state, rejected: vec![], proposer_id: *api.public_key(), }; diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 851391259b..26bf9487e6 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -11,7 +11,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::state::ConsensusTime, + traits::{state::ConsensusTime, State}, }; use hotshot_types::{ simple_vote::QuorumData, @@ -53,6 +53,9 @@ async fn build_vote( }; let parent_commitment = parent.commit(); + let Ok(state) = parent.state.append(&proposal.block_header.clone(), &view) else { + panic!("Block header doesn't extend the proposal",); + }; let leaf: Leaf<_> = Leaf { view_number: view, @@ -60,6 +63,7 @@ async fn build_vote( parent_commitment, block_header: proposal.block_header, block_payload: None, + state, rejected: Vec::new(), proposer_id: membership.get_leader(view), }; diff --git a/types/src/data.rs b/types/src/data.rs index 4af670ed67..2aeb86fb79 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -306,6 +306,9 @@ pub struct Leaf { /// It may be empty for nodes not in the DA committee. pub block_payload: Option, + /// State. + pub state: TYPES::StateType, + /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, @@ -319,6 +322,7 @@ impl PartialEq for Leaf { && self.justify_qc == other.justify_qc && self.parent_commitment == other.parent_commitment && self.block_header == other.block_header + && self.state == other.state && self.rejected == other.rejected } } @@ -329,6 +333,7 @@ impl Hash for Leaf { self.justify_qc.hash(state); self.parent_commitment.hash(state); self.block_header.hash(state); + self.state.hash(state); self.rejected.hash(state); } } @@ -356,6 +361,7 @@ impl Leaf { parent_commitment: fake_commitment(), block_header, block_payload: Some(block_payload), + state: ::initialize(), rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } @@ -424,8 +430,9 @@ impl Leaf { self.get_block_header().payload_commitment() } /// The blockchain state after appending this leaf. - // The Sequencing Leaf doesn't have a state. - pub fn get_state(&self) {} + pub fn get_state(&self) -> TYPES::StateType { + self.state.clone() + } /// Transactions rejected or invalidated by the application of this leaf. pub fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() @@ -442,6 +449,7 @@ impl Leaf { parent_commitment: stored_view.parent, block_header: stored_view.block_header, block_payload: stored_view.block_payload, + state: stored_view.state, rejected: stored_view.rejected, proposer_id: stored_view.proposer_id, } @@ -543,6 +551,7 @@ where justify_qc: leaf.get_justify_qc(), block_header: leaf.get_block_header().clone(), block_payload: leaf.get_block_payload(), + state: leaf.get_state(), rejected: leaf.get_rejected(), proposer_id: leaf.get_proposer_id(), } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 7948d75bf1..b886684d7d 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -297,7 +297,11 @@ pub trait NodeType: type ElectionConfigType: ElectionConfig; /// The state type that this hotshot setup is using. - type StateType: State; + type StateType: State< + BlockHeader = Self::BlockHeader, + BlockPayload = Self::BlockPayload, + Time = Self::Time, + >; /// Membership used for this implementation type Membership: Membership; diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 1b0a84343c..52ed410dc8 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -46,6 +46,8 @@ pub trait State: type BlockPayload: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; + /// Application-specific data. + type Metadata: Debug + Send + Sync; /// Returns true if and only if the provided block header is valid and can extend this state fn validate_block(&self, block_header: &Self::BlockHeader, view_number: &Self::Time) -> bool; @@ -66,6 +68,9 @@ pub trait State: /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); + + /// Get the application-specific data. + fn metadata(&self) -> Self::Metadata; } // TODO Seuqnecing here means involving DA in consensus diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 4c6e9fe27f..f189eca84c 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -130,6 +130,8 @@ pub struct StoredView { /// /// It may be empty for nodes not in the DA committee. pub block_payload: Option, + /// State. + pub state: TYPES::StateType, /// transactions rejected in this view pub rejected: Vec, /// the proposer id @@ -150,6 +152,7 @@ where block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, + state: TYPES::StateType, rejected: Vec<::Transaction>, proposer_id: TYPES::SignatureKey, ) -> Self { @@ -159,6 +162,7 @@ where justify_qc: qc, block_header, block_payload, + state, rejected, proposer_id, } diff --git a/types/src/utils.rs b/types/src/utils.rs index 18e45ffdf1..9e1e4ff733 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -2,7 +2,7 @@ use crate::{ data::{Leaf, VidCommitment}, - traits::node_implementation::NodeType, + traits::{node_implementation::NodeType, State}, }; use commit::Commitment; use std::ops::Deref; @@ -23,6 +23,8 @@ pub enum ViewInner { Leaf { /// Proposed leaf leaf: Commitment>, + /// Application-specific data. + metadata: ::Metadata, }, /// Leaf has failed Failed, @@ -32,7 +34,7 @@ impl ViewInner { /// return the underlying leaf hash if it exists #[must_use] pub fn get_leaf_commitment(&self) -> Option>> { - if let Self::Leaf { leaf } = self { + if let Self::Leaf { leaf, .. } = self { Some(*leaf) } else { None From 00d6f57cfaf3ccb0daf718bd7e1c12d1b2232452 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 10 Jan 2024 13:50:47 -0800 Subject: [PATCH 0654/1393] Rename --- testing/src/state_types.rs | 10 +++++----- types/src/traits/state.rs | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 97652cf751..794e3ed680 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -62,8 +62,8 @@ impl State for TestState { fn validate_block( &self, - _curr_block_header: &Self::BlockHeader, - _prev_block_header: &Self::BlockHeader, + _proposed_header: &Self::BlockHeader, + _parent_header: &Self::BlockHeader, view_number: &Self::Time, ) -> bool { if view_number == &ViewNumber::genesis() { @@ -81,11 +81,11 @@ impl State for TestState { fn append( &self, - curr_block_header: &Self::BlockHeader, - prev_block_header: &Self::BlockHeader, + proposed_header: &Self::BlockHeader, + parent_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result { - if !self.validate_block(curr_block_header, prev_block_header, view_number) { + if !self.validate_block(proposed_header, parent_header, view_number) { return Err(BlockError::InvalidBlockHeader); } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 627cbf520e..bf863093df 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -47,11 +47,11 @@ pub trait State: /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Returns true if and only if the current block header is valid and can extend this state. + /// Returns true if and only if the proposed block header is valid and can extend this state. fn validate_block( &self, - curr_block_header: &Self::BlockHeader, - prev_block_header: &Self::BlockHeader, + proposed_header: &Self::BlockHeader, + parent_header: &Self::BlockHeader, view_number: &Self::Time, ) -> bool; @@ -65,8 +65,8 @@ pub trait State: /// Should produce and error if appending this block header would lead to an invalid state fn append( &self, - curr_block_header: &Self::BlockHeader, - prev_block_header: &Self::BlockHeader, + proposed_header: &Self::BlockHeader, + parent_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result; From 4d2f5fcbdcd9e69f77d2e946f67076f202c54a9a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 10 Jan 2024 23:14:35 -0500 Subject: [PATCH 0655/1393] debug --- testing/src/task_helpers.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8cf841b2db..73fb140590 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -245,9 +245,11 @@ async fn build_quorum_proposal_and_signature( timeout_certificate: None, proposer_id: leaf.proposer_id, }; + + // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { consensus.state_map.insert( - ViewNumber::new(1), + ViewNumber::new(cur_view - 1), View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), @@ -280,7 +282,7 @@ async fn build_quorum_proposal_and_signature( block_payload: None, rejected: vec![], timestamp: 0, - proposer_id: *api.public_key(), + proposer_id: quorum_membership.get_leader(ViewNumber::new(cur_view)), }; let signature_new_view = ::sign(private_key, leaf_new_view.commit().as_ref()) From bb08388354e6edb82d103286e8701613640d07fc Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 10 Jan 2024 23:21:41 -0500 Subject: [PATCH 0656/1393] merge --- testing/src/task_helpers.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 66c6aca068..196225385f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -280,7 +280,6 @@ async fn build_quorum_proposal_and_signature( block_header: block_header.clone(), block_payload: None, rejected: vec![], - timestamp: 0, proposer_id: quorum_membership.get_leader(ViewNumber::new(cur_view)), }; let signature_new_view = From 406821a75df92a5116c49eff9caa08f4945a9428 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 12 Jan 2024 07:01:18 -0800 Subject: [PATCH 0657/1393] Get state from consensus if parent is missing --- task-impls/src/consensus.rs | 62 ++++++++++++++++++++----------------- types/src/consensus.rs | 13 ++++++-- 2 files changed, 45 insertions(+), 30 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5173051dba..036b1361ea 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -533,34 +533,40 @@ impl, A: ConsensusApi + // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { - // TODO (Keyao) Can we store the state in this case? - // // If no parent then just update our state map and return. We will not vote. - // error!( - // "Proposal's parent missing from storage with commitment: {:?}", - // justify_qc.get_data().leaf_commit - // ); - // let leaf = Leaf { - // view_number: view, - // justify_qc: justify_qc.clone(), - // parent_commitment: justify_qc.get_data().leaf_commit, - // block_header: proposal.data.block_header, - // state: parent.state.append(proposal.block_header.clone(), view), - // block_payload: None, - // rejected: Vec::new(), - // proposer_id: sender, - // }; - - // let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - // consensus.state_map.insert( - // view, - // View { - // view_inner: ViewInner::Leaf { - // leaf: leaf.commit(), - // metadata: leaf.get_state().metadata(), - // }, - // }, - // ); - // consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + // If no parent then just update our state map and return. We will not vote. + error!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.get_data().leaf_commit + ); + let Ok(state) = consensus + .get_decided_state() + .append(&proposal.data.block_header.clone(), &view) + else { + error!("Block header doesn't extend the proposal",); + return; + }; + let leaf = Leaf { + view_number: view, + justify_qc: justify_qc.clone(), + parent_commitment: justify_qc.get_data().leaf_commit, + block_header: proposal.data.block_header, + state, + block_payload: None, + rejected: Vec::new(), + proposer_id: sender, + }; + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + metadata: leaf.get_state().metadata(), + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); return; }; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e86939804e..796c04627e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -333,9 +333,9 @@ impl Consensus { self.state_map = self.state_map.split_off(&new_anchor_view); } - /// Gets the last decided state + /// Gets the last decided leaf /// # Panics - /// if the last decided view's state does not exist in the state map + /// if the last decided view does not exist in the state map /// this should never happen. #[must_use] pub fn get_decided_leaf(&self) -> Leaf { @@ -346,6 +346,15 @@ impl Consensus { .expect("Decided state not found! Consensus internally inconsistent"); self.saved_leaves.get(&leaf).unwrap().clone() } + + /// Gets the last decided state + /// # Panics + /// if the last decided view does not exist in the state map + /// this should never happen. + #[must_use] + pub fn get_decided_state(&self) -> TYPES::StateType { + self.get_decided_leaf().get_state() + } } /// Mapping from block payload commitments to the encoded transactions. From 6eebaf2508194e2b344478d90cd8a2ef09e25a26 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 12 Jan 2024 07:40:43 -0800 Subject: [PATCH 0658/1393] Combine functions --- testing/src/state_types.rs | 34 ++++++++++++---------------------- types/src/traits/state.rs | 22 ++++++++-------------- 2 files changed, 20 insertions(+), 36 deletions(-) diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 794e3ed680..71e66edb42 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -60,35 +60,19 @@ impl State for TestState { type Time = ViewNumber; - fn validate_block( + fn validate_and_apply_header( &self, _proposed_header: &Self::BlockHeader, _parent_header: &Self::BlockHeader, view_number: &Self::Time, - ) -> bool { - if view_number == &ViewNumber::genesis() { - &self.view_number == view_number - } else { - self.view_number < *view_number - } - } - - fn initialize() -> Self { - let mut state = Self::default(); - state.block_height += 1; - state - } - - fn append( - &self, - proposed_header: &Self::BlockHeader, - parent_header: &Self::BlockHeader, - view_number: &Self::Time, ) -> Result { - if !self.validate_block(proposed_header, parent_header, view_number) { + if view_number == &ViewNumber::genesis() { + if &self.view_number != view_number { + return Err(BlockError::InvalidBlockHeader); + } + } else if self.view_number >= *view_number { return Err(BlockError::InvalidBlockHeader); } - Ok(TestState { block_height: self.block_height + 1, view_number: *view_number, @@ -96,6 +80,12 @@ impl State for TestState { }) } + fn initialize() -> Self { + let mut state = Self::default(); + state.block_height += 1; + state + } + fn on_commit(&self) {} } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index bf863093df..4b6048d89d 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -47,29 +47,23 @@ pub trait State: /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Returns true if and only if the proposed block header is valid and can extend this state. - fn validate_block( - &self, - proposed_header: &Self::BlockHeader, - parent_header: &Self::BlockHeader, - view_number: &Self::Time, - ) -> bool; - - /// Initialize the state. - fn initialize() -> Self; - - /// Appends the given block header to this state, returning an new state + /// Check if the proposed block header is valid and apply it to the state if so. + /// + /// Returns the new state. /// /// # Errors /// - /// Should produce and error if appending this block header would lead to an invalid state - fn append( + /// If the block header is invalid or appending it would lead to an invalid state. + fn validate_and_apply_header( &self, proposed_header: &Self::BlockHeader, parent_header: &Self::BlockHeader, view_number: &Self::Time, ) -> Result; + /// Initialize the state. + fn initialize() -> Self; + /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); } From c1368321d7c8294a03e09851ed7c89f658439838 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 12 Jan 2024 08:28:24 -0800 Subject: [PATCH 0659/1393] Fix doc --- types/src/traits/state.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 4b6048d89d..4134a2782a 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -21,10 +21,9 @@ use super::block_contents::BlockHeader; /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](State::Error)) /// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockPayload)) -/// * The ability to validate that a block is actually a valid extension of this state -/// ([`validate_block`](State::validate_block)) -/// * The ability to produce a new state, with the modifications from the block applied -/// ([`append`](State::append)) +/// * The ability to validate that a block header is actually a valid extension of this state and +/// produce a new state, with the modifications from the block applied +/// ([`validate_and_apply_header`](State::validate_and_apply_header)) pub trait State: Serialize + DeserializeOwned From 2ea9f619b1323400a48c25f018becf048a0739be Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 12 Jan 2024 18:32:28 +0100 Subject: [PATCH 0660/1393] [TECH DEBT] Organize dependencies (#2390) * Set proper blake3 features * Remove unused dependencies --- hotshot-qc/Cargo.toml | 4 ---- hotshot-signature-key/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 6 ------ hotshot-state-prover/Cargo.toml | 11 +---------- hotshot/Cargo.toml | 15 +++------------ libp2p-networking/Cargo.toml | 32 -------------------------------- orchestrator/Cargo.toml | 9 +-------- task-impls/Cargo.toml | 7 +------ testing/Cargo.toml | 19 ------------------- types/Cargo.toml | 8 -------- web_server/Cargo.toml | 10 ---------- 11 files changed, 7 insertions(+), 116 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 9c53fb335a..80b306023d 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -8,13 +8,9 @@ rust-version = { workspace = true } [dependencies] ark-bls12-377 = "0.4.0" -ark-bls12-381 = { workspace = true } ark-bn254 = "0.4.0" ark-ec = { workspace = true } ark-ff = "0.4.0" -ark-pallas = "0.4.0" -ark-poly = "0.4.0" -ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml index a98f1b73f7..4bb420c985 100644 --- a/hotshot-signature-key/Cargo.toml +++ b/hotshot-signature-key/Cargo.toml @@ -9,7 +9,7 @@ rust-version = { workspace = true } [dependencies] bincode = { workspace = true } bitvec = { workspace = true } -blake3 = { workspace = true } +blake3 = { workspace = true, features = ["traits-preview"] } custom_debug = { workspace = true } ethereum-types = { workspace = true } hotshot-qc = { path = "../hotshot-qc" } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 4cb53eb2a9..db78fbece7 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -12,19 +12,13 @@ ark-ed-on-bn254 = "0.4.0" ark-ff = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } digest = { workspace = true } -displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } -generic-array = { workspace = true } hotshot-types = { path = "../types" } jf-primitives = { workspace = true } -jf-relation = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } tagged-base64 = { workspace = true } -typenum = { workspace = true } [dev-dependencies] rand_chacha = { workspace = true } diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml index 826b9c0efd..1554b2ed8d 100644 --- a/hotshot-state-prover/Cargo.toml +++ b/hotshot-state-prover/Cargo.toml @@ -11,25 +11,16 @@ ark-bn254 = { workspace = true } ark-ec = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } -ark-serialize = { workspace = true } ark-std = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } -digest = { workspace = true } -displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } -generic-array = "0.14.7" hotshot-types = { path = "../types" } jf-plonk = { workspace = true } jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } -serde = { workspace = true, features = ["rc"] } -tagged-base64 = { workspace = true } -typenum = { workspace = true } hotshot-stake-table = { path = "../hotshot-stake-table" } [features] default = ["parallel"] -std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] +std = ["ark-std/std", "ark-ff/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index e8588d41b4..466aafee2f 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -84,17 +84,13 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6.3" bincode = { workspace = true } -bitvec = { workspace = true } clap = { version = "4.4", features = ["derive", "env"], optional = true } commit = { workspace = true } hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } dashmap = "5.5.1" -derivative = { version = "2.2.0", optional = true } either = { workspace = true } embed-doc-image = "0.1.4" -espresso-systems-common = { workspace = true } -ethereum-types = { workspace = true } futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } @@ -103,32 +99,27 @@ hotshot-types = { path = "../types", version = "0.1.0", default-features = false hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } -jf-primitives = { workspace = true } -libp2p = { workspace = true } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } -rand_chacha = { workspace = true } serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -local-ip-address = "0.5.6" -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } derive_more = "0.99.17" portpicker = "0.1.1" lru = "0.12.1" tracing = { workspace = true } -typenum = { workspace = true } + [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } [dev-dependencies] -blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.111" toml = { workspace = true } +blake3 = { workspace = true } +local-ip-address = "0.5.6" hotshot-testing = { path = "../testing" } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 1b7f90ed06..805c24c2bc 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -11,25 +11,12 @@ authors = ["Espresso Systems "] default = ["webui"] webui = [] -# # this only has effect on linux -# lossy_network = [ -# "nix", -# "netlink-packet-route", -# "netlink-packet-utils", -# "netlink-packet-core", -# "netlink-proto", -# "netlink-sys", -# "netlink-packet-generic", -# "rtnetlink", -# ] - [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } -color-eyre = "0.6.2" custom_debug = { workspace = true } derive_builder = "0.12.0" either = { workspace = true } @@ -38,8 +25,6 @@ hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } -libp2p-noise = { version = "0.44.0", default-features = false } -parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } serde_json = "1.0.111" @@ -58,20 +43,3 @@ tokio-stream = "0.1.14" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] libp2p = { workspace = true, features = ["async-std"] } async-std = { workspace = true } - - -[target.'cfg(target_os = "linux")'.dependencies] -## lossy_network dependencies -nix = { version = "0.27.1", optional = true } -rtnetlink = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.1", features = [ - "smol_socket", -], default-features = false, optional = true } -netlink-packet-route = { git = "https://github.com/espressosystems/netlink.git", version = "0.11.0", optional = true } -netlink-packet-utils = { git = "https://github.com/espressosystems/netlink.git", version = "0.5.1", optional = true } -netlink-packet-core = { git = "https://github.com/espressosystems/netlink.git", version = "0.4.2", optional = true } -netlink-proto = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.2", optional = true } -netlink-sys = { git = "https://github.com/espressosystems/netlink.git", version = "0.8.2", features = [ - "smol_socket", -], optional = true } -netlink-packet-generic = { git = "https://github.com/espressosystems/netlink.git", version = "0.2.0", optional = true } - diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index f9c4467b7d..7ad22be0d8 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -6,23 +6,16 @@ edition = "2021" [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } -async-trait = { workspace = true } -bincode = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -libp2p-core = { version = "0.40.0", default-features = false } libp2p = { workspace = true } -blake3 = { workspace = true, features = ["traits-preview"] } +blake3 = { workspace = true } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -hotshot-signature-key = { path = "../hotshot-signature-key" } -libp2p-networking = { workspace = true } tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } serde_json = "1.0.96" -snafu = { workspace = true } toml = { workspace = true } thiserror = "1.0.50" diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index f6d9792041..65940bdc42 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -10,20 +10,15 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } either = { workspace = true } futures = { workspace = true } -serde = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } -atomic_enum = "0.2.0" -pin-project = "1.1.3" hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-task = { path = "../task", default-features = false } +hotshot-utils = { path = "../utils" } time = { workspace = true } commit = { workspace = true } -jf-primitives = { workspace = true } -rand_chacha = { workspace = true } -hotshot-utils = { path = "../utils" } bincode = { workspace = true } bitvec = { workspace = true } sha2 = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index f3133e6335..32318f1924 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -11,43 +11,24 @@ default = [] slow-tests = [] [dependencies] -ark-bls12-381 = { workspace = true } -ark-serialize = { workspace = true, features = ["derive"] } -espresso-systems-common = { workspace = true } async-compatibility-layer = { workspace = true } -async-trait = { workspace = true } -# needed for vrf demo -# so non-optional for now -blake3 = { workspace = true, features = ["traits-preview"] } sha3 = "^0.10" commit = { workspace = true } -derivative = "2.2.0" either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ "hotshot-testing", ], default-features = false } hotshot-types = { path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -hotshot-constants = { path = "../constants" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-signature-key = { path = "../hotshot-signature-key" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } -jf-primitives = { workspace = true } rand = { workspace = true } -rand_chacha = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } serde = { workspace = true } -ethereum-types = { workspace = true } -bitvec = { workspace = true } sha2 = { workspace = true } -[dev-dependencies] -async-lock = { workspace = true } -bincode = { workspace = true } # GG any better options for serialization? - [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] diff --git a/types/Cargo.toml b/types/Cargo.toml index d50f668d3f..4b95630243 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -9,7 +9,6 @@ version = "0.1.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -arbitrary = { version = "1.3", features = ["derive"] } ark-bls12-381 = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } @@ -30,10 +29,7 @@ dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } -futures = { workspace = true } generic-array = { workspace = true } -hex_fmt = "0.3.0" -hotshot-constants = { path = "../constants" } hotshot-task = { path = "../task", default-features = false } hotshot-utils = { path = "../utils" } jf-plonk = { workspace = true } @@ -48,10 +44,6 @@ snafu = { workspace = true } tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } -typenum = { workspace = true } - -[dev-dependencies] -serde_json = "1.0.111" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index c58955d583..7807258e19 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -6,24 +6,14 @@ readme = "README.md" edition = "2021" [dependencies] -ark-bls12-381 = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } -async-trait = { workspace = true } -bincode = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -libp2p-core = { version = "0.40.0", default-features = false } hotshot-types = { path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -jf-primitives = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } -serde = { workspace = true } -serde_json = "1.0.96" -snafu = { workspace = true } -tide = { version = "0.16.0", default-features = false } toml = { workspace = true } [dev-dependencies] From 62d2f590bf7a35bd9a35f969e050adb2e3bca5f8 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 12 Jan 2024 13:35:54 -0500 Subject: [PATCH 0661/1393] sig_key refactor!!! --- hotshot-qc/src/lib.rs | 1 + hotshot-signature-key/Cargo.toml | 24 -- hotshot-signature-key/src/bn254.rs | 8 - hotshot-signature-key/src/bn254/bn254_priv.rs | 69 ---- hotshot-signature-key/src/lib.rs | 5 - hotshot/Cargo.toml | 1 - .../src/traits/election/static_committee.rs | 2 +- hotshot/src/types.rs | 7 +- orchestrator/Cargo.toml | 1 - orchestrator/src/config.rs | 2 +- testing/Cargo.toml | 1 - testing/src/node_types.rs | 15 +- testing/src/task_helpers.rs | 2 +- testing/tests/gen_key_pair.rs | 2 +- testing/tests/memory_network.rs | 4 +- types/src/lib.rs | 3 + types/src/qc.rs | 315 ++++++++++++++++++ .../src/signature_key.rs | 108 +++--- types/src/stake_table.rs | 31 ++ types/src/traits/signature_key.rs | 3 + 20 files changed, 415 insertions(+), 189 deletions(-) delete mode 100644 hotshot-signature-key/Cargo.toml delete mode 100644 hotshot-signature-key/src/bn254.rs delete mode 100644 hotshot-signature-key/src/bn254/bn254_priv.rs delete mode 100644 hotshot-signature-key/src/lib.rs create mode 100644 types/src/qc.rs rename hotshot-signature-key/src/bn254/bn254_pub.rs => types/src/signature_key.rs (54%) create mode 100644 types/src/stake_table.rs diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs index 7c56aa6393..23e8b13b6f 100644 --- a/hotshot-qc/src/lib.rs +++ b/hotshot-qc/src/lib.rs @@ -1,4 +1,5 @@ //! This crates offer implementations of quorum certificates used in HotShot. +//! Deprecated crate!!! #![deny(warnings)] #![deny(missing_docs)] diff --git a/hotshot-signature-key/Cargo.toml b/hotshot-signature-key/Cargo.toml deleted file mode 100644 index efbbee9c08..0000000000 --- a/hotshot-signature-key/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "hotshot-signature-key" -description = "Signature instantiations" -version = { workspace = true } -authors = { workspace = true } -edition = { workspace = true } -rust-version = { workspace = true } - -[dependencies] -ark-serialize = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } -blake3 = { workspace = true } -custom_debug = { workspace = true } -ethereum-types = { workspace = true } -hotshot-qc = { path = "../hotshot-qc" } -hotshot-types = { path = "../types" } -hotshot-utils = { path = "../utils" } -jf-primitives = { workspace = true } -rand = { workspace = true } -rand_chacha = { workspace = true } -serde = { workspace = true } -tracing = { workspace = true } -typenum = { workspace = true } diff --git a/hotshot-signature-key/src/bn254.rs b/hotshot-signature-key/src/bn254.rs deleted file mode 100644 index 25acd2daf1..0000000000 --- a/hotshot-signature-key/src/bn254.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Demonstration implementation of the [`SignatureKey`] trait using BN254 -use hotshot_types::traits::signature_key::SignatureKey; -/// `BLSPrivKey` implementation -mod bn254_priv; -/// `BLSPubKey` implementation -mod bn254_pub; - -pub use self::{bn254_priv::BLSPrivKey, bn254_pub::BLSPubKey}; diff --git a/hotshot-signature-key/src/bn254/bn254_priv.rs b/hotshot-signature-key/src/bn254/bn254_priv.rs deleted file mode 100644 index d69995b737..0000000000 --- a/hotshot-signature-key/src/bn254/bn254_priv.rs +++ /dev/null @@ -1,69 +0,0 @@ -use custom_debug::Debug; -use jf_primitives::signatures::bls_over_bn254::{KeyPair as QCKeyPair, SignKey as QCSignKey}; -use rand::SeedableRng; -use rand_chacha::ChaCha20Rng; -use serde::{Deserialize, Serialize}; -use std::cmp::Ordering; - -/// Private key type for a bn254 keypair -#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Hash)] -pub struct BLSPrivKey { - /// The private key for this keypair - pub(super) priv_key: QCSignKey, -} - -impl BLSPrivKey { - /// Generate a new private key from scratch - #[must_use] - pub fn generate() -> Self { - let key_pair = QCKeyPair::generate(&mut rand::thread_rng()); - let priv_key = key_pair.sign_key_ref(); - Self { - priv_key: priv_key.clone(), - } - } - - #[must_use] - /// Get real seed used for random key generation function - pub fn get_seed_from_seed_indexed(seed: [u8; 32], index: u64) -> [u8; 32] { - let mut hasher = blake3::Hasher::new(); - hasher.update(&seed); - hasher.update(&index.to_le_bytes()); - let new_seed = *hasher.finalize().as_bytes(); - new_seed - } - - /// Generate a new private key from a seed - #[must_use] - pub fn generate_from_seed(seed: [u8; 32]) -> Self { - let key_pair = QCKeyPair::generate(&mut ChaCha20Rng::from_seed(seed)); - let priv_key = key_pair.sign_key_ref(); - Self { - priv_key: priv_key.clone(), - } - } - - /// Generate a new private key from a seed and a number - /// - /// Hashes the seed and the number together using blake3. This method is - /// useful for testing - #[must_use] - pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> Self { - let new_seed = Self::get_seed_from_seed_indexed(seed, index); - Self::generate_from_seed(new_seed) - } -} - -impl PartialOrd for BLSPrivKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for BLSPrivKey { - fn cmp(&self, other: &Self) -> Ordering { - let self_bytes = &self.priv_key.to_string(); - let other_bytes = &other.priv_key.to_string(); - self_bytes.cmp(other_bytes) - } -} diff --git a/hotshot-signature-key/src/lib.rs b/hotshot-signature-key/src/lib.rs deleted file mode 100644 index 2e8f711bab..0000000000 --- a/hotshot-signature-key/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! This crates offer implementations of quorum certificates used in HotShot. -#![deny(warnings)] -#![deny(missing_docs)] - -pub mod bn254; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index e8588d41b4..21449070b8 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -98,7 +98,6 @@ ethereum-types = { workspace = true } futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-signature-key = { path = "../hotshot-signature-key" } hotshot-types = { path = "../types", version = "0.1.0", default-features = false } hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 60175f338d..6e95823726 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,5 +1,5 @@ // use ark_bls12_381::Parameters as Param381; -use hotshot_signature_key::bn254::BLSPubKey; +use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::{ election::{ElectionConfig, Membership}, node_implementation::NodeType, diff --git a/hotshot/src/types.rs b/hotshot/src/types.rs index 8a9c8b94bf..b018bf1813 100644 --- a/hotshot/src/types.rs +++ b/hotshot/src/types.rs @@ -3,5 +3,8 @@ mod handle; pub use event::{Event, EventType}; pub use handle::SystemContextHandle; -pub use hotshot_signature_key::bn254; -pub use hotshot_types::{message::Message, traits::signature_key::SignatureKey}; +pub use hotshot_types::{ + message::Message, + signature_key::{BLSPrivKey, BLSPubKey}, + traits::signature_key::SignatureKey, +}; diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index f9c4467b7d..944634a71f 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -15,7 +15,6 @@ libp2p = { workspace = true } blake3 = { workspace = true, features = ["traits-preview"] } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } hotshot-utils = { path = "../utils" } -hotshot-signature-key = { path = "../hotshot-signature-key" } libp2p-networking = { workspace = true } tide-disco = { workspace = true } surf-disco = { workspace = true } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index db28af4ed7..e6f14c423c 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -186,7 +186,7 @@ impl NetworkConfig { /// /// ```ignore /// # use hotshot_orchestrator::config::NetworkConfig; - /// # use hotshot_signature_key::bn254::BLSPubKey; + /// # use hotshot_types::signature_key::BLSPubKey; /// // # use hotshot::traits::election::static_committee::StaticElectionConfig; /// let file = "/path/to/my/config".to_string(); /// // NOTE: broken due to staticelectionconfig not being importable diff --git a/testing/Cargo.toml b/testing/Cargo.toml index f3133e6335..a49db61c4f 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -31,7 +31,6 @@ hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } hotshot-constants = { path = "../constants" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-signature-key = { path = "../hotshot-signature-key" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } jf-primitives = { workspace = true } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 1014c434e4..a63207f73d 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -5,19 +5,16 @@ use crate::{ state_types::TestState, }; -use hotshot::{ - traits::{ - election::static_committee::{StaticCommittee, StaticElectionConfig}, - implementations::{ - CombinedCommChannel, Libp2pCommChannel, MemoryCommChannel, MemoryStorage, - WebCommChannel, - }, - NodeImplementation, +use hotshot::traits::{ + election::static_committee::{StaticCommittee, StaticElectionConfig}, + implementations::{ + CombinedCommChannel, Libp2pCommChannel, MemoryCommChannel, MemoryStorage, WebCommChannel, }, - types::bn254::BLSPubKey, + NodeImplementation, }; use hotshot_types::{ data::ViewNumber, + signature_key::BLSPubKey, traits::node_implementation::{ChannelMaps, NodeType}, }; use serde::{Deserialize, Serialize}; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 6c852d6160..b6d0aadb2b 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -7,7 +7,7 @@ use crate::{ }; use commit::Committable; use hotshot::{ - types::{bn254::BLSPubKey, SignatureKey, SystemContextHandle}, + types::{BLSPubKey, SignatureKey, SystemContextHandle}, HotShotConsensusApi, HotShotInitializer, Memberships, Networks, SystemContext, }; use hotshot_task::event_stream::ChannelStream; diff --git a/testing/tests/gen_key_pair.rs b/testing/tests/gen_key_pair.rs index ab7cbcb1aa..f2085616ee 100644 --- a/testing/tests/gen_key_pair.rs +++ b/testing/tests/gen_key_pair.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod tests { use core::panic; - use hotshot::types::{bn254::BLSPubKey, SignatureKey}; + use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::ValidatorConfig; use std::env; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 6978b28d6d..f5b277fce6 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -7,13 +7,13 @@ use hotshot::traits::implementations::{ MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, }; use hotshot::traits::NodeImplementation; -use hotshot::types::bn254::{BLSPrivKey, BLSPubKey}; use hotshot::types::SignatureKey; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestState, }; use hotshot_types::message::Message; +use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; @@ -96,7 +96,7 @@ fn get_pubkey() -> BLSPubKey { // random 32 bytes let mut bytes = [0; 32]; rand::thread_rng().fill_bytes(&mut bytes); - BLSPubKey::from_private(&BLSPrivKey::generate_from_seed(bytes)) + BLSPubKey::generated_from_seed_indexed(bytes, 0).0 } /// create a message diff --git a/types/src/lib.rs b/types/src/lib.rs index 5467fa6b2b..b88c824ad9 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -18,8 +18,11 @@ pub mod error; pub mod event; pub mod light_client; pub mod message; +pub mod qc; +pub mod signature_key; pub mod simple_certificate; pub mod simple_vote; +pub mod stake_table; pub mod traits; pub mod utils; pub mod vote; diff --git a/types/src/qc.rs b/types/src/qc.rs new file mode 100644 index 0000000000..68242ddb82 --- /dev/null +++ b/types/src/qc.rs @@ -0,0 +1,315 @@ +//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! See more details in hotshot paper. + +use crate::{ + stake_table::StakeTableEntry, + traits::{qc::QuorumCertificate, signature_key::SignatureKey}, +}; +use ark_std::{ + fmt::Debug, + format, + marker::PhantomData, + rand::{CryptoRng, RngCore}, + vec, + vec::Vec, +}; +use bitvec::prelude::*; +use ethereum_types::U256; +use generic_array::GenericArray; +use jf_primitives::{ + errors::{PrimitivesError, PrimitivesError::ParameterError}, + signatures::AggregateableSignatureSchemes, +}; +use serde::{Deserialize, Serialize}; +use typenum::U32; + +/// An implementation of QC using BLS signature and a bit-vector. +#[derive(Serialize, Deserialize)] +pub struct BitVectorQC Deserialize<'a>>( + PhantomData, +); + +/// Public parameters of [`BitVectorQC`] +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QCParams Deserialize<'a>> { + /// the stake table (snapshot) this QC is verified against + pub stake_entries: Vec>, + /// threshold for the accumulated "weight" of votes to form a QC + pub threshold: U256, + /// public parameter for the aggregated signature scheme + pub agg_sig_pp: P, +} + +impl QuorumCertificate for BitVectorQC +where + A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, + A::VerificationKey: SignatureKey, +{ + type QCProverParams = QCParams; + + // TODO: later with SNARKs we'll use a smaller verifier parameter + type QCVerifierParams = QCParams; + + type QC = (A::Signature, BitVec); + type MessageLength = U32; + type QuorumSize = U256; + + fn sign( + agg_sig_pp: &A::PublicParameter, + message: &GenericArray, + sk: &A::SigningKey, + prng: &mut R, + ) -> Result { + A::sign(agg_sig_pp, sk, message, prng) + } + + fn assemble( + qc_pp: &Self::QCProverParams, + signers: &BitSlice, + sigs: &[A::Signature], + ) -> Result { + if signers.len() != qc_pp.stake_entries.len() { + return Err(ParameterError(format!( + "bit vector len {} != the number of stake entries {}", + signers.len(), + qc_pp.stake_entries.len(), + ))); + } + let total_weight: U256 = + qc_pp + .stake_entries + .iter() + .zip(signers.iter()) + .fold(U256::zero(), |acc, (entry, b)| { + if *b { + acc + entry.stake_amount + } else { + acc + } + }); + if total_weight < qc_pp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_pp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_pp.stake_entries.iter().zip(signers.iter()) { + if *b { + ver_keys.push(entry.stake_key.clone()); + } + } + if ver_keys.len() != sigs.len() { + return Err(ParameterError(format!( + "the number of ver_keys {} != the number of partial signatures {}", + ver_keys.len(), + sigs.len(), + ))); + } + let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; + + Ok((sig, signers.into())) + } + + fn check( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result { + let (sig, signers) = qc; + if signers.len() != qc_vp.stake_entries.len() { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + qc_vp.stake_entries.len(), + ))); + } + let total_weight: U256 = + qc_vp + .stake_entries + .iter() + .zip(signers.iter()) + .fold(U256::zero(), |acc, (entry, b)| { + if *b { + acc + entry.stake_amount + } else { + acc + } + }); + if total_weight < qc_vp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_vp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_vp.stake_entries.iter().zip(signers.iter()) { + if *b { + ver_keys.push(entry.stake_key.clone()); + } + } + A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; + + Ok(total_weight) + } + + fn trace( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray<::MessageUnit, Self::MessageLength>, + qc: &Self::QC, + ) -> Result::VerificationKey>, PrimitivesError> { + let (_sig, signers) = qc; + if signers.len() != qc_vp.stake_entries.len() { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + qc_vp.stake_entries.len(), + ))); + } + + Self::check(qc_vp, message, qc)?; + + let signer_pks: Vec<_> = qc_vp + .stake_entries + .iter() + .zip(signers.iter()) + .filter(|(_, b)| **b) + .map(|(pk, _)| pk.stake_key.clone()) + .collect(); + Ok(signer_pks) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, + SignatureScheme, + }; + + macro_rules! test_quorum_certificate { + ($aggsig:tt) => { + let mut rng = jf_utils::test_rng(); + let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); + let key_pair1 = KeyPair::generate(&mut rng); + let key_pair2 = KeyPair::generate(&mut rng); + let key_pair3 = KeyPair::generate(&mut rng); + let entry1 = StakeTableEntry { + stake_key: key_pair1.ver_key(), + stake_amount: U256::from(3u8), + }; + let entry2 = StakeTableEntry { + stake_key: key_pair2.ver_key(), + stake_amount: U256::from(5u8), + }; + let entry3 = StakeTableEntry { + stake_key: key_pair3.ver_key(), + stake_amount: U256::from(7u8), + }; + let qc_pp = QCParams { + stake_entries: vec![entry1, entry2, entry3], + threshold: U256::from(10u8), + agg_sig_pp, + }; + let msg = [72u8; 32]; + let sig1 = BitVectorQC::<$aggsig>::sign( + &agg_sig_pp, + &msg.into(), + key_pair1.sign_key_ref(), + &mut rng, + ) + .unwrap(); + let sig2 = BitVectorQC::<$aggsig>::sign( + &agg_sig_pp, + &msg.into(), + key_pair2.sign_key_ref(), + &mut rng, + ) + .unwrap(); + let sig3 = BitVectorQC::<$aggsig>::sign( + &agg_sig_pp, + &msg.into(), + key_pair3.sign_key_ref(), + &mut rng, + ) + .unwrap(); + + // happy path + let signers = bitvec![0, 1, 1]; + let qc = BitVectorQC::<$aggsig>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone(), sig3.clone()], + ) + .unwrap(); + assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert_eq!( + BitVectorQC::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + vec![key_pair2.ver_key(), key_pair3.ver_key()], + ); + + // Check the QC and the QCParams can be serialized / deserialized + assert_eq!( + qc, + bincode::deserialize(&bincode::serialize(&qc).unwrap()).unwrap() + ); + + assert_eq!( + qc_pp, + bincode::deserialize(&bincode::serialize(&qc_pp).unwrap()).unwrap() + ); + + // bad paths + // number of signatures unmatch + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone()] + ) + .is_err()); + // total weight under threshold + let active_bad = bitvec![1, 1, 0]; + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + active_bad.as_bitslice(), + &[sig1.clone(), sig2.clone()] + ) + .is_err()); + // wrong bool vector length + let active_bad_2 = bitvec![0, 1, 1, 0]; + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + active_bad_2.as_bitslice(), + &[sig2, sig3], + ) + .is_err()); + + assert!(BitVectorQC::<$aggsig>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad) + ) + .is_err()); + assert!(BitVectorQC::<$aggsig>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad_2) + ) + .is_err()); + let bad_msg = [70u8; 32]; + assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + + let bad_sig = &sig1; + assert!( + BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) + .is_err() + ); + }; + } + #[test] + fn test_quorum_certificate() { + test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); + } +} diff --git a/hotshot-signature-key/src/bn254/bn254_pub.rs b/types/src/signature_key.rs similarity index 54% rename from hotshot-signature-key/src/bn254/bn254_pub.rs rename to types/src/signature_key.rs index 64f1901750..f634eebf77 100644 --- a/hotshot-signature-key/src/bn254/bn254_pub.rs +++ b/types/src/signature_key.rs @@ -1,52 +1,37 @@ -use super::{BLSPrivKey, SignatureKey}; -use bitvec::prelude::*; -use blake3::traits::digest::generic_array::GenericArray; -use ethereum_types::U256; -use hotshot_qc::bit_vector_old::{ - BitVectorQC, QCParams as JFQCParams, StakeTableEntry as JFStakeTableEntry, +//! Types and structs for the hotshot signature keys + +use crate::{ + qc::{BitVectorQC, QCParams}, + stake_table::StakeTableEntry, + traits::{qc::QuorumCertificate, signature_key::SignatureKey}, }; -use hotshot_types::traits::qc::QuorumCertificate; -use jf_primitives::errors::PrimitivesError; -use jf_primitives::signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey}, - SignatureScheme, +use bitvec::{slice::BitSlice, vec::BitVec}; +use ethereum_types::U256; +use generic_array::GenericArray; +use jf_primitives::{ + errors::PrimitivesError, + signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, + SignatureScheme, + }, }; -use serde::{Deserialize, Serialize}; -use std::{cmp::Ordering, fmt::Debug}; -use tracing::{instrument, warn}; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; +use tracing::instrument; use typenum::U32; -/// Public key type for an bn254 [`SignatureKey`] pair -/// -/// This type makes use of noise for non-deterministic signatures. -#[derive(Clone, PartialEq, Eq, Hash, Copy, Serialize, Deserialize, Debug)] - -pub struct BLSPubKey { - /// The public key for this keypair - pub_key: VerKey, -} - -impl PartialOrd for BLSPubKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for BLSPubKey { - fn cmp(&self, other: &Self) -> Ordering { - let self_bytes = &self.pub_key.to_string(); - let other_bytes = &other.pub_key.to_string(); - self_bytes.cmp(other_bytes) - } -} +/// BLS private key used to sign a message +pub type BLSPrivKey = SignKey; +/// BLS public key used to verify a signature +pub type BLSPubKey = VerKey; +/// Public parameters for BLS signature scheme +pub type BLSPublicParam = (); impl SignatureKey for BLSPubKey { type PrivateKey = BLSPrivKey; - type StakeTableEntry = JFStakeTableEntry; - type QCParams = JFQCParams< - ::VerificationKey, - ::PublicParameter, - >; + type StakeTableEntry = StakeTableEntry; + type QCParams = + QCParams::PublicParameter>; type PureAssembledSignatureType = ::Signature; type QCType = (Self::PureAssembledSignatureType, BitVec); @@ -54,11 +39,9 @@ impl SignatureKey for BLSPubKey { #[instrument(skip(self))] fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { - let ver_key = self.pub_key; - // This is the validation for QC partial signature before append(). let generic_msg: &GenericArray = GenericArray::from_slice(data); - BLSOverBN254CurveSignatureScheme::verify(&(), &ver_key, generic_msg, signature).is_ok() + BLSOverBN254CurveSignatureScheme::verify(&(), self, generic_msg, signature).is_ok() } fn sign( @@ -69,51 +52,53 @@ impl SignatureKey for BLSPubKey { BitVectorQC::::sign( &(), generic_msg, - &sk.priv_key, + sk, &mut rand::thread_rng(), ) } fn from_private(private_key: &Self::PrivateKey) -> Self { - let pub_key = VerKey::from(&private_key.priv_key); - Self { pub_key } + BLSPubKey::from(private_key) } fn to_bytes(&self) -> Vec { let mut buf = vec![]; - ark_serialize::CanonicalSerialize::serialize_compressed(&self.pub_key, &mut buf) + ark_serialize::CanonicalSerialize::serialize_compressed(self, &mut buf) .expect("Serialization should not fail."); buf } fn from_bytes(bytes: &[u8]) -> Result { - let pub_key: VerKey = ark_serialize::CanonicalDeserialize::deserialize_compressed(bytes)?; - Ok(Self { pub_key }) + Ok(ark_serialize::CanonicalDeserialize::deserialize_compressed( + bytes, + )?) } fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey) { - let priv_key = Self::PrivateKey::generated_from_seed_indexed(seed, index); - (Self::from_private(&priv_key), priv_key) + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed(new_seed)); + (kp.ver_key(), kp.sign_key_ref().clone()) } fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry { - JFStakeTableEntry { - stake_key: self.pub_key, + StakeTableEntry { + stake_key: *self, stake_amount: U256::from(stake), } } fn get_public_key(entry: &Self::StakeTableEntry) -> Self { - Self { - pub_key: entry.stake_key, - } + entry.stake_key } fn get_public_parameter( stake_entries: Vec, threshold: U256, ) -> Self::QCParams { - JFQCParams { + QCParams { stake_entries, threshold, agg_sig_pp: (), @@ -139,12 +124,9 @@ impl SignatureKey for BLSPubKey { } fn genesis_proposer_pk() -> Self { - use jf_primitives::signatures::bls_over_bn254::KeyPair; use rand::rngs::mock::StepRng; let mut my_rng = StepRng::new(42, 1337); let kp = KeyPair::generate(&mut my_rng); - BLSPubKey { - pub_key: kp.ver_key(), - } + kp.ver_key() } } diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs new file mode 100644 index 0000000000..7c6525e0eb --- /dev/null +++ b/types/src/stake_table.rs @@ -0,0 +1,31 @@ +//! Types and structs related to the stake table + +use crate::traits::signature_key::{SignatureKey, StakeTableEntryType}; +use ethereum_types::U256; +use serde::{Deserialize, Serialize}; + +/// Stake table entry +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash, Eq)] +#[serde(bound(deserialize = ""))] +pub struct StakeTableEntry { + /// The public key + pub stake_key: K, + /// The associated stake amount + pub stake_amount: U256, +} + +impl StakeTableEntryType for StakeTableEntry { + /// Get the stake amount + fn get_stake(&self) -> U256 { + self.stake_amount + } +} + +impl StakeTableEntry { + /// Get the public key + pub fn get_key(&self) -> &K { + &self.stake_key + } +} + +// TODO(Chengyu): add stake table snapshot here diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index d6af846f52..6f66aab02e 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -4,6 +4,7 @@ use ethereum_types::U256; use jf_primitives::errors::PrimitivesError; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, hash::Hash}; +use tagged_base64::TaggedBase64; /// Type representing stake table entries in a `StakeTable` pub trait StakeTableEntryType { @@ -26,6 +27,8 @@ pub trait SignatureKey: + Eq + PartialOrd + Ord + + TryFrom + + TryInto { /// The private key type for this signature algorithm type PrivateKey: Send From affcc6ad2c2510f00fef7d0cb095885506b08509 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 12 Jan 2024 13:41:33 -0500 Subject: [PATCH 0662/1393] Rename `QuorumCertificate` to `QuorumCertificateScheme`. --- types/src/qc.rs | 4 ++-- types/src/signature_key.rs | 2 +- types/src/traits/qc.rs | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/types/src/qc.rs b/types/src/qc.rs index 68242ddb82..4fbe6d00d7 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -3,7 +3,7 @@ use crate::{ stake_table::StakeTableEntry, - traits::{qc::QuorumCertificate, signature_key::SignatureKey}, + traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, }; use ark_std::{ fmt::Debug, @@ -41,7 +41,7 @@ pub struct QCParams Deserialize<'a>> { pub agg_sig_pp: P, } -impl QuorumCertificate for BitVectorQC +impl QuorumCertificateScheme for BitVectorQC where A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, A::VerificationKey: SignatureKey, diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index f634eebf77..1f47a0e018 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -3,7 +3,7 @@ use crate::{ qc::{BitVectorQC, QCParams}, stake_table::StakeTableEntry, - traits::{qc::QuorumCertificate, signature_key::SignatureKey}, + traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, }; use bitvec::{slice::BitSlice, vec::BitVec}; use ethereum_types::U256; diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 1d3529d5fb..213b51977e 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -11,7 +11,9 @@ use jf_primitives::{errors::PrimitivesError, signatures::AggregateableSignatureS use serde::{Deserialize, Serialize}; /// Trait for validating a QC built from different signatures on the same message -pub trait QuorumCertificate Deserialize<'a>> +pub trait QuorumCertificateScheme< + A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, +> { /// Public parameters for generating the QC /// E.g: snark proving/verifying keys, list of (or pointer to) public keys stored in the smart contract. From c32e8ba81af2237c27b66999ad46aeb6fb7f4334 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 12 Jan 2024 13:43:17 -0500 Subject: [PATCH 0663/1393] Merge from main branch --- hotshot-qc/Cargo.toml | 4 ---- hotshot-stake-table/Cargo.toml | 6 ------ hotshot-state-prover/Cargo.toml | 11 +---------- hotshot/Cargo.toml | 15 +++------------ libp2p-networking/Cargo.toml | 32 -------------------------------- orchestrator/Cargo.toml | 8 +------- task-impls/Cargo.toml | 7 +------ testing/Cargo.toml | 18 ------------------ types/Cargo.toml | 8 -------- web_server/Cargo.toml | 10 ---------- 10 files changed, 6 insertions(+), 113 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 9c53fb335a..80b306023d 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -8,13 +8,9 @@ rust-version = { workspace = true } [dependencies] ark-bls12-377 = "0.4.0" -ark-bls12-381 = { workspace = true } ark-bn254 = "0.4.0" ark-ec = { workspace = true } ark-ff = "0.4.0" -ark-pallas = "0.4.0" -ark-poly = "0.4.0" -ark-serialize = { workspace = true } ark-std = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 4cb53eb2a9..db78fbece7 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -12,19 +12,13 @@ ark-ed-on-bn254 = "0.4.0" ark-ff = "0.4.0" ark-serialize = { workspace = true } ark-std = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } digest = { workspace = true } -displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } -generic-array = { workspace = true } hotshot-types = { path = "../types" } jf-primitives = { workspace = true } -jf-relation = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } tagged-base64 = { workspace = true } -typenum = { workspace = true } [dev-dependencies] rand_chacha = { workspace = true } diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml index 826b9c0efd..1554b2ed8d 100644 --- a/hotshot-state-prover/Cargo.toml +++ b/hotshot-state-prover/Cargo.toml @@ -11,25 +11,16 @@ ark-bn254 = { workspace = true } ark-ec = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } -ark-serialize = { workspace = true } ark-std = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } -digest = { workspace = true } -displaydoc = { version = "0.2.3", default-features = false } ethereum-types = { workspace = true } -generic-array = "0.14.7" hotshot-types = { path = "../types" } jf-plonk = { workspace = true } jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } -serde = { workspace = true, features = ["rc"] } -tagged-base64 = { workspace = true } -typenum = { workspace = true } hotshot-stake-table = { path = "../hotshot-stake-table" } [features] default = ["parallel"] -std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] +std = ["ark-std/std", "ark-ff/std"] parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 21449070b8..7772dbbecc 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -84,17 +84,13 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6.3" bincode = { workspace = true } -bitvec = { workspace = true } clap = { version = "4.4", features = ["derive", "env"], optional = true } commit = { workspace = true } hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } dashmap = "5.5.1" -derivative = { version = "2.2.0", optional = true } either = { workspace = true } embed-doc-image = "0.1.4" -espresso-systems-common = { workspace = true } -ethereum-types = { workspace = true } futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } @@ -102,32 +98,27 @@ hotshot-types = { path = "../types", version = "0.1.0", default-features = false hotshot-utils = { path = "../utils" } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } -jf-primitives = { workspace = true } -libp2p = { workspace = true } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } -rand_chacha = { workspace = true } serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -local-ip-address = "0.5.6" -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } derive_more = "0.99.17" portpicker = "0.1.1" lru = "0.12.1" tracing = { workspace = true } -typenum = { workspace = true } + [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } [dev-dependencies] -blake3 = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } -serde_json = "1.0.111" toml = { workspace = true } +blake3 = { workspace = true } +local-ip-address = "0.5.6" hotshot-testing = { path = "../testing" } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 1b7f90ed06..805c24c2bc 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -11,25 +11,12 @@ authors = ["Espresso Systems "] default = ["webui"] webui = [] -# # this only has effect on linux -# lossy_network = [ -# "nix", -# "netlink-packet-route", -# "netlink-packet-utils", -# "netlink-packet-core", -# "netlink-proto", -# "netlink-sys", -# "netlink-packet-generic", -# "rtnetlink", -# ] - [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } -color-eyre = "0.6.2" custom_debug = { workspace = true } derive_builder = "0.12.0" either = { workspace = true } @@ -38,8 +25,6 @@ hotshot-constants = { path = "../constants" } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } -libp2p-noise = { version = "0.44.0", default-features = false } -parking_lot = "0.12.1" rand = { workspace = true } serde = { workspace = true } serde_json = "1.0.111" @@ -58,20 +43,3 @@ tokio-stream = "0.1.14" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] libp2p = { workspace = true, features = ["async-std"] } async-std = { workspace = true } - - -[target.'cfg(target_os = "linux")'.dependencies] -## lossy_network dependencies -nix = { version = "0.27.1", optional = true } -rtnetlink = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.1", features = [ - "smol_socket", -], default-features = false, optional = true } -netlink-packet-route = { git = "https://github.com/espressosystems/netlink.git", version = "0.11.0", optional = true } -netlink-packet-utils = { git = "https://github.com/espressosystems/netlink.git", version = "0.5.1", optional = true } -netlink-packet-core = { git = "https://github.com/espressosystems/netlink.git", version = "0.4.2", optional = true } -netlink-proto = { git = "https://github.com/espressosystems/netlink.git", version = "0.9.2", optional = true } -netlink-sys = { git = "https://github.com/espressosystems/netlink.git", version = "0.8.2", features = [ - "smol_socket", -], optional = true } -netlink-packet-generic = { git = "https://github.com/espressosystems/netlink.git", version = "0.2.0", optional = true } - diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 944634a71f..7ad22be0d8 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -6,22 +6,16 @@ edition = "2021" [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } -async-trait = { workspace = true } -bincode = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -libp2p-core = { version = "0.40.0", default-features = false } libp2p = { workspace = true } -blake3 = { workspace = true, features = ["traits-preview"] } +blake3 = { workspace = true } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -libp2p-networking = { workspace = true } tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } serde_json = "1.0.96" -snafu = { workspace = true } toml = { workspace = true } thiserror = "1.0.50" diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index f6d9792041..65940bdc42 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -10,20 +10,15 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } either = { workspace = true } futures = { workspace = true } -serde = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } -atomic_enum = "0.2.0" -pin-project = "1.1.3" hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-task = { path = "../task", default-features = false } +hotshot-utils = { path = "../utils" } time = { workspace = true } commit = { workspace = true } -jf-primitives = { workspace = true } -rand_chacha = { workspace = true } -hotshot-utils = { path = "../utils" } bincode = { workspace = true } bitvec = { workspace = true } sha2 = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index a49db61c4f..32318f1924 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -11,42 +11,24 @@ default = [] slow-tests = [] [dependencies] -ark-bls12-381 = { workspace = true } -ark-serialize = { workspace = true, features = ["derive"] } -espresso-systems-common = { workspace = true } async-compatibility-layer = { workspace = true } -async-trait = { workspace = true } -# needed for vrf demo -# so non-optional for now -blake3 = { workspace = true, features = ["traits-preview"] } sha3 = "^0.10" commit = { workspace = true } -derivative = "2.2.0" either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ "hotshot-testing", ], default-features = false } hotshot-types = { path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -hotshot-constants = { path = "../constants" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } -jf-primitives = { workspace = true } rand = { workspace = true } -rand_chacha = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } serde = { workspace = true } -ethereum-types = { workspace = true } -bitvec = { workspace = true } sha2 = { workspace = true } -[dev-dependencies] -async-lock = { workspace = true } -bincode = { workspace = true } # GG any better options for serialization? - [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] diff --git a/types/Cargo.toml b/types/Cargo.toml index d50f668d3f..4b95630243 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -9,7 +9,6 @@ version = "0.1.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -arbitrary = { version = "1.3", features = ["derive"] } ark-bls12-381 = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } @@ -30,10 +29,7 @@ dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } -futures = { workspace = true } generic-array = { workspace = true } -hex_fmt = "0.3.0" -hotshot-constants = { path = "../constants" } hotshot-task = { path = "../task", default-features = false } hotshot-utils = { path = "../utils" } jf-plonk = { workspace = true } @@ -48,10 +44,6 @@ snafu = { workspace = true } tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } -typenum = { workspace = true } - -[dev-dependencies] -serde_json = "1.0.111" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index c58955d583..7807258e19 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -6,24 +6,14 @@ readme = "README.md" edition = "2021" [dependencies] -ark-bls12-381 = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } -async-trait = { workspace = true } -bincode = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -libp2p-core = { version = "0.40.0", default-features = false } hotshot-types = { path = "../types", default-features = false } -hotshot-utils = { path = "../utils" } -jf-primitives = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } -serde = { workspace = true } -serde_json = "1.0.96" -snafu = { workspace = true } -tide = { version = "0.16.0", default-features = false } toml = { workspace = true } [dev-dependencies] From 4e088c64bf4f41d5bc920a442b4c9549e002b2bd Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 12 Jan 2024 13:44:21 -0500 Subject: [PATCH 0664/1393] fix cargo toml --- types/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/types/Cargo.toml b/types/Cargo.toml index 4b95630243..971d1ab721 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -44,6 +44,7 @@ snafu = { workspace = true } tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } +typenum = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From ed35185ed9f047d524531dec4473b5455b3ac213 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 12 Jan 2024 14:42:00 -0500 Subject: [PATCH 0665/1393] update bound for signaturekey --- types/src/traits/signature_key.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 6f66aab02e..9bbc1a223d 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -3,7 +3,10 @@ use bitvec::prelude::*; use ethereum_types::U256; use jf_primitives::errors::PrimitivesError; use serde::{Deserialize, Serialize}; -use std::{fmt::Debug, hash::Hash}; +use std::{ + fmt::{Debug, Display}, + hash::Hash, +}; use tagged_base64::TaggedBase64; /// Type representing stake table entries in a `StakeTable` @@ -27,8 +30,9 @@ pub trait SignatureKey: + Eq + PartialOrd + Ord - + TryFrom - + TryInto + + Display + + for<'a> TryFrom<&'a TaggedBase64> + + Into { /// The private key type for this signature algorithm type PrivateKey: Send From 960488602303a1d7fbbaba073a40697f31016f69 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 12 Jan 2024 15:07:59 -0500 Subject: [PATCH 0666/1393] stupid bug --- hotshot-stake-table/src/vec_based/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 14cf976bd9..9a4e38f1b9 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -24,7 +24,7 @@ impl ToFields for StateVerKey { } impl ToFields for QCVerKey { - const SIZE: usize = 2; + const SIZE: usize = 3; fn to_fields(&self) -> Vec { let bytes = to_bytes!(&self.to_affine()).unwrap(); From fb1c0abd2f2bc218068a508b9b0dc82fad4a03b4 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 12 Jan 2024 12:29:50 -0800 Subject: [PATCH 0667/1393] Replace the last decided leaf with the correct one --- task-impls/src/consensus.rs | 89 ++++++++++++++++++++------------- testing/src/state_types.rs | 2 + testing/src/task_helpers.rs | 9 ++-- testing/tests/consensus_task.rs | 6 ++- types/src/consensus.rs | 33 +++++++----- 5 files changed, 86 insertions(+), 53 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 036b1361ea..b2b0e76de5 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -212,7 +212,11 @@ impl, A: ConsensusApi + return false; }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.append(&proposal.block_header.clone(), &view) else { + let Ok(state) = parent.state.validate_and_apply_header( + &proposal.block_header.clone(), + &parent.block_header.clone(), + &view, + ) else { error!("Block header doesn't extend the proposal",); return false; }; @@ -302,7 +306,11 @@ impl, A: ConsensusApi + return false; }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.append(&proposal.block_header.clone(), &view) else { + let Ok(state) = parent.state.validate_and_apply_header( + &proposal.block_header.clone(), + &parent.block_header.clone(), + &view, + ) else { error!("Block header doesn't extend the proposal",); return false; }; @@ -538,43 +546,50 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - let Ok(state) = consensus - .get_decided_state() - .append(&proposal.data.block_header.clone(), &view) - else { - error!("Block header doesn't extend the proposal",); - return; - }; - let leaf = Leaf { - view_number: view, - justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.get_data().leaf_commit, - block_header: proposal.data.block_header, - state, - block_payload: None, - rejected: Vec::new(), - proposer_id: sender, - }; - - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - metadata: leaf.get_state().metadata(), + // TODO (Keyao) If not, should/how do we update? + if let Ok(parent_leaf) = consensus.get_leaf(view - 1) { + let parent_state = parent_leaf.get_state(); + let Ok(state) = parent_state.validate_and_apply_header( + &proposal.data.block_header.clone(), + &parent_leaf.block_header.clone(), + &view, + ) else { + error!("Block header doesn't extend the proposal",); + return; + }; + let leaf = Leaf { + view_number: view, + justify_qc: justify_qc.clone(), + // TODO (Keyao) Use info from justify QC or the parent leaf? + parent_commitment: justify_qc.get_data().leaf_commit, + block_header: proposal.data.block_header, + state, + block_payload: None, + rejected: Vec::new(), + proposer_id: sender, + }; + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + metadata: leaf.get_state().metadata(), + }, }, - }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + } return; }; let parent_commitment = parent.commit(); - let Ok(state) = parent - .state - .append(&proposal.data.block_header.clone(), &view) - else { + let Ok(state) = parent.state.validate_and_apply_header( + &proposal.data.block_header.clone(), + &parent.block_header.clone(), + &view, + ) else { error!("Block header doesn't extend the proposal",); return; }; @@ -1190,7 +1205,11 @@ impl, A: ConsensusApi + commit_and_metadata.metadata.clone(), &parent_header, ); - let Ok(state) = parent_leaf.state.append(&block_header.clone(), &view) else { + let Ok(state) = parent_leaf.state.validate_and_apply_header( + &block_header.clone(), + &parent_leaf.block_header.clone(), + &view, + ) else { error!("Block header doesn't extend the proposal",); return false; }; diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 7d6f8d60d7..078e3f8f65 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -60,6 +60,8 @@ impl State for TestState { type Time = ViewNumber; + type Metadata = (); + fn validate_and_apply_header( &self, _proposed_header: &Self::BlockHeader, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 9a8713d85f..00272e5fc3 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -139,10 +139,11 @@ async fn build_quorum_proposal_and_signature( ); let view_number = ViewNumber::new(view); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); - let Ok(state) = parent_leaf - .state - .append(&block_header.clone(), &view_number) - else { + let Ok(state) = parent_leaf.state.validate_and_apply_header( + &block_header.clone(), + &parent_leaf.block_header.clone(), + &view_number, + ) else { panic!("Block header doesn't extend the proposal",); }; let leaf = Leaf { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 26bf9487e6..38ca3933a5 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -53,7 +53,11 @@ async fn build_vote( }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.append(&proposal.block_header.clone(), &view) else { + let Ok(state) = parent.state.validate_and_apply_header( + &proposal.block_header.clone(), + &parent.block_header.clone(), + &view, + ) else { panic!("Block header doesn't extend the proposal",); }; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 796c04627e..eca478d53c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -333,27 +333,34 @@ impl Consensus { self.state_map = self.state_map.split_off(&new_anchor_view); } - /// Gets the last decided leaf + /// Get the leaf corresponding to the given view. + /// + /// # Errors + /// If the view doesn't exist in the saved leaves set, due to the garbage collection for views + /// that are older than the last decided view. + /// /// # Panics - /// if the last decided view does not exist in the state map - /// this should never happen. - #[must_use] - pub fn get_decided_leaf(&self) -> Leaf { - let decided_view_num = self.last_decided_view; - let view = self.state_map.get(&decided_view_num).unwrap(); + /// If the last decided view does not exist in the state map, which should never happen. + pub fn get_leaf(&self, view: TYPES::Time) -> Result, HotShotError> { + let view = self.state_map.get(&view).unwrap(); let leaf = view .get_leaf_commitment() .expect("Decided state not found! Consensus internally inconsistent"); - self.saved_leaves.get(&leaf).unwrap().clone() + Ok(self.saved_leaves.get(&leaf).unwrap().clone()) } - /// Gets the last decided state + /// Get the last decided leaf. + /// /// # Panics - /// if the last decided view does not exist in the state map - /// this should never happen. + /// If the last decided view does not exist in + /// * The saved leaves set, or + /// * The state map, + /// + /// Either of which should never happen. #[must_use] - pub fn get_decided_state(&self) -> TYPES::StateType { - self.get_decided_leaf().get_state() + pub fn get_decided_leaf(&self) -> Leaf { + let last_decided_view = self.last_decided_view; + self.get_leaf(last_decided_view).unwrap() } } From 821d8a4b58effb87fe4e0b2a85ba1474f23a8e62 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Fri, 12 Jan 2024 16:30:30 -0500 Subject: [PATCH 0668/1393] Update jellyfish --- task-impls/src/vid.rs | 7 ++++--- testing/src/block_types.rs | 4 +--- types/src/data.rs | 3 +-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index cabefaaa38..a4b9338f87 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -122,9 +122,10 @@ impl, A: ConsensusApi + } HotShotEvent::BlockReady(vid_disperse, view_number) => { - let Ok(signature) = - TYPES::SignatureKey::sign(&self.private_key, &vid_disperse.payload_commitment) - else { + let Ok(signature) = TYPES::SignatureKey::sign( + &self.private_key, + vid_disperse.payload_commitment.as_ref().as_ref(), + ) else { error!("VID: failed to sign dispersal payload"); return None; }; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index ebddb94fa8..ba701cbbc3 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -221,12 +221,10 @@ impl BlockHeader for TestBlockHeader { impl Committable for TestBlockHeader { fn commit(&self) -> Commitment { - let payload_commitment_bytes: [u8; 32] = self.payload_commitment().into(); - RawCommitmentBuilder::new("Header Comm") .u64_field("block number", self.block_number()) .constant_str("payload commitment") - .fixed_size_bytes(&payload_commitment_bytes) + .fixed_size_bytes(self.payload_commitment().as_ref().as_ref()) .finalize() } diff --git a/types/src/data.rs b/types/src/data.rs index 4af670ed67..0c3994d4c9 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -504,7 +504,6 @@ pub fn serialize_signature2( impl Committable for Leaf { fn commit(&self) -> commit::Commitment { - let payload_commitment_bytes: [u8; 32] = self.get_payload_commitment().into(); let signatures_bytes = if self.justify_qc.is_genesis { let mut bytes = vec![]; bytes.extend("genesis".as_bytes()); @@ -519,7 +518,7 @@ impl Committable for Leaf { .u64_field("block number", self.get_height()) .field("parent Leaf commitment", self.parent_commitment) .constant_str("block payload commitment") - .fixed_size_bytes(&payload_commitment_bytes) + .fixed_size_bytes(self.get_payload_commitment().as_ref().as_ref()) .constant_str("justify_qc view number") .u64(*self.justify_qc.view_number) .field( From 20ea3f0083714c63ebb878c008792861d6891076 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 12 Jan 2024 17:14:09 -0500 Subject: [PATCH 0669/1393] more comments added --- testing/src/task_helpers.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 196225385f..da473389f2 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -225,6 +225,7 @@ async fn build_quorum_proposal_and_signature( .total_nodes(), ); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); + // current leaf that can be re-assigned everytime when entering a new view let mut leaf = Leaf { view_number: ViewNumber::new(1), justify_qc: consensus.high_qc.clone(), @@ -247,6 +248,7 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { + // save states for the previous view to pass all the qc checks consensus.state_map.insert( ViewNumber::new(cur_view - 1), View { @@ -256,6 +258,7 @@ async fn build_quorum_proposal_and_signature( }, ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + // create a qc by aggregate signatures on the previous view (the data signed is last leaf commitment) let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); let quorum_data = QuorumData { leaf_commit: leaf.commit(), @@ -272,6 +275,7 @@ async fn build_quorum_proposal_and_signature( public_key, private_key, ); + // create a new leaf for the current view let parent_leaf = leaf.clone(); let leaf_new_view = Leaf { view_number: ViewNumber::new(cur_view), From 22ab2eaf87c14396239395ab297bc1a51a592cfb Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Sun, 14 Jan 2024 15:57:44 -0500 Subject: [PATCH 0670/1393] add metadata arg to BlockPayload::transaction_commitments (#2399) --- hotshot/src/lib.rs | 8 +++++++- hotshot/src/types/handle.rs | 14 +++++++++----- task-impls/src/consensus.rs | 6 ++++-- task-impls/src/transactions.rs | 7 +++++-- testing/src/block_types.rs | 2 +- types/src/traits/block_contents.rs | 2 +- 6 files changed, 27 insertions(+), 12 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index cf81d1b6b3..f85af247a5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -60,7 +60,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, network::{CommunicationChannel, NetworkError}, - node_implementation::{ChannelMaps, NodeType, SendToTasks}, + node_implementation::{NodeType, SendToTasks}, signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, @@ -78,6 +78,10 @@ use std::{ }; use tasks::add_vid_task; use tracing::{debug, error, info, instrument, trace, warn}; + +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::node_implementation::ChannelMaps; + // -- Rexports // External /// Reexport rand crate @@ -162,6 +166,7 @@ pub struct SystemContextInner> { /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the /// latter of which is only applicable for sequencing consensus. + #[cfg(feature = "hotshot-testing")] channel_maps: (ChannelMaps, Option>), // global_registry: GlobalRegistry, @@ -256,6 +261,7 @@ impl> SystemContext { let inner: Arc> = Arc::new(SystemContextInner { id: nonce, + #[cfg(feature = "hotshot-testing")] channel_maps: I::new_channel_maps(start_view), consensus, public_key, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 0d52a18842..2040061489 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -16,17 +16,21 @@ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ consensus::Consensus, + data::Leaf, error::HotShotError, event::EventType, - message::{MessageKind, SequencingMessage}, - traits::{ - election::Membership, node_implementation::NodeType, state::ConsensusTime, storage::Storage, - }, + simple_certificate::QuorumCertificate, + traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, }; -use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; use std::sync::Arc; use tracing::error; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::{ + message::{MessageKind, SequencingMessage}, + traits::election::Membership, +}; + /// Event streaming handle for a [`SystemContext`] instance running in the background /// /// This type provides the means to message and interact with a background [`SystemContext`] instance, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b7909dfec2..8d190e16ff 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -664,8 +664,10 @@ impl, A: ConsensusApi + } leaf_views.push(leaf.clone()); - if let Some(payload) = leaf.block_payload { - for txn in payload.transaction_commitments() { + if let Some(ref payload) = leaf.block_payload { + for txn in payload + .transaction_commitments(leaf.get_block_header().metadata()) + { included_txns.insert(txn); } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7b6df7300a..e162a6c26f 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -16,6 +16,7 @@ use hotshot_types::{ consensus::Consensus, data::Leaf, traits::{ + block_contents::BlockHeader, consensus_api::ConsensusApi, election::Membership, node_implementation::{NodeImplementation, NodeType}, @@ -121,8 +122,10 @@ impl, A: ConsensusApi + let mut included_txn_size = 0; let mut included_txn_count = 0; for leaf in leaf_chain { - if let Some(payload) = leaf.block_payload { - for txn in payload.transaction_commitments() { + if let Some(ref payload) = leaf.block_payload { + for txn in + payload.transaction_commitments(leaf.get_block_header().metadata()) + { included_txns.insert(txn); } } diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index ba701cbbc3..20572510a1 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -149,7 +149,7 @@ impl BlockPayload for TestBlockPayload { Ok(TestTransaction::encode(self.transactions.clone())?.into_iter()) } - fn transaction_commitments(&self) -> Vec> { + fn transaction_commitments(&self, _metadata: &Self::Metadata) -> Vec> { self.transactions .iter() .map(commit::Committable::commit) diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index f3d3769c35..5b8bf39600 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -70,7 +70,7 @@ pub trait BlockPayload: fn encode(&self) -> Result, Self::Error>; /// List of transaction commitments. - fn transaction_commitments(&self) -> Vec>; + fn transaction_commitments(&self, metadata: &Self::Metadata) -> Vec>; } /// Compute the VID payload commitment. From f467157eb9c245c3e5228b1c7054fba234cb597a Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jan 2024 06:19:10 -0800 Subject: [PATCH 0671/1393] Remove fn initialize --- testing/src/state_types.rs | 6 ------ types/src/traits/state.rs | 3 --- 2 files changed, 9 deletions(-) diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 71e66edb42..222e2f6672 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -80,12 +80,6 @@ impl State for TestState { }) } - fn initialize() -> Self { - let mut state = Self::default(); - state.block_height += 1; - state - } - fn on_commit(&self) {} } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 4134a2782a..5274c5abba 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -60,9 +60,6 @@ pub trait State: view_number: &Self::Time, ) -> Result; - /// Initialize the state. - fn initialize() -> Self; - /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); } From daf05256f40049ddc1d6f8bc4162c5f09e6900ed Mon Sep 17 00:00:00 2001 From: MRain Date: Mon, 15 Jan 2024 09:59:58 -0500 Subject: [PATCH 0672/1393] update jellyfish --- testing/src/block_types.rs | 5 ++++- types/src/signature_key.rs | 4 +--- types/src/traits/block_contents.rs | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 20572510a1..a395de232f 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -149,7 +149,10 @@ impl BlockPayload for TestBlockPayload { Ok(TestTransaction::encode(self.transactions.clone())?.into_iter()) } - fn transaction_commitments(&self, _metadata: &Self::Metadata) -> Vec> { + fn transaction_commitments( + &self, + _metadata: &Self::Metadata, + ) -> Vec> { self.transactions .iter() .map(commit::Committable::commit) diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 1f47a0e018..30ee459a7e 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -124,9 +124,7 @@ impl SignatureKey for BLSPubKey { } fn genesis_proposer_pk() -> Self { - use rand::rngs::mock::StepRng; - let mut my_rng = StepRng::new(42, 1337); - let kp = KeyPair::generate(&mut my_rng); + let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed([0u8; 32])); kp.ver_key() } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 5b8bf39600..9bac9b6d7f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -70,7 +70,10 @@ pub trait BlockPayload: fn encode(&self) -> Result, Self::Error>; /// List of transaction commitments. - fn transaction_commitments(&self, metadata: &Self::Metadata) -> Vec>; + fn transaction_commitments( + &self, + metadata: &Self::Metadata, + ) -> Vec>; } /// Compute the VID payload commitment. From 858abdffe926309b3e32edb689d5e21f2d2510b8 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jan 2024 12:43:56 -0800 Subject: [PATCH 0673/1393] Modify initialize function --- hotshot/src/traits/storage/memory_storage.rs | 4 ++-- task-impls/src/consensus.rs | 1 - testing/src/block_types.rs | 5 ++++- testing/src/state_types.rs | 7 +++++++ types/src/data.rs | 4 ++-- types/src/traits/block_contents.rs | 5 ++++- types/src/traits/state.rs | 5 +++++ 7 files changed, 24 insertions(+), 7 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index aa69b886da..39f75426d3 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -145,10 +145,10 @@ mod test { view_number, _pd: PhantomData, }, - header, + header.clone(), Some(payload), dummy_leaf_commit, - TestState::initialize(), + TestState::initialize(&header), Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e603704fdb..a145438f60 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -546,7 +546,6 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - // TODO (Keyao) If not, should/how do we update? if let Ok(parent_leaf) = consensus.get_leaf(view - 1) { let parent_state = parent_leaf.get_state(); let Ok(state) = parent_state.validate_and_apply_header( diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 20572510a1..a395de232f 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -149,7 +149,10 @@ impl BlockPayload for TestBlockPayload { Ok(TestTransaction::encode(self.transactions.clone())?.into_iter()) } - fn transaction_commitments(&self, _metadata: &Self::Metadata) -> Vec> { + fn transaction_commitments( + &self, + _metadata: &Self::Metadata, + ) -> Vec> { self.transactions .iter() .map(commit::Committable::commit) diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index a4775fe9c7..e146ffbce4 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -82,6 +82,13 @@ impl State for TestState { }) } + fn initialize(block_header: &Self::BlockHeader) -> Self { + Self { + block_height: block_header.block_number, + ..Default::default() + } + } + fn on_commit(&self) {} fn metadata(&self) -> Self::Metadata {} diff --git a/types/src/data.rs b/types/src/data.rs index 4f5575b3e1..c24ecc4670 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -359,9 +359,9 @@ impl Leaf { view_number: TYPES::Time::genesis(), justify_qc: QuorumCertificate::::genesis(), parent_commitment: fake_commitment(), - block_header, + block_header: block_header.clone(), block_payload: Some(block_payload), - state: ::initialize(), + state: ::initialize(&block_header), rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 5b8bf39600..9bac9b6d7f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -70,7 +70,10 @@ pub trait BlockPayload: fn encode(&self) -> Result, Self::Error>; /// List of transaction commitments. - fn transaction_commitments(&self, metadata: &Self::Metadata) -> Vec>; + fn transaction_commitments( + &self, + metadata: &Self::Metadata, + ) -> Vec>; } /// Compute the VID payload commitment. diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 79ab6f6132..e55971a455 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -62,6 +62,11 @@ pub trait State: view_number: &Self::Time, ) -> Result; + /// Initialize the state with the given block header. + /// + /// This can also be used to reinitialize the state for catchup. + fn initialize(block_header: &Self::BlockHeader) -> Self; + /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); From 1960d3e72ddb7d7747e308ebfcce1e01e2605342 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jan 2024 13:29:26 -0800 Subject: [PATCH 0674/1393] Add InstanceState trait and impl, add it to NodeTypes, rename State and state. --- hotshot/examples/infra/mod.rs | 15 ++++--- hotshot/src/lib.rs | 6 +-- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/traits.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 7 +-- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 4 +- task-impls/src/da.rs | 2 +- task-impls/src/view_sync.rs | 2 +- testing/src/block_types.rs | 3 +- testing/src/node_types.rs | 5 ++- testing/src/overall_safety_task.rs | 2 +- testing/src/state_types.rs | 20 +++++---- testing/src/task_helpers.rs | 7 ++- testing/src/test_runner.rs | 2 +- testing/tests/atomic_storage.rs | 6 +-- testing/tests/consensus_task.rs | 2 +- testing/tests/da_task.rs | 2 +- testing/tests/memory_network.rs | 8 ++-- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- testing/tests/view_sync_task.rs | 2 +- types/src/data.rs | 24 +++++----- types/src/simple_certificate.rs | 2 +- types/src/traits.rs | 4 +- types/src/traits/block_contents.rs | 9 ++++ types/src/traits/node_implementation.rs | 25 ++++++----- types/src/traits/{state.rs => states.rs} | 44 ++++++++----------- types/src/traits/storage.rs | 4 +- types/src/utils.rs | 4 +- 31 files changed, 118 insertions(+), 105 deletions(-) rename types/src/traits/{state.rs => states.rs} (89%) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 4d58de6f57..d2f79e4ac6 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -32,10 +32,11 @@ use hotshot_types::{ data::{Leaf, TestableLeaf}, event::{Event, EventType}, traits::{ + block_contents::TestableBlock, election::Membership, network::CommunicationChannel, node_implementation::NodeType, - state::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, TestableState}, }, HotShotConfig, }; @@ -289,7 +290,7 @@ pub trait RunDA< Storage = MemoryStorage, >, > where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, TYPES: NodeType, Leaf: TestableLeaf, @@ -512,7 +513,7 @@ impl< NODE, > for WebServerDARun where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -614,7 +615,7 @@ impl< NODE, > for Libp2pDARun where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -707,7 +708,7 @@ impl< NODE, > for CombinedDARun where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -816,7 +817,7 @@ pub async fn main_entry_point< >( args: ValidatorArgs, ) where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { @@ -871,7 +872,7 @@ pub async fn main_entry_point< for round in 0..rounds { for _ in 0..transactions_to_send_per_round { - let mut txn = ::create_random_transaction( + let mut txn = ::create_random_transaction( None, &mut txn_rng, transaction_size as u64, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 61b6f14cd0..bfb9a9bdda 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -62,9 +62,9 @@ use hotshot_types::{ network::{CommunicationChannel, NetworkError}, node_implementation::{NodeType, SendToTasks}, signature_key::SignatureKey, - state::ConsensusTime, + states::{ConsensusTime, ValidatedState}, storage::StoredView, - BlockPayload, State, + BlockPayload, }, HotShotConfig, }; @@ -362,7 +362,7 @@ impl> SystemContext { /// # Panics /// /// Panics if internal state for consensus is inconsistent - pub async fn get_state(&self) -> TYPES::StateType { + pub async fn get_state(&self) -> TYPES::ValidatedState { self.inner .consensus .read() diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 563afb53cd..918b9c8ce0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -34,7 +34,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{NodeImplementation, NodeType}, - state::ConsensusTime, + states::ConsensusTime, BlockPayload, }, }; diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 9684bd61c7..203fa39705 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -4,7 +4,7 @@ mod networking; mod node_implementation; mod storage; -pub use hotshot_types::traits::{BlockPayload, State}; +pub use hotshot_types::traits::{BlockPayload, ValidatedState}; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; pub use storage::{Result as StorageResult, Storage}; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index caf2d72d3e..24e6c3d11a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -24,7 +24,7 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - state::ConsensusTime, + states::ConsensusTime, }, }; use hotshot_utils::bincode::bincode_opts; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 39f75426d3..a09beecd4c 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -113,13 +113,14 @@ mod test { use hotshot_testing::{ block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, node_types::TestTypes, - state_types::TestState, + state_types::TestValidatedState, }; use hotshot_types::{ data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, State, + node_implementation::NodeType, signature_key::SignatureKey, states::ConsensusTime, + ValidatedState, }, }; use std::marker::PhantomData; @@ -148,7 +149,7 @@ mod test { header.clone(), Some(payload), dummy_leaf_commit, - TestState::initialize(&header), + TestValidatedState::initialize(&header), Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 2040061489..31e2adab73 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_types::{ error::HotShotError, event::EventType, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, + traits::{node_implementation::NodeType, states::ConsensusTime, storage::Storage}, }; use std::sync::Arc; use tracing::error; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a145438f60..3a9e5aee9f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -30,8 +30,8 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - state::ConsensusTime, - BlockPayload, State, + states::{ConsensusTime, ValidatedState}, + BlockPayload, }, utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0e5bb160c1..3c523ba59a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -23,7 +23,7 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - state::ConsensusTime, + states::ConsensusTime, }, utils::ViewInner, vote::HasViewNumber, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 1c9e607fb0..3626e496d1 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -37,7 +37,7 @@ use hotshot_types::{ election::Membership, network::CommunicationChannel, node_implementation::{NodeImplementation, NodeType}, - state::ConsensusTime, + states::ConsensusTime, }, }; use snafu::Snafu; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index a395de232f..19597d336c 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -7,8 +7,7 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{vid_commitment, BlockHeader, Transaction}, - state::TestableBlock, + block_contents::{vid_commitment, BlockHeader, TestableBlock, Transaction}, BlockPayload, }, }; diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index a63207f73d..c000f7060f 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -2,7 +2,7 @@ use hotshot::traits::election::static_committee::GeneralStaticCommittee; use crate::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot::traits::{ @@ -40,7 +40,8 @@ impl NodeType for TestTypes { type SignatureKey = BLSPubKey; type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = TestState; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f41497febd..340fce34d5 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -105,7 +105,7 @@ pub struct RoundResult { pub block_map: HashMap, /// state -> # entries decided on that state - pub state_map: HashMap, + pub state_map: HashMap, pub num_txns_map: HashMap, } diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index e146ffbce4..2153573d63 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -4,8 +4,8 @@ use commit::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, ViewNumber}, traits::{ - state::{ConsensusTime, TestableState}, - BlockPayload, State, + states::{ConsensusTime, InstanceState, TestableState, ValidatedState}, + BlockPayload, }, }; @@ -18,7 +18,7 @@ pub use crate::node_types::TestTypes; /// sequencing demo entry state #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct TestState { +pub struct TestValidatedState { /// the block height block_height: u64, /// the view number @@ -27,7 +27,7 @@ pub struct TestState { prev_state_commitment: Commitment, } -impl Committable for TestState { +impl Committable for TestValidatedState { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Test State Commit") .u64_field("block_height", self.block_height) @@ -41,7 +41,7 @@ impl Committable for TestState { } } -impl Default for TestState { +impl Default for TestValidatedState { fn default() -> Self { Self { block_height: 0, @@ -51,7 +51,7 @@ impl Default for TestState { } } -impl State for TestState { +impl ValidatedState for TestValidatedState { type Error = BlockError; type BlockHeader = TestBlockHeader; @@ -75,7 +75,7 @@ impl State for TestState { } else if self.view_number >= *view_number { return Err(BlockError::InvalidBlockHeader); } - Ok(TestState { + Ok(TestValidatedState { block_height: self.block_height + 1, view_number: *view_number, prev_state_commitment: self.commit(), @@ -94,7 +94,7 @@ impl State for TestState { fn metadata(&self) -> Self::Metadata {} } -impl TestableState for TestState { +impl TestableState for TestValidatedState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, @@ -105,3 +105,7 @@ impl TestableState for TestState { TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) } } + +pub struct TestInstanceState {} + +impl InstanceState for TestInstanceState {} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 5d6aceaec5..f4314071a8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,13 +18,12 @@ use hotshot_types::{ message::Proposal, simple_certificate::QuorumCertificate, traits::{ - block_contents::vid_commitment, - block_contents::BlockHeader, + block_contents::{vid_commitment, BlockHeader, TestableBlock}, consensus_api::ConsensusApi, election::Membership, node_implementation::NodeType, - state::{ConsensusTime, TestableBlock}, - BlockPayload, State, + states::{ConsensusTime, ValidatedState}, + BlockPayload, }, vote::HasViewNumber, }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 776f9268e5..3466f6d8ed 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -17,7 +17,7 @@ use hotshot_task::{ use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, - traits::{election::Membership, node_implementation::NodeType, state::ConsensusTime}, + traits::{election::Membership, node_implementation::NodeType, states::ConsensusTime}, HotShotConfig, ValidatorConfig, }; use std::{ diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index a10bee76ed..0071621978 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -5,9 +5,9 @@ use hotshot::{ random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, VDemoState, }, - traits::{BlockPayload, State, Storage}, + traits::{BlockPayload, Storage, ValidatedState}, }; -use hotshot_types::{data::ViewNumber, traits::state::TestableState}; +use hotshot_types::{data::ViewNumber, traits::statesTestableState}; use rand::thread_rng; type AtomicStorage = hotshot::traits::implementations::AtomicStorage; @@ -94,7 +94,7 @@ async fn test_happy_path_leaves() { let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); // Add some leaves - let mut leaves = Vec::>::new(); + let mut leaves = Vec::>::new(); for _ in 0..10 { let leaf = random_validating_leaf(DEntryBlock { previous_block: StateHash::random(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 38ca3933a5..e8e2ccf36c 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -11,7 +11,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::{state::ConsensusTime, State}, + traits::{states::ConsensusTime, ValidatedState}, }; use hotshot_types::{ simple_vote::QuorumData, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 3d532126b0..1edb772a09 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -9,7 +9,7 @@ use hotshot_types::{ simple_vote::{DAData, DAVote}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - node_implementation::NodeType, state::ConsensusTime, + node_implementation::NodeType, states::ConsensusTime, }, }; use sha2::{Digest, Sha256}; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index f5b277fce6..43f428455b 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -8,9 +8,10 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; +use hotshot_testing::state_types::TestInstanceState; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestState, + state_types::TestValidatedState, }; use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; @@ -20,7 +21,7 @@ use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, - traits::state::ConsensusTime, + traits::states::ConsensusTime, }; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; @@ -50,7 +51,8 @@ impl NodeType for Test { type SignatureKey = BLSPubKey; type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = TestState; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index ce8cd3a46f..09b9504e8a 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, state::ConsensusTime}, + traits::{consensus_api::ConsensusApi, states::ConsensusTime}, }; use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index a57d20412b..7a1206380e 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -8,7 +8,7 @@ use hotshot_testing::{ use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, state::ConsensusTime}, + traits::{consensus_api::ConsensusApi, states::ConsensusTime}, }; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 2a0f5c94df..11f72f22a8 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,7 +1,7 @@ use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; -use hotshot_types::{data::ViewNumber, traits::state::ConsensusTime}; +use hotshot_types::{data::ViewNumber, traits::states::ConsensusTime}; use std::collections::HashMap; #[cfg(test)] diff --git a/types/src/data.rs b/types/src/data.rs index c24ecc4670..cc1dcc9716 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -6,14 +6,13 @@ use crate::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, traits::{ - block_contents::vid_commitment, - block_contents::BlockHeader, + block_contents::{vid_commitment, BlockHeader, TestableBlock}, election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - state::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, TestableState, ValidatedState}, storage::StoredView, - BlockPayload, State, + BlockPayload, }, vote::{Certificate, HasViewNumber}, }; @@ -105,9 +104,10 @@ impl std::ops::Sub for ViewNumber { } } -/// The `Transaction` type associated with a `State`, as a syntactic shortcut -pub type Transaction = <::BlockPayload as BlockPayload>::Transaction; -/// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut +/// The `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut +pub type Transaction = + <::BlockPayload as BlockPayload>::Transaction; +/// `Commitment` to the `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut pub type TxnCommitment = Commitment>; /// A proposal to start providing data availability for a block. @@ -307,7 +307,7 @@ pub struct Leaf { pub block_payload: Option, /// State. - pub state: TYPES::StateType, + pub state: TYPES::ValidatedState, /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, @@ -361,7 +361,7 @@ impl Leaf { parent_commitment: fake_commitment(), block_header: block_header.clone(), block_payload: Some(block_payload), - state: ::initialize(&block_header), + state: ::initialize(&block_header), rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } @@ -430,7 +430,7 @@ impl Leaf { self.get_block_header().payload_commitment() } /// The blockchain state after appending this leaf. - pub fn get_state(&self) -> TYPES::StateType { + pub fn get_state(&self) -> TYPES::ValidatedState { self.state.clone() } /// Transactions rejected or invalidated by the application of this leaf. @@ -458,7 +458,7 @@ impl Leaf { impl TestableLeaf for Leaf where - TYPES::StateType: TestableState, + TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; @@ -468,7 +468,7 @@ where rng: &mut dyn rand::RngCore, padding: u64, ) -> <::BlockPayload as BlockPayload>::Transaction { - TYPES::StateType::create_random_transaction(None, rng, padding) + TYPES::ValidatedState::create_random_transaction(None, rng, padding) } } /// Fake the thing a genesis block points to. Needed to avoid infinite recursion diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index ab1e721a0d..2bcbdf74b7 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -17,7 +17,7 @@ use crate::{ }, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - state::ConsensusTime, + states::ConsensusTime, }, vote::{Certificate, HasViewNumber}, }; diff --git a/types/src/traits.rs b/types/src/traits.rs index 9c25e5fcb8..a698d2c158 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -8,8 +8,8 @@ pub mod node_implementation; pub mod qc; pub mod signature_key; pub mod stake_table; -pub mod state; +pub mod states; pub mod storage; pub use block_contents::BlockPayload; -pub use state::State; +pub use states::ValidatedState; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 9bac9b6d7f..de0b496964 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -76,6 +76,15 @@ pub trait BlockPayload: ) -> Vec>; } +/// extra functions required on block to be usable by hotshot-testing +pub trait TestableBlock: BlockPayload + Debug { + /// generate a genesis block + fn genesis() -> Self; + + /// the number of transactions in this block + fn txn_count(&self) -> u64; +} + /// Compute the VID payload commitment. /// # Panics /// If the VID computation fails. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index b886684d7d..123f2b3a58 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -4,12 +4,12 @@ //! describing the overall behavior of a node, as a composition of implementations of the node trait. use super::{ - block_contents::{BlockHeader, Transaction}, + block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, network::{CommunicationChannel, TestableNetworkingImplementation}, - state::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, InstanceState, TestableState}, storage::{StorageError, StorageState, TestableStorage}, - State, + ValidatedState, }; use crate::{ data::{Leaf, TestableLeaf}, @@ -138,7 +138,7 @@ pub trait TestableNodeImplementation: NodeImplementation /// otherwise panics /// `padding` is the bytes of padding to add to the transaction fn state_create_random_transaction( - state: Option<&TYPES::StateType>, + state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction; @@ -178,7 +178,7 @@ pub trait TestableNodeImplementation: NodeImplementation #[async_trait] impl> TestableNodeImplementation for I where - TYPES::StateType: TestableState, + TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, I::QuorumNetwork: TestableChannelImplementation, @@ -196,11 +196,11 @@ where } fn state_create_random_transaction( - state: Option<&TYPES::StateType>, + state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - ::create_random_transaction(state, rng, padding) + ::create_random_transaction(state, rng, padding) } fn leaf_create_random_transaction( @@ -279,13 +279,13 @@ pub trait NodeType: { /// The time type that this hotshot setup is using. /// - /// This should be the same `Time` that `StateType::Time` is using. + /// This should be the same `Time` that `ValidatedState::Time` is using. type Time: ConsensusTime; /// The block header type that this hotshot setup is using. type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// - /// This should be the same block that `StateType::BlockPayload` is using. + /// This should be the same block that `ValidatedState::BlockPayload` is using. type BlockPayload: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; @@ -296,13 +296,16 @@ pub trait NodeType: /// The election config type that this hotshot setup is using. type ElectionConfigType: ElectionConfig; - /// The state type that this hotshot setup is using. - type StateType: State< + /// The validated state type that this hotshot setup is using. + type ValidatedState: ValidatedState< BlockHeader = Self::BlockHeader, BlockPayload = Self::BlockPayload, Time = Self::Time, >; + /// The instance-level state type that this hotshot setup is using. + type InstanceState: InstanceState; + /// Membership used for this implementation type Membership: Membership; } diff --git a/types/src/traits/state.rs b/types/src/traits/states.rs similarity index 89% rename from types/src/traits/state.rs rename to types/src/traits/states.rs index e55971a455..5350849ab6 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/states.rs @@ -14,7 +14,7 @@ use std::{ ops::{Deref, Sub}, }; -use super::block_contents::BlockHeader; +use super::block_contents::{BlockHeader, TestableBlock}; /// Abstraction over the state that blocks modify /// @@ -24,7 +24,7 @@ use super::block_contents::BlockHeader; /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](State::validate_and_apply_header)) -pub trait State: +pub trait ValidatedState: Serialize + DeserializeOwned + Clone @@ -74,6 +74,21 @@ pub trait State: fn metadata(&self) -> Self::Metadata; } +/// extra functions required on state to be usable by hotshot-testing +pub trait TestableState: ValidatedState +where + ::BlockPayload: TestableBlock, +{ + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn create_random_transaction( + state: Option<&Self>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; +} + // TODO Seuqnecing here means involving DA in consensus /// Trait for time compatibility needed for reward collection @@ -106,26 +121,5 @@ pub trait ConsensusTime: fn get_u64(&self) -> u64; } -/// extra functions required on state to be usable by hotshot-testing -pub trait TestableState: State -where - ::BlockPayload: TestableBlock, -{ - /// Creates random transaction if possible - /// otherwise panics - /// `padding` is the bytes of padding to add to the transaction - fn create_random_transaction( - state: Option<&Self>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction; -} - -/// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: BlockPayload + Debug { - /// generate a genesis block - fn genesis() -> Self; - - /// the number of transactions in this block - fn txn_count(&self) -> u64; -} +/// Instance-level state, which allows us to fetch missing validated state. +pub trait InstanceState {} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index f189eca84c..3401583dab 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -131,7 +131,7 @@ pub struct StoredView { /// It may be empty for nodes not in the DA committee. pub block_payload: Option, /// State. - pub state: TYPES::StateType, + pub state: TYPES::ValidatedState, /// transactions rejected in this view pub rejected: Vec, /// the proposer id @@ -152,7 +152,7 @@ where block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, - state: TYPES::StateType, + state: TYPES::ValidatedState, rejected: Vec<::Transaction>, proposer_id: TYPES::SignatureKey, ) -> Self { diff --git a/types/src/utils.rs b/types/src/utils.rs index 9e1e4ff733..e23cd85129 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -2,7 +2,7 @@ use crate::{ data::{Leaf, VidCommitment}, - traits::{node_implementation::NodeType, State}, + traits::{node_implementation::NodeType, ValidatedState}, }; use commit::Commitment; use std::ops::Deref; @@ -24,7 +24,7 @@ pub enum ViewInner { /// Proposed leaf leaf: Commitment>, /// Application-specific data. - metadata: ::Metadata, + metadata: ::Metadata, }, /// Leaf has failed Failed, From 9bce4da5ace5ed43ba745a5b1b265f1cc8272bd4 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jan 2024 13:30:19 -0800 Subject: [PATCH 0675/1393] Revert "Add InstanceState trait and impl, add it to NodeTypes, rename State and state." This reverts commit 69418a1411a22e0693b44d5a0d82c630dd7d94f6. --- hotshot/examples/infra/mod.rs | 15 +++---- hotshot/src/lib.rs | 6 +-- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/traits.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 7 ++- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 4 +- task-impls/src/da.rs | 2 +- task-impls/src/view_sync.rs | 2 +- testing/src/block_types.rs | 3 +- testing/src/node_types.rs | 5 +-- testing/src/overall_safety_task.rs | 2 +- testing/src/state_types.rs | 20 ++++----- testing/src/task_helpers.rs | 7 +-- testing/src/test_runner.rs | 2 +- testing/tests/atomic_storage.rs | 6 +-- testing/tests/consensus_task.rs | 2 +- testing/tests/da_task.rs | 2 +- testing/tests/memory_network.rs | 8 ++-- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- testing/tests/view_sync_task.rs | 2 +- types/src/data.rs | 24 +++++----- types/src/simple_certificate.rs | 2 +- types/src/traits.rs | 4 +- types/src/traits/block_contents.rs | 9 ---- types/src/traits/node_implementation.rs | 25 +++++------ types/src/traits/{states.rs => state.rs} | 44 +++++++++++-------- types/src/traits/storage.rs | 4 +- types/src/utils.rs | 4 +- 31 files changed, 105 insertions(+), 118 deletions(-) rename types/src/traits/{states.rs => state.rs} (89%) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index d2f79e4ac6..4d58de6f57 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -32,11 +32,10 @@ use hotshot_types::{ data::{Leaf, TestableLeaf}, event::{Event, EventType}, traits::{ - block_contents::TestableBlock, election::Membership, network::CommunicationChannel, node_implementation::NodeType, - states::{ConsensusTime, TestableState}, + state::{ConsensusTime, TestableBlock, TestableState}, }, HotShotConfig, }; @@ -290,7 +289,7 @@ pub trait RunDA< Storage = MemoryStorage, >, > where - ::ValidatedState: TestableState, + ::StateType: TestableState, ::BlockPayload: TestableBlock, TYPES: NodeType, Leaf: TestableLeaf, @@ -513,7 +512,7 @@ impl< NODE, > for WebServerDARun where - ::ValidatedState: TestableState, + ::StateType: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -615,7 +614,7 @@ impl< NODE, > for Libp2pDARun where - ::ValidatedState: TestableState, + ::StateType: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -708,7 +707,7 @@ impl< NODE, > for CombinedDARun where - ::ValidatedState: TestableState, + ::StateType: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -817,7 +816,7 @@ pub async fn main_entry_point< >( args: ValidatorArgs, ) where - ::ValidatedState: TestableState, + ::StateType: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { @@ -872,7 +871,7 @@ pub async fn main_entry_point< for round in 0..rounds { for _ in 0..transactions_to_send_per_round { - let mut txn = ::create_random_transaction( + let mut txn = ::create_random_transaction( None, &mut txn_rng, transaction_size as u64, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index bfb9a9bdda..61b6f14cd0 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -62,9 +62,9 @@ use hotshot_types::{ network::{CommunicationChannel, NetworkError}, node_implementation::{NodeType, SendToTasks}, signature_key::SignatureKey, - states::{ConsensusTime, ValidatedState}, + state::ConsensusTime, storage::StoredView, - BlockPayload, + BlockPayload, State, }, HotShotConfig, }; @@ -362,7 +362,7 @@ impl> SystemContext { /// # Panics /// /// Panics if internal state for consensus is inconsistent - pub async fn get_state(&self) -> TYPES::ValidatedState { + pub async fn get_state(&self) -> TYPES::StateType { self.inner .consensus .read() diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 918b9c8ce0..563afb53cd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -34,7 +34,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{NodeImplementation, NodeType}, - states::ConsensusTime, + state::ConsensusTime, BlockPayload, }, }; diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 203fa39705..9684bd61c7 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -4,7 +4,7 @@ mod networking; mod node_implementation; mod storage; -pub use hotshot_types::traits::{BlockPayload, ValidatedState}; +pub use hotshot_types::traits::{BlockPayload, State}; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; pub use storage::{Result as StorageResult, Storage}; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 24e6c3d11a..caf2d72d3e 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -24,7 +24,7 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - states::ConsensusTime, + state::ConsensusTime, }, }; use hotshot_utils::bincode::bincode_opts; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index a09beecd4c..39f75426d3 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -113,14 +113,13 @@ mod test { use hotshot_testing::{ block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, node_types::TestTypes, - state_types::TestValidatedState, + state_types::TestState, }; use hotshot_types::{ data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, states::ConsensusTime, - ValidatedState, + node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, State, }, }; use std::marker::PhantomData; @@ -149,7 +148,7 @@ mod test { header.clone(), Some(payload), dummy_leaf_commit, - TestValidatedState::initialize(&header), + TestState::initialize(&header), Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 31e2adab73..2040061489 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_types::{ error::HotShotError, event::EventType, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, states::ConsensusTime, storage::Storage}, + traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, }; use std::sync::Arc; use tracing::error; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 3a9e5aee9f..a145438f60 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -30,8 +30,8 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - states::{ConsensusTime, ValidatedState}, - BlockPayload, + state::ConsensusTime, + BlockPayload, State, }, utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 3c523ba59a..0e5bb160c1 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -23,7 +23,7 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - states::ConsensusTime, + state::ConsensusTime, }, utils::ViewInner, vote::HasViewNumber, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 3626e496d1..1c9e607fb0 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -37,7 +37,7 @@ use hotshot_types::{ election::Membership, network::CommunicationChannel, node_implementation::{NodeImplementation, NodeType}, - states::ConsensusTime, + state::ConsensusTime, }, }; use snafu::Snafu; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 19597d336c..a395de232f 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -7,7 +7,8 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{vid_commitment, BlockHeader, TestableBlock, Transaction}, + block_contents::{vid_commitment, BlockHeader, Transaction}, + state::TestableBlock, BlockPayload, }, }; diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index c000f7060f..a63207f73d 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -2,7 +2,7 @@ use hotshot::traits::election::static_committee::GeneralStaticCommittee; use crate::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::{TestInstanceState, TestValidatedState}, + state_types::TestState, }; use hotshot::traits::{ @@ -40,8 +40,7 @@ impl NodeType for TestTypes { type SignatureKey = BLSPubKey; type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type ValidatedState = TestValidatedState; - type InstanceState = TestInstanceState; + type StateType = TestState; type Membership = GeneralStaticCommittee; } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 340fce34d5..f41497febd 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -105,7 +105,7 @@ pub struct RoundResult { pub block_map: HashMap, /// state -> # entries decided on that state - pub state_map: HashMap, + pub state_map: HashMap, pub num_txns_map: HashMap, } diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 2153573d63..e146ffbce4 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -4,8 +4,8 @@ use commit::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, ViewNumber}, traits::{ - states::{ConsensusTime, InstanceState, TestableState, ValidatedState}, - BlockPayload, + state::{ConsensusTime, TestableState}, + BlockPayload, State, }, }; @@ -18,7 +18,7 @@ pub use crate::node_types::TestTypes; /// sequencing demo entry state #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct TestValidatedState { +pub struct TestState { /// the block height block_height: u64, /// the view number @@ -27,7 +27,7 @@ pub struct TestValidatedState { prev_state_commitment: Commitment, } -impl Committable for TestValidatedState { +impl Committable for TestState { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Test State Commit") .u64_field("block_height", self.block_height) @@ -41,7 +41,7 @@ impl Committable for TestValidatedState { } } -impl Default for TestValidatedState { +impl Default for TestState { fn default() -> Self { Self { block_height: 0, @@ -51,7 +51,7 @@ impl Default for TestValidatedState { } } -impl ValidatedState for TestValidatedState { +impl State for TestState { type Error = BlockError; type BlockHeader = TestBlockHeader; @@ -75,7 +75,7 @@ impl ValidatedState for TestValidatedState { } else if self.view_number >= *view_number { return Err(BlockError::InvalidBlockHeader); } - Ok(TestValidatedState { + Ok(TestState { block_height: self.block_height + 1, view_number: *view_number, prev_state_commitment: self.commit(), @@ -94,7 +94,7 @@ impl ValidatedState for TestValidatedState { fn metadata(&self) -> Self::Metadata {} } -impl TestableState for TestValidatedState { +impl TestableState for TestState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, @@ -105,7 +105,3 @@ impl TestableState for TestValidatedState { TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) } } - -pub struct TestInstanceState {} - -impl InstanceState for TestInstanceState {} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index f4314071a8..5d6aceaec5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,12 +18,13 @@ use hotshot_types::{ message::Proposal, simple_certificate::QuorumCertificate, traits::{ - block_contents::{vid_commitment, BlockHeader, TestableBlock}, + block_contents::vid_commitment, + block_contents::BlockHeader, consensus_api::ConsensusApi, election::Membership, node_implementation::NodeType, - states::{ConsensusTime, ValidatedState}, - BlockPayload, + state::{ConsensusTime, TestableBlock}, + BlockPayload, State, }, vote::HasViewNumber, }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 3466f6d8ed..776f9268e5 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -17,7 +17,7 @@ use hotshot_task::{ use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, - traits::{election::Membership, node_implementation::NodeType, states::ConsensusTime}, + traits::{election::Membership, node_implementation::NodeType, state::ConsensusTime}, HotShotConfig, ValidatorConfig, }; use std::{ diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index 0071621978..a10bee76ed 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -5,9 +5,9 @@ use hotshot::{ random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, VDemoState, }, - traits::{BlockPayload, Storage, ValidatedState}, + traits::{BlockPayload, State, Storage}, }; -use hotshot_types::{data::ViewNumber, traits::statesTestableState}; +use hotshot_types::{data::ViewNumber, traits::state::TestableState}; use rand::thread_rng; type AtomicStorage = hotshot::traits::implementations::AtomicStorage; @@ -94,7 +94,7 @@ async fn test_happy_path_leaves() { let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); // Add some leaves - let mut leaves = Vec::>::new(); + let mut leaves = Vec::>::new(); for _ in 0..10 { let leaf = random_validating_leaf(DEntryBlock { previous_block: StateHash::random(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index e8e2ccf36c..38ca3933a5 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -11,7 +11,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::{states::ConsensusTime, ValidatedState}, + traits::{state::ConsensusTime, State}, }; use hotshot_types::{ simple_vote::QuorumData, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 1edb772a09..3d532126b0 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -9,7 +9,7 @@ use hotshot_types::{ simple_vote::{DAData, DAVote}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - node_implementation::NodeType, states::ConsensusTime, + node_implementation::NodeType, state::ConsensusTime, }, }; use sha2::{Digest, Sha256}; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 43f428455b..f5b277fce6 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -8,10 +8,9 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; -use hotshot_testing::state_types::TestInstanceState; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestValidatedState, + state_types::TestState, }; use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; @@ -21,7 +20,7 @@ use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, - traits::states::ConsensusTime, + traits::state::ConsensusTime, }; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; @@ -51,8 +50,7 @@ impl NodeType for Test { type SignatureKey = BLSPubKey; type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type ValidatedState = TestValidatedState; - type InstanceState = TestInstanceState; + type StateType = TestState; type Membership = GeneralStaticCommittee; } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 09b9504e8a..ce8cd3a46f 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, states::ConsensusTime}, + traits::{consensus_api::ConsensusApi, state::ConsensusTime}, }; use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 7a1206380e..a57d20412b 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -8,7 +8,7 @@ use hotshot_testing::{ use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, states::ConsensusTime}, + traits::{consensus_api::ConsensusApi, state::ConsensusTime}, }; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 11f72f22a8..2a0f5c94df 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,7 +1,7 @@ use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; -use hotshot_types::{data::ViewNumber, traits::states::ConsensusTime}; +use hotshot_types::{data::ViewNumber, traits::state::ConsensusTime}; use std::collections::HashMap; #[cfg(test)] diff --git a/types/src/data.rs b/types/src/data.rs index cc1dcc9716..c24ecc4670 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -6,13 +6,14 @@ use crate::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, traits::{ - block_contents::{vid_commitment, BlockHeader, TestableBlock}, + block_contents::vid_commitment, + block_contents::BlockHeader, election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - states::{ConsensusTime, TestableState, ValidatedState}, + state::{ConsensusTime, TestableBlock, TestableState}, storage::StoredView, - BlockPayload, + BlockPayload, State, }, vote::{Certificate, HasViewNumber}, }; @@ -104,10 +105,9 @@ impl std::ops::Sub for ViewNumber { } } -/// The `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut -pub type Transaction = - <::BlockPayload as BlockPayload>::Transaction; -/// `Commitment` to the `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut +/// The `Transaction` type associated with a `State`, as a syntactic shortcut +pub type Transaction = <::BlockPayload as BlockPayload>::Transaction; +/// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut pub type TxnCommitment = Commitment>; /// A proposal to start providing data availability for a block. @@ -307,7 +307,7 @@ pub struct Leaf { pub block_payload: Option, /// State. - pub state: TYPES::ValidatedState, + pub state: TYPES::StateType, /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, @@ -361,7 +361,7 @@ impl Leaf { parent_commitment: fake_commitment(), block_header: block_header.clone(), block_payload: Some(block_payload), - state: ::initialize(&block_header), + state: ::initialize(&block_header), rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } @@ -430,7 +430,7 @@ impl Leaf { self.get_block_header().payload_commitment() } /// The blockchain state after appending this leaf. - pub fn get_state(&self) -> TYPES::ValidatedState { + pub fn get_state(&self) -> TYPES::StateType { self.state.clone() } /// Transactions rejected or invalidated by the application of this leaf. @@ -458,7 +458,7 @@ impl Leaf { impl TestableLeaf for Leaf where - TYPES::ValidatedState: TestableState, + TYPES::StateType: TestableState, TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; @@ -468,7 +468,7 @@ where rng: &mut dyn rand::RngCore, padding: u64, ) -> <::BlockPayload as BlockPayload>::Transaction { - TYPES::ValidatedState::create_random_transaction(None, rng, padding) + TYPES::StateType::create_random_transaction(None, rng, padding) } } /// Fake the thing a genesis block points to. Needed to avoid infinite recursion diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 2bcbdf74b7..ab1e721a0d 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -17,7 +17,7 @@ use crate::{ }, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - states::ConsensusTime, + state::ConsensusTime, }, vote::{Certificate, HasViewNumber}, }; diff --git a/types/src/traits.rs b/types/src/traits.rs index a698d2c158..9c25e5fcb8 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -8,8 +8,8 @@ pub mod node_implementation; pub mod qc; pub mod signature_key; pub mod stake_table; -pub mod states; +pub mod state; pub mod storage; pub use block_contents::BlockPayload; -pub use states::ValidatedState; +pub use state::State; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index de0b496964..9bac9b6d7f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -76,15 +76,6 @@ pub trait BlockPayload: ) -> Vec>; } -/// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: BlockPayload + Debug { - /// generate a genesis block - fn genesis() -> Self; - - /// the number of transactions in this block - fn txn_count(&self) -> u64; -} - /// Compute the VID payload commitment. /// # Panics /// If the VID computation fails. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 123f2b3a58..b886684d7d 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -4,12 +4,12 @@ //! describing the overall behavior of a node, as a composition of implementations of the node trait. use super::{ - block_contents::{BlockHeader, TestableBlock, Transaction}, + block_contents::{BlockHeader, Transaction}, election::ElectionConfig, network::{CommunicationChannel, TestableNetworkingImplementation}, - states::{ConsensusTime, InstanceState, TestableState}, + state::{ConsensusTime, TestableBlock, TestableState}, storage::{StorageError, StorageState, TestableStorage}, - ValidatedState, + State, }; use crate::{ data::{Leaf, TestableLeaf}, @@ -138,7 +138,7 @@ pub trait TestableNodeImplementation: NodeImplementation /// otherwise panics /// `padding` is the bytes of padding to add to the transaction fn state_create_random_transaction( - state: Option<&TYPES::ValidatedState>, + state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction; @@ -178,7 +178,7 @@ pub trait TestableNodeImplementation: NodeImplementation #[async_trait] impl> TestableNodeImplementation for I where - TYPES::ValidatedState: TestableState, + TYPES::StateType: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, I::QuorumNetwork: TestableChannelImplementation, @@ -196,11 +196,11 @@ where } fn state_create_random_transaction( - state: Option<&TYPES::ValidatedState>, + state: Option<&TYPES::StateType>, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - ::create_random_transaction(state, rng, padding) + ::create_random_transaction(state, rng, padding) } fn leaf_create_random_transaction( @@ -279,13 +279,13 @@ pub trait NodeType: { /// The time type that this hotshot setup is using. /// - /// This should be the same `Time` that `ValidatedState::Time` is using. + /// This should be the same `Time` that `StateType::Time` is using. type Time: ConsensusTime; /// The block header type that this hotshot setup is using. type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// - /// This should be the same block that `ValidatedState::BlockPayload` is using. + /// This should be the same block that `StateType::BlockPayload` is using. type BlockPayload: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; @@ -296,16 +296,13 @@ pub trait NodeType: /// The election config type that this hotshot setup is using. type ElectionConfigType: ElectionConfig; - /// The validated state type that this hotshot setup is using. - type ValidatedState: ValidatedState< + /// The state type that this hotshot setup is using. + type StateType: State< BlockHeader = Self::BlockHeader, BlockPayload = Self::BlockPayload, Time = Self::Time, >; - /// The instance-level state type that this hotshot setup is using. - type InstanceState: InstanceState; - /// Membership used for this implementation type Membership: Membership; } diff --git a/types/src/traits/states.rs b/types/src/traits/state.rs similarity index 89% rename from types/src/traits/states.rs rename to types/src/traits/state.rs index 5350849ab6..e55971a455 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/state.rs @@ -14,7 +14,7 @@ use std::{ ops::{Deref, Sub}, }; -use super::block_contents::{BlockHeader, TestableBlock}; +use super::block_contents::BlockHeader; /// Abstraction over the state that blocks modify /// @@ -24,7 +24,7 @@ use super::block_contents::{BlockHeader, TestableBlock}; /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](State::validate_and_apply_header)) -pub trait ValidatedState: +pub trait State: Serialize + DeserializeOwned + Clone @@ -74,21 +74,6 @@ pub trait ValidatedState: fn metadata(&self) -> Self::Metadata; } -/// extra functions required on state to be usable by hotshot-testing -pub trait TestableState: ValidatedState -where - ::BlockPayload: TestableBlock, -{ - /// Creates random transaction if possible - /// otherwise panics - /// `padding` is the bytes of padding to add to the transaction - fn create_random_transaction( - state: Option<&Self>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction; -} - // TODO Seuqnecing here means involving DA in consensus /// Trait for time compatibility needed for reward collection @@ -121,5 +106,26 @@ pub trait ConsensusTime: fn get_u64(&self) -> u64; } -/// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState {} +/// extra functions required on state to be usable by hotshot-testing +pub trait TestableState: State +where + ::BlockPayload: TestableBlock, +{ + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn create_random_transaction( + state: Option<&Self>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; +} + +/// extra functions required on block to be usable by hotshot-testing +pub trait TestableBlock: BlockPayload + Debug { + /// generate a genesis block + fn genesis() -> Self; + + /// the number of transactions in this block + fn txn_count(&self) -> u64; +} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 3401583dab..f189eca84c 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -131,7 +131,7 @@ pub struct StoredView { /// It may be empty for nodes not in the DA committee. pub block_payload: Option, /// State. - pub state: TYPES::ValidatedState, + pub state: TYPES::StateType, /// transactions rejected in this view pub rejected: Vec, /// the proposer id @@ -152,7 +152,7 @@ where block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, - state: TYPES::ValidatedState, + state: TYPES::StateType, rejected: Vec<::Transaction>, proposer_id: TYPES::SignatureKey, ) -> Self { diff --git a/types/src/utils.rs b/types/src/utils.rs index e23cd85129..9e1e4ff733 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -2,7 +2,7 @@ use crate::{ data::{Leaf, VidCommitment}, - traits::{node_implementation::NodeType, ValidatedState}, + traits::{node_implementation::NodeType, State}, }; use commit::Commitment; use std::ops::Deref; @@ -24,7 +24,7 @@ pub enum ViewInner { /// Proposed leaf leaf: Commitment>, /// Application-specific data. - metadata: ::Metadata, + metadata: ::Metadata, }, /// Leaf has failed Failed, From 1c07e9aafe139f8b86058e42e736e5d30da640b0 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jan 2024 13:31:52 -0800 Subject: [PATCH 0676/1393] Revert "Revert "Add InstanceState trait and impl, add it to NodeTypes, rename State and state."" This reverts commit db91ca5643b21221038c4f09d37a835d8c7e5677. --- hotshot/examples/infra/mod.rs | 15 ++++--- hotshot/src/lib.rs | 6 +-- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/traits.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 7 +-- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 4 +- task-impls/src/da.rs | 2 +- task-impls/src/view_sync.rs | 2 +- testing/src/block_types.rs | 3 +- testing/src/node_types.rs | 5 ++- testing/src/overall_safety_task.rs | 2 +- testing/src/state_types.rs | 20 +++++---- testing/src/task_helpers.rs | 7 ++- testing/src/test_runner.rs | 2 +- testing/tests/atomic_storage.rs | 6 +-- testing/tests/consensus_task.rs | 2 +- testing/tests/da_task.rs | 2 +- testing/tests/memory_network.rs | 8 ++-- testing/tests/network_task.rs | 2 +- testing/tests/vid_task.rs | 2 +- testing/tests/view_sync_task.rs | 2 +- types/src/data.rs | 24 +++++----- types/src/simple_certificate.rs | 2 +- types/src/traits.rs | 4 +- types/src/traits/block_contents.rs | 9 ++++ types/src/traits/node_implementation.rs | 25 ++++++----- types/src/traits/{state.rs => states.rs} | 44 ++++++++----------- types/src/traits/storage.rs | 4 +- types/src/utils.rs | 4 +- 31 files changed, 118 insertions(+), 105 deletions(-) rename types/src/traits/{state.rs => states.rs} (89%) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 4d58de6f57..d2f79e4ac6 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -32,10 +32,11 @@ use hotshot_types::{ data::{Leaf, TestableLeaf}, event::{Event, EventType}, traits::{ + block_contents::TestableBlock, election::Membership, network::CommunicationChannel, node_implementation::NodeType, - state::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, TestableState}, }, HotShotConfig, }; @@ -289,7 +290,7 @@ pub trait RunDA< Storage = MemoryStorage, >, > where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, TYPES: NodeType, Leaf: TestableLeaf, @@ -512,7 +513,7 @@ impl< NODE, > for WebServerDARun where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -614,7 +615,7 @@ impl< NODE, > for Libp2pDARun where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -707,7 +708,7 @@ impl< NODE, > for CombinedDARun where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -816,7 +817,7 @@ pub async fn main_entry_point< >( args: ValidatorArgs, ) where - ::StateType: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { @@ -871,7 +872,7 @@ pub async fn main_entry_point< for round in 0..rounds { for _ in 0..transactions_to_send_per_round { - let mut txn = ::create_random_transaction( + let mut txn = ::create_random_transaction( None, &mut txn_rng, transaction_size as u64, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 61b6f14cd0..bfb9a9bdda 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -62,9 +62,9 @@ use hotshot_types::{ network::{CommunicationChannel, NetworkError}, node_implementation::{NodeType, SendToTasks}, signature_key::SignatureKey, - state::ConsensusTime, + states::{ConsensusTime, ValidatedState}, storage::StoredView, - BlockPayload, State, + BlockPayload, }, HotShotConfig, }; @@ -362,7 +362,7 @@ impl> SystemContext { /// # Panics /// /// Panics if internal state for consensus is inconsistent - pub async fn get_state(&self) -> TYPES::StateType { + pub async fn get_state(&self) -> TYPES::ValidatedState { self.inner .consensus .read() diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 563afb53cd..918b9c8ce0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -34,7 +34,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, node_implementation::{NodeImplementation, NodeType}, - state::ConsensusTime, + states::ConsensusTime, BlockPayload, }, }; diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 9684bd61c7..203fa39705 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -4,7 +4,7 @@ mod networking; mod node_implementation; mod storage; -pub use hotshot_types::traits::{BlockPayload, State}; +pub use hotshot_types::traits::{BlockPayload, ValidatedState}; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; pub use storage::{Result as StorageResult, Storage}; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index caf2d72d3e..24e6c3d11a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -24,7 +24,7 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - state::ConsensusTime, + states::ConsensusTime, }, }; use hotshot_utils::bincode::bincode_opts; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 39f75426d3..a09beecd4c 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -113,13 +113,14 @@ mod test { use hotshot_testing::{ block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, node_types::TestTypes, - state_types::TestState, + state_types::TestValidatedState, }; use hotshot_types::{ data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, State, + node_implementation::NodeType, signature_key::SignatureKey, states::ConsensusTime, + ValidatedState, }, }; use std::marker::PhantomData; @@ -148,7 +149,7 @@ mod test { header.clone(), Some(payload), dummy_leaf_commit, - TestState::initialize(&header), + TestValidatedState::initialize(&header), Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 2040061489..31e2adab73 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_types::{ error::HotShotError, event::EventType, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, + traits::{node_implementation::NodeType, states::ConsensusTime, storage::Storage}, }; use std::sync::Arc; use tracing::error; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a145438f60..3a9e5aee9f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -30,8 +30,8 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - state::ConsensusTime, - BlockPayload, State, + states::{ConsensusTime, ValidatedState}, + BlockPayload, }, utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0e5bb160c1..3c523ba59a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -23,7 +23,7 @@ use hotshot_types::{ network::{CommunicationChannel, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - state::ConsensusTime, + states::ConsensusTime, }, utils::ViewInner, vote::HasViewNumber, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 1c9e607fb0..3626e496d1 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -37,7 +37,7 @@ use hotshot_types::{ election::Membership, network::CommunicationChannel, node_implementation::{NodeImplementation, NodeType}, - state::ConsensusTime, + states::ConsensusTime, }, }; use snafu::Snafu; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index a395de232f..19597d336c 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -7,8 +7,7 @@ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ - block_contents::{vid_commitment, BlockHeader, Transaction}, - state::TestableBlock, + block_contents::{vid_commitment, BlockHeader, TestableBlock, Transaction}, BlockPayload, }, }; diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index a63207f73d..c000f7060f 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -2,7 +2,7 @@ use hotshot::traits::election::static_committee::GeneralStaticCommittee; use crate::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot::traits::{ @@ -40,7 +40,8 @@ impl NodeType for TestTypes { type SignatureKey = BLSPubKey; type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = TestState; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f41497febd..340fce34d5 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -105,7 +105,7 @@ pub struct RoundResult { pub block_map: HashMap, /// state -> # entries decided on that state - pub state_map: HashMap, + pub state_map: HashMap, pub num_txns_map: HashMap, } diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index e146ffbce4..2153573d63 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -4,8 +4,8 @@ use commit::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, ViewNumber}, traits::{ - state::{ConsensusTime, TestableState}, - BlockPayload, State, + states::{ConsensusTime, InstanceState, TestableState, ValidatedState}, + BlockPayload, }, }; @@ -18,7 +18,7 @@ pub use crate::node_types::TestTypes; /// sequencing demo entry state #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct TestState { +pub struct TestValidatedState { /// the block height block_height: u64, /// the view number @@ -27,7 +27,7 @@ pub struct TestState { prev_state_commitment: Commitment, } -impl Committable for TestState { +impl Committable for TestValidatedState { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Test State Commit") .u64_field("block_height", self.block_height) @@ -41,7 +41,7 @@ impl Committable for TestState { } } -impl Default for TestState { +impl Default for TestValidatedState { fn default() -> Self { Self { block_height: 0, @@ -51,7 +51,7 @@ impl Default for TestState { } } -impl State for TestState { +impl ValidatedState for TestValidatedState { type Error = BlockError; type BlockHeader = TestBlockHeader; @@ -75,7 +75,7 @@ impl State for TestState { } else if self.view_number >= *view_number { return Err(BlockError::InvalidBlockHeader); } - Ok(TestState { + Ok(TestValidatedState { block_height: self.block_height + 1, view_number: *view_number, prev_state_commitment: self.commit(), @@ -94,7 +94,7 @@ impl State for TestState { fn metadata(&self) -> Self::Metadata {} } -impl TestableState for TestState { +impl TestableState for TestValidatedState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, @@ -105,3 +105,7 @@ impl TestableState for TestState { TestTransaction(vec![0; RANDOM_TX_BASE_SIZE + (padding as usize)]) } } + +pub struct TestInstanceState {} + +impl InstanceState for TestInstanceState {} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 5d6aceaec5..f4314071a8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -18,13 +18,12 @@ use hotshot_types::{ message::Proposal, simple_certificate::QuorumCertificate, traits::{ - block_contents::vid_commitment, - block_contents::BlockHeader, + block_contents::{vid_commitment, BlockHeader, TestableBlock}, consensus_api::ConsensusApi, election::Membership, node_implementation::NodeType, - state::{ConsensusTime, TestableBlock}, - BlockPayload, State, + states::{ConsensusTime, ValidatedState}, + BlockPayload, }, vote::HasViewNumber, }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 776f9268e5..3466f6d8ed 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -17,7 +17,7 @@ use hotshot_task::{ use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, - traits::{election::Membership, node_implementation::NodeType, state::ConsensusTime}, + traits::{election::Membership, node_implementation::NodeType, states::ConsensusTime}, HotShotConfig, ValidatorConfig, }; use std::{ diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index a10bee76ed..0071621978 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -5,9 +5,9 @@ use hotshot::{ random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, VDemoState, }, - traits::{BlockPayload, State, Storage}, + traits::{BlockPayload, Storage, ValidatedState}, }; -use hotshot_types::{data::ViewNumber, traits::state::TestableState}; +use hotshot_types::{data::ViewNumber, traits::statesTestableState}; use rand::thread_rng; type AtomicStorage = hotshot::traits::implementations::AtomicStorage; @@ -94,7 +94,7 @@ async fn test_happy_path_leaves() { let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); // Add some leaves - let mut leaves = Vec::>::new(); + let mut leaves = Vec::>::new(); for _ in 0..10 { let leaf = random_validating_leaf(DEntryBlock { previous_block: StateHash::random(), diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 38ca3933a5..e8e2ccf36c 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -11,7 +11,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::{state::ConsensusTime, State}, + traits::{states::ConsensusTime, ValidatedState}, }; use hotshot_types::{ simple_vote::QuorumData, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 3d532126b0..1edb772a09 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -9,7 +9,7 @@ use hotshot_types::{ simple_vote::{DAData, DAVote}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - node_implementation::NodeType, state::ConsensusTime, + node_implementation::NodeType, states::ConsensusTime, }, }; use sha2::{Digest, Sha256}; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index f5b277fce6..43f428455b 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -8,9 +8,10 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; +use hotshot_testing::state_types::TestInstanceState; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestState, + state_types::TestValidatedState, }; use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; @@ -20,7 +21,7 @@ use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, - traits::state::ConsensusTime, + traits::states::ConsensusTime, }; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; @@ -50,7 +51,8 @@ impl NodeType for Test { type SignatureKey = BLSPubKey; type Transaction = TestTransaction; type ElectionConfigType = StaticElectionConfig; - type StateType = TestState; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index ce8cd3a46f..09b9504e8a 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, state::ConsensusTime}, + traits::{consensus_api::ConsensusApi, states::ConsensusTime}, }; use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index a57d20412b..7a1206380e 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -8,7 +8,7 @@ use hotshot_testing::{ use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, state::ConsensusTime}, + traits::{consensus_api::ConsensusApi, states::ConsensusTime}, }; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 2a0f5c94df..11f72f22a8 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,7 +1,7 @@ use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; -use hotshot_types::{data::ViewNumber, traits::state::ConsensusTime}; +use hotshot_types::{data::ViewNumber, traits::states::ConsensusTime}; use std::collections::HashMap; #[cfg(test)] diff --git a/types/src/data.rs b/types/src/data.rs index c24ecc4670..cc1dcc9716 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -6,14 +6,13 @@ use crate::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, traits::{ - block_contents::vid_commitment, - block_contents::BlockHeader, + block_contents::{vid_commitment, BlockHeader, TestableBlock}, election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - state::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, TestableState, ValidatedState}, storage::StoredView, - BlockPayload, State, + BlockPayload, }, vote::{Certificate, HasViewNumber}, }; @@ -105,9 +104,10 @@ impl std::ops::Sub for ViewNumber { } } -/// The `Transaction` type associated with a `State`, as a syntactic shortcut -pub type Transaction = <::BlockPayload as BlockPayload>::Transaction; -/// `Commitment` to the `Transaction` type associated with a `State`, as a syntactic shortcut +/// The `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut +pub type Transaction = + <::BlockPayload as BlockPayload>::Transaction; +/// `Commitment` to the `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut pub type TxnCommitment = Commitment>; /// A proposal to start providing data availability for a block. @@ -307,7 +307,7 @@ pub struct Leaf { pub block_payload: Option, /// State. - pub state: TYPES::StateType, + pub state: TYPES::ValidatedState, /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, @@ -361,7 +361,7 @@ impl Leaf { parent_commitment: fake_commitment(), block_header: block_header.clone(), block_payload: Some(block_payload), - state: ::initialize(&block_header), + state: ::initialize(&block_header), rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } @@ -430,7 +430,7 @@ impl Leaf { self.get_block_header().payload_commitment() } /// The blockchain state after appending this leaf. - pub fn get_state(&self) -> TYPES::StateType { + pub fn get_state(&self) -> TYPES::ValidatedState { self.state.clone() } /// Transactions rejected or invalidated by the application of this leaf. @@ -458,7 +458,7 @@ impl Leaf { impl TestableLeaf for Leaf where - TYPES::StateType: TestableState, + TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; @@ -468,7 +468,7 @@ where rng: &mut dyn rand::RngCore, padding: u64, ) -> <::BlockPayload as BlockPayload>::Transaction { - TYPES::StateType::create_random_transaction(None, rng, padding) + TYPES::ValidatedState::create_random_transaction(None, rng, padding) } } /// Fake the thing a genesis block points to. Needed to avoid infinite recursion diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index ab1e721a0d..2bcbdf74b7 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -17,7 +17,7 @@ use crate::{ }, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - state::ConsensusTime, + states::ConsensusTime, }, vote::{Certificate, HasViewNumber}, }; diff --git a/types/src/traits.rs b/types/src/traits.rs index 9c25e5fcb8..a698d2c158 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -8,8 +8,8 @@ pub mod node_implementation; pub mod qc; pub mod signature_key; pub mod stake_table; -pub mod state; +pub mod states; pub mod storage; pub use block_contents::BlockPayload; -pub use state::State; +pub use states::ValidatedState; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 9bac9b6d7f..de0b496964 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -76,6 +76,15 @@ pub trait BlockPayload: ) -> Vec>; } +/// extra functions required on block to be usable by hotshot-testing +pub trait TestableBlock: BlockPayload + Debug { + /// generate a genesis block + fn genesis() -> Self; + + /// the number of transactions in this block + fn txn_count(&self) -> u64; +} + /// Compute the VID payload commitment. /// # Panics /// If the VID computation fails. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index b886684d7d..123f2b3a58 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -4,12 +4,12 @@ //! describing the overall behavior of a node, as a composition of implementations of the node trait. use super::{ - block_contents::{BlockHeader, Transaction}, + block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, network::{CommunicationChannel, TestableNetworkingImplementation}, - state::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, InstanceState, TestableState}, storage::{StorageError, StorageState, TestableStorage}, - State, + ValidatedState, }; use crate::{ data::{Leaf, TestableLeaf}, @@ -138,7 +138,7 @@ pub trait TestableNodeImplementation: NodeImplementation /// otherwise panics /// `padding` is the bytes of padding to add to the transaction fn state_create_random_transaction( - state: Option<&TYPES::StateType>, + state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction; @@ -178,7 +178,7 @@ pub trait TestableNodeImplementation: NodeImplementation #[async_trait] impl> TestableNodeImplementation for I where - TYPES::StateType: TestableState, + TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, I::QuorumNetwork: TestableChannelImplementation, @@ -196,11 +196,11 @@ where } fn state_create_random_transaction( - state: Option<&TYPES::StateType>, + state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - ::create_random_transaction(state, rng, padding) + ::create_random_transaction(state, rng, padding) } fn leaf_create_random_transaction( @@ -279,13 +279,13 @@ pub trait NodeType: { /// The time type that this hotshot setup is using. /// - /// This should be the same `Time` that `StateType::Time` is using. + /// This should be the same `Time` that `ValidatedState::Time` is using. type Time: ConsensusTime; /// The block header type that this hotshot setup is using. type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// - /// This should be the same block that `StateType::BlockPayload` is using. + /// This should be the same block that `ValidatedState::BlockPayload` is using. type BlockPayload: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; @@ -296,13 +296,16 @@ pub trait NodeType: /// The election config type that this hotshot setup is using. type ElectionConfigType: ElectionConfig; - /// The state type that this hotshot setup is using. - type StateType: State< + /// The validated state type that this hotshot setup is using. + type ValidatedState: ValidatedState< BlockHeader = Self::BlockHeader, BlockPayload = Self::BlockPayload, Time = Self::Time, >; + /// The instance-level state type that this hotshot setup is using. + type InstanceState: InstanceState; + /// Membership used for this implementation type Membership: Membership; } diff --git a/types/src/traits/state.rs b/types/src/traits/states.rs similarity index 89% rename from types/src/traits/state.rs rename to types/src/traits/states.rs index e55971a455..5350849ab6 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/states.rs @@ -14,7 +14,7 @@ use std::{ ops::{Deref, Sub}, }; -use super::block_contents::BlockHeader; +use super::block_contents::{BlockHeader, TestableBlock}; /// Abstraction over the state that blocks modify /// @@ -24,7 +24,7 @@ use super::block_contents::BlockHeader; /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](State::validate_and_apply_header)) -pub trait State: +pub trait ValidatedState: Serialize + DeserializeOwned + Clone @@ -74,6 +74,21 @@ pub trait State: fn metadata(&self) -> Self::Metadata; } +/// extra functions required on state to be usable by hotshot-testing +pub trait TestableState: ValidatedState +where + ::BlockPayload: TestableBlock, +{ + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn create_random_transaction( + state: Option<&Self>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; +} + // TODO Seuqnecing here means involving DA in consensus /// Trait for time compatibility needed for reward collection @@ -106,26 +121,5 @@ pub trait ConsensusTime: fn get_u64(&self) -> u64; } -/// extra functions required on state to be usable by hotshot-testing -pub trait TestableState: State -where - ::BlockPayload: TestableBlock, -{ - /// Creates random transaction if possible - /// otherwise panics - /// `padding` is the bytes of padding to add to the transaction - fn create_random_transaction( - state: Option<&Self>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction; -} - -/// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: BlockPayload + Debug { - /// generate a genesis block - fn genesis() -> Self; - - /// the number of transactions in this block - fn txn_count(&self) -> u64; -} +/// Instance-level state, which allows us to fetch missing validated state. +pub trait InstanceState {} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index f189eca84c..3401583dab 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -131,7 +131,7 @@ pub struct StoredView { /// It may be empty for nodes not in the DA committee. pub block_payload: Option, /// State. - pub state: TYPES::StateType, + pub state: TYPES::ValidatedState, /// transactions rejected in this view pub rejected: Vec, /// the proposer id @@ -152,7 +152,7 @@ where block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, - state: TYPES::StateType, + state: TYPES::ValidatedState, rejected: Vec<::Transaction>, proposer_id: TYPES::SignatureKey, ) -> Self { diff --git a/types/src/utils.rs b/types/src/utils.rs index 9e1e4ff733..e23cd85129 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -2,7 +2,7 @@ use crate::{ data::{Leaf, VidCommitment}, - traits::{node_implementation::NodeType, State}, + traits::{node_implementation::NodeType, ValidatedState}, }; use commit::Commitment; use std::ops::Deref; @@ -24,7 +24,7 @@ pub enum ViewInner { /// Proposed leaf leaf: Commitment>, /// Application-specific data. - metadata: ::Metadata, + metadata: ::Metadata, }, /// Leaf has failed Failed, From d154d136393309816e872d39ea4b1dc5b55a73b2 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 16 Jan 2024 07:09:55 -0800 Subject: [PATCH 0677/1393] Add instance to validated state --- testing/src/state_types.rs | 2 ++ types/src/traits/states.rs | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 2153573d63..f8909efbc1 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -54,6 +54,8 @@ impl Default for TestValidatedState { impl ValidatedState for TestValidatedState { type Error = BlockError; + type Instance = TestInstanceState; + type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 5350849ab6..f4c410b45c 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -5,6 +5,7 @@ use crate::traits::BlockPayload; use commit::Committable; +use jf_plonk::proof_system::batch_arg::Instance; use serde::{de::DeserializeOwned, Serialize}; use std::{ error::Error, @@ -39,6 +40,8 @@ pub trait ValidatedState: { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; + /// The type of the instance-level state this state is assocaited with + type Instance: InstanceState; /// The type of block header this state is associated with type BlockHeader: BlockHeader; /// The type of block payload this state is associated with @@ -51,12 +54,16 @@ pub trait ValidatedState: /// Check if the proposed block header is valid and apply it to the state if so. /// /// Returns the new state. + /// + /// # Arguments + /// * `instance` - Immutable instance-level state. /// /// # Errors /// /// If the block header is invalid or appending it would lead to an invalid state. fn validate_and_apply_header( &self, + instance: &Self::Instance, proposed_header: &Self::BlockHeader, parent_header: &Self::BlockHeader, view_number: &Self::Time, From fe87a23142cbaf9c06997b274f0ae3a11153f69b Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 12:01:51 -0500 Subject: [PATCH 0678/1393] check liveness checkeven if we cannot find the parent in storage --- task-impls/src/consensus.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 05e7b9bf2f..263960ab00 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -539,6 +539,18 @@ impl, A: ConsensusApi + ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + // If we are missing the parent from storage, the safety check will fail. But we can + // still vote if the liveness check succeeds. + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; + + drop(consensus); + + if liveness_check { + if self.vote_if_able().await { + self.current_proposal = None; + } + } + return; }; let parent_commitment = parent.commit(); From 9e7a5d60506d33f86891576a098e441386f71551 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 12:07:53 -0500 Subject: [PATCH 0679/1393] Fix clippy warning --- libp2p-networking/src/network/behaviours/dht/cache.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs index 602bb41e16..dceb36d3d8 100644 --- a/libp2p-networking/src/network/behaviours/dht/cache.rs +++ b/libp2p-networking/src/network/behaviours/dht/cache.rs @@ -65,6 +65,7 @@ pub struct Cache { config: Config, /// the cache for records (key -> value) + #[allow(clippy::struct_field_names)] cache: Arc, Vec>>, /// the expiries for the dht cache, in order (expiry time -> key) expiries: Arc>>>, From 987c98fcb5d58b29b55343881393816cda2fc1da Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 16 Jan 2024 12:32:48 -0500 Subject: [PATCH 0680/1393] more comments added --- testing/src/task_helpers.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index da473389f2..0b1604faf3 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -195,23 +195,25 @@ async fn build_quorum_proposal_and_signature( QuorumProposal, ::PureAssembledSignatureType, ) { - let temp_consensus = handle.get_consensus(); - let cur_consensus = temp_consensus.upgradable_read().await; + // build the genesis view + let genesis_consensus = handle.get_consensus(); + let cur_consensus = genesis_consensus.upgradable_read().await; let mut consensus = RwLockUpgradableReadGuard::upgrade(cur_consensus).await; let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; + // parent_view_number should be equal to 0 let parent_view_number = &consensus.high_qc.get_view_number(); let Some(parent_view) = consensus.state_map.get(parent_view_number) else { panic!("Couldn't find high QC parent in state map."); }; - let Some(leaf) = parent_view.get_leaf_commitment() else { + let Some(leaf_view_0) = parent_view.get_leaf_commitment() else { panic!("Parent of high QC points to a view without a proposal"); }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + let Some(leaf_view_0) = consensus.saved_leaves.get(&leaf_view_0) else { panic!("Failed to find high QC parent."); }; - let parent_leaf = leaf.clone(); + let parent_leaf = leaf_view_0.clone(); // every event input is seen on the event stream in the output. let block = ::genesis(); From bcd9d3f5a7b5accc588ee13bacd952f96f5c5ceb Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 16 Jan 2024 12:40:13 -0500 Subject: [PATCH 0681/1393] lint --- hotshot/src/types/handle.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 39fef671e1..52f3946ecb 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -27,9 +27,7 @@ use std::sync::Arc; use tracing::error; #[cfg(feature = "hotshot-testing")] -use hotshot_types::{ - message::{MessageKind, SequencingMessage}, -}; +use hotshot_types::message::{MessageKind, SequencingMessage}; /// Event streaming handle for a [`SystemContext`] instance running in the background /// From 1b071ba5b42f6cb0a83574b7f5b4e3b80d490a05 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 13:32:26 -0500 Subject: [PATCH 0682/1393] Fix clippy lints --- hotshot/examples/infra/mod.rs | 2 +- hotshot/src/traits/networking.rs | 3 +-- hotshot/src/traits/networking/combined_network.rs | 1 + libp2p-networking/src/network/node.rs | 4 +--- libp2p-networking/tests/common/mod.rs | 2 +- libp2p-networking/tests/counter.rs | 1 + task-impls/src/consensus.rs | 12 ++++++------ 7 files changed, 12 insertions(+), 13 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index ad3407b951..4d58de6f57 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -413,7 +413,7 @@ pub trait RunDA< block_size, } => { // this might be a obob - if let Some(leaf) = leaf_chain.get(0) { + if let Some(leaf) = leaf_chain.first() { info!("Decide event for leaf: {}", *leaf.view_number); let new_anchor = leaf.view_number; diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index dfc2100b74..f372f1c111 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -17,8 +17,7 @@ use std::{ use custom_debug::Debug; use hotshot_types::traits::metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}; pub use hotshot_types::traits::network::{ - ChannelSendSnafu, CouldNotDeliverSnafu, FailedToDeserializeSnafu, FailedToSerializeSnafu, - NetworkError, NetworkReliability, NoSuchNodeSnafu, ShutDownSnafu, + FailedToSerializeSnafu, NetworkError, NetworkReliability, }; /// Contains several `NetworkingMetrics` that we're interested in from the networking interfaces diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 301f2ca89c..eec9e6c479 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -44,6 +44,7 @@ struct Cache { /// The maximum number of items to store in the cache capacity: usize, /// The cache itself + #[allow(clippy::struct_field_names)] cache: HashSet, /// The hashes of the messages in the cache, in order of insertion hashes: Vec, diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index e4c44ca401..c42530a562 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -5,9 +5,7 @@ pub use self::{ config::{ MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, }, - handle::{ - network_node_handle_error, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, - }, + handle::{network_node_handle_error, NetworkNodeHandle, NetworkNodeHandleError}, }; use super::{ diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index ca9e967319..63da978150 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -189,7 +189,7 @@ pub async fn spin_up_swarms( .map(|(a, b)| (Some(*a), b.clone())) .collect::>() ); - + #[allow(clippy::unused_enumerate_index)] for (_idx, handle) in handles[0..num_of_nodes].iter().enumerate() { let to_share = bootstrap_addrs.clone(); handle diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index bb803cee02..cbda5c44c1 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -371,6 +371,7 @@ async fn run_request_response_increment_all( requestee_handle.modify_state(|s| *s += 1).await; info!("RR REQUESTEE IS {:?}", requestee_handle.peer_id()); let mut futs = Vec::new(); + #[allow(clippy::unused_enumerate_index)] for (_i, h) in handles.iter().enumerate() { if h.lookup_pid(requestee_handle.peer_id()).await.is_err() { error!("ERROR LOOKING UP REQUESTEE ADDRS"); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 263960ab00..8b1542bab6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -509,10 +509,8 @@ impl, A: ConsensusApi + .cloned() }; - // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { - // If no parent then just update our state map and return. We will not vote. error!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit @@ -543,12 +541,14 @@ impl, A: ConsensusApi + // still vote if the liveness check succeeds. let liveness_check = justify_qc.get_view_number() > consensus.locked_view; + if justify_qc.get_view_number() > consensus.high_qc.view_number { + consensus.high_qc = justify_qc; + } + drop(consensus); - if liveness_check { - if self.vote_if_able().await { - self.current_proposal = None; - } + if liveness_check && self.vote_if_able().await { + self.current_proposal = None; } return; From 427ef9c9b9da5d294efeef768021cf80c3f36644 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 13:45:41 -0500 Subject: [PATCH 0683/1393] Update where current_proposal is set --- task-impls/src/consensus.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 8b1542bab6..1fdd5251a6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -495,8 +495,6 @@ impl, A: ConsensusApi + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view).await; - self.current_proposal = Some(proposal.data.clone()); - let consensus = self.consensus.upgradable_read().await; // Construct the leaf. @@ -519,7 +517,7 @@ impl, A: ConsensusApi + view_number: view, justify_qc: justify_qc.clone(), parent_commitment: justify_qc.get_data().leaf_commit, - block_header: proposal.data.block_header, + block_header: proposal.data.block_header.clone(), block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), @@ -547,8 +545,12 @@ impl, A: ConsensusApi + drop(consensus); - if liveness_check && self.vote_if_able().await { - self.current_proposal = None; + if liveness_check { + self.current_proposal = Some(proposal.data.clone()); + + if self.vote_if_able().await { + self.current_proposal = None; + } } return; @@ -605,6 +607,8 @@ impl, A: ConsensusApi + return; } + self.current_proposal = Some(proposal.data.clone()); + let high_qc = leaf.justify_qc.clone(); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; From 3f6a14e65307c4569bb379960616c2617e08d0ab Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 16 Jan 2024 10:48:50 -0800 Subject: [PATCH 0684/1393] Update error handling --- task-impls/src/consensus.rs | 1 - types/src/consensus.rs | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a145438f60..f20381f1ac 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -559,7 +559,6 @@ impl, A: ConsensusApi + let leaf = Leaf { view_number: view, justify_qc: justify_qc.clone(), - // TODO (Keyao) Use info from justify QC or the parent leaf? parent_commitment: justify_qc.get_data().leaf_commit, block_header: proposal.data.block_header, state, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index eca478d53c..69831a7051 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -336,13 +336,14 @@ impl Consensus { /// Get the leaf corresponding to the given view. /// /// # Errors - /// If the view doesn't exist in the saved leaves set, due to the garbage collection for views - /// that are older than the last decided view. + /// If the view doesn't exist in the state map, due to the garbage collection for views older + /// older than the last decided view. /// /// # Panics - /// If the last decided view does not exist in the state map, which should never happen. + /// If the last decided view exists in the state map but not in the saved leaves, which should + /// never happen. pub fn get_leaf(&self, view: TYPES::Time) -> Result, HotShotError> { - let view = self.state_map.get(&view).unwrap(); + let view = self.state_map.get(&view).unwrap_or_else(||{return }); let leaf = view .get_leaf_commitment() .expect("Decided state not found! Consensus internally inconsistent"); From dfba486f772f98e23d330dd0da7f9a563ade42ad Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 13:55:04 -0500 Subject: [PATCH 0685/1393] Add clone --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1fdd5251a6..12fabb5764 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -560,7 +560,7 @@ impl, A: ConsensusApi + view_number: view, justify_qc: justify_qc.clone(), parent_commitment, - block_header: proposal.data.block_header, + block_header: proposal.data.block_header.clone(), block_payload: None, rejected: Vec::new(), timestamp: time::OffsetDateTime::now_utc().unix_timestamp_nanos(), From 940956ed2200c70d3df9a34f2ea2e2ba5227188c Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 13:58:07 -0500 Subject: [PATCH 0686/1393] Add should_propose logic --- task-impls/src/consensus.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 12fabb5764..ed99c5b9ea 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -543,6 +543,15 @@ impl, A: ConsensusApi + consensus.high_qc = justify_qc; } + let new_view = self.current_proposal.clone().unwrap().view_number + 1; + + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = self.quorum_membership.get_leader(new_view) + == self.public_key + && consensus.high_qc.view_number + == self.current_proposal.clone().unwrap().view_number; + let qc = consensus.high_qc.clone(); + drop(consensus); if liveness_check { @@ -553,6 +562,15 @@ impl, A: ConsensusApi + } } + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + self.publish_proposal_if_able(qc.view_number + 1, None) + .await; + } + return; }; let parent_commitment = parent.commit(); From 5a64c73d10989dc6ce24cda50ab6c558729748a0 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:12:59 -0500 Subject: [PATCH 0687/1393] fix should_propose logic --- task-impls/src/consensus.rs | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ed99c5b9ea..ea3207760e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -543,34 +543,33 @@ impl, A: ConsensusApi + consensus.high_qc = justify_qc; } - let new_view = self.current_proposal.clone().unwrap().view_number + 1; - - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_membership.get_leader(new_view) - == self.public_key - && consensus.high_qc.view_number - == self.current_proposal.clone().unwrap().view_number; - let qc = consensus.high_qc.clone(); + let high_qc = consensus.high_qc.clone(); drop(consensus); if liveness_check { self.current_proposal = Some(proposal.data.clone()); + let new_view = proposal.data.view_number + 1; + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = self.quorum_membership.get_leader(new_view) + == self.public_key + && high_qc.view_number + == self.current_proposal.clone().unwrap().view_number; + let qc = high_qc.clone(); + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + self.publish_proposal_if_able(qc.view_number + 1, None) + .await; + } if self.vote_if_able().await { self.current_proposal = None; } } - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - self.publish_proposal_if_able(qc.view_number + 1, None) - .await; - } - return; }; let parent_commitment = parent.commit(); From d43400875800605fca461a639c66f3824eabba25 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:33:46 -0500 Subject: [PATCH 0688/1393] Update high_qc no matter if we vote or not --- task-impls/src/consensus.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ea3207760e..0988a2a8ff 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -4,7 +4,7 @@ use crate::{ vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use commit::Committable; @@ -495,7 +495,11 @@ impl, A: ConsensusApi + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view).await; - let consensus = self.consensus.upgradable_read().await; + let mut consensus = self.consensus.write().await; + + if justify_qc.get_view_number() > consensus.high_qc.view_number { + consensus.high_qc = justify_qc.clone(); + } // Construct the leaf. let parent = if justify_qc.is_genesis { @@ -524,7 +528,6 @@ impl, A: ConsensusApi + proposer_id: sender.to_bytes(), }; - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; consensus.state_map.insert( view, View { @@ -539,10 +542,6 @@ impl, A: ConsensusApi + // still vote if the liveness check succeeds. let liveness_check = justify_qc.get_view_number() > consensus.locked_view; - if justify_qc.get_view_number() > consensus.high_qc.view_number { - consensus.high_qc = justify_qc; - } - let high_qc = consensus.high_qc.clone(); drop(consensus); @@ -713,7 +712,6 @@ impl, A: ConsensusApi + }; // promote lock here to add proposal to statemap - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; if high_qc.view_number > consensus.high_qc.view_number { consensus.high_qc = high_qc; } From d808bcb5987657ba8636da166c8b3f7cf00e8e95 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:43:03 -0500 Subject: [PATCH 0689/1393] remove redundant high_qc check --- task-impls/src/consensus.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 0988a2a8ff..47a9d1ed3d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -711,10 +711,6 @@ impl, A: ConsensusApi + HashSet::new() }; - // promote lock here to add proposal to statemap - if high_qc.view_number > consensus.high_qc.view_number { - consensus.high_qc = high_qc; - } consensus.state_map.insert( view, View { From 8b29ed3ebac25d04842ce5fe5eec21f01ac5bce2 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:45:17 -0500 Subject: [PATCH 0690/1393] remove redundant high_qc check --- task-impls/src/consensus.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 47a9d1ed3d..7d0f62577d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -625,7 +625,6 @@ impl, A: ConsensusApi + self.current_proposal = Some(proposal.data.clone()); - let high_qc = leaf.justify_qc.clone(); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; let mut last_view_number_visited = view; From 2710e601aceb43e941b732ac358159da06ae6b03 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 16 Jan 2024 15:31:09 -0500 Subject: [PATCH 0691/1393] more comments --- testing/src/task_helpers.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 1a034b1e19..7fbe21cf64 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -251,6 +251,7 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { // save states for the previous view to pass all the qc checks + // In the long term, we want to get rid of this, do not manually update consensus state consensus.state_map.insert( ViewNumber::new(cur_view - 1), View { From f9545538565b3c2187430daf5a960d3caa9b30d8 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 15:45:52 -0500 Subject: [PATCH 0692/1393] Fix deadlock --- task-impls/src/consensus.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7d0f62577d..0ea05a593d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -4,7 +4,7 @@ use crate::{ vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::RwLock; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use commit::Committable; @@ -495,11 +495,7 @@ impl, A: ConsensusApi + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view).await; - let mut consensus = self.consensus.write().await; - - if justify_qc.get_view_number() > consensus.high_qc.view_number { - consensus.high_qc = justify_qc.clone(); - } + let consensus = self.consensus.upgradable_read().await; // Construct the leaf. let parent = if justify_qc.is_genesis { @@ -511,6 +507,13 @@ impl, A: ConsensusApi + .cloned() }; + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + + if justify_qc.get_view_number() > consensus.high_qc.view_number { + debug!("Updating high QC"); + consensus.high_qc = justify_qc.clone(); + } + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { error!( From e6544620d97217d41e9ff5989631470e7bf1b706 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:02:35 -0500 Subject: [PATCH 0693/1393] Add logging --- task-impls/src/consensus.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 0ea05a593d..bcd1165e91 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -571,6 +571,7 @@ impl, A: ConsensusApi + self.current_proposal = None; } } + warn!("Failed liveneess check; cannot find parent either"); return; }; From 5a191b03e8720796231af584451ba8b2b3501a9a Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:21:50 -0500 Subject: [PATCH 0694/1393] Update logs --- task-impls/src/consensus.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index bcd1165e91..24f4bf1e2c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -546,6 +546,8 @@ impl, A: ConsensusApi + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; let high_qc = consensus.high_qc.clone(); + let locked_view = consensus.locked_view.clone(); + drop(consensus); @@ -571,7 +573,7 @@ impl, A: ConsensusApi + self.current_proposal = None; } } - warn!("Failed liveneess check; cannot find parent either"); + warn!("Failed liveneess check; cannot find parent either\n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", high_qc, proposal.data.clone(), locked_view); return; }; @@ -623,7 +625,7 @@ impl, A: ConsensusApi + // Skip if both saftey and liveness checks fail. if !safety_check && !liveness_check { - error!("Failed safety check and liveness check"); + error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view); return; } From e170066dad3cb3486acc764ff6066c1f6c619520 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:24:52 -0500 Subject: [PATCH 0695/1393] Fix lint --- task-impls/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 24f4bf1e2c..ce8b07b37c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -546,7 +546,7 @@ impl, A: ConsensusApi + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; let high_qc = consensus.high_qc.clone(); - let locked_view = consensus.locked_view.clone(); + let locked_view = consensus.locked_view; drop(consensus); From 6c56263e47ef2be34fad5fbcf457c8f6f0fcb492 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 18 Jan 2024 12:14:48 -0500 Subject: [PATCH 0696/1393] Add message versioning + types for HotShot upgradability (#2393) * added upgrade message type * added ability to run arbitrary cargo commands with just * added unit tests and fixed message/certificate type * added justfile command * pass through rustfmt * update after rebasing onto main * fixed for removal of hotshot_signature_key * fix lints * fixed import * fixed upgrade message types, added upgrade event types * cleanup * switched to using common version struct * fixed test * removed empty file --- constants/Cargo.toml | 4 +- constants/src/lib.rs | 12 ++++ hotshot/src/lib.rs | 9 ++- .../src/traits/election/static_committee.rs | 4 ++ .../traits/networking/web_server_network.rs | 11 ++++ hotshot/src/traits/storage/atomic_storage.rs | 6 +- task-impls/src/events.rs | 20 +++++-- task-impls/src/network.rs | 12 +++- testing/Cargo.toml | 2 + testing/tests/memory_network.rs | 2 + testing/tests/unit.rs | 3 + testing/tests/unit/message.rs | 58 +++++++++++++++++++ types/Cargo.toml | 1 + types/src/data.rs | 20 +++++++ types/src/message.rs | 34 ++++++++++- types/src/simple_certificate.rs | 17 +++++- types/src/simple_vote.rs | 35 +++++++++++ types/src/traits/consensus_type.rs | 0 types/src/traits/election.rs | 3 + web_server/src/config.rs | 8 +++ 20 files changed, 246 insertions(+), 15 deletions(-) create mode 100644 testing/tests/unit.rs create mode 100644 testing/tests/unit/message.rs delete mode 100644 types/src/traits/consensus_type.rs diff --git a/constants/Cargo.toml b/constants/Cargo.toml index 6f04253d2e..bfaa0eb731 100644 --- a/constants/Cargo.toml +++ b/constants/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "hotshot-constants" -version.workspace = true +edition = { workspace = true } +version = { workspace = true } [dependencies] +serde = { workspace = true } diff --git a/constants/src/lib.rs b/constants/src/lib.rs index d61a1a3c75..15231aaaaf 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -1,5 +1,7 @@ //! configurable constants for hotshot +use serde::{Deserialize, Serialize}; + /// the number of views to gather information for ahead of time pub const LOOK_AHEAD: u64 = 5; @@ -14,3 +16,13 @@ pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Type for protocol version number +pub struct Version { + pub major: u16, + pub minor: u16, +} + +/// Constants for the current version number used by the program +pub const PROGRAM_PROTOCOL_VERSION: Version = Version { major: 0, minor: 1 }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f85af247a5..1d54d75f57 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -43,6 +43,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; +use hotshot_constants::PROGRAM_PROTOCOL_VERSION; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, task_launcher::TaskRunner, @@ -346,6 +347,7 @@ impl> SystemContext { .da_network .broadcast_message( Message { + version: PROGRAM_PROTOCOL_VERSION, sender: api.inner.public_key.clone(), kind: MessageKind::from(message), }, @@ -454,7 +456,11 @@ impl> SystemContext { .networks .quorum_network .broadcast_message( - Message { sender: pk, kind }, + Message { + version: PROGRAM_PROTOCOL_VERSION, + sender: pk, + kind, + }, // TODO this is morally wrong &inner.memberships.quorum_membership.clone(), ) @@ -484,6 +490,7 @@ impl> SystemContext { .quorum_network .direct_message( Message { + version: PROGRAM_PROTOCOL_VERSION, sender: self.inner.public_key.clone(), kind: kind.into(), }, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 6e95823726..ccb9798fd4 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -124,6 +124,10 @@ where NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64) / 3) + 1).unwrap() } + fn upgrade_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 9) / 10) + 1).unwrap() + } + fn get_committee( &self, _view_number: ::Time, diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 7d7767bdc9..a6990e894c 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -261,6 +261,7 @@ impl Inner { } MessagePurpose::DAC => config::get_da_certificate_route(view_number), MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` + MessagePurpose::Upgrade => config::get_upgrade_route(view_number), }; if message_purpose == MessagePurpose::Data { @@ -417,6 +418,15 @@ impl Inner { MessagePurpose::Internal => { error!("Received internal message in web server network"); } + + MessagePurpose::Upgrade => { + self.broadcast_poll_queue + .write() + .await + .push(deserialized_messages[0].clone()); + + return Ok(()); + } } } Ok(None) => { @@ -646,6 +656,7 @@ impl WebServerNetwork { MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), MessagePurpose::DAC => config::post_da_certificate_route(*view_number), MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), + MessagePurpose::Upgrade => config::post_upgrade_route(*view_number), }; let network_msg: SendMsg> = SendMsg { diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs index e3903fbf32..e4c9a0ffc6 100644 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -10,9 +10,9 @@ use async_trait::async_trait; use atomic_store::{AtomicStore, AtomicStoreLoader}; use commit::Commitment; use hotshot_types::traits::storage::{ - AtomicStoreSnafu, Storage, StorageError, StorageResult, StorageState, StorageUpdater, - TestableStorage, - }; + AtomicStoreSnafu, Storage, StorageError, StorageResult, StorageState, StorageUpdater, + TestableStorage, +}; use serde::{de::DeserializeOwned, Serialize}; use snafu::ResultExt; use std::{path::Path, sync::Arc}; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9bbfc7e832..92e49d02ea 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -2,14 +2,14 @@ use crate::view_sync::ViewSyncPhase; use either::Either; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, VidCommitment, VidDisperse}, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidCommitment, VidDisperse}, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, - ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{node_implementation::NodeType, BlockPayload}, @@ -111,4 +111,16 @@ pub enum HotShotEvent { /// /// Like [`HotShotEvent::DAProposalRecv`]. VidDisperseRecv(Proposal>, TYPES::SignatureKey), + /// Upgrade proposal has been received from the network + UpgradeProposalRecv(UpgradeProposal), + /// Upgrade proposal has been sent to the network + UpgradeProposalSend(UpgradeProposal), + /// Upgrade vote has been received from the network + UpgradeVoteRecv(UpgradeVote), + /// Upgrade vote has been sent to the network + UpgradeVoteSend(UpgradeVote), + /// Upgrade certificate has been received from the network + UpgradeCertificateRecv(UpgradeCertificate), + /// Upgrade certificate has been sent to the network + UpgradeCertificateSend(UpgradeCertificate), } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index cdef8af4d2..05587207a9 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,5 +1,6 @@ use crate::events::HotShotEvent; use either::Either::{self, Left, Right}; +use hotshot_constants::PROGRAM_PROTOCOL_VERSION; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, task::{FilterEvent, HotShotTaskCompleted, TS}, @@ -85,6 +86,15 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::TimeoutVote(message) => { HotShotEvent::TimeoutVoteRecv(message) } + GeneralConsensusMessage::UpgradeCertificate(message) => { + HotShotEvent::UpgradeCertificateRecv(message) + } + GeneralConsensusMessage::UpgradeProposal(message) => { + HotShotEvent::UpgradeProposalRecv(message) + } + GeneralConsensusMessage::UpgradeVote(message) => { + HotShotEvent::UpgradeVoteRecv(message) + } GeneralConsensusMessage::InternalTrigger(_) => { error!("Got unexpected message type in network task!"); return; @@ -279,8 +289,8 @@ impl> return None; } }; - let message = Message { + version: PROGRAM_PROTOCOL_VERSION, sender, kind: message_kind, }; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 32318f1924..f79c6210d9 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -13,12 +13,14 @@ slow-tests = [] [dependencies] async-compatibility-layer = { workspace = true } sha3 = "^0.10" +bincode = { workspace = true } commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ "hotshot-testing", ], default-features = false } +hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index f5b277fce6..998da85301 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -8,6 +8,7 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; +use hotshot_constants::PROGRAM_PROTOCOL_VERSION; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestState, @@ -109,6 +110,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec = ViewSyncCommitData { + relay: 37, + round: view_number, + }; + let simple_certificate = SimpleCertificate { + data: data.clone(), + vote_commitment: data.commit(), + view_number, + signatures: None, + is_genesis: false, + _pd: PhantomData, + }; + let message = Message { + version: version.clone(), + sender, + kind: MessageKind::Consensus(SequencingMessage(Left( + GeneralConsensusMessage::ViewSyncCommitCertificate(simple_certificate), + ))), + }; + let serialized_message: Vec = bincode::serialize(&message).unwrap(); + // The versions we've read from the message + let major_version_read = u16::from_le_bytes(serialized_message[..2].try_into().unwrap()); + let minor_version_read = u16::from_le_bytes(serialized_message[2..4].try_into().unwrap()); + + assert_eq!(version.major, major_version_read); + assert_eq!(version.minor, minor_version_read); +} diff --git a/types/Cargo.toml b/types/Cargo.toml index cb7bd033be..f899534a0e 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -30,6 +30,7 @@ either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } +hotshot-constants = { path = "../constants" } hotshot-task = { path = "../task", default-features = false } hotshot-utils = { path = "../utils" } jf-plonk = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index 0c3994d4c9..569e218f10 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -5,6 +5,7 @@ use crate::{ simple_certificate::{QuorumCertificate, TimeoutCertificate}, + simple_vote::UpgradeProposalData, traits::{ block_contents::vid_commitment, block_contents::BlockHeader, @@ -121,6 +122,19 @@ pub struct DAProposal { pub view_number: TYPES::Time, } +/// A proposal to upgrade the network +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound = "TYPES: NodeType")] +pub struct UpgradeProposal +where + TYPES: NodeType, +{ + /// The information about which version we are upgrading to. + pub upgrade_proposal: UpgradeProposalData, + /// View this proposal applies to + pub view_number: TYPES::Time, +} + /// The VID scheme type used in `HotShot`. pub type VidScheme = jf_primitives::vid::advz::Advz; pub use jf_primitives::vid::VidScheme as VidSchemeTrait; @@ -223,6 +237,12 @@ impl HasViewNumber for QuorumProposal { } } +impl HasViewNumber for UpgradeProposal { + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} + /// A state change encoded in a leaf. /// /// [`DeltasType`] represents a [block](NodeType::BlockPayload), but it may not contain the block in diff --git a/types/src/message.rs b/types/src/message.rs index f56116d121..337982beac 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,13 +3,14 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use crate::data::QuorumProposal; +use crate::data::{QuorumProposal, UpgradeProposal}; use crate::simple_certificate::{ - DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + DACertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }; use crate::simple_vote::{ - DAVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, + DAVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + ViewSyncPreCommitVote, }; use crate::traits::signature_key::SignatureKey; use crate::vote::HasViewNumber; @@ -24,6 +25,7 @@ use crate::{ use derivative::Derivative; use either::Either::{self, Left, Right}; +use hotshot_constants::Version; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, marker::PhantomData}; @@ -32,6 +34,9 @@ use std::{fmt::Debug, marker::PhantomData}; #[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] pub struct Message { + /// The version of the protocol in use for this message + pub version: Version, + /// The sender of this message pub sender: TYPES::SignatureKey, @@ -78,6 +83,8 @@ pub enum MessagePurpose { Data, /// VID disperse, like [`Proposal`]. VidDisperse, + /// Message with an upgrade proposal. + Upgrade, } // TODO (da) make it more customized to the consensus layer, maybe separating the specific message @@ -184,6 +191,9 @@ impl ProcessedGeneralConsensusMessage { GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) => unimplemented!(), GeneralConsensusMessage::ViewSyncCommitCertificate(_) => unimplemented!(), GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => unimplemented!(), + GeneralConsensusMessage::UpgradeCertificate(_) => unimplemented!(), + GeneralConsensusMessage::UpgradeProposal(_) => unimplemented!(), + GeneralConsensusMessage::UpgradeVote(_) => unimplemented!(), } } } @@ -295,6 +305,15 @@ pub enum GeneralConsensusMessage { /// Message with a Timeout vote TimeoutVote(TimeoutVote), + /// Message with an upgrade certificate + UpgradeCertificate(UpgradeCertificate), + + /// Message with an upgrade proposal + UpgradeProposal(UpgradeProposal), + + /// Message with an upgrade vote + UpgradeVote(UpgradeVote), + /// Internal ONLY message indicating a view interrupt. #[serde(skip)] InternalTrigger(InternalTrigger), @@ -364,6 +383,11 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.get_view_number() } + GeneralConsensusMessage::UpgradeCertificate(message) => { + message.get_view_number() + } + GeneralConsensusMessage::UpgradeProposal(message) => message.get_view_number(), + GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), } } Right(committee_message) => { @@ -405,6 +429,10 @@ impl SequencingMessage { | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { MessagePurpose::ViewSyncProposal } + + GeneralConsensusMessage::UpgradeCertificate(_) + | GeneralConsensusMessage::UpgradeProposal(_) + | GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::Upgrade, }, Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index ab1e721a0d..cd609755e2 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -12,8 +12,8 @@ use ethereum_types::U256; use crate::{ data::Leaf, simple_vote::{ - DAData, QuorumData, TimeoutData, ViewSyncCommitData, ViewSyncFinalizeData, - ViewSyncPreCommitData, Voteable, + DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, + ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, @@ -50,6 +50,16 @@ impl Threshold for OneHonestThreshold { } } +/// Defines a threshold which is 0.9n + 1 (i.e. over 90% of the nodes with stake) +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct UpgradeThreshold {} + +impl Threshold for UpgradeThreshold { + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.upgrade_threshold().into() + } +} + /// A certificate which can be created by aggregating many simple votes on the commitment. #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] pub struct SimpleCertificate> { @@ -164,3 +174,6 @@ pub type ViewSyncCommitCertificate2 = /// Type alias for a `ViewSyncFinalize` certificate over a view number pub type ViewSyncFinalizeCertificate2 = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` +pub type UpgradeCertificate = + SimpleCertificate, UpgradeThreshold>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index b5d39e6a55..41f9e9e85d 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -3,8 +3,11 @@ use std::{fmt::Debug, hash::Hash}; use commit::{Commitment, Committable}; +use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; +use hotshot_constants::Version; + use crate::{ data::{Leaf, VidCommitment}, traits::{node_implementation::NodeType, signature_key::SignatureKey}, @@ -60,6 +63,20 @@ pub struct ViewSyncFinalizeData { /// The view number we are trying to sync on pub round: TYPES::Time, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Upgrade vote. +pub struct UpgradeProposalData { + /// The old version that we are upgrading from. + pub old_version: Version, + /// The new version that we are upgrading to. + pub new_version: Version, + /// A unique identifier for the specific protocol being voted on. + pub new_version_hash: Vec, + /// The last block for which the old version will be in effect. + pub old_version_last_block: TYPES::Time, + /// The first block for which the new version will be in effect. + pub new_version_first_block: TYPES::Time, +} /// Marker trait for data or commitments that can be voted on. /// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait @@ -166,6 +183,7 @@ impl Committable for DAData { .finalize() } } + impl Committable for VIDData { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("VID Vote") @@ -174,6 +192,21 @@ impl Committable for VIDData { } } +impl Committable for UpgradeProposalData { + fn commit(&self) -> Commitment { + let builder = commit::RawCommitmentBuilder::new("Upgrade Vote"); + builder + .u64(*self.new_version_first_block) + .u64(*self.old_version_last_block) + .var_size_bytes(self.new_version_hash.as_slice()) + .u16(self.new_version.minor) + .u16(self.new_version.major) + .u16(self.old_version.minor) + .u16(self.old_version.major) + .finalize() + } +} + /// This implements commit for all the types which contain a view and relay public key. fn view_and_relay_commit( view: TYPES::Time, @@ -221,3 +254,5 @@ pub type ViewSyncCommitVote = SimpleVote pub type ViewSyncPreCommitVote = SimpleVote>; /// View Sync Finalize Vote type alias pub type ViewSyncFinalizeVote = SimpleVote>; +/// Upgrade proposal vote +pub type UpgradeVote = SimpleVote>; diff --git a/types/src/traits/consensus_type.rs b/types/src/traits/consensus_type.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d8cfeba6e2..3eaf3f690e 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -76,4 +76,7 @@ pub trait Membership: /// Returns the threshold for a specific `Membership` implementation fn failure_threshold(&self) -> NonZeroU64; + + /// Returns the threshold required to upgrade the network protocol + fn upgrade_threshold(&self) -> NonZeroU64; } diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 43ed97bb9a..e022a4271d 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -49,6 +49,14 @@ pub fn post_vid_disperse_route(view_number: u64) -> String { format!("api/vid_disperse/{view_number}") } +pub fn get_upgrade_route(view_number: u64) -> String { + format!("api/upgrade/{view_number}") +} + +pub fn post_upgrade_route(view_number: u64) -> String { + format!("api/upgrade/{view_number}") +} + pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { format!("api/vid_votes/{view_number}/{index}") } From fd565f3d971601e1cad9b01ece6a96fa5f32a643 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 18 Jan 2024 16:55:35 -0800 Subject: [PATCH 0697/1393] Remove state from leaf, add full state to view --- hotshot/src/lib.rs | 31 ++++--- hotshot/src/traits/storage/memory_storage.rs | 4 +- hotshot/src/types/handle.rs | 4 +- task-impls/src/consensus.rs | 95 ++++++-------------- testing/src/overall_safety_task.rs | 28 +----- testing/src/state_types.rs | 2 +- testing/src/task_helpers.rs | 10 +-- testing/src/test_builder.rs | 2 - testing/tests/consensus_task.rs | 10 +-- types/src/consensus.rs | 39 ++++---- types/src/data.rs | 13 +-- types/src/traits/state.rs | 12 ++- types/src/traits/storage.rs | 4 - types/src/utils.rs | 16 +++- 14 files changed, 90 insertions(+), 180 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5e998fa548..6dc48e2802 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -223,7 +223,7 @@ impl> SystemContext { View { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), - metadata: anchored_leaf.get_state().metadata(), + state: TYPES::StateType::genesis(), }, }, ); @@ -359,20 +359,6 @@ impl> SystemContext { Ok(()) } - /// Returns a copy of the state - /// - /// # Panics - /// - /// Panics if internal state for consensus is inconsistent - pub async fn get_state(&self) -> TYPES::StateType { - self.inner - .consensus - .read() - .await - .get_decided_leaf() - .get_state() - } - /// Returns a copy of the consensus struct #[must_use] pub fn get_consensus(&self) -> Arc>> { @@ -381,11 +367,24 @@ impl> SystemContext { /// Returns a copy of the last decided leaf /// # Panics - /// Panics if internal state for consensus is inconsistent + /// Panics if internal leaf for consensus is inconsistent pub async fn get_decided_leaf(&self) -> Leaf { self.inner.consensus.read().await.get_decided_leaf() } + /// Returns a copy of the last decided validated state. + /// + /// # Panics + /// Panics if internal state for consensus is inconsistent + pub async fn get_decided_state(&self) -> TYPES::StateType { + self.inner + .consensus + .read() + .await + .get_decided_state() + .clone() + } + /// Initializes a new hotshot and does the work of setting up all the background tasks /// /// Assumes networking implementation is already primed. diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 39f75426d3..7b1a4670d3 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -113,13 +113,12 @@ mod test { use hotshot_testing::{ block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, node_types::TestTypes, - state_types::TestState, }; use hotshot_types::{ data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, State, + node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, }, }; use std::marker::PhantomData; @@ -148,7 +147,6 @@ mod test { header.clone(), Some(payload), dummy_leaf_commit, - TestState::initialize(&header), Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 2040061489..af42672e93 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -102,8 +102,8 @@ impl + 'static> SystemContextHandl /// # Errors /// /// Returns an error if the underlying `Storage` returns an error - pub async fn get_state(&self) { - self.hotshot.get_state().await; + pub async fn get_state(&self) -> TYPES::StateType { + self.hotshot.get_decided_state().await } /// Gets most recent decided leaf diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f20381f1ac..ddc3ae1be0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -205,28 +205,19 @@ impl, A: ConsensusApi + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { error!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.get_data().leaf_commit, - proposal.view_number, - ); + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.get_data().leaf_commit, + proposal.view_number, + ); return false; }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.validate_and_apply_header( - &proposal.block_header.clone(), - &parent.block_header.clone(), - &view, - ) else { - error!("Block header doesn't extend the proposal",); - return false; - }; let leaf: Leaf<_> = Leaf { view_number: view, justify_qc: proposal.justify_qc.clone(), parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, - state, rejected: Vec::new(), proposer_id: self.quorum_membership.get_leader(view), }; @@ -306,14 +297,6 @@ impl, A: ConsensusApi + return false; }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.validate_and_apply_header( - &proposal.block_header.clone(), - &parent.block_header.clone(), - &view, - ) else { - error!("Block header doesn't extend the proposal",); - return false; - }; let leaf: Leaf<_> = Leaf { view_number: view, @@ -321,7 +304,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, - state, rejected: Vec::new(), proposer_id: self.quorum_membership.get_leader(view), }; @@ -538,7 +520,6 @@ impl, A: ConsensusApi + .cloned() }; - // // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { // If no parent then just update our state map and return. We will not vote. @@ -546,44 +527,34 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - if let Ok(parent_leaf) = consensus.get_leaf(view - 1) { - let parent_state = parent_leaf.get_state(); - let Ok(state) = parent_state.validate_and_apply_header( - &proposal.data.block_header.clone(), - &parent_leaf.block_header.clone(), - &view, - ) else { - error!("Block header doesn't extend the proposal",); - return; - }; - let leaf = Leaf { - view_number: view, - justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.get_data().leaf_commit, - block_header: proposal.data.block_header, - state, - block_payload: None, - rejected: Vec::new(), - proposer_id: sender, - }; - - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - metadata: leaf.get_state().metadata(), - }, + let leaf = Leaf { + view_number: view, + justify_qc: justify_qc.clone(), + parent_commitment: justify_qc.get_data().leaf_commit, + block_header: proposal.data.block_header.clone(), + block_payload: None, + rejected: Vec::new(), + proposer_id: sender, + }; + let state = + ::from_header(&proposal.data.block_header); + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - } + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); return; }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.validate_and_apply_header( + let Ok(state) = consensus.get_decided_state().validate_and_apply_header( &proposal.data.block_header.clone(), &parent.block_header.clone(), &view, @@ -597,7 +568,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.data.block_header, block_payload: None, - state, rejected: Vec::new(), proposer_id: sender, }; @@ -740,7 +710,7 @@ impl, A: ConsensusApi + View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), - metadata: leaf.get_state().metadata(), + state, }, }, ); @@ -1205,21 +1175,12 @@ impl, A: ConsensusApi + commit_and_metadata.metadata.clone(), &parent_header, ); - let Ok(state) = parent_leaf.state.validate_and_apply_header( - &block_header.clone(), - &parent_leaf.block_header.clone(), - &view, - ) else { - error!("Block header doesn't extend the proposal",); - return false; - }; let leaf = Leaf { view_number: view, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - state, rejected: vec![], proposer_id: self.api.public_key().clone(), }; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f41497febd..80179c27a8 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -104,9 +104,6 @@ pub struct RoundResult { /// block -> # entries decided on that block pub block_map: HashMap, - /// state -> # entries decided on that state - pub state_map: HashMap, - pub num_txns_map: HashMap, } @@ -117,7 +114,6 @@ impl Default for RoundResult { failed_nodes: Default::default(), leaf_map: Default::default(), block_map: Default::default(), - state_map: Default::default(), num_txns_map: Default::default(), status: ViewStatus::InProgress, } @@ -199,16 +195,8 @@ impl RoundResult { } } - let (state, payload_commitment) = (leaf.get_state(), leaf.get_payload_commitment()); + let payload_commitment = leaf.get_payload_commitment(); - match self.state_map.entry(state) { - std::collections::hash_map::Entry::Occupied(mut o) => { - *o.get_mut() += 1; - } - std::collections::hash_map::Entry::Vacant(v) => { - v.insert(1); - } - } match self.block_map.entry(payload_commitment) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; @@ -245,7 +233,6 @@ impl RoundResult { total_num_nodes: usize, key: Leaf, check_leaf: bool, - check_state: bool, check_block: bool, transaction_threshold: u64, ) { @@ -258,11 +245,6 @@ impl RoundResult { return; } - if check_state && self.state_map.len() != 1 { - self.status = ViewStatus::Err(OverallSafetyTaskErr::InconsistentStates); - return; - } - if check_block && self.block_map.len() != 1 { self.status = ViewStatus::Err(OverallSafetyTaskErr::InconsistentBlocks); return; @@ -288,11 +270,9 @@ impl RoundResult { // if not, return error // if neither, continue through - let state_key = key.get_state(); let block_key = key.get_payload_commitment(); if *self.block_map.get(&block_key).unwrap() == threshold - && *self.state_map.get(&state_key).unwrap() == threshold && *self.leaf_map.get(&key).unwrap() == threshold { self.status = ViewStatus::Ok; @@ -334,8 +314,6 @@ pub struct OverallSafetyPropertiesDescription { pub num_successful_views: usize, /// whether or not to check the leaf pub check_leaf: bool, - /// whether or not to check the state - pub check_state: bool, /// whether or not to check the block pub check_block: bool, /// whether or not to check that we have threshold amounts of transactions each block @@ -355,7 +333,6 @@ impl std::fmt::Debug for OverallSafetyPropertiesDescription { f.debug_struct("OverallSafetyPropertiesDescription") .field("num successful views", &self.num_successful_views) .field("check leaf", &self.check_leaf) - .field("check_state", &self.check_state) .field("check_block", &self.check_block) .field("num_failed_rounds_total", &self.num_failed_views) .finish() @@ -367,7 +344,6 @@ impl Default for OverallSafetyPropertiesDescription { Self { num_successful_views: 50, check_leaf: false, - check_state: true, check_block: true, num_failed_views: 0, transaction_threshold: 0, @@ -384,7 +360,6 @@ impl OverallSafetyPropertiesDescription { ) -> TaskGenerator> { let Self { check_leaf, - check_state, check_block, num_failed_views: num_failed_rounds_total, num_successful_views, @@ -495,7 +470,6 @@ impl OverallSafetyPropertiesDescription { state.handles.len(), key, check_leaf, - check_state, check_block, transaction_threshold, ); diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index e146ffbce4..3b32952847 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -82,7 +82,7 @@ impl State for TestState { }) } - fn initialize(block_header: &Self::BlockHeader) -> Self { + fn from_header(block_header: &Self::BlockHeader) -> Self { Self { block_height: block_header.block_number, ..Default::default() diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 5d6aceaec5..ce43d88a05 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -24,7 +24,7 @@ use hotshot_types::{ election::Membership, node_implementation::NodeType, state::{ConsensusTime, TestableBlock}, - BlockPayload, State, + BlockPayload, }, vote::HasViewNumber, }; @@ -139,20 +139,12 @@ async fn build_quorum_proposal_and_signature( ); let view_number = ViewNumber::new(view); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); - let Ok(state) = parent_leaf.state.validate_and_apply_header( - &block_header.clone(), - &parent_leaf.block_header.clone(), - &view_number, - ) else { - panic!("Block header doesn't extend the proposal",); - }; let leaf = Leaf { view_number, justify_qc: consensus.high_qc.clone(), parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - state, rejected: vec![], proposer_id: *api.public_key(), }; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 0d0e617513..a1a50aca69 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -86,7 +86,6 @@ impl TestMetadata { overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 50, check_leaf: true, - check_state: true, check_block: true, num_failed_views: 15, transaction_threshold: 0, @@ -112,7 +111,6 @@ impl TestMetadata { overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 20, check_leaf: true, - check_state: true, check_block: true, num_failed_views: 8, transaction_threshold: 0, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 38ca3933a5..851391259b 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -11,7 +11,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::{state::ConsensusTime, State}, + traits::state::ConsensusTime, }; use hotshot_types::{ simple_vote::QuorumData, @@ -53,13 +53,6 @@ async fn build_vote( }; let parent_commitment = parent.commit(); - let Ok(state) = parent.state.validate_and_apply_header( - &proposal.block_header.clone(), - &parent.block_header.clone(), - &view, - ) else { - panic!("Block header doesn't extend the proposal",); - }; let leaf: Leaf<_> = Leaf { view_number: view, @@ -67,7 +60,6 @@ async fn build_vote( parent_commitment, block_header: proposal.block_header, block_payload: None, - state, rejected: Vec::new(), proposer_id: membership.get_leader(view), }; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 69831a7051..489fb7421f 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -333,35 +333,30 @@ impl Consensus { self.state_map = self.state_map.split_off(&new_anchor_view); } - /// Get the leaf corresponding to the given view. - /// - /// # Errors - /// If the view doesn't exist in the state map, due to the garbage collection for views older - /// older than the last decided view. - /// + /// Gets the last decided leaf. /// # Panics - /// If the last decided view exists in the state map but not in the saved leaves, which should - /// never happen. - pub fn get_leaf(&self, view: TYPES::Time) -> Result, HotShotError> { - let view = self.state_map.get(&view).unwrap_or_else(||{return }); + /// if the last decided view's leaf does not exist in the state map or saved leaves, which + /// should never happen. + #[must_use] + pub fn get_decided_leaf(&self) -> Leaf { + let decided_view_num = self.last_decided_view; + let view = self.state_map.get(&decided_view_num).unwrap(); let leaf = view .get_leaf_commitment() - .expect("Decided state not found! Consensus internally inconsistent"); - Ok(self.saved_leaves.get(&leaf).unwrap().clone()) + .expect("Decided leaf not found! Consensus internally inconsistent"); + self.saved_leaves.get(&leaf).unwrap().clone() } - /// Get the last decided leaf. - /// + /// Gets the last decided state. /// # Panics - /// If the last decided view does not exist in - /// * The saved leaves set, or - /// * The state map, - /// - /// Either of which should never happen. + /// if the last decided view's state does not exist in the state map or saved leaves, which + /// should never happen. #[must_use] - pub fn get_decided_leaf(&self) -> Leaf { - let last_decided_view = self.last_decided_view; - self.get_leaf(last_decided_view).unwrap() + pub fn get_decided_state(&self) -> &TYPES::StateType { + let decided_view_num = self.last_decided_view; + let view = self.state_map.get(&decided_view_num).unwrap(); + view.get_state() + .expect("Decided state not found! Consensus internally inconsistent") } } diff --git a/types/src/data.rs b/types/src/data.rs index bc19fb8dd5..37bdd1b9d6 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -326,9 +326,6 @@ pub struct Leaf { /// It may be empty for nodes not in the DA committee. pub block_payload: Option, - /// State. - pub state: TYPES::StateType, - /// Transactions that were marked for rejection while collecting the block. pub rejected: Vec<::Transaction>, @@ -342,7 +339,6 @@ impl PartialEq for Leaf { && self.justify_qc == other.justify_qc && self.parent_commitment == other.parent_commitment && self.block_header == other.block_header - && self.state == other.state && self.rejected == other.rejected } } @@ -353,7 +349,6 @@ impl Hash for Leaf { self.justify_qc.hash(state); self.parent_commitment.hash(state); self.block_header.hash(state); - self.state.hash(state); self.rejected.hash(state); } } @@ -381,7 +376,6 @@ impl Leaf { parent_commitment: fake_commitment(), block_header: block_header.clone(), block_payload: Some(block_payload), - state: ::initialize(&block_header), rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } @@ -449,10 +443,7 @@ impl Leaf { pub fn get_payload_commitment(&self) -> VidCommitment { self.get_block_header().payload_commitment() } - /// The blockchain state after appending this leaf. - pub fn get_state(&self) -> TYPES::StateType { - self.state.clone() - } + /// Transactions rejected or invalidated by the application of this leaf. pub fn get_rejected(&self) -> Vec<::Transaction> { self.rejected.clone() @@ -469,7 +460,6 @@ impl Leaf { parent_commitment: stored_view.parent, block_header: stored_view.block_header, block_payload: stored_view.block_payload, - state: stored_view.state, rejected: stored_view.rejected, proposer_id: stored_view.proposer_id, } @@ -570,7 +560,6 @@ where justify_qc: leaf.get_justify_qc(), block_header: leaf.get_block_header().clone(), block_payload: leaf.get_block_payload(), - state: leaf.get_state(), rejected: leaf.get_rejected(), proposer_id: leaf.get_proposer_id(), } diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index e55971a455..5b43dc3847 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -62,10 +62,16 @@ pub trait State: view_number: &Self::Time, ) -> Result; - /// Initialize the state with the given block header. + /// Construct the state with the given block header. /// - /// This can also be used to reinitialize the state for catchup. - fn initialize(block_header: &Self::BlockHeader) -> Self; + /// This can also be used to rebuild the state for catchup. + fn from_header(block_header: &Self::BlockHeader) -> Self; + + /// Construct a genesis state. + #[must_use] + fn genesis() -> Self { + Self::from_header(&Self::BlockHeader::genesis().0) + } /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index f189eca84c..4c6e9fe27f 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -130,8 +130,6 @@ pub struct StoredView { /// /// It may be empty for nodes not in the DA committee. pub block_payload: Option, - /// State. - pub state: TYPES::StateType, /// transactions rejected in this view pub rejected: Vec, /// the proposer id @@ -152,7 +150,6 @@ where block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, - state: TYPES::StateType, rejected: Vec<::Transaction>, proposer_id: TYPES::SignatureKey, ) -> Self { @@ -162,7 +159,6 @@ where justify_qc: qc, block_header, block_payload, - state, rejected, proposer_id, } diff --git a/types/src/utils.rs b/types/src/utils.rs index 9e1e4ff733..6df7749cf0 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -2,7 +2,7 @@ use crate::{ data::{Leaf, VidCommitment}, - traits::{node_implementation::NodeType, State}, + traits::node_implementation::NodeType, }; use commit::Commitment; use std::ops::Deref; @@ -23,8 +23,8 @@ pub enum ViewInner { Leaf { /// Proposed leaf leaf: Commitment>, - /// Application-specific data. - metadata: ::Metadata, + /// Validated state. + state: TYPES::StateType, }, /// Leaf has failed Failed, @@ -41,6 +41,16 @@ impl ViewInner { } } + /// return the underlying validated state if it exists + #[must_use] + pub fn get_state(&self) -> Option<&TYPES::StateType> { + if let Self::Leaf { state, .. } = self { + Some(state) + } else { + None + } + } + /// return the underlying block paylod commitment if it exists #[must_use] pub fn get_payload_commitment(&self) -> Option { From 3ca1e7c1f397062c32a3bcc0f027907cfa75c0a3 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 18 Jan 2024 17:22:49 -0800 Subject: [PATCH 0698/1393] Fix build for test after merging --- testing/src/task_helpers.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 012acbd50f..7920b937cb 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -3,6 +3,7 @@ use std::marker::PhantomData; use crate::{ block_types::{TestBlockHeader, TestBlockPayload}, node_types::{MemoryImpl, TestTypes}, + state_types::TestState, test_builder::TestMetadata, }; use commit::Committable; @@ -26,7 +27,7 @@ use hotshot_types::{ election::Membership, node_implementation::NodeType, state::{ConsensusTime, TestableBlock}, - BlockPayload, + BlockPayload, State, }, vote::HasViewNumber, }; @@ -227,7 +228,6 @@ async fn build_quorum_proposal_and_signature( .quorum_membership .total_nodes(), ); - let view_number = ViewNumber::new(view); let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); // current leaf that can be re-assigned everytime when entering a new view let mut leaf = Leaf { @@ -239,6 +239,7 @@ async fn build_quorum_proposal_and_signature( rejected: vec![], proposer_id: *api.public_key(), }; + let mut parent_state = ::from_header(&parent_leaf.block_header); let mut signature = ::sign(private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); @@ -252,6 +253,9 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { + let state_new_view = parent_state + .validate_and_apply_header(&block_header, &block_header, &ViewNumber::new(cur_view - 1)) + .unwrap(); // save states for the previous view to pass all the qc checks // In the long term, we want to get rid of this, do not manually update consensus state consensus.state_map.insert( @@ -259,6 +263,7 @@ async fn build_quorum_proposal_and_signature( View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), + state: state_new_view.clone(), }, }, ); @@ -304,6 +309,7 @@ async fn build_quorum_proposal_and_signature( proposal = proposal_new_view; signature = signature_new_view; leaf = leaf_new_view; + parent_state = state_new_view; } (proposal, signature) From 523efb72d3a1f6d746e158c6ea42591a446d183e Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 19 Jan 2024 18:03:35 +0100 Subject: [PATCH 0699/1393] [BUILDER] Add external events for builder (#2408) Added external events for `Transaction`, `DAProposal` and `QuorumProposal` --- hotshot/src/lib.rs | 61 ++++++++++++++++++++++++---------- task-impls/src/consensus.rs | 19 +++++++++-- task-impls/src/da.rs | 12 +++++++ task-impls/src/transactions.rs | 59 ++++++++++++++++++++------------ types/src/event.rs | 27 ++++++++++++++- 5 files changed, 135 insertions(+), 43 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 1d54d75f57..5003adea7b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -43,6 +43,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; +use futures::join; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -54,6 +55,7 @@ use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, PayloadStore, View, ViewInner, ViewQueue}, data::Leaf, error::StorageSnafu, + event::EventType, message::{ DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, }, @@ -321,6 +323,20 @@ impl> SystemContext { }; } + /// Emit an external event + // A copypasta of `ConsensusApi::send_event` + // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 + async fn send_external_event(&self, event: Event) { + debug!(?event, "send_external_event"); + let mut event_sender = self.inner.event_sender.write().await; + if let Some(sender) = &*event_sender { + if let Err(e) = sender.send_async(event).await { + error!(?e, "Could not send event to event_sender"); + *event_sender = None; + } + } + } + /// Publishes a transaction asynchronously to the network /// /// # Errors @@ -335,25 +351,36 @@ impl> SystemContext { // Wrap up a message // TODO place a view number here that makes sense // we haven't worked out how this will work yet - let message = DataMessage::SubmitTransaction(transaction, TYPES::Time::new(0)); + let message = DataMessage::SubmitTransaction(transaction.clone(), TYPES::Time::new(0)); let api = self.clone(); - // TODO We should have a function that can return a network error if there is one - // but first we'd need to ensure our network implementations can support that - // (and not hang instead) + async_spawn(async move { - let _result = api - .inner - .networks - .da_network - .broadcast_message( - Message { - version: PROGRAM_PROTOCOL_VERSION, - sender: api.inner.public_key.clone(), - kind: MessageKind::from(message), - }, - &api.inner.memberships.da_membership.clone(), - ) - .await; + let da_membership = &api.inner.memberships.da_membership.clone(); + join! { + // TODO We should have a function that can return a network error if there is one + // but first we'd need to ensure our network implementations can support that + // (and not hang instead) + // + api + .inner + .networks + .da_network + .broadcast_message( + Message { + version: PROGRAM_PROTOCOL_VERSION, + sender: api.inner.public_key.clone(), + kind: MessageKind::from(message), + }, + da_membership, + ), + api + .send_external_event(Event { + view_number: api.inner.consensus.read().await.cur_view, + event: EventType::Transactions { + transactions: vec![transaction], + }, + }), + } }); Ok(()) } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9478f55f7a..1dabbe6636 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -557,10 +557,10 @@ impl, A: ConsensusApi + view_number: view, justify_qc: justify_qc.clone(), parent_commitment, - block_header: proposal.data.block_header, + block_header: proposal.data.block_header.clone(), block_payload: None, rejected: Vec::new(), - proposer_id: sender, + proposer_id: sender.clone(), }; let leaf_commitment = leaf.commit(); @@ -603,6 +603,17 @@ impl, A: ConsensusApi + return; } + // We accept the proposal, notify the application layer + self.api + .send_event(Event { + view_number: self.cur_view, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }) + .await; + let high_qc = leaf.justify_qc.clone(); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; @@ -1200,12 +1211,14 @@ impl, A: ConsensusApi + "Sending proposal for view {:?} \n {:?}", leaf.view_number, "" ); + self.event_stream .publish(HotShotEvent::QuorumProposalSend( - message, + message.clone(), self.public_key.clone(), )) .await; + self.payload_commitment_and_metadata = None; return true; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0e5bb160c1..a420a15946 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -13,6 +13,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, + event::{Event, EventType}, message::Proposal, simple_certificate::DACertificate, simple_vote::{DAData, DAVote}, @@ -141,6 +142,17 @@ impl, A: ConsensusApi + return None; } + // Proposal is fresh and valid, notify the application layer + self.api + .send_event(Event { + view_number: self.cur_view, + event: EventType::DAProposal { + proposal: proposal.clone(), + sender: sender.clone(), + }, + }) + .await; + if !self.da_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e162a6c26f..3fe87ab312 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -15,6 +15,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::Consensus, data::Leaf, + event::{Event, EventType}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -92,28 +93,42 @@ impl, A: ConsensusApi + ) -> Option { match event { HotShotEvent::TransactionsRecv(transactions) => { - let consensus = self.consensus.read().await; - self.transactions - .modify(|txns| { - for transaction in transactions { - let size = bincode_opts().serialized_size(&transaction).unwrap_or(0); - - // If we didn't already know about this transaction, update our mempool metrics. - if !self.seen_transactions.remove(&transaction.commit()) - && txns.insert(transaction.commit(), transaction).is_none() - { - consensus.metrics.outstanding_transactions.update(1); - consensus - .metrics - .outstanding_transactions_memory_size - .update(i64::try_from(size).unwrap_or_else(|e| { - warn!("Conversion failed: {e}. Using the max value."); - i64::MAX - })); - } - } - }) - .await; + futures::join! { + self.api + .send_event(Event { + view_number: self.cur_view, + event: EventType::Transactions { + transactions: transactions.clone(), + }, + }), + async { + let consensus = self.consensus.read().await; + self.transactions + .modify(|txns| { + for transaction in transactions { + let size = + bincode_opts().serialized_size(&transaction).unwrap_or(0); + + // If we didn't already know about this transaction, update our mempool metrics. + if !self.seen_transactions.remove(&transaction.commit()) + && txns.insert(transaction.commit(), transaction).is_none() + { + consensus.metrics.outstanding_transactions.update(1); + consensus + .metrics + .outstanding_transactions_memory_size + .update(i64::try_from(size).unwrap_or_else(|e| { + warn!( + "Conversion failed: {e}. Using the max value." + ); + i64::MAX + })); + } + } + }) + .await; + } + }; return None; } diff --git a/types/src/event.rs b/types/src/event.rs index 666a45086d..7b4b55a754 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,7 +1,10 @@ //! Events that a `HotShot` instance can emit use crate::{ - data::Leaf, error::HotShotError, simple_certificate::QuorumCertificate, + data::{DAProposal, Leaf, QuorumProposal}, + error::HotShotError, + message::Proposal, + simple_certificate::QuorumCertificate, traits::node_implementation::NodeType, }; @@ -62,4 +65,26 @@ pub enum EventType { /// The view number that has just finished view_number: TYPES::Time, }, + /// New transactions were received from the network + /// or submitted to the network by us + Transactions { + /// The list of transactions + transactions: Vec, + }, + /// DA proposal was received from the network + /// or submitted to the network by us + DAProposal { + /// Contents of the proposal + proposal: Proposal>, + /// Public key of the leader submitting the proposal + sender: TYPES::SignatureKey, + }, + /// Quorum proposal was received from the network + /// or submitted to the network by us + QuorumProposal { + /// Contents of the proposal + proposal: Proposal>, + /// Public key of the leader submitting the proposal + sender: TYPES::SignatureKey, + }, } From 8162bdf231f7862b18a5bf9c501b088b9e26a6b2 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 19 Jan 2024 14:00:59 -0500 Subject: [PATCH 0700/1393] Add documents --- hotshot-stake-table/src/mt_based/internal.rs | 14 +++++++++----- hotshot-stake-table/src/vec_based.rs | 9 +++++---- hotshot-state-prover/src/circuit.rs | 10 +++++----- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index a308d235b4..89c3719661 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -101,7 +101,7 @@ impl MerkleProof { &self.index } - /// TODO document + /// Returns the public key of the associated stake table entry, if there's any. pub fn get_key(&self) -> Option<&K> { match self.path.first() { Some(MerklePathEntry::Leaf { key, value: _ }) => Some(key), @@ -109,7 +109,7 @@ impl MerkleProof { } } - /// TODO document + /// Returns the stake amount of the associated stake table entry, if there's any. pub fn get_value(&self) -> Option<&U256> { match self.path.first() { Some(MerklePathEntry::Leaf { key: _, value }) => Some(value), @@ -117,7 +117,7 @@ impl MerkleProof { } } - /// TODO document + /// Returns the associated stake table entry, if there's any. pub fn get_key_value(&self) -> Option<(&K, &U256)> { match self.path.first() { Some(MerklePathEntry::Leaf { key, value }) => Some((key, value)), @@ -125,7 +125,9 @@ impl MerkleProof { } } - /// TODO document + /// Compute the root of this Merkle proof. + /// # Errors + /// Errors could be triggered by internal Rescue hash, or if the proof is malformed. pub fn compute_root(&self) -> Result { match self.path.first() { Some(MerklePathEntry::Leaf { key, value }) => { @@ -154,7 +156,9 @@ impl MerkleProof { } } - /// TODO document + /// Verify the Merkle proof against the provided Merkle commitment. + /// # Errors + /// Error could be triggered while computing the root of this proof, or if the verification fails. pub fn verify(&self, comm: &MerkleCommitment) -> Result<(), StakeTableError> { if self.tree_height() != comm.tree_height() || !self.compute_root()?.eq(comm.digest()) { Err(StakeTableError::VerificationError) diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 1221bbe53b..06e178f38d 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -57,13 +57,14 @@ where /// The stake table used for leader election. last_epoch_start: StakeTableSnapshot, - /// Total stakes for different versions + /// Total stakes in the most update-to-date stake table head_total_stake: U256, - /// TODO document + /// Total stakes in the snapshot version `EpochStart` epoch_start_total_stake: U256, - /// TODO document + /// Total stakes in the snapshot version `LastEpochStart` last_epoch_start_total_stake: U256, + /// Commitment of the stake table snapshot version `EpochStart` /// We only support committing the finalized versions. /// Commitment for a finalized version is a triple where /// - First item is the rescue hash of the bls keys @@ -71,7 +72,7 @@ where /// - Third item is the rescue hash of all the stake amounts epoch_start_comm: (F, F, F), - /// TODO document + /// Commitment of the stake table snapshot version `LastEpochStart` last_epoch_start_comm: (F, F, F), /// The mapping from public keys to their location in the Merkle tree. diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index 497e0bcf11..013fbbe5b2 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -151,31 +151,31 @@ impl LightClientStateVar { }) } - /// TODO doc + /// Returns the view number #[must_use] pub fn view_number(&self) -> Variable { self.vars[0] } - /// TODO doc + /// Returns the block height #[must_use] pub fn block_height(&self) -> Variable { self.vars[1] } - /// TODO doc + /// Returns the Merkle root of the block commitments #[must_use] pub fn block_comm_root(&self) -> Variable { self.vars[2] } - /// TODO doc + /// Returns the commitment of the fee ledger #[must_use] pub fn fee_ledger_comm(&self) -> Variable { self.vars[3] } - /// TODO doc + /// Returns the commitment of the associated stake table #[must_use] pub fn stake_table_comm(&self) -> StakeTableCommVar { StakeTableCommVar { From c6aecbe4d4b5fee1c589ccaccd0cdc9e20685776 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 12:30:59 -0500 Subject: [PATCH 0701/1393] fix: ci --- hotshot/examples/infra/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index e48f1d3984..32ae61f3e0 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -267,6 +267,7 @@ async fn libp2p_network_from_config( // NOTE: this introduces an invariant that the keys are assigned using this indexed // function all_keys, + None, da_keys.clone(), da_keys.contains(&pub_key), ) From 16a7eb67cab1223d4866418455bfd86600677c50 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 12:53:30 -0500 Subject: [PATCH 0702/1393] chore: nuking comments --- testing-macros/tests/cross/failures.rs | 69 ------ testing-macros/tests/cross/random_tests.rs | 211 ------------------ testing-macros/tests/cross/smoke.rs | 29 --- testing-macros/tests/integration/failures.rs | 25 --- .../tests/integration/random_tests.rs | 1 - testing-macros/tests/integration/smoke.rs | 39 ---- 6 files changed, 374 deletions(-) delete mode 100644 testing-macros/tests/cross/failures.rs delete mode 100644 testing-macros/tests/cross/random_tests.rs delete mode 100644 testing-macros/tests/cross/smoke.rs delete mode 100644 testing-macros/tests/integration/failures.rs delete mode 100644 testing-macros/tests/integration/random_tests.rs delete mode 100644 testing-macros/tests/integration/smoke.rs diff --git a/testing-macros/tests/cross/failures.rs b/testing-macros/tests/cross/failures.rs deleted file mode 100644 index f7dc473299..0000000000 --- a/testing-macros/tests/cross/failures.rs +++ /dev/null @@ -1,69 +0,0 @@ -// use hotshot_testing_macros::cross_all_types; -// -// // This test simulates a single permanent failed node -// cross_all_types!( -// TestName: single_permanent_failure_slow, -// TestBuilder: hotshot_testing::test_builder::TestBuilder { -// metadata: hotshot_testing::test_builder::TestMetadata { -// total_nodes: 7, -// start_nodes: 7, -// num_succeeds: 10, -// timing_data: hotshot_testing::test_builder::TimingData { -// next_view_timeout: 1000, -// ..hotshot_testing::test_builder::TimingData::default() -// }, -// failure_threshold: 20, -// ..hotshot_testing::test_builder::TestMetadata::default() -// }, -// setup: -// Some(hotshot_testing::round_builder::RoundSetupBuilder { -// scheduled_changes: vec![ -// hotshot_testing::round_builder::ChangeNode { -// idx: 5, -// view: 1, -// updown: hotshot_testing::round_builder::UpDown::Down -// }, -// ], -// ..Default::default() -// }), -// check: None -// }, -// Slow: true, -// ); -// -// // This test simulates two permanent failed nodes -// // -// // With n=7, this is the maximum failures that the network can tolerate -// cross_all_types!( -// TestName: double_permanent_failure_slow, -// TestBuilder: hotshot_testing::test_builder::TestBuilder { -// metadata: hotshot_testing::test_builder::TestMetadata { -// total_nodes: 7, -// start_nodes: 7, -// num_succeeds: 10, -// failure_threshold: 20, -// timing_data: hotshot_testing::test_builder::TimingData { -// next_view_timeout: 1000, -// ..hotshot_testing::test_builder::TimingData::default() -// }, -// ..hotshot_testing::test_builder::TestMetadata::default() -// }, -// setup: -// Some(hotshot_testing::round_builder::RoundSetupBuilder { -// scheduled_changes: vec![ -// hotshot_testing::round_builder::ChangeNode { -// idx: 5, -// view: 1, -// updown: hotshot_testing::round_builder::UpDown::Down -// }, -// hotshot_testing::round_builder::ChangeNode { -// idx: 6, -// view: 1, -// updown: hotshot_testing::round_builder::UpDown::Down }, -// ], -// ..Default::default() -// }), -// check: None -// } -// Slow: true, -// ); diff --git a/testing-macros/tests/cross/random_tests.rs b/testing-macros/tests/cross/random_tests.rs deleted file mode 100644 index 771ea61b0b..0000000000 --- a/testing-macros/tests/cross/random_tests.rs +++ /dev/null @@ -1,211 +0,0 @@ -// #[cfg(feature = "slow-tests")] -// use either::Either::Right; -// #[cfg(feature = "slow-tests")] -// use hotshot_testing::test_builder::{get_tolerance, TestMetadata}; -// use hotshot_testing_macros::cross_all_types; -// #[cfg(feature = "slow-tests")] -// use std::{collections::HashSet, iter::FromIterator}; - -// TODO these need to be fixed. But the slow runs fail anyway. We should re-enable when we decide -// to debug and fix them. - -// cross_all_types!( -// TestName: test_fail_first_node_regression_small, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 10, -// start_nodes: 10, -// num_succeeds: 40, -// txn_ids: Right(30), -// ids_to_shut_down: vec![vec![0].into_iter().collect::>()], -// failure_threshold: 5, -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_fifty_nodes_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 50, -// start_nodes: 50, -// num_succeeds: 40, -// txn_ids: Right(30), -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_ninety_nodes_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 90, -// start_nodes: 90, -// num_succeeds: 40, -// txn_ids: Right(30), -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_large_num_txns_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 10, -// start_nodes: 10, -// num_succeeds: 40, -// txn_ids: Right(500), -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_fail_last_node_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 53, -// start_nodes: 53, -// num_succeeds: 40, -// txn_ids: Right(30), -// ids_to_shut_down: vec![vec![52].into_iter().collect::>()], -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_fail_first_node_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 76, -// start_nodes: 76, -// num_succeeds: 40, -// txn_ids: Right(30), -// ids_to_shut_down: vec![vec![0].into_iter().collect::>()], -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_fail_last_f_nodes_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 75, -// start_nodes: 75, -// num_succeeds: 40, -// txn_ids: Right(30), -// ids_to_shut_down: vec![HashSet::::from_iter( -// (0..get_tolerance(75)).map(|x| 74 - x), -// )], -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_fail_last_f_plus_one_nodes_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 15, -// start_nodes: 15, -// txn_ids: Right(30), -// ids_to_shut_down: vec![HashSet::::from_iter( -// (0..get_tolerance(15) + 1).map(|x| 14 - x), -// )], -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: test_mul_txns_regression, -// TestDescription: GeneralTestDescriptionBuilder { -// total_nodes: 30, -// start_nodes: 30, -// txn_ids: Right(30), -// ..GeneralTestDescriptionBuilder::default() -// }, -// Slow: true -// ); - -// TODO re-enable these tests if we decide to use proptest -// -// cross_all_types_proptes!( -// test_large_num_nodes_random, -// GeneralTestDescriptionBuilder { -// total_nodes: num_nodes, -// start_nodes: num_nodes, -// ..GeneralTestDescriptionBuilder::default() -// }, -// keep: true, -// slow: true, -// args: num_nodes in 50..100usize -// ); -// -// cross_all_types_proptest!( -// test_fail_last_node_random, -// GeneralTestDescriptionBuilder { -// total_nodes: num_nodes, -// start_nodes: num_nodes, -// txn_ids: Right(30), -// ids_to_shut_down: vec![vec![(num_nodes - 1) as u64].into_iter().collect()], -// ..GeneralTestDescriptionBuilder::default() -// }, -// keep: true, -// slow: true, -// args: num_nodes in 30..100usize -// ); -// -// cross_all_types_proptest!( -// test_fail_first_node_random, -// GeneralTestDescriptionBuilder { -// total_nodes: num_nodes, -// start_nodes: num_nodes, -// txn_ids: Right(30), -// ids_to_shut_down: vec![vec![0].into_iter().collect()], -// ..GeneralTestDescriptionBuilder::default() -// }, -// keep: true, -// slow: true, -// args: num_nodes in 30..100usize -// ); -// -// cross_all_types_proptest!( -// test_fail_last_f_nodes_random, -// GeneralTestDescriptionBuilder { -// total_nodes: num_nodes, -// start_nodes: num_nodes, -// num_succeeds: 40, -// txn_ids: Right(30), -// ids_to_shut_down: vec![HashSet::::from_iter((0..get_tolerance(num_nodes as u64)).map(|x| (num_nodes as u64) - x - 1))], -// ..GeneralTestDescriptionBuilder::default() -// }, -// keep: true, -// slow: true, -// args: num_nodes in 30..100usize -// ); -// -// cross_all_types_proptest!( -// test_fail_first_f_nodes_random, -// GeneralTestDescriptionBuilder { -// total_nodes: num_nodes, -// start_nodes: num_nodes, -// num_succeeds: 40, -// txn_ids: Right(30), -// ids_to_shut_down: vec![HashSet::::from_iter(0..get_tolerance(num_nodes as u64))], -// ..GeneralTestDescriptionBuilder::default() -// }, -// keep: true, -// slow: true, -// args: num_nodes in 30..100usize -// ); -// -// cross_all_types_proptest!( -// test_mul_txns_random, -// GeneralTestDescriptionBuilder { -// total_nodes: 30, -// start_nodes: 30, -// txn_ids: Left(vec![vec![txn_proposer_1, txn_proposer_2]]), -// ..GeneralTestDescriptionBuilder::default() -// }, -// keep: true, -// slow: true, -// args: txn_proposer_1 in 0..15u64, txn_proposer_2 in 15..30u64 -// ); diff --git a/testing-macros/tests/cross/smoke.rs b/testing-macros/tests/cross/smoke.rs deleted file mode 100644 index 85238c90f5..0000000000 --- a/testing-macros/tests/cross/smoke.rs +++ /dev/null @@ -1,29 +0,0 @@ -// use hotshot_testing_macros::cross_all_types; -// -// cross_all_types!( -// TestName: ten_tx_five_nodes_slow, -// TestBuilder: hotshot_testing::test_builder::TestBuilder { -// metadata: hotshot_testing::test_builder::TestMetadata { -// total_nodes: 5, -// start_nodes: 5, -// num_succeeds: 10, -// ..Default::default() -// }, -// ..Default::default() -// }, -// Slow: true -// ); -// -// cross_all_types!( -// TestName: ten_tx_seven_nodes_slow, -// TestBuilder: hotshot_testing::test_builder::TestBuilder { -// metadata: hotshot_testing::test_builder::TestMetadata { -// total_nodes: 7, -// start_nodes: 7, -// num_succeeds: 10, -// ..Default::default() -// }, -// ..Default::default() -// }, -// Slow: true -// ); diff --git a/testing-macros/tests/integration/failures.rs b/testing-macros/tests/integration/failures.rs deleted file mode 100644 index 76f2da60c8..0000000000 --- a/testing-macros/tests/integration/failures.rs +++ /dev/null @@ -1,25 +0,0 @@ -use hotshot_testing::completion_task::CompletionTaskDescription; -use hotshot_testing::completion_task::TimeBasedCompletionTaskDescription; -use hotshot_testing::node_types::TestTypes; -use hotshot_testing::node_types::{Libp2pImpl, MemoryImpl}; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing_macros::cross_tests; - -cross_tests!( - Metadata: - TestMetadata { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: std::time::Duration::from_secs(60), - }, - ), - ..TestMetadata::default() - }, - Ignore: false, - TestName: single_permanent_failure_fast, - // types that implement nodetype - Types: [TestTypes], - // forall impl in Impls, forall type in Types, impl : NodeImplementation - Impls: [MemoryImpl, Libp2pImpl], -); diff --git a/testing-macros/tests/integration/random_tests.rs b/testing-macros/tests/integration/random_tests.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/testing-macros/tests/integration/random_tests.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/testing-macros/tests/integration/smoke.rs b/testing-macros/tests/integration/smoke.rs deleted file mode 100644 index 1f23b24889..0000000000 --- a/testing-macros/tests/integration/smoke.rs +++ /dev/null @@ -1,39 +0,0 @@ -// use hotshot_testing_macros::cross_tests; -// -// cross_tests!( -// DemoType: [(hotshot::demos::vdemo::VDemoState)], -// SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], -// CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], -// Storage: [ hotshot::traits::implementations::MemoryStorage ], -// Time: [ hotshot_types::data::ViewNumber ], -// TestName: ten_tx_five_nodes_fast, -// TestBuilder: hotshot_testing::test_builder::TestBuilder { -// metadata: hotshot_testing::test_builder::TestMetadata { -// total_nodes: 5, -// start_nodes: 5, -// num_succeeds: 10, -// ..Default::default() -// }, -// ..Default::default() -// }, -// Slow: false, -// ); -// -// cross_tests!( -// DemoType: [(hotshot::demos::vdemo::VDemoState) ], -// SignatureKey: [ hotshot_types::traits::signature_key::bn254::BLSPubKey ], -// CommChannel: [ hotshot::traits::implementations::MemoryCommChannel ], -// Storage: [ hotshot::traits::implementations::MemoryStorage ], -// Time: [ hotshot_types::data::ViewNumber ], -// TestName: ten_tx_seven_nodes_fast, -// TestBuilder: hotshot_testing::test_builder::TestBuilder { -// metadata: hotshot_testing::test_builder::TestMetadata { -// total_nodes: 7, -// start_nodes: 7, -// num_succeeds: 10, -// ..Default::default() -// }, -// ..Default::default() -// }, -// Slow: false, -// ); From 96857c57aaf5f127e91dfc2fa82b0a2beab00839 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 12:55:32 -0500 Subject: [PATCH 0703/1393] chore: drop tests we aren't using --- testing-macros/tests/tests.rs | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 96b92d7f52..70b786d12e 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -1,11 +1 @@ -mod integration { - mod failures; - mod random_tests; - mod smoke; -} - -mod cross { - mod failures; - mod random_tests; - mod smoke; -} +// TODO From 9d35bf819f3ba4319af84b7c0e9cee7606f04f6d Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 18:30:36 -0500 Subject: [PATCH 0704/1393] chore: add issue --- libp2p-networking/src/network/behaviours/dht/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 448922a0c3..c2b5f55531 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -26,7 +26,8 @@ use tracing::{error, info, warn}; /// the number of nodes required to get an answer from /// in order to trust that the answer is correct when retrieving from the DHT -/// TODO why are tehre two of these? +/// TODO why are there two of these? +/// pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; /// the maximum number of nodes to query in the DHT at any one time const MAX_DHT_QUERY_SIZE: usize = 5; From e0255f7e7c8f5eea905eae9d9f627deb4e1c85d1 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 18:31:09 -0500 Subject: [PATCH 0705/1393] chore: add issue --- libp2p-networking/src/network/def.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 061e6251a5..a1b387b10d 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -24,7 +24,8 @@ use libp2p_swarm_derive::NetworkBehaviour; /// the number of nodes required to get an answer from /// in order to trust that the answer is correct when retrieving from the DHT -/// TODO why are tehre two of these? +/// TODO why are there two of these? +/// pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; /// Overarching network behaviour performing: From 7537fe6c94ca8d7879508fa26e1473a30678c2a0 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 18:32:51 -0500 Subject: [PATCH 0706/1393] chore: add issue --- testing/src/overall_safety_task.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index e8c68c685e..81e36b5c92 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -111,6 +111,7 @@ pub struct RoundResult { // TODO should we delete this? // state is empty now /// state -> # entries decided on that state + /// pub state_map: HashMap<(), usize>, /// node idx -> number transactions From ab9417c2093940aed1f4cfe1423162f596b2ee6a Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 18:37:48 -0500 Subject: [PATCH 0707/1393] feat: address ellie's comments --- web_server/src/lib.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 5bbe068087..589bf8c6a9 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -32,10 +32,9 @@ type Error = ServerError; struct WebServerState { /// view number -> (secret, proposal) proposals: HashMap)>, - /// for view sync: view number -> (secret, proposal) - /// TODO guessing here + /// for view sync: view number -> (relay, certificate) view_sync_proposals: HashMap)>>, - /// view number -> index + /// view number -> relay view_sync_proposal_index: HashMap, /// view number -> (secret, da_certificates) da_certificates: HashMap)>, @@ -51,7 +50,7 @@ struct WebServerState { oldest_view_sync_proposal: u64, /// view number -> Vec(index, vote) votes: HashMap)>>, - /// view sync: view number -> Vec(index, vote) + /// view sync: view number -> Vec(relay, vote) view_sync_votes: HashMap)>>, /// view number -> highest vote index for that view number vote_index: HashMap, @@ -61,14 +60,13 @@ struct WebServerState { oldest_vote: u64, /// view sync: view number of oldest votes in memory oldest_view_sync_vote: u64, - /// view number -> (secret, proposal) - /// TODO is this right? + /// view number -> (secret, string) vid_disperses: HashMap)>, /// view for the oldest vid disperal oldest_vid_disperse: u64, /// view of most recent vid dispersal recent_vid_disperse: u64, - /// TODO document, not sure what this is + /// votes that a node got, that is, their VID share vid_votes: HashMap)>>, /// oldest vid vote view number oldest_vid_vote: u64, @@ -81,7 +79,7 @@ struct WebServerState { /// index -> transaction // TODO ED Make indexable by hash of tx transactions: HashMap>, - /// TODO document + /// tx hash -> tx index, is currently unused txn_lookup: HashMap, u64>, /// highest transaction index num_txns: u64, From 672a292d6f0d5baefdd1a03c1f642f8a9b06d1e3 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 18:54:03 -0500 Subject: [PATCH 0708/1393] feat: allow panicks in examples and tests --- hotshot/examples/combined/all.rs | 1 + hotshot/examples/combined/multi-validator.rs | 1 + hotshot/examples/combined/orchestrator.rs | 1 + hotshot/examples/combined/validator.rs | 1 + hotshot/examples/libp2p/all.rs | 1 + hotshot/examples/libp2p/multi-validator.rs | 1 + hotshot/examples/libp2p/orchestrator.rs | 1 + hotshot/examples/libp2p/validator.rs | 1 + hotshot/examples/webserver/all.rs | 1 + hotshot/examples/webserver/multi-validator.rs | 1 + hotshot/examples/webserver/multi-webserver.rs | 1 + hotshot/examples/webserver/orchestrator.rs | 1 + hotshot/examples/webserver/validator.rs | 1 + hotshot/examples/webserver/webserver.rs | 1 + testing/src/lib.rs | 1 + testing/tests/atomic_storage.rs | 1 + testing/tests/basic.rs | 1 + testing/tests/catchup.rs | 1 + testing/tests/combined_network.rs | 1 + testing/tests/da_task.rs | 1 + testing/tests/libp2p.rs | 1 + testing/tests/network_task.rs | 1 + testing/tests/timeout.rs | 1 + testing/tests/unit.rs | 2 ++ testing/tests/unit/message.rs | 1 + testing/tests/vid_task.rs | 1 + testing/tests/view_sync_task.rs | 1 + testing/tests/web_server.rs | 1 + 28 files changed, 29 insertions(+) diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index de602d4bea..aa59df0687 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A example program using both the web server and libp2p /// types used for this example pub mod types; diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index 7329986afc..2a4c575313 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A multi-validator using both the web server libp2p use async_compatibility_layer::{ art::async_spawn, diff --git a/hotshot/examples/combined/orchestrator.rs b/hotshot/examples/combined/orchestrator.rs index ba038fabc1..07d295141a 100644 --- a/hotshot/examples/combined/orchestrator.rs +++ b/hotshot/examples/combined/orchestrator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! Orchestrator using the web server /// types used for this example pub mod types; diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs index 04c0aa7cee..4b7aa8064b 100644 --- a/hotshot/examples/combined/validator.rs +++ b/hotshot/examples/combined/validator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A validator using both the web server and libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index 3dda9d6007..30b7876de6 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A example program using libp2p /// types used for this example pub mod types; diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index f64d2f25ac..d401ca3eb5 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A multi-validator using libp2p use async_compatibility_layer::{ art::async_spawn, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index 9298b9e372..ce55bee17d 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! An orchestrator using libp2p /// types used for this example diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index d27f7f1ea1..79bbcfda40 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A validator using libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index eb92ee77d4..5d66446c4c 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A example program using the web server /// types used for this example pub mod types; diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index 780aa24207..f0b86c7bb8 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A multi-validator using the web server use async_compatibility_layer::{ art::async_spawn, diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index e338835c07..9c0db71d2b 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A multi web server use std::sync::Arc; diff --git a/hotshot/examples/webserver/orchestrator.rs b/hotshot/examples/webserver/orchestrator.rs index 5a39e56471..71c8cfa3b5 100644 --- a/hotshot/examples/webserver/orchestrator.rs +++ b/hotshot/examples/webserver/orchestrator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A orchestrator using the web server /// types used for this example diff --git a/hotshot/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs index 4bfcd9d78c..525701c8ff 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/hotshot/examples/webserver/validator.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! A validator using the web server use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index e6c9a468c8..2a448b06c3 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] //! web server example use hotshot_testing::state_types::TestTypes; use std::sync::Arc; diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 1c1718a918..5d86caa04a 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -5,6 +5,7 @@ not(any(test, debug_assertions, hotshot_example)), deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" )] +#![allow(clippy::panicking_unwrap)] use hotshot_task::{event_stream::ChannelStream, task_impls::HSTWithEvent}; diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index a10bee76ed..126c73a582 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] #![cfg(foo)] use hotshot::{ certificate::QuorumCertificate, diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index 0a1cdae806..f661fc914d 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 88cde9eb4f..7e81593403 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 9e0befae8c..9beef1d5f0 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use std::time::Duration; use hotshot_testing::{ diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 3d532126b0..dad488cc8b 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 8fa03a8179..4fec27dc9b 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use std::time::Duration; use hotshot_testing::{ diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index d47638506f..3ee9936398 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 43b1313e9a..959bba21ac 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/unit.rs b/testing/tests/unit.rs index ff857ae443..501d848551 100644 --- a/testing/tests/unit.rs +++ b/testing/tests/unit.rs @@ -1,3 +1,5 @@ +#![allow(clippy::panic)] + mod unit { mod message; } diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 6e03fcc0ef..57a0cd9d00 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] #[cfg(test)] use std::marker::PhantomData; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 68d40f4d2c..763351ae3c 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 2a0f5c94df..9f56254323 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 81277e7821..e42e6e1e79 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -1,3 +1,4 @@ +#![allow(clippy::panic)] use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; From 632e6cfeff9c234cca9231b2f529411f3e17ea94 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 21 Jan 2024 19:03:55 -0500 Subject: [PATCH 0709/1393] fix: increase timeout --- testing/tests/unreliable_network.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index b959185abc..c2e4a56249 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -30,7 +30,7 @@ async fn libp2p_network_sync() { }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::new(240, 0), + duration: Duration::new(360, 0), }, ), unreliable_network: Some(Box::new(SynchronousNetwork { From 55f7bf06fa388d8d21457a4e795227b9a2e3b67d Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 22 Jan 2024 10:32:05 -0500 Subject: [PATCH 0710/1393] Revert "feat: allow panicks in examples and tests" This reverts commit a730e22036b7f719fe3d8b1bae253df0e79de876. --- hotshot/examples/combined/all.rs | 1 - hotshot/examples/combined/multi-validator.rs | 1 - hotshot/examples/combined/orchestrator.rs | 1 - hotshot/examples/combined/validator.rs | 1 - hotshot/examples/libp2p/all.rs | 1 - hotshot/examples/libp2p/multi-validator.rs | 1 - hotshot/examples/libp2p/orchestrator.rs | 1 - hotshot/examples/libp2p/validator.rs | 1 - hotshot/examples/webserver/all.rs | 1 - hotshot/examples/webserver/multi-validator.rs | 1 - hotshot/examples/webserver/multi-webserver.rs | 1 - hotshot/examples/webserver/orchestrator.rs | 1 - hotshot/examples/webserver/validator.rs | 1 - hotshot/examples/webserver/webserver.rs | 1 - testing/src/lib.rs | 1 - testing/tests/atomic_storage.rs | 1 - testing/tests/basic.rs | 1 - testing/tests/catchup.rs | 1 - testing/tests/combined_network.rs | 1 - testing/tests/da_task.rs | 1 - testing/tests/libp2p.rs | 1 - testing/tests/network_task.rs | 1 - testing/tests/timeout.rs | 1 - testing/tests/unit.rs | 2 -- testing/tests/unit/message.rs | 1 - testing/tests/vid_task.rs | 1 - testing/tests/view_sync_task.rs | 1 - testing/tests/web_server.rs | 1 - 28 files changed, 29 deletions(-) diff --git a/hotshot/examples/combined/all.rs b/hotshot/examples/combined/all.rs index aa59df0687..de602d4bea 100644 --- a/hotshot/examples/combined/all.rs +++ b/hotshot/examples/combined/all.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A example program using both the web server and libp2p /// types used for this example pub mod types; diff --git a/hotshot/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs index 2a4c575313..7329986afc 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/hotshot/examples/combined/multi-validator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A multi-validator using both the web server libp2p use async_compatibility_layer::{ art::async_spawn, diff --git a/hotshot/examples/combined/orchestrator.rs b/hotshot/examples/combined/orchestrator.rs index 07d295141a..ba038fabc1 100644 --- a/hotshot/examples/combined/orchestrator.rs +++ b/hotshot/examples/combined/orchestrator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! Orchestrator using the web server /// types used for this example pub mod types; diff --git a/hotshot/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs index 4b7aa8064b..04c0aa7cee 100644 --- a/hotshot/examples/combined/validator.rs +++ b/hotshot/examples/combined/validator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A validator using both the web server and libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/hotshot/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs index 30b7876de6..3dda9d6007 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/hotshot/examples/libp2p/all.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A example program using libp2p /// types used for this example pub mod types; diff --git a/hotshot/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs index d401ca3eb5..f64d2f25ac 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/hotshot/examples/libp2p/multi-validator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A multi-validator using libp2p use async_compatibility_layer::{ art::async_spawn, diff --git a/hotshot/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs index ce55bee17d..9298b9e372 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/hotshot/examples/libp2p/orchestrator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! An orchestrator using libp2p /// types used for this example diff --git a/hotshot/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs index 79bbcfda40..d27f7f1ea1 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/hotshot/examples/libp2p/validator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A validator using libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/hotshot/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs index 5d66446c4c..eb92ee77d4 100644 --- a/hotshot/examples/webserver/all.rs +++ b/hotshot/examples/webserver/all.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A example program using the web server /// types used for this example pub mod types; diff --git a/hotshot/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs index f0b86c7bb8..780aa24207 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/hotshot/examples/webserver/multi-validator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A multi-validator using the web server use async_compatibility_layer::{ art::async_spawn, diff --git a/hotshot/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs index 9c0db71d2b..e338835c07 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/hotshot/examples/webserver/multi-webserver.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A multi web server use std::sync::Arc; diff --git a/hotshot/examples/webserver/orchestrator.rs b/hotshot/examples/webserver/orchestrator.rs index 71c8cfa3b5..5a39e56471 100644 --- a/hotshot/examples/webserver/orchestrator.rs +++ b/hotshot/examples/webserver/orchestrator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A orchestrator using the web server /// types used for this example diff --git a/hotshot/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs index 525701c8ff..4bfcd9d78c 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/hotshot/examples/webserver/validator.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! A validator using the web server use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/hotshot/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs index 2a448b06c3..e6c9a468c8 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/hotshot/examples/webserver/webserver.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] //! web server example use hotshot_testing::state_types::TestTypes; use std::sync::Arc; diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 5d86caa04a..1c1718a918 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -5,7 +5,6 @@ not(any(test, debug_assertions, hotshot_example)), deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" )] -#![allow(clippy::panicking_unwrap)] use hotshot_task::{event_stream::ChannelStream, task_impls::HSTWithEvent}; diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index 126c73a582..a10bee76ed 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] #![cfg(foo)] use hotshot::{ certificate::QuorumCertificate, diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs index f661fc914d..0a1cdae806 100644 --- a/testing/tests/basic.rs +++ b/testing/tests/basic.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 7e81593403..88cde9eb4f 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 9beef1d5f0..9e0befae8c 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use std::time::Duration; use hotshot_testing::{ diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index dad488cc8b..3d532126b0 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 4fec27dc9b..8fa03a8179 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use std::time::Duration; use hotshot_testing::{ diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 3ee9936398..d47638506f 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 959bba21ac..43b1313e9a 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/unit.rs b/testing/tests/unit.rs index 501d848551..ff857ae443 100644 --- a/testing/tests/unit.rs +++ b/testing/tests/unit.rs @@ -1,5 +1,3 @@ -#![allow(clippy::panic)] - mod unit { mod message; } diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 57a0cd9d00..6e03fcc0ef 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] #[cfg(test)] use std::marker::PhantomData; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 763351ae3c..68d40f4d2c 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 9f56254323..2a0f5c94df 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index e42e6e1e79..81277e7821 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; From f32b3146d4000c558243a42bc5b640d595962757 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 22 Jan 2024 10:41:29 -0500 Subject: [PATCH 0711/1393] chore: testing macro lints --- testing-macros/src/lib.rs | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/testing-macros/src/lib.rs b/testing-macros/src/lib.rs index 5b7ac6daac..9f246b4c66 100644 --- a/testing-macros/src/lib.rs +++ b/testing-macros/src/lib.rs @@ -1,4 +1,4 @@ -extern crate proc_macro; +//! Macros for testing over all network implementations and nodetype implementations use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; @@ -25,6 +25,7 @@ struct CrossTestData { } impl CrossTestDataBuilder { + /// if we've extracted all the metadata fn is_ready(&self) -> bool { self.impls.is_some() && self.types.is_some() @@ -38,10 +39,15 @@ impl CrossTestDataBuilder { /// requisite data to generate a single test #[derive(derive_builder::Builder, Debug, Clone)] struct TestData { + /// type ty: ExprPath, + /// impl imply: ExprPath, + /// name of test test_name: Ident, + /// test description metadata: Expr, + /// whether or not to ignore the test ignore: LitBool, } @@ -56,7 +62,7 @@ impl ToLowerSnakeStr for ExprPath { self.path .segments .iter() - .fold("".to_string(), |mut acc, s| { + .fold(String::new(), |mut acc, s| { acc.push_str(&s.ident.to_string().to_lowercase()); acc.push('_'); acc @@ -66,16 +72,18 @@ impl ToLowerSnakeStr for ExprPath { } impl ToLowerSnakeStr for ExprTuple { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] fn to_lower_snake_str(&self) -> String { self.elems .iter() .map(|x| { let Expr::Path(expr_path) = x else { - panic!("Expected path expr, got {:?}", x) + panic!("Expected path expr, got {x:?}"); }; expr_path }) - .fold("".to_string(), |mut acc, s| { + .fold(String::new(), |mut acc, s| { acc.push_str(&s.to_lower_snake_str()); acc }) @@ -83,6 +91,7 @@ impl ToLowerSnakeStr for ExprTuple { } impl TestData { + /// generate the code for a single test fn generate_test(&self) -> TokenStream2 { let TestData { ty, @@ -127,7 +136,9 @@ mod keywords { } impl Parse for CrossTestData { - fn parse(input: ParseStream) -> Result { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn parse(input: ParseStream<'_>) -> Result { let mut description = CrossTestDataBuilder::create_empty(); while !description.is_ready() { @@ -167,21 +178,24 @@ impl Parse for CrossTestData { } description .build() - .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{}", e))) + .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{e}"))) } } +/// Helper function to do the actual code gen +/// allow panic because this is a compiler error +#[allow(clippy::panic)] fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { let impls = test_spec.impls.elems.iter().map(|t| { let Expr::Path(p) = t else { - panic!("Expected Path for Impl! Got {:?}", t) + panic!("Expected Path for Impl! Got {t:?}"); }; p }); // let types = test_spec.types.elems.iter().map(|t| { let Expr::Path(p) = t else { - panic!("Expected Path for Type! Got {:?}", t) + panic!("Expected Path for Type! Got {t:?}"); }; p }); @@ -231,8 +245,8 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { /// Generate a cartesian product of tests across all types /// Arguments: /// - `Impls: []` - a list of types that implement nodetype -/// - `Metadata`: TestMetadata::default()` - test metadata -/// - `Types: []` - a list types that implement NodeImplementation over the types in `Impls` +/// - `Metadata`: `TestMetadata::default()` - test metadata +/// - `Types: []` - a list types that implement `NodeImplementation` over the types in `Impls` /// - `TestName: example_test` - the name of the test /// - `Ignore`: whether or not this set of tests are ignored /// Example usage: see tests in this module From 628872b931b33c0a56b46652ed2b93020d9c327c Mon Sep 17 00:00:00 2001 From: MRain Date: Mon, 22 Jan 2024 11:38:16 -0500 Subject: [PATCH 0712/1393] update serialization macro for light client state --- types/src/light_client.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 741e178609..24ab6561e6 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -1,10 +1,16 @@ //! Types and structs associated with light client state +use ark_ed_on_bn254::EdwardsConfig as Config; use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use jf_primitives::signatures::schnorr; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; +use tagged_base64::tagged; -/// A serialized light client state for proof generation -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)] +/// A light client state +#[tagged("LIGHT_CLIENT_STATE")] +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize, Default)] pub struct LightClientState { /// Current view number pub view_number: usize, @@ -45,10 +51,6 @@ impl From<&LightClientState> for [F; 7] { } } -use ark_ed_on_bn254::EdwardsConfig as Config; -use rand::SeedableRng; -use rand_chacha::ChaCha20Rng; - /// Signatures pub type StateSignature = schnorr::Signature; /// Verification key for verifying state signatures From 5969bfc11c4bcd1a955d9d3fbd15a13bc75b4fe6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 17:06:33 +0000 Subject: [PATCH 0713/1393] Bump proc-macro2 from 1.0.76 to 1.0.78 Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.76 to 1.0.78. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.76...1.0.78) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- testing-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index ba1d73ed1d..398a03e720 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ serde = { workspace = true } # proc macro stuff quote = "1.0.33" syn = { version = "2.0.43", features = ["full", "extra-traits"] } -proc-macro2 = "1.0.71" +proc-macro2 = "1.0.78" derive_builder = "0.12.0" [dev-dependencies] From cb5a2dfd99442b9a27c279febbc993712a828deb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 17:06:39 +0000 Subject: [PATCH 0714/1393] Bump local-ip-address from 0.5.6 to 0.5.7 Bumps [local-ip-address](https://github.com/EstebanBorai/local-ip-address) from 0.5.6 to 0.5.7. - [Release notes](https://github.com/EstebanBorai/local-ip-address/releases) - [Changelog](https://github.com/EstebanBorai/local-ip-address/blob/main/CHANGELOG.md) - [Commits](https://github.com/EstebanBorai/local-ip-address/commits) --- updated-dependencies: - dependency-name: local-ip-address dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- hotshot/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index c6df94a090..8ed04362ed 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -120,7 +120,7 @@ async-std = { workspace = true } clap = { version = "4.4", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } -local-ip-address = "0.5.6" +local-ip-address = "0.5.7" hotshot-testing = { path = "../testing" } [lints] From 9e008f3edbe91811490b002a5f0f0be85e8f0bbc Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 22 Jan 2024 11:46:45 -0800 Subject: [PATCH 0715/1393] Fix doc, fmt and lint --- hotshot/src/traits/storage/memory_storage.rs | 2 +- hotshot/src/types/handle.rs | 15 +++++++-------- task-impls/src/consensus.rs | 2 +- types/src/consensus.rs | 6 ++++-- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 7b1a4670d3..fe672fd558 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -144,7 +144,7 @@ mod test { view_number, _pd: PhantomData, }, - header.clone(), + header, Some(payload), dummy_leaf_commit, Vec::new(), diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 80259e4f1a..55764f81ce 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -95,19 +95,18 @@ impl + 'static> SystemContextHandl self.internal_event_stream.subscribe(filter).await } - /// Gets the current committed state of the [`SystemContext`] instance + /// Get the last decided validated state of the [`SystemContext`] instance. /// - /// # Errors - /// - /// Returns an error if the underlying `Storage` returns an error - pub async fn get_state(&self) -> TYPES::StateType { + /// # Panics + /// If the internal consensus is in an inconsistent state. + pub async fn get_decided_state(&self) -> TYPES::StateType { self.hotshot.get_decided_state().await } - /// Gets most recent decided leaf - /// # Panics + /// Get the last decided leaf of the [`SystemContext`] instance. /// - /// Panics if internal consensus is in an inconsistent state. + /// # Panics + /// If the internal consensus is in an inconsistent state. pub async fn get_decided_leaf(&self) -> Leaf { self.hotshot.get_decided_leaf().await } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e8742ca77f..6639972679 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1206,7 +1206,7 @@ impl, A: ConsensusApi + }; // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_header: leaf.block_header.clone(), + block_header, view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 489fb7421f..b55e963d39 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -334,6 +334,7 @@ impl Consensus { } /// Gets the last decided leaf. + /// /// # Panics /// if the last decided view's leaf does not exist in the state map or saved leaves, which /// should never happen. @@ -347,9 +348,10 @@ impl Consensus { self.saved_leaves.get(&leaf).unwrap().clone() } - /// Gets the last decided state. + /// Gets the last decided validated state. + /// /// # Panics - /// if the last decided view's state does not exist in the state map or saved leaves, which + /// If the last decided view's state does not exist in the state map or saved leaves, which /// should never happen. #[must_use] pub fn get_decided_state(&self) -> &TYPES::StateType { From fa635d371cc5e4720727a29989eae4b7d353bb61 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 22 Jan 2024 12:25:52 -0800 Subject: [PATCH 0716/1393] Fix lint --- task-impls/src/consensus.rs | 3 +-- testing/src/overall_safety_task.rs | 2 +- testing/src/task_helpers.rs | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a25cc14446..dd39cbc75c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -560,8 +560,7 @@ impl, A: ConsensusApi + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; let high_qc = consensus.high_qc.clone(); - let locked_view = consensus.locked_view; - + let locked_view = consensus.locked_view; drop(consensus); diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 9a35e482c4..c85736d1e8 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -107,7 +107,7 @@ pub struct RoundResult { /// block -> # entries decided on that block pub block_map: HashMap, - + /// node idx -> number transactions pub num_txns_map: HashMap, } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4e61a76cfb..0fd17171cc 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -197,6 +197,7 @@ pub fn build_assembled_sig< } /// build a quorum proposal and signature +#[allow(clippy::too_many_lines)] async fn build_quorum_proposal_and_signature( handle: &SystemContextHandle, private_key: &::PrivateKey, From cceeaf9ba9c92ca2da1faf2a93ddf5f4c2252daa Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 22 Jan 2024 17:50:36 -0800 Subject: [PATCH 0717/1393] Add instance state to consensus and fix build --- hotshot/src/lib.rs | 12 ++++---- hotshot/src/tasks/mod.rs | 6 ++-- hotshot/src/traits/storage/memory_storage.rs | 2 +- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 14 ++++++---- task-impls/src/da.rs | 2 +- task-impls/src/transactions.rs | 3 +- task-impls/src/view_sync.rs | 4 +-- task-impls/src/vote.rs | 2 +- testing/src/state_types.rs | 16 +++++++---- testing/src/task_helpers.rs | 16 +++++++---- testing/tests/consensus_task.rs | 9 +++--- testing/tests/memory_network.rs | 2 +- testing/tests/unit/message.rs | 2 +- types/src/consensus.rs | 26 ++++++++++-------- types/src/traits/node_implementation.rs | 7 +++-- types/src/traits/states.rs | 29 ++++++++++++-------- types/src/utils.rs | 4 +-- 18 files changed, 92 insertions(+), 66 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 9ff180ff7f..86e61b720c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -206,13 +206,14 @@ impl> SystemContext { .context(StorageSnafu)?; // insert genesis (or latest block) to state map - let mut state_map = BTreeMap::default(); - state_map.insert( + let mut validated_state_map = BTreeMap::default(); + let (validated_state, instance_state) = TYPES::ValidatedState::genesis(); + validated_state_map.insert( anchored_leaf.get_view_number(), View { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), - state: TYPES::StateType::genesis(), + state: validated_state, }, }, ); @@ -236,7 +237,8 @@ impl> SystemContext { let start_view = anchored_leaf.get_view_number(); let consensus = Consensus { - state_map, + instance_state, + validated_state_map, cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), saved_leaves, @@ -390,7 +392,7 @@ impl> SystemContext { /// /// # Panics /// Panics if internal state for consensus is inconsistent - pub async fn get_decided_state(&self) -> TYPES::StateType { + pub async fn get_decided_state(&self) -> TYPES::ValidatedState { self.inner .consensus .read() diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 918b9c8ce0..9afb93c65c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -23,7 +23,7 @@ use hotshot_task_impls::{ }, transactions::{TransactionTaskState, TransactionsTaskTypes}, vid::{VIDTaskState, VIDTaskTypes}, - view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, + view_sync::{ViewSyncTaskState, ViewSyncTaskValidatedStates}, }; use hotshot_types::traits::election::Membership; use hotshot_types::{ @@ -509,7 +509,7 @@ pub async fn add_view_sync_task>( )); let view_sync_task_builder = TaskBuilder::< - ViewSyncTaskStateTypes>, + ViewSyncTaskValidatedStates>, >::new(view_sync_name.to_string()) .register_event_stream(event_stream.clone(), view_sync_event_filter) .await @@ -521,7 +521,7 @@ pub async fn add_view_sync_task>( // we *just* registered let view_sync_task_id = view_sync_task_builder.get_task_id().unwrap(); - let view_sync_task = ViewSyncTaskStateTypes::build(view_sync_task_builder).launch(); + let view_sync_task = ViewSyncTaskValidatedStates::build(view_sync_task_builder).launch(); task_runner.add_task( view_sync_task_id, view_sync_name.to_string(), diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index fe672fd558..1a4a3b99c4 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -118,7 +118,7 @@ mod test { data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, + node_implementation::NodeType, signature_key::SignatureKey, states::ConsensusTime, }, }; use std::marker::PhantomData; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index d1d51127a1..8c5c03bf8f 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -99,7 +99,7 @@ impl + 'static> SystemContextHandl /// /// # Panics /// If the internal consensus is in an inconsistent state. - pub async fn get_decided_state(&self) -> TYPES::StateType { + pub async fn get_decided_state(&self) -> TYPES::ValidatedState { self.hotshot.get_decided_state().await } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c62d65270a..07bf2da8c6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -154,7 +154,7 @@ impl, A: ConsensusApi + async fn genesis_leaf(&self) -> Option> { let consensus = self.consensus.read().await; - let Some(genesis_view) = consensus.state_map.get(&TYPES::Time::genesis()) else { + let Some(genesis_view) = consensus.validated_state_map.get(&TYPES::Time::genesis()) else { error!("Couldn't find genesis view in state map."); return None; }; @@ -541,10 +541,11 @@ impl, A: ConsensusApi + rejected: Vec::new(), proposer_id: sender, }; - let state = - ::from_header(&proposal.data.block_header); + let state = ::from_header( + &proposal.data.block_header, + ); - consensus.state_map.insert( + consensus.validated_state_map.insert( view, View { view_inner: ViewInner::Leaf { @@ -592,6 +593,7 @@ impl, A: ConsensusApi + }; let parent_commitment = parent.commit(); let Ok(state) = consensus.get_decided_state().validate_and_apply_header( + &consensus.instance_state, &proposal.data.block_header.clone(), &parent.block_header.clone(), &view, @@ -749,7 +751,7 @@ impl, A: ConsensusApi + HashSet::new() }; - consensus.state_map.insert( + consensus.validated_state_map.insert( view, View { view_inner: ViewInner::Leaf { @@ -1164,7 +1166,7 @@ impl, A: ConsensusApi + let parent_view_number = &consensus.high_qc.get_view_number(); let mut reached_decided = false; - let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { // This should have been added by the replica? error!("Couldn't find parent view in state map, waiting for replica to see proposal\n parent view number: {}", **parent_view_number); return false; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index fb990557ba..55582b55eb 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -185,7 +185,7 @@ impl, A: ConsensusApi + // Ensure this view is in the view map for garbage collection, but do not overwrite if // there is already a view there: the replica task may have inserted a `Leaf` view which // contains strictly more information. - consensus.state_map.entry(view).or_insert(View { + consensus.validated_state_map.entry(view).or_insert(View { view_inner: ViewInner::DA { payload_commitment }, }); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 3fe87ab312..4bb3d46091 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -204,7 +204,8 @@ impl, A: ConsensusApi + let consensus = self.consensus.read().await; let parent_view_number = &consensus.high_qc.view_number; - let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) + else { error!( "Couldn't find high QC parent in state map. Parent view {:?}", parent_view_number diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 3626e496d1..6912a6fad1 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -125,7 +125,7 @@ impl< } /// Types for the main view sync task -pub type ViewSyncTaskStateTypes = HSTWithEvent< +pub type ViewSyncTaskValidatedStates = HSTWithEvent< ViewSyncTaskError, HotShotEvent, ChannelStream>, @@ -175,7 +175,7 @@ impl, A: ConsensusApi + } /// Types for view sync replica state -pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< +pub type ViewSyncReplicaTaskValidatedStates = HSTWithEvent< ViewSyncTaskError, HotShotEvent, ChannelStream>, diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index bf20a7bad9..1407f39136 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -126,7 +126,7 @@ impl< } /// Types for a vote accumulator Task -pub type VoteTaskStateTypes = HSTWithEvent< +pub type VoteTaskValidatedStates = HSTWithEvent< VoteTaskError, HotShotEvent, ChannelStream>, diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 84a4d10156..093c71f6d9 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -16,7 +16,16 @@ use crate::block_types::TestTransaction; use crate::block_types::{TestBlockHeader, TestBlockPayload}; pub use crate::node_types::TestTypes; -/// sequencing demo entry state +/// Instance-level state implementation for testing purposes. +pub struct TestInstanceState {} + +impl InstanceState for TestInstanceState { + fn new() -> Self { + TestInstanceState {} + } +} + +/// Validated state implementation for testing purposes. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct TestValidatedState { /// the block height @@ -66,6 +75,7 @@ impl ValidatedState for TestValidatedState { fn validate_and_apply_header( &self, + _instance: &Self::Instance, _proposed_header: &Self::BlockHeader, _parent_header: &Self::BlockHeader, view_number: &Self::Time, @@ -110,7 +120,3 @@ impl TestableState for TestValidatedState { ]) } } - -pub struct TestInstanceState {} - -impl InstanceState for TestInstanceState {} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 300e38a903..e35b119345 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -4,7 +4,7 @@ use std::marker::PhantomData; use crate::{ block_types::{TestBlockHeader, TestBlockPayload}, node_types::{MemoryImpl, TestTypes}, - state_types::TestState, + state_types::{TestInstanceState, TestValidatedState}, test_builder::TestMetadata, }; use commit::Committable; @@ -216,7 +216,7 @@ async fn build_quorum_proposal_and_signature( // parent_view_number should be equal to 0 let parent_view_number = &consensus.high_qc.get_view_number(); assert_eq!(parent_view_number.get_u64(), 0); - let Some(parent_view) = consensus.state_map.get(parent_view_number) else { + let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { panic!("Couldn't find high QC parent in state map."); }; let Some(leaf_view_0) = parent_view.get_leaf_commitment() else { @@ -249,7 +249,8 @@ async fn build_quorum_proposal_and_signature( rejected: vec![], proposer_id: *api.public_key(), }; - let mut parent_state = ::from_header(&parent_leaf.block_header); + let mut parent_state = + ::from_header(&parent_leaf.block_header); let mut signature = ::sign(private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); @@ -264,11 +265,16 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { let state_new_view = parent_state - .validate_and_apply_header(&block_header, &block_header, &ViewNumber::new(cur_view - 1)) + .validate_and_apply_header( + &TestInstanceState {}, + &block_header, + &block_header, + &ViewNumber::new(cur_view - 1), + ) .unwrap(); // save states for the previous view to pass all the qc checks // In the long term, we want to get rid of this, do not manually update consensus state - consensus.state_map.insert( + consensus.validated_state_map.insert( ViewNumber::new(cur_view - 1), View { view_inner: ViewInner::Leaf { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index c54a4f865f..41e49c865e 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -12,7 +12,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::state::ConsensusTime, + traits::states::ConsensusTime, }; use hotshot_types::{ simple_vote::QuorumData, @@ -35,7 +35,7 @@ async fn build_vote( let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); let parent = if justify_qc.is_genesis { - let Some(genesis_view) = consensus.state_map.get(&ViewNumber::new(0)) else { + let Some(genesis_view) = consensus.validated_state_map.get(&ViewNumber::new(0)) else { panic!("Couldn't find genesis view in state map."); }; let Some(leaf) = genesis_view.get_leaf_commitment() else { @@ -195,6 +195,7 @@ async fn test_consensus_vote() { // issue: https://github.com/EspressoSystems/HotShot/issues/2236 #[ignore] async fn test_consensus_with_vid() { + use hotshot::traits::BlockPayload; use hotshot::types::SignatureKey; use hotshot_task_impls::harness::run_harness; use hotshot_testing::block_types::TestBlockPayload; @@ -206,9 +207,7 @@ async fn test_consensus_with_vid() { use hotshot_types::simple_certificate::DACertificate; use hotshot_types::simple_vote::DAData; use hotshot_types::simple_vote::DAVote; - use hotshot_types::traits::block_contents::vid_commitment; - use hotshot_types::traits::state::TestableBlock; - use hotshot_types::traits::BlockPayload; + use hotshot_types::traits::block_contents::{vid_commitment, TestableBlock}; use hotshot_types::{ data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, }; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 4299a9c954..2c052242d9 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -9,8 +9,8 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; -use hotshot_testing::state_types::TestInstanceState; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; +use hotshot_testing::state_types::TestInstanceState; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestValidatedState, diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 6e03fcc0ef..1e9f4c88e4 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -13,7 +13,7 @@ use hotshot_types::{ signature_key::BLSPubKey, simple_certificate::SimpleCertificate, simple_vote::ViewSyncCommitData, - traits::{signature_key::SignatureKey, state::ConsensusTime}, + traits::{signature_key::SignatureKey, states::ConsensusTime}, }; #[test] diff --git a/types/src/consensus.rs b/types/src/consensus.rs index b55e963d39..4e370c67ab 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -32,9 +32,11 @@ type CommitmentMap = HashMap, T>; /// This will contain the state of all rounds. #[derive(custom_debug::Debug)] pub struct Consensus { - /// The phases that are currently loaded in memory - // TODO(https://github.com/EspressoSystems/hotshot/issues/153): Allow this to be loaded from `Storage`? - pub state_map: BTreeMap>, + /// Immutable instance-level state. + pub instance_state: TYPES::InstanceState, + + /// The validated states that are currently loaded in memory. + pub validated_state_map: BTreeMap>, /// All the DA certs we've received for current and future views. /// view -> DA cert @@ -254,7 +256,7 @@ impl Consensus { where F: FnMut(&Leaf) -> bool, { - let mut next_leaf = if let Some(view) = self.state_map.get(&start_from) { + let mut next_leaf = if let Some(view) = self.validated_state_map.get(&start_from) { view.get_leaf_commitment() .ok_or_else(|| HotShotError::InvalidState { context: format!( @@ -293,7 +295,7 @@ impl Consensus { } /// Garbage collects based on state change right now, this removes from both the - /// `saved_payloads` and `state_map` fields of `Consensus`. + /// `saved_payloads` and `validated_state_map` fields of `Consensus`. /// # Panics /// On inconsistent stored entries #[allow(clippy::unused_async)] // async for API compatibility reasons @@ -304,7 +306,7 @@ impl Consensus { ) { // state check let anchor_entry = self - .state_map + .validated_state_map .iter() .next() .expect("INCONSISTENT STATE: anchor leaf not in state map!"); @@ -316,13 +318,13 @@ impl Consensus { // perform gc self.saved_da_certs .retain(|view_number, _| *view_number >= old_anchor_view); - self.state_map + self.validated_state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_payload_commitment()) .for_each(|payload_commitment| { self.saved_payloads.remove(payload_commitment); }); - self.state_map + self.validated_state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { @@ -330,7 +332,7 @@ impl Consensus { self.saved_payloads.remove(removed.get_payload_commitment()); } }); - self.state_map = self.state_map.split_off(&new_anchor_view); + self.validated_state_map = self.validated_state_map.split_off(&new_anchor_view); } /// Gets the last decided leaf. @@ -341,7 +343,7 @@ impl Consensus { #[must_use] pub fn get_decided_leaf(&self) -> Leaf { let decided_view_num = self.last_decided_view; - let view = self.state_map.get(&decided_view_num).unwrap(); + let view = self.validated_state_map.get(&decided_view_num).unwrap(); let leaf = view .get_leaf_commitment() .expect("Decided leaf not found! Consensus internally inconsistent"); @@ -354,9 +356,9 @@ impl Consensus { /// If the last decided view's state does not exist in the state map or saved leaves, which /// should never happen. #[must_use] - pub fn get_decided_state(&self) -> &TYPES::StateType { + pub fn get_decided_state(&self) -> &TYPES::ValidatedState { let decided_view_num = self.last_decided_view; - let view = self.state_map.get(&decided_view_num).unwrap(); + let view = self.validated_state_map.get(&decided_view_num).unwrap(); view.get_state() .expect("Decided state not found! Consensus internally inconsistent") } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 123f2b3a58..4835a594ae 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -296,16 +296,17 @@ pub trait NodeType: /// The election config type that this hotshot setup is using. type ElectionConfigType: ElectionConfig; + /// The instance-level state type that this hotshot setup is using. + type InstanceState: InstanceState; + /// The validated state type that this hotshot setup is using. type ValidatedState: ValidatedState< + Instance = Self::InstanceState, BlockHeader = Self::BlockHeader, BlockPayload = Self::BlockPayload, Time = Self::Time, >; - /// The instance-level state type that this hotshot setup is using. - type InstanceState: InstanceState; - /// Membership used for this implementation type Membership: Membership; } diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 600e361ffa..033aa13c30 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -1,11 +1,11 @@ -//! Abstraction over the global state that blocks modify +//! Abstractions over the immutable instance-level state and hte global state that blocks modify. //! -//! This module provides the [`State`] trait, which serves as an compatibility over the current -//! network state, which is modified by the transactions contained within blocks. +//! This module provides the [`InstanceState`] and [`ValidatedState`] traits, which serve as +//! compatibilities over the current network state, which is modified by the transactions contained +//! within blocks. use crate::traits::BlockPayload; use commit::Committable; -use jf_plonk::proof_system::batch_arg::Instance; use serde::{de::DeserializeOwned, Serialize}; use std::{ error::Error, @@ -17,6 +17,13 @@ use std::{ use super::block_contents::{BlockHeader, TestableBlock}; +/// Instance-level state, which allows us to fetch missing validated state. +pub trait InstanceState: Send + Sync { + /// Construct the state. + #[must_use] + fn new() -> Self; +} + /// Abstraction over the state that blocks modify /// /// This trait represents the behaviors that the 'global' ledger state must have: @@ -54,7 +61,7 @@ pub trait ValidatedState: /// Check if the proposed block header is valid and apply it to the state if so. /// /// Returns the new state. - /// + /// /// # Arguments /// * `instance` - Immutable instance-level state. /// @@ -74,10 +81,13 @@ pub trait ValidatedState: /// This can also be used to rebuild the state for catchup. fn from_header(block_header: &Self::BlockHeader) -> Self; - /// Construct a genesis state. + /// Construct a genesis validated state and the instance-level state. #[must_use] - fn genesis() -> Self { - Self::from_header(&Self::BlockHeader::genesis().0) + fn genesis() -> (Self, Self::Instance) { + ( + Self::from_header(&Self::BlockHeader::genesis().0), + ::new(), + ) } /// Gets called to notify the persistence backend that this state has been committed @@ -133,6 +143,3 @@ pub trait ConsensusTime: /// Get the u64 format of time fn get_u64(&self) -> u64; } - -/// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState {} diff --git a/types/src/utils.rs b/types/src/utils.rs index 6df7749cf0..0af493bb97 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -24,7 +24,7 @@ pub enum ViewInner { /// Proposed leaf leaf: Commitment>, /// Validated state. - state: TYPES::StateType, + state: TYPES::ValidatedState, }, /// Leaf has failed Failed, @@ -43,7 +43,7 @@ impl ViewInner { /// return the underlying validated state if it exists #[must_use] - pub fn get_state(&self) -> Option<&TYPES::StateType> { + pub fn get_state(&self) -> Option<&TYPES::ValidatedState> { if let Self::Leaf { state, .. } = self { Some(state) } else { From 21dbf1393fa4f0b23a0f52027793050826825a1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 03:05:23 +0000 Subject: [PATCH 0718/1393] Bump derive_builder from 0.12.0 to 0.13.0 Bumps [derive_builder](https://github.com/colin-kiegel/rust-derive-builder) from 0.12.0 to 0.13.0. - [Release notes](https://github.com/colin-kiegel/rust-derive-builder/releases) - [Commits](https://github.com/colin-kiegel/rust-derive-builder/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: derive_builder dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- libp2p-networking/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index a0e56ed4da..6e6eb49461 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -18,7 +18,7 @@ async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } -derive_builder = "0.12.0" +derive_builder = "0.13.0" either = { workspace = true } futures = { workspace = true } hotshot-constants = { path = "../constants" } diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 398a03e720..7445ecda3b 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -26,7 +26,7 @@ serde = { workspace = true } quote = "1.0.33" syn = { version = "2.0.43", features = ["full", "extra-traits"] } proc-macro2 = "1.0.78" -derive_builder = "0.12.0" +derive_builder = "0.13.0" [dev-dependencies] async-lock = { workspace = true } From 5127ebe273f13993b3daa53625e670aa051a8bb3 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 23 Jan 2024 07:56:24 -0500 Subject: [PATCH 0719/1393] [CI Stability] Immediately fail test on Libp2p panic (#2430) * fix panic * fix lints * lints x2 * set timeout coverage back --- libp2p-networking/tests/counter.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 93aab7ea26..e6faaaf2f3 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -148,7 +148,8 @@ async fn run_request_response_increment<'a>( match stream.next().await.unwrap() { Ok(()) => {} - Err(e) => panic!("timeout : {e:?} waiting handle {requestee_pid:?} to update state"), + Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); + std::process::exit(-1)}, } requester_handle .direct_request(requestee_pid, &CounterMessage::AskForCounter) @@ -156,8 +157,8 @@ async fn run_request_response_increment<'a>( .context(HandleSnafu)?; match stream.next().await.unwrap() { Ok(()) => {} - Err(e) => panic!("timeout : {e:?} waiting handle {requestee_pid:?} to update state"), - } + Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); + std::process::exit(-1)}, } let s1 = requester_handle.state().await; @@ -212,7 +213,10 @@ async fn run_gossip_round( // unwrap is okay because stream must have 2 * (len - 1) elements match merged_streams.next().await.unwrap() { Ok(()) => {} - Err(e) => panic!("timeout : {e:?} waiting handle {i:?} to subscribe to state events"), + Err(e) => { + error!("timed out waiting for handle {i:?} to subscribe to state events: {e}"); + std::process::exit(-1) + } } } @@ -343,7 +347,8 @@ async fn run_dht_rounds( handle.get_record_timeout(&key, timeout).await; match result { Err(e) => { - panic!("DHT error {e:?} during GET"); + error!("DHT error {e:?} during GET"); + std::process::exit(-1); } Ok(v) => { assert_eq!(v, value); @@ -435,7 +440,8 @@ async fn run_request_response_increment_all( for handle in handles { states.push(handle.state().await); } - panic!("states: {states:?}"); + error!("states: {states:?}"); + std::process::exit(-1); } } From 47c83a1f4813352402093a36615ca9cfe4dc20bb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 11:15:55 -0500 Subject: [PATCH 0720/1393] index payloads by view --- hotshot/src/lib.rs | 7 +++---- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- types/src/consensus.rs | 13 +++---------- 4 files changed, 8 insertions(+), 16 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 69a10e582a..9612df7a78 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -39,7 +39,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - consensus::{Consensus, ConsensusMetricsValue, PayloadStore, View, ViewInner, ViewQueue}, + consensus::{Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::Leaf, error::StorageSnafu, event::EventType, @@ -217,9 +217,8 @@ impl> SystemContext { ); let mut saved_leaves = HashMap::new(); - let mut saved_payloads = PayloadStore::default(); + let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); - let payload_commitment = anchored_leaf.get_payload_commitment(); if let Some(payload) = anchored_leaf.get_block_payload() { let encoded_txns = match payload.encode() { // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. @@ -229,7 +228,7 @@ impl> SystemContext { return Err(HotShotError::BlockError { source: e }); } }; - saved_payloads.insert(payload_commitment, encoded_txns); + saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); } let start_view = anchored_leaf.get_view_number(); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e2848584c3..7acc4254cf 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -702,7 +702,7 @@ impl, A: ConsensusApi + // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. if let Some(encoded_txns) = - consensus.saved_payloads.get(leaf.get_payload_commitment()) + consensus.saved_payloads.get(&leaf.get_view_number()) { let payload = BlockPayload::from_bytes( encoded_txns.clone().into_iter(), diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index a420a15946..bab588d1dd 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -192,7 +192,7 @@ impl, A: ConsensusApi + // Record the payload we have promised to make available. consensus .saved_payloads - .insert(payload_commitment, proposal.data.encoded_transactions); + .insert(view, proposal.data.encoded_transactions); } HotShotEvent::DAVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e86939804e..d18622ae03 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -55,7 +55,7 @@ pub struct Consensus { /// /// Contains the block payload commitment and encoded transactions for every leaf in /// `saved_leaves` if that payload is available. - pub saved_payloads: PayloadStore, + pub saved_payloads: BTreeMap>, /// The `locked_qc` view number pub locked_view: TYPES::Time, @@ -316,21 +316,14 @@ impl Consensus { // perform gc self.saved_da_certs .retain(|view_number, _| *view_number >= old_anchor_view); - self.state_map - .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_payload_commitment()) - .for_each(|payload_commitment| { - self.saved_payloads.remove(payload_commitment); - }); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { - if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_payloads.remove(removed.get_payload_commitment()); - } + self.saved_leaves.remove(&leaf); }); self.state_map = self.state_map.split_off(&new_anchor_view); + self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); } /// Gets the last decided state From 7b46ad3395216c2166c4271071a43af8146fea83 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 11:40:56 -0500 Subject: [PATCH 0721/1393] Index payload store by view --- hotshot/src/lib.rs | 7 +++---- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- types/src/consensus.rs | 13 +++---------- 4 files changed, 8 insertions(+), 16 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ad2795ab95..2995467458 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -50,7 +50,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; use hotshot_types::{ - consensus::{Consensus, ConsensusMetricsValue, PayloadStore, View, ViewInner, ViewQueue}, + consensus::{Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::Leaf, error::StorageSnafu, message::{ @@ -222,9 +222,8 @@ impl> SystemContext { ); let mut saved_leaves = HashMap::new(); - let mut saved_payloads = PayloadStore::default(); + let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); - let payload_commitment = anchored_leaf.get_payload_commitment(); if let Some(payload) = anchored_leaf.get_block_payload() { let encoded_txns = match payload.encode() { // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. @@ -234,7 +233,7 @@ impl> SystemContext { return Err(HotShotError::BlockError { source: e }); } }; - saved_payloads.insert(payload_commitment, encoded_txns); + saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); } let start_view = anchored_leaf.get_view_number(); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ce8b07b37c..3a2a34d35b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -680,7 +680,7 @@ impl, A: ConsensusApi + // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. if let Some(encoded_txns) = - consensus.saved_payloads.get(leaf.get_payload_commitment()) + consensus.saved_payloads.get(&leaf.get_view_number()) { let payload = BlockPayload::from_bytes( encoded_txns.clone().into_iter(), diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 97e530ebfc..f0c183b39f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -177,7 +177,7 @@ impl, A: ConsensusApi + // Record the payload we have promised to make available. consensus .saved_payloads - .insert(payload_commitment, proposal.data.encoded_transactions); + .insert(view, proposal.data.encoded_transactions); } HotShotEvent::DAVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e86939804e..19649da97c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -55,7 +55,7 @@ pub struct Consensus { /// /// Contains the block payload commitment and encoded transactions for every leaf in /// `saved_leaves` if that payload is available. - pub saved_payloads: PayloadStore, + pub saved_payloads: BTreeMap>, /// The `locked_qc` view number pub locked_view: TYPES::Time, @@ -316,21 +316,14 @@ impl Consensus { // perform gc self.saved_da_certs .retain(|view_number, _| *view_number >= old_anchor_view); - self.state_map - .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_payload_commitment()) - .for_each(|payload_commitment| { - self.saved_payloads.remove(payload_commitment); - }); self.state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { - if let Some(removed) = self.saved_leaves.remove(&leaf) { - self.saved_payloads.remove(removed.get_payload_commitment()); - } + self.saved_leaves.remove(&leaf); }); self.state_map = self.state_map.split_off(&new_anchor_view); + self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); } /// Gets the last decided state From 648d45ac4ba282518c337212fccbe83e1f94b31a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 11:44:45 -0500 Subject: [PATCH 0722/1393] format --- task-impls/src/consensus.rs | 3 +-- types/src/consensus.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 3a2a34d35b..cc024d6ac9 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -546,8 +546,7 @@ impl, A: ConsensusApi + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; let high_qc = consensus.high_qc.clone(); - let locked_view = consensus.locked_view; - + let locked_view = consensus.locked_view; drop(consensus); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 19649da97c..d18622ae03 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -320,7 +320,7 @@ impl Consensus { .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { - self.saved_leaves.remove(&leaf); + self.saved_leaves.remove(&leaf); }); self.state_map = self.state_map.split_off(&new_anchor_view); self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); From d2a64bcb8d6fafece33e72a2a3a4746768ebe1ac Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 12:05:30 -0500 Subject: [PATCH 0723/1393] update comment --- types/src/consensus.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index d18622ae03..16e4d8b710 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -53,8 +53,7 @@ pub struct Consensus { /// Saved payloads. /// - /// Contains the block payload commitment and encoded transactions for every leaf in - /// `saved_leaves` if that payload is available. + /// Encoded transactions for every view if we got a payload for that view. pub saved_payloads: BTreeMap>, /// The `locked_qc` view number From 8ec915a79ac57a26bc96de343d6ebb48e05bca12 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 12:06:15 -0500 Subject: [PATCH 0724/1393] update comment --- types/src/consensus.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index d18622ae03..16e4d8b710 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -53,8 +53,7 @@ pub struct Consensus { /// Saved payloads. /// - /// Contains the block payload commitment and encoded transactions for every leaf in - /// `saved_leaves` if that payload is available. + /// Encoded transactions for every view if we got a payload for that view. pub saved_payloads: BTreeMap>, /// The `locked_qc` view number From 21a92fea0650434fdb90efc798e2e439d01239f3 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 12:12:39 -0500 Subject: [PATCH 0725/1393] kill 'PayloadStore' --- task-impls/src/consensus.rs | 3 +- types/src/consensus.rs | 60 ++----------------------------------- 2 files changed, 4 insertions(+), 59 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7acc4254cf..6e7f540816 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -558,8 +558,7 @@ impl, A: ConsensusApi + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; let high_qc = consensus.high_qc.clone(); - let locked_view = consensus.locked_view; - + let locked_view = consensus.locked_view; drop(consensus); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 16e4d8b710..80bea89de4 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -7,7 +7,7 @@ pub use crate::{ use displaydoc::Display; use crate::{ - data::{Leaf, VidCommitment}, + data::Leaf, error::HotShotError, simple_certificate::{DACertificate, QuorumCertificate}, traits::{ @@ -17,9 +17,9 @@ use crate::{ utils::Terminator, }; use commit::Commitment; -use derivative::Derivative; + use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, sync::{Arc, Mutex}, }; use tracing::error; @@ -339,57 +339,3 @@ impl Consensus { self.saved_leaves.get(&leaf).unwrap().clone() } } - -/// Mapping from block payload commitments to the encoded transactions. -/// -/// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the -/// same block, and the block will only be deleted after _all_ such objects are garbage collected. -/// For example, multiple leaves may temporarily reference the same block on different branches, -/// before all but one branch are ultimately garbage collected. -#[derive(Clone, Debug, Derivative)] -#[derivative(Default(bound = ""))] -pub struct PayloadStore(HashMap, u64)>); - -impl PayloadStore { - /// Save the encoded transactions for later retrieval. - /// - /// After calling this function, and before the corresponding call to [`remove`](Self::remove), - /// `self.get(payload_commitment)` will return `Some(encoded_transactions)`. - /// - /// This function will increment a reference count on the saved payload commitment, so that - /// multiple calls to [`insert`](Self::insert) for the same payload commitment result in - /// multiple owning references to the payload commitment. [`remove`](Self::remove) must be - /// called once for each reference before the payload commitment will be deallocated. - pub fn insert(&mut self, payload_commitment: VidCommitment, encoded_transactions: Vec) { - self.0 - .entry(payload_commitment) - .and_modify(|(_, refcount)| *refcount += 1) - .or_insert((encoded_transactions, 1)); - } - - /// Get the saved encoded transactions, if available. - /// - /// If the encoded transactions has been saved with [`insert`](Self::insert), this function - /// will retrieve it. It may return [`None`] if a block with the given commitment has not been - /// saved or if the block has been dropped with [`remove`](Self::remove). - #[must_use] - pub fn get(&self, payload_commitment: VidCommitment) -> Option<&Vec> { - self.0.get(&payload_commitment).map(|(encoded, _)| encoded) - } - - /// Drop a reference to the saved encoded transactions. - /// - /// If the set exists and this call drops the last reference to it, the set will be returned, - /// Otherwise, the return value is [`None`]. - pub fn remove(&mut self, payload_commitment: VidCommitment) -> Option> { - if let Entry::Occupied(mut e) = self.0.entry(payload_commitment) { - let (_, refcount) = e.get_mut(); - *refcount -= 1; - if *refcount == 0 { - let (encoded, _) = e.remove(); - return Some(encoded); - } - } - None - } -} From e2d675958a640d0a270511371c422d3b3f5e7a83 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 12:14:17 -0500 Subject: [PATCH 0726/1393] kill 'PayloadStore' --- types/src/consensus.rs | 60 +++--------------------------------------- 1 file changed, 3 insertions(+), 57 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 16e4d8b710..80bea89de4 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -7,7 +7,7 @@ pub use crate::{ use displaydoc::Display; use crate::{ - data::{Leaf, VidCommitment}, + data::Leaf, error::HotShotError, simple_certificate::{DACertificate, QuorumCertificate}, traits::{ @@ -17,9 +17,9 @@ use crate::{ utils::Terminator, }; use commit::Commitment; -use derivative::Derivative; + use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, sync::{Arc, Mutex}, }; use tracing::error; @@ -339,57 +339,3 @@ impl Consensus { self.saved_leaves.get(&leaf).unwrap().clone() } } - -/// Mapping from block payload commitments to the encoded transactions. -/// -/// Entries in this mapping are reference-counted, so multiple consensus objects can refer to the -/// same block, and the block will only be deleted after _all_ such objects are garbage collected. -/// For example, multiple leaves may temporarily reference the same block on different branches, -/// before all but one branch are ultimately garbage collected. -#[derive(Clone, Debug, Derivative)] -#[derivative(Default(bound = ""))] -pub struct PayloadStore(HashMap, u64)>); - -impl PayloadStore { - /// Save the encoded transactions for later retrieval. - /// - /// After calling this function, and before the corresponding call to [`remove`](Self::remove), - /// `self.get(payload_commitment)` will return `Some(encoded_transactions)`. - /// - /// This function will increment a reference count on the saved payload commitment, so that - /// multiple calls to [`insert`](Self::insert) for the same payload commitment result in - /// multiple owning references to the payload commitment. [`remove`](Self::remove) must be - /// called once for each reference before the payload commitment will be deallocated. - pub fn insert(&mut self, payload_commitment: VidCommitment, encoded_transactions: Vec) { - self.0 - .entry(payload_commitment) - .and_modify(|(_, refcount)| *refcount += 1) - .or_insert((encoded_transactions, 1)); - } - - /// Get the saved encoded transactions, if available. - /// - /// If the encoded transactions has been saved with [`insert`](Self::insert), this function - /// will retrieve it. It may return [`None`] if a block with the given commitment has not been - /// saved or if the block has been dropped with [`remove`](Self::remove). - #[must_use] - pub fn get(&self, payload_commitment: VidCommitment) -> Option<&Vec> { - self.0.get(&payload_commitment).map(|(encoded, _)| encoded) - } - - /// Drop a reference to the saved encoded transactions. - /// - /// If the set exists and this call drops the last reference to it, the set will be returned, - /// Otherwise, the return value is [`None`]. - pub fn remove(&mut self, payload_commitment: VidCommitment) -> Option> { - if let Entry::Occupied(mut e) = self.0.entry(payload_commitment) { - let (_, refcount) = e.get_mut(); - *refcount -= 1; - if *refcount == 0 { - let (encoded, _) = e.remove(); - return Some(encoded); - } - } - None - } -} From e6286b8f57dccb8c22ee4fd3bd90c1b23a9fb20a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Jan 2024 13:37:20 -0500 Subject: [PATCH 0727/1393] store and empty block during init --- hotshot/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 2995467458..acceff2c14 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -225,7 +225,7 @@ impl> SystemContext { let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); if let Some(payload) = anchored_leaf.get_block_payload() { - let encoded_txns = match payload.encode() { + let encoded_txns: Vec = match payload.encode() { // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. // Ok(encoded) => encoded.into_iter().collect(), @@ -233,7 +233,9 @@ impl> SystemContext { return Err(HotShotError::BlockError { source: e }); } }; - saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); + saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns.clone()); + // Insert the genesis block for the first view, that's what the first leader will propose + saved_payloads.insert(TYPES::Time::new(1), encoded_txns); } let start_view = anchored_leaf.get_view_number(); From c6bf2040642b91e9178b9d729650c0607cb91714 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 24 Jan 2024 10:32:25 -0500 Subject: [PATCH 0728/1393] Init view 1 with empty payload as well --- hotshot/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 9612df7a78..8608902394 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -220,7 +220,7 @@ impl> SystemContext { let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); if let Some(payload) = anchored_leaf.get_block_payload() { - let encoded_txns = match payload.encode() { + let encoded_txns: Vec = match payload.encode() { // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. // Ok(encoded) => encoded.into_iter().collect(), @@ -228,7 +228,8 @@ impl> SystemContext { return Err(HotShotError::BlockError { source: e }); } }; - saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); + saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns.clone()); + saved_payloads.insert(TYPES::Time::new(1), encoded_txns); } let start_view = anchored_leaf.get_view_number(); From 60cafc0da8369ccfa1e1ab273187e77797179ad4 Mon Sep 17 00:00:00 2001 From: MRain Date: Wed, 24 Jan 2024 10:57:34 -0500 Subject: [PATCH 0729/1393] adding more derives for light client state --- types/src/light_client.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 24ab6561e6..2918902977 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -10,7 +10,18 @@ use tagged_base64::tagged; /// A light client state #[tagged("LIGHT_CLIENT_STATE")] -#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize, Default)] +#[derive( + Clone, + Debug, + CanonicalSerialize, + CanonicalDeserialize, + Default, + Eq, + PartialEq, + PartialOrd, + Ord, + Hash, +)] pub struct LightClientState { /// Current view number pub view_number: usize, From 3088ca322a8986a67bd1c7f9def0e2448c038ecf Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 24 Jan 2024 11:00:41 -0500 Subject: [PATCH 0730/1393] lint unrelated files --- testing/tests/unreliable_network.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index c2e4a56249..e39ae11e98 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -44,7 +44,7 @@ async fn libp2p_network_sync() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -122,7 +122,7 @@ async fn libp2p_network_async() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -260,7 +260,7 @@ async fn libp2p_network_partially_sync() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -339,5 +339,5 @@ async fn libp2p_network_chaos() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } From 38e501fa6dd2c32e09f9b47a0f24549f9abff790 Mon Sep 17 00:00:00 2001 From: MRain Date: Wed, 24 Jan 2024 12:37:55 -0500 Subject: [PATCH 0731/1393] fix lint --- task-impls/src/consensus.rs | 3 +-- testing/tests/unreliable_network.rs | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e2848584c3..b2bb79f895 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -558,8 +558,7 @@ impl, A: ConsensusApi + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; let high_qc = consensus.high_qc.clone(); - let locked_view = consensus.locked_view; - + let locked_view = consensus.locked_view; drop(consensus); diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index c2e4a56249..e39ae11e98 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -44,7 +44,7 @@ async fn libp2p_network_sync() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -122,7 +122,7 @@ async fn libp2p_network_async() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -260,7 +260,7 @@ async fn libp2p_network_partially_sync() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -339,5 +339,5 @@ async fn libp2p_network_chaos() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } From 862977f4ba5528c1f95be9385b68d7583680e0ea Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 24 Jan 2024 12:39:57 -0800 Subject: [PATCH 0732/1393] Get the parent rather than decided state --- task-impls/src/consensus.rs | 8 ++++++-- testing/tests/unreliable_network.rs | 8 ++++---- types/src/consensus.rs | 16 ++++++++++++---- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index dd39cbc75c..f52dbb0647 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -590,8 +590,11 @@ impl, A: ConsensusApi + return; }; - let parent_commitment = parent.commit(); - let Ok(state) = consensus.get_decided_state().validate_and_apply_header( + let Some(parent_state) = consensus.get_state(parent.view_number) else { + error!("Parent state not found! Consensus internally inconsistent"); + return; + }; + let Ok(state) = parent_state.validate_and_apply_header( &proposal.data.block_header.clone(), &parent.block_header.clone(), &view, @@ -599,6 +602,7 @@ impl, A: ConsensusApi + error!("Block header doesn't extend the proposal",); return; }; + let parent_commitment = parent.commit(); let leaf: Leaf<_> = Leaf { view_number: view, justify_qc: justify_qc.clone(), diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index c2e4a56249..e39ae11e98 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -44,7 +44,7 @@ async fn libp2p_network_sync() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -122,7 +122,7 @@ async fn libp2p_network_async() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -260,7 +260,7 @@ async fn libp2p_network_partially_sync() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } #[cfg(test)] @@ -339,5 +339,5 @@ async fn libp2p_network_chaos() { .gen_launcher::(0) .launch() .run_test() - .await + .await; } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index b55e963d39..e456dba20d 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -348,16 +348,24 @@ impl Consensus { self.saved_leaves.get(&leaf).unwrap().clone() } + /// Gets the validated state with the given view number, if in the state map. + #[must_use] + pub fn get_state(&self, view_number: TYPES::Time) -> Option<&TYPES::StateType> { + match self.state_map.get(&view_number) { + Some(view) => view.get_state(), + None => None, + } + } + /// Gets the last decided validated state. /// /// # Panics - /// If the last decided view's state does not exist in the state map or saved leaves, which - /// should never happen. + /// If the last decided view's state does not exist in the state map, which should never + /// happen. #[must_use] pub fn get_decided_state(&self) -> &TYPES::StateType { let decided_view_num = self.last_decided_view; - let view = self.state_map.get(&decided_view_num).unwrap(); - view.get_state() + self.get_state(decided_view_num) .expect("Decided state not found! Consensus internally inconsistent") } } From 6d82623df05a48aaf93ee4930f8711f3ed879b9b Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 24 Jan 2024 17:23:37 -0800 Subject: [PATCH 0733/1393] Fix more conflicts --- types/src/consensus.rs | 22 ++-------------------- types/src/traits/node_implementation.rs | 4 ++-- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a2f733bf69..e379060458 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -319,25 +319,12 @@ impl Consensus { .retain(|view_number, _| *view_number >= old_anchor_view); self.validated_state_map .range(old_anchor_view..new_anchor_view) -<<<<<<< HEAD - .filter_map(|(_view_number, view)| view.get_payload_commitment()) - .for_each(|payload_commitment| { - self.saved_payloads.remove(payload_commitment); - }); - self.validated_state_map - .range(old_anchor_view..new_anchor_view) -======= ->>>>>>> keyao-store-data .filter_map(|(_view_number, view)| view.get_leaf_commitment()) .for_each(|leaf| { self.saved_leaves.remove(&leaf); }); -<<<<<<< HEAD self.validated_state_map = self.validated_state_map.split_off(&new_anchor_view); -======= - self.state_map = self.state_map.split_off(&new_anchor_view); self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); ->>>>>>> keyao-store-data } /// Gets the last decided leaf. @@ -357,8 +344,8 @@ impl Consensus { /// Gets the validated state with the given view number, if in the state map. #[must_use] - pub fn get_state(&self, view_number: TYPES::Time) -> Option<&TYPES::StateType> { - match self.state_map.get(&view_number) { + pub fn get_state(&self, view_number: TYPES::Time) -> Option<&TYPES::ValidatedState> { + match self.validated_state_map.get(&view_number) { Some(view) => view.get_state(), None => None, } @@ -372,12 +359,7 @@ impl Consensus { #[must_use] pub fn get_decided_state(&self) -> &TYPES::ValidatedState { let decided_view_num = self.last_decided_view; -<<<<<<< HEAD - let view = self.validated_state_map.get(&decided_view_num).unwrap(); - view.get_state() -======= self.get_state(decided_view_num) ->>>>>>> keyao-store-data .expect("Decided state not found! Consensus internally inconsistent") } } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 49a6421463..62a0fb03ac 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -7,7 +7,7 @@ use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, network::{CommunicationChannel, NetworkReliability, TestableNetworkingImplementation}, - states::{ConsensusTime, TestableBlock, TestableState}, + states::{ConsensusTime, TestableState}, storage::{StorageError, StorageState, TestableStorage}, ValidatedState, }; @@ -16,7 +16,7 @@ use crate::{ message::ProcessedSequencingMessage, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, - storage::Storage, BlockPayload, + states::InstanceState, storage::Storage, BlockPayload, }, }; use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; From 63a0b84170b072aba19b17dc9c00fbf4421e103f Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 25 Jan 2024 11:46:11 -0800 Subject: [PATCH 0734/1393] Add state to header, remove metadata() --- task-impls/src/consensus.rs | 3 ++- testing/src/block_types.rs | 4 ++++ testing/src/state_types.rs | 2 -- testing/src/task_helpers.rs | 9 +++++++-- types/src/traits/block_contents.rs | 11 +++++++++-- types/src/traits/node_implementation.rs | 2 +- types/src/traits/state.rs | 3 --- types/src/utils.rs | 9 +++++++++ 8 files changed, 32 insertions(+), 11 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e054b2004b..82cba4b5ce 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1174,7 +1174,7 @@ impl, A: ConsensusApi + return false; }; // Leaf hash in view inner does not match high qc hash - Why? - let Some(leaf_commitment) = parent_view.get_leaf_commitment() else { + let Some((leaf_commitment, state)) = parent_view.get_leaf() else { error!( ?parent_view_number, ?parent_view, @@ -1223,6 +1223,7 @@ impl, A: ConsensusApi + commit_and_metadata.commitment, commit_and_metadata.metadata.clone(), &parent_header, + state, ); let leaf = Leaf { view_number: view, diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index a395de232f..525a83378a 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -15,6 +15,8 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; +use crate::state_types::TestState; + /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct TestTransaction(pub Vec); @@ -181,11 +183,13 @@ pub struct TestBlockHeader { impl BlockHeader for TestBlockHeader { type Payload = TestBlockPayload; + type State = TestState; fn new( payload_commitment: VidCommitment, _metadata: ::Metadata, parent_header: &Self, + _parent_state: &Self::State, ) -> Self { Self { block_number: parent_header.block_number + 1, diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 5092cb267f..61a335fd8d 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -90,8 +90,6 @@ impl State for TestState { } fn on_commit(&self) {} - - fn metadata(&self) -> Self::Metadata {} } impl TestableState for TestState { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 0fd17171cc..b01203c4ad 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -239,7 +239,13 @@ async fn build_quorum_proposal_and_signature( .quorum_membership .total_nodes(), ); - let block_header = TestBlockHeader::new(payload_commitment, (), &parent_leaf.block_header); + let mut parent_state = ::from_header(&parent_leaf.block_header); + let block_header = TestBlockHeader::new( + payload_commitment, + (), + &parent_leaf.block_header, + &parent_state, + ); // current leaf that can be re-assigned everytime when entering a new view let mut leaf = Leaf { view_number: ViewNumber::new(1), @@ -250,7 +256,6 @@ async fn build_quorum_proposal_and_signature( rejected: vec![], proposer_id: *api.public_key(), }; - let mut parent_state = ::from_header(&parent_leaf.block_header); let mut signature = ::sign(private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 9bac9b6d7f..00a7880c74 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -3,7 +3,10 @@ //! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which //! describe the behaviors that a block is expected to have. -use crate::data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}; +use crate::{ + data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}, + traits::State, +}; use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; @@ -100,11 +103,15 @@ pub trait BlockHeader: /// Block payload associated with the commitment. type Payload: BlockPayload; - /// Build a header with the payload commitment, metadata, and parent header. + /// Validated state. + type State: State; + + /// Build a header with the payload commitment, metadata, parent header, and parent state. fn new( payload_commitment: VidCommitment, metadata: ::Metadata, parent_header: &Self, + parent_state: &Self::State, ) -> Self; /// Build the genesis header, payload, and metadata. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index a2c8329378..10796f2275 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -286,7 +286,7 @@ pub trait NodeType: /// This should be the same `Time` that `StateType::Time` is using. type Time: ConsensusTime; /// The block header type that this hotshot setup is using. - type BlockHeader: BlockHeader; + type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// /// This should be the same block that `StateType::BlockPayload` is using. diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index 5b43dc3847..b4b8ed51e4 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -75,9 +75,6 @@ pub trait State: /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); - - /// Get the application-specific data. - fn metadata(&self) -> Self::Metadata; } // TODO Seuqnecing here means involving DA in consensus diff --git a/types/src/utils.rs b/types/src/utils.rs index 6df7749cf0..c1b2ae3add 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -31,6 +31,15 @@ pub enum ViewInner { } impl ViewInner { + /// Return the underlying undecide leaf view if it exists. + pub fn get_leaf(&self) -> Option<(Commitment>, &TYPES::StateType)> { + if let Self::Leaf { leaf, state } = self { + Some((*leaf, state)) + } else { + None + } + } + /// return the underlying leaf hash if it exists #[must_use] pub fn get_leaf_commitment(&self) -> Option>> { From 8ebf6a5c89ff3dfcf630404e52bba9ebd1688e71 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 25 Jan 2024 19:37:19 -0500 Subject: [PATCH 0735/1393] [FIX_CI] Fix test issues (#2446) --- hotshot/src/lib.rs | 13 ++ hotshot/src/tasks/mod.rs | 32 +++-- .../src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/types/handle.rs | 9 ++ .../src/network/behaviours/direct_message.rs | 10 +- task-impls/src/consensus.rs | 2 +- testing/src/spinning_task.rs | 123 ++++++++++++------ testing/src/test_builder.rs | 11 +- testing/src/test_runner.rs | 29 ++++- testing/src/txn_task.rs | 33 +++-- testing/tests/catchup.rs | 29 ++--- testing/tests/libp2p.rs | 3 +- 12 files changed, 195 insertions(+), 101 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f5821beeda..0d4dbfb963 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -386,6 +386,19 @@ impl> SystemContext { self.inner.consensus.read().await.get_decided_leaf() } + /// [Non-blocking] instantly returns a copy of the last decided leaf if + /// it is available to be read. If not, we return `None`. + /// + /// # Panics + /// Panics if internal state for consensus is inconsistent + #[must_use] + pub fn try_get_decided_leaf(&self) -> Option> { + self.inner + .consensus + .try_read() + .map(|guard| guard.get_decided_leaf()) + } + /// Returns a copy of the last decided validated state. /// /// # Panics diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 563afb53cd..f0704e39c4 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -44,6 +44,7 @@ use std::{ sync::Arc, time::Duration, }; +use tracing::error; /// event for global event stream #[derive(Clone, Debug)] @@ -67,12 +68,16 @@ pub async fn add_network_message_task Messages(msgs), + Err(err) => { + error!("failed to receive broadcast messages: {err}"); + + // return zero messages so we sleep and try again + Messages(vec![]) + } + }; + if msgs.0.is_empty() { async_sleep(Duration::from_millis(100)).await; } else { @@ -87,12 +92,15 @@ pub async fn add_network_message_task Messages(msgs), + Err(err) => { + error!("failed to receive direct messages: {err}"); + + // return zero messages so we sleep and try again + Messages(vec![]) + } + }; if msgs.0.is_empty() { async_sleep(Duration::from_millis(100)).await; } else { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 8b633139af..e837397c63 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -362,7 +362,7 @@ impl Libp2pNetwork { is_ready: Arc::new(AtomicBool::new(false)), // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples // https://github.com/EspressoSystems/HotShot/issues/2088 - dht_timeout: Duration::from_secs(1), + dht_timeout: Duration::from_secs(2), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, topic_map, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 4440609478..9577880031 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -113,6 +113,15 @@ impl + 'static> SystemContextHandl self.hotshot.get_decided_leaf().await } + /// Tries to get the most recent decided leaf, returning instantly + /// if we can't acquire the lock. + /// + /// # Panics + /// Panics if internal consensus is in an inconsistent state. + pub fn try_get_decided_leaf(&self) -> Option> { + self.hotshot.try_get_decided_leaf() + } + /// Submits a transaction to the backing [`SystemContext`] instance. /// /// The current node broadcasts the transaction to all nodes on the network. diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index 67a1dad987..e396ec82e1 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -70,7 +70,7 @@ impl DMBehaviour { } Event::OutboundFailure { peer, - request_id: _, + request_id, error, } => { error!( @@ -79,10 +79,10 @@ impl DMBehaviour { ); // RM TODO: make direct messages have n (and not infinite) retries // issue: https://github.com/EspressoSystems/HotShot/issues/2003 - // if let Some(mut req) = self.in_progress_rr.remove(&request_id) { - // req.backoff.start_next(false); - // self.failed_rr.push_back(req); - // } + if let Some(mut req) = self.in_progress_rr.remove(&request_id) { + req.backoff.start_next(false); + self.failed_rr.push_back(req); + } } Event::Message { message, peer, .. } => match message { Message::Request { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e054b2004b..ca5d2596d7 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -737,7 +737,7 @@ impl, A: ConsensusApi + true }, ) { - error!("publishing view error"); + error!("view publish error {e}"); self.output_event_stream .publish(Event { view_number: view, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index d98d1ebebc..145f7d95a8 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,8 +1,11 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use async_compatibility_layer::channel::UnboundedStream; use futures::FutureExt; -use hotshot::{traits::TestableNodeImplementation, SystemContext}; +use hotshot::traits::TestableNodeImplementation; use hotshot_task::{ event_stream::ChannelStream, task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, @@ -10,10 +13,14 @@ use hotshot_task::{ MergeN, }; use hotshot_types::traits::network::CommunicationChannel; +use hotshot_types::traits::state::ConsensusTime; use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; -use crate::{test_launcher::TaskGenerator, test_runner::Node}; +use crate::{ + test_launcher::TaskGenerator, + test_runner::{LateStartNode, Node}, +}; /// convience type for state and block pub type StateAndBlock = (Vec, Vec); @@ -28,9 +35,9 @@ pub struct SpinningTask> { /// handle to the nodes pub(crate) handles: Vec>, /// late start nodes - pub(crate) late_start: HashMap>, + pub(crate) late_start: HashMap>, /// time based changes - pub(crate) changes: HashMap>, + pub(crate) changes: BTreeMap>, /// most recent view seen by spinning task pub(crate) latest_view: Option, } @@ -73,6 +80,7 @@ impl SpinningTaskDescription { /// If there is no latest view /// or if the node id is over `u32::MAX` #[must_use] + #[allow(clippy::too_many_lines)] pub fn build>( self, ) -> TaskGenerator> { @@ -83,6 +91,12 @@ impl SpinningTaskDescription { async move { match event { GlobalTestEvent::ShutDown => { + // We do this here as well as in the completion task + // because that task has no knowledge of our late start handles. + for node in &state.handles { + node.handle.clone().shut_down().await; + } + (Some(HotShotTaskCompleted::ShutDown), state) } } @@ -103,46 +117,71 @@ impl SpinningTaskDescription { || view_number > state.latest_view.unwrap() { // perform operations on the nodes - if let Some(operations) = state.changes.remove(&view_number) { - for ChangeNode { idx, updown } in operations { - match updown { - UpDown::Up => { - if let Some(node) = state - .late_start - .remove(&idx.try_into().unwrap()) - { - tracing::error!( - "Node {} spinning up late", - idx - ); - let handle = node.run_tasks().await; - handle.hotshot.start_consensus().await; + + // We want to make sure we didn't miss any views (for example, there is no decide event + // if we get a timeout) + let views_with_relevant_changes: Vec<_> = state + .changes + .range(TYPES::Time::new(0)..view_number) + .map(|(k, _v)| *k) + .collect(); + + for view in views_with_relevant_changes { + if let Some(operations) = state.changes.remove(&view) { + for ChangeNode { idx, updown } in operations { + match updown { + UpDown::Up => { + if let Some(node) = state + .late_start + .remove(&idx.try_into().unwrap()) + { + tracing::error!( + "Node {} spinning up late", + idx + ); + + // create node and add to state, so we can shut them down properly later + let node = Node { + node_id: idx.try_into().unwrap(), + networks: node.networks, + handle: node.context.run_tasks().await, + }; + + // bootstrap consensus by sending the event + node.handle.hotshot.start_consensus().await; + + // add nodes to our state + state.handles.push(node); + } } - } - UpDown::Down => { - if let Some(node) = state.handles.get_mut(idx) { - tracing::error!("Node {} shutting down", idx); - node.handle.shut_down().await; + UpDown::Down => { + if let Some(node) = state.handles.get_mut(idx) { + tracing::error!( + "Node {} shutting down", + idx + ); + node.handle.shut_down().await; + } } - } - UpDown::NetworkUp => { - if let Some(handle) = state.handles.get(idx) { - tracing::error!( - "Node {} networks resuming", - idx - ); - handle.networks.0.resume(); - handle.networks.1.resume(); + UpDown::NetworkUp => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!( + "Node {} networks resuming", + idx + ); + handle.networks.0.resume(); + handle.networks.1.resume(); + } } - } - UpDown::NetworkDown => { - if let Some(handle) = state.handles.get(idx) { - tracing::error!( - "Node {} networks pausing", - idx - ); - handle.networks.0.pause(); - handle.networks.1.pause(); + UpDown::NetworkDown => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!( + "Node {} networks pausing", + idx + ); + handle.networks.0.pause(); + handle.networks.1.pause(); + } } } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index ac1ab4d69e..cdfd1cc270 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -84,7 +84,7 @@ impl TestMetadata { pub fn default_stress() -> Self { let num_nodes = 100; TestMetadata { - num_bootstrap_nodes: 15, + num_bootstrap_nodes: num_nodes, total_nodes: num_nodes, start_nodes: num_nodes, overall_safety_properties: OverallSafetyPropertiesDescription { @@ -112,6 +112,9 @@ impl TestMetadata { pub fn default_multiple_rounds() -> TestMetadata { let num_nodes = 10; TestMetadata { + // TODO: remove once we have fixed the DHT timeout issue + // https://github.com/EspressoSystems/HotShot/issues/2088 + num_bootstrap_nodes: num_nodes, total_nodes: num_nodes, start_nodes: num_nodes, overall_safety_properties: OverallSafetyPropertiesDescription { @@ -139,7 +142,7 @@ impl TestMetadata { TestMetadata { total_nodes: num_nodes, start_nodes: num_nodes, - num_bootstrap_nodes: 20, + num_bootstrap_nodes: num_nodes, // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -174,8 +177,8 @@ impl Default for TestMetadata { min_transactions: 0, total_nodes: num_nodes, start_nodes: num_nodes, - num_bootstrap_nodes: 5, - da_committee_size: 5, + num_bootstrap_nodes: num_nodes, + da_committee_size: num_nodes, spinning_properties: SpinningTaskDescription { node_changes: vec![], }, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index f5b6a238c0..aadd6f1ffb 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -22,7 +22,7 @@ use hotshot_types::{ HotShotConfig, ValidatorConfig, }; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, }; @@ -32,14 +32,23 @@ use tracing::info; /// a node participating in a test #[derive(Clone)] pub struct Node> { - /// the unique identifier of the node + /// The node's unique identifier pub node_id: u64, - /// the networks of the node + /// The underlying networks belonging to the node pub networks: Networks, - /// the handle to the node's internals + /// The handle to the node's internals pub handle: SystemContextHandle, } +/// A yet-to-be-started node that participates in tests +#[derive(Clone)] +pub struct LateStartNode> { + /// The underlying networks belonging to the node + pub networks: Networks, + /// The context to which we will use to launch HotShot when it's time + pub context: SystemContext, +} + /// The runner of a test network /// spin up and down nodes, execute rounds pub struct TestRunner> { @@ -48,7 +57,7 @@ pub struct TestRunner> { /// nodes in the test pub(crate) nodes: Vec>, /// nodes with a late start - pub(crate) late_start: HashMap>, + pub(crate) late_start: HashMap>, /// the next node unique identifier pub(crate) next_node_id: u64, /// overarching test task @@ -123,7 +132,7 @@ where // add spinning task // map spinning to view - let mut changes: HashMap> = HashMap::new(); + let mut changes: BTreeMap> = BTreeMap::new(); for (view, mut change) in spinning_changes { changes .entry(TYPES::Time::new(view)) @@ -231,7 +240,13 @@ where ) .await; if late_start.contains(&node_id) { - self.late_start.insert(node_id, hotshot); + self.late_start.insert( + node_id, + LateStartNode { + networks, + context: hotshot, + }, + ); } else { self.nodes.push(Node { node_id, diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 1c4f7c2850..994a37baeb 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -1,5 +1,5 @@ use crate::test_runner::Node; -use async_compatibility_layer::art::async_sleep; +use async_compatibility_layer::art::{async_sleep, async_timeout}; use futures::FutureExt; use hotshot::traits::TestableNodeImplementation; use hotshot_task::{ @@ -13,6 +13,7 @@ use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use rand::thread_rng; use snafu::Snafu; use std::{sync::Arc, time::Duration}; +use tracing::error; use super::{test_launcher::TaskGenerator, GlobalTestEvent}; @@ -107,16 +108,28 @@ impl TxnTaskDescription { // we're assuming all nodes have the same leaf. // If they don't match, this is probably fine since // it should be caught by an assertion (and the txn will be rejected anyway) - let leaf = node.handle.get_decided_leaf().await; - let txn = I::leaf_create_random_transaction( - &leaf, - &mut thread_rng(), - 0, - ); - node.handle - .submit_transaction(txn.clone()) + + // Attempts to grab the most recently decided leaf. On failure, we don't + // send a transaction. This is to prevent deadlock. + if let Some(leaf) = node.handle.try_get_decided_leaf() { + let txn = I::leaf_create_random_transaction( + &leaf, + &mut thread_rng(), + 0, + ); + + // Time out if we can't get a lock on consensus in a reasonable time. This is to + // prevent deadlock. + if let Err(err) = async_timeout( + Duration::from_secs(1), + node.handle.submit_transaction(txn.clone()), + ) .await - .expect("Could not send transaction"); + { + error!("Failed to send test transaction: {err}"); + }; + } + (None, state) } } diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 88cde9eb4f..64bd08adfb 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -22,19 +22,13 @@ async fn test_catchup() { ..Default::default() }; let mut metadata = TestMetadata::default(); - let catchup_nodes = vec![ - ChangeNode { - idx: 18, - updown: UpDown::Up, - }, - ChangeNode { - idx: 19, - updown: UpDown::Up, - }, - ]; + let catchup_node = vec![ChangeNode { + idx: 19, + updown: UpDown::Up, + }]; metadata.timing_data = timing_data; - metadata.start_nodes = 18; + metadata.start_nodes = 19; metadata.total_nodes = 20; metadata.view_sync_properties = @@ -42,7 +36,7 @@ async fn test_catchup() { metadata.spinning_properties = SpinningTaskDescription { // Start the nodes before their leadership. - node_changes: vec![(15, catchup_nodes)], + node_changes: vec![(13, catchup_node)], }; metadata.completion_task_description = @@ -94,13 +88,13 @@ async fn test_catchup_web() { idx: 18, updown: UpDown::Up, }]; - metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(25, catchup_nodes)], + // Start the nodes before their leadership. + node_changes: vec![(10, catchup_nodes)], }; metadata.completion_task_description = @@ -151,14 +145,13 @@ async fn test_catchup_one_node() { idx: 18, updown: UpDown::Up, }]; - metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.total_nodes = 20; metadata.spinning_properties = SpinningTaskDescription { - // Start the node before its leadership. - node_changes: vec![(15, catchup_nodes)], + // Start the nodes before their leadership. + node_changes: vec![(10, catchup_nodes)], }; metadata.completion_task_description = @@ -225,7 +218,7 @@ async fn test_catchup_in_view_sync() { hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(25, catchup_nodes)], + node_changes: vec![(10, catchup_nodes)], }; metadata.completion_task_description = diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 8fa03a8179..623b714b6b 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -29,7 +29,8 @@ async fn libp2p_network() { }, ), timing_data: TimingData { - round_start_delay: 100, + next_view_timeout: 4000, + propose_max_round_time: Duration::from_millis(300), ..Default::default() }, ..TestMetadata::default_multiple_rounds() From 53b41c5c021648ae20d536702a0fd5ff8e3cef1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jan 2024 03:52:01 +0000 Subject: [PATCH 0736/1393] Bump pin-project from 1.1.3 to 1.1.4 Bumps [pin-project](https://github.com/taiki-e/pin-project) from 1.1.3 to 1.1.4. - [Release notes](https://github.com/taiki-e/pin-project/releases) - [Changelog](https://github.com/taiki-e/pin-project/blob/main/CHANGELOG.md) - [Commits](https://github.com/taiki-e/pin-project/compare/v1.1.3...v1.1.4) --- updated-dependencies: - dependency-name: pin-project dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- task/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task/Cargo.toml b/task/Cargo.toml index 03b65c1934..51a80a1829 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -15,7 +15,7 @@ snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } atomic_enum = "0.2.0" -pin-project = "1.1.3" +pin-project = "1.1.4" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } From 7e99c6827572a25bac3540897e3b5677a51e63f2 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 26 Jan 2024 12:03:25 -0800 Subject: [PATCH 0737/1393] Fix after merge --- testing/src/block_types.rs | 4 ++-- testing/src/task_helpers.rs | 3 ++- types/src/traits/block_contents.rs | 4 ++-- types/src/traits/node_implementation.rs | 2 +- types/src/utils.rs | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 43b38b0cb9..9819a99da2 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -14,7 +14,7 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; -use crate::state_types::TestState; +use crate::state_types::TestValidatedState; /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] @@ -182,7 +182,7 @@ pub struct TestBlockHeader { impl BlockHeader for TestBlockHeader { type Payload = TestBlockPayload; - type State = TestState; + type State = TestValidatedState; fn new( payload_commitment: VidCommitment, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4767db1f83..cbdfcc9f0c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -238,7 +238,8 @@ async fn build_quorum_proposal_and_signature( .quorum_membership .total_nodes(), ); - let mut parent_state = ::from_header(&parent_leaf.block_header); + let mut parent_state = + ::from_header(&parent_leaf.block_header); let block_header = TestBlockHeader::new( payload_commitment, (), diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index a71d611235..93fde24996 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -5,7 +5,7 @@ use crate::{ data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}, - traits::State, + traits::ValidatedState, }; use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; @@ -113,7 +113,7 @@ pub trait BlockHeader: type Payload: BlockPayload; /// Validated state. - type State: State; + type State: ValidatedState; /// Build a header with the payload commitment, metadata, parent header, and parent state. fn new( diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 50190b87ef..e548e4f277 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -286,7 +286,7 @@ pub trait NodeType: /// This should be the same `Time` that `ValidatedState::Time` is using. type Time: ConsensusTime; /// The block header type that this hotshot setup is using. - type BlockHeader: BlockHeader; + type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// /// This should be the same block that `ValidatedState::BlockPayload` is using. diff --git a/types/src/utils.rs b/types/src/utils.rs index 97e5a2d18d..187996e360 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -32,7 +32,7 @@ pub enum ViewInner { impl ViewInner { /// Return the underlying undecide leaf view if it exists. - pub fn get_leaf(&self) -> Option<(Commitment>, &TYPES::StateType)> { + pub fn get_leaf(&self) -> Option<(Commitment>, &TYPES::ValidatedState)> { if let Self::Leaf { leaf, state } = self { Some((*leaf, state)) } else { From e02c3b47315932330aee188fd7baba061e380cea Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 26 Jan 2024 12:08:57 -0800 Subject: [PATCH 0738/1393] Remove metadata type --- testing/src/state_types.rs | 2 -- types/src/traits/state.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 61a335fd8d..aba587fce4 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -60,8 +60,6 @@ impl State for TestState { type Time = ViewNumber; - type Metadata = (); - fn validate_and_apply_header( &self, _proposed_header: &Self::BlockHeader, diff --git a/types/src/traits/state.rs b/types/src/traits/state.rs index b4b8ed51e4..0a10b58a9f 100644 --- a/types/src/traits/state.rs +++ b/types/src/traits/state.rs @@ -45,8 +45,6 @@ pub trait State: type BlockPayload: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; - /// Application-specific data. - type Metadata: Debug + Send + Sync; /// Check if the proposed block header is valid and apply it to the state if so. /// From e38b69e83f16064029957772563027c3e9aecc25 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 26 Jan 2024 17:04:21 -0800 Subject: [PATCH 0739/1393] Add instance state to init --- hotshot/examples/infra/mod.rs | 12 ++++++++++-- hotshot/src/lib.rs | 14 +++++++++++--- testing/src/spinning_task.rs | 2 +- testing/src/state_types.rs | 7 ++----- testing/src/task_helpers.rs | 1 + testing/src/test_runner.rs | 5 ++++- types/src/traits/states.rs | 15 ++++----------- 7 files changed, 33 insertions(+), 23 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index d825de0563..984f5f7d7b 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -24,7 +24,10 @@ use hotshot_orchestrator::{ config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_task::task::FilterEvent; -use hotshot_testing::block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}; +use hotshot_testing::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::TestInstanceState, +}; use hotshot_types::message::Message; use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::ValidatorConfig; @@ -297,7 +300,7 @@ async fn libp2p_network_from_config( /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] pub trait RunDA< - TYPES: NodeType, + TYPES: NodeType, DACHANNEL: CommunicationChannel + Debug, QUORUMCHANNEL: CommunicationChannel + Debug, VIEWSYNCCHANNEL: CommunicationChannel + Debug, @@ -380,6 +383,7 @@ pub trait RunDA< memberships, networks_bundle, initializer, + TestInstanceState {}, ConsensusMetricsValue::default(), ) .await @@ -520,6 +524,7 @@ impl< Transaction = TestTransaction, BlockPayload = TestBlockPayload, BlockHeader = TestBlockHeader, + InstanceState = TestInstanceState, >, NODE: NodeImplementation< TYPES, @@ -627,6 +632,7 @@ impl< Transaction = TestTransaction, BlockPayload = TestBlockPayload, BlockHeader = TestBlockHeader, + InstanceState = TestInstanceState, >, NODE: NodeImplementation< TYPES, @@ -725,6 +731,7 @@ impl< Transaction = TestTransaction, BlockPayload = TestBlockPayload, BlockHeader = TestBlockHeader, + InstanceState = TestInstanceState, >, NODE: NodeImplementation< TYPES, @@ -838,6 +845,7 @@ pub async fn main_entry_point< Transaction = TestTransaction, BlockPayload = TestBlockPayload, BlockHeader = TestBlockHeader, + InstanceState = TestInstanceState, >, DACHANNEL: CommunicationChannel + Debug, QUORUMCHANNEL: CommunicationChannel + Debug, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index cadc0f714d..475dfcdef9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -179,8 +179,11 @@ pub struct SystemContext> { } impl> SystemContext { - /// Creates a new hotshot with the given configuration options and sets it up with the given + /// Creates a new [`SystemContext`] with the given configuration options and sets it up with the given /// genesis block + /// + /// To do a full initialization, use `fn init` instead, which will set up background tasks as + /// well. #[allow(clippy::too_many_arguments)] #[instrument(skip(private_key, storage, memberships, networks, initializer, metrics))] pub async fn new( @@ -192,6 +195,7 @@ impl> SystemContext { memberships: Memberships, networks: Networks, initializer: HotShotInitializer, + instance_state: TYPES::InstanceState, metrics: ConsensusMetricsValue, ) -> Result> { debug!("Creating a new hotshot"); @@ -207,7 +211,7 @@ impl> SystemContext { // insert genesis (or latest block) to state map let mut validated_state_map = BTreeMap::default(); - let (validated_state, instance_state) = TYPES::ValidatedState::genesis(); + let validated_state = TYPES::ValidatedState::genesis(); validated_state_map.insert( anchored_leaf.get_view_number(), View { @@ -414,7 +418,7 @@ impl> SystemContext { .clone() } - /// Initializes a new hotshot and does the work of setting up all the background tasks + /// Initializes a new [`SystemContext`] and does the work of setting up all the background tasks /// /// Assumes networking implementation is already primed. /// @@ -423,6 +427,8 @@ impl> SystemContext { /// Upon encountering an unrecoverable error, such as a failure to send to a broadcast channel, /// the `HotShot` instance will log the error and shut down. /// + /// To construct a [`SystemContext`] without setting up tasks, use `fn new` instead. + /// /// # Errors /// /// Will return an error when the storage failed to insert the first `QuorumCertificate` @@ -436,6 +442,7 @@ impl> SystemContext { memberships: Memberships, networks: Networks, initializer: HotShotInitializer, + instance_state: TYPES::InstanceState, metrics: ConsensusMetricsValue, ) -> Result< ( @@ -454,6 +461,7 @@ impl> SystemContext { memberships, networks, initializer, + instance_state, metrics, ) .await?; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 145f7d95a8..9213476fb8 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -13,7 +13,7 @@ use hotshot_task::{ MergeN, }; use hotshot_types::traits::network::CommunicationChannel; -use hotshot_types::traits::state::ConsensusTime; +use hotshot_types::traits::states::ConsensusTime; use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 7d17eeefa3..101a98aa89 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -17,13 +17,10 @@ use crate::block_types::{TestBlockHeader, TestBlockPayload}; pub use crate::node_types::TestTypes; /// Instance-level state implementation for testing purposes. +#[derive(Debug)] pub struct TestInstanceState {} -impl InstanceState for TestInstanceState { - fn new() -> Self { - TestInstanceState {} - } -} +impl InstanceState for TestInstanceState {} /// Validated state implementation for testing purposes. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index cbdfcc9f0c..fd06015c7c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -113,6 +113,7 @@ pub async fn build_system_handle( memberships, networks_bundle, initializer, + TestInstanceState {}, ConsensusMetricsValue::default(), ) .await diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 00d9f72f8f..5d022a714b 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -6,6 +6,7 @@ use super::{ }; use crate::{ spinning_task::{ChangeNode, UpDown}, + state_types::TestInstanceState, test_launcher::{Networks, TestLauncher}, view_sync_task::ViewSyncTask, }; @@ -64,7 +65,8 @@ pub struct TestRunner> { pub(crate) task_runner: TaskRunner, } -impl> TestRunner +impl, I: TestableNodeImplementation> + TestRunner where I: TestableNodeImplementation, { @@ -315,6 +317,7 @@ where memberships, network_bundle, initializer, + TestInstanceState {}, ConsensusMetricsValue::default(), ) .await diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 57ccb0e3f4..3cf996137b 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -18,11 +18,7 @@ use std::{ use super::block_contents::{BlockHeader, TestableBlock}; /// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Send + Sync { - /// Construct the state. - #[must_use] - fn new() -> Self; -} +pub trait InstanceState: Debug + Send + Sync {} /// Abstraction over the state that blocks modify /// @@ -79,13 +75,10 @@ pub trait ValidatedState: /// This can also be used to rebuild the state for catchup. fn from_header(block_header: &Self::BlockHeader) -> Self; - /// Construct a genesis validated state and the instance-level state. + /// Construct a genesis validated state. #[must_use] - fn genesis() -> (Self, Self::Instance) { - ( - Self::from_header(&Self::BlockHeader::genesis().0), - ::new(), - ) + fn genesis() -> Self { + Self::from_header(&Self::BlockHeader::genesis().0) } /// Gets called to notify the persistence backend that this state has been committed From e94529744e9c010bae46fe8ed1278b40f0ecb544 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Sat, 27 Jan 2024 22:56:05 -0500 Subject: [PATCH 0740/1393] Checky relay when accumulation view sync votes --- task-impls/src/view_sync.rs | 52 ++++++++++++++++--------------------- task-impls/src/vote.rs | 5 ++-- types/src/vote.rs | 25 +++++++++++------- 3 files changed, 41 insertions(+), 41 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d28faf42da..072f83268c 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -41,7 +41,7 @@ use hotshot_types::{ }, }; use snafu::Snafu; -use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; @@ -62,9 +62,9 @@ pub enum ViewSyncPhase { /// Stub of a view sync error pub struct ViewSyncTaskError {} -/// Type alias for a map from View Number to Vote Task +/// Type alias for a map from View Number to Relay to Vote Task type RelayMap = - HashMap<::Time, VoteCollectionTaskState>; + HashMap<::Time, BTreeMap>>; /// Main view sync task state pub struct ViewSyncTaskState< @@ -198,12 +198,13 @@ impl< ) { // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it - let mut task_map = self.replica_task_map.write().await; if self.current_view > view { debug!("Already in a higher view than the view sync message"); return; } + let mut task_map = self.replica_task_map.write().await; + if let Some(replica_task) = task_map.remove(&view) { // Forward event then return debug!("Forwarding message"); @@ -277,7 +278,9 @@ impl< HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let mut map = self.pre_commit_relay_map.write().await; let vote_view = vote.get_view_number(); - if let Some(relay_task) = map.remove(&vote_view) { + let relay = vote.get_data().relay; + let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); + if let Some(relay_task) = relay_map.remove(&relay) { debug!("Forwarding message"); let result = relay_task.handle_event(event.clone()).await; @@ -285,17 +288,12 @@ impl< // The protocol has finished return; } - - map.insert(vote_view, result.1); + relay_map.insert(relay, result.1); return; } // We do not have a relay task already running, so start one - if self - .membership - .get_leader(vote_view + vote.get_data().relay) - != self.public_key - { + if self.membership.get_leader(vote_view + relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; @@ -311,14 +309,16 @@ impl< }; let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; if let Some(vote_task) = vote_collector { - map.insert(vote_view, vote_task); + relay_map.insert(relay, vote_task); } } HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let mut map = self.commit_relay_map.write().await; let vote_view = vote.get_view_number(); - if let Some(relay_task) = map.remove(&vote_view) { + let relay = vote.get_data().relay; + let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); + if let Some(relay_task) = relay_map.remove(&relay) { debug!("Forwarding message"); let result = relay_task.handle_event(event.clone()).await; @@ -327,16 +327,12 @@ impl< return; } - map.insert(vote_view, result.1); + relay_map.insert(relay, result.1); return; } // We do not have a relay task already running, so start one - if self - .membership - .get_leader(vote_view + vote.get_data().relay) - != self.public_key - { + if self.membership.get_leader(vote_view + relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; @@ -352,14 +348,16 @@ impl< }; let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; if let Some(vote_task) = vote_collector { - map.insert(vote_view, vote_task); + relay_map.insert(relay, vote_task); } } HotShotEvent::ViewSyncFinalizeVoteRecv(ref vote) => { let mut map = self.finalize_relay_map.write().await; let vote_view = vote.get_view_number(); - if let Some(relay_task) = map.remove(&vote_view) { + let relay = vote.get_data().relay; + let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); + if let Some(relay_task) = relay_map.remove(&relay) { debug!("Forwarding message"); let result = relay_task.handle_event(event.clone()).await; @@ -368,16 +366,12 @@ impl< return; } - map.insert(vote_view, result.1); + relay_map.insert(relay, result.1); return; } // We do not have a relay task already running, so start one - if self - .membership - .get_leader(vote_view + vote.get_data().relay) - != self.public_key - { + if self.membership.get_leader(vote_view + relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; @@ -393,7 +387,7 @@ impl< }; let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; if let Some(vote_task) = vote_collector { - map.insert(vote_view, vote_task); + relay_map.insert(relay, vote_task); } } diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index bf20a7bad9..3fcec0ce9e 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -2,7 +2,6 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; use crate::events::HotShotEvent; use async_trait::async_trait; -use bitvec::prelude::*; use either::Either::{self, Left, Right}; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, @@ -202,8 +201,8 @@ where } let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), - sig_lists: Vec::new(), - signers: bitvec![0;info. membership.total_nodes()], + sig_lists: HashMap::new(), + signers: HashMap::new(), phantom: PhantomData, }; diff --git a/types/src/vote.rs b/types/src/vote.rs index 50aeebd960..12ee3ae75b 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -6,7 +6,7 @@ use std::{ }; use bincode::Options; -use bitvec::vec::BitVec; +use bitvec::{bitvec, vec::BitVec}; use commit::Commitment; use either::Either; use ethereum_types::U256; @@ -85,9 +85,12 @@ pub struct VoteAccumulator< /// Map of all signatures accumlated so far pub vote_outcomes: VoteMap2>, /// A list of valid signatures for certificate aggregation - pub sig_lists: Vec<::PureAssembledSignatureType>, + pub sig_lists: HashMap< + Commitment, + Vec<::PureAssembledSignatureType>, + >, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: BitVec, + pub signers: HashMap, BitVec>, /// Phantom data to specify the types this accumulator is for pub phantom: PhantomData<(TYPES, VOTE, CERT)>, } @@ -138,13 +141,17 @@ impl, CERT: Certificate, CERT: Certificate::assemble( &real_qc_pp, - self.signers.as_bitslice(), - &self.sig_lists[..], + signers.as_bitslice(), + &sig_list[..], ); let cert = CERT::create_signed_certificate( From e99bdf49b86387f316f39a702ec8e50a43798adf Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Sun, 28 Jan 2024 09:17:59 -0500 Subject: [PATCH 0741/1393] feat: add tests --- .../src/traits/networking/libp2p_network.rs | 2 +- testing-macros/src/lib.rs | 27 +- testing-macros/tests/tests.rs | 188 +++++++++++- testing/tests/basic.rs | 285 ------------------ 4 files changed, 201 insertions(+), 301 deletions(-) delete mode 100644 testing/tests/basic.rs diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index e837397c63..107e624707 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -264,7 +264,7 @@ where { Ok(network) => network, Err(err) => { - panic!("Failed to create network: {err}"); + panic!("Failed to create network: {err:?}"); } } }) diff --git a/testing-macros/src/lib.rs b/testing-macros/src/lib.rs index 9f246b4c66..ae6821222e 100644 --- a/testing-macros/src/lib.rs +++ b/testing-macros/src/lib.rs @@ -108,20 +108,19 @@ impl TestData { quote! {} }; quote! { - - #slow_attribute - #[cfg_attr( - feature = "tokio-executor", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(feature = "async-std-executor", async_std::test)] - #[tracing::instrument] - async fn #test_name() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - // TODO this should be zero, right? I can also provide this as input - (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test().await; - } + #[cfg(test)] + #slow_attribute + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tracing::instrument] + async fn #test_name() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test().await; + } } } } diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 70b786d12e..969e0a0896 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -1 +1,187 @@ -// TODO +use hotshot_testing::completion_task::{ + CompletionTaskDescription, TimeBasedCompletionTaskDescription, +}; +use hotshot_testing::node_types::{CombinedImpl, Libp2pImpl, MemoryImpl, WebImpl}; +use hotshot_testing::spinning_task::ChangeNode; +use hotshot_testing::spinning_task::SpinningTaskDescription; +use hotshot_testing::spinning_task::UpDown; +use hotshot_testing::state_types::TestTypes; +use hotshot_testing::test_builder::TestMetadata; +use hotshot_testing_macros::cross_tests; +use std::time::Duration; + +cross_tests!( + TestName: test_success, + Impls: [MemoryImpl, WebImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestMetadata::default() + } + }, +); + +// Test one node leaving the network. +cross_tests!( + TestName: test_with_failures_one, + Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestMetadata::default_more_nodes(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ChangeNode { + idx: 19, + updown: UpDown::Down, + }]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + metadata.overall_safety_properties.num_failed_views = 3; + metadata.overall_safety_properties.num_successful_views = 25; + metadata + } +); + +// Test f/2 nodes leaving the network. +cross_tests!( + TestName: test_with_failures_half_f, + Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestMetadata::default_more_nodes(); + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + + metadata.overall_safety_properties.num_failed_views = 3; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + metadata + } +); + +// Test f nodes leaving the network. +cross_tests!( + TestName: test_with_failures_f, + Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + + let mut metadata = TestMetadata::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 14, + updown: UpDown::Down, + }, + ChangeNode { + idx: 15, + updown: UpDown::Down, + }, + ChangeNode { + idx: 16, + updown: UpDown::Down, + }, + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + + metadata + } +); + +// Test that a good leader can succeed in the view directly after view sync +cross_tests!( + TestName: test_with_failures_2, + Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestMetadata::default_more_nodes(); + metadata.total_nodes = 12; + metadata.da_committee_size = 12; + metadata.start_nodes = 12; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 10, + updown: UpDown::Down, + }, + ChangeNode { + idx: 11, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)], + }; + + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 2; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 15; + + metadata + } +); diff --git a/testing/tests/basic.rs b/testing/tests/basic.rs deleted file mode 100644 index 0a1cdae806..0000000000 --- a/testing/tests/basic.rs +++ /dev/null @@ -1,285 +0,0 @@ -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_success() { - use hotshot_testing::{ - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, - test_builder::TestMetadata, - }; - use std::time::Duration; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - ..TestMetadata::default() - }; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} - -/// Test one node leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_one() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ChangeNode { - idx: 19, - updown: UpDown::Down, - }]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - metadata.overall_safety_properties.num_failed_views = 3; - metadata.overall_safety_properties.num_successful_views = 25; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} - -/// Test f/2 nodes leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_half_f() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - - metadata.overall_safety_properties.num_failed_views = 3; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} - -/// Test f nodes leaving the network. -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_f() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 6; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 14, - updown: UpDown::Down, - }, - ChangeNode { - idx: 15, - updown: UpDown::Down, - }, - ChangeNode { - idx: 16, - updown: UpDown::Down, - }, - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} - -/// Test that a good leader can succeed in the view directly after view sync -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_2() { - use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - metadata.total_nodes = 12; - metadata.da_committee_size = 12; - metadata.start_nodes = 12; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 10, - updown: UpDown::Down, - }, - ChangeNode { - idx: 11, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - - // 2 nodes fail triggering view sync, expect no other timeouts - metadata.overall_safety_properties.num_failed_views = 2; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 15; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} - -/// Test that a good leader can succeed in the view directly after view sync -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_with_failures_2_web() { - use hotshot_testing::{ - node_types::{TestTypes, WebImpl}, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestMetadata, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata::default_more_nodes(); - metadata.total_nodes = 12; - metadata.da_committee_size = 12; - metadata.start_nodes = 12; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 10, - updown: UpDown::Down, - }, - ChangeNode { - idx: 11, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], - }; - - // 2 nodes fail triggering view sync, expect no other timeouts - metadata.overall_safety_properties.num_failed_views = 2; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 15; - metadata - .gen_launcher::(0) - .launch() - .run_test() - .await; -} From e345298caf7bd84bac71eb97e1a54963dd7a2dbd Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Sun, 28 Jan 2024 22:09:08 -0500 Subject: [PATCH 0742/1393] [HOTFIX] Poll forever for latest data (#2473) * change naming from proposal to certificate * remove poll cancellation for view sync certificate * poll forever for latest data * add cancellation on shutdown * lint * change back to debug * improve cancellation * change to btreemap --- hotshot/src/tasks/mod.rs | 8 + .../traits/networking/web_server_network.rs | 223 ++++++++++-------- task-impls/src/view_sync.rs | 28 --- types/src/message.rs | 10 +- types/src/traits/network.rs | 13 +- web_server/api.toml | 12 +- web_server/src/config.rs | 12 +- web_server/src/lib.rs | 96 ++++---- 8 files changed, 201 insertions(+), 201 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f0704e39c4..231215dd2f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -256,10 +256,18 @@ pub async fn add_consensus_task>( quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), committee_membership: c_api.inner.memberships.da_membership.clone().into(), }; + // Poll (forever) for the latest quorum proposal consensus_state .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) .await; + + // Poll (forever) for the latest view sync certificate + consensus_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) + .await; + let filter = FilterEvent(Arc::new(consensus_event_filter)); let consensus_name = "Consensus Task"; let consensus_event_handler = HandleEvent(Arc::new( diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 0ea946cf98..ff7d827cca 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -223,10 +223,12 @@ struct Inner { view_sync_vote_task_map: Arc>>, /// Task map for transactions txn_task_map: Arc>>, - /// Task polling for latest quorum propsal + #[allow(clippy::type_complexity)] + /// A handle on the task polling for latest quorum propsal latest_quorum_proposal_task: Arc>>>, - /// Task polling for latest view sync proposal - latest_view_sync_proposal_task: Arc>>>, + #[allow(clippy::type_complexity)] + /// A handle on the task polling for the latest view sync certificate + latest_view_sync_certificate_task: Arc>>>, } impl Inner { @@ -237,10 +239,12 @@ impl Inner { receiver: UnboundedReceiver>, message_purpose: MessagePurpose, view_number: u64, + additional_wait: Duration, ) -> Result<(), NetworkError> { let mut vote_index = 0; let mut tx_index = 0; - let mut seen_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); + let mut seen_quorum_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); + let mut seen_view_sync_certificates = LruCache::new(NonZeroUsize::new(100).unwrap()); if message_purpose == MessagePurpose::Data { tx_index = *self.tx_index.read().await; @@ -251,14 +255,14 @@ impl Inner { let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), MessagePurpose::LatestQuorumProposal => config::get_latest_quorum_proposal_route(), - MessagePurpose::LatestViewSyncProposal => { - config::get_latest_view_sync_proposal_route() + MessagePurpose::LatestViewSyncCertificate => { + config::get_latest_view_sync_certificate_route() } MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), MessagePurpose::Data => config::get_transactions_route(tx_index), MessagePurpose::Internal => unimplemented!(), - MessagePurpose::ViewSyncProposal => { - config::get_view_sync_proposal_route(view_number, vote_index) + MessagePurpose::ViewSyncCertificate => { + config::get_view_sync_certificate_route(view_number, vote_index) } MessagePurpose::ViewSyncVote => { config::get_view_sync_vote_route(view_number, vote_index) @@ -284,10 +288,10 @@ impl Inner { debug!("tx index is {}", tx_index); } Ok(None) => { - async_sleep(self.wait_between_polls).await; + async_sleep(self.wait_between_polls + additional_wait).await; } Err(_e) => { - async_sleep(self.wait_between_polls).await; + async_sleep(self.wait_between_polls + additional_wait).await; } } } else { @@ -301,10 +305,8 @@ impl Inner { } MessagePurpose::Proposal => { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - self.broadcast_poll_queue - .write() - .await - .push(deserialized_messages[0].clone()); + let proposal = deserialized_messages[0].clone(); + self.broadcast_poll_queue.write().await.push(proposal); return Ok(()); // Wait for the view to change before polling for proposals again @@ -318,26 +320,23 @@ impl Inner { } MessagePurpose::LatestQuorumProposal => { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - self.broadcast_poll_queue - .write() - .await - .push(deserialized_messages[0].clone()); - - return Ok(()); + let proposal = deserialized_messages[0].clone(); + let hash = hash(&proposal); + // Only allow unseen proposals to be pushed to the queue + if seen_quorum_proposals.put(hash, ()).is_none() { + self.broadcast_poll_queue.write().await.push(proposal); + } } - MessagePurpose::LatestViewSyncProposal => { + MessagePurpose::LatestViewSyncCertificate => { let mut broadcast_poll_queue = self.broadcast_poll_queue.write().await; for cert in &deserialized_messages { let hash = hash(&cert); - if seen_proposals.put(hash, ()).is_none() { + if seen_view_sync_certificates.put(hash, ()).is_none() { broadcast_poll_queue.push(cert.clone()); } } - - // additional sleep to reduce load on web server - async_sleep(Duration::from_millis(300)).await; } MessagePurpose::Vote => { // error!( @@ -399,7 +398,7 @@ impl Inner { direct_poll_queue.push(vote.clone()); } } - MessagePurpose::ViewSyncProposal => { + MessagePurpose::ViewSyncCertificate => { // error!( // "Received {} view sync certs from web server for view {} is da {}", // deserialized_messages.len(), @@ -413,7 +412,7 @@ impl Inner { for cert in &deserialized_messages { vote_index += 1; let hash = hash(cert); - if seen_proposals.put(hash, ()).is_none() { + if seen_view_sync_certificates.put(hash, ()).is_none() { broadcast_poll_queue.push(cert.clone()); } } @@ -434,11 +433,11 @@ impl Inner { } } Ok(None) => { - async_sleep(self.wait_between_polls).await; + async_sleep(self.wait_between_polls + additional_wait).await; } Err(_e) => { // error!("error is {:?}", _e); - async_sleep(self.wait_between_polls).await; + async_sleep(self.wait_between_polls + additional_wait).await; } } } @@ -452,6 +451,10 @@ impl Inner { | ConsensusIntentEvent::CancelPollForDAC(event_view) | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) + | ConsensusIntentEvent::CancelPollForLatestProposal(event_view) + | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate( + event_view, + ) | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { if view_number == event_view { debug!("Shutting down polling task for view {}", event_view); @@ -470,10 +473,6 @@ impl Inner { } } - ConsensusIntentEvent::CancelPollForLatestViewSyncProposal => { - return Ok(()); - } - _ => { unimplemented!() } @@ -622,7 +621,7 @@ impl WebServerNetwork { view_sync_vote_task_map: Arc::default(), txn_task_map: Arc::default(), latest_quorum_proposal_task: Arc::default(), - latest_view_sync_proposal_task: Arc::default(), + latest_view_sync_certificate_task: Arc::default(), }); inner.connected.store(true, Ordering::Relaxed); @@ -646,12 +645,12 @@ impl WebServerNetwork { MessagePurpose::Data => config::post_transactions_route(), MessagePurpose::Internal | MessagePurpose::LatestQuorumProposal - | MessagePurpose::LatestViewSyncProposal => { + | MessagePurpose::LatestViewSyncCertificate => { return Err(WebServerNetworkError::EndpointError) } - MessagePurpose::ViewSyncProposal => { - // error!("Posting view sync proposal route is: {}", config::post_view_sync_proposal_route(*view_number)); - config::post_view_sync_proposal_route(*view_number) + MessagePurpose::ViewSyncCertificate => { + // error!("Posting view sync proposal route is: {}", config::post_view_sync_certificate_route(*view_number)); + config::post_view_sync_certificate_route(*view_number) } MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), MessagePurpose::DAC => config::post_da_certificate_route(*view_number), @@ -791,6 +790,19 @@ impl ConnectedNetwork, TYPES::Signatur Self: 'b, { let closure = async move { + // Cancel poll for latest quorum proposal on shutdown + if let Some(ref sender) = *self.inner.latest_quorum_proposal_task.read().await { + let _ = sender + .send(ConsensusIntentEvent::CancelPollForLatestProposal(1)) + .await; + }; + + // Cancel poll for latest view sync certificate on shutdown + if let Some(ref sender) = *self.inner.latest_view_sync_certificate_task.read().await { + let _ = sender + .send(ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(1)) + .await; + }; self.inner.running.store(false, Ordering::Relaxed); }; boxed_sync(closure) @@ -909,7 +921,12 @@ impl ConnectedNetwork, TYPES::Signatur let inner_clone = self.inner.clone(); async move { if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::Proposal, view_number) + .poll_web_server( + receiver, + MessagePurpose::Proposal, + view_number, + Duration::ZERO, + ) .await { warn!( @@ -942,7 +959,12 @@ impl ConnectedNetwork, TYPES::Signatur let inner_clone = self.inner.clone(); async move { if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::VidDisperse, view_number) + .poll_web_server( + receiver, + MessagePurpose::VidDisperse, + view_number, + Duration::ZERO, + ) .await { warn!( @@ -962,58 +984,59 @@ impl ConnectedNetwork, TYPES::Signatur .await; } ConsensusIntentEvent::PollForLatestQuorumProposal => { - let mut proposal_task = self.inner.latest_quorum_proposal_task.write().await; - if proposal_task.is_none() { - // create new task - let (sender, receiver) = unbounded(); - *proposal_task = Some(sender); + // Only start this task if we haven't already started it. + let mut cancel_handle = self.inner.latest_quorum_proposal_task.write().await; + if cancel_handle.is_none() { + let inner = self.inner.clone(); - async_spawn({ - let inner_clone = self.inner.clone(); - async move { - if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::LatestQuorumProposal, 1) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", + // Create sender and receiver for cancelling the task + let (sender, receiver) = unbounded(); + *cancel_handle = Some(sender); + + // Create the new task + async_spawn(async move { + if let Err(e) = inner + .poll_web_server( + receiver, + MessagePurpose::LatestQuorumProposal, + 1, + Duration::from_millis(500), + ) + .await + { + warn!( + "Background receive latest quorum proposal polling encountered an error: {:?}", e ); - } - let mut proposal_task = - inner_clone.latest_quorum_proposal_task.write().await; - *proposal_task = None; } }); } } - ConsensusIntentEvent::PollForLatestViewSyncProposal => { - let mut latest_view_sync_proposal_task = - self.inner.latest_view_sync_proposal_task.write().await; - if latest_view_sync_proposal_task.is_none() { - // create new task - let (sender, receiver) = unbounded(); - *latest_view_sync_proposal_task = Some(sender); + ConsensusIntentEvent::PollForLatestViewSyncCertificate => { + // Only start this task if we haven't already started it. + let mut cancel_handle = self.inner.latest_view_sync_certificate_task.write().await; + if cancel_handle.is_none() { + let inner = self.inner.clone(); - async_spawn({ - let inner_clone = self.inner.clone(); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::LatestViewSyncProposal, - 1, - ) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", + // Create sender and receiver for cancelling the task + let (sender, receiver) = unbounded(); + *cancel_handle = Some(sender); + + // Create the new task + async_spawn(async move { + if let Err(e) = inner + .poll_web_server( + receiver, + MessagePurpose::LatestViewSyncCertificate, + 1, + Duration::from_millis(500), + ) + .await + { + warn!( + "Background receive latest view sync certificate polling encountered an error: {:?}", e ); - } - let mut latest_view_sync_proposal_task = - inner_clone.latest_view_sync_proposal_task.write().await; - *latest_view_sync_proposal_task = None; } }); } @@ -1028,7 +1051,12 @@ impl ConnectedNetwork, TYPES::Signatur let inner_clone = self.inner.clone(); async move { if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::Vote, view_number) + .poll_web_server( + receiver, + MessagePurpose::Vote, + view_number, + Duration::ZERO, + ) .await { warn!( @@ -1058,7 +1086,12 @@ impl ConnectedNetwork, TYPES::Signatur let inner_clone = self.inner.clone(); async move { if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::DAC, view_number) + .poll_web_server( + receiver, + MessagePurpose::DAC, + view_number, + Duration::ZERO, + ) .await { warn!( @@ -1091,17 +1124,6 @@ impl ConnectedNetwork, TYPES::Signatur } } - ConsensusIntentEvent::CancelPollForLatestViewSyncProposal => { - let mut latest_view_sync_proposal_task = - self.inner.latest_view_sync_proposal_task.write().await; - - if let Some(thing) = latest_view_sync_proposal_task.take() { - let _res = thing - .send(ConsensusIntentEvent::CancelPollForLatestViewSyncProposal) - .await; - } - } - ConsensusIntentEvent::PollForViewSyncCertificate(view_number) => { let mut task_map = self.inner.view_sync_cert_task_map.write().await; if let Entry::Vacant(e) = task_map.entry(view_number) { @@ -1114,8 +1136,9 @@ impl ConnectedNetwork, TYPES::Signatur if let Err(e) = inner_clone .poll_web_server( receiver, - MessagePurpose::ViewSyncProposal, + MessagePurpose::ViewSyncCertificate, view_number, + Duration::ZERO, ) .await { @@ -1152,6 +1175,7 @@ impl ConnectedNetwork, TYPES::Signatur receiver, MessagePurpose::ViewSyncVote, view_number, + Duration::ZERO, ) .await { @@ -1213,7 +1237,12 @@ impl ConnectedNetwork, TYPES::Signatur let inner_clone = self.inner.clone(); async move { if let Err(e) = inner_clone - .poll_web_server(receiver, MessagePurpose::Data, view_number) + .poll_web_server( + receiver, + MessagePurpose::Data, + view_number, + Duration::ZERO, + ) .await { warn!( diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 1c9e607fb0..687d7dba44 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -412,12 +412,6 @@ impl< // Garbage collect old tasks // We could put this into a separate async task, but that would require making several fields on ViewSyncTaskState thread-safe and harm readability. In the common case this will have zero tasks to clean up. // cancel poll for votes - self.network - .inject_consensus_info( - ConsensusIntentEvent::CancelPollForLatestViewSyncProposal, - ) - .await; - // run GC for i in *self.last_garbage_collected_view..*self.current_view { self.replica_task_map @@ -477,11 +471,6 @@ impl< )) .await; - // Poll for future view sync certificates - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncProposal) - .await; - // Spawn replica task let next_view = *view_number + 1; // Subscribe to the view after we are leader since we know we won't propose in the next view if we are leader. @@ -498,12 +487,6 @@ impl< subscribe_view, )) .await; - // Also subscribe to the latest view for the same reason. The GC will remove the above poll - // in the case that one doesn't resolve but this one does. - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) - .await; - self.network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; @@ -744,13 +727,6 @@ impl, A: ConsensusApi + )) .await; - // Cancel poll for future view sync certificates - self.network - .inject_consensus_info( - ConsensusIntentEvent::CancelPollForLatestViewSyncProposal, - ) - .await; - if certificate.get_data().relay > self.relay { self.relay = certificate.get_data().relay; } @@ -815,10 +791,6 @@ impl, A: ConsensusApi + if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } - // Keep trying to get a more recent proposal to catch up to - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) - .await; self.relay += 1; match last_seen_certificate { ViewSyncPhase::None | ViewSyncPhase::PreCommit | ViewSyncPhase::Commit => { diff --git a/types/src/message.rs b/types/src/message.rs index 337982beac..f84718aa6c 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -67,14 +67,14 @@ pub enum MessagePurpose { Proposal, /// Message with most recent quorum proposal the server has LatestQuorumProposal, - /// Message with most recent view sync proposal the server has - LatestViewSyncProposal, + /// Message with most recent view sync certificate the server has + LatestViewSyncCertificate, /// Message with a quorum vote. Vote, /// Message with a view sync vote. ViewSyncVote, - /// Message with a view sync proposal. - ViewSyncProposal, + /// Message with a view sync certificate. + ViewSyncCertificate, /// Message with a DAC. DAC, /// Message for internal use @@ -427,7 +427,7 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) | GeneralConsensusMessage::ViewSyncCommitCertificate(_) | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { - MessagePurpose::ViewSyncProposal + MessagePurpose::ViewSyncCertificate } GeneralConsensusMessage::UpgradeCertificate(_) diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 3ef3edc00f..7a2748098b 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -150,7 +150,7 @@ pub enum ConsensusIntentEvent { /// Poll for the most recent quorum proposal the webserver has PollForLatestQuorumProposal, /// Poll for the most recent view sync proposal the webserver has - PollForLatestViewSyncProposal, + PollForLatestViewSyncCertificate, /// Poll for a DAC for a particular view PollForDAC(u64), /// Poll for view sync votes starting at a particular view @@ -167,6 +167,10 @@ pub enum ConsensusIntentEvent { CancelPollForViewSyncVotes(u64), /// Cancel polling for proposals. CancelPollForProposal(u64), + /// Cancel polling for the latest proposal. + CancelPollForLatestProposal(u64), + /// Cancel polling for the latest view sync certificate + CancelPollForLatestViewSyncCertificate(u64), /// Cancal polling for DAC. CancelPollForDAC(u64), /// Cancel polling for view sync certificate. @@ -175,8 +179,6 @@ pub enum ConsensusIntentEvent { CancelPollForVIDDisperse(u64), /// Cancel polling for transactions CancelPollForTransactions(u64), - /// Cancel polling for most recent view sync proposal - CancelPollForLatestViewSyncProposal, } impl ConsensusIntentEvent { @@ -191,6 +193,8 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) | ConsensusIntentEvent::CancelPollForVotes(view_number) | ConsensusIntentEvent::CancelPollForProposal(view_number) + | ConsensusIntentEvent::CancelPollForLatestProposal(view_number) + | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(view_number) | ConsensusIntentEvent::PollForVIDDisperse(view_number) | ConsensusIntentEvent::CancelPollForVIDDisperse(view_number) | ConsensusIntentEvent::CancelPollForDAC(view_number) @@ -200,8 +204,7 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::CancelPollForTransactions(view_number) | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, ConsensusIntentEvent::PollForLatestQuorumProposal - | ConsensusIntentEvent::PollForLatestViewSyncProposal - | ConsensusIntentEvent::CancelPollForLatestViewSyncProposal => 1, + | ConsensusIntentEvent::PollForLatestViewSyncCertificate => 1, } } } diff --git a/web_server/api.toml b/web_server/api.toml index 224f786d7d..9aa9e4f3b2 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -27,8 +27,8 @@ Return the proposal for the most recent view the server has """ # GET the latest quorum proposal -[route.get_latest_view_sync_proposal] -PATH = ["view_sync_proposal/latest"] +[route.get_latest_view_sync_certificate] +PATH = ["view_sync_certificate/latest"] DOC = """ Return the proposal for the most recent view the server has """ @@ -155,8 +155,8 @@ GET a view sync vote """ # POST a view sync proposal, where the view number is passed as an argument -[route.postviewsyncproposal] -PATH = ["view_sync_proposal/:view_number"] +[route.postviewsynccertificate] +PATH = ["view_sync_certificate/:view_number"] ":view_number" = "Integer" METHOD = "POST" DOC = """ @@ -164,8 +164,8 @@ Send a view sync vote """ # GET a view sync certificate, where the view number is passed as an argument -[route.getviewsyncproposal] -PATH = ["view_sync_proposal/:view_number/:index"] +[route.getviewsynccertificate] +PATH = ["view_sync_certificate/:view_number/:index"] ":view_number" = "Integer" ":index" = "Integer" METHOD = "GET" diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 85bee4ae32..af2ba638f5 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -32,8 +32,8 @@ pub fn get_latest_quorum_proposal_route() -> String { /// get latest view sync proposal #[must_use] -pub fn get_latest_view_sync_proposal_route() -> String { - "api/view_sync_proposal/latest".to_string() +pub fn get_latest_view_sync_certificate_route() -> String { + "api/view_sync_certificate/latest".to_string() } /// get latest certificate @@ -128,14 +128,14 @@ pub fn post_staketable_route() -> String { /// post view sync proposal #[must_use] -pub fn post_view_sync_proposal_route(view_number: u64) -> String { - format!("api/view_sync_proposal/{view_number}") +pub fn post_view_sync_certificate_route(view_number: u64) -> String { + format!("api/view_sync_certificate/{view_number}") } /// get view sync proposal #[must_use] -pub fn get_view_sync_proposal_route(view_number: u64, index: u64) -> String { - format!("api/view_sync_proposal/{view_number}/{index}") +pub fn get_view_sync_certificate_route(view_number: u64, index: u64) -> String { + format!("api/view_sync_certificate/{view_number}/{index}") } /// post view sync vote diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 589bf8c6a9..049ebc5d3f 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -11,7 +11,11 @@ use futures::FutureExt; use hotshot_types::traits::signature_key::SignatureKey; use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; -use std::{collections::HashMap, io, path::PathBuf}; +use std::{ + collections::{BTreeMap, HashMap}, + io, + path::PathBuf, +}; use tide_disco::{ api::ApiError, error::ServerError, @@ -31,23 +35,19 @@ type Error = ServerError; // TODO should the view numbers be generic over time? struct WebServerState { /// view number -> (secret, proposal) - proposals: HashMap)>, + proposals: BTreeMap)>, /// for view sync: view number -> (relay, certificate) - view_sync_proposals: HashMap)>>, + view_sync_certificates: BTreeMap)>>, /// view number -> relay - view_sync_proposal_index: HashMap, + view_sync_certificate_index: HashMap, /// view number -> (secret, da_certificates) da_certificates: HashMap)>, - /// view for oldest proposals in memory - oldest_proposal: u64, /// view for the most recent proposal to help nodes catchup latest_quorum_proposal: u64, /// view for the most recent view sync proposal - latest_view_sync_proposal: u64, + latest_view_sync_certificate: u64, /// view for the oldest DA certificate oldest_certificate: u64, - /// view for the oldest view sync certificate - oldest_view_sync_proposal: u64, /// view number -> Vec(index, vote) votes: HashMap)>>, /// view sync: view number -> Vec(relay, vote) @@ -96,14 +96,13 @@ impl WebServerState { /// Create new web server state fn new() -> Self { Self { - proposals: HashMap::new(), + proposals: BTreeMap::new(), da_certificates: HashMap::new(), votes: HashMap::new(), num_txns: 0, oldest_vote: 0, - oldest_proposal: 0, latest_quorum_proposal: 0, - latest_view_sync_proposal: 0, + latest_view_sync_certificate: 0, oldest_certificate: 0, shutdown: None, stake_table: Vec::new(), @@ -111,7 +110,7 @@ impl WebServerState { transactions: HashMap::new(), txn_lookup: HashMap::new(), _prng: StdRng::from_entropy(), - view_sync_proposals: HashMap::new(), + view_sync_certificates: BTreeMap::new(), view_sync_votes: HashMap::new(), view_sync_vote_index: HashMap::new(), @@ -128,8 +127,7 @@ impl WebServerState { vid_vote_index: HashMap::new(), oldest_view_sync_vote: 0, - oldest_view_sync_proposal: 0, - view_sync_proposal_index: HashMap::new(), + view_sync_certificate_index: HashMap::new(), } } /// Provide a shutdown signal to the server @@ -159,11 +157,11 @@ pub trait WebServerDataSource { /// Get latest view sync proposal /// # Errors /// Error if unable to serve. - fn get_latest_view_sync_proposal(&self) -> Result>>, Error>; + fn get_latest_view_sync_certificate(&self) -> Result>>, Error>; /// Get view sync proposal /// # Errors /// Error if unable to serve. - fn get_view_sync_proposal( + fn get_view_sync_certificate( &self, view_number: u64, index: u64, @@ -204,11 +202,14 @@ pub trait WebServerDataSource { /// # Errors /// Error if unable to serve. fn post_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; - /// Post view sync proposal + /// Post view sync certificate /// # Errors /// Error if unable to serve. - fn post_view_sync_proposal(&mut self, view_number: u64, proposal: Vec) - -> Result<(), Error>; + fn post_view_sync_certificate( + &mut self, + view_number: u64, + certificate: Vec, + ) -> Result<(), Error>; /// Post data avaiability certificate /// # Errors @@ -309,19 +310,19 @@ impl WebServerDataSource for WebServerState { self.get_proposal(self.latest_quorum_proposal) } - fn get_latest_view_sync_proposal(&self) -> Result>>, Error> { - self.get_view_sync_proposal(self.latest_view_sync_proposal, 0) + fn get_latest_view_sync_certificate(&self) -> Result>>, Error> { + self.get_view_sync_certificate(self.latest_view_sync_certificate, 0) } - fn get_view_sync_proposal( + fn get_view_sync_certificate( &self, view_number: u64, index: u64, ) -> Result>>, Error> { - let proposals = self.view_sync_proposals.get(&view_number); + let proposals = self.view_sync_certificates.get(&view_number); let mut ret_proposals = vec![]; if let Some(cert) = proposals { - for i in index..*self.view_sync_proposal_index.get(&view_number).unwrap() { + for i in index..*self.view_sync_certificate_index.get(&view_number).unwrap() { ret_proposals.push(cert[usize::try_from(i).unwrap()].1.clone()); } } @@ -561,10 +562,7 @@ impl WebServerDataSource for WebServerState { // Only keep proposal history for MAX_VIEWS number of view if self.proposals.len() >= MAX_VIEWS { - self.proposals.remove(&self.oldest_proposal); - while !self.proposals.contains_key(&self.oldest_proposal) { - self.oldest_proposal += 1; - } + self.proposals.pop_first(); } self.proposals .entry(view_number) @@ -593,35 +591,28 @@ impl WebServerDataSource for WebServerState { Ok(()) } - fn post_view_sync_proposal( + fn post_view_sync_certificate( &mut self, view_number: u64, proposal: Vec, ) -> Result<(), Error> { - if view_number > self.latest_view_sync_proposal { - self.latest_view_sync_proposal = view_number; + if view_number > self.latest_view_sync_certificate { + self.latest_view_sync_certificate = view_number; } // Only keep proposal history for MAX_VIEWS number of view - if self.view_sync_proposals.len() >= MAX_VIEWS { - self.view_sync_proposals - .remove(&self.oldest_view_sync_proposal); - while !self - .view_sync_proposals - .contains_key(&self.oldest_view_sync_proposal) - { - self.oldest_view_sync_proposal += 1; - } + if self.view_sync_certificates.len() >= MAX_VIEWS { + self.view_sync_certificates.pop_first(); } let next_index = self - .view_sync_proposal_index + .view_sync_certificate_index .entry(view_number) .or_insert(0); - self.view_sync_proposals + self.view_sync_certificates .entry(view_number) .and_modify(|current_props| current_props.push((*next_index, proposal.clone()))) .or_insert_with(|| vec![(*next_index, proposal)]); - self.view_sync_proposal_index + self.view_sync_certificate_index .entry(view_number) .and_modify(|index| *index += 1); Ok(()) @@ -730,10 +721,7 @@ impl WebServerDataSource for WebServerState { // Only keep proposal history for MAX_VIEWS number of views if self.proposals.len() >= MAX_VIEWS { - self.proposals.remove(&self.oldest_proposal); - while !self.proposals.contains_key(&self.oldest_proposal) { - self.oldest_proposal += 1; - } + self.proposals.pop_first(); } self.proposals .entry(view_number) @@ -796,14 +784,14 @@ where .get("get_latest_quorum_proposal", |_req, state| { async move { state.get_latest_quorum_proposal() }.boxed() })? - .get("get_latest_view_sync_proposal", |_req, state| { - async move { state.get_latest_view_sync_proposal() }.boxed() + .get("get_latest_view_sync_certificate", |_req, state| { + async move { state.get_latest_view_sync_certificate() }.boxed() })? - .get("getviewsyncproposal", |req, state| { + .get("getviewsynccertificate", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; let index: u64 = req.integer_param("index")?; - state.get_view_sync_proposal(view_number, index) + state.get_view_sync_certificate(view_number, index) } .boxed() })? @@ -871,11 +859,11 @@ where } .boxed() })? - .post("postviewsyncproposal", |req, state| { + .post("postviewsynccertificate", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; let proposal = req.body_bytes(); - state.post_view_sync_proposal(view_number, proposal) + state.post_view_sync_certificate(view_number, proposal) } .boxed() })? From 38bab44ac3601a9e3d750b675dc7bcc87e35807e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Jan 2024 03:23:25 +0000 Subject: [PATCH 0743/1393] Bump serde_json from 1.0.111 to 1.0.112 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.111 to 1.0.112. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.111...v1.0.112) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 6e6eb49461..29058167a0 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -27,7 +27,7 @@ libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.111" +serde_json = "1.0.112" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 7f7ff2a97a..3d43d017b3 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -48,7 +48,7 @@ tracing = { workspace = true } typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.111" +serde_json = "1.0.112" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 67574a3e0189e8196241ad9acaf78f537aadc51b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Jan 2024 03:24:07 +0000 Subject: [PATCH 0744/1393] Bump lru from 0.12.1 to 0.12.2 Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.12.1 to 0.12.2. - [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/jeromefroe/lru-rs/compare/0.12.1...0.12.2) --- updated-dependencies: - dependency-name: lru dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- hotshot/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 8ed04362ed..d80337bd88 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -107,7 +107,7 @@ surf-disco = { workspace = true } time = { workspace = true } derive_more = "0.99.17" portpicker = "0.1.1" -lru = "0.12.1" +lru = "0.12.2" tracing = { workspace = true } From 48839c36e006dd0b126a610f2225fbf858248c89 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Sun, 28 Jan 2024 22:24:14 -0500 Subject: [PATCH 0745/1393] combine the map in vote accumulator --- task-impls/src/vote.rs | 1 - types/src/vote.rs | 22 ++++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 3fcec0ce9e..93986834e8 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -201,7 +201,6 @@ where } let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), - sig_lists: HashMap::new(), signers: HashMap::new(), phantom: PhantomData, }; diff --git a/types/src/vote.rs b/types/src/vote.rs index dec0b49c7b..808f127562 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -73,7 +73,14 @@ pub trait Certificate: HasViewNumber { /// Get the vote commitment which the votes commit to fn get_data_commitment(&self) -> Commitment; } - +/// Mapping of vote commitment to sigatures and bitvec +type SignersMap = HashMap< + COMMITMENT, + ( + BitVec, + Vec<::PureAssembledSignatureType>, + ), +>; /// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs pub struct VoteAccumulator< TYPES: NodeType, @@ -86,13 +93,9 @@ pub struct VoteAccumulator< TYPES::SignatureKey, ::PureAssembledSignatureType, >, - /// A list of valid signatures for certificate aggregation - pub sig_lists: HashMap< - Commitment, - Vec<::PureAssembledSignatureType>, - >, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - pub signers: HashMap, BitVec>, + /// And a list of valid signatures for certificate aggregation + pub signers: SignersMap, TYPES::SignatureKey>, /// Phantom data to specify the types this accumulator is for pub phantom: PhantomData<(TYPES, VOTE, CERT)>, } @@ -135,11 +138,10 @@ impl, CERT: Certificate Date: Mon, 29 Jan 2024 13:37:17 -0500 Subject: [PATCH 0746/1393] fix poll sleep, remove deduplication for single cert (#2481) --- hotshot/src/traits/networking/web_server_network.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index ff7d827cca..8733595c9f 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -411,10 +411,7 @@ impl Inner { // TODO ED Need to add vote indexing to web server for view sync certs for cert in &deserialized_messages { vote_index += 1; - let hash = hash(cert); - if seen_view_sync_certificates.put(hash, ()).is_none() { - broadcast_poll_queue.push(cert.clone()); - } + broadcast_poll_queue.push(cert.clone()); } } @@ -433,13 +430,14 @@ impl Inner { } } Ok(None) => { - async_sleep(self.wait_between_polls + additional_wait).await; + async_sleep(self.wait_between_polls).await; } Err(_e) => { // error!("error is {:?}", _e); - async_sleep(self.wait_between_polls + additional_wait).await; + async_sleep(self.wait_between_polls).await; } } + async_sleep(additional_wait).await; } let maybe_event = receiver.try_recv(); match maybe_event { From f68c372dc49e006b84b68a8fc972255bce0748c2 Mon Sep 17 00:00:00 2001 From: chad Date: Sat, 27 Jan 2024 21:25:48 -0500 Subject: [PATCH 0747/1393] refactor: remove unused rejected field from Leaf struct (#2466) --- hotshot/src/traits/storage/memory_storage.rs | 1 - task-impls/src/consensus.rs | 5 ----- testing/src/task_helpers.rs | 2 -- testing/tests/consensus_task.rs | 1 - types/src/data.rs | 12 ------------ types/src/traits/storage.rs | 8 +------- 6 files changed, 1 insertion(+), 28 deletions(-) diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index fe672fd558..ffbcf235de 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -147,7 +147,6 @@ mod test { header, Some(payload), dummy_leaf_commit, - Vec::new(), <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ca5d2596d7..afc3a5beff 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -219,7 +219,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, - rejected: Vec::new(), proposer_id: self.quorum_membership.get_leader(view), }; let Ok(vote) = QuorumVote::::create_signed_vote( @@ -305,7 +304,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, - rejected: Vec::new(), proposer_id: self.quorum_membership.get_leader(view), }; @@ -538,7 +536,6 @@ impl, A: ConsensusApi + parent_commitment: justify_qc.get_data().leaf_commit, block_header: proposal.data.block_header.clone(), block_payload: None, - rejected: Vec::new(), proposer_id: sender, }; let state = @@ -609,7 +606,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.data.block_header.clone(), block_payload: None, - rejected: Vec::new(), proposer_id: sender.clone(), }; let leaf_commitment = leaf.commit(); @@ -1230,7 +1226,6 @@ impl, A: ConsensusApi + parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - rejected: vec![], proposer_id: self.api.public_key().clone(), }; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 0fd17171cc..3a8bae5ec7 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -247,7 +247,6 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - rejected: vec![], proposer_id: *api.public_key(), }; let mut parent_state = ::from_header(&parent_leaf.block_header); @@ -304,7 +303,6 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - rejected: vec![], proposer_id: quorum_membership.get_leader(ViewNumber::new(cur_view)), }; let signature_new_view = diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index c54a4f865f..29468a1284 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -61,7 +61,6 @@ async fn build_vote( parent_commitment, block_header: proposal.block_header, block_payload: None, - rejected: Vec::new(), proposer_id: membership.get_leader(view), }; let vote = QuorumVote::::create_signed_vote( diff --git a/types/src/data.rs b/types/src/data.rs index 37bdd1b9d6..9274ad8eb1 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -326,9 +326,6 @@ pub struct Leaf { /// It may be empty for nodes not in the DA committee. pub block_payload: Option, - /// Transactions that were marked for rejection while collecting the block. - pub rejected: Vec<::Transaction>, - /// the proposer id of the leaf pub proposer_id: TYPES::SignatureKey, } @@ -339,7 +336,6 @@ impl PartialEq for Leaf { && self.justify_qc == other.justify_qc && self.parent_commitment == other.parent_commitment && self.block_header == other.block_header - && self.rejected == other.rejected } } @@ -349,7 +345,6 @@ impl Hash for Leaf { self.justify_qc.hash(state); self.parent_commitment.hash(state); self.block_header.hash(state); - self.rejected.hash(state); } } @@ -376,7 +371,6 @@ impl Leaf { parent_commitment: fake_commitment(), block_header: block_header.clone(), block_payload: Some(block_payload), - rejected: Vec::new(), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } } @@ -444,10 +438,6 @@ impl Leaf { self.get_block_header().payload_commitment() } - /// Transactions rejected or invalidated by the application of this leaf. - pub fn get_rejected(&self) -> Vec<::Transaction> { - self.rejected.clone() - } /// Identity of the network participant who proposed this leaf. pub fn get_proposer_id(&self) -> TYPES::SignatureKey { self.proposer_id.clone() @@ -460,7 +450,6 @@ impl Leaf { parent_commitment: stored_view.parent, block_header: stored_view.block_header, block_payload: stored_view.block_payload, - rejected: stored_view.rejected, proposer_id: stored_view.proposer_id, } } @@ -560,7 +549,6 @@ where justify_qc: leaf.get_justify_qc(), block_header: leaf.get_block_header().clone(), block_payload: leaf.get_block_payload(), - rejected: leaf.get_rejected(), proposer_id: leaf.get_proposer_id(), } } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 4c6e9fe27f..a91c326442 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -1,9 +1,7 @@ //! Abstraction over on-disk storage of node state use super::node_implementation::NodeType; -use crate::{ - data::Leaf, simple_certificate::QuorumCertificate, traits::BlockPayload, vote::HasViewNumber, -}; +use crate::{data::Leaf, simple_certificate::QuorumCertificate, vote::HasViewNumber}; use async_trait::async_trait; use commit::Commitment; use derivative::Derivative; @@ -130,8 +128,6 @@ pub struct StoredView { /// /// It may be empty for nodes not in the DA committee. pub block_payload: Option, - /// transactions rejected in this view - pub rejected: Vec, /// the proposer id #[derivative(PartialEq = "ignore")] pub proposer_id: TYPES::SignatureKey, @@ -150,7 +146,6 @@ where block_header: TYPES::BlockHeader, block_payload: Option, parent_commitment: Commitment>, - rejected: Vec<::Transaction>, proposer_id: TYPES::SignatureKey, ) -> Self { Self { @@ -159,7 +154,6 @@ where justify_qc: qc, block_header, block_payload, - rejected, proposer_id, } } From 0436e8a8a1db6009cd91983efb64cc94b7579bd7 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 29 Jan 2024 17:40:36 -0800 Subject: [PATCH 0748/1393] Restore unrelated renamings --- hotshot/src/tasks/mod.rs | 6 +++--- task-impls/src/view_sync.rs | 4 ++-- task-impls/src/vote.rs | 2 +- types/src/traits/states.rs | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4c418d40c4..6e4946bde3 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -23,7 +23,7 @@ use hotshot_task_impls::{ }, transactions::{TransactionTaskState, TransactionsTaskTypes}, vid::{VIDTaskState, VIDTaskTypes}, - view_sync::{ViewSyncTaskState, ViewSyncTaskValidatedStates}, + view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; use hotshot_types::traits::election::Membership; use hotshot_types::{ @@ -525,7 +525,7 @@ pub async fn add_view_sync_task>( )); let view_sync_task_builder = TaskBuilder::< - ViewSyncTaskValidatedStates>, + ViewSyncTaskStateTypes>, >::new(view_sync_name.to_string()) .register_event_stream(event_stream.clone(), view_sync_event_filter) .await @@ -537,7 +537,7 @@ pub async fn add_view_sync_task>( // we *just* registered let view_sync_task_id = view_sync_task_builder.get_task_id().unwrap(); - let view_sync_task = ViewSyncTaskValidatedStates::build(view_sync_task_builder).launch(); + let view_sync_task = ViewSyncTaskStateTypes::build(view_sync_task_builder).launch(); task_runner.add_task( view_sync_task_id, view_sync_name.to_string(), diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 55f9662359..29791e862f 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -125,7 +125,7 @@ impl< } /// Types for the main view sync task -pub type ViewSyncTaskValidatedStates = HSTWithEvent< +pub type ViewSyncTaskStateTypes = HSTWithEvent< ViewSyncTaskError, HotShotEvent, ChannelStream>, @@ -175,7 +175,7 @@ impl, A: ConsensusApi + } /// Types for view sync replica state -pub type ViewSyncReplicaTaskValidatedStates = HSTWithEvent< +pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< ViewSyncTaskError, HotShotEvent, ChannelStream>, diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 1407f39136..bf20a7bad9 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -126,7 +126,7 @@ impl< } /// Types for a vote accumulator Task -pub type VoteTaskValidatedStates = HSTWithEvent< +pub type VoteTaskStateTypes = HSTWithEvent< VoteTaskError, HotShotEvent, ChannelStream>, diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 3cf996137b..4d5b4a8891 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -23,7 +23,7 @@ pub trait InstanceState: Debug + Send + Sync {} /// Abstraction over the state that blocks modify /// /// This trait represents the behaviors that the 'global' ledger state must have: -/// * A defined error type ([`Error`](State::Error)) +/// * A defined error type ([`Error`](ValidatedState::Error)) /// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockPayload)) /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied From e8ba93e334e48560bd3876d3824144818b18d283 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 29 Jan 2024 17:58:04 -0800 Subject: [PATCH 0749/1393] Remove Committable from state and move ConsensusTime --- hotshot/examples/infra/mod.rs | 4 +- hotshot/src/lib.rs | 4 +- hotshot/src/tasks/mod.rs | 3 +- .../src/traits/networking/libp2p_network.rs | 3 +- hotshot/src/traits/storage/memory_storage.rs | 2 +- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 4 +- task-impls/src/da.rs | 3 +- task-impls/src/view_sync.rs | 3 +- testing/src/spinning_task.rs | 3 +- testing/src/state_types.rs | 4 +- testing/src/task_helpers.rs | 4 +- testing/src/test_runner.rs | 2 +- testing/tests/consensus_task.rs | 2 +- testing/tests/da_task.rs | 2 +- testing/tests/memory_network.rs | 3 +- testing/tests/network_task.rs | 2 +- testing/tests/unit/message.rs | 2 +- testing/tests/vid_task.rs | 4 +- testing/tests/view_sync_task.rs | 2 +- types/src/data.rs | 4 +- types/src/simple_certificate.rs | 2 +- types/src/traits/node_implementation.rs | 36 ++++++++++++++- types/src/traits/states.rs | 44 ++----------------- 24 files changed, 67 insertions(+), 77 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 984f5f7d7b..d6ee0b8f35 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -39,8 +39,8 @@ use hotshot_types::{ block_contents::TestableBlock, election::Membership, network::CommunicationChannel, - node_implementation::NodeType, - states::{ConsensusTime, TestableState}, + node_implementation::{ConsensusTime,NodeType}, + states::{ TestableState}, }, HotShotConfig, }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 475dfcdef9..a9d2c153dd 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -53,9 +53,9 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, network::{CommunicationChannel, NetworkError}, - node_implementation::{NodeType, SendToTasks}, + node_implementation::{ConsensusTime,NodeType, SendToTasks}, signature_key::SignatureKey, - states::{ConsensusTime, ValidatedState}, + states::{ ValidatedState}, storage::StoredView, BlockPayload, }, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 6e4946bde3..066e62ed3e 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -33,8 +33,7 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, - node_implementation::{NodeImplementation, NodeType}, - states::ConsensusTime, + node_implementation::{NodeImplementation,ConsensusTime, NodeType}, BlockPayload, }, }; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 76c2a2214b..a4ec2b4d77 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -25,9 +25,8 @@ use hotshot_types::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, NetworkMsg, TestableChannelImplementation, TransmitType, ViewMessage, }, - node_implementation::NodeType, + node_implementation::{ConsensusTime,NodeType}, signature_key::SignatureKey, - states::ConsensusTime, }, }; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index 1a4a3b99c4..d1d69de75c 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -118,7 +118,7 @@ mod test { data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::NodeType, signature_key::SignatureKey, states::ConsensusTime, + node_implementation::{ConsensusTime,NodeType}, signature_key::SignatureKey, }, }; use std::marker::PhantomData; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 53495f4333..d6db4c3561 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -26,7 +26,7 @@ use hotshot_types::{ error::HotShotError, event::EventType, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, states::ConsensusTime, storage::Storage}, + traits::{node_implementation::{ConsensusTime,NodeType}, storage::Storage}, }; use std::sync::Arc; use tracing::error; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 07c20b881f..5c5fc2e620 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -28,9 +28,9 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime,NodeImplementation, NodeType}, signature_key::SignatureKey, - states::{ConsensusTime, ValidatedState}, + states::{ ValidatedState}, BlockPayload, }, utils::{Terminator, ViewInner}, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 373d435d7c..b175fbd5ae 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -22,9 +22,8 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime,NodeImplementation, NodeType}, signature_key::SignatureKey, - states::ConsensusTime, }, utils::ViewInner, vote::HasViewNumber, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 29791e862f..90c053272d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -36,8 +36,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::CommunicationChannel, - node_implementation::{NodeImplementation, NodeType}, - states::ConsensusTime, + node_implementation::{ConsensusTime,NodeImplementation, NodeType}, }, }; use snafu::Snafu; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 9213476fb8..89dedd131e 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -13,8 +13,7 @@ use hotshot_task::{ MergeN, }; use hotshot_types::traits::network::CommunicationChannel; -use hotshot_types::traits::states::ConsensusTime; -use hotshot_types::{event::Event, traits::node_implementation::NodeType}; +use hotshot_types::{event::Event, traits::node_implementation::{ConsensusTime,NodeType}}; use snafu::Snafu; use crate::{ diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 101a98aa89..071cdabdce 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -4,8 +4,8 @@ use commit::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, ViewNumber}, traits::{ - states::{ConsensusTime, InstanceState, TestableState, ValidatedState}, - BlockPayload, + states::{InstanceState, TestableState, ValidatedState}, + BlockPayload,node_implementation::ConsensusTime, }, }; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index fd06015c7c..0af0c486ab 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -25,8 +25,8 @@ use hotshot_types::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, consensus_api::ConsensusApi, election::Membership, - node_implementation::NodeType, - states::{ConsensusTime, ValidatedState}, + node_implementation::{ConsensusTime,NodeType}, + states::{ ValidatedState}, BlockPayload, }, vote::HasViewNumber, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 5d022a714b..116c21cff8 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -19,7 +19,7 @@ use hotshot_task::{ use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, - traits::{election::Membership, node_implementation::NodeType, states::ConsensusTime}, + traits::{election::Membership, node_implementation::{ConsensusTime,NodeType}}, HotShotConfig, ValidatorConfig, }; use std::{ diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 41e49c865e..058eb8a320 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -12,7 +12,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::{ data::{Leaf, QuorumProposal, ViewNumber}, message::GeneralConsensusMessage, - traits::states::ConsensusTime, + traits::node_implementation::ConsensusTime, }; use hotshot_types::{ simple_vote::QuorumData, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 1edb772a09..d7915edb05 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -9,7 +9,7 @@ use hotshot_types::{ simple_vote::{DAData, DAVote}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - node_implementation::NodeType, states::ConsensusTime, + node_implementation::{ConsensusTime,NodeType} }, }; use sha2::{Digest, Sha256}; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 2c052242d9..94186dd1b7 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -19,11 +19,10 @@ use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; +use hotshot_types::traits::node_implementation::{ConsensusTime,ChannelMaps, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, - traits::states::ConsensusTime, }; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 7750a979d8..a3810fc21b 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, states::ConsensusTime}, + traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, }; use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 1e9f4c88e4..5bb777ff9a 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -13,7 +13,7 @@ use hotshot_types::{ signature_key::BLSPubKey, simple_certificate::SimpleCertificate, simple_vote::ViewSyncCommitData, - traits::{signature_key::SignatureKey, states::ConsensusTime}, + traits::{signature_key::SignatureKey, node_implementation::ConsensusTime}, }; #[test] diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index e3325300bc..4b706e833c 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -5,10 +5,10 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::node_implementation::{ConsensusTime,NodeType}; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi, states::ConsensusTime}, + traits::{consensus_api::ConsensusApi}, }; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 11f72f22a8..5dd956a145 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,7 +1,7 @@ use hotshot::HotShotConsensusApi; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::node_types::{MemoryImpl, TestTypes}; -use hotshot_types::{data::ViewNumber, traits::states::ConsensusTime}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use std::collections::HashMap; #[cfg(test)] diff --git a/types/src/data.rs b/types/src/data.rs index 41fd6ef25b..6036fddb10 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -9,9 +9,9 @@ use crate::{ traits::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, election::Membership, - node_implementation::NodeType, + node_implementation::{ConsensusTime,NodeType}, signature_key::SignatureKey, - states::{ConsensusTime, TestableState, ValidatedState}, + states::{TestableState, ValidatedState}, storage::StoredView, BlockPayload, }, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index a7358241c3..b45263393e 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -17,7 +17,7 @@ use crate::{ }, traits::{ election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - states::ConsensusTime, + node_implementation::ConsensusTime, }, vote::{Certificate, HasViewNumber}, }; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index e548e4f277..982adf9d6a 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -7,7 +7,7 @@ use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, network::{CommunicationChannel, NetworkReliability, TestableNetworkingImplementation}, - states::{ConsensusTime, TestableState}, + states::{ TestableState}, storage::{StorageError, StorageState, TestableStorage}, ValidatedState, }; @@ -22,9 +22,11 @@ use crate::{ use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; +use commit::Committable; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeMap, + collections::BTreeMap, ops, + ops::{Deref, Sub}, fmt::Debug, hash::Hash, sync::{atomic::AtomicBool, Arc}, @@ -264,6 +266,36 @@ where } } +/// Trait for time compatibility needed for reward collection +pub trait ConsensusTime: + PartialOrd + + Ord + + Send + + Sync + + Debug + + Clone + + Copy + + Hash + + Deref + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + ops::AddAssign + + ops::Add + + Sub + + 'static + + Committable +{ + /// Create a new instance of this time unit at time number 0 + #[must_use] + fn genesis() -> Self { + Self::new(0) + } + /// Create a new instance of this time unit + fn new(val: u64) -> Self; + /// Get the u64 format of time + fn get_u64(&self) -> u64; +} + /// Trait with all the type definitions that are used in the current hotshot setup. pub trait NodeType: Clone diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 4d5b4a8891..a77128fa82 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -4,17 +4,13 @@ //! compatibilities over the current network state, which is modified by the transactions contained //! within blocks. -use crate::traits::BlockPayload; -use commit::Committable; +use crate::traits::{node_implementation::ConsensusTime,BlockPayload}; use serde::{de::DeserializeOwned, Serialize}; use std::{ error::Error, fmt::Debug, hash::Hash, - ops, - ops::{Deref, Sub}, }; - use super::block_contents::{BlockHeader, TestableBlock}; /// Instance-level state, which allows us to fetch missing validated state. @@ -24,10 +20,11 @@ pub trait InstanceState: Debug + Send + Sync {} /// /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](ValidatedState::Error)) -/// * The type of block that modifies this type of state ([`BlockPayload`](State::BlockPayload)) +/// * The type of block that modifies this type of state ([`BlockPayload`](ValidatedStates:: +/// BlockPayload)) /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied -/// ([`validate_and_apply_header`](State::validate_and_apply_header)) +/// ([`validate_and_apply_header`](ValidatedState::validate_and_apply_header)) pub trait ValidatedState: Serialize + DeserializeOwned @@ -39,7 +36,6 @@ pub trait ValidatedState: + Eq + Send + Sync - + Committable { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; @@ -99,35 +95,3 @@ where padding: u64, ) -> ::Transaction; } - -// TODO Seuqnecing here means involving DA in consensus - -/// Trait for time compatibility needed for reward collection -pub trait ConsensusTime: - PartialOrd - + Ord - + Send - + Sync - + Debug - + Clone - + Copy - + Hash - + Deref - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + ops::AddAssign - + ops::Add - + Sub - + 'static - + Committable -{ - /// Create a new instance of this time unit at time number 0 - #[must_use] - fn genesis() -> Self { - Self::new(0) - } - /// Create a new instance of this time unit - fn new(val: u64) -> Self; - /// Get the u64 format of time - fn get_u64(&self) -> u64; -} From e4191e67f46077cdebb82e58617d1e67ddd3d93c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jan 2024 03:31:29 +0000 Subject: [PATCH 0750/1393] Bump serde_json from 1.0.112 to 1.0.113 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.112 to 1.0.113. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.112...v1.0.113) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 29058167a0..96467a6bd0 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -27,7 +27,7 @@ libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.112" +serde_json = "1.0.113" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 3d43d017b3..ffe6b819d4 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -48,7 +48,7 @@ tracing = { workspace = true } typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.112" +serde_json = "1.0.113" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 468248c37541a47cf2f17ab218bf6ad983608b97 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 30 Jan 2024 10:36:27 -0500 Subject: [PATCH 0751/1393] try moving the wait (#2483) --- hotshot/src/traits/networking/web_server_network.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 8733595c9f..794e068b43 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -252,6 +252,8 @@ impl Inner { }; while self.running.load(Ordering::Relaxed) { + async_sleep(additional_wait).await; + let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), MessagePurpose::LatestQuorumProposal => config::get_latest_quorum_proposal_route(), @@ -437,7 +439,6 @@ impl Inner { async_sleep(self.wait_between_polls).await; } } - async_sleep(additional_wait).await; } let maybe_event = receiver.try_recv(); match maybe_event { From 5185858939dcf54b56ad346e3a76bd8d6bf1eb01 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 30 Jan 2024 10:49:51 -0500 Subject: [PATCH 0752/1393] Remove old documentation links --- hotshot/src/documentation.rs | 11 ----------- hotshot/src/lib.rs | 1 - 2 files changed, 12 deletions(-) diff --git a/hotshot/src/documentation.rs b/hotshot/src/documentation.rs index 86850a7eec..25630e22bc 100644 --- a/hotshot/src/documentation.rs +++ b/hotshot/src/documentation.rs @@ -6,14 +6,3 @@ clippy::missing_docs_in_private_items, non_camel_case_types )] -#![cfg_attr(feature = "doc-images", -cfg_attr(all(), -doc = ::embed_doc_image::embed_image!("basic_hotstuff", "../../docs/HotShotDocs/img/basic_hotstuff.svg")), -doc = ::embed_doc_image::embed_image!("chained_hotstuff", "../../docs/HotShotDocs/img/chained_hotstuff.svg")) -] -#![cfg_attr( - not(feature = "doc-images"), - doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \ - to enable." -)] -#![ doc = include_str!("../../../docs/HotShotDocs/main.md")] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0d4dbfb963..1e28539d94 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -1,6 +1,5 @@ //! Provides a generic rust implementation of the `HotShot` BFT protocol //! -//! See the [protocol documentation](https://github.com/EspressoSystems/hotshot-spec) for a protocol description. // Documentation module #[cfg(feature = "docs")] From ecaa3dff2aa20d3506eb9968b5c4434f48c9dfce Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 30 Jan 2024 11:35:36 -0800 Subject: [PATCH 0753/1393] Add instance state to header and state constructor --- hotshot/examples/infra/mod.rs | 6 ++-- hotshot/src/lib.rs | 12 ++++--- hotshot/src/tasks/mod.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/traits/storage/memory_storage.rs | 3 +- hotshot/src/types/handle.rs | 5 ++- task-impls/src/consensus.rs | 5 +-- task-impls/src/da.rs | 2 +- task-impls/src/view_sync.rs | 2 +- testing/src/block_types.rs | 7 ++-- testing/src/spinning_task.rs | 5 ++- testing/src/state_types.rs | 3 +- testing/src/task_helpers.rs | 7 ++-- testing/src/test_runner.rs | 8 +++-- testing/tests/da_task.rs | 6 ++-- testing/tests/memory_network.rs | 2 +- testing/tests/unit/message.rs | 2 +- testing/tests/vid_task.rs | 4 +-- types/src/data.rs | 6 ++-- types/src/simple_certificate.rs | 4 +-- types/src/traits/block_contents.rs | 8 +++-- types/src/traits/node_implementation.rs | 7 ++-- types/src/traits/states.rs | 33 ++++++------------- 23 files changed, 77 insertions(+), 64 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index d6ee0b8f35..5b9fd0e58e 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -39,8 +39,8 @@ use hotshot_types::{ block_contents::TestableBlock, election::Membership, network::CommunicationChannel, - node_implementation::{ConsensusTime,NodeType}, - states::{ TestableState}, + node_implementation::{ConsensusTime, NodeType}, + states::TestableState, }, HotShotConfig, }; @@ -328,7 +328,7 @@ pub trait RunDA< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let initializer = hotshot::HotShotInitializer::::from_genesis() + let initializer = hotshot::HotShotInitializer::::from_genesis(&TestInstanceState {}) .expect("Couldn't generate genesis block"); let config = self.get_config(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a9d2c153dd..86ed946078 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -53,9 +53,9 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, network::{CommunicationChannel, NetworkError}, - node_implementation::{ConsensusTime,NodeType, SendToTasks}, + node_implementation::{ConsensusTime, NodeType, SendToTasks}, signature_key::SignatureKey, - states::{ ValidatedState}, + states::ValidatedState, storage::StoredView, BlockPayload, }, @@ -211,7 +211,7 @@ impl> SystemContext { // insert genesis (or latest block) to state map let mut validated_state_map = BTreeMap::default(); - let validated_state = TYPES::ValidatedState::genesis(); + let validated_state = TYPES::ValidatedState::genesis(&instance_state); validated_state_map.insert( anchored_leaf.get_view_number(), View { @@ -755,9 +755,11 @@ impl HotShotInitializer { /// initialize from genesis /// # Errors /// If we are unable to apply the genesis block to the default state - pub fn from_genesis() -> Result> { + pub fn from_genesis( + instance_state: &TYPES::InstanceState, + ) -> Result> { Ok(Self { - inner: Leaf::genesis(), + inner: Leaf::genesis(instance_state), }) } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 066e62ed3e..2805e77675 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -33,7 +33,7 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, - node_implementation::{NodeImplementation,ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, BlockPayload, }, }; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a4ec2b4d77..0da20d0738 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -25,7 +25,7 @@ use hotshot_types::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, NetworkMsg, TestableChannelImplementation, TransmitType, ViewMessage, }, - node_implementation::{ConsensusTime,NodeType}, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, }; diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index d1d69de75c..2fa7d04b72 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -118,7 +118,8 @@ mod test { data::{fake_commitment, Leaf}, simple_certificate::QuorumCertificate, traits::{ - node_implementation::{ConsensusTime,NodeType}, signature_key::SignatureKey, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, }, }; use std::marker::PhantomData; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index d6db4c3561..bb6d32de33 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -26,7 +26,10 @@ use hotshot_types::{ error::HotShotError, event::EventType, simple_certificate::QuorumCertificate, - traits::{node_implementation::{ConsensusTime,NodeType}, storage::Storage}, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + storage::Storage, + }, }; use std::sync::Arc; use tracing::error; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5c5fc2e620..eb4c335e60 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -28,9 +28,9 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{ConsensusTime,NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, - states::{ ValidatedState}, + states::ValidatedState, BlockPayload, }, utils::{Terminator, ViewInner}, @@ -1224,6 +1224,7 @@ impl, A: ConsensusApi + let block_header = TYPES::BlockHeader::new( commit_and_metadata.commitment, commit_and_metadata.metadata.clone(), + &consensus.instance_state, &parent_header, state, ); diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b175fbd5ae..9f4ec122c6 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -22,7 +22,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::{CommunicationChannel, ConsensusIntentEvent}, - node_implementation::{ConsensusTime,NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, }, utils::ViewInner, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 90c053272d..bc0222930c 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -36,7 +36,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::CommunicationChannel, - node_implementation::{ConsensusTime,NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; use snafu::Snafu; diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index 9819a99da2..b58a116932 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -8,7 +8,7 @@ use hotshot_types::{ data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, traits::{ block_contents::{vid_commitment, BlockHeader, TestableBlock, Transaction}, - BlockPayload, + BlockPayload, ValidatedState, }, }; use serde::{Deserialize, Serialize}; @@ -187,6 +187,7 @@ impl BlockHeader for TestBlockHeader { fn new( payload_commitment: VidCommitment, _metadata: ::Metadata, + _instance_state: &::Instance, parent_header: &Self, _parent_state: &Self::State, ) -> Self { @@ -196,7 +197,9 @@ impl BlockHeader for TestBlockHeader { } } - fn genesis() -> ( + fn genesis( + _instance_state: &::Instance, + ) -> ( Self, Self::Payload, ::Metadata, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 89dedd131e..d3311a8a24 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -13,7 +13,10 @@ use hotshot_task::{ MergeN, }; use hotshot_types::traits::network::CommunicationChannel; -use hotshot_types::{event::Event, traits::node_implementation::{ConsensusTime,NodeType}}; +use hotshot_types::{ + event::Event, + traits::node_implementation::{ConsensusTime, NodeType}, +}; use snafu::Snafu; use crate::{ diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 071cdabdce..4241ab783f 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -4,8 +4,9 @@ use commit::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, ViewNumber}, traits::{ + node_implementation::ConsensusTime, states::{InstanceState, TestableState, ValidatedState}, - BlockPayload,node_implementation::ConsensusTime, + BlockPayload, }, }; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 0af0c486ab..08acae040c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -25,8 +25,8 @@ use hotshot_types::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, consensus_api::ConsensusApi, election::Membership, - node_implementation::{ConsensusTime,NodeType}, - states::{ ValidatedState}, + node_implementation::{ConsensusTime, NodeType}, + states::ValidatedState, BlockPayload, }, vote::HasViewNumber, @@ -61,7 +61,7 @@ pub async fn build_system_handle( let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis().unwrap(); + let initializer = HotShotInitializer::::from_genesis(&TestInstanceState {}).unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); @@ -244,6 +244,7 @@ async fn build_quorum_proposal_and_signature( let block_header = TestBlockHeader::new( payload_commitment, (), + &TestInstanceState {}, &parent_leaf.block_header, &parent_state, ); diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 116c21cff8..6f7c466323 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -19,7 +19,10 @@ use hotshot_task::{ use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, - traits::{election::Membership, node_implementation::{ConsensusTime,NodeType}}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + }, HotShotConfig, ValidatorConfig, }; use std::{ @@ -227,7 +230,8 @@ where let node_id = self.next_node_id; let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis().unwrap(); + let initializer = + HotShotInitializer::::from_genesis(&TestInstanceState {}).unwrap(); let networks = (self.launcher.resource_generator.channel_generator)(node_id); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index d7915edb05..fa0e12eb43 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -8,8 +8,10 @@ use hotshot_types::{ data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, traits::{ - block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - node_implementation::{ConsensusTime,NodeType} + block_contents::vid_commitment, + consensus_api::ConsensusApi, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, }, }; use sha2::{Digest, Sha256}; diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 94186dd1b7..7648d30174 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -19,7 +19,7 @@ use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; -use hotshot_types::traits::node_implementation::{ConsensusTime,ChannelMaps, NodeType}; +use hotshot_types::traits::node_implementation::{ChannelMaps, ConsensusTime, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 5bb777ff9a..21f0ac4ac1 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -13,7 +13,7 @@ use hotshot_types::{ signature_key::BLSPubKey, simple_certificate::SimpleCertificate, simple_vote::ViewSyncCommitData, - traits::{signature_key::SignatureKey, node_implementation::ConsensusTime}, + traits::{node_implementation::ConsensusTime, signature_key::SignatureKey}, }; #[test] diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 4b706e833c..85e1d27fb2 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -5,10 +5,10 @@ use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, task_helpers::vid_init, }; -use hotshot_types::traits::node_implementation::{ConsensusTime,NodeType}; +use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, - traits::{consensus_api::ConsensusApi}, + traits::consensus_api::ConsensusApi, }; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/types/src/data.rs b/types/src/data.rs index 6036fddb10..5d42e0da71 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -9,7 +9,7 @@ use crate::{ traits::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, election::Membership, - node_implementation::{ConsensusTime,NodeType}, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::{TestableState, ValidatedState}, storage::StoredView, @@ -368,8 +368,8 @@ impl Display for Leaf { impl Leaf { /// Create a new leaf from its components. #[must_use] - pub fn genesis() -> Self { - let (block_header, block_payload, _) = TYPES::BlockHeader::genesis(); + pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { + let (block_header, block_payload, _) = TYPES::BlockHeader::genesis(instance_state); Self { view_number: TYPES::Time::genesis(), justify_qc: QuorumCertificate::::genesis(), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index b45263393e..1592583e90 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -16,8 +16,8 @@ use crate::{ ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ - election::Membership, node_implementation::NodeType, signature_key::SignatureKey, - node_implementation::ConsensusTime, + election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, + signature_key::SignatureKey, }, vote::{Certificate, HasViewNumber}, }; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 93fde24996..0feb0e1382 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -115,16 +115,20 @@ pub trait BlockHeader: /// Validated state. type State: ValidatedState; - /// Build a header with the payload commitment, metadata, parent header, and parent state. + /// Build a header with the payload commitment, metadata, instance-level state, parent header, + /// and parent state. fn new( payload_commitment: VidCommitment, metadata: ::Metadata, + instance_state: &::Instance, parent_header: &Self, parent_state: &Self::State, ) -> Self; /// Build the genesis header, payload, and metadata. - fn genesis() -> ( + fn genesis( + instance_state: &::Instance, + ) -> ( Self, Self::Payload, ::Metadata, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 982adf9d6a..df0dac8d0d 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -7,7 +7,7 @@ use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, network::{CommunicationChannel, NetworkReliability, TestableNetworkingImplementation}, - states::{ TestableState}, + states::TestableState, storage::{StorageError, StorageState, TestableStorage}, ValidatedState, }; @@ -25,10 +25,11 @@ use async_trait::async_trait; use commit::Committable; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeMap, ops, - ops::{Deref, Sub}, + collections::BTreeMap, fmt::Debug, hash::Hash, + ops, + ops::{Deref, Sub}, sync::{atomic::AtomicBool, Arc}, }; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index a77128fa82..3c1e58d828 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -4,14 +4,10 @@ //! compatibilities over the current network state, which is modified by the transactions contained //! within blocks. -use crate::traits::{node_implementation::ConsensusTime,BlockPayload}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{ - error::Error, - fmt::Debug, - hash::Hash, -}; use super::block_contents::{BlockHeader, TestableBlock}; +use crate::traits::{node_implementation::ConsensusTime, BlockPayload}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{error::Error, fmt::Debug, hash::Hash}; /// Instance-level state, which allows us to fetch missing validated state. pub trait InstanceState: Debug + Send + Sync {} @@ -20,29 +16,20 @@ pub trait InstanceState: Debug + Send + Sync {} /// /// This trait represents the behaviors that the 'global' ledger state must have: /// * A defined error type ([`Error`](ValidatedState::Error)) -/// * The type of block that modifies this type of state ([`BlockPayload`](ValidatedStates:: -/// BlockPayload)) +/// * The type of block that modifies this type of state ([`BlockPayload`](`ValidatedStates:: +/// BlockPayload`)) /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied -/// ([`validate_and_apply_header`](ValidatedState::validate_and_apply_header)) +/// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) pub trait ValidatedState: - Serialize - + DeserializeOwned - + Clone - + Debug - + Default - + Hash - + PartialEq - + Eq - + Send - + Sync + Serialize + DeserializeOwned + Clone + Debug + Default + Hash + PartialEq + Eq + Send + Sync { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; /// The type of the instance-level state this state is assocaited with type Instance: InstanceState; /// The type of block header this state is associated with - type BlockHeader: BlockHeader; + type BlockHeader: BlockHeader; /// The type of block payload this state is associated with type BlockPayload: BlockPayload; /// Time compatibility needed for reward collection @@ -73,8 +60,8 @@ pub trait ValidatedState: /// Construct a genesis validated state. #[must_use] - fn genesis() -> Self { - Self::from_header(&Self::BlockHeader::genesis().0) + fn genesis(instance: &Self::Instance) -> Self { + Self::from_header(&Self::BlockHeader::genesis(instance).0) } /// Gets called to notify the persistence backend that this state has been committed From 952ba740a592a37cf533896465efd0e8246d721e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 10:24:51 -0500 Subject: [PATCH 0754/1393] remove unused `event_sender` --- hotshot/src/lib.rs | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0d4dbfb963..88ea0fcb40 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -23,7 +23,6 @@ use crate::{ }; use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, - async_primitives::broadcast::BroadcastSender, channel::UnboundedSender, }; use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; @@ -70,7 +69,7 @@ use std::{ time::Duration, }; use tasks::add_vid_task; -use tracing::{debug, error, info, instrument, trace, warn}; +use tracing::{debug, info, instrument, trace, warn}; // -- Rexports // External @@ -143,11 +142,6 @@ pub struct SystemContextInner> { /// Memberships used by consensus pub memberships: Arc>, - // pub quorum_network: Arc; - // pub committee_network: Arc; - /// Sender for [`Event`]s - event_sender: RwLock>>>, - /// the metrics that the implementor is using. _metrics: Arc, @@ -261,7 +255,6 @@ impl> SystemContext { storage, networks: Arc::new(networks), memberships: Arc::new(memberships), - event_sender: RwLock::default(), _metrics: consensus_metrics.clone(), internal_event_stream: ChannelStream::new(), output_event_stream: ChannelStream::new(), @@ -316,13 +309,8 @@ impl> SystemContext { // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); - let mut event_sender = self.inner.event_sender.write().await; - if let Some(sender) = &*event_sender { - if let Err(e) = sender.send_async(event).await { - error!(?e, "Could not send event to event_sender"); - *event_sender = None; - } - } + self.inner.output_event_stream.publish(event).await; + } /// Publishes a transaction asynchronously to the network @@ -704,13 +692,7 @@ impl> ConsensusApi async fn send_event(&self, event: Event) { debug!(?event, "send_event"); - let mut event_sender = self.inner.event_sender.write().await; - if let Some(sender) = &*event_sender { - if let Err(e) = sender.send_async(event).await { - error!(?e, "Could not send event to event_sender"); - *event_sender = None; - } - } + self.inner.output_event_stream.publish(event).await; } fn public_key(&self) -> &TYPES::SignatureKey { From fa99c2cda8e3fac6970f4b32f92928086c1cf69e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 31 Jan 2024 08:19:17 -0800 Subject: [PATCH 0755/1393] Get instance state from HotShotInitializer, move function parameters --- hotshot/examples/infra/mod.rs | 1 - hotshot/src/lib.rs | 17 +++++++++++------ task-impls/src/consensus.rs | 9 ++++----- testing/src/block_types.rs | 6 +++--- testing/src/state_types.rs | 18 ++---------------- testing/src/task_helpers.rs | 14 ++++---------- testing/src/test_runner.rs | 1 - types/src/traits/block_contents.rs | 6 +++--- types/src/traits/states.rs | 5 ++--- 9 files changed, 29 insertions(+), 48 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 5b9fd0e58e..9763c0696f 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -383,7 +383,6 @@ pub trait RunDA< memberships, networks_bundle, initializer, - TestInstanceState {}, ConsensusMetricsValue::default(), ) .await diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 86ed946078..073c60ac29 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -195,13 +195,13 @@ impl> SystemContext { memberships: Memberships, networks: Networks, initializer: HotShotInitializer, - instance_state: TYPES::InstanceState, metrics: ConsensusMetricsValue, ) -> Result> { debug!("Creating a new hotshot"); let consensus_metrics = Arc::new(metrics); let anchored_leaf = initializer.inner; + let instance_state = initializer.instance_state; // insert to storage storage @@ -442,7 +442,6 @@ impl> SystemContext { memberships: Memberships, networks: Networks, initializer: HotShotInitializer, - instance_state: TYPES::InstanceState, metrics: ConsensusMetricsValue, ) -> Result< ( @@ -461,7 +460,6 @@ impl> SystemContext { memberships, networks, initializer, - instance_state, metrics, ) .await?; @@ -749,6 +747,9 @@ impl> ConsensusApi pub struct HotShotInitializer { /// the leaf specified initialization inner: Leaf, + + /// Instance-level state. + instance_state: TYPES::InstanceState, } impl HotShotInitializer { @@ -760,11 +761,15 @@ impl HotShotInitializer { ) -> Result> { Ok(Self { inner: Leaf::genesis(instance_state), + instance_state: instance_state.clone(), }) } - /// reload previous state based on most recent leaf - pub fn from_reload(anchor_leaf: Leaf) -> Self { - Self { inner: anchor_leaf } + /// reload previous state based on most recent leaf and the instance-level state. + pub fn from_reload(anchor_leaf: Leaf, instance_state: TYPES::InstanceState) -> Self { + Self { + inner: anchor_leaf, + instance_state, + } } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 6b862f4063..9c35a62abe 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -594,9 +594,8 @@ impl, A: ConsensusApi + }; let Ok(state) = parent_state.validate_and_apply_header( &consensus.instance_state, - &proposal.data.block_header.clone(), &parent.block_header.clone(), - &view, + &proposal.data.block_header.clone(), ) else { error!("Block header doesn't extend the proposal",); return; @@ -1218,11 +1217,11 @@ impl, A: ConsensusApi + if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { let block_header = TYPES::BlockHeader::new( - commit_and_metadata.commitment, - commit_and_metadata.metadata.clone(), + state, &consensus.instance_state, &parent_header, - state, + commit_and_metadata.commitment, + commit_and_metadata.metadata.clone(), ); let leaf = Leaf { view_number: view, diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index b58a116932..c581f7726a 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -185,11 +185,11 @@ impl BlockHeader for TestBlockHeader { type State = TestValidatedState; fn new( - payload_commitment: VidCommitment, - _metadata: ::Metadata, + _parent_state: &Self::State, _instance_state: &::Instance, parent_header: &Self, - _parent_state: &Self::State, + payload_commitment: VidCommitment, + _metadata: ::Metadata, ) -> Self { Self { block_number: parent_header.block_number + 1, diff --git a/testing/src/state_types.rs b/testing/src/state_types.rs index 4241ab783f..7d2c6794b5 100644 --- a/testing/src/state_types.rs +++ b/testing/src/state_types.rs @@ -4,7 +4,6 @@ use commit::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, ViewNumber}, traits::{ - node_implementation::ConsensusTime, states::{InstanceState, TestableState, ValidatedState}, BlockPayload, }, @@ -18,7 +17,7 @@ use crate::block_types::{TestBlockHeader, TestBlockPayload}; pub use crate::node_types::TestTypes; /// Instance-level state implementation for testing purposes. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct TestInstanceState {} impl InstanceState for TestInstanceState {} @@ -28,8 +27,6 @@ impl InstanceState for TestInstanceState {} pub struct TestValidatedState { /// the block height block_height: u64, - /// the view number - view_number: ViewNumber, /// the previous state commitment prev_state_commitment: Commitment, } @@ -38,7 +35,6 @@ impl Committable for TestValidatedState { fn commit(&self) -> Commitment { commit::RawCommitmentBuilder::new("Test State Commit") .u64_field("block_height", self.block_height) - .u64_field("view_number", *self.view_number) .field("prev_state_commitment", self.prev_state_commitment) .finalize() } @@ -52,7 +48,6 @@ impl Default for TestValidatedState { fn default() -> Self { Self { block_height: 0, - view_number: ViewNumber::genesis(), prev_state_commitment: fake_commitment(), } } @@ -72,20 +67,11 @@ impl ValidatedState for TestValidatedState { fn validate_and_apply_header( &self, _instance: &Self::Instance, - _proposed_header: &Self::BlockHeader, _parent_header: &Self::BlockHeader, - view_number: &Self::Time, + _proposed_header: &Self::BlockHeader, ) -> Result { - if view_number == &ViewNumber::genesis() { - if &self.view_number != view_number { - return Err(BlockError::InvalidBlockHeader); - } - } else if self.view_number >= *view_number { - return Err(BlockError::InvalidBlockHeader); - } Ok(TestValidatedState { block_height: self.block_height + 1, - view_number: *view_number, prev_state_commitment: self.commit(), }) } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 9ca92cacdd..73068e94eb 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -113,7 +113,6 @@ pub async fn build_system_handle( memberships, networks_bundle, initializer, - TestInstanceState {}, ConsensusMetricsValue::default(), ) .await @@ -242,11 +241,11 @@ async fn build_quorum_proposal_and_signature( let mut parent_state = ::from_header(&parent_leaf.block_header); let block_header = TestBlockHeader::new( - payload_commitment, - (), + &parent_state, &TestInstanceState {}, &parent_leaf.block_header, - &parent_state, + payload_commitment, + (), ); // current leaf that can be re-assigned everytime when entering a new view let mut leaf = Leaf { @@ -271,12 +270,7 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { let state_new_view = parent_state - .validate_and_apply_header( - &TestInstanceState {}, - &block_header, - &block_header, - &ViewNumber::new(cur_view - 1), - ) + .validate_and_apply_header(&TestInstanceState {}, &block_header, &block_header) .unwrap(); // save states for the previous view to pass all the qc checks // In the long term, we want to get rid of this, do not manually update consensus state diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 6f7c466323..f1d4cf237b 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -321,7 +321,6 @@ where memberships, network_bundle, initializer, - TestInstanceState {}, ConsensusMetricsValue::default(), ) .await diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 0feb0e1382..92785ed1b2 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -118,11 +118,11 @@ pub trait BlockHeader: /// Build a header with the payload commitment, metadata, instance-level state, parent header, /// and parent state. fn new( - payload_commitment: VidCommitment, - metadata: ::Metadata, + parent_state: &Self::State, instance_state: &::Instance, parent_header: &Self, - parent_state: &Self::State, + payload_commitment: VidCommitment, + metadata: ::Metadata, ) -> Self; /// Build the genesis header, payload, and metadata. diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 3c1e58d828..0d7d5fe5ee 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -10,7 +10,7 @@ use serde::{de::DeserializeOwned, Serialize}; use std::{error::Error, fmt::Debug, hash::Hash}; /// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Debug + Send + Sync {} +pub trait InstanceState: Clone + Debug + Send + Sync {} /// Abstraction over the state that blocks modify /// @@ -48,9 +48,8 @@ pub trait ValidatedState: fn validate_and_apply_header( &self, instance: &Self::Instance, - proposed_header: &Self::BlockHeader, parent_header: &Self::BlockHeader, - view_number: &Self::Time, + proposed_header: &Self::BlockHeader, ) -> Result; /// Construct the state with the given block header. From c7e85ff752bfd3682d7b2f1869d11eee41f7cc2c Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Wed, 31 Jan 2024 11:33:10 -0500 Subject: [PATCH 0756/1393] feat(light-client): remove state-prover crate (#2491) * remove state-prover crate and update types * address comments --- hotshot-state-prover/Cargo.toml | 29 -- hotshot-state-prover/src/circuit.rs | 613 ---------------------------- hotshot-state-prover/src/lib.rs | 283 ------------- hotshot-state-prover/src/utils.rs | 54 --- types/src/light_client.rs | 136 +++++- 5 files changed, 121 insertions(+), 994 deletions(-) delete mode 100644 hotshot-state-prover/Cargo.toml delete mode 100644 hotshot-state-prover/src/circuit.rs delete mode 100644 hotshot-state-prover/src/lib.rs delete mode 100644 hotshot-state-prover/src/utils.rs diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml deleted file mode 100644 index 8187e84da3..0000000000 --- a/hotshot-state-prover/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "hotshot-state-prover" -description = "Generate state update proof for HotShot light client" -version = { workspace = true } -authors = { workspace = true } -edition = { workspace = true } -rust-version = { workspace = true } - -[dependencies] -ark-bn254 = { workspace = true } -ark-ec = { workspace = true } -ark-ed-on-bn254 = { workspace = true } -ark-ff = { workspace = true } -ark-std = { workspace = true } -ethereum-types = { workspace = true } -hotshot-types = { path = "../types" } -jf-plonk = { workspace = true } -jf-primitives = { workspace = true } -jf-relation = { workspace = true } -jf-utils = { workspace = true } -hotshot-stake-table = { path = "../hotshot-stake-table" } - -[features] -default = ["parallel"] -std = ["ark-std/std", "ark-ff/std"] -parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] - -[lints] -workspace = true diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs deleted file mode 100644 index 013fbbe5b2..0000000000 --- a/hotshot-state-prover/src/circuit.rs +++ /dev/null @@ -1,613 +0,0 @@ -//! Circuit implementation for verifying light client state update - -use ark_ec::twisted_edwards::TECurveConfig; -use ark_ff::PrimeField; -use ark_std::borrow::Borrow; -use ethereum_types::U256; -use hotshot_types::light_client::LightClientState; -use jf_plonk::errors::PlonkError; -use jf_primitives::{ - circuit::{ - rescue::RescueNativeGadget, - signature::schnorr::{SignatureGadget, VerKeyVar}, - }, - rescue::RescueParameter, - signatures::schnorr::{Signature, VerKey as SchnorrVerKey}, -}; -use jf_relation::{errors::CircuitError, Circuit, PlonkCircuit, Variable}; - -/// Lossy conversion of a U256 into a field element. -pub(crate) fn u256_to_field(v: &U256) -> F { - let mut bytes = vec![0u8; 32]; - v.to_little_endian(&mut bytes); - F::from_le_bytes_mod_order(&bytes) -} - -/// Variable for stake table entry -#[derive(Clone, Debug)] -pub struct StakeTableEntryVar { - /// state verification keys - pub state_ver_key: VerKeyVar, - /// Stake amount - pub stake_amount: Variable, -} - -/// Light client state Variable -/// The stake table commitment is a triple `(qc_keys_comm, state_keys_comm, stake_amount_comm)`. -/// Variable for a stake table commitment -#[derive(Clone, Debug)] -pub struct StakeTableCommVar { - /// Commitment for QC verification keys - pub qc_keys_comm: Variable, - /// Commitment for state verification keys - pub state_keys_comm: Variable, - /// Commitment for stake amount - pub stake_amount_comm: Variable, -} - -/// Light client state Variable -#[derive(Clone, Debug)] -pub struct LightClientStateVar { - /// Private list holding all variables - /// `vars[0]`: view number - /// `vars[1]`: block height - /// `vars[2]`: block commitment root - /// `vars[3]`: fee ledger commitment - /// `vars[4-6]`: stake table commitment - vars: [Variable; 7], -} - -/// public input -#[derive(Clone, Debug)] -pub struct PublicInput(Vec); - -impl AsRef<[F]> for PublicInput { - fn as_ref(&self) -> &[F] { - &self.0 - } -} - -impl From> for PublicInput { - fn from(v: Vec) -> Self { - Self(v) - } -} - -impl PublicInput { - /// Return the threshold - #[must_use] - pub fn threshold(&self) -> F { - self.0[0] - } - - /// Return the view number of the light client state - #[must_use] - pub fn view_number(&self) -> F { - self.0[1] - } - - /// Return the block height of the light client state - #[must_use] - pub fn block_height(&self) -> F { - self.0[2] - } - - /// Return the block commitment root of the light client state - #[must_use] - pub fn block_comm_root(&self) -> F { - self.0[3] - } - - /// Return the fee ledger commitment of the light client state - #[must_use] - pub fn fee_ledger_comm(&self) -> F { - self.0[4] - } - - /// Return the stake table commitment of the light client state - #[must_use] - pub fn stake_table_comm(&self) -> (F, F, F) { - (self.0[5], self.0[6], self.0[7]) - } - - /// Return the qc key commitment of the light client state - #[must_use] - pub fn qc_key_comm(&self) -> F { - self.0[5] - } - - /// Return the state key commitment of the light client state - #[must_use] - pub fn state_key_comm(&self) -> F { - self.0[6] - } - - /// Return the stake amount commitment of the light client state - #[must_use] - pub fn stake_amount_comm(&self) -> F { - self.0[7] - } -} - -impl LightClientStateVar { - /// # Errors - /// if unable to create any of the public variables - pub fn new( - circuit: &mut PlonkCircuit, - state: &LightClientState, - ) -> Result { - let view_number_f = F::from(state.view_number as u64); - let block_height_f = F::from(state.block_height as u64); - Ok(Self { - vars: [ - circuit.create_public_variable(view_number_f)?, - circuit.create_public_variable(block_height_f)?, - circuit.create_public_variable(state.block_comm_root)?, - circuit.create_public_variable(state.fee_ledger_comm)?, - circuit.create_public_variable(state.stake_table_comm.0)?, - circuit.create_public_variable(state.stake_table_comm.1)?, - circuit.create_public_variable(state.stake_table_comm.2)?, - ], - }) - } - - /// Returns the view number - #[must_use] - pub fn view_number(&self) -> Variable { - self.vars[0] - } - - /// Returns the block height - #[must_use] - pub fn block_height(&self) -> Variable { - self.vars[1] - } - - /// Returns the Merkle root of the block commitments - #[must_use] - pub fn block_comm_root(&self) -> Variable { - self.vars[2] - } - - /// Returns the commitment of the fee ledger - #[must_use] - pub fn fee_ledger_comm(&self) -> Variable { - self.vars[3] - } - - /// Returns the commitment of the associated stake table - #[must_use] - pub fn stake_table_comm(&self) -> StakeTableCommVar { - StakeTableCommVar { - qc_keys_comm: self.vars[4], - state_keys_comm: self.vars[5], - stake_amount_comm: self.vars[6], - } - } -} - -impl AsRef<[Variable]> for LightClientStateVar { - fn as_ref(&self) -> &[Variable] { - &self.vars - } -} - -/// A function that takes as input: -/// - a list of stake table entries (`Vec<(SchnorrVerKey, Amount)>`) -/// - a bit vector indicates the signers -/// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state -/// - updated light client state (`(view_number, block_height, block_comm_root, fee_ledger_comm, stake_table_comm)`) -/// - a quorum threshold -/// Lengths of input vectors should not exceed the `STAKE_TABLE_CAPACITY`. -/// The list of stake table entries, bit indicators and signatures will be padded to the `STAKE_TABLE_CAPACITY`. -/// It checks that -/// - the signer's accumulated weight exceeds the quorum threshold -/// - the stake table corresponds to the one committed in the light client state -/// - all signed Schnorr signatures are valid -/// and returns -/// - A circuit for proof generation -/// - A list of public inputs for verification -/// - A `PlonkError` if any error happens when building the circuit -#[allow(clippy::too_many_lines)] -pub(crate) fn build( - stake_table_entries: STIter, - signer_bit_vec: BitIter, - signatures: SigIter, - lightclient_state: &LightClientState, - threshold: &U256, -) -> Result<(PlonkCircuit, PublicInput), PlonkError> -where - F: RescueParameter, - P: TECurveConfig, - STIter: IntoIterator, - STIter::Item: Borrow<(SchnorrVerKey

, U256)>, - STIter::IntoIter: ExactSizeIterator, - BitIter: IntoIterator, - BitIter::Item: Borrow, - BitIter::IntoIter: ExactSizeIterator, - SigIter: IntoIterator, - SigIter::Item: Borrow>, - SigIter::IntoIter: ExactSizeIterator, -{ - let stake_table_entries = stake_table_entries.into_iter(); - let signer_bit_vec = signer_bit_vec.into_iter(); - let signatures = signatures.into_iter(); - if stake_table_entries.len() > STAKE_TABLE_CAPACITY { - return Err(PlonkError::CircuitError(CircuitError::ParameterError( - format!( - "Number of input stake table entries {} exceeds the capacity {}", - stake_table_entries.len(), - STAKE_TABLE_CAPACITY, - ), - ))); - } - if signer_bit_vec.len() > STAKE_TABLE_CAPACITY { - return Err(PlonkError::CircuitError(CircuitError::ParameterError( - format!( - "Length of input bit vector {} exceeds the capacity {}", - signer_bit_vec.len(), - STAKE_TABLE_CAPACITY, - ), - ))); - } - if signatures.len() > STAKE_TABLE_CAPACITY { - return Err(PlonkError::CircuitError(CircuitError::ParameterError( - format!( - "Number of input signatures {} exceeds the capacity {}", - signatures.len(), - STAKE_TABLE_CAPACITY, - ), - ))); - } - - let mut circuit = PlonkCircuit::new_turbo_plonk(); - - // creating variables for stake table entries - let stake_table_entries_pad_len = STAKE_TABLE_CAPACITY - stake_table_entries.len(); - let mut stake_table_var = stake_table_entries - .map(|item| { - let item = item.borrow(); - let state_ver_key = circuit.create_signature_vk_variable(&item.0)?; - let stake_amount = circuit.create_variable(u256_to_field::(&item.1))?; - Ok(StakeTableEntryVar { - state_ver_key, - stake_amount, - }) - }) - .collect::, CircuitError>>()?; - stake_table_var.extend( - (0..stake_table_entries_pad_len) - .map(|_| { - let state_ver_key = - circuit.create_signature_vk_variable(&SchnorrVerKey::

::default())?; - let stake_amount = circuit.create_variable(F::default())?; - Ok(StakeTableEntryVar { - state_ver_key, - stake_amount, - }) - }) - .collect::, CircuitError>>()?, - ); - - // creating variables for signatures - let sig_pad_len = STAKE_TABLE_CAPACITY - signatures.len(); - let mut sig_vars = signatures - .map(|sig| circuit.create_signature_variable(sig.borrow())) - .collect::, CircuitError>>()?; - sig_vars.extend( - (0..sig_pad_len) - .map(|_| circuit.create_signature_variable(&Signature::

::default())) - .collect::, CircuitError>>()?, - ); - - // creating Boolean variables for the bit vector - let bit_vec_pad_len = STAKE_TABLE_CAPACITY - signer_bit_vec.len(); - let mut signer_bit_vec_var = signer_bit_vec - .map(|b| circuit.create_boolean_variable(*b.borrow())) - .collect::, CircuitError>>()?; - signer_bit_vec_var.extend( - (0..bit_vec_pad_len) - .map(|_| circuit.create_boolean_variable(false)) - .collect::, CircuitError>>()?, - ); - - let threshold = u256_to_field::(threshold); - let threshold_pub_var = circuit.create_public_variable(threshold)?; - - let lightclient_state_pub_var = LightClientStateVar::new(&mut circuit, lightclient_state)?; - - let view_number_f = F::from(lightclient_state.view_number as u64); - let block_height_f = F::from(lightclient_state.block_height as u64); - let public_inputs = vec![ - threshold, - view_number_f, - block_height_f, - lightclient_state.block_comm_root, - lightclient_state.fee_ledger_comm, - lightclient_state.stake_table_comm.0, - lightclient_state.stake_table_comm.1, - lightclient_state.stake_table_comm.2, - ]; - - // Checking whether the accumulated weight exceeds the quorum threshold - let mut signed_amount_var = (0..STAKE_TABLE_CAPACITY / 2) - .map(|i| { - circuit.mul_add( - &[ - stake_table_var[2 * i].stake_amount, - signer_bit_vec_var[2 * i].0, - stake_table_var[2 * i + 1].stake_amount, - signer_bit_vec_var[2 * i + 1].0, - ], - &[F::one(), F::one()], - ) - }) - .collect::, CircuitError>>()?; - // Adding the last if STAKE_TABLE_CAPACITY is not a multiple of 2 - if STAKE_TABLE_CAPACITY % 2 == 1 { - signed_amount_var.push(circuit.mul( - stake_table_var[STAKE_TABLE_CAPACITY - 1].stake_amount, - signer_bit_vec_var[STAKE_TABLE_CAPACITY - 1].0, - )?); - } - let acc_amount_var = circuit.sum(&signed_amount_var)?; - circuit.enforce_leq(threshold_pub_var, acc_amount_var)?; - - // checking the commitment for the list of schnorr keys - let state_ver_key_preimage_vars = stake_table_var - .iter() - .flat_map(|var| [var.state_ver_key.0.get_x(), var.state_ver_key.0.get_y()]) - .collect::>(); - let state_ver_key_comm = RescueNativeGadget::::rescue_sponge_with_padding( - &mut circuit, - &state_ver_key_preimage_vars, - 1, - )?[0]; - circuit.enforce_equal( - state_ver_key_comm, - lightclient_state_pub_var.stake_table_comm().state_keys_comm, - )?; - - // checking the commitment for the list of stake amounts - let stake_amount_preimage_vars = stake_table_var - .iter() - .map(|var| var.stake_amount) - .collect::>(); - let stake_amount_comm = RescueNativeGadget::::rescue_sponge_with_padding( - &mut circuit, - &stake_amount_preimage_vars, - 1, - )?[0]; - circuit.enforce_equal( - stake_amount_comm, - lightclient_state_pub_var - .stake_table_comm() - .stake_amount_comm, - )?; - - // checking all signatures - let verification_result_vars = stake_table_var - .iter() - .zip(sig_vars) - .map(|(entry, sig)| { - SignatureGadget::<_, P>::check_signature_validity( - &mut circuit, - &entry.state_ver_key, - lightclient_state_pub_var.as_ref(), - &sig, - ) - }) - .collect::, CircuitError>>()?; - let bit_x_result_vars = signer_bit_vec_var - .iter() - .zip(verification_result_vars) - .map(|(&bit, result)| { - let neg_bit = circuit.logic_neg(bit)?; - circuit.logic_or(neg_bit, result) - }) - .collect::, CircuitError>>()?; - let sig_ver_result = circuit.logic_and_all(&bit_x_result_vars)?; - circuit.enforce_true(sig_ver_result.0)?; - - circuit.finalize_for_arithmetization()?; - Ok((circuit, public_inputs.into())) -} - -/// Internal function to build a dummy circuit -pub(crate) fn build_for_preprocessing( -) -> Result<(PlonkCircuit, PublicInput), PlonkError> -where - F: RescueParameter, - P: TECurveConfig, -{ - let lightclient_state = LightClientState { - view_number: 0, - block_height: 0, - block_comm_root: F::default(), - fee_ledger_comm: F::default(), - stake_table_comm: (F::default(), F::default(), F::default()), - }; - build::(&[], &[], &[], &lightclient_state, &U256::zero()) -} - -#[cfg(test)] -mod tests { - use super::{build, LightClientState}; - use crate::utils::{key_pairs_for_testing, stake_table_for_testing}; - use ark_ed_on_bn254::EdwardsConfig as Config; - use ethereum_types::U256; - use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; - use jf_primitives::{ - crhf::{VariableLengthRescueCRHF, CRHF}, - errors::PrimitivesError, - signatures::{schnorr::Signature, SchnorrSignatureScheme, SignatureScheme}, - }; - use jf_relation::Circuit; - use jf_utils::test_rng; - - type F = ark_ed_on_bn254::Fq; - const ST_CAPACITY: usize = 20; - - #[test] - #[allow(clippy::too_many_lines)] - fn crypto_test_circuit_building() { - let num_validators = 10; - let mut prng = test_rng(); - - let (qc_keys, state_keys) = key_pairs_for_testing(num_validators, &mut prng); - let st = stake_table_for_testing(ST_CAPACITY, &qc_keys, &state_keys); - - let entries = st - .try_iter(SnapshotVersion::LastEpochStart) - .unwrap() - .map(|(_, stake_amount, state_key)| (state_key, stake_amount)) - .collect::>(); - - let block_comm_root = - VariableLengthRescueCRHF::::evaluate(vec![F::from(1u32), F::from(2u32)]).unwrap() - [0]; - let fee_ledger_comm = - VariableLengthRescueCRHF::::evaluate(vec![F::from(3u32), F::from(5u32)]).unwrap() - [0]; - - let lightclient_state = LightClientState { - view_number: 100, - block_height: 73, - block_comm_root, - fee_ledger_comm, - stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), - }; - let state_msg: [F; 7] = lightclient_state.clone().into(); - - let sigs = state_keys - .iter() - .map(|(key, _)| SchnorrSignatureScheme::::sign(&(), key, state_msg, &mut prng)) - .collect::, PrimitivesError>>() - .unwrap(); - - // bit vector with total weight 26 - let bit_vec = [ - true, true, true, false, true, true, false, false, true, false, - ]; - let bit_masked_sigs = bit_vec - .iter() - .zip(sigs.iter()) - .map(|(bit, sig)| { - if *bit { - sig.clone() - } else { - Signature::::default() - } - }) - .collect::>(); - // good path - let (circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( - &entries, - &bit_vec, - &bit_masked_sigs, - &lightclient_state, - &U256::from(26u32), - ) - .unwrap(); - assert!(circuit - .check_circuit_satisfiability(public_inputs.as_ref()) - .is_ok()); - - let (circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( - &entries, - &bit_vec, - &bit_masked_sigs, - &lightclient_state, - &U256::from(10u32), - ) - .unwrap(); - assert!(circuit - .check_circuit_satisfiability(public_inputs.as_ref()) - .is_ok()); - - // bad path: total weight doesn't meet the threshold - // bit vector with total weight 23 - let bad_bit_vec = [ - true, true, true, true, true, false, false, true, false, false, - ]; - let bad_bit_masked_sigs = bad_bit_vec - .iter() - .zip(sigs.iter()) - .map(|(bit, sig)| { - if *bit { - sig.clone() - } else { - Signature::::default() - } - }) - .collect::>(); - let (bad_circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( - &entries, - &bad_bit_vec, - &bad_bit_masked_sigs, - &lightclient_state, - &U256::from(25u32), - ) - .unwrap(); - assert!(bad_circuit - .check_circuit_satisfiability(public_inputs.as_ref()) - .is_err()); - - // bad path: bad stake table commitment - let mut bad_lightclient_state = lightclient_state.clone(); - bad_lightclient_state.stake_table_comm.1 = F::default(); - let bad_state_msg: [F; 7] = bad_lightclient_state.clone().into(); - let sig_for_bad_state = state_keys - .iter() - .map(|(key, _)| { - SchnorrSignatureScheme::::sign(&(), key, bad_state_msg, &mut prng) - }) - .collect::, PrimitivesError>>() - .unwrap(); - let (bad_circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( - &entries, - &bit_vec, - &sig_for_bad_state, - &bad_lightclient_state, - &U256::from(26u32), - ) - .unwrap(); - assert!(bad_circuit - .check_circuit_satisfiability(public_inputs.as_ref()) - .is_err()); - - // bad path: incorrect signatures - let mut wrong_light_client_state = lightclient_state.clone(); - // state with a different qc key commitment - wrong_light_client_state.stake_table_comm.0 = F::default(); - let wrong_state_msg: [F; 7] = wrong_light_client_state.into(); - let wrong_sigs = state_keys - .iter() - .map(|(key, _)| { - SchnorrSignatureScheme::::sign(&(), key, wrong_state_msg, &mut prng) - }) - .collect::, PrimitivesError>>() - .unwrap(); - let (bad_circuit, public_inputs) = build::<_, _, _, _, _, ST_CAPACITY>( - &entries, - &bit_vec, - &wrong_sigs, - &lightclient_state, - &U256::from(26u32), - ) - .unwrap(); - assert!(bad_circuit - .check_circuit_satisfiability(public_inputs.as_ref()) - .is_err()); - - // bad path: overflowing stake table size - assert!(build::<_, _, _, _, _, 9>( - &entries, - &bit_vec, - &bit_masked_sigs, - &lightclient_state, - &U256::from(26u32), - ) - .is_err()); - } -} diff --git a/hotshot-state-prover/src/lib.rs b/hotshot-state-prover/src/lib.rs deleted file mode 100644 index fc17f6d1e0..0000000000 --- a/hotshot-state-prover/src/lib.rs +++ /dev/null @@ -1,283 +0,0 @@ -//! SNARK-assisted light client state update verification in `HotShot` - -/// State verifier circuit builder -pub mod circuit; -/// Utilities for test -#[cfg(test)] -mod utils; - -use ark_bn254::Bn254; -use ark_ed_on_bn254::EdwardsConfig; -use ark_std::{ - borrow::Borrow, - rand::{CryptoRng, RngCore}, -}; -use circuit::PublicInput; -use ethereum_types::U256; -use hotshot_types::{ - light_client::{LightClientState, StateVerKey}, - traits::stake_table::{SnapshotVersion, StakeTableScheme}, -}; -use jf_plonk::{ - errors::PlonkError, - proof_system::{PlonkKzgSnark, UniversalSNARK}, - transcript::SolidityTranscript, -}; -use jf_primitives::signatures::schnorr::Signature; - -/// BLS verification key, base field and Schnorr verification key -pub use hotshot_stake_table::vec_based::config::{FieldType as BaseField, QCVerKey}; -/// Proving key -pub type ProvingKey = jf_plonk::proof_system::structs::ProvingKey; -/// Verifying key -pub type VerifyingKey = jf_plonk::proof_system::structs::VerifyingKey; -/// Proof -pub type Proof = jf_plonk::proof_system::structs::Proof; -/// Universal SRS -pub type UniversalSrs = jf_plonk::proof_system::structs::UniversalSrs; - -/// Given a SRS, returns the proving key and verifying key for state update -/// # Errors -/// Errors if unable to preprocess -#[allow(clippy::cast_possible_truncation)] -pub fn preprocess( - srs: &UniversalSrs, -) -> Result<(ProvingKey, VerifyingKey), PlonkError> { - let (circuit, _) = - circuit::build_for_preprocessing::()?; - PlonkKzgSnark::preprocess(srs, &circuit) -} - -/// Given a proving key and -/// - a list of stake table entries (`Vec<(BLSVerKey, Amount, SchnorrVerKey)>`) -/// - a list of schnorr signatures of the updated states (`Vec`), default if the node doesn't sign the state -/// - updated light client state (`(view_number, block_height, block_comm_root, fee_ledger_comm, stake_table_comm)`) -/// - a bit vector indicates the signers -/// - a quorum threshold -/// Returns error or a pair `(proof, public_inputs)` asserting that -/// - the signer's accumulated weight exceeds the quorum threshold -/// - the stake table corresponds to the one committed in the light client state -/// - all signed schnorr signatures are valid -/// # Errors -/// Errors if unable to generate proof -/// # Panics -/// if the stake table is not up to date -pub fn generate_state_update_proof( - rng: &mut R, - pk: &ProvingKey, - stake_table: &ST, - signer_bit_vec: BitIter, - signatures: SigIter, - lightclient_state: &LightClientState, - threshold: &U256, -) -> Result<(Proof, PublicInput), PlonkError> -where - ST: StakeTableScheme, - ST::IntoIter: ExactSizeIterator, - R: CryptoRng + RngCore, - BitIter: IntoIterator, - BitIter::Item: Borrow, - BitIter::IntoIter: ExactSizeIterator, - SigIter: IntoIterator, - SigIter::Item: Borrow>, - SigIter::IntoIter: ExactSizeIterator, -{ - let stake_table_entries = stake_table - .try_iter(SnapshotVersion::LastEpochStart) - .unwrap() - .map(|(_, stake_amount, schnorr_key)| (schnorr_key, stake_amount)); - let (circuit, public_inputs) = circuit::build::<_, _, _, _, _, STAKE_TABLE_CAPACITY>( - stake_table_entries, - signer_bit_vec, - signatures, - lightclient_state, - threshold, - )?; - let proof = PlonkKzgSnark::::prove::<_, _, SolidityTranscript>(rng, &circuit, pk, None)?; - Ok((proof, public_inputs)) -} - -#[cfg(test)] -mod tests { - use super::{ - utils::{key_pairs_for_testing, stake_table_for_testing}, - BaseField, UniversalSrs, - }; - use crate::{circuit::build_for_preprocessing, generate_state_update_proof, preprocess}; - use ark_bn254::Bn254; - use ark_ec::pairing::Pairing; - use ark_ed_on_bn254::EdwardsConfig as Config; - use ark_std::{ - rand::{CryptoRng, RngCore}, - One, - }; - use ethereum_types::U256; - use hotshot_types::{ - light_client::LightClientState, - traits::stake_table::{SnapshotVersion, StakeTableScheme}, - }; - use jf_plonk::{ - proof_system::{PlonkKzgSnark, UniversalSNARK}, - transcript::SolidityTranscript, - }; - use jf_primitives::{ - crhf::{VariableLengthRescueCRHF, CRHF}, - errors::PrimitivesError, - signatures::{schnorr::Signature, SchnorrSignatureScheme, SignatureScheme}, - }; - use jf_relation::Circuit; - use jf_utils::test_rng; - - const ST_CAPACITY: usize = 20; - - // FIXME(Chengyu): see - #[allow(clippy::unnecessary_wraps)] - fn universal_setup_for_testing( - max_degree: usize, - rng: &mut R, - ) -> Result - where - R: RngCore + CryptoRng, - { - use ark_ec::{scalar_mul::fixed_base::FixedBase, CurveGroup}; - use ark_ff::PrimeField; - use ark_std::{end_timer, start_timer, UniformRand}; - - let setup_time = start_timer!(|| format!("KZG10::Setup with degree {}", max_degree)); - let beta = ::ScalarField::rand(rng); - let g = ::G1::rand(rng); - let h = ::G2::rand(rng); - - let mut powers_of_beta = vec![::ScalarField::one()]; - - let mut cur = beta; - for _ in 0..max_degree { - powers_of_beta.push(cur); - cur *= β - } - - let window_size = FixedBase::get_mul_window_size(max_degree + 1); - - let scalar_bits = ::ScalarField::MODULUS_BIT_SIZE as usize; - let g_time = start_timer!(|| "Generating powers of G"); - // TODO: parallelization - let g_table = FixedBase::get_window_table(scalar_bits, window_size, g); - let powers_of_g = FixedBase::msm::<::G1>( - scalar_bits, - window_size, - &g_table, - &powers_of_beta, - ); - end_timer!(g_time); - - let powers_of_g = ::G1::normalize_batch(&powers_of_g); - - let h = h.into_affine(); - let beta_h = (h * beta).into_affine(); - - let pp = UniversalSrs { - powers_of_g, - h, - beta_h, - powers_of_h: vec![h, beta_h], - }; - end_timer!(setup_time); - Ok(pp) - } - - #[test] - fn crypto_test_proof_generation() { - let num_validators = 10; - let mut prng = test_rng(); - - let (bls_keys, schnorr_keys) = key_pairs_for_testing(num_validators, &mut prng); - let st = stake_table_for_testing(ST_CAPACITY, &bls_keys, &schnorr_keys); - - let block_comm_root = VariableLengthRescueCRHF::::evaluate(vec![ - BaseField::from(1u32), - BaseField::from(2u32), - ]) - .unwrap()[0]; - let fee_ledger_comm = VariableLengthRescueCRHF::::evaluate(vec![ - BaseField::from(3u32), - BaseField::from(5u32), - ]) - .unwrap()[0]; - - let lightclient_state = LightClientState { - view_number: 100, - block_height: 73, - block_comm_root, - fee_ledger_comm, - stake_table_comm: st.commitment(SnapshotVersion::LastEpochStart).unwrap(), - }; - let state_msg: [BaseField; 7] = lightclient_state.clone().into(); - - let sigs = schnorr_keys - .iter() - .map(|(key, _)| SchnorrSignatureScheme::::sign(&(), key, state_msg, &mut prng)) - .collect::, PrimitivesError>>() - .unwrap(); - - // bit vector with total weight 26 - let bit_vec = [ - true, true, true, false, true, true, false, false, true, false, - ]; - let bit_masked_sigs = bit_vec - .iter() - .zip(sigs.iter()) - .map(|(bit, sig)| { - if *bit { - sig.clone() - } else { - Signature::::default() - } - }) - .collect::>(); - - // good path - let num_gates = - build_for_preprocessing::() - .unwrap() - .0 - .num_gates(); - let test_srs = universal_setup_for_testing(num_gates + 2, &mut prng).unwrap(); - ark_std::println!("Number of constraint in the circuit: {num_gates}"); - - let result = preprocess::(&test_srs); - assert!(result.is_ok()); - let (pk, vk) = result.unwrap(); - - let result = generate_state_update_proof::<_, _, _, _, ST_CAPACITY>( - &mut prng, - &pk, - &st, - &bit_vec, - &bit_masked_sigs, - &lightclient_state, - &U256::from(26u32), - ); - assert!(result.is_ok()); - - let (proof, public_inputs) = result.unwrap(); - assert!(PlonkKzgSnark::::verify::( - &vk, - public_inputs.as_ref(), - &proof, - None - ) - .is_ok()); - - // minimum bad path, other bad cases are checked inside `circuit.rs` - let result = generate_state_update_proof::<_, _, _, _, ST_CAPACITY>( - &mut prng, - &pk, - &st, - &bit_vec, - &bit_masked_sigs, - &lightclient_state, - &U256::from(100u32), - ); - assert!(result.is_err()); - } -} diff --git a/hotshot-state-prover/src/utils.rs b/hotshot-state-prover/src/utils.rs deleted file mode 100644 index 90511375db..0000000000 --- a/hotshot-state-prover/src/utils.rs +++ /dev/null @@ -1,54 +0,0 @@ -use ark_ed_on_bn254::EdwardsConfig; -use ark_std::rand::{CryptoRng, RngCore}; -use ethereum_types::U256; -use hotshot_stake_table::vec_based::StakeTable; -use hotshot_types::traits::stake_table::StakeTableScheme; -use jf_primitives::signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, - SchnorrSignatureScheme, SignatureScheme, -}; - -type F = ark_ed_on_bn254::Fq; -type SchnorrVerKey = jf_primitives::signatures::schnorr::VerKey; -type SchnorrSignKey = jf_primitives::signatures::schnorr::SignKey; - -/// Helper function for test -pub(crate) fn key_pairs_for_testing( - num_validators: usize, - prng: &mut R, -) -> (Vec, Vec<(SchnorrSignKey, SchnorrVerKey)>) { - let bls_keys = (0..num_validators) - .map(|_| { - BLSOverBN254CurveSignatureScheme::key_gen(&(), prng) - .unwrap() - .1 - }) - .collect::>(); - let schnorr_keys = (0..num_validators) - .map(|_| SchnorrSignatureScheme::key_gen(&(), prng).unwrap()) - .collect::>(); - (bls_keys, schnorr_keys) -} - -/// Helper function for test -#[allow(clippy::cast_possible_truncation)] -pub(crate) fn stake_table_for_testing( - capacity: usize, - bls_keys: &[BLSVerKey], - schnorr_keys: &[(SchnorrSignKey, SchnorrVerKey)], -) -> StakeTable { - let mut st = StakeTable::::new(capacity); - // Registering keys - bls_keys - .iter() - .enumerate() - .zip(schnorr_keys) - .for_each(|((i, bls_key), (_, schnorr_key))| { - st.register(*bls_key, U256::from((i + 1) as u32), schnorr_key.clone()) - .unwrap(); - }); - // Freeze the stake table - st.advance(); - st.advance(); - st -} diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 2918902977..4816291ff7 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -3,11 +3,55 @@ use ark_ed_on_bn254::EdwardsConfig as Config; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ethereum_types::U256; use jf_primitives::signatures::schnorr; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; use tagged_base64::tagged; +/// Base field in the prover circuit +pub type CircuitField = ark_ed_on_bn254::Fq; +/// Concrete type for light client state +pub type LightClientState = GenericLightClientState; +/// Signature scheme +pub type StateSignatureScheme = + jf_primitives::signatures::schnorr::SchnorrSignatureScheme; +/// Signatures +pub type StateSignature = schnorr::Signature; +/// Verification key for verifying state signatures +pub type StateVerKey = schnorr::VerKey; +/// Signing key for signing a light client state +pub type StateSignKey = schnorr::SignKey; +/// Concrete for circuit's public input +pub type PublicInput = GenericPublicInput; +/// Key pairs for signing/verifying a light client state +#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] +pub struct StateKeyPair(schnorr::KeyPair); + +/// Request body to send to the state relay server +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize)] +pub struct StateSignatureRequestBody { + /// The public key associated with this request + pub key: StateVerKey, + /// The associated light client state + pub state: LightClientState, + /// The associated signature of the light client state + pub signature: StateSignature, +} + +/// The state signatures bundle is a light client state and its signatures collected +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct StateSignaturesBundle { + /// The state for this signatures bundle + pub state: LightClientState, + /// The collected signatures + pub signatures: HashMap, + /// Total stakes associated with the signer + pub accumulated_weight: U256, +} + /// A light client state #[tagged("LIGHT_CLIENT_STATE")] #[derive( @@ -22,7 +66,7 @@ use tagged_base64::tagged; Ord, Hash, )] -pub struct LightClientState { +pub struct GenericLightClientState { /// Current view number pub view_number: usize, /// Current block height @@ -35,8 +79,8 @@ pub struct LightClientState { pub stake_table_comm: (F, F, F), } -impl From> for [F; 7] { - fn from(state: LightClientState) -> Self { +impl From> for [F; 7] { + fn from(state: GenericLightClientState) -> Self { [ F::from(state.view_number as u64), F::from(state.block_height as u64), @@ -48,8 +92,8 @@ impl From> for [F; 7] { ] } } -impl From<&LightClientState> for [F; 7] { - fn from(state: &LightClientState) -> Self { +impl From<&GenericLightClientState> for [F; 7] { + fn from(state: &GenericLightClientState) -> Self { [ F::from(state.view_number as u64), F::from(state.block_height as u64), @@ -62,16 +106,6 @@ impl From<&LightClientState> for [F; 7] { } } -/// Signatures -pub type StateSignature = schnorr::Signature; -/// Verification key for verifying state signatures -pub type StateVerKey = schnorr::VerKey; -/// Signing key for signing a light client state -pub type StateSignKey = schnorr::SignKey; -/// Key pairs for signing/verifying a light client state -#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] -pub struct StateKeyPair(schnorr::KeyPair); - impl std::ops::Deref for StateKeyPair { type Target = schnorr::KeyPair; @@ -109,3 +143,75 @@ impl From> for StateKeyPair { StateKeyPair(value) } } + +/// Public input to the light client state prover service +#[derive(Clone, Debug)] +pub struct GenericPublicInput(Vec); + +impl AsRef<[F]> for GenericPublicInput { + fn as_ref(&self) -> &[F] { + &self.0 + } +} + +impl From> for GenericPublicInput { + fn from(v: Vec) -> Self { + Self(v) + } +} + +impl GenericPublicInput { + /// Return the threshold + #[must_use] + pub fn threshold(&self) -> F { + self.0[0] + } + + /// Return the view number of the light client state + #[must_use] + pub fn view_number(&self) -> F { + self.0[1] + } + + /// Return the block height of the light client state + #[must_use] + pub fn block_height(&self) -> F { + self.0[2] + } + + /// Return the block commitment root of the light client state + #[must_use] + pub fn block_comm_root(&self) -> F { + self.0[3] + } + + /// Return the fee ledger commitment of the light client state + #[must_use] + pub fn fee_ledger_comm(&self) -> F { + self.0[4] + } + + /// Return the stake table commitment of the light client state + #[must_use] + pub fn stake_table_comm(&self) -> (F, F, F) { + (self.0[5], self.0[6], self.0[7]) + } + + /// Return the qc key commitment of the light client state + #[must_use] + pub fn qc_key_comm(&self) -> F { + self.0[5] + } + + /// Return the state key commitment of the light client state + #[must_use] + pub fn state_key_comm(&self) -> F { + self.0[6] + } + + /// Return the stake amount commitment of the light client state + #[must_use] + pub fn stake_amount_comm(&self) -> F { + self.0[7] + } +} From 803dc02c7a24c82508eaa141ac7fe9cfa2493b08 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 20:45:53 -0500 Subject: [PATCH 0757/1393] Remve dead `ChannelMaps`, `ViewQueue`, and `SendToTask` --- hotshot/examples/combined/types.rs | 8 +-- hotshot/examples/libp2p/types.rs | 8 +-- hotshot/examples/webserver/types.rs | 8 +-- hotshot/src/lib.rs | 51 +--------------- hotshot/src/types/handle.rs | 36 ----------- testing/src/node_types.rs | 40 +------------ testing/tests/memory_network.rs | 8 +-- types/src/consensus.rs | 5 +- types/src/traits/node_implementation.rs | 79 +------------------------ 9 files changed, 10 insertions(+), 233 deletions(-) diff --git a/hotshot/examples/combined/types.rs b/hotshot/examples/combined/types.rs index 4e5d342275..94980e0925 100644 --- a/hotshot/examples/combined/types.rs +++ b/hotshot/examples/combined/types.rs @@ -1,7 +1,7 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::{CombinedCommChannel, MemoryStorage}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,12 +22,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// convenience type alias pub type ThisRun = CombinedDARun; diff --git a/hotshot/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs index 449e518f48..446905bab6 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/hotshot/examples/libp2p/types.rs @@ -1,7 +1,7 @@ use crate::infra::Libp2pDARun; use hotshot::traits::implementations::{Libp2pCommChannel, MemoryStorage}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,12 +22,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// convenience type alias pub type ThisRun = Libp2pDARun; diff --git a/hotshot/examples/webserver/types.rs b/hotshot/examples/webserver/types.rs index 03d8fc36ed..46a466ed5f 100644 --- a/hotshot/examples/webserver/types.rs +++ b/hotshot/examples/webserver/types.rs @@ -1,7 +1,7 @@ use crate::infra::WebServerDARun; use hotshot::traits::implementations::{MemoryStorage, WebCommChannel}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,12 +22,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// convenience type alias pub type ThisRun = WebServerDARun; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f33289e65d..d2ec3ad72c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -25,7 +25,7 @@ use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, channel::UnboundedSender, }; -use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; +use async_lock::RwLock; use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; @@ -37,11 +37,8 @@ use hotshot_task::{ }; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::node_implementation::ChannelMaps; - use hotshot_types::{ - consensus::{Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, + consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, data::Leaf, error::StorageSnafu, event::EventType, @@ -52,7 +49,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, network::{CommunicationChannel, NetworkError}, - node_implementation::{ConsensusTime, NodeType, SendToTasks}, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::ValidatedState, storage::StoredView, @@ -148,11 +145,6 @@ pub struct SystemContextInner> { /// The hotstuff implementation consensus: Arc>>, - /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the - /// latter of which is only applicable for sequencing consensus. - #[cfg(feature = "hotshot-testing")] - channel_maps: (ChannelMaps, Option>), - // global_registry: GlobalRegistry, /// Access to the output event stream. output_event_stream: ChannelStream>, @@ -253,7 +245,6 @@ impl> SystemContext { let inner: Arc> = Arc::new(SystemContextInner { id: nonce, #[cfg(feature = "hotshot-testing")] - channel_maps: I::new_channel_maps(start_view), consensus, public_key, private_key, @@ -316,7 +307,6 @@ impl> SystemContext { async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); self.inner.output_event_stream.publish(event).await; - } /// Publishes a transaction asynchronously to the network @@ -528,41 +518,6 @@ impl> SystemContext { pub fn get_next_view_timeout(&self) -> u64 { self.inner.config.next_view_timeout } - - /// given a view number and a upgradable read lock on a channel map, inserts entry into map if it - /// doesn't exist, or creates entry. Then returns a clone of the entry - pub async fn create_or_obtain_chan_from_read( - view_num: TYPES::Time, - channel_map: RwLockUpgradableReadGuard<'_, SendToTasks>, - ) -> ViewQueue { - // check if we have the entry - // if we don't, insert - if let Some(vq) = channel_map.channel_map.get(&view_num) { - vq.clone() - } else { - let mut channel_map = - RwLockUpgradableReadGuard::<'_, SendToTasks>::upgrade(channel_map).await; - let new_view_queue = ViewQueue::default(); - let vq = new_view_queue.clone(); - // NOTE: the read lock is held until all other read locks are DROPPED and - // the read lock may be turned into a write lock. - // This means that the `channel_map` will not change. So we don't need - // to check again to see if a channel was added - - channel_map.channel_map.insert(view_num, new_view_queue); - vq - } - } - - /// given a view number and a write lock on a channel map, inserts entry into map if it - /// doesn't exist, or creates entry. Then returns a clone of the entry - #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn create_or_obtain_chan_from_write( - view_num: TYPES::Time, - mut channel_map: RwLockWriteGuard<'_, SendToTasks>, - ) -> ViewQueue { - channel_map.channel_map.entry(view_num).or_default().clone() - } } impl> SystemContext { diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index bb6d32de33..3a3c8e6921 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -255,40 +255,4 @@ impl + 'static> SystemContextHandl .send_direct_message(MessageKind::from_consensus_message(msg), recipient) .await; } - - /// Get length of the replica's receiver channel - #[cfg(feature = "hotshot-testing")] - pub async fn get_replica_receiver_channel_len( - &self, - view_number: TYPES::Time, - ) -> Option { - use async_compatibility_layer::channel::UnboundedReceiver; - - let channel_map = self.hotshot.inner.channel_maps.0.vote_channel.read().await; - let chan = channel_map.channel_map.get(&view_number)?; - let receiver = chan.receiver_chan.lock().await; - UnboundedReceiver::len(&*receiver) - } - - /// Get length of the next leaders's receiver channel - #[cfg(feature = "hotshot-testing")] - pub async fn get_next_leader_receiver_channel_len( - &self, - view_number: TYPES::Time, - ) -> Option { - use async_compatibility_layer::channel::UnboundedReceiver; - - let channel_map = self - .hotshot - .inner - .channel_maps - .0 - .proposal_channel - .read() - .await; - let chan = channel_map.channel_map.get(&view_number)?; - - let receiver = chan.receiver_chan.lock().await; - UnboundedReceiver::len(&*receiver) - } } diff --git a/testing/src/node_types.rs b/testing/src/node_types.rs index 4edb033443..18de95cc22 100644 --- a/testing/src/node_types.rs +++ b/testing/src/node_types.rs @@ -13,9 +13,7 @@ use hotshot::traits::{ NodeImplementation, }; use hotshot_types::{ - data::ViewNumber, - signature_key::BLSPubKey, - traits::node_implementation::{ChannelMaps, NodeType}, + data::ViewNumber, signature_key::BLSPubKey, traits::node_implementation::NodeType, }; use serde::{Deserialize, Serialize}; @@ -100,58 +98,22 @@ impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticLibp2pQuorumComm; type CommitteeNetwork = StaticLibp2pDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } impl NodeImplementation for MemoryImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticMemoryQuorumComm; type CommitteeNetwork = StaticMemoryDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } impl NodeImplementation for WebImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticWebQuorumComm; type CommitteeNetwork = StaticWebDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } impl NodeImplementation for CombinedImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticCombinedQuorumComm; type CommitteeNetwork = StaticCombinedDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 7648d30174..dbbdd8f983 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -19,7 +19,7 @@ use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; -use hotshot_types::traits::node_implementation::{ChannelMaps, ConsensusTime, NodeType}; +use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, @@ -70,12 +70,6 @@ impl NodeImplementation for TestImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// fake Eq diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e379060458..6c62cb3d1b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -1,9 +1,6 @@ //! Provides the core consensus types -pub use crate::{ - traits::node_implementation::ViewQueue, - utils::{View, ViewInner}, -}; +pub use crate::utils::{View, ViewInner}; use displaydoc::Display; use crate::{ diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index df0dac8d0d..2a6da5cada 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -13,93 +13,22 @@ use super::{ }; use crate::{ data::{Leaf, TestableLeaf}, - message::ProcessedSequencingMessage, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, states::InstanceState, storage::Storage, BlockPayload, }, }; -use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; -use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use commit::Committable; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeMap, fmt::Debug, hash::Hash, ops, ops::{Deref, Sub}, - sync::{atomic::AtomicBool, Arc}, + sync::Arc, }; -/// struct containing messages for a view to send to a replica or DA committee member. -#[derive(Clone)] -pub struct ViewQueue { - /// to send networking events to a replica or DA committee member. - pub sender_chan: UnboundedSender>, - - /// to recv networking events for a replica or DA committee member. - pub receiver_chan: Arc>>>, - - /// `true` if this queue has already received a proposal - pub has_received_proposal: Arc, -} - -impl Default for ViewQueue { - /// create new view queue - fn default() -> Self { - let (s, r) = unbounded(); - ViewQueue { - sender_chan: s, - receiver_chan: Arc::new(Mutex::new(r)), - has_received_proposal: Arc::new(AtomicBool::new(false)), - } - } -} - -/// metadata for sending information to the leader, replica, or DA committee member. -pub struct SendToTasks { - /// the current view number - /// this should always be in sync with `Consensus` - pub cur_view: TYPES::Time, - - /// a map from view number to ViewQueue - /// one of (replica|next leader)'s' task for view i will be listening on the channel in here - pub channel_map: BTreeMap>, -} - -impl SendToTasks { - /// create new sendtosasks - #[must_use] - pub fn new(view_num: TYPES::Time) -> Self { - SendToTasks { - cur_view: view_num, - channel_map: BTreeMap::default(), - } - } -} - -/// Channels for sending/recv-ing proposals and votes. -#[derive(Clone)] -pub struct ChannelMaps { - /// Channel for the next consensus leader or DA leader. - pub proposal_channel: Arc>>, - - /// Channel for the replica or DA committee member. - pub vote_channel: Arc>>, -} - -impl ChannelMaps { - /// Create channels starting from a given view. - pub fn new(start_view: TYPES::Time) -> Self { - Self { - proposal_channel: Arc::new(RwLock::new(SendToTasks::new(start_view))), - vote_channel: Arc::new(RwLock::new(SendToTasks::new(start_view))), - } - } -} - /// Node implementation aggregate trait /// /// This trait exists to collect multiple behavior implementations into one type, to allow @@ -118,12 +47,6 @@ pub trait NodeImplementation: type QuorumNetwork: CommunicationChannel; /// Network for those in the DA committee type CommitteeNetwork: CommunicationChannel; - - /// Create channels for sending/recv-ing proposals and votes for quorum and committee - /// exchanges, the latter of which is only applicable for sequencing consensus. - fn new_channel_maps( - start_view: TYPES::Time, - ) -> (ChannelMaps, Option>); } /// extra functions required on a node implementation to be usable by hotshot-testing From f344b2d79514bf43ce784fb6a27b6cf17f669134 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 21:21:56 -0500 Subject: [PATCH 0758/1393] Remove InternalTrigger, unused genesis fn, DeltasType and unused pub fns --- hotshot/src/lib.rs | 110 ++---------------------------------- hotshot/src/types/handle.rs | 73 +----------------------- task-impls/src/network.rs | 4 -- types/src/data.rs | 35 ------------ types/src/message.rs | 27 --------- 5 files changed, 6 insertions(+), 243 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index d2ec3ad72c..8f631a238d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -21,10 +21,7 @@ use crate::{ traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, }; -use async_compatibility_layer::{ - art::{async_spawn, async_spawn_local}, - channel::UnboundedSender, -}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use commit::Committable; @@ -42,13 +39,11 @@ use hotshot_types::{ data::Leaf, error::StorageSnafu, event::EventType, - message::{ - DataMessage, InternalTrigger, Message, MessageKind, ProcessedGeneralConsensusMessage, - }, + message::{DataMessage, Message, MessageKind}, simple_certificate::QuorumCertificate, traits::{ consensus_api::ConsensusApi, - network::{CommunicationChannel, NetworkError}, + network::CommunicationChannel, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::ValidatedState, @@ -66,7 +61,7 @@ use std::{ time::Duration, }; use tasks::add_vid_task; -use tracing::{debug, info, instrument, trace, warn}; +use tracing::{debug, info, instrument, trace}; // -- Rexports // External @@ -270,37 +265,6 @@ impl> SystemContext { .await; } - /// Marks a given view number as timed out. This should be called a fixed period after a round is started. - /// - /// If the round has already ended then this function will essentially be a no-op. Otherwise `run_round` will return shortly after this function is called. - /// # Panics - /// Panics if the current view is not in the channel map - #[instrument( - skip_all, - fields(id = self.inner.id, view = *current_view), - name = "Timeout consensus tasks", - level = "warn" - )] - pub async fn timeout_view( - &self, - current_view: TYPES::Time, - send_replica: UnboundedSender>, - send_next_leader: Option>>, - ) { - let msg = ProcessedGeneralConsensusMessage::::InternalTrigger( - InternalTrigger::Timeout(current_view), - ); - if let Some(chan) = send_next_leader { - if chan.send(msg.clone()).await.is_err() { - debug!("Error timing out next leader task"); - } - }; - // NOTE this should always exist - if send_replica.send(msg).await.is_err() { - debug!("Error timing out replica task"); - }; - } - /// Emit an external event // A copypasta of `ConsensusApi::send_event` // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 @@ -447,72 +411,6 @@ impl> SystemContext { Ok((handle, internal_event_stream)) } - /// Send a broadcast message. - /// - /// This is an alias for `hotshot.inner.networking.broadcast_message(msg.into())`. - /// - /// # Errors - /// - /// Will return any errors that the underlying `broadcast_message` can return. - // this clippy lint is silly. This is async by requirement of the trait. - #[allow(clippy::unused_async)] - pub async fn send_broadcast_message( - &self, - kind: impl Into>, - ) -> std::result::Result<(), NetworkError> { - let inner = self.inner.clone(); - let pk = self.inner.public_key.clone(); - let kind = kind.into(); - - async_spawn_local(async move { - if inner - .networks - .quorum_network - .broadcast_message( - Message { - version: PROGRAM_PROTOCOL_VERSION, - sender: pk, - kind, - }, - // TODO this is morally wrong - &inner.memberships.quorum_membership.clone(), - ) - .await - .is_err() - { - warn!("Failed to broadcast message"); - }; - }); - Ok(()) - } - - /// Send a direct message to a given recipient. - /// - /// This is an alias for `hotshot.inner.networking.message_node(msg.into(), recipient)`. - /// - /// # Errors - /// - /// Will return any errors that the underlying `message_node` can return. - pub async fn send_direct_message( - &self, - kind: impl Into>, - recipient: TYPES::SignatureKey, - ) -> std::result::Result<(), NetworkError> { - self.inner - .networks - .quorum_network - .direct_message( - Message { - version: PROGRAM_PROTOCOL_VERSION, - sender: self.inner.public_key.clone(), - kind: kind.into(), - }, - recipient, - ) - .await?; - Ok(()) - } - /// return the timeout for a view for `self` #[must_use] pub fn get_next_view_timeout(&self) -> u64 { diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 3a3c8e6921..20f6daf059 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -3,7 +3,6 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; -use commit::Committable; use futures::Stream; use hotshot_task::{ boxed_sync, @@ -14,25 +13,12 @@ use hotshot_task::{ }; use hotshot_task_impls::events::HotShotEvent; #[cfg(feature = "hotshot-testing")] -use hotshot_types::{ - message::{MessageKind, SequencingMessage}, - traits::election::Membership, -}; +use hotshot_types::traits::election::Membership; -use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ - consensus::Consensus, - data::Leaf, - error::HotShotError, - event::EventType, - simple_certificate::QuorumCertificate, - traits::{ - node_implementation::{ConsensusTime, NodeType}, - storage::Storage, - }, + consensus::Consensus, data::Leaf, error::HotShotError, traits::node_implementation::NodeType, }; use std::sync::Arc; -use tracing::error; /// Event streaming handle for a [`SystemContext`] instance running in the background /// @@ -140,39 +126,6 @@ impl + 'static> SystemContextHandl self.hotshot.publish_transaction_async(tx).await } - /// performs the genesis initializaiton - pub async fn maybe_do_genesis_init(&self) { - let _anchor = self.storage(); - if let Ok(anchor_leaf) = self.storage().get_anchored_view().await { - if anchor_leaf.view_number == TYPES::Time::genesis() { - let leaf = Leaf::from_stored_view(anchor_leaf); - let mut qc = QuorumCertificate::::genesis(); - qc.data = QuorumData { - leaf_commit: leaf.commit(), - }; - let event = Event { - view_number: TYPES::Time::genesis(), - event: EventType::Decide { - leaf_chain: Arc::new(vec![leaf]), - qc: Arc::new(qc), - block_size: None, - }, - }; - self.output_event_stream.publish(event).await; - } - } else { - // TODO (justin) this seems bad. I think we should hard error in this case?? - error!("Hotshot storage has no anchor leaf!"); - } - } - - /// begin consensus by sending a genesis event - /// Use `start_consensus` on `SystemContext` instead - #[deprecated] - pub async fn start_consensus_deprecated(&self) { - self.maybe_do_genesis_init().await; - } - /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data pub fn storage(&self) -> &I::Storage { @@ -233,26 +186,4 @@ impl + 'static> SystemContextHandl pub async fn get_current_view(&self) -> TYPES::Time { self.hotshot.inner.consensus.read().await.cur_view } - - /// Wrapper around `HotShotConsensusApi`'s `send_broadcast_consensus_message` function - #[cfg(feature = "hotshot-testing")] - pub async fn send_broadcast_consensus_message(&self, msg: SequencingMessage) { - let _result = self - .hotshot - .send_broadcast_message(MessageKind::from_consensus_message(msg)) - .await; - } - - /// Wrapper around `HotShotConsensusApi`'s `send_direct_consensus_message` function - #[cfg(feature = "hotshot-testing")] - pub async fn send_direct_consensus_message( - &self, - msg: SequencingMessage, - recipient: TYPES::SignatureKey, - ) { - let _result = self - .hotshot - .send_direct_message(MessageKind::from_consensus_message(msg), recipient) - .await; - } } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 05587207a9..a4209dec15 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -95,10 +95,6 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::UpgradeVote(message) => { HotShotEvent::UpgradeVoteRecv(message) } - GeneralConsensusMessage::InternalTrigger(_) => { - error!("Got unexpected message type in network task!"); - return; - } }, Either::Right(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(proposal) => { diff --git a/types/src/data.rs b/types/src/data.rs index 66537bdb73..7605c646b0 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -243,41 +243,6 @@ impl HasViewNumber for UpgradeProposal { } } -/// A state change encoded in a leaf. -/// -/// [`DeltasType`] represents a [block](NodeType::BlockPayload), but it may not contain the block in -/// full. It is guaranteed to contain, at least, a cryptographic commitment to the block, and it -/// provides an interface for resolving the commitment to a full block if the full block is -/// available. -pub trait DeltasType: - Clone + Debug + for<'a> Deserialize<'a> + PartialEq + Eq + std::hash::Hash + Send + Serialize + Sync -{ - /// Errors reported by this type. - type Error: std::error::Error; - - /// Get a cryptographic commitment to the block represented by this delta. - fn payload_commitment(&self) -> VidCommitment; - - /// Get the full block if it is available, otherwise return this object unchanged. - /// - /// # Errors - /// - /// Returns the original [`DeltasType`], unchanged, in an [`Err`] variant in the case where the - /// full block is not currently available. - fn try_resolve(self) -> Result; - - /// Fill this [`DeltasType`] by providing a complete block. - /// - /// After this function succeeds, [`try_resolve`](Self::try_resolve) is guaranteed to return - /// `Ok(block)`. - /// - /// # Errors - /// - /// Fails if `block` does not match `self.payload_commitment()`, or if the block is not able to be - /// stored for some implementation-defined reason. - fn fill(&mut self, block: PAYLOAD) -> Result<(), Self::Error>; -} - /// The error type for block and its transactions. #[derive(Snafu, Debug)] pub enum BlockError { diff --git a/types/src/message.rs b/types/src/message.rs index f84718aa6c..53ff69f1ec 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -132,15 +132,6 @@ impl ViewMessage for MessageKind { } } -/// Internal triggers sent by consensus messages. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub enum InternalTrigger { - // May add other triggers if necessary. - /// Internal timeout at the specified view number. - Timeout(TYPES::Time), -} - /// A processed consensus message for both validating and sequencing consensus. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[serde(bound(deserialize = ""))] @@ -149,9 +140,6 @@ pub enum ProcessedGeneralConsensusMessage { Proposal(Proposal>, TYPES::SignatureKey), /// Message with a quorum vote. Vote(QuorumVote, TYPES::SignatureKey), - /// Internal ONLY message indicating a view interrupt. - #[serde(skip)] - InternalTrigger(InternalTrigger), } impl From> @@ -163,9 +151,6 @@ impl From> GeneralConsensusMessage::Proposal(p) } ProcessedGeneralConsensusMessage::Vote(v, _) => GeneralConsensusMessage::Vote(v), - ProcessedGeneralConsensusMessage::InternalTrigger(a) => { - GeneralConsensusMessage::InternalTrigger(a) - } } } } @@ -180,9 +165,6 @@ impl ProcessedGeneralConsensusMessage { ProcessedGeneralConsensusMessage::Proposal(p, sender) } GeneralConsensusMessage::Vote(v) => ProcessedGeneralConsensusMessage::Vote(v, sender), - GeneralConsensusMessage::InternalTrigger(a) => { - ProcessedGeneralConsensusMessage::InternalTrigger(a) - } // ED NOTE These are deprecated GeneralConsensusMessage::TimeoutVote(_) => unimplemented!(), GeneralConsensusMessage::ViewSyncPreCommitVote(_) => unimplemented!(), @@ -313,10 +295,6 @@ pub enum GeneralConsensusMessage { /// Message with an upgrade vote UpgradeVote(UpgradeVote), - - /// Internal ONLY message indicating a view interrupt. - #[serde(skip)] - InternalTrigger(InternalTrigger), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -360,10 +338,6 @@ impl SequencingMessage { p.data.get_view_number() } GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view_number(), - GeneralConsensusMessage::InternalTrigger(trigger) => match trigger { - InternalTrigger::Timeout(time) => *time, - }, - GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { message.get_view_number() @@ -419,7 +393,6 @@ impl SequencingMessage { GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { MessagePurpose::Vote } - GeneralConsensusMessage::InternalTrigger(_) => MessagePurpose::Internal, GeneralConsensusMessage::ViewSyncPreCommitVote(_) | GeneralConsensusMessage::ViewSyncCommitVote(_) | GeneralConsensusMessage::ViewSyncFinalizeVote(_) => MessagePurpose::ViewSyncVote, From a6aa68b5bfefac1e302e0d43480d7616271c645c Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Thu, 1 Feb 2024 11:49:05 -0500 Subject: [PATCH 0759/1393] feat: ellie comments --- hotshot/src/traits/networking/libp2p_network.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 107e624707..97eb7f86fb 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -264,7 +264,7 @@ where { Ok(network) => network, Err(err) => { - panic!("Failed to create network: {err:?}"); + panic!("Failed to create libp2p network: {err:?}"); } } }) From e34d081d0dadcad060f0a25ade3c65268fa4cc34 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 1 Feb 2024 23:19:21 -0500 Subject: [PATCH 0760/1393] remove libp2p from macro tests...for now --- testing-macros/tests/tests.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 969e0a0896..186087ec7c 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -1,7 +1,7 @@ use hotshot_testing::completion_task::{ CompletionTaskDescription, TimeBasedCompletionTaskDescription, }; -use hotshot_testing::node_types::{CombinedImpl, Libp2pImpl, MemoryImpl, WebImpl}; +use hotshot_testing::node_types::{MemoryImpl, WebImpl}; use hotshot_testing::spinning_task::ChangeNode; use hotshot_testing::spinning_task::SpinningTaskDescription; use hotshot_testing::spinning_task::UpDown; @@ -31,11 +31,13 @@ cross_tests!( // Test one node leaving the network. cross_tests!( TestName: test_with_failures_one, - Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Impls: [MemoryImpl, WebImpl], Types: [TestTypes], Ignore: false, Metadata: { let mut metadata = TestMetadata::default_more_nodes(); + metadata.num_bootstrap_nodes = 19; + metadata.start_nodes = 19; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -58,11 +60,13 @@ cross_tests!( // Test f/2 nodes leaving the network. cross_tests!( TestName: test_with_failures_half_f, - Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Impls: [MemoryImpl, WebImpl], Types: [TestTypes], Ignore: false, Metadata: { let mut metadata = TestMetadata::default_more_nodes(); + metadata.num_bootstrap_nodes = 17; + metadata.start_nodes = 17; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -97,7 +101,7 @@ cross_tests!( // Test f nodes leaving the network. cross_tests!( TestName: test_with_failures_f, - Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Impls: [MemoryImpl, WebImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -106,6 +110,8 @@ cross_tests!( metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; + metadata.num_bootstrap_nodes = 14; + metadata.start_nodes = 14; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -149,14 +155,15 @@ cross_tests!( // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, - Impls: [MemoryImpl, Libp2pImpl, WebImpl, CombinedImpl], + Impls: [MemoryImpl, WebImpl], Types: [TestTypes], Ignore: false, Metadata: { let mut metadata = TestMetadata::default_more_nodes(); + metadata.num_bootstrap_nodes = 10; metadata.total_nodes = 12; metadata.da_committee_size = 12; - metadata.start_nodes = 12; + metadata.start_nodes = 10; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. From 3cecb99be0f99f5498898ebfd94289a6810519f8 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 2 Feb 2024 13:07:36 -0500 Subject: [PATCH 0761/1393] Add version handling for incoming messages (#2451) Incoming messages are now version-checked before deserialization. The current network protocol version is set to 0.1, and only messages with this version are handled. --- constants/Cargo.toml | 1 + constants/src/lib.rs | 6 +- hotshot/src/lib.rs | 4 +- .../src/traits/networking/libp2p_network.rs | 139 +++-- .../traits/networking/web_server_network.rs | 484 ++++++++++-------- task-impls/src/network.rs | 4 +- testing/Cargo.toml | 1 + testing/tests/memory_network.rs | 4 +- testing/tests/unit.rs | 1 + testing/tests/unit/message.rs | 8 +- testing/tests/unit/version.rs | 33 ++ types/src/traits/network.rs | 2 + utils/Cargo.toml | 1 + utils/src/bincode.rs | 6 +- utils/src/lib.rs | 3 + utils/src/version.rs | 16 + 16 files changed, 443 insertions(+), 270 deletions(-) create mode 100644 testing/tests/unit/version.rs create mode 100644 utils/src/version.rs diff --git a/constants/Cargo.toml b/constants/Cargo.toml index 72219ccf74..dcb3ae0d78 100644 --- a/constants/Cargo.toml +++ b/constants/Cargo.toml @@ -5,5 +5,6 @@ version = { workspace = true } [dependencies] serde = { workspace = true } + [lints] workspace = true diff --git a/constants/src/lib.rs b/constants/src/lib.rs index 5d357cbd49..3a44c0902a 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -17,7 +17,7 @@ pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Hash, Eq)] /// Type for protocol version number pub struct Version { /// major version number @@ -26,5 +26,5 @@ pub struct Version { pub minor: u16, } -/// Constants for the current version number used by the program -pub const PROGRAM_PROTOCOL_VERSION: Version = Version { major: 0, minor: 1 }; +/// Constant for protocol version 0.1. +pub const VERSION_0_1: Version = Version { major: 0, minor: 1 }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 8f631a238d..1ba00da58e 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -27,7 +27,7 @@ use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; use futures::join; -use hotshot_constants::PROGRAM_PROTOCOL_VERSION; +use hotshot_constants::VERSION_0_1; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, task_launcher::TaskRunner, @@ -303,7 +303,7 @@ impl> SystemContext { .da_network .broadcast_message( Message { - version: PROGRAM_PROTOCOL_VERSION, + version: VERSION_0_1, sender: api.inner.public_key.clone(), kind: MessageKind::from(message), }, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index ece3d38ef1..434139d9f9 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -12,7 +12,7 @@ use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; -use hotshot_constants::LOOK_AHEAD; +use hotshot_constants::{Version, LOOK_AHEAD, VERSION_0_1}; use hotshot_task::{boxed_sync, BoxSyncFuture}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; @@ -29,8 +29,7 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; - -use hotshot_utils::bincode::bincode_opts; +use hotshot_utils::{bincode::bincode_opts, version::read_version}; use libp2p_identity::PeerId; #[cfg(feature = "hotshot-testing")] use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder}; @@ -68,10 +67,17 @@ pub type BootstrapAddrs = Arc, Multiaddr)>>>; pub const QC_TOPIC: &str = "global"; /// Stubbed out Ack +/// +/// Note: as part of versioning for upgradability, +/// all network messages must begin with a 4-byte version number. +/// +/// Hence: +/// * `Empty` *must* be a struct (enums are serialized with a leading byte for the variant), and +/// * we must have an explicit version field. #[derive(Serialize)] -pub enum Empty { - /// Empty value - Empty, +pub struct Empty { + /// network protocol version number in use + version: Version, } impl Debug for Libp2pNetwork { @@ -376,7 +382,7 @@ impl Libp2pNetwork { }), }; - result.spawn_event_generator(direct_send, broadcast_send); + result.handle_event_generator(direct_send, broadcast_send); result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); @@ -517,9 +523,63 @@ impl Libp2pNetwork { .map_err(Into::::into) } + /// Handle events for Version 0.1 of the protocol. + async fn handle_recvd_events_0_1( + &self, + msg: NetworkEvent, + direct_send: &UnboundedSender, + broadcast_send: &UnboundedSender, + ) -> Result<(), NetworkError> { + match msg { + GossipMsg(msg, _topic) => { + let result: Result = bincode_opts().deserialize(&msg); + if let Ok(result) = result { + broadcast_send + .send(result) + .await + .map_err(|_| NetworkError::ChannelSend)?; + } + } + DirectRequest(msg, _pid, chan) => { + let result: Result = bincode_opts() + .deserialize(&msg) + .context(FailedToSerializeSnafu); + if let Ok(result) = result { + direct_send + .send(result) + .await + .map_err(|_| NetworkError::ChannelSend)?; + } + if self + .inner + .handle + .direct_response( + chan, + &Empty { + version: VERSION_0_1, + }, + ) + .await + .is_err() + { + error!("failed to ack!"); + }; + } + DirectResponse(msg, _) => { + let _result: Result = bincode_opts() + .deserialize(&msg) + .context(FailedToSerializeSnafu); + } + NetworkEvent::IsBootstrapped => { + error!("handle_recvd_events_0_1 received `NetworkEvent::IsBootstrapped`, which should be impossible."); + } + } + Ok::<(), NetworkError>(()) + } + /// task to propagate messages to handlers /// terminates on shut down of network - fn spawn_event_generator( + fn handle_event_generator( &self, direct_send: UnboundedSender, broadcast_send: UnboundedSender, @@ -527,49 +587,36 @@ impl Libp2pNetwork { let handle = self.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); async_spawn(async move { - while let Ok(msg) = handle.inner.handle.receiver().recv().await { - match msg { - GossipMsg(msg, _topic) => { - let result: Result = bincode_opts().deserialize(&msg); - if let Ok(result) = result { - broadcast_send - .send(result) - .await - .map_err(|_| NetworkError::ChannelSend)?; - } - } - DirectRequest(msg, _pid, chan) => { - let result: Result = bincode_opts() - .deserialize(&msg) - .context(FailedToSerializeSnafu); - if let Ok(result) = result { - direct_send - .send(result) - .await - .map_err(|_| NetworkError::ChannelSend)?; - } - if handle - .inner - .handle - .direct_response(chan, &Empty::Empty) - .await - .is_err() - { - error!("failed to ack!"); - }; - } - DirectResponse(msg, _) => { - let _result: Result = bincode_opts() - .deserialize(&msg) - .context(FailedToSerializeSnafu); - } + while let Ok(message) = handle.inner.handle.receiver().recv().await { + match &message { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } + GossipMsg(raw, _) | DirectRequest(raw, _, _) | DirectResponse(raw, _) => { + let message_version = read_version(raw); + match message_version { + Some(VERSION_0_1) => { + let _ = handle + .handle_recvd_events_0_1(message, &direct_send, &broadcast_send) + .await; + } + Some(version) => { + warn!( + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", + version, message + ); + } + _ => { + warn!( + "Received message with unreadable version number.\n\nPayload:\n\n{:?}", + message + ); + } + } + } } } - warn!("Network receiever shut down!"); - Ok::<(), NetworkError>(()) + warn!("Network receiver shut down!"); }); } } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 794e068b43..f05818963f 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -12,6 +12,7 @@ use async_compatibility_layer::{ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; +use hotshot_constants::VERSION_0_1; use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ message::{Message, MessagePurpose}, @@ -25,6 +26,7 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; +use hotshot_utils::version::read_version; use hotshot_web_server::{self, config}; use lru::LruCache; use serde::{Deserialize, Serialize}; @@ -47,7 +49,7 @@ use surf_disco::error::ClientError; use tracing::{debug, error, info, warn}; /// convenience alias alias for the result of getting transactions from the web server -pub type TxnResult = Result>>)>, NetworkError>; +pub type TxnResult = Result>)>, ClientError>; /// Represents the communication channel abstraction for the web server #[derive(Clone, Debug)] @@ -192,9 +194,9 @@ struct Inner { /// Our own key _own_key: TYPES::SignatureKey, /// Queue for broadcasted messages - broadcast_poll_queue: Arc>>>>, + broadcast_poll_queue_0_1: Arc>>>>, /// Queue for direct messages - direct_poll_queue: Arc>>>>, + direct_poll_queue_0_1: Arc>>>>, /// Client is running running: AtomicBool, /// The web server connection is ready @@ -205,10 +207,8 @@ struct Inner { wait_between_polls: Duration, /// Whether we are connecting to a DA server is_da: bool, - /// The last tx_index we saw from the web server tx_index: Arc>, - /// Task map for quorum proposals. proposal_task_map: Arc>>, /// Task map for quorum votes. @@ -233,6 +233,160 @@ struct Inner { impl Inner { #![allow(clippy::too_many_lines)] + + /// Handle version 0.1 transactions + /// + /// * `first_tx_index` - the index of the first transaction received from the server in the latest batch. + /// * `tx_index` - the last transaction index we saw from the web server. + async fn handle_tx_0_1(&self, tx: Vec, first_tx_index: u64, tx_index: &mut u64) { + let broadcast_poll_queue = &self.broadcast_poll_queue_0_1; + if first_tx_index > *tx_index + 1 { + debug!( + "missed txns from {} to {}", + *tx_index + 1, + first_tx_index - 1 + ); + *tx_index = first_tx_index - 1; + } + + *tx_index += 1; + + if let Ok(deserialized_message_inner) = bincode::deserialize::>(&tx) { + let deserialized_message = RecvMsg { + message: Some(deserialized_message_inner), + }; + broadcast_poll_queue + .write() + .await + .push(deserialized_message.clone()); + } else { + async_sleep(self.wait_between_polls).await; + } + + debug!("tx index is {}", tx_index); + } + + /// Handle version 0.1 messages + /// + /// Returns `should_return` as a boolean, which is: + /// * `true` if we've seen enough this round and the `poll_web_server` function should return + /// * `false` if we want to receive further messages from the server. + #[allow(clippy::too_many_arguments)] + async fn handle_message_0_1( + &self, + message: Vec, + view_number: u64, + message_purpose: MessagePurpose, + vote_index: &mut u64, + seen_quorum_proposals: &mut LruCache, + seen_view_sync_certificates: &mut LruCache, + ) -> bool { + let broadcast_poll_queue = &self.broadcast_poll_queue_0_1; + let direct_poll_queue = &self.direct_poll_queue_0_1; + if let Ok(deserialized_message_inner) = bincode::deserialize::>(&message) { + let deserialized_message = RecvMsg { + message: Some(deserialized_message_inner), + }; + match message_purpose { + MessagePurpose::Data => { + error!("We should not receive transactions in this function"); + } + MessagePurpose::Proposal => { + let proposal = deserialized_message.clone(); + broadcast_poll_queue.write().await.push(proposal); + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + return true; + } + MessagePurpose::LatestQuorumProposal => { + let proposal = deserialized_message.clone(); + let hash = hash(&proposal); + // Only allow unseen proposals to be pushed to the queue + if seen_quorum_proposals.put(hash, ()).is_none() { + broadcast_poll_queue.write().await.push(proposal); + } + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + return true; + } + MessagePurpose::LatestViewSyncCertificate => { + let cert = deserialized_message.clone(); + let hash = hash(&cert); + if seen_view_sync_certificates.put(hash, ()).is_none() { + broadcast_poll_queue.write().await.push(cert); + } + return false; + } + MessagePurpose::Vote => { + let vote = deserialized_message.clone(); + *vote_index += 1; + direct_poll_queue.write().await.push(vote); + + return false; + } + MessagePurpose::DAC => { + debug!( + "Received DAC from web server for view {} {}", + view_number, self.is_da + ); + broadcast_poll_queue + .write() + .await + .push(deserialized_message.clone()); + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + // return if we found a DAC, since there will only be 1 per view + // In future we should check to make sure DAC is valid + return true; + } + MessagePurpose::VidDisperse => { + // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 + + self.broadcast_poll_queue_0_1 + .write() + .await + .push(deserialized_message.clone()); + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + return true; + } + MessagePurpose::ViewSyncVote => { + let vote = deserialized_message.clone(); + *vote_index += 1; + direct_poll_queue.write().await.push(vote); + + return false; + } + MessagePurpose::ViewSyncCertificate => { + // TODO ED Special case this for view sync + // TODO ED Need to add vote indexing to web server for view sync certs + let cert = deserialized_message.clone(); + *vote_index += 1; + broadcast_poll_queue.write().await.push(cert); + + return false; + } + + MessagePurpose::Internal => { + error!("Received internal message in web server network"); + + return false; + } + + MessagePurpose::Upgrade => { + broadcast_poll_queue + .write() + .await + .push(deserialized_message.clone()); + + return true; + } + } + } + + false + } + /// Pull a web server. async fn poll_web_server( &self, @@ -274,172 +428,131 @@ impl Inner { MessagePurpose::Upgrade => config::get_upgrade_route(view_number), }; - if message_purpose == MessagePurpose::Data { - let possible_message = self.get_txs_from_web_server(endpoint).await; - match possible_message { - Ok(Some((index, deserialized_messages))) => { - let mut broadcast_poll_queue = self.broadcast_poll_queue.write().await; - if index > tx_index + 1 { - debug!("missed txns from {} to {}", tx_index + 1, index - 1); - tx_index = index - 1; - } - for tx in &deserialized_messages { - tx_index += 1; - broadcast_poll_queue.push(tx.clone()); - } - debug!("tx index is {}", tx_index); - } - Ok(None) => { - async_sleep(self.wait_between_polls + additional_wait).await; - } - Err(_e) => { - async_sleep(self.wait_between_polls + additional_wait).await; - } - } - } else { - let possible_message = self.get_message_from_web_server(endpoint).await; - - match possible_message { - Ok(Some(deserialized_messages)) => { - match message_purpose { - MessagePurpose::Data => { - error!("We should not receive transactions in this function"); + if let MessagePurpose::Data = message_purpose { + let possible_message: TxnResult = self.client.get(&endpoint).send().await; + // Deserialize and process transactions from the server. + // If something goes wrong at any point, we sleep for wait_between_polls + // then try again next time. + if let Ok(Some((first_tx_index, txs))) = possible_message { + for tx_raw in txs { + // This is very hacky. + // + // Fundamentally, tx_raw is a serialized Option(Message). + // The problem is, we want to extract the serialized Message + // *without* deserializing the entire tx_raw + // (because, a priori, the serialization of Message might depend on the version number, + // which we have not yet read at this point). + // + // So we use the fact that the bincode serialization of Option(_) is a single leading byte + // (0 for None and 1 for Some). Dropping the first byte then yields the serialized Message. + // + // It would be nice to do this with serde primitives, but I'm not sure how. + + match tx_raw.first() { + Some(0) => { + continue; } - MessagePurpose::Proposal => { - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - let proposal = deserialized_messages[0].clone(); - self.broadcast_poll_queue.write().await.push(proposal); + Some(1) => { + let tx = tx_raw[1..].to_vec(); + let tx_version = read_version(&tx); - return Ok(()); - // Wait for the view to change before polling for proposals again - // let event = receiver.recv().await; - // match event { - // Ok(event) => view_number = event.view_number(), - // Err(_r) => { - // error!("Proposal receiver error! It was likely shutdown") - // } - // } - } - MessagePurpose::LatestQuorumProposal => { - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - let proposal = deserialized_messages[0].clone(); - let hash = hash(&proposal); - // Only allow unseen proposals to be pushed to the queue - if seen_quorum_proposals.put(hash, ()).is_none() { - self.broadcast_poll_queue.write().await.push(proposal); - } - } - MessagePurpose::LatestViewSyncCertificate => { - let mut broadcast_poll_queue = - self.broadcast_poll_queue.write().await; - - for cert in &deserialized_messages { - let hash = hash(&cert); - if seen_view_sync_certificates.put(hash, ()).is_none() { - broadcast_poll_queue.push(cert.clone()); + match tx_version { + Some(VERSION_0_1) => { + self.handle_tx_0_1(tx, first_tx_index, &mut tx_index).await; + } + Some(version) => { + warn!( + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", + version, + tx + ); + } + _ => { + warn!( + "Received message with unreadable version number.\n\nPayload:\n\n{:?}", + tx + ); } } } - MessagePurpose::Vote => { - // error!( - // "Received {} votes from web server for view {} is da {}", - // deserialized_messages.len(), - // view_number, - // self.is_da - // ); - let mut direct_poll_queue = self.direct_poll_queue.write().await; - for vote in &deserialized_messages { - vote_index += 1; - direct_poll_queue.push(vote.clone()); - } - } - MessagePurpose::DAC => { - debug!( - "Received DAC from web server for view {} {}", - view_number, self.is_da - ); - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - self.broadcast_poll_queue - .write() - .await - .push(deserialized_messages[0].clone()); - - // return if we found a DAC, since there will only be 1 per view - // In future we should check to make sure DAC is valid - return Ok(()); + _ => { + warn!("Could not deserialize transaction: {:?}", tx_raw); } - MessagePurpose::VidDisperse => { - // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - self.broadcast_poll_queue - .write() - .await - .push(deserialized_messages[0].clone()); - - return Ok(()); - // Wait for the view to change before polling for proposals again - // let event = receiver.recv().await; - // match event { - // Ok(event) => view_number = event.view_number(), - // Err(_r) => { - // error!("Proposal receiver error! It was likely shutdown") - // } - // } - } - MessagePurpose::ViewSyncVote => { - // error!( - // "Received {} view sync votes from web server for view {} is da {}", - // deserialized_messages.len(), - // view_number, - // self.is_da - // ); - let mut direct_poll_queue = self.direct_poll_queue.write().await; - for vote in &deserialized_messages { - vote_index += 1; - direct_poll_queue.push(vote.clone()); - } + } + } + } else { + async_sleep(self.wait_between_polls + additional_wait).await; + } + } else { + let possible_message: Result>>, ClientError> = + self.client.get(&endpoint).send().await; + if let Ok(Some(messages)) = possible_message { + for message_raw in messages { + // This is very hacky. + // + // Fundamentally, message_raw is a serialized Option(Message). + // The problem is, we want to extract the serialized Message + // *without* deserializing the entire message_raw + // (because, a priori, the serialization of Message might depend on the version number, + // which we have not yet read at this point). + // + // So we use the fact that the bincode serialization of Option(_) is a single leading byte + // (0 for None and 1 for Some). Dropping the first byte then yields the serialized Message. + // + // It would be nice to do this with serde primitives, but I'm not sure how. + + match message_raw.first() { + Some(0) => { + continue; } - MessagePurpose::ViewSyncCertificate => { - // error!( - // "Received {} view sync certs from web server for view {} is da {}", - // deserialized_messages.len(), - // view_number, - // self.is_da - // ); - let mut broadcast_poll_queue = - self.broadcast_poll_queue.write().await; - // TODO ED Special case this for view sync - // TODO ED Need to add vote indexing to web server for view sync certs - for cert in &deserialized_messages { - vote_index += 1; - broadcast_poll_queue.push(cert.clone()); + Some(1) => { + let message = message_raw[1..].to_vec(); + let message_version = read_version(&message); + + let should_return; + + match message_version { + Some(VERSION_0_1) => { + should_return = self + .handle_message_0_1( + message, + view_number, + message_purpose, + &mut vote_index, + &mut seen_quorum_proposals, + &mut seen_view_sync_certificates, + ) + .await; + + if should_return { + return Ok(()); + } + } + Some(version) => { + warn!( + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", + version, + message + ); + } + _ => { + warn!( + "Received message with unreadable version number.\n\nPayload:\n\n{:?}", + message + ); + } } } - - MessagePurpose::Internal => { - error!("Received internal message in web server network"); - } - - MessagePurpose::Upgrade => { - self.broadcast_poll_queue - .write() - .await - .push(deserialized_messages[0].clone()); - - return Ok(()); + _ => { + warn!("Could not deserialize message: {:?}", message_raw); } } } - Ok(None) => { - async_sleep(self.wait_between_polls).await; - } - Err(_e) => { - // error!("error is {:?}", _e); - async_sleep(self.wait_between_polls).await; - } + } else { + async_sleep(self.wait_between_polls).await; } } + let maybe_event = receiver.try_recv(); match maybe_event { Ok(event) => { @@ -485,55 +598,6 @@ impl Inner { } Err(NetworkError::ShutDown) } - - /// Fetches transactions from web server - async fn get_txs_from_web_server(&self, endpoint: String) -> TxnResult { - let result: Result>)>, _> = self.client.get(&endpoint).send().await; - match result { - Err(_error) => Err(NetworkError::WebServer { - source: WebServerNetworkError::ClientError, - }), - Ok(Some((index, messages))) => { - let mut deserialized_messages = Vec::new(); - for message in &messages { - let deserialized_message = bincode::deserialize(message); - if let Err(e) = deserialized_message { - return Err(NetworkError::FailedToDeserialize { source: e }); - } - deserialized_messages.push(deserialized_message.unwrap()); - } - Ok(Some((index, deserialized_messages))) - } - Ok(None) => Ok(None), - } - } - - /// Sends a GET request to the webserver for some specified endpoint - /// Returns a vec of deserialized, received messages or an error - async fn get_message_from_web_server( - &self, - endpoint: String, - ) -> Result>>>, NetworkError> { - let result: Result>>, ClientError> = - self.client.get(&endpoint).send().await; - match result { - Err(_error) => Err(NetworkError::WebServer { - source: WebServerNetworkError::ClientError, - }), - Ok(Some(messages)) => { - let mut deserialized_messages = Vec::new(); - for message in &messages { - let deserialized_message = bincode::deserialize(message); - if let Err(e) = deserialized_message { - return Err(NetworkError::FailedToDeserialize { source: e }); - } - deserialized_messages.push(deserialized_message.unwrap()); - } - Ok(Some(deserialized_messages)) - } - Ok(None) => Ok(None), - } - } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] @@ -603,8 +667,8 @@ impl WebServerNetwork { let client = surf_disco::Client::::new(url); let inner = Arc::new(Inner { - broadcast_poll_queue: Arc::default(), - direct_poll_queue: Arc::default(), + broadcast_poll_queue_0_1: Arc::default(), + direct_poll_queue_0_1: Arc::default(), running: AtomicBool::new(true), connected: AtomicBool::new(false), client, @@ -869,7 +933,7 @@ impl ConnectedNetwork, TYPES::Signatur let closure = async move { match transmit_type { TransmitType::Direct => { - let mut queue = self.inner.direct_poll_queue.write().await; + let mut queue = self.inner.direct_poll_queue_0_1.write().await; Ok(queue .drain(..) .collect::>() @@ -878,7 +942,7 @@ impl ConnectedNetwork, TYPES::Signatur .collect()) } TransmitType::Broadcast => { - let mut queue = self.inner.broadcast_poll_queue.write().await; + let mut queue = self.inner.broadcast_poll_queue_0_1.write().await; Ok(queue .drain(..) .collect::>() diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a4209dec15..9bd7992a0c 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,6 +1,6 @@ use crate::events::HotShotEvent; use either::Either::{self, Left, Right}; -use hotshot_constants::PROGRAM_PROTOCOL_VERSION; +use hotshot_constants::VERSION_0_1; use hotshot_task::{ event_stream::{ChannelStream, EventStream}, task::{FilterEvent, HotShotTaskCompleted, TS}, @@ -286,7 +286,7 @@ impl> } }; let message = Message { - version: PROGRAM_PROTOCOL_VERSION, + version: VERSION_0_1, sender, kind: message_kind, }; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 95f09d8826..f202f94117 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -22,6 +22,7 @@ hotshot = { path = "../hotshot", features = [ ], default-features = false } hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index dbbdd8f983..d52d6bddd8 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -9,7 +9,7 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; -use hotshot_constants::PROGRAM_PROTOCOL_VERSION; +use hotshot_constants::VERSION_0_1; use hotshot_testing::state_types::TestInstanceState; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, @@ -106,7 +106,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec = bincode::serialize(&message).unwrap(); + let serialized_message: Vec = bincode_opts().serialize(&message).unwrap(); // The versions we've read from the message let major_version_read = u16::from_le_bytes(serialized_message[..2].try_into().unwrap()); let minor_version_read = u16::from_le_bytes(serialized_message[2..4].try_into().unwrap()); diff --git a/testing/tests/unit/version.rs b/testing/tests/unit/version.rs new file mode 100644 index 0000000000..813bcd9e0d --- /dev/null +++ b/testing/tests/unit/version.rs @@ -0,0 +1,33 @@ +#[cfg(test)] +use hotshot_constants::Version; +use hotshot_utils::version::read_version; + +#[test] +/// Check that the version number is read correctly. +fn read_version_1() { + let bytes: [u8; 6] = [0, 0, 1, 0, 4, 9]; + let version = Version { major: 0, minor: 1 }; + assert_eq!(read_version(&bytes), Some(version)); +} + +#[test] +/// Check that the version number is read correctly. +fn read_version_2() { + let bytes: [u8; 4] = [9, 0, 3, 0]; + let version = Version { major: 9, minor: 3 }; + assert_eq!(read_version(&bytes), Some(version)); +} + +#[test] +/// Check that `None` is returned if there are not enough bytes. +fn read_version_insufficient_bytes_1() { + let bytes: [u8; 3] = [0, 0, 0]; + assert_eq!(read_version(&bytes), None); +} + +#[test] +/// Check that `None` is returned if there are not enough bytes. +fn read_version_insufficient_bytes_2() { + let bytes: [u8; 0] = []; + assert_eq!(read_version(&bytes), None); +} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 7a2748098b..fe0739b3a3 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -215,6 +215,8 @@ pub trait NetworkMsg: { } +impl NetworkMsg for Vec {} + /// a message pub trait ViewMessage { /// get the view out of the message diff --git a/utils/Cargo.toml b/utils/Cargo.toml index c266322f6b..f5622e104e 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -8,6 +8,7 @@ version = "0.1.0" [dependencies] bincode = { workspace = true } +hotshot-constants = { path = "../constants" } [lints] workspace = true diff --git a/utils/src/bincode.rs b/utils/src/bincode.rs index 540d893c53..e321e54b06 100644 --- a/utils/src/bincode.rs +++ b/utils/src/bincode.rs @@ -1,7 +1,7 @@ #![allow(clippy::type_complexity)] use bincode::{ config::{ - LittleEndian, RejectTrailing, VarintEncoding, WithOtherEndian, WithOtherIntEncoding, + FixintEncoding, LittleEndian, RejectTrailing, WithOtherEndian, WithOtherIntEncoding, WithOtherLimit, WithOtherTrailing, }, DefaultOptions, Options, @@ -16,13 +16,13 @@ use bincode::{ pub fn bincode_opts() -> WithOtherTrailing< WithOtherIntEncoding< WithOtherEndian, LittleEndian>, - VarintEncoding, + FixintEncoding, >, RejectTrailing, > { bincode::DefaultOptions::new() .with_no_limit() .with_little_endian() - .with_varint_encoding() + .with_fixint_encoding() .reject_trailing_bytes() } diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 5af39d88b0..9f735513ca 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -2,3 +2,6 @@ /// Provides bincode options pub mod bincode; + +/// Provides version utilities +pub mod version; diff --git a/utils/src/version.rs b/utils/src/version.rs new file mode 100644 index 0000000000..34aadb9c88 --- /dev/null +++ b/utils/src/version.rs @@ -0,0 +1,16 @@ +//! Utilities for reading version number + +use hotshot_constants::Version; + +/// Read the version number from a message (passed a byte vector), +/// returning `None` is there are not enough bytes. +#[must_use] +#[allow(clippy::module_name_repetitions)] +pub fn read_version(message: &[u8]) -> Option { + let bytes_major = message.get(0..2)?.try_into().ok()?; + let bytes_minor = message.get(2..4)?.try_into().ok()?; + let major = u16::from_le_bytes(bytes_major); + let minor = u16::from_le_bytes(bytes_minor); + + Some(Version { major, minor }) +} From 35b3e52bbb41d3c5811722f9e0a08f9c98e641c2 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 2 Feb 2024 14:54:12 -0500 Subject: [PATCH 0762/1393] [Stability] Always poll for latest DA proposal (#2515) * always poll for latest DA proposal * lints * terminology after merge --- hotshot/examples/infra/mod.rs | 3 +- hotshot/src/tasks/mod.rs | 18 +++++++++++- .../traits/networking/web_server_network.rs | 28 +++++++++---------- types/src/message.rs | 6 ++-- types/src/traits/network.rs | 6 ++-- web_server/api.toml | 2 +- web_server/src/config.rs | 2 +- web_server/src/lib.rs | 18 ++++++------ 8 files changed, 49 insertions(+), 34 deletions(-) diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 9763c0696f..6e391b72b8 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -473,8 +473,7 @@ pub trait RunDA< EventType::NextLeaderViewTimeout { view_number } => { warn!("Timed out as the next leader in view {:?}", view_number); } - EventType::ViewFinished { view_number: _ } => {} - _ => unimplemented!(), + _ => {} } } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 2805e77675..f8fae206e2 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -258,9 +258,25 @@ pub async fn add_consensus_task>( // Poll (forever) for the latest quorum proposal consensus_state .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) + .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) .await; + // See if we're in the DA committee + // This will not work for epochs (because dynamic subscription + // With the Push CDN, we are _always_ polling for latest anyway. + let is_da = consensus_state + .committee_membership + .get_committee(::Time::new(0)) + .contains(&consensus_state.public_key); + + // If we are, poll for latest DA proposal. + if is_da { + consensus_state + .committee_network + .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) + .await; + } + // Poll (forever) for the latest view sync certificate consensus_state .quorum_network diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index f05818963f..1de57a26a3 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -225,7 +225,7 @@ struct Inner { txn_task_map: Arc>>, #[allow(clippy::type_complexity)] /// A handle on the task polling for latest quorum propsal - latest_quorum_proposal_task: Arc>>>, + latest_proposal_task: Arc>>>, #[allow(clippy::type_complexity)] /// A handle on the task polling for the latest view sync certificate latest_view_sync_certificate_task: Arc>>>, @@ -278,7 +278,7 @@ impl Inner { view_number: u64, message_purpose: MessagePurpose, vote_index: &mut u64, - seen_quorum_proposals: &mut LruCache, + seen_proposals: &mut LruCache, seen_view_sync_certificates: &mut LruCache, ) -> bool { let broadcast_poll_queue = &self.broadcast_poll_queue_0_1; @@ -298,11 +298,11 @@ impl Inner { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view return true; } - MessagePurpose::LatestQuorumProposal => { + MessagePurpose::LatestProposal => { let proposal = deserialized_message.clone(); let hash = hash(&proposal); // Only allow unseen proposals to be pushed to the queue - if seen_quorum_proposals.put(hash, ()).is_none() { + if seen_proposals.put(hash, ()).is_none() { broadcast_poll_queue.write().await.push(proposal); } @@ -397,7 +397,7 @@ impl Inner { ) -> Result<(), NetworkError> { let mut vote_index = 0; let mut tx_index = 0; - let mut seen_quorum_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); + let mut seen_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); let mut seen_view_sync_certificates = LruCache::new(NonZeroUsize::new(100).unwrap()); if message_purpose == MessagePurpose::Data { @@ -410,7 +410,7 @@ impl Inner { let endpoint = match message_purpose { MessagePurpose::Proposal => config::get_proposal_route(view_number), - MessagePurpose::LatestQuorumProposal => config::get_latest_quorum_proposal_route(), + MessagePurpose::LatestProposal => config::get_latest_proposal_route(), MessagePurpose::LatestViewSyncCertificate => { config::get_latest_view_sync_certificate_route() } @@ -519,7 +519,7 @@ impl Inner { view_number, message_purpose, &mut vote_index, - &mut seen_quorum_proposals, + &mut seen_proposals, &mut seen_view_sync_certificates, ) .await; @@ -683,7 +683,7 @@ impl WebServerNetwork { view_sync_cert_task_map: Arc::default(), view_sync_vote_task_map: Arc::default(), txn_task_map: Arc::default(), - latest_quorum_proposal_task: Arc::default(), + latest_proposal_task: Arc::default(), latest_view_sync_certificate_task: Arc::default(), }); @@ -707,7 +707,7 @@ impl WebServerNetwork { MessagePurpose::Vote => config::post_vote_route(*view_number), MessagePurpose::Data => config::post_transactions_route(), MessagePurpose::Internal - | MessagePurpose::LatestQuorumProposal + | MessagePurpose::LatestProposal | MessagePurpose::LatestViewSyncCertificate => { return Err(WebServerNetworkError::EndpointError) } @@ -853,8 +853,8 @@ impl ConnectedNetwork, TYPES::Signatur Self: 'b, { let closure = async move { - // Cancel poll for latest quorum proposal on shutdown - if let Some(ref sender) = *self.inner.latest_quorum_proposal_task.read().await { + // Cancel poll for latest proposal on shutdown + if let Some(ref sender) = *self.inner.latest_proposal_task.read().await { let _ = sender .send(ConsensusIntentEvent::CancelPollForLatestProposal(1)) .await; @@ -1046,9 +1046,9 @@ impl ConnectedNetwork, TYPES::Signatur .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVIDDisperse) .await; } - ConsensusIntentEvent::PollForLatestQuorumProposal => { + ConsensusIntentEvent::PollForLatestProposal => { // Only start this task if we haven't already started it. - let mut cancel_handle = self.inner.latest_quorum_proposal_task.write().await; + let mut cancel_handle = self.inner.latest_proposal_task.write().await; if cancel_handle.is_none() { let inner = self.inner.clone(); @@ -1061,7 +1061,7 @@ impl ConnectedNetwork, TYPES::Signatur if let Err(e) = inner .poll_web_server( receiver, - MessagePurpose::LatestQuorumProposal, + MessagePurpose::LatestProposal, 1, Duration::from_millis(500), ) diff --git a/types/src/message.rs b/types/src/message.rs index 53ff69f1ec..97f7f94ad9 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -63,10 +63,10 @@ pub struct Messages(pub Vec>); /// A message type agnostic description of a message's purpose #[derive(PartialEq, Copy, Clone)] pub enum MessagePurpose { - /// Message with a quorum proposal. + /// Message with a [quorum/DA] proposal. Proposal, - /// Message with most recent quorum proposal the server has - LatestQuorumProposal, + /// Message with most recent [quorum/DA] proposal the server has + LatestProposal, /// Message with most recent view sync certificate the server has LatestViewSyncCertificate, /// Message with a quorum vote. diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index fe0739b3a3..c968913e2c 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -147,8 +147,8 @@ pub enum ConsensusIntentEvent { PollForProposal(u64), /// Poll for VID disperse data for a particular view PollForVIDDisperse(u64), - /// Poll for the most recent quorum proposal the webserver has - PollForLatestQuorumProposal, + /// Poll for the most recent [quorum/da] proposal the webserver has + PollForLatestProposal, /// Poll for the most recent view sync proposal the webserver has PollForLatestViewSyncCertificate, /// Poll for a DAC for a particular view @@ -203,7 +203,7 @@ impl ConsensusIntentEvent { | ConsensusIntentEvent::PollForTransactions(view_number) | ConsensusIntentEvent::CancelPollForTransactions(view_number) | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, - ConsensusIntentEvent::PollForLatestQuorumProposal + ConsensusIntentEvent::PollForLatestProposal | ConsensusIntentEvent::PollForLatestViewSyncCertificate => 1, } } diff --git a/web_server/api.toml b/web_server/api.toml index 9aa9e4f3b2..d6c6363d47 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -20,7 +20,7 @@ Return the VID disperse data for a given view number """ # GET the latest quorum proposal -[route.get_latest_quorum_proposal] +[route.get_latest_proposal] PATH = ["proposal/latest"] DOC = """ Return the proposal for the most recent view the server has diff --git a/web_server/src/config.rs b/web_server/src/config.rs index af2ba638f5..0cbf8f47be 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -26,7 +26,7 @@ pub fn post_proposal_route(view_number: u64) -> String { /// get latest qc #[must_use] -pub fn get_latest_quorum_proposal_route() -> String { +pub fn get_latest_proposal_route() -> String { "api/proposal/latest".to_string() } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 049ebc5d3f..7ef28fbb27 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -43,7 +43,7 @@ struct WebServerState { /// view number -> (secret, da_certificates) da_certificates: HashMap)>, /// view for the most recent proposal to help nodes catchup - latest_quorum_proposal: u64, + latest_proposal: u64, /// view for the most recent view sync proposal latest_view_sync_certificate: u64, /// view for the oldest DA certificate @@ -101,7 +101,7 @@ impl WebServerState { votes: HashMap::new(), num_txns: 0, oldest_vote: 0, - latest_quorum_proposal: 0, + latest_proposal: 0, latest_view_sync_certificate: 0, oldest_certificate: 0, shutdown: None, @@ -153,7 +153,7 @@ pub trait WebServerDataSource { /// Get latest quanrum proposal /// # Errors /// Error if unable to serve. - fn get_latest_quorum_proposal(&self) -> Result>>, Error>; + fn get_latest_proposal(&self) -> Result>>, Error>; /// Get latest view sync proposal /// # Errors /// Error if unable to serve. @@ -306,8 +306,8 @@ impl WebServerDataSource for WebServerState { } } - fn get_latest_quorum_proposal(&self) -> Result>>, Error> { - self.get_proposal(self.latest_quorum_proposal) + fn get_latest_proposal(&self) -> Result>>, Error> { + self.get_proposal(self.latest_proposal) } fn get_latest_view_sync_certificate(&self) -> Result>>, Error> { @@ -556,8 +556,8 @@ impl WebServerDataSource for WebServerState { fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { info!("Received proposal for view {}", view_number); - if view_number > self.latest_quorum_proposal { - self.latest_quorum_proposal = view_number; + if view_number > self.latest_proposal { + self.latest_proposal = view_number; } // Only keep proposal history for MAX_VIEWS number of view @@ -781,8 +781,8 @@ where } .boxed() })? - .get("get_latest_quorum_proposal", |_req, state| { - async move { state.get_latest_quorum_proposal() }.boxed() + .get("get_latest_proposal", |_req, state| { + async move { state.get_latest_proposal() }.boxed() })? .get("get_latest_view_sync_certificate", |_req, state| { async move { state.get_latest_view_sync_certificate() }.boxed() From e8114106af0438566948e5ef76ce3d9eb76030c7 Mon Sep 17 00:00:00 2001 From: MRain Date: Fri, 2 Feb 2024 15:32:37 -0500 Subject: [PATCH 0763/1393] clean faulty feature requirement --- hotshot/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 1ba00da58e..52c3460a89 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -239,7 +239,6 @@ impl> SystemContext { let inner: Arc> = Arc::new(SystemContextInner { id: nonce, - #[cfg(feature = "hotshot-testing")] consensus, public_key, private_key, From cae4ff13846432463d51ebdc82d4814d352c9deb Mon Sep 17 00:00:00 2001 From: chad Date: Sun, 4 Feb 2024 18:28:09 -0500 Subject: [PATCH 0764/1393] refactor: move NUM_REPLICATED_TO_TRUST to mod file (#2434) --- .../src/network/behaviours/dht/mod.rs | 3 +-- libp2p-networking/src/network/def.rs | 6 ------ libp2p-networking/src/network/node.rs | 15 ++++++--------- 3 files changed, 7 insertions(+), 17 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index c2b5f55531..acc136c26b 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -26,9 +26,8 @@ use tracing::{error, info, warn}; /// the number of nodes required to get an answer from /// in order to trust that the answer is correct when retrieving from the DHT -/// TODO why are there two of these? -/// pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; + /// the maximum number of nodes to query in the DHT at any one time const MAX_DHT_QUERY_SIZE: usize = 5; diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index a1b387b10d..c412e6fd11 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -22,12 +22,6 @@ use super::{ use libp2p_swarm_derive::NetworkBehaviour; -/// the number of nodes required to get an answer from -/// in order to trust that the answer is correct when retrieving from the DHT -/// TODO why are there two of these? -/// -pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; - /// Overarching network behaviour performing: /// - network topology discovoery /// - direct messaging diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 0a129cdf91..bbed9e2d66 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -19,15 +19,12 @@ use super::{ NetworkNodeType, }; -use crate::network::{ - behaviours::{ - dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery}, - direct_message::{DMBehaviour, DMEvent}, - direct_message_codec::{DirectMessageProtocol, MAX_MSG_SIZE_DM}, - exponential_backoff::ExponentialBackoff, - gossip::GossipEvent, - }, - def::NUM_REPLICATED_TO_TRUST, +use crate::network::behaviours::{ + dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, + direct_message::{DMBehaviour, DMEvent}, + direct_message_codec::{DirectMessageProtocol, MAX_MSG_SIZE_DM}, + exponential_backoff::ExponentialBackoff, + gossip::GossipEvent, }; use async_compatibility_layer::{ art::async_spawn, From 9050341c839f01981e28e43a98c6b5c4cc323163 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 5 Feb 2024 18:10:20 +0100 Subject: [PATCH 0765/1393] [BUILDER] Add builder_commitment method to BlockPayload (#2526) --- testing/src/block_types.rs | 9 +++++++++ types/src/traits/block_contents.rs | 4 ++++ types/src/utils.rs | 32 ++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/testing/src/block_types.rs b/testing/src/block_types.rs index c581f7726a..834e0646c0 100644 --- a/testing/src/block_types.rs +++ b/testing/src/block_types.rs @@ -10,6 +10,7 @@ use hotshot_types::{ block_contents::{vid_commitment, BlockHeader, TestableBlock, Transaction}, BlockPayload, ValidatedState, }, + utils::BuilderCommitment, }; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; @@ -159,6 +160,14 @@ impl BlockPayload for TestBlockPayload { .map(commit::Committable::commit) .collect() } + + fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { + let mut digest = sha2::Sha256::new(); + for txn in &self.transactions { + digest.update(&txn.0); + } + BuilderCommitment::from_raw_digest(digest.finalize()) + } } /// Computes the (empty) genesis VID commitment diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 92785ed1b2..7ac59e097e 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -6,6 +6,7 @@ use crate::{ data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}, traits::ValidatedState, + utils::BuilderCommitment, }; use commit::{Commitment, Committable}; use serde::{de::DeserializeOwned, Serialize}; @@ -77,6 +78,9 @@ pub trait BlockPayload: &self, metadata: &Self::Metadata, ) -> Vec>; + + /// Generate commitment that builders use to sign block options. + fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; } /// extra functions required on block to be usable by hotshot-testing diff --git a/types/src/utils.rs b/types/src/utils.rs index 187996e360..46b33c1e0d 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -4,8 +4,13 @@ use crate::{ data::{Leaf, VidCommitment}, traits::node_implementation::NodeType, }; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use commit::Commitment; +use digest::OutputSizeUser; +use sha2::Digest; use std::ops::Deref; +use tagged_base64::tagged; +use typenum::Unsigned; /// A view's state #[derive(Debug)] @@ -101,3 +106,30 @@ pub enum Terminator { /// Stop including this view number Inclusive(T), } + +/// Type alias for byte array of SHA256 digest length +type Sha256Digest = [u8; ::OutputSize::USIZE]; + +#[tagged("BUILDER_COMMITMENT")] +#[derive(Clone, Debug, Hash, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +/// Commitment that builders use to sign block options. +/// A thin wrapper around a Sha256 digest. +pub struct BuilderCommitment(Sha256Digest); + +impl BuilderCommitment { + /// Create new commitment for `data` + pub fn from_bytes(data: impl AsRef<[u8]>) -> Self { + Self(sha2::Sha256::digest(data.as_ref()).into()) + } + + /// Create a new commitment from a raw Sha256 digest + pub fn from_raw_digest(digest: impl Into) -> Self { + Self(digest.into()) + } +} + +impl AsRef for BuilderCommitment { + fn as_ref(&self) -> &Sha256Digest { + &self.0 + } +} From 7317c9c4b20c8ce4c06c9c533ffa956a3f64189e Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 5 Feb 2024 16:27:40 -0500 Subject: [PATCH 0766/1393] Wrap validated state in Arc to avoid unncessary clones --- hotshot/src/lib.rs | 4 ++-- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus.rs | 5 +++-- testing/src/task_helpers.rs | 17 ++++++++++------- types/src/consensus.rs | 4 ++-- types/src/traits/states.rs | 2 +- types/src/utils.rs | 16 ++++++++++------ 7 files changed, 29 insertions(+), 21 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 63f8d93ac4..e337b8eba8 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -191,7 +191,7 @@ impl> SystemContext { // insert genesis (or latest block) to state map let mut validated_state_map = BTreeMap::default(); - let validated_state = TYPES::ValidatedState::genesis(&instance_state); + let validated_state = Arc::new(TYPES::ValidatedState::genesis(&instance_state)); validated_state_map.insert( anchored_leaf.get_view_number(), View { @@ -349,7 +349,7 @@ impl> SystemContext { /// /// # Panics /// Panics if internal state for consensus is inconsistent - pub async fn get_decided_state(&self) -> TYPES::ValidatedState { + pub async fn get_decided_state(&self) -> Arc { self.inner .consensus .read() diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 20f6daf059..940c7e533a 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -90,7 +90,7 @@ impl + 'static> SystemContextHandl /// /// # Panics /// If the internal consensus is in an inconsistent state. - pub async fn get_decided_state(&self) -> TYPES::ValidatedState { + pub async fn get_decided_state(&self) -> Arc { self.hotshot.get_decided_state().await } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9c35a62abe..34ad675ee2 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -538,9 +538,9 @@ impl, A: ConsensusApi + block_payload: None, proposer_id: sender, }; - let state = ::from_header( + let state = Arc::new(::from_header( &proposal.data.block_header, - ); + )); consensus.validated_state_map.insert( view, @@ -600,6 +600,7 @@ impl, A: ConsensusApi + error!("Block header doesn't extend the proposal",); return; }; + let state = Arc::new(state); let parent_commitment = parent.commit(); let leaf: Leaf<_> = Leaf { view_number: view, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 73068e94eb..a8c752a1d4 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -42,7 +42,7 @@ use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; use serde::Serialize; -use std::{fmt::Debug, hash::Hash}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; /// create the [`SystemContextHandle`] from a node id /// # Panics @@ -238,10 +238,11 @@ async fn build_quorum_proposal_and_signature( .quorum_membership .total_nodes(), ); - let mut parent_state = - ::from_header(&parent_leaf.block_header); + let mut parent_state = Arc::new(::from_header( + &parent_leaf.block_header, + )); let block_header = TestBlockHeader::new( - &parent_state, + &*parent_state, &TestInstanceState {}, &parent_leaf.block_header, payload_commitment, @@ -269,9 +270,11 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { - let state_new_view = parent_state - .validate_and_apply_header(&TestInstanceState {}, &block_header, &block_header) - .unwrap(); + let state_new_view = Arc::new( + parent_state + .validate_and_apply_header(&TestInstanceState {}, &block_header, &block_header) + .unwrap(), + ); // save states for the previous view to pass all the qc checks // In the long term, we want to get rid of this, do not manually update consensus state consensus.validated_state_map.insert( diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 6c62cb3d1b..d668cd8f39 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -341,7 +341,7 @@ impl Consensus { /// Gets the validated state with the given view number, if in the state map. #[must_use] - pub fn get_state(&self, view_number: TYPES::Time) -> Option<&TYPES::ValidatedState> { + pub fn get_state(&self, view_number: TYPES::Time) -> Option<&Arc> { match self.validated_state_map.get(&view_number) { Some(view) => view.get_state(), None => None, @@ -354,7 +354,7 @@ impl Consensus { /// If the last decided view's state does not exist in the state map, which should never /// happen. #[must_use] - pub fn get_decided_state(&self) -> &TYPES::ValidatedState { + pub fn get_decided_state(&self) -> &Arc { let decided_view_num = self.last_decided_view; self.get_state(decided_view_num) .expect("Decided state not found! Consensus internally inconsistent") diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 0d7d5fe5ee..522793aa1f 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -22,7 +22,7 @@ pub trait InstanceState: Clone + Debug + Send + Sync {} /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) pub trait ValidatedState: - Serialize + DeserializeOwned + Clone + Debug + Default + Hash + PartialEq + Eq + Send + Sync + Serialize + DeserializeOwned + Debug + Default + Hash + PartialEq + Eq + Send + Sync { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; diff --git a/types/src/utils.rs b/types/src/utils.rs index 46b33c1e0d..f55aca47fa 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -8,7 +8,7 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use commit::Commitment; use digest::OutputSizeUser; use sha2::Digest; -use std::ops::Deref; +use std::{ops::Deref, sync::Arc}; use tagged_base64::tagged; use typenum::Unsigned; @@ -27,17 +27,21 @@ pub enum ViewInner { /// Undecided view Leaf { /// Proposed leaf - leaf: Commitment>, + leaf: LeafCommitment, /// Validated state. - state: TYPES::ValidatedState, + state: Arc, }, /// Leaf has failed Failed, } +/// The hash of a leaf. +pub type LeafCommitment = Commitment>; + impl ViewInner { /// Return the underlying undecide leaf view if it exists. - pub fn get_leaf(&self) -> Option<(Commitment>, &TYPES::ValidatedState)> { + #[must_use] + pub fn get_leaf(&self) -> Option<(LeafCommitment, &Arc)> { if let Self::Leaf { leaf, state } = self { Some((*leaf, state)) } else { @@ -47,7 +51,7 @@ impl ViewInner { /// return the underlying leaf hash if it exists #[must_use] - pub fn get_leaf_commitment(&self) -> Option>> { + pub fn get_leaf_commitment(&self) -> Option> { if let Self::Leaf { leaf, .. } = self { Some(*leaf) } else { @@ -57,7 +61,7 @@ impl ViewInner { /// return the underlying validated state if it exists #[must_use] - pub fn get_state(&self) -> Option<&TYPES::ValidatedState> { + pub fn get_state(&self) -> Option<&Arc> { if let Self::Leaf { state, .. } = self { Some(state) } else { From 2ead1981a16eb0b7c28151dc62fe643824470d16 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 5 Feb 2024 16:35:38 -0500 Subject: [PATCH 0767/1393] Add get_state method to get undecided state by view number --- hotshot/src/lib.rs | 13 ++++++++++++- hotshot/src/types/handle.rs | 11 +++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e337b8eba8..5318fe8061 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -345,7 +345,7 @@ impl> SystemContext { .map(|guard| guard.get_decided_leaf()) } - /// Returns a copy of the last decided validated state. + /// Returns the last decided validated state. /// /// # Panics /// Panics if internal state for consensus is inconsistent @@ -358,6 +358,17 @@ impl> SystemContext { .clone() } + /// Get the validated state from a given `view`. + /// + /// Returns the requested state, if the [`SystemContext`] is tracking this view. Consensus + /// tracks views that have not yet been decided but could be in the future. This function may + /// return [`None`] if the requested view has already been decided (but see + /// [`get_decided_state`](Self::get_decided_state)) or if there is no path for the requested + /// view to ever be decided. + pub async fn get_state(&self, view: TYPES::Time) -> Option> { + self.inner.consensus.read().await.get_state(view).cloned() + } + /// Initializes a new [`SystemContext`] and does the work of setting up all the background tasks /// /// Assumes networking implementation is already primed. diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 940c7e533a..862fa8d27a 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -94,6 +94,17 @@ impl + 'static> SystemContextHandl self.hotshot.get_decided_state().await } + /// Get the validated state from a given `view`. + /// + /// Returns the requested state, if the [`SystemContext`] is tracking this view. Consensus + /// tracks views that have not yet been decided but could be in the future. This function may + /// return [`None`] if the requested view has already been decided (but see + /// [`get_decided_state`](Self::get_decided_state)) or if there is no path for the requested + /// view to ever be decided. + pub async fn get_state(&self, view: TYPES::Time) -> Option> { + self.hotshot.get_state(view).await + } + /// Get the last decided leaf of the [`SystemContext`] instance. /// /// # Panics From 92fcd54d100e92c6a1e80801647b911d10f8eaf8 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 5 Feb 2024 15:29:15 -0800 Subject: [PATCH 0768/1393] Emit Decide for genesis --- task-impls/src/consensus.rs | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9c35a62abe..f62ff53603 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -507,9 +507,30 @@ impl, A: ConsensusApi + let consensus = self.consensus.upgradable_read().await; - // Construct the leaf. + // Get the parent leaf. let parent = if justify_qc.is_genesis { - self.genesis_leaf().await + // Send the `Decide` event for the genesis block if the justify QC is genesis. + let leaf = self.genesis_leaf().await; + match leaf { + Some(ref leaf) => { + self.output_event_stream + .publish(Event { + view_number: TYPES::Time::genesis(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![leaf.clone()]), + qc: Arc::new(justify_qc.clone()), + block_size: None, + }, + }) + .await; + } + None => { + error!( + "Failed to find the genesis leaf while the justify QC is genesis." + ); + } + } + leaf } else { consensus .saved_leaves From 8b9cb9139c0adb87ed8328380d0e9321423fe03a Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 6 Feb 2024 12:21:47 -0800 Subject: [PATCH 0769/1393] Skip genesis leaf in safety task checks --- testing/src/overall_safety_task.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index d52ca5ebba..aa8b111ae3 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -15,7 +15,7 @@ use hotshot_types::{ error::RoundTimedoutState, event::{Event, EventType}, simple_certificate::QuorumCertificate, - traits::node_implementation::NodeType, + traits::node_implementation::{ConsensusTime, NodeType}, }; use snafu::Snafu; use std::{ @@ -192,12 +192,15 @@ impl RoundResult { let maybe_leaf: Option> = result.0.into_iter().last(); if let Some(leaf) = maybe_leaf.clone() { - match self.leaf_map.entry(leaf.clone()) { - std::collections::hash_map::Entry::Occupied(mut o) => { - *o.get_mut() += 1; - } - std::collections::hash_map::Entry::Vacant(v) => { - v.insert(1); + // Skip the genesis leaf. + if leaf.get_view_number() != TYPES::Time::genesis() { + match self.leaf_map.entry(leaf.clone()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } } } @@ -246,6 +249,11 @@ impl RoundResult { check_block: bool, transaction_threshold: u64, ) { + // Skip for the genesis leaf. + if key.get_view_number() == TYPES::Time::genesis() { + return; + } + let num_decided = self.success_nodes.len(); let num_failed = self.failed_nodes.len(); let remaining_nodes = total_num_nodes - (num_decided + num_failed); From c54f82dc4c504f2c1cf561c6549a6b174c08081e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 6 Feb 2024 17:01:31 -0500 Subject: [PATCH 0770/1393] Fix test for genesis --- testing/src/overall_safety_task.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index aa8b111ae3..51451bcc64 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -453,6 +453,10 @@ impl OverallSafetyPropertiesDescription { qc, block_size: maybe_block_size, } => { + // Skip the genesis leaf. + if leaf_chain.len() == 1 && leaf_chain[0].get_view_number() == TYPES::Time::genesis() { + return (None, state); + } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); match state.ctx.round_results.entry(view_number) { Entry::Occupied(mut o) => o.get_mut().insert_into_result( From ca4da92dae7196f3e6696570ac2a8d889b0f95bb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 6 Feb 2024 17:03:57 -0500 Subject: [PATCH 0771/1393] Remove the other if --- testing/src/overall_safety_task.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 51451bcc64..e3eb95310f 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -192,15 +192,12 @@ impl RoundResult { let maybe_leaf: Option> = result.0.into_iter().last(); if let Some(leaf) = maybe_leaf.clone() { - // Skip the genesis leaf. - if leaf.get_view_number() != TYPES::Time::genesis() { - match self.leaf_map.entry(leaf.clone()) { - std::collections::hash_map::Entry::Occupied(mut o) => { - *o.get_mut() += 1; - } - std::collections::hash_map::Entry::Vacant(v) => { - v.insert(1); - } + match self.leaf_map.entry(leaf.clone()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); } } From e6a26fc9d5a8a66ff32fa0fcebc69b5cf591250e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 6 Feb 2024 14:36:01 -0800 Subject: [PATCH 0772/1393] Remove redundant check --- testing/src/overall_safety_task.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index e3eb95310f..d145a2fd58 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -246,11 +246,6 @@ impl RoundResult { check_block: bool, transaction_threshold: u64, ) { - // Skip for the genesis leaf. - if key.get_view_number() == TYPES::Time::genesis() { - return; - } - let num_decided = self.success_nodes.len(); let num_failed = self.failed_nodes.len(); let remaining_nodes = total_num_nodes - (num_decided + num_failed); From d1c7562d143c851f6e0ed979ebe10ce8902097e7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 7 Feb 2024 16:09:48 -0500 Subject: [PATCH 0773/1393] Libp2p test with Failures (#2543) * test with one net * More logging * fix: add longer timeout and patttern match on record finish type * clean up * Fix some logging * Backoff timeout for unrealiable libp2p test * add more time for libp2p asynchronous test * longer dht timout just to be sure, add retries --------- Co-authored-by: Justin Restivo --- .../src/traits/networking/libp2p_network.rs | 2 +- .../src/network/behaviours/dht/mod.rs | 43 +++++++++------ libp2p-networking/src/network/node.rs | 6 ++- libp2p-networking/src/network/node/handle.rs | 2 +- testing/src/test_builder.rs | 4 +- testing/src/test_launcher.rs | 14 +++-- testing/src/test_runner.rs | 21 ++++++-- testing/tests/libp2p.rs | 52 +++++++++++++++++++ testing/tests/unreliable_network.rs | 6 ++- types/src/traits/node_implementation.rs | 32 ++++++------ web_server/src/lib.rs | 2 +- 11 files changed, 136 insertions(+), 48 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 434139d9f9..19f384d75c 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -367,7 +367,7 @@ impl Libp2pNetwork { is_ready: Arc::new(AtomicBool::new(false)), // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples // https://github.com/EspressoSystems/HotShot/issues/2088 - dht_timeout: Duration::from_secs(2), + dht_timeout: Duration::from_secs(120), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, topic_map, diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index acc136c26b..78a902143c 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -29,7 +29,7 @@ use tracing::{error, info, warn}; pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; /// the maximum number of nodes to query in the DHT at any one time -const MAX_DHT_QUERY_SIZE: usize = 5; +const MAX_DHT_QUERY_SIZE: usize = 50; use self::cache::Cache; @@ -249,9 +249,10 @@ impl DHTBehaviour { if let Some(entry) = async_block_on(self.cache.get(&key)) { // exists in cache if chan.send(entry.value().clone()).is_err() { - warn!("Get DHT: channel closed before get record request result could be sent"); + error!("Get DHT: channel closed before get record request result could be sent"); } } else { + tracing::debug!("DHT cache miss, key: {:?}", key); // doesn't exist in cache, actually propagate request let qid = self.kadem.get_record(key.clone().into()); let query = KadGetQuery { @@ -268,17 +269,27 @@ impl DHTBehaviour { } /// update state based on recv-ed get query - fn handle_get_query(&mut self, record_results: GetRecordResult, id: QueryId, last: bool) { + fn handle_get_query(&mut self, record_results: GetRecordResult, id: QueryId, mut last: bool) { if let Some(query) = self.in_progress_get_record_queries.get_mut(&id) { - if let Ok(GetRecordOk::FoundRecord(record)) = record_results { - match query.records.entry(record.record.value) { - std::collections::hash_map::Entry::Occupied(mut o) => { - let num_entries = o.get_mut(); - *num_entries += 1; - } - std::collections::hash_map::Entry::Vacant(v) => { - v.insert(1); + match record_results { + Ok(results) => match results { + GetRecordOk::FoundRecord(record) => { + match query.records.entry(record.record.value) { + std::collections::hash_map::Entry::Occupied(mut o) => { + let num_entries = o.get_mut(); + *num_entries += 1; + } + std::collections::hash_map::Entry::Vacant(v) => { + v.insert(1); + } + } } + GetRecordOk::FinishedWithNoAdditionalRecord { + cache_candidates: _, + } => last = true, + }, + Err(err) => { + error!("GOT ERROR IN KAD QUERY {:?}", err); } } } else { @@ -286,6 +297,7 @@ impl DHTBehaviour { return; } + // BUG if last { if let Some(KadGetQuery { backoff, @@ -315,17 +327,17 @@ impl DHTBehaviour { // return value if notify.send(r).is_err() { - warn!("Get DHT: channel closed before get record request result could be sent"); + error!("Get DHT: channel closed before get record request result could be sent"); } } // lack of replication => error else if records_len < NUM_REPLICATED_TO_TRUST { - warn!("Get DHT: Record not replicated enough for {:?}! requerying with more nodes", progress); + error!("Get DHT: Record not replicated enough for {:?}! requerying with more nodes", progress); self.get_record(key, notify, num_replicas, backoff, retry_count); } // many records that don't match => disagreement else if records_len > MAX_DHT_QUERY_SIZE { - warn!( + error!( "Get DHT: Record disagreed upon; {:?}! requerying with more nodes", progress ); @@ -339,7 +351,7 @@ impl DHTBehaviour { NonZeroUsize::new(num_replicas.get() + 1).unwrap_or(num_replicas); self.get_record(key, notify, new_factor, backoff, retry_count); - warn!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes", progress); + error!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes", progress); } } } @@ -605,6 +617,7 @@ impl NetworkBehaviour for DHTBehaviour { self.queued_get_record_queries.push_back(req); } } + while let Some(req) = self.queued_put_record_queries.pop_front() { if req.backoff.is_expired() { self.put_record(req); diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index bbed9e2d66..a99c52bcd8 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -246,13 +246,17 @@ impl NetworkNode { .unwrap_or(Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC)); let ttl = Some(config.ttl.unwrap_or(16 * record_republication_interval)); kconfig - .set_parallelism(NonZeroUsize::new(1).unwrap()) + .set_parallelism(NonZeroUsize::new(5).unwrap()) .set_provider_publication_interval(Some(record_republication_interval)) .set_publication_interval(Some(record_republication_interval)) .set_record_ttl(ttl); + // allowing panic here because something is very wrong if this fales + #[allow(clippy::panic)] if let Some(factor) = config.replication_factor { kconfig.set_replication_factor(factor); + } else { + panic!("Replication factor not set"); } let kadem = Behaviour::with_config(peer_id, MemoryStore::new(peer_id), kconfig); diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 8a24c00d26..885f66b097 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -373,7 +373,7 @@ impl NetworkNodeHandle { key: &impl Serialize, timeout: Duration, ) -> Result { - let result = async_timeout(timeout, self.get_record(key, 1)).await; + let result = async_timeout(timeout, self.get_record(key, 3)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index cdfd1cc270..f64f113a63 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -68,7 +68,7 @@ pub struct TestMetadata { impl Default for TimingData { fn default() -> Self { Self { - next_view_timeout: 1500, + next_view_timeout: 2500, timeout_ratio: (11, 10), round_start_delay: 100, start_delay: 100, @@ -184,7 +184,7 @@ impl Default for TestMetadata { }, overall_safety_properties: OverallSafetyPropertiesDescription::default(), // arbitrary, haven't done the math on this - txn_description: TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(10)), + txn_description: TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(100)), completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // TODO ED Put a configurable time here - 10 seconds for now diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 178c075183..591253af4a 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use futures::future::BoxFuture; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; @@ -8,7 +8,10 @@ use hotshot_task::{ task::HotShotTaskCompleted, task_launcher::TaskRunner, }; -use hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; +use hotshot_types::{ + traits::{network::CommunicationChannel, node_implementation::NodeType}, + HotShotConfig, +}; use crate::{spinning_task::SpinningTask, view_sync_task::ViewSyncTask}; @@ -20,7 +23,7 @@ use super::{ /// convience type alias for the networks available pub type Networks = ( >::QuorumNetwork, - >::CommitteeNetwork, + >::QuorumNetwork, ); /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. @@ -84,13 +87,14 @@ pub struct TestLauncher> { impl> TestLauncher { /// launch the test #[must_use] - pub fn launch(self) -> TestRunner { - TestRunner { + pub fn launch>(self) -> TestRunner { + TestRunner:: { launcher: self, nodes: Vec::new(), late_start: HashMap::new(), next_node_id: 0, task_runner: TaskRunner::default(), + _pd: PhantomData, } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index f1d4cf237b..57edbf45e4 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -16,7 +16,9 @@ use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemCont use hotshot_task::{ event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, }; -use hotshot_types::traits::network::CommunicationChannel; +use hotshot_types::traits::{ + network::CommunicationChannel, node_implementation::NodeImplementation, +}; use hotshot_types::{ consensus::ConsensusMetricsValue, traits::{ @@ -55,7 +57,11 @@ pub struct LateStartNode> /// The runner of a test network /// spin up and down nodes, execute rounds -pub struct TestRunner> { +pub struct TestRunner< + TYPES: NodeType, + I: TestableNodeImplementation, + N: CommunicationChannel, +> { /// test launcher, contains a bunch of useful metadata and closures pub(crate) launcher: TestLauncher, /// nodes in the test @@ -66,12 +72,18 @@ pub struct TestRunner> { pub(crate) next_node_id: u64, /// overarching test task pub(crate) task_runner: TaskRunner, + /// PhantomData for N + pub(crate) _pd: PhantomData, } -impl, I: TestableNodeImplementation> - TestRunner +impl< + TYPES: NodeType, + I: TestableNodeImplementation, + N: CommunicationChannel, + > TestRunner where I: TestableNodeImplementation, + I: NodeImplementation, { /// excecute test /// # Panics @@ -103,6 +115,7 @@ where late_start, next_node_id: _, mut task_runner, + _pd: PhantomData, } = self; let registry = GlobalRegistry::default(); let test_event_stream = ChannelStream::new(); diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 623b714b6b..f7c4650648 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -4,6 +4,7 @@ use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, node_types::{Libp2pImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; use tracing::instrument; @@ -43,6 +44,57 @@ async fn libp2p_network() { .await; } +/// libp2p network test with failures +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn libp2p_network_failures_2() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::new(240, 0), + }, + ), + timing_data: TimingData { + next_view_timeout: 25000, + propose_max_round_time: Duration::from_millis(100), + ..Default::default() + }, + ..TestMetadata::default_multiple_rounds() + }; + + let dead_nodes = vec![ChangeNode { + idx: 11, + updown: UpDown::Down, + }]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(3, dead_nodes)], + }; + metadata.total_nodes = 12; + metadata.da_committee_size = 12; + metadata.start_nodes = 12; + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 1; + // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 15; + + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} + /// stress test for libp2p #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index e39ae11e98..756d980f1a 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -106,7 +106,7 @@ async fn libp2p_network_async() { ), timing_data: TimingData { timeout_ratio: (1, 1), - next_view_timeout: 1000, + next_view_timeout: 25000, ..TestMetadata::default_multiple_rounds().timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { @@ -197,6 +197,10 @@ async fn test_memory_network_partially_sync() { duration: Duration::from_secs(240), }, ), + timing_data: TimingData { + next_view_timeout: 25000, + ..Default::default() + }, unreliable_network: Some(Box::new(PartiallySynchronousNetwork { asynchronous: AsynchronousNetwork { keep_numerator: 8, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 2a6da5cada..7cd2ef070e 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -99,7 +99,7 @@ pub trait TestableNodeImplementation: NodeImplementation num_bootstrap: usize, da_committee_size: usize, reliability_config: Option>, - ) -> Box (Self::QuorumNetwork, Self::CommitteeNetwork)>; + ) -> Box (Self::QuorumNetwork, Self::QuorumNetwork)>; } #[async_trait] @@ -158,8 +158,8 @@ where num_bootstrap: usize, da_committee_size: usize, reliability_config: Option>, - ) -> Box (Self::QuorumNetwork, Self::CommitteeNetwork)> { - let network_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( + ) -> Box (Self::QuorumNetwork, Self::QuorumNetwork)> { + let quorum_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, 0, @@ -167,24 +167,22 @@ where false, reliability_config.clone(), ); - let da_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 1, - da_committee_size, - true, - reliability_config - ); + let da_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( + expected_node_count, + num_bootstrap, + 1, + da_committee_size, + false, + reliability_config, + ); Box::new(move |id| { - let network = Arc::new(network_generator(id)); - let network_da = Arc::new(da_generator(id)); + let quorum = Arc::new(quorum_generator(id)); + let da = Arc::new(da_generator(id)); let quorum_chan = - >::generate_network()(network); + >::generate_network()(quorum); let committee_chan = - >::generate_network()( - network_da, - ); + >::generate_network()(da); (quorum_chan, committee_chan) }) } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 7ef28fbb27..fb02e97b07 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -273,7 +273,7 @@ impl WebServerDataSource for WebServerState { if proposal.1.is_empty() { Err(ServerError { status: StatusCode::NotImplemented, - message: format!("Proposal not found for view {view_number}"), + message: format!("Proposal empty for view {view_number}"), }) } else { Ok(Some(vec![proposal.1.clone()])) From 4875091f1424c58921752aa05e7c6f5d039f7e13 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:19:39 -0500 Subject: [PATCH 0774/1393] Add a task to handle an upgrade vote (#2532) --- hotshot/src/lib.rs | 5 +- hotshot/src/tasks/mod.rs | 58 +++++++++ task-impls/src/events.rs | 6 +- task-impls/src/lib.rs | 3 + task-impls/src/network.rs | 5 +- task-impls/src/upgrade.rs | 251 ++++++++++++++++++++++++++++++++++++++ task-impls/src/vote.rs | 42 ++++++- types/src/event.rs | 10 +- types/src/message.rs | 16 +-- 9 files changed, 370 insertions(+), 26 deletions(-) create mode 100644 task-impls/src/upgrade.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5318fe8061..7d93a97449 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -15,7 +15,7 @@ pub mod tasks; use crate::{ tasks::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_transaction_task, add_view_sync_task, + add_transaction_task, add_upgrade_task, add_view_sync_task, }, traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, @@ -419,7 +419,6 @@ impl> SystemContext { Ok((handle, internal_event_stream)) } - /// return the timeout for a view for `self` #[must_use] pub fn get_next_view_timeout(&self) -> u64 { @@ -521,6 +520,8 @@ impl> SystemContext { add_transaction_task(task_runner, internal_event_stream.clone(), handle.clone()).await; let task_runner = add_view_sync_task(task_runner, internal_event_stream.clone(), handle.clone()).await; + let task_runner = + add_upgrade_task(task_runner, internal_event_stream.clone(), handle.clone()).await; async_spawn(async move { let _ = task_runner.launch().await; info!("Task runner exited!"); diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f8fae206e2..270ed1f0c4 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -22,6 +22,7 @@ use hotshot_task_impls::{ NetworkMessageTaskTypes, NetworkTaskKind, }, transactions::{TransactionTaskState, TransactionsTaskTypes}, + upgrade::{UpgradeTaskState, UpgradeTaskTypes}, vid::{VIDTaskState, VIDTaskTypes}, view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, }; @@ -376,6 +377,63 @@ pub async fn add_vid_task>( task_runner.add_task(vid_task_id, vid_name.to_string(), vid_task) } +/// add the Upgrade task. +/// +/// # Panics +/// +/// Uses .unwrap(), though this should never panic. +pub async fn add_upgrade_task>( + task_runner: TaskRunner, + event_stream: ChannelStream>, + handle: SystemContextHandle, +) -> TaskRunner { + let c_api: HotShotConsensusApi = HotShotConsensusApi { + inner: handle.hotshot.inner.clone(), + }; + let registry = task_runner.registry.clone(); + let upgrade_state = UpgradeTaskState { + api: c_api.clone(), + registry: registry.clone(), + cur_view: TYPES::Time::new(0), + quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), + quorum_network: c_api.inner.networks.quorum_network.clone().into(), + should_vote: |_upgrade_proposal| false, + vote_collector: None.into(), + event_stream: event_stream.clone(), + public_key: c_api.public_key().clone(), + private_key: c_api.private_key().clone(), + id: handle.hotshot.inner.id, + }; + let upgrade_event_handler = HandleEvent(Arc::new( + move |event, mut state: UpgradeTaskState>| { + async move { + let completion_status = state.handle_event(event).await; + (completion_status, state) + } + .boxed() + }, + )); + let upgrade_name = "Upgrade Task"; + let upgrade_event_filter = FilterEvent(Arc::new( + UpgradeTaskState::>::filter, + )); + + let upgrade_task_builder = TaskBuilder::< + UpgradeTaskTypes>, + >::new(upgrade_name.to_string()) + .register_event_stream(event_stream.clone(), upgrade_event_filter) + .await + .register_registry(&mut registry.clone()) + .await + .register_state(upgrade_state) + .register_event_handler(upgrade_event_handler); + // impossible for unwrap to fail + // we *just* registered + let upgrade_task_id = upgrade_task_builder.get_task_id().unwrap(); + let upgrade_task = UpgradeTaskTypes::build(upgrade_task_builder).launch(); + task_runner.add_task(upgrade_task_id, upgrade_name.to_string(), upgrade_task) +} + /// add the Data Availability task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 92e49d02ea..d1d94c8438 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -112,15 +112,13 @@ pub enum HotShotEvent { /// Like [`HotShotEvent::DAProposalRecv`]. VidDisperseRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been received from the network - UpgradeProposalRecv(UpgradeProposal), + UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network UpgradeProposalSend(UpgradeProposal), /// Upgrade vote has been received from the network UpgradeVoteRecv(UpgradeVote), /// Upgrade vote has been sent to the network UpgradeVoteSend(UpgradeVote), - /// Upgrade certificate has been received from the network - UpgradeCertificateRecv(UpgradeCertificate), /// Upgrade certificate has been sent to the network - UpgradeCertificateSend(UpgradeCertificate), + UpgradeCertificateFormed(UpgradeCertificate), } diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 8521b525d4..8299bf1edc 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -28,5 +28,8 @@ pub mod vid; /// Generic task for collecting votes pub mod vote; +/// Task for handling upgrades +pub mod upgrade; + /// Helper functions used by any task mod helpers; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 9bd7992a0c..79bedc2260 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -86,11 +86,8 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::TimeoutVote(message) => { HotShotEvent::TimeoutVoteRecv(message) } - GeneralConsensusMessage::UpgradeCertificate(message) => { - HotShotEvent::UpgradeCertificateRecv(message) - } GeneralConsensusMessage::UpgradeProposal(message) => { - HotShotEvent::UpgradeProposalRecv(message) + HotShotEvent::UpgradeProposalRecv(message, sender) } GeneralConsensusMessage::UpgradeVote(message) => { HotShotEvent::UpgradeVoteRecv(message) diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs new file mode 100644 index 0000000000..0b223a5f04 --- /dev/null +++ b/task-impls/src/upgrade.rs @@ -0,0 +1,251 @@ +use crate::{ + events::HotShotEvent, + vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, +}; +use async_lock::RwLock; + +use hotshot_task::{ + event_stream::{ChannelStream, EventStream}, + global_registry::GlobalRegistry, + task::{HotShotTaskCompleted, TS}, + task_impls::HSTWithEvent, +}; +use hotshot_types::{ + event::{Event, EventType}, + simple_certificate::UpgradeCertificate, + simple_vote::{UpgradeProposalData, UpgradeVote}, + traits::{ + consensus_api::ConsensusApi, + election::Membership, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, + }, + vote::HasViewNumber, +}; + +use crate::vote::HandleVoteEvent; +use snafu::Snafu; +use std::sync::Arc; +use tracing::{debug, error, instrument, warn}; + +/// Alias for Optional type for Vote Collectors +type VoteCollectorOption = Option>; + +#[derive(Snafu, Debug)] +/// Error type for consensus tasks +pub struct ConsensusTaskError {} + +/// Tracks state of a DA task +pub struct UpgradeTaskState< + TYPES: NodeType, + I: NodeImplementation, + A: ConsensusApi + 'static, +> { + /// The state's api + pub api: A, + /// Global registry task for the state + pub registry: GlobalRegistry, + + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + /// Network for all nodes + pub quorum_network: Arc, + + /// Whether we should vote affirmatively on a given upgrade proposal (true) or not (false) + pub should_vote: fn(UpgradeProposalData) -> bool, + + /// The current vote collection task, if there is one. + pub vote_collector: + RwLock, UpgradeCertificate>>, + + /// Global events stream to publish events + pub event_stream: ChannelStream>, + + /// This Nodes public key + pub public_key: TYPES::SignatureKey, + + /// This Nodes private key + pub private_key: ::PrivateKey, + + /// This state's ID + pub id: u64, +} + +impl, A: ConsensusApi + 'static> + UpgradeTaskState +{ + /// main task event handler + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] + pub async fn handle_event( + &mut self, + event: HotShotEvent, + ) -> Option { + match event { + HotShotEvent::UpgradeProposalRecv(proposal, sender) => { + let should_vote = self.should_vote; + // If the proposal does not match our upgrade target, we immediately exit. + if !should_vote(proposal.data.upgrade_proposal.clone()) { + warn!( + "Received unexpected upgrade proposal:\n{:?}", + proposal.data + ); + return None; + } + + // If we have an upgrade target, we validate that the proposal is relevant for the current view. + + debug!( + "Upgrade proposal received for view: {:?}", + proposal.data.get_view_number() + ); + // NOTE: Assuming that the next view leader is the one who sends an upgrade proposal for this view + let view = proposal.data.get_view_number(); + + // Allow an upgrade proposal that is one view older, in case we have voted on a quorum + // proposal and updated the view. + // `self.cur_view` should be at least 1 since there is a view change before getting + // the `UpgradeProposalRecv` event. Otherewise, the view number subtraction below will + // cause an overflow error. + // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block + + if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { + warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", + view, + self.cur_view + ); + return None; + } + + // We then validate that the proposal was issued by the leader for the view. + let view_leader_key = self.quorum_membership.get_leader(view); + if view_leader_key != sender { + error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); + return None; + } + + // At this point, we've checked that: + // * the proposal was expected, + // * the proposal is valid, and + // * the proposal is recent, + // so we notify the application layer + self.api + .send_event(Event { + view_number: self.cur_view, + event: EventType::UpgradeProposal { + proposal: proposal.clone(), + sender: sender.clone(), + }, + }) + .await; + + // If everything is fine up to here, we generate and send a vote on the proposal. + let Ok(vote) = UpgradeVote::create_signed_vote( + proposal.data.upgrade_proposal, + view, + &self.public_key, + &self.private_key, + ) else { + error!("Failed to sign UpgradeVote!"); + return None; + }; + debug!("Sending upgrade vote {:?}", vote.get_view_number()); + self.event_stream + .publish(HotShotEvent::UpgradeVoteSend(vote)) + .await; + } + HotShotEvent::UpgradeVoteRecv(ref vote) => { + debug!("Upgrade vote recv, Main Task {:?}", vote.get_view_number()); + // Check if we are the leader. + let view = vote.get_view_number(); + if self.quorum_membership.get_leader(view) != self.public_key { + error!( + "We are not the leader for view {} are we leader for next view? {}", + *view, + self.quorum_membership.get_leader(view + 1) == self.public_key + ); + return None; + } + let mut collector = self.vote_collector.write().await; + + let maybe_task = collector.take(); + + if maybe_task.is_none() + || vote.get_view_number() > maybe_task.as_ref().unwrap().view + { + debug!("Starting vote handle for view {:?}", vote.get_view_number()); + let info = AccumulatorInfo { + public_key: self.public_key.clone(), + membership: self.quorum_membership.clone(), + view: vote.get_view_number(), + event_stream: self.event_stream.clone(), + id: self.id, + registry: self.registry.clone(), + }; + *collector = create_vote_accumulator::< + TYPES, + UpgradeVote, + UpgradeCertificate, + >(&info, vote.clone(), event) + .await; + } else { + let result = maybe_task.unwrap().handle_event(event.clone()).await; + + if result.0 == Some(HotShotTaskCompleted::ShutDown) { + // The protocol has finished + return None; + } + *collector = Some(result.1); + } + } + HotShotEvent::ViewChange(view) => { + if *self.cur_view >= *view { + return None; + } + + if *view - *self.cur_view > 1 { + warn!("View changed by more than 1 going to view {:?}", view); + } + self.cur_view = view; + + return None; + } + HotShotEvent::Shutdown => { + error!("Shutting down because of shutdown signal!"); + return Some(HotShotTaskCompleted::ShutDown); + } + _ => { + error!("unexpected event {:?}", event); + } + } + None + } + + /// Filter the upgrade event. + pub fn filter(event: &HotShotEvent) -> bool { + matches!( + event, + HotShotEvent::UpgradeProposalRecv(_, _) + | HotShotEvent::UpgradeVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewChange(_) + ) + } +} + +/// task state implementation for DA Task +impl, A: ConsensusApi + 'static> TS + for UpgradeTaskState +{ +} + +/// Type alias for DA Task Types +pub type UpgradeTaskTypes = HSTWithEvent< + ConsensusTaskError, + HotShotEvent, + ChannelStream>, + UpgradeTaskState, +>; diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 93986834e8..e41f34cdba 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -11,11 +11,11 @@ use hotshot_task::{ }; use hotshot_types::{ simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, - ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{election::Membership, node_implementation::NodeType}, @@ -233,6 +233,9 @@ type DAVoteState = VoteCollectionTaskState, DACertif /// Alias for Timeout vote accumulator type TimeoutVoteState = VoteCollectionTaskState, TimeoutCertificate>; +/// Alias for upgrade vote accumulator +type UpgradeVoteState = + VoteCollectionTaskState, UpgradeCertificate>; /// Alias for View Sync Pre Commit vote accumulator type ViewSyncPreCommitState = VoteCollectionTaskState< TYPES, @@ -263,6 +266,20 @@ impl AggregatableVote, QuorumCertifica } } +impl AggregatableVote, UpgradeCertificate> + for UpgradeVote +{ + fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.get_leader(self.get_view_number()) + } + fn make_cert_event( + certificate: UpgradeCertificate, + _key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::UpgradeCertificateFormed(certificate) + } +} + impl AggregatableVote, DACertificate> for DAVote { @@ -355,6 +372,25 @@ impl HandleVoteEvent, QuorumCertificat } } +// Handlers for all vote accumulators +#[async_trait] +impl HandleVoteEvent, UpgradeCertificate> + for UpgradeVoteState +{ + async fn handle_event( + self, + event: HotShotEvent, + ) -> (Option, UpgradeVoteState) { + match event { + HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(&vote).await, + _ => (None, self), + } + } + fn filter(event: &HotShotEvent) -> bool { + matches!(event, HotShotEvent::UpgradeVoteRecv(_)) + } +} + #[async_trait] impl HandleVoteEvent, DACertificate> for DAVoteState diff --git a/types/src/event.rs b/types/src/event.rs index 7b4b55a754..2063e374a9 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,7 +1,7 @@ //! Events that a `HotShot` instance can emit use crate::{ - data::{DAProposal, Leaf, QuorumProposal}, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate, @@ -87,4 +87,12 @@ pub enum EventType { /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, + /// Upgrade proposal was received from the network + /// or submitted to the network by us + UpgradeProposal { + /// Contents of the proposal + proposal: Proposal>, + /// Public key of the leader submitting the proposal + sender: TYPES::SignatureKey, + }, } diff --git a/types/src/message.rs b/types/src/message.rs index 97f7f94ad9..73d64f8a52 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -5,7 +5,7 @@ use crate::data::{QuorumProposal, UpgradeProposal}; use crate::simple_certificate::{ - DACertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }; use crate::simple_vote::{ @@ -173,7 +173,6 @@ impl ProcessedGeneralConsensusMessage { GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) => unimplemented!(), GeneralConsensusMessage::ViewSyncCommitCertificate(_) => unimplemented!(), GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => unimplemented!(), - GeneralConsensusMessage::UpgradeCertificate(_) => unimplemented!(), GeneralConsensusMessage::UpgradeProposal(_) => unimplemented!(), GeneralConsensusMessage::UpgradeVote(_) => unimplemented!(), } @@ -287,11 +286,8 @@ pub enum GeneralConsensusMessage { /// Message with a Timeout vote TimeoutVote(TimeoutVote), - /// Message with an upgrade certificate - UpgradeCertificate(UpgradeCertificate), - /// Message with an upgrade proposal - UpgradeProposal(UpgradeProposal), + UpgradeProposal(Proposal>), /// Message with an upgrade vote UpgradeVote(UpgradeVote), @@ -357,10 +353,7 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.get_view_number() } - GeneralConsensusMessage::UpgradeCertificate(message) => { - message.get_view_number() - } - GeneralConsensusMessage::UpgradeProposal(message) => message.get_view_number(), + GeneralConsensusMessage::UpgradeProposal(message) => message.data.get_view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), } } @@ -403,8 +396,7 @@ impl SequencingMessage { MessagePurpose::ViewSyncCertificate } - GeneralConsensusMessage::UpgradeCertificate(_) - | GeneralConsensusMessage::UpgradeProposal(_) + GeneralConsensusMessage::UpgradeProposal(_) | GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::Upgrade, }, Right(committee_message) => match committee_message { From 94ff69a26a999ca2b2e2c291a859b687774b432b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 9 Feb 2024 16:17:14 -0500 Subject: [PATCH 0775/1393] Speed Up DHT Retrieval (#2555) * Only wait for required number of records * Lower timeout and lint * more linting/remove `post_identity` from ochestrator * fix webserver * Always Remove query when it's completed * spanw a task for cache set * Actually block again * block * give a little more time for libp2p * clone the cache and spawn task for insert --- hotshot/src/tasks/mod.rs | 2 +- .../traits/networking/web_server_network.rs | 9 +- .../src/network/behaviours/dht/cache.rs | 2 + .../src/network/behaviours/dht/mod.rs | 46 +++++++--- orchestrator/api.toml | 9 -- orchestrator/src/lib.rs | 88 +------------------ task-impls/src/transactions.rs | 5 +- task-impls/src/upgrade.rs | 5 +- testing/tests/libp2p.rs | 4 +- types/src/message.rs | 4 +- 10 files changed, 49 insertions(+), 125 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 270ed1f0c4..05c053f09a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -381,7 +381,7 @@ pub async fn add_vid_task>( /// /// # Panics /// -/// Uses .unwrap(), though this should never panic. +/// Uses .`unwrap()`, though this should never panic. pub async fn add_upgrade_task>( task_runner: TaskRunner, event_stream: ChannelStream>, diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 1de57a26a3..604e3f109d 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -317,7 +317,7 @@ impl Inner { } return false; } - MessagePurpose::Vote => { + MessagePurpose::Vote | MessagePurpose::ViewSyncVote => { let vote = deserialized_message.clone(); *vote_index += 1; direct_poll_queue.write().await.push(vote); @@ -350,13 +350,6 @@ impl Inner { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view return true; } - MessagePurpose::ViewSyncVote => { - let vote = deserialized_message.clone(); - *vote_index += 1; - direct_poll_queue.write().await.push(vote); - - return false; - } MessagePurpose::ViewSyncCertificate => { // TODO ED Special case this for view sync // TODO ED Need to add vote indexing to web server for view sync certs diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs index f33c857f89..9afde9bd7c 100644 --- a/libp2p-networking/src/network/behaviours/dht/cache.rs +++ b/libp2p-networking/src/network/behaviours/dht/cache.rs @@ -65,6 +65,8 @@ impl Default for Cache { } /// key value cache + +#[derive(Clone)] pub struct Cache { /// the cache's config config: Config, diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 78a902143c..b62cdc26e4 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -8,7 +8,7 @@ use std::{ /// a local caching layer for the DHT key value pairs mod cache; -use async_compatibility_layer::art::async_block_on; +use async_compatibility_layer::art::{async_block_on, async_spawn}; use futures::channel::oneshot::Sender; use libp2p::kad::Behaviour as KademliaBehaviour; use libp2p::kad::Event as KademliaEvent; @@ -270,7 +270,7 @@ impl DHTBehaviour { /// update state based on recv-ed get query fn handle_get_query(&mut self, record_results: GetRecordResult, id: QueryId, mut last: bool) { - if let Some(query) = self.in_progress_get_record_queries.get_mut(&id) { + let num = if let Some(query) = self.in_progress_get_record_queries.get_mut(&id) { match record_results { Ok(results) => match results { GetRecordOk::FoundRecord(record) => { @@ -278,27 +278,34 @@ impl DHTBehaviour { std::collections::hash_map::Entry::Occupied(mut o) => { let num_entries = o.get_mut(); *num_entries += 1; + *num_entries } std::collections::hash_map::Entry::Vacant(v) => { v.insert(1); + 1 } } } GetRecordOk::FinishedWithNoAdditionalRecord { cache_candidates: _, - } => last = true, + } => { + tracing::debug!("GetRecord Finished with No Additional Record"); + last = true; + 0 + } }, Err(err) => { error!("GOT ERROR IN KAD QUERY {:?}", err); + 0 } } } else { // inactive entry return; - } + }; // BUG - if last { + if num > NUM_REPLICATED_TO_TRUST { if let Some(KadGetQuery { backoff, progress, @@ -323,18 +330,17 @@ impl DHTBehaviour { .find(|(_, v)| *v >= NUM_REPLICATED_TO_TRUST) { // insert into cache - async_block_on(self.cache.insert(key, r.clone())); + // TODO we should find a better place to set the cache + // https://github.com/EspressoSystems/HotShot/issues/2554 + let cache = self.cache.clone(); + let val = r.clone(); + async_spawn(async move { cache.insert(key, val).await }); // return value if notify.send(r).is_err() { error!("Get DHT: channel closed before get record request result could be sent"); } } - // lack of replication => error - else if records_len < NUM_REPLICATED_TO_TRUST { - error!("Get DHT: Record not replicated enough for {:?}! requerying with more nodes", progress); - self.get_record(key, notify, num_replicas, backoff, retry_count); - } // many records that don't match => disagreement else if records_len > MAX_DHT_QUERY_SIZE { error!( @@ -354,6 +360,24 @@ impl DHTBehaviour { error!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes", progress); } } + } else if last { + if let Some(KadGetQuery { + backoff, + progress, + notify, + num_replicas, + key, + retry_count, + records, + }) = self.in_progress_get_record_queries.remove(&id) + { + let records_len = records.iter().fold(0, |acc, (_k, v)| acc + v); + // lack of replication => error + if records_len < NUM_REPLICATED_TO_TRUST { + error!("Get DHT: Record not replicated enough for {:?}! requerying with more nodes", progress); + self.get_record(key, notify, num_replicas, backoff, retry_count); + } + } } } diff --git a/orchestrator/api.toml b/orchestrator/api.toml index e9bc32c270..e855a07369 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -3,15 +3,6 @@ NAME = "orchestrator" DESCRIPTION = "Orchestrator for HotShot" FORMAT_VERSION = "0.1.0" -# POST node's identity -[route.postidentity] -PATH = ["identity/:identity"] -METHOD = "POST" -":identity" = "Literal" -DOC = """ -POST a node's identity (IP address) to the orchestrator. Returns the node's node_index -""" - # POST retrieve the network configuration [route.post_getconfig] PATH = ["config/:node_index"] diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 0453fecbd5..2d9a27eec7 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -7,14 +7,10 @@ pub mod config; use async_lock::RwLock; use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey}; -use std::{ - io, - io::ErrorKind, - net::{IpAddr, SocketAddr}, -}; +use std::{io, io::ErrorKind}; use tide_disco::{Api, App}; -use surf_disco::{error::ClientError, Url}; +use surf_disco::Url; use tide_disco::{ api::ApiError, error::ServerError, @@ -46,8 +42,6 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { /// The state of the orchestrator #[derive(Default, Clone)] struct OrchestratorState { - /// Tracks the latest node index we have generated a configuration for - latest_index: u16, /// The network configuration config: NetworkConfig, /// Whether nodes should start their HotShot instances @@ -55,8 +49,6 @@ struct OrchestratorState { start: bool, /// The total nodes that have posted they are ready to start pub nodes_connected: u64, - /// connection to the web server - client: Option>, } impl @@ -64,27 +56,16 @@ impl { /// create a new [`OrchestratorState`] pub fn new(network_config: NetworkConfig) -> Self { - let mut web_client = None; - if network_config.web_server_config.is_some() { - let base_url = "http://0.0.0.0/9000".to_string().parse().unwrap(); - web_client = Some(surf_disco::Client::::new(base_url)); - } OrchestratorState { - latest_index: 0, config: network_config, start: false, nodes_connected: 0, - client: web_client, } } } /// An api exposed by the orchestrator pub trait OrchestratorApi { - /// post endpoint for identity - /// # Errors - /// if unable to serve - fn post_identity(&mut self, identity: IpAddr) -> Result; /// post endpoint for each node's config /// # Errors /// if unable to serve @@ -111,56 +92,6 @@ where KEY: serde::Serialize + Clone + SignatureKey, ELECTION: serde::Serialize + Clone + Send + ElectionConfig, { - fn post_identity(&mut self, identity: IpAddr) -> Result { - let node_index = self.latest_index; - self.latest_index += 1; - - // TODO https://github.com/EspressoSystems/HotShot/issues/850 - if usize::from(node_index) >= self.config.config.total_nodes.get() { - return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, - message: "Network has reached capacity".to_string(), - }); - } - - //add new node's key to stake table - if self.config.web_server_config.clone().is_some() { - let new_key = &self.config.config.my_own_validator_config.public_key; - let client_clone = self.client.clone().unwrap(); - async move { - client_clone - .post::<()>("api/staketable") - .body_binary(&new_key) - .unwrap() - .send() - .await - } - .boxed(); - } - - if self.config.libp2p_config.clone().is_some() { - let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); - // Designate node as bootstrap node and store its identity information - if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { - let port_index = if libp2p_config_clone.index_ports { - node_index - } else { - 0 - }; - let socketaddr = - SocketAddr::new(identity, libp2p_config_clone.base_port + port_index); - let keypair = libp2p_generate_indexed_identity(self.config.seed, node_index.into()); - self.config - .libp2p_config - .as_mut() - .unwrap() - .bootstrap_nodes - .push((socketaddr, keypair.to_protobuf_encoding().unwrap())); - } - } - Ok(node_index) - } - // Assumes nodes will set their own index that they received from the // 'identity' endpoint fn post_getconfig( @@ -229,20 +160,7 @@ where ))) .expect("API file is not valid toml"); let mut api = Api::::new(api_toml)?; - api.post("postidentity", |req, state| { - async move { - let identity = req.string_param("identity")?.parse::(); - if identity.is_err() { - return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, - message: "Identity is not a properly formed IP address".to_string(), - }); - } - state.post_identity(identity.unwrap()) - } - .boxed() - })? - .post("post_getconfig", |req, state| { + api.post("post_getconfig", |req, state| { async move { let node_index = req.integer_param("node_index")?; state.post_getconfig(node_index) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4bb3d46091..32b9c0c492 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -270,10 +270,7 @@ impl, A: ConsensusApi + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - async fn wait_for_transactions( - &self, - _parent_leaf: Leaf, - ) -> Option> { + async fn wait_for_transactions(&self, _: Leaf) -> Option> { let task_start_time = Instant::now(); // TODO (Keyao) Investigate the use of transaction hash diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 0b223a5f04..ec0dea8231 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -88,10 +88,7 @@ impl, A: ConsensusApi + let should_vote = self.should_vote; // If the proposal does not match our upgrade target, we immediately exit. if !should_vote(proposal.data.upgrade_proposal.clone()) { - warn!( - "Received unexpected upgrade proposal:\n{:?}", - proposal.data - ); + warn!("Received unexpected upgrade proposal:\n{:?}", proposal.data); return None; } diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index f7c4650648..89448e1c09 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -61,11 +61,11 @@ async fn libp2p_network_failures_2() { }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::new(240, 0), + duration: Duration::from_secs(240), }, ), timing_data: TimingData { - next_view_timeout: 25000, + next_view_timeout: 4000, propose_max_round_time: Duration::from_millis(100), ..Default::default() }, diff --git a/types/src/message.rs b/types/src/message.rs index 73d64f8a52..aa2d67982a 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -353,7 +353,9 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.get_view_number() } - GeneralConsensusMessage::UpgradeProposal(message) => message.data.get_view_number(), + GeneralConsensusMessage::UpgradeProposal(message) => { + message.data.get_view_number() + } GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), } } From 9059fc49fdb1970aed504ea63f071960cb163b7f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 9 Feb 2024 19:16:53 -0500 Subject: [PATCH 0776/1393] [CX_CLEANUP] Integrate new `Task`, in the code and remove the old (#2493) * lint up to missing docs in task-impls * All builds but the tests~ * All builds but the tests in testing * linting * Test can run, but they fail * some fixes * cargo fix * replace broadcast with broadcast_direct * test almost running correctly, maybe not passing * Add Tasks to project, success test passes * test can pass * fixing tests, basic passes except unit tests * fixed consensus task test * fixing other task tests * Remove more cruft * replace event sending with helper for better errors * lint after merge * fixing unit tests and lints for both async types * remove superfluous log and fix doc * Remove Cargo.lock from tasks * cleanup * Addressing comments * Update tasks based on feedback * remove outdated # Panics doc comments * missed on # Panics * Reveiw comments minus filters * Lower a log level, fix task/cargo.toml * Remove todo --- constants/src/lib.rs | 3 + hotshot/Cargo.toml | 3 +- hotshot/examples/infra/mod.rs | 5 +- hotshot/src/lib.rs | 173 ++-- hotshot/src/tasks/mod.rs | 499 +++------ .../src/traits/networking/combined_network.rs | 3 +- .../src/traits/networking/libp2p_network.rs | 3 +- .../src/traits/networking/memory_network.rs | 7 +- .../traits/networking/web_server_network.rs | 3 +- hotshot/src/types/handle.rs | 66 +- task-impls/Cargo.toml | 3 +- task-impls/src/consensus.rs | 282 +++-- task-impls/src/da.rs | 94 +- task-impls/src/events.rs | 4 + task-impls/src/harness.rs | 117 ++- task-impls/src/helpers.rs | 20 + task-impls/src/lib.rs | 2 +- task-impls/src/network.rs | 223 ++-- task-impls/src/transactions.rs | 76 +- task-impls/src/upgrade.rs | 88 +- task-impls/src/vid.rs | 93 +- task-impls/src/view_sync.rs | 369 +++---- task-impls/src/vote.rs | 156 +-- task/Cargo.toml | 25 +- task/src/dependency.rs | 270 +++++ task/src/dependency_task.rs | 140 +++ task/src/event_stream.rs | 268 ----- task/src/global_registry.rs | 214 ---- task/src/lib.rs | 389 +------ task/src/task.rs | 963 +++++++----------- task/src/task_impls.rs | 457 --------- task/src/task_launcher.rs | 68 -- task/src/task_state.rs | 182 ---- testing/Cargo.toml | 5 +- testing/src/completion_task.rs | 143 +-- testing/src/lib.rs | 14 - testing/src/overall_safety_task.rs | 449 ++++---- testing/src/per_node_safety_task.rs | 258 ----- testing/src/soundness_task.rs | 1 - testing/src/spinning_task.rs | 274 ++--- testing/src/task_helpers.rs | 5 +- testing/src/test_builder.rs | 17 +- testing/src/test_launcher.rs | 129 +-- testing/src/test_runner.rs | 206 ++-- testing/src/timeout_task.rs | 1 - testing/src/txn_task.rs | 203 ++-- testing/src/view_sync_task.rs | 191 ++-- testing/tests/consensus_task.rs | 64 +- testing/tests/da_task.rs | 29 +- testing/tests/network_task.rs | 7 +- testing/tests/vid_task.rs | 34 +- testing/tests/view_sync_task.rs | 32 +- types/Cargo.toml | 1 - types/src/lib.rs | 19 +- types/src/traits/network.rs | 4 +- types/src/vote.rs | 12 +- 56 files changed, 2556 insertions(+), 4810 deletions(-) create mode 100644 task/src/dependency.rs create mode 100644 task/src/dependency_task.rs delete mode 100644 task/src/event_stream.rs delete mode 100644 task/src/global_registry.rs delete mode 100644 task/src/task_impls.rs delete mode 100644 task/src/task_launcher.rs delete mode 100644 task/src/task_state.rs delete mode 100644 testing/src/per_node_safety_task.rs delete mode 100644 testing/src/soundness_task.rs delete mode 100644 testing/src/timeout_task.rs diff --git a/constants/src/lib.rs b/constants/src/lib.rs index 3a44c0902a..621df3cdfe 100644 --- a/constants/src/lib.rs +++ b/constants/src/lib.rs @@ -28,3 +28,6 @@ pub struct Version { /// Constant for protocol version 0.1. pub const VERSION_0_1: Version = Version { major: 0, minor: 1 }; + +/// Default Channel Size for consensus event sharing +pub const EVENT_CHANNEL_SIZE: usize = 100_000; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index d80337bd88..546489612c 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -79,6 +79,7 @@ name = "orchestrator-combined" path = "examples/combined/orchestrator.rs" [dependencies] +async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } @@ -96,7 +97,6 @@ hotshot-web-server = { version = "0.1.1", path = "../web_server", default-featur hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types", version = "0.1.0", default-features = false } hotshot-utils = { path = "../utils" } -hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } @@ -108,6 +108,7 @@ time = { workspace = true } derive_more = "0.99.17" portpicker = "0.1.1" lru = "0.12.2" +hotshot-task = { path = "../task" } tracing = { workspace = true } diff --git a/hotshot/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs index 6e391b72b8..4d97d4cc3e 100644 --- a/hotshot/examples/infra/mod.rs +++ b/hotshot/examples/infra/mod.rs @@ -23,7 +23,6 @@ use hotshot_orchestrator::{ client::{OrchestratorClient, ValidatorArgs}, config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; -use hotshot_task::task::FilterEvent; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestInstanceState, @@ -393,7 +392,7 @@ pub trait RunDA< /// Starts HotShot consensus, returns when consensus has finished async fn run_hotshot( &self, - mut context: SystemContextHandle, + context: SystemContextHandle, transactions: &mut Vec, transactions_to_send_per_round: u64, ) { @@ -413,7 +412,7 @@ pub trait RunDA< error!("Starting HotShot example!"); let start = Instant::now(); - let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; + let mut event_stream = context.get_event_stream(); let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 7d93a97449..e948c47879 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -20,19 +20,19 @@ use crate::{ traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, }; +use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; use futures::join; -use hotshot_constants::VERSION_0_1; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task_launcher::TaskRunner, -}; -use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; +use hotshot_constants::{EVENT_CHANNEL_SIZE, VERSION_0_1}; +use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::helpers::broadcast_event; +use hotshot_task_impls::network; +use hotshot_task::task::TaskRegistry; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, data::Leaf, @@ -60,7 +60,7 @@ use std::{ time::Duration, }; use tasks::add_vid_task; -use tracing::{debug, info, instrument, trace}; +use tracing::{debug, instrument, trace}; // -- Rexports // External @@ -122,7 +122,7 @@ pub struct SystemContextInner> { private_key: ::PrivateKey, /// Configuration items for this hotshot instance - config: HotShotConfig, + pub config: HotShotConfig, /// This `HotShot` instance's storage backend storage: I::Storage, @@ -141,13 +141,16 @@ pub struct SystemContextInner> { // global_registry: GlobalRegistry, /// Access to the output event stream. - output_event_stream: ChannelStream>, + pub output_event_stream: (Sender>, InactiveReceiver>), /// access to the internal event stream, in case we need to, say, shut something down - internal_event_stream: ChannelStream>, + internal_event_stream: ( + Sender>, + InactiveReceiver>, + ), /// uid for instrumentation - id: u64, + pub id: u64, } /// Thread safe, shared view of a `HotShot` @@ -236,6 +239,13 @@ impl> SystemContext { }; let consensus = Arc::new(RwLock::new(consensus)); + let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); + let (mut external_tx, external_rx) = broadcast(EVENT_CHANNEL_SIZE); + + // This makes it so we won't block on broadcasting if there is not a receiver + // Our own copy of the receiver is inactive so it doesn't count. + external_tx.set_await_active(false); + let inner: Arc> = Arc::new(SystemContextInner { id: nonce, consensus, @@ -246,21 +256,27 @@ impl> SystemContext { networks: Arc::new(networks), memberships: Arc::new(memberships), _metrics: consensus_metrics.clone(), - internal_event_stream: ChannelStream::new(), - output_event_stream: ChannelStream::new(), + internal_event_stream: (internal_tx, internal_rx.deactivate()), + output_event_stream: (external_tx, external_rx.deactivate()), }); Ok(Self { inner }) } /// "Starts" consensus by sending a `QCFormed` event + /// + /// # Panics + /// Panics if sending genesis fails pub async fn start_consensus(&self) { + debug!("Starting Consensus"); self.inner .internal_event_stream - .publish(HotShotEvent::QCFormed(either::Left( + .0 + .broadcast_direct(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), ))) - .await; + .await + .expect("Genesis Broadcast failed"); } /// Emit an external event @@ -268,7 +284,7 @@ impl> SystemContext { // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); - self.inner.output_event_stream.publish(event).await; + broadcast_event(event, &self.inner.output_event_stream.0).await; } /// Publishes a transaction asynchronously to the network @@ -397,7 +413,8 @@ impl> SystemContext { ) -> Result< ( SystemContextHandle, - ChannelStream>, + Sender>, + Receiver>, ), HotShotError, > { @@ -415,9 +432,9 @@ impl> SystemContext { ) .await?; let handle = hotshot.clone().run_tasks().await; - let internal_event_stream = hotshot.inner.internal_event_stream.clone(); + let (tx, rx) = hotshot.inner.internal_event_stream.clone(); - Ok((handle, internal_event_stream)) + Ok((handle, tx, rx.activate())) } /// return the timeout for a view for `self` #[must_use] @@ -439,8 +456,7 @@ impl> SystemContext { #[allow(clippy::too_many_lines)] pub async fn run_tasks(self) -> SystemContextHandle { // ED Need to set first first number to 1, or properly trigger the change upon start - let task_runner = TaskRunner::new(); - let registry = task_runner.registry.clone(); + let registry = Arc::new(TaskRegistry::default()); let output_event_stream = self.inner.output_event_stream.clone(); let internal_event_stream = self.inner.internal_event_stream.clone(); @@ -452,80 +468,97 @@ impl> SystemContext { let vid_membership = self.inner.memberships.vid_membership.clone(); let view_sync_membership = self.inner.memberships.view_sync_membership.clone(); + let (event_tx, event_rx) = internal_event_stream.clone(); + let handle = SystemContextHandle { - registry, + registry: registry.clone(), output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), hotshot: self.clone(), storage: self.inner.storage.clone(), }; - let task_runner = add_network_message_task( - task_runner, - internal_event_stream.clone(), - quorum_network.clone(), - ) - .await; - let task_runner = add_network_message_task( - task_runner, - internal_event_stream.clone(), - da_network.clone(), - ) - .await; + add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; + add_network_message_task(registry.clone(), event_tx.clone(), da_network.clone()).await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), quorum_network.clone(), quorum_membership, - NetworkTaskKind::Quorum, + network::quorum_filter, ) .await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), da_network.clone(), da_membership, - NetworkTaskKind::Committee, + network::committee_filter, ) .await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), quorum_network.clone(), view_sync_membership, - NetworkTaskKind::ViewSync, + network::view_sync_filter, ) .await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), quorum_network.clone(), vid_membership, - NetworkTaskKind::VID, + network::vid_filter, ) .await; - let task_runner = add_consensus_task( - task_runner, - internal_event_stream.clone(), - output_event_stream.clone(), - handle.clone(), + add_consensus_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; + add_da_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; + add_vid_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; + add_transaction_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; + add_view_sync_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; + add_upgrade_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, ) .await; - let task_runner = - add_da_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = - add_vid_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = - add_transaction_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = - add_view_sync_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = - add_upgrade_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - async_spawn(async move { - let _ = task_runner.launch().await; - info!("Task runner exited!"); - }); handle } } @@ -563,7 +596,7 @@ impl> ConsensusApi async fn send_event(&self, event: Event) { debug!(?event, "send_event"); - self.inner.output_event_stream.publish(event).await; + broadcast_event(event, &self.inner.output_event_stream.0).await; } fn public_key(&self) -> &TYPES::SignatureKey { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 05c053f09a..aafa3b1d5b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,30 +1,19 @@ //! Provides a number of tasks that run continuously use crate::{types::SystemContextHandle, HotShotConsensusApi}; -use async_compatibility_layer::art::async_sleep; -use futures::FutureExt; -use hotshot_task::{ - boxed_sync, - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes}, - task_impls::TaskBuilder, - task_launcher::TaskRunner, - GeneratedStream, Merge, -}; +use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; + +use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ - consensus::{ - consensus_event_filter, CommitmentAndMetadata, ConsensusTaskState, ConsensusTaskTypes, - }, - da::{DATaskState, DATaskTypes}, + consensus::{CommitmentAndMetadata, ConsensusTaskState}, + da::DATaskState, events::HotShotEvent, - network::{ - NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, - NetworkMessageTaskTypes, NetworkTaskKind, - }, - transactions::{TransactionTaskState, TransactionsTaskTypes}, - upgrade::{UpgradeTaskState, UpgradeTaskTypes}, - vid::{VIDTaskState, VIDTaskTypes}, - view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, + network::{NetworkEventTaskState, NetworkMessageTaskState}, + transactions::TransactionTaskState, + upgrade::UpgradeTaskState, + vid::VIDTaskState, + view_sync::ViewSyncTaskState, }; use hotshot_types::traits::election::Membership; use hotshot_types::{ @@ -56,165 +45,96 @@ pub enum GlobalEvent { } /// Add the network task to handle messages and publish events. -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_message_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + event_stream: Sender>, channel: NET, -) -> TaskRunner { +) { let net = channel.clone(); - let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { - let network = net.clone(); - let closure = async move { - loop { - let msgs = match network.recv_msgs(TransmitType::Broadcast).await { - Ok(msgs) => Messages(msgs), - Err(err) => { - error!("failed to receive broadcast messages: {err}"); + let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { + event_stream: event_stream.clone(), + }; - // return zero messages so we sleep and try again - Messages(vec![]) - } - }; + // TODO we don't need two async tasks for this, we should combine the + // by getting rid of `TransmitType` + // https://github.com/EspressoSystems/HotShot/issues/2377 + let network = net.clone(); + let mut state = network_state.clone(); + let broadcast_handle = async_spawn(async move { + loop { + let msgs = match network.recv_msgs(TransmitType::Broadcast).await { + Ok(msgs) => Messages(msgs), + Err(err) => { + error!("failed to receive broadcast messages: {err}"); - if msgs.0.is_empty() { - async_sleep(Duration::from_millis(100)).await; - } else { - break msgs; + // return zero messages so we sleep and try again + Messages(vec![]) } + }; + if msgs.0.is_empty() { + // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 + async_sleep(Duration::from_millis(100)).await; + } else { + state.handle_messages(msgs.0).await; } - }; - Some(boxed_sync(closure)) - })); - let net = channel.clone(); - let direct_stream = GeneratedStream::>::new(Arc::new(move || { - let network = net.clone(); - let closure = async move { - loop { - let msgs = match network.recv_msgs(TransmitType::Direct).await { - Ok(msgs) => Messages(msgs), - Err(err) => { - error!("failed to receive direct messages: {err}"); + } + }); + let network = net.clone(); + let mut state = network_state.clone(); + let direct_handle = async_spawn(async move { + loop { + let msgs = match network.recv_msgs(TransmitType::Direct).await { + Ok(msgs) => Messages(msgs), + Err(err) => { + error!("failed to receive direct messages: {err}"); - // return zero messages so we sleep and try again - Messages(vec![]) - } - }; - if msgs.0.is_empty() { - async_sleep(Duration::from_millis(100)).await; - } else { - break msgs; + // return zero messages so we sleep and try again + Messages(vec![]) } - } - }; - Some(boxed_sync(closure)) - })); - let message_stream = Merge::new(broadcast_stream, direct_stream); - let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { - event_stream: event_stream.clone(), - }; - let registry = task_runner.registry.clone(); - let network_message_handler = HandleMessage(Arc::new( - move |messages: either::Either, Messages>, - mut state: NetworkMessageTaskState| { - let messages = match messages { - either::Either::Left(messages) | either::Either::Right(messages) => messages, }; - async move { - state.handle_messages(messages.0).await; - (None, state) + if msgs.0.is_empty() { + // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 + async_sleep(Duration::from_millis(100)).await; + } else { + state.handle_messages(msgs.0).await; } - .boxed() - }, - )); - let networking_name = "Networking Task"; - - let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) - .register_message_stream(message_stream) - .register_registry(&mut registry.clone()) - .await - .register_state(network_state) - .register_message_handler(network_message_handler); - - // impossible for unwraps to fail - // we *just* registered - let networking_task_id = networking_task_builder.get_task_id().unwrap(); - let networking_task = NetworkMessageTaskTypes::build(networking_task_builder).launch(); - - task_runner.add_task( - networking_task_id, - networking_name.to_string(), - networking_task, - ) + } + }); + task_reg.register(direct_handle).await; + task_reg.register(broadcast_handle).await; } /// Add the network task to handle events and send messages. -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_network_event_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, channel: NET, membership: TYPES::Membership, - task_kind: NetworkTaskKind, -) -> TaskRunner { - let filter = NetworkEventTaskState::::filter(task_kind); + filter: fn(&HotShotEvent) -> bool, +) { let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { channel, - event_stream: event_stream.clone(), view: TYPES::Time::genesis(), + membership, + filter, }; - let registry = task_runner.registry.clone(); - let network_event_handler = HandleEvent(Arc::new( - move |event, mut state: NetworkEventTaskState<_, _>| { - let mem = membership.clone(); - - async move { - let completion_status = state.handle_event(event, &mem).await; - (completion_status, state) - } - .boxed() - }, - )); - let networking_name = "Networking Task"; - - let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) - .register_event_stream(event_stream.clone(), filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(network_state) - .register_event_handler(network_event_handler); - - // impossible for unwraps to fail - // we *just* registered - let networking_task_id = networking_task_builder.get_task_id().unwrap(); - let networking_task = NetworkEventTaskTypes::build(networking_task_builder).launch(); - - task_runner.add_task( - networking_task_id, - networking_name.to_string(), - networking_task, - ) + let task = Task::new(tx, rx, task_reg.clone(), network_state); + task_reg.run_task(task).await; } -/// add the consensus task +/// Create the consensus task state /// # Panics -/// Is unable to panic. This section here is just to satisfy clippy -pub async fn add_consensus_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - output_stream: ChannelStream>, - handle: SystemContextHandle, -) -> TaskRunner { +/// If genesis payload can't be encoded. This should not be possible +pub async fn create_consensus_state>( + output_stream: Sender>, + handle: &SystemContextHandle, +) -> ConsensusTaskState> { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); + let (payload, metadata) = ::genesis(); // Impossible for `unwrap` to fail on the genesis payload. let payload_commitment = vid_commitment( @@ -228,7 +148,6 @@ pub async fn add_consensus_task>( ); // build the consensus task let consensus_state = ConsensusTaskState { - registry: registry.clone(), consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), @@ -243,7 +162,6 @@ pub async fn add_consensus_task>( timeout_vote_collector: None.into(), timeout_task: None, timeout_cert: None, - event_stream: event_stream.clone(), output_event_stream: output_stream, vid_shares: HashMap::new(), current_proposal: None, @@ -283,58 +201,34 @@ pub async fn add_consensus_task>( .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) .await; + consensus_state +} - let filter = FilterEvent(Arc::new(consensus_event_filter)); - let consensus_name = "Consensus Task"; - let consensus_event_handler = HandleEvent(Arc::new( - move |event, mut state: ConsensusTaskState>| { - async move { - if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - state.handle_event(event).await; - (None, state) - } - } - .boxed() - }, - )); - let consensus_task_builder = TaskBuilder::< - ConsensusTaskTypes>, - >::new(consensus_name.to_string()) - .register_event_stream(event_stream.clone(), filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(consensus_state) - .register_event_handler(consensus_event_handler); - // impossible for unwrap to fail - // we *just* registered - let consensus_task_id = consensus_task_builder.get_task_id().unwrap(); - let consensus_task = ConsensusTaskTypes::build(consensus_task_builder).launch(); - - task_runner.add_task( - consensus_task_id, - consensus_name.to_string(), - consensus_task, - ) +/// add the consensus task +pub async fn add_consensus_task>( + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { + let state = + create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), handle).await; + let task = Task::new(tx, rx, task_reg.clone(), state); + task_reg.run_task(task).await; } /// add the VID task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_vid_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - handle: SystemContextHandle, -) -> TaskRunner { + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let vid_state = VIDTaskState { - registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), cur_view: TYPES::Time::new(0), @@ -343,38 +237,11 @@ pub async fn add_vid_task>( membership: c_api.inner.memberships.vid_membership.clone().into(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), - event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; - let vid_event_handler = HandleEvent(Arc::new( - move |event, mut state: VIDTaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let vid_name = "VID Task"; - let vid_event_filter = FilterEvent(Arc::new( - VIDTaskState::>::filter, - )); - let vid_task_builder = - TaskBuilder::>>::new( - vid_name.to_string(), - ) - .register_event_stream(event_stream.clone(), vid_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(vid_state) - .register_event_handler(vid_event_handler); - // impossible for unwrap to fail - // we *just* registered - let vid_task_id = vid_task_builder.get_task_id().unwrap(); - let vid_task = VIDTaskTypes::build(vid_task_builder).launch(); - task_runner.add_task(vid_task_id, vid_name.to_string(), vid_task) + let task = Task::new(tx, rx, task_reg.clone(), vid_state); + task_reg.run_task(task).await; } /// add the Upgrade task. @@ -383,72 +250,41 @@ pub async fn add_vid_task>( /// /// Uses .`unwrap()`, though this should never panic. pub async fn add_upgrade_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - handle: SystemContextHandle, -) -> TaskRunner { + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let upgrade_state = UpgradeTaskState { api: c_api.clone(), - registry: registry.clone(), cur_view: TYPES::Time::new(0), quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), quorum_network: c_api.inner.networks.quorum_network.clone().into(), should_vote: |_upgrade_proposal| false, vote_collector: None.into(), - event_stream: event_stream.clone(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), id: handle.hotshot.inner.id, }; - let upgrade_event_handler = HandleEvent(Arc::new( - move |event, mut state: UpgradeTaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let upgrade_name = "Upgrade Task"; - let upgrade_event_filter = FilterEvent(Arc::new( - UpgradeTaskState::>::filter, - )); - - let upgrade_task_builder = TaskBuilder::< - UpgradeTaskTypes>, - >::new(upgrade_name.to_string()) - .register_event_stream(event_stream.clone(), upgrade_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(upgrade_state) - .register_event_handler(upgrade_event_handler); - // impossible for unwrap to fail - // we *just* registered - let upgrade_task_id = upgrade_task_builder.get_task_id().unwrap(); - let upgrade_task = UpgradeTaskTypes::build(upgrade_task_builder).launch(); - task_runner.add_task(upgrade_task_id, upgrade_name.to_string(), upgrade_task) + let task = Task::new(tx, rx, task_reg.clone(), upgrade_state); + task_reg.run_task(task).await; } /// add the Data Availability task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_da_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - handle: SystemContextHandle, -) -> TaskRunner { + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let da_state = DATaskState { - registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), da_membership: c_api.inner.memberships.da_membership.clone().into(), @@ -456,56 +292,27 @@ pub async fn add_da_task>( quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), cur_view: TYPES::Time::new(0), vote_collector: None.into(), - event_stream: event_stream.clone(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), id: handle.hotshot.inner.id, }; - let da_event_handler = HandleEvent(Arc::new( - move |event, mut state: DATaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let da_name = "DA Task"; - let da_event_filter = FilterEvent(Arc::new( - DATaskState::>::filter, - )); - let da_task_builder = TaskBuilder::>>::new( - da_name.to_string(), - ) - .register_event_stream(event_stream.clone(), da_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(da_state) - .register_event_handler(da_event_handler); - // impossible for unwrap to fail - // we *just* registered - let da_task_id = da_task_builder.get_task_id().unwrap(); - let da_task = DATaskTypes::build(da_task_builder).launch(); - task_runner.add_task(da_task_id, da_name.to_string(), da_task) + let task = Task::new(tx, rx, task_reg.clone(), da_state); + task_reg.run_task(task).await; } /// add the Transaction Handling task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - handle: SystemContextHandle, -) -> TaskRunner { + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { // build the transactions task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let transactions_state = TransactionTaskState { - registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), transactions: Arc::default(), @@ -515,53 +322,24 @@ pub async fn add_transaction_task> membership: c_api.inner.memberships.quorum_membership.clone().into(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), - event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; - let transactions_event_handler = HandleEvent(Arc::new( - move |event, mut state: TransactionTaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let transactions_name = "Transactions Task"; - let transactions_event_filter = FilterEvent(Arc::new( - TransactionTaskState::>::filter, - )); - let transactions_task_builder = TaskBuilder::< - TransactionsTaskTypes>, - >::new(transactions_name.to_string()) - .register_event_stream(event_stream.clone(), transactions_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(transactions_state) - .register_event_handler(transactions_event_handler); - // impossible for unwrap to fail - // we *just* registered - let da_task_id = transactions_task_builder.get_task_id().unwrap(); - let da_task = TransactionsTaskTypes::build(transactions_task_builder).launch(); - task_runner.add_task(da_task_id, transactions_name.to_string(), da_task) + let task = Task::new(tx, rx, task_reg.clone(), transactions_state); + task_reg.run_task(task).await; } /// add the view sync task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_view_sync_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - handle: SystemContextHandle, -) -> TaskRunner { + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; // build the view sync task let view_sync_state = ViewSyncTaskState { - registry: task_runner.registry.clone(), - event_stream: event_stream.clone(), current_view: TYPES::Time::new(0), next_view: TYPES::Time::new(0), network: api.inner.networks.quorum_network.clone().into(), @@ -578,42 +356,7 @@ pub async fn add_view_sync_task>( id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; - let registry = task_runner.registry.clone(); - let view_sync_event_handler = HandleEvent(Arc::new( - move |event, mut state: ViewSyncTaskState>| { - async move { - if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - state.handle_event(event).await; - (None, state) - } - } - .boxed() - }, - )); - let view_sync_name = "ViewSync Task"; - let view_sync_event_filter = FilterEvent(Arc::new( - ViewSyncTaskState::>::filter, - )); - - let view_sync_task_builder = TaskBuilder::< - ViewSyncTaskStateTypes>, - >::new(view_sync_name.to_string()) - .register_event_stream(event_stream.clone(), view_sync_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(view_sync_state) - .register_event_handler(view_sync_event_handler); - // impossible for unwrap to fail - // we *just* registered - let view_sync_task_id = view_sync_task_builder.get_task_id().unwrap(); - let view_sync_task = ViewSyncTaskStateTypes::build(view_sync_task_builder).launch(); - task_runner.add_task( - view_sync_task_id, - view_sync_name.to_string(), - view_sync_task, - ) + let task = Task::new(tx, rx, task_reg.clone(), view_sync_state); + task_reg.run_task(task).await; } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 81471dfe1f..9f061e8bd4 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -19,10 +19,10 @@ use async_trait::async_trait; use futures::join; use async_compatibility_layer::channel::UnboundedSendError; -use hotshot_task::{boxed_sync, BoxSyncFuture}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ + boxed_sync, data::ViewNumber, message::Message, traits::{ @@ -33,6 +33,7 @@ use hotshot_types::{ }, node_implementation::NodeType, }, + BoxSyncFuture, }; use std::{collections::hash_map::DefaultHasher, sync::Arc}; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 19f384d75c..9a8162c6ba 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -13,10 +13,10 @@ use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; use hotshot_constants::{Version, LOOK_AHEAD, VERSION_0_1}; -use hotshot_task::{boxed_sync, BoxSyncFuture}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ + boxed_sync, data::ViewNumber, message::{Message, MessageKind}, traits::{ @@ -28,6 +28,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, + BoxSyncFuture, }; use hotshot_utils::{bincode::bincode_opts, version::read_version}; use libp2p_identity::PeerId; diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 3e28a5871e..ae9f1c53dc 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -13,8 +13,8 @@ use async_trait::async_trait; use bincode::Options; use dashmap::DashMap; use futures::StreamExt; -use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ + boxed_sync, message::{Message, MessageKind}, traits::{ election::Membership, @@ -25,6 +25,7 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, }, + BoxSyncFuture, }; use hotshot_utils::bincode::bincode_opts; use rand::Rng; @@ -300,7 +301,7 @@ impl ConnectedNetwork for Memory message: M, recipients: BTreeSet, ) -> Result<(), NetworkError> { - debug!(?message, "Broadcasting message"); + trace!(?message, "Broadcasting message"); // Bincode the message let vec = bincode_opts() .serialize(&message) @@ -348,7 +349,7 @@ impl ConnectedNetwork for Memory #[instrument(name = "MemoryNetwork::direct_message")] async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { - debug!(?message, ?recipient, "Sending direct message"); + // debug!(?message, ?recipient, "Sending direct message"); // Bincode the message let vec = bincode_opts() .serialize(&message) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 604e3f109d..a5dcb90a30 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -13,8 +13,8 @@ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; use hotshot_constants::VERSION_0_1; -use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ + boxed_sync, message::{Message, MessagePurpose}, traits::{ network::{ @@ -25,6 +25,7 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, }, + BoxSyncFuture, }; use hotshot_utils::version::read_version; use hotshot_web_server::{self, config}; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 862fa8d27a..fc322a0159 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,20 +1,17 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background use crate::{traits::NodeImplementation, types::Event, SystemContext}; -use async_compatibility_layer::channel::UnboundedStream; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; + use async_lock::RwLock; use futures::Stream; -use hotshot_task::{ - boxed_sync, - event_stream::{ChannelStream, EventStream, StreamId}, - global_registry::GlobalRegistry, - task::FilterEvent, - BoxSyncFuture, -}; + use hotshot_task_impls::events::HotShotEvent; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::election::Membership; +use hotshot_task::task::TaskRegistry; +use hotshot_types::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ consensus::Consensus, data::Leaf, error::HotShotError, traits::node_implementation::NodeType, }; @@ -25,13 +22,19 @@ use std::sync::Arc; /// This type provides the means to message and interact with a background [`SystemContext`] instance, /// allowing the ability to receive [`Event`]s from it, send transactions to it, and interact with /// the underlying storage. +#[derive(Clone)] pub struct SystemContextHandle> { - /// The [sender](ChannelStream) for the output stream from the background process - pub(crate) output_event_stream: ChannelStream>, - /// access to the internal ev ent stream, in case we need to, say, shut something down - pub(crate) internal_event_stream: ChannelStream>, + /// The [sender](Sender) and an `InactiveReceiver` to keep the channel open. + /// The Channel will output all the events. Subscribers will get an activated + /// clone of the `Receiver` when they get output stream. + pub(crate) output_event_stream: (Sender>, InactiveReceiver>), + /// access to the internal event stream, in case we need to, say, shut something down + pub(crate) internal_event_stream: ( + Sender>, + InactiveReceiver>, + ), /// registry for controlling tasks - pub(crate) registry: GlobalRegistry, + pub(crate) registry: Arc, /// Internal reference to the underlying [`SystemContext`] pub hotshot: SystemContext, @@ -40,38 +43,18 @@ pub struct SystemContextHandle> { pub(crate) storage: I::Storage, } -impl + 'static> Clone - for SystemContextHandle -{ - fn clone(&self) -> Self { - Self { - registry: self.registry.clone(), - output_event_stream: self.output_event_stream.clone(), - internal_event_stream: self.internal_event_stream.clone(), - hotshot: self.hotshot.clone(), - storage: self.storage.clone(), - } - } -} - impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user - pub async fn get_event_stream( - &mut self, - filter: FilterEvent>, - ) -> (impl Stream>, StreamId) { - self.output_event_stream.subscribe(filter).await + pub fn get_event_stream(&self) -> impl Stream> { + self.output_event_stream.1.activate_cloned() } /// HACK so we can know the types when running tests... /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper - pub async fn get_event_stream_known_impl( - &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { - self.output_event_stream.subscribe(filter).await + pub fn get_event_stream_known_impl(&self) -> Receiver> { + self.output_event_stream.1.activate_cloned() } /// HACK so we can know the types when running tests... @@ -79,11 +62,8 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper /// NOTE: this is only used for sanity checks in our tests - pub async fn get_internal_event_stream_known_impl( - &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { - self.internal_event_stream.subscribe(filter).await + pub fn get_internal_event_stream_known_impl(&self) -> Receiver> { + self.internal_event_stream.1.activate_cloned() } /// Get the last decided validated state of the [`SystemContext`] instance. @@ -164,7 +144,7 @@ impl + 'static> SystemContextHandl { boxed_sync(async move { self.hotshot.inner.networks.shut_down_networks().await; - self.registry.shutdown_all().await; + self.registry.shutdown().await; }) } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index e1a40992b3..b236dc23da 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -15,13 +15,14 @@ async-lock = { workspace = true } tracing = { workspace = true } hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } -hotshot-task = { path = "../task", default-features = false } hotshot-utils = { path = "../utils" } time = { workspace = true } commit = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } sha2 = { workspace = true } +hotshot-task = { path = "../task" } +async-broadcast = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c1e5d08b6f..ffe19d0a37 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,6 +1,6 @@ use crate::{ - events::HotShotEvent, - helpers::cancel_task, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -10,12 +10,10 @@ use async_std::task::JoinHandle; use commit::Committable; use core::time::Duration; use hotshot_constants::LOOK_AHEAD; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; +use hotshot_task::task::{Task, TaskState}; + +use async_broadcast::Sender; + use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, QuorumProposal, VidCommitment, VidDisperse}, @@ -77,8 +75,6 @@ pub struct ConsensusTaskState< pub public_key: TYPES::SignatureKey, /// Our Private Key pub private_key: ::PrivateKey, - /// The global task registry - pub registry: GlobalRegistry, /// Reference to consensus. The replica will require a write lock on this. pub consensus: Arc>>, /// View timeout from config. @@ -124,11 +120,8 @@ pub struct ConsensusTaskState< /// last Timeout Certificate this node formed pub timeout_cert: Option>, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - - /// Event stream to publish events to the application layer - pub output_event_stream: ChannelStream>, + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, /// All the VID shares we've received for current and future views. /// In the future we will need a different struct similar to VidDisperse except @@ -175,7 +168,7 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. - async fn vote_if_able(&mut self) -> bool { + async fn vote_if_able(&mut self, event_stream: &Sender>) -> bool { if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", @@ -240,9 +233,7 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) - .await; + broadcast_event(HotShotEvent::QuorumVoteSend(vote), event_stream).await; if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { if commit_and_metadata.is_genesis { self.payload_commitment_and_metadata = None; @@ -343,9 +334,7 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) - .await; + broadcast_event(HotShotEvent::QuorumVoteSend(vote), event_stream).await; return true; } } @@ -365,7 +354,11 @@ impl, A: ConsensusApi + /// Must only update the view and GC if the view actually changes #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus update view", level = "error")] - async fn update_view(&mut self, new_view: TYPES::Time) -> bool { + async fn update_view( + &mut self, + new_view: TYPES::Time, + event_stream: &Sender>, + ) -> bool { if *self.cur_view < *new_view { debug!( "Updating view from {} to {} in consensus task", @@ -411,22 +404,22 @@ impl, A: ConsensusApi + .await; } - self.event_stream - .publish(HotShotEvent::ViewChange(new_view)) - .await; + broadcast_event(HotShotEvent::ViewChange(new_view), event_stream).await; // Spawn a timeout task if we did actually update view let timeout = self.timeout; self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = self.cur_view + 1; async move { async_sleep(Duration::from_millis(timeout)).await; - stream - .publish(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) - .await; + broadcast_event( + HotShotEvent::Timeout(TYPES::Time::new(*view_number)), + &stream, + ) + .await; } })); let consensus = self.consensus.read().await; @@ -446,7 +439,11 @@ impl, A: ConsensusApi + /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] - pub async fn handle_event(&mut self, event: HotShotEvent) { + pub async fn handle( + &mut self, + event: HotShotEvent, + event_stream: Sender>, + ) { match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( @@ -503,7 +500,7 @@ impl, A: ConsensusApi + } // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - self.update_view(view).await; + self.update_view(view, &event_stream).await; let consensus = self.consensus.upgradable_read().await; @@ -513,16 +510,18 @@ impl, A: ConsensusApi + let leaf = self.genesis_leaf().await; match leaf { Some(ref leaf) => { - self.output_event_stream - .publish(Event { + broadcast_event( + Event { view_number: TYPES::Time::genesis(), event: EventType::Decide { leaf_chain: Arc::new(vec![leaf.clone()]), qc: Arc::new(justify_qc.clone()), block_size: None, }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; } None => { error!( @@ -598,10 +597,10 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, None) + self.publish_proposal_if_able(qc.view_number + 1, None, &event_stream) .await; } - if self.vote_if_able().await { + if self.vote_if_able(&event_stream).await { self.current_proposal = None; } } @@ -757,12 +756,14 @@ impl, A: ConsensusApi + }, ) { error!("view publish error {e}"); - self.output_event_stream - .publish(Event { + broadcast_event( + Event { view_number: view, event: EventType::Error { error: e.into() }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; } } @@ -787,17 +788,17 @@ impl, A: ConsensusApi + } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - self.event_stream - .publish(HotShotEvent::LeafDecided(leaf_views.clone())) - .await; - let decide_sent = self.output_event_stream.publish(Event { - view_number: consensus.last_decided_view, - event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - qc: Arc::new(new_decide_qc.unwrap()), - block_size: Some(included_txns_set.len().try_into().unwrap()), + let decide_sent = broadcast_event( + Event { + view_number: consensus.last_decided_view, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(new_decide_qc.unwrap()), + block_size: Some(included_txns_set.len().try_into().unwrap()), + }, }, - }); + &self.output_event_stream, + ); let old_anchor_view = consensus.last_decided_view; consensus .collect_garbage(old_anchor_view, new_anchor_view) @@ -823,6 +824,7 @@ impl, A: ConsensusApi + debug!("Sending Decide for view {:?}", consensus.last_decided_view); debug!("Decided txns len {:?}", included_txns_set.len()); decide_sent.await; + debug!("decide send succeeded"); } let new_view = self.current_proposal.clone().unwrap().view_number + 1; @@ -840,11 +842,11 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, None) + self.publish_proposal_if_able(qc.view_number + 1, None, &event_stream) .await; } - if !self.vote_if_able().await { + if !self.vote_if_able(&event_stream).await { return; } self.current_proposal = None; @@ -867,34 +869,33 @@ impl, A: ConsensusApi + } let mut collector = self.vote_collector.write().await; - let maybe_task = collector.take(); - - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.quorum_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, QuorumVote, QuorumCertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &event_stream) .await; } else { - let result = maybe_task.unwrap().handle_event(event.clone()).await; + let result = collector + .as_mut() + .unwrap() + .handle_event(event.clone(), &event_stream) + .await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return; } - *collector = Some(result.1); } } HotShotEvent::TimeoutVoteRecv(ref vote) => { @@ -913,34 +914,34 @@ impl, A: ConsensusApi + return; } let mut collector = self.timeout_vote_collector.write().await; - let maybe_task = collector.take(); - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.quorum_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, TimeoutVote, TimeoutCertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &event_stream) .await; } else { - let result = maybe_task.unwrap().handle_event(event.clone()).await; + let result = collector + .as_mut() + .unwrap() + .handle_event(event.clone(), &event_stream) + .await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return; } - *collector = Some(result.1); } } HotShotEvent::QCFormed(cert) => { @@ -962,7 +963,10 @@ impl, A: ConsensusApi + let view = qc.view_number + 1; - if self.publish_proposal_if_able(view, Some(qc.clone())).await { + if self + .publish_proposal_if_able(view, Some(qc.clone()), &event_stream) + .await + { } else { warn!("Wasn't able to publish proposal"); } @@ -985,7 +989,7 @@ impl, A: ConsensusApi + ); if !self - .publish_proposal_if_able(qc.view_number + 1, None) + .publish_proposal_if_able(qc.view_number + 1, None, &event_stream) .await { debug!( @@ -1010,9 +1014,9 @@ impl, A: ConsensusApi + .write() .await .saved_da_certs - .insert(view, cert); + .insert(view, cert.clone()); - if self.vote_if_able().await { + if self.vote_if_able(&event_stream).await { self.current_proposal = None; } } @@ -1077,19 +1081,21 @@ impl, A: ConsensusApi + // update the view in state to the one in the message // Publish a view change event to the application - if !self.update_view(new_view).await { + if !self.update_view(new_view, &event_stream).await { debug!("view not updated"); return; } - self.output_event_stream - .publish(Event { + broadcast_event( + Event { view_number: old_view_number, event: EventType::ViewFinished { view_number: old_view_number, }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; } HotShotEvent::Timeout(view) => { // NOTE: We may optionally have the timeout task listen for view change events @@ -1124,19 +1130,20 @@ impl, A: ConsensusApi + return; }; - self.event_stream - .publish(HotShotEvent::TimeoutVoteSend(vote)) - .await; + broadcast_event(HotShotEvent::TimeoutVoteSend(vote), &event_stream).await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); - self.output_event_stream - .publish(Event { + + broadcast_event( + Event { view_number: view, event: EventType::ReplicaViewTimeout { view_number: view }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } @@ -1150,14 +1157,19 @@ impl, A: ConsensusApi + if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view { - self.publish_proposal_if_able(view, None).await; + self.publish_proposal_if_able(view, None, &event_stream) + .await; } if let Some(tc) = &self.timeout_cert { if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { - self.publish_proposal_if_able(view, self.timeout_cert.clone()) - .await; + self.publish_proposal_if_able( + view, + self.timeout_cert.clone(), + &event_stream, + ) + .await; } } } @@ -1171,6 +1183,7 @@ impl, A: ConsensusApi + &mut self, view: TYPES::Time, timeout_certificate: Option>, + event_stream: &Sender>, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. @@ -1280,12 +1293,11 @@ impl, A: ConsensusApi + leaf.view_number, "" ); - self.event_stream - .publish(HotShotEvent::QuorumProposalSend( - message.clone(), - self.public_key.clone(), - )) - .await; + broadcast_event( + HotShotEvent::QuorumProposalSend(message.clone(), self.public_key.clone()), + event_stream, + ) + .await; self.payload_commitment_and_metadata = None; return true; @@ -1295,52 +1307,36 @@ impl, A: ConsensusApi + } } -impl, A: ConsensusApi> TS +impl, A: ConsensusApi + 'static> TaskState for ConsensusTaskState { -} - -/// Type alias for Consensus task -pub type ConsensusTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - ConsensusTaskState, ->; - -/// Event handle for consensus -pub async fn sequencing_consensus_handle< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, ->( - event: HotShotEvent, - mut state: ConsensusTaskState, -) -> ( - std::option::Option, - ConsensusTaskState, -) { - if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - state.handle_event(event).await; - (None, state) + type Event = HotShotEvent; + type Output = (); + fn filter(&self, event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::QuorumProposalRecv(_, _) + | HotShotEvent::QuorumVoteRecv(_) + | HotShotEvent::QCFormed(_) + | HotShotEvent::DACRecv(_) + | HotShotEvent::ViewChange(_) + | HotShotEvent::SendPayloadCommitmentAndMetadata(..) + | HotShotEvent::Timeout(_) + | HotShotEvent::TimeoutVoteRecv(_) + | HotShotEvent::VidDisperseRecv(..) + | HotShotEvent::Shutdown, + ) + } + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + let sender = task.clone_sender(); + tracing::trace!("sender queue len {}", sender.len()); + task.state_mut().handle(event, sender).await; + None + } + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) } -} - -/// Filter for consensus, returns true for event types the consensus task subscribes to. -pub fn consensus_event_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::QuorumProposalRecv(_, _) - | HotShotEvent::QuorumVoteRecv(_) - | HotShotEvent::QCFormed(_) - | HotShotEvent::DACRecv(_) - | HotShotEvent::ViewChange(_) - | HotShotEvent::SendPayloadCommitmentAndMetadata(..) - | HotShotEvent::Timeout(_) - | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::VidDisperseRecv(..) - | HotShotEvent::Shutdown, - ) } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9f4ec122c6..577afbaca6 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,15 +1,12 @@ use crate::{ - events::HotShotEvent, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; +use async_broadcast::Sender; use async_lock::RwLock; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, @@ -50,8 +47,6 @@ pub struct DATaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -73,9 +68,6 @@ pub struct DATaskState< /// The current vote collection task, if there is one. pub vote_collector: RwLock, DACertificate>>, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -91,9 +83,10 @@ impl, A: ConsensusApi + { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + event_stream: Sender>, ) -> Option { match event { HotShotEvent::DAProposalRecv(proposal, sender) => { @@ -176,9 +169,8 @@ impl, A: ConsensusApi + // self.cur_view = view; debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - self.event_stream - .publish(HotShotEvent::DAVoteSend(vote)) - .await; + + broadcast_event(HotShotEvent::DAVoteSend(vote), &event_stream).await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -203,34 +195,33 @@ impl, A: ConsensusApi + } let mut collector = self.vote_collector.write().await; - let maybe_task = collector.take(); - - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.da_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, DAVote, DACertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &event_stream) .await; } else { - let result = maybe_task.unwrap().handle_event(event.clone()).await; + let result = collector + .as_mut() + .unwrap() + .handle_event(event.clone(), &event_stream) + .await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return None; } - *collector = Some(result.1); } } HotShotEvent::ViewChange(view) => { @@ -310,12 +301,11 @@ impl, A: ConsensusApi + _pd: PhantomData, }; - self.event_stream - .publish(HotShotEvent::DAProposalSend( - message.clone(), - self.public_key.clone(), - )) - .await; + broadcast_event( + HotShotEvent::DAProposalSend(message.clone(), self.public_key.clone()), + &event_stream, + ) + .await; } HotShotEvent::Timeout(view) => { @@ -326,7 +316,7 @@ impl, A: ConsensusApi + HotShotEvent::Shutdown => { error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => { error!("unexpected event {:?}", event); @@ -334,10 +324,18 @@ impl, A: ConsensusApi + } None } +} - /// Filter the DA event. - pub fn filter(event: &HotShotEvent) -> bool { - matches!( +/// task state implementation for DA Task +impl, A: ConsensusApi + 'static> TaskState + for DATaskState +{ + type Event = HotShotEvent; + + type Output = HotShotTaskCompleted; + + fn filter(&self, event: &HotShotEvent) -> bool { + !matches!( event, HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) @@ -347,18 +345,16 @@ impl, A: ConsensusApi + | HotShotEvent::ViewChange(_) ) } -} -/// task state implementation for DA Task -impl, A: ConsensusApi + 'static> TS - for DATaskState -{ -} + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await + } -/// Type alias for DA Task Types -pub type DATaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - DATaskState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index d1d94c8438..1002c5d5e8 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -15,6 +15,10 @@ use hotshot_types::{ traits::{node_implementation::NodeType, BlockPayload}, }; +/// Marker that the task completed +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct HotShotTaskCompleted; + /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub enum HotShotEvent { diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 2ee224b7cd..509a664751 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -1,36 +1,35 @@ -use crate::events::HotShotEvent; -use async_compatibility_layer::art::async_spawn; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use async_broadcast::broadcast; -use futures::FutureExt; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, - task_launcher::TaskRunner, -}; +use async_compatibility_layer::art::async_timeout; +use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; -use snafu::Snafu; -use std::{collections::HashMap, future::Future, sync::Arc}; +use std::{collections::HashMap, sync::Arc, time::Duration}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { /// The expected events we get from the test. Maps an event to the number of times we expect to see it expected_output: HashMap, usize>, + /// If true we won't fail the test if extra events come in + allow_extra_output: bool, } -impl TS for TestHarnessState {} +impl TaskState for TestHarnessState { + type Event = HotShotEvent; + type Output = HotShotTaskCompleted; -/// Error emitted if the test harness task fails -#[derive(Snafu, Debug)] -pub struct TestHarnessTaskError {} + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let extra = task.state_mut().allow_extra_output; + handle_event(event, task, extra) + } -/// Type alias for the Test Harness Task -pub type TestHarnessTaskTypes = HSTWithEvent< - TestHarnessTaskError, - HotShotEvent, - ChannelStream>, - TestHarnessState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} /// Runs a test by building the task using `build_fn` and then passing it the `input` events /// and testing the make sure all of the `expected_output` events are seen @@ -43,46 +42,47 @@ pub type TestHarnessTaskTypes = HSTWithEvent< /// # Panics /// Panics if any state the test expects is not set. Panicing causes a test failure #[allow(clippy::implicit_hasher)] -pub async fn run_harness( +#[allow(clippy::panic)] +pub async fn run_harness>>( input: Vec>, expected_output: HashMap, usize>, - event_stream: Option>>, - build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, + state: S, allow_extra_output: bool, ) where TYPES: NodeType, - Fut: Future, + S: Send + 'static, { - let task_runner = TaskRunner::new(); - let registry = task_runner.registry.clone(); - let event_stream = event_stream.unwrap_or_default(); - let state = TestHarnessState { expected_output }; - let handler = HandleEvent(Arc::new(move |event, state| { - async move { handle_event(event, state, allow_extra_output) }.boxed() - })); - let filter = FilterEvent::default(); - let builder = TaskBuilder::>::new("test_harness".to_string()) - .register_event_stream(event_stream.clone(), filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(state) - .register_event_handler(handler); - - let id = builder.get_task_id().unwrap(); - - let task = TestHarnessTaskTypes::build(builder).launch(); - - let task_runner = task_runner.add_task(id, "test_harness".to_string(), task); - let task_runner = build_fn(task_runner, event_stream.clone()).await; - - let runner = async_spawn(async move { task_runner.launch().await }); + let registry = Arc::new(TaskRegistry::default()); + let mut tasks = vec![]; + // set up two broadcast channels so the test sends to the task and the task back to the test + let (to_task, from_test) = broadcast(1024); + let (to_test, from_task) = broadcast(1024); + let test_state = TestHarnessState { + expected_output, + allow_extra_output, + }; + + let test_task = Task::new( + to_test.clone(), + from_task.clone(), + registry.clone(), + test_state, + ); + let task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); + + tasks.push(test_task.run()); + tasks.push(task.run()); for event in input { - let () = event_stream.publish(event).await; + to_task.broadcast_direct(event).await.unwrap(); } - let _ = runner.await; + if async_timeout(Duration::from_secs(2), futures::future::join_all(tasks)) + .await + .is_err() + { + panic!("Test timeout out before all all expected outputs received"); + } } /// Handles an event for the Test Harness Task. If the event is expected, remove it from @@ -97,12 +97,10 @@ pub async fn run_harness( #[allow(clippy::needless_pass_by_value)] pub fn handle_event( event: HotShotEvent, - mut state: TestHarnessState, + task: &mut Task>, allow_extra_output: bool, -) -> ( - std::option::Option, - TestHarnessState, -) { +) -> Option { + let state = task.state_mut(); // Check the output in either case: // * We allow outputs only in our expected output set. // * We haven't received all expected outputs yet. @@ -121,8 +119,9 @@ pub fn handle_event( } if state.expected_output.is_empty() { - return (Some(HotShotTaskCompleted::ShutDown), state); + tracing::error!("test harness task completed"); + return Some(HotShotTaskCompleted); } - (None, state) + None } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index c50f776500..93376f7086 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -1,3 +1,4 @@ +use async_broadcast::{SendError, Sender}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] @@ -10,3 +11,22 @@ pub async fn cancel_task(task: JoinHandle) { #[cfg(async_executor_impl = "tokio")] task.abort(); } + +/// Helper function to send events and log errors +pub async fn broadcast_event(event: E, sender: &Sender) { + match sender.broadcast_direct(event).await { + Ok(None) => (), + Ok(Some(overflowed)) => { + tracing::error!( + "Event sender queue overflow, Oldest event removed form queue: {:?}", + overflowed + ); + } + Err(SendError(e)) => { + tracing::warn!( + "Event: {:?}\n Sending failed, event stream probably shutdown", + e + ); + } + } +} diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 8299bf1edc..1be4020af2 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -32,4 +32,4 @@ pub mod vote; pub mod upgrade; /// Helper functions used by any task -mod helpers; +pub mod helpers; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 79bedc2260..92dd284aa2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,16 +1,15 @@ -use crate::events::HotShotEvent; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; +use async_broadcast::Sender; use either::Either::{self, Left, Right}; use hotshot_constants::VERSION_0_1; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HotShotTaskCompleted, TS}, - task_impls::{HSTWithEvent, HSTWithMessage}, - GeneratedStream, Merge, -}; + +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ message::{ - CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, - SequencingMessage, + CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, SequencingMessage, }, traits::{ election::Membership, @@ -19,31 +18,82 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use snafu::Snafu; -use std::sync::Arc; use tracing::error; use tracing::instrument; -/// the type of network task -#[derive(Clone, Copy, Debug)] -pub enum NetworkTaskKind { - /// quorum: the normal "everyone" committee - Quorum, - /// da committee - Committee, - /// view sync - ViewSync, - /// vid - VID, +/// quorum filter +pub fn quorum_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::QuorumProposalSend(_, _) + | HotShotEvent::QuorumVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::DACSend(_, _) + | HotShotEvent::ViewChange(_) + | HotShotEvent::TimeoutVoteSend(_) + ) +} + +/// committee filter +pub fn committee_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::DAProposalSend(_, _) + | HotShotEvent::DAVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) + ) } +/// vid filter +pub fn vid_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::Shutdown | HotShotEvent::VidDisperseSend(_, _) | HotShotEvent::ViewChange(_) + ) +} + +/// view sync filter +pub fn view_sync_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncPreCommitVoteSend(_) + | HotShotEvent::ViewSyncCommitVoteSend(_) + | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) + ) +} /// the network message task state +#[derive(Clone)] pub struct NetworkMessageTaskState { - /// event stream (used for publishing) - pub event_stream: ChannelStream>, + /// Sender to send internal events this task generates to other tasks + pub event_stream: Sender>, } -impl TS for NetworkMessageTaskState {} +impl TaskState for NetworkMessageTaskState { + type Event = Vec>; + type Output = (); + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + task.state_mut().handle_messages(event).await; + None + } + + fn filter(&self, _event: &Self::Event) -> bool { + false + } + + fn should_shutdown(_event: &Self::Event) -> bool { + false + } +} impl NetworkMessageTaskState { /// Handle the message. @@ -111,7 +161,7 @@ impl NetworkMessageTaskState { // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. // - self.event_stream.publish(event).await; + broadcast_event(event, &self.event_stream).await; } MessageKind::Data(message) => match message { hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { @@ -121,9 +171,11 @@ impl NetworkMessageTaskState { }; } if !transactions.is_empty() { - self.event_stream - .publish(HotShotEvent::TransactionsRecv(transactions)) - .await; + broadcast_event( + HotShotEvent::TransactionsRecv(transactions), + &self.event_stream, + ) + .await; } } } @@ -132,16 +184,41 @@ impl NetworkMessageTaskState { pub struct NetworkEventTaskState> { /// comm channel pub channel: COMMCHANNEL, - /// event stream - pub event_stream: ChannelStream>, /// view number pub view: TYPES::Time, + /// membership for the channel + pub membership: TYPES::Membership, // TODO ED Need to add exchange so we can get the recipient key and our own key? + /// Filter which returns false for the events that this specific network task cares about + pub filter: fn(&HotShotEvent) -> bool, } -impl> TS +impl> TaskState for NetworkEventTaskState { + type Event = HotShotEvent; + + type Output = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let membership = task.state_mut().membership.clone(); + task.state_mut().handle_event(event, &membership).await + } + + fn should_shutdown(event: &Self::Event) -> bool { + if matches!(event, HotShotEvent::Shutdown) { + error!("Network Task received Shutdown event"); + return true; + } + false + } + + fn filter(&self, event: &Self::Event) -> bool { + (self.filter)(event) + } } impl> @@ -275,7 +352,7 @@ impl> } HotShotEvent::Shutdown => { error!("Networking task shutting down"); - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } event => { error!("Receieved unexpected message in network task {:?}", event); @@ -303,84 +380,4 @@ impl> None } - - /// network filter - pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { - match task_kind { - NetworkTaskKind::Quorum => FilterEvent(Arc::new(Self::quorum_filter)), - NetworkTaskKind::Committee => FilterEvent(Arc::new(Self::committee_filter)), - NetworkTaskKind::ViewSync => FilterEvent(Arc::new(Self::view_sync_filter)), - NetworkTaskKind::VID => FilterEvent(Arc::new(Self::vid_filter)), - } - } - - /// quorum filter - fn quorum_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::QuorumProposalSend(_, _) - | HotShotEvent::QuorumVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::DACSend(_, _) - | HotShotEvent::ViewChange(_) - | HotShotEvent::TimeoutVoteSend(_) - ) - } - - /// committee filter - fn committee_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::DAProposalSend(_, _) - | HotShotEvent::DAVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - ) - } - - /// vid filter - fn vid_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::Shutdown - | HotShotEvent::VidDisperseSend(_, _) - | HotShotEvent::ViewChange(_) - ) - } - - /// view sync filter - fn view_sync_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) - | HotShotEvent::ViewSyncPreCommitVoteSend(_) - | HotShotEvent::ViewSyncCommitVoteSend(_) - | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - ) - } } - -/// network error (no errors right now, only stub) -#[derive(Snafu, Debug)] -pub struct NetworkTaskError {} - -/// networking message task types -pub type NetworkMessageTaskTypes = HSTWithMessage< - NetworkTaskError, - Either, Messages>, - // A combination of broadcast and direct streams. - Merge>, GeneratedStream>>, - NetworkMessageTaskState, ->; - -/// network event task types -pub type NetworkEventTaskTypes = HSTWithEvent< - NetworkTaskError, - HotShotEvent, - ChannelStream>, - NetworkEventTaskState, ->; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 32b9c0c492..d29142a5a2 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,4 +1,8 @@ -use crate::events::HotShotEvent; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; +use async_broadcast::Sender; use async_compatibility_layer::{ art::async_timeout, async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, @@ -6,12 +10,8 @@ use async_compatibility_layer::{ use async_lock::RwLock; use bincode::config::Options; use commit::{Commitment, Committable}; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; + +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, data::Leaf, @@ -49,8 +49,6 @@ pub struct TransactionTaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -70,9 +68,6 @@ pub struct TransactionTaskState< /// Membership for teh quorum pub membership: Arc, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This Nodes Public Key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -87,9 +82,10 @@ impl, A: ConsensusApi + /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + event_stream: Sender>, ) -> Option { match event { HotShotEvent::TransactionsRecv(transactions) => { @@ -183,6 +179,7 @@ impl, A: ConsensusApi + return None; } HotShotEvent::ViewChange(view) => { + debug!("view change in transactions to view {:?}", view); if *self.cur_view >= *view { return None; } @@ -196,6 +193,7 @@ impl, A: ConsensusApi + // return if we aren't the next leader or we skipped last view and aren't the current leader. if !make_block && self.membership.get_leader(self.cur_view + 1) != self.public_key { + debug!("Not next leader for view {:?}", self.cur_view); return None; } @@ -251,18 +249,16 @@ impl, A: ConsensusApi + // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; - self.event_stream - .publish(HotShotEvent::TransactionsSequenced( - encoded_transactions, - metadata, - block_view, - )) - .await; + broadcast_event( + HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, block_view), + &event_stream, + ) + .await; return None; } HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => {} } @@ -333,10 +329,18 @@ impl, A: ConsensusApi + // .collect(); Some(txns) } +} + +/// task state implementation for Transactions Task +impl, A: ConsensusApi + 'static> TaskState + for TransactionTaskState +{ + type Event = HotShotEvent; - /// Event filter for the transaction task - pub fn filter(event: &HotShotEvent) -> bool { - matches!( + type Output = HotShotTaskCompleted; + + fn filter(&self, event: &HotShotEvent) -> bool { + !matches!( event, HotShotEvent::TransactionsRecv(_) | HotShotEvent::LeafDecided(_) @@ -344,18 +348,16 @@ impl, A: ConsensusApi + | HotShotEvent::ViewChange(_) ) } -} -/// task state implementation for Transactions Task -impl, A: ConsensusApi + 'static> TS - for TransactionTaskState -{ -} + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await + } -/// Type alias for DA Task Types -pub type TransactionsTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - TransactionTaskState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index ec0dea8231..a77b6046fb 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,15 +1,12 @@ use crate::{ - events::HotShotEvent, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; +use async_broadcast::Sender; use async_lock::RwLock; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; +use hotshot_task::task::TaskState; use hotshot_types::{ event::{Event, EventType}, simple_certificate::UpgradeCertificate, @@ -43,9 +40,6 @@ pub struct UpgradeTaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, - /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -61,9 +55,6 @@ pub struct UpgradeTaskState< pub vote_collector: RwLock, UpgradeCertificate>>, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -79,9 +70,10 @@ impl, A: ConsensusApi + { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + tx: Sender>, ) -> Option { match event { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { @@ -149,9 +141,7 @@ impl, A: ConsensusApi + return None; }; debug!("Sending upgrade vote {:?}", vote.get_view_number()); - self.event_stream - .publish(HotShotEvent::UpgradeVoteSend(vote)) - .await; + broadcast_event(HotShotEvent::UpgradeVoteSend(vote), &tx).await; } HotShotEvent::UpgradeVoteRecv(ref vote) => { debug!("Upgrade vote recv, Main Task {:?}", vote.get_view_number()); @@ -167,34 +157,33 @@ impl, A: ConsensusApi + } let mut collector = self.vote_collector.write().await; - let maybe_task = collector.take(); - - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.quorum_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, UpgradeVote, UpgradeCertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &tx) .await; } else { - let result = maybe_task.unwrap().handle_event(event.clone()).await; - - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + let result = collector + .as_mut() + .unwrap() + .handle_event(event.clone(), &tx) + .await; + + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return None; } - *collector = Some(result.1); } } HotShotEvent::ViewChange(view) => { @@ -211,7 +200,7 @@ impl, A: ConsensusApi + } HotShotEvent::Shutdown => { error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => { error!("unexpected event {:?}", event); @@ -219,10 +208,31 @@ impl, A: ConsensusApi + } None } +} + +/// task state implementation for DA Task +impl, A: ConsensusApi + 'static> TaskState + for UpgradeTaskState +{ + type Event = HotShotEvent; + + type Output = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut hotshot_task::task::Task, + ) -> Option { + let sender = task.clone_sender(); + tracing::trace!("sender queue len {}", sender.len()); + task.state_mut().handle(event, sender).await + } - /// Filter the upgrade event. - pub fn filter(event: &HotShotEvent) -> bool { - matches!( + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } + + fn filter(&self, event: &Self::Event) -> bool { + !matches!( event, HotShotEvent::UpgradeProposalRecv(_, _) | HotShotEvent::UpgradeVoteRecv(_) @@ -232,17 +242,3 @@ impl, A: ConsensusApi + ) } } - -/// task state implementation for DA Task -impl, A: ConsensusApi + 'static> TS - for UpgradeTaskState -{ -} - -/// Type alias for DA Task Types -pub type UpgradeTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - UpgradeTaskState, ->; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index a4b9338f87..d07aeb2c10 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,13 +1,11 @@ -use crate::events::HotShotEvent; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use crate::helpers::broadcast_event; +use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; -use hotshot_task::{ - event_stream::ChannelStream, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; + +use hotshot_task::task::{Task, TaskState}; use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::Consensus, @@ -27,7 +25,6 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use hotshot_task::event_stream::EventStream; use snafu::Snafu; use std::marker::PhantomData; use std::sync::Arc; @@ -45,8 +42,6 @@ pub struct VIDTaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -63,10 +58,6 @@ pub struct VIDTaskState< pub private_key: ::PrivateKey, /// The view and ID of the current vote collection task, if there is one. pub vote_collector: Option<(TYPES::Time, usize, usize)>, - - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This state's ID pub id: u64, } @@ -76,9 +67,10 @@ impl, A: ConsensusApi + { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + event_stream: Sender>, ) -> Option { match event { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { @@ -104,21 +96,25 @@ impl, A: ConsensusApi + // Unwrap here will just propogate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building - self.event_stream - .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( + broadcast_event( + HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.commit, metadata, view_number, - )) - .await; + ), + &event_stream, + ) + .await; // send the block to the VID dispersal function - self.event_stream - .publish(HotShotEvent::BlockReady( + broadcast_event( + HotShotEvent::BlockReady( VidDisperse::from_membership(view_number, vid_disperse, &self.membership), view_number, - )) - .await; + ), + &event_stream, + ) + .await; } HotShotEvent::BlockReady(vid_disperse, view_number) => { @@ -130,16 +126,18 @@ impl, A: ConsensusApi + return None; }; debug!("publishing VID disperse for view {}", *view_number); - self.event_stream - .publish(HotShotEvent::VidDisperseSend( + broadcast_event( + HotShotEvent::VidDisperseSend( Proposal { signature, data: vid_disperse, _pd: PhantomData, }, self.public_key.clone(), - )) - .await; + ), + &event_stream, + ) + .await; } HotShotEvent::ViewChange(view) => { @@ -169,7 +167,7 @@ impl, A: ConsensusApi + } HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => { error!("unexpected event {:?}", event); @@ -177,10 +175,26 @@ impl, A: ConsensusApi + } None } +} - /// Filter the VID event. - pub fn filter(event: &HotShotEvent) -> bool { - matches!( +/// task state implementation for VID Task +impl, A: ConsensusApi + 'static> TaskState + for VIDTaskState +{ + type Event = HotShotEvent; + + type Output = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + fn filter(&self, event: &Self::Event) -> bool { + !matches!( event, HotShotEvent::Shutdown | HotShotEvent::TransactionsSequenced(_, _, _) @@ -188,18 +202,7 @@ impl, A: ConsensusApi + | HotShotEvent::ViewChange(_) ) } + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } } - -/// task state implementation for VID Task -impl, A: ConsensusApi + 'static> TS - for VIDTaskState -{ -} - -/// Type alias for VID Task Types -pub type VIDTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - VIDTaskState, ->; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index a123cee52f..04d47cda08 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,16 +1,12 @@ #![allow(clippy::module_name_repetitions)] use crate::{ - events::HotShotEvent, - helpers::cancel_task, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, vote::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState}, }; +use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; use hotshot_types::{ simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -29,7 +25,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use hotshot_task::global_registry::GlobalRegistry; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ message::GeneralConsensusMessage, traits::{ @@ -71,10 +67,6 @@ pub struct ViewSyncTaskState< I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, > { - /// Registry to register sub tasks - pub registry: GlobalRegistry, - /// Event stream to publish events to - pub event_stream: ChannelStream>, /// View HotShot is currently in pub current_view: TYPES::Time, /// View HotShot wishes to be in @@ -119,17 +111,38 @@ impl< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, - > TS for ViewSyncTaskState + > TaskState for ViewSyncTaskState { -} + type Event = HotShotEvent; + + type Output = (); -/// Types for the main view sync task -pub type ViewSyncTaskStateTypes = HSTWithEvent< - ViewSyncTaskError, - HotShotEvent, - ChannelStream>, - ViewSyncTaskState, ->; + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + + fn filter(&self, event: &Self::Event) -> bool { + !matches!( + event, + HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewChange(_) + ) + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} /// State of a view sync replica task pub struct ViewSyncReplicaTaskState< @@ -164,22 +177,40 @@ pub struct ViewSyncReplicaTaskState< pub private_key: ::PrivateKey, /// HotShot consensus API pub api: A, - /// Event stream to publish events to - pub event_stream: ChannelStream>, } -impl, A: ConsensusApi + 'static> TS +impl, A: ConsensusApi + 'static> TaskState for ViewSyncReplicaTaskState { -} + type Event = HotShotEvent; + + type Output = (); + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + fn filter(&self, event: &Self::Event) -> bool { + !matches!( + event, + HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewChange(_) + ) + } -/// Types for view sync replica state -pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< - ViewSyncTaskError, - HotShotEvent, - ChannelStream>, - ViewSyncReplicaTaskState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} impl< TYPES: NodeType, @@ -194,6 +225,7 @@ impl< &mut self, event: HotShotEvent, view: TYPES::Time, + sender: &Sender>, ) { // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it @@ -204,17 +236,17 @@ impl< let mut task_map = self.replica_task_map.write().await; - if let Some(replica_task) = task_map.remove(&view) { + if let Some(replica_task) = task_map.get_mut(&view) { // Forward event then return debug!("Forwarding message"); - let result = replica_task.handle_event(event.clone()).await; + let result = replica_task.handle(event.clone(), sender.clone()).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished + task_map.remove(&view); return; } - task_map.insert(view, result.1); return; } @@ -231,47 +263,52 @@ impl< public_key: self.public_key.clone(), private_key: self.private_key.clone(), api: self.api.clone(), - event_stream: self.event_stream.clone(), view_sync_timeout: self.view_sync_timeout, id: self.id, }; - let result = replica_state.handle_event(event.clone()).await; + let result = replica_state.handle(event.clone(), sender.clone()).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished return; } - replica_state = result.1; - task_map.insert(view, replica_state); } #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task - pub async fn handle_event(&mut self, event: HotShotEvent) { + pub async fn handle( + &mut self, + event: HotShotEvent, + event_stream: Sender>, + ) { match &event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncTimeout(view, _, _) => { debug!("view sync timeout in main task {:?}", view); let view = *view; - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { @@ -279,15 +316,14 @@ impl< let vote_view = vote.get_view_number(); let relay = vote.get_data().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); - if let Some(relay_task) = relay_map.remove(&relay) { + if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone()).await; + let result = relay_task.handle_event(event.clone(), &event_stream).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished - return; + map.remove(&vote_view); } - relay_map.insert(relay, result.1); return; } @@ -302,11 +338,10 @@ impl< public_key: self.public_key.clone(), membership: self.membership.clone(), view: vote_view, - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; - let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + let vote_collector = + create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -317,16 +352,14 @@ impl< let vote_view = vote.get_view_number(); let relay = vote.get_data().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); - if let Some(relay_task) = relay_map.remove(&relay) { + if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone()).await; + let result = relay_task.handle_event(event.clone(), &event_stream).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished - return; + map.remove(&vote_view); } - - relay_map.insert(relay, result.1); return; } @@ -341,11 +374,10 @@ impl< public_key: self.public_key.clone(), membership: self.membership.clone(), view: vote_view, - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; - let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + let vote_collector = + create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -356,16 +388,14 @@ impl< let vote_view = vote.get_view_number(); let relay = vote.get_data().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); - if let Some(relay_task) = relay_map.remove(&relay) { + if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone()).await; + let result = relay_task.handle_event(event.clone(), &event_stream).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished - return; + map.remove(&vote_view); } - - relay_map.insert(relay, result.1); return; } @@ -380,11 +410,10 @@ impl< public_key: self.public_key.clone(), membership: self.membership.clone(), view: vote_view, - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; - let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + let vote_collector = + create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -486,39 +515,23 @@ impl< self.send_to_or_create_replica( HotShotEvent::ViewSyncTrigger(view_number + 1), view_number + 1, + &event_stream, ) .await; } else { // If this is the first timeout we've seen advance to the next view self.current_view = view_number; - self.event_stream - .publish(HotShotEvent::ViewChange(TYPES::Time::new( - *self.current_view, - ))) - .await; + broadcast_event( + HotShotEvent::ViewChange(TYPES::Time::new(*self.current_view)), + &event_stream, + ) + .await; } } _ => {} } } - - /// Filter view sync related events. - pub fn filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) - | HotShotEvent::ViewSyncCommitVoteRecv(_) - | HotShotEvent::ViewSyncFinalizeVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::Timeout(_) - | HotShotEvent::ViewSyncTimeout(_, _, _) - | HotShotEvent::ViewChange(_) - ) - } } impl, A: ConsensusApi + 'static> @@ -526,13 +539,11 @@ impl, A: ConsensusApi + { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task - pub async fn handle_event( - mut self, + pub async fn handle( + &mut self, event: HotShotEvent, - ) -> ( - std::option::Option, - ViewSyncReplicaTaskState, - ) { + event_stream: Sender>, + ) -> Option { match event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { let last_seen_certificate = ViewSyncPhase::PreCommit; @@ -541,20 +552,20 @@ impl, A: ConsensusApi + if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); - return (None, self); + return None; } // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); - return (None, self); + return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round if certificate.get_view_number() > self.next_view { - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } if certificate.get_data().relay > self.relay { @@ -571,13 +582,12 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign ViewSyncCommitData!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) + broadcast_event(HotShotEvent::ViewSyncCommitVoteSend(vote), &event_stream) .await; } @@ -586,18 +596,24 @@ impl, A: ConsensusApi + } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); let phase = last_seen_certificate; + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + async_sleep(timeout).await; + info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); + + broadcast_event( + HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, phase, - )) - .await; + ), + &stream, + ) + .await; } })); } @@ -609,20 +625,20 @@ impl, A: ConsensusApi + if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); - return (None, self); + return None; } // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); - return (None, self); + return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round if certificate.get_view_number() > self.next_view { - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } if certificate.get_data().relay > self.relay { @@ -639,13 +655,12 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign view sync finalized vote!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) + broadcast_event(HotShotEvent::ViewSyncFinalizeVoteSend(vote), &event_stream) .await; } @@ -654,33 +669,34 @@ impl, A: ConsensusApi + *self.next_view ); - self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view - 1)) - .await; + broadcast_event(HotShotEvent::ViewChange(self.next_view - 1), &event_stream).await; - self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view)) - .await; + broadcast_event(HotShotEvent::ViewChange(self.next_view), &event_stream).await; if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); let phase = last_seen_certificate; + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; + async_sleep(timeout).await; info!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", - self.relay + relay ); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + broadcast_event( + HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, phase, - )) - .await; + ), + &stream, + ) + .await; } })); } @@ -690,20 +706,20 @@ impl, A: ConsensusApi + if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); - return (None, self); + return None; } // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); - return (None, self); + return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round if certificate.get_view_number() > self.next_view { - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } // cancel poll for votes @@ -728,16 +744,14 @@ impl, A: ConsensusApi + cancel_task(timeout_task).await; } - self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view)) - .await; - return (Some(HotShotTaskCompleted::ShutDown), self); + broadcast_event(HotShotEvent::ViewChange(self.next_view), &event_stream).await; + return Some(HotShotTaskCompleted); } HotShotEvent::ViewSyncTrigger(view_number) => { if self.next_view != TYPES::Time::new(*view_number) { error!("Unexpected view number to triger view sync"); - return (None, self); + return None; } let Ok(vote) = ViewSyncPreCommitVote::::create_signed_vote( @@ -750,32 +764,36 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign pre commit vote!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + broadcast_event(HotShotEvent::ViewSyncPreCommitVoteSend(vote), &event_stream) .await; } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; + async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncTrigger"); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + broadcast_event( + HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, ViewSyncPhase::None, - )) - .await; + ), + &stream, + ) + .await; } })); - return (None, self); + return None; } HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { @@ -797,15 +815,17 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign ViewSyncPreCommitData!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) - .await; + broadcast_event( + HotShotEvent::ViewSyncPreCommitVoteSend(vote), + &event_stream, + ) + .await; } } ViewSyncPhase::Finalize => { @@ -815,28 +835,33 @@ impl, A: ConsensusApi + } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; + async_sleep(timeout).await; info!( "Vote sending timed out in ViewSyncTimeout relay = {}", - self.relay + relay ); - stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + broadcast_event( + HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, last_seen_certificate, - )) - .await; + ), + &stream, + ) + .await; } })); - return (None, self); + return None; } } - _ => return (None, self), + _ => return None, } - (None, self) + None } } diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index e41f34cdba..651332f39b 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -1,14 +1,14 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; -use crate::events::HotShotEvent; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; +use async_broadcast::Sender; use async_trait::async_trait; use either::Either::{self, Left, Right}; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; + +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -46,9 +46,6 @@ pub struct VoteCollectionTaskState< /// The view which we are collecting votes for pub view: TYPES::Time, - /// global event stream - pub event_stream: ChannelStream>, - /// Node id pub id: u64, } @@ -75,9 +72,13 @@ impl< { /// Take one vote and accumultate it. Returns either the cert or the updated state /// after the vote is accumulated - pub async fn accumulate_vote(mut self, vote: &VOTE) -> (Option, Self) { + pub async fn accumulate_vote( + &mut self, + vote: &VOTE, + event_stream: &Sender>, + ) -> Option { if vote.get_leader(&self.membership) != self.public_key { - return (None, self); + return None; } if vote.get_view_number() != self.view { @@ -86,23 +87,19 @@ impl< *vote.get_view_number(), *self.view ); - return (None, self); + return None; } - let Some(accumulator) = self.accumulator else { - return (None, self); + let Some(ref mut accumulator) = self.accumulator else { + return None; }; match accumulator.accumulate(vote, &self.membership) { - Either::Left(acc) => { - self.accumulator = Some(acc); - (None, self) - } + Either::Left(()) => None, Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); - self.event_stream - .publish(VOTE::make_cert_event(cert, &self.public_key)) - .await; + + broadcast_event(VOTE::make_cert_event(cert, &self.public_key), event_stream).await; self.accumulator = None; - (Some(HotShotTaskCompleted::ShutDown), self) + Some(HotShotTaskCompleted) } } } @@ -120,17 +117,23 @@ impl< + std::marker::Send + std::marker::Sync + 'static, - > TS for VoteCollectionTaskState + > TaskState for VoteCollectionTaskState +where + VoteCollectionTaskState: HandleVoteEvent, { -} + type Event = HotShotEvent; -/// Types for a vote accumulator Task -pub type VoteTaskStateTypes = HSTWithEvent< - VoteTaskError, - HotShotEvent, - ChannelStream>, - VoteCollectionTaskState, ->; + type Output = HotShotTaskCompleted; + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle_event(event, &sender).await + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} /// Trait for types which will handle a vote event. #[async_trait] @@ -142,12 +145,10 @@ where { /// Handle a vote event async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> ( - Option, - VoteCollectionTaskState, - ); + sender: &Sender>, + ) -> Option; /// Event filter to use for this event fn filter(event: &HotShotEvent) -> bool; @@ -161,12 +162,8 @@ pub struct AccumulatorInfo { pub membership: Arc, /// View of the votes we are collecting pub view: TYPES::Time, - /// Global event stream shared by all consensus tasks - pub event_stream: ChannelStream>, /// This nodes id pub id: u64, - /// Task Registry for all tasks used by this node - pub registry: GlobalRegistry, } /// Generic function for spawnnig a vote task. Returns the event stream id of the spawned task if created @@ -176,6 +173,7 @@ pub async fn create_vote_accumulator( info: &AccumulatorInfo, vote: VOTE, event: HotShotEvent, + sender: &Sender>, ) -> Option> where TYPES: NodeType, @@ -206,7 +204,6 @@ where }; let mut state = VoteCollectionTaskState:: { - event_stream: info.event_stream.clone(), membership: info.membership.clone(), public_key: info.public_key.clone(), accumulator: Some(new_accumulator), @@ -214,14 +211,13 @@ where id: info.id, }; - let result = state.handle_event(event.clone()).await; + let result = state.handle_event(event.clone(), sender).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished return None; } - state = result.1; Some(state) } @@ -359,12 +355,13 @@ impl HandleVoteEvent, QuorumCertificat for QuorumVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, QuorumVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -378,12 +375,13 @@ impl HandleVoteEvent, UpgradeCertific for UpgradeVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, UpgradeVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -396,12 +394,13 @@ impl HandleVoteEvent, DACertificate for DAVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, DAVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -414,12 +413,13 @@ impl HandleVoteEvent, TimeoutCertific for TimeoutVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, TimeoutVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -433,12 +433,15 @@ impl for ViewSyncPreCommitState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, ViewSyncPreCommitState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { + self.accumulate_vote(&vote, sender).await + } + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -452,12 +455,13 @@ impl for ViewSyncCommitVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, ViewSyncCommitVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -471,15 +475,15 @@ impl for ViewSyncFinalizeVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> ( - Option, - ViewSyncFinalizeVoteState, - ) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { + self.accumulate_vote(&vote, sender).await + } + _ => None, } } fn filter(event: &HotShotEvent) -> bool { diff --git a/task/Cargo.toml b/task/Cargo.toml index 51a80a1829..39c531a637 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -1,27 +1,22 @@ [package] authors = ["Espresso Systems "] -description = "Async task abstraction for use in consensus" -edition = "2021" name = "hotshot-task" version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-compatibility-layer = { workspace = true } -async-trait = { workspace = true } -either = { workspace = true } -futures = { workspace = true } -serde = { workspace = true } -snafu = { workspace = true } -async-lock = { workspace = true } + +futures = "0.3.30" +async-broadcast = "0.6.0" tracing = { workspace = true } -atomic_enum = "0.2.0" -pin-project = "1.1.4" +async-compatibility-layer = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } - +tokio = { workspace= true, features = ["time", "rt-multi-thread", "macros", "sync"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } +async-std = { workspace= true, features = ["attributes"] } [lints] -workspace = true +workspace = true \ No newline at end of file diff --git a/task/src/dependency.rs b/task/src/dependency.rs new file mode 100644 index 0000000000..6ae793a7a4 --- /dev/null +++ b/task/src/dependency.rs @@ -0,0 +1,270 @@ +use async_broadcast::{Receiver, RecvError}; +use futures::future::BoxFuture; +use futures::stream::FuturesUnordered; +use futures::stream::StreamExt; +use futures::FutureExt; +use std::future::Future; + +/// Type which describes the idea of waiting for a dependency to complete +pub trait Dependency { + /// Complete will wait until it gets some value `T` then return the value + fn completed(self) -> impl Future> + Send; + /// Create an or dependency from this dependency and another + fn or + Send + 'static>(self, dep: D) -> OrDependency + where + T: Send + Sync + Clone + 'static, + Self: Sized + Send + 'static, + { + let mut or = OrDependency::from_deps(vec![self]); + or.add_dep(dep); + or + } + /// Create an and dependency from this dependency and another + fn and + Send + 'static>(self, dep: D) -> AndDependency + where + T: Send + Sync + Clone + 'static, + Self: Sized + Send + 'static, + { + let mut and = AndDependency::from_deps(vec![self]); + and.add_dep(dep); + and + } +} + +/// Used to combine dependencies to create `AndDependency`s or `OrDependency`s +trait CombineDependencies: + Sized + Dependency + Send + 'static +{ +} + +/// Defines a dependency that completes when all of its deps complete +pub struct AndDependency { + /// Dependencies being combined + deps: Vec>>, +} +impl Dependency> for AndDependency { + /// Returns a vector of all of the results from it's dependencies. + /// The results will be in a random order + async fn completed(self) -> Option> { + let futures = FuturesUnordered::from_iter(self.deps); + futures + .collect::>>() + .await + .into_iter() + .collect() + } +} + +impl AndDependency { + /// Create from a vec of deps + #[must_use] + pub fn from_deps(deps: Vec + Send + 'static>) -> Self { + let mut pinned = vec![]; + for dep in deps { + pinned.push(dep.completed().boxed()); + } + Self { deps: pinned } + } + /// Add another dependency + pub fn add_dep(&mut self, dep: impl Dependency + Send + 'static) { + self.deps.push(dep.completed().boxed()); + } + /// Add multiple dependencies + pub fn add_deps(&mut self, deps: AndDependency) { + for dep in deps.deps { + self.deps.push(dep); + } + } +} + +/// Defines a dependency that complets when one of it's dependencies compeltes +pub struct OrDependency { + /// Dependencies being combined + deps: Vec>>, +} +impl Dependency for OrDependency { + /// Returns the value of the first completed dependency + async fn completed(self) -> Option { + let mut futures = FuturesUnordered::from_iter(self.deps); + loop { + if let Some(maybe) = futures.next().await { + if maybe.is_some() { + return maybe; + } + } else { + return None; + } + } + } +} + +impl OrDependency { + /// Creat an `OrDependency` from a vec of dependencies + #[must_use] + pub fn from_deps(deps: Vec + Send + 'static>) -> Self { + let mut pinned = vec![]; + for dep in deps { + pinned.push(dep.completed().boxed()); + } + Self { deps: pinned } + } + /// Add another dependecy + pub fn add_dep(&mut self, dep: impl Dependency + Send + 'static) { + self.deps.push(dep.completed().boxed()); + } +} + +/// A dependency that listens on a chanel for an event +/// that matches what some value it wants. +pub struct EventDependency { + /// Channel of incomming events + pub(crate) event_rx: Receiver, + /// Closure which returns true if the incoming `T` is the + /// thing that completes this dependency + pub(crate) match_fn: Box bool + Send>, +} + +impl EventDependency { + /// Create a new `EventDependency` + #[must_use] + pub fn new(receiver: Receiver, match_fn: Box bool + Send>) -> Self { + Self { + event_rx: receiver, + match_fn: Box::new(match_fn), + } + } +} + +impl Dependency for EventDependency { + async fn completed(mut self) -> Option { + loop { + match self.event_rx.recv_direct().await { + Ok(event) => { + if (self.match_fn)(&event) { + return Some(event); + } + } + Err(RecvError::Overflowed(n)) => { + tracing::error!("Dependency Task overloaded, skipping {} events", n); + } + Err(RecvError::Closed) => { + return None; + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::{AndDependency, Dependency, EventDependency, OrDependency}; + use async_broadcast::{broadcast, Receiver}; + + fn eq_dep(rx: Receiver, val: usize) -> EventDependency { + EventDependency { + event_rx: rx, + match_fn: Box::new(move |v| *v == val), + } + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn it_works() { + let (tx, rx) = broadcast(10); + + let mut deps = vec![]; + for i in 0..5 { + tx.broadcast(i).await.unwrap(); + deps.push(eq_dep(rx.clone(), 5)); + } + + let and = AndDependency::from_deps(deps); + tx.broadcast(5).await.unwrap(); + let result = and.completed().await; + assert_eq!(result, Some(vec![5; 5])); + } + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn or_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(5).await.unwrap(); + let mut deps = vec![]; + for _ in 0..5 { + deps.push(eq_dep(rx.clone(), 5)); + } + let or = OrDependency::from_deps(deps); + let result = or.completed().await; + assert_eq!(result, Some(5)); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn and_or_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(1).await.unwrap(); + tx.broadcast(2).await.unwrap(); + tx.broadcast(3).await.unwrap(); + tx.broadcast(5).await.unwrap(); + tx.broadcast(6).await.unwrap(); + + let or1 = OrDependency::from_deps([eq_dep(rx.clone(), 4), eq_dep(rx.clone(), 6)].into()); + let or2 = OrDependency::from_deps([eq_dep(rx.clone(), 4), eq_dep(rx.clone(), 5)].into()); + let and = AndDependency::from_deps([or1, or2].into()); + let result = and.completed().await; + assert_eq!(result, Some(vec![6, 5])); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn or_and_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(1).await.unwrap(); + tx.broadcast(2).await.unwrap(); + tx.broadcast(3).await.unwrap(); + tx.broadcast(4).await.unwrap(); + tx.broadcast(5).await.unwrap(); + + let and1 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 6)); + let and2 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 5)); + let or = and1.or(and2); + let result = or.completed().await; + assert_eq!(result, Some(vec![4, 5])); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn many_and_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(1).await.unwrap(); + tx.broadcast(2).await.unwrap(); + tx.broadcast(3).await.unwrap(); + tx.broadcast(4).await.unwrap(); + tx.broadcast(5).await.unwrap(); + tx.broadcast(6).await.unwrap(); + + let mut and1 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 6)); + let and2 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 5)); + and1.add_deps(and2); + let result = and1.completed().await; + assert_eq!(result, Some(vec![4, 6, 4, 5])); + } +} diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs new file mode 100644 index 0000000000..9db6786637 --- /dev/null +++ b/task/src/dependency_task.rs @@ -0,0 +1,140 @@ +#[cfg(async_executor_impl = "async-std")] +use async_std::task::{spawn, JoinHandle}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::{spawn, JoinHandle}; + +use futures::Future; + +use crate::dependency::Dependency; + +/// Defines a type that can handle the result of a dependency +pub trait HandleDepOutput: Send + Sized + Sync + 'static { + /// Type we expect from completed dependency + type Output: Send + Sync + 'static; + + /// Called once when the Dependency completes handles the results + fn handle_dep_result(self, res: Self::Output) -> impl Future + Send; +} + +/// A task that runs until it's dependency completes and it handles the result +pub struct DependencyTask + Send, H: HandleDepOutput + Send> { + /// Dependency this taks waits for + pub(crate) dep: D, + /// Handles the results returned from `self.dep.completed().await` + pub(crate) handle: H, +} + +impl + Send, H: HandleDepOutput + Send> DependencyTask { + /// Create a new `DependencyTask` + #[must_use] + pub fn new(dep: D, handle: H) -> Self { + Self { dep, handle } + } +} + +impl + Send + 'static, H: HandleDepOutput> DependencyTask { + /// Spawn the dependency task + pub fn run(self) -> JoinHandle<()> + where + Self: Sized, + { + spawn(async move { + if let Some(completed) = self.dep.completed().await { + self.handle.handle_dep_result(completed).await; + } + }) + } +} + +#[cfg(test)] +mod test { + + use std::time::Duration; + + use async_broadcast::{broadcast, Receiver, Sender}; + use futures::{stream::FuturesOrdered, StreamExt}; + + #[cfg(async_executor_impl = "async-std")] + use async_std::task::sleep; + #[cfg(async_executor_impl = "tokio")] + use tokio::time::sleep; + + use super::*; + use crate::dependency::*; + + #[derive(Clone, PartialEq, Eq, Debug)] + enum TaskResult { + Success(usize), + // Failure, + } + + struct DummyHandle { + sender: Sender, + } + impl HandleDepOutput for DummyHandle { + type Output = usize; + async fn handle_dep_result(self, res: usize) { + self.sender + .broadcast(TaskResult::Success(res)) + .await + .unwrap(); + } + } + + fn eq_dep(rx: Receiver, val: usize) -> EventDependency { + EventDependency { + event_rx: rx, + match_fn: Box::new(move |v| *v == val), + } + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + // allow unused for tokio because it's a test + #[allow(unused_must_use)] + async fn it_works() { + let (tx, rx) = broadcast(10); + let (res_tx, mut res_rx) = broadcast(10); + let dep = eq_dep(rx, 2); + let handle = DummyHandle { sender: res_tx }; + let join_handle = DependencyTask { dep, handle }.run(); + tx.broadcast(2).await.unwrap(); + assert_eq!(res_rx.recv().await.unwrap(), TaskResult::Success(2)); + + join_handle.await; + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn many_works() { + let (tx, rx) = broadcast(20); + let (res_tx, mut res_rx) = broadcast(20); + + let mut handles = vec![]; + for i in 0..10 { + let dep = eq_dep(rx.clone(), i); + let handle = DummyHandle { + sender: res_tx.clone(), + }; + handles.push(DependencyTask { dep, handle }.run()); + } + let tx2 = tx.clone(); + spawn(async move { + for i in 0..10 { + tx.broadcast(i).await.unwrap(); + sleep(Duration::from_millis(10)).await; + } + }); + for i in 0..10 { + assert_eq!(res_rx.recv().await.unwrap(), TaskResult::Success(i)); + } + tx2.broadcast(100).await.unwrap(); + FuturesOrdered::from_iter(handles).collect::>().await; + } +} diff --git a/task/src/event_stream.rs b/task/src/event_stream.rs deleted file mode 100644 index 5248fe4373..0000000000 --- a/task/src/event_stream.rs +++ /dev/null @@ -1,268 +0,0 @@ -use async_compatibility_layer::channel::{unbounded, UnboundedSender, UnboundedStream}; -use async_lock::RwLock; -use std::{ - collections::HashMap, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use async_trait::async_trait; -use futures::Stream; - -use crate::task::{FilterEvent, PassType}; - -/// a stream that does nothing. -/// it's immediately closed -#[derive(Clone)] -pub struct DummyStream; - -impl Stream for DummyStream { - type Item = (); - - fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(None) - } -} - -#[async_trait] -impl EventStream for DummyStream { - type EventType = (); - - type StreamType = DummyStream; - - async fn publish(&self, _event: Self::EventType) {} - - async fn subscribe( - &self, - _filter: FilterEvent, - ) -> (Self::StreamType, StreamId) { - (DummyStream, 0) - } - - async fn unsubscribe(&self, _id: StreamId) {} - - async fn direct_message(&self, _id: StreamId, _event: Self::EventType) {} -} - -impl SendableStream for DummyStream {} - -/// this is only used for indexing -pub type StreamId = usize; - -/// a stream that plays nicely with async -pub trait SendableStream: Stream + Sync + Send + 'static {} - -/// Async pub sub event stream -/// NOTE: static bound indicates that if the type points to data, that data lives for the lifetime -/// of the program -#[async_trait] -pub trait EventStream: Clone + 'static + Sync + Send { - /// the type of event to process - type EventType: PassType; - /// the type of stream to use - type StreamType: SendableStream; - - /// publish an event to the event stream - async fn publish(&self, event: Self::EventType); - - /// subscribe to a particular set of events - /// specified by `filter`. Filter returns true if the event should be propagated - /// TODO (justin) rethink API, we might be able just to use `StreamExt::filter` and `Filter` - /// That would certainly be cleaner - async fn subscribe(&self, filter: FilterEvent) - -> (Self::StreamType, StreamId); - - /// unsubscribe from the stream - async fn unsubscribe(&self, id: StreamId); - - /// send direct message to node - async fn direct_message(&self, id: StreamId, event: Self::EventType); -} - -/// Event stream implementation using channels as the underlying primitive. -/// We want it to be cloneable -#[derive(Clone)] -pub struct ChannelStream { - /// inner field. Useful for having the stream itself - /// be clone - inner: Arc>>, -} - -/// trick to make the event stream clonable -struct ChannelStreamInner { - /// the subscribers to the channel - subscribers: HashMap, UnboundedSender)>, - /// the next unused assignable id - next_stream_id: StreamId, -} - -impl ChannelStream { - /// construct a new event stream - #[must_use] - pub fn new() -> Self { - Self { - inner: Arc::new(RwLock::new(ChannelStreamInner { - subscribers: HashMap::new(), - next_stream_id: 0, - })), - } - } -} - -impl Default for ChannelStream { - fn default() -> Self { - Self::new() - } -} - -impl SendableStream for UnboundedStream {} - -#[async_trait] -impl EventStream for ChannelStream { - type EventType = EVENT; - type StreamType = UnboundedStream; - - async fn direct_message(&self, id: StreamId, event: Self::EventType) { - let inner = self.inner.write().await; - match inner.subscribers.get(&id) { - Some((filter, sender)) => { - if filter(&event) { - match sender.send(event.clone()).await { - Ok(()) => (), - // error sending => stream is closed so remove it - Err(_) => self.unsubscribe(id).await, - } - } - } - None => { - tracing::debug!("Requested stream id not found"); - } - } - } - - /// publish an event to the event stream - async fn publish(&self, event: Self::EventType) { - let inner = self.inner.read().await; - for (uid, (filter, sender)) in &inner.subscribers { - if filter(&event) { - match sender.send(event.clone()).await { - Ok(()) => (), - // error sending => stream is closed so remove it - Err(_) => { - self.unsubscribe(*uid).await; - } - } - } - } - } - - async fn subscribe( - &self, - filter: FilterEvent, - ) -> (Self::StreamType, StreamId) { - let mut inner = self.inner.write().await; - let new_stream_id = inner.next_stream_id; - let (s, r) = unbounded(); - inner.next_stream_id += 1; - // NOTE: can never be already existing. - // so, this should always return `None` - inner.subscribers.insert(new_stream_id, (filter, s)); - (r.into_stream(), new_stream_id) - } - - async fn unsubscribe(&self, uid: StreamId) { - let mut inner = self.inner.write().await; - inner.subscribers.remove(&uid); - } -} - -#[cfg(test)] -pub mod test { - use crate::{event_stream::EventStream, StreamExt}; - use async_compatibility_layer::art::{async_sleep, async_spawn}; - use std::time::Duration; - - #[derive(Clone, Debug, PartialEq, Eq)] - enum TestMessage { - One, - Two, - Three, - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_channel_stream_basic() { - use crate::task::FilterEvent; - - use super::ChannelStream; - - let channel_stream = ChannelStream::::new(); - let (mut stream, _) = channel_stream.subscribe(FilterEvent::default()).await; - let dup_channel_stream = channel_stream.clone(); - - let dup_dup_channel_stream = channel_stream.clone(); - - async_spawn(async move { - let (mut stream, _) = dup_channel_stream.subscribe(FilterEvent::default()).await; - assert!(stream.next().await.unwrap() == TestMessage::Three); - assert!(stream.next().await.unwrap() == TestMessage::One); - assert!(stream.next().await.unwrap() == TestMessage::Two); - }); - - async_spawn(async move { - dup_dup_channel_stream.publish(TestMessage::Three).await; - dup_dup_channel_stream.publish(TestMessage::One).await; - dup_dup_channel_stream.publish(TestMessage::Two).await; - }); - async_sleep(Duration::new(3, 0)).await; - - assert!(stream.next().await.unwrap() == TestMessage::Three); - assert!(stream.next().await.unwrap() == TestMessage::One); - assert!(stream.next().await.unwrap() == TestMessage::Two); - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_channel_stream_xtreme() { - use crate::task::FilterEvent; - - use super::ChannelStream; - - let channel_stream = ChannelStream::::new(); - let mut streams = Vec::new(); - - for _i in 0..1000 { - let dup_channel_stream = channel_stream.clone(); - let (stream, _) = dup_channel_stream.subscribe(FilterEvent::default()).await; - streams.push(stream); - } - - let dup_dup_channel_stream = channel_stream.clone(); - - for _i in 0..1000 { - let mut stream = streams.pop().unwrap(); - async_spawn(async move { - for event in [TestMessage::One, TestMessage::Two, TestMessage::Three] { - for _ in 0..100 { - assert!(stream.next().await.unwrap() == event); - } - } - }); - } - - async_spawn(async move { - for event in [TestMessage::One, TestMessage::Two, TestMessage::Three] { - for _ in 0..100 { - dup_dup_channel_stream.publish(event.clone()).await; - } - } - }); - } -} diff --git a/task/src/global_registry.rs b/task/src/global_registry.rs deleted file mode 100644 index 1977c21c76..0000000000 --- a/task/src/global_registry.rs +++ /dev/null @@ -1,214 +0,0 @@ -use async_lock::RwLock; -use either::Either; -use futures::{future::BoxFuture, FutureExt}; -use std::{ - collections::{BTreeMap, BTreeSet}, - ops::Deref, - sync::Arc, -}; - -use crate::task_state::{TaskState, TaskStatus}; - -/// function to shut down gobal registry -#[derive(Clone)] -pub struct ShutdownFn(pub Arc BoxFuture<'static, ()> + Sync + Send>); - -// TODO this might cleaner as `run()` -// but then this pattern should change everywhere -impl Deref for ShutdownFn { - type Target = dyn Fn() -> BoxFuture<'static, ()> + Sync + Send; - - fn deref(&self) -> &Self::Target { - &*self.0 - } -} - -/// id of task. Usize instead of u64 because -/// used for primarily for indexing -pub type HotShotTaskId = usize; - -/// the global registry provides a place to: -/// - inquire about the state of various tasks -/// - gracefully shut down tasks -#[derive(Debug, Clone)] -pub struct GlobalRegistry { - /// up-to-date shared list of statuses - /// only used if `state_cache` is out of date - /// or if appending - state_list: Arc>>, - /// possibly stale read version of state - /// NOTE: must include entire state in order to - /// support both incrementing and reading. - /// Writing to the status should gracefully shut down the task - state_cache: BTreeMap, -} - -/// function to modify state -#[allow(clippy::type_complexity)] -struct Modifier(Box Either + Send>); - -impl Default for GlobalRegistry { - fn default() -> Self { - Self::new() - } -} - -impl GlobalRegistry { - /// create new registry - #[must_use] - pub fn new() -> Self { - Self { - state_list: Arc::new(RwLock::new(BTreeMap::default())), - state_cache: BTreeMap::default(), - } - } - - /// register with the global registry - /// return a function to the caller (task) that can be used to deregister - /// returns a function to call to shut down the task - /// and the unique identifier of the task - pub async fn register(&mut self, name: &str, status: TaskState) -> (ShutdownFn, HotShotTaskId) { - let mut list = self.state_list.write().await; - let next_id = list - .last_key_value() - .map(|(k, _v)| k) - .copied() - .unwrap_or_default() - + 1; - let new_entry = (status.clone(), name.to_string()); - let new_entry_dup = new_entry.0.clone(); - list.insert(next_id, new_entry.clone()); - - self.state_cache.insert(next_id, new_entry); - - let shutdown_fn = ShutdownFn(Arc::new(move || { - new_entry_dup.set_state(TaskStatus::Completed); - async move {}.boxed() - })); - (shutdown_fn, next_id) - } - - /// update the cache - async fn update_cache(&mut self) { - // NOTE: this can be done much more cleverly - // avoid one intersection by comparing max keys (constant time op vs O(n + m)) - // and debatable how often the other op needs to be run - // probably much much less often - let list = self.state_list.read().await; - let list_keys: BTreeSet = list.keys().copied().collect(); - let cache_keys: BTreeSet = self.state_cache.keys().copied().collect(); - // bleh not as efficient - let missing_key_list = list_keys.difference(&cache_keys); - let expired_key_list = cache_keys.difference(&list_keys); - - for expired_key in expired_key_list { - self.state_cache.remove(expired_key); - } - - for key in missing_key_list { - // technically shouldn't be possible for this to be none since - // we have a read lock - // nevertheless, this seems easier - if let Some(val) = list.get(key) { - self.state_cache.insert(*key, val.clone()); - } - } - } - - /// internal function to run `modifier` on `uid` - /// if it exists - async fn operate_on_task( - &mut self, - uid: HotShotTaskId, - modifier: Modifier, - ) -> Either { - // the happy path - if let Some(ele) = self.state_cache.get(&uid) { - modifier.0(&ele.0) - } - // the sad path - else { - self.update_cache().await; - if let Some(ele) = self.state_cache.get(&uid) { - modifier.0(&ele.0) - } else { - Either::Right(false) - } - } - } - - /// set `uid`'s state to paused - /// returns true upon success and false if `uid` is not registered - pub async fn pause_task(&mut self, uid: HotShotTaskId) -> bool { - let modifier = Modifier(Box::new(|state| { - state.set_state(TaskStatus::Paused); - Either::Right(true) - })); - match self.operate_on_task(uid, modifier).await { - Either::Left(_) => unreachable!(), - Either::Right(b) => b, - } - } - - /// set `uid`'s state to running - /// returns true upon success and false if `uid` is not registered - pub async fn run_task(&mut self, uid: HotShotTaskId) -> bool { - let modifier = Modifier(Box::new(|state| { - state.set_state(TaskStatus::Running); - Either::Right(true) - })); - match self.operate_on_task(uid, modifier).await { - Either::Left(_) => unreachable!(), - Either::Right(b) => b, - } - } - - /// if the `uid` is registered with the global registry - /// return its task status - /// this is a way to subscribe to state changes from the taskstatus - /// since `HotShotTaskStatus` implements stream - pub async fn get_task_state(&mut self, uid: HotShotTaskId) -> Option { - let modifier = Modifier(Box::new(|state| Either::Left(state.get_status()))); - match self.operate_on_task(uid, modifier).await { - Either::Left(state) => Some(state), - Either::Right(false) => None, - Either::Right(true) => unreachable!(), - } - } - - /// shut down a task from a different thread - /// returns true if succeeded - /// returns false if the task does not exist - pub async fn shutdown_task(&mut self, uid: usize) -> bool { - let modifier = Modifier(Box::new(|state| { - state.set_state(TaskStatus::Completed); - Either::Right(true) - })); - let result = match self.operate_on_task(uid, modifier).await { - Either::Left(_) => unreachable!(), - Either::Right(b) => b, - }; - let mut list = self.state_list.write().await; - list.remove(&uid); - result - } - - /// checks if all registered tasks have completed - pub async fn is_shutdown(&mut self) -> bool { - let task_list = self.state_list.read().await; - for task in (*task_list).values() { - if task.0.get_status() != TaskStatus::Completed { - return false; - } - } - true - } - - /// shut down all tasks in registry - pub async fn shutdown_all(&mut self) { - let mut task_list = self.state_list.write().await; - while let Some((_uid, task)) = task_list.pop_last() { - task.0.set_state(TaskStatus::Completed); - } - } -} diff --git a/task/src/lib.rs b/task/src/lib.rs index 918a0eaded..cf71eb7090 100644 --- a/task/src/lib.rs +++ b/task/src/lib.rs @@ -1,385 +1,8 @@ -//! Abstractions meant for usage with long running consensus tasks -//! and testing harness +//! Task primatives for `HotShot` -use crate::task::PassType; -use either::Either; -use event_stream::SendableStream; -use Poll::{Pending, Ready}; -// The spawner of the task should be able to fire and forget the task if it makes sense. -use futures::{stream::Fuse, Future, Stream, StreamExt}; -use std::{ - pin::Pin, - slice::SliceIndex, - sync::Arc, - task::{Context, Poll}, -}; -// NOTE use pin_project here because we're already bring in procedural macros elsewhere -// so there is no reason to use pin_project_lite -use pin_project::pin_project; - -/// Astractions over the state of a task and a stream -/// interface for task changes. Allows in the happy path -/// for lockless manipulation of tasks -/// and in the sad case, only the use of a `std::sync::mutex` -pub mod task_state; - -/// the global registry storing the status of all tasks -/// as well as the abiliity to terminate them -pub mod global_registry; - -/// mpmc streamable to all subscribed tasks -pub mod event_stream; - -/// The `HotShot` Task. The main point of this library. Uses all other abstractions -/// to create an abstraction over tasks +/// Simple Dependecy types +pub mod dependency; +/// Task which can uses dependencies +pub mod dependency_task; +/// Basic task types pub mod task; - -/// The hotshot task launcher. Useful for constructing tasks -pub mod task_launcher; - -/// the task implementations with different features -pub mod task_impls; - -/// merge `N` streams of the same type -#[pin_project] -pub struct MergeN { - /// Streams to be merged. - #[pin] - streams: Vec>, - /// idx to start polling - idx: usize, -} - -impl MergeN { - /// create a new stream - #[must_use] - pub fn new(streams: Vec) -> MergeN { - let fused_streams = streams.into_iter().map(StreamExt::fuse).collect(); - MergeN { - streams: fused_streams, - idx: 0, - } - } -} - -impl PassType for T {} - -impl SendableStream for MergeN {} - -// NOTE: yoinked from https://github.com/yoshuawuyts/futures-concurrency/ -// we should really just use `futures-concurrency`. I'm being lazy here -// and not bringing in yet another dependency. Note: their merge is implemented much -// more cleverly than this rather naive impl - -// NOTE: If this is implemented through the trait, this will work on both vecs and -// slices. -// -// From: https://github.com/rust-lang/rust/pull/78370/files -/// Get a pinned mutable pointer from a list. -pub(crate) fn get_pin_mut_from_vec( - slice: Pin<&mut Vec>, - index: I, -) -> Option> -where - I: SliceIndex<[T]>, -{ - // SAFETY: `get_unchecked_mut` is never used to move the slice inside `self` (`SliceIndex` - // is sealed and all `SliceIndex::get_mut` implementations never move elements). - // `x` is guaranteed to be pinned because it comes from `self` which is pinned. - unsafe { - slice - .get_unchecked_mut() - .get_mut(index) - .map(|x| Pin::new_unchecked(x)) - } -} - -impl Stream for MergeN { - // idx of the stream, item - type Item = (usize, ::Item); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - - let idx = *me.idx; - *me.idx = (idx + 1) % me.streams.len(); - - let first_half = idx..me.streams.len(); - let second_half = 0..idx; - - let iterator = first_half.chain(second_half); - - let mut done = false; - - for i in iterator { - let stream = get_pin_mut_from_vec(me.streams.as_mut(), i).unwrap(); - - match stream.poll_next(cx) { - Ready(Some(val)) => return Ready(Some((i, val))), - Ready(None) => {} - Pending => done = false, - } - } - - if done { - Ready(None) - } else { - Pending - } - } -} - -// NOTE: yoinked /from async-std -// except this is executor agnostic (doesn't rely on async-std streamext/fuse) -// NOTE: usage of this is for combining streams into one main stream -// for usage with `MessageStream` -// TODO move this to async-compatibility-layer -#[pin_project] -/// Stream type that merges two underlying streams -pub struct Merge { - /// first stream to merge - #[pin] - a: Fuse, - /// second stream to merge - #[pin] - b: Fuse, - /// When `true`, poll `a` first, otherwise, `poll` b`. - a_first: bool, -} - -impl Merge { - /// create a new Merged stream - pub fn new(a: T, b: U) -> Merge - where - T: Stream, - U: Stream, - { - Merge { - a: a.fuse(), - b: b.fuse(), - a_first: true, - } - } -} - -impl Stream for Merge -where - T: Stream, - U: Stream, -{ - type Item = Either; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - let a_first = *me.a_first; - - // Toggle the flag - *me.a_first = !a_first; - - poll_next(me.a, me.b, cx, a_first) - } - - fn size_hint(&self) -> (usize, Option) { - let (a_lower, a_upper) = self.a.size_hint(); - let (b_lower, b_upper) = self.b.size_hint(); - - let upper = match (a_upper, b_upper) { - (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper), - _ => None, - }; - - (a_lower + b_lower, upper) - } -} - -impl SendableStream for Merge -where - T: Stream + Send + Sync + 'static, - U: Stream + Send + Sync + 'static, -{ -} - -/// poll the next item in the merged stream -fn poll_next( - first: Pin<&mut T>, - second: Pin<&mut U>, - cx: &mut Context<'_>, - order: bool, -) -> Poll>> -where - T: Stream, - U: Stream, -{ - let mut done = true; - - // there's definitely a better way to do this - if order { - match first.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Left(val))), - Ready(None) => {} - Pending => done = false, - } - - match second.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Right(val))), - Ready(None) => {} - Pending => done = false, - } - } else { - match second.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Right(val))), - Ready(None) => {} - Pending => done = false, - } - - match first.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Left(val))), - Ready(None) => {} - Pending => done = false, - } - } - - if done { - Ready(None) - } else { - Pending - } -} - -/// gotta make the futures sync -pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; - -/// may be treated as a stream -#[pin_project(project = ProjectedStreamableThing)] -pub struct GeneratedStream { - // todo maybe type wrapper is in order - /// Stream generator. - generator: Arc Option> + Sync + Send>, - /// Optional in-progress future. - in_progress_fut: Option>, -} - -impl GeneratedStream { - /// create a generator - pub fn new( - generator: Arc Option> + Sync + Send>, - ) -> Self { - GeneratedStream { - generator, - in_progress_fut: None, - } - } -} - -impl Stream for GeneratedStream { - type Item = O; - - fn poll_next( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let projection = self.project(); - match projection.in_progress_fut { - Some(fut) => { - // NOTE: this is entirely safe. - // We will ONLY poll if we've been awakened. - // otherwise, we won't poll. - match fut.as_mut().poll(cx) { - Ready(val) => { - *projection.in_progress_fut = None; - Poll::Ready(Some(val)) - } - Pending => Poll::Pending, - } - } - None => { - let wrapped_fut = (*projection.generator)(); - let Some(mut fut) = wrapped_fut else { - return Poll::Ready(None); - }; - match fut.as_mut().poll(cx) { - Ready(val) => { - *projection.in_progress_fut = None; - Poll::Ready(Some(val)) - } - Pending => { - *projection.in_progress_fut = Some(fut); - Poll::Pending - } - } - } - } - } -} - -/// yoinked from futures crate -pub fn assert_future(future: F) -> F -where - F: Future, -{ - future -} - -/// yoinked from futures crate, adds sync bound that we need -pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> -where - F: Future + Sized + Send + Sync + 'a, -{ - assert_future::(Box::pin(fut)) -} - -impl SendableStream for GeneratedStream {} - -#[cfg(test)] -pub mod test { - use crate::{boxed_sync, Arc, GeneratedStream, StreamExt}; - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_stream_basic() { - let mut stream = GeneratedStream:: { - generator: Arc::new(move || { - let closure = async move { 5 }; - Some(boxed_sync(closure)) - }), - in_progress_fut: None, - }; - assert!(stream.next().await == Some(5)); - assert!(stream.next().await == Some(5)); - assert!(stream.next().await == Some(5)); - assert!(stream.next().await == Some(5)); - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_stream_fancy() { - use async_compatibility_layer::art::async_sleep; - use std::{sync::atomic::Ordering, time::Duration}; - - let value = Arc::::default(); - let mut stream = GeneratedStream:: { - generator: Arc::new(move || { - let value = value.clone(); - let closure = async move { - let actual_value = value.load(Ordering::Relaxed); - value.store(actual_value + 1, Ordering::Relaxed); - async_sleep(Duration::new(0, 500)).await; - u32::from(actual_value) - }; - Some(boxed_sync(closure)) - }), - in_progress_fut: None, - }; - assert!(stream.next().await == Some(0)); - assert!(stream.next().await == Some(1)); - assert!(stream.next().await == Some(2)); - assert!(stream.next().await == Some(3)); - } -} diff --git a/task/src/task.rs b/task/src/task.rs index 8435ff0fcf..e87f3465d5 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -1,637 +1,454 @@ -use std::{ - fmt::{Debug, Formatter}, - ops::Deref, - pin::Pin, - task::{Context, Poll}, -}; - -use async_compatibility_layer::art::async_yield_now; -use either::Either::{self, Left, Right}; -use futures::{future::BoxFuture, stream::Fuse, Future, FutureExt, Stream, StreamExt}; -use pin_project::pin_project; use std::sync::Arc; - -use crate::{ - event_stream::{EventStream, SendableStream, StreamId}, - global_registry::{GlobalRegistry, HotShotTaskId, ShutdownFn}, - task_impls::TaskBuilder, - task_state::{TaskState, TaskStatus}, +use std::time::Duration; + +use async_broadcast::{Receiver, SendError, Sender}; +use async_compatibility_layer::art::async_timeout; +#[cfg(async_executor_impl = "async-std")] +use async_std::{ + sync::RwLock, + task::{spawn, JoinHandle}, }; +use futures::{future::select_all, Future}; -/// restrictions on types we wish to pass around. -/// Includes messages and events -pub trait PassType: Clone + Debug + Sync + Send + 'static {} +#[cfg(async_executor_impl = "async-std")] +use futures::future::join_all; -/// the task state -pub trait TS: Sync + Send + 'static {} +#[cfg(async_executor_impl = "tokio")] +use futures::future::try_join_all; -/// a task error that has nice qualities -#[allow(clippy::module_name_repetitions)] -pub trait TaskErr: std::error::Error + Sync + Send + 'static {} - -impl TaskErr for T {} +#[cfg(async_executor_impl = "tokio")] +use tokio::{ + sync::RwLock, + task::{spawn, JoinHandle}, +}; +use tracing::error; -/// group of types needed for a hotshot task -pub trait HotShotTaskTypes: 'static { - /// the event type from the event stream - type Event: PassType; - /// the state of the task - type State: TS; - /// the global event stream - type EventStream: EventStream; - /// the message stream to receive - type Message: PassType; - /// the steam of messages from other tasks - type MessageStream: SendableStream; - /// the error to return - type Error: TaskErr + 'static + ?Sized; +use crate::{ + dependency::Dependency, + dependency_task::{DependencyTask, HandleDepOutput}, +}; - /// build a task - /// NOTE: done here and not on `TaskBuilder` because - /// we want specific checks done on each variant - /// NOTE: all generics implement `Sized`, but this bound is - /// NOT applied to `Self` unless we specify - fn build(builder: TaskBuilder) -> HST +/// Type for mutable task state that can be used as the state for a `Task` +pub trait TaskState: Send { + /// Type of event sent and received by the task + type Event: Clone + Send + Sync + 'static; + /// The result returned when this task compeltes + type Output: Send; + /// Handle event and update state. Return true if the task is finished + /// false otherwise. The handler can access the state through `Task::state_mut` + fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> impl Future> + Send where Self: Sized; -} -/// hot shot task -#[pin_project(project = ProjectedHST)] -#[allow(clippy::type_complexity)] -pub struct HST { - /// Optional ID of the stream. - pub(crate) stream_id: Option, - /// the eventual return value, post-cleanup - r_val: Option, - /// if we have a future for tracking shutdown progress - in_progress_shutdown_fut: Option>, - /// the in progress future - in_progress_fut: Option, HSTT::State)>>, - /// name of task - name: String, - /// state of the task - /// TODO make this boxed. We don't want to assume this is a small future. - /// since it concievably may be stored on the stack - #[pin] - status: TaskState, - /// functions performing cleanup - /// one should shut down the task - /// if we're tracking with a global registry - /// the other should unsubscribe from the stream - shutdown_fns: Vec, - /// shared stream - event_stream: MaybePinnedEventStream, - /// stream of messages - message_stream: Option>>>, - /// state - state: Option, - /// handler for events - handle_event: Option>, - /// handler for messages - handle_message: Option>, - /// task id - pub(crate) tid: Option, + /// Return true if the event should be filtered + fn filter(&self, _event: &Self::Event) -> bool { + // default doesn't filter + false + } + /// Do something with the result of the task before it shuts down + fn handle_result(&self, _res: &Self::Output) -> impl std::future::Future + Send { + async {} + } + /// Return true if the event should shut the task down + fn should_shutdown(event: &Self::Event) -> bool; + /// Handle anything before the task is completely shutdown + fn shutdown(&mut self) -> impl std::future::Future + Send { + async {} + } } -/// an option of a pinned boxed fused event stream -pub type MaybePinnedEventStream = - Option::EventStream as EventStream>::StreamType>>>>; - -/// ADT for wrapping all possible handler types -#[allow(dead_code)] -pub(crate) enum HotShotTaskHandler { - /// handle an event - HandleEvent(HandleEvent), - /// handle a message - HandleMessage(HandleMessage), - /// filter an event - FilterEvent(FilterEvent), - /// deregister with the registry - Shutdown(ShutdownFn), +/// Task state for a test. Similar to `TaskState` but it handles +/// messages as well as events. Messages are events that are +/// external to this task. (i.e. a test message would be an event from non test task) +/// This is used as state for `TestTask` and messages can come from many +/// different input streams. +pub trait TestTaskState: Send { + /// Message type handled by the task + type Message: Clone + Send + Sync + 'static; + /// Result returned by the test task on completion + type Output: Send; + /// The state type + type State: TaskState; + /// Handle and incoming message and return `Some` if the task is finished + fn handle_message( + message: Self::Message, + id: usize, + task: &mut TestTask, + ) -> impl Future> + Send + where + Self: Sized; } -/// Type wrapper for handling an event -#[allow(clippy::type_complexity)] -pub struct HandleEvent( - pub Arc< - dyn Fn( - HSTT::Event, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)> - + Sync - + Send, - >, -); - -impl Default for HandleEvent { - fn default() -> Self { - Self(Arc::new(|_event, state| { - async move { (None, state) }.boxed() - })) - } +/// A basic task which loops waiting for events to come from `event_receiver` +/// and then handles them using it's state +/// It sends events to other `Task`s through `event_sender` +/// This should be used as the primary building block for long running +/// or medium running tasks (i.e. anything that can't be described as a dependency task) +pub struct Task { + /// Sends events all tasks including itself + event_sender: Sender, + /// Receives events that are broadcast from any task, including itself + event_receiver: Receiver, + /// Contains this task, used to register any spawned tasks + registry: Arc, + /// The state of the task. It is fed events from `event_sender` + /// and mutates it state ocordingly. Also it signals the task + /// if it is complete/should shutdown + state: S, } -impl Deref for HandleEvent { - type Target = dyn Fn( - HSTT::Event, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)>; - - fn deref(&self) -> &Self::Target { - &*self.0 +impl Task { + /// Create a new task + pub fn new( + tx: Sender, + rx: Receiver, + registry: Arc, + state: S, + ) -> Self { + Task { + event_sender: tx, + event_receiver: rx, + registry, + state, + } } -} - -/// Type wrapper for handling a message -#[allow(clippy::type_complexity)] -pub struct HandleMessage( - pub Arc< - dyn Fn( - HSTT::Message, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)> - + Sync - + Send, - >, -); -impl Deref for HandleMessage { - type Target = dyn Fn( - HSTT::Message, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)>; - - fn deref(&self) -> &Self::Target { - &*self.0 + /// Spawn the task loop, consuming self. Will continue until + /// the task reaches some shutdown condition + pub fn run(mut self) -> JoinHandle<()> { + spawn(async move { + loop { + match self.event_receiver.recv_direct().await { + Ok(event) => { + if S::should_shutdown(&event) { + self.state.shutdown().await; + break; + } + if self.state.filter(&event) { + continue; + } + if let Some(res) = S::handle_event(event, &mut self).await { + self.state.handle_result(&res).await; + self.state.shutdown().await; + break; + } + } + Err(e) => { + tracing::error!("Failed to receiving from event stream Error: {}", e); + } + } + } + }) } -} - -/// Return `true` if the event should be filtered -#[derive(Clone)] -pub struct FilterEvent(pub Arc bool + Send + 'static + Sync>); -impl Default for FilterEvent { - fn default() -> Self { - Self(Arc::new(|_| true)) + /// Create a new event `Receiver` from this Task's receiver. + /// The returned receiver will get all messages not yet seen by this task + pub fn subscribe(&self) -> Receiver { + self.event_receiver.clone() } -} - -impl Deref for FilterEvent { - type Target = dyn Fn(&EVENT) -> bool + Send + 'static + Sync; - - fn deref(&self) -> &Self::Target { - &*self.0 + /// Get a new sender handle for events + pub fn sender(&self) -> &Sender { + &self.event_sender } -} - -impl HST { - /// Do a consistency check on the `HST` construction - pub(crate) fn base_check(&self) { - assert!(!self.shutdown_fns.is_empty(), "No shutdown functions"); - assert!( - self.in_progress_fut.is_none(), - "This future has already been polled" - ); - - assert!(self.state.is_some(), "Didn't register state"); - - assert!(self.tid.is_some(), "Didn't register global registry"); + /// Clone the sender handle + pub fn clone_sender(&self) -> Sender { + self.event_sender.clone() } - - /// perform event sanity checks - pub(crate) fn event_check(&self) { - assert!( - self.shutdown_fns.len() == 2, - "Expected 2 shutdown functions" - ); - assert!(self.event_stream.is_some(), "Didn't register event stream"); - assert!(self.handle_event.is_some(), "Didn't register event handler"); + /// Broadcast a message to all listening tasks + /// # Errors + /// Errors if the broadcast fails + pub async fn send(&self, event: S::Event) -> Result, SendError> { + self.event_sender.broadcast(event).await } - - /// perform message sanity checks - pub(crate) fn message_check(&self) { - assert!( - self.handle_message.is_some(), - "Didn't register message handler" - ); - assert!( - self.message_stream.is_some(), - "Didn't register message stream" - ); + /// Get a mutable reference to this tasks state + pub fn state_mut(&mut self) -> &mut S { + &mut self.state } - - /// register a handler with the task - #[must_use] - pub(crate) fn register_handler(self, handler: HotShotTaskHandler) -> Self { - match handler { - HotShotTaskHandler::HandleEvent(handler) => Self { - handle_event: Some(handler), - ..self - }, - HotShotTaskHandler::HandleMessage(handler) => Self { - handle_message: Some(handler), - ..self - }, - HotShotTaskHandler::FilterEvent(_handler) => unimplemented!(), - HotShotTaskHandler::Shutdown(_handler) => unimplemented!(), - } + /// Spawn a new task adn register it. It will get all events not seend + /// by the task creating it. + pub async fn run_sub_task(&self, state: S) { + let task = Task { + event_sender: self.clone_sender(), + event_receiver: self.subscribe(), + registry: self.registry.clone(), + state, + }; + // Note: await here is only awaiting the task to be added to the + // registry, not for the task to run. + self.registry.run_task(task).await; } +} - /// register an event stream with the task - pub(crate) async fn register_event_stream( - self, - event_stream: HSTT::EventStream, - filter: FilterEvent, - ) -> Self { - let (stream, uid) = event_stream.subscribe(filter).await; - - let mut shutdown_fns = self.shutdown_fns; - { - let event_stream = event_stream.clone(); - shutdown_fns.push(ShutdownFn(Arc::new(move || -> BoxFuture<'static, ()> { - let event_stream = event_stream.clone(); - async move { - event_stream.clone().unsubscribe(uid).await; - } - .boxed() - }))); - } - // TODO perhaps GC the event stream - // (unsunscribe) - Self { - event_stream: Some(Box::pin(stream.fuse())), - shutdown_fns, - stream_id: Some(uid), - ..self - } - } +/// Similar to `Task` but adds functionality for testing. Notably +/// it adds message receivers to collect events from many non-test tasks +pub struct TestTask { + /// Task which handles test events + task: Task, + /// Receivers for outside events + message_receivers: Vec>, +} - /// register a message with the task - #[must_use] - pub(crate) fn register_message_stream(self, stream: HSTT::MessageStream) -> Self { +impl< + S: TaskState + Send + 'static, + T: TestTaskState + Send + Sync + 'static, + > TestTask +{ + /// Create a test task + pub fn new(task: Task, rxs: Vec>) -> Self { Self { - message_stream: Some(Box::pin(stream.fuse())), - ..self + task, + message_receivers: rxs, } } + /// Runs the task, taking events from the the test events and the message receivers. + /// Consumes self and runs until some shutdown condition is met. + /// The join handle will return the result of the task, useful for deciding if the test + /// passed or not. + pub fn run(mut self) -> JoinHandle { + spawn(async move { + loop { + let mut futs = vec![]; + + if let Ok(event) = self.task.event_receiver.try_recv() { + if S::should_shutdown(&event) { + self.task.state.shutdown().await; + tracing::error!("Shutting down test task TODO!"); + todo!(); + } + if !self.state().filter(&event) { + if let Some(res) = S::handle_event(event, &mut self.task).await { + self.task.state.handle_result(&res).await; + self.task.state.shutdown().await; + return res; + } + } + } - /// register state with the task - #[must_use] - pub(crate) fn register_state(self, state: HSTT::State) -> Self { - Self { - state: Some(state), - ..self - } + for rx in &mut self.message_receivers { + futs.push(rx.recv()); + } + // if let Ok((Ok(msg), id, _)) = + match async_timeout(Duration::from_secs(1), select_all(futs)).await { + Ok((Ok(msg), id, _)) => { + if let Some(res) = T::handle_message(msg, id, &mut self).await { + self.task.state.handle_result(&res).await; + self.task.state.shutdown().await; + return res; + } + } + Err(e) => { + error!("Failed to get event from task. Error: {:?}", e); + } + Ok((Err(e), _, _)) => { + error!("A task channel returned an Error: {:?}", e); + } + } + } + }) } - /// register with the registry - pub(crate) async fn register_registry(self, registry: &mut GlobalRegistry) -> Self { - let (shutdown_fn, id) = registry.register(&self.name, self.status.clone()).await; - let mut shutdown_fns = self.shutdown_fns; - shutdown_fns.push(shutdown_fn); - Self { - shutdown_fns, - tid: Some(id), - ..self - } + /// Get a ref to state + pub fn state(&self) -> &S { + &self.task.state } - - /// create a new task - pub(crate) fn new(name: String) -> Self { - Self { - stream_id: None, - r_val: None, - name, - status: TaskState::new(), - event_stream: None, - state: None, - handle_event: None, - handle_message: None, - shutdown_fns: vec![], - message_stream: None, - in_progress_fut: None, - in_progress_shutdown_fut: None, - tid: None, - } + /// Get a mutable ref to state + pub fn state_mut(&mut self) -> &mut S { + self.task.state_mut() } - - /// launch the task - /// NOTE: the only way to get a `HST` is by usage - /// of one of the impls. Those all have checks enabled. - /// So, it should be safe to launch. - pub fn launch(self) -> BoxFuture<'static, HotShotTaskCompleted> { - Box::pin(self) + /// Send an event to other listening test tasks + /// + /// # Panics + /// panics if the event can't be sent (ok to panic in test) + pub async fn send_event(&self, event: S::Event) { + self.task.send(event).await.unwrap(); } } -/// enum describing how the tasks completed -pub enum HotShotTaskCompleted { - /// the task shut down successfully - ShutDown, - /// the task encountered an error - Error(Box), - /// the streams the task was listening for died - StreamsDied, - /// we somehow lost the state - /// this is definitely a bug. - LostState, - /// lost the return value somehow - LostReturnValue, - /// Stream exists but missing handler - MissingHandler, +#[derive(Default)] +/// A collection of tasks which can handle shutdown +pub struct TaskRegistry { + /// Tasks this registry controls + task_handles: RwLock>>, } -impl std::fmt::Debug for HotShotTaskCompleted { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - HotShotTaskCompleted::ShutDown => f.write_str("HotShotTaskCompleted::ShutDown"), - HotShotTaskCompleted::Error(_) => f.write_str("HotShotTaskCompleted::Error"), - HotShotTaskCompleted::StreamsDied => f.write_str("HotShotTaskCompleted::StreamsDied"), - HotShotTaskCompleted::LostState => f.write_str("HotShotTaskCompleted::LostState"), - HotShotTaskCompleted::LostReturnValue => { - f.write_str("HotShotTaskCompleted::LostReturnValue") - } - HotShotTaskCompleted::MissingHandler => { - f.write_str("HotShotTaskCompleted::MissingHandler") - } - } +impl TaskRegistry { + /// Add a task to the registry + pub async fn register(&self, handle: JoinHandle<()>) { + self.task_handles.write().await.push(handle); } -} - -impl PartialEq for HotShotTaskCompleted { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::Error(_l0), Self::Error(_r0)) => false, - _ => core::mem::discriminant(self) == core::mem::discriminant(other), + /// Try to cancel/abort the task this registry has + pub async fn shutdown(&self) { + let mut handles = self.task_handles.write().await; + while let Some(handle) = handles.pop() { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); } } -} - -impl<'pin, HSTT: HotShotTaskTypes> ProjectedHST<'pin, HSTT> { - /// launches the shutdown future - fn launch_shutdown_fut(&mut self, cx: &mut Context<'_>) -> Poll { - let fut = self.create_shutdown_fut(); - self.check_ip_shutdown_fut(fut, cx) + /// Take a task, run it, and register it + pub async fn run_task(&self, task: Task) + where + S: TaskState + Send + 'static, + { + self.register(task.run()).await; } - - /// checks the in progress shutdown future, `fut` - fn check_ip_shutdown_fut( - &mut self, - mut fut: Pin + Send>>, - cx: &mut Context<'_>, - ) -> Poll { - match fut.as_mut().poll(cx) { - Poll::Ready(()) => Poll::Ready( - self.r_val - .take() - .unwrap_or_else(|| HotShotTaskCompleted::LostReturnValue), - ), - Poll::Pending => { - *self.in_progress_shutdown_fut = Some(fut); - Poll::Pending - } - } + /// Create a new `DependencyTask` run it, and register it + pub async fn spawn_dependency_task( + &self, + dep: impl Dependency + Send + 'static, + handle: impl HandleDepOutput, + ) { + let join_handle = DependencyTask { dep, handle }.run(); + self.register(join_handle).await; } - - /// creates the shutdown future and returns it - fn create_shutdown_fut(&mut self) -> Pin + Send>> { - let shutdown_fns = self.shutdown_fns.clone(); - let fut = async move { - for shutdown_fn in shutdown_fns { - shutdown_fn().await; - } - } - .boxed(); - fut + /// Wait for the results of all the tasks registered + /// # Panics + /// Panics if one of the tasks paniced + pub async fn join_all(self) -> Vec<()> { + #[cfg(async_executor_impl = "async-std")] + let ret = join_all(self.task_handles.into_inner()).await; + #[cfg(async_executor_impl = "tokio")] + let ret = try_join_all(self.task_handles.into_inner()).await.unwrap(); + ret } +} - /// check the event stream - /// returns either a poll if there's a future IP - /// or a bool stating whether or not the stream is finished - fn check_event_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Either, bool> { - let event_stream = self.event_stream.take(); - if let Some(mut inner_event_stream) = event_stream { - while let Poll::Ready(maybe_event) = inner_event_stream.as_mut().poll_next(cx) { - if let Some(event) = maybe_event { - if let Some(handle_event) = self.handle_event { - let maybe_state = self.state.take(); - if let Some(state) = maybe_state { - let mut fut = handle_event(event, state); - match fut.as_mut().poll(cx) { - Poll::Ready((result, state)) => { - if let Some(completed) = result { - *self.in_progress_fut = None; - *self.state = Some(state); - *self.r_val = Some(completed); - let result = self.launch_shutdown_fut(cx); - *self.event_stream = Some(inner_event_stream); - return Left(result); - } - // run a yield to tell the executor to go do work on other - // tasks if they are available - // this is necessary otherwise we could end up with one - // task that returns really quickly blocking the executor - // from dealing with other tasks. - let mut fut = async move { - async_yield_now().await; - (None, state) - } - .boxed(); - // if the executor has no extra work to do, - // continue to poll the event stream - if let Poll::Ready((_, state)) = fut.as_mut().poll(cx) { - *self.state = Some(state); - *self.in_progress_fut = None; - // NOTE: don't need to set event stream because - // that will be done on the next iteration - continue; - } - // otherwise, return pending and finish executing the - // yield later - *self.event_stream = Some(inner_event_stream); - *self.in_progress_fut = Some(fut); - return Left(Poll::Pending); - } - Poll::Pending => { - *self.in_progress_fut = Some(fut); - *self.event_stream = Some(inner_event_stream); - return Left(Poll::Pending); - } - } - } - // lost state case - *self.r_val = Some(HotShotTaskCompleted::LostState); - let result = self.launch_shutdown_fut(cx); - *self.event_stream = Some(inner_event_stream); - return Left(result); - } - // no handler case - *self.r_val = Some(HotShotTaskCompleted::MissingHandler); - let result = self.launch_shutdown_fut(cx); - *self.event_stream = Some(inner_event_stream); - return Left(result); - } - // this is a fused future so `None` will come every time after the stream - // finishes - *self.event_stream = Some(inner_event_stream); - return Right(true); - } - *self.event_stream = Some(inner_event_stream); - return Right(false); - } - // stream doesn't exist so trivially true - *self.event_stream = event_stream; - Right(true) +#[cfg(test)] +mod tests { + use super::*; + use async_broadcast::broadcast; + #[cfg(async_executor_impl = "async-std")] + use async_std::task::sleep; + use std::{collections::HashSet, time::Duration}; + #[cfg(async_executor_impl = "tokio")] + use tokio::time::sleep; + + #[derive(Default)] + pub struct DummyHandle { + val: usize, + seen: HashSet, } - /// check the message stream - /// returns either a poll if there's a future IP - /// or a bool stating whether or not the stream is finished - fn check_message_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Either, bool> { - let message_stream = self.message_stream.take(); - if let Some(mut inner_message_stream) = message_stream { - while let Poll::Ready(maybe_msg) = inner_message_stream.as_mut().poll_next(cx) { - if let Some(msg) = maybe_msg { - if let Some(handle_msg) = self.handle_message { - let maybe_state = self.state.take(); - if let Some(state) = maybe_state { - let mut fut = handle_msg(msg, state); - match fut.as_mut().poll(cx) { - Poll::Ready((result, state)) => { - if let Some(completed) = result { - *self.in_progress_fut = None; - *self.state = Some(state); - *self.r_val = Some(completed); - let result = self.launch_shutdown_fut(cx); - *self.message_stream = Some(inner_message_stream); - return Left(result); - } - // run a yield to tell the executor to go do work on other - // tasks if they are available - // this is necessary otherwise we could end up with one - // task that returns really quickly blocking the executor - // from dealing with other tasks. - let mut fut = async move { - async_yield_now().await; - (None, state) - } - .boxed(); - // if the executor has no extra work to do, - // continue to poll the event stream - if let Poll::Ready((_, state)) = fut.as_mut().poll(cx) { - *self.state = Some(state); - *self.in_progress_fut = None; - // NOTE: don't need to set event stream because - // that will be done on the next iteration - continue; - } - // otherwise, return pending and finish executing the - // yield later - *self.message_stream = Some(inner_message_stream); - *self.in_progress_fut = Some(fut); - return Left(Poll::Pending); - } - Poll::Pending => { - *self.in_progress_fut = Some(fut); - *self.message_stream = Some(inner_message_stream); - return Left(Poll::Pending); - } - } - } - // lost state case - *self.r_val = Some(HotShotTaskCompleted::LostState); - let result = self.launch_shutdown_fut(cx); - *self.message_stream = Some(inner_message_stream); - return Left(result); - } - // no handler case - *self.r_val = Some(HotShotTaskCompleted::MissingHandler); - let result = self.launch_shutdown_fut(cx); - *self.message_stream = Some(inner_message_stream); - return Left(result); - } - // this is a fused future so `None` will come every time after the stream - // finishes - *self.message_stream = Some(inner_message_stream); - return Right(true); + #[allow(clippy::panic)] + impl TaskState for DummyHandle { + type Event = usize; + type Output = (); + async fn handle_event(event: usize, task: &mut Task) -> Option<()> { + sleep(Duration::from_millis(10)).await; + let state = task.state_mut(); + state.seen.insert(event); + if event > state.val { + state.val = event; + assert!( + state.val < 100, + "Test should shutdown before getting an event for 100" + ); + task.send(event + 1).await.unwrap(); } - *self.message_stream = Some(inner_message_stream); - return Right(false); + None } - // stream doesn't exist so trivially true - *self.message_stream = message_stream; - Right(true) - } -} - -// NOTE: this is a Future, but it could easily be a stream. -// but these are semantically equivalent because instead of -// returning when paused, we just return `Poll::Pending` -impl Future for HST { - type Output = HotShotTaskCompleted; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut projected = self.as_mut().project(); - - if let Some(fut) = projected.in_progress_shutdown_fut.take() { - return projected.check_ip_shutdown_fut(fut, cx); + fn should_shutdown(event: &usize) -> bool { + *event >= 98 } - - // check if task is complete - if let Some(state_change) = projected.status.as_mut().try_next() { - match state_change { - TaskStatus::NotStarted | TaskStatus::Paused => { - return Poll::Pending; - } - TaskStatus::Running => {} - TaskStatus::Completed => { - *projected.r_val = Some(HotShotTaskCompleted::ShutDown); - return projected.launch_shutdown_fut(cx); - } + async fn shutdown(&mut self) { + for i in 1..98 { + assert!(self.seen.contains(&i)); } } + } - // check if there's an in progress future - if let Some(in_progress_fut) = projected.in_progress_fut { - match in_progress_fut.as_mut().poll(cx) { - Poll::Ready((result, state)) => { - *projected.in_progress_fut = None; - *projected.state = Some(state); - // if the future errored out, return it, we're done - if let Some(completed) = result { - *projected.r_val = Some(completed); - return projected.launch_shutdown_fut(cx); - } - } - Poll::Pending => { - return Poll::Pending; - } + impl TestTaskState for DummyHandle { + type Message = String; + type Output = (); + type State = Self; + + async fn handle_message( + message: Self::Message, + _: usize, + _: &mut TestTask, + ) -> Option<()> { + if message == *"done".to_string() { + return Some(()); } + None } - - let event_stream_finished = match projected.check_event_stream(cx) { - Left(result) => return result, - Right(finished) => finished, + } + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[allow(unused_must_use)] + async fn it_works() { + let reg = Arc::new(TaskRegistry::default()); + let (tx, rx) = broadcast(10); + let task1 = Task:: { + event_sender: tx.clone(), + event_receiver: rx.clone(), + registry: reg.clone(), + state: DummyHandle::default(), + }; + tx.broadcast(1).await.unwrap(); + let task2 = Task:: { + event_sender: tx.clone(), + event_receiver: rx, + registry: reg, + state: DummyHandle::default(), }; + let handle = task2.run(); + let _res = task1.run().await; + handle.await; + } - let message_stream_finished = match projected.check_message_stream(cx) { - Left(result) => return result, - Right(finished) => finished, + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 10) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[allow(clippy::should_panic_without_expect)] + #[should_panic] + async fn test_works() { + let reg = Arc::new(TaskRegistry::default()); + let (tx, rx) = broadcast(10); + let (msg_tx, msg_rx) = broadcast(10); + let task1 = Task:: { + event_sender: tx.clone(), + event_receiver: rx.clone(), + registry: reg.clone(), + state: DummyHandle::default(), + }; + tx.broadcast(1).await.unwrap(); + let task2 = Task:: { + event_sender: tx.clone(), + event_receiver: rx, + registry: reg, + state: DummyHandle::default(), + }; + let test1 = TestTask::<_, DummyHandle> { + task: task1, + message_receivers: vec![msg_rx.clone()], + }; + let test2 = TestTask::<_, DummyHandle> { + task: task2, + message_receivers: vec![msg_rx.clone()], }; - if message_stream_finished && event_stream_finished { - tracing::error!("Message and event stream both finished!"); - *projected.r_val = Some(HotShotTaskCompleted::StreamsDied); - let result = projected.launch_shutdown_fut(cx); - return result; + let handle = test1.run(); + let handle2 = test2.run(); + sleep(Duration::from_millis(30)).await; + msg_tx.broadcast("done".into()).await.unwrap(); + #[cfg(async_executor_impl = "tokio")] + { + handle.await.unwrap(); + handle2.await.unwrap(); + } + #[cfg(async_executor_impl = "async-std")] + { + handle.await; + handle2.await; } - - Poll::Pending } } diff --git a/task/src/task_impls.rs b/task/src/task_impls.rs deleted file mode 100644 index 768e011775..0000000000 --- a/task/src/task_impls.rs +++ /dev/null @@ -1,457 +0,0 @@ -use futures::Stream; -use std::marker::PhantomData; - -use crate::{ - event_stream::{DummyStream, EventStream, SendableStream, StreamId}, - global_registry::{GlobalRegistry, HotShotTaskId}, - task::{ - FilterEvent, HandleEvent, HandleMessage, HotShotTaskHandler, HotShotTaskTypes, PassType, - TaskErr, HST, TS, - }, -}; - -/// trait to specify features -pub trait ImplMessageStream {} - -/// trait to specify features -pub trait ImplEventStream {} - -/// builder for task -pub struct TaskBuilder(HST); - -impl TaskBuilder { - /// register an event handler - #[must_use] - pub fn register_event_handler(self, handler: HandleEvent) -> Self - where - HSTT: ImplEventStream, - { - Self( - self.0 - .register_handler(HotShotTaskHandler::HandleEvent(handler)), - ) - } - - /// obtains stream id if it exists - pub fn get_stream_id(&self) -> Option { - self.0.stream_id - } - - /// register a message handler - #[must_use] - pub fn register_message_handler(self, handler: HandleMessage) -> Self - where - HSTT: ImplMessageStream, - { - Self( - self.0 - .register_handler(HotShotTaskHandler::HandleMessage(handler)), - ) - } - - /// register a message stream - #[must_use] - pub fn register_message_stream(self, stream: HSTT::MessageStream) -> Self - where - HSTT: ImplMessageStream, - { - Self(self.0.register_message_stream(stream)) - } - - /// register an event stream - pub async fn register_event_stream( - self, - stream: HSTT::EventStream, - filter: FilterEvent, - ) -> Self - where - HSTT: ImplEventStream, - { - Self(self.0.register_event_stream(stream, filter).await) - } - - /// register the state - #[must_use] - pub fn register_state(self, state: HSTT::State) -> Self { - Self(self.0.register_state(state)) - } - - /// register with the global registry - pub async fn register_registry(self, registry: &mut GlobalRegistry) -> Self { - Self(self.0.register_registry(registry).await) - } - - /// get the task id in the global registry - pub fn get_task_id(&self) -> Option { - self.0.tid - } - - /// create a new task builder - #[must_use] - pub fn new(name: String) -> Self { - Self(HST::new(name)) - } -} - -/// a hotshot task with an event stream -pub struct HSTWithEvent< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - STATE: TS, -> { - /// phantom data - _pd: PhantomData<(ERR, EVENT, ESTREAM, STATE)>, -} - -impl< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - STATE: TS, - > ImplEventStream for HSTWithEvent -{ -} - -impl, STATE: TS> - ImplMessageStream for HSTWithMessage -{ -} - -impl, STATE: TS> - HotShotTaskTypes for HSTWithEvent -{ - type Event = EVENT; - type State = STATE; - type EventStream = ESTREAM; - type Message = (); - type MessageStream = DummyStream; - type Error = ERR; - - fn build(builder: TaskBuilder) -> HST - where - Self: Sized, - { - builder.0.base_check(); - builder.0.event_check(); - builder.0 - } -} - -/// a hotshot task with a message -pub struct HSTWithMessage< - ERR: std::error::Error, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, -> { - /// phantom data - _pd: PhantomData<(ERR, MSG, MSTREAM, STATE)>, -} - -impl, STATE: TS> HotShotTaskTypes - for HSTWithMessage -{ - type Event = (); - type State = STATE; - type EventStream = DummyStream; - type Message = MSG; - type MessageStream = MSTREAM; - type Error = ERR; - - fn build(builder: TaskBuilder) -> HST - where - Self: Sized, - { - builder.0.base_check(); - builder.0.message_check(); - builder.0 - } -} - -/// hotshot task with even and message -pub struct HSTWithEventAndMessage< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, -> { - /// phantom data - _pd: PhantomData<(ERR, EVENT, ESTREAM, MSG, MSTREAM, STATE)>, -} - -impl< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, - > ImplEventStream for HSTWithEventAndMessage -{ -} - -impl< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, - > ImplMessageStream for HSTWithEventAndMessage -{ -} - -impl< - ERR: TaskErr, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: SendableStream, - STATE: TS, - > HotShotTaskTypes for HSTWithEventAndMessage -{ - type Event = EVENT; - type State = STATE; - type EventStream = ESTREAM; - type Message = MSG; - type MessageStream = MSTREAM; - type Error = ERR; - - fn build(builder: TaskBuilder) -> HST - where - Self: Sized, - { - builder.0.base_check(); - builder.0.message_check(); - builder.0.event_check(); - builder.0 - } -} - -#[cfg(test)] -pub mod test { - use async_compatibility_layer::channel::{unbounded, UnboundedStream}; - use snafu::Snafu; - - use crate::{event_stream, event_stream::ChannelStream, task::TS}; - - use super::{HSTWithEvent, HSTWithEventAndMessage, HSTWithMessage}; - use crate::{event_stream::EventStream, task::HotShotTaskTypes, task_impls::TaskBuilder}; - use async_compatibility_layer::art::async_spawn; - use futures::FutureExt; - use std::sync::Arc; - - use crate::{ - global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted}, - }; - use async_compatibility_layer::logging::setup_logging; - - #[derive(Snafu, Debug)] - pub struct Error {} - - #[derive(Clone, Debug, Eq, PartialEq, Hash)] - pub struct State {} - - #[derive(Clone, Debug, Eq, PartialEq, Hash, Default)] - pub struct CounterState { - num_events_recved: u64, - } - - #[derive(Clone, Debug, Eq, PartialEq, Hash)] - pub enum Event { - Finished, - Dummy, - } - - impl TS for State {} - - impl TS for CounterState {} - - #[derive(Clone, Debug, PartialEq, Eq, Hash)] - pub enum Message { - Finished, - Dummy, - } - - // TODO fill in generics for stream - - pub type AppliedHSTWithEvent = HSTWithEvent, State>; - pub type AppliedHSTWithEventCounterState = - HSTWithEvent, CounterState>; - pub type AppliedHSTWithMessage = - HSTWithMessage, State>; - pub type AppliedHSTWithEventMessage = HSTWithEventAndMessage< - Error, - Event, - ChannelStream, - Message, - UnboundedStream, - State, - >; - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[allow(clippy::should_panic_without_expect)] - #[should_panic] - async fn test_init_with_event_stream() { - setup_logging(); - let task = TaskBuilder::::new("Test Task".to_string()); - AppliedHSTWithEvent::build(task).launch().await; - } - - // TODO this should be moved to async-compatibility-layer - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_channel_stream() { - use futures::StreamExt; - let (s, r) = unbounded(); - let mut stream: UnboundedStream = r.into_stream(); - s.send(Message::Dummy).await.unwrap(); - s.send(Message::Finished).await.unwrap(); - assert!(stream.next().await.unwrap() == Message::Dummy); - assert!(stream.next().await.unwrap() == Message::Finished); - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_task_with_event_stream() { - setup_logging(); - let event_stream: event_stream::ChannelStream = event_stream::ChannelStream::new(); - let mut registry = GlobalRegistry::new(); - - let mut task_runner = crate::task_launcher::TaskRunner::default(); - - for i in 0..10000 { - let state = CounterState::default(); - let event_handler = HandleEvent(Arc::new(move |event, mut state: CounterState| { - async move { - if let Event::Dummy = event { - state.num_events_recved += 1; - } - - if state.num_events_recved == 100 { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - (None, state) - } - } - .boxed() - })); - let name = format!("Test Task {i:?}").to_string(); - let built_task = TaskBuilder::::new(name.clone()) - .register_event_stream(event_stream.clone(), FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler); - let id = built_task.get_task_id().unwrap(); - let result = AppliedHSTWithEventCounterState::build(built_task).launch(); - task_runner = task_runner.add_task(id, name, result); - } - - async_spawn(async move { - for _ in 0..100 { - event_stream.publish(Event::Dummy).await; - } - }); - - let results = task_runner.launch().await; - for result in results { - assert!(result.1 == HotShotTaskCompleted::ShutDown); - } - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_task_with_event_stream_xtreme() { - setup_logging(); - let event_stream: event_stream::ChannelStream = event_stream::ChannelStream::new(); - - let state = State {}; - - let mut registry = GlobalRegistry::new(); - - let event_handler = HandleEvent(Arc::new(move |event, state| { - async move { - if let Event::Finished = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - (None, state) - } - } - .boxed() - })); - - let built_task = TaskBuilder::::new("Test Task".to_string()) - .register_event_stream(event_stream.clone(), FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler); - event_stream.publish(Event::Dummy).await; - event_stream.publish(Event::Dummy).await; - event_stream.publish(Event::Finished).await; - AppliedHSTWithEvent::build(built_task).launch().await; - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_task_with_message_stream() { - setup_logging(); - let state = State {}; - - let mut registry = GlobalRegistry::new(); - - let (s, r) = async_compatibility_layer::channel::unbounded(); - - let message_handler = HandleMessage(Arc::new(move |message, state| { - async move { - if let Message::Finished = message { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - (None, state) - } - } - .boxed() - })); - - let built_task = TaskBuilder::::new("Test Task".to_string()) - .register_message_handler(message_handler) - .register_message_stream(r.into_stream()) - .register_registry(&mut registry) - .await - .register_state(state); - async_spawn(async move { - s.send(Message::Dummy).await.unwrap(); - s.send(Message::Finished).await.unwrap(); - }); - let result = AppliedHSTWithMessage::build(built_task).launch().await; - assert!(result == HotShotTaskCompleted::ShutDown); - } -} diff --git a/task/src/task_launcher.rs b/task/src/task_launcher.rs deleted file mode 100644 index deff065af2..0000000000 --- a/task/src/task_launcher.rs +++ /dev/null @@ -1,68 +0,0 @@ -use futures::future::{join_all, BoxFuture}; - -use crate::{ - global_registry::{GlobalRegistry, HotShotTaskId}, - task::HotShotTaskCompleted, -}; - -// TODO use genericarray + typenum to make this use the number of tasks as a parameter -/// runner for tasks -/// `N` specifies the number of tasks to ensure that the user -/// doesn't forget how many tasks they wished to add. -pub struct TaskRunner -// < -// const N: usize, -// > -{ - /// internal set of tasks to launch - tasks: Vec<( - HotShotTaskId, - String, - BoxFuture<'static, HotShotTaskCompleted>, - )>, - /// global registry - pub registry: GlobalRegistry, -} - -impl Default for TaskRunner { - fn default() -> Self { - Self::new() - } -} - -impl TaskRunner /* */ { - /// create new runner - #[must_use] - pub fn new() -> Self { - Self { - tasks: Vec::new(), - registry: GlobalRegistry::new(), - } - } - - // `name` is for logging purposes only and may be duplicated or inconsistent. - /// to support builder pattern - #[must_use] - pub fn add_task( - mut self, - id: HotShotTaskId, - name: String, - task: BoxFuture<'static, HotShotTaskCompleted>, - ) -> TaskRunner { - self.tasks.push((id, name, task)); - self - } - - /// returns a `Vec` because type isn't known - #[must_use] - pub async fn launch(self) -> Vec<(String, HotShotTaskCompleted)> { - let names = self - .tasks - .iter() - .map(|(_id, name, _)| name.clone()) - .collect::>(); - let result = join_all(self.tasks.into_iter().map(|(_, _, task)| task)).await; - - names.into_iter().zip(result).collect::>() - } -} diff --git a/task/src/task_state.rs b/task/src/task_state.rs deleted file mode 100644 index 01758965a1..0000000000 --- a/task/src/task_state.rs +++ /dev/null @@ -1,182 +0,0 @@ -use atomic_enum::atomic_enum; -use serde::{Deserialize, Serialize}; -use std::{ - fmt::Debug, - sync::{atomic::Ordering, Arc}, -}; - -/// Nit: wish this was for u8 but sadly no -/// Represents the status of a hotshot task -#[atomic_enum] -#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub enum TaskStatus { - /// the task hasn't started running - NotStarted = 0, - /// the task is running - Running = 1, - /// NOTE: not useful generally, but VERY useful for byzantine nodes - /// and testing malfunctions - /// we'll have a granular way to, from the registry, stop a task momentarily - /// and inspect/modify its state - Paused = 2, - /// the task completed - Completed = 3, -} - -/// The state of a task -/// `AtomicTaskStatus` + book keeping to notify btwn tasks -#[derive(Clone)] -pub struct TaskState { - /// previous status - prev: Arc, - /// next status - next: Arc, - // using `std::sync::mutex` here because it's faster than async's version - // wakers: Arc>>, -} - -impl Debug for TaskState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TaskState") - .field("status", &self.get_status()) - .finish() - } -} -impl Default for TaskState { - fn default() -> Self { - Self::new() - } -} - -impl TaskState { - /// create a new state - #[must_use] - pub fn new() -> Self { - Self { - prev: Arc::new(TaskStatus::NotStarted.into()), - next: Arc::new(TaskStatus::NotStarted.into()), - // wakers: Arc::default(), - } - } - - /// create a task state from a task status - #[must_use] - pub fn from_status(state: Arc) -> Self { - let prev_state = AtomicTaskStatus::new(state.load(Ordering::SeqCst)); - Self { - prev: Arc::new(prev_state), - next: state, - // wakers: Arc::default(), - } - } - - /// sets the state - /// # Panics - /// should never panic unless internally a lock poison happens - /// this should NOT be possible - pub fn set_state(&self, state: TaskStatus) { - self.next.swap(state, Ordering::SeqCst); - // no panics, so can never be poisoned. - // let mut wakers = self.wakers.lock().unwrap(); - - // drain the wakers - // for waker in wakers.drain(..) { - // waker.wake(); - // } - } - /// gets a possibly stale version of the state - #[must_use] - pub fn get_status(&self) -> TaskStatus { - self.next.load(Ordering::SeqCst) - } -} - -// GNARLY bug @jbearer found -// cx gets *really* large in some cases -// impl Stream for TaskState { -// type Item = TaskStatus; -// -// #[unstable] -// fn poll_next( -// self: std::pin::Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// ) -> std::task::Poll> { -// let next = self.next.load(Ordering::SeqCst); -// let prev = self.prev.swap(next, Ordering::SeqCst); -// // a new value has been set -// if prev == next { -// // no panics, so impossible to be poisoned -// self.wakers.lock().unwrap().push(cx.waker().clone()); -// -// // no value has been set, poll again later -// std::task::Poll::Pending -// } else { -// std::task::Poll::Ready(Some(next)) -// } -// } -// } - -impl TaskState { - /// Try to get the next task status. - #[must_use] - pub fn try_next(self: std::pin::Pin<&mut Self>) -> Option { - let next = self.next.load(Ordering::SeqCst); - let prev = self.prev.swap(next, Ordering::SeqCst); - // a new value has been set - if prev == next { - None - } else { - // drain the wakers to wake up the stream. - // we did change value - // let mut wakers = self.wakers.lock().unwrap(); - // for waker in wakers.drain(..) { - // waker.wake(); - // } - Some(next) - } - } -} - -#[cfg(test)] -pub mod test { - - // #[cfg(test)] - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // async fn test_state_stream() { - // setup_logging(); - // - // let mut task = crate::task_state::TaskState::new(); - // - // let task_dup = task.clone(); - // - // async_spawn(async move { - // async_sleep(std::time::Duration::from_secs(1)).await; - // task_dup.set_state(crate::task_state::TaskStatus::Running); - // async_sleep(std::time::Duration::from_secs(1)).await; - // task_dup.set_state(crate::task_state::TaskStatus::Paused); - // async_sleep(std::time::Duration::from_secs(1)).await; - // task_dup.set_state(crate::task_state::TaskStatus::Completed); - // }); - // - // // spawn new task that sleeps then increments - // - // assert_eq!( - // task.try_next().unwrap(), - // crate::task_state::TaskStatus::Running - // ); - // assert_eq!( - // task.next().unwrap(), - // crate::task_state::TaskStatus::Paused - // ); - // assert_eq!( - // task.next().unwrap(), - // crate::task_state::TaskStatus::Completed - // ); - // } - // TODO test global registry using either global + lazy_static - // or passing around global registry -} diff --git a/testing/Cargo.toml b/testing/Cargo.toml index f202f94117..ceb54a410f 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -11,6 +11,7 @@ default = [] slow-tests = [] [dependencies] +async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } sha3 = "^0.10" bincode = { workspace = true } @@ -24,7 +25,6 @@ hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } rand = { workspace = true } snafu = { workspace = true } @@ -34,11 +34,10 @@ sha2 = { workspace = true } async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } +hotshot-task = { path = "../task" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } -[lints] -workspace = true diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index e5367cb8bd..94efb83b3c 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -1,21 +1,19 @@ -use std::{sync::Arc, time::Duration}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use std::time::Duration; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; -use async_compatibility_layer::art::async_sleep; -use futures::FutureExt; +use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::{async_spawn, async_timeout}; use hotshot::traits::TestableNodeImplementation; -use hotshot_task::{ - boxed_sync, - event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - GeneratedStream, -}; +use hotshot_task_impls::helpers::broadcast_event; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; -use crate::test_runner::Node; +use crate::test_runner::{HotShotTaskCompleted, Node}; -use super::{test_launcher::TaskGenerator, GlobalTestEvent}; +use super::GlobalTestEvent; /// the idea here is to run as long as we want @@ -25,24 +23,39 @@ pub struct CompletionTaskErr {} /// Completion task state pub struct CompletionTask> { - /// the test level event stream - pub(crate) test_event_stream: ChannelStream, + pub tx: Sender, + + pub rx: Receiver, /// handles to the nodes in the test pub(crate) handles: Vec>, + /// Duration of the task. + pub duration: Duration, } -impl> TS for CompletionTask {} - -/// Completion task types -pub type CompletionTaskTypes = HSTWithEventAndMessage< - CompletionTaskErr, - GlobalTestEvent, - ChannelStream, - (), - GeneratedStream<()>, - CompletionTask, ->; - +impl> CompletionTask { + pub fn run(mut self) -> JoinHandle { + async_spawn(async move { + if async_timeout(self.duration, self.wait_for_shutdown()) + .await + .is_err() + { + broadcast_event(GlobalTestEvent::ShutDown, &self.tx).await; + } + for node in &self.handles { + node.handle.clone().shut_down().await; + } + HotShotTaskCompleted::ShutDown + }) + } + async fn wait_for_shutdown(&mut self) { + while let Ok(event) = self.rx.recv_direct().await { + if matches!(event, GlobalTestEvent::ShutDown) { + tracing::error!("Completion Task shutting down"); + return; + } + } + } +} /// Description for a time-based completion task. #[derive(Clone, Debug)] pub struct TimeBasedCompletionTaskDescription { @@ -56,81 +69,3 @@ pub enum CompletionTaskDescription { /// Time-based completion task. TimeBasedCompletionTaskBuilder(TimeBasedCompletionTaskDescription), } - -impl CompletionTaskDescription { - /// Build and launch a completion task. - #[must_use] - pub fn build_and_launch>( - self, - ) -> TaskGenerator> { - match self { - CompletionTaskDescription::TimeBasedCompletionTaskBuilder(td) => td.build_and_launch(), - } - } -} - -impl TimeBasedCompletionTaskDescription { - /// create the task and launch it - /// # Panics - /// if cannot obtain task id after launching - #[must_use] - pub fn build_and_launch>( - self, - ) -> TaskGenerator> { - Box::new(move |state, mut registry, test_event_stream| { - async move { - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - for node in &state.handles { - node.handle.clone().shut_down().await; - } - (Some(HotShotTaskCompleted::ShutDown), state) - } - } - } - .boxed() - })); - let message_handler = - HandleMessage::>(Arc::new(move |(), state| { - async move { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - for node in &state.handles { - node.handle.clone().shut_down().await; - } - (Some(HotShotTaskCompleted::ShutDown), state) - } - .boxed() - })); - // normally I'd say "let's use Interval from async-std!" - // but doing this is easier than unifying async-std with tokio's slightly different - // interval abstraction - let stream_generator = GeneratedStream::new(Arc::new(move || { - let fut = async move { - async_sleep(self.duration).await; - }; - Some(boxed_sync(fut)) - })); - let builder = TaskBuilder::>::new( - "Test Completion Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(stream_generator); - let task_id = builder.get_task_id().unwrap(); - (task_id, CompletionTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 1c1718a918..c1a84af20a 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -6,8 +6,6 @@ deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" )] -use hotshot_task::{event_stream::ChannelStream, task_impls::HSTWithEvent}; - /// Helpers for initializing system context handle and building tasks. pub mod task_helpers; @@ -50,15 +48,3 @@ pub enum GlobalTestEvent { /// the test is shutting down ShutDown, } - -/// the reason for shutting down the test -pub enum ShutDownReason { - /// the test is shutting down because of a safety violation - SafetyViolation, - /// the test is shutting down because the test has completed successfully - SuccessfullyCompleted, -} - -/// type alias for the type of tasks created in testing -pub type TestTask = - HSTWithEvent, STATE>; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index d145a2fd58..693a0d46dd 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -1,15 +1,6 @@ -use async_compatibility_layer::channel::UnboundedStream; -use either::Either; -use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, HotShotError}; -use hotshot_task::{ - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - MergeN, -}; -use hotshot_task::{event_stream::EventStream, Merge}; -use hotshot_task_impls::events::HotShotEvent; + +use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ data::{Leaf, VidCommitment}, error::RoundTimedoutState, @@ -24,7 +15,7 @@ use std::{ }; use tracing::error; -use crate::{test_launcher::TaskGenerator, test_runner::Node}; +use crate::test_runner::{HotShotTaskCompleted, Node}; /// convenience type alias for state and block pub type StateAndBlock = (Vec, Vec); @@ -77,11 +68,193 @@ pub struct OverallSafetyTask>, /// ctx pub ctx: RoundCtx, - /// event stream for publishing safety violations - pub test_event_stream: ChannelStream, + /// configure properties + pub properties: OverallSafetyPropertiesDescription, } -impl> TS for OverallSafetyTask {} +impl> TaskState + for OverallSafetyTask +{ + type Event = GlobalTestEvent; + + type Output = HotShotTaskCompleted; + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { + match event { + GlobalTestEvent::ShutDown => { + tracing::error!("Shutting down SafetyTask"); + let state = task.state_mut(); + let OverallSafetyPropertiesDescription { + check_leaf: _, + check_block: _, + num_failed_views: num_failed_rounds_total, + num_successful_views, + threshold_calculator: _, + transaction_threshold: _, + }: OverallSafetyPropertiesDescription = state.properties.clone(); + + let num_incomplete_views = state.ctx.round_results.len() + - state.ctx.successful_views.len() + - state.ctx.failed_views.len(); + + if state.ctx.successful_views.len() < num_successful_views { + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::NotEnoughDecides { + got: state.ctx.successful_views.len(), + expected: num_successful_views, + }, + ))); + } + + if state.ctx.failed_views.len() + num_incomplete_views >= num_failed_rounds_total { + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::TooManyFailures { + failed_views: state.ctx.failed_views.clone(), + }, + ))); + } + Some(HotShotTaskCompleted::ShutDown) + } + } + } + + fn should_shutdown(_event: &Self::Event) -> bool { + false + } +} + +impl> TestTaskState + for OverallSafetyTask +{ + type Message = Event; + + type Output = HotShotTaskCompleted; + + type State = Self; + + async fn handle_message( + message: Self::Message, + idx: usize, + task: &mut hotshot_task::task::TestTask, + ) -> Option { + let OverallSafetyPropertiesDescription { + check_leaf, + check_block, + num_failed_views, + num_successful_views, + threshold_calculator, + transaction_threshold, + }: OverallSafetyPropertiesDescription = task.state().properties.clone(); + let Event { view_number, event } = message; + let key = match event { + EventType::Error { error } => { + task.state_mut() + .ctx + .insert_error_to_context(view_number, idx, error); + None + } + EventType::Decide { + leaf_chain, + qc, + block_size: maybe_block_size, + } => { + // Skip the genesis leaf. + if leaf_chain.len() == 1 + && leaf_chain[0].get_view_number() == TYPES::Time::genesis() + { + return None; + } + let paired_up = (leaf_chain.to_vec(), (*qc).clone()); + match task.state_mut().ctx.round_results.entry(view_number) { + Entry::Occupied(mut o) => { + o.get_mut() + .insert_into_result(idx, paired_up, maybe_block_size) + } + Entry::Vacant(v) => { + let mut round_result = RoundResult::default(); + let key = round_result.insert_into_result(idx, paired_up, maybe_block_size); + v.insert(round_result); + key + } + } + } + EventType::ReplicaViewTimeout { view_number } => { + let error = Arc::new(HotShotError::::ViewTimeoutError { + view_number, + state: RoundTimedoutState::TestCollectRoundEventsTimedOut, + }); + task.state_mut() + .ctx + .insert_error_to_context(view_number, idx, error); + None + } + _ => return None, + }; + + // update view count + let threshold = + (threshold_calculator)(task.state().handles.len(), task.state().handles.len()); + + let len = task.state().handles.len(); + let view = task + .state_mut() + .ctx + .round_results + .get_mut(&view_number) + .unwrap(); + if let Some(key) = key { + view.update_status( + threshold, + len, + &key, + check_leaf, + check_block, + transaction_threshold, + ); + match view.status.clone() { + ViewStatus::Ok => { + task.state_mut().ctx.successful_views.insert(view_number); + if task.state_mut().ctx.successful_views.len() >= num_successful_views { + task.send_event(GlobalTestEvent::ShutDown).await; + return Some(HotShotTaskCompleted::ShutDown); + } + return None; + } + ViewStatus::Failed => { + task.state_mut().ctx.failed_views.insert(view_number); + if task.state_mut().ctx.failed_views.len() > num_failed_views { + task.send_event(GlobalTestEvent::ShutDown).await; + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::TooManyFailures { + failed_views: task.state_mut().ctx.failed_views.clone(), + }, + ))); + } + return None; + } + ViewStatus::Err(e) => { + return Some(HotShotTaskCompleted::Error(Box::new(e))); + } + ViewStatus::InProgress => { + return None; + } + } + } else if view.check_if_failed(threshold, len) { + view.status = ViewStatus::Failed; + task.state_mut().ctx.failed_views.insert(view_number); + if task.state_mut().ctx.failed_views.len() > num_failed_views { + task.send_event(GlobalTestEvent::ShutDown).await; + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::TooManyFailures { + failed_views: task.state_mut().ctx.failed_views.clone(), + }, + ))); + } + return None; + } + None + } +} /// Result of running a round of consensus #[derive(Debug)] @@ -365,249 +538,3 @@ impl Default for OverallSafetyPropertiesDescription { } } } - -impl OverallSafetyPropertiesDescription { - /// build a task - /// # Panics - /// if an internal variant that the prior views are filled is violated - #[must_use] - #[allow(clippy::too_many_lines)] - pub fn build>( - self, - ) -> TaskGenerator> { - let Self { - check_leaf, - check_block, - num_failed_views: num_failed_rounds_total, - num_successful_views, - threshold_calculator, - transaction_threshold, - }: Self = self; - - Box::new(move |mut state, mut registry, test_event_stream| { - async move { - let event_handler = HandleEvent::>(Arc::new( - move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - let num_incomplete_views = state.ctx.round_results.len() - - state.ctx.successful_views.len() - - state.ctx.failed_views.len(); - - if state.ctx.successful_views.len() < num_successful_views { - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::NotEnoughDecides { - got: state.ctx.successful_views.len(), - expected: num_successful_views, - }, - ))), - state, - ); - } - - if state.ctx.failed_views.len() + num_incomplete_views - >= num_failed_rounds_total - { - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))), - state, - ); - } - // TODO check if we got enough successful views - (Some(HotShotTaskCompleted::ShutDown), state) - } - } - } - .boxed() - }, - )); - - let message_handler = HandleMessage::>(Arc::new( - move |msg, mut state| { - let threshold_calculator = threshold_calculator.clone(); - async move { - - let (idx, maybe_event ) : (usize, Either<_, _>)= msg; - if let Either::Left(Event { view_number, event }) = maybe_event { - let key = match event { - EventType::Error { error } => { - state.ctx.insert_error_to_context(view_number, idx, error); - None - } - EventType::Decide { - leaf_chain, - qc, - block_size: maybe_block_size, - } => { - // Skip the genesis leaf. - if leaf_chain.len() == 1 && leaf_chain[0].get_view_number() == TYPES::Time::genesis() { - return (None, state); - } - let paired_up = (leaf_chain.to_vec(), (*qc).clone()); - match state.ctx.round_results.entry(view_number) { - Entry::Occupied(mut o) => o.get_mut().insert_into_result( - idx, - paired_up, - maybe_block_size, - ), - Entry::Vacant(v) => { - let mut round_result = RoundResult::default(); - let key = round_result.insert_into_result( - idx, - paired_up, - maybe_block_size, - ); - v.insert(round_result); - key - } - } - } - EventType::ReplicaViewTimeout { view_number } => { - let error = Arc::new(HotShotError::::ViewTimeoutError { - view_number, - state: RoundTimedoutState::TestCollectRoundEventsTimedOut, - }); - state.ctx.insert_error_to_context(view_number, idx, error); - None - } - _ => return (None, state), - }; - - // update view count - let threshold = - (threshold_calculator)(state.handles.len(), state.handles.len()); - - let view = state.ctx.round_results.get_mut(&view_number).unwrap(); - - if let Some(key) = key { - view.update_status( - threshold, - state.handles.len(), - &key, - check_leaf, - check_block, - transaction_threshold, - ); - match view.status.clone() { - ViewStatus::Ok => { - state.ctx.successful_views.insert(view_number); - if state.ctx.successful_views.len() - >= self.num_successful_views - { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - return (Some(HotShotTaskCompleted::ShutDown), state); - } - return (None, state); - } - ViewStatus::Failed => { - state.ctx.failed_views.insert(view_number); - if state.ctx.failed_views.len() > self.num_failed_views { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))), - state, - ); - } - return (None, state); - } - ViewStatus::Err(e) => { - return ( - Some(HotShotTaskCompleted::Error(Box::new(e))), - state, - ); - } - ViewStatus::InProgress => { - return (None, state); - } - } - } - else if view.check_if_failed(threshold, state.handles.len()) { - view.status = ViewStatus::Failed; - state.ctx.failed_views.insert(view_number); - if state.ctx.failed_views.len() > self.num_failed_views { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))), - state, - ); - } - return (None, state); - } - - } - - (None, state) - } - .boxed() - }, - )); - - let mut streams = vec![]; - for handle in &mut state.handles { - let s1 = - handle - .handle - .get_event_stream_known_impl(FilterEvent::default()) - .await - .0; - let s2 = - handle - .handle - .get_internal_event_stream_known_impl(FilterEvent::default()) - .await - .0; - streams.push( - Merge::new(s1, s2) - ); - } - let builder = TaskBuilder::>::new( - "Test Overall Safety Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)) - .register_event_handler(event_handler) - .register_state(state); - let task_id = builder.get_task_id().unwrap(); - (task_id, OverallSafetyTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} - -/// overall types for safety task -pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< - OverallSafetyTaskErr, - GlobalTestEvent, - ChannelStream, - (usize, Either, HotShotEvent>), - MergeN>, UnboundedStream>>>, - OverallSafetyTask, ->; diff --git a/testing/src/per_node_safety_task.rs b/testing/src/per_node_safety_task.rs deleted file mode 100644 index af20f00b79..0000000000 --- a/testing/src/per_node_safety_task.rs +++ /dev/null @@ -1,258 +0,0 @@ -// // TODO rename this file to per-node -// -// use std::{ops::Deref, sync::Arc}; -// -// use async_compatibility_layer::channel::UnboundedStream; -// use either::Either; -// use futures::{ -// future::{BoxFuture, LocalBoxFuture}, -// FutureExt, -// }; -// use hotshot::traits::TestableNodeImplementation; -// use hotshot_task::{ -// event_stream::ChannelStream, -// global_registry::{GlobalRegistry, HotShotTaskId}, -// task::{ -// FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TaskErr, -// HST, TS, -// }, -// task_impls::{HSTWithEvent, HSTWithEventAndMessage, TaskBuilder}, -// }; -// use hotshot_types::{ -// event::{Event, EventType}, -// traits::node_implementation::NodeType, -// }; -// use nll::nll_todo::nll_todo; -// use snafu::Snafu; -// use tracing::log::warn; -// -// use crate::test_errors::ConsensusTestError; -// -// use super::{ -// completion_task::CompletionTask, -// node_ctx::{NodeCtx, ViewFailed, ViewStatus, ViewSuccess}, -// GlobalTestEvent, -// }; -// -// #[derive(Snafu, Debug)] -// pub enum PerNodeSafetyTaskErr { -// // TODO make this more detailed -// TooManyFailures, -// NotEnoughDecides, -// } -// impl TaskErr for PerNodeSafetyTaskErr {} -// -// /// Data availability task state -// /// -// #[derive(Debug)] -// pub struct PerNodeSafetyTask> { -// pub(crate) ctx: NodeCtx, -// } -// -// impl> Default -// for PerNodeSafetyTask -// { -// fn default() -> Self { -// Self { -// ctx: Default::default(), -// } -// } -// } -// -// impl> TS -// for PerNodeSafetyTask -// { -// } -// -// /// builder describing custom safety properties -// #[derive(Clone)] -// pub enum PerNodeSafetyTaskDescription< -// TYPES: NodeType, -// I: TestableNodeImplementation, -// > { -// GenProperties(PerNodeSafetyPropertiesDescription), -// CustomProperties(PerNodeSafetyFinisher), -// } -// -// /// properties used for gen -// #[derive(Clone, Debug)] -// pub struct PerNodeSafetyPropertiesDescription { -// /// number failed views -// pub num_failed_views: Option, -// /// number decide events -// pub num_decide_events: Option, -// } -// -// // basic consistency check for single node -// /// Exists for easier overriding -// /// runs at end of all tasks -// #[derive(Clone)] -// #[allow(clippy::type_complexity)] -// pub struct PerNodeSafetyFinisher< -// TYPES: NodeType, -// I: TestableNodeImplementation, -// >( -// pub Arc< -// dyn for<'a> Fn(&'a mut NodeCtx) -> BoxFuture<'a, Result<(), PerNodeSafetyTaskErr>> -// + Send -// + 'static -// + Sync, -// >, -// ); -// -// impl> Deref -// for PerNodeSafetyFinisher -// { -// type Target = dyn for<'a> Fn(&'a mut NodeCtx) -> BoxFuture<'a, Result<(), PerNodeSafetyTaskErr>> -// + Send -// + 'static -// + Sync; -// -// fn deref(&self) -> &Self::Target { -// &*self.0 -// } -// } -// -// impl> -// PerNodeSafetyTaskDescription -// { -// fn gen_finisher(self) -> PerNodeSafetyFinisher { -// match self { -// PerNodeSafetyTaskDescription::CustomProperties(finisher) => finisher, -// PerNodeSafetyTaskDescription::GenProperties(PerNodeSafetyPropertiesDescription { -// num_failed_views, -// num_decide_events, -// }) => PerNodeSafetyFinisher(Arc::new(move |ctx: &mut NodeCtx| { -// async move { -// let mut num_failed = 0; -// let mut num_decided = 0; -// for (_view_num, view_status) in &ctx.round_results { -// match view_status { -// ViewStatus::InProgress(_) => {} -// ViewStatus::ViewFailed(_) => { -// num_failed += 1; -// } -// ViewStatus::ViewSuccess(_) => { -// num_decided += 1; -// } -// } -// } -// if let Some(num_failed_views) = num_failed_views { -// if num_failed >= num_failed_views { -// return Err(PerNodeSafetyTaskErr::TooManyFailures); -// } -// } -// -// if let Some(num_decide_events) = num_decide_events { -// if num_decided < num_decide_events { -// return Err(PerNodeSafetyTaskErr::NotEnoughDecides); -// } -// } -// Ok(()) -// } -// .boxed() -// })), -// } -// } -// -// /// build -// pub fn build( -// self, -// // registry: &mut GlobalRegistry, -// // test_event_stream: ChannelStream, -// // hotshot_event_stream: UnboundedStream>, -// ) -> TaskGenerator< -// PerNodeSafetyTask -// > { -// Box::new( -// move |state, mut registry, test_event_stream, hotshot_event_stream| { -// // TODO this is cursed, there's definitely a better way to do this -// let desc = self.clone(); -// async move { -// let test_event_handler = HandleEvent::>(Arc::new( -// move |event, mut state| { -// let finisher = desc.clone().gen_finisher(); -// async move { -// match event { -// GlobalTestEvent::ShutDown => { -// let finished = finisher(&mut state.ctx).await; -// let result = match finished { -// Ok(()) => HotShotTaskCompleted::ShutDown, -// Err(err) => HotShotTaskCompleted::Error(Box::new(err)), -// }; -// return (Some(result), state); -// } -// _ => { -// unimplemented!() -// } -// } -// } -// .boxed() -// }, -// )); -// let message_handler = HandleMessage::>(Arc::new( -// move |msg, mut state| { -// async move { -// let Event { view_number, event } = msg; -// match event { -// EventType::Error { error } => { -// // TODO better warn with node idx -// warn!("View {:?} failed for a replica", view_number); -// state.ctx.round_results.insert( -// view_number, -// ViewStatus::ViewFailed(ViewFailed(error)), -// ); -// } -// EventType::Decide { leaf_chain, qc } => { -// state.ctx.round_results.insert( -// view_number, -// ViewStatus::ViewSuccess(ViewSuccess { -// agreed_state: -// -// }), -// ); -// } -// // these aren't failures -// EventType::ReplicaViewTimeout { view_number } -// | EventType::NextLeaderViewTimeout { view_number } -// | EventType::ViewFinished { view_number } => todo!(), -// _ => todo!(), -// } -// (None, state) -// } -// .boxed() -// }, -// )); -// -// let builder = TaskBuilder::>::new( -// "Safety Check Task".to_string(), -// ) -// .register_event_stream(test_event_stream, FilterEvent::default()) -// .await -// .register_registry(&mut registry) -// .await -// .register_state(state) -// .register_event_handler(test_event_handler) -// .register_message_handler(message_handler) -// .register_message_stream(hotshot_event_stream); -// let task_id = builder.get_task_id().unwrap(); -// (task_id, PerNodeSafetyTaskTypes::build(builder).launch()) -// } -// .boxed() -// }, -// ) -// } -// } -// -// // /// Data Availability task types -// pub type PerNodeSafetyTaskTypes< -// TYPES: NodeType, -// I: TestableNodeImplementation, -// > = HSTWithEventAndMessage< -// PerNodeSafetyTaskErr, -// GlobalTestEvent, -// ChannelStream, -// Event, -// UnboundedStream>, -// PerNodeSafetyTask, -// >; diff --git a/testing/src/soundness_task.rs b/testing/src/soundness_task.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/testing/src/soundness_task.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index d3311a8a24..017e1497a0 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,28 +1,15 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; +use std::collections::HashMap; -use async_compatibility_layer::channel::UnboundedStream; -use futures::FutureExt; use hotshot::traits::TestableNodeImplementation; -use hotshot_task::{ - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - MergeN, -}; + +use crate::test_runner::HotShotTaskCompleted; +use crate::test_runner::LateStartNode; +use crate::test_runner::Node; +use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::traits::network::CommunicationChannel; -use hotshot_types::{ - event::Event, - traits::node_implementation::{ConsensusTime, NodeType}, -}; +use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; - -use crate::{ - test_launcher::TaskGenerator, - test_runner::{LateStartNode, Node}, -}; +use std::collections::BTreeMap; /// convience type for state and block pub type StateAndBlock = (Vec, Vec); @@ -44,7 +31,88 @@ pub struct SpinningTask> { pub(crate) latest_view: Option, } -impl> TS for SpinningTask {} +impl> TaskState for SpinningTask { + type Event = GlobalTestEvent; + + type Output = HotShotTaskCompleted; + + async fn handle_event(event: Self::Event, _task: &mut Task) -> Option { + if matches!(event, GlobalTestEvent::ShutDown) { + return Some(HotShotTaskCompleted::ShutDown); + } + None + } + + fn should_shutdown(_event: &Self::Event) -> bool { + false + } +} + +impl> TestTaskState + for SpinningTask +{ + type Message = Event; + + type Output = HotShotTaskCompleted; + + type State = Self; + + async fn handle_message( + message: Self::Message, + _id: usize, + task: &mut hotshot_task::task::TestTask, + ) -> Option { + let Event { + view_number, + event: _, + } = message; + + let state = &mut task.state_mut(); + + // if we have not seen this view before + if state.latest_view.is_none() || view_number > state.latest_view.unwrap() { + // perform operations on the nodes + if let Some(operations) = state.changes.remove(&view_number) { + for ChangeNode { idx, updown } in operations { + match updown { + UpDown::Up => { + if let Some(node) = state.late_start.remove(&idx.try_into().unwrap()) { + tracing::error!("Node {} spinning up late", idx); + let handle = node.context.run_tasks().await; + handle.hotshot.start_consensus().await; + } + } + UpDown::Down => { + if let Some(node) = state.handles.get_mut(idx) { + tracing::error!("Node {} shutting down", idx); + node.handle.shut_down().await; + } + } + UpDown::NetworkUp => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!("Node {} networks resuming", idx); + handle.networks.0.resume(); + handle.networks.1.resume(); + } + } + UpDown::NetworkDown => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!("Node {} networks pausing", idx); + handle.networks.0.pause(); + handle.networks.1.pause(); + } + } + } + } + } + + // update our latest view + state.latest_view = Some(view_number); + } + + None + } +} /// Spin the node up or down #[derive(Clone, Debug)] @@ -75,165 +143,3 @@ pub struct SpinningTaskDescription { /// the changes in node status, time -> changes pub node_changes: Vec<(u64, Vec)>, } - -impl SpinningTaskDescription { - /// build a task - /// # Panics - /// If there is no latest view - /// or if the node id is over `u32::MAX` - #[must_use] - #[allow(clippy::too_many_lines)] - pub fn build>( - self, - ) -> TaskGenerator> { - Box::new(move |mut state, mut registry, test_event_stream| { - async move { - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - // We do this here as well as in the completion task - // because that task has no knowledge of our late start handles. - for node in &state.handles { - node.handle.clone().shut_down().await; - } - - (Some(HotShotTaskCompleted::ShutDown), state) - } - } - } - .boxed() - })); - - let message_handler = HandleMessage::>(Arc::new( - move |msg, mut state| { - async move { - let Event { - view_number, - event: _, - } = msg.1; - - // if we have not seen this view before - if state.latest_view.is_none() - || view_number > state.latest_view.unwrap() - { - // perform operations on the nodes - - // We want to make sure we didn't miss any views (for example, there is no decide event - // if we get a timeout) - let views_with_relevant_changes: Vec<_> = state - .changes - .range(TYPES::Time::new(0)..view_number) - .map(|(k, _v)| *k) - .collect(); - - for view in views_with_relevant_changes { - if let Some(operations) = state.changes.remove(&view) { - for ChangeNode { idx, updown } in operations { - match updown { - UpDown::Up => { - if let Some(node) = state - .late_start - .remove(&idx.try_into().unwrap()) - { - tracing::error!( - "Node {} spinning up late", - idx - ); - - // create node and add to state, so we can shut them down properly later - let node = Node { - node_id: idx.try_into().unwrap(), - networks: node.networks, - handle: node.context.run_tasks().await, - }; - - // bootstrap consensus by sending the event - node.handle.hotshot.start_consensus().await; - - // add nodes to our state - state.handles.push(node); - } - } - UpDown::Down => { - if let Some(node) = state.handles.get_mut(idx) { - tracing::error!( - "Node {} shutting down", - idx - ); - node.handle.shut_down().await; - } - } - UpDown::NetworkUp => { - if let Some(handle) = state.handles.get(idx) { - tracing::error!( - "Node {} networks resuming", - idx - ); - handle.networks.0.resume(); - handle.networks.1.resume(); - } - } - UpDown::NetworkDown => { - if let Some(handle) = state.handles.get(idx) { - tracing::error!( - "Node {} networks pausing", - idx - ); - handle.networks.0.pause(); - handle.networks.1.pause(); - } - } - } - } - } - } - - // update our latest view - state.latest_view = Some(view_number); - } - - (None, state) - } - .boxed() - }, - )); - - let mut streams = vec![]; - for handle in &mut state.handles { - let s1 = handle - .handle - .get_event_stream_known_impl(FilterEvent::default()) - .await - .0; - streams.push(s1); - } - let builder = TaskBuilder::>::new( - "Test Spinning Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)) - .register_event_handler(event_handler) - .register_state(state); - let task_id = builder.get_task_id().unwrap(); - (task_id, SpinningTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} - -/// types for safety task -pub type SpinningTaskTypes = HSTWithEventAndMessage< - SpinningTaskErr, - GlobalTestEvent, - ChannelStream, - (usize, Event), - MergeN>>, - SpinningTask, ->; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index a8c752a1d4..8a7a4744f0 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -13,7 +13,6 @@ use hotshot::{ types::{BLSPubKey, SignatureKey, SystemContextHandle}, HotShotConsensusApi, HotShotInitializer, Memberships, Networks, SystemContext, }; -use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -32,6 +31,7 @@ use hotshot_types::{ vote::HasViewNumber, }; +use async_broadcast::{Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use bitvec::bitvec; use hotshot_types::simple_vote::QuorumData; @@ -51,7 +51,8 @@ pub async fn build_system_handle( node_id: u64, ) -> ( SystemContextHandle, - ChannelStream>, + Sender>, + Receiver>, ) { let builder = TestMetadata::default_multiple_rounds(); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f64f113a63..4cba204166 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -216,12 +216,8 @@ impl TestMetadata { min_transactions, timing_data, da_committee_size, - txn_description, - completion_task_description, - overall_safety_properties, - spinning_properties, + unreliable_network, - view_sync_properties, .. } = self.clone(); @@ -286,11 +282,6 @@ impl TestMetadata { a.propose_max_round_time = propose_max_round_time; }; - let txn_task_generator = txn_description.build(); - let completion_task_generator = completion_task_description.build_and_launch(); - let overall_safety_task_generator = overall_safety_properties.build(); - let spinning_task_generator = spinning_properties.build(); - let view_sync_task_generator = view_sync_properties.build(); TestLauncher { resource_generator: ResourceGenerators { channel_generator: >::gen_comm_channels( @@ -303,12 +294,6 @@ impl TestMetadata { config, }, metadata: self, - txn_task_generator, - overall_safety_task_generator, - completion_task_generator, - spinning_task_generator, - view_sync_task_generator, - hooks: vec![], } .modify_default_config(mod_config) } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 591253af4a..df7d0a6a47 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -1,24 +1,12 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; -use futures::future::BoxFuture; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; -use hotshot_task::{ - event_stream::ChannelStream, - global_registry::{GlobalRegistry, HotShotTaskId}, - task::HotShotTaskCompleted, - task_launcher::TaskRunner, -}; use hotshot_types::{ traits::{network::CommunicationChannel, node_implementation::NodeType}, HotShotConfig, }; -use crate::{spinning_task::SpinningTask, view_sync_task::ViewSyncTask}; - -use super::{ - completion_task::CompletionTask, overall_safety_task::OverallSafetyTask, - test_builder::TestMetadata, test_runner::TestRunner, txn_task::TxnTask, GlobalTestEvent, -}; +use super::{test_builder::TestMetadata, test_runner::TestRunner}; /// convience type alias for the networks available pub type Networks = ( @@ -35,25 +23,6 @@ pub type CommitteeNetworkGenerator = Box) -> T + 'static>; /// Wrapper Type for view sync function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` pub type ViewSyncNetworkGenerator = Box) -> T + 'static>; -/// Wrapper type for a task generator. -pub type TaskGenerator = Box< - dyn FnOnce( - TASK, - GlobalRegistry, - ChannelStream, - ) - -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, ->; - -/// Wrapper type for a hook. -pub type Hook = Box< - dyn FnOnce( - GlobalRegistry, - ChannelStream, - ) - -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, ->; - /// generators for resources used by each node pub struct ResourceGenerators> { /// generate channels @@ -70,18 +39,6 @@ pub struct TestLauncher> { pub resource_generator: ResourceGenerators, /// metadasta used for tasks pub metadata: TestMetadata, - /// overrideable txn task generator function - pub txn_task_generator: TaskGenerator>, - /// overrideable timeout task generator function - pub completion_task_generator: TaskGenerator>, - /// overall safety task generator - pub overall_safety_task_generator: TaskGenerator>, - /// task for spinning nodes up/down - pub spinning_task_generator: TaskGenerator>, - /// task for view sync - pub view_sync_task_generator: TaskGenerator>, - /// extra hooks in case we want to check additional things - pub hooks: Vec, } impl> TestLauncher { @@ -93,93 +50,9 @@ impl> TestLauncher>, - ) -> Self { - Self { - overall_safety_task_generator, - ..self - } - } - - /// override the safety task generator - #[must_use] - pub fn with_spinning_task_generator( - self, - spinning_task_generator: TaskGenerator>, - ) -> Self { - Self { - spinning_task_generator, - ..self - } - } - - /// overridde the completion task generator - #[must_use] - pub fn with_completion_task_generator( - self, - completion_task_generator: TaskGenerator>, - ) -> Self { - Self { - completion_task_generator, - ..self - } - } - - /// override the txn task generator - #[must_use] - pub fn with_txn_task_generator( - self, - txn_task_generator: TaskGenerator>, - ) -> Self { - Self { - txn_task_generator, - ..self - } - } - - /// override the view sync task generator - #[must_use] - pub fn with_view_sync_task_generator( - self, - view_sync_task_generator: TaskGenerator>, - ) -> Self { - Self { - view_sync_task_generator, - ..self - } - } - - /// override resource generators - #[must_use] - pub fn with_resource_generator(self, resource_generator: ResourceGenerators) -> Self { - Self { - resource_generator, - ..self - } - } - - /// add a hook - #[must_use] - pub fn add_hook(mut self, hook: Hook) -> Self { - self.hooks.push(hook); - self - } - - /// overwrite hooks with more hooks - #[must_use] - pub fn with_hooks(self, hooks: Vec) -> Self { - Self { hooks, ..self } - } - /// Modifies the config used when generating nodes with `f` #[must_use] pub fn modify_default_config( diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 57edbf45e4..7ae67b979a 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -5,17 +5,21 @@ use super::{ txn_task::TxnTask, }; use crate::{ - spinning_task::{ChangeNode, UpDown}, + completion_task::CompletionTaskDescription, + spinning_task::{ChangeNode, SpinningTask, UpDown}, state_types::TestInstanceState, test_launcher::{Networks, TestLauncher}, + txn_task::TxnTaskDescription, view_sync_task::ViewSyncTask, }; +use async_broadcast::broadcast; +use futures::future::join_all; use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; -use hotshot_task::{ - event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, -}; + +use hotshot_constants::EVENT_CHANNEL_SIZE; +use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::traits::{ network::CommunicationChannel, node_implementation::NodeImplementation, }; @@ -30,6 +34,7 @@ use hotshot_types::{ use std::{ collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, + sync::Arc, }; #[allow(deprecated)] @@ -70,12 +75,30 @@ pub struct TestRunner< pub(crate) late_start: HashMap>, /// the next node unique identifier pub(crate) next_node_id: u64, - /// overarching test task - pub(crate) task_runner: TaskRunner, - /// PhantomData for N + /// Phantom for N pub(crate) _pd: PhantomData, } +/// enum describing how the tasks completed +pub enum HotShotTaskCompleted { + /// the task shut down successfully + ShutDown, + /// the task encountered an error + Error(Box), + /// the streams the task was listening for died + StreamsDied, + /// we somehow lost the state + /// this is definitely a bug. + LostState, + /// lost the return value somehow + LostReturnValue, + /// Stream exists but missing handler + MissingHandler, +} + +pub trait TaskErr: std::error::Error + Sync + Send + 'static {} +impl TaskErr for T {} + impl< TYPES: NodeType, I: TestableNodeImplementation, @@ -90,6 +113,7 @@ where /// if the test fails #[allow(clippy::too_many_lines)] pub async fn run_test(mut self) { + let (tx, rx) = broadcast(EVENT_CHANNEL_SIZE); let spinning_changes = self .launcher .metadata @@ -108,45 +132,53 @@ where self.add_nodes(self.launcher.metadata.total_nodes, &late_start_nodes) .await; + let mut event_rxs = vec![]; + let mut internal_event_rxs = vec![]; + + for node in &self.nodes { + let r = node.handle.get_event_stream_known_impl(); + event_rxs.push(r); + } + for node in &self.nodes { + let r = node.handle.get_internal_event_stream_known_impl(); + internal_event_rxs.push(r); + } + + let reg = Arc::new(TaskRegistry::default()); let TestRunner { - launcher, + ref launcher, nodes, late_start, next_node_id: _, - mut task_runner, - _pd: PhantomData, + _pd: _, } = self; - let registry = GlobalRegistry::default(); - let test_event_stream = ChannelStream::new(); - // add transaction task - let txn_task_state = TxnTask { - handles: nodes.clone(), - next_node_idx: Some(0), - }; - let (id, task) = (launcher.txn_task_generator)( - txn_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = - task_runner.add_task(id, "Test Transaction Submission Task".to_string(), task); + let mut task_futs = vec![]; + let meta = launcher.metadata.clone(); + + let txn_task = + if let TxnTaskDescription::RoundRobinTimeBased(duration) = meta.txn_description { + let txn_task = TxnTask { + handles: nodes.clone(), + next_node_idx: Some(0), + duration, + shutdown_chan: rx.clone(), + }; + Some(txn_task) + } else { + None + }; // add completion task - let completion_task_state = CompletionTask { + let CompletionTaskDescription::TimeBasedCompletionTaskBuilder(time_based) = + meta.completion_task_description; + let completion_task = CompletionTask { + tx: tx.clone(), + rx: rx.clone(), handles: nodes.clone(), - test_event_stream: test_event_stream.clone(), + duration: time_based.duration, }; - let (id, task) = (launcher.completion_task_generator)( - completion_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - - task_runner = task_runner.add_task(id, "Test Completion Task".to_string(), task); // add spinning task // map spinning to view @@ -158,48 +190,44 @@ where .append(&mut change); } - let spinning_task_state = crate::spinning_task::SpinningTask { + let spinning_task_state = SpinningTask { handles: nodes.clone(), late_start, latest_view: None, changes, }; - - let (id, task) = (launcher.spinning_task_generator)( - spinning_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = task_runner.add_task(id, "Test Spinning Task".to_string(), task); - + let spinning_task = TestTask::, SpinningTask>::new( + Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), + event_rxs.clone(), + ); // add safety task let overall_safety_task_state = OverallSafetyTask { handles: nodes.clone(), ctx: RoundCtx::default(), - test_event_stream: test_event_stream.clone(), + properties: self.launcher.metadata.overall_safety_properties, }; - let (id, task) = (launcher.overall_safety_task_generator)( - overall_safety_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = task_runner.add_task(id, "Test Overall Safety Task".to_string(), task); + + let safety_task = TestTask::, OverallSafetyTask>::new( + Task::new( + tx.clone(), + rx.clone(), + reg.clone(), + overall_safety_task_state, + ), + event_rxs.clone(), + ); // add view sync task let view_sync_task_state = ViewSyncTask { - handles: nodes.clone(), hit_view_sync: HashSet::new(), + description: self.launcher.metadata.view_sync_properties, + _pd: PhantomData, }; - let (id, task) = (launcher.view_sync_task_generator)( - view_sync_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = task_runner.add_task(id, "View Sync Task".to_string(), task); + let view_sync_task = TestTask::, ViewSyncTask>::new( + Task::new(tx.clone(), rx.clone(), reg.clone(), view_sync_task_state), + internal_event_rxs, + ); // wait for networks to be ready for node in &nodes { @@ -212,21 +240,57 @@ where node.handle.hotshot.start_consensus().await; } } - - let results = task_runner.launch().await; - + task_futs.push(safety_task.run()); + task_futs.push(view_sync_task.run()); + if let Some(txn) = txn_task { + task_futs.push(txn.run()); + } + task_futs.push(completion_task.run()); + task_futs.push(spinning_task.run()); let mut error_list = vec![]; - for (name, result) in results { - match result { - hotshot_task::task::HotShotTaskCompleted::ShutDown => { - info!("Task {} shut down successfully", name); + + #[cfg(async_executor_impl = "async-std")] + { + let results = join_all(task_futs).await; + tracing::error!("test tasks joined"); + for result in results { + match result { + HotShotTaskCompleted::ShutDown => { + info!("Task shut down successfully"); + } + HotShotTaskCompleted::Error(e) => error_list.push(e), + _ => { + panic!("Future impl for task abstraction failed! This should never happen"); + } } - hotshot_task::task::HotShotTaskCompleted::Error(e) => error_list.push((name, e)), - _ => { - panic!("Future impl for task abstraction failed! This should never happen"); + } + } + + #[cfg(async_executor_impl = "tokio")] + { + let results = join_all(task_futs).await; + + tracing::error!("test tasks joined"); + for result in results { + match result { + Ok(res) => { + match res { + HotShotTaskCompleted::ShutDown => { + info!("Task shut down successfully"); + } + HotShotTaskCompleted::Error(e) => error_list.push(e), + _ => { + panic!("Future impl for task abstraction failed! This should never happen"); + } + } + } + Err(e) => { + panic!("Error Joining the test task {:?}", e); + } } } } + assert!( error_list.is_empty(), "TEST FAILED! Results: {error_list:?}" diff --git a/testing/src/timeout_task.rs b/testing/src/timeout_task.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/testing/src/timeout_task.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 994a37baeb..4e510cc50a 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -1,21 +1,18 @@ -use crate::test_runner::Node; -use async_compatibility_layer::art::{async_sleep, async_timeout}; -use futures::FutureExt; +use crate::test_runner::{HotShotTaskCompleted, Node}; +use async_broadcast::{Receiver, TryRecvError}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; -use hotshot_task::{ - boxed_sync, - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - GeneratedStream, -}; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeType; use rand::thread_rng; use snafu::Snafu; -use std::{sync::Arc, time::Duration}; -use tracing::error; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; -use super::{test_launcher::TaskGenerator, GlobalTestEvent}; +use std::time::Duration; + +use super::GlobalTestEvent; // the obvious idea here is to pass in a "stream" that completes every `n` seconds // the stream construction can definitely be fancier but that's the baseline idea @@ -31,19 +28,58 @@ pub struct TxnTask> { pub handles: Vec>, /// Optional index of the next node. pub next_node_idx: Option, + /// time to wait between txns + pub duration: Duration, + /// + pub shutdown_chan: Receiver, } -impl> TS for TxnTask {} - -/// types for task that deices when things are completed -pub type TxnTaskTypes = HSTWithEventAndMessage< - TxnTaskErr, - GlobalTestEvent, - ChannelStream, - (), - GeneratedStream<()>, - TxnTask, ->; +impl> TxnTask { + pub fn run(mut self) -> JoinHandle { + async_spawn(async move { + async_sleep(Duration::from_millis(100)).await; + loop { + async_sleep(self.duration).await; + match self.shutdown_chan.try_recv() { + Ok(_event) => { + return HotShotTaskCompleted::ShutDown; + } + Err(TryRecvError::Empty) => {} + Err(_) => { + return HotShotTaskCompleted::StreamsDied; + } + } + self.submit_tx().await; + } + }) + } + async fn submit_tx(&mut self) { + if let Some(idx) = self.next_node_idx { + // submit to idx handle + // increment state + self.next_node_idx = Some((idx + 1) % self.handles.len()); + match self.handles.get(idx) { + None => { + tracing::error!("couldn't get node in txn task"); + // should do error + unimplemented!() + } + Some(node) => { + // use rand::seq::IteratorRandom; + // we're assuming all nodes have the same leaf. + // If they don't match, this is probably fine since + // it should be caught by an assertion (and the txn will be rejected anyway) + let leaf = node.handle.get_decided_leaf().await; + let txn = I::leaf_create_random_transaction(&leaf, &mut thread_rng(), 0); + node.handle + .submit_transaction(txn.clone()) + .await + .expect("Could not send transaction"); + } + } + } + } +} /// build the transaction task #[derive(Clone, Debug)] @@ -54,120 +90,3 @@ pub enum TxnTaskDescription { /// TODO DistributionBased, // others? } - -impl TxnTaskDescription { - /// build a task - /// # Panics - /// if unable to get task id - #[must_use] - pub fn build>( - self, - ) -> TaskGenerator> - where - TYPES: NodeType, - I: NodeImplementation, - { - Box::new(move |state, mut registry, test_event_stream| { - async move { - // consistency check - match self { - TxnTaskDescription::RoundRobinTimeBased(_) => { - assert!(state.next_node_idx.is_some()); - } - TxnTaskDescription::DistributionBased => assert!(state.next_node_idx.is_none()), - } - // TODO we'll possibly want multiple criterion including: - // - certain number of txns committed - // - anchor of certain depth - // - some other stuff? probably? - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - (Some(HotShotTaskCompleted::ShutDown), state) - } - } - } - .boxed() - })); - let message_handler = - HandleMessage::>(Arc::new(move |(), mut state| { - async move { - if let Some(idx) = state.next_node_idx { - // submit to idx handle - // increment state - state.next_node_idx = Some((idx + 1) % state.handles.len()); - match state.handles.get(idx) { - None => { - // should do error - unimplemented!() - } - Some(node) => { - // use rand::seq::IteratorRandom; - // we're assuming all nodes have the same leaf. - // If they don't match, this is probably fine since - // it should be caught by an assertion (and the txn will be rejected anyway) - - // Attempts to grab the most recently decided leaf. On failure, we don't - // send a transaction. This is to prevent deadlock. - if let Some(leaf) = node.handle.try_get_decided_leaf() { - let txn = I::leaf_create_random_transaction( - &leaf, - &mut thread_rng(), - 0, - ); - - // Time out if we can't get a lock on consensus in a reasonable time. This is to - // prevent deadlock. - if let Err(err) = async_timeout( - Duration::from_secs(1), - node.handle.submit_transaction(txn.clone()), - ) - .await - { - error!("Failed to send test transaction: {err}"); - }; - } - - (None, state) - } - } - } else { - // TODO make an issue - // in the case that this is random - // which I haven't implemented yet - unimplemented!() - } - } - .boxed() - })); - let stream_generator = match self { - TxnTaskDescription::RoundRobinTimeBased(duration) => { - GeneratedStream::new(Arc::new(move || { - let fut = async move { - async_sleep(duration).await; - }; - Some(boxed_sync(fut)) - })) - } - TxnTaskDescription::DistributionBased => unimplemented!(), - }; - let builder = TaskBuilder::>::new( - "Test Transaction Submission Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(stream_generator); - let task_id = builder.get_task_id().unwrap(); - (task_id, TxnTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index 0da12d6a3b..139e6b73fd 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -1,18 +1,10 @@ -use async_compatibility_layer::channel::UnboundedStream; -use futures::FutureExt; -use hotshot_task::task::{HotShotTaskCompleted, HotShotTaskTypes}; -use hotshot_task::{ - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - MergeN, -}; +use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashSet, marker::PhantomData}; -use crate::{test_launcher::TaskGenerator, test_runner::Node, GlobalTestEvent}; +use crate::{test_runner::HotShotTaskCompleted, GlobalTestEvent}; /// `ViewSync` Task error #[derive(Snafu, Debug, Clone)] @@ -23,23 +15,79 @@ pub struct ViewSyncTaskErr { /// `ViewSync` task state pub struct ViewSyncTask> { - /// the node handles - pub(crate) handles: Vec>, /// nodes that hit view sync pub(crate) hit_view_sync: HashSet, + /// properties of task + pub(crate) description: ViewSyncTaskDescription, + /// Phantom data for TYPES and I + pub(crate) _pd: PhantomData<(TYPES, I)>, } -impl> TS for ViewSyncTask {} +impl> TaskState for ViewSyncTask { + type Event = GlobalTestEvent; -/// `ViewSync` task types -pub type ViewSyncTaskTypes = HSTWithEventAndMessage< - ViewSyncTaskErr, - GlobalTestEvent, - ChannelStream, - (usize, HotShotEvent), - MergeN>>, - ViewSyncTask, ->; + type Output = HotShotTaskCompleted; + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { + let state = task.state_mut(); + match event { + GlobalTestEvent::ShutDown => match state.description.clone() { + ViewSyncTaskDescription::Threshold(min, max) => { + let num_hits = state.hit_view_sync.len(); + if min <= num_hits && num_hits <= max { + Some(HotShotTaskCompleted::ShutDown) + } else { + Some(HotShotTaskCompleted::Error(Box::new(ViewSyncTaskErr { + hit_view_sync: state.hit_view_sync.clone(), + }))) + } + } + }, + } + } + + fn should_shutdown(_event: &Self::Event) -> bool { + false + } +} + +impl> TestTaskState + for ViewSyncTask +{ + type Message = HotShotEvent; + + type Output = HotShotTaskCompleted; + + type State = Self; + + async fn handle_message( + message: Self::Message, + id: usize, + task: &mut hotshot_task::task::TestTask, + ) -> Option { + match message { + // all the view sync events + HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::ViewSyncPreCommitVoteSend(_) + | HotShotEvent::ViewSyncCommitVoteSend(_) + | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncTrigger(_) => { + task.state_mut().hit_view_sync.insert(id); + } + _ => (), + } + None + } +} /// enum desecribing whether a node should hit view sync #[derive(Clone, Debug, Copy)] @@ -58,100 +106,3 @@ pub enum ViewSyncTaskDescription { /// (min, max) number nodes that may hit view sync, inclusive Threshold(usize, usize), } - -impl ViewSyncTaskDescription { - /// build a view sync task from its description - /// # Panics - /// if there is an violation of the view sync description - #[must_use] - pub fn build>( - self, - ) -> TaskGenerator> { - Box::new(move |mut state, mut registry, test_event_stream| { - async move { - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - let self_dup = self.clone(); - async move { - match event { - GlobalTestEvent::ShutDown => match self_dup.clone() { - ViewSyncTaskDescription::Threshold(min, max) => { - let num_hits = state.hit_view_sync.len(); - if min <= num_hits && num_hits <= max { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - ( - Some(HotShotTaskCompleted::Error(Box::new( - ViewSyncTaskErr { - hit_view_sync: state.hit_view_sync.clone(), - }, - ))), - state, - ) - } - } - }, - } - } - .boxed() - })); - - let message_handler = HandleMessage::>(Arc::new( - // NOTE: could short circuit on entering view sync if we're not supposed to - // enter view sync. I opted not to do this just to gather more information - // (since we'll fail the test later anyway) - move |(id, msg), mut state| { - async move { - match msg { - // all the view sync events - HotShotEvent::ViewSyncTimeout(_, _, _) - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) - | HotShotEvent::ViewSyncCommitVoteRecv(_) - | HotShotEvent::ViewSyncFinalizeVoteRecv(_) - | HotShotEvent::ViewSyncPreCommitVoteSend(_) - | HotShotEvent::ViewSyncCommitVoteSend(_) - | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) - | HotShotEvent::ViewSyncTrigger(_) => { - state.hit_view_sync.insert(id); - } - _ => (), - } - (None, state) - } - .boxed() - }, - )); - let mut streams = vec![]; - for handle in &mut state.handles { - let stream = handle - .handle - .get_internal_event_stream_known_impl(FilterEvent::default()) - .await - .0; - streams.push(stream); - } - - let builder = TaskBuilder::>::new( - "Test Completion Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)); - let task_id = builder.get_task_id().unwrap(); - (task_id, ViewSyncTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index b5cdcc7887..8ed52d6aea 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,7 +1,6 @@ #![allow(clippy::panic)] use commit::Committable; -use hotshot::{tasks::add_consensus_task, types::SystemContextHandle, HotShotConsensusApi}; -use hotshot_task::event_stream::ChannelStream; +use hotshot::{types::SystemContextHandle, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, @@ -82,6 +81,7 @@ async fn build_vote( )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use hotshot::tasks::create_consensus_state; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_certificate::QuorumCertificate; @@ -108,30 +108,22 @@ async fn test_consensus_task() { input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); output.insert( HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), 1, ); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), - 1, - ); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } - output.insert(HotShotEvent::Shutdown, 1); - - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) - }; + let consensus_state = + create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; - run_harness(input, output, None, build_fn, false).await; + run_harness(input, output, consensus_state, false).await; } #[cfg(test)] @@ -141,6 +133,7 @@ async fn test_consensus_task() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { + use hotshot::tasks::create_consensus_state; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; @@ -161,27 +154,21 @@ async fn test_consensus_vote() { proposal.clone(), public_key, )); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), - 1, - ); + let proposal = proposal.data; if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) - }; + let consensus_state = + create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; - run_harness(input, output, None, build_fn, false).await; + run_harness(input, output, consensus_state, false).await; } #[cfg(test)] @@ -215,7 +202,7 @@ async fn test_consensus_with_vid() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let (handle, _event_stream) = build_system_handle(2).await; + let (handle, _tx, _rx) = build_system_handle(2).await; // We assign node's key pair rather than read from config file since it's a test // In view 2, node 2 is the leader. let (private_key_view2, public_key_view2) = key_pair_for_id(2); @@ -283,32 +270,21 @@ async fn test_consensus_with_vid() { public_key_view2, )); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), - 1, - ); - output.insert(HotShotEvent::DACRecv(created_dac_view2), 1); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); } - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), - 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input - ); - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(2)), - 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input - ); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) - }; + let consensus_state = hotshot::tasks::create_consensus_state( + handle.hotshot.inner.output_event_stream.0.clone(), + &handle, + ) + .await; - run_harness(input, output, None, build_fn, false).await; + run_harness(input, output, consensus_state, false).await; } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index fa0e12eb43..9371b4f913 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,5 +1,5 @@ use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::{da::DATaskState, events::HotShotEvent}; use hotshot_testing::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, @@ -23,7 +23,6 @@ use std::{collections::HashMap, marker::PhantomData}; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot::tasks::add_da_task; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::message::Proposal; @@ -83,11 +82,6 @@ async fn test_da_task() { input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - 1, - ); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); let da_vote = DAVote::create_signed_vote( DAData { @@ -100,12 +94,17 @@ async fn test_da_task() { .expect("Failed to sign DAData"); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); - output.insert(HotShotEvent::DAProposalRecv(message, pub_key), 1); - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::Shutdown, 1); - - let build_fn = |task_runner, event_stream| add_da_task(task_runner, event_stream, handle); - - run_harness(input, output, None, build_fn, false).await; + let da_state = DATaskState { + api: api.clone(), + consensus: handle.hotshot.get_consensus(), + da_membership: api.inner.memberships.da_membership.clone().into(), + da_network: api.inner.networks.da_network.clone().into(), + quorum_membership: api.inner.memberships.quorum_membership.clone().into(), + cur_view: ViewNumber::new(0), + vote_collector: None.into(), + public_key: *api.public_key(), + private_key: api.private_key().clone(), + id: handle.hotshot.inner.id, + }; + run_harness(input, output, da_state, false).await; } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index a3810fc21b..447805d9c7 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -20,7 +20,6 @@ use std::{collections::HashMap, marker::PhantomData}; #[ignore] #[allow(clippy::too_many_lines)] async fn test_network_task() { - use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{data::VidDisperse, message::Proposal}; @@ -28,7 +27,7 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 2. - let (handle, event_stream) = build_system_handle(2).await; + let (handle, _tx, _rx) = build_system_handle(2).await; let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; @@ -143,10 +142,10 @@ async fn test_network_task() { output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); - let build_fn = |task_runner, _| async { task_runner }; + // let build_fn = |task_runner, _| async { task_runner }; // There may be extra outputs not in the expected set, e.g., a second `VidDisperseRecv` if the // VID task runs fast. All event types we want to test should be seen by this point, so waiting // for more events will not help us test more cases for now. Therefore, we set // `allow_extra_output` to `true` for deterministic test result. - run_harness(input, output, Some(event_stream), build_fn, true).await; + // run_harness(input, output, Some(event_stream), build_fn, true).await; } diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 85e1d27fb2..041fbc1b75 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,5 +1,5 @@ -use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; -use hotshot_task_impls::events::HotShotEvent; +use hotshot::{types::SignatureKey, HotShotConsensusApi}; +use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, @@ -68,7 +68,6 @@ async fn test_vid_task() { _pd: PhantomData, }; - // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); @@ -88,15 +87,9 @@ async fn test_vid_task() { input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - 1, - ); - output.insert( HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), - 2, + 1, ); output.insert( @@ -105,14 +98,19 @@ async fn test_vid_task() { ); output.insert( HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 2, // 2 occurrences: 1 from `input`, 1 from the DA task + 1, ); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::Shutdown, 1); - - let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, handle); - - run_harness(input, output, None, build_fn, false).await; + let vid_state = VIDTaskState { + api: api.clone(), + consensus: handle.hotshot.get_consensus(), + cur_view: ViewNumber::new(0), + vote_collector: None, + network: api.inner.networks.quorum_network.clone().into(), + membership: api.inner.memberships.vid_membership.clone().into(), + public_key: *api.public_key(), + private_key: api.private_key().clone(), + id: handle.hotshot.inner.id, + }; + run_harness(input, output, vid_state, false).await; } diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 5dd956a145..a9a7a51a96 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -11,10 +11,12 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { - use hotshot::tasks::add_view_sync_task; use hotshot_task_impls::harness::run_harness; + use hotshot_task_impls::view_sync::ViewSyncTaskState; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_vote::ViewSyncPreCommitData; + use hotshot_types::traits::consensus_api::ConsensusApi; + use std::time::Duration; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -39,7 +41,6 @@ async fn test_view_sync_task() { tracing::error!("Vote in test is {:?}", vote.clone()); - // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); @@ -48,16 +49,25 @@ async fn test_view_sync_task() { input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Timeout(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); - output.insert(HotShotEvent::Shutdown, 1); - - let build_fn = - |task_runner, event_stream| add_view_sync_task(task_runner, event_stream, handle); - - run_harness(input, output, None, build_fn, false).await; + let view_sync_state = ViewSyncTaskState { + current_view: ViewNumber::new(0), + next_view: ViewNumber::new(0), + network: api.inner.networks.quorum_network.clone().into(), + membership: api.inner.memberships.view_sync_membership.clone().into(), + public_key: *api.public_key(), + private_key: api.private_key().clone(), + api, + num_timeouts_tracked: 0, + replica_task_map: HashMap::default().into(), + pre_commit_relay_map: HashMap::default().into(), + commit_relay_map: HashMap::default().into(), + finalize_relay_map: HashMap::default().into(), + view_sync_timeout: Duration::new(10, 0), + id: handle.hotshot.inner.id, + last_garbage_collected_view: ViewNumber::new(0), + }; + run_harness(input, output, view_sync_state, false).await; } diff --git a/types/Cargo.toml b/types/Cargo.toml index ffe6b819d4..9b8117bf88 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -31,7 +31,6 @@ espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } hotshot-constants = { path = "../constants" } -hotshot-task = { path = "../task", default-features = false } hotshot-utils = { path = "../utils" } jf-plonk = { workspace = true } jf-primitives = { workspace = true, features = ["test-srs"] } diff --git a/types/src/lib.rs b/types/src/lib.rs index 4b1cffac1a..388d8ad9bf 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,6 +1,6 @@ //! Types and Traits for the `HotShot` consensus module use displaydoc::Display; -use std::{num::NonZeroUsize, time::Duration}; +use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod consensus; pub mod data; @@ -17,6 +17,23 @@ pub mod traits; pub mod utils; pub mod vote; +/// Pinned future that is Send and Sync +pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; + +/// yoinked from futures crate +pub fn assert_future(future: F) -> F +where + F: Future, +{ + future +} +/// yoinked from futures crate, adds sync bound that we need +pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> +where + F: Future + Sized + Send + Sync + 'a, +{ + assert_future::(Box::pin(fut)) +} /// the type of consensus to run. Either: /// wait for a signal to start a view, /// or constantly run diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index c968913e2c..a2c8357a29 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -6,7 +6,6 @@ use async_compatibility_layer::art::async_sleep; #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; use dyn_clone::DynClone; -use hotshot_task::{boxed_sync, BoxSyncFuture}; use libp2p_networking::network::NetworkNodeHandleError; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; @@ -16,6 +15,7 @@ use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ data::ViewNumber, message::{Message, MessagePurpose}, + BoxSyncFuture, }; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; @@ -451,7 +451,7 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'sta } } }; - boxed_sync(closure) + Box::pin(closure) } } diff --git a/types/src/vote.rs b/types/src/vote.rs index 808f127562..ba49f4732d 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -108,17 +108,17 @@ impl, CERT: Certificate Either { + pub fn accumulate(&mut self, vote: &VOTE, membership: &TYPES::Membership) -> Either<(), CERT> { let key = vote.get_signing_key(); let vote_commitment = vote.get_data_commitment(); if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { error!("Invalid vote! Vote Data {:?}", vote.get_data()); - return Either::Left(self); + return Either::Left(()); } let Some(stake_table_entry) = membership.get_stake(&key) else { - return Either::Left(self); + return Either::Left(()); }; let stake_table = membership.get_committee_qc_stake_table(); let vote_node_id = stake_table @@ -136,7 +136,7 @@ impl, CERT: Certificate, CERT: Certificate, CERT: Certificate Date: Fri, 9 Feb 2024 15:08:42 -0500 Subject: [PATCH 0777/1393] Move examples to seperate crate, resolve circular dependancy --- examples/Cargo.toml | 129 ++++++++++++++++++ .../examples => examples}/combined/all.rs | 0 .../combined/multi-validator.rs | 0 .../combined/orchestrator.rs | 0 .../examples => examples}/combined/types.rs | 0 .../combined/validator.rs | 0 {hotshot/examples => examples}/infra/mod.rs | 0 {hotshot/examples => examples}/libp2p/all.rs | 0 .../libp2p/multi-validator.rs | 0 .../libp2p/orchestrator.rs | 0 .../examples => examples}/libp2p/types.rs | 0 .../examples => examples}/libp2p/validator.rs | 0 .../examples => examples}/webserver/README.md | 0 .../examples => examples}/webserver/all.rs | 0 .../webserver/multi-validator.rs | 0 .../webserver/multi-webserver.rs | 0 .../webserver/orchestrator.rs | 0 .../examples => examples}/webserver/types.rs | 0 .../webserver/validator.rs | 0 .../webserver/webserver.rs | 0 hotshot/Cargo.toml | 60 -------- hotshot/src/traits.rs | 2 +- .../src/traits/networking/combined_network.rs | 64 ++------- hotshot/src/traits/storage/memory_storage.rs | 74 ---------- testing/Cargo.toml | 2 +- testing/tests/combined_network.rs | 46 +++++++ testing/tests/storage.rs | 72 ++++++++++ 27 files changed, 264 insertions(+), 185 deletions(-) create mode 100644 examples/Cargo.toml rename {hotshot/examples => examples}/combined/all.rs (100%) rename {hotshot/examples => examples}/combined/multi-validator.rs (100%) rename {hotshot/examples => examples}/combined/orchestrator.rs (100%) rename {hotshot/examples => examples}/combined/types.rs (100%) rename {hotshot/examples => examples}/combined/validator.rs (100%) rename {hotshot/examples => examples}/infra/mod.rs (100%) rename {hotshot/examples => examples}/libp2p/all.rs (100%) rename {hotshot/examples => examples}/libp2p/multi-validator.rs (100%) rename {hotshot/examples => examples}/libp2p/orchestrator.rs (100%) rename {hotshot/examples => examples}/libp2p/types.rs (100%) rename {hotshot/examples => examples}/libp2p/validator.rs (100%) rename {hotshot/examples => examples}/webserver/README.md (100%) rename {hotshot/examples => examples}/webserver/all.rs (100%) rename {hotshot/examples => examples}/webserver/multi-validator.rs (100%) rename {hotshot/examples => examples}/webserver/multi-webserver.rs (100%) rename {hotshot/examples => examples}/webserver/orchestrator.rs (100%) rename {hotshot/examples => examples}/webserver/types.rs (100%) rename {hotshot/examples => examples}/webserver/validator.rs (100%) rename {hotshot/examples => examples}/webserver/webserver.rs (100%) create mode 100644 testing/tests/storage.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml new file mode 100644 index 0000000000..1b6c1d491e --- /dev/null +++ b/examples/Cargo.toml @@ -0,0 +1,129 @@ +[package] +authors = ["Espresso Systems "] +description = "HotShot Examples and binaries" +edition = "2021" +name = "hotshot-examples" +readme = "README.md" +version = "0.3.3" +rust-version = "1.65.0" + +[features] +default = ["docs", "doc-images"] + +# Features required for binaries +bin-orchestrator = ["clap"] + +# Build the extended documentation +docs = [] +doc-images = [] +hotshot-testing = [] +randomized-leader-election = [] + +# libp2p +[[example]] +name = "validator-libp2p" +path = "libp2p/validator.rs" + +[[example]] +name = "multi-validator-libp2p" +path = "libp2p/multi-validator.rs" + +[[example]] +name = "orchestrator-libp2p" +path = "libp2p/orchestrator.rs" + +[[example]] +name = "all-libp2p" +path = "libp2p/all.rs" + +# webserver +[[example]] +name = "webserver" +path = "webserver/webserver.rs" + +[[example]] +name = "orchestrator-webserver" +path = "webserver/orchestrator.rs" + +[[example]] +name = "validator-webserver" +path = "webserver/validator.rs" + +[[example]] +name = "multi-validator-webserver" +path = "webserver/multi-validator.rs" + +[[example]] +name = "multi-webserver" +path = "webserver/multi-webserver.rs" + +[[example]] +name = "all-webserver" +path = "webserver/all.rs" + +# combined +[[example]] +name = "all-combined" +path = "combined/all.rs" + +[[example]] +name = "multi-validator-combined" +path = "combined/multi-validator.rs" + +[[example]] +name = "validator-combined" +path = "combined/validator.rs" + +[[example]] +name = "orchestrator-combined" +path = "combined/orchestrator.rs" + +[dependencies] +async-broadcast = { workspace = true } +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bimap = "0.6.3" +bincode = { workspace = true } +clap = { version = "4.4", features = ["derive", "env"], optional = true } +commit = { workspace = true } +hotshot-constants = { path = "../constants" } +custom_debug = { workspace = true } +dashmap = "5.5.1" +either = { workspace = true } +embed-doc-image = "0.1.4" +futures = { workspace = true } +hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } +hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } +hotshot-types = { path = "../types", version = "0.1.0", default-features = false } +hotshot-utils = { path = "../utils" } +hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +libp2p-identity = { workspace = true } +libp2p-networking = { workspace = true } +rand = { workspace = true } +serde = { workspace = true, features = ["rc"] } +snafu = { workspace = true } +surf-disco = { workspace = true } +time = { workspace = true } +derive_more = "0.99.17" +portpicker = "0.1.1" +lru = "0.12.2" +hotshot-task = { path = "../task" } +hotshot = { path = "../hotshot" } + +tracing = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } + +[dev-dependencies] +clap = { version = "4.4", features = ["derive", "env"] } +toml = { workspace = true } +blake3 = { workspace = true } +local-ip-address = "0.5.7" +hotshot-testing = { path = "../testing" } + +[lints] +workspace = true diff --git a/hotshot/examples/combined/all.rs b/examples/combined/all.rs similarity index 100% rename from hotshot/examples/combined/all.rs rename to examples/combined/all.rs diff --git a/hotshot/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs similarity index 100% rename from hotshot/examples/combined/multi-validator.rs rename to examples/combined/multi-validator.rs diff --git a/hotshot/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs similarity index 100% rename from hotshot/examples/combined/orchestrator.rs rename to examples/combined/orchestrator.rs diff --git a/hotshot/examples/combined/types.rs b/examples/combined/types.rs similarity index 100% rename from hotshot/examples/combined/types.rs rename to examples/combined/types.rs diff --git a/hotshot/examples/combined/validator.rs b/examples/combined/validator.rs similarity index 100% rename from hotshot/examples/combined/validator.rs rename to examples/combined/validator.rs diff --git a/hotshot/examples/infra/mod.rs b/examples/infra/mod.rs similarity index 100% rename from hotshot/examples/infra/mod.rs rename to examples/infra/mod.rs diff --git a/hotshot/examples/libp2p/all.rs b/examples/libp2p/all.rs similarity index 100% rename from hotshot/examples/libp2p/all.rs rename to examples/libp2p/all.rs diff --git a/hotshot/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs similarity index 100% rename from hotshot/examples/libp2p/multi-validator.rs rename to examples/libp2p/multi-validator.rs diff --git a/hotshot/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs similarity index 100% rename from hotshot/examples/libp2p/orchestrator.rs rename to examples/libp2p/orchestrator.rs diff --git a/hotshot/examples/libp2p/types.rs b/examples/libp2p/types.rs similarity index 100% rename from hotshot/examples/libp2p/types.rs rename to examples/libp2p/types.rs diff --git a/hotshot/examples/libp2p/validator.rs b/examples/libp2p/validator.rs similarity index 100% rename from hotshot/examples/libp2p/validator.rs rename to examples/libp2p/validator.rs diff --git a/hotshot/examples/webserver/README.md b/examples/webserver/README.md similarity index 100% rename from hotshot/examples/webserver/README.md rename to examples/webserver/README.md diff --git a/hotshot/examples/webserver/all.rs b/examples/webserver/all.rs similarity index 100% rename from hotshot/examples/webserver/all.rs rename to examples/webserver/all.rs diff --git a/hotshot/examples/webserver/multi-validator.rs b/examples/webserver/multi-validator.rs similarity index 100% rename from hotshot/examples/webserver/multi-validator.rs rename to examples/webserver/multi-validator.rs diff --git a/hotshot/examples/webserver/multi-webserver.rs b/examples/webserver/multi-webserver.rs similarity index 100% rename from hotshot/examples/webserver/multi-webserver.rs rename to examples/webserver/multi-webserver.rs diff --git a/hotshot/examples/webserver/orchestrator.rs b/examples/webserver/orchestrator.rs similarity index 100% rename from hotshot/examples/webserver/orchestrator.rs rename to examples/webserver/orchestrator.rs diff --git a/hotshot/examples/webserver/types.rs b/examples/webserver/types.rs similarity index 100% rename from hotshot/examples/webserver/types.rs rename to examples/webserver/types.rs diff --git a/hotshot/examples/webserver/validator.rs b/examples/webserver/validator.rs similarity index 100% rename from hotshot/examples/webserver/validator.rs rename to examples/webserver/validator.rs diff --git a/hotshot/examples/webserver/webserver.rs b/examples/webserver/webserver.rs similarity index 100% rename from hotshot/examples/webserver/webserver.rs rename to examples/webserver/webserver.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 546489612c..35b6a244f5 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -19,65 +19,6 @@ doc-images = [] hotshot-testing = [] randomized-leader-election = [] -# libp2p -[[example]] -name = "validator-libp2p" -path = "examples/libp2p/validator.rs" - -[[example]] -name = "multi-validator-libp2p" -path = "examples/libp2p/multi-validator.rs" - -[[example]] -name = "orchestrator-libp2p" -path = "examples/libp2p/orchestrator.rs" - -[[example]] -name = "all-libp2p" -path = "examples/libp2p/all.rs" - -# webserver -[[example]] -name = "webserver" -path = "examples/webserver/webserver.rs" - -[[example]] -name = "orchestrator-webserver" -path = "examples/webserver/orchestrator.rs" - -[[example]] -name = "validator-webserver" -path = "examples/webserver/validator.rs" - -[[example]] -name = "multi-validator-webserver" -path = "examples/webserver/multi-validator.rs" - -[[example]] -name = "multi-webserver" -path = "examples/webserver/multi-webserver.rs" - -[[example]] -name = "all-webserver" -path = "examples/webserver/all.rs" - -# combined -[[example]] -name = "all-combined" -path = "examples/combined/all.rs" - -[[example]] -name = "multi-validator-combined" -path = "examples/combined/multi-validator.rs" - -[[example]] -name = "validator-combined" -path = "examples/combined/validator.rs" - -[[example]] -name = "orchestrator-combined" -path = "examples/combined/orchestrator.rs" - [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -122,7 +63,6 @@ clap = { version = "4.4", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } local-ip-address = "0.5.7" -hotshot-testing = { path = "../testing" } [lints] workspace = true diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 203fa39705..3bd07c4c15 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,7 +13,7 @@ pub use storage::{Result as StorageResult, Storage}; pub mod implementations { pub use super::{ networking::{ - combined_network::{CombinedCommChannel, CombinedNetworks}, + combined_network::{calculate_hash_of, Cache, CombinedCommChannel, CombinedNetworks}, libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, web_server_network::{WebCommChannel, WebServerNetwork}, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 9f061e8bd4..ae093c5f72 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -42,7 +42,7 @@ use std::hash::Hash; /// A cache to keep track of the last n messages we've seen, avoids reprocessing duplicates /// from multiple networks #[derive(Clone, Debug)] -struct Cache { +pub struct Cache { /// The maximum number of items to store in the cache capacity: usize, /// The cache itself @@ -53,7 +53,8 @@ struct Cache { impl Cache { /// Create a new cache with the given capacity - fn new(capacity: usize) -> Self { + #[must_use] + pub fn new(capacity: usize) -> Self { Self { capacity, inner: HashSet::with_capacity(capacity), @@ -62,7 +63,7 @@ impl Cache { } /// Insert a hash into the cache - fn insert(&mut self, hash: u64) { + pub fn insert(&mut self, hash: u64) { if self.inner.contains(&hash) { return; } @@ -81,19 +82,26 @@ impl Cache { } /// Check if the cache contains a hash - fn contains(&self, hash: u64) -> bool { + #[must_use] + pub fn contains(&self, hash: u64) -> bool { self.inner.contains(&hash) } /// Get the number of items in the cache - #[cfg(test)] - fn len(&self) -> usize { + #[must_use] + pub fn len(&self) -> usize { self.inner.len() } + + /// True if the cache is empty false otherwise + #[must_use] + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } } /// Helper function to calculate a hash of a type that implements Hash -fn calculate_hash_of(t: &T) -> u64 { +pub fn calculate_hash_of(t: &T) -> u64 { let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() @@ -384,8 +392,6 @@ impl TestableChannelImplementation for CombinedCommChann #[cfg(test)] mod test { - use hotshot_testing::block_types::TestTransaction; - use super::*; use tracing::instrument; @@ -413,44 +419,4 @@ mod test { assert!(cache.hashes.contains(&3)); assert!(cache.hashes.contains(&4)); } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_hash_calculation() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); - assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_cache_integrity() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - let mut cache = Cache::new(3); - - // test insertion integrity - cache.insert(calculate_hash_of(&message1)); - cache.insert(calculate_hash_of(&message2)); - - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - - // check that the cache is not modified on duplicate entries - cache.insert(calculate_hash_of(&message1)); - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - assert_eq!(cache.len(), 2); - } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index adaff123f2..bf4bc727eb 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -104,77 +104,3 @@ impl Storage for MemoryStorage { Ok(()) // do nothing } } - -#[cfg(test)] -mod test { - - use super::*; - use commit::Committable; - use hotshot_testing::{ - block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, - node_types::TestTypes, - }; - use hotshot_types::{ - data::{fake_commitment, Leaf}, - simple_certificate::QuorumCertificate, - traits::{ - node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, - }, - }; - use std::marker::PhantomData; - use tracing::instrument; - - fn random_stored_view(view_number: ::Time) -> StoredView { - let payload = TestBlockPayload::genesis(); - let header = TestBlockHeader { - block_number: 0, - payload_commitment: genesis_vid_commitment(), - }; - let dummy_leaf_commit = fake_commitment::>(); - let data = hotshot_types::simple_vote::QuorumData { - leaf_commit: dummy_leaf_commit, - }; - let commit = data.commit(); - StoredView::from_qc_block_and_state( - QuorumCertificate { - is_genesis: view_number == ::Time::genesis(), - data, - vote_commitment: commit, - signatures: None, - view_number, - _pd: PhantomData, - }, - header, - Some(payload), - dummy_leaf_commit, - <::SignatureKey as SignatureKey>::genesis_proposer_pk(), - ) - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn memory_storage() { - let storage = MemoryStorage::construct_tmp_storage().unwrap(); - let genesis = random_stored_view(::Time::genesis()); - storage - .append_single_view(genesis.clone()) - .await - .expect("Could not append block"); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number) - .await - .unwrap(); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number + 1) - .await - .unwrap(); - assert!(storage.get_anchored_view().await.is_err()); - } -} diff --git a/testing/Cargo.toml b/testing/Cargo.toml index ceb54a410f..9382fb63be 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -20,7 +20,7 @@ either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ "hotshot-testing", -], default-features = false } +] } hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 9e0befae8c..dc24d050ab 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -10,7 +10,53 @@ use hotshot_testing::{ use rand::Rng; use tracing::instrument; +use hotshot::traits::implementations::{calculate_hash_of, Cache}; +use hotshot_testing::block_types::TestTransaction; + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_hash_calculation() { + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); + + assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); + assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_cache_integrity() { + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); + + let mut cache = Cache::new(3); + + // test insertion integrity + cache.insert(calculate_hash_of(&message1)); + cache.insert(calculate_hash_of(&message2)); + + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + + // check that the cache is not modified on duplicate entries + cache.insert(calculate_hash_of(&message1)); + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + assert_eq!(cache.len(), 2); +} + /// A run with both the webserver and libp2p functioning properly +#[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) diff --git a/testing/tests/storage.rs b/testing/tests/storage.rs new file mode 100644 index 0000000000..8ef2c5f711 --- /dev/null +++ b/testing/tests/storage.rs @@ -0,0 +1,72 @@ +use commit::Committable; +use hotshot::traits::implementations::MemoryStorage; +use hotshot::traits::Storage; +use hotshot_testing::{ + block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, + node_types::TestTypes, +}; +use hotshot_types::{ + data::{fake_commitment, Leaf}, + simple_certificate::QuorumCertificate, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, + storage::{StoredView, TestableStorage}, + }, +}; +use std::marker::PhantomData; +use tracing::instrument; + +fn random_stored_view(view_number: ::Time) -> StoredView { + let payload = TestBlockPayload::genesis(); + let header = TestBlockHeader { + block_number: 0, + payload_commitment: genesis_vid_commitment(), + }; + let dummy_leaf_commit = fake_commitment::>(); + let data = hotshot_types::simple_vote::QuorumData { + leaf_commit: dummy_leaf_commit, + }; + let commit = data.commit(); + StoredView::from_qc_block_and_state( + QuorumCertificate { + is_genesis: view_number == ::Time::genesis(), + data, + vote_commitment: commit, + signatures: None, + view_number, + _pd: PhantomData, + }, + header, + Some(payload), + dummy_leaf_commit, + <::SignatureKey as SignatureKey>::genesis_proposer_pk(), + ) +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_storage() { + let storage = MemoryStorage::construct_tmp_storage().unwrap(); + let genesis = random_stored_view(::Time::genesis()); + storage + .append_single_view(genesis.clone()) + .await + .expect("Could not append block"); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number) + .await + .unwrap(); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number + 1) + .await + .unwrap(); + assert!(storage.get_anchored_view().await.is_err()); +} From 33e02ea898b62e7feb57973fb6f2eb93dc404a16 Mon Sep 17 00:00:00 2001 From: = Date: Fri, 9 Feb 2024 15:10:51 -0500 Subject: [PATCH 0778/1393] Revert "Move examples to seperate crate, resolve circular dependancy" This reverts commit 5e0bac90e0663a08bcdb105767bd229036b87fc6. --- examples/Cargo.toml | 129 ------------------ hotshot/Cargo.toml | 60 ++++++++ .../examples}/combined/all.rs | 0 .../examples}/combined/multi-validator.rs | 0 .../examples}/combined/orchestrator.rs | 0 .../examples}/combined/types.rs | 0 .../examples}/combined/validator.rs | 0 {examples => hotshot/examples}/infra/mod.rs | 0 {examples => hotshot/examples}/libp2p/all.rs | 0 .../examples}/libp2p/multi-validator.rs | 0 .../examples}/libp2p/orchestrator.rs | 0 .../examples}/libp2p/types.rs | 0 .../examples}/libp2p/validator.rs | 0 .../examples}/webserver/README.md | 0 .../examples}/webserver/all.rs | 0 .../examples}/webserver/multi-validator.rs | 0 .../examples}/webserver/multi-webserver.rs | 0 .../examples}/webserver/orchestrator.rs | 0 .../examples}/webserver/types.rs | 0 .../examples}/webserver/validator.rs | 0 .../examples}/webserver/webserver.rs | 0 hotshot/src/traits.rs | 2 +- .../src/traits/networking/combined_network.rs | 64 +++++++-- hotshot/src/traits/storage/memory_storage.rs | 74 ++++++++++ testing/Cargo.toml | 2 +- testing/tests/combined_network.rs | 46 ------- testing/tests/storage.rs | 72 ---------- 27 files changed, 185 insertions(+), 264 deletions(-) delete mode 100644 examples/Cargo.toml rename {examples => hotshot/examples}/combined/all.rs (100%) rename {examples => hotshot/examples}/combined/multi-validator.rs (100%) rename {examples => hotshot/examples}/combined/orchestrator.rs (100%) rename {examples => hotshot/examples}/combined/types.rs (100%) rename {examples => hotshot/examples}/combined/validator.rs (100%) rename {examples => hotshot/examples}/infra/mod.rs (100%) rename {examples => hotshot/examples}/libp2p/all.rs (100%) rename {examples => hotshot/examples}/libp2p/multi-validator.rs (100%) rename {examples => hotshot/examples}/libp2p/orchestrator.rs (100%) rename {examples => hotshot/examples}/libp2p/types.rs (100%) rename {examples => hotshot/examples}/libp2p/validator.rs (100%) rename {examples => hotshot/examples}/webserver/README.md (100%) rename {examples => hotshot/examples}/webserver/all.rs (100%) rename {examples => hotshot/examples}/webserver/multi-validator.rs (100%) rename {examples => hotshot/examples}/webserver/multi-webserver.rs (100%) rename {examples => hotshot/examples}/webserver/orchestrator.rs (100%) rename {examples => hotshot/examples}/webserver/types.rs (100%) rename {examples => hotshot/examples}/webserver/validator.rs (100%) rename {examples => hotshot/examples}/webserver/webserver.rs (100%) delete mode 100644 testing/tests/storage.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml deleted file mode 100644 index 1b6c1d491e..0000000000 --- a/examples/Cargo.toml +++ /dev/null @@ -1,129 +0,0 @@ -[package] -authors = ["Espresso Systems "] -description = "HotShot Examples and binaries" -edition = "2021" -name = "hotshot-examples" -readme = "README.md" -version = "0.3.3" -rust-version = "1.65.0" - -[features] -default = ["docs", "doc-images"] - -# Features required for binaries -bin-orchestrator = ["clap"] - -# Build the extended documentation -docs = [] -doc-images = [] -hotshot-testing = [] -randomized-leader-election = [] - -# libp2p -[[example]] -name = "validator-libp2p" -path = "libp2p/validator.rs" - -[[example]] -name = "multi-validator-libp2p" -path = "libp2p/multi-validator.rs" - -[[example]] -name = "orchestrator-libp2p" -path = "libp2p/orchestrator.rs" - -[[example]] -name = "all-libp2p" -path = "libp2p/all.rs" - -# webserver -[[example]] -name = "webserver" -path = "webserver/webserver.rs" - -[[example]] -name = "orchestrator-webserver" -path = "webserver/orchestrator.rs" - -[[example]] -name = "validator-webserver" -path = "webserver/validator.rs" - -[[example]] -name = "multi-validator-webserver" -path = "webserver/multi-validator.rs" - -[[example]] -name = "multi-webserver" -path = "webserver/multi-webserver.rs" - -[[example]] -name = "all-webserver" -path = "webserver/all.rs" - -# combined -[[example]] -name = "all-combined" -path = "combined/all.rs" - -[[example]] -name = "multi-validator-combined" -path = "combined/multi-validator.rs" - -[[example]] -name = "validator-combined" -path = "combined/validator.rs" - -[[example]] -name = "orchestrator-combined" -path = "combined/orchestrator.rs" - -[dependencies] -async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } -async-lock = { workspace = true } -async-trait = { workspace = true } -bimap = "0.6.3" -bincode = { workspace = true } -clap = { version = "4.4", features = ["derive", "env"], optional = true } -commit = { workspace = true } -hotshot-constants = { path = "../constants" } -custom_debug = { workspace = true } -dashmap = "5.5.1" -either = { workspace = true } -embed-doc-image = "0.1.4" -futures = { workspace = true } -hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } -hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-types = { path = "../types", version = "0.1.0", default-features = false } -hotshot-utils = { path = "../utils" } -hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } -libp2p-identity = { workspace = true } -libp2p-networking = { workspace = true } -rand = { workspace = true } -serde = { workspace = true, features = ["rc"] } -snafu = { workspace = true } -surf-disco = { workspace = true } -time = { workspace = true } -derive_more = "0.99.17" -portpicker = "0.1.1" -lru = "0.12.2" -hotshot-task = { path = "../task" } -hotshot = { path = "../hotshot" } - -tracing = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[dev-dependencies] -clap = { version = "4.4", features = ["derive", "env"] } -toml = { workspace = true } -blake3 = { workspace = true } -local-ip-address = "0.5.7" -hotshot-testing = { path = "../testing" } - -[lints] -workspace = true diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 35b6a244f5..546489612c 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -19,6 +19,65 @@ doc-images = [] hotshot-testing = [] randomized-leader-election = [] +# libp2p +[[example]] +name = "validator-libp2p" +path = "examples/libp2p/validator.rs" + +[[example]] +name = "multi-validator-libp2p" +path = "examples/libp2p/multi-validator.rs" + +[[example]] +name = "orchestrator-libp2p" +path = "examples/libp2p/orchestrator.rs" + +[[example]] +name = "all-libp2p" +path = "examples/libp2p/all.rs" + +# webserver +[[example]] +name = "webserver" +path = "examples/webserver/webserver.rs" + +[[example]] +name = "orchestrator-webserver" +path = "examples/webserver/orchestrator.rs" + +[[example]] +name = "validator-webserver" +path = "examples/webserver/validator.rs" + +[[example]] +name = "multi-validator-webserver" +path = "examples/webserver/multi-validator.rs" + +[[example]] +name = "multi-webserver" +path = "examples/webserver/multi-webserver.rs" + +[[example]] +name = "all-webserver" +path = "examples/webserver/all.rs" + +# combined +[[example]] +name = "all-combined" +path = "examples/combined/all.rs" + +[[example]] +name = "multi-validator-combined" +path = "examples/combined/multi-validator.rs" + +[[example]] +name = "validator-combined" +path = "examples/combined/validator.rs" + +[[example]] +name = "orchestrator-combined" +path = "examples/combined/orchestrator.rs" + [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -63,6 +122,7 @@ clap = { version = "4.4", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } local-ip-address = "0.5.7" +hotshot-testing = { path = "../testing" } [lints] workspace = true diff --git a/examples/combined/all.rs b/hotshot/examples/combined/all.rs similarity index 100% rename from examples/combined/all.rs rename to hotshot/examples/combined/all.rs diff --git a/examples/combined/multi-validator.rs b/hotshot/examples/combined/multi-validator.rs similarity index 100% rename from examples/combined/multi-validator.rs rename to hotshot/examples/combined/multi-validator.rs diff --git a/examples/combined/orchestrator.rs b/hotshot/examples/combined/orchestrator.rs similarity index 100% rename from examples/combined/orchestrator.rs rename to hotshot/examples/combined/orchestrator.rs diff --git a/examples/combined/types.rs b/hotshot/examples/combined/types.rs similarity index 100% rename from examples/combined/types.rs rename to hotshot/examples/combined/types.rs diff --git a/examples/combined/validator.rs b/hotshot/examples/combined/validator.rs similarity index 100% rename from examples/combined/validator.rs rename to hotshot/examples/combined/validator.rs diff --git a/examples/infra/mod.rs b/hotshot/examples/infra/mod.rs similarity index 100% rename from examples/infra/mod.rs rename to hotshot/examples/infra/mod.rs diff --git a/examples/libp2p/all.rs b/hotshot/examples/libp2p/all.rs similarity index 100% rename from examples/libp2p/all.rs rename to hotshot/examples/libp2p/all.rs diff --git a/examples/libp2p/multi-validator.rs b/hotshot/examples/libp2p/multi-validator.rs similarity index 100% rename from examples/libp2p/multi-validator.rs rename to hotshot/examples/libp2p/multi-validator.rs diff --git a/examples/libp2p/orchestrator.rs b/hotshot/examples/libp2p/orchestrator.rs similarity index 100% rename from examples/libp2p/orchestrator.rs rename to hotshot/examples/libp2p/orchestrator.rs diff --git a/examples/libp2p/types.rs b/hotshot/examples/libp2p/types.rs similarity index 100% rename from examples/libp2p/types.rs rename to hotshot/examples/libp2p/types.rs diff --git a/examples/libp2p/validator.rs b/hotshot/examples/libp2p/validator.rs similarity index 100% rename from examples/libp2p/validator.rs rename to hotshot/examples/libp2p/validator.rs diff --git a/examples/webserver/README.md b/hotshot/examples/webserver/README.md similarity index 100% rename from examples/webserver/README.md rename to hotshot/examples/webserver/README.md diff --git a/examples/webserver/all.rs b/hotshot/examples/webserver/all.rs similarity index 100% rename from examples/webserver/all.rs rename to hotshot/examples/webserver/all.rs diff --git a/examples/webserver/multi-validator.rs b/hotshot/examples/webserver/multi-validator.rs similarity index 100% rename from examples/webserver/multi-validator.rs rename to hotshot/examples/webserver/multi-validator.rs diff --git a/examples/webserver/multi-webserver.rs b/hotshot/examples/webserver/multi-webserver.rs similarity index 100% rename from examples/webserver/multi-webserver.rs rename to hotshot/examples/webserver/multi-webserver.rs diff --git a/examples/webserver/orchestrator.rs b/hotshot/examples/webserver/orchestrator.rs similarity index 100% rename from examples/webserver/orchestrator.rs rename to hotshot/examples/webserver/orchestrator.rs diff --git a/examples/webserver/types.rs b/hotshot/examples/webserver/types.rs similarity index 100% rename from examples/webserver/types.rs rename to hotshot/examples/webserver/types.rs diff --git a/examples/webserver/validator.rs b/hotshot/examples/webserver/validator.rs similarity index 100% rename from examples/webserver/validator.rs rename to hotshot/examples/webserver/validator.rs diff --git a/examples/webserver/webserver.rs b/hotshot/examples/webserver/webserver.rs similarity index 100% rename from examples/webserver/webserver.rs rename to hotshot/examples/webserver/webserver.rs diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 3bd07c4c15..203fa39705 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,7 +13,7 @@ pub use storage::{Result as StorageResult, Storage}; pub mod implementations { pub use super::{ networking::{ - combined_network::{calculate_hash_of, Cache, CombinedCommChannel, CombinedNetworks}, + combined_network::{CombinedCommChannel, CombinedNetworks}, libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, web_server_network::{WebCommChannel, WebServerNetwork}, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index ae093c5f72..9f061e8bd4 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -42,7 +42,7 @@ use std::hash::Hash; /// A cache to keep track of the last n messages we've seen, avoids reprocessing duplicates /// from multiple networks #[derive(Clone, Debug)] -pub struct Cache { +struct Cache { /// The maximum number of items to store in the cache capacity: usize, /// The cache itself @@ -53,8 +53,7 @@ pub struct Cache { impl Cache { /// Create a new cache with the given capacity - #[must_use] - pub fn new(capacity: usize) -> Self { + fn new(capacity: usize) -> Self { Self { capacity, inner: HashSet::with_capacity(capacity), @@ -63,7 +62,7 @@ impl Cache { } /// Insert a hash into the cache - pub fn insert(&mut self, hash: u64) { + fn insert(&mut self, hash: u64) { if self.inner.contains(&hash) { return; } @@ -82,26 +81,19 @@ impl Cache { } /// Check if the cache contains a hash - #[must_use] - pub fn contains(&self, hash: u64) -> bool { + fn contains(&self, hash: u64) -> bool { self.inner.contains(&hash) } /// Get the number of items in the cache - #[must_use] - pub fn len(&self) -> usize { + #[cfg(test)] + fn len(&self) -> usize { self.inner.len() } - - /// True if the cache is empty false otherwise - #[must_use] - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } } /// Helper function to calculate a hash of a type that implements Hash -pub fn calculate_hash_of(t: &T) -> u64 { +fn calculate_hash_of(t: &T) -> u64 { let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() @@ -392,6 +384,8 @@ impl TestableChannelImplementation for CombinedCommChann #[cfg(test)] mod test { + use hotshot_testing::block_types::TestTransaction; + use super::*; use tracing::instrument; @@ -419,4 +413,44 @@ mod test { assert!(cache.hashes.contains(&3)); assert!(cache.hashes.contains(&4)); } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_hash_calculation() { + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); + + assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); + assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn test_cache_integrity() { + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); + + let mut cache = Cache::new(3); + + // test insertion integrity + cache.insert(calculate_hash_of(&message1)); + cache.insert(calculate_hash_of(&message2)); + + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + + // check that the cache is not modified on duplicate entries + cache.insert(calculate_hash_of(&message1)); + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + assert_eq!(cache.len(), 2); + } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index bf4bc727eb..adaff123f2 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -104,3 +104,77 @@ impl Storage for MemoryStorage { Ok(()) // do nothing } } + +#[cfg(test)] +mod test { + + use super::*; + use commit::Committable; + use hotshot_testing::{ + block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, + node_types::TestTypes, + }; + use hotshot_types::{ + data::{fake_commitment, Leaf}, + simple_certificate::QuorumCertificate, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, + }, + }; + use std::marker::PhantomData; + use tracing::instrument; + + fn random_stored_view(view_number: ::Time) -> StoredView { + let payload = TestBlockPayload::genesis(); + let header = TestBlockHeader { + block_number: 0, + payload_commitment: genesis_vid_commitment(), + }; + let dummy_leaf_commit = fake_commitment::>(); + let data = hotshot_types::simple_vote::QuorumData { + leaf_commit: dummy_leaf_commit, + }; + let commit = data.commit(); + StoredView::from_qc_block_and_state( + QuorumCertificate { + is_genesis: view_number == ::Time::genesis(), + data, + vote_commitment: commit, + signatures: None, + view_number, + _pd: PhantomData, + }, + header, + Some(payload), + dummy_leaf_commit, + <::SignatureKey as SignatureKey>::genesis_proposer_pk(), + ) + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[instrument] + async fn memory_storage() { + let storage = MemoryStorage::construct_tmp_storage().unwrap(); + let genesis = random_stored_view(::Time::genesis()); + storage + .append_single_view(genesis.clone()) + .await + .expect("Could not append block"); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number) + .await + .unwrap(); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number + 1) + .await + .unwrap(); + assert!(storage.get_anchored_view().await.is_err()); + } +} diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 9382fb63be..ceb54a410f 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -20,7 +20,7 @@ either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ "hotshot-testing", -] } +], default-features = false } hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index dc24d050ab..9e0befae8c 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -10,53 +10,7 @@ use hotshot_testing::{ use rand::Rng; use tracing::instrument; -use hotshot::traits::implementations::{calculate_hash_of, Cache}; -use hotshot_testing::block_types::TestTransaction; - -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_hash_calculation() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); - assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); -} - -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_cache_integrity() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - let mut cache = Cache::new(3); - - // test insertion integrity - cache.insert(calculate_hash_of(&message1)); - cache.insert(calculate_hash_of(&message2)); - - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - - // check that the cache is not modified on duplicate entries - cache.insert(calculate_hash_of(&message1)); - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - assert_eq!(cache.len(), 2); -} - /// A run with both the webserver and libp2p functioning properly -#[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) diff --git a/testing/tests/storage.rs b/testing/tests/storage.rs deleted file mode 100644 index 8ef2c5f711..0000000000 --- a/testing/tests/storage.rs +++ /dev/null @@ -1,72 +0,0 @@ -use commit::Committable; -use hotshot::traits::implementations::MemoryStorage; -use hotshot::traits::Storage; -use hotshot_testing::{ - block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, - node_types::TestTypes, -}; -use hotshot_types::{ - data::{fake_commitment, Leaf}, - simple_certificate::QuorumCertificate, - traits::{ - node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, - storage::{StoredView, TestableStorage}, - }, -}; -use std::marker::PhantomData; -use tracing::instrument; - -fn random_stored_view(view_number: ::Time) -> StoredView { - let payload = TestBlockPayload::genesis(); - let header = TestBlockHeader { - block_number: 0, - payload_commitment: genesis_vid_commitment(), - }; - let dummy_leaf_commit = fake_commitment::>(); - let data = hotshot_types::simple_vote::QuorumData { - leaf_commit: dummy_leaf_commit, - }; - let commit = data.commit(); - StoredView::from_qc_block_and_state( - QuorumCertificate { - is_genesis: view_number == ::Time::genesis(), - data, - vote_commitment: commit, - signatures: None, - view_number, - _pd: PhantomData, - }, - header, - Some(payload), - dummy_leaf_commit, - <::SignatureKey as SignatureKey>::genesis_proposer_pk(), - ) -} - -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn memory_storage() { - let storage = MemoryStorage::construct_tmp_storage().unwrap(); - let genesis = random_stored_view(::Time::genesis()); - storage - .append_single_view(genesis.clone()) - .await - .expect("Could not append block"); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number) - .await - .unwrap(); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number + 1) - .await - .unwrap(); - assert!(storage.get_anchored_view().await.is_err()); -} From dcdd7880bae4f2488cfc46cb89edeba53247e2d2 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 12 Feb 2024 11:05:00 -0500 Subject: [PATCH 0779/1393] fix: retry mechanic (#2560) --- libp2p-networking/Cargo.toml | 1 + .../src/network/behaviours/dht/mod.rs | 84 +++++++++---------- 2 files changed, 41 insertions(+), 44 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 96467a6bd0..17bbeb3061 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -35,6 +35,7 @@ tide = { version = "0.16", optional = true, default-features = false, features = tracing = { workspace = true } void = "1.0.2" dashmap = "5.5.3" +lazy_static = "1.4.0" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index b62cdc26e4..970ea030d3 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -10,6 +10,7 @@ mod cache; use async_compatibility_layer::art::{async_block_on, async_spawn}; use futures::channel::oneshot::Sender; +use lazy_static::lazy_static; use libp2p::kad::Behaviour as KademliaBehaviour; use libp2p::kad::Event as KademliaEvent; use libp2p::{ @@ -28,8 +29,10 @@ use tracing::{error, info, warn}; /// in order to trust that the answer is correct when retrieving from the DHT pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; -/// the maximum number of nodes to query in the DHT at any one time -const MAX_DHT_QUERY_SIZE: usize = 50; +lazy_static! { + /// the maximum number of nodes to query in the DHT at any one time + static ref MAX_DHT_QUERY_SIZE: NonZeroUsize = NonZeroUsize::new(50).unwrap(); +} use self::cache::Cache; @@ -270,8 +273,8 @@ impl DHTBehaviour { /// update state based on recv-ed get query fn handle_get_query(&mut self, record_results: GetRecordResult, id: QueryId, mut last: bool) { - let num = if let Some(query) = self.in_progress_get_record_queries.get_mut(&id) { - match record_results { + let num = match self.in_progress_get_record_queries.get_mut(&id) { + Some(query) => match record_results { Ok(results) => match results { GetRecordOk::FoundRecord(record) => { match query.records.entry(record.record.value) { @@ -298,14 +301,18 @@ impl DHTBehaviour { error!("GOT ERROR IN KAD QUERY {:?}", err); 0 } + }, + None => { + // We already finished the query (or it's been cancelled). Do nothing and exit the + // function. + return; } - } else { - // inactive entry - return; }; - // BUG - if num > NUM_REPLICATED_TO_TRUST { + // if the query has completed and we need to retry + // or if the query has enoguh replicas to return to the client + // trigger retry or completion logic + if num >= NUM_REPLICATED_TO_TRUST || last { if let Some(KadGetQuery { backoff, progress, @@ -321,10 +328,12 @@ impl DHTBehaviour { return; } - let records_len = records.iter().fold(0, |acc, (_k, v)| acc + v); - // NOTE case where multiple nodes agree on different - // values is not handles + // values is not handled because it can't be hit. + // We optimistically choose whichever record returns the most trusted entries first + + // iterate through the records and find an value that has enough replicas + // to trust the value if let Some((r, _)) = records .into_iter() .find(|(_, v)| *v >= NUM_REPLICATED_TO_TRUST) @@ -341,41 +350,28 @@ impl DHTBehaviour { error!("Get DHT: channel closed before get record request result could be sent"); } } - // many records that don't match => disagreement - else if records_len > MAX_DHT_QUERY_SIZE { - error!( - "Get DHT: Record disagreed upon; {:?}! requerying with more nodes", - progress - ); - self.get_record(key, notify, num_replicas, backoff, retry_count); - } // disagreement => query more nodes else { - // there is some internal disagreement. + // there is some internal disagreement or not enough nodes returned // Initiate new query that hits more replicas - let new_factor = - NonZeroUsize::new(num_replicas.get() + 1).unwrap_or(num_replicas); - - self.get_record(key, notify, new_factor, backoff, retry_count); - error!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes", progress); - } - } - } else if last { - if let Some(KadGetQuery { - backoff, - progress, - notify, - num_replicas, - key, - retry_count, - records, - }) = self.in_progress_get_record_queries.remove(&id) - { - let records_len = records.iter().fold(0, |acc, (_k, v)| acc + v); - // lack of replication => error - if records_len < NUM_REPLICATED_TO_TRUST { - error!("Get DHT: Record not replicated enough for {:?}! requerying with more nodes", progress); - self.get_record(key, notify, num_replicas, backoff, retry_count); + if retry_count > 0 { + let new_retry_count = retry_count - 1; + error!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes. {:?} retries left", progress, new_retry_count); + let new_factor = NonZeroUsize::max( + NonZeroUsize::new(num_replicas.get() + 1).unwrap_or(num_replicas), + *MAX_DHT_QUERY_SIZE, + ); + self.queued_get_record_queries.push_back(KadGetQuery { + backoff, + progress: DHTProgress::NotStarted, + notify, + num_replicas: new_factor, + key, + retry_count: new_retry_count, + records: HashMap::default(), + }); + } + error!("Get DHT: Internal disagreement for get dht request {:?}! Giving up because out of retries. ", progress); } } } From 4ea20ef4303bada619bbc44db500d0d75c6211ec Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Mon, 12 Feb 2024 11:05:15 -0500 Subject: [PATCH 0780/1393] [TECH DEBT] - Upgrade libp2p (#2521) * feat: begin to upgrade libp2p * chore: cargo lock * upgrade: libp2p to 0.53.2 --- libp2p-networking/Cargo.toml | 1 + .../src/network/behaviours/dht/mod.rs | 14 ++- .../src/network/behaviours/direct_message.rs | 24 ++-- .../behaviours/direct_message_codec.rs | 113 +++++++++++++++++- .../src/network/behaviours/gossip.rs | 13 +- libp2p-networking/src/network/node.rs | 34 ++++-- 6 files changed, 155 insertions(+), 44 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 17bbeb3061..8b054fcda1 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -29,6 +29,7 @@ rand = { workspace = true } serde = { workspace = true } serde_json = "1.0.113" snafu = { workspace = true } +unsigned-varint = "0.7" tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", ] } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 970ea030d3..a9e59d1785 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -528,6 +528,9 @@ impl DHTBehaviour { e @ KademliaEvent::OutboundQueryProgressed { .. } => { info!("Not handling dht event {:?}", e); } + e => { + error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); + } } } } @@ -591,7 +594,6 @@ impl NetworkBehaviour for DHTBehaviour { fn poll( &mut self, cx: &mut std::task::Context<'_>, - params: &mut impl libp2p::swarm::PollParameters, ) -> Poll>> { if matches!(self.bootstrap_state.state, State::NotStarted) && self.bootstrap_state.backoff.is_expired() @@ -647,7 +649,7 @@ impl NetworkBehaviour for DHTBehaviour { } // poll behaviour which is a passthrough and call inject event - while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.kadem, cx, params) { + while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.kadem, cx) { match ready { ToSwarm::GenerateEvent(e) => { self.dht_handle_event(e); @@ -690,6 +692,9 @@ impl NetworkBehaviour for DHTBehaviour { ToSwarm::ExternalAddrExpired(c) => { return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); } + e => { + error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); + } } } if !self.event_queue.is_empty() { @@ -698,10 +703,7 @@ impl NetworkBehaviour for DHTBehaviour { Poll::Pending } - fn on_swarm_event( - &mut self, - event: libp2p::swarm::derive_prelude::FromSwarm<'_, Self::ConnectionHandler>, - ) { + fn on_swarm_event(&mut self, event: libp2p::swarm::derive_prelude::FromSwarm<'_>) { self.kadem.on_swarm_event(event); } diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index e396ec82e1..81e5d39692 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -4,11 +4,12 @@ use std::{ }; use libp2p::{ - request_response::{Behaviour, Event, Message, RequestId, ResponseChannel}, + request_response::{Behaviour, Event, Message, OutboundRequestId, ResponseChannel}, swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, Multiaddr, }; use libp2p_identity::PeerId; +// use libp2p_request_response::Behaviour; use tracing::{error, info}; use super::{ @@ -34,7 +35,7 @@ pub struct DMBehaviour { /// The wrapped behaviour request_response: Behaviour, /// In progress queries - in_progress_rr: HashMap, + in_progress_rr: HashMap, /// Failed queries to be retried failed_rr: VecDeque, /// lsit of out events for parent behaviour @@ -56,17 +57,13 @@ impl DMBehaviour { match event { Event::InboundFailure { peer, - request_id, + request_id: _, error, } => { error!( "inbound failure to send message to {:?} with error {:?}", peer, error ); - if let Some(mut req) = self.in_progress_rr.remove(&request_id) { - req.backoff.start_next(false); - self.failed_rr.push_back(req); - } } Event::OutboundFailure { peer, @@ -122,10 +119,7 @@ impl NetworkBehaviour for DMBehaviour { type ToSwarm = DMEvent; - fn on_swarm_event( - &mut self, - event: libp2p::swarm::derive_prelude::FromSwarm<'_, Self::ConnectionHandler>, - ) { + fn on_swarm_event(&mut self, event: libp2p::swarm::derive_prelude::FromSwarm<'_>) { self.request_response.on_swarm_event(event); } @@ -142,7 +136,6 @@ impl NetworkBehaviour for DMBehaviour { fn poll( &mut self, cx: &mut std::task::Context<'_>, - params: &mut impl libp2p::swarm::PollParameters, ) -> Poll>> { while let Some(req) = self.failed_rr.pop_front() { if req.backoff.is_expired() { @@ -151,9 +144,7 @@ impl NetworkBehaviour for DMBehaviour { self.failed_rr.push_back(req); } } - while let Poll::Ready(ready) = - NetworkBehaviour::poll(&mut self.request_response, cx, params) - { + while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.request_response, cx) { match ready { // NOTE: this generates request ToSwarm::GenerateEvent(e) => { @@ -197,6 +188,9 @@ impl NetworkBehaviour for DMBehaviour { ToSwarm::ExternalAddrExpired(c) => { return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); } + e => { + error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); + } } } if !self.out_event_queue.is_empty() { diff --git a/libp2p-networking/src/network/behaviours/direct_message_codec.rs b/libp2p-networking/src/network/behaviours/direct_message_codec.rs index 9abe0984b9..18584ec510 100644 --- a/libp2p-networking/src/network/behaviours/direct_message_codec.rs +++ b/libp2p-networking/src/network/behaviours/direct_message_codec.rs @@ -1,9 +1,7 @@ use async_trait::async_trait; +use futures::prelude::*; use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p::{ - core::upgrade::{read_length_prefixed, write_length_prefixed}, - request_response::Codec, -}; +use libp2p::request_response::Codec; use serde::{Deserialize, Serialize}; use std::io; @@ -23,6 +21,113 @@ pub struct DirectMessageResponse(pub Vec); /// Maximum size of a direct message pub const MAX_MSG_SIZE_DM: usize = 100_000_000; +// NOTE: yoinked from libp2p +// +/// Writes a message to the given socket with a length prefix appended to it. Also flushes the socket. +/// +/// > **Note**: Prepends a variable-length prefix indicate the length of the message. This is +/// > compatible with what [`read_length_prefixed`] expects. +/// # Errors +/// On weird input from socket +pub async fn write_length_prefixed( + socket: &mut (impl AsyncWrite + Unpin), + data: impl AsRef<[u8]>, +) -> Result<(), io::Error> { + write_varint(socket, data.as_ref().len()).await?; + socket.write_all(data.as_ref()).await?; + socket.flush().await?; + + Ok(()) +} + +/// Writes a variable-length integer to the `socket`. +/// +/// > **Note**: Does **NOT** flush the socket. +/// # Errors +/// On weird input from socket +pub async fn write_varint( + socket: &mut (impl AsyncWrite + Unpin), + len: usize, +) -> Result<(), io::Error> { + let mut len_data = unsigned_varint::encode::usize_buffer(); + let encoded_len = unsigned_varint::encode::usize(len, &mut len_data).len(); + socket.write_all(&len_data[..encoded_len]).await?; + + Ok(()) +} + +/// Reads a variable-length integer from the `socket`. +/// +/// As a special exception, if the `socket` is empty and EOFs right at the beginning, then we +/// return `Ok(0)`. +/// +/// > **Note**: This function reads bytes one by one from the `socket`. It is therefore encouraged +/// > to use some sort of buffering mechanism. +/// # Errors +/// On weird input from socket +pub async fn read_varint(socket: &mut (impl AsyncRead + Unpin)) -> Result { + let mut buffer = unsigned_varint::encode::usize_buffer(); + let mut buffer_len = 0; + + loop { + match socket.read(&mut buffer[buffer_len..=buffer_len]).await? { + 0 => { + // Reaching EOF before finishing to read the length is an error, unless the EOF is + // at the very beginning of the substream, in which case we assume that the data is + // empty. + if buffer_len == 0 { + return Ok(0); + } + return Err(io::ErrorKind::UnexpectedEof.into()); + } + n => debug_assert_eq!(n, 1), + } + + buffer_len += 1; + + match unsigned_varint::decode::usize(&buffer[..buffer_len]) { + Ok((len, _)) => return Ok(len), + Err(unsigned_varint::decode::Error::Overflow) => { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "overflow in variable-length integer", + )); + } + // TODO: why do we have a `__Nonexhaustive` variant in the error? I don't know how to process it + // Err(unsigned_varint::decode::Error::Insufficient) => {} + Err(_) => {} + } + } +} + +/// Reads a length-prefixed message from the given socket. +/// +/// The `max_size` parameter is the maximum size in bytes of the message that we accept. This is +/// necessary in order to avoid `DoS` attacks where the remote sends us a message of several +/// gigabytes. +/// +/// > **Note**: Assumes that a variable-length prefix indicates the length of the message. This is +/// > compatible with what [`write_length_prefixed`] does. +/// # Errors +/// On weird input from socket +pub async fn read_length_prefixed( + socket: &mut (impl AsyncRead + Unpin), + max_size: usize, +) -> io::Result> { + let len = read_varint(socket).await?; + if len > max_size { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Received data size ({len} bytes) exceeds maximum ({max_size} bytes)"), + )); + } + + let mut buf = vec![0; len]; + socket.read_exact(&mut buf).await?; + + Ok(buf) +} + impl AsRef for DirectMessageProtocol { fn as_ref(&self) -> &str { "/HotShot/request_response/1.0" diff --git a/libp2p-networking/src/network/behaviours/gossip.rs b/libp2p-networking/src/network/behaviours/gossip.rs index 458e11d9f3..45ebd7968b 100644 --- a/libp2p-networking/src/network/behaviours/gossip.rs +++ b/libp2p-networking/src/network/behaviours/gossip.rs @@ -5,7 +5,7 @@ use std::{ use libp2p::{ gossipsub::{Behaviour, Event, IdentTopic, PublishError::Duplicate, TopicHash}, - swarm::{NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, ToSwarm}, + swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, Multiaddr, }; use libp2p_identity::PeerId; @@ -64,24 +64,20 @@ impl NetworkBehaviour for GossipBehaviour { type ToSwarm = GossipEvent; - fn on_swarm_event( - &mut self, - event: libp2p::swarm::derive_prelude::FromSwarm<'_, Self::ConnectionHandler>, - ) { + fn on_swarm_event(&mut self, event: libp2p::swarm::derive_prelude::FromSwarm<'_>) { self.gossipsub.on_swarm_event(event); } fn poll( &mut self, cx: &mut std::task::Context<'_>, - params: &mut impl PollParameters, ) -> Poll>> { // retry sending shit if self.backoff.is_expired() { let published = self.drain_publish_gossips(); self.backoff.start_next(published); } - if let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.gossipsub, cx, params) { + if let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.gossipsub, cx) { match ready { ToSwarm::GenerateEvent(e) => { // add event to event queue which will be subsequently popped off. @@ -125,6 +121,9 @@ impl NetworkBehaviour for GossipBehaviour { ToSwarm::ExternalAddrExpired(c) => { return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); } + e => { + error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); + } } } if !self.out_event_queue.is_empty() { diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index a99c52bcd8..af5f682c3b 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -22,7 +22,7 @@ use super::{ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMEvent}, - direct_message_codec::{DirectMessageProtocol, MAX_MSG_SIZE_DM}, + direct_message_codec::{DirectMessageCodec, DirectMessageProtocol, MAX_MSG_SIZE_DM}, exponential_backoff::ExponentialBackoff, gossip::GossipEvent, }; @@ -30,7 +30,6 @@ use async_compatibility_layer::{ art::async_spawn, channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; -use either::Either; use futures::{select, FutureExt, StreamExt}; use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::core::transport::ListenerId; @@ -56,7 +55,6 @@ use rand::{prelude::SliceRandom, thread_rng}; use snafu::ResultExt; use std::{ collections::{HashMap, HashSet}, - io::Error, iter, num::{NonZeroU32, NonZeroUsize}, time::Duration, @@ -217,7 +215,12 @@ impl NetworkNode { // Use the (blake3) hash of a message as its ID .message_id_fn(message_id_fn) .build() - .map_err(|s| GossipsubConfigSnafu { message: s }.build())?; + .map_err(|s| { + GossipsubConfigSnafu { + message: s.to_string(), + } + .build() + })?; // - Build a gossipsub network behavior let gossipsub: Gossipsub = Gossipsub::new( @@ -263,10 +266,11 @@ impl NetworkNode { let rrconfig = RequestResponseConfig::default(); - let request_response = RequestResponse::new( - [(DirectMessageProtocol(), ProtocolSupport::Full)].into_iter(), - rrconfig, - ); + let request_response: libp2p::request_response::Behaviour = + RequestResponse::new( + [(DirectMessageProtocol(), ProtocolSupport::Full)].into_iter(), + rrconfig, + ); let network = NetworkDef::new( GossipBehaviour::new(gossipsub), @@ -440,10 +444,7 @@ impl NetworkNode { #[instrument(skip(self))] async fn handle_swarm_events( &mut self, - event: SwarmEvent< - NetworkEventInternal, - Either, Error>, void::Void>, - >, + event: SwarmEvent, send_to_client: &UnboundedSender, ) -> Result<(), NetworkError> { // Make the match cleaner @@ -503,6 +504,9 @@ impl NetworkNode { listener_id: _, address: _, } + | SwarmEvent::NewExternalAddrCandidate { .. } + | SwarmEvent::ExternalAddrConfirmed { .. } + | SwarmEvent::ExternalAddrExpired { .. } | SwarmEvent::IncomingConnection { connection_id: _, local_addr: _, @@ -588,6 +592,12 @@ impl NetworkNode { SwarmEvent::ListenerError { listener_id, error } => { info!("LISTENER ERROR {:?} {:?}", listener_id, error); } + _ => { + error!( + "Unhandled swarm event {:?}. This should not be possible.", + event + ); + } } Ok(()) } From 08a76fefeda19b9cee0c61f69cd688a33ebbe5bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 13:32:36 -0500 Subject: [PATCH 0781/1393] Bump clap from 4.4.18 to 4.5.0 (#2553) Bumps [clap](https://github.com/clap-rs/clap) from 4.4.18 to 4.5.0. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.4.18...clap_complete-v4.5.0) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- hotshot/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 546489612c..3f0510b65c 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -85,7 +85,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6.3" bincode = { workspace = true } -clap = { version = "4.4", features = ["derive", "env"], optional = true } +clap = { version = "4.5", features = ["derive", "env"], optional = true } commit = { workspace = true } hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } @@ -118,7 +118,7 @@ tokio = { workspace = true } async-std = { workspace = true } [dev-dependencies] -clap = { version = "4.4", features = ["derive", "env"] } +clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } local-ip-address = "0.5.7" From d81a977c7820c37d49812ceb1cd57c93f312de41 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 12 Feb 2024 14:14:03 -0500 Subject: [PATCH 0782/1393] [TECH_DEBT] Move examples to Separate Crate + Resolve Circular Dependency (#2562) * Move examples to seperate crate, resolve circular dependancy * move test types to example types * add the types crate --- example-types/Cargo.toml | 41 ++++++ {testing => example-types}/src/block_types.rs | 0 example-types/src/lib.rs | 8 ++ {testing => example-types}/src/node_types.rs | 0 {testing => example-types}/src/state_types.rs | 0 examples/Cargo.toml | 129 ++++++++++++++++++ .../examples => examples}/combined/all.rs | 2 +- .../combined/multi-validator.rs | 2 +- .../combined/orchestrator.rs | 2 +- .../examples => examples}/combined/types.rs | 2 +- .../combined/validator.rs | 2 +- {hotshot/examples => examples}/infra/mod.rs | 8 +- {hotshot/examples => examples}/libp2p/all.rs | 2 +- .../libp2p/multi-validator.rs | 2 +- .../libp2p/orchestrator.rs | 2 +- .../examples => examples}/libp2p/types.rs | 2 +- .../examples => examples}/libp2p/validator.rs | 2 +- .../examples => examples}/webserver/README.md | 0 .../examples => examples}/webserver/all.rs | 2 +- .../webserver/multi-validator.rs | 2 +- .../webserver/multi-webserver.rs | 2 +- .../webserver/orchestrator.rs | 2 +- .../examples => examples}/webserver/types.rs | 2 +- .../webserver/validator.rs | 2 +- .../webserver/webserver.rs | 2 +- hotshot/Cargo.toml | 60 -------- hotshot/src/traits.rs | 2 +- .../src/traits/networking/combined_network.rs | 64 ++------- hotshot/src/traits/storage/memory_storage.rs | 74 ---------- testing-macros/Cargo.toml | 1 + testing-macros/tests/tests.rs | 4 +- testing/Cargo.toml | 3 +- testing/README.md | 10 +- testing/src/lib.rs | 9 -- testing/src/task_helpers.rs | 5 +- testing/src/test_runner.rs | 2 +- testing/tests/catchup.rs | 11 +- testing/tests/combined_network.rs | 48 ++++++- testing/tests/consensus_task.rs | 10 +- testing/tests/da_task.rs | 4 +- testing/tests/libp2p.rs | 2 +- testing/tests/lossy.rs | 2 +- testing/tests/memory_network.rs | 4 +- testing/tests/network_task.rs | 6 +- testing/tests/storage.rs | 72 ++++++++++ testing/tests/timeout.rs | 10 +- testing/tests/unit/message.rs | 2 +- testing/tests/unreliable_network.rs | 12 +- testing/tests/vid_task.rs | 7 +- testing/tests/view_sync_task.rs | 2 +- testing/tests/web_server.rs | 2 +- 51 files changed, 382 insertions(+), 266 deletions(-) create mode 100644 example-types/Cargo.toml rename {testing => example-types}/src/block_types.rs (100%) create mode 100644 example-types/src/lib.rs rename {testing => example-types}/src/node_types.rs (100%) rename {testing => example-types}/src/state_types.rs (100%) create mode 100644 examples/Cargo.toml rename {hotshot/examples => examples}/combined/all.rs (98%) rename {hotshot/examples => examples}/combined/multi-validator.rs (96%) rename {hotshot/examples => examples}/combined/orchestrator.rs (94%) rename {hotshot/examples => examples}/combined/types.rs (94%) rename {hotshot/examples => examples}/combined/validator.rs (95%) rename {hotshot/examples => examples}/infra/mod.rs (99%) rename {hotshot/examples => examples}/libp2p/all.rs (97%) rename {hotshot/examples => examples}/libp2p/multi-validator.rs (96%) rename {hotshot/examples => examples}/libp2p/orchestrator.rs (94%) rename {hotshot/examples => examples}/libp2p/types.rs (94%) rename {hotshot/examples => examples}/libp2p/validator.rs (95%) rename {hotshot/examples => examples}/webserver/README.md (100%) rename {hotshot/examples => examples}/webserver/all.rs (98%) rename {hotshot/examples => examples}/webserver/multi-validator.rs (96%) rename {hotshot/examples => examples}/webserver/multi-webserver.rs (97%) rename {hotshot/examples => examples}/webserver/orchestrator.rs (94%) rename {hotshot/examples => examples}/webserver/types.rs (94%) rename {hotshot/examples => examples}/webserver/validator.rs (95%) rename {hotshot/examples => examples}/webserver/webserver.rs (94%) create mode 100644 testing/tests/storage.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml new file mode 100644 index 0000000000..eddc52b821 --- /dev/null +++ b/example-types/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "hotshot-example-types" +version = "0.1.0" +edition = "2021" +description = "Types and traits for the HotShot consesus module" +authors = ["Espresso Systems "] + +[features] +default = [] +# NOTE this is used to activate the slow tests we don't wish to run in CI +slow-tests = [] + +[dependencies] +async-broadcast = { workspace = true } +async-compatibility-layer = { workspace = true } +sha3 = "^0.10" +bincode = { workspace = true } +commit = { workspace = true } +either = { workspace = true } +futures = { workspace = true } +hotshot = { path = "../hotshot" } +hotshot-constants = { path = "../constants" } +hotshot-types = { path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } +hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +rand = { workspace = true } +snafu = { workspace = true } +tracing = { workspace = true } +serde = { workspace = true } +sha2 = { workspace = true } +async-lock = { workspace = true } +bitvec = { workspace = true } +ethereum-types = { workspace = true } +hotshot-task = { path = "../task" } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } + diff --git a/testing/src/block_types.rs b/example-types/src/block_types.rs similarity index 100% rename from testing/src/block_types.rs rename to example-types/src/block_types.rs diff --git a/example-types/src/lib.rs b/example-types/src/lib.rs new file mode 100644 index 0000000000..42610b84d7 --- /dev/null +++ b/example-types/src/lib.rs @@ -0,0 +1,8 @@ +/// block types +pub mod block_types; + +/// Implementations for testing/examples +pub mod state_types; + +/// node types +pub mod node_types; diff --git a/testing/src/node_types.rs b/example-types/src/node_types.rs similarity index 100% rename from testing/src/node_types.rs rename to example-types/src/node_types.rs diff --git a/testing/src/state_types.rs b/example-types/src/state_types.rs similarity index 100% rename from testing/src/state_types.rs rename to example-types/src/state_types.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml new file mode 100644 index 0000000000..9276214352 --- /dev/null +++ b/examples/Cargo.toml @@ -0,0 +1,129 @@ +[package] +authors = ["Espresso Systems "] +description = "HotShot Examples and binaries" +edition = "2021" +name = "hotshot-examples" +readme = "README.md" +version = "0.3.3" +rust-version = "1.65.0" + +[features] +default = ["docs", "doc-images"] + +# Features required for binaries +bin-orchestrator = ["clap"] + +# Build the extended documentation +docs = [] +doc-images = [] +hotshot-testing = [] +randomized-leader-election = [] + +# libp2p +[[example]] +name = "validator-libp2p" +path = "libp2p/validator.rs" + +[[example]] +name = "multi-validator-libp2p" +path = "libp2p/multi-validator.rs" + +[[example]] +name = "orchestrator-libp2p" +path = "libp2p/orchestrator.rs" + +[[example]] +name = "all-libp2p" +path = "libp2p/all.rs" + +# webserver +[[example]] +name = "webserver" +path = "webserver/webserver.rs" + +[[example]] +name = "orchestrator-webserver" +path = "webserver/orchestrator.rs" + +[[example]] +name = "validator-webserver" +path = "webserver/validator.rs" + +[[example]] +name = "multi-validator-webserver" +path = "webserver/multi-validator.rs" + +[[example]] +name = "multi-webserver" +path = "webserver/multi-webserver.rs" + +[[example]] +name = "all-webserver" +path = "webserver/all.rs" + +# combined +[[example]] +name = "all-combined" +path = "combined/all.rs" + +[[example]] +name = "multi-validator-combined" +path = "combined/multi-validator.rs" + +[[example]] +name = "validator-combined" +path = "combined/validator.rs" + +[[example]] +name = "orchestrator-combined" +path = "combined/orchestrator.rs" + +[dependencies] +async-broadcast = { workspace = true } +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bimap = "0.6.3" +bincode = { workspace = true } +clap = { version = "4.4", features = ["derive", "env"], optional = true } +commit = { workspace = true } +hotshot-constants = { path = "../constants" } +custom_debug = { workspace = true } +dashmap = "5.5.1" +either = { workspace = true } +embed-doc-image = "0.1.4" +futures = { workspace = true } +hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } +hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } +hotshot-types = { path = "../types", version = "0.1.0", default-features = false } +hotshot-utils = { path = "../utils" } +hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +libp2p-identity = { workspace = true } +libp2p-networking = { workspace = true } +rand = { workspace = true } +serde = { workspace = true, features = ["rc"] } +snafu = { workspace = true } +surf-disco = { workspace = true } +time = { workspace = true } +derive_more = "0.99.17" +portpicker = "0.1.1" +lru = "0.12.2" +hotshot-task = { path = "../task" } +hotshot = { path = "../hotshot" } +hotshot-example-types = { path = "../example-types" } + +tracing = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } + +[dev-dependencies] +clap = { version = "4.4", features = ["derive", "env"] } +toml = { workspace = true } +blake3 = { workspace = true } +local-ip-address = "0.5.7" + +[lints] +workspace = true diff --git a/hotshot/examples/combined/all.rs b/examples/combined/all.rs similarity index 98% rename from hotshot/examples/combined/all.rs rename to examples/combined/all.rs index de602d4bea..1d6f1c0a89 100644 --- a/hotshot/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -8,9 +8,9 @@ use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::channel::oneshot; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; -use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; diff --git a/hotshot/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs similarity index 96% rename from hotshot/examples/combined/multi-validator.rs rename to examples/combined/multi-validator.rs index 7329986afc..d97a7f7bc3 100644 --- a/hotshot/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -4,8 +4,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; -use hotshot_testing::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; diff --git a/hotshot/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs similarity index 94% rename from hotshot/examples/combined/orchestrator.rs rename to examples/combined/orchestrator.rs index ba038fabc1..d4ced1536c 100644 --- a/hotshot/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -4,7 +4,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; diff --git a/hotshot/examples/combined/types.rs b/examples/combined/types.rs similarity index 94% rename from hotshot/examples/combined/types.rs rename to examples/combined/types.rs index 94980e0925..94c316f7ab 100644 --- a/hotshot/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,6 +1,6 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::{CombinedCommChannel, MemoryStorage}; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; diff --git a/hotshot/examples/combined/validator.rs b/examples/combined/validator.rs similarity index 95% rename from hotshot/examples/combined/validator.rs rename to examples/combined/validator.rs index 04c0aa7cee..d0493134d6 100644 --- a/hotshot/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -1,7 +1,7 @@ //! A validator using both the web server and libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use tracing::{info, instrument}; use types::VIDNetwork; diff --git a/hotshot/examples/infra/mod.rs b/examples/infra/mod.rs similarity index 99% rename from hotshot/examples/infra/mod.rs rename to examples/infra/mod.rs index 4d97d4cc3e..babf08a6bf 100644 --- a/hotshot/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -17,16 +17,16 @@ use hotshot::{ types::{SignatureKey, SystemContextHandle}, Memberships, Networks, SystemContext, }; +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::TestInstanceState, +}; use hotshot_orchestrator::config::NetworkConfigSource; use hotshot_orchestrator::{ self, client::{OrchestratorClient, ValidatorArgs}, config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; -use hotshot_testing::{ - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestInstanceState, -}; use hotshot_types::message::Message; use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::ValidatorConfig; diff --git a/hotshot/examples/libp2p/all.rs b/examples/libp2p/all.rs similarity index 97% rename from hotshot/examples/libp2p/all.rs rename to examples/libp2p/all.rs index 3dda9d6007..5b67f667d7 100644 --- a/hotshot/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -7,9 +7,9 @@ use crate::types::ThisRun; use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; -use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; use surf_disco::Url; diff --git a/hotshot/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs similarity index 96% rename from hotshot/examples/libp2p/multi-validator.rs rename to examples/libp2p/multi-validator.rs index f64d2f25ac..aec3325383 100644 --- a/hotshot/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -4,8 +4,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; -use hotshot_testing::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; diff --git a/hotshot/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs similarity index 94% rename from hotshot/examples/libp2p/orchestrator.rs rename to examples/libp2p/orchestrator.rs index 9298b9e372..c26fc73bba 100644 --- a/hotshot/examples/libp2p/orchestrator.rs +++ b/examples/libp2p/orchestrator.rs @@ -5,7 +5,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use tracing::instrument; use crate::infra::run_orchestrator; diff --git a/hotshot/examples/libp2p/types.rs b/examples/libp2p/types.rs similarity index 94% rename from hotshot/examples/libp2p/types.rs rename to examples/libp2p/types.rs index 446905bab6..af535db8d7 100644 --- a/hotshot/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,6 +1,6 @@ use crate::infra::Libp2pDARun; use hotshot::traits::implementations::{Libp2pCommChannel, MemoryStorage}; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; diff --git a/hotshot/examples/libp2p/validator.rs b/examples/libp2p/validator.rs similarity index 95% rename from hotshot/examples/libp2p/validator.rs rename to examples/libp2p/validator.rs index d27f7f1ea1..9873cac76e 100644 --- a/hotshot/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -1,7 +1,7 @@ //! A validator using libp2p use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use tracing::{info, instrument}; use types::VIDNetwork; diff --git a/hotshot/examples/webserver/README.md b/examples/webserver/README.md similarity index 100% rename from hotshot/examples/webserver/README.md rename to examples/webserver/README.md diff --git a/hotshot/examples/webserver/all.rs b/examples/webserver/all.rs similarity index 98% rename from hotshot/examples/webserver/all.rs rename to examples/webserver/all.rs index eb92ee77d4..99de0d9f93 100644 --- a/hotshot/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -18,9 +18,9 @@ pub mod infra; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_orchestrator::config::NetworkConfig; -use hotshot_testing::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use surf_disco::Url; use tracing::error; diff --git a/hotshot/examples/webserver/multi-validator.rs b/examples/webserver/multi-validator.rs similarity index 96% rename from hotshot/examples/webserver/multi-validator.rs rename to examples/webserver/multi-validator.rs index 780aa24207..b975f07b0f 100644 --- a/hotshot/examples/webserver/multi-validator.rs +++ b/examples/webserver/multi-validator.rs @@ -4,8 +4,8 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; -use hotshot_testing::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; diff --git a/hotshot/examples/webserver/multi-webserver.rs b/examples/webserver/multi-webserver.rs similarity index 97% rename from hotshot/examples/webserver/multi-webserver.rs rename to examples/webserver/multi-webserver.rs index e338835c07..630ebe4c3d 100644 --- a/hotshot/examples/webserver/multi-webserver.rs +++ b/examples/webserver/multi-webserver.rs @@ -7,7 +7,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use surf_disco::Url; use tracing::error; diff --git a/hotshot/examples/webserver/orchestrator.rs b/examples/webserver/orchestrator.rs similarity index 94% rename from hotshot/examples/webserver/orchestrator.rs rename to examples/webserver/orchestrator.rs index 5a39e56471..62f2006f2e 100644 --- a/hotshot/examples/webserver/orchestrator.rs +++ b/examples/webserver/orchestrator.rs @@ -5,7 +5,7 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use tracing::instrument; use types::VIDNetwork; diff --git a/hotshot/examples/webserver/types.rs b/examples/webserver/types.rs similarity index 94% rename from hotshot/examples/webserver/types.rs rename to examples/webserver/types.rs index 46a466ed5f..0e67f7a742 100644 --- a/hotshot/examples/webserver/types.rs +++ b/examples/webserver/types.rs @@ -1,6 +1,6 @@ use crate::infra::WebServerDARun; use hotshot::traits::implementations::{MemoryStorage, WebCommChannel}; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; diff --git a/hotshot/examples/webserver/validator.rs b/examples/webserver/validator.rs similarity index 95% rename from hotshot/examples/webserver/validator.rs rename to examples/webserver/validator.rs index 4bfcd9d78c..96bcde1807 100644 --- a/hotshot/examples/webserver/validator.rs +++ b/examples/webserver/validator.rs @@ -1,7 +1,7 @@ //! A validator using the web server use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use tracing::{info, instrument}; use types::VIDNetwork; diff --git a/hotshot/examples/webserver/webserver.rs b/examples/webserver/webserver.rs similarity index 94% rename from hotshot/examples/webserver/webserver.rs rename to examples/webserver/webserver.rs index e6c9a468c8..06158ae573 100644 --- a/hotshot/examples/webserver/webserver.rs +++ b/examples/webserver/webserver.rs @@ -1,5 +1,5 @@ //! web server example -use hotshot_testing::state_types::TestTypes; +use hotshot_example_types::state_types::TestTypes; use std::sync::Arc; use surf_disco::Url; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 3f0510b65c..835aca8265 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -19,65 +19,6 @@ doc-images = [] hotshot-testing = [] randomized-leader-election = [] -# libp2p -[[example]] -name = "validator-libp2p" -path = "examples/libp2p/validator.rs" - -[[example]] -name = "multi-validator-libp2p" -path = "examples/libp2p/multi-validator.rs" - -[[example]] -name = "orchestrator-libp2p" -path = "examples/libp2p/orchestrator.rs" - -[[example]] -name = "all-libp2p" -path = "examples/libp2p/all.rs" - -# webserver -[[example]] -name = "webserver" -path = "examples/webserver/webserver.rs" - -[[example]] -name = "orchestrator-webserver" -path = "examples/webserver/orchestrator.rs" - -[[example]] -name = "validator-webserver" -path = "examples/webserver/validator.rs" - -[[example]] -name = "multi-validator-webserver" -path = "examples/webserver/multi-validator.rs" - -[[example]] -name = "multi-webserver" -path = "examples/webserver/multi-webserver.rs" - -[[example]] -name = "all-webserver" -path = "examples/webserver/all.rs" - -# combined -[[example]] -name = "all-combined" -path = "examples/combined/all.rs" - -[[example]] -name = "multi-validator-combined" -path = "examples/combined/multi-validator.rs" - -[[example]] -name = "validator-combined" -path = "examples/combined/validator.rs" - -[[example]] -name = "orchestrator-combined" -path = "examples/combined/orchestrator.rs" - [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -122,7 +63,6 @@ clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } local-ip-address = "0.5.7" -hotshot-testing = { path = "../testing" } [lints] workspace = true diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 203fa39705..3bd07c4c15 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,7 +13,7 @@ pub use storage::{Result as StorageResult, Storage}; pub mod implementations { pub use super::{ networking::{ - combined_network::{CombinedCommChannel, CombinedNetworks}, + combined_network::{calculate_hash_of, Cache, CombinedCommChannel, CombinedNetworks}, libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, web_server_network::{WebCommChannel, WebServerNetwork}, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 9f061e8bd4..ae093c5f72 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -42,7 +42,7 @@ use std::hash::Hash; /// A cache to keep track of the last n messages we've seen, avoids reprocessing duplicates /// from multiple networks #[derive(Clone, Debug)] -struct Cache { +pub struct Cache { /// The maximum number of items to store in the cache capacity: usize, /// The cache itself @@ -53,7 +53,8 @@ struct Cache { impl Cache { /// Create a new cache with the given capacity - fn new(capacity: usize) -> Self { + #[must_use] + pub fn new(capacity: usize) -> Self { Self { capacity, inner: HashSet::with_capacity(capacity), @@ -62,7 +63,7 @@ impl Cache { } /// Insert a hash into the cache - fn insert(&mut self, hash: u64) { + pub fn insert(&mut self, hash: u64) { if self.inner.contains(&hash) { return; } @@ -81,19 +82,26 @@ impl Cache { } /// Check if the cache contains a hash - fn contains(&self, hash: u64) -> bool { + #[must_use] + pub fn contains(&self, hash: u64) -> bool { self.inner.contains(&hash) } /// Get the number of items in the cache - #[cfg(test)] - fn len(&self) -> usize { + #[must_use] + pub fn len(&self) -> usize { self.inner.len() } + + /// True if the cache is empty false otherwise + #[must_use] + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } } /// Helper function to calculate a hash of a type that implements Hash -fn calculate_hash_of(t: &T) -> u64 { +pub fn calculate_hash_of(t: &T) -> u64 { let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() @@ -384,8 +392,6 @@ impl TestableChannelImplementation for CombinedCommChann #[cfg(test)] mod test { - use hotshot_testing::block_types::TestTransaction; - use super::*; use tracing::instrument; @@ -413,44 +419,4 @@ mod test { assert!(cache.hashes.contains(&3)); assert!(cache.hashes.contains(&4)); } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_hash_calculation() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); - assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_cache_integrity() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - let mut cache = Cache::new(3); - - // test insertion integrity - cache.insert(calculate_hash_of(&message1)); - cache.insert(calculate_hash_of(&message2)); - - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - - // check that the cache is not modified on duplicate entries - cache.insert(calculate_hash_of(&message1)); - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - assert_eq!(cache.len(), 2); - } } diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs index adaff123f2..bf4bc727eb 100644 --- a/hotshot/src/traits/storage/memory_storage.rs +++ b/hotshot/src/traits/storage/memory_storage.rs @@ -104,77 +104,3 @@ impl Storage for MemoryStorage { Ok(()) // do nothing } } - -#[cfg(test)] -mod test { - - use super::*; - use commit::Committable; - use hotshot_testing::{ - block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, - node_types::TestTypes, - }; - use hotshot_types::{ - data::{fake_commitment, Leaf}, - simple_certificate::QuorumCertificate, - traits::{ - node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, - }, - }; - use std::marker::PhantomData; - use tracing::instrument; - - fn random_stored_view(view_number: ::Time) -> StoredView { - let payload = TestBlockPayload::genesis(); - let header = TestBlockHeader { - block_number: 0, - payload_commitment: genesis_vid_commitment(), - }; - let dummy_leaf_commit = fake_commitment::>(); - let data = hotshot_types::simple_vote::QuorumData { - leaf_commit: dummy_leaf_commit, - }; - let commit = data.commit(); - StoredView::from_qc_block_and_state( - QuorumCertificate { - is_genesis: view_number == ::Time::genesis(), - data, - vote_commitment: commit, - signatures: None, - view_number, - _pd: PhantomData, - }, - header, - Some(payload), - dummy_leaf_commit, - <::SignatureKey as SignatureKey>::genesis_proposer_pk(), - ) - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn memory_storage() { - let storage = MemoryStorage::construct_tmp_storage().unwrap(); - let genesis = random_stored_view(::Time::genesis()); - storage - .append_single_view(genesis.clone()) - .await - .expect("Could not append block"); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number) - .await - .unwrap(); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number + 1) - .await - .unwrap(); - assert!(storage.get_anchored_view().await.is_err()); - } -} diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 7445ecda3b..6b017b91f2 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -17,6 +17,7 @@ futures = { workspace = true } hotshot = { path = "../hotshot", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-testing = { path = "../testing", default-features = false } +hotshot-example-types = { path = "../example-types" } jf-primitives = { workspace = true } rand = { workspace = true } snafu = { workspace = true } diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 186087ec7c..106948d90f 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -1,11 +1,11 @@ +use hotshot_example_types::node_types::{MemoryImpl, WebImpl}; +use hotshot_example_types::state_types::TestTypes; use hotshot_testing::completion_task::{ CompletionTaskDescription, TimeBasedCompletionTaskDescription, }; -use hotshot_testing::node_types::{MemoryImpl, WebImpl}; use hotshot_testing::spinning_task::ChangeNode; use hotshot_testing::spinning_task::SpinningTaskDescription; use hotshot_testing::spinning_task::UpDown; -use hotshot_testing::state_types::TestTypes; use hotshot_testing::test_builder::TestMetadata; use hotshot_testing_macros::cross_tests; use std::time::Duration; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index ceb54a410f..aed8aea128 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -20,7 +20,7 @@ either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = [ "hotshot-testing", -], default-features = false } +] } hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } @@ -35,6 +35,7 @@ async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } hotshot-task = { path = "../task" } +hotshot-example-types = { path = "../example-types" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/README.md b/testing/README.md index 59b281f421..67551acc79 100644 --- a/testing/README.md +++ b/testing/README.md @@ -23,11 +23,11 @@ Easily overriding setup/correctness checks/hooks and launching is all done by an ```rust use std::sync::Arc; use futures::FutureExt; -use hotshot_testing::test_types::StaticNodeImplType; -use hotshot_testing::round::RoundHook; -use hotshot_testing::test_types::StaticCommitteeTestTypes; -use hotshot_testing::test_builder::TestBuilder; -use hotshot_testing::test_builder::TestMetadata; +use hotshot_example_types::test_types::StaticNodeImplType; +use hotshot_example_types::round::RoundHook; +use hotshot_example_types::test_types::StaticCommitteeTestTypes; +use hotshot_example_types::test_builder::TestBuilder; +use hotshot_example_types::test_builder::TestMetadata; async { // specify general characteristics of the test in TestMetadata diff --git a/testing/src/lib.rs b/testing/src/lib.rs index c1a84af20a..224dfae3cf 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -33,15 +33,6 @@ pub mod spinning_task; /// task for checking if view sync got activated pub mod view_sync_task; -/// block types -pub mod block_types; - -/// Implementations for testing/examples -pub mod state_types; - -/// node types -pub mod node_types; - /// global event at the test level #[derive(Clone, Debug)] pub enum GlobalTestEvent { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8a7a4744f0..062b04c63e 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,12 +1,13 @@ #![allow(clippy::panic)] use std::marker::PhantomData; -use crate::{ +use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload}, node_types::{MemoryImpl, TestTypes}, state_types::{TestInstanceState, TestValidatedState}, - test_builder::TestMetadata, }; + +use crate::test_builder::TestMetadata; use commit::Committable; use ethereum_types::U256; use hotshot::{ diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 7ae67b979a..467b7b734d 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -7,7 +7,6 @@ use super::{ use crate::{ completion_task::CompletionTaskDescription, spinning_task::{ChangeNode, SpinningTask, UpDown}, - state_types::TestInstanceState, test_launcher::{Networks, TestLauncher}, txn_task::TxnTaskDescription, view_sync_task::ViewSyncTask, @@ -15,6 +14,7 @@ use crate::{ use async_broadcast::broadcast; use futures::future::join_all; use hotshot::{types::SystemContextHandle, Memberships}; +use hotshot_example_types::state_types::TestInstanceState; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 64bd08adfb..eae668139a 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -7,14 +7,13 @@ async fn test_catchup() { use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { @@ -69,9 +68,9 @@ async fn test_catchup() { async fn test_catchup_web() { use std::time::Duration; + use hotshot_example_types::node_types::{TestTypes, WebImpl}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{TestTypes, WebImpl}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -126,14 +125,13 @@ async fn test_catchup_web() { async fn test_catchup_one_node() { use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { @@ -185,14 +183,13 @@ async fn test_catchup_one_node() { async fn test_catchup_in_view_sync() { use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 9e0befae8c..ed6273a8a3 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -1,8 +1,8 @@ use std::time::Duration; +use hotshot_example_types::node_types::{CombinedImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{CombinedImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, @@ -10,7 +10,53 @@ use hotshot_testing::{ use rand::Rng; use tracing::instrument; +use hotshot::traits::implementations::{calculate_hash_of, Cache}; +use hotshot_example_types::block_types::TestTransaction; + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_hash_calculation() { + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); + + assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); + assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_cache_integrity() { + let message1 = TestTransaction(vec![0; 32]); + let message2 = TestTransaction(vec![1; 32]); + + let mut cache = Cache::new(3); + + // test insertion integrity + cache.insert(calculate_hash_of(&message1)); + cache.insert(calculate_hash_of(&message2)); + + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + + // check that the cache is not modified on duplicate entries + cache.insert(calculate_hash_of(&message1)); + assert!(cache.contains(calculate_hash_of(&message1))); + assert!(cache.contains(calculate_hash_of(&message2))); + assert_eq!(cache.len(), 2); +} + /// A run with both the webserver and libp2p functioning properly +#[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 8ed52d6aea..60df646828 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,11 +1,9 @@ #![allow(clippy::panic)] use commit::Committable; use hotshot::{types::SystemContextHandle, HotShotConsensusApi}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - task_helpers::{build_quorum_proposal, key_pair_for_id}, -}; +use hotshot_testing::task_helpers::{build_quorum_proposal, key_pair_for_id}; use hotshot_types::simple_vote::QuorumVote; use hotshot_types::vote::Certificate; use hotshot_types::{ @@ -183,9 +181,9 @@ async fn test_consensus_vote() { async fn test_consensus_with_vid() { use hotshot::traits::BlockPayload; use hotshot::types::SignatureKey; + use hotshot_example_types::block_types::TestBlockPayload; + use hotshot_example_types::block_types::TestTransaction; use hotshot_task_impls::harness::run_harness; - use hotshot_testing::block_types::TestBlockPayload; - use hotshot_testing::block_types::TestTransaction; use hotshot_testing::task_helpers::build_cert; use hotshot_testing::task_helpers::build_system_handle; use hotshot_testing::task_helpers::vid_init; diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 9371b4f913..33b40ff0c7 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,9 +1,9 @@ use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_task_impls::{da::DATaskState, events::HotShotEvent}; -use hotshot_testing::{ +use hotshot_example_types::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, }; +use hotshot_task_impls::{da::DATaskState, events::HotShotEvent}; use hotshot_types::{ data::{DAProposal, ViewNumber}, simple_vote::{DAData, DAVote}, diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 89448e1c09..44be24f9af 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -1,8 +1,8 @@ use std::time::Duration; +use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{Libp2pImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, diff --git a/testing/tests/lossy.rs b/testing/tests/lossy.rs index c829b07d01..2af0b3aca5 100644 --- a/testing/tests/lossy.rs +++ b/testing/tests/lossy.rs @@ -1,6 +1,6 @@ // TODO these should be config options for lossy network // #![allow(clippy::type_complexity)] -// use hotshot_testing::{ +// use hotshot_example_types::{ // network_reliability::{AsynchronousNetwork, PartiallySynchronousNetwork, SynchronousNetwork}, // test_builder::{TestBuilder, TestMetadata}, // test_types::{StaticCommitteeTestTypes, StaticNodeImplType}, diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index d52d6bddd8..ffe5d9d0ee 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -10,8 +10,8 @@ use hotshot::traits::implementations::{ use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; use hotshot_constants::VERSION_0_1; -use hotshot_testing::state_types::TestInstanceState; -use hotshot_testing::{ +use hotshot_example_types::state_types::TestInstanceState; +use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestValidatedState, }; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 447805d9c7..413bbb5f5f 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,9 +1,7 @@ use hotshot::{types::SignatureKey, HotShotConsensusApi}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::{ - node_types::{MemoryImpl, TestTypes}, - task_helpers::{build_quorum_proposal, vid_init}, -}; +use hotshot_testing::task_helpers::{build_quorum_proposal, vid_init}; use hotshot_types::{ data::{DAProposal, VidSchemeTrait, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, diff --git a/testing/tests/storage.rs b/testing/tests/storage.rs new file mode 100644 index 0000000000..da26afbd23 --- /dev/null +++ b/testing/tests/storage.rs @@ -0,0 +1,72 @@ +use commit::Committable; +use hotshot::traits::implementations::MemoryStorage; +use hotshot::traits::Storage; +use hotshot_example_types::{ + block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, + node_types::TestTypes, +}; +use hotshot_types::{ + data::{fake_commitment, Leaf}, + simple_certificate::QuorumCertificate, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, + storage::{StoredView, TestableStorage}, + }, +}; +use std::marker::PhantomData; +use tracing::instrument; + +fn random_stored_view(view_number: ::Time) -> StoredView { + let payload = TestBlockPayload::genesis(); + let header = TestBlockHeader { + block_number: 0, + payload_commitment: genesis_vid_commitment(), + }; + let dummy_leaf_commit = fake_commitment::>(); + let data = hotshot_types::simple_vote::QuorumData { + leaf_commit: dummy_leaf_commit, + }; + let commit = data.commit(); + StoredView::from_qc_block_and_state( + QuorumCertificate { + is_genesis: view_number == ::Time::genesis(), + data, + vote_commitment: commit, + signatures: None, + view_number, + _pd: PhantomData, + }, + header, + Some(payload), + dummy_leaf_commit, + <::SignatureKey as SignatureKey>::genesis_proposer_pk(), + ) +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn memory_storage() { + let storage = MemoryStorage::construct_tmp_storage().unwrap(); + let genesis = random_stored_view(::Time::genesis()); + storage + .append_single_view(genesis.clone()) + .await + .expect("Could not append block"); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number) + .await + .unwrap(); + assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); + storage + .cleanup_storage_up_to_view(genesis.view_number + 1) + .await + .unwrap(); + assert!(storage.get_anchored_view().await.is_err()); +} diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 43b1313e9a..2ab05f386e 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -9,16 +9,15 @@ async fn test_timeout_web() { use std::time::Duration; - use hotshot_testing::node_types::WebImpl; + use hotshot_example_types::node_types::WebImpl; + use hotshot_example_types::node_types::TestTypes; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::TestTypes, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { @@ -74,16 +73,17 @@ async fn test_timeout_web() { async fn test_timeout_libp2p() { use std::time::Duration; - use hotshot_testing::node_types::Libp2pImpl; + use hotshot_example_types::node_types::Libp2pImpl; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::TestTypes, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestMetadata, TimingData}, }; + use hotshot_example_types::node_types::TestTypes; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index e218ed050c..eb47244145 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -6,7 +6,7 @@ use either::Left; use hotshot_constants::Version; -use hotshot_testing::node_types::TestTypes; +use hotshot_example_types::node_types::TestTypes; use hotshot_utils::bincode::bincode_opts; diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index 756d980f1a..8af6ce2310 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -6,9 +6,9 @@ use hotshot_types::traits::network::SynchronousNetwork; use std::time::Duration; use std::time::Instant; +use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{Libp2pImpl, TestTypes}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::TestMetadata, }; @@ -54,9 +54,9 @@ async fn libp2p_network_sync() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_sync() { + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; use std::time::Duration; @@ -133,11 +133,12 @@ async fn libp2p_network_async() { #[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_async() { + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; + use std::time::Duration; async_compatibility_layer::logging::setup_logging(); @@ -181,11 +182,12 @@ async fn test_memory_network_async() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_partially_sync() { + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; + use std::time::Duration; async_compatibility_layer::logging::setup_logging(); @@ -275,9 +277,9 @@ async fn libp2p_network_partially_sync() { #[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_chaos() { + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{MemoryImpl, TestTypes}, test_builder::TestMetadata, }; use std::time::Duration; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 041fbc1b75..eadfaf4e84 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,10 +1,10 @@ use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; -use hotshot_testing::{ +use hotshot_example_types::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, - task_helpers::vid_init, }; +use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; +use hotshot_testing::task_helpers::{build_system_handle, vid_init}; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, @@ -20,7 +20,6 @@ use std::marker::PhantomData; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_vid_task() { use hotshot_task_impls::harness::run_harness; - use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::message::Proposal; async_compatibility_layer::logging::setup_logging(); diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index a9a7a51a96..fd75e5944f 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,6 +1,6 @@ use hotshot::HotShotConsensusApi; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use std::collections::HashMap; diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 81277e7821..3871848e2c 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -1,9 +1,9 @@ use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; +use hotshot_example_types::node_types::{TestTypes, WebImpl}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - node_types::{TestTypes, WebImpl}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, }; From 58d17bc5dc05b6554ebd26694d2cc1e8d467d594 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 12 Feb 2024 14:57:42 -0500 Subject: [PATCH 0783/1393] [VID] Emit VID Share Info on Decide Event (#2563) * Store VID, emit during decide * Fix one more complex type * less `.0` --- examples/infra/mod.rs | 2 +- hotshot/src/tasks/mod.rs | 4 ++-- task-impls/src/consensus.rs | 37 +++++++++++++++--------------- testing/src/overall_safety_task.rs | 22 ++++++++---------- types/src/event.rs | 7 ++++-- 5 files changed, 37 insertions(+), 35 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index babf08a6bf..e056c6af78 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -435,7 +435,7 @@ pub trait RunDA< block_size, } => { // this might be a obob - if let Some(leaf) = leaf_chain.first() { + if let Some((leaf, _)) = leaf_chain.first() { info!("Decide event for leaf: {}", *leaf.view_number); let new_anchor = leaf.view_number; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index aafa3b1d5b..172f904b9f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -28,7 +28,7 @@ use hotshot_types::{ }, }; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, sync::Arc, time::Duration, @@ -163,7 +163,7 @@ pub async fn create_consensus_state /// TODO - pub vid_shares: HashMap>>, + pub vid_shares: BTreeMap>>, /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync @@ -243,17 +242,15 @@ impl, A: ConsensusApi + } } - // TODO: re-enable this when HotShot/the sequencer needs the shares for something - // issue: https://github.com/EspressoSystems/HotShot/issues/2236 // Only vote if you has seen the VID share for this view - // if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { - // } else { - // debug!( - // "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - // proposal.view_number - // ); - // return false; - // } + if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { + } else { + debug!( + "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + proposal.view_number + ); + return false; + } // Only vote if you have the DA cert // ED Need to update the view number this is stored under? @@ -514,7 +511,7 @@ impl, A: ConsensusApi + Event { view_number: TYPES::Time::genesis(), event: EventType::Decide { - leaf_chain: Arc::new(vec![leaf.clone()]), + leaf_chain: Arc::new(vec![(leaf.clone(), None)]), qc: Arc::new(justify_qc.clone()), block_size: None, }, @@ -743,7 +740,11 @@ impl, A: ConsensusApi + leaf.fill_block_payload_unchecked(payload); } - leaf_views.push(leaf.clone()); + let vid = self + .vid_shares + .get(&leaf.get_view_number()) + .map(|vid_proposal| vid_proposal.data.clone()); + leaf_views.push((leaf.clone(), vid)); if let Some(ref payload) = leaf.block_payload { for txn in payload .transaction_commitments(leaf.get_block_header().metadata()) @@ -803,6 +804,7 @@ impl, A: ConsensusApi + consensus .collect_garbage(old_anchor_view, new_anchor_view) .await; + self.vid_shares = self.vid_shares.split_off(&new_anchor_view); consensus.last_decided_view = new_anchor_view; consensus.metrics.invalid_qc.set(0); consensus @@ -1063,9 +1065,8 @@ impl, A: ConsensusApi + .await; // Add to the storage that we have received the VID disperse for a specific view - // TODO: re-enable this when HotShot/the sequencer needs the shares for something - // issue: https://github.com/EspressoSystems/HotShot/issues/2236 - // self.vid_shares.insert(view, disperse); + self.vid_shares.insert(view, disperse); + self.vote_if_able(&event_stream).await; } HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 693a0d46dd..8af675d08c 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -2,9 +2,9 @@ use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ - data::{Leaf, VidCommitment}, + data::{Leaf, VidCommitment, VidDisperse}, error::RoundTimedoutState, - event::{Event, EventType}, + event::{Event, EventType, LeafChain}, simple_certificate::QuorumCertificate, traits::node_implementation::{ConsensusTime, NodeType}, }; @@ -160,7 +160,7 @@ impl> TestTaskState } => { // Skip the genesis leaf. if leaf_chain.len() == 1 - && leaf_chain[0].get_view_number() == TYPES::Time::genesis() + && leaf_chain[0].0.get_view_number() == TYPES::Time::genesis() { return None; } @@ -206,7 +206,7 @@ impl> TestTaskState view.update_status( threshold, len, - &key, + &key.0, check_leaf, check_block, transaction_threshold, @@ -264,9 +264,7 @@ pub struct RoundResult { /// Nodes that committed this round /// id -> (leaf, qc) - // TODO GG: isn't it infeasible to store a Vec>? - #[allow(clippy::type_complexity)] - success_nodes: HashMap>, QuorumCertificate)>, + success_nodes: HashMap, QuorumCertificate)>, /// Nodes that failed to commit this round pub failed_nodes: HashMap>>, @@ -358,13 +356,13 @@ impl RoundResult { pub fn insert_into_result( &mut self, idx: usize, - result: (Vec>, QuorumCertificate), + result: (LeafChain, QuorumCertificate), maybe_block_size: Option, - ) -> Option> { + ) -> Option<(Leaf, Option>)> { self.success_nodes.insert(idx as u64, result.clone()); - let maybe_leaf: Option> = result.0.into_iter().last(); - if let Some(leaf) = maybe_leaf.clone() { + let maybe_leaf: Option<(Leaf, _)> = result.0.into_iter().last(); + if let Some((leaf, _)) = maybe_leaf.clone() { match self.leaf_map.entry(leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; @@ -477,7 +475,7 @@ impl RoundResult { for (leaf_vec, _) in self.success_nodes.values() { let most_recent_leaf = leaf_vec.iter().last(); - if let Some(leaf) = most_recent_leaf { + if let Some((leaf, _)) = most_recent_leaf { match leaves.entry(leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; diff --git a/types/src/event.rs b/types/src/event.rs index 2063e374a9..2e285ee79e 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,7 +1,7 @@ //! Events that a `HotShot` instance can emit use crate::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal}, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate, @@ -21,6 +21,8 @@ pub struct Event { pub event: EventType, } +/// The chain of leafs decided on with corresponding VID info if we have it +pub type LeafChain = Vec<(Leaf, Option>)>; /// The type and contents of a status event emitted by a `HotShot` instance /// /// This enum does not include metadata shared among all variants, such as the stage and view @@ -41,7 +43,8 @@ pub enum EventType { /// block first in the list. /// /// This list may be incomplete if the node is currently performing catchup. - leaf_chain: Arc>>, + /// Vid Info for a decided view may be missing if this node never saw it's share. + leaf_chain: Arc>, /// The QC signing the most recent leaf in `leaf_chain`. /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf From 006b047ccf7349496c4449a82056d6ead3d3f2d0 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 13 Feb 2024 11:19:51 +0800 Subject: [PATCH 0784/1393] [Key Management] Get peer's public key from orchestrator (#2525) * init push w bug * post pubkey to orchestrator and clean up * clean up * clean up * lint * lint * add hash set for posted public configs * clean up * solve comments * lint --- examples/infra/mod.rs | 56 +++++++----- examples/webserver/all.rs | 7 +- orchestrator/api.toml | 32 +++++++ orchestrator/src/client.rs | 43 ++++++++- orchestrator/src/config.rs | 1 + orchestrator/src/lib.rs | 175 +++++++++++++++++++++++++++++++++---- 6 files changed, 271 insertions(+), 43 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index e056c6af78..cdc42122a4 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -91,6 +91,13 @@ pub struct ConfigArgs { /// Reads a network configuration from a given filepath /// # Panics /// if unable to convert the config file into toml +/// # Note +/// This derived config is used for initialization of orchestrator, +/// therefore `known_nodes_with_stake` will be an initialized +/// vector full of the node's own config. +/// `my_own_validator_config` will be generated from seed here +/// for loading config from orchestrator, +/// or else it will be loaded from file. #[must_use] pub fn load_config_from_file( config_file: &str, @@ -104,20 +111,18 @@ pub fn load_config_from_file( let mut config: NetworkConfig = config_toml.into(); - // Generate network's public keys - let known_nodes: Vec<_> = (0..config.config.total_nodes.get()) - .map(|node_id| { - TYPES::SignatureKey::generated_from_seed_indexed( - config.seed, - node_id.try_into().unwrap(), - ) - .0 - }) - .collect(); - - config.config.known_nodes_with_stake = (0..config.config.total_nodes.get()) - .map(|node_id| known_nodes[node_id].get_stake_table_entry(1u64)) - .collect(); + // my_own_validator_config would be best to load from file, + // but its type is too complex to load so we'll generate it from seed now + config.config.my_own_validator_config = + ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1); + let my_own_validator_config_with_stake = config + .config + .my_own_validator_config + .public_key + .get_stake_table_entry(1u64); + // initialize it with size for better assignment of other peers' config + config.config.known_nodes_with_stake = + vec![my_own_validator_config_with_stake; config.config.total_nodes.get() as usize]; config } @@ -878,19 +883,24 @@ pub async fn main_entry_point< // conditionally save/load config from file or orchestrator let (mut run_config, source) = - NetworkConfig::from_file_or_orchestrator(&orchestrator_client, args.network_config_file) - .await; + NetworkConfig::::from_file_or_orchestrator( + &orchestrator_client, + args.clone().network_config_file, + ) + .await; let node_index = run_config.node_index; error!("Retrieved config; our node index is {node_index}"); - run_config.config.my_own_validator_config = - ValidatorConfig::<::SignatureKey>::generated_from_seed_indexed( - run_config.seed, - node_index, - 1, - ); - //run_config.libp2p_config.as_mut().unwrap().public_ip = args.public_ip.unwrap(); + // one more round of orchestrator here to get peer's public key/config + let updated_config: NetworkConfig = + orchestrator_client + .post_and_wait_all_public_keys::( + run_config.node_index, + run_config.config.my_own_validator_config.public_key.clone(), + ) + .await; + run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; error!("Initializing networking"); let run = RUNDA::initialize_networking(run_config.clone()).await; diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index 99de0d9f93..d70de4bba5 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -29,9 +29,12 @@ use types::VIDNetwork; #[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] async fn main() { + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + setup_logging(); + setup_backtrace(); + // use configfile args let args = ConfigArgs::parse(); - // spawn web servers let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); @@ -96,7 +99,7 @@ async fn main() { NodeImpl, ThisRun, >(ValidatorArgs { - url: orchestrator_url, + url: orchestrator_url.clone(), public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), network_config_file: None, }) diff --git a/orchestrator/api.toml b/orchestrator/api.toml index e855a07369..3b03e64764 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -3,6 +3,15 @@ NAME = "orchestrator" DESCRIPTION = "Orchestrator for HotShot" FORMAT_VERSION = "0.1.0" +# POST node's identity +[route.postidentity] +PATH = ["identity/:identity"] +METHOD = "POST" +":identity" = "Literal" +DOC = """ +POST a node's identity (IP address) to the orchestrator. Returns the node's node_index. +""" + # POST retrieve the network configuration [route.post_getconfig] PATH = ["config/:node_index"] @@ -14,6 +23,29 @@ This must be a POST request so we can update the OrchestratorState in the server received from the 'identity' endpoint """ +# POST the node's node index to generate public key for pubkey collection +[route.postpubkey] +PATH = ["pubkey/:node_index"] +METHOD = "POST" +":node_index" = "Integer" +DOC = """ +Post a node's node_index so that its public key could be posted and collected by the orchestrator. +""" + +# GET whether or not the config with all peers' public keys / configs are ready +[route.peer_pubconfig_ready] +PATH = ["peer_pub_ready"] +DOC = """ +Get whether the node can collect the final config which includs all peer's public config/info like public keys, returns a boolean. +""" + +# GET the updated config with all peers' public keys / configs +[route.config_after_peer_collected] +PATH = ["config_after_peer_collected"] +DOC = """ +Get the updated config with all peers' public keys / configs, returns a NetworkConfig. +""" + # POST whether the node is ready to begin the run # TODO ED Use the node index parameter [route.postready] diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 55213387c6..102c138b30 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -5,7 +5,10 @@ use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey}; +use hotshot_types::{ + traits::{election::ElectionConfig, signature_key::SignatureKey}, + ValidatorConfig, +}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; @@ -136,12 +139,48 @@ impl OrchestratorClient { }; let mut config = self.wait_for_fn_from_orchestrator(f).await; - config.node_index = From::::from(node_index); + // The orchestrator will generate keys for validator if it doesn't load keys from file + config.config.my_own_validator_config = + ValidatorConfig::::generated_from_seed_indexed(config.seed, config.node_index, 1); config } + /// Sends my public key to the orchestrator so that it can collect all public keys + /// And get the updated config + /// Blocks until the orchestrator collects all peer's public keys/configs + /// # Panics + /// if unable to post + pub async fn post_and_wait_all_public_keys( + &self, + node_index: u64, + my_pub_key: K, + ) -> NetworkConfig { + // send my public key + let _send_pubkey_ready_f: Result<(), ClientError> = self + .client + .post(&format!("api/pubkey/{node_index}")) + .body_binary(&my_pub_key.to_bytes()) + .unwrap() + .send() + .await; + + // wait for all nodes' public keys + let wait_for_all_nodes_pub_key = |client: Client| { + async move { client.get("api/peer_pub_ready").send().await }.boxed() + }; + self.wait_for_fn_from_orchestrator::<_, _, ()>(wait_for_all_nodes_pub_key) + .await; + + // get the newest updated config + self.client + .get("api/config_after_peer_collected") + .send() + .await + .expect("Unable to get the updated config") + } + /// Tells the orchestrator this validator is ready to start /// Blocks until the orchestrator indicates all nodes are ready to start /// # Panics diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 0c16d8a043..bdd3ddb84b 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -200,6 +200,7 @@ impl NetworkConfig { file: Option, ) -> (NetworkConfig, NetworkConfigSource) { if let Some(file) = file { + error!("Retrieving config from the file"); // if we pass in file, try there first match Self::from_file(file.clone()) { Ok(config) => (config, NetworkConfigSource::File), diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 2d9a27eec7..d5c8abc330 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -7,7 +7,12 @@ pub mod config; use async_lock::RwLock; use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey}; -use std::{io, io::ErrorKind}; +use std::{ + collections::HashSet, + io, + io::ErrorKind, + net::{IpAddr, SocketAddr}, +}; use tide_disco::{Api, App}; use surf_disco::Url; @@ -25,7 +30,6 @@ use libp2p::identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, Keypair, }; - /// Generate an keypair based on a `seed` and an `index` /// # Panics /// This panics if libp2p is unable to generate a secret key from the seed @@ -42,8 +46,16 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { /// The state of the orchestrator #[derive(Default, Clone)] struct OrchestratorState { + /// Tracks the latest node index we have generated a configuration for + latest_index: u16, /// The network configuration config: NetworkConfig, + /// The total nodes that have posted their public keys + pub nodes_with_pubkey: u64, + /// Whether the network configuration has been updated with all the peer's public keys/configs + peer_pub_ready: bool, + /// The set of index for nodes that have posted their public keys/configs + pub_posted: HashSet, /// Whether nodes should start their HotShot instances /// Will be set to true once all nodes post they are ready to start start: bool, @@ -57,22 +69,46 @@ impl /// create a new [`OrchestratorState`] pub fn new(network_config: NetworkConfig) -> Self { OrchestratorState { + latest_index: 0, config: network_config, - start: false, + nodes_with_pubkey: 0, + peer_pub_ready: false, + pub_posted: HashSet::new(), nodes_connected: 0, + start: false, } } } /// An api exposed by the orchestrator pub trait OrchestratorApi { + /// post endpoint for identity + /// # Errors + /// if unable to serve + fn post_identity(&mut self, identity: IpAddr) -> Result; /// post endpoint for each node's config /// # Errors /// if unable to serve fn post_getconfig( &mut self, - node_index: u16, + _node_index: u16, ) -> Result, ServerError>; + /// post endpoint for each node's public key + /// # Errors + /// if unable to serve + fn register_public_key( + &mut self, + node_index: u64, + pubkey: &mut Vec, + ) -> Result<(), ServerError>; + /// post endpoint for whether or not all peers public keys are ready + /// # Errors + /// if unable to serve + fn peer_pub_ready(&self) -> Result; + /// get endpoint for the network config after all peers public keys are collected + /// # Errors + /// if unable to serve + fn get_config_after_peer_collected(&self) -> Result, ServerError>; /// get endpoint for whether or not the run has started /// # Errors /// if unable to serve @@ -92,6 +128,41 @@ where KEY: serde::Serialize + Clone + SignatureKey, ELECTION: serde::Serialize + Clone + Send + ElectionConfig, { + fn post_identity(&mut self, identity: IpAddr) -> Result { + let node_index = self.latest_index; + self.latest_index += 1; + + // TODO https://github.com/EspressoSystems/HotShot/issues/850 + if usize::from(node_index) >= self.config.config.total_nodes.get() { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Network has reached capacity".to_string(), + }); + } + + if self.config.libp2p_config.clone().is_some() { + let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); + // Designate node as bootstrap node and store its identity information + if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { + let port_index = if libp2p_config_clone.index_ports { + node_index + } else { + 0 + }; + let socketaddr = + SocketAddr::new(identity, libp2p_config_clone.base_port + port_index); + let keypair = libp2p_generate_indexed_identity(self.config.seed, node_index.into()); + self.config + .libp2p_config + .as_mut() + .unwrap() + .bootstrap_nodes + .push((socketaddr, keypair.to_protobuf_encoding().unwrap())); + } + } + Ok(node_index) + } + // Assumes nodes will set their own index that they received from the // 'identity' endpoint fn post_getconfig( @@ -110,6 +181,58 @@ where Ok(self.config.clone()) } + #[allow(clippy::cast_possible_truncation)] + fn register_public_key( + &mut self, + node_index: u64, + pubkey: &mut Vec, + ) -> Result<(), ServerError> { + if self.pub_posted.contains(&node_index) { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Node has already posted public key".to_string(), + }); + } + self.pub_posted.insert(node_index); + + // Sishan NOTE: let me know if there's a better way to remove the first extra 8 bytes + // The guess is extra bytes are from orchestrator serialization + pubkey.drain(..8); + let register_pub_key = ::from_bytes(pubkey).unwrap(); + let register_pub_key_with_stake = register_pub_key.get_stake_table_entry(1u64); + self.config.config.known_nodes_with_stake[node_index as usize] = + register_pub_key_with_stake; + self.nodes_with_pubkey += 1; + println!( + "Node {:?} posted public key, now total num posted public key: {:?}", + node_index, self.nodes_with_pubkey + ); + if self.nodes_with_pubkey >= (self.config.config.total_nodes.get() as u64) { + self.peer_pub_ready = true; + } + Ok(()) + } + + fn peer_pub_ready(&self) -> Result { + if !self.peer_pub_ready { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Peer's public configs are not ready".to_string(), + }); + } + Ok(self.peer_pub_ready) + } + + fn get_config_after_peer_collected(&self) -> Result, ServerError> { + if !self.peer_pub_ready { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Peer's public configs are not ready".to_string(), + }); + } + Ok(self.config.clone()) + } + fn get_start(&self) -> Result { // println!("{}", self.start); if !self.start { @@ -126,15 +249,7 @@ where fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; println!("Nodes connected: {}", self.nodes_connected); - if self.nodes_connected - >= self - .config - .config - .known_nodes_with_stake - .len() - .try_into() - .unwrap() - { + if self.nodes_connected >= (self.config.config.total_nodes.get() as u64) { self.start = true; } Ok(()) @@ -160,16 +275,44 @@ where ))) .expect("API file is not valid toml"); let mut api = Api::::new(api_toml)?; - api.post("post_getconfig", |req, state| { + api.post("postidentity", |req, state| { + async move { + let identity = req.string_param("identity")?.parse::(); + if identity.is_err() { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Identity is not a properly formed IP address".to_string(), + }); + } + state.post_identity(identity.unwrap()) + } + .boxed() + })? + .post("post_getconfig", |req, state| { async move { let node_index = req.integer_param("node_index")?; state.post_getconfig(node_index) } .boxed() })? - .post("postready", |_req, state| { - async move { state.post_ready() }.boxed() + .post("postpubkey", |req, state| { + async move { + let node_index = req.integer_param("node_index")?; + let mut pubkey = req.body_bytes(); + state.register_public_key(node_index, &mut pubkey) + } + .boxed() + })? + .get("peer_pubconfig_ready", |_req, state| { + async move { state.peer_pub_ready() }.boxed() + })? + .get("config_after_peer_collected", |_req, state| { + async move { state.get_config_after_peer_collected() }.boxed() })? + .post( + "postready", + |_req, state: &mut ::State| async move { state.post_ready() }.boxed(), + )? .get("getstart", |_req, state| { async move { state.get_start() }.boxed() })? From 1e6dc7b0c10fb812caf77374b03fd2e7a9a8b9e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 09:30:58 -0500 Subject: [PATCH 0785/1393] Bump async-broadcast from 0.6.0 to 0.7.0 (#2570) Bumps [async-broadcast](https://github.com/smol-rs/async-broadcast) from 0.6.0 to 0.7.0. - [Changelog](https://github.com/smol-rs/async-broadcast/blob/master/CHANGELOG.md) - [Commits](https://github.com/smol-rs/async-broadcast/compare/0.6.0...0.7.0) --- updated-dependencies: - dependency-name: async-broadcast dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- task/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task/Cargo.toml b/task/Cargo.toml index 39c531a637..9ee39d4654 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] futures = "0.3.30" -async-broadcast = "0.6.0" +async-broadcast = "0.7.0" tracing = { workspace = true } async-compatibility-layer = { workspace = true } From 8c0a88e77ac8b69a03e99515b89a2d8beba72242 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 09:31:19 -0500 Subject: [PATCH 0786/1393] Bump derive_builder from 0.13.0 to 0.13.1 (#2567) Bumps [derive_builder](https://github.com/colin-kiegel/rust-derive-builder) from 0.13.0 to 0.13.1. - [Release notes](https://github.com/colin-kiegel/rust-derive-builder/releases) - [Commits](https://github.com/colin-kiegel/rust-derive-builder/compare/v0.13.0...v0.13.1) --- updated-dependencies: - dependency-name: derive_builder dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 8b054fcda1..ac8a5c1c82 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -18,7 +18,7 @@ async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } -derive_builder = "0.13.0" +derive_builder = "0.13.1" either = { workspace = true } futures = { workspace = true } hotshot-constants = { path = "../constants" } diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 6b017b91f2..2a18bba866 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -27,7 +27,7 @@ serde = { workspace = true } quote = "1.0.33" syn = { version = "2.0.43", features = ["full", "extra-traits"] } proc-macro2 = "1.0.78" -derive_builder = "0.13.0" +derive_builder = "0.13.1" [dev-dependencies] async-lock = { workspace = true } From 0040be1838af6d50999a4d1b6f69b31a36263890 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 13 Feb 2024 10:02:12 -0500 Subject: [PATCH 0787/1393] Remove Custom Libp2p Direct Message Codec (#2565) * Remove custom DM codec * cleanup * add a little timeout * rename and move constant --- libp2p-networking/examples/common/mod.rs | 4 +- .../src/network/behaviours/direct_message.rs | 35 ++- .../behaviours/direct_message_codec.rs | 200 ------------------ .../src/network/behaviours/mod.rs | 4 - libp2p-networking/src/network/def.rs | 7 +- libp2p-networking/src/network/mod.rs | 9 +- libp2p-networking/src/network/node.rs | 16 +- libp2p-networking/src/network/node/handle.rs | 7 +- testing/src/test_builder.rs | 2 +- 9 files changed, 34 insertions(+), 250 deletions(-) delete mode 100644 libp2p-networking/src/network/behaviours/direct_message_codec.rs diff --git a/libp2p-networking/examples/common/mod.rs b/libp2p-networking/examples/common/mod.rs index 8be084db29..648b4a55ec 100644 --- a/libp2p-networking/examples/common/mod.rs +++ b/libp2p-networking/examples/common/mod.rs @@ -20,7 +20,7 @@ use clap::{Args, Parser}; use libp2p::{multiaddr, request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use libp2p_networking::network::{ - behaviours::direct_message_codec::DirectMessageResponse, deserialize_msg, + deserialize_msg, network_node_handle_error::NodeConfigSnafu, spin_up_swarm, NetworkEvent, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, }; @@ -279,7 +279,7 @@ pub async fn handle_normal_msg( handle: Arc)>>, msg: NormalMessage, // in case we need to reply to direct message - chan: Option>, + chan: Option>>, ) -> Result<(), NetworkNodeHandleError> { debug!("node={} handling normal msg {:?}", handle.id(), msg); // send reply logic diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index 81e5d39692..fd543f4348 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -3,19 +3,16 @@ use std::{ task::Poll, }; +use libp2p::request_response::cbor::Behaviour; use libp2p::{ - request_response::{Behaviour, Event, Message, OutboundRequestId, ResponseChannel}, + request_response::{Event, Message, OutboundRequestId, ResponseChannel}, swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, Multiaddr, }; use libp2p_identity::PeerId; -// use libp2p_request_response::Behaviour; use tracing::{error, info}; -use super::{ - direct_message_codec::{DirectMessageCodec, DirectMessageRequest, DirectMessageResponse}, - exponential_backoff::ExponentialBackoff, -}; +use super::exponential_backoff::ExponentialBackoff; /// Request to direct message a peert pub struct DMRequest { @@ -33,7 +30,7 @@ pub struct DMRequest { /// usage: direct message peer pub struct DMBehaviour { /// The wrapped behaviour - request_response: Behaviour, + request_response: Behaviour, Vec>, /// In progress queries in_progress_rr: HashMap, /// Failed queries to be retried @@ -46,14 +43,14 @@ pub struct DMBehaviour { #[derive(Debug)] pub enum DMEvent { /// We received as Direct Request - DirectRequest(Vec, PeerId, ResponseChannel), + DirectRequest(Vec, PeerId, ResponseChannel>), /// We received a Direct Response DirectResponse(Vec, PeerId), } impl DMBehaviour { /// handle a direct message event - fn handle_dm_event(&mut self, event: Event) { + fn handle_dm_event(&mut self, event: Event, Vec>) { match event { Event::InboundFailure { peer, @@ -83,7 +80,7 @@ impl DMBehaviour { } Event::Message { message, peer, .. } => match message { Message::Request { - request: DirectMessageRequest(msg), + request: msg, channel, .. } => { @@ -95,7 +92,7 @@ impl DMBehaviour { } Message::Response { request_id, - response: DirectMessageResponse(msg), + response: msg, } => { // success, finished. if let Some(req) = self.in_progress_rr.remove(&request_id) { @@ -115,7 +112,7 @@ impl DMBehaviour { } impl NetworkBehaviour for DMBehaviour { - type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; + type ConnectionHandler = , Vec> as NetworkBehaviour>::ConnectionHandler; type ToSwarm = DMEvent; @@ -257,7 +254,7 @@ impl NetworkBehaviour for DMBehaviour { impl DMBehaviour { /// Create new behaviour based on request response #[must_use] - pub fn new(request_response: Behaviour) -> Self { + pub fn new(request_response: Behaviour, Vec>) -> Self { Self { request_response, in_progress_rr: HashMap::default(), @@ -286,21 +283,15 @@ impl DMBehaviour { let request_id = self .request_response - .send_request(&req.peer_id, DirectMessageRequest(req.data.clone())); + .send_request(&req.peer_id, req.data.clone()); info!("direct message request with id {:?}", request_id); self.in_progress_rr.insert(request_id, req); } /// Add a direct response for a channel - pub fn add_direct_response( - &mut self, - chan: ResponseChannel, - msg: Vec, - ) { - let res = self - .request_response - .send_response(chan, DirectMessageResponse(msg)); + pub fn add_direct_response(&mut self, chan: ResponseChannel>, msg: Vec) { + let res = self.request_response.send_response(chan, msg); if let Err(e) = res { error!("Error replying to direct message. {:?}", e); } diff --git a/libp2p-networking/src/network/behaviours/direct_message_codec.rs b/libp2p-networking/src/network/behaviours/direct_message_codec.rs deleted file mode 100644 index 18584ec510..0000000000 --- a/libp2p-networking/src/network/behaviours/direct_message_codec.rs +++ /dev/null @@ -1,200 +0,0 @@ -use async_trait::async_trait; -use futures::prelude::*; -use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p::request_response::Codec; -use serde::{Deserialize, Serialize}; -use std::io; - -/// Protocol for direct messages -#[derive(Debug, Clone)] -pub struct DirectMessageProtocol(); -/// Codec for direct messages -#[derive(Clone, Debug, Serialize, Deserialize, Default)] -pub struct DirectMessageCodec(); -/// Wrapper type describing a serialized direct message -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct DirectMessageRequest(pub Vec); -/// wrapper type describing the response to direct message -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct DirectMessageResponse(pub Vec); - -/// Maximum size of a direct message -pub const MAX_MSG_SIZE_DM: usize = 100_000_000; - -// NOTE: yoinked from libp2p -// -/// Writes a message to the given socket with a length prefix appended to it. Also flushes the socket. -/// -/// > **Note**: Prepends a variable-length prefix indicate the length of the message. This is -/// > compatible with what [`read_length_prefixed`] expects. -/// # Errors -/// On weird input from socket -pub async fn write_length_prefixed( - socket: &mut (impl AsyncWrite + Unpin), - data: impl AsRef<[u8]>, -) -> Result<(), io::Error> { - write_varint(socket, data.as_ref().len()).await?; - socket.write_all(data.as_ref()).await?; - socket.flush().await?; - - Ok(()) -} - -/// Writes a variable-length integer to the `socket`. -/// -/// > **Note**: Does **NOT** flush the socket. -/// # Errors -/// On weird input from socket -pub async fn write_varint( - socket: &mut (impl AsyncWrite + Unpin), - len: usize, -) -> Result<(), io::Error> { - let mut len_data = unsigned_varint::encode::usize_buffer(); - let encoded_len = unsigned_varint::encode::usize(len, &mut len_data).len(); - socket.write_all(&len_data[..encoded_len]).await?; - - Ok(()) -} - -/// Reads a variable-length integer from the `socket`. -/// -/// As a special exception, if the `socket` is empty and EOFs right at the beginning, then we -/// return `Ok(0)`. -/// -/// > **Note**: This function reads bytes one by one from the `socket`. It is therefore encouraged -/// > to use some sort of buffering mechanism. -/// # Errors -/// On weird input from socket -pub async fn read_varint(socket: &mut (impl AsyncRead + Unpin)) -> Result { - let mut buffer = unsigned_varint::encode::usize_buffer(); - let mut buffer_len = 0; - - loop { - match socket.read(&mut buffer[buffer_len..=buffer_len]).await? { - 0 => { - // Reaching EOF before finishing to read the length is an error, unless the EOF is - // at the very beginning of the substream, in which case we assume that the data is - // empty. - if buffer_len == 0 { - return Ok(0); - } - return Err(io::ErrorKind::UnexpectedEof.into()); - } - n => debug_assert_eq!(n, 1), - } - - buffer_len += 1; - - match unsigned_varint::decode::usize(&buffer[..buffer_len]) { - Ok((len, _)) => return Ok(len), - Err(unsigned_varint::decode::Error::Overflow) => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "overflow in variable-length integer", - )); - } - // TODO: why do we have a `__Nonexhaustive` variant in the error? I don't know how to process it - // Err(unsigned_varint::decode::Error::Insufficient) => {} - Err(_) => {} - } - } -} - -/// Reads a length-prefixed message from the given socket. -/// -/// The `max_size` parameter is the maximum size in bytes of the message that we accept. This is -/// necessary in order to avoid `DoS` attacks where the remote sends us a message of several -/// gigabytes. -/// -/// > **Note**: Assumes that a variable-length prefix indicates the length of the message. This is -/// > compatible with what [`write_length_prefixed`] does. -/// # Errors -/// On weird input from socket -pub async fn read_length_prefixed( - socket: &mut (impl AsyncRead + Unpin), - max_size: usize, -) -> io::Result> { - let len = read_varint(socket).await?; - if len > max_size { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Received data size ({len} bytes) exceeds maximum ({max_size} bytes)"), - )); - } - - let mut buf = vec![0; len]; - socket.read_exact(&mut buf).await?; - - Ok(buf) -} - -impl AsRef for DirectMessageProtocol { - fn as_ref(&self) -> &str { - "/HotShot/request_response/1.0" - } -} - -#[async_trait] -impl Codec for DirectMessageCodec { - type Protocol = DirectMessageProtocol; - - type Request = DirectMessageRequest; - - type Response = DirectMessageResponse; - - async fn read_request( - &mut self, - _: &DirectMessageProtocol, - io: &mut T, - ) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let msg = read_length_prefixed(io, MAX_MSG_SIZE_DM).await?; - - // NOTE we don't error here unless message is too big. - // We'll wrap this in a networkbehaviour and get parsing messages there - Ok(DirectMessageRequest(msg)) - } - - async fn read_response( - &mut self, - _: &DirectMessageProtocol, - io: &mut T, - ) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let msg = read_length_prefixed(io, MAX_MSG_SIZE_DM).await?; - Ok(DirectMessageResponse(msg)) - } - - async fn write_request( - &mut self, - _: &DirectMessageProtocol, - io: &mut T, - DirectMessageRequest(msg): DirectMessageRequest, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_length_prefixed(io, msg).await?; - io.close().await?; - - Ok(()) - } - - async fn write_response( - &mut self, - _: &DirectMessageProtocol, - io: &mut T, - DirectMessageResponse(msg): DirectMessageResponse, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_length_prefixed(io, msg).await?; - io.close().await?; - Ok(()) - } -} diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/libp2p-networking/src/network/behaviours/mod.rs index 7d0feeeb25..869b6324c3 100644 --- a/libp2p-networking/src/network/behaviours/mod.rs +++ b/libp2p-networking/src/network/behaviours/mod.rs @@ -7,9 +7,5 @@ pub mod direct_message; /// exponential backoff type pub mod exponential_backoff; -/// Implementation of a codec for sending messages -/// for `RequestResponse` -pub mod direct_message_codec; - /// Wrapper around Kademlia pub mod dht; diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index c412e6fd11..bfbf197b40 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -13,7 +13,6 @@ use super::{ behaviours::{ dht::{DHTBehaviour, DHTEvent, KadPutQuery}, direct_message::{DMBehaviour, DMEvent, DMRequest}, - direct_message_codec::DirectMessageResponse, exponential_backoff::ExponentialBackoff, gossip::{GossipBehaviour, GossipEvent}, }, @@ -145,11 +144,7 @@ impl NetworkDef { } /// Add a direct response for a channel - pub fn add_direct_response( - &mut self, - chan: ResponseChannel, - msg: Vec, - ) { + pub fn add_direct_response(&mut self, chan: ResponseChannel>, msg: Vec) { self.request_response.add_direct_response(chan, msg); } } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 9359b1f8eb..523b5eef34 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -17,10 +17,7 @@ pub use self::{ }, }; -use self::behaviours::{ - dht::DHTEvent, direct_message::DMEvent, direct_message_codec::DirectMessageResponse, - gossip::GossipEvent, -}; +use self::behaviours::{dht::DHTEvent, direct_message::DMEvent, gossip::GossipEvent}; use bincode::Options; use futures::channel::oneshot::Sender; use hotshot_utils::bincode::bincode_opts; @@ -125,7 +122,7 @@ pub enum ClientRequest { retry_count: u8, }, /// client request to send a direct reply to a message - DirectResponse(ResponseChannel, Vec), + DirectResponse(ResponseChannel>, Vec), /// prune a peer Prune(PeerId), /// add vec of known peers or addresses @@ -169,7 +166,7 @@ pub enum NetworkEvent { /// Recv-ed a broadcast GossipMsg(Vec, TopicHash), /// Recv-ed a direct message from a node - DirectRequest(Vec, PeerId, ResponseChannel), + DirectRequest(Vec, PeerId, ResponseChannel>), /// Recv-ed a direct response from a node (that hopefully was initiated by this node) DirectResponse(Vec, PeerId), /// Report that kademlia has successfully bootstrapped into the network diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index af5f682c3b..de28d88f81 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -22,7 +22,6 @@ use super::{ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMEvent}, - direct_message_codec::{DirectMessageCodec, DirectMessageProtocol, MAX_MSG_SIZE_DM}, exponential_backoff::ExponentialBackoff, gossip::GossipEvent, }; @@ -32,7 +31,7 @@ use async_compatibility_layer::{ }; use futures::{select, FutureExt, StreamExt}; use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; -use libp2p::core::transport::ListenerId; +use libp2p::{core::transport::ListenerId, StreamProtocol}; use libp2p::{ gossipsub::{ Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, @@ -61,6 +60,9 @@ use std::{ }; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; +/// Maximum size of a message +pub const MAX_GOSSIP_MSG_SIZE: usize = 200_000_000; + /// Wrapped num of connections pub const ESTABLISHED_LIMIT: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(ESTABLISHED_LIMIT_UNWR) }; @@ -211,7 +213,7 @@ impl NetworkNode { .mesh_outbound_min(params.mesh_outbound_min) .mesh_n(params.mesh_n) .history_length(500) - .max_transmit_size(2 * MAX_MSG_SIZE_DM) + .max_transmit_size(MAX_GOSSIP_MSG_SIZE) // Use the (blake3) hash of a message as its ID .message_id_fn(message_id_fn) .build() @@ -266,9 +268,13 @@ impl NetworkNode { let rrconfig = RequestResponseConfig::default(); - let request_response: libp2p::request_response::Behaviour = + let request_response: libp2p::request_response::cbor::Behaviour, Vec> = RequestResponse::new( - [(DirectMessageProtocol(), ProtocolSupport::Full)].into_iter(), + [( + StreamProtocol::new("/HotShot/request_response/1.0"), + ProtocolSupport::Full, + )] + .into_iter(), rrconfig, ); diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 885f66b097..69e17f3678 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -1,7 +1,6 @@ use crate::network::{ - behaviours::direct_message_codec::DirectMessageResponse, error::DHTError, gen_multiaddr, - ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, - NetworkNodeConfigBuilderError, + error::DHTError, gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, + NetworkNodeConfig, NetworkNodeConfigBuilderError, }; use async_compatibility_layer::{ art::{async_sleep, async_spawn, async_timeout, future::to, stream}, @@ -484,7 +483,7 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` pub async fn direct_response( &self, - chan: ResponseChannel, + chan: ResponseChannel>, msg: &impl Serialize, ) -> Result<(), NetworkNodeHandleError> { let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 4cba204166..b3eafdc757 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -68,7 +68,7 @@ pub struct TestMetadata { impl Default for TimingData { fn default() -> Self { Self { - next_view_timeout: 2500, + next_view_timeout: 4000, timeout_ratio: (11, 10), round_start_delay: 100, start_delay: 100, From 543eb7a3f0eecdb8f715a498567bbf94c926dfcc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Feb 2024 08:12:44 -0500 Subject: [PATCH 0788/1393] Bump unsigned-varint from 0.7.2 to 0.8.0 (#2575) Bumps [unsigned-varint](https://github.com/paritytech/unsigned-varint) from 0.7.2 to 0.8.0. - [Changelog](https://github.com/paritytech/unsigned-varint/blob/master/CHANGELOG.md) - [Commits](https://github.com/paritytech/unsigned-varint/compare/v0.7.2...v0.8.0) --- updated-dependencies: - dependency-name: unsigned-varint dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ac8a5c1c82..c815c4fe27 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -29,7 +29,7 @@ rand = { workspace = true } serde = { workspace = true } serde_json = "1.0.113" snafu = { workspace = true } -unsigned-varint = "0.7" +unsigned-varint = "0.8" tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", ] } From fefb60dd54d4359091c94107334f629dbd71f144 Mon Sep 17 00:00:00 2001 From: Justin Restivo Date: Wed, 14 Feb 2024 11:53:47 -0500 Subject: [PATCH 0789/1393] chore: drop varint (#2579) --- libp2p-networking/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index c815c4fe27..08261c72dc 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -29,7 +29,6 @@ rand = { workspace = true } serde = { workspace = true } serde_json = "1.0.113" snafu = { workspace = true } -unsigned-varint = "0.8" tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", ] } From 6369222d8c4d0c3bcb14e2734dd01caba028f1e7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 14 Feb 2024 12:14:48 -0500 Subject: [PATCH 0790/1393] [TECH_DEBT] Remove `CommunicationChannel` trait (#2578) * Remove custom DM codec * cleanup * add a little timeout * rename and move constant * Comm channels gone from non tests * Completed the removal * wip gen both networks at once * gen both networks at once * fix lint error * fix combined test * lint --- example-types/src/node_types.rs | 30 +- examples/combined/all.rs | 24 +- examples/combined/multi-validator.rs | 15 +- examples/combined/orchestrator.rs | 8 +- examples/combined/types.rs | 10 +- examples/combined/validator.rs | 14 +- examples/infra/mod.rs | 217 ++++---------- examples/libp2p/all.rs | 24 +- examples/libp2p/multi-validator.rs | 15 +- examples/libp2p/orchestrator.rs | 7 +- examples/libp2p/types.rs | 15 +- examples/libp2p/validator.rs | 14 +- examples/webserver/all.rs | 25 +- examples/webserver/multi-validator.rs | 15 +- examples/webserver/orchestrator.rs | 8 +- examples/webserver/types.rs | 10 +- examples/webserver/validator.rs | 14 +- hotshot/src/lib.rs | 9 +- hotshot/src/tasks/mod.rs | 35 ++- hotshot/src/traits.rs | 10 +- .../src/traits/networking/combined_network.rs | 94 +++--- .../src/traits/networking/libp2p_network.rs | 171 +---------- .../src/traits/networking/memory_network.rs | 143 +--------- .../traits/networking/web_server_network.rs | 268 +++++------------- task-impls/src/consensus.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/network.rs | 22 +- task-impls/src/vid.rs | 2 +- task-impls/src/view_sync.rs | 2 +- testing/src/spinning_task.rs | 2 +- testing/src/test_builder.rs | 2 +- testing/src/test_launcher.rs | 17 +- testing/src/test_runner.rs | 11 +- testing/tests/da_task.rs | 2 +- testing/tests/memory_network.rs | 10 +- testing/tests/vid_task.rs | 2 +- testing/tests/view_sync_task.rs | 2 +- types/src/traits/network.rs | 97 +------ types/src/traits/node_implementation.rs | 53 ++-- 39 files changed, 374 insertions(+), 1049 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 18de95cc22..b84c2da482 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -8,12 +8,13 @@ use crate::{ use hotshot::traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig}, implementations::{ - CombinedCommChannel, Libp2pCommChannel, MemoryCommChannel, MemoryStorage, WebCommChannel, + CombinedNetworks, Libp2pNetwork, MemoryNetwork, MemoryStorage, WebServerNetwork, }, NodeImplementation, }; use hotshot_types::{ - data::ViewNumber, signature_key::BLSPubKey, traits::node_implementation::NodeType, + data::ViewNumber, message::Message, signature_key::BLSPubKey, + traits::node_implementation::NodeType, }; use serde::{Deserialize, Serialize}; @@ -65,34 +66,31 @@ pub struct CombinedImpl; pub type StaticMembership = StaticCommittee; /// memory network -pub type StaticMemoryDAComm = MemoryCommChannel; +pub type StaticMemoryDAComm = + MemoryNetwork, ::SignatureKey>; /// libp2p network -type StaticLibp2pDAComm = Libp2pCommChannel; +type StaticLibp2pDAComm = Libp2pNetwork, ::SignatureKey>; /// web server network communication channel -type StaticWebDAComm = WebCommChannel; +type StaticWebDAComm = WebServerNetwork; /// combined network -type StaticCombinedDAComm = CombinedCommChannel; +type StaticCombinedDAComm = CombinedNetworks; /// memory comm channel -pub type StaticMemoryQuorumComm = MemoryCommChannel; +pub type StaticMemoryQuorumComm = + MemoryNetwork, ::SignatureKey>; /// libp2p comm channel -type StaticLibp2pQuorumComm = Libp2pCommChannel; +type StaticLibp2pQuorumComm = + Libp2pNetwork, ::SignatureKey>; /// web server comm channel -type StaticWebQuorumComm = WebCommChannel; +type StaticWebQuorumComm = WebServerNetwork; /// combined network (libp2p + web server) -type StaticCombinedQuorumComm = CombinedCommChannel; - -/// memory network -pub type StaticMemoryViewSyncComm = MemoryCommChannel; - -/// memory network -pub type StaticMemoryVIDComm = MemoryCommChannel; +type StaticCombinedQuorumComm = CombinedNetworks; impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage; diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 1d6f1c0a89..6249abb909 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -20,7 +20,7 @@ use tracing::{error, instrument}; use crate::{ infra::run_orchestrator, infra::{ConfigArgs, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, + types::{DANetwork, NodeImpl, QuorumNetwork}, }; /// general infra used for this example @@ -78,8 +78,6 @@ async fn main() { TestTypes, DANetwork, QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, NodeImpl, >(OrchestratorArgs { url: orchestrator_url.clone(), @@ -96,19 +94,13 @@ async fn main() { for _ in 0..config.config.total_nodes.into() { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(ValidatorArgs { - url: orchestrator_url, - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), - network_config_file: None, - }) + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, + }, + ) .await; }); nodes.push(node); diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index d97a7f7bc3..f903f1e57c 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -7,9 +7,8 @@ use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -34,15 +33,9 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(ValidatorArgs::from_multi_args(args, node_index)) + infra::main_entry_point::( + ValidatorArgs::from_multi_args(args, node_index), + ) .await; }); nodes.push(node); diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index d4ced1536c..0069093505 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -6,11 +6,10 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; -use types::VIDNetwork; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -27,8 +26,5 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::( - args, - ) - .await; + run_orchestrator::(args).await; } diff --git a/examples/combined/types.rs b/examples/combined/types.rs index 94c316f7ab..9a904b592f 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,5 +1,5 @@ use crate::infra::CombinedDARun; -use hotshot::traits::implementations::{CombinedCommChannel, MemoryStorage}; +use hotshot::traits::implementations::{CombinedNetworks, MemoryStorage}; use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -10,13 +10,13 @@ use std::fmt::Debug; pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = CombinedCommChannel; +pub type DANetwork = CombinedNetworks; /// convenience type alias -pub type VIDNetwork = CombinedCommChannel; +pub type VIDNetwork = CombinedNetworks; /// convenience type alias -pub type QuorumNetwork = CombinedCommChannel; +pub type QuorumNetwork = CombinedNetworks; /// convenience type alias -pub type ViewSyncNetwork = CombinedCommChannel; +pub type ViewSyncNetwork = CombinedNetworks; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index d0493134d6..38c8dbe0b8 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -3,9 +3,8 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; use tracing::{info, instrument}; -use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; use hotshot_orchestrator::client::ValidatorArgs; @@ -27,14 +26,5 @@ async fn main() { setup_backtrace(); let args = ValidatorArgs::parse(); info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(args) - .await; + infra::main_entry_point::(args).await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index cdc42122a4..80ef89bb02 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -5,13 +5,10 @@ use async_lock::RwLock; use async_trait::async_trait; use clap::Parser; use futures::StreamExt; -use hotshot::traits::implementations::{CombinedCommChannel, CombinedNetworks}; +use hotshot::traits::implementations::{CombinedNetworks, UnderlyingCombinedNetworks}; use hotshot::{ traits::{ - implementations::{ - Libp2pCommChannel, Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, - WebCommChannel, WebServerNetwork, - }, + implementations::{Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, WebServerNetwork}, NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, @@ -37,7 +34,6 @@ use hotshot_types::{ traits::{ block_contents::TestableBlock, election::Membership, - network::CommunicationChannel, node_implementation::{ConsensusTime, NodeType}, states::TestableState, }, @@ -130,10 +126,8 @@ pub fn load_config_from_file( /// Runs the orchestrator pub async fn run_orchestrator< TYPES: NodeType, - DACHANNEL: CommunicationChannel + Debug, - QUORUMCHANNEL: CommunicationChannel + Debug, - VIEWSYNCCHANNEL: CommunicationChannel + Debug, - VIDCHANNEL: CommunicationChannel + Debug, + DACHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, + QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, NODE: NodeImplementation>, >( OrchestratorArgs { url, config_file }: OrchestratorArgs, @@ -305,14 +299,12 @@ async fn libp2p_network_from_config( #[async_trait] pub trait RunDA< TYPES: NodeType, - DACHANNEL: CommunicationChannel + Debug, - QUORUMCHANNEL: CommunicationChannel + Debug, - VIEWSYNCCHANNEL: CommunicationChannel + Debug, - VIDCHANNEL: CommunicationChannel + Debug, + DANET: ConnectedNetwork, TYPES::SignatureKey> + Debug, + QUORUMNET: ConnectedNetwork, TYPES::SignatureKey> + Debug, NODE: NodeImplementation< TYPES, - QuorumNetwork = QUORUMCHANNEL, - CommitteeNetwork = DACHANNEL, + QuorumNetwork = QUORUMNET, + CommitteeNetwork = DANET, Storage = MemoryStorage, >, > where @@ -354,8 +346,8 @@ pub trait RunDA< config.config.da_committee_size.try_into().unwrap(), ); let networks_bundle = Networks { - quorum_network: quorum_network.clone(), - da_network: da_network.clone(), + quorum_network: quorum_network.clone().into(), + da_network: da_network.clone().into(), _pd: PhantomData, }; @@ -489,16 +481,10 @@ pub trait RunDA< } /// Returns the da network for this run - fn get_da_channel(&self) -> DACHANNEL; + fn get_da_channel(&self) -> DANET; /// Returns the quorum network for this run - fn get_quorum_channel(&self) -> QUORUMCHANNEL; - - ///Returns view sync network for this run - fn get_view_sync_channel(&self) -> VIEWSYNCCHANNEL; - - ///Returns VID network for this run - fn get_vid_channel(&self) -> VIDCHANNEL; + fn get_quorum_channel(&self) -> QUORUMNET; /// Returns the config for this run fn get_config(&self) -> NetworkConfig; @@ -511,13 +497,9 @@ pub struct WebServerDARun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: WebCommChannel, + quorum_channel: WebServerNetwork, /// data availability channel - da_channel: WebCommChannel, - /// view sync channel - view_sync_channel: WebCommChannel, - /// vid channel - vid_channel: WebCommChannel, + da_channel: WebServerNetwork, } #[async_trait] @@ -530,19 +512,11 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = WebCommChannel, - CommitteeNetwork = WebCommChannel, + QuorumNetwork = WebServerNetwork, + CommitteeNetwork = WebServerNetwork, Storage = MemoryStorage, >, - > - RunDA< - TYPES, - WebCommChannel, - WebCommChannel, - WebCommChannel, - WebCommChannel, - NODE, - > for WebServerDARun + > RunDA, WebServerNetwork, NODE> for WebServerDARun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -567,46 +541,24 @@ where underlying_quorum_network.wait_for_ready().await; - // create communication channels - let quorum_channel: WebCommChannel = - WebCommChannel::new(underlying_quorum_network.clone().into()); - - let view_sync_channel: WebCommChannel = - WebCommChannel::new(underlying_quorum_network.into()); - - let da_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(url.clone(), wait_between_polls, pub_key.clone(), true).into(), - ); - - let vid_channel: WebCommChannel = WebCommChannel::new( - WebServerNetwork::create(url, wait_between_polls, pub_key, true).into(), - ); + let da_channel: WebServerNetwork = + WebServerNetwork::create(url.clone(), wait_between_polls, pub_key.clone(), true); WebServerDARun { config, - quorum_channel, + quorum_channel: underlying_quorum_network, da_channel, - view_sync_channel, - vid_channel, } } - fn get_da_channel(&self) -> WebCommChannel { + fn get_da_channel(&self) -> WebServerNetwork { self.da_channel.clone() } - fn get_quorum_channel(&self) -> WebCommChannel { + fn get_quorum_channel(&self) -> WebServerNetwork { self.quorum_channel.clone() } - fn get_view_sync_channel(&self) -> WebCommChannel { - self.view_sync_channel.clone() - } - - fn get_vid_channel(&self) -> WebCommChannel { - self.vid_channel.clone() - } - fn get_config(&self) -> NetworkConfig { self.config.clone() } @@ -619,13 +571,9 @@ pub struct Libp2pDARun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: Libp2pCommChannel, + quorum_channel: Libp2pNetwork, TYPES::SignatureKey>, /// data availability channel - da_channel: Libp2pCommChannel, - /// view sync channel - view_sync_channel: Libp2pCommChannel, - /// vid channel - vid_channel: Libp2pCommChannel, + da_channel: Libp2pNetwork, TYPES::SignatureKey>, } #[async_trait] @@ -638,17 +586,15 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = Libp2pCommChannel, - CommitteeNetwork = Libp2pCommChannel, + QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>, + CommitteeNetwork = Libp2pNetwork, TYPES::SignatureKey>, Storage = MemoryStorage, >, > RunDA< TYPES, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, - Libp2pCommChannel, + Libp2pNetwork, TYPES::SignatureKey>, + Libp2pNetwork, TYPES::SignatureKey>, NODE, > for Libp2pDARun where @@ -663,49 +609,26 @@ where let pub_key = config.config.my_own_validator_config.public_key.clone(); // create and wait for underlying network - let underlying_quorum_network = - libp2p_network_from_config::(config.clone(), pub_key).await; - - underlying_quorum_network.wait_for_ready().await; - - // create communication channels - let quorum_channel: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let view_sync_channel: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + let quorum_channel = libp2p_network_from_config::(config.clone(), pub_key).await; - let da_channel: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); - - let vid_channel: Libp2pCommChannel = - Libp2pCommChannel::new(underlying_quorum_network.clone().into()); + let da_channel = quorum_channel.clone(); + quorum_channel.wait_for_ready().await; Libp2pDARun { config, quorum_channel, da_channel, - view_sync_channel, - vid_channel, } } - fn get_da_channel(&self) -> Libp2pCommChannel { + fn get_da_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { self.da_channel.clone() } - fn get_quorum_channel(&self) -> Libp2pCommChannel { + fn get_quorum_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { self.quorum_channel.clone() } - fn get_view_sync_channel(&self) -> Libp2pCommChannel { - self.view_sync_channel.clone() - } - - fn get_vid_channel(&self) -> Libp2pCommChannel { - self.vid_channel.clone() - } - fn get_config(&self) -> NetworkConfig { self.config.clone() } @@ -718,13 +641,9 @@ pub struct CombinedDARun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: CombinedCommChannel, + quorum_channel: CombinedNetworks, /// data availability channel - da_channel: CombinedCommChannel, - /// view sync channel - view_sync_channel: CombinedCommChannel, - /// vid channel - vid_channel: CombinedCommChannel, + da_channel: CombinedNetworks, } #[async_trait] @@ -738,18 +657,10 @@ impl< NODE: NodeImplementation< TYPES, Storage = MemoryStorage, - QuorumNetwork = CombinedCommChannel, - CommitteeNetwork = CombinedCommChannel, + QuorumNetwork = CombinedNetworks, + CommitteeNetwork = CombinedNetworks, >, - > - RunDA< - TYPES, - CombinedCommChannel, - CombinedCommChannel, - CombinedCommChannel, - CombinedCommChannel, - NODE, - > for CombinedDARun + > RunDA, CombinedNetworks, NODE> for CombinedDARun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -779,61 +690,39 @@ where }: WebServerConfig = config.clone().da_web_server_config.unwrap(); // create and wait for underlying webserver network - let webserver_underlying_quorum_network = + let web_quorum_network = webserver_network_from_config::(config.clone(), pub_key.clone()); - let webserver_underlying_da_network = - WebServerNetwork::create(url, wait_between_polls, pub_key, true); + let web_da_network = WebServerNetwork::create(url, wait_between_polls, pub_key, true); - webserver_underlying_quorum_network.wait_for_ready().await; + web_quorum_network.wait_for_ready().await; - // combine the two communication channels - let quorum_channel = CombinedCommChannel::new(Arc::new(CombinedNetworks( - webserver_underlying_quorum_network.clone(), - libp2p_underlying_quorum_network.clone(), - ))); + // combine the two communication channel - let view_sync_channel = CombinedCommChannel::new(Arc::new(CombinedNetworks( - webserver_underlying_quorum_network.clone(), + let da_channel = CombinedNetworks::new(Arc::new(UnderlyingCombinedNetworks( + web_da_network.clone(), libp2p_underlying_quorum_network.clone(), ))); - - let da_channel: CombinedCommChannel = - CombinedCommChannel::new(Arc::new(CombinedNetworks( - webserver_underlying_da_network, - libp2p_underlying_quorum_network.clone(), - ))); - - let vid_channel = CombinedCommChannel::new(Arc::new(CombinedNetworks( - webserver_underlying_quorum_network, - libp2p_underlying_quorum_network, + let quorum_channel = CombinedNetworks::new(Arc::new(UnderlyingCombinedNetworks( + web_quorum_network.clone(), + libp2p_underlying_quorum_network.clone(), ))); CombinedDARun { config, quorum_channel, da_channel, - view_sync_channel, - vid_channel, } } - fn get_da_channel(&self) -> CombinedCommChannel { + fn get_da_channel(&self) -> CombinedNetworks { self.da_channel.clone() } - fn get_quorum_channel(&self) -> CombinedCommChannel { + fn get_quorum_channel(&self) -> CombinedNetworks { self.quorum_channel.clone() } - fn get_view_sync_channel(&self) -> CombinedCommChannel { - self.view_sync_channel.clone() - } - - fn get_vid_channel(&self) -> CombinedCommChannel { - self.vid_channel.clone() - } - fn get_config(&self) -> NetworkConfig { self.config.clone() } @@ -849,17 +738,15 @@ pub async fn main_entry_point< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - DACHANNEL: CommunicationChannel + Debug, - QUORUMCHANNEL: CommunicationChannel + Debug, - VIEWSYNCCHANNEL: CommunicationChannel + Debug, - VIDCHANNEL: CommunicationChannel + Debug, + DACHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, + QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMCHANNEL, CommitteeNetwork = DACHANNEL, Storage = MemoryStorage, >, - RUNDA: RunDA, + RUNDA: RunDA, >( args: ValidatorArgs, ) where diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 5b67f667d7..ddc8b472b9 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -18,7 +18,7 @@ use tracing::instrument; use crate::{ infra::run_orchestrator, infra::{ConfigArgs, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}, + types::{DANetwork, NodeImpl, QuorumNetwork}, }; /// general infra used for this example @@ -44,8 +44,6 @@ async fn main() { TestTypes, DANetwork, QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, NodeImpl, >(OrchestratorArgs { url: orchestrator_url.clone(), @@ -61,19 +59,13 @@ async fn main() { for _ in 0..config.config.total_nodes.into() { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(ValidatorArgs { - url: orchestrator_url, - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), - network_config_file: None, - }) + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, + }, + ) .await; }); nodes.push(node); diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index aec3325383..e085b498f6 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -7,9 +7,8 @@ use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -34,15 +33,9 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(ValidatorArgs::from_multi_args(args, node_index)) + infra::main_entry_point::( + ValidatorArgs::from_multi_args(args, node_index), + ) .await; }); nodes.push(node); diff --git a/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs index c26fc73bba..42e23a39d6 100644 --- a/examples/libp2p/orchestrator.rs +++ b/examples/libp2p/orchestrator.rs @@ -10,7 +10,7 @@ use tracing::instrument; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, VIDNetwork, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -27,8 +27,5 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::( - args, - ) - .await; + run_orchestrator::(args).await; } diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index af535db8d7..500581b9d6 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,7 +1,10 @@ use crate::infra::Libp2pDARun; -use hotshot::traits::implementations::{Libp2pCommChannel, MemoryStorage}; +use hotshot::traits::implementations::{Libp2pNetwork, MemoryStorage}; use hotshot_example_types::state_types::TestTypes; -use hotshot_types::traits::node_implementation::NodeImplementation; +use hotshot_types::{ + message::Message, + traits::node_implementation::{NodeImplementation, NodeType}, +}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -10,13 +13,9 @@ use std::fmt::Debug; pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = Libp2pCommChannel; +pub type DANetwork = Libp2pNetwork, ::SignatureKey>; /// convenience type alias -pub type VIDNetwork = Libp2pCommChannel; -/// convenience type alias -pub type QuorumNetwork = Libp2pCommChannel; -/// convenience type alias -pub type ViewSyncNetwork = Libp2pCommChannel; +pub type QuorumNetwork = Libp2pNetwork, ::SignatureKey>; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 9873cac76e..cebcd44d04 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -3,9 +3,8 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; use tracing::{info, instrument}; -use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; use hotshot_orchestrator::client::ValidatorArgs; @@ -27,14 +26,5 @@ async fn main() { setup_backtrace(); let args = ValidatorArgs::parse(); info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(args) - .await; + infra::main_entry_point::(args).await; } diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index d70de4bba5..b68b3ec46e 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -7,7 +7,7 @@ use crate::infra::{ConfigArgs, OrchestratorArgs}; use crate::types::ThisRun; use crate::{ infra::run_orchestrator, - types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}, + types::{DANetwork, NodeImpl, QuorumNetwork}, }; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; @@ -24,7 +24,6 @@ use hotshot_orchestrator::config::NetworkConfig; use hotshot_types::traits::node_implementation::NodeType; use surf_disco::Url; use tracing::error; -use types::VIDNetwork; #[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] @@ -73,8 +72,6 @@ async fn main() { TestTypes, DANetwork, QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, NodeImpl, >(OrchestratorArgs { url: orchestrator_url.clone(), @@ -90,19 +87,13 @@ async fn main() { for _ in 0..(config.config.total_nodes.get()) { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(ValidatorArgs { - url: orchestrator_url.clone(), - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), - network_config_file: None, - }) + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, + }, + ) .await; }); nodes.push(node); diff --git a/examples/webserver/multi-validator.rs b/examples/webserver/multi-validator.rs index b975f07b0f..61d81c79c5 100644 --- a/examples/webserver/multi-validator.rs +++ b/examples/webserver/multi-validator.rs @@ -7,9 +7,8 @@ use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -34,15 +33,9 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(ValidatorArgs::from_multi_args(args, node_index)) + infra::main_entry_point::( + ValidatorArgs::from_multi_args(args, node_index), + ) .await; }); nodes.push(node); diff --git a/examples/webserver/orchestrator.rs b/examples/webserver/orchestrator.rs index 62f2006f2e..49080dae8a 100644 --- a/examples/webserver/orchestrator.rs +++ b/examples/webserver/orchestrator.rs @@ -7,11 +7,10 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; -use types::VIDNetwork; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -28,8 +27,5 @@ async fn main() { setup_backtrace(); let args = OrchestratorArgs::parse(); - run_orchestrator::( - args, - ) - .await; + run_orchestrator::(args).await; } diff --git a/examples/webserver/types.rs b/examples/webserver/types.rs index 0e67f7a742..95abc27be1 100644 --- a/examples/webserver/types.rs +++ b/examples/webserver/types.rs @@ -1,5 +1,5 @@ use crate::infra::WebServerDARun; -use hotshot::traits::implementations::{MemoryStorage, WebCommChannel}; +use hotshot::traits::implementations::{MemoryStorage, WebServerNetwork}; use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -10,13 +10,13 @@ use std::fmt::Debug; pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = WebCommChannel; +pub type DANetwork = WebServerNetwork; /// convenience type alias -pub type VIDNetwork = WebCommChannel; +pub type VIDNetwork = WebServerNetwork; /// convenience type alias -pub type QuorumNetwork = WebCommChannel; +pub type QuorumNetwork = WebServerNetwork; /// convenience type alias -pub type ViewSyncNetwork = WebCommChannel; +pub type ViewSyncNetwork = WebServerNetwork; impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; diff --git a/examples/webserver/validator.rs b/examples/webserver/validator.rs index 96bcde1807..e335cae2be 100644 --- a/examples/webserver/validator.rs +++ b/examples/webserver/validator.rs @@ -3,9 +3,8 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; use tracing::{info, instrument}; -use types::VIDNetwork; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun, ViewSyncNetwork}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; use hotshot_orchestrator::client::ValidatorArgs; @@ -27,14 +26,5 @@ async fn main() { setup_backtrace(); let args = ValidatorArgs::parse(); info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::< - TestTypes, - DANetwork, - QuorumNetwork, - ViewSyncNetwork, - VIDNetwork, - NodeImpl, - ThisRun, - >(args) - .await; + infra::main_entry_point::(args).await; } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e948c47879..240c740f06 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -42,7 +42,8 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ consensus_api::ConsensusApi, - network::CommunicationChannel, + election::Membership, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::ValidatedState, @@ -78,10 +79,10 @@ pub const H_256: usize = 32; /// Bundle of the networks used in consensus pub struct Networks> { /// Newtork for reaching all nodes - pub quorum_network: I::QuorumNetwork, + pub quorum_network: Arc, /// Network for reaching the DA committee - pub da_network: I::CommitteeNetwork, + pub da_network: Arc, /// Phantom for TYPES and I pub _pd: PhantomData<(TYPES, I)>, @@ -321,7 +322,7 @@ impl> SystemContext { sender: api.inner.public_key.clone(), kind: MessageKind::from(message), }, - da_membership, + da_membership.get_committee(TYPES::Time::new(0)), ), api .send_external_event(Event { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 172f904b9f..14ef94c48a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -15,18 +15,21 @@ use hotshot_task_impls::{ vid::VIDTaskState, view_sync::ViewSyncTaskState, }; -use hotshot_types::traits::election::Membership; use hotshot_types::{ event::Event, message::Messages, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, - network::{CommunicationChannel, ConsensusIntentEvent, TransmitType}, + network::{ConsensusIntentEvent, TransmitType}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, BlockPayload, }, }; +use hotshot_types::{ + message::Message, + traits::{election::Membership, network::ConnectedNetwork}, +}; use std::{ collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, @@ -45,10 +48,13 @@ pub enum GlobalEvent { } /// Add the network task to handle messages and publish events. -pub async fn add_network_message_task>( +pub async fn add_network_message_task< + TYPES: NodeType, + NET: ConnectedNetwork, TYPES::SignatureKey>, +>( task_reg: Arc, event_stream: Sender>, - channel: NET, + channel: Arc, ) { let net = channel.clone(); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { @@ -105,11 +111,14 @@ pub async fn add_network_message_task>( +pub async fn add_network_event_task< + TYPES: NodeType, + NET: ConnectedNetwork, TYPES::SignatureKey>, +>( task_reg: Arc, tx: Sender>, rx: Receiver>, - channel: NET, + channel: Arc, membership: TYPES::Membership, filter: fn(&HotShotEvent) -> bool, ) { @@ -168,8 +177,8 @@ pub async fn create_consensus_state>( consensus: handle.hotshot.get_consensus(), cur_view: TYPES::Time::new(0), vote_collector: None, - network: c_api.inner.networks.quorum_network.clone().into(), + network: c_api.inner.networks.quorum_network.clone(), membership: c_api.inner.memberships.vid_membership.clone().into(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), @@ -262,7 +271,7 @@ pub async fn add_upgrade_task>( api: c_api.clone(), cur_view: TYPES::Time::new(0), quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), - quorum_network: c_api.inner.networks.quorum_network.clone().into(), + quorum_network: c_api.inner.networks.quorum_network.clone(), should_vote: |_upgrade_proposal| false, vote_collector: None.into(), public_key: c_api.public_key().clone(), @@ -288,7 +297,7 @@ pub async fn add_da_task>( api: c_api.clone(), consensus: handle.hotshot.get_consensus(), da_membership: c_api.inner.memberships.da_membership.clone().into(), - da_network: c_api.inner.networks.da_network.clone().into(), + da_network: c_api.inner.networks.da_network.clone(), quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), cur_view: TYPES::Time::new(0), vote_collector: None.into(), @@ -318,7 +327,7 @@ pub async fn add_transaction_task> transactions: Arc::default(), seen_transactions: HashSet::new(), cur_view: TYPES::Time::new(0), - network: c_api.inner.networks.quorum_network.clone().into(), + network: c_api.inner.networks.quorum_network.clone(), membership: c_api.inner.memberships.quorum_membership.clone().into(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), @@ -342,7 +351,7 @@ pub async fn add_view_sync_task>( let view_sync_state = ViewSyncTaskState { current_view: TYPES::Time::new(0), next_view: TYPES::Time::new(0), - network: api.inner.networks.quorum_network.clone().into(), + network: api.inner.networks.quorum_network.clone(), membership: api.inner.memberships.view_sync_membership.clone().into(), public_key: api.public_key().clone(), private_key: api.private_key().clone(), diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 3bd07c4c15..48a2669493 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,10 +13,12 @@ pub use storage::{Result as StorageResult, Storage}; pub mod implementations { pub use super::{ networking::{ - combined_network::{calculate_hash_of, Cache, CombinedCommChannel, CombinedNetworks}, - libp2p_network::{Libp2pCommChannel, Libp2pNetwork, PeerInfoVec}, - memory_network::{MasterMap, MemoryCommChannel, MemoryNetwork}, - web_server_network::{WebCommChannel, WebServerNetwork}, + combined_network::{ + calculate_hash_of, Cache, CombinedNetworks, UnderlyingCombinedNetworks, + }, + libp2p_network::{Libp2pNetwork, PeerInfoVec}, + memory_network::{MasterMap, MemoryNetwork}, + web_server_network::WebServerNetwork, NetworkingMetricsValue, }, storage::memory_storage::MemoryStorage, // atomic_storage::AtomicStorage, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index ae093c5f72..d401ba15e8 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -8,7 +8,7 @@ use hotshot_constants::{ COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }; use std::{ - collections::HashSet, + collections::{BTreeSet, HashSet}, hash::Hasher, sync::atomic::{AtomicU64, Ordering}, }; @@ -26,11 +26,7 @@ use hotshot_types::{ data::ViewNumber, message::Message, traits::{ - election::Membership, - network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, - TestableChannelImplementation, TransmitType, ViewMessage, - }, + network::{ConnectedNetwork, ConsensusIntentEvent, TransmitType}, node_implementation::NodeType, }, BoxSyncFuture, @@ -110,9 +106,9 @@ pub fn calculate_hash_of(t: &T) -> u64 { /// A communication channel with 2 networks, where we can fall back to the slower network if the /// primary fails #[derive(Clone, Debug)] -pub struct CombinedCommChannel { +pub struct CombinedNetworks { /// The two networks we'll use for send/recv - networks: Arc>, + networks: Arc>, /// Last n seen messages to prevent processing duplicates message_cache: Arc>, @@ -121,10 +117,10 @@ pub struct CombinedCommChannel { primary_down: Arc, } -impl CombinedCommChannel { +impl CombinedNetworks { /// Constructor #[must_use] - pub fn new(networks: Arc>) -> Self { + pub fn new(networks: Arc>) -> Self { Self { networks, message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), @@ -149,7 +145,7 @@ impl CombinedCommChannel { /// We need this so we can impl `TestableNetworkingImplementation` /// on the tuple #[derive(Debug, Clone)] -pub struct CombinedNetworks( +pub struct UnderlyingCombinedNetworks( pub WebServerNetwork, pub Libp2pNetwork, TYPES::SignatureKey>, ); @@ -163,7 +159,7 @@ impl TestableNetworkingImplementation for CombinedNetwor da_committee_size: usize, is_da: bool, reliability_config: Option>, - ) -> Box Self + 'static> { + ) -> Box (Arc, Arc) + 'static> { let generators = ( TestableNetworkingImplementation for CombinedNetwor reliability_config, ) ); - Box::new(move |node_id| CombinedNetworks(generators.0(node_id), generators.1(node_id))) - } - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option { - None - } -} - -#[cfg(feature = "hotshot-testing")] -impl TestableNetworkingImplementation for CombinedCommChannel { - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - reliability_config: Option>, - ) -> Box Self + 'static> { - let generator = as TestableNetworkingImplementation<_>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da, - reliability_config, - ); - Box::new(move |node_id| Self { - networks: generator(node_id).into(), - message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), - primary_down: Arc::new(AtomicU64::new(0)), + Box::new(move |node_id| { + let (quorum_web, da_web) = generators.0(node_id); + let (quorum_p2p, da_p2p) = generators.1(node_id); + let da_networks = UnderlyingCombinedNetworks( + Arc::>::into_inner(da_web).unwrap(), + Arc::, TYPES::SignatureKey>>::unwrap_or_clone(da_p2p), + ); + let quorum_networks = UnderlyingCombinedNetworks( + Arc::>::into_inner(quorum_web).unwrap(), + Arc::, TYPES::SignatureKey>>::unwrap_or_clone( + quorum_p2p, + ), + ); + let quorum_net = Self { + networks: Arc::new(quorum_networks), + message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + primary_down: Arc::new(AtomicU64::new(0)), + }; + let da_net = Self { + networks: Arc::new(da_networks), + message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + primary_down: Arc::new(AtomicU64::new(0)), + }; + (quorum_net.into(), da_net.into()) }) } @@ -229,9 +216,9 @@ impl TestableNetworkingImplementation for CombinedCommCh } #[async_trait] -impl CommunicationChannel for CombinedCommChannel { - type NETWORK = CombinedNetworks; - +impl ConnectedNetwork, TYPES::SignatureKey> + for CombinedNetworks +{ fn pause(&self) { self.networks.0.pause(); } @@ -265,11 +252,8 @@ impl CommunicationChannel for CombinedCommChannel async fn broadcast_message( &self, message: Message, - election: &TYPES::Membership, + recipients: BTreeSet, ) -> Result<(), NetworkError> { - let recipients = - ::Membership::get_committee(election, message.get_view_number()); - // broadcast optimistically on both networks, but if the primary network is down, skip it let primary_down = self.primary_down.load(Ordering::Relaxed); if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES @@ -384,12 +368,6 @@ impl CommunicationChannel for CombinedCommChannel } } -impl TestableChannelImplementation for CombinedCommChannel { - fn generate_network() -> Box) -> Self + 'static> { - Box::new(move |network| CombinedCommChannel::new(network)) - } -} - #[cfg(test)] mod test { use super::*; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 9a8162c6ba..d7111c0cea 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -20,10 +20,9 @@ use hotshot_types::{ data::ViewNumber, message::{Message, MessageKind}, traits::{ - election::Membership, network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, - NetworkError, NetworkMsg, TestableChannelImplementation, TransmitType, ViewMessage, + ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, + NetworkMsg, TransmitType, ViewMessage, }, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -51,7 +50,6 @@ use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; use std::{ collections::BTreeSet, fmt::Debug, - marker::PhantomData, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, @@ -166,7 +164,7 @@ where da_committee_size: usize, _is_da: bool, reliability_config: Option>, - ) -> Box Self + 'static> { + ) -> Box (Arc, Arc) + 'static> { assert!( da_committee_size <= expected_node_count, "DA committee size must be less than or equal to total # nodes" @@ -252,7 +250,7 @@ where let keys = all_keys.clone(); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); - async_block_on(async move { + let net = Arc::new(async_block_on(async move { match Libp2pNetwork::new( NetworkingMetricsValue::default(), config, @@ -273,7 +271,8 @@ where panic!("Failed to create libp2p network: {err:?}"); } } - }) + })); + (net.clone(), net) } }) } @@ -629,6 +628,14 @@ impl ConnectedNetwork for Libp2p self.wait_for_ready().await; } + fn pause(&self) { + unimplemented!("Pausing not implemented for the Libp2p network"); + } + + fn resume(&self) { + unimplemented!("Resuming not implemented for the Libp2p network"); + } + #[instrument(name = "Libp2pNetwork::ready_nonblocking", skip_all)] async fn is_ready(&self) -> bool { self.inner.is_ready.load(Ordering::Relaxed) @@ -886,153 +893,3 @@ impl ConnectedNetwork for Libp2p } } } - -/// libp2p identity communication channel -#[derive(Clone, Debug)] -pub struct Libp2pCommChannel( - Arc, TYPES::SignatureKey>>, - PhantomData, -); - -impl Libp2pCommChannel { - /// create a new libp2p communication channel - #[must_use] - pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { - Self(network, PhantomData) - } -} - -#[cfg(feature = "hotshot-testing")] -impl TestableNetworkingImplementation for Libp2pCommChannel -where - MessageKind: ViewMessage, -{ - /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` - /// with the purpose of generating libp2p networks. - /// Generates `num_bootstrap` bootstrap nodes. The remainder of nodes are normal - /// nodes with sane defaults. - /// # Panics - /// Returned function may panic either: - /// - An invalid configuration - /// (probably an issue with the defaults of this function) - /// - An inability to spin up the replica's network - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - reliability_config: Option>, - ) -> Box Self + 'static> { - let generator = , - TYPES::SignatureKey, - > as TestableNetworkingImplementation<_>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da, - reliability_config - ); - Box::new(move |node_id| Self(generator(node_id).into(), PhantomData)) - } - - fn in_flight_message_count(&self) -> Option { - None - } -} - -// FIXME maybe we should macro this...? It's repeated at verbatum EXCEPT for impl generics at the -// top -// we don't really want to make this the default implementation because that forces it to require ConnectedNetwork to be implemented. The struct we implement over might use multiple ConnectedNetworks -#[async_trait] -impl CommunicationChannel for Libp2pCommChannel -where - MessageKind: ViewMessage, -{ - type NETWORK = Libp2pNetwork, TYPES::SignatureKey>; - - fn pause(&self) { - unimplemented!("Pausing not implemented for the Libp2p network"); - } - - fn resume(&self) { - unimplemented!("Resuming not implemented for the Libp2p network"); - } - - async fn wait_for_ready(&self) { - self.0.wait_for_ready().await; - } - - async fn is_ready(&self) -> bool { - self.0.is_ready().await - } - - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - self.0.shut_down().await; - }; - boxed_sync(closure) - } - - async fn broadcast_message( - &self, - message: Message, - membership: &TYPES::Membership, - ) -> Result<(), NetworkError> { - let recipients = ::Membership::get_committee( - membership, - message.kind.get_view_number(), - ); - self.0.broadcast_message(message, recipients).await - } - - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { - self.0.direct_message(message, recipient).await - } - - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { self.0.recv_msgs(transmit_type).await }; - boxed_sync(closure) - } - - async fn queue_node_lookup( - &self, - view_number: ViewNumber, - pk: TYPES::SignatureKey, - ) -> Result<(), UnboundedSendError>> { - self.0.queue_node_lookup(view_number, pk).await - } - - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(&self.0, event) - .await; - } -} - -impl TestableChannelImplementation for Libp2pCommChannel { - fn generate_network( - ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> - { - Box::new(move |network| Libp2pCommChannel::new(network)) - } -} diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index ae9f1c53dc..10f8803f6a 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -15,13 +15,9 @@ use dashmap::DashMap; use futures::StreamExt; use hotshot_types::{ boxed_sync, - message::{Message, MessageKind}, + message::Message, traits::{ - election::Membership, - network::{ - CommunicationChannel, ConnectedNetwork, NetworkMsg, TestableChannelImplementation, - TestableNetworkingImplementation, TransmitType, ViewMessage, - }, + network::{ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation, TransmitType}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -251,18 +247,19 @@ impl TestableNetworkingImplementation _da_committee_size: usize, _is_da: bool, reliability_config: Option>, - ) -> Box Self + 'static> { + ) -> Box (Arc, Arc) + 'static> { let master: Arc<_> = MasterMap::new(); // We assign known_nodes' public key and stake value rather than read from config file since it's a test Box::new(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - MemoryNetwork::new( + let net = MemoryNetwork::new( pubkey, NetworkingMetricsValue::default(), master.clone(), reliability_config.clone(), - ) + ); + (net.clone().into(), net.into()) }) } @@ -277,6 +274,14 @@ impl ConnectedNetwork for Memory #[instrument(name = "MemoryNetwork::ready_blocking")] async fn wait_for_ready(&self) {} + fn pause(&self) { + unimplemented!("Pausing not implemented for the Memory network"); + } + + fn resume(&self) { + unimplemented!("Resuming not implemented for the Memory network"); + } + #[instrument(name = "MemoryNetwork::ready_nonblocking")] async fn is_ready(&self) -> bool { true @@ -450,123 +455,3 @@ impl ConnectedNetwork for Memory boxed_sync(closure) } } - -/// memory identity communication channel -#[derive(Clone, Debug)] -pub struct MemoryCommChannel( - Arc, TYPES::SignatureKey>>, -); - -impl MemoryCommChannel { - /// create new communication channel - #[must_use] - pub fn new(network: Arc, TYPES::SignatureKey>>) -> Self { - Self(network) - } -} - -impl TestableNetworkingImplementation for MemoryCommChannel -where - MessageKind: ViewMessage, -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - reliability_config: Option>, - ) -> Box Self + 'static> { - let generator = , - TYPES::SignatureKey, - > as TestableNetworkingImplementation<_>>::generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - is_da, - reliability_config, - ); - Box::new(move |node_id| Self(generator(node_id).into())) - } - - fn in_flight_message_count(&self) -> Option { - Some(self.0.inner.in_flight_message_count.load(Ordering::Relaxed)) - } -} - -#[async_trait] -impl CommunicationChannel for MemoryCommChannel -where - MessageKind: ViewMessage, -{ - type NETWORK = MemoryNetwork, TYPES::SignatureKey>; - - fn pause(&self) { - unimplemented!("Pausing not implemented for the memory network"); - } - - fn resume(&self) { - unimplemented!("Resuming not implemented for the memory network"); - } - - async fn wait_for_ready(&self) { - self.0.wait_for_ready().await; - } - - async fn is_ready(&self) -> bool { - self.0.is_ready().await - } - - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - self.0.shut_down().await; - }; - boxed_sync(closure) - } - - async fn broadcast_message( - &self, - message: Message, - election: &TYPES::Membership, - ) -> Result<(), NetworkError> { - let recipients = ::Membership::get_committee( - election, - message.kind.get_view_number(), - ); - self.0.broadcast_message(message, recipients).await - } - - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { - self.0.direct_message(message, recipient).await - } - - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { self.0.recv_msgs(transmit_type).await }; - boxed_sync(closure) - } -} - -impl TestableChannelImplementation for MemoryCommChannel { - fn generate_network( - ) -> Box, TYPES::SignatureKey>>) -> Self + 'static> - { - Box::new(move |network| MemoryCommChannel::new(network)) - } -} diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index a5dcb90a30..861e55d3cd 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -18,9 +18,8 @@ use hotshot_types::{ message::{Message, MessagePurpose}, traits::{ network::{ - CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, - TestableChannelImplementation, TestableNetworkingImplementation, TransmitType, - WebServerNetworkError, + ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, + TestableNetworkingImplementation, TransmitType, WebServerNetworkError, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -52,18 +51,6 @@ use tracing::{debug, error, info, warn}; /// convenience alias alias for the result of getting transactions from the web server pub type TxnResult = Result>)>, ClientError>; -/// Represents the communication channel abstraction for the web server -#[derive(Clone, Debug)] -pub struct WebCommChannel(Arc>); - -impl WebCommChannel { - /// Create new communication channel - #[must_use] - pub fn new(network: Arc>) -> Self { - Self(network) - } -} - /// # Note /// /// This function uses `DefaultHasher` instead of cryptographic hash functions like SHA-256 because of an `AsRef` requirement. @@ -101,18 +88,6 @@ impl WebServerNetwork { source: WebServerNetworkError::ClientError, }) } - - /// Pauses the underlying network - pub fn pause(&self) { - error!("Pausing CDN network"); - self.inner.running.store(false, Ordering::Relaxed); - } - - /// Resumes the underlying network - pub fn resume(&self) { - error!("Resuming CDN network"); - self.inner.running.store(true, Ordering::Relaxed); - } } /// `TaskChannel` is a type alias for an unbounded sender channel that sends `ConsensusIntentEvent`s. @@ -721,104 +696,59 @@ impl WebServerNetwork { }; Ok(network_msg) } -} - -#[async_trait] -impl CommunicationChannel for WebCommChannel { - type NETWORK = WebServerNetwork; - /// Blocks until node is successfully initialized - /// into the network - async fn wait_for_ready(&self) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::wait_for_ready(&self.0) - .await; - } - - fn pause(&self) { - self.0.pause(); - } - - fn resume(&self) { - self.0.resume(); - } - /// checks if the network is ready - /// nonblocking - async fn is_ready(&self) -> bool { - as ConnectedNetwork, TYPES::SignatureKey>>::is_ready( - &self.0, - ) - .await - } - - /// Shut down this network. Afterwards this network should no longer be used. - /// - /// This should also cause other functions to immediately return with a [`NetworkError`] - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::shut_down(&self.0) - .await; - }; - boxed_sync(closure) - } - - /// broadcast message to those listening on the communication channel - /// blocking - async fn broadcast_message( - &self, - message: Message, - _election: &TYPES::Membership, - ) -> Result<(), NetworkError> { - self.0.broadcast_message(message, BTreeSet::new()).await - } + /// Generates a single webserver network, for use in tests + fn single_generator( + expected_node_count: usize, + _num_bootstrap: usize, + _network_id: usize, + _da_committee_size: usize, + is_da: bool, + _reliability_config: &Option>, + ) -> Box Self + 'static> { + let (server_shutdown_sender, server_shutdown) = oneshot(); + let sender = Arc::new(server_shutdown_sender); - /// Sends a direct message to a specific node - /// blocking - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { - self.0.direct_message(message, recipient).await - } + // pick random, unused port + let port = portpicker::pick_unused_port().expect("Could not find an open port"); - /// Moves out the entire queue of received messages of 'transmit_type` - /// - /// Will unwrap the underlying `NetworkMessage` - /// blocking - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::recv_msgs(&self.0, transmit_type) + let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); + info!("Launching web server on port {port}"); + // Start web server + async_spawn(async { + match hotshot_web_server::run_web_server::( + Some(server_shutdown), + url, + ) .await - }; - boxed_sync(closure) - } + { + Ok(()) => error!("Web server future finished unexpectedly"), + Err(e) => error!("Web server task failed: {e}"), + } + }); - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(&self.0, event) - .await; + // We assign known_nodes' public key and stake value rather than read from config file since it's a test + let known_nodes = (0..expected_node_count as u64) + .map(|id| { + TYPES::SignatureKey::from_private( + &TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id).1, + ) + }) + .collect::>(); + + // Start each node's web server client + Box::new(move |id| { + let sender = Arc::clone(&sender); + let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); + let mut network = WebServerNetwork::create( + url, + Duration::from_millis(100), + known_nodes[usize::try_from(id).unwrap()].clone(), + is_da, + ); + network.server_shutdown_signal = Some(sender); + network + }) } } @@ -832,7 +762,15 @@ impl ConnectedNetwork, TYPES::Signatur async_sleep(Duration::from_secs(1)).await; } } + fn pause(&self) { + error!("Pausing CDN network"); + self.inner.running.store(false, Ordering::Relaxed); + } + fn resume(&self) { + error!("Resuming CDN network"); + self.inner.running.store(true, Ordering::Relaxed); + } /// checks if the network is ready /// nonblocking async fn is_ready(&self) -> bool { @@ -1339,93 +1277,35 @@ impl ConnectedNetwork, TYPES::Signatur } impl TestableNetworkingImplementation for WebServerNetwork { - fn generator( - expected_node_count: usize, - _num_bootstrap: usize, - _network_id: usize, - _da_committee_size: usize, - is_da: bool, - _reliability_config: Option>, - ) -> Box Self + 'static> { - let (server_shutdown_sender, server_shutdown) = oneshot(); - let sender = Arc::new(server_shutdown_sender); - - // pick random, unused port - let port = portpicker::pick_unused_port().expect("Could not find an open port"); - - let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - info!("Launching web server on port {port}"); - // Start web server - async_spawn(async { - match hotshot_web_server::run_web_server::( - Some(server_shutdown), - url, - ) - .await - { - Ok(()) => error!("Web server future finished unexpectedly"), - Err(e) => error!("Web server task failed: {e}"), - } - }); - - // We assign known_nodes' public key and stake value rather than read from config file since it's a test - let known_nodes = (0..expected_node_count as u64) - .map(|id| { - TYPES::SignatureKey::from_private( - &TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id).1, - ) - }) - .collect::>(); - - // Start each node's web server client - Box::new(move |id| { - let sender = Arc::clone(&sender); - let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - let mut network = WebServerNetwork::create( - url, - Duration::from_millis(100), - known_nodes[usize::try_from(id).unwrap()].clone(), - is_da, - ); - network.server_shutdown_signal = Some(sender); - network - }) - } - - fn in_flight_message_count(&self) -> Option { - None - } -} - -impl TestableNetworkingImplementation for WebCommChannel { fn generator( expected_node_count: usize, num_bootstrap: usize, network_id: usize, da_committee_size: usize, - is_da: bool, - _reliability_config: Option>, - ) -> Box Self + 'static> { - let generator = as TestableNetworkingImplementation<_>>::generator( + _is_da: bool, + reliability_config: Option>, + ) -> Box (Arc, Arc) + 'static> { + let da_gen = Self::single_generator( + expected_node_count, + num_bootstrap, + network_id, + da_committee_size, + true, + &reliability_config, + ); + let quorum_gen = Self::single_generator( expected_node_count, num_bootstrap, network_id, da_committee_size, - is_da, - // network reliability is a testing feature - // not yet implemented for webcommchannel - None, + false, + &reliability_config, ); - Box::new(move |node_id| Self(generator(node_id).into())) + // Start each node's web server client + Box::new(move |id| (quorum_gen(id).into(), da_gen(id).into())) } fn in_flight_message_count(&self) -> Option { None } } - -impl TestableChannelImplementation for WebCommChannel { - fn generate_network() -> Box>) -> Self + 'static> { - Box::new(move |network| WebCommChannel::new(network)) - } -} diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1cc0af9e75..af0ff75b08 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -25,7 +25,7 @@ use hotshot_types::{ block_contents::BlockHeader, consensus_api::ConsensusApi, election::Membership, - network::{CommunicationChannel, ConsensusIntentEvent}, + network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, states::ValidatedState, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 577afbaca6..8c574d7eab 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -18,7 +18,7 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - network::{CommunicationChannel, ConsensusIntentEvent}, + network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, }, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 92dd284aa2..17299d554a 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, @@ -13,7 +15,7 @@ use hotshot_types::{ }, traits::{ election::Membership, - network::{CommunicationChannel, TransmitType}, + network::{ConnectedNetwork, TransmitType, ViewMessage}, node_implementation::NodeType, }, vote::{HasViewNumber, Vote}, @@ -181,9 +183,12 @@ impl NetworkMessageTaskState { } /// network event task state -pub struct NetworkEventTaskState> { +pub struct NetworkEventTaskState< + TYPES: NodeType, + COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, +> { /// comm channel - pub channel: COMMCHANNEL, + pub channel: Arc, /// view number pub view: TYPES::Time, /// membership for the channel @@ -193,7 +198,7 @@ pub struct NetworkEventTaskState) -> bool, } -impl> TaskState +impl, TYPES::SignatureKey>> TaskState for NetworkEventTaskState { type Event = HotShotEvent; @@ -221,7 +226,7 @@ impl> TaskState } } -impl> +impl, TYPES::SignatureKey>> NetworkEventTaskState { /// Handle the given event. @@ -364,13 +369,18 @@ impl> sender, kind: message_kind, }; + let view = message.kind.get_view_number(); let transmit_result = match transmit_type { TransmitType::Direct => { self.channel .direct_message(message, recipient.unwrap()) .await } - TransmitType::Broadcast => self.channel.broadcast_message(message, membership).await, + TransmitType::Broadcast => { + self.channel + .broadcast_message(message, membership.get_committee(view)) + .await + } }; match transmit_result { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index d07aeb2c10..be40cb8e01 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -6,7 +6,7 @@ use async_lock::RwLock; use async_std::task::spawn_blocking; use hotshot_task::task::{Task, TaskState}; -use hotshot_types::traits::network::CommunicationChannel; +use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::{ consensus::Consensus, data::VidDisperse, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 04d47cda08..03ade80ae5 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -31,7 +31,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::Membership, - network::CommunicationChannel, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 017e1497a0..6235900810 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -6,7 +6,7 @@ use crate::test_runner::HotShotTaskCompleted; use crate::test_runner::LateStartNode; use crate::test_runner::Node; use hotshot_task::task::{Task, TaskState, TestTaskState}; -use hotshot_types::traits::network::CommunicationChannel; +use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; use std::collections::BTreeMap; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index b3eafdc757..567382aeba 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -284,7 +284,7 @@ impl TestMetadata { TestLauncher { resource_generator: ResourceGenerators { - channel_generator: >::gen_comm_channels( + channel_generator: >::gen_networks( total_nodes, num_bootstrap_nodes, da_committee_size, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index df7d0a6a47..d6196b86d1 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -2,7 +2,8 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; use hotshot_types::{ - traits::{network::CommunicationChannel, node_implementation::NodeType}, + message::Message, + traits::{network::ConnectedNetwork, node_implementation::NodeType}, HotShotConfig, }; @@ -10,19 +11,13 @@ use super::{test_builder::TestMetadata, test_runner::TestRunner}; /// convience type alias for the networks available pub type Networks = ( - >::QuorumNetwork, - >::QuorumNetwork, + Arc<>::QuorumNetwork>, + Arc<>::QuorumNetwork>, ); /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. pub type Generator = Box T + 'static>; -/// Wrapper Type for committee function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` -pub type CommitteeNetworkGenerator = Box) -> T + 'static>; - -/// Wrapper Type for view sync function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` -pub type ViewSyncNetworkGenerator = Box) -> T + 'static>; - /// generators for resources used by each node pub struct ResourceGenerators> { /// generate channels @@ -44,7 +39,9 @@ pub struct TestLauncher> { impl> TestLauncher { /// launch the test #[must_use] - pub fn launch>(self) -> TestRunner { + pub fn launch, TYPES::SignatureKey>>( + self, + ) -> TestRunner { TestRunner:: { launcher: self, nodes: Vec::new(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 467b7b734d..cd35f8b222 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -20,9 +20,6 @@ use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemCont use hotshot_constants::EVENT_CHANNEL_SIZE; use hotshot_task::task::{Task, TaskRegistry, TestTask}; -use hotshot_types::traits::{ - network::CommunicationChannel, node_implementation::NodeImplementation, -}; use hotshot_types::{ consensus::ConsensusMetricsValue, traits::{ @@ -31,6 +28,10 @@ use hotshot_types::{ }, HotShotConfig, ValidatorConfig, }; +use hotshot_types::{ + message::Message, + traits::{network::ConnectedNetwork, node_implementation::NodeImplementation}, +}; use std::{ collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, @@ -65,7 +66,7 @@ pub struct LateStartNode> pub struct TestRunner< TYPES: NodeType, I: TestableNodeImplementation, - N: CommunicationChannel, + N: ConnectedNetwork, TYPES::SignatureKey>, > { /// test launcher, contains a bunch of useful metadata and closures pub(crate) launcher: TestLauncher, @@ -102,7 +103,7 @@ impl TaskErr for T {} impl< TYPES: NodeType, I: TestableNodeImplementation, - N: CommunicationChannel, + N: ConnectedNetwork, TYPES::SignatureKey>, > TestRunner where I: TestableNodeImplementation, diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 33b40ff0c7..4510cd3797 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -98,7 +98,7 @@ async fn test_da_task() { api: api.clone(), consensus: handle.hotshot.get_consensus(), da_membership: api.inner.memberships.da_membership.clone().into(), - da_network: api.inner.networks.da_network.clone().into(), + da_network: api.inner.networks.da_network.clone(), quorum_membership: api.inner.memberships.quorum_membership.clone().into(), cur_view: ViewNumber::new(0), vote_collector: None.into(), diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index ffe5d9d0ee..49c9a4081e 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; use hotshot::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; use hotshot::traits::implementations::{ - MasterMap, MemoryCommChannel, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, + MasterMap, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; @@ -61,10 +61,10 @@ impl NodeType for Test { pub struct TestImpl {} pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; -pub type DANetwork = MemoryCommChannel; -pub type QuorumNetwork = MemoryCommChannel; -pub type ViewSyncNetwork = MemoryCommChannel; -pub type VIDNetwork = MemoryCommChannel; +pub type DANetwork = MemoryNetwork, ::SignatureKey>; +pub type QuorumNetwork = MemoryNetwork, ::SignatureKey>; +pub type ViewSyncNetwork = MemoryNetwork, ::SignatureKey>; +pub type VIDNetwork = MemoryNetwork, ::SignatureKey>; impl NodeImplementation for TestImpl { type Storage = MemoryStorage; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index eadfaf4e84..6ca6366abe 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -105,7 +105,7 @@ async fn test_vid_task() { consensus: handle.hotshot.get_consensus(), cur_view: ViewNumber::new(0), vote_collector: None, - network: api.inner.networks.quorum_network.clone().into(), + network: api.inner.networks.quorum_network.clone(), membership: api.inner.memberships.vid_membership.clone().into(), public_key: *api.public_key(), private_key: api.private_key().clone(), diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index fd75e5944f..2fa93f6972 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -55,7 +55,7 @@ async fn test_view_sync_task() { let view_sync_state = ViewSyncTaskState { current_view: ViewNumber::new(0), next_view: ViewNumber::new(0), - network: api.inner.networks.quorum_network.clone().into(), + network: api.inner.networks.quorum_network.clone(), membership: api.inner.memberships.view_sync_membership.clone().into(), public_key: *api.public_key(), private_key: api.private_key().clone(), diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index a2c8357a29..d659fce2ba 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -12,11 +12,7 @@ use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{ - data::ViewNumber, - message::{Message, MessagePurpose}, - BoxSyncFuture, -}; +use crate::{data::ViewNumber, message::MessagePurpose, BoxSyncFuture}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use rand::{ @@ -226,76 +222,6 @@ pub trait ViewMessage { fn purpose(&self) -> MessagePurpose; } -/// API for interacting directly with a consensus committee -/// intended to be implemented for both DA and for validating consensus committees -#[async_trait] -pub trait CommunicationChannel: Clone + Debug + Send + Sync + 'static { - /// Underlying Network implementation's type - type NETWORK; - /// Blocks until node is successfully initialized - /// into the network - async fn wait_for_ready(&self); - - /// Pauses the underlying network - fn pause(&self); - - /// Resumes the underlying network - fn resume(&self); - - /// checks if the network is ready - /// nonblocking - async fn is_ready(&self) -> bool; - - /// Shut down this network. Afterwards this network should no longer be used. - /// - /// This should also cause other functions to immediately return with a [`NetworkError`] - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b; - - /// broadcast message to those listening on the communication channel - /// blocking - async fn broadcast_message( - &self, - message: Message, - election: &TYPES::Membership, - ) -> Result<(), NetworkError>; - - /// Sends a direct message to a specific node - /// blocking - async fn direct_message( - &self, - message: Message, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError>; - - /// Moves out the entire queue of received messages of 'transmit_type` - /// - /// Will unwrap the underlying `NetworkMessage` - /// blocking - fn recv_msgs<'a, 'b>( - &'a self, - transmit_type: TransmitType, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b; - - /// queues looking up a node - async fn queue_node_lookup( - &self, - _view_number: ViewNumber, - _pk: TYPES::SignatureKey, - ) -> Result<(), UnboundedSendError>> { - Ok(()) - } - - /// Injects consensus data such as view number into the networking implementation - /// blocking - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} -} - /// represents a networking implmentration /// exposes low level API for interacting with a network /// intended to be implemented for libp2p, the centralized server, @@ -304,6 +230,12 @@ pub trait CommunicationChannel: Clone + Debug + Send + Sync + ' pub trait ConnectedNetwork: Clone + Send + Sync + 'static { + /// Pauses the underlying network + fn pause(&self); + + /// Resumes the underlying network + fn resume(&self); + /// Blocks until the network is successfully initialized async fn wait_for_ready(&self); @@ -358,8 +290,12 @@ pub trait ConnectedNetwork: } /// Describes additional functionality needed by the test network implementation -pub trait TestableNetworkingImplementation { +pub trait TestableNetworkingImplementation +where + Self: Sized, +{ /// generates a network given an expected node count + #[allow(clippy::type_complexity)] fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -367,20 +303,13 @@ pub trait TestableNetworkingImplementation { da_committee_size: usize, is_da: bool, reliability_config: Option>, - ) -> Box Self + 'static>; + ) -> Box (Arc, Arc) + 'static>; /// Get the number of messages in-flight. /// /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. fn in_flight_message_count(&self) -> Option; } -/// Describes additional functionality needed by the test communication channel -pub trait TestableChannelImplementation: CommunicationChannel { - /// generates the `CommunicationChannel` given it's associated network type - #[allow(clippy::type_complexity)] - fn generate_network( - ) -> Box>::NETWORK>) -> Self + 'static>; -} /// Changes that can occur in the network #[derive(Debug)] diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 7cd2ef070e..f3199a2bd6 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -6,16 +6,17 @@ use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, - network::{CommunicationChannel, NetworkReliability, TestableNetworkingImplementation}, + network::{ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation}, states::TestableState, storage::{StorageError, StorageState, TestableStorage}, ValidatedState, }; use crate::{ data::{Leaf, TestableLeaf}, + message::Message, traits::{ - election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, - states::InstanceState, storage::Storage, BlockPayload, + election::Membership, signature_key::SignatureKey, states::InstanceState, storage::Storage, + BlockPayload, }, }; use async_trait::async_trait; @@ -44,9 +45,9 @@ pub trait NodeImplementation: type Storage: Storage + Clone; /// Network for all nodes - type QuorumNetwork: CommunicationChannel; + type QuorumNetwork: ConnectedNetwork, TYPES::SignatureKey>; /// Network for those in the DA committee - type CommitteeNetwork: CommunicationChannel; + type CommitteeNetwork: ConnectedNetwork, TYPES::SignatureKey>; } /// extra functions required on a node implementation to be usable by hotshot-testing @@ -94,12 +95,12 @@ pub trait TestableNodeImplementation: NodeImplementation async fn get_full_state(storage: &Self::Storage) -> StorageState; /// Generate the communication channels for testing - fn gen_comm_channels( + fn gen_networks( expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, reliability_config: Option>, - ) -> Box (Self::QuorumNetwork, Self::QuorumNetwork)>; + ) -> Box (Arc, Arc)>; } #[async_trait] @@ -108,12 +109,8 @@ where TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, - I::QuorumNetwork: TestableChannelImplementation, - I::CommitteeNetwork: TestableChannelImplementation, - <>::QuorumNetwork as CommunicationChannel>::NETWORK: - TestableNetworkingImplementation, - <>::CommitteeNetwork as CommunicationChannel>::NETWORK: - TestableNetworkingImplementation, + I::QuorumNetwork: TestableNetworkingImplementation, + I::CommitteeNetwork: TestableNetworkingImplementation, { type CommitteeElectionConfig = TYPES::ElectionConfigType; @@ -153,38 +150,20 @@ where async fn get_full_state(storage: &Self::Storage) -> StorageState { >::get_full_state(storage).await } - fn gen_comm_channels( + fn gen_networks( expected_node_count: usize, num_bootstrap: usize, da_committee_size: usize, reliability_config: Option>, - ) -> Box (Self::QuorumNetwork, Self::QuorumNetwork)> { - let quorum_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - reliability_config.clone(), - ); - let da_generator = <>::NETWORK as TestableNetworkingImplementation>::generator( + ) -> Box (Arc, Arc)> { + >::generator( expected_node_count, num_bootstrap, - 1, + 0, da_committee_size, false, - reliability_config, - ); - - Box::new(move |id| { - let quorum = Arc::new(quorum_generator(id)); - let da = Arc::new(da_generator(id)); - let quorum_chan = - >::generate_network()(quorum); - let committee_chan = - >::generate_network()(da); - (quorum_chan, committee_chan) - }) + reliability_config.clone(), + ) } } From 96107784f89653a200e9fa41ce444807c93fefee Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 14 Feb 2024 10:47:29 -0800 Subject: [PATCH 0791/1393] [CATCHUP] - Construct the genesis types directly rather than retrieve from storage (#2552) * Replace genesis function * Fix genesis state * try logging * remove logging * Undo justfile change * Add node in spinning task * Move start_consensus * Add clone * Create node first * Modify check --- task-impls/src/consensus.rs | 108 ++++++++++------------------- testing/src/overall_safety_task.rs | 3 +- testing/src/spinning_task.rs | 17 ++++- 3 files changed, 53 insertions(+), 75 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index af0ff75b08..6b7a9f5e8d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -141,29 +141,6 @@ pub struct ConsensusTaskState< impl, A: ConsensusApi + 'static> ConsensusTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus genesis leaf", level = "error")] - - async fn genesis_leaf(&self) -> Option> { - let consensus = self.consensus.read().await; - - let Some(genesis_view) = consensus.validated_state_map.get(&TYPES::Time::genesis()) else { - error!("Couldn't find genesis view in state map."); - return None; - }; - let Some(leaf) = genesis_view.get_leaf_commitment() else { - error!( - ?genesis_view, - "Genesis view points to a view without a leaf" - ); - return None; - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - error!("Failed to find genesis leaf."); - return None; - }; - Some(leaf.clone()) - } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. @@ -177,6 +154,8 @@ impl, A: ConsensusApi + } if let Some(proposal) = &self.current_proposal { + let consensus = self.consensus.read().await; + // ED Need to account for the genesis DA cert // No need to check vid share nor da cert for genesis if proposal.justify_qc.is_genesis && proposal.view_number == TYPES::Time::new(1) { @@ -185,11 +164,9 @@ impl, A: ConsensusApi + let view = TYPES::Time::new(*proposal.view_number); let justify_qc = proposal.justify_qc.clone(); let parent = if justify_qc.is_genesis { - self.genesis_leaf().await + Some(Leaf::genesis(&consensus.instance_state)) } else { - self.consensus - .read() - .await + consensus .saved_leaves .get(&justify_qc.get_data().leaf_commit) .cloned() @@ -254,22 +231,14 @@ impl, A: ConsensusApi + // Only vote if you have the DA cert // ED Need to update the view number this is stored under? - if let Some(cert) = self - .consensus - .read() - .await - .saved_da_certs - .get(&(proposal.get_view_number())) - { + if let Some(cert) = consensus.saved_da_certs.get(&(proposal.get_view_number())) { let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. let justify_qc = proposal.justify_qc.clone(); let parent = if justify_qc.is_genesis { - self.genesis_leaf().await + Some(Leaf::genesis(&consensus.instance_state)) } else { - self.consensus - .read() - .await + consensus .saved_leaves .get(&justify_qc.get_data().leaf_commit) .cloned() @@ -501,37 +470,40 @@ impl, A: ConsensusApi + let consensus = self.consensus.upgradable_read().await; - // Get the parent leaf. + // Get the parent leaf and state. let parent = if justify_qc.is_genesis { // Send the `Decide` event for the genesis block if the justify QC is genesis. - let leaf = self.genesis_leaf().await; - match leaf { - Some(ref leaf) => { - broadcast_event( - Event { - view_number: TYPES::Time::genesis(), - event: EventType::Decide { - leaf_chain: Arc::new(vec![(leaf.clone(), None)]), - qc: Arc::new(justify_qc.clone()), - block_size: None, - }, - }, - &self.output_event_stream, - ) - .await; - } - None => { - error!( - "Failed to find the genesis leaf while the justify QC is genesis." - ); - } - } - leaf + let leaf = Leaf::genesis(&consensus.instance_state); + broadcast_event( + Event { + view_number: TYPES::Time::genesis(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![(leaf.clone(), None)]), + qc: Arc::new(justify_qc.clone()), + block_size: None, + }, + }, + &self.output_event_stream, + ) + .await; + let state = Arc::new(TYPES::ValidatedState::genesis(&consensus.instance_state)); + Some((leaf, state)) } else { - consensus + match consensus .saved_leaves .get(&justify_qc.get_data().leaf_commit) .cloned() + { + Some(leaf) => { + if let Some(state) = consensus.get_state(leaf.view_number) { + Some((leaf, state.clone())) + } else { + error!("Parent state not found! Consensus internally inconsistent"); + return; + } + } + None => None, + } }; let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; @@ -542,7 +514,7 @@ impl, A: ConsensusApi + } // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { + let Some((parent_leaf, parent_state)) = parent else { error!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit @@ -605,20 +577,16 @@ impl, A: ConsensusApi + return; }; - let Some(parent_state) = consensus.get_state(parent.view_number) else { - error!("Parent state not found! Consensus internally inconsistent"); - return; - }; let Ok(state) = parent_state.validate_and_apply_header( &consensus.instance_state, - &parent.block_header.clone(), + &parent_leaf.block_header.clone(), &proposal.data.block_header.clone(), ) else { error!("Block header doesn't extend the proposal",); return; }; let state = Arc::new(state); - let parent_commitment = parent.commit(); + let parent_commitment = parent_leaf.commit(); let leaf: Leaf<_> = Leaf { view_number: view, justify_qc: justify_qc.clone(), diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 8af675d08c..8cfea227d9 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -419,7 +419,6 @@ impl RoundResult { ) { let num_decided = self.success_nodes.len(); let num_failed = self.failed_nodes.len(); - let remaining_nodes = total_num_nodes - (num_decided + num_failed); if check_leaf && self.leaf_map.len() != 1 { error!("LEAF MAP (that is mismatched) IS: {:?}", self.leaf_map); @@ -462,7 +461,7 @@ impl RoundResult { } } - let is_success_possible = remaining_nodes + num_decided >= threshold; + let is_success_possible = total_num_nodes - num_failed >= threshold; if !is_success_possible { self.status = ViewStatus::Failed; } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 6235900810..c7e73fbde8 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -76,10 +76,21 @@ impl> TestTaskState for ChangeNode { idx, updown } in operations { match updown { UpDown::Up => { - if let Some(node) = state.late_start.remove(&idx.try_into().unwrap()) { + let node_id = idx.try_into().unwrap(); + if let Some(node) = state.late_start.remove(&node_id) { tracing::error!("Node {} spinning up late", idx); - let handle = node.context.run_tasks().await; - handle.hotshot.start_consensus().await; + + // Create the node and add it to the state, so we can shut them + // down properly later to avoid the overflow error in the overall + // safety task. + let node = Node { + node_id, + networks: node.networks, + handle: node.context.run_tasks().await, + }; + state.handles.push(node.clone()); + + node.handle.hotshot.start_consensus().await; } } UpDown::Down => { From e388f910d7dd8ea2bcf158f64356624f3143dc85 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 14 Feb 2024 15:18:13 -0500 Subject: [PATCH 0792/1393] Add handling for UpgradeCertificate (#2557) --- hotshot/src/tasks/mod.rs | 4 ++ task-impls/src/consensus.rs | 77 ++++++++++++++++++++++++++++++++++++- testing/src/task_helpers.rs | 2 + types/src/data.rs | 5 ++- 4 files changed, 86 insertions(+), 2 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 14ef94c48a..3f565b94fd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -4,6 +4,7 @@ use crate::{types::SystemContextHandle, HotShotConsensusApi}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use hotshot_constants::VERSION_0_1; use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ consensus::{CommitmentAndMetadata, ConsensusTaskState}, @@ -171,6 +172,9 @@ pub async fn create_consensus_state>, + /// last Upgrade Certificate this node formed + pub upgrade_cert: Option>, + + /// most recent decided upgrade certificate + pub decided_upgrade_cert: Option>, + + /// The current version of the network. + /// Updated on view change based on the most recent decided upgrade certificate. + pub current_network_version: Version, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -465,6 +476,28 @@ impl, A: ConsensusApi + return; } + // Validate the upgrade certificate, if one is attached. + // Continue unless the certificate is invalid. + // + // Note: we are *not* directly voting on the upgrade certificate here. + // Once a certificate has been (allegedly) formed, it has already been voted on. + // The certificate is either valid or invalid, and we are simply validating it. + // + // SS: It is possible that we may wish to vote against any quorum proposal + // if it attaches an upgrade certificate that we cannot support. + // But I don't think there's much point in this -- if the UpgradeCertificate + // threshhold (90%) has been reached, voting against the QuorumProposal on that basis + // will probably be completely symbolic anyway. + // + // We should just make sure we don't *sign* an UpgradeCertificate for an upgrade + // that we do not support. + if let Some(ref upgrade_cert) = proposal.data.upgrade_certificate { + if !upgrade_cert.is_valid_cert(self.quorum_membership.as_ref()) { + error!("Invalid upgrade_cert in proposal for view {}", *view); + return; + } + } + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here self.update_view(view, &event_stream).await; @@ -695,6 +728,10 @@ impl, A: ConsensusApi + .last_synced_block_height .set(usize::try_from(leaf.get_height()).unwrap_or(0)); } + if let Some(ref upgrade_cert) = proposal.data.upgrade_certificate { + info!("Updating consensus state with decided upgrade certificate: {:?}", upgrade_cert); + self.decided_upgrade_cert = Some(upgrade_cert.clone()); + } // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. if let Some(encoded_txns) = @@ -968,6 +1005,17 @@ impl, A: ConsensusApi + } } } + HotShotEvent::UpgradeCertificateFormed(cert) => { + debug!( + "Upgrade certificate received for view {}!", + *cert.view_number + ); + + // Update our current upgrade_cert as long as it's still relevant. + if cert.view_number >= self.cur_view { + self.upgrade_cert = Some(cert); + } + } HotShotEvent::DACRecv(cert) => { debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; @@ -1055,6 +1103,16 @@ impl, A: ConsensusApi + return; } + // If we have a decided upgrade certificate, + // we may need to upgrade the protocol version on a view change. + if let Some(ref cert) = self.decided_upgrade_cert { + if new_view >= cert.data.new_version_first_block { + self.current_network_version = cert.data.new_version; + // Discard the old upgrade certificate, which is no longer relevant. + self.decided_upgrade_cert = None; + } + } + broadcast_event( Event { view_number: old_view_number, @@ -1242,12 +1300,29 @@ impl, A: ConsensusApi + error!("Failed to sign leaf.commit()!"); return false; }; + + let upgrade_cert = if self + .upgrade_cert + .as_ref() + .is_some_and(|cert| cert.view_number == view) + { + // If the cert view number matches, set upgrade_cert to self.upgrade_cert + // and set self.upgrade_cert to None. + // + // Note: the certificate is discarded, regardless of whether the vote on the proposal succeeds or not. + self.upgrade_cert.take() + } else { + // Otherwise, set upgrade_cert to None. + None + }; + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { block_header, view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), + upgrade_certificate: upgrade_cert, proposer_id: leaf.proposer_id, }; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 062b04c63e..a7187cd96c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -267,6 +267,7 @@ async fn build_quorum_proposal_and_signature( view_number: ViewNumber::new(1), justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, + upgrade_certificate: None, proposer_id: leaf.proposer_id, }; @@ -324,6 +325,7 @@ async fn build_quorum_proposal_and_signature( view_number: ViewNumber::new(cur_view), justify_qc: created_qc, timeout_certificate: None, + upgrade_certificate: None, proposer_id: leaf_new_view.clone().proposer_id, }; proposal = proposal_new_view; diff --git a/types/src/data.rs b/types/src/data.rs index 7605c646b0..ecc163e4fc 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,7 +4,7 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ - simple_certificate::{QuorumCertificate, TimeoutCertificate}, + simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::UpgradeProposalData, traits::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, @@ -215,6 +215,9 @@ pub struct QuorumProposal { /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view pub timeout_certificate: Option>, + /// Possible upgrade certificate, which the leader may optionally attach. + pub upgrade_certificate: Option>, + /// the propser id pub proposer_id: TYPES::SignatureKey, } From 9c08ec299d2b1aa99ddb63df252283e45824886b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 14 Feb 2024 17:54:45 -0500 Subject: [PATCH 0793/1393] Send the decided event interally (#2582) --- task-impls/src/consensus.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 33896e203d..67a507b5ce 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -689,6 +689,7 @@ impl, A: ConsensusApi + let mut new_decide_reached = false; let mut new_decide_qc = None; let mut leaf_views = Vec::new(); + let mut leafs_decided = Vec::new(); let mut included_txns = HashSet::new(); let old_anchor_view = consensus.last_decided_view; let parent_view = leaf.justify_qc.get_view_number(); @@ -749,7 +750,9 @@ impl, A: ConsensusApi + .vid_shares .get(&leaf.get_view_number()) .map(|vid_proposal| vid_proposal.data.clone()); + leaf_views.push((leaf.clone(), vid)); + leafs_decided.push(leaf.clone()); if let Some(ref payload) = leaf.block_payload { for txn in payload .transaction_commitments(leaf.get_block_header().metadata()) @@ -794,6 +797,7 @@ impl, A: ConsensusApi + } #[allow(clippy::cast_precision_loss)] if new_decide_reached { + broadcast_event(HotShotEvent::LeafDecided(leafs_decided), &event_stream).await; let decide_sent = broadcast_event( Event { view_number: consensus.last_decided_view, From bde7f951baabe34c1dbb10d695736be6b40f2e87 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 14 Feb 2024 19:40:44 -0500 Subject: [PATCH 0794/1393] [TESTING] Fix Test Parameters in Marcos (#2580) * Remove custom DM codec * cleanup * add a little timeout * rename and move constant * Comm channels gone from non tests * Completed the removal * wip gen both networks at once * gen both networks at once * fix lint error * fix combined test * lint * Testing improvements * wait for da network as well * allow some failures duing asycronous tests * don't check leaf in partial sync because the chains don't always match --- task-impls/src/harness.rs | 2 +- task-impls/src/upgrade.rs | 1 - task/src/task.rs | 4 ++-- testing-macros/tests/tests.rs | 5 +---- testing/src/test_builder.rs | 2 +- testing/src/test_runner.rs | 3 ++- testing/tests/unreliable_network.rs | 6 +++++- 7 files changed, 12 insertions(+), 11 deletions(-) diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 509a664751..d84c230940 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -119,7 +119,7 @@ pub fn handle_event( } if state.expected_output.is_empty() { - tracing::error!("test harness task completed"); + tracing::info!("test harness task completed"); return Some(HotShotTaskCompleted); } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index a77b6046fb..9d9d3a0958 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -237,7 +237,6 @@ impl, A: ConsensusApi + HotShotEvent::UpgradeProposalRecv(_, _) | HotShotEvent::UpgradeVoteRecv(_) | HotShotEvent::Shutdown - | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) ) } diff --git a/task/src/task.rs b/task/src/task.rs index e87f3465d5..1d9d12faa0 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -21,7 +21,7 @@ use tokio::{ sync::RwLock, task::{spawn, JoinHandle}, }; -use tracing::error; +use tracing::{error, warn}; use crate::{ dependency::Dependency, @@ -239,7 +239,7 @@ impl< } } Err(e) => { - error!("Failed to get event from task. Error: {:?}", e); + warn!("Failed to get event from task. Error: {:?}", e); } Ok((Err(e), _, _)) => { error!("A task channel returned an Error: {:?}", e); diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 106948d90f..3ce59878fa 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -37,7 +37,6 @@ cross_tests!( Metadata: { let mut metadata = TestMetadata::default_more_nodes(); metadata.num_bootstrap_nodes = 19; - metadata.start_nodes = 19; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -66,7 +65,6 @@ cross_tests!( Metadata: { let mut metadata = TestMetadata::default_more_nodes(); metadata.num_bootstrap_nodes = 17; - metadata.start_nodes = 17; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -111,7 +109,6 @@ cross_tests!( // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; metadata.num_bootstrap_nodes = 14; - metadata.start_nodes = 14; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -163,7 +160,7 @@ cross_tests!( metadata.num_bootstrap_nodes = 10; metadata.total_nodes = 12; metadata.da_committee_size = 12; - metadata.start_nodes = 10; + metadata.start_nodes = 12; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 567382aeba..0617d18d26 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -171,7 +171,7 @@ impl TestMetadata { impl Default for TestMetadata { /// by default, just a single round fn default() -> Self { - let num_nodes = 5; + let num_nodes = 6; Self { timing_data: TimingData::default(), min_transactions: 0, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index cd35f8b222..e72034d77e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -233,6 +233,7 @@ where // wait for networks to be ready for node in &nodes { node.networks.0.wait_for_ready().await; + node.networks.1.wait_for_ready().await; } // Start hotshot @@ -253,7 +254,7 @@ where #[cfg(async_executor_impl = "async-std")] { let results = join_all(task_futs).await; - tracing::error!("test tasks joined"); + tracing::info!("test tasks joined"); for result in results { match result { HotShotTaskCompleted::ShutDown => { diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index 8af6ce2310..8dddff5329 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -193,6 +193,10 @@ async fn test_memory_network_partially_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata { + overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 2, + ..Default::default() + }, // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -237,7 +241,7 @@ async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestMetadata { overall_safety_properties: OverallSafetyPropertiesDescription { - check_leaf: true, + num_failed_views: 2, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( From 6f02c0da4fb857a13827d926e578bf9fe089d803 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 15 Feb 2024 10:49:24 +0800 Subject: [PATCH 0795/1393] [Metrics] Add last_decided_time (#2577) * add last_decided_time * lint --- task-impls/Cargo.toml | 1 + task-impls/src/consensus.rs | 5 +++++ types/src/consensus.rs | 3 +++ 3 files changed, 9 insertions(+) diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index b236dc23da..cd01da8cc1 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -23,6 +23,7 @@ bitvec = { workspace = true } sha2 = { workspace = true } hotshot-task = { path = "../task" } async-broadcast = { workspace = true } +chrono = "0.4" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 67a507b5ce..f1d6afa6c6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -38,6 +38,7 @@ use hotshot_types::{ use tracing::warn; use crate::vote::HandleVoteEvent; +use chrono::Utc; use snafu::Snafu; use std::{ collections::{BTreeMap, HashSet}, @@ -815,6 +816,10 @@ impl, A: ConsensusApi + .await; self.vid_shares = self.vid_shares.split_off(&new_anchor_view); consensus.last_decided_view = new_anchor_view; + consensus + .metrics + .last_decided_time + .set(Utc::now().timestamp().try_into().unwrap()); consensus.metrics.invalid_qc.set(0); consensus .metrics diff --git a/types/src/consensus.rs b/types/src/consensus.rs index d668cd8f39..02e51df512 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -72,6 +72,8 @@ pub struct ConsensusMetricsValue { pub last_synced_block_height: Box, /// The number of last decided view pub last_decided_view: Box, + /// Number of timestamp for the last decided time + pub last_decided_time: Box, /// The current view pub current_view: Box, /// Number of views that are in-flight since the last decided view @@ -210,6 +212,7 @@ impl ConsensusMetricsValue { last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), + last_decided_time: metrics.create_gauge(String::from("last_decided_time"), None), current_view: metrics.create_gauge(String::from("current_view"), None), number_of_views_since_last_decide: metrics .create_gauge(String::from("number_of_views_since_last_decide"), None), From 923f9e8423296f346e7244ba8df590f1b3e5d408 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 15 Feb 2024 16:04:13 -0500 Subject: [PATCH 0796/1393] Fixes to Not Block Proposing/Fix Test Failures (#2589) * Enable testing macros for libp2p * increase # of nodes to 6 * increase timeout to 50000 * increase duration to 3400 * add just recepies for each test macro separately * Bring back original timeouts in test macros * Bring back original timeouts * Remove unnecessary just recipes * don't block the network task when trying to send a message * reset timeout * Don't skip txn sequencing if we don't have parent * lint * Ignore any decided chain that extends genesis * oops --------- Co-authored-by: Lukasz Rzasik --- task-impls/src/network.rs | 32 ++++++++++++--------------- task-impls/src/transactions.rs | 35 ++---------------------------- testing-macros/tests/tests.rs | 12 +++++----- testing/src/overall_safety_task.rs | 5 ++--- 4 files changed, 24 insertions(+), 60 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 17299d554a..33b98b5bf4 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,12 +1,12 @@ -use std::sync::Arc; - use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, }; use async_broadcast::Sender; +use async_compatibility_layer::art::async_spawn; use either::Either::{self, Left, Right}; use hotshot_constants::VERSION_0_1; +use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ @@ -370,23 +370,19 @@ impl, TYPES::Signa kind: message_kind, }; let view = message.kind.get_view_number(); - let transmit_result = match transmit_type { - TransmitType::Direct => { - self.channel - .direct_message(message, recipient.unwrap()) - .await - } - TransmitType::Broadcast => { - self.channel - .broadcast_message(message, membership.get_committee(view)) - .await - } - }; + let committee = membership.get_committee(view); + let net = self.channel.clone(); + async_spawn(async move { + let transmit_result = match transmit_type { + TransmitType::Direct => net.direct_message(message, recipient.unwrap()).await, + TransmitType::Broadcast => net.broadcast_message(message, committee).await, + }; - match transmit_result { - Ok(()) => {} - Err(e) => error!("Failed to send message from network task: {:?}", e), - } + match transmit_result { + Ok(()) => {} + Err(e) => error!("Failed to send message from network task: {:?}", e), + } + }); None } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index d29142a5a2..4a7936ca25 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -14,7 +14,6 @@ use commit::{Commitment, Committable}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, - data::Leaf, event::{Event, EventType}, traits::{ block_contents::BlockHeader, @@ -196,39 +195,9 @@ impl, A: ConsensusApi + debug!("Not next leader for view {:?}", self.cur_view); return None; } - - // ED Copy of parent_leaf() function from sequencing leader - - let consensus = self.consensus.read().await; - let parent_view_number = &consensus.high_qc.view_number; - - let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) - else { - error!( - "Couldn't find high QC parent in state map. Parent view {:?}", - parent_view_number - ); - return None; - }; - let Some(leaf) = parent_view.get_leaf_commitment() else { - error!( - ?parent_view_number, - ?parent_view, - "Parent of high QC points to a view without a proposal" - ); - return None; - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - error!("Failed to find high QC parent."); - return None; - }; - let parent_leaf = leaf.clone(); - - drop(consensus); - // TODO (Keyao) Determine whether to allow empty blocks. // - let txns = self.wait_for_transactions(parent_leaf).await?; + let txns = self.wait_for_transactions().await?; let (payload, metadata) = match ::from_transactions(txns) { Ok((payload, metadata)) => (payload, metadata), @@ -266,7 +235,7 @@ impl, A: ConsensusApi + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - async fn wait_for_transactions(&self, _: Leaf) -> Option> { + async fn wait_for_transactions(&self) -> Option> { let task_start_time = Instant::now(); // TODO (Keyao) Investigate the use of transaction hash diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 3ce59878fa..844d3dcafd 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -1,4 +1,4 @@ -use hotshot_example_types::node_types::{MemoryImpl, WebImpl}; +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, WebImpl}; use hotshot_example_types::state_types::TestTypes; use hotshot_testing::completion_task::{ CompletionTaskDescription, TimeBasedCompletionTaskDescription, @@ -12,7 +12,7 @@ use std::time::Duration; cross_tests!( TestName: test_success, - Impls: [MemoryImpl, WebImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -31,7 +31,7 @@ cross_tests!( // Test one node leaving the network. cross_tests!( TestName: test_with_failures_one, - Impls: [MemoryImpl, WebImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -59,7 +59,7 @@ cross_tests!( // Test f/2 nodes leaving the network. cross_tests!( TestName: test_with_failures_half_f, - Impls: [MemoryImpl, WebImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -99,7 +99,7 @@ cross_tests!( // Test f nodes leaving the network. cross_tests!( TestName: test_with_failures_f, - Impls: [MemoryImpl, WebImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -152,7 +152,7 @@ cross_tests!( // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, - Impls: [MemoryImpl, WebImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 8cfea227d9..427a0d8de0 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -159,9 +159,7 @@ impl> TestTaskState block_size: maybe_block_size, } => { // Skip the genesis leaf. - if leaf_chain.len() == 1 - && leaf_chain[0].0.get_view_number() == TYPES::Time::genesis() - { + if leaf_chain.last().unwrap().0.get_view_number() == TYPES::Time::genesis() { return None; } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); @@ -233,6 +231,7 @@ impl> TestTaskState return None; } ViewStatus::Err(e) => { + task.send_event(GlobalTestEvent::ShutDown).await; return Some(HotShotTaskCompleted::Error(Box::new(e))); } ViewStatus::InProgress => { From 56cdde05b49f3442f275b8d8869e59d1b47eedea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 23:54:38 -0500 Subject: [PATCH 0797/1393] Bump syn from 2.0.48 to 2.0.49 (#2595) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.48 to 2.0.49. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.48...2.0.49) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- testing-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 2a18bba866..2307bd101e 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ tracing = { workspace = true } serde = { workspace = true } # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.43", features = ["full", "extra-traits"] } +syn = { version = "2.0.49", features = ["full", "extra-traits"] } proc-macro2 = "1.0.78" derive_builder = "0.13.1" From 04078a5241e855b9c05c3a8b8fa1ec1759cb32db Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 16 Feb 2024 10:15:33 -0500 Subject: [PATCH 0798/1393] Send timeout vote to trigger next proposal (#2598) --- task-impls/src/view_sync.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 03ade80ae5..530848d5be 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -11,7 +11,7 @@ use hotshot_types::{ simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, - simple_vote::ViewSyncFinalizeData, + simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, traits::signature_key::SignatureKey, }; use hotshot_types::{ @@ -702,6 +702,23 @@ impl, A: ConsensusApi + } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + // HACK sending a timeout vote to the next leader so we they + // can actually propose. We don't give the leader the actual view sync cert + // so they have nothing to propose from. Proper fix is to handle the + // view sync cert in the consensus task as another cert to propose from + let Ok(vote) = TimeoutVote::create_signed_vote( + TimeoutData { + view: self.next_view - 1, + }, + self.next_view - 1, + &self.public_key, + &self.private_key, + ) else { + error!("Failed to sign TimeoutData!"); + return None; + }; + + broadcast_event(HotShotEvent::TimeoutVoteSend(vote), &event_stream).await; // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); From 9ec1d237fcbb3866c63317481b63ff6dcad33b6a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 16 Feb 2024 16:58:59 -0500 Subject: [PATCH 0799/1393] Use Internal Kademlia Storage as our cache (#2592) * Use internal storage as cache * Store the record if the get was successful --- .../src/network/behaviours/dht/cache.rs | 338 ------------------ .../src/network/behaviours/dht/mod.rs | 40 +-- libp2p-networking/src/network/node.rs | 3 +- 3 files changed, 15 insertions(+), 366 deletions(-) delete mode 100644 libp2p-networking/src/network/behaviours/dht/cache.rs diff --git a/libp2p-networking/src/network/behaviours/dht/cache.rs b/libp2p-networking/src/network/behaviours/dht/cache.rs deleted file mode 100644 index 9afde9bd7c..0000000000 --- a/libp2p-networking/src/network/behaviours/dht/cache.rs +++ /dev/null @@ -1,338 +0,0 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::{ - atomic::{AtomicU32, Ordering}, - Arc, - }, - time::{Duration, SystemTime}, -}; - -use async_compatibility_layer::art::async_block_on; -use async_lock::RwLock; -use bincode::Options; -use dashmap::{mapref::one::Ref, DashMap}; -use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; -use hotshot_utils::bincode::bincode_opts; -use snafu::{ResultExt, Snafu}; - -/// Error wrapper type for cache -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum CacheError { - /// Failed to read or write from disk - Disk { - /// source of error - source: std::io::Error, - }, - - /// Failure to serialize the cache - Serialization { - /// source of error - source: Box, - }, - - /// Failure to deserialize the cache - Deserialization { - /// source of error - source: Box, - }, - - /// General cache error - GeneralCache { - /// source of error - source: Box, - }, -} - -/// configuration describing the cache -#[derive(Clone, derive_builder::Builder, custom_debug::Debug, Default)] -pub struct Config { - #[builder(default = "Some(\"dht.cache\".to_string())")] - /// filename to save to - pub filename: Option, - #[builder(default = "Duration::from_secs(KAD_DEFAULT_REPUB_INTERVAL_SEC * 16)")] - /// time before entry expires - pub expiry: Duration, - /// max differences with disk before write - #[builder(default = "4")] - pub max_disk_parity_delta: u32, -} - -impl Default for Cache { - fn default() -> Self { - async_block_on(Self::new(Config::default())) - } -} - -/// key value cache - -#[derive(Clone)] -pub struct Cache { - /// the cache's config - config: Config, - /// the cache for records (key -> value) - inner: Arc, Vec>>, - /// the expiries for the dht cache, in order (expiry time -> key) - expiries: Arc>>>, - /// number of inserts since the last save - disk_parity_delta: Arc, -} - -impl Cache { - /// create a new cache - pub async fn new(config: Config) -> Self { - let cache = Self { - inner: Arc::new(DashMap::new()), - expiries: Arc::new(RwLock::new(BTreeMap::new())), - config, - disk_parity_delta: Arc::new(AtomicU32::new(0)), - }; - - // try loading from file - if let Err(err) = cache.load().await { - tracing::warn!("failed to load cache from file: {}", err); - }; - - cache - } - - /// load from file if configured to do so - pub async fn load(&self) -> Result<(), CacheError> { - if let Some(filename) = &self.config.filename { - let encoded = std::fs::read(filename).context(DiskSnafu)?; - - let cache: HashMap, Vec)> = bincode_opts() - .deserialize(&encoded) - .context(DeserializationSnafu)?; - - // inline prune and insert - let now = SystemTime::now(); - for (expiry, (key, value)) in cache { - if now < expiry { - self.inner.insert(key.clone(), value); - self.expiries.write().await.insert(expiry, key); - } - } - } - - Ok(()) - } - - /// save to file if configured to do so - pub async fn save(&self) -> Result<(), CacheError> { - if let Some(filename) = &self.config.filename { - // prune first - self.prune().await; - - // serialize - let mut cache_to_write = HashMap::new(); - let expiries = self.expiries.read().await; - for (expiry, key) in &*expiries { - if let Some(entry) = self.inner.get(key) { - cache_to_write.insert(expiry, (key, entry.value().clone())); - } else { - tracing::warn!("key not found in cache: {:?}", key); - Err(CacheError::GeneralCache { - source: Box::new(bincode::ErrorKind::Custom( - "key not found in cache".to_string(), - )), - })?; - }; - } - - let encoded = bincode_opts() - .serialize(&cache_to_write) - .context(SerializationSnafu)?; - - std::fs::write(filename, encoded).context(DiskSnafu)?; - } - - Ok(()) - } - - /// prune stale entries - async fn prune(&self) { - let now = SystemTime::now(); - let mut expiries = self.expiries.write().await; - let mut removed: u32 = 0; - - while let Some((expires, key)) = expiries.pop_first() { - if now > expires { - self.inner.remove(&key); - removed += 1; - } else { - expiries.insert(expires, key); - break; - } - } - - if removed > 0 { - self.disk_parity_delta.fetch_add(removed, Ordering::Relaxed); - } - } - - /// get value for `key` if exists - pub async fn get(&self, key: &Vec) -> Option, Vec>> { - // prune, save if necessary - self.prune().await; - self.save_if_necessary().await; - - // get - self.inner.get(key) - } - - /// insert key and value into cache and experies, then save to disk if max disk parity delta - /// exceeded - pub async fn insert(&self, key: Vec, value: Vec) { - // insert into cache and expiries - self.inner.insert(key.clone(), value); - self.expiries - .write() - .await - .insert(SystemTime::now() + self.config.expiry, key); - - // save if reached max disk parity delta - self.disk_parity_delta.fetch_add(1, Ordering::Relaxed); - self.save_if_necessary().await; - } - - /// save to disk if differences is over max disk parity delta - async fn save_if_necessary(&self) { - let cur_disk_parity_delta = self.disk_parity_delta.load(Ordering::Relaxed); - if cur_disk_parity_delta >= self.config.max_disk_parity_delta { - if let Err(err) = self.save().await { - tracing::error!("failed to save cache to file: {}", err); - }; - } - } -} - -#[cfg(test)] -mod test { - - use super::*; - use async_compatibility_layer::art::async_sleep; - use libp2p_identity::PeerId; - use tracing::instrument; - - /// cache eviction test - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_dht_cache_eviction() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - // cache with 1s eviction - let cache = Cache::new(Config { - filename: None, - expiry: Duration::from_secs(1), - max_disk_parity_delta: 4, - }) - .await; - - let (key, value) = (PeerId::random(), PeerId::random()); - - // insert - cache.insert(key.to_bytes(), value.to_bytes()).await; - - // check that it is in the cache and expiries - assert_eq!( - cache.get(&key.to_bytes()).await.unwrap().value(), - &value.to_bytes() - ); - assert_eq!(cache.expiries.read().await.len(), 1); - - // sleep for 1s - async_sleep(Duration::from_secs(1)).await; - - // check that now is evicted - assert!(cache.get(&key.to_bytes()).await.is_none()); - - // check that the cache and expiries are empty - assert!(cache.expiries.read().await.is_empty()); - assert!(cache.inner.is_empty()); - } - - /// cache add test - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_dht_cache_save_load() { - let _ = std::fs::remove_file("test.cache"); - - let cache = Cache::new(Config { - filename: Some("test.cache".to_string()), - expiry: Duration::from_secs(600), - max_disk_parity_delta: 4, - }) - .await; - - // add 10 key-value pairs to the cache - for i in 0u8..10u8 { - let (key, value) = (vec![i; 1], vec![i + 1; 1]); - cache.insert(key, value).await; - } - - // save the cache - cache.save().await.unwrap(); - - // load the cache - let cache = Cache::new(Config { - filename: Some("test.cache".to_string()), - expiry: Duration::from_secs(600), - max_disk_parity_delta: 4, - }) - .await; - - // check that the cache has the 10 key-value pairs - for i in 0u8..10u8 { - let (key, value) = (vec![i; 1], vec![i + 1; 1]); - assert_eq!(cache.get(&key).await.unwrap().value(), &value); - } - - // delete the cache file - let _ = std::fs::remove_file("test.cache"); - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_dht_disk_parity() { - let _ = std::fs::remove_file("test.cache"); - - let cache = Cache::new(Config { - // tests run sequentially, shouldn't matter - filename: Some("test.cache".to_string()), - expiry: Duration::from_secs(600), - max_disk_parity_delta: 4, - }) - .await; - - // insert into cache - for i in 0..3 { - cache.insert(vec![i; 1], vec![i + 1; 1]).await; - } - - // check that file is not saved - assert!(!std::path::Path::new("test.cache").exists()); - - // insert into cache - cache.insert(vec![0; 1], vec![1; 1]).await; - - // check that file is saved - assert!(std::path::Path::new("test.cache").exists()); - - // delete the cache file - _ = std::fs::remove_file("test.cache"); - } -} diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index a9e59d1785..2a61afb9af 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -6,13 +6,10 @@ use std::{ }; /// a local caching layer for the DHT key value pairs -mod cache; - -use async_compatibility_layer::art::{async_block_on, async_spawn}; use futures::channel::oneshot::Sender; use lazy_static::lazy_static; -use libp2p::kad::Behaviour as KademliaBehaviour; use libp2p::kad::Event as KademliaEvent; +use libp2p::kad::{store::RecordStore, Behaviour as KademliaBehaviour}; use libp2p::{ kad::{ /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapError, BootstrapOk, @@ -34,8 +31,6 @@ lazy_static! { static ref MAX_DHT_QUERY_SIZE: NonZeroUsize = NonZeroUsize::new(50).unwrap(); } -use self::cache::Cache; - use super::exponential_backoff::ExponentialBackoff; /// Behaviour wrapping libp2p's kademlia @@ -71,8 +66,6 @@ pub struct DHTBehaviour { pub peer_id: PeerId, /// replication factor pub replication_factor: NonZeroUsize, - /// kademlia cache - cache: Cache, } /// State of bootstrapping @@ -123,11 +116,11 @@ impl DHTBehaviour { /// Create a new DHT behaviour #[must_use] - pub async fn new( + pub fn new( mut kadem: KademliaBehaviour, pid: PeerId, replication_factor: NonZeroUsize, - cache_location: Option, + _: Option, ) -> Self { // needed because otherwise we stay in client mode when testing locally // and don't publish keys stuff @@ -156,13 +149,6 @@ impl DHTBehaviour { }, in_progress_get_closest_peers: HashMap::default(), replication_factor, - cache: Cache::new( - cache::ConfigBuilder::default() - .filename(cache_location) - .build() - .unwrap_or_default(), - ) - .await, } } @@ -249,9 +235,9 @@ impl DHTBehaviour { } // check cache before making the request - if let Some(entry) = async_block_on(self.cache.get(&key)) { + if let Some(entry) = self.kadem.store_mut().get(&key.clone().into()) { // exists in cache - if chan.send(entry.value().clone()).is_err() { + if chan.send(entry.value.clone()).is_err() { error!("Get DHT: channel closed before get record request result could be sent"); } } else { @@ -338,13 +324,15 @@ impl DHTBehaviour { .into_iter() .find(|(_, v)| *v >= NUM_REPLICATED_TO_TRUST) { - // insert into cache - // TODO we should find a better place to set the cache - // https://github.com/EspressoSystems/HotShot/issues/2554 - let cache = self.cache.clone(); - let val = r.clone(); - async_spawn(async move { cache.insert(key, val).await }); - + let record = Record { + key: key.into(), + value: r.clone(), + publisher: None, + expires: None, + }; + if self.kadem.store_mut().put(record).is_err() { + error!("Error putting DHT Get result into Record Store"); + } // return value if notify.send(r).is_err() { error!("Get DHT: channel closed before get record request result could be sent"); diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index de28d88f81..75335e5e2d 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -287,8 +287,7 @@ impl NetworkNode { .replication_factor .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), config.dht_cache_location.clone(), - ) - .await, + ), identify, DMBehaviour::new(request_response), ); From 30d279afd935cb2bfefc9e01846db55c080e547c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 09:32:23 -0500 Subject: [PATCH 0800/1393] Bump clap from 4.5.0 to 4.5.1 (#2606) Bumps [clap](https://github.com/clap-rs/clap) from 4.5.0 to 4.5.1. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.0...clap_complete-v4.5.1) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 9276214352..56da94836e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -85,7 +85,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6.3" bincode = { workspace = true } -clap = { version = "4.4", features = ["derive", "env"], optional = true } +clap = { version = "4.5", features = ["derive", "env"], optional = true } commit = { workspace = true } hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } @@ -120,7 +120,7 @@ tokio = { workspace = true } async-std = { workspace = true } [dev-dependencies] -clap = { version = "4.4", features = ["derive", "env"] } +clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } local-ip-address = "0.5.7" From 7c66fa940e4a38600c47da18806b585ef1d0d4ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 23:58:20 -0500 Subject: [PATCH 0801/1393] Bump syn from 2.0.49 to 2.0.50 (#2613) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.49 to 2.0.50. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.49...2.0.50) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- testing-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 2307bd101e..62cb5a176e 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ tracing = { workspace = true } serde = { workspace = true } # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.49", features = ["full", "extra-traits"] } +syn = { version = "2.0.50", features = ["full", "extra-traits"] } proc-macro2 = "1.0.78" derive_builder = "0.13.1" From 6bb40ce6ded9bd93f58279721f5d81f37e22a055 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 23:58:41 -0500 Subject: [PATCH 0802/1393] Bump serde_json from 1.0.113 to 1.0.114 (#2614) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.113 to 1.0.114. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.113...v1.0.114) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 08261c72dc..1d157f15ff 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -27,7 +27,7 @@ libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.113" +serde_json = "1.0.114" snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/types/Cargo.toml b/types/Cargo.toml index 9b8117bf88..a9e6801b5b 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -47,7 +47,7 @@ tracing = { workspace = true } typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.113" +serde_json = "1.0.114" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 6c99a8cf3a2d506956bdd2da656d814d8654f7e6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 20 Feb 2024 09:57:40 -0500 Subject: [PATCH 0803/1393] [LIBP2P] Call bootstrap repeatedly In DHT (#2610) * boostrap with timeout * Fix Startup * fixes * fix counter tests * Call bootstrap when routing changes * remove wait_to_connect * lint * add back wait_to_connect * add more context to comment --- .../src/traits/networking/libp2p_network.rs | 34 +++++----- .../src/network/behaviours/dht/mod.rs | 58 +++++------------ .../network/behaviours/exponential_backoff.rs | 4 ++ libp2p-networking/src/network/mod.rs | 1 - libp2p-networking/src/network/node/handle.rs | 64 +++++++++---------- libp2p-networking/tests/common/mod.rs | 10 ++- 6 files changed, 75 insertions(+), 96 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index d7111c0cea..633b70e87e 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -446,20 +446,7 @@ impl Libp2pNetwork { }; handle.add_known_peers(bs_addrs).await.unwrap(); - // 10 minute timeout - let timeout_duration = Duration::from_secs(600); - // perform connection - info!("WAITING TO CONNECT ON NODE {:?}", id); - handle - .wait_to_connect(4, id, timeout_duration) - .await - .unwrap(); - - let connected_num = handle.num_connected().await?; - metrics_connected_peers - .metrics - .connected_peers - .set(connected_num); + handle.begin_bootstrap().await?; while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; @@ -471,7 +458,6 @@ impl Libp2pNetwork { if is_da { handle.subscribe("DA".to_string()).await.unwrap(); } - // TODO figure out some way of passing in ALL keypairs. That way we can add the // global topic to the topic map // NOTE this wont' work without this change @@ -487,7 +473,6 @@ impl Libp2pNetwork { while handle.put_record(&pk, &handle.peer_id()).await.is_err() { async_sleep(Duration::from_secs(1)).await; } - info!( "Node {:?} is ready, type: {:?}", handle.peer_id(), @@ -497,13 +482,26 @@ impl Libp2pNetwork { while handle.put_record(&handle.peer_id(), &pk).await.is_err() { async_sleep(Duration::from_secs(1)).await; } - + // 10 minute timeout + let timeout_duration = Duration::from_secs(600); + // perform connection + info!("WAITING TO CONNECT ON NODE {:?}", id); + handle + .wait_to_connect(4, id, timeout_duration) + .await + .unwrap(); info!( "node {:?} is barring bootstrap, type: {:?}", handle.peer_id(), node_type ); + let connected_num = handle.num_connected().await?; + metrics_connected_peers + .metrics + .connected_peers + .set(connected_num); + is_ready.store(true, Ordering::Relaxed); info!("STARTING CONSENSUS ON {:?}", handle.peer_id()); Ok::<(), NetworkError>(()) @@ -531,7 +529,7 @@ impl Libp2pNetwork { broadcast_send: &UnboundedSender, ) -> Result<(), NetworkError> { match msg { - GossipMsg(msg, _topic) => { + GossipMsg(msg, _) => { let result: Result = bincode_opts().deserialize(&msg); if let Ok(result) = result { broadcast_send diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 2a61afb9af..54fe4dd13a 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -60,8 +60,6 @@ pub struct DHTBehaviour { pub kadem: KademliaBehaviour, /// State of bootstrapping pub bootstrap_state: Bootstrap, - /// State of last random walk - pub random_walk: RandomWalk, /// the peer id (useful only for debugging right now) pub peer_id: PeerId, /// replication factor @@ -77,14 +75,6 @@ pub struct Bootstrap { pub backoff: ExponentialBackoff, } -/// State of the periodic random walk -pub struct RandomWalk { - /// State of random walk - state: State, - /// Retry timeout - backoff: ExponentialBackoff, -} - /// State used for random walk and bootstrapping #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum State { @@ -92,8 +82,6 @@ pub enum State { NotStarted, /// In progress Started, - /// Sucessfully completed - Finished, } /// DHT event enum @@ -142,11 +130,6 @@ impl DHTBehaviour { state: State::NotStarted, backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), }, - random_walk: RandomWalk { - state: State::NotStarted, - // TODO jr this may be way too frequent - backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), - }, in_progress_get_closest_peers: HashMap::default(), replication_factor, } @@ -422,10 +405,7 @@ impl DHTBehaviour { if chan.send(()).is_err() { warn!("DHT: finished query but client no longer interested"); }; - } else { - self.random_walk.state = State::NotStarted; - self.random_walk.backoff.start_next(true); - } + }; info!( "peer {:?} successfully completed get closest peers for {:?} with peers {:?}", self.peer_id, key, peers @@ -434,10 +414,7 @@ impl DHTBehaviour { Err(e) => { if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { let _: Result<_, _> = chan.send(()); - } else { - self.random_walk.state = State::NotStarted; - self.random_walk.backoff.start_next(true); - } + }; warn!( "peer {:?} failed to get closest peers with {:?} and stats {:?}", self.peer_id, e, stats @@ -462,11 +439,13 @@ impl DHTBehaviour { .. } => { if num_remaining == 0 { - // if bootstrap is successful, restart. info!("Finished bootstrap for peer {:?}", self.peer_id); - self.bootstrap_state.state = State::Finished; + self.bootstrap_state.state = State::NotStarted; self.event_queue.push(DHTEvent::IsBootstrapped); - self.begin_bootstrap = false; + // After initial bootstrap suceeds do it every 2 minutes to maintain routing. + self.bootstrap_state.backoff = + ExponentialBackoff::new(1, Duration::from_secs(120)); + self.bootstrap_state.backoff.start_next(true); } else { warn!( "Bootstrap in progress: num remaining nodes to ping {:?}", @@ -512,7 +491,15 @@ impl DHTBehaviour { addresses: _, bucket_range: _, old_peer: _, - } => {} + } => { + // Trigger a new bootstrap when our table changes, if it's not running + // We do this to refresh our peers when we know routing has changed + // For more info see: https://github.com/libp2p/rust-libp2p/pull/4838 + // TODO: Remove once that pr is in a libp2p release + if self.bootstrap_state.state == State::NotStarted { + self.bootstrap_state.backoff.expire(); + } + } e @ KademliaEvent::OutboundQueryProgressed { .. } => { info!("Not handling dht event {:?}", e); } @@ -575,10 +562,6 @@ impl NetworkBehaviour for DHTBehaviour { type ToSwarm = DHTEvent; - // fn new_handler(&mut self) -> Self::ConnectionHandler { - // self.kadem.new_handler() - // } - fn poll( &mut self, cx: &mut std::task::Context<'_>, @@ -590,6 +573,7 @@ impl NetworkBehaviour for DHTBehaviour { match self.kadem.bootstrap() { Ok(_) => { self.bootstrap_state.state = State::Started; + info!("Starting bootstrap"); } Err(e) => { error!( @@ -605,14 +589,6 @@ impl NetworkBehaviour for DHTBehaviour { } } - if matches!(self.random_walk.state, State::NotStarted) - && self.random_walk.backoff.is_expired() - && matches!(self.bootstrap_state.state, State::Finished) - { - self.kadem.get_closest_peers(PeerId::random()); - self.random_walk.state = State::Started; - } - // retry put/gets if they are ready while let Some(req) = self.queued_get_record_queries.pop_front() { if req.backoff.is_expired() { diff --git a/libp2p-networking/src/network/behaviours/exponential_backoff.rs b/libp2p-networking/src/network/behaviours/exponential_backoff.rs index 62391a2ff0..2091f2abb2 100644 --- a/libp2p-networking/src/network/behaviours/exponential_backoff.rs +++ b/libp2p-networking/src/network/behaviours/exponential_backoff.rs @@ -61,6 +61,10 @@ impl ExponentialBackoff { true } } + /// Marked as expired regardless of time left. + pub fn expire(&mut self) { + self.started = None; + } } impl Default for ExponentialBackoff { diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 523b5eef34..ac8bcc8500 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -241,7 +241,6 @@ pub async fn spin_up_swarm( ) -> Result<(), NetworkNodeHandleError> { info!("known_nodes{:?}", known_nodes); handle.add_known_peers(known_nodes).await?; - handle.wait_to_connect(4, idx, timeout_len).await?; handle.subscribe("global".to_string()).await?; Ok(()) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 69e17f3678..8790f20c7a 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -199,38 +199,6 @@ impl NetworkNodeHandle { }) } - /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. - /// - /// # Errors - /// - /// Will return any networking error encountered, or `ConnectTimeout` if the `timeout` has elapsed. - pub async fn wait_to_connect( - &self, - num_peers: usize, - node_id: usize, - timeout: Duration, - ) -> Result<(), NetworkNodeHandleError> - where - S: Default + Debug, - { - let start = Instant::now(); - self.begin_bootstrap().await?; - let mut connected_ok = false; - while !connected_ok { - if start.elapsed() >= timeout { - return Err(NetworkNodeHandleError::ConnectTimeout); - } - async_sleep(Duration::from_secs(1)).await; - let num_connected = self.num_connected().await?; - info!( - "WAITING TO CONNECT, connected to {} / {} peers ON NODE {}", - num_connected, num_peers, node_id - ); - connected_ok = num_connected >= num_peers; - } - Ok(()) - } - /// Receives a reference of the internal `NetworkNodeReceiver`, which can be used to query for incoming messages. pub fn receiver(&self) -> &NetworkNodeReceiver { &self.receiver @@ -278,7 +246,37 @@ impl NetworkNodeHandle { self.send_request(req).await?; r.await.map_err(|_| NetworkNodeHandleError::RecvError) } - + /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. + /// + /// # Errors + /// + /// Will return any networking error encountered, or `ConnectTimeout` if the `timeout` has elapsed. + pub async fn wait_to_connect( + &self, + num_peers: usize, + node_id: usize, + timeout: Duration, + ) -> Result<(), NetworkNodeHandleError> + where + S: Default + Debug, + { + let start = Instant::now(); + self.begin_bootstrap().await?; + let mut connected_ok = false; + while !connected_ok { + if start.elapsed() >= timeout { + return Err(NetworkNodeHandleError::ConnectTimeout); + } + async_sleep(Duration::from_secs(1)).await; + let num_connected = self.num_connected().await?; + info!( + "WAITING TO CONNECT, connected to {} / {} peers ON NODE {}", + num_connected, num_peers, node_id + ); + connected_ok = num_connected >= num_peers; + } + Ok(()) + } /// Look up a peer's addresses in kademlia /// NOTE: this should always be called before any `request_response` is initiated /// # Errors diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index c23c077c60..49bafb12ab 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -145,7 +145,11 @@ pub async fn spin_up_swarms( bootstrap_addrs.push((node.peer_id(), addr)); connecting_futs.push({ let node = node.clone(); - async move { node.wait_to_connect(4, i, timeout_len).await }.boxed_local() + async move { + node.begin_bootstrap().await?; + node.lookup_pid(PeerId::random()).await + } + .boxed_local() }); handles.push(node); } @@ -175,8 +179,8 @@ pub async fn spin_up_swarms( connecting_futs.push({ let node = node.clone(); async move { - node.wait_to_connect(4, num_bootstrap + j, timeout_len) - .await + node.begin_bootstrap().await?; + node.lookup_pid(PeerId::random()).await } .boxed_local() }); From a0b3a69165b71af32f9ca71b9650c8b263fa2744 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 20 Feb 2024 11:22:24 -0500 Subject: [PATCH 0804/1393] Unify HotShotConsensusApi into SystemContext (#2601) `HotShotConsensusApi` is now merged into `SystemContextHandle`. The `SystemContext` wrapper was removed, and `SystemContextInner` was renamed to `SystemContext`. `SystemContextHandle` now directly contains an `Arc` without an intermediate wrapper. A `CreateTaskState` trait is introduced for initialization of most HotShot tasks. --- hotshot/src/lib.rs | 103 +++++------- hotshot/src/tasks/mod.rs | 196 ++++------------------- hotshot/src/tasks/task_state.rs | 194 ++++++++++++++++++++++ hotshot/src/types/handle.rs | 11 +- libp2p-networking/examples/common/mod.rs | 3 +- testing/src/task_helpers.rs | 16 +- testing/src/test_runner.rs | 4 +- testing/tests/consensus_task.rs | 67 ++++---- testing/tests/da_task.rs | 47 ++---- testing/tests/network_task.rs | 17 +- testing/tests/vid_task.rs | 34 ++-- testing/tests/view_sync_task.rs | 34 ++-- 12 files changed, 363 insertions(+), 363 deletions(-) create mode 100644 hotshot/src/tasks/task_state.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 240c740f06..0fe820fb32 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -25,7 +25,6 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use commit::Committable; -use custom_debug::Debug; use futures::join; use hotshot_constants::{EVENT_CHANNEL_SIZE, VERSION_0_1}; use hotshot_task_impls::events::HotShotEvent; @@ -115,7 +114,8 @@ pub struct Memberships { } /// Holds the state needed to participate in `HotShot` consensus -pub struct SystemContextInner> { +#[derive(Clone)] +pub struct SystemContext> { /// The public key of this node public_key: TYPES::SignatureKey, @@ -154,16 +154,8 @@ pub struct SystemContextInner> { pub id: u64, } -/// Thread safe, shared view of a `HotShot` -// TODO Perhaps we can delete SystemContext since we only consume it in run_tasks() -#[derive(Clone)] -pub struct SystemContext> { - /// Handle to internal hotshot implementation - pub inner: Arc>, -} - impl> SystemContext { - /// Creates a new [`SystemContext`] with the given configuration options and sets it up with the given + /// Creates a new [`Arc`] with the given configuration options and sets it up with the given /// genesis block /// /// To do a full initialization, use `fn init` instead, which will set up background tasks as @@ -180,7 +172,7 @@ impl> SystemContext { networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, - ) -> Result> { + ) -> Result, HotShotError> { debug!("Creating a new hotshot"); let consensus_metrics = Arc::new(metrics); @@ -247,7 +239,7 @@ impl> SystemContext { // Our own copy of the receiver is inactive so it doesn't count. external_tx.set_await_active(false); - let inner: Arc> = Arc::new(SystemContextInner { + let inner: Arc> = Arc::new(SystemContext { id: nonce, consensus, public_key, @@ -261,7 +253,7 @@ impl> SystemContext { output_event_stream: (external_tx, external_rx.deactivate()), }); - Ok(Self { inner }) + Ok(inner) } /// "Starts" consensus by sending a `QCFormed` event @@ -270,8 +262,7 @@ impl> SystemContext { /// Panics if sending genesis fails pub async fn start_consensus(&self) { debug!("Starting Consensus"); - self.inner - .internal_event_stream + self.internal_event_stream .0 .broadcast_direct(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), @@ -285,7 +276,7 @@ impl> SystemContext { // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); - broadcast_event(event, &self.inner.output_event_stream.0).await; + broadcast_event(event, &self.output_event_stream.0).await; } /// Publishes a transaction asynchronously to the network @@ -306,27 +297,26 @@ impl> SystemContext { let api = self.clone(); async_spawn(async move { - let da_membership = &api.inner.memberships.da_membership.clone(); + let da_membership = &api.memberships.da_membership.clone(); join! { // TODO We should have a function that can return a network error if there is one // but first we'd need to ensure our network implementations can support that // (and not hang instead) // api - .inner .networks .da_network .broadcast_message( Message { version: VERSION_0_1, - sender: api.inner.public_key.clone(), + sender: api.public_key.clone(), kind: MessageKind::from(message), }, da_membership.get_committee(TYPES::Time::new(0)), ), api .send_external_event(Event { - view_number: api.inner.consensus.read().await.cur_view, + view_number: api.consensus.read().await.cur_view, event: EventType::Transactions { transactions: vec![transaction], }, @@ -339,14 +329,14 @@ impl> SystemContext { /// Returns a copy of the consensus struct #[must_use] pub fn get_consensus(&self) -> Arc>> { - self.inner.consensus.clone() + self.consensus.clone() } /// Returns a copy of the last decided leaf /// # Panics /// Panics if internal leaf for consensus is inconsistent pub async fn get_decided_leaf(&self) -> Leaf { - self.inner.consensus.read().await.get_decided_leaf() + self.consensus.read().await.get_decided_leaf() } /// [Non-blocking] instantly returns a copy of the last decided leaf if @@ -356,8 +346,7 @@ impl> SystemContext { /// Panics if internal state for consensus is inconsistent #[must_use] pub fn try_get_decided_leaf(&self) -> Option> { - self.inner - .consensus + self.consensus .try_read() .map(|guard| guard.get_decided_leaf()) } @@ -367,12 +356,7 @@ impl> SystemContext { /// # Panics /// Panics if internal state for consensus is inconsistent pub async fn get_decided_state(&self) -> Arc { - self.inner - .consensus - .read() - .await - .get_decided_state() - .clone() + self.consensus.read().await.get_decided_state().clone() } /// Get the validated state from a given `view`. @@ -383,7 +367,7 @@ impl> SystemContext { /// [`get_decided_state`](Self::get_decided_state)) or if there is no path for the requested /// view to ever be decided. pub async fn get_state(&self, view: TYPES::Time) -> Option> { - self.inner.consensus.read().await.get_state(view).cloned() + self.consensus.read().await.get_state(view).cloned() } /// Initializes a new [`SystemContext`] and does the work of setting up all the background tasks @@ -433,14 +417,14 @@ impl> SystemContext { ) .await?; let handle = hotshot.clone().run_tasks().await; - let (tx, rx) = hotshot.inner.internal_event_stream.clone(); + let (tx, rx) = hotshot.internal_event_stream.clone(); Ok((handle, tx, rx.activate())) } /// return the timeout for a view for `self` #[must_use] pub fn get_next_view_timeout(&self) -> u64 { - self.inner.config.next_view_timeout + self.config.next_view_timeout } } @@ -448,26 +432,26 @@ impl> SystemContext { /// Get access to [`Consensus`] #[must_use] pub fn consensus(&self) -> &Arc>> { - &self.inner.consensus + &self.consensus } /// Spawn all tasks that operate on [`SystemContextHandle`]. /// /// For a list of which tasks are being spawned, see this module's documentation. #[allow(clippy::too_many_lines)] - pub async fn run_tasks(self) -> SystemContextHandle { + pub async fn run_tasks(&self) -> SystemContextHandle { // ED Need to set first first number to 1, or properly trigger the change upon start let registry = Arc::new(TaskRegistry::default()); - let output_event_stream = self.inner.output_event_stream.clone(); - let internal_event_stream = self.inner.internal_event_stream.clone(); + let output_event_stream = self.output_event_stream.clone(); + let internal_event_stream = self.internal_event_stream.clone(); - let quorum_network = self.inner.networks.quorum_network.clone(); - let da_network = self.inner.networks.da_network.clone(); - let quorum_membership = self.inner.memberships.quorum_membership.clone(); - let da_membership = self.inner.memberships.da_membership.clone(); - let vid_membership = self.inner.memberships.vid_membership.clone(); - let view_sync_membership = self.inner.memberships.view_sync_membership.clone(); + let quorum_network = self.networks.quorum_network.clone(); + let da_network = self.networks.da_network.clone(); + let quorum_membership = self.memberships.quorum_membership.clone(); + let da_membership = self.memberships.da_membership.clone(); + let vid_membership = self.memberships.vid_membership.clone(); + let view_sync_membership = self.memberships.view_sync_membership.clone(); let (event_tx, event_rx) = internal_event_stream.clone(); @@ -475,8 +459,8 @@ impl> SystemContext { registry: registry.clone(), output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), - hotshot: self.clone(), - storage: self.inner.storage.clone(), + hotshot: self.clone().into(), + storage: self.storage.clone(), }; add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; @@ -564,48 +548,41 @@ impl> SystemContext { } } -/// A handle that exposes the interface that hotstuff needs to interact with a [`SystemContextInner`] -#[derive(Clone, Debug)] -pub struct HotShotConsensusApi> { - /// Reference to the [`SystemContextInner`] - pub inner: Arc>, -} - #[async_trait] impl> ConsensusApi - for HotShotConsensusApi + for SystemContextHandle { fn total_nodes(&self) -> NonZeroUsize { - self.inner.config.total_nodes + self.hotshot.config.total_nodes } fn propose_min_round_time(&self) -> Duration { - self.inner.config.propose_min_round_time + self.hotshot.config.propose_min_round_time } fn propose_max_round_time(&self) -> Duration { - self.inner.config.propose_max_round_time + self.hotshot.config.propose_max_round_time } fn max_transactions(&self) -> NonZeroUsize { - self.inner.config.max_transactions + self.hotshot.config.max_transactions } fn min_transactions(&self) -> usize { - self.inner.config.min_transactions + self.hotshot.config.min_transactions } async fn send_event(&self, event: Event) { debug!(?event, "send_event"); - broadcast_event(event, &self.inner.output_event_stream.0).await; + broadcast_event(event, &self.hotshot.output_event_stream.0).await; } fn public_key(&self) -> &TYPES::SignatureKey { - &self.inner.public_key + &self.hotshot.public_key } fn private_key(&self) -> &::PrivateKey { - &self.inner.private_key + &self.hotshot.private_key } async fn store_leaf( @@ -614,7 +591,7 @@ impl> ConsensusApi leaf: Leaf, ) -> std::result::Result<(), hotshot_types::traits::storage::StorageError> { let view_to_insert = StoredView::from(leaf); - let storage = &self.inner.storage; + let storage = &self.hotshot.storage; storage.append_single_view(view_to_insert).await?; storage.cleanup_storage_up_to_view(old_anchor_view).await?; storage.commit().await?; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3f565b94fd..d250f549d8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,13 +1,18 @@ //! Provides a number of tasks that run continuously -use crate::{types::SystemContextHandle, HotShotConsensusApi}; +/// Provides trait to create task states from a `SystemContextHandle` +pub mod task_state; + +use crate::tasks::task_state::CreateTaskState; +use crate::ConsensusApi; + +use crate::types::SystemContextHandle; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use hotshot_constants::VERSION_0_1; use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ - consensus::{CommitmentAndMetadata, ConsensusTaskState}, + consensus::ConsensusTaskState, da::DATaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, @@ -17,26 +22,17 @@ use hotshot_task_impls::{ view_sync::ViewSyncTaskState, }; use hotshot_types::{ - event::Event, + message::Message, + traits::{election::Membership, network::ConnectedNetwork}, +}; +use hotshot_types::{ message::Messages, traits::{ - block_contents::vid_commitment, - consensus_api::ConsensusApi, network::{ConsensusIntentEvent, TransmitType}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - BlockPayload, }, }; -use hotshot_types::{ - message::Message, - traits::{election::Membership, network::ConnectedNetwork}, -}; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - marker::PhantomData, - sync::Arc, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; use tracing::error; /// event for global event stream @@ -110,7 +106,6 @@ pub async fn add_network_message_task< task_reg.register(direct_handle).await; task_reg.register(broadcast_handle).await; } - /// Add the network task to handle events and send messages. pub async fn add_network_event_task< TYPES: NodeType, @@ -133,60 +128,14 @@ pub async fn add_network_event_task< task_reg.run_task(task).await; } -/// Create the consensus task state -/// # Panics -/// If genesis payload can't be encoded. This should not be possible -pub async fn create_consensus_state>( - output_stream: Sender>, - handle: &SystemContextHandle, -) -> ConsensusTaskState> { - let consensus = handle.hotshot.get_consensus(); - let c_api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - - let (payload, metadata) = ::genesis(); - // Impossible for `unwrap` to fail on the genesis payload. - let payload_commitment = vid_commitment( - &payload.encode().unwrap().collect(), - handle - .hotshot - .inner - .memberships - .quorum_membership - .total_nodes(), - ); - // build the consensus task - let consensus_state = ConsensusTaskState { - consensus, - timeout: handle.hotshot.inner.config.next_view_timeout, - cur_view: TYPES::Time::new(0), - payload_commitment_and_metadata: Some(CommitmentAndMetadata { - commitment: payload_commitment, - metadata, - is_genesis: true, - }), - api: c_api.clone(), - _pd: PhantomData, - vote_collector: None.into(), - timeout_vote_collector: None.into(), - timeout_task: None, - timeout_cert: None, - upgrade_cert: None, - decided_upgrade_cert: None, - current_network_version: VERSION_0_1, - output_event_stream: output_stream, - vid_shares: BTreeMap::new(), - current_proposal: None, - id: handle.hotshot.inner.id, - public_key: c_api.public_key().clone(), - private_key: c_api.private_key().clone(), - quorum_network: c_api.inner.networks.quorum_network.clone(), - committee_network: c_api.inner.networks.da_network.clone(), - timeout_membership: c_api.inner.memberships.quorum_membership.clone().into(), - quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), - committee_membership: c_api.inner.memberships.da_membership.clone().into(), - }; +/// Setup polls for the given `consensus_state` +pub async fn inject_consensus_polls< + TYPES: NodeType, + I: NodeImplementation, + API: ConsensusApi, +>( + consensus_state: &ConsensusTaskState, +) { // Poll (forever) for the latest quorum proposal consensus_state .quorum_network @@ -214,7 +163,6 @@ pub async fn create_consensus_state>( rx: Receiver>, handle: &SystemContextHandle, ) { - let state = - create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), handle).await; - let task = Task::new(tx, rx, task_reg.clone(), state); + let consensus_state = ConsensusTaskState::create_from(handle); + + inject_consensus_polls(&consensus_state).await; + + let task = Task::new(tx, rx, task_reg.clone(), consensus_state); task_reg.run_task(task).await; } @@ -237,55 +187,23 @@ pub async fn add_vid_task>( rx: Receiver>, handle: &SystemContextHandle, ) { - // build the vid task - let c_api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let vid_state = VIDTaskState { - api: c_api.clone(), - consensus: handle.hotshot.get_consensus(), - cur_view: TYPES::Time::new(0), - vote_collector: None, - network: c_api.inner.networks.quorum_network.clone(), - membership: c_api.inner.memberships.vid_membership.clone().into(), - public_key: c_api.public_key().clone(), - private_key: c_api.private_key().clone(), - id: handle.hotshot.inner.id, - }; - + let vid_state = VIDTaskState::create_from(handle); let task = Task::new(tx, rx, task_reg.clone(), vid_state); task_reg.run_task(task).await; } /// add the Upgrade task. -/// -/// # Panics -/// -/// Uses .`unwrap()`, though this should never panic. pub async fn add_upgrade_task>( task_reg: Arc, tx: Sender>, rx: Receiver>, handle: &SystemContextHandle, ) { - let c_api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let upgrade_state = UpgradeTaskState { - api: c_api.clone(), - cur_view: TYPES::Time::new(0), - quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), - quorum_network: c_api.inner.networks.quorum_network.clone(), - should_vote: |_upgrade_proposal| false, - vote_collector: None.into(), - public_key: c_api.public_key().clone(), - private_key: c_api.private_key().clone(), - id: handle.hotshot.inner.id, - }; + let upgrade_state = UpgradeTaskState::create_from(handle); + let task = Task::new(tx, rx, task_reg.clone(), upgrade_state); task_reg.run_task(task).await; } - /// add the Data Availability task pub async fn add_da_task>( task_reg: Arc, @@ -294,21 +212,7 @@ pub async fn add_da_task>( handle: &SystemContextHandle, ) { // build the da task - let c_api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let da_state = DATaskState { - api: c_api.clone(), - consensus: handle.hotshot.get_consensus(), - da_membership: c_api.inner.memberships.da_membership.clone().into(), - da_network: c_api.inner.networks.da_network.clone(), - quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), - cur_view: TYPES::Time::new(0), - vote_collector: None.into(), - public_key: c_api.public_key().clone(), - private_key: c_api.private_key().clone(), - id: handle.hotshot.inner.id, - }; + let da_state = DATaskState::create_from(handle); let task = Task::new(tx, rx, task_reg.clone(), da_state); task_reg.run_task(task).await; @@ -321,26 +225,12 @@ pub async fn add_transaction_task> rx: Receiver>, handle: &SystemContextHandle, ) { - // build the transactions task - let c_api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let transactions_state = TransactionTaskState { - api: c_api.clone(), - consensus: handle.hotshot.get_consensus(), - transactions: Arc::default(), - seen_transactions: HashSet::new(), - cur_view: TYPES::Time::new(0), - network: c_api.inner.networks.quorum_network.clone(), - membership: c_api.inner.memberships.quorum_membership.clone().into(), - public_key: c_api.public_key().clone(), - private_key: c_api.private_key().clone(), - id: handle.hotshot.inner.id, - }; + let transactions_state = TransactionTaskState::create_from(handle); let task = Task::new(tx, rx, task_reg.clone(), transactions_state); task_reg.run_task(task).await; } + /// add the view sync task pub async fn add_view_sync_task>( task_reg: Arc, @@ -348,27 +238,7 @@ pub async fn add_view_sync_task>( rx: Receiver>, handle: &SystemContextHandle, ) { - let api = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - // build the view sync task - let view_sync_state = ViewSyncTaskState { - current_view: TYPES::Time::new(0), - next_view: TYPES::Time::new(0), - network: api.inner.networks.quorum_network.clone(), - membership: api.inner.memberships.view_sync_membership.clone().into(), - public_key: api.public_key().clone(), - private_key: api.private_key().clone(), - api, - num_timeouts_tracked: 0, - replica_task_map: HashMap::default().into(), - pre_commit_relay_map: HashMap::default().into(), - commit_relay_map: HashMap::default().into(), - finalize_relay_map: HashMap::default().into(), - view_sync_timeout: Duration::new(10, 0), - id: handle.hotshot.inner.id, - last_garbage_collected_view: TYPES::Time::new(0), - }; + let view_sync_state = ViewSyncTaskState::create_from(handle); let task = Task::new(tx, rx, task_reg.clone(), view_sync_state); task_reg.run_task(task).await; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs new file mode 100644 index 0000000000..19f6e026dd --- /dev/null +++ b/hotshot/src/tasks/task_state.rs @@ -0,0 +1,194 @@ +use crate::types::SystemContextHandle; + +use hotshot_constants::VERSION_0_1; +use hotshot_task_impls::{ + consensus::{CommitmentAndMetadata, ConsensusTaskState}, + da::DATaskState, + transactions::TransactionTaskState, + upgrade::UpgradeTaskState, + vid::VIDTaskState, + view_sync::ViewSyncTaskState, +}; +use hotshot_types::traits::election::Membership; +use hotshot_types::traits::{ + block_contents::vid_commitment, + consensus_api::ConsensusApi, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + BlockPayload, +}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; + +/// Trait for creating task states. +pub trait CreateTaskState +where + TYPES: NodeType, + I: NodeImplementation, +{ + /// Function to create the task state from a given `SystemContextHandle`. + fn create_from(handle: &SystemContextHandle) -> Self; +} + +impl> CreateTaskState + for UpgradeTaskState> +{ + fn create_from( + handle: &SystemContextHandle, + ) -> UpgradeTaskState> { + UpgradeTaskState { + api: handle.clone(), + cur_view: TYPES::Time::new(0), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_network: handle.hotshot.networks.quorum_network.clone(), + should_vote: |_upgrade_proposal| false, + vote_collector: None.into(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + id: handle.hotshot.id, + } + } +} + +impl> CreateTaskState + for VIDTaskState> +{ + fn create_from( + handle: &SystemContextHandle, + ) -> VIDTaskState> { + VIDTaskState { + api: handle.clone(), + consensus: handle.hotshot.get_consensus(), + cur_view: TYPES::Time::new(0), + vote_collector: None, + network: handle.hotshot.networks.quorum_network.clone(), + membership: handle.hotshot.memberships.vid_membership.clone().into(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + id: handle.hotshot.id, + } + } +} + +impl> CreateTaskState + for DATaskState> +{ + fn create_from( + handle: &SystemContextHandle, + ) -> DATaskState> { + DATaskState { + api: handle.clone(), + consensus: handle.hotshot.get_consensus(), + da_membership: handle.hotshot.memberships.da_membership.clone().into(), + da_network: handle.hotshot.networks.da_network.clone(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + cur_view: TYPES::Time::new(0), + vote_collector: None.into(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + id: handle.hotshot.id, + } + } +} + +impl> CreateTaskState + for ViewSyncTaskState> +{ + fn create_from( + handle: &SystemContextHandle, + ) -> ViewSyncTaskState> { + ViewSyncTaskState { + current_view: TYPES::Time::new(0), + next_view: TYPES::Time::new(0), + network: handle.hotshot.networks.quorum_network.clone(), + membership: handle + .hotshot + .memberships + .view_sync_membership + .clone() + .into(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + api: handle.clone(), + num_timeouts_tracked: 0, + replica_task_map: HashMap::default().into(), + pre_commit_relay_map: HashMap::default().into(), + commit_relay_map: HashMap::default().into(), + finalize_relay_map: HashMap::default().into(), + view_sync_timeout: Duration::new(10, 0), + id: handle.hotshot.id, + last_garbage_collected_view: TYPES::Time::new(0), + } + } +} + +impl> CreateTaskState + for TransactionTaskState> +{ + fn create_from( + handle: &SystemContextHandle, + ) -> TransactionTaskState> { + TransactionTaskState { + api: handle.clone(), + consensus: handle.hotshot.get_consensus(), + transactions: Arc::default(), + seen_transactions: HashSet::new(), + cur_view: TYPES::Time::new(0), + network: handle.hotshot.networks.quorum_network.clone(), + membership: handle.hotshot.memberships.quorum_membership.clone().into(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + id: handle.hotshot.id, + } + } +} + +impl> CreateTaskState + for ConsensusTaskState> +{ + fn create_from( + handle: &SystemContextHandle, + ) -> ConsensusTaskState> { + let consensus = handle.hotshot.get_consensus(); + + let (payload, metadata) = ::genesis(); + // Impossible for `unwrap` to fail on the genesis payload. + let payload_commitment = vid_commitment( + &payload.encode().unwrap().collect(), + handle.hotshot.memberships.quorum_membership.total_nodes(), + ); + ConsensusTaskState { + consensus, + timeout: handle.hotshot.config.next_view_timeout, + cur_view: TYPES::Time::new(0), + payload_commitment_and_metadata: Some(CommitmentAndMetadata { + commitment: payload_commitment, + metadata, + is_genesis: true, + }), + api: handle.clone(), + _pd: PhantomData, + vote_collector: None.into(), + timeout_vote_collector: None.into(), + timeout_task: None, + timeout_cert: None, + upgrade_cert: None, + decided_upgrade_cert: None, + current_network_version: VERSION_0_1, + output_event_stream: handle.hotshot.output_event_stream.0.clone(), + vid_shares: BTreeMap::new(), + current_proposal: None, + id: handle.hotshot.id, + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + quorum_network: handle.hotshot.networks.quorum_network.clone(), + committee_network: handle.hotshot.networks.da_network.clone(), + timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + committee_membership: handle.hotshot.memberships.da_membership.clone().into(), + } + } +} diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index fc322a0159..3cd40e4fa8 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -37,7 +37,7 @@ pub struct SystemContextHandle> { pub(crate) registry: Arc, /// Internal reference to the underlying [`SystemContext`] - pub hotshot: SystemContext, + pub hotshot: Arc>, /// Our copy of the `Storage` view for a hotshot pub(crate) storage: I::Storage, @@ -131,7 +131,7 @@ impl + 'static> SystemContextHandl /// Block the underlying quorum (and committee) networking interfaces until node is /// successfully initialized into the networks. pub async fn wait_for_networks_ready(&self) { - self.hotshot.inner.networks.wait_for_networks_ready().await; + self.hotshot.networks.wait_for_networks_ready().await; } /// Shut down the the inner hotshot and wait until all background threads are closed. @@ -143,7 +143,7 @@ impl + 'static> SystemContextHandl Self: 'b, { boxed_sync(async move { - self.hotshot.inner.networks.shut_down_networks().await; + self.hotshot.networks.shut_down_networks().await; self.registry.shutdown().await; }) } @@ -160,7 +160,6 @@ impl + 'static> SystemContextHandl #[cfg(feature = "hotshot-testing")] pub async fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { self.hotshot - .inner .memberships .quorum_membership .get_leader(view_number) @@ -169,12 +168,12 @@ impl + 'static> SystemContextHandl /// Wrapper to get this node's public key #[cfg(feature = "hotshot-testing")] pub fn get_public_key(&self) -> TYPES::SignatureKey { - self.hotshot.inner.public_key.clone() + self.hotshot.public_key.clone() } /// Wrapper to get this node's current view #[cfg(feature = "hotshot-testing")] pub async fn get_current_view(&self) -> TYPES::Time { - self.hotshot.inner.consensus.read().await.cur_view + self.hotshot.consensus.read().await.cur_view } } diff --git a/libp2p-networking/examples/common/mod.rs b/libp2p-networking/examples/common/mod.rs index 648b4a55ec..ff00f4044f 100644 --- a/libp2p-networking/examples/common/mod.rs +++ b/libp2p-networking/examples/common/mod.rs @@ -20,8 +20,7 @@ use clap::{Args, Parser}; use libp2p::{multiaddr, request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use libp2p_networking::network::{ - deserialize_msg, - network_node_handle_error::NodeConfigSnafu, spin_up_swarm, NetworkEvent, + deserialize_msg, network_node_handle_error::NodeConfigSnafu, spin_up_swarm, NetworkEvent, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, }; use rand::{ diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index a7187cd96c..4e65d21efd 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -12,7 +12,7 @@ use commit::Committable; use ethereum_types::U256; use hotshot::{ types::{BLSPubKey, SignatureKey, SystemContextHandle}, - HotShotConsensusApi, HotShotInitializer, Memberships, Networks, SystemContext, + HotShotInitializer, Memberships, Networks, SystemContext, }; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ @@ -212,9 +212,6 @@ async fn build_quorum_proposal_and_signature( let genesis_consensus = handle.get_consensus(); let cur_consensus = genesis_consensus.upgradable_read().await; let mut consensus = RwLockUpgradableReadGuard::upgrade(cur_consensus).await; - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; // parent_view_number should be equal to 0 let parent_view_number = &consensus.high_qc.get_view_number(); assert_eq!(parent_view_number.get_u64(), 0); @@ -233,12 +230,7 @@ async fn build_quorum_proposal_and_signature( let block = ::genesis(); let payload_commitment = vid_commitment( &block.encode().unwrap().collect(), - handle - .hotshot - .inner - .memberships - .quorum_membership - .total_nodes(), + handle.hotshot.memberships.quorum_membership.total_nodes(), ); let mut parent_state = Arc::new(::from_header( &parent_leaf.block_header, @@ -257,7 +249,7 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - proposer_id: *api.public_key(), + proposer_id: *handle.public_key(), }; let mut signature = ::sign(private_key, leaf.commit().as_ref()) @@ -291,7 +283,7 @@ async fn build_quorum_proposal_and_signature( ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); // create a qc by aggregate signatures on the previous view (the data signed is last leaf commitment) - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let quorum_data = QuorumData { leaf_commit: leaf.commit(), }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index e72034d77e..662997d144 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -58,7 +58,7 @@ pub struct LateStartNode> /// The underlying networks belonging to the node pub networks: Networks, /// The context to which we will use to launch HotShot when it's time - pub context: SystemContext, + pub context: Arc>, } /// The runner of a test network @@ -355,7 +355,7 @@ where initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, - ) -> SystemContext { + ) -> Arc> { let node_id = self.next_node_id; self.next_node_id += 1; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 60df646828..464e5ba073 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,6 +1,6 @@ #![allow(clippy::panic)] use commit::Committable; -use hotshot::{types::SystemContextHandle, HotShotConsensusApi}; +use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::task_helpers::{build_quorum_proposal, key_pair_for_id}; @@ -24,10 +24,7 @@ async fn build_vote( ) -> GeneralConsensusMessage { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let membership = api.inner.memberships.quorum_membership.clone(); + let membership = handle.hotshot.memberships.quorum_membership.clone(); let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); @@ -65,8 +62,8 @@ async fn build_vote( leaf_commit: leaf.commit(), }, view, - api.public_key(), - api.private_key(), + handle.public_key(), + handle.private_key(), ) .expect("Failed to create quorum vote"); GeneralConsensusMessage::::Vote(vote) @@ -79,8 +76,8 @@ async fn build_vote( )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { - use hotshot::tasks::create_consensus_state; - use hotshot_task_impls::harness::run_harness; + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_certificate::QuorumCertificate; @@ -118,8 +115,13 @@ async fn test_consensus_task() { input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); } - let consensus_state = - create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle); + + inject_consensus_polls(&consensus_state).await; run_harness(input, output, consensus_state, false).await; } @@ -131,8 +133,8 @@ async fn test_consensus_task() { )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { - use hotshot::tasks::create_consensus_state; - use hotshot_task_impls::harness::run_harness; + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; use hotshot_testing::task_helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); @@ -163,8 +165,13 @@ async fn test_consensus_vote() { input.push(HotShotEvent::Shutdown); - let consensus_state = - create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle); + + inject_consensus_polls(&consensus_state).await; run_harness(input, output, consensus_state, false).await; } @@ -179,11 +186,12 @@ async fn test_consensus_vote() { // issue: https://github.com/EspressoSystems/HotShot/issues/2236 #[ignore] async fn test_consensus_with_vid() { + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::traits::BlockPayload; use hotshot::types::SignatureKey; use hotshot_example_types::block_types::TestBlockPayload; use hotshot_example_types::block_types::TestTransaction; - use hotshot_task_impls::harness::run_harness; + use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; use hotshot_testing::task_helpers::build_cert; use hotshot_testing::task_helpers::build_system_handle; use hotshot_testing::task_helpers::vid_init; @@ -206,20 +214,19 @@ async fn test_consensus_with_vid() { let (private_key_view2, public_key_view2) = key_pair_for_id(2); // For the test of vote logic with vid - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let pub_key = *api.public_key(); - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let pub_key = *handle.public_key(); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let vid = vid_init::(&quorum_membership, ViewNumber::new(2)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let vid_signature = - ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()) - .expect("Failed to sign payload commitment"); + let vid_signature = ::SignatureKey::sign( + handle.private_key(), + payload_commitment.as_ref(), + ) + .expect("Failed to sign payload commitment"); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let vid_disperse_inner = VidDisperse::from_membership( ViewNumber::new(2), @@ -278,11 +285,13 @@ async fn test_consensus_with_vid() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let consensus_state = hotshot::tasks::create_consensus_state( - handle.hotshot.inner.output_event_stream.0.clone(), - &handle, - ) - .await; + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle); + + inject_consensus_polls(&consensus_state).await; run_harness(input, output, consensus_state, false).await; } diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 4510cd3797..757ac926a5 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,8 +1,8 @@ -use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_example_types::{ - block_types::TestTransaction, - node_types::{MemoryImpl, TestTypes}, -}; +use hotshot::tasks::task_state::CreateTaskState; +use hotshot::types::SignatureKey; +use hotshot::types::SystemContextHandle; +use hotshot_example_types::node_types::MemoryImpl; +use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use hotshot_task_impls::{da::DATaskState, events::HotShotEvent}; use hotshot_types::{ data::{DAProposal, ViewNumber}, @@ -32,26 +32,20 @@ async fn test_da_task() { // Build the API for node 2. let handle = build_system_handle(2).await.0; - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let pub_key = *api.public_key(); + let pub_key = *handle.public_key(); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let payload_commitment = vid_commitment( &encoded_transactions, - handle - .hotshot - .inner - .memberships - .quorum_membership - .total_nodes(), + handle.hotshot.memberships.quorum_membership.total_nodes(), ); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); - let signature = - ::SignatureKey::sign(api.private_key(), &encoded_transactions_hash) - .expect("Failed to sign block payload"); + let signature = ::SignatureKey::sign( + handle.private_key(), + &encoded_transactions_hash, + ) + .expect("Failed to sign block payload"); let proposal = DAProposal { encoded_transactions: encoded_transactions.clone(), metadata: (), @@ -88,23 +82,12 @@ async fn test_da_task() { payload_commit: payload_commitment, }, ViewNumber::new(2), - api.public_key(), - api.private_key(), + handle.public_key(), + handle.private_key(), ) .expect("Failed to sign DAData"); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); - let da_state = DATaskState { - api: api.clone(), - consensus: handle.hotshot.get_consensus(), - da_membership: api.inner.memberships.da_membership.clone().into(), - da_network: api.inner.networks.da_network.clone(), - quorum_membership: api.inner.memberships.quorum_membership.clone().into(), - cur_view: ViewNumber::new(0), - vote_collector: None.into(), - public_key: *api.public_key(), - private_key: api.private_key().clone(), - id: handle.hotshot.inner.id, - }; + let da_state = DATaskState::>::create_from(&handle); run_harness(input, output, da_state, false).await; } diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 413bbb5f5f..90dc33ec50 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,5 +1,5 @@ -use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot::types::SignatureKey; +use hotshot_example_types::node_types::TestTypes; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::task_helpers::{build_quorum_proposal, vid_init}; use hotshot_types::{ @@ -26,19 +26,16 @@ async fn test_network_task() { // Build the API for node 2. let (handle, _tx, _rx) = build_system_handle(2).await; - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let pub_key = *api.public_key(); - let priv_key = api.private_key(); + let pub_key = *handle.public_key(); + let priv_key = handle.private_key(); // quorum membership for VID share distribution - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let encoded_transactions = Vec::new(); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let da_signature = ::SignatureKey::sign( - api.private_key(), + handle.private_key(), &encoded_transactions_hash, ) .expect("Failed to sign block payload"); @@ -47,7 +44,7 @@ async fn test_network_task() { let payload_commitment = vid_disperse.commit; let vid_signature = ::SignatureKey::sign( - api.private_key(), + handle.private_key(), payload_commitment.as_ref(), ) .expect("Failed to sign block commitment"); diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 6ca6366abe..4061099a1e 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,8 +1,5 @@ -use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_example_types::{ - block_types::TestTransaction, - node_types::{MemoryImpl, TestTypes}, -}; +use hotshot::types::SignatureKey; +use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_init}; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; @@ -27,13 +24,10 @@ async fn test_vid_task() { // Build the API for node 2. let handle = build_system_handle(2).await.0; - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; - let pub_key = *api.public_key(); + let pub_key = *handle.public_key(); // quorum membership for VID share distribution - let quorum_membership = handle.hotshot.inner.memberships.quorum_membership.clone(); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let vid = vid_init::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; @@ -41,9 +35,11 @@ async fn test_vid_task() { let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let signature = - ::SignatureKey::sign(api.private_key(), payload_commitment.as_ref()) - .expect("Failed to sign block payload!"); + let signature = ::SignatureKey::sign( + handle.private_key(), + payload_commitment.as_ref(), + ) + .expect("Failed to sign block payload!"); let proposal: DAProposal = DAProposal { encoded_transactions: encoded_transactions.clone(), metadata: (), @@ -101,15 +97,15 @@ async fn test_vid_task() { ); let vid_state = VIDTaskState { - api: api.clone(), + api: handle.clone(), consensus: handle.hotshot.get_consensus(), cur_view: ViewNumber::new(0), vote_collector: None, - network: api.inner.networks.quorum_network.clone(), - membership: api.inner.memberships.vid_membership.clone().into(), - public_key: *api.public_key(), - private_key: api.private_key().clone(), - id: handle.hotshot.inner.id, + network: handle.hotshot.networks.quorum_network.clone(), + membership: handle.hotshot.memberships.vid_membership.clone().into(), + public_key: *handle.public_key(), + private_key: handle.private_key().clone(), + id: handle.hotshot.id, }; run_harness(input, output, vid_state, false).await; } diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index 2fa93f6972..f32c99f09b 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -1,4 +1,5 @@ -use hotshot::HotShotConsensusApi; +use hotshot::tasks::task_state::CreateTaskState; +use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; @@ -15,17 +16,12 @@ async fn test_view_sync_task() { use hotshot_task_impls::view_sync::ViewSyncTaskState; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_vote::ViewSyncPreCommitData; - use hotshot_types::traits::consensus_api::ConsensusApi; - use std::time::Duration; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 5. let handle = build_system_handle(5).await.0; - let api: HotShotConsensusApi = HotShotConsensusApi { - inner: handle.hotshot.inner.clone(), - }; let vote_data = ViewSyncPreCommitData { relay: 0, @@ -34,8 +30,8 @@ async fn test_view_sync_task() { let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, ::Time::new(4), - hotshot_types::traits::consensus_api::ConsensusApi::public_key(&api), - hotshot_types::traits::consensus_api::ConsensusApi::private_key(&api), + hotshot_types::traits::consensus_api::ConsensusApi::public_key(&handle), + hotshot_types::traits::consensus_api::ConsensusApi::private_key(&handle), ) .expect("Failed to create a ViewSyncPreCommitVote!"); @@ -52,22 +48,10 @@ async fn test_view_sync_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); - let view_sync_state = ViewSyncTaskState { - current_view: ViewNumber::new(0), - next_view: ViewNumber::new(0), - network: api.inner.networks.quorum_network.clone(), - membership: api.inner.memberships.view_sync_membership.clone().into(), - public_key: *api.public_key(), - private_key: api.private_key().clone(), - api, - num_timeouts_tracked: 0, - replica_task_map: HashMap::default().into(), - pre_commit_relay_map: HashMap::default().into(), - commit_relay_map: HashMap::default().into(), - finalize_relay_map: HashMap::default().into(), - view_sync_timeout: Duration::new(10, 0), - id: handle.hotshot.inner.id, - last_garbage_collected_view: ViewNumber::new(0), - }; + let view_sync_state = ViewSyncTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle); run_harness(input, output, view_sync_state, false).await; } From b8040a620e3b74b5c33e2d9cbc6f2741a5fe5551 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Feb 2024 10:00:43 -0500 Subject: [PATCH 0805/1393] Bump local-ip-address from 0.5.7 to 0.6.0 (#2626) Bumps [local-ip-address](https://github.com/EstebanBorai/local-ip-address) from 0.5.7 to 0.6.0. - [Release notes](https://github.com/EstebanBorai/local-ip-address/releases) - [Changelog](https://github.com/EstebanBorai/local-ip-address/blob/main/CHANGELOG.md) - [Commits](https://github.com/EstebanBorai/local-ip-address/commits) --- updated-dependencies: - dependency-name: local-ip-address dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 56da94836e..b2de976254 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -123,7 +123,7 @@ async-std = { workspace = true } clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } -local-ip-address = "0.5.7" +local-ip-address = "0.6.0" [lints] workspace = true diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 835aca8265..315b8371dd 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -62,7 +62,7 @@ async-std = { workspace = true } clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } -local-ip-address = "0.5.7" +local-ip-address = "0.6.0" [lints] workspace = true From a98df52b28107023d411d1589b0a00f828bf0664 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 22 Feb 2024 16:20:01 +0100 Subject: [PATCH 0806/1393] Adds DA committee broadcast TransmitType (#2616) * Adds DA committee broadcast TransmitType * adds TransmitType::DACommitteeBroadcast * memory and web impl just forward to broadcast_message * libp2p sends direct messages to DA Committee members * Send direct messages to DA committee members in parallel * Use into_iter on BTreeSet in da_broadcast_message --- .../src/traits/networking/combined_network.rs | 8 +++++ .../src/traits/networking/libp2p_network.rs | 33 +++++++++++++++++++ .../src/traits/networking/memory_network.rs | 16 +++++++++ .../traits/networking/web_server_network.rs | 16 +++++++++ task-impls/src/network.rs | 5 ++- types/src/traits/network.rs | 15 +++++++++ 6 files changed, 92 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index d401ba15e8..bb064e6b34 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -280,6 +280,14 @@ impl ConnectedNetwork, TYPES::SignatureKey> .await } + async fn da_broadcast_message( + &self, + message: Message, + recipients: BTreeSet, + ) -> Result<(), NetworkError> { + self.broadcast_message(message, recipients).await + } + async fn direct_message( &self, message: Message, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 633b70e87e..dab01c30b7 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -47,6 +47,7 @@ use snafu::ResultExt; #[cfg(feature = "hotshot-testing")] use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; +use futures::future::join_all; use std::{ collections::BTreeSet, fmt::Debug, @@ -738,6 +739,32 @@ impl ConnectedNetwork for Libp2p } } + #[instrument(name = "Libp2pNetwork::da_broadcast_message", skip_all)] + async fn da_broadcast_message( + &self, + message: M, + recipients: BTreeSet, + ) -> Result<(), NetworkError> { + let future_results = recipients + .into_iter() + .map(|r| self.direct_message(message.clone(), r)); + let results = join_all(future_results).await; + + let errors: Vec<_> = results + .into_iter() + .filter_map(|r| match r { + Err(NetworkError::Libp2p { source }) => Some(source), + _ => None, + }) + .collect(); + + if errors.is_empty() { + Ok(()) + } else { + Err(NetworkError::Libp2pMulti { sources: errors }) + } + } + #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { if self.inner.handle.is_killed() { @@ -852,6 +879,12 @@ impl ConnectedNetwork for Libp2p .add(result.len()); Ok(result) } + TransmitType::DACommitteeBroadcast => { + error!("Received DACommitteeBroadcast, it should have not happened."); + Err(NetworkError::Libp2p { + source: NetworkNodeHandleError::Killed, + }) + } } } }; diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 10f8803f6a..85371b584d 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use bincode::Options; use dashmap::DashMap; use futures::StreamExt; +use hotshot_types::traits::network::MemoryNetworkError; use hotshot_types::{ boxed_sync, message::Message, @@ -352,6 +353,15 @@ impl ConnectedNetwork for Memory Ok(()) } + #[instrument(name = "MemoryNetwork::da_broadcast_message")] + async fn da_broadcast_message( + &self, + message: M, + recipients: BTreeSet, + ) -> Result<(), NetworkError> { + self.broadcast_message(message, recipients).await + } + #[instrument(name = "MemoryNetwork::direct_message")] async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { // debug!(?message, ?recipient, "Sending direct message"); @@ -450,6 +460,12 @@ impl ConnectedNetwork for Memory .add(ret.len()); Ok(ret) } + TransmitType::DACommitteeBroadcast => { + error!("Received DACommitteeBroadcast, it should have not happened."); + Err(NetworkError::MemoryNetwork { + source: MemoryNetworkError::Stub, + }) + } } }; boxed_sync(closure) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 861e55d3cd..505c93bac9 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -825,6 +825,16 @@ impl ConnectedNetwork, TYPES::Signatur } } + /// broadcast a message only to a DA committee + /// blocking + async fn da_broadcast_message( + &self, + message: Message, + recipients: BTreeSet, + ) -> Result<(), NetworkError> { + self.broadcast_message(message, recipients).await + } + /// Sends a direct message to a specific node /// blocking async fn direct_message( @@ -882,6 +892,12 @@ impl ConnectedNetwork, TYPES::Signatur .map(|x| x.get_message().unwrap()) .collect()) } + TransmitType::DACommitteeBroadcast => { + error!("Received DACommitteeBroadcast, it should have not happened."); + Err(NetworkError::WebServer { + source: WebServerNetworkError::ClientDisconnected, + }) + } } }; boxed_sync(closure) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 33b98b5bf4..250302ce47 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -274,7 +274,7 @@ impl, TYPES::Signa MessageKind::::from_consensus_message(SequencingMessage(Right( CommitteeConsensusMessage::DAProposal(proposal), ))), - TransmitType::Broadcast, + TransmitType::DACommitteeBroadcast, None, ), HotShotEvent::DAVoteSend(vote) => ( @@ -376,6 +376,9 @@ impl, TYPES::Signa let transmit_result = match transmit_type { TransmitType::Direct => net.direct_message(message, recipient.unwrap()).await, TransmitType::Broadcast => net.broadcast_message(message, committee).await, + TransmitType::DACommitteeBroadcast => { + net.da_broadcast_message(message, committee).await + } }; match transmit_result { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index d659fce2ba..d1fed267a1 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -76,6 +76,8 @@ pub enum TransmitType { Direct, /// broadcast the message to all Broadcast, + /// broadcast to DA committee + DACommitteeBroadcast, } /// Error type for networking @@ -87,6 +89,11 @@ pub enum NetworkError { /// source of error source: NetworkNodeHandleError, }, + /// collection of libp2p secific errors + Libp2pMulti { + /// sources of errors + sources: Vec, + }, /// memory network specific errors MemoryNetwork { /// source of error @@ -258,6 +265,14 @@ pub trait ConnectedNetwork: recipients: BTreeSet, ) -> Result<(), NetworkError>; + /// broadcast a message only to a DA committee + /// blocking + async fn da_broadcast_message( + &self, + message: M, + recipients: BTreeSet, + ) -> Result<(), NetworkError>; + /// Sends a direct message to a specific node /// blocking async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError>; From 7910b1e3a7efaf4912988a343981e7a5cf6a7777 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 22 Feb 2024 13:38:57 -0500 Subject: [PATCH 0807/1393] Remove unused TaskError structs (#2637) --- task-impls/src/consensus.rs | 5 ----- task-impls/src/da.rs | 5 ----- task-impls/src/transactions.rs | 5 ----- task-impls/src/upgrade.rs | 5 ----- task-impls/src/vid.rs | 5 ----- task-impls/src/view_sync.rs | 5 ----- task-impls/src/vote.rs | 5 ----- 7 files changed, 35 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f1d6afa6c6..db8df961a6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -39,7 +39,6 @@ use tracing::warn; use crate::vote::HandleVoteEvent; use chrono::Utc; -use snafu::Snafu; use std::{ collections::{BTreeMap, HashSet}, marker::PhantomData, @@ -49,10 +48,6 @@ use std::{ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument}; -/// Error returned by the consensus task -#[derive(Snafu, Debug)] -pub struct ConsensusTaskError {} - /// Alias for the block payload commitment and the associated metadata. pub struct CommitmentAndMetadata { /// Vid Commitment diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 8c574d7eab..aa65a1d26d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -28,17 +28,12 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; use crate::vote::HandleVoteEvent; -use snafu::Snafu; use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, error, instrument, warn}; /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; -#[derive(Snafu, Debug)] -/// Error type for consensus tasks -pub struct ConsensusTaskError {} - /// Tracks state of a DA task pub struct DATaskState< TYPES: NodeType, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4a7936ca25..994870d414 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -25,7 +25,6 @@ use hotshot_types::{ }, }; use hotshot_utils::bincode::bincode_opts; -use snafu::Snafu; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -36,10 +35,6 @@ use tracing::{debug, error, instrument, warn}; /// A type alias for `HashMap, T>` type CommitmentMap = HashMap, T>; -#[derive(Snafu, Debug)] -/// Error type for consensus tasks -pub struct ConsensusTaskError {} - /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 9d9d3a0958..23a21cb3b9 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -21,17 +21,12 @@ use hotshot_types::{ }; use crate::vote::HandleVoteEvent; -use snafu::Snafu; use std::sync::Arc; use tracing::{debug, error, instrument, warn}; /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; -#[derive(Snafu, Debug)] -/// Error type for consensus tasks -pub struct ConsensusTaskError {} - /// Tracks state of a DA task pub struct UpgradeTaskState< TYPES: NodeType, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index be40cb8e01..7f02da307c 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -25,15 +25,10 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use snafu::Snafu; use std::marker::PhantomData; use std::sync::Arc; use tracing::{debug, error, instrument, warn}; -#[derive(Snafu, Debug)] -/// Error type for consensus tasks -pub struct ConsensusTaskError {} - /// Tracks state of a VID task pub struct VIDTaskState< TYPES: NodeType, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 530848d5be..1f0c05114d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -35,7 +35,6 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; -use snafu::Snafu; use std::{collections::BTreeMap, collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; @@ -53,10 +52,6 @@ pub enum ViewSyncPhase { Finalize, } -#[derive(Snafu, Debug)] -/// Stub of a view sync error -pub struct ViewSyncTaskError {} - /// Type alias for a map from View Number to Relay to Vote Task type RelayMap = HashMap<::Time, BTreeMap>>; diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 651332f39b..35516a5683 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -21,13 +21,8 @@ use hotshot_types::{ traits::{election::Membership, node_implementation::NodeType}, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; -use snafu::Snafu; use tracing::{debug, error}; -#[derive(Snafu, Debug)] -/// Stub of a vote error -pub struct VoteTaskError {} - /// Task state for collecting votes of one type and emiting a certificate pub struct VoteCollectionTaskState< TYPES: NodeType, From 95b9dde0d6efe7d77e636a9f9561e63532eb2b4e Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Fri, 23 Feb 2024 08:28:36 -0800 Subject: [PATCH 0808/1393] Update Jellyfish (#2640) * Pin to version (0.4.0) * Update VID construction * Brings in persistent merkle tree for use in sequencer --- task-impls/src/vid.rs | 4 +++- testing/src/task_helpers.rs | 4 ++-- types/src/traits/block_contents.rs | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 7f02da307c..d27dc04d19 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -82,7 +82,9 @@ impl, A: ConsensusApi + // calculate vid shares let vid_disperse = spawn_blocking(move || { - let vid = VidScheme::new(chunk_size, num_quorum_committee, &srs).unwrap(); + let multiplicity = 1; + let vid = VidScheme::new(chunk_size, num_quorum_committee, multiplicity, &srs) + .unwrap(); vid.disperse(encoded_transactions.clone()).unwrap() }) .await; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4e65d21efd..4afec99b74 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -371,6 +371,6 @@ pub fn vid_init( // TODO let srs = hotshot_types::data::test_srs(num_committee); - - VidScheme::new(chunk_size, num_committee, srs).unwrap() + let multiplicity = 1; + VidScheme::new(chunk_size, num_committee, multiplicity, srs).unwrap() } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 7ac59e097e..8c5617803e 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -104,8 +104,8 @@ pub fn vid_commitment( // TODO let srs = test_srs(num_storage_nodes); - - let vid = VidScheme::new(num_chunks, num_storage_nodes, srs).unwrap(); + let multiplicity = 1; + let vid = VidScheme::new(num_chunks, num_storage_nodes, multiplicity, srs).unwrap(); vid.commit_only(encoded_transactions).unwrap() } From 846d7cc6d8c42b6d7cfc28fab5fcb9add7dd7691 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 23 Feb 2024 09:31:23 -0800 Subject: [PATCH 0809/1393] [CATCHUP] - Support and test restarting without losing state (#2612) * Add view to Initializer and update SystemContext initialization * Update test * Move late_decided_leaf and fix build * Add test * Replace genesis function * Fix genesis * Fix genesis state * try logging * remove logging * Undo justfile change * Fix build * Fix param after merge * Fix build * Rename and remove incremental fu * Fix build after merge * Add skip_late to test metadata, fix cur_view, modify num failures, remove instance_state reference * Update instead of increment view * Add view update to more tasks * Fix next_view for view sync --- examples/infra/mod.rs | 2 +- hotshot/src/lib.rs | 52 ++++++++----- hotshot/src/tasks/mod.rs | 12 +-- hotshot/src/tasks/task_state.rs | 37 +++++---- hotshot/src/types/handle.rs | 5 +- task-impls/src/consensus.rs | 19 +++-- task-impls/src/da.rs | 1 + task-impls/src/transactions.rs | 1 + task-impls/src/vid.rs | 1 + testing-macros/tests/tests.rs | 8 +- testing/src/spinning_task.rs | 61 ++++++++++++--- testing/src/task_helpers.rs | 2 +- testing/src/test_builder.rs | 17 +++-- testing/src/test_runner.rs | 130 +++++++++++++++++++------------- testing/tests/catchup.rs | 64 ++++++++++++++++ testing/tests/consensus_task.rs | 9 ++- testing/tests/da_task.rs | 2 +- testing/tests/view_sync_task.rs | 3 +- types/src/consensus.rs | 10 +-- 19 files changed, 308 insertions(+), 128 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 80ef89bb02..944456f917 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -324,7 +324,7 @@ pub trait RunDA< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let initializer = hotshot::HotShotInitializer::::from_genesis(&TestInstanceState {}) + let initializer = hotshot::HotShotInitializer::::from_genesis(TestInstanceState {}) .expect("Couldn't generate genesis block"); let config = self.get_config(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0fe820fb32..26db8d482e 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -102,6 +102,7 @@ impl> Networks { } /// Bundle of all the memberships a consensus instance uses +#[derive(Clone)] pub struct Memberships { /// Quorum Membership pub quorum_membership: TYPES::Membership, @@ -155,8 +156,7 @@ pub struct SystemContext> { } impl> SystemContext { - /// Creates a new [`Arc`] with the given configuration options and sets it up with the given - /// genesis block + /// Creates a new [`Arc`] with the given configuration options. /// /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. @@ -187,7 +187,9 @@ impl> SystemContext { // insert genesis (or latest block) to state map let mut validated_state_map = BTreeMap::default(); - let validated_state = Arc::new(TYPES::ValidatedState::genesis(&instance_state)); + let validated_state = Arc::new(TYPES::ValidatedState::from_header( + &anchored_leaf.block_header, + )); validated_state_map.insert( anchored_leaf.get_view_number(), View { @@ -211,10 +213,12 @@ impl> SystemContext { } }; saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns.clone()); + // View 1 doesn't have DA which is responsible for saving the payloads, so we store the + // payload for view 1 manually during the intialization. saved_payloads.insert(TYPES::Time::new(1), encoded_txns); } - let start_view = anchored_leaf.get_view_number(); + let start_view = initializer.start_view; let consensus = Consensus { instance_state, @@ -279,7 +283,7 @@ impl> SystemContext { broadcast_event(event, &self.output_event_stream.0).await; } - /// Publishes a transaction asynchronously to the network + /// Publishes a transaction asynchronously to the network. /// /// # Errors /// @@ -290,11 +294,12 @@ impl> SystemContext { transaction: TYPES::Transaction, ) -> Result<(), HotShotError> { trace!("Adding transaction to our own queue"); - // Wrap up a message - // TODO place a view number here that makes sense - // we haven't worked out how this will work yet - let message = DataMessage::SubmitTransaction(transaction.clone(), TYPES::Time::new(0)); + let api = self.clone(); + let view_number = api.consensus.read().await.cur_view; + + // Wrap up a message + let message = DataMessage::SubmitTransaction(transaction.clone(), view_number); async_spawn(async move { let da_membership = &api.memberships.da_membership.clone(); @@ -312,11 +317,11 @@ impl> SystemContext { sender: api.public_key.clone(), kind: MessageKind::from(message), }, - da_membership.get_committee(TYPES::Time::new(0)), + da_membership.get_committee(view_number), ), api .send_external_event(Event { - view_number: api.consensus.read().await.cur_view, + view_number, event: EventType::Transactions { transactions: vec![transaction], }, @@ -606,26 +611,37 @@ pub struct HotShotInitializer { /// Instance-level state. instance_state: TYPES::InstanceState, + + /// Starting view number that we are confident won't lead to a double vote after restart. + start_view: TYPES::Time, } impl HotShotInitializer { /// initialize from genesis /// # Errors /// If we are unable to apply the genesis block to the default state - pub fn from_genesis( - instance_state: &TYPES::InstanceState, - ) -> Result> { + pub fn from_genesis(instance_state: TYPES::InstanceState) -> Result> { Ok(Self { - inner: Leaf::genesis(instance_state), - instance_state: instance_state.clone(), + inner: Leaf::genesis(&instance_state), + instance_state, + start_view: TYPES::Time::new(0), }) } - /// reload previous state based on most recent leaf and the instance-level state. - pub fn from_reload(anchor_leaf: Leaf, instance_state: TYPES::InstanceState) -> Self { + /// Reload previous state based on most recent leaf and the instance-level state. + /// + /// # Arguments + /// * `start_view` - The minimum view number that we are confident won't lead to a double vote + /// after restart. + pub fn from_reload( + anchor_leaf: Leaf, + instance_state: TYPES::InstanceState, + start_view: TYPES::Time, + ) -> Self { Self { inner: anchor_leaf, instance_state, + start_view, } } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index d250f549d8..cefe1ecd4a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -172,7 +172,7 @@ pub async fn add_consensus_task>( rx: Receiver>, handle: &SystemContextHandle, ) { - let consensus_state = ConsensusTaskState::create_from(handle); + let consensus_state = ConsensusTaskState::create_from(handle).await; inject_consensus_polls(&consensus_state).await; @@ -187,7 +187,7 @@ pub async fn add_vid_task>( rx: Receiver>, handle: &SystemContextHandle, ) { - let vid_state = VIDTaskState::create_from(handle); + let vid_state = VIDTaskState::create_from(handle).await; let task = Task::new(tx, rx, task_reg.clone(), vid_state); task_reg.run_task(task).await; } @@ -199,7 +199,7 @@ pub async fn add_upgrade_task>( rx: Receiver>, handle: &SystemContextHandle, ) { - let upgrade_state = UpgradeTaskState::create_from(handle); + let upgrade_state = UpgradeTaskState::create_from(handle).await; let task = Task::new(tx, rx, task_reg.clone(), upgrade_state); task_reg.run_task(task).await; @@ -212,7 +212,7 @@ pub async fn add_da_task>( handle: &SystemContextHandle, ) { // build the da task - let da_state = DATaskState::create_from(handle); + let da_state = DATaskState::create_from(handle).await; let task = Task::new(tx, rx, task_reg.clone(), da_state); task_reg.run_task(task).await; @@ -225,7 +225,7 @@ pub async fn add_transaction_task> rx: Receiver>, handle: &SystemContextHandle, ) { - let transactions_state = TransactionTaskState::create_from(handle); + let transactions_state = TransactionTaskState::create_from(handle).await; let task = Task::new(tx, rx, task_reg.clone(), transactions_state); task_reg.run_task(task).await; @@ -238,7 +238,7 @@ pub async fn add_view_sync_task>( rx: Receiver>, handle: &SystemContextHandle, ) { - let view_sync_state = ViewSyncTaskState::create_from(handle); + let view_sync_state = ViewSyncTaskState::create_from(handle).await; let task = Task::new(tx, rx, task_reg.clone(), view_sync_state); task_reg.run_task(task).await; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 19f6e026dd..6e8d023113 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,5 +1,6 @@ use crate::types::SystemContextHandle; +use async_trait::async_trait; use hotshot_constants::VERSION_0_1; use hotshot_task_impls::{ consensus::{CommitmentAndMetadata, ConsensusTaskState}, @@ -24,24 +25,26 @@ use std::{ }; /// Trait for creating task states. +#[async_trait] pub trait CreateTaskState where TYPES: NodeType, I: NodeImplementation, { /// Function to create the task state from a given `SystemContextHandle`. - fn create_from(handle: &SystemContextHandle) -> Self; + async fn create_from(handle: &SystemContextHandle) -> Self; } +#[async_trait] impl> CreateTaskState for UpgradeTaskState> { - fn create_from( + async fn create_from( handle: &SystemContextHandle, ) -> UpgradeTaskState> { UpgradeTaskState { api: handle.clone(), - cur_view: TYPES::Time::new(0), + cur_view: handle.get_cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_network: handle.hotshot.networks.quorum_network.clone(), should_vote: |_upgrade_proposal| false, @@ -53,16 +56,17 @@ impl> CreateTaskState } } +#[async_trait] impl> CreateTaskState for VIDTaskState> { - fn create_from( + async fn create_from( handle: &SystemContextHandle, ) -> VIDTaskState> { VIDTaskState { api: handle.clone(), consensus: handle.hotshot.get_consensus(), - cur_view: TYPES::Time::new(0), + cur_view: handle.get_cur_view().await, vote_collector: None, network: handle.hotshot.networks.quorum_network.clone(), membership: handle.hotshot.memberships.vid_membership.clone().into(), @@ -73,10 +77,11 @@ impl> CreateTaskState } } +#[async_trait] impl> CreateTaskState for DATaskState> { - fn create_from( + async fn create_from( handle: &SystemContextHandle, ) -> DATaskState> { DATaskState { @@ -85,7 +90,7 @@ impl> CreateTaskState da_membership: handle.hotshot.memberships.da_membership.clone().into(), da_network: handle.hotshot.networks.da_network.clone(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - cur_view: TYPES::Time::new(0), + cur_view: handle.get_cur_view().await, vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -94,15 +99,17 @@ impl> CreateTaskState } } +#[async_trait] impl> CreateTaskState for ViewSyncTaskState> { - fn create_from( + async fn create_from( handle: &SystemContextHandle, ) -> ViewSyncTaskState> { + let cur_view = handle.get_cur_view().await; ViewSyncTaskState { - current_view: TYPES::Time::new(0), - next_view: TYPES::Time::new(0), + current_view: cur_view, + next_view: cur_view, network: handle.hotshot.networks.quorum_network.clone(), membership: handle .hotshot @@ -125,10 +132,11 @@ impl> CreateTaskState } } +#[async_trait] impl> CreateTaskState for TransactionTaskState> { - fn create_from( + async fn create_from( handle: &SystemContextHandle, ) -> TransactionTaskState> { TransactionTaskState { @@ -136,7 +144,7 @@ impl> CreateTaskState consensus: handle.hotshot.get_consensus(), transactions: Arc::default(), seen_transactions: HashSet::new(), - cur_view: TYPES::Time::new(0), + cur_view: handle.get_cur_view().await, network: handle.hotshot.networks.quorum_network.clone(), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -146,10 +154,11 @@ impl> CreateTaskState } } +#[async_trait] impl> CreateTaskState for ConsensusTaskState> { - fn create_from( + async fn create_from( handle: &SystemContextHandle, ) -> ConsensusTaskState> { let consensus = handle.hotshot.get_consensus(); @@ -163,7 +172,7 @@ impl> CreateTaskState ConsensusTaskState { consensus, timeout: handle.hotshot.config.next_view_timeout, - cur_view: TYPES::Time::new(0), + cur_view: handle.get_cur_view().await, payload_commitment_and_metadata: Some(CommitmentAndMetadata { commitment: payload_commitment, metadata, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 3cd40e4fa8..b6e1ded7f8 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -171,9 +171,8 @@ impl + 'static> SystemContextHandl self.hotshot.public_key.clone() } - /// Wrapper to get this node's current view - #[cfg(feature = "hotshot-testing")] - pub async fn get_current_view(&self) -> TYPES::Time { + /// Wrapper to get the view number this node is on. + pub async fn get_cur_view(&self) -> TYPES::Time { self.hotshot.consensus.read().await.cur_view } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index db8df961a6..cda79cd3bf 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -395,15 +395,24 @@ impl, A: ConsensusApi + .await; } })); - let consensus = self.consensus.read().await; + let consensus = self.consensus.upgradable_read().await; consensus .metrics .current_view .set(usize::try_from(self.cur_view.get_u64()).unwrap()); - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(self.cur_view.get_u64()).unwrap() - - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), - ); + // Do the comparison before the substraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(self.cur_view.get_u64()).unwrap() + > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(self.cur_view.get_u64()).unwrap() + - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), + ); + } + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.update_view(new_view); + drop(consensus); return true; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index aa65a1d26d..67850d5059 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -228,6 +228,7 @@ impl, A: ConsensusApi + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; + self.consensus.write().await.update_view(view); // Inject view info into network let is_da = self diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 994870d414..293842ee15 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -184,6 +184,7 @@ impl, A: ConsensusApi + make_block = self.membership.get_leader(view) == self.public_key; } self.cur_view = view; + self.consensus.write().await.update_view(view); // return if we aren't the next leader or we skipped last view and aren't the current leader. if !make_block && self.membership.get_leader(self.cur_view + 1) != self.public_key { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index d27dc04d19..9d603a8cc2 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -146,6 +146,7 @@ impl, A: ConsensusApi + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; + self.consensus.write().await.update_view(view); // Start polling for VID disperse for the new view self.network diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index 844d3dcafd..cdd48b6b32 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -48,7 +48,7 @@ cross_tests!( }]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], + node_changes: vec![(5, dead_nodes)] }; metadata.overall_safety_properties.num_failed_views = 3; metadata.overall_safety_properties.num_successful_views = 25; @@ -86,7 +86,7 @@ cross_tests!( ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], + node_changes: vec![(5, dead_nodes)] }; metadata.overall_safety_properties.num_failed_views = 3; @@ -142,7 +142,7 @@ cross_tests!( ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], + node_changes: vec![(5, dead_nodes)] }; metadata @@ -178,7 +178,7 @@ cross_tests!( ]; metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)], + node_changes: vec![(5, dead_nodes)] }; // 2 nodes fail triggering view sync, expect no other timeouts diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index c7e73fbde8..8f37ec0dba 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,13 +1,20 @@ use std::collections::HashMap; -use hotshot::traits::TestableNodeImplementation; - use crate::test_runner::HotShotTaskCompleted; -use crate::test_runner::LateStartNode; -use crate::test_runner::Node; +use crate::test_runner::{LateStartNode, Node, TestRunner}; +use either::{Left, Right}; +use hotshot::{traits::TestableNodeImplementation, HotShotInitializer}; +use hotshot_example_types::state_types::TestInstanceState; use hotshot_task::task::{Task, TaskState, TestTaskState}; -use hotshot_types::traits::network::ConnectedNetwork; -use hotshot_types::{event::Event, traits::node_implementation::NodeType}; +use hotshot_types::{data::Leaf, ValidatorConfig}; +use hotshot_types::{ + event::Event, + message::Message, + traits::{ + network::ConnectedNetwork, + node_implementation::{NodeImplementation, NodeType}, + }, +}; use snafu::Snafu; use std::collections::BTreeMap; /// convience type for state and block @@ -29,6 +36,8 @@ pub struct SpinningTask> { pub(crate) changes: BTreeMap>, /// most recent view seen by spinning task pub(crate) latest_view: Option, + /// Last decided leaf that can be used as the anchor leaf to initialize the node. + pub(crate) last_decided_leaf: Leaf, } impl> TaskState for SpinningTask { @@ -48,8 +57,14 @@ impl> TaskState for Spinni } } -impl> TestTaskState - for SpinningTask +impl< + TYPES: NodeType, + I: TestableNodeImplementation, + N: ConnectedNetwork, TYPES::SignatureKey>, + > TestTaskState for SpinningTask +where + I: TestableNodeImplementation, + I: NodeImplementation, { type Message = Event; @@ -79,6 +94,34 @@ impl> TestTaskState let node_id = idx.try_into().unwrap(); if let Some(node) = state.late_start.remove(&node_id) { tracing::error!("Node {} spinning up late", idx); + let node_id = idx.try_into().unwrap(); + let context = match node.context { + Left(context) => context, + // Node not initialized. Initialize it + // based on the received leaf. + Right((storage, memberships, config)) => { + let initializer = HotShotInitializer::::from_reload( + state.last_decided_leaf.clone(), + TestInstanceState {}, + view_number, + ); + // We assign node's public key and stake value rather than read from config file since it's a test + let validator_config = + ValidatorConfig::generated_from_seed_indexed( + [0u8; 32], node_id, 1, + ); + TestRunner::add_node_with_config( + node_id, + node.networks.clone(), + storage, + memberships, + initializer, + config, + validator_config, + ) + .await + } + }; // Create the node and add it to the state, so we can shut them // down properly later to avoid the overflow error in the overall @@ -86,7 +129,7 @@ impl> TestTaskState let node = Node { node_id, networks: node.networks, - handle: node.context.run_tasks().await, + handle: context.run_tasks().await, }; state.handles.push(node.clone()); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4afec99b74..faf0b620a8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -63,7 +63,7 @@ pub async fn build_system_handle( let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis(&TestInstanceState {}).unwrap(); + let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 0617d18d26..4fb60d2b90 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -10,15 +10,15 @@ use hotshot_types::{ }; use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; +use super::{ + overall_safety_task::OverallSafetyPropertiesDescription, txn_task::TxnTaskDescription, +}; use crate::{ spinning_task::SpinningTaskDescription, test_launcher::{ResourceGenerators, TestLauncher}, view_sync_task::ViewSyncTaskDescription, }; - -use super::{ - overall_safety_task::OverallSafetyPropertiesDescription, txn_task::TxnTaskDescription, -}; +use hotshot_example_types::state_types::TestInstanceState; /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { @@ -43,6 +43,9 @@ pub struct TestMetadata { pub total_nodes: usize, /// nodes available at start pub start_nodes: usize, + /// Whether to skip initializing nodes that will start late, which will catch up later with + /// `HotShotInitializer::from_reload` in the spinning task. + pub skip_late: bool, /// number of bootstrap nodes (libp2p usage only) pub num_bootstrap_nodes: usize, /// Size of the DA committee for the test @@ -177,6 +180,7 @@ impl Default for TestMetadata { min_transactions: 0, total_nodes: num_nodes, start_nodes: num_nodes, + skip_late: false, num_bootstrap_nodes: num_nodes, da_committee_size: num_nodes, spinning_properties: SpinningTaskDescription { @@ -203,7 +207,10 @@ impl TestMetadata { /// # Panics /// if some of the the configuration values are zero #[must_use] - pub fn gen_launcher>( + pub fn gen_launcher< + TYPES: NodeType, + I: TestableNodeImplementation, + >( self, node_id: u64, ) -> TestLauncher diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 662997d144..3148f23089 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -12,6 +12,7 @@ use crate::{ view_sync_task::ViewSyncTask, }; use async_broadcast::broadcast; +use either::Either::{self, Left, Right}; use futures::future::join_all; use hotshot::{types::SystemContextHandle, Memberships}; use hotshot_example_types::state_types::TestInstanceState; @@ -22,6 +23,7 @@ use hotshot_constants::EVENT_CHANNEL_SIZE; use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::{ consensus::ConsensusMetricsValue, + data::Leaf, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, @@ -52,13 +54,24 @@ pub struct Node> { pub handle: SystemContextHandle, } +/// Either the node context or the parameters to construct the context for nodes that start late. +pub type LateNodeContext = Either< + Arc>, + ( + >::Storage, + Memberships, + HotShotConfig<::SignatureKey, ::ElectionConfigType>, + ), +>; + /// A yet-to-be-started node that participates in tests #[derive(Clone)] pub struct LateStartNode> { /// The underlying networks belonging to the node pub networks: Networks, - /// The context to which we will use to launch HotShot when it's time - pub context: Arc>, + /// Either the context to which we will use to launch HotShot for initialized node when it's + /// time, or the parameters that will be used to initialize the node and launch HotShot. + pub context: LateNodeContext, } /// The runner of a test network @@ -110,6 +123,7 @@ where I: NodeImplementation, { /// excecute test + /// /// # Panics /// if the test fails #[allow(clippy::too_many_lines)] @@ -196,6 +210,7 @@ where late_start, latest_view: None, changes, + last_decided_leaf: Leaf::genesis(&TestInstanceState {}), }; let spinning_task = TestTask::, SpinningTask>::new( Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), @@ -299,45 +314,82 @@ where ); } - /// add nodes + /// Add nodes. + /// /// # Panics /// Panics if unable to create a [`HotShotInitializer`] pub async fn add_nodes(&mut self, total: usize, late_start: &HashSet) -> Vec { let mut results = vec![]; for i in 0..total { - tracing::debug!("launch node {}", i); let node_id = self.next_node_id; + self.next_node_id += 1; + tracing::debug!("launch node {}", i); let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); - let initializer = - HotShotInitializer::::from_genesis(&TestInstanceState {}).unwrap(); + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + TYPES::Membership::default_election_config(config.total_nodes.get() as u64) + }); + let committee_election_config = I::committee_election_config_generator(); + let memberships = Memberships { + quorum_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + da_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + committee_election_config(config.da_committee_size as u64), + ), + vid_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + ), + view_sync_membership: ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config, + ), + }; let networks = (self.launcher.resource_generator.channel_generator)(node_id); - // We assign node's public key and stake value rather than read from config file since it's a test - let validator_config = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); - let hotshot = self - .add_node_with_config( - networks.clone(), - storage, - initializer, - config, - validator_config, - ) - .await; - if late_start.contains(&node_id) { + + if self.launcher.metadata.skip_late && late_start.contains(&node_id) { self.late_start.insert( node_id, LateStartNode { networks, - context: hotshot, + context: Right((storage, memberships, config)), }, ); } else { - self.nodes.push(Node { + let initializer = + HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); + // We assign node's public key and stake value rather than read from config file since it's a test + let validator_config = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + let hotshot = Self::add_node_with_config( node_id, - networks, - handle: hotshot.run_tasks().await, - }); + networks.clone(), + storage, + memberships, + initializer, + config, + validator_config, + ) + .await; + if late_start.contains(&node_id) { + self.late_start.insert( + node_id, + LateStartNode { + networks, + context: Left(hotshot), + }, + ); + } else { + self.nodes.push(Node { + node_id, + networks, + handle: hotshot.run_tasks().await, + }); + } } results.push(node_id); } @@ -349,48 +401,24 @@ where /// # Panics /// if unable to initialize the node's `SystemContext` based on the config pub async fn add_node_with_config( - &mut self, + node_id: u64, networks: Networks, storage: I::Storage, + memberships: Memberships, initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, ) -> Arc> { - let node_id = self.next_node_id; - self.next_node_id += 1; - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - TYPES::Membership::default_election_config(config.total_nodes.get() as u64) - }); - let committee_election_config = I::committee_election_config_generator(); + let network_bundle = hotshot::Networks { quorum_network: networks.0.clone(), da_network: networks.1.clone(), _pd: PhantomData, }; - let memberships = Memberships { - quorum_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config.clone(), - ), - da_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - committee_election_config(config.da_committee_size as u64), - ), - vid_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config.clone(), - ), - view_sync_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config, - ), - }; - SystemContext::new( public_key, private_key, diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index eae668139a..339103fb1b 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -236,3 +236,67 @@ async fn test_catchup_in_view_sync() { .run_test() .await; } + +// Almost the same as `test_catchup`, but with catchup nodes reloaded from anchor leaf rather than +// initialized from genesis. +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_catchup_reload() { + use std::time::Duration; + + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestMetadata, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 2000, + ..Default::default() + }; + let mut metadata = TestMetadata::default(); + let catchup_node = vec![ChangeNode { + idx: 19, + updown: UpDown::Up, + }]; + + metadata.timing_data = timing_data; + metadata.start_nodes = 19; + metadata.skip_late = true; + metadata.total_nodes = 20; + + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.spinning_properties = SpinningTaskDescription { + // Start the nodes before their leadership. + node_changes: vec![(13, catchup_node)], + }; + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep commiting rounds after the catchup, but not the full 50. + num_successful_views: 22, + check_leaf: true, + ..Default::default() + }; + + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; +} diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 464e5ba073..ff960e667f 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -119,7 +119,8 @@ async fn test_consensus_task() { TestTypes, MemoryImpl, SystemContextHandle, - >::create_from(&handle); + >::create_from(&handle) + .await; inject_consensus_polls(&consensus_state).await; @@ -169,7 +170,8 @@ async fn test_consensus_vote() { TestTypes, MemoryImpl, SystemContextHandle, - >::create_from(&handle); + >::create_from(&handle) + .await; inject_consensus_polls(&consensus_state).await; @@ -289,7 +291,8 @@ async fn test_consensus_with_vid() { TestTypes, MemoryImpl, SystemContextHandle, - >::create_from(&handle); + >::create_from(&handle) + .await; inject_consensus_polls(&consensus_state).await; diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 757ac926a5..fe06a34543 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -88,6 +88,6 @@ async fn test_da_task() { .expect("Failed to sign DAData"); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); - let da_state = DATaskState::>::create_from(&handle); + let da_state = DATaskState::>::create_from(&handle).await; run_harness(input, output, da_state, false).await; } diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index f32c99f09b..f0d5679273 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -52,6 +52,7 @@ async fn test_view_sync_task() { TestTypes, MemoryImpl, SystemContextHandle, - >::create_from(&handle); + >::create_from(&handle) + .await; run_harness(input, output, view_sync_state, false).await; } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 02e51df512..3a1c1e06d5 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -39,7 +39,7 @@ pub struct Consensus { /// view -> DA cert pub saved_da_certs: HashMap>, - /// cur_view from pseudocode + /// View number that is currently on. pub cur_view: TYPES::Time, /// last view had a successful decide event @@ -235,11 +235,9 @@ impl Default for ConsensusMetricsValue { } impl Consensus { - /// increment the current view - /// NOTE may need to do gc here - pub fn increment_view(&mut self) -> TYPES::Time { - self.cur_view += 1; - self.cur_view + /// Update the current view. + pub fn update_view(&mut self, view_number: TYPES::Time) { + self.cur_view = view_number; } /// gather information from the parent chain of leafs From 5941350348413cd37e60cd01bce074b1a7417002 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Sun, 25 Feb 2024 14:59:50 -0500 Subject: [PATCH 0810/1393] [DEAD_CODE][LOW_PRIORITY] Removing a bunch of old libp2p examples (#2639) * Remove dead code * Remove done todo comment * Remove webui stuff --- .../src/traits/networking/libp2p_network.rs | 12 - .../examples/common/lossy_network.rs | 579 ------------- libp2p-networking/examples/common/mod.rs | 806 ------------------ libp2p-networking/examples/common/web.rs | 114 --- libp2p-networking/examples/counter.rs | 58 -- libp2p-networking/src/lib.rs | 3 - libp2p-networking/src/message.rs | 12 - .../src/network/behaviours/dht/mod.rs | 1 - .../src/network/behaviours/direct_message.rs | 2 - libp2p-networking/src/network/mod.rs | 21 +- libp2p-networking/src/network/node.rs | 1 - libp2p-networking/src/network/node/config.rs | 4 - libp2p-networking/src/network/node/handle.rs | 39 +- 13 files changed, 4 insertions(+), 1648 deletions(-) delete mode 100644 libp2p-networking/examples/common/lossy_network.rs delete mode 100644 libp2p-networking/examples/common/mod.rs delete mode 100644 libp2p-networking/examples/common/web.rs delete mode 100644 libp2p-networking/examples/counter.rs delete mode 100644 libp2p-networking/src/message.rs diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index dab01c30b7..03c570de70 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -510,18 +510,6 @@ impl Libp2pNetwork { }); } - /// make network aware of known peers - async fn _add_known_peers( - &self, - known_peers: Vec<(Option, Multiaddr)>, - ) -> Result<(), NetworkError> { - self.inner - .handle - .add_known_peers(known_peers) - .await - .map_err(Into::::into) - } - /// Handle events for Version 0.1 of the protocol. async fn handle_recvd_events_0_1( &self, diff --git a/libp2p-networking/examples/common/lossy_network.rs b/libp2p-networking/examples/common/lossy_network.rs deleted file mode 100644 index e5b207a327..0000000000 --- a/libp2p-networking/examples/common/lossy_network.rs +++ /dev/null @@ -1,579 +0,0 @@ -use super::ExecutionEnvironment; -use async_compatibility_layer::art::async_spawn; -use futures::TryStreamExt; -use netlink_packet_route::DecodeError; -use nix::{ - errno::Errno, - sched::{setns, CloneFlags}, -}; -use rtnetlink::{ - new_connection_with_socket, sys::SmolSocket, AddressHandle, Handle, NetemQdisc, - NetworkNamespace, RouteHandle, TcNetemCorrelations, TcNetemCorrupt, TcNetemDelay, TcNetemQopt, - NETNS_PATH, -}; -use snafu::{ResultExt, Snafu}; -use std::{ - fs::File, - net::{AddrParseError, Ipv4Addr}, - os::unix::{io::IntoRawFd, prelude::AsRawFd}, - path::Path, - process::Command, -}; -use tracing::{error, info}; - -/// hardcoded default values -pub const LOSSY_QDISC: NetemQdisc = NetemQdisc { - config: TcNetemQopt { - limit: 10240, - loss: 2, - gap: 0, - duplicate: 0, - }, - delay: Some(TcNetemDelay { - delay: 500000, - stddev: 0, - }), - correlations: Some(TcNetemCorrelations { - delay_corr: 0, - loss_corr: 0, - dup_corr: 0, - }), - corruption: Some(TcNetemCorrupt { prob: 2, corr: 0 }), - reorder: None, -}; - -async fn del_link(handle: Handle, name: String) -> Result<(), LossyNetworkError> { - let mut links = handle.link().get().match_name(name.clone()).execute(); - if let Some(link) = links.try_next().await.context(RtNetlinkSnafu)? { - Ok(handle - .link() - .del(link.header.index) - .execute() - .await - .context(RtNetlinkSnafu)?) - } else { - error!("link {} not found", name); - Ok(()) - } -} - -/// represent the current network namespace -/// (useful if returning) -struct Netns { - cur: File, -} - -impl Netns { - /// creates new network namespace - /// and enters namespace - async fn new(path: &str) -> Result { - // create new ns - NetworkNamespace::add(path.to_string()) - .await - .context(RtNetlinkSnafu)?; - - // entry new ns - let ns_path = Path::new(NETNS_PATH); - let file = File::open(ns_path.join(path)).context(IoSnafu)?; - - Ok(Self { cur: file }) - } -} - -/// A description of a lossy network -#[derive(Clone, derive_builder::Builder, custom_debug::Debug)] -pub struct LossyNetwork { - /// Ethernet interface that is connected to WAN - eth_name: String, - /// metadata describing how to isolate. Only used when `env_type` is `Metal` - isolation_config: Option, - /// The network loss conditions - netem_config: NetemQdisc, - /// the execution environment - env_type: ExecutionEnvironment, -} - -impl LossyNetwork { - /// Create isolated environment in separate network namespace via network bridge - pub async fn isolate(&self) -> Result<(), LossyNetworkError> { - if let Some(ref isolation_config) = self.isolation_config { - isolation_config.isolate_netlink(&self.eth_name).await? - } - Ok(()) - } - - /// Delete isolated environment and network bridge - pub async fn undo_isolate(&self) -> Result<(), LossyNetworkError> { - if let Some(ref isolation_config) = self.isolation_config { - isolation_config - .undo_isolate_netlink(self.eth_name.clone()) - .await? - } - Ok(()) - } - - /// Create a network qdisc - pub async fn create_qdisc(&self) -> Result<(), LossyNetworkError> { - match self.env_type { - ExecutionEnvironment::Docker => { - self.create_qdisc_netlink(&self.eth_name).await?; - } - ExecutionEnvironment::Metal => match self.isolation_config { - Some(ref isolation_config) => { - self.create_qdisc_netlink(&isolation_config.veth2_name.clone()) - .await?; - } - None => return Err(LossyNetworkError::InvalidConfig), - }, - } - Ok(()) - } - - /// Internal invocation to netlink library - /// to create the qdisc - async fn create_qdisc_netlink(&self, veth: &str) -> Result<(), LossyNetworkError> { - let (connection, handle, _) = - new_connection_with_socket::().context(IoSnafu)?; - async_spawn(connection); - let mut links = handle.link().get().match_name(veth.to_string()).execute(); - if let Some(link) = links.try_next().await.context(RtNetlinkSnafu)? { - handle - .qdisc() - .add(link.header.index as i32) - .netem(self.netem_config.clone()) - .context(DecodeSnafu)? - .execute() - .await - .context(RtNetlinkSnafu)? - } else { - return Err(LossyNetworkError::InvalidConfig); - } - Ok(()) - } -} - -/// Hardcoded default values for current AWS setup -impl Default for IsolationConfig { - fn default() -> Self { - Self { - counter_ns: "COUNTER_NS".to_string(), - bridge_addr: "172.13.0.1".to_string(), - bridge_name: "br0".to_string(), - veth_name: "veth1".to_string(), - veth2_name: "veth2".to_string(), - veth2_addr: "172.13.0.2".to_string(), - } - } -} - -/// A description of how the network should be isolated -#[derive(Clone, derive_builder::Builder, custom_debug::Debug)] -#[builder(default)] -pub struct IsolationConfig { - /// the network namespace name to create - counter_ns: String, - /// the bridge ip address - bridge_addr: String, - /// the bridge name - bridge_name: String, - /// the virtual ethernet interface name - /// that lives in the default/root network namespace - veth_name: String, - /// the virtual ethernet interface name - /// that lives in `counter_ns` - veth2_name: String, - /// the virtual ethernet interface ip address - /// that lives in `counter_ns` - veth2_addr: String, -} - -impl IsolationConfig { - /// Prepares server for latency by: - /// - creating a separate network namespace denoted `counter_ns` - /// - creating a virtual ethernet device (veth2) in this namespace - /// - bridging the virtual ethernet device within COUNTER_NS to the default/root network namespace - /// - adding firewall rules to allow traffic to flow between the network bridge and outside world - /// - execute the demo inside network namespace - async fn isolate_netlink(&self, eth_name: &str) -> Result<(), LossyNetworkError> { - let (connection, handle, _) = - new_connection_with_socket::().context(IoSnafu)?; - async_spawn(connection); - - // create new netns - let counter_ns_name = self.counter_ns.clone(); - let counter_ns = Netns::new(&counter_ns_name).await?; - - // create veth interfaces - let veth = self.veth_name.clone(); - let veth_2 = self.veth2_name.clone(); - - handle - .link() - .add() - .veth(veth.clone(), veth_2.clone()) - .execute() - .await - .context(RtNetlinkSnafu)?; - let veth_idx = handle - .link() - .get() - .match_name(veth.clone()) - .execute() - .try_next() - .await - .context(RtNetlinkSnafu)? - .ok_or(LossyNetworkError::InvalidConfig)? - .header - .index; - let veth_2_idx = handle - .link() - .get() - .match_name(veth_2.clone()) - .execute() - .try_next() - .await - .context(RtNetlinkSnafu)? - .ok_or(LossyNetworkError::InvalidConfig)? - .header - .index; - - // set interfaces up - handle - .link() - .set(veth_idx) - .up() - .execute() - .await - .context(RtNetlinkSnafu)?; - handle - .link() - .set(veth_2_idx) - .up() - .execute() - .await - .context(RtNetlinkSnafu)?; - - // move veth_2 to counter_ns - handle - .link() - .set(veth_2_idx) - .setns_by_fd(counter_ns.cur.as_raw_fd()) - .execute() - .await - .context(RtNetlinkSnafu)?; - - let bridge_name = self.bridge_name.clone(); - - handle - .link() - .add() - .bridge(bridge_name.clone()) - .execute() - .await - .context(RtNetlinkSnafu)?; - let bridge_idx = handle - .link() - .get() - .match_name(bridge_name.clone()) - .execute() - .try_next() - .await - .context(RtNetlinkSnafu)? - .ok_or(LossyNetworkError::InvalidConfig)? - .header - .index; - - // set bridge up - handle - .link() - .set(bridge_idx) - .up() - .execute() - .await - .context(RtNetlinkSnafu)?; - - // set veth master to bridge - handle - .link() - .set(veth_idx) - .master(bridge_idx) - .execute() - .await - .context(RtNetlinkSnafu)?; - - // add ip address to bridge - let bridge_addr = self - .bridge_addr - .parse::() - .context(AddrParseSnafu)?; - let bridge_range = 16; - AddressHandle::new(handle) - .add(bridge_idx, std::net::IpAddr::V4(bridge_addr), bridge_range) - .execute() - .await - .context(RtNetlinkSnafu)?; - - // switch to counter_ns - setns(counter_ns.cur.as_raw_fd(), CloneFlags::CLONE_NEWNET).context(SetNsSnafu)?; - - // get connection metadata in new net namespace - let (connection, handle, _) = - new_connection_with_socket::().context(IoSnafu)?; - async_spawn(connection); - - // set lo interface to up - let lo_idx = handle - .link() - .get() - .match_name("lo".to_string()) - .execute() - .try_next() - .await - .context(RtNetlinkSnafu)? - .ok_or(LossyNetworkError::InvalidConfig)? - .header - .index; - handle - .link() - .set(lo_idx) - .up() - .execute() - .await - .context(RtNetlinkSnafu)?; - - // set veth2 to up - let veth_2_idx = handle - .link() - .get() - .match_name(veth_2) - .execute() - .try_next() - .await - .context(RtNetlinkSnafu)? - .ok_or(LossyNetworkError::InvalidConfig)? - .header - .index; - handle - .link() - .set(veth_2_idx) - .up() - .execute() - .await - .context(RtNetlinkSnafu)?; - - // set veth2 address - let veth_2_addr = self - .veth2_addr - .parse::() - .context(AddrParseSnafu)?; - let veth_2_range = 16; - AddressHandle::new(handle.clone()) - .add(veth_2_idx, veth_2_addr, veth_2_range) - .execute() - .await - .context(RtNetlinkSnafu)?; - - // add route - let route = RouteHandle::new(handle).add(); - route - .v4() - .gateway(bridge_addr) - .execute() - .await - .context(RtNetlinkSnafu)?; - self.enable_firewall(eth_name).await?; - - Ok(()) - } - - /// Enables firewall rules to allow network bridge to function properly (e.g. no packets - /// dropped). Assumes firewall is via iptables - async fn enable_firewall(&self, eth_name: &str) -> Result<(), LossyNetworkError> { - // accept traffic on bridge - // iptables -A FORWARD -o ens5 -i br0 -j ACCEPT - info!( - "{:?}", - Command::new("iptables") - .args(["-A", "FORWARD", "-o", eth_name, "-i", "br0", "-j", "ACCEPT"]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -A FORWARD -i ens5 -o br0 -j ACCEPT - info!( - "{:?}", - Command::new("iptables") - .args(["-A", "FORWARD", "-i", eth_name, "-o", "br0", "-j", "ACCEPT"]) - .output() - .context(IoSnafu)? - .status - ); - // NAT - // iptables -t nat -A POSTROUTING -s 172.20.0.1 -o ens5 -j MASQUERADE - info!( - "{:?}", - Command::new("iptables") - .args([ - "-t", - "nat", - "-A", - "POSTROUTING", - "-s", - &self.bridge_addr, - "-o", - "ens5", - "-j", - "MASQUERADE" - ]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -t nat -A POSTROUTING -o br0 -j MASQUERADE - info!( - "{:?}", - Command::new("iptables") - .args([ - "-t", - "nat", - "-A", - "POSTROUTING", - "-o", - &self.bridge_name, - "-j", - "MASQUERADE" - ]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -t nat -A POSTROUTING -s 172.20.0.0/16 -j MASQUERADE - info!( - "{:?}", - Command::new("iptables") - .args([ - "-t", - "nat", - "-A", - "POSTROUTING", - "-s", - &format!("{}/16", &self.bridge_addr), - "-j", - "MASQUERADE" - ]) - .output() - .context(IoSnafu)? - .status - ); - - Ok(()) - } - - /// tears down all created interfaces - /// deletes all iptables rules - /// deletes namespace - async fn undo_isolate_netlink(&self, eth_name: String) -> Result<(), LossyNetworkError> { - let root_ns_fd = File::open("/proc/1/ns/net").context(IoSnafu)?.into_raw_fd(); - setns(root_ns_fd, CloneFlags::CLONE_NEWNET).context(SetNsSnafu)?; - NetworkNamespace::del(self.counter_ns.to_string()) - .await - .context(RtNetlinkSnafu)?; - let (connection, handle, _) = - new_connection_with_socket::().context(IoSnafu)?; - async_spawn(connection); - del_link(handle, self.bridge_name.clone()).await?; - // delete creates iptables rules - self.undo_firewall(eth_name).await?; - Ok(()) - } - - /// deletes created iptables rules - async fn undo_firewall(&self, eth_name: String) -> Result<(), LossyNetworkError> { - // iptables -D FORWARD -o ens5 -i br0 -j ACCEPT - info!( - "{:?}", - Command::new("iptables") - .args(["-D", "FORWARD", "-o", ð_name, "-i", "br0", "-j", "ACCEPT"]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -D FORWARD -i ens5 -o br0 -j ACCEPT - info!( - "{:?}", - Command::new("iptables") - .args(["-D", "FORWARD", "-i", ð_name, "-o", "br0", "-j", "ACCEPT"]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -t nat -D POSTROUTING -s $bridge_addr -o ens5 -j MASQUERADE - info!( - "{:?}", - Command::new("iptables") - .args([ - "-t", - "nat", - "-D", - "POSTROUTING", - "-s", - &self.bridge_addr, - "-o", - "ens5", - "-j", - "MASQUERADE" - ]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -t nat -D POSTROUTING -o br0 -j MASQUERADE - info!( - "{:?}", - Command::new("iptables") - .args([ - "-t", - "nat", - "-D", - "POSTROUTING", - "-o", - &self.bridge_name, - "-j", - "MASQUERADE" - ]) - .output() - .context(IoSnafu)? - .status - ); - // iptables -t nat -D POSTROUTING -s $bridge_addr/16 -j MASQUERADE - info!( - "{:?}", - Command::new("iptables") - .args([ - "-t", - "nat", - "-D", - "POSTROUTING", - "-s", - &format!("{}/16", self.bridge_addr), - "-j", - "MASQUERADE" - ]) - .output() - .context(IoSnafu)? - .status - ); - Ok(()) - } -} - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum LossyNetworkError { - RtNetlink { source: rtnetlink::Error }, - Io { source: std::io::Error }, - SetNs { source: Errno }, - InvalidConfig, - Decode { source: DecodeError }, - AddrParse { source: AddrParseError }, -} diff --git a/libp2p-networking/examples/common/mod.rs b/libp2p-networking/examples/common/mod.rs deleted file mode 100644 index ff00f4044f..0000000000 --- a/libp2p-networking/examples/common/mod.rs +++ /dev/null @@ -1,806 +0,0 @@ -#[cfg(feature = "webui")] -pub mod web; - -#[cfg(all(feature = "lossy_network", target_os = "linux"))] -pub mod lossy_network; - -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -#[cfg(async_executor_impl = "async-std")] -use async_std::prelude::StreamExt; -#[cfg(async_executor_impl = "tokio")] -use tokio_stream::StreamExt; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::oneshot, -}; -use clap::{Args, Parser}; -use libp2p::{multiaddr, request_response::ResponseChannel, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_networking::network::{ - deserialize_msg, network_node_handle_error::NodeConfigSnafu, spin_up_swarm, NetworkEvent, - NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, -}; -use rand::{ - distributions::Bernoulli, prelude::Distribution, seq::IteratorRandom, thread_rng, RngCore, -}; -use serde::{Deserialize, Serialize}; -use snafu::{ResultExt, Snafu}; -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, - str::FromStr, - sync::Arc, - time::{Duration, SystemTime}, -}; -use tracing::{debug, error, info, instrument, warn}; - -#[cfg(feature = "webui")] -use std::net::SocketAddr; - -// number of success responses we need in order -// to increment the round number. -const SUCCESS_NUMBER: usize = 15; - -/// probability numerator that recv-er node sends back timing stats -const SEND_NUMERATOR: u32 = 40; -/// probaiblity denominator that recv-er node sends back timing states -const SEND_DENOMINATOR: u32 = 100; - -/// the timeout before ending rounding -const TIMEOUT: Duration = Duration::from_secs(500); - -/// timeout before failing to broadcast -const BROADCAST_TIMEOUT: Duration = Duration::from_secs(10); - -// we want message size of 32kb -// so we pad with a randomly generated number -// in order to do this use: -// 8 bytes per u64 -// 32kb = 32000 bytes -// so 32000/8 usizes -const PADDING_SIZE: usize = 32000 / 8; - -pub type CounterState = Epoch; -pub type Epoch = u32; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum EpochType { - BroadcastViaGossip, - BroadcastViaDM, - DMViaDM, -} - -#[derive(Debug, Clone)] -pub struct ConductorState { - ready_set: HashSet, - current_epoch: EpochData, - previous_epochs: HashMap, -} - -#[derive(Debug, Clone)] -pub struct EpochData { - epoch_idx: Epoch, - epoch_type: EpochType, - node_states: HashMap, - message_durations: Vec, -} - -impl ConductorState { - /// returns time per data - pub fn aggregate_epochs(&self, num_nodes: usize) -> (Duration, usize) { - let tmp_entry = NormalMessage { - req: CounterRequest::StateRequest, - relay_to_conductor: false, - sent_ts: SystemTime::now(), - epoch: 0, - padding: vec![0; PADDING_SIZE], - }; - let data_size = std::mem::size_of_val(&tmp_entry.req) - + std::mem::size_of_val(&tmp_entry.relay_to_conductor) - + std::mem::size_of_val(&tmp_entry.sent_ts) - + std::mem::size_of_val(&tmp_entry.epoch) - + PADDING_SIZE * 8; - - let mut total_time = Duration::ZERO; - let mut total_data = 0; - for epoch_data in self.previous_epochs.values() { - if epoch_data.message_durations.iter().len() != num_nodes { - error!( - "didn't match! expected {} got {} ", - num_nodes, - epoch_data.message_durations.iter().len() - ); - } - if let Some(max_prop_time) = epoch_data.message_durations.iter().max() { - info!("data size is {}", data_size); - total_time += *max_prop_time; - total_data += data_size; - } else { - error!("No timing data available for this round!"); - } - } - (total_time, total_data) - } -} - -impl EpochData { - pub fn increment_epoch(&mut self) { - self.epoch_idx += 1; - } -} - -impl Default for ConductorState { - fn default() -> Self { - Self { - ready_set: Default::default(), - current_epoch: EpochData { - epoch_idx: 0, - epoch_type: EpochType::BroadcastViaGossip, - node_states: Default::default(), - message_durations: Default::default(), - }, - previous_epochs: Default::default(), - } - } -} - -impl ConductorState { - /// Increment conductor to the next epoch - pub fn complete_round(&mut self, next_epoch_type: EpochType) { - let current_epoch = self.current_epoch.clone(); - self.previous_epochs - .insert(current_epoch.epoch_idx, current_epoch); - self.current_epoch.epoch_type = next_epoch_type; - self.current_epoch.message_durations = Default::default(); - self.current_epoch.increment_epoch(); - } -} - -#[cfg(feature = "webui")] -impl web::WebInfo for ConductorState { - type Serialized = serde_json::Value; - - fn get_serializable(&self) -> Self::Serialized { - let mut map = serde_json::map::Map::new(); - for (peer, state) in self.current_epoch.node_states.iter() { - map.insert(peer.to_base58(), (*state).into()); - } - serde_json::Value::Object(map) - } -} - -#[cfg(feature = "webui")] -impl web::WebInfo for (CounterState, Option) { - type Serialized = (u32, Option); - fn get_serializable(&self) -> Self::Serialized { - *self - } -} - -/// Normal message. Sent amongst [`NetworkNodeType::Regular`] and [`NetworkNodeType::Bootstrap`] nodes -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub enum CounterRequest { - /// Request state - StateRequest, - /// Reply with state - StateResponse(CounterState), - /// kill node - Kill, -} - -/// Message sent between non-[`NetworkNodeType::Conductor`] nodes -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct NormalMessage { - /// timestamp when message was sent - sent_ts: SystemTime, - /// whether or not message shuld be relayed to conductor - relay_to_conductor: bool, - /// the underlying request the recv-ing node should take - req: CounterRequest, - /// the epoch the message was sent on - epoch: Epoch, - /// arbitrary amount of padding to vary message length - padding: Vec, -} - -/// A message sent and recv-ed by a ['NetworkNodeType::Regular'] or ['NetworkNodeType::Bootstrap'] node -/// that is to be relayed back to a [`NetworkNodeType::Conductor`] node -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct RelayedMessage { - /// peer - from_peer: PeerId, - /// time message took to propagate from sender to recv-er - duration: Duration, - /// the requeset being made - req: CounterRequest, - /// the epoch the request was made on - epoch: Epoch, -} - -/// A message sent and recv-ed by a ['NetworkNodeType::Regular'] or ['NetworkNodeType::Bootstrap'] node -/// that is to be relayed back to a [`NetworkNodeType::Conductor`] node -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct ConductorMessage { - /// the request the recv-ing node should make - req: CounterRequest, - state: Epoch, - /// the type of broadcast (direct or broadcast) - broadcast_type: ConductorMessageMethod, -} - -/// overall message -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub enum Message { - /// message to end from a peer to a peer - Normal(NormalMessage), - /// messaged recved and relayed to conductor - Relayed(RelayedMessage), - /// conductor requests that message is sent to node - /// that the node must send to other node(s) - Conductor(ConductorMessage), - // announce the conductor - ConductorIdIs(PeerId), - /// recv-ed the conductor id - RecvdConductor, - DummyRecv, -} - -impl NormalMessage { - /// convert a normal message into a message to relay to conductor - pub fn normal_to_relayed(&self, peer_id: PeerId) -> RelayedMessage { - let recv_ts = SystemTime::now(); - let elapsed_time = recv_ts - .duration_since(self.sent_ts) - .unwrap_or(Duration::MAX); - RelayedMessage { - from_peer: peer_id, - duration: elapsed_time, - req: self.req.clone(), - epoch: self.epoch, - } - } -} - -/// ways to send messages between nodes -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub enum ConductorMessageMethod { - /// broadcast message to all nodes - Broadcast, - /// direct message [`PeerId`] - DirectMessage(PeerId), -} - -/// handler for non-conductor nodes for normal messages -pub async fn handle_normal_msg( - handle: Arc)>>, - msg: NormalMessage, - // in case we need to reply to direct message - chan: Option>>, -) -> Result<(), NetworkNodeHandleError> { - debug!("node={} handling normal msg {:?}", handle.id(), msg); - // send reply logic - match msg.req { - // direct message only - CounterRequest::StateResponse(c) => { - handle - .modify_state(|s| { - debug!( - "node={} performing modify_state with c={c}, s={:?}", - handle.id(), - s - ); - if c >= s.0 { - s.0 = c - } - }) - .await; - if let Some(chan) = chan { - handle.direct_response(chan, &Message::DummyRecv).await?; - } - } - // only as a response - CounterRequest::StateRequest => { - if let Some(chan) = chan { - let state = handle.state().await; - let data = { - let mut rng = thread_rng(); - vec![rng.next_u64(); PADDING_SIZE] - }; - let response = Message::Normal(NormalMessage { - sent_ts: SystemTime::now(), - relay_to_conductor: true, - req: CounterRequest::StateResponse(state.0), - epoch: 0, - padding: data, - }); - handle.direct_response(chan, &response).await?; - } else { - error!("Error deserializing, channel closed!"); - } - } - CounterRequest::Kill => { - handle.shutdown().await?; - } - } - // relay the message to conductor - if msg.relay_to_conductor { - info!("Recv-ed message. Deciding if should relay to conductor."); - if let Some(conductor_id) = handle.state().await.1 { - // do a dice roll here to decide if we want to keep the thing - if Bernoulli::from_ratio(SEND_NUMERATOR, SEND_DENOMINATOR) - .unwrap() - .sample(&mut rand::thread_rng()) - { - info!("Deciding to relay to conductor"); - let relayed_msg = Message::Relayed(msg.normal_to_relayed(handle.peer_id())); - handle.direct_request(conductor_id, &relayed_msg).await?; - } - } else { - error!("We have a message to send to the conductor, but we do not know who the conductor is!"); - } - } - Ok(()) -} - -/// event handler for events from the swarm -/// - updates state based on events received -/// - replies to direct messages -#[instrument] -pub async fn regular_handle_network_event( - event: NetworkEvent, - handle: Arc)>>, -) -> Result<(), NetworkNodeHandleError> { - debug!("node={} handling event {:?}", handle.id(), event); - - use NetworkEvent::*; - match event { - IsBootstrapped => {} - GossipMsg(m, _) | DirectResponse(m, _) => { - if let Ok(msg) = deserialize_msg::(&m) { - info!("regular msg recved: {:?}", msg.clone()); - match msg { - Message::DummyRecv => { }, - Message::ConductorIdIs(peerid) => { - handle.modify_state(|s| { - s.1 = Some(peerid); - }).await; - } - Message::Normal(msg) => { - handle_normal_msg(handle.clone(), msg, None).await?; - } - // do nothing. We only expect to be reached out to by the conductor via - // direct message - Message::Conductor(..) /* only the conductor expects to receive a relayed message */ | Message::Relayed(..) => { } - // only sent to conductor node - Message::RecvdConductor => { - unreachable!(); - } - } - } else { - info!("FAILED TO PARSE GOSSIP OR DIRECT RESPONSE MESSAGE"); - } - } - DirectRequest(msg, _peer_id, chan) => { - if let Ok(msg) = deserialize_msg::(&msg) { - info!("from pid {:?} msg recved: {:?}", msg.clone(), _peer_id); - match msg { - Message::DummyRecv => { - handle.direct_response(chan, &Message::DummyRecv).await?; - } - // this is only done via broadcast - Message::ConductorIdIs(_) - // these are only sent to the conductor - | Message::Relayed(_) | Message::RecvdConductor => - { - handle.direct_response(chan, &Message::DummyRecv).await?; - } - Message::Normal(msg) => { - handle_normal_msg(handle.clone(), msg, Some(chan)).await?; - } - Message::Conductor(msg) => { - let data = { - let mut rng = thread_rng(); - vec![rng.next_u64(); PADDING_SIZE] - }; - let response = - Message::Normal(NormalMessage { - sent_ts: SystemTime::now(), - relay_to_conductor: true, - req: msg.req, - epoch: msg.state, - padding: data, - }); - match msg.broadcast_type { - // if the conductor says to broadcast - // perform broadcast with gossip protocol - ConductorMessageMethod::Broadcast => { - handle.gossip("global".to_string(), &response).await?; - } - ConductorMessageMethod::DirectMessage(pid) => { - handle.direct_request( - pid, - &response - ).await?; - } - } - handle.direct_response(chan, &Message::DummyRecv).await?; - } - } - } else { - } - } - } - Ok(()) -} - -/// convert node string into multi addr -pub fn parse_node(s: &str) -> Result { - let mut i = s.split(':'); - let ip = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; - let port = i.next().ok_or(multiaddr::Error::InvalidMultiaddr)?; - Multiaddr::from_str(&format!("/ip4/{ip}/udp/{port}/quic-v1")) -} - -#[cfg(feature = "webui")] -/// This will be flattened into CliOpt -#[derive(Args, Debug)] -pub struct WebUi { - /// Doc comment - #[arg(long = "webui")] - pub webui_addr: Option, -} - -#[cfg(not(feature = "webui"))] -/// This will be flattened into CliOpt -#[derive(Args, Debug)] -pub struct WebUi {} - -#[cfg(all(feature = "lossy_network", target_os = "linux"))] -/// This will be flattened into CliOpt -#[derive(Args, Debug)] -pub struct EnvType { - /// Doc comment - #[arg(long = "env")] - pub env_type: ExecutionEnvironment, -} -#[cfg(not(all(feature = "lossy_network", target_os = "linux")))] -/// This will be flattened into CliOpt -#[derive(Args, Debug)] -pub struct EnvType {} - -#[derive(Parser, Debug)] -pub struct CliOpt { - /// list of bootstrap node addrs - #[arg(long, value_parser = parse_node)] - pub to_connect_addrs: Vec, - /// total number of nodes - #[arg(long)] - pub num_nodes: usize, - /// the role this node plays - #[arg(long)] - pub node_type: NetworkNodeType, - /// internal interface to bind to - #[arg(long, value_parser = parse_node)] - pub bound_addr: Multiaddr, - /// If this value is set, a webserver will be spawned on this address with debug info - #[arg(long, value_parser = parse_node)] - pub conductor_addr: Multiaddr, - - #[command(flatten)] - pub webui_delegate: WebUi, - - #[command(flatten)] - pub env_type_delegate: EnvType, - - /// number of rounds of gossip - #[arg(long)] - pub num_gossip: u32, -} - -/// The execution environemnt type -#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] -#[cfg(all(feature = "lossy_network", target_os = "linux"))] -pub enum ExecutionEnvironment { - /// execution environment is within docker - Docker, - /// execution environment is on metal - Metal, -} - -#[cfg(all(feature = "lossy_network", target_os = "linux"))] -impl FromStr for ExecutionEnvironment { - type Err = String; - - fn from_str(input: &str) -> Result { - match input { - "Docker" => Ok(ExecutionEnvironment::Docker), - "Metal" => Ok(ExecutionEnvironment::Metal), - _ => Err( - "Couldn't parse execution environment. Must be one of Metal, Docker".to_string(), - ), - } - } -} - -/// ['bootstrap_addrs`] list of bootstrap multiaddrs. Needed to bootstrap into network -/// [`num_nodes`] total number of nodes. Needed to create pruning rules -/// [`node_type`] the type of this node -/// ['bound_addr`] the address to bind to -pub async fn start_main(opts: CliOpt) -> Result<(), CounterError> { - setup_logging(); - setup_backtrace(); - let bootstrap_nodes = opts - .to_connect_addrs - .iter() - .cloned() - .map(|a| (None, a)) - .collect::>(); - - match opts.node_type { - NetworkNodeType::Conductor => { - let config = NetworkNodeConfigBuilder::default() - .bound_addr(Some(opts.bound_addr)) - .node_type(NetworkNodeType::Conductor) - .build() - .context(NodeConfigSnafu) - .context(HandleSnafu)?; - let handle = Arc::new( - NetworkNodeHandle::::new(config.clone(), 0) - .await - .context(HandleSnafu)?, - ); - - #[cfg(feature = "webui")] - if let Some(addr) = opts.webui_delegate.webui_addr { - web::spawn_server(Arc::clone(&handle), addr); - } - - spin_up_swarm(TIMEOUT, bootstrap_nodes, config, 0, &handle) - .await - .context(HandleSnafu)?; - info!("spun up!"); - - let handler_fut = handle.spawn_handler(conductor_handle_network_event).await; - info!("spawned handler"); - - handle.notify_webui().await; - - let conductor_peerid = handle.peer_id(); - - let (s, _r) = oneshot::(); - - async_spawn({ - let handle = handle.clone(); - // the "conductor id" - // periodically say "ignore me!" - async move { - loop { - // must wait for the listener to start - let msg = Message::ConductorIdIs(conductor_peerid); - if let Err(e) = handle - .gossip("global".to_string(), &msg) - .await - .context(HandleSnafu) - { - error!("Error {:?} gossiping the conductor ID to cluster.", e); - } - async_sleep(Duration::from_secs(1)).await; - } - } - }); - - // For now, just do a sleep waiting for nodes to spin up. It's easier. - async_sleep(Duration::from_secs(10)).await; - - // kill conductor id broadcast thread - s.send(true); - - for i in 0..opts.num_gossip { - info!("iteration i: {}", i); - handle - .modify_state(|s| s.current_epoch.epoch_type = EpochType::BroadcastViaGossip) - .await; - conductor_broadcast(BROADCAST_TIMEOUT, handle.clone()) - .await - .context(HandleSnafu)?; - handle - .modify_state(|s| s.complete_round(EpochType::BroadcastViaGossip)) - .await; - } - handler_fut.await; - - #[cfg(feature = "benchmark-output")] - { - trace!("result raw: {:?}", handle.state().await); - trace!( - "result: {:?}", - handle.state().await.aggregate_epochs(opts.num_nodes) - ); - } - } - // regular and bootstrap nodes - NetworkNodeType::Regular | NetworkNodeType::Bootstrap => { - let config = NetworkNodeConfigBuilder::default() - .bound_addr(Some(opts.bound_addr)) - .node_type(opts.node_type) - .build() - .context(NodeConfigSnafu) - .context(HandleSnafu)?; - - let node = NetworkNodeHandle::<(CounterState, Option)>::new(config.clone(), 0) - .await - .context(HandleSnafu)?; - - let handle = Arc::new(node); - #[cfg(feature = "webui")] - if let Some(addr) = opts.webui_delegate.webui_addr { - web::spawn_server(Arc::clone(&handle), addr); - } - - spin_up_swarm(TIMEOUT, bootstrap_nodes, config, 0, &handle) - .await - .context(HandleSnafu)?; - let handler_fut = handle.spawn_handler(regular_handle_network_event).await; - handler_fut.await; - // while !handle.is_killed().await { - // async_sleep(Duration::from_millis(100)).await; - // } - } - } - - Ok(()) -} - -pub async fn conductor_broadcast( - timeout: Duration, - handle: Arc>, -) -> Result<(), NetworkNodeHandleError> { - let new_state = handle.state().await.current_epoch.epoch_idx; - // nOTE it's probably easier to pass in a hard coded list of PIDs - // from test.py orchestration - let mut connected_peers = handle.connected_pids().await.unwrap(); - connected_peers.remove(&handle.peer_id()); - - let chosen_peer = *connected_peers.iter().choose(&mut thread_rng()).unwrap(); - - let request = CounterRequest::StateResponse(new_state); - - // tell the "leader" to do a "broadcast" message using gosisp protocol - let msg = Message::Conductor(ConductorMessage { - state: new_state, - req: request.clone(), - broadcast_type: ConductorMessageMethod::Broadcast, - }); - - let mut res_fut = Box::pin( - handle.state_wait_timeout_until_with_trigger(timeout, |state| { - state - .current_epoch - .node_states - .iter() - .filter(|(_, &s)| s >= new_state) - .count() - >= SUCCESS_NUMBER - }), - ); - - // wait for ready signal - res_fut.next().await.unwrap().unwrap(); - - // send direct message from conductor to leader to do broadcast - handle - .direct_request(chosen_peer, &msg) - .await - .context(HandleSnafu) - .unwrap(); - - if res_fut.next().await.unwrap().is_err() { - error!( - "TIMED OUT with {} msgs recv-ed", - handle.state().await.current_epoch.message_durations.len() - ); - } - - Ok(()) -} - -/// network event handler for conductor -#[instrument] -pub async fn conductor_handle_network_event( - event: NetworkEvent, - handle: Arc>, -) -> Result<(), NetworkNodeHandleError> { - use NetworkEvent::*; - match event { - IsBootstrapped => {} - GossipMsg(_m, _t) => { - // this node isn't going to participate in gossip/dms to update state - // it's only purpose is to recv relayed messages - } - DirectRequest(m, peer_id, chan) => { - info!("recv: {:?}", m); - async_spawn({ - let handle = handle.clone(); - async move { - handle.direct_response(chan, &Message::DummyRecv).await?; - Result::<(), NetworkNodeHandleError>::Ok(()) - } - }); - info!("finished spawning now deserializing"); - if let Ok(msg) = deserialize_msg::(&m) { - info!("desrialized MESSAGE IS {:?}", msg); - match msg { - Message::Relayed(msg) => { - match handle.state().await.current_epoch.epoch_type { - EpochType::BroadcastViaGossip => { - if let CounterRequest::StateResponse(..) = msg.req { - handle - .modify_state(|s| { - if msg.epoch >= s.current_epoch.epoch_idx { - s.current_epoch.message_durations.push(msg.duration) - } - - if msg.epoch > s.current_epoch.epoch_idx { - warn!("listening on epcoh {:?} but recv message on epoch {:?}", s.current_epoch.epoch_idx, msg.epoch); - } - - }) - .await; - let _ = handle.prune_peer(msg.from_peer).await; - } - } - EpochType::DMViaDM => { - info!("modifying state DM VIA DM {:?}", msg); - // NOTE maybe should check epoch - if let CounterRequest::StateRequest = msg.req { - handle - .modify_state(|s| { - s.current_epoch.message_durations.push(msg.duration); - }) - .await; - } - } - EpochType::BroadcastViaDM => { - unimplemented!("BroadcastViaDM is currently unimplemented"); - } - } - if let CounterRequest::StateResponse(state) = msg.req { - handle - .modify_state(|s| { - s.current_epoch.node_states.insert(peer_id, state); - }) - .await; - } - } - Message::RecvdConductor => { - handle - .modify_state(|s| { - s.ready_set.insert(peer_id); - }) - .await; - } - msg => { - info!("Unexpected message {:?}", msg); - - /* Do nothing. Conductor doesn't care about these messages. */ - } - } - } else { - error!("failed to deserialize msg"); - } - } - DirectResponse(_m, _peer_id) => { /* nothing to do here */ } - } - Ok(()) -} - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum CounterError { - Handle { source: NetworkNodeHandleError }, - FileRead { source: std::io::Error }, - MissingBootstrap, -} diff --git a/libp2p-networking/examples/common/web.rs b/libp2p-networking/examples/common/web.rs deleted file mode 100644 index bbb635de11..0000000000 --- a/libp2p-networking/examples/common/web.rs +++ /dev/null @@ -1,114 +0,0 @@ -use async_compatibility_layer::art::async_spawn; -use libp2p_networking::network::NetworkNodeHandle; -use std::{net::SocketAddr, sync::Arc}; -use tracing::{debug, error, info}; - -/// Spawn a web server on the given `addr`. -/// This web server will host the static HTML page `/web/index.html` and expose a `sse` endpoint. -/// This `sse` endpoint will send status updates to the connected clients whenever `NetworkNodeHandle::state_changed` triggers. -/// -/// # Links -/// - SSE on wikipedia: -/// - SEE in `tide`: -pub fn spawn_server(state: Arc>, addr: SocketAddr) -where - S: WebInfo + Send + 'static + Clone, -{ - let mut tide = tide::with_state(state); - // Unwrap this in the calling thread so that if it fails we fail completely - // instead of not knowing why the web UI does not work - tide.at("/").get(|_| async move { - Ok(tide::Response::builder(200) - .content_type(tide::http::mime::HTML) - .body(include_str!("../../web/index.html")) - .build()) - }); - tide.at("/sse").get(tide::sse::endpoint( - |req: tide::Request>>, sender| async move { - let peer_addr = req.peer_addr(); - debug!(?peer_addr, "Web client connected, sending initial state"); - - let state = Arc::clone(req.state()); - network_state::State::new(&state) - .await - .send(&sender) - .await?; - - // Register a `Sender<()>` with the `NetworkNodeHandle` so we get notified when it changes - let mut receiver = state.register_webui_listener().await; - - while let Ok(()) = receiver.recv().await { - // TODO: I think this will not work as this `.lock` will conflict with the other lock, but we'll see - if let Err(e) = network_state::State::new(&state).await.send(&sender).await { - debug!(?peer_addr, ?e, "Could not send to client, aborting"); - break; - } - } - Ok(()) - }, - )); - async_spawn(async move { - info!(?addr, "Web UI listening on"); - if let Err(e) = tide.listen(addr).await { - error!(?e, "Web UI crashed, this is a bug"); - } - }); -} - -mod network_state { - - use libp2p_identity::PeerId; - use libp2p_networking::network::{NetworkNodeConfig, NetworkNodeHandle}; - - #[derive(serde::Serialize)] - pub struct State { - pub network_config: NetworkConfig, - pub state: S, - } - - #[derive(serde::Serialize)] - pub struct NetworkConfig { - pub node_type: String, - pub identity: String, - } - - #[derive(serde::Serialize)] - pub struct ConnectionState { - pub connected_peers: Vec, - pub connecting_peers: Vec, - pub known_peers: Vec, - } - - impl State { - pub async fn new(handle: &NetworkNodeHandle) -> Self - where - W: super::WebInfo + Send + 'static, - { - Self { - network_config: NetworkConfig::new(handle.peer_id(), handle.config()), - state: handle.state().await.get_serializable(), - } - } - pub async fn send(self, sender: &tide::sse::Sender) -> std::io::Result<()> { - let str = serde_json::to_string(&self).unwrap(); // serializing JSON should never fail - sender.send("node_state", &str, None).await - } - } - impl NetworkConfig { - fn new(identity: PeerId, c: &NetworkNodeConfig) -> Self { - Self { - node_type: format!("{:?}", c.node_type), - identity: identity.to_string(), - } - } - } -} - -/// Trait to unify the info that can be send to the web interface. -/// -/// This has to be implemented for all `S` in `NetworkNodeHandle`, e.g. `CounterState`, `ConductorState`, etc. -pub trait WebInfo: Sync + Send { - type Serialized: serde::Serialize + Send; - - fn get_serializable(&self) -> Self::Serialized; -} diff --git a/libp2p-networking/examples/counter.rs b/libp2p-networking/examples/counter.rs deleted file mode 100644 index 54a27b24dd..0000000000 --- a/libp2p-networking/examples/counter.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! This is dead code, do not use -// pub mod common; -// -// use async_compatibility_layer::art::async_main; -// use clap::Parser; -// use color_eyre::eyre::Result; -// #[cfg(all(feature = "lossy_network", target_os = "linux"))] -// use common::{ -// lossy_network::{IsolationConfig, LossyNetworkBuilder}, -// ExecutionEnvironment, -// }; -// use common::{start_main, CliOpt}; -// use tracing::instrument; -// -// #[async_main] -// #[instrument] -// async fn main() -> Result<()> { -// fn main() -> Result<(), ()> { -// let args = CliOpt::parse(); -// -// #[cfg(all(feature = "lossy_network", target_os = "linux"))] -// let network = { -// use crate::common::lossy_network::LOSSY_QDISC; -// let mut builder = LossyNetworkBuilder::default(); -// builder -// .env_type(args.env_type_delegate.env_type) -// .netem_config(LOSSY_QDISC); -// match args.env_type_delegate.env_type { -// ExecutionEnvironment::Docker => { -// builder.eth_name("eth0".to_string()).isolation_config(None) -// } -// ExecutionEnvironment::Metal => builder -// .eth_name("ens5".to_string()) -// .isolation_config(Some(IsolationConfig::default())), -// }; -// builder.build() -// }?; -// -// #[cfg(all(feature = "lossy_network", target_os = "linux"))] -// { -// network.isolate().await?; -// network.create_qdisc().await?; -// } -// -// start_main(args).await?; -// -// #[cfg(all(feature = "lossy_network", target_os = "linux"))] -// { -// // implicitly deletes qdisc in the case of metal run -// // leaves qdisc alive in docker run with expectation docker does cleanup -// network.undo_isolate().await?; -// } -// -// Ok(()) -// } -// -/// dead code -fn main() {} diff --git a/libp2p-networking/src/lib.rs b/libp2p-networking/src/lib.rs index c5b2546c4d..0bc4daf573 100644 --- a/libp2p-networking/src/lib.rs +++ b/libp2p-networking/src/lib.rs @@ -1,8 +1,5 @@ //! Library for p2p communication -/// Example message used by the UI library -pub mod message; - /// Network logic pub mod network; diff --git a/libp2p-networking/src/message.rs b/libp2p-networking/src/message.rs deleted file mode 100644 index e7382d0f03..0000000000 --- a/libp2p-networking/src/message.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; - -/// example message that may be sent to the swarm. Used in the UI -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct Message { - /// the peerid of the sender - pub sender: String, - /// the content of the message - pub content: String, - /// the topic associated with the msg - pub topic: String, -} diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 54fe4dd13a..0cbdbba462 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -108,7 +108,6 @@ impl DHTBehaviour { mut kadem: KademliaBehaviour, pid: PeerId, replication_factor: NonZeroUsize, - _: Option, ) -> Self { // needed because otherwise we stay in client mode when testing locally // and don't publish keys stuff diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index fd543f4348..fc0e645942 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -71,8 +71,6 @@ impl DMBehaviour { "outbound failure to send message to {:?} with error {:?}", peer, error ); - // RM TODO: make direct messages have n (and not infinite) retries - // issue: https://github.com/EspressoSystems/HotShot/issues/2003 if let Some(mut req) = self.in_progress_rr.remove(&request_id) { req.backoff.start_next(false); self.failed_rr.push_back(req); diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index ac8bcc8500..38614a855b 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -34,8 +34,8 @@ use libp2p::{ use libp2p_identity::PeerId; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc, time::Duration}; -use tracing::{info, instrument}; +use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc}; +use tracing::instrument; #[cfg(async_executor_impl = "async-std")] use libp2p::dns::async_std::Transport as DnsTransport; @@ -229,23 +229,6 @@ pub async fn gen_transport(identity: Keypair) -> Result( - timeout_len: Duration, - known_nodes: Vec<(Option, Multiaddr)>, - config: NetworkNodeConfig, - idx: usize, - handle: &Arc>, -) -> Result<(), NetworkNodeHandleError> { - info!("known_nodes{:?}", known_nodes); - handle.add_known_peers(known_nodes).await?; - handle.subscribe("global".to_string()).await?; - - Ok(()) -} - /// Given a slice of handles assumed to be larger than 0, /// chooses one /// # Panics diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 75335e5e2d..6e6235fcd6 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -286,7 +286,6 @@ impl NetworkNode { config .replication_factor .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), - config.dht_cache_location.clone(), ), identify, DMBehaviour::new(request_response), diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index d97097e8e1..4d19b6f516 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -24,10 +24,6 @@ pub struct NetworkNodeConfig { #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, - /// location of the dht cache, default is None - #[builder(default = "None")] - pub dht_cache_location: Option, - #[builder(default)] /// parameters for gossipsub mesh network pub mesh_params: Option, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 8790f20c7a..64f45e457a 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -6,8 +6,8 @@ use async_compatibility_layer::{ art::{async_sleep, async_spawn, async_timeout, future::to, stream}, async_primitives::subscribable_mutex::SubscribableMutex, channel::{ - bounded, oneshot, OneShotReceiver, OneShotSender, Receiver, SendError, Sender, - UnboundedReceiver, UnboundedRecvError, UnboundedSender, + oneshot, OneShotReceiver, OneShotSender, SendError, UnboundedReceiver, UnboundedRecvError, + UnboundedSender, }, }; use async_lock::Mutex; @@ -52,9 +52,6 @@ pub struct NetworkNodeHandle { /// human readable id id: usize, - /// A list of webui listeners that are listening for changes on this node - webui_listeners: Arc>>>, - /// network node receiver receiver: NetworkNodeReceiver, } @@ -124,7 +121,6 @@ impl NetworkNodeHandle { listen_addr, peer_id, id, - webui_listeners: Arc::default(), receiver: NetworkNodeReceiver { kill_switch, killed: AtomicBool::new(false), @@ -141,9 +137,6 @@ impl NetworkNodeHandle { /// /// Will panic if a handler is already spawned #[allow(clippy::unused_async)] - // // Tokio and async_std disagree how this function should be linted - // #[allow(clippy::ignored_unit_patterns)] - pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future where F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, @@ -396,26 +389,6 @@ impl NetworkNodeHandle { } } - /// Notify the webui that either the `state` or `connection_state` has changed. - /// - /// If the webui is not started, this will do nothing. - /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - pub async fn notify_webui(&self) { - let mut lock = self.webui_listeners.lock().await; - // Keep a list of indexes that are unable to send the update - let mut indexes_to_remove = Vec::new(); - for (idx, sender) in lock.iter().enumerate() { - if sender.send(()).await.is_err() { - indexes_to_remove.push(idx); - } - } - // Make sure to remove the indexes in reverse other, else removing an index will invalidate the following indexes. - for idx in indexes_to_remove.into_iter().rev() { - lock.remove(idx); - } - } - /// Subscribe to a topic /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed @@ -608,14 +581,6 @@ impl NetworkNodeHandle { self.receiver.killed.load(Ordering::Relaxed) } - /// Register a webui listener - pub async fn register_webui_listener(&self) -> Receiver<()> { - let (sender, receiver) = bounded(100); - let mut lock = self.webui_listeners.lock().await; - lock.push(sender); - receiver - } - /// Call `wait_timeout_until` on the state's [`SubscribableMutex`] /// # Errors /// Will throw a [`NetworkNodeHandleError::TimeoutError`] error upon timeout From 3a7e5148a8c492fea70cfe4eb7a678a75f07d6f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 10:09:15 -0500 Subject: [PATCH 0811/1393] Bump local-ip-address from 0.6.0 to 0.6.1 (#2657) Bumps [local-ip-address](https://github.com/EstebanBorai/local-ip-address) from 0.6.0 to 0.6.1. - [Release notes](https://github.com/EstebanBorai/local-ip-address/releases) - [Changelog](https://github.com/EstebanBorai/local-ip-address/blob/main/CHANGELOG.md) - [Commits](https://github.com/EstebanBorai/local-ip-address/commits) --- updated-dependencies: - dependency-name: local-ip-address dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b2de976254..675d45a754 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -123,7 +123,7 @@ async-std = { workspace = true } clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } -local-ip-address = "0.6.0" +local-ip-address = "0.6.1" [lints] workspace = true diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 315b8371dd..f361bfab69 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -62,7 +62,7 @@ async-std = { workspace = true } clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } -local-ip-address = "0.6.0" +local-ip-address = "0.6.1" [lints] workspace = true From 5f15fb8b1a25dbf52d723b236989a88156ef99a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 13:48:02 -0500 Subject: [PATCH 0812/1393] Bump lru from 0.12.2 to 0.12.3 (#2658) Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.12.2 to 0.12.3. - [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/jeromefroe/lru-rs/compare/0.12.2...0.12.3) --- updated-dependencies: - dependency-name: lru dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 675d45a754..8ed876d41d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -107,7 +107,7 @@ surf-disco = { workspace = true } time = { workspace = true } derive_more = "0.99.17" portpicker = "0.1.1" -lru = "0.12.2" +lru = "0.12.3" hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index f361bfab69..666c3e1be2 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -48,7 +48,7 @@ surf-disco = { workspace = true } time = { workspace = true } derive_more = "0.99.17" portpicker = "0.1.1" -lru = "0.12.2" +lru = "0.12.3" hotshot-task = { path = "../task" } tracing = { workspace = true } From 293e9ff26ad84cc76d3b5333f2a4bf07517f4ec1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 26 Feb 2024 14:18:36 -0800 Subject: [PATCH 0813/1393] [KEY MANAGEMENT] Add state pub keys to HotshotConfig (#2656) * add state pub keys to hotshotconfig * lint * refactor config getter for orchestrator * clean up --- examples/infra/mod.rs | 38 +++++------ .../src/traits/election/static_committee.rs | 11 +++- orchestrator/api.toml | 8 +++ orchestrator/src/client.rs | 30 ++++++--- orchestrator/src/config.rs | 63 ++++++++++++++++--- orchestrator/src/lib.rs | 36 +++++++++-- testing/src/test_builder.rs | 7 +-- types/src/lib.rs | 60 +++++++++++++++++- types/src/light_client.rs | 2 +- types/src/traits/election.rs | 4 +- 10 files changed, 202 insertions(+), 57 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 944456f917..01ebf512f0 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -26,6 +26,7 @@ use hotshot_orchestrator::{ }; use hotshot_types::message::Message; use hotshot_types::traits::network::ConnectedNetwork; +use hotshot_types::PeerConfig; use hotshot_types::ValidatorConfig; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -111,14 +112,9 @@ pub fn load_config_from_file( // but its type is too complex to load so we'll generate it from seed now config.config.my_own_validator_config = ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1); - let my_own_validator_config_with_stake = config - .config - .my_own_validator_config - .public_key - .get_stake_table_entry(1u64); - // initialize it with size for better assignment of other peers' config + // initialize it with size for better assignment of peers' config config.config.known_nodes_with_stake = - vec![my_own_validator_config_with_stake; config.config.total_nodes.get() as usize]; + vec![PeerConfig::default(); config.config.total_nodes.get() as usize]; config } @@ -768,27 +764,25 @@ pub async fn main_entry_point< let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.clone(), public_ip.to_string()); + // We assume one node will not call this twice to generate two validator_config-s with same identity. + let my_own_validator_config = NetworkConfig::::generate_init_validator_config( + &orchestrator_client, + ).await; + // conditionally save/load config from file or orchestrator - let (mut run_config, source) = - NetworkConfig::::from_file_or_orchestrator( + // This is a function that will return correct complete config from orchestrator. + // It takes in a valid args.network_config_file when loading from file, or valid validator_config when loading from orchestrator, the invalid one will be ignored. + // It returns the complete config which also includes peer's public key and public config. + // This function will be taken solely by sequencer right after OrchestratorClient::new, + // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. + let (run_config, source) = + NetworkConfig::::get_complete_config( &orchestrator_client, + my_own_validator_config, args.clone().network_config_file, ) .await; - let node_index = run_config.node_index; - error!("Retrieved config; our node index is {node_index}"); - - // one more round of orchestrator here to get peer's public key/config - let updated_config: NetworkConfig = - orchestrator_client - .post_and_wait_all_public_keys::( - run_config.node_index, - run_config.config.my_own_validator_config.public_key.clone(), - ) - .await; - run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; - error!("Initializing networking"); let run = RUNDA::initialize_networking(run_config.clone()).await; let hotshot = run.initialize_state_and_hotshot().await; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index b14bf80ec7..2cf01a36f5 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -5,6 +5,7 @@ use hotshot_types::traits::{ node_implementation::NodeType, signature_key::SignatureKey, }; +use hotshot_types::PeerConfig; #[allow(deprecated)] use serde::{Deserialize, Serialize}; use std::{marker::PhantomData, num::NonZeroU64}; @@ -99,14 +100,18 @@ where } fn create_election( - keys_qc: Vec, + entries: Vec>, config: TYPES::ElectionConfigType, ) -> Self { - let mut committee_nodes_with_stake = keys_qc.clone(); + let nodes_with_stake: Vec = entries + .iter() + .map(|x| x.stake_table_entry.clone()) + .collect(); + let mut committee_nodes_with_stake: Vec = nodes_with_stake.clone(); debug!("Election Membership Size: {}", config.num_nodes); committee_nodes_with_stake.truncate(config.num_nodes.try_into().unwrap()); Self { - nodes_with_stake: keys_qc, + nodes_with_stake, committee_nodes_with_stake, _type_phantom: PhantomData, } diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 3b03e64764..4258b0d71d 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -23,6 +23,14 @@ This must be a POST request so we can update the OrchestratorState in the server received from the 'identity' endpoint """ +# GET the latest temporary node index only for generating validator's key pair +[route.tmp_node_index] +PATH = ["tmp_node_index"] +METHOD = "POST" +DOC = """ +Get the latest temporary node index only for generating validator's key pair for testing in hotshot, later the generated key pairs might be bound with other node_index. +""" + # POST the node's node index to generate public key for pubkey collection [route.postpubkey] PATH = ["pubkey/:node_index"] diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 102c138b30..a429a3174e 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -7,11 +7,10 @@ use futures::{Future, FutureExt}; use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, - ValidatorConfig, + PeerConfig, }; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; - /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client @@ -102,13 +101,13 @@ impl OrchestratorClient { } /// Sends an identify message to the orchestrator and attempts to get its config - /// Returns both the `node_index` and the run configuration from the orchestrator + /// Returns both the `node_index` and the run configuration without peer's public config from the orchestrator /// Will block until both are returned /// # Panics /// if unable to convert the node index from usize into u64 /// (only applicable on 32 bit systems) #[allow(clippy::type_complexity)] - pub async fn get_config( + pub async fn get_config_without_peer( &self, identity: String, ) -> NetworkConfig { @@ -140,13 +139,26 @@ impl OrchestratorClient { let mut config = self.wait_for_fn_from_orchestrator(f).await; config.node_index = From::::from(node_index); - // The orchestrator will generate keys for validator if it doesn't load keys from file - config.config.my_own_validator_config = - ValidatorConfig::::generated_from_seed_indexed(config.seed, config.node_index, 1); config } + /// Post to the orchestrator and get the latest `node_index` + /// Then return it for the init validator config + /// # Panics + /// if unable to post + pub async fn get_node_index_for_init_validator_config(&self) -> u16 { + let cur_node_index = |client: Client| { + async move { + let cur_node_index: Result = + client.post("api/tmp_node_index").send().await; + cur_node_index + } + .boxed() + }; + self.wait_for_fn_from_orchestrator(cur_node_index).await + } + /// Sends my public key to the orchestrator so that it can collect all public keys /// And get the updated config /// Blocks until the orchestrator collects all peer's public keys/configs @@ -155,13 +167,13 @@ impl OrchestratorClient { pub async fn post_and_wait_all_public_keys( &self, node_index: u64, - my_pub_key: K, + my_pub_key: PeerConfig, ) -> NetworkConfig { // send my public key let _send_pubkey_ready_f: Result<(), ClientError> = self .client .post(&format!("api/pubkey/{node_index}")) - .body_binary(&my_pub_key.to_bytes()) + .body_binary(&PeerConfig::::to_bytes(&my_pub_key)) //&my_pub_key.stake_table_entry.get_public_key().to_bytes() .unwrap() .send() .await; diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index bdd3ddb84b..93c204d013 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,6 +1,6 @@ use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, - ExecutionType, HotShotConfig, ValidatorConfig, + ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; use serde_inline_default::serde_inline_default; use std::{ @@ -208,14 +208,16 @@ impl NetworkConfig { // fallback to orchestrator error!("{e}, falling back to orchestrator"); - let config = client.get_config(client.identity.clone()).await; + let config = client + .get_config_without_peer(client.identity.clone()) + .await; // save to file if we fell back if let Err(e) = config.to_file(file) { error!("{e}"); }; - (config, NetworkConfigSource::Orchestrator) + (config, NetworkConfigSource::File) } } } else { @@ -223,12 +225,58 @@ impl NetworkConfig { // otherwise just get from orchestrator ( - client.get_config(client.identity.clone()).await, + client + .get_config_without_peer(client.identity.clone()) + .await, NetworkConfigSource::Orchestrator, ) } } + /// Get a temporary node index for generating a validator config + pub async fn generate_init_validator_config(client: &OrchestratorClient) -> ValidatorConfig { + // This cur_node_index is only used for key pair generation, it's not bound with the node, + // lather the node with the generated key pair will get a new node_index from orchestrator. + let cur_node_index = client.get_node_index_for_init_validator_config().await; + ValidatorConfig::generated_from_seed_indexed([0u8; 32], cur_node_index.into(), 1) + } + + /// Asynchronously retrieves a `NetworkConfig` from an orchestrator. + /// The retrieved one includes correct `node_index` and peer's public config. + pub async fn get_complete_config( + client: &OrchestratorClient, + my_own_validator_config: ValidatorConfig, + file: Option, + ) -> (NetworkConfig, NetworkConfigSource) { + let (mut run_config, source) = Self::from_file_or_orchestrator(client, file).await; + let node_index = run_config.node_index; + + // Assign my_own_validator_config to the run_config if not loading from file + match source { + NetworkConfigSource::Orchestrator => { + run_config.config.my_own_validator_config = my_own_validator_config; + } + NetworkConfigSource::File => { + // do nothing, my_own_validator_config has already been loaded from file + } + } + + // one more round of orchestrator here to get peer's public key/config + let updated_config: NetworkConfig = client + .post_and_wait_all_public_keys::( + run_config.node_index, + run_config + .config + .my_own_validator_config + .get_public_config(), + ) + .await; + run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; + + error!("Retrieved config; our node index is {node_index}"); + (run_config, source) + } + /// Loads a `NetworkConfig` from a file. /// /// This function takes a file path as a string, reads the file, and then deserializes the contents into a `NetworkConfig`. @@ -436,7 +484,7 @@ pub struct HotShotConfigFile { pub my_own_validator_config: ValidatorConfig, #[serde(skip)] /// The known nodes' public key and stake value - pub known_nodes_with_stake: Vec, + pub known_nodes_with_stake: Vec>, /// Number of committee nodes pub committee_nodes: usize, /// Maximum transactions per block @@ -561,10 +609,7 @@ impl Default for HotShotConfigFile { .map(|node_id| { let cur_validator_config: ValidatorConfig = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); - - cur_validator_config - .public_key - .get_stake_table_entry(cur_validator_config.stake_value) + cur_validator_config.get_public_config() }) .collect(); Self { diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index d5c8abc330..5c3f644a2c 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -6,7 +6,10 @@ pub mod client; pub mod config; use async_lock::RwLock; -use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey}; +use hotshot_types::{ + traits::{election::ElectionConfig, signature_key::SignatureKey}, + PeerConfig, +}; use std::{ collections::HashSet, io, @@ -48,6 +51,8 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, + /// Tracks the latest temporary index we have generated for init validator's key pair + tmp_latest_index: u16, /// The network configuration config: NetworkConfig, /// The total nodes that have posted their public keys @@ -70,6 +75,7 @@ impl pub fn new(network_config: NetworkConfig) -> Self { OrchestratorState { latest_index: 0, + tmp_latest_index: 0, config: network_config, nodes_with_pubkey: 0, peer_pub_ready: false, @@ -93,6 +99,10 @@ pub trait OrchestratorApi { &mut self, _node_index: u16, ) -> Result, ServerError>; + /// get endpoint for the next available temporary node index + /// # Errors + /// if unable to serve + fn get_tmp_node_index(&mut self) -> Result; /// post endpoint for each node's public key /// # Errors /// if unable to serve @@ -181,6 +191,21 @@ where Ok(self.config.clone()) } + // Assumes one node do not get twice + fn get_tmp_node_index(&mut self) -> Result { + let tmp_node_index = self.tmp_latest_index; + self.tmp_latest_index += 1; + + if usize::from(tmp_node_index) >= self.config.config.total_nodes.get() { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Node index getter for key pair generation has reached capacity" + .to_string(), + }); + } + Ok(tmp_node_index) + } + #[allow(clippy::cast_possible_truncation)] fn register_public_key( &mut self, @@ -195,11 +220,9 @@ where } self.pub_posted.insert(node_index); - // Sishan NOTE: let me know if there's a better way to remove the first extra 8 bytes - // The guess is extra bytes are from orchestrator serialization + // The guess is extra 8 starting bytes are from orchestrator serialization pubkey.drain(..8); - let register_pub_key = ::from_bytes(pubkey).unwrap(); - let register_pub_key_with_stake = register_pub_key.get_stake_table_entry(1u64); + let register_pub_key_with_stake = PeerConfig::::from_bytes(pubkey).unwrap(); self.config.config.known_nodes_with_stake[node_index as usize] = register_pub_key_with_stake; self.nodes_with_pubkey += 1; @@ -295,6 +318,9 @@ where } .boxed() })? + .post("tmp_node_index", |_req, state| { + async move { state.get_tmp_node_index() }.boxed() + })? .post("postpubkey", |req, state| { async move { let node_index = req.integer_param("node_index")?; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 4fb60d2b90..b5cfcecb88 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,4 +1,4 @@ -use hotshot::{traits::NetworkReliability, types::SignatureKey}; +use hotshot::traits::NetworkReliability; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::traits::election::Membership; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; @@ -233,10 +233,7 @@ impl TestMetadata { .map(|node_id_| { let cur_validator_config: ValidatorConfig = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id_ as u64, 1); - - cur_validator_config - .public_key - .get_stake_table_entry(cur_validator_config.stake_value) + cur_validator_config.get_public_config() }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. diff --git a/types/src/lib.rs b/types/src/lib.rs index 388d8ad9bf..148ce34603 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,6 +1,11 @@ //! Types and Traits for the `HotShot` consensus module +use bincode::Options; use displaydoc::Display; +use hotshot_utils::bincode::bincode_opts; +use light_client::StateVerKey; +use std::fmt::Debug; use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; +use tracing::error; use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod consensus; pub mod data; @@ -74,6 +79,14 @@ impl ValidatorConfig { state_key_pair: state_key_pairs, } } + + /// get the public config of the validator + pub fn get_public_config(&self) -> PeerConfig { + PeerConfig { + stake_table_entry: self.public_key.get_stake_table_entry(self.stake_value), + state_ver_key: self.state_key_pair.0.ver_key(), + } + } } impl Default for ValidatorConfig { @@ -82,6 +95,51 @@ impl Default for ValidatorConfig { } } +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] +#[serde(bound(deserialize = ""))] +/// structure of peers' config, including public key, stake value, and state key. +pub struct PeerConfig { + /// The peer's public key and stake value + pub stake_table_entry: KEY::StakeTableEntry, + /// the peer's state public key + pub state_ver_key: StateVerKey, +} + +impl PeerConfig { + /// Serialize a peer's config to bytes + pub fn to_bytes(config: &Self) -> Vec { + let x = bincode_opts().serialize(config); + match x { + Ok(x) => x, + Err(e) => { + error!(?e, "Failed to serialize public key"); + vec![] + } + } + } + + /// Deserialize a peer's config from bytes + /// # Errors + /// Will return `None` if deserialization fails + pub fn from_bytes(bytes: &[u8]) -> Option { + let x: Result, _> = bincode_opts().deserialize(bytes); + match x { + Ok(pub_key) => Some(pub_key), + Err(e) => { + error!(?e, "Failed to deserialize public key"); + None + } + } + } +} + +impl Default for PeerConfig { + fn default() -> Self { + let default_validator_config = ValidatorConfig::::default(); + default_validator_config.get_public_config() + } +} + /// Holds configuration for a `HotShot` #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] @@ -95,7 +153,7 @@ pub struct HotShotConfig { /// Maximum transactions per block pub max_transactions: NonZeroUsize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter - pub known_nodes_with_stake: Vec, + pub known_nodes_with_stake: Vec>, /// My own validator config, including my public key, private key, stake value, serving as private parameter pub my_own_validator_config: ValidatorConfig, /// List of DA committee nodes for static DA committe diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 4816291ff7..7d0b5d7dff 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -28,7 +28,7 @@ pub type StateSignKey = schnorr::SignKey; pub type PublicInput = GenericPublicInput; /// Key pairs for signing/verifying a light client state #[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] -pub struct StateKeyPair(schnorr::KeyPair); +pub struct StateKeyPair(pub schnorr::KeyPair); /// Request body to send to the state relay server #[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize)] diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 3499b50e3d..a69161f8b7 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -5,7 +5,7 @@ use super::node_implementation::NodeType; -use crate::traits::signature_key::SignatureKey; +use crate::{traits::signature_key::SignatureKey, PeerConfig}; use snafu::Snafu; use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; @@ -43,7 +43,7 @@ pub trait Membership: /// create an election /// TODO may want to move this to a testableelection trait fn create_election( - entries: Vec<::StakeTableEntry>, + entries: Vec>, config: TYPES::ElectionConfigType, ) -> Self; From 13c8055d1a0bc5fd65acd14d0a76041fd899613e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 27 Feb 2024 05:58:10 -0800 Subject: [PATCH 0814/1393] [CATCHUP] - Fix state initialization upon restarting (#2663) * Use genesis state for restarting and support app-specified state for reload * Fix typo * Remove reference --- hotshot/src/lib.rs | 26 ++++++++++++++++++++++---- testing/src/spinning_task.rs | 1 + 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 26db8d482e..0e1faafd45 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -185,11 +185,17 @@ impl> SystemContext { .await .context(StorageSnafu)?; - // insert genesis (or latest block) to state map + // Get the validated state from the initializer or construct an incomplete one from the + // block header. + let validated_state = match initializer.validated_state { + Some(state) => state, + None => Arc::new(TYPES::ValidatedState::from_header( + &anchored_leaf.block_header, + )), + }; + + // Insert the validated state to state map. let mut validated_state_map = BTreeMap::default(); - let validated_state = Arc::new(TYPES::ValidatedState::from_header( - &anchored_leaf.block_header, - )); validated_state_map.insert( anchored_leaf.get_view_number(), View { @@ -612,6 +618,12 @@ pub struct HotShotInitializer { /// Instance-level state. instance_state: TYPES::InstanceState, + /// Optional validated state. + /// + /// If it's given, we'll use it to constrcut the `SystemContext`. Otherwise, we'll construct + /// the state from the block header. + validated_state: Option>, + /// Starting view number that we are confident won't lead to a double vote after restart. start_view: TYPES::Time, } @@ -621,9 +633,11 @@ impl HotShotInitializer { /// # Errors /// If we are unable to apply the genesis block to the default state pub fn from_genesis(instance_state: TYPES::InstanceState) -> Result> { + let validated_state = Some(Arc::new(TYPES::ValidatedState::genesis(&instance_state))); Ok(Self { inner: Leaf::genesis(&instance_state), instance_state, + validated_state, start_view: TYPES::Time::new(0), }) } @@ -633,14 +647,18 @@ impl HotShotInitializer { /// # Arguments /// * `start_view` - The minimum view number that we are confident won't lead to a double vote /// after restart. + /// * `validated_state` - Optional validated state that if given, will be used to constrcut the + /// `SystemContext`. pub fn from_reload( anchor_leaf: Leaf, instance_state: TYPES::InstanceState, + validated_state: Option>, start_view: TYPES::Time, ) -> Self { Self { inner: anchor_leaf, instance_state, + validated_state, start_view, } } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 8f37ec0dba..1de2267211 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -103,6 +103,7 @@ where let initializer = HotShotInitializer::::from_reload( state.last_decided_leaf.clone(), TestInstanceState {}, + None, view_number, ); // We assign node's public key and stake value rather than read from config file since it's a test From d21ce2851f44de267f72b7a7627eddba2438c18e Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 27 Feb 2024 16:05:14 +0100 Subject: [PATCH 0815/1393] Received direct and broadcast messages are handled in the same way (#2641) * Poll direct messages and broadcast messages in parallel * Merge broadcast and direct message channels * Merge broadcast and direct message queues in web impl * Merge identical match arms --- hotshot/src/tasks/mod.rs | 32 +-- hotshot/src/traits/networking.rs | 12 +- .../src/traits/networking/combined_network.rs | 7 +- .../src/traits/networking/libp2p_network.rs | 83 +++----- .../src/traits/networking/memory_network.rs | 193 +++++------------- .../traits/networking/web_server_network.rs | 79 +++---- testing/tests/memory_network.rs | 16 +- types/src/traits/network.rs | 1 - 8 files changed, 119 insertions(+), 304 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index cefe1ecd4a..5ef1926478 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -28,7 +28,7 @@ use hotshot_types::{ use hotshot_types::{ message::Messages, traits::{ - network::{ConsensusIntentEvent, TransmitType}, + network::ConsensusIntentEvent, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; @@ -63,12 +63,12 @@ pub async fn add_network_message_task< // https://github.com/EspressoSystems/HotShot/issues/2377 let network = net.clone(); let mut state = network_state.clone(); - let broadcast_handle = async_spawn(async move { + let handle = async_spawn(async move { loop { - let msgs = match network.recv_msgs(TransmitType::Broadcast).await { + let msgs = match network.recv_msgs().await { Ok(msgs) => Messages(msgs), Err(err) => { - error!("failed to receive broadcast messages: {err}"); + error!("failed to receive messages: {err}"); // return zero messages so we sleep and try again Messages(vec![]) @@ -82,29 +82,7 @@ pub async fn add_network_message_task< } } }); - let network = net.clone(); - let mut state = network_state.clone(); - let direct_handle = async_spawn(async move { - loop { - let msgs = match network.recv_msgs(TransmitType::Direct).await { - Ok(msgs) => Messages(msgs), - Err(err) => { - error!("failed to receive direct messages: {err}"); - - // return zero messages so we sleep and try again - Messages(vec![]) - } - }; - if msgs.0.is_empty() { - // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 - async_sleep(Duration::from_millis(100)).await; - } else { - state.handle_messages(msgs.0).await; - } - } - }); - task_reg.register(direct_handle).await; - task_reg.register(broadcast_handle).await; + task_reg.register(handle).await; } /// Add the network task to handle events and send messages. pub async fn add_network_event_task< diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index f372f1c111..61482f2bd9 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -26,10 +26,8 @@ pub struct NetworkingMetricsValue { #[allow(dead_code)] /// A [`Gauge`] which tracks how many peers are connected pub connected_peers: Box, - /// A [`Counter`] which tracks how many messages have been received directly - pub incoming_direct_message_count: Box, - /// A [`Counter`] which tracks how many messages have been received by broadcast - pub incoming_broadcast_message_count: Box, + /// A [`Counter`] which tracks how many messages have been received + pub incoming_message_count: Box, /// A [`Counter`] which tracks how many messages have been send directly pub outgoing_direct_message_count: Box, /// A [`Counter`] which tracks how many messages have been send by broadcast @@ -163,10 +161,8 @@ impl NetworkingMetricsValue { pub fn new(metrics: &dyn Metrics) -> Self { Self { connected_peers: metrics.create_gauge(String::from("connected_peers"), None), - incoming_direct_message_count: metrics - .create_counter(String::from("incoming_direct_message_count"), None), - incoming_broadcast_message_count: metrics - .create_counter(String::from("incoming_broadcast_message_count"), None), + incoming_message_count: metrics + .create_counter(String::from("incoming_message_count"), None), outgoing_direct_message_count: metrics .create_counter(String::from("outgoing_direct_message_count"), None), outgoing_broadcast_message_count: metrics diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index bb064e6b34..c064c635ab 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -26,7 +26,7 @@ use hotshot_types::{ data::ViewNumber, message::Message, traits::{ - network::{ConnectedNetwork, ConsensusIntentEvent, TransmitType}, + network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::NodeType, }, BoxSyncFuture, @@ -319,7 +319,6 @@ impl ConnectedNetwork, TYPES::SignatureKey> fn recv_msgs<'a, 'b>( &'a self, - transmit_type: TransmitType, ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, @@ -328,8 +327,8 @@ impl ConnectedNetwork, TYPES::SignatureKey> // recv on both networks because nodes may be accessible only on either. discard duplicates // TODO: improve this algorithm: https://github.com/EspressoSystems/HotShot/issues/2089 let closure = async move { - let mut primary_msgs = self.primary().recv_msgs(transmit_type).await?; - let mut secondary_msgs = self.secondary().recv_msgs(transmit_type).await?; + let mut primary_msgs = self.primary().recv_msgs().await?; + let mut secondary_msgs = self.secondary().recv_msgs().await?; primary_msgs.append(secondary_msgs.as_mut()); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 03c570de70..aaac1fb152 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -22,7 +22,7 @@ use hotshot_types::{ traits::{ network::{ ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, - NetworkMsg, TransmitType, ViewMessage, + NetworkMsg, ViewMessage, }, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -97,13 +97,9 @@ struct Libp2pNetworkInner { /// handle to control the network handle: Arc>, /// map of known replica peer ids to public keys - broadcast_recv: UnboundedReceiver, + receiver: UnboundedReceiver, /// Sender for broadcast messages - broadcast_send: UnboundedSender, - /// Sender for direct messages (only used for sending messages back to oneself) - direct_send: UnboundedSender, - /// Receiver for direct messages - direct_recv: UnboundedReceiver, + sender: UnboundedSender, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) node_lookup_send: UnboundedSender>, /// this is really cheating to enable local tests @@ -351,18 +347,15 @@ impl Libp2pNetwork { // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs - let (direct_send, direct_recv) = unbounded(); - let (broadcast_send, broadcast_recv) = unbounded(); + let (sender, receiver) = unbounded(); let (node_lookup_send, node_lookup_recv) = unbounded(); let mut result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: network_handle, - broadcast_recv, - direct_send: direct_send.clone(), - direct_recv, + receiver, + sender: sender.clone(), pk, - broadcast_send: broadcast_send.clone(), bootstrap_addrs_len, bootstrap_addrs, is_ready: Arc::new(AtomicBool::new(false)), @@ -383,7 +376,7 @@ impl Libp2pNetwork { }), }; - result.handle_event_generator(direct_send, broadcast_send); + result.handle_event_generator(sender); result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); @@ -514,14 +507,13 @@ impl Libp2pNetwork { async fn handle_recvd_events_0_1( &self, msg: NetworkEvent, - direct_send: &UnboundedSender, - broadcast_send: &UnboundedSender, + sender: &UnboundedSender, ) -> Result<(), NetworkError> { match msg { GossipMsg(msg, _) => { let result: Result = bincode_opts().deserialize(&msg); if let Ok(result) = result { - broadcast_send + sender .send(result) .await .map_err(|_| NetworkError::ChannelSend)?; @@ -532,7 +524,7 @@ impl Libp2pNetwork { .deserialize(&msg) .context(FailedToSerializeSnafu); if let Ok(result) = result { - direct_send + sender .send(result) .await .map_err(|_| NetworkError::ChannelSend)?; @@ -568,8 +560,7 @@ impl Libp2pNetwork { /// terminates on shut down of network fn handle_event_generator( &self, - direct_send: UnboundedSender, - broadcast_send: UnboundedSender, + sender: UnboundedSender, ) { let handle = self.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); @@ -584,7 +575,7 @@ impl Libp2pNetwork { match message_version { Some(VERSION_0_1) => { let _ = handle - .handle_recvd_events_0_1(message, &direct_send, &broadcast_send) + .handle_recvd_events_0_1(message, &sender) .await; } Some(version) => { @@ -675,7 +666,7 @@ impl ConnectedNetwork for Libp2p if recipients.contains(&self.inner.pk) { // send to self self.inner - .broadcast_send + .sender .send(message.clone()) .await .map_err(|_| NetworkError::ShutDown)?; @@ -763,7 +754,7 @@ impl ConnectedNetwork for Libp2p if recipient == self.inner.pk { // panic if we already shut down? self.inner - .direct_send + .sender .send(message) .await .map_err(|_x| NetworkError::ShutDown)?; @@ -830,7 +821,6 @@ impl ConnectedNetwork for Libp2p #[instrument(name = "Libp2pNetwork::recv_msgs", skip_all)] fn recv_msgs<'a, 'b>( &'a self, - transmit_type: TransmitType, ) -> BoxSyncFuture<'b, Result, NetworkError>> where 'a: 'b, @@ -840,40 +830,17 @@ impl ConnectedNetwork for Libp2p if self.inner.handle.is_killed() { Err(NetworkError::ShutDown) } else { - match transmit_type { - TransmitType::Direct => { - let result = self - .inner - .direct_recv - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner - .metrics - .incoming_direct_message_count - .add(result.len()); - Ok(result) - } - TransmitType::Broadcast => { - let result = self - .inner - .broadcast_recv - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner - .metrics - .incoming_direct_message_count - .add(result.len()); - Ok(result) - } - TransmitType::DACommitteeBroadcast => { - error!("Received DACommitteeBroadcast, it should have not happened."); - Err(NetworkError::Libp2p { - source: NetworkNodeHandleError::Killed, - }) - } - } + let result = self + .inner + .receiver + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner + .metrics + .incoming_message_count + .add(result.len()); + Ok(result) } }; boxed_sync(closure) diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 85371b584d..f7aaf04392 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -6,19 +6,18 @@ use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; use async_compatibility_layer::{ art::async_spawn, - channel::{bounded, Receiver, SendError, Sender}, + channel::{bounded, Receiver, SendError, Sender, BoundedStream}, }; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bincode::Options; use dashmap::DashMap; -use futures::StreamExt; -use hotshot_types::traits::network::MemoryNetworkError; +use futures::{StreamExt}; use hotshot_types::{ boxed_sync, message::Message, traits::{ - network::{ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation, TransmitType}, + network::{ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -61,25 +60,13 @@ impl MasterMap { } } -/// Internal enum for combining streams -enum Combo { - /// Direct message - Direct(T), - /// Broadcast message - Broadcast(T), -} - /// Internal state for a `MemoryNetwork` instance #[derive(Debug)] struct MemoryNetworkInner { - /// Input for broadcast messages - broadcast_input: RwLock>>>, - /// Input for direct messages - direct_input: RwLock>>>, - /// Output for broadcast messages - broadcast_output: Mutex>, - /// Output for direct messages - direct_output: Mutex>, + /// Input for messages + input: RwLock>>>, + /// Output for messages + output: Mutex>, /// The master map master_map: Arc>, @@ -124,68 +111,36 @@ impl MemoryNetwork { reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); - let (broadcast_input, broadcast_task_recv) = bounded(128); - let (direct_input, direct_task_recv) = bounded(128); - let (broadcast_task_send, broadcast_output) = bounded(128); - let (direct_task_send, direct_output) = bounded(128); + let (input, task_recv) = bounded(128); + let (task_send, output) = bounded(128); let in_flight_message_count = AtomicUsize::new(0); trace!("Channels open, spawning background task"); async_spawn( async move { debug!("Starting background task"); - // direct input is right stream - let direct = direct_task_recv.into_stream().map(Combo::>::Direct); - // broadcast input is left stream - let broadcast = broadcast_task_recv - .into_stream() - .map(Combo::>::Broadcast); - // Combine the streams - let mut combined = futures::stream::select(direct, broadcast); + let mut task_stream: BoundedStream> = task_recv.into_stream(); trace!("Entering processing loop"); - while let Some(message) = combined.next().await { - match message { - Combo::Direct(vec) => { - trace!(?vec, "Incoming direct message"); - // Attempt to decode message - let x = bincode_opts().deserialize(&vec); - match x { - Ok(x) => { - let dts = direct_task_send.clone(); - let res = dts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - error!("Output queue receivers are shutdown"); - } - } - Err(e) => { - warn!(?e, "Failed to decode incoming message, skipping"); - } + while let Some(vec) = task_stream.next().await { + trace!(?vec, "Incoming message"); + // Attempt to decode message + let x = bincode_opts().deserialize(&vec); + match x { + Ok(x) => { + let ts = task_send.clone(); + let res = ts.send(x).await; + if res.is_ok() { + trace!("Passed message to output queue"); + } else { + error!("Output queue receivers are shutdown"); } } - Combo::Broadcast(vec) => { - trace!(?vec, "Incoming broadcast message"); - // Attempt to decode message - let x = bincode_opts().deserialize(&vec); - match x { - Ok(x) => { - let bts = broadcast_task_send.clone(); - let res = bts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - warn!("dropping packet!"); - } - } - Err(e) => { - warn!(?e, "Failed to decode incoming message, skipping"); - } - } + Err(e) => { + warn!(?e, "Failed to decode incoming message, skipping"); } } + warn!("Stream shutdown"); } - warn!("Stream shutdown"); } .instrument(info_span!("MemoryNetwork Background task", map = ?master_map)), ); @@ -193,10 +148,8 @@ impl MemoryNetwork { trace!("Task spawned, creating MemoryNetwork"); let mn = MemoryNetwork { inner: Arc::new(MemoryNetworkInner { - broadcast_input: RwLock::new(Some(broadcast_input)), - direct_input: RwLock::new(Some(direct_input)), - broadcast_output: Mutex::new(broadcast_output), - direct_output: Mutex::new(direct_output), + input: RwLock::new(Some(input)), + output: Mutex::new(output), master_map: master_map.clone(), in_flight_message_count, metrics, @@ -209,26 +162,12 @@ impl MemoryNetwork { mn } - /// Send a [`Vec`] message to the inner `broadcast_input` - async fn broadcast_input(&self, message: Vec) -> Result<(), SendError>> { + /// Send a [`Vec`] message to the inner `input` + async fn input(&self, message: Vec) -> Result<(), SendError>> { self.inner .in_flight_message_count .fetch_add(1, Ordering::Relaxed); - let input = self.inner.broadcast_input.read().await; - if let Some(input) = &*input { - self.inner.metrics.outgoing_broadcast_message_count.add(1); - input.send(message).await - } else { - Err(SendError(message)) - } - } - - /// Send a [`Vec`] message to the inner `direct_input` - async fn direct_input(&self, message: Vec) -> Result<(), SendError>> { - self.inner - .in_flight_message_count - .fetch_add(1, Ordering::Relaxed); - let input = self.inner.direct_input.read().await; + let input = self.inner.input.read().await; if let Some(input) = &*input { self.inner.metrics.outgoing_direct_message_count.add(1); input.send(message).await @@ -295,8 +234,7 @@ impl ConnectedNetwork for Memory Self: 'b, { let closure = async move { - *self.inner.broadcast_input.write().await = None; - *self.inner.direct_input.write().await = None; + *self.inner.input.write().await = None; }; boxed_sync(closure) } @@ -328,7 +266,7 @@ impl ConnectedNetwork for Memory Arc::new(move |msg: Vec| { let node3 = (node2).clone(); boxed_sync(async move { - let _res = node3.broadcast_input(msg).await; + let _res = node3.input(msg).await; // NOTE we're dropping metrics here but this is only for testing // purposes. I think that should be okay }) @@ -337,7 +275,7 @@ impl ConnectedNetwork for Memory async_spawn(fut); } } else { - let res = node.broadcast_input(vec.clone()).await; + let res = node.input(vec.clone()).await; match res { Ok(()) => { self.inner.metrics.outgoing_broadcast_message_count.add(1); @@ -379,7 +317,7 @@ impl ConnectedNetwork for Memory Arc::new(move |msg: Vec| { let node2 = node.clone(); boxed_sync(async move { - let _res = node2.direct_input(msg).await; + let _res = node2.input(msg).await; // NOTE we're dropping metrics here but this is only for testing // purposes. I think that should be okay }) @@ -389,7 +327,7 @@ impl ConnectedNetwork for Memory } Ok(()) } else { - let res = node.direct_input(vec).await; + let res = node.input(vec).await; match res { Ok(()) => { self.inner.metrics.outgoing_direct_message_count.add(1); @@ -416,57 +354,28 @@ impl ConnectedNetwork for Memory #[instrument(name = "MemoryNetwork::recv_msgs", skip_all)] fn recv_msgs<'a, 'b>( &'a self, - transmit_type: TransmitType, ) -> BoxSyncFuture<'b, Result, NetworkError>> where 'a: 'b, Self: 'b, { let closure = async move { - match transmit_type { - TransmitType::Direct => { - let ret = self - .inner - .direct_output - .lock() - .await - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner - .in_flight_message_count - .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner - .metrics - .incoming_direct_message_count - .add(ret.len()); - Ok(ret) - } - TransmitType::Broadcast => { - let ret = self - .inner - .broadcast_output - .lock() - .await - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner - .in_flight_message_count - .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner - .metrics - .incoming_broadcast_message_count - .add(ret.len()); - Ok(ret) - } - TransmitType::DACommitteeBroadcast => { - error!("Received DACommitteeBroadcast, it should have not happened."); - Err(NetworkError::MemoryNetwork { - source: MemoryNetworkError::Stub, - }) - } - } + let ret = self + .inner + .output + .lock() + .await + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner + .in_flight_message_count + .fetch_sub(ret.len(), Ordering::Relaxed); + self.inner + .metrics + .incoming_message_count + .add(ret.len()); + Ok(ret) }; boxed_sync(closure) } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 505c93bac9..93f48e0656 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -19,7 +19,7 @@ use hotshot_types::{ traits::{ network::{ ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, - TestableNetworkingImplementation, TransmitType, WebServerNetworkError, + TestableNetworkingImplementation, WebServerNetworkError, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -169,10 +169,8 @@ impl TaskMap { struct Inner { /// Our own key _own_key: TYPES::SignatureKey, - /// Queue for broadcasted messages - broadcast_poll_queue_0_1: Arc>>>>, - /// Queue for direct messages - direct_poll_queue_0_1: Arc>>>>, + /// Queue for messages + poll_queue_0_1: Arc>>>>, /// Client is running running: AtomicBool, /// The web server connection is ready @@ -215,7 +213,7 @@ impl Inner { /// * `first_tx_index` - the index of the first transaction received from the server in the latest batch. /// * `tx_index` - the last transaction index we saw from the web server. async fn handle_tx_0_1(&self, tx: Vec, first_tx_index: u64, tx_index: &mut u64) { - let broadcast_poll_queue = &self.broadcast_poll_queue_0_1; + let poll_queue = &self.poll_queue_0_1; if first_tx_index > *tx_index + 1 { debug!( "missed txns from {} to {}", @@ -231,7 +229,7 @@ impl Inner { let deserialized_message = RecvMsg { message: Some(deserialized_message_inner), }; - broadcast_poll_queue + poll_queue .write() .await .push(deserialized_message.clone()); @@ -257,8 +255,7 @@ impl Inner { seen_proposals: &mut LruCache, seen_view_sync_certificates: &mut LruCache, ) -> bool { - let broadcast_poll_queue = &self.broadcast_poll_queue_0_1; - let direct_poll_queue = &self.direct_poll_queue_0_1; + let poll_queue = &self.poll_queue_0_1; if let Ok(deserialized_message_inner) = bincode::deserialize::>(&message) { let deserialized_message = RecvMsg { message: Some(deserialized_message_inner), @@ -269,7 +266,7 @@ impl Inner { } MessagePurpose::Proposal => { let proposal = deserialized_message.clone(); - broadcast_poll_queue.write().await.push(proposal); + poll_queue.write().await.push(proposal); // Only pushing the first proposal since we will soon only be allowing 1 proposal per view return true; @@ -279,7 +276,7 @@ impl Inner { let hash = hash(&proposal); // Only allow unseen proposals to be pushed to the queue if seen_proposals.put(hash, ()).is_none() { - broadcast_poll_queue.write().await.push(proposal); + poll_queue.write().await.push(proposal); } // Only pushing the first proposal since we will soon only be allowing 1 proposal per view @@ -289,14 +286,14 @@ impl Inner { let cert = deserialized_message.clone(); let hash = hash(&cert); if seen_view_sync_certificates.put(hash, ()).is_none() { - broadcast_poll_queue.write().await.push(cert); + poll_queue.write().await.push(cert); } return false; } - MessagePurpose::Vote | MessagePurpose::ViewSyncVote => { + MessagePurpose::Vote | MessagePurpose::ViewSyncVote | MessagePurpose::ViewSyncCertificate => { let vote = deserialized_message.clone(); *vote_index += 1; - direct_poll_queue.write().await.push(vote); + poll_queue.write().await.push(vote); return false; } @@ -305,7 +302,7 @@ impl Inner { "Received DAC from web server for view {} {}", view_number, self.is_da ); - broadcast_poll_queue + poll_queue .write() .await .push(deserialized_message.clone()); @@ -318,7 +315,7 @@ impl Inner { MessagePurpose::VidDisperse => { // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 - self.broadcast_poll_queue_0_1 + self.poll_queue_0_1 .write() .await .push(deserialized_message.clone()); @@ -326,15 +323,6 @@ impl Inner { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view return true; } - MessagePurpose::ViewSyncCertificate => { - // TODO ED Special case this for view sync - // TODO ED Need to add vote indexing to web server for view sync certs - let cert = deserialized_message.clone(); - *vote_index += 1; - broadcast_poll_queue.write().await.push(cert); - - return false; - } MessagePurpose::Internal => { error!("Received internal message in web server network"); @@ -343,7 +331,7 @@ impl Inner { } MessagePurpose::Upgrade => { - broadcast_poll_queue + poll_queue .write() .await .push(deserialized_message.clone()); @@ -636,8 +624,7 @@ impl WebServerNetwork { let client = surf_disco::Client::::new(url); let inner = Arc::new(Inner { - broadcast_poll_queue_0_1: Arc::default(), - direct_poll_queue_0_1: Arc::default(), + poll_queue_0_1: Arc::default(), running: AtomicBool::new(true), connected: AtomicBool::new(false), client, @@ -860,45 +847,25 @@ impl ConnectedNetwork, TYPES::Signatur } } - /// Moves out the entire queue of received messages of 'transmit_type` + /// Moves out the entire queue of received messages /// /// Will unwrap the underlying `NetworkMessage` /// blocking fn recv_msgs<'a, 'b>( &'a self, - transmit_type: TransmitType, ) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, { let closure = async move { - match transmit_type { - TransmitType::Direct => { - let mut queue = self.inner.direct_poll_queue_0_1.write().await; - Ok(queue - .drain(..) - .collect::>() - .iter() - .map(|x| x.get_message().unwrap()) - .collect()) - } - TransmitType::Broadcast => { - let mut queue = self.inner.broadcast_poll_queue_0_1.write().await; - Ok(queue - .drain(..) - .collect::>() - .iter() - .map(|x| x.get_message().unwrap()) - .collect()) - } - TransmitType::DACommitteeBroadcast => { - error!("Received DACommitteeBroadcast, it should have not happened."); - Err(NetworkError::WebServer { - source: WebServerNetworkError::ClientDisconnected, - }) - } - } + let mut queue = self.inner.poll_queue_0_1.write().await; + Ok(queue + .drain(..) + .collect::>() + .iter() + .map(|x| x.get_message().unwrap()) + .collect()) }; boxed_sync(closure) } diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 49c9a4081e..f2cf270ed7 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -18,7 +18,7 @@ use hotshot_example_types::{ use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::TestableNetworkingImplementation; -use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; +use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ data::ViewNumber, @@ -188,7 +188,7 @@ async fn memory_network_direct_queue() { .await .expect("Failed to message node"); let mut recv_messages = network2 - .recv_msgs(TransmitType::Direct) + .recv_msgs() .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); @@ -206,7 +206,7 @@ async fn memory_network_direct_queue() { .await .expect("Failed to message node"); let mut recv_messages = network1 - .recv_msgs(TransmitType::Direct) + .recv_msgs() .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); @@ -255,7 +255,7 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to message node"); let mut recv_messages = network2 - .recv_msgs(TransmitType::Broadcast) + .recv_msgs() .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); @@ -276,7 +276,7 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to message node"); let mut recv_messages = network1 - .recv_msgs(TransmitType::Broadcast) + .recv_msgs() .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); @@ -339,15 +339,15 @@ async fn memory_network_test_in_flight_message_count() { } while network1.in_flight_message_count().unwrap() > 0 { - network1.recv_msgs(TransmitType::Broadcast).await.unwrap(); + network1.recv_msgs().await.unwrap(); } while network2.in_flight_message_count().unwrap() > messages.len() { - network2.recv_msgs(TransmitType::Direct).await.unwrap(); + network2.recv_msgs().await.unwrap(); } while network2.in_flight_message_count().unwrap() > 0 { - network2.recv_msgs(TransmitType::Broadcast).await.unwrap(); + network2.recv_msgs().await.unwrap(); } assert_eq!(network1.in_flight_message_count(), Some(0)); diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index d1fed267a1..cd87548391 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -283,7 +283,6 @@ pub trait ConnectedNetwork: /// blocking fn recv_msgs<'a, 'b>( &'a self, - transmit_type: TransmitType, ) -> BoxSyncFuture<'b, Result, NetworkError>> where 'a: 'b, From e72196b59f78f5c37642edadb1391a7d4e949f47 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Feb 2024 10:12:40 -0500 Subject: [PATCH 0816/1393] Bump dyn-clone from 1.0.16 to 1.0.17 (#2664) Bumps [dyn-clone](https://github.com/dtolnay/dyn-clone) from 1.0.16 to 1.0.17. - [Release notes](https://github.com/dtolnay/dyn-clone/releases) - [Commits](https://github.com/dtolnay/dyn-clone/compare/f2f0a02f1f7190048153e5ea8f554db7377a50a9...51bf8816be5a73e38b59fd4d9dda2bc18e9c2429) --- updated-dependencies: - dependency-name: dyn-clone dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/Cargo.toml b/types/Cargo.toml index a9e6801b5b..2186379f0f 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -25,7 +25,7 @@ custom_debug = { workspace = true } derivative = "2.2.0" digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.16" } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } either = { workspace = true, features = ["serde"] } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } From da969318e4021b5d693b35f3c6e291acd004224c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Feb 2024 10:12:57 -0500 Subject: [PATCH 0817/1393] Bump syn from 2.0.50 to 2.0.51 (#2665) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.50 to 2.0.51. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.50...2.0.51) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- testing-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 62cb5a176e..e23c47b1af 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ tracing = { workspace = true } serde = { workspace = true } # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.50", features = ["full", "extra-traits"] } +syn = { version = "2.0.51", features = ["full", "extra-traits"] } proc-macro2 = "1.0.78" derive_builder = "0.13.1" From d3482247aa508f3179cfdf6342be5493bd0ac1c4 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 27 Feb 2024 16:12:08 -0500 Subject: [PATCH 0818/1393] Fix cargo fmt check on CI (#2669) * error if cargo fmt fails * fix formatting --- .../src/traits/networking/combined_network.rs | 4 +--- .../src/traits/networking/libp2p_network.rs | 18 ++++----------- .../src/traits/networking/memory_network.rs | 13 ++++------- .../traits/networking/web_server_network.rs | 23 ++++++------------- testing/tests/memory_network.rs | 2 +- types/src/traits/network.rs | 4 +--- 6 files changed, 18 insertions(+), 46 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index c064c635ab..5fb08e34e5 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -317,9 +317,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> self.secondary().direct_message(message, recipient).await } - fn recv_msgs<'a, 'b>( - &'a self, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> + fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index aaac1fb152..6f38f4c20f 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -558,10 +558,7 @@ impl Libp2pNetwork { /// task to propagate messages to handlers /// terminates on shut down of network - fn handle_event_generator( - &self, - sender: UnboundedSender, - ) { + fn handle_event_generator(&self, sender: UnboundedSender) { let handle = self.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); async_spawn(async move { @@ -574,9 +571,7 @@ impl Libp2pNetwork { let message_version = read_version(raw); match message_version { Some(VERSION_0_1) => { - let _ = handle - .handle_recvd_events_0_1(message, &sender) - .await; + let _ = handle.handle_recvd_events_0_1(message, &sender).await; } Some(version) => { warn!( @@ -819,9 +814,7 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::recv_msgs", skip_all)] - fn recv_msgs<'a, 'b>( - &'a self, - ) -> BoxSyncFuture<'b, Result, NetworkError>> + fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result, NetworkError>> where 'a: 'b, Self: 'b, @@ -836,10 +829,7 @@ impl ConnectedNetwork for Libp2p .drain_at_least_one() .await .map_err(|_x| NetworkError::ShutDown)?; - self.inner - .metrics - .incoming_message_count - .add(result.len()); + self.inner.metrics.incoming_message_count.add(result.len()); Ok(result) } }; diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index f7aaf04392..210c80a065 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -6,13 +6,13 @@ use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; use async_compatibility_layer::{ art::async_spawn, - channel::{bounded, Receiver, SendError, Sender, BoundedStream}, + channel::{bounded, BoundedStream, Receiver, SendError, Sender}, }; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bincode::Options; use dashmap::DashMap; -use futures::{StreamExt}; +use futures::StreamExt; use hotshot_types::{ boxed_sync, message::Message, @@ -352,9 +352,7 @@ impl ConnectedNetwork for Memory } #[instrument(name = "MemoryNetwork::recv_msgs", skip_all)] - fn recv_msgs<'a, 'b>( - &'a self, - ) -> BoxSyncFuture<'b, Result, NetworkError>> + fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result, NetworkError>> where 'a: 'b, Self: 'b, @@ -371,10 +369,7 @@ impl ConnectedNetwork for Memory self.inner .in_flight_message_count .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner - .metrics - .incoming_message_count - .add(ret.len()); + self.inner.metrics.incoming_message_count.add(ret.len()); Ok(ret) }; boxed_sync(closure) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 93f48e0656..d10600acef 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -229,10 +229,7 @@ impl Inner { let deserialized_message = RecvMsg { message: Some(deserialized_message_inner), }; - poll_queue - .write() - .await - .push(deserialized_message.clone()); + poll_queue.write().await.push(deserialized_message.clone()); } else { async_sleep(self.wait_between_polls).await; } @@ -290,7 +287,9 @@ impl Inner { } return false; } - MessagePurpose::Vote | MessagePurpose::ViewSyncVote | MessagePurpose::ViewSyncCertificate => { + MessagePurpose::Vote + | MessagePurpose::ViewSyncVote + | MessagePurpose::ViewSyncCertificate => { let vote = deserialized_message.clone(); *vote_index += 1; poll_queue.write().await.push(vote); @@ -302,10 +301,7 @@ impl Inner { "Received DAC from web server for view {} {}", view_number, self.is_da ); - poll_queue - .write() - .await - .push(deserialized_message.clone()); + poll_queue.write().await.push(deserialized_message.clone()); // Only pushing the first proposal since we will soon only be allowing 1 proposal per view // return if we found a DAC, since there will only be 1 per view @@ -331,10 +327,7 @@ impl Inner { } MessagePurpose::Upgrade => { - poll_queue - .write() - .await - .push(deserialized_message.clone()); + poll_queue.write().await.push(deserialized_message.clone()); return true; } @@ -851,9 +844,7 @@ impl ConnectedNetwork, TYPES::Signatur /// /// Will unwrap the underlying `NetworkMessage` /// blocking - fn recv_msgs<'a, 'b>( - &'a self, - ) -> BoxSyncFuture<'b, Result>, NetworkError>> + fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result>, NetworkError>> where 'a: 'b, Self: 'b, diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index f2cf270ed7..ea7b502c81 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -17,8 +17,8 @@ use hotshot_example_types::{ }; use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; -use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::ConnectedNetwork; +use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ data::ViewNumber, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index cd87548391..f5465129ce 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -281,9 +281,7 @@ pub trait ConnectedNetwork: /// /// Will unwrap the underlying `NetworkMessage` /// blocking - fn recv_msgs<'a, 'b>( - &'a self, - ) -> BoxSyncFuture<'b, Result, NetworkError>> + fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result, NetworkError>> where 'a: 'b, Self: 'b; From 90642f67d1fa6b8989bc5cfe7d7897959420f925 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 27 Feb 2024 14:08:26 -0800 Subject: [PATCH 0819/1393] Clean up header genesis (#2668) Previously, `Header::genesis` was required to also return a payload and metadata. This is redundant, because we also have `Payload::genesis`. In addition, `Header::genesis` was expected to compute a genesis VID commitment, since a VID commitment is required to construct a header. This breaks abstraction, since in all other cases only HotShot computes VID stuff, never the application. It is also error prone: I am adding a feature to the query service to handle the genesis VID case, where we don't receive VID data from HotShot. This feature requires that the genesis VID is computed uniformly regardless of the application. Currently, we use 8 storage nodes for this commitment in the HotShot example types (quite arbitrarily) and 1 in the sequencer types. This change standardizes the computation of the genesis VID commitment. --- example-types/src/block_types.rs | 35 ++++++++---------------------- example-types/src/state_types.rs | 6 ++++- testing/tests/storage.rs | 26 ++++++++-------------- types/src/data.rs | 20 ++++++++++++++--- types/src/traits/block_contents.rs | 14 +++++++----- types/src/traits/states.rs | 4 +--- 6 files changed, 50 insertions(+), 55 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 834e0646c0..1fad37bfcf 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -5,9 +5,9 @@ use std::{ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::{BlockError, VidCommitment, VidScheme, VidSchemeTrait}, + data::{BlockError, VidCommitment}, traits::{ - block_contents::{vid_commitment, BlockHeader, TestableBlock, Transaction}, + block_contents::{BlockHeader, TestableBlock, Transaction}, BlockPayload, ValidatedState, }, utils::BuilderCommitment, @@ -170,16 +170,6 @@ impl BlockPayload for TestBlockPayload { } } -/// Computes the (empty) genesis VID commitment -/// The number of storage nodes does not do anything, unless in the future we add fake transactions -/// to the genesis payload. -/// -/// In that case, the payloads may mismatch and cause problems. -#[must_use] -pub fn genesis_vid_commitment() -> ::Commit { - vid_commitment(&vec![], 8) -} - /// A [`BlockHeader`] that commits to [`TestBlockPayload`]. #[derive(PartialEq, Eq, Hash, Clone, Debug, Deserialize, Serialize)] pub struct TestBlockHeader { @@ -208,20 +198,13 @@ impl BlockHeader for TestBlockHeader { fn genesis( _instance_state: &::Instance, - ) -> ( - Self, - Self::Payload, - ::Metadata, - ) { - let (payload, metadata) = ::genesis(); - ( - Self { - block_number: 0, - payload_commitment: genesis_vid_commitment(), - }, - payload, - metadata, - ) + payload_commitment: VidCommitment, + _metadata: ::Metadata, + ) -> Self { + Self { + block_number: 0, + payload_commitment, + } } fn block_number(&self) -> u64 { diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 7d2c6794b5..d7e356e301 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -17,7 +17,7 @@ use crate::block_types::{TestBlockHeader, TestBlockPayload}; pub use crate::node_types::TestTypes; /// Instance-level state implementation for testing purposes. -#[derive(Clone, Debug)] +#[derive(Clone, Copy, Debug, Default)] pub struct TestInstanceState {} impl InstanceState for TestInstanceState {} @@ -84,6 +84,10 @@ impl ValidatedState for TestValidatedState { } fn on_commit(&self) {} + + fn genesis(_instance: &Self::Instance) -> Self { + Self::default() + } } impl TestableState for TestValidatedState { diff --git a/testing/tests/storage.rs b/testing/tests/storage.rs index da26afbd23..89676b6f85 100644 --- a/testing/tests/storage.rs +++ b/testing/tests/storage.rs @@ -1,12 +1,9 @@ use commit::Committable; use hotshot::traits::implementations::MemoryStorage; use hotshot::traits::Storage; -use hotshot_example_types::{ - block_types::{genesis_vid_commitment, TestBlockHeader, TestBlockPayload}, - node_types::TestTypes, -}; +use hotshot_example_types::node_types::TestTypes; use hotshot_types::{ - data::{fake_commitment, Leaf}, + data::Leaf, simple_certificate::QuorumCertificate, traits::{ node_implementation::{ConsensusTime, NodeType}, @@ -18,15 +15,10 @@ use std::marker::PhantomData; use tracing::instrument; fn random_stored_view(view_number: ::Time) -> StoredView { - let payload = TestBlockPayload::genesis(); - let header = TestBlockHeader { - block_number: 0, - payload_commitment: genesis_vid_commitment(), - }; - let dummy_leaf_commit = fake_commitment::>(); - let data = hotshot_types::simple_vote::QuorumData { - leaf_commit: dummy_leaf_commit, - }; + let mut leaf = Leaf::genesis(&Default::default()); + leaf.view_number = view_number; + let leaf_commit = leaf.commit(); + let data = hotshot_types::simple_vote::QuorumData { leaf_commit }; let commit = data.commit(); StoredView::from_qc_block_and_state( QuorumCertificate { @@ -37,9 +29,9 @@ fn random_stored_view(view_number: ::Time) -> StoredView< view_number, _pd: PhantomData, }, - header, - Some(payload), - dummy_leaf_commit, + leaf.block_header, + leaf.block_payload, + leaf_commit, <::SignatureKey as SignatureKey>::genesis_proposer_pk(), ) } diff --git a/types/src/data.rs b/types/src/data.rs index ecc163e4fc..abea971a14 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -7,7 +7,9 @@ use crate::{ simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::UpgradeProposalData, traits::{ - block_contents::{vid_commitment, BlockHeader, TestableBlock}, + block_contents::{ + vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, + }, election::Membership, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -330,15 +332,27 @@ impl Display for Leaf { impl Leaf { /// Create a new leaf from its components. + /// + /// # Panics + /// + /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be + /// interpreted as bytes). #[must_use] pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { - let (block_header, block_payload, _) = TYPES::BlockHeader::genesis(instance_state); + let (payload, metadata) = TYPES::BlockPayload::genesis(); + let payload_bytes = payload + .encode() + .expect("unable to encode genesis payload") + .collect(); + let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + let block_header = + TYPES::BlockHeader::genesis(instance_state, payload_commitment, metadata); Self { view_number: TYPES::Time::genesis(), justify_qc: QuorumCertificate::::genesis(), parent_commitment: fake_commitment(), block_header: block_header.clone(), - block_payload: Some(block_payload), + block_payload: Some(payload), proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 8c5617803e..380c875102 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -109,6 +109,12 @@ pub fn vid_commitment( vid.commit_only(encoded_transactions).unwrap() } +/// The number of storage nodes to use when computing the genesis VID commitment. +/// +/// The number of storage nodes for the genesis VID commitment is arbitrary, since we don't actually +/// do dispersal for the genesis block. For simplicity and performance, we use 1. +pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; + /// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable @@ -132,11 +138,9 @@ pub trait BlockHeader: /// Build the genesis header, payload, and metadata. fn genesis( instance_state: &::Instance, - ) -> ( - Self, - Self::Payload, - ::Metadata, - ); + payload_commitment: VidCommitment, + metadata: ::Metadata, + ) -> Self; /// Get the block number. fn block_number(&self) -> u64; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 522793aa1f..8889f5691c 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -59,9 +59,7 @@ pub trait ValidatedState: /// Construct a genesis validated state. #[must_use] - fn genesis(instance: &Self::Instance) -> Self { - Self::from_header(&Self::BlockHeader::genesis(instance).0) - } + fn genesis(instance: &Self::Instance) -> Self; /// Gets called to notify the persistence backend that this state has been committed fn on_commit(&self); From 1587a5de6c54974cb04d8f80d78bc5a7fba83091 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 28 Feb 2024 10:30:03 -0500 Subject: [PATCH 0820/1393] chore: proper VidScheme constructor (#2670) * hello-world vid_scheme constructor * WIP comments for later * switch to newtype to hide impl details of VidScheme * replace types::data::VidScheme -> types::vid::VidSchemeType * clippy pacification * move VidCommitment from data.rs to vid.rs * new type aliases VidCommon, VidShare * fix downstream build warnings (our CI should check non-test builds too) * tidy comments and code * lint pacification * delete test_srs, move its body to vid_scheme * rename {Namespace|Transaction}ProofType -> {Large|Small}RangeProofType * use lazy_static for KZG SRS --- example-types/Cargo.toml | 1 - example-types/src/block_types.rs | 3 +- .../src/traits/networking/libp2p_network.rs | 15 +- libp2p-networking/Cargo.toml | 2 +- task-impls/Cargo.toml | 1 + task-impls/src/consensus.rs | 3 +- task-impls/src/events.rs | 3 +- task-impls/src/vid.rs | 24 +- testing/Cargo.toml | 6 +- testing/src/overall_safety_task.rs | 3 +- testing/src/task_helpers.rs | 22 +- testing/tests/consensus_task.rs | 7 +- testing/tests/network_task.rs | 7 +- testing/tests/vid_task.rs | 7 +- types/Cargo.toml | 2 + types/src/data.rs | 40 +-- types/src/lib.rs | 1 + types/src/simple_vote.rs | 3 +- types/src/traits/block_contents.rs | 15 +- types/src/utils.rs | 5 +- types/src/vid.rs | 270 ++++++++++++++++++ 21 files changed, 335 insertions(+), 105 deletions(-) create mode 100644 types/src/vid.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index eddc52b821..50282613f4 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -38,4 +38,3 @@ hotshot-task = { path = "../task" } tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } - diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 1fad37bfcf..09be429a0c 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -5,12 +5,13 @@ use std::{ use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::{BlockError, VidCommitment}, + data::BlockError, traits::{ block_contents::{BlockHeader, TestableBlock, Transaction}, BlockPayload, ValidatedState, }, utils::BuilderCommitment, + vid::VidCommitment, }; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6f38f4c20f..2122c19f90 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -13,22 +13,27 @@ use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; use hotshot_constants::{Version, LOOK_AHEAD, VERSION_0_1}; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ boxed_sync, data::ViewNumber, - message::{Message, MessageKind}, traits::{ network::{ ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, - NetworkMsg, ViewMessage, + NetworkMsg, }, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::ConsensusTime, signature_key::SignatureKey, }, BoxSyncFuture, }; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::{ + message::{Message, MessageKind}, + traits::{ + network::{NetworkReliability, TestableNetworkingImplementation, ViewMessage}, + node_implementation::NodeType, + }, +}; use hotshot_utils::{bincode::bincode_opts, version::read_version}; use libp2p_identity::PeerId; #[cfg(feature = "hotshot-testing")] diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 1d157f15ff..f97ab08d9f 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -35,7 +35,7 @@ tide = { version = "0.16", optional = true, default-features = false, features = tracing = { workspace = true } void = "1.0.2" dashmap = "5.5.3" -lazy_static = "1.4.0" +lazy_static = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index cd01da8cc1..d4a29f63ca 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -16,6 +16,7 @@ tracing = { workspace = true } hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } +jf-primitives = { workspace = true } time = { workspace = true } commit = { workspace = true } bincode = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index cda79cd3bf..d39e559949 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -17,7 +17,7 @@ use async_broadcast::Sender; use hotshot_types::{ consensus::{Consensus, View}, - data::{Leaf, QuorumProposal, VidCommitment, VidDisperse}, + data::{Leaf, QuorumProposal, VidDisperse}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, @@ -33,6 +33,7 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, + vid::VidCommitment, vote::{Certificate, HasViewNumber}, }; use tracing::warn; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 1002c5d5e8..03a2a64a03 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -2,7 +2,7 @@ use crate::view_sync::ViewSyncPhase; use either::Either; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidCommitment, VidDisperse}, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, message::Proposal, simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -13,6 +13,7 @@ use hotshot_types::{ ViewSyncPreCommitVote, }, traits::{node_implementation::NodeType, BlockPayload}, + vid::VidCommitment, }; /// Marker that the task completed diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 9d603a8cc2..750b3b36b0 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -6,7 +6,6 @@ use async_lock::RwLock; use async_std::task::spawn_blocking; use hotshot_task::task::{Task, TaskState}; -use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::{ consensus::Consensus, data::VidDisperse, @@ -14,14 +13,13 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::Membership, + network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, + vid::vid_scheme, }; -use hotshot_types::{ - data::{test_srs, VidScheme, VidSchemeTrait}, - traits::network::ConsensusIntentEvent, -}; +use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; @@ -70,22 +68,12 @@ impl, A: ConsensusApi + match event { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { // get the number of quorum committee members to be used for VID calculation - let num_quorum_committee = self.membership.total_nodes(); - - // TODO - let srs = test_srs(num_quorum_committee); - - // calculate the last power of two - // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 - // issue: https://github.com/EspressoSystems/HotShot/issues/2152 - let chunk_size = 1 << num_quorum_committee.ilog2(); + let num_storage_nodes = self.membership.total_nodes(); // calculate vid shares let vid_disperse = spawn_blocking(move || { - let multiplicity = 1; - let vid = VidScheme::new(chunk_size, num_quorum_committee, multiplicity, &srs) - .unwrap(); - vid.disperse(encoded_transactions.clone()).unwrap() + #[allow(clippy::panic)] + vid_scheme(num_storage_nodes).disperse(&encoded_transactions).unwrap_or_else(|err|panic!("VID disperse failure:\n\t(num_storage nodes,payload_byte_len)=({num_storage_nodes},{})\n\terror: : {err}", encoded_transactions.len())) }) .await; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index aed8aea128..2040652626 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -18,14 +18,13 @@ bincode = { workspace = true } commit = { workspace = true } either = { workspace = true } futures = { workspace = true } -hotshot = { path = "../hotshot", features = [ - "hotshot-testing", -] } +hotshot = { path = "../hotshot", features = ["hotshot-testing"] } hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +jf-primitives = { workspace = true } rand = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } @@ -41,4 +40,3 @@ hotshot-example-types = { path = "../example-types" } tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } - diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 427a0d8de0..39335f3dc3 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -2,11 +2,12 @@ use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ - data::{Leaf, VidCommitment, VidDisperse}, + data::{Leaf, VidDisperse}, error::RoundTimedoutState, event::{Event, EventType, LeafChain}, simple_certificate::QuorumCertificate, traits::node_implementation::{ConsensusTime, NodeType}, + vid::VidCommitment, }; use snafu::Snafu; use std::{ diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index faf0b620a8..81646b9f2f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -17,7 +17,7 @@ use hotshot::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, VidScheme, ViewNumber}, + data::{Leaf, QuorumProposal, ViewNumber}, message::Proposal, simple_certificate::QuorumCertificate, simple_vote::SimpleVote, @@ -29,6 +29,7 @@ use hotshot_types::{ states::ValidatedState, BlockPayload, }, + vid::{vid_scheme, VidSchemeType}, vote::HasViewNumber, }; @@ -356,21 +357,12 @@ pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey /// initialize VID /// # Panics -/// if unable to create a [`VidScheme`] +/// if unable to create a [`VidSchemeType`] #[must_use] -pub fn vid_init( +pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, view_number: TYPES::Time, -) -> VidScheme { - let num_committee = membership.get_committee(view_number).len(); - - // calculate the last power of two - // TODO change after https://github.com/EspressoSystems/jellyfish/issues/339 - // issue: https://github.com/EspressoSystems/HotShot/issues/2152 - let chunk_size = 1 << num_committee.ilog2(); - - // TODO - let srs = hotshot_types::data::test_srs(num_committee); - let multiplicity = 1; - VidScheme::new(chunk_size, num_committee, multiplicity, srs).unwrap() +) -> VidSchemeType { + let num_storage_nodes = membership.get_committee(view_number).len(); + vid_scheme(num_storage_nodes) } diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ff960e667f..e045880c0e 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -15,7 +15,7 @@ use hotshot_types::{ simple_vote::QuorumData, traits::{consensus_api::ConsensusApi, election::Membership}, }; - +use jf_primitives::vid::VidScheme; use std::collections::HashMap; async fn build_vote( @@ -196,8 +196,7 @@ async fn test_consensus_with_vid() { use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; use hotshot_testing::task_helpers::build_cert; use hotshot_testing::task_helpers::build_system_handle; - use hotshot_testing::task_helpers::vid_init; - use hotshot_types::data::VidSchemeTrait; + use hotshot_testing::task_helpers::vid_scheme_from_view_number; use hotshot_types::simple_certificate::DACertificate; use hotshot_types::simple_vote::DAData; use hotshot_types::simple_vote::DAVote; @@ -218,7 +217,7 @@ async fn test_consensus_with_vid() { // For the test of vote logic with vid let pub_key = *handle.public_key(); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let vid = vid_init::(&quorum_membership, ViewNumber::new(2)); + let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 90dc33ec50..78a94e3402 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,11 +1,12 @@ use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestTypes; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::task_helpers::{build_quorum_proposal, vid_init}; +use hotshot_testing::task_helpers::{build_quorum_proposal, vid_scheme_from_view_number}; use hotshot_types::{ - data::{DAProposal, VidSchemeTrait, ViewNumber}, + data::{DAProposal, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, }; +use jf_primitives::vid::VidScheme; use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; @@ -39,7 +40,7 @@ async fn test_network_task() { &encoded_transactions_hash, ) .expect("Failed to sign block payload"); - let vid = vid_init::(&quorum_membership, ViewNumber::new(2)); + let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let vid_signature = diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 4061099a1e..480c31b6e2 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -1,12 +1,13 @@ use hotshot::types::SignatureKey; use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; -use hotshot_testing::task_helpers::{build_system_handle, vid_init}; +use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ - data::{DAProposal, VidDisperse, VidSchemeTrait, ViewNumber}, + data::{DAProposal, VidDisperse, ViewNumber}, traits::consensus_api::ConsensusApi, }; +use jf_primitives::vid::VidScheme; use std::collections::HashMap; use std::marker::PhantomData; @@ -29,7 +30,7 @@ async fn test_vid_task() { // quorum membership for VID share distribution let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let vid = vid_init::(&quorum_membership, ViewNumber::new(0)); + let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); diff --git a/types/Cargo.toml b/types/Cargo.toml index 2186379f0f..7a3b9ea06d 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -10,6 +10,7 @@ version = "0.1.0" [dependencies] ark-bls12-381 = { workspace = true } +ark-ec = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true, features = ["derive"] } @@ -35,6 +36,7 @@ hotshot-utils = { path = "../utils" } jf-plonk = { workspace = true } jf-primitives = { workspace = true, features = ["test-srs"] } jf-utils = { workspace = true } +lazy_static = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index abea971a14..40e50aa47f 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -17,18 +17,15 @@ use crate::{ storage::StoredView, BlockPayload, }, + vid::{VidCommitment, VidCommon, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; -use ark_bls12_381::Bls12_381; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; use commit::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use hotshot_utils::bincode::bincode_opts; -use jf_primitives::{ - pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme}, - vid::VidDisperse as JfVidDisperse, -}; +use jf_primitives::vid::VidDisperse as JfVidDisperse; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -137,15 +134,11 @@ where pub view_number: TYPES::Time, } -/// The VID scheme type used in `HotShot`. -pub type VidScheme = jf_primitives::vid::advz::Advz; -pub use jf_primitives::vid::VidScheme as VidSchemeTrait; -/// VID commitment. -pub type VidCommitment = ::Commit; - /// VID dispersal data /// /// Like [`DAProposal`]. +/// +/// TODO move to vid.rs? #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct VidDisperse { /// The view number for which this VID data is intended @@ -153,9 +146,9 @@ pub struct VidDisperse { /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share - pub shares: BTreeMap::Share>, + pub shares: BTreeMap, /// VID common data sent to all storage nodes - pub common: ::Common, + pub common: VidCommon, } impl VidDisperse { @@ -164,7 +157,7 @@ impl VidDisperse { /// Allows for more complex stake table functionality pub fn from_membership( view_number: TYPES::Time, - mut vid_disperse: JfVidDisperse, + mut vid_disperse: JfVidDisperse, membership: &Arc, ) -> Self { let shares = membership @@ -182,25 +175,6 @@ impl VidDisperse { } } -/// Trusted KZG setup for VID. -/// -/// TESTING ONLY: don't use this in production -/// TODO -/// -/// # Panics -/// ...because this is only for tests. This comment exists to pacify clippy. -#[must_use] -pub fn test_srs( - num_storage_nodes: usize, -) -> as PolynomialCommitmentScheme>::SRS { - let mut rng = jf_utils::test_rng(); - UnivariateKzgPCS::::gen_srs_for_testing( - &mut rng, - checked_fft_size(num_storage_nodes).unwrap(), - ) - .unwrap() -} - /// Proposal to append a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] diff --git a/types/src/lib.rs b/types/src/lib.rs index 148ce34603..341ad51520 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -20,6 +20,7 @@ pub mod simple_vote; pub mod stake_table; pub mod traits; pub mod utils; +pub mod vid; pub mod vote; /// Pinned future that is Send and Sync diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 41f9e9e85d..965474c08b 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -9,8 +9,9 @@ use serde::{Deserialize, Serialize}; use hotshot_constants::Version; use crate::{ - data::{Leaf, VidCommitment}, + data::Leaf, traits::{node_implementation::NodeType, signature_key::SignatureKey}, + vid::VidCommitment, vote::{HasViewNumber, Vote}, }; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 380c875102..7289791a02 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -4,11 +4,12 @@ //! describe the behaviors that a block is expected to have. use crate::{ - data::{test_srs, VidCommitment, VidScheme, VidSchemeTrait}, traits::ValidatedState, utils::BuilderCommitment, + vid::{vid_scheme, VidCommitment, VidSchemeType}, }; use commit::{Commitment, Committable}; +use jf_primitives::vid::VidScheme; use serde::{de::DeserializeOwned, Serialize}; use std::{ @@ -93,20 +94,16 @@ pub trait TestableBlock: BlockPayload + Debug { } /// Compute the VID payload commitment. +/// TODO(Gus) delete this function? /// # Panics /// If the VID computation fails. #[must_use] pub fn vid_commitment( encoded_transactions: &Vec, num_storage_nodes: usize, -) -> ::Commit { - let num_chunks = 1 << num_storage_nodes.ilog2(); - - // TODO - let srs = test_srs(num_storage_nodes); - let multiplicity = 1; - let vid = VidScheme::new(num_chunks, num_storage_nodes, multiplicity, srs).unwrap(); - vid.commit_only(encoded_transactions).unwrap() +) -> ::Commit { + #[allow(clippy::panic)] + vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:\n\t(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}\n\t{err}", encoded_transactions.len())) } /// The number of storage nodes to use when computing the genesis VID commitment. diff --git a/types/src/utils.rs b/types/src/utils.rs index f55aca47fa..af5082f8df 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -1,9 +1,6 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use crate::{ - data::{Leaf, VidCommitment}, - traits::node_implementation::NodeType, -}; +use crate::{data::Leaf, traits::node_implementation::NodeType, vid::VidCommitment}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use commit::Commitment; use digest::OutputSizeUser; diff --git a/types/src/vid.rs b/types/src/vid.rs new file mode 100644 index 0000000000..0fbc75f4bd --- /dev/null +++ b/types/src/vid.rs @@ -0,0 +1,270 @@ +//! This module provides: +//! - an opaque constructor [`vid_scheme`] that returns a new instance of a +//! VID scheme. +//! - type aliases [`VidCommitment`], [`VidCommon`], [`VidShare`] +//! for [`VidScheme`] assoc types. +//! +//! Purpose: the specific choice of VID scheme is an implementation detail. +//! This crate and all downstream crates should talk to the VID scheme only +//! via the traits exposed here. + +use ark_bls12_381::Bls12_381; +use jf_primitives::{ + pcs::{ + checked_fft_size, + prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, + PolynomialCommitmentScheme, + }, + vid::{ + advz::{ + payload_prover::{LargeRangeProof, SmallRangeProof}, + Advz, + }, + payload_prover::{PayloadProver, Statement}, + precomputable::Precomputable, + VidDisperse, VidResult, VidScheme, + }, +}; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; +use sha2::Sha256; +use std::{fmt::Debug, ops::Range}; + +/// VID scheme constructor. +/// +/// Returns an opaque type that impls jellyfish traits: +/// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. +/// +/// # Rust forbids naming impl Trait in return types +/// +/// Due to Rust limitations the return type of [`vid_scheme`] is a newtype +/// wrapper [`VidSchemeType`] that impls the above traits. +/// +/// We prefer that the return type of [`vid_scheme`] be `impl Trait` for the +/// above traits. But the ability to name an impl Trait return type is +/// currently missing from Rust: +/// - [Naming impl trait in return types - Impl trait initiative](https://rust-lang.github.io/impl-trait-initiative/explainer/rpit_names.html) +/// - [RFC: Type alias impl trait (TAIT)](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) +/// +/// # Panics +/// When the construction fails for the underlying VID scheme. +#[must_use] +pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { + // chunk_size is currently num_storage_nodes rounded down to a power of two + // TODO chunk_size should be a function of the desired erasure code rate + // https://github.com/EspressoSystems/HotShot/issues/2152 + let chunk_size = 1 << num_storage_nodes.ilog2(); + + // TODO intelligent choice of multiplicity + let multiplicity = 1; + + // TODO panic, return `Result`, or make `new` infallible upstream (eg. by panicking)? + #[allow(clippy::panic)] + VidSchemeType(Advz::new(chunk_size, num_storage_nodes, multiplicity, &*KZG_SRS).unwrap_or_else(|err| panic!("advz construction failure:\n\t(num_storage nodes,chunk_size,multiplicity)=({num_storage_nodes},{chunk_size},{multiplicity})\n\terror: : {err}"))) +} + +/// VID commitment type +pub type VidCommitment = ::Commit; +/// VID common type +pub type VidCommon = ::Common; +/// VID share type +pub type VidShare = ::Share; + +/// Newtype wrapper for a VID scheme type that impls +/// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. +pub struct VidSchemeType(Advz); + +/// Newtype wrapper for a large payload range proof. +/// +/// Useful for namespace proofs. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct LargeRangeProofType( + // # Type complexity + // + // Jellyfish's `LargeRangeProof` type has a prime field generic parameter `F`. + // This `F` is determined by the type parameter `E` for `Advz`. + // Jellyfish needs a more ergonomic way for downstream users to refer to this type. + // + // There is a `KzgEval` type alias in jellyfish that helps a little, but it's currently private: + // + // If it were public then we could instead use + // `LargeRangeProof>` + // but that's still pretty crufty. + LargeRangeProof< as PolynomialCommitmentScheme>::Evaluation>, +); + +/// Newtype wrapper for a small payload range proof. +/// +/// Useful for transaction proofs. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct SmallRangeProofType( + // # Type complexity + // + // Similar to the comments in `LargeRangeProofType`. + SmallRangeProof< as PolynomialCommitmentScheme>::Proof>, +); + +lazy_static! { + /// SRS comment + /// + /// TODO use a proper SRS + /// https://github.com/EspressoSystems/HotShot/issues/1686 + static ref KZG_SRS: UnivariateUniversalParams = { + let mut rng = jf_utils::test_rng(); + UnivariateKzgPCS::::gen_srs_for_testing( + &mut rng, + // TODO what's the maximum possible SRS size? + checked_fft_size(200).unwrap(), + ) + .unwrap() + }; +} + +/// Private type alias for the EC pairing type parameter for [`Advz`]. +type E = Bls12_381; +/// Private type alias for the hash type parameter for [`Advz`]. +type H = Sha256; + +// THE REST OF THIS FILE IS BOILERPLATE +// +// All this boilerplate can be deleted when we finally get +// type alias impl trait (TAIT): +// [rfcs/text/2515-type_alias_impl_trait.md at master · rust-lang/rfcs](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) +impl VidScheme for VidSchemeType { + type Commit = as VidScheme>::Commit; + type Share = as VidScheme>::Share; + type Common = as VidScheme>::Common; + + fn commit_only(&self, payload: B) -> VidResult + where + B: AsRef<[u8]>, + { + self.0.commit_only(payload) + } + + fn disperse(&self, payload: B) -> VidResult> + where + B: AsRef<[u8]>, + { + self.0.disperse(payload).map(vid_disperse_conversion) + } + + fn verify_share( + &self, + share: &Self::Share, + common: &Self::Common, + commit: &Self::Commit, + ) -> VidResult> { + self.0.verify_share(share, common, commit) + } + + fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { + self.0.recover_payload(shares, common) + } + + fn is_consistent(commit: &Self::Commit, common: &Self::Common) -> VidResult<()> { + as VidScheme>::is_consistent(commit, common) + } + + fn get_payload_byte_len(common: &Self::Common) -> usize { + as VidScheme>::get_payload_byte_len(common) + } + + fn get_num_storage_nodes(common: &Self::Common) -> usize { + as VidScheme>::get_num_storage_nodes(common) + } + + fn get_multiplicity(common: &Self::Common) -> usize { + as VidScheme>::get_multiplicity(common) + } +} + +impl PayloadProver for VidSchemeType { + fn payload_proof(&self, payload: B, range: Range) -> VidResult + where + B: AsRef<[u8]>, + { + self.0 + .payload_proof(payload, range) + .map(LargeRangeProofType) + } + + fn payload_verify( + &self, + stmt: Statement<'_, Self>, + proof: &LargeRangeProofType, + ) -> VidResult> { + self.0.payload_verify(stmt_conversion(stmt), &proof.0) + } +} + +impl PayloadProver for VidSchemeType { + fn payload_proof(&self, payload: B, range: Range) -> VidResult + where + B: AsRef<[u8]>, + { + self.0 + .payload_proof(payload, range) + .map(SmallRangeProofType) + } + + fn payload_verify( + &self, + stmt: Statement<'_, Self>, + proof: &SmallRangeProofType, + ) -> VidResult> { + self.0.payload_verify(stmt_conversion(stmt), &proof.0) + } +} + +impl Precomputable for VidSchemeType { + type PrecomputeData = as Precomputable>::PrecomputeData; + + fn commit_only_precompute( + &self, + payload: B, + ) -> VidResult<(Self::Commit, Self::PrecomputeData)> + where + B: AsRef<[u8]>, + { + self.0.commit_only_precompute(payload) + } + + fn disperse_precompute( + &self, + payload: B, + data: &Self::PrecomputeData, + ) -> VidResult> + where + B: AsRef<[u8]>, + { + self.0 + .disperse_precompute(payload, data) + .map(vid_disperse_conversion) + } +} + +/// Convert a [`VidDisperse>`] to a [`VidDisperse`]. +/// +/// Foreign type rules prevent us from doing: +/// - `impl From> for VidDisperse>` +/// - `impl VidDisperse {...}` +/// and similarly for `Statement`. +/// Thus, we accomplish type conversion via functions. +fn vid_disperse_conversion(vid_disperse: VidDisperse>) -> VidDisperse { + VidDisperse { + shares: vid_disperse.shares, + common: vid_disperse.common, + commit: vid_disperse.commit, + } +} + +/// Convert a [`Statement<'_, VidSchemeType>`] to a [`Statement<'_, Advz>`]. +fn stmt_conversion(stmt: Statement<'_, VidSchemeType>) -> Statement<'_, Advz> { + Statement { + payload_subslice: stmt.payload_subslice, + range: stmt.range, + commit: stmt.commit, + common: stmt.common, + } +} From ce8683f80d6a93cac5b4251d01afde35163cc04f Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Wed, 28 Feb 2024 12:32:24 -0500 Subject: [PATCH 0821/1393] make serde_json a workspace dep (#2672) --- libp2p-networking/Cargo.toml | 2 +- orchestrator/Cargo.toml | 2 +- types/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index f97ab08d9f..d3ac0fc1ed 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -27,7 +27,7 @@ libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } serde = { workspace = true } -serde_json = "1.0.114" +serde_json = { workspace = true } snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index ac9163e46b..c131b0a93c 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -15,7 +15,7 @@ tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } -serde_json = "1.0.96" +serde_json = { workspace = true } toml = { workspace = true } thiserror = "1.0.50" serde-inline-default = "0.1.1" diff --git a/types/Cargo.toml b/types/Cargo.toml index 7a3b9ea06d..e57b807bc7 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -49,7 +49,7 @@ tracing = { workspace = true } typenum = { workspace = true } [dev-dependencies] -serde_json = "1.0.114" +serde_json = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 08dd96cc9e58f086e265cfee7d7c8fd6c81af61e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 28 Feb 2024 12:55:22 -0500 Subject: [PATCH 0822/1393] Make `BlockHeader::new()` and `validate_and_apply_header()` async (#2673) --- example-types/src/block_types.rs | 2 +- example-types/src/state_types.rs | 2 +- task-impls/src/consensus.rs | 16 ++++++++++------ testing/src/task_helpers.rs | 4 +++- types/src/traits/block_contents.rs | 3 ++- types/src/traits/states.rs | 4 ++-- 6 files changed, 19 insertions(+), 12 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 09be429a0c..3079bf28f7 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -184,7 +184,7 @@ impl BlockHeader for TestBlockHeader { type Payload = TestBlockPayload; type State = TestValidatedState; - fn new( + async fn new( _parent_state: &Self::State, _instance_state: &::Instance, parent_header: &Self, diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index d7e356e301..26feb2bc04 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -64,7 +64,7 @@ impl ValidatedState for TestValidatedState { type Time = ViewNumber; - fn validate_and_apply_header( + async fn validate_and_apply_header( &self, _instance: &Self::Instance, _parent_header: &Self::BlockHeader, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d39e559949..bbc3528b5f 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -616,11 +616,14 @@ impl, A: ConsensusApi + return; }; - let Ok(state) = parent_state.validate_and_apply_header( - &consensus.instance_state, - &parent_leaf.block_header.clone(), - &proposal.data.block_header.clone(), - ) else { + let Ok(state) = parent_state + .validate_and_apply_header( + &consensus.instance_state, + &parent_leaf.block_header.clone(), + &proposal.data.block_header.clone(), + ) + .await + else { error!("Block header doesn't extend the proposal",); return; }; @@ -1298,7 +1301,8 @@ impl, A: ConsensusApi + &parent_header, commit_and_metadata.commitment, commit_and_metadata.metadata.clone(), - ); + ) + .await; let leaf = Leaf { view_number: view, justify_qc: consensus.high_qc.clone(), diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 81646b9f2f..0f470193b5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -242,7 +242,8 @@ async fn build_quorum_proposal_and_signature( &parent_leaf.block_header, payload_commitment, (), - ); + ) + .await; // current leaf that can be re-assigned everytime when entering a new view let mut leaf = Leaf { view_number: ViewNumber::new(1), @@ -269,6 +270,7 @@ async fn build_quorum_proposal_and_signature( let state_new_view = Arc::new( parent_state .validate_and_apply_header(&TestInstanceState {}, &block_header, &block_header) + .await .unwrap(), ); // save states for the previous view to pass all the qc checks diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 7289791a02..c9b8f0a65f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -15,6 +15,7 @@ use serde::{de::DeserializeOwned, Serialize}; use std::{ error::Error, fmt::{Debug, Display}, + future::Future, hash::Hash, }; @@ -130,7 +131,7 @@ pub trait BlockHeader: parent_header: &Self, payload_commitment: VidCommitment, metadata: ::Metadata, - ) -> Self; + ) -> impl Future + Send; /// Build the genesis header, payload, and metadata. fn genesis( diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 8889f5691c..79ec856f4a 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -7,7 +7,7 @@ use super::block_contents::{BlockHeader, TestableBlock}; use crate::traits::{node_implementation::ConsensusTime, BlockPayload}; use serde::{de::DeserializeOwned, Serialize}; -use std::{error::Error, fmt::Debug, hash::Hash}; +use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; /// Instance-level state, which allows us to fetch missing validated state. pub trait InstanceState: Clone + Debug + Send + Sync {} @@ -50,7 +50,7 @@ pub trait ValidatedState: instance: &Self::Instance, parent_header: &Self::BlockHeader, proposed_header: &Self::BlockHeader, - ) -> Result; + ) -> impl Future> + Send; /// Construct the state with the given block header. /// From d4aa216b7e39752dc0b8f2ad8cb84da6c05a7919 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 28 Feb 2024 14:55:47 -0800 Subject: [PATCH 0823/1393] [VALIDATED_STATE] - Give proposal and validation functions access to parent leaf (#2674) * Replace parent header with leaf * Remove associated types, remove clone, add to justfile --- example-types/src/block_types.rs | 38 ++++++++++++++----------- example-types/src/state_types.rs | 25 +++++++--------- examples/infra/mod.rs | 10 +++---- task-impls/src/consensus.rs | 13 +++++---- testing/src/task_helpers.rs | 10 +++---- types/src/data.rs | 10 ++----- types/src/traits/block_contents.rs | 29 ++++++++----------- types/src/traits/node_implementation.rs | 15 ++++------ types/src/traits/states.rs | 29 ++++++++++--------- 9 files changed, 85 insertions(+), 94 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 3079bf28f7..5156bf0e44 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -3,11 +3,13 @@ use std::{ mem::size_of, }; +use crate::node_types::TestTypes; use commit::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::BlockError, + data::{BlockError, Leaf}, traits::{ block_contents::{BlockHeader, TestableBlock, Transaction}, + node_implementation::NodeType, BlockPayload, ValidatedState, }, utils::BuilderCommitment, @@ -16,8 +18,6 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; -use crate::state_types::TestValidatedState; - /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct TestTransaction(pub Vec); @@ -180,27 +180,24 @@ pub struct TestBlockHeader { pub payload_commitment: VidCommitment, } -impl BlockHeader for TestBlockHeader { - type Payload = TestBlockPayload; - type State = TestValidatedState; - +impl> BlockHeader for TestBlockHeader { async fn new( - _parent_state: &Self::State, - _instance_state: &::Instance, - parent_header: &Self, + _parent_state: &TYPES::ValidatedState, + _instance_state: &>::Instance, + parent_leaf: &Leaf, payload_commitment: VidCommitment, - _metadata: ::Metadata, + _metadata: ::Metadata, ) -> Self { Self { - block_number: parent_header.block_number + 1, + block_number: parent_leaf.block_header.block_number() + 1, payload_commitment, } } fn genesis( - _instance_state: &::Instance, + _instance_state: &>::Instance, payload_commitment: VidCommitment, - _metadata: ::Metadata, + _metadata: ::Metadata, ) -> Self { Self { block_number: 0, @@ -216,7 +213,7 @@ impl BlockHeader for TestBlockHeader { self.payload_commitment } - fn metadata(&self) -> &::Metadata { + fn metadata(&self) -> &::Metadata { &() } } @@ -224,9 +221,16 @@ impl BlockHeader for TestBlockHeader { impl Committable for TestBlockHeader { fn commit(&self) -> Commitment { RawCommitmentBuilder::new("Header Comm") - .u64_field("block number", self.block_number()) + .u64_field( + "block number", + >::block_number(self), + ) .constant_str("payload commitment") - .fixed_size_bytes(self.payload_commitment().as_ref().as_ref()) + .fixed_size_bytes( + >::payload_commitment(self) + .as_ref() + .as_ref(), + ) .finalize() } diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 26feb2bc04..a8593d5fea 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -2,8 +2,10 @@ use commit::{Commitment, Committable}; use hotshot_types::{ - data::{fake_commitment, BlockError, ViewNumber}, + data::{fake_commitment, BlockError, Leaf, ViewNumber}, traits::{ + block_contents::BlockHeader, + node_implementation::NodeType, states::{InstanceState, TestableState, ValidatedState}, BlockPayload, }, @@ -12,8 +14,7 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use std::fmt::Debug; -use crate::block_types::TestTransaction; -use crate::block_types::{TestBlockHeader, TestBlockPayload}; +use crate::block_types::{TestBlockPayload, TestTransaction}; pub use crate::node_types::TestTypes; /// Instance-level state implementation for testing purposes. @@ -53,22 +54,18 @@ impl Default for TestValidatedState { } } -impl ValidatedState for TestValidatedState { +impl ValidatedState for TestValidatedState { type Error = BlockError; type Instance = TestInstanceState; - type BlockHeader = TestBlockHeader; - - type BlockPayload = TestBlockPayload; - type Time = ViewNumber; async fn validate_and_apply_header( &self, _instance: &Self::Instance, - _parent_header: &Self::BlockHeader, - _proposed_header: &Self::BlockHeader, + _parent_leaf: &Leaf, + _proposed_header: &TYPES::BlockHeader, ) -> Result { Ok(TestValidatedState { block_height: self.block_height + 1, @@ -76,9 +73,9 @@ impl ValidatedState for TestValidatedState { }) } - fn from_header(block_header: &Self::BlockHeader) -> Self { + fn from_header(block_header: &TYPES::BlockHeader) -> Self { Self { - block_height: block_header.block_number, + block_height: block_header.block_number(), ..Default::default() } } @@ -90,12 +87,12 @@ impl ValidatedState for TestValidatedState { } } -impl TestableState for TestValidatedState { +impl> TestableState for TestValidatedState { fn create_random_transaction( _state: Option<&Self>, _rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; TestTransaction(vec![ diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 01ebf512f0..3c0b53a4d6 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -304,7 +304,7 @@ pub trait RunDA< Storage = MemoryStorage, >, > where - ::ValidatedState: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, TYPES: NodeType, Leaf: TestableLeaf, @@ -514,7 +514,7 @@ impl< >, > RunDA, WebServerNetwork, NODE> for WebServerDARun where - ::ValidatedState: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -594,7 +594,7 @@ impl< NODE, > for Libp2pDARun where - ::ValidatedState: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -658,7 +658,7 @@ impl< >, > RunDA, CombinedNetworks, NODE> for CombinedDARun where - ::ValidatedState: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, @@ -746,7 +746,7 @@ pub async fn main_entry_point< >( args: ValidatorArgs, ) where - ::ValidatedState: TestableState, + ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index bbc3528b5f..0e978304a7 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -566,9 +566,11 @@ impl, A: ConsensusApi + block_payload: None, proposer_id: sender, }; - let state = Arc::new(::from_header( - &proposal.data.block_header, - )); + let state = Arc::new( + >::from_header( + &proposal.data.block_header, + ), + ); consensus.validated_state_map.insert( view, @@ -619,7 +621,7 @@ impl, A: ConsensusApi + let Ok(state) = parent_state .validate_and_apply_header( &consensus.instance_state, - &parent_leaf.block_header.clone(), + &parent_leaf, &proposal.data.block_header.clone(), ) .await @@ -1275,7 +1277,6 @@ impl, A: ConsensusApi + } let parent_leaf = leaf.clone(); - let parent_header = parent_leaf.block_header.clone(); let original_parent_hash = parent_leaf.commit(); @@ -1298,7 +1299,7 @@ impl, A: ConsensusApi + let block_header = TYPES::BlockHeader::new( state, &consensus.instance_state, - &parent_header, + &parent_leaf, commit_and_metadata.commitment, commit_and_metadata.metadata.clone(), ) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 0f470193b5..1cc81ba8fa 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -233,13 +233,13 @@ async fn build_quorum_proposal_and_signature( &block.encode().unwrap().collect(), handle.hotshot.memberships.quorum_membership.total_nodes(), ); - let mut parent_state = Arc::new(::from_header( - &parent_leaf.block_header, - )); + let mut parent_state = Arc::new( + >::from_header(&parent_leaf.block_header), + ); let block_header = TestBlockHeader::new( &*parent_state, &TestInstanceState {}, - &parent_leaf.block_header, + &parent_leaf, payload_commitment, (), ) @@ -269,7 +269,7 @@ async fn build_quorum_proposal_and_signature( for cur_view in 2..=view { let state_new_view = Arc::new( parent_state - .validate_and_apply_header(&TestInstanceState {}, &block_header, &block_header) + .validate_and_apply_header(&TestInstanceState {}, &parent_leaf, &block_header) .await .unwrap(), ); diff --git a/types/src/data.rs b/types/src/data.rs index 40e50aa47f..bc09bbbd18 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -13,7 +13,7 @@ use crate::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, - states::{TestableState, ValidatedState}, + states::TestableState, storage::StoredView, BlockPayload, }, @@ -104,12 +104,6 @@ impl std::ops::Sub for ViewNumber { } } -/// The `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut -pub type Transaction = - <::BlockPayload as BlockPayload>::Transaction; -/// `Commitment` to the `Transaction` type associated with a `ValidatedState`, as a syntactic shortcut -pub type TxnCommitment = Commitment>; - /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { @@ -414,7 +408,7 @@ impl Leaf { impl TestableLeaf for Leaf where - TYPES::ValidatedState: TestableState, + TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index c9b8f0a65f..aee82425bc 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -4,7 +4,8 @@ //! describe the behaviors that a block is expected to have. use crate::{ - traits::ValidatedState, + data::Leaf, + traits::{node_implementation::NodeType, ValidatedState}, utils::BuilderCommitment, vid::{vid_scheme, VidCommitment, VidSchemeType}, }; @@ -114,30 +115,24 @@ pub fn vid_commitment( pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; /// Header of a block, which commits to a [`BlockPayload`]. -pub trait BlockHeader: +pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable { - /// Block payload associated with the commitment. - type Payload: BlockPayload; - - /// Validated state. - type State: ValidatedState; - - /// Build a header with the payload commitment, metadata, instance-level state, parent header, - /// and parent state. + /// Build a header with the parent validate state, instance-level state, parent leaf, payload + /// commitment, and metadata. fn new( - parent_state: &Self::State, - instance_state: &::Instance, - parent_header: &Self, + parent_state: &TYPES::ValidatedState, + instance_state: &>::Instance, + parent_leaf: &Leaf, payload_commitment: VidCommitment, - metadata: ::Metadata, + metadata: ::Metadata, ) -> impl Future + Send; /// Build the genesis header, payload, and metadata. fn genesis( - instance_state: &::Instance, + instance_state: &>::Instance, payload_commitment: VidCommitment, - metadata: ::Metadata, + metadata: ::Metadata, ) -> Self; /// Get the block number. @@ -147,5 +142,5 @@ pub trait BlockHeader: fn payload_commitment(&self) -> VidCommitment; /// Get the metadata. - fn metadata(&self) -> &::Metadata; + fn metadata(&self) -> &::Metadata; } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index f3199a2bd6..2c14bd9410 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -106,7 +106,7 @@ pub trait TestableNodeImplementation: NodeImplementation #[async_trait] impl> TestableNodeImplementation for I where - TYPES::ValidatedState: TestableState, + TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, I::Storage: TestableStorage, I::QuorumNetwork: TestableNetworkingImplementation, @@ -124,7 +124,9 @@ where rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { - ::create_random_transaction(state, rng, padding) + >::create_random_transaction( + state, rng, padding, + ) } fn leaf_create_random_transaction( @@ -219,7 +221,7 @@ pub trait NodeType: /// This should be the same `Time` that `ValidatedState::Time` is using. type Time: ConsensusTime; /// The block header type that this hotshot setup is using. - type BlockHeader: BlockHeader; + type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. /// /// This should be the same block that `ValidatedState::BlockPayload` is using. @@ -237,12 +239,7 @@ pub trait NodeType: type InstanceState: InstanceState; /// The validated state type that this hotshot setup is using. - type ValidatedState: ValidatedState< - Instance = Self::InstanceState, - BlockHeader = Self::BlockHeader, - BlockPayload = Self::BlockPayload, - Time = Self::Time, - >; + type ValidatedState: ValidatedState; /// Membership used for this implementation type Membership: Membership; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 79ec856f4a..d759e24cfb 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -4,8 +4,14 @@ //! compatibilities over the current network state, which is modified by the transactions contained //! within blocks. -use super::block_contents::{BlockHeader, TestableBlock}; -use crate::traits::{node_implementation::ConsensusTime, BlockPayload}; +use super::block_contents::TestableBlock; +use crate::{ + data::Leaf, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + BlockPayload, + }, +}; use serde::{de::DeserializeOwned, Serialize}; use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; @@ -21,17 +27,13 @@ pub trait InstanceState: Clone + Debug + Send + Sync {} /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) -pub trait ValidatedState: +pub trait ValidatedState: Serialize + DeserializeOwned + Debug + Default + Hash + PartialEq + Eq + Send + Sync { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; /// The type of the instance-level state this state is assocaited with type Instance: InstanceState; - /// The type of block header this state is associated with - type BlockHeader: BlockHeader; - /// The type of block payload this state is associated with - type BlockPayload: BlockPayload; /// Time compatibility needed for reward collection type Time: ConsensusTime; @@ -48,14 +50,14 @@ pub trait ValidatedState: fn validate_and_apply_header( &self, instance: &Self::Instance, - parent_header: &Self::BlockHeader, - proposed_header: &Self::BlockHeader, + parent_leaf: &Leaf, + proposed_header: &TYPES::BlockHeader, ) -> impl Future> + Send; /// Construct the state with the given block header. /// /// This can also be used to rebuild the state for catchup. - fn from_header(block_header: &Self::BlockHeader) -> Self; + fn from_header(block_header: &TYPES::BlockHeader) -> Self; /// Construct a genesis validated state. #[must_use] @@ -66,9 +68,10 @@ pub trait ValidatedState: } /// extra functions required on state to be usable by hotshot-testing -pub trait TestableState: ValidatedState +pub trait TestableState: ValidatedState where - ::BlockPayload: TestableBlock, + TYPES: NodeType, + TYPES::BlockPayload: TestableBlock, { /// Creates random transaction if possible /// otherwise panics @@ -77,5 +80,5 @@ where state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> ::Transaction; } From 5a4700fcc1af833c63f566dc20496b573b14848e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 09:05:49 -0500 Subject: [PATCH 0824/1393] Bump syn from 2.0.51 to 2.0.52 (#2676) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.51 to 2.0.52. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.51...2.0.52) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- testing-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index e23c47b1af..dcb0de323c 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ tracing = { workspace = true } serde = { workspace = true } # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.51", features = ["full", "extra-traits"] } +syn = { version = "2.0.52", features = ["full", "extra-traits"] } proc-macro2 = "1.0.78" derive_builder = "0.13.1" From 7ec94e4ec24da8ebcd9caa6b0daad99ec4bf70af Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 29 Feb 2024 09:51:43 -0500 Subject: [PATCH 0825/1393] [LIBP2P][CLEANUP] Get rid of locked Channels, and Separate Test Code (#2661) * Don't lock channels * cargo fix * Allow killing the connected network handler * Fix issues * Remove State from NetworkNodeHandle * Remove async where it's unused * fix async std build * Fix erros a little --- .../src/traits/networking/libp2p_network.rs | 148 +++++---- libp2p-networking/src/network/mod.rs | 18 +- libp2p-networking/src/network/node.rs | 5 +- libp2p-networking/src/network/node/handle.rs | 283 ++++-------------- libp2p-networking/tests/common/mod.rs | 142 ++++++--- libp2p-networking/tests/counter.rs | 154 ++++++---- task-impls/src/consensus.rs | 4 +- types/src/consensus.rs | 7 +- 8 files changed, 357 insertions(+), 404 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 2122c19f90..ca554e6597 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -6,7 +6,7 @@ use super::NetworkingMetricsValue; use async_compatibility_layer::art::async_block_on; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, - channel::{unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, + channel::{bounded, unbounded, Sender, UnboundedReceiver, UnboundedSendError, UnboundedSender}, }; use async_lock::RwLock; use async_trait::async_trait; @@ -41,8 +41,10 @@ use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder}; use libp2p_networking::{ network::{ + spawn_network_node, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, - NetworkNodeConfig, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, + NetworkNodeConfig, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, + NetworkNodeType, }, reexport::Multiaddr, }; @@ -52,7 +54,10 @@ use snafu::ResultExt; #[cfg(feature = "hotshot-testing")] use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; -use futures::future::join_all; +use futures::{ + future::{join_all, Either}, + FutureExt, +}; use std::{ collections::BTreeSet, fmt::Debug, @@ -100,7 +105,7 @@ struct Libp2pNetworkInner { /// this node's public key pk: K, /// handle to control the network - handle: Arc>, + handle: Arc, /// map of known replica peer ids to public keys receiver: UnboundedReceiver, /// Sender for broadcast messages @@ -133,6 +138,8 @@ struct Libp2pNetworkInner { reliability_config: Option>, /// if we're a member of the DA committee or not is_da: bool, + /// Killswitch sender + kill_switch: Sender<()>, } /// Networking implementation that uses libp2p @@ -323,12 +330,9 @@ impl Libp2pNetwork { is_da: bool, ) -> Result, NetworkError> { assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); - let network_handle = Arc::new( - Box::pin(NetworkNodeHandle::<()>::new(config, id)) - .await - .map_err(Into::::into)?, - ); - + let (mut rx, network_handle) = spawn_network_node(config.clone(), id) + .await + .map_err(Into::::into)?; // Make bootstrap mappings known if matches!( network_handle.config().node_type, @@ -354,10 +358,12 @@ impl Libp2pNetwork { // if bounded figure out a way to log dropped msgs let (sender, receiver) = unbounded(); let (node_lookup_send, node_lookup_recv) = unbounded(); + let (kill_tx, kill_rx) = bounded(1); + rx.set_kill_switch(kill_rx); let mut result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { - handle: network_handle, + handle: Arc::new(network_handle), receiver, sender: sender.clone(), pk, @@ -378,10 +384,11 @@ impl Libp2pNetwork { #[cfg(feature = "hotshot-testing")] reliability_config, is_da, + kill_switch: kill_tx, }), }; - result.handle_event_generator(sender); + result.handle_event_generator(sender, rx); result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); @@ -563,38 +570,70 @@ impl Libp2pNetwork { /// task to propagate messages to handlers /// terminates on shut down of network - fn handle_event_generator(&self, sender: UnboundedSender) { + fn handle_event_generator( + &self, + sender: UnboundedSender, + mut network_rx: NetworkNodeReceiver, + ) { let handle = self.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); async_spawn(async move { - while let Ok(message) = handle.inner.handle.receiver().recv().await { - match &message { - NetworkEvent::IsBootstrapped => { - is_bootstrapped.store(true, Ordering::Relaxed); - } - GossipMsg(raw, _) | DirectRequest(raw, _, _) | DirectResponse(raw, _) => { - let message_version = read_version(raw); - match message_version { - Some(VERSION_0_1) => { - let _ = handle.handle_recvd_events_0_1(message, &sender).await; - } - Some(version) => { - warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", - version, message - ); + let Some(mut kill_switch) = network_rx.take_kill_switch() else { + tracing::error!( + "`spawn_handle` was called on a network handle that was already closed" + ); + return; + }; + let mut kill_switch = kill_switch.recv().boxed(); + let mut next_msg = network_rx.recv().boxed(); + + loop { + let msg_or_killed = futures::future::select(next_msg, kill_switch).await; + match msg_or_killed { + Either::Left((Ok(message), other_stream)) => { + match &message { + NetworkEvent::IsBootstrapped => { + is_bootstrapped.store(true, Ordering::Relaxed); } - _ => { - warn!( - "Received message with unreadable version number.\n\nPayload:\n\n{:?}", - message - ); + GossipMsg(raw, _) + | DirectRequest(raw, _, _) + | DirectResponse(raw, _) => { + let message_version = read_version(raw); + match message_version { + Some(VERSION_0_1) => { + let _ = + handle.handle_recvd_events_0_1(message, &sender).await; + } + Some(version) => { + warn!( + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", + version, message + ); + } + _ => { + warn!( + "Received message with unreadable version number.\n\nPayload:\n\n{:?}", + message + ); + } + } } - } + }; + // re-set the `kill_switch` for the next loop + kill_switch = other_stream; + // re-set `receiver.recv()` for the next loop + next_msg = network_rx.recv().boxed(); + } + Either::Left((Err(_), _)) => { + warn!("Network receiver shut down!"); + return; + } + Either::Right(_) => { + warn!("Event Handler shutdown"); + return; } } } - warn!("Network receiver shut down!"); }); } } @@ -626,12 +665,9 @@ impl ConnectedNetwork for Libp2p Self: 'b, { let closure = async move { - if self.inner.handle.is_killed() { - error!("Called shut down when already shut down! Noop."); - } else { - let _ = self.inner.node_lookup_send.send(None).await; - let _ = self.inner.handle.shutdown().await; - } + let _ = self.inner.handle.shutdown().await; + let _ = self.inner.node_lookup_send.send(None).await; + let _ = self.inner.kill_switch.send(()).await; }; boxed_sync(closure) } @@ -642,10 +678,6 @@ impl ConnectedNetwork for Libp2p message: M, recipients: BTreeSet, ) -> Result<(), NetworkError> { - if self.inner.handle.is_killed() { - return Err(NetworkError::ShutDown); - } - self.wait_for_ready().await; info!( "broadcasting msg: {:?} with nodes: {:?} connected", @@ -746,10 +778,6 @@ impl ConnectedNetwork for Libp2p #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { - if self.inner.handle.is_killed() { - return Err(NetworkError::ShutDown); - } - // short circuit if we're dming ourselves if recipient == self.inner.pk { // panic if we already shut down? @@ -825,18 +853,14 @@ impl ConnectedNetwork for Libp2p Self: 'b, { let closure = async move { - if self.inner.handle.is_killed() { - Err(NetworkError::ShutDown) - } else { - let result = self - .inner - .receiver - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_message_count.add(result.len()); - Ok(result) - } + let result = self + .inner + .receiver + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner.metrics.incoming_message_count.add(result.len()); + Ok(result) }; boxed_sync(closure) } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 38614a855b..2c035b21fe 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -11,9 +11,9 @@ pub use self::{ def::NetworkDef, error::NetworkError, node::{ - network_node_handle_error, MeshParams, NetworkNode, NetworkNodeConfig, + network_node_handle_error, spawn_network_node, MeshParams, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, - NetworkNodeHandleError, + NetworkNodeHandleError, NetworkNodeReceiver, }, }; @@ -32,9 +32,8 @@ use libp2p::{ Multiaddr, Transport, }; use libp2p_identity::PeerId; -use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, fmt::Debug, str::FromStr, sync::Arc}; +use std::{collections::HashSet, fmt::Debug, str::FromStr}; use tracing::instrument; #[cfg(async_executor_impl = "async-std")] @@ -228,14 +227,3 @@ pub async fn gen_transport(identity: Keypair) -> Result( - handles: &[Arc>], - rng: &mut dyn rand::RngCore, -) -> Arc> { - handles.iter().choose(rng).unwrap().clone() -} diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 6e6235fcd6..db9239b053 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -9,7 +9,10 @@ pub use self::{ config::{ MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, }, - handle::{network_node_handle_error, NetworkNodeHandle, NetworkNodeHandleError}, + handle::{ + network_node_handle_error, spawn_network_node, NetworkNodeHandle, NetworkNodeHandleError, + NetworkNodeReceiver, + }, }; use super::{ diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 64f45e457a..d3fdb526dd 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -3,16 +3,10 @@ use crate::network::{ NetworkNodeConfig, NetworkNodeConfigBuilderError, }; use async_compatibility_layer::{ - art::{async_sleep, async_spawn, async_timeout, future::to, stream}, - async_primitives::subscribable_mutex::SubscribableMutex, - channel::{ - oneshot, OneShotReceiver, OneShotSender, SendError, UnboundedReceiver, UnboundedRecvError, - UnboundedSender, - }, + art::{async_sleep, async_timeout, future::to}, + channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; -use async_lock::Mutex; use bincode::Options; -use futures::{stream::FuturesOrdered, Future, FutureExt}; use hotshot_utils::bincode::bincode_opts; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; @@ -21,10 +15,6 @@ use snafu::{ResultExt, Snafu}; use std::{ collections::HashSet, fmt::Debug, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, time::{Duration, Instant}, }; use tracing::{debug, info, instrument}; @@ -32,14 +22,11 @@ use tracing::{debug, info, instrument}; /// A handle containing: /// - A reference to the state /// - Controls for the swarm -#[derive(Debug)] -pub struct NetworkNodeHandle { +#[derive(Debug, Clone)] +pub struct NetworkNodeHandle { /// network configuration network_config: NetworkNodeConfig, - /// the state of the replica - state: Arc>, - /// send an action to the networkbehaviour send_network: UnboundedSender, @@ -51,168 +38,87 @@ pub struct NetworkNodeHandle { /// human readable id id: usize, - - /// network node receiver - receiver: NetworkNodeReceiver, } /// internal network node receiver #[derive(Debug)] pub struct NetworkNodeReceiver { - /// whether or not the receiver is started - receiver_spawned: AtomicBool, - - /// whether or not the handle has been killed - killed: AtomicBool, - /// the receiver - receiver: Mutex>, + receiver: UnboundedReceiver, ///kill switch - recv_kill: Mutex>>, - - /// kill the event handler for events from the swarm - kill_switch: Mutex>>, + recv_kill: Option>, } impl NetworkNodeReceiver { /// recv a network event + /// # Errors + /// Errors if the receiver channel is closed pub async fn recv(&self) -> Result { - if self.killed.load(Ordering::Relaxed) { - return Err(NetworkNodeHandleError::Killed); - } - let lock = self.receiver.lock().await; - lock.recv().await.context(ReceiverEndedSnafu) + self.receiver.recv().await.context(ReceiverEndedSnafu) } -} - -impl NetworkNodeHandle { - /// constructs a new node listening on `known_addr` - #[instrument] - pub async fn new(config: NetworkNodeConfig, id: usize) -> Result { - // randomly assigned port - let listen_addr = config - .bound_addr - .clone() - .unwrap_or_else(|| gen_multiaddr(0)); - let mut network = NetworkNode::new(config.clone()) - .await - .context(NetworkSnafu)?; - - let peer_id = network.peer_id(); - let listen_addr = network - .start_listen(listen_addr) - .await - .context(NetworkSnafu)?; - info!("LISTEN ADDRESS IS {:?}", listen_addr); - // pin here to force the future onto the heap since it can be large - // in the case of flume - let (send_chan, recv_chan) = Box::pin(network.spawn_listeners()) - .await - .context(NetworkSnafu)?; - let (kill_switch, recv_kill) = oneshot(); - - let kill_switch = Mutex::new(Some(kill_switch)); - let recv_kill = Mutex::new(Some(recv_kill)); - Ok(NetworkNodeHandle { - network_config: config, - state: std::sync::Arc::default(), - send_network: send_chan, - listen_addr, - peer_id, - id, - receiver: NetworkNodeReceiver { - kill_switch, - killed: AtomicBool::new(false), - receiver: Mutex::new(recv_chan), - recv_kill, - receiver_spawned: AtomicBool::new(false), - }, - }) + /// Add a kill switch to the receiver + pub fn set_kill_switch(&mut self, kill_switch: Receiver<()>) { + self.recv_kill = Some(kill_switch); } - /// Spawn a handler `F` that will be notified every time a new [`NetworkEvent`] arrives. - /// - /// # Panics - /// - /// Will panic if a handler is already spawned - #[allow(clippy::unused_async)] - pub async fn spawn_handler(self: &Arc, cb: F) -> impl Future - where - F: Fn(NetworkEvent, Arc>) -> RET + Sync + Send + 'static, - RET: Future> + Send + 'static, - S: Send + 'static, - { - assert!( - !self.receiver.receiver_spawned.swap(true, Ordering::Relaxed), - "Handler is already spawned, this is a bug" - ); - - let handle = Arc::clone(self); - async_spawn(async move { - let receiver = handle.receiver.receiver.lock().await; - let Some(kill_switch) = handle.receiver.recv_kill.lock().await.take() else { - tracing::error!( - "`spawn_handle` was called on a network handle that was already closed" - ); - return; - }; - let mut next_msg = receiver.recv().boxed(); - let mut kill_switch = kill_switch.recv().boxed(); - loop { - match futures::future::select(next_msg, kill_switch).await { - futures::future::Either::Left((incoming_message, other_stream)) => { - let incoming_message = match incoming_message { - Ok(msg) => msg, - Err(e) => { - tracing::warn!(?e, "NetworkNodeHandle::spawn_handle was unable to receive more messages"); - return; - } - }; - if let Err(e) = cb(incoming_message, handle.clone()).await { - tracing::error!( - ?e, - "NetworkNodeHandle::spawn_handle returned an error" - ); - return; - } - - // re-set the `kill_switch` for the next loop - kill_switch = other_stream; - // re-set `receiver.recv()` for the next loop - next_msg = receiver.recv().boxed(); - } - futures::future::Either::Right(_) => { - // killed - handle.receiver.killed.store(true, Ordering::Relaxed); - return; - } - } - } - }) + /// Take the kill switch to allow killing the receiver task + pub fn take_kill_switch(&mut self) -> Option> { + self.recv_kill.take() } +} - /// Receives a reference of the internal `NetworkNodeReceiver`, which can be used to query for incoming messages. - pub fn receiver(&self) -> &NetworkNodeReceiver { - &self.receiver - } +/// Spawn a network node task task and return the handle and the receiver for it +/// # Errors +/// Errors if spawning the task fails +pub async fn spawn_network_node( + config: NetworkNodeConfig, + id: usize, +) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkNodeHandleError> { + let mut network = NetworkNode::new(config.clone()) + .await + .context(NetworkSnafu)?; + // randomly assigned port + let listen_addr = config + .bound_addr + .clone() + .unwrap_or_else(|| gen_multiaddr(0)); + let peer_id = network.peer_id(); + let listen_addr = network + .start_listen(listen_addr) + .await + .context(NetworkSnafu)?; + // pin here to force the future onto the heap since it can be large + // in the case of flume + let (send_chan, recv_chan) = Box::pin(network.spawn_listeners()) + .await + .context(NetworkSnafu)?; + let receiver = NetworkNodeReceiver { + receiver: recv_chan, + recv_kill: None, + }; + + info!("LISTEN ADDRESS IS {:?}", listen_addr); + let handle = NetworkNodeHandle { + network_config: config, + send_network: send_chan, + listen_addr, + peer_id, + id, + }; + Ok((receiver, handle)) +} + +impl NetworkNodeHandle { /// Cleanly shuts down a swarm node /// This is done by sending a message to - /// the swarm event handler to stop handling events - /// and a message to the swarm itself to spin down + /// the swarm itself to spin down #[instrument] pub async fn shutdown(&self) -> Result<(), NetworkNodeHandleError> { self.send_request(ClientRequest::Shutdown).await?; - // if this fails, the thread has already been killed. - if let Some(kill_switch) = self.receiver.kill_switch.lock().await.take() { - kill_switch.send(()); - } else { - tracing::warn!("The network node handle is shutting down, but the kill switch was already consumed"); - } Ok(()) } - /// Notify the network to begin the bootstrap process /// # Errors /// If unable to send via `send_network`. This should only happen @@ -223,12 +129,11 @@ impl NetworkNodeHandle { } /// Get a reference to the network node handle's listen addr. + #[must_use] pub fn listen_addr(&self) -> Multiaddr { self.listen_addr.clone() } -} -impl NetworkNodeHandle { /// Print out the routing table used by kademlia /// NOTE: only for debugging purposes currently /// # Errors @@ -249,10 +154,7 @@ impl NetworkNodeHandle { num_peers: usize, node_id: usize, timeout: Duration, - ) -> Result<(), NetworkNodeHandleError> - where - S: Default + Debug, - { + ) -> Result<(), NetworkNodeHandleError> { let start = Instant::now(); self.begin_bootstrap().await?; let mut connected_ok = false; @@ -554,79 +456,22 @@ impl NetworkNodeHandle { } /// Get a reference to the network node handle's id. + #[must_use] pub fn id(&self) -> usize { self.id } /// Get a reference to the network node handle's peer id. + #[must_use] pub fn peer_id(&self) -> PeerId { self.peer_id } /// Return a reference to the network config + #[must_use] pub fn config(&self) -> &NetworkNodeConfig { &self.network_config } - - /// Modify the state. This will automatically call `state_changed` and `notify_webui` - pub async fn modify_state(&self, cb: F) - where - F: FnMut(&mut S), - { - self.state.modify(cb).await; - } - - /// Returns `true` if the network state is killed - pub fn is_killed(&self) -> bool { - self.receiver.killed.load(Ordering::Relaxed) - } - - /// Call `wait_timeout_until` on the state's [`SubscribableMutex`] - /// # Errors - /// Will throw a [`NetworkNodeHandleError::TimeoutError`] error upon timeout - pub async fn state_wait_timeout_until( - &self, - timeout: Duration, - f: F, - ) -> Result<(), NetworkNodeHandleError> - where - F: FnMut(&S) -> bool, - { - self.state - .wait_timeout_until(timeout, f) - .await - .context(TimeoutSnafu) - } - - /// Call `wait_timeout_until_with_trigger` on the state's [`SubscribableMutex`] - pub fn state_wait_timeout_until_with_trigger<'a, F>( - &'a self, - timeout: Duration, - f: F, - ) -> stream::to::Timeout + 'a>> - where - F: FnMut(&S) -> bool + 'a, - { - self.state.wait_timeout_until_with_trigger(timeout, f) - } - - /// Call `wait_until` on the state's [`SubscribableMutex`] - /// # Errors - /// Will throw a [`NetworkNodeHandleError::TimeoutError`] error upon timeout - pub async fn state_wait_until(&self, f: F) -> Result<(), NetworkNodeHandleError> - where - F: FnMut(&S) -> bool, - { - self.state.wait_until(f).await; - Ok(()) - } -} - -impl NetworkNodeHandle { - /// Get a clone of the internal state - pub async fn state(&self) -> S { - self.state.cloned().await - } } /// Error wrapper type for interacting with swarm handle diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 49bafb12ab..c06ff9085d 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -1,14 +1,17 @@ use async_compatibility_layer::{ art::async_sleep, - channel::RecvError, + art::async_spawn, + async_primitives::subscribable_mutex::SubscribableMutex, + channel::{bounded, RecvError}, logging::{setup_backtrace, setup_logging}, }; use futures::{future::join_all, Future, FutureExt}; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; use libp2p_networking::network::{ - network_node_handle_error::NodeConfigSnafu, NetworkEvent, NetworkNodeConfigBuilder, - NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeType, + network_node_handle_error::NodeConfigSnafu, spawn_network_node, NetworkEvent, + NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, + NetworkNodeType, }; use snafu::{ResultExt, Snafu}; use std::{ @@ -21,6 +24,64 @@ use std::{ }; use tracing::{info, instrument, warn}; +#[derive(Clone, Debug)] +pub(crate) struct HandleWithState { + pub(crate) handle: Arc, + pub(crate) state: Arc>, +} + +/// Spawn a handler `F` that will be notified every time a new [`NetworkEvent`] arrives. +/// +/// # Panics +/// +/// Will panic if a handler is already spawned +pub fn spawn_handler( + handle_and_state: HandleWithState, + mut receiver: NetworkNodeReceiver, + cb: F, +) -> impl Future +where + F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, + RET: Future> + Send + 'static, + S: Debug + Default + Send + Clone + 'static, +{ + async_spawn(async move { + let Some(mut kill_switch) = receiver.take_kill_switch() else { + tracing::error!( + "`spawn_handle` was called on a network handle that was already closed" + ); + return; + }; + let mut next_msg = receiver.recv().boxed(); + let mut kill_switch = kill_switch.recv().boxed(); + loop { + match futures::future::select(next_msg, kill_switch).await { + futures::future::Either::Left((incoming_message, other_stream)) => { + let incoming_message = match incoming_message { + Ok(msg) => msg, + Err(e) => { + tracing::warn!(?e, "NetworkNodeHandle::spawn_handle was unable to receive more messages"); + return; + } + }; + if let Err(e) = cb(incoming_message, handle_and_state.clone()).await { + tracing::error!(?e, "NetworkNodeHandle::spawn_handle returned an error"); + return; + } + + // re-set the `kill_switch` for the next loop + kill_switch = other_stream; + // re-set `receiver.recv()` for the next loop + next_msg = receiver.recv().boxed(); + } + futures::future::Either::Right(_) => { + return; + } + } + } + }) +} + /// General function to spin up testing infra /// perform tests by calling `run_test` /// then cleans up tests @@ -30,7 +91,7 @@ use tracing::{info, instrument, warn}; /// - Initialize network nodes /// - Kill network nodes /// - A test assertion fails -pub async fn test_bed( +pub async fn test_bed( run_test: F, client_handler: G, num_nodes: usize, @@ -39,21 +100,27 @@ pub async fn test_bed, FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>>, Duration) -> FutF, - G: Fn(NetworkEvent, Arc>) -> FutG + 'static + Send + Sync, + F: FnOnce(Vec>, Duration) -> FutF, + G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync, { setup_logging(); setup_backtrace(); + let mut kill_switches = Vec::new(); // NOTE we want this to panic if we can't spin up the swarms. // that amounts to a failed test. - let handles = spin_up_swarms(num_nodes, timeout, num_of_bootstrap) + let handles_and_receivers = spin_up_swarms::(num_nodes, timeout, num_of_bootstrap) .await .unwrap(); + let (handles, receivers): (Vec<_>, Vec<_>) = handles_and_receivers.into_iter().unzip(); let mut handler_futures = Vec::new(); - for handle in &handles { - let handler_fut = handle.spawn_handler(client_handler.clone()).await; + for (i, mut rx) in receivers.into_iter().enumerate() { + let (kill_tx, kill_rx) = bounded(1); + let handle = &handles[i]; + kill_switches.push(kill_tx); + rx.set_kill_switch(kill_rx); + let handler_fut = spawn_handler(handle.clone(), rx, client_handler.clone()); handler_futures.push(handler_fut); } @@ -61,7 +128,10 @@ pub async fn test_bed(handles: &[Arc>]) -> HashMap { +fn gen_peerid_map(handles: &[Arc]) -> HashMap { let mut r_val = HashMap::new(); for handle in handles { r_val.insert(handle.peer_id(), handle.id()); @@ -79,7 +149,7 @@ fn gen_peerid_map(handles: &[Arc>]) -> HashMap(handles: &[Arc>]) { +pub async fn print_connections(handles: &[Arc]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); for handle in handles { @@ -99,12 +169,13 @@ pub async fn print_connections(handles: &[Arc>]) { /// Spins up `num_of_nodes` nodes, connects them to each other /// and waits for connections to propagate to all nodes. +#[allow(clippy::type_complexity)] #[instrument] -pub async fn spin_up_swarms( +pub async fn spin_up_swarms( num_of_nodes: usize, timeout_len: Duration, num_bootstrap: usize, -) -> Result>>, TestError> { +) -> Result, NetworkNodeReceiver)>, TestError> { let mut handles = Vec::new(); let mut bootstrap_addrs = Vec::<(PeerId, Multiaddr)>::new(); let mut connecting_futs = Vec::new(); @@ -130,15 +201,11 @@ pub async fn spin_up_swarms( .bound_addr(Some(addr)) .ttl(None) .republication_interval(None); - let node = Box::pin(NetworkNodeHandle::new( - config - .build() - .context(NodeConfigSnafu) - .context(HandleSnafu)?, - i, - )) - .await - .context(HandleSnafu)?; + let config = config + .build() + .context(NodeConfigSnafu) + .context(HandleSnafu)?; + let (rx, node) = spawn_network_node(config.clone(), i).await.unwrap(); let node = Arc::new(node); let addr = node.listen_addr(); info!("listen addr for {} is {:?}", i, addr); @@ -151,7 +218,11 @@ pub async fn spin_up_swarms( } .boxed_local() }); - handles.push(node); + let node_with_state = HandleWithState { + handle: node.clone(), + state: Arc::default(), + }; + handles.push((node_with_state, rx)); } for j in 0..(num_of_nodes - num_bootstrap) { @@ -169,12 +240,10 @@ pub async fn spin_up_swarms( .build() .context(NodeConfigSnafu) .context(HandleSnafu)?; - let node = Box::pin(NetworkNodeHandle::new( - regular_node_config.clone(), - j + num_bootstrap, - )) - .await - .context(HandleSnafu)?; + let (rx, node) = spawn_network_node(regular_node_config.clone(), j + num_bootstrap) + .await + .unwrap(); + let node = Arc::new(node); connecting_futs.push({ let node = node.clone(); @@ -184,8 +253,11 @@ pub async fn spin_up_swarms( } .boxed_local() }); - - handles.push(node); + let node_with_state = HandleWithState { + handle: node.clone(), + state: Arc::default(), + }; + handles.push((node_with_state, rx)); } info!("BSADDRS ARE: {:?}", bootstrap_addrs); @@ -197,9 +269,10 @@ pub async fn spin_up_swarms( .collect::>() ); - for handle in &handles[0..num_of_nodes] { + for (handle, _) in &handles[0..num_of_nodes] { let to_share = bootstrap_addrs.clone(); handle + .handle .add_known_peers( to_share .iter() @@ -221,8 +294,9 @@ pub async fn spin_up_swarms( return Err(TestError::SpinupTimeout { failing_nodes }); } - for handle in &handles { + for (handle, _) in &handles { handle + .handle .subscribe("global".to_string()) .await .context(HandleSnafu)?; diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index e6faaaf2f3..7910220ad0 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -5,11 +5,10 @@ use crate::common::print_connections; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use bincode::Options; -use common::{test_bed, HandleSnafu, TestError}; +use common::{test_bed, HandleSnafu, HandleWithState, TestError}; use hotshot_utils::bincode::bincode_opts; -use libp2p_networking::network::{ - get_random_handle, NetworkEvent, NetworkNodeHandle, NetworkNodeHandleError, -}; +use libp2p_networking::network::{NetworkEvent, NetworkNodeHandleError}; +use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; use snafu::ResultExt; use std::{fmt::Debug, sync::Arc, time::Duration}; @@ -51,13 +50,24 @@ pub enum CounterMessage { Noop, } +/// Given a slice of handles assumed to be larger than 0, +/// chooses one +/// # Panics +/// panics if handles is of length 0 +fn get_random_handle( + handles: &[HandleWithState], + rng: &mut dyn rand::RngCore, +) -> HandleWithState { + handles.iter().choose(rng).unwrap().clone() +} + /// event handler for events from the swarm /// - updates state based on events received /// - replies to direct messages #[instrument] pub async fn counter_handle_network_event( event: NetworkEvent, - handle: Arc>, + handle: HandleWithState, ) -> Result<(), NetworkNodeHandleError> { use CounterMessage::*; use NetworkEvent::*; @@ -68,12 +78,13 @@ pub async fn counter_handle_network_event( match msg { // direct message only MyCounterIs(c) => { - handle.modify_state(|s| *s = c).await; + handle.state.modify(|s| *s = c).await; } // gossip message only IncrementCounter { from, to, .. } => { handle - .modify_state(|s| { + .state + .modify(|s| { if *s == from { *s = to; } @@ -93,24 +104,34 @@ pub async fn counter_handle_network_event( // direct message request IncrementCounter { from, to, .. } => { handle - .modify_state(|s| { + .state + .modify(|s| { if *s == from { *s = to; } }) .await; - handle.direct_response(chan, &CounterMessage::Noop).await?; + handle + .handle + .direct_response(chan, &CounterMessage::Noop) + .await?; } // direct message response AskForCounter => { - let response = MyCounterIs(handle.state().await); - handle.direct_response(chan, &response).await?; + let response = MyCounterIs(handle.state.copied().await); + handle.handle.direct_response(chan, &response).await?; } MyCounterIs(_) => { - handle.direct_response(chan, &CounterMessage::Noop).await?; + handle + .handle + .direct_response(chan, &CounterMessage::Noop) + .await?; } Noop => { - handle.direct_response(chan, &CounterMessage::Noop).await?; + handle + .handle + .direct_response(chan, &CounterMessage::Noop) + .await?; } } } @@ -125,33 +146,31 @@ pub async fn counter_handle_network_event( /// on error #[allow(clippy::similar_names)] async fn run_request_response_increment<'a>( - requester_handle: Arc>, - requestee_handle: Arc>, + requester_handle: HandleWithState, + requestee_handle: HandleWithState, timeout: Duration, ) -> Result<(), TestError> { async move { - let new_state = requestee_handle.state().await; + let new_state = requestee_handle.state.copied().await; // set up state change listener #[cfg(async_executor_impl = "async-std")] - let mut stream = requester_handle - .state_wait_timeout_until_with_trigger(timeout, move |state| *state == new_state); + let mut stream = requester_handle.state.wait_timeout_until_with_trigger(timeout, move |state| *state == new_state); #[cfg(async_executor_impl = "tokio")] let mut stream = Box::pin( - requester_handle - .state_wait_timeout_until_with_trigger(timeout, move |state| *state == new_state), + requester_handle.state.wait_timeout_until_with_trigger(timeout, move |state| *state == new_state), ); #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - let requestee_pid = requestee_handle.peer_id(); + let requestee_pid = requestee_handle.handle.peer_id(); match stream.next().await.unwrap() { Ok(()) => {} Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); std::process::exit(-1)}, } - requester_handle + requester_handle.handle .direct_request(requestee_pid, &CounterMessage::AskForCounter) .await .context(HandleSnafu)?; @@ -160,14 +179,14 @@ async fn run_request_response_increment<'a>( Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); std::process::exit(-1)}, } - let s1 = requester_handle.state().await; + let s1 = requester_handle.state.copied().await; // sanity check if s1 == new_state { Ok(()) } else { Err(TestError::State { - id: requester_handle.id(), + id: requester_handle.handle.id(), expected: new_state, actual: s1, }) @@ -179,24 +198,24 @@ async fn run_request_response_increment<'a>( /// broadcasts `msg` from a randomly chosen handle /// then asserts that all nodes match `new_state` async fn run_gossip_round( - handles: &[Arc>], + handles: &[HandleWithState], msg: CounterMessage, new_state: CounterState, timeout_duration: Duration, ) -> Result<(), TestError> { let mut rng = rand::thread_rng(); let msg_handle = get_random_handle(handles, &mut rng); - msg_handle.modify_state(|s| *s = new_state).await; + msg_handle.state.modify(|s| *s = new_state).await; let mut futs = Vec::new(); let len = handles.len(); for handle in handles { // already modified, so skip msg_handle - if handle.peer_id() != msg_handle.peer_id() { - let stream = handle.state_wait_timeout_until_with_trigger(timeout_duration, |state| { - *state == new_state - }); + if handle.handle.peer_id() != msg_handle.handle.peer_id() { + let stream = handle + .state + .wait_timeout_until_with_trigger(timeout_duration, |state| *state == new_state); futs.push(Box::pin(stream)); } } @@ -221,6 +240,7 @@ async fn run_gossip_round( } msg_handle + .handle .gossip("global".to_string(), &msg) .await .context(HandleSnafu)?; @@ -233,14 +253,19 @@ async fn run_gossip_round( let mut failing = Vec::new(); for handle in handles { - let handle_state = handle.state().await; + let handle_state = handle.state.copied().await; if handle_state != new_state { - failing.push(handle.id()); + failing.push(handle.handle.id()); println!("state: {handle_state:?}, expected: {new_state:?}"); } } if !failing.is_empty() { - print_connections(handles).await; + let nodes = handles + .iter() + .cloned() + .map(|h| h.handle) + .collect::>(); + print_connections(nodes.as_slice()).await; return Err(TestError::GossipTimeout { failing }); } @@ -248,7 +273,7 @@ async fn run_gossip_round( } async fn run_intersperse_many_rounds( - handles: Vec>>, + handles: Vec>, timeout: Duration, ) { for i in 0..u32::try_from(NUM_ROUNDS).unwrap() { @@ -259,63 +284,54 @@ async fn run_intersperse_many_rounds( } } for h in handles { - assert_eq!(h.state().await, u32::try_from(NUM_ROUNDS).unwrap()); + assert_eq!(h.state.copied().await, u32::try_from(NUM_ROUNDS).unwrap()); } } -async fn run_dht_many_rounds( - handles: Vec>>, - timeout: Duration, -) { +async fn run_dht_many_rounds(handles: Vec>, timeout: Duration) { run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; } -async fn run_dht_one_round(handles: Vec>>, timeout: Duration) { +async fn run_dht_one_round(handles: Vec>, timeout: Duration) { run_dht_rounds(&handles, timeout, 0, 1).await; } async fn run_request_response_many_rounds( - handles: Vec>>, + handles: Vec>, timeout: Duration, ) { for _i in 0..NUM_ROUNDS { run_request_response_increment_all(&handles, timeout).await; } for h in handles { - assert_eq!(h.state().await, u32::try_from(NUM_ROUNDS).unwrap()); + assert_eq!(h.state.copied().await, u32::try_from(NUM_ROUNDS).unwrap()); } } /// runs one round of request response /// # Panics /// on error -pub async fn run_request_response_one_round( - handles: Vec>>, +async fn run_request_response_one_round( + handles: Vec>, timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; for h in handles { - assert_eq!(h.state().await, 1); + assert_eq!(h.state.copied().await, 1); } } /// runs multiple rounds of gossip /// # Panics /// on error -pub async fn run_gossip_many_rounds( - handles: Vec>>, - timeout: Duration, -) { +async fn run_gossip_many_rounds(handles: Vec>, timeout: Duration) { run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await; } /// runs one round of gossip /// # Panics /// on error -async fn run_gossip_one_round( - handles: Vec>>, - timeout: Duration, -) { +async fn run_gossip_one_round(handles: Vec>, timeout: Duration) { run_gossip_rounds(&handles, 1, 0, timeout).await; } @@ -323,7 +339,7 @@ async fn run_gossip_one_round( /// # Panics /// on error async fn run_dht_rounds( - handles: &[Arc>], + handles: &[HandleWithState], timeout: Duration, starting_val: usize, num_rounds: usize, @@ -339,12 +355,12 @@ async fn run_dht_rounds( value.push(inc_val); // put the key - msg_handle.put_record(&key, &value).await.unwrap(); + msg_handle.handle.put_record(&key, &value).await.unwrap(); // get the key from the other nodes for handle in handles { let result: Result, NetworkNodeHandleError> = - handle.get_record_timeout(&key, timeout).await; + handle.handle.get_record_timeout(&key, timeout).await; match result { Err(e) => { error!("DHT error {e:?} during GET"); @@ -360,7 +376,7 @@ async fn run_dht_rounds( /// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast async fn run_gossip_rounds( - handles: &[Arc>], + handles: &[HandleWithState], num_rounds: usize, starting_state: CounterState, timeout: Duration, @@ -386,22 +402,27 @@ async fn run_gossip_rounds( /// and update their state to the recv'ed state #[allow(clippy::similar_names)] async fn run_request_response_increment_all( - handles: &[Arc>], + handles: &[HandleWithState], timeout: Duration, ) { let mut rng = rand::thread_rng(); let requestee_handle = get_random_handle(handles, &mut rng); - requestee_handle.modify_state(|s| *s += 1).await; - info!("RR REQUESTEE IS {:?}", requestee_handle.peer_id()); + requestee_handle.state.modify(|s| *s += 1).await; + info!("RR REQUESTEE IS {:?}", requestee_handle.handle.peer_id()); let mut futs = Vec::new(); for handle in handles { - if handle.lookup_pid(requestee_handle.peer_id()).await.is_err() { + if handle + .handle + .lookup_pid(requestee_handle.handle.peer_id()) + .await + .is_err() + { error!("ERROR LOOKING UP REQUESTEE ADDRS"); } // NOTE uncomment if debugging // let _ = h.print_routing_table().await; // skip `requestee_handle` - if handle.peer_id() != requestee_handle.peer_id() { + if handle.handle.peer_id() != requestee_handle.handle.peer_id() { let requester_handle = handle.clone(); futs.push(run_request_response_increment( requester_handle, @@ -435,10 +456,15 @@ async fn run_request_response_increment_all( } if results.read().await.iter().any(Result::is_err) { - print_connections(handles).await; + let nodes = handles + .iter() + .cloned() + .map(|h| h.handle) + .collect::>(); + print_connections(nodes.as_slice()).await; let mut states = vec![]; for handle in handles { - states.push(handle.state().await); + states.push(handle.state.copied().await); } error!("states: {states:?}"); std::process::exit(-1); diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 0e978304a7..528f7195e1 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -821,9 +821,7 @@ impl, A: ConsensusApi + &self.output_event_stream, ); let old_anchor_view = consensus.last_decided_view; - consensus - .collect_garbage(old_anchor_view, new_anchor_view) - .await; + consensus.collect_garbage(old_anchor_view, new_anchor_view); self.vid_shares = self.vid_shares.split_off(&new_anchor_view); consensus.last_decided_view = new_anchor_view; consensus diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 3a1c1e06d5..a2201036db 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -295,12 +295,7 @@ impl Consensus { /// `saved_payloads` and `validated_state_map` fields of `Consensus`. /// # Panics /// On inconsistent stored entries - #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn collect_garbage( - &mut self, - old_anchor_view: TYPES::Time, - new_anchor_view: TYPES::Time, - ) { + pub fn collect_garbage(&mut self, old_anchor_view: TYPES::Time, new_anchor_view: TYPES::Time) { // state check let anchor_entry = self .validated_state_map From eb38164f60a99753a0ac8088ff9a5c7ac1dd9bd7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 29 Feb 2024 18:08:13 -0500 Subject: [PATCH 0826/1393] [LIBP2P] Use Regular Gossip Behaviour (#2611) * Use the stock gossipsub * lower log level * Remove Gossip Behaviour * allow retryins in `NetworkNode` * Revert retry logic --- .../src/traits/networking/libp2p_network.rs | 6 +- .../src/network/behaviours/gossip.rs | 269 ------------------ .../src/network/behaviours/mod.rs | 3 - libp2p-networking/src/network/def.rs | 22 +- libp2p-networking/src/network/mod.rs | 8 +- libp2p-networking/src/network/node.rs | 28 +- libp2p-networking/tests/counter.rs | 2 +- 7 files changed, 40 insertions(+), 298 deletions(-) delete mode 100644 libp2p-networking/src/network/behaviours/gossip.rs diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index ca554e6597..20b2240659 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -522,7 +522,7 @@ impl Libp2pNetwork { sender: &UnboundedSender, ) -> Result<(), NetworkError> { match msg { - GossipMsg(msg, _) => { + GossipMsg(msg) => { let result: Result = bincode_opts().deserialize(&msg); if let Ok(result) = result { sender @@ -595,9 +595,7 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } - GossipMsg(raw, _) - | DirectRequest(raw, _, _) - | DirectResponse(raw, _) => { + GossipMsg(raw) | DirectRequest(raw, _, _) | DirectResponse(raw, _) => { let message_version = read_version(raw); match message_version { Some(VERSION_0_1) => { diff --git a/libp2p-networking/src/network/behaviours/gossip.rs b/libp2p-networking/src/network/behaviours/gossip.rs deleted file mode 100644 index 45ebd7968b..0000000000 --- a/libp2p-networking/src/network/behaviours/gossip.rs +++ /dev/null @@ -1,269 +0,0 @@ -use std::{ - collections::{HashSet, VecDeque}, - task::Poll, -}; - -use libp2p::{ - gossipsub::{Behaviour, Event, IdentTopic, PublishError::Duplicate, TopicHash}, - swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, - Multiaddr, -}; -use libp2p_identity::PeerId; - -use tracing::{debug, error, info, warn}; - -use super::exponential_backoff::ExponentialBackoff; - -/// wrapper metadata around libp2p's gossip protocol -pub struct GossipBehaviour { - /// Timeout trackidng when to retry gossip - backoff: ExponentialBackoff, - /// The in progress gossip queries - in_progress_gossip: VecDeque<(IdentTopic, Vec)>, - /// The gossip behaviour - gossipsub: Behaviour, - /// Output events to parent behavioru - out_event_queue: Vec, - /// Set of topics we are subscribed to - subscribed_topics: HashSet, -} - -/// Output event -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum GossipEvent { - /// We received a gossip - GossipMsg(Vec, TopicHash), -} - -impl GossipBehaviour { - /// handle a gossip event - fn gossip_handle_event(&mut self, event: Event) { - match event { - Event::Message { message, .. } => { - // if we get an event from the gossipsub behaviour, push it - // onto the event queue (which will get popped during poll) - // and propagated back to the overall behaviour - self.out_event_queue - .push(GossipEvent::GossipMsg(message.data, message.topic)); - } - Event::Subscribed { topic, .. } => { - info!("subscribed to topic {}", topic); - } - Event::Unsubscribed { topic, .. } => { - info!("unsubscribed to topic {}", topic); - } - Event::GossipsubNotSupported { peer_id } => { - error!("gossipsub not supported on {}!", peer_id); - } - } - } -} - -impl NetworkBehaviour for GossipBehaviour { - type ConnectionHandler = ::ConnectionHandler; - - type ToSwarm = GossipEvent; - - fn on_swarm_event(&mut self, event: libp2p::swarm::derive_prelude::FromSwarm<'_>) { - self.gossipsub.on_swarm_event(event); - } - - fn poll( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> Poll>> { - // retry sending shit - if self.backoff.is_expired() { - let published = self.drain_publish_gossips(); - self.backoff.start_next(published); - } - if let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.gossipsub, cx) { - match ready { - ToSwarm::GenerateEvent(e) => { - // add event to event queue which will be subsequently popped off. - self.gossip_handle_event(e); - } - ToSwarm::Dial { opts } => { - return Poll::Ready(ToSwarm::Dial { opts }); - } - ToSwarm::NotifyHandler { - peer_id, - handler, - event, - } => { - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event, - }); - } - ToSwarm::CloseConnection { - peer_id, - connection, - } => { - return Poll::Ready(ToSwarm::CloseConnection { - peer_id, - connection, - }); - } - ToSwarm::ListenOn { opts } => { - return Poll::Ready(ToSwarm::ListenOn { opts }); - } - ToSwarm::RemoveListener { id } => { - return Poll::Ready(ToSwarm::RemoveListener { id }); - } - ToSwarm::NewExternalAddrCandidate(c) => { - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(c)); - } - ToSwarm::ExternalAddrConfirmed(c) => { - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(c)); - } - ToSwarm::ExternalAddrExpired(c) => { - return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); - } - e => { - error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); - } - } - } - if !self.out_event_queue.is_empty() { - return Poll::Ready(ToSwarm::GenerateEvent(self.out_event_queue.remove(0))); - } - Poll::Pending - } - - fn on_connection_handler_event( - &mut self, - peer_id: PeerId, - connection_id: libp2p::swarm::derive_prelude::ConnectionId, - event: THandlerOutEvent, - ) { - self.gossipsub - .on_connection_handler_event(peer_id, connection_id, event); - } - - fn handle_pending_inbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result<(), libp2p::swarm::ConnectionDenied> { - self.gossipsub - .handle_pending_inbound_connection(connection_id, local_addr, remote_addr) - } - - fn handle_established_inbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.gossipsub.handle_established_inbound_connection( - connection_id, - peer, - local_addr, - remote_addr, - ) - } - - fn handle_pending_outbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - maybe_peer: Option, - addresses: &[Multiaddr], - effective_role: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.gossipsub.handle_pending_outbound_connection( - connection_id, - maybe_peer, - addresses, - effective_role, - ) - } - - fn handle_established_outbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - peer: PeerId, - addr: &Multiaddr, - role_override: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.gossipsub.handle_established_outbound_connection( - connection_id, - peer, - addr, - role_override, - ) - } -} - -impl GossipBehaviour { - /// Create new gossip behavioru based on gossipsub - #[must_use] - pub fn new(gossipsub: Behaviour) -> Self { - Self { - backoff: ExponentialBackoff::default(), - in_progress_gossip: VecDeque::default(), - gossipsub, - out_event_queue: Vec::default(), - subscribed_topics: HashSet::default(), - } - } - - /// Publish a given gossip - pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { - let res = self.gossipsub.publish(topic.clone(), contents.clone()); - if let Err(e) = res { - if matches!(e, Duplicate) { - debug!("duplicate gossip message"); - } else { - error!("error publishing gossip message {:?}", e); - } - self.in_progress_gossip.push_back((topic, contents)); - } - } - - /// Subscribe to a given topic - pub fn subscribe_gossip(&mut self, t: &str) { - if self.subscribed_topics.contains(t) { - warn!( - "tried to subscribe to already subscribed topic {:?}. Noop.", - t - ); - } else if self.gossipsub.subscribe(&IdentTopic::new(t)).is_err() { - error!("error subscribing to topic {}", t); - } else { - info!("subscribed req to {:?}", t); - self.subscribed_topics.insert(t.to_string()); - } - } - - /// Unsubscribe from a given topic - pub fn unsubscribe_gossip(&mut self, t: &str) { - if self.subscribed_topics.contains(t) { - if self.gossipsub.unsubscribe(&IdentTopic::new(t)).is_err() { - error!("error unsubscribing to topic {}", t); - } else { - self.subscribed_topics.remove(t); - } - } else { - warn!("tried to unsubscribe to untracked topic {:?}. Noop.", t); - } - } - - /// Attempt to drain the internal gossip list, publishing each gossip - pub fn drain_publish_gossips(&mut self) -> bool { - let mut r_val = true; - - while let Some((topic, contents)) = self.in_progress_gossip.pop_front() { - let res = self.gossipsub.publish(topic.clone(), contents.clone()); - if res.is_err() { - self.in_progress_gossip.push_back((topic, contents)); - r_val = false; - break; - } - } - r_val - } -} diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/libp2p-networking/src/network/behaviours/mod.rs index 869b6324c3..a093b22f9e 100644 --- a/libp2p-networking/src/network/behaviours/mod.rs +++ b/libp2p-networking/src/network/behaviours/mod.rs @@ -1,6 +1,3 @@ -/// Wrapper around gossipsub -pub mod gossip; - /// Wrapper around `RequestResponse` pub mod direct_message; diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index bfbf197b40..80223990e3 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -1,20 +1,19 @@ use futures::channel::oneshot::Sender; use libp2p::{ - gossipsub::IdentTopic as Topic, + gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, request_response::ResponseChannel, Multiaddr, }; use libp2p_identity::PeerId; use std::num::NonZeroUsize; -use tracing::debug; +use tracing::{debug, error}; use super::{ behaviours::{ dht::{DHTBehaviour, DHTEvent, KadPutQuery}, direct_message::{DMBehaviour, DMEvent, DMRequest}, exponential_backoff::ExponentialBackoff, - gossip::{GossipBehaviour, GossipEvent}, }, NetworkEventInternal, }; @@ -85,18 +84,23 @@ impl NetworkDef { /// Gossip functions impl NetworkDef { /// Publish a given gossip - pub fn publish_gossip(&mut self, topic: Topic, contents: Vec) { - self.gossipsub.publish_gossip(topic, contents); + pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { + if let Err(e) = self.gossipsub.publish(topic, contents) { + tracing::warn!("Failed to publish gossip message. Error: {:?}", e); + } } - /// Subscribe to a given topic pub fn subscribe_gossip(&mut self, t: &str) { - self.gossipsub.subscribe_gossip(t); + if let Err(e) = self.gossipsub.subscribe(&IdentTopic::new(t)) { + error!("Failed to subsribe to topic {:?}. Error: {:?}", t, e); + } } /// Unsubscribe from a given topic pub fn unsubscribe_gossip(&mut self, t: &str) { - self.gossipsub.unsubscribe_gossip(t); + if let Err(e) = self.gossipsub.unsubscribe(&IdentTopic::new(t)) { + error!("Failed to unsubsribe from topic {:?}. Error: {:?}", t, e); + } } } @@ -157,7 +161,7 @@ impl From for NetworkEventInternal { impl From for NetworkEventInternal { fn from(event: GossipEvent) -> Self { - Self::GossipEvent(event) + Self::GossipEvent(Box::new(event)) } } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 2c035b21fe..77f9f8f92c 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -17,14 +17,14 @@ pub use self::{ }, }; -use self::behaviours::{dht::DHTEvent, direct_message::DMEvent, gossip::GossipEvent}; +use self::behaviours::{dht::DHTEvent, direct_message::DMEvent}; use bincode::Options; use futures::channel::oneshot::Sender; use hotshot_utils::bincode::bincode_opts; use libp2p::{ build_multiaddr, core::{muxing::StreamMuxerBox, transport::Boxed}, - gossipsub::TopicHash, + gossipsub::Event as GossipEvent, identify::Event as IdentifyEvent, identity::Keypair, quic, @@ -163,7 +163,7 @@ pub enum ClientRequest { #[derive(Debug)] pub enum NetworkEvent { /// Recv-ed a broadcast - GossipMsg(Vec, TopicHash), + GossipMsg(Vec), /// Recv-ed a direct message from a node DirectRequest(Vec, PeerId, ResponseChannel>), /// Recv-ed a direct response from a node (that hopefully was initiated by this node) @@ -182,7 +182,7 @@ pub enum NetworkEventInternal { /// to store it on the heap. IdentifyEvent(Box), /// a gossip event - GossipEvent(GossipEvent), + GossipEvent(Box), /// a direct message event DMEvent(DMEvent), } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index db9239b053..a77ae15b32 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -16,7 +16,6 @@ pub use self::{ }; use super::{ - behaviours::gossip::GossipBehaviour, error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, NetworkNodeType, @@ -26,7 +25,6 @@ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMEvent}, exponential_backoff::ExponentialBackoff, - gossip::GossipEvent, }; use async_compatibility_layer::{ art::async_spawn, @@ -37,7 +35,7 @@ use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::{core::transport::ListenerId, StreamProtocol}; use libp2p::{ gossipsub::{ - Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, + Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, Event as GossipEvent, Message as GossipsubMessage, MessageAuthenticity, MessageId, Topic, ValidationMode, }, identify::{ @@ -282,7 +280,7 @@ impl NetworkNode { ); let network = NetworkDef::new( - GossipBehaviour::new(gossipsub), + gossipsub, DHTBehaviour::new( kadem, peer_id, @@ -397,7 +395,7 @@ impl NetworkNode { return Ok(true); } ClientRequest::GossipMsg(topic, contents) => { - behaviour.publish_gossip(Topic::new(topic), contents); + behaviour.publish_gossip(Topic::new(topic.clone()), contents.clone()); } ClientRequest::Subscribe(t, chan) => { behaviour.subscribe_gossip(&t); @@ -555,9 +553,23 @@ impl NetworkNode { } None } - NetworkEventInternal::GossipEvent(e) => match e { - GossipEvent::GossipMsg(data, topic) => { - Some(NetworkEvent::GossipMsg(data, topic)) + NetworkEventInternal::GossipEvent(e) => match *e { + GossipEvent::Message { + propagation_source: _peer_id, + message_id: _id, + message, + } => Some(NetworkEvent::GossipMsg(message.data)), + GossipEvent::Subscribed { peer_id, topic } => { + info!("Peer: {:?}, Subscribed to topic: {:?}", peer_id, topic); + None + } + GossipEvent::Unsubscribed { peer_id, topic } => { + info!("Peer: {:?}, Unsubscribed from topic: {:?}", peer_id, topic); + None + } + GossipEvent::GossipsubNotSupported { peer_id } => { + info!("Peer: {:?}, Does not support Gossip", peer_id); + None } }, NetworkEventInternal::DMEvent(e) => Some(match e { diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 7910220ad0..acb1b07cf4 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -73,7 +73,7 @@ pub async fn counter_handle_network_event( use NetworkEvent::*; match event { IsBootstrapped => {} - GossipMsg(m, _) | DirectResponse(m, _) => { + GossipMsg(m) | DirectResponse(m, _) => { if let Ok(msg) = bincode_opts().deserialize::(&m) { match msg { // direct message only From 21a7e07a864e58df17c1cc9ac91aba31418ca229 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 1 Mar 2024 14:01:10 -0500 Subject: [PATCH 0827/1393] Remove proccessed messages (#2683) --- types/src/message.rs | 123 ------------------------------------------- 1 file changed, 123 deletions(-) diff --git a/types/src/message.rs b/types/src/message.rs index aa2d67982a..b71d2c8c89 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -132,129 +132,6 @@ impl ViewMessage for MessageKind { } } -/// A processed consensus message for both validating and sequencing consensus. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -#[serde(bound(deserialize = ""))] -pub enum ProcessedGeneralConsensusMessage { - /// Message with a quorum proposal. - Proposal(Proposal>, TYPES::SignatureKey), - /// Message with a quorum vote. - Vote(QuorumVote, TYPES::SignatureKey), -} - -impl From> - for GeneralConsensusMessage -{ - fn from(value: ProcessedGeneralConsensusMessage) -> Self { - match value { - ProcessedGeneralConsensusMessage::Proposal(p, _) => { - GeneralConsensusMessage::Proposal(p) - } - ProcessedGeneralConsensusMessage::Vote(v, _) => GeneralConsensusMessage::Vote(v), - } - } -} - -impl ProcessedGeneralConsensusMessage { - /// Create a [`ProcessedGeneralConsensusMessage`] from a [`GeneralConsensusMessage`]. - /// # Panics - /// if reaching the unimplemented `ViewSync` case. - pub fn new(value: GeneralConsensusMessage, sender: TYPES::SignatureKey) -> Self { - match value { - GeneralConsensusMessage::Proposal(p) => { - ProcessedGeneralConsensusMessage::Proposal(p, sender) - } - GeneralConsensusMessage::Vote(v) => ProcessedGeneralConsensusMessage::Vote(v, sender), - // ED NOTE These are deprecated - GeneralConsensusMessage::TimeoutVote(_) => unimplemented!(), - GeneralConsensusMessage::ViewSyncPreCommitVote(_) => unimplemented!(), - GeneralConsensusMessage::ViewSyncCommitVote(_) => unimplemented!(), - GeneralConsensusMessage::ViewSyncFinalizeVote(_) => unimplemented!(), - GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) => unimplemented!(), - GeneralConsensusMessage::ViewSyncCommitCertificate(_) => unimplemented!(), - GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => unimplemented!(), - GeneralConsensusMessage::UpgradeProposal(_) => unimplemented!(), - GeneralConsensusMessage::UpgradeVote(_) => unimplemented!(), - } - } -} - -/// A processed consensus message for the DA committee in sequencing consensus. -#[derive(Serialize, Clone, Debug, PartialEq)] -#[serde(bound(deserialize = ""))] -pub enum ProcessedCommitteeConsensusMessage { - /// Proposal for the DA committee. - DAProposal(Proposal>, TYPES::SignatureKey), - /// Vote from the DA committee. - DAVote(DAVote, TYPES::SignatureKey), - /// Certificate for the DA. - DACertificate(DACertificate, TYPES::SignatureKey), - /// VID dispersal data. Like [`DAProposal`] - VidDisperseMsg(Proposal>, TYPES::SignatureKey), -} - -impl From> - for CommitteeConsensusMessage -{ - fn from(value: ProcessedCommitteeConsensusMessage) -> Self { - match value { - ProcessedCommitteeConsensusMessage::DAProposal(p, _) => { - CommitteeConsensusMessage::DAProposal(p) - } - ProcessedCommitteeConsensusMessage::DAVote(v, _) => { - CommitteeConsensusMessage::DAVote(v) - } - ProcessedCommitteeConsensusMessage::DACertificate(cert, _) => { - CommitteeConsensusMessage::DACertificate(cert) - } - ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, _) => { - CommitteeConsensusMessage::VidDisperseMsg(disperse) - } - } - } -} - -impl ProcessedCommitteeConsensusMessage { - /// Create a [`ProcessedCommitteeConsensusMessage`] from a [`CommitteeConsensusMessage`]. - pub fn new(value: CommitteeConsensusMessage, sender: TYPES::SignatureKey) -> Self { - match value { - CommitteeConsensusMessage::DAProposal(p) => { - ProcessedCommitteeConsensusMessage::DAProposal(p, sender) - } - CommitteeConsensusMessage::DAVote(v) => { - ProcessedCommitteeConsensusMessage::DAVote(v, sender) - } - CommitteeConsensusMessage::DACertificate(cert) => { - ProcessedCommitteeConsensusMessage::DACertificate(cert, sender) - } - CommitteeConsensusMessage::VidDisperseMsg(disperse) => { - ProcessedCommitteeConsensusMessage::VidDisperseMsg(disperse, sender) - } - } - } -} - -/// A processed consensus message for sequencing consensus. -pub type ProcessedSequencingMessage = - Either, ProcessedCommitteeConsensusMessage>; - -impl From> for SequencingMessage { - fn from(value: ProcessedSequencingMessage) -> Self { - match value { - Left(message) => SequencingMessage(Left(message.into())), - Right(message) => SequencingMessage(Right(message.into())), - } - } -} - -impl From> - for ProcessedSequencingMessage -{ - fn from(value: ProcessedGeneralConsensusMessage) -> Self { - Left(value) - } -} - #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to both validating and sequencing consensus. From a4c9902364ce3cfb500311462fd4b0394c024bb5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 4 Mar 2024 10:52:00 -0500 Subject: [PATCH 0828/1393] Add tests for the upgrade task (#2642) * Adds stronger testing harness * Adds an iterator to generate test data for views * Adds a test to validate that consensus correctly updates its internal state when receiving an UpgradeCertificate * Correctly a mistake in consensus logic that took the UpgradeCertificate from the received QuourmProposal rather than the decided leaf. --- hotshot/src/lib.rs | 1 + task-impls/src/consensus.rs | 10 +- task-impls/src/upgrade.rs | 2 +- task/src/task.rs | 5 + testing/src/lib.rs | 9 + testing/src/predicates.rs | 78 ++++++++ testing/src/script.rs | 106 +++++++++++ testing/src/task_helpers.rs | 139 ++++++++++++++- testing/src/view_generator.rs | 304 ++++++++++++++++++++++++++++++++ testing/tests/consensus_task.rs | 65 +------ testing/tests/upgrade_task.rs | 131 ++++++++++++++ types/src/consensus.rs | 8 +- 12 files changed, 786 insertions(+), 72 deletions(-) create mode 100644 testing/src/predicates.rs create mode 100644 testing/src/script.rs create mode 100644 testing/src/view_generator.rs create mode 100644 testing/tests/upgrade_task.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0e1faafd45..6779ca77c3 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -234,6 +234,7 @@ impl> SystemContext { saved_leaves, saved_payloads, saved_da_certs: HashMap::new(), + saved_upgrade_certs: HashMap::new(), // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 528f7195e1..9e3431d5b0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -498,7 +498,13 @@ impl, A: ConsensusApi + // We should just make sure we don't *sign* an UpgradeCertificate for an upgrade // that we do not support. if let Some(ref upgrade_cert) = proposal.data.upgrade_certificate { - if !upgrade_cert.is_valid_cert(self.quorum_membership.as_ref()) { + if upgrade_cert.is_valid_cert(self.quorum_membership.as_ref()) { + self.consensus + .write() + .await + .saved_upgrade_certs + .insert(view, upgrade_cert.clone()); + } else { error!("Invalid upgrade_cert in proposal for view {}", *view); return; } @@ -740,7 +746,7 @@ impl, A: ConsensusApi + .last_synced_block_height .set(usize::try_from(leaf.get_height()).unwrap_or(0)); } - if let Some(ref upgrade_cert) = proposal.data.upgrade_certificate { + if let Some(upgrade_cert) = consensus.saved_upgrade_certs.get(&leaf.get_view_number()) { info!("Updating consensus state with decided upgrade certificate: {:?}", upgrade_cert); self.decided_upgrade_cert = Some(upgrade_cert.clone()); } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 23a21cb3b9..480f022566 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -205,7 +205,7 @@ impl, A: ConsensusApi + } } -/// task state implementation for DA Task +/// task state implementation for the upgrade task impl, A: ConsensusApi + 'static> TaskState for UpgradeTaskState { diff --git a/task/src/task.rs b/task/src/task.rs index 1d9d12faa0..0d928bf41a 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -166,6 +166,11 @@ impl Task { pub fn state_mut(&mut self) -> &mut S { &mut self.state } + /// Get an immutable reference to this tasks state + pub fn state(&self) -> &S { + &self.state + } + /// Spawn a new task adn register it. It will get all events not seend /// by the task creating it. pub async fn run_sub_task(&self, state: S) { diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 224dfae3cf..9abc87a288 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -33,6 +33,15 @@ pub mod spinning_task; /// task for checking if view sync got activated pub mod view_sync_task; +/// predicates to use in tests +pub mod predicates; + +/// scripting harness for tests +pub mod script; + +/// view generator for tests +pub mod view_generator; + /// global event at the test level #[derive(Clone, Debug)] pub enum GlobalTestEvent { diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs new file mode 100644 index 0000000000..ec08dbbdf3 --- /dev/null +++ b/testing/src/predicates.rs @@ -0,0 +1,78 @@ +use hotshot_task_impls::{ + consensus::ConsensusTaskState, events::HotShotEvent, events::HotShotEvent::*, +}; +use hotshot_types::traits::node_implementation::NodeType; + +use hotshot::types::SystemContextHandle; + +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + +pub struct Predicate { + pub function: Box bool>, + pub info: String, +} + +pub fn exact(event: HotShotEvent) -> Predicate> +where + TYPES: NodeType, +{ + let info = format!("{:?}", event); + + Predicate { + function: Box::new(move |e| e == &event), + info, + } +} + +pub fn leaf_decided() -> Predicate> +where + TYPES: NodeType, +{ + let info = "LeafDecided".to_string(); + let function = |e: &_| matches!(e, LeafDecided(_)); + + Predicate { + function: Box::new(function), + info, + } +} + +pub fn quorum_vote_send() -> Predicate> +where + TYPES: NodeType, +{ + let info = "QuorumVoteSend".to_string(); + let function = |e: &_| matches!(e, QuorumVoteSend(_)); + + Predicate { + function: Box::new(function), + info, + } +} + +type ConsensusTaskTestState = + ConsensusTaskState>; + +pub fn consensus_predicate( + function: Box Fn(&'a ConsensusTaskTestState) -> bool>, + info: &str, +) -> Predicate { + Predicate { + function, + info: info.to_string(), + } +} + +pub fn no_decided_upgrade_cert() -> Predicate { + consensus_predicate( + Box::new(|state| state.decided_upgrade_cert.is_none()), + "expected decided_upgrade_cert to be None", + ) +} + +pub fn decided_upgrade_cert() -> Predicate { + consensus_predicate( + Box::new(|state| state.decided_upgrade_cert.is_some()), + "expected decided_upgrade_cert to be Some(_)", + ) +} diff --git a/testing/src/script.rs b/testing/src/script.rs new file mode 100644 index 0000000000..3889727f59 --- /dev/null +++ b/testing/src/script.rs @@ -0,0 +1,106 @@ +use crate::predicates::Predicate; +use async_broadcast::broadcast; +use hotshot_task_impls::events::HotShotEvent; + +use hotshot_task::task::{Task, TaskRegistry, TaskState}; +use hotshot_types::traits::node_implementation::NodeType; +use std::sync::Arc; + +/// A `TestScript` is a sequence of triples (input sequence, output sequence, assertions). +type TestScript = Vec>; + +pub struct TestScriptStage>> { + pub inputs: Vec>, + pub outputs: Vec>>, + pub asserts: Vec>, +} + +impl std::fmt::Debug for Predicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{}", self.info) + } +} + +/// `run_test_script` reads a triple (inputs, outputs, asserts) in a `TestScript`, +/// It broadcasts all given inputs (in order) and waits to receive all outputs (in order). +/// Once the expected outputs have been received, it validates the task state at that stage +/// against the given assertions. +/// +/// If all assertions pass, it moves onto the next stage. If it receives an unexpected output +/// or fails to receive an output, the test fails immediately with a panic. +/// +/// Note: the task is not spawned with an async thread; instead, the harness just calls `handle_event`. +/// This has a few implications, e.g. shutting down tasks doesn't really make sense, +/// and event ordering is deterministic. +pub async fn run_test_script>>( + mut script: TestScript, + state: S, +) where + TYPES: NodeType, + S: Send + 'static, +{ + let registry = Arc::new(TaskRegistry::default()); + + let (test_input, task_receiver) = broadcast(1024); + // let (task_input, mut test_receiver) = broadcast(1024); + + let task_input = test_input.clone(); + let mut test_receiver = task_receiver.clone(); + + let mut task = Task::new( + task_input.clone(), + task_receiver.clone(), + registry.clone(), + state, + ); + + for (stage_number, stage) in script.iter_mut().enumerate() { + tracing::debug!("Beginning test stage {}", stage_number); + for input in &mut *stage.inputs { + if !task.state_mut().filter(input) { + tracing::debug!("Test sent: {:?}", input); + + if let Some(res) = S::handle_event(input.clone(), &mut task).await { + task.state_mut().handle_result(&res).await; + } + } + } + + for expected in &stage.outputs { + let output_missing_error = format!( + "Stage {} | Failed to receive output for predicate: {:?}", + stage_number, expected + ); + + if let Ok(received_output) = test_receiver.try_recv() { + tracing::debug!("Test received: {:?}", received_output); + assert!( + (expected.function)(&received_output), + "Stage {} | Output failed to satisfy {:?}", + stage_number, + expected + ); + } else { + panic!("{}", output_missing_error); + } + } + + for assert in &stage.asserts { + assert!( + (assert.function)(task.state()), + "Stage {} | Assertion on task state failed: {:?}", + stage_number, + assert + ); + } + + if let Ok(received_output) = test_receiver.try_recv() { + let extra_output_error = format!( + "Stage {} | Received unexpected additional output: {:?}", + stage_number, received_output + ); + + panic!("{}", extra_output_error); + } + } +} diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 1cc81ba8fa..9bed45f5a9 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -2,7 +2,7 @@ use std::marker::PhantomData; use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload}, + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, node_types::{MemoryImpl, TestTypes}, state_types::{TestInstanceState, TestValidatedState}, }; @@ -17,10 +17,10 @@ use hotshot::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, ViewNumber}, - message::Proposal, - simple_certificate::QuorumCertificate, - simple_vote::SimpleVote, + data::{Leaf, QuorumProposal, VidDisperse, ViewNumber}, + message::{GeneralConsensusMessage, Proposal}, + simple_certificate::{DACertificate, QuorumCertificate}, + simple_vote::{DAData, DAVote, SimpleVote}, traits::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, consensus_api::ConsensusApi, @@ -29,7 +29,7 @@ use hotshot_types::{ states::ValidatedState, BlockPayload, }, - vid::{vid_scheme, VidSchemeType}, + vid::{vid_scheme, VidCommitment, VidSchemeType}, vote::HasViewNumber, }; @@ -43,6 +43,8 @@ use hotshot_types::utils::ViewInner; use hotshot_types::vote::Certificate; use hotshot_types::vote::Vote; +use jf_primitives::vid::VidScheme; + use serde::Serialize; use std::{fmt::Debug, hash::Hash, sync::Arc}; @@ -368,3 +370,128 @@ pub fn vid_scheme_from_view_number( let num_storage_nodes = membership.get_committee(view_number).len(); vid_scheme(num_storage_nodes) } + +pub fn vid_payload_commitment( + quorum_membership: &::Membership, + view_number: ViewNumber, + transactions: Vec, +) -> VidCommitment { + let vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let vid_disperse = vid.disperse(encoded_transactions).unwrap(); + + vid_disperse.commit +} + +pub fn da_payload_commitment( + quorum_membership: &::Membership, + transactions: Vec, +) -> VidCommitment { + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + + vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) +} + +pub fn build_vid_proposal( + quorum_membership: &::Membership, + view_number: ViewNumber, + transactions: Vec, + private_key: &::PrivateKey, +) -> Proposal> { + let vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + + let payload_commitment = vid_disperse.commit; + + let vid_signature = + ::SignatureKey::sign(private_key, payload_commitment.as_ref()) + .expect("Failed to sign payload commitment"); + let vid_disperse = VidDisperse::from_membership( + view_number, + vid.disperse(&encoded_transactions).unwrap(), + &quorum_membership.clone().into(), + ); + + Proposal { + data: vid_disperse.clone(), + signature: vid_signature, + _pd: PhantomData, + } +} + +pub fn build_da_certificate( + quorum_membership: &::Membership, + view_number: ViewNumber, + transactions: Vec, + public_key: &::SignatureKey, + private_key: &::PrivateKey, +) -> DACertificate { + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + + let da_payload_commitment = + vid_commitment(&encoded_transactions, quorum_membership.total_nodes()); + + let da_data = DAData { + payload_commit: da_payload_commitment, + }; + + build_cert::, DACertificate>( + da_data, + quorum_membership, + view_number, + public_key, + private_key, + ) +} + +pub async fn build_vote( + handle: &SystemContextHandle, + proposal: QuorumProposal, +) -> GeneralConsensusMessage { + let consensus_lock = handle.get_consensus(); + let consensus = consensus_lock.read().await; + let membership = handle.hotshot.memberships.quorum_membership.clone(); + + let justify_qc = proposal.justify_qc.clone(); + let view = ViewNumber::new(*proposal.view_number); + let parent = if justify_qc.is_genesis { + let Some(genesis_view) = consensus.validated_state_map.get(&ViewNumber::new(0)) else { + panic!("Couldn't find genesis view in state map."); + }; + let Some(leaf) = genesis_view.get_leaf_commitment() else { + panic!("Genesis view points to a view without a leaf"); + }; + let Some(leaf) = consensus.saved_leaves.get(&leaf) else { + panic!("Failed to find genesis leaf."); + }; + leaf.clone() + } else { + consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + .unwrap() + }; + + let parent_commitment = parent.commit(); + + let leaf: Leaf<_> = Leaf { + view_number: view, + justify_qc: proposal.justify_qc.clone(), + parent_commitment, + block_header: proposal.block_header, + block_payload: None, + proposer_id: membership.get_leader(view), + }; + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + view, + handle.public_key(), + handle.private_key(), + ) + .expect("Failed to create quorum vote"); + GeneralConsensusMessage::::Vote(vote) +} diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs new file mode 100644 index 0000000000..9f8073921c --- /dev/null +++ b/testing/src/view_generator.rs @@ -0,0 +1,304 @@ +use std::marker::PhantomData; + +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, +}; + +use crate::task_helpers::{ + build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, +}; +use commit::Committable; + +use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; + +use hotshot_types::{ + data::{Leaf, QuorumProposal, VidDisperse, ViewNumber}, + message::Proposal, + simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, + simple_vote::{UpgradeProposalData, UpgradeVote}, + traits::{ + consensus_api::ConsensusApi, + node_implementation::{ConsensusTime, NodeType}, + }, +}; + +use hotshot_types::simple_vote::QuorumData; +use hotshot_types::simple_vote::QuorumVote; + +#[derive(Clone)] +pub struct TestView { + pub quorum_proposal: Proposal>, + pub leaf: Leaf, + pub view_number: ViewNumber, + pub quorum_membership: ::Membership, + pub vid_proposal: ( + Proposal>, + ::SignatureKey, + ), + pub leader_public_key: ::SignatureKey, + pub da_certificate: DACertificate, + pub transactions: Vec, + upgrade_data: Option>, +} + +impl TestView { + pub fn genesis(quorum_membership: &::Membership) -> Self { + let genesis_view = ViewNumber::new(1); + + let transactions = Vec::new(); + + let (private_key, public_key) = key_pair_for_id(*genesis_view); + + let leader_public_key = public_key; + + let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); + + let vid_proposal = build_vid_proposal( + quorum_membership, + genesis_view, + transactions.clone(), + &private_key, + ); + + let da_certificate = build_da_certificate( + quorum_membership, + genesis_view, + transactions.clone(), + &public_key, + &private_key, + ); + + let block_header = TestBlockHeader { + block_number: 1, + payload_commitment, + }; + + let proposal = QuorumProposal:: { + block_header: block_header.clone(), + view_number: genesis_view, + justify_qc: QuorumCertificate::genesis(), + timeout_certificate: None, + upgrade_certificate: None, + proposer_id: public_key, + }; + + let leaf = Leaf { + view_number: genesis_view, + justify_qc: QuorumCertificate::genesis(), + parent_commitment: Leaf::genesis(&TestInstanceState {}).commit(), + block_header: block_header.clone(), + // Note: this field is not relevant in calculating the leaf commitment. + block_payload: Some(TestBlockPayload { + transactions: transactions.clone(), + }), + // Note: this field is not relevant in calculating the leaf commitment. + proposer_id: public_key, + }; + + let signature = ::sign(&private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment!"); + + let quorum_proposal = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + + TestView { + quorum_proposal, + leaf, + view_number: genesis_view, + quorum_membership: quorum_membership.clone(), + vid_proposal: (vid_proposal, public_key), + da_certificate, + transactions, + leader_public_key, + upgrade_data: None, + } + } + + pub fn next_view(&self) -> Self { + let old = self; + let old_view = old.view_number; + let next_view = old_view + 1; + + let quorum_membership = &self.quorum_membership; + let transactions = &self.transactions; + + let quorum_data = QuorumData { + leaf_commit: old.leaf.commit(), + }; + + let (old_private_key, old_public_key) = key_pair_for_id(*old_view); + + let (private_key, public_key) = key_pair_for_id(*next_view); + + let leader_public_key = public_key; + + let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); + + let vid_proposal = build_vid_proposal( + quorum_membership, + next_view, + transactions.clone(), + &private_key, + ); + + let da_certificate = build_da_certificate( + quorum_membership, + next_view, + transactions.clone(), + &public_key, + &private_key, + ); + + let quorum_certificate = build_cert::< + TestTypes, + QuorumData, + QuorumVote, + QuorumCertificate, + >( + quorum_data, + quorum_membership, + old_view, + &old_public_key, + &old_private_key, + ); + + let upgrade_certificate = if let Some(ref data) = self.upgrade_data { + let cert = build_cert::< + TestTypes, + UpgradeProposalData, + UpgradeVote, + UpgradeCertificate, + >( + data.clone(), + quorum_membership, + next_view, + &public_key, + &private_key, + ); + + Some(cert) + } else { + None + }; + + let block_header = TestBlockHeader { + block_number: *next_view, + payload_commitment, + }; + + let leaf = Leaf { + view_number: next_view, + justify_qc: quorum_certificate.clone(), + parent_commitment: old.leaf.commit(), + block_header: block_header.clone(), + // Note: this field is not relevant in calculating the leaf commitment. + block_payload: Some(TestBlockPayload { + transactions: transactions.clone(), + }), + // Note: this field is not relevant in calculating the leaf commitment. + proposer_id: public_key, + }; + + let signature = ::sign(&private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment."); + + let proposal = QuorumProposal:: { + block_header: block_header.clone(), + view_number: next_view, + justify_qc: quorum_certificate.clone(), + timeout_certificate: None, + upgrade_certificate, + proposer_id: public_key, + }; + + let quorum_proposal = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + + TestView { + quorum_proposal, + leaf, + view_number: next_view, + quorum_membership: quorum_membership.clone(), + vid_proposal: (vid_proposal, public_key), + da_certificate, + leader_public_key, + // Transactions and upgrade data need to be manually injected each view, + // so we reset for the next view. + transactions: Vec::new(), + upgrade_data: None, + } + } + + pub fn create_vote( + &self, + handle: &SystemContextHandle, + ) -> QuorumVote { + QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: self.leaf.commit(), + }, + self.view_number, + handle.public_key(), + handle.private_key(), + ) + .expect("Failed to generate a signature on QuorumVote") + } +} + +pub struct TestViewGenerator { + pub current_view: Option, + pub quorum_membership: ::Membership, +} + +impl TestViewGenerator { + pub fn generate(quorum_membership: ::Membership) -> Self { + TestViewGenerator { + current_view: None, + quorum_membership, + } + } + + pub fn add_upgrade(&mut self, upgrade_proposal_data: UpgradeProposalData) { + if let Some(ref view) = self.current_view { + self.current_view = Some(TestView { + upgrade_data: Some(upgrade_proposal_data), + ..view.clone() + }); + } else { + tracing::error!("Cannot attach upgrade proposal to the genesis view."); + } + } + + pub fn add_transactions(&mut self, transactions: Vec) { + if let Some(ref view) = self.current_view { + self.current_view = Some(TestView { + transactions, + ..view.clone() + }); + } else { + tracing::error!("Cannot attach transactions to the genesis view."); + } + } +} + +impl Iterator for TestViewGenerator { + type Item = TestView; + + fn next(&mut self) -> Option { + if let Some(view) = &self.current_view { + self.current_view = Some(TestView::next_view(view)); + } else { + self.current_view = Some(TestView::genesis(&self.quorum_membership)); + } + + self.current_view.clone() + } +} diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index e045880c0e..1242d98ac9 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,74 +1,15 @@ #![allow(clippy::panic)] -use commit::Committable; use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::task_helpers::{build_quorum_proposal, key_pair_for_id}; -use hotshot_types::simple_vote::QuorumVote; -use hotshot_types::vote::Certificate; +use hotshot_testing::task_helpers::{build_quorum_proposal, build_vote, key_pair_for_id}; +use hotshot_types::traits::{consensus_api::ConsensusApi, election::Membership}; use hotshot_types::{ - data::{Leaf, QuorumProposal, ViewNumber}, - message::GeneralConsensusMessage, - traits::node_implementation::ConsensusTime, -}; -use hotshot_types::{ - simple_vote::QuorumData, - traits::{consensus_api::ConsensusApi, election::Membership}, + data::ViewNumber, message::GeneralConsensusMessage, traits::node_implementation::ConsensusTime, }; use jf_primitives::vid::VidScheme; use std::collections::HashMap; -async fn build_vote( - handle: &SystemContextHandle, - proposal: QuorumProposal, -) -> GeneralConsensusMessage { - let consensus_lock = handle.get_consensus(); - let consensus = consensus_lock.read().await; - let membership = handle.hotshot.memberships.quorum_membership.clone(); - - let justify_qc = proposal.justify_qc.clone(); - let view = ViewNumber::new(*proposal.view_number); - let parent = if justify_qc.is_genesis { - let Some(genesis_view) = consensus.validated_state_map.get(&ViewNumber::new(0)) else { - panic!("Couldn't find genesis view in state map."); - }; - let Some(leaf) = genesis_view.get_leaf_commitment() else { - panic!("Genesis view points to a view without a leaf"); - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - panic!("Failed to find genesis leaf."); - }; - leaf.clone() - } else { - consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - .unwrap() - }; - - let parent_commitment = parent.commit(); - - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: proposal.justify_qc.clone(), - parent_commitment, - block_header: proposal.block_header, - block_payload: None, - proposer_id: membership.get_leader(view), - }; - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(), - }, - view, - handle.public_key(), - handle.private_key(), - ) - .expect("Failed to create quorum vote"); - GeneralConsensusMessage::::Vote(vote) -} - #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs new file mode 100644 index 0000000000..3cb69c112c --- /dev/null +++ b/testing/tests/upgrade_task.rs @@ -0,0 +1,131 @@ +use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; +use hotshot::types::SystemContextHandle; +use hotshot_constants::Version; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; +use hotshot_testing::{predicates::*, view_generator::TestViewGenerator}; +use hotshot_types::{ + data::ViewNumber, simple_vote::UpgradeProposalData, traits::node_implementation::ConsensusTime, +}; + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_upgrade_task() { + use hotshot_testing::script::{run_test_script, TestScriptStage}; + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(1).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let old_version = Version { major: 0, minor: 1 }; + let new_version = Version { major: 0, minor: 2 }; + + let upgrade_data: UpgradeProposalData = UpgradeProposalData { + old_version, + new_version, + new_version_hash: [0u8; 12].to_vec(), + old_version_last_block: ViewNumber::new(5), + new_version_first_block: ViewNumber::new(7), + }; + + let mut proposals = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + let mut leaders = Vec::new(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + } + + generator.add_upgrade(upgrade_data); + + for view in generator.take(4) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + } + + let view_1 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![], + }; + + let view_2 = TestScriptStage { + inputs: vec![ + VidDisperseRecv(vids[1].0.clone(), vids[1].1), + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + DACRecv(dacs[1].clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumVoteSend(votes[1].clone())), + ], + asserts: vec![no_decided_upgrade_cert()], + }; + + let view_3 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[2].clone(), leaders[2]), + DACRecv(dacs[2].clone()), + VidDisperseRecv(vids[2].0.clone(), vids[2].1), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(3))), + leaf_decided(), + exact(QuorumVoteSend(votes[2].clone())), + ], + asserts: vec![no_decided_upgrade_cert()], + }; + + let view_4 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[3].clone(), leaders[3]), + DACRecv(dacs[3].clone()), + VidDisperseRecv(vids[3].0.clone(), vids[3].1), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(4))), + leaf_decided(), + exact(QuorumVoteSend(votes[3].clone())), + ], + asserts: vec![no_decided_upgrade_cert()], + }; + + let view_5 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], + outputs: vec![exact(ViewChange(ViewNumber::new(5))), leaf_decided()], + asserts: vec![decided_upgrade_cert()], + }; + + let script = vec![view_1, view_2, view_3, view_4, view_5]; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + inject_consensus_polls(&consensus_state).await; + + run_test_script(script, consensus_state).await; +} diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a2201036db..ac978c41d8 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -6,7 +6,7 @@ use displaydoc::Display; use crate::{ data::Leaf, error::HotShotError, - simple_certificate::{DACertificate, QuorumCertificate}, + simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, node_implementation::NodeType, @@ -39,6 +39,10 @@ pub struct Consensus { /// view -> DA cert pub saved_da_certs: HashMap>, + /// All the upgrade certs we've received for current and future views. + /// view -> upgrade cert + pub saved_upgrade_certs: HashMap>, + /// View number that is currently on. pub cur_view: TYPES::Time, @@ -310,6 +314,8 @@ impl Consensus { // perform gc self.saved_da_certs .retain(|view_number, _| *view_number >= old_anchor_view); + self.saved_upgrade_certs + .retain(|view_number, _| *view_number >= old_anchor_view); self.validated_state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) From 271119f62efae98c9fdad362137bef8c77590818 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 4 Mar 2024 11:52:12 -0500 Subject: [PATCH 0829/1393] [FALLBACK] Send DA broadcast Properly (#2684) * Send DA broadcast properly on combined * fmt * da_broadcast on primary as well --- hotshot/src/traits/networking/combined_network.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 5fb08e34e5..75650230f5 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -285,7 +285,17 @@ impl ConnectedNetwork, TYPES::SignatureKey> message: Message, recipients: BTreeSet, ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients).await + if self + .primary() + .da_broadcast_message(message.clone(), recipients.clone()) + .await + .is_err() + { + warn!("Failed broadcasting DA on primary"); + } + self.secondary() + .da_broadcast_message(message, recipients) + .await } async fn direct_message( From 95c1685ad5dea59e07817a085ad6303e9b61a2ce Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Mon, 4 Mar 2024 12:11:47 -0500 Subject: [PATCH 0830/1393] remove testing gating for get_leader (#2685) * remove testing gating for get_leader * remove test flag for membership --- hotshot/src/types/handle.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index b6e1ded7f8..0bdcccac36 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -7,7 +7,6 @@ use async_lock::RwLock; use futures::Stream; use hotshot_task_impls::events::HotShotEvent; -#[cfg(feature = "hotshot-testing")] use hotshot_types::traits::election::Membership; use hotshot_task::task::TaskRegistry; @@ -153,11 +152,8 @@ impl + 'static> SystemContextHandl self.hotshot.get_next_view_timeout() } - // Below is for testing only: - /// Wrapper for `HotShotConsensusApi`'s `get_leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons - #[cfg(feature = "hotshot-testing")] pub async fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { self.hotshot .memberships @@ -165,6 +161,7 @@ impl + 'static> SystemContextHandl .get_leader(view_number) } + // Below is for testing only: /// Wrapper to get this node's public key #[cfg(feature = "hotshot-testing")] pub fn get_public_key(&self) -> TYPES::SignatureKey { From 68746c2200449465bd7d3fb19552288c281fe391 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 4 Mar 2024 16:44:16 -0800 Subject: [PATCH 0831/1393] Add timestamp to `TestBlockHeader` (#2696) This is extremely useful for downstream testing. For example, many tests in the query service rely on a timestamp. Without this change, we would have to implement our own test types or implement something very unrealistic (like always getting the current time every time we get the timestamp for the same header). This is also pretty realistic, as any real application will probably timestamp its blocks, and it demonstrates getting inputs in `Header::new` from the outside world (ie the hardware clock) in addition to just the inputs which are passed in, which is good for example purposes. --- example-types/Cargo.toml | 1 + example-types/src/block_types.rs | 19 +++++++++++++++++-- testing/src/view_generator.rs | 2 ++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 50282613f4..eecae87e9b 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -29,6 +29,7 @@ snafu = { workspace = true } tracing = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } +time = { workspace = true } async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 5156bf0e44..43d9ba22fa 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -17,6 +17,7 @@ use hotshot_types::{ }; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; +use time::OffsetDateTime; /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] @@ -178,9 +179,13 @@ pub struct TestBlockHeader { pub block_number: u64, /// VID commitment to the payload. pub payload_commitment: VidCommitment, + /// Timestamp when this header was created. + pub timestamp: u64, } -impl> BlockHeader for TestBlockHeader { +impl> BlockHeader + for TestBlockHeader +{ async fn new( _parent_state: &TYPES::ValidatedState, _instance_state: &>::Instance, @@ -188,9 +193,18 @@ impl> BlockHeader for Te payload_commitment: VidCommitment, _metadata: ::Metadata, ) -> Self { + let parent = &parent_leaf.block_header; + + let mut timestamp = OffsetDateTime::now_utc().unix_timestamp() as u64; + if timestamp < parent.timestamp { + // Prevent decreasing timestamps. + timestamp = parent.timestamp; + } + Self { - block_number: parent_leaf.block_header.block_number() + 1, + block_number: parent.block_number + 1, payload_commitment, + timestamp, } } @@ -202,6 +216,7 @@ impl> BlockHeader for Te Self { block_number: 0, payload_commitment, + timestamp: 0, } } diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 9f8073921c..654cf0ff20 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -72,6 +72,7 @@ impl TestView { let block_header = TestBlockHeader { block_number: 1, + timestamp: 1, payload_commitment, }; @@ -188,6 +189,7 @@ impl TestView { let block_header = TestBlockHeader { block_number: *next_view, + timestamp: *next_view, payload_commitment, }; From 08110c9fc4914e772bc04851b8d2b339e44f1edc Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:55:38 -0500 Subject: [PATCH 0832/1393] [Cleanup] Remove BoxedSync `RecvMsgs` (#2700) * remove boxedsync recv * add comment back --- .../src/traits/networking/combined_network.rs | 50 +++++++++---------- .../src/traits/networking/libp2p_network.rs | 30 ++++++----- .../src/traits/networking/memory_network.rs | 39 +++++++-------- .../traits/networking/web_server_network.rs | 29 ++++------- types/src/traits/network.rs | 11 ++-- 5 files changed, 70 insertions(+), 89 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 75650230f5..1f4738f241 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -327,40 +327,36 @@ impl ConnectedNetwork, TYPES::SignatureKey> self.secondary().direct_message(message, recipient).await } - fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { + /// Receive one or many messages from the underlying network. + /// + /// # Errors + /// Does not error + async fn recv_msgs(&self) -> Result>, NetworkError> { // recv on both networks because nodes may be accessible only on either. discard duplicates // TODO: improve this algorithm: https://github.com/EspressoSystems/HotShot/issues/2089 - let closure = async move { - let mut primary_msgs = self.primary().recv_msgs().await?; - let mut secondary_msgs = self.secondary().recv_msgs().await?; + let mut primary_msgs = self.primary().recv_msgs().await?; + let mut secondary_msgs = self.secondary().recv_msgs().await?; - primary_msgs.append(secondary_msgs.as_mut()); + primary_msgs.append(secondary_msgs.as_mut()); - let mut filtered_msgs = Vec::with_capacity(primary_msgs.len()); - for msg in primary_msgs { - // see if we've already seen this message - if !self - .message_cache - .read() + let mut filtered_msgs = Vec::with_capacity(primary_msgs.len()); + for msg in primary_msgs { + // see if we've already seen this message + if !self + .message_cache + .read() + .await + .contains(calculate_hash_of(&msg)) + { + filtered_msgs.push(msg.clone()); + self.message_cache + .write() .await - .contains(calculate_hash_of(&msg)) - { - filtered_msgs.push(msg.clone()); - self.message_cache - .write() - .await - .insert(calculate_hash_of(&msg)); - } + .insert(calculate_hash_of(&msg)); } + } - Ok(filtered_msgs) - }; - - boxed_sync(closure) + Ok(filtered_msgs) } async fn queue_node_lookup( diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 20b2240659..661494451c 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -844,23 +844,21 @@ impl ConnectedNetwork for Libp2p } } + /// Receive one or many messages from the underlying network. + /// + /// # Errors + /// If there is a network-related failure. #[instrument(name = "Libp2pNetwork::recv_msgs", skip_all)] - fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - let result = self - .inner - .receiver - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_message_count.add(result.len()); - Ok(result) - }; - boxed_sync(closure) + async fn recv_msgs(&self) -> Result, NetworkError> { + let result = self + .inner + .receiver + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner.metrics.incoming_message_count.add(result.len()); + + Ok(result) } #[instrument(name = "Libp2pNetwork::queue_node_lookup", skip_all)] diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 210c80a065..20da1732e6 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -351,27 +351,24 @@ impl ConnectedNetwork for Memory } } + /// Receive one or many messages from the underlying network. + /// + /// # Errors + /// If the other side of the channel is closed #[instrument(name = "MemoryNetwork::recv_msgs", skip_all)] - fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - let ret = self - .inner - .output - .lock() - .await - .drain_at_least_one() - .await - .map_err(|_x| NetworkError::ShutDown)?; - self.inner - .in_flight_message_count - .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner.metrics.incoming_message_count.add(ret.len()); - Ok(ret) - }; - boxed_sync(closure) + async fn recv_msgs(&self) -> Result, NetworkError> { + let ret = self + .inner + .output + .lock() + .await + .drain_at_least_one() + .await + .map_err(|_x| NetworkError::ShutDown)?; + self.inner + .in_flight_message_count + .fetch_sub(ret.len(), Ordering::Relaxed); + self.inner.metrics.incoming_message_count.add(ret.len()); + Ok(ret) } } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index d10600acef..5bae77c950 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -840,25 +840,18 @@ impl ConnectedNetwork, TYPES::Signatur } } - /// Moves out the entire queue of received messages + /// Receive one or many messages from the underlying network. /// - /// Will unwrap the underlying `NetworkMessage` - /// blocking - fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result>, NetworkError>> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - let mut queue = self.inner.poll_queue_0_1.write().await; - Ok(queue - .drain(..) - .collect::>() - .iter() - .map(|x| x.get_message().unwrap()) - .collect()) - }; - boxed_sync(closure) + /// # Errors + /// Does not error + async fn recv_msgs(&self) -> Result>, NetworkError> { + let mut queue = self.inner.poll_queue_0_1.write().await; + Ok(queue + .drain(..) + .collect::>() + .iter() + .map(|x| x.get_message().expect("failed to clone message")) + .collect()) } #[allow(clippy::too_many_lines)] diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index f5465129ce..87c8779f4e 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -277,14 +277,11 @@ pub trait ConnectedNetwork: /// blocking async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError>; - /// Moves out the entire queue of received messages of 'transmit_type` + /// Receive one or many messages from the underlying network. /// - /// Will unwrap the underlying `NetworkMessage` - /// blocking - fn recv_msgs<'a, 'b>(&'a self) -> BoxSyncFuture<'b, Result, NetworkError>> - where - 'a: 'b, - Self: 'b; + /// # Errors + /// If there is a network-related failure. + async fn recv_msgs(&self) -> Result, NetworkError>; /// queues lookup of a node async fn queue_node_lookup( From f020747c8be2a206e075b80868b45d7439faa47c Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 5 Mar 2024 11:56:59 -0500 Subject: [PATCH 0833/1393] Add Event for Validated Quorum Proposal (#2691) * Add Event for Validated Quorum Proposal * Fix consensus task * fix upgrade test * Fix the test...again * lint --- task-impls/src/consensus.rs | 6 ++++++ task-impls/src/events.rs | 2 ++ testing/tests/consensus_task.rs | 5 +++++ testing/tests/upgrade_task.rs | 10 +++++++++- 4 files changed, 22 insertions(+), 1 deletion(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9e3431d5b0..61f62693b0 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -698,6 +698,12 @@ impl, A: ConsensusApi + }, }) .await; + // Notify other tasks + broadcast_event( + HotShotEvent::QuorumProposalValidated(proposal.data.clone()), + &event_stream, + ) + .await; let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 03a2a64a03..8f61eb8402 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -43,6 +43,8 @@ pub enum HotShotEvent { QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), + /// A proposal was validated. This means it comes from the correct leader and has a correct QC. + QuorumProposalValidated(QuorumProposal), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 1242d98ac9..2d2c3c7fbb 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -48,6 +48,10 @@ async fn test_consensus_task() { HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), 1, ); + output.insert( + HotShotEvent::QuorumProposalValidated(proposal.data.clone()), + 1, + ); output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); @@ -98,6 +102,7 @@ async fn test_consensus_vote() { )); let proposal = proposal.data; + output.insert(HotShotEvent::QuorumProposalValidated(proposal.clone()), 1); if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 3cb69c112c..98051c9460 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -64,6 +64,7 @@ async fn test_upgrade_task() { inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![], @@ -77,6 +78,7 @@ async fn test_upgrade_task() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(proposals[1].data.clone())), exact(QuorumVoteSend(votes[1].clone())), ], asserts: vec![no_decided_upgrade_cert()], @@ -90,6 +92,7 @@ async fn test_upgrade_task() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), + exact(QuorumProposalValidated(proposals[2].data.clone())), leaf_decided(), exact(QuorumVoteSend(votes[2].clone())), ], @@ -104,6 +107,7 @@ async fn test_upgrade_task() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), + exact(QuorumProposalValidated(proposals[3].data.clone())), leaf_decided(), exact(QuorumVoteSend(votes[3].clone())), ], @@ -112,7 +116,11 @@ async fn test_upgrade_task() { let view_5 = TestScriptStage { inputs: vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], - outputs: vec![exact(ViewChange(ViewNumber::new(5))), leaf_decided()], + outputs: vec![ + exact(ViewChange(ViewNumber::new(5))), + exact(QuorumProposalValidated(proposals[4].data.clone())), + leaf_decided(), + ], asserts: vec![decided_upgrade_cert()], }; From cdbb365c9c7fe97b768b693038deccfc0fcbe64d Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Tue, 5 Mar 2024 16:17:37 -0500 Subject: [PATCH 0834/1393] chore: new api for `StateKeyPair` (#2692) * new api for StateKeyPair * use jf tag --- types/src/light_client.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 7d0b5d7dff..ce98eedd3e 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -115,6 +115,12 @@ impl std::ops::Deref for StateKeyPair { } impl StateKeyPair { + /// Generate key pairs from private signing keys + #[must_use] + pub fn from_sign_key(sk: StateSignKey) -> Self { + Self(schnorr::KeyPair::::from(sk)) + } + /// Generate key pairs from `thread_rng()` #[must_use] pub fn generate() -> StateKeyPair { From a44aeceb39c5244f57d237824ce2d1a10f85c4a9 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 6 Mar 2024 08:36:55 +0100 Subject: [PATCH 0835/1393] Add update_view in ConnectedNetwork trait (#2678) * Add update_view in ConnectedNetwork trait * Fix lint errors * Delay sending messages via secondary network * Fix deadlock * Fmt and parametrise delay duration * Add support for tokio * Clean up * `send_both_networks` takes futures instead of enum * Use less verbose `impl Trait` as parameter * Run all cancel tasks at the same time * There can be multiple delayed tasks per given view --- .../src/traits/networking/combined_network.rs | 215 +++++++++++++----- task-impls/src/network.rs | 2 + types/src/traits/network.rs | 3 + 3 files changed, 163 insertions(+), 57 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 1f4738f241..b324ef730a 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -31,9 +31,23 @@ use hotshot_types::{ }, BoxSyncFuture, }; +use std::collections::BTreeMap; +use std::future::Future; use std::{collections::hash_map::DefaultHasher, sync::Arc}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use either::Either; +use futures::future::join_all; +use hotshot_task_impls::helpers::cancel_task; +use hotshot_types::message::{GeneralConsensusMessage, MessageKind}; +use hotshot_types::traits::network::ViewMessage; +use hotshot_types::traits::node_implementation::ConsensusTime; use std::hash::Hash; +use std::time::Duration; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; /// A cache to keep track of the last n messages we've seen, avoids reprocessing duplicates /// from multiple networks @@ -103,6 +117,9 @@ pub fn calculate_hash_of(t: &T) -> u64 { s.finish() } +/// thread-safe ref counted lock to a map of delayed tasks +type DelayedTasksLockedMap = Arc>>>>>; + /// A communication channel with 2 networks, where we can fall back to the slower network if the /// primary fails #[derive(Clone, Debug)] @@ -115,6 +132,12 @@ pub struct CombinedNetworks { /// If the primary network is down (0) or not, and for how many messages primary_down: Arc, + + /// delayed, cancelable tasks for secondary network + delayed_tasks: DelayedTasksLockedMap, + + /// how long to delay + delay_duration: Arc>, } impl CombinedNetworks { @@ -125,6 +148,8 @@ impl CombinedNetworks { networks, message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), primary_down: Arc::new(AtomicU64::new(0)), + delayed_tasks: Arc::default(), + delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), } } @@ -139,6 +164,62 @@ impl CombinedNetworks { pub fn secondary(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { &self.networks.1 } + + /// a helper function returning a bool whether a given message is of delayable type + fn should_delay(message: &Message) -> bool { + match &message.kind { + MessageKind::Consensus(consensus_message) => match &consensus_message.0 { + Either::Left(general_consensus_message) => { + matches!(general_consensus_message, GeneralConsensusMessage::Vote(_)) + } + Either::Right(_) => true, + }, + MessageKind::Data(_) => false, + } + } + + /// a helper function to send messages through both networks (possibly delayed) + async fn send_both_networks( + &self, + message: Message, + primary_future: impl Future> + Send + 'static, + secondary_future: impl Future> + Send + 'static, + ) -> Result<(), NetworkError> { + // send optimistically on both networks, but if the primary network is down, skip it + let primary_down = self.primary_down.load(Ordering::Relaxed); + let mut primary_failed = false; + if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES + || primary_down % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL == 0 + { + // send on the primary network as it is not down, or we are checking if it is back up + match primary_future.await { + Ok(()) => { + self.primary_down.store(0, Ordering::Relaxed); + } + Err(e) => { + warn!("Error on primary network: {}", e); + self.primary_down.fetch_add(1, Ordering::Relaxed); + primary_failed = true; + } + }; + } + + if !primary_failed && Self::should_delay(&message) { + let duration = *self.delay_duration.read().await; + self.delayed_tasks + .write() + .await + .entry(message.kind.get_view_number().get_u64()) + .or_default() + .push(async_spawn(async move { + async_sleep(duration).await; + secondary_future.await + })); + Ok(()) + } else { + secondary_future.await + } + } } /// Wrapper for the tuple of `WebServerNetwork` and `Libp2pNetwork` @@ -197,11 +278,15 @@ impl TestableNetworkingImplementation for CombinedNetwor networks: Arc::new(quorum_networks), message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), primary_down: Arc::new(AtomicU64::new(0)), + delayed_tasks: Arc::default(), + delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), }; let da_net = Self { networks: Arc::new(da_networks), message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), primary_down: Arc::new(AtomicU64::new(0)), + delayed_tasks: Arc::default(), + delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), }; (quorum_net.into(), da_net.into()) }) @@ -254,30 +339,25 @@ impl ConnectedNetwork, TYPES::SignatureKey> message: Message, recipients: BTreeSet, ) -> Result<(), NetworkError> { - // broadcast optimistically on both networks, but if the primary network is down, skip it - let primary_down = self.primary_down.load(Ordering::Relaxed); - if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES - || primary_down % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL == 0 - { - // broadcast on the primary network as it is not down, or we are checking if it is back up - match self - .primary() - .broadcast_message(message.clone(), recipients.clone()) - .await - { - Ok(()) => { - self.primary_down.store(0, Ordering::Relaxed); - } - Err(e) => { - warn!("Error on primary network: {}", e); - self.primary_down.fetch_add(1, Ordering::Relaxed); - } - }; - } - - self.secondary() - .broadcast_message(message, recipients) - .await + let primary = self.primary().clone(); + let secondary = self.secondary().clone(); + let primary_message = message.clone(); + let secondary_message = message.clone(); + let primary_recipients = recipients.clone(); + self.send_both_networks( + message, + async move { + primary + .broadcast_message(primary_message, primary_recipients) + .await + }, + async move { + secondary + .broadcast_message(secondary_message, recipients) + .await + }, + ) + .await } async fn da_broadcast_message( @@ -285,17 +365,25 @@ impl ConnectedNetwork, TYPES::SignatureKey> message: Message, recipients: BTreeSet, ) -> Result<(), NetworkError> { - if self - .primary() - .da_broadcast_message(message.clone(), recipients.clone()) - .await - .is_err() - { - warn!("Failed broadcasting DA on primary"); - } - self.secondary() - .da_broadcast_message(message, recipients) - .await + let primary = self.primary().clone(); + let secondary = self.secondary().clone(); + let primary_message = message.clone(); + let secondary_message = message.clone(); + let primary_recipients = recipients.clone(); + self.send_both_networks( + message, + async move { + primary + .da_broadcast_message(primary_message, primary_recipients) + .await + }, + async move { + secondary + .da_broadcast_message(secondary_message, recipients) + .await + }, + ) + .await } async fn direct_message( @@ -303,28 +391,21 @@ impl ConnectedNetwork, TYPES::SignatureKey> message: Message, recipient: TYPES::SignatureKey, ) -> Result<(), NetworkError> { - // DM optimistically on both networks, but if the primary network is down, skip it - let primary_down = self.primary_down.load(Ordering::Relaxed); - if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES - || primary_down % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL == 0 - { - // message on the primary network as it is not down, or we are checking if it is back up - match self - .primary() - .direct_message(message.clone(), recipient.clone()) - .await - { - Ok(()) => { - self.primary_down.store(0, Ordering::Relaxed); - } - Err(e) => { - warn!("Error on primary network: {}", e); - self.primary_down.fetch_add(1, Ordering::Relaxed); - } - }; - } - - self.secondary().direct_message(message, recipient).await + let primary = self.primary().clone(); + let secondary = self.secondary().clone(); + let primary_message = message.clone(); + let secondary_message = message.clone(); + let primary_recipient = recipient.clone(); + self.send_both_networks( + message, + async move { + primary + .direct_message(primary_message, primary_recipient) + .await + }, + async move { secondary.direct_message(secondary_message, recipient).await }, + ) + .await } /// Receive one or many messages from the underlying network. @@ -377,6 +458,26 @@ impl ConnectedNetwork, TYPES::SignatureKey> as ConnectedNetwork,TYPES::SignatureKey>>:: inject_consensus_info(self.secondary(), event).await; } + + async fn update_view(&self, view: &u64) { + let mut cancel_tasks = Vec::new(); + { + let mut map_lock = self.delayed_tasks.write().await; + while let Some((first_view, _tasks)) = map_lock.first_key_value() { + if first_view < view { + if let Some((_view, tasks)) = map_lock.pop_first() { + let mut ctasks = tasks.into_iter().map(cancel_task).collect(); + cancel_tasks.append(&mut ctasks); + } else { + break; + } + } else { + break; + } + } + } + join_all(cancel_tasks).await; + } } #[cfg(test)] diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 250302ce47..fd64c493b9 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -9,6 +9,7 @@ use hotshot_constants::VERSION_0_1; use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; +use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ message::{ CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, SequencingMessage, @@ -353,6 +354,7 @@ impl, TYPES::Signa ), HotShotEvent::ViewChange(view) => { self.view = view; + self.channel.update_view(&self.view.get_u64()).await; return None; } HotShotEvent::Shutdown => { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 87c8779f4e..549ae5ece9 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -296,6 +296,9 @@ pub trait ConnectedNetwork: /// blocking /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} + + /// handles view update + async fn update_view(&self, _view: &u64) {} } /// Describes additional functionality needed by the test network implementation From 2c567c900a9cb4772e628492b3e30b4e708986c6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 7 Mar 2024 23:25:00 -0500 Subject: [PATCH 0836/1393] [EASY] Don't limit Tokio to 2 Threads (#2729) * Get rid of worker_threads = 2 for tokio * fmt --- examples/combined/all.rs | 5 +- examples/combined/multi-validator.rs | 5 +- examples/combined/orchestrator.rs | 5 +- examples/combined/validator.rs | 5 +- examples/libp2p/all.rs | 5 +- examples/libp2p/multi-validator.rs | 5 +- examples/libp2p/orchestrator.rs | 5 +- examples/libp2p/validator.rs | 5 +- examples/webserver/multi-validator.rs | 5 +- examples/webserver/orchestrator.rs | 5 +- examples/webserver/validator.rs | 5 +- .../src/traits/networking/combined_network.rs | 5 +- libp2p-networking/tests/counter.rs | 70 ++++--------------- task/src/dependency.rs | 25 ++----- task/src/dependency_task.rs | 10 +-- task/src/task.rs | 5 +- testing-macros/src/lib.rs | 2 +- testing/tests/atomic_storage.rs | 10 +-- testing/tests/catchup.rs | 25 ++----- testing/tests/combined_network.rs | 35 ++-------- testing/tests/consensus_task.rs | 15 +--- testing/tests/da_task.rs | 5 +- testing/tests/libp2p.rs | 15 +--- testing/tests/lossy.rs | 8 +-- testing/tests/memory_network.rs | 25 ++----- testing/tests/network_task.rs | 5 +- testing/tests/storage.rs | 5 +- testing/tests/timeout.rs | 10 +-- testing/tests/unreliable_network.rs | 40 +++-------- testing/tests/upgrade_task.rs | 5 +- testing/tests/vid_task.rs | 5 +- testing/tests/view_sync_task.rs | 5 +- testing/tests/web_server.rs | 5 +- 33 files changed, 81 insertions(+), 309 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 6249abb909..19550bed79 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -27,10 +27,7 @@ use crate::{ #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index f903f1e57c..44a8943051 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -17,10 +17,7 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index 0069093505..ab32f4e245 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -15,10 +15,7 @@ use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 38c8dbe0b8..8d72b3cdf0 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -15,10 +15,7 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index ddc8b472b9..1e1fdb9a36 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -25,10 +25,7 @@ use crate::{ #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index e085b498f6..36527bf848 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -17,10 +17,7 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs index 42e23a39d6..e45d0c2a59 100644 --- a/examples/libp2p/orchestrator.rs +++ b/examples/libp2p/orchestrator.rs @@ -16,10 +16,7 @@ use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index cebcd44d04..6e3ca1db41 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -15,10 +15,7 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/webserver/multi-validator.rs b/examples/webserver/multi-validator.rs index 61d81c79c5..b93ed63409 100644 --- a/examples/webserver/multi-validator.rs +++ b/examples/webserver/multi-validator.rs @@ -17,10 +17,7 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/webserver/orchestrator.rs b/examples/webserver/orchestrator.rs index 49080dae8a..a4d4a42436 100644 --- a/examples/webserver/orchestrator.rs +++ b/examples/webserver/orchestrator.rs @@ -16,10 +16,7 @@ use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/examples/webserver/validator.rs b/examples/webserver/validator.rs index e335cae2be..886847da66 100644 --- a/examples/webserver/validator.rs +++ b/examples/webserver/validator.rs @@ -15,10 +15,7 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::main(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] #[instrument] async fn main() { diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index b324ef730a..32fc0cfd12 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -486,10 +486,7 @@ mod test { use tracing::instrument; /// cache eviction test - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_cache_eviction() { diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index acb1b07cf4..d701a575b2 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -472,10 +472,7 @@ async fn run_request_response_increment_all( } /// simple case of direct message -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_request_response_one_round() { @@ -490,10 +487,7 @@ async fn test_coverage_request_response_one_round() { } /// stress test of direct messsage -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_request_response_many_rounds() { @@ -508,10 +502,7 @@ async fn test_coverage_request_response_many_rounds() { } /// stress test of broadcast + direct message -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_intersperse_many_rounds() { @@ -526,10 +517,7 @@ async fn test_coverage_intersperse_many_rounds() { } /// stress teset that we can broadcast a message out and get counter increments -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_gossip_many_rounds() { @@ -544,10 +532,7 @@ async fn test_coverage_gossip_many_rounds() { } /// simple case of broadcast message -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_gossip_one_round() { @@ -562,10 +547,7 @@ async fn test_coverage_gossip_one_round() { } /// simple case of direct message -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -581,10 +563,7 @@ async fn test_stress_request_response_one_round() { } /// stress test of direct messsage -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -600,10 +579,7 @@ async fn test_stress_request_response_many_rounds() { } /// stress test of broadcast + direct message -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -619,10 +595,7 @@ async fn test_stress_intersperse_many_rounds() { } /// stress teset that we can broadcast a message out and get counter increments -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -638,10 +611,7 @@ async fn test_stress_gossip_many_rounds() { } /// simple case of broadcast message -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -657,10 +627,7 @@ async fn test_stress_gossip_one_round() { } /// simple case of one dht publish event -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -676,10 +643,7 @@ async fn test_stress_dht_one_round() { } /// many dht publishing events -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] @@ -695,10 +659,7 @@ async fn test_stress_dht_many_rounds() { } /// simple case of one dht publish event -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_dht_one_round() { @@ -713,10 +674,7 @@ async fn test_coverage_dht_one_round() { } /// many dht publishing events -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_coverage_dht_many_rounds() { diff --git a/task/src/dependency.rs b/task/src/dependency.rs index 6ae793a7a4..b42c73e05d 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -167,10 +167,7 @@ mod tests { } } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn it_works() { let (tx, rx) = broadcast(10); @@ -186,10 +183,7 @@ mod tests { let result = and.completed().await; assert_eq!(result, Some(vec![5; 5])); } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn or_dep() { let (tx, rx) = broadcast(10); @@ -204,10 +198,7 @@ mod tests { assert_eq!(result, Some(5)); } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn and_or_dep() { let (tx, rx) = broadcast(10); @@ -225,10 +216,7 @@ mod tests { assert_eq!(result, Some(vec![6, 5])); } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn or_and_dep() { let (tx, rx) = broadcast(10); @@ -246,10 +234,7 @@ mod tests { assert_eq!(result, Some(vec![4, 5])); } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn many_and_dep() { let (tx, rx) = broadcast(10); diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs index 9db6786637..3603b4541f 100644 --- a/task/src/dependency_task.rs +++ b/task/src/dependency_task.rs @@ -88,10 +88,7 @@ mod test { } } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] // allow unused for tokio because it's a test #[allow(unused_must_use)] @@ -107,10 +104,7 @@ mod test { join_handle.await; } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn many_works() { let (tx, rx) = broadcast(20); diff --git a/task/src/task.rs b/task/src/task.rs index 0d928bf41a..f0cd013aa0 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -381,10 +381,7 @@ mod tests { None } } - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[allow(unused_must_use)] async fn it_works() { diff --git a/testing-macros/src/lib.rs b/testing-macros/src/lib.rs index ae6821222e..77459c00e1 100644 --- a/testing-macros/src/lib.rs +++ b/testing-macros/src/lib.rs @@ -112,7 +112,7 @@ impl TestData { #slow_attribute #[cfg_attr( async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) + tokio::test(flavor = "multi_thread") )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[tracing::instrument] diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs index 0071621978..741b4068aa 100644 --- a/testing/tests/atomic_storage.rs +++ b/testing/tests/atomic_storage.rs @@ -12,10 +12,7 @@ use rand::thread_rng; type AtomicStorage = hotshot::traits::implementations::AtomicStorage; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_happy_path_qcs() { // This folder will be destroyed when the last handle to it closes @@ -81,10 +78,7 @@ async fn test_happy_path_qcs() { } } -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_happy_path_leaves() { // This folder will be destroyed when the last handle to it closes diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 339103fb1b..3901bf817b 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -1,8 +1,5 @@ #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_catchup() { use std::time::Duration; @@ -60,10 +57,7 @@ async fn test_catchup() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_catchup_web() { use std::time::Duration; @@ -117,10 +111,7 @@ async fn test_catchup_web() { /// Test that one node catches up and has sucessful views after coming back #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_catchup_one_node() { use std::time::Duration; @@ -175,10 +166,7 @@ async fn test_catchup_one_node() { /// Same as `test_catchup` except we start the nodes after their leadership so they join during view sync #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_catchup_in_view_sync() { use std::time::Duration; @@ -240,10 +228,7 @@ async fn test_catchup_in_view_sync() { // Almost the same as `test_catchup`, but with catchup nodes reloaded from anchor leaf rather than // initialized from genesis. #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_catchup_reload() { use std::time::Duration; diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index ed6273a8a3..46882e5b13 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -14,10 +14,7 @@ use hotshot::traits::implementations::{calculate_hash_of, Cache}; use hotshot_example_types::block_types::TestTransaction; #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_hash_calculation() { @@ -29,10 +26,7 @@ async fn test_hash_calculation() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_cache_integrity() { @@ -57,10 +51,7 @@ async fn test_cache_integrity() { /// A run with both the webserver and libp2p functioning properly #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_combined_network() { @@ -95,10 +86,7 @@ async fn test_combined_network() { } // A run where the webserver crashes part-way through -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_combined_network_webserver_crash() { @@ -147,10 +135,7 @@ async fn test_combined_network_webserver_crash() { // A run where the webserver crashes partway through // and then comes back up -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_combined_network_reup() { @@ -203,10 +188,7 @@ async fn test_combined_network_reup() { } // A run where half of the nodes disconnect from the webserver -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_combined_network_half_dc() { @@ -281,10 +263,7 @@ fn generate_random_node_changes( } // A fuzz test, where random network events take place on all nodes -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 2d2c3c7fbb..88b2094383 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -11,10 +11,7 @@ use jf_primitives::vid::VidScheme; use std::collections::HashMap; #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; @@ -73,10 +70,7 @@ async fn test_consensus_task() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; @@ -125,10 +119,7 @@ async fn test_consensus_vote() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index fe06a34543..7d10503d2b 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -17,10 +17,7 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; use std::{collections::HashMap, marker::PhantomData}; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { use hotshot_task_impls::harness::run_harness; diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index 44be24f9af..ddcf178bcc 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -10,10 +10,7 @@ use hotshot_testing::{ use tracing::instrument; /// libp2p network test -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn libp2p_network() { @@ -45,10 +42,7 @@ async fn libp2p_network() { } /// libp2p network test with failures -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn libp2p_network_failures_2() { @@ -96,10 +90,7 @@ async fn libp2p_network_failures_2() { } /// stress test for libp2p -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] #[ignore] diff --git a/testing/tests/lossy.rs b/testing/tests/lossy.rs index 2af0b3aca5..0559423b29 100644 --- a/testing/tests/lossy.rs +++ b/testing/tests/lossy.rs @@ -11,7 +11,7 @@ // // tests base level of working synchronous network // #[cfg_attr( // feature = "tokio-executor", -// tokio::test(flavor = "multi_thread", worker_threads = 2) +// tokio::test(flavor = "multi_thread") // )] // #[cfg_attr(feature = "async-std-executor", async_std::test)] // #[instrument] @@ -36,7 +36,7 @@ // // // tests network with forced packet delay // #[cfg_attr( // feature = "tokio-executor", -// tokio::test(flavor = "multi_thread", worker_threads = 2) +// tokio::test(flavor = "multi_thread") // )] // #[cfg_attr(feature = "async-std-executor", async_std::test)] // #[instrument] @@ -61,7 +61,7 @@ // // tests network with small packet delay and dropped packets // #[cfg_attr( // feature = "tokio-executor", -// tokio::test(flavor = "multi_thread", worker_threads = 2) +// tokio::test(flavor = "multi_thread") // )] // #[cfg_attr(feature = "async-std-executor", async_std::test)] // #[instrument] @@ -89,7 +89,7 @@ // /// tests network with asynchronous patch that eventually becomes synchronous // #[cfg_attr( // feature = "tokio-executor", -// tokio::test(flavor = "multi_thread", worker_threads = 2) +// tokio::test(flavor = "multi_thread") // )] // #[cfg_attr(feature = "async-std-executor", async_std::test)] // #[instrument] diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index ea7b502c81..90e88cbdee 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -119,10 +119,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec::Time) -> StoredView< } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn memory_storage() { diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 2ab05f386e..39b9e778f6 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -1,8 +1,5 @@ #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] // TODO Add memory network tests after this issue is finished: // https://github.com/EspressoSystems/HotShot/issues/1790 @@ -64,10 +61,7 @@ async fn test_timeout_web() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] async fn test_timeout_libp2p() { diff --git a/testing/tests/unreliable_network.rs b/testing/tests/unreliable_network.rs index 8dddff5329..4705419fb6 100644 --- a/testing/tests/unreliable_network.rs +++ b/testing/tests/unreliable_network.rs @@ -14,10 +14,7 @@ use hotshot_testing::{ }; use tracing::instrument; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn libp2p_network_sync() { @@ -48,10 +45,7 @@ async fn libp2p_network_sync() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_sync() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; @@ -83,10 +77,7 @@ async fn test_memory_network_sync() { .await; } -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] #[instrument] @@ -126,10 +117,7 @@ async fn libp2p_network_async() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_async() { @@ -176,10 +164,7 @@ async fn test_memory_network_async() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_partially_sync() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; @@ -230,10 +215,7 @@ async fn test_memory_network_partially_sync() { .await; } -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn libp2p_network_partially_sync() { @@ -274,10 +256,7 @@ async fn libp2p_network_partially_sync() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_chaos() { @@ -314,10 +293,7 @@ async fn test_memory_network_chaos() { .await; } -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[ignore] #[instrument] diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 98051c9460..556dccf286 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -8,10 +8,7 @@ use hotshot_types::{ data::ViewNumber, simple_vote::UpgradeProposalData, traits::node_implementation::ConsensusTime, }; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_upgrade_task() { use hotshot_testing::script::{run_test_script, TestScriptStage}; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 480c31b6e2..f964a69cb6 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -11,10 +11,7 @@ use jf_primitives::vid::VidScheme; use std::collections::HashMap; use std::marker::PhantomData; -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_vid_task() { use hotshot_task_impls::harness::run_harness; diff --git a/testing/tests/view_sync_task.rs b/testing/tests/view_sync_task.rs index f0d5679273..34141310cd 100644 --- a/testing/tests/view_sync_task.rs +++ b/testing/tests/view_sync_task.rs @@ -6,10 +6,7 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime use std::collections::HashMap; #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { use hotshot_task_impls::harness::run_harness; diff --git a/testing/tests/web_server.rs b/testing/tests/web_server.rs index 3871848e2c..786229abda 100644 --- a/testing/tests/web_server.rs +++ b/testing/tests/web_server.rs @@ -10,10 +10,7 @@ use hotshot_testing::{ use tracing::instrument; /// Web server network test -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn web_server_network() { From 5c0736a41ed91feba34e99083c50fba6212624fb Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 8 Mar 2024 09:02:58 -0500 Subject: [PATCH 0837/1393] Implement Testing for Regression in Consensus Event Ordering (#2697) * Init new file and a couple of setup lines * working test for QC init case * new test passing * minimal third test * complete test * remove unused code * test negative case * move the view to use node 2 as the leaderg * remove dead code * receive instead * disambiguate a couple of lines, remove commented out code * grammar * Fix tests * improve test correctness * remove duplication in expression * fix naming * permute ordering 6 ways * clarify comment * fix doc comment * fix failing case, remove unsafe code * add a comment * fix names --- testing/src/predicates.rs | 20 +++++ testing/tests/proposal_ordering.rs | 132 +++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 testing/tests/proposal_ordering.rs diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index ec08dbbdf3..f84ff75ce3 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -50,6 +50,19 @@ where } } +pub fn quorum_proposal_send() -> Predicate> +where + TYPES: NodeType, +{ + let info = "QuorumProposalSend".to_string(); + let function = |e: &_| matches!(e, QuorumProposalSend(_, _)); + + Predicate { + function: Box::new(function), + info, + } +} + type ConsensusTaskTestState = ConsensusTaskState>; @@ -76,3 +89,10 @@ pub fn decided_upgrade_cert() -> Predicate { "expected decided_upgrade_cert to be Some(_)", ) } + +pub fn is_at_view_number(n: u64) -> Predicate { + consensus_predicate( + Box::new(move |state| *state.cur_view == n), + format!("expected cur view to be {}", n).as_str(), + ) +} diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs new file mode 100644 index 0000000000..6c53c1dcc3 --- /dev/null +++ b/testing/tests/proposal_ordering.rs @@ -0,0 +1,132 @@ +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; +use hotshot_testing::{ + predicates::{exact, is_at_view_number, quorum_proposal_send}, + task_helpers::vid_scheme_from_view_number, + view_generator::TestViewGenerator, +}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use jf_primitives::vid::VidScheme; + +fn permute(inputs: Vec, order: Vec) -> Vec +where + T: Clone, +{ + let mut ordered_inputs = Vec::with_capacity(inputs.len()); + for &index in &order { + ordered_inputs.push(inputs[index].clone()); + } + ordered_inputs +} + +/// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). +/// This proposal should happen no matter how the `input_permutation` is specified. +async fn test_ordering_with_specific_order(input_permutation: Vec) { + use hotshot_testing::script::{run_test_script, TestScriptStage}; + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let node_id = 2; + let handle = build_system_handle(node_id).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let vid = + vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(node_id)); + + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. + let encoded_transactions = Vec::new(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let payload_commitment = vid_disperse.commit; + + let mut proposals = Vec::new(); + let mut votes = Vec::new(); + let mut leaders = Vec::new(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + for view in (&mut generator).take(3) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_vote(&handle)); + leaders.push(view.leader_public_key); + } + + // This stage transitions from the initial view to view 1 + let view_1 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![is_at_view_number(1)], + }; + + // Node 2 is the leader up next, so we form the QC for it. + let cert = proposals[1].data.justify_qc.clone(); + let inputs = vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QCFormed(either::Left(cert)), + SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(node_id)), + ]; + + // The consensus task does not like it when the proposal received is the last thing to happen, + // at least not while keeping an arbitrary ordering. The testing framework does not allow us to + // check events out of order, so we instead just give the test what it wants, but this should + // still be okay. + let view_2_outputs = if input_permutation[2] == 0 { + vec![ + quorum_proposal_send(), + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + ] + } else { + vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_send(), + ] + }; + + let view_2_inputs = permute(inputs, input_permutation); + + // This stage transitions from view 1 to view 2. + let view_2 = TestScriptStage { + inputs: view_2_inputs, + outputs: view_2_outputs, + // We should end on view 2. + asserts: vec![is_at_view_number(2)], + }; + + let script = vec![view_1, view_2]; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + run_test_script(script, consensus_state).await; +} + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// A leader node may receive one of a couple of possible events which can trigger a proposal. This +/// test ensures that, no matter what order these events are received in, the node will still +/// trigger the proposal event regardless. This is to catch a regression in which +/// `SendPayloadCommitmentAndMetadata`, when received last, resulted in no proposal occurring. +async fn test_proposal_ordering() { + test_ordering_with_specific_order(vec![0, 1, 2]).await; + test_ordering_with_specific_order(vec![0, 2, 1]).await; + test_ordering_with_specific_order(vec![1, 0, 2]).await; + test_ordering_with_specific_order(vec![2, 0, 1]).await; + test_ordering_with_specific_order(vec![1, 2, 0]).await; + test_ordering_with_specific_order(vec![2, 1, 0]).await; +} From 0346246d5135005ff95c672edccaff733e487697 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 8 Mar 2024 09:04:20 -0500 Subject: [PATCH 0838/1393] Refactor Consensus Task Tests to Use New Test Functions (#2716) * checkpoint initial tests * Init new file and a couple of setup lines * working test for QC init case * new test passing * minimal third test * complete test * remove unused code * test negative case * move the view to use node 2 as the leaderg * remove dead code * receive instead * disambiguate a couple of lines, remove commented out code * grammar * Fix tests * improve test correctness * remove duplication in expression * fix consensus task test * one test to go * fix build * remove old code from tree * re-add old tests --- testing/tests/consensus_task.rs | 144 +++++++++++++++++++++++++++++++- 1 file changed, 142 insertions(+), 2 deletions(-) diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 88b2094383..44d5137fec 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -14,6 +14,146 @@ use std::collections::HashMap; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; + use hotshot_testing::{ + predicates::{exact, is_at_view_number, quorum_proposal_send}, + script::{run_test_script, TestScriptStage}, + task_helpers::{build_system_handle, vid_scheme_from_view_number}, + view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. We need the VID commitment to be able to propose later. + let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); + let encoded_transactions = Vec::new(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let payload_commitment = vid_disperse.commit; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_vote(&handle)); + } + + // Run view 1 (the genesis stage). + let view_1 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![is_at_view_number(1)], + }; + + let cert = proposals[1].data.justify_qc.clone(); + + // Run view 2 and propose. + let view_2 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QCFormed(either::Left(cert)), + // We must have a payload commitment and metadata to propose. + SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_send(), + ], + asserts: vec![is_at_view_number(2)], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + inject_consensus_polls(&consensus_state).await; + + run_test_script(vec![view_1, view_2], consensus_state).await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_vote() { + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; + use hotshot_testing::{ + predicates::exact, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_vote(&handle)); + } + + // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + QuorumVoteRecv(votes[0].clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + inject_consensus_polls(&consensus_state).await; + run_test_script(vec![view_1], consensus_state).await; +} + +/// TODO (jparr721): Nuke these old tests. Tracking: https://github.com/EspressoSystems/HotShot/issues/2727 +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_task_old() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; use hotshot_testing::task_helpers::build_system_handle; @@ -72,7 +212,7 @@ async fn test_consensus_task() { #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_vote() { +async fn test_consensus_vote_old() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; use hotshot_testing::task_helpers::build_system_handle; @@ -124,7 +264,7 @@ async fn test_consensus_vote() { // TODO: re-enable this when HotShot/the sequencer needs the shares for something // issue: https://github.com/EspressoSystems/HotShot/issues/2236 #[ignore] -async fn test_consensus_with_vid() { +async fn test_consensus_with_vid_old() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::traits::BlockPayload; use hotshot::types::SignatureKey; From 5f19198358b09ffa3097efd799df338b98636e0d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 8 Mar 2024 11:28:59 -0800 Subject: [PATCH 0839/1393] [Auto Benchmarks] Add stat collection (#2588) * throughput stats added * fix throughput * markers for latency * latency added * stats added: total num views, failed num views, kind of leader election * lint * parameterizing args and partially fix readme * non-required arguments infrastructure added * clean up and lint * finish parameterization * scripts added * scripts added...truely * gitignore changes wait to be replaced later * lint * comments * [Auto Benchmarks] Post benchmark stats to orchestrator (#2699) * post bench stats to orchestrator * deal benchmark results from all nodes * fmt * cleanup benchmeark results output in log, add argument for inputting running commit * add argument commit_sha * add argument commit_sha * fix bug of transaction_size input * add argument for orchestrator_url * rename post & get method to orchestrator * default setting of config file * fmt * EventType cleanup --- example-types/src/block_types.rs | 4 + examples/Cargo.toml | 1 + examples/combined/all.rs | 28 +--- examples/combined/orchestrator.rs | 14 +- examples/infra/mod.rs | 240 +++++++++++++++++++++++++---- examples/libp2p/all.rs | 26 +--- examples/libp2p/orchestrator.rs | 20 ++- examples/webserver/README.md | 21 ++- examples/webserver/all.rs | 21 +-- examples/webserver/orchestrator.rs | 20 ++- orchestrator/Cargo.toml | 2 + orchestrator/README.md | 4 +- orchestrator/api.toml | 19 ++- orchestrator/run-config.toml | 4 +- orchestrator/src/client.rs | 104 ++++++++++++- orchestrator/src/config.rs | 6 +- orchestrator/src/lib.rs | 120 ++++++++++++--- task-impls/src/consensus.rs | 8 + types/src/event.rs | 5 + types/src/traits/block_contents.rs | 3 + 20 files changed, 525 insertions(+), 145 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 43d9ba22fa..2fc6470de5 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -170,6 +170,10 @@ impl BlockPayload for TestBlockPayload { } BuilderCommitment::from_raw_digest(digest.finalize()) } + + fn get_transactions(&self) -> &Vec { + &self.transactions + } } /// A [`BlockHeader`] that commits to [`TestBlockPayload`]. diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 8ed876d41d..b97c456e03 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -111,6 +111,7 @@ lru = "0.12.3" hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } +chrono = "0.4" tracing = { workspace = true } diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 19550bed79..4c1605c48a 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -2,27 +2,23 @@ /// types used for this example pub mod types; -use crate::infra::load_config_from_file; +use crate::infra::read_orchestrator_init_config; +use crate::infra::OrchestratorArgs; use crate::types::ThisRun; +use crate::{ + infra::run_orchestrator, + types::{DANetwork, NodeImpl, QuorumNetwork}, +}; use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::channel::oneshot; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; -use hotshot_orchestrator::config::NetworkConfig; -use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; use surf_disco::Url; use tracing::{error, instrument}; -use crate::{ - infra::run_orchestrator, - infra::{ConfigArgs, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork}, -}; - /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -34,8 +30,7 @@ async fn main() { setup_logging(); setup_backtrace(); - // use configfile args - let args = ConfigArgs::parse(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); // spawn web servers let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); @@ -43,8 +38,6 @@ async fn main() { let _sender = Arc::new(server_shutdown_sender_cdn); let _sender = Arc::new(server_shutdown_sender_da); - let orchestrator_url = Url::parse("http://localhost:4444").unwrap(); - async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, @@ -78,15 +71,10 @@ async fn main() { NodeImpl, >(OrchestratorArgs { url: orchestrator_url.clone(), - - config_file: args.config_file.clone(), + config: config.clone(), })); // nodes - let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = load_config_from_file::(&args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { let orchestrator_url = orchestrator_url.clone(); diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index ab32f4e245..409fc4f16c 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -3,14 +3,13 @@ pub mod types; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; +use crate::infra::read_orchestrator_init_config; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; - /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -21,7 +20,12 @@ pub mod infra; async fn main() { setup_logging(); setup_backtrace(); - let args = OrchestratorArgs::parse(); - - run_orchestrator::(args).await; + let (config, orchestrator_url) = read_orchestrator_init_config::(); + run_orchestrator::(OrchestratorArgs::< + TestTypes, + > { + url: orchestrator_url.clone(), + config: config.clone(), + }) + .await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 3c0b53a4d6..e2d7a79d2b 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -4,8 +4,10 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_lock::RwLock; use async_trait::async_trait; use clap::Parser; +use clap::{Arg, Command}; use futures::StreamExt; use hotshot::traits::implementations::{CombinedNetworks, UnderlyingCombinedNetworks}; +use hotshot::traits::BlockPayload; use hotshot::{ traits::{ implementations::{Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, WebServerNetwork}, @@ -21,7 +23,7 @@ use hotshot_example_types::{ use hotshot_orchestrator::config::NetworkConfigSource; use hotshot_orchestrator::{ self, - client::{OrchestratorClient, ValidatorArgs}, + client::{BenchResults, OrchestratorClient, ValidatorArgs}, config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_types::message::Message; @@ -56,22 +58,19 @@ use std::{collections::BTreeSet, sync::Arc}; use std::{num::NonZeroUsize, str::FromStr}; use surf_disco::Url; +use chrono::Utc; use libp2p_identity::PeerId; use std::fmt::Debug; use std::{fs, time::Instant}; use tracing::{error, info, warn}; -#[derive(Parser, Debug, Clone)] -#[command( - name = "Multi-machine consensus", - about = "Simulates consensus among multiple machines" -)] +#[derive(Debug, Clone)] /// Arguments passed to the orchestrator -pub struct OrchestratorArgs { +pub struct OrchestratorArgs { /// The url the orchestrator runs on; this should be in the form of `http://localhost:5555` or `http://0.0.0.0:5555` pub url: Url, /// The configuration file to be used for this run - pub config_file: String, + pub config: NetworkConfig, } #[derive(Parser, Debug, Clone)] @@ -85,6 +84,130 @@ pub struct ConfigArgs { pub config_file: String, } +impl Default for ConfigArgs { + fn default() -> Self { + Self { + config_file: "./crates/orchestrator/run-config.toml".to_string(), + } + } +} + +/// Reads the orchestrator initialization config from the command line +/// # Panics +/// If unable to read the config file from the command line +#[allow(clippy::too_many_lines)] +pub fn read_orchestrator_init_config() -> ( + NetworkConfig, + Url, +) { + // assign default setting + let mut orchestrator_url = Url::parse("http://localhost:4444").unwrap(); + let mut args = ConfigArgs::default(); + // start reading from command line + let matches = Command::new("orchestrator") + .arg( + Arg::new("config_file") + .short('c') + .long("config_file") + .value_name("FILE") + .help("Sets a custom config file with default values, some might be changed if they are set manually in the command line") + .required(true), + ) + .arg( + Arg::new("total_nodes") + .short('n') + .long("total_nodes") + .value_name("NUM") + .help("Sets the total number of nodes") + .required(false), + ) + .arg( + Arg::new("da_committee_size") + .short('d') + .long("da_committee_size") + .value_name("NUM") + .help("Sets the size of the data availability committee") + .required(false), + ) + .arg( + Arg::new("transactions_per_round") + .short('t') + .long("transactions_per_round") + .value_name("NUM") + .help("Sets the number of transactions per round") + .required(false), + ) + .arg( + Arg::new("transaction_size") + .short('s') + .long("transaction_size") + .value_name("NUM") + .help("Sets the size of each transaction in bytes") + .required(false), + ) + .arg( + Arg::new("rounds") + .short('r') + .long("rounds") + .value_name("NUM") + .help("Sets the number of rounds to run") + .required(false), + ) + .arg( + Arg::new("commit_sha") + .short('m') + .long("commit_sha") + .value_name("SHA") + .help("Sets the commit sha to output in the results") + .required(false), + ) + .arg( + Arg::new("orchestrator_url") + .short('u') + .long("orchestrator_url") + .value_name("URL") + .help("Sets the url of the orchestrator") + .required(false), + ) + .get_matches(); + if let Some(config_file_string) = matches.get_one::("config_file") { + args = ConfigArgs { + config_file: config_file_string.clone(), + }; + } else { + error!("No config file provided, we'll use the default one."); + } + let mut config: NetworkConfig = + load_config_from_file::(&args.config_file); + + if let Some(total_nodes_string) = matches.get_one::("total_nodes") { + config.config.total_nodes = total_nodes_string.parse::().unwrap(); + config.config.known_nodes_with_stake = + vec![PeerConfig::default(); config.config.total_nodes.get() as usize]; + error!("config.config.total_nodes: {:?}", config.config.total_nodes); + } + if let Some(da_committee_size_string) = matches.get_one::("da_committee_size") { + config.config.da_committee_size = da_committee_size_string.parse::().unwrap(); + } + if let Some(transactions_per_round_string) = matches.get_one::("transactions_per_round") + { + config.transactions_per_round = transactions_per_round_string.parse::().unwrap(); + } + if let Some(transaction_size_string) = matches.get_one::("transaction_size") { + config.transaction_size = transaction_size_string.parse::().unwrap(); + } + if let Some(rounds_string) = matches.get_one::("rounds") { + config.rounds = rounds_string.parse::().unwrap(); + } + if let Some(commit_sha_string) = matches.get_one::("commit_sha") { + config.commit_sha = commit_sha_string.to_string(); + } + if let Some(orchestrator_url_string) = matches.get_one::("orchestrator_url") { + orchestrator_url = Url::parse(orchestrator_url_string).unwrap(); + } + (config, orchestrator_url) +} + /// Reads a network configuration from a given filepath /// # Panics /// if unable to convert the config file into toml @@ -109,7 +232,8 @@ pub fn load_config_from_file( config_toml.into(); // my_own_validator_config would be best to load from file, - // but its type is too complex to load so we'll generate it from seed now + // but its type is too complex to load so we'll generate it from seed now. + // Also this function is only used for orchestrator initialization now, so this value doesn't matter config.config.my_own_validator_config = ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1); // initialize it with size for better assignment of peers' config @@ -126,14 +250,13 @@ pub async fn run_orchestrator< QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, NODE: NodeImplementation>, >( - OrchestratorArgs { url, config_file }: OrchestratorArgs, + OrchestratorArgs { url, config }: OrchestratorArgs, ) { error!("Starting orchestrator",); - let run_config = load_config_from_file::(&config_file); let _result = hotshot_orchestrator::run_orchestrator::< TYPES::SignatureKey, TYPES::ElectionConfigType, - >(run_config, url) + >(config, url) .await; } @@ -383,12 +506,14 @@ pub trait RunDA< } /// Starts HotShot consensus, returns when consensus has finished + #[allow(clippy::too_many_lines)] async fn run_hotshot( &self, context: SystemContextHandle, transactions: &mut Vec, transactions_to_send_per_round: u64, - ) { + transaction_size_in_bytes: u64, + ) -> BenchResults { let NetworkConfig { rounds, node_index, @@ -398,6 +523,10 @@ pub trait RunDA< let mut total_transactions_committed = 0; let mut total_transactions_sent = 0; + let mut minimum_latency = 1000; + let mut maximum_latency = 0; + let mut total_latency = 0; + let mut num_latency = 0; error!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); async_sleep(Duration::from_secs(start_delay_seconds)).await; @@ -408,6 +537,7 @@ pub trait RunDA< let mut event_stream = context.get_event_stream(); let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; + let mut failed_num_views = 0; context.hotshot.start_consensus().await; @@ -427,10 +557,29 @@ pub trait RunDA< qc: _, block_size, } => { + let current_timestamp = Utc::now().timestamp(); // this might be a obob if let Some((leaf, _)) = leaf_chain.first() { info!("Decide event for leaf: {}", *leaf.view_number); + // iterate all the decided transactions to calculate latency + if let Some(block_payload) = &leaf.block_payload { + for tx in block_payload.get_transactions() { + let restored_timestamp_vec = + tx.0[tx.0.len() - 8..].to_vec(); + let restored_timestamp = i64::from_be_bytes( + restored_timestamp_vec.as_slice().try_into().unwrap(), + ); + let cur_latency = current_timestamp - restored_timestamp; + total_latency += cur_latency; + num_latency += 1; + minimum_latency = + std::cmp::min(minimum_latency, cur_latency); + maximum_latency = + std::cmp::max(maximum_latency, cur_latency); + } + } + let new_anchor = leaf.view_number; if new_anchor >= anchor_view { anchor_view = leaf.view_number; @@ -438,9 +587,16 @@ pub trait RunDA< // send transactions for _ in 0..transactions_to_send_per_round { - let tx = transactions.remove(0); - - () = context.submit_transaction(tx).await.unwrap(); + // append current timestamp to the tx to calc latency + let timestamp = Utc::now().timestamp(); + let mut tx = transactions.remove(0).0; + let mut timestamp_vec = timestamp.to_be_bytes().to_vec(); + tx.append(&mut timestamp_vec); + + () = context + .submit_transaction(TestTransaction(tx)) + .await + .unwrap(); total_transactions_sent += 1; } } @@ -460,20 +616,49 @@ pub trait RunDA< // when we make progress, submit new events } EventType::ReplicaViewTimeout { view_number } => { + failed_num_views += 1; warn!("Timed out as a replicas in view {:?}", view_number); } - EventType::NextLeaderViewTimeout { view_number } => { - warn!("Timed out as the next leader in view {:?}", view_number); + EventType::ViewTimeout { view_number } => { + failed_num_views += 1; + warn!("Timed out in view {:?}", view_number); } _ => {} } } } } - + let consensus_lock = context.hotshot.get_consensus(); + let consensus = consensus_lock.read().await; + let total_num_views = usize::try_from(consensus.locked_view.get_u64()).unwrap(); + // When posting to the orchestrator, note that the total number of views also include un-finalized views. + error!("Failed views: {failed_num_views}, Total views: {total_num_views}, num_successful_commits: {num_successful_commits}"); + // +2 is for uncommitted views + assert!(total_num_views <= (failed_num_views + num_successful_commits + 2)); // Output run results - let total_time_elapsed = start.elapsed(); + let total_time_elapsed = start.elapsed(); // in seconds error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); + if total_transactions_committed != 0 { + // extra 8 bytes for timestamp + let throughput_bytes_per_sec = total_transactions_committed + * (transaction_size_in_bytes + 8) + / total_time_elapsed.as_secs(); + BenchResults { + avg_latency_in_sec: total_latency / num_latency, + num_latency, + minimum_latency_in_sec: minimum_latency, + maximum_latency_in_sec: maximum_latency, + throughput_bytes_per_sec, + total_transactions_committed, + transaction_size_in_bytes: transaction_size_in_bytes + 8, // extra 8 bytes for timestamp + total_time_elapsed_in_sec: total_time_elapsed.as_secs(), + total_num_views, + failed_num_views, + } + } else { + // all values with zero + BenchResults::default() + } } /// Returns the da network for this run @@ -826,12 +1011,15 @@ pub async fn main_entry_point< } error!("Starting HotShot"); - run.run_hotshot( - hotshot, - &mut transactions, - transactions_to_send_per_round as u64, - ) - .await; + let bench_results = run + .run_hotshot( + hotshot, + &mut transactions, + transactions_to_send_per_round as u64, + (transaction_size + 8) as u64, // extra 8 bytes for transaction base, see `create_random_transaction`. + ) + .await; + orchestrator_client.post_bench_results(bench_results).await; } /// generate a libp2p identity based on a seed and idx diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 1e1fdb9a36..9e6f2846fc 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -2,25 +2,20 @@ /// types used for this example pub mod types; -use crate::infra::load_config_from_file; +use crate::infra::read_orchestrator_init_config; +use crate::infra::OrchestratorArgs; use crate::types::ThisRun; +use crate::{ + infra::run_orchestrator, + types::{DANetwork, NodeImpl, QuorumNetwork}, +}; use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; -use hotshot_orchestrator::config::NetworkConfig; -use hotshot_types::traits::node_implementation::NodeType; use std::net::{IpAddr, Ipv4Addr}; -use surf_disco::Url; use tracing::instrument; -use crate::{ - infra::run_orchestrator, - infra::{ConfigArgs, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork}, -}; - /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -33,8 +28,7 @@ async fn main() { setup_backtrace(); // use configfile args - let args = ConfigArgs::parse(); - let orchestrator_url = Url::parse("http://localhost:4444").unwrap(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); // orchestrator async_spawn(run_orchestrator::< @@ -44,14 +38,10 @@ async fn main() { NodeImpl, >(OrchestratorArgs { url: orchestrator_url.clone(), - config_file: args.config_file.clone(), + config: config.clone(), })); // nodes - let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = load_config_from_file::(&args.config_file); let mut nodes = Vec::new(); for _ in 0..config.config.total_nodes.into() { let orchestrator_url = orchestrator_url.clone(); diff --git a/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs index e45d0c2a59..8e677abdbe 100644 --- a/examples/libp2p/orchestrator.rs +++ b/examples/libp2p/orchestrator.rs @@ -3,14 +3,13 @@ /// types used for this example pub mod types; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; -use hotshot_example_types::state_types::TestTypes; -use tracing::instrument; - +use crate::infra::read_orchestrator_init_config; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot_example_types::state_types::TestTypes; +use tracing::instrument; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -22,7 +21,12 @@ pub mod infra; async fn main() { setup_logging(); setup_backtrace(); - let args = OrchestratorArgs::parse(); - - run_orchestrator::(args).await; + let (config, orchestrator_url) = read_orchestrator_init_config::(); + run_orchestrator::(OrchestratorArgs::< + TestTypes, + > { + url: orchestrator_url.clone(), + config: config.clone(), + }) + .await; } diff --git a/examples/webserver/README.md b/examples/webserver/README.md index e7991228f2..9d74ab723a 100644 --- a/examples/webserver/README.md +++ b/examples/webserver/README.md @@ -16,13 +16,20 @@ just async_std example validator-webserver -- I.e. -just async_std example webserver -- http://127.0.0.1 9000 -just async_std example webserver -- http://127.0.0.1 9001 -just async_std example webserver -- http://127.0.0.1 9002 -just async_std example orchestrator-webserver -- http://127.0.0.1 4444 ./orchestrator/default-run-config.toml -just async_std example validator-webserver -- 2 http://127.0.0.1 4444 +just async_std example webserver -- http://127.0.0.1:9000 +just async_std example webserver -- http://127.0.0.1:9001 +just async_std example webserver -- http://127.0.0.1:9002 +just async_std example orchestrator-webserver -- http://127.0.0.1:4444 ./crates/orchestrator/run-config.toml +just async_std example validator-webserver -- 2 http://127.0.0.1:4444 OR: just async_std example multi-webserver -- 9000 9001 9002 -just async_std example orchestrator-webserver -- http://127.0.0.1 4444 ./orchestrator/default-run-config.toml -just async_std example multi-validator-webserver -- 10 http://127.0.0.1 4444 \ No newline at end of file +just async_std example orchestrator-webserver -- http://127.0.0.1:4444 ./crates/orchestrator/run-config.toml +just async_std example multi-validator-webserver -- 10 http://127.0.0.1:4444 + +================All of the above are out-dated================ +OR: +`just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://localhost:4444` +For other argument setting, checkout `read_orchestrator_initialization_config` in `crates/examples/infra/mod.rs`. +One example is: `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 15`. +Another example is `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 20 --da_committee_size 5 --transactions_per_round 10 --transaction_size 512 --rounds 100`, I'll get throughput `0.29M/s` for this one. \ No newline at end of file diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index b68b3ec46e..d7c2df99fe 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -2,8 +2,8 @@ /// types used for this example pub mod types; -use crate::infra::load_config_from_file; -use crate::infra::{ConfigArgs, OrchestratorArgs}; +use crate::infra::read_orchestrator_init_config; +use crate::infra::OrchestratorArgs; use crate::types::ThisRun; use crate::{ infra::run_orchestrator, @@ -17,11 +17,8 @@ use std::sync::Arc; pub mod infra; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; -use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; -use hotshot_orchestrator::config::NetworkConfig; -use hotshot_types::traits::node_implementation::NodeType; use surf_disco::Url; use tracing::error; @@ -32,8 +29,8 @@ async fn main() { setup_logging(); setup_backtrace(); - // use configfile args - let args = ConfigArgs::parse(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); + // spawn web servers let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); @@ -65,24 +62,18 @@ async fn main() { } }); - let orchestrator_url = Url::parse("http://localhost:4444").unwrap(); - // web server orchestrator async_spawn(run_orchestrator::< TestTypes, DANetwork, QuorumNetwork, NodeImpl, - >(OrchestratorArgs { + >(OrchestratorArgs:: { url: orchestrator_url.clone(), - config_file: args.config_file.clone(), + config: config.clone(), })); // multi validator run - let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = load_config_from_file::(&args.config_file); let mut nodes = Vec::new(); for _ in 0..(config.config.total_nodes.get()) { let orchestrator_url = orchestrator_url.clone(); diff --git a/examples/webserver/orchestrator.rs b/examples/webserver/orchestrator.rs index a4d4a42436..b033e89fb5 100644 --- a/examples/webserver/orchestrator.rs +++ b/examples/webserver/orchestrator.rs @@ -3,14 +3,13 @@ /// types used for this example pub mod types; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; -use hotshot_example_types::state_types::TestTypes; -use tracing::instrument; - +use crate::infra::read_orchestrator_init_config; use crate::infra::run_orchestrator; use crate::infra::OrchestratorArgs; use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot_example_types::state_types::TestTypes; +use tracing::instrument; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -22,7 +21,12 @@ pub mod infra; async fn main() { setup_logging(); setup_backtrace(); - let args = OrchestratorArgs::parse(); - - run_orchestrator::(args).await; + let (config, orchestrator_url) = read_orchestrator_init_config::(); + run_orchestrator::(OrchestratorArgs::< + TestTypes, + > { + url: orchestrator_url.clone(), + config: config.clone(), + }) + .await; } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index c131b0a93c..7b679472f0 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -11,6 +11,7 @@ futures = { workspace = true } libp2p = { workspace = true } blake3 = { workspace = true } hotshot-types = { version = "0.1.0", path = "../types", default-features = false } +hotshot-utils = { path = "../utils" } tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } @@ -19,6 +20,7 @@ serde_json = { workspace = true } toml = { workspace = true } thiserror = "1.0.50" serde-inline-default = "0.1.1" +csv = "1.3.0" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/orchestrator/README.md b/orchestrator/README.md index 2e0b6f0443..979dceb64c 100644 --- a/orchestrator/README.md +++ b/orchestrator/README.md @@ -2,6 +2,6 @@ This crate implements an orchestrator that coordinates starting the network with a particular configuration. It is useful for testing and benchmarking. Like the web server, the orchestrator is built using [Tide Disco](https://github.com/EspressoSystems/tide-disco). -To run the orchestrator for a libp2p network: `just async_std example orchestrator-libp2p 0.0.0.0 3333 ./crates/orchestrator/run-config` +To run the orchestrator for a libp2p network: `just async_std example orchestrator-libp2p http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` -To run the orchestrator for a libp2p network: `just async_std example orchestrator-webserver 0.0.0.0 3333 ./crates/orchestrator/run-config.toml ` \ No newline at end of file +To run the orchestrator for a webserver network: `just async_std example orchestrator-webserver http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml ` \ No newline at end of file diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 4258b0d71d..9abc5ebc5d 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -4,7 +4,7 @@ DESCRIPTION = "Orchestrator for HotShot" FORMAT_VERSION = "0.1.0" # POST node's identity -[route.postidentity] +[route.post_identity] PATH = ["identity/:identity"] METHOD = "POST" ":identity" = "Literal" @@ -24,15 +24,15 @@ received from the 'identity' endpoint """ # GET the latest temporary node index only for generating validator's key pair -[route.tmp_node_index] -PATH = ["tmp_node_index"] +[route.get_tmp_node_index] +PATH = ["get_tmp_node_index"] METHOD = "POST" DOC = """ Get the latest temporary node index only for generating validator's key pair for testing in hotshot, later the generated key pairs might be bound with other node_index. """ # POST the node's node index to generate public key for pubkey collection -[route.postpubkey] +[route.post_pubkey] PATH = ["pubkey/:node_index"] METHOD = "POST" ":node_index" = "Integer" @@ -48,15 +48,15 @@ Get whether the node can collect the final config which includs all peer's publi """ # GET the updated config with all peers' public keys / configs -[route.config_after_peer_collected] -PATH = ["config_after_peer_collected"] +[route.get_config_after_peer_collected] +PATH = ["get_config_after_peer_collected"] DOC = """ Get the updated config with all peers' public keys / configs, returns a NetworkConfig. """ # POST whether the node is ready to begin the run # TODO ED Use the node index parameter -[route.postready] +[route.post_ready] PATH = ["ready"] METHOD = "POST" ":node_index" = "Integer" @@ -65,16 +65,15 @@ Post whether the node with node_index is ready to start the run """ # GET whether or not to start the run -[route.getstart] +[route.get_start] PATH = ["start"] DOC = """ Get whether the node should start the run, returns a boolean """ # POST the run results -[route.postresults] +[route.post_results] PATH = ["results"] -":run_results" = "TaggedBase64" METHOD = "POST" DOC = """ Post run results. diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index e3d06f86f8..5c547bccc8 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,5 +1,5 @@ -rounds = 100 -transactions_per_round = 1 +rounds = 10 +transactions_per_round = 10 transaction_size = 1749 node_index = 0 seed = [ diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index a429a3174e..ca7e57822b 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -19,6 +19,91 @@ pub struct OrchestratorClient { pub identity: String, } +/// Struct describing a benchmark result +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default, PartialEq)] +pub struct BenchResults { + /// The average latency of the transactions + pub avg_latency_in_sec: i64, + /// The number of transactions that were latency measured + pub num_latency: i64, + /// The minimum latency of the transactions + pub minimum_latency_in_sec: i64, + /// The maximum latency of the transactions + pub maximum_latency_in_sec: i64, + /// The throughput of the consensus protocol = number of transactions committed per second * transaction size in bytes + pub throughput_bytes_per_sec: u64, + /// The number of transactions committed during benchmarking + pub total_transactions_committed: u64, + /// The size of each transaction in bytes + pub transaction_size_in_bytes: u64, + /// The total time elapsed for benchmarking + pub total_time_elapsed_in_sec: u64, + /// The total number of views during benchmarking + pub total_num_views: usize, + /// The number of failed views during benchmarking + pub failed_num_views: usize, +} + +impl BenchResults { + /// printout the results of one example run + pub fn printout(&self) { + println!("====================="); + println!("Benchmark results:"); + println!( + "Average latency: {} seconds, Minimum latency: {} seconds, Maximum latency: {} seconds", + self.avg_latency_in_sec, self.minimum_latency_in_sec, self.maximum_latency_in_sec + ); + println!("Throughput: {} bytes/sec", self.throughput_bytes_per_sec); + println!( + "Total transactions committed: {}", + self.total_transactions_committed + ); + println!( + "Total number of views: {}, Failed number of views: {}", + self.total_num_views, self.failed_num_views + ); + println!("====================="); + } +} + +/// Struct describing a benchmark result needed for download, also include the config +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default, PartialEq)] +pub struct BenchResultsDownloadConfig { + // Config starting here + /// The commit this benchmark was run on + pub commit_sha: String, + /// Total number of nodes + pub total_nodes: usize, + /// The size of the da committee + pub da_committee_size: usize, + /// Number of transactions submitted per round + pub transactions_per_round: usize, + /// The size of each transaction in bytes + pub transaction_size: u64, + /// The number of rounds + pub rounds: usize, + /// The type of leader election used + pub leader_election_type: String, + + // Results starting here + /// The average latency of the transactions + pub avg_latency_in_sec: i64, + /// The minimum latency of the transactions + pub minimum_latency_in_sec: i64, + /// The maximum latency of the transactions + pub maximum_latency_in_sec: i64, + /// The throughput of the consensus protocol = number of transactions committed per second * transaction size in bytes + pub throughput_bytes_per_sec: u64, + /// The number of transactions committed during benchmarking + pub total_transactions_committed: u64, + /// The total time elapsed for benchmarking + pub total_time_elapsed_in_sec: u64, + /// The total number of views during benchmarking + pub total_num_views: usize, + /// The number of failed views during benchmarking + pub failed_num_views: usize, +} + // VALIDATOR #[derive(Parser, Debug, Clone)] @@ -151,7 +236,7 @@ impl OrchestratorClient { let cur_node_index = |client: Client| { async move { let cur_node_index: Result = - client.post("api/tmp_node_index").send().await; + client.post("api/get_tmp_node_index").send().await; cur_node_index } .boxed() @@ -173,7 +258,7 @@ impl OrchestratorClient { let _send_pubkey_ready_f: Result<(), ClientError> = self .client .post(&format!("api/pubkey/{node_index}")) - .body_binary(&PeerConfig::::to_bytes(&my_pub_key)) //&my_pub_key.stake_table_entry.get_public_key().to_bytes() + .body_binary(&PeerConfig::::to_bytes(&my_pub_key)) .unwrap() .send() .await; @@ -187,7 +272,7 @@ impl OrchestratorClient { // get the newest updated config self.client - .get("api/config_after_peer_collected") + .get("api/get_config_after_peer_collected") .send() .await .expect("Unable to get the updated config") @@ -220,6 +305,19 @@ impl OrchestratorClient { .await } + /// Sends the benchmark metrics to the orchestrator + /// # Panics + /// Panics if unable to post + pub async fn post_bench_results(&self, bench_results: BenchResults) { + let _send_metrics_f: Result<(), ClientError> = self + .client + .post("api/results") + .body_json(&bench_results) + .unwrap() + .send() + .await; + } + /// Generic function that waits for the orchestrator to return a non-error /// Returns whatever type the given function returns async fn wait_for_fn_from_orchestrator(&self, f: F) -> GEN diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 93c204d013..954a432b18 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -153,6 +153,8 @@ pub struct NetworkConfig { pub web_server_config: Option, /// the data availability web server config pub da_web_server_config: Option, + /// the commit this run is based on + pub commit_sha: String, } /// the source of the network config @@ -273,7 +275,7 @@ impl NetworkConfig { .await; run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; - error!("Retrieved config; our node index is {node_index}"); + error!("Retrieved config; our node index is {node_index}."); (run_config, source) } @@ -389,6 +391,7 @@ impl Default for NetworkConfig { num_bootrap: 5, propose_min_round_time: Duration::from_secs(0), propose_max_round_time: Duration::from_secs(10), + commit_sha: String::new(), } } } @@ -469,6 +472,7 @@ impl From> for NetworkC start_delay_seconds: val.start_delay_seconds, web_server_config: val.web_server_config, da_web_server_config: val.da_web_server_config, + commit_sha: String::new(), } } } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 5c3f644a2c..d375b8e588 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -6,6 +6,7 @@ pub mod client; pub mod config; use async_lock::RwLock; +use client::{BenchResults, BenchResultsDownloadConfig}; use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, PeerConfig, @@ -16,7 +17,7 @@ use std::{ io::ErrorKind, net::{IpAddr, SocketAddr}, }; -use tide_disco::{Api, App}; +use tide_disco::{Api, App, RequestError}; use surf_disco::Url; use tide_disco::{ @@ -29,6 +30,7 @@ use futures::FutureExt; use crate::config::NetworkConfig; +use csv::Writer; use libp2p::identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, Keypair, @@ -56,7 +58,7 @@ struct OrchestratorState { /// The network configuration config: NetworkConfig, /// The total nodes that have posted their public keys - pub nodes_with_pubkey: u64, + nodes_with_pubkey: u64, /// Whether the network configuration has been updated with all the peer's public keys/configs peer_pub_ready: bool, /// The set of index for nodes that have posted their public keys/configs @@ -65,7 +67,11 @@ struct OrchestratorState { /// Will be set to true once all nodes post they are ready to start start: bool, /// The total nodes that have posted they are ready to start - pub nodes_connected: u64, + nodes_connected: u64, + /// The results of the benchmarks + bench_results: BenchResults, + /// The number of nodes that have posted their results + nodes_post_results: u64, } impl @@ -82,8 +88,36 @@ impl pub_posted: HashSet::new(), nodes_connected: 0, start: false, + bench_results: BenchResults::default(), + nodes_post_results: 0, } } + + /// Output the results to a csv file according to orchestrator state + pub fn output_to_csv(&self) { + let output_csv = BenchResultsDownloadConfig { + commit_sha: self.config.commit_sha.clone(), + total_nodes: self.config.config.total_nodes.into(), + da_committee_size: self.config.config.da_committee_size, + transactions_per_round: self.config.transactions_per_round, + transaction_size: self.bench_results.transaction_size_in_bytes, + rounds: self.config.rounds, + leader_election_type: self.config.election_config_type_name.clone(), + avg_latency_in_sec: self.bench_results.avg_latency_in_sec, + minimum_latency_in_sec: self.bench_results.minimum_latency_in_sec, + maximum_latency_in_sec: self.bench_results.maximum_latency_in_sec, + throughput_bytes_per_sec: self.bench_results.throughput_bytes_per_sec, + total_transactions_committed: self.bench_results.total_transactions_committed, + total_time_elapsed_in_sec: self.bench_results.total_time_elapsed_in_sec, + total_num_views: self.bench_results.total_num_views, + failed_num_views: self.bench_results.failed_num_views, + }; + // Open a file for writing + let mut wtr = Writer::from_path("scripts/benchmarks_results/results.csv").unwrap(); + let _ = wtr.serialize(output_csv); + let _ = wtr.flush(); + println!("Results successfully saved in scripts/benchmarks_results/results.csv"); + } } /// An api exposed by the orchestrator @@ -123,26 +157,25 @@ pub trait OrchestratorApi { /// # Errors /// if unable to serve fn get_start(&self) -> Result; - /// post endpoint for whether or not all nodes are ready + /// post endpoint for the results of the run /// # Errors /// if unable to serve - fn post_ready(&mut self) -> Result<(), ServerError>; - /// post endpoint for the results of the run + fn post_run_results(&mut self, metrics: BenchResults) -> Result<(), ServerError>; + /// post endpoint for whether or not all nodes are ready /// # Errors /// if unable to serve - fn post_run_results(&mut self) -> Result<(), ServerError>; + fn post_ready(&mut self) -> Result<(), ServerError>; } impl OrchestratorApi for OrchestratorState where - KEY: serde::Serialize + Clone + SignatureKey, - ELECTION: serde::Serialize + Clone + Send + ElectionConfig, + KEY: serde::Serialize + Clone + SignatureKey + 'static, + ELECTION: serde::Serialize + Clone + Send + ElectionConfig + 'static, { fn post_identity(&mut self, identity: IpAddr) -> Result { let node_index = self.latest_index; self.latest_index += 1; - // TODO https://github.com/EspressoSystems/HotShot/issues/850 if usize::from(node_index) >= self.config.config.total_nodes.get() { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, @@ -220,7 +253,7 @@ where } self.pub_posted.insert(node_index); - // The guess is extra 8 starting bytes are from orchestrator serialization + // The guess is the first extra 8 bytes are from orchestrator serialization pubkey.drain(..8); let register_pub_key_with_stake = PeerConfig::::from_bytes(pubkey).unwrap(); self.config.config.known_nodes_with_stake[node_index as usize] = @@ -278,7 +311,50 @@ where Ok(()) } - fn post_run_results(&mut self) -> Result<(), ServerError> { + // Aggregates results of the run from all nodes + fn post_run_results(&mut self, metrics: BenchResults) -> Result<(), ServerError> { + if metrics.total_transactions_committed != 0 { + // Deal with the bench results + if self.bench_results.total_transactions_committed == 0 { + self.bench_results = metrics; + } else { + // Deal with the bench results from different nodes + let cur_metrics = self.bench_results.clone(); + self.bench_results.avg_latency_in_sec = (metrics.avg_latency_in_sec + * metrics.num_latency + + cur_metrics.avg_latency_in_sec * cur_metrics.num_latency) + / (metrics.num_latency + cur_metrics.num_latency); + self.bench_results.num_latency += metrics.num_latency; + self.bench_results.minimum_latency_in_sec = metrics + .minimum_latency_in_sec + .min(cur_metrics.minimum_latency_in_sec); + self.bench_results.maximum_latency_in_sec = metrics + .maximum_latency_in_sec + .max(cur_metrics.maximum_latency_in_sec); + self.bench_results.throughput_bytes_per_sec = metrics + .throughput_bytes_per_sec + .max(cur_metrics.throughput_bytes_per_sec); + self.bench_results.total_transactions_committed = metrics + .total_transactions_committed + .max(cur_metrics.total_transactions_committed); + assert_eq!( + metrics.transaction_size_in_bytes, + cur_metrics.transaction_size_in_bytes + ); + self.bench_results.total_time_elapsed_in_sec = metrics + .total_time_elapsed_in_sec + .max(cur_metrics.total_time_elapsed_in_sec); + self.bench_results.total_num_views = + metrics.total_num_views.min(cur_metrics.total_num_views); + self.bench_results.failed_num_views = + metrics.failed_num_views.max(cur_metrics.failed_num_views); + } + } + self.nodes_post_results += 1; + if self.nodes_post_results >= (self.config.config.total_nodes.get() as u64) { + self.bench_results.printout(); + self.output_to_csv(); + } Ok(()) } } @@ -298,7 +374,7 @@ where ))) .expect("API file is not valid toml"); let mut api = Api::::new(api_toml)?; - api.post("postidentity", |req, state| { + api.post("post_identity", |req, state| { async move { let identity = req.string_param("identity")?.parse::(); if identity.is_err() { @@ -318,10 +394,10 @@ where } .boxed() })? - .post("tmp_node_index", |_req, state| { + .post("get_tmp_node_index", |_req, state| { async move { state.get_tmp_node_index() }.boxed() })? - .post("postpubkey", |req, state| { + .post("post_pubkey", |req, state| { async move { let node_index = req.integer_param("node_index")?; let mut pubkey = req.body_bytes(); @@ -332,18 +408,22 @@ where .get("peer_pubconfig_ready", |_req, state| { async move { state.peer_pub_ready() }.boxed() })? - .get("config_after_peer_collected", |_req, state| { + .get("get_config_after_peer_collected", |_req, state| { async move { state.get_config_after_peer_collected() }.boxed() })? .post( - "postready", + "post_ready", |_req, state: &mut ::State| async move { state.post_ready() }.boxed(), )? - .get("getstart", |_req, state| { + .get("get_start", |_req, state| { async move { state.get_start() }.boxed() })? - .post("postresults", |_req, state| { - async move { state.post_run_results() }.boxed() + .post("post_results", |req, state| { + async move { + let metrics: Result = req.body_json(); + state.post_run_results(metrics.unwrap()) + } + .boxed() })?; Ok(api) } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 61f62693b0..98ce438696 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1187,6 +1187,14 @@ impl, A: ConsensusApi + }; broadcast_event(HotShotEvent::TimeoutVoteSend(vote), &event_stream).await; + broadcast_event( + Event { + view_number: view, + event: EventType::ViewTimeout { view_number: view }, + }, + &self.output_event_stream, + ) + .await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view diff --git a/types/src/event.rs b/types/src/event.rs index 2e285ee79e..14cd65c874 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -68,6 +68,11 @@ pub enum EventType { /// The view number that has just finished view_number: TYPES::Time, }, + /// The view timed out + ViewTimeout { + /// The view that timed out + view_number: TYPES::Time, + }, /// New transactions were received from the network /// or submitted to the network by us Transactions { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index aee82425bc..b63d6d75d6 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -84,6 +84,9 @@ pub trait BlockPayload: /// Generate commitment that builders use to sign block options. fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; + + /// Get the transactions in the payload. + fn get_transactions(&self) -> &Vec; } /// extra functions required on block to be usable by hotshot-testing From d743e2a274e5e0e17cabd7cf42ac0b0ce912448d Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 8 Mar 2024 19:10:09 -0500 Subject: [PATCH 0840/1393] use config seed (#2735) --- examples/infra/mod.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index e2d7a79d2b..7ed7689637 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -851,26 +851,22 @@ where async fn initialize_networking( config: NetworkConfig, ) -> CombinedDARun { - // generate our own key - let (pub_key, _privkey) = - <::SignatureKey as SignatureKey>::generated_from_seed_indexed( - config.seed, - config.node_index, - ); + // Get our own key + let pub_key = config.config.my_own_validator_config.public_key.clone(); - // create and wait for libp2p network + // Create and wait for libp2p network let libp2p_underlying_quorum_network = libp2p_network_from_config::(config.clone(), pub_key.clone()).await; libp2p_underlying_quorum_network.wait_for_ready().await; - // extract values from config (for webserver DA network) + // Extract values from config (for webserver DA network) let WebServerConfig { url, wait_between_polls, }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - // create and wait for underlying webserver network + // Create and wait for underlying webserver network let web_quorum_network = webserver_network_from_config::(config.clone(), pub_key.clone()); @@ -878,8 +874,7 @@ where web_quorum_network.wait_for_ready().await; - // combine the two communication channel - + // Combine the two communication channels let da_channel = CombinedNetworks::new(Arc::new(UnderlyingCombinedNetworks( web_da_network.clone(), libp2p_underlying_quorum_network.clone(), From 5966fdd2d2c8e3218c22112971f11e0033897718 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 11 Mar 2024 15:17:48 +0000 Subject: [PATCH 0841/1393] [Libp2p] Add Request Response for Data to networking layer (#2679) * Don't lock channels * cargo fix * Allow killing the connected network handler * Fix issues * Remove State from NetworkNodeHandle * Remove async where it's unused * fix async std build * Fix erros a little * Starting VID request response * handle network event * Bubble up request/response * Make Request and Response just bytes at network layer * Adding to ConnectedNetwork * Hooks for request/response in handle + trait * fix request * Remove request and response tasks for now * update mod.rs * Hooked up response flow to ConnectedNetwork * Refactor interface to return result to caller * Add request and response to message struct * Clean up some message stuff * Fix build error * Hook up request and response fully * Review myself, impl functions for combine * Change Receiver interface to spawn task * try_send instead of send * fix lint * clean up request response event handle fn * fix build --- .../src/traits/networking/combined_network.rs | 18 ++- .../src/traits/networking/libp2p_network.rs | 123 +++++++++++++++--- libp2p-networking/Cargo.toml | 1 + libp2p-networking/src/lib.rs | 1 + .../src/network/behaviours/mod.rs | 3 + .../network/behaviours/request_response.rs | 76 +++++++++++ libp2p-networking/src/network/def.rs | 23 +++- libp2p-networking/src/network/mod.rs | 28 +++- libp2p-networking/src/network/node.rs | 40 +++++- libp2p-networking/src/network/node/handle.rs | 57 +++++++- libp2p-networking/tests/counter.rs | 2 +- task-impls/src/network.rs | 8 +- types/Cargo.toml | 1 + types/src/message.rs | 18 ++- types/src/traits/network.rs | 70 +++++++++- 15 files changed, 424 insertions(+), 45 deletions(-) create mode 100644 libp2p-networking/src/network/behaviours/request_response.rs diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 32fc0cfd12..574725aebb 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -16,7 +16,7 @@ use tracing::warn; use async_trait::async_trait; -use futures::join; +use futures::{channel::mpsc, join}; use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] @@ -26,7 +26,7 @@ use hotshot_types::{ data::ViewNumber, message::Message, traits::{ - network::{ConnectedNetwork, ConsensusIntentEvent}, + network::{ConnectedNetwork, ConsensusIntentEvent, ResponseChannel, ResponseMessage}, node_implementation::NodeType, }, BoxSyncFuture, @@ -304,6 +304,20 @@ impl TestableNetworkingImplementation for CombinedNetwor impl ConnectedNetwork, TYPES::SignatureKey> for CombinedNetworks { + async fn request_data( + &self, + request: Message, + recipient: TYPES::SignatureKey, + ) -> Result, NetworkError> { + self.secondary().request_data(request, recipient).await + } + + async fn spawn_request_receiver_task( + &self, + ) -> Option, ResponseChannel>)>> { + self.secondary().spawn_request_receiver_task().await + } + fn pause(&self) { self.networks.0.pause(); } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 661494451c..3199ce9edb 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -6,9 +6,9 @@ use super::NetworkingMetricsValue; use async_compatibility_layer::art::async_block_on; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, - channel::{bounded, unbounded, Sender, UnboundedReceiver, UnboundedSendError, UnboundedSender}, + channel::{self, bounded, unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, }; -use async_lock::RwLock; +use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; @@ -18,10 +18,10 @@ use hotshot_types::{ data::ViewNumber, traits::{ network::{ - ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, - NetworkMsg, + self, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, + NetworkMsg, ResponseMessage, }, - node_implementation::ConsensusTime, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, BoxSyncFuture, @@ -29,10 +29,7 @@ use hotshot_types::{ #[cfg(feature = "hotshot-testing")] use hotshot_types::{ message::{Message, MessageKind}, - traits::{ - network::{NetworkReliability, TestableNetworkingImplementation, ViewMessage}, - node_implementation::NodeType, - }, + traits::network::{NetworkReliability, TestableNetworkingImplementation, ViewMessage}, }; use hotshot_utils::{bincode::bincode_opts, version::read_version}; use libp2p_identity::PeerId; @@ -41,12 +38,13 @@ use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder}; use libp2p_networking::{ network::{ + behaviours::request_response::{Request, Response}, spawn_network_node, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, NetworkNodeConfig, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, NetworkNodeType, }, - reexport::Multiaddr, + reexport::{Multiaddr, ResponseChannel}, }; use serde::Serialize; @@ -55,8 +53,9 @@ use snafu::ResultExt; use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; use futures::{ + channel::mpsc::{self, channel, Receiver, Sender}, future::{join_all, Either}, - FutureExt, + FutureExt, StreamExt, }; use std::{ collections::BTreeSet, @@ -96,6 +95,9 @@ impl Debug for Libp2pNetwork { } } +/// Locked Option of a receiver for moving the value out of the option +type TakeReceiver = Mutex)>>>; + /// Type alias for a shared collection of peerid, multiaddrs pub type PeerInfoVec = Arc, Multiaddr)>>>; @@ -106,8 +108,12 @@ struct Libp2pNetworkInner { pk: K, /// handle to control the network handle: Arc, - /// map of known replica peer ids to public keys + /// Message Receiver receiver: UnboundedReceiver, + /// Receiver for Requests for Data, includes the request and the response chan + /// Lock should only be used once to take the channel and move it into the request + /// handler task + requests_rx: TakeReceiver, /// Sender for broadcast messages sender: UnboundedSender, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) @@ -139,7 +145,7 @@ struct Libp2pNetworkInner { /// if we're a member of the DA committee or not is_da: bool, /// Killswitch sender - kill_switch: Sender<()>, + kill_switch: channel::Sender<()>, } /// Networking implementation that uses libp2p @@ -357,6 +363,7 @@ impl Libp2pNetwork { // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs let (sender, receiver) = unbounded(); + let (requests_tx, requests_rx) = channel(100); let (node_lookup_send, node_lookup_recv) = unbounded(); let (kill_tx, kill_rx) = bounded(1); rx.set_kill_switch(kill_rx); @@ -365,6 +372,7 @@ impl Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: Arc::new(network_handle), receiver, + requests_rx: Mutex::new(Some(requests_rx)), sender: sender.clone(), pk, bootstrap_addrs_len, @@ -388,7 +396,7 @@ impl Libp2pNetwork { }), }; - result.handle_event_generator(sender, rx); + result.handle_event_generator(sender, requests_tx, rx); result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); @@ -520,6 +528,7 @@ impl Libp2pNetwork { &self, msg: NetworkEvent, sender: &UnboundedSender, + mut request_tx: Sender<(M, ResponseChannel)>, ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { @@ -564,6 +573,14 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { error!("handle_recvd_events_0_1 received `NetworkEvent::IsBootstrapped`, which should be impossible."); } + NetworkEvent::ResponseRequested(msg, chan) => { + let reqeust = bincode_opts() + .deserialize(&msg.0) + .context(FailedToSerializeSnafu)?; + request_tx + .try_send((reqeust, chan)) + .map_err(|_| NetworkError::ChannelSend)?; + } } Ok::<(), NetworkError>(()) } @@ -573,6 +590,7 @@ impl Libp2pNetwork { fn handle_event_generator( &self, sender: UnboundedSender, + request_tx: Sender<(M, ResponseChannel)>, mut network_rx: NetworkNodeReceiver, ) { let handle = self.clone(); @@ -595,12 +613,20 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } - GossipMsg(raw) | DirectRequest(raw, _, _) | DirectResponse(raw, _) => { + GossipMsg(raw) + | DirectRequest(raw, _, _) + | DirectResponse(raw, _) + | NetworkEvent::ResponseRequested(Request(raw), _) => { let message_version = read_version(raw); match message_version { Some(VERSION_0_1) => { - let _ = - handle.handle_recvd_events_0_1(message, &sender).await; + let _ = handle + .handle_recvd_events_0_1( + message, + &sender, + request_tx.clone(), + ) + .await; } Some(version) => { warn!( @@ -638,6 +664,69 @@ impl Libp2pNetwork { #[async_trait] impl ConnectedNetwork for Libp2pNetwork { + async fn request_data( + &self, + request: M, + recipient: K, + ) -> Result, NetworkError> { + self.wait_for_ready().await; + + let pid = match self + .inner + .handle + .lookup_node::(recipient.clone(), self.inner.dht_timeout) + .await + { + Ok(pid) => pid, + Err(err) => { + self.inner.metrics.message_failed_to_send.add(1); + error!( + "Failed to message {:?} because could not find recipient peer id for pk {:?}", + request, recipient + ); + return Err(NetworkError::Libp2p { source: err }); + } + }; + match self.inner.handle.request_data(&request, pid).await { + Ok(response) => match response { + Some(msg) => { + let res = bincode_opts() + .deserialize(&msg.0) + .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; + Ok(ResponseMessage::Found(res)) + } + None => Ok(ResponseMessage::NotFound), + }, + Err(e) => Err(e.into()), + } + } + + async fn spawn_request_receiver_task( + &self, + ) -> Option)>> { + let Some(mut internal_rx) = self.inner.requests_rx.lock().await.take() else { + return None; + }; + let handle = self.inner.handle.clone(); + let (mut tx, rx) = mpsc::channel(100); + async_spawn(async move { + while let Some((request, chan)) = internal_rx.next().await { + let (response_tx, response_rx) = futures::channel::oneshot::channel(); + if tx + .try_send((request, network::ResponseChannel(response_tx))) + .is_err() + { + continue; + } + let Ok(response) = response_rx.await else { + continue; + }; + let _ = handle.respond_data(&response, chan).await; + } + }); + + Some(rx) + } #[instrument(name = "Libp2pNetwork::ready_blocking", skip_all)] async fn wait_for_ready(&self) { self.wait_for_ready().await; diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index d3ac0fc1ed..e8dd1d4ebc 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -27,6 +27,7 @@ libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } serde = { workspace = true } +serde_bytes = { workspace = true } serde_json = { workspace = true } snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ diff --git a/libp2p-networking/src/lib.rs b/libp2p-networking/src/lib.rs index 0bc4daf573..b536cb49f3 100644 --- a/libp2p-networking/src/lib.rs +++ b/libp2p-networking/src/lib.rs @@ -5,6 +5,7 @@ pub mod network; /// symbols needed to implement a networking instance over libp2p-netorking pub mod reexport { + pub use libp2p::request_response::ResponseChannel; pub use libp2p::Multiaddr; pub use libp2p_identity::PeerId; } diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/libp2p-networking/src/network/behaviours/mod.rs index a093b22f9e..18b7504acf 100644 --- a/libp2p-networking/src/network/behaviours/mod.rs +++ b/libp2p-networking/src/network/behaviours/mod.rs @@ -6,3 +6,6 @@ pub mod exponential_backoff; /// Wrapper around Kademlia pub mod dht; + +/// Request Response Handling for data requests +pub mod request_response; diff --git a/libp2p-networking/src/network/behaviours/request_response.rs b/libp2p-networking/src/network/behaviours/request_response.rs new file mode 100644 index 0000000000..8b3887e13c --- /dev/null +++ b/libp2p-networking/src/network/behaviours/request_response.rs @@ -0,0 +1,76 @@ +use std::collections::HashMap; + +use futures::channel::oneshot::Sender; +use libp2p::request_response::{Message, OutboundRequestId}; +use serde::{Deserialize, Serialize}; + +use crate::network::NetworkEvent; + +/// Request for Consenus data +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Request(#[serde(with = "serde_bytes")] pub Vec); + +/// Response for some VID data that we already collected +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Response( + /// Data + #[serde(with = "serde_bytes")] + pub Vec, +); + +#[derive(Default, Debug)] +/// Handler for request response messages +pub(crate) struct RequestResponseState { + /// Map requests to the their response channels + request_map: HashMap>>, +} + +impl RequestResponseState { + /// Handles messages from the `request_response` behaviour by sending them to the application + pub fn handle_request_response( + &mut self, + event: libp2p::request_response::Event, + ) -> Option { + match event { + libp2p::request_response::Event::Message { peer: _, message } => match message { + Message::Request { + request_id: _, + request, + channel, + } => Some(NetworkEvent::ResponseRequested(request, channel)), + Message::Response { + request_id, + response, + } => { + let Some(chan) = self.request_map.remove(&request_id) else { + return None; + }; + if chan.send(Some(response)).is_err() { + tracing::warn!("Failed to send resonse to client, channel closed."); + } + None + } + }, + libp2p::request_response::Event::OutboundFailure { + peer: _, + request_id, + error, + } => { + tracing::warn!("Error Sending Request {:?}", error); + let Some(chan) = self.request_map.remove(&request_id) else { + return None; + }; + if chan.send(None).is_err() { + tracing::warn!("Failed to send resonse to client, channel closed."); + } + None + } + libp2p::request_response::Event::InboundFailure { .. } + | libp2p::request_response::Event::ResponseSent { .. } => None, + } + } + /// Add a requests return channel to the map of pending requests + pub fn add_request(&mut self, id: OutboundRequestId, chan: Sender>) { + self.request_map.insert(id, chan); + } +} diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 80223990e3..9ae2f70d75 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -2,7 +2,7 @@ use futures::channel::oneshot::Sender; use libp2p::{ gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, - request_response::ResponseChannel, + request_response::{cbor, ResponseChannel}, Multiaddr, }; use libp2p_identity::PeerId; @@ -14,6 +14,7 @@ use super::{ dht::{DHTBehaviour, DHTEvent, KadPutQuery}, direct_message::{DMBehaviour, DMEvent, DMRequest}, exponential_backoff::ExponentialBackoff, + request_response::{Request, Response}, }, NetworkEventInternal, }; @@ -46,7 +47,11 @@ pub struct NetworkDef { /// purpose: directly messaging peer #[debug(skip)] - pub request_response: DMBehaviour, + pub direct_message: DMBehaviour, + + /// Behaviour for requesting and receiving data + #[debug(skip)] + pub request_response: libp2p::request_response::cbor::Behaviour, } impl NetworkDef { @@ -56,12 +61,14 @@ impl NetworkDef { gossipsub: GossipBehaviour, dht: DHTBehaviour, identify: IdentifyBehaviour, - request_response: DMBehaviour, + direct_message: DMBehaviour, + request_response: cbor::Behaviour, ) -> NetworkDef { Self { gossipsub, dht, identify, + direct_message, request_response, } } @@ -144,12 +151,12 @@ impl NetworkDef { backoff: ExponentialBackoff::default(), retry_count, }; - self.request_response.add_direct_request(request); + self.direct_message.add_direct_request(request); } /// Add a direct response for a channel pub fn add_direct_response(&mut self, chan: ResponseChannel>, msg: Vec) { - self.request_response.add_direct_response(chan, msg); + self.direct_message.add_direct_response(chan, msg); } } @@ -176,3 +183,9 @@ impl From for NetworkEventInternal { Self::IdentifyEvent(Box::new(event)) } } + +impl From> for NetworkEventInternal { + fn from(event: libp2p::request_response::Event) -> Self { + Self::RequestResponseEvent(event) + } +} diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 77f9f8f92c..b614b61d35 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -17,9 +17,13 @@ pub use self::{ }, }; -use self::behaviours::{dht::DHTEvent, direct_message::DMEvent}; +use self::behaviours::{ + dht::DHTEvent, + direct_message::DMEvent, + request_response::{Request, Response}, +}; use bincode::Options; -use futures::channel::oneshot::Sender; +use futures::channel::oneshot::{self, Sender}; use hotshot_utils::bincode::bincode_opts; use libp2p::{ build_multiaddr, @@ -122,6 +126,22 @@ pub enum ClientRequest { }, /// client request to send a direct reply to a message DirectResponse(ResponseChannel>, Vec), + /// request for data from another peer + DataRequest { + /// request sent on wire + request: Request, + /// Peer to try sending the request to + peer: PeerId, + /// Send back request ID to client + chan: oneshot::Sender>, + }, + /// Respond with some data to another peer + DataResponse { + /// Data + response: Response, + /// Send back channel + chan: ResponseChannel, + }, /// prune a peer Prune(PeerId), /// add vec of known peers or addresses @@ -168,6 +188,8 @@ pub enum NetworkEvent { DirectRequest(Vec, PeerId, ResponseChannel>), /// Recv-ed a direct response from a node (that hopefully was initiated by this node) DirectResponse(Vec, PeerId), + /// A peer is asking us for data + ResponseRequested(Request, ResponseChannel), /// Report that kademlia has successfully bootstrapped into the network IsBootstrapped, } @@ -185,6 +207,8 @@ pub enum NetworkEventInternal { GossipEvent(Box), /// a direct message event DMEvent(DMEvent), + /// a request response event + RequestResponseEvent(libp2p::request_response::Event), } /// Bind all interfaces on port `port` diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index a77ae15b32..f1724f3900 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -25,6 +25,7 @@ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMEvent}, exponential_backoff::ExponentialBackoff, + request_response::{Request, RequestResponseState, Response}, }; use async_compatibility_layer::{ art::async_spawn, @@ -84,6 +85,8 @@ pub struct NetworkNode { config: NetworkNodeConfig, /// the listener id we are listening on, if it exists listener_id: Option, + /// Handler for requests and response behavior events. + request_response_state: RequestResponseState, } impl NetworkNode { @@ -269,14 +272,23 @@ impl NetworkNode { let rrconfig = RequestResponseConfig::default(); - let request_response: libp2p::request_response::cbor::Behaviour, Vec> = + let direct_message: libp2p::request_response::cbor::Behaviour, Vec> = + RequestResponse::new( + [( + StreamProtocol::new("/HotShot/direct_message/1.0"), + ProtocolSupport::Full, + )] + .into_iter(), + rrconfig.clone(), + ); + let request_response: libp2p::request_response::cbor::Behaviour = RequestResponse::new( [( StreamProtocol::new("/HotShot/request_response/1.0"), ProtocolSupport::Full, )] .into_iter(), - rrconfig, + rrconfig.clone(), ); let network = NetworkDef::new( @@ -289,7 +301,8 @@ impl NetworkNode { .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), ), identify, - DMBehaviour::new(request_response), + DMBehaviour::new(direct_message), + request_response, ); // build swarm @@ -320,6 +333,7 @@ impl NetworkNode { swarm, config, listener_id: None, + request_response_state: RequestResponseState::default(), }) } @@ -424,6 +438,23 @@ impl NetworkNode { ClientRequest::DirectResponse(chan, msg) => { behaviour.add_direct_response(chan, msg); } + ClientRequest::DataRequest { + request, + peer, + chan, + } => { + let id = behaviour.request_response.send_request(&peer, request); + self.request_response_state.add_request(id, chan); + } + ClientRequest::DataResponse { response, chan } => { + if behaviour + .request_response + .send_response(chan, response) + .is_err() + { + info!("Data Response dropped because response peer disconnected"); + } + } ClientRequest::AddKnownPeers(peers) => { self.add_known_peers(&peers); } @@ -580,6 +611,9 @@ impl NetworkNode { NetworkEvent::DirectResponse(data, pid) } }), + NetworkEventInternal::RequestResponseEvent(e) => { + self.request_response_state.handle_request_response(e) + } }; if let Some(event) = maybe_event { diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index d3fdb526dd..2dd7c5542c 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -1,12 +1,16 @@ use crate::network::{ - error::DHTError, gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, - NetworkNodeConfig, NetworkNodeConfigBuilderError, + behaviours::request_response::{Request, Response}, + error::{CancelledRequestSnafu, DHTError}, + gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, + NetworkNodeConfigBuilderError, }; use async_compatibility_layer::{ art::{async_sleep, async_timeout, future::to}, channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use bincode::Options; +use futures::channel::oneshot; + use hotshot_utils::bincode::bincode_opts; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; @@ -172,6 +176,51 @@ impl NetworkNodeHandle { } Ok(()) } + + /// Request another peer for some data we want. Returns the id of the request + /// + /// # Errors + /// + /// Will retrun a networking error if the channel closes before the result + /// can be sent back + pub async fn request_data( + &self, + request: &impl Serialize, + peer: PeerId, + ) -> Result, NetworkNodeHandleError> { + let (tx, rx) = oneshot::channel(); + let serialized_msg = bincode_opts() + .serialize(request) + .context(SerializationSnafu)?; + let req = ClientRequest::DataRequest { + request: Request(serialized_msg), + peer, + chan: tx, + }; + + self.send_request(req).await?; + + rx.await.map_err(|_| NetworkNodeHandleError::RecvError) + } + + /// Send a response to a request with the response channel + /// # Errors + /// Will error if the client request channel is closed, or serialization fails. + pub async fn respond_data( + &self, + response: &impl Serialize, + chan: ResponseChannel, + ) -> Result<(), NetworkNodeHandleError> { + let serialized_msg = bincode_opts() + .serialize(response) + .context(SerializationSnafu)?; + let req = ClientRequest::DataResponse { + response: Response(serialized_msg), + chan, + }; + self.send_request(req).await + } + /// Look up a peer's addresses in kademlia /// NOTE: this should always be called before any `request_response` is initiated /// # Errors @@ -209,8 +258,6 @@ impl NetworkNodeHandle { key: &impl Serialize, value: &impl Serialize, ) -> Result<(), NetworkNodeHandleError> { - use crate::network::error::CancelledRequestSnafu; - let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::PutDHT { key: bincode_opts().serialize(key).context(SerializationSnafu)?, @@ -236,8 +283,6 @@ impl NetworkNodeHandle { key: &impl Serialize, retry_count: u8, ) -> Result { - use crate::network::error::CancelledRequestSnafu; - let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetDHT { key: bincode_opts().serialize(key).context(SerializationSnafu)?, diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index d701a575b2..112c6595d8 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -72,7 +72,7 @@ pub async fn counter_handle_network_event( use CounterMessage::*; use NetworkEvent::*; match event { - IsBootstrapped => {} + IsBootstrapped | NetworkEvent::ResponseRequested(..) => {} GossipMsg(m) | DirectResponse(m, _) => { if let Ok(msg) = bincode_opts().deserialize::(&m) { match msg { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index fd64c493b9..b3fb049544 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -12,7 +12,8 @@ use hotshot_task::task::{Task, TaskState}; use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ message::{ - CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, SequencingMessage, + CommitteeConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, + SequencingMessage, }, traits::{ election::Membership, @@ -21,8 +22,8 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use tracing::error; use tracing::instrument; +use tracing::{error, warn}; /// quorum filter pub fn quorum_filter(event: &HotShotEvent) -> bool { @@ -170,6 +171,9 @@ impl NetworkMessageTaskState { hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { transactions.push(transaction); } + DataMessage::DataResponse(_) | DataMessage::RequestData(_) => { + warn!("Request and Response messages should not be received in the NetworkMessage task"); + } }, }; } diff --git a/types/Cargo.toml b/types/Cargo.toml index e57b807bc7..8c4964884a 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -28,6 +28,7 @@ digest = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } either = { workspace = true, features = ["serde"] } +futures = { workspace = true } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } diff --git a/types/src/message.rs b/types/src/message.rs index b71d2c8c89..47ab483fbe 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -12,14 +12,15 @@ use crate::simple_vote::{ DAVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; +use crate::traits::network::ResponseMessage; use crate::traits::signature_key::SignatureKey; use crate::vote::HasViewNumber; use crate::{ data::{DAProposal, VidDisperse}, simple_vote::QuorumVote, traits::{ - network::{NetworkMsg, ViewMessage}, - node_implementation::NodeType, + network::{DataRequest, NetworkMsg, ViewMessage}, + node_implementation::{ConsensusTime, NodeType}, }, }; @@ -119,15 +120,18 @@ impl ViewMessage for MessageKind { match &self { MessageKind::Consensus(message) => message.view_number(), MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, + MessageKind::Data(DataMessage::RequestData(msg)) => msg.view, + MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { + ResponseMessage::Found(m) => m.view_number(), + ResponseMessage::NotFound => TYPES::Time::new(1), + }, } } fn purpose(&self) -> MessagePurpose { match &self { MessageKind::Consensus(message) => message.purpose(), - MessageKind::Data(message) => match message { - DataMessage::SubmitTransaction(_, _) => MessagePurpose::Data, - }, + MessageKind::Data(_) => MessagePurpose::Data, } } } @@ -296,6 +300,10 @@ pub enum DataMessage { /// TODO rethink this when we start to send these messages /// we only need the view number for broadcast SubmitTransaction(TYPES::Transaction, TYPES::Time), + /// A request for data + RequestData(DataRequest), + /// A response to a data request + DataResponse(ResponseMessage), } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 549ae5ece9..ab49bb282a 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -5,14 +5,21 @@ use async_compatibility_layer::art::async_sleep; #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; +use derivative::Derivative; use dyn_clone::DynClone; +use futures::channel::{mpsc, oneshot}; use libp2p_networking::network::NetworkNodeHandleError; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ViewNumber, message::MessagePurpose, BoxSyncFuture}; +use crate::{ + data::ViewNumber, + message::{MessagePurpose, SequencingMessage}, + vid::VidCommitment, + BoxSyncFuture, +}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use rand::{ @@ -21,7 +28,7 @@ use rand::{ }; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::{collections::BTreeSet, fmt::Debug, sync::Arc, time::Duration}; +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; impl From for NetworkError { fn from(error: NetworkNodeHandleError) -> Self { @@ -138,6 +145,8 @@ pub enum NetworkError { ShutDown, /// unable to cancel a request, the request has already been cancelled UnableToCancel, + /// The requested data was not found + NotFound, } #[derive(Clone, Debug)] @@ -218,6 +227,8 @@ pub trait NetworkMsg: { } +/// Trait that bundles what we need from a request ID +pub trait Id: Eq + PartialEq + Hash {} impl NetworkMsg for Vec {} /// a message @@ -229,6 +240,41 @@ pub trait ViewMessage { fn purpose(&self) -> MessagePurpose; } +/// Wraps a oneshot channel for responding to requests +pub struct ResponseChannel(pub oneshot::Sender); + +/// A request for some data that the consensus layer is asking for. +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct DataRequest { + /// Hotshot key of who to send the request to + pub recipient: TYPES::SignatureKey, + /// Request + pub request: RequestKind, + /// View this message is for + pub view: TYPES::Time, +} + +/// Underlying data request +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +pub enum RequestKind { + /// Request VID data by our key and the VID commitment + VID(VidCommitment, TYPES::SignatureKey), + /// Request a DA proposal for a certain view + DAProposal(TYPES::Time), +} + +/// A resopnse for a request. `SequencingMessage` is the same as other network messages +/// The kind of message `M` is is determined by what we requested +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum ResponseMessage { + /// Peer returned us some data + Found(SequencingMessage), + /// Peer failed to get us data + NotFound, +} + /// represents a networking implmentration /// exposes low level API for interacting with a network /// intended to be implemented for libp2p, the centralized server, @@ -283,6 +329,26 @@ pub trait ConnectedNetwork: /// If there is a network-related failure. async fn recv_msgs(&self) -> Result, NetworkError>; + /// Ask request the network for some data. Returns the request ID for that data, + /// The ID returned can be used for cancelling the request + async fn request_data( + &self, + _request: M, + _recipient: K, + ) -> Result, NetworkError> { + Err(NetworkError::UnimplementedFeature) + } + + /// Spawn a request task in the given network layer. If it supports + /// Request and responses it will return the receiving end of a channel. + /// Requests the network receives will be sent over this channel along + /// with a return channel to send the response back to. + /// + /// Returns `None`` if network does not support handling requests + async fn spawn_request_receiver_task(&self) -> Option)>> { + None + } + /// queues lookup of a node async fn queue_node_lookup( &self, From 2e3114934ae7fbee0d3cb68e982cc6b65391630b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 11 Mar 2024 17:32:49 +0000 Subject: [PATCH 0842/1393] [NETWORKING] Fixes for Combined Network (#2743) * select! in recv * Some combined fixes * Swap custom cache for LruCache * Remove one exprot --- hotshot/src/traits.rs | 4 +- .../src/traits/networking/combined_network.rs | 159 +++++------------- task-impls/src/network.rs | 2 +- testing/tests/combined_network.rs | 39 ----- types/src/traits/network.rs | 2 +- 5 files changed, 47 insertions(+), 159 deletions(-) diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 48a2669493..150d2bf015 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,9 +13,7 @@ pub use storage::{Result as StorageResult, Storage}; pub mod implementations { pub use super::{ networking::{ - combined_network::{ - calculate_hash_of, Cache, CombinedNetworks, UnderlyingCombinedNetworks, - }, + combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryNetwork}, web_server_network::WebServerNetwork, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 574725aebb..f0bef0e2c3 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -7,16 +7,18 @@ use hotshot_constants::{ COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }; +use lru::LruCache; use std::{ - collections::{BTreeSet, HashSet}, + collections::BTreeSet, hash::Hasher, + num::NonZeroUsize, sync::atomic::{AtomicU64, Ordering}, }; use tracing::warn; use async_trait::async_trait; -use futures::{channel::mpsc, join}; +use futures::{channel::mpsc, join, select, FutureExt}; use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] @@ -49,67 +51,6 @@ use std::time::Duration; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -/// A cache to keep track of the last n messages we've seen, avoids reprocessing duplicates -/// from multiple networks -#[derive(Clone, Debug)] -pub struct Cache { - /// The maximum number of items to store in the cache - capacity: usize, - /// The cache itself - inner: HashSet, - /// The hashes of the messages in the cache, in order of insertion - hashes: Vec, -} - -impl Cache { - /// Create a new cache with the given capacity - #[must_use] - pub fn new(capacity: usize) -> Self { - Self { - capacity, - inner: HashSet::with_capacity(capacity), - hashes: Vec::with_capacity(capacity), - } - } - - /// Insert a hash into the cache - pub fn insert(&mut self, hash: u64) { - if self.inner.contains(&hash) { - return; - } - - // calculate how much we are over and remove that many elements from the cache. deal with overflow - let over = (self.hashes.len() + 1).saturating_sub(self.capacity); - if over > 0 { - for _ in 0..over { - let hash = self.hashes.remove(0); - self.inner.remove(&hash); - } - } - - self.inner.insert(hash); - self.hashes.push(hash); - } - - /// Check if the cache contains a hash - #[must_use] - pub fn contains(&self, hash: u64) -> bool { - self.inner.contains(&hash) - } - - /// Get the number of items in the cache - #[must_use] - pub fn len(&self) -> usize { - self.inner.len() - } - - /// True if the cache is empty false otherwise - #[must_use] - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} - /// Helper function to calculate a hash of a type that implements Hash pub fn calculate_hash_of(t: &T) -> u64 { let mut s = DefaultHasher::new(); @@ -128,7 +69,7 @@ pub struct CombinedNetworks { networks: Arc>, /// Last n seen messages to prevent processing duplicates - message_cache: Arc>, + message_cache: Arc>>, /// If the primary network is down (0) or not, and for how many messages primary_down: Arc, @@ -142,11 +83,17 @@ pub struct CombinedNetworks { impl CombinedNetworks { /// Constructor + /// + /// # Panics + /// + /// Panics if `COMBINED_NETWORK_CACHE_SIZE` is 0 #[must_use] pub fn new(networks: Arc>) -> Self { Self { networks, - message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + message_cache: Arc::new(RwLock::new(LruCache::new( + NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), + ))), primary_down: Arc::new(AtomicU64::new(0)), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), @@ -276,14 +223,18 @@ impl TestableNetworkingImplementation for CombinedNetwor ); let quorum_net = Self { networks: Arc::new(quorum_networks), - message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + message_cache: Arc::new(RwLock::new(LruCache::new( + NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), + ))), primary_down: Arc::new(AtomicU64::new(0)), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), }; let da_net = Self { networks: Arc::new(da_networks), - message_cache: Arc::new(RwLock::new(Cache::new(COMBINED_NETWORK_CACHE_SIZE))), + message_cache: Arc::new(RwLock::new(LruCache::new( + NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), + ))), primary_down: Arc::new(AtomicU64::new(0)), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), @@ -429,25 +380,28 @@ impl ConnectedNetwork, TYPES::SignatureKey> async fn recv_msgs(&self) -> Result>, NetworkError> { // recv on both networks because nodes may be accessible only on either. discard duplicates // TODO: improve this algorithm: https://github.com/EspressoSystems/HotShot/issues/2089 - let mut primary_msgs = self.primary().recv_msgs().await?; - let mut secondary_msgs = self.secondary().recv_msgs().await?; + let mut primary_fut = self.primary().recv_msgs().fuse(); + let mut secondary_fut = self.secondary().recv_msgs().fuse(); - primary_msgs.append(secondary_msgs.as_mut()); + let msgs = select! { + p = primary_fut => p?, + s = secondary_fut => s?, + }; - let mut filtered_msgs = Vec::with_capacity(primary_msgs.len()); - for msg in primary_msgs { + let mut filtered_msgs = Vec::with_capacity(msgs.len()); + for msg in msgs { // see if we've already seen this message if !self .message_cache .read() .await - .contains(calculate_hash_of(&msg)) + .contains(&calculate_hash_of(&msg)) { filtered_msgs.push(msg.clone()); self.message_cache .write() .await - .insert(calculate_hash_of(&msg)); + .put(calculate_hash_of(&msg), ()); } } @@ -473,51 +427,26 @@ impl ConnectedNetwork, TYPES::SignatureKey> inject_consensus_info(self.secondary(), event).await; } - async fn update_view(&self, view: &u64) { - let mut cancel_tasks = Vec::new(); - { - let mut map_lock = self.delayed_tasks.write().await; - while let Some((first_view, _tasks)) = map_lock.first_key_value() { - if first_view < view { - if let Some((_view, tasks)) = map_lock.pop_first() { - let mut ctasks = tasks.into_iter().map(cancel_task).collect(); - cancel_tasks.append(&mut ctasks); + fn update_view(&self, view: u64) { + let delayed_map = self.delayed_tasks.clone(); + async_spawn(async move { + let mut cancel_tasks = Vec::new(); + { + let mut map_lock = delayed_map.write().await; + while let Some((first_view, _tasks)) = map_lock.first_key_value() { + if *first_view < view { + if let Some((_view, tasks)) = map_lock.pop_first() { + let mut ctasks = tasks.into_iter().map(cancel_task).collect(); + cancel_tasks.append(&mut ctasks); + } else { + break; + } } else { break; } - } else { - break; } } - } - join_all(cancel_tasks).await; - } -} - -#[cfg(test)] -mod test { - use super::*; - use tracing::instrument; - - /// cache eviction test - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[instrument] - async fn test_cache_eviction() { - let mut cache = Cache::new(3); - cache.insert(1); - cache.insert(2); - cache.insert(3); - cache.insert(4); - assert_eq!(cache.inner.len(), 3); - assert_eq!(cache.hashes.len(), 3); - assert!(!cache.inner.contains(&1)); - assert!(cache.inner.contains(&2)); - assert!(cache.inner.contains(&3)); - assert!(cache.inner.contains(&4)); - assert!(!cache.hashes.contains(&1)); - assert!(cache.hashes.contains(&2)); - assert!(cache.hashes.contains(&3)); - assert!(cache.hashes.contains(&4)); + join_all(cancel_tasks).await; + }); } } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index b3fb049544..a31ca28682 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -358,7 +358,7 @@ impl, TYPES::Signa ), HotShotEvent::ViewChange(view) => { self.view = view; - self.channel.update_view(&self.view.get_u64()).await; + self.channel.update_view(self.view.get_u64()); return None; } HotShotEvent::Shutdown => { diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 46882e5b13..fd2c973e8b 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -10,45 +10,6 @@ use hotshot_testing::{ use rand::Rng; use tracing::instrument; -use hotshot::traits::implementations::{calculate_hash_of, Cache}; -use hotshot_example_types::block_types::TestTransaction; - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_hash_calculation() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - assert_eq!(calculate_hash_of(&message1), calculate_hash_of(&message1)); - assert_ne!(calculate_hash_of(&message1), calculate_hash_of(&message2)); -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_cache_integrity() { - let message1 = TestTransaction(vec![0; 32]); - let message2 = TestTransaction(vec![1; 32]); - - let mut cache = Cache::new(3); - - // test insertion integrity - cache.insert(calculate_hash_of(&message1)); - cache.insert(calculate_hash_of(&message2)); - - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - - // check that the cache is not modified on duplicate entries - cache.insert(calculate_hash_of(&message1)); - assert!(cache.contains(calculate_hash_of(&message1))); - assert!(cache.contains(calculate_hash_of(&message2))); - assert_eq!(cache.len(), 2); -} - /// A run with both the webserver and libp2p functioning properly #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index ab49bb282a..f3fec1c451 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -364,7 +364,7 @@ pub trait ConnectedNetwork: async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} /// handles view update - async fn update_view(&self, _view: &u64) {} + fn update_view(&self, _view: u64) {} } /// Describes additional functionality needed by the test network implementation From b0886ebbc70cd851c9c8165729fe0a946b511ca6 Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Mon, 11 Mar 2024 13:48:42 -0400 Subject: [PATCH 0843/1393] [Builder] Hotshot events for non-staking nodes (#2721) * add non_staking fields * make non-stakes fiels Option * add builder info into Hotshot config * remove option and rename * fix testnode * remove option * remove unwrap * fix test_builder * fix task_helper * fix test runner * filter based on stake values * replace total_nodes with num_nodes_with_stake * split da committee size * fix remaining da size split * fix lint * add separate get commitee utils * fix already pub keys for non-staking * use split get_committe * accomdate changes --- examples/combined/all.rs | 2 +- examples/infra/mod.rs | 41 +++++--- examples/libp2p/all.rs | 2 +- examples/webserver/all.rs | 2 +- hotshot/Cargo.toml | 16 ++-- hotshot/src/lib.rs | 4 +- hotshot/src/tasks/mod.rs | 2 +- .../src/traits/election/static_committee.rs | 86 ++++++++++++++--- orchestrator/api.toml | 2 +- orchestrator/src/config.rs | 30 ++++-- orchestrator/src/lib.rs | 14 +-- task-impls/src/da.rs | 2 +- task-impls/src/network.rs | 2 +- testing-macros/tests/tests.rs | 4 +- testing/src/task_helpers.rs | 31 ++++--- testing/src/test_builder.rs | 93 ++++++++++++------- testing/src/test_runner.rs | 17 +++- testing/tests/catchup.rs | 10 +- testing/tests/combined_network.rs | 10 +- testing/tests/libp2p.rs | 4 +- testing/tests/timeout.rs | 4 +- types/src/data.rs | 2 +- types/src/lib.rs | 13 ++- types/src/traits/election.rs | 15 ++- types/src/traits/node_implementation.rs | 11 ++- 25 files changed, 282 insertions(+), 137 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 4c1605c48a..5cce2378d3 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -76,7 +76,7 @@ async fn main() { // nodes let mut nodes = Vec::new(); - for _ in 0..config.config.total_nodes.into() { + for _ in 0..config.config.num_nodes_with_stake.into() { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::( diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 7ed7689637..f52fc20fe7 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -181,13 +181,16 @@ pub fn read_orchestrator_init_config() -> ( load_config_from_file::(&args.config_file); if let Some(total_nodes_string) = matches.get_one::("total_nodes") { - config.config.total_nodes = total_nodes_string.parse::().unwrap(); + config.config.num_nodes_with_stake = total_nodes_string.parse::().unwrap(); config.config.known_nodes_with_stake = - vec![PeerConfig::default(); config.config.total_nodes.get() as usize]; - error!("config.config.total_nodes: {:?}", config.config.total_nodes); + vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize]; + error!( + "config.config.total_nodes: {:?}", + config.config.num_nodes_with_stake + ); } if let Some(da_committee_size_string) = matches.get_one::("da_committee_size") { - config.config.da_committee_size = da_committee_size_string.parse::().unwrap(); + config.config.da_staked_committee_size = da_committee_size_string.parse::().unwrap(); } if let Some(transactions_per_round_string) = matches.get_one::("transactions_per_round") { @@ -238,7 +241,7 @@ pub fn load_config_from_file( ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1); // initialize it with size for better assignment of peers' config config.config.known_nodes_with_stake = - vec![PeerConfig::default(); config.config.total_nodes.get() as usize]; + vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize]; config } @@ -345,8 +348,8 @@ async fn libp2p_network_from_config( // generate network let mut config_builder = NetworkNodeConfigBuilder::default(); - assert!(config.config.total_nodes.get() > 2); - let replicated_nodes = NonZeroUsize::new(config.config.total_nodes.get() - 2).unwrap(); + assert!(config.config.num_nodes_with_stake.get() > 2); + let replicated_nodes = NonZeroUsize::new(config.config.num_nodes_with_stake.get() - 2).unwrap(); config_builder.replication_factor(replicated_nodes); config_builder.identity(identity.clone()); @@ -380,10 +383,10 @@ async fn libp2p_network_from_config( let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); - for i in 0..config.config.total_nodes.get() as u64 { + for i in 0..config.config.num_nodes_with_stake.get() as u64 { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; let pub_key = TYPES::SignatureKey::from_private(&privkey); - if i < config.config.da_committee_size as u64 { + if i < config.config.da_staked_committee_size as u64 { da_keys.insert(pub_key.clone()); } all_keys.insert(pub_key); @@ -458,11 +461,15 @@ pub trait RunDA< // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { - TYPES::Membership::default_election_config(config.config.total_nodes.get() as u64) + TYPES::Membership::default_election_config( + config.config.num_nodes_with_stake.get() as u64, + config.config.num_nodes_without_stake as u64, + ) }); let committee_election_config = TYPES::Membership::default_election_config( - config.config.da_committee_size.try_into().unwrap(), + config.config.da_staked_committee_size.try_into().unwrap(), + config.config.num_nodes_without_stake as u64, ); let networks_bundle = Networks { quorum_network: quorum_network.clone().into(), @@ -973,13 +980,19 @@ pub async fn main_entry_point< rounds, transactions_per_round, node_index, - config: HotShotConfig { total_nodes, .. }, + config: HotShotConfig { + num_nodes_with_stake, + .. + }, .. } = run_config; let mut txn_rng = StdRng::seed_from_u64(node_index); - let transactions_to_send_per_round = - calculate_num_tx_per_round(node_index, total_nodes.get(), transactions_per_round); + let transactions_to_send_per_round = calculate_num_tx_per_round( + node_index, + num_nodes_with_stake.get(), + transactions_per_round, + ); let mut transactions = Vec::new(); for round in 0..rounds { diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 9e6f2846fc..dbe486d07f 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -43,7 +43,7 @@ async fn main() { // nodes let mut nodes = Vec::new(); - for _ in 0..config.config.total_nodes.into() { + for _ in 0..config.config.num_nodes_with_stake.into() { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::( diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index d7c2df99fe..140e514124 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -75,7 +75,7 @@ async fn main() { // multi validator run let mut nodes = Vec::new(); - for _ in 0..(config.config.total_nodes.get()) { + for _ in 0..(config.config.num_nodes_with_stake.get()) { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::( diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 666c3e1be2..da8f125e0b 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -28,29 +28,29 @@ bimap = "0.6.3" bincode = { workspace = true } clap = { version = "4.5", features = ["derive", "env"], optional = true } commit = { workspace = true } -hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } dashmap = "5.5.1" +derive_more = "0.99.17" either = { workspace = true } embed-doc-image = "0.1.4" +ethereum-types = { workspace = true } futures = { workspace = true } +hotshot-constants = { path = "../constants" } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types", version = "0.1.0", default-features = false } +hotshot-task = { path = "../task" } hotshot-utils = { path = "../utils" } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } +lru = "0.12.3" +portpicker = "0.1.1" rand = { workspace = true } serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -derive_more = "0.99.17" -portpicker = "0.1.1" -lru = "0.12.3" -hotshot-task = { path = "../task" } - tracing = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] @@ -59,10 +59,10 @@ tokio = { workspace = true } async-std = { workspace = true } [dev-dependencies] -clap = { version = "4.5", features = ["derive", "env"] } -toml = { workspace = true } blake3 = { workspace = true } +clap = { version = "4.5", features = ["derive", "env"] } local-ip-address = "0.6.1" +toml = { workspace = true } [lints] workspace = true diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6779ca77c3..a65eb869d9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -324,7 +324,7 @@ impl> SystemContext { sender: api.public_key.clone(), kind: MessageKind::from(message), }, - da_membership.get_committee(view_number), + da_membership.get_whole_committee(view_number), ), api .send_external_event(Event { @@ -565,7 +565,7 @@ impl> ConsensusApi for SystemContextHandle { fn total_nodes(&self) -> NonZeroUsize { - self.hotshot.config.total_nodes + self.hotshot.config.num_nodes_with_stake } fn propose_min_round_time(&self) -> Duration { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5ef1926478..479204817d 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -125,7 +125,7 @@ pub async fn inject_consensus_polls< // With the Push CDN, we are _always_ polling for latest anyway. let is_da = consensus_state .committee_membership - .get_committee(::Time::new(0)) + .get_whole_committee(::Time::new(0)) .contains(&consensus_state.public_key); // If we are, poll for latest DA proposal. diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 2cf01a36f5..438451f575 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,19 +1,19 @@ // use ark_bls12_381::Parameters as Param381; +use ethereum_types::U256; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::{ election::{ElectionConfig, Membership}, node_implementation::NodeType, - signature_key::SignatureKey, + signature_key::{SignatureKey, StakeTableEntryType}, }; use hotshot_types::PeerConfig; +#[cfg(feature = "randomized-leader-election")] +use rand::{rngs::StdRng, Rng}; #[allow(deprecated)] use serde::{Deserialize, Serialize}; use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; -#[cfg(feature = "randomized-leader-election")] -use rand::{rngs::StdRng, Rng}; - /// Dummy implementation of [`Membership`] #[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -22,6 +22,8 @@ pub struct GeneralStaticCommittee { nodes_with_stake: Vec, /// The nodes on the static committee and their stake committee_nodes_with_stake: Vec, + /// builder nodes + committee_nodes_without_stake: Vec, /// Node type phantom _type_phantom: PhantomData, } @@ -32,10 +34,15 @@ pub type StaticCommittee = GeneralStaticCommittee; impl GeneralStaticCommittee { /// Creates a new dummy elector #[must_use] - pub fn new(_nodes: &[PUBKEY], nodes_with_stake: Vec) -> Self { + pub fn new( + _nodes: &[PUBKEY], + nodes_with_stake: Vec, + nodes_without_stake: Vec, + ) -> Self { Self { nodes_with_stake: nodes_with_stake.clone(), committee_nodes_with_stake: nodes_with_stake, + committee_nodes_without_stake: nodes_without_stake, _type_phantom: PhantomData, } } @@ -45,7 +52,9 @@ impl GeneralStaticCommittee { #[derive(Default, Clone, Serialize, Deserialize, core::fmt::Debug)] pub struct StaticElectionConfig { /// Number of nodes on the committee - num_nodes: u64, + num_nodes_with_stake: u64, + /// Number of non staking nodes + num_nodes_without_stake: u64, } impl ElectionConfig for StaticElectionConfig {} @@ -95,8 +104,14 @@ where } } - fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType { - StaticElectionConfig { num_nodes } + fn default_election_config( + num_nodes_with_stake: u64, + num_nodes_without_stake: u64, + ) -> TYPES::ElectionConfigType { + StaticElectionConfig { + num_nodes_with_stake, + num_nodes_without_stake, + } } fn create_election( @@ -107,12 +122,27 @@ where .iter() .map(|x| x.stake_table_entry.clone()) .collect(); - let mut committee_nodes_with_stake: Vec = nodes_with_stake.clone(); - debug!("Election Membership Size: {}", config.num_nodes); - committee_nodes_with_stake.truncate(config.num_nodes.try_into().unwrap()); + + let mut committee_nodes_with_stake: Vec = Vec::new(); + + let mut committee_nodes_without_stake: Vec = Vec::new(); + // filter out the committee nodes with non-zero state and zero stake + for node in &nodes_with_stake { + if node.get_stake() == U256::from(0) { + committee_nodes_without_stake.push(PUBKEY::get_public_key(node)); + } else { + committee_nodes_with_stake.push(node.clone()); + } + } + debug!("Election Membership Size: {}", config.num_nodes_with_stake); + // truncate committee_nodes_with_stake to only `num_nodes` + // since the `num_nodes_without_stake` are not part of the committee, + committee_nodes_with_stake.truncate(config.num_nodes_with_stake.try_into().unwrap()); + committee_nodes_without_stake.truncate(config.num_nodes_without_stake.try_into().unwrap()); Self { nodes_with_stake, committee_nodes_with_stake, + committee_nodes_without_stake, _type_phantom: PhantomData, } } @@ -133,7 +163,7 @@ where NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 9) / 10) + 1).unwrap() } - fn get_committee( + fn get_staked_committee( &self, _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { @@ -146,4 +176,36 @@ where }) .collect() } + + fn get_non_staked_committee( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.committee_nodes_without_stake.iter().cloned().collect() + } + + fn get_whole_committee( + &self, + view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + let mut committee = self.get_staked_committee(view_number); + committee.extend(self.get_non_staked_committee(view_number)); + committee + } +} + +impl GeneralStaticCommittee +where + TYPES: NodeType, +{ + #[allow(clippy::must_use_candidate)] + /// get the non-staked builder nodes + pub fn non_staked_nodes_count(&self) -> usize { + self.committee_nodes_without_stake.len() + } + #[allow(clippy::must_use_candidate)] + /// get all the non-staked nodes + pub fn get_non_staked_nodes(&self) -> Vec { + self.committee_nodes_without_stake.clone() + } } diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 9abc5ebc5d..468137ce63 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -77,4 +77,4 @@ PATH = ["results"] METHOD = "POST" DOC = """ Post run results. -""" \ No newline at end of file +""" diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 954a432b18..61f17f1220 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -9,6 +9,7 @@ use std::{ num::NonZeroUsize, path::PathBuf, time::Duration, + vec, }; use std::{fs, path::Path}; use surf_disco::Url; @@ -481,16 +482,23 @@ impl From> for NetworkC #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] pub struct HotShotConfigFile { - /// Total number of nodes in the network - pub total_nodes: NonZeroUsize, + /// Total number of staked nodes in the network + pub num_nodes_with_stake: NonZeroUsize, + /// Total number of non-staked nodes in the network + pub num_nodes_without_stake: usize, #[serde(skip)] /// My own public key, secret key, stake value pub my_own_validator_config: ValidatorConfig, #[serde(skip)] /// The known nodes' public key and stake value pub known_nodes_with_stake: Vec>, - /// Number of committee nodes - pub committee_nodes: usize, + #[serde(skip)] + /// The known non-staking nodes' + pub known_nodes_without_stake: Vec, + /// Number of staking committee nodes + pub staked_committee_nodes: usize, + /// Number of non-staking committee nodes + pub non_staked_committee_nodes: usize, /// Maximum transactions per block pub max_transactions: NonZeroUsize, /// Minimum transactions per block @@ -567,12 +575,15 @@ impl From> for HotS fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, - total_nodes: val.total_nodes, + num_nodes_with_stake: val.num_nodes_with_stake, + num_nodes_without_stake: val.num_nodes_without_stake, max_transactions: val.max_transactions, min_transactions: val.min_transactions, known_nodes_with_stake: val.known_nodes_with_stake, + known_nodes_without_stake: val.known_nodes_without_stake, my_own_validator_config: val.my_own_validator_config, - da_committee_size: val.committee_nodes, + da_staked_committee_size: val.staked_committee_nodes, + da_non_staked_committee_size: val.non_staked_committee_nodes, next_view_timeout: val.next_view_timeout, timeout_ratio: val.timeout_ratio, round_start_delay: val.round_start_delay, @@ -617,10 +628,13 @@ impl Default for HotShotConfigFile { }) .collect(); Self { - total_nodes: NonZeroUsize::new(10).unwrap(), + num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), + num_nodes_without_stake: 0, my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, - committee_nodes: 5, + known_nodes_without_stake: vec![], + staked_committee_nodes: 5, + non_staked_committee_nodes: 0, max_transactions: NonZeroUsize::new(100).unwrap(), min_transactions: 1, next_view_timeout: 10000, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index d375b8e588..e8a95ce251 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -97,8 +97,8 @@ impl pub fn output_to_csv(&self) { let output_csv = BenchResultsDownloadConfig { commit_sha: self.config.commit_sha.clone(), - total_nodes: self.config.config.total_nodes.into(), - da_committee_size: self.config.config.da_committee_size, + total_nodes: self.config.config.num_nodes_with_stake.into(), + da_committee_size: self.config.config.da_staked_committee_size, transactions_per_round: self.config.transactions_per_round, transaction_size: self.bench_results.transaction_size_in_bytes, rounds: self.config.rounds, @@ -176,7 +176,7 @@ where let node_index = self.latest_index; self.latest_index += 1; - if usize::from(node_index) >= self.config.config.total_nodes.get() { + if usize::from(node_index) >= self.config.config.num_nodes_with_stake.get() { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, message: "Network has reached capacity".to_string(), @@ -229,7 +229,7 @@ where let tmp_node_index = self.tmp_latest_index; self.tmp_latest_index += 1; - if usize::from(tmp_node_index) >= self.config.config.total_nodes.get() { + if usize::from(tmp_node_index) >= self.config.config.num_nodes_with_stake.get() { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, message: "Node index getter for key pair generation has reached capacity" @@ -263,7 +263,7 @@ where "Node {:?} posted public key, now total num posted public key: {:?}", node_index, self.nodes_with_pubkey ); - if self.nodes_with_pubkey >= (self.config.config.total_nodes.get() as u64) { + if self.nodes_with_pubkey >= (self.config.config.num_nodes_with_stake.get() as u64) { self.peer_pub_ready = true; } Ok(()) @@ -305,7 +305,7 @@ where fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; println!("Nodes connected: {}", self.nodes_connected); - if self.nodes_connected >= (self.config.config.total_nodes.get() as u64) { + if self.nodes_connected >= (self.config.config.num_nodes_with_stake.get() as u64) { self.start = true; } Ok(()) @@ -351,7 +351,7 @@ where } } self.nodes_post_results += 1; - if self.nodes_post_results >= (self.config.config.total_nodes.get() as u64) { + if self.nodes_post_results >= (self.config.config.num_nodes_with_stake.get() as u64) { self.bench_results.printout(); self.output_to_csv(); } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 67850d5059..c2738b7e10 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -233,7 +233,7 @@ impl, A: ConsensusApi + // Inject view info into network let is_da = self .da_membership - .get_committee(self.cur_view + 1) + .get_whole_committee(self.cur_view + 1) .contains(&self.public_key); if is_da { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a31ca28682..eb5c19e164 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -376,7 +376,7 @@ impl, TYPES::Signa kind: message_kind, }; let view = message.kind.get_view_number(); - let committee = membership.get_committee(view); + let committee = membership.get_whole_committee(view); let net = self.channel.clone(); async_spawn(async move { let transmit_result = match transmit_type { diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index cdd48b6b32..e789c496bd 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -158,8 +158,8 @@ cross_tests!( Metadata: { let mut metadata = TestMetadata::default_more_nodes(); metadata.num_bootstrap_nodes = 10; - metadata.total_nodes = 12; - metadata.da_committee_size = 12; + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; metadata.start_nodes = 12; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 9bed45f5a9..bd166039cc 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -71,19 +71,22 @@ pub async fn build_system_handle( let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); let public_key = config.my_own_validator_config.public_key; - let quorum_election_config = - config.election_config.clone().unwrap_or_else(|| { - ::Membership::default_election_config( - config.total_nodes.get() as u64 - ) - }); - - let committee_election_config = - config.election_config.clone().unwrap_or_else(|| { - ::Membership::default_election_config( - config.total_nodes.get() as u64 - ) - }); + + let _known_nodes_without_stake = config.known_nodes_without_stake.clone(); + + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + ::Membership::default_election_config( + config.num_nodes_with_stake.get() as u64, + config.num_nodes_without_stake as u64, + ) + }); + + let committee_election_config = config.election_config.clone().unwrap_or_else(|| { + ::Membership::default_election_config( + config.num_nodes_with_stake.get() as u64, + config.num_nodes_without_stake as u64, + ) + }); let networks_bundle = Networks { quorum_network: networks.0.clone(), da_network: networks.1.clone(), @@ -367,7 +370,7 @@ pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, view_number: TYPES::Time, ) -> VidSchemeType { - let num_storage_nodes = membership.get_committee(view_number).len(); + let num_storage_nodes = membership.get_staked_committee(view_number).len(); vid_scheme(num_storage_nodes) } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index b5cfcecb88..81d1288995 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -39,8 +39,10 @@ pub struct TimingData { /// metadata describing a test #[derive(Clone, Debug)] pub struct TestMetadata { - /// Total number of nodes in the test - pub total_nodes: usize, + /// Total number of staked nodes in the test + pub num_nodes_with_stake: usize, + /// Total number of non-staked nodes in the test + pub num_nodes_without_stake: usize, /// nodes available at start pub start_nodes: usize, /// Whether to skip initializing nodes that will start late, which will catch up later with @@ -48,8 +50,10 @@ pub struct TestMetadata { pub skip_late: bool, /// number of bootstrap nodes (libp2p usage only) pub num_bootstrap_nodes: usize, - /// Size of the DA committee for the test - pub da_committee_size: usize, + /// Size of the staked DA committee for the test + pub da_staked_committee_size: usize, + /// Size of the non-staked DA committee for the test + pub da_non_staked_committee_size: usize, /// overall safety property description pub overall_safety_properties: OverallSafetyPropertiesDescription, /// spinning properties @@ -84,12 +88,16 @@ impl Default for TimingData { impl TestMetadata { /// the default metadata for a stress test #[must_use] + #[allow(clippy::redundant_field_names)] pub fn default_stress() -> Self { - let num_nodes = 100; + let num_nodes_with_stake = 100; + let num_nodes_without_stake = 0; + TestMetadata { - num_bootstrap_nodes: num_nodes, - total_nodes: num_nodes, - start_nodes: num_nodes, + num_bootstrap_nodes: num_nodes_with_stake, + num_nodes_with_stake: num_nodes_with_stake, + num_nodes_without_stake: num_nodes_without_stake, + start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 50, check_leaf: true, @@ -105,21 +113,24 @@ impl TestMetadata { round_start_delay: 25, ..TimingData::default() }, - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), ..TestMetadata::default() } } /// the default metadata for multiple rounds #[must_use] + #[allow(clippy::redundant_field_names)] pub fn default_multiple_rounds() -> TestMetadata { - let num_nodes = 10; + let num_nodes_with_stake = 10; + let num_nodes_without_stake = 0; TestMetadata { // TODO: remove once we have fixed the DHT timeout issue // https://github.com/EspressoSystems/HotShot/issues/2088 - num_bootstrap_nodes: num_nodes, - total_nodes: num_nodes, - start_nodes: num_nodes, + num_bootstrap_nodes: num_nodes_with_stake, + num_nodes_with_stake: num_nodes_with_stake, + num_nodes_without_stake: num_nodes_without_stake, + start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 20, check_leaf: true, @@ -133,25 +144,28 @@ impl TestMetadata { round_start_delay: 25, ..TimingData::default() }, - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), ..TestMetadata::default() } } /// Default setting with 20 nodes and 8 views of successful views. #[must_use] + #[allow(clippy::redundant_field_names)] pub fn default_more_nodes() -> TestMetadata { - let num_nodes = 20; + let num_nodes_with_stake = 20; + let num_nodes_without_stake = 0; TestMetadata { - total_nodes: num_nodes, - start_nodes: num_nodes, - num_bootstrap_nodes: num_nodes, + num_nodes_with_stake: num_nodes_with_stake, + num_nodes_without_stake: num_nodes_without_stake, + start_nodes: num_nodes_with_stake, + num_bootstrap_nodes: num_nodes_with_stake, // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. // TODO: Update message broadcasting to avoid hanging // - da_committee_size: 14, + da_staked_committee_size: 14, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. @@ -165,7 +179,7 @@ impl TestMetadata { next_view_timeout: 5000, ..TimingData::default() }, - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), ..TestMetadata::default() } } @@ -173,16 +187,20 @@ impl TestMetadata { impl Default for TestMetadata { /// by default, just a single round + #[allow(clippy::redundant_field_names)] fn default() -> Self { - let num_nodes = 6; + let num_nodes_with_stake = 6; + let num_nodes_without_stake = 0; Self { timing_data: TimingData::default(), min_transactions: 0, - total_nodes: num_nodes, - start_nodes: num_nodes, + num_nodes_with_stake: num_nodes_with_stake, + num_nodes_without_stake: num_nodes_without_stake, + start_nodes: num_nodes_with_stake, skip_late: false, - num_bootstrap_nodes: num_nodes, - da_committee_size: num_nodes, + num_bootstrap_nodes: num_nodes_with_stake, + da_staked_committee_size: num_nodes_with_stake, + da_non_staked_committee_size: num_nodes_without_stake, spinning_properties: SpinningTaskDescription { node_changes: vec![], }, @@ -196,7 +214,7 @@ impl Default for TestMetadata { }, ), unreliable_network: None, - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes), + view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), } } } @@ -218,18 +236,18 @@ impl TestMetadata { I: NodeImplementation, { let TestMetadata { - total_nodes, + num_nodes_with_stake, num_bootstrap_nodes, min_transactions, timing_data, - da_committee_size, - + da_staked_committee_size, + da_non_staked_committee_size, unreliable_network, .. } = self.clone(); // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. - let known_nodes_with_stake = (0..total_nodes) + let known_nodes_with_stake = (0..num_nodes_with_stake) .map(|node_id_| { let cur_validator_config: ValidatorConfig = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id_ as u64, 1); @@ -248,13 +266,17 @@ impl TestMetadata { let config = HotShotConfig { // TODO this doesn't exist anymore execution_type: ExecutionType::Incremental, - total_nodes: NonZeroUsize::new(total_nodes).unwrap(), + num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), + // Currently making this zero for simplicity + num_nodes_without_stake: 0, num_bootstrap: num_bootstrap_nodes, min_transactions, max_transactions: NonZeroUsize::new(99999).unwrap(), known_nodes_with_stake, + known_nodes_without_stake: vec![], my_own_validator_config, - da_committee_size, + da_staked_committee_size, + da_non_staked_committee_size, next_view_timeout: 500, timeout_ratio: (11, 10), round_start_delay: 1, @@ -264,7 +286,8 @@ impl TestMetadata { propose_max_round_time: Duration::from_millis(1000), // TODO what's the difference between this and the second config? election_config: Some(TYPES::Membership::default_election_config( - total_nodes as u64, + num_nodes_with_stake as u64, + 0, )), }; let TimingData { @@ -289,9 +312,9 @@ impl TestMetadata { TestLauncher { resource_generator: ResourceGenerators { channel_generator: >::gen_networks( - total_nodes, + num_nodes_with_stake, num_bootstrap_nodes, - da_committee_size, + da_staked_committee_size, unreliable_network, ), storage: Box::new(|_| I::construct_tmp_storage().unwrap()), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 3148f23089..ec44f9d801 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -145,8 +145,11 @@ where } } - self.add_nodes(self.launcher.metadata.total_nodes, &late_start_nodes) - .await; + self.add_nodes( + self.launcher.metadata.num_nodes_with_stake, + &late_start_nodes, + ) + .await; let mut event_rxs = vec![]; let mut internal_event_rxs = vec![]; @@ -328,7 +331,10 @@ where let config = self.launcher.resource_generator.config.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - TYPES::Membership::default_election_config(config.total_nodes.get() as u64) + TYPES::Membership::default_election_config( + config.num_nodes_with_stake.get() as u64, + config.num_nodes_without_stake as u64, + ) }); let committee_election_config = I::committee_election_config_generator(); let memberships = Memberships { @@ -338,7 +344,10 @@ where ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - committee_election_config(config.da_committee_size as u64), + committee_election_config( + config.da_staked_committee_size as u64, + config.num_nodes_without_stake as u64, + ), ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 3901bf817b..002254996f 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -25,7 +25,7 @@ async fn test_catchup() { metadata.timing_data = timing_data; metadata.start_nodes = 19; - metadata.total_nodes = 20; + metadata.num_nodes_with_stake = 20; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); @@ -83,7 +83,7 @@ async fn test_catchup_web() { }]; metadata.timing_data = timing_data; metadata.start_nodes = 19; - metadata.total_nodes = 20; + metadata.num_nodes_with_stake = 20; metadata.spinning_properties = SpinningTaskDescription { // Start the nodes before their leadership. @@ -136,7 +136,7 @@ async fn test_catchup_one_node() { }]; metadata.timing_data = timing_data; metadata.start_nodes = 19; - metadata.total_nodes = 20; + metadata.num_nodes_with_stake = 20; metadata.spinning_properties = SpinningTaskDescription { // Start the nodes before their leadership. @@ -198,7 +198,7 @@ async fn test_catchup_in_view_sync() { metadata.timing_data = timing_data; metadata.start_nodes = 18; - metadata.total_nodes = 20; + metadata.num_nodes_with_stake = 20; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); @@ -256,7 +256,7 @@ async fn test_catchup_reload() { metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.skip_late = true; - metadata.total_nodes = 20; + metadata.num_nodes_with_stake = 20; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index fd2c973e8b..49de0e9ed7 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -76,7 +76,7 @@ async fn test_combined_network_webserver_crash() { }; let mut all_nodes = vec![]; - for node in 0..metadata.total_nodes { + for node in 0..metadata.num_nodes_with_stake { all_nodes.push(ChangeNode { idx: node, updown: UpDown::NetworkDown, @@ -126,7 +126,7 @@ async fn test_combined_network_reup() { let mut all_down = vec![]; let mut all_up = vec![]; - for node in 0..metadata.total_nodes { + for node in 0..metadata.num_nodes_with_stake { all_down.push(ChangeNode { idx: node, updown: UpDown::NetworkDown, @@ -178,7 +178,7 @@ async fn test_combined_network_half_dc() { }; let mut half = vec![]; - for node in 0..metadata.total_nodes / 2 { + for node in 0..metadata.num_nodes_with_stake / 2 { half.push(ChangeNode { idx: node, updown: UpDown::NetworkDown, @@ -233,7 +233,7 @@ async fn test_stress_combined_network_fuzzy() { async_compatibility_layer::logging::setup_backtrace(); let mut metadata = TestMetadata { num_bootstrap_nodes: 10, - total_nodes: 20, + num_nodes_with_stake: 20, start_nodes: 20, timing_data: TimingData { @@ -254,7 +254,7 @@ async fn test_stress_combined_network_fuzzy() { metadata.spinning_properties = SpinningTaskDescription { node_changes: generate_random_node_changes( - metadata.total_nodes, + metadata.num_nodes_with_stake, metadata.overall_safety_properties.num_successful_views * 2, ), }; diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index ddcf178bcc..e87c5bac76 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -74,8 +74,8 @@ async fn libp2p_network_failures_2() { metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(3, dead_nodes)], }; - metadata.total_nodes = 12; - metadata.da_committee_size = 12; + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; metadata.start_nodes = 12; // 2 nodes fail triggering view sync, expect no other timeouts metadata.overall_safety_properties.num_failed_views = 1; diff --git a/testing/tests/timeout.rs b/testing/tests/timeout.rs index 39b9e778f6..ee56cb2fc5 100644 --- a/testing/tests/timeout.rs +++ b/testing/tests/timeout.rs @@ -23,7 +23,7 @@ async fn test_timeout_web() { }; let mut metadata = TestMetadata { - total_nodes: 10, + num_nodes_with_stake: 10, start_nodes: 10, ..Default::default() }; @@ -88,7 +88,7 @@ async fn test_timeout_libp2p() { }; let mut metadata = TestMetadata { - total_nodes: 10, + num_nodes_with_stake: 10, start_nodes: 10, num_bootstrap_nodes: 10, ..Default::default() diff --git a/types/src/data.rs b/types/src/data.rs index bc09bbbd18..942bde5bcb 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -155,7 +155,7 @@ impl VidDisperse { membership: &Arc, ) -> Self { let shares = membership - .get_committee(view_number) + .get_staked_committee(view_number) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); diff --git a/types/src/lib.rs b/types/src/lib.rs index 341ad51520..66b352817d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -148,17 +148,24 @@ pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, /// Total number of nodes in the network - pub total_nodes: NonZeroUsize, + // Earlier it was total_nodes + pub num_nodes_with_stake: NonZeroUsize, + /// Number of nodes without stake + pub num_nodes_without_stake: usize, /// Minimum transactions per block pub min_transactions: usize, /// Maximum transactions per block pub max_transactions: NonZeroUsize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec>, + /// List of known non-staking nodes' public keys + pub known_nodes_without_stake: Vec, /// My own validator config, including my public key, private key, stake value, serving as private parameter pub my_own_validator_config: ValidatorConfig, - /// List of DA committee nodes for static DA committe - pub da_committee_size: usize, + /// List of DA committee (staking)nodes for static DA committe + pub da_staked_committee_size: usize, + /// List of DA committee nodes (non-staking)nodes for static DA committe + pub da_non_staked_committee_size: usize, /// Base duration for next-view timeout, in milliseconds pub next_view_timeout: u64, /// The exponential backoff ration for the next-view timeout diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index a69161f8b7..0f3eec9b53 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -38,7 +38,10 @@ pub trait Membership: Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static { /// generate a default election configuration - fn default_election_config(num_nodes: u64) -> TYPES::ElectionConfigType; + fn default_election_config( + num_nodes_with_stake: u64, + num_nodes_without_stake: u64, + ) -> TYPES::ElectionConfigType; /// create an election /// TODO may want to move this to a testableelection trait @@ -55,8 +58,14 @@ pub trait Membership: /// The leader of the committee for view `view_number`. fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; - /// The members of the committee for view `view_number`. - fn get_committee(&self, view_number: TYPES::Time) -> BTreeSet; + /// The staked members of the committee for view `view_number`. + fn get_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + + /// The non-staked members of the committee for view `view_number`. + fn get_non_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + + /// Get whole (staked + non-staked) committee for view `view_number`. + fn get_whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; /// Check if a key has stake fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 2c14bd9410..3f0959d767 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -59,7 +59,7 @@ pub trait TestableNodeImplementation: NodeImplementation /// Generates a committee-specific election fn committee_election_config_generator( - ) -> Box Self::CommitteeElectionConfig + 'static>; + ) -> Box Self::CommitteeElectionConfig + 'static>; /// Creates random transaction if possible /// otherwise panics @@ -115,8 +115,13 @@ where type CommitteeElectionConfig = TYPES::ElectionConfigType; fn committee_election_config_generator( - ) -> Box Self::CommitteeElectionConfig + 'static> { - Box::new(|num_nodes| ::Membership::default_election_config(num_nodes)) + ) -> Box Self::CommitteeElectionConfig + 'static> { + Box::new(|num_nodes_with_stake, num_nodes_without_stake| { + ::Membership::default_election_config( + num_nodes_with_stake, + num_nodes_without_stake, + ) + }) } fn state_create_random_transaction( From e3c4b46da7307f46ae7346f22c44404aa00ff9fe Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 11 Mar 2024 14:55:50 -0400 Subject: [PATCH 0844/1393] Voting for all Possible Orderings Regression Test (#2722) * checkpoint initial tests * working test for QC init case * minimal third test * complete test * test negative case * move the view to use node 2 as the leaderg * remove dead code * improve test correctness * fix consensus task test * one test to go * tests complete * permutate more aggressively * revert weird leftover * restore old * new helpers, move permute fn, ready to go * document function * whoops * remove permute from consensus task * remove old comment * remove panic test * fix format check --- testing/src/lib.rs | 3 + testing/src/test_helpers.rs | 16 +++++ testing/tests/consensus_task.rs | 96 ++++++++++++++++++++++++++++++ testing/tests/proposal_ordering.rs | 14 +---- 4 files changed, 117 insertions(+), 12 deletions(-) create mode 100644 testing/src/test_helpers.rs diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 9abc87a288..d9ae432175 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -42,6 +42,9 @@ pub mod script; /// view generator for tests pub mod view_generator; +/// helper functions for test scripts +pub mod test_helpers; + /// global event at the test level #[derive(Clone, Debug)] pub enum GlobalTestEvent { diff --git a/testing/src/test_helpers.rs b/testing/src/test_helpers.rs new file mode 100644 index 0000000000..7c98d5d8a7 --- /dev/null +++ b/testing/src/test_helpers.rs @@ -0,0 +1,16 @@ +/// This function permutes the provided input vector `inputs`, given some order provided within the +/// `order` vector. +/// +/// # Examples +/// let output = permute_input_with_index_order(vec![1, 2, 3], vec![2, 1, 0]); +/// // Output is [3, 2, 1] now +pub fn permute_input_with_index_order(inputs: Vec, order: Vec) -> Vec +where + T: Clone, +{ + let mut ordered_inputs = Vec::with_capacity(inputs.len()); + for &index in &order { + ordered_inputs.push(inputs[index].clone()); + } + ordered_inputs +} diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 44d5137fec..0a3b686ef5 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -3,6 +3,7 @@ use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::task_helpers::{build_quorum_proposal, build_vote, key_pair_for_id}; +use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_types::traits::{consensus_api::ConsensusApi, election::Membership}; use hotshot_types::{ data::ViewNumber, message::GeneralConsensusMessage, traits::node_implementation::ConsensusTime, @@ -146,6 +147,101 @@ async fn test_consensus_vote() { run_test_script(vec![view_1], consensus_state).await; } +/// Tests the voting behavior by allowing the input to be permuted in any order desired. This +/// assures that, no matter what, a vote is indeed sent no matter what order the precipitating +/// events occur. The permutation is specified as `input_permutation` and is a vector of indices. +async fn test_vote_with_specific_order(input_permutation: Vec) { + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; + use hotshot_testing::{ + predicates::{exact, is_at_view_number}, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Get out of the genesis view first + let view_1 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![is_at_view_number(1)], + }; + + let inputs = vec![ + // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). + VidDisperseRecv(vids[1].0.clone(), vids[1].1), + DACRecv(dacs[1].clone()), + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + ]; + let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); + + let view_2_outputs = vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + exact(QuorumVoteSend(votes[1].clone())), + ]; + + // Use the permuted inputs for view 2 depending on the provided index ordering. + let view_2 = TestScriptStage { + inputs: view_2_inputs, + outputs: view_2_outputs, + asserts: vec![is_at_view_number(2)], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + inject_consensus_polls(&consensus_state).await; + run_test_script(vec![view_1, view_2], consensus_state).await; +} + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_consensus_vote_with_permuted_dac() { + // These tests verify that a vote is indeed sent no matter when it receives a DACRecv + // event. In particular, we want to verify that receiving events in an unexpected (but still + // valid) order allows the system to proceed as it normally would. + test_vote_with_specific_order(vec![0, 1, 2]).await; + test_vote_with_specific_order(vec![0, 2, 1]).await; + test_vote_with_specific_order(vec![1, 0, 2]).await; + test_vote_with_specific_order(vec![2, 0, 1]).await; + test_vote_with_specific_order(vec![1, 2, 0]).await; + test_vote_with_specific_order(vec![2, 1, 0]).await; +} + /// TODO (jparr721): Nuke these old tests. Tracking: https://github.com/EspressoSystems/HotShot/issues/2727 #[cfg(test)] #[cfg_attr( diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index 6c53c1dcc3..40c4d1832e 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -4,22 +4,12 @@ use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*} use hotshot_testing::{ predicates::{exact, is_at_view_number, quorum_proposal_send}, task_helpers::vid_scheme_from_view_number, + test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use jf_primitives::vid::VidScheme; -fn permute(inputs: Vec, order: Vec) -> Vec -where - T: Clone, -{ - let mut ordered_inputs = Vec::with_capacity(inputs.len()); - for &index in &order { - ordered_inputs.push(inputs[index].clone()); - } - ordered_inputs -} - /// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). /// This proposal should happen no matter how the `input_permutation` is specified. async fn test_ordering_with_specific_order(input_permutation: Vec) { @@ -91,7 +81,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { ] }; - let view_2_inputs = permute(inputs, input_permutation); + let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); // This stage transitions from view 1 to view 2. let view_2 = TestScriptStage { From 2335cee422f976f5051992e96fe3ab6e98d19caf Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 11 Mar 2024 12:05:18 -0700 Subject: [PATCH 0845/1393] [Auto Benchmarks] Run config update (#2747) * update run_config * clean up orchestrator readme --- examples/webserver/README.md | 19 +++++++++++++++++++ orchestrator/run-config.toml | 6 ++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/examples/webserver/README.md b/examples/webserver/README.md index 9d74ab723a..b1371130ab 100644 --- a/examples/webserver/README.md +++ b/examples/webserver/README.md @@ -1,35 +1,54 @@ Commands to run da examples: 1a)Start web servers by either running 3 servers: +``` just async_std example webserver -- just async_std example webserver -- +``` 1b)Or use multi-webserver to spin up all three: +``` just async_std example multi-webserver -- +``` 2) Start orchestrator: +``` just async_std example orchestrator-webserver -- +``` 3a) Start validator: +``` just async_std example validator-webserver -- +``` 3b) Or start multiple validators: +``` just async_std example multi-validator-webserver -- +``` I.e. +``` just async_std example webserver -- http://127.0.0.1:9000 just async_std example webserver -- http://127.0.0.1:9001 just async_std example webserver -- http://127.0.0.1:9002 just async_std example orchestrator-webserver -- http://127.0.0.1:4444 ./crates/orchestrator/run-config.toml just async_std example validator-webserver -- 2 http://127.0.0.1:4444 +``` OR: +``` just async_std example multi-webserver -- 9000 9001 9002 just async_std example orchestrator-webserver -- http://127.0.0.1:4444 ./crates/orchestrator/run-config.toml just async_std example multi-validator-webserver -- 10 http://127.0.0.1:4444 +``` ================All of the above are out-dated================ + OR: + `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://localhost:4444` + For other argument setting, checkout `read_orchestrator_initialization_config` in `crates/examples/infra/mod.rs`. + One example is: `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 15`. + Another example is `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 20 --da_committee_size 5 --transactions_per_round 10 --transaction_size 512 --rounds 100`, I'll get throughput `0.29M/s` for this one. \ No newline at end of file diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 5c547bccc8..6eeced9927 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -39,8 +39,10 @@ seed = [ start_delay_seconds = 0 [config] -total_nodes = 10 -committee_nodes = 5 +num_nodes_with_stake = 10 +num_nodes_without_stake = 0 +staked_committee_nodes = 5 +non_staked_committee_nodes = 0 max_transactions = 100 min_transactions = 0 next_view_timeout = 30000 From 67cf352791ed7d7dafa23097e0a112cc80f013d8 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 11 Mar 2024 16:44:17 -0400 Subject: [PATCH 0846/1393] [Crypto] Allow signing and verifying data of arbitrary length (#2742) * allow signing of arbitrary data * change original functions to operate over arbitrary data --- hotshot-qc/src/bit_vector.rs | 9 +++---- types/src/qc.rs | 39 +++++++++++-------------------- types/src/signature_key.rs | 7 ++---- types/src/traits/qc.rs | 10 ++++---- types/src/traits/signature_key.rs | 2 ++ 5 files changed, 29 insertions(+), 38 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index cd860d18f1..8313f009fc 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -57,13 +57,14 @@ where type MessageLength = U32; type QuorumSize = U256; - fn sign( - agg_sig_pp: &A::PublicParameter, - message: &GenericArray, + /// Sign a message with the signing key + fn sign>( + pp: &A::PublicParameter, sk: &A::SigningKey, + msg: M, prng: &mut R, ) -> Result { - A::sign(agg_sig_pp, sk, message, prng) + A::sign(pp, sk, msg, prng) } fn assemble( diff --git a/types/src/qc.rs b/types/src/qc.rs index 4fbe6d00d7..3d91fcac75 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -55,13 +55,14 @@ where type MessageLength = U32; type QuorumSize = U256; - fn sign( - agg_sig_pp: &A::PublicParameter, - message: &GenericArray, + /// Sign a message with the signing key + fn sign>( + pp: &A::PublicParameter, sk: &A::SigningKey, + msg: M, prng: &mut R, ) -> Result { - A::sign(agg_sig_pp, sk, message, prng) + A::sign(pp, sk, msg, prng) } fn assemble( @@ -214,27 +215,15 @@ mod tests { agg_sig_pp, }; let msg = [72u8; 32]; - let sig1 = BitVectorQC::<$aggsig>::sign( - &agg_sig_pp, - &msg.into(), - key_pair1.sign_key_ref(), - &mut rng, - ) - .unwrap(); - let sig2 = BitVectorQC::<$aggsig>::sign( - &agg_sig_pp, - &msg.into(), - key_pair2.sign_key_ref(), - &mut rng, - ) - .unwrap(); - let sig3 = BitVectorQC::<$aggsig>::sign( - &agg_sig_pp, - &msg.into(), - key_pair3.sign_key_ref(), - &mut rng, - ) - .unwrap(); + let sig1 = + BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair1.sign_key_ref(), &msg, &mut rng) + .unwrap(); + let sig2 = + BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair2.sign_key_ref(), &msg, &mut rng) + .unwrap(); + let sig3 = + BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair3.sign_key_ref(), &msg, &mut rng) + .unwrap(); // happy path let signers = bitvec![0, 1, 1]; diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 30ee459a7e..55f0a9c8b5 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -18,7 +18,6 @@ use jf_primitives::{ use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use tracing::instrument; -use typenum::U32; /// BLS private key used to sign a message pub type BLSPrivKey = SignKey; @@ -40,19 +39,17 @@ impl SignatureKey for BLSPubKey { #[instrument(skip(self))] fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { // This is the validation for QC partial signature before append(). - let generic_msg: &GenericArray = GenericArray::from_slice(data); - BLSOverBN254CurveSignatureScheme::verify(&(), self, generic_msg, signature).is_ok() + BLSOverBN254CurveSignatureScheme::verify(&(), self, data, signature).is_ok() } fn sign( sk: &Self::PrivateKey, data: &[u8], ) -> Result { - let generic_msg = GenericArray::from_slice(data); BitVectorQC::::sign( &(), - generic_msg, sk, + data, &mut rand::thread_rng(), ) } diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 213b51977e..7dd11010f9 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -42,12 +42,14 @@ pub trait QuorumCertificateScheme< /// # Errors /// /// Should return error if the underlying signature scheme fail to sign. - fn sign( - agg_sig_pp: &A::PublicParameter, - message: &GenericArray, + fn sign>( + pp: &A::PublicParameter, sk: &A::SigningKey, + msg: M, prng: &mut R, - ) -> Result; + ) -> Result { + A::sign(pp, sk, msg, prng) + } /// Computes an aggregated signature from a set of partial signatures and the verification keys involved /// * `qc_pp` - public parameters for generating the QC diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 9bbc1a223d..3b36e0ed31 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -87,6 +87,7 @@ pub trait SignatureKey: // of serialization, to avoid Cryptographic pitfalls /// Validate a signature fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool; + /// Produce a signature /// # Errors /// If unable to sign the data with the key @@ -94,6 +95,7 @@ pub trait SignatureKey: private_key: &Self::PrivateKey, data: &[u8], ) -> Result; + /// Produce a public key from a private key fn from_private(private_key: &Self::PrivateKey) -> Self; /// Serialize a public key to bytes From 62346de534d1e984f1481c6f407986fdd306aeeb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 11 Mar 2024 21:03:00 +0000 Subject: [PATCH 0847/1393] remove `Clone` requirement from `InstanceState` (#2746) --- types/src/traits/states.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index d759e24cfb..9afb730526 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -16,7 +16,7 @@ use serde::{de::DeserializeOwned, Serialize}; use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; /// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Clone + Debug + Send + Sync {} +pub trait InstanceState: Debug + Send + Sync {} /// Abstraction over the state that blocks modify /// From acaa47dd10ac2b39a1cb90f17f2aa175230c40b2 Mon Sep 17 00:00:00 2001 From: Nathan F Yospe Date: Mon, 11 Mar 2024 18:48:34 -0400 Subject: [PATCH 0848/1393] Fixes https://github.com/EspressoSystems/HotShot/issues/2753 (#2754) --- example-types/src/block_types.rs | 2 +- examples/infra/mod.rs | 6 ++++-- types/src/traits/block_contents.rs | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 2fc6470de5..6d28a0e1b3 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -171,7 +171,7 @@ impl BlockPayload for TestBlockPayload { BuilderCommitment::from_raw_digest(digest.finalize()) } - fn get_transactions(&self) -> &Vec { + fn get_transactions(&self, _metadata: &Self::Metadata) -> &Vec { &self.transactions } } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index f52fc20fe7..4a449d5930 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -35,7 +35,7 @@ use hotshot_types::{ data::{Leaf, TestableLeaf}, event::{Event, EventType}, traits::{ - block_contents::TestableBlock, + block_contents::{BlockHeader, TestableBlock}, election::Membership, node_implementation::{ConsensusTime, NodeType}, states::TestableState, @@ -571,7 +571,9 @@ pub trait RunDA< // iterate all the decided transactions to calculate latency if let Some(block_payload) = &leaf.block_payload { - for tx in block_payload.get_transactions() { + for tx in + block_payload.get_transactions(leaf.block_header.metadata()) + { let restored_timestamp_vec = tx.0[tx.0.len() - 8..].to_vec(); let restored_timestamp = i64::from_be_bytes( diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index b63d6d75d6..b0b620e627 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -86,7 +86,7 @@ pub trait BlockPayload: fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; /// Get the transactions in the payload. - fn get_transactions(&self) -> &Vec; + fn get_transactions(&self, metadata: &Self::Metadata) -> &Vec; } /// extra functions required on block to be usable by hotshot-testing From 27ecdd75ab78abb92eae64a5f9cbaae0ad99e046 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 12 Mar 2024 15:15:45 +0100 Subject: [PATCH 0849/1393] Move hotshot-types to a separate repo (#2758) --- constants/Cargo.toml | 10 - constants/src/lib.rs | 33 - example-types/Cargo.toml | 3 +- examples/Cargo.toml | 3 +- hotshot-qc/Cargo.toml | 2 +- hotshot-stake-table/Cargo.toml | 2 +- hotshot/Cargo.toml | 3 +- hotshot/src/lib.rs | 2 +- hotshot/src/tasks/task_state.rs | 2 +- .../src/traits/networking/combined_network.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 12 +- .../traits/networking/web_server_network.rs | 2 +- libp2p-networking/Cargo.toml | 2 +- libp2p-networking/src/network/node.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 21 + orchestrator/Cargo.toml | 2 +- task-impls/Cargo.toml | 3 +- task-impls/src/consensus.rs | 4 +- task-impls/src/network.rs | 2 +- testing-macros/Cargo.toml | 2 +- testing/Cargo.toml | 3 +- testing/src/test_runner.rs | 2 +- testing/tests/memory_network.rs | 2 +- testing/tests/unit/message.rs | 2 +- testing/tests/unit/version.rs | 2 +- testing/tests/upgrade_task.rs | 2 +- types/Cargo.toml | 62 -- types/src/consensus.rs | 364 ---------- types/src/data.rs | 506 ------------- types/src/error.rs | 118 --- types/src/event.rs | 106 --- types/src/lib.rs | 185 ----- types/src/light_client.rs | 223 ------ types/src/message.rs | 321 --------- types/src/qc.rs | 304 -------- types/src/signature_key.rs | 127 ---- types/src/simple_certificate.rs | 179 ----- types/src/simple_vote.rs | 259 ------- types/src/stake_table.rs | 31 - types/src/traits.rs | 15 - types/src/traits/consensus_api.rs | 51 -- types/src/traits/election.rs | 91 --- types/src/traits/metrics.rs | 295 -------- types/src/traits/network.rs | 669 ------------------ types/src/traits/node_implementation.rs | 251 ------- types/src/traits/qc.rs | 95 --- types/src/traits/signature_key.rs | 140 ---- types/src/traits/stake_table.rs | 235 ------ types/src/traits/states.rs | 84 --- types/src/traits/storage.rs | 160 ----- types/src/utils.rs | 136 ---- types/src/vid.rs | 270 ------- types/src/vote.rs | 183 ----- utils/Cargo.toml | 2 +- utils/src/version.rs | 2 +- web_server/Cargo.toml | 5 +- 56 files changed, 55 insertions(+), 5541 deletions(-) delete mode 100644 constants/Cargo.toml delete mode 100644 constants/src/lib.rs delete mode 100644 types/Cargo.toml delete mode 100644 types/src/consensus.rs delete mode 100644 types/src/data.rs delete mode 100644 types/src/error.rs delete mode 100644 types/src/event.rs delete mode 100644 types/src/lib.rs delete mode 100644 types/src/light_client.rs delete mode 100644 types/src/message.rs delete mode 100644 types/src/qc.rs delete mode 100644 types/src/signature_key.rs delete mode 100644 types/src/simple_certificate.rs delete mode 100644 types/src/simple_vote.rs delete mode 100644 types/src/stake_table.rs delete mode 100644 types/src/traits.rs delete mode 100644 types/src/traits/consensus_api.rs delete mode 100644 types/src/traits/election.rs delete mode 100644 types/src/traits/metrics.rs delete mode 100644 types/src/traits/network.rs delete mode 100644 types/src/traits/node_implementation.rs delete mode 100644 types/src/traits/qc.rs delete mode 100644 types/src/traits/signature_key.rs delete mode 100644 types/src/traits/stake_table.rs delete mode 100644 types/src/traits/states.rs delete mode 100644 types/src/traits/storage.rs delete mode 100644 types/src/utils.rs delete mode 100644 types/src/vid.rs delete mode 100644 types/src/vote.rs diff --git a/constants/Cargo.toml b/constants/Cargo.toml deleted file mode 100644 index dcb3ae0d78..0000000000 --- a/constants/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "hotshot-constants" -edition = { workspace = true } -version = { workspace = true } - -[dependencies] -serde = { workspace = true } - -[lints] -workspace = true diff --git a/constants/src/lib.rs b/constants/src/lib.rs deleted file mode 100644 index 621df3cdfe..0000000000 --- a/constants/src/lib.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! configurable constants for hotshot - -use serde::{Deserialize, Serialize}; - -/// the number of views to gather information for ahead of time -pub const LOOK_AHEAD: u64 = 5; - -/// the default kademlia record republication interval (in seconds) -pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; - -/// the number of messages to cache in the combined network -pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; - -/// the number of messages to attempt to send over the primary network before switching to prefer the secondary network -pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; - -/// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network -pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; - -#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Hash, Eq)] -/// Type for protocol version number -pub struct Version { - /// major version number - pub major: u16, - /// minor version number - pub minor: u16, -} - -/// Constant for protocol version 0.1. -pub const VERSION_0_1: Version = Version { major: 0, minor: 1 }; - -/// Default Channel Size for consensus event sharing -pub const EVENT_CHANNEL_SIZE: usize = 100_000; diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index eecae87e9b..19b8d3532a 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -19,8 +19,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } -hotshot-constants = { path = "../constants" } -hotshot-types = { path = "../types", default-features = false } +hotshot-types = { workspace = true, default-features = false } hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b97c456e03..377da39dcf 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -87,7 +87,6 @@ bimap = "0.6.3" bincode = { workspace = true } clap = { version = "4.5", features = ["derive", "env"], optional = true } commit = { workspace = true } -hotshot-constants = { path = "../constants" } custom_debug = { workspace = true } dashmap = "5.5.1" either = { workspace = true } @@ -95,7 +94,7 @@ embed-doc-image = "0.1.4" futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-types = { path = "../types", version = "0.1.0", default-features = false } +hotshot-types = { workspace = true, version = "0.1.0", default-features = false } hotshot-utils = { path = "../utils" } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } libp2p-identity = { workspace = true } diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index f117fe69e0..7766c9bf07 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -16,7 +16,7 @@ bincode = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } -hotshot-types = { path = "../types" } +hotshot-types = { workspace = true } jf-primitives = { workspace = true } jf-relation = { workspace = true } jf-utils = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 601d6c1f63..dad2699625 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -14,7 +14,7 @@ ark-serialize = { workspace = true } ark-std = { workspace = true } digest = { workspace = true } ethereum-types = { workspace = true } -hotshot-types = { path = "../types" } +hotshot-types = { workspace = true } jf-primitives = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index da8f125e0b..c88e28de1d 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -35,10 +35,9 @@ either = { workspace = true } embed-doc-image = "0.1.4" ethereum-types = { workspace = true } futures = { workspace = true } -hotshot-constants = { path = "../constants" } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-types = { path = "../types", version = "0.1.0", default-features = false } +hotshot-types = { workspace = true, version = "0.1.0", default-features = false } hotshot-task = { path = "../task" } hotshot-utils = { path = "../utils" } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a65eb869d9..4ad4e1ad23 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -26,10 +26,10 @@ use async_lock::RwLock; use async_trait::async_trait; use commit::Committable; use futures::join; -use hotshot_constants::{EVENT_CHANNEL_SIZE, VERSION_0_1}; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; +use hotshot_types::constants::{EVENT_CHANNEL_SIZE, VERSION_0_1}; use hotshot_task::task::TaskRegistry; use hotshot_types::{ diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 6e8d023113..b310d0bf6a 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,7 +1,6 @@ use crate::types::SystemContextHandle; use async_trait::async_trait; -use hotshot_constants::VERSION_0_1; use hotshot_task_impls::{ consensus::{CommitmentAndMetadata, ConsensusTaskState}, da::DATaskState, @@ -10,6 +9,7 @@ use hotshot_task_impls::{ vid::VIDTaskState, view_sync::ViewSyncTaskState, }; +use hotshot_types::constants::VERSION_0_1; use hotshot_types::traits::election::Membership; use hotshot_types::traits::{ block_contents::vid_commitment, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index f0bef0e2c3..1aa8cc3beb 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -3,7 +3,7 @@ use super::NetworkError; use crate::traits::implementations::{Libp2pNetwork, WebServerNetwork}; use async_lock::RwLock; -use hotshot_constants::{ +use hotshot_types::constants::{ COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 3199ce9edb..24f12d86a6 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -12,7 +12,7 @@ use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; -use hotshot_constants::{Version, LOOK_AHEAD, VERSION_0_1}; +use hotshot_types::constants::{Version, LOOK_AHEAD, VERSION_0_1}; use hotshot_types::{ boxed_sync, data::ViewNumber, @@ -684,7 +684,9 @@ impl ConnectedNetwork for Libp2p "Failed to message {:?} because could not find recipient peer id for pk {:?}", request, recipient ); - return Err(NetworkError::Libp2p { source: err }); + return Err(NetworkError::Libp2p { + source: Box::new(err), + }); } }; match self.inner.handle.request_data(&request, pid).await { @@ -776,7 +778,7 @@ impl ConnectedNetwork for Libp2p let topic = topic_map .get_by_left(&recipients) .ok_or(NetworkError::Libp2p { - source: NetworkNodeHandleError::NoSuchTopic, + source: Box::new(NetworkNodeHandleError::NoSuchTopic), })? .clone(); info!("broadcasting to topic: {}", topic); @@ -891,7 +893,9 @@ impl ConnectedNetwork for Libp2p "Failed to message {:?} because could not find recipient peer id for pk {:?}", message, recipient ); - return Err(NetworkError::Libp2p { source: err }); + return Err(NetworkError::Libp2p { + source: Box::new(err), + }); } }; diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 5bae77c950..ebb3fe7466 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -12,7 +12,7 @@ use async_compatibility_layer::{ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; -use hotshot_constants::VERSION_0_1; +use hotshot_types::constants::VERSION_0_1; use hotshot_types::{ boxed_sync, message::{Message, MessagePurpose}, diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index e8dd1d4ebc..ced6c864a3 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -21,7 +21,7 @@ custom_debug = { workspace = true } derive_builder = "0.13.1" either = { workspace = true } futures = { workspace = true } -hotshot-constants = { path = "../constants" } +hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index f1724f3900..ad542bbdbd 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -32,7 +32,7 @@ use async_compatibility_layer::{ channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use futures::{select, FutureExt, StreamExt}; -use hotshot_constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; +use hotshot_types::constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::{core::transport::ListenerId, StreamProtocol}; use libp2p::{ gossipsub::{ diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 2dd7c5542c..d96c629ae7 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -11,6 +11,7 @@ use async_compatibility_layer::{ use bincode::Options; use futures::channel::oneshot; +use hotshot_types::traits::network::NetworkError as HotshotNetworkError; use hotshot_utils::bincode::bincode_opts; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; @@ -575,6 +576,26 @@ pub enum NetworkNodeHandleError { NoSuchTopic, } +impl From for HotshotNetworkError { + fn from(error: NetworkNodeHandleError) -> Self { + match error { + NetworkNodeHandleError::SerializationError { source } => { + HotshotNetworkError::FailedToSerialize { source } + } + NetworkNodeHandleError::DeserializationError { source } => { + HotshotNetworkError::FailedToDeserialize { source } + } + NetworkNodeHandleError::TimeoutError { source } => { + HotshotNetworkError::Timeout { source } + } + NetworkNodeHandleError::Killed => HotshotNetworkError::ShutDown, + source => HotshotNetworkError::Libp2p { + source: Box::new(source), + }, + } + } +} + /// Re-exports of the snafu errors that [`NetworkNodeHandleError`] can throw pub mod network_node_handle_error { pub use super::{ diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 7b679472f0..388649682a 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -10,7 +10,7 @@ clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } libp2p = { workspace = true } blake3 = { workspace = true } -hotshot-types = { version = "0.1.0", path = "../types", default-features = false } +hotshot-types = { workspace = true, version = "0.1.0" , default-features = false } hotshot-utils = { path = "../utils" } tide-disco = { workspace = true } surf-disco = { workspace = true } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index d4a29f63ca..9ed541cf19 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -13,8 +13,7 @@ futures = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } -hotshot-constants = { path = "../constants", default-features = false } -hotshot-types = { path = "../types", default-features = false } +hotshot-types = { workspace = true, default-features = false } hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } time = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 98ce438696..8ea5dd3401 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -9,9 +9,9 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use async_std::task::JoinHandle; use commit::Committable; use core::time::Duration; -use hotshot_constants::Version; -use hotshot_constants::LOOK_AHEAD; use hotshot_task::task::{Task, TaskState}; +use hotshot_types::constants::Version; +use hotshot_types::constants::LOOK_AHEAD; use async_broadcast::Sender; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index eb5c19e164..2b9aa97f26 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -5,7 +5,7 @@ use crate::{ use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; use either::Either::{self, Left, Right}; -use hotshot_constants::VERSION_0_1; +use hotshot_types::constants::VERSION_0_1; use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index dcb0de323c..56d10d45d9 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -15,7 +15,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", default-features = false } -hotshot-types = { path = "../types", default-features = false } +hotshot-types = { workspace = true, default-features = false } hotshot-testing = { path = "../testing", default-features = false } hotshot-example-types = { path = "../example-types" } jf-primitives = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 2040652626..e0b8784bf8 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -19,8 +19,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } -hotshot-constants = { path = "../constants" } -hotshot-types = { path = "../types", default-features = false } +hotshot-types = { workspace = true, default-features = false } hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index ec44f9d801..72ed056f0e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -19,8 +19,8 @@ use hotshot_example_types::state_types::TestInstanceState; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; -use hotshot_constants::EVENT_CHANNEL_SIZE; use hotshot_task::task::{Task, TaskRegistry, TestTask}; +use hotshot_types::constants::EVENT_CHANNEL_SIZE; use hotshot_types::{ consensus::ConsensusMetricsValue, data::Leaf, diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 90e88cbdee..4a8e8f47ee 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -9,12 +9,12 @@ use hotshot::traits::implementations::{ }; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; -use hotshot_constants::VERSION_0_1; use hotshot_example_types::state_types::TestInstanceState; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestValidatedState, }; +use hotshot_types::constants::VERSION_0_1; use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::ConnectedNetwork; diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index eb47244145..3bebd9ae43 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -4,7 +4,7 @@ use std::marker::PhantomData; use commit::Committable; use either::Left; -use hotshot_constants::Version; +use hotshot_types::constants::Version; use hotshot_example_types::node_types::TestTypes; diff --git a/testing/tests/unit/version.rs b/testing/tests/unit/version.rs index 813bcd9e0d..7db8d2581e 100644 --- a/testing/tests/unit/version.rs +++ b/testing/tests/unit/version.rs @@ -1,5 +1,5 @@ #[cfg(test)] -use hotshot_constants::Version; +use hotshot_types::constants::Version; use hotshot_utils::version::read_version; #[test] diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 556dccf286..38a402a971 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -1,9 +1,9 @@ use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::types::SystemContextHandle; -use hotshot_constants::Version; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{predicates::*, view_generator::TestViewGenerator}; +use hotshot_types::constants::Version; use hotshot_types::{ data::ViewNumber, simple_vote::UpgradeProposalData, traits::node_implementation::ConsensusTime, }; diff --git a/types/Cargo.toml b/types/Cargo.toml deleted file mode 100644 index 8c4964884a..0000000000 --- a/types/Cargo.toml +++ /dev/null @@ -1,62 +0,0 @@ -[package] -authors = ["Espresso Systems "] -description = "Types and traits for the HotShot consesus module" -edition = "2021" -name = "hotshot-types" -readme = "../README.md" -version = "0.1.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ark-bls12-381 = { workspace = true } -ark-ec = { workspace = true } -ark-ed-on-bn254 = { workspace = true } -ark-ff = { workspace = true } -ark-serialize = { workspace = true, features = ["derive"] } -ark-std = { workspace = true } -async-compatibility-layer = { workspace = true } -async-lock = { workspace = true } -async-trait = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } -blake3 = { workspace = true } -commit = { workspace = true } -custom_debug = { workspace = true } -derivative = "2.2.0" -digest = { workspace = true } -displaydoc = { version = "0.2.3", default-features = false } -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } -either = { workspace = true, features = ["serde"] } -futures = { workspace = true } -espresso-systems-common = { workspace = true } -ethereum-types = { workspace = true } -generic-array = { workspace = true } -hotshot-constants = { path = "../constants" } -hotshot-utils = { path = "../utils" } -jf-plonk = { workspace = true } -jf-primitives = { workspace = true, features = ["test-srs"] } -jf-utils = { workspace = true } -lazy_static = { workspace = true } -libp2p-networking = { workspace = true } -rand = { workspace = true } -rand_chacha = { workspace = true } -serde = { workspace = true } -sha2 = { workspace = true } -snafu = { workspace = true } -tagged-base64 = { workspace = true } -time = { workspace = true } -tracing = { workspace = true } -typenum = { workspace = true } - -[dev-dependencies] -serde_json = { workspace = true } - -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } - -[lints] -workspace = true diff --git a/types/src/consensus.rs b/types/src/consensus.rs deleted file mode 100644 index ac978c41d8..0000000000 --- a/types/src/consensus.rs +++ /dev/null @@ -1,364 +0,0 @@ -//! Provides the core consensus types - -pub use crate::utils::{View, ViewInner}; -use displaydoc::Display; - -use crate::{ - data::Leaf, - error::HotShotError, - simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, - traits::{ - metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, - node_implementation::NodeType, - }, - utils::Terminator, -}; -use commit::Commitment; - -use std::{ - collections::{BTreeMap, HashMap}, - sync::{Arc, Mutex}, -}; -use tracing::error; - -/// A type alias for `HashMap, T>` -type CommitmentMap = HashMap, T>; - -/// A reference to the consensus algorithm -/// -/// This will contain the state of all rounds. -#[derive(custom_debug::Debug)] -pub struct Consensus { - /// Immutable instance-level state. - pub instance_state: TYPES::InstanceState, - - /// The validated states that are currently loaded in memory. - pub validated_state_map: BTreeMap>, - - /// All the DA certs we've received for current and future views. - /// view -> DA cert - pub saved_da_certs: HashMap>, - - /// All the upgrade certs we've received for current and future views. - /// view -> upgrade cert - pub saved_upgrade_certs: HashMap>, - - /// View number that is currently on. - pub cur_view: TYPES::Time, - - /// last view had a successful decide event - pub last_decided_view: TYPES::Time, - - /// Map of leaf hash -> leaf - /// - contains undecided leaves - /// - includes the MOST RECENT decided leaf - pub saved_leaves: CommitmentMap>, - - /// Saved payloads. - /// - /// Encoded transactions for every view if we got a payload for that view. - pub saved_payloads: BTreeMap>, - - /// The `locked_qc` view number - pub locked_view: TYPES::Time, - - /// the highqc per spec - pub high_qc: QuorumCertificate, - - /// A reference to the metrics trait - pub metrics: Arc, -} - -/// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces -#[derive(Clone, Debug)] -pub struct ConsensusMetricsValue { - /// The number of last synced block height - pub last_synced_block_height: Box, - /// The number of last decided view - pub last_decided_view: Box, - /// Number of timestamp for the last decided time - pub last_decided_time: Box, - /// The current view - pub current_view: Box, - /// Number of views that are in-flight since the last decided view - pub number_of_views_since_last_decide: Box, - /// Number of views that are in-flight since the last anchor view - pub number_of_views_per_decide_event: Box, - /// Number of invalid QCs we've seen since the last commit. - pub invalid_qc: Box, - /// Number of outstanding transactions - pub outstanding_transactions: Box, - /// Memory size in bytes of the serialized transactions still outstanding - pub outstanding_transactions_memory_size: Box, - /// Number of views that timed out - pub number_of_timeouts: Box, -} - -/// The wrapper with a string name for the networking metrics -#[derive(Clone, Debug)] -pub struct ConsensusMetrics { - /// a prefix which tracks the name of the metric - prefix: String, - /// a map of values - values: Arc>, -} - -/// the set of counters and gauges for the networking metrics -#[derive(Clone, Debug, Default, Display)] -pub struct InnerConsensusMetrics { - /// All the counters of the networking metrics - pub counters: HashMap, - /// All the gauges of the networking metrics - pub gauges: HashMap, - /// All the histograms of the networking metrics - pub histograms: HashMap>, - /// All the labels of the networking metrics - pub labels: HashMap, -} - -impl ConsensusMetrics { - #[must_use] - /// For the creation and naming of gauge, counter, histogram and label. - pub fn sub(&self, name: String) -> Self { - let prefix = if self.prefix.is_empty() { - name - } else { - format!("{}-{name}", self.prefix) - }; - Self { - prefix, - values: Arc::clone(&self.values), - } - } -} - -impl Metrics for ConsensusMetrics { - fn create_counter(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_gauge(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_histogram(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_label(&self, label: String) -> Box { - Box::new(self.sub(label)) - } - - fn subgroup(&self, subgroup_name: String) -> Box { - Box::new(self.sub(subgroup_name)) - } -} - -impl Counter for ConsensusMetrics { - fn add(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .counters - .entry(self.prefix.clone()) - .or_default() += amount; - } -} - -impl Gauge for ConsensusMetrics { - fn set(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .gauges - .entry(self.prefix.clone()) - .or_default() = amount; - } - fn update(&self, delta: i64) { - let mut values = self.values.lock().unwrap(); - let value = values.gauges.entry(self.prefix.clone()).or_default(); - let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); - *value = usize::try_from(signed_value + delta).unwrap_or(0); - } -} - -impl Histogram for ConsensusMetrics { - fn add_point(&self, point: f64) { - self.values - .lock() - .unwrap() - .histograms - .entry(self.prefix.clone()) - .or_default() - .push(point); - } -} - -impl Label for ConsensusMetrics { - fn set(&self, value: String) { - *self - .values - .lock() - .unwrap() - .labels - .entry(self.prefix.clone()) - .or_default() = value; - } -} - -impl ConsensusMetricsValue { - /// Create a new instance of this [`ConsensusMetricsValue`] struct, setting all the counters and gauges - #[must_use] - pub fn new(metrics: &dyn Metrics) -> Self { - Self { - last_synced_block_height: metrics - .create_gauge(String::from("last_synced_block_height"), None), - last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), - last_decided_time: metrics.create_gauge(String::from("last_decided_time"), None), - current_view: metrics.create_gauge(String::from("current_view"), None), - number_of_views_since_last_decide: metrics - .create_gauge(String::from("number_of_views_since_last_decide"), None), - number_of_views_per_decide_event: metrics - .create_histogram(String::from("number_of_views_per_decide_event"), None), - invalid_qc: metrics.create_gauge(String::from("invalid_qc"), None), - outstanding_transactions: metrics - .create_gauge(String::from("outstanding_transactions"), None), - outstanding_transactions_memory_size: metrics - .create_gauge(String::from("outstanding_transactions_memory_size"), None), - number_of_timeouts: metrics.create_counter(String::from("number_of_timeouts"), None), - } - } -} - -impl Default for ConsensusMetricsValue { - fn default() -> Self { - Self::new(&*NoMetrics::boxed()) - } -} - -impl Consensus { - /// Update the current view. - pub fn update_view(&mut self, view_number: TYPES::Time) { - self.cur_view = view_number; - } - - /// gather information from the parent chain of leafs - /// # Errors - /// If the leaf or its ancestors are not found in storage - pub fn visit_leaf_ancestors( - &self, - start_from: TYPES::Time, - terminator: Terminator, - ok_when_finished: bool, - mut f: F, - ) -> Result<(), HotShotError> - where - F: FnMut(&Leaf) -> bool, - { - let mut next_leaf = if let Some(view) = self.validated_state_map.get(&start_from) { - view.get_leaf_commitment() - .ok_or_else(|| HotShotError::InvalidState { - context: format!( - "Visited failed view {start_from:?} leaf. Expected successfuil leaf" - ), - })? - } else { - return Err(HotShotError::InvalidState { - context: format!("View {start_from:?} leaf does not exist in state map "), - }); - }; - - while let Some(leaf) = self.saved_leaves.get(&next_leaf) { - if let Terminator::Exclusive(stop_before) = terminator { - if stop_before == leaf.get_view_number() { - if ok_when_finished { - return Ok(()); - } - break; - } - } - next_leaf = leaf.get_parent_commitment(); - if !f(leaf) { - return Ok(()); - } - if let Terminator::Inclusive(stop_after) = terminator { - if stop_after == leaf.get_view_number() { - if ok_when_finished { - return Ok(()); - } - break; - } - } - } - Err(HotShotError::LeafNotFound {}) - } - - /// Garbage collects based on state change right now, this removes from both the - /// `saved_payloads` and `validated_state_map` fields of `Consensus`. - /// # Panics - /// On inconsistent stored entries - pub fn collect_garbage(&mut self, old_anchor_view: TYPES::Time, new_anchor_view: TYPES::Time) { - // state check - let anchor_entry = self - .validated_state_map - .iter() - .next() - .expect("INCONSISTENT STATE: anchor leaf not in state map!"); - if *anchor_entry.0 != old_anchor_view { - error!( - "Something about GC has failed. Older leaf exists than the previous anchor leaf." - ); - } - // perform gc - self.saved_da_certs - .retain(|view_number, _| *view_number >= old_anchor_view); - self.saved_upgrade_certs - .retain(|view_number, _| *view_number >= old_anchor_view); - self.validated_state_map - .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_leaf_commitment()) - .for_each(|leaf| { - self.saved_leaves.remove(&leaf); - }); - self.validated_state_map = self.validated_state_map.split_off(&new_anchor_view); - self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); - } - - /// Gets the last decided leaf. - /// - /// # Panics - /// if the last decided view's leaf does not exist in the state map or saved leaves, which - /// should never happen. - #[must_use] - pub fn get_decided_leaf(&self) -> Leaf { - let decided_view_num = self.last_decided_view; - let view = self.validated_state_map.get(&decided_view_num).unwrap(); - let leaf = view - .get_leaf_commitment() - .expect("Decided leaf not found! Consensus internally inconsistent"); - self.saved_leaves.get(&leaf).unwrap().clone() - } - - /// Gets the validated state with the given view number, if in the state map. - #[must_use] - pub fn get_state(&self, view_number: TYPES::Time) -> Option<&Arc> { - match self.validated_state_map.get(&view_number) { - Some(view) => view.get_state(), - None => None, - } - } - - /// Gets the last decided validated state. - /// - /// # Panics - /// If the last decided view's state does not exist in the state map, which should never - /// happen. - #[must_use] - pub fn get_decided_state(&self) -> &Arc { - let decided_view_num = self.last_decided_view; - self.get_state(decided_view_num) - .expect("Decided state not found! Consensus internally inconsistent") - } -} diff --git a/types/src/data.rs b/types/src/data.rs deleted file mode 100644 index 942bde5bcb..0000000000 --- a/types/src/data.rs +++ /dev/null @@ -1,506 +0,0 @@ -//! Provides types useful for representing `HotShot`'s data structures -//! -//! This module provides types for representing consensus internal state, such as leaves, -//! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. - -use crate::{ - simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::UpgradeProposalData, - traits::{ - block_contents::{ - vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, - }, - election::Membership, - node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, - states::TestableState, - storage::StoredView, - BlockPayload, - }, - vid::{VidCommitment, VidCommon, VidSchemeType, VidShare}, - vote::{Certificate, HasViewNumber}, -}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use bincode::Options; -use commit::{Commitment, Committable, RawCommitmentBuilder}; -use derivative::Derivative; -use hotshot_utils::bincode::bincode_opts; -use jf_primitives::vid::VidDisperse as JfVidDisperse; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use snafu::Snafu; -use std::{ - collections::BTreeMap, - fmt::{Debug, Display}, - hash::Hash, - sync::Arc, -}; - -/// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - CanonicalSerialize, - CanonicalDeserialize, -)] -pub struct ViewNumber(u64); - -impl ConsensusTime for ViewNumber { - /// Create a genesis view number (0) - fn genesis() -> Self { - Self(0) - } - /// Create a new `ViewNumber` with the given value. - fn new(n: u64) -> Self { - Self(n) - } - /// Returen the u64 format - fn get_u64(&self) -> u64 { - self.0 - } -} - -impl Committable for ViewNumber { - fn commit(&self) -> Commitment { - let builder = RawCommitmentBuilder::new("View Number Commitment"); - builder.u64(self.0).finalize() - } -} - -impl std::ops::Add for ViewNumber { - type Output = ViewNumber; - - fn add(self, rhs: u64) -> Self::Output { - Self(self.0 + rhs) - } -} - -impl std::ops::AddAssign for ViewNumber { - fn add_assign(&mut self, rhs: u64) { - self.0 += rhs; - } -} - -impl std::ops::Deref for ViewNumber { - type Target = u64; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl std::ops::Sub for ViewNumber { - type Output = ViewNumber; - fn sub(self, rhs: u64) -> Self::Output { - Self(self.0 - rhs) - } -} - -/// A proposal to start providing data availability for a block. -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct DAProposal { - /// Encoded transactions in the block to be applied. - pub encoded_transactions: Vec, - /// Metadata of the block to be applied. - pub metadata: ::Metadata, - /// View this proposal applies to - pub view_number: TYPES::Time, -} - -/// A proposal to upgrade the network -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -#[serde(bound = "TYPES: NodeType")] -pub struct UpgradeProposal -where - TYPES: NodeType, -{ - /// The information about which version we are upgrading to. - pub upgrade_proposal: UpgradeProposalData, - /// View this proposal applies to - pub view_number: TYPES::Time, -} - -/// VID dispersal data -/// -/// Like [`DAProposal`]. -/// -/// TODO move to vid.rs? -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct VidDisperse { - /// The view number for which this VID data is intended - pub view_number: TYPES::Time, - /// Block payload commitment - pub payload_commitment: VidCommitment, - /// A storage node's key and its corresponding VID share - pub shares: BTreeMap, - /// VID common data sent to all storage nodes - pub common: VidCommon, -} - -impl VidDisperse { - /// Create VID dispersal from a specified membership - /// Uses the specified function to calculate share dispersal - /// Allows for more complex stake table functionality - pub fn from_membership( - view_number: TYPES::Time, - mut vid_disperse: JfVidDisperse, - membership: &Arc, - ) -> Self { - let shares = membership - .get_staked_committee(view_number) - .iter() - .map(|node| (node.clone(), vid_disperse.shares.remove(0))) - .collect(); - - Self { - view_number, - shares, - common: vid_disperse.common, - payload_commitment: vid_disperse.commit, - } - } -} - -/// Proposal to append a block. -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct QuorumProposal { - /// The block header to append - pub block_header: TYPES::BlockHeader, - - /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, - - /// Per spec, justification - pub justify_qc: QuorumCertificate, - - /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view - pub timeout_certificate: Option>, - - /// Possible upgrade certificate, which the leader may optionally attach. - pub upgrade_certificate: Option>, - - /// the propser id - pub proposer_id: TYPES::SignatureKey, -} - -impl HasViewNumber for DAProposal { - fn get_view_number(&self) -> TYPES::Time { - self.view_number - } -} - -impl HasViewNumber for VidDisperse { - fn get_view_number(&self) -> TYPES::Time { - self.view_number - } -} - -impl HasViewNumber for QuorumProposal { - fn get_view_number(&self) -> TYPES::Time { - self.view_number - } -} - -impl HasViewNumber for UpgradeProposal { - fn get_view_number(&self) -> TYPES::Time { - self.view_number - } -} - -/// The error type for block and its transactions. -#[derive(Snafu, Debug)] -pub enum BlockError { - /// Invalid block header. - InvalidBlockHeader, - /// Invalid transaction length. - InvalidTransactionLength, - /// Inconsistent payload commitment. - InconsistentPayloadCommitment, -} - -/// Additional functions required to use a [`Leaf`] with hotshot-testing. -pub trait TestableLeaf { - /// Type of nodes participating in the network. - type NodeType: NodeType; - - /// Create a transaction that can be added to the block contained in this leaf. - fn create_random_transaction( - &self, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> <::BlockPayload as BlockPayload>::Transaction; -} - -/// This is the consensus-internal analogous concept to a block, and it contains the block proper, -/// as well as the hash of its parent `Leaf`. -/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` -#[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] -#[serde(bound(deserialize = ""))] -pub struct Leaf { - /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, - - /// Per spec, justification - pub justify_qc: QuorumCertificate, - - /// The hash of the parent `Leaf` - /// So we can ask if it extends - pub parent_commitment: Commitment, - - /// Block header. - pub block_header: TYPES::BlockHeader, - - /// Optional block payload. - /// - /// It may be empty for nodes not in the DA committee. - pub block_payload: Option, - - /// the proposer id of the leaf - pub proposer_id: TYPES::SignatureKey, -} - -impl PartialEq for Leaf { - fn eq(&self, other: &Self) -> bool { - self.view_number == other.view_number - && self.justify_qc == other.justify_qc - && self.parent_commitment == other.parent_commitment - && self.block_header == other.block_header - } -} - -impl Hash for Leaf { - fn hash(&self, state: &mut H) { - self.view_number.hash(state); - self.justify_qc.hash(state); - self.parent_commitment.hash(state); - self.block_header.hash(state); - } -} - -impl Display for Leaf { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "view: {:?}, height: {:?}, justify: {}", - self.view_number, - self.get_height(), - self.justify_qc - ) - } -} - -impl Leaf { - /// Create a new leaf from its components. - /// - /// # Panics - /// - /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be - /// interpreted as bytes). - #[must_use] - pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { - let (payload, metadata) = TYPES::BlockPayload::genesis(); - let payload_bytes = payload - .encode() - .expect("unable to encode genesis payload") - .collect(); - let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); - let block_header = - TYPES::BlockHeader::genesis(instance_state, payload_commitment, metadata); - Self { - view_number: TYPES::Time::genesis(), - justify_qc: QuorumCertificate::::genesis(), - parent_commitment: fake_commitment(), - block_header: block_header.clone(), - block_payload: Some(payload), - proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), - } - } - - /// Time when this leaf was created. - pub fn get_view_number(&self) -> TYPES::Time { - self.view_number - } - /// Height of this leaf in the chain. - /// - /// Equivalently, this is the number of leaves before this one in the chain. - pub fn get_height(&self) -> u64 { - self.block_header.block_number() - } - /// The QC linking this leaf to its parent in the chain. - pub fn get_justify_qc(&self) -> QuorumCertificate { - self.justify_qc.clone() - } - /// Commitment to this leaf's parent. - pub fn get_parent_commitment(&self) -> Commitment { - self.parent_commitment - } - /// The block header contained in this leaf. - pub fn get_block_header(&self) -> &::BlockHeader { - &self.block_header - } - /// Fill this leaf with the block payload. - /// - /// # Errors - /// - /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()` - /// or if the transactions are of invalid length - pub fn fill_block_payload( - &mut self, - block_payload: TYPES::BlockPayload, - num_storage_nodes: usize, - ) -> Result<(), BlockError> { - let encoded_txns = match block_payload.encode() { - // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. - // - Ok(encoded) => encoded.into_iter().collect(), - Err(_) => return Err(BlockError::InvalidTransactionLength), - }; - let commitment = vid_commitment(&encoded_txns, num_storage_nodes); - if commitment != self.block_header.payload_commitment() { - return Err(BlockError::InconsistentPayloadCommitment); - } - self.block_payload = Some(block_payload); - Ok(()) - } - - /// Fill this leaf with the block payload, without checking - /// header and payload consistency - pub fn fill_block_payload_unchecked(&mut self, block_payload: TYPES::BlockPayload) { - self.block_payload = Some(block_payload); - } - - /// Optional block payload. - pub fn get_block_payload(&self) -> Option { - self.block_payload.clone() - } - - /// A commitment to the block payload contained in this leaf. - pub fn get_payload_commitment(&self) -> VidCommitment { - self.get_block_header().payload_commitment() - } - - /// Identity of the network participant who proposed this leaf. - pub fn get_proposer_id(&self) -> TYPES::SignatureKey { - self.proposer_id.clone() - } - - /// Create a leaf from information stored about a view. - pub fn from_stored_view(stored_view: StoredView) -> Self { - Self { - view_number: stored_view.view_number, - justify_qc: stored_view.justify_qc, - parent_commitment: stored_view.parent, - block_header: stored_view.block_header, - block_payload: stored_view.block_payload, - proposer_id: stored_view.proposer_id, - } - } -} - -impl TestableLeaf for Leaf -where - TYPES::ValidatedState: TestableState, - TYPES::BlockPayload: TestableBlock, -{ - type NodeType = TYPES; - - fn create_random_transaction( - &self, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> <::BlockPayload as BlockPayload>::Transaction { - TYPES::ValidatedState::create_random_transaction(None, rng, padding) - } -} -/// Fake the thing a genesis block points to. Needed to avoid infinite recursion -#[must_use] -pub fn fake_commitment() -> Commitment { - RawCommitmentBuilder::new("Dummy commitment for arbitrary genesis").finalize() -} - -/// create a random commitment -#[must_use] -pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitment { - let random_array: Vec = (0u8..100u8).map(|_| rng.gen_range(0..255)).collect(); - RawCommitmentBuilder::new("Random Commitment") - .constant_str("Random Field") - .var_size_bytes(&random_array) - .finalize() -} - -/// Serialization for the QC assembled signature -/// # Panics -/// if serialization fails -pub fn serialize_signature2( - signatures: &::QCType, -) -> Vec { - let mut signatures_bytes = vec![]; - signatures_bytes.extend("Yes".as_bytes()); - - let (sig, proof) = TYPES::SignatureKey::get_sig_proof(signatures); - let proof_bytes = bincode_opts() - .serialize(&proof.as_bitslice()) - .expect("This serialization shouldn't be able to fail"); - signatures_bytes.extend("bitvec proof".as_bytes()); - signatures_bytes.extend(proof_bytes.as_slice()); - let sig_bytes = bincode_opts() - .serialize(&sig) - .expect("This serialization shouldn't be able to fail"); - signatures_bytes.extend("aggregated signature".as_bytes()); - signatures_bytes.extend(sig_bytes.as_slice()); - signatures_bytes -} - -impl Committable for Leaf { - fn commit(&self) -> commit::Commitment { - let signatures_bytes = if self.justify_qc.is_genesis { - let mut bytes = vec![]; - bytes.extend("genesis".as_bytes()); - bytes - } else { - serialize_signature2::(self.justify_qc.signatures.as_ref().unwrap()) - }; - - // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. - RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", *self.view_number) - .u64_field("block number", self.get_height()) - .field("parent Leaf commitment", self.parent_commitment) - .constant_str("block payload commitment") - .fixed_size_bytes(self.get_payload_commitment().as_ref().as_ref()) - .constant_str("justify_qc view number") - .u64(*self.justify_qc.view_number) - .field( - "justify_qc leaf commitment", - self.justify_qc.get_data().leaf_commit, - ) - .constant_str("justify_qc signatures") - .var_size_bytes(&signatures_bytes) - .finalize() - } -} - -impl From> for StoredView -where - TYPES: NodeType, -{ - fn from(leaf: Leaf) -> Self { - StoredView { - view_number: leaf.get_view_number(), - parent: leaf.get_parent_commitment(), - justify_qc: leaf.get_justify_qc(), - block_header: leaf.get_block_header().clone(), - block_payload: leaf.get_block_payload(), - proposer_id: leaf.get_proposer_id(), - } - } -} diff --git a/types/src/error.rs b/types/src/error.rs deleted file mode 100644 index a8edd760d2..0000000000 --- a/types/src/error.rs +++ /dev/null @@ -1,118 +0,0 @@ -//! Error type for `HotShot` -//! -//! This module provides [`HotShotError`], which is an enum representing possible faults that can -//! occur while interacting with this crate. - -use crate::traits::{ - block_contents::BlockPayload, node_implementation::NodeType, storage::StorageError, -}; -use snafu::Snafu; -use std::num::NonZeroU64; - -#[cfg(async_executor_impl = "async-std")] -use async_std::future::TimeoutError; -#[cfg(async_executor_impl = "tokio")] -use tokio::time::error::Elapsed as TimeoutError; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - -/// Error type for `HotShot` -#[derive(Snafu, Debug)] -#[snafu(visibility(pub))] -#[non_exhaustive] -pub enum HotShotError { - /// Failed to Message the leader in the given stage - #[snafu(display("Failed to message leader with error: {source}"))] - FailedToMessageLeader { - /// The underlying network fault - source: crate::traits::network::NetworkError, - }, - /// Failed to broadcast a message on the network - #[snafu(display("Failed to broadcast a message"))] - FailedToBroadcast { - /// The underlying network fault - source: crate::traits::network::NetworkError, - }, - /// Failure in the block. - #[snafu(display("Failed to build or verify a block: {source}"))] - BlockError { - /// The underlying block error. - source: ::Error, - }, - /// Failure in networking layer - #[snafu(display("Failure in networking layer: {source}"))] - NetworkFault { - /// Underlying network fault - source: crate::traits::network::NetworkError, - }, - /// Item was not present in storage - LeafNotFound {/* TODO we should create a way to to_string */}, - /// Error accesing storage - StorageError { - /// Underlying error - source: StorageError, - }, - /// Invalid state machine state - #[snafu(display("Invalid state machine state: {}", context))] - InvalidState { - /// Context - context: String, - }, - /// HotShot timed out waiting for msgs - TimeoutError { - /// source of error - source: TimeoutError, - }, - /// HotShot timed out during round - ViewTimeoutError { - /// view number - view_number: TYPES::Time, - /// The state that the round was in when it timed out - state: RoundTimedoutState, - }, - /// Not enough valid signatures for a quorum - #[snafu(display("Insufficient number of valid signatures: the threshold is {}, but only {} signatures were valid", threshold, num_valid_signatures))] - InsufficientValidSignatures { - /// Number of valid signatures - num_valid_signatures: usize, - /// Threshold of signatures needed for a quorum - threshold: NonZeroU64, - }, - /// Miscelaneous error - /// TODO fix this with - /// #181 - Misc { - /// source of error - context: String, - }, - /// Internal value used to drive the state machine - Continue, -} - -/// Contains information about what the state of the hotshot-consensus was when a round timed out -#[derive(Debug, Clone)] -#[non_exhaustive] -pub enum RoundTimedoutState { - /// Leader is in a Prepare phase and is waiting for a HighQC - LeaderWaitingForHighQC, - /// Leader is in a Prepare phase and timed out before the round min time is reached - LeaderMinRoundTimeNotReached, - /// Leader is waiting for prepare votes - LeaderWaitingForPrepareVotes, - /// Leader is waiting for precommit votes - LeaderWaitingForPreCommitVotes, - /// Leader is waiting for commit votes - LeaderWaitingForCommitVotes, - - /// Replica is waiting for a prepare message - ReplicaWaitingForPrepare, - /// Replica is waiting for a pre-commit message - ReplicaWaitingForPreCommit, - /// Replica is waiting for a commit message - ReplicaWaitingForCommit, - /// Replica is waiting for a decide message - ReplicaWaitingForDecide, - - /// HotShot-testing tried to collect round events, but it timed out - TestCollectRoundEventsTimedOut, -} diff --git a/types/src/event.rs b/types/src/event.rs deleted file mode 100644 index 14cd65c874..0000000000 --- a/types/src/event.rs +++ /dev/null @@ -1,106 +0,0 @@ -//! Events that a `HotShot` instance can emit - -use crate::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, - error::HotShotError, - message::Proposal, - simple_certificate::QuorumCertificate, - traits::node_implementation::NodeType, -}; - -use std::sync::Arc; -/// A status event emitted by a `HotShot` instance -/// -/// This includes some metadata, such as the stage and view number that the event was generated in, -/// as well as an inner [`EventType`] describing the event proper. -#[derive(Clone, Debug)] -pub struct Event { - /// The view number that this event originates from - pub view_number: TYPES::Time, - /// The underlying event - pub event: EventType, -} - -/// The chain of leafs decided on with corresponding VID info if we have it -pub type LeafChain = Vec<(Leaf, Option>)>; -/// The type and contents of a status event emitted by a `HotShot` instance -/// -/// This enum does not include metadata shared among all variants, such as the stage and view -/// number, and is thus always returned wrapped in an [`Event`]. -#[non_exhaustive] -#[derive(Clone, Debug)] -pub enum EventType { - /// A view encountered an error and was interrupted - Error { - /// The underlying error - error: Arc>, - }, - /// A new decision event was issued - Decide { - /// The chain of Leafs that were committed by this decision - /// - /// This list is sorted in reverse view number order, with the newest (highest view number) - /// block first in the list. - /// - /// This list may be incomplete if the node is currently performing catchup. - /// Vid Info for a decided view may be missing if this node never saw it's share. - leaf_chain: Arc>, - /// The QC signing the most recent leaf in `leaf_chain`. - /// - /// Note that the QC for each additional leaf in the chain can be obtained from the leaf - /// before it using - qc: Arc>, - /// Optional information of the number of transactions in the block, for logging purposes. - block_size: Option, - }, - /// A replica task was canceled by a timeout interrupt - ReplicaViewTimeout { - /// The view that timed out - view_number: TYPES::Time, - }, - /// A next leader task was canceled by a timeout interrupt - NextLeaderViewTimeout { - /// The view that timed out - view_number: TYPES::Time, - }, - /// The view has finished. If values were decided on, a `Decide` event will also be emitted. - ViewFinished { - /// The view number that has just finished - view_number: TYPES::Time, - }, - /// The view timed out - ViewTimeout { - /// The view that timed out - view_number: TYPES::Time, - }, - /// New transactions were received from the network - /// or submitted to the network by us - Transactions { - /// The list of transactions - transactions: Vec, - }, - /// DA proposal was received from the network - /// or submitted to the network by us - DAProposal { - /// Contents of the proposal - proposal: Proposal>, - /// Public key of the leader submitting the proposal - sender: TYPES::SignatureKey, - }, - /// Quorum proposal was received from the network - /// or submitted to the network by us - QuorumProposal { - /// Contents of the proposal - proposal: Proposal>, - /// Public key of the leader submitting the proposal - sender: TYPES::SignatureKey, - }, - /// Upgrade proposal was received from the network - /// or submitted to the network by us - UpgradeProposal { - /// Contents of the proposal - proposal: Proposal>, - /// Public key of the leader submitting the proposal - sender: TYPES::SignatureKey, - }, -} diff --git a/types/src/lib.rs b/types/src/lib.rs deleted file mode 100644 index 66b352817d..0000000000 --- a/types/src/lib.rs +++ /dev/null @@ -1,185 +0,0 @@ -//! Types and Traits for the `HotShot` consensus module -use bincode::Options; -use displaydoc::Display; -use hotshot_utils::bincode::bincode_opts; -use light_client::StateVerKey; -use std::fmt::Debug; -use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; -use tracing::error; -use traits::{election::ElectionConfig, signature_key::SignatureKey}; -pub mod consensus; -pub mod data; -pub mod error; -pub mod event; -pub mod light_client; -pub mod message; -pub mod qc; -pub mod signature_key; -pub mod simple_certificate; -pub mod simple_vote; -pub mod stake_table; -pub mod traits; -pub mod utils; -pub mod vid; -pub mod vote; - -/// Pinned future that is Send and Sync -pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; - -/// yoinked from futures crate -pub fn assert_future(future: F) -> F -where - F: Future, -{ - future -} -/// yoinked from futures crate, adds sync bound that we need -pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> -where - F: Future + Sized + Send + Sync + 'a, -{ - assert_future::(Box::pin(fut)) -} -/// the type of consensus to run. Either: -/// wait for a signal to start a view, -/// or constantly run -/// you almost always want continuous -/// incremental is just for testing -#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub enum ExecutionType { - /// constantly increment view as soon as view finishes - Continuous, - /// wait for a signal - Incremental, -} - -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] -#[serde(bound(deserialize = ""))] -/// config for validator, including public key, private key, stake value -pub struct ValidatorConfig { - /// The validator's public key and stake value - pub public_key: KEY, - /// The validator's private key, should be in the mempool, not public - pub private_key: KEY::PrivateKey, - /// The validator's stake - pub stake_value: u64, - /// the validator's key pairs for state signing/verification - pub state_key_pair: light_client::StateKeyPair, -} - -impl ValidatorConfig { - /// generate validator config from input seed, index and stake value - #[must_use] - pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { - let (public_key, private_key) = KEY::generated_from_seed_indexed(seed, index); - let state_key_pairs = light_client::StateKeyPair::generate_from_seed_indexed(seed, index); - Self { - public_key, - private_key, - stake_value, - state_key_pair: state_key_pairs, - } - } - - /// get the public config of the validator - pub fn get_public_config(&self) -> PeerConfig { - PeerConfig { - stake_table_entry: self.public_key.get_stake_table_entry(self.stake_value), - state_ver_key: self.state_key_pair.0.ver_key(), - } - } -} - -impl Default for ValidatorConfig { - fn default() -> Self { - Self::generated_from_seed_indexed([0u8; 32], 0, 1) - } -} - -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] -#[serde(bound(deserialize = ""))] -/// structure of peers' config, including public key, stake value, and state key. -pub struct PeerConfig { - /// The peer's public key and stake value - pub stake_table_entry: KEY::StakeTableEntry, - /// the peer's state public key - pub state_ver_key: StateVerKey, -} - -impl PeerConfig { - /// Serialize a peer's config to bytes - pub fn to_bytes(config: &Self) -> Vec { - let x = bincode_opts().serialize(config); - match x { - Ok(x) => x, - Err(e) => { - error!(?e, "Failed to serialize public key"); - vec![] - } - } - } - - /// Deserialize a peer's config from bytes - /// # Errors - /// Will return `None` if deserialization fails - pub fn from_bytes(bytes: &[u8]) -> Option { - let x: Result, _> = bincode_opts().deserialize(bytes); - match x { - Ok(pub_key) => Some(pub_key), - Err(e) => { - error!(?e, "Failed to deserialize public key"); - None - } - } - } -} - -impl Default for PeerConfig { - fn default() -> Self { - let default_validator_config = ValidatorConfig::::default(); - default_validator_config.get_public_config() - } -} - -/// Holds configuration for a `HotShot` -#[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] -#[serde(bound(deserialize = ""))] -pub struct HotShotConfig { - /// Whether to run one view or continuous views - pub execution_type: ExecutionType, - /// Total number of nodes in the network - // Earlier it was total_nodes - pub num_nodes_with_stake: NonZeroUsize, - /// Number of nodes without stake - pub num_nodes_without_stake: usize, - /// Minimum transactions per block - pub min_transactions: usize, - /// Maximum transactions per block - pub max_transactions: NonZeroUsize, - /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter - pub known_nodes_with_stake: Vec>, - /// List of known non-staking nodes' public keys - pub known_nodes_without_stake: Vec, - /// My own validator config, including my public key, private key, stake value, serving as private parameter - pub my_own_validator_config: ValidatorConfig, - /// List of DA committee (staking)nodes for static DA committe - pub da_staked_committee_size: usize, - /// List of DA committee nodes (non-staking)nodes for static DA committe - pub da_non_staked_committee_size: usize, - /// Base duration for next-view timeout, in milliseconds - pub next_view_timeout: u64, - /// The exponential backoff ration for the next-view timeout - pub timeout_ratio: (u64, u64), - /// The delay a leader inserts before starting pre-commit, in milliseconds - pub round_start_delay: u64, - /// Delay after init before starting consensus, in milliseconds - pub start_delay: u64, - /// Number of network bootstrap nodes - pub num_bootstrap: usize, - /// The minimum amount of time a leader has to wait to start a round - pub propose_min_round_time: Duration, - /// The maximum amount of time a leader can wait to start a round - pub propose_max_round_time: Duration, - /// the election configuration - pub election_config: Option, -} diff --git a/types/src/light_client.rs b/types/src/light_client.rs deleted file mode 100644 index ce98eedd3e..0000000000 --- a/types/src/light_client.rs +++ /dev/null @@ -1,223 +0,0 @@ -//! Types and structs associated with light client state - -use ark_ed_on_bn254::EdwardsConfig as Config; -use ark_ff::PrimeField; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ethereum_types::U256; -use jf_primitives::signatures::schnorr; -use rand::SeedableRng; -use rand_chacha::ChaCha20Rng; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use tagged_base64::tagged; - -/// Base field in the prover circuit -pub type CircuitField = ark_ed_on_bn254::Fq; -/// Concrete type for light client state -pub type LightClientState = GenericLightClientState; -/// Signature scheme -pub type StateSignatureScheme = - jf_primitives::signatures::schnorr::SchnorrSignatureScheme; -/// Signatures -pub type StateSignature = schnorr::Signature; -/// Verification key for verifying state signatures -pub type StateVerKey = schnorr::VerKey; -/// Signing key for signing a light client state -pub type StateSignKey = schnorr::SignKey; -/// Concrete for circuit's public input -pub type PublicInput = GenericPublicInput; -/// Key pairs for signing/verifying a light client state -#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] -pub struct StateKeyPair(pub schnorr::KeyPair); - -/// Request body to send to the state relay server -#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize)] -pub struct StateSignatureRequestBody { - /// The public key associated with this request - pub key: StateVerKey, - /// The associated light client state - pub state: LightClientState, - /// The associated signature of the light client state - pub signature: StateSignature, -} - -/// The state signatures bundle is a light client state and its signatures collected -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct StateSignaturesBundle { - /// The state for this signatures bundle - pub state: LightClientState, - /// The collected signatures - pub signatures: HashMap, - /// Total stakes associated with the signer - pub accumulated_weight: U256, -} - -/// A light client state -#[tagged("LIGHT_CLIENT_STATE")] -#[derive( - Clone, - Debug, - CanonicalSerialize, - CanonicalDeserialize, - Default, - Eq, - PartialEq, - PartialOrd, - Ord, - Hash, -)] -pub struct GenericLightClientState { - /// Current view number - pub view_number: usize, - /// Current block height - pub block_height: usize, - /// Root of the block commitment tree - pub block_comm_root: F, - /// Commitment for fee ledger - pub fee_ledger_comm: F, - /// Commitment for the stake table - pub stake_table_comm: (F, F, F), -} - -impl From> for [F; 7] { - fn from(state: GenericLightClientState) -> Self { - [ - F::from(state.view_number as u64), - F::from(state.block_height as u64), - state.block_comm_root, - state.fee_ledger_comm, - state.stake_table_comm.0, - state.stake_table_comm.1, - state.stake_table_comm.2, - ] - } -} -impl From<&GenericLightClientState> for [F; 7] { - fn from(state: &GenericLightClientState) -> Self { - [ - F::from(state.view_number as u64), - F::from(state.block_height as u64), - state.block_comm_root, - state.fee_ledger_comm, - state.stake_table_comm.0, - state.stake_table_comm.1, - state.stake_table_comm.2, - ] - } -} - -impl std::ops::Deref for StateKeyPair { - type Target = schnorr::KeyPair; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl StateKeyPair { - /// Generate key pairs from private signing keys - #[must_use] - pub fn from_sign_key(sk: StateSignKey) -> Self { - Self(schnorr::KeyPair::::from(sk)) - } - - /// Generate key pairs from `thread_rng()` - #[must_use] - pub fn generate() -> StateKeyPair { - schnorr::KeyPair::generate(&mut rand::thread_rng()).into() - } - - /// Generate key pairs from seed - #[must_use] - pub fn generate_from_seed(seed: [u8; 32]) -> StateKeyPair { - schnorr::KeyPair::generate(&mut ChaCha20Rng::from_seed(seed)).into() - } - - /// Generate key pairs from an index and a seed - #[must_use] - pub fn generate_from_seed_indexed(seed: [u8; 32], index: u64) -> StateKeyPair { - let mut hasher = blake3::Hasher::new(); - hasher.update(&seed); - hasher.update(&index.to_le_bytes()); - let new_seed = *hasher.finalize().as_bytes(); - Self::generate_from_seed(new_seed) - } -} - -impl From> for StateKeyPair { - fn from(value: schnorr::KeyPair) -> Self { - StateKeyPair(value) - } -} - -/// Public input to the light client state prover service -#[derive(Clone, Debug)] -pub struct GenericPublicInput(Vec); - -impl AsRef<[F]> for GenericPublicInput { - fn as_ref(&self) -> &[F] { - &self.0 - } -} - -impl From> for GenericPublicInput { - fn from(v: Vec) -> Self { - Self(v) - } -} - -impl GenericPublicInput { - /// Return the threshold - #[must_use] - pub fn threshold(&self) -> F { - self.0[0] - } - - /// Return the view number of the light client state - #[must_use] - pub fn view_number(&self) -> F { - self.0[1] - } - - /// Return the block height of the light client state - #[must_use] - pub fn block_height(&self) -> F { - self.0[2] - } - - /// Return the block commitment root of the light client state - #[must_use] - pub fn block_comm_root(&self) -> F { - self.0[3] - } - - /// Return the fee ledger commitment of the light client state - #[must_use] - pub fn fee_ledger_comm(&self) -> F { - self.0[4] - } - - /// Return the stake table commitment of the light client state - #[must_use] - pub fn stake_table_comm(&self) -> (F, F, F) { - (self.0[5], self.0[6], self.0[7]) - } - - /// Return the qc key commitment of the light client state - #[must_use] - pub fn qc_key_comm(&self) -> F { - self.0[5] - } - - /// Return the state key commitment of the light client state - #[must_use] - pub fn state_key_comm(&self) -> F { - self.0[6] - } - - /// Return the stake amount commitment of the light client state - #[must_use] - pub fn stake_amount_comm(&self) -> F { - self.0[7] - } -} diff --git a/types/src/message.rs b/types/src/message.rs deleted file mode 100644 index 47ab483fbe..0000000000 --- a/types/src/message.rs +++ /dev/null @@ -1,321 +0,0 @@ -//! Network message types -//! -//! This module contains types used to represent the various types of messages that -//! `HotShot` nodes can send among themselves. - -use crate::data::{QuorumProposal, UpgradeProposal}; -use crate::simple_certificate::{ - DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, -}; -use crate::simple_vote::{ - DAVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, -}; -use crate::traits::network::ResponseMessage; -use crate::traits::signature_key::SignatureKey; -use crate::vote::HasViewNumber; -use crate::{ - data::{DAProposal, VidDisperse}, - simple_vote::QuorumVote, - traits::{ - network::{DataRequest, NetworkMsg, ViewMessage}, - node_implementation::{ConsensusTime, NodeType}, - }, -}; - -use derivative::Derivative; -use either::Either::{self, Left, Right}; -use hotshot_constants::Version; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; -use std::{fmt::Debug, marker::PhantomData}; - -/// Incoming message -#[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = "", serialize = ""))] -pub struct Message { - /// The version of the protocol in use for this message - pub version: Version, - - /// The sender of this message - pub sender: TYPES::SignatureKey, - - /// The message kind - pub kind: MessageKind, -} - -impl NetworkMsg for Message {} - -impl ViewMessage for Message { - /// get the view number out of a message - fn get_view_number(&self) -> TYPES::Time { - self.kind.get_view_number() - } - fn purpose(&self) -> MessagePurpose { - self.kind.purpose() - } -} - -/// A wrapper type for implementing `PassType` on a vector of `Message`. -#[derive(Clone, Debug)] -pub struct Messages(pub Vec>); - -/// A message type agnostic description of a message's purpose -#[derive(PartialEq, Copy, Clone)] -pub enum MessagePurpose { - /// Message with a [quorum/DA] proposal. - Proposal, - /// Message with most recent [quorum/DA] proposal the server has - LatestProposal, - /// Message with most recent view sync certificate the server has - LatestViewSyncCertificate, - /// Message with a quorum vote. - Vote, - /// Message with a view sync vote. - ViewSyncVote, - /// Message with a view sync certificate. - ViewSyncCertificate, - /// Message with a DAC. - DAC, - /// Message for internal use - Internal, - /// Data message - Data, - /// VID disperse, like [`Proposal`]. - VidDisperse, - /// Message with an upgrade proposal. - Upgrade, -} - -// TODO (da) make it more customized to the consensus layer, maybe separating the specific message -// data from the kind enum. -/// Enum representation of any message type -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -#[serde(bound(deserialize = "", serialize = ""))] -pub enum MessageKind { - /// Messages related to the consensus protocol - Consensus(SequencingMessage), - /// Messages relating to sharing data between nodes - Data(DataMessage), -} - -impl MessageKind { - // Can't implement `From` directly due to potential conflict with - // `From`. - /// Construct a [`MessageKind`] from [`SequencingMessage`]. - pub fn from_consensus_message(m: SequencingMessage) -> Self { - Self::Consensus(m) - } -} - -impl From> for MessageKind { - fn from(m: DataMessage) -> Self { - Self::Data(m) - } -} - -impl ViewMessage for MessageKind { - fn get_view_number(&self) -> TYPES::Time { - match &self { - MessageKind::Consensus(message) => message.view_number(), - MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, - MessageKind::Data(DataMessage::RequestData(msg)) => msg.view, - MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { - ResponseMessage::Found(m) => m.view_number(), - ResponseMessage::NotFound => TYPES::Time::new(1), - }, - } - } - - fn purpose(&self) -> MessagePurpose { - match &self { - MessageKind::Consensus(message) => message.purpose(), - MessageKind::Data(_) => MessagePurpose::Data, - } - } -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = "", serialize = ""))] -/// Messages related to both validating and sequencing consensus. -pub enum GeneralConsensusMessage { - /// Message with a quorum proposal. - Proposal(Proposal>), - - /// Message with a quorum vote. - Vote(QuorumVote), - - /// Message with a view sync pre-commit vote - ViewSyncPreCommitVote(ViewSyncPreCommitVote), - - /// Message with a view sync commit vote - ViewSyncCommitVote(ViewSyncCommitVote), - - /// Message with a view sync finalize vote - ViewSyncFinalizeVote(ViewSyncFinalizeVote), - - /// Message with a view sync pre-commit certificate - ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), - - /// Message with a view sync commit certificate - ViewSyncCommitCertificate(ViewSyncCommitCertificate2), - - /// Message with a view sync finalize certificate - ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), - - /// Message with a Timeout vote - TimeoutVote(TimeoutVote), - - /// Message with an upgrade proposal - UpgradeProposal(Proposal>), - - /// Message with an upgrade vote - UpgradeVote(UpgradeVote), -} - -#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] -#[serde(bound(deserialize = "", serialize = ""))] -/// Messages related to the sequencing consensus protocol for the DA committee. -pub enum CommitteeConsensusMessage { - /// Proposal for data availability committee - DAProposal(Proposal>), - - /// vote for data availability committee - DAVote(DAVote), - - /// Certificate data is available - DACertificate(DACertificate), - - /// Initiate VID dispersal. - /// - /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. - /// TODO this variant should not be a [`CommitteeConsensusMessage`] because - VidDisperseMsg(Proposal>), -} - -/// Messages for sequencing consensus. -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = "", serialize = ""))] -pub struct SequencingMessage( - pub Either, CommitteeConsensusMessage>, -); - -impl SequencingMessage { - // TODO: Disable panic after the `ViewSync` case is implemented. - /// Get the view number this message relates to - #[allow(clippy::panic)] - fn view_number(&self) -> TYPES::Time { - match &self.0 { - Left(general_message) => { - match general_message { - GeneralConsensusMessage::Proposal(p) => { - // view of leader in the leaf when proposal - // this should match replica upon receipt - p.data.get_view_number() - } - GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view_number(), - GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), - GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncCommitVote(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncFinalizeVote(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { - message.get_view_number() - } - GeneralConsensusMessage::UpgradeProposal(message) => { - message.data.get_view_number() - } - GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), - } - } - Right(committee_message) => { - match committee_message { - CommitteeConsensusMessage::DAProposal(p) => { - // view of leader in the leaf when proposal - // this should match replica upon receipt - p.data.get_view_number() - } - CommitteeConsensusMessage::DAVote(vote_message) => { - vote_message.get_view_number() - } - CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, - CommitteeConsensusMessage::VidDisperseMsg(disperse) => { - disperse.data.get_view_number() - } - } - } - } - } - - // TODO: Disable panic after the `ViewSync` case is implemented. - /// Get the message purpos - #[allow(clippy::panic)] - fn purpose(&self) -> MessagePurpose { - match &self.0 { - Left(general_message) => match general_message { - GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, - GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { - MessagePurpose::Vote - } - GeneralConsensusMessage::ViewSyncPreCommitVote(_) - | GeneralConsensusMessage::ViewSyncCommitVote(_) - | GeneralConsensusMessage::ViewSyncFinalizeVote(_) => MessagePurpose::ViewSyncVote, - - GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) - | GeneralConsensusMessage::ViewSyncCommitCertificate(_) - | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { - MessagePurpose::ViewSyncCertificate - } - - GeneralConsensusMessage::UpgradeProposal(_) - | GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::Upgrade, - }, - Right(committee_message) => match committee_message { - CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, - CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, - CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, - CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, - }, - } - } -} - -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -/// Messages related to sending data between nodes -pub enum DataMessage { - /// Contains a transaction to be submitted - /// TODO rethink this when we start to send these messages - /// we only need the view number for broadcast - SubmitTransaction(TYPES::Transaction, TYPES::Time), - /// A request for data - RequestData(DataRequest), - /// A response to a data request - DataResponse(ResponseMessage), -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -/// Prepare qc from the leader -pub struct Proposal + DeserializeOwned> { - // NOTE: optimization could include view number to help look up parent leaf - // could even do 16 bit numbers if we want - /// The data being proposed. - pub data: PROPOSAL, - /// The proposal must be signed by the view leader - pub signature: ::PureAssembledSignatureType, - /// Phantom for TYPES - pub _pd: PhantomData, -} diff --git a/types/src/qc.rs b/types/src/qc.rs deleted file mode 100644 index 3d91fcac75..0000000000 --- a/types/src/qc.rs +++ /dev/null @@ -1,304 +0,0 @@ -//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. -//! See more details in hotshot paper. - -use crate::{ - stake_table::StakeTableEntry, - traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, -}; -use ark_std::{ - fmt::Debug, - format, - marker::PhantomData, - rand::{CryptoRng, RngCore}, - vec, - vec::Vec, -}; -use bitvec::prelude::*; -use ethereum_types::U256; -use generic_array::GenericArray; -use jf_primitives::{ - errors::{PrimitivesError, PrimitivesError::ParameterError}, - signatures::AggregateableSignatureSchemes, -}; -use serde::{Deserialize, Serialize}; -use typenum::U32; - -/// An implementation of QC using BLS signature and a bit-vector. -#[derive(Serialize, Deserialize)] -pub struct BitVectorQC Deserialize<'a>>( - PhantomData, -); - -/// Public parameters of [`BitVectorQC`] -#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] -#[serde(bound(deserialize = ""))] -pub struct QCParams Deserialize<'a>> { - /// the stake table (snapshot) this QC is verified against - pub stake_entries: Vec>, - /// threshold for the accumulated "weight" of votes to form a QC - pub threshold: U256, - /// public parameter for the aggregated signature scheme - pub agg_sig_pp: P, -} - -impl QuorumCertificateScheme for BitVectorQC -where - A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, - A::VerificationKey: SignatureKey, -{ - type QCProverParams = QCParams; - - // TODO: later with SNARKs we'll use a smaller verifier parameter - type QCVerifierParams = QCParams; - - type QC = (A::Signature, BitVec); - type MessageLength = U32; - type QuorumSize = U256; - - /// Sign a message with the signing key - fn sign>( - pp: &A::PublicParameter, - sk: &A::SigningKey, - msg: M, - prng: &mut R, - ) -> Result { - A::sign(pp, sk, msg, prng) - } - - fn assemble( - qc_pp: &Self::QCProverParams, - signers: &BitSlice, - sigs: &[A::Signature], - ) -> Result { - if signers.len() != qc_pp.stake_entries.len() { - return Err(ParameterError(format!( - "bit vector len {} != the number of stake entries {}", - signers.len(), - qc_pp.stake_entries.len(), - ))); - } - let total_weight: U256 = - qc_pp - .stake_entries - .iter() - .zip(signers.iter()) - .fold(U256::zero(), |acc, (entry, b)| { - if *b { - acc + entry.stake_amount - } else { - acc - } - }); - if total_weight < qc_pp.threshold { - return Err(ParameterError(format!( - "total_weight {} less than threshold {}", - total_weight, qc_pp.threshold, - ))); - } - let mut ver_keys = vec![]; - for (entry, b) in qc_pp.stake_entries.iter().zip(signers.iter()) { - if *b { - ver_keys.push(entry.stake_key.clone()); - } - } - if ver_keys.len() != sigs.len() { - return Err(ParameterError(format!( - "the number of ver_keys {} != the number of partial signatures {}", - ver_keys.len(), - sigs.len(), - ))); - } - let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; - - Ok((sig, signers.into())) - } - - fn check( - qc_vp: &Self::QCVerifierParams, - message: &GenericArray, - qc: &Self::QC, - ) -> Result { - let (sig, signers) = qc; - if signers.len() != qc_vp.stake_entries.len() { - return Err(ParameterError(format!( - "signers bit vector len {} != the number of stake entries {}", - signers.len(), - qc_vp.stake_entries.len(), - ))); - } - let total_weight: U256 = - qc_vp - .stake_entries - .iter() - .zip(signers.iter()) - .fold(U256::zero(), |acc, (entry, b)| { - if *b { - acc + entry.stake_amount - } else { - acc - } - }); - if total_weight < qc_vp.threshold { - return Err(ParameterError(format!( - "total_weight {} less than threshold {}", - total_weight, qc_vp.threshold, - ))); - } - let mut ver_keys = vec![]; - for (entry, b) in qc_vp.stake_entries.iter().zip(signers.iter()) { - if *b { - ver_keys.push(entry.stake_key.clone()); - } - } - A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; - - Ok(total_weight) - } - - fn trace( - qc_vp: &Self::QCVerifierParams, - message: &GenericArray<::MessageUnit, Self::MessageLength>, - qc: &Self::QC, - ) -> Result::VerificationKey>, PrimitivesError> { - let (_sig, signers) = qc; - if signers.len() != qc_vp.stake_entries.len() { - return Err(ParameterError(format!( - "signers bit vector len {} != the number of stake entries {}", - signers.len(), - qc_vp.stake_entries.len(), - ))); - } - - Self::check(qc_vp, message, qc)?; - - let signer_pks: Vec<_> = qc_vp - .stake_entries - .iter() - .zip(signers.iter()) - .filter(|(_, b)| **b) - .map(|(pk, _)| pk.stake_key.clone()) - .collect(); - Ok(signer_pks) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use jf_primitives::signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, - SignatureScheme, - }; - - macro_rules! test_quorum_certificate { - ($aggsig:tt) => { - let mut rng = jf_utils::test_rng(); - let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); - let key_pair1 = KeyPair::generate(&mut rng); - let key_pair2 = KeyPair::generate(&mut rng); - let key_pair3 = KeyPair::generate(&mut rng); - let entry1 = StakeTableEntry { - stake_key: key_pair1.ver_key(), - stake_amount: U256::from(3u8), - }; - let entry2 = StakeTableEntry { - stake_key: key_pair2.ver_key(), - stake_amount: U256::from(5u8), - }; - let entry3 = StakeTableEntry { - stake_key: key_pair3.ver_key(), - stake_amount: U256::from(7u8), - }; - let qc_pp = QCParams { - stake_entries: vec![entry1, entry2, entry3], - threshold: U256::from(10u8), - agg_sig_pp, - }; - let msg = [72u8; 32]; - let sig1 = - BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair1.sign_key_ref(), &msg, &mut rng) - .unwrap(); - let sig2 = - BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair2.sign_key_ref(), &msg, &mut rng) - .unwrap(); - let sig3 = - BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair3.sign_key_ref(), &msg, &mut rng) - .unwrap(); - - // happy path - let signers = bitvec![0, 1, 1]; - let qc = BitVectorQC::<$aggsig>::assemble( - &qc_pp, - signers.as_bitslice(), - &[sig2.clone(), sig3.clone()], - ) - .unwrap(); - assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); - assert_eq!( - BitVectorQC::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), - vec![key_pair2.ver_key(), key_pair3.ver_key()], - ); - - // Check the QC and the QCParams can be serialized / deserialized - assert_eq!( - qc, - bincode::deserialize(&bincode::serialize(&qc).unwrap()).unwrap() - ); - - assert_eq!( - qc_pp, - bincode::deserialize(&bincode::serialize(&qc_pp).unwrap()).unwrap() - ); - - // bad paths - // number of signatures unmatch - assert!(BitVectorQC::<$aggsig>::assemble( - &qc_pp, - signers.as_bitslice(), - &[sig2.clone()] - ) - .is_err()); - // total weight under threshold - let active_bad = bitvec![1, 1, 0]; - assert!(BitVectorQC::<$aggsig>::assemble( - &qc_pp, - active_bad.as_bitslice(), - &[sig1.clone(), sig2.clone()] - ) - .is_err()); - // wrong bool vector length - let active_bad_2 = bitvec![0, 1, 1, 0]; - assert!(BitVectorQC::<$aggsig>::assemble( - &qc_pp, - active_bad_2.as_bitslice(), - &[sig2, sig3], - ) - .is_err()); - - assert!(BitVectorQC::<$aggsig>::check( - &qc_pp, - &msg.into(), - &(qc.0.clone(), active_bad) - ) - .is_err()); - assert!(BitVectorQC::<$aggsig>::check( - &qc_pp, - &msg.into(), - &(qc.0.clone(), active_bad_2) - ) - .is_err()); - let bad_msg = [70u8; 32]; - assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); - - let bad_sig = &sig1; - assert!( - BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) - .is_err() - ); - }; - } - #[test] - fn test_quorum_certificate() { - test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); - } -} diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs deleted file mode 100644 index 55f0a9c8b5..0000000000 --- a/types/src/signature_key.rs +++ /dev/null @@ -1,127 +0,0 @@ -//! Types and structs for the hotshot signature keys - -use crate::{ - qc::{BitVectorQC, QCParams}, - stake_table::StakeTableEntry, - traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, -}; -use bitvec::{slice::BitSlice, vec::BitVec}; -use ethereum_types::U256; -use generic_array::GenericArray; -use jf_primitives::{ - errors::PrimitivesError, - signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, - SignatureScheme, - }, -}; -use rand::SeedableRng; -use rand_chacha::ChaCha20Rng; -use tracing::instrument; - -/// BLS private key used to sign a message -pub type BLSPrivKey = SignKey; -/// BLS public key used to verify a signature -pub type BLSPubKey = VerKey; -/// Public parameters for BLS signature scheme -pub type BLSPublicParam = (); - -impl SignatureKey for BLSPubKey { - type PrivateKey = BLSPrivKey; - type StakeTableEntry = StakeTableEntry; - type QCParams = - QCParams::PublicParameter>; - type PureAssembledSignatureType = - ::Signature; - type QCType = (Self::PureAssembledSignatureType, BitVec); - type SignError = PrimitivesError; - - #[instrument(skip(self))] - fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { - // This is the validation for QC partial signature before append(). - BLSOverBN254CurveSignatureScheme::verify(&(), self, data, signature).is_ok() - } - - fn sign( - sk: &Self::PrivateKey, - data: &[u8], - ) -> Result { - BitVectorQC::::sign( - &(), - sk, - data, - &mut rand::thread_rng(), - ) - } - - fn from_private(private_key: &Self::PrivateKey) -> Self { - BLSPubKey::from(private_key) - } - - fn to_bytes(&self) -> Vec { - let mut buf = vec![]; - ark_serialize::CanonicalSerialize::serialize_compressed(self, &mut buf) - .expect("Serialization should not fail."); - buf - } - - fn from_bytes(bytes: &[u8]) -> Result { - Ok(ark_serialize::CanonicalDeserialize::deserialize_compressed( - bytes, - )?) - } - - fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey) { - let mut hasher = blake3::Hasher::new(); - hasher.update(&seed); - hasher.update(&index.to_le_bytes()); - let new_seed = *hasher.finalize().as_bytes(); - let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed(new_seed)); - (kp.ver_key(), kp.sign_key_ref().clone()) - } - - fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry { - StakeTableEntry { - stake_key: *self, - stake_amount: U256::from(stake), - } - } - - fn get_public_key(entry: &Self::StakeTableEntry) -> Self { - entry.stake_key - } - - fn get_public_parameter( - stake_entries: Vec, - threshold: U256, - ) -> Self::QCParams { - QCParams { - stake_entries, - threshold, - agg_sig_pp: (), - } - } - - fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool { - let msg = GenericArray::from_slice(data); - BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() - } - - fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { - signature.clone() - } - - fn assemble( - real_qc_pp: &Self::QCParams, - signers: &BitSlice, - sigs: &[Self::PureAssembledSignatureType], - ) -> Self::QCType { - BitVectorQC::::assemble(real_qc_pp, signers, sigs) - .expect("this assembling shouldn't fail") - } - - fn genesis_proposer_pk() -> Self { - let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed([0u8; 32])); - kp.ver_key() - } -} diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs deleted file mode 100644 index 1592583e90..0000000000 --- a/types/src/simple_certificate.rs +++ /dev/null @@ -1,179 +0,0 @@ -//! Implementations of the simple certificate type. Used for Quorum, DA, and Timeout Certificates - -use std::{ - fmt::{self, Debug, Display, Formatter}, - hash::Hash, - marker::PhantomData, -}; - -use commit::{Commitment, CommitmentBoundsArkless, Committable}; -use ethereum_types::U256; - -use crate::{ - data::Leaf, - simple_vote::{ - DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, - ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, - }, - traits::{ - election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, - signature_key::SignatureKey, - }, - vote::{Certificate, HasViewNumber}, -}; - -use serde::{Deserialize, Serialize}; - -/// Trait which allows use to inject different threshold calculations into a Certificate type -pub trait Threshold { - /// Calculate a threshold based on the membership - fn threshold>(membership: &MEMBERSHIP) -> u64; -} - -/// Defines a threshold which is 2f + 1 (Amount needed for Quorum) -#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct SuccessThreshold {} - -impl Threshold for SuccessThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.success_threshold().into() - } -} - -/// Defines a threshold which is f + 1 (i.e at least one of the stake is honest) -#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct OneHonestThreshold {} - -impl Threshold for OneHonestThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.failure_threshold().into() - } -} - -/// Defines a threshold which is 0.9n + 1 (i.e. over 90% of the nodes with stake) -#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct UpgradeThreshold {} - -impl Threshold for UpgradeThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.upgrade_threshold().into() - } -} - -/// A certificate which can be created by aggregating many simple votes on the commitment. -#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct SimpleCertificate> { - /// The data this certificate is for. I.e the thing that was voted on to create this Certificate - pub data: VOTEABLE, - /// commitment of all the votes this cert should be signed over - pub vote_commitment: Commitment, - /// Which view this QC relates to - pub view_number: TYPES::Time, - /// assembled signature for certificate aggregation - pub signatures: Option<::QCType>, - /// If this QC is for the genesis block - pub is_genesis: bool, - /// phantom data for `THRESHOLD` and `TYPES` - pub _pd: PhantomData<(TYPES, THRESHOLD)>, -} - -impl> Certificate - for SimpleCertificate -{ - type Voteable = VOTEABLE; - type Threshold = THRESHOLD; - - fn create_signed_certificate( - vote_commitment: Commitment, - data: Self::Voteable, - sig: ::QCType, - view: TYPES::Time, - ) -> Self { - SimpleCertificate { - data, - vote_commitment, - view_number: view, - signatures: Some(sig), - is_genesis: false, - _pd: PhantomData, - } - } - fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { - if self.is_genesis && self.view_number == TYPES::Time::genesis() { - return true; - } - let real_qc_pp = ::get_public_parameter( - membership.get_committee_qc_stake_table(), - U256::from(Self::threshold(membership)), - ); - ::check( - &real_qc_pp, - self.vote_commitment.as_ref(), - self.signatures.as_ref().unwrap(), - ) - } - fn threshold>(membership: &MEMBERSHIP) -> u64 { - THRESHOLD::threshold(membership) - } - fn get_data(&self) -> &Self::Voteable { - &self.data - } - fn get_data_commitment(&self) -> Commitment { - self.vote_commitment - } -} - -impl> - HasViewNumber for SimpleCertificate -{ - fn get_view_number(&self) -> TYPES::Time { - self.view_number - } -} -impl Display for QuorumCertificate { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "view: {:?}, is_genesis: {:?}", - self.view_number, self.is_genesis - ) - } -} - -impl QuorumCertificate { - #[must_use] - /// Creat the Genisis certificate - pub fn genesis() -> Self { - let data = QuorumData { - leaf_commit: Commitment::>::default_commitment_no_preimage(), - }; - let commit = data.commit(); - Self { - data, - vote_commitment: commit, - view_number: ::genesis(), - signatures: None, - is_genesis: true, - _pd: PhantomData, - } - } -} - -/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` -pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; -/// Type alias for a DA certificate over `DAData` -pub type DACertificate = SimpleCertificate; -/// Type alias for a Timeout certificate over a view number -pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; -/// Type alias for a `ViewSyncPreCommit` certificate over a view number -pub type ViewSyncPreCommitCertificate2 = - SimpleCertificate, OneHonestThreshold>; -/// Type alias for a `ViewSyncCommit` certificate over a view number -pub type ViewSyncCommitCertificate2 = - SimpleCertificate, SuccessThreshold>; -/// Type alias for a `ViewSyncFinalize` certificate over a view number -pub type ViewSyncFinalizeCertificate2 = - SimpleCertificate, SuccessThreshold>; -/// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` -pub type UpgradeCertificate = - SimpleCertificate, UpgradeThreshold>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs deleted file mode 100644 index 965474c08b..0000000000 --- a/types/src/simple_vote.rs +++ /dev/null @@ -1,259 +0,0 @@ -//! Implementations of the simple vote types. - -use std::{fmt::Debug, hash::Hash}; - -use commit::{Commitment, Committable}; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; - -use hotshot_constants::Version; - -use crate::{ - data::Leaf, - traits::{node_implementation::NodeType, signature_key::SignatureKey}, - vid::VidCommitment, - vote::{HasViewNumber, Vote}, -}; - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a yes vote. -#[serde(bound(deserialize = ""))] -pub struct QuorumData { - /// Commitment to the leaf - pub leaf_commit: Commitment>, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a DA vote. -pub struct DAData { - /// Commitment to a block payload - pub payload_commit: VidCommitment, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a timeout vote. -pub struct TimeoutData { - /// View the timeout is for - pub view: TYPES::Time, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a VID vote. -pub struct VIDData { - /// Commitment to the block payload the VID vote is on. - pub payload_commit: VidCommitment, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a Pre Commit vote. -pub struct ViewSyncPreCommitData { - /// The relay this vote is intended for - pub relay: u64, - /// The view number we are trying to sync on - pub round: TYPES::Time, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a Commit vote. -pub struct ViewSyncCommitData { - /// The relay this vote is intended for - pub relay: u64, - /// The view number we are trying to sync on - pub round: TYPES::Time, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a Finalize vote. -pub struct ViewSyncFinalizeData { - /// The relay this vote is intended for - pub relay: u64, - /// The view number we are trying to sync on - pub round: TYPES::Time, -} -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a Upgrade vote. -pub struct UpgradeProposalData { - /// The old version that we are upgrading from. - pub old_version: Version, - /// The new version that we are upgrading to. - pub new_version: Version, - /// A unique identifier for the specific protocol being voted on. - pub new_version_hash: Vec, - /// The last block for which the old version will be in effect. - pub old_version_last_block: TYPES::Time, - /// The first block for which the new version will be in effect. - pub new_version_first_block: TYPES::Time, -} - -/// Marker trait for data or commitments that can be voted on. -/// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait -/// Sealing this trait prevents creating new vote types outside this file. -pub trait Voteable: - sealed::Sealed + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq -{ -} - -/// Sealed is used to make sure no other files can implement the Voteable trait. -/// All simple voteable types should be implemented here. This prevents us from -/// creating/using improper types when using the vote types. -mod sealed { - use commit::Committable; - - /// Only structs in this file can impl `Sealed` - pub trait Sealed {} - - // TODO: Does the implement for things outside this file that are commitable? - impl Sealed for C {} -} - -/// A simple yes vote over some votable type. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -pub struct SimpleVote { - /// The signature share associated with this vote - pub signature: ( - TYPES::SignatureKey, - ::PureAssembledSignatureType, - ), - /// The leaf commitment being voted on. - pub data: DATA, - /// The view this vote was cast for - pub view_number: TYPES::Time, -} - -impl HasViewNumber for SimpleVote { - fn get_view_number(&self) -> ::Time { - self.view_number - } -} - -impl Vote for SimpleVote { - type Commitment = DATA; - - fn get_signing_key(&self) -> ::SignatureKey { - self.signature.0.clone() - } - - fn get_signature(&self) -> ::PureAssembledSignatureType { - self.signature.1.clone() - } - - fn get_data(&self) -> &DATA { - &self.data - } - - fn get_data_commitment(&self) -> Commitment { - self.data.commit() - } -} - -impl SimpleVote { - /// Creates and signs a simple vote - /// # Errors - /// If we are unable to sign the data - pub fn create_signed_vote( - data: DATA, - view: TYPES::Time, - pub_key: &TYPES::SignatureKey, - private_key: &::PrivateKey, - ) -> Result::SignError> { - match TYPES::SignatureKey::sign(private_key, data.commit().as_ref()) { - Ok(signature) => Ok(Self { - signature: (pub_key.clone(), signature), - data, - view_number: view, - }), - Err(e) => Err(e), - } - } -} - -impl Committable for QuorumData { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Yes Vote") - .var_size_bytes(self.leaf_commit.as_ref()) - .finalize() - } -} - -impl Committable for TimeoutData { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Timeout Vote") - .u64(*self.view) - .finalize() - } -} - -impl Committable for DAData { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("DA Vote") - .var_size_bytes(self.payload_commit.as_ref()) - .finalize() - } -} - -impl Committable for VIDData { - fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("VID Vote") - .var_size_bytes(self.payload_commit.as_ref()) - .finalize() - } -} - -impl Committable for UpgradeProposalData { - fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("Upgrade Vote"); - builder - .u64(*self.new_version_first_block) - .u64(*self.old_version_last_block) - .var_size_bytes(self.new_version_hash.as_slice()) - .u16(self.new_version.minor) - .u16(self.new_version.major) - .u16(self.old_version.minor) - .u16(self.old_version.major) - .finalize() - } -} - -/// This implements commit for all the types which contain a view and relay public key. -fn view_and_relay_commit( - view: TYPES::Time, - relay: u64, - tag: &str, -) -> Commitment { - let builder = commit::RawCommitmentBuilder::new(tag); - builder.u64(*view).u64(relay).finalize() -} - -impl Committable for ViewSyncPreCommitData { - fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, self.relay, "View Sync Precommit") - } -} - -impl Committable for ViewSyncFinalizeData { - fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, self.relay, "View Sync Finalize") - } -} -impl Committable for ViewSyncCommitData { - fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, self.relay, "View Sync Commit") - } -} - -// impl votable for all the data types in this file sealed marker should ensure nothing is accidently -// implemented for structs that aren't "voteable" -impl Voteable - for V -{ -} - -// Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file -/// Quorum vote Alias -pub type QuorumVote = SimpleVote>; -/// DA vote type alias -pub type DAVote = SimpleVote; -/// Timeout Vote type alias -pub type TimeoutVote = SimpleVote>; -/// View Sync Commit Vote type alias -pub type ViewSyncCommitVote = SimpleVote>; -/// View Sync Pre Commit Vote type alias -pub type ViewSyncPreCommitVote = SimpleVote>; -/// View Sync Finalize Vote type alias -pub type ViewSyncFinalizeVote = SimpleVote>; -/// Upgrade proposal vote -pub type UpgradeVote = SimpleVote>; diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs deleted file mode 100644 index 7c6525e0eb..0000000000 --- a/types/src/stake_table.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Types and structs related to the stake table - -use crate::traits::signature_key::{SignatureKey, StakeTableEntryType}; -use ethereum_types::U256; -use serde::{Deserialize, Serialize}; - -/// Stake table entry -#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash, Eq)] -#[serde(bound(deserialize = ""))] -pub struct StakeTableEntry { - /// The public key - pub stake_key: K, - /// The associated stake amount - pub stake_amount: U256, -} - -impl StakeTableEntryType for StakeTableEntry { - /// Get the stake amount - fn get_stake(&self) -> U256 { - self.stake_amount - } -} - -impl StakeTableEntry { - /// Get the public key - pub fn get_key(&self) -> &K { - &self.stake_key - } -} - -// TODO(Chengyu): add stake table snapshot here diff --git a/types/src/traits.rs b/types/src/traits.rs deleted file mode 100644 index a698d2c158..0000000000 --- a/types/src/traits.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Common traits for the `HotShot` protocol -pub mod block_contents; -pub mod consensus_api; -pub mod election; -pub mod metrics; -pub mod network; -pub mod node_implementation; -pub mod qc; -pub mod signature_key; -pub mod stake_table; -pub mod states; -pub mod storage; - -pub use block_contents::BlockPayload; -pub use states::ValidatedState; diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs deleted file mode 100644 index aed315dfc7..0000000000 --- a/types/src/traits/consensus_api.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! Contains the [`ConsensusApi`] trait. - -use crate::{ - data::Leaf, - event::Event, - traits::{ - node_implementation::{NodeImplementation, NodeType}, - signature_key::SignatureKey, - storage::StorageError, - }, -}; -use async_trait::async_trait; - -use std::{num::NonZeroUsize, time::Duration}; - -/// The API that tasks use to talk to the system -/// TODO we plan to drop this -#[async_trait] -pub trait ConsensusApi>: Send + Sync { - /// Total number of nodes in the network. Also known as `n`. - fn total_nodes(&self) -> NonZeroUsize; - - /// The minimum amount of time a leader has to wait before sending a propose - fn propose_min_round_time(&self) -> Duration; - - /// The maximum amount of time a leader can wait before sending a propose. - /// If this time is reached, the leader has to send a propose without transactions. - fn propose_max_round_time(&self) -> Duration; - - /// Retuns the maximum transactions allowed in a block - fn max_transactions(&self) -> NonZeroUsize; - - /// Returns the minimum transactions that must be in a block - fn min_transactions(&self) -> usize; - - /// Get a reference to the public key. - fn public_key(&self) -> &TYPES::SignatureKey; - - /// Get a reference to the private key. - fn private_key(&self) -> &::PrivateKey; - - /// Notify the system of an event within `hotshot-consensus`. - async fn send_event(&self, event: Event); - - /// Store a leaf in the storage - async fn store_leaf( - &self, - old_anchor_view: TYPES::Time, - leaf: Leaf, - ) -> Result<(), StorageError>; -} diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs deleted file mode 100644 index 0f3eec9b53..0000000000 --- a/types/src/traits/election.rs +++ /dev/null @@ -1,91 +0,0 @@ -//! The election trait, used to decide which node is the leader and determine if a vote is valid. - -// Needed to avoid the non-binding `let` warning. -#![allow(clippy::let_underscore_untyped)] - -use super::node_implementation::NodeType; - -use crate::{traits::signature_key::SignatureKey, PeerConfig}; - -use snafu::Snafu; -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; - -/// Error for election problems -#[derive(Snafu, Debug)] -pub enum ElectionError { - /// stub error to be filled in - StubError, - /// Math error doing something - /// NOTE: it would be better to make Election polymorphic over - /// the election error and then have specific math errors - MathError, -} - -/// election config -pub trait ElectionConfig: - Default - + Clone - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + Sync - + Send - + core::fmt::Debug -{ -} - -/// A protocol for determining membership in and participating in a committee. -pub trait Membership: - Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static -{ - /// generate a default election configuration - fn default_election_config( - num_nodes_with_stake: u64, - num_nodes_without_stake: u64, - ) -> TYPES::ElectionConfigType; - - /// create an election - /// TODO may want to move this to a testableelection trait - fn create_election( - entries: Vec>, - config: TYPES::ElectionConfigType, - ) -> Self; - - /// Clone the public key and corresponding stake table for current elected committee - fn get_committee_qc_stake_table( - &self, - ) -> Vec<::StakeTableEntry>; - - /// The leader of the committee for view `view_number`. - fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; - - /// The staked members of the committee for view `view_number`. - fn get_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; - - /// The non-staked members of the committee for view `view_number`. - fn get_non_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; - - /// Get whole (staked + non-staked) committee for view `view_number`. - fn get_whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; - - /// Check if a key has stake - fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; - - /// Get the stake table entry for a public key, returns `None` if the - /// key is not in the table - fn get_stake( - &self, - pub_key: &TYPES::SignatureKey, - ) -> Option<::StakeTableEntry>; - - /// Returns the number of total nodes in the committee - fn total_nodes(&self) -> usize; - - /// Returns the threshold for a specific `Membership` implementation - fn success_threshold(&self) -> NonZeroU64; - - /// Returns the threshold for a specific `Membership` implementation - fn failure_threshold(&self) -> NonZeroU64; - - /// Returns the threshold required to upgrade the network protocol - fn upgrade_threshold(&self) -> NonZeroU64; -} diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs deleted file mode 100644 index fc69b5c077..0000000000 --- a/types/src/traits/metrics.rs +++ /dev/null @@ -1,295 +0,0 @@ -//! The [`Metrics`] trait is used to collect information from multiple components in the entire system. -//! -//! This trait can be used to spawn the following traits: -//! - [`Counter`]: an ever-increasing value (example usage: total bytes send/received) -//! - [`Gauge`]: a value that store the latest value, and can go up and down (example usage: amount of users logged in) -//! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) -//! - [`Label`]: Stores the last string (example usage: current version, network online/offline) - -use dyn_clone::DynClone; -use std::fmt::Debug; - -/// The metrics type. -pub trait Metrics: Send + Sync + DynClone + Debug { - /// Create a [`Counter`] with an optional `unit_label`. - /// - /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" - fn create_counter(&self, label: String, unit_label: Option) -> Box; - /// Create a [`Gauge`] with an optional `unit_label`. - /// - /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" - fn create_gauge(&self, label: String, unit_label: Option) -> Box; - /// Create a [`Histogram`] with an optional `unit_label`. - /// - /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" - fn create_histogram(&self, label: String, unit_label: Option) -> Box; - /// Create a [`Label`]. - fn create_label(&self, label: String) -> Box; - - /// Create a subgroup with a specified prefix. - fn subgroup(&self, subgroup_name: String) -> Box; -} - -/// Use this if you're not planning to use any metrics. All methods are implemented as a no-op -#[derive(Clone, Copy, Debug, Default)] -pub struct NoMetrics; - -impl NoMetrics { - /// Create a new `Box` with this [`NoMetrics`] - #[must_use] - pub fn boxed() -> Box { - Box::::default() - } -} - -impl Metrics for NoMetrics { - fn create_counter(&self, _: String, _: Option) -> Box { - Box::new(NoMetrics) - } - - fn create_gauge(&self, _: String, _: Option) -> Box { - Box::new(NoMetrics) - } - - fn create_histogram(&self, _: String, _: Option) -> Box { - Box::new(NoMetrics) - } - - fn create_label(&self, _: String) -> Box { - Box::new(NoMetrics) - } - - fn subgroup(&self, _: String) -> Box { - Box::new(NoMetrics) - } -} - -impl Counter for NoMetrics { - fn add(&self, _: usize) {} -} -impl Gauge for NoMetrics { - fn set(&self, _: usize) {} - fn update(&self, _: i64) {} -} -impl Histogram for NoMetrics { - fn add_point(&self, _: f64) {} -} -impl Label for NoMetrics { - fn set(&self, _: String) {} -} - -/// An ever-incrementing counter -pub trait Counter: Send + Sync + Debug + DynClone { - /// Add a value to the counter - fn add(&self, amount: usize); -} -/// A gauge that stores the latest value. -pub trait Gauge: Send + Sync + Debug + DynClone { - /// Set the gauge value - fn set(&self, amount: usize); - - /// Update the guage value - fn update(&self, delts: i64); -} - -/// A histogram which will record a series of points. -pub trait Histogram: Send + Sync + Debug + DynClone { - /// Add a point to this histogram. - fn add_point(&self, point: f64); -} - -/// A label that stores the last string value. -pub trait Label: Send + Sync + DynClone { - /// Set the label value - fn set(&self, value: String); -} -dyn_clone::clone_trait_object!(Metrics); -dyn_clone::clone_trait_object!(Gauge); -dyn_clone::clone_trait_object!(Counter); -dyn_clone::clone_trait_object!(Histogram); -dyn_clone::clone_trait_object!(Label); - -#[cfg(test)] -mod test { - use super::*; - use std::{ - collections::HashMap, - sync::{Arc, Mutex}, - }; - - #[derive(Debug, Clone)] - struct TestMetrics { - prefix: String, - values: Arc>, - } - - impl TestMetrics { - fn sub(&self, name: String) -> Self { - let prefix = if self.prefix.is_empty() { - name - } else { - format!("{}-{name}", self.prefix) - }; - Self { - prefix, - values: Arc::clone(&self.values), - } - } - } - - impl Metrics for TestMetrics { - fn create_counter( - &self, - label: String, - _unit_label: Option, - ) -> Box { - Box::new(self.sub(label)) - } - - fn create_gauge( - &self, - label: String, - _unit_label: Option, - ) -> Box { - Box::new(self.sub(label)) - } - - fn create_histogram( - &self, - label: String, - _unit_label: Option, - ) -> Box { - Box::new(self.sub(label)) - } - - fn create_label(&self, label: String) -> Box { - Box::new(self.sub(label)) - } - - fn subgroup(&self, subgroup_name: String) -> Box { - Box::new(self.sub(subgroup_name)) - } - } - - impl Counter for TestMetrics { - fn add(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .counters - .entry(self.prefix.clone()) - .or_default() += amount; - } - } - - impl Gauge for TestMetrics { - fn set(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .gauges - .entry(self.prefix.clone()) - .or_default() = amount; - } - fn update(&self, delta: i64) { - let mut values = self.values.lock().unwrap(); - let value = values.gauges.entry(self.prefix.clone()).or_default(); - let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); - *value = usize::try_from(signed_value + delta).unwrap_or(0); - } - } - - impl Histogram for TestMetrics { - fn add_point(&self, point: f64) { - self.values - .lock() - .unwrap() - .histograms - .entry(self.prefix.clone()) - .or_default() - .push(point); - } - } - - impl Label for TestMetrics { - fn set(&self, value: String) { - *self - .values - .lock() - .unwrap() - .labels - .entry(self.prefix.clone()) - .or_default() = value; - } - } - - #[derive(Default, Debug)] - struct Inner { - counters: HashMap, - gauges: HashMap, - histograms: HashMap>, - labels: HashMap, - } - - #[test] - fn test() { - let values = Arc::default(); - // This is all scoped so all the arcs should go out of scope - { - let metrics: Box = Box::new(TestMetrics { - prefix: String::new(), - values: Arc::clone(&values), - }); - - let gauge = metrics.create_gauge("foo".to_string(), None); - let counter = metrics.create_counter("bar".to_string(), None); - let histogram = metrics.create_histogram("baz".to_string(), None); - - gauge.set(5); - gauge.update(-2); - - for i in 0..5 { - counter.add(i); - } - - for i in 0..10 { - histogram.add_point(f64::from(i)); - } - - let sub = metrics.subgroup("child".to_string()); - - let sub_gauge = sub.create_gauge("foo".to_string(), None); - let sub_counter = sub.create_counter("bar".to_string(), None); - let sub_histogram = sub.create_histogram("baz".to_string(), None); - - sub_gauge.set(10); - - for i in 0..5 { - sub_counter.add(i * 2); - } - - for i in 0..10 { - sub_histogram.add_point(f64::from(i) * 2.0); - } - } - - // The above variables are scoped so they should be dropped at this point - // One of the rare times we can use `Arc::try_unwrap`! - let values = Arc::try_unwrap(values).unwrap().into_inner().unwrap(); - assert_eq!(values.gauges["foo"], 3); - assert_eq!(values.counters["bar"], 10); // 0..5 - assert_eq!( - values.histograms["baz"], - vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] - ); - - assert_eq!(values.gauges["child-foo"], 10); - assert_eq!(values.counters["child-bar"], 20); // 0..5 *2 - assert_eq!( - values.histograms["child-baz"], - vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0] - ); - } -} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs deleted file mode 100644 index f3fec1c451..0000000000 --- a/types/src/traits/network.rs +++ /dev/null @@ -1,669 +0,0 @@ -//! Network access compatibility -//! -//! Contains types and traits used by `HotShot` to abstract over network access - -use async_compatibility_layer::art::async_sleep; -#[cfg(async_executor_impl = "async-std")] -use async_std::future::TimeoutError; -use derivative::Derivative; -use dyn_clone::DynClone; -use futures::channel::{mpsc, oneshot}; -use libp2p_networking::network::NetworkNodeHandleError; -#[cfg(async_executor_impl = "tokio")] -use tokio::time::error::Elapsed as TimeoutError; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} -use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{ - data::ViewNumber, - message::{MessagePurpose, SequencingMessage}, - vid::VidCommitment, - BoxSyncFuture, -}; -use async_compatibility_layer::channel::UnboundedSendError; -use async_trait::async_trait; -use rand::{ - distributions::{Bernoulli, Uniform}, - prelude::Distribution, -}; -use serde::{Deserialize, Serialize}; -use snafu::Snafu; -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; - -impl From for NetworkError { - fn from(error: NetworkNodeHandleError) -> Self { - match error { - NetworkNodeHandleError::SerializationError { source } => { - NetworkError::FailedToSerialize { source } - } - NetworkNodeHandleError::DeserializationError { source } => { - NetworkError::FailedToDeserialize { source } - } - NetworkNodeHandleError::TimeoutError { source } => NetworkError::Timeout { source }, - NetworkNodeHandleError::Killed => NetworkError::ShutDown, - source => NetworkError::Libp2p { source }, - } - } -} - -/// for any errors we decide to add to memory network -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum MemoryNetworkError { - /// stub - Stub, -} - -/// Centralized server specific errors -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum CentralizedServerNetworkError { - /// The centralized server could not find a specific message. - NoMessagesInQueue, -} - -/// Web server specific errors -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum WebServerNetworkError { - /// The injected consensus data is incorrect - IncorrectConsensusData, - /// The client returned an error - ClientError, - /// Endpoint parsed incorrectly - EndpointError, - /// Client disconnected - ClientDisconnected, -} - -/// the type of transmission -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -pub enum TransmitType { - /// directly transmit - Direct, - /// broadcast the message to all - Broadcast, - /// broadcast to DA committee - DACommitteeBroadcast, -} - -/// Error type for networking -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum NetworkError { - /// Libp2p specific errors - Libp2p { - /// source of error - source: NetworkNodeHandleError, - }, - /// collection of libp2p secific errors - Libp2pMulti { - /// sources of errors - sources: Vec, - }, - /// memory network specific errors - MemoryNetwork { - /// source of error - source: MemoryNetworkError, - }, - /// Centralized server specific errors - CentralizedServer { - /// source of error - source: CentralizedServerNetworkError, - }, - - /// Web server specific errors - WebServer { - /// source of error - source: WebServerNetworkError, - }, - /// unimplemented functionality - UnimplementedFeature, - /// Could not deliver a message to a specified recipient - CouldNotDeliver, - /// Attempted to deliver a message to an unknown node - NoSuchNode, - /// Failed to serialize a network message - FailedToSerialize { - /// Originating bincode error - source: bincode::Error, - }, - /// Failed to deserealize a network message - FailedToDeserialize { - /// originating bincode error - source: bincode::Error, - }, - /// A timeout occurred - Timeout { - /// Source of error - source: TimeoutError, - }, - /// Error sending output to consumer of NetworkingImplementation - /// TODO this should have more information - ChannelSend, - /// The underlying connection has been shut down - ShutDown, - /// unable to cancel a request, the request has already been cancelled - UnableToCancel, - /// The requested data was not found - NotFound, -} - -#[derive(Clone, Debug)] -// Storing view number as a u64 to avoid the need TYPES generic -/// Events to poll or cancel consensus processes. -pub enum ConsensusIntentEvent { - /// Poll for votes for a particular view - PollForVotes(u64), - /// Poll for a proposal for a particular view - PollForProposal(u64), - /// Poll for VID disperse data for a particular view - PollForVIDDisperse(u64), - /// Poll for the most recent [quorum/da] proposal the webserver has - PollForLatestProposal, - /// Poll for the most recent view sync proposal the webserver has - PollForLatestViewSyncCertificate, - /// Poll for a DAC for a particular view - PollForDAC(u64), - /// Poll for view sync votes starting at a particular view - PollForViewSyncVotes(u64), - /// Poll for view sync proposals (certificates) for a particular view - PollForViewSyncCertificate(u64), - /// Poll for new transactions - PollForTransactions(u64), - /// Poll for future leader - PollFutureLeader(u64, K), - /// Cancel polling for votes - CancelPollForVotes(u64), - /// Cancel polling for view sync votes. - CancelPollForViewSyncVotes(u64), - /// Cancel polling for proposals. - CancelPollForProposal(u64), - /// Cancel polling for the latest proposal. - CancelPollForLatestProposal(u64), - /// Cancel polling for the latest view sync certificate - CancelPollForLatestViewSyncCertificate(u64), - /// Cancal polling for DAC. - CancelPollForDAC(u64), - /// Cancel polling for view sync certificate. - CancelPollForViewSyncCertificate(u64), - /// Cancel polling for VID disperse data - CancelPollForVIDDisperse(u64), - /// Cancel polling for transactions - CancelPollForTransactions(u64), -} - -impl ConsensusIntentEvent { - /// Get the view number of the event. - #[must_use] - pub fn view_number(&self) -> u64 { - match &self { - ConsensusIntentEvent::PollForVotes(view_number) - | ConsensusIntentEvent::PollForProposal(view_number) - | ConsensusIntentEvent::PollForDAC(view_number) - | ConsensusIntentEvent::PollForViewSyncVotes(view_number) - | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) - | ConsensusIntentEvent::CancelPollForVotes(view_number) - | ConsensusIntentEvent::CancelPollForProposal(view_number) - | ConsensusIntentEvent::CancelPollForLatestProposal(view_number) - | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(view_number) - | ConsensusIntentEvent::PollForVIDDisperse(view_number) - | ConsensusIntentEvent::CancelPollForVIDDisperse(view_number) - | ConsensusIntentEvent::CancelPollForDAC(view_number) - | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) - | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) - | ConsensusIntentEvent::PollForTransactions(view_number) - | ConsensusIntentEvent::CancelPollForTransactions(view_number) - | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, - ConsensusIntentEvent::PollForLatestProposal - | ConsensusIntentEvent::PollForLatestViewSyncCertificate => 1, - } - } -} - -/// common traits we would like our network messages to implement -pub trait NetworkMsg: - Serialize + for<'a> Deserialize<'a> + Clone + Sync + Send + Debug + 'static -{ -} - -/// Trait that bundles what we need from a request ID -pub trait Id: Eq + PartialEq + Hash {} -impl NetworkMsg for Vec {} - -/// a message -pub trait ViewMessage { - /// get the view out of the message - fn get_view_number(&self) -> TYPES::Time; - // TODO move out of this trait. - /// get the purpose of the message - fn purpose(&self) -> MessagePurpose; -} - -/// Wraps a oneshot channel for responding to requests -pub struct ResponseChannel(pub oneshot::Sender); - -/// A request for some data that the consensus layer is asking for. -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct DataRequest { - /// Hotshot key of who to send the request to - pub recipient: TYPES::SignatureKey, - /// Request - pub request: RequestKind, - /// View this message is for - pub view: TYPES::Time, -} - -/// Underlying data request -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] -pub enum RequestKind { - /// Request VID data by our key and the VID commitment - VID(VidCommitment, TYPES::SignatureKey), - /// Request a DA proposal for a certain view - DAProposal(TYPES::Time), -} - -/// A resopnse for a request. `SequencingMessage` is the same as other network messages -/// The kind of message `M` is is determined by what we requested -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] -#[serde(bound(deserialize = ""))] -pub enum ResponseMessage { - /// Peer returned us some data - Found(SequencingMessage), - /// Peer failed to get us data - NotFound, -} - -/// represents a networking implmentration -/// exposes low level API for interacting with a network -/// intended to be implemented for libp2p, the centralized server, -/// and memory network -#[async_trait] -pub trait ConnectedNetwork: - Clone + Send + Sync + 'static -{ - /// Pauses the underlying network - fn pause(&self); - - /// Resumes the underlying network - fn resume(&self); - - /// Blocks until the network is successfully initialized - async fn wait_for_ready(&self); - - /// checks if the network is ready - /// nonblocking - async fn is_ready(&self) -> bool; - - /// Blocks until the network is shut down - /// then returns true - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b; - - /// broadcast message to some subset of nodes - /// blocking - async fn broadcast_message( - &self, - message: M, - recipients: BTreeSet, - ) -> Result<(), NetworkError>; - - /// broadcast a message only to a DA committee - /// blocking - async fn da_broadcast_message( - &self, - message: M, - recipients: BTreeSet, - ) -> Result<(), NetworkError>; - - /// Sends a direct message to a specific node - /// blocking - async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError>; - - /// Receive one or many messages from the underlying network. - /// - /// # Errors - /// If there is a network-related failure. - async fn recv_msgs(&self) -> Result, NetworkError>; - - /// Ask request the network for some data. Returns the request ID for that data, - /// The ID returned can be used for cancelling the request - async fn request_data( - &self, - _request: M, - _recipient: K, - ) -> Result, NetworkError> { - Err(NetworkError::UnimplementedFeature) - } - - /// Spawn a request task in the given network layer. If it supports - /// Request and responses it will return the receiving end of a channel. - /// Requests the network receives will be sent over this channel along - /// with a return channel to send the response back to. - /// - /// Returns `None`` if network does not support handling requests - async fn spawn_request_receiver_task(&self) -> Option)>> { - None - } - - /// queues lookup of a node - async fn queue_node_lookup( - &self, - _view_number: ViewNumber, - _pk: K, - ) -> Result<(), UnboundedSendError>> { - Ok(()) - } - - /// Injects consensus data such as view number into the networking implementation - /// blocking - /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} - - /// handles view update - fn update_view(&self, _view: u64) {} -} - -/// Describes additional functionality needed by the test network implementation -pub trait TestableNetworkingImplementation -where - Self: Sized, -{ - /// generates a network given an expected node count - #[allow(clippy::type_complexity)] - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - is_da: bool, - reliability_config: Option>, - ) -> Box (Arc, Arc) + 'static>; - - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. - fn in_flight_message_count(&self) -> Option; -} - -/// Changes that can occur in the network -#[derive(Debug)] -pub enum NetworkChange { - /// A node is connected - NodeConnected(P), - - /// A node is disconnected - NodeDisconnected(P), -} - -/// interface describing how reliable the network is -#[async_trait] -pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'static { - /// Sample from bernoulli distribution to decide whether - /// or not to keep a packet - /// # Panics - /// - /// Panics if `self.keep_numerator > self.keep_denominator` - /// - fn sample_keep(&self) -> bool { - true - } - - /// sample from uniform distribution to decide whether - /// or not to keep a packet - fn sample_delay(&self) -> Duration { - std::time::Duration::ZERO - } - - /// scramble the packet - fn scramble(&self, msg: Vec) -> Vec { - msg - } - - /// number of times to repeat the packet - fn sample_repeat(&self) -> usize { - 1 - } - - /// given a message and a way to send the message, - /// decide whether or not to send the message - /// how long to delay the message - /// whether or not to send duplicates - /// and whether or not to include noise with the message - /// then send the message - /// note: usually self is stored in a rwlock - /// so instead of doing the sending part, we just fiddle with the message - /// then return a future that does the sending and delaying - fn chaos_send_msg( - &self, - msg: Vec, - send_fn: Arc) -> BoxSyncFuture<'static, ()>>, - ) -> BoxSyncFuture<'static, ()> { - let sample_keep = self.sample_keep(); - let delay = self.sample_delay(); - let repeats = self.sample_repeat(); - let mut msgs = Vec::new(); - for _idx in 0..repeats { - let scrambled = self.scramble(msg.clone()); - msgs.push(scrambled); - } - let closure = async move { - if sample_keep { - async_sleep(delay).await; - for msg in msgs { - send_fn(msg).await; - } - } - }; - Box::pin(closure) - } -} - -// hack to get clone -dyn_clone::clone_trait_object!(NetworkReliability); - -/// ideal network -#[derive(Clone, Copy, Debug, Default)] -pub struct PerfectNetwork {} - -impl NetworkReliability for PerfectNetwork {} - -/// A synchronous network. Packets may be delayed, but are guaranteed -/// to arrive within `timeout` ns -#[derive(Clone, Copy, Debug, Default)] -pub struct SynchronousNetwork { - /// Max value in milliseconds that a packet may be delayed - pub delay_high_ms: u64, - /// Lowest value in milliseconds that a packet may be delayed - pub delay_low_ms: u64, -} - -impl NetworkReliability for SynchronousNetwork { - /// never drop a packet - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) - .sample(&mut rand::thread_rng()), - ) - } -} - -/// An asynchronous network. Packets may be dropped entirely -/// or delayed for arbitrarily long periods -/// probability that packet is kept = `keep_numerator` / `keep_denominator` -/// packet delay is obtained by sampling from a uniform distribution -/// between `delay_low_ms` and `delay_high_ms`, inclusive -#[derive(Debug, Clone, Copy)] -pub struct AsynchronousNetwork { - /// numerator for probability of keeping packets - pub keep_numerator: u32, - /// denominator for probability of keeping packets - pub keep_denominator: u32, - /// lowest value in milliseconds that a packet may be delayed - pub delay_low_ms: u64, - /// highest value in milliseconds that a packet may be delayed - pub delay_high_ms: u64, -} - -impl NetworkReliability for AsynchronousNetwork { - fn sample_keep(&self) -> bool { - Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) - .unwrap() - .sample(&mut rand::thread_rng()) - } - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) - .sample(&mut rand::thread_rng()), - ) - } -} - -/// An partially synchronous network. Behaves asynchronously -/// until some arbitrary time bound, GST, -/// then synchronously after GST -#[allow(clippy::similar_names)] -#[derive(Debug, Clone, Copy)] -pub struct PartiallySynchronousNetwork { - /// asynchronous portion of network - pub asynchronous: AsynchronousNetwork, - /// synchronous portion of network - pub synchronous: SynchronousNetwork, - /// time when GST occurs - pub gst: std::time::Duration, - /// when the network was started - pub start: std::time::Instant, -} - -impl NetworkReliability for PartiallySynchronousNetwork { - /// never drop a packet - fn sample_keep(&self) -> bool { - true - } - fn sample_delay(&self) -> Duration { - // act asyncronous before gst - if self.start.elapsed() < self.gst { - if self.asynchronous.sample_keep() { - self.asynchronous.sample_delay() - } else { - // assume packet was "dropped" and will arrive after gst - self.synchronous.sample_delay() + self.gst - } - } else { - // act syncronous after gst - self.synchronous.sample_delay() - } - } -} - -impl Default for AsynchronousNetwork { - // disable all chance of failure - fn default() -> Self { - AsynchronousNetwork { - keep_numerator: 1, - keep_denominator: 1, - delay_low_ms: 0, - delay_high_ms: 0, - } - } -} - -impl Default for PartiallySynchronousNetwork { - fn default() -> Self { - PartiallySynchronousNetwork { - synchronous: SynchronousNetwork::default(), - asynchronous: AsynchronousNetwork::default(), - gst: std::time::Duration::new(0, 0), - start: std::time::Instant::now(), - } - } -} - -impl SynchronousNetwork { - /// create new `SynchronousNetwork` - #[must_use] - pub fn new(timeout: u64, delay_low_ms: u64) -> Self { - SynchronousNetwork { - delay_high_ms: timeout, - delay_low_ms, - } - } -} - -impl AsynchronousNetwork { - /// create new `AsynchronousNetwork` - #[must_use] - pub fn new( - keep_numerator: u32, - keep_denominator: u32, - delay_low_ms: u64, - delay_high_ms: u64, - ) -> Self { - AsynchronousNetwork { - keep_numerator, - keep_denominator, - delay_low_ms, - delay_high_ms, - } - } -} - -impl PartiallySynchronousNetwork { - /// create new `PartiallySynchronousNetwork` - #[allow(clippy::similar_names)] - #[must_use] - pub fn new( - asynchronous: AsynchronousNetwork, - synchronous: SynchronousNetwork, - gst: std::time::Duration, - ) -> Self { - PartiallySynchronousNetwork { - asynchronous, - synchronous, - gst, - start: std::time::Instant::now(), - } - } -} - -/// A chaotic network using all the networking calls -#[derive(Debug, Clone)] -pub struct ChaosNetwork { - /// numerator for probability of keeping packets - pub keep_numerator: u32, - /// denominator for probability of keeping packets - pub keep_denominator: u32, - /// lowest value in milliseconds that a packet may be delayed - pub delay_low_ms: u64, - /// highest value in milliseconds that a packet may be delayed - pub delay_high_ms: u64, - /// lowest value of repeats for a message - pub repeat_low: usize, - /// highest value of repeats for a message - pub repeat_high: usize, -} - -impl NetworkReliability for ChaosNetwork { - fn sample_keep(&self) -> bool { - Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) - .unwrap() - .sample(&mut rand::thread_rng()) - } - - fn sample_delay(&self) -> Duration { - Duration::from_millis( - Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) - .sample(&mut rand::thread_rng()), - ) - } - - fn sample_repeat(&self) -> usize { - Uniform::new_inclusive(self.repeat_low, self.repeat_high).sample(&mut rand::thread_rng()) - } -} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs deleted file mode 100644 index 3f0959d767..0000000000 --- a/types/src/traits/node_implementation.rs +++ /dev/null @@ -1,251 +0,0 @@ -//! Composite trait for node behavior -//! -//! This module defines the [`NodeImplementation`] trait, which is a composite trait used for -//! describing the overall behavior of a node, as a composition of implementations of the node trait. - -use super::{ - block_contents::{BlockHeader, TestableBlock, Transaction}, - election::ElectionConfig, - network::{ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation}, - states::TestableState, - storage::{StorageError, StorageState, TestableStorage}, - ValidatedState, -}; -use crate::{ - data::{Leaf, TestableLeaf}, - message::Message, - traits::{ - election::Membership, signature_key::SignatureKey, states::InstanceState, storage::Storage, - BlockPayload, - }, -}; -use async_trait::async_trait; -use commit::Committable; -use serde::{Deserialize, Serialize}; -use std::{ - fmt::Debug, - hash::Hash, - ops, - ops::{Deref, Sub}, - sync::Arc, -}; - -/// Node implementation aggregate trait -/// -/// This trait exists to collect multiple behavior implementations into one type, to allow -/// `HotShot` to avoid annoying numbers of type arguments and type patching. -/// -/// It is recommended you implement this trait on a zero sized type, as `HotShot`does not actually -/// store or keep a reference to any value implementing this trait. - -pub trait NodeImplementation: - Send + Sync + Debug + Clone + Eq + Hash + 'static + Serialize + for<'de> Deserialize<'de> -{ - /// Storage type for this consensus implementation - type Storage: Storage + Clone; - - /// Network for all nodes - type QuorumNetwork: ConnectedNetwork, TYPES::SignatureKey>; - /// Network for those in the DA committee - type CommitteeNetwork: ConnectedNetwork, TYPES::SignatureKey>; -} - -/// extra functions required on a node implementation to be usable by hotshot-testing -#[allow(clippy::type_complexity)] -#[async_trait] -pub trait TestableNodeImplementation: NodeImplementation { - /// Election config for the DA committee - type CommitteeElectionConfig; - - /// Generates a committee-specific election - fn committee_election_config_generator( - ) -> Box Self::CommitteeElectionConfig + 'static>; - - /// Creates random transaction if possible - /// otherwise panics - /// `padding` is the bytes of padding to add to the transaction - fn state_create_random_transaction( - state: Option<&TYPES::ValidatedState>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction; - - /// Creates random transaction if possible - /// otherwise panics - /// `padding` is the bytes of padding to add to the transaction - fn leaf_create_random_transaction( - leaf: &Leaf, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction; - - /// generate a genesis block - fn block_genesis() -> TYPES::BlockPayload; - - /// the number of transactions in a block - fn txn_count(block: &TYPES::BlockPayload) -> u64; - - /// Create ephemeral storage - /// Will be deleted/lost immediately after storage is dropped - /// # Errors - /// Errors if it is not possible to construct temporary storage. - fn construct_tmp_storage() -> Result; - - /// Return the full internal state. This is useful for debugging. - async fn get_full_state(storage: &Self::Storage) -> StorageState; - - /// Generate the communication channels for testing - fn gen_networks( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - reliability_config: Option>, - ) -> Box (Arc, Arc)>; -} - -#[async_trait] -impl> TestableNodeImplementation for I -where - TYPES::ValidatedState: TestableState, - TYPES::BlockPayload: TestableBlock, - I::Storage: TestableStorage, - I::QuorumNetwork: TestableNetworkingImplementation, - I::CommitteeNetwork: TestableNetworkingImplementation, -{ - type CommitteeElectionConfig = TYPES::ElectionConfigType; - - fn committee_election_config_generator( - ) -> Box Self::CommitteeElectionConfig + 'static> { - Box::new(|num_nodes_with_stake, num_nodes_without_stake| { - ::Membership::default_election_config( - num_nodes_with_stake, - num_nodes_without_stake, - ) - }) - } - - fn state_create_random_transaction( - state: Option<&TYPES::ValidatedState>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction { - >::create_random_transaction( - state, rng, padding, - ) - } - - fn leaf_create_random_transaction( - leaf: &Leaf, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction { - Leaf::create_random_transaction(leaf, rng, padding) - } - - fn block_genesis() -> TYPES::BlockPayload { - ::genesis() - } - - fn txn_count(block: &TYPES::BlockPayload) -> u64 { - ::txn_count(block) - } - - fn construct_tmp_storage() -> Result { - >::construct_tmp_storage() - } - - async fn get_full_state(storage: &Self::Storage) -> StorageState { - >::get_full_state(storage).await - } - fn gen_networks( - expected_node_count: usize, - num_bootstrap: usize, - da_committee_size: usize, - reliability_config: Option>, - ) -> Box (Arc, Arc)> { - >::generator( - expected_node_count, - num_bootstrap, - 0, - da_committee_size, - false, - reliability_config.clone(), - ) - } -} - -/// Trait for time compatibility needed for reward collection -pub trait ConsensusTime: - PartialOrd - + Ord - + Send - + Sync - + Debug - + Clone - + Copy - + Hash - + Deref - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + ops::AddAssign - + ops::Add - + Sub - + 'static - + Committable -{ - /// Create a new instance of this time unit at time number 0 - #[must_use] - fn genesis() -> Self { - Self::new(0) - } - /// Create a new instance of this time unit - fn new(val: u64) -> Self; - /// Get the u64 format of time - fn get_u64(&self) -> u64; -} - -/// Trait with all the type definitions that are used in the current hotshot setup. -pub trait NodeType: - Clone - + Copy - + Debug - + Hash - + PartialEq - + Eq - + PartialOrd - + Ord - + Default - + serde::Serialize - + for<'de> Deserialize<'de> - + Send - + Sync - + 'static -{ - /// The time type that this hotshot setup is using. - /// - /// This should be the same `Time` that `ValidatedState::Time` is using. - type Time: ConsensusTime; - /// The block header type that this hotshot setup is using. - type BlockHeader: BlockHeader; - /// The block type that this hotshot setup is using. - /// - /// This should be the same block that `ValidatedState::BlockPayload` is using. - type BlockPayload: BlockPayload; - /// The signature key that this hotshot setup is using. - type SignatureKey: SignatureKey; - /// The transaction type that this hotshot setup is using. - /// - /// This should be equal to `BlockPayload::Transaction` - type Transaction: Transaction; - /// The election config type that this hotshot setup is using. - type ElectionConfigType: ElectionConfig; - - /// The instance-level state type that this hotshot setup is using. - type InstanceState: InstanceState; - - /// The validated state type that this hotshot setup is using. - type ValidatedState: ValidatedState; - - /// Membership used for this implementation - type Membership: Membership; -} diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs deleted file mode 100644 index 7dd11010f9..0000000000 --- a/types/src/traits/qc.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! The quorum certificate (QC) trait is a certificate of a sufficient quorum of distinct -//! parties voted for a message or statement. - -use ark_std::{ - rand::{CryptoRng, RngCore}, - vec::Vec, -}; -use bitvec::prelude::*; -use generic_array::{ArrayLength, GenericArray}; -use jf_primitives::{errors::PrimitivesError, signatures::AggregateableSignatureSchemes}; -use serde::{Deserialize, Serialize}; - -/// Trait for validating a QC built from different signatures on the same message -pub trait QuorumCertificateScheme< - A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, -> -{ - /// Public parameters for generating the QC - /// E.g: snark proving/verifying keys, list of (or pointer to) public keys stored in the smart contract. - type QCProverParams: Serialize + for<'a> Deserialize<'a>; - - /// Public parameters for validating the QC - /// E.g: verifying keys, stake table commitment - type QCVerifierParams: Serialize + for<'a> Deserialize<'a>; - - /// Allows to fix the size of the message at compilation time. - type MessageLength: ArrayLength; - - /// Type of the actual quorum certificate object - type QC; - - /// Type of the quorum size (e.g. number of votes or accumulated weight of signatures) - type QuorumSize; - - /// Produces a partial signature on a message with a single user signing key - /// NOTE: the original message (vote) should be prefixed with the hash of the stake table. - /// * `agg_sig_pp` - public parameters for aggregate signature - /// * `message` - message to be signed - /// * `sk` - user signing key - /// * `returns` - a "simple" signature - /// - /// # Errors - /// - /// Should return error if the underlying signature scheme fail to sign. - fn sign>( - pp: &A::PublicParameter, - sk: &A::SigningKey, - msg: M, - prng: &mut R, - ) -> Result { - A::sign(pp, sk, msg, prng) - } - - /// Computes an aggregated signature from a set of partial signatures and the verification keys involved - /// * `qc_pp` - public parameters for generating the QC - /// * `signers` - a bool vector indicating the list of verification keys corresponding to the set of partial signatures - /// * `sigs` - partial signatures on the same message - /// - /// # Errors - /// - /// Will return error if some of the partial signatures provided are invalid or the number of - /// partial signatures / verifications keys are different. - fn assemble( - qc_pp: &Self::QCProverParams, - signers: &BitSlice, - sigs: &[A::Signature], - ) -> Result; - - /// Checks an aggregated signature over some message provided as input - /// * `qc_vp` - public parameters for validating the QC - /// * `message` - message to check the aggregated signature against - /// * `qc` - quroum certificate - /// * `returns` - the quorum size if the qc is valid, an error otherwise. - /// - /// # Errors - /// - /// Return error if the QC is invalid, either because accumulated weight didn't exceed threshold, - /// or some partial signatures are invalid. - fn check( - qc_vp: &Self::QCVerifierParams, - message: &GenericArray, - qc: &Self::QC, - ) -> Result; - - /// Trace the list of signers given a qc. - /// - /// # Errors - /// - /// Return error if the inputs mismatch (e.g. wrong verifier parameter or original message). - fn trace( - qc_vp: &Self::QCVerifierParams, - message: &GenericArray, - qc: &Self::QC, - ) -> Result, PrimitivesError>; -} diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs deleted file mode 100644 index 3b36e0ed31..0000000000 --- a/types/src/traits/signature_key.rs +++ /dev/null @@ -1,140 +0,0 @@ -//! Minimal compatibility over public key signatures -use bitvec::prelude::*; -use ethereum_types::U256; -use jf_primitives::errors::PrimitivesError; -use serde::{Deserialize, Serialize}; -use std::{ - fmt::{Debug, Display}, - hash::Hash, -}; -use tagged_base64::TaggedBase64; - -/// Type representing stake table entries in a `StakeTable` -pub trait StakeTableEntryType { - /// Get the stake value - fn get_stake(&self) -> U256; -} - -/// Trait for abstracting public key signatures -/// Self is the public key type -pub trait SignatureKey: - Send - + Sync - + Clone - + Sized - + Debug - + Hash - + Serialize - + for<'a> Deserialize<'a> - + PartialEq - + Eq - + PartialOrd - + Ord - + Display - + for<'a> TryFrom<&'a TaggedBase64> - + Into -{ - /// The private key type for this signature algorithm - type PrivateKey: Send - + Sync - + Sized - + Clone - + Debug - + Eq - + Serialize - + for<'a> Deserialize<'a> - + Hash; - /// The type of the entry that contain both public key and stake value - type StakeTableEntry: StakeTableEntryType - + Send - + Sync - + Sized - + Clone - + Debug - + Hash - + Eq - + Serialize - + for<'a> Deserialize<'a>; - /// The type of the quorum certificate parameters used for assembled signature - type QCParams: Send + Sync + Sized + Clone + Debug + Hash; - /// The type of the assembled signature, without `BitVec` - type PureAssembledSignatureType: Send - + Sync - + Sized - + Clone - + Debug - + Hash - + PartialEq - + Eq - + Serialize - + for<'a> Deserialize<'a>; - /// The type of the assembled qc: assembled signature + `BitVec` - type QCType: Send - + Sync - + Sized - + Clone - + Debug - + Hash - + PartialEq - + Eq - + Serialize - + for<'a> Deserialize<'a>; - - /// Type of error that can occur when signing data - type SignError: std::error::Error + Send + Sync; - - // Signature type represented as a vec/slice of bytes to let the implementer handle the nuances - // of serialization, to avoid Cryptographic pitfalls - /// Validate a signature - fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool; - - /// Produce a signature - /// # Errors - /// If unable to sign the data with the key - fn sign( - private_key: &Self::PrivateKey, - data: &[u8], - ) -> Result; - - /// Produce a public key from a private key - fn from_private(private_key: &Self::PrivateKey) -> Self; - /// Serialize a public key to bytes - fn to_bytes(&self) -> Vec; - /// Deserialize a public key from bytes - /// # Errors - /// - /// Will return `Err` if deserialization fails - fn from_bytes(bytes: &[u8]) -> Result; - - /// Generate a new key pair - fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey); - - /// get the stake table entry from the public key and stake value - fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; - - /// only get the public key from the stake table entry - fn get_public_key(entry: &Self::StakeTableEntry) -> Self; - - /// get the public parameter for the assembled signature checking - fn get_public_parameter( - stake_entries: Vec, - threshold: U256, - ) -> Self::QCParams; - - /// check the quorum certificate for the assembled signature - fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; - - /// get the assembled signature and the `BitVec` separately from the assembled signature - fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); - - /// assemble the signature from the partial signature and the indication of signers in `BitVec` - fn assemble( - real_qc_pp: &Self::QCParams, - signers: &BitSlice, - sigs: &[Self::PureAssembledSignatureType], - ) -> Self::QCType; - - /// generates the genesis public key. Meant to be dummy/filler - #[must_use] - fn genesis_proposer_pk() -> Self; -} diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs deleted file mode 100644 index 598a662650..0000000000 --- a/types/src/traits/stake_table.rs +++ /dev/null @@ -1,235 +0,0 @@ -//! Trait for stake table data structures - -use ark_std::{rand::SeedableRng, string::ToString, vec::Vec}; -use digest::crypto_common::rand_core::CryptoRngCore; -use displaydoc::Display; -use jf_plonk::errors::PlonkError; -use jf_primitives::errors::PrimitivesError; - -/// Snapshots of the stake table -pub enum SnapshotVersion { - /// the latest "Head" where all new changes are applied to - Head, - /// marks the snapshot at the beginning of the current epoch - EpochStart, - /// marks the beginning of the last epoch - LastEpochStart, - /// at arbitrary block height - BlockNum(u64), -} - -/// Common interfaces required for a stake table used in `HotShot` System. -/// APIs that doesn't take `version: SnapshotVersion` as an input by default works on the head/latest version. -pub trait StakeTableScheme { - /// type for stake key - type Key: Clone; - /// type for the staked amount - type Amount: Clone + Copy; - /// type for the commitment to the current stake table - type Commitment; - /// type for the proof associated with the lookup result (if any) - type LookupProof; - /// type for the iterator over (key, value) entries - type IntoIter: Iterator; - /// Auxiliary information associated with the key - type Aux: Clone; - - /// Register a new key into the stake table. - /// - /// # Errors - /// - /// Return err if key is already registered. - fn register( - &mut self, - new_key: Self::Key, - amount: Self::Amount, - aux: Self::Aux, - ) -> Result<(), StakeTableError>; - - /// Batch register a list of new keys. A default implementation is provided - /// w/o batch optimization. - /// - /// # Errors - /// - /// Return err if any of `new_keys` fails to register. - fn batch_register( - &mut self, - new_keys: I, - amounts: J, - auxs: K, - ) -> Result<(), StakeTableError> - where - I: IntoIterator, - J: IntoIterator, - K: IntoIterator, - { - let _ = new_keys - .into_iter() - .zip(amounts) - .zip(auxs) - .try_for_each(|((key, amount), aux)| Self::register(self, key, amount, aux)); - Ok(()) - } - - /// Deregister an existing key from the stake table. - /// Returns error if some keys are not found. - /// - /// # Errors - /// Return err if `existing_key` wasn't registered. - fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError>; - - /// Batch deregister a list of keys. A default implementation is provided - /// w/o batch optimization. - /// - /// # Errors - /// Return err if any of `existing_keys` fail to deregister. - fn batch_deregister<'a, I>(&mut self, existing_keys: I) -> Result<(), StakeTableError> - where - I: IntoIterator::Key>, - ::Key: 'a, - { - let _ = existing_keys - .into_iter() - .try_for_each(|key| Self::deregister(self, key)); - Ok(()) - } - - /// Returns the commitment to the `version` of stake table. - /// - /// # Errors - /// Return err if the `version` is not supported. - fn commitment(&self, version: SnapshotVersion) -> Result; - - /// Returns the accumulated stakes of all registered keys of the `version` - /// of stake table. - /// - /// # Errors - /// Return err if the `version` is not supported. - fn total_stake(&self, version: SnapshotVersion) -> Result; - - /// Returns the number of keys in the `version` of the table. - /// - /// # Errors - /// Return err if the `version` is not supported. - fn len(&self, version: SnapshotVersion) -> Result; - - /// Returns true if `key` is currently registered, else returns false. - fn contains_key(&self, key: &Self::Key) -> bool; - - /// Returns the stakes withhelded by a public key. - /// - /// # Errors - /// Return err if the `version` is not supported or `key` doesn't exist. - fn lookup( - &self, - version: SnapshotVersion, - key: &Self::Key, - ) -> Result; - - /// Returns the stakes withhelded by a public key along with a membership proof. - /// - /// # Errors - /// Return err if the `version` is not supported or `key` doesn't exist. - fn lookup_with_proof( - &self, - version: SnapshotVersion, - key: &Self::Key, - ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError>; - - /// Return the associated stake amount and auxiliary information of a public key, - /// along with a membership proof. - /// - /// # Errors - /// Return err if the `version` is not supported or `key` doesn't exist. - #[allow(clippy::type_complexity)] - fn lookup_with_aux_and_proof( - &self, - version: SnapshotVersion, - key: &Self::Key, - ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError>; - - /// Update the stake of the `key` with `(negative ? -1 : 1) * delta`. - /// Return the updated stake or error. - /// - /// # Errors - /// Return err if the `key` doesn't exist of if the update overflow/underflow. - fn update( - &mut self, - key: &Self::Key, - delta: Self::Amount, - negative: bool, - ) -> Result; - - /// Batch update the stake balance of `keys`. Read documentation about - /// [`Self::update()`]. By default, we call `Self::update()` on each - /// (key, amount, negative) tuple. - /// - /// # Errors - /// Return err if any one of the `update` failed. - fn batch_update( - &mut self, - keys: &[Self::Key], - amounts: &[Self::Amount], - negative_flags: Vec, - ) -> Result, StakeTableError> { - let updated_amounts = keys - .iter() - .zip(amounts.iter()) - .zip(negative_flags.iter()) - .map(|((key, &amount), negative)| Self::update(self, key, amount, *negative)) - .collect::, _>>()?; - - Ok(updated_amounts) - } - - /// Randomly sample a (key, stake amount) pair proportional to the stake distributions, - /// given a fixed seed for `rng`, this sampling should be deterministic. - fn sample( - &self, - rng: &mut (impl SeedableRng + CryptoRngCore), - ) -> Option<(&Self::Key, &Self::Amount)>; - - /// Returns an iterator over all (key, value) entries of the `version` of the table - /// - /// # Errors - /// Return err if the `version` is not supported. - fn try_iter(&self, version: SnapshotVersion) -> Result; -} - -/// Error type for [`StakeTableScheme`] -#[derive(Debug, Display)] -pub enum StakeTableError { - /// Internal error caused by Rescue - RescueError, - /// Key mismatched - MismatchedKey, - /// Key not found - KeyNotFound, - /// Key already exists - ExistingKey, - /// Malformed Merkle proof - MalformedProof, - /// Verification Error - VerificationError, - /// Insufficient fund: the number of stake cannot be negative - InsufficientFund, - /// The number of stake exceed U256 - StakeOverflow, - /// The historical snapshot requested is not supported. - SnapshotUnsupported, -} - -impl ark_std::error::Error for StakeTableError {} - -impl From for PrimitivesError { - fn from(value: StakeTableError) -> Self { - // FIXME: (alex) should we define a PrimitivesError::General()? - Self::ParameterError(value.to_string()) - } -} - -impl From for PlonkError { - fn from(value: StakeTableError) -> Self { - Self::PrimitiveError(PrimitivesError::ParameterError(value.to_string())) - } -} diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs deleted file mode 100644 index 9afb730526..0000000000 --- a/types/src/traits/states.rs +++ /dev/null @@ -1,84 +0,0 @@ -//! Abstractions over the immutable instance-level state and hte global state that blocks modify. -//! -//! This module provides the [`InstanceState`] and [`ValidatedState`] traits, which serve as -//! compatibilities over the current network state, which is modified by the transactions contained -//! within blocks. - -use super::block_contents::TestableBlock; -use crate::{ - data::Leaf, - traits::{ - node_implementation::{ConsensusTime, NodeType}, - BlockPayload, - }, -}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; - -/// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Debug + Send + Sync {} - -/// Abstraction over the state that blocks modify -/// -/// This trait represents the behaviors that the 'global' ledger state must have: -/// * A defined error type ([`Error`](ValidatedState::Error)) -/// * The type of block that modifies this type of state ([`BlockPayload`](`ValidatedStates:: -/// BlockPayload`)) -/// * The ability to validate that a block header is actually a valid extension of this state and -/// produce a new state, with the modifications from the block applied -/// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) -pub trait ValidatedState: - Serialize + DeserializeOwned + Debug + Default + Hash + PartialEq + Eq + Send + Sync -{ - /// The error type for this particular type of ledger state - type Error: Error + Debug + Send + Sync; - /// The type of the instance-level state this state is assocaited with - type Instance: InstanceState; - /// Time compatibility needed for reward collection - type Time: ConsensusTime; - - /// Check if the proposed block header is valid and apply it to the state if so. - /// - /// Returns the new state. - /// - /// # Arguments - /// * `instance` - Immutable instance-level state. - /// - /// # Errors - /// - /// If the block header is invalid or appending it would lead to an invalid state. - fn validate_and_apply_header( - &self, - instance: &Self::Instance, - parent_leaf: &Leaf, - proposed_header: &TYPES::BlockHeader, - ) -> impl Future> + Send; - - /// Construct the state with the given block header. - /// - /// This can also be used to rebuild the state for catchup. - fn from_header(block_header: &TYPES::BlockHeader) -> Self; - - /// Construct a genesis validated state. - #[must_use] - fn genesis(instance: &Self::Instance) -> Self; - - /// Gets called to notify the persistence backend that this state has been committed - fn on_commit(&self); -} - -/// extra functions required on state to be usable by hotshot-testing -pub trait TestableState: ValidatedState -where - TYPES: NodeType, - TYPES::BlockPayload: TestableBlock, -{ - /// Creates random transaction if possible - /// otherwise panics - /// `padding` is the bytes of padding to add to the transaction - fn create_random_transaction( - state: Option<&Self>, - rng: &mut dyn rand::RngCore, - padding: u64, - ) -> ::Transaction; -} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs deleted file mode 100644 index a91c326442..0000000000 --- a/types/src/traits/storage.rs +++ /dev/null @@ -1,160 +0,0 @@ -//! Abstraction over on-disk storage of node state - -use super::node_implementation::NodeType; -use crate::{data::Leaf, simple_certificate::QuorumCertificate, vote::HasViewNumber}; -use async_trait::async_trait; -use commit::Commitment; -use derivative::Derivative; -use snafu::Snafu; -use std::collections::{BTreeMap, BTreeSet}; -/// Errors that can occur in the storage layer. -#[derive(Clone, Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum StorageError { - /// No genesis view was inserted - NoGenesisView, -} - -/// Result for a storage type -pub type Result = std::result::Result; - -/// Abstraction over on disk persistence of node state -/// -/// This should be a cloneable handle to an underlying storage, with each clone pointing to the same -/// underlying storage. -/// -/// This trait has been constructed for object saftey over convenience. -#[async_trait] -pub trait Storage: Clone + Send + Sync + Sized + 'static -where - TYPES: NodeType + 'static, -{ - /// Append the list of views to this storage - async fn append(&self, views: Vec>) -> Result; - /// Cleans up the storage up to the given view. The given view number will still persist in this storage afterwards. - async fn cleanup_storage_up_to_view(&self, view: TYPES::Time) -> Result; - /// Get the latest anchored view - async fn get_anchored_view(&self) -> Result>; - /// Commit this storage. - async fn commit(&self) -> Result; - - /// Insert a single view. Shorthand for - /// ```rust,ignore - /// storage.append(vec![ViewEntry::Success(view)]).await - /// ``` - async fn append_single_view(&self, view: StoredView) -> Result { - self.append(vec![ViewEntry::Success(view)]).await - } - // future improvement: - // async fn get_future_views(&self) -> Vec; - // async fn add_transaction(&self, transactions: Transaction) -> TransactionHash; - // async fn get_transactions(&self) -> Vec; - // async fn get_transaction(&self, hash: TransactionHash) -> Option; - // async fn remove_transaction(&self, hash: TransactionHash) -> Option; -} - -/// Extra requirements on Storage implementations required for testing -#[async_trait] -pub trait TestableStorage: Clone + Send + Sync + Storage -where - TYPES: NodeType + 'static, -{ - /// Create ephemeral storage - /// Will be deleted/lost immediately after storage is dropped - /// # Errors - /// Errors if it is not possible to construct temporary storage. - fn construct_tmp_storage() -> Result; - - /// Return the full internal state. This is useful for debugging. - async fn get_full_state(&self) -> StorageState; -} - -/// An internal representation of the data stored in a [`Storage`]. -/// -/// This should only be used for testing, never in production code. -#[derive(Debug, PartialEq)] -pub struct StorageState { - /// The views that have been successful - pub stored: BTreeMap>, - /// The views that have failed - pub failed: BTreeSet, -} - -/// An entry to `Storage::append`. This makes it possible to commit both succeeded and failed views at the same time -#[derive(Debug, PartialEq)] -pub enum ViewEntry -where - TYPES: NodeType, -{ - /// A succeeded view - Success(StoredView), - /// A failed view - Failed(TYPES::Time), - // future improvement: - // InProgress(InProgressView), -} - -impl From> for ViewEntry -where - TYPES: NodeType, -{ - fn from(view: StoredView) -> Self { - Self::Success(view) - } -} - -impl From> for ViewEntry -where - TYPES: NodeType, -{ - fn from(leaf: Leaf) -> Self { - Self::Success(StoredView::from(leaf)) - } -} - -/// A view stored in the [`Storage`] -#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq)] -pub struct StoredView { - /// The view number of this view - pub view_number: TYPES::Time, - /// The parent of this view - pub parent: Commitment>, - /// The justify QC of this view. See the hotstuff paper for more information on this. - pub justify_qc: QuorumCertificate, - /// Block header. - pub block_header: TYPES::BlockHeader, - /// Optional block payload. - /// - /// It may be empty for nodes not in the DA committee. - pub block_payload: Option, - /// the proposer id - #[derivative(PartialEq = "ignore")] - pub proposer_id: TYPES::SignatureKey, -} - -impl StoredView -where - TYPES: NodeType, -{ - /// Create a new `StoredView` from the given QC, `BlockHeader`, `BlockPayload` and State. - /// - /// Note that this will set the `parent` to `LeafHash::default()`, so this will not have a - /// parent. - pub fn from_qc_block_and_state( - qc: QuorumCertificate, - block_header: TYPES::BlockHeader, - block_payload: Option, - parent_commitment: Commitment>, - proposer_id: TYPES::SignatureKey, - ) -> Self { - Self { - view_number: qc.get_view_number(), - parent: parent_commitment, - justify_qc: qc, - block_header, - block_payload, - proposer_id, - } - } -} diff --git a/types/src/utils.rs b/types/src/utils.rs deleted file mode 100644 index af5082f8df..0000000000 --- a/types/src/utils.rs +++ /dev/null @@ -1,136 +0,0 @@ -//! Utility functions, type aliases, helper structs and enum definitions. - -use crate::{data::Leaf, traits::node_implementation::NodeType, vid::VidCommitment}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use commit::Commitment; -use digest::OutputSizeUser; -use sha2::Digest; -use std::{ops::Deref, sync::Arc}; -use tagged_base64::tagged; -use typenum::Unsigned; - -/// A view's state -#[derive(Debug)] -pub enum ViewInner { - /// A pending view with an available block but not leaf proposal yet. - /// - /// Storing this state allows us to garbage collect blocks for views where a proposal is never - /// made. This saves memory when a leader fails and subverts a DoS attack where malicious - /// leaders repeatedly request availability for blocks that they never propose. - DA { - /// Payload commitment to the available block. - payload_commitment: VidCommitment, - }, - /// Undecided view - Leaf { - /// Proposed leaf - leaf: LeafCommitment, - /// Validated state. - state: Arc, - }, - /// Leaf has failed - Failed, -} - -/// The hash of a leaf. -pub type LeafCommitment = Commitment>; - -impl ViewInner { - /// Return the underlying undecide leaf view if it exists. - #[must_use] - pub fn get_leaf(&self) -> Option<(LeafCommitment, &Arc)> { - if let Self::Leaf { leaf, state } = self { - Some((*leaf, state)) - } else { - None - } - } - - /// return the underlying leaf hash if it exists - #[must_use] - pub fn get_leaf_commitment(&self) -> Option> { - if let Self::Leaf { leaf, .. } = self { - Some(*leaf) - } else { - None - } - } - - /// return the underlying validated state if it exists - #[must_use] - pub fn get_state(&self) -> Option<&Arc> { - if let Self::Leaf { state, .. } = self { - Some(state) - } else { - None - } - } - - /// return the underlying block paylod commitment if it exists - #[must_use] - pub fn get_payload_commitment(&self) -> Option { - if let Self::DA { payload_commitment } = self { - Some(*payload_commitment) - } else { - None - } - } -} - -impl Deref for View { - type Target = ViewInner; - - fn deref(&self) -> &Self::Target { - &self.view_inner - } -} - -/// This exists so we can perform state transitions mutably -#[derive(Debug)] -pub struct View { - /// The view data. Wrapped in a struct so we can mutate - pub view_inner: ViewInner, -} - -/// A struct containing information about a finished round. -#[derive(Debug, Clone)] -pub struct RoundFinishedEvent { - /// The round that finished - pub view_number: TYPES::Time, -} - -/// Whether or not to stop inclusively or exclusively when walking -#[derive(Copy, Clone, Debug)] -pub enum Terminator { - /// Stop right before this view number - Exclusive(T), - /// Stop including this view number - Inclusive(T), -} - -/// Type alias for byte array of SHA256 digest length -type Sha256Digest = [u8; ::OutputSize::USIZE]; - -#[tagged("BUILDER_COMMITMENT")] -#[derive(Clone, Debug, Hash, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] -/// Commitment that builders use to sign block options. -/// A thin wrapper around a Sha256 digest. -pub struct BuilderCommitment(Sha256Digest); - -impl BuilderCommitment { - /// Create new commitment for `data` - pub fn from_bytes(data: impl AsRef<[u8]>) -> Self { - Self(sha2::Sha256::digest(data.as_ref()).into()) - } - - /// Create a new commitment from a raw Sha256 digest - pub fn from_raw_digest(digest: impl Into) -> Self { - Self(digest.into()) - } -} - -impl AsRef for BuilderCommitment { - fn as_ref(&self) -> &Sha256Digest { - &self.0 - } -} diff --git a/types/src/vid.rs b/types/src/vid.rs deleted file mode 100644 index 0fbc75f4bd..0000000000 --- a/types/src/vid.rs +++ /dev/null @@ -1,270 +0,0 @@ -//! This module provides: -//! - an opaque constructor [`vid_scheme`] that returns a new instance of a -//! VID scheme. -//! - type aliases [`VidCommitment`], [`VidCommon`], [`VidShare`] -//! for [`VidScheme`] assoc types. -//! -//! Purpose: the specific choice of VID scheme is an implementation detail. -//! This crate and all downstream crates should talk to the VID scheme only -//! via the traits exposed here. - -use ark_bls12_381::Bls12_381; -use jf_primitives::{ - pcs::{ - checked_fft_size, - prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, - PolynomialCommitmentScheme, - }, - vid::{ - advz::{ - payload_prover::{LargeRangeProof, SmallRangeProof}, - Advz, - }, - payload_prover::{PayloadProver, Statement}, - precomputable::Precomputable, - VidDisperse, VidResult, VidScheme, - }, -}; -use lazy_static::lazy_static; -use serde::{Deserialize, Serialize}; -use sha2::Sha256; -use std::{fmt::Debug, ops::Range}; - -/// VID scheme constructor. -/// -/// Returns an opaque type that impls jellyfish traits: -/// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. -/// -/// # Rust forbids naming impl Trait in return types -/// -/// Due to Rust limitations the return type of [`vid_scheme`] is a newtype -/// wrapper [`VidSchemeType`] that impls the above traits. -/// -/// We prefer that the return type of [`vid_scheme`] be `impl Trait` for the -/// above traits. But the ability to name an impl Trait return type is -/// currently missing from Rust: -/// - [Naming impl trait in return types - Impl trait initiative](https://rust-lang.github.io/impl-trait-initiative/explainer/rpit_names.html) -/// - [RFC: Type alias impl trait (TAIT)](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) -/// -/// # Panics -/// When the construction fails for the underlying VID scheme. -#[must_use] -pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { - // chunk_size is currently num_storage_nodes rounded down to a power of two - // TODO chunk_size should be a function of the desired erasure code rate - // https://github.com/EspressoSystems/HotShot/issues/2152 - let chunk_size = 1 << num_storage_nodes.ilog2(); - - // TODO intelligent choice of multiplicity - let multiplicity = 1; - - // TODO panic, return `Result`, or make `new` infallible upstream (eg. by panicking)? - #[allow(clippy::panic)] - VidSchemeType(Advz::new(chunk_size, num_storage_nodes, multiplicity, &*KZG_SRS).unwrap_or_else(|err| panic!("advz construction failure:\n\t(num_storage nodes,chunk_size,multiplicity)=({num_storage_nodes},{chunk_size},{multiplicity})\n\terror: : {err}"))) -} - -/// VID commitment type -pub type VidCommitment = ::Commit; -/// VID common type -pub type VidCommon = ::Common; -/// VID share type -pub type VidShare = ::Share; - -/// Newtype wrapper for a VID scheme type that impls -/// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. -pub struct VidSchemeType(Advz); - -/// Newtype wrapper for a large payload range proof. -/// -/// Useful for namespace proofs. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct LargeRangeProofType( - // # Type complexity - // - // Jellyfish's `LargeRangeProof` type has a prime field generic parameter `F`. - // This `F` is determined by the type parameter `E` for `Advz`. - // Jellyfish needs a more ergonomic way for downstream users to refer to this type. - // - // There is a `KzgEval` type alias in jellyfish that helps a little, but it's currently private: - // - // If it were public then we could instead use - // `LargeRangeProof>` - // but that's still pretty crufty. - LargeRangeProof< as PolynomialCommitmentScheme>::Evaluation>, -); - -/// Newtype wrapper for a small payload range proof. -/// -/// Useful for transaction proofs. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct SmallRangeProofType( - // # Type complexity - // - // Similar to the comments in `LargeRangeProofType`. - SmallRangeProof< as PolynomialCommitmentScheme>::Proof>, -); - -lazy_static! { - /// SRS comment - /// - /// TODO use a proper SRS - /// https://github.com/EspressoSystems/HotShot/issues/1686 - static ref KZG_SRS: UnivariateUniversalParams = { - let mut rng = jf_utils::test_rng(); - UnivariateKzgPCS::::gen_srs_for_testing( - &mut rng, - // TODO what's the maximum possible SRS size? - checked_fft_size(200).unwrap(), - ) - .unwrap() - }; -} - -/// Private type alias for the EC pairing type parameter for [`Advz`]. -type E = Bls12_381; -/// Private type alias for the hash type parameter for [`Advz`]. -type H = Sha256; - -// THE REST OF THIS FILE IS BOILERPLATE -// -// All this boilerplate can be deleted when we finally get -// type alias impl trait (TAIT): -// [rfcs/text/2515-type_alias_impl_trait.md at master · rust-lang/rfcs](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) -impl VidScheme for VidSchemeType { - type Commit = as VidScheme>::Commit; - type Share = as VidScheme>::Share; - type Common = as VidScheme>::Common; - - fn commit_only(&self, payload: B) -> VidResult - where - B: AsRef<[u8]>, - { - self.0.commit_only(payload) - } - - fn disperse(&self, payload: B) -> VidResult> - where - B: AsRef<[u8]>, - { - self.0.disperse(payload).map(vid_disperse_conversion) - } - - fn verify_share( - &self, - share: &Self::Share, - common: &Self::Common, - commit: &Self::Commit, - ) -> VidResult> { - self.0.verify_share(share, common, commit) - } - - fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { - self.0.recover_payload(shares, common) - } - - fn is_consistent(commit: &Self::Commit, common: &Self::Common) -> VidResult<()> { - as VidScheme>::is_consistent(commit, common) - } - - fn get_payload_byte_len(common: &Self::Common) -> usize { - as VidScheme>::get_payload_byte_len(common) - } - - fn get_num_storage_nodes(common: &Self::Common) -> usize { - as VidScheme>::get_num_storage_nodes(common) - } - - fn get_multiplicity(common: &Self::Common) -> usize { - as VidScheme>::get_multiplicity(common) - } -} - -impl PayloadProver for VidSchemeType { - fn payload_proof(&self, payload: B, range: Range) -> VidResult - where - B: AsRef<[u8]>, - { - self.0 - .payload_proof(payload, range) - .map(LargeRangeProofType) - } - - fn payload_verify( - &self, - stmt: Statement<'_, Self>, - proof: &LargeRangeProofType, - ) -> VidResult> { - self.0.payload_verify(stmt_conversion(stmt), &proof.0) - } -} - -impl PayloadProver for VidSchemeType { - fn payload_proof(&self, payload: B, range: Range) -> VidResult - where - B: AsRef<[u8]>, - { - self.0 - .payload_proof(payload, range) - .map(SmallRangeProofType) - } - - fn payload_verify( - &self, - stmt: Statement<'_, Self>, - proof: &SmallRangeProofType, - ) -> VidResult> { - self.0.payload_verify(stmt_conversion(stmt), &proof.0) - } -} - -impl Precomputable for VidSchemeType { - type PrecomputeData = as Precomputable>::PrecomputeData; - - fn commit_only_precompute( - &self, - payload: B, - ) -> VidResult<(Self::Commit, Self::PrecomputeData)> - where - B: AsRef<[u8]>, - { - self.0.commit_only_precompute(payload) - } - - fn disperse_precompute( - &self, - payload: B, - data: &Self::PrecomputeData, - ) -> VidResult> - where - B: AsRef<[u8]>, - { - self.0 - .disperse_precompute(payload, data) - .map(vid_disperse_conversion) - } -} - -/// Convert a [`VidDisperse>`] to a [`VidDisperse`]. -/// -/// Foreign type rules prevent us from doing: -/// - `impl From> for VidDisperse>` -/// - `impl VidDisperse {...}` -/// and similarly for `Statement`. -/// Thus, we accomplish type conversion via functions. -fn vid_disperse_conversion(vid_disperse: VidDisperse>) -> VidDisperse { - VidDisperse { - shares: vid_disperse.shares, - common: vid_disperse.common, - commit: vid_disperse.commit, - } -} - -/// Convert a [`Statement<'_, VidSchemeType>`] to a [`Statement<'_, Advz>`]. -fn stmt_conversion(stmt: Statement<'_, VidSchemeType>) -> Statement<'_, Advz> { - Statement { - payload_subslice: stmt.payload_subslice, - range: stmt.range, - commit: stmt.commit, - common: stmt.common, - } -} diff --git a/types/src/vote.rs b/types/src/vote.rs deleted file mode 100644 index ba49f4732d..0000000000 --- a/types/src/vote.rs +++ /dev/null @@ -1,183 +0,0 @@ -//! Vote, Accumulator, and Certificate Types - -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, -}; - -use bitvec::{bitvec, vec::BitVec}; -use commit::Commitment; -use either::Either; -use ethereum_types::U256; -use tracing::error; - -use crate::{ - simple_certificate::Threshold, - simple_vote::Voteable, - traits::{ - election::Membership, - node_implementation::NodeType, - signature_key::{SignatureKey, StakeTableEntryType}, - }, -}; - -/// A simple vote that has a signer and commitment to the data voted on. -pub trait Vote: HasViewNumber { - /// Type of data commitment this vote uses. - type Commitment: Voteable; - - /// Get the signature of the vote sender - fn get_signature(&self) -> ::PureAssembledSignatureType; - /// Gets the data which was voted on by this vote - fn get_data(&self) -> &Self::Commitment; - /// Gets the Data commitment of the vote - fn get_data_commitment(&self) -> Commitment; - - /// Gets the public signature key of the votes creator/sender - fn get_signing_key(&self) -> TYPES::SignatureKey; -} - -/// Any type that is associated with a view -pub trait HasViewNumber { - /// Returns the view number the type refers to. - fn get_view_number(&self) -> TYPES::Time; -} - -/** -The certificate formed from the collection of signatures a committee. -The committee is defined by the `Membership` associated type. -The votes all must be over the `Commitment` associated type. -*/ -pub trait Certificate: HasViewNumber { - /// The data commitment this certificate certifies. - type Voteable: Voteable; - - /// Threshold Functions - type Threshold: Threshold; - - /// Build a certificate from the data commitment and the quorum of signers - fn create_signed_certificate( - vote_commitment: Commitment, - data: Self::Voteable, - sig: ::QCType, - view: TYPES::Time, - ) -> Self; - - /// Checks if the cert is valid - fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool; - /// Returns the amount of stake needed to create this certificate - // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold>(membership: &MEMBERSHIP) -> u64; - /// Get the commitment which was voted on - fn get_data(&self) -> &Self::Voteable; - /// Get the vote commitment which the votes commit to - fn get_data_commitment(&self) -> Commitment; -} -/// Mapping of vote commitment to sigatures and bitvec -type SignersMap = HashMap< - COMMITMENT, - ( - BitVec, - Vec<::PureAssembledSignatureType>, - ), ->; -/// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs -pub struct VoteAccumulator< - TYPES: NodeType, - VOTE: Vote, - CERT: Certificate, -> { - /// Map of all signatures accumlated so far - pub vote_outcomes: VoteMap2< - Commitment, - TYPES::SignatureKey, - ::PureAssembledSignatureType, - >, - /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check - /// And a list of valid signatures for certificate aggregation - pub signers: SignersMap, TYPES::SignatureKey>, - /// Phantom data to specify the types this accumulator is for - pub phantom: PhantomData<(TYPES, VOTE, CERT)>, -} - -impl, CERT: Certificate> - VoteAccumulator -{ - /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we - /// have accumulated enough votes to exceed the threshold for creating a certificate. - /// - /// # Panics - /// Panics if the vote comes from a node not in the stake table - pub fn accumulate(&mut self, vote: &VOTE, membership: &TYPES::Membership) -> Either<(), CERT> { - let key = vote.get_signing_key(); - - let vote_commitment = vote.get_data_commitment(); - if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { - error!("Invalid vote! Vote Data {:?}", vote.get_data()); - return Either::Left(()); - } - - let Some(stake_table_entry) = membership.get_stake(&key) else { - return Either::Left(()); - }; - let stake_table = membership.get_committee_qc_stake_table(); - let vote_node_id = stake_table - .iter() - .position(|x| *x == stake_table_entry.clone()) - .unwrap(); - - let original_signature: ::PureAssembledSignatureType = - vote.get_signature(); - - let (total_stake_casted, total_vote_map) = self - .vote_outcomes - .entry(vote_commitment) - .or_insert_with(|| (U256::from(0), BTreeMap::new())); - - // Check for duplicate vote - if total_vote_map.contains_key(&key) { - return Either::Left(()); - } - let (signers, sig_list) = self - .signers - .entry(vote_commitment) - .or_insert((bitvec![0; membership.total_nodes()], Vec::new())); - if signers.get(vote_node_id).as_deref() == Some(&true) { - error!("Node id is already in signers list"); - return Either::Left(()); - } - signers.set(vote_node_id, true); - sig_list.push(original_signature); - - // TODO: Get the stake from the stake table entry. - *total_stake_casted += stake_table_entry.get_stake(); - total_vote_map.insert(key, (vote.get_signature(), vote.get_data_commitment())); - - if *total_stake_casted >= CERT::threshold(membership).into() { - // Assemble QC - let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = - ::get_public_parameter( - stake_table, - U256::from(CERT::threshold(membership)), - ); - - let real_qc_sig = ::assemble( - &real_qc_pp, - signers.as_bitslice(), - &sig_list[..], - ); - - let cert = CERT::create_signed_certificate( - vote.get_data_commitment(), - vote.get_data().clone(), - real_qc_sig, - vote.get_view_number(), - ); - return Either::Right(cert); - } - Either::Left(()) - } -} - -/// Mapping of commitments to vote tokens by key. -type VoteMap2 = HashMap)>; diff --git a/utils/Cargo.toml b/utils/Cargo.toml index f5622e104e..a3ad83dfad 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -8,7 +8,7 @@ version = "0.1.0" [dependencies] bincode = { workspace = true } -hotshot-constants = { path = "../constants" } +hotshot-types = { workspace = true } [lints] workspace = true diff --git a/utils/src/version.rs b/utils/src/version.rs index 34aadb9c88..5bd0dd7db9 100644 --- a/utils/src/version.rs +++ b/utils/src/version.rs @@ -1,6 +1,6 @@ //! Utilities for reading version number -use hotshot_constants::Version; +use hotshot_types::constants::Version; /// Read the version number from a message (passed a byte vector), /// returning `None` is there are not enough bytes. diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 0aa74c03cc..f16abd3d83 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -10,15 +10,12 @@ async-compatibility-layer = { workspace = true } async-lock = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -hotshot-types = { path = "../types", default-features = false } +hotshot-types = { workspace = true, default-features = false } tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } toml = { workspace = true } -[dev-dependencies] -hotshot-types = { path = "../types", default-features = false } - [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] From d62df73118995ae2a4f6b58ae862c0e5c27b24b5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 12 Mar 2024 15:11:41 -0400 Subject: [PATCH 0850/1393] Fix cargo warnings and docs (#2760) --- example-types/Cargo.toml | 2 +- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- orchestrator/Cargo.toml | 2 +- task-impls/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- testing/Cargo.toml | 2 +- web_server/Cargo.toml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 19b8d3532a..44f17dc81b 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -19,7 +19,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } -hotshot-types = { workspace = true, default-features = false } +hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 377da39dcf..023a6a9d68 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -94,7 +94,7 @@ embed-doc-image = "0.1.4" futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-types = { workspace = true, version = "0.1.0", default-features = false } +hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } libp2p-identity = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index c88e28de1d..7e8073b654 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -37,7 +37,7 @@ ethereum-types = { workspace = true } futures = { workspace = true } hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-types = { workspace = true, version = "0.1.0", default-features = false } +hotshot-types = { workspace = true } hotshot-task = { path = "../task" } hotshot-utils = { path = "../utils" } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 388649682a..437d5f9a6b 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -10,7 +10,7 @@ clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } libp2p = { workspace = true } blake3 = { workspace = true } -hotshot-types = { workspace = true, version = "0.1.0" , default-features = false } +hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } tide-disco = { workspace = true } surf-disco = { workspace = true } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 9ed541cf19..0f6346949d 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -13,7 +13,7 @@ futures = { workspace = true } snafu = { workspace = true } async-lock = { workspace = true } tracing = { workspace = true } -hotshot-types = { workspace = true, default-features = false } +hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } jf-primitives = { workspace = true } time = { workspace = true } diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 56d10d45d9..248c3f96cc 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -15,7 +15,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", default-features = false } -hotshot-types = { workspace = true, default-features = false } +hotshot-types = { workspace = true } hotshot-testing = { path = "../testing", default-features = false } hotshot-example-types = { path = "../example-types" } jf-primitives = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index e0b8784bf8..d75e8858b5 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -19,7 +19,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } -hotshot-types = { workspace = true, default-features = false } +hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index f16abd3d83..269cbe6ac6 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -10,7 +10,7 @@ async-compatibility-layer = { workspace = true } async-lock = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -hotshot-types = { workspace = true, default-features = false } +hotshot-types = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } From 3c3d43c75c1ea8cab4b5502608eeb434b29aa9e5 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 12 Mar 2024 13:22:05 -0700 Subject: [PATCH 0851/1393] [VALIDATED_STATE] - Add state information to the `Decide` event (#2733) * Add state and state delta to decide event * Wrap decide fields into Arc * Move state and delta to leaf chain, remove Clone, fix CI * Make a struct for leaf info * Update branch * Update tag * Fix build after updating types --- example-types/src/state_types.rs | 27 +++++++++----- examples/infra/mod.rs | 3 +- hotshot/src/lib.rs | 13 +++++-- hotshot/src/traits/storage/atomic_storage.rs | 5 --- libp2p-networking/tests/common/mod.rs | 2 +- task-impls/src/consensus.rs | 37 +++++++++++++------- testing/src/overall_safety_task.rs | 20 ++++++----- testing/src/task_helpers.rs | 12 +++---- 8 files changed, 74 insertions(+), 45 deletions(-) diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index a8593d5fea..5a17c2cdd0 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -6,7 +6,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, node_implementation::NodeType, - states::{InstanceState, TestableState, ValidatedState}, + states::{InstanceState, StateDelta, TestableState, ValidatedState}, BlockPayload, }, }; @@ -23,6 +23,12 @@ pub struct TestInstanceState {} impl InstanceState for TestInstanceState {} +/// Application-specific state delta implementation for testing purposes. +#[derive(Clone, Copy, Debug, Default)] +pub struct TestStateDelta {} + +impl StateDelta for TestStateDelta {} + /// Validated state implementation for testing purposes. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct TestValidatedState { @@ -59,6 +65,8 @@ impl ValidatedState for TestValidatedState { type Instance = TestInstanceState; + type Delta = TestStateDelta; + type Time = ViewNumber; async fn validate_and_apply_header( @@ -66,11 +74,14 @@ impl ValidatedState for TestValidatedState { _instance: &Self::Instance, _parent_leaf: &Leaf, _proposed_header: &TYPES::BlockHeader, - ) -> Result { - Ok(TestValidatedState { - block_height: self.block_height + 1, - prev_state_commitment: self.commit(), - }) + ) -> Result<(Self, Self::Delta), Self::Error> { + Ok(( + TestValidatedState { + block_height: self.block_height + 1, + prev_state_commitment: self.commit(), + }, + TestStateDelta {}, + )) } fn from_header(block_header: &TYPES::BlockHeader) -> Self { @@ -82,8 +93,8 @@ impl ValidatedState for TestValidatedState { fn on_commit(&self) {} - fn genesis(_instance: &Self::Instance) -> Self { - Self::default() + fn genesis(_instance: &Self::Instance) -> (Self, Self::Delta) { + (Self::default(), TestStateDelta {}) } } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 4a449d5930..99388356ec 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -566,7 +566,8 @@ pub trait RunDA< } => { let current_timestamp = Utc::now().timestamp(); // this might be a obob - if let Some((leaf, _)) = leaf_chain.first() { + if let Some(leaf_info) = leaf_chain.first() { + let leaf = &leaf_info.leaf; info!("Decide event for leaf: {}", *leaf.view_number); // iterate all the decided transactions to calculate latency diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4ad4e1ad23..cb70a2ab1f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -202,6 +202,7 @@ impl> SystemContext { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), state: validated_state, + delta: initializer.state_delta, }, }, ); @@ -229,6 +230,7 @@ impl> SystemContext { let consensus = Consensus { instance_state, validated_state_map, + vid_shares: BTreeMap::new(), cur_view: start_view, last_decided_view: anchored_leaf.get_view_number(), saved_leaves, @@ -625,6 +627,11 @@ pub struct HotShotInitializer { /// the state from the block header. validated_state: Option>, + /// Optional state delta. + /// + /// If it's given, we'll use it to constrcut the `SystemContext`. + state_delta: Option>::Delta>>, + /// Starting view number that we are confident won't lead to a double vote after restart. start_view: TYPES::Time, } @@ -634,11 +641,12 @@ impl HotShotInitializer { /// # Errors /// If we are unable to apply the genesis block to the default state pub fn from_genesis(instance_state: TYPES::InstanceState) -> Result> { - let validated_state = Some(Arc::new(TYPES::ValidatedState::genesis(&instance_state))); + let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); Ok(Self { inner: Leaf::genesis(&instance_state), instance_state, - validated_state, + validated_state: Some(Arc::new(validated_state)), + state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), }) } @@ -660,6 +668,7 @@ impl HotShotInitializer { inner: anchor_leaf, instance_state, validated_state, + state_delta: None, start_view, } } diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs index e4c9a0ffc6..6da61d4560 100644 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ b/hotshot/src/traits/storage/atomic_storage.rs @@ -179,11 +179,6 @@ impl Storage for AtomicStorage { Ok(self.inner.leaves.load_by_key_2_ref(hash).await) } - #[instrument(name = "AtomicStorage::get_state", skip_all)] - async fn get_state(&self, hash: &Commitment>) -> StorageResult> { - Ok(self.inner.states.get(hash).await) - } - async fn get_internal_state(&self) -> StorageState { let mut blocks: Vec<(Commitment, STATE::BBlockPayloadlock)> = self.inner.blocks.load_all().await.into_iter().collect(); diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index c06ff9085d..e887a50b1b 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -131,7 +131,7 @@ pub async fn test_bed, A: ConsensusApi + let parent = if justify_qc.is_genesis { // Send the `Decide` event for the genesis block if the justify QC is genesis. let leaf = Leaf::genesis(&consensus.instance_state); + let (validated_state, state_delta) = + TYPES::ValidatedState::genesis(&consensus.instance_state); + let state = Arc::new(validated_state); broadcast_event( Event { view_number: TYPES::Time::genesis(), event: EventType::Decide { - leaf_chain: Arc::new(vec![(leaf.clone(), None)]), + leaf_chain: Arc::new(vec![LeafInfo::new( + leaf.clone(), + state.clone(), + Some(Arc::new(state_delta)), + None, + )]), qc: Arc::new(justify_qc.clone()), block_size: None, }, @@ -531,7 +538,6 @@ impl, A: ConsensusApi + &self.output_event_stream, ) .await; - let state = Arc::new(TYPES::ValidatedState::genesis(&consensus.instance_state)); Some((leaf, state)) } else { match consensus @@ -540,7 +546,9 @@ impl, A: ConsensusApi + .cloned() { Some(leaf) => { - if let Some(state) = consensus.get_state(leaf.view_number) { + if let (Some(state), _) = + consensus.get_state_and_delta(leaf.view_number) + { Some((leaf, state.clone())) } else { error!("Parent state not found! Consensus internally inconsistent"); @@ -584,6 +592,7 @@ impl, A: ConsensusApi + view_inner: ViewInner::Leaf { leaf: leaf.commit(), state, + delta: None, }, }, ); @@ -624,7 +633,7 @@ impl, A: ConsensusApi + return; }; - let Ok(state) = parent_state + let Ok((validated_state, state_delta)) = parent_state .validate_and_apply_header( &consensus.instance_state, &parent_leaf, @@ -635,7 +644,8 @@ impl, A: ConsensusApi + error!("Block header doesn't extend the proposal",); return; }; - let state = Arc::new(state); + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); let parent_commitment = parent_leaf.commit(); let leaf: Leaf<_> = Leaf { view_number: view, @@ -664,7 +674,7 @@ impl, A: ConsensusApi + justify_qc.get_view_number(), Terminator::Inclusive(consensus.locked_view), false, - |leaf| { + |leaf, _, _| { // if leaf view no == locked view no then we're done, report success by // returning true leaf.view_number != consensus.locked_view @@ -723,7 +733,7 @@ impl, A: ConsensusApi + parent_view, Terminator::Exclusive(old_anchor_view), true, - |leaf| { + |leaf, state, delta| { if !new_decide_reached { if last_view_number_visited == leaf.view_number + 1 { last_view_number_visited = leaf.view_number; @@ -774,7 +784,7 @@ impl, A: ConsensusApi + .get(&leaf.get_view_number()) .map(|vid_proposal| vid_proposal.data.clone()); - leaf_views.push((leaf.clone(), vid)); + leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid)); leafs_decided.push(leaf.clone()); if let Some(ref payload) = leaf.block_payload { for txn in payload @@ -810,7 +820,8 @@ impl, A: ConsensusApi + View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), - state, + state: state.clone(), + delta: Some(delta.clone()), }, }, ); @@ -1270,7 +1281,7 @@ impl, A: ConsensusApi + return false; }; // Leaf hash in view inner does not match high qc hash - Why? - let Some((leaf_commitment, state)) = parent_view.get_leaf() else { + let Some((leaf_commitment, state)) = parent_view.get_leaf_and_state() else { error!( ?parent_view_number, ?parent_view, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 39335f3dc3..ce4b841810 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -2,7 +2,7 @@ use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ - data::{Leaf, VidDisperse}, + data::Leaf, error::RoundTimedoutState, event::{Event, EventType, LeafChain}, simple_certificate::QuorumCertificate, @@ -160,7 +160,7 @@ impl> TestTaskState block_size: maybe_block_size, } => { // Skip the genesis leaf. - if leaf_chain.last().unwrap().0.get_view_number() == TYPES::Time::genesis() { + if leaf_chain.last().unwrap().leaf.get_view_number() == TYPES::Time::genesis() { return None; } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); @@ -205,7 +205,7 @@ impl> TestTaskState view.update_status( threshold, len, - &key.0, + &key, check_leaf, check_block, transaction_threshold, @@ -358,11 +358,12 @@ impl RoundResult { idx: usize, result: (LeafChain, QuorumCertificate), maybe_block_size: Option, - ) -> Option<(Leaf, Option>)> { + ) -> Option> { self.success_nodes.insert(idx as u64, result.clone()); - let maybe_leaf: Option<(Leaf, _)> = result.0.into_iter().last(); - if let Some((leaf, _)) = maybe_leaf.clone() { + let maybe_leaf = result.0.into_iter().last(); + if let Some(leaf_info) = maybe_leaf.clone() { + let leaf = leaf_info.leaf; match self.leaf_map.entry(leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; @@ -393,8 +394,9 @@ impl RoundResult { } } } + return Some(leaf); } - maybe_leaf + None } /// check if the test failed due to not enough nodes getting through enough views @@ -474,8 +476,8 @@ impl RoundResult { for (leaf_vec, _) in self.success_nodes.values() { let most_recent_leaf = leaf_vec.iter().last(); - if let Some((leaf, _)) = most_recent_leaf { - match leaves.entry(leaf.clone()) { + if let Some(leaf_info) = most_recent_leaf { + match leaves.entry(leaf_info.leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index bd166039cc..6013ac6d40 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -272,12 +272,11 @@ async fn build_quorum_proposal_and_signature( // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { - let state_new_view = Arc::new( - parent_state - .validate_and_apply_header(&TestInstanceState {}, &parent_leaf, &block_header) - .await - .unwrap(), - ); + let (state_new_view, delta_new_view) = parent_state + .validate_and_apply_header(&TestInstanceState {}, &parent_leaf, &block_header) + .await + .unwrap(); + let state_new_view = Arc::new(state_new_view); // save states for the previous view to pass all the qc checks // In the long term, we want to get rid of this, do not manually update consensus state consensus.validated_state_map.insert( @@ -286,6 +285,7 @@ async fn build_quorum_proposal_and_signature( view_inner: ViewInner::Leaf { leaf: leaf.commit(), state: state_new_view.clone(), + delta: Some(Arc::new(delta_new_view)), }, }, ); From 9ecc53e14c2ff51cf993c91a00eae8c5b11974a2 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 12 Mar 2024 22:20:26 -0400 Subject: [PATCH 0852/1393] Test upgrade and consensus task together (#2714) --- macros/Cargo.toml | 20 ++++ macros/src/lib.rs | 169 +++++++++++++++++++++++++++++ task-impls/src/consensus.rs | 1 + task-impls/src/vote.rs | 1 + task/src/task.rs | 12 ++ testing/Cargo.toml | 1 + testing/src/predicates.rs | 61 +++++++++++ testing/src/script.rs | 149 +++++++++++++++++++------ testing/src/view_generator.rs | 16 ++- testing/tests/consensus_task.rs | 6 +- testing/tests/proposal_ordering.rs | 2 +- testing/tests/upgrade_task.rs | 164 +++++++++++++++++++++++++++- 12 files changed, 561 insertions(+), 41 deletions(-) create mode 100644 macros/Cargo.toml create mode 100644 macros/src/lib.rs diff --git a/macros/Cargo.toml b/macros/Cargo.toml new file mode 100644 index 0000000000..acaebfa966 --- /dev/null +++ b/macros/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "hotshot-macros" +version = "0.1.0" +edition = "2021" +description = "Macros for hotshot tests" + +[dependencies] +# proc macro stuff +quote = "1.0.33" +syn = { version = "2.0.50", features = ["full", "extra-traits"] } +proc-macro2 = "1.0.78" +derive_builder = "0.13.1" + +[dev-dependencies] +async-lock = { workspace = true } + +[lib] +proc-macro = true +[lints] +workspace = true diff --git a/macros/src/lib.rs b/macros/src/lib.rs new file mode 100644 index 0000000000..d5eb21064d --- /dev/null +++ b/macros/src/lib.rs @@ -0,0 +1,169 @@ +//! Macros for use in testing. + +use proc_macro::TokenStream; +use quote::{format_ident, quote}; + +/// Macro to test multiple `TaskState` scripts at once. +/// +/// Usage: +/// +/// `test_scripts[inputs, script1, script2, ...]` +/// +/// The generated test will: +/// - take the first entry of `inputs`, which should be a `Vec>`, +/// - feed this to each task in order, validating any output received before moving on to the next one, +/// - repeat the previous steps, with the aggregated outputs received from all tasks used as the new input set, +/// - repeat until no more output has been generated by any task, and finally +/// - proceed to the next entry of inputs. +/// +/// # Panics +/// +/// The macro panics if the input stream cannot be parsed. +/// The test will panic if the any of the scripts has a different number of stages from the input. +#[proc_macro] +pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { + // Parse the input as an iter of Expr + let inputs: Vec<_> = syn::parse::Parser::parse2( + syn::punctuated::Punctuated::::parse_terminated, + input.into(), + ) + .unwrap() + .into_iter() + .collect(); + + let test_inputs = &inputs[0]; + + let test_inputs_name = quote::quote!(#test_inputs).to_string(); + + let scripts = &inputs[1..]; + + let output_index_names: Vec<_> = scripts + .iter() + .map(|i| format_ident!("{}_output_index", quote::quote!(#i).to_string())) + .collect(); + + let task_names: Vec<_> = scripts + .iter() + .map(|i| format_ident!("{}_task", quote::quote!(#i).to_string())) + .collect(); + + let task_expectations: Vec<_> = scripts + .iter() + .map(|i| format_ident!("{}_expectations", quote::quote!(#i).to_string())) + .collect(); + + let script_names: Vec<_> = scripts + .iter() + .map(|i| quote::quote!(#i).to_string()) + .collect(); + + let expanded = quote! { { + + use hotshot_testing::script::{ + panic_extra_output_in_script, panic_missing_output_in_script, validate_output_or_panic_in_script, + validate_task_state_or_panic_in_script, + }; + + use hotshot_testing::predicates::Predicate; + use async_broadcast::broadcast; + use hotshot_task_impls::events::HotShotEvent; + + use hotshot_task::task::{Task, TaskRegistry, TaskState}; + use hotshot_types::traits::node_implementation::NodeType; + use std::sync::Arc; + + let registry = Arc::new(TaskRegistry::default()); + + let (test_input, task_receiver) = broadcast(1024); + // let (task_input, mut test_receiver) = broadcast(1024); + + let task_input = test_input.clone(); + let mut test_receiver = task_receiver.clone(); + + let mut loop_receiver = task_receiver.clone(); + + #(let mut #task_names = Task::new(task_input.clone(), task_receiver.clone(), registry.clone(), #scripts.state);)* + + #(let mut #task_expectations = #scripts.expectations;)* + + #(assert!(#task_expectations.len() == #test_inputs.len(), "Number of stages in {} does not match the number of stages in {}", #script_names, #test_inputs_name);)* + + for (stage_number, input_group) in #test_inputs.into_iter().enumerate() { + + #(let mut #output_index_names = 0;)* + + for input in &input_group { + #( + if !#task_names.state().filter(input) { + tracing::debug!("Test sent: {:?}", input); + + if let Some(res) = #task_names.handle_event(input.clone()).await { + #task_names.state().handle_result(&res).await; + } + + while let Ok(received_output) = test_receiver.try_recv() { + tracing::debug!("Test received: {:?}", received_output); + + let output_asserts = &#task_expectations[stage_number].output_asserts; + + if #output_index_names >= output_asserts.len() { + panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); + }; + + let assert = &output_asserts[#output_index_names]; + + validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, assert); + + #output_index_names += 1; + } + } + )* + } + + while let Ok(input) = loop_receiver.try_recv() { + #( + if !#task_names.state().filter(&input) { + tracing::debug!("Test sent: {:?}", input); + + if let Some(res) = #task_names.handle_event(input.clone()).await { + #task_names.state().handle_result(&res).await; + } + + while let Ok(received_output) = test_receiver.try_recv() { + tracing::debug!("Test received: {:?}", received_output); + + let output_asserts = &#task_expectations[stage_number].output_asserts; + + if #output_index_names >= output_asserts.len() { + panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); + }; + + let assert = &output_asserts[#output_index_names]; + + validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, assert); + + #output_index_names += 1; + } + } + )* + } + + #( + let output_asserts = &#task_expectations[stage_number].output_asserts; + + if #output_index_names < output_asserts.len() { + panic_missing_output_in_script(stage_number, #script_names.to_string(), &output_asserts[#output_index_names]); + } + + let task_state_asserts = &#task_expectations[stage_number].task_state_asserts; + + for assert in task_state_asserts { + validate_task_state_or_panic_in_script(stage_number, #script_names.to_string(), #task_names.state(), assert); + } + )* + } } + + }; + + expanded.into() +} diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 5567707761..06a5b3adfc 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1410,6 +1410,7 @@ impl, A: ConsensusApi + HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::QCFormed(_) + | HotShotEvent::UpgradeCertificateFormed(_) | HotShotEvent::DACRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 35516a5683..1a4cd0ade9 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -73,6 +73,7 @@ impl< event_stream: &Sender>, ) -> Option { if vote.get_leader(&self.membership) != self.public_key { + error!("Received vote for a view in which we were not the leader."); return None; } diff --git a/task/src/task.rs b/task/src/task.rs index f0cd013aa0..bb24f48f62 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -115,6 +115,18 @@ impl Task { state, } } + + /// The Task analog of `TaskState::handle_event`. + pub fn handle_event( + &mut self, + event: S::Event, + ) -> impl Future> + Send + '_ + where + Self: Sized, + { + S::handle_event(event, self) + } + /// Spawn the task loop, consuming self. Will continue until /// the task reaches some shutdown condition pub fn run(mut self) -> JoinHandle<()> { diff --git a/testing/Cargo.toml b/testing/Cargo.toml index d75e8858b5..fab9b16c59 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -21,6 +21,7 @@ futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } +hotshot-macros = { path = "../macros" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } jf-primitives = { workspace = true } diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index f84ff75ce3..0ec2a1f7f8 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -12,6 +12,12 @@ pub struct Predicate { pub info: String, } +impl std::fmt::Debug for Predicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{}", self.info) + } +} + pub fn exact(event: HotShotEvent) -> Predicate> where TYPES: NodeType, @@ -50,6 +56,61 @@ where } } +pub fn view_change() -> Predicate> +where + TYPES: NodeType, +{ + let info = "ViewChange".to_string(); + let function = |e: &_| matches!(e, ViewChange(_)); + + Predicate { + function: Box::new(function), + info, + } +} + +pub fn upgrade_certificate_formed() -> Predicate> +where + TYPES: NodeType, +{ + let info = "UpgradeCertificateFormed".to_string(); + let function = |e: &_| matches!(e, UpgradeCertificateFormed(_)); + + Predicate { + function: Box::new(function), + info, + } +} + +pub fn quorum_proposal_send_with_upgrade_certificate() -> Predicate> +where + TYPES: NodeType, +{ + let info = "QuorumProposalSend with UpgradeCertificate attached".to_string(); + let function = |e: &_| match e { + QuorumProposalSend(proposal, _) => proposal.data.upgrade_certificate.is_some(), + _ => false, + }; + + Predicate { + function: Box::new(function), + info, + } +} + +pub fn quorum_proposal_validated() -> Predicate> +where + TYPES: NodeType, +{ + let info = "QuorumProposalValidated".to_string(); + let function = |e: &_| matches!(e, QuorumProposalValidated(_)); + + Predicate { + function: Box::new(function), + info, + } +} + pub fn quorum_proposal_send() -> Predicate> where TYPES: NodeType, diff --git a/testing/src/script.rs b/testing/src/script.rs index 3889727f59..10f4dc2aa1 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -6,19 +6,59 @@ use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::sync::Arc; -/// A `TestScript` is a sequence of triples (input sequence, output sequence, assertions). -type TestScript = Vec>; - pub struct TestScriptStage>> { pub inputs: Vec>, pub outputs: Vec>>, pub asserts: Vec>, } -impl std::fmt::Debug for Predicate { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - write!(f, "{}", self.info) - } +/// A `TestScript` is a sequence of triples (input sequence, output sequence, assertions). +type TestScript = Vec>; + +pub fn panic_extra_output(stage_number: usize, output: &S) +where + S: std::fmt::Debug, +{ + let extra_output_error = format!( + "Stage {} | Received unexpected additional output:\n\n{:?}", + stage_number, output + ); + + panic!("{}", extra_output_error); +} + +pub fn panic_missing_output(stage_number: usize, output: &S) +where + S: std::fmt::Debug, +{ + let output_missing_error = format!( + "Stage {} | Failed to receive output for predicate: {:?}", + stage_number, output + ); + + panic!("{}", output_missing_error); +} + +pub fn validate_task_state_or_panic(stage_number: usize, state: &S, assert: &Predicate) { + assert!( + (assert.function)(state), + "Stage {} | Task state failed to satisfy: {:?}", + stage_number, + assert + ); +} + +pub fn validate_output_or_panic(stage_number: usize, output: &S, assert: &Predicate) +where + S: std::fmt::Debug, +{ + assert!( + (assert.function)(output), + "Stage {} | Output failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", + stage_number, + assert, + output + ); } /// `run_test_script` reads a triple (inputs, outputs, asserts) in a `TestScript`, @@ -66,41 +106,86 @@ pub async fn run_test_script>>( } } - for expected in &stage.outputs { - let output_missing_error = format!( - "Stage {} | Failed to receive output for predicate: {:?}", - stage_number, expected - ); - + for assert in &stage.outputs { if let Ok(received_output) = test_receiver.try_recv() { tracing::debug!("Test received: {:?}", received_output); - assert!( - (expected.function)(&received_output), - "Stage {} | Output failed to satisfy {:?}", - stage_number, - expected - ); + validate_output_or_panic(stage_number, &received_output, assert); } else { - panic!("{}", output_missing_error); + panic_missing_output(stage_number, assert); } } for assert in &stage.asserts { - assert!( - (assert.function)(task.state()), - "Stage {} | Assertion on task state failed: {:?}", - stage_number, - assert - ); + validate_task_state_or_panic(stage_number, task.state(), assert); } if let Ok(received_output) = test_receiver.try_recv() { - let extra_output_error = format!( - "Stage {} | Received unexpected additional output: {:?}", - stage_number, received_output - ); - - panic!("{}", extra_output_error); + panic_extra_output(stage_number, &received_output); } } } + +pub struct TaskScript { + pub state: S, + pub expectations: Vec>, +} + +pub struct Expectations { + pub output_asserts: Vec>>, + pub task_state_asserts: Vec>, +} + +pub fn panic_extra_output_in_script(stage_number: usize, script_name: String, output: &S) +where + S: std::fmt::Debug, +{ + let extra_output_error = format!( + "Stage {} | Received unexpected additional output in {}:\n\n{:?}", + stage_number, script_name, output + ); + + panic!("{}", extra_output_error); +} + +pub fn panic_missing_output_in_script(stage_number: usize, script_name: String, predicate: &S) +where + S: std::fmt::Debug, +{ + let output_missing_error = format!( + "Stage {} | Failed to receive output for predicate in {}: {:?}", + stage_number, script_name, predicate + ); + + panic!("{}", output_missing_error); +} + +pub fn validate_task_state_or_panic_in_script( + stage_number: usize, + script_name: String, + state: &S, + assert: &Predicate, +) { + assert!( + (assert.function)(state), + "Stage {} | Task state in {} failed to satisfy: {:?}", + stage_number, + script_name, + assert + ); +} + +pub fn validate_output_or_panic_in_script( + stage_number: usize, + script_name: String, + output: &S, + assert: &Predicate, +) { + assert!( + (assert.function)(output), + "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", + stage_number, + script_name, + assert, + output + ); +} diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 654cf0ff20..d3b6a2900c 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -239,7 +239,7 @@ impl TestView { } } - pub fn create_vote( + pub fn create_quorum_vote( &self, handle: &SystemContextHandle, ) -> QuorumVote { @@ -253,6 +253,20 @@ impl TestView { ) .expect("Failed to generate a signature on QuorumVote") } + + pub fn create_upgrade_vote( + &self, + data: UpgradeProposalData, + handle: &SystemContextHandle, + ) -> UpgradeVote { + UpgradeVote::::create_signed_vote( + data, + self.view_number, + handle.public_key(), + handle.private_key(), + ) + .expect("Failed to generate a signature on UpgradVote") + } } pub struct TestViewGenerator { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 0a3b686ef5..e18fdcf6aa 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -45,7 +45,7 @@ async fn test_consensus_task() { for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_vote(&handle)); + votes.push(view.create_quorum_vote(&handle)); } // Run view 1 (the genesis stage). @@ -119,7 +119,7 @@ async fn test_consensus_vote() { for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_vote(&handle)); + votes.push(view.create_quorum_vote(&handle)); } // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader @@ -176,7 +176,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_vote(&handle)); + votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index 40c4d1832e..daaed067a1 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -40,7 +40,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { for view in (&mut generator).take(3) { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_vote(&handle)); + votes.push(view.create_quorum_vote(&handle)); leaders.push(view.leader_public_key); } diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 38a402a971..f9e113652e 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -1,8 +1,15 @@ use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; -use hotshot_testing::{predicates::*, view_generator::TestViewGenerator}; +use hotshot_macros::test_scripts; +use hotshot_task_impls::{ + consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, +}; +use hotshot_testing::{ + predicates::*, + script::{Expectations, TaskScript}, + view_generator::TestViewGenerator, +}; use hotshot_types::constants::Version; use hotshot_types::{ data::ViewNumber, simple_vote::UpgradeProposalData, traits::node_implementation::ConsensusTime, @@ -41,7 +48,7 @@ async fn test_upgrade_task() { for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_vote(&handle)); + votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -51,7 +58,7 @@ async fn test_upgrade_task() { for view in generator.take(4) { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_vote(&handle)); + votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -134,3 +141,152 @@ async fn test_upgrade_task() { run_test_script(script, consensus_state).await; } + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. +async fn test_upgrade_and_consensus_task() { + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let other_handles = futures::future::join_all((0..=9).map(build_system_handle)).await; + + let old_version = Version { major: 0, minor: 1 }; + let new_version = Version { major: 0, minor: 2 }; + + let upgrade_data: UpgradeProposalData = UpgradeProposalData { + old_version, + new_version, + new_version_hash: [0u8; 12].to_vec(), + old_version_last_block: ViewNumber::new(5), + new_version_first_block: ViewNumber::new(7), + }; + + let mut proposals = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + let mut leaders = Vec::new(); + let mut views = Vec::new(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + generator.add_upgrade(upgrade_data.clone()); + + for view in generator.take(4) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + let upgrade_votes = other_handles + .iter() + .map(|h| views[1].create_upgrade_vote(upgrade_data.clone(), &h.0)); + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + let mut upgrade_state = UpgradeTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + upgrade_state.should_vote = |_| true; + + inject_consensus_polls(&consensus_state).await; + + let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); + + let inputs = vec![ + vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + upgrade_vote_recvs, + vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], + vec![ + DACRecv(dacs[1].clone()), + SendPayloadCommitmentAndMetadata( + vids[2].0.data.payload_commitment, + (), + ViewNumber::new(2), + ), + QCFormed(either::Either::Left(proposals[1].data.justify_qc.clone())), + ], + ]; + + let consensus_script = TaskScript { + state: consensus_state, + expectations: vec![ + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + quorum_vote_send(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![quorum_proposal_send_with_upgrade_certificate()], + task_state_asserts: vec![], + }, + ], + }; + + let upgrade_script = TaskScript { + state: upgrade_state, + expectations: vec![ + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![upgrade_certificate_formed()], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + ], + }; + + test_scripts![inputs, consensus_script, upgrade_script]; +} From 77882217d8e3439dbd6596c380720220cce9bc38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 12:32:14 +0100 Subject: [PATCH 0853/1393] Bump proc-macro2 from 1.0.78 to 1.0.79 (#2755) Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.78 to 1.0.79. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.78...1.0.79) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- macros/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/Cargo.toml b/macros/Cargo.toml index acaebfa966..97448728a9 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -8,7 +8,7 @@ description = "Macros for hotshot tests" # proc macro stuff quote = "1.0.33" syn = { version = "2.0.50", features = ["full", "extra-traits"] } -proc-macro2 = "1.0.78" +proc-macro2 = "1.0.79" derive_builder = "0.13.1" [dev-dependencies] diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 248c3f96cc..d1620e73e9 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -26,7 +26,7 @@ serde = { workspace = true } # proc macro stuff quote = "1.0.33" syn = { version = "2.0.52", features = ["full", "extra-traits"] } -proc-macro2 = "1.0.78" +proc-macro2 = "1.0.79" derive_builder = "0.13.1" [dev-dependencies] From 2647a02fe5f35d3b4a3f016396b4e32f46865dd0 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 13 Mar 2024 13:53:03 +0100 Subject: [PATCH 0854/1393] Remove accidental file (#2761) --- types/src/traits/block_contents.rs | 149 ----------------------------- 1 file changed, 149 deletions(-) delete mode 100644 types/src/traits/block_contents.rs diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs deleted file mode 100644 index b0b620e627..0000000000 --- a/types/src/traits/block_contents.rs +++ /dev/null @@ -1,149 +0,0 @@ -//! Abstraction over the contents of a block -//! -//! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which -//! describe the behaviors that a block is expected to have. - -use crate::{ - data::Leaf, - traits::{node_implementation::NodeType, ValidatedState}, - utils::BuilderCommitment, - vid::{vid_scheme, VidCommitment, VidSchemeType}, -}; -use commit::{Commitment, Committable}; -use jf_primitives::vid::VidScheme; -use serde::{de::DeserializeOwned, Serialize}; - -use std::{ - error::Error, - fmt::{Debug, Display}, - future::Future, - hash::Hash, -}; - -/// Abstraction over any type of transaction. Used by [`BlockPayload`]. -pub trait Transaction: - Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash -{ -} - -/// Abstraction over the full contents of a block -/// -/// This trait encapsulates the behaviors that the transactions of a block must have in order to be -/// used by consensus -/// * Must have a predefined error type ([`BlockPayload::Error`]) -/// * Must have a transaction type that can be compared for equality, serialized and serialized, -/// sent between threads, and can have a hash produced of it -/// * Must be hashable -pub trait BlockPayload: - Serialize + Clone + Debug + Display + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned -{ - /// The error type for this type of block - type Error: Error + Debug + Send + Sync; - - /// The type of the transitions we are applying - type Transaction: Transaction; - - /// Data created during block building which feeds into the block header - type Metadata: Clone + Debug + DeserializeOwned + Eq + Hash + Send + Sync + Serialize; - - /// Encoded payload. - type Encode<'a>: 'a + Iterator + Send - where - Self: 'a; - - /// Build a payload and associated metadata with the transactions. - /// - /// # Errors - /// If the transaction length conversion fails. - fn from_transactions( - transactions: impl IntoIterator, - ) -> Result<(Self, Self::Metadata), Self::Error>; - - /// Build a payload with the encoded transaction bytes, metadata, - /// and the associated number of VID storage nodes - /// - /// `I` may be, but not necessarily is, the `Encode` type directly from `fn encode`. - fn from_bytes(encoded_transactions: I, metadata: &Self::Metadata) -> Self - where - I: Iterator; - - /// Build the genesis payload and metadata. - fn genesis() -> (Self, Self::Metadata); - - /// Encode the payload - /// - /// # Errors - /// If the transaction length conversion fails. - fn encode(&self) -> Result, Self::Error>; - - /// List of transaction commitments. - fn transaction_commitments( - &self, - metadata: &Self::Metadata, - ) -> Vec>; - - /// Generate commitment that builders use to sign block options. - fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; - - /// Get the transactions in the payload. - fn get_transactions(&self, metadata: &Self::Metadata) -> &Vec; -} - -/// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: BlockPayload + Debug { - /// generate a genesis block - fn genesis() -> Self; - - /// the number of transactions in this block - fn txn_count(&self) -> u64; -} - -/// Compute the VID payload commitment. -/// TODO(Gus) delete this function? -/// # Panics -/// If the VID computation fails. -#[must_use] -pub fn vid_commitment( - encoded_transactions: &Vec, - num_storage_nodes: usize, -) -> ::Commit { - #[allow(clippy::panic)] - vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:\n\t(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}\n\t{err}", encoded_transactions.len())) -} - -/// The number of storage nodes to use when computing the genesis VID commitment. -/// -/// The number of storage nodes for the genesis VID commitment is arbitrary, since we don't actually -/// do dispersal for the genesis block. For simplicity and performance, we use 1. -pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; - -/// Header of a block, which commits to a [`BlockPayload`]. -pub trait BlockHeader: - Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable -{ - /// Build a header with the parent validate state, instance-level state, parent leaf, payload - /// commitment, and metadata. - fn new( - parent_state: &TYPES::ValidatedState, - instance_state: &>::Instance, - parent_leaf: &Leaf, - payload_commitment: VidCommitment, - metadata: ::Metadata, - ) -> impl Future + Send; - - /// Build the genesis header, payload, and metadata. - fn genesis( - instance_state: &>::Instance, - payload_commitment: VidCommitment, - metadata: ::Metadata, - ) -> Self; - - /// Get the block number. - fn block_number(&self) -> u64; - - /// Get the payload commitment. - fn payload_commitment(&self) -> VidCommitment; - - /// Get the metadata. - fn metadata(&self) -> &::Metadata; -} From 44d8b0223b235e270a6d0dcd2aee09cceb9ee80a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 13 Mar 2024 10:06:50 -0400 Subject: [PATCH 0855/1393] [NETWORK] Add Task to Respond to Requests from Peers (#2689) * Don't lock channels * cargo fix * Allow killing the connected network handler * Fix issues * Remove State from NetworkNodeHandle * Remove async where it's unused * fix async std build * Fix erros a little * Starting VID request response * handle network event * Bubble up request/response * Make Request and Response just bytes at network layer * Adding to ConnectedNetwork * Hooks for request/response in handle + trait * fix request * Remove request and response tasks for now * update mod.rs * Hooked up response flow to ConnectedNetwork * Refactor interface to return result to caller * Add request and response to message struct * Clean up some message stuff * Fix build error * Hook up request and response fully * Review myself, impl functions for combine * Change Receiver interface to spawn task * try_send instead of send * Create the task for request handling * rename request -> response * fix lint * clean up request response event handle fn * fix build * Comments and make the request signed * add signature checking * link gh issue * Address comments * Add state and state delta to decide event * Wrap decide fields into Arc * Move state and delta to leaf chain, remove Clone, fix CI * lock * Make a struct for leaf info * Update branch * Update tag * Fix build after updating types * update cargo toml * updates after merge --------- Co-authored-by: Keyao Shen --- hotshot/src/tasks/task_state.rs | 3 +- task-impls/src/consensus.rs | 25 ++--- task-impls/src/lib.rs | 3 + task-impls/src/response.rs | 179 ++++++++++++++++++++++++++++++++ 4 files changed, 192 insertions(+), 18 deletions(-) create mode 100644 task-impls/src/response.rs diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index b310d0bf6a..959184f1a2 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -18,7 +18,7 @@ use hotshot_types::traits::{ BlockPayload, }; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, marker::PhantomData, sync::Arc, time::Duration, @@ -188,7 +188,6 @@ impl> CreateTaskState decided_upgrade_cert: None, current_network_version: VERSION_0_1, output_event_stream: handle.hotshot.output_event_stream.0.clone(), - vid_shares: BTreeMap::new(), current_proposal: None, id: handle.hotshot.id, public_key: handle.public_key().clone(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 06a5b3adfc..693a8c78b4 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -16,7 +16,7 @@ use hotshot_types::constants::LOOK_AHEAD; use hotshot_types::event::LeafInfo; use hotshot_types::{ consensus::{Consensus, View}, - data::{Leaf, QuorumProposal, VidDisperse}, + data::{Leaf, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, @@ -39,11 +39,7 @@ use tracing::warn; use crate::vote::HandleVoteEvent; use chrono::Utc; -use std::{ - collections::{BTreeMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument}; @@ -130,12 +126,6 @@ pub struct ConsensusTaskState< /// Output events to application pub output_event_stream: async_broadcast::Sender>, - /// All the VID shares we've received for current and future views. - /// In the future we will need a different struct similar to VidDisperse except - /// it stores only one share. - /// TODO - pub vid_shares: BTreeMap>>, - /// The most recent proposal we have, will correspond to the current view if Some() /// Will be none if the view advanced through timeout/view_sync pub current_proposal: Option>, @@ -227,7 +217,7 @@ impl, A: ConsensusApi + } // Only vote if you has seen the VID share for this view - if let Some(_vid_share) = self.vid_shares.get(&proposal.view_number) { + if let Some(_vid_share) = consensus.vid_shares.get(&proposal.view_number) { } else { debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", @@ -779,7 +769,7 @@ impl, A: ConsensusApi + leaf.fill_block_payload_unchecked(payload); } - let vid = self + let vid = consensus .vid_shares .get(&leaf.get_view_number()) .map(|vid_proposal| vid_proposal.data.clone()); @@ -845,7 +835,6 @@ impl, A: ConsensusApi + ); let old_anchor_view = consensus.last_decided_view; consensus.collect_garbage(old_anchor_view, new_anchor_view); - self.vid_shares = self.vid_shares.split_off(&new_anchor_view); consensus.last_decided_view = new_anchor_view; consensus .metrics @@ -1121,7 +1110,11 @@ impl, A: ConsensusApi + .await; // Add to the storage that we have received the VID disperse for a specific view - self.vid_shares.insert(view, disperse); + self.consensus + .write() + .await + .vid_shares + .insert(view, disperse); self.vote_if_able(&event_stream).await; } HotShotEvent::ViewChange(new_view) => { diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 1be4020af2..e2d5ee3258 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -33,3 +33,6 @@ pub mod upgrade; /// Helper functions used by any task pub mod helpers; + +/// Task which responsds to requests from the network +pub mod response; diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs new file mode 100644 index 0000000000..425171eeea --- /dev/null +++ b/task-impls/src/response.rs @@ -0,0 +1,179 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use async_broadcast::Receiver; +use async_compatibility_layer::art::async_spawn; +use async_lock::RwLock; +use bincode::Options; +use either::Either::Right; +use futures::{channel::mpsc, FutureExt, StreamExt}; +use hotshot_task::dependency::{Dependency, EventDependency}; +use hotshot_types::constants::VERSION_0_1; +use hotshot_types::{ + consensus::Consensus, + data::VidDisperse, + message::{ + CommitteeConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage, + }, + traits::{ + election::Membership, + network::{DataRequest, RequestKind, ResponseChannel, ResponseMessage}, + node_implementation::NodeType, + signature_key::SignatureKey, + }, +}; +use hotshot_utils::bincode::bincode_opts; +use sha2::{Digest, Sha256}; + +use crate::events::HotShotEvent; + +/// Type alias for consensus state wrapped in a lock. +type LockedConsensusState = Arc>>; + +/// Type alias for the channel that we receive requests from the network on. +type ReqestReceiver = mpsc::Receiver<(Message, ResponseChannel>)>; + +/// Task state for the Network Request Task. The task is responsible for handling +/// requests sent to this node by the network. It will validate the sender, +/// parse the request, and try to find the data request in the consensus stores. +pub struct NetworkRequestState { + /// Locked consensus state + consensus: LockedConsensusState, + /// Receiver for requests + receiver: ReqestReceiver, + /// Quorum membership for checking if requesters have state + quorum: TYPES::Membership, + /// This replicas public key + pub_key: TYPES::SignatureKey, +} + +impl NetworkRequestState { + /// Create the network request state with the info it needs + pub fn new( + consensus: LockedConsensusState, + receiver: ReqestReceiver, + quorum: TYPES::Membership, + pub_key: TYPES::SignatureKey, + ) -> Self { + Self { + consensus, + receiver, + quorum, + pub_key, + } + } + + /// Run the request response loop until a `HotShotEvent::Shutdown` is received. + /// Or the stream is closed. + async fn run_loop(mut self, shutdown: EventDependency>) { + let mut shutdown = Box::pin(shutdown.completed().fuse()); + loop { + futures::select! { + req = self.receiver.next() => { + match req { + Some((msg, chan)) => self.handle_message(msg, chan).await, + None => return, + } + }, + _ = shutdown => { + return; + } + } + } + } + + /// Handle an incoming message. First validates the sender, then handles the contained request. + /// Sends the response via `chan` + async fn handle_message(&self, req: Message, chan: ResponseChannel>) { + let sender = req.sender.clone(); + if !self.valid_sender(&sender) { + let _ = chan.0.send(self.make_msg(ResponseMessage::Denied)); + return; + } + + match req.kind { + MessageKind::Data(DataMessage::RequestData(req)) => { + if !valid_signature(&req, &sender) { + let _ = chan.0.send(self.make_msg(ResponseMessage::Denied)); + return; + } + let response = self.handle_request(req).await; + let _ = chan.0.send(response); + } + msg => tracing::error!( + "Received message that wasn't a DataRequest in the request task. Message: {:?}", + msg + ), + } + } + + /// Handle the request contained in the message. Returns the response we should send + /// First parses the kind and passes to the appropriate hanlder for the specific type + /// of the request. + async fn handle_request(&self, req: DataRequest) -> Message { + match req.request { + RequestKind::VID(view, pub_key) => { + let state = self.consensus.read().await; + let Some(shares) = state.vid_shares.get(&view) else { + return self.make_msg(ResponseMessage::NotFound); + }; + self.handle_vid(shares.clone(), pub_key) + } + // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 + RequestKind::DAProposal(_view) => self.make_msg(ResponseMessage::NotFound), + } + } + + /// Handle a vid request by looking up the the share for the key. If a share is found + /// build the response and return it + fn handle_vid( + &self, + mut vid: Proposal>, + key: TYPES::SignatureKey, + ) -> Message { + let Some(share) = vid.data.shares.get(&key) else { + return self.make_msg(ResponseMessage::NotFound); + }; + vid.data.shares = BTreeMap::from([(key, share.clone())]); + let seq_msg = SequencingMessage(Right(CommitteeConsensusMessage::VidDisperseMsg(vid))); + self.make_msg(ResponseMessage::Found(seq_msg)) + } + + /// Helper to turn a `ResponseMessage` into a `Message` by filling + /// in the surrounding feilds and creating the `MessageKind` + fn make_msg(&self, msg: ResponseMessage) -> Message { + Message { + version: VERSION_0_1, + sender: self.pub_key.clone(), + kind: MessageKind::Data(DataMessage::DataResponse(msg)), + } + } + /// Makes sure the sender is allowed to send a request. + fn valid_sender(&self, sender: &TYPES::SignatureKey) -> bool { + self.quorum.has_stake(sender) + } +} + +/// Check the signature +fn valid_signature( + req: &DataRequest, + sender: &TYPES::SignatureKey, +) -> bool { + let Ok(data) = bincode_opts().serialize(&req.request) else { + return false; + }; + sender.validate(&req.signature, &Sha256::digest(data)) +} + +/// Spawn the network request task to handle incoming request for data +/// from other nodes. It will shutdown when it gets `HotshotEvent::Shutdown` +/// on the `event_stream` arg. +pub fn run_request_task( + task_state: NetworkRequestState, + event_stream: Receiver>, +) { + let dep = EventDependency::new( + event_stream, + Box::new(|e| matches!(e, HotShotEvent::Shutdown)), + ); + async_spawn(task_state.run_loop(dep)); +} From d5adb9cea601bff6fbca841b6e65a312183dd1ad Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 13 Mar 2024 13:38:59 -0400 Subject: [PATCH 0856/1393] Remove excessive logging (#2766) --- .../src/traits/networking/libp2p_network.rs | 9 +-- .../traits/networking/web_server_network.rs | 59 ++++++++----------- 2 files changed, 27 insertions(+), 41 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 24f12d86a6..6aef678163 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -66,7 +66,7 @@ use std::{ }, time::Duration, }; -use tracing::{error, info, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; /// convienence alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests @@ -451,13 +451,8 @@ impl Libp2pNetwork { if bs_addrs.len() >= num_bootstrap { break bs_addrs; } - info!( - "NODE {:?} bs addr len {:?}, number of bootstrap expected {:?}", - id, - bs_addrs.len(), - num_bootstrap - ); }; + debug!("Finished adding bootstrap addresses."); handle.add_known_peers(bs_addrs).await.unwrap(); handle.begin_bootstrap().await?; diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index ebb3fe7466..cd22c894ed 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -503,47 +503,38 @@ impl Inner { } } - let maybe_event = receiver.try_recv(); - match maybe_event { - Ok(event) => { - match event { - // TODO ED Should add extra error checking here to make sure we are intending to cancel a task - ConsensusIntentEvent::CancelPollForVotes(event_view) - | ConsensusIntentEvent::CancelPollForProposal(event_view) - | ConsensusIntentEvent::CancelPollForDAC(event_view) - | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) - | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) - | ConsensusIntentEvent::CancelPollForLatestProposal(event_view) - | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate( - event_view, - ) - | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { - if view_number == event_view { - debug!("Shutting down polling task for view {}", event_view); - return Ok(()); - } + if let Ok(event) = receiver.try_recv() { + match event { + // TODO ED Should add extra error checking here to make sure we are intending to cancel a task + ConsensusIntentEvent::CancelPollForVotes(event_view) + | ConsensusIntentEvent::CancelPollForProposal(event_view) + | ConsensusIntentEvent::CancelPollForDAC(event_view) + | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) + | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) + | ConsensusIntentEvent::CancelPollForLatestProposal(event_view) + | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(event_view) + | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { + if view_number == event_view { + debug!("Shutting down polling task for view {}", event_view); + return Ok(()); } - ConsensusIntentEvent::CancelPollForTransactions(event_view) => { - // Write the most recent tx index so we can pick up where we left off later + } + ConsensusIntentEvent::CancelPollForTransactions(event_view) => { + // Write the most recent tx index so we can pick up where we left off later - let mut lock = self.tx_index.write().await; - *lock = tx_index; + let mut lock = self.tx_index.write().await; + *lock = tx_index; - if view_number == event_view { - debug!("Shutting down polling task for view {}", event_view); - return Ok(()); - } + if view_number == event_view { + debug!("Shutting down polling task for view {}", event_view); + return Ok(()); } + } - _ => { - unimplemented!() - } + _ => { + unimplemented!() } } - // Nothing on receiving channel - Err(_) => { - debug!("Nothing on receiving channel"); - } } } Err(NetworkError::ShutDown) From 078e5b87e4177f13be0227f9f8737ab4c3728eca Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 13 Mar 2024 18:48:06 +0100 Subject: [PATCH 0857/1393] Make CombinedNetworks delay duration configurable (#2726) * Make CombinedNetworks delay duration configurable * Secondary network delay configurable in HotShotConfig * Rename CombinedConfig to CombinedNetworkConfig * Network delay in test network generator `secondary_network_delay` removed from `HotShotConfig` because it cannot easily be passed to the test network generator. * Temporary pinning to hotshot-types branch TODO: switch to hotshot-types tag or main branch before merging * Pin to hotshot-types tag 0.1.2 * Remove files added back by mistake --- examples/infra/mod.rs | 27 ++++++++++++------- .../src/traits/networking/combined_network.rs | 11 +++++--- .../src/traits/networking/libp2p_network.rs | 1 + .../src/traits/networking/memory_network.rs | 2 ++ .../traits/networking/web_server_network.rs | 1 + orchestrator/src/config.rs | 14 ++++++++++ testing/src/test_builder.rs | 5 ++++ 7 files changed, 48 insertions(+), 13 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 99388356ec..0eed790175 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -24,7 +24,7 @@ use hotshot_orchestrator::config::NetworkConfigSource; use hotshot_orchestrator::{ self, client::{BenchResults, OrchestratorClient, ValidatorArgs}, - config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, + config::{CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, WebServerConfig}, }; use hotshot_types::message::Message; use hotshot_types::traits::network::ConnectedNetwork; @@ -876,6 +876,9 @@ where wait_between_polls, }: WebServerConfig = config.clone().da_web_server_config.unwrap(); + let CombinedNetworkConfig { delay_duration }: CombinedNetworkConfig = + config.clone().combined_network_config.unwrap(); + // Create and wait for underlying webserver network let web_quorum_network = webserver_network_from_config::(config.clone(), pub_key.clone()); @@ -885,14 +888,20 @@ where web_quorum_network.wait_for_ready().await; // Combine the two communication channels - let da_channel = CombinedNetworks::new(Arc::new(UnderlyingCombinedNetworks( - web_da_network.clone(), - libp2p_underlying_quorum_network.clone(), - ))); - let quorum_channel = CombinedNetworks::new(Arc::new(UnderlyingCombinedNetworks( - web_quorum_network.clone(), - libp2p_underlying_quorum_network.clone(), - ))); + let da_channel = CombinedNetworks::new( + Arc::new(UnderlyingCombinedNetworks( + web_da_network.clone(), + libp2p_underlying_quorum_network.clone(), + )), + delay_duration, + ); + let quorum_channel = CombinedNetworks::new( + Arc::new(UnderlyingCombinedNetworks( + web_quorum_network.clone(), + libp2p_underlying_quorum_network.clone(), + )), + delay_duration, + ); CombinedDARun { config, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 1aa8cc3beb..252f6420c2 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -88,7 +88,7 @@ impl CombinedNetworks { /// /// Panics if `COMBINED_NETWORK_CACHE_SIZE` is 0 #[must_use] - pub fn new(networks: Arc>) -> Self { + pub fn new(networks: Arc>, delay_duration: Duration) -> Self { Self { networks, message_cache: Arc::new(RwLock::new(LruCache::new( @@ -96,7 +96,7 @@ impl CombinedNetworks { ))), primary_down: Arc::new(AtomicU64::new(0)), delayed_tasks: Arc::default(), - delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), + delay_duration: Arc::new(RwLock::new(delay_duration)), } } @@ -187,6 +187,7 @@ impl TestableNetworkingImplementation for CombinedNetwor da_committee_size: usize, is_da: bool, reliability_config: Option>, + secondary_network_delay: Duration, ) -> Box (Arc, Arc) + 'static> { let generators = ( TestableNetworkingImplementation for CombinedNetwor da_committee_size, is_da, None, + Duration::default(), ), , TYPES::SignatureKey> as TestableNetworkingImplementation<_>>::generator( expected_node_count, @@ -206,6 +208,7 @@ impl TestableNetworkingImplementation for CombinedNetwor da_committee_size, is_da, reliability_config, + Duration::default(), ) ); Box::new(move |node_id| { @@ -228,7 +231,7 @@ impl TestableNetworkingImplementation for CombinedNetwor ))), primary_down: Arc::new(AtomicU64::new(0)), delayed_tasks: Arc::default(), - delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), + delay_duration: Arc::new(RwLock::new(secondary_network_delay)), }; let da_net = Self { networks: Arc::new(da_networks), @@ -237,7 +240,7 @@ impl TestableNetworkingImplementation for CombinedNetwor ))), primary_down: Arc::new(AtomicU64::new(0)), delayed_tasks: Arc::default(), - delay_duration: Arc::new(RwLock::new(Duration::from_millis(1000))), + delay_duration: Arc::new(RwLock::new(secondary_network_delay)), }; (quorum_net.into(), da_net.into()) }) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6aef678163..6fb0aec8e2 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -179,6 +179,7 @@ where da_committee_size: usize, _is_da: bool, reliability_config: Option>, + _secondary_network_delay: Duration, ) -> Box (Arc, Arc) + 'static> { assert!( da_committee_size <= expected_node_count, diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 20da1732e6..c5aa3641d3 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -11,6 +11,7 @@ use async_compatibility_layer::{ use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bincode::Options; +use core::time::Duration; use dashmap::DashMap; use futures::StreamExt; use hotshot_types::{ @@ -187,6 +188,7 @@ impl TestableNetworkingImplementation _da_committee_size: usize, _is_da: bool, reliability_config: Option>, + _secondary_network_delay: Duration, ) -> Box (Arc, Arc) + 'static> { let master: Arc<_> = MasterMap::new(); // We assign known_nodes' public key and stake value rather than read from config file since it's a test diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index cd22c894ed..b6cfa4b6a1 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -1242,6 +1242,7 @@ impl TestableNetworkingImplementation for WebServerNetwo da_committee_size: usize, _is_da: bool, reliability_config: Option>, + _secondary_network_delay: Duration, ) -> Box (Arc, Arc) + 'static> { let da_gen = Self::single_generator( expected_node_count, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 61f17f1220..0810c87de2 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -98,6 +98,13 @@ pub struct WebServerConfig { pub wait_between_polls: Duration, } +/// configuration for combined network +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct CombinedNetworkConfig { + /// delay duration before sending a message through the secondary network + pub delay_duration: Duration, +} + /// a network configuration error #[derive(Error, Debug)] pub enum NetworkConfigError { @@ -154,6 +161,8 @@ pub struct NetworkConfig { pub web_server_config: Option, /// the data availability web server config pub da_web_server_config: Option, + /// combined network config + pub combined_network_config: Option, /// the commit this run is based on pub commit_sha: String, } @@ -388,6 +397,7 @@ impl Default for NetworkConfig { election_config_type_name: std::any::type_name::().to_string(), web_server_config: None, da_web_server_config: None, + combined_network_config: None, next_view_timeout: 10, num_bootrap: 5, propose_min_round_time: Duration::from_secs(0), @@ -432,6 +442,9 @@ pub struct NetworkConfigFile { /// the data availability web server config #[serde(default)] pub da_web_server_config: Option, + /// combined network config + #[serde(default)] + pub combined_network_config: Option, } impl From> for NetworkConfig { @@ -473,6 +486,7 @@ impl From> for NetworkC start_delay_seconds: val.start_delay_seconds, web_server_config: val.web_server_config, da_web_server_config: val.da_web_server_config, + combined_network_config: val.combined_network_config, commit_sha: String::new(), } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 81d1288995..597fea2152 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -34,6 +34,8 @@ pub struct TimingData { pub propose_min_round_time: Duration, /// The maximum amount of time a leader can wait to start a round pub propose_max_round_time: Duration, + /// Delay before sending through the secondary network in CombinedNetworks + pub secondary_network_delay: Duration, } /// metadata describing a test @@ -81,6 +83,7 @@ impl Default for TimingData { start_delay: 100, propose_min_round_time: Duration::new(0, 0), propose_max_round_time: Duration::from_millis(100), + secondary_network_delay: Duration::from_millis(1000), } } } @@ -297,6 +300,7 @@ impl TestMetadata { start_delay, propose_min_round_time, propose_max_round_time, + secondary_network_delay, } = timing_data; let mod_config = // TODO this should really be using the timing config struct @@ -316,6 +320,7 @@ impl TestMetadata { num_bootstrap_nodes, da_staked_committee_size, unreliable_network, + secondary_network_delay, ), storage: Box::new(|_| I::construct_tmp_storage().unwrap()), config, From 9fc58bc0816c10737d4424d679b0779ce294e129 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 13 Mar 2024 17:06:46 -0400 Subject: [PATCH 0858/1393] [Push CDN] Full integration (#2762) * push cdn integration * fmt * switch hotshot types * updates * add capnp install * fix capnproto install * PR changes * drop was sufficient * update push cdn version * update cdn to 0.1.3 * update CDN * update the CDN --- example-types/src/node_types.rs | 18 +- examples/Cargo.toml | 16 + examples/infra/mod.rs | 103 ++++- examples/push-cdn/all.rs | 137 ++++++ examples/push-cdn/types.rs | 27 ++ examples/push-cdn/validator.rs | 27 ++ hotshot/Cargo.toml | 9 + hotshot/src/traits.rs | 1 + hotshot/src/traits/networking.rs | 2 + .../src/traits/networking/push_cdn_network.rs | 419 ++++++++++++++++++ orchestrator/run-config.toml | 3 +- orchestrator/src/config.rs | 7 + testing-macros/tests/tests.rs | 12 +- testing/tests/catchup.rs | 6 +- testing/tests/push_cdn.rs | 45 ++ 15 files changed, 814 insertions(+), 18 deletions(-) create mode 100644 examples/push-cdn/all.rs create mode 100644 examples/push-cdn/types.rs create mode 100644 examples/push-cdn/validator.rs create mode 100644 hotshot/src/traits/networking/push_cdn_network.rs create mode 100644 testing/tests/push_cdn.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index b84c2da482..e4ba54576d 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -1,4 +1,6 @@ -use hotshot::traits::election::static_committee::GeneralStaticCommittee; +use hotshot::traits::{ + election::static_committee::GeneralStaticCommittee, implementations::PushCdnNetwork, +}; use crate::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, @@ -46,6 +48,10 @@ impl NodeType for TestTypes { type Membership = GeneralStaticCommittee; } +/// The Push CDN implementation +#[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] +pub struct PushCdnImpl; + /// Memory network implementation #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct MemoryImpl; @@ -65,6 +71,10 @@ pub struct CombinedImpl; /// static committee type alias pub type StaticMembership = StaticCommittee; +// Push CDN communication channels +type StaticPushCdnQuorumComm = PushCdnNetwork; +type StaticPushCdnDAComm = PushCdnNetwork; + /// memory network pub type StaticMemoryDAComm = MemoryNetwork, ::SignatureKey>; @@ -92,6 +102,12 @@ type StaticWebQuorumComm = WebServerNetwork; /// combined network (libp2p + web server) type StaticCombinedQuorumComm = CombinedNetworks; +impl NodeImplementation for PushCdnImpl { + type Storage = MemoryStorage; + type QuorumNetwork = StaticPushCdnQuorumComm; + type CommitteeNetwork = StaticPushCdnDAComm; +} + impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticLibp2pQuorumComm; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 023a6a9d68..2dd9b303ac 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -78,6 +78,15 @@ path = "combined/validator.rs" name = "orchestrator-combined" path = "combined/orchestrator.rs" +# Push CDN +[[example]] +name = "all-push-cdn" +path = "push-cdn/all.rs" + +[[example]] +name = "validator-push-cdn" +path = "push-cdn/validator.rs" + [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -116,8 +125,15 @@ tracing = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } +cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } +cdn-broker = { workspace = true, features = ["insecure", "runtime-tokio", "strong_consistency", "local_discovery"] } +cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } + [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } +cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } +cdn-broker = { workspace = true, features = ["insecure", "runtime-async-std", "strong_consistency", "local_discovery"] } +cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } [dev-dependencies] clap = { version = "4.5", features = ["derive", "env"] } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 0eed790175..584d319944 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -3,10 +3,14 @@ use async_compatibility_layer::art::async_sleep; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_lock::RwLock; use async_trait::async_trait; +use cdn_broker::reexports::crypto::signature::KeyPair; +use cdn_broker::reexports::message::Topic; use clap::Parser; use clap::{Arg, Command}; use futures::StreamExt; -use hotshot::traits::implementations::{CombinedNetworks, UnderlyingCombinedNetworks}; +use hotshot::traits::implementations::{ + CombinedNetworks, PushCdnNetwork, UnderlyingCombinedNetworks, WrappedSignatureKey, +}; use hotshot::traits::BlockPayload; use hotshot::{ traits::{ @@ -170,6 +174,7 @@ pub fn read_orchestrator_init_config() -> ( .required(false), ) .get_matches(); + if let Some(config_file_string) = matches.get_one::("config_file") { args = ConfigArgs { config_file: config_file_string.clone(), @@ -208,6 +213,7 @@ pub fn read_orchestrator_init_config() -> ( if let Some(orchestrator_url_string) = matches.get_one::("orchestrator_url") { orchestrator_url = Url::parse(orchestrator_url_string).unwrap(); } + (config, orchestrator_url) } @@ -249,8 +255,8 @@ pub fn load_config_from_file( /// Runs the orchestrator pub async fn run_orchestrator< TYPES: NodeType, - DACHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, - QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, + DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, NODE: NodeImplementation>, >( OrchestratorArgs { url, config }: OrchestratorArgs, @@ -421,8 +427,8 @@ async fn libp2p_network_from_config( #[async_trait] pub trait RunDA< TYPES: NodeType, - DANET: ConnectedNetwork, TYPES::SignatureKey> + Debug, - QUORUMNET: ConnectedNetwork, TYPES::SignatureKey> + Debug, + DANET: ConnectedNetwork, TYPES::SignatureKey>, + QUORUMNET: ConnectedNetwork, TYPES::SignatureKey>, NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMNET, @@ -755,6 +761,89 @@ where } } +// Push CDN + +/// Represents a Push CDN-based run +pub struct PushCdnDaRun { + /// The underlying configuration + config: NetworkConfig, + /// The quorum channel + quorum_channel: PushCdnNetwork, + /// The DA channel + da_channel: PushCdnNetwork, +} + +#[async_trait] +impl< + TYPES: NodeType< + Transaction = TestTransaction, + BlockPayload = TestBlockPayload, + BlockHeader = TestBlockHeader, + InstanceState = TestInstanceState, + >, + NODE: NodeImplementation< + TYPES, + QuorumNetwork = PushCdnNetwork, + CommitteeNetwork = PushCdnNetwork, + Storage = MemoryStorage, + >, + > RunDA, PushCdnNetwork, NODE> for PushCdnDaRun +where + ::ValidatedState: TestableState, + ::BlockPayload: TestableBlock, + Leaf: TestableLeaf, + Self: Sync, +{ + async fn initialize_networking( + config: NetworkConfig, + ) -> PushCdnDaRun { + // Get our own key + let key = config.config.my_own_validator_config.clone(); + + // Convert to the Push-CDN-compatible type + let keypair = KeyPair { + public_key: WrappedSignatureKey(key.public_key), + private_key: key.private_key, + }; + + // See if we should be DA + let mut topics = vec![Topic::Global]; + if config.node_index < config.config.da_staked_committee_size as u64 { + topics.push(Topic::DA); + } + + // Create the network and await the initial connection + let network = PushCdnNetwork::new( + config + .cdn_marshal_address + .clone() + .expect("`cdn_marshal_address` needs to be supplied for a push CDN run"), + topics.iter().map(ToString::to_string).collect(), + keypair, + ) + .await + .expect("failed to perform initial connection"); + + PushCdnDaRun { + config, + quorum_channel: network.clone(), + da_channel: network, + } + } + + fn get_da_channel(&self) -> PushCdnNetwork { + self.da_channel.clone() + } + + fn get_quorum_channel(&self) -> PushCdnNetwork { + self.quorum_channel.clone() + } + + fn get_config(&self) -> NetworkConfig { + self.config.clone() + } +} + // Libp2p /// Represents a libp2p-based run @@ -933,8 +1022,8 @@ pub async fn main_entry_point< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - DACHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, - QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey> + Debug, + DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMCHANNEL, diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs new file mode 100644 index 0000000000..e513319947 --- /dev/null +++ b/examples/push-cdn/all.rs @@ -0,0 +1,137 @@ +//! A example program using the Push CDN +/// The types we're importing +pub mod types; + +use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; +use async_compatibility_layer::art::async_spawn; +use cdn_broker::reexports::connection::protocols::{Quic, Tcp}; +use cdn_broker::reexports::crypto::signature::KeyPair; +use cdn_broker::Broker; +use cdn_marshal::Marshal; +use hotshot::traits::implementations::WrappedSignatureKey; +use hotshot::types::SignatureKey; +use hotshot_example_types::state_types::TestTypes; +use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_types::traits::node_implementation::NodeType; +use std::net::{IpAddr, Ipv4Addr}; + +/// The infra implementation +#[path = "../infra/mod.rs"] +pub mod infra; + +use tracing::error; + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() { + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + setup_logging(); + setup_backtrace(); + + // use configfile args + let (config, orchestrator_url) = read_orchestrator_init_config::(); + + // Start the orhcestrator + async_spawn(run_orchestrator::< + TestTypes, + DANetwork, + QuorumNetwork, + NodeImpl, + >(OrchestratorArgs { + url: orchestrator_url.clone(), + config: config.clone(), + })); + + // The configuration we are using for this example is 2 brokers & 1 marshal + + // A keypair shared between brokers + let (broker_public_key, broker_private_key) = + ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); + + // The broker (peer) discovery endpoint shall be a local SQLite file + let discovery_endpoint = "test.sqlite".to_string(); + + // 2 brokers + for _ in 0..2 { + // Get the ports to bind to + let private_port = portpicker::pick_unused_port().expect("could not find an open port"); + let public_port = portpicker::pick_unused_port().expect("could not find an open port"); + + // Extrapolate addresses + let private_address = format!("127.0.0.1:{private_port}"); + let public_address = format!("127.0.0.1:{public_port}"); + + let config: cdn_broker::Config::SignatureKey>> = + cdn_broker::ConfigBuilder::default() + .discovery_endpoint(discovery_endpoint.clone()) + .keypair(KeyPair { + public_key: WrappedSignatureKey(broker_public_key), + private_key: broker_private_key.clone(), + }) + .metrics_enabled(false) + .private_bind_address(private_address.clone()) + .public_bind_address(public_address.clone()) + .private_advertise_address(private_address) + .public_advertise_address(public_address) + .build() + .expect("failed to build broker config"); + + // Create and spawn the broker + async_spawn(async move { + let broker: Broker< + WrappedSignatureKey<::SignatureKey>, + WrappedSignatureKey<::SignatureKey>, + Tcp, + Quic, + > = Broker::new(config).await.expect("broker failed to start"); + + // Error if we stopped unexpectedly + if let Err(err) = broker.start().await { + error!("broker stopped: {err}"); + } + }); + } + + // Get the port to use for the marshal + let marshal_port = 9000; + + // Configure the marshal + let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); + let marshal_config = cdn_marshal::ConfigBuilder::default() + .bind_address(marshal_endpoint.clone()) + .discovery_endpoint("test.sqlite".to_string()) + .build() + .expect("failed to build marshal config"); + + // Spawn the marshal + async_spawn(async move { + let marshal: Marshal::SignatureKey>, Quic> = + Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); + + // Error if we stopped unexpectedly + if let Err(err) = marshal.start().await { + error!("broker stopped: {err}"); + } + }); + + // Start the proper number of nodes + let mut nodes = Vec::new(); + for _ in 0..(config.config.num_nodes_with_stake.get()) { + let orchestrator_url = orchestrator_url.clone(); + let node = async_spawn(async move { + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + network_config_file: None, + }, + ) + .await; + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs new file mode 100644 index 0000000000..b6d289af00 --- /dev/null +++ b/examples/push-cdn/types.rs @@ -0,0 +1,27 @@ +use crate::infra::PushCdnDaRun; +use hotshot::traits::implementations::{MemoryStorage, PushCdnNetwork}; +use hotshot_example_types::state_types::TestTypes; +use hotshot_types::traits::node_implementation::NodeImplementation; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Deserialize, Serialize, Hash, PartialEq, Eq)] +/// Convenience type alias +pub struct NodeImpl {} + +/// Convenience type alias +pub type DANetwork = PushCdnNetwork; +/// Convenience type alias +pub type VIDNetwork = PushCdnNetwork; +/// Convenience type alias +pub type QuorumNetwork = PushCdnNetwork; +/// Convenience type alias +pub type ViewSyncNetwork = PushCdnNetwork; + +impl NodeImplementation for NodeImpl { + type Storage = MemoryStorage; + type CommitteeNetwork = DANetwork; + type QuorumNetwork = QuorumNetwork; +} + +/// Convenience type alias +pub type ThisRun = PushCdnDaRun; diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs new file mode 100644 index 0000000000..886847da66 --- /dev/null +++ b/examples/push-cdn/validator.rs @@ -0,0 +1,27 @@ +//! A validator using the web server +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::Parser; +use hotshot_example_types::state_types::TestTypes; +use tracing::{info, instrument}; + +use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; + +use hotshot_orchestrator::client::ValidatorArgs; + +/// types used for this example +pub mod types; + +/// general infra used for this example +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = ValidatorArgs::parse(); + info!("connecting to orchestrator at {:?}", args.url); + infra::main_entry_point::(args).await; +} diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 7e8073b654..1f72fd5792 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -51,11 +51,20 @@ snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } +anyhow = "1.0.79" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } +cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } +cdn-broker = { workspace = true, features = ["insecure", "runtime-tokio", "strong_consistency", "local_discovery"] } +cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } + [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } +cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } +cdn-broker = { workspace = true, features = ["insecure", "runtime-async-std", "strong_consistency", "local_discovery"] } +cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } + [dev-dependencies] blake3 = { workspace = true } diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 150d2bf015..5e98fe347e 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -16,6 +16,7 @@ pub mod implementations { combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryNetwork}, + push_cdn_network::{PushCdnNetwork, WrappedSignatureKey}, web_server_network::WebServerNetwork, NetworkingMetricsValue, }, diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 61482f2bd9..15d686fa93 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -8,6 +8,8 @@ pub mod combined_network; pub mod libp2p_network; pub mod memory_network; +/// The Push CDN network +pub mod push_cdn_network; pub mod web_server_network; use std::{ collections::HashMap, diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs new file mode 100644 index 0000000000..50e921f440 --- /dev/null +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -0,0 +1,419 @@ +use super::NetworkError; +use bincode::Options; +use cdn_broker::{ + reexports::connection::protocols::Tcp, Broker, Config, ConfigBuilder as BrokerConfigBuilder, +}; +use cdn_client::{ + reexports::{ + connection::protocols::Quic, + crypto::signature::{KeyPair, Serializable, SignatureScheme}, + message::{Broadcast, Direct, Message as PushCdnMessage, Topic}, + }, + Client, ConfigBuilder as ClientConfigBuilder, +}; +use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; +use hotshot_utils::bincode::bincode_opts; +use tracing::{error, warn}; + +use async_compatibility_layer::art::{async_block_on, async_spawn}; +use async_trait::async_trait; + +use async_compatibility_layer::channel::UnboundedSendError; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; +use hotshot_types::{ + boxed_sync, + data::ViewNumber, + message::Message, + traits::{ + network::{ConnectedNetwork, ConsensusIntentEvent, PushCdnNetworkError}, + node_implementation::NodeType, + signature_key::SignatureKey, + }, + BoxSyncFuture, +}; +use std::{collections::BTreeSet, sync::Arc, time::Duration}; + +/// A wrapped `SignatureKey`. We need to implement the Push CDN's `SignatureScheme` +/// trait in order to sign and verify messages to/from the CDN. +#[derive(Clone, Eq, PartialEq)] +pub struct WrappedSignatureKey(pub T); +impl SignatureScheme for WrappedSignatureKey { + type PrivateKey = T::PrivateKey; + type PublicKey = Self; + + /// Sign a message of arbitrary data and return the serialized signature + fn sign(private_key: &Self::PrivateKey, message: &[u8]) -> anyhow::Result> { + let signature = T::sign(private_key, message)?; + + Ok(bincode_opts().serialize(&signature)?) + } + + /// Verify a message of arbitrary data and return the result + fn verify(public_key: &Self::PublicKey, message: &[u8], signature: &[u8]) -> bool { + let signature: T::PureAssembledSignatureType = match bincode_opts().deserialize(signature) { + Ok(key) => key, + Err(_) => return false, + }; + + public_key.0.validate(&signature, message) + } +} + +/// We need to implement the `Serializable` so the Push CDN can serialize the signatures +/// and public keys and send them over the wire. +impl Serializable for WrappedSignatureKey { + fn serialize(&self) -> anyhow::Result> { + Ok(self.0.to_bytes()) + } + + fn deserialize(serialized: &[u8]) -> anyhow::Result { + Ok(WrappedSignatureKey(T::from_bytes(serialized)?)) + } +} + +/// A communication channel to the Push CDN, which is a collection of brokers and a marshal +/// that helps organize them all. +#[derive(Clone)] +/// Is generic over both the type of key and the network protocol. +pub struct PushCdnNetwork(Client, Quic>); + +impl PushCdnNetwork { + /// Create a new `PushCdnNetwork` (really a client) from a marshal endpoint, a list of initial + /// topics we are interested in, and our wrapped keypair that we use to authenticate with the + /// marshal. + /// + /// # Errors + /// If we fail the initial connection + pub async fn new( + marshal_endpoint: String, + topics: Vec, + keypair: KeyPair>, + ) -> anyhow::Result { + // Transform topics to our internal representation + let mut computed_topics: Vec = Vec::new(); + for topic in topics { + computed_topics.push(topic.try_into()?); + } + + // Build config + let config = ClientConfigBuilder::default() + .endpoint(marshal_endpoint) + .subscribed_topics(computed_topics) + .keypair(keypair) + .build()?; + + // Create the client, performing the initial connection + let client = Client::new(config).await?; + + Ok(Self(client)) + } + + /// Broadcast a message to members of the particular topic. Does not retry. + /// + /// # Errors + /// - If we fail to serialize the message + /// - If we fail to send the broadcast message. + async fn broadcast_message( + &self, + message: Message, + topic: Topic, + ) -> Result<(), NetworkError> { + // Bincode the message + let serialized_message = match bincode_opts().serialize(&message) { + Ok(serialized) => serialized, + Err(e) => { + warn!("Failed to serialize message: {}", e); + return Err(NetworkError::FailedToSerialize { source: e }); + } + }; + + // Send the message + // TODO: check if we need to print this error + if self + .0 + .send_broadcast_message(vec![topic], serialized_message) + .await + .is_err() + { + return Err(NetworkError::CouldNotDeliver); + }; + + Ok(()) + } +} + +#[cfg(feature = "hotshot-testing")] +impl TestableNetworkingImplementation for PushCdnNetwork { + /// Generate n Push CDN clients, a marshal, and two brokers (that run locally). + /// Uses a `SQLite` database instead of Redis. + fn generator( + _expected_node_count: usize, + _num_bootstrap: usize, + _network_id: usize, + da_committee_size: usize, + _is_da: bool, + _reliability_config: Option>, + _secondary_network_delay: Duration, + ) -> Box (Arc, Arc) + 'static> { + // The configuration we are using for testing is 2 brokers & 1 marshal + + // A keypair shared between brokers + let (broker_public_key, broker_private_key) = + TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); + + // The broker (peer) discovery endpoint shall be a local SQLite file + let discovery_endpoint = "test.sqlite".to_string(); + + // Try to delete the file at the discovery endpoint to maintain consistency between tests + if let Err(err) = std::fs::remove_file(discovery_endpoint.clone()) { + warn!("failed to delete pre-existing database: {err}"); + }; + + // 2 brokers + for _ in 0..2 { + // Get the ports to bind to + let private_port = portpicker::pick_unused_port().expect("could not find an open port"); + let public_port = portpicker::pick_unused_port().expect("could not find an open port"); + + // Extrapolate addresses + let private_address = format!("127.0.0.1:{private_port}"); + let public_address = format!("127.0.0.1:{public_port}"); + + let config: Config> = + BrokerConfigBuilder::default() + .discovery_endpoint(discovery_endpoint.clone()) + .keypair(KeyPair { + public_key: WrappedSignatureKey(broker_public_key.clone()), + private_key: broker_private_key.clone(), + }) + .metrics_enabled(false) + .private_bind_address(private_address.clone()) + .public_bind_address(public_address.clone()) + .private_advertise_address(private_address) + .public_advertise_address(public_address) + .build() + .expect("failed to build broker config"); + + // Create and spawn the broker + async_spawn(async move { + let broker: Broker< + WrappedSignatureKey, + WrappedSignatureKey, + Tcp, + Quic, + > = Broker::new(config).await.expect("broker failed to start"); + + // Error if we stopped unexpectedly + if let Err(err) = broker.start().await { + error!("broker stopped: {err}"); + } + }); + } + + // Get the port to use for the marshal + let marshal_port = portpicker::pick_unused_port().expect("could not find an open port"); + + // Configure the marshal + let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); + let marshal_config = MarshalConfigBuilder::default() + .bind_address(marshal_endpoint.clone()) + .discovery_endpoint("test.sqlite".to_string()) + .build() + .expect("failed to build marshal config"); + + // Spawn the marshal + async_spawn(async move { + let marshal: Marshal, Quic> = + Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); + + // Error if we stopped unexpectedly + if let Err(err) = marshal.start().await { + error!("broker stopped: {err}"); + } + }); + + // This function is called for each client we spawn + Box::new({ + move |node_id| { + // Derive our public and priate keys from our index + let private_key = + TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let public_key = TYPES::SignatureKey::from_private(&private_key); + + // Calculate if we're DA or not + let topics = if node_id < da_committee_size as u64 { + vec![Topic::DA, Topic::Global] + } else { + vec![Topic::Global] + }; + + // Configure our client + let client_config = ClientConfigBuilder::default() + .keypair(KeyPair { + public_key: WrappedSignatureKey(public_key), + private_key, + }) + .subscribed_topics(topics) + .endpoint(marshal_endpoint.clone()) + .build() + .expect("failed to build client config"); + + // Create our client + let client = Arc::new(PushCdnNetwork( + async_block_on(async move { Client::new(client_config).await }) + .expect("failed to create client"), + )); + + (client.clone(), client) + } + }) + } + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option { + None + } +} + +#[async_trait] +impl ConnectedNetwork, TYPES::SignatureKey> + for PushCdnNetwork +{ + /// We do not support pausing the PushCDN network right now, but it is possible. + fn pause(&self) {} + + /// We do not support resuming the PushCDN network right now, but it is possible. + fn resume(&self) {} + + /// The clients form an initial connection when created, so we don't have to wait. + async fn wait_for_ready(&self) {} + + /// The clients form an initial connection when created, so we can return `true` here + /// always. + async fn is_ready(&self) -> bool { + true + } + + /// TODO: shut down the networks. Unneeded for testing. + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b, + { + boxed_sync(async move {}) + } + + /// Broadcast a message to all members of the quorum. + /// + /// # Errors + /// - If we fail to serialize the message + /// - If we fail to send the broadcast message. + async fn broadcast_message( + &self, + message: Message, + _recipients: BTreeSet, + ) -> Result<(), NetworkError> { + self.broadcast_message(message, Topic::Global).await + } + + /// Broadcast a message to all members of the DA committee. + /// + /// # Errors + /// - If we fail to serialize the message + /// - If we fail to send the broadcast message. + async fn da_broadcast_message( + &self, + message: Message, + _recipients: BTreeSet, + ) -> Result<(), NetworkError> { + self.broadcast_message(message, Topic::DA).await + } + + /// Send a direct message to a node with a particular key. Does not retry. + /// + /// - If we fail to serialize the message + /// - If we fail to send the direct message + async fn direct_message( + &self, + message: Message, + recipient: TYPES::SignatureKey, + ) -> Result<(), NetworkError> { + // Bincode the message + let serialized_message = match bincode_opts().serialize(&message) { + Ok(serialized) => serialized, + Err(e) => { + warn!("Failed to serialize message: {}", e); + return Err(NetworkError::FailedToSerialize { source: e }); + } + }; + + // Send the message + // TODO: check if we need to print this error + if self + .0 + .send_direct_message(&WrappedSignatureKey(recipient), serialized_message) + .await + .is_err() + { + return Err(NetworkError::CouldNotDeliver); + }; + + Ok(()) + } + + /// Receive a message. Is agnostic over `transmit_type`, which has an issue + /// to be removed anyway. + /// + /// # Errors + /// - If we fail to receive messages. Will trigger a retry automatically. + async fn recv_msgs(&self) -> Result>, NetworkError> { + // Receive a message + let message = self.0.receive_message().await; + + // If it was an error, wait a bit and retry + let message = match message { + Ok(message) => message, + Err(error) => { + error!("failed to receive message: {error}"); + return Err(NetworkError::PushCdnNetwork { + source: PushCdnNetworkError::FailedToReceive, + }); + } + }; + + // Extract the underlying message + let (PushCdnMessage::Broadcast(Broadcast { message, topics: _ }) + | PushCdnMessage::Direct(Direct { + message, + recipient: _, + })) = message + else { + return Ok(vec![]); + }; + + // Deserialize it + let result: Message = bincode_opts() + .deserialize(&message) + .map_err(|e| NetworkError::FailedToSerialize { source: e })?; + + // Return it + Ok(vec![result]) + } + + /// Do nothing here, as we don't need to look up nodes. + async fn queue_node_lookup( + &self, + _view_number: ViewNumber, + _pk: TYPES::SignatureKey, + ) -> Result<(), UnboundedSendError>> { + Ok(()) + } + + /// We don't need to poll. + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} +} diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 6eeced9927..4fb883a910 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,4 +1,4 @@ -rounds = 10 +rounds = 100 transactions_per_round = 10 transaction_size = 1749 node_index = 0 @@ -37,6 +37,7 @@ seed = [ 0, ] start_delay_seconds = 0 +cdn_marshal_address = "127.0.0.1:9000" [config] num_nodes_with_stake = 10 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 0810c87de2..4ef179d834 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -161,6 +161,8 @@ pub struct NetworkConfig { pub web_server_config: Option, /// the data availability web server config pub da_web_server_config: Option, + /// The address for the Push CDN's "marshal", A.K.A. load balancer + pub cdn_marshal_address: Option, /// combined network config pub combined_network_config: Option, /// the commit this run is based on @@ -397,6 +399,7 @@ impl Default for NetworkConfig { election_config_type_name: std::any::type_name::().to_string(), web_server_config: None, da_web_server_config: None, + cdn_marshal_address: None, combined_network_config: None, next_view_timeout: 10, num_bootrap: 5, @@ -436,6 +439,9 @@ pub struct NetworkConfigFile { /// the hotshot config file #[serde(default)] pub config: HotShotConfigFile, + /// The address of the Push CDN's "marshal", A.K.A. load balancer + #[serde(default)] + pub cdn_marshal_address: Option, /// the webserver config #[serde(default)] pub web_server_config: Option, @@ -484,6 +490,7 @@ impl From> for NetworkC key_type_name: std::any::type_name::().to_string(), election_config_type_name: std::any::type_name::().to_string(), start_delay_seconds: val.start_delay_seconds, + cdn_marshal_address: val.cdn_marshal_address, web_server_config: val.web_server_config, da_web_server_config: val.da_web_server_config, combined_network_config: val.combined_network_config, diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index e789c496bd..c54b987953 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -1,4 +1,4 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, WebImpl}; +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; use hotshot_example_types::state_types::TestTypes; use hotshot_testing::completion_task::{ CompletionTaskDescription, TimeBasedCompletionTaskDescription, @@ -12,7 +12,7 @@ use std::time::Duration; cross_tests!( TestName: test_success, - Impls: [MemoryImpl, WebImpl, Libp2pImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -31,7 +31,7 @@ cross_tests!( // Test one node leaving the network. cross_tests!( TestName: test_with_failures_one, - Impls: [MemoryImpl, WebImpl, Libp2pImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -59,7 +59,7 @@ cross_tests!( // Test f/2 nodes leaving the network. cross_tests!( TestName: test_with_failures_half_f, - Impls: [MemoryImpl, WebImpl, Libp2pImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -99,7 +99,7 @@ cross_tests!( // Test f nodes leaving the network. cross_tests!( TestName: test_with_failures_f, - Impls: [MemoryImpl, WebImpl, Libp2pImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { @@ -152,7 +152,7 @@ cross_tests!( // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, - Impls: [MemoryImpl, WebImpl, Libp2pImpl], + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 002254996f..164aece9a4 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -59,10 +59,10 @@ async fn test_catchup() { #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_catchup_web() { +async fn test_catchup_cdn() { use std::time::Duration; - use hotshot_example_types::node_types::{TestTypes, WebImpl}; + use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, @@ -103,7 +103,7 @@ async fn test_catchup_web() { }; metadata - .gen_launcher::(0) + .gen_launcher::(0) .launch() .run_test() .await; diff --git a/testing/tests/push_cdn.rs b/testing/tests/push_cdn.rs new file mode 100644 index 0000000000..8ec6564180 --- /dev/null +++ b/testing/tests/push_cdn.rs @@ -0,0 +1,45 @@ +use std::time::Duration; + +use async_compatibility_layer::logging::shutdown_logging; +use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; +use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::{TestMetadata, TimingData}, +}; +use tracing::instrument; + +/// Push CDN network test +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn push_cdn_network() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let metadata = TestMetadata { + timing_data: TimingData { + round_start_delay: 25, + next_view_timeout: 10_000, + start_delay: 120_000, + + ..Default::default() + }, + overall_safety_properties: OverallSafetyPropertiesDescription { + num_failed_views: 33, + num_successful_views: 35, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestMetadata::default() + }; + metadata + .gen_launcher::(0) + .launch() + .run_test() + .await; + shutdown_logging(); +} From 34698aa9eabc9dd11b8970eef052d84e8ae40d0e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:00:50 +0100 Subject: [PATCH 0859/1393] Bump anyhow from 1.0.79 to 1.0.81 (#2774) Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.79 to 1.0.81. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.79...1.0.81) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- hotshot/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 1f72fd5792..d6623eb479 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -51,7 +51,7 @@ snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } -anyhow = "1.0.79" +anyhow = "1.0.81" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } From f09fcf12e3b08b6bb8d202e61ae7c731d003b886 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Mar 2024 09:57:04 -0400 Subject: [PATCH 0860/1393] [CX_CLEANUP] Remove Special Case for View 1 (#2752) * Make CombinedNetworks delay duration configurable * Secondary network delay configurable in HotShotConfig * Rename CombinedConfig to CombinedNetworkConfig * Network delay in test network generator `secondary_network_delay` removed from `HotShotConfig` because it cannot easily be passed to the test network generator. * Remove special casing for view 1 * stricter checks for view * Start with no vid commitment + fix test * fix upgrade test * fix proposal ordering tests * move inject conensus to creation * Temporary pinning to hotshot-types branch TODO: switch to hotshot-types tag or main branch before merging * Update hotshot-types * Pin to hotshot-types tag 0.1.2 * Remove files added back by mistake * fix upgrade test --------- Co-authored-by: Lukasz Rzasik --- hotshot/src/lib.rs | 8 +- hotshot/src/tasks/mod.rs | 21 +++ hotshot/src/tasks/task_state.rs | 23 +-- task-impls/src/consensus.rs | 70 +------- task-impls/src/da.rs | 4 +- task-impls/src/transactions.rs | 3 +- task-impls/src/vid.rs | 3 +- testing/tests/consensus_task.rs | 259 +++-------------------------- testing/tests/proposal_ordering.rs | 19 ++- testing/tests/upgrade_task.rs | 12 +- 10 files changed, 79 insertions(+), 343 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index cb70a2ab1f..81b11facdb 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -220,9 +220,6 @@ impl> SystemContext { } }; saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns.clone()); - // View 1 doesn't have DA which is responsible for saving the payloads, so we store the - // payload for view 1 manually during the intialization. - saved_payloads.insert(TYPES::Time::new(1), encoded_txns); } let start_view = initializer.start_view; @@ -275,6 +272,11 @@ impl> SystemContext { /// Panics if sending genesis fails pub async fn start_consensus(&self) { debug!("Starting Consensus"); + self.internal_event_stream + .0 + .broadcast_direct(HotShotEvent::ViewChange(TYPES::Time::new(0))) + .await + .expect("Genesis Broadcast failed"); self.internal_event_stream .0 .broadcast_direct(HotShotEvent::QCFormed(either::Left( diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 479204817d..34ab2917cd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -141,6 +141,27 @@ pub async fn inject_consensus_polls< .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) .await; + // Start polling for proposals for the first view + consensus_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) + .await; + + consensus_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(1)) + .await; + + if consensus_state + .quorum_membership + .get_leader(TYPES::Time::new(1)) + == consensus_state.public_key + { + consensus_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForVotes(0)) + .await; + } } /// add the consensus task diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 959184f1a2..ddb7ebf1ae 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -2,20 +2,13 @@ use crate::types::SystemContextHandle; use async_trait::async_trait; use hotshot_task_impls::{ - consensus::{CommitmentAndMetadata, ConsensusTaskState}, - da::DATaskState, - transactions::TransactionTaskState, - upgrade::UpgradeTaskState, - vid::VIDTaskState, - view_sync::ViewSyncTaskState, + consensus::ConsensusTaskState, da::DATaskState, transactions::TransactionTaskState, + upgrade::UpgradeTaskState, vid::VIDTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::constants::VERSION_0_1; -use hotshot_types::traits::election::Membership; use hotshot_types::traits::{ - block_contents::vid_commitment, consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - BlockPayload, }; use std::{ collections::{HashMap, HashSet}, @@ -163,21 +156,11 @@ impl> CreateTaskState ) -> ConsensusTaskState> { let consensus = handle.hotshot.get_consensus(); - let (payload, metadata) = ::genesis(); - // Impossible for `unwrap` to fail on the genesis payload. - let payload_commitment = vid_commitment( - &payload.encode().unwrap().collect(), - handle.hotshot.memberships.quorum_membership.total_nodes(), - ); ConsensusTaskState { consensus, timeout: handle.hotshot.config.next_view_timeout, cur_view: handle.get_cur_view().await, - payload_commitment_and_metadata: Some(CommitmentAndMetadata { - commitment: payload_commitment, - metadata, - is_genesis: true, - }), + payload_commitment_and_metadata: None, api: handle.clone(), _pd: PhantomData, vote_collector: None.into(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 693a8c78b4..ea246ea218 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -50,8 +50,6 @@ pub struct CommitmentAndMetadata { pub commitment: VidCommitment, /// Metadata for the block payload pub metadata: ::Metadata, - /// Flag for if this data represents the genesis block - pub is_genesis: bool, } /// Alias for Optional type for Vote Collectors @@ -153,69 +151,6 @@ impl, A: ConsensusApi + if let Some(proposal) = &self.current_proposal { let consensus = self.consensus.read().await; - // ED Need to account for the genesis DA cert - // No need to check vid share nor da cert for genesis - if proposal.justify_qc.is_genesis && proposal.view_number == TYPES::Time::new(1) { - info!("Proposal is genesis!"); - - let view = TYPES::Time::new(*proposal.view_number); - let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis { - Some(Leaf::genesis(&consensus.instance_state)) - } else { - consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - }; - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.get_data().leaf_commit, - proposal.view_number, - ); - return false; - }; - let parent_commitment = parent.commit(); - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: proposal.justify_qc.clone(), - parent_commitment, - block_header: proposal.block_header.clone(), - block_payload: None, - proposer_id: self.quorum_membership.get_leader(view), - }; - let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(), - }, - view, - &self.public_key, - &self.private_key, - ) else { - error!("Failed to sign QuorumData!"); - return false; - }; - - let message = GeneralConsensusMessage::::Vote(vote); - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view_number() + 1 - ); - broadcast_event(HotShotEvent::QuorumVoteSend(vote), event_stream).await; - if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { - if commit_and_metadata.is_genesis { - self.payload_commitment_and_metadata = None; - } - } - return true; - } - } - // Only vote if you has seen the VID share for this view if let Some(_vid_share) = consensus.vid_shares.get(&proposal.view_number) { } else { @@ -1115,7 +1050,9 @@ impl, A: ConsensusApi + .await .vid_shares .insert(view, disperse); - self.vote_if_able(&event_stream).await; + if self.vote_if_able(&event_stream).await { + self.current_proposal = None; + } } HotShotEvent::ViewChange(new_view) => { debug!("View Change event for view {} in consensus task", *new_view); @@ -1220,7 +1157,6 @@ impl, A: ConsensusApi + self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { commitment: payload_commitment, metadata, - is_genesis: false, }); if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c2738b7e10..78507a33b9 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -220,7 +220,7 @@ impl, A: ConsensusApi + } } HotShotEvent::ViewChange(view) => { - if *self.cur_view >= *view { + if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; } @@ -228,7 +228,6 @@ impl, A: ConsensusApi + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; - self.consensus.write().await.update_view(view); // Inject view info into network let is_da = self @@ -255,7 +254,6 @@ impl, A: ConsensusApi + // If we are not the next leader (DA leader for this view) immediately exit if self.da_membership.get_leader(self.cur_view + 1) != self.public_key { - // panic!("We are not the DA leader for view {}", *self.cur_view + 1); return None; } debug!("Polling for DA votes for view {}", *self.cur_view + 1); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 293842ee15..51015451eb 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -174,7 +174,7 @@ impl, A: ConsensusApi + } HotShotEvent::ViewChange(view) => { debug!("view change in transactions to view {:?}", view); - if *self.cur_view >= *view { + if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; } @@ -184,7 +184,6 @@ impl, A: ConsensusApi + make_block = self.membership.get_leader(view) == self.public_key; } self.cur_view = view; - self.consensus.write().await.update_view(view); // return if we aren't the next leader or we skipped last view and aren't the current leader. if !make_block && self.membership.get_leader(self.cur_view + 1) != self.public_key { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 750b3b36b0..7491b82296 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -126,7 +126,7 @@ impl, A: ConsensusApi + } HotShotEvent::ViewChange(view) => { - if *self.cur_view >= *view { + if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; } @@ -134,7 +134,6 @@ impl, A: ConsensusApi + warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; - self.consensus.write().await.update_view(view); // Start polling for VID disperse for the new view self.network diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index e18fdcf6aa..10626657b3 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,15 +1,9 @@ #![allow(clippy::panic)] use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::task_helpers::{build_quorum_proposal, build_vote, key_pair_for_id}; use hotshot_testing::test_helpers::permute_input_with_index_order; -use hotshot_types::traits::{consensus_api::ConsensusApi, election::Membership}; -use hotshot_types::{ - data::ViewNumber, message::GeneralConsensusMessage, traits::node_implementation::ConsensusTime, -}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use jf_primitives::vid::VidScheme; -use std::collections::HashMap; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -42,15 +36,23 @@ async fn test_consensus_task() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); } // Run view 1 (the genesis stage). let view_1 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), exact(QuorumProposalValidated(proposals[0].data.clone())), @@ -116,16 +118,22 @@ async fn test_consensus_vote() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); } // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ @@ -183,7 +191,11 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { // Get out of the genesis view first let view_1 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), exact(QuorumProposalValidated(proposals[0].data.clone())), @@ -241,232 +253,3 @@ async fn test_consensus_vote_with_permuted_dac() { test_vote_with_specific_order(vec![1, 2, 0]).await; test_vote_with_specific_order(vec![2, 1, 0]).await; } - -/// TODO (jparr721): Nuke these old tests. Tracking: https://github.com/EspressoSystems/HotShot/issues/2727 -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_task_old() { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::simple_certificate::QuorumCertificate; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle(1).await.0; - // We assign node's key pair rather than read from config file since it's a test - let (private_key, public_key) = key_pair_for_id(1); - - let mut input = Vec::new(); - let mut output = HashMap::new(); - - // Trigger a proposal to send by creating a new QC. Then recieve that proposal and update view based on the valid QC in the proposal - let qc = QuorumCertificate::::genesis(); - let proposal = build_quorum_proposal(&handle, &private_key, 1).await; - - input.push(HotShotEvent::QCFormed(either::Left(qc.clone()))); - input.push(HotShotEvent::QuorumProposalRecv( - proposal.clone(), - public_key, - )); - - input.push(HotShotEvent::Shutdown); - - output.insert( - HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), - 1, - ); - output.insert( - HotShotEvent::QuorumProposalValidated(proposal.data.clone()), - 1, - ); - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { - output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - } - - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - SystemContextHandle, - >::create_from(&handle) - .await; - - inject_consensus_polls(&consensus_state).await; - - run_harness(input, output, consensus_state, false).await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_vote_old() { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; - use hotshot_testing::task_helpers::build_system_handle; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle(2).await.0; - // We assign node's key pair rather than read from config file since it's a test - let (private_key, public_key) = key_pair_for_id(1); - - let mut input = Vec::new(); - let mut output = HashMap::new(); - - let proposal = build_quorum_proposal(&handle, &private_key, 1).await; - - // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - input.push(HotShotEvent::QuorumProposalRecv( - proposal.clone(), - public_key, - )); - - let proposal = proposal.data; - output.insert(HotShotEvent::QuorumProposalValidated(proposal.clone()), 1); - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { - output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - } - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - - input.push(HotShotEvent::Shutdown); - - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - SystemContextHandle, - >::create_from(&handle) - .await; - - inject_consensus_polls(&consensus_state).await; - - run_harness(input, output, consensus_state, false).await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// TODO: re-enable this when HotShot/the sequencer needs the shares for something -// issue: https://github.com/EspressoSystems/HotShot/issues/2236 -#[ignore] -async fn test_consensus_with_vid_old() { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use hotshot::traits::BlockPayload; - use hotshot::types::SignatureKey; - use hotshot_example_types::block_types::TestBlockPayload; - use hotshot_example_types::block_types::TestTransaction; - use hotshot_task_impls::{consensus::ConsensusTaskState, harness::run_harness}; - use hotshot_testing::task_helpers::build_cert; - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_testing::task_helpers::vid_scheme_from_view_number; - use hotshot_types::simple_certificate::DACertificate; - use hotshot_types::simple_vote::DAData; - use hotshot_types::simple_vote::DAVote; - use hotshot_types::traits::block_contents::{vid_commitment, TestableBlock}; - use hotshot_types::{ - data::VidDisperse, message::Proposal, traits::node_implementation::NodeType, - }; - use std::marker::PhantomData; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let (handle, _tx, _rx) = build_system_handle(2).await; - // We assign node's key pair rather than read from config file since it's a test - // In view 2, node 2 is the leader. - let (private_key_view2, public_key_view2) = key_pair_for_id(2); - - // For the test of vote logic with vid - let pub_key = *handle.public_key(); - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); - let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - - let vid_signature = ::SignatureKey::sign( - handle.private_key(), - payload_commitment.as_ref(), - ) - .expect("Failed to sign payload commitment"); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let vid_disperse_inner = VidDisperse::from_membership( - ViewNumber::new(2), - vid_disperse, - &quorum_membership.clone().into(), - ); - // TODO for now reuse the same block payload commitment and signature as DA committee - // https://github.com/EspressoSystems/jellyfish/issues/369 - let vid_proposal = Proposal { - data: vid_disperse_inner.clone(), - signature: vid_signature, - _pd: PhantomData, - }; - - let mut input = Vec::new(); - let mut output = HashMap::new(); - - // Do a view change, so that it's not the genesis view, and vid vote is needed - input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - - // For the test of vote logic with vid, starting view 2 we need vid share - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - let proposal_view2 = build_quorum_proposal(&handle, &private_key_view2, 2).await; - let block = ::genesis(); - let da_payload_commitment = vid_commitment( - &block.encode().unwrap().collect(), - quorum_membership.total_nodes(), - ); - let da_data = DAData { - payload_commit: da_payload_commitment, - }; - let created_dac_view2 = - build_cert::, DACertificate>( - da_data, - &quorum_membership, - ViewNumber::new(2), - &public_key_view2, - &private_key_view2, - ); - input.push(HotShotEvent::DACRecv(created_dac_view2.clone())); - input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); - - // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - input.push(HotShotEvent::QuorumProposalRecv( - proposal_view2.clone(), - public_key_view2, - )); - - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { - output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); - } - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); - - input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Shutdown, 1); - - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - SystemContextHandle, - >::create_from(&handle) - .await; - - inject_consensus_polls(&consensus_state).await; - - run_harness(input, output, consensus_state, false).await; -} diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index daaed067a1..7d37c6cf97 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -32,21 +32,28 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let mut proposals = Vec::new(); - let mut votes = Vec::new(); - let mut leaders = Vec::new(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); - for view in (&mut generator).take(3) { + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); leaders.push(view.leader_public_key); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); } // This stage transitions from the initial view to view 1 let view_1 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), exact(QuorumProposalValidated(proposals[0].data.clone())), diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index f9e113652e..ffbd5089c2 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -65,7 +65,11 @@ async fn test_upgrade_task() { } let view_1 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + DACRecv(dacs[0].clone()), + ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), exact(QuorumProposalValidated(proposals[0].data.clone())), @@ -223,7 +227,11 @@ async fn test_upgrade_and_consensus_task() { let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); let inputs = vec![ - vec![QuorumProposalRecv(proposals[0].clone(), leaders[0])], + vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + DACRecv(dacs[0].clone()), + ], upgrade_vote_recvs, vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], vec![ From 5b904f737fc36c262465d3ae5cb6c92db7561f99 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Mar 2024 12:42:39 -0400 Subject: [PATCH 0861/1393] [TECH_DEBT] Pass Events as `Arc`s (#2770) * Replace events with Arc<> * fixes --- hotshot/src/lib.rs | 15 ++-- hotshot/src/tasks/mod.rs | 32 ++++---- hotshot/src/types/handle.rs | 7 +- libp2p-networking/src/network/node.rs | 2 +- macros/src/lib.rs | 5 +- orchestrator/run-config.toml | 10 +-- task-impls/src/consensus.rs | 56 ++++++++------ task-impls/src/da.rs | 32 ++++---- task-impls/src/harness.rs | 10 +-- task-impls/src/network.rs | 32 ++++---- task-impls/src/transactions.rs | 23 +++--- task-impls/src/upgrade.rs | 19 ++--- task-impls/src/vid.rs | 37 ++++----- task-impls/src/view_sync.rs | 102 ++++++++++++++++--------- task-impls/src/vote.rs | 106 +++++++++++++------------- testing/src/predicates.rs | 33 ++++---- testing/src/script.rs | 16 ++-- testing/src/task_helpers.rs | 4 +- testing/src/view_sync_task.rs | 6 +- 19 files changed, 305 insertions(+), 242 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 81b11facdb..2b15d473c5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -146,9 +146,10 @@ pub struct SystemContext> { pub output_event_stream: (Sender>, InactiveReceiver>), /// access to the internal event stream, in case we need to, say, shut something down + #[allow(clippy::type_complexity)] internal_event_stream: ( - Sender>, - InactiveReceiver>, + Sender>>, + InactiveReceiver>>, ), /// uid for instrumentation @@ -274,14 +275,14 @@ impl> SystemContext { debug!("Starting Consensus"); self.internal_event_stream .0 - .broadcast_direct(HotShotEvent::ViewChange(TYPES::Time::new(0))) + .broadcast_direct(Arc::new(HotShotEvent::ViewChange(TYPES::Time::new(0)))) .await .expect("Genesis Broadcast failed"); self.internal_event_stream .0 - .broadcast_direct(HotShotEvent::QCFormed(either::Left( + .broadcast_direct(Arc::new(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), - ))) + )))) .await .expect("Genesis Broadcast failed"); } @@ -414,8 +415,8 @@ impl> SystemContext { ) -> Result< ( SystemContextHandle, - Sender>, - Receiver>, + Sender>>, + Receiver>>, ), HotShotError, > { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 34ab2917cd..c46013f569 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -50,7 +50,7 @@ pub async fn add_network_message_task< NET: ConnectedNetwork, TYPES::SignatureKey>, >( task_reg: Arc, - event_stream: Sender>, + event_stream: Sender>>, channel: Arc, ) { let net = channel.clone(); @@ -90,11 +90,11 @@ pub async fn add_network_event_task< NET: ConnectedNetwork, TYPES::SignatureKey>, >( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, channel: Arc, membership: TYPES::Membership, - filter: fn(&HotShotEvent) -> bool, + filter: fn(&Arc>) -> bool, ) { let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { channel, @@ -167,8 +167,8 @@ pub async fn inject_consensus_polls< /// add the consensus task pub async fn add_consensus_task>( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, handle: &SystemContextHandle, ) { let consensus_state = ConsensusTaskState::create_from(handle).await; @@ -182,8 +182,8 @@ pub async fn add_consensus_task>( /// add the VID task pub async fn add_vid_task>( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, handle: &SystemContextHandle, ) { let vid_state = VIDTaskState::create_from(handle).await; @@ -194,8 +194,8 @@ pub async fn add_vid_task>( /// add the Upgrade task. pub async fn add_upgrade_task>( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, handle: &SystemContextHandle, ) { let upgrade_state = UpgradeTaskState::create_from(handle).await; @@ -206,8 +206,8 @@ pub async fn add_upgrade_task>( /// add the Data Availability task pub async fn add_da_task>( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, handle: &SystemContextHandle, ) { // build the da task @@ -220,8 +220,8 @@ pub async fn add_da_task>( /// add the Transaction Handling task pub async fn add_transaction_task>( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, handle: &SystemContextHandle, ) { let transactions_state = TransactionTaskState::create_from(handle).await; @@ -233,8 +233,8 @@ pub async fn add_transaction_task> /// add the view sync task pub async fn add_view_sync_task>( task_reg: Arc, - tx: Sender>, - rx: Receiver>, + tx: Sender>>, + rx: Receiver>>, handle: &SystemContextHandle, ) { let view_sync_state = ViewSyncTaskState::create_from(handle).await; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 0bdcccac36..58bfb99cbc 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -28,9 +28,10 @@ pub struct SystemContextHandle> { /// clone of the `Receiver` when they get output stream. pub(crate) output_event_stream: (Sender>, InactiveReceiver>), /// access to the internal event stream, in case we need to, say, shut something down + #[allow(clippy::type_complexity)] pub(crate) internal_event_stream: ( - Sender>, - InactiveReceiver>, + Sender>>, + InactiveReceiver>>, ), /// registry for controlling tasks pub(crate) registry: Arc, @@ -61,7 +62,7 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper /// NOTE: this is only used for sanity checks in our tests - pub fn get_internal_event_stream_known_impl(&self) -> Receiver> { + pub fn get_internal_event_stream_known_impl(&self) -> Receiver>> { self.internal_event_stream.1.activate_cloned() } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index ad542bbdbd..c91dc7c397 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -63,7 +63,7 @@ use std::{ use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; /// Maximum size of a message -pub const MAX_GOSSIP_MSG_SIZE: usize = 200_000_000; +pub const MAX_GOSSIP_MSG_SIZE: usize = 2_000_000_000; /// Wrapped num of connections pub const ESTABLISHED_LIMIT: NonZeroU32 = diff --git a/macros/src/lib.rs b/macros/src/lib.rs index d5eb21064d..45f6f4805f 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -20,6 +20,7 @@ use quote::{format_ident, quote}; /// /// The macro panics if the input stream cannot be parsed. /// The test will panic if the any of the scripts has a different number of stages from the input. +#[allow(clippy::too_many_lines)] #[proc_macro] pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { // Parse the input as an iter of Expr @@ -94,10 +95,10 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { for input in &input_group { #( - if !#task_names.state().filter(input) { + if !#task_names.state().filter(&input.clone().into()) { tracing::debug!("Test sent: {:?}", input); - if let Some(res) = #task_names.handle_event(input.clone()).await { + if let Some(res) = #task_names.handle_event(input.clone().into()).await { #task_names.state().handle_result(&res).await; } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 4fb883a910..51e7127d2d 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,6 +1,6 @@ rounds = 100 transactions_per_round = 10 -transaction_size = 1749 +transaction_size = 1000 node_index = 0 seed = [ 0, @@ -40,12 +40,12 @@ start_delay_seconds = 0 cdn_marshal_address = "127.0.0.1:9000" [config] -num_nodes_with_stake = 10 +num_nodes_with_stake = 100 num_nodes_without_stake = 0 -staked_committee_nodes = 5 +staked_committee_nodes = 100 non_staked_committee_nodes = 0 -max_transactions = 100 -min_transactions = 0 +max_transactions = 1 +min_transactions = 1 next_view_timeout = 30000 timeout_ratio = [ 11, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index ea246ea218..cd8add253c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -139,7 +139,7 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. - async fn vote_if_able(&mut self, event_stream: &Sender>) -> bool { + async fn vote_if_able(&mut self, event_stream: &Sender>>) -> bool { if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", @@ -150,7 +150,6 @@ impl, A: ConsensusApi + if let Some(proposal) = &self.current_proposal { let consensus = self.consensus.read().await; - // Only vote if you has seen the VID share for this view if let Some(_vid_share) = consensus.vid_shares.get(&proposal.view_number) { } else { @@ -232,7 +231,8 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - broadcast_event(HotShotEvent::QuorumVoteSend(vote), event_stream).await; + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), event_stream) + .await; return true; } } @@ -255,7 +255,7 @@ impl, A: ConsensusApi + async fn update_view( &mut self, new_view: TYPES::Time, - event_stream: &Sender>, + event_stream: &Sender>>, ) -> bool { if *self.cur_view < *new_view { debug!( @@ -302,7 +302,7 @@ impl, A: ConsensusApi + .await; } - broadcast_event(HotShotEvent::ViewChange(new_view), event_stream).await; + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; // Spawn a timeout task if we did actually update view let timeout = self.timeout; @@ -314,7 +314,7 @@ impl, A: ConsensusApi + async move { async_sleep(Duration::from_millis(timeout)).await; broadcast_event( - HotShotEvent::Timeout(TYPES::Time::new(*view_number)), + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), &stream, ) .await; @@ -348,11 +348,12 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] pub async fn handle( &mut self, - event: HotShotEvent, - event_stream: Sender>, + event: Arc>, + event_stream: Sender>>, ) { - match event { + match event.as_ref() { HotShotEvent::QuorumProposalRecv(proposal, sender) => { + let sender = sender.clone(); debug!( "Received Quorum Proposal for view {}", *proposal.data.view_number @@ -635,7 +636,7 @@ impl, A: ConsensusApi + .await; // Notify other tasks broadcast_event( - HotShotEvent::QuorumProposalValidated(proposal.data.clone()), + Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), &event_stream, ) .await; @@ -756,7 +757,11 @@ impl, A: ConsensusApi + } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - broadcast_event(HotShotEvent::LeafDecided(leafs_decided), &event_stream).await; + broadcast_event( + Arc::new(HotShotEvent::LeafDecided(leafs_decided)), + &event_stream, + ) + .await; let decide_sent = broadcast_event( Event { view_number: consensus.last_decided_view, @@ -977,7 +982,7 @@ impl, A: ConsensusApi + // Update our current upgrade_cert as long as it's still relevant. if cert.view_number >= self.cur_view { - self.upgrade_cert = Some(cert); + self.upgrade_cert = Some(cert.clone()); } } HotShotEvent::DACRecv(cert) => { @@ -1003,6 +1008,7 @@ impl, A: ConsensusApi + } } HotShotEvent::VidDisperseRecv(disperse, sender) => { + let sender = sender.clone(); let view = disperse.data.get_view_number(); debug!( @@ -1049,12 +1055,13 @@ impl, A: ConsensusApi + .write() .await .vid_shares - .insert(view, disperse); + .insert(view, disperse.clone()); if self.vote_if_able(&event_stream).await { self.current_proposal = None; } } HotShotEvent::ViewChange(new_view) => { + let new_view = *new_view; debug!("View Change event for view {} in consensus task", *new_view); let old_view_number = self.cur_view; @@ -1095,6 +1102,7 @@ impl, A: ConsensusApi + .await; } HotShotEvent::Timeout(view) => { + let view = *view; // NOTE: We may optionally have the timeout task listen for view change events if self.cur_view >= view { return; @@ -1127,7 +1135,7 @@ impl, A: ConsensusApi + return; }; - broadcast_event(HotShotEvent::TimeoutVoteSend(vote), &event_stream).await; + broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), &event_stream).await; broadcast_event( Event { view_number: view, @@ -1153,10 +1161,11 @@ impl, A: ConsensusApi + consensus.metrics.number_of_timeouts.add(1); } HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata, view) => { + let view = *view; debug!("got commit and meta {:?}", payload_commitment); self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { - commitment: payload_commitment, - metadata, + commitment: *payload_commitment, + metadata: metadata.clone(), }); if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view @@ -1187,7 +1196,7 @@ impl, A: ConsensusApi + &mut self, view: TYPES::Time, timeout_certificate: Option>, - event_stream: &Sender>, + event_stream: &Sender>>, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. @@ -1315,7 +1324,10 @@ impl, A: ConsensusApi + ); broadcast_event( - HotShotEvent::QuorumProposalSend(message.clone(), self.public_key.clone()), + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + self.public_key.clone(), + )), event_stream, ) .await; @@ -1331,11 +1343,11 @@ impl, A: ConsensusApi + impl, A: ConsensusApi + 'static> TaskState for ConsensusTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = (); - fn filter(&self, event: &HotShotEvent) -> bool { + fn filter(&self, event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::QCFormed(_) @@ -1359,6 +1371,6 @@ impl, A: ConsensusApi + None } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 78507a33b9..9691059aaf 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -80,11 +80,12 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] pub async fn handle( &mut self, - event: HotShotEvent, - event_stream: Sender>, + event: Arc>, + event_stream: Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::DAProposalRecv(proposal, sender) => { + let sender = sender.clone(); debug!( "DA proposal received for view: {:?}", proposal.data.get_view_number() @@ -165,7 +166,7 @@ impl, A: ConsensusApi + debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - broadcast_event(HotShotEvent::DAVoteSend(vote), &event_stream).await; + broadcast_event(Arc::new(HotShotEvent::DAVoteSend(vote)), &event_stream).await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -178,7 +179,7 @@ impl, A: ConsensusApi + // Record the payload we have promised to make available. consensus .saved_payloads - .insert(view, proposal.data.encoded_transactions); + .insert(view, proposal.data.encoded_transactions.clone()); } HotShotEvent::DAVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); @@ -220,6 +221,7 @@ impl, A: ConsensusApi + } } HotShotEvent::ViewChange(view) => { + let view = *view; if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; } @@ -266,12 +268,13 @@ impl, A: ConsensusApi + return None; } HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view) => { + let view = *view; self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) .await; // quick hash the encoded txns with sha256 - let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let encoded_transactions_hash = Sha256::digest(encoded_transactions); // sign the encoded transactions as opposed to the VID commitment let Ok(signature) = @@ -282,7 +285,7 @@ impl, A: ConsensusApi + }; let data: DAProposal = DAProposal { - encoded_transactions, + encoded_transactions: encoded_transactions.clone(), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number: view, @@ -296,7 +299,10 @@ impl, A: ConsensusApi + }; broadcast_event( - HotShotEvent::DAProposalSend(message.clone(), self.public_key.clone()), + Arc::new(HotShotEvent::DAProposalSend( + message.clone(), + self.public_key.clone(), + )), &event_stream, ) .await; @@ -304,7 +310,7 @@ impl, A: ConsensusApi + HotShotEvent::Timeout(view) => { self.da_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(**view)) .await; } @@ -324,13 +330,13 @@ impl, A: ConsensusApi + impl, A: ConsensusApi + 'static> TaskState for DATaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; - fn filter(&self, event: &HotShotEvent) -> bool { + fn filter(&self, event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown @@ -349,6 +355,6 @@ impl, A: ConsensusApi + } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index d84c230940..03123f3335 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -15,7 +15,7 @@ pub struct TestHarnessState { } impl TaskState for TestHarnessState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; async fn handle_event( @@ -27,7 +27,7 @@ impl TaskState for TestHarnessState { } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } @@ -43,7 +43,7 @@ impl TaskState for TestHarnessState { /// Panics if any state the test expects is not set. Panicing causes a test failure #[allow(clippy::implicit_hasher)] #[allow(clippy::panic)] -pub async fn run_harness>>( +pub async fn run_harness>>>( input: Vec>, expected_output: HashMap, usize>, state: S, @@ -74,7 +74,7 @@ pub async fn run_harness>>( tasks.push(task.run()); for event in input { - to_task.broadcast_direct(event).await.unwrap(); + to_task.broadcast_direct(Arc::new(event)).await.unwrap(); } if async_timeout(Duration::from_secs(2), futures::future::join_all(tasks)) @@ -96,7 +96,7 @@ pub async fn run_harness>>( /// Will panic to fail the test when it receives and unexpected event #[allow(clippy::needless_pass_by_value)] pub fn handle_event( - event: HotShotEvent, + event: Arc>, task: &mut Task>, allow_extra_output: bool, ) -> Option { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 2b9aa97f26..1395ca67e6 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -26,9 +26,9 @@ use tracing::instrument; use tracing::{error, warn}; /// quorum filter -pub fn quorum_filter(event: &HotShotEvent) -> bool { +pub fn quorum_filter(event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::Shutdown @@ -39,9 +39,9 @@ pub fn quorum_filter(event: &HotShotEvent) -> bool { } /// committee filter -pub fn committee_filter(event: &HotShotEvent) -> bool { +pub fn committee_filter(event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::DAProposalSend(_, _) | HotShotEvent::DAVoteSend(_) | HotShotEvent::Shutdown @@ -50,17 +50,17 @@ pub fn committee_filter(event: &HotShotEvent) -> bool { } /// vid filter -pub fn vid_filter(event: &HotShotEvent) -> bool { +pub fn vid_filter(event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::Shutdown | HotShotEvent::VidDisperseSend(_, _) | HotShotEvent::ViewChange(_) ) } /// view sync filter -pub fn view_sync_filter(event: &HotShotEvent) -> bool { +pub fn view_sync_filter(event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) @@ -75,7 +75,7 @@ pub fn view_sync_filter(event: &HotShotEvent) -> bool { #[derive(Clone)] pub struct NetworkMessageTaskState { /// Sender to send internal events this task generates to other tasks - pub event_stream: Sender>, + pub event_stream: Sender>>, } impl TaskState for NetworkMessageTaskState { @@ -165,7 +165,7 @@ impl NetworkMessageTaskState { // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. // - broadcast_event(event, &self.event_stream).await; + broadcast_event(Arc::new(event), &self.event_stream).await; } MessageKind::Data(message) => match message { hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { @@ -179,7 +179,7 @@ impl NetworkMessageTaskState { } if !transactions.is_empty() { broadcast_event( - HotShotEvent::TransactionsRecv(transactions), + Arc::new(HotShotEvent::TransactionsRecv(transactions)), &self.event_stream, ) .await; @@ -200,13 +200,13 @@ pub struct NetworkEventTaskState< pub membership: TYPES::Membership, // TODO ED Need to add exchange so we can get the recipient key and our own key? /// Filter which returns false for the events that this specific network task cares about - pub filter: fn(&HotShotEvent) -> bool, + pub filter: fn(&Arc>) -> bool, } impl, TYPES::SignatureKey>> TaskState for NetworkEventTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; @@ -219,7 +219,7 @@ impl, TYPES::Signa } fn should_shutdown(event: &Self::Event) -> bool { - if matches!(event, HotShotEvent::Shutdown) { + if matches!(event.as_ref(), HotShotEvent::Shutdown) { error!("Network Task received Shutdown event"); return true; } @@ -244,10 +244,10 @@ impl, TYPES::Signa pub async fn handle_event( &mut self, - event: HotShotEvent, + event: Arc>, membership: &TYPES::Membership, ) -> Option { - let (sender, message_kind, transmit_type, recipient) = match event.clone() { + let (sender, message_kind, transmit_type, recipient) = match event.as_ref().clone() { HotShotEvent::QuorumProposalSend(proposal, sender) => ( sender, MessageKind::::from_consensus_message(SequencingMessage(Left( diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 51015451eb..573c2853bc 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -78,10 +78,10 @@ impl, A: ConsensusApi + pub async fn handle( &mut self, - event: HotShotEvent, - event_stream: Sender>, + event: Arc>, + event_stream: Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::TransactionsRecv(transactions) => { futures::join! { self.api @@ -101,7 +101,7 @@ impl, A: ConsensusApi + // If we didn't already know about this transaction, update our mempool metrics. if !self.seen_transactions.remove(&transaction.commit()) - && txns.insert(transaction.commit(), transaction).is_none() + && txns.insert(transaction.commit(), transaction.clone()).is_none() { consensus.metrics.outstanding_transactions.update(1); consensus @@ -173,6 +173,7 @@ impl, A: ConsensusApi + return None; } HotShotEvent::ViewChange(view) => { + let view = *view; debug!("view change in transactions to view {:?}", view); if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; @@ -214,7 +215,11 @@ impl, A: ConsensusApi + // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; broadcast_event( - HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, block_view), + Arc::new(HotShotEvent::TransactionsSequenced( + encoded_transactions, + metadata, + block_view, + )), &event_stream, ) .await; @@ -299,13 +304,13 @@ impl, A: ConsensusApi + impl, A: ConsensusApi + 'static> TaskState for TransactionTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; - fn filter(&self, event: &HotShotEvent) -> bool { + fn filter(&self, event: &Arc>) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::TransactionsRecv(_) | HotShotEvent::LeafDecided(_) | HotShotEvent::Shutdown @@ -322,6 +327,6 @@ impl, A: ConsensusApi + } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 480f022566..c6ab65651b 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -67,10 +67,10 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] pub async fn handle( &mut self, - event: HotShotEvent, - tx: Sender>, + event: Arc>, + tx: Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { let should_vote = self.should_vote; // If the proposal does not match our upgrade target, we immediately exit. @@ -105,7 +105,7 @@ impl, A: ConsensusApi + // We then validate that the proposal was issued by the leader for the view. let view_leader_key = self.quorum_membership.get_leader(view); - if view_leader_key != sender { + if &view_leader_key != sender { error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); return None; } @@ -127,7 +127,7 @@ impl, A: ConsensusApi + // If everything is fine up to here, we generate and send a vote on the proposal. let Ok(vote) = UpgradeVote::create_signed_vote( - proposal.data.upgrade_proposal, + proposal.data.upgrade_proposal.clone(), view, &self.public_key, &self.private_key, @@ -136,7 +136,7 @@ impl, A: ConsensusApi + return None; }; debug!("Sending upgrade vote {:?}", vote.get_view_number()); - broadcast_event(HotShotEvent::UpgradeVoteSend(vote), &tx).await; + broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; } HotShotEvent::UpgradeVoteRecv(ref vote) => { debug!("Upgrade vote recv, Main Task {:?}", vote.get_view_number()); @@ -182,6 +182,7 @@ impl, A: ConsensusApi + } } HotShotEvent::ViewChange(view) => { + let view = *view; if *self.cur_view >= *view { return None; } @@ -209,7 +210,7 @@ impl, A: ConsensusApi + impl, A: ConsensusApi + 'static> TaskState for UpgradeTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; @@ -223,12 +224,12 @@ impl, A: ConsensusApi + } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } fn filter(&self, event: &Self::Event) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::UpgradeProposalRecv(_, _) | HotShotEvent::UpgradeVoteRecv(_) | HotShotEvent::Shutdown diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 7491b82296..3e23e68ea8 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -62,11 +62,12 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] pub async fn handle( &mut self, - event: HotShotEvent, - event_stream: Sender>, + event: Arc>, + event_stream: Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { + let encoded_transactions = encoded_transactions.clone(); // get the number of quorum committee members to be used for VID calculation let num_storage_nodes = self.membership.total_nodes(); @@ -82,27 +83,28 @@ impl, A: ConsensusApi + let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building broadcast_event( - HotShotEvent::SendPayloadCommitmentAndMetadata( + Arc::new(HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.commit, - metadata, - view_number, - ), + metadata.clone(), + *view_number, + )), &event_stream, ) .await; // send the block to the VID dispersal function broadcast_event( - HotShotEvent::BlockReady( - VidDisperse::from_membership(view_number, vid_disperse, &self.membership), - view_number, - ), + Arc::new(HotShotEvent::BlockReady( + VidDisperse::from_membership(*view_number, vid_disperse, &self.membership), + *view_number, + )), &event_stream, ) .await; } HotShotEvent::BlockReady(vid_disperse, view_number) => { + let view_number = *view_number; let Ok(signature) = TYPES::SignatureKey::sign( &self.private_key, vid_disperse.payload_commitment.as_ref().as_ref(), @@ -112,20 +114,21 @@ impl, A: ConsensusApi + }; debug!("publishing VID disperse for view {}", *view_number); broadcast_event( - HotShotEvent::VidDisperseSend( + Arc::new(HotShotEvent::VidDisperseSend( Proposal { signature, - data: vid_disperse, + data: vid_disperse.clone(), _pd: PhantomData, }, self.public_key.clone(), - ), + )), &event_stream, ) .await; } HotShotEvent::ViewChange(view) => { + let view = *view; if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; } @@ -166,7 +169,7 @@ impl, A: ConsensusApi + impl, A: ConsensusApi + 'static> TaskState for VIDTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; @@ -180,7 +183,7 @@ impl, A: ConsensusApi + } fn filter(&self, event: &Self::Event) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::Shutdown | HotShotEvent::TransactionsSequenced(_, _, _) | HotShotEvent::BlockReady(_, _) @@ -188,6 +191,6 @@ impl, A: ConsensusApi + ) } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 1f0c05114d..2b82f14893 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -108,7 +108,7 @@ impl< A: ConsensusApi + 'static + std::clone::Clone, > TaskState for ViewSyncTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = (); @@ -120,7 +120,7 @@ impl< fn filter(&self, event: &Self::Event) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) | HotShotEvent::ViewSyncCommitCertificate2Recv(_) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) @@ -135,7 +135,7 @@ impl< } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } @@ -177,7 +177,7 @@ pub struct ViewSyncReplicaTaskState< impl, A: ConsensusApi + 'static> TaskState for ViewSyncReplicaTaskState { - type Event = HotShotEvent; + type Event = Arc>; type Output = (); @@ -188,7 +188,7 @@ impl, A: ConsensusApi + } fn filter(&self, event: &Self::Event) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) | HotShotEvent::ViewSyncCommitCertificate2Recv(_) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) @@ -203,7 +203,7 @@ impl, A: ConsensusApi + } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } @@ -218,9 +218,9 @@ impl< /// Handles incoming events for the main view sync task pub async fn send_to_or_create_replica( &mut self, - event: HotShotEvent, + event: Arc>, view: TYPES::Time, - sender: &Sender>, + sender: &Sender>>, ) { // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it @@ -277,10 +277,10 @@ impl< /// Handles incoming events for the main view sync task pub async fn handle( &mut self, - event: HotShotEvent, - event_stream: Sender>, + event: Arc>, + event_stream: Sender>>, ) { - match &event { + match event.as_ref() { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); @@ -378,7 +378,7 @@ impl< } } - HotShotEvent::ViewSyncFinalizeVoteRecv(ref vote) => { + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { let mut map = self.finalize_relay_map.write().await; let vote_view = vote.get_view_number(); let relay = vote.get_data().relay; @@ -508,7 +508,7 @@ impl< .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) .await; self.send_to_or_create_replica( - HotShotEvent::ViewSyncTrigger(view_number + 1), + Arc::new(HotShotEvent::ViewSyncTrigger(view_number + 1)), view_number + 1, &event_stream, ) @@ -517,7 +517,9 @@ impl< // If this is the first timeout we've seen advance to the next view self.current_view = view_number; broadcast_event( - HotShotEvent::ViewChange(TYPES::Time::new(*self.current_view)), + Arc::new(HotShotEvent::ViewChange(TYPES::Time::new( + *self.current_view, + ))), &event_stream, ) .await; @@ -536,10 +538,10 @@ impl, A: ConsensusApi + /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, - event: HotShotEvent, - event_stream: Sender>, + event: Arc>, + event_stream: Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { let last_seen_certificate = ViewSyncPhase::PreCommit; @@ -582,8 +584,11 @@ impl, A: ConsensusApi + let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - broadcast_event(HotShotEvent::ViewSyncCommitVoteSend(vote), &event_stream) - .await; + broadcast_event( + Arc::new(HotShotEvent::ViewSyncCommitVoteSend(vote)), + &event_stream, + ) + .await; } if let Some(timeout_task) = self.timeout_task.take() { @@ -601,11 +606,11 @@ impl, A: ConsensusApi + info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); broadcast_event( - HotShotEvent::ViewSyncTimeout( + Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, phase, - ), + )), &stream, ) .await; @@ -655,8 +660,11 @@ impl, A: ConsensusApi + let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { - broadcast_event(HotShotEvent::ViewSyncFinalizeVoteSend(vote), &event_stream) - .await; + broadcast_event( + Arc::new(HotShotEvent::ViewSyncFinalizeVoteSend(vote)), + &event_stream, + ) + .await; } info!( @@ -664,9 +672,17 @@ impl, A: ConsensusApi + *self.next_view ); - broadcast_event(HotShotEvent::ViewChange(self.next_view - 1), &event_stream).await; + broadcast_event( + Arc::new(HotShotEvent::ViewChange(self.next_view - 1)), + &event_stream, + ) + .await; - broadcast_event(HotShotEvent::ViewChange(self.next_view), &event_stream).await; + broadcast_event( + Arc::new(HotShotEvent::ViewChange(self.next_view)), + &event_stream, + ) + .await; if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; @@ -684,11 +700,11 @@ impl, A: ConsensusApi + relay ); broadcast_event( - HotShotEvent::ViewSyncTimeout( + Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, phase, - ), + )), &stream, ) .await; @@ -713,7 +729,7 @@ impl, A: ConsensusApi + return None; }; - broadcast_event(HotShotEvent::TimeoutVoteSend(vote), &event_stream).await; + broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), &event_stream).await; // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); @@ -756,11 +772,16 @@ impl, A: ConsensusApi + cancel_task(timeout_task).await; } - broadcast_event(HotShotEvent::ViewChange(self.next_view), &event_stream).await; + broadcast_event( + Arc::new(HotShotEvent::ViewChange(self.next_view)), + &event_stream, + ) + .await; return Some(HotShotTaskCompleted); } HotShotEvent::ViewSyncTrigger(view_number) => { + let view_number = *view_number; if self.next_view != TYPES::Time::new(*view_number) { error!("Unexpected view number to triger view sync"); return None; @@ -781,8 +802,11 @@ impl, A: ConsensusApi + let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - broadcast_event(HotShotEvent::ViewSyncPreCommitVoteSend(vote), &event_stream) - .await; + broadcast_event( + Arc::new(HotShotEvent::ViewSyncPreCommitVoteSend(vote)), + &event_stream, + ) + .await; } self.timeout_task = Some(async_spawn({ @@ -794,11 +818,11 @@ impl, A: ConsensusApi + async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( - HotShotEvent::ViewSyncTimeout( + Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, ViewSyncPhase::None, - ), + )), &stream, ) .await; @@ -809,8 +833,9 @@ impl, A: ConsensusApi + } HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { + let round = *round; // Shouldn't ever receive a timeout for a relay higher than ours - if TYPES::Time::new(*round) == self.next_view && relay == self.relay { + if TYPES::Time::new(*round) == self.next_view && *relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } @@ -834,7 +859,7 @@ impl, A: ConsensusApi + if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { broadcast_event( - HotShotEvent::ViewSyncPreCommitVoteSend(vote), + Arc::new(HotShotEvent::ViewSyncPreCommitVoteSend(vote)), &event_stream, ) .await; @@ -851,6 +876,7 @@ impl, A: ConsensusApi + let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; + let last_cert = last_seen_certificate.clone(); async move { async_sleep(timeout).await; info!( @@ -858,11 +884,11 @@ impl, A: ConsensusApi + relay ); broadcast_event( - HotShotEvent::ViewSyncTimeout( + Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, - last_seen_certificate, - ), + last_cert, + )), &stream, ) .await; diff --git a/task-impls/src/vote.rs b/task-impls/src/vote.rs index 1a4cd0ade9..b42dc51c1b 100644 --- a/task-impls/src/vote.rs +++ b/task-impls/src/vote.rs @@ -70,7 +70,7 @@ impl< pub async fn accumulate_vote( &mut self, vote: &VOTE, - event_stream: &Sender>, + event_stream: &Sender>>, ) -> Option { if vote.get_leader(&self.membership) != self.public_key { error!("Received vote for a view in which we were not the leader."); @@ -93,7 +93,11 @@ impl< Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); - broadcast_event(VOTE::make_cert_event(cert, &self.public_key), event_stream).await; + broadcast_event( + Arc::new(VOTE::make_cert_event(cert, &self.public_key)), + event_stream, + ) + .await; self.accumulator = None; Some(HotShotTaskCompleted) } @@ -117,7 +121,7 @@ impl< where VoteCollectionTaskState: HandleVoteEvent, { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; @@ -127,7 +131,7 @@ where } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } } @@ -142,12 +146,12 @@ where /// Handle a vote event async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option; /// Event filter to use for this event - fn filter(event: &HotShotEvent) -> bool; + fn filter(event: Arc>) -> bool; } /// Info needed to create a vote accumulator task @@ -168,8 +172,8 @@ pub struct AccumulatorInfo { pub async fn create_vote_accumulator( info: &AccumulatorInfo, vote: VOTE, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option> where TYPES: NodeType, @@ -352,16 +356,16 @@ impl HandleVoteEvent, QuorumCertificat { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { - HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + match event.as_ref() { + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::QuorumVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::QuorumVoteRecv(_)) } } @@ -372,16 +376,16 @@ impl HandleVoteEvent, UpgradeCertific { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { - HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + match event.as_ref() { + HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::UpgradeVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::UpgradeVoteRecv(_)) } } @@ -391,16 +395,16 @@ impl HandleVoteEvent, DACertificate { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { - HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + match event.as_ref() { + HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::DAVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::DAVoteRecv(_)) } } @@ -410,16 +414,16 @@ impl HandleVoteEvent, TimeoutCertific { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { - HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + match event.as_ref() { + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::TimeoutVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::TimeoutVoteRecv(_)) } } @@ -430,18 +434,18 @@ impl { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - self.accumulate_vote(&vote, sender).await + self.accumulate_vote(vote, sender).await } _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::ViewSyncPreCommitVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::ViewSyncPreCommitVoteRecv(_)) } } @@ -452,16 +456,16 @@ impl { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, + match event.as_ref() { + HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::ViewSyncCommitVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::ViewSyncCommitVoteRecv(_)) } } @@ -472,17 +476,17 @@ impl { async fn handle_event( &mut self, - event: HotShotEvent, - sender: &Sender>, + event: Arc>, + sender: &Sender>>, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - self.accumulate_vote(&vote, sender).await + self.accumulate_vote(vote, sender).await } _ => None, } } - fn filter(event: &HotShotEvent) -> bool { - matches!(event, HotShotEvent::ViewSyncFinalizeVoteRecv(_)) + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::ViewSyncFinalizeVoteRecv(_)) } } diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index 0ec2a1f7f8..e1274405dd 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use hotshot_task_impls::{ consensus::ConsensusTaskState, events::HotShotEvent, events::HotShotEvent::*, }; @@ -18,11 +20,12 @@ impl std::fmt::Debug for Predicate { } } -pub fn exact(event: HotShotEvent) -> Predicate> +pub fn exact(event: HotShotEvent) -> Predicate>> where TYPES: NodeType, { let info = format!("{:?}", event); + let event = Arc::new(event); Predicate { function: Box::new(move |e| e == &event), @@ -30,12 +33,12 @@ where } } -pub fn leaf_decided() -> Predicate> +pub fn leaf_decided() -> Predicate>> where TYPES: NodeType, { let info = "LeafDecided".to_string(); - let function = |e: &_| matches!(e, LeafDecided(_)); + let function = |e: &Arc>| matches!(e.as_ref(), LeafDecided(_)); Predicate { function: Box::new(function), @@ -43,12 +46,12 @@ where } } -pub fn quorum_vote_send() -> Predicate> +pub fn quorum_vote_send() -> Predicate>> where TYPES: NodeType, { let info = "QuorumVoteSend".to_string(); - let function = |e: &_| matches!(e, QuorumVoteSend(_)); + let function = |e: &Arc>| matches!(e.as_ref(), QuorumVoteSend(_)); Predicate { function: Box::new(function), @@ -56,12 +59,12 @@ where } } -pub fn view_change() -> Predicate> +pub fn view_change() -> Predicate>> where TYPES: NodeType, { let info = "ViewChange".to_string(); - let function = |e: &_| matches!(e, ViewChange(_)); + let function = |e: &Arc>| matches!(e.as_ref(), ViewChange(_)); Predicate { function: Box::new(function), @@ -69,12 +72,12 @@ where } } -pub fn upgrade_certificate_formed() -> Predicate> +pub fn upgrade_certificate_formed() -> Predicate>> where TYPES: NodeType, { let info = "UpgradeCertificateFormed".to_string(); - let function = |e: &_| matches!(e, UpgradeCertificateFormed(_)); + let function = |e: &Arc>| matches!(e.as_ref(), UpgradeCertificateFormed(_)); Predicate { function: Box::new(function), @@ -82,12 +85,12 @@ where } } -pub fn quorum_proposal_send_with_upgrade_certificate() -> Predicate> +pub fn quorum_proposal_send_with_upgrade_certificate() -> Predicate>> where TYPES: NodeType, { let info = "QuorumProposalSend with UpgradeCertificate attached".to_string(); - let function = |e: &_| match e { + let function = |e: &Arc>| match e.as_ref() { QuorumProposalSend(proposal, _) => proposal.data.upgrade_certificate.is_some(), _ => false, }; @@ -98,12 +101,12 @@ where } } -pub fn quorum_proposal_validated() -> Predicate> +pub fn quorum_proposal_validated() -> Predicate>> where TYPES: NodeType, { let info = "QuorumProposalValidated".to_string(); - let function = |e: &_| matches!(e, QuorumProposalValidated(_)); + let function = |e: &Arc>| matches!(e.as_ref(), QuorumProposalValidated(_)); Predicate { function: Box::new(function), @@ -111,12 +114,12 @@ where } } -pub fn quorum_proposal_send() -> Predicate> +pub fn quorum_proposal_send() -> Predicate>> where TYPES: NodeType, { let info = "QuorumProposalSend".to_string(); - let function = |e: &_| matches!(e, QuorumProposalSend(_, _)); + let function = |e: &Arc>| matches!(e.as_ref(), QuorumProposalSend(_, _)); Predicate { function: Box::new(function), diff --git a/testing/src/script.rs b/testing/src/script.rs index 10f4dc2aa1..3c98f5480b 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -6,9 +6,9 @@ use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::sync::Arc; -pub struct TestScriptStage>> { +pub struct TestScriptStage>>> { pub inputs: Vec>, - pub outputs: Vec>>, + pub outputs: Vec>>>, pub asserts: Vec>, } @@ -72,7 +72,7 @@ where /// Note: the task is not spawned with an async thread; instead, the harness just calls `handle_event`. /// This has a few implications, e.g. shutting down tasks doesn't really make sense, /// and event ordering is deterministic. -pub async fn run_test_script>>( +pub async fn run_test_script>>>( mut script: TestScript, state: S, ) where @@ -96,11 +96,11 @@ pub async fn run_test_script>>( for (stage_number, stage) in script.iter_mut().enumerate() { tracing::debug!("Beginning test stage {}", stage_number); - for input in &mut *stage.inputs { - if !task.state_mut().filter(input) { - tracing::debug!("Test sent: {:?}", input); + for input in &stage.inputs { + if !task.state_mut().filter(&Arc::new(input.clone())) { + tracing::debug!("Test sent: {:?}", input.clone()); - if let Some(res) = S::handle_event(input.clone(), &mut task).await { + if let Some(res) = S::handle_event(input.clone().into(), &mut task).await { task.state_mut().handle_result(&res).await; } } @@ -131,7 +131,7 @@ pub struct TaskScript { } pub struct Expectations { - pub output_asserts: Vec>>, + pub output_asserts: Vec>>>, pub task_state_asserts: Vec>, } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 6013ac6d40..36f80dc126 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -55,8 +55,8 @@ pub async fn build_system_handle( node_id: u64, ) -> ( SystemContextHandle, - Sender>, - Receiver>, + Sender>>, + Receiver>>, ) { let builder = TestMetadata::default_multiple_rounds(); diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index 139e6b73fd..8d91042f2b 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -2,7 +2,7 @@ use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use std::{collections::HashSet, marker::PhantomData}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use crate::{test_runner::HotShotTaskCompleted, GlobalTestEvent}; @@ -54,7 +54,7 @@ impl> TaskState for ViewSy impl> TestTaskState for ViewSyncTask { - type Message = HotShotEvent; + type Message = Arc>; type Output = HotShotTaskCompleted; @@ -65,7 +65,7 @@ impl> TestTaskState id: usize, task: &mut hotshot_task::task::TestTask, ) -> Option { - match message { + match message.as_ref() { // all the view sync events HotShotEvent::ViewSyncTimeout(_, _, _) | HotShotEvent::ViewSyncPreCommitVoteRecv(_) From b5de95bdd375ea65a33a9fc7442344e561cbadaa Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 14 Mar 2024 14:26:47 -0400 Subject: [PATCH 0862/1393] [STABILITY] - Remove Hack Fix in Vote Timeout Trigger (#2739) * - Add support for View Sync Finalize Certificate 2 in consensus algorithm. - Add implementation of the view sync certificate in consensus. Initiate a proposal when the event is received. * Fix build by adding view_sync_cert to various locations. * cargo fmt fix * working on test * approximately working test * fix typo, clarify log * remove old code * clarify, remove typo * satisfy clippy * add voting logic * clarify comment * fix proposal call site, improve performance of cert check * fix test, move cert check logic * fix timeout cert check * fix build * fix vote test * all tests passing, cleanup next * fix calculation, remove old code * fix versioning and fix build * add todo comment, remove thread limit * make doc builder happy * fix comment, typos * Potentially fixed off-by-one * fix test runs * more off-by-one fixes * fix build --- hotshot/src/tasks/task_state.rs | 1 + orchestrator/src/config.rs | 1 + task-impls/src/consensus.rs | 99 +++++++-- task-impls/src/view_sync.rs | 19 +- testing/src/predicates.rs | 13 ++ testing/src/task_helpers.rs | 2 + testing/src/test_builder.rs | 1 + testing/src/view_generator.rs | 113 +++++++++- testing/tests/consensus_task.rs | 377 ++++++++++++++++++++++++++++++-- 9 files changed, 565 insertions(+), 61 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index ddb7ebf1ae..77c351ea64 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -168,6 +168,7 @@ impl> CreateTaskState timeout_task: None, timeout_cert: None, upgrade_cert: None, + view_sync_cert: None, decided_upgrade_cert: None, current_network_version: VERSION_0_1, output_event_stream: handle.hotshot.output_event_stream.0.clone(), diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 4ef179d834..6e7f48cd2a 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -613,6 +613,7 @@ impl From> for HotS propose_min_round_time: val.propose_min_round_time, propose_max_round_time: val.propose_max_round_time, election_config: None, + data_request_delay: Duration::from_millis(200), } } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index cd8add253c..e0aed5d6b4 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -19,7 +19,9 @@ use hotshot_types::{ data::{Leaf, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, + simple_certificate::{ + QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, + }, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, @@ -114,6 +116,10 @@ pub struct ConsensusTaskState< /// last Upgrade Certificate this node formed pub upgrade_cert: Option>, + // TODO: Merge view sync and timeout certs: https://github.com/EspressoSystems/HotShot/issues/2767 + /// last View Sync Certificate this node formed + pub view_sync_cert: Option>, + /// most recent decided upgrade certificate pub decided_upgrade_cert: Option>, @@ -359,6 +365,8 @@ impl, A: ConsensusApi + *proposal.data.view_number ); + debug!("Received proposal {:?}", proposal); + // stop polling for the received proposal self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( @@ -378,24 +386,42 @@ impl, A: ConsensusApi + return; } - // Verify a timeout certificate exists and is valid + // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.get_view_number() != view - 1 { - let Some(timeout_cert) = proposal.data.timeout_certificate.clone() else { + // Do we have a timeout certificate at all? + if let Some(timeout_cert) = proposal.data.timeout_certificate.clone() { + if timeout_cert.get_data().view != view - 1 { + warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); + return; + } + + if !timeout_cert.is_valid_cert(self.timeout_membership.as_ref()) { + warn!("Timeout certificate for view {} was invalid", *view); + return; + } + } else if let Some(cert) = &self.view_sync_cert { + // View sync certs _must_ be for the current view. + if cert.view_number != view { + debug!( + "Cert view number {:?} does not match proposal view number {:?}", + cert.view_number, view + ); + return; + } + + // View sync certs must also be valid. + if !cert.is_valid_cert(self.quorum_membership.as_ref()) { + debug!("Invalid ViewSyncFinalize cert provided"); + return; + } + } else { warn!( - "Quorum proposal for view {} needed a timeout certificate but did not have one", + "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", *view); return; }; - if timeout_cert.get_data().view != view - 1 { - warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); - return; - } - - if !timeout_cert.is_valid_cert(self.timeout_membership.as_ref()) { - warn!("Timeout certificate for view {} was invalid", *view); - return; - } + // If we have a ViewSyncFinalize cert, only vote if it is valid. } let justify_qc = proposal.data.justify_qc.clone(); @@ -1173,7 +1199,8 @@ impl, A: ConsensusApi + self.publish_proposal_if_able(view, None, &event_stream) .await; } - if let Some(tc) = &self.timeout_cert { + + if let Some(tc) = self.timeout_cert.as_ref() { if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { @@ -1184,6 +1211,40 @@ impl, A: ConsensusApi + ) .await; } + } else if let Some(vsc) = self.view_sync_cert.as_ref() { + if self.quorum_membership.get_leader(vsc.get_view_number()) == self.public_key { + self.publish_proposal_if_able(view, None, &event_stream) + .await; + } + } + } + HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { + warn!( + "View Sync Finalize certificate {:?} was invalid", + certificate.get_data() + ); + return; + } + + self.view_sync_cert = Some(certificate.clone()); + + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *certificate.view_number - 1, + )) + .await; + + let view = certificate.view_number; + + if self.quorum_membership.get_leader(view) == self.public_key { + debug!( + "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", + *certificate.view_number + ); + self.publish_proposal_if_able(view, None, &event_stream) + .await; } } _ => {} @@ -1251,7 +1312,7 @@ impl, A: ConsensusApi + // Walk back until we find a decide if !reached_decided { - debug!("not reached decide fro view {:?}", self.cur_view); + debug!("We have not reached decide from view {:?}", self.cur_view); while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { if next_parent_leaf.view_number <= consensus.last_decided_view { break; @@ -1309,19 +1370,18 @@ impl, A: ConsensusApi + justify_qc: consensus.high_qc.clone(), timeout_certificate: timeout_certificate.or_else(|| None), upgrade_certificate: upgrade_cert, + view_sync_certificate: self.view_sync_cert.clone(), proposer_id: leaf.proposer_id, }; self.timeout_cert = None; + self.view_sync_cert = None; let message = Proposal { data: proposal, signature, _pd: PhantomData, }; - debug!( - "Sending proposal for view {:?} \n {:?}", - leaf.view_number, "" - ); + debug!("Sending proposal for view {:?}", leaf.view_number); broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( @@ -1358,6 +1418,7 @@ impl, A: ConsensusApi + | HotShotEvent::Timeout(_) | HotShotEvent::TimeoutVoteRecv(_) | HotShotEvent::VidDisperseRecv(..) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) | HotShotEvent::Shutdown, ) } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 2b82f14893..a9ecc8386a 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -11,7 +11,7 @@ use hotshot_types::{ simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, - simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, + simple_vote::ViewSyncFinalizeData, traits::signature_key::SignatureKey, }; use hotshot_types::{ @@ -713,23 +713,6 @@ impl, A: ConsensusApi + } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - // HACK sending a timeout vote to the next leader so we they - // can actually propose. We don't give the leader the actual view sync cert - // so they have nothing to propose from. Proper fix is to handle the - // view sync cert in the consensus task as another cert to propose from - let Ok(vote) = TimeoutVote::create_signed_vote( - TimeoutData { - view: self.next_view - 1, - }, - self.next_view - 1, - &self.public_key, - &self.private_key, - ) else { - error!("Failed to sign TimeoutData!"); - return None; - }; - - broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), &event_stream).await; // Ignore certificate if it is for an older round if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index e1274405dd..e78fc698fb 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -127,6 +127,19 @@ where } } +pub fn timeout_vote_send() -> Predicate>> +where + TYPES: NodeType, +{ + let info = "TimeoutVoteSend".to_string(); + let function = |e: &Arc>| matches!(e.as_ref(), TimeoutVoteSend(_)); + + Predicate { + function: Box::new(function), + info, + } +} + type ConsensusTaskTestState = ConsensusTaskState>; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 36f80dc126..bae5d7c483 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -267,6 +267,7 @@ async fn build_quorum_proposal_and_signature( justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, upgrade_certificate: None, + view_sync_certificate: None, proposer_id: leaf.proposer_id, }; @@ -326,6 +327,7 @@ async fn build_quorum_proposal_and_signature( justify_qc: created_qc, timeout_certificate: None, upgrade_certificate: None, + view_sync_certificate: None, proposer_id: leaf_new_view.clone().proposer_id, }; proposal = proposal_new_view; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 597fea2152..ab339f2371 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -292,6 +292,7 @@ impl TestMetadata { num_nodes_with_stake as u64, 0, )), + data_request_delay: Duration::from_millis(200), }; let TimingData { next_view_timeout, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index d3b6a2900c..25bdcba2c2 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -1,4 +1,4 @@ -use std::marker::PhantomData; +use std::{cmp::max, marker::PhantomData}; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, @@ -16,8 +16,14 @@ use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_types::{ data::{Leaf, QuorumProposal, VidDisperse, ViewNumber}, message::Proposal, - simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, - simple_vote::{UpgradeProposalData, UpgradeVote}, + simple_certificate::{ + DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + ViewSyncFinalizeCertificate2, + }, + simple_vote::{ + TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData, + ViewSyncFinalizeVote, + }, traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeType}, @@ -41,6 +47,8 @@ pub struct TestView { pub da_certificate: DACertificate, pub transactions: Vec, upgrade_data: Option>, + view_sync_finalize_data: Option>, + timeout_cert_data: Option>, } impl TestView { @@ -82,6 +90,7 @@ impl TestView { justify_qc: QuorumCertificate::genesis(), timeout_certificate: None, upgrade_certificate: None, + view_sync_certificate: None, proposer_id: public_key, }; @@ -117,13 +126,23 @@ impl TestView { transactions, leader_public_key, upgrade_data: None, + view_sync_finalize_data: None, + timeout_cert_data: None, } } - pub fn next_view(&self) -> Self { - let old = self; + /// Moves the generator to the next view by referencing an ancestor. To have a standard, + /// sequentially ordered set of generated test views, use the `next_view` function. Otherwise, + /// this method can be used to start from an ancestor (whose view is at least one view older + /// than the current view) and construct valid views without the data structures in the task + /// failing by expecting views that they has never seen. + pub fn next_view_from_ancestor(&self, ancestor: TestView) -> Self { + let old = ancestor; let old_view = old.view_number; - let next_view = old_view + 1; + + // This ensures that we're always moving forward in time since someone could pass in any + // test view here. + let next_view = max(old_view, self.view_number) + 1; let quorum_membership = &self.quorum_membership; let transactions = &self.transactions; @@ -187,6 +206,44 @@ impl TestView { None }; + let view_sync_certificate = if let Some(ref data) = self.view_sync_finalize_data { + let cert = build_cert::< + TestTypes, + ViewSyncFinalizeData, + ViewSyncFinalizeVote, + ViewSyncFinalizeCertificate2, + >( + data.clone(), + quorum_membership, + next_view, + &public_key, + &private_key, + ); + + Some(cert) + } else { + None + }; + + let timeout_certificate = if let Some(ref data) = self.timeout_cert_data { + let cert = build_cert::< + TestTypes, + TimeoutData, + TimeoutVote, + TimeoutCertificate, + >( + data.clone(), + quorum_membership, + next_view, + &public_key, + &private_key, + ); + + Some(cert) + } else { + None + }; + let block_header = TestBlockHeader { block_number: *next_view, timestamp: *next_view, @@ -213,8 +270,9 @@ impl TestView { block_header: block_header.clone(), view_number: next_view, justify_qc: quorum_certificate.clone(), - timeout_certificate: None, + timeout_certificate, upgrade_certificate, + view_sync_certificate, proposer_id: public_key, }; @@ -236,9 +294,15 @@ impl TestView { // so we reset for the next view. transactions: Vec::new(), upgrade_data: None, + view_sync_finalize_data: None, + timeout_cert_data: None, } } + pub fn next_view(&self) -> Self { + self.next_view_from_ancestor(self.clone()) + } + pub fn create_quorum_vote( &self, handle: &SystemContextHandle, @@ -303,6 +367,41 @@ impl TestViewGenerator { tracing::error!("Cannot attach transactions to the genesis view."); } } + + pub fn add_view_sync_finalize( + &mut self, + view_sync_finalize_data: ViewSyncFinalizeData, + ) { + if let Some(ref view) = self.current_view { + self.current_view = Some(TestView { + view_sync_finalize_data: Some(view_sync_finalize_data), + ..view.clone() + }); + } else { + tracing::error!("Cannot attach view sync finalize to the genesis view."); + } + } + + /// Advances to the next view by skipping the current view and not adding it to the state tree. + /// This is useful when simulating that a timeout has occurred. + pub fn advance_view_number_by(&mut self, n: u64) { + if let Some(ref view) = self.current_view { + self.current_view = Some(TestView { + view_number: view.view_number + n, + ..view.clone() + }) + } else { + tracing::error!("Cannot attach view sync finalize to the genesis view."); + } + } + + pub fn next_from_anscestor_view(&mut self, ancestor: TestView) { + if let Some(ref view) = self.current_view { + self.current_view = Some(view.next_view_from_ancestor(ancestor)) + } else { + tracing::error!("Cannot attach ancestor to genesis view."); + } + } } impl Iterator for TestViewGenerator { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 10626657b3..ba8d42f49a 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,7 +1,17 @@ #![allow(clippy::panic)] +use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; +use hotshot_testing::task_helpers::key_pair_for_id; use hotshot_testing::test_helpers::permute_input_with_index_order; +use hotshot_testing::{ + predicates::{exact, is_at_view_number, quorum_vote_send}, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, +}; +use hotshot_types::simple_vote::ViewSyncFinalizeData; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use jf_primitives::vid::VidScheme; @@ -92,10 +102,7 @@ async fn test_consensus_task() { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; @@ -159,15 +166,6 @@ async fn test_consensus_vote() { /// assures that, no matter what, a vote is indeed sent no matter what order the precipitating /// events occur. The permutation is specified as `input_permutation` and is a vector of indices. async fn test_vote_with_specific_order(input_permutation: Vec) { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; - use hotshot_testing::{ - predicates::{exact, is_at_view_number}, - script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, - view_generator::TestViewGenerator, - }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -237,10 +235,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { } #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote_with_permuted_dac() { // These tests verify that a vote is indeed sent no matter when it receives a DACRecv @@ -253,3 +248,351 @@ async fn test_consensus_vote_with_permuted_dac() { test_vote_with_specific_order(vec![1, 2, 0]).await; test_vote_with_specific_order(vec![2, 1, 0]).await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_view_sync_finalize_propose() { + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; + use hotshot_testing::{ + predicates::{exact, is_at_view_number, quorum_proposal_send, timeout_vote_send}, + script::{run_test_script, TestScriptStage}, + task_helpers::{build_system_handle, vid_scheme_from_view_number}, + view_generator::TestViewGenerator, + }; + use hotshot_types::simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(4).await.0; + let (priv_key, pub_key) = key_pair_for_id(4); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. We need the VID commitment to be able to propose later. + let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(4)); + let encoded_transactions = Vec::new(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let payload_commitment = vid_disperse.commit; + + let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { + relay: 4, + round: ViewNumber::new(4), + }; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut vids = Vec::new(); + let mut dacs = Vec::new(); + + generator.next(); + let view = generator.current_view.clone().unwrap(); + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + vids.push(view.vid_proposal.clone()); + dacs.push(view.da_certificate.clone()); + + // Skip two views + generator.advance_view_number_by(2); + + // Initiate a view sync finalize + generator.add_view_sync_finalize(view_sync_finalize_data); + + // Build the next proposal from view 1 + generator.next_from_anscestor_view(view.clone()); + let view = generator.current_view.unwrap(); + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + + // This is a bog standard view and covers the situation where everything is going normally. + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![is_at_view_number(1)], + }; + + // Fail twice here to "trigger" a view sync event. This is accomplished above by advancing the + // view number in the generator. + let view_2_3 = TestScriptStage { + inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], + outputs: vec![timeout_vote_send(), timeout_vote_send()], + // Times out, so we now have a delayed view + asserts: vec![is_at_view_number(1)], + }; + + // Handle the view sync finalize cert, get the requisite data, propose. + let cert = proposals[1].data.view_sync_certificate.clone().unwrap(); + + // Generate the timeout votes for the timeouts that just occurred. + let timeout_vote_view_2 = TimeoutVote::create_signed_vote( + TimeoutData { + view: ViewNumber::new(2), + }, + ViewNumber::new(2), + &pub_key, + &priv_key, + ) + .unwrap(); + + let timeout_vote_view_3 = TimeoutVote::create_signed_vote( + TimeoutData { + view: ViewNumber::new(3), + }, + ViewNumber::new(3), + &pub_key, + &priv_key, + ) + .unwrap(); + + let view_4 = TestScriptStage { + inputs: vec![ + TimeoutVoteRecv(timeout_vote_view_2), + TimeoutVoteRecv(timeout_vote_view_3), + ViewSyncFinalizeCertificate2Recv(cert), + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(4)), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(4))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_send(), + ], + asserts: vec![is_at_view_number(4)], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + let stages = vec![view_1, view_2_3, view_4]; + + inject_consensus_polls(&consensus_state).await; + run_test_script(stages, consensus_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task +/// will indeed vote if the cert is valid and matches the correct view number. +async fn test_view_sync_finalize_vote() { + use hotshot_testing::predicates::timeout_vote_send; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(5).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { + relay: 4, + round: ViewNumber::new(5), + }; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut vids = Vec::new(); + let mut dacs = Vec::new(); + for view in (&mut generator).take(3) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + vids.push(view.vid_proposal.clone()); + dacs.push(view.da_certificate.clone()); + } + + // Each call to `take` moves us to the next generated view. We advance to view + // 3 and then add the finalize cert for checking there. + generator.add_view_sync_finalize(view_sync_finalize_data); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + vids.push(view.vid_proposal.clone()); + dacs.push(view.da_certificate.clone()); + } + + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![is_at_view_number(1)], + }; + + let view_2 = TestScriptStage { + inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], + outputs: vec![timeout_vote_send(), timeout_vote_send()], + // Times out, so we now have a delayed view + asserts: vec![is_at_view_number(1)], + }; + + // Now we're on the latest view. We want to set the quorum + // certificate to be the previous highest QC (before the timeouts). This will be distinct from + // the view sync cert, which is saying "hey, I'm _actually_ at view 4, but my highest QC is + // only for view 1." This forces the QC to be for view 1, and we can move on under this + // assumption. + + // Try to view sync at view 4. + let view_sync_cert = proposals[3].data.view_sync_certificate.clone().unwrap(); + + // Highest qc so far is actually from view 1, so re-assign proposal 0 to the slot of proposal + // 3. + proposals[0].data.proposer_id = proposals[3].data.proposer_id; + + // Now at view 3 we receive the proposal received response. + let view_3 = TestScriptStage { + inputs: vec![ + // Multiple timeouts in a row, so we call for a view sync + ViewSyncFinalizeCertificate2Recv(view_sync_cert), + // Receive a proposal for view 4, but with the highest qc being from view 1. + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + ], + outputs: vec![ + exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_vote_send(), + ], + asserts: vec![], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + let stages = vec![view_1, view_2, view_3]; + + inject_consensus_polls(&consensus_state).await; + run_test_script(stages, consensus_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task +/// will NOT vote when the certificate matches a different view number. +async fn test_view_sync_finalize_vote_fail_view_number() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(5).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { + relay: 10, + round: ViewNumber::new(10), + }; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut vids = Vec::new(); + let mut dacs = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + vids.push(view.vid_proposal.clone()); + dacs.push(view.da_certificate.clone()); + } + + // Each call to `take` moves us to the next generated view. We advance to view + // 3 and then add the finalize cert for checking there. + generator.add_view_sync_finalize(view_sync_finalize_data); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + vids.push(view.vid_proposal.clone()); + dacs.push(view.da_certificate.clone()); + } + + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone(), vids[0].1), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + exact(QuorumVoteSend(votes[0].clone())), + ], + asserts: vec![is_at_view_number(1)], + }; + + let view_2 = TestScriptStage { + inputs: vec![ + VidDisperseRecv(vids[1].0.clone(), vids[1].1), + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + DACRecv(dacs[1].clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + exact(QuorumVoteSend(votes[1].clone())), + ], + asserts: vec![is_at_view_number(2)], + }; + + let mut cert = proposals[2].data.view_sync_certificate.clone().unwrap(); + + // Trigger the timeout cert check + proposals[2].data.justify_qc.view_number = ViewNumber::new(1); + + // Overwrite the cert view number with something invalid to force the failure. This should + // result in the vote NOT being sent below in the outputs. + cert.view_number = ViewNumber::new(10); + let view_3 = TestScriptStage { + inputs: vec![ + ViewSyncFinalizeCertificate2Recv(cert), + QuorumProposalRecv(proposals[2].clone(), leaders[2]), + VidDisperseRecv(vids[2].0.clone(), vids[2].1), + DACRecv(dacs[2].clone()), + ], + outputs: vec![ + /* The entire thing dies */ + ], + // We are unable to move to the next view. + asserts: vec![is_at_view_number(2)], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + let stages = vec![view_1, view_2, view_3]; + + inject_consensus_polls(&consensus_state).await; + run_test_script(stages, consensus_state).await; +} From f8151cd5d62612a73c7db3b558549eb4758dcc0a Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 15 Mar 2024 12:16:55 +0100 Subject: [PATCH 0863/1393] [BUILDER] Implement Builder API consumer (#2541) * Implement Builder API consumer * Update to latest hs-builder-api --- task-impls/Cargo.toml | 4 ++ task-impls/src/builder.rs | 127 +++++++++++++++++++++++++++++++++ task-impls/src/lib.rs | 4 ++ testing/Cargo.toml | 4 ++ testing/src/block_builder.rs | 124 ++++++++++++++++++++++++++++++++ testing/src/lib.rs | 3 + testing/tests/block_builder.rs | 59 +++++++++++++++ 7 files changed, 325 insertions(+) create mode 100644 task-impls/src/builder.rs create mode 100644 testing/src/block_builder.rs create mode 100644 testing/tests/block_builder.rs diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 0f6346949d..a01656e8ff 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -15,6 +15,7 @@ async-lock = { workspace = true } tracing = { workspace = true } hotshot-types = { workspace = true } hotshot-utils = { path = "../utils" } +hs-builder-api = { workspace = true } jf-primitives = { workspace = true } time = { workspace = true } commit = { workspace = true } @@ -24,6 +25,9 @@ sha2 = { workspace = true } hotshot-task = { path = "../task" } async-broadcast = { workspace = true } chrono = "0.4" +surf-disco = { workspace = true } +serde = { workspace = true } +tagged-base64 = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs new file mode 100644 index 0000000000..3e5e555207 --- /dev/null +++ b/task-impls/src/builder.rs @@ -0,0 +1,127 @@ +use async_compatibility_layer::art::async_sleep; +use std::time::{Duration, Instant}; + +use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + utils::BuilderCommitment, + vid::VidCommitment, +}; +use hs_builder_api::builder::{BuildError, Error as BuilderApiError}; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use surf_disco::{client::HealthStatus, Client, Url}; +use tagged_base64::TaggedBase64; + +#[derive(Debug, Snafu, Serialize, Deserialize)] +/// Represents errors thant builder client may return +pub enum BuilderClientError { + // NOTE: folds BuilderError::NotFound & builderError::Missing + // into one. Maybe we'll want to handle that separately in + // the future + /// Block not found + #[snafu(display("Requested block not found"))] + NotFound, + /// Generic error while accessing the API, + /// i.e. when API isn't available or compatible + #[snafu(display("Builder API error: {message}"))] + Api { + /// Underlying error + message: String, + }, +} + +impl From for BuilderClientError { + fn from(value: BuilderApiError) -> Self { + match value { + BuilderApiError::Request { source } | BuilderApiError::TxnUnpack { source } => { + Self::Api { + message: source.to_string(), + } + } + BuilderApiError::TxnSubmit { source } => Self::Api { + message: source.to_string(), + }, + BuilderApiError::Custom { message, .. } => Self::Api { message }, + BuilderApiError::BlockAvailable { source, .. } + | BuilderApiError::BlockClaim { source, .. } => match source { + BuildError::NotFound | BuildError::Missing => Self::NotFound, + BuildError::Error { message } => Self::Api { message }, + }, + } + } +} + +/// Client for builder API +pub struct BuilderClient { + /// Underlying surf_disco::Client + inner: Client, + /// Marker for [`NodeType`] used here + _marker: std::marker::PhantomData, +} + +impl BuilderClient +where + <::SignatureKey as SignatureKey>::PureAssembledSignatureType: + for<'a> TryFrom<&'a TaggedBase64> + Into, +{ + /// Construct a new client from base url + pub fn new(base_url: impl Into) -> Self { + Self { + inner: Client::new(base_url.into()), + _marker: std::marker::PhantomData, + } + } + + /// Wait for server to become available + /// Returns `false` if server doesn't respond + /// with OK healthcheck before `timeout` + pub async fn connect(&self, timeout: Duration) -> bool { + let timeout = Instant::now() + timeout; + let mut backoff = Duration::from_millis(50); + while Instant::now() < timeout { + if matches!( + self.inner.healthcheck::().await, + Ok(HealthStatus::Available) + ) { + return true; + } + async_sleep(backoff).await; + backoff *= 2; + } + false + } + + /// Query builder for available blocks + /// + /// # Errors + /// - [`BuilderClientError::NotFound`] if blocks aren't available for this parent + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn get_avaliable_blocks( + &self, + parent: VidCommitment, + ) -> Result, BuilderClientError> { + self.inner + .get(&format!("availableblocks/{parent}")) + .send() + .await + .map_err(Into::into) + } + + /// Claim block + /// + /// # Errors + /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block( + &self, + block_hash: BuilderCommitment, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.inner + .get(&format!("claimblock/{block_hash}/{encoded_signature}")) + .send() + .await + .map_err(Into::into) + } +} diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index e2d5ee3258..4489bda1d3 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -31,6 +31,10 @@ pub mod vote; /// Task for handling upgrades pub mod upgrade; +/// Implementations for builder client +/// Should contain builder task in the future +pub mod builder; + /// Helper functions used by any task pub mod helpers; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index fab9b16c59..0074c0b16a 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -13,6 +13,7 @@ slow-tests = [] [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } +async-trait = { workspace = true } sha3 = "^0.10" bincode = { workspace = true } commit = { workspace = true } @@ -24,6 +25,7 @@ hotshot-utils = { path = "../utils" } hotshot-macros = { path = "../macros" } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +hs-builder-api = { workspace = true } jf-primitives = { workspace = true } rand = { workspace = true } snafu = { workspace = true } @@ -35,6 +37,8 @@ bitvec = { workspace = true } ethereum-types = { workspace = true } hotshot-task = { path = "../task" } hotshot-example-types = { path = "../example-types" } +tide-disco = { workspace = true } +portpicker = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs new file mode 100644 index 0000000000..5eb229dac6 --- /dev/null +++ b/testing/src/block_builder.rs @@ -0,0 +1,124 @@ +use async_compatibility_layer::art::async_spawn; +use async_trait::async_trait; +use futures::future::BoxFuture; +use hotshot::traits::BlockPayload; +use hotshot::types::SignatureKey; +use hotshot_example_types::{block_types::TestBlockPayload, node_types::TestTypes}; +use hotshot_types::traits::block_contents::vid_commitment; +use hotshot_types::utils::BuilderCommitment; +use hotshot_types::{traits::node_implementation::NodeType, vid::VidCommitment}; +use hs_builder_api::block_info::{ + AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo, +}; +use hs_builder_api::{ + builder::{BuildError, Options}, + data_source::BuilderDataSource, +}; +use tide_disco::{method::ReadState, App, Url}; + +/// The only block [`TestableBuilderSource`] provides +const EMPTY_BLOCK: TestBlockPayload = TestBlockPayload { + transactions: vec![], +}; + +/// A mock implementation of the builder data source. +/// "Builds" only empty blocks. +pub struct TestableBuilderSource { + priv_key: <::SignatureKey as SignatureKey>::PrivateKey, + pub_key: ::SignatureKey, +} + +#[async_trait] +impl ReadState for TestableBuilderSource { + type State = Self; + + async fn read( + &self, + op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, + ) -> T { + op(self).await + } +} + +#[async_trait] +impl BuilderDataSource for TestableBuilderSource { + async fn get_available_blocks( + &self, + _for_parent: &VidCommitment, + ) -> Result>, BuildError> { + Ok(vec![AvailableBlockInfo { + sender: self.pub_key, + signature: ::SignatureKey::sign( + &self.priv_key, + EMPTY_BLOCK.builder_commitment(&()).as_ref(), + ) + .unwrap(), + block_hash: EMPTY_BLOCK.builder_commitment(&()), + block_size: 0, + offered_fee: 1, + _phantom: std::marker::PhantomData, + }]) + } + + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + if block_hash == &EMPTY_BLOCK.builder_commitment(&()) { + Ok(AvailableBlockData { + block_payload: EMPTY_BLOCK, + metadata: (), + signature: ::SignatureKey::sign( + &self.priv_key, + EMPTY_BLOCK.builder_commitment(&()).as_ref(), + ) + .unwrap(), + sender: self.pub_key, + _phantom: std::marker::PhantomData, + }) + } else { + Err(BuildError::Missing) + } + } + + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + if block_hash == &EMPTY_BLOCK.builder_commitment(&()) { + Ok(AvailableBlockHeaderInput { + vid_commitment: vid_commitment(&vec![], 1), + signature: ::SignatureKey::sign( + &self.priv_key, + EMPTY_BLOCK.builder_commitment(&()).as_ref(), + ) + .unwrap(), + sender: self.pub_key, + _phantom: std::marker::PhantomData, + }) + } else { + Err(BuildError::Missing) + } + } +} + +/// Construct a tide disco app that mocks the builder API. +/// +/// # Panics +/// If constructing and launching the builder fails for any reason +pub fn run_builder(url: Url) { + let builder_api = hs_builder_api::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let (pub_key, priv_key) = + ::SignatureKey::generated_from_seed_indexed([1; 32], 0); + let mut app: App = + App::with_state(TestableBuilderSource { priv_key, pub_key }); + app.register_module("/", builder_api) + .expect("Failed to register the builder API"); + + async_spawn(app.serve(url)); +} diff --git a/testing/src/lib.rs b/testing/src/lib.rs index d9ae432175..6810620e5d 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -33,6 +33,9 @@ pub mod spinning_task; /// task for checking if view sync got activated pub mod view_sync_task; +/// Test implementation of block builder +pub mod block_builder; + /// predicates to use in tests pub mod predicates; diff --git a/testing/tests/block_builder.rs b/testing/tests/block_builder.rs new file mode 100644 index 0000000000..86d25e3346 --- /dev/null +++ b/testing/tests/block_builder.rs @@ -0,0 +1,59 @@ +use hotshot_example_types::{ + block_types::{TestBlockPayload, TestTransaction}, + node_types::TestTypes, +}; +use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; +use hotshot_testing::block_builder::run_builder; +use hotshot_types::traits::BlockPayload; +use hotshot_types::traits::{ + block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, +}; +use std::time::Duration; +use tide_disco::Url; + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_block_builder() { + let port = portpicker::pick_unused_port().expect("Could not find an open port"); + let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); + + run_builder(api_url.clone()); + + let client: BuilderClient = BuilderClient::new(api_url); + assert!(client.connect(Duration::from_millis(100)).await); + + // Test getting blocks + let mut blocks = client + .get_avaliable_blocks(vid_commitment(&vec![], 1)) + .await + .expect("Failed to get avaliable blocks"); + + assert_eq!(blocks.len(), 1); + + // Test claiming available block + let signature = { + let (_key, private_key) = + ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); + ::SignatureKey::sign(&private_key, &[0_u8; 32]) + .expect("Failed to create dummy signature") + }; + + let _: TestBlockPayload = client + .claim_block(blocks.pop().unwrap(), &signature) + .await + .expect("Failed to claim block"); + + // Test claiming non-existent block + let commitment_for_non_existent_block = TestBlockPayload { + transactions: vec![TestTransaction(vec![0; 1])], + } + .builder_commitment(&()); + let result = client + .claim_block(commitment_for_non_existent_block, &signature) + .await; + assert!(matches!(result, Err(BuilderClientError::NotFound))); +} From 703fe7e130b0c0c1deb28f6cf2dee56333d1278d Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Fri, 15 Mar 2024 09:44:56 -0400 Subject: [PATCH 0864/1393] update hotshot-types tag (#2783) --- example-types/src/state_types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 5a17c2cdd0..8c32573cb5 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -24,7 +24,7 @@ pub struct TestInstanceState {} impl InstanceState for TestInstanceState {} /// Application-specific state delta implementation for testing purposes. -#[derive(Clone, Copy, Debug, Default)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)] pub struct TestStateDelta {} impl StateDelta for TestStateDelta {} From c8b289aa54bf09e04a8caf055614c589d39ec6d7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Mar 2024 13:49:23 -0400 Subject: [PATCH 0865/1393] [TECH-DEBT] Add View Sync Timeout to Config (#2777) * Propogate config everywhere * Remove casting --- hotshot/src/tasks/task_state.rs | 3 +-- hotshot/src/traits/election/static_committee.rs | 6 +++--- orchestrator/src/config.rs | 8 ++++++++ testing/src/test_builder.rs | 6 ++++++ 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 77c351ea64..e36efc5d16 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -14,7 +14,6 @@ use std::{ collections::{HashMap, HashSet}, marker::PhantomData, sync::Arc, - time::Duration, }; /// Trait for creating task states. @@ -118,7 +117,7 @@ impl> CreateTaskState pre_commit_relay_map: HashMap::default().into(), commit_relay_map: HashMap::default().into(), finalize_relay_map: HashMap::default().into(), - view_sync_timeout: Duration::new(10, 0), + view_sync_timeout: handle.hotshot.config.view_sync_timeout, id: handle.hotshot.id, last_garbage_collected_view: TYPES::Time::new(0), } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 438451f575..a95a9895f3 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -80,9 +80,9 @@ where #[cfg(feature = "randomized-leader-election")] /// Index the vector of public keys with a random number generated using the current view number as a seed fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { - let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number as u64); - let randomized_view_number: u64 = rng.gen(); - let index = (randomized_view_number % self.nodes_with_stake.len() as u64) as usize; + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + let randomized_view_number: usize = rng.gen(); + let index = randomized_view_number % self.nodes_with_stake.len(); let res = self.nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 6e7f48cd2a..19560b01c3 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -137,6 +137,8 @@ pub struct NetworkConfig { pub num_bootrap: usize, /// timeout before starting the next view pub next_view_timeout: u64, + /// timeout before starting next view sync round + pub view_sync_timeout: Duration, /// minimum time to wait for a view pub propose_min_round_time: Duration, /// maximum time to wait for a view @@ -402,6 +404,7 @@ impl Default for NetworkConfig { cdn_marshal_address: None, combined_network_config: None, next_view_timeout: 10, + view_sync_timeout: Duration::from_secs(2), num_bootrap: 5, propose_min_round_time: Duration::from_secs(0), propose_max_round_time: Duration::from_secs(10), @@ -461,6 +464,7 @@ impl From> for NetworkC node_index: 0, num_bootrap: val.config.num_bootstrap, next_view_timeout: val.config.next_view_timeout, + view_sync_timeout: val.config.view_sync_timeout, propose_max_round_time: val.config.propose_max_round_time, propose_min_round_time: val.config.propose_min_round_time, seed: val.seed, @@ -526,6 +530,8 @@ pub struct HotShotConfigFile { pub min_transactions: usize, /// Base duration for next-view timeout, in milliseconds pub next_view_timeout: u64, + /// Duration for view sync round timeout + pub view_sync_timeout: Duration, /// The exponential backoff ration for the next-view timeout pub timeout_ratio: (u64, u64), /// The delay a leader inserts before starting pre-commit, in milliseconds @@ -606,6 +612,7 @@ impl From> for HotS da_staked_committee_size: val.staked_committee_nodes, da_non_staked_committee_size: val.non_staked_committee_nodes, next_view_timeout: val.next_view_timeout, + view_sync_timeout: val.view_sync_timeout, timeout_ratio: val.timeout_ratio, round_start_delay: val.round_start_delay, start_delay: val.start_delay, @@ -660,6 +667,7 @@ impl Default for HotShotConfigFile { max_transactions: NonZeroUsize::new(100).unwrap(), min_transactions: 1, next_view_timeout: 10000, + view_sync_timeout: Duration::from_millis(1000), timeout_ratio: (11, 10), round_start_delay: 1, start_delay: 1, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index ab339f2371..1293903fbd 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -36,6 +36,8 @@ pub struct TimingData { pub propose_max_round_time: Duration, /// Delay before sending through the secondary network in CombinedNetworks pub secondary_network_delay: Duration, + /// view sync timeout + pub view_sync_timeout: Duration, } /// metadata describing a test @@ -84,6 +86,7 @@ impl Default for TimingData { propose_min_round_time: Duration::new(0, 0), propose_max_round_time: Duration::from_millis(100), secondary_network_delay: Duration::from_millis(1000), + view_sync_timeout: Duration::from_millis(2000), } } } @@ -281,6 +284,7 @@ impl TestMetadata { da_staked_committee_size, da_non_staked_committee_size, next_view_timeout: 500, + view_sync_timeout: Duration::from_millis(250), timeout_ratio: (11, 10), round_start_delay: 1, start_delay: 1, @@ -302,6 +306,7 @@ impl TestMetadata { propose_min_round_time, propose_max_round_time, secondary_network_delay, + view_sync_timeout, } = timing_data; let mod_config = // TODO this should really be using the timing config struct @@ -312,6 +317,7 @@ impl TestMetadata { a.start_delay = start_delay; a.propose_min_round_time = propose_min_round_time; a.propose_max_round_time = propose_max_round_time; + a.view_sync_timeout = view_sync_timeout; }; TestLauncher { From 8159f43e5541fe452a067f836fbb774361d77a50 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Mar 2024 16:05:45 -0400 Subject: [PATCH 0866/1393] [NETWORK] Add Request Task (#2698) * Don't lock channels * cargo fix * Allow killing the connected network handler * Fix issues * Remove State from NetworkNodeHandle * Remove async where it's unused * fix async std build * Fix erros a little * Starting VID request response * handle network event * Bubble up request/response * Make Request and Response just bytes at network layer * Adding to ConnectedNetwork * Hooks for request/response in handle + trait * fix request * Remove request and response tasks for now * update mod.rs * Hooked up response flow to ConnectedNetwork * Refactor interface to return result to caller * Add request and response to message struct * Clean up some message stuff * Fix build error * Hook up request and response fully * Review myself, impl functions for combine * Change Receiver interface to spawn task * try_send instead of send * Create the task for request handling * rename request -> response * fix lint * clean up request response event handle fn * fix build * Comments and make the request signed * add signature checking * link gh issue * Add Event for Validated Quorum Proposal * Fix consensus task * fix upgrade test * start adding the task * Fix the test...again * progress * Finish request task, modify vid disperse event * Address comments * fix build * Make CombinedNetworks delay duration configurable * Secondary network delay configurable in HotShotConfig * Rename CombinedConfig to CombinedNetworkConfig * Network delay in test network generator `secondary_network_delay` removed from `HotShotConfig` because it cannot easily be passed to the test network generator. * lock * Temporary pinning to hotshot-types branch TODO: switch to hotshot-types tag or main branch before merging * fixes * Pin to hotshot-types tag 0.1.2 * Remove files added back by mistake * fix build * Remove whitespace * Review comments * fixes after main merge --------- Co-authored-by: Lukasz Rzasik --- task-impls/Cargo.toml | 1 + task-impls/src/consensus.rs | 13 +- task-impls/src/events.rs | 2 +- task-impls/src/lib.rs | 3 + task-impls/src/network.rs | 2 +- task-impls/src/request.rs | 263 +++++++++++++++++++++++++++++ testing/tests/consensus_task.rs | 18 +- testing/tests/network_task.rs | 2 +- testing/tests/proposal_ordering.rs | 2 +- testing/tests/upgrade_task.rs | 10 +- testing/tests/vid_task.rs | 2 +- 11 files changed, 288 insertions(+), 30 deletions(-) create mode 100644 task-impls/src/request.rs diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index a01656e8ff..0c4ba69bf8 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -25,6 +25,7 @@ sha2 = { workspace = true } hotshot-task = { path = "../task" } async-broadcast = { workspace = true } chrono = "0.4" +rand = { workspace = true } surf-disco = { workspace = true } serde = { workspace = true } tagged-base64 = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e0aed5d6b4..f32a617280 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1033,8 +1033,7 @@ impl, A: ConsensusApi + self.current_proposal = None; } } - HotShotEvent::VidDisperseRecv(disperse, sender) => { - let sender = sender.clone(); + HotShotEvent::VidDisperseRecv(disperse) => { let view = disperse.data.get_view_number(); debug!( @@ -1054,16 +1053,8 @@ impl, A: ConsensusApi + debug!("VID disperse data is not more than one view older."); let payload_commitment = disperse.data.payload_commitment; - // Check whether the sender is the right leader for this view + // Check whether the data comes from the right leader for this view let view_leader_key = self.quorum_membership.get_leader(view); - if view_leader_key != sender { - warn!( - "VID dispersal/share is not from expected leader key for view {} \n", - *view - ); - return; - } - if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { warn!("Could not verify VID dispersal/share sig."); return; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8f61eb8402..187dd1dfb3 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -117,7 +117,7 @@ pub enum HotShotEvent { /// Vid disperse data has been received from the network; handled by the DA task /// /// Like [`HotShotEvent::DAProposalRecv`]. - VidDisperseRecv(Proposal>, TYPES::SignatureKey), + VidDisperseRecv(Proposal>), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 4489bda1d3..dd3e4e4366 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -40,3 +40,6 @@ pub mod helpers; /// Task which responsds to requests from the network pub mod response; + +/// Task for requesting the network for things +pub mod request; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 1395ca67e6..36dc2f4d47 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -158,7 +158,7 @@ impl NetworkMessageTaskState { HotShotEvent::DACRecv(cert) } CommitteeConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VidDisperseRecv(proposal, sender) + HotShotEvent::VidDisperseRecv(proposal) } }, }; diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs new file mode 100644 index 0000000000..b6c75ee314 --- /dev/null +++ b/task-impls/src/request.rs @@ -0,0 +1,263 @@ +use std::{sync::Arc, time::Duration}; + +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; +use async_lock::RwLock; +use bincode::Options; +use either::Either; +use hotshot_task::task::TaskState; +use hotshot_types::{ + consensus::Consensus, + constants::VERSION_0_1, + message::{CommitteeConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, + traits::{ + election::Membership, + network::{ConnectedNetwork, DataRequest, RequestKind, ResponseMessage}, + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + }, + vote::HasViewNumber, +}; +use hotshot_utils::bincode::bincode_opts; +use rand::{prelude::SliceRandom, thread_rng}; +use sha2::{Digest, Sha256}; +use tracing::{error, info, warn}; + +/// Amount of time to try for a request before timing out. +const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); + +/// Long running task which will request information after a proposal is received. +/// The task will wait a it's `delay` and then send a request iteratively to peers +/// for any data they don't have related to the proposal. For now it's just requesting VID +/// shares. +pub struct NetworkResponseState> { + /// Network to send requests over + pub network: I::QuorumNetwork, + /// Consensus shared state so we can check if we've gotten the information + /// before sending a request + pub state: Arc>>, + /// Last seen view, we won't request for proposals before older than this view + pub view: TYPES::Time, + /// Delay before requesting peers + pub delay: Duration, + /// Committee + pub da_membership: TYPES::Membership, + /// Quorum + pub quorum_membership: TYPES::Membership, + /// This nodes public key + pub public_key: TYPES::SignatureKey, + /// This nodes private/signign key, used to sign requests. + pub private_key: ::PrivateKey, +} + +/// Alias for a signature +type Signature = + <::SignatureKey as SignatureKey>::PureAssembledSignatureType; + +impl> TaskState for NetworkResponseState { + type Event = HotShotEvent; + + type Output = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut hotshot_task::task::Task, + ) -> Option { + match event { + HotShotEvent::QuorumProposalValidated(proposal) => { + let state = task.state(); + let prop_view = proposal.get_view_number(); + if prop_view >= state.view { + state.spawn_requests(prop_view, task.clone_sender()).await; + } + None + } + HotShotEvent::ViewChange(view) => { + if view > task.state().view { + task.state_mut().view = view; + } + None + } + HotShotEvent::Shutdown => Some(HotShotTaskCompleted), + _ => None, + } + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } + fn filter(&self, event: &Self::Event) -> bool { + !matches!( + event, + HotShotEvent::Shutdown + | HotShotEvent::QuorumProposalValidated(_) + | HotShotEvent::ViewChange(_) + ) + } +} + +impl> NetworkResponseState { + /// Spawns tasks for a given view to retrieve any data needed. + async fn spawn_requests(&self, view: TYPES::Time, sender: Sender>) { + let requests = self.build_requests(view).await; + if requests.is_empty() { + return; + } + requests + .into_iter() + .for_each(|r| self.run_delay(r, sender.clone(), view)); + } + + /// Creats the srequest structures for all types that are needed. + async fn build_requests(&self, view: TYPES::Time) -> Vec> { + let mut reqs = Vec::new(); + if !self.state.read().await.vid_shares.contains_key(&view) { + reqs.push(RequestKind::VID(view, self.public_key.clone())); + } + // TODO request other things + reqs + } + + /// run a delayed request task for a request. The first response + /// recieved will be sent over `sender` + fn run_delay( + &self, + request: RequestKind, + sender: Sender>, + view: TYPES::Time, + ) { + let mut recipients: Vec<_> = self + .da_membership + .get_whole_committee(view) + .into_iter() + .collect(); + // Randomize the recipients so all replicas don't overload the same 1 recipients + // and so we don't implicitly rely on the same replica all the time. + recipients.shuffle(&mut thread_rng()); + let requester = DelayedRequester:: { + network: self.network.clone(), + state: self.state.clone(), + sender, + delay: self.delay, + recipients, + }; + let Ok(data) = bincode_opts().serialize(&request) else { + tracing::error!("Failed to serialize request!"); + return; + }; + let Ok(signature) = TYPES::SignatureKey::sign(&self.private_key, &Sha256::digest(data)) + else { + error!("Failed to sign Data Request"); + return; + }; + async_spawn(requester.run(request, signature)); + } +} + +/// A short lived task that waits a delay and starts trying peers until it complets +/// a request. If at any point the requested info is seen in the data stores or +/// the view has moved beyond the view we are requesting, the task will completed. +struct DelayedRequester> { + /// Network to send requests + network: I::QuorumNetwork, + /// Shared state to check if the data go populated + state: Arc>>, + /// Channel to send the event when we receive a response + sender: Sender>, + /// Duration to delay sending the first request + delay: Duration, + /// The peers we will request in a random order + recipients: Vec, +} + +/// Wrapper for the info in a VID request +struct VidRequest(TYPES::Time, TYPES::SignatureKey); + +impl> DelayedRequester { + /// Wait the delay, then try to complete the request. Iterates over peers + /// until the request is completed, or the data is no longer needed. + async fn run(mut self, request: RequestKind, signature: Signature) { + // Do the delay then start sending + async_sleep(self.delay).await; + match request { + RequestKind::VID(view, key) => self.do_vid(VidRequest(view, key), signature).await, + RequestKind::DAProposal(..) => {} + } + } + + /// Handle sending a VID Share request, runs the loop until the data exists + async fn do_vid(&mut self, req: VidRequest, signature: Signature) { + let message = make_vid(&req, signature); + + while !self.recipients.is_empty() && !self.cancel_vid(&req).await { + match async_timeout( + REQUEST_TIMEOUT, + self.network + .request_data::(message.clone(), self.recipients.pop().unwrap()), + ) + .await + { + Ok(Ok(response)) => { + match response { + ResponseMessage::Found(data) => { + self.handle_response_message(data).await; + // keep trying, but expect the map to be populated, or view to increase + async_sleep(REQUEST_TIMEOUT).await; + } + ResponseMessage::NotFound => { + info!("Peer Responded they did not have the data"); + } + ResponseMessage::Denied => { + error!("Request for data was denied by the receiver"); + } + } + } + Ok(Err(e)) => { + warn!("Error Sending request. Error: {:?}", e); + } + Err(_) => { + warn!("Request to other node timed out"); + } + } + } + } + /// Returns true if we got the data we wanted, or the view has moved on. + async fn cancel_vid(&self, req: &VidRequest) -> bool { + let view = req.0; + let state = self.state.read().await; + state.vid_shares.contains_key(&view) && state.cur_view > view + } + + /// Transform a response into a `HotShotEvent` + async fn handle_response_message(&self, message: SequencingMessage) { + let event = match message.0 { + Either::Right(CommitteeConsensusMessage::VidDisperseMsg(prop)) => { + HotShotEvent::VidDisperseRecv(prop) + } + _ => return, + }; + broadcast_event(event, &self.sender).await; + } +} + +/// Make a VID Request Message to send +fn make_vid( + req: &VidRequest, + signature: Signature, +) -> Message { + let kind = RequestKind::VID(req.0, req.1.clone()); + let data_request = DataRequest { + view: req.0, + request: kind, + signature, + }; + Message { + version: VERSION_0_1, + sender: req.1.clone(), + kind: MessageKind::Data(DataMessage::RequestData(data_request)), + } +} diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index ba8d42f49a..8da16ac4af 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -61,7 +61,7 @@ async fn test_consensus_task() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -140,7 +140,7 @@ async fn test_consensus_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ @@ -192,7 +192,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -204,7 +204,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VidDisperseRecv(vids[1].0.clone(), vids[1].1), + VidDisperseRecv(vids[1].0.clone()), DACRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; @@ -314,7 +314,7 @@ async fn test_view_sync_finalize_propose() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -434,7 +434,7 @@ async fn test_view_sync_finalize_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -538,7 +538,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -550,7 +550,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let view_2 = TestScriptStage { inputs: vec![ - VidDisperseRecv(vids[1].0.clone(), vids[1].1), + VidDisperseRecv(vids[1].0.clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), DACRecv(dacs[1].clone()), ], @@ -574,7 +574,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ ViewSyncFinalizeCertificate2Recv(cert), QuorumProposalRecv(proposals[2].clone(), leaders[2]), - VidDisperseRecv(vids[2].0.clone(), vids[2].1), + VidDisperseRecv(vids[2].0.clone()), DACRecv(dacs[2].clone()), ], outputs: vec![ diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 8afccd3c93..a97066a9be 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -132,7 +132,7 @@ async fn test_network_task() { HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), 1, ); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); + output.insert(HotShotEvent::VidDisperseRecv(vid_proposal), 1); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); // let build_fn = |task_runner, _| async { task_runner }; diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index 7d37c6cf97..5babcb1f2c 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -52,7 +52,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index ffbd5089c2..02fe53cc0c 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -67,7 +67,7 @@ async fn test_upgrade_task() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), DACRecv(dacs[0].clone()), ], outputs: vec![ @@ -80,7 +80,7 @@ async fn test_upgrade_task() { let view_2 = TestScriptStage { inputs: vec![ - VidDisperseRecv(vids[1].0.clone(), vids[1].1), + VidDisperseRecv(vids[1].0.clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), DACRecv(dacs[1].clone()), ], @@ -96,7 +96,7 @@ async fn test_upgrade_task() { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DACRecv(dacs[2].clone()), - VidDisperseRecv(vids[2].0.clone(), vids[2].1), + VidDisperseRecv(vids[2].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), @@ -111,7 +111,7 @@ async fn test_upgrade_task() { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DACRecv(dacs[3].clone()), - VidDisperseRecv(vids[3].0.clone(), vids[3].1), + VidDisperseRecv(vids[3].0.clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), @@ -229,7 +229,7 @@ async fn test_upgrade_and_consensus_task() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0.clone(), vids[0].1), + VidDisperseRecv(vids[0].0.clone()), DACRecv(dacs[0].clone()), ], upgrade_vote_recvs, diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index f964a69cb6..d79c39b61d 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -77,7 +77,7 @@ async fn test_vid_task() { ViewNumber::new(2), )); input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); + input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone())); input.push(HotShotEvent::Shutdown); output.insert( From e14c07861cf78f7600e3a7923299460759f276d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 11:03:40 +0100 Subject: [PATCH 0867/1393] Bump derive_builder from 0.13.1 to 0.20.0 (#2781) Bumps [derive_builder](https://github.com/colin-kiegel/rust-derive-builder) from 0.13.1 to 0.20.0. - [Release notes](https://github.com/colin-kiegel/rust-derive-builder/releases) - [Commits](https://github.com/colin-kiegel/rust-derive-builder/compare/v0.13.1...v0.20.0) --- updated-dependencies: - dependency-name: derive_builder dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- macros/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ced6c864a3..8bccfa4678 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -18,7 +18,7 @@ async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } -derive_builder = "0.13.1" +derive_builder = "0.20.0" either = { workspace = true } futures = { workspace = true } hotshot-types = { workspace = true } diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 97448728a9..88784ff052 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -9,7 +9,7 @@ description = "Macros for hotshot tests" quote = "1.0.33" syn = { version = "2.0.50", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" -derive_builder = "0.13.1" +derive_builder = "0.20.0" [dev-dependencies] async-lock = { workspace = true } diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index d1620e73e9..86a40674b5 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -27,7 +27,7 @@ serde = { workspace = true } quote = "1.0.33" syn = { version = "2.0.52", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" -derive_builder = "0.13.1" +derive_builder = "0.20.0" [dev-dependencies] async-lock = { workspace = true } From 66f358330a380e7353f7e6f926e94eb3eda07886 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Mon, 18 Mar 2024 10:44:44 -0400 Subject: [PATCH 0868/1393] feat: GPU accelerated VID integration (#2789) * add feature flag * add gpu-vid feature * update push-cdn deps * cargo lock * update deps & fix * fix test * cargolock * cargo lock * update push-cdn deps * change hs-builder-api back --- example-types/Cargo.toml | 3 +++ examples/Cargo.toml | 3 +++ hotshot/Cargo.toml | 3 +++ task-impls/Cargo.toml | 5 +++++ testing/Cargo.toml | 3 +++ testing/src/task_helpers.rs | 4 ++-- testing/tests/consensus_task.rs | 4 ++-- testing/tests/network_task.rs | 2 +- testing/tests/proposal_ordering.rs | 2 +- testing/tests/vid_task.rs | 2 +- 10 files changed, 24 insertions(+), 7 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 44f17dc81b..dc8197fc5e 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -9,6 +9,9 @@ authors = ["Espresso Systems "] default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] +gpu-vid = [ + "hotshot-task-impls/gpu-vid", +] [dependencies] async-broadcast = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 2dd9b303ac..1696a81a30 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -9,6 +9,9 @@ rust-version = "1.65.0" [features] default = ["docs", "doc-images"] +gpu-vid = [ + "hotshot-task-impls/gpu-vid", +] # Features required for binaries bin-orchestrator = ["clap"] diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index d6623eb479..b63989d98c 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -9,6 +9,9 @@ rust-version = "1.65.0" [features] default = ["docs", "doc-images"] +gpu-vid = [ + "hotshot-task-impls/gpu-vid", +] # Features required for binaries bin-orchestrator = ["clap"] diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 0c4ba69bf8..9c09f9c7cb 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -30,6 +30,11 @@ surf-disco = { workspace = true } serde = { workspace = true } tagged-base64 = { workspace = true } +[features] +gpu-vid = [ + "hotshot-types/gpu-vid" +] + [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 0074c0b16a..674c129ab1 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -9,6 +9,9 @@ authors = ["Espresso Systems "] default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] +gpu-vid = [ + "hotshot-types/gpu-vid" +] [dependencies] async-broadcast = { workspace = true } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index bae5d7c483..39f83682ce 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -381,7 +381,7 @@ pub fn vid_payload_commitment( view_number: ViewNumber, transactions: Vec, ) -> VidCommitment { - let vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(encoded_transactions).unwrap(); @@ -403,7 +403,7 @@ pub fn build_vid_proposal( transactions: Vec, private_key: &::PrivateKey, ) -> Proposal> { - let vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 8da16ac4af..4253222531 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -36,7 +36,7 @@ async fn test_consensus_task() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); + let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); let encoded_transactions = Vec::new(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; @@ -271,7 +271,7 @@ async fn test_view_sync_finalize_propose() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(4)); + let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(4)); let encoded_transactions = Vec::new(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index a97066a9be..2a7ef10ba2 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -37,7 +37,7 @@ async fn test_network_task() { &encoded_transactions_hash, ) .expect("Failed to sign block payload"); - let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); + let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let vid_signature = diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index 5babcb1f2c..a96b16a9d2 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -23,7 +23,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let vid = + let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(node_id)); // Make some empty encoded transactions, we just care about having a commitment handy for the diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index d79c39b61d..7e9d4c85d6 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -27,7 +27,7 @@ async fn test_vid_task() { // quorum membership for VID share distribution let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); + let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); From 98f30d23fcb842307ca7fa4ff791a035b1d5e9cb Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 18 Mar 2024 11:43:40 -0400 Subject: [PATCH 0869/1393] [CX_CLEANUP] - Deprecate and remove Storage trait and usages (#2790) * blow away storage type and atomic storage * satisfy clippy * new version * fix build * (kill build) new tag * bump again * new tag in lockfile --- example-types/src/node_types.rs | 9 +- examples/combined/types.rs | 3 +- examples/infra/mod.rs | 23 +- examples/libp2p/types.rs | 3 +- examples/push-cdn/types.rs | 3 +- examples/webserver/types.rs | 3 +- hotshot/src/lib.rs | 38 +-- hotshot/src/traits.rs | 19 +- hotshot/src/traits/storage.rs | 5 - hotshot/src/traits/storage/atomic_storage.rs | 254 ------------------ .../atomic_storage/dual_key_value_store.rs | 215 --------------- .../storage/atomic_storage/hash_map_store.rs | 91 ------- hotshot/src/traits/storage/memory_storage.rs | 106 -------- hotshot/src/types/handle.rs | 15 +- task-impls/src/consensus.rs | 5 - testing/src/spinning_task.rs | 3 +- testing/src/task_helpers.rs | 2 - testing/src/test_builder.rs | 1 - testing/src/test_launcher.rs | 2 - testing/src/test_runner.rs | 7 +- testing/tests/atomic_storage.rs | 145 ---------- testing/tests/memory_network.rs | 5 +- testing/tests/storage.rs | 61 ----- 23 files changed, 28 insertions(+), 990 deletions(-) delete mode 100644 hotshot/src/traits/storage.rs delete mode 100644 hotshot/src/traits/storage/atomic_storage.rs delete mode 100644 hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs delete mode 100644 hotshot/src/traits/storage/atomic_storage/hash_map_store.rs delete mode 100644 hotshot/src/traits/storage/memory_storage.rs delete mode 100644 testing/tests/atomic_storage.rs delete mode 100644 testing/tests/storage.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index e4ba54576d..cff49f8cab 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -9,9 +9,7 @@ use crate::{ use hotshot::traits::{ election::static_committee::{StaticCommittee, StaticElectionConfig}, - implementations::{ - CombinedNetworks, Libp2pNetwork, MemoryNetwork, MemoryStorage, WebServerNetwork, - }, + implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, WebServerNetwork}, NodeImplementation, }; use hotshot_types::{ @@ -103,31 +101,26 @@ type StaticWebQuorumComm = WebServerNetwork; type StaticCombinedQuorumComm = CombinedNetworks; impl NodeImplementation for PushCdnImpl { - type Storage = MemoryStorage; type QuorumNetwork = StaticPushCdnQuorumComm; type CommitteeNetwork = StaticPushCdnDAComm; } impl NodeImplementation for Libp2pImpl { - type Storage = MemoryStorage; type QuorumNetwork = StaticLibp2pQuorumComm; type CommitteeNetwork = StaticLibp2pDAComm; } impl NodeImplementation for MemoryImpl { - type Storage = MemoryStorage; type QuorumNetwork = StaticMemoryQuorumComm; type CommitteeNetwork = StaticMemoryDAComm; } impl NodeImplementation for WebImpl { - type Storage = MemoryStorage; type QuorumNetwork = StaticWebQuorumComm; type CommitteeNetwork = StaticWebDAComm; } impl NodeImplementation for CombinedImpl { - type Storage = MemoryStorage; type QuorumNetwork = StaticCombinedQuorumComm; type CommitteeNetwork = StaticCombinedDAComm; } diff --git a/examples/combined/types.rs b/examples/combined/types.rs index 9a904b592f..f11b159823 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,5 +1,5 @@ use crate::infra::CombinedDARun; -use hotshot::traits::implementations::{CombinedNetworks, MemoryStorage}; +use hotshot::traits::implementations::CombinedNetworks; use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -19,7 +19,6 @@ pub type QuorumNetwork = CombinedNetworks; pub type ViewSyncNetwork = CombinedNetworks; impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 584d319944..1fd4c72b81 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -14,7 +14,7 @@ use hotshot::traits::implementations::{ use hotshot::traits::BlockPayload; use hotshot::{ traits::{ - implementations::{Libp2pNetwork, MemoryStorage, NetworkingMetricsValue, WebServerNetwork}, + implementations::{Libp2pNetwork, NetworkingMetricsValue, WebServerNetwork}, NodeImplementation, }, types::{SignatureKey, SystemContextHandle}, @@ -257,7 +257,7 @@ pub async fn run_orchestrator< TYPES: NodeType, DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, - NODE: NodeImplementation>, + NODE: NodeImplementation, >( OrchestratorArgs { url, config }: OrchestratorArgs, ) { @@ -429,12 +429,7 @@ pub trait RunDA< TYPES: NodeType, DANET: ConnectedNetwork, TYPES::SignatureKey>, QUORUMNET: ConnectedNetwork, TYPES::SignatureKey>, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = QUORUMNET, - CommitteeNetwork = DANET, - Storage = MemoryStorage, - >, + NODE: NodeImplementation, > where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -507,7 +502,6 @@ pub trait RunDA< sk, config.node_index, config.config, - MemoryStorage::empty(), memberships, networks_bundle, initializer, @@ -711,7 +705,6 @@ impl< TYPES, QuorumNetwork = WebServerNetwork, CommitteeNetwork = WebServerNetwork, - Storage = MemoryStorage, >, > RunDA, WebServerNetwork, NODE> for WebServerDARun where @@ -785,7 +778,6 @@ impl< TYPES, QuorumNetwork = PushCdnNetwork, CommitteeNetwork = PushCdnNetwork, - Storage = MemoryStorage, >, > RunDA, PushCdnNetwork, NODE> for PushCdnDaRun where @@ -868,7 +860,6 @@ impl< TYPES, QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>, CommitteeNetwork = Libp2pNetwork, TYPES::SignatureKey>, - Storage = MemoryStorage, >, > RunDA< @@ -936,7 +927,6 @@ impl< >, NODE: NodeImplementation< TYPES, - Storage = MemoryStorage, QuorumNetwork = CombinedNetworks, CommitteeNetwork = CombinedNetworks, >, @@ -1024,12 +1014,7 @@ pub async fn main_entry_point< >, DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = QUORUMCHANNEL, - CommitteeNetwork = DACHANNEL, - Storage = MemoryStorage, - >, + NODE: NodeImplementation, RUNDA: RunDA, >( args: ValidatorArgs, diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index 500581b9d6..407dad4639 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,5 +1,5 @@ use crate::infra::Libp2pDARun; -use hotshot::traits::implementations::{Libp2pNetwork, MemoryStorage}; +use hotshot::traits::implementations::Libp2pNetwork; use hotshot_example_types::state_types::TestTypes; use hotshot_types::{ message::Message, @@ -18,7 +18,6 @@ pub type DANetwork = Libp2pNetwork, :: pub type QuorumNetwork = Libp2pNetwork, ::SignatureKey>; impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; } diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index b6d289af00..d761894416 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -1,5 +1,5 @@ use crate::infra::PushCdnDaRun; -use hotshot::traits::implementations::{MemoryStorage, PushCdnNetwork}; +use hotshot::traits::implementations::PushCdnNetwork; use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -18,7 +18,6 @@ pub type QuorumNetwork = PushCdnNetwork; pub type ViewSyncNetwork = PushCdnNetwork; impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; } diff --git a/examples/webserver/types.rs b/examples/webserver/types.rs index 95abc27be1..ee34f38e19 100644 --- a/examples/webserver/types.rs +++ b/examples/webserver/types.rs @@ -1,5 +1,5 @@ use crate::infra::WebServerDARun; -use hotshot::traits::implementations::{MemoryStorage, WebServerNetwork}; +use hotshot::traits::implementations::WebServerNetwork; use hotshot_example_types::state_types::TestTypes; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -19,7 +19,6 @@ pub type QuorumNetwork = WebServerNetwork; pub type ViewSyncNetwork = WebServerNetwork; impl NodeImplementation for NodeImpl { - type Storage = MemoryStorage; type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 2b15d473c5..c59665f552 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -17,7 +17,7 @@ use crate::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, add_transaction_task, add_upgrade_task, add_view_sync_task, }, - traits::{NodeImplementation, Storage}, + traits::NodeImplementation, types::{Event, SystemContextHandle}, }; use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; @@ -35,7 +35,6 @@ use hotshot_task::task::TaskRegistry; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, data::Leaf, - error::StorageSnafu, event::EventType, message::{DataMessage, Message, MessageKind}, simple_certificate::QuorumCertificate, @@ -46,12 +45,10 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::ValidatedState, - storage::StoredView, BlockPayload, }, HotShotConfig, }; -use snafu::ResultExt; use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, @@ -126,9 +123,6 @@ pub struct SystemContext> { /// Configuration items for this hotshot instance pub config: HotShotConfig, - /// This `HotShot` instance's storage backend - storage: I::Storage, - /// Networks used by the instance of hotshot pub networks: Arc>, @@ -162,13 +156,12 @@ impl> SystemContext { /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. #[allow(clippy::too_many_arguments)] - #[instrument(skip(private_key, storage, memberships, networks, initializer, metrics))] + #[instrument(skip(private_key, memberships, networks, initializer, metrics))] pub async fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - storage: I::Storage, memberships: Memberships, networks: Networks, initializer: HotShotInitializer, @@ -180,12 +173,6 @@ impl> SystemContext { let anchored_leaf = initializer.inner; let instance_state = initializer.instance_state; - // insert to storage - storage - .append(vec![anchored_leaf.clone().into()]) - .await - .context(StorageSnafu)?; - // Get the validated state from the initializer or construct an incomplete one from the // block header. let validated_state = match initializer.validated_state { @@ -256,7 +243,6 @@ impl> SystemContext { public_key, private_key, config, - storage, networks: Arc::new(networks), memberships: Arc::new(memberships), _metrics: consensus_metrics.clone(), @@ -397,17 +383,15 @@ impl> SystemContext { /// the `HotShot` instance will log the error and shut down. /// /// To construct a [`SystemContext`] without setting up tasks, use `fn new` instead. - /// /// # Errors /// - /// Will return an error when the storage failed to insert the first `QuorumCertificate` + /// Can throw an error if `Self::new` fails. #[allow(clippy::too_many_arguments)] pub async fn init( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, node_id: u64, config: HotShotConfig, - storage: I::Storage, memberships: Memberships, networks: Networks, initializer: HotShotInitializer, @@ -420,13 +404,11 @@ impl> SystemContext { ), HotShotError, > { - // Save a clone of the storage for the handle let hotshot = Self::new( public_key, private_key, node_id, config, - storage, memberships, networks, initializer, @@ -477,7 +459,6 @@ impl> SystemContext { output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), hotshot: self.clone().into(), - storage: self.storage.clone(), }; add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; @@ -601,19 +582,6 @@ impl> ConsensusApi fn private_key(&self) -> &::PrivateKey { &self.hotshot.private_key } - - async fn store_leaf( - &self, - old_anchor_view: TYPES::Time, - leaf: Leaf, - ) -> std::result::Result<(), hotshot_types::traits::storage::StorageError> { - let view_to_insert = StoredView::from(leaf); - let storage = &self.hotshot.storage; - storage.append_single_view(view_to_insert).await?; - storage.cleanup_storage_up_to_view(old_anchor_view).await?; - storage.commit().await?; - Ok(()) - } } /// initializer struct for creating starting block diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 5e98fe347e..c4d493acb4 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -2,24 +2,19 @@ pub mod election; mod networking; mod node_implementation; -mod storage; pub use hotshot_types::traits::{BlockPayload, ValidatedState}; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; -pub use storage::{Result as StorageResult, Storage}; /// Module for publicly usable implementations of the traits pub mod implementations { - pub use super::{ - networking::{ - combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, - libp2p_network::{Libp2pNetwork, PeerInfoVec}, - memory_network::{MasterMap, MemoryNetwork}, - push_cdn_network::{PushCdnNetwork, WrappedSignatureKey}, - web_server_network::WebServerNetwork, - NetworkingMetricsValue, - }, - storage::memory_storage::MemoryStorage, // atomic_storage::AtomicStorage, + pub use super::networking::{ + combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, + libp2p_network::{Libp2pNetwork, PeerInfoVec}, + memory_network::{MasterMap, MemoryNetwork}, + push_cdn_network::{PushCdnNetwork, WrappedSignatureKey}, + web_server_network::WebServerNetwork, + NetworkingMetricsValue, }; } diff --git a/hotshot/src/traits/storage.rs b/hotshot/src/traits/storage.rs deleted file mode 100644 index 1961871de7..0000000000 --- a/hotshot/src/traits/storage.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Abstraction over on-disk storage of node state -// pub mod atomic_storage; -pub mod memory_storage; - -pub use hotshot_types::traits::storage::{Result, Storage}; diff --git a/hotshot/src/traits/storage/atomic_storage.rs b/hotshot/src/traits/storage/atomic_storage.rs deleted file mode 100644 index 6da61d4560..0000000000 --- a/hotshot/src/traits/storage/atomic_storage.rs +++ /dev/null @@ -1,254 +0,0 @@ -//! On-disk storage of node state. Based on [`atomic_store`](https://github.com/EspressoSystems/atomicstore). - -mod dual_key_value_store; -mod hash_map_store; - -use self::{dual_key_value_store::DualKeyValueStore, hash_map_store::HashMapStore}; -use crate::{data::Leaf, traits::StateContents}; -use async_std::sync::Mutex; -use async_trait::async_trait; -use atomic_store::{AtomicStore, AtomicStoreLoader}; -use commit::Commitment; -use hotshot_types::traits::storage::{ - AtomicStoreSnafu, Storage, StorageError, StorageResult, StorageState, StorageUpdater, - TestableStorage, -}; -use serde::{de::DeserializeOwned, Serialize}; -use snafu::ResultExt; -use std::{path::Path, sync::Arc}; -use tempfile::{tempdir, TempDir}; -use tracing::{instrument, trace}; - -/// Inner state of an atomic storage -struct AtomicStorageInner -where - STATE: DeserializeOwned + Serialize + StateContents, -{ - /// Temporary directory storage might live in - /// (we want to delete the temporary directory when storage is droppped) - _temp_dir: Option, - /// The atomic store loader - atomic_store: Mutex, - - /// The Blocks stored by this [`AtomicStorage`] - blocks: HashMapStore, STATE::BlockPayload>, - - /// The [`QuorumCertificate`]s stored by this [`AtomicStorage`] - qcs: DualKeyValueStore>, - - /// The [`Leaf`s stored by this [`AtomicStorage`] - /// - /// In order to maintain the struct constraints, this list must be append only. Once a QC is - /// inserted, it index _must not_ change - leaves: DualKeyValueStore>, - - /// The store of states - states: HashMapStore>, STATE>, -} - -/// Persistent [`Storage`] implementation, based upon [`atomic_store`]. -#[derive(Clone)] -pub struct AtomicStorage -where - STATE: DeserializeOwned + Serialize + StateContents, -{ - /// Inner state of the atomic storage - inner: Arc>, -} - -impl TestableStorage for AtomicStorage { - fn construct_tmp_storage() -> StorageResult { - let tempdir = tempdir().map_err(|e| StorageError::InconsistencyError { - description: e.to_string(), - })?; - let loader = AtomicStoreLoader::create(tempdir.path(), "hotshot").map_err(|e| { - StorageError::InconsistencyError { - description: e.to_string(), - } - })?; - Self::init_from_loader(loader, Some(tempdir)) - .map_err(|e| StorageError::AtomicStore { source: e }) - } -} - -impl AtomicStorage -where - STATE: StateContents, -{ - /// Creates an atomic storage at a given path. If files exist, will back up existing directory before creating. - /// - /// # Errors - /// - /// Returns the underlying errors that the following types can throw: - /// - [`atomic_store::AtomicStoreLoader`] - /// - [`atomic_store::AtomicStore`] - /// - [`atomic_store::RollingLog`] - /// - [`atomic_store::AppendLog`] - pub fn create(path: &Path) -> atomic_store::Result { - let loader = AtomicStoreLoader::create(path, "hotshot")?; - Self::init_from_loader(loader, None) - } - - /// Open an atomic storage at a given path. - /// - /// # Errors - /// - /// Returns the underlying errors that the following types can throw: - /// - [`atomic_store::AtomicStoreLoader`] - /// - [`atomic_store::AtomicStore`] - /// - [`atomic_store::RollingLog`] - /// - [`atomic_store::AppendLog`] - pub fn open(path: &Path) -> atomic_store::Result { - let loader = AtomicStoreLoader::load(path, "hotshot")?; - Self::init_from_loader(loader, None) - } - - /// Open an atomic storage with a given [`AtomicStoreLoader`] - /// - /// # Errors - /// - /// Returns the underlying errors that the following types can throw: - /// - [`atomic_store::AtomicStore`] - /// - [`atomic_store::RollingLog`] - /// - [`atomic_store::AppendLog`] - pub fn init_from_loader( - mut loader: AtomicStoreLoader, - dir: Option, - ) -> atomic_store::Result { - let blocks = HashMapStore::load(&mut loader, "hotshot_blocks")?; - let qcs = DualKeyValueStore::open(&mut loader, "hotshot_qcs")?; - let leaves = DualKeyValueStore::open(&mut loader, "hotshot_leaves")?; - let states = HashMapStore::load(&mut loader, "hotshot_states")?; - - let atomic_store = AtomicStore::open(loader)?; - - Ok(Self { - inner: Arc::new(AtomicStorageInner { - _temp_dir: dir, - atomic_store: Mutex::new(atomic_store), - blocks, - qcs, - leaves, - states, - }), - }) - } -} - -#[async_trait] -impl Storage for AtomicStorage { - #[instrument(name = "AtomicStorage::get_block", skip_all)] - async fn get_block( - &self, - hash: &Commitment, - ) -> StorageResult> { - Ok(self.inner.blocks.get(hash).await) - } - - #[instrument(name = "AtomicStorage::get_qc", skip_all)] - async fn get_qc( - &self, - hash: &Commitment, - ) -> StorageResult>> { - Ok(self.inner.qcs.load_by_key_1_ref(hash).await) - } - - #[instrument(name = "AtomicStorage::get_newest_qc", skip_all)] - async fn get_newest_qc(&self) -> StorageResult>> { - Ok(self.inner.qcs.load_latest(|qc| qc.get_view_number()).await) - } - - #[instrument(name = "AtomicStorage::get_qc_for_view", skip_all)] - async fn get_qc_for_view( - &self, - view: TYPES::Time, - ) -> StorageResult>> { - Ok(self.inner.qcs.load_by_key_2(view).await) - } - - #[instrument(name = "AtomicStorage::get_leaf", skip_all)] - async fn get_leaf(&self, hash: &Commitment>) -> StorageResult>> { - Ok(self.inner.leaves.load_by_key_1_ref(hash).await) - } - - #[instrument(name = "AtomicStorage::get_leaf_by_block", skip_all)] - async fn get_leaf_by_block( - &self, - hash: &Commitment, - ) -> StorageResult>> { - Ok(self.inner.leaves.load_by_key_2_ref(hash).await) - } - - async fn get_internal_state(&self) -> StorageState { - let mut blocks: Vec<(Commitment, STATE::BBlockPayloadlock)> = - self.inner.blocks.load_all().await.into_iter().collect(); - - blocks.sort_by_key(|(hash, _)| *hash); - let blocks = blocks.into_iter().map(|(_, block)| block).collect(); - - let mut leafs: Vec> = self.inner.leaves.load_all().await; - leafs.sort_by_cached_key(Leaf::hash); - - let mut quorum_certificates = self.inner.qcs.load_all().await; - quorum_certificates.sort_by_key(|qc| qc.view_number()); - - let mut states: Vec<(Commitment>, STATE)> = - self.inner.states.load_all().await.into_iter().collect(); - states.sort_by_key(|(hash, _)| *hash); - let states = states.into_iter().map(|(_, state)| state).collect(); - - StorageState { - blocks, - quorum_certificates, - leafs, - states, - } - } -} - -/// Implementation of [`StorageUpdater`] for the [`AtomicStorage`] -struct AtomicStorageUpdater<'a, S: StateContents> { - /// A reference to the internals of the [`AtomicStorage`] - inner: &'a AtomicStorageInner, -} - -#[async_trait] -impl<'a, STATE: StateContents + 'static> StorageUpdater<'a, STATE> - for AtomicStorageUpdater<'a, STATE> -{ - #[instrument(name = "AtomicStorage::get_block", skip_all)] - async fn insert_block( - &mut self, - hash: Commitment, - block: STATE::BlockPayload, - ) -> StorageResult { - trace!(?block, "inserting block"); - self.inner - .blocks - .insert(hash, block) - .await - .context(AtomicStoreSnafu)?; - Ok(()) - } - - #[instrument(name = "AtomicStorage::insert_leaf", skip_all)] - async fn insert_leaf(&mut self, leaf: Leaf) -> StorageResult { - self.inner.leaves.insert(leaf).await - } - - #[instrument(name = "AtomicStorage::insert_qc", skip_all)] - async fn insert_qc(&mut self, qc: QuorumCertificate) -> StorageResult { - self.inner.qcs.insert(qc).await - } - - #[instrument(name = "AtomicStorage::insert_state", skip_all)] - async fn insert_state(&mut self, state: STATE, hash: Commitment>) -> StorageResult { - trace!(?hash, "Inserting state"); - self.inner - .states - .insert(hash, state) - .await - .context(AtomicStoreSnafu)?; - Ok(()) - } -} diff --git a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs b/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs deleted file mode 100644 index e947b70127..0000000000 --- a/hotshot/src/traits/storage/atomic_storage/dual_key_value_store.rs +++ /dev/null @@ -1,215 +0,0 @@ -//! A store that operates on a value with 2 different keys. -//! -//! Implementations should implement [`DualKeyValue`] before they can use [`DualKeyValueStore`]. - -use async_std::sync::RwLock; -use atomic_store::{load_store::BincodeLoadStore, AppendLog, AtomicStoreLoader}; -use commit::{Commitment, Committable}; -use hotshot_types::{ - data::{Leaf, ViewNumber}, - traits::{ - storage::{AtomicStoreSnafu, InconsistencySnafu, StorageError}, - StateContents, - }, -}; -use serde::{de::DeserializeOwned, Serialize}; -use snafu::ResultExt; -use std::{collections::HashMap, hash::Hash}; - -/// A store that allows lookup of a value by 2 different keys. -pub struct DualKeyValueStore { - /// inner value - inner: RwLock>, -} - -/// The inner struct of the [`DualKeyValueStore`] -struct Inner { - /// The underlying store - store: AppendLog>, - - /// Key 1 to index - key_1: HashMap, - - /// Key 2 to index - key_2: HashMap, - - /// Actual values. This list should be append-only - values: Vec, -} - -impl DualKeyValueStore { - /// Open the [`DualKeyValueStore`] with the given loader and name. - /// - /// # Errors - /// - /// Returns any errors that [`AppendLog`]'s `load` returns. - pub fn open( - loader: &mut AtomicStoreLoader, - name: &str, - ) -> Result { - let store = AppendLog::load(loader, BincodeLoadStore::default(), name, 1024)?; - let values = store - .iter() - .collect::, atomic_store::PersistenceError>>() - .unwrap_or_default(); - let key_1 = values - .iter() - .enumerate() - .map(|(idx, v)| (v.key_1(), idx)) - .collect(); - let key_2 = values - .iter() - .enumerate() - .map(|(idx, v)| (v.key_2(), idx)) - .collect(); - Ok(Self { - inner: RwLock::new(Inner { - store, - key_1, - key_2, - values, - }), - }) - } - - /// Load the `K` value based on the 1st key. - pub async fn load_by_key_1_ref(&self, k: &K::Key1) -> Option { - let read = self.inner.read().await; - let idx = read.key_1.get(k).copied()?; - Some(read.values[idx].clone()) - } - - /// Load the `K` value based on a reference of the 2nd key. - pub async fn load_by_key_2_ref(&self, k: &K::Key2) -> Option { - let read = self.inner.read().await; - let idx = read.key_2.get(k).copied()?; - Some(read.values[idx].clone()) - } - - /// Load the `K` value based on the 2nd key. - pub async fn load_by_key_2(&self, k: K::Key2) -> Option { - self.load_by_key_2_ref(&k).await - } - - /// Load the latest inserted entry in this [`DualKeyValueStore`] - pub async fn load_latest(&self, cb: F) -> Option - where - F: FnMut(&&K) -> V, - V: std::cmp::Ord, - { - let read = self.inner.read().await; - read.values.iter().max_by_key::(cb).cloned() - } - - /// Load all entries in this [`DualKeyValueStore`] - pub async fn load_all(&self) -> Vec { - self.inner.read().await.values.clone() - } - - /// Insert a value into this [`DualKeyValueStore`] - /// - /// # Errors - /// - /// Returns any errors that [`AppendLog`]'s `store_resource` returns. - pub async fn insert(&self, val: K) -> Result<(), StorageError> { - let mut lock = self.inner.write().await; - - match (lock.key_1.get(&val.key_1()), lock.key_2.get(&val.key_2())) { - (Some(idx), Some(key_2_idx)) if idx == key_2_idx => { - // updating - let idx = *idx; - - // TODO: This still adds a duplicate `K` in the storage - // ideally we'd update this record instead - lock.store.store_resource(&val).context(AtomicStoreSnafu)?; - lock.values[idx] = val; - Ok(()) - } - (Some(_), Some(_)) => InconsistencySnafu { - description: format!("Could not insert {}, both {} and {} already exist, but point at different records", std::any::type_name::(), K::KEY_1_NAME, K::KEY_2_NAME), - } - .fail(), - (Some(_), None) => InconsistencySnafu { - description: format!("Could not insert {}, {} already exists but {} does not", std::any::type_name::(), K::KEY_1_NAME, K::KEY_2_NAME), - } - .fail(), - (None, Some(_)) => InconsistencySnafu { - description: format!("Could not insert {}, {} already exists but {} does not", std::any::type_name::(), K::KEY_2_NAME, K::KEY_1_NAME), - } - .fail(), - (None, None) => { - // inserting - lock.store.store_resource(&val).context(AtomicStoreSnafu)?; - - let idx = lock.values.len(); - lock.key_1.insert(val.key_1(), idx); - lock.key_2.insert(val.key_2(), idx); - lock.values.push(val); - - Ok(()) - } - } - } - - /// Commit this [`DualKeyValueStore`]. - /// - /// # Errors - /// - /// Returns any errors that [`AppendLog`]'s `commit_version` returns. - pub async fn commit_version(&self) -> atomic_store::Result<()> { - let mut lock = self.inner.write().await; - lock.store.commit_version()?; - Ok(()) - } -} - -/// A dual key value. Used for [`DualKeyValueStore`] -pub trait DualKeyValue: Serialize + DeserializeOwned + Clone { - /// The name of the first key - const KEY_1_NAME: &'static str; - /// The first key type - type Key1: Serialize + DeserializeOwned + Hash + Eq; - /// Get a copy of the first key - fn key_1(&self) -> Self::Key1; - - /// The name of the second key - const KEY_2_NAME: &'static str; - /// The second key type - type Key2: Serialize + DeserializeOwned + Hash + Eq; - /// Get a clone of the second key - fn key_2(&self) -> Self::Key2; -} - -impl DualKeyValue for QuorumCertificate { - type Key1 = Commitment; - type Key2 = ViewNumber; - - const KEY_1_NAME: &'static str = "payload_commitment"; - const KEY_2_NAME: &'static str = "view_number"; - - fn key_1(&self) -> Self::Key1 { - self.payload_commitment - } - fn key_2(&self) -> Self::Key2 { - self.view_number - } -} - -impl DualKeyValue for Leaf -where - STATE: StateContents, -{ - type Key1 = Commitment>; - type Key2 = Commitment; - - const KEY_1_NAME: &'static str = "leaf_commitment"; - const KEY_2_NAME: &'static str = "payload_commitment"; - - fn key_1(&self) -> Self::Key1 { - self.commit() - } - - fn key_2(&self) -> Self::Key2 { - ::commit(&self.deltas) - } -} diff --git a/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs b/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs deleted file mode 100644 index dbe2fdbcef..0000000000 --- a/hotshot/src/traits/storage/atomic_storage/hash_map_store.rs +++ /dev/null @@ -1,91 +0,0 @@ -//! A store based on [`RollingLog`] - -use async_std::sync::RwLock; -use atomic_store::{load_store::BincodeLoadStore, AtomicStoreLoader, RollingLog}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, hash::Hash}; - -/// A store with [`RollingLog`] as the storage system. -pub struct HashMapStore -where - K: Eq + Hash, - HashMap: Serialize + DeserializeOwned, -{ - /// Inner value - inner: RwLock>, -} - -/// The inner value of the [`HashMapStore`] -struct Inner -where - K: Eq + Hash, - HashMap: Serialize + DeserializeOwned, -{ - /// The underlying atomic_store store - store: RollingLog>>, - /// Data currently loaded in the store - data: HashMap, -} - -impl HashMapStore -where - K: Eq + Hash, - V: Clone, - HashMap: Serialize + DeserializeOwned + Clone, -{ - /// Load a `HashMapStore` with the given loader and name. - /// - /// # Errors - /// - /// Returns any errors that [`RollingLog`]'s `load` returns. - pub fn load(loader: &mut AtomicStoreLoader, name: &str) -> atomic_store::Result { - let store = RollingLog::load(loader, BincodeLoadStore::default(), name, 1024)?; - let data = store.load_latest().unwrap_or_default(); - Ok(Self { - inner: RwLock::new(Inner { store, data }), - }) - } - - /// Get an entry in this store. Returning `Some(V)` if it was found. - pub async fn get(&self, hash: &K) -> Option { - let read = self.inner.read().await; - read.data.get(hash).cloned() - } - - /// Insert a new key-value entry into the store. This won't be committed until `commit` is called. - pub async fn insert(&self, key: K, val: V) -> atomic_store::Result<()> { - let mut lock = self.inner.write().await; - // Make sure to commit the store first before updating the internal value - // this makes sure that in a case of an error, the internal state is still correct - let mut data = lock.data.clone(); - data.insert(key, val); - lock.store.store_resource(&data)?; - - lock.data = data; - Ok(()) - } - - /// Commit this rolling store, returning a commit lock. - /// - /// Once all stores are committed, you need to call `apply` on this lock, or else the commit will be reverted once the lock goes out of scope. - /// - /// # Errors - /// - /// Returns any errors that [`RollingLog`]'s `store_resource` and `commit_version` returns. - pub async fn commit_version(&self) -> atomic_store::Result<()> { - let mut lock = self.inner.write().await; - lock.store.commit_version()?; - Ok(()) - } -} - -impl HashMapStore -where - HashMap: Serialize + DeserializeOwned + Clone, - K: Eq + Hash, -{ - /// Returns all data stored in this [`HashMapStore`]. - pub async fn load_all(&self) -> HashMap { - self.inner.read().await.data.clone() - } -} diff --git a/hotshot/src/traits/storage/memory_storage.rs b/hotshot/src/traits/storage/memory_storage.rs deleted file mode 100644 index bf4bc727eb..0000000000 --- a/hotshot/src/traits/storage/memory_storage.rs +++ /dev/null @@ -1,106 +0,0 @@ -//! [`HashMap`](std::collections::HashMap) and [`Vec`] based implementation of the storage trait -//! -//! This module provides a non-persisting, dummy adapter for the [`Storage`] trait -use async_lock::RwLock; -use async_trait::async_trait; -use hotshot_types::traits::{ - node_implementation::NodeType, - storage::{ - Result, Storage, StorageError, StorageState, StoredView, TestableStorage, ViewEntry, - }, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, -}; - -/// Internal state for a [`MemoryStorage`] -struct MemoryStorageInternal { - /// The views that have been stored - stored: BTreeMap>, - /// The views that have failed - failed: BTreeSet, -} - -/// In memory, ephemeral, storage for a [`SystemContext`](crate::SystemContext) instance -#[derive(Clone)] -pub struct MemoryStorage { - /// The inner state of this [`MemoryStorage`] - inner: Arc>>, -} - -impl MemoryStorage { - /// Create a new instance of the memory storage with the given block and state - #[must_use] - pub fn empty() -> Self { - let inner = MemoryStorageInternal { - stored: BTreeMap::new(), - failed: BTreeSet::new(), - }; - Self { - inner: Arc::new(RwLock::new(inner)), - } - } -} - -#[async_trait] -impl TestableStorage for MemoryStorage { - fn construct_tmp_storage() -> Result { - Ok(Self::empty()) - } - - async fn get_full_state(&self) -> StorageState { - let inner = self.inner.read().await; - StorageState { - stored: inner.stored.clone(), - failed: inner.failed.clone(), - } - } -} - -#[async_trait] -impl Storage for MemoryStorage { - async fn append(&self, views: Vec>) -> Result { - let mut inner = self.inner.write().await; - for view in views { - match view { - ViewEntry::Failed(num) => { - inner.failed.insert(num); - } - ViewEntry::Success(view) => { - inner.stored.insert(view.view_number, view); - } - } - } - Ok(()) - } - - async fn cleanup_storage_up_to_view(&self, view: TYPES::Time) -> Result { - let mut inner = self.inner.write().await; - - // .split_off will return everything after the given key, including the key. - let stored_after = inner.stored.split_off(&view); - // .split_off will return the map we want to keep stored, so we need to swap them - let old_stored = std::mem::replace(&mut inner.stored, stored_after); - - // same for the BTreeSet - let failed_after = inner.failed.split_off(&view); - let old_failed = std::mem::replace(&mut inner.failed, failed_after); - - Ok(old_stored.len() + old_failed.len()) - } - - async fn get_anchored_view(&self) -> Result> { - let inner = self.inner.read().await; - let last = inner - .stored - .values() - .next_back() - .ok_or(StorageError::NoGenesisView)?; - Ok(last.clone()) - } - - async fn commit(&self) -> Result { - Ok(()) // do nothing - } -} diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 58bfb99cbc..5c19c38393 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -38,9 +38,6 @@ pub struct SystemContextHandle> { /// Internal reference to the underlying [`SystemContext`] pub hotshot: Arc>, - - /// Our copy of the `Storage` view for a hotshot - pub(crate) storage: I::Storage, } impl + 'static> SystemContextHandle { @@ -53,6 +50,7 @@ impl + 'static> SystemContextHandl /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper + #[must_use] pub fn get_event_stream_known_impl(&self) -> Receiver> { self.output_event_stream.1.activate_cloned() } @@ -62,6 +60,7 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper /// NOTE: this is only used for sanity checks in our tests + #[must_use] pub fn get_internal_event_stream_known_impl(&self) -> Receiver>> { self.internal_event_stream.1.activate_cloned() } @@ -98,6 +97,7 @@ impl + 'static> SystemContextHandl /// /// # Panics /// Panics if internal consensus is in an inconsistent state. + #[must_use] pub fn try_get_decided_leaf(&self) -> Option> { self.hotshot.try_get_decided_leaf() } @@ -117,13 +117,8 @@ impl + 'static> SystemContextHandl self.hotshot.publish_transaction_async(tx).await } - /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to - /// historical data - pub fn storage(&self) -> &I::Storage { - &self.storage - } - /// Get the underlying consensus state for this [`SystemContext`] + #[must_use] pub fn get_consensus(&self) -> Arc>> { self.hotshot.get_consensus() } @@ -149,6 +144,7 @@ impl + 'static> SystemContextHandl } /// return the timeout for a view of the underlying `SystemContext` + #[must_use] pub fn get_next_view_timeout(&self) -> u64 { self.hotshot.get_next_view_timeout() } @@ -165,6 +161,7 @@ impl + 'static> SystemContextHandl // Below is for testing only: /// Wrapper to get this node's public key #[cfg(feature = "hotshot-testing")] + #[must_use] pub fn get_public_key(&self) -> TYPES::SignatureKey { self.hotshot.public_key.clone() } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f32a617280..cfb47116e6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -818,11 +818,6 @@ impl, A: ConsensusApi + .number_of_views_per_decide_event .add_point(cur_number_of_views_per_decide_event as f64); - // We're only storing the last QC. We could store more but we're realistically only going to retrieve the last one. - if let Err(e) = self.api.store_leaf(old_anchor_view, leaf).await { - error!("Could not insert new anchor into the storage API: {:?}", e); - } - debug!("Sending Decide for view {:?}", consensus.last_decided_view); debug!("Decided txns len {:?}", included_txns_set.len()); decide_sent.await; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 1de2267211..67570ebba9 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -99,7 +99,7 @@ where Left(context) => context, // Node not initialized. Initialize it // based on the received leaf. - Right((storage, memberships, config)) => { + Right((memberships, config)) => { let initializer = HotShotInitializer::::from_reload( state.last_decided_leaf.clone(), TestInstanceState {}, @@ -114,7 +114,6 @@ where TestRunner::add_node_with_config( node_id, node.networks.clone(), - storage, memberships, initializer, config, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 39f83682ce..4ce41423e6 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -63,7 +63,6 @@ pub async fn build_system_handle( let launcher = builder.gen_launcher::(node_id); let networks = (launcher.resource_generator.channel_generator)(node_id); - let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); @@ -117,7 +116,6 @@ pub async fn build_system_handle( private_key, node_id, config, - storage, memberships, networks_bundle, initializer, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 1293903fbd..1d1342911a 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -329,7 +329,6 @@ impl TestMetadata { unreliable_network, secondary_network_delay, ), - storage: Box::new(|_| I::construct_tmp_storage().unwrap()), config, }, metadata: self, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index d6196b86d1..eb35ce7472 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -22,8 +22,6 @@ pub type Generator = Box T + 'static>; pub struct ResourceGenerators> { /// generate channels pub channel_generator: Generator>, - /// generate a new storage for each node - pub storage: Generator<>::Storage>, /// configuration used to generate each hotshot node pub config: HotShotConfig, } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 72ed056f0e..74354f6081 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -58,7 +58,6 @@ pub struct Node> { pub type LateNodeContext = Either< Arc>, ( - >::Storage, Memberships, HotShotConfig<::SignatureKey, ::ElectionConfigType>, ), @@ -327,7 +326,6 @@ where let node_id = self.next_node_id; self.next_node_id += 1; tracing::debug!("launch node {}", i); - let storage = (self.launcher.resource_generator.storage)(node_id); let config = self.launcher.resource_generator.config.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { @@ -365,7 +363,7 @@ where node_id, LateStartNode { networks, - context: Right((storage, memberships, config)), + context: Right((memberships, config)), }, ); } else { @@ -377,7 +375,6 @@ where let hotshot = Self::add_node_with_config( node_id, networks.clone(), - storage, memberships, initializer, config, @@ -412,7 +409,6 @@ where pub async fn add_node_with_config( node_id: u64, networks: Networks, - storage: I::Storage, memberships: Memberships, initializer: HotShotInitializer, config: HotShotConfig, @@ -433,7 +429,6 @@ where private_key, node_id, config, - storage, memberships, network_bundle, initializer, diff --git a/testing/tests/atomic_storage.rs b/testing/tests/atomic_storage.rs deleted file mode 100644 index 741b4068aa..0000000000 --- a/testing/tests/atomic_storage.rs +++ /dev/null @@ -1,145 +0,0 @@ -#![cfg(foo)] -use hotshot::{ - certificate::QuorumCertificate, - demos::vdemo::{ - random_quorom_certificate, random_transaction, random_validating_leaf, VDemoBlock, - VDemoState, - }, - traits::{BlockPayload, Storage, ValidatedState}, -}; -use hotshot_types::{data::ViewNumber, traits::statesTestableState}; -use rand::thread_rng; - -type AtomicStorage = hotshot::traits::implementations::AtomicStorage; - -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_happy_path_qcs() { - // This folder will be destroyed when the last handle to it closes - let file = tempfile::tempdir().expect("Could not create temp dir"); - let path = file.path(); - println!("Using store in {:?}", path); - let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); - - // Add some certificates - let mut certs = Vec::>::new(); - for i in 0..10 { - let cert = QuorumCertificate { - view_number: ViewNumber::new(i), - ..random_quorom_certificate() - }; - println!("Inserting {:?}", cert); - store - .update(|mut m| { - let cert = cert.clone(); - async move { m.insert_qc(cert).await } - }) - .await - .unwrap(); - certs.push(cert); - } - - // read them all back 3 times - // 1st time: normal readback - // 2nd: after dropping and re-opening the store - for i in 0..2 { - if i == 1 { - drop(store); - store = AtomicStorage::open(path).expect("Could not open atomic store"); - } - - for cert in &certs { - match store - .get_qc_for_view(cert.view_number) - .await - .expect("Could not read view_number") - { - Some(c) => { - println!("read {:?}", c); - assert_eq!(&c, cert); - } - None => panic!("Could not read {:?}: {:?}", cert.view_number, cert), - } - match store - .get_qc(&cert.block_hash) - .await - .expect("Could not read qc by hash") - { - Some(c) => { - println!("read {:?}", c); - assert_eq!(&c, cert); - } - None => panic!( - "Could not read block_hash {:?}: {:?}", - cert.block_hash, cert - ), - } - } - } -} - -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_happy_path_leaves() { - // This folder will be destroyed when the last handle to it closes - let file = tempfile::tempdir().expect("Could not create temp dir"); - let path = file.path(); - println!("Using store in {:?}", path); - let mut store = AtomicStorage::open(path).expect("Could not open atomic store"); - - // Add some leaves - let mut leaves = Vec::>::new(); - for _ in 0..10 { - let leaf = random_validating_leaf(DEntryBlock { - previous_block: StateHash::random(), - ..Default::default() - }); - println!("Inserting {:?}", leaf); - store - .update(|mut m| { - let leaf = leaf.clone(); - async move { m.insert_leaf(leaf).await } - }) - .await - .unwrap(); - leaves.push(leaf); - } - - // read them all back 2 times - // 1st time: normal readback - // 2nd: after dropping and re-opening the store - for i in 0..2 { - if i == 1 { - drop(store); - store = AtomicStorage::open(path).expect("Could not open atomic store"); - } - - for leaf in &leaves { - match store - .get_leaf(&leaf.hash()) - .await - .expect("Could not read leaf hash") - { - Some(l) => { - println!("read {:?}", l); - assert_eq!(&l, leaf); - } - None => { - panic!("Could not read leaf hash {:?}: {:?}", leaf.hash(), leaf) - } - } - let hash = BlockContents::hash(&leaf.deltas); - match store - .get_leaf_by_block(&hash) - .await - .expect("Could not read leaf by block") - { - Some(l) => { - println!("read {:?}", l); - assert_eq!(&l, leaf); - } - None => panic!("Could not read leaf hash {:?}: {:?}", hash, leaf), - } - } - } -} diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 4a8e8f47ee..086cacaa91 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -4,9 +4,7 @@ use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; use hotshot::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; -use hotshot::traits::implementations::{ - MasterMap, MemoryNetwork, MemoryStorage, NetworkingMetricsValue, -}; +use hotshot::traits::implementations::{MasterMap, MemoryNetwork, NetworkingMetricsValue}; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; use hotshot_example_types::state_types::TestInstanceState; @@ -67,7 +65,6 @@ pub type ViewSyncNetwork = MemoryNetwork, ::Sign pub type VIDNetwork = MemoryNetwork, ::SignatureKey>; impl NodeImplementation for TestImpl { - type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; } diff --git a/testing/tests/storage.rs b/testing/tests/storage.rs deleted file mode 100644 index 2d1968990e..0000000000 --- a/testing/tests/storage.rs +++ /dev/null @@ -1,61 +0,0 @@ -use commit::Committable; -use hotshot::traits::implementations::MemoryStorage; -use hotshot::traits::Storage; -use hotshot_example_types::node_types::TestTypes; -use hotshot_types::{ - data::Leaf, - simple_certificate::QuorumCertificate, - traits::{ - node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, - storage::{StoredView, TestableStorage}, - }, -}; -use std::marker::PhantomData; -use tracing::instrument; - -fn random_stored_view(view_number: ::Time) -> StoredView { - let mut leaf = Leaf::genesis(&Default::default()); - leaf.view_number = view_number; - let leaf_commit = leaf.commit(); - let data = hotshot_types::simple_vote::QuorumData { leaf_commit }; - let commit = data.commit(); - StoredView::from_qc_block_and_state( - QuorumCertificate { - is_genesis: view_number == ::Time::genesis(), - data, - vote_commitment: commit, - signatures: None, - view_number, - _pd: PhantomData, - }, - leaf.block_header, - leaf.block_payload, - leaf_commit, - <::SignatureKey as SignatureKey>::genesis_proposer_pk(), - ) -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn memory_storage() { - let storage = MemoryStorage::construct_tmp_storage().unwrap(); - let genesis = random_stored_view(::Time::genesis()); - storage - .append_single_view(genesis.clone()) - .await - .expect("Could not append block"); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number) - .await - .unwrap(); - assert_eq!(storage.get_anchored_view().await.unwrap(), genesis); - storage - .cleanup_storage_up_to_view(genesis.view_number + 1) - .await - .unwrap(); - assert!(storage.get_anchored_view().await.is_err()); -} From 5c3e1595c52b4928b6e52ff6adad16a5b26e754d Mon Sep 17 00:00:00 2001 From: Anders Konring Date: Mon, 18 Mar 2024 17:33:21 +0100 Subject: [PATCH 0870/1393] fix missing config entry (#2797) --- orchestrator/run-config.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 51e7127d2d..51197939c4 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -87,6 +87,10 @@ nanos = 100000000 # 10 ms secs = 0 nanos = 0 +[config.view_sync_timeout] +secs = 2 +nanos = 0 + # TODO (Keyao) Clean up configuration parameters. # [config.propose_max_round_time] From 71bd724fd256fcf9dfe37868e0343017ab0ff428 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 18 Mar 2024 19:47:48 -0400 Subject: [PATCH 0871/1393] [Push CDN] CI fixes (#2801) * add per-test, unique embedded db * update the pCDN --- .../src/traits/networking/push_cdn_network.rs | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 50e921f440..0a5fff1fd9 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -13,6 +13,7 @@ use cdn_client::{ }; use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; use hotshot_utils::bincode::bincode_opts; +use rand::{rngs::StdRng, RngCore, SeedableRng}; use tracing::{error, warn}; use async_compatibility_layer::art::{async_block_on, async_spawn}; @@ -32,7 +33,7 @@ use hotshot_types::{ }, BoxSyncFuture, }; -use std::{collections::BTreeSet, sync::Arc, time::Duration}; +use std::{collections::BTreeSet, path::Path, sync::Arc, time::Duration}; /// A wrapped `SignatureKey`. We need to implement the Push CDN's `SignatureScheme` /// trait in order to sign and verify messages to/from the CDN. @@ -162,13 +163,17 @@ impl TestableNetworkingImplementation for PushCdnNetwork let (broker_public_key, broker_private_key) = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); - // The broker (peer) discovery endpoint shall be a local SQLite file - let discovery_endpoint = "test.sqlite".to_string(); + // Get the OS temporary directory + let temp_dir = std::env::temp_dir(); - // Try to delete the file at the discovery endpoint to maintain consistency between tests - if let Err(err) = std::fs::remove_file(discovery_endpoint.clone()) { - warn!("failed to delete pre-existing database: {err}"); - }; + // Create an SQLite file inside of the temporary directory + let discovery_endpoint = temp_dir + .join(Path::new(&format!( + "test-{}.sqlite", + StdRng::from_entropy().next_u64() + ))) + .to_string_lossy() + .into_owned(); // 2 brokers for _ in 0..2 { @@ -218,7 +223,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); let marshal_config = MarshalConfigBuilder::default() .bind_address(marshal_endpoint.clone()) - .discovery_endpoint("test.sqlite".to_string()) + .discovery_endpoint(discovery_endpoint) .build() .expect("failed to build marshal config"); From 0fb6338978df768c694ba08a371618c61895e105 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 19 Mar 2024 09:49:27 -0400 Subject: [PATCH 0872/1393] `spawn_blocking` when Calculating VID Commitment (#2802) * don't block executor calculating vid commit * fix imports for 2 executors --- task-impls/src/da.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9691059aaf..231f037164 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -5,6 +5,8 @@ use crate::{ }; use async_broadcast::Sender; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::spawn_blocking; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ @@ -29,6 +31,8 @@ use sha2::{Digest, Sha256}; use crate::vote::HandleVoteEvent; use std::{marker::PhantomData, sync::Arc}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::spawn_blocking; use tracing::{debug, error, instrument, warn}; /// Alias for Optional type for Vote Collectors @@ -112,10 +116,13 @@ impl, A: ConsensusApi + return None; } - let payload_commitment = vid_commitment( - &proposal.data.encoded_transactions, - self.quorum_membership.total_nodes(), - ); + let txns = proposal.data.encoded_transactions.clone(); + let num_nodes = self.quorum_membership.total_nodes(); + let payload_commitment = + spawn_blocking(move || vid_commitment(&txns, num_nodes)).await; + #[cfg(async_executor_impl = "tokio")] + let payload_commitment = payload_commitment.unwrap(); + let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? From 46df34f5d0c54ad0b2a12faf9e47ed26ed37596b Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 19 Mar 2024 11:33:16 -0400 Subject: [PATCH 0873/1393] [CX-CLEANUP] - DA Integrated Storage (#2799) * integrate new storage type * fix build, tie up stragglers, modifying tasks next * merge latest stable tabs * merge latest tags and fix method signatures * add vid_share storage back * add tests for failure cases, add support for DA task in view generator, fix old da test --- example-types/Cargo.toml | 2 + example-types/src/lib.rs | 3 + example-types/src/node_types.rs | 6 + example-types/src/storage_types.rs | 67 +++++++++++ examples/combined/types.rs | 3 +- examples/infra/mod.rs | 20 +++- examples/libp2p/types.rs | 3 +- examples/push-cdn/types.rs | 3 +- examples/webserver/types.rs | 3 +- hotshot/Cargo.toml | 2 +- hotshot/src/lib.rs | 10 +- hotshot/src/tasks/task_state.rs | 2 + hotshot/src/types/handle.rs | 11 ++ task-impls/src/consensus.rs | 13 +++ task-impls/src/da.rs | 13 +++ testing/src/spinning_task.rs | 11 +- testing/src/task_helpers.rs | 2 + testing/src/test_builder.rs | 2 + testing/src/test_launcher.rs | 3 + testing/src/test_runner.rs | 16 ++- testing/src/view_generator.rs | 66 ++++++++++- testing/tests/consensus_task.rs | 68 +++++++++-- testing/tests/da_task.rs | 180 ++++++++++++++++++----------- testing/tests/memory_network.rs | 2 + 24 files changed, 416 insertions(+), 95 deletions(-) create mode 100644 example-types/src/storage_types.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index dc8197fc5e..0741d5ee2f 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -16,6 +16,8 @@ gpu-vid = [ [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } +async-trait = { workspace = true } +anyhow = { workspace = true } sha3 = "^0.10" bincode = { workspace = true } commit = { workspace = true } diff --git a/example-types/src/lib.rs b/example-types/src/lib.rs index 42610b84d7..4049099852 100644 --- a/example-types/src/lib.rs +++ b/example-types/src/lib.rs @@ -6,3 +6,6 @@ pub mod state_types; /// node types pub mod node_types; + +/// storage types for hotshot storage +pub mod storage_types; diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index cff49f8cab..ea0c86e6a4 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -5,6 +5,7 @@ use hotshot::traits::{ use crate::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, }; use hotshot::traits::{ @@ -103,24 +104,29 @@ type StaticCombinedQuorumComm = CombinedNetworks; impl NodeImplementation for PushCdnImpl { type QuorumNetwork = StaticPushCdnQuorumComm; type CommitteeNetwork = StaticPushCdnDAComm; + type Storage = TestStorage; } impl NodeImplementation for Libp2pImpl { type QuorumNetwork = StaticLibp2pQuorumComm; type CommitteeNetwork = StaticLibp2pDAComm; + type Storage = TestStorage; } impl NodeImplementation for MemoryImpl { type QuorumNetwork = StaticMemoryQuorumComm; type CommitteeNetwork = StaticMemoryDAComm; + type Storage = TestStorage; } impl NodeImplementation for WebImpl { type QuorumNetwork = StaticWebQuorumComm; type CommitteeNetwork = StaticWebDAComm; + type Storage = TestStorage; } impl NodeImplementation for CombinedImpl { type QuorumNetwork = StaticCombinedQuorumComm; type CommitteeNetwork = StaticCombinedDAComm; + type Storage = TestStorage; } diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs new file mode 100644 index 0000000000..2315a9b840 --- /dev/null +++ b/example-types/src/storage_types.rs @@ -0,0 +1,67 @@ +use anyhow::{bail, Result}; +use async_lock::RwLock; +use async_trait::async_trait; +use hotshot_types::{ + data::{DAProposal, VidDisperse}, + message::Proposal, + traits::{node_implementation::NodeType, storage::Storage}, +}; +use std::collections::HashMap; +use std::sync::Arc; + +#[derive(Clone, Debug)] +pub struct TestStorageState { + vids: HashMap>>, + das: HashMap>>, +} + +impl Default for TestStorageState { + fn default() -> Self { + Self { + vids: HashMap::new(), + das: HashMap::new(), + } + } +} + +#[derive(Clone, Debug)] +pub struct TestStorage { + inner: Arc>>, + + /// `should_return_err` is a testing utility to validate negative cases. + pub should_return_err: bool, +} + +impl Default for TestStorage { + fn default() -> Self { + Self { + inner: Arc::new(RwLock::new(TestStorageState::default())), + should_return_err: false, + } + } +} + +#[async_trait] +impl Storage for TestStorage { + async fn append_vid(&self, proposal: &Proposal>) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + let mut inner = self.inner.write().await; + inner + .vids + .insert(proposal.data.view_number, proposal.clone()); + Ok(()) + } + + async fn append_da(&self, proposal: &Proposal>) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + let mut inner = self.inner.write().await; + inner + .das + .insert(proposal.data.view_number, proposal.clone()); + Ok(()) + } +} diff --git a/examples/combined/types.rs b/examples/combined/types.rs index f11b159823..dbbc1b2e80 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,6 +1,6 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::CombinedNetworks; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -21,6 +21,7 @@ pub type ViewSyncNetwork = CombinedNetworks; impl NodeImplementation for NodeImpl { type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; + type Storage = TestStorage; } /// convenience type alias pub type ThisRun = CombinedDARun; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 1fd4c72b81..e0ec3eeaca 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -20,6 +20,7 @@ use hotshot::{ types::{SignatureKey, SystemContextHandle}, Memberships, Networks, SystemContext, }; +use hotshot_example_types::storage_types::TestStorage; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestInstanceState, @@ -429,7 +430,12 @@ pub trait RunDA< TYPES: NodeType, DANET: ConnectedNetwork, TYPES::SignatureKey>, QUORUMNET: ConnectedNetwork, TYPES::SignatureKey>, - NODE: NodeImplementation, + NODE: NodeImplementation< + TYPES, + QuorumNetwork = QUORUMNET, + CommitteeNetwork = DANET, + Storage = TestStorage, + >, > where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -506,6 +512,7 @@ pub trait RunDA< networks_bundle, initializer, ConsensusMetricsValue::default(), + TestStorage::::default(), ) .await .expect("Could not init hotshot") @@ -705,6 +712,7 @@ impl< TYPES, QuorumNetwork = WebServerNetwork, CommitteeNetwork = WebServerNetwork, + Storage = TestStorage, >, > RunDA, WebServerNetwork, NODE> for WebServerDARun where @@ -778,6 +786,7 @@ impl< TYPES, QuorumNetwork = PushCdnNetwork, CommitteeNetwork = PushCdnNetwork, + Storage = TestStorage, >, > RunDA, PushCdnNetwork, NODE> for PushCdnDaRun where @@ -860,6 +869,7 @@ impl< TYPES, QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>, CommitteeNetwork = Libp2pNetwork, TYPES::SignatureKey>, + Storage = TestStorage, >, > RunDA< @@ -929,6 +939,7 @@ impl< TYPES, QuorumNetwork = CombinedNetworks, CommitteeNetwork = CombinedNetworks, + Storage = TestStorage, >, > RunDA, CombinedNetworks, NODE> for CombinedDARun where @@ -1014,7 +1025,12 @@ pub async fn main_entry_point< >, DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, - NODE: NodeImplementation, + NODE: NodeImplementation< + TYPES, + QuorumNetwork = QUORUMCHANNEL, + CommitteeNetwork = DACHANNEL, + Storage = TestStorage, + >, RUNDA: RunDA, >( args: ValidatorArgs, diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index 407dad4639..73daa27b1e 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,6 +1,6 @@ use crate::infra::Libp2pDARun; use hotshot::traits::implementations::Libp2pNetwork; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::{ message::Message, traits::node_implementation::{NodeImplementation, NodeType}, @@ -20,6 +20,7 @@ pub type QuorumNetwork = Libp2pNetwork, for NodeImpl { type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; + type Storage = TestStorage; } /// convenience type alias pub type ThisRun = Libp2pDARun; diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index d761894416..404b3f0b4b 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -1,6 +1,6 @@ use crate::infra::PushCdnDaRun; use hotshot::traits::implementations::PushCdnNetwork; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -20,6 +20,7 @@ pub type ViewSyncNetwork = PushCdnNetwork; impl NodeImplementation for NodeImpl { type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; + type Storage = TestStorage; } /// Convenience type alias diff --git a/examples/webserver/types.rs b/examples/webserver/types.rs index ee34f38e19..9331fc4933 100644 --- a/examples/webserver/types.rs +++ b/examples/webserver/types.rs @@ -1,6 +1,6 @@ use crate::infra::WebServerDARun; use hotshot::traits::implementations::WebServerNetwork; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -21,6 +21,7 @@ pub type ViewSyncNetwork = WebServerNetwork; impl NodeImplementation for NodeImpl { type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; + type Storage = TestStorage; } /// convenience type alias pub type ThisRun = WebServerDARun; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index b63989d98c..79e402feae 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -54,7 +54,7 @@ snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } -anyhow = "1.0.81" +anyhow = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index c59665f552..5dcd148158 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -148,6 +148,9 @@ pub struct SystemContext> { /// uid for instrumentation pub id: u64, + + /// Reference to the internal storage for consensus datum. + pub storage: Arc>, } impl> SystemContext { @@ -156,7 +159,7 @@ impl> SystemContext { /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. #[allow(clippy::too_many_arguments)] - #[instrument(skip(private_key, memberships, networks, initializer, metrics))] + #[instrument(skip(private_key, memberships, networks, initializer, metrics, storage))] pub async fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -166,6 +169,7 @@ impl> SystemContext { networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, + storage: I::Storage, ) -> Result, HotShotError> { debug!("Creating a new hotshot"); @@ -248,6 +252,7 @@ impl> SystemContext { _metrics: consensus_metrics.clone(), internal_event_stream: (internal_tx, internal_rx.deactivate()), output_event_stream: (external_tx, external_rx.deactivate()), + storage: Arc::new(RwLock::new(storage)), }); Ok(inner) @@ -396,6 +401,7 @@ impl> SystemContext { networks: Networks, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, + storage: I::Storage, ) -> Result< ( SystemContextHandle, @@ -413,6 +419,7 @@ impl> SystemContext { networks, initializer, metrics, + storage, ) .await?; let handle = hotshot.clone().run_tasks().await; @@ -459,6 +466,7 @@ impl> SystemContext { output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), hotshot: self.clone().into(), + storage: self.storage.clone(), }; add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index e36efc5d16..d950cb4414 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -87,6 +87,7 @@ impl> CreateTaskState public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, + storage: handle.storage.clone(), } } } @@ -180,6 +181,7 @@ impl> CreateTaskState timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), committee_membership: handle.hotshot.memberships.da_membership.clone().into(), + storage: handle.storage.clone(), } } } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 5c19c38393..13dba0e411 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -27,6 +27,7 @@ pub struct SystemContextHandle> { /// The Channel will output all the events. Subscribers will get an activated /// clone of the `Receiver` when they get output stream. pub(crate) output_event_stream: (Sender>, InactiveReceiver>), + /// access to the internal event stream, in case we need to, say, shut something down #[allow(clippy::type_complexity)] pub(crate) internal_event_stream: ( @@ -38,6 +39,9 @@ pub struct SystemContextHandle> { /// Internal reference to the underlying [`SystemContext`] pub hotshot: Arc>, + + /// Reference to the internal storage for consensus datum. + pub(crate) storage: Arc>, } impl + 'static> SystemContextHandle { @@ -170,4 +174,11 @@ impl + 'static> SystemContextHandl pub async fn get_cur_view(&self) -> TYPES::Time { self.hotshot.consensus.read().await.cur_view } + + /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to + /// historical data + #[must_use] + pub fn get_storage(&self) -> Arc> { + self.storage.clone() + } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index cfb47116e6..a76e48c42c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -31,6 +31,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, states::ValidatedState, + storage::Storage, BlockPayload, }, utils::{Terminator, ViewInner}, @@ -137,6 +138,9 @@ pub struct ConsensusTaskState< // ED Should replace this with config information since we need it anyway /// The node's id pub id: u64, + + /// This node's storage ref + pub storage: Arc>, } impl, A: ConsensusApi + 'static> @@ -1063,11 +1067,20 @@ impl, A: ConsensusApi + .await; // Add to the storage that we have received the VID disperse for a specific view + if let Err(e) = self.storage.write().await.append_vid(disperse).await { + error!( + "Failed to store VID Disperse Proposal with error {:?}, aborting vote", + e + ); + return; + } + self.consensus .write() .await .vid_shares .insert(view, disperse.clone()); + if self.vote_if_able(&event_stream).await { self.current_proposal = None; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 231f037164..ba63ec81e5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -23,6 +23,7 @@ use hotshot_types::{ network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, + storage::Storage, }, utils::ViewInner, vote::HasViewNumber, @@ -75,6 +76,9 @@ pub struct DATaskState< /// This state's ID pub id: u64, + + /// This node's storage ref + pub storage: Arc>, } impl, A: ConsensusApi + 'static> @@ -155,6 +159,15 @@ impl, A: ConsensusApi + ); return None; } + + if let Err(e) = self.storage.write().await.append_da(proposal).await { + error!( + "Failed to store DA Proposal with error {:?}, aborting vote", + e + ); + return None; + } + // Generate and send vote let Ok(vote) = DAVote::create_signed_vote( DAData { diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 67570ebba9..3268c145f4 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -5,6 +5,7 @@ use crate::test_runner::{LateStartNode, Node, TestRunner}; use either::{Left, Right}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer}; use hotshot_example_types::state_types::TestInstanceState; +use hotshot_example_types::storage_types::TestStorage; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{data::Leaf, ValidatorConfig}; use hotshot_types::{ @@ -64,7 +65,12 @@ impl< > TestTaskState for SpinningTask where I: TestableNodeImplementation, - I: NodeImplementation, + I: NodeImplementation< + TYPES, + QuorumNetwork = N, + CommitteeNetwork = N, + Storage = TestStorage, + >, { type Message = Event; @@ -99,7 +105,7 @@ where Left(context) => context, // Node not initialized. Initialize it // based on the received leaf. - Right((memberships, config)) => { + Right((storage, memberships, config)) => { let initializer = HotShotInitializer::::from_reload( state.last_decided_leaf.clone(), TestInstanceState {}, @@ -118,6 +124,7 @@ where initializer, config, validator_config, + storage, ) .await } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 4ce41423e6..b67e68e36f 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -63,6 +63,7 @@ pub async fn build_system_handle( let launcher = builder.gen_launcher::(node_id); let networks = (launcher.resource_generator.channel_generator)(node_id); + let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); @@ -120,6 +121,7 @@ pub async fn build_system_handle( networks_bundle, initializer, ConsensusMetricsValue::default(), + storage, ) .await .expect("Could not init hotshot") diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 1d1342911a..9481ec7398 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,4 +1,5 @@ use hotshot::traits::NetworkReliability; +use hotshot_example_types::storage_types::TestStorage; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::traits::election::Membership; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; @@ -329,6 +330,7 @@ impl TestMetadata { unreliable_network, secondary_network_delay, ), + storage: Box::new(|_| TestStorage::::default()), config, }, metadata: self, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index eb35ce7472..268649bd10 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -1,6 +1,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; +use hotshot_example_types::storage_types::TestStorage; use hotshot_types::{ message::Message, traits::{network::ConnectedNetwork, node_implementation::NodeType}, @@ -22,6 +23,8 @@ pub type Generator = Box T + 'static>; pub struct ResourceGenerators> { /// generate channels pub channel_generator: Generator>, + /// generate new storage for each node + pub storage: Generator>, /// configuration used to generate each hotshot node pub config: HotShotConfig, } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 74354f6081..9153dabf0e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -15,7 +15,7 @@ use async_broadcast::broadcast; use either::Either::{self, Left, Right}; use futures::future::join_all; use hotshot::{types::SystemContextHandle, Memberships}; -use hotshot_example_types::state_types::TestInstanceState; +use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; @@ -58,6 +58,7 @@ pub struct Node> { pub type LateNodeContext = Either< Arc>, ( + >::Storage, Memberships, HotShotConfig<::SignatureKey, ::ElectionConfigType>, ), @@ -119,7 +120,12 @@ impl< > TestRunner where I: TestableNodeImplementation, - I: NodeImplementation, + I: NodeImplementation< + TYPES, + QuorumNetwork = N, + CommitteeNetwork = N, + Storage = TestStorage, + >, { /// excecute test /// @@ -357,13 +363,14 @@ where ), }; let networks = (self.launcher.resource_generator.channel_generator)(node_id); + let storage = (self.launcher.resource_generator.storage)(node_id); if self.launcher.metadata.skip_late && late_start.contains(&node_id) { self.late_start.insert( node_id, LateStartNode { networks, - context: Right((memberships, config)), + context: Right((storage, memberships, config)), }, ); } else { @@ -379,6 +386,7 @@ where initializer, config, validator_config, + storage, ) .await; if late_start.contains(&node_id) { @@ -413,6 +421,7 @@ where initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, + storage: I::Storage, ) -> Arc> { // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); @@ -433,6 +442,7 @@ where network_bundle, initializer, ConsensusMetricsValue::default(), + storage, ) .await .expect("Could not init hotshot") diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 25bdcba2c2..7699e2bd88 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -5,6 +5,7 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; +use sha2::{Digest, Sha256}; use crate::task_helpers::{ build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, @@ -14,15 +15,15 @@ use commit::Committable; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_types::{ - data::{Leaf, QuorumProposal, VidDisperse, ViewNumber}, + data::{DAProposal, Leaf, QuorumProposal, VidDisperse, ViewNumber}, message::Proposal, simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, simple_vote::{ - TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData, - ViewSyncFinalizeVote, + DAData, DAVote, TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, + ViewSyncFinalizeData, ViewSyncFinalizeVote, }, traits::{ consensus_api::ConsensusApi, @@ -35,6 +36,7 @@ use hotshot_types::simple_vote::QuorumVote; #[derive(Clone)] pub struct TestView { + pub da_proposal: Proposal>, pub quorum_proposal: Proposal>, pub leaf: Leaf, pub view_number: ViewNumber, @@ -84,7 +86,7 @@ impl TestView { payload_commitment, }; - let proposal = QuorumProposal:: { + let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), view_number: genesis_view, justify_qc: QuorumCertificate::genesis(), @@ -94,6 +96,25 @@ impl TestView { proposer_id: public_key, }; + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let block_payload_signature = + ::SignatureKey::sign(&private_key, &encoded_transactions_hash) + .expect("Failed to sign block payload"); + + let da_proposal_inner = DAProposal:: { + encoded_transactions: encoded_transactions.clone(), + metadata: (), + view_number: genesis_view, + }; + + let da_proposal = Proposal { + data: da_proposal_inner, + signature: block_payload_signature, + _pd: PhantomData, + }; + let leaf = Leaf { view_number: genesis_view, justify_qc: QuorumCertificate::genesis(), @@ -111,7 +132,7 @@ impl TestView { .expect("Failed to sign leaf commitment!"); let quorum_proposal = Proposal { - data: proposal, + data: quorum_proposal_inner, signature, _pd: PhantomData, }; @@ -128,6 +149,7 @@ impl TestView { upgrade_data: None, view_sync_finalize_data: None, timeout_cert_data: None, + da_proposal, } } @@ -282,6 +304,25 @@ impl TestView { _pd: PhantomData, }; + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let block_payload_signature = + ::SignatureKey::sign(&private_key, &encoded_transactions_hash) + .expect("Failed to sign block payload"); + + let da_proposal_inner = DAProposal:: { + encoded_transactions: encoded_transactions.clone(), + metadata: (), + view_number: next_view, + }; + + let da_proposal = Proposal { + data: da_proposal_inner, + signature: block_payload_signature, + _pd: PhantomData, + }; + TestView { quorum_proposal, leaf, @@ -296,6 +337,7 @@ impl TestView { upgrade_data: None, view_sync_finalize_data: None, timeout_cert_data: None, + da_proposal, } } @@ -331,6 +373,20 @@ impl TestView { ) .expect("Failed to generate a signature on UpgradVote") } + + pub fn create_da_vote( + &self, + data: DAData, + handle: &SystemContextHandle, + ) -> DAVote { + DAVote::create_signed_vote( + data, + self.view_number, + handle.public_key(), + handle.private_key(), + ) + .expect("Failed to sign DAData") + } } pub struct TestViewGenerator { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 4253222531..68f917326a 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -1,4 +1,3 @@ -#![allow(clippy::panic)] use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; @@ -6,9 +5,9 @@ use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*} use hotshot_testing::task_helpers::key_pair_for_id; use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_testing::{ - predicates::{exact, is_at_view_number, quorum_vote_send}, + predicates::{exact, is_at_view_number, quorum_proposal_send, quorum_vote_send}, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + task_helpers::{build_system_handle, vid_scheme_from_view_number}, view_generator::TestViewGenerator, }; use hotshot_types::simple_vote::ViewSyncFinalizeData; @@ -19,15 +18,6 @@ use jf_primitives::vid::VidScheme; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; - use hotshot_testing::{ - predicates::{exact, is_at_view_number, quorum_proposal_send}, - script::{run_test_script, TestScriptStage}, - task_helpers::{build_system_handle, vid_scheme_from_view_number}, - view_generator::TestViewGenerator, - }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -596,3 +586,57 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inject_consensus_polls(&consensus_state).await; run_test_script(stages, consensus_state).await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_vid_disperse_storage_failure() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + + // Set the error flag here for the system handle. This causes it to emit an error on append. + handle.get_storage().write().await.should_return_err = true; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Run view 1 (the genesis stage). + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DACRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0.clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(1))), + exact(QuorumProposalValidated(proposals[0].data.clone())), + /* Does not vote */ + ], + asserts: vec![is_at_view_number(1)], + }; + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + inject_consensus_polls(&consensus_state).await; + + run_test_script(vec![view_1], consensus_state).await; +} diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 7d10503d2b..d3b3f8de16 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -1,90 +1,140 @@ use hotshot::tasks::task_state::CreateTaskState; -use hotshot::types::SignatureKey; use hotshot::types::SystemContextHandle; -use hotshot_example_types::node_types::MemoryImpl; -use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; -use hotshot_task_impls::{da::DATaskState, events::HotShotEvent}; +use hotshot_example_types::{ + block_types::TestTransaction, + node_types::{MemoryImpl, TestTypes}, +}; +use hotshot_task_impls::da::DATaskState; +use hotshot_task_impls::events::HotShotEvent::*; +use hotshot_testing::{ + predicates::exact, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, +}; use hotshot_types::{ - data::{DAProposal, ViewNumber}, - simple_vote::{DAData, DAVote}, + data::ViewNumber, + simple_vote::DAData, traits::{ - block_contents::vid_commitment, - consensus_api::ConsensusApi, - election::Membership, - node_implementation::{ConsensusTime, NodeType}, + block_contents::vid_commitment, election::Membership, node_implementation::ConsensusTime, }, }; -use sha2::{Digest, Sha256}; -use std::{collections::HashMap, marker::PhantomData}; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot_task_impls::harness::run_harness; - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::message::Proposal; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - // Build the API for node 2. let handle = build_system_handle(2).await.0; - let pub_key = *handle.public_key(); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); - let payload_commitment = vid_commitment( + let payload_commit = vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), ); - let encoded_transactions_hash = Sha256::digest(&encoded_transactions); - - let signature = ::SignatureKey::sign( - handle.private_key(), - &encoded_transactions_hash, - ) - .expect("Failed to sign block payload"); - let proposal = DAProposal { - encoded_transactions: encoded_transactions.clone(), - metadata: (), - view_number: ViewNumber::new(2), + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.da_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Run view 1 (the genesis stage). + let view_1 = TestScriptStage { + inputs: vec![ + ViewChange(ViewNumber::new(1)), + ViewChange(ViewNumber::new(2)), + TransactionsSequenced(encoded_transactions.clone(), (), ViewNumber::new(2)), + ], + outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], + asserts: vec![], + }; + + // Run view 2 and propose. + let view_2 = TestScriptStage { + inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![exact(DAVoteSend(votes[1].clone()))], + asserts: vec![], }; - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, + + let da_state = DATaskState::>::create_from(&handle).await; + let stages = vec![view_1, view_2]; + + run_test_script(stages, da_state).await; +} + +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_da_task_storage_failure() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + + // Set the error flag here for the system handle. This causes it to emit an error on append. + handle.get_storage().write().await.should_return_err = true; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. We need the VID commitment to be able to propose later. + let transactions = vec![TestTransaction(vec![0])]; + let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let payload_commit = vid_commitment( + &encoded_transactions, + handle.hotshot.memberships.quorum_membership.total_nodes(), + ); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.da_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Run view 1 (the genesis stage). + let view_1 = TestScriptStage { + inputs: vec![ + ViewChange(ViewNumber::new(1)), + ViewChange(ViewNumber::new(2)), + TransactionsSequenced(encoded_transactions.clone(), (), ViewNumber::new(2)), + ], + outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], + asserts: vec![], }; - // TODO for now reuse the same block payload commitment and signature as DA committee - // https://github.com/EspressoSystems/jellyfish/issues/369 - - // Every event input is seen on the event stream in the output. - let mut input = Vec::new(); - let mut output = HashMap::new(); - - // In view 1, node 2 is the next leader. - input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::TransactionsSequenced( - encoded_transactions.clone(), - (), - ViewNumber::new(2), - )); - input.push(HotShotEvent::DAProposalRecv(message.clone(), pub_key)); - - input.push(HotShotEvent::Shutdown); - - output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); - let da_vote = DAVote::create_signed_vote( - DAData { - payload_commit: payload_commitment, - }, - ViewNumber::new(2), - handle.public_key(), - handle.private_key(), - ) - .expect("Failed to sign DAData"); - output.insert(HotShotEvent::DAVoteSend(da_vote), 1); + // Run view 2 and propose. + let view_2 = TestScriptStage { + inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![ + /* No vote was sent due to the storage failure */ + ], + asserts: vec![], + }; let da_state = DATaskState::>::create_from(&handle).await; - run_harness(input, output, da_state, false).await; + let stages = vec![view_1, view_2]; + + run_test_script(stages, da_state).await; } diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 086cacaa91..0c9066e9b9 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -8,6 +8,7 @@ use hotshot::traits::implementations::{MasterMap, MemoryNetwork, NetworkingMetri use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; use hotshot_example_types::state_types::TestInstanceState; +use hotshot_example_types::storage_types::TestStorage; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestValidatedState, @@ -67,6 +68,7 @@ pub type VIDNetwork = MemoryNetwork, ::Signature impl NodeImplementation for TestImpl { type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; + type Storage = TestStorage; } /// fake Eq From 7512b3afe67d1c69b3f5b04de773d398226ab86f Mon Sep 17 00:00:00 2001 From: Nathan F Yospe Date: Tue, 19 Mar 2024 14:31:53 -0400 Subject: [PATCH 0874/1393] Adding vbs support (#2666) * Using tide-disco and surf-disco with static versions and versioned binary serialization * [de]seriaize with vbs instead of bincode --------- Co-authored-by: Jarred Parr --- example-types/Cargo.toml | 12 +- example-types/src/node_types.rs | 11 +- examples/Cargo.toml | 29 +- examples/combined/all.rs | 6 + examples/combined/types.rs | 11 +- examples/infra/mod.rs | 80 ++-- examples/webserver/all.rs | 6 + examples/webserver/multi-webserver.rs | 21 +- examples/webserver/types.rs | 11 +- examples/webserver/webserver.rs | 9 +- hotshot-qc/Cargo.toml | 1 - hotshot-qc/src/bit_vector.rs | 11 +- hotshot-qc/src/bit_vector_old.rs | 10 +- hotshot/Cargo.toml | 38 +- hotshot/src/lib.rs | 9 +- .../src/traits/networking/combined_network.rs | 81 ++-- .../src/traits/networking/libp2p_network.rs | 168 +++++---- .../src/traits/networking/memory_network.rs | 30 +- .../src/traits/networking/push_cdn_network.rs | 79 ++-- .../traits/networking/web_server_network.rs | 348 ++++++++---------- libp2p-networking/Cargo.toml | 10 +- libp2p-networking/src/network/mod.rs | 18 - libp2p-networking/src/network/node/handle.rs | 71 ++-- libp2p-networking/tests/counter.rs | 35 +- orchestrator/Cargo.toml | 6 +- orchestrator/src/client.rs | 20 +- orchestrator/src/config.rs | 7 +- orchestrator/src/lib.rs | 27 +- task-impls/Cargo.toml | 32 +- task-impls/src/builder.rs | 7 +- task-impls/src/consensus.rs | 2 +- task-impls/src/network.rs | 16 +- task-impls/src/request.rs | 68 +++- task-impls/src/response.rs | 6 +- task-impls/src/transactions.rs | 2 +- task/Cargo.toml | 17 +- testing-macros/Cargo.toml | 4 +- testing/Cargo.toml | 34 +- testing/src/block_builder.rs | 24 +- testing/src/test_builder.rs | 2 +- testing/tests/block_builder.rs | 4 +- testing/tests/memory_network.rs | 17 +- testing/tests/unit.rs | 1 - testing/tests/unit/message.rs | 28 +- testing/tests/unit/version.rs | 33 -- testing/tests/upgrade_task.rs | 2 +- utils/Cargo.toml | 14 - utils/src/bincode.rs | 28 -- utils/src/lib.rs | 7 - utils/src/version.rs | 16 - web_server/Cargo.toml | 5 +- web_server/src/lib.rs | 23 +- 52 files changed, 827 insertions(+), 730 deletions(-) delete mode 100644 testing/tests/unit/version.rs delete mode 100644 utils/Cargo.toml delete mode 100644 utils/src/bincode.rs delete mode 100644 utils/src/lib.rs delete mode 100644 utils/src/version.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 0741d5ee2f..f86e7c67b7 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "hotshot-example-types" -version = "0.1.0" -edition = "2021" +version = { workspace = true } +edition = { workspace = true } description = "Types and traits for the HotShot consesus module" -authors = ["Espresso Systems "] +authors = { workspace = true } [features] default = [] @@ -19,15 +19,13 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } anyhow = { workspace = true } sha3 = "^0.10" -bincode = { workspace = true } commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } hotshot-types = { workspace = true } -hotshot-utils = { path = "../utils" } -hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } rand = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index ea0c86e6a4..4643398e94 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -13,6 +13,9 @@ use hotshot::traits::{ implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, WebServerNetwork}, NodeImplementation, }; + +use hotshot_types::constants::WebServerVersion; + use hotshot_types::{ data::ViewNumber, message::Message, signature_key::BLSPubKey, traits::node_implementation::NodeType, @@ -82,10 +85,10 @@ pub type StaticMemoryDAComm = type StaticLibp2pDAComm = Libp2pNetwork, ::SignatureKey>; /// web server network communication channel -type StaticWebDAComm = WebServerNetwork; +type StaticWebDAComm = WebServerNetwork; /// combined network -type StaticCombinedDAComm = CombinedNetworks; +type StaticCombinedDAComm = CombinedNetworks; /// memory comm channel pub type StaticMemoryQuorumComm = @@ -96,10 +99,10 @@ type StaticLibp2pQuorumComm = Libp2pNetwork, ::SignatureKey>; /// web server comm channel -type StaticWebQuorumComm = WebServerNetwork; +type StaticWebQuorumComm = WebServerNetwork; /// combined network (libp2p + web server) -type StaticCombinedQuorumComm = CombinedNetworks; +type StaticCombinedQuorumComm = CombinedNetworks; impl NodeImplementation for PushCdnImpl { type QuorumNetwork = StaticPushCdnQuorumComm; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 1696a81a30..de083e5c3b 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -1,10 +1,10 @@ [package] -authors = ["Espresso Systems "] +authors = { workspace = true } description = "HotShot Examples and binaries" -edition = "2021" +edition = { workspace = true } name = "hotshot-examples" readme = "README.md" -version = "0.3.3" +version = { workspace = true } rust-version = "1.65.0" [features] @@ -96,7 +96,6 @@ async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6.3" -bincode = { workspace = true } clap = { version = "4.5", features = ["derive", "env"], optional = true } commit = { workspace = true } custom_debug = { workspace = true } @@ -104,11 +103,10 @@ dashmap = "5.5.1" either = { workspace = true } embed-doc-image = "0.1.4" futures = { workspace = true } -hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } -hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } +hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } +hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-types = { workspace = true } -hotshot-utils = { path = "../utils" } -hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } @@ -123,19 +121,30 @@ hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } chrono = "0.4" +versioned-binary-serialization = { workspace = true } tracing = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } -cdn-broker = { workspace = true, features = ["insecure", "runtime-tokio", "strong_consistency", "local_discovery"] } +cdn-broker = { workspace = true, features = [ + "insecure", + "runtime-tokio", + "strong_consistency", + "local_discovery", +] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } -cdn-broker = { workspace = true, features = ["insecure", "runtime-async-std", "strong_consistency", "local_discovery"] } +cdn-broker = { workspace = true, features = [ + "insecure", + "runtime-async-std", + "strong_consistency", + "local_discovery", +] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } [dev-dependencies] diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 5cce2378d3..edcada002f 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -14,10 +14,12 @@ use async_compatibility_layer::channel::oneshot; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_types::constants::WebServerVersion; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; use surf_disco::Url; use tracing::{error, instrument}; +use versioned_binary_serialization::version::StaticVersionType; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -41,9 +43,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, + WebServerVersion, >( Some(server_shutdown_cdn), Url::parse("http://localhost:9000").unwrap(), + WebServerVersion::instance(), ) .await { @@ -53,9 +57,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, + WebServerVersion, >( Some(server_shutdown_da), Url::parse("http://localhost:9001").unwrap(), + WebServerVersion::instance(), ) .await { diff --git a/examples/combined/types.rs b/examples/combined/types.rs index dbbc1b2e80..fb4397a2e3 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,6 +1,7 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::CombinedNetworks; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; +use hotshot_types::constants::WebServerVersion; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -10,13 +11,13 @@ use std::fmt::Debug; pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = CombinedNetworks; +pub type DANetwork = CombinedNetworks; /// convenience type alias -pub type VIDNetwork = CombinedNetworks; +pub type VIDNetwork = CombinedNetworks; /// convenience type alias -pub type QuorumNetwork = CombinedNetworks; +pub type QuorumNetwork = CombinedNetworks; /// convenience type alias -pub type ViewSyncNetwork = CombinedNetworks; +pub type ViewSyncNetwork = CombinedNetworks; impl NodeImplementation for NodeImpl { type QuorumNetwork = QuorumNetwork; @@ -24,4 +25,4 @@ impl NodeImplementation for NodeImpl { type Storage = TestStorage; } /// convenience type alias -pub type ThisRun = CombinedDARun; +pub type ThisRun = CombinedDARun; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index e0ec3eeaca..ce8f461ed1 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -5,6 +5,7 @@ use async_lock::RwLock; use async_trait::async_trait; use cdn_broker::reexports::crypto::signature::KeyPair; use cdn_broker::reexports::message::Topic; +use chrono::Utc; use clap::Parser; use clap::{Arg, Command}; use futures::StreamExt; @@ -31,22 +32,21 @@ use hotshot_orchestrator::{ client::{BenchResults, OrchestratorClient, ValidatorArgs}, config::{CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, WebServerConfig}, }; -use hotshot_types::message::Message; -use hotshot_types::traits::network::ConnectedNetwork; -use hotshot_types::PeerConfig; -use hotshot_types::ValidatorConfig; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, event::{Event, EventType}, + message::Message, traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeType}, states::TestableState, }, - HotShotConfig, + HotShotConfig, PeerConfig, ValidatorConfig, }; +use libp2p_identity::PeerId; use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, @@ -57,17 +57,15 @@ use libp2p_networking::{ }; use rand::rngs::StdRng; use rand::SeedableRng; +use std::fmt::Debug; use std::marker::PhantomData; use std::time::Duration; use std::{collections::BTreeSet, sync::Arc}; +use std::{fs, time::Instant}; use std::{num::NonZeroUsize, str::FromStr}; use surf_disco::Url; - -use chrono::Utc; -use libp2p_identity::PeerId; -use std::fmt::Debug; -use std::{fs, time::Instant}; use tracing::{error, info, warn}; +use versioned_binary_serialization::version::StaticVersionType; #[derive(Debug, Clone)] /// Arguments passed to the orchestrator @@ -287,10 +285,10 @@ fn calculate_num_tx_per_round( /// create a web server network from a config file + public key /// # Panics /// Panics if the web server config doesn't exist in `config` -fn webserver_network_from_config( +fn webserver_network_from_config( config: NetworkConfig, pub_key: TYPES::SignatureKey, -) -> WebServerNetwork { +) -> WebServerNetwork { // Get the configuration for the web server let WebServerConfig { url, @@ -691,13 +689,13 @@ pub trait RunDA< // WEB SERVER /// Represents a web server-based run -pub struct WebServerDARun { +pub struct WebServerDARun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: WebServerNetwork, + quorum_channel: WebServerNetwork, /// data availability channel - da_channel: WebServerNetwork, + da_channel: WebServerNetwork, } #[async_trait] @@ -710,20 +708,28 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = WebServerNetwork, - CommitteeNetwork = WebServerNetwork, + QuorumNetwork = WebServerNetwork, + CommitteeNetwork = WebServerNetwork, Storage = TestStorage, >, - > RunDA, WebServerNetwork, NODE> for WebServerDARun + NetworkVersion: StaticVersionType, + > + RunDA< + TYPES, + WebServerNetwork, + WebServerNetwork, + NODE, + > for WebServerDARun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, + NetworkVersion: 'static, { async fn initialize_networking( config: NetworkConfig, - ) -> WebServerDARun { + ) -> WebServerDARun { // Get our own key let pub_key = config.config.my_own_validator_config.public_key.clone(); @@ -735,11 +741,11 @@ where // create and wait for underlying network let underlying_quorum_network = - webserver_network_from_config::(config.clone(), pub_key.clone()); + webserver_network_from_config::(config.clone(), pub_key.clone()); underlying_quorum_network.wait_for_ready().await; - let da_channel: WebServerNetwork = + let da_channel: WebServerNetwork = WebServerNetwork::create(url.clone(), wait_between_polls, pub_key.clone(), true); WebServerDARun { @@ -749,11 +755,11 @@ where } } - fn get_da_channel(&self) -> WebServerNetwork { + fn get_da_channel(&self) -> WebServerNetwork { self.da_channel.clone() } - fn get_quorum_channel(&self) -> WebServerNetwork { + fn get_quorum_channel(&self) -> WebServerNetwork { self.quorum_channel.clone() } @@ -918,13 +924,13 @@ where // Combined network /// Represents a combined-network-based run -pub struct CombinedDARun { +pub struct CombinedDARun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: CombinedNetworks, + quorum_channel: CombinedNetworks, /// data availability channel - da_channel: CombinedNetworks, + da_channel: CombinedNetworks, } #[async_trait] @@ -937,20 +943,28 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = CombinedNetworks, - CommitteeNetwork = CombinedNetworks, + QuorumNetwork = CombinedNetworks, + CommitteeNetwork = CombinedNetworks, Storage = TestStorage, >, - > RunDA, CombinedNetworks, NODE> for CombinedDARun + NetworkVersion: StaticVersionType, + > + RunDA< + TYPES, + CombinedNetworks, + CombinedNetworks, + NODE, + > for CombinedDARun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, + NetworkVersion: 'static, { async fn initialize_networking( config: NetworkConfig, - ) -> CombinedDARun { + ) -> CombinedDARun { // Get our own key let pub_key = config.config.my_own_validator_config.public_key.clone(); @@ -971,7 +985,7 @@ where // Create and wait for underlying webserver network let web_quorum_network = - webserver_network_from_config::(config.clone(), pub_key.clone()); + webserver_network_from_config::(config.clone(), pub_key.clone()); let web_da_network = WebServerNetwork::create(url, wait_between_polls, pub_key, true); @@ -1000,11 +1014,11 @@ where } } - fn get_da_channel(&self) -> CombinedNetworks { + fn get_da_channel(&self) -> CombinedNetworks { self.da_channel.clone() } - fn get_quorum_channel(&self) -> CombinedNetworks { + fn get_quorum_channel(&self) -> CombinedNetworks { self.quorum_channel.clone() } diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index 140e514124..ecdc445a50 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -19,8 +19,10 @@ pub mod infra; use async_compatibility_layer::{art::async_spawn, channel::oneshot}; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_types::constants::WebServerVersion; use surf_disco::Url; use tracing::error; +use versioned_binary_serialization::version::StaticVersionType; #[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] @@ -40,9 +42,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, + WebServerVersion, >( Some(server_shutdown_cdn), Url::parse("http://localhost:9000").unwrap(), + WebServerVersion::instance(), ) .await { @@ -52,9 +56,11 @@ async fn main() { async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< ::SignatureKey, + WebServerVersion, >( Some(server_shutdown_da), Url::parse("http://localhost:9001").unwrap(), + WebServerVersion::instance(), ) .await { diff --git a/examples/webserver/multi-webserver.rs b/examples/webserver/multi-webserver.rs index 630ebe4c3d..edc5ba586e 100644 --- a/examples/webserver/multi-webserver.rs +++ b/examples/webserver/multi-webserver.rs @@ -8,8 +8,11 @@ use async_compatibility_layer::{ }; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use hotshot_types::constants::WebServerVersion; +use hotshot_types::traits::node_implementation::NodeType; use surf_disco::Url; use tracing::error; +use versioned_binary_serialization::version::StaticVersionType; /// Arguments to run multiple web servers #[derive(Parser, Debug)] @@ -34,8 +37,13 @@ async fn main() { let consensus_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - >(Some(server_shutdown_cdn), args.consensus_url) + ::SignatureKey, + WebServerVersion, + >( + Some(server_shutdown_cdn), + args.consensus_url, + WebServerVersion::instance(), + ) .await { error!("Problem starting cdn web server: {:?}", e); @@ -43,8 +51,13 @@ async fn main() { }); let da_server = async_spawn(async move { if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - >(Some(server_shutdown_da), args.da_url) + ::SignatureKey, + WebServerVersion, + >( + Some(server_shutdown_da), + args.da_url, + WebServerVersion::instance(), + ) .await { error!("Problem starting da web server: {:?}", e); diff --git a/examples/webserver/types.rs b/examples/webserver/types.rs index 9331fc4933..34ce2fa9a1 100644 --- a/examples/webserver/types.rs +++ b/examples/webserver/types.rs @@ -1,6 +1,7 @@ use crate::infra::WebServerDARun; use hotshot::traits::implementations::WebServerNetwork; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; +use hotshot_types::constants::WebServerVersion; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -10,13 +11,13 @@ use std::fmt::Debug; pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = WebServerNetwork; +pub type DANetwork = WebServerNetwork; /// convenience type alias -pub type VIDNetwork = WebServerNetwork; +pub type VIDNetwork = WebServerNetwork; /// convenience type alias -pub type QuorumNetwork = WebServerNetwork; +pub type QuorumNetwork = WebServerNetwork; /// convenience type alias -pub type ViewSyncNetwork = WebServerNetwork; +pub type ViewSyncNetwork = WebServerNetwork; impl NodeImplementation for NodeImpl { type CommitteeNetwork = DANetwork; @@ -24,4 +25,4 @@ impl NodeImplementation for NodeImpl { type Storage = TestStorage; } /// convenience type alias -pub type ThisRun = WebServerDARun; +pub type ThisRun = WebServerDARun; diff --git a/examples/webserver/webserver.rs b/examples/webserver/webserver.rs index 06158ae573..60669de6d9 100644 --- a/examples/webserver/webserver.rs +++ b/examples/webserver/webserver.rs @@ -1,7 +1,9 @@ //! web server example use hotshot_example_types::state_types::TestTypes; +use hotshot_types::constants::WebServerVersion; use std::sync::Arc; use surf_disco::Url; +use versioned_binary_serialization::version::StaticVersionType; use async_compatibility_layer::{ channel::oneshot, @@ -26,6 +28,11 @@ async fn main() { let _sender = Arc::new(server_shutdown_sender); let _result = hotshot_web_server::run_web_server::< ::SignatureKey, - >(Some(server_shutdown), args.url) + WebServerVersion, + >( + Some(server_shutdown), + args.url, + WebServerVersion::instance(), + ) .await; } diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 7766c9bf07..f5bd2cdd73 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -12,7 +12,6 @@ ark-bn254 = "0.4.0" ark-ec = { workspace = true } ark-ff = "0.4.0" ark-std = { workspace = true } -bincode = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 8313f009fc..a4035abb27 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -275,14 +275,19 @@ mod tests { // Check the QC and the QCParams can be serialized / deserialized assert_eq!( qc, - bincode::deserialize(&bincode::serialize(&qc).unwrap()).unwrap() + Serializer::::deserialize( + &Serializer::::serialize(&qc).unwrap() + ) + .unwrap() ); // (alex) since deserialized stake table's leaf would contain normalized projective // points with Z=1, which differs from the original projective representation. // We compare individual fields for equivalence instead. - let de_qc_pp: QCParams<$aggsig, ST> = - bincode::deserialize(&bincode::serialize(&qc_pp).unwrap()).unwrap(); + let de_qc_pp: QCParams<$aggsig, ST> = Serializer::::deserialize( + &Serializer::::serialize(&qc_pp).unwrap(), + ) + .unwrap(); assert_eq!( qc_pp.stake_table.commitment(SnapshotVersion::Head).unwrap(), de_qc_pp diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index fcb12a8398..855d7dd3e3 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -263,12 +263,18 @@ mod tests { // Check the QC and the QCParams can be serialized / deserialized assert_eq!( qc, - bincode::deserialize(&bincode::serialize(&qc).unwrap()).unwrap() + Serializer::::deserialize( + &Serializer::::serialize(&qc).unwrap() + ) + .unwrap() ); assert_eq!( qc_pp, - bincode::deserialize(&bincode::serialize(&qc_pp).unwrap()).unwrap() + Serializer::::deserialize( + &Serializer::::serialize(&qc_pp).unwrap() + ) + .unwrap() ); // bad paths diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 79e402feae..d75f7228b6 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -1,17 +1,15 @@ [package] -authors = ["Espresso Systems "] +authors = { workspace = true } description = "HotShot consesus module" -edition = "2021" +edition = { workspace = true } name = "hotshot" readme = "README.md" -version = "0.3.3" -rust-version = "1.65.0" +version = { workspace = true } +rust-version = { workspace = true } [features] default = ["docs", "doc-images"] -gpu-vid = [ - "hotshot-task-impls/gpu-vid", -] +gpu-vid = ["hotshot-task-impls/gpu-vid"] # Features required for binaries bin-orchestrator = ["clap"] @@ -23,6 +21,7 @@ hotshot-testing = [] randomized-leader-election = [] [dependencies] +anyhow = { workspace = true } async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } @@ -38,12 +37,11 @@ either = { workspace = true } embed-doc-image = "0.1.4" ethereum-types = { workspace = true } futures = { workspace = true } -hotshot-web-server = { version = "0.1.1", path = "../web_server", default-features = false } -hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-types = { workspace = true } +hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } -hotshot-utils = { path = "../utils" } -hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } +hotshot-types = { workspace = true } +hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } lru = "0.12.3" @@ -54,18 +52,28 @@ snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } -anyhow = { workspace = true } +versioned-binary-serialization = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } -cdn-broker = { workspace = true, features = ["insecure", "runtime-tokio", "strong_consistency", "local_discovery"] } +cdn-broker = { workspace = true, features = [ + "insecure", + "runtime-tokio", + "strong_consistency", + "local_discovery", +] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } -cdn-broker = { workspace = true, features = ["insecure", "runtime-async-std", "strong_consistency", "local_discovery"] } +cdn-broker = { workspace = true, features = [ + "insecure", + "runtime-async-std", + "strong_consistency", + "local_discovery", +] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5dcd148158..b521e3e6c4 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -29,7 +29,7 @@ use futures::join; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; -use hotshot_types::constants::{EVENT_CHANNEL_SIZE, VERSION_0_1}; +use hotshot_types::constants::{EVENT_CHANNEL_SIZE, STATIC_VER_0_1}; use hotshot_task::task::TaskRegistry; use hotshot_types::{ @@ -310,17 +310,20 @@ impl> SystemContext { // TODO We should have a function that can return a network error if there is one // but first we'd need to ensure our network implementations can support that // (and not hang instead) - // + + // version <0, 1> currently fixed; this is the same as VERSION_0_1, + // and will be updated to be part of SystemContext. I wanted to use associated + // constants in NodeType, but that seems to be unavailable in the current Rust. api .networks .da_network .broadcast_message( Message { - version: VERSION_0_1, sender: api.public_key.clone(), kind: MessageKind::from(message), }, da_membership.get_whole_committee(view_number), + STATIC_VER_0_1, ), api .send_external_event(Event { diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 252f6420c2..a74f6a6e21 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -50,6 +50,7 @@ use std::hash::Hash; use std::time::Duration; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use versioned_binary_serialization::version::StaticVersionType; /// Helper function to calculate a hash of a type that implements Hash pub fn calculate_hash_of(t: &T) -> u64 { @@ -64,9 +65,9 @@ type DelayedTasksLockedMap = Arc { +pub struct CombinedNetworks { /// The two networks we'll use for send/recv - networks: Arc>, + networks: Arc>, /// Last n seen messages to prevent processing duplicates message_cache: Arc>>, @@ -81,14 +82,17 @@ pub struct CombinedNetworks { delay_duration: Arc>, } -impl CombinedNetworks { +impl CombinedNetworks { /// Constructor /// /// # Panics /// /// Panics if `COMBINED_NETWORK_CACHE_SIZE` is 0 #[must_use] - pub fn new(networks: Arc>, delay_duration: Duration) -> Self { + pub fn new( + networks: Arc>, + delay_duration: Duration, + ) -> Self { Self { networks, message_cache: Arc::new(RwLock::new(LruCache::new( @@ -102,7 +106,7 @@ impl CombinedNetworks { /// Get a ref to the primary network #[must_use] - pub fn primary(&self) -> &WebServerNetwork { + pub fn primary(&self) -> &WebServerNetwork { &self.networks.0 } @@ -173,13 +177,15 @@ impl CombinedNetworks { /// We need this so we can impl `TestableNetworkingImplementation` /// on the tuple #[derive(Debug, Clone)] -pub struct UnderlyingCombinedNetworks( - pub WebServerNetwork, +pub struct UnderlyingCombinedNetworks( + pub WebServerNetwork, pub Libp2pNetwork, TYPES::SignatureKey>, ); #[cfg(feature = "hotshot-testing")] -impl TestableNetworkingImplementation for CombinedNetworks { +impl + TestableNetworkingImplementation for CombinedNetworks +{ fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -190,9 +196,7 @@ impl TestableNetworkingImplementation for CombinedNetwor secondary_network_delay: Duration, ) -> Box (Arc, Arc) + 'static> { let generators = ( - as TestableNetworkingImplementation<_>>::generator( + as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, @@ -215,11 +219,11 @@ impl TestableNetworkingImplementation for CombinedNetwor let (quorum_web, da_web) = generators.0(node_id); let (quorum_p2p, da_p2p) = generators.1(node_id); let da_networks = UnderlyingCombinedNetworks( - Arc::>::into_inner(da_web).unwrap(), + Arc::>::into_inner(da_web).unwrap(), Arc::, TYPES::SignatureKey>>::unwrap_or_clone(da_p2p), ); let quorum_networks = UnderlyingCombinedNetworks( - Arc::>::into_inner(quorum_web).unwrap(), + Arc::>::into_inner(quorum_web).unwrap(), Arc::, TYPES::SignatureKey>>::unwrap_or_clone( quorum_p2p, ), @@ -255,21 +259,28 @@ impl TestableNetworkingImplementation for CombinedNetwor } #[async_trait] -impl ConnectedNetwork, TYPES::SignatureKey> - for CombinedNetworks +impl + ConnectedNetwork, TYPES::SignatureKey> + for CombinedNetworks { - async fn request_data( + async fn request_data( &self, request: Message, recipient: TYPES::SignatureKey, + bind_version: VER, ) -> Result, NetworkError> { - self.secondary().request_data(request, recipient).await + self.secondary() + .request_data(request, recipient, bind_version) + .await } - async fn spawn_request_receiver_task( + async fn spawn_request_receiver_task( &self, + bind_version: VER, ) -> Option, ResponseChannel>)>> { - self.secondary().spawn_request_receiver_task().await + self.secondary() + .spawn_request_receiver_task(bind_version) + .await } fn pause(&self) { @@ -302,10 +313,11 @@ impl ConnectedNetwork, TYPES::SignatureKey> boxed_sync(closure) } - async fn broadcast_message( + async fn broadcast_message( &self, message: Message, recipients: BTreeSet, + bind_version: VER, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); @@ -316,22 +328,23 @@ impl ConnectedNetwork, TYPES::SignatureKey> message, async move { primary - .broadcast_message(primary_message, primary_recipients) + .broadcast_message(primary_message, primary_recipients, bind_version) .await }, async move { secondary - .broadcast_message(secondary_message, recipients) + .broadcast_message(secondary_message, recipients, bind_version) .await }, ) .await } - async fn da_broadcast_message( + async fn da_broadcast_message( &self, message: Message, recipients: BTreeSet, + bind_version: VER, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); @@ -342,22 +355,23 @@ impl ConnectedNetwork, TYPES::SignatureKey> message, async move { primary - .da_broadcast_message(primary_message, primary_recipients) + .da_broadcast_message(primary_message, primary_recipients, bind_version) .await }, async move { secondary - .da_broadcast_message(secondary_message, recipients) + .da_broadcast_message(secondary_message, recipients, bind_version) .await }, ) .await } - async fn direct_message( + async fn direct_message( &self, message: Message, recipient: TYPES::SignatureKey, + bind_version: VER, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); @@ -368,10 +382,14 @@ impl ConnectedNetwork, TYPES::SignatureKey> message, async move { primary - .direct_message(primary_message, primary_recipient) + .direct_message(primary_message, primary_recipient, bind_version) + .await + }, + async move { + secondary + .direct_message(secondary_message, recipient, bind_version) .await }, - async move { secondary.direct_message(secondary_message, recipient).await }, ) .await } @@ -423,8 +441,11 @@ impl ConnectedNetwork, TYPES::SignatureKey> } async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork,TYPES::SignatureKey>>:: - inject_consensus_info(self.primary(), event.clone()).await; + as ConnectedNetwork< + Message, + TYPES::SignatureKey, + >>::inject_consensus_info(self.primary(), event.clone()) + .await; as ConnectedNetwork,TYPES::SignatureKey>>:: inject_consensus_info(self.secondary(), event).await; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6fb0aec8e2..b30a2b8140 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -11,10 +11,14 @@ use async_compatibility_layer::{ use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bimap::BiHashMap; -use bincode::Options; -use hotshot_types::constants::{Version, LOOK_AHEAD, VERSION_0_1}; +use futures::{ + channel::mpsc::{self, channel, Receiver, Sender}, + future::{join_all, Either}, + FutureExt, StreamExt, +}; use hotshot_types::{ boxed_sync, + constants::{Version01, LOOK_AHEAD, STATIC_VER_0_1, VERSION_0_1}, data::ViewNumber, traits::{ network::{ @@ -31,11 +35,9 @@ use hotshot_types::{ message::{Message, MessageKind}, traits::network::{NetworkReliability, TestableNetworkingImplementation, ViewMessage}, }; -use hotshot_utils::{bincode::bincode_opts, version::read_version}; use libp2p_identity::PeerId; #[cfg(feature = "hotshot-testing")] use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder}; - use libp2p_networking::{ network::{ behaviours::request_response::{Request, Response}, @@ -46,17 +48,8 @@ use libp2p_networking::{ }, reexport::{Multiaddr, ResponseChannel}, }; - use serde::Serialize; use snafu::ResultExt; -#[cfg(feature = "hotshot-testing")] -use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; - -use futures::{ - channel::mpsc::{self, channel, Receiver, Sender}, - future::{join_all, Either}, - FutureExt, StreamExt, -}; use std::{ collections::BTreeSet, fmt::Debug, @@ -66,7 +59,13 @@ use std::{ }, time::Duration, }; +#[cfg(feature = "hotshot-testing")] +use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; use tracing::{debug, error, info, instrument, warn}; +use versioned_binary_serialization::{ + version::{StaticVersionType, Version}, + BinarySerializer, Serializer, +}; /// convienence alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests @@ -85,8 +84,9 @@ pub const QC_TOPIC: &str = "global"; /// * we must have an explicit version field. #[derive(Serialize)] pub struct Empty { - /// network protocol version number in use - version: Version, + /// This should not be required, but it is. Version automatically gets prepended. + /// Perhaps this could be replaced with something zero-sized and serializable. + byte: u8, } impl Debug for Libp2pNetwork { @@ -398,15 +398,19 @@ impl Libp2pNetwork { }; result.handle_event_generator(sender, requests_tx, rx); - result.spawn_node_lookup(node_lookup_recv); - result.spawn_connect(id); + result.spawn_node_lookup(node_lookup_recv, STATIC_VER_0_1); + result.spawn_connect(id, STATIC_VER_0_1); Ok(result) } /// Spawns task for looking up nodes pre-emptively #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - fn spawn_node_lookup(&self, node_lookup_recv: UnboundedReceiver>) { + fn spawn_node_lookup( + &self, + node_lookup_recv: UnboundedReceiver>, + _: Ver, + ) { let handle = self.inner.handle.clone(); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = self.inner.latest_seen_view.clone(); @@ -424,7 +428,10 @@ impl Libp2pNetwork { // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { // look up - if let Err(err) = handle.lookup_node::(pk.clone(), dht_timeout).await { + if let Err(err) = handle + .lookup_node::(pk.clone(), dht_timeout, Ver::instance()) + .await + { error!("Failed to perform lookup for key {:?}: {}", pk, err); }; } @@ -433,7 +440,7 @@ impl Libp2pNetwork { } /// Initiates connection to the outside world - fn spawn_connect(&mut self, id: usize) { + fn spawn_connect(&mut self, id: usize, bind_version: VER) { let pk = self.inner.pk.clone(); let bootstrap_ref = self.inner.bootstrap_addrs.clone(); let num_bootstrap = self.inner.bootstrap_addrs_len; @@ -442,6 +449,7 @@ impl Libp2pNetwork { let node_type = self.inner.handle.config().node_type; let metrics_connected_peers = self.inner.clone(); let is_da = self.inner.is_da; + async_spawn({ let is_ready = self.inner.is_ready.clone(); async move { @@ -480,7 +488,11 @@ impl Libp2pNetwork { // we want our records published before // we begin participating in consensus - while handle.put_record(&pk, &handle.peer_id()).await.is_err() { + while handle + .put_record(&pk, &handle.peer_id(), bind_version) + .await + .is_err() + { async_sleep(Duration::from_secs(1)).await; } info!( @@ -489,7 +501,11 @@ impl Libp2pNetwork { node_type ); - while handle.put_record(&handle.peer_id(), &pk).await.is_err() { + while handle + .put_record(&handle.peer_id(), &pk, bind_version) + .await + .is_err() + { async_sleep(Duration::from_secs(1)).await; } // 10 minute timeout @@ -528,7 +544,7 @@ impl Libp2pNetwork { ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { - let result: Result = bincode_opts().deserialize(&msg); + let result: Result = Serializer::::deserialize(&msg); if let Ok(result) = result { sender .send(result) @@ -537,9 +553,8 @@ impl Libp2pNetwork { } } DirectRequest(msg, _pid, chan) => { - let result: Result = bincode_opts() - .deserialize(&msg) - .context(FailedToSerializeSnafu); + let result: Result = + Serializer::::deserialize(&msg).context(FailedToSerializeSnafu); if let Ok(result) = result { sender .send(result) @@ -549,12 +564,7 @@ impl Libp2pNetwork { if self .inner .handle - .direct_response( - chan, - &Empty { - version: VERSION_0_1, - }, - ) + .direct_response(chan, &Empty { byte: 0u8 }, STATIC_VER_0_1) .await .is_err() { @@ -562,17 +572,15 @@ impl Libp2pNetwork { }; } DirectResponse(msg, _) => { - let _result: Result = bincode_opts() - .deserialize(&msg) - .context(FailedToSerializeSnafu); + let _result: Result = + Serializer::::deserialize(&msg).context(FailedToSerializeSnafu); } NetworkEvent::IsBootstrapped => { error!("handle_recvd_events_0_1 received `NetworkEvent::IsBootstrapped`, which should be impossible."); } NetworkEvent::ResponseRequested(msg, chan) => { - let reqeust = bincode_opts() - .deserialize(&msg.0) - .context(FailedToSerializeSnafu)?; + let reqeust = + Serializer::::deserialize(&msg.0).context(FailedToSerializeSnafu)?; request_tx .try_send((reqeust, chan)) .map_err(|_| NetworkError::ChannelSend)?; @@ -613,9 +621,8 @@ impl Libp2pNetwork { | DirectRequest(raw, _, _) | DirectResponse(raw, _) | NetworkEvent::ResponseRequested(Request(raw), _) => { - let message_version = read_version(raw); - match message_version { - Some(VERSION_0_1) => { + match Version::deserialize(raw) { + Ok((VERSION_0_1, _rest)) => { let _ = handle .handle_recvd_events_0_1( message, @@ -624,21 +631,21 @@ impl Libp2pNetwork { ) .await; } - Some(version) => { + Ok((version, _)) => { warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", - version, message - ); + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", + version, message + ); } - _ => { + Err(e) => { warn!( - "Received message with unreadable version number.\n\nPayload:\n\n{:?}", - message - ); + "Error recovering version: {:?}.\n\nPayload:\n\n{:?}", + e, message + ); } } } - }; + } // re-set the `kill_switch` for the next loop kill_switch = other_stream; // re-set `receiver.recv()` for the next loop @@ -660,17 +667,18 @@ impl Libp2pNetwork { #[async_trait] impl ConnectedNetwork for Libp2pNetwork { - async fn request_data( + async fn request_data( &self, request: M, recipient: K, + bind_version: VER, ) -> Result, NetworkError> { self.wait_for_ready().await; let pid = match self .inner .handle - .lookup_node::(recipient.clone(), self.inner.dht_timeout) + .lookup_node::(recipient.clone(), self.inner.dht_timeout, bind_version) .await { Ok(pid) => pid, @@ -685,11 +693,15 @@ impl ConnectedNetwork for Libp2p }); } }; - match self.inner.handle.request_data(&request, pid).await { + match self + .inner + .handle + .request_data(&request, pid, bind_version) + .await + { Ok(response) => match response { Some(msg) => { - let res = bincode_opts() - .deserialize(&msg.0) + let res = Serializer::::deserialize(&msg.0) .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; Ok(ResponseMessage::Found(res)) } @@ -699,8 +711,9 @@ impl ConnectedNetwork for Libp2p } } - async fn spawn_request_receiver_task( + async fn spawn_request_receiver_task( &self, + bind_version: VER, ) -> Option)>> { let Some(mut internal_rx) = self.inner.requests_rx.lock().await.take() else { return None; @@ -719,7 +732,7 @@ impl ConnectedNetwork for Libp2p let Ok(response) = response_rx.await else { continue; }; - let _ = handle.respond_data(&response, chan).await; + let _ = handle.respond_data(&response, chan, bind_version).await; } }); @@ -758,10 +771,11 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::broadcast_message", skip_all)] - async fn broadcast_message( + async fn broadcast_message( &self, message: M, recipients: BTreeSet, + bind_version: VER, ) -> Result<(), NetworkError> { self.wait_for_ready().await; info!( @@ -796,9 +810,8 @@ impl ConnectedNetwork for Libp2p if let Some(ref config) = &self.inner.reliability_config { let handle = self.inner.handle.clone(); - let serialized_msg = bincode_opts() - .serialize(&message) - .context(FailedToSerializeSnafu)?; + let serialized_msg = + Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; let fut = config.clone().chaos_send_msg( serialized_msg, Arc::new(move |msg: Vec| { @@ -823,7 +836,12 @@ impl ConnectedNetwork for Libp2p } } - match self.inner.handle.gossip(topic, &message).await { + match self + .inner + .handle + .gossip(topic, &message, bind_version) + .await + { Ok(()) => { self.inner.metrics.outgoing_broadcast_message_count.add(1); Ok(()) @@ -836,14 +854,15 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::da_broadcast_message", skip_all)] - async fn da_broadcast_message( + async fn da_broadcast_message( &self, message: M, recipients: BTreeSet, + bind_version: VER, ) -> Result<(), NetworkError> { let future_results = recipients .into_iter() - .map(|r| self.direct_message(message.clone(), r)); + .map(|r| self.direct_message(message.clone(), r, bind_version)); let results = join_all(future_results).await; let errors: Vec<_> = results @@ -862,7 +881,12 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] - async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { + async fn direct_message( + &self, + message: M, + recipient: K, + bind_version: VER, + ) -> Result<(), NetworkError> { // short circuit if we're dming ourselves if recipient == self.inner.pk { // panic if we already shut down? @@ -879,7 +903,7 @@ impl ConnectedNetwork for Libp2p let pid = match self .inner .handle - .lookup_node::(recipient.clone(), self.inner.dht_timeout) + .lookup_node::(recipient.clone(), self.inner.dht_timeout, bind_version) .await { Ok(pid) => pid, @@ -901,9 +925,8 @@ impl ConnectedNetwork for Libp2p if let Some(ref config) = &self.inner.reliability_config { let handle = self.inner.handle.clone(); - let serialized_msg = bincode_opts() - .serialize(&message) - .context(FailedToSerializeSnafu)?; + let serialized_msg = + Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; let fut = config.clone().chaos_send_msg( serialized_msg, Arc::new(move |msg: Vec| { @@ -927,7 +950,12 @@ impl ConnectedNetwork for Libp2p } } - match self.inner.handle.direct_request(pid, &message).await { + match self + .inner + .handle + .direct_request(pid, &message, bind_version) + .await + { Ok(()) => Ok(()), Err(e) => Err(e.into()), } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index c5aa3641d3..ebe0512b29 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -10,12 +10,12 @@ use async_compatibility_layer::{ }; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; -use bincode::Options; use core::time::Duration; use dashmap::DashMap; use futures::StreamExt; use hotshot_types::{ boxed_sync, + constants::Version01, message::Message, traits::{ network::{ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation}, @@ -24,7 +24,6 @@ use hotshot_types::{ }, BoxSyncFuture, }; -use hotshot_utils::bincode::bincode_opts; use rand::Rng; use snafu::ResultExt; use std::{ @@ -36,6 +35,7 @@ use std::{ }, }; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; +use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; /// Shared state for in-memory mock networking. /// @@ -125,7 +125,7 @@ impl MemoryNetwork { while let Some(vec) = task_stream.next().await { trace!(?vec, "Incoming message"); // Attempt to decode message - let x = bincode_opts().deserialize(&vec); + let x = Serializer::::deserialize(&vec); match x { Ok(x) => { let ts = task_send.clone(); @@ -242,16 +242,15 @@ impl ConnectedNetwork for Memory } #[instrument(name = "MemoryNetwork::broadcast_message")] - async fn broadcast_message( + async fn broadcast_message( &self, message: M, recipients: BTreeSet, + _: VER, ) -> Result<(), NetworkError> { trace!(?message, "Broadcasting message"); // Bincode the message - let vec = bincode_opts() - .serialize(&message) - .context(FailedToSerializeSnafu)?; + let vec = Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; trace!("Message bincoded, sending"); for node in &self.inner.master_map.map { // TODO delay/drop etc here @@ -294,21 +293,26 @@ impl ConnectedNetwork for Memory } #[instrument(name = "MemoryNetwork::da_broadcast_message")] - async fn da_broadcast_message( + async fn da_broadcast_message( &self, message: M, recipients: BTreeSet, + bind_version: VER, ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients).await + self.broadcast_message(message, recipients, bind_version) + .await } #[instrument(name = "MemoryNetwork::direct_message")] - async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { + async fn direct_message( + &self, + message: M, + recipient: K, + _: VER, + ) -> Result<(), NetworkError> { // debug!(?message, ?recipient, "Sending direct message"); // Bincode the message - let vec = bincode_opts() - .serialize(&message) - .context(FailedToSerializeSnafu)?; + let vec = Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; trace!("Message bincoded, finding recipient"); if let Some(node) = self.inner.master_map.map.get(&recipient) { let node = node.value().clone(); diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 0a5fff1fd9..8933583439 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,5 +1,8 @@ use super::NetworkError; -use bincode::Options; +use async_compatibility_layer::art::{async_block_on, async_spawn}; +use async_compatibility_layer::channel::UnboundedSendError; +use async_trait::async_trait; +use bincode::config::Options; use cdn_broker::{ reexports::connection::protocols::Tcp, Broker, Config, ConfigBuilder as BrokerConfigBuilder, }; @@ -12,28 +15,31 @@ use cdn_client::{ Client, ConfigBuilder as ClientConfigBuilder, }; use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; -use hotshot_utils::bincode::bincode_opts; -use rand::{rngs::StdRng, RngCore, SeedableRng}; -use tracing::{error, warn}; - -use async_compatibility_layer::art::{async_block_on, async_spawn}; -use async_trait::async_trait; - -use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; +use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::{ boxed_sync, + constants::{Version01, VERSION_0_1}, data::ViewNumber, message::Message, traits::{ - network::{ConnectedNetwork, ConsensusIntentEvent, PushCdnNetworkError}, + network::{ + ConnectedNetwork, ConsensusIntentEvent, NetworkReliability, PushCdnNetworkError, + }, node_implementation::NodeType, signature_key::SignatureKey, }, + utils::bincode_opts, BoxSyncFuture, }; +use rand::rngs::StdRng; +use rand::{RngCore, SeedableRng}; use std::{collections::BTreeSet, path::Path, sync::Arc, time::Duration}; +use tracing::{error, warn}; +use versioned_binary_serialization::{ + version::{StaticVersionType, Version}, + BinarySerializer, Serializer, +}; /// A wrapped `SignatureKey`. We need to implement the Push CDN's `SignatureScheme` /// trait in order to sign and verify messages to/from the CDN. @@ -46,12 +52,14 @@ impl SignatureScheme for WrappedSignatureKey { /// Sign a message of arbitrary data and return the serialized signature fn sign(private_key: &Self::PrivateKey, message: &[u8]) -> anyhow::Result> { let signature = T::sign(private_key, message)?; - + // TODO: replace with rigorously defined serialization scheme... + // why did we not make `PureAssembledSignatureType` be `CanonicalSerialize + CanonicalDeserialize`? Ok(bincode_opts().serialize(&signature)?) } /// Verify a message of arbitrary data and return the result fn verify(public_key: &Self::PublicKey, message: &[u8], signature: &[u8]) -> bool { + // TODO: replace with rigorously defined signing scheme let signature: T::PureAssembledSignatureType = match bincode_opts().deserialize(signature) { Ok(key) => key, Err(_) => return false, @@ -115,13 +123,14 @@ impl PushCdnNetwork { /// # Errors /// - If we fail to serialize the message /// - If we fail to send the broadcast message. - async fn broadcast_message( + async fn broadcast_message( &self, message: Message, topic: Topic, + _: Ver, ) -> Result<(), NetworkError> { // Bincode the message - let serialized_message = match bincode_opts().serialize(&message) { + let serialized_message = match Serializer::::serialize(&message) { Ok(serialized) => serialized, Err(e) => { warn!("Failed to serialize message: {}", e); @@ -318,12 +327,14 @@ impl ConnectedNetwork, TYPES::SignatureKey> /// # Errors /// - If we fail to serialize the message /// - If we fail to send the broadcast message. - async fn broadcast_message( + async fn broadcast_message( &self, message: Message, _recipients: BTreeSet, + bind_version: Ver, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Global).await + self.broadcast_message(message, Topic::Global, bind_version) + .await } /// Broadcast a message to all members of the DA committee. @@ -331,25 +342,28 @@ impl ConnectedNetwork, TYPES::SignatureKey> /// # Errors /// - If we fail to serialize the message /// - If we fail to send the broadcast message. - async fn da_broadcast_message( + async fn da_broadcast_message( &self, message: Message, _recipients: BTreeSet, + bind_version: Ver, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::DA).await + self.broadcast_message(message, Topic::DA, bind_version) + .await } /// Send a direct message to a node with a particular key. Does not retry. /// /// - If we fail to serialize the message /// - If we fail to send the direct message - async fn direct_message( + async fn direct_message( &self, message: Message, recipient: TYPES::SignatureKey, + _: Ver, ) -> Result<(), NetworkError> { // Bincode the message - let serialized_message = match bincode_opts().serialize(&message) { + let serialized_message = match Serializer::::serialize(&message) { Ok(serialized) => serialized, Err(e) => { warn!("Failed to serialize message: {}", e); @@ -401,13 +415,24 @@ impl ConnectedNetwork, TYPES::SignatureKey> return Ok(vec![]); }; - // Deserialize it - let result: Message = bincode_opts() - .deserialize(&message) - .map_err(|e| NetworkError::FailedToSerialize { source: e })?; - - // Return it - Ok(vec![result]) + let message_version = Version::deserialize(&message) + .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; + if message_version.0 == VERSION_0_1 { + let result: Message = Serializer::::deserialize(&message) + .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; + + // Deserialize it + // Return it + Ok(vec![result]) + } else { + Err(NetworkError::FailedToDeserialize { + source: anyhow::format_err!( + "version mismatch, expected {}, got {}", + VERSION_0_1, + message_version.0 + ), + }) + } } /// Do nothing here, as we don't need to look up nodes. diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index b6cfa4b6a1..24e502a743 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -12,31 +12,27 @@ use async_compatibility_layer::{ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; -use hotshot_types::constants::VERSION_0_1; use hotshot_types::{ boxed_sync, + constants::{Version01, VERSION_0_1}, message::{Message, MessagePurpose}, traits::{ network::{ - ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, - TestableNetworkingImplementation, WebServerNetworkError, + ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, NetworkReliability, + TestableNetworkingImplementation, ViewMessage, WebServerNetworkError, }, node_implementation::NodeType, signature_key::SignatureKey, }, BoxSyncFuture, }; -use hotshot_utils::version::read_version; use hotshot_web_server::{self, config}; use lru::LruCache; use serde::{Deserialize, Serialize}; use std::collections::hash_map::DefaultHasher; +use std::collections::BTreeMap; use std::hash::{Hash, Hasher}; use std::num::NonZeroUsize; -use surf_disco::Url; - -use hotshot_types::traits::network::{NetworkReliability, ViewMessage}; -use std::collections::BTreeMap; use std::{ collections::{btree_map::Entry, BTreeSet}, sync::{ @@ -46,7 +42,12 @@ use std::{ time::Duration, }; use surf_disco::error::ClientError; +use surf_disco::Url; use tracing::{debug, error, info, warn}; +use versioned_binary_serialization::{ + version::{StaticVersionType, Version}, + BinarySerializer, Serializer, +}; /// convenience alias alias for the result of getting transactions from the web server pub type TxnResult = Result>)>, ClientError>; @@ -62,19 +63,21 @@ fn hash(t: &T) -> u64 { /// The web server network state #[derive(Clone, Debug)] -pub struct WebServerNetwork { +pub struct WebServerNetwork { /// The inner, core state of the web server network - inner: Arc>, + inner: Arc>, /// An optional shutdown signal. This is only used when this connection is created through the `TestableNetworkingImplementation` API. server_shutdown_signal: Option>>, } -impl WebServerNetwork { +impl WebServerNetwork { /// Post a message to the web server and return the result async fn post_message_to_web_server( &self, message: SendMsg>, ) -> Result<(), NetworkError> { + // Note: it should be possible to get the version of Message and choose client_initial or (if available) + // client_new_ver based on Message. But we do always know let result: Result<(), ClientError> = self .inner .client @@ -166,7 +169,7 @@ impl TaskMap { /// Represents the core of web server networking #[derive(Debug)] -struct Inner { +struct Inner { /// Our own key _own_key: TYPES::SignatureKey, /// Queue for messages @@ -175,8 +178,8 @@ struct Inner { running: AtomicBool, /// The web server connection is ready connected: AtomicBool, - /// The connectioni to the web server - client: surf_disco::Client, + /// The connection to the web server + client: surf_disco::Client, /// The duration to wait between poll attempts wait_between_polls: Duration, /// Whether we are connecting to a DA server @@ -205,7 +208,7 @@ struct Inner { latest_view_sync_certificate_task: Arc>>>, } -impl Inner { +impl Inner { #![allow(clippy::too_many_lines)] /// Handle version 0.1 transactions @@ -225,7 +228,9 @@ impl Inner { *tx_index += 1; - if let Ok(deserialized_message_inner) = bincode::deserialize::>(&tx) { + if let Ok(Some(deserialized_message_inner)) = + Serializer::::deserialize::>>(&tx) + { let deserialized_message = RecvMsg { message: Some(deserialized_message_inner), }; @@ -253,91 +258,93 @@ impl Inner { seen_view_sync_certificates: &mut LruCache, ) -> bool { let poll_queue = &self.poll_queue_0_1; - if let Ok(deserialized_message_inner) = bincode::deserialize::>(&message) { - let deserialized_message = RecvMsg { - message: Some(deserialized_message_inner), - }; - match message_purpose { - MessagePurpose::Data => { - error!("We should not receive transactions in this function"); - } - MessagePurpose::Proposal => { - let proposal = deserialized_message.clone(); - poll_queue.write().await.push(proposal); - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - return true; - } - MessagePurpose::LatestProposal => { - let proposal = deserialized_message.clone(); - let hash = hash(&proposal); - // Only allow unseen proposals to be pushed to the queue - if seen_proposals.put(hash, ()).is_none() { + match Serializer::::deserialize::>>(&message) { + Ok(Some(deserialized_message_inner)) => { + let deserialized_message = RecvMsg { + message: Some(deserialized_message_inner), + }; + match message_purpose { + MessagePurpose::Data => { + error!("We should not receive transactions in this function"); + } + MessagePurpose::Proposal => { + let proposal = deserialized_message.clone(); poll_queue.write().await.push(proposal); + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + return true; } + MessagePurpose::LatestProposal => { + let proposal = deserialized_message.clone(); + let hash = hash(&proposal); + // Only allow unseen proposals to be pushed to the queue + if seen_proposals.put(hash, ()).is_none() { + poll_queue.write().await.push(proposal); + } - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - return true; - } - MessagePurpose::LatestViewSyncCertificate => { - let cert = deserialized_message.clone(); - let hash = hash(&cert); - if seen_view_sync_certificates.put(hash, ()).is_none() { - poll_queue.write().await.push(cert); + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + return true; } - return false; - } - MessagePurpose::Vote - | MessagePurpose::ViewSyncVote - | MessagePurpose::ViewSyncCertificate => { - let vote = deserialized_message.clone(); - *vote_index += 1; - poll_queue.write().await.push(vote); - - return false; - } - MessagePurpose::DAC => { - debug!( - "Received DAC from web server for view {} {}", - view_number, self.is_da - ); - poll_queue.write().await.push(deserialized_message.clone()); - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - // return if we found a DAC, since there will only be 1 per view - // In future we should check to make sure DAC is valid - return true; - } - MessagePurpose::VidDisperse => { - // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 + MessagePurpose::LatestViewSyncCertificate => { + let cert = deserialized_message.clone(); + let hash = hash(&cert); + if seen_view_sync_certificates.put(hash, ()).is_none() { + poll_queue.write().await.push(cert); + } + return false; + } + MessagePurpose::Vote + | MessagePurpose::ViewSyncVote + | MessagePurpose::ViewSyncCertificate => { + let vote = deserialized_message.clone(); + *vote_index += 1; + poll_queue.write().await.push(vote); + + return false; + } + MessagePurpose::DAC => { + debug!( + "Received DAC from web server for view {} {}", + view_number, self.is_da + ); + poll_queue.write().await.push(deserialized_message.clone()); + + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + // return if we found a DAC, since there will only be 1 per view + // In future we should check to make sure DAC is valid + return true; + } + MessagePurpose::VidDisperse => { + // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 - self.poll_queue_0_1 - .write() - .await - .push(deserialized_message.clone()); + self.poll_queue_0_1 + .write() + .await + .push(deserialized_message.clone()); - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - return true; - } + // Only pushing the first proposal since we will soon only be allowing 1 proposal per view + return true; + } - MessagePurpose::Internal => { - error!("Received internal message in web server network"); + MessagePurpose::Internal => { + error!("Received internal message in web server network"); - return false; - } + return false; + } - MessagePurpose::Upgrade => { - poll_queue.write().await.push(deserialized_message.clone()); + MessagePurpose::Upgrade => { + poll_queue.write().await.push(deserialized_message.clone()); - return true; + return true; + } } } + Ok(None) | Err(_) => {} } - false } - /// Pull a web server. + /// Poll a web server. async fn poll_web_server( &self, receiver: UnboundedReceiver>, @@ -379,54 +386,32 @@ impl Inner { }; if let MessagePurpose::Data = message_purpose { + // Note: this should also be polling on client_ let possible_message: TxnResult = self.client.get(&endpoint).send().await; // Deserialize and process transactions from the server. // If something goes wrong at any point, we sleep for wait_between_polls // then try again next time. if let Ok(Some((first_tx_index, txs))) = possible_message { for tx_raw in txs { - // This is very hacky. - // - // Fundamentally, tx_raw is a serialized Option(Message). - // The problem is, we want to extract the serialized Message - // *without* deserializing the entire tx_raw - // (because, a priori, the serialization of Message might depend on the version number, - // which we have not yet read at this point). - // - // So we use the fact that the bincode serialization of Option(_) is a single leading byte - // (0 for None and 1 for Some). Dropping the first byte then yields the serialized Message. - // - // It would be nice to do this with serde primitives, but I'm not sure how. - - match tx_raw.first() { - Some(0) => { - continue; + let tx_version = Version::deserialize(&tx_raw); + + match tx_version { + Ok((VERSION_0_1, _)) => { + self.handle_tx_0_1(tx_raw, first_tx_index, &mut tx_index) + .await; } - Some(1) => { - let tx = tx_raw[1..].to_vec(); - let tx_version = read_version(&tx); - - match tx_version { - Some(VERSION_0_1) => { - self.handle_tx_0_1(tx, first_tx_index, &mut tx_index).await; - } - Some(version) => { - warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", - version, - tx - ); - } - _ => { - warn!( - "Received message with unreadable version number.\n\nPayload:\n\n{:?}", - tx - ); - } - } + Ok((version, _)) => { + warn!( + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", + version, + tx_raw + ); } - _ => { - warn!("Could not deserialize transaction: {:?}", tx_raw); + Err(e) => { + warn!( + "Error {:?}, could not read version number.\n\nPayload:\n\n{:?}", + e, tx_raw + ); } } } @@ -436,65 +421,36 @@ impl Inner { } else { let possible_message: Result>>, ClientError> = self.client.get(&endpoint).send().await; + if let Ok(Some(messages)) = possible_message { - for message_raw in messages { - // This is very hacky. - // - // Fundamentally, message_raw is a serialized Option(Message). - // The problem is, we want to extract the serialized Message - // *without* deserializing the entire message_raw - // (because, a priori, the serialization of Message might depend on the version number, - // which we have not yet read at this point). - // - // So we use the fact that the bincode serialization of Option(_) is a single leading byte - // (0 for None and 1 for Some). Dropping the first byte then yields the serialized Message. - // - // It would be nice to do this with serde primitives, but I'm not sure how. - - match message_raw.first() { - Some(0) => { - continue; - } - Some(1) => { - let message = message_raw[1..].to_vec(); - let message_version = read_version(&message); - - let should_return; - - match message_version { - Some(VERSION_0_1) => { - should_return = self - .handle_message_0_1( - message, - view_number, - message_purpose, - &mut vote_index, - &mut seen_proposals, - &mut seen_view_sync_certificates, - ) - .await; - - if should_return { - return Ok(()); - } - } - Some(version) => { - warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", - version, - message - ); - } - _ => { - warn!( - "Received message with unreadable version number.\n\nPayload:\n\n{:?}", - message - ); - } + for message in messages { + let message_version = Version::deserialize(&message); + + let should_return; + + match message_version { + Ok((VERSION_0_1, _)) => { + should_return = self + .handle_message_0_1( + message, + view_number, + message_purpose, + &mut vote_index, + &mut seen_proposals, + &mut seen_view_sync_certificates, + ) + .await; + + if should_return { + return Ok(()); } } - _ => { - warn!("Could not deserialize message: {:?}", message_raw); + Ok((version, _)) => { + warn!( + "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", version, message); + } + Err(e) => { + warn!("Error {:?}, could not read version number.\n\nPayload:\n\n{:?}", e, message); } } } @@ -592,7 +548,9 @@ impl RecvMsgTrait for RecvMsg { impl NetworkMsg for SendMsg {} impl NetworkMsg for RecvMsg {} -impl WebServerNetwork { +impl + WebServerNetwork +{ /// Creates a new instance of the `WebServerNetwork` /// # Panics /// if the web server url is malformed @@ -605,7 +563,7 @@ impl WebServerNetwork { info!("Connecting to web server at {url:?} is da: {is_da_server}"); // TODO ED Wait for healthcheck - let client = surf_disco::Client::::new(url); + let client = surf_disco::Client::::new(url); let inner = Arc::new(Inner { poll_queue_0_1: Arc::default(), @@ -687,9 +645,10 @@ impl WebServerNetwork { info!("Launching web server on port {port}"); // Start web server async_spawn(async { - match hotshot_web_server::run_web_server::( + match hotshot_web_server::run_web_server::( Some(server_shutdown), url, + NetworkVersion::instance(), ) .await { @@ -724,8 +683,9 @@ impl WebServerNetwork { } #[async_trait] -impl ConnectedNetwork, TYPES::SignatureKey> - for WebServerNetwork +impl + ConnectedNetwork, TYPES::SignatureKey> + for WebServerNetwork { /// Blocks until the network is successfully initialized async fn wait_for_ready(&self) { @@ -776,10 +736,11 @@ impl ConnectedNetwork, TYPES::Signatur /// broadcast message to some subset of nodes /// blocking - async fn broadcast_message( + async fn broadcast_message( &self, message: Message, _recipients: BTreeSet, + _: VER, ) -> Result<(), NetworkError> { // short circuit if we are shut down #[cfg(feature = "hotshot-testing")] @@ -798,20 +759,23 @@ impl ConnectedNetwork, TYPES::Signatur /// broadcast a message only to a DA committee /// blocking - async fn da_broadcast_message( + async fn da_broadcast_message( &self, message: Message, recipients: BTreeSet, + bind_version: VER, ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients).await + self.broadcast_message(message, recipients, bind_version) + .await } /// Sends a direct message to a specific node /// blocking - async fn direct_message( + async fn direct_message( &self, message: Message, _recipient: TYPES::SignatureKey, + _: VER, ) -> Result<(), NetworkError> { // short circuit if we are shut down #[cfg(feature = "hotshot-testing")] @@ -1234,7 +1198,9 @@ impl ConnectedNetwork, TYPES::Signatur } } -impl TestableNetworkingImplementation for WebServerNetwork { +impl + TestableNetworkingImplementation for WebServerNetwork +{ fn generator( expected_node_count: usize, num_bootstrap: usize, diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 8bccfa4678..1083ff638b 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -1,9 +1,9 @@ [package] description = "Libp2p Networking Layer" name = "libp2p-networking" -version = "0.1.0" -edition = "2021" -authors = ["Espresso Systems "] +version = { workspace = true } +edition = { workspace = true } +authors = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -12,17 +12,16 @@ default = ["webui"] webui = [] [dependencies] +anyhow = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } -bincode = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } derive_builder = "0.20.0" either = { workspace = true } futures = { workspace = true } hotshot-types = { workspace = true } -hotshot-utils = { path = "../utils" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } @@ -34,6 +33,7 @@ tide = { version = "0.16", optional = true, default-features = false, features = "h1-server", ] } tracing = { workspace = true } +versioned-binary-serialization = { workspace = true } void = "1.0.2" dashmap = "5.5.3" lazy_static = { workspace = true } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index b614b61d35..4a28ad441a 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -22,9 +22,7 @@ use self::behaviours::{ direct_message::DMEvent, request_response::{Request, Response}, }; -use bincode::Options; use futures::channel::oneshot::{self, Sender}; -use hotshot_utils::bincode::bincode_opts; use libp2p::{ build_multiaddr, core::{muxing::StreamMuxerBox, transport::Boxed}, @@ -80,22 +78,6 @@ impl FromStr for NetworkNodeType { } } -/// Serialize an arbitrary message -/// # Errors -/// When unable to serialize a message -pub fn serialize_msg(msg: &T) -> Result, Box> { - bincode_opts().serialize(&msg) -} - -/// Deserialize an arbitrary message -/// # Errors -/// When unable to deserialize a message -pub fn deserialize_msg<'a, T: Deserialize<'a>>( - msg: &'a [u8], -) -> Result> { - bincode_opts().deserialize(msg) -} - impl Default for NetworkNodeType { fn default() -> Self { Self::Bootstrap diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index d96c629ae7..1887682b60 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -8,11 +8,9 @@ use async_compatibility_layer::{ art::{async_sleep, async_timeout, future::to}, channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; -use bincode::Options; use futures::channel::oneshot; use hotshot_types::traits::network::NetworkError as HotshotNetworkError; -use hotshot_utils::bincode::bincode_opts; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use serde::{Deserialize, Serialize}; @@ -23,6 +21,7 @@ use std::{ time::{Duration, Instant}, }; use tracing::{debug, info, instrument}; +use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; /// A handle containing: /// - A reference to the state @@ -184,15 +183,14 @@ impl NetworkNodeHandle { /// /// Will retrun a networking error if the channel closes before the result /// can be sent back - pub async fn request_data( + pub async fn request_data( &self, request: &impl Serialize, peer: PeerId, + _: VER, ) -> Result, NetworkNodeHandleError> { let (tx, rx) = oneshot::channel(); - let serialized_msg = bincode_opts() - .serialize(request) - .context(SerializationSnafu)?; + let serialized_msg = Serializer::::serialize(request).context(SerializationSnafu)?; let req = ClientRequest::DataRequest { request: Request(serialized_msg), peer, @@ -207,14 +205,13 @@ impl NetworkNodeHandle { /// Send a response to a request with the response channel /// # Errors /// Will error if the client request channel is closed, or serialization fails. - pub async fn respond_data( + pub async fn respond_data( &self, response: &impl Serialize, chan: ResponseChannel, + _: VER, ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = bincode_opts() - .serialize(response) - .context(SerializationSnafu)?; + let serialized_msg = Serializer::::serialize(response).context(SerializationSnafu)?; let req = ClientRequest::DataResponse { response: Response(serialized_msg), chan, @@ -236,13 +233,16 @@ impl NetworkNodeHandle { /// Looks up a node's `PeerId` and attempts to validate routing /// # Errors /// if the peer was unable to be looked up (did not provide a response, DNE) - pub async fn lookup_node Deserialize<'a> + Serialize>( + pub async fn lookup_node Deserialize<'a> + Serialize, VER: StaticVersionType>( &self, key: V, dht_timeout: Duration, + bind_version: VER, ) -> Result { // get record (from DHT) - let pid = self.get_record_timeout::(&key, dht_timeout).await?; + let pid = self + .get_record_timeout::(&key, dht_timeout, bind_version) + .await?; // pid lookup for routing // self.lookup_pid(pid).await?; @@ -254,17 +254,16 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value - pub async fn put_record( + pub async fn put_record( &self, key: &impl Serialize, value: &impl Serialize, + _: VER, ) -> Result<(), NetworkNodeHandleError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::PutDHT { - key: bincode_opts().serialize(key).context(SerializationSnafu)?, - value: bincode_opts() - .serialize(value) - .context(SerializationSnafu)?, + key: Serializer::::serialize(key).context(SerializationSnafu)?, + value: Serializer::::serialize(value).context(SerializationSnafu)?, notify: s, }; @@ -279,23 +278,22 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn get_record Deserialize<'a>>( + pub async fn get_record Deserialize<'a>, VER: StaticVersionType>( &self, key: &impl Serialize, retry_count: u8, + _: VER, ) -> Result { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetDHT { - key: bincode_opts().serialize(key).context(SerializationSnafu)?, + key: Serializer::::serialize(key).context(SerializationSnafu)?, notify: s, retry_count, }; self.send_request(req).await?; match r.await.context(CancelledRequestSnafu) { - Ok(result) => bincode_opts() - .deserialize(&result) - .context(DeserializationSnafu), + Ok(result) => Serializer::::deserialize(&result).context(DeserializationSnafu), Err(e) => Err(e).context(DHTSnafu), } } @@ -306,12 +304,13 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn get_record_timeout Deserialize<'a>>( + pub async fn get_record_timeout Deserialize<'a>, VER: StaticVersionType>( &self, key: &impl Serialize, timeout: Duration, + bind_version: VER, ) -> Result { - let result = async_timeout(timeout, self.get_record(key, 3)).await; + let result = async_timeout(timeout, self.get_record(key, 3, bind_version)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, @@ -324,13 +323,14 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - pub async fn put_record_timeout( + pub async fn put_record_timeout( &self, key: &impl Serialize, value: &impl Serialize, timeout: Duration, + bind_version: VER, ) -> Result<(), NetworkNodeHandleError> { - let result = async_timeout(timeout, self.put_record(key, value)).await; + let result = async_timeout(timeout, self.put_record(key, value, bind_version)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, @@ -370,12 +370,13 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn direct_request( + pub async fn direct_request( &self, pid: PeerId, msg: &impl Serialize, + _: VER, ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + let serialized_msg = Serializer::::serialize(msg).context(SerializationSnafu)?; self.direct_request_no_serialize(pid, serialized_msg).await } @@ -400,12 +401,13 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn direct_response( + pub async fn direct_response( &self, chan: ResponseChannel>, msg: &impl Serialize, + _: VER, ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + let serialized_msg = Serializer::::serialize(msg).context(SerializationSnafu)?; let req = ClientRequest::DirectResponse(chan, serialized_msg); self.send_request(req).await } @@ -426,12 +428,13 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn gossip( + pub async fn gossip( &self, topic: String, msg: &impl Serialize, + _: VER, ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = bincode_opts().serialize(msg).context(SerializationSnafu)?; + let serialized_msg = Serializer::::serialize(msg).context(SerializationSnafu)?; self.gossip_no_serialize(topic, serialized_msg).await } @@ -532,12 +535,12 @@ pub enum NetworkNodeHandleError { /// Failure to serialize a message SerializationError { /// source of error - source: Box, + source: anyhow::Error, }, /// Failure to deserialize a message DeserializationError { /// source of error - source: Box, + source: anyhow::Error, }, /// Error sending request to network SendError, diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 112c6595d8..238122f19a 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -4,15 +4,15 @@ mod common; use crate::common::print_connections; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -use bincode::Options; use common::{test_bed, HandleSnafu, HandleWithState, TestError}; -use hotshot_utils::bincode::bincode_opts; +use hotshot_types::constants::{Version01, STATIC_VER_0_1}; use libp2p_networking::network::{NetworkEvent, NetworkNodeHandleError}; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; use snafu::ResultExt; use std::{fmt::Debug, sync::Arc, time::Duration}; use tracing::{debug, error, info, instrument, warn}; +use versioned_binary_serialization::{BinarySerializer, Serializer}; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; @@ -74,7 +74,7 @@ pub async fn counter_handle_network_event( match event { IsBootstrapped | NetworkEvent::ResponseRequested(..) => {} GossipMsg(m) | DirectResponse(m, _) => { - if let Ok(msg) = bincode_opts().deserialize::(&m) { + if let Ok(msg) = Serializer::::deserialize::(&m) { match msg { // direct message only MyCounterIs(c) => { @@ -99,7 +99,7 @@ pub async fn counter_handle_network_event( } } DirectRequest(m, _, chan) => { - if let Ok(msg) = bincode_opts().deserialize::(&m) { + if let Ok(msg) = Serializer::::deserialize::(&m) { match msg { // direct message request IncrementCounter { from, to, .. } => { @@ -113,24 +113,27 @@ pub async fn counter_handle_network_event( .await; handle .handle - .direct_response(chan, &CounterMessage::Noop) + .direct_response(chan, &CounterMessage::Noop, STATIC_VER_0_1) .await?; } // direct message response AskForCounter => { let response = MyCounterIs(handle.state.copied().await); - handle.handle.direct_response(chan, &response).await?; + handle + .handle + .direct_response(chan, &response, STATIC_VER_0_1) + .await?; } MyCounterIs(_) => { handle .handle - .direct_response(chan, &CounterMessage::Noop) + .direct_response(chan, &CounterMessage::Noop, STATIC_VER_0_1) .await?; } Noop => { handle .handle - .direct_response(chan, &CounterMessage::Noop) + .direct_response(chan, &CounterMessage::Noop, STATIC_VER_0_1) .await?; } } @@ -171,7 +174,7 @@ async fn run_request_response_increment<'a>( std::process::exit(-1)}, } requester_handle.handle - .direct_request(requestee_pid, &CounterMessage::AskForCounter) + .direct_request(requestee_pid, &CounterMessage::AskForCounter, STATIC_VER_0_1) .await .context(HandleSnafu)?; match stream.next().await.unwrap() { @@ -241,7 +244,7 @@ async fn run_gossip_round( msg_handle .handle - .gossip("global".to_string(), &msg) + .gossip("global".to_string(), &msg, STATIC_VER_0_1) .await .context(HandleSnafu)?; @@ -355,12 +358,18 @@ async fn run_dht_rounds( value.push(inc_val); // put the key - msg_handle.handle.put_record(&key, &value).await.unwrap(); + msg_handle + .handle + .put_record(&key, &value, STATIC_VER_0_1) + .await + .unwrap(); // get the key from the other nodes for handle in handles { - let result: Result, NetworkNodeHandleError> = - handle.handle.get_record_timeout(&key, timeout).await; + let result: Result, NetworkNodeHandleError> = handle + .handle + .get_record_timeout(&key, timeout, STATIC_VER_0_1) + .await; match result { Err(e) => { error!("DHT error {e:?} during GET"); diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 437d5f9a6b..a564a8d19f 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "hotshot-orchestrator" -version = "0.1.1" -edition = "2021" +version = { workspace = true } +edition = { workspace = true } [dependencies] async-compatibility-layer = { workspace = true } @@ -11,7 +11,6 @@ futures = { workspace = true } libp2p = { workspace = true } blake3 = { workspace = true } hotshot-types = { workspace = true } -hotshot-utils = { path = "../utils" } tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } @@ -21,6 +20,7 @@ toml = { workspace = true } thiserror = "1.0.50" serde-inline-default = "0.1.1" csv = "1.3.0" +versioned-binary-serialization = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index ca7e57822b..7c36bf2a36 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -1,6 +1,6 @@ use std::{net::IpAddr, time::Duration}; -use crate::config::NetworkConfig; +use crate::{config::NetworkConfig, OrchestratorVersion}; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; @@ -14,7 +14,7 @@ use tide_disco::Url; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client - client: surf_disco::Client, + client: surf_disco::Client, /// the identity pub identity: String, } @@ -180,7 +180,7 @@ impl OrchestratorClient { /// Creates the client that will connect to the orchestrator #[must_use] pub fn new(args: ValidatorArgs, identity: String) -> Self { - let client = surf_disco::Client::::new(args.url); + let client = surf_disco::Client::::new(args.url); // TODO ED: Add healthcheck wait here OrchestratorClient { client, identity } } @@ -198,7 +198,7 @@ impl OrchestratorClient { ) -> NetworkConfig { // get the node index let identity = identity.as_str(); - let identity = |client: Client| { + let identity = |client: Client| { async move { let node_index: Result = client .post(&format!("api/identity/{identity}")) @@ -211,7 +211,7 @@ impl OrchestratorClient { let node_index = self.wait_for_fn_from_orchestrator(identity).await; // get the corresponding config - let f = |client: Client| { + let f = |client: Client| { async move { let config: Result, ClientError> = client .post(&format!("api/config/{node_index}")) @@ -233,7 +233,7 @@ impl OrchestratorClient { /// # Panics /// if unable to post pub async fn get_node_index_for_init_validator_config(&self) -> u16 { - let cur_node_index = |client: Client| { + let cur_node_index = |client: Client| { async move { let cur_node_index: Result = client.post("api/get_tmp_node_index").send().await; @@ -264,7 +264,7 @@ impl OrchestratorClient { .await; // wait for all nodes' public keys - let wait_for_all_nodes_pub_key = |client: Client| { + let wait_for_all_nodes_pub_key = |client: Client| { async move { client.get("api/peer_pub_ready").send().await }.boxed() }; self.wait_for_fn_from_orchestrator::<_, _, ()>(wait_for_all_nodes_pub_key) @@ -283,7 +283,7 @@ impl OrchestratorClient { /// # Panics /// Panics if unable to post. pub async fn wait_for_all_nodes_ready(&self, node_index: u64) -> bool { - let send_ready_f = |client: Client| { + let send_ready_f = |client: Client| { async move { let result: Result<_, ClientError> = client .post("api/ready") @@ -298,7 +298,7 @@ impl OrchestratorClient { self.wait_for_fn_from_orchestrator::<_, _, ()>(send_ready_f) .await; - let wait_for_all_nodes_ready_f = |client: Client| { + let wait_for_all_nodes_ready_f = |client: Client| { async move { client.get("api/start").send().await }.boxed() }; self.wait_for_fn_from_orchestrator(wait_for_all_nodes_ready_f) @@ -322,7 +322,7 @@ impl OrchestratorClient { /// Returns whatever type the given function returns async fn wait_for_fn_from_orchestrator(&self, f: F) -> GEN where - F: Fn(Client) -> Fut, + F: Fn(Client) -> Fut, Fut: Future>, { loop { diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 19560b01c3..4f7bc9a281 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -544,6 +544,8 @@ pub struct HotShotConfigFile { pub propose_min_round_time: Duration, /// The maximum amount of time a leader can wait to start a round pub propose_max_round_time: Duration, + /// Time to wait until we request data associated with a proposal + pub data_request_delay: Duration, } /// Holds configuration for a validator node @@ -619,8 +621,8 @@ impl From> for HotS num_bootstrap: val.num_bootstrap, propose_min_round_time: val.propose_min_round_time, propose_max_round_time: val.propose_max_round_time, + data_request_delay: val.data_request_delay, election_config: None, - data_request_delay: Duration::from_millis(200), } } } @@ -671,9 +673,10 @@ impl Default for HotShotConfigFile { timeout_ratio: (11, 10), round_start_delay: 1, start_delay: 1, + num_bootstrap: 5, propose_min_round_time: Duration::from_secs(0), propose_max_round_time: Duration::from_secs(10), - num_bootstrap: 5, + data_request_delay: Duration::from_millis(200), } } } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index e8a95ce251..5424bc2864 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -35,6 +35,19 @@ use libp2p::identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, Keypair, }; +use versioned_binary_serialization::version::{StaticVersion, StaticVersionType}; + +/// Orchestrator is not, strictly speaking, bound to the network; it can have its own versioning. +/// Orchestrator Version (major) +pub const ORCHESTRATOR_MAJOR_VERSION: u16 = 0; +/// Orchestrator Version (minor) +pub const ORCHESTRATOR_MINOR_VERSION: u16 = 1; +/// Orchestrator Version as a type +pub type OrchestratorVersion = + StaticVersion; +/// Orchestrator Version as a type-binding instance +pub const ORCHESTRATOR_VERSION: OrchestratorVersion = StaticVersion {}; + /// Generate an keypair based on a `seed` and an `index` /// # Panics /// This panics if libp2p is unable to generate a secret key from the seed @@ -360,20 +373,21 @@ where } /// Sets up all API routes -fn define_api( -) -> Result, ApiError> +fn define_api( +) -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + OrchestratorApi, KEY: serde::Serialize, ELECTION: serde::Serialize, + VER: 'static, { let api_toml = toml::from_str::(include_str!(concat!( env!("CARGO_MANIFEST_DIR"), "/api.toml" ))) .expect("API file is not valid toml"); - let mut api = Api::::new(api_toml)?; + let mut api = Api::::new(api_toml)?; api.post("post_identity", |req, state| { async move { let identity = req.string_param("identity")?.parse::(); @@ -447,9 +461,12 @@ where let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); - let mut app = App::>, ServerError>::with_state(state); + let mut app = App::< + RwLock>, + ServerError, OrchestratorVersion + >::with_state(state); app.register_module("api", web_api.unwrap()) .expect("Error registering api"); tracing::error!("listening on {:?}", url); - app.serve(url).await + app.serve(url, ORCHESTRATOR_VERSION).await } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 9c09f9c7cb..387b245e5e 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -1,34 +1,34 @@ [package] -authors = ["Espresso Systems "] +authors = { workspace = true } description = "Async task implementations for consensus" -edition = "2021" +edition = { workspace = true } name = "hotshot-task-impls" -version = "0.1.0" +version = { workspace = true } [dependencies] +async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } async-trait = { workspace = true } +bincode = { workspace = true } +bitvec = { workspace = true } +chrono = "0.4" +commit = { workspace = true } either = { workspace = true } futures = { workspace = true } -snafu = { workspace = true } -async-lock = { workspace = true } -tracing = { workspace = true } +hotshot-task = { path = "../task" } hotshot-types = { workspace = true } -hotshot-utils = { path = "../utils" } hs-builder-api = { workspace = true } jf-primitives = { workspace = true } -time = { workspace = true } -commit = { workspace = true } -bincode = { workspace = true } -bitvec = { workspace = true } -sha2 = { workspace = true } -hotshot-task = { path = "../task" } -async-broadcast = { workspace = true } -chrono = "0.4" rand = { workspace = true } -surf-disco = { workspace = true } serde = { workspace = true } +sha2 = { workspace = true } +snafu = { workspace = true } +surf-disco = { workspace = true } tagged-base64 = { workspace = true } +time = { workspace = true } +tracing = { workspace = true } +versioned-binary-serialization = { workspace = true } [features] gpu-vid = [ diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 3e5e555207..9c950f162a 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -11,6 +11,7 @@ use serde::{Deserialize, Serialize}; use snafu::Snafu; use surf_disco::{client::HealthStatus, Client, Url}; use tagged_base64::TaggedBase64; +use versioned_binary_serialization::version::StaticVersionType; #[derive(Debug, Snafu, Serialize, Deserialize)] /// Represents errors thant builder client may return @@ -52,14 +53,14 @@ impl From for BuilderClientError { } /// Client for builder API -pub struct BuilderClient { +pub struct BuilderClient { /// Underlying surf_disco::Client - inner: Client, + inner: Client, /// Marker for [`NodeType`] used here _marker: std::marker::PhantomData, } -impl BuilderClient +impl BuilderClient where <::SignatureKey as SignatureKey>::PureAssembledSignatureType: for<'a> TryFrom<&'a TaggedBase64> + Into, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a76e48c42c..07e6bb9e54 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -11,7 +11,6 @@ use async_std::task::JoinHandle; use commit::Committable; use core::time::Duration; use hotshot_task::task::{Task, TaskState}; -use hotshot_types::constants::Version; use hotshot_types::constants::LOOK_AHEAD; use hotshot_types::event::LeafInfo; use hotshot_types::{ @@ -39,6 +38,7 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use tracing::warn; +use versioned_binary_serialization::version::Version; use crate::vote::HandleVoteEvent; use chrono::Utc; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 36dc2f4d47..004fba3ba6 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -5,10 +5,10 @@ use crate::{ use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; use either::Either::{self, Left, Right}; -use hotshot_types::constants::VERSION_0_1; use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; +use hotshot_types::constants::STATIC_VER_0_1; use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ message::{ @@ -371,7 +371,6 @@ impl, TYPES::Signa } }; let message = Message { - version: VERSION_0_1, sender, kind: message_kind, }; @@ -380,10 +379,17 @@ impl, TYPES::Signa let net = self.channel.clone(); async_spawn(async move { let transmit_result = match transmit_type { - TransmitType::Direct => net.direct_message(message, recipient.unwrap()).await, - TransmitType::Broadcast => net.broadcast_message(message, committee).await, + TransmitType::Direct => { + net.direct_message(message, recipient.unwrap(), STATIC_VER_0_1) + .await + } + TransmitType::Broadcast => { + net.broadcast_message(message, committee, STATIC_VER_0_1) + .await + } TransmitType::DACommitteeBroadcast => { - net.da_broadcast_message(message, committee).await + net.da_broadcast_message(message, committee, STATIC_VER_0_1) + .await } }; diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index b6c75ee314..4ec7a1f66a 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::{marker::PhantomData, sync::Arc, time::Duration}; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -7,12 +7,10 @@ use crate::{ use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; -use bincode::Options; use either::Either; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, - constants::VERSION_0_1, message::{CommitteeConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, traits::{ election::Membership, @@ -22,10 +20,10 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use hotshot_utils::bincode::bincode_opts; use rand::{prelude::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; use tracing::{error, info, warn}; +use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; /// Amount of time to try for a request before timing out. const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); @@ -34,7 +32,11 @@ const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); /// The task will wait a it's `delay` and then send a request iteratively to peers /// for any data they don't have related to the proposal. For now it's just requesting VID /// shares. -pub struct NetworkResponseState> { +pub struct NetworkResponseState< + TYPES: NodeType, + I: NodeImplementation, + Ver: StaticVersionType, +> { /// Network to send requests over pub network: I::QuorumNetwork, /// Consensus shared state so we can check if we've gotten the information @@ -52,13 +54,17 @@ pub struct NetworkResponseState> { pub public_key: TYPES::SignatureKey, /// This nodes private/signign key, used to sign requests. pub private_key: ::PrivateKey, + /// Version discrimination + _phantom: PhantomData, } /// Alias for a signature type Signature = <::SignatureKey as SignatureKey>::PureAssembledSignatureType; -impl> TaskState for NetworkResponseState { +impl, Ver: StaticVersionType + 'static> TaskState + for NetworkResponseState +{ type Event = HotShotEvent; type Output = HotShotTaskCompleted; @@ -72,7 +78,9 @@ impl> TaskState for NetworkRespons let state = task.state(); let prop_view = proposal.get_view_number(); if prop_view >= state.view { - state.spawn_requests(prop_view, task.clone_sender()).await; + state + .spawn_requests(prop_view, task.clone_sender(), Ver::instance()) + .await; } None } @@ -100,20 +108,27 @@ impl> TaskState for NetworkRespons } } -impl> NetworkResponseState { +impl, Ver: StaticVersionType + 'static> + NetworkResponseState +{ /// Spawns tasks for a given view to retrieve any data needed. - async fn spawn_requests(&self, view: TYPES::Time, sender: Sender>) { - let requests = self.build_requests(view).await; + async fn spawn_requests( + &self, + view: TYPES::Time, + sender: Sender>, + bind_version: Ver, + ) { + let requests = self.build_requests(view, bind_version).await; if requests.is_empty() { return; } requests .into_iter() - .for_each(|r| self.run_delay(r, sender.clone(), view)); + .for_each(|r| self.run_delay(r, sender.clone(), view, bind_version)); } /// Creats the srequest structures for all types that are needed. - async fn build_requests(&self, view: TYPES::Time) -> Vec> { + async fn build_requests(&self, view: TYPES::Time, _: Ver) -> Vec> { let mut reqs = Vec::new(); if !self.state.read().await.vid_shares.contains_key(&view) { reqs.push(RequestKind::VID(view, self.public_key.clone())); @@ -129,6 +144,7 @@ impl> NetworkResponseState, sender: Sender>, view: TYPES::Time, + _: Ver, ) { let mut recipients: Vec<_> = self .da_membership @@ -145,7 +161,7 @@ impl> NetworkResponseState::serialize(&request) else { tracing::error!("Failed to serialize request!"); return; }; @@ -154,7 +170,7 @@ impl> NetworkResponseState(request, signature)); } } @@ -180,24 +196,37 @@ struct VidRequest(TYPES::Time, TYPES::SignatureKey); impl> DelayedRequester { /// Wait the delay, then try to complete the request. Iterates over peers /// until the request is completed, or the data is no longer needed. - async fn run(mut self, request: RequestKind, signature: Signature) { + async fn run( + mut self, + request: RequestKind, + signature: Signature, + ) { // Do the delay then start sending async_sleep(self.delay).await; match request { - RequestKind::VID(view, key) => self.do_vid(VidRequest(view, key), signature).await, + RequestKind::VID(view, key) => { + self.do_vid::(VidRequest(view, key), signature).await; + } RequestKind::DAProposal(..) => {} } } /// Handle sending a VID Share request, runs the loop until the data exists - async fn do_vid(&mut self, req: VidRequest, signature: Signature) { + async fn do_vid( + &mut self, + req: VidRequest, + signature: Signature, + ) { let message = make_vid(&req, signature); while !self.recipients.is_empty() && !self.cancel_vid(&req).await { match async_timeout( REQUEST_TIMEOUT, - self.network - .request_data::(message.clone(), self.recipients.pop().unwrap()), + self.network.request_data::( + message.clone(), + self.recipients.pop().unwrap(), + Ver::instance(), + ), ) .await { @@ -256,7 +285,6 @@ fn make_vid( signature, }; Message { - version: VERSION_0_1, sender: req.1.clone(), kind: MessageKind::Data(DataMessage::RequestData(data_request)), } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 425171eeea..3c47b21808 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -3,11 +3,10 @@ use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::Receiver; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use bincode::Options; +use bincode::config::Options; use either::Either::Right; use futures::{channel::mpsc, FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; -use hotshot_types::constants::VERSION_0_1; use hotshot_types::{ consensus::Consensus, data::VidDisperse, @@ -20,8 +19,8 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, }, + utils::bincode_opts, }; -use hotshot_utils::bincode::bincode_opts; use sha2::{Digest, Sha256}; use crate::events::HotShotEvent; @@ -142,7 +141,6 @@ impl NetworkRequestState { /// in the surrounding feilds and creating the `MessageKind` fn make_msg(&self, msg: ResponseMessage) -> Message { Message { - version: VERSION_0_1, sender: self.pub_key.clone(), kind: MessageKind::Data(DataMessage::DataResponse(msg)), } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 573c2853bc..5672744f68 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -23,8 +23,8 @@ use hotshot_types::{ signature_key::SignatureKey, BlockPayload, }, + utils::bincode_opts, }; -use hotshot_utils::bincode::bincode_opts; use std::{ collections::{HashMap, HashSet}, sync::Arc, diff --git a/task/Cargo.toml b/task/Cargo.toml index 9ee39d4654..869fe29760 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -1,8 +1,8 @@ [package] -authors = ["Espresso Systems "] +authors = { workspace = true } name = "hotshot-task" -version = "0.1.0" -edition = "2021" +version = { workspace = true } +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -14,9 +14,14 @@ tracing = { workspace = true } async-compatibility-layer = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace= true, features = ["time", "rt-multi-thread", "macros", "sync"] } +tokio = { workspace = true, features = [ + "time", + "rt-multi-thread", + "macros", + "sync", +] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace= true, features = ["attributes"] } +async-std = { workspace = true, features = ["attributes"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 86a40674b5..68410cc2b1 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "hotshot-testing-macros" -version = "0.1.0" -edition = "2021" +version = { workspace = true } +edition = { workspace = true } description = "Macros for creating hotshot tests" [dependencies] diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 674c129ab1..314c320237 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "hotshot-testing" -version = "0.1.0" -edition = "2021" +version = { workspace = true } +edition = { workspace = true } description = "Types and traits for the HotShot consesus module" -authors = ["Espresso Systems "] +authors = { workspace = true } [features] default = [] @@ -16,34 +16,34 @@ gpu-vid = [ [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } async-trait = { workspace = true } -sha3 = "^0.10" -bincode = { workspace = true } +bitvec = { workspace = true } commit = { workspace = true } either = { workspace = true } +ethereum-types = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } -hotshot-types = { workspace = true } -hotshot-utils = { path = "../utils" } +hotshot-example-types = { path = "../example-types" } hotshot-macros = { path = "../macros" } -hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } +hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } +hotshot-task = { path = "../task" } +hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } +hotshot-types = { workspace = true } hs-builder-api = { workspace = true } jf-primitives = { workspace = true } +portpicker = { workspace = true } rand = { workspace = true } -snafu = { workspace = true } -tracing = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } -async-lock = { workspace = true } -bitvec = { workspace = true } -ethereum-types = { workspace = true } -hotshot-task = { path = "../task" } -hotshot-example-types = { path = "../example-types" } +sha3 = "^0.10" +snafu = { workspace = true } tide-disco = { workspace = true } -portpicker = { workspace = true } +tracing = { workspace = true } +versioned-binary-serialization = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } + [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 5eb229dac6..5b7bc20968 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -4,13 +4,14 @@ use futures::future::BoxFuture; use hotshot::traits::BlockPayload; use hotshot::types::SignatureKey; use hotshot_example_types::{block_types::TestBlockPayload, node_types::TestTypes}; -use hotshot_types::traits::block_contents::vid_commitment; -use hotshot_types::utils::BuilderCommitment; -use hotshot_types::{traits::node_implementation::NodeType, vid::VidCommitment}; -use hs_builder_api::block_info::{ - AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo, +use hotshot_types::{ + constants::{Version01, STATIC_VER_0_1}, + traits::{block_contents::vid_commitment, node_implementation::NodeType}, + utils::BuilderCommitment, + vid::VidCommitment, }; use hs_builder_api::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Options}, data_source::BuilderDataSource, }; @@ -109,16 +110,17 @@ impl BuilderDataSource for TestableBuilderSource { /// # Panics /// If constructing and launching the builder fails for any reason pub fn run_builder(url: Url) { - let builder_api = hs_builder_api::builder::define_api::( - &Options::default(), - ) - .expect("Failed to construct the builder API"); + let builder_api = + hs_builder_api::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); let (pub_key, priv_key) = ::SignatureKey::generated_from_seed_indexed([1; 32], 0); - let mut app: App = + let mut app: App = App::with_state(TestableBuilderSource { priv_key, pub_key }); app.register_module("/", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url)); + async_spawn(app.serve(url, STATIC_VER_0_1)); } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 9481ec7398..4fafac33b9 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -292,12 +292,12 @@ impl TestMetadata { // TODO do we use these fields?? propose_min_round_time: Duration::from_millis(0), propose_max_round_time: Duration::from_millis(1000), + data_request_delay: Duration::from_millis(200), // TODO what's the difference between this and the second config? election_config: Some(TYPES::Membership::default_election_config( num_nodes_with_stake as u64, 0, )), - data_request_delay: Duration::from_millis(200), }; let TimingData { next_view_timeout, diff --git a/testing/tests/block_builder.rs b/testing/tests/block_builder.rs index 86d25e3346..5146c4534d 100644 --- a/testing/tests/block_builder.rs +++ b/testing/tests/block_builder.rs @@ -18,12 +18,14 @@ use tide_disco::Url; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_block_builder() { + use hotshot_types::constants::Version01; + let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); run_builder(api_url.clone()); - let client: BuilderClient = BuilderClient::new(api_url); + let client: BuilderClient = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); // Test getting blocks diff --git a/testing/tests/memory_network.rs b/testing/tests/memory_network.rs index 0c9066e9b9..1b60b9117c 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/memory_network.rs @@ -13,7 +13,7 @@ use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::TestValidatedState, }; -use hotshot_types::constants::VERSION_0_1; +use hotshot_types::constants::STATIC_VER_0_1; use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::ConnectedNetwork; @@ -105,7 +105,6 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec>(), + STATIC_VER_0_1, ) .await .expect("Failed to message node"); @@ -259,6 +259,7 @@ async fn memory_network_broadcast_queue() { .broadcast_message( sent_message.clone(), vec![pub_key_1].into_iter().collect::>(), + STATIC_VER_0_1, ) .await .expect("Failed to message node"); @@ -305,14 +306,18 @@ async fn memory_network_test_in_flight_message_count() { for (count, message) in messages.iter().enumerate() { network1 - .direct_message(message.clone(), pub_key_2) + .direct_message(message.clone(), pub_key_2, STATIC_VER_0_1) .await .unwrap(); // network 2 has received `count` broadcast messages and `count + 1` direct messages assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); network2 - .broadcast_message(message.clone(), broadcast_recipients.clone()) + .broadcast_message( + message.clone(), + broadcast_recipients.clone(), + STATIC_VER_0_1, + ) .await .unwrap(); // network 1 has received `count` broadcast messages diff --git a/testing/tests/unit.rs b/testing/tests/unit.rs index 02cf77b9d0..ff857ae443 100644 --- a/testing/tests/unit.rs +++ b/testing/tests/unit.rs @@ -1,4 +1,3 @@ mod unit { mod message; - mod version; } diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 3bebd9ae43..90046710f2 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -4,14 +4,8 @@ use std::marker::PhantomData; use commit::Committable; use either::Left; -use hotshot_types::constants::Version; - use hotshot_example_types::node_types::TestTypes; -use hotshot_utils::bincode::bincode_opts; - -use bincode::config::Options; - use hotshot_types::{ message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, signature_key::BLSPubKey, @@ -19,6 +13,10 @@ use hotshot_types::{ simple_vote::ViewSyncCommitData, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey}, }; +use versioned_binary_serialization::{ + version::{StaticVersion, Version}, + BinarySerializer, Serializer, +}; #[test] // Checks that the current program protocol version @@ -27,10 +25,13 @@ fn version_number_at_start_of_serialization() { let sender = BLSPubKey::generated_from_seed_indexed([0u8; 32], 0).0; let view_number = ConsensusTime::new(17); // The version we set for the message + const MAJOR: u16 = 37; + const MINOR: u16 = 17; let version = Version { - major: 37, - minor: 17, + major: MAJOR, + minor: MINOR, }; + type TestVersion = StaticVersion; // The specific data we attach to our message shouldn't affect the serialization, // we're using ViewSyncCommitData for simplicity. let data: ViewSyncCommitData = ViewSyncCommitData { @@ -46,17 +47,16 @@ fn version_number_at_start_of_serialization() { _pd: PhantomData, }; let message = Message { - version, sender, kind: MessageKind::Consensus(SequencingMessage(Left( GeneralConsensusMessage::ViewSyncCommitCertificate(simple_certificate), ))), }; - let serialized_message: Vec = bincode_opts().serialize(&message).unwrap(); + let serialized_message: Vec = Serializer::::serialize(&message).unwrap(); // The versions we've read from the message - let major_version_read = u16::from_le_bytes(serialized_message[..2].try_into().unwrap()); - let minor_version_read = u16::from_le_bytes(serialized_message[2..4].try_into().unwrap()); - assert_eq!(version.major, major_version_read); - assert_eq!(version.minor, minor_version_read); + let version_read = Version::deserialize(&serialized_message).unwrap().0; + + assert_eq!(version.major, version_read.major); + assert_eq!(version.minor, version_read.minor); } diff --git a/testing/tests/unit/version.rs b/testing/tests/unit/version.rs deleted file mode 100644 index 7db8d2581e..0000000000 --- a/testing/tests/unit/version.rs +++ /dev/null @@ -1,33 +0,0 @@ -#[cfg(test)] -use hotshot_types::constants::Version; -use hotshot_utils::version::read_version; - -#[test] -/// Check that the version number is read correctly. -fn read_version_1() { - let bytes: [u8; 6] = [0, 0, 1, 0, 4, 9]; - let version = Version { major: 0, minor: 1 }; - assert_eq!(read_version(&bytes), Some(version)); -} - -#[test] -/// Check that the version number is read correctly. -fn read_version_2() { - let bytes: [u8; 4] = [9, 0, 3, 0]; - let version = Version { major: 9, minor: 3 }; - assert_eq!(read_version(&bytes), Some(version)); -} - -#[test] -/// Check that `None` is returned if there are not enough bytes. -fn read_version_insufficient_bytes_1() { - let bytes: [u8; 3] = [0, 0, 0]; - assert_eq!(read_version(&bytes), None); -} - -#[test] -/// Check that `None` is returned if there are not enough bytes. -fn read_version_insufficient_bytes_2() { - let bytes: [u8; 0] = []; - assert_eq!(read_version(&bytes), None); -} diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 02fe53cc0c..4f9f10358e 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -10,10 +10,10 @@ use hotshot_testing::{ script::{Expectations, TaskScript}, view_generator::TestViewGenerator, }; -use hotshot_types::constants::Version; use hotshot_types::{ data::ViewNumber, simple_vote::UpgradeProposalData, traits::node_implementation::ConsensusTime, }; +use versioned_binary_serialization::version::Version; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/utils/Cargo.toml b/utils/Cargo.toml deleted file mode 100644 index a3ad83dfad..0000000000 --- a/utils/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -authors = ["Espresso Systems "] -description = "Accompanying utilities used by hotshot." -edition = "2021" -name = "hotshot-utils" -readme = "../README.md" -version = "0.1.0" - -[dependencies] -bincode = { workspace = true } -hotshot-types = { workspace = true } - -[lints] -workspace = true diff --git a/utils/src/bincode.rs b/utils/src/bincode.rs deleted file mode 100644 index e321e54b06..0000000000 --- a/utils/src/bincode.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(clippy::type_complexity)] -use bincode::{ - config::{ - FixintEncoding, LittleEndian, RejectTrailing, WithOtherEndian, WithOtherIntEncoding, - WithOtherLimit, WithOtherTrailing, - }, - DefaultOptions, Options, -}; - -/// For the wire format, we use bincode with the following options: -/// - No upper size limit -/// - Litte endian encoding -/// - Varint encoding -/// - Reject trailing bytes -#[must_use] -pub fn bincode_opts() -> WithOtherTrailing< - WithOtherIntEncoding< - WithOtherEndian, LittleEndian>, - FixintEncoding, - >, - RejectTrailing, -> { - bincode::DefaultOptions::new() - .with_no_limit() - .with_little_endian() - .with_fixint_encoding() - .reject_trailing_bytes() -} diff --git a/utils/src/lib.rs b/utils/src/lib.rs deleted file mode 100644 index 9f735513ca..0000000000 --- a/utils/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Contains general utility structures and methods - -/// Provides bincode options -pub mod bincode; - -/// Provides version utilities -pub mod version; diff --git a/utils/src/version.rs b/utils/src/version.rs deleted file mode 100644 index 5bd0dd7db9..0000000000 --- a/utils/src/version.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Utilities for reading version number - -use hotshot_types::constants::Version; - -/// Read the version number from a message (passed a byte vector), -/// returning `None` is there are not enough bytes. -#[must_use] -#[allow(clippy::module_name_repetitions)] -pub fn read_version(message: &[u8]) -> Option { - let bytes_major = message.get(0..2)?.try_into().ok()?; - let bytes_minor = message.get(2..4)?.try_into().ok()?; - let major = u16::from_le_bytes(bytes_major); - let minor = u16::from_le_bytes(bytes_minor); - - Some(Version { major, minor }) -} diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 269cbe6ac6..3d9b342618 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "hotshot-web-server" description = "HotShot web server" -version = "0.1.1" +version = { workspace = true } readme = "README.md" -edition = "2021" +edition = { workspace = true } [dependencies] async-compatibility-layer = { workspace = true } @@ -15,6 +15,7 @@ tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } toml = { workspace = true } +versioned-binary-serialization = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index fb02e97b07..56ed16ea6b 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -23,6 +23,7 @@ use tide_disco::{ Api, App, StatusCode, Url, }; use tracing::{debug, info}; +use versioned_binary_serialization::version::StaticVersionType; /// Convience alias for a lock over the state of the app /// TODO this is used in two places. It might be clearer to just inline @@ -749,22 +750,28 @@ pub struct Options { } /// Sets up all API routes +/// This web server incorporates the protocol version within each message. +/// Transport versioning (generic params here) only changes when the web-CDN itself changes. +/// When transport versioning changes, the application itself must update its version. #[allow(clippy::too_many_lines)] -fn define_api(options: &Options) -> Result, ApiError> +fn define_api( + options: &Options, +) -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + WebServerDataSource, KEY: SignatureKey, + NetworkVersion: 'static, { let mut api = match &options.api_path { - Some(path) => Api::::from_file(path)?, + Some(path) => Api::::from_file(path)?, None => { let toml: toml::Value = toml::from_str(include_str!("../api.toml")).map_err(|err| { ApiError::CannotReadToml { reason: err.to_string(), } })?; - Api::::new(toml)? + Api::::new(toml)? } }; api.get("getproposal", |req, state| { @@ -940,19 +947,23 @@ where /// this looks like it will panic not error /// # Panics /// on errors creating or registering the tide disco api -pub async fn run_web_server( +pub async fn run_web_server< + KEY: SignatureKey + 'static, + NetworkVersion: StaticVersionType + 'static, +>( shutdown_listener: Option>, url: Url, + bind_version: NetworkVersion, ) -> io::Result<()> { let options = Options::default(); let web_api = define_api(&options).unwrap(); let state = State::new(WebServerState::new().with_shutdown_signal(shutdown_listener)); - let mut app = App::, Error>::with_state(state); + let mut app = App::, Error, NetworkVersion>::with_state(state); app.register_module("api", web_api).unwrap(); - let app_future = app.serve(url); + let app_future = app.serve(url, bind_version); app_future.await } From 2e5f588dea261a68229c4ce1850a21919a163364 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 19 Mar 2024 17:33:16 -0400 Subject: [PATCH 0875/1393] Bring `hotshot-types` and `hs-builder-api` into `Hotshot` (#2812) * WIP * Still chasing a bug * One down, one to go * ... and done. * Cleanup and finishing touches * Fixing the post-merge code, and one missed issue in the pre-merge code. * Some clarifying name changes. * Removing bincode dependency where applicable * cleanup of some (not quite all) remaining bincode invocations * dependency versioning tags and cleanup * bump version * Bumping anticipated version * Now with new VBS * branch => tag on dependencies * Update lock? * Cargo.lock update * Apparently `just * lint` runs with --features="hotshot-testing" enabled * integrate new storage type * fix build, tie up stragglers, modifying tasks next * merge latest stable tabs * merge latest tags and fix method signatures * Update *-disco * Dependencies updated to tags * Add builder-api and types to repo * fix doc * hs-builder-api -> hotshot-builder-api * fmt --------- Co-authored-by: Nathan F Yospe Co-authored-by: Jarred Parr --- builder-api/Cargo.toml | 19 + builder-api/README.md | 4 + builder-api/api/builder.toml | 65 +++ builder-api/api/submit.toml | 33 ++ builder-api/src/api.rs | 59 ++ builder-api/src/block_info.rs | 38 ++ builder-api/src/builder.rs | 197 +++++++ builder-api/src/data_source.rs | 43 ++ builder-api/src/lib.rs | 5 + builder-api/src/query_data.rs | 15 + example-types/Cargo.toml | 8 +- examples/Cargo.toml | 22 +- hotshot-stake-table/Cargo.toml | 2 +- hotshot/Cargo.toml | 18 +- libp2p-networking/Cargo.toml | 4 +- orchestrator/Cargo.toml | 2 +- task-impls/Cargo.toml | 8 +- task-impls/src/builder.rs | 2 +- testing-macros/Cargo.toml | 2 +- testing/Cargo.toml | 8 +- testing/src/block_builder.rs | 14 +- types/Cargo.toml | 65 +++ types/src/consensus.rs | 394 ++++++++++++++ types/src/constants.rs | 52 ++ types/src/data.rs | 483 +++++++++++++++++ types/src/error.rs | 112 ++++ types/src/event.rs | 160 ++++++ types/src/lib.rs | 190 +++++++ types/src/light_client.rs | 223 ++++++++ types/src/message.rs | 316 +++++++++++ types/src/qc.rs | 310 +++++++++++ types/src/signature_key.rs | 127 +++++ types/src/simple_certificate.rs | 179 +++++++ types/src/simple_vote.rs | 258 +++++++++ types/src/stake_table.rs | 31 ++ types/src/traits.rs | 15 + types/src/traits/block_contents.rs | 149 ++++++ types/src/traits/consensus_api.rs | 42 ++ types/src/traits/election.rs | 91 ++++ types/src/traits/metrics.rs | 295 ++++++++++ types/src/traits/network.rs | 681 ++++++++++++++++++++++++ types/src/traits/node_implementation.rs | 238 +++++++++ types/src/traits/qc.rs | 95 ++++ types/src/traits/signature_key.rs | 140 +++++ types/src/traits/stake_table.rs | 235 ++++++++ types/src/traits/states.rs | 89 ++++ types/src/traits/storage.rs | 21 + types/src/utils.rs | 189 +++++++ types/src/vid.rs | 277 ++++++++++ types/src/vote.rs | 183 +++++++ web_server/Cargo.toml | 2 +- 51 files changed, 6160 insertions(+), 50 deletions(-) create mode 100644 builder-api/Cargo.toml create mode 100644 builder-api/README.md create mode 100644 builder-api/api/builder.toml create mode 100644 builder-api/api/submit.toml create mode 100644 builder-api/src/api.rs create mode 100644 builder-api/src/block_info.rs create mode 100644 builder-api/src/builder.rs create mode 100644 builder-api/src/data_source.rs create mode 100644 builder-api/src/lib.rs create mode 100644 builder-api/src/query_data.rs create mode 100644 types/Cargo.toml create mode 100644 types/src/consensus.rs create mode 100644 types/src/constants.rs create mode 100644 types/src/data.rs create mode 100644 types/src/error.rs create mode 100644 types/src/event.rs create mode 100644 types/src/lib.rs create mode 100644 types/src/light_client.rs create mode 100644 types/src/message.rs create mode 100644 types/src/qc.rs create mode 100644 types/src/signature_key.rs create mode 100644 types/src/simple_certificate.rs create mode 100644 types/src/simple_vote.rs create mode 100644 types/src/stake_table.rs create mode 100644 types/src/traits.rs create mode 100644 types/src/traits/block_contents.rs create mode 100644 types/src/traits/consensus_api.rs create mode 100644 types/src/traits/election.rs create mode 100644 types/src/traits/metrics.rs create mode 100644 types/src/traits/network.rs create mode 100644 types/src/traits/node_implementation.rs create mode 100644 types/src/traits/qc.rs create mode 100644 types/src/traits/signature_key.rs create mode 100644 types/src/traits/stake_table.rs create mode 100644 types/src/traits/states.rs create mode 100644 types/src/traits/storage.rs create mode 100644 types/src/utils.rs create mode 100644 types/src/vid.rs create mode 100644 types/src/vote.rs diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml new file mode 100644 index 0000000000..a351f1099f --- /dev/null +++ b/builder-api/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "hotshot-builder-api" +version = "0.1.6" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = { workspace = true } +clap = { version = "4.4", features = ["derive", "env"] } +derive_more = "0.99" +futures = "0.3" +hotshot-types = { path = "../types" } +serde = { workspace = true } +snafu = { workspace = true } +tagged-base64 = { workspace = true } +tide-disco = { workspace = true } +toml = { workspace = true } +versioned-binary-serialization = { workspace = true } diff --git a/builder-api/README.md b/builder-api/README.md new file mode 100644 index 0000000000..8f4788f16e --- /dev/null +++ b/builder-api/README.md @@ -0,0 +1,4 @@ +# hotshot-builder-api +Minimal dependencies shared API definitions for HotShot Builder protocol + +# HotShot Consensus Module diff --git a/builder-api/api/builder.toml b/builder-api/api/builder.toml new file mode 100644 index 0000000000..e64b94da48 --- /dev/null +++ b/builder-api/api/builder.toml @@ -0,0 +1,65 @@ +# Copyright (c) 2024 Espresso Systems (espressosys.com) +# This file is part of the HotShot Builder Protocol. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +[meta] +NAME = "hs-builder-get" +DESCRIPTION = "" +FORMAT_VERSION = "0.1.0" + +[route.available_blocks] +PATH = ["availableblocks/:parent_hash"] +":parent_hash" = "TaggedBase64" +DOC = """ +Get descriptions for all block candidates based on a specific parent block. + +Returns +``` +[ + "block_metadata": { + "block_hash": TaggedBase64, + "block_size": integer, + "offered_fee": integer, + }, +] +``` +""" + +[route.claim_block] +PATH = ["claimblock/:block_hash/:signature"] +":block_hash" = "TaggedBase64" +":signature" = "TaggedBase64" +DOC = """ +Get the specified block candidate. + +Returns application-specific encoded transactions type +""" + +[route.claim_header_input] +PATH = ["claimheaderinput/:block_hash/:signature"] +":block_hash" = "TaggedBase64" +":signature" = "TaggedBase64" +DOC = """ +Get the specified block candidate. + +Returns application-specific block header type +""" diff --git a/builder-api/api/submit.toml b/builder-api/api/submit.toml new file mode 100644 index 0000000000..a9d1db4b46 --- /dev/null +++ b/builder-api/api/submit.toml @@ -0,0 +1,33 @@ +# Copyright (c) 2024 Espresso Systems (espressosys.com) +# This file is part of the HotShot Builder Protocol. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +[meta] +NAME = "hs-builder-submit" +DESCRIPTION = "" +FORMAT_VERSION = "0.1.0" + +[route.submit_txn] +PATH = ["/submit"] +METHOD = "POST" +DOC = "Submit a transaction to builder's private mempool." diff --git a/builder-api/src/api.rs b/builder-api/src/api.rs new file mode 100644 index 0000000000..62f2edf08f --- /dev/null +++ b/builder-api/src/api.rs @@ -0,0 +1,59 @@ +// Copyright (c) 2022 Espresso Systems (espressosys.com) +// This file is part of the HotShot Query Service library. +// +// This program is free software: you can redistribute it and/or modify it under the terms of the GNU +// General Public License as published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without +// even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// General Public License for more details. +// You should have received a copy of the GNU General Public License along with this program. If not, +// see . + +use std::fs; +use std::path::Path; +use tide_disco::api::{Api, ApiError}; +use toml::{map::Entry, Value}; +use versioned_binary_serialization::version::StaticVersionType; + +pub(crate) fn load_api( + path: Option>, + default: &str, + extensions: impl IntoIterator, +) -> Result, ApiError> { + let mut toml = match path { + Some(path) => load_toml(path.as_ref())?, + None => toml::from_str(default).map_err(|err| ApiError::CannotReadToml { + reason: err.to_string(), + })?, + }; + for extension in extensions { + merge_toml(&mut toml, extension); + } + Api::new(toml) +} + +fn merge_toml(into: &mut Value, from: Value) { + if let (Value::Table(into), Value::Table(from)) = (into, from) { + for (key, value) in from { + match into.entry(key) { + Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), + Entry::Vacant(entry) => { + entry.insert(value); + } + } + } + } +} + +fn load_toml(path: &Path) -> Result { + let bytes = fs::read(path).map_err(|err| ApiError::CannotReadToml { + reason: err.to_string(), + })?; + let string = std::str::from_utf8(&bytes).map_err(|err| ApiError::CannotReadToml { + reason: err.to_string(), + })?; + toml::from_str(string).map_err(|err| ApiError::CannotReadToml { + reason: err.to_string(), + }) +} diff --git a/builder-api/src/block_info.rs b/builder-api/src/block_info.rs new file mode 100644 index 0000000000..0afb546e4b --- /dev/null +++ b/builder-api/src/block_info.rs @@ -0,0 +1,38 @@ +use std::{hash::Hash, marker::PhantomData}; + +use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey, BlockPayload}, + utils::BuilderCommitment, + vid::VidCommitment, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[serde(bound = "")] +pub struct AvailableBlockInfo { + pub block_hash: BuilderCommitment, + pub block_size: u64, + pub offered_fee: u64, + pub signature: <::SignatureKey as SignatureKey>::PureAssembledSignatureType, + pub sender: ::SignatureKey, + pub _phantom: PhantomData, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[serde(bound = "")] +pub struct AvailableBlockData { + pub block_payload: ::BlockPayload, + pub metadata: <::BlockPayload as BlockPayload>::Metadata, + pub signature: <::SignatureKey as SignatureKey>::PureAssembledSignatureType, + pub sender: ::SignatureKey, + pub _phantom: PhantomData, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[serde(bound = "")] +pub struct AvailableBlockHeaderInput { + pub vid_commitment: VidCommitment, + pub signature: <::SignatureKey as SignatureKey>::PureAssembledSignatureType, + pub sender: ::SignatureKey, + pub _phantom: PhantomData, +} diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs new file mode 100644 index 0000000000..23026675b9 --- /dev/null +++ b/builder-api/src/builder.rs @@ -0,0 +1,197 @@ +use std::{fmt::Display, path::PathBuf}; + +use clap::Args; +use derive_more::From; +use futures::FutureExt; +use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + utils::BuilderCommitment, +}; +use serde::{Deserialize, Serialize}; +use snafu::{ResultExt, Snafu}; +use tagged_base64::TaggedBase64; +use tide_disco::{ + api::ApiError, + method::{ReadState, WriteState}, + Api, RequestError, StatusCode, +}; +use versioned_binary_serialization::version::StaticVersionType; + +use crate::{ + api::load_api, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, +}; + +#[derive(Args, Default)] +pub struct Options { + #[arg(long = "builder-api-path", env = "HOTSHOT_BUILDER_API_PATH")] + pub api_path: Option, + + /// Additional API specification files to merge with `builder-api-path`. + /// + /// These optional files may contain route definitions for application-specific routes that have + /// been added as extensions to the basic builder API. + #[arg( + long = "builder-extension", + env = "HOTSHOT_BUILDER_EXTENSIONS", + value_delimiter = ',' + )] + pub extensions: Vec, +} + +#[derive(Clone, Debug, Snafu, Deserialize, Serialize)] +#[snafu(visibility(pub))] +pub enum BuildError { + /// The requested resource does not exist or is not known to this builder service. + NotFound, + /// The requested resource exists but is not currently available. + Missing, + /// There was an error while trying to fetch the requested resource. + #[snafu(display("Failed to fetch requested resource: {message}"))] + Error { message: String }, +} + +#[derive(Clone, Debug, From, Snafu, Deserialize, Serialize)] +#[snafu(visibility(pub))] +pub enum Error { + Request { + source: RequestError, + }, + #[snafu(display("error building block from {resource}: {source}"))] + #[from(ignore)] + BlockAvailable { + source: BuildError, + resource: String, + }, + #[snafu(display("error claiming block {resource}: {source}"))] + #[from(ignore)] + BlockClaim { + source: BuildError, + resource: String, + }, + #[snafu(display("error unpacking transaction: {source}"))] + #[from(ignore)] + TxnUnpack { + source: RequestError, + }, + #[snafu(display("error submitting transaction: {source}"))] + #[from(ignore)] + TxnSubmit { + source: BuildError, + }, + Custom { + message: String, + status: StatusCode, + }, +} + +impl tide_disco::error::Error for Error { + fn catch_all(status: StatusCode, msg: String) -> Self { + Error::Custom { + message: msg, + status, + } + } + + fn status(&self) -> StatusCode { + match self { + Error::Request { .. } => StatusCode::BadRequest, + Error::BlockAvailable { source, .. } | Error::BlockClaim { source, .. } => match source + { + BuildError::NotFound => StatusCode::NotFound, + BuildError::Missing => StatusCode::NotFound, + BuildError::Error { .. } => StatusCode::InternalServerError, + }, + Error::TxnUnpack { .. } => StatusCode::BadRequest, + Error::TxnSubmit { .. } => StatusCode::InternalServerError, + Error::Custom { .. } => StatusCode::InternalServerError, + } + } +} + +pub fn define_api( + options: &Options, +) -> Result, ApiError> +where + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + BuilderDataSource, + Types: NodeType, + <::SignatureKey as SignatureKey>::PureAssembledSignatureType: + for<'a> TryFrom<&'a TaggedBase64> + Into + Display, + for<'a> <<::SignatureKey as SignatureKey>::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, +{ + let mut api = load_api::( + options.api_path.as_ref(), + include_str!("../api/builder.toml"), + options.extensions.clone(), + )?; + api.with_version("0.0.1".parse().unwrap()) + .get("available_blocks", |req, state| { + async move { + let hash = req.blob_param("parent_hash")?; + state + .get_available_blocks(&hash) + .await + .context(BlockAvailableSnafu { + resource: hash.to_string(), + }) + } + .boxed() + })? + .get("claim_block", |req, state| { + async move { + let hash: BuilderCommitment = req.blob_param("block_hash")?; + let signature = req.blob_param("signature")?; + state + .claim_block(&hash, &signature) + .await + .context(BlockClaimSnafu { + resource: hash.to_string(), + }) + } + .boxed() + })? + .get("claim_header_input", |req, state| { + async move { + let hash: BuilderCommitment = req.blob_param("block_hash")?; + let signature = req.blob_param("signature")?; + state + .claim_block_header_input(&hash, &signature) + .await + .context(BlockClaimSnafu { + resource: hash.to_string(), + }) + } + .boxed() + })?; + Ok(api) +} + +pub fn submit_api( + options: &Options, +) -> Result, ApiError> +where + State: 'static + Send + Sync + WriteState, + ::State: Send + Sync + AcceptsTxnSubmits, + Types: NodeType, +{ + let mut api = load_api::( + options.api_path.as_ref(), + include_str!("../api/submit.toml"), + options.extensions.clone(), + )?; + api.with_version("0.0.1".parse().unwrap()) + .post("submit_txn", |req, state| { + async move { + let tx = req + .body_auto::<::Transaction, Ver>(Ver::instance()) + .context(TxnUnpackSnafu)?; + state.submit_txn(tx).await.context(TxnSubmitSnafu)?; + Ok(()) + } + .boxed() + })?; + Ok(api) +} diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs new file mode 100644 index 0000000000..69a75feb85 --- /dev/null +++ b/builder-api/src/data_source.rs @@ -0,0 +1,43 @@ +use async_trait::async_trait; +use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + utils::BuilderCommitment, + vid::VidCommitment, +}; +use tagged_base64::TaggedBase64; + +use crate::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::BuildError, +}; + +#[async_trait] +pub trait BuilderDataSource +where + I: NodeType, + <::SignatureKey as SignatureKey>::PureAssembledSignatureType: + for<'a> TryFrom<&'a TaggedBase64> + Into, +{ + async fn get_available_blocks( + &self, + for_parent: &VidCommitment, + ) -> Result>, BuildError>; + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError>; + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError>; +} + +#[async_trait] +pub trait AcceptsTxnSubmits +where + I: NodeType, +{ + async fn submit_txn(&mut self, txn: ::Transaction) -> Result<(), BuildError>; +} diff --git a/builder-api/src/lib.rs b/builder-api/src/lib.rs new file mode 100644 index 0000000000..c89608d85c --- /dev/null +++ b/builder-api/src/lib.rs @@ -0,0 +1,5 @@ +mod api; +pub mod block_info; +pub mod builder; +pub mod data_source; +pub mod query_data; diff --git a/builder-api/src/query_data.rs b/builder-api/src/query_data.rs new file mode 100644 index 0000000000..44b2d1c24f --- /dev/null +++ b/builder-api/src/query_data.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot HotShot Builder Protocol. +// +// TODO: License + +use hotshot_types::traits::node_implementation::NodeType; +use serde::{Deserialize, Serialize}; + +use crate::block_info::AvailableBlockInfo; + +#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[serde(bound = "")] +pub struct AvailableBlocksQueryData { + pub blocks: Vec>, +} diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index f86e7c67b7..19034f2c71 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -9,21 +9,19 @@ authors = { workspace = true } default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] -gpu-vid = [ - "hotshot-task-impls/gpu-vid", -] +gpu-vid = ["hotshot-task-impls/gpu-vid"] [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-trait = { workspace = true } -anyhow = { workspace = true } +anyhow = { workspace = true } sha3 = "^0.10" commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } rand = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index de083e5c3b..32ae3315ae 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -9,9 +9,7 @@ rust-version = "1.65.0" [features] default = ["docs", "doc-images"] -gpu-vid = [ - "hotshot-task-impls/gpu-vid", -] +gpu-vid = ["hotshot-task-impls/gpu-vid"] # Features required for binaries bin-orchestrator = ["clap"] @@ -105,7 +103,7 @@ embed-doc-image = "0.1.4" futures = { workspace = true } hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } @@ -129,10 +127,10 @@ tracing = { workspace = true } tokio = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } cdn-broker = { workspace = true, features = [ - "insecure", - "runtime-tokio", - "strong_consistency", - "local_discovery", + "insecure", + "runtime-tokio", + "strong_consistency", + "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } @@ -140,10 +138,10 @@ cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } async-std = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } cdn-broker = { workspace = true, features = [ - "insecure", - "runtime-async-std", - "strong_consistency", - "local_discovery", + "insecure", + "runtime-async-std", + "strong_consistency", + "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index dad2699625..601d6c1f63 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -14,7 +14,7 @@ ark-serialize = { workspace = true } ark-std = { workspace = true } digest = { workspace = true } ethereum-types = { workspace = true } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } jf-primitives = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index d75f7228b6..39ebb32512 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -40,7 +40,7 @@ futures = { workspace = true } hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } @@ -58,10 +58,10 @@ versioned-binary-serialization = { workspace = true } tokio = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } cdn-broker = { workspace = true, features = [ - "insecure", - "runtime-tokio", - "strong_consistency", - "local_discovery", + "insecure", + "runtime-tokio", + "strong_consistency", + "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } @@ -69,10 +69,10 @@ cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } async-std = { workspace = true } cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } cdn-broker = { workspace = true, features = [ - "insecure", - "runtime-async-std", - "strong_consistency", - "local_discovery", + "insecure", + "runtime-async-std", + "strong_consistency", + "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 1083ff638b..b5ba3e00f1 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -21,7 +21,7 @@ custom_debug = { workspace = true } derive_builder = "0.20.0" either = { workspace = true } futures = { workspace = true } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } @@ -30,7 +30,7 @@ serde_bytes = { workspace = true } serde_json = { workspace = true } snafu = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ - "h1-server", + "h1-server", ] } tracing = { workspace = true } versioned-binary-serialization = { workspace = true } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index a564a8d19f..001d0ea41d 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -10,7 +10,7 @@ clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } libp2p = { workspace = true } blake3 = { workspace = true } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 387b245e5e..7886a22388 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -17,8 +17,8 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot-task = { path = "../task" } -hotshot-types = { workspace = true } -hs-builder-api = { workspace = true } +hotshot-types = { path = "../types" } +hotshot-builder-api = { path = "../builder-api" } jf-primitives = { workspace = true } rand = { workspace = true } serde = { workspace = true } @@ -31,9 +31,7 @@ tracing = { workspace = true } versioned-binary-serialization = { workspace = true } [features] -gpu-vid = [ - "hotshot-types/gpu-vid" -] +gpu-vid = ["hotshot-types/gpu-vid"] [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 9c950f162a..92ae382df6 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -1,12 +1,12 @@ use async_compatibility_layer::art::async_sleep; use std::time::{Duration, Instant}; +use hotshot_builder_api::builder::{BuildError, Error as BuilderApiError}; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, vid::VidCommitment, }; -use hs_builder_api::builder::{BuildError, Error as BuilderApiError}; use serde::{Deserialize, Serialize}; use snafu::Snafu; use surf_disco::{client::HealthStatus, Client, Url}; diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 68410cc2b1..022176586d 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -15,7 +15,7 @@ commit = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", default-features = false } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } hotshot-testing = { path = "../testing", default-features = false } hotshot-example-types = { path = "../example-types" } jf-primitives = { workspace = true } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 314c320237..54af33edf7 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -9,9 +9,7 @@ authors = { workspace = true } default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] -gpu-vid = [ - "hotshot-types/gpu-vid" -] +gpu-vid = ["hotshot-types/gpu-vid"] [dependencies] async-broadcast = { workspace = true } @@ -29,8 +27,8 @@ hotshot-macros = { path = "../macros" } hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } -hotshot-types = { workspace = true } -hs-builder-api = { workspace = true } +hotshot-types = { path = "../types" } +hotshot-builder-api = { path = "../builder-api" } jf-primitives = { workspace = true } portpicker = { workspace = true } rand = { workspace = true } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 5b7bc20968..1074ea0500 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -3,6 +3,11 @@ use async_trait::async_trait; use futures::future::BoxFuture; use hotshot::traits::BlockPayload; use hotshot::types::SignatureKey; +use hotshot_builder_api::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{BuildError, Options}, + data_source::BuilderDataSource, +}; use hotshot_example_types::{block_types::TestBlockPayload, node_types::TestTypes}; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, @@ -10,11 +15,6 @@ use hotshot_types::{ utils::BuilderCommitment, vid::VidCommitment, }; -use hs_builder_api::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::{BuildError, Options}, - data_source::BuilderDataSource, -}; use tide_disco::{method::ReadState, App, Url}; /// The only block [`TestableBuilderSource`] provides @@ -111,13 +111,13 @@ impl BuilderDataSource for TestableBuilderSource { /// If constructing and launching the builder fails for any reason pub fn run_builder(url: Url) { let builder_api = - hs_builder_api::builder::define_api::( + hotshot_builder_api::builder::define_api::( &Options::default(), ) .expect("Failed to construct the builder API"); let (pub_key, priv_key) = ::SignatureKey::generated_from_seed_indexed([1; 32], 0); - let mut app: App = + let mut app: App = App::with_state(TestableBuilderSource { priv_key, pub_key }); app.register_module("/", builder_api) .expect("Failed to register the builder API"); diff --git a/types/Cargo.toml b/types/Cargo.toml new file mode 100644 index 0000000000..720932342b --- /dev/null +++ b/types/Cargo.toml @@ -0,0 +1,65 @@ +[package] +authors = ["Espresso Systems "] +description = "Types and traits for the HotShot consesus module" +edition = "2021" +name = "hotshot-types" +version = "0.1.11" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { workspace = true } +ark-bls12-381 = { workspace = true } +ark-bn254 = { workspace = true } +ark-ec = { workspace = true } +ark-ed-on-bn254 = { workspace = true } +ark-ff = { workspace = true } +ark-serialize = { workspace = true } +ark-std = { workspace = true } +async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } +async-trait = { workspace = true } +bincode = { workspace = true } +bitvec = { workspace = true } +blake3 = { workspace = true } +commit = { workspace = true } +custom_debug = { workspace = true } +digest = { workspace = true } +either = { workspace = true } +espresso-systems-common = { workspace = true } +ethereum-types = { workspace = true } +futures = { workspace = true } + +generic-array = { workspace = true } + +# TODO generic-array should not be a direct dependency +# https://github.com/EspressoSystems/HotShot/issues/1850 +lazy_static = { workspace = true } +rand = { workspace = true } +sha2 = { workspace = true } +snafu = { workspace = true } +time = { workspace = true } +tracing = { workspace = true } +typenum = { workspace = true } +derivative = "2.2.0" +jf-primitives = { workspace = true } +jf-plonk = { workspace = true } +jf-utils = { workspace = true } +rand_chacha = { workspace = true } +serde = { workspace = true } +tagged-base64 = { workspace = true } +versioned-binary-serialization = { workspace = true } +displaydoc = { version = "0.2.3", default-features = false } +dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } + +[dev-dependencies] +serde_json = { workspace = true } + +[features] +gpu-vid = ["jf-primitives/gpu-vid"] + +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { workspace = true } diff --git a/types/src/consensus.rs b/types/src/consensus.rs new file mode 100644 index 0000000000..aa14eaacdb --- /dev/null +++ b/types/src/consensus.rs @@ -0,0 +1,394 @@ +//! Provides the core consensus types + +pub use crate::utils::{View, ViewInner}; +use displaydoc::Display; + +use crate::{ + data::{Leaf, VidDisperse}, + error::HotShotError, + message::Proposal, + simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, + traits::{ + metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, + node_implementation::NodeType, + ValidatedState, + }, + utils::{StateAndDelta, Terminator}, +}; +use commit::Commitment; + +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, Mutex}, +}; +use tracing::error; + +/// A type alias for `HashMap, T>` +type CommitmentMap = HashMap, T>; + +/// A reference to the consensus algorithm +/// +/// This will contain the state of all rounds. +#[derive(custom_debug::Debug)] +pub struct Consensus { + /// Immutable instance-level state. + pub instance_state: TYPES::InstanceState, + + /// The validated states that are currently loaded in memory. + pub validated_state_map: BTreeMap>, + + /// All the VID shares we've received for current and future views. + /// In the future we will need a different struct similar to VidDisperse except + /// it stores only one share. + /// TODO + pub vid_shares: BTreeMap>>, + + /// All the DA certs we've received for current and future views. + /// view -> DA cert + pub saved_da_certs: HashMap>, + + /// All the upgrade certs we've received for current and future views. + /// view -> upgrade cert + pub saved_upgrade_certs: HashMap>, + + /// View number that is currently on. + pub cur_view: TYPES::Time, + + /// last view had a successful decide event + pub last_decided_view: TYPES::Time, + + /// Map of leaf hash -> leaf + /// - contains undecided leaves + /// - includes the MOST RECENT decided leaf + pub saved_leaves: CommitmentMap>, + + /// Saved payloads. + /// + /// Encoded transactions for every view if we got a payload for that view. + pub saved_payloads: BTreeMap>, + + /// The `locked_qc` view number + pub locked_view: TYPES::Time, + + /// the highqc per spec + pub high_qc: QuorumCertificate, + + /// A reference to the metrics trait + pub metrics: Arc, +} + +/// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces +#[derive(Clone, Debug)] +pub struct ConsensusMetricsValue { + /// The number of last synced block height + pub last_synced_block_height: Box, + /// The number of last decided view + pub last_decided_view: Box, + /// Number of timestamp for the last decided time + pub last_decided_time: Box, + /// The current view + pub current_view: Box, + /// Number of views that are in-flight since the last decided view + pub number_of_views_since_last_decide: Box, + /// Number of views that are in-flight since the last anchor view + pub number_of_views_per_decide_event: Box, + /// Number of invalid QCs we've seen since the last commit. + pub invalid_qc: Box, + /// Number of outstanding transactions + pub outstanding_transactions: Box, + /// Memory size in bytes of the serialized transactions still outstanding + pub outstanding_transactions_memory_size: Box, + /// Number of views that timed out + pub number_of_timeouts: Box, +} + +/// The wrapper with a string name for the networking metrics +#[derive(Clone, Debug)] +pub struct ConsensusMetrics { + /// a prefix which tracks the name of the metric + prefix: String, + /// a map of values + values: Arc>, +} + +/// the set of counters and gauges for the networking metrics +#[derive(Clone, Debug, Default, Display)] +pub struct InnerConsensusMetrics { + /// All the counters of the networking metrics + pub counters: HashMap, + /// All the gauges of the networking metrics + pub gauges: HashMap, + /// All the histograms of the networking metrics + pub histograms: HashMap>, + /// All the labels of the networking metrics + pub labels: HashMap, +} + +impl ConsensusMetrics { + #[must_use] + /// For the creation and naming of gauge, counter, histogram and label. + pub fn sub(&self, name: String) -> Self { + let prefix = if self.prefix.is_empty() { + name + } else { + format!("{}-{name}", self.prefix) + }; + Self { + prefix, + values: Arc::clone(&self.values), + } + } +} + +impl Metrics for ConsensusMetrics { + fn create_counter(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_gauge(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_histogram(&self, label: String, _unit_label: Option) -> Box { + Box::new(self.sub(label)) + } + + fn create_label(&self, label: String) -> Box { + Box::new(self.sub(label)) + } + + fn subgroup(&self, subgroup_name: String) -> Box { + Box::new(self.sub(subgroup_name)) + } +} + +impl Counter for ConsensusMetrics { + fn add(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .counters + .entry(self.prefix.clone()) + .or_default() += amount; + } +} + +impl Gauge for ConsensusMetrics { + fn set(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .gauges + .entry(self.prefix.clone()) + .or_default() = amount; + } + fn update(&self, delta: i64) { + let mut values = self.values.lock().unwrap(); + let value = values.gauges.entry(self.prefix.clone()).or_default(); + let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); + *value = usize::try_from(signed_value + delta).unwrap_or(0); + } +} + +impl Histogram for ConsensusMetrics { + fn add_point(&self, point: f64) { + self.values + .lock() + .unwrap() + .histograms + .entry(self.prefix.clone()) + .or_default() + .push(point); + } +} + +impl Label for ConsensusMetrics { + fn set(&self, value: String) { + *self + .values + .lock() + .unwrap() + .labels + .entry(self.prefix.clone()) + .or_default() = value; + } +} + +impl ConsensusMetricsValue { + /// Create a new instance of this [`ConsensusMetricsValue`] struct, setting all the counters and gauges + #[must_use] + pub fn new(metrics: &dyn Metrics) -> Self { + Self { + last_synced_block_height: metrics + .create_gauge(String::from("last_synced_block_height"), None), + last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), + last_decided_time: metrics.create_gauge(String::from("last_decided_time"), None), + current_view: metrics.create_gauge(String::from("current_view"), None), + number_of_views_since_last_decide: metrics + .create_gauge(String::from("number_of_views_since_last_decide"), None), + number_of_views_per_decide_event: metrics + .create_histogram(String::from("number_of_views_per_decide_event"), None), + invalid_qc: metrics.create_gauge(String::from("invalid_qc"), None), + outstanding_transactions: metrics + .create_gauge(String::from("outstanding_transactions"), None), + outstanding_transactions_memory_size: metrics + .create_gauge(String::from("outstanding_transactions_memory_size"), None), + number_of_timeouts: metrics.create_counter(String::from("number_of_timeouts"), None), + } + } +} + +impl Default for ConsensusMetricsValue { + fn default() -> Self { + Self::new(&*NoMetrics::boxed()) + } +} + +impl Consensus { + /// Update the current view. + pub fn update_view(&mut self, view_number: TYPES::Time) { + self.cur_view = view_number; + } + + /// gather information from the parent chain of leafs + /// # Errors + /// If the leaf or its ancestors are not found in storage + pub fn visit_leaf_ancestors( + &self, + start_from: TYPES::Time, + terminator: Terminator, + ok_when_finished: bool, + mut f: F, + ) -> Result<(), HotShotError> + where + F: FnMut( + &Leaf, + Arc<::ValidatedState>, + Option::ValidatedState as ValidatedState>::Delta>>, + ) -> bool, + { + let mut next_leaf = if let Some(view) = self.validated_state_map.get(&start_from) { + view.get_leaf_commitment() + .ok_or_else(|| HotShotError::InvalidState { + context: format!( + "Visited failed view {start_from:?} leaf. Expected successfuil leaf" + ), + })? + } else { + return Err(HotShotError::InvalidState { + context: format!("View {start_from:?} leaf does not exist in state map "), + }); + }; + + while let Some(leaf) = self.saved_leaves.get(&next_leaf) { + let view = leaf.get_view_number(); + if let (Some(state), delta) = self.get_state_and_delta(view) { + if let Terminator::Exclusive(stop_before) = terminator { + if stop_before == view { + if ok_when_finished { + return Ok(()); + } + break; + } + } + next_leaf = leaf.get_parent_commitment(); + if !f(leaf, state, delta) { + return Ok(()); + } + if let Terminator::Inclusive(stop_after) = terminator { + if stop_after == view { + if ok_when_finished { + return Ok(()); + } + break; + } + } + } else { + return Err(HotShotError::InvalidState { + context: format!("View {view:?} state does not exist in state map "), + }); + } + } + Err(HotShotError::LeafNotFound {}) + } + + /// Garbage collects based on state change right now, this removes from both the + /// `saved_payloads` and `validated_state_map` fields of `Consensus`. + /// # Panics + /// On inconsistent stored entries + pub fn collect_garbage(&mut self, old_anchor_view: TYPES::Time, new_anchor_view: TYPES::Time) { + // state check + let anchor_entry = self + .validated_state_map + .iter() + .next() + .expect("INCONSISTENT STATE: anchor leaf not in state map!"); + if *anchor_entry.0 != old_anchor_view { + error!( + "Something about GC has failed. Older leaf exists than the previous anchor leaf." + ); + } + // perform gc + self.saved_da_certs + .retain(|view_number, _| *view_number >= old_anchor_view); + self.saved_upgrade_certs + .retain(|view_number, _| *view_number >= old_anchor_view); + self.validated_state_map + .range(old_anchor_view..new_anchor_view) + .filter_map(|(_view_number, view)| view.get_leaf_commitment()) + .for_each(|leaf| { + self.saved_leaves.remove(&leaf); + }); + self.validated_state_map = self.validated_state_map.split_off(&new_anchor_view); + self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); + self.vid_shares = self.vid_shares.split_off(&new_anchor_view); + } + + /// Gets the last decided leaf. + /// + /// # Panics + /// if the last decided view's leaf does not exist in the state map or saved leaves, which + /// should never happen. + #[must_use] + pub fn get_decided_leaf(&self) -> Leaf { + let decided_view_num = self.last_decided_view; + let view = self.validated_state_map.get(&decided_view_num).unwrap(); + let leaf = view + .get_leaf_commitment() + .expect("Decided leaf not found! Consensus internally inconsistent"); + self.saved_leaves.get(&leaf).unwrap().clone() + } + + /// Gets the validated state with the given view number, if in the state map. + #[must_use] + pub fn get_state(&self, view_number: TYPES::Time) -> Option<&Arc> { + match self.validated_state_map.get(&view_number) { + Some(view) => view.get_state(), + None => None, + } + } + + /// Gets the validated state and state delta with the given view number, if in the state map. + #[must_use] + pub fn get_state_and_delta(&self, view_number: TYPES::Time) -> StateAndDelta { + match self.validated_state_map.get(&view_number) { + Some(view) => view.get_state_and_delta(), + None => (None, None), + } + } + + /// Gets the last decided validated state. + /// + /// # Panics + /// If the last decided view's state does not exist in the state map, which should never + /// happen. + #[must_use] + pub fn get_decided_state(&self) -> Arc { + let decided_view_num = self.last_decided_view; + self.get_state_and_delta(decided_view_num) + .0 + .expect("Decided state not found! Consensus internally inconsistent") + } +} diff --git a/types/src/constants.rs b/types/src/constants.rs new file mode 100644 index 0000000000..645cc39d4f --- /dev/null +++ b/types/src/constants.rs @@ -0,0 +1,52 @@ +//! configurable constants for hotshot + +use versioned_binary_serialization::version::{StaticVersion, Version}; + +/// the number of views to gather information for ahead of time +pub const LOOK_AHEAD: u64 = 5; + +/// the default kademlia record republication interval (in seconds) +pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; + +/// the number of messages to cache in the combined network +pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; + +/// the number of messages to attempt to send over the primary network before switching to prefer the secondary network +pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; + +/// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network +pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; + +/// CONSTANT for protocol major version +pub const VERSION_MAJ: u16 = 0; + +/// CONSTANT for protocol major version +pub const VERSION_MIN: u16 = 1; + +/// Constant for protocol version 0.1. +pub const VERSION_0_1: Version = Version { + major: VERSION_MAJ, + minor: VERSION_MIN, +}; + +/// Type for protocol static version 0.1. +pub type Version01 = StaticVersion; + +/// Constant for protocol static version 0.1. +pub const STATIC_VER_0_1: Version01 = StaticVersion {}; + +/// Default Channel Size for consensus event sharing +pub const EVENT_CHANNEL_SIZE: usize = 100_000; + +/// Constants for `WebServerNetwork` and `WebServer` +/// The Web CDN is not, strictly speaking, bound to the network; it can have its own versioning. +/// Web Server CDN Version (major) +pub const WEB_SERVER_MAJOR_VERSION: u16 = 0; +/// Web Server CDN Version (minor) +pub const WEB_SERVER_MINOR_VERSION: u16 = 1; + +/// Type for Web Server CDN Version +pub type WebServerVersion = StaticVersion; + +/// Constant for Web Server CDN Version +pub const WEB_SERVER_VERSION: WebServerVersion = StaticVersion {}; diff --git a/types/src/data.rs b/types/src/data.rs new file mode 100644 index 0000000000..0552ec3f4d --- /dev/null +++ b/types/src/data.rs @@ -0,0 +1,483 @@ +//! Provides types useful for representing `HotShot`'s data structures +//! +//! This module provides types for representing consensus internal state, such as leaves, +//! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. + +use crate::{ + simple_certificate::{ + QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, + }, + simple_vote::UpgradeProposalData, + traits::{ + block_contents::{ + vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, + }, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, + states::TestableState, + BlockPayload, + }, + utils::bincode_opts, + vid::{VidCommitment, VidCommon, VidSchemeType, VidShare}, + vote::{Certificate, HasViewNumber}, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use bincode::Options; +use commit::{Commitment, Committable, RawCommitmentBuilder}; +use derivative::Derivative; +use jf_primitives::vid::VidDisperse as JfVidDisperse; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use std::{ + collections::BTreeMap, + fmt::{Debug, Display}, + hash::Hash, + sync::Arc, +}; + +/// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + CanonicalSerialize, + CanonicalDeserialize, +)] +pub struct ViewNumber(u64); + +impl ConsensusTime for ViewNumber { + /// Create a genesis view number (0) + fn genesis() -> Self { + Self(0) + } + /// Create a new `ViewNumber` with the given value. + fn new(n: u64) -> Self { + Self(n) + } + /// Returen the u64 format + fn get_u64(&self) -> u64 { + self.0 + } +} + +impl Committable for ViewNumber { + fn commit(&self) -> Commitment { + let builder = RawCommitmentBuilder::new("View Number Commitment"); + builder.u64(self.0).finalize() + } +} + +impl std::ops::Add for ViewNumber { + type Output = ViewNumber; + + fn add(self, rhs: u64) -> Self::Output { + Self(self.0 + rhs) + } +} + +impl std::ops::AddAssign for ViewNumber { + fn add_assign(&mut self, rhs: u64) { + self.0 += rhs; + } +} + +impl std::ops::Deref for ViewNumber { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::Sub for ViewNumber { + type Output = ViewNumber; + fn sub(self, rhs: u64) -> Self::Output { + Self(self.0 - rhs) + } +} + +/// A proposal to start providing data availability for a block. +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct DAProposal { + /// Encoded transactions in the block to be applied. + pub encoded_transactions: Vec, + /// Metadata of the block to be applied. + pub metadata: ::Metadata, + /// View this proposal applies to + pub view_number: TYPES::Time, +} + +/// A proposal to upgrade the network +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound = "TYPES: NodeType")] +pub struct UpgradeProposal +where + TYPES: NodeType, +{ + /// The information about which version we are upgrading to. + pub upgrade_proposal: UpgradeProposalData, + /// View this proposal applies to + pub view_number: TYPES::Time, +} + +/// VID dispersal data +/// +/// Like [`DAProposal`]. +/// +/// TODO move to vid.rs? +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct VidDisperse { + /// The view number for which this VID data is intended + pub view_number: TYPES::Time, + /// Block payload commitment + pub payload_commitment: VidCommitment, + /// A storage node's key and its corresponding VID share + pub shares: BTreeMap, + /// VID common data sent to all storage nodes + pub common: VidCommon, +} + +impl VidDisperse { + /// Create VID dispersal from a specified membership + /// Uses the specified function to calculate share dispersal + /// Allows for more complex stake table functionality + pub fn from_membership( + view_number: TYPES::Time, + mut vid_disperse: JfVidDisperse, + membership: &Arc, + ) -> Self { + let shares = membership + .get_staked_committee(view_number) + .iter() + .map(|node| (node.clone(), vid_disperse.shares.remove(0))) + .collect(); + + Self { + view_number, + shares, + common: vid_disperse.common, + payload_commitment: vid_disperse.commit, + } + } +} + +/// Proposal to append a block. +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QuorumProposal { + /// The block header to append + pub block_header: TYPES::BlockHeader, + + /// CurView from leader when proposing leaf + pub view_number: TYPES::Time, + + /// Per spec, justification + pub justify_qc: QuorumCertificate, + + /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view + pub timeout_certificate: Option>, + + /// Possible upgrade certificate, which the leader may optionally attach. + pub upgrade_certificate: Option>, + + /// Possible view sync certificate. Only present if the justify_qc and timeout_cert are not + /// present. + pub view_sync_certificate: Option>, + + /// the propser id + pub proposer_id: TYPES::SignatureKey, +} + +impl HasViewNumber for DAProposal { + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} + +impl HasViewNumber for VidDisperse { + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} + +impl HasViewNumber for QuorumProposal { + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} + +impl HasViewNumber for UpgradeProposal { + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} + +/// The error type for block and its transactions. +#[derive(Snafu, Debug, Serialize, Deserialize)] +pub enum BlockError { + /// Invalid block header. + InvalidBlockHeader, + /// Invalid transaction length. + InvalidTransactionLength, + /// Inconsistent payload commitment. + InconsistentPayloadCommitment, +} + +/// Additional functions required to use a [`Leaf`] with hotshot-testing. +pub trait TestableLeaf { + /// Type of nodes participating in the network. + type NodeType: NodeType; + + /// Create a transaction that can be added to the block contained in this leaf. + fn create_random_transaction( + &self, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> <::BlockPayload as BlockPayload>::Transaction; +} + +/// This is the consensus-internal analogous concept to a block, and it contains the block proper, +/// as well as the hash of its parent `Leaf`. +/// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` +#[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] +#[serde(bound(deserialize = ""))] +pub struct Leaf { + /// CurView from leader when proposing leaf + pub view_number: TYPES::Time, + + /// Per spec, justification + pub justify_qc: QuorumCertificate, + + /// The hash of the parent `Leaf` + /// So we can ask if it extends + pub parent_commitment: Commitment, + + /// Block header. + pub block_header: TYPES::BlockHeader, + + /// Optional block payload. + /// + /// It may be empty for nodes not in the DA committee. + pub block_payload: Option, + + /// the proposer id of the leaf + pub proposer_id: TYPES::SignatureKey, +} + +impl PartialEq for Leaf { + fn eq(&self, other: &Self) -> bool { + self.view_number == other.view_number + && self.justify_qc == other.justify_qc + && self.parent_commitment == other.parent_commitment + && self.block_header == other.block_header + } +} + +impl Hash for Leaf { + fn hash(&self, state: &mut H) { + self.view_number.hash(state); + self.justify_qc.hash(state); + self.parent_commitment.hash(state); + self.block_header.hash(state); + } +} + +impl Display for Leaf { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "view: {:?}, height: {:?}, justify: {}", + self.view_number, + self.get_height(), + self.justify_qc + ) + } +} + +impl Leaf { + /// Create a new leaf from its components. + /// + /// # Panics + /// + /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be + /// interpreted as bytes). + #[must_use] + pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { + let (payload, metadata) = TYPES::BlockPayload::genesis(); + let payload_bytes = payload + .encode() + .expect("unable to encode genesis payload") + .collect(); + let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + let block_header = + TYPES::BlockHeader::genesis(instance_state, payload_commitment, metadata); + Self { + view_number: TYPES::Time::genesis(), + justify_qc: QuorumCertificate::::genesis(), + parent_commitment: fake_commitment(), + block_header: block_header.clone(), + block_payload: Some(payload), + proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), + } + } + + /// Time when this leaf was created. + pub fn get_view_number(&self) -> TYPES::Time { + self.view_number + } + /// Height of this leaf in the chain. + /// + /// Equivalently, this is the number of leaves before this one in the chain. + pub fn get_height(&self) -> u64 { + self.block_header.block_number() + } + /// The QC linking this leaf to its parent in the chain. + pub fn get_justify_qc(&self) -> QuorumCertificate { + self.justify_qc.clone() + } + /// Commitment to this leaf's parent. + pub fn get_parent_commitment(&self) -> Commitment { + self.parent_commitment + } + /// The block header contained in this leaf. + pub fn get_block_header(&self) -> &::BlockHeader { + &self.block_header + } + /// Fill this leaf with the block payload. + /// + /// # Errors + /// + /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()` + /// or if the transactions are of invalid length + pub fn fill_block_payload( + &mut self, + block_payload: TYPES::BlockPayload, + num_storage_nodes: usize, + ) -> Result<(), BlockError> { + let encoded_txns = match block_payload.encode() { + // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. + // + Ok(encoded) => encoded.into_iter().collect(), + Err(_) => return Err(BlockError::InvalidTransactionLength), + }; + let commitment = vid_commitment(&encoded_txns, num_storage_nodes); + if commitment != self.block_header.payload_commitment() { + return Err(BlockError::InconsistentPayloadCommitment); + } + self.block_payload = Some(block_payload); + Ok(()) + } + + /// Fill this leaf with the block payload, without checking + /// header and payload consistency + pub fn fill_block_payload_unchecked(&mut self, block_payload: TYPES::BlockPayload) { + self.block_payload = Some(block_payload); + } + + /// Optional block payload. + pub fn get_block_payload(&self) -> Option { + self.block_payload.clone() + } + + /// A commitment to the block payload contained in this leaf. + pub fn get_payload_commitment(&self) -> VidCommitment { + self.get_block_header().payload_commitment() + } + + /// Identity of the network participant who proposed this leaf. + pub fn get_proposer_id(&self) -> TYPES::SignatureKey { + self.proposer_id.clone() + } +} + +impl TestableLeaf for Leaf +where + TYPES::ValidatedState: TestableState, + TYPES::BlockPayload: TestableBlock, +{ + type NodeType = TYPES; + + fn create_random_transaction( + &self, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> <::BlockPayload as BlockPayload>::Transaction { + TYPES::ValidatedState::create_random_transaction(None, rng, padding) + } +} +/// Fake the thing a genesis block points to. Needed to avoid infinite recursion +#[must_use] +pub fn fake_commitment() -> Commitment { + RawCommitmentBuilder::new("Dummy commitment for arbitrary genesis").finalize() +} + +/// create a random commitment +#[must_use] +pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitment { + let random_array: Vec = (0u8..100u8).map(|_| rng.gen_range(0..255)).collect(); + RawCommitmentBuilder::new("Random Commitment") + .constant_str("Random Field") + .var_size_bytes(&random_array) + .finalize() +} + +/// Serialization for the QC assembled signature +/// # Panics +/// if serialization fails +pub fn serialize_signature2( + signatures: &::QCType, +) -> Vec { + let mut signatures_bytes = vec![]; + signatures_bytes.extend("Yes".as_bytes()); + + let (sig, proof) = TYPES::SignatureKey::get_sig_proof(signatures); + let proof_bytes = bincode_opts() + .serialize(&proof.as_bitslice()) + .expect("This serialization shouldn't be able to fail"); + signatures_bytes.extend("bitvec proof".as_bytes()); + signatures_bytes.extend(proof_bytes.as_slice()); + let sig_bytes = bincode_opts() + .serialize(&sig) + .expect("This serialization shouldn't be able to fail"); + signatures_bytes.extend("aggregated signature".as_bytes()); + signatures_bytes.extend(sig_bytes.as_slice()); + signatures_bytes +} + +impl Committable for Leaf { + fn commit(&self) -> commit::Commitment { + let signatures_bytes = if self.justify_qc.is_genesis { + let mut bytes = vec![]; + bytes.extend("genesis".as_bytes()); + bytes + } else { + serialize_signature2::(self.justify_qc.signatures.as_ref().unwrap()) + }; + + // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. + RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .u64_field("block number", self.get_height()) + .field("parent Leaf commitment", self.parent_commitment) + .constant_str("block payload commitment") + .fixed_size_bytes(self.get_payload_commitment().as_ref().as_ref()) + .constant_str("justify_qc view number") + .u64(*self.justify_qc.view_number) + .field( + "justify_qc leaf commitment", + self.justify_qc.get_data().leaf_commit, + ) + .constant_str("justify_qc signatures") + .var_size_bytes(&signatures_bytes) + .finalize() + } +} diff --git a/types/src/error.rs b/types/src/error.rs new file mode 100644 index 0000000000..127f81506b --- /dev/null +++ b/types/src/error.rs @@ -0,0 +1,112 @@ +//! Error type for `HotShot` +//! +//! This module provides [`HotShotError`], which is an enum representing possible faults that can +//! occur while interacting with this crate. + +//use crate::traits::network::TimeoutErr; +use crate::traits::{block_contents::BlockPayload, node_implementation::NodeType}; +#[cfg(async_executor_impl = "async-std")] +use async_std::future::TimeoutError; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use std::num::NonZeroU64; +#[cfg(async_executor_impl = "tokio")] +use tokio::time::error::Elapsed as TimeoutError; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} + +/// Error type for `HotShot` +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +#[non_exhaustive] +pub enum HotShotError { + /// Failed to Message the leader in the given stage + #[snafu(display("Failed to message leader with error: {source}"))] + FailedToMessageLeader { + /// The underlying network fault + source: crate::traits::network::NetworkError, + }, + /// Failed to broadcast a message on the network + #[snafu(display("Failed to broadcast a message"))] + FailedToBroadcast { + /// The underlying network fault + source: crate::traits::network::NetworkError, + }, + /// Failure in the block. + #[snafu(display("Failed to build or verify a block: {source}"))] + BlockError { + /// The underlying block error. + source: ::Error, + }, + /// Failure in networking layer + #[snafu(display("Failure in networking layer: {source}"))] + NetworkFault { + /// Underlying network fault + source: crate::traits::network::NetworkError, + }, + /// Item was not present in storage + LeafNotFound {/* TODO we should create a way to to_string */}, + /// Error accesing storage + /// Invalid state machine state + #[snafu(display("Invalid state machine state: {}", context))] + InvalidState { + /// Context + context: String, + }, + /// HotShot timed out waiting for msgs + TimeoutError { + /// source of error + source: TimeoutError, + }, + /// HotShot timed out during round + ViewTimeoutError { + /// view number + view_number: TYPES::Time, + /// The state that the round was in when it timed out + state: RoundTimedoutState, + }, + /// Not enough valid signatures for a quorum + #[snafu(display("Insufficient number of valid signatures: the threshold is {}, but only {} signatures were valid", threshold, num_valid_signatures))] + InsufficientValidSignatures { + /// Number of valid signatures + num_valid_signatures: usize, + /// Threshold of signatures needed for a quorum + threshold: NonZeroU64, + }, + /// Miscelaneous error + /// TODO fix this with + /// #181 + Misc { + /// source of error + context: String, + }, + /// Internal value used to drive the state machine + Continue, +} +/// Contains information about what the state of the hotshot-consensus was when a round timed out +#[derive(Debug, Clone, Serialize, Deserialize)] +#[non_exhaustive] +pub enum RoundTimedoutState { + /// Leader is in a Prepare phase and is waiting for a HighQC + LeaderWaitingForHighQC, + /// Leader is in a Prepare phase and timed out before the round min time is reached + LeaderMinRoundTimeNotReached, + /// Leader is waiting for prepare votes + LeaderWaitingForPrepareVotes, + /// Leader is waiting for precommit votes + LeaderWaitingForPreCommitVotes, + /// Leader is waiting for commit votes + LeaderWaitingForCommitVotes, + + /// Replica is waiting for a prepare message + ReplicaWaitingForPrepare, + /// Replica is waiting for a pre-commit message + ReplicaWaitingForPreCommit, + /// Replica is waiting for a commit message + ReplicaWaitingForCommit, + /// Replica is waiting for a decide message + ReplicaWaitingForDecide, + + /// HotShot-testing tried to collect round events, but it timed out + TestCollectRoundEventsTimedOut, +} diff --git a/types/src/event.rs b/types/src/event.rs new file mode 100644 index 0000000000..8a2e59eedf --- /dev/null +++ b/types/src/event.rs @@ -0,0 +1,160 @@ +//! Events that a `HotShot` instance can emit + +use serde::{Deserialize, Serialize}; + +use crate::{ + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, + error::HotShotError, + message::Proposal, + simple_certificate::QuorumCertificate, + traits::{node_implementation::NodeType, ValidatedState}, +}; + +use std::sync::Arc; +/// A status event emitted by a `HotShot` instance +/// +/// This includes some metadata, such as the stage and view number that the event was generated in, +/// as well as an inner [`EventType`] describing the event proper. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound(deserialize = "TYPES: NodeType"))] +pub struct Event { + /// The view number that this event originates from + pub view_number: TYPES::Time, + /// The underlying event + pub event: EventType, +} + +/// Decided leaf with the corresponding state and VID info. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound(deserialize = "TYPES: NodeType"))] +pub struct LeafInfo { + /// Decided leaf. + pub leaf: Leaf, + /// Validated state. + pub state: Arc<::ValidatedState>, + /// Optional application-specific state delta. + pub delta: Option::ValidatedState as ValidatedState>::Delta>>, + /// Optional VID disperse data. + pub vid: Option>, +} + +impl LeafInfo { + /// Constructor. + pub fn new( + leaf: Leaf, + state: Arc<::ValidatedState>, + delta: Option::ValidatedState as ValidatedState>::Delta>>, + vid: Option>, + ) -> Self { + Self { + leaf, + state, + delta, + vid, + } + } +} + +/// The chain of decided leaves with its corresponding state and VID info. +pub type LeafChain = Vec>; + +pub mod error_adaptor { + use super::*; + use serde::{de::Deserializer, ser::Serializer}; + pub fn serialize( + elem: &Arc>, + serializer: S, + ) -> Result { + serializer.serialize_str(&format!("{}", elem)) + } + + pub fn deserialize<'de, D: Deserializer<'de>, TYPES: NodeType>( + deserializer: D, + ) -> Result>, D::Error> { + let str = String::deserialize(deserializer)?; + Ok(Arc::new(HotShotError::Misc { context: str })) + } +} +/// The type and contents of a status event emitted by a `HotShot` instance +/// +/// This enum does not include metadata shared among all variants, such as the stage and view +/// number, and is thus always returned wrapped in an [`Event`]. +#[non_exhaustive] +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound(deserialize = "TYPES: NodeType"))] +pub enum EventType { + /// A view encountered an error and was interrupted + Error { + /// The underlying error + #[serde(with = "error_adaptor")] + error: Arc>, + }, + /// A new decision event was issued + Decide { + /// The chain of Leafs that were committed by this decision + /// + /// This list is sorted in reverse view number order, with the newest (highest view number) + /// block first in the list. + /// + /// This list may be incomplete if the node is currently performing catchup. + /// Vid Info for a decided view may be missing if this node never saw it's share. + leaf_chain: Arc>, + /// The QC signing the most recent leaf in `leaf_chain`. + /// + /// Note that the QC for each additional leaf in the chain can be obtained from the leaf + /// before it using + qc: Arc>, + /// Optional information of the number of transactions in the block, for logging purposes. + block_size: Option, + }, + /// A replica task was canceled by a timeout interrupt + ReplicaViewTimeout { + /// The view that timed out + view_number: TYPES::Time, + }, + /// A next leader task was canceled by a timeout interrupt + NextLeaderViewTimeout { + /// The view that timed out + view_number: TYPES::Time, + }, + /// The view has finished. If values were decided on, a `Decide` event will also be emitted. + ViewFinished { + /// The view number that has just finished + view_number: TYPES::Time, + }, + /// The view timed out + ViewTimeout { + /// The view that timed out + view_number: TYPES::Time, + }, + /// New transactions were received from the network + /// or submitted to the network by us + Transactions { + /// The list of transactions + transactions: Vec, + }, + /// DA proposal was received from the network + /// or submitted to the network by us + DAProposal { + /// Contents of the proposal + proposal: Proposal>, + /// Public key of the leader submitting the proposal + sender: TYPES::SignatureKey, + }, + /// Quorum proposal was received from the network + /// or submitted to the network by us + QuorumProposal { + /// Contents of the proposal + proposal: Proposal>, + /// Public key of the leader submitting the proposal + sender: TYPES::SignatureKey, + }, + /// Upgrade proposal was received from the network + /// or submitted to the network by us + UpgradeProposal { + /// Contents of the proposal + proposal: Proposal>, + /// Public key of the leader submitting the proposal + sender: TYPES::SignatureKey, + }, +} diff --git a/types/src/lib.rs b/types/src/lib.rs new file mode 100644 index 0000000000..4f882f165c --- /dev/null +++ b/types/src/lib.rs @@ -0,0 +1,190 @@ +//! Types and Traits for the `HotShot` consensus module +use crate::utils::bincode_opts; +use bincode::Options; +use displaydoc::Display; +use light_client::StateVerKey; +use std::fmt::Debug; +use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; +use tracing::error; +use traits::{election::ElectionConfig, signature_key::SignatureKey}; +pub mod consensus; +pub mod constants; +pub mod data; +pub mod error; +pub mod event; +pub mod light_client; +pub mod message; +pub mod qc; +pub mod signature_key; +pub mod simple_certificate; +pub mod simple_vote; +pub mod stake_table; +pub mod traits; +pub mod utils; +pub mod vid; +pub mod vote; + +/// Pinned future that is Send and Sync +pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; + +/// yoinked from futures crate +pub fn assert_future(future: F) -> F +where + F: Future, +{ + future +} +/// yoinked from futures crate, adds sync bound that we need +pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> +where + F: Future + Sized + Send + Sync + 'a, +{ + assert_future::(Box::pin(fut)) +} +/// the type of consensus to run. Either: +/// wait for a signal to start a view, +/// or constantly run +/// you almost always want continuous +/// incremental is just for testing +#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] +pub enum ExecutionType { + /// constantly increment view as soon as view finishes + Continuous, + /// wait for a signal + Incremental, +} + +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] +#[serde(bound(deserialize = ""))] +/// config for validator, including public key, private key, stake value +pub struct ValidatorConfig { + /// The validator's public key and stake value + pub public_key: KEY, + /// The validator's private key, should be in the mempool, not public + pub private_key: KEY::PrivateKey, + /// The validator's stake + pub stake_value: u64, + /// the validator's key pairs for state signing/verification + pub state_key_pair: light_client::StateKeyPair, +} + +impl ValidatorConfig { + /// generate validator config from input seed, index and stake value + #[must_use] + pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { + let (public_key, private_key) = KEY::generated_from_seed_indexed(seed, index); + let state_key_pairs = light_client::StateKeyPair::generate_from_seed_indexed(seed, index); + Self { + public_key, + private_key, + stake_value, + state_key_pair: state_key_pairs, + } + } + + /// get the public config of the validator + pub fn get_public_config(&self) -> PeerConfig { + PeerConfig { + stake_table_entry: self.public_key.get_stake_table_entry(self.stake_value), + state_ver_key: self.state_key_pair.0.ver_key(), + } + } +} + +impl Default for ValidatorConfig { + fn default() -> Self { + Self::generated_from_seed_indexed([0u8; 32], 0, 1) + } +} + +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] +#[serde(bound(deserialize = ""))] +/// structure of peers' config, including public key, stake value, and state key. +pub struct PeerConfig { + /// The peer's public key and stake value + pub stake_table_entry: KEY::StakeTableEntry, + /// the peer's state public key + pub state_ver_key: StateVerKey, +} + +impl PeerConfig { + /// Serialize a peer's config to bytes + pub fn to_bytes(config: &Self) -> Vec { + let x = bincode_opts().serialize(config); + match x { + Ok(x) => x, + Err(e) => { + error!(?e, "Failed to serialize public key"); + vec![] + } + } + } + + /// Deserialize a peer's config from bytes + /// # Errors + /// Will return `None` if deserialization fails + pub fn from_bytes(bytes: &[u8]) -> Option { + let x: Result, _> = bincode_opts().deserialize(bytes); + match x { + Ok(pub_key) => Some(pub_key), + Err(e) => { + error!(?e, "Failed to deserialize public key"); + None + } + } + } +} + +impl Default for PeerConfig { + fn default() -> Self { + let default_validator_config = ValidatorConfig::::default(); + default_validator_config.get_public_config() + } +} + +/// Holds configuration for a `HotShot` +#[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] +#[serde(bound(deserialize = ""))] +pub struct HotShotConfig { + /// Whether to run one view or continuous views + pub execution_type: ExecutionType, + /// Total number of nodes in the network + // Earlier it was total_nodes + pub num_nodes_with_stake: NonZeroUsize, + /// Number of nodes without stake + pub num_nodes_without_stake: usize, + /// Minimum transactions per block + pub min_transactions: usize, + /// Maximum transactions per block + pub max_transactions: NonZeroUsize, + /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter + pub known_nodes_with_stake: Vec>, + /// List of known non-staking nodes' public keys + pub known_nodes_without_stake: Vec, + /// My own validator config, including my public key, private key, stake value, serving as private parameter + pub my_own_validator_config: ValidatorConfig, + /// List of DA committee (staking)nodes for static DA committe + pub da_staked_committee_size: usize, + /// List of DA committee nodes (non-staking)nodes for static DA committe + pub da_non_staked_committee_size: usize, + /// Base duration for next-view timeout, in milliseconds + pub next_view_timeout: u64, + /// Duration of view sync round timeouts + pub view_sync_timeout: Duration, + /// The exponential backoff ration for the next-view timeout + pub timeout_ratio: (u64, u64), + /// The delay a leader inserts before starting pre-commit, in milliseconds + pub round_start_delay: u64, + /// Delay after init before starting consensus, in milliseconds + pub start_delay: u64, + /// Number of network bootstrap nodes + pub num_bootstrap: usize, + /// The minimum amount of time a leader has to wait to start a round + pub propose_min_round_time: Duration, + /// The maximum amount of time a leader can wait to start a round + pub propose_max_round_time: Duration, + /// time to wait until we request data associated with a proposal + pub data_request_delay: Duration, + /// the election configuration + pub election_config: Option, +} diff --git a/types/src/light_client.rs b/types/src/light_client.rs new file mode 100644 index 0000000000..ce98eedd3e --- /dev/null +++ b/types/src/light_client.rs @@ -0,0 +1,223 @@ +//! Types and structs associated with light client state + +use ark_ed_on_bn254::EdwardsConfig as Config; +use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ethereum_types::U256; +use jf_primitives::signatures::schnorr; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use tagged_base64::tagged; + +/// Base field in the prover circuit +pub type CircuitField = ark_ed_on_bn254::Fq; +/// Concrete type for light client state +pub type LightClientState = GenericLightClientState; +/// Signature scheme +pub type StateSignatureScheme = + jf_primitives::signatures::schnorr::SchnorrSignatureScheme; +/// Signatures +pub type StateSignature = schnorr::Signature; +/// Verification key for verifying state signatures +pub type StateVerKey = schnorr::VerKey; +/// Signing key for signing a light client state +pub type StateSignKey = schnorr::SignKey; +/// Concrete for circuit's public input +pub type PublicInput = GenericPublicInput; +/// Key pairs for signing/verifying a light client state +#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] +pub struct StateKeyPair(pub schnorr::KeyPair); + +/// Request body to send to the state relay server +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize)] +pub struct StateSignatureRequestBody { + /// The public key associated with this request + pub key: StateVerKey, + /// The associated light client state + pub state: LightClientState, + /// The associated signature of the light client state + pub signature: StateSignature, +} + +/// The state signatures bundle is a light client state and its signatures collected +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct StateSignaturesBundle { + /// The state for this signatures bundle + pub state: LightClientState, + /// The collected signatures + pub signatures: HashMap, + /// Total stakes associated with the signer + pub accumulated_weight: U256, +} + +/// A light client state +#[tagged("LIGHT_CLIENT_STATE")] +#[derive( + Clone, + Debug, + CanonicalSerialize, + CanonicalDeserialize, + Default, + Eq, + PartialEq, + PartialOrd, + Ord, + Hash, +)] +pub struct GenericLightClientState { + /// Current view number + pub view_number: usize, + /// Current block height + pub block_height: usize, + /// Root of the block commitment tree + pub block_comm_root: F, + /// Commitment for fee ledger + pub fee_ledger_comm: F, + /// Commitment for the stake table + pub stake_table_comm: (F, F, F), +} + +impl From> for [F; 7] { + fn from(state: GenericLightClientState) -> Self { + [ + F::from(state.view_number as u64), + F::from(state.block_height as u64), + state.block_comm_root, + state.fee_ledger_comm, + state.stake_table_comm.0, + state.stake_table_comm.1, + state.stake_table_comm.2, + ] + } +} +impl From<&GenericLightClientState> for [F; 7] { + fn from(state: &GenericLightClientState) -> Self { + [ + F::from(state.view_number as u64), + F::from(state.block_height as u64), + state.block_comm_root, + state.fee_ledger_comm, + state.stake_table_comm.0, + state.stake_table_comm.1, + state.stake_table_comm.2, + ] + } +} + +impl std::ops::Deref for StateKeyPair { + type Target = schnorr::KeyPair; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl StateKeyPair { + /// Generate key pairs from private signing keys + #[must_use] + pub fn from_sign_key(sk: StateSignKey) -> Self { + Self(schnorr::KeyPair::::from(sk)) + } + + /// Generate key pairs from `thread_rng()` + #[must_use] + pub fn generate() -> StateKeyPair { + schnorr::KeyPair::generate(&mut rand::thread_rng()).into() + } + + /// Generate key pairs from seed + #[must_use] + pub fn generate_from_seed(seed: [u8; 32]) -> StateKeyPair { + schnorr::KeyPair::generate(&mut ChaCha20Rng::from_seed(seed)).into() + } + + /// Generate key pairs from an index and a seed + #[must_use] + pub fn generate_from_seed_indexed(seed: [u8; 32], index: u64) -> StateKeyPair { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + Self::generate_from_seed(new_seed) + } +} + +impl From> for StateKeyPair { + fn from(value: schnorr::KeyPair) -> Self { + StateKeyPair(value) + } +} + +/// Public input to the light client state prover service +#[derive(Clone, Debug)] +pub struct GenericPublicInput(Vec); + +impl AsRef<[F]> for GenericPublicInput { + fn as_ref(&self) -> &[F] { + &self.0 + } +} + +impl From> for GenericPublicInput { + fn from(v: Vec) -> Self { + Self(v) + } +} + +impl GenericPublicInput { + /// Return the threshold + #[must_use] + pub fn threshold(&self) -> F { + self.0[0] + } + + /// Return the view number of the light client state + #[must_use] + pub fn view_number(&self) -> F { + self.0[1] + } + + /// Return the block height of the light client state + #[must_use] + pub fn block_height(&self) -> F { + self.0[2] + } + + /// Return the block commitment root of the light client state + #[must_use] + pub fn block_comm_root(&self) -> F { + self.0[3] + } + + /// Return the fee ledger commitment of the light client state + #[must_use] + pub fn fee_ledger_comm(&self) -> F { + self.0[4] + } + + /// Return the stake table commitment of the light client state + #[must_use] + pub fn stake_table_comm(&self) -> (F, F, F) { + (self.0[5], self.0[6], self.0[7]) + } + + /// Return the qc key commitment of the light client state + #[must_use] + pub fn qc_key_comm(&self) -> F { + self.0[5] + } + + /// Return the state key commitment of the light client state + #[must_use] + pub fn state_key_comm(&self) -> F { + self.0[6] + } + + /// Return the stake amount commitment of the light client state + #[must_use] + pub fn stake_amount_comm(&self) -> F { + self.0[7] + } +} diff --git a/types/src/message.rs b/types/src/message.rs new file mode 100644 index 0000000000..99f29d4907 --- /dev/null +++ b/types/src/message.rs @@ -0,0 +1,316 @@ +//! Network message types +//! +//! This module contains types used to represent the various types of messages that +//! `HotShot` nodes can send among themselves. + +use crate::data::{QuorumProposal, UpgradeProposal}; +use crate::simple_certificate::{ + DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + ViewSyncPreCommitCertificate2, +}; +use crate::simple_vote::{ + DAVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + ViewSyncPreCommitVote, +}; +use crate::traits::network::ResponseMessage; +use crate::traits::signature_key::SignatureKey; +use crate::vote::HasViewNumber; +use crate::{ + data::{DAProposal, VidDisperse}, + simple_vote::QuorumVote, + traits::{ + network::{DataRequest, NetworkMsg, ViewMessage}, + node_implementation::{ConsensusTime, NodeType}, + }, +}; +use derivative::Derivative; +use either::Either::{self, Left, Right}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, marker::PhantomData}; + +/// Incoming message +#[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = "", serialize = ""))] +pub struct Message { + /// The sender of this message + pub sender: TYPES::SignatureKey, + + /// The message kind + pub kind: MessageKind, +} + +impl NetworkMsg for Message {} + +impl ViewMessage for Message { + /// get the view number out of a message + fn get_view_number(&self) -> TYPES::Time { + self.kind.get_view_number() + } + fn purpose(&self) -> MessagePurpose { + self.kind.purpose() + } +} + +/// A wrapper type for implementing `PassType` on a vector of `Message`. +#[derive(Clone, Debug)] +pub struct Messages(pub Vec>); + +/// A message type agnostic description of a message's purpose +#[derive(PartialEq, Copy, Clone)] +pub enum MessagePurpose { + /// Message with a [quorum/DA] proposal. + Proposal, + /// Message with most recent [quorum/DA] proposal the server has + LatestProposal, + /// Message with most recent view sync certificate the server has + LatestViewSyncCertificate, + /// Message with a quorum vote. + Vote, + /// Message with a view sync vote. + ViewSyncVote, + /// Message with a view sync certificate. + ViewSyncCertificate, + /// Message with a DAC. + DAC, + /// Message for internal use + Internal, + /// Data message + Data, + /// VID disperse, like [`Proposal`]. + VidDisperse, + /// Message with an upgrade proposal. + Upgrade, +} + +// TODO (da) make it more customized to the consensus layer, maybe separating the specific message +// data from the kind enum. +/// Enum representation of any message type +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[serde(bound(deserialize = "", serialize = ""))] +pub enum MessageKind { + /// Messages related to the consensus protocol + Consensus(SequencingMessage), + /// Messages relating to sharing data between nodes + Data(DataMessage), +} + +impl MessageKind { + // Can't implement `From` directly due to potential conflict with + // `From`. + /// Construct a [`MessageKind`] from [`SequencingMessage`]. + pub fn from_consensus_message(m: SequencingMessage) -> Self { + Self::Consensus(m) + } +} + +impl From> for MessageKind { + fn from(m: DataMessage) -> Self { + Self::Data(m) + } +} + +impl ViewMessage for MessageKind { + fn get_view_number(&self) -> TYPES::Time { + match &self { + MessageKind::Consensus(message) => message.view_number(), + MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, + MessageKind::Data(DataMessage::RequestData(msg)) => msg.view, + MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { + ResponseMessage::Found(m) => m.view_number(), + ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::Time::new(1), + }, + } + } + + fn purpose(&self) -> MessagePurpose { + match &self { + MessageKind::Consensus(message) => message.purpose(), + MessageKind::Data(_) => MessagePurpose::Data, + } + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = "", serialize = ""))] +/// Messages related to both validating and sequencing consensus. +pub enum GeneralConsensusMessage { + /// Message with a quorum proposal. + Proposal(Proposal>), + + /// Message with a quorum vote. + Vote(QuorumVote), + + /// Message with a view sync pre-commit vote + ViewSyncPreCommitVote(ViewSyncPreCommitVote), + + /// Message with a view sync commit vote + ViewSyncCommitVote(ViewSyncCommitVote), + + /// Message with a view sync finalize vote + ViewSyncFinalizeVote(ViewSyncFinalizeVote), + + /// Message with a view sync pre-commit certificate + ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), + + /// Message with a view sync commit certificate + ViewSyncCommitCertificate(ViewSyncCommitCertificate2), + + /// Message with a view sync finalize certificate + ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), + + /// Message with a Timeout vote + TimeoutVote(TimeoutVote), + + /// Message with an upgrade proposal + UpgradeProposal(Proposal>), + + /// Message with an upgrade vote + UpgradeVote(UpgradeVote), +} + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] +#[serde(bound(deserialize = "", serialize = ""))] +/// Messages related to the sequencing consensus protocol for the DA committee. +pub enum CommitteeConsensusMessage { + /// Proposal for data availability committee + DAProposal(Proposal>), + + /// vote for data availability committee + DAVote(DAVote), + + /// Certificate data is available + DACertificate(DACertificate), + + /// Initiate VID dispersal. + /// + /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. + /// TODO this variant should not be a [`CommitteeConsensusMessage`] because + VidDisperseMsg(Proposal>), +} + +/// Messages for sequencing consensus. +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = "", serialize = ""))] +pub struct SequencingMessage( + pub Either, CommitteeConsensusMessage>, +); + +impl SequencingMessage { + // TODO: Disable panic after the `ViewSync` case is implemented. + /// Get the view number this message relates to + #[allow(clippy::panic)] + fn view_number(&self) -> TYPES::Time { + match &self.0 { + Left(general_message) => { + match general_message { + GeneralConsensusMessage::Proposal(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.get_view_number() + } + GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view_number(), + GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), + GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncCommitVote(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncFinalizeVote(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { + message.get_view_number() + } + GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { + message.get_view_number() + } + GeneralConsensusMessage::UpgradeProposal(message) => { + message.data.get_view_number() + } + GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), + } + } + Right(committee_message) => { + match committee_message { + CommitteeConsensusMessage::DAProposal(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.get_view_number() + } + CommitteeConsensusMessage::DAVote(vote_message) => { + vote_message.get_view_number() + } + CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, + CommitteeConsensusMessage::VidDisperseMsg(disperse) => { + disperse.data.get_view_number() + } + } + } + } + } + + // TODO: Disable panic after the `ViewSync` case is implemented. + /// Get the message purpos + #[allow(clippy::panic)] + fn purpose(&self) -> MessagePurpose { + match &self.0 { + Left(general_message) => match general_message { + GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, + GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { + MessagePurpose::Vote + } + GeneralConsensusMessage::ViewSyncPreCommitVote(_) + | GeneralConsensusMessage::ViewSyncCommitVote(_) + | GeneralConsensusMessage::ViewSyncFinalizeVote(_) => MessagePurpose::ViewSyncVote, + + GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) + | GeneralConsensusMessage::ViewSyncCommitCertificate(_) + | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { + MessagePurpose::ViewSyncCertificate + } + + GeneralConsensusMessage::UpgradeProposal(_) + | GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::Upgrade, + }, + Right(committee_message) => match committee_message { + CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, + CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, + CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, + CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, + }, + } + } +} + +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +/// Messages related to sending data between nodes +pub enum DataMessage { + /// Contains a transaction to be submitted + /// TODO rethink this when we start to send these messages + /// we only need the view number for broadcast + SubmitTransaction(TYPES::Transaction, TYPES::Time), + /// A request for data + RequestData(DataRequest), + /// A response to a data request + DataResponse(ResponseMessage), +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +/// Prepare qc from the leader +pub struct Proposal + DeserializeOwned> { + // NOTE: optimization could include view number to help look up parent leaf + // could even do 16 bit numbers if we want + /// The data being proposed. + pub data: PROPOSAL, + /// The proposal must be signed by the view leader + pub signature: ::PureAssembledSignatureType, + /// Phantom for TYPES + pub _pd: PhantomData, +} diff --git a/types/src/qc.rs b/types/src/qc.rs new file mode 100644 index 0000000000..cef6cdd2c0 --- /dev/null +++ b/types/src/qc.rs @@ -0,0 +1,310 @@ +//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! See more details in hotshot paper. + +use crate::{ + stake_table::StakeTableEntry, + traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, +}; +use ark_std::{ + fmt::Debug, + format, + marker::PhantomData, + rand::{CryptoRng, RngCore}, + vec, + vec::Vec, +}; +use bitvec::prelude::*; +use ethereum_types::U256; +use generic_array::GenericArray; +use jf_primitives::{ + errors::{PrimitivesError, PrimitivesError::ParameterError}, + signatures::AggregateableSignatureSchemes, +}; +use serde::{Deserialize, Serialize}; +use typenum::U32; + +/// An implementation of QC using BLS signature and a bit-vector. +#[derive(Serialize, Deserialize)] +pub struct BitVectorQC Deserialize<'a>>( + PhantomData, +); + +/// Public parameters of [`BitVectorQC`] +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QCParams Deserialize<'a>> { + /// the stake table (snapshot) this QC is verified against + pub stake_entries: Vec>, + /// threshold for the accumulated "weight" of votes to form a QC + pub threshold: U256, + /// public parameter for the aggregated signature scheme + pub agg_sig_pp: P, +} + +impl QuorumCertificateScheme for BitVectorQC +where + A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, + A::VerificationKey: SignatureKey, +{ + type QCProverParams = QCParams; + + // TODO: later with SNARKs we'll use a smaller verifier parameter + type QCVerifierParams = QCParams; + + type QC = (A::Signature, BitVec); + type MessageLength = U32; + type QuorumSize = U256; + + /// Sign a message with the signing key + fn sign>( + pp: &A::PublicParameter, + sk: &A::SigningKey, + msg: M, + prng: &mut R, + ) -> Result { + A::sign(pp, sk, msg, prng) + } + + fn assemble( + qc_pp: &Self::QCProverParams, + signers: &BitSlice, + sigs: &[A::Signature], + ) -> Result { + if signers.len() != qc_pp.stake_entries.len() { + return Err(ParameterError(format!( + "bit vector len {} != the number of stake entries {}", + signers.len(), + qc_pp.stake_entries.len(), + ))); + } + let total_weight: U256 = + qc_pp + .stake_entries + .iter() + .zip(signers.iter()) + .fold(U256::zero(), |acc, (entry, b)| { + if *b { + acc + entry.stake_amount + } else { + acc + } + }); + if total_weight < qc_pp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_pp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_pp.stake_entries.iter().zip(signers.iter()) { + if *b { + ver_keys.push(entry.stake_key.clone()); + } + } + if ver_keys.len() != sigs.len() { + return Err(ParameterError(format!( + "the number of ver_keys {} != the number of partial signatures {}", + ver_keys.len(), + sigs.len(), + ))); + } + let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; + + Ok((sig, signers.into())) + } + + fn check( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result { + let (sig, signers) = qc; + if signers.len() != qc_vp.stake_entries.len() { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + qc_vp.stake_entries.len(), + ))); + } + let total_weight: U256 = + qc_vp + .stake_entries + .iter() + .zip(signers.iter()) + .fold(U256::zero(), |acc, (entry, b)| { + if *b { + acc + entry.stake_amount + } else { + acc + } + }); + if total_weight < qc_vp.threshold { + return Err(ParameterError(format!( + "total_weight {} less than threshold {}", + total_weight, qc_vp.threshold, + ))); + } + let mut ver_keys = vec![]; + for (entry, b) in qc_vp.stake_entries.iter().zip(signers.iter()) { + if *b { + ver_keys.push(entry.stake_key.clone()); + } + } + A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; + + Ok(total_weight) + } + + fn trace( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray<::MessageUnit, Self::MessageLength>, + qc: &Self::QC, + ) -> Result::VerificationKey>, PrimitivesError> { + let (_sig, signers) = qc; + if signers.len() != qc_vp.stake_entries.len() { + return Err(ParameterError(format!( + "signers bit vector len {} != the number of stake entries {}", + signers.len(), + qc_vp.stake_entries.len(), + ))); + } + + Self::check(qc_vp, message, qc)?; + + let signer_pks: Vec<_> = qc_vp + .stake_entries + .iter() + .zip(signers.iter()) + .filter(|(_, b)| **b) + .map(|(pk, _)| pk.stake_key.clone()) + .collect(); + Ok(signer_pks) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use jf_primitives::signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, + SignatureScheme, + }; + use versioned_binary_serialization::{version::StaticVersion, BinarySerializer, Serializer}; + type Version = StaticVersion<0, 1>; + + macro_rules! test_quorum_certificate { + ($aggsig:tt) => { + let mut rng = jf_utils::test_rng(); + let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); + let key_pair1 = KeyPair::generate(&mut rng); + let key_pair2 = KeyPair::generate(&mut rng); + let key_pair3 = KeyPair::generate(&mut rng); + let entry1 = StakeTableEntry { + stake_key: key_pair1.ver_key(), + stake_amount: U256::from(3u8), + }; + let entry2 = StakeTableEntry { + stake_key: key_pair2.ver_key(), + stake_amount: U256::from(5u8), + }; + let entry3 = StakeTableEntry { + stake_key: key_pair3.ver_key(), + stake_amount: U256::from(7u8), + }; + let qc_pp = QCParams { + stake_entries: vec![entry1, entry2, entry3], + threshold: U256::from(10u8), + agg_sig_pp, + }; + let msg = [72u8; 32]; + let sig1 = + BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair1.sign_key_ref(), &msg, &mut rng) + .unwrap(); + let sig2 = + BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair2.sign_key_ref(), &msg, &mut rng) + .unwrap(); + let sig3 = + BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair3.sign_key_ref(), &msg, &mut rng) + .unwrap(); + + // happy path + let signers = bitvec![0, 1, 1]; + let qc = BitVectorQC::<$aggsig>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone(), sig3.clone()], + ) + .unwrap(); + assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert_eq!( + BitVectorQC::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + vec![key_pair2.ver_key(), key_pair3.ver_key()], + ); + + // Check the QC and the QCParams can be serialized / deserialized + assert_eq!( + qc, + Serializer::::deserialize(&Serializer::::serialize(&qc).unwrap()) + .unwrap() + ); + + assert_eq!( + qc_pp, + Serializer::::deserialize( + &Serializer::::serialize(&qc_pp).unwrap() + ) + .unwrap() + ); + + // bad paths + // number of signatures unmatch + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + signers.as_bitslice(), + &[sig2.clone()] + ) + .is_err()); + // total weight under threshold + let active_bad = bitvec![1, 1, 0]; + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + active_bad.as_bitslice(), + &[sig1.clone(), sig2.clone()] + ) + .is_err()); + // wrong bool vector length + let active_bad_2 = bitvec![0, 1, 1, 0]; + assert!(BitVectorQC::<$aggsig>::assemble( + &qc_pp, + active_bad_2.as_bitslice(), + &[sig2, sig3], + ) + .is_err()); + + assert!(BitVectorQC::<$aggsig>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad) + ) + .is_err()); + assert!(BitVectorQC::<$aggsig>::check( + &qc_pp, + &msg.into(), + &(qc.0.clone(), active_bad_2) + ) + .is_err()); + let bad_msg = [70u8; 32]; + assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + + let bad_sig = &sig1; + assert!( + BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) + .is_err() + ); + }; + } + #[test] + fn test_quorum_certificate() { + test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); + } +} diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs new file mode 100644 index 0000000000..55f0a9c8b5 --- /dev/null +++ b/types/src/signature_key.rs @@ -0,0 +1,127 @@ +//! Types and structs for the hotshot signature keys + +use crate::{ + qc::{BitVectorQC, QCParams}, + stake_table::StakeTableEntry, + traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, +}; +use bitvec::{slice::BitSlice, vec::BitVec}; +use ethereum_types::U256; +use generic_array::GenericArray; +use jf_primitives::{ + errors::PrimitivesError, + signatures::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, + SignatureScheme, + }, +}; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; +use tracing::instrument; + +/// BLS private key used to sign a message +pub type BLSPrivKey = SignKey; +/// BLS public key used to verify a signature +pub type BLSPubKey = VerKey; +/// Public parameters for BLS signature scheme +pub type BLSPublicParam = (); + +impl SignatureKey for BLSPubKey { + type PrivateKey = BLSPrivKey; + type StakeTableEntry = StakeTableEntry; + type QCParams = + QCParams::PublicParameter>; + type PureAssembledSignatureType = + ::Signature; + type QCType = (Self::PureAssembledSignatureType, BitVec); + type SignError = PrimitivesError; + + #[instrument(skip(self))] + fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { + // This is the validation for QC partial signature before append(). + BLSOverBN254CurveSignatureScheme::verify(&(), self, data, signature).is_ok() + } + + fn sign( + sk: &Self::PrivateKey, + data: &[u8], + ) -> Result { + BitVectorQC::::sign( + &(), + sk, + data, + &mut rand::thread_rng(), + ) + } + + fn from_private(private_key: &Self::PrivateKey) -> Self { + BLSPubKey::from(private_key) + } + + fn to_bytes(&self) -> Vec { + let mut buf = vec![]; + ark_serialize::CanonicalSerialize::serialize_compressed(self, &mut buf) + .expect("Serialization should not fail."); + buf + } + + fn from_bytes(bytes: &[u8]) -> Result { + Ok(ark_serialize::CanonicalDeserialize::deserialize_compressed( + bytes, + )?) + } + + fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey) { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed(new_seed)); + (kp.ver_key(), kp.sign_key_ref().clone()) + } + + fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry { + StakeTableEntry { + stake_key: *self, + stake_amount: U256::from(stake), + } + } + + fn get_public_key(entry: &Self::StakeTableEntry) -> Self { + entry.stake_key + } + + fn get_public_parameter( + stake_entries: Vec, + threshold: U256, + ) -> Self::QCParams { + QCParams { + stake_entries, + threshold, + agg_sig_pp: (), + } + } + + fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool { + let msg = GenericArray::from_slice(data); + BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() + } + + fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { + signature.clone() + } + + fn assemble( + real_qc_pp: &Self::QCParams, + signers: &BitSlice, + sigs: &[Self::PureAssembledSignatureType], + ) -> Self::QCType { + BitVectorQC::::assemble(real_qc_pp, signers, sigs) + .expect("this assembling shouldn't fail") + } + + fn genesis_proposer_pk() -> Self { + let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed([0u8; 32])); + kp.ver_key() + } +} diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs new file mode 100644 index 0000000000..1592583e90 --- /dev/null +++ b/types/src/simple_certificate.rs @@ -0,0 +1,179 @@ +//! Implementations of the simple certificate type. Used for Quorum, DA, and Timeout Certificates + +use std::{ + fmt::{self, Debug, Display, Formatter}, + hash::Hash, + marker::PhantomData, +}; + +use commit::{Commitment, CommitmentBoundsArkless, Committable}; +use ethereum_types::U256; + +use crate::{ + data::Leaf, + simple_vote::{ + DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, + ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, + }, + traits::{ + election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, + signature_key::SignatureKey, + }, + vote::{Certificate, HasViewNumber}, +}; + +use serde::{Deserialize, Serialize}; + +/// Trait which allows use to inject different threshold calculations into a Certificate type +pub trait Threshold { + /// Calculate a threshold based on the membership + fn threshold>(membership: &MEMBERSHIP) -> u64; +} + +/// Defines a threshold which is 2f + 1 (Amount needed for Quorum) +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct SuccessThreshold {} + +impl Threshold for SuccessThreshold { + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.success_threshold().into() + } +} + +/// Defines a threshold which is f + 1 (i.e at least one of the stake is honest) +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct OneHonestThreshold {} + +impl Threshold for OneHonestThreshold { + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.failure_threshold().into() + } +} + +/// Defines a threshold which is 0.9n + 1 (i.e. over 90% of the nodes with stake) +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct UpgradeThreshold {} + +impl Threshold for UpgradeThreshold { + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.upgrade_threshold().into() + } +} + +/// A certificate which can be created by aggregating many simple votes on the commitment. +#[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] +pub struct SimpleCertificate> { + /// The data this certificate is for. I.e the thing that was voted on to create this Certificate + pub data: VOTEABLE, + /// commitment of all the votes this cert should be signed over + pub vote_commitment: Commitment, + /// Which view this QC relates to + pub view_number: TYPES::Time, + /// assembled signature for certificate aggregation + pub signatures: Option<::QCType>, + /// If this QC is for the genesis block + pub is_genesis: bool, + /// phantom data for `THRESHOLD` and `TYPES` + pub _pd: PhantomData<(TYPES, THRESHOLD)>, +} + +impl> Certificate + for SimpleCertificate +{ + type Voteable = VOTEABLE; + type Threshold = THRESHOLD; + + fn create_signed_certificate( + vote_commitment: Commitment, + data: Self::Voteable, + sig: ::QCType, + view: TYPES::Time, + ) -> Self { + SimpleCertificate { + data, + vote_commitment, + view_number: view, + signatures: Some(sig), + is_genesis: false, + _pd: PhantomData, + } + } + fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { + if self.is_genesis && self.view_number == TYPES::Time::genesis() { + return true; + } + let real_qc_pp = ::get_public_parameter( + membership.get_committee_qc_stake_table(), + U256::from(Self::threshold(membership)), + ); + ::check( + &real_qc_pp, + self.vote_commitment.as_ref(), + self.signatures.as_ref().unwrap(), + ) + } + fn threshold>(membership: &MEMBERSHIP) -> u64 { + THRESHOLD::threshold(membership) + } + fn get_data(&self) -> &Self::Voteable { + &self.data + } + fn get_data_commitment(&self) -> Commitment { + self.vote_commitment + } +} + +impl> + HasViewNumber for SimpleCertificate +{ + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} +impl Display for QuorumCertificate { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "view: {:?}, is_genesis: {:?}", + self.view_number, self.is_genesis + ) + } +} + +impl QuorumCertificate { + #[must_use] + /// Creat the Genisis certificate + pub fn genesis() -> Self { + let data = QuorumData { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + }; + let commit = data.commit(); + Self { + data, + vote_commitment: commit, + view_number: ::genesis(), + signatures: None, + is_genesis: true, + _pd: PhantomData, + } + } +} + +/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` +pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a DA certificate over `DAData` +pub type DACertificate = SimpleCertificate; +/// Type alias for a Timeout certificate over a view number +pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `ViewSyncPreCommit` certificate over a view number +pub type ViewSyncPreCommitCertificate2 = + SimpleCertificate, OneHonestThreshold>; +/// Type alias for a `ViewSyncCommit` certificate over a view number +pub type ViewSyncCommitCertificate2 = + SimpleCertificate, SuccessThreshold>; +/// Type alias for a `ViewSyncFinalize` certificate over a view number +pub type ViewSyncFinalizeCertificate2 = + SimpleCertificate, SuccessThreshold>; +/// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` +pub type UpgradeCertificate = + SimpleCertificate, UpgradeThreshold>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs new file mode 100644 index 0000000000..3573ad4628 --- /dev/null +++ b/types/src/simple_vote.rs @@ -0,0 +1,258 @@ +//! Implementations of the simple vote types. + +use std::{fmt::Debug, hash::Hash}; + +use commit::{Commitment, Committable}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; + +use crate::{ + data::Leaf, + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + vid::VidCommitment, + vote::{HasViewNumber, Vote}, +}; +use versioned_binary_serialization::version::Version; + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a yes vote. +#[serde(bound(deserialize = ""))] +pub struct QuorumData { + /// Commitment to the leaf + pub leaf_commit: Commitment>, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a DA vote. +pub struct DAData { + /// Commitment to a block payload + pub payload_commit: VidCommitment, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a timeout vote. +pub struct TimeoutData { + /// View the timeout is for + pub view: TYPES::Time, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a VID vote. +pub struct VIDData { + /// Commitment to the block payload the VID vote is on. + pub payload_commit: VidCommitment, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Pre Commit vote. +pub struct ViewSyncPreCommitData { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Commit vote. +pub struct ViewSyncCommitData { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Finalize vote. +pub struct ViewSyncFinalizeData { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::Time, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Upgrade vote. +pub struct UpgradeProposalData { + /// The old version that we are upgrading from. + pub old_version: Version, + /// The new version that we are upgrading to. + pub new_version: Version, + /// A unique identifier for the specific protocol being voted on. + pub new_version_hash: Vec, + /// The last block for which the old version will be in effect. + pub old_version_last_block: TYPES::Time, + /// The first block for which the new version will be in effect. + pub new_version_first_block: TYPES::Time, +} + +/// Marker trait for data or commitments that can be voted on. +/// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait +/// Sealing this trait prevents creating new vote types outside this file. +pub trait Voteable: + sealed::Sealed + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq +{ +} + +/// Sealed is used to make sure no other files can implement the Voteable trait. +/// All simple voteable types should be implemented here. This prevents us from +/// creating/using improper types when using the vote types. +mod sealed { + use commit::Committable; + + /// Only structs in this file can impl `Sealed` + pub trait Sealed {} + + // TODO: Does the implement for things outside this file that are commitable? + impl Sealed for C {} +} + +/// A simple yes vote over some votable type. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +pub struct SimpleVote { + /// The signature share associated with this vote + pub signature: ( + TYPES::SignatureKey, + ::PureAssembledSignatureType, + ), + /// The leaf commitment being voted on. + pub data: DATA, + /// The view this vote was cast for + pub view_number: TYPES::Time, +} + +impl HasViewNumber for SimpleVote { + fn get_view_number(&self) -> ::Time { + self.view_number + } +} + +impl Vote for SimpleVote { + type Commitment = DATA; + + fn get_signing_key(&self) -> ::SignatureKey { + self.signature.0.clone() + } + + fn get_signature(&self) -> ::PureAssembledSignatureType { + self.signature.1.clone() + } + + fn get_data(&self) -> &DATA { + &self.data + } + + fn get_data_commitment(&self) -> Commitment { + self.data.commit() + } +} + +impl SimpleVote { + /// Creates and signs a simple vote + /// # Errors + /// If we are unable to sign the data + pub fn create_signed_vote( + data: DATA, + view: TYPES::Time, + pub_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + ) -> Result::SignError> { + match TYPES::SignatureKey::sign(private_key, data.commit().as_ref()) { + Ok(signature) => Ok(Self { + signature: (pub_key.clone(), signature), + data, + view_number: view, + }), + Err(e) => Err(e), + } + } +} + +impl Committable for QuorumData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("Yes Vote") + .var_size_bytes(self.leaf_commit.as_ref()) + .finalize() + } +} + +impl Committable for TimeoutData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("Timeout Vote") + .u64(*self.view) + .finalize() + } +} + +impl Committable for DAData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("DA Vote") + .var_size_bytes(self.payload_commit.as_ref()) + .finalize() + } +} + +impl Committable for VIDData { + fn commit(&self) -> Commitment { + commit::RawCommitmentBuilder::new("VID Vote") + .var_size_bytes(self.payload_commit.as_ref()) + .finalize() + } +} + +impl Committable for UpgradeProposalData { + fn commit(&self) -> Commitment { + let builder = commit::RawCommitmentBuilder::new("Upgrade Vote"); + builder + .u64(*self.new_version_first_block) + .u64(*self.old_version_last_block) + .var_size_bytes(self.new_version_hash.as_slice()) + .u16(self.new_version.minor) + .u16(self.new_version.major) + .u16(self.old_version.minor) + .u16(self.old_version.major) + .finalize() + } +} + +/// This implements commit for all the types which contain a view and relay public key. +fn view_and_relay_commit( + view: TYPES::Time, + relay: u64, + tag: &str, +) -> Commitment { + let builder = commit::RawCommitmentBuilder::new(tag); + builder.u64(*view).u64(relay).finalize() +} + +impl Committable for ViewSyncPreCommitData { + fn commit(&self) -> Commitment { + view_and_relay_commit::(self.round, self.relay, "View Sync Precommit") + } +} + +impl Committable for ViewSyncFinalizeData { + fn commit(&self) -> Commitment { + view_and_relay_commit::(self.round, self.relay, "View Sync Finalize") + } +} +impl Committable for ViewSyncCommitData { + fn commit(&self) -> Commitment { + view_and_relay_commit::(self.round, self.relay, "View Sync Commit") + } +} + +// impl votable for all the data types in this file sealed marker should ensure nothing is accidently +// implemented for structs that aren't "voteable" +impl Voteable + for V +{ +} + +// Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file +/// Quorum vote Alias +pub type QuorumVote = SimpleVote>; +/// DA vote type alias +pub type DAVote = SimpleVote; +/// Timeout Vote type alias +pub type TimeoutVote = SimpleVote>; +/// View Sync Commit Vote type alias +pub type ViewSyncCommitVote = SimpleVote>; +/// View Sync Pre Commit Vote type alias +pub type ViewSyncPreCommitVote = SimpleVote>; +/// View Sync Finalize Vote type alias +pub type ViewSyncFinalizeVote = SimpleVote>; +/// Upgrade proposal vote +pub type UpgradeVote = SimpleVote>; diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs new file mode 100644 index 0000000000..7c6525e0eb --- /dev/null +++ b/types/src/stake_table.rs @@ -0,0 +1,31 @@ +//! Types and structs related to the stake table + +use crate::traits::signature_key::{SignatureKey, StakeTableEntryType}; +use ethereum_types::U256; +use serde::{Deserialize, Serialize}; + +/// Stake table entry +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash, Eq)] +#[serde(bound(deserialize = ""))] +pub struct StakeTableEntry { + /// The public key + pub stake_key: K, + /// The associated stake amount + pub stake_amount: U256, +} + +impl StakeTableEntryType for StakeTableEntry { + /// Get the stake amount + fn get_stake(&self) -> U256 { + self.stake_amount + } +} + +impl StakeTableEntry { + /// Get the public key + pub fn get_key(&self) -> &K { + &self.stake_key + } +} + +// TODO(Chengyu): add stake table snapshot here diff --git a/types/src/traits.rs b/types/src/traits.rs new file mode 100644 index 0000000000..a698d2c158 --- /dev/null +++ b/types/src/traits.rs @@ -0,0 +1,15 @@ +//! Common traits for the `HotShot` protocol +pub mod block_contents; +pub mod consensus_api; +pub mod election; +pub mod metrics; +pub mod network; +pub mod node_implementation; +pub mod qc; +pub mod signature_key; +pub mod stake_table; +pub mod states; +pub mod storage; + +pub use block_contents::BlockPayload; +pub use states::ValidatedState; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs new file mode 100644 index 0000000000..24080dbb18 --- /dev/null +++ b/types/src/traits/block_contents.rs @@ -0,0 +1,149 @@ +//! Abstraction over the contents of a block +//! +//! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which +//! describe the behaviors that a block is expected to have. + +use crate::{ + data::Leaf, + traits::{node_implementation::NodeType, ValidatedState}, + utils::BuilderCommitment, + vid::{vid_scheme, VidCommitment, VidSchemeType}, +}; +use commit::{Commitment, Committable}; +use jf_primitives::vid::VidScheme; +use serde::{de::DeserializeOwned, Serialize}; + +use std::{ + error::Error, + fmt::{Debug, Display}, + future::Future, + hash::Hash, +}; + +/// Abstraction over any type of transaction. Used by [`BlockPayload`]. +pub trait Transaction: + Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash +{ +} + +/// Abstraction over the full contents of a block +/// +/// This trait encapsulates the behaviors that the transactions of a block must have in order to be +/// used by consensus +/// * Must have a predefined error type ([`BlockPayload::Error`]) +/// * Must have a transaction type that can be compared for equality, serialized and serialized, +/// sent between threads, and can have a hash produced of it +/// * Must be hashable +pub trait BlockPayload: + Serialize + Clone + Debug + Display + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned +{ + /// The error type for this type of block + type Error: Error + Debug + Send + Sync + Serialize + DeserializeOwned; + + /// The type of the transitions we are applying + type Transaction: Transaction; + + /// Data created during block building which feeds into the block header + type Metadata: Clone + Debug + DeserializeOwned + Eq + Hash + Send + Sync + Serialize; + + /// Encoded payload. + type Encode<'a>: 'a + Iterator + Send + where + Self: 'a; + + /// Build a payload and associated metadata with the transactions. + /// + /// # Errors + /// If the transaction length conversion fails. + fn from_transactions( + transactions: impl IntoIterator, + ) -> Result<(Self, Self::Metadata), Self::Error>; + + /// Build a payload with the encoded transaction bytes, metadata, + /// and the associated number of VID storage nodes + /// + /// `I` may be, but not necessarily is, the `Encode` type directly from `fn encode`. + fn from_bytes(encoded_transactions: I, metadata: &Self::Metadata) -> Self + where + I: Iterator; + + /// Build the genesis payload and metadata. + fn genesis() -> (Self, Self::Metadata); + + /// Encode the payload + /// + /// # Errors + /// If the transaction length conversion fails. + fn encode(&self) -> Result, Self::Error>; + + /// List of transaction commitments. + fn transaction_commitments( + &self, + metadata: &Self::Metadata, + ) -> Vec>; + + /// Generate commitment that builders use to sign block options. + fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; + + /// Get the transactions in the payload. + fn get_transactions(&self, metadata: &Self::Metadata) -> &Vec; +} + +/// extra functions required on block to be usable by hotshot-testing +pub trait TestableBlock: BlockPayload + Debug { + /// generate a genesis block + fn genesis() -> Self; + + /// the number of transactions in this block + fn txn_count(&self) -> u64; +} + +/// Compute the VID payload commitment. +/// TODO(Gus) delete this function? +/// # Panics +/// If the VID computation fails. +#[must_use] +pub fn vid_commitment( + encoded_transactions: &Vec, + num_storage_nodes: usize, +) -> ::Commit { + #[allow(clippy::panic)] + vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:\n\t(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}\n\t{err}", encoded_transactions.len())) +} + +/// The number of storage nodes to use when computing the genesis VID commitment. +/// +/// The number of storage nodes for the genesis VID commitment is arbitrary, since we don't actually +/// do dispersal for the genesis block. For simplicity and performance, we use 1. +pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; + +/// Header of a block, which commits to a [`BlockPayload`]. +pub trait BlockHeader: + Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable +{ + /// Build a header with the parent validate state, instance-level state, parent leaf, payload + /// commitment, and metadata. + fn new( + parent_state: &TYPES::ValidatedState, + instance_state: &>::Instance, + parent_leaf: &Leaf, + payload_commitment: VidCommitment, + metadata: ::Metadata, + ) -> impl Future + Send; + + /// Build the genesis header, payload, and metadata. + fn genesis( + instance_state: &>::Instance, + payload_commitment: VidCommitment, + metadata: ::Metadata, + ) -> Self; + + /// Get the block number. + fn block_number(&self) -> u64; + + /// Get the payload commitment. + fn payload_commitment(&self) -> VidCommitment; + + /// Get the metadata. + fn metadata(&self) -> &::Metadata; +} diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs new file mode 100644 index 0000000000..b596695f56 --- /dev/null +++ b/types/src/traits/consensus_api.rs @@ -0,0 +1,42 @@ +//! Contains the [`ConsensusApi`] trait. + +use crate::{ + event::Event, + traits::{ + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + }, +}; +use async_trait::async_trait; + +use std::{num::NonZeroUsize, time::Duration}; + +/// The API that tasks use to talk to the system +/// TODO we plan to drop this +#[async_trait] +pub trait ConsensusApi>: Send + Sync { + /// Total number of nodes in the network. Also known as `n`. + fn total_nodes(&self) -> NonZeroUsize; + + /// The minimum amount of time a leader has to wait before sending a propose + fn propose_min_round_time(&self) -> Duration; + + /// The maximum amount of time a leader can wait before sending a propose. + /// If this time is reached, the leader has to send a propose without transactions. + fn propose_max_round_time(&self) -> Duration; + + /// Retuns the maximum transactions allowed in a block + fn max_transactions(&self) -> NonZeroUsize; + + /// Returns the minimum transactions that must be in a block + fn min_transactions(&self) -> usize; + + /// Get a reference to the public key. + fn public_key(&self) -> &TYPES::SignatureKey; + + /// Get a reference to the private key. + fn private_key(&self) -> &::PrivateKey; + + /// Notify the system of an event within `hotshot-consensus`. + async fn send_event(&self, event: Event); +} diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs new file mode 100644 index 0000000000..0f3eec9b53 --- /dev/null +++ b/types/src/traits/election.rs @@ -0,0 +1,91 @@ +//! The election trait, used to decide which node is the leader and determine if a vote is valid. + +// Needed to avoid the non-binding `let` warning. +#![allow(clippy::let_underscore_untyped)] + +use super::node_implementation::NodeType; + +use crate::{traits::signature_key::SignatureKey, PeerConfig}; + +use snafu::Snafu; +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; + +/// Error for election problems +#[derive(Snafu, Debug)] +pub enum ElectionError { + /// stub error to be filled in + StubError, + /// Math error doing something + /// NOTE: it would be better to make Election polymorphic over + /// the election error and then have specific math errors + MathError, +} + +/// election config +pub trait ElectionConfig: + Default + + Clone + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + Sync + + Send + + core::fmt::Debug +{ +} + +/// A protocol for determining membership in and participating in a committee. +pub trait Membership: + Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static +{ + /// generate a default election configuration + fn default_election_config( + num_nodes_with_stake: u64, + num_nodes_without_stake: u64, + ) -> TYPES::ElectionConfigType; + + /// create an election + /// TODO may want to move this to a testableelection trait + fn create_election( + entries: Vec>, + config: TYPES::ElectionConfigType, + ) -> Self; + + /// Clone the public key and corresponding stake table for current elected committee + fn get_committee_qc_stake_table( + &self, + ) -> Vec<::StakeTableEntry>; + + /// The leader of the committee for view `view_number`. + fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; + + /// The staked members of the committee for view `view_number`. + fn get_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + + /// The non-staked members of the committee for view `view_number`. + fn get_non_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + + /// Get whole (staked + non-staked) committee for view `view_number`. + fn get_whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; + + /// Check if a key has stake + fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; + + /// Get the stake table entry for a public key, returns `None` if the + /// key is not in the table + fn get_stake( + &self, + pub_key: &TYPES::SignatureKey, + ) -> Option<::StakeTableEntry>; + + /// Returns the number of total nodes in the committee + fn total_nodes(&self) -> usize; + + /// Returns the threshold for a specific `Membership` implementation + fn success_threshold(&self) -> NonZeroU64; + + /// Returns the threshold for a specific `Membership` implementation + fn failure_threshold(&self) -> NonZeroU64; + + /// Returns the threshold required to upgrade the network protocol + fn upgrade_threshold(&self) -> NonZeroU64; +} diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs new file mode 100644 index 0000000000..fc69b5c077 --- /dev/null +++ b/types/src/traits/metrics.rs @@ -0,0 +1,295 @@ +//! The [`Metrics`] trait is used to collect information from multiple components in the entire system. +//! +//! This trait can be used to spawn the following traits: +//! - [`Counter`]: an ever-increasing value (example usage: total bytes send/received) +//! - [`Gauge`]: a value that store the latest value, and can go up and down (example usage: amount of users logged in) +//! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) +//! - [`Label`]: Stores the last string (example usage: current version, network online/offline) + +use dyn_clone::DynClone; +use std::fmt::Debug; + +/// The metrics type. +pub trait Metrics: Send + Sync + DynClone + Debug { + /// Create a [`Counter`] with an optional `unit_label`. + /// + /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" + fn create_counter(&self, label: String, unit_label: Option) -> Box; + /// Create a [`Gauge`] with an optional `unit_label`. + /// + /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" + fn create_gauge(&self, label: String, unit_label: Option) -> Box; + /// Create a [`Histogram`] with an optional `unit_label`. + /// + /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" + fn create_histogram(&self, label: String, unit_label: Option) -> Box; + /// Create a [`Label`]. + fn create_label(&self, label: String) -> Box; + + /// Create a subgroup with a specified prefix. + fn subgroup(&self, subgroup_name: String) -> Box; +} + +/// Use this if you're not planning to use any metrics. All methods are implemented as a no-op +#[derive(Clone, Copy, Debug, Default)] +pub struct NoMetrics; + +impl NoMetrics { + /// Create a new `Box` with this [`NoMetrics`] + #[must_use] + pub fn boxed() -> Box { + Box::::default() + } +} + +impl Metrics for NoMetrics { + fn create_counter(&self, _: String, _: Option) -> Box { + Box::new(NoMetrics) + } + + fn create_gauge(&self, _: String, _: Option) -> Box { + Box::new(NoMetrics) + } + + fn create_histogram(&self, _: String, _: Option) -> Box { + Box::new(NoMetrics) + } + + fn create_label(&self, _: String) -> Box { + Box::new(NoMetrics) + } + + fn subgroup(&self, _: String) -> Box { + Box::new(NoMetrics) + } +} + +impl Counter for NoMetrics { + fn add(&self, _: usize) {} +} +impl Gauge for NoMetrics { + fn set(&self, _: usize) {} + fn update(&self, _: i64) {} +} +impl Histogram for NoMetrics { + fn add_point(&self, _: f64) {} +} +impl Label for NoMetrics { + fn set(&self, _: String) {} +} + +/// An ever-incrementing counter +pub trait Counter: Send + Sync + Debug + DynClone { + /// Add a value to the counter + fn add(&self, amount: usize); +} +/// A gauge that stores the latest value. +pub trait Gauge: Send + Sync + Debug + DynClone { + /// Set the gauge value + fn set(&self, amount: usize); + + /// Update the guage value + fn update(&self, delts: i64); +} + +/// A histogram which will record a series of points. +pub trait Histogram: Send + Sync + Debug + DynClone { + /// Add a point to this histogram. + fn add_point(&self, point: f64); +} + +/// A label that stores the last string value. +pub trait Label: Send + Sync + DynClone { + /// Set the label value + fn set(&self, value: String); +} +dyn_clone::clone_trait_object!(Metrics); +dyn_clone::clone_trait_object!(Gauge); +dyn_clone::clone_trait_object!(Counter); +dyn_clone::clone_trait_object!(Histogram); +dyn_clone::clone_trait_object!(Label); + +#[cfg(test)] +mod test { + use super::*; + use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + }; + + #[derive(Debug, Clone)] + struct TestMetrics { + prefix: String, + values: Arc>, + } + + impl TestMetrics { + fn sub(&self, name: String) -> Self { + let prefix = if self.prefix.is_empty() { + name + } else { + format!("{}-{name}", self.prefix) + }; + Self { + prefix, + values: Arc::clone(&self.values), + } + } + } + + impl Metrics for TestMetrics { + fn create_counter( + &self, + label: String, + _unit_label: Option, + ) -> Box { + Box::new(self.sub(label)) + } + + fn create_gauge( + &self, + label: String, + _unit_label: Option, + ) -> Box { + Box::new(self.sub(label)) + } + + fn create_histogram( + &self, + label: String, + _unit_label: Option, + ) -> Box { + Box::new(self.sub(label)) + } + + fn create_label(&self, label: String) -> Box { + Box::new(self.sub(label)) + } + + fn subgroup(&self, subgroup_name: String) -> Box { + Box::new(self.sub(subgroup_name)) + } + } + + impl Counter for TestMetrics { + fn add(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .counters + .entry(self.prefix.clone()) + .or_default() += amount; + } + } + + impl Gauge for TestMetrics { + fn set(&self, amount: usize) { + *self + .values + .lock() + .unwrap() + .gauges + .entry(self.prefix.clone()) + .or_default() = amount; + } + fn update(&self, delta: i64) { + let mut values = self.values.lock().unwrap(); + let value = values.gauges.entry(self.prefix.clone()).or_default(); + let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); + *value = usize::try_from(signed_value + delta).unwrap_or(0); + } + } + + impl Histogram for TestMetrics { + fn add_point(&self, point: f64) { + self.values + .lock() + .unwrap() + .histograms + .entry(self.prefix.clone()) + .or_default() + .push(point); + } + } + + impl Label for TestMetrics { + fn set(&self, value: String) { + *self + .values + .lock() + .unwrap() + .labels + .entry(self.prefix.clone()) + .or_default() = value; + } + } + + #[derive(Default, Debug)] + struct Inner { + counters: HashMap, + gauges: HashMap, + histograms: HashMap>, + labels: HashMap, + } + + #[test] + fn test() { + let values = Arc::default(); + // This is all scoped so all the arcs should go out of scope + { + let metrics: Box = Box::new(TestMetrics { + prefix: String::new(), + values: Arc::clone(&values), + }); + + let gauge = metrics.create_gauge("foo".to_string(), None); + let counter = metrics.create_counter("bar".to_string(), None); + let histogram = metrics.create_histogram("baz".to_string(), None); + + gauge.set(5); + gauge.update(-2); + + for i in 0..5 { + counter.add(i); + } + + for i in 0..10 { + histogram.add_point(f64::from(i)); + } + + let sub = metrics.subgroup("child".to_string()); + + let sub_gauge = sub.create_gauge("foo".to_string(), None); + let sub_counter = sub.create_counter("bar".to_string(), None); + let sub_histogram = sub.create_histogram("baz".to_string(), None); + + sub_gauge.set(10); + + for i in 0..5 { + sub_counter.add(i * 2); + } + + for i in 0..10 { + sub_histogram.add_point(f64::from(i) * 2.0); + } + } + + // The above variables are scoped so they should be dropped at this point + // One of the rare times we can use `Arc::try_unwrap`! + let values = Arc::try_unwrap(values).unwrap().into_inner().unwrap(); + assert_eq!(values.gauges["foo"], 3); + assert_eq!(values.counters["bar"], 10); // 0..5 + assert_eq!( + values.histograms["baz"], + vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + ); + + assert_eq!(values.gauges["child-foo"], 10); + assert_eq!(values.counters["child-bar"], 20); // 0..5 *2 + assert_eq!( + values.histograms["child-baz"], + vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0] + ); + } +} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs new file mode 100644 index 0000000000..07c947c8cb --- /dev/null +++ b/types/src/traits/network.rs @@ -0,0 +1,681 @@ +//! Network access compatibility +//! +//! Contains types and traits used by `HotShot` to abstract over network access + +use async_compatibility_layer::art::async_sleep; +#[cfg(async_executor_impl = "async-std")] +use async_std::future::TimeoutError; +use derivative::Derivative; +use dyn_clone::DynClone; +use futures::channel::{mpsc, oneshot}; +#[cfg(async_executor_impl = "tokio")] +use tokio::time::error::Elapsed as TimeoutError; +#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] +compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} +use super::{node_implementation::NodeType, signature_key::SignatureKey}; +use crate::{ + data::ViewNumber, + message::{MessagePurpose, SequencingMessage}, + BoxSyncFuture, +}; +use async_compatibility_layer::channel::UnboundedSendError; +use async_trait::async_trait; +use rand::{ + distributions::{Bernoulli, Uniform}, + prelude::Distribution, +}; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use versioned_binary_serialization::version::StaticVersionType; + +/// for any errors we decide to add to memory network +#[derive(Debug, Snafu, Serialize, Deserialize)] +#[snafu(visibility(pub))] +pub enum MemoryNetworkError { + /// stub + Stub, +} + +/// Centralized server specific errors +#[derive(Debug, Snafu, Serialize, Deserialize)] +#[snafu(visibility(pub))] +pub enum CentralizedServerNetworkError { + /// The centralized server could not find a specific message. + NoMessagesInQueue, +} + +/// Centralized server specific errors +#[derive(Debug, Snafu, Serialize, Deserialize)] +#[snafu(visibility(pub))] +pub enum PushCdnNetworkError { + /// Failed to receive a message from the server + FailedToReceive, + /// Failed to send a message to the server + FailedToSend, +} + +/// Web server specific errors +#[derive(Debug, Snafu, Serialize, Deserialize)] +#[snafu(visibility(pub))] +pub enum WebServerNetworkError { + /// The injected consensus data is incorrect + IncorrectConsensusData, + /// The client returned an error + ClientError, + /// Endpoint parsed incorrectly + EndpointError, + /// Client disconnected + ClientDisconnected, +} + +/// the type of transmission +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub enum TransmitType { + /// directly transmit + Direct, + /// broadcast the message to all + Broadcast, + /// broadcast to DA committee + DACommitteeBroadcast, +} + +/// Error type for networking +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum NetworkError { + /// Libp2p specific errors + Libp2p { + /// source of error + source: Box, + }, + /// collection of libp2p secific errors + Libp2pMulti { + /// sources of errors + sources: Vec>, + }, + /// memory network specific errors + MemoryNetwork { + /// source of error + source: MemoryNetworkError, + }, + /// Push CDN network-specific errors + PushCdnNetwork { + /// source of error + source: PushCdnNetworkError, + }, + /// Centralized server specific errors + CentralizedServer { + /// source of error + source: CentralizedServerNetworkError, + }, + + /// Web server specific errors + WebServer { + /// source of error + source: WebServerNetworkError, + }, + /// unimplemented functionality + UnimplementedFeature, + /// Could not deliver a message to a specified recipient + CouldNotDeliver, + /// Attempted to deliver a message to an unknown node + NoSuchNode, + /// Failed to serialize a network message + FailedToSerialize { + /// Originating bincode error + source: anyhow::Error, + }, + /// Failed to deserealize a network message + FailedToDeserialize { + /// originating bincode error + source: anyhow::Error, + }, + /// A timeout occurred + Timeout { + /// Source of error + source: TimeoutError, + }, + /// Error sending output to consumer of NetworkingImplementation + /// TODO this should have more information + ChannelSend, + /// The underlying connection has been shut down + ShutDown, + /// unable to cancel a request, the request has already been cancelled + UnableToCancel, + /// The requested data was not found + NotFound, +} +#[derive(Clone, Debug)] +// Storing view number as a u64 to avoid the need TYPES generic +/// Events to poll or cancel consensus processes. +pub enum ConsensusIntentEvent { + /// Poll for votes for a particular view + PollForVotes(u64), + /// Poll for a proposal for a particular view + PollForProposal(u64), + /// Poll for VID disperse data for a particular view + PollForVIDDisperse(u64), + /// Poll for the most recent [quorum/da] proposal the webserver has + PollForLatestProposal, + /// Poll for the most recent view sync proposal the webserver has + PollForLatestViewSyncCertificate, + /// Poll for a DAC for a particular view + PollForDAC(u64), + /// Poll for view sync votes starting at a particular view + PollForViewSyncVotes(u64), + /// Poll for view sync proposals (certificates) for a particular view + PollForViewSyncCertificate(u64), + /// Poll for new transactions + PollForTransactions(u64), + /// Poll for future leader + PollFutureLeader(u64, K), + /// Cancel polling for votes + CancelPollForVotes(u64), + /// Cancel polling for view sync votes. + CancelPollForViewSyncVotes(u64), + /// Cancel polling for proposals. + CancelPollForProposal(u64), + /// Cancel polling for the latest proposal. + CancelPollForLatestProposal(u64), + /// Cancel polling for the latest view sync certificate + CancelPollForLatestViewSyncCertificate(u64), + /// Cancal polling for DAC. + CancelPollForDAC(u64), + /// Cancel polling for view sync certificate. + CancelPollForViewSyncCertificate(u64), + /// Cancel polling for VID disperse data + CancelPollForVIDDisperse(u64), + /// Cancel polling for transactions + CancelPollForTransactions(u64), +} + +impl ConsensusIntentEvent { + /// Get the view number of the event. + #[must_use] + pub fn view_number(&self) -> u64 { + match &self { + ConsensusIntentEvent::PollForVotes(view_number) + | ConsensusIntentEvent::PollForProposal(view_number) + | ConsensusIntentEvent::PollForDAC(view_number) + | ConsensusIntentEvent::PollForViewSyncVotes(view_number) + | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) + | ConsensusIntentEvent::CancelPollForVotes(view_number) + | ConsensusIntentEvent::CancelPollForProposal(view_number) + | ConsensusIntentEvent::CancelPollForLatestProposal(view_number) + | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(view_number) + | ConsensusIntentEvent::PollForVIDDisperse(view_number) + | ConsensusIntentEvent::CancelPollForVIDDisperse(view_number) + | ConsensusIntentEvent::CancelPollForDAC(view_number) + | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) + | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) + | ConsensusIntentEvent::PollForTransactions(view_number) + | ConsensusIntentEvent::CancelPollForTransactions(view_number) + | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, + ConsensusIntentEvent::PollForLatestProposal + | ConsensusIntentEvent::PollForLatestViewSyncCertificate => 1, + } + } +} + +/// common traits we would like our network messages to implement +pub trait NetworkMsg: + Serialize + for<'a> Deserialize<'a> + Clone + Sync + Send + Debug + 'static +{ +} + +/// Trait that bundles what we need from a request ID +pub trait Id: Eq + PartialEq + Hash {} +impl NetworkMsg for Vec {} + +/// a message +pub trait ViewMessage { + /// get the view out of the message + fn get_view_number(&self) -> TYPES::Time; + // TODO move out of this trait. + /// get the purpose of the message + fn purpose(&self) -> MessagePurpose; +} + +/// Wraps a oneshot channel for responding to requests +pub struct ResponseChannel(pub oneshot::Sender); + +/// A request for some data that the consensus layer is asking for. +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct DataRequest { + /// Request + pub request: RequestKind, + /// View this message is for + pub view: TYPES::Time, + /// signature of the Sha256 hash of the data so outsiders can't use know + /// public keys with stake. + pub signature: ::PureAssembledSignatureType, +} + +/// Underlying data request +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +pub enum RequestKind { + /// Request VID data by our key and the VID commitment + VID(TYPES::Time, TYPES::SignatureKey), + /// Request a DA proposal for a certain view + DAProposal(TYPES::Time), +} + +/// A resopnse for a request. `SequencingMessage` is the same as other network messages +/// The kind of message `M` is is determined by what we requested +#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum ResponseMessage { + /// Peer returned us some data + Found(SequencingMessage), + /// Peer failed to get us data + NotFound, + /// The Request was denied + Denied, +} + +/// represents a networking implmentration +/// exposes low level API for interacting with a network +/// intended to be implemented for libp2p, the centralized server, +/// and memory network +#[async_trait] +pub trait ConnectedNetwork: + Clone + Send + Sync + 'static +{ + /// Pauses the underlying network + fn pause(&self); + + /// Resumes the underlying network + fn resume(&self); + + /// Blocks until the network is successfully initialized + async fn wait_for_ready(&self); + + /// checks if the network is ready + /// nonblocking + async fn is_ready(&self) -> bool; + + /// Blocks until the network is shut down + /// then returns true + fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> + where + 'a: 'b, + Self: 'b; + + /// broadcast message to some subset of nodes + /// blocking + async fn broadcast_message( + &self, + message: M, + recipients: BTreeSet, + bind_version: VER, + ) -> Result<(), NetworkError>; + + /// broadcast a message only to a DA committee + /// blocking + async fn da_broadcast_message( + &self, + message: M, + recipients: BTreeSet, + bind_version: VER, + ) -> Result<(), NetworkError>; + + /// Sends a direct message to a specific node + /// blocking + async fn direct_message( + &self, + message: M, + recipient: K, + bind_version: VER, + ) -> Result<(), NetworkError>; + + /// Receive one or many messages from the underlying network. + /// + /// # Errors + /// If there is a network-related failure. + async fn recv_msgs(&self) -> Result, NetworkError>; + + /// Ask request the network for some data. Returns the request ID for that data, + /// The ID returned can be used for cancelling the request + async fn request_data( + &self, + _request: M, + _recipient: K, + _bind_version: VER, + ) -> Result, NetworkError> { + Err(NetworkError::UnimplementedFeature) + } + + /// Spawn a request task in the given network layer. If it supports + /// Request and responses it will return the receiving end of a channel. + /// Requests the network receives will be sent over this channel along + /// with a return channel to send the response back to. + /// + /// Returns `None`` if network does not support handling requests + async fn spawn_request_receiver_task( + &self, + _bind_version: VER, + ) -> Option)>> { + None + } + + /// queues lookup of a node + async fn queue_node_lookup( + &self, + _view_number: ViewNumber, + _pk: K, + ) -> Result<(), UnboundedSendError>> { + Ok(()) + } + + /// Injects consensus data such as view number into the networking implementation + /// blocking + /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType + async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} + + /// handles view update + fn update_view(&self, _view: u64) {} +} + +/// Describes additional functionality needed by the test network implementation +pub trait TestableNetworkingImplementation +where + Self: Sized, +{ + /// generates a network given an expected node count + #[allow(clippy::type_complexity)] + fn generator( + expected_node_count: usize, + num_bootstrap: usize, + network_id: usize, + da_committee_size: usize, + is_da: bool, + reliability_config: Option>, + secondary_network_delay: Duration, + ) -> Box (Arc, Arc) + 'static>; + + /// Get the number of messages in-flight. + /// + /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + fn in_flight_message_count(&self) -> Option; +} + +/// Changes that can occur in the network +#[derive(Debug)] +pub enum NetworkChange { + /// A node is connected + NodeConnected(P), + + /// A node is disconnected + NodeDisconnected(P), +} + +/// interface describing how reliable the network is +#[async_trait] +pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'static { + /// Sample from bernoulli distribution to decide whether + /// or not to keep a packet + /// # Panics + /// + /// Panics if `self.keep_numerator > self.keep_denominator` + /// + fn sample_keep(&self) -> bool { + true + } + + /// sample from uniform distribution to decide whether + /// or not to keep a packet + fn sample_delay(&self) -> Duration { + std::time::Duration::ZERO + } + + /// scramble the packet + fn scramble(&self, msg: Vec) -> Vec { + msg + } + + /// number of times to repeat the packet + fn sample_repeat(&self) -> usize { + 1 + } + + /// given a message and a way to send the message, + /// decide whether or not to send the message + /// how long to delay the message + /// whether or not to send duplicates + /// and whether or not to include noise with the message + /// then send the message + /// note: usually self is stored in a rwlock + /// so instead of doing the sending part, we just fiddle with the message + /// then return a future that does the sending and delaying + fn chaos_send_msg( + &self, + msg: Vec, + send_fn: Arc) -> BoxSyncFuture<'static, ()>>, + ) -> BoxSyncFuture<'static, ()> { + let sample_keep = self.sample_keep(); + let delay = self.sample_delay(); + let repeats = self.sample_repeat(); + let mut msgs = Vec::new(); + for _idx in 0..repeats { + let scrambled = self.scramble(msg.clone()); + msgs.push(scrambled); + } + let closure = async move { + if sample_keep { + async_sleep(delay).await; + for msg in msgs { + send_fn(msg).await; + } + } + }; + Box::pin(closure) + } +} + +// hack to get clone +dyn_clone::clone_trait_object!(NetworkReliability); + +/// ideal network +#[derive(Clone, Copy, Debug, Default)] +pub struct PerfectNetwork {} + +impl NetworkReliability for PerfectNetwork {} + +/// A synchronous network. Packets may be delayed, but are guaranteed +/// to arrive within `timeout` ns +#[derive(Clone, Copy, Debug, Default)] +pub struct SynchronousNetwork { + /// Max value in milliseconds that a packet may be delayed + pub delay_high_ms: u64, + /// Lowest value in milliseconds that a packet may be delayed + pub delay_low_ms: u64, +} + +impl NetworkReliability for SynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An asynchronous network. Packets may be dropped entirely +/// or delayed for arbitrarily long periods +/// probability that packet is kept = `keep_numerator` / `keep_denominator` +/// packet delay is obtained by sampling from a uniform distribution +/// between `delay_low_ms` and `delay_high_ms`, inclusive +#[derive(Debug, Clone, Copy)] +pub struct AsynchronousNetwork { + /// numerator for probability of keeping packets + pub keep_numerator: u32, + /// denominator for probability of keeping packets + pub keep_denominator: u32, + /// lowest value in milliseconds that a packet may be delayed + pub delay_low_ms: u64, + /// highest value in milliseconds that a packet may be delayed + pub delay_high_ms: u64, +} + +impl NetworkReliability for AsynchronousNetwork { + fn sample_keep(&self) -> bool { + Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) + .unwrap() + .sample(&mut rand::thread_rng()) + } + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } +} + +/// An partially synchronous network. Behaves asynchronously +/// until some arbitrary time bound, GST, +/// then synchronously after GST +#[allow(clippy::similar_names)] +#[derive(Debug, Clone, Copy)] +pub struct PartiallySynchronousNetwork { + /// asynchronous portion of network + pub asynchronous: AsynchronousNetwork, + /// synchronous portion of network + pub synchronous: SynchronousNetwork, + /// time when GST occurs + pub gst: std::time::Duration, + /// when the network was started + pub start: std::time::Instant, +} + +impl NetworkReliability for PartiallySynchronousNetwork { + /// never drop a packet + fn sample_keep(&self) -> bool { + true + } + fn sample_delay(&self) -> Duration { + // act asyncronous before gst + if self.start.elapsed() < self.gst { + if self.asynchronous.sample_keep() { + self.asynchronous.sample_delay() + } else { + // assume packet was "dropped" and will arrive after gst + self.synchronous.sample_delay() + self.gst + } + } else { + // act syncronous after gst + self.synchronous.sample_delay() + } + } +} + +impl Default for AsynchronousNetwork { + // disable all chance of failure + fn default() -> Self { + AsynchronousNetwork { + keep_numerator: 1, + keep_denominator: 1, + delay_low_ms: 0, + delay_high_ms: 0, + } + } +} + +impl Default for PartiallySynchronousNetwork { + fn default() -> Self { + PartiallySynchronousNetwork { + synchronous: SynchronousNetwork::default(), + asynchronous: AsynchronousNetwork::default(), + gst: std::time::Duration::new(0, 0), + start: std::time::Instant::now(), + } + } +} + +impl SynchronousNetwork { + /// create new `SynchronousNetwork` + #[must_use] + pub fn new(timeout: u64, delay_low_ms: u64) -> Self { + SynchronousNetwork { + delay_high_ms: timeout, + delay_low_ms, + } + } +} + +impl AsynchronousNetwork { + /// create new `AsynchronousNetwork` + #[must_use] + pub fn new( + keep_numerator: u32, + keep_denominator: u32, + delay_low_ms: u64, + delay_high_ms: u64, + ) -> Self { + AsynchronousNetwork { + keep_numerator, + keep_denominator, + delay_low_ms, + delay_high_ms, + } + } +} + +impl PartiallySynchronousNetwork { + /// create new `PartiallySynchronousNetwork` + #[allow(clippy::similar_names)] + #[must_use] + pub fn new( + asynchronous: AsynchronousNetwork, + synchronous: SynchronousNetwork, + gst: std::time::Duration, + ) -> Self { + PartiallySynchronousNetwork { + asynchronous, + synchronous, + gst, + start: std::time::Instant::now(), + } + } +} + +/// A chaotic network using all the networking calls +#[derive(Debug, Clone)] +pub struct ChaosNetwork { + /// numerator for probability of keeping packets + pub keep_numerator: u32, + /// denominator for probability of keeping packets + pub keep_denominator: u32, + /// lowest value in milliseconds that a packet may be delayed + pub delay_low_ms: u64, + /// highest value in milliseconds that a packet may be delayed + pub delay_high_ms: u64, + /// lowest value of repeats for a message + pub repeat_low: usize, + /// highest value of repeats for a message + pub repeat_high: usize, +} + +impl NetworkReliability for ChaosNetwork { + fn sample_keep(&self) -> bool { + Bernoulli::from_ratio(self.keep_numerator, self.keep_denominator) + .unwrap() + .sample(&mut rand::thread_rng()) + } + + fn sample_delay(&self) -> Duration { + Duration::from_millis( + Uniform::new_inclusive(self.delay_low_ms, self.delay_high_ms) + .sample(&mut rand::thread_rng()), + ) + } + + fn sample_repeat(&self) -> usize { + Uniform::new_inclusive(self.repeat_low, self.repeat_high).sample(&mut rand::thread_rng()) + } +} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs new file mode 100644 index 0000000000..cb2c98c403 --- /dev/null +++ b/types/src/traits/node_implementation.rs @@ -0,0 +1,238 @@ +//! Composite trait for node behavior +//! +//! This module defines the [`NodeImplementation`] trait, which is a composite trait used for +//! describing the overall behavior of a node, as a composition of implementations of the node trait. + +use super::{ + block_contents::{BlockHeader, TestableBlock, Transaction}, + election::ElectionConfig, + network::{ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation}, + states::TestableState, + storage::Storage, + ValidatedState, +}; +use crate::{ + data::{Leaf, TestableLeaf}, + message::Message, + traits::{ + election::Membership, signature_key::SignatureKey, states::InstanceState, BlockPayload, + }, +}; +use async_trait::async_trait; +use commit::Committable; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::Debug, + hash::Hash, + ops, + ops::{Deref, Sub}, + sync::Arc, + time::Duration, +}; + +/// Node implementation aggregate trait +/// +/// This trait exists to collect multiple behavior implementations into one type, to allow +/// `HotShot` to avoid annoying numbers of type arguments and type patching. +/// +/// It is recommended you implement this trait on a zero sized type, as `HotShot`does not actually +/// store or keep a reference to any value implementing this trait. + +pub trait NodeImplementation: + Send + Sync + Clone + Eq + Hash + 'static + Serialize + for<'de> Deserialize<'de> +{ + /// Network for all nodes + type QuorumNetwork: ConnectedNetwork, TYPES::SignatureKey>; + + /// Network for those in the DA committee + type CommitteeNetwork: ConnectedNetwork, TYPES::SignatureKey>; + + /// Storage for DA layer interactions + type Storage: Storage; +} + +/// extra functions required on a node implementation to be usable by hotshot-testing +#[allow(clippy::type_complexity)] +#[async_trait] +pub trait TestableNodeImplementation: NodeImplementation { + /// Election config for the DA committee + type CommitteeElectionConfig; + + /// Generates a committee-specific election + fn committee_election_config_generator( + ) -> Box Self::CommitteeElectionConfig + 'static>; + + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn state_create_random_transaction( + state: Option<&TYPES::ValidatedState>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; + + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn leaf_create_random_transaction( + leaf: &Leaf, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; + + /// generate a genesis block + fn block_genesis() -> TYPES::BlockPayload; + + /// the number of transactions in a block + fn txn_count(block: &TYPES::BlockPayload) -> u64; + + /// Generate the communication channels for testing + fn gen_networks( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + reliability_config: Option>, + secondary_network_delay: Duration, + ) -> Box (Arc, Arc)>; +} + +#[async_trait] +impl> TestableNodeImplementation for I +where + TYPES::ValidatedState: TestableState, + TYPES::BlockPayload: TestableBlock, + I::QuorumNetwork: TestableNetworkingImplementation, + I::CommitteeNetwork: TestableNetworkingImplementation, +{ + type CommitteeElectionConfig = TYPES::ElectionConfigType; + + fn committee_election_config_generator( + ) -> Box Self::CommitteeElectionConfig + 'static> { + Box::new(|num_nodes_with_stake, num_nodes_without_stake| { + ::Membership::default_election_config( + num_nodes_with_stake, + num_nodes_without_stake, + ) + }) + } + + fn state_create_random_transaction( + state: Option<&TYPES::ValidatedState>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + >::create_random_transaction( + state, rng, padding, + ) + } + + fn leaf_create_random_transaction( + leaf: &Leaf, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction { + Leaf::create_random_transaction(leaf, rng, padding) + } + + fn block_genesis() -> TYPES::BlockPayload { + ::genesis() + } + + fn txn_count(block: &TYPES::BlockPayload) -> u64 { + ::txn_count(block) + } + + fn gen_networks( + expected_node_count: usize, + num_bootstrap: usize, + da_committee_size: usize, + reliability_config: Option>, + secondary_network_delay: Duration, + ) -> Box (Arc, Arc)> { + >::generator( + expected_node_count, + num_bootstrap, + 0, + da_committee_size, + false, + reliability_config.clone(), + secondary_network_delay, + ) + } +} + +/// Trait for time compatibility needed for reward collection +pub trait ConsensusTime: + PartialOrd + + Ord + + Send + + Sync + + Debug + + Clone + + Copy + + Hash + + Deref + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + ops::AddAssign + + ops::Add + + Sub + + 'static + + Committable +{ + /// Create a new instance of this time unit at time number 0 + #[must_use] + fn genesis() -> Self { + Self::new(0) + } + /// Create a new instance of this time unit + fn new(val: u64) -> Self; + /// Get the u64 format of time + fn get_u64(&self) -> u64; +} + +/// Trait with all the type definitions that are used in the current hotshot setup. +pub trait NodeType: + Clone + + Copy + + Debug + + Hash + + PartialEq + + Eq + + PartialOrd + + Ord + + Default + + serde::Serialize + + for<'de> Deserialize<'de> + + Send + + Sync + + 'static +{ + /// The time type that this hotshot setup is using. + /// + /// This should be the same `Time` that `ValidatedState::Time` is using. + type Time: ConsensusTime; + /// The block header type that this hotshot setup is using. + type BlockHeader: BlockHeader; + /// The block type that this hotshot setup is using. + /// + /// This should be the same block that `ValidatedState::BlockPayload` is using. + type BlockPayload: BlockPayload; + /// The signature key that this hotshot setup is using. + type SignatureKey: SignatureKey; + /// The transaction type that this hotshot setup is using. + /// + /// This should be equal to `BlockPayload::Transaction` + type Transaction: Transaction; + /// The election config type that this hotshot setup is using. + type ElectionConfigType: ElectionConfig; + + /// The instance-level state type that this hotshot setup is using. + type InstanceState: InstanceState; + + /// The validated state type that this hotshot setup is using. + type ValidatedState: ValidatedState; + + /// Membership used for this implementation + type Membership: Membership; +} diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs new file mode 100644 index 0000000000..7dd11010f9 --- /dev/null +++ b/types/src/traits/qc.rs @@ -0,0 +1,95 @@ +//! The quorum certificate (QC) trait is a certificate of a sufficient quorum of distinct +//! parties voted for a message or statement. + +use ark_std::{ + rand::{CryptoRng, RngCore}, + vec::Vec, +}; +use bitvec::prelude::*; +use generic_array::{ArrayLength, GenericArray}; +use jf_primitives::{errors::PrimitivesError, signatures::AggregateableSignatureSchemes}; +use serde::{Deserialize, Serialize}; + +/// Trait for validating a QC built from different signatures on the same message +pub trait QuorumCertificateScheme< + A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, +> +{ + /// Public parameters for generating the QC + /// E.g: snark proving/verifying keys, list of (or pointer to) public keys stored in the smart contract. + type QCProverParams: Serialize + for<'a> Deserialize<'a>; + + /// Public parameters for validating the QC + /// E.g: verifying keys, stake table commitment + type QCVerifierParams: Serialize + for<'a> Deserialize<'a>; + + /// Allows to fix the size of the message at compilation time. + type MessageLength: ArrayLength; + + /// Type of the actual quorum certificate object + type QC; + + /// Type of the quorum size (e.g. number of votes or accumulated weight of signatures) + type QuorumSize; + + /// Produces a partial signature on a message with a single user signing key + /// NOTE: the original message (vote) should be prefixed with the hash of the stake table. + /// * `agg_sig_pp` - public parameters for aggregate signature + /// * `message` - message to be signed + /// * `sk` - user signing key + /// * `returns` - a "simple" signature + /// + /// # Errors + /// + /// Should return error if the underlying signature scheme fail to sign. + fn sign>( + pp: &A::PublicParameter, + sk: &A::SigningKey, + msg: M, + prng: &mut R, + ) -> Result { + A::sign(pp, sk, msg, prng) + } + + /// Computes an aggregated signature from a set of partial signatures and the verification keys involved + /// * `qc_pp` - public parameters for generating the QC + /// * `signers` - a bool vector indicating the list of verification keys corresponding to the set of partial signatures + /// * `sigs` - partial signatures on the same message + /// + /// # Errors + /// + /// Will return error if some of the partial signatures provided are invalid or the number of + /// partial signatures / verifications keys are different. + fn assemble( + qc_pp: &Self::QCProverParams, + signers: &BitSlice, + sigs: &[A::Signature], + ) -> Result; + + /// Checks an aggregated signature over some message provided as input + /// * `qc_vp` - public parameters for validating the QC + /// * `message` - message to check the aggregated signature against + /// * `qc` - quroum certificate + /// * `returns` - the quorum size if the qc is valid, an error otherwise. + /// + /// # Errors + /// + /// Return error if the QC is invalid, either because accumulated weight didn't exceed threshold, + /// or some partial signatures are invalid. + fn check( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result; + + /// Trace the list of signers given a qc. + /// + /// # Errors + /// + /// Return error if the inputs mismatch (e.g. wrong verifier parameter or original message). + fn trace( + qc_vp: &Self::QCVerifierParams, + message: &GenericArray, + qc: &Self::QC, + ) -> Result, PrimitivesError>; +} diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs new file mode 100644 index 0000000000..3b36e0ed31 --- /dev/null +++ b/types/src/traits/signature_key.rs @@ -0,0 +1,140 @@ +//! Minimal compatibility over public key signatures +use bitvec::prelude::*; +use ethereum_types::U256; +use jf_primitives::errors::PrimitivesError; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Debug, Display}, + hash::Hash, +}; +use tagged_base64::TaggedBase64; + +/// Type representing stake table entries in a `StakeTable` +pub trait StakeTableEntryType { + /// Get the stake value + fn get_stake(&self) -> U256; +} + +/// Trait for abstracting public key signatures +/// Self is the public key type +pub trait SignatureKey: + Send + + Sync + + Clone + + Sized + + Debug + + Hash + + Serialize + + for<'a> Deserialize<'a> + + PartialEq + + Eq + + PartialOrd + + Ord + + Display + + for<'a> TryFrom<&'a TaggedBase64> + + Into +{ + /// The private key type for this signature algorithm + type PrivateKey: Send + + Sync + + Sized + + Clone + + Debug + + Eq + + Serialize + + for<'a> Deserialize<'a> + + Hash; + /// The type of the entry that contain both public key and stake value + type StakeTableEntry: StakeTableEntryType + + Send + + Sync + + Sized + + Clone + + Debug + + Hash + + Eq + + Serialize + + for<'a> Deserialize<'a>; + /// The type of the quorum certificate parameters used for assembled signature + type QCParams: Send + Sync + Sized + Clone + Debug + Hash; + /// The type of the assembled signature, without `BitVec` + type PureAssembledSignatureType: Send + + Sync + + Sized + + Clone + + Debug + + Hash + + PartialEq + + Eq + + Serialize + + for<'a> Deserialize<'a>; + /// The type of the assembled qc: assembled signature + `BitVec` + type QCType: Send + + Sync + + Sized + + Clone + + Debug + + Hash + + PartialEq + + Eq + + Serialize + + for<'a> Deserialize<'a>; + + /// Type of error that can occur when signing data + type SignError: std::error::Error + Send + Sync; + + // Signature type represented as a vec/slice of bytes to let the implementer handle the nuances + // of serialization, to avoid Cryptographic pitfalls + /// Validate a signature + fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool; + + /// Produce a signature + /// # Errors + /// If unable to sign the data with the key + fn sign( + private_key: &Self::PrivateKey, + data: &[u8], + ) -> Result; + + /// Produce a public key from a private key + fn from_private(private_key: &Self::PrivateKey) -> Self; + /// Serialize a public key to bytes + fn to_bytes(&self) -> Vec; + /// Deserialize a public key from bytes + /// # Errors + /// + /// Will return `Err` if deserialization fails + fn from_bytes(bytes: &[u8]) -> Result; + + /// Generate a new key pair + fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey); + + /// get the stake table entry from the public key and stake value + fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; + + /// only get the public key from the stake table entry + fn get_public_key(entry: &Self::StakeTableEntry) -> Self; + + /// get the public parameter for the assembled signature checking + fn get_public_parameter( + stake_entries: Vec, + threshold: U256, + ) -> Self::QCParams; + + /// check the quorum certificate for the assembled signature + fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; + + /// get the assembled signature and the `BitVec` separately from the assembled signature + fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); + + /// assemble the signature from the partial signature and the indication of signers in `BitVec` + fn assemble( + real_qc_pp: &Self::QCParams, + signers: &BitSlice, + sigs: &[Self::PureAssembledSignatureType], + ) -> Self::QCType; + + /// generates the genesis public key. Meant to be dummy/filler + #[must_use] + fn genesis_proposer_pk() -> Self; +} diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs new file mode 100644 index 0000000000..598a662650 --- /dev/null +++ b/types/src/traits/stake_table.rs @@ -0,0 +1,235 @@ +//! Trait for stake table data structures + +use ark_std::{rand::SeedableRng, string::ToString, vec::Vec}; +use digest::crypto_common::rand_core::CryptoRngCore; +use displaydoc::Display; +use jf_plonk::errors::PlonkError; +use jf_primitives::errors::PrimitivesError; + +/// Snapshots of the stake table +pub enum SnapshotVersion { + /// the latest "Head" where all new changes are applied to + Head, + /// marks the snapshot at the beginning of the current epoch + EpochStart, + /// marks the beginning of the last epoch + LastEpochStart, + /// at arbitrary block height + BlockNum(u64), +} + +/// Common interfaces required for a stake table used in `HotShot` System. +/// APIs that doesn't take `version: SnapshotVersion` as an input by default works on the head/latest version. +pub trait StakeTableScheme { + /// type for stake key + type Key: Clone; + /// type for the staked amount + type Amount: Clone + Copy; + /// type for the commitment to the current stake table + type Commitment; + /// type for the proof associated with the lookup result (if any) + type LookupProof; + /// type for the iterator over (key, value) entries + type IntoIter: Iterator; + /// Auxiliary information associated with the key + type Aux: Clone; + + /// Register a new key into the stake table. + /// + /// # Errors + /// + /// Return err if key is already registered. + fn register( + &mut self, + new_key: Self::Key, + amount: Self::Amount, + aux: Self::Aux, + ) -> Result<(), StakeTableError>; + + /// Batch register a list of new keys. A default implementation is provided + /// w/o batch optimization. + /// + /// # Errors + /// + /// Return err if any of `new_keys` fails to register. + fn batch_register( + &mut self, + new_keys: I, + amounts: J, + auxs: K, + ) -> Result<(), StakeTableError> + where + I: IntoIterator, + J: IntoIterator, + K: IntoIterator, + { + let _ = new_keys + .into_iter() + .zip(amounts) + .zip(auxs) + .try_for_each(|((key, amount), aux)| Self::register(self, key, amount, aux)); + Ok(()) + } + + /// Deregister an existing key from the stake table. + /// Returns error if some keys are not found. + /// + /// # Errors + /// Return err if `existing_key` wasn't registered. + fn deregister(&mut self, existing_key: &Self::Key) -> Result<(), StakeTableError>; + + /// Batch deregister a list of keys. A default implementation is provided + /// w/o batch optimization. + /// + /// # Errors + /// Return err if any of `existing_keys` fail to deregister. + fn batch_deregister<'a, I>(&mut self, existing_keys: I) -> Result<(), StakeTableError> + where + I: IntoIterator::Key>, + ::Key: 'a, + { + let _ = existing_keys + .into_iter() + .try_for_each(|key| Self::deregister(self, key)); + Ok(()) + } + + /// Returns the commitment to the `version` of stake table. + /// + /// # Errors + /// Return err if the `version` is not supported. + fn commitment(&self, version: SnapshotVersion) -> Result; + + /// Returns the accumulated stakes of all registered keys of the `version` + /// of stake table. + /// + /// # Errors + /// Return err if the `version` is not supported. + fn total_stake(&self, version: SnapshotVersion) -> Result; + + /// Returns the number of keys in the `version` of the table. + /// + /// # Errors + /// Return err if the `version` is not supported. + fn len(&self, version: SnapshotVersion) -> Result; + + /// Returns true if `key` is currently registered, else returns false. + fn contains_key(&self, key: &Self::Key) -> bool; + + /// Returns the stakes withhelded by a public key. + /// + /// # Errors + /// Return err if the `version` is not supported or `key` doesn't exist. + fn lookup( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result; + + /// Returns the stakes withhelded by a public key along with a membership proof. + /// + /// # Errors + /// Return err if the `version` is not supported or `key` doesn't exist. + fn lookup_with_proof( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError>; + + /// Return the associated stake amount and auxiliary information of a public key, + /// along with a membership proof. + /// + /// # Errors + /// Return err if the `version` is not supported or `key` doesn't exist. + #[allow(clippy::type_complexity)] + fn lookup_with_aux_and_proof( + &self, + version: SnapshotVersion, + key: &Self::Key, + ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError>; + + /// Update the stake of the `key` with `(negative ? -1 : 1) * delta`. + /// Return the updated stake or error. + /// + /// # Errors + /// Return err if the `key` doesn't exist of if the update overflow/underflow. + fn update( + &mut self, + key: &Self::Key, + delta: Self::Amount, + negative: bool, + ) -> Result; + + /// Batch update the stake balance of `keys`. Read documentation about + /// [`Self::update()`]. By default, we call `Self::update()` on each + /// (key, amount, negative) tuple. + /// + /// # Errors + /// Return err if any one of the `update` failed. + fn batch_update( + &mut self, + keys: &[Self::Key], + amounts: &[Self::Amount], + negative_flags: Vec, + ) -> Result, StakeTableError> { + let updated_amounts = keys + .iter() + .zip(amounts.iter()) + .zip(negative_flags.iter()) + .map(|((key, &amount), negative)| Self::update(self, key, amount, *negative)) + .collect::, _>>()?; + + Ok(updated_amounts) + } + + /// Randomly sample a (key, stake amount) pair proportional to the stake distributions, + /// given a fixed seed for `rng`, this sampling should be deterministic. + fn sample( + &self, + rng: &mut (impl SeedableRng + CryptoRngCore), + ) -> Option<(&Self::Key, &Self::Amount)>; + + /// Returns an iterator over all (key, value) entries of the `version` of the table + /// + /// # Errors + /// Return err if the `version` is not supported. + fn try_iter(&self, version: SnapshotVersion) -> Result; +} + +/// Error type for [`StakeTableScheme`] +#[derive(Debug, Display)] +pub enum StakeTableError { + /// Internal error caused by Rescue + RescueError, + /// Key mismatched + MismatchedKey, + /// Key not found + KeyNotFound, + /// Key already exists + ExistingKey, + /// Malformed Merkle proof + MalformedProof, + /// Verification Error + VerificationError, + /// Insufficient fund: the number of stake cannot be negative + InsufficientFund, + /// The number of stake exceed U256 + StakeOverflow, + /// The historical snapshot requested is not supported. + SnapshotUnsupported, +} + +impl ark_std::error::Error for StakeTableError {} + +impl From for PrimitivesError { + fn from(value: StakeTableError) -> Self { + // FIXME: (alex) should we define a PrimitivesError::General()? + Self::ParameterError(value.to_string()) + } +} + +impl From for PlonkError { + fn from(value: StakeTableError) -> Self { + Self::PrimitiveError(PrimitivesError::ParameterError(value.to_string())) + } +} diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs new file mode 100644 index 0000000000..2fb851ba0d --- /dev/null +++ b/types/src/traits/states.rs @@ -0,0 +1,89 @@ +//! Abstractions over the immutable instance-level state and hte global state that blocks modify. +//! +//! This module provides the [`InstanceState`] and [`ValidatedState`] traits, which serve as +//! compatibilities over the current network state, which is modified by the transactions contained +//! within blocks. + +use super::block_contents::TestableBlock; +use crate::{ + data::Leaf, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + BlockPayload, + }, +}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; + +/// Instance-level state, which allows us to fetch missing validated state. +pub trait InstanceState: Debug + Send + Sync {} + +/// Application-specific state delta, which will be used to store a list of merkle tree entries. +pub trait StateDelta: Debug + Send + Sync + Serialize + for<'a> Deserialize<'a> {} + +/// Abstraction over the state that blocks modify +/// +/// This trait represents the behaviors that the 'global' ledger state must have: +/// * A defined error type ([`Error`](ValidatedState::Error)) +/// * The type of block that modifies this type of state ([`BlockPayload`](`ValidatedStates:: +/// BlockPayload`)) +/// * The ability to validate that a block header is actually a valid extension of this state and +/// produce a new state, with the modifications from the block applied +/// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) +pub trait ValidatedState: + Serialize + DeserializeOwned + Debug + Default + Hash + PartialEq + Eq + Send + Sync +{ + /// The error type for this particular type of ledger state + type Error: Error + Debug + Send + Sync; + /// The type of the instance-level state this state is assocaited with + type Instance: InstanceState; + /// The type of the state delta this state is assocaited with. + type Delta: StateDelta; + /// Time compatibility needed for reward collection + type Time: ConsensusTime; + + /// Check if the proposed block header is valid and apply it to the state if so. + /// + /// Returns the new state and state delta. + /// + /// # Arguments + /// * `instance` - Immutable instance-level state. + /// + /// # Errors + /// + /// If the block header is invalid or appending it would lead to an invalid state. + fn validate_and_apply_header( + &self, + instance: &Self::Instance, + parent_leaf: &Leaf, + proposed_header: &TYPES::BlockHeader, + ) -> impl Future> + Send; + + /// Construct the state with the given block header. + /// + /// This can also be used to rebuild the state for catchup. + fn from_header(block_header: &TYPES::BlockHeader) -> Self; + + /// Construct a genesis validated state. + #[must_use] + fn genesis(instance: &Self::Instance) -> (Self, Self::Delta); + + /// Gets called to notify the persistence backend that this state has been committed + fn on_commit(&self); +} + +/// extra functions required on state to be usable by hotshot-testing +pub trait TestableState: ValidatedState +where + TYPES: NodeType, + TYPES::BlockPayload: TestableBlock, +{ + /// Creates random transaction if possible + /// otherwise panics + /// `padding` is the bytes of padding to add to the transaction + fn create_random_transaction( + state: Option<&Self>, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> ::Transaction; +} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs new file mode 100644 index 0000000000..2dab00ee88 --- /dev/null +++ b/types/src/traits/storage.rs @@ -0,0 +1,21 @@ +//! Abstract storage type for storing DA proposals and VID shares +//! +//! This modules provides the [`Storage`] trait. +//! + +use anyhow::Result; +use async_trait::async_trait; + +use crate::{ + data::{DAProposal, VidDisperse}, + message::Proposal, +}; + +use super::node_implementation::NodeType; + +/// Abstraction for storing a variety of consensus payload datum. +#[async_trait] +pub trait Storage: Send + Sync + Clone { + async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; + async fn append_da(&self, proposal: &Proposal>) -> Result<()>; +} diff --git a/types/src/utils.rs b/types/src/utils.rs new file mode 100644 index 0000000000..9cce5bb31b --- /dev/null +++ b/types/src/utils.rs @@ -0,0 +1,189 @@ +//! Utility functions, type aliases, helper structs and enum definitions. + +use crate::{ + data::Leaf, + traits::{node_implementation::NodeType, ValidatedState}, + vid::VidCommitment, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use bincode::{ + config::{ + FixintEncoding, LittleEndian, RejectTrailing, WithOtherEndian, WithOtherIntEncoding, + WithOtherLimit, WithOtherTrailing, + }, + DefaultOptions, Options, +}; +use commit::Commitment; +use digest::OutputSizeUser; +use sha2::Digest; +use std::{ops::Deref, sync::Arc}; +use tagged_base64::tagged; +use typenum::Unsigned; + +/// A view's state +#[derive(Debug)] +pub enum ViewInner { + /// A pending view with an available block but not leaf proposal yet. + /// + /// Storing this state allows us to garbage collect blocks for views where a proposal is never + /// made. This saves memory when a leader fails and subverts a DoS attack where malicious + /// leaders repeatedly request availability for blocks that they never propose. + DA { + /// Payload commitment to the available block. + payload_commitment: VidCommitment, + }, + /// Undecided view + Leaf { + /// Proposed leaf + leaf: LeafCommitment, + /// Validated state. + state: Arc, + /// Optional state delta. + delta: Option>::Delta>>, + }, + /// Leaf has failed + Failed, +} + +/// The hash of a leaf. +type LeafCommitment = Commitment>; + +/// Optional validated state and state delta. +pub type StateAndDelta = ( + Option::ValidatedState>>, + Option::ValidatedState as ValidatedState>::Delta>>, +); + +impl ViewInner { + /// Return the underlying undecide leaf commitment and validated state if they exist. + #[must_use] + pub fn get_leaf_and_state( + &self, + ) -> Option<(LeafCommitment, &Arc)> { + if let Self::Leaf { leaf, state, .. } = self { + Some((*leaf, state)) + } else { + None + } + } + + /// return the underlying leaf hash if it exists + #[must_use] + pub fn get_leaf_commitment(&self) -> Option> { + if let Self::Leaf { leaf, .. } = self { + Some(*leaf) + } else { + None + } + } + + /// return the underlying validated state if it exists + #[must_use] + pub fn get_state(&self) -> Option<&Arc> { + if let Self::Leaf { state, .. } = self { + Some(state) + } else { + None + } + } + + /// Return the underlying validated state and state delta if they exist. + #[must_use] + pub fn get_state_and_delta(&self) -> StateAndDelta { + if let Self::Leaf { state, delta, .. } = self { + (Some(state.clone()), delta.clone()) + } else { + (None, None) + } + } + + /// return the underlying block paylod commitment if it exists + #[must_use] + pub fn get_payload_commitment(&self) -> Option { + if let Self::DA { payload_commitment } = self { + Some(*payload_commitment) + } else { + None + } + } +} + +impl Deref for View { + type Target = ViewInner; + + fn deref(&self) -> &Self::Target { + &self.view_inner + } +} + +/// This exists so we can perform state transitions mutably +#[derive(Debug)] +pub struct View { + /// The view data. Wrapped in a struct so we can mutate + pub view_inner: ViewInner, +} + +/// A struct containing information about a finished round. +#[derive(Debug, Clone)] +pub struct RoundFinishedEvent { + /// The round that finished + pub view_number: TYPES::Time, +} + +/// Whether or not to stop inclusively or exclusively when walking +#[derive(Copy, Clone, Debug)] +pub enum Terminator { + /// Stop right before this view number + Exclusive(T), + /// Stop including this view number + Inclusive(T), +} + +/// Type alias for byte array of SHA256 digest length +type Sha256Digest = [u8; ::OutputSize::USIZE]; + +#[tagged("BUILDER_COMMITMENT")] +#[derive(Clone, Debug, Hash, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +/// Commitment that builders use to sign block options. +/// A thin wrapper around a Sha256 digest. +pub struct BuilderCommitment(Sha256Digest); + +impl BuilderCommitment { + /// Create new commitment for `data` + pub fn from_bytes(data: impl AsRef<[u8]>) -> Self { + Self(sha2::Sha256::digest(data.as_ref()).into()) + } + + /// Create a new commitment from a raw Sha256 digest + pub fn from_raw_digest(digest: impl Into) -> Self { + Self(digest.into()) + } +} + +impl AsRef for BuilderCommitment { + fn as_ref(&self) -> &Sha256Digest { + &self.0 + } +} + +/// For the wire format, we use bincode with the following options: +/// - No upper size limit +/// - Litte endian encoding +/// - Varint encoding +/// - Reject trailing bytes +#[allow(clippy::type_complexity)] +#[must_use] +#[allow(clippy::type_complexity)] +pub fn bincode_opts() -> WithOtherTrailing< + WithOtherIntEncoding< + WithOtherEndian, LittleEndian>, + FixintEncoding, + >, + RejectTrailing, +> { + bincode::DefaultOptions::new() + .with_no_limit() + .with_little_endian() + .with_fixint_encoding() + .reject_trailing_bytes() +} diff --git a/types/src/vid.rs b/types/src/vid.rs new file mode 100644 index 0000000000..e6d2c9a5f2 --- /dev/null +++ b/types/src/vid.rs @@ -0,0 +1,277 @@ +//! This module provides: +//! - an opaque constructor [`vid_scheme`] that returns a new instance of a +//! VID scheme. +//! - type aliases [`VidCommitment`], [`VidCommon`], [`VidShare`] +//! for [`VidScheme`] assoc types. +//! +//! Purpose: the specific choice of VID scheme is an implementation detail. +//! This crate and all downstream crates should talk to the VID scheme only +//! via the traits exposed here. + +use ark_bn254::Bn254; +use jf_primitives::{ + pcs::{ + checked_fft_size, + prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, + PolynomialCommitmentScheme, + }, + vid::{ + advz::{ + self, + payload_prover::{LargeRangeProof, SmallRangeProof}, + }, + payload_prover::{PayloadProver, Statement}, + precomputable::Precomputable, + VidDisperse, VidResult, VidScheme, + }, +}; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; +use sha2::Sha256; +use std::{fmt::Debug, ops::Range}; + +/// VID scheme constructor. +/// +/// Returns an opaque type that impls jellyfish traits: +/// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. +/// +/// # Rust forbids naming impl Trait in return types +/// +/// Due to Rust limitations the return type of [`vid_scheme`] is a newtype +/// wrapper [`VidSchemeType`] that impls the above traits. +/// +/// We prefer that the return type of [`vid_scheme`] be `impl Trait` for the +/// above traits. But the ability to name an impl Trait return type is +/// currently missing from Rust: +/// - [Naming impl trait in return types - Impl trait initiative](https://rust-lang.github.io/impl-trait-initiative/explainer/rpit_names.html) +/// - [RFC: Type alias impl trait (TAIT)](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) +/// +/// # Panics +/// When the construction fails for the underlying VID scheme. +#[must_use] +pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { + // chunk_size is currently num_storage_nodes rounded down to a power of two + // TODO chunk_size should be a function of the desired erasure code rate + // https://github.com/EspressoSystems/HotShot/issues/2152 + let chunk_size = 1 << num_storage_nodes.ilog2(); + + // TODO intelligent choice of multiplicity + let multiplicity = 1; + + // TODO panic, return `Result`, or make `new` infallible upstream (eg. by panicking)? + #[allow(clippy::panic)] + VidSchemeType(Advz::new(chunk_size, num_storage_nodes, multiplicity, &*KZG_SRS).unwrap_or_else(|err| panic!("advz construction failure:\n\t(num_storage nodes,chunk_size,multiplicity)=({num_storage_nodes},{chunk_size},{multiplicity})\n\terror: : {err}"))) +} + +/// VID commitment type +pub type VidCommitment = ::Commit; +/// VID common type +pub type VidCommon = ::Common; +/// VID share type +pub type VidShare = ::Share; + +#[cfg(not(feature = "gpu-vid"))] +/// Internal Jellyfish VID scheme +type Advz = advz::Advz; +#[cfg(feature = "gpu-vid")] +/// Internal Jellyfish VID scheme +type Advz = advz::AdvzGPU<'static, E, H>; + +/// Newtype wrapper for a VID scheme type that impls +/// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. +pub struct VidSchemeType(Advz); + +/// Newtype wrapper for a large payload range proof. +/// +/// Useful for namespace proofs. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct LargeRangeProofType( + // # Type complexity + // + // Jellyfish's `LargeRangeProof` type has a prime field generic parameter `F`. + // This `F` is determined by the type parameter `E` for `Advz`. + // Jellyfish needs a more ergonomic way for downstream users to refer to this type. + // + // There is a `KzgEval` type alias in jellyfish that helps a little, but it's currently private: + // + // If it were public then we could instead use + // `LargeRangeProof>` + // but that's still pretty crufty. + LargeRangeProof< as PolynomialCommitmentScheme>::Evaluation>, +); + +/// Newtype wrapper for a small payload range proof. +/// +/// Useful for transaction proofs. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct SmallRangeProofType( + // # Type complexity + // + // Similar to the comments in `LargeRangeProofType`. + SmallRangeProof< as PolynomialCommitmentScheme>::Proof>, +); + +lazy_static! { + /// SRS comment + /// + /// TODO use a proper SRS + /// https://github.com/EspressoSystems/HotShot/issues/1686 + static ref KZG_SRS: UnivariateUniversalParams = { + let mut rng = jf_utils::test_rng(); + UnivariateKzgPCS::::gen_srs_for_testing( + &mut rng, + // TODO what's the maximum possible SRS size? + checked_fft_size(200).unwrap(), + ) + .unwrap() + }; +} + +/// Private type alias for the EC pairing type parameter for [`Advz`]. +type E = Bn254; +/// Private type alias for the hash type parameter for [`Advz`]. +type H = Sha256; + +// THE REST OF THIS FILE IS BOILERPLATE +// +// All this boilerplate can be deleted when we finally get +// type alias impl trait (TAIT): +// [rfcs/text/2515-type_alias_impl_trait.md at master · rust-lang/rfcs](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) +impl VidScheme for VidSchemeType { + type Commit = ::Commit; + type Share = ::Share; + type Common = ::Common; + + fn commit_only(&mut self, payload: B) -> VidResult + where + B: AsRef<[u8]>, + { + self.0.commit_only(payload) + } + + fn disperse(&mut self, payload: B) -> VidResult> + where + B: AsRef<[u8]>, + { + self.0.disperse(payload).map(vid_disperse_conversion) + } + + fn verify_share( + &self, + share: &Self::Share, + common: &Self::Common, + commit: &Self::Commit, + ) -> VidResult> { + self.0.verify_share(share, common, commit) + } + + fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { + self.0.recover_payload(shares, common) + } + + fn is_consistent(commit: &Self::Commit, common: &Self::Common) -> VidResult<()> { + ::is_consistent(commit, common) + } + + fn get_payload_byte_len(common: &Self::Common) -> usize { + ::get_payload_byte_len(common) + } + + fn get_num_storage_nodes(common: &Self::Common) -> usize { + ::get_num_storage_nodes(common) + } + + fn get_multiplicity(common: &Self::Common) -> usize { + ::get_multiplicity(common) + } +} + +impl PayloadProver for VidSchemeType { + fn payload_proof(&self, payload: B, range: Range) -> VidResult + where + B: AsRef<[u8]>, + { + self.0 + .payload_proof(payload, range) + .map(LargeRangeProofType) + } + + fn payload_verify( + &self, + stmt: Statement<'_, Self>, + proof: &LargeRangeProofType, + ) -> VidResult> { + self.0.payload_verify(stmt_conversion(stmt), &proof.0) + } +} + +impl PayloadProver for VidSchemeType { + fn payload_proof(&self, payload: B, range: Range) -> VidResult + where + B: AsRef<[u8]>, + { + self.0 + .payload_proof(payload, range) + .map(SmallRangeProofType) + } + + fn payload_verify( + &self, + stmt: Statement<'_, Self>, + proof: &SmallRangeProofType, + ) -> VidResult> { + self.0.payload_verify(stmt_conversion(stmt), &proof.0) + } +} + +impl Precomputable for VidSchemeType { + type PrecomputeData = ::PrecomputeData; + + fn commit_only_precompute( + &self, + payload: B, + ) -> VidResult<(Self::Commit, Self::PrecomputeData)> + where + B: AsRef<[u8]>, + { + self.0.commit_only_precompute(payload) + } + + fn disperse_precompute( + &self, + payload: B, + data: &Self::PrecomputeData, + ) -> VidResult> + where + B: AsRef<[u8]>, + { + self.0 + .disperse_precompute(payload, data) + .map(vid_disperse_conversion) + } +} + +/// Convert a [`VidDisperse`] to a [`VidDisperse`]. +/// +/// Foreign type rules prevent us from doing: +/// - `impl From> for VidDisperse` +/// - `impl VidDisperse {...}` +/// and similarly for `Statement`. +/// Thus, we accomplish type conversion via functions. +fn vid_disperse_conversion(vid_disperse: VidDisperse) -> VidDisperse { + VidDisperse { + shares: vid_disperse.shares, + common: vid_disperse.common, + commit: vid_disperse.commit, + } +} + +/// Convert a [`Statement<'_, VidSchemeType>`] to a [`Statement<'_, Advz>`]. +fn stmt_conversion(stmt: Statement<'_, VidSchemeType>) -> Statement<'_, Advz> { + Statement { + payload_subslice: stmt.payload_subslice, + range: stmt.range, + commit: stmt.commit, + common: stmt.common, + } +} diff --git a/types/src/vote.rs b/types/src/vote.rs new file mode 100644 index 0000000000..ba49f4732d --- /dev/null +++ b/types/src/vote.rs @@ -0,0 +1,183 @@ +//! Vote, Accumulator, and Certificate Types + +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, +}; + +use bitvec::{bitvec, vec::BitVec}; +use commit::Commitment; +use either::Either; +use ethereum_types::U256; +use tracing::error; + +use crate::{ + simple_certificate::Threshold, + simple_vote::Voteable, + traits::{ + election::Membership, + node_implementation::NodeType, + signature_key::{SignatureKey, StakeTableEntryType}, + }, +}; + +/// A simple vote that has a signer and commitment to the data voted on. +pub trait Vote: HasViewNumber { + /// Type of data commitment this vote uses. + type Commitment: Voteable; + + /// Get the signature of the vote sender + fn get_signature(&self) -> ::PureAssembledSignatureType; + /// Gets the data which was voted on by this vote + fn get_data(&self) -> &Self::Commitment; + /// Gets the Data commitment of the vote + fn get_data_commitment(&self) -> Commitment; + + /// Gets the public signature key of the votes creator/sender + fn get_signing_key(&self) -> TYPES::SignatureKey; +} + +/// Any type that is associated with a view +pub trait HasViewNumber { + /// Returns the view number the type refers to. + fn get_view_number(&self) -> TYPES::Time; +} + +/** +The certificate formed from the collection of signatures a committee. +The committee is defined by the `Membership` associated type. +The votes all must be over the `Commitment` associated type. +*/ +pub trait Certificate: HasViewNumber { + /// The data commitment this certificate certifies. + type Voteable: Voteable; + + /// Threshold Functions + type Threshold: Threshold; + + /// Build a certificate from the data commitment and the quorum of signers + fn create_signed_certificate( + vote_commitment: Commitment, + data: Self::Voteable, + sig: ::QCType, + view: TYPES::Time, + ) -> Self; + + /// Checks if the cert is valid + fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool; + /// Returns the amount of stake needed to create this certificate + // TODO: Make this a static ratio of the total stake of `Membership` + fn threshold>(membership: &MEMBERSHIP) -> u64; + /// Get the commitment which was voted on + fn get_data(&self) -> &Self::Voteable; + /// Get the vote commitment which the votes commit to + fn get_data_commitment(&self) -> Commitment; +} +/// Mapping of vote commitment to sigatures and bitvec +type SignersMap = HashMap< + COMMITMENT, + ( + BitVec, + Vec<::PureAssembledSignatureType>, + ), +>; +/// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs +pub struct VoteAccumulator< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate, +> { + /// Map of all signatures accumlated so far + pub vote_outcomes: VoteMap2< + Commitment, + TYPES::SignatureKey, + ::PureAssembledSignatureType, + >, + /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check + /// And a list of valid signatures for certificate aggregation + pub signers: SignersMap, TYPES::SignatureKey>, + /// Phantom data to specify the types this accumulator is for + pub phantom: PhantomData<(TYPES, VOTE, CERT)>, +} + +impl, CERT: Certificate> + VoteAccumulator +{ + /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we + /// have accumulated enough votes to exceed the threshold for creating a certificate. + /// + /// # Panics + /// Panics if the vote comes from a node not in the stake table + pub fn accumulate(&mut self, vote: &VOTE, membership: &TYPES::Membership) -> Either<(), CERT> { + let key = vote.get_signing_key(); + + let vote_commitment = vote.get_data_commitment(); + if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { + error!("Invalid vote! Vote Data {:?}", vote.get_data()); + return Either::Left(()); + } + + let Some(stake_table_entry) = membership.get_stake(&key) else { + return Either::Left(()); + }; + let stake_table = membership.get_committee_qc_stake_table(); + let vote_node_id = stake_table + .iter() + .position(|x| *x == stake_table_entry.clone()) + .unwrap(); + + let original_signature: ::PureAssembledSignatureType = + vote.get_signature(); + + let (total_stake_casted, total_vote_map) = self + .vote_outcomes + .entry(vote_commitment) + .or_insert_with(|| (U256::from(0), BTreeMap::new())); + + // Check for duplicate vote + if total_vote_map.contains_key(&key) { + return Either::Left(()); + } + let (signers, sig_list) = self + .signers + .entry(vote_commitment) + .or_insert((bitvec![0; membership.total_nodes()], Vec::new())); + if signers.get(vote_node_id).as_deref() == Some(&true) { + error!("Node id is already in signers list"); + return Either::Left(()); + } + signers.set(vote_node_id, true); + sig_list.push(original_signature); + + // TODO: Get the stake from the stake table entry. + *total_stake_casted += stake_table_entry.get_stake(); + total_vote_map.insert(key, (vote.get_signature(), vote.get_data_commitment())); + + if *total_stake_casted >= CERT::threshold(membership).into() { + // Assemble QC + let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = + ::get_public_parameter( + stake_table, + U256::from(CERT::threshold(membership)), + ); + + let real_qc_sig = ::assemble( + &real_qc_pp, + signers.as_bitslice(), + &sig_list[..], + ); + + let cert = CERT::create_signed_certificate( + vote.get_data_commitment(), + vote.get_data().clone(), + real_qc_sig, + vote.get_view_number(), + ); + return Either::Right(cert); + } + Either::Left(()) + } +} + +/// Mapping of commitments to vote tokens by key. +type VoteMap2 = HashMap)>; diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 3d9b342618..3a641c3a91 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -10,7 +10,7 @@ async-compatibility-layer = { workspace = true } async-lock = { workspace = true } clap = { version = "4.0", features = ["derive", "env"], optional = false } futures = { workspace = true } -hotshot-types = { workspace = true } +hotshot-types = { path = "../types" } tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } From e5c3c2ce2db38a3fcf3974a791a0cb288f2daba9 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:35:04 -0400 Subject: [PATCH 0876/1393] [PushCDN] Add dockerfiles (#2809) * add pushcdn dockerfiles * change executor * fmt * lints --- examples/Cargo.toml | 10 ++++ examples/push-cdn/broker.rs | 95 ++++++++++++++++++++++++++++++++++++ examples/push-cdn/marshal.rs | 52 ++++++++++++++++++++ 3 files changed, 157 insertions(+) create mode 100644 examples/push-cdn/broker.rs create mode 100644 examples/push-cdn/marshal.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 32ae3315ae..0e28bb73a7 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -88,6 +88,14 @@ path = "push-cdn/all.rs" name = "validator-push-cdn" path = "push-cdn/validator.rs" +[[example]] +name = "broker-push-cdn" +path = "push-cdn/broker.rs" + +[[example]] +name = "marshal-push-cdn" +path = "push-cdn/marshal.rs" + [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -150,6 +158,8 @@ clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } local-ip-address = "0.6.1" +anyhow.workspace = true +tracing-subscriber = "0.3.18" [lints] workspace = true diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs new file mode 100644 index 0000000000..2c545b1ba4 --- /dev/null +++ b/examples/push-cdn/broker.rs @@ -0,0 +1,95 @@ +//! The following is the main `Broker` binary, which just instantiates and runs +//! a `Broker` object. + +use anyhow::{Context, Result}; +use cdn_broker::reexports::connection::protocols::{Quic, Tcp}; +use cdn_broker::{reexports::crypto::signature::KeyPair, Broker, Config, ConfigBuilder}; +use clap::Parser; +use hotshot::traits::implementations::WrappedSignatureKey; +use hotshot::types::SignatureKey; +use hotshot_example_types::node_types::TestTypes; +use hotshot_types::traits::node_implementation::NodeType; +use local_ip_address::local_ip; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +/// The main component of the push CDN. +struct Args { + /// The discovery client endpoint (including scheme) to connect to. + /// With the local discovery feature, this is a file path. + /// With the remote (redis) discovery feature, this is a redis URL (e.g. `redis://127.0.0.1:6789`). + #[arg(short, long)] + discovery_endpoint: String, + + /// Whether or not metric collection and serving is enabled + #[arg(long, default_value_t = true)] + metrics_enabled: bool, + + /// The IP to bind to for externalizing metrics + #[arg(long, default_value = "127.0.0.1")] + metrics_ip: String, + + /// The port to bind to for externalizing metrics + #[arg(long, default_value_t = 9090)] + metrics_port: u16, + + /// The port to bind to for connections from users + #[arg(long, default_value = "127.0.0.1:1738")] + public_advertise_address: String, + + /// The (public) port to bind to for connections from users + #[arg(long, default_value_t = 1738)] + public_bind_port: u16, + + /// The (private) port to bind to for connections from other brokers + #[arg(long, default_value_t = 1739)] + private_bind_port: u16, +} + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() -> Result<()> { + // Parse command line arguments + let args = Args::parse(); + + // Initialize tracing + tracing_subscriber::fmt::init(); + + // Get our local IP address + let private_ip_address = local_ip().with_context(|| "failed to get local IP address")?; + let private_address = format!("{}:{}", private_ip_address, args.private_bind_port); + + // Create deterministic keys for brokers (for now, obviously) + let (public_key, private_key) = + ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); + + let broker_config: Config::SignatureKey>> = + ConfigBuilder::default() + .public_advertise_address(args.public_advertise_address) + .public_bind_address(format!("0.0.0.0:{}", args.public_bind_port)) + .private_advertise_address(private_address.clone()) + .private_bind_address(private_address) + .discovery_endpoint(args.discovery_endpoint) + .metrics_port(args.metrics_port) + .keypair(KeyPair { + public_key: WrappedSignatureKey(public_key), + private_key, + }) + .build() + .with_context(|| "failed to build broker configuration")?; + + // Create new `Broker` + // Uses TCP from broker connections and Quic for user connections. + let broker = Broker::< + WrappedSignatureKey<::SignatureKey>, + WrappedSignatureKey<::SignatureKey>, + Tcp, + Quic, + >::new(broker_config) + .await?; + + // Start the main loop, consuming it + broker.start().await?; + + Ok(()) +} diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs new file mode 100644 index 0000000000..22472abe09 --- /dev/null +++ b/examples/push-cdn/marshal.rs @@ -0,0 +1,52 @@ +//! The following is the main `Marshal` binary, which just instantiates and runs +//! a `Marshal` object with the `HotShot` types. +//! +use anyhow::{Context, Result}; +use cdn_broker::reexports::connection::protocols::Quic; +use cdn_marshal::{ConfigBuilder, Marshal}; +use clap::Parser; +use hotshot::traits::implementations::WrappedSignatureKey; +use hotshot_example_types::node_types::TestTypes; +use hotshot_types::traits::node_implementation::NodeType; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +/// The main component of the push CDN. +struct Args { + /// The discovery client endpoint (including scheme) to connect to. + /// With the local discovery feature, this is a file path. + /// With the remote (redis) discovery feature, this is a redis URL (e.g. `redis://127.0.0.1:6789`). + #[arg(short, long)] + discovery_endpoint: String, + + /// The port to bind to for connections (from users) + #[arg(short, long, default_value_t = 8082)] + bind_port: u16, +} + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() -> Result<()> { + // Parse command-line arguments + let args = Args::parse(); + + // Initialize tracing + tracing_subscriber::fmt::init(); + + // Create a new `Config` + let config = ConfigBuilder::default() + .bind_address(format!("0.0.0.0:{}", args.bind_port)) + .discovery_endpoint(args.discovery_endpoint) + .build() + .with_context(|| "failed to build Marshal config")?; + + // Create new `Marshal` from the config + let marshal = + Marshal::::SignatureKey>, Quic>::new(config) + .await?; + + // Start the main loop, consuming it + marshal.start().await?; + + Ok(()) +} From 1018f3a6c3b43e529fc966a9cbb101810ee12bd7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Mar 2024 16:12:59 -0400 Subject: [PATCH 0877/1393] [NETWORKING] Integrate Request and Response Tasks (#2717) * Don't lock channels * cargo fix * Allow killing the connected network handler * Fix issues * Remove State from NetworkNodeHandle * Remove async where it's unused * fix async std build * Fix erros a little * Starting VID request response * handle network event * Bubble up request/response * Make Request and Response just bytes at network layer * Adding to ConnectedNetwork * Hooks for request/response in handle + trait * fix request * Remove request and response tasks for now * update mod.rs * Hooked up response flow to ConnectedNetwork * Refactor interface to return result to caller * Add request and response to message struct * Clean up some message stuff * Fix build error * Hook up request and response fully * Review myself, impl functions for combine * Change Receiver interface to spawn task * try_send instead of send * Create the task for request handling * rename request -> response * fix lint * clean up request response event handle fn * fix build * Comments and make the request signed * add signature checking * link gh issue * Add Event for Validated Quorum Proposal * Fix consensus task * fix upgrade test * start adding the task * Fix the test...again * progress * Finish request task, modify vid disperse event * create tasks * Address comments * fix build * add delay to config * fix async std * fmt * Make CombinedNetworks delay duration configurable * Secondary network delay configurable in HotShotConfig * Rename CombinedConfig to CombinedNetworkConfig * Network delay in test network generator `secondary_network_delay` removed from `HotShotConfig` because it cannot easily be passed to the test network generator. * lock * Temporary pinning to hotshot-types branch TODO: switch to hotshot-types tag or main branch before merging * fixes * Pin to hotshot-types tag 0.1.2 * Remove files added back by mistake * fix build * update hotshot-types * Remove whitespace * Fix typo * fix comment --------- Co-authored-by: Lukasz Rzasik --- hotshot/src/lib.rs | 19 +++++++++++++++++- hotshot/src/tasks/mod.rs | 34 ++++++++++++++++++++++++++++++--- hotshot/src/tasks/task_state.rs | 27 ++++++++++++++++++++++++-- orchestrator/src/config.rs | 4 ++++ task-impls/src/request.rs | 30 +++++++++++++++-------------- task-impls/src/response.rs | 30 ++++++++++++++++------------- testing/src/test_builder.rs | 5 +++++ 7 files changed, 116 insertions(+), 33 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b521e3e6c4..215de3df3d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -56,7 +56,7 @@ use std::{ sync::Arc, time::Duration, }; -use tasks::add_vid_task; +use tasks::{add_request_network_task, add_response_task, add_vid_task}; use tracing::{debug, instrument, trace}; // -- Rexports @@ -475,6 +475,23 @@ impl> SystemContext { add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; add_network_message_task(registry.clone(), event_tx.clone(), da_network.clone()).await; + if let Some(request_rx) = da_network.spawn_request_receiver_task(STATIC_VER_0_1).await { + add_response_task( + registry.clone(), + event_rx.activate_cloned(), + request_rx, + &handle, + ) + .await; + } + add_request_network_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; + add_network_event_task( registry.clone(), event_tx.clone(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index c46013f569..ee829807d5 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -16,12 +16,15 @@ use hotshot_task_impls::{ da::DATaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, + request::NetworkRequestState, + response::{run_response_task, NetworkResponseState, RequestReceiver}, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::{ + constants::Version01, message::Message, traits::{election::Membership, network::ConnectedNetwork}, }; @@ -44,6 +47,34 @@ pub enum GlobalEvent { Dummy, } +/// Add tasks for network requests and responses +pub async fn add_request_network_task>( + task_reg: Arc, + tx: Sender>>, + rx: Receiver>>, + handle: &SystemContextHandle, +) { + let state = NetworkRequestState::::create_from(handle).await; + + let task = Task::new(tx, rx, task_reg.clone(), state); + task_reg.run_task(task).await; +} + +/// Add a task which responds to requests on the network. +pub async fn add_response_task>( + task_reg: Arc, + hs_rx: Receiver>>, + rx: RequestReceiver, + handle: &SystemContextHandle, +) { + let state = NetworkResponseState::new( + handle.hotshot.get_consensus(), + rx, + handle.hotshot.memberships.quorum_membership.clone(), + handle.public_key().clone(), + ); + task_reg.register(run_response_task(state, hs_rx)).await; +} /// Add the network task to handle messages and publish events. pub async fn add_network_message_task< TYPES: NodeType, @@ -58,9 +89,6 @@ pub async fn add_network_message_task< event_stream: event_stream.clone(), }; - // TODO we don't need two async tasks for this, we should combine the - // by getting rid of `TransmitType` - // https://github.com/EspressoSystems/HotShot/issues/2377 let network = net.clone(); let mut state = network_state.clone(); let handle = async_spawn(async move { diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index d950cb4414..af639bfc36 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -2,8 +2,9 @@ use crate::types::SystemContextHandle; use async_trait::async_trait; use hotshot_task_impls::{ - consensus::ConsensusTaskState, da::DATaskState, transactions::TransactionTaskState, - upgrade::UpgradeTaskState, vid::VIDTaskState, view_sync::ViewSyncTaskState, + consensus::ConsensusTaskState, da::DATaskState, request::NetworkRequestState, + transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, + view_sync::ViewSyncTaskState, }; use hotshot_types::constants::VERSION_0_1; use hotshot_types::traits::{ @@ -15,6 +16,7 @@ use std::{ marker::PhantomData, sync::Arc, }; +use versioned_binary_serialization::version::StaticVersionType; /// Trait for creating task states. #[async_trait] @@ -27,6 +29,27 @@ where async fn create_from(handle: &SystemContextHandle) -> Self; } +#[async_trait] +impl, V: StaticVersionType> CreateTaskState + for NetworkRequestState +{ + async fn create_from( + handle: &SystemContextHandle, + ) -> NetworkRequestState { + NetworkRequestState { + network: handle.hotshot.networks.quorum_network.clone(), + state: handle.hotshot.get_consensus(), + view: handle.get_cur_view().await, + delay: handle.hotshot.config.data_request_delay, + da_membership: handle.hotshot.memberships.da_membership.clone(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + _phantom: PhantomData, + } + } +} + #[async_trait] impl> CreateTaskState for UpgradeTaskState> diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 4f7bc9a281..5ee35aaf3e 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -143,6 +143,8 @@ pub struct NetworkConfig { pub propose_min_round_time: Duration, /// maximum time to wait for a view pub propose_max_round_time: Duration, + /// time to wait until we request data associated with a proposal + pub data_request_delay: Duration, /// global index of node (for testing purposes a uid) pub node_index: u64, /// unique seed (for randomness? TODO) @@ -408,6 +410,7 @@ impl Default for NetworkConfig { num_bootrap: 5, propose_min_round_time: Duration::from_secs(0), propose_max_round_time: Duration::from_secs(10), + data_request_delay: Duration::from_millis(2500), commit_sha: String::new(), } } @@ -467,6 +470,7 @@ impl From> for NetworkC view_sync_timeout: val.config.view_sync_timeout, propose_max_round_time: val.config.propose_max_round_time, propose_min_round_time: val.config.propose_min_round_time, + data_request_delay: val.config.data_request_delay, seed: val.seed, transaction_size: val.transaction_size, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 4ec7a1f66a..d223f29784 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -32,13 +32,13 @@ const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); /// The task will wait a it's `delay` and then send a request iteratively to peers /// for any data they don't have related to the proposal. For now it's just requesting VID /// shares. -pub struct NetworkResponseState< +pub struct NetworkRequestState< TYPES: NodeType, I: NodeImplementation, Ver: StaticVersionType, > { /// Network to send requests over - pub network: I::QuorumNetwork, + pub network: Arc, /// Consensus shared state so we can check if we've gotten the information /// before sending a request pub state: Arc>>, @@ -55,7 +55,7 @@ pub struct NetworkResponseState< /// This nodes private/signign key, used to sign requests. pub private_key: ::PrivateKey, /// Version discrimination - _phantom: PhantomData, + pub _phantom: PhantomData, } /// Alias for a signature @@ -63,9 +63,9 @@ type Signature = <::SignatureKey as SignatureKey>::PureAssembledSignatureType; impl, Ver: StaticVersionType + 'static> TaskState - for NetworkResponseState + for NetworkRequestState { - type Event = HotShotEvent; + type Event = Arc>; type Output = HotShotTaskCompleted; @@ -73,7 +73,7 @@ impl, Ver: StaticVersionType + 'st event: Self::Event, task: &mut hotshot_task::task::Task, ) -> Option { - match event { + match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal) => { let state = task.state(); let prop_view = proposal.get_view_number(); @@ -85,6 +85,7 @@ impl, Ver: StaticVersionType + 'st None } HotShotEvent::ViewChange(view) => { + let view = *view; if view > task.state().view { task.state_mut().view = view; } @@ -96,11 +97,12 @@ impl, Ver: StaticVersionType + 'st } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) + matches!(event.as_ref(), HotShotEvent::Shutdown) } + fn filter(&self, event: &Self::Event) -> bool { !matches!( - event, + event.as_ref(), HotShotEvent::Shutdown | HotShotEvent::QuorumProposalValidated(_) | HotShotEvent::ViewChange(_) @@ -109,13 +111,13 @@ impl, Ver: StaticVersionType + 'st } impl, Ver: StaticVersionType + 'static> - NetworkResponseState + NetworkRequestState { /// Spawns tasks for a given view to retrieve any data needed. async fn spawn_requests( &self, view: TYPES::Time, - sender: Sender>, + sender: Sender>>, bind_version: Ver, ) { let requests = self.build_requests(view, bind_version).await; @@ -142,7 +144,7 @@ impl, Ver: StaticVersionType + 'st fn run_delay( &self, request: RequestKind, - sender: Sender>, + sender: Sender>>, view: TYPES::Time, _: Ver, ) { @@ -179,11 +181,11 @@ impl, Ver: StaticVersionType + 'st /// the view has moved beyond the view we are requesting, the task will completed. struct DelayedRequester> { /// Network to send requests - network: I::QuorumNetwork, + network: Arc, /// Shared state to check if the data go populated state: Arc>>, /// Channel to send the event when we receive a response - sender: Sender>, + sender: Sender>>, /// Duration to delay sending the first request delay: Duration, /// The peers we will request in a random order @@ -265,7 +267,7 @@ impl> DelayedRequester { async fn handle_response_message(&self, message: SequencingMessage) { let event = match message.0 { Either::Right(CommitteeConsensusMessage::VidDisperseMsg(prop)) => { - HotShotEvent::VidDisperseRecv(prop) + Arc::new(HotShotEvent::VidDisperseRecv(prop)) } _ => return, }; diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 3c47b21808..e7af6689d8 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -3,6 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::Receiver; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use bincode::config::Options; use either::Either::Right; use futures::{channel::mpsc, FutureExt, StreamExt}; @@ -22,6 +24,8 @@ use hotshot_types::{ utils::bincode_opts, }; use sha2::{Digest, Sha256}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use crate::events::HotShotEvent; @@ -29,27 +33,27 @@ use crate::events::HotShotEvent; type LockedConsensusState = Arc>>; /// Type alias for the channel that we receive requests from the network on. -type ReqestReceiver = mpsc::Receiver<(Message, ResponseChannel>)>; +pub type RequestReceiver = mpsc::Receiver<(Message, ResponseChannel>)>; /// Task state for the Network Request Task. The task is responsible for handling /// requests sent to this node by the network. It will validate the sender, /// parse the request, and try to find the data request in the consensus stores. -pub struct NetworkRequestState { +pub struct NetworkResponseState { /// Locked consensus state consensus: LockedConsensusState, /// Receiver for requests - receiver: ReqestReceiver, + receiver: RequestReceiver, /// Quorum membership for checking if requesters have state quorum: TYPES::Membership, /// This replicas public key pub_key: TYPES::SignatureKey, } -impl NetworkRequestState { +impl NetworkResponseState { /// Create the network request state with the info it needs pub fn new( consensus: LockedConsensusState, - receiver: ReqestReceiver, + receiver: RequestReceiver, quorum: TYPES::Membership, pub_key: TYPES::SignatureKey, ) -> Self { @@ -63,7 +67,7 @@ impl NetworkRequestState { /// Run the request response loop until a `HotShotEvent::Shutdown` is received. /// Or the stream is closed. - async fn run_loop(mut self, shutdown: EventDependency>) { + async fn run_loop(mut self, shutdown: EventDependency>>) { let mut shutdown = Box::pin(shutdown.completed().fuse()); loop { futures::select! { @@ -162,16 +166,16 @@ fn valid_signature( sender.validate(&req.signature, &Sha256::digest(data)) } -/// Spawn the network request task to handle incoming request for data +/// Spawn the network response task to handle incoming request for data /// from other nodes. It will shutdown when it gets `HotshotEvent::Shutdown` /// on the `event_stream` arg. -pub fn run_request_task( - task_state: NetworkRequestState, - event_stream: Receiver>, -) { +pub fn run_response_task( + task_state: NetworkResponseState, + event_stream: Receiver>>, +) -> JoinHandle<()> { let dep = EventDependency::new( event_stream, - Box::new(|e| matches!(e, HotShotEvent::Shutdown)), + Box::new(|e| matches!(e.as_ref(), HotShotEvent::Shutdown)), ); - async_spawn(task_state.run_loop(dep)); + async_spawn(task_state.run_loop(dep)) } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 4fafac33b9..db51786bbf 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -35,6 +35,8 @@ pub struct TimingData { pub propose_min_round_time: Duration, /// The maximum amount of time a leader can wait to start a round pub propose_max_round_time: Duration, + /// time to wait until we request data associated with a proposal + pub data_request_delay: Duration, /// Delay before sending through the secondary network in CombinedNetworks pub secondary_network_delay: Duration, /// view sync timeout @@ -86,6 +88,7 @@ impl Default for TimingData { start_delay: 100, propose_min_round_time: Duration::new(0, 0), propose_max_round_time: Duration::from_millis(100), + data_request_delay: Duration::from_millis(200), secondary_network_delay: Duration::from_millis(1000), view_sync_timeout: Duration::from_millis(2000), } @@ -306,6 +309,7 @@ impl TestMetadata { start_delay, propose_min_round_time, propose_max_round_time, + data_request_delay, secondary_network_delay, view_sync_timeout, } = timing_data; @@ -318,6 +322,7 @@ impl TestMetadata { a.start_delay = start_delay; a.propose_min_round_time = propose_min_round_time; a.propose_max_round_time = propose_max_round_time; + a.data_request_delay = data_request_delay; a.view_sync_timeout = view_sync_timeout; }; From f63be65a00448d130d62ce31427c61a2e5bf70ef Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Mar 2024 16:38:53 -0400 Subject: [PATCH 0878/1393] [Catchup] Emit Attributable Action Events (#2800) * add more action types * integrate new storage type * fix build, tie up stragglers, modifying tasks next * merge latest stable tabs * merge latest tags and fix method signatures * working towards adding action to storage * fix lint * Add test for network task when storage fails --------- Co-authored-by: Jarred Parr --- example-types/src/storage_types.rs | 22 ++- hotshot/src/lib.rs | 4 + hotshot/src/tasks/mod.rs | 8 +- task-impls/src/network.rs | 151 +++++++++++------ testing/tests/network_task.rs | 256 +++++++++++++++-------------- types/src/event.rs | 11 ++ types/src/traits/storage.rs | 4 + 7 files changed, 273 insertions(+), 183 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 2315a9b840..79bc8a0c90 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -27,7 +27,6 @@ impl Default for TestStorageState { #[derive(Clone, Debug)] pub struct TestStorage { inner: Arc>>, - /// `should_return_err` is a testing utility to validate negative cases. pub should_return_err: bool, } @@ -64,4 +63,25 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + + async fn record_action( + &self, + _view: ::Time, + _action: hotshot_types::event::HotShotAction, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append Action to storage"); + } + Ok(()) + } + + async fn update_high_qc( + &self, + _qc: hotshot_types::simple_certificate::QuorumCertificate, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to update high qc to storage"); + } + Ok(()) + } } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 215de3df3d..6286294268 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -499,6 +499,7 @@ impl> SystemContext { quorum_network.clone(), quorum_membership, network::quorum_filter, + handle.get_storage().clone(), ) .await; add_network_event_task( @@ -508,6 +509,7 @@ impl> SystemContext { da_network.clone(), da_membership, network::committee_filter, + handle.get_storage().clone(), ) .await; add_network_event_task( @@ -517,6 +519,7 @@ impl> SystemContext { quorum_network.clone(), view_sync_membership, network::view_sync_filter, + handle.get_storage().clone(), ) .await; add_network_event_task( @@ -526,6 +529,7 @@ impl> SystemContext { quorum_network.clone(), vid_membership, network::vid_filter, + handle.get_storage().clone(), ) .await; add_consensus_task( diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index ee829807d5..7e0d23245d 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,6 +10,7 @@ use crate::types::SystemContextHandle; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ consensus::ConsensusTaskState, @@ -26,7 +27,7 @@ use hotshot_task_impls::{ use hotshot_types::{ constants::Version01, message::Message, - traits::{election::Membership, network::ConnectedNetwork}, + traits::{election::Membership, network::ConnectedNetwork, storage::Storage}, }; use hotshot_types::{ message::Messages, @@ -116,6 +117,7 @@ pub async fn add_network_message_task< pub async fn add_network_event_task< TYPES: NodeType, NET: ConnectedNetwork, TYPES::SignatureKey>, + S: Storage + 'static, >( task_reg: Arc, tx: Sender>>, @@ -123,12 +125,14 @@ pub async fn add_network_event_task< channel: Arc, membership: TYPES::Membership, filter: fn(&Arc>) -> bool, + storage: Arc>, ) { - let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { + let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState { channel, view: TYPES::Time::genesis(), membership, filter, + storage, }; let task = Task::new(tx, rx, task_reg.clone(), network_state); task_reg.run_task(task).await; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 004fba3ba6..81a38c9205 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -4,7 +4,9 @@ use crate::{ }; use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; +use async_lock::RwLock; use either::Either::{self, Left, Right}; +use hotshot_types::{event::HotShotAction, traits::storage::Storage}; use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; @@ -191,6 +193,7 @@ impl NetworkMessageTaskState { pub struct NetworkEventTaskState< TYPES: NodeType, COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + S: Storage, > { /// comm channel pub channel: Arc, @@ -201,10 +204,15 @@ pub struct NetworkEventTaskState< // TODO ED Need to add exchange so we can get the recipient key and our own key? /// Filter which returns false for the events that this specific network task cares about pub filter: fn(&Arc>) -> bool, + /// Storage to store actionable events + pub storage: Arc>, } -impl, TYPES::SignatureKey>> TaskState - for NetworkEventTaskState +impl< + TYPES: NodeType, + COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + S: Storage + 'static, + > TaskState for NetworkEventTaskState { type Event = Arc>; @@ -231,8 +239,11 @@ impl, TYPES::Signa } } -impl, TYPES::SignatureKey>> - NetworkEventTaskState +impl< + TYPES: NodeType, + COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + S: Storage + 'static, + > NetworkEventTaskState { /// Handle the given event. /// @@ -241,64 +252,82 @@ impl, TYPES::Signa /// Panic sif a direct message event is received with no recipient #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 #[instrument(skip_all, fields(view = *self.view), name = "Network Task", level = "error")] - pub async fn handle_event( &mut self, event: Arc>, membership: &TYPES::Membership, ) -> Option { + let mut maybe_action = None; let (sender, message_kind, transmit_type, recipient) = match event.as_ref().clone() { - HotShotEvent::QuorumProposalSend(proposal, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::Proposal(proposal), - ))), - TransmitType::Broadcast, - None, - ), + HotShotEvent::QuorumProposalSend(proposal, sender) => { + maybe_action = Some(HotShotAction::Propose); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::Proposal(proposal), + ))), + TransmitType::Broadcast, + None, + ) + } // ED Each network task is subscribed to all these message types. Need filters per network task - HotShotEvent::QuorumVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::Vote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number() + 1)), - ), - HotShotEvent::VidDisperseSend(proposal, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::VidDisperseMsg(proposal), - ))), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 - TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 - None, - ), - HotShotEvent::DAProposalSend(proposal, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::DAProposal(proposal), - ))), - TransmitType::DACommitteeBroadcast, - None, - ), - HotShotEvent::DAVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::DAVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number())), - ), + HotShotEvent::QuorumVoteSend(vote) => { + maybe_action = Some(HotShotAction::Vote); + ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage(Left( + GeneralConsensusMessage::Vote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view_number() + 1)), + ) + } + HotShotEvent::VidDisperseSend(proposal, sender) => { + maybe_action = Some(HotShotAction::VidDisperse); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::VidDisperseMsg(proposal), + ))), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 + None, + ) + } + HotShotEvent::DAProposalSend(proposal, sender) => { + maybe_action = Some(HotShotAction::DAPropose); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::DAProposal(proposal), + ))), + TransmitType::DACommitteeBroadcast, + None, + ) + } + HotShotEvent::DAVoteSend(vote) => { + maybe_action = Some(HotShotAction::DAVote); + ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::DAVote(vote.clone()), + ))), + TransmitType::Direct, + Some(membership.get_leader(vote.get_view_number())), + ) + } // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee - HotShotEvent::DACSend(certificate, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::DACertificate(certificate), - ))), - TransmitType::Broadcast, - None, - ), + HotShotEvent::DACSend(certificate, sender) => { + maybe_action = Some(HotShotAction::DACert); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage(Right( + CommitteeConsensusMessage::DACertificate(certificate), + ))), + TransmitType::Broadcast, + None, + ) + } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( vote.get_signing_key(), MessageKind::::from_consensus_message(SequencingMessage(Left( @@ -377,7 +406,23 @@ impl, TYPES::Signa let view = message.kind.get_view_number(); let committee = membership.get_whole_committee(view); let net = self.channel.clone(); + let storage = self.storage.clone(); async_spawn(async move { + if let Some(action) = maybe_action { + match storage + .write() + .await + .record_action(view, action.clone()) + .await + { + Ok(()) => {} + Err(e) => { + warn!("Not Sending {:?} because of storage error: {:?}", action, e); + return; + } + } + } + let transmit_result = match transmit_type { TransmitType::Direct => { net.direct_message(message, recipient.unwrap(), STATIC_VER_0_1) diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 2a7ef10ba2..e52d8105a9 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -1,144 +1,146 @@ -use hotshot::types::SignatureKey; +use async_compatibility_layer::art::async_timeout; +use async_lock::RwLock; +use hotshot::tasks::add_network_message_task; +use hotshot::traits::implementations::MemoryNetwork; +use hotshot_example_types::node_types::MemoryImpl; use hotshot_example_types::node_types::TestTypes; +use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::events::HotShotEvent; -use hotshot_testing::task_helpers::{build_quorum_proposal, vid_scheme_from_view_number}; -use hotshot_types::{ - data::{DAProposal, ViewNumber}, - traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, -}; -use jf_primitives::vid::VidScheme; -use sha2::{Digest, Sha256}; -use std::{collections::HashMap, marker::PhantomData}; +use hotshot_task_impls::network::{self, NetworkEventTaskState}; +use hotshot_testing::test_builder::TestMetadata; +use hotshot_testing::view_generator::TestViewGenerator; +use hotshot_types::traits::election::Membership; +use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use std::sync::Arc; +use std::time::Duration; +// Test that the event task sends a message, and the message task receives it +// and emits the proper event #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[ignore] #[allow(clippy::too_many_lines)] async fn test_network_task() { - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::{data::VidDisperse, message::Proposal}; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - // Build the API for node 2. - let (handle, _tx, _rx) = build_system_handle(2).await; - let pub_key = *handle.public_key(); - let priv_key = handle.private_key(); - // quorum membership for VID share distribution - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - - let encoded_transactions = Vec::new(); - let encoded_transactions_hash = Sha256::digest(&encoded_transactions); - let da_signature = - ::SignatureKey::sign( - handle.private_key(), - &encoded_transactions_hash, - ) - .expect("Failed to sign block payload"); - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - let vid_signature = - ::SignatureKey::sign( - handle.private_key(), - payload_commitment.as_ref(), + let builder = TestMetadata::default_multiple_rounds(); + let node_id = 1; + + let launcher = builder.gen_launcher::(node_id); + + let networks = (launcher.resource_generator.channel_generator)(node_id); + + let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); + let config = launcher.resource_generator.config.clone(); + let public_key = config.my_own_validator_config.public_key; + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + ::Membership::default_election_config( + config.num_nodes_with_stake.get() as u64, + config.num_nodes_without_stake as u64, ) - .expect("Failed to sign block commitment"); - - let da_proposal = Proposal { - data: DAProposal { - encoded_transactions: encoded_transactions.clone(), - metadata: (), - view_number: ViewNumber::new(2), - }, - signature: da_signature, - _pd: PhantomData, - }; - let quorum_proposal = build_quorum_proposal(&handle, priv_key, 2).await; - - let vid_disperse_inner = VidDisperse::from_membership( - da_proposal.data.view_number, - vid_disperse, - &quorum_membership.clone().into(), + }); + + let membership = ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), ); + let channel = networks.0.clone(); + let network_state: NetworkEventTaskState, _> = + NetworkEventTaskState { + channel: channel.clone(), + view: ViewNumber::new(0), + membership: membership.clone(), + filter: network::quorum_filter, + storage, + }; + let (tx, rx) = async_broadcast::broadcast(10); + let task_reg = Arc::new(TaskRegistry::default()); - // TODO for now reuse the same block payload commitment and signature as DA committee - // https://github.com/EspressoSystems/jellyfish/issues/369 - let vid_proposal = Proposal { - data: vid_disperse_inner.clone(), - signature: vid_signature, - _pd: PhantomData, - }; - - // Every event input is seen on the event stream in the output. - let mut input = Vec::new(); - let mut output = HashMap::new(); - - input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::TransactionsSequenced( - encoded_transactions.clone(), - (), - ViewNumber::new(2), - )); - input.push(HotShotEvent::BlockReady( - vid_disperse_inner.clone(), - ViewNumber::new(2), - )); - input.push(HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::QuorumProposalSend( - quorum_proposal.clone(), - pub_key, + let task = Task::new(tx.clone(), rx, task_reg.clone(), network_state); + task_reg.run_task(task).await; + + let mut generator = TestViewGenerator::generate(membership.clone()); + let view = generator.next().unwrap(); + + let (out_tx, mut out_rx) = async_broadcast::broadcast(10); + add_network_message_task(task_reg, out_tx.clone(), channel.clone()).await; + + tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( + view.quorum_proposal, + public_key, + ))) + .await + .unwrap(); + let res = async_timeout(Duration::from_millis(100), out_rx.recv_direct()) + .await + .expect("timed out waiting for response") + .expect("channel closed"); + assert!(matches!( + res.as_ref(), + HotShotEvent::QuorumProposalRecv(_, _) )); - // Don't send `Shutdown` as other task unit tests do, to avoid nondeterministic behaviors due - // to some tasks shut down earlier than the testing harness and we don't get all the expected - // events. - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 2); - output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - 2, // 2 occurrences: 1 from `input`, 1 from the transactions task - ); - output.insert( - HotShotEvent::BlockReady(vid_disperse_inner, ViewNumber::new(2)), - 2, // 2 occurrences: 1 from `input`, 1 from the VID task - ); - output.insert( - HotShotEvent::DAProposalSend(da_proposal.clone(), pub_key), - 3, // 2 occurrences: 1 from `input`, 2 from the DA task - ); - output.insert( - HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 2, // 2 occurrences: 1 from `input`, 1 from the VID task - ); - // Only one output from the input. - // The consensus task will fail to send a second proposal, like the DA task does, due to the - // view number check in `publish_proposal_if_able` in consensus.rs, and we will see an error in - // logging, but that is fine for testing as long as the network task is correctly handling - // events. - output.insert( - HotShotEvent::QuorumProposalSend(quorum_proposal.clone(), pub_key), - 1, - ); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 2); - output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), - 2, // 2 occurrences: both from the VID task - ); - output.insert( - HotShotEvent::QuorumProposalRecv(quorum_proposal, pub_key), - 1, +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_network_storage_fail() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let builder = TestMetadata::default_multiple_rounds(); + let node_id = 1; + + let launcher = builder.gen_launcher::(node_id); + + let networks = (launcher.resource_generator.channel_generator)(node_id); + + let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); + storage.write().await.should_return_err = true; + let config = launcher.resource_generator.config.clone(); + let public_key = config.my_own_validator_config.public_key; + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + ::Membership::default_election_config( + config.num_nodes_with_stake.get() as u64, + config.num_nodes_without_stake as u64, + ) + }); + + let membership = ::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), ); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal), 1); - output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); - - // let build_fn = |task_runner, _| async { task_runner }; - // There may be extra outputs not in the expected set, e.g., a second `VidDisperseRecv` if the - // VID task runs fast. All event types we want to test should be seen by this point, so waiting - // for more events will not help us test more cases for now. Therefore, we set - // `allow_extra_output` to `true` for deterministic test result. - // run_harness(input, output, Some(event_stream), build_fn, true).await; + let channel = networks.0.clone(); + let network_state: NetworkEventTaskState, _> = + NetworkEventTaskState { + channel: channel.clone(), + view: ViewNumber::new(0), + membership: membership.clone(), + filter: network::quorum_filter, + storage, + }; + let (tx, rx) = async_broadcast::broadcast(10); + let task_reg = Arc::new(TaskRegistry::default()); + + let task = Task::new(tx.clone(), rx, task_reg.clone(), network_state); + task_reg.run_task(task).await; + + let mut generator = TestViewGenerator::generate(membership.clone()); + let view = generator.next().unwrap(); + + let (out_tx, mut out_rx) = async_broadcast::broadcast(10); + add_network_message_task(task_reg, out_tx.clone(), channel.clone()).await; + + tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( + view.quorum_proposal, + public_key, + ))) + .await + .unwrap(); + let res = async_timeout(Duration::from_millis(100), out_rx.recv_direct()).await; + assert!(res.is_err()); } diff --git a/types/src/event.rs b/types/src/event.rs index 8a2e59eedf..97543c23d5 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -158,3 +158,14 @@ pub enum EventType { sender: TYPES::SignatureKey, }, } +#[derive(Debug, Serialize, Deserialize, Clone)] +pub enum HotShotAction { + Vote, + Propose, + DAPropose, + DAVote, + DACert, + VidDisperse, + UpgradeVote, + UpgradePropose, +} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 2dab00ee88..f65f7ea986 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -8,7 +8,9 @@ use async_trait::async_trait; use crate::{ data::{DAProposal, VidDisperse}, + event::HotShotAction, message::Proposal, + simple_certificate::QuorumCertificate, }; use super::node_implementation::NodeType; @@ -18,4 +20,6 @@ use super::node_implementation::NodeType; pub trait Storage: Send + Sync + Clone { async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; async fn append_da(&self, proposal: &Proposal>) -> Result<()>; + async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; + async fn update_high_qc(&self, qc: QuorumCertificate) -> Result<()>; } From d5388168a2d46dd022dacd1a90c0e897570358fc Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 20 Mar 2024 13:47:02 -0700 Subject: [PATCH 0879/1393] [CX_CLEANUP] - Implement `QuorumVoteTask` with dummy dependencies (#2776) * Rename vote to vote_collection, add quorum_vote * Add basic structure * Start fix build * More fixes * Fix build * Restore a comment * Fix doc * Modify function signature, add events * Replace duplicate code with DependencyTask, validate events before creating 3 subdependencies * Fix build after merge * Fix lint * Fix tokio * Wrap events into Arc * Add QuorumVoteDependenciesValidated event, move view changes * Simplify run spawning, add view checks. --- task-impls/src/consensus.rs | 4 +- task-impls/src/da.rs | 4 +- task-impls/src/events.rs | 8 + task-impls/src/lib.rs | 5 +- task-impls/src/quorum_vote.rs | 381 ++++++++++++++++++ task-impls/src/upgrade.rs | 4 +- task-impls/src/view_sync.rs | 4 +- .../src/{vote.rs => vote_collection.rs} | 0 8 files changed, 402 insertions(+), 8 deletions(-) create mode 100644 task-impls/src/quorum_vote.rs rename task-impls/src/{vote.rs => vote_collection.rs} (100%) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 07e6bb9e54..9c531bdd23 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,7 +1,7 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task}, - vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, + vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -40,7 +40,7 @@ use hotshot_types::{ use tracing::warn; use versioned_binary_serialization::version::Version; -use crate::vote::HandleVoteEvent; +use crate::vote_collection::HandleVoteEvent; use chrono::Utc; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; #[cfg(async_executor_impl = "tokio")] diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ba63ec81e5..d2d4452067 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,7 +1,7 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, - vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, + vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_broadcast::Sender; use async_lock::RwLock; @@ -30,7 +30,7 @@ use hotshot_types::{ }; use sha2::{Digest, Sha256}; -use crate::vote::HandleVoteEvent; +use crate::vote_collection::HandleVoteEvent; use std::{marker::PhantomData, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 187dd1dfb3..9f9f595db7 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -39,10 +39,16 @@ pub enum HotShotEvent { DAVoteRecv(DAVote), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task DACRecv(DACertificate), + /// A DAC is validated. + DACValidated(DACertificate), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), + /// Dummy quorum vote to test if the quorum vote dependency works. + DummyQuorumVoteSend(TYPES::Time), + /// All dependencies for the quorum vote are validated. + QuorumVoteDependenciesValidated(TYPES::Time), /// A proposal was validated. This means it comes from the correct leader and has a correct QC. QuorumProposalValidated(QuorumProposal), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task @@ -118,6 +124,8 @@ pub enum HotShotEvent { /// /// Like [`HotShotEvent::DAProposalRecv`]. VidDisperseRecv(Proposal>), + /// A VID disperse data is validated. + VidDisperseValidated(VidDisperse), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index dd3e4e4366..e2ad897517 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -4,6 +4,9 @@ /// the task which implements the main parts of consensus pub mod consensus; +/// The task which handles the logic for the quorum vote. +pub mod quorum_vote; + /// The task which implements the main parts of data availability. pub mod da; @@ -26,7 +29,7 @@ pub mod view_sync; pub mod vid; /// Generic task for collecting votes -pub mod vote; +pub mod vote_collection; /// Task for handling upgrades pub mod upgrade; diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs new file mode 100644 index 0000000000..7d5913982c --- /dev/null +++ b/task-impls/src/quorum_vote.rs @@ -0,0 +1,381 @@ +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; +use async_broadcast::{Receiver, Sender}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use hotshot_task::{ + dependency::{AndDependency, EventDependency}, + dependency_task::{DependencyTask, HandleDepOutput}, + task::{Task, TaskState}, +}; +use hotshot_types::{ + data::{QuorumProposal, VidDisperse}, + event::Event, + message::Proposal, + simple_certificate::DACertificate, + traits::{ + block_contents::BlockHeader, + network::{ConnectedNetwork, ConsensusIntentEvent}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, + vote::{Certificate, HasViewNumber}, +}; +use std::collections::HashMap; +use std::sync::Arc; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::warn; +use tracing::{debug, instrument}; + +/// Vote dependency types. +#[derive(PartialEq)] +enum VoteDependency { + /// For the `QuorumProposalRecv` event. + QuorumProposal, + /// For the `DACRecv` event. + Dac, + /// For the `VidDisperseRecv` event. + Vid, +} + +/// Validate the quorum proposal. +// TODO: Complete the dependency implementation. +// +#[allow(clippy::needless_pass_by_value)] +fn validate_quorum_proposal( + _quorum_proposal: Proposal>, + _event_sender: Sender>>, +) -> bool { + true +} + +/// Validate the DAC. +// TODO: Complete the dependency implementation. +// +#[allow(clippy::needless_pass_by_value)] +fn validate_dac( + _dac: DACertificate, + _event_sender: Sender>>, +) -> bool { + true +} + +/// Validate the VID share. +// TODO: Complete the dependency implementation. +// +#[allow(clippy::needless_pass_by_value)] +fn validate_vid( + _disperse: Proposal>, + _event_sender: Sender>>, +) -> bool { + true +} + +/// Handler for the vote dependency. +struct VoteDependencyHandle { + /// View number to vote on. + view_number: TYPES::Time, + /// Event sender. + sender: Sender>>, +} +impl HandleDepOutput for VoteDependencyHandle { + type Output = Vec>>; + async fn handle_dep_result(self, res: Self::Output) { + // Add this commitment check to test if the handler works, but this isn't the only thing + // that we'll need to check. E.g., we also need to check that VID commitment matches + // `payload_commitment`. + // TODO: Complete the dependency implementation. + // + let mut payload_commitment = None; + for event in res { + match event.as_ref() { + HotShotEvent::QuorumProposalValidated(proposal) => { + let proposal_payload_comm = proposal.block_header.payload_commitment(); + if let Some(comm) = payload_commitment { + if proposal_payload_comm != comm { + return; + } + } else { + payload_commitment = Some(proposal_payload_comm); + } + } + HotShotEvent::DACValidated(cert) => { + let cert_payload_comm = cert.get_data().payload_commit; + if let Some(comm) = payload_commitment { + if cert_payload_comm != comm { + return; + } + } else { + payload_commitment = Some(cert_payload_comm); + } + } + _ => {} + } + } + broadcast_event( + Arc::new(HotShotEvent::QuorumVoteDependenciesValidated( + self.view_number, + )), + &self.sender, + ) + .await; + broadcast_event( + Arc::new(HotShotEvent::DummyQuorumVoteSend(self.view_number)), + &self.sender, + ) + .await; + } +} + +/// The state for the quorum vote task. +/// +/// Contains all of the information for the quorum vote. +pub struct QuorumVoteTaskState> { + /// Latest view number that has been voted for. + pub latest_voted_view: TYPES::Time, + + /// Table for the in-progress dependency tasks. + pub vote_dependencies: HashMap>, + + /// Network for all nodes + pub quorum_network: Arc, + + /// Network for DA committee + pub committee_network: Arc, + + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, + + /// The node's id + pub id: u64, +} + +impl> QuorumVoteTaskState { + /// Create an event dependency. + #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote create event dependency", level = "error")] + fn create_event_dependency( + &self, + dependency_type: VoteDependency, + view_number: TYPES::Time, + event_receiver: Receiver>>, + ) -> EventDependency>> { + EventDependency::new( + event_receiver.clone(), + Box::new(move |event| { + let event = event.as_ref(); + let event_view = match dependency_type { + VoteDependency::QuorumProposal => { + if let HotShotEvent::QuorumProposalValidated(proposal) = event { + proposal.view_number + } else { + return false; + } + } + VoteDependency::Dac => { + if let HotShotEvent::DACValidated(cert) = event { + cert.view_number + } else { + return false; + } + } + VoteDependency::Vid => { + if let HotShotEvent::VidDisperseValidated(disperse) = event { + disperse.view_number + } else { + return false; + } + } + }; + event_view == view_number + }), + ) + } + + /// Create and store an [`AndDependency`] combining [`EventDependency`]s associated with the + /// given view number if it doesn't exist. + fn create_dependency_task_if_new( + &mut self, + view_number: TYPES::Time, + event_receiver: Receiver>>, + event_sender: Sender>>, + ) { + if self.vote_dependencies.get(&view_number).is_some() { + return; + } + let deps = vec![ + self.create_event_dependency( + VoteDependency::QuorumProposal, + view_number, + event_receiver.clone(), + ), + self.create_event_dependency(VoteDependency::Dac, view_number, event_receiver.clone()), + self.create_event_dependency(VoteDependency::Vid, view_number, event_receiver), + ]; + let vote_dependency = AndDependency::from_deps(deps); + let dependency_task = DependencyTask::new( + vote_dependency, + VoteDependencyHandle { + view_number, + sender: event_sender, + }, + ); + self.vote_dependencies + .insert(view_number, dependency_task.run()); + } + + /// Update the latest voted view number. + #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote update latest voted view", level = "error")] + async fn update_latest_voted_view(&mut self, new_view: TYPES::Time) -> bool { + if *self.latest_voted_view < *new_view { + debug!( + "Updating next vote view from {} to {} in the quorum vote task", + *self.latest_voted_view, *new_view + ); + + // Cancel the old dependency tasks. + for view in (*self.latest_voted_view + 1)..=(*new_view) { + if let Some(dependency) = self.vote_dependencies.remove(&TYPES::Time::new(view)) { + cancel_task(dependency).await; + } + } + + self.latest_voted_view = new_view; + + return true; + } + false + } + + /// Handles a consensus event received on the event stream + #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote handle", level = "error")] + pub async fn handle( + &mut self, + event: Arc>, + event_receiver: Receiver>>, + event_sender: Sender>>, + ) { + match event.as_ref() { + HotShotEvent::QuorumProposalRecv(proposal, _sender) => { + let view = proposal.data.view_number; + if view <= self.latest_voted_view { + return; + } + debug!("Received Quorum Proposal for view {}", *view); + + // stop polling for the received proposal + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal(*view)) + .await; + broadcast_event(Arc::new(HotShotEvent::ViewChange(view + 1)), &event_sender).await; + if !validate_quorum_proposal(proposal.clone(), event_sender.clone()) { + return; + } + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), + &event_sender.clone(), + ) + .await; + self.create_dependency_task_if_new(view, event_receiver, event_sender); + } + HotShotEvent::DACRecv(cert) => { + debug!("DAC Received for view {}!", *cert.view_number); + let view = cert.view_number; + if view <= self.latest_voted_view { + return; + } + + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForDAC(*view)) + .await; + + self.committee_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) + .await; + + if !validate_dac(cert.clone(), event_sender.clone()) { + return; + } + broadcast_event( + Arc::new(HotShotEvent::DACValidated(cert.clone())), + &event_sender.clone(), + ) + .await; + self.create_dependency_task_if_new(view, event_receiver, event_sender); + } + HotShotEvent::VidDisperseRecv(disperse) => { + let view = disperse.data.get_view_number(); + if view <= self.latest_voted_view { + return; + } + + // stop polling for the received disperse after verifying it's valid + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( + *disperse.data.view_number, + )) + .await; + + if !validate_vid(disperse.clone(), event_sender.clone()) { + return; + } + broadcast_event( + Arc::new(HotShotEvent::VidDisperseValidated(disperse.data.clone())), + &event_sender.clone(), + ) + .await; + self.create_dependency_task_if_new(view, event_receiver, event_sender); + } + HotShotEvent::QuorumVoteDependenciesValidated(view) => { + if !self.update_latest_voted_view(*view).await { + debug!("view not updated"); + return; + } + } + HotShotEvent::ViewChange(new_view) => { + let new_view = *new_view; + debug!("View Change event for view {} in consensus task", *new_view); + + let old_voted_view = self.latest_voted_view; + + // Start polling for VID disperse for the new view + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( + *old_voted_view + 1, + )) + .await; + } + _ => {} + } + } +} + +impl> TaskState for QuorumVoteTaskState { + type Event = Arc>; + type Output = (); + fn filter(&self, event: &Arc>) -> bool { + !matches!( + event.as_ref(), + HotShotEvent::QuorumProposalRecv(_, _) + | HotShotEvent::DACRecv(_) + | HotShotEvent::ViewChange(_) + | HotShotEvent::VidDisperseRecv(..) + | HotShotEvent::QuorumVoteDependenciesValidated(_) + | HotShotEvent::Shutdown, + ) + } + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + let receiver = task.subscribe(); + let sender = task.clone_sender(); + tracing::trace!("sender queue len {}", sender.len()); + task.state_mut().handle(event, receiver, sender).await; + None + } + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event.as_ref(), HotShotEvent::Shutdown) + } +} diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index c6ab65651b..1f245113a5 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,7 +1,7 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, - vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, + vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_broadcast::Sender; use async_lock::RwLock; @@ -20,7 +20,7 @@ use hotshot_types::{ vote::HasViewNumber, }; -use crate::vote::HandleVoteEvent; +use crate::vote_collection::HandleVoteEvent; use std::sync::Arc; use tracing::{debug, error, instrument, warn}; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index a9ecc8386a..e15530e4f5 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -2,7 +2,9 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task}, - vote::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, }; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; diff --git a/task-impls/src/vote.rs b/task-impls/src/vote_collection.rs similarity index 100% rename from task-impls/src/vote.rs rename to task-impls/src/vote_collection.rs From b6edd2916bdf062939fce2babc43b8ddaa73fab8 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 21 Mar 2024 07:26:30 -0400 Subject: [PATCH 0880/1393] remove some unused dependencies (#2816) --- example-types/Cargo.toml | 1 - examples/Cargo.toml | 1 - hotshot/Cargo.toml | 3 --- libp2p-networking/Cargo.toml | 1 - macros/Cargo.toml | 4 ---- 5 files changed, 10 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 19034f2c71..de8a42da07 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -22,7 +22,6 @@ either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } hotshot-types = { path = "../types" } -hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } rand = { workspace = true } snafu = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 0e28bb73a7..fb21ed3a64 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -107,7 +107,6 @@ commit = { workspace = true } custom_debug = { workspace = true } dashmap = "5.5.1" either = { workspace = true } -embed-doc-image = "0.1.4" futures = { workspace = true } hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 39ebb32512..21e1d81e92 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -34,10 +34,8 @@ custom_debug = { workspace = true } dashmap = "5.5.1" derive_more = "0.99.17" either = { workspace = true } -embed-doc-image = "0.1.4" ethereum-types = { workspace = true } futures = { workspace = true } -hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } hotshot-types = { path = "../types" } @@ -80,7 +78,6 @@ cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } [dev-dependencies] blake3 = { workspace = true } clap = { version = "4.5", features = ["derive", "env"] } -local-ip-address = "0.6.1" toml = { workspace = true } [lints] diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index b5ba3e00f1..ae5ada4a94 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -35,7 +35,6 @@ tide = { version = "0.16", optional = true, default-features = false, features = tracing = { workspace = true } versioned-binary-serialization = { workspace = true } void = "1.0.2" -dashmap = "5.5.3" lazy_static = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 88784ff052..1d6f6211a2 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -9,10 +9,6 @@ description = "Macros for hotshot tests" quote = "1.0.33" syn = { version = "2.0.50", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" -derive_builder = "0.20.0" - -[dev-dependencies] -async-lock = { workspace = true } [lib] proc-macro = true From 33b0fde820d3d0a908fca9f33de6fbabdd291df6 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 21 Mar 2024 14:58:40 +0100 Subject: [PATCH 0881/1393] Send each recipient one corresponding VID share (#2769) * Send each recipient one corresponding VID share * `Consensus` and `LeafInfo` have full vid disperse * Fix deadlock * Temporarily pin hs-builder-api and fix lint errors * Make lint happy * Update hotshot-types to 0.1.6 based version * `VidDisperseSend` sent as direct message * hotshot-types and hs-builder-api temporarily pinned to custom branch * `VidDisperseSend` unpacked and sent as separate messages * Adapt `Storage` to `VidDisperseShare` * Fix lint issues * Move back changes from hotshot-types repo * Fix docs * Optimise `DMBehaviour::poll` slightly * Fix merge issues * Review fixes * Fix docs --- example-types/src/storage_types.rs | 15 ++- .../src/network/behaviours/direct_message.rs | 11 +- task-impls/src/consensus.rs | 23 +++- task-impls/src/events.rs | 5 +- task-impls/src/network.rs | 108 ++++++++++++++---- task-impls/src/quorum_vote.rs | 4 +- task-impls/src/response.rs | 22 ++-- testing/src/task_helpers.rs | 21 ++-- testing/src/view_generator.rs | 4 +- testing/tests/network_task.rs | 9 +- testing/tests/vid_task.rs | 14 ++- types/src/consensus.rs | 13 ++- types/src/data.rs | 105 +++++++++++++++++ types/src/message.rs | 10 +- types/src/traits/network.rs | 40 ++++++- types/src/traits/storage.rs | 4 +- 16 files changed, 321 insertions(+), 87 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 79bc8a0c90..51b894ca56 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -2,16 +2,21 @@ use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ - data::{DAProposal, VidDisperse}, + data::{DAProposal, VidDisperseShare}, message::Proposal, traits::{node_implementation::NodeType, storage::Storage}, }; use std::collections::HashMap; use std::sync::Arc; +type VidShares = HashMap< + ::Time, + HashMap<::SignatureKey, Proposal>>, +>; + #[derive(Clone, Debug)] pub struct TestStorageState { - vids: HashMap>>, + vids: VidShares, das: HashMap>>, } @@ -42,14 +47,16 @@ impl Default for TestStorage { #[async_trait] impl Storage for TestStorage { - async fn append_vid(&self, proposal: &Proposal>) -> Result<()> { + async fn append_vid(&self, proposal: &Proposal>) -> Result<()> { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } let mut inner = self.inner.write().await; inner .vids - .insert(proposal.data.view_number, proposal.clone()); + .entry(proposal.data.view_number) + .or_default() + .insert(proposal.data.recipient_key.clone(), proposal.clone()); Ok(()) } diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index fc0e645942..f156e2ad96 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -132,13 +132,16 @@ impl NetworkBehaviour for DMBehaviour { &mut self, cx: &mut std::task::Context<'_>, ) -> Poll>> { - while let Some(req) = self.failed_rr.pop_front() { + let mut retry_req_indices = Vec::new(); + for (idx, req) in self.failed_rr.iter().enumerate() { if req.backoff.is_expired() { - self.add_direct_request(req); - } else { - self.failed_rr.push_back(req); + retry_req_indices.push(idx); } } + let _ = retry_req_indices.into_iter().map(|idx| { + let req = self.failed_rr.remove(idx).unwrap(); + self.add_direct_request(req); + }); while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.request_response, cx) { match ready { // NOTE: this generates request diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9c531bdd23..f877e58daa 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -42,7 +42,12 @@ use versioned_binary_serialization::version::Version; use crate::vote_collection::HandleVoteEvent; use chrono::Utc; -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; +use hotshot_types::data::VidDisperseShare; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + sync::Arc, +}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument}; @@ -735,10 +740,14 @@ impl, A: ConsensusApi + leaf.fill_block_payload_unchecked(payload); } - let vid = consensus - .vid_shares - .get(&leaf.get_view_number()) - .map(|vid_proposal| vid_proposal.data.clone()); + let vid = VidDisperseShare::to_vid_disperse( + consensus + .vid_shares + .get(&leaf.get_view_number()) + .unwrap_or(&HashMap::new()) + .iter() + .map(|(_key, proposal)| &proposal.data) + ); leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid)); leafs_decided.push(leaf.clone()); @@ -1079,7 +1088,9 @@ impl, A: ConsensusApi + .write() .await .vid_shares - .insert(view, disperse.clone()); + .entry(view) + .or_default() + .insert(disperse.data.recipient_key.clone(), disperse.clone()); if self.vote_if_able(&event_stream).await { self.current_proposal = None; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9f9f595db7..9f21adf391 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,6 +1,7 @@ use crate::view_sync::ViewSyncPhase; use either::Either; +use hotshot_types::data::VidDisperseShare; use hotshot_types::{ data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, message::Proposal, @@ -123,9 +124,9 @@ pub enum HotShotEvent { /// Vid disperse data has been received from the network; handled by the DA task /// /// Like [`HotShotEvent::DAProposalRecv`]. - VidDisperseRecv(Proposal>), + VidDisperseRecv(Proposal>), /// A VID disperse data is validated. - VidDisperseValidated(VidDisperse), + VidDisperseValidated(VidDisperseShare), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 81a38c9205..ade3b1297f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -7,20 +7,21 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use either::Either::{self, Left, Right}; use hotshot_types::{event::HotShotAction, traits::storage::Storage}; +use std::collections::HashMap; use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; use hotshot_types::constants::STATIC_VER_0_1; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ + data::{VidDisperse, VidDisperseShare}, message::{ CommitteeConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, - SequencingMessage, + Proposal, SequencingMessage, }, traits::{ election::Membership, network::{ConnectedNetwork, TransmitType, ViewMessage}, - node_implementation::NodeType, + node_implementation::{ConsensusTime, NodeType}, }, vote::{HasViewNumber, Vote}, }; @@ -284,15 +285,7 @@ impl< ) } HotShotEvent::VidDisperseSend(proposal, sender) => { - maybe_action = Some(HotShotAction::VidDisperse); - ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::VidDisperseMsg(proposal), - ))), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 - TransmitType::Broadcast, // TODO not a broadcast https://github.com/EspressoSystems/HotShot/issues/1696 - None, - ) + return self.handle_vid_disperse_proposal(proposal, &sender); } HotShotEvent::DAProposalSend(proposal, sender) => { maybe_action = Some(HotShotAction::DAPropose); @@ -408,19 +401,15 @@ impl< let net = self.channel.clone(); let storage = self.storage.clone(); async_spawn(async move { - if let Some(action) = maybe_action { - match storage - .write() - .await - .record_action(view, action.clone()) - .await - { - Ok(()) => {} - Err(e) => { - warn!("Not Sending {:?} because of storage error: {:?}", action, e); - return; - } - } + if NetworkEventTaskState::::maybe_record_action( + maybe_action, + storage, + view, + ) + .await + .is_err() + { + return; } let transmit_result = match transmit_type { @@ -446,4 +435,73 @@ impl< None } + + /// handle `VidDisperseSend` + fn handle_vid_disperse_proposal( + &self, + vid_proposal: Proposal>, + sender: &::SignatureKey, + ) -> Option { + let view = vid_proposal.data.view_number; + let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); + let messages: HashMap<_, _> = vid_share_proposals + .into_iter() + .map(|proposal| { + ( + proposal.data.recipient_key.clone(), + Message { + sender: sender.clone(), + kind: MessageKind::::from_consensus_message(SequencingMessage( + Right(CommitteeConsensusMessage::VidDisperseMsg(proposal)), + )), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + }, + ) + }) + .collect(); + + let net = self.channel.clone(); + let storage = self.storage.clone(); + async_spawn(async move { + if NetworkEventTaskState::::maybe_record_action( + Some(HotShotAction::VidDisperse), + storage, + view, + ) + .await + .is_err() + { + return; + } + match net.vid_broadcast_message(messages, STATIC_VER_0_1).await { + Ok(()) => {} + Err(e) => error!("Failed to send message from network task: {:?}", e), + } + }); + + None + } + + /// Record `HotShotAction` if available + async fn maybe_record_action( + maybe_action: Option, + storage: Arc>, + view: ::Time, + ) -> Result<(), ()> { + if let Some(action) = maybe_action { + match storage + .write() + .await + .record_action(view, action.clone()) + .await + { + Ok(()) => Ok(()), + Err(e) => { + warn!("Not Sending {:?} because of storage error: {:?}", action, e); + Err(()) + } + } + } else { + Ok(()) + } + } } diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 7d5913982c..275bb7c391 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -11,7 +11,7 @@ use hotshot_task::{ task::{Task, TaskState}, }; use hotshot_types::{ - data::{QuorumProposal, VidDisperse}, + data::{QuorumProposal, VidDisperseShare}, event::Event, message::Proposal, simple_certificate::DACertificate, @@ -67,7 +67,7 @@ fn validate_dac( // #[allow(clippy::needless_pass_by_value)] fn validate_vid( - _disperse: Proposal>, + _disperse: Proposal>, _event_sender: Sender>>, ) -> bool { true diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index e7af6689d8..d4124097d4 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,4 +1,5 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::HashMap; +use std::sync::Arc; use async_broadcast::Receiver; use async_compatibility_layer::art::async_spawn; @@ -11,7 +12,7 @@ use futures::{channel::mpsc, FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::Consensus, - data::VidDisperse, + data::VidDisperseShare, message::{ CommitteeConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage, }, @@ -116,10 +117,10 @@ impl NetworkResponseState { match req.request { RequestKind::VID(view, pub_key) => { let state = self.consensus.read().await; - let Some(shares) = state.vid_shares.get(&view) else { + let Some(proposals_map) = state.vid_shares.get(&view) else { return self.make_msg(ResponseMessage::NotFound); }; - self.handle_vid(shares.clone(), pub_key) + self.handle_vid(proposals_map, &pub_key) } // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 RequestKind::DAProposal(_view) => self.make_msg(ResponseMessage::NotFound), @@ -130,14 +131,15 @@ impl NetworkResponseState { /// build the response and return it fn handle_vid( &self, - mut vid: Proposal>, - key: TYPES::SignatureKey, + proposals_map: &HashMap>>, + key: &TYPES::SignatureKey, ) -> Message { - let Some(share) = vid.data.shares.get(&key) else { + if !proposals_map.contains_key(key) { return self.make_msg(ResponseMessage::NotFound); - }; - vid.data.shares = BTreeMap::from([(key, share.clone())]); - let seq_msg = SequencingMessage(Right(CommitteeConsensusMessage::VidDisperseMsg(vid))); + } + let seq_msg = SequencingMessage(Right(CommitteeConsensusMessage::VidDisperseMsg( + proposals_map.get(key).unwrap().clone(), + ))); self.make_msg(ResponseMessage::Found(seq_msg)) } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index b67e68e36f..2ae275eff2 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -45,6 +45,7 @@ use hotshot_types::vote::Vote; use jf_primitives::vid::VidScheme; +use hotshot_types::data::VidDisperseShare; use serde::Serialize; use std::{fmt::Debug, hash::Hash, sync::Arc}; @@ -397,32 +398,26 @@ pub fn da_payload_commitment( vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) } +/// TODO: pub fn build_vid_proposal( quorum_membership: &::Membership, view_number: ViewNumber, transactions: Vec, private_key: &::PrivateKey, -) -> Proposal> { +) -> Proposal> { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - - let vid_signature = - ::SignatureKey::sign(private_key, payload_commitment.as_ref()) - .expect("Failed to sign payload commitment"); let vid_disperse = VidDisperse::from_membership( view_number, - vid.disperse(&encoded_transactions).unwrap(), + vid.disperse(encoded_transactions).unwrap(), &quorum_membership.clone().into(), ); - Proposal { - data: vid_disperse.clone(), - signature: vid_signature, - _pd: PhantomData, - } + VidDisperseShare::from_vid_disperse(vid_disperse) + .swap_remove(0) + .to_proposal(private_key) + .expect("Failed to sign payload commitment") } pub fn build_da_certificate( diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 7699e2bd88..799af75a11 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -15,7 +15,7 @@ use commit::Committable; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, VidDisperse, ViewNumber}, + data::{DAProposal, Leaf, QuorumProposal, VidDisperseShare, ViewNumber}, message::Proposal, simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -42,7 +42,7 @@ pub struct TestView { pub view_number: ViewNumber, pub quorum_membership: ::Membership, pub vid_proposal: ( - Proposal>, + Proposal>, ::SignatureKey, ), pub leader_public_key: ::SignatureKey, diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index e52d8105a9..5e4fc7103f 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -9,9 +9,12 @@ use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::network::{self, NetworkEventTaskState}; use hotshot_testing::test_builder::TestMetadata; use hotshot_testing::view_generator::TestViewGenerator; -use hotshot_types::traits::election::Membership; -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_types::{ + data::ViewNumber, + traits::{ + election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, + }, +}; use std::sync::Arc; use std::time::Duration; diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index 7e9d4c85d6..d084b0002e 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -2,10 +2,12 @@ use hotshot::types::SignatureKey; use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ - data::{DAProposal, VidDisperse, ViewNumber}, - traits::consensus_api::ConsensusApi, + data::{DAProposal, VidDisperse, VidDisperseShare, ViewNumber}, + traits::{ + consensus_api::ConsensusApi, + node_implementation::{ConsensusTime, NodeType}, + }, }; use jf_primitives::vid::VidScheme; use std::collections::HashMap; @@ -60,6 +62,10 @@ async fn test_vid_task() { signature: message.signature.clone(), _pd: PhantomData, }; + let vid_share_proposal = VidDisperseShare::from_vid_disperse(vid_disperse.clone()) + .swap_remove(0) + .to_proposal(handle.private_key()) + .expect("Failed to sign block payload!"); let mut input = Vec::new(); let mut output = HashMap::new(); @@ -77,7 +83,7 @@ async fn test_vid_task() { ViewNumber::new(2), )); input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone())); + input.push(HotShotEvent::VidDisperseRecv(vid_share_proposal.clone())); input.push(HotShotEvent::Shutdown); output.insert( diff --git a/types/src/consensus.rs b/types/src/consensus.rs index aa14eaacdb..666461c708 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -4,7 +4,7 @@ pub use crate::utils::{View, ViewInner}; use displaydoc::Display; use crate::{ - data::{Leaf, VidDisperse}, + data::{Leaf, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, @@ -26,6 +26,12 @@ use tracing::error; /// A type alias for `HashMap, T>` type CommitmentMap = HashMap, T>; +/// A type alias for `BTreeMap>>>` +type VidShares = BTreeMap< + ::Time, + HashMap<::SignatureKey, Proposal>>, +>; + /// A reference to the consensus algorithm /// /// This will contain the state of all rounds. @@ -38,10 +44,7 @@ pub struct Consensus { pub validated_state_map: BTreeMap>, /// All the VID shares we've received for current and future views. - /// In the future we will need a different struct similar to VidDisperse except - /// it stores only one share. - /// TODO - pub vid_shares: BTreeMap>>, + pub vid_shares: VidShares, /// All the DA certs we've received for current and future views. /// view -> DA cert diff --git a/types/src/data.rs b/types/src/data.rs index 0552ec3f4d..9b88f4db92 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -4,6 +4,7 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use crate::{ + message::Proposal, simple_certificate::{ QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, @@ -34,8 +35,10 @@ use std::{ collections::BTreeMap, fmt::{Debug, Display}, hash::Hash, + marker::PhantomData, sync::Arc, }; +use tracing::error; /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. #[derive( @@ -170,6 +173,102 @@ impl VidDisperse { } } +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct VidDisperseShare { + /// The view number for which this VID data is intended + pub view_number: TYPES::Time, + /// Block payload commitment + pub payload_commitment: VidCommitment, + /// A storage node's key and its corresponding VID share + pub share: VidShare, + /// VID common data sent to all storage nodes + pub common: VidCommon, + /// a public key of the share recipient + pub recipient_key: TYPES::SignatureKey, +} + +impl VidDisperseShare { + /// Create a vector of `VidDisperseShare` from `VidDisperse` + pub fn from_vid_disperse(vid_disperse: VidDisperse) -> Vec { + vid_disperse + .shares + .into_iter() + .map(|(recipient_key, share)| VidDisperseShare { + share, + recipient_key, + view_number: vid_disperse.view_number, + common: vid_disperse.common.clone(), + payload_commitment: vid_disperse.payload_commitment, + }) + .collect() + } + + /// Consume `self` and return a `Proposal` + pub fn to_proposal( + self, + private_key: &::PrivateKey, + ) -> Option> { + let Ok(signature) = + TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref().as_ref()) + else { + error!("VID: failed to sign dispersal share payload"); + return None; + }; + Some(Proposal { + signature, + _pd: PhantomData, + data: self, + }) + } + + /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s + pub fn to_vid_disperse<'a, I>(mut it: I) -> Option> + where + I: Iterator>, + { + let first_vid_disperse_share = it.next()?.clone(); + let mut share_map = BTreeMap::new(); + share_map.insert( + first_vid_disperse_share.recipient_key, + first_vid_disperse_share.share, + ); + let mut vid_disperse = VidDisperse { + view_number: first_vid_disperse_share.view_number, + payload_commitment: first_vid_disperse_share.payload_commitment, + common: first_vid_disperse_share.common, + shares: share_map, + }; + let _ = it.map(|vid_disperse_share| { + vid_disperse.shares.insert( + vid_disperse_share.recipient_key.clone(), + vid_disperse_share.share.clone(), + ) + }); + Some(vid_disperse) + } + + pub fn to_vid_share_proposals( + vid_disperse_proposal: Proposal>, + ) -> Vec>> { + vid_disperse_proposal + .data + .shares + .into_iter() + .map(|(recipient_key, share)| Proposal { + data: VidDisperseShare { + share, + recipient_key, + view_number: vid_disperse_proposal.data.view_number, + common: vid_disperse_proposal.data.common.clone(), + payload_commitment: vid_disperse_proposal.data.payload_commitment, + }, + signature: vid_disperse_proposal.signature.clone(), + _pd: vid_disperse_proposal._pd, + }) + .collect() + } +} + /// Proposal to append a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] @@ -209,6 +308,12 @@ impl HasViewNumber for VidDisperse { } } +impl HasViewNumber for VidDisperseShare { + fn get_view_number(&self) -> TYPES::Time { + self.view_number + } +} + impl HasViewNumber for QuorumProposal { fn get_view_number(&self) -> TYPES::Time { self.view_number diff --git a/types/src/message.rs b/types/src/message.rs index 99f29d4907..76d256cf05 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,7 +3,7 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use crate::data::{QuorumProposal, UpgradeProposal}; +use crate::data::{QuorumProposal, UpgradeProposal, VidDisperseShare}; use crate::simple_certificate::{ DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -16,7 +16,7 @@ use crate::traits::network::ResponseMessage; use crate::traits::signature_key::SignatureKey; use crate::vote::HasViewNumber; use crate::{ - data::{DAProposal, VidDisperse}, + data::DAProposal, simple_vote::QuorumVote, traits::{ network::{DataRequest, NetworkMsg, ViewMessage}, @@ -184,9 +184,9 @@ pub enum CommitteeConsensusMessage { /// Initiate VID dispersal. /// - /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from [`VidDisperse`]. + /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. /// TODO this variant should not be a [`CommitteeConsensusMessage`] because - VidDisperseMsg(Proposal>), + VidDisperseMsg(Proposal>), } /// Messages for sequencing consensus. @@ -289,6 +289,8 @@ impl SequencingMessage { #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] +#[allow(clippy::large_enum_variant)] +/// TODO: Put `DataResponse` content in a `Box` to make enum smaller /// Messages related to sending data between nodes pub enum DataMessage { /// Contains a transaction to be submitted diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 07c947c8cb..337bb1ca38 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -20,13 +20,20 @@ use crate::{ }; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; +use futures::future::join_all; use rand::{ distributions::{Bernoulli, Uniform}, prelude::Distribution, }; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeSet, HashMap}, + fmt::Debug, + hash::Hash, + sync::Arc, + time::Duration, +}; use versioned_binary_serialization::version::StaticVersionType; /// for any errors we decide to add to memory network @@ -145,6 +152,8 @@ pub enum NetworkError { UnableToCancel, /// The requested data was not found NotFound, + /// Multiple errors + MultipleErrors { errors: Vec> }, } #[derive(Clone, Debug)] // Storing view number as a u64 to avoid the need TYPES generic @@ -266,6 +275,8 @@ pub enum RequestKind { /// The kind of message `M` is is determined by what we requested #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] +#[allow(clippy::large_enum_variant)] +/// TODO: Put `Found` content in a `Box` to make enum smaller pub enum ResponseMessage { /// Peer returned us some data Found(SequencingMessage), @@ -321,6 +332,33 @@ pub trait ConnectedNetwork: bind_version: VER, ) -> Result<(), NetworkError>; + /// send messages with vid shares to its recipients + /// blocking + async fn vid_broadcast_message( + &self, + messages: HashMap, + bind_version: VER, + ) -> Result<(), NetworkError> { + let future_results = messages.into_iter().map(|(recipient_key, message)| { + self.direct_message(message, recipient_key, bind_version) + }); + let results = join_all(future_results).await; + + let errors: Vec<_> = results + .into_iter() + .filter_map(|r| match r { + Err(error) => Some(Box::new(error)), + _ => None, + }) + .collect(); + + if errors.is_empty() { + Ok(()) + } else { + Err(NetworkError::MultipleErrors { errors }) + } + } + /// Sends a direct message to a specific node /// blocking async fn direct_message( diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index f65f7ea986..eae0514852 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -7,7 +7,7 @@ use anyhow::Result; use async_trait::async_trait; use crate::{ - data::{DAProposal, VidDisperse}, + data::{DAProposal, VidDisperseShare}, event::HotShotAction, message::Proposal, simple_certificate::QuorumCertificate, @@ -18,7 +18,7 @@ use super::node_implementation::NodeType; /// Abstraction for storing a variety of consensus payload datum. #[async_trait] pub trait Storage: Send + Sync + Clone { - async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; + async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; async fn append_da(&self, proposal: &Proposal>) -> Result<()>; async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; async fn update_high_qc(&self, qc: QuorumCertificate) -> Result<()>; From 539d511854babc05057ddef3dd14dc5aaa5afe84 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 21 Mar 2024 10:55:09 -0400 Subject: [PATCH 0882/1393] [CI] Remove toolchain install (#2823) * fix CI by removing toolchain install * fix new lints --- hotshot/src/traits/networking/libp2p_network.rs | 4 +--- .../src/network/behaviours/request_response.rs | 8 ++------ task-impls/src/vote_collection.rs | 4 +--- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index b30a2b8140..b1bb8484a3 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -715,9 +715,7 @@ impl ConnectedNetwork for Libp2p &self, bind_version: VER, ) -> Option)>> { - let Some(mut internal_rx) = self.inner.requests_rx.lock().await.take() else { - return None; - }; + let mut internal_rx = self.inner.requests_rx.lock().await.take()?; let handle = self.inner.handle.clone(); let (mut tx, rx) = mpsc::channel(100); async_spawn(async move { diff --git a/libp2p-networking/src/network/behaviours/request_response.rs b/libp2p-networking/src/network/behaviours/request_response.rs index 8b3887e13c..ecdd2f000a 100644 --- a/libp2p-networking/src/network/behaviours/request_response.rs +++ b/libp2p-networking/src/network/behaviours/request_response.rs @@ -42,9 +42,7 @@ impl RequestResponseState { request_id, response, } => { - let Some(chan) = self.request_map.remove(&request_id) else { - return None; - }; + let chan = self.request_map.remove(&request_id)?; if chan.send(Some(response)).is_err() { tracing::warn!("Failed to send resonse to client, channel closed."); } @@ -57,9 +55,7 @@ impl RequestResponseState { error, } => { tracing::warn!("Error Sending Request {:?}", error); - let Some(chan) = self.request_map.remove(&request_id) else { - return None; - }; + let chan = self.request_map.remove(&request_id)?; if chan.send(None).is_err() { tracing::warn!("Failed to send resonse to client, channel closed."); } diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index b42dc51c1b..1bc98b63a4 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -85,9 +85,7 @@ impl< ); return None; } - let Some(ref mut accumulator) = self.accumulator else { - return None; - }; + let accumulator = self.accumulator.as_mut()?; match accumulator.accumulate(vote, &self.membership) { Either::Left(()) => None, Either::Right(cert) => { From d4d37b7c27b24d28c40adf9fa149f037e3bfeb7b Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 21 Mar 2024 15:57:47 +0100 Subject: [PATCH 0883/1393] Test builder that builds random blocks (#2813) --- testing/Cargo.toml | 1 + testing/src/block_builder.rs | 275 ++++++++++++++++++++++++++------- testing/tests/block_builder.rs | 6 +- 3 files changed, 219 insertions(+), 63 deletions(-) diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 54af33edf7..c446189679 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -39,6 +39,7 @@ snafu = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } versioned-binary-serialization = { workspace = true } +lru = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 1074ea0500..7bafbe35c4 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -1,36 +1,205 @@ -use async_compatibility_layer::art::async_spawn; +use std::{ + num::NonZeroUsize, + ops::{Deref, Range}, + sync::Arc, + time::Duration, +}; + +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; use async_trait::async_trait; use futures::future::BoxFuture; -use hotshot::traits::BlockPayload; -use hotshot::types::SignatureKey; +use hotshot::{traits::BlockPayload, types::SignatureKey}; use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Options}, data_source::BuilderDataSource, }; -use hotshot_example_types::{block_types::TestBlockPayload, node_types::TestTypes}; +use hotshot_example_types::{ + block_types::{TestBlockPayload, TestTransaction}, + node_types::TestTypes, +}; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{block_contents::vid_commitment, node_implementation::NodeType}, utils::BuilderCommitment, vid::VidCommitment, }; +use lru::LruCache; +use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, App, Url}; -/// The only block [`TestableBuilderSource`] provides -const EMPTY_BLOCK: TestBlockPayload = TestBlockPayload { - transactions: vec![], -}; +/// Entry for a built block +struct BlockEntry { + metadata: AvailableBlockInfo, + payload: Option>, + header_input: Option>, +} + +/// Options controlling how the random builder generates blocks +#[derive(Clone, Debug)] +pub struct RandomBuilderOptions { + /// How many transactions to include in a block + pub txn_in_block: u64, + /// How many blocks to generate per second + pub blocks_per_second: u64, + /// Range of how big a transaction can be (in bytes) + pub txn_size: Range, + /// Number of storage nodes for VID commitment + pub num_storage_nodes: usize, +} + +impl Default for RandomBuilderOptions { + fn default() -> Self { + Self { + txn_in_block: 100, + blocks_per_second: 1, + txn_size: 20..100, + num_storage_nodes: 1, + } + } +} /// A mock implementation of the builder data source. -/// "Builds" only empty blocks. -pub struct TestableBuilderSource { - priv_key: <::SignatureKey as SignatureKey>::PrivateKey, +/// Builds random blocks, doesn't track HotShot state at all. +/// Evicts old available blocks if HotShot doesn't keep up. +#[derive(Clone, Debug)] +pub struct RandomBuilderSource { + /// Built blocks + blocks: Arc< + RwLock< + // Isn't strictly speaking used as a cache, + // just as a HashMap that evicts old blocks + LruCache, + >, + >, pub_key: ::SignatureKey, + priv_key: <::SignatureKey as SignatureKey>::PrivateKey, +} + +impl RandomBuilderSource { + /// Create new [`RandomBuilderSource`] + #[must_use] + #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 + pub fn new( + pub_key: ::SignatureKey, + priv_key: <::SignatureKey as SignatureKey>::PrivateKey, + ) -> Self { + Self { + blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), + priv_key, + pub_key, + } + } + + /// Spawn a task building blocks, configured with given options + #[allow(clippy::missing_panics_doc)] // ony panics on 16-bit platforms + pub fn run(&self, options: RandomBuilderOptions) { + let blocks = self.blocks.clone(); + let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key); + async_spawn(async move { + let mut rng = SmallRng::from_entropy(); + let time_per_block = Duration::from_millis(1000 / options.blocks_per_second); + loop { + let start = std::time::Instant::now(); + let transactions = (0..options.txn_in_block) + .map(|_| { + let mut bytes = vec![ + 0; + rng.gen_range(options.txn_size.clone()) + .try_into() + .expect("We are NOT running on a 16-bit platform") + ]; + rng.fill_bytes(&mut bytes); + TestTransaction(bytes) + }) + .collect::>(); + let block_size = transactions.iter().map(|t| t.0.len() as u64).sum::(); + + let block_payload = TestBlockPayload { transactions }; + let commitment = block_payload.builder_commitment(&()); + let vid_commitment = vid_commitment( + &block_payload.encode().unwrap().collect(), + options.num_storage_nodes, + ); + + let signature_over_block_info = { + let mut block_info: Vec = Vec::new(); + block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); + block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); + block_info.extend_from_slice(commitment.as_ref()); + match ::SignatureKey::sign(&priv_key, &block_info) { + Ok(sig) => sig, + Err(e) => { + tracing::error!(error = %e, "Failed to sign block"); + continue; + } + } + }; + + let signature_over_builder_commitment = + match ::SignatureKey::sign( + &priv_key, + commitment.as_ref(), + ) { + Ok(sig) => sig, + Err(e) => { + tracing::error!(error = %e, "Failed to sign block"); + continue; + } + }; + + let signature_over_vid_commitment = + match ::SignatureKey::sign( + &priv_key, + vid_commitment.as_ref(), + ) { + Ok(sig) => sig, + Err(e) => { + tracing::error!(error = %e, "Failed to sign block"); + continue; + } + }; + + let block = AvailableBlockData { + block_payload, + metadata: (), + sender: pub_key, + signature: signature_over_block_info, + _phantom: std::marker::PhantomData, + }; + let metadata = AvailableBlockInfo { + sender: pub_key, + signature: signature_over_builder_commitment, + block_hash: commitment, + block_size, + offered_fee: 123, + _phantom: std::marker::PhantomData, + }; + let header_input = AvailableBlockHeaderInput { + vid_commitment, + signature: signature_over_vid_commitment, + sender: pub_key, + _phantom: std::marker::PhantomData, + }; + if let Some((hash, _)) = blocks.write().await.push( + metadata.block_hash.clone(), + BlockEntry { + metadata, + payload: Some(block), + header_input: Some(header_input), + }, + ) { + tracing::warn!("Block {} evicted", hash); + }; + async_sleep(time_per_block - start.elapsed()).await; + } + }); + } } #[async_trait] -impl ReadState for TestableBuilderSource { +impl ReadState for RandomBuilderSource { type State = Self; async fn read( @@ -42,23 +211,19 @@ impl ReadState for TestableBuilderSource { } #[async_trait] -impl BuilderDataSource for TestableBuilderSource { +impl BuilderDataSource for RandomBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, ) -> Result>, BuildError> { - Ok(vec![AvailableBlockInfo { - sender: self.pub_key, - signature: ::SignatureKey::sign( - &self.priv_key, - EMPTY_BLOCK.builder_commitment(&()).as_ref(), - ) - .unwrap(), - block_hash: EMPTY_BLOCK.builder_commitment(&()), - block_size: 0, - offered_fee: 1, - _phantom: std::marker::PhantomData, - }]) + Ok(self + .blocks + .deref() + .read() + .await + .iter() + .map(|(_, BlockEntry { metadata, .. })| metadata.clone()) + .collect()) } async fn claim_block( @@ -66,21 +231,14 @@ impl BuilderDataSource for TestableBuilderSource { block_hash: &BuilderCommitment, _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuildError> { - if block_hash == &EMPTY_BLOCK.builder_commitment(&()) { - Ok(AvailableBlockData { - block_payload: EMPTY_BLOCK, - metadata: (), - signature: ::SignatureKey::sign( - &self.priv_key, - EMPTY_BLOCK.builder_commitment(&()).as_ref(), - ) - .unwrap(), - sender: self.pub_key, - _phantom: std::marker::PhantomData, - }) - } else { - Err(BuildError::Missing) - } + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + let payload = entry.payload.take().ok_or(BuildError::Missing)?; + // Check if header input is claimed as well, if yes, then evict block + if entry.header_input.is_none() { + blocks.pop(block_hash); + }; + Ok(payload) } async fn claim_block_header_input( @@ -88,20 +246,14 @@ impl BuilderDataSource for TestableBuilderSource { block_hash: &BuilderCommitment, _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuildError> { - if block_hash == &EMPTY_BLOCK.builder_commitment(&()) { - Ok(AvailableBlockHeaderInput { - vid_commitment: vid_commitment(&vec![], 1), - signature: ::SignatureKey::sign( - &self.priv_key, - EMPTY_BLOCK.builder_commitment(&()).as_ref(), - ) - .unwrap(), - sender: self.pub_key, - _phantom: std::marker::PhantomData, - }) - } else { - Err(BuildError::Missing) - } + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + let header_input = entry.header_input.take().ok_or(BuildError::Missing)?; + // Check if payload is claimed as well, if yes, then evict block + if entry.payload.is_none() { + blocks.pop(block_hash); + }; + Ok(header_input) } } @@ -109,16 +261,19 @@ impl BuilderDataSource for TestableBuilderSource { /// /// # Panics /// If constructing and launching the builder fails for any reason -pub fn run_builder(url: Url) { +pub fn run_random_builder(url: Url) { + let (pub_key, priv_key) = + ::SignatureKey::generated_from_seed_indexed([1; 32], 0); + let source = RandomBuilderSource::new(pub_key, priv_key); + source.run(RandomBuilderOptions::default()); + let builder_api = - hotshot_builder_api::builder::define_api::( + hotshot_builder_api::builder::define_api::( &Options::default(), ) .expect("Failed to construct the builder API"); - let (pub_key, priv_key) = - ::SignatureKey::generated_from_seed_indexed([1; 32], 0); - let mut app: App = - App::with_state(TestableBuilderSource { priv_key, pub_key }); + let mut app: App = + App::with_state(source); app.register_module("/", builder_api) .expect("Failed to register the builder API"); diff --git a/testing/tests/block_builder.rs b/testing/tests/block_builder.rs index 5146c4534d..8cfb29664f 100644 --- a/testing/tests/block_builder.rs +++ b/testing/tests/block_builder.rs @@ -3,7 +3,7 @@ use hotshot_example_types::{ node_types::TestTypes, }; use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; -use hotshot_testing::block_builder::run_builder; +use hotshot_testing::block_builder::run_random_builder; use hotshot_types::traits::BlockPayload; use hotshot_types::traits::{ block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, @@ -23,7 +23,7 @@ async fn test_block_builder() { let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - run_builder(api_url.clone()); + run_random_builder(api_url.clone()); let client: BuilderClient = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); @@ -34,7 +34,7 @@ async fn test_block_builder() { .await .expect("Failed to get avaliable blocks"); - assert_eq!(blocks.len(), 1); + assert!(!blocks.is_empty()); // Test claiming available block let signature = { From 8e229170523170bfa6104323fa075216f67a5c19 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:42:56 -0400 Subject: [PATCH 0884/1393] Temporarily disable block builder test (#2824) --- task-impls/src/builder.rs | 2 +- testing/tests/block_builder.rs | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 92ae382df6..1878b5f3cc 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -97,7 +97,7 @@ where /// # Errors /// - [`BuilderClientError::NotFound`] if blocks aren't available for this parent /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly - pub async fn get_avaliable_blocks( + pub async fn get_available_blocks( &self, parent: VidCommitment, ) -> Result, BuilderClientError> { diff --git a/testing/tests/block_builder.rs b/testing/tests/block_builder.rs index 8cfb29664f..3246cd366c 100644 --- a/testing/tests/block_builder.rs +++ b/testing/tests/block_builder.rs @@ -11,6 +11,7 @@ use hotshot_types::traits::{ use std::time::Duration; use tide_disco::Url; +#[ignore] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", @@ -30,9 +31,9 @@ async fn test_block_builder() { // Test getting blocks let mut blocks = client - .get_avaliable_blocks(vid_commitment(&vec![], 1)) + .get_available_blocks(vid_commitment(&vec![], 1)) .await - .expect("Failed to get avaliable blocks"); + .expect("Failed to get available blocks"); assert!(!blocks.is_empty()); From 4782cd73060360af80dc0adee8e733b2ea44ac70 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 21 Mar 2024 15:24:22 -0400 Subject: [PATCH 0885/1393] [CX_CLEANUP] - Change Consensus Task State to Use An Enum Type for Timeout and View Sync Certs (#2810) * incomplete migration to either type * split either type at the end of the proposal * switch to enum type * change type to use combined view change evidence, move type location, update tests * disambiguate messaging, fix tests * do not check stale timeout cert * add additional proposal checks and improve proposal recv check * do not store inbound timeout proposal * fix build for hopefully the last time --- hotshot/src/tasks/task_state.rs | 3 +- task-impls/src/consensus.rs | 135 +++++++++++++++----------------- testing/src/task_helpers.rs | 6 +- testing/src/view_generator.rs | 14 ++-- testing/tests/consensus_task.rs | 92 ++++++++++++---------- types/src/data.rs | 29 +++++-- 6 files changed, 152 insertions(+), 127 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index af639bfc36..e59b5d5a6d 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -189,9 +189,8 @@ impl> CreateTaskState vote_collector: None.into(), timeout_vote_collector: None.into(), timeout_task: None, - timeout_cert: None, upgrade_cert: None, - view_sync_cert: None, + proposal_cert: None, decided_upgrade_cert: None, current_network_version: VERSION_0_1, output_event_stream: handle.hotshot.output_event_stream.0.clone(), diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f877e58daa..a553b70d3b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -11,16 +11,13 @@ use async_std::task::JoinHandle; use commit::Committable; use core::time::Duration; use hotshot_task::task::{Task, TaskState}; -use hotshot_types::constants::LOOK_AHEAD; use hotshot_types::event::LeafInfo; use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, QuorumProposal}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::{ - QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, - }, + simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, @@ -37,6 +34,7 @@ use hotshot_types::{ vid::VidCommitment, vote::{Certificate, HasViewNumber}, }; +use hotshot_types::{constants::LOOK_AHEAD, data::ViewChangeEvidence}; use tracing::warn; use versioned_binary_serialization::version::Version; @@ -116,15 +114,11 @@ pub struct ConsensusTaskState< /// timeout task handle pub timeout_task: Option>, - /// last Timeout Certificate this node formed - pub timeout_cert: Option>, - /// last Upgrade Certificate this node formed pub upgrade_cert: Option>, - // TODO: Merge view sync and timeout certs: https://github.com/EspressoSystems/HotShot/issues/2767 - /// last View Sync Certificate this node formed - pub view_sync_cert: Option>, + /// last View Sync Certificate or Timeout Certificate this node formed. + pub proposal_cert: Option>, /// most recent decided upgrade certificate pub decided_upgrade_cert: Option>, @@ -374,8 +368,6 @@ impl, A: ConsensusApi + *proposal.data.view_number ); - debug!("Received proposal {:?}", proposal); - // stop polling for the received proposal self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( @@ -397,31 +389,35 @@ impl, A: ConsensusApi + // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.get_view_number() != view - 1 { - // Do we have a timeout certificate at all? - if let Some(timeout_cert) = proposal.data.timeout_certificate.clone() { - if timeout_cert.get_data().view != view - 1 { - warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); - return; - } + if let Some(received_proposal_cert) = proposal.data.proposal_certificate.clone() + { + match received_proposal_cert { + ViewChangeEvidence::Timeout(timeout_cert) => { + if timeout_cert.get_data().view != view - 1 { + warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); + return; + } - if !timeout_cert.is_valid_cert(self.timeout_membership.as_ref()) { - warn!("Timeout certificate for view {} was invalid", *view); - return; - } - } else if let Some(cert) = &self.view_sync_cert { - // View sync certs _must_ be for the current view. - if cert.view_number != view { - debug!( - "Cert view number {:?} does not match proposal view number {:?}", - cert.view_number, view - ); - return; - } + if !timeout_cert.is_valid_cert(self.timeout_membership.as_ref()) { + warn!("Timeout certificate for view {} was invalid", *view); + return; + } + } + ViewChangeEvidence::ViewSync(view_sync_cert) => { + if view_sync_cert.view_number != view { + debug!( + "Cert view number {:?} does not match proposal view number {:?}", + view_sync_cert.view_number, view + ); + return; + } - // View sync certs must also be valid. - if !cert.is_valid_cert(self.quorum_membership.as_ref()) { - debug!("Invalid ViewSyncFinalize cert provided"); - return; + // View sync certs must also be valid. + if !view_sync_cert.is_valid_cert(self.quorum_membership.as_ref()) { + debug!("Invalid ViewSyncFinalize cert provided"); + return; + } + } } } else { warn!( @@ -429,8 +425,6 @@ impl, A: ConsensusApi + *view); return; }; - - // If we have a ViewSyncFinalize cert, only vote if it is valid. } let justify_qc = proposal.data.justify_qc.clone(); @@ -583,7 +577,7 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, None, &event_stream) + self.publish_proposal_if_able(qc.view_number + 1, &event_stream) .await; } if self.vote_if_able(&event_stream).await { @@ -852,7 +846,7 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, None, &event_stream) + self.publish_proposal_if_able(qc.view_number + 1, &event_stream) .await; } @@ -958,7 +952,7 @@ impl, A: ConsensusApi + debug!("QC Formed event happened!"); if let either::Right(qc) = cert.clone() { - self.timeout_cert = Some(qc.clone()); + self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); // cancel poll for votes self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( @@ -973,11 +967,7 @@ impl, A: ConsensusApi + let view = qc.view_number + 1; - if self - .publish_proposal_if_able(view, Some(qc.clone()), &event_stream) - .await - { - } else { + if !self.publish_proposal_if_able(view, &event_stream).await { warn!("Wasn't able to publish proposal"); } } @@ -999,7 +989,7 @@ impl, A: ConsensusApi + ); if !self - .publish_proposal_if_able(qc.view_number + 1, None, &event_stream) + .publish_proposal_if_able(qc.view_number + 1, &event_stream) .await { debug!( @@ -1206,25 +1196,25 @@ impl, A: ConsensusApi + if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view { - self.publish_proposal_if_able(view, None, &event_stream) - .await; + self.publish_proposal_if_able(view, &event_stream).await; } - if let Some(tc) = self.timeout_cert.as_ref() { - if self.quorum_membership.get_leader(tc.get_view_number() + 1) - == self.public_key - { - self.publish_proposal_if_able( - view, - self.timeout_cert.clone(), - &event_stream, - ) - .await; - } - } else if let Some(vsc) = self.view_sync_cert.as_ref() { - if self.quorum_membership.get_leader(vsc.get_view_number()) == self.public_key { - self.publish_proposal_if_able(view, None, &event_stream) - .await; + if let Some(cert) = &self.proposal_cert { + match cert { + ViewChangeEvidence::Timeout(tc) => { + if self.quorum_membership.get_leader(tc.get_view_number() + 1) + == self.public_key + { + self.publish_proposal_if_able(view, &event_stream).await; + } + } + ViewChangeEvidence::ViewSync(vsc) => { + if self.quorum_membership.get_leader(vsc.get_view_number()) + == self.public_key + { + self.publish_proposal_if_able(view, &event_stream).await; + } + } } } } @@ -1237,7 +1227,7 @@ impl, A: ConsensusApi + return; } - self.view_sync_cert = Some(certificate.clone()); + self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); // cancel poll for votes self.quorum_network @@ -1253,8 +1243,7 @@ impl, A: ConsensusApi + "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", *certificate.view_number ); - self.publish_proposal_if_able(view, None, &event_stream) - .await; + self.publish_proposal_if_able(view, &event_stream).await; } } _ => {} @@ -1266,7 +1255,6 @@ impl, A: ConsensusApi + pub async fn publish_proposal_if_able( &mut self, view: TYPES::Time, - timeout_certificate: Option>, event_stream: &Sender>>, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { @@ -1373,19 +1361,24 @@ impl, A: ConsensusApi + None }; + // We only want to proposal to be attached if any of them are valid. + let proposal_certificate = self + .proposal_cert + .as_ref() + .filter(|cert| cert.is_valid_for_view(&view)) + .cloned(); + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { block_header, view_number: leaf.view_number, justify_qc: consensus.high_qc.clone(), - timeout_certificate: timeout_certificate.or_else(|| None), + proposal_certificate, upgrade_certificate: upgrade_cert, - view_sync_certificate: self.view_sync_cert.clone(), proposer_id: leaf.proposer_id, }; - self.timeout_cert = None; - self.view_sync_cert = None; + self.proposal_cert = None; let message = Proposal { data: proposal, signature, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 2ae275eff2..24ff21f9eb 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -266,9 +266,8 @@ async fn build_quorum_proposal_and_signature( block_header: block_header.clone(), view_number: ViewNumber::new(1), justify_qc: QuorumCertificate::genesis(), - timeout_certificate: None, upgrade_certificate: None, - view_sync_certificate: None, + proposal_certificate: None, proposer_id: leaf.proposer_id, }; @@ -326,9 +325,8 @@ async fn build_quorum_proposal_and_signature( block_header: block_header.clone(), view_number: ViewNumber::new(cur_view), justify_qc: created_qc, - timeout_certificate: None, upgrade_certificate: None, - view_sync_certificate: None, + proposal_certificate: None, proposer_id: leaf_new_view.clone().proposer_id, }; proposal = proposal_new_view; diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 799af75a11..612ff5ae31 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -15,7 +15,7 @@ use commit::Committable; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, VidDisperseShare, ViewNumber}, + data::{DAProposal, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence, ViewNumber}, message::Proposal, simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -90,9 +90,8 @@ impl TestView { block_header: block_header.clone(), view_number: genesis_view, justify_qc: QuorumCertificate::genesis(), - timeout_certificate: None, upgrade_certificate: None, - view_sync_certificate: None, + proposal_certificate: None, proposer_id: public_key, }; @@ -266,6 +265,12 @@ impl TestView { None }; + let proposal_certificate = if let Some(tc) = timeout_certificate { + Some(ViewChangeEvidence::Timeout(tc)) + } else { + view_sync_certificate.map(ViewChangeEvidence::ViewSync) + }; + let block_header = TestBlockHeader { block_number: *next_view, timestamp: *next_view, @@ -292,9 +297,8 @@ impl TestView { block_header: block_header.clone(), view_number: next_view, justify_qc: quorum_certificate.clone(), - timeout_certificate, upgrade_certificate, - view_sync_certificate, + proposal_certificate, proposer_id: public_key, }; diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 68f917326a..bc9a70758a 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -5,13 +5,18 @@ use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*} use hotshot_testing::task_helpers::key_pair_for_id; use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_testing::{ - predicates::{exact, is_at_view_number, quorum_proposal_send, quorum_vote_send}, + predicates::{ + exact, is_at_view_number, quorum_proposal_send, quorum_vote_send, timeout_vote_send, + }, script::{run_test_script, TestScriptStage}, task_helpers::{build_system_handle, vid_scheme_from_view_number}, view_generator::TestViewGenerator, }; -use hotshot_types::simple_vote::ViewSyncFinalizeData; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_types::simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}; +use hotshot_types::{ + data::{ViewChangeEvidence, ViewNumber}, + traits::node_implementation::ConsensusTime, +}; use jf_primitives::vid::VidScheme; #[cfg(test)] @@ -243,16 +248,6 @@ async fn test_consensus_vote_with_permuted_dac() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; - use hotshot_testing::{ - predicates::{exact, is_at_view_number, quorum_proposal_send, timeout_vote_send}, - script::{run_test_script, TestScriptStage}, - task_helpers::{build_system_handle, vid_scheme_from_view_number}, - view_generator::TestViewGenerator, - }; - use hotshot_types::simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -324,7 +319,10 @@ async fn test_view_sync_finalize_propose() { }; // Handle the view sync finalize cert, get the requisite data, propose. - let cert = proposals[1].data.view_sync_certificate.clone().unwrap(); + let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { + ViewChangeEvidence::ViewSync(vsc) => vsc, + _ => panic!("Found a TC when there should have been a view sync cert"), + }; // Generate the timeout votes for the timeouts that just occurred. let timeout_vote_view_2 = TimeoutVote::create_signed_vote( @@ -448,7 +446,10 @@ async fn test_view_sync_finalize_vote() { // assumption. // Try to view sync at view 4. - let view_sync_cert = proposals[3].data.view_sync_certificate.clone().unwrap(); + let cert = match proposals[3].data.proposal_certificate.clone().unwrap() { + ViewChangeEvidence::ViewSync(vsc) => vsc, + _ => panic!("Found a TC when there should have been a view sync cert"), + }; // Highest qc so far is actually from view 1, so re-assign proposal 0 to the slot of proposal // 3. @@ -458,7 +459,7 @@ async fn test_view_sync_finalize_vote() { let view_3 = TestScriptStage { inputs: vec![ // Multiple timeouts in a row, so we call for a view sync - ViewSyncFinalizeCertificate2Recv(view_sync_cert), + ViewSyncFinalizeCertificate2Recv(cert), // Receive a proposal for view 4, but with the highest qc being from view 1. QuorumProposalRecv(proposals[0].clone(), leaders[0]), ], @@ -488,6 +489,8 @@ async fn test_view_sync_finalize_vote() { /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task /// will NOT vote when the certificate matches a different view number. async fn test_view_sync_finalize_vote_fail_view_number() { + use hotshot_testing::predicates::timeout_vote_send; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -495,7 +498,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { - relay: 10, + relay: 4, round: ViewNumber::new(10), }; @@ -505,7 +508,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let mut votes = Vec::new(); let mut vids = Vec::new(); let mut dacs = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(3) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -539,39 +542,48 @@ async fn test_view_sync_finalize_vote_fail_view_number() { }; let view_2 = TestScriptStage { - inputs: vec![ - VidDisperseRecv(vids[1].0.clone()), - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - DACRecv(dacs[1].clone()), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), - exact(QuorumVoteSend(votes[1].clone())), - ], - asserts: vec![is_at_view_number(2)], + inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], + outputs: vec![timeout_vote_send(), timeout_vote_send()], + // Times out, so we now have a delayed view + asserts: vec![is_at_view_number(1)], }; - let mut cert = proposals[2].data.view_sync_certificate.clone().unwrap(); + // Now we're on the latest view. We want to set the quorum + // certificate to be the previous highest QC (before the timeouts). This will be distinct from + // the view sync cert, which is saying "hey, I'm _actually_ at view 4, but my highest QC is + // only for view 1." This forces the QC to be for view 1, and we can move on under this + // assumption. - // Trigger the timeout cert check - proposals[2].data.justify_qc.view_number = ViewNumber::new(1); + let mut cert = match proposals[3].data.proposal_certificate.clone().unwrap() { + ViewChangeEvidence::ViewSync(vsc) => vsc, + _ => panic!("Found a TC when there should have been a view sync cert"), + }; - // Overwrite the cert view number with something invalid to force the failure. This should - // result in the vote NOT being sent below in the outputs. + // Force this to fail by making the cert happen for a view we've never seen. This will + // intentionally skip the proposal for this node so we can get the proposal and fail to vote. cert.view_number = ViewNumber::new(10); + + // Highest qc so far is actually from view 1, so re-assign proposal 0 to the slot of proposal + // 3. + proposals[0].data.proposer_id = proposals[3].data.proposer_id; + + // We introduce an error by setting a different view number as well, this makes the task check + // for a view sync or timeout cert. This value could be anything as long as it is not the + // previous view number. + proposals[0].data.justify_qc.view_number = proposals[3].data.justify_qc.view_number; + + // Now at view 3 we receive the proposal received response. let view_3 = TestScriptStage { inputs: vec![ + // Multiple timeouts in a row, so we call for a view sync ViewSyncFinalizeCertificate2Recv(cert), - QuorumProposalRecv(proposals[2].clone(), leaders[2]), - VidDisperseRecv(vids[2].0.clone()), - DACRecv(dacs[2].clone()), + // Receive a proposal for view 4, but with the highest qc being from view 1. + QuorumProposalRecv(proposals[0].clone(), leaders[0]), ], outputs: vec![ - /* The entire thing dies */ + /* No outputs make it through. We never got a valid proposal, so we never vote */ ], - // We are unable to move to the next view. - asserts: vec![is_at_view_number(2)], + asserts: vec![is_at_view_number(1)], }; let consensus_state = ConsensusTaskState::< diff --git a/types/src/data.rs b/types/src/data.rs index 9b88f4db92..1b287e2122 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -173,6 +173,26 @@ impl VidDisperse { } } +/// Helper type to encapsulate the various ways that proposal certificates can be captured and +/// stored. +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum ViewChangeEvidence { + /// Holds a timeout certificate. + Timeout(TimeoutCertificate), + /// Holds a view sync finalized certificate. + ViewSync(ViewSyncFinalizeCertificate2), +} + +impl ViewChangeEvidence { + pub fn is_valid_for_view(&self, view: &TYPES::Time) -> bool { + match self { + ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.get_data().view == *view - 1, + ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, + } + } +} + #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct VidDisperseShare { /// The view number for which this VID data is intended @@ -282,15 +302,14 @@ pub struct QuorumProposal { /// Per spec, justification pub justify_qc: QuorumCertificate, - /// Possible timeout certificate. Only present if the justify_qc is not for the preceding view - pub timeout_certificate: Option>, - /// Possible upgrade certificate, which the leader may optionally attach. pub upgrade_certificate: Option>, - /// Possible view sync certificate. Only present if the justify_qc and timeout_cert are not + /// Possible timeout or view sync certificate. + /// - A timeout certificate is only present if the justify_qc is not for the preceding view + /// - A view sync certificate is only present if the justify_qc and timeout_cert are not /// present. - pub view_sync_certificate: Option>, + pub proposal_certificate: Option>, /// the propser id pub proposer_id: TYPES::SignatureKey, From e15cc228f911bcb39f03cf9918c76568af70dec6 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:39:52 -0400 Subject: [PATCH 0886/1393] [Push CDN] Add whitelist update binary (#2815) * update pcdn, add whitelist adapter * remove capnproto dependency * remove self hosted ARM * add self-hosted arm back * use other orchestrator function --- examples/Cargo.toml | 6 +- examples/push-cdn/all.rs | 18 ++-- examples/push-cdn/broker.rs | 11 +-- examples/push-cdn/marshal.rs | 8 +- examples/push-cdn/types.rs | 3 +- examples/push-cdn/whitelist-adapter.rs | 91 +++++++++++++++++++ hotshot/Cargo.toml | 3 +- hotshot/src/traits.rs | 2 +- .../src/traits/networking/push_cdn_network.rs | 52 +++++++++-- orchestrator/run-config.toml | 9 +- orchestrator/src/client.rs | 37 ++++++-- orchestrator/src/lib.rs | 2 +- 12 files changed, 186 insertions(+), 56 deletions(-) create mode 100644 examples/push-cdn/whitelist-adapter.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fb21ed3a64..0ca6d4cd82 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -96,6 +96,10 @@ path = "push-cdn/broker.rs" name = "marshal-push-cdn" path = "push-cdn/marshal.rs" +[[example]] +name = "whitelist-push-cdn" +path = "push-cdn/whitelist-adapter.rs" + [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -137,7 +141,6 @@ cdn-broker = { workspace = true, features = [ "insecure", "runtime-tokio", "strong_consistency", - "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } @@ -148,7 +151,6 @@ cdn-broker = { workspace = true, features = [ "insecure", "runtime-async-std", "strong_consistency", - "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index e513319947..0f9b7e09f3 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -5,11 +5,10 @@ pub mod types; use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; use async_compatibility_layer::art::async_spawn; -use cdn_broker::reexports::connection::protocols::{Quic, Tcp}; use cdn_broker::reexports::crypto::signature::KeyPair; use cdn_broker::Broker; use cdn_marshal::Marshal; -use hotshot::traits::implementations::WrappedSignatureKey; +use hotshot::traits::implementations::{TestingDef, WrappedSignatureKey}; use hotshot::types::SignatureKey; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; @@ -79,12 +78,8 @@ async fn main() { // Create and spawn the broker async_spawn(async move { - let broker: Broker< - WrappedSignatureKey<::SignatureKey>, - WrappedSignatureKey<::SignatureKey>, - Tcp, - Quic, - > = Broker::new(config).await.expect("broker failed to start"); + let broker: Broker> = + Broker::new(config).await.expect("broker failed to start"); // Error if we stopped unexpectedly if let Err(err) = broker.start().await { @@ -106,10 +101,9 @@ async fn main() { // Spawn the marshal async_spawn(async move { - let marshal: Marshal::SignatureKey>, Quic> = - Marshal::new(marshal_config) - .await - .expect("failed to spawn marshal"); + let marshal: Marshal> = Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); // Error if we stopped unexpectedly if let Err(err) = marshal.start().await { diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 2c545b1ba4..cc83d7ea2b 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -2,10 +2,9 @@ //! a `Broker` object. use anyhow::{Context, Result}; -use cdn_broker::reexports::connection::protocols::{Quic, Tcp}; use cdn_broker::{reexports::crypto::signature::KeyPair, Broker, Config, ConfigBuilder}; use clap::Parser; -use hotshot::traits::implementations::WrappedSignatureKey; +use hotshot::traits::implementations::{ProductionDef, WrappedSignatureKey}; use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; @@ -80,13 +79,7 @@ async fn main() -> Result<()> { // Create new `Broker` // Uses TCP from broker connections and Quic for user connections. - let broker = Broker::< - WrappedSignatureKey<::SignatureKey>, - WrappedSignatureKey<::SignatureKey>, - Tcp, - Quic, - >::new(broker_config) - .await?; + let broker = Broker::>::new(broker_config).await?; // Start the main loop, consuming it broker.start().await?; diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index 22472abe09..6150542a78 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -2,12 +2,10 @@ //! a `Marshal` object with the `HotShot` types. //! use anyhow::{Context, Result}; -use cdn_broker::reexports::connection::protocols::Quic; use cdn_marshal::{ConfigBuilder, Marshal}; use clap::Parser; -use hotshot::traits::implementations::WrappedSignatureKey; +use hotshot::traits::implementations::ProductionDef; use hotshot_example_types::node_types::TestTypes; -use hotshot_types::traits::node_implementation::NodeType; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -41,9 +39,7 @@ async fn main() -> Result<()> { .with_context(|| "failed to build Marshal config")?; // Create new `Marshal` from the config - let marshal = - Marshal::::SignatureKey>, Quic>::new(config) - .await?; + let marshal = Marshal::>::new(config).await?; // Start the main loop, consuming it marshal.start().await?; diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 404b3f0b4b..5f488d740c 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -1,7 +1,6 @@ use crate::infra::PushCdnDaRun; -use hotshot::traits::implementations::PushCdnNetwork; +use hotshot::traits::{implementations::PushCdnNetwork, NodeImplementation}; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; -use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; #[derive(Clone, Deserialize, Serialize, Hash, PartialEq, Eq)] diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs new file mode 100644 index 0000000000..9ecd134b96 --- /dev/null +++ b/examples/push-cdn/whitelist-adapter.rs @@ -0,0 +1,91 @@ +//! The whitelist is an adaptor that is able to update the allowed public keys for +//! all brokers. Right now, we do this by asking the orchestrator for the list of +//! allowed public keys. In the future, we will pull the stake table from the L1. + +use std::str::FromStr; +use std::sync::Arc; + +use anyhow::Context; +use anyhow::Result; +use cdn_broker::reexports::discovery::DiscoveryClient; +use cdn_broker::reexports::discovery::{Embedded, Redis}; +use clap::Parser; +use hotshot_example_types::node_types::TestTypes; +use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_orchestrator::client::ValidatorArgs; +use hotshot_orchestrator::config::NetworkConfig; +use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::signature_key::SignatureKey; +use surf_disco::Url; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +/// The main component of the push CDN. +struct Args { + /// The discovery client endpoint (including scheme) to connect to. + /// With the local discovery feature, this is a file path. + /// With the remote (redis) discovery feature, this is a redis URL (e.g. `redis://127.0.0.1:6789`). + #[arg(short, long)] + discovery_endpoint: String, + + /// The URL the orchestrator is running on. This should be something like `http://localhost:5555` + #[arg(short, long)] + orchestrator_url: String, + + /// Whether or not to use the local discovery client + #[arg(short, long)] + local_discovery: bool, +} + +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +async fn main() -> Result<()> { + // Parse the command line arguments + let args = Args::parse(); + + // Initialize tracing + tracing_subscriber::fmt::init(); + + // Create a new `OrchestratorClient` from the supplied URL + let orchestrator_client = OrchestratorClient::new( + ValidatorArgs { + url: Url::from_str(&args.orchestrator_url).with_context(|| "Invalid URL")?, + public_ip: None, + network_config_file: None, + }, + "whitelist-adapter".to_string(), + ); + + // Attempt to get the config from the orchestrator. + // Loops internally until the config is received. + let config: NetworkConfig< + ::SignatureKey, + ::ElectionConfigType, + > = orchestrator_client.get_config_after_collection().await; + + tracing::info!("Received config from orchestrator"); + + // Extrapolate the state_ver_keys from the config and convert them to a compatible format + let whitelist = config + .config + .known_nodes_with_stake + .iter() + .map(|k| Arc::from(k.stake_table_entry.stake_key.to_bytes())) + .collect(); + + if args.local_discovery { + ::new(args.discovery_endpoint, None) + .await? + .set_whitelist(whitelist) + .await?; + } else { + ::new(args.discovery_endpoint, None) + .await? + .set_whitelist(whitelist) + .await?; + } + + tracing::info!("Posted config to discovery endpoint"); + + Ok(()) +} diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 21e1d81e92..ea4fbd6d76 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -51,6 +51,7 @@ surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } versioned-binary-serialization = { workspace = true } +jf-primitives.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } @@ -59,7 +60,6 @@ cdn-broker = { workspace = true, features = [ "insecure", "runtime-tokio", "strong_consistency", - "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } @@ -70,7 +70,6 @@ cdn-broker = { workspace = true, features = [ "insecure", "runtime-async-std", "strong_consistency", - "local_discovery", ] } cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index c4d493acb4..d37ed8938f 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,7 +13,7 @@ pub mod implementations { combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryNetwork}, - push_cdn_network::{PushCdnNetwork, WrappedSignatureKey}, + push_cdn_network::{ProductionDef, PushCdnNetwork, TestingDef, WrappedSignatureKey}, web_server_network::WebServerNetwork, NetworkingMetricsValue, }; diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 8933583439..fcc254e511 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -3,6 +3,8 @@ use async_compatibility_layer::art::{async_block_on, async_spawn}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use bincode::config::Options; +use cdn_broker::reexports::def::RunDef; +use cdn_broker::reexports::discovery::{Embedded, Redis}; use cdn_broker::{ reexports::connection::protocols::Tcp, Broker, Config, ConfigBuilder as BrokerConfigBuilder, }; @@ -34,6 +36,7 @@ use hotshot_types::{ }; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; +use std::marker::PhantomData; use std::{collections::BTreeSet, path::Path, sync::Arc, time::Duration}; use tracing::{error, warn}; use versioned_binary_serialization::{ @@ -81,6 +84,40 @@ impl Serializable for WrappedSignatureKey { } } +/// The testing run definition for the Push CDN. +/// Uses the real protocols, but with an embedded discovery client. +pub struct TestingDef { + /// Phantom data to hold the type + pd: PhantomData, +} + +impl RunDef for TestingDef { + type BrokerScheme = WrappedSignatureKey; + type BrokerProtocol = Tcp; + + type UserScheme = WrappedSignatureKey; + type UserProtocol = Quic; + + type DiscoveryClientType = Embedded; +} + +/// The production run definition for the Push CDN. +/// Uses the real protocols and a Redis discovery client. +pub struct ProductionDef { + /// Phantom data to hold the type + pd: PhantomData, +} + +impl RunDef for ProductionDef { + type BrokerScheme = WrappedSignatureKey; + type BrokerProtocol = Tcp; + + type UserScheme = WrappedSignatureKey; + type UserProtocol = Quic; + + type DiscoveryClientType = Redis; +} + /// A communication channel to the Push CDN, which is a collection of brokers and a marshal /// that helps organize them all. #[derive(Clone)] @@ -211,12 +248,8 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Create and spawn the broker async_spawn(async move { - let broker: Broker< - WrappedSignatureKey, - WrappedSignatureKey, - Tcp, - Quic, - > = Broker::new(config).await.expect("broker failed to start"); + let broker: Broker> = + Broker::new(config).await.expect("broker failed to start"); // Error if we stopped unexpectedly if let Err(err) = broker.start().await { @@ -238,10 +271,9 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Spawn the marshal async_spawn(async move { - let marshal: Marshal, Quic> = - Marshal::new(marshal_config) - .await - .expect("failed to spawn marshal"); + let marshal: Marshal> = Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); // Error if we stopped unexpectedly if let Err(err) = marshal.start().await { diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 51197939c4..905184591d 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -40,9 +40,9 @@ start_delay_seconds = 0 cdn_marshal_address = "127.0.0.1:9000" [config] -num_nodes_with_stake = 100 +num_nodes_with_stake = 10 num_nodes_without_stake = 0 -staked_committee_nodes = 100 +staked_committee_nodes = 10 non_staked_committee_nodes = 0 max_transactions = 1 min_transactions = 1 @@ -83,6 +83,11 @@ nanos = 100000000 # 10 ms secs = 0 nanos = 100000000 # 10 ms + +[config.data_request_delay] +secs = 0 +nanos = 2000000000 # 200 ms + [config.propose_min_round_time] secs = 0 nanos = 0 diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 7c36bf2a36..bac6d08af9 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -185,9 +185,10 @@ impl OrchestratorClient { OrchestratorClient { client, identity } } - /// Sends an identify message to the orchestrator and attempts to get its config - /// Returns both the `node_index` and the run configuration without peer's public config from the orchestrator - /// Will block until both are returned + /// Get the config from the orchestrator. + /// If the identity is provided, register the identity with the orchestrator. + /// If not, just retrieving the config (for passive observers) + /// /// # Panics /// if unable to convert the node index from usize into u64 /// (only applicable on 32 bit systems) @@ -244,6 +245,29 @@ impl OrchestratorClient { self.wait_for_fn_from_orchestrator(cur_node_index).await } + /// Requests the configuration from the orchestrator with the stipulation that + /// a successful call requires all nodes to be registered. + /// + /// Does not fail, retries internally until success. + pub async fn get_config_after_collection( + &self, + ) -> NetworkConfig { + // Define the request for post-register configurations + let get_config_after_collection = |client: Client| { + async move { + client + .get("api/get_config_after_peer_collected") + .send() + .await + } + .boxed() + }; + + // Loop until successful + self.wait_for_fn_from_orchestrator(get_config_after_collection) + .await + } + /// Sends my public key to the orchestrator so that it can collect all public keys /// And get the updated config /// Blocks until the orchestrator collects all peer's public keys/configs @@ -270,12 +294,7 @@ impl OrchestratorClient { self.wait_for_fn_from_orchestrator::<_, _, ()>(wait_for_all_nodes_pub_key) .await; - // get the newest updated config - self.client - .get("api/get_config_after_peer_collected") - .send() - .await - .expect("Unable to get the updated config") + self.get_config_after_collection().await } /// Tells the orchestrator this validator is ready to start diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 5424bc2864..03b8ea98b5 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -267,7 +267,7 @@ where self.pub_posted.insert(node_index); // The guess is the first extra 8 bytes are from orchestrator serialization - pubkey.drain(..8); + pubkey.drain(..12); let register_pub_key_with_stake = PeerConfig::::from_bytes(pubkey).unwrap(); self.config.config.known_nodes_with_stake[node_index as usize] = register_pub_key_with_stake; From 81e4f736d23572a9ee201581c47ec10b3f3cf389 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 22 Mar 2024 10:33:48 -0400 Subject: [PATCH 0887/1393] remove proposal_id (#2828) --- task-impls/src/consensus.rs | 5 ----- testing/src/task_helpers.rs | 6 ------ testing/src/view_generator.rs | 6 ------ testing/tests/consensus_task.rs | 8 -------- types/src/data.rs | 12 ------------ 5 files changed, 37 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index a553b70d3b..6362aa148d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -201,7 +201,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.block_header.clone(), block_payload: None, - proposer_id: self.quorum_membership.get_leader(view), }; // Validate the DAC. @@ -533,7 +532,6 @@ impl, A: ConsensusApi + parent_commitment: justify_qc.get_data().leaf_commit, block_header: proposal.data.block_header.clone(), block_payload: None, - proposer_id: sender, }; let state = Arc::new( >::from_header( @@ -608,7 +606,6 @@ impl, A: ConsensusApi + parent_commitment, block_header: proposal.data.block_header.clone(), block_payload: None, - proposer_id: sender.clone(), }; let leaf_commitment = leaf.commit(); @@ -1336,7 +1333,6 @@ impl, A: ConsensusApi + parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - proposer_id: self.api.public_key().clone(), }; let Ok(signature) = @@ -1375,7 +1371,6 @@ impl, A: ConsensusApi + justify_qc: consensus.high_qc.clone(), proposal_certificate, upgrade_certificate: upgrade_cert, - proposer_id: leaf.proposer_id, }; self.proposal_cert = None; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 24ff21f9eb..67273199c7 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -257,7 +257,6 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - proposer_id: *handle.public_key(), }; let mut signature = ::sign(private_key, leaf.commit().as_ref()) @@ -268,7 +267,6 @@ async fn build_quorum_proposal_and_signature( justify_qc: QuorumCertificate::genesis(), upgrade_certificate: None, proposal_certificate: None, - proposer_id: leaf.proposer_id, }; // Only view 2 is tested, higher views are not tested @@ -316,7 +314,6 @@ async fn build_quorum_proposal_and_signature( parent_commitment: parent_leaf.commit(), block_header: block_header.clone(), block_payload: None, - proposer_id: quorum_membership.get_leader(ViewNumber::new(cur_view)), }; let signature_new_view = ::sign(private_key, leaf_new_view.commit().as_ref()) @@ -327,7 +324,6 @@ async fn build_quorum_proposal_and_signature( justify_qc: created_qc, upgrade_certificate: None, proposal_certificate: None, - proposer_id: leaf_new_view.clone().proposer_id, }; proposal = proposal_new_view; signature = signature_new_view; @@ -449,7 +445,6 @@ pub async fn build_vote( ) -> GeneralConsensusMessage { let consensus_lock = handle.get_consensus(); let consensus = consensus_lock.read().await; - let membership = handle.hotshot.memberships.quorum_membership.clone(); let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); @@ -480,7 +475,6 @@ pub async fn build_vote( parent_commitment, block_header: proposal.block_header, block_payload: None, - proposer_id: membership.get_leader(view), }; let vote = QuorumVote::::create_signed_vote( QuorumData { diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 612ff5ae31..2e9c904c28 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -92,7 +92,6 @@ impl TestView { justify_qc: QuorumCertificate::genesis(), upgrade_certificate: None, proposal_certificate: None, - proposer_id: public_key, }; let transactions = vec![TestTransaction(vec![0])]; @@ -123,8 +122,6 @@ impl TestView { block_payload: Some(TestBlockPayload { transactions: transactions.clone(), }), - // Note: this field is not relevant in calculating the leaf commitment. - proposer_id: public_key, }; let signature = ::sign(&private_key, leaf.commit().as_ref()) @@ -286,8 +283,6 @@ impl TestView { block_payload: Some(TestBlockPayload { transactions: transactions.clone(), }), - // Note: this field is not relevant in calculating the leaf commitment. - proposer_id: public_key, }; let signature = ::sign(&private_key, leaf.commit().as_ref()) @@ -299,7 +294,6 @@ impl TestView { justify_qc: quorum_certificate.clone(), upgrade_certificate, proposal_certificate, - proposer_id: public_key, }; let quorum_proposal = Proposal { diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index bc9a70758a..9d83cc9247 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -451,10 +451,6 @@ async fn test_view_sync_finalize_vote() { _ => panic!("Found a TC when there should have been a view sync cert"), }; - // Highest qc so far is actually from view 1, so re-assign proposal 0 to the slot of proposal - // 3. - proposals[0].data.proposer_id = proposals[3].data.proposer_id; - // Now at view 3 we receive the proposal received response. let view_3 = TestScriptStage { inputs: vec![ @@ -563,10 +559,6 @@ async fn test_view_sync_finalize_vote_fail_view_number() { // intentionally skip the proposal for this node so we can get the proposal and fail to vote. cert.view_number = ViewNumber::new(10); - // Highest qc so far is actually from view 1, so re-assign proposal 0 to the slot of proposal - // 3. - proposals[0].data.proposer_id = proposals[3].data.proposer_id; - // We introduce an error by setting a different view number as well, this makes the task check // for a view sync or timeout cert. This value could be anything as long as it is not the // previous view number. diff --git a/types/src/data.rs b/types/src/data.rs index 1b287e2122..cf330b6b56 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -310,9 +310,6 @@ pub struct QuorumProposal { /// - A view sync certificate is only present if the justify_qc and timeout_cert are not /// present. pub proposal_certificate: Option>, - - /// the propser id - pub proposer_id: TYPES::SignatureKey, } impl HasViewNumber for DAProposal { @@ -392,9 +389,6 @@ pub struct Leaf { /// /// It may be empty for nodes not in the DA committee. pub block_payload: Option, - - /// the proposer id of the leaf - pub proposer_id: TYPES::SignatureKey, } impl PartialEq for Leaf { @@ -450,7 +444,6 @@ impl Leaf { parent_commitment: fake_commitment(), block_header: block_header.clone(), block_payload: Some(payload), - proposer_id: <::SignatureKey as SignatureKey>::genesis_proposer_pk(), } } @@ -516,11 +509,6 @@ impl Leaf { pub fn get_payload_commitment(&self) -> VidCommitment { self.get_block_header().payload_commitment() } - - /// Identity of the network participant who proposed this leaf. - pub fn get_proposer_id(&self) -> TYPES::SignatureKey { - self.proposer_id.clone() - } } impl TestableLeaf for Leaf From 037bc5f2438a6960e03b553e370716c596ff44e6 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 22 Mar 2024 10:15:21 -0700 Subject: [PATCH 0888/1393] [Auto Bench] Scripts of Running Benchmarks in AWS (#2793) * update setting * update webserver pub ip * updated dockerfile * dockerfile updated * clean up log and scripts running on ecs * real script added * fix * update script and config file * parameterizing webserver url * add to ci * to pass ci docker test * to pass ci docker test again * test ci with script, just and ecs * fix duplicate part for ci test * fix ci test * today's last try with ci * fix merge * really last ci try * commented out ci * fix scripts * add file * fix config * fix config * fix bug after upgradability? * init try on parameterization in script * better parameterization for script * clean up and more parameterization * fix lint * finish parameterization for scripts * preserve yml for ci * add blank line * fix less than or equal in script * upload results for init run * last blank line * nit for nano representation * change back transaction size * remove autobench on ci * remove autobench on ci * remove ci_ecs_benchmarks.sh * remove ci autobench script --- examples/infra/mod.rs | 54 ++++++++++++++++++++++++++++-------- orchestrator/run-config.toml | 16 +++++------ orchestrator/src/lib.rs | 11 ++++++-- 3 files changed, 60 insertions(+), 21 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index ce8f461ed1..405fe9642b 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -64,7 +64,7 @@ use std::{collections::BTreeSet, sync::Arc}; use std::{fs, time::Instant}; use std::{num::NonZeroUsize, str::FromStr}; use surf_disco::Url; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use versioned_binary_serialization::version::StaticVersionType; #[derive(Debug, Clone)] @@ -172,6 +172,22 @@ pub fn read_orchestrator_init_config() -> ( .help("Sets the url of the orchestrator") .required(false), ) + .arg( + Arg::new("webserver_url") + .short('w') + .long("webserver_url") + .value_name("URL") + .help("Sets the url of the webserver") + .required(false), + ) + .arg( + Arg::new("da_webserver_url") + .short('a') + .long("da_webserver_url") + .value_name("URL") + .help("Sets the url of the da webserver") + .required(false), + ) .get_matches(); if let Some(config_file_string) = matches.get_one::("config_file") { @@ -212,6 +228,20 @@ pub fn read_orchestrator_init_config() -> ( if let Some(orchestrator_url_string) = matches.get_one::("orchestrator_url") { orchestrator_url = Url::parse(orchestrator_url_string).unwrap(); } + if let Some(webserver_url_string) = matches.get_one::("webserver_url") { + let updated_web_server_config = WebServerConfig { + url: Url::parse(webserver_url_string).unwrap(), + wait_between_polls: config.web_server_config.unwrap().wait_between_polls, + }; + config.web_server_config = Some(updated_web_server_config); + } + if let Some(da_webserver_url_string) = matches.get_one::("da_webserver_url") { + let updated_da_web_server_config = WebServerConfig { + url: Url::parse(da_webserver_url_string).unwrap(), + wait_between_polls: config.da_web_server_config.unwrap().wait_between_polls, + }; + config.da_web_server_config = Some(updated_da_web_server_config); + } (config, orchestrator_url) } @@ -260,7 +290,7 @@ pub async fn run_orchestrator< >( OrchestratorArgs { url, config }: OrchestratorArgs, ) { - error!("Starting orchestrator",); + println!("Starting orchestrator",); let _result = hotshot_orchestrator::run_orchestrator::< TYPES::SignatureKey, TYPES::ElectionConfigType, @@ -540,10 +570,10 @@ pub trait RunDA< let mut total_latency = 0; let mut num_latency = 0; - error!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); + debug!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); async_sleep(Duration::from_secs(start_delay_seconds)).await; - error!("Starting HotShot example!"); + debug!("Starting HotShot example!"); let start = Instant::now(); let mut event_stream = context.get_event_stream(); @@ -638,7 +668,7 @@ pub trait RunDA< failed_num_views += 1; warn!("Timed out in view {:?}", view_number); } - _ => {} + _ => {} // mostly DA proposal } } } @@ -647,19 +677,21 @@ pub trait RunDA< let consensus = consensus_lock.read().await; let total_num_views = usize::try_from(consensus.locked_view.get_u64()).unwrap(); // When posting to the orchestrator, note that the total number of views also include un-finalized views. - error!("Failed views: {failed_num_views}, Total views: {total_num_views}, num_successful_commits: {num_successful_commits}"); + println!("[{node_index}]: Total views: {total_num_views}, Failed views: {failed_num_views}, num_successful_commits: {num_successful_commits}"); // +2 is for uncommitted views assert!(total_num_views <= (failed_num_views + num_successful_commits + 2)); // Output run results let total_time_elapsed = start.elapsed(); // in seconds - error!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); + println!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); if total_transactions_committed != 0 { // extra 8 bytes for timestamp let throughput_bytes_per_sec = total_transactions_committed * (transaction_size_in_bytes + 8) / total_time_elapsed.as_secs(); + let avg_latency_in_sec = total_latency / num_latency; + println!("[{node_index}]: throughput: {throughput_bytes_per_sec} bytes/sec, avg_latency: {avg_latency_in_sec} sec."); BenchResults { - avg_latency_in_sec: total_latency / num_latency, + avg_latency_in_sec, num_latency, minimum_latency_in_sec: minimum_latency, maximum_latency_in_sec: maximum_latency, @@ -1056,7 +1088,7 @@ pub async fn main_entry_point< setup_logging(); setup_backtrace(); - error!("Starting validator"); + debug!("Starting validator"); // see what our public identity will be let public_ip = match args.public_ip { @@ -1128,13 +1160,13 @@ pub async fn main_entry_point< } if let NetworkConfigSource::Orchestrator = source { - error!("Waiting for the start command from orchestrator"); + debug!("Waiting for the start command from orchestrator"); orchestrator_client .wait_for_all_nodes_ready(run_config.clone().node_index) .await; } - error!("Starting HotShot"); + println!("Starting HotShot"); let bench_results = run .run_hotshot( hotshot, diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 905184591d..69dd92e285 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,4 +1,4 @@ -rounds = 100 +rounds = 10 transactions_per_round = 10 transaction_size = 1000 node_index = 0 @@ -77,25 +77,25 @@ url = "http://localhost:9001" [web_server_config.wait_between_polls] secs = 0 -nanos = 100000000 # 10 ms +nanos = 10_000_000 [da_web_server_config.wait_between_polls] secs = 0 -nanos = 100000000 # 10 ms +nanos = 10_000_000 +[config.view_sync_timeout] +secs = 2 +nanos = 0 + [config.data_request_delay] secs = 0 -nanos = 2000000000 # 200 ms +nanos = 200_000_000 [config.propose_min_round_time] secs = 0 nanos = 0 -[config.view_sync_timeout] -secs = 2 -nanos = 0 - # TODO (Keyao) Clean up configuration parameters. # [config.propose_max_round_time] diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 03b8ea98b5..b1af27e658 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -11,6 +11,7 @@ use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, PeerConfig, }; +use std::fs::OpenOptions; use std::{ collections::HashSet, io, @@ -125,8 +126,14 @@ impl total_num_views: self.bench_results.total_num_views, failed_num_views: self.bench_results.failed_num_views, }; + // Open the CSV file in append mode + let results_csv_file = OpenOptions::new() + .create(true) + .append(true) // Open in append mode + .open("scripts/benchmarks_results/results.csv") + .unwrap(); // Open a file for writing - let mut wtr = Writer::from_path("scripts/benchmarks_results/results.csv").unwrap(); + let mut wtr = Writer::from_writer(results_csv_file); let _ = wtr.serialize(output_csv); let _ = wtr.flush(); println!("Results successfully saved in scripts/benchmarks_results/results.csv"); @@ -266,7 +273,7 @@ where } self.pub_posted.insert(node_index); - // The guess is the first extra 8 bytes are from orchestrator serialization + // The guess is the first extra 12 bytes are from orchestrator serialization pubkey.drain(..12); let register_pub_key_with_stake = PeerConfig::::from_bytes(pubkey).unwrap(); self.config.config.known_nodes_with_stake[node_index as usize] = From 188d8791e6dee21eb7928001b504992c630b6f70 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 25 Mar 2024 04:03:08 +0100 Subject: [PATCH 0889/1393] [BUILDER] Simple builder (#2817) * Simple builder --- testing/src/block_builder.rs | 327 ++++++++++++++++++++++++++------- testing/tests/block_builder.rs | 117 +++++++++++- 2 files changed, 365 insertions(+), 79 deletions(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 7bafbe35c4..42c73f469c 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, num::NonZeroUsize, ops::{Deref, Range}, sync::Arc, @@ -8,6 +9,7 @@ use std::{ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; +use commit::{Commitment, Committable}; use futures::future::BoxFuture; use hotshot::{traits::BlockPayload, types::SignatureKey}; use hotshot_builder_api::{ @@ -19,9 +21,11 @@ use hotshot_example_types::{ block_types::{TestBlockPayload, TestTransaction}, node_types::TestTypes, }; +use hotshot_task::task::{Task, TaskState}; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, - traits::{block_contents::vid_commitment, node_implementation::NodeType}, + traits::{block_contents::vid_commitment, election::Membership, node_implementation::NodeType}, utils::BuilderCommitment, vid::VidCommitment, }; @@ -29,6 +33,8 @@ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, App, Url}; +use crate::test_runner::HotShotTaskCompleted; + /// Entry for a built block struct BlockEntry { metadata: AvailableBlockInfo, @@ -42,7 +48,7 @@ pub struct RandomBuilderOptions { /// How many transactions to include in a block pub txn_in_block: u64, /// How many blocks to generate per second - pub blocks_per_second: u64, + pub blocks_per_second: u32, /// Range of how big a transaction can be (in bytes) pub txn_size: Range, /// Number of storage nodes for VID commitment @@ -99,10 +105,10 @@ impl RandomBuilderSource { let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key); async_spawn(async move { let mut rng = SmallRng::from_entropy(); - let time_per_block = Duration::from_millis(1000 / options.blocks_per_second); + let time_per_block = Duration::from_secs(1) / options.blocks_per_second; loop { let start = std::time::Instant::now(); - let transactions = (0..options.txn_in_block) + let transactions: Vec = (0..options.txn_in_block) .map(|_| { let mut bytes = vec![ 0; @@ -113,80 +119,20 @@ impl RandomBuilderSource { rng.fill_bytes(&mut bytes); TestTransaction(bytes) }) - .collect::>(); - let block_size = transactions.iter().map(|t| t.0.len() as u64).sum::(); + .collect(); - let block_payload = TestBlockPayload { transactions }; - let commitment = block_payload.builder_commitment(&()); - let vid_commitment = vid_commitment( - &block_payload.encode().unwrap().collect(), + let (metadata, payload, header_input) = build_block( + transactions, options.num_storage_nodes, + pub_key, + priv_key.clone(), ); - let signature_over_block_info = { - let mut block_info: Vec = Vec::new(); - block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); - block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); - block_info.extend_from_slice(commitment.as_ref()); - match ::SignatureKey::sign(&priv_key, &block_info) { - Ok(sig) => sig, - Err(e) => { - tracing::error!(error = %e, "Failed to sign block"); - continue; - } - } - }; - - let signature_over_builder_commitment = - match ::SignatureKey::sign( - &priv_key, - commitment.as_ref(), - ) { - Ok(sig) => sig, - Err(e) => { - tracing::error!(error = %e, "Failed to sign block"); - continue; - } - }; - - let signature_over_vid_commitment = - match ::SignatureKey::sign( - &priv_key, - vid_commitment.as_ref(), - ) { - Ok(sig) => sig, - Err(e) => { - tracing::error!(error = %e, "Failed to sign block"); - continue; - } - }; - - let block = AvailableBlockData { - block_payload, - metadata: (), - sender: pub_key, - signature: signature_over_block_info, - _phantom: std::marker::PhantomData, - }; - let metadata = AvailableBlockInfo { - sender: pub_key, - signature: signature_over_builder_commitment, - block_hash: commitment, - block_size, - offered_fee: 123, - _phantom: std::marker::PhantomData, - }; - let header_input = AvailableBlockHeaderInput { - vid_commitment, - signature: signature_over_vid_commitment, - sender: pub_key, - _phantom: std::marker::PhantomData, - }; if let Some((hash, _)) = blocks.write().await.push( metadata.block_hash.clone(), BlockEntry { metadata, - payload: Some(block), + payload: Some(payload), header_input: Some(header_input), }, ) { @@ -279,3 +225,244 @@ pub fn run_random_builder(url: Url) { async_spawn(app.serve(url, STATIC_VER_0_1)); } + +pub struct SimpleBuilderSource { + pub_key: ::SignatureKey, + priv_key: <::SignatureKey as SignatureKey>::PrivateKey, + membership: Arc<::Membership>, + transactions: Arc, TestTransaction>>>, + blocks: Arc>>, +} + +#[async_trait] +impl ReadState for SimpleBuilderSource { + type State = Self; + + async fn read( + &self, + op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, + ) -> T { + op(self).await + } +} + +#[async_trait] +impl BuilderDataSource for SimpleBuilderSource { + async fn get_available_blocks( + &self, + _for_parent: &VidCommitment, + ) -> Result>, BuildError> { + let transactions = self + .transactions + .read(|txns| { + Box::pin(async { txns.values().cloned().collect::>() }) + }) + .await; + let (metadata, payload, header_input) = build_block( + transactions, + self.membership.total_nodes(), + self.pub_key, + self.priv_key.clone(), + ); + + self.blocks.write().await.insert( + metadata.block_hash.clone(), + BlockEntry { + metadata: metadata.clone(), + payload: Some(payload), + header_input: Some(header_input), + }, + ); + + Ok(vec![metadata]) + } + + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + entry.payload.take().ok_or(BuildError::Missing) + } + + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + entry.header_input.take().ok_or(BuildError::Missing) + } +} + +impl SimpleBuilderSource { + pub async fn run(self, url: Url) { + let builder_api = + hotshot_builder_api::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let mut app: App = + App::with_state(self); + app.register_module("/", builder_api) + .expect("Failed to register the builder API"); + + async_spawn(app.serve(url, STATIC_VER_0_1)); + } +} + +pub struct SimpleBuilderTask { + transactions: Arc, TestTransaction>>>, + blocks: Arc>>, +} + +impl TaskState for SimpleBuilderTask { + type Event = Arc>; + + type Output = HotShotTaskCompleted; + + fn filter(&self, event: &Arc>) -> bool { + !matches!( + event.as_ref(), + HotShotEvent::TransactionsRecv(_) + | HotShotEvent::LeafDecided(_) + | HotShotEvent::Shutdown + ) + } + + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let this = task.state_mut(); + match event.as_ref() { + HotShotEvent::TransactionsRecv(transactions) => { + let mut queue = this.transactions.write().await; + for transaction in transactions { + queue.insert(transaction.commit(), transaction.clone()); + } + } + HotShotEvent::LeafDecided(leaf_chain) => { + let mut queue = this.transactions.write().await; + for leaf in leaf_chain.iter() { + if let Some(ref payload) = leaf.block_payload { + for txn in payload.transaction_commitments(&()) { + queue.remove(&txn); + } + } + } + this.blocks.write().await.clear(); + } + _ => {} + }; + None + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event.as_ref(), HotShotEvent::Shutdown) + } +} + +pub async fn make_simple_builder( + membership: Arc<::Membership>, +) -> (SimpleBuilderSource, SimpleBuilderTask) { + let (pub_key, priv_key) = + ::SignatureKey::generated_from_seed_indexed([1; 32], 0); + + let transactions = Arc::new(RwLock::new(HashMap::new())); + let blocks = Arc::new(RwLock::new(HashMap::new())); + + let source = SimpleBuilderSource { + pub_key, + priv_key, + transactions: transactions.clone(), + blocks: blocks.clone(), + membership, + }; + + let task = SimpleBuilderTask { + transactions, + blocks, + }; + + (source, task) +} + +/// Helper function to construct all builder data structures from a list of transactions +fn build_block( + transactions: Vec, + num_storage_nodes: usize, + pub_key: ::SignatureKey, + priv_key: <::SignatureKey as SignatureKey>::PrivateKey, +) -> ( + AvailableBlockInfo, + AvailableBlockData, + AvailableBlockHeaderInput, +) { + let block_size = transactions.iter().map(|t| t.0.len() as u64).sum::(); + + let block_payload = TestBlockPayload { transactions }; + + let commitment = block_payload.builder_commitment(&()); + + let vid_commitment = vid_commitment( + &block_payload.encode().unwrap().collect(), + num_storage_nodes, + ); + + let signature_over_block_info = { + let mut block_info: Vec = Vec::new(); + block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); + block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); + block_info.extend_from_slice(commitment.as_ref()); + match ::SignatureKey::sign(&priv_key, &block_info) { + Ok(sig) => sig, + Err(e) => { + panic!("Failed to sign block: {}", e); + } + } + }; + + let signature_over_builder_commitment = + match ::SignatureKey::sign(&priv_key, commitment.as_ref()) { + Ok(sig) => sig, + Err(e) => { + panic!("Failed to sign block: {}", e); + } + }; + + let signature_over_vid_commitment = + match ::SignatureKey::sign(&priv_key, vid_commitment.as_ref()) { + Ok(sig) => sig, + Err(e) => { + panic!("Failed to sign block: {}", e); + } + }; + + let block = AvailableBlockData { + block_payload, + metadata: (), + sender: pub_key, + signature: signature_over_block_info, + _phantom: std::marker::PhantomData, + }; + let metadata = AvailableBlockInfo { + sender: pub_key, + signature: signature_over_builder_commitment, + block_hash: commitment, + block_size, + offered_fee: 123, + _phantom: std::marker::PhantomData, + }; + let header_input = AvailableBlockHeaderInput { + vid_commitment, + signature: signature_over_vid_commitment, + sender: pub_key, + _phantom: std::marker::PhantomData, + }; + + (metadata, block, header_input) +} diff --git a/testing/tests/block_builder.rs b/testing/tests/block_builder.rs index 3246cd366c..1f7c268bde 100644 --- a/testing/tests/block_builder.rs +++ b/testing/tests/block_builder.rs @@ -1,26 +1,34 @@ +use async_compatibility_layer::art::async_sleep; use hotshot_example_types::{ block_types::{TestBlockPayload, TestTransaction}, node_types::TestTypes, }; -use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; -use hotshot_testing::block_builder::run_random_builder; -use hotshot_types::traits::BlockPayload; -use hotshot_types::traits::{ - block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, +use hotshot_task_impls::{ + builder::{BuilderClient, BuilderClientError}, + events::HotShotEvent, +}; +use hotshot_testing::{ + block_builder::{make_simple_builder, run_random_builder}, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, +}; +use hotshot_types::{ + constants::Version01, + traits::{ + block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, + BlockPayload, + }, }; use std::time::Duration; use tide_disco::Url; -#[ignore] #[cfg(test)] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_block_builder() { - use hotshot_types::constants::Version01; - +async fn test_random_block_builder() { let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); @@ -29,6 +37,9 @@ async fn test_block_builder() { let client: BuilderClient = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); + // Wait for at least one block to be built + async_sleep(Duration::from_millis(30)).await; + // Test getting blocks let mut blocks = client .get_available_blocks(vid_commitment(&vec![], 1)) @@ -60,3 +71,91 @@ async fn test_block_builder() { .await; assert!(matches!(result, Err(BuilderClientError::NotFound))); } + +#[cfg(test)] +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_simple_block_builder() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let (source, task) = make_simple_builder(quorum_membership.into()).await; + + let port = portpicker::pick_unused_port().expect("Could not find an open port"); + let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); + + source.run(api_url.clone()).await; + + let client: BuilderClient = BuilderClient::new(api_url); + assert!(client.connect(Duration::from_millis(100)).await); + + // Before block-building task is spun up, should only have an empty block + { + let mut blocks = client + .get_available_blocks(vid_commitment(&vec![], 1)) + .await + .expect("Failed to get available blocks"); + + assert_eq!(blocks.len(), 1); + + let signature = { + let (_key, private_key) = + ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); + ::SignatureKey::sign(&private_key, &[0_u8; 32]) + .expect("Failed to create dummy signature") + }; + + let payload: TestBlockPayload = client + .claim_block(blocks.pop().unwrap(), &signature) + .await + .expect("Failed to claim block"); + + assert_eq!(payload.transactions.len(), 0); + } + + { + let stage_1 = TestScriptStage { + inputs: vec![ + HotShotEvent::TransactionsRecv(vec![ + TestTransaction(vec![1]), + TestTransaction(vec![2]), + ]), + HotShotEvent::TransactionsRecv(vec![TestTransaction(vec![3])]), + ], + outputs: vec![], + asserts: vec![], + }; + + let stages = vec![stage_1]; + + run_test_script(stages, task).await; + + // Test getting blocks + let mut blocks = client + .get_available_blocks(vid_commitment(&vec![], 1)) + .await + .expect("Failed to get available blocks"); + + assert!(!blocks.is_empty()); + + let signature = { + let (_key, private_key) = + ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); + ::SignatureKey::sign(&private_key, &[0_u8; 32]) + .expect("Failed to create dummy signature") + }; + + let payload: TestBlockPayload = client + .claim_block(blocks.pop().unwrap(), &signature) + .await + .expect("Failed to claim block"); + + assert_eq!(payload.transactions.len(), 3); + } +} From a3c6b71df7319176b28b95d02c9fb0c86c71df46 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 25 Mar 2024 10:11:29 -0400 Subject: [PATCH 0890/1393] [Catchup] Store High QC When it is Updated (#2807) * add more action types * integrate new storage type * fix build, tie up stragglers, modifying tasks next * merge latest stable tabs * merge latest tags and fix method signatures * working towards adding action to storage * Store high qc whenever we update it * fix lint * adding high_qc to initializer and tests * typos and if let instead of match * better comment * adding state to storage * add state to initializer * split high qc and undecided state storage * fix new clippy lints --------- Co-authored-by: Jarred Parr --- example-types/src/storage_types.rs | 18 ++++++++++-- hotshot/src/lib.rs | 26 +++++++++++++++++- task-impls/src/consensus.rs | 44 ++++++++++++++++++++++++++++++ task-impls/src/vote_collection.rs | 2 ++ testing/src/spinning_task.rs | 26 +++++++++++++++--- testing/src/test_runner.rs | 3 +- types/src/consensus.rs | 2 +- types/src/traits/storage.rs | 14 ++++++++-- types/src/utils.rs | 18 ++++++++++-- 9 files changed, 139 insertions(+), 14 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 51b894ca56..3d9c59c422 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -2,11 +2,13 @@ use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ - data::{DAProposal, VidDisperseShare}, + consensus::CommitmentMap, + data::{DAProposal, Leaf, VidDisperseShare}, message::Proposal, traits::{node_implementation::NodeType, storage::Storage}, + utils::View, }; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; type VidShares = HashMap< @@ -84,7 +86,17 @@ impl Storage for TestStorage { async fn update_high_qc( &self, - _qc: hotshot_types::simple_certificate::QuorumCertificate, + _high_qc: hotshot_types::simple_certificate::QuorumCertificate, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to update high qc to storage"); + } + Ok(()) + } + async fn update_undecided_state( + &self, + _leafs: CommitmentMap>, + _state: BTreeMap>, ) -> Result<()> { if self.should_return_err { bail!("Failed to update high qc to storage"); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6286294268..78bea4ce58 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -198,10 +198,16 @@ impl> SystemContext { }, }, ); + for (view_num, inner) in initializer.undecided_state { + validated_state_map.insert(view_num, inner); + } let mut saved_leaves = HashMap::new(); let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); + for leaf in initializer.undecided_leafs { + saved_leaves.insert(leaf.commit(), leaf.clone()); + } if let Some(payload) = anchored_leaf.get_block_payload() { let encoded_txns: Vec = match payload.encode() { // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. @@ -229,7 +235,7 @@ impl> SystemContext { // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), - high_qc: anchored_leaf.get_justify_qc(), + high_qc: initializer.high_qc, metrics: consensus_metrics.clone(), }; let consensus = Arc::new(RwLock::new(consensus)); @@ -637,6 +643,15 @@ pub struct HotShotInitializer { /// Starting view number that we are confident won't lead to a double vote after restart. start_view: TYPES::Time, + /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater + /// than `inner`s view number for the non genesis case because we must have seen higher QCs + /// to decide on the leaf. + high_qc: QuorumCertificate, + /// Undecided leafs that were seen, but not yet decided on. These allow a restarting node + /// to vote and propose right away if they didn't miss anything while down. + undecided_leafs: Vec>, + /// Not yet decided state + undecided_state: BTreeMap>, } impl HotShotInitializer { @@ -651,6 +666,9 @@ impl HotShotInitializer { validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), + high_qc: QuorumCertificate::genesis(), + undecided_leafs: Vec::new(), + undecided_state: BTreeMap::new(), }) } @@ -666,6 +684,9 @@ impl HotShotInitializer { instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::Time, + high_qc: QuorumCertificate, + undecided_leafs: Vec>, + undecided_state: BTreeMap>, ) -> Self { Self { inner: anchor_leaf, @@ -673,6 +694,9 @@ impl HotShotInitializer { validated_state, state_delta: None, start_view, + high_qc, + undecided_leafs, + undecided_state, } } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 6362aa148d..df5102fe35 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -513,6 +513,19 @@ impl, A: ConsensusApi + } }; + if justify_qc.get_view_number() > consensus.high_qc.view_number { + if let Err(e) = self + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + warn!("Failed to store High QC not voting. Error: {:?}", e); + return; + } + } + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; if justify_qc.get_view_number() > consensus.high_qc.view_number { @@ -551,6 +564,19 @@ impl, A: ConsensusApi + ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + if let Err(e) = self + .storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + // If we are missing the parent from storage, the safety check will fail. But we can // still vote if the liveness check succeeds. let liveness_check = justify_qc.get_view_number() > consensus.locked_view; @@ -782,6 +808,20 @@ impl, A: ConsensusApi + }, ); consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + + if let Err(e) = self + .storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + if new_commit_reached { consensus.locked_view = new_locked_view; } @@ -969,6 +1009,10 @@ impl, A: ConsensusApi + } } if let either::Left(qc) = cert { + if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { + warn!("Failed to store High QC of QC we formed. Error: {:?}", e); + } + let mut consensus = self.consensus.write().await; consensus.high_qc = qc.clone(); diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 1bc98b63a4..69501c1e2a 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -67,6 +67,7 @@ impl< { /// Take one vote and accumultate it. Returns either the cert or the updated state /// after the vote is accumulated + #[allow(clippy::question_mark)] pub async fn accumulate_vote( &mut self, vote: &VOTE, @@ -85,6 +86,7 @@ impl< ); return None; } + let accumulator = self.accumulator.as_mut()?; match accumulator.accumulate(vote, &self.membership) { Either::Left(()) => None, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 3268c145f4..af80bcb65d 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -3,10 +3,12 @@ use std::collections::HashMap; use crate::test_runner::HotShotTaskCompleted; use crate::test_runner::{LateStartNode, Node, TestRunner}; use either::{Left, Right}; +use hotshot::types::EventType; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer}; use hotshot_example_types::state_types::TestInstanceState; use hotshot_example_types::storage_types::TestStorage; use hotshot_task::task::{Task, TaskState, TestTaskState}; +use hotshot_types::simple_certificate::QuorumCertificate; use hotshot_types::{data::Leaf, ValidatorConfig}; use hotshot_types::{ event::Event, @@ -39,6 +41,8 @@ pub struct SpinningTask> { pub(crate) latest_view: Option, /// Last decided leaf that can be used as the anchor leaf to initialize the node. pub(crate) last_decided_leaf: Leaf, + /// Highest qc seen in the test for restarting nodes + pub(crate) high_qc: QuorumCertificate, } impl> TaskState for SpinningTask { @@ -83,13 +87,24 @@ where _id: usize, task: &mut hotshot_task::task::TestTask, ) -> Option { - let Event { - view_number, - event: _, - } = message; + let Event { view_number, event } = message; let state = &mut task.state_mut(); + if let EventType::Decide { + leaf_chain, + qc: _, + block_size: _, + } = event + { + state.last_decided_leaf = leaf_chain.first().unwrap().leaf.clone(); + } else if let EventType::QuorumProposal { + proposal, + sender: _, + } = event + { + state.high_qc = proposal.data.justify_qc; + } // if we have not seen this view before if state.latest_view.is_none() || view_number > state.latest_view.unwrap() { // perform operations on the nodes @@ -111,6 +126,9 @@ where TestInstanceState {}, None, view_number, + state.high_qc.clone(), + Vec::new(), + BTreeMap::new(), ); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 9153dabf0e..620b297804 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -20,7 +20,6 @@ use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestS use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; use hotshot_task::task::{Task, TaskRegistry, TestTask}; -use hotshot_types::constants::EVENT_CHANNEL_SIZE; use hotshot_types::{ consensus::ConsensusMetricsValue, data::Leaf, @@ -30,6 +29,7 @@ use hotshot_types::{ }, HotShotConfig, ValidatorConfig, }; +use hotshot_types::{constants::EVENT_CHANNEL_SIZE, simple_certificate::QuorumCertificate}; use hotshot_types::{ message::Message, traits::{network::ConnectedNetwork, node_implementation::NodeImplementation}, @@ -219,6 +219,7 @@ where latest_view: None, changes, last_decided_leaf: Leaf::genesis(&TestInstanceState {}), + high_qc: QuorumCertificate::genesis(), }; let spinning_task = TestTask::, SpinningTask>::new( Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 666461c708..52a92d3c45 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -24,7 +24,7 @@ use std::{ use tracing::error; /// A type alias for `HashMap, T>` -type CommitmentMap = HashMap, T>; +pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` type VidShares = BTreeMap< diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index eae0514852..bb62857b66 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -3,11 +3,14 @@ //! This modules provides the [`Storage`] trait. //! +use std::collections::BTreeMap; + use anyhow::Result; use async_trait::async_trait; use crate::{ - data::{DAProposal, VidDisperseShare}, + consensus::{CommitmentMap, View}, + data::{DAProposal, Leaf, VidDisperseShare}, event::HotShotAction, message::Proposal, simple_certificate::QuorumCertificate, @@ -21,5 +24,12 @@ pub trait Storage: Send + Sync + Clone { async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; async fn append_da(&self, proposal: &Proposal>) -> Result<()>; async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; - async fn update_high_qc(&self, qc: QuorumCertificate) -> Result<()>; + async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; + /// Update the currently undecided state of consensus. This includes the undecided leaf chain, + /// and the undecided state. + async fn update_undecided_state( + &self, + leafs: CommitmentMap>, + state: BTreeMap>, + ) -> Result<()>; } diff --git a/types/src/utils.rs b/types/src/utils.rs index 9cce5bb31b..663b676e75 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -44,7 +44,21 @@ pub enum ViewInner { /// Leaf has failed Failed, } - +impl Clone for ViewInner { + fn clone(&self) -> Self { + match self { + Self::DA { payload_commitment } => Self::DA { + payload_commitment: *payload_commitment, + }, + Self::Leaf { leaf, state, delta } => Self::Leaf { + leaf: *leaf, + state: state.clone(), + delta: delta.clone(), + }, + Self::Failed => Self::Failed, + } + } +} /// The hash of a leaf. type LeafCommitment = Commitment>; @@ -117,7 +131,7 @@ impl Deref for View { } /// This exists so we can perform state transitions mutably -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct View { /// The view data. Wrapped in a struct so we can mutate pub view_inner: ViewInner, From e30a188e9620662275c9a0b5c0c23fcec5575f9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 17:18:09 +0100 Subject: [PATCH 0891/1393] Bump syn from 2.0.53 to 2.0.55 (#2839) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.53 to 2.0.55. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.53...2.0.55) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- macros/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 1d6f6211a2..2b75d20e1e 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -7,7 +7,7 @@ description = "Macros for hotshot tests" [dependencies] # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.50", features = ["full", "extra-traits"] } +syn = { version = "2.0.55", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" [lib] diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 022176586d..0d890e14b9 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ tracing = { workspace = true } serde = { workspace = true } # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.52", features = ["full", "extra-traits"] } +syn = { version = "2.0.55", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" derive_builder = "0.20.0" From 5b3f78f78842f6da64ff2443b4b0b5a4fb454c8d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 27 Mar 2024 10:15:24 -0400 Subject: [PATCH 0892/1393] [LIBP2P] Use Standard libp2p RequestResponse Behaviour for Direct Messages (#2829) * Removing behaviour impl for DM * retries and fixes --- .../src/network/behaviours/direct_message.rs | 236 +++--------------- .../network/behaviours/exponential_backoff.rs | 6 + libp2p-networking/src/network/def.rs | 30 +-- libp2p-networking/src/network/mod.rs | 3 +- libp2p-networking/src/network/node.rs | 32 ++- 5 files changed, 75 insertions(+), 232 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index f156e2ad96..df4502c578 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -1,20 +1,16 @@ -use std::{ - collections::{HashMap, VecDeque}, - task::Poll, -}; - -use libp2p::request_response::cbor::Behaviour; -use libp2p::{ - request_response::{Event, Message, OutboundRequestId, ResponseChannel}, - swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, - Multiaddr, -}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_compatibility_layer::channel::UnboundedSender; +use libp2p::request_response::{Event, Message, OutboundRequestId, ResponseChannel}; use libp2p_identity::PeerId; +use std::collections::HashMap; use tracing::{error, info}; +use crate::network::{ClientRequest, NetworkEvent}; + use super::exponential_backoff::ExponentialBackoff; /// Request to direct message a peert +#[derive(Debug)] pub struct DMRequest { /// the recv-ers peer id pub peer_id: PeerId, @@ -28,15 +24,10 @@ pub struct DMRequest { /// Wrapper metadata around libp2p's request response /// usage: direct message peer +#[derive(Debug, Default)] pub struct DMBehaviour { - /// The wrapped behaviour - request_response: Behaviour, Vec>, /// In progress queries in_progress_rr: HashMap, - /// Failed queries to be retried - failed_rr: VecDeque, - /// lsit of out events for parent behaviour - out_event_queue: Vec, } /// Lilst of direct message output events @@ -50,7 +41,11 @@ pub enum DMEvent { impl DMBehaviour { /// handle a direct message event - fn handle_dm_event(&mut self, event: Event, Vec>) { + pub(crate) fn handle_dm_event( + &mut self, + event: Event, Vec>, + retry_tx: Option>, + ) -> Option { match event { Event::InboundFailure { peer, @@ -61,6 +56,7 @@ impl DMBehaviour { "inbound failure to send message to {:?} with error {:?}", peer, error ); + None } Event::OutboundFailure { peer, @@ -72,9 +68,24 @@ impl DMBehaviour { peer, error ); if let Some(mut req) = self.in_progress_rr.remove(&request_id) { - req.backoff.start_next(false); - self.failed_rr.push_back(req); + if req.retry_count == 0 { + return None; + } + req.retry_count -= 1; + if let Some(retry_tx) = retry_tx { + async_spawn(async move { + async_sleep(req.backoff.next_timeout(false)).await; + let _ = retry_tx + .send(ClientRequest::DirectRequest { + pid: peer, + contents: req.data, + retry_count: req.retry_count, + }) + .await; + }); + } } + None } Event::Message { message, peer, .. } => match message { Message::Request { @@ -85,8 +96,7 @@ impl DMBehaviour { info!("recv-ed DIRECT REQUEST {:?}", msg); // receiver, not initiator. // don't track. If we are disconnected, sender will reinitiate - self.out_event_queue - .push(DMEvent::DirectRequest(msg, peer, channel)); + Some(NetworkEvent::DirectRequest(msg, peer, channel)) } Message::Response { request_id, @@ -95,206 +105,32 @@ impl DMBehaviour { // success, finished. if let Some(req) = self.in_progress_rr.remove(&request_id) { info!("recv-ed DIRECT RESPONSE {:?}", msg); - self.out_event_queue - .push(DMEvent::DirectResponse(msg, req.peer_id)); + Some(NetworkEvent::DirectResponse(msg, req.peer_id)) } else { error!("recv-ed a direct response, but is no longer tracking message!"); + None } } }, e @ Event::ResponseSent { .. } => { info!(?e, " sending response"); + None } } } } -impl NetworkBehaviour for DMBehaviour { - type ConnectionHandler = , Vec> as NetworkBehaviour>::ConnectionHandler; - - type ToSwarm = DMEvent; - - fn on_swarm_event(&mut self, event: libp2p::swarm::derive_prelude::FromSwarm<'_>) { - self.request_response.on_swarm_event(event); - } - - fn on_connection_handler_event( - &mut self, - peer_id: PeerId, - connection_id: libp2p::swarm::derive_prelude::ConnectionId, - event: THandlerOutEvent, - ) { - self.request_response - .on_connection_handler_event(peer_id, connection_id, event); - } - - fn poll( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> Poll>> { - let mut retry_req_indices = Vec::new(); - for (idx, req) in self.failed_rr.iter().enumerate() { - if req.backoff.is_expired() { - retry_req_indices.push(idx); - } - } - let _ = retry_req_indices.into_iter().map(|idx| { - let req = self.failed_rr.remove(idx).unwrap(); - self.add_direct_request(req); - }); - while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.request_response, cx) { - match ready { - // NOTE: this generates request - ToSwarm::GenerateEvent(e) => { - self.handle_dm_event(e); - } - ToSwarm::Dial { opts } => { - return Poll::Ready(ToSwarm::Dial { opts }); - } - ToSwarm::NotifyHandler { - peer_id, - handler, - event, - } => { - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event, - }); - } - ToSwarm::CloseConnection { - peer_id, - connection, - } => { - return Poll::Ready(ToSwarm::CloseConnection { - peer_id, - connection, - }); - } - ToSwarm::ListenOn { opts } => { - return Poll::Ready(ToSwarm::ListenOn { opts }); - } - ToSwarm::RemoveListener { id } => { - return Poll::Ready(ToSwarm::RemoveListener { id }); - } - ToSwarm::NewExternalAddrCandidate(c) => { - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(c)); - } - ToSwarm::ExternalAddrConfirmed(c) => { - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(c)); - } - ToSwarm::ExternalAddrExpired(c) => { - return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); - } - e => { - error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); - } - } - } - if !self.out_event_queue.is_empty() { - return Poll::Ready(ToSwarm::GenerateEvent(self.out_event_queue.remove(0))); - } - Poll::Pending - } - - fn handle_pending_inbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result<(), libp2p::swarm::ConnectionDenied> { - self.request_response.handle_pending_inbound_connection( - connection_id, - local_addr, - remote_addr, - ) - } - - fn handle_established_inbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.request_response.handle_established_inbound_connection( - connection_id, - peer, - local_addr, - remote_addr, - ) - } - - fn handle_pending_outbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - maybe_peer: Option, - addresses: &[Multiaddr], - effective_role: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.request_response.handle_pending_outbound_connection( - connection_id, - maybe_peer, - addresses, - effective_role, - ) - } - - fn handle_established_outbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - peer: PeerId, - addr: &Multiaddr, - role_override: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.request_response - .handle_established_outbound_connection(connection_id, peer, addr, role_override) - } -} - impl DMBehaviour { - /// Create new behaviour based on request response - #[must_use] - pub fn new(request_response: Behaviour, Vec>) -> Self { - Self { - request_response, - in_progress_rr: HashMap::default(), - failed_rr: VecDeque::default(), - out_event_queue: Vec::default(), - } - } - - /// Add address to request response behaviour - pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) { - self.request_response.add_address(peer_id, address); - } - - /// Remove address from request response behaviour - pub fn remove_address(&mut self, peer_id: &PeerId, address: &Multiaddr) { - self.request_response.remove_address(peer_id, address); - } - /// Add a direct request for a given peer - pub fn add_direct_request(&mut self, mut req: DMRequest) { + pub fn add_direct_request(&mut self, mut req: DMRequest, request_id: OutboundRequestId) { if req.retry_count == 0 { return; } req.retry_count -= 1; - let request_id = self - .request_response - .send_request(&req.peer_id, req.data.clone()); info!("direct message request with id {:?}", request_id); self.in_progress_rr.insert(request_id, req); } - - /// Add a direct response for a channel - pub fn add_direct_response(&mut self, chan: ResponseChannel>, msg: Vec) { - let res = self.request_response.send_response(chan, msg); - if let Err(e) = res { - error!("Error replying to direct message. {:?}", e); - } - } } diff --git a/libp2p-networking/src/network/behaviours/exponential_backoff.rs b/libp2p-networking/src/network/behaviours/exponential_backoff.rs index 2091f2abb2..04c848035b 100644 --- a/libp2p-networking/src/network/behaviours/exponential_backoff.rs +++ b/libp2p-networking/src/network/behaviours/exponential_backoff.rs @@ -52,6 +52,12 @@ impl ExponentialBackoff { } } + /// Return the timeout duration and start the next timeout. + pub fn next_timeout(&mut self, result: bool) -> Duration { + let timeout = self.timeout; + self.start_next(result); + timeout + } /// Whether or not the timeout is expired #[must_use] pub fn is_expired(&self) -> bool { diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 9ae2f70d75..6de82414cf 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -2,7 +2,7 @@ use futures::channel::oneshot::Sender; use libp2p::{ gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, - request_response::{cbor, ResponseChannel}, + request_response::{cbor, OutboundRequestId, ResponseChannel}, Multiaddr, }; use libp2p_identity::PeerId; @@ -12,7 +12,6 @@ use tracing::{debug, error}; use super::{ behaviours::{ dht::{DHTBehaviour, DHTEvent, KadPutQuery}, - direct_message::{DMBehaviour, DMEvent, DMRequest}, exponential_backoff::ExponentialBackoff, request_response::{Request, Response}, }, @@ -47,7 +46,7 @@ pub struct NetworkDef { /// purpose: directly messaging peer #[debug(skip)] - pub direct_message: DMBehaviour, + pub direct_message: libp2p::request_response::cbor::Behaviour, Vec>, /// Behaviour for requesting and receiving data #[debug(skip)] @@ -61,7 +60,7 @@ impl NetworkDef { gossipsub: GossipBehaviour, dht: DHTBehaviour, identify: IdentifyBehaviour, - direct_message: DMBehaviour, + direct_message: cbor::Behaviour, Vec>, request_response: cbor::Behaviour, ) -> NetworkDef { Self { @@ -144,25 +143,13 @@ impl NetworkDef { /// Request/response functions impl NetworkDef { /// Add a direct request for a given peer - pub fn add_direct_request(&mut self, peer_id: PeerId, data: Vec, retry_count: u8) { - let request = DMRequest { - peer_id, - data, - backoff: ExponentialBackoff::default(), - retry_count, - }; - self.direct_message.add_direct_request(request); + pub fn add_direct_request(&mut self, peer_id: PeerId, data: Vec) -> OutboundRequestId { + self.direct_message.send_request(&peer_id, data) } /// Add a direct response for a channel pub fn add_direct_response(&mut self, chan: ResponseChannel>, msg: Vec) { - self.direct_message.add_direct_response(chan, msg); - } -} - -impl From for NetworkEventInternal { - fn from(event: DMEvent) -> Self { - Self::DMEvent(event) + let _ = self.direct_message.send_response(chan, msg); } } @@ -183,6 +170,11 @@ impl From for NetworkEventInternal { Self::IdentifyEvent(Box::new(event)) } } +impl From, Vec>> for NetworkEventInternal { + fn from(value: libp2p::request_response::Event, Vec>) -> Self { + Self::DMEvent(value) + } +} impl From> for NetworkEventInternal { fn from(event: libp2p::request_response::Event) -> Self { diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 4a28ad441a..1f0b836561 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -19,7 +19,6 @@ pub use self::{ use self::behaviours::{ dht::DHTEvent, - direct_message::DMEvent, request_response::{Request, Response}, }; use futures::channel::oneshot::{self, Sender}; @@ -188,7 +187,7 @@ pub enum NetworkEventInternal { /// a gossip event GossipEvent(Box), /// a direct message event - DMEvent(DMEvent), + DMEvent(libp2p::request_response::Event, Vec>), /// a request response event RequestResponseEvent(libp2p::request_response::Event), } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index c91dc7c397..008f42fbff 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -23,7 +23,7 @@ use super::{ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, - direct_message::{DMBehaviour, DMEvent}, + direct_message::{DMBehaviour, DMRequest}, exponential_backoff::ExponentialBackoff, request_response::{Request, RequestResponseState, Response}, }; @@ -87,6 +87,10 @@ pub struct NetworkNode { listener_id: Option, /// Handler for requests and response behavior events. request_response_state: RequestResponseState, + /// Handler for direct messages + direct_message_state: DMBehaviour, + /// Channel to resend requests, set to Some when we call `spawn_listeners` + resend_tx: Option>, } impl NetworkNode { @@ -301,7 +305,7 @@ impl NetworkNode { .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), ), identify, - DMBehaviour::new(direct_message), + direct_message, request_response, ); @@ -334,6 +338,8 @@ impl NetworkNode { config, listener_id: None, request_response_state: RequestResponseState::default(), + direct_message_state: DMBehaviour::default(), + resend_tx: None, }) } @@ -433,7 +439,14 @@ impl NetworkNode { retry_count, } => { info!("pid {:?} adding direct request", self.peer_id); - behaviour.add_direct_request(pid, contents, retry_count); + let id = behaviour.add_direct_request(pid, contents.clone()); + let req = DMRequest { + peer_id: pid, + data: contents, + backoff: ExponentialBackoff::default(), + retry_count, + }; + self.direct_message_state.add_direct_request(req, id); } ClientRequest::DirectResponse(chan, msg) => { behaviour.add_direct_response(chan, msg); @@ -603,14 +616,9 @@ impl NetworkNode { None } }, - NetworkEventInternal::DMEvent(e) => Some(match e { - DMEvent::DirectRequest(data, pid, chan) => { - NetworkEvent::DirectRequest(data, pid, chan) - } - DMEvent::DirectResponse(data, pid) => { - NetworkEvent::DirectResponse(data, pid) - } - }), + NetworkEventInternal::DMEvent(e) => self + .direct_message_state + .handle_dm_event(e, self.resend_tx.clone()), NetworkEventInternal::RequestResponseEvent(e) => { self.request_response_state.handle_request_response(e) } @@ -670,6 +678,8 @@ impl NetworkNode { let (s_input, s_output) = unbounded::(); let (r_input, r_output) = unbounded::(); + self.resend_tx = Some(s_input.clone()); + async_spawn( async move { let mut fuse = s_output.recv().boxed().fuse(); From 9cc95c0cde71d8e5d9ed6fe13ea4c8094cf2371b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 27 Mar 2024 10:27:38 -0400 Subject: [PATCH 0893/1393] don't loop forever in DHT poll (#2830) * Removing behaviour impl for DM * retries and fixes * don't loop forever in DHT poll --- libp2p-networking/src/network/behaviours/dht/mod.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 0cbdbba462..caa9f9ee34 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -589,7 +589,10 @@ impl NetworkBehaviour for DHTBehaviour { } // retry put/gets if they are ready - while let Some(req) = self.queued_get_record_queries.pop_front() { + for _i in 0..self.queued_get_record_queries.len() { + let Some(req) = self.queued_get_record_queries.pop_front() else { + continue; + }; if req.backoff.is_expired() { self.get_record( req.key, @@ -603,7 +606,10 @@ impl NetworkBehaviour for DHTBehaviour { } } - while let Some(req) = self.queued_put_record_queries.pop_front() { + for _i in 0..self.queued_put_record_queries.len() { + let Some(req) = self.queued_put_record_queries.pop_front() else { + continue; + }; if req.backoff.is_expired() { self.put_record(req); } else { From c84094c6d7d1162251c9baa50a8498d65d532dac Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 27 Mar 2024 14:46:37 -0400 Subject: [PATCH 0894/1393] [Push-CDN] Docker reconfiguration and update to `0.1.15` (#2843) * update pcdn to 0.1.12 * update dockers * un-feature-gate testing flag * add build for crates back * fix hotshot-testing flag * change name of example * change to debug executables * fmt * remove double integration tests * update broker example and push cdn version * conditional executor * rearrange CI --------- Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- examples/Cargo.toml | 20 ++++++++-------- examples/infra/mod.rs | 1 + examples/push-cdn/broker.rs | 17 ++++++++++---- hotshot/Cargo.toml | 10 ++++---- hotshot/src/traits.rs | 4 +++- .../src/traits/networking/libp2p_network.rs | 4 +++- .../src/traits/networking/push_cdn_network.rs | 23 +++++++++++-------- 7 files changed, 48 insertions(+), 31 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 0ca6d4cd82..8ec544f863 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -8,7 +8,7 @@ version = { workspace = true } rust-version = "1.65.0" [features] -default = ["docs", "doc-images"] +default = ["docs", "doc-images", "hotshot-testing"] gpu-vid = ["hotshot-task-impls/gpu-vid"] # Features required for binaries @@ -17,7 +17,7 @@ bin-orchestrator = ["clap"] # Build the extended documentation docs = [] doc-images = [] -hotshot-testing = [] +hotshot-testing = ["hotshot/hotshot-testing"] randomized-leader-election = [] # libp2p @@ -89,11 +89,11 @@ name = "validator-push-cdn" path = "push-cdn/validator.rs" [[example]] -name = "broker-push-cdn" +name = "cdn-broker" path = "push-cdn/broker.rs" [[example]] -name = "marshal-push-cdn" +name = "cdn-marshal" path = "push-cdn/marshal.rs" [[example]] @@ -131,28 +131,28 @@ hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } chrono = "0.4" versioned-binary-serialization = { workspace = true } +sha2.workspace = true tracing = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } +cdn-client = { workspace = true, features = ["runtime-tokio"] } cdn-broker = { workspace = true, features = [ - "insecure", + "runtime-tokio", "strong_consistency", ] } -cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } +cdn-marshal = { workspace = true, features = ["runtime-tokio"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } -cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } +cdn-client = { workspace = true, features = ["runtime-async-std"] } cdn-broker = { workspace = true, features = [ - "insecure", "runtime-async-std", "strong_consistency", ] } -cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } +cdn-marshal = { workspace = true, features = ["runtime-async-std"] } [dev-dependencies] clap = { version = "4.5", features = ["derive", "env"] } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 405fe9642b..2af3c172f3 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -444,6 +444,7 @@ async fn libp2p_network_from_config( // NOTE: this introduces an invariant that the keys are assigned using this indexed // function all_keys, + #[cfg(feature = "hotshot-testing")] None, da_keys.clone(), da_keys.contains(&pub_key), diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index cc83d7ea2b..c4f1249c92 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -9,6 +9,7 @@ use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; use local_ip_address::local_ip; +use sha2::Digest; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -43,9 +44,13 @@ struct Args { /// The (private) port to bind to for connections from other brokers #[arg(long, default_value_t = 1739)] private_bind_port: u16, + + /// The seed for broker key generation + #[arg(long, default_value_t = 0)] + key_seed: u64, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] async fn main() -> Result<()> { // Parse command line arguments @@ -58,16 +63,20 @@ async fn main() -> Result<()> { let private_ip_address = local_ip().with_context(|| "failed to get local IP address")?; let private_address = format!("{}:{}", private_ip_address, args.private_bind_port); - // Create deterministic keys for brokers (for now, obviously) + // Generate the broker key from the supplied seed + let key_hash = sha2::Sha256::digest(args.key_seed.to_le_bytes()); let (public_key, private_key) = - ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); + ::SignatureKey::generated_from_seed_indexed(key_hash.into(), 1337); + // Create a broker configuration with all the supplied arguments let broker_config: Config::SignatureKey>> = ConfigBuilder::default() .public_advertise_address(args.public_advertise_address) .public_bind_address(format!("0.0.0.0:{}", args.public_bind_port)) .private_advertise_address(private_address.clone()) .private_bind_address(private_address) + .metrics_enabled(args.metrics_enabled) + .metrics_ip(args.metrics_ip) .discovery_endpoint(args.discovery_endpoint) .metrics_port(args.metrics_port) .keypair(KeyPair { @@ -75,7 +84,7 @@ async fn main() -> Result<()> { private_key, }) .build() - .with_context(|| "failed to build broker configuration")?; + .with_context(|| "failed to build broker config")?; // Create new `Broker` // Uses TCP from broker connections and Quic for user connections. diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index ea4fbd6d76..c51c0b8d16 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -55,23 +55,21 @@ jf-primitives.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -cdn-client = { workspace = true, features = ["insecure", "runtime-tokio"] } +cdn-client = { workspace = true, features = ["runtime-tokio"] } cdn-broker = { workspace = true, features = [ - "insecure", "runtime-tokio", "strong_consistency", ] } -cdn-marshal = { workspace = true, features = ["insecure", "runtime-tokio"] } +cdn-marshal = { workspace = true, features = ["runtime-tokio"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } -cdn-client = { workspace = true, features = ["insecure", "runtime-async-std"] } +cdn-client = { workspace = true, features = ["runtime-async-std"] } cdn-broker = { workspace = true, features = [ - "insecure", "runtime-async-std", "strong_consistency", ] } -cdn-marshal = { workspace = true, features = ["insecure", "runtime-async-std"] } +cdn-marshal = { workspace = true, features = ["runtime-async-std"] } [dev-dependencies] diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index d37ed8938f..ffc4136a1b 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,7 +13,9 @@ pub mod implementations { combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{Libp2pNetwork, PeerInfoVec}, memory_network::{MasterMap, MemoryNetwork}, - push_cdn_network::{ProductionDef, PushCdnNetwork, TestingDef, WrappedSignatureKey}, + push_cdn_network::{ + KeyPair, ProductionDef, PushCdnNetwork, TestingDef, WrappedSignatureKey, + }, web_server_network::WebServerNetwork, NetworkingMetricsValue, }; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index b1bb8484a3..b20244840c 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -16,6 +16,8 @@ use futures::{ future::{join_all, Either}, FutureExt, StreamExt, }; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::network::NetworkReliability; use hotshot_types::{ boxed_sync, constants::{Version01, LOOK_AHEAD, STATIC_VER_0_1, VERSION_0_1}, @@ -33,7 +35,7 @@ use hotshot_types::{ #[cfg(feature = "hotshot-testing")] use hotshot_types::{ message::{Message, MessageKind}, - traits::network::{NetworkReliability, TestableNetworkingImplementation, ViewMessage}, + traits::network::{TestableNetworkingImplementation, ViewMessage}, }; use libp2p_identity::PeerId; #[cfg(feature = "hotshot-testing")] diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index fcc254e511..aac4ddeabe 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,43 +1,48 @@ use super::NetworkError; +#[cfg(feature = "hotshot-testing")] use async_compatibility_layer::art::{async_block_on, async_spawn}; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use bincode::config::Options; +use cdn_broker::reexports::connection::protocols::Tcp; use cdn_broker::reexports::def::RunDef; use cdn_broker::reexports::discovery::{Embedded, Redis}; -use cdn_broker::{ - reexports::connection::protocols::Tcp, Broker, Config, ConfigBuilder as BrokerConfigBuilder, -}; +#[cfg(feature = "hotshot-testing")] +use cdn_broker::{Broker, Config, ConfigBuilder as BrokerConfigBuilder}; +pub use cdn_client::reexports::crypto::signature::KeyPair; use cdn_client::{ reexports::{ connection::protocols::Quic, - crypto::signature::{KeyPair, Serializable, SignatureScheme}, + crypto::signature::{Serializable, SignatureScheme}, message::{Broadcast, Direct, Message as PushCdnMessage, Topic}, }, Client, ConfigBuilder as ClientConfigBuilder, }; +#[cfg(feature = "hotshot-testing")] use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; #[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::TestableNetworkingImplementation; +use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ boxed_sync, constants::{Version01, VERSION_0_1}, data::ViewNumber, message::Message, traits::{ - network::{ - ConnectedNetwork, ConsensusIntentEvent, NetworkReliability, PushCdnNetworkError, - }, + network::{ConnectedNetwork, ConsensusIntentEvent, PushCdnNetworkError}, node_implementation::NodeType, signature_key::SignatureKey, }, utils::bincode_opts, BoxSyncFuture, }; +#[cfg(feature = "hotshot-testing")] use rand::rngs::StdRng; +#[cfg(feature = "hotshot-testing")] use rand::{RngCore, SeedableRng}; +use std::collections::BTreeSet; use std::marker::PhantomData; -use std::{collections::BTreeSet, path::Path, sync::Arc, time::Duration}; +#[cfg(feature = "hotshot-testing")] +use std::{path::Path, sync::Arc, time::Duration}; use tracing::{error, warn}; use versioned_binary_serialization::{ version::{StaticVersionType, Version}, From 8d682d54c9be00d41980480a9ede537ceb97e71b Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 27 Mar 2024 21:37:23 +0100 Subject: [PATCH 0895/1393] Calculate VID dispersal locally (#2841) * Calculate VID shared locally * Adjust to tokio and adjust tests * Split DAProsal validation and voting * Store only successfully signed VID shares --- task-impls/src/consensus.rs | 9 ++++++-- task-impls/src/da.rs | 24 +++++++++++++------- task-impls/src/events.rs | 2 ++ task-impls/src/vid.rs | 35 +++++++++++++++++++++++++++++- testing/src/predicates.rs | 19 ++++++++++++++++ testing/src/task_helpers.rs | 12 ++++++---- testing/src/view_generator.rs | 2 +- testing/tests/consensus_task.rs | 16 +++++++------- testing/tests/da_task.rs | 22 +++++++++++++++---- testing/tests/proposal_ordering.rs | 2 +- testing/tests/upgrade_task.rs | 12 +++++----- testing/tests/vid_task.rs | 26 +++++++++++++++++----- 12 files changed, 141 insertions(+), 40 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index df5102fe35..3c65f17386 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1092,9 +1092,14 @@ impl, A: ConsensusApi + debug!("VID disperse data is not more than one view older."); let payload_commitment = disperse.data.payload_commitment; - // Check whether the data comes from the right leader for this view + // Check whether the data comes from the right leader for this view or + // the data was calculated and signed by the current node let view_leader_key = self.quorum_membership.get_leader(view); - if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) { + if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) + && !self + .public_key + .validate(&disperse.signature, payload_commitment.as_ref()) + { warn!("Could not verify VID dispersal/share sig."); return; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index d2d4452067..852c0567a6 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -17,7 +17,6 @@ use hotshot_types::{ simple_certificate::DACertificate, simple_vote::{DAData, DAVote}, traits::{ - block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, @@ -31,6 +30,7 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; use crate::vote_collection::HandleVoteEvent; +use hotshot_types::traits::block_contents::vid_commitment; use std::{marker::PhantomData, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; @@ -120,13 +120,6 @@ impl, A: ConsensusApi + return None; } - let txns = proposal.data.encoded_transactions.clone(); - let num_nodes = self.quorum_membership.total_nodes(); - let payload_commitment = - spawn_blocking(move || vid_commitment(&txns, num_nodes)).await; - #[cfg(async_executor_impl = "tokio")] - let payload_commitment = payload_commitment.unwrap(); - let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? @@ -141,6 +134,13 @@ impl, A: ConsensusApi + return None; } + broadcast_event( + Arc::new(HotShotEvent::DAProposalValidated(proposal.clone(), sender)), + &event_stream, + ) + .await; + } + HotShotEvent::DAProposalValidated(proposal, sender) => { // Proposal is fresh and valid, notify the application layer self.api .send_event(Event { @@ -167,7 +167,14 @@ impl, A: ConsensusApi + ); return None; } + let txns = proposal.data.encoded_transactions.clone(); + let num_nodes = self.quorum_membership.total_nodes(); + let payload_commitment = + spawn_blocking(move || vid_commitment(&txns, num_nodes)).await; + #[cfg(async_executor_impl = "tokio")] + let payload_commitment = payload_commitment.unwrap(); + let view = proposal.data.get_view_number(); // Generate and send vote let Ok(vote) = DAVote::create_signed_vote( DAData { @@ -363,6 +370,7 @@ impl, A: ConsensusApi + | HotShotEvent::TransactionsSequenced(_, _, _) | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) + | HotShotEvent::DAProposalValidated(_, _) ) } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9f21adf391..f22c8428bc 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -36,6 +36,8 @@ pub enum HotShotEvent { TimeoutVoteSend(TimeoutVote), /// A DA proposal has been received from the network; handled by the DA task DAProposalRecv(Proposal>, TYPES::SignatureKey), + /// A DA proposal has been validated; handled by the DA task and VID task + DAProposalValidated(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task DAVoteRecv(DAVote), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3e23e68ea8..7b394a7bb4 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -5,10 +5,11 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; +use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, - data::VidDisperse, + data::{VidDisperse, VidDisperseShare}, message::Proposal, traits::{ consensus_api::ConsensusApi, @@ -154,6 +155,37 @@ impl, A: ConsensusApi + return None; } + HotShotEvent::DAProposalValidated(proposal, _sender) => { + let txns = proposal.data.encoded_transactions.clone(); + let num_nodes = self.membership.total_nodes(); + let vid_disperse = spawn_blocking(move || { + #[allow(clippy::panic)] + vid_scheme(num_nodes).disperse(&txns).unwrap_or_else(|err|panic!("VID disperse failure:\n\t(num_storage nodes,payload_byte_len)=({num_nodes},{})\n\terror: : {err}", txns.len())) + }) + .await; + #[cfg(async_executor_impl = "tokio")] + let vid_disperse = vid_disperse.unwrap(); + + let vid_disperse = VidDisperse::from_membership( + proposal.data.view_number, + vid_disperse, + &self.membership, + ); + + let vid_disperse_tasks = VidDisperseShare::from_vid_disperse(vid_disperse) + .into_iter() + .filter_map(|vid_share| { + Some(broadcast_event( + Arc::new(HotShotEvent::VidDisperseRecv( + vid_share.to_proposal(&self.private_key)?, + )), + &event_stream, + )) + }); + + join_all(vid_disperse_tasks).await; + } + HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted); } @@ -188,6 +220,7 @@ impl, A: ConsensusApi + | HotShotEvent::TransactionsSequenced(_, _, _) | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) + | HotShotEvent::DAProposalValidated(_, _) ) } fn should_shutdown(event: &Self::Event) -> bool { diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index e78fc698fb..3b2b1c4fbf 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -33,6 +33,25 @@ where } } +pub fn multi_exact( + events: Vec>, +) -> Vec>>> +where + TYPES: NodeType, +{ + events + .into_iter() + .map(|event| { + let event = Arc::new(event); + let info = format!("{:?}", event); + Predicate { + function: Box::new(move |e| e == &event), + info, + } + }) + .collect() +} + pub fn leaf_decided() -> Predicate>> where TYPES: NodeType, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 67273199c7..f582c7d2e3 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -398,7 +398,7 @@ pub fn build_vid_proposal( view_number: ViewNumber, transactions: Vec, private_key: &::PrivateKey, -) -> Proposal> { +) -> Vec>> { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); @@ -409,9 +409,13 @@ pub fn build_vid_proposal( ); VidDisperseShare::from_vid_disperse(vid_disperse) - .swap_remove(0) - .to_proposal(private_key) - .expect("Failed to sign payload commitment") + .into_iter() + .map(|vid_disperse| { + vid_disperse + .to_proposal(private_key) + .expect("Failed to sign payload commitment") + }) + .collect() } pub fn build_da_certificate( diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 2e9c904c28..49dfd39c9b 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -42,7 +42,7 @@ pub struct TestView { pub view_number: ViewNumber, pub quorum_membership: ::Membership, pub vid_proposal: ( - Proposal>, + Vec>>, ::SignatureKey, ), pub leader_public_key: ::SignatureKey, diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 9d83cc9247..45de005266 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -56,7 +56,7 @@ async fn test_consensus_task() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -135,7 +135,7 @@ async fn test_consensus_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ @@ -187,7 +187,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -199,7 +199,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VidDisperseRecv(vids[1].0.clone()), + VidDisperseRecv(vids[1].0[0].clone()), DACRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; @@ -299,7 +299,7 @@ async fn test_view_sync_finalize_propose() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -422,7 +422,7 @@ async fn test_view_sync_finalize_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -527,7 +527,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -623,7 +623,7 @@ async fn test_vid_disperse_storage_failure() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index d3b3f8de16..a660945c6d 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -64,15 +64,22 @@ async fn test_da_task() { asserts: vec![], }; - // Run view 2 and propose. + // Run view 2 and validate proposal. let view_2 = TestScriptStage { inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![exact(DAProposalValidated(proposals[1].clone(), leaders[1]))], + asserts: vec![], + }; + + // Run view 3 and vote + let view_3 = TestScriptStage { + inputs: vec![DAProposalValidated(proposals[1].clone(), leaders[1])], outputs: vec![exact(DAVoteSend(votes[1].clone()))], asserts: vec![], }; let da_state = DATaskState::>::create_from(&handle).await; - let stages = vec![view_1, view_2]; + let stages = vec![view_1, view_2, view_3]; run_test_script(stages, da_state).await; } @@ -124,9 +131,16 @@ async fn test_da_task_storage_failure() { asserts: vec![], }; - // Run view 2 and propose. + // Run view 2 and validate proposal. let view_2 = TestScriptStage { inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![exact(DAProposalValidated(proposals[1].clone(), leaders[1]))], + asserts: vec![], + }; + + // Run view 3 and propose. + let view_3 = TestScriptStage { + inputs: vec![DAProposalValidated(proposals[1].clone(), leaders[1])], outputs: vec![ /* No vote was sent due to the storage failure */ ], @@ -134,7 +148,7 @@ async fn test_da_task_storage_failure() { }; let da_state = DATaskState::>::create_from(&handle).await; - let stages = vec![view_1, view_2]; + let stages = vec![view_1, view_2, view_3]; run_test_script(stages, da_state).await; } diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index a96b16a9d2..833a63900d 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -52,7 +52,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 4f9f10358e..ecd4e88cdd 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -67,7 +67,7 @@ async fn test_upgrade_task() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), DACRecv(dacs[0].clone()), ], outputs: vec![ @@ -80,7 +80,7 @@ async fn test_upgrade_task() { let view_2 = TestScriptStage { inputs: vec![ - VidDisperseRecv(vids[1].0.clone()), + VidDisperseRecv(vids[1].0[0].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), DACRecv(dacs[1].clone()), ], @@ -96,7 +96,7 @@ async fn test_upgrade_task() { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DACRecv(dacs[2].clone()), - VidDisperseRecv(vids[2].0.clone()), + VidDisperseRecv(vids[2].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), @@ -111,7 +111,7 @@ async fn test_upgrade_task() { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DACRecv(dacs[3].clone()), - VidDisperseRecv(vids[3].0.clone()), + VidDisperseRecv(vids[3].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), @@ -229,7 +229,7 @@ async fn test_upgrade_and_consensus_task() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0.clone()), + VidDisperseRecv(vids[0].0[0].clone()), DACRecv(dacs[0].clone()), ], upgrade_vote_recvs, @@ -237,7 +237,7 @@ async fn test_upgrade_and_consensus_task() { vec![ DACRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( - vids[2].0.data.payload_commitment, + vids[2].0[0].data.payload_commitment, (), ViewNumber::new(2), ), diff --git a/testing/tests/vid_task.rs b/testing/tests/vid_task.rs index d084b0002e..ab9e0689e4 100644 --- a/testing/tests/vid_task.rs +++ b/testing/tests/vid_task.rs @@ -46,7 +46,7 @@ async fn test_vid_task() { view_number: ViewNumber::new(2), }; let message = Proposal { - data: proposal, + data: proposal.clone(), signature, _pd: PhantomData, }; @@ -62,10 +62,19 @@ async fn test_vid_task() { signature: message.signature.clone(), _pd: PhantomData, }; - let vid_share_proposal = VidDisperseShare::from_vid_disperse(vid_disperse.clone()) - .swap_remove(0) - .to_proposal(handle.private_key()) - .expect("Failed to sign block payload!"); + let vid_share_proposals: Vec<_> = VidDisperseShare::from_vid_disperse(vid_disperse.clone()) + .into_iter() + .map(|vid_disperse_share| { + vid_disperse_share + .to_proposal(handle.private_key()) + .expect("Failed to sign block payload!") + }) + .collect(); + let vid_share_proposal = vid_share_proposals[0].clone(); + let disperse_receives: Vec<_> = vid_share_proposals + .into_iter() + .map(HotShotEvent::VidDisperseRecv) + .collect(); let mut input = Vec::new(); let mut output = HashMap::new(); @@ -82,6 +91,10 @@ async fn test_vid_task() { vid_disperse.clone(), ViewNumber::new(2), )); + input.push(HotShotEvent::DAProposalValidated( + message, + *handle.public_key(), + )); input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::VidDisperseRecv(vid_share_proposal.clone())); input.push(HotShotEvent::Shutdown); @@ -99,6 +112,9 @@ async fn test_vid_task() { HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), 1, ); + for disperse_receive in disperse_receives { + output.insert(disperse_receive, 1); + } let vid_state = VIDTaskState { api: handle.clone(), From 53b5a9e94357471c83b995dcdd834b24d7c33536 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 27 Mar 2024 23:29:15 -0400 Subject: [PATCH 0896/1393] only spawn request task if network supports rr (#2853) --- hotshot/src/lib.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 78bea4ce58..bacf48c35d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -489,14 +489,14 @@ impl> SystemContext { &handle, ) .await; + add_request_network_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; } - add_request_network_task( - registry.clone(), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; add_network_event_task( registry.clone(), From fc63cde170165b0df9c1af9abc51d486c30825a9 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 28 Mar 2024 11:13:03 -0400 Subject: [PATCH 0897/1393] Add upgrade certificate to leaf commit (#2852) --- example-types/src/block_types.rs | 2 +- examples/infra/mod.rs | 12 ++-- hotshot/src/lib.rs | 2 +- task-impls/src/consensus.rs | 99 +++++++++++++------------------- task-impls/src/transactions.rs | 2 +- testing/src/block_builder.rs | 2 +- testing/src/task_helpers.rs | 67 +++++---------------- testing/src/view_generator.rs | 37 +++++------- types/src/data.rs | 76 +++++++++++++++++------- types/src/simple_certificate.rs | 20 ++++++- types/src/simple_vote.rs | 10 ++-- 11 files changed, 155 insertions(+), 174 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 6d28a0e1b3..af1c37116d 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -197,7 +197,7 @@ impl> Block payload_commitment: VidCommitment, _metadata: ::Metadata, ) -> Self { - let parent = &parent_leaf.block_header; + let parent = parent_leaf.get_block_header(); let mut timestamp = OffsetDateTime::now_utc().unix_timestamp() as u64; if timestamp < parent.timestamp { diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 2af3c172f3..06da261499 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -604,12 +604,12 @@ pub trait RunDA< // this might be a obob if let Some(leaf_info) = leaf_chain.first() { let leaf = &leaf_info.leaf; - info!("Decide event for leaf: {}", *leaf.view_number); + info!("Decide event for leaf: {}", *leaf.get_view_number()); // iterate all the decided transactions to calculate latency - if let Some(block_payload) = &leaf.block_payload { - for tx in - block_payload.get_transactions(leaf.block_header.metadata()) + if let Some(block_payload) = &leaf.get_block_payload() { + for tx in block_payload + .get_transactions(leaf.get_block_header().metadata()) { let restored_timestamp_vec = tx.0[tx.0.len() - 8..].to_vec(); @@ -626,9 +626,9 @@ pub trait RunDA< } } - let new_anchor = leaf.view_number; + let new_anchor = leaf.get_view_number(); if new_anchor >= anchor_view { - anchor_view = leaf.view_number; + anchor_view = leaf.get_view_number(); } // send transactions diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index bacf48c35d..555db95843 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -182,7 +182,7 @@ impl> SystemContext { let validated_state = match initializer.validated_state { Some(state) => state, None => Arc::new(TYPES::ValidatedState::from_header( - &anchored_leaf.block_header, + anchored_leaf.get_block_header(), )), }; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 3c65f17386..1e0178bdbc 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -195,13 +195,8 @@ impl, A: ConsensusApi + }; let parent_commitment = parent.commit(); - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: proposal.justify_qc.clone(), - parent_commitment, - block_header: proposal.block_header.clone(), - block_payload: None, - }; + let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); + proposed_leaf.set_parent_commitment(parent_commitment); // Validate the DAC. let message = if cert.is_valid_cert(self.committee_membership.as_ref()) { @@ -215,7 +210,7 @@ impl, A: ConsensusApi + } if let Ok(vote) = QuorumVote::::create_signed_vote( QuorumData { - leaf_commit: leaf.commit(), + leaf_commit: proposed_leaf.commit(), }, view, &self.public_key, @@ -501,7 +496,7 @@ impl, A: ConsensusApi + { Some(leaf) => { if let (Some(state), _) = - consensus.get_state_and_delta(leaf.view_number) + consensus.get_state_and_delta(leaf.get_view_number()) { Some((leaf, state.clone())) } else { @@ -539,13 +534,8 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - let leaf = Leaf { - view_number: view, - justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.get_data().leaf_commit, - block_header: proposal.data.block_header.clone(), - block_payload: None, - }; + let leaf = Leaf::from_proposal(proposal); + let state = Arc::new( >::from_header( &proposal.data.block_header, @@ -626,20 +616,16 @@ impl, A: ConsensusApi + let state = Arc::new(validated_state); let delta = Arc::new(state_delta); let parent_commitment = parent_leaf.commit(); - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: justify_qc.clone(), - parent_commitment, - block_header: proposal.data.block_header.clone(), - block_payload: None, - }; - let leaf_commitment = leaf.commit(); + + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + proposed_leaf.set_parent_commitment(parent_commitment); // Validate the signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - if !view_leader_key.validate(&proposal.signature, leaf_commitment.as_ref()) { + if !view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()) { error!(?proposal.signature, "Could not verify proposal."); return; } + // Create a positive vote if either liveness or safety check // passes. @@ -655,7 +641,7 @@ impl, A: ConsensusApi + |leaf, _, _| { // if leaf view no == locked view no then we're done, report success by // returning true - leaf.view_number != consensus.locked_view + leaf.get_view_number() != consensus.locked_view }, ); let safety_check = outcome.is_ok(); @@ -703,7 +689,7 @@ impl, A: ConsensusApi + let mut leafs_decided = Vec::new(); let mut included_txns = HashSet::new(); let old_anchor_view = consensus.last_decided_view; - let parent_view = leaf.justify_qc.get_view_number(); + let parent_view = proposed_leaf.get_justify_qc().get_view_number(); let mut current_chain_length = 0usize; if parent_view + 1 == view { current_chain_length += 1; @@ -713,17 +699,17 @@ impl, A: ConsensusApi + true, |leaf, state, delta| { if !new_decide_reached { - if last_view_number_visited == leaf.view_number + 1 { - last_view_number_visited = leaf.view_number; + if last_view_number_visited == leaf.get_view_number() + 1 { + last_view_number_visited = leaf.get_view_number(); current_chain_length += 1; if current_chain_length == 2 { - new_locked_view = leaf.view_number; + new_locked_view = leaf.get_view_number(); new_commit_reached = true; // The next leaf in the chain, if there is one, is decided, so this // leaf's justify_qc would become the QC for the decided chain. - new_decide_qc = Some(leaf.justify_qc.clone()); + new_decide_qc = Some(leaf.get_justify_qc().clone()); } else if current_chain_length == 3 { - new_anchor_view = leaf.view_number; + new_anchor_view = leaf.get_view_number(); new_decide_reached = true; } } else { @@ -734,7 +720,7 @@ impl, A: ConsensusApi + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above if new_decide_reached { let mut leaf = leaf.clone(); - if leaf.view_number == new_anchor_view { + if leaf.get_view_number() == new_anchor_view { consensus .metrics .last_synced_block_height @@ -768,7 +754,7 @@ impl, A: ConsensusApi + leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid)); leafs_decided.push(leaf.clone()); - if let Some(ref payload) = leaf.block_payload { + if let Some(ref payload) = leaf.get_block_payload() { for txn in payload .transaction_commitments(leaf.get_block_header().metadata()) { @@ -801,13 +787,15 @@ impl, A: ConsensusApi + view, View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(), + leaf: proposed_leaf.commit(), state: state.clone(), delta: Some(delta.clone()), }, }, ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + consensus + .saved_leaves + .insert(proposed_leaf.commit(), proposed_leaf.clone()); if let Err(e) = self .storage @@ -1344,7 +1332,7 @@ impl, A: ConsensusApi + error!("Failed to find high QC of parent."); return false; }; - if leaf.view_number == consensus.last_decided_view { + if leaf.get_view_number() == consensus.last_decided_view { reached_decided = true; } @@ -1358,10 +1346,10 @@ impl, A: ConsensusApi + if !reached_decided { debug!("We have not reached decide from view {:?}", self.cur_view); while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { - if next_parent_leaf.view_number <= consensus.last_decided_view { + if next_parent_leaf.get_view_number() <= consensus.last_decided_view { break; } - next_parent_hash = next_parent_leaf.parent_commitment; + next_parent_hash = next_parent_leaf.get_parent_commitment(); } debug!("updated saved leaves"); // TODO do some sort of sanity check on the view number that it matches decided @@ -1376,21 +1364,6 @@ impl, A: ConsensusApi + commit_and_metadata.metadata.clone(), ) .await; - let leaf = Leaf { - view_number: view, - justify_qc: consensus.high_qc.clone(), - parent_commitment: parent_leaf.commit(), - block_header: block_header.clone(), - block_payload: None, - }; - - let Ok(signature) = - TYPES::SignatureKey::sign(&self.private_key, leaf.commit().as_ref()) - else { - error!("Failed to sign leaf.commit()!"); - return false; - }; - let upgrade_cert = if self .upgrade_cert .as_ref() @@ -1415,11 +1388,21 @@ impl, A: ConsensusApi + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. let proposal = QuorumProposal { - block_header, - view_number: leaf.view_number, + block_header: block_header.clone(), + view_number: view, justify_qc: consensus.high_qc.clone(), proposal_certificate, - upgrade_certificate: upgrade_cert, + upgrade_certificate: upgrade_cert.clone(), + }; + + let mut new_leaf = Leaf::from_quorum_proposal(&proposal); + new_leaf.set_parent_commitment(parent_leaf.commit()); + + let Ok(signature) = + TYPES::SignatureKey::sign(&self.private_key, new_leaf.commit().as_ref()) + else { + error!("Failed to sign new_leaf.commit()!"); + return false; }; self.proposal_cert = None; @@ -1428,7 +1411,7 @@ impl, A: ConsensusApi + signature, _pd: PhantomData, }; - debug!("Sending proposal for view {:?}", leaf.view_number); + debug!("Sending proposal for view {:?}", view); broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 5672744f68..4d8be0e409 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -127,7 +127,7 @@ impl, A: ConsensusApi + let mut included_txn_size = 0; let mut included_txn_count = 0; for leaf in leaf_chain { - if let Some(ref payload) = leaf.block_payload { + if let Some(ref payload) = leaf.get_block_payload() { for txn in payload.transaction_commitments(leaf.get_block_header().metadata()) { diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 42c73f469c..7d250c3443 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -348,7 +348,7 @@ impl TaskState for SimpleBuilderTask { HotShotEvent::LeafDecided(leaf_chain) => { let mut queue = this.transactions.write().await; for leaf in leaf_chain.iter() { - if let Some(ref payload) = leaf.block_payload { + if let Some(ref payload) = leaf.get_block_payload() { for txn in payload.transaction_commitments(&()) { queue.remove(&txn); } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index f582c7d2e3..c48203f3d6 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -240,7 +240,9 @@ async fn build_quorum_proposal_and_signature( handle.hotshot.memberships.quorum_membership.total_nodes(), ); let mut parent_state = Arc::new( - >::from_header(&parent_leaf.block_header), + >::from_header( + parent_leaf.get_block_header(), + ), ); let block_header = TestBlockHeader::new( &*parent_state, @@ -250,17 +252,6 @@ async fn build_quorum_proposal_and_signature( (), ) .await; - // current leaf that can be re-assigned everytime when entering a new view - let mut leaf = Leaf { - view_number: ViewNumber::new(1), - justify_qc: consensus.high_qc.clone(), - parent_commitment: parent_leaf.commit(), - block_header: block_header.clone(), - block_payload: None, - }; - - let mut signature = ::sign(private_key, leaf.commit().as_ref()) - .expect("Failed to sign leaf commitment!"); let mut proposal = QuorumProposal:: { block_header: block_header.clone(), view_number: ViewNumber::new(1), @@ -268,6 +259,11 @@ async fn build_quorum_proposal_and_signature( upgrade_certificate: None, proposal_certificate: None, }; + // current leaf that can be re-assigned everytime when entering a new view + let mut leaf = Leaf::from_quorum_proposal(&proposal); + + let mut signature = ::sign(private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment!"); // Only view 2 is tested, higher views are not tested for cur_view in 2..=view { @@ -307,17 +303,6 @@ async fn build_quorum_proposal_and_signature( private_key, ); // create a new leaf for the current view - let parent_leaf = leaf.clone(); - let leaf_new_view = Leaf { - view_number: ViewNumber::new(cur_view), - justify_qc: created_qc.clone(), - parent_commitment: parent_leaf.commit(), - block_header: block_header.clone(), - block_payload: None, - }; - let signature_new_view = - ::sign(private_key, leaf_new_view.commit().as_ref()) - .expect("Failed to sign leaf commitment!"); let proposal_new_view = QuorumProposal:: { block_header: block_header.clone(), view_number: ViewNumber::new(cur_view), @@ -325,6 +310,10 @@ async fn build_quorum_proposal_and_signature( upgrade_certificate: None, proposal_certificate: None, }; + let leaf_new_view = Leaf::from_quorum_proposal(&proposal_new_view); + let signature_new_view = + ::sign(private_key, leaf_new_view.commit().as_ref()) + .expect("Failed to sign leaf commitment!"); proposal = proposal_new_view; signature = signature_new_view; leaf = leaf_new_view; @@ -447,39 +436,9 @@ pub async fn build_vote( handle: &SystemContextHandle, proposal: QuorumProposal, ) -> GeneralConsensusMessage { - let consensus_lock = handle.get_consensus(); - let consensus = consensus_lock.read().await; - - let justify_qc = proposal.justify_qc.clone(); let view = ViewNumber::new(*proposal.view_number); - let parent = if justify_qc.is_genesis { - let Some(genesis_view) = consensus.validated_state_map.get(&ViewNumber::new(0)) else { - panic!("Couldn't find genesis view in state map."); - }; - let Some(leaf) = genesis_view.get_leaf_commitment() else { - panic!("Genesis view points to a view without a leaf"); - }; - let Some(leaf) = consensus.saved_leaves.get(&leaf) else { - panic!("Failed to find genesis leaf."); - }; - leaf.clone() - } else { - consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - .unwrap() - }; - - let parent_commitment = parent.commit(); - let leaf: Leaf<_> = Leaf { - view_number: view, - justify_qc: proposal.justify_qc.clone(), - parent_commitment, - block_header: proposal.block_header, - block_payload: None, - }; + let leaf: Leaf<_> = Leaf::from_quorum_proposal(&proposal); let vote = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 49dfd39c9b..f963d69b56 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -113,16 +113,11 @@ impl TestView { _pd: PhantomData, }; - let leaf = Leaf { - view_number: genesis_view, - justify_qc: QuorumCertificate::genesis(), - parent_commitment: Leaf::genesis(&TestInstanceState {}).commit(), - block_header: block_header.clone(), - // Note: this field is not relevant in calculating the leaf commitment. - block_payload: Some(TestBlockPayload { - transactions: transactions.clone(), - }), - }; + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal_inner); + leaf.fill_block_payload_unchecked(TestBlockPayload { + transactions: transactions.clone(), + }); + leaf.set_parent_commitment(Leaf::genesis(&TestInstanceState {}).commit()); let signature = ::sign(&private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); @@ -274,20 +269,6 @@ impl TestView { payload_commitment, }; - let leaf = Leaf { - view_number: next_view, - justify_qc: quorum_certificate.clone(), - parent_commitment: old.leaf.commit(), - block_header: block_header.clone(), - // Note: this field is not relevant in calculating the leaf commitment. - block_payload: Some(TestBlockPayload { - transactions: transactions.clone(), - }), - }; - - let signature = ::sign(&private_key, leaf.commit().as_ref()) - .expect("Failed to sign leaf commitment."); - let proposal = QuorumProposal:: { block_header: block_header.clone(), view_number: next_view, @@ -296,6 +277,14 @@ impl TestView { proposal_certificate, }; + let mut leaf = Leaf::from_quorum_proposal(&proposal); + leaf.fill_block_payload_unchecked(TestBlockPayload { + transactions: transactions.clone(), + }); + + let signature = ::sign(&private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment."); + let quorum_proposal = Proposal { data: proposal, signature, diff --git a/types/src/data.rs b/types/src/data.rs index cf330b6b56..75c6ebbe4e 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -373,22 +373,25 @@ pub trait TestableLeaf { #[serde(bound(deserialize = ""))] pub struct Leaf { /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, + view_number: TYPES::Time, /// Per spec, justification - pub justify_qc: QuorumCertificate, + justify_qc: QuorumCertificate, /// The hash of the parent `Leaf` /// So we can ask if it extends - pub parent_commitment: Commitment, + parent_commitment: Commitment, /// Block header. - pub block_header: TYPES::BlockHeader, + block_header: TYPES::BlockHeader, + + /// Optional upgrade certificate, if one was attached to the quorum proposal for this view. + upgrade_certificate: Option>, /// Optional block payload. /// /// It may be empty for nodes not in the DA committee. - pub block_payload: Option, + block_payload: Option, } impl PartialEq for Leaf { @@ -442,6 +445,7 @@ impl Leaf { view_number: TYPES::Time::genesis(), justify_qc: QuorumCertificate::::genesis(), parent_commitment: fake_commitment(), + upgrade_certificate: None, block_header: block_header.clone(), block_payload: Some(payload), } @@ -461,10 +465,18 @@ impl Leaf { pub fn get_justify_qc(&self) -> QuorumCertificate { self.justify_qc.clone() } + /// The QC linking this leaf to its parent in the chain. + pub fn get_upgrade_certificate(&self) -> Option> { + self.upgrade_certificate.clone() + } /// Commitment to this leaf's parent. pub fn get_parent_commitment(&self) -> Commitment { self.parent_commitment } + /// Commitment to this leaf's parent. + pub fn set_parent_commitment(&mut self, commitment: Commitment) { + self.parent_commitment = commitment; + } /// The block header contained in this leaf. pub fn get_block_header(&self) -> &::BlockHeader { &self.block_header @@ -567,29 +579,49 @@ pub fn serialize_signature2( impl Committable for Leaf { fn commit(&self) -> commit::Commitment { - let signatures_bytes = if self.justify_qc.is_genesis { - let mut bytes = vec![]; - bytes.extend("genesis".as_bytes()); - bytes - } else { - serialize_signature2::(self.justify_qc.signatures.as_ref().unwrap()) - }; - // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) .u64_field("block number", self.get_height()) .field("parent Leaf commitment", self.parent_commitment) - .constant_str("block payload commitment") - .fixed_size_bytes(self.get_payload_commitment().as_ref().as_ref()) - .constant_str("justify_qc view number") - .u64(*self.justify_qc.view_number) - .field( - "justify_qc leaf commitment", - self.justify_qc.get_data().leaf_commit, + .fixed_size_field( + "block payload commitment", + self.get_payload_commitment().as_ref().as_ref(), ) - .constant_str("justify_qc signatures") - .var_size_bytes(&signatures_bytes) + .field("justify qc", self.justify_qc.commit()) + .optional("upgrade certificate", &self.upgrade_certificate) .finalize() } } + +impl Leaf { + pub fn from_proposal(proposal: &Proposal>) -> Self { + Self::from_quorum_proposal(&proposal.data) + } + + pub fn from_quorum_proposal(quorum_proposal: &QuorumProposal) -> Self { + // WARNING: Do NOT change this to a wildcard match, or reference the fields directly in the construction of the leaf. + // The point of this match is that we will get a compile-time error if we add a field without updating this. + let QuorumProposal { + view_number, + justify_qc, + block_header, + upgrade_certificate, + proposal_certificate: _, + } = quorum_proposal; + Leaf { + view_number: *view_number, + justify_qc: justify_qc.clone(), + parent_commitment: justify_qc.get_data().leaf_commit, + block_header: block_header.clone(), + upgrade_certificate: upgrade_certificate.clone(), + block_payload: None, + } + } + + pub fn commit_from_proposal( + proposal: &Proposal>, + ) -> Commitment { + Leaf::from_proposal(proposal).commit() + } +} diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 1592583e90..73ee8f4c95 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,7 +10,7 @@ use commit::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ - data::Leaf, + data::{serialize_signature2, Leaf}, simple_vote::{ DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, @@ -77,6 +77,24 @@ pub struct SimpleCertificate, } +impl> Committable + for SimpleCertificate +{ + fn commit(&self) -> Commitment { + let signature_bytes = match self.signatures.as_ref() { + Some(sigs) => serialize_signature2::(sigs), + None => vec![], + }; + commit::RawCommitmentBuilder::new("Certificate") + .field("data", self.data.commit()) + .field("vote_commitment", self.vote_commitment) + .field("view number", self.view_number.commit()) + .var_size_field("signatures", &signature_bytes) + .fixed_size_field("is genesis", &[self.is_genesis as u8]) + .finalize() + } +} + impl> Certificate for SimpleCertificate { diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 3573ad4628..faa55c870b 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -162,7 +162,7 @@ impl SimpleVote { impl Committable for QuorumData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Yes Vote") + commit::RawCommitmentBuilder::new("Quorum data") .var_size_bytes(self.leaf_commit.as_ref()) .finalize() } @@ -170,7 +170,7 @@ impl Committable for QuorumData { impl Committable for TimeoutData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Timeout Vote") + commit::RawCommitmentBuilder::new("Timeout data") .u64(*self.view) .finalize() } @@ -178,7 +178,7 @@ impl Committable for TimeoutData { impl Committable for DAData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("DA Vote") + commit::RawCommitmentBuilder::new("DA data") .var_size_bytes(self.payload_commit.as_ref()) .finalize() } @@ -186,7 +186,7 @@ impl Committable for DAData { impl Committable for VIDData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("VID Vote") + commit::RawCommitmentBuilder::new("VID data") .var_size_bytes(self.payload_commit.as_ref()) .finalize() } @@ -194,7 +194,7 @@ impl Committable for VIDData { impl Committable for UpgradeProposalData { fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("Upgrade Vote"); + let builder = commit::RawCommitmentBuilder::new("Upgrade data"); builder .u64(*self.new_version_first_block) .u64(*self.old_version_last_block) From 96ef3fe5cd5cc938ab28d0d1a5f97d3a2d904245 Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Thu, 28 Mar 2024 14:20:47 -0400 Subject: [PATCH 0898/1393] [Builder] API changes (#2856) * return hash * provide serde suppport for static committee * add builder address api * inclue api in testing * remove serde --- builder-api/Cargo.toml | 1 + builder-api/api/builder.toml | 6 ++++++ builder-api/src/builder.rs | 28 ++++++++++++++++++++++------ builder-api/src/data_source.rs | 8 ++++++++ task-impls/src/builder.rs | 8 +++++--- testing/src/block_builder.rs | 12 ++++++++++++ 6 files changed, 54 insertions(+), 9 deletions(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index a351f1099f..4f70d90a70 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -16,4 +16,5 @@ snafu = { workspace = true } tagged-base64 = { workspace = true } tide-disco = { workspace = true } toml = { workspace = true } +commit = { workspace = true } versioned-binary-serialization = { workspace = true } diff --git a/builder-api/api/builder.toml b/builder-api/api/builder.toml index e64b94da48..b906856e0d 100644 --- a/builder-api/api/builder.toml +++ b/builder-api/api/builder.toml @@ -63,3 +63,9 @@ Get the specified block candidate. Returns application-specific block header type """ + +[route.builder_address] +PATH = ["builderaddress"] +DOC = """ +Get the builder address. +""" diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 23026675b9..b210e8fba0 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -1,6 +1,11 @@ use std::{fmt::Display, path::PathBuf}; +use crate::{ + api::load_api, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, +}; use clap::Args; +use commit::Committable; use derive_more::From; use futures::FutureExt; use hotshot_types::{ @@ -17,11 +22,6 @@ use tide_disco::{ }; use versioned_binary_serialization::version::StaticVersionType; -use crate::{ - api::load_api, - data_source::{AcceptsTxnSubmits, BuilderDataSource}, -}; - #[derive(Args, Default)] pub struct Options { #[arg(long = "builder-api-path", env = "HOTSHOT_BUILDER_API_PATH")] @@ -79,6 +79,11 @@ pub enum Error { TxnSubmit { source: BuildError, }, + #[snafu(display("error getting builder address: {source}"))] + #[from(ignore)] + BuilderAddress { + source: BuildError, + }, Custom { message: String, status: StatusCode, @@ -105,6 +110,7 @@ impl tide_disco::error::Error for Error { Error::TxnUnpack { .. } => StatusCode::BadRequest, Error::TxnSubmit { .. } => StatusCode::InternalServerError, Error::Custom { .. } => StatusCode::InternalServerError, + Error::BuilderAddress { .. } => StatusCode::InternalServerError, } } } @@ -165,6 +171,15 @@ where }) } .boxed() + })? + .get("builder_address", |_req, state| { + async move { + state + .get_builder_address() + .await + .context(BuilderAddressSnafu) + } + .boxed() })?; Ok(api) } @@ -188,8 +203,9 @@ where let tx = req .body_auto::<::Transaction, Ver>(Ver::instance()) .context(TxnUnpackSnafu)?; + let hash = tx.commit(); state.submit_txn(tx).await.context(TxnSubmitSnafu)?; - Ok(()) + Ok(hash) } .boxed() })?; diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index 69a75feb85..f95748b4ec 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -18,20 +18,28 @@ where <::SignatureKey as SignatureKey>::PureAssembledSignatureType: for<'a> TryFrom<&'a TaggedBase64> + Into, { + // To get the list of available blocks async fn get_available_blocks( &self, for_parent: &VidCommitment, ) -> Result>, BuildError>; + + // to claim a block from the list of provided available blocks async fn claim_block( &self, block_hash: &BuilderCommitment, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuildError>; + + // To claim a block header input async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuildError>; + + // To get the builder address + async fn get_builder_address(&self) -> Result<::SignatureKey, BuildError>; } #[async_trait] diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 1878b5f3cc..dec86c8a14 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -39,9 +39,11 @@ impl From for BuilderClientError { message: source.to_string(), } } - BuilderApiError::TxnSubmit { source } => Self::Api { - message: source.to_string(), - }, + BuilderApiError::TxnSubmit { source } | BuilderApiError::BuilderAddress { source } => { + Self::Api { + message: source.to_string(), + } + } BuilderApiError::Custom { message, .. } => Self::Api { message }, BuilderApiError::BlockAvailable { source, .. } | BuilderApiError::BlockClaim { source, .. } => match source { diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 7d250c3443..bdacbcee6e 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -201,6 +201,12 @@ impl BuilderDataSource for RandomBuilderSource { }; Ok(header_input) } + + async fn get_builder_address( + &self, + ) -> Result<::SignatureKey, BuildError> { + Ok(self.pub_key) + } } /// Construct a tide disco app that mocks the builder API. @@ -296,6 +302,12 @@ impl BuilderDataSource for SimpleBuilderSource { let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; entry.header_input.take().ok_or(BuildError::Missing) } + + async fn get_builder_address( + &self, + ) -> Result<::SignatureKey, BuildError> { + Ok(self.pub_key) + } } impl SimpleBuilderSource { From 1de902ecc4ad9a8b2c25612c8940cd86c5cf6508 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 28 Mar 2024 16:39:01 -0400 Subject: [PATCH 0899/1393] Move validate and apply off the critical path (#2849) * move validate and apply off the critical path * test pass, but the unit one * fix tests * more test fixes * fix nondeterminism * missed the upgrade task * fix proposal ordering * make ordering more deterministic * typo * forgot the upgrade task * delay more * remove delays * fix da test * uncomment one line * merge main * making the tests pass:wq --- macros/src/lib.rs | 5 +- task-impls/src/consensus.rs | 259 ++++++++++++++++------------- testing/src/overall_safety_task.rs | 17 +- testing/src/script.rs | 14 +- testing/src/spinning_task.rs | 10 +- testing/tests/catchup.rs | 7 +- testing/tests/consensus_task.rs | 8 +- testing/tests/da_task.rs | 14 +- testing/tests/proposal_ordering.rs | 3 +- testing/tests/upgrade_task.rs | 2 +- 10 files changed, 194 insertions(+), 145 deletions(-) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 45f6f4805f..91e2dee0eb 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -68,7 +68,8 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { use hotshot_testing::predicates::Predicate; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; - + use std::time::Duration; + use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::sync::Arc; @@ -130,7 +131,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { #task_names.state().handle_result(&res).await; } - while let Ok(received_output) = test_receiver.try_recv() { + while let Ok(Ok(received_output)) = async_timeout(Duration::from_millis(250), test_receiver.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &#task_expectations[stage_number].output_asserts; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 1e0178bdbc..7444be4329 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -61,6 +61,130 @@ pub struct CommitmentAndMetadata { /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; +/// Validate the state and safety and liveness of a proposal then emit +/// a `QuorumProposalValidated` event. +#[allow(clippy::too_many_arguments)] +async fn validate_proposal( + proposal: Proposal>, + parent_leaf: Leaf, + consensus: Arc>>, + parent_state: Arc, + view_leader_key: TYPES::SignatureKey, + event_stream: Sender>>, + sender: TYPES::SignatureKey, + event_sender: Sender>, + storage: Arc>>, +) { + let Ok((validated_state, state_delta)) = parent_state + .validate_and_apply_header( + &consensus.read().await.instance_state, + &parent_leaf, + &proposal.data.block_header.clone(), + ) + .await + else { + error!("Block header doesn't extend the proposal",); + return; + }; + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + let parent_commitment = parent_leaf.commit(); + let view = proposal.data.get_view_number(); + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + proposed_leaf.set_parent_commitment(parent_commitment); + + // Validate the signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + if !view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()) { + error!(?proposal.signature, "Could not verify proposal."); + return; + } + let justify_qc = proposal.data.justify_qc.clone(); + // Create a positive vote if either liveness or safety check + // passes. + + // Liveness check. + let consensus = consensus.upgradable_read().await; + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.get_view_number(), + Terminator::Inclusive(consensus.locked_view), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.get_view_number() != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view); + if let Err(e) = outcome { + broadcast_event( + Event { + view_number: view, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } + return; + } + + // We accept the proposal, notify the application layer + + broadcast_event( + Event { + view_number: view, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }, + &event_sender, + ) + .await; + // Notify other tasks + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), + &event_stream, + ) + .await; + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + + consensus.validated_state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: state.clone(), + delta: Some(delta.clone()), + }, + }, + ); + consensus + .saved_leaves + .insert(proposed_leaf.commit(), proposed_leaf.clone()); + + if let Err(e) = storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } +} + /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState< @@ -186,7 +310,7 @@ impl, A: ConsensusApi + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { - error!( + warn!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", justify_qc.get_data().leaf_commit, proposal.view_number, @@ -530,7 +654,7 @@ impl, A: ConsensusApi + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some((parent_leaf, parent_state)) = parent else { - error!( + warn!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); @@ -602,83 +726,23 @@ impl, A: ConsensusApi + return; }; - let Ok((validated_state, state_delta)) = parent_state - .validate_and_apply_header( - &consensus.instance_state, - &parent_leaf, - &proposal.data.block_header.clone(), - ) - .await - else { - error!("Block header doesn't extend the proposal",); - return; - }; - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - let parent_commitment = parent_leaf.commit(); - - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - proposed_leaf.set_parent_commitment(parent_commitment); - - // Validate the signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - if !view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()) { - error!(?proposal.signature, "Could not verify proposal."); - return; - } - - // Create a positive vote if either liveness or safety check - // passes. - - // Liveness check. - let liveness_check = justify_qc.get_view_number() > consensus.locked_view; - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.get_view_number(), - Terminator::Inclusive(consensus.locked_view), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.get_view_number() != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - if let Err(e) = outcome { - self.api - .send_event(Event { - view_number: view, - event: EventType::Error { error: Arc::new(e) }, - }) - .await; - } - - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view); - return; - } - - self.current_proposal = Some(proposal.data.clone()); - - // We accept the proposal, notify the application layer - self.api - .send_event(Event { - view_number: self.cur_view, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender, - }, - }) - .await; - // Notify other tasks - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), - &event_stream, - ) - .await; + async_spawn(validate_proposal( + proposal.clone(), + parent_leaf, + self.consensus.clone(), + parent_state.clone(), + view_leader_key, + event_stream.clone(), + sender, + self.output_event_stream.clone(), + self.storage.clone(), + )); + } + HotShotEvent::QuorumProposalValidated(proposal) => { + let consensus = self.consensus.upgradable_read().await; + let view = proposal.get_view_number(); + self.current_proposal = Some(proposal.clone()); let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; let mut last_view_number_visited = view; @@ -689,7 +753,7 @@ impl, A: ConsensusApi + let mut leafs_decided = Vec::new(); let mut included_txns = HashSet::new(); let old_anchor_view = consensus.last_decided_view; - let parent_view = proposed_leaf.get_justify_qc().get_view_number(); + let parent_view = proposal.justify_qc.get_view_number(); let mut current_chain_length = 0usize; if parent_view + 1 == view { current_chain_length += 1; @@ -766,14 +830,6 @@ impl, A: ConsensusApi + }, ) { error!("view publish error {e}"); - broadcast_event( - Event { - view_number: view, - event: EventType::Error { error: e.into() }, - }, - &self.output_event_stream, - ) - .await; } } @@ -783,33 +839,7 @@ impl, A: ConsensusApi + HashSet::new() }; - consensus.validated_state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state: state.clone(), - delta: Some(delta.clone()), - }, - }, - ); - consensus - .saved_leaves - .insert(proposed_leaf.commit(), proposed_leaf.clone()); - - if let Err(e) = self - .storage - .write() - .await - .update_undecided_state( - consensus.saved_leaves.clone(), - consensus.validated_state_map.clone(), - ) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; if new_commit_reached { consensus.locked_view = new_locked_view; } @@ -1440,6 +1470,7 @@ impl, A: ConsensusApi + event.as_ref(), HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::QuorumVoteRecv(_) + | HotShotEvent::QuorumProposalValidated(_) | HotShotEvent::QCFormed(_) | HotShotEvent::UpgradeCertificateFormed(_) | HotShotEvent::DACRecv(_) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index ce4b841810..58aa2d1c95 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -423,9 +423,20 @@ impl RoundResult { let num_failed = self.failed_nodes.len(); if check_leaf && self.leaf_map.len() != 1 { - error!("LEAF MAP (that is mismatched) IS: {:?}", self.leaf_map); - self.status = ViewStatus::Err(OverallSafetyTaskErr::MismatchedLeaf); - return; + let (quorum_leaf, count) = self + .leaf_map + .iter() + .max_by(|(_, v), (_, other_val)| v.cmp(other_val)) + .unwrap(); + if *count >= threshold { + for leaf in self.leaf_map.keys() { + if leaf.get_view_number() > quorum_leaf.get_view_number() { + error!("LEAF MAP (that is mismatched) IS: {:?}", self.leaf_map); + self.status = ViewStatus::Err(OverallSafetyTaskErr::MismatchedLeaf); + return; + } + } + } } if check_block && self.block_map.len() != 1 { diff --git a/testing/src/script.rs b/testing/src/script.rs index 3c98f5480b..01434bad65 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -2,9 +2,10 @@ use crate::predicates::Predicate; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; +use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; pub struct TestScriptStage>>> { pub inputs: Vec>, @@ -107,9 +108,18 @@ pub async fn run_test_script } for assert in &stage.outputs { - if let Ok(received_output) = test_receiver.try_recv() { + if let Ok(Ok(received_output)) = + async_timeout(Duration::from_millis(250), test_receiver.recv_direct()).await + { tracing::debug!("Test received: {:?}", received_output); validate_output_or_panic(stage_number, &received_output, assert); + if !task.state_mut().filter(&received_output.clone()) { + tracing::debug!("Test sent: {:?}", received_output.clone()); + + if let Some(res) = S::handle_event(received_output.clone(), &mut task).await { + task.state_mut().handle_result(&res).await; + } + } } else { panic_missing_output(stage_number, assert); } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index af80bcb65d..0772914734 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -17,6 +17,7 @@ use hotshot_types::{ network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType}, }, + vote::HasViewNumber, }; use snafu::Snafu; use std::collections::BTreeMap; @@ -97,13 +98,18 @@ where block_size: _, } = event { - state.last_decided_leaf = leaf_chain.first().unwrap().leaf.clone(); + let leaf = leaf_chain.first().unwrap().leaf.clone(); + if leaf.get_view_number() > state.last_decided_leaf.get_view_number() { + state.last_decided_leaf = leaf; + } } else if let EventType::QuorumProposal { proposal, sender: _, } = event { - state.high_qc = proposal.data.justify_qc; + if proposal.data.justify_qc.get_view_number() > state.high_qc.get_view_number() { + state.high_qc = proposal.data.justify_qc; + } } // if we have not seen this view before if state.latest_view.is_none() || view_number > state.latest_view.unwrap() { diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index 164aece9a4..f08a28a3f8 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -45,7 +45,6 @@ async fn test_catchup() { // Make sure we keep commiting rounds after the catchup, but not the full 50. num_successful_views: 22, num_failed_views: 5, - check_leaf: true, ..Default::default() }; @@ -98,7 +97,6 @@ async fn test_catchup_cdn() { ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { num_failed_views: 5, - check_leaf: true, ..Default::default() }; @@ -152,8 +150,7 @@ async fn test_catchup_one_node() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep commiting rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 1, - check_leaf: true, + num_failed_views: 2, ..Default::default() }; @@ -213,7 +210,6 @@ async fn test_catchup_in_view_sync() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - check_leaf: true, num_failed_views: 5, ..Default::default() }; @@ -275,7 +271,6 @@ async fn test_catchup_reload() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep commiting rounds after the catchup, but not the full 50. num_successful_views: 22, - check_leaf: true, ..Default::default() }; diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 45de005266..eb5420567c 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -78,8 +78,8 @@ async fn test_consensus_task() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), quorum_proposal_send(), + exact(QuorumProposalValidated(proposals[1].data.clone())), ], asserts: vec![is_at_view_number(2)], }; @@ -347,10 +347,10 @@ async fn test_view_sync_finalize_propose() { let view_4 = TestScriptStage { inputs: vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), TimeoutVoteRecv(timeout_vote_view_2), TimeoutVoteRecv(timeout_vote_view_3), ViewSyncFinalizeCertificate2Recv(cert), - QuorumProposalRecv(proposals[1].clone(), leaders[1]), SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(4)), ], outputs: vec![ @@ -454,10 +454,10 @@ async fn test_view_sync_finalize_vote() { // Now at view 3 we receive the proposal received response. let view_3 = TestScriptStage { inputs: vec![ - // Multiple timeouts in a row, so we call for a view sync - ViewSyncFinalizeCertificate2Recv(cert), // Receive a proposal for view 4, but with the highest qc being from view 1. QuorumProposalRecv(proposals[0].clone(), leaders[0]), + // Multiple timeouts in a row, so we call for a view sync + ViewSyncFinalizeCertificate2Recv(cert), ], outputs: vec![ exact(QuorumProposalValidated(proposals[0].data.clone())), diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index a660945c6d..79aa97f343 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -67,19 +67,15 @@ async fn test_da_task() { // Run view 2 and validate proposal. let view_2 = TestScriptStage { inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![exact(DAProposalValidated(proposals[1].clone(), leaders[1]))], - asserts: vec![], - }; - - // Run view 3 and vote - let view_3 = TestScriptStage { - inputs: vec![DAProposalValidated(proposals[1].clone(), leaders[1])], - outputs: vec![exact(DAVoteSend(votes[1].clone()))], + outputs: vec![ + exact(DAProposalValidated(proposals[1].clone(), leaders[1])), + exact(DAVoteSend(votes[1].clone())), + ], asserts: vec![], }; let da_state = DATaskState::>::create_from(&handle).await; - let stages = vec![view_1, view_2, view_3]; + let stages = vec![view_1, view_2]; run_test_script(stages, da_state).await; } diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index 833a63900d..de5c2c2a31 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -78,13 +78,12 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { vec![ quorum_proposal_send(), exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), ] } else { vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), quorum_proposal_send(), + exact(QuorumProposalValidated(proposals[1].data.clone())), ] }; diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index ecd4e88cdd..4fe56192a6 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -80,8 +80,8 @@ async fn test_upgrade_task() { let view_2 = TestScriptStage { inputs: vec![ - VidDisperseRecv(vids[1].0[0].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), + VidDisperseRecv(vids[1].0[0].clone()), DACRecv(dacs[1].clone()), ], outputs: vec![ From 60fcebb9439239b20e404fbcc912add7cdcbe79b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:36:13 -0400 Subject: [PATCH 0900/1393] Bump clap from 4.5.3 to 4.5.4 (#2845) Bumps [clap](https://github.com/clap-rs/clap) from 4.5.3 to 4.5.4. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.5.3...v4.5.4) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- builder-api/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 4f70d90a70..9fc9147a33 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] async-trait = { workspace = true } -clap = { version = "4.4", features = ["derive", "env"] } +clap = { version = "4.5", features = ["derive", "env"] } derive_more = "0.99" futures = "0.3" hotshot-types = { path = "../types" } From 19dc22fc14fbdc1047a372dfb3fa4c8fb36c6c42 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 28 Mar 2024 19:48:24 -0400 Subject: [PATCH 0901/1393] [Push CDN] Combined network to new CDN (#2861) * asynchronous network generators * switch combined network to use new cdn * update pcdn * fix libp2p spinlock --- example-types/src/node_types.rs | 4 +- examples/combined/types.rs | 11 +- examples/infra/mod.rs | 68 ++++----- examples/push-cdn/all.rs | 1 + .../src/traits/networking/combined_network.rs | 124 ++++++++-------- .../src/traits/networking/libp2p_network.rs | 72 ++++------ .../src/traits/networking/memory_network.rs | 8 +- .../src/traits/networking/push_cdn_network.rs | 134 ++++++++++++------ .../traits/networking/web_server_network.rs | 9 +- .../src/network/behaviours/dht/mod.rs | 2 +- testing/src/task_helpers.rs | 2 +- testing/src/test_launcher.rs | 7 +- testing/src/test_runner.rs | 2 +- testing/tests/combined_network.rs | 10 +- testing/tests/network_task.rs | 4 +- types/src/traits/network.rs | 11 +- types/src/traits/node_implementation.rs | 11 +- 17 files changed, 269 insertions(+), 211 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 4643398e94..b9c97208d0 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -88,7 +88,7 @@ type StaticLibp2pDAComm = Libp2pNetwork, ; /// combined network -type StaticCombinedDAComm = CombinedNetworks; +type StaticCombinedDAComm = CombinedNetworks; /// memory comm channel pub type StaticMemoryQuorumComm = @@ -102,7 +102,7 @@ type StaticLibp2pQuorumComm = type StaticWebQuorumComm = WebServerNetwork; /// combined network (libp2p + web server) -type StaticCombinedQuorumComm = CombinedNetworks; +type StaticCombinedQuorumComm = CombinedNetworks; impl NodeImplementation for PushCdnImpl { type QuorumNetwork = StaticPushCdnQuorumComm; diff --git a/examples/combined/types.rs b/examples/combined/types.rs index fb4397a2e3..dbbc1b2e80 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,7 +1,6 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::CombinedNetworks; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; -use hotshot_types::constants::WebServerVersion; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -11,13 +10,13 @@ use std::fmt::Debug; pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = CombinedNetworks; +pub type DANetwork = CombinedNetworks; /// convenience type alias -pub type VIDNetwork = CombinedNetworks; +pub type VIDNetwork = CombinedNetworks; /// convenience type alias -pub type QuorumNetwork = CombinedNetworks; +pub type QuorumNetwork = CombinedNetworks; /// convenience type alias -pub type ViewSyncNetwork = CombinedNetworks; +pub type ViewSyncNetwork = CombinedNetworks; impl NodeImplementation for NodeImpl { type QuorumNetwork = QuorumNetwork; @@ -25,4 +24,4 @@ impl NodeImplementation for NodeImpl { type Storage = TestStorage; } /// convenience type alias -pub type ThisRun = CombinedDARun; +pub type ThisRun = CombinedDARun; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 06da261499..bc00f83461 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -862,7 +862,7 @@ where keypair, ) .await - .expect("failed to perform initial connection"); + .expect("failed to perform initial client connection"); PushCdnDaRun { config, @@ -957,13 +957,13 @@ where // Combined network /// Represents a combined-network-based run -pub struct CombinedDARun { +pub struct CombinedDARun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: CombinedNetworks, + quorum_channel: CombinedNetworks, /// data availability channel - da_channel: CombinedNetworks, + da_channel: CombinedNetworks, } #[async_trait] @@ -976,65 +976,67 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = CombinedNetworks, - CommitteeNetwork = CombinedNetworks, + QuorumNetwork = CombinedNetworks, + CommitteeNetwork = CombinedNetworks, Storage = TestStorage, >, - NetworkVersion: StaticVersionType, - > - RunDA< - TYPES, - CombinedNetworks, - CombinedNetworks, - NODE, - > for CombinedDARun + > RunDA, CombinedNetworks, NODE> for CombinedDARun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, - NetworkVersion: 'static, { async fn initialize_networking( config: NetworkConfig, - ) -> CombinedDARun { + ) -> CombinedDARun { // Get our own key - let pub_key = config.config.my_own_validator_config.public_key.clone(); + let key = config.config.my_own_validator_config.clone(); // Create and wait for libp2p network let libp2p_underlying_quorum_network = - libp2p_network_from_config::(config.clone(), pub_key.clone()).await; + libp2p_network_from_config::(config.clone(), key.public_key.clone()).await; libp2p_underlying_quorum_network.wait_for_ready().await; - // Extract values from config (for webserver DA network) - let WebServerConfig { - url, - wait_between_polls, - }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - let CombinedNetworkConfig { delay_duration }: CombinedNetworkConfig = config.clone().combined_network_config.unwrap(); - // Create and wait for underlying webserver network - let web_quorum_network = - webserver_network_from_config::(config.clone(), pub_key.clone()); + // Convert the keys to the CDN-compatible type + let keypair = KeyPair { + public_key: WrappedSignatureKey(key.public_key), + private_key: key.private_key, + }; - let web_da_network = WebServerNetwork::create(url, wait_between_polls, pub_key, true); + // See if we should be DA + let mut topics = vec![Topic::Global]; + if config.node_index < config.config.da_staked_committee_size as u64 { + topics.push(Topic::DA); + } - web_quorum_network.wait_for_ready().await; + // Create the network and await the initial connection + let cdn_network = PushCdnNetwork::new( + config + .cdn_marshal_address + .clone() + .expect("`cdn_marshal_address` needs to be supplied for a CDN run"), + topics.iter().map(ToString::to_string).collect(), + keypair, + ) + .await + .expect("failed to perform intiail client connection"); // Combine the two communication channels let da_channel = CombinedNetworks::new( Arc::new(UnderlyingCombinedNetworks( - web_da_network.clone(), + cdn_network.clone(), libp2p_underlying_quorum_network.clone(), )), delay_duration, ); let quorum_channel = CombinedNetworks::new( Arc::new(UnderlyingCombinedNetworks( - web_quorum_network.clone(), + cdn_network, libp2p_underlying_quorum_network.clone(), )), delay_duration, @@ -1047,11 +1049,11 @@ where } } - fn get_da_channel(&self) -> CombinedNetworks { + fn get_da_channel(&self) -> CombinedNetworks { self.da_channel.clone() } - fn get_quorum_channel(&self) -> CombinedNetworks { + fn get_quorum_channel(&self) -> CombinedNetworks { self.quorum_channel.clone() } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 0f9b7e09f3..7b3f8d4e79 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -96,6 +96,7 @@ async fn main() { let marshal_config = cdn_marshal::ConfigBuilder::default() .bind_address(marshal_endpoint.clone()) .discovery_endpoint("test.sqlite".to_string()) + .metrics_enabled(false) .build() .expect("failed to build marshal config"); diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index a74f6a6e21..22020be9c2 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -1,11 +1,14 @@ //! Networking Implementation that has a primary and a fallback newtork. If the primary //! Errors we will use the backup to send or receive -use super::NetworkError; -use crate::traits::implementations::{Libp2pNetwork, WebServerNetwork}; +use super::{push_cdn_network::PushCdnNetwork, NetworkError}; +use crate::traits::implementations::Libp2pNetwork; use async_lock::RwLock; -use hotshot_types::constants::{ - COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, - COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, +use hotshot_types::{ + constants::{ + COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, + COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, + }, + traits::network::AsyncGenerator, }; use lru::LruCache; use std::{ @@ -64,10 +67,10 @@ type DelayedTasksLockedMap = Arc { +#[derive(Clone)] +pub struct CombinedNetworks { /// The two networks we'll use for send/recv - networks: Arc>, + networks: Arc>, /// Last n seen messages to prevent processing duplicates message_cache: Arc>>, @@ -82,17 +85,14 @@ pub struct CombinedNetworks delay_duration: Arc>, } -impl CombinedNetworks { +impl CombinedNetworks { /// Constructor /// /// # Panics /// /// Panics if `COMBINED_NETWORK_CACHE_SIZE` is 0 #[must_use] - pub fn new( - networks: Arc>, - delay_duration: Duration, - ) -> Self { + pub fn new(networks: Arc>, delay_duration: Duration) -> Self { Self { networks, message_cache: Arc::new(RwLock::new(LruCache::new( @@ -106,7 +106,7 @@ impl CombinedNetworks &WebServerNetwork { + pub fn primary(&self) -> &PushCdnNetwork { &self.networks.0 } @@ -173,19 +173,17 @@ impl CombinedNetworks( - pub WebServerNetwork, +#[derive(Clone)] +pub struct UnderlyingCombinedNetworks( + pub PushCdnNetwork, pub Libp2pNetwork, TYPES::SignatureKey>, ); #[cfg(feature = "hotshot-testing")] -impl - TestableNetworkingImplementation for CombinedNetworks -{ +impl TestableNetworkingImplementation for CombinedNetworks { fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -194,9 +192,9 @@ impl is_da: bool, reliability_config: Option>, secondary_network_delay: Duration, - ) -> Box (Arc, Arc) + 'static> { + ) -> AsyncGenerator<(Arc, Arc)> { let generators = ( - as TestableNetworkingImplementation<_>>::generator( + as TestableNetworkingImplementation<_>>::generator( expected_node_count, num_bootstrap, network_id, @@ -215,38 +213,47 @@ impl Duration::default(), ) ); - Box::new(move |node_id| { - let (quorum_web, da_web) = generators.0(node_id); - let (quorum_p2p, da_p2p) = generators.1(node_id); - let da_networks = UnderlyingCombinedNetworks( - Arc::>::into_inner(da_web).unwrap(), - Arc::, TYPES::SignatureKey>>::unwrap_or_clone(da_p2p), - ); - let quorum_networks = UnderlyingCombinedNetworks( - Arc::>::into_inner(quorum_web).unwrap(), - Arc::, TYPES::SignatureKey>>::unwrap_or_clone( - quorum_p2p, - ), - ); - let quorum_net = Self { - networks: Arc::new(quorum_networks), - message_cache: Arc::new(RwLock::new(LruCache::new( - NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), - ))), - primary_down: Arc::new(AtomicU64::new(0)), - delayed_tasks: Arc::default(), - delay_duration: Arc::new(RwLock::new(secondary_network_delay)), - }; - let da_net = Self { - networks: Arc::new(da_networks), - message_cache: Arc::new(RwLock::new(LruCache::new( - NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), - ))), - primary_down: Arc::new(AtomicU64::new(0)), - delayed_tasks: Arc::default(), - delay_duration: Arc::new(RwLock::new(secondary_network_delay)), - }; - (quorum_net.into(), da_net.into()) + Box::pin(move |node_id| { + let gen0 = generators.0(node_id); + let gen1 = generators.1(node_id); + + Box::pin(async move { + let (cdn, _) = gen0.await; + let cdn = Arc::>::into_inner(cdn).unwrap(); + + let (quorum_p2p, da_p2p) = gen1.await; + let da_networks = UnderlyingCombinedNetworks( + cdn.clone(), + Arc::, TYPES::SignatureKey>>::unwrap_or_clone( + da_p2p, + ), + ); + let quorum_networks = UnderlyingCombinedNetworks( + cdn, + Arc::, TYPES::SignatureKey>>::unwrap_or_clone( + quorum_p2p, + ), + ); + let quorum_net = Self { + networks: Arc::new(quorum_networks), + message_cache: Arc::new(RwLock::new(LruCache::new( + NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), + ))), + primary_down: Arc::new(AtomicU64::new(0)), + delayed_tasks: Arc::default(), + delay_duration: Arc::new(RwLock::new(secondary_network_delay)), + }; + let da_net = Self { + networks: Arc::new(da_networks), + message_cache: Arc::new(RwLock::new(LruCache::new( + NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), + ))), + primary_down: Arc::new(AtomicU64::new(0)), + delayed_tasks: Arc::default(), + delay_duration: Arc::new(RwLock::new(secondary_network_delay)), + }; + (quorum_net.into(), da_net.into()) + }) }) } @@ -259,9 +266,8 @@ impl } #[async_trait] -impl - ConnectedNetwork, TYPES::SignatureKey> - for CombinedNetworks +impl ConnectedNetwork, TYPES::SignatureKey> + for CombinedNetworks { async fn request_data( &self, @@ -441,7 +447,7 @@ impl } async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< + as ConnectedNetwork< Message, TYPES::SignatureKey, >>::inject_consensus_info(self.primary(), event.clone()) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index b20244840c..cca3824bc1 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -2,8 +2,6 @@ //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network use super::NetworkingMetricsValue; -#[cfg(feature = "hotshot-testing")] -use async_compatibility_layer::art::async_block_on; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, channel::{self, bounded, unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, @@ -24,8 +22,8 @@ use hotshot_types::{ data::ViewNumber, traits::{ network::{ - self, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, - NetworkMsg, ResponseMessage, + self, AsyncGenerator, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, + NetworkError, NetworkMsg, ResponseMessage, }, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -123,8 +121,6 @@ struct Libp2pNetworkInner { /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, - /// bootstrap - bootstrap_addrs_len: usize, /// whether or not the network is ready to send is_ready: Arc, /// max time before dropping message due to DHT error @@ -182,7 +178,7 @@ where _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, - ) -> Box (Arc, Arc) + 'static> { + ) -> AsyncGenerator<(Arc, Arc)> { assert!( da_committee_size <= expected_node_count, "DA committee size must be less than or equal to total # nodes" @@ -203,7 +199,7 @@ where // NOTE uncomment this for easier debugging // let start_port = 5000; - Box::new({ + Box::pin({ move |node_id| { info!( "GENERATOR: Node id {:?}, is bootstrap: {:?}", @@ -268,29 +264,31 @@ where let keys = all_keys.clone(); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); - let net = Arc::new(async_block_on(async move { - match Libp2pNetwork::new( - NetworkingMetricsValue::default(), - config, - pubkey.clone(), - bootstrap_addrs_ref, - num_bootstrap, - usize::try_from(node_id).unwrap(), - keys, - #[cfg(feature = "hotshot-testing")] - reliability_config_dup, - da.clone(), - da.contains(&pubkey), - ) - .await - { - Ok(network) => network, - Err(err) => { - panic!("Failed to create libp2p network: {err:?}"); - } - } - })); - (net.clone(), net) + Box::pin(async move { + let net = Arc::new( + match Libp2pNetwork::new( + NetworkingMetricsValue::default(), + config, + pubkey.clone(), + bootstrap_addrs_ref, + num_bootstrap, + usize::try_from(node_id).unwrap(), + keys, + #[cfg(feature = "hotshot-testing")] + reliability_config_dup, + da.clone(), + da.contains(&pubkey), + ) + .await + { + Ok(network) => network, + Err(err) => { + panic!("Failed to create libp2p network: {err:?}"); + } + }, + ); + (net.clone(), net) + }) } }) } @@ -378,7 +376,6 @@ impl Libp2pNetwork { requests_rx: Mutex::new(Some(requests_rx)), sender: sender.clone(), pk, - bootstrap_addrs_len, bootstrap_addrs, is_ready: Arc::new(AtomicBool::new(false)), // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples @@ -445,7 +442,6 @@ impl Libp2pNetwork { fn spawn_connect(&mut self, id: usize, bind_version: VER) { let pk = self.inner.pk.clone(); let bootstrap_ref = self.inner.bootstrap_addrs.clone(); - let num_bootstrap = self.inner.bootstrap_addrs_len; let handle = self.inner.handle.clone(); let is_bootstrapped = self.inner.is_bootstrapped.clone(); let node_type = self.inner.handle.config().node_type; @@ -455,14 +451,8 @@ impl Libp2pNetwork { async_spawn({ let is_ready = self.inner.is_ready.clone(); async move { - let bs_addrs = loop { - let bss = bootstrap_ref.read().await; - let bs_addrs = bss.clone(); - drop(bss); - if bs_addrs.len() >= num_bootstrap { - break bs_addrs; - } - }; + let bs_addrs = bootstrap_ref.read().await.clone(); + debug!("Finished adding bootstrap addresses."); handle.add_known_peers(bs_addrs).await.unwrap(); diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index ebe0512b29..8f9e634f9c 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -18,7 +18,7 @@ use hotshot_types::{ constants::Version01, message::Message, traits::{ - network::{ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation}, + network::{AsyncGenerator, ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -189,10 +189,10 @@ impl TestableNetworkingImplementation _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, - ) -> Box (Arc, Arc) + 'static> { + ) -> AsyncGenerator<(Arc, Arc)> { let master: Arc<_> = MasterMap::new(); // We assign known_nodes' public key and stake value rather than read from config file since it's a test - Box::new(move |node_id| { + Box::pin(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); let net = MemoryNetwork::new( @@ -201,7 +201,7 @@ impl TestableNetworkingImplementation master.clone(), reliability_config.clone(), ); - (net.clone().into(), net.into()) + Box::pin(async move { (net.clone().into(), net.into()) }) }) } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index aac4ddeabe..6e03c9c004 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,6 +1,6 @@ use super::NetworkError; #[cfg(feature = "hotshot-testing")] -use async_compatibility_layer::art::{async_block_on, async_spawn}; +use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use bincode::config::Options; @@ -20,6 +20,7 @@ use cdn_client::{ }; #[cfg(feature = "hotshot-testing")] use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; +use hotshot_types::traits::network::AsyncGenerator; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ @@ -42,6 +43,7 @@ use rand::{RngCore, SeedableRng}; use std::collections::BTreeSet; use std::marker::PhantomData; #[cfg(feature = "hotshot-testing")] +use std::sync::atomic::{AtomicBool, Ordering}; use std::{path::Path, sync::Arc, time::Duration}; use tracing::{error, warn}; use versioned_binary_serialization::{ @@ -127,7 +129,13 @@ impl RunDef for ProductionDef { /// that helps organize them all. #[derive(Clone)] /// Is generic over both the type of key and the network protocol. -pub struct PushCdnNetwork(Client, Quic>); +pub struct PushCdnNetwork { + /// The underlying client + client: Client, Quic>, + /// Whether or not the underlying network is supposed to be paused + #[cfg(feature = "hotshot-testing")] + is_paused: Arc, +} impl PushCdnNetwork { /// Create a new `PushCdnNetwork` (really a client) from a marshal endpoint, a list of initial @@ -157,7 +165,12 @@ impl PushCdnNetwork { // Create the client, performing the initial connection let client = Client::new(config).await?; - Ok(Self(client)) + Ok(Self { + client, + // Start unpaused + #[cfg(feature = "hotshot-testing")] + is_paused: Arc::from(AtomicBool::new(false)), + }) } /// Broadcast a message to members of the particular topic. Does not retry. @@ -171,6 +184,12 @@ impl PushCdnNetwork { topic: Topic, _: Ver, ) -> Result<(), NetworkError> { + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } + // Bincode the message let serialized_message = match Serializer::::serialize(&message) { Ok(serialized) => serialized, @@ -183,7 +202,7 @@ impl PushCdnNetwork { // Send the message // TODO: check if we need to print this error if self - .0 + .client .send_broadcast_message(vec![topic], serialized_message) .await .is_err() @@ -207,7 +226,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork _is_da: bool, _reliability_config: Option>, _secondary_network_delay: Duration, - ) -> Box (Arc, Arc) + 'static> { + ) -> AsyncGenerator<(Arc, Arc)> { // The configuration we are using for testing is 2 brokers & 1 marshal // A keypair shared between brokers @@ -270,6 +289,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); let marshal_config = MarshalConfigBuilder::default() .bind_address(marshal_endpoint.clone()) + .metrics_enabled(false) .discovery_endpoint(discovery_endpoint) .build() .expect("failed to build marshal config"); @@ -287,45 +307,51 @@ impl TestableNetworkingImplementation for PushCdnNetwork }); // This function is called for each client we spawn - Box::new({ + Box::pin({ move |node_id| { - // Derive our public and priate keys from our index - let private_key = - TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = TYPES::SignatureKey::from_private(&private_key); - - // Calculate if we're DA or not - let topics = if node_id < da_committee_size as u64 { - vec![Topic::DA, Topic::Global] - } else { - vec![Topic::Global] - }; - - // Configure our client - let client_config = ClientConfigBuilder::default() - .keypair(KeyPair { - public_key: WrappedSignatureKey(public_key), - private_key, - }) - .subscribed_topics(topics) - .endpoint(marshal_endpoint.clone()) - .build() - .expect("failed to build client config"); - - // Create our client - let client = Arc::new(PushCdnNetwork( - async_block_on(async move { Client::new(client_config).await }) - .expect("failed to create client"), - )); - - (client.clone(), client) + // Clone this so we can pin the future + let marshal_endpoint = marshal_endpoint.clone(); + + Box::pin(async move { + // Derive our public and priate keys from our index + let private_key = + TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let public_key = TYPES::SignatureKey::from_private(&private_key); + + // Calculate if we're DA or not + let topics = if node_id < da_committee_size as u64 { + vec![Topic::DA, Topic::Global] + } else { + vec![Topic::Global] + }; + + // Configure our client + let client_config = ClientConfigBuilder::default() + .keypair(KeyPair { + public_key: WrappedSignatureKey(public_key), + private_key, + }) + .subscribed_topics(topics) + .endpoint(marshal_endpoint) + .build() + .expect("failed to build client config"); + + // Create our client + let client = Arc::new(PushCdnNetwork { + client: Client::new(client_config) + .await + .expect("failed to create client"), + #[cfg(feature = "hotshot-testing")] + is_paused: Arc::from(AtomicBool::new(false)), + }); + + (client.clone(), client) + }) } }) } - /// Get the number of messages in-flight. - /// - /// Some implementations will not be able to tell how many messages there are in-flight. These implementations should return `None`. + /// The PushCDN does not support in-flight message counts fn in_flight_message_count(&self) -> Option { None } @@ -335,11 +361,17 @@ impl TestableNetworkingImplementation for PushCdnNetwork impl ConnectedNetwork, TYPES::SignatureKey> for PushCdnNetwork { - /// We do not support pausing the PushCDN network right now, but it is possible. - fn pause(&self) {} + /// Pause sending and receiving on the PushCDN network. + fn pause(&self) { + #[cfg(feature = "hotshot-testing")] + self.is_paused.store(true, Ordering::Relaxed); + } - /// We do not support resuming the PushCDN network right now, but it is possible. - fn resume(&self) {} + /// Resumse sending and receiving on the PushCDN network. + fn resume(&self) { + #[cfg(feature = "hotshot-testing")] + self.is_paused.store(false, Ordering::Relaxed); + } /// The clients form an initial connection when created, so we don't have to wait. async fn wait_for_ready(&self) {} @@ -399,6 +431,12 @@ impl ConnectedNetwork, TYPES::SignatureKey> recipient: TYPES::SignatureKey, _: Ver, ) -> Result<(), NetworkError> { + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } + // Bincode the message let serialized_message = match Serializer::::serialize(&message) { Ok(serialized) => serialized, @@ -411,7 +449,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> // Send the message // TODO: check if we need to print this error if self - .0 + .client .send_direct_message(&WrappedSignatureKey(recipient), serialized_message) .await .is_err() @@ -429,7 +467,13 @@ impl ConnectedNetwork, TYPES::SignatureKey> /// - If we fail to receive messages. Will trigger a retry automatically. async fn recv_msgs(&self) -> Result>, NetworkError> { // Receive a message - let message = self.0.receive_message().await; + let message = self.client.receive_message().await; + + // If we're paused, receive but don't process messages + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(vec![]); + } // If it was an error, wait a bit and retry let message = match message { diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 24e502a743..31c698f27e 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -12,6 +12,7 @@ use async_compatibility_layer::{ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; +use hotshot_types::traits::network::AsyncGenerator; use hotshot_types::{ boxed_sync, constants::{Version01, VERSION_0_1}, @@ -1209,7 +1210,7 @@ impl _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, - ) -> Box (Arc, Arc) + 'static> { + ) -> AsyncGenerator<(Arc, Arc)> { let da_gen = Self::single_generator( expected_node_count, num_bootstrap, @@ -1227,7 +1228,11 @@ impl &reliability_config, ); // Start each node's web server client - Box::new(move |id| (quorum_gen(id).into(), da_gen(id).into())) + Box::pin(move |id| { + let da_gen = da_gen(id); + let quorum_gen = quorum_gen(id); + Box::pin(async move { (quorum_gen.into(), da_gen.into()) }) + }) } fn in_flight_message_count(&self) -> Option { diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index caa9f9ee34..d7cde3e87e 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -575,7 +575,7 @@ impl NetworkBehaviour for DHTBehaviour { info!("Starting bootstrap"); } Err(e) => { - error!( + warn!( "peer id {:?} FAILED TO START BOOTSTRAP {:?} adding peers {:?}", self.peer_id, e, self.bootstrap_nodes ); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index c48203f3d6..2845c98211 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -63,7 +63,7 @@ pub async fn build_system_handle( let launcher = builder.gen_launcher::(node_id); - let networks = (launcher.resource_generator.channel_generator)(node_id); + let networks = (launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 268649bd10..7b1ef28e43 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -4,7 +4,10 @@ use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; use hotshot_example_types::storage_types::TestStorage; use hotshot_types::{ message::Message, - traits::{network::ConnectedNetwork, node_implementation::NodeType}, + traits::{ + network::{AsyncGenerator, ConnectedNetwork}, + node_implementation::NodeType, + }, HotShotConfig, }; @@ -22,7 +25,7 @@ pub type Generator = Box T + 'static>; /// generators for resources used by each node pub struct ResourceGenerators> { /// generate channels - pub channel_generator: Generator>, + pub channel_generator: AsyncGenerator>, /// generate new storage for each node pub storage: Generator>, /// configuration used to generate each hotshot node diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 620b297804..b586e84ec0 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -363,7 +363,7 @@ where quorum_election_config, ), }; - let networks = (self.launcher.resource_generator.channel_generator)(node_id); + let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); if self.launcher.metadata.skip_late && late_start.contains(&node_id) { diff --git a/testing/tests/combined_network.rs b/testing/tests/combined_network.rs index 49de0e9ed7..e4f30ccc3a 100644 --- a/testing/tests/combined_network.rs +++ b/testing/tests/combined_network.rs @@ -10,7 +10,7 @@ use hotshot_testing::{ use rand::Rng; use tracing::instrument; -/// A run with both the webserver and libp2p functioning properly +/// A run with both the CDN and libp2p functioning properly #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -46,11 +46,11 @@ async fn test_combined_network() { .await; } -// A run where the webserver crashes part-way through +// A run where the CDN crashes part-way through #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] -async fn test_combined_network_webserver_crash() { +async fn test_combined_network_cdn_crash() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let mut metadata: TestMetadata = TestMetadata { @@ -94,7 +94,7 @@ async fn test_combined_network_webserver_crash() { .await; } -// A run where the webserver crashes partway through +// A run where the CDN crashes partway through // and then comes back up #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -148,7 +148,7 @@ async fn test_combined_network_reup() { .await; } -// A run where half of the nodes disconnect from the webserver +// A run where half of the nodes disconnect from the CDN #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index 5e4fc7103f..a840d23b26 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -33,7 +33,7 @@ async fn test_network_task() { let launcher = builder.gen_launcher::(node_id); - let networks = (launcher.resource_generator.channel_generator)(node_id); + let networks = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); let config = launcher.resource_generator.config.clone(); @@ -99,7 +99,7 @@ async fn test_network_storage_fail() { let launcher = builder.gen_launcher::(node_id); - let networks = (launcher.resource_generator.channel_generator)(node_id); + let networks = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); storage.write().await.should_return_err = true; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 337bb1ca38..5c2000dd21 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -7,7 +7,10 @@ use async_compatibility_layer::art::async_sleep; use async_std::future::TimeoutError; use derivative::Derivative; use dyn_clone::DynClone; -use futures::channel::{mpsc, oneshot}; +use futures::{ + channel::{mpsc, oneshot}, + Future, +}; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] @@ -31,6 +34,7 @@ use std::{ collections::{BTreeSet, HashMap}, fmt::Debug, hash::Hash, + pin::Pin, sync::Arc, time::Duration, }; @@ -416,6 +420,9 @@ pub trait ConnectedNetwork: fn update_view(&self, _view: u64) {} } +/// A channel generator for types that need asynchronous execution +pub type AsyncGenerator = Pin Pin>>>>; + /// Describes additional functionality needed by the test network implementation pub trait TestableNetworkingImplementation where @@ -431,7 +438,7 @@ where is_da: bool, reliability_config: Option>, secondary_network_delay: Duration, - ) -> Box (Arc, Arc) + 'static>; + ) -> AsyncGenerator<(Arc, Arc)>; /// Get the number of messages in-flight. /// diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index cb2c98c403..bb501b64e3 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -6,7 +6,9 @@ use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, - network::{ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation}, + network::{ + AsyncGenerator, ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation, + }, states::TestableState, storage::Storage, ValidatedState, @@ -24,8 +26,7 @@ use serde::{Deserialize, Serialize}; use std::{ fmt::Debug, hash::Hash, - ops, - ops::{Deref, Sub}, + ops::{self, Deref, Sub}, sync::Arc, time::Duration, }; @@ -93,7 +94,7 @@ pub trait TestableNodeImplementation: NodeImplementation da_committee_size: usize, reliability_config: Option>, secondary_network_delay: Duration, - ) -> Box (Arc, Arc)>; + ) -> AsyncGenerator<(Arc, Arc)>; } #[async_trait] @@ -148,7 +149,7 @@ where da_committee_size: usize, reliability_config: Option>, secondary_network_delay: Duration, - ) -> Box (Arc, Arc)> { + ) -> AsyncGenerator<(Arc, Arc)> { >::generator( expected_node_count, num_bootstrap, From 8bc8f55f0ac95a48744b156a6a340e91fbdbe943 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 28 Mar 2024 20:03:57 -0400 Subject: [PATCH 0902/1393] [CX_CLEANUP] - Implement `QuorumProposalTask` (#2831) * add new file * add qc * starter * filling out the trait * switching branch * start adding the different payload requirements * adding context to the events * fix build, clippy next * make clippy happy, stub out the rest of the steps * cosmetic improvements * Implement task state * Define a custom re-broadcast type for the quorum proposal (half impl) * Complete task and event impl with dummy proposal * fix clippy lints * fix some logical bugs * remove log * update proposal task to combine dependent events * clarifying comment * clippy: remove unused import * remove overuse of validation events * roll changes down from above branch, remove logs * remove useless event * add proposal validated event * Perform unholy sacriment to get dependencies working * Perform unholy sacrament to get dependencies working * shave a layer off --- task-impls/src/events.rs | 7 + task-impls/src/lib.rs | 3 + task-impls/src/quorum_proposal.rs | 541 ++++++++++++++++++++++++++++++ task/src/dependency.rs | 17 +- task/src/dependency_task.rs | 5 +- 5 files changed, 568 insertions(+), 5 deletions(-) create mode 100644 task-impls/src/quorum_proposal.rs diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f22c8428bc..31d6475b06 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -139,4 +139,11 @@ pub enum HotShotEvent { UpgradeVoteSend(UpgradeVote), /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), + + /** Quorum Proposal Task **/ + /// Dummy quorum proposal to test if the quorum proposal dependency task works. + DummyQuorumProposalSend(TYPES::Time), + /// All required dependencies of the quorum proposal have been validated and the task is ready + /// to propose. + QuorumProposalDependenciesValidated(TYPES::Time), } diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index e2ad897517..c0a60950d4 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -46,3 +46,6 @@ pub mod response; /// Task for requesting the network for things pub mod request; + +/// Task for handling logic for quorum proposals +pub mod quorum_proposal; diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs new file mode 100644 index 0000000000..b4b360f194 --- /dev/null +++ b/task-impls/src/quorum_proposal.rs @@ -0,0 +1,541 @@ +use std::{collections::HashMap, sync::Arc}; + +use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; +use either::Either; +use hotshot_task::{ + dependency::{AndDependency, EventDependency, OrDependency}, + dependency_task::{DependencyTask, HandleDepOutput}, + task::{Task, TaskState}, +}; +use hotshot_types::{ + consensus::Consensus, + data::QuorumProposal, + event::Event, + message::Proposal, + simple_certificate::ViewSyncFinalizeCertificate2, + traits::{ + block_contents::BlockHeader, + network::{ConnectedNetwork, ConsensusIntentEvent}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, + vote::HasViewNumber, +}; + +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, instrument}; + +use crate::{ + consensus::CommitmentAndMetadata, + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; + +/// Validate a quorum proposal. +#[allow(clippy::needless_pass_by_value)] +fn validate_quorum_proposal( + _quorum_proposal: Proposal>, + _event_sender: Sender>>, +) -> bool { + true +} + +/// Validate a view sync cert or a timeout cert. +#[allow(clippy::needless_pass_by_value)] +fn validate_view_sync_finalize_certificate( + _certificate: ViewSyncFinalizeCertificate2, + _event_sender: Sender>>, +) -> bool { + true +} + +/// Proposal dependency types. These types represent events that precipitate a proposal. +#[derive(PartialEq, Debug)] +enum ProposalDependency { + /// For the `SendPayloadCommitmentAndMetadata` event. + PayloadAndMetadata, + + /// For the `QCFormed` event. + QC, + + /// For the `ViewSyncFinalizeCertificate2Recv` event. + ViewSyncCert, + + /// For the `QCFormed` event timeout branch. + TimeoutCert, + + /// For the `QuroumProposalRecv` event. + ProposalCertificate, +} + +/// Handler for the proposal dependency +struct ProposalDependencyHandle { + /// The view number to propose for. + view_number: TYPES::Time, + + /// The event sender. + sender: Sender>>, + + /// Reference to consensus. The replica will require a write lock on this. + #[allow(dead_code)] + consensus: Arc>>, +} + +impl HandleDepOutput for ProposalDependencyHandle { + type Output = Vec>>>; + + #[allow(clippy::no_effect_underscore_binding)] + async fn handle_dep_result(self, res: Self::Output) { + let mut payload_commitment = None; + let mut commit_and_metadata: Option> = None; + let mut _quorum_certificate = None; + let mut _timeout_certificate = None; + let mut _view_sync_finalize_cert = None; + for event in res.iter().flatten() { + match event.as_ref() { + HotShotEvent::QuorumProposalRecv(proposal, _) => { + let proposal_payload_comm = proposal.data.block_header.payload_commitment(); + if let Some(comm) = payload_commitment { + if proposal_payload_comm != comm { + return; + } + } else { + payload_commitment = Some(proposal_payload_comm); + } + } + HotShotEvent::SendPayloadCommitmentAndMetadata( + payload_commitment, + metadata, + _view, + ) => { + debug!("Got commit and meta {:?}", payload_commitment); + commit_and_metadata = Some(CommitmentAndMetadata { + commitment: *payload_commitment, + metadata: metadata.clone(), + }); + } + HotShotEvent::QCFormed(cert) => match cert { + either::Right(timeout) => { + _timeout_certificate = Some(timeout.clone()); + } + either::Left(qc) => { + _quorum_certificate = Some(qc.clone()); + } + }, + HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + _view_sync_finalize_cert = Some(cert.clone()); + } + _ => {} + } + } + + if commit_and_metadata.is_none() { + error!( + "Somehow completed the proposal dependency task without a commitment and metadata" + ); + return; + } + + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalDependenciesValidated( + self.view_number, + )), + &self.sender, + ) + .await; + broadcast_event( + Arc::new(HotShotEvent::DummyQuorumProposalSend(self.view_number)), + &self.sender, + ) + .await; + } +} + +/// The state for the quorum proposal task. +pub struct QuorumProposalTaskState> { + /// Latest view number that has been proposed for. + pub latest_proposed_view: TYPES::Time, + + /// Table for the in-progress proposal depdencey tasks. + pub propose_dependencies: HashMap>, + + /// Network for all nodes + pub quorum_network: Arc, + + /// Network for DA committee + pub committee_network: Arc, + + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, + + /// Reference to consensus. The replica will require a write lock on this. + pub consensus: Arc>>, + + /// Membership for Timeout votes/certs + pub timeout_membership: Arc, + + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + + /// The node's id + pub id: u64, +} + +impl> QuorumProposalTaskState { + /// Create an event dependency + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Quorum proposal create event dependency", level = "error")] + fn create_event_dependency( + &self, + dependency_type: ProposalDependency, + view_number: TYPES::Time, + event_receiver: Receiver>>, + ) -> EventDependency>> { + EventDependency::new( + event_receiver, + Box::new(move |event| { + let event = event.as_ref(); + debug!("Dependency {:?} got event {:?}", dependency_type, event); + let event_view = match dependency_type { + ProposalDependency::QC => { + if let HotShotEvent::QCFormed(either::Left(qc)) = event { + qc.view_number + } else { + return false; + } + } + ProposalDependency::TimeoutCert => { + if let HotShotEvent::QCFormed(either::Right(timeout)) = event { + timeout.view_number + } else { + return false; + } + } + ProposalDependency::ViewSyncCert => { + if let HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_cert) = + event + { + view_sync_cert.view_number + } else { + return false; + } + } + + ProposalDependency::ProposalCertificate => { + if let HotShotEvent::QuorumProposalRecv(proposal, _) = event { + proposal.data.view_number + } else { + return false; + } + } + ProposalDependency::PayloadAndMetadata => { + if let HotShotEvent::SendPayloadCommitmentAndMetadata( + _payload_commitment, + _metadata, + view, + ) = event + { + *view + } else { + return false; + } + } + }; + event_view == view_number + }), + ) + } + + /// Create and store an [`AndDependency`] combining [`EventDependency`]s associated with the + /// given view number if it doesn't exist. Also takes in the received `event` to seed a + /// dependency as already completed. This allows for the task to receive a proposable event + /// without losing the data that it received, as the dependency task would otherwise have no + /// ability to receive the event and, thus, would never propose. + fn create_dependency_task_if_new( + &mut self, + view_number: TYPES::Time, + event_receiver: Receiver>>, + event_sender: Sender>>, + event: Arc>, + ) { + debug!("Attempting to make dependency task for event {:?}", event); + if self.propose_dependencies.get(&view_number).is_some() { + debug!("Task already exists"); + return; + } + + let mut proposal_cert_validated_dependency = self.create_event_dependency( + ProposalDependency::ProposalCertificate, + view_number, + event_receiver.clone(), + ); + + let mut qc_dependency = self.create_event_dependency( + ProposalDependency::QC, + view_number, + event_receiver.clone(), + ); + + let mut view_sync_dependency = self.create_event_dependency( + ProposalDependency::ViewSyncCert, + view_number, + event_receiver.clone(), + ); + + let mut timeout_dependency = self.create_event_dependency( + ProposalDependency::TimeoutCert, + view_number, + event_receiver.clone(), + ); + + let mut payload_commitment_dependency = self.create_event_dependency( + ProposalDependency::PayloadAndMetadata, + view_number, + event_receiver, + ); + + match event.as_ref() { + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _) => { + payload_commitment_dependency.mark_as_completed(event.clone()); + } + HotShotEvent::QuorumProposalRecv(_, _) => { + proposal_cert_validated_dependency.mark_as_completed(event); + } + HotShotEvent::QCFormed(quorum_certificate) => match quorum_certificate { + Either::Right(_) => { + timeout_dependency.mark_as_completed(event); + } + Either::Left(_) => { + qc_dependency.mark_as_completed(event); + } + }, + HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { + view_sync_dependency.mark_as_completed(event); + } + _ => {} + }; + + // We have three cases to consider: + let combined = AndDependency::from_deps(vec![ + OrDependency::from_deps(vec![AndDependency::from_deps(vec![ + payload_commitment_dependency, + ])]), + OrDependency::from_deps(vec![ + // 1. A QCFormed event and QuorumProposalRecv event + AndDependency::from_deps(vec![qc_dependency, proposal_cert_validated_dependency]), + // 2. A timeout cert was received + AndDependency::from_deps(vec![timeout_dependency]), + // 3. A view sync cert was received. + AndDependency::from_deps(vec![view_sync_dependency]), + ]), + ]); + + let dependency_task = DependencyTask::new( + combined, + ProposalDependencyHandle { + view_number, + sender: event_sender, + consensus: self.consensus.clone(), + }, + ); + self.propose_dependencies + .insert(view_number, dependency_task.run()); + } + + /// Update the latest proposed view number. + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Quorum proposal update latest proposed view", level = "error")] + async fn update_latest_proposed_view(&mut self, new_view: TYPES::Time) -> bool { + if *self.latest_proposed_view < *new_view { + debug!( + "Updating next proposal view from {} to {} in the quorum proposal task", + *self.latest_proposed_view, *new_view + ); + + // Cancel the old dependency tasks. + for view in (*self.latest_proposed_view + 1)..=(*new_view) { + if let Some(dependency) = self.propose_dependencies.remove(&TYPES::Time::new(view)) + { + cancel_task(dependency).await; + } + } + + self.latest_proposed_view = new_view; + + return true; + } + false + } + + /// Handles a consensus event received on the event stream + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Quorum proposal handle", level = "error")] + pub async fn handle( + &mut self, + event: Arc>, + event_receiver: Receiver>>, + event_sender: Sender>>, + ) { + match event.as_ref() { + HotShotEvent::QCFormed(cert) => { + match cert.clone() { + either::Right(timeout_cert) => { + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *timeout_cert.view_number, + )) + .await; + + let view = timeout_cert.view_number + 1; + + self.create_dependency_task_if_new( + view, + event_receiver, + event_sender, + event.clone(), + ); + } + either::Left(qc) => { + let mut consensus = self.consensus.write().await; + consensus.high_qc = qc.clone(); + + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; + + // We need to drop our handle here to make the borrow checker happy. + drop(consensus); + debug!( + "Attempting to publish proposal after forming a QC for view {}", + *qc.view_number + ); + + let view = qc.view_number + 1; + + self.create_dependency_task_if_new( + view, + event_receiver, + event_sender, + event.clone(), + ); + } + } + } + HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, _metadata, view) => { + let view = *view; + if view < self.latest_proposed_view { + debug!( + "Payload commitment is from an older view {:?}", + view.clone() + ); + return; + } + + debug!("Got payload commitment and meta {:?}", payload_commitment); + + self.create_dependency_task_if_new( + view, + event_receiver, + event_sender, + event.clone(), + ); + } + HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_finalize_cert) => { + let view = view_sync_finalize_cert.view_number; + if view < self.latest_proposed_view { + debug!( + "View sync certificate is from an old view {:?}", + view.clone() + ); + return; + } + + if !validate_view_sync_finalize_certificate( + view_sync_finalize_cert.clone(), + event_sender.clone(), + ) { + return; + } + + self.create_dependency_task_if_new(view, event_receiver, event_sender, event); + } + HotShotEvent::QuorumProposalRecv(proposal, _sender) => { + let view = proposal.data.get_view_number(); + if view < self.latest_proposed_view { + debug!("Proposal is from an older view {:?}", proposal.data.clone()); + return; + } + + debug!( + "Received Quorum Proposal for view {}", + *proposal.data.view_number + ); + + // stop polling for the received proposal + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( + *proposal.data.view_number, + )) + .await; + + if !validate_quorum_proposal(proposal.clone(), event_sender.clone()) { + return; + } + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), + &event_sender.clone(), + ) + .await; + + self.create_dependency_task_if_new( + view, + event_receiver, + event_sender, + event.clone(), + ); + } + HotShotEvent::QuorumProposalDependenciesValidated(view) => { + debug!("All proposal dependencies verified for view {:?}", view); + if !self.update_latest_proposed_view(*view).await { + debug!("proposal not updated"); + return; + } + } + _ => {} + } + } +} + +impl> TaskState + for QuorumProposalTaskState +{ + type Event = Arc>; + type Output = (); + fn filter(&self, event: &Arc>) -> bool { + !matches!( + event.as_ref(), + HotShotEvent::QuorumProposalRecv(_, _) + | HotShotEvent::QCFormed(_) + | HotShotEvent::SendPayloadCommitmentAndMetadata(..) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::Shutdown, + ) + } + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + let receiver = task.subscribe(); + let sender = task.clone_sender(); + tracing::trace!("sender queue len {}", sender.len()); + task.state_mut().handle(event, receiver, sender).await; + None + } + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event.as_ref(), HotShotEvent::Shutdown) + } +} diff --git a/task/src/dependency.rs b/task/src/dependency.rs index b42c73e05d..b8d45c9e1d 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -43,7 +43,7 @@ pub struct AndDependency { deps: Vec>>, } impl Dependency> for AndDependency { - /// Returns a vector of all of the results from it's dependencies. + /// Returns a vector of all of the results from it's dependencies. /// The results will be in a random order async fn completed(self) -> Option> { let futures = FuturesUnordered::from_iter(self.deps); @@ -119,9 +119,14 @@ impl OrDependency { pub struct EventDependency { /// Channel of incomming events pub(crate) event_rx: Receiver, + /// Closure which returns true if the incoming `T` is the /// thing that completes this dependency pub(crate) match_fn: Box bool + Send>, + + /// The potentially externally completed dependency. If the dependency was seeded from an event + /// message, we can mark it as already done in lieu of other events still pending. + completed_dependency: Option, } impl EventDependency { @@ -131,12 +136,21 @@ impl EventDependency { Self { event_rx: receiver, match_fn: Box::new(match_fn), + completed_dependency: None, } } + + /// Mark a dependency as completed. + pub fn mark_as_completed(&mut self, dependency: T) { + self.completed_dependency = Some(dependency); + } } impl Dependency for EventDependency { async fn completed(mut self) -> Option { + if let Some(dependency) = self.completed_dependency { + return Some(dependency); + } loop { match self.event_rx.recv_direct().await { Ok(event) => { @@ -164,6 +178,7 @@ mod tests { EventDependency { event_rx: rx, match_fn: Box::new(move |v| *v == val), + completed_dependency: None, } } diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs index 3603b4541f..e5ff1e0179 100644 --- a/task/src/dependency_task.rs +++ b/task/src/dependency_task.rs @@ -82,10 +82,7 @@ mod test { } fn eq_dep(rx: Receiver, val: usize) -> EventDependency { - EventDependency { - event_rx: rx, - match_fn: Box::new(move |v| *v == val), - } + EventDependency::new(rx, Box::new(move |v| *v == val)) } #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] From 26a3ea392648a214c952cd6df8c0289c973b05a5 Mon Sep 17 00:00:00 2001 From: miles <66052478+miles-six@users.noreply.github.com> Date: Sat, 30 Mar 2024 02:21:04 +0800 Subject: [PATCH 0903/1393] fix: typos (#2834) --- hotshot-qc/src/snarked.rs | 2 +- hotshot-stake-table/src/mt_based.rs | 2 +- hotshot-stake-table/src/mt_based/internal.rs | 4 ++-- hotshot-stake-table/src/vec_based.rs | 2 +- hotshot-stake-table/src/vec_based/config.rs | 2 +- hotshot/src/lib.rs | 8 ++++---- hotshot/src/traits/networking/combined_network.rs | 2 +- hotshot/src/traits/networking/libp2p_network.rs | 2 +- libp2p-networking/README.md | 2 +- libp2p-networking/src/network/behaviours/dht/mod.rs | 8 ++++---- .../src/network/behaviours/request_response.rs | 4 ++-- libp2p-networking/src/network/def.rs | 6 +++--- libp2p-networking/src/network/node.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 2 +- libp2p-networking/tests/counter.rs | 2 +- task-impls/src/builder.rs | 2 +- task-impls/src/consensus.rs | 7 +++---- task-impls/src/da.rs | 2 +- task-impls/src/harness.rs | 2 +- task-impls/src/request.rs | 6 +++--- task-impls/src/response.rs | 2 +- task-impls/src/transactions.rs | 2 +- task-impls/src/upgrade.rs | 2 +- task-impls/src/vid.rs | 2 +- task-impls/src/view_sync.rs | 4 ++-- task-impls/src/vote_collection.rs | 2 +- task/src/dependency.rs | 8 ++++---- task/src/lib.rs | 4 ++-- task/src/task.rs | 6 +++--- testing-macros/tests/tests.rs | 6 +++--- testing/src/overall_safety_task.rs | 2 +- testing/src/test_runner.rs | 2 +- testing/tests/catchup.rs | 8 ++++---- testing/tests/libp2p.rs | 2 +- types/src/error.rs | 4 ++-- types/src/lib.rs | 4 ++-- types/src/simple_vote.rs | 2 +- types/src/traits/network.rs | 8 ++++---- types/src/traits/qc.rs | 2 +- types/src/traits/states.rs | 6 +++--- types/src/utils.rs | 2 +- types/src/vote.rs | 4 ++-- 42 files changed, 76 insertions(+), 77 deletions(-) diff --git a/hotshot-qc/src/snarked.rs b/hotshot-qc/src/snarked.rs index f5b54786b5..06f4a7bfaf 100644 --- a/hotshot-qc/src/snarked.rs +++ b/hotshot-qc/src/snarked.rs @@ -1,4 +1,4 @@ -//! This is a `SNARKed` QC implemenation, see more in the `HotShot` paper. +//! This is a `SNARKed` QC implementation, see more in the `HotShot` paper. mod circuit; diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 235fbb3d19..af5df675cd 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -246,7 +246,7 @@ mod tests { st.total_stake(SnapshotVersion::LastEpochStart)?, U256::from(0) ); - // set to zero for futher sampling test + // set to zero for further sampling test assert_eq!( st.set_value(&keys[1], U256::from(0)).unwrap(), U256::from(100) diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 89c3719661..4fb066c014 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -170,7 +170,7 @@ impl MerkleProof { #[tagged("MERKLE_COMM")] #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, CanonicalSerialize, CanonicalDeserialize)] -/// A succint commitment for Merkle tree +/// A succinct commitment for Merkle tree pub struct MerkleCommitment { /// Merkle tree digest comm: FieldType, @@ -203,7 +203,7 @@ impl MerkleCommitment { } impl PersistentMerkleNode { - /// Returns the succint commitment of this subtree + /// Returns the succinct commitment of this subtree pub fn commitment(&self) -> FieldType { match self { PersistentMerkleNode::Empty => FieldType::from(0), diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 06e178f38d..49c46e76be 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -417,7 +417,7 @@ mod tests { st.total_stake(SnapshotVersion::LastEpochStart)?, U256::from(0) ); - // set to zero for futher sampling test + // set to zero for further sampling test assert_eq!( st.set_value(&keys[1].0, U256::from(0)).unwrap(), U256::from(100) diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 8170edfcbc..ad81af094f 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -13,7 +13,7 @@ pub type FieldType = ark_ed_on_bn254::Fq; /// Hashable representation of a key /// NOTE: commitment is only used in light client contract. -/// For this application, we needs only hash the Schnorr verfication key. +/// For this application, we needs only hash the Schnorr verification key. impl ToFields for StateVerKey { const SIZE: usize = 2; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 555db95843..afb440be43 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -74,7 +74,7 @@ pub const H_256: usize = 32; /// Bundle of the networks used in consensus pub struct Networks> { - /// Newtork for reaching all nodes + /// Network for reaching all nodes pub quorum_network: Arc, /// Network for reaching the DA committee @@ -632,13 +632,13 @@ pub struct HotShotInitializer { /// Optional validated state. /// - /// If it's given, we'll use it to constrcut the `SystemContext`. Otherwise, we'll construct + /// If it's given, we'll use it to construct the `SystemContext`. Otherwise, we'll construct /// the state from the block header. validated_state: Option>, /// Optional state delta. /// - /// If it's given, we'll use it to constrcut the `SystemContext`. + /// If it's given, we'll use it to construct the `SystemContext`. state_delta: Option>::Delta>>, /// Starting view number that we are confident won't lead to a double vote after restart. @@ -677,7 +677,7 @@ impl HotShotInitializer { /// # Arguments /// * `start_view` - The minimum view number that we are confident won't lead to a double vote /// after restart. - /// * `validated_state` - Optional validated state that if given, will be used to constrcut the + /// * `validated_state` - Optional validated state that if given, will be used to construct the /// `SystemContext`. pub fn from_reload( anchor_leaf: Leaf, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 22020be9c2..f5364bc530 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -1,4 +1,4 @@ -//! Networking Implementation that has a primary and a fallback newtork. If the primary +//! Networking Implementation that has a primary and a fallback network. If the primary //! Errors we will use the backup to send or receive use super::{push_cdn_network::PushCdnNetwork, NetworkError}; use crate::traits::implementations::Libp2pNetwork; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index cca3824bc1..e6e23eaec0 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -67,7 +67,7 @@ use versioned_binary_serialization::{ BinarySerializer, Serializer, }; -/// convienence alias for the type for bootstrap addresses +/// convenience alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests pub type BootstrapAddrs = Arc, Multiaddr)>>>; diff --git a/libp2p-networking/README.md b/libp2p-networking/README.md index fc7a00f656..b6f7b86bff 100644 --- a/libp2p-networking/README.md +++ b/libp2p-networking/README.md @@ -1,6 +1,6 @@ # USAGE -Networking library inteded for use with HotShot. Builds upon abstractions from libp2p-rs. +Networking library intended for use with HotShot. Builds upon abstractions from libp2p-rs. ## CLI Demo diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index d7cde3e87e..c1885b2b9c 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -52,9 +52,9 @@ pub struct DHTBehaviour { in_progress_get_record_queries: HashMap, /// List of in-progress put requests in_progress_put_record_queries: HashMap, - /// List of previously failled get requests + /// List of previously failed get requests queued_get_record_queries: VecDeque, - /// List of previously failled put requests + /// List of previously failed put requests queued_put_record_queries: VecDeque, /// Kademlia behaviour pub kadem: KademliaBehaviour, @@ -278,7 +278,7 @@ impl DHTBehaviour { }; // if the query has completed and we need to retry - // or if the query has enoguh replicas to return to the client + // or if the query has enough replicas to return to the client // trigger retry or completion logic if num >= NUM_REPLICATED_TO_TRUST || last { if let Some(KadGetQuery { @@ -441,7 +441,7 @@ impl DHTBehaviour { info!("Finished bootstrap for peer {:?}", self.peer_id); self.bootstrap_state.state = State::NotStarted; self.event_queue.push(DHTEvent::IsBootstrapped); - // After initial bootstrap suceeds do it every 2 minutes to maintain routing. + // After initial bootstrap succeeds do it every 2 minutes to maintain routing. self.bootstrap_state.backoff = ExponentialBackoff::new(1, Duration::from_secs(120)); self.bootstrap_state.backoff.start_next(true); diff --git a/libp2p-networking/src/network/behaviours/request_response.rs b/libp2p-networking/src/network/behaviours/request_response.rs index ecdd2f000a..f7814d1452 100644 --- a/libp2p-networking/src/network/behaviours/request_response.rs +++ b/libp2p-networking/src/network/behaviours/request_response.rs @@ -44,7 +44,7 @@ impl RequestResponseState { } => { let chan = self.request_map.remove(&request_id)?; if chan.send(Some(response)).is_err() { - tracing::warn!("Failed to send resonse to client, channel closed."); + tracing::warn!("Failed to send response to client, channel closed."); } None } @@ -57,7 +57,7 @@ impl RequestResponseState { tracing::warn!("Error Sending Request {:?}", error); let chan = self.request_map.remove(&request_id)?; if chan.send(None).is_err() { - tracing::warn!("Failed to send resonse to client, channel closed."); + tracing::warn!("Failed to send response to client, channel closed."); } None } diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 6de82414cf..55fd1d19f7 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -29,7 +29,7 @@ use libp2p_swarm_derive::NetworkBehaviour; #[behaviour(to_swarm = "NetworkEventInternal")] pub struct NetworkDef { /// purpose: broadcasting messages to many peers - /// NOTE gossipsub works ONLY for sharing messsages right now + /// NOTE gossipsub works ONLY for sharing messages right now /// in the future it may be able to do peer discovery and routing /// #[debug(skip)] @@ -98,14 +98,14 @@ impl NetworkDef { /// Subscribe to a given topic pub fn subscribe_gossip(&mut self, t: &str) { if let Err(e) = self.gossipsub.subscribe(&IdentTopic::new(t)) { - error!("Failed to subsribe to topic {:?}. Error: {:?}", t, e); + error!("Failed to subscribe to topic {:?}. Error: {:?}", t, e); } } /// Unsubscribe from a given topic pub fn unsubscribe_gossip(&mut self, t: &str) { if let Err(e) = self.gossipsub.unsubscribe(&IdentTopic::new(t)) { - error!("Failed to unsubsribe from topic {:?}. Error: {:?}", t, e); + error!("Failed to unsubscribe from topic {:?}. Error: {:?}", t, e); } } } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 008f42fbff..013722dead 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -488,7 +488,7 @@ impl NetworkNode { Ok(false) } - /// event handler for events emited from the swarm + /// event handler for events emitted from the swarm #[allow(clippy::type_complexity)] #[instrument(skip(self))] async fn handle_swarm_events( diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 1887682b60..f58f502fd4 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -181,7 +181,7 @@ impl NetworkNodeHandle { /// /// # Errors /// - /// Will retrun a networking error if the channel closes before the result + /// Will return a networking error if the channel closes before the result /// can be sent back pub async fn request_data( &self, diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 238122f19a..d6318602a2 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -495,7 +495,7 @@ async fn test_coverage_request_response_one_round() { .await; } -/// stress test of direct messsage +/// stress test of direct message #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index dec86c8a14..ddcbebb49e 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -14,7 +14,7 @@ use tagged_base64::TaggedBase64; use versioned_binary_serialization::version::StaticVersionType; #[derive(Debug, Snafu, Serialize, Deserialize)] -/// Represents errors thant builder client may return +/// Represents errors than builder client may return pub enum BuilderClientError { // NOTE: folds BuilderError::NotFound & builderError::Missing // into one. Maybe we'll want to handle that separately in diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 7444be4329..efcf02020b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -452,7 +452,7 @@ impl, A: ConsensusApi + .metrics .current_view .set(usize::try_from(self.cur_view.get_u64()).unwrap()); - // Do the comparison before the substraction to avoid potential overflow, since + // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. if usize::try_from(self.cur_view.get_u64()).unwrap() > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() @@ -564,7 +564,7 @@ impl, A: ConsensusApi + // SS: It is possible that we may wish to vote against any quorum proposal // if it attaches an upgrade certificate that we cannot support. // But I don't think there's much point in this -- if the UpgradeCertificate - // threshhold (90%) has been reached, voting against the QuorumProposal on that basis + // threshold (90%) has been reached, voting against the QuorumProposal on that basis // will probably be completely symbolic anyway. // // We should just make sure we don't *sign* an UpgradeCertificate for an upgrade @@ -726,7 +726,6 @@ impl, A: ConsensusApi + return; }; - async_spawn(validate_proposal( proposal.clone(), parent_leaf, @@ -911,7 +910,7 @@ impl, A: ConsensusApi + self.current_proposal = None; } HotShotEvent::QuorumVoteRecv(ref vote) => { - debug!("Received quroum vote: {:?}", vote.get_view_number()); + debug!("Received quorum vote: {:?}", vote.get_view_number()); if self .quorum_membership .get_leader(vote.get_view_number() + 1) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 852c0567a6..07576e819a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -104,7 +104,7 @@ impl, A: ConsensusApi + // Allow a DA proposal that is one view older, in case we have voted on a quorum // proposal and updated the view. // `self.cur_view` should be at least 1 since there is a view change before getting - // the `DAProposalRecv` event. Otherewise, the view number subtraction below will + // the `DAProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 03123f3335..3712752bbe 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -40,7 +40,7 @@ impl TaskState for TestHarnessState { /// outputs. Should be `false` in most cases. /// /// # Panics -/// Panics if any state the test expects is not set. Panicing causes a test failure +/// Panics if any state the test expects is not set. Panicking causes a test failure #[allow(clippy::implicit_hasher)] #[allow(clippy::panic)] pub async fn run_harness>>>( diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index d223f29784..0064d0c5d2 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -129,7 +129,7 @@ impl, Ver: StaticVersionType + 'st .for_each(|r| self.run_delay(r, sender.clone(), view, bind_version)); } - /// Creats the srequest structures for all types that are needed. + /// Creates the srequest structures for all types that are needed. async fn build_requests(&self, view: TYPES::Time, _: Ver) -> Vec> { let mut reqs = Vec::new(); if !self.state.read().await.vid_shares.contains_key(&view) { @@ -140,7 +140,7 @@ impl, Ver: StaticVersionType + 'st } /// run a delayed request task for a request. The first response - /// recieved will be sent over `sender` + /// received will be sent over `sender` fn run_delay( &self, request: RequestKind, @@ -176,7 +176,7 @@ impl, Ver: StaticVersionType + 'st } } -/// A short lived task that waits a delay and starts trying peers until it complets +/// A short lived task that waits a delay and starts trying peers until it completes /// a request. If at any point the requested info is seen in the data stores or /// the view has moved beyond the view we are requesting, the task will completed. struct DelayedRequester> { diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index d4124097d4..15d74b0cf7 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -111,7 +111,7 @@ impl NetworkResponseState { } /// Handle the request contained in the message. Returns the response we should send - /// First parses the kind and passes to the appropriate hanlder for the specific type + /// First parses the kind and passes to the appropriate handler for the specific type /// of the request. async fn handle_request(&self, req: DataRequest) -> Message { match req.request { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4d8be0e409..7bd2e696a5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -59,7 +59,7 @@ pub struct TransactionTaskState< /// Network for all nodes pub network: Arc, - /// Membership for teh quorum + /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 1f245113a5..a7c7d5f635 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -91,7 +91,7 @@ impl, A: ConsensusApi + // Allow an upgrade proposal that is one view older, in case we have voted on a quorum // proposal and updated the view. // `self.cur_view` should be at least 1 since there is a view change before getting - // the `UpgradeProposalRecv` event. Otherewise, the view number subtraction below will + // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 7b394a7bb4..07c1aefd0b 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -80,7 +80,7 @@ impl, A: ConsensusApi + .await; #[cfg(async_executor_impl = "tokio")] - // Unwrap here will just propogate any panic from the spawned task, it's not a new place we can panic. + // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building broadcast_event( diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index e15530e4f5..942fbb9fea 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -70,7 +70,7 @@ pub struct ViewSyncTaskState< pub next_view: TYPES::Time, /// Network for all nodes pub network: Arc, - /// Membership for teh quorum + /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -166,7 +166,7 @@ pub struct ViewSyncReplicaTaskState< /// Network for all nodes pub network: Arc, - /// Membership for teh quorum + /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 69501c1e2a..60c0401bd6 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -23,7 +23,7 @@ use hotshot_types::{ }; use tracing::{debug, error}; -/// Task state for collecting votes of one type and emiting a certificate +/// Task state for collecting votes of one type and emitting a certificate pub struct VoteCollectionTaskState< TYPES: NodeType, VOTE: Vote, diff --git a/task/src/dependency.rs b/task/src/dependency.rs index b8d45c9e1d..ad172e2eb6 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -77,7 +77,7 @@ impl AndDependency { } } -/// Defines a dependency that complets when one of it's dependencies compeltes +/// Defines a dependency that completes when one of it's dependencies completes pub struct OrDependency { /// Dependencies being combined deps: Vec>>, @@ -108,16 +108,16 @@ impl OrDependency { } Self { deps: pinned } } - /// Add another dependecy + /// Add another dependency pub fn add_dep(&mut self, dep: impl Dependency + Send + 'static) { self.deps.push(dep.completed().boxed()); } } -/// A dependency that listens on a chanel for an event +/// A dependency that listens on a channel for an event /// that matches what some value it wants. pub struct EventDependency { - /// Channel of incomming events + /// Channel of incoming events pub(crate) event_rx: Receiver, /// Closure which returns true if the incoming `T` is the diff --git a/task/src/lib.rs b/task/src/lib.rs index cf71eb7090..f38e568065 100644 --- a/task/src/lib.rs +++ b/task/src/lib.rs @@ -1,6 +1,6 @@ -//! Task primatives for `HotShot` +//! Task primitives for `HotShot` -/// Simple Dependecy types +/// Simple Dependency types pub mod dependency; /// Task which can uses dependencies pub mod dependency_task; diff --git a/task/src/task.rs b/task/src/task.rs index bb24f48f62..4cd446802d 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -32,7 +32,7 @@ use crate::{ pub trait TaskState: Send { /// Type of event sent and received by the task type Event: Clone + Send + Sync + 'static; - /// The result returned when this task compeltes + /// The result returned when this task completes type Output: Send; /// Handle event and update state. Return true if the task is finished /// false otherwise. The handler can access the state through `Task::state_mut` @@ -183,7 +183,7 @@ impl Task { &self.state } - /// Spawn a new task adn register it. It will get all events not seend + /// Spawn a new task and register it. It will get all events not seend /// by the task creating it. pub async fn run_sub_task(&self, state: S) { let task = Task { @@ -323,7 +323,7 @@ impl TaskRegistry { } /// Wait for the results of all the tasks registered /// # Panics - /// Panics if one of the tasks paniced + /// Panics if one of the tasks panicked pub async fn join_all(self) -> Vec<()> { #[cfg(async_executor_impl = "async-std")] let ret = join_all(self.task_handles.into_inner()).await; diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs index c54b987953..8c2be38260 100644 --- a/testing-macros/tests/tests.rs +++ b/testing-macros/tests/tests.rs @@ -90,7 +90,7 @@ cross_tests!( }; metadata.overall_safety_properties.num_failed_views = 3; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; metadata } @@ -106,7 +106,7 @@ cross_tests!( let mut metadata = TestMetadata::default_more_nodes(); metadata.overall_safety_properties.num_failed_views = 6; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; metadata.num_bootstrap_nodes = 14; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the @@ -183,7 +183,7 @@ cross_tests!( // 2 nodes fail triggering view sync, expect no other timeouts metadata.overall_safety_properties.num_failed_views = 2; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 15; metadata diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 58aa2d1c95..af77f42c99 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -52,7 +52,7 @@ pub enum OverallSafetyTaskErr { NotEnoughDecides { /// expected number of decides expected: usize, - /// acutal number of decides + /// actual number of decides got: usize, }, /// mismatched leaves for a view diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index b586e84ec0..3d52f794dd 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -127,7 +127,7 @@ where Storage = TestStorage, >, { - /// excecute test + /// execute test /// /// # Panics /// if the test fails diff --git a/testing/tests/catchup.rs b/testing/tests/catchup.rs index f08a28a3f8..5a2dda7d1c 100644 --- a/testing/tests/catchup.rs +++ b/testing/tests/catchup.rs @@ -42,7 +42,7 @@ async fn test_catchup() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep commiting rounds after the catchup, but not the full 50. + // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, num_failed_views: 5, ..Default::default() @@ -107,7 +107,7 @@ async fn test_catchup_cdn() { .await; } -/// Test that one node catches up and has sucessful views after coming back +/// Test that one node catches up and has successful views after coming back #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -148,7 +148,7 @@ async fn test_catchup_one_node() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep commiting rounds after the catchup, but not the full 50. + // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, num_failed_views: 2, ..Default::default() @@ -269,7 +269,7 @@ async fn test_catchup_reload() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep commiting rounds after the catchup, but not the full 50. + // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, ..Default::default() }; diff --git a/testing/tests/libp2p.rs b/testing/tests/libp2p.rs index e87c5bac76..e03e5ed36d 100644 --- a/testing/tests/libp2p.rs +++ b/testing/tests/libp2p.rs @@ -79,7 +79,7 @@ async fn libp2p_network_failures_2() { metadata.start_nodes = 12; // 2 nodes fail triggering view sync, expect no other timeouts metadata.overall_safety_properties.num_failed_views = 1; - // Make sure we keep commiting rounds after the bad leaders, but not the full 50 because of the numerous timeouts + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 15; metadata diff --git a/types/src/error.rs b/types/src/error.rs index 127f81506b..a9209237c4 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -46,7 +46,7 @@ pub enum HotShotError { }, /// Item was not present in storage LeafNotFound {/* TODO we should create a way to to_string */}, - /// Error accesing storage + /// Error accessing storage /// Invalid state machine state #[snafu(display("Invalid state machine state: {}", context))] InvalidState { @@ -73,7 +73,7 @@ pub enum HotShotError { /// Threshold of signatures needed for a quorum threshold: NonZeroU64, }, - /// Miscelaneous error + /// Miscellaneous error /// TODO fix this with /// #181 Misc { diff --git a/types/src/lib.rs b/types/src/lib.rs index 4f882f165c..fd23ad5a86 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -163,9 +163,9 @@ pub struct HotShotConfig { pub known_nodes_without_stake: Vec, /// My own validator config, including my public key, private key, stake value, serving as private parameter pub my_own_validator_config: ValidatorConfig, - /// List of DA committee (staking)nodes for static DA committe + /// List of DA committee (staking)nodes for static DA committee pub da_staked_committee_size: usize, - /// List of DA committee nodes (non-staking)nodes for static DA committe + /// List of DA committee nodes (non-staking)nodes for static DA committee pub da_non_staked_committee_size: usize, /// Base duration for next-view timeout, in milliseconds pub next_view_timeout: u64, diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index faa55c870b..516e2338b0 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -95,7 +95,7 @@ mod sealed { /// Only structs in this file can impl `Sealed` pub trait Sealed {} - // TODO: Does the implement for things outside this file that are commitable? + // TODO: Does the implement for things outside this file that are committable? impl Sealed for C {} } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 5c2000dd21..d99a04a7d5 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -100,7 +100,7 @@ pub enum NetworkError { /// source of error source: Box, }, - /// collection of libp2p secific errors + /// collection of libp2p specific errors Libp2pMulti { /// sources of errors sources: Vec>, @@ -275,7 +275,7 @@ pub enum RequestKind { DAProposal(TYPES::Time), } -/// A resopnse for a request. `SequencingMessage` is the same as other network messages +/// A response for a request. `SequencingMessage` is the same as other network messages /// The kind of message `M` is is determined by what we requested #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] @@ -604,7 +604,7 @@ impl NetworkReliability for PartiallySynchronousNetwork { true } fn sample_delay(&self) -> Duration { - // act asyncronous before gst + // act asynchronous before gst if self.start.elapsed() < self.gst { if self.asynchronous.sample_keep() { self.asynchronous.sample_delay() @@ -613,7 +613,7 @@ impl NetworkReliability for PartiallySynchronousNetwork { self.synchronous.sample_delay() + self.gst } } else { - // act syncronous after gst + // act synchronous after gst self.synchronous.sample_delay() } } diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 7dd11010f9..6151af4eb0 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -69,7 +69,7 @@ pub trait QuorumCertificateScheme< /// Checks an aggregated signature over some message provided as input /// * `qc_vp` - public parameters for validating the QC /// * `message` - message to check the aggregated signature against - /// * `qc` - quroum certificate + /// * `qc` - quorum certificate /// * `returns` - the quorum size if the qc is valid, an error otherwise. /// /// # Errors diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 2fb851ba0d..db4505f7f6 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -1,4 +1,4 @@ -//! Abstractions over the immutable instance-level state and hte global state that blocks modify. +//! Abstractions over the immutable instance-level state and the global state that blocks modify. //! //! This module provides the [`InstanceState`] and [`ValidatedState`] traits, which serve as //! compatibilities over the current network state, which is modified by the transactions contained @@ -35,9 +35,9 @@ pub trait ValidatedState: { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; - /// The type of the instance-level state this state is assocaited with + /// The type of the instance-level state this state is associated with type Instance: InstanceState; - /// The type of the state delta this state is assocaited with. + /// The type of the state delta this state is associated with. type Delta: StateDelta; /// Time compatibility needed for reward collection type Time: ConsensusTime; diff --git a/types/src/utils.rs b/types/src/utils.rs index 663b676e75..99af91d328 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -182,7 +182,7 @@ impl AsRef for BuilderCommitment { /// For the wire format, we use bincode with the following options: /// - No upper size limit -/// - Litte endian encoding +/// - Little endian encoding /// - Varint encoding /// - Reject trailing bytes #[allow(clippy::type_complexity)] diff --git a/types/src/vote.rs b/types/src/vote.rs index ba49f4732d..8b1df1f0f8 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -73,7 +73,7 @@ pub trait Certificate: HasViewNumber { /// Get the vote commitment which the votes commit to fn get_data_commitment(&self) -> Commitment; } -/// Mapping of vote commitment to sigatures and bitvec +/// Mapping of vote commitment to signatures and bitvec type SignersMap = HashMap< COMMITMENT, ( @@ -87,7 +87,7 @@ pub struct VoteAccumulator< VOTE: Vote, CERT: Certificate, > { - /// Map of all signatures accumlated so far + /// Map of all signatures accumulated so far pub vote_outcomes: VoteMap2< Commitment, TYPES::SignatureKey, From e7232454d2f3cfdd33f84f653169c56123e76c60 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:27:51 -0400 Subject: [PATCH 0904/1393] [Combined Network] Fix examples (#2864) * fix combined network example * lint --- examples/combined/all.rs | 117 ++++++++++++------ .../src/traits/networking/combined_network.rs | 14 +-- .../src/traits/networking/libp2p_network.rs | 6 +- .../src/traits/networking/push_cdn_network.rs | 10 +- orchestrator/run-config.toml | 5 +- 5 files changed, 101 insertions(+), 51 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index edcada002f..30d9290814 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -10,16 +10,18 @@ use crate::{ types::{DANetwork, NodeImpl, QuorumNetwork}, }; use async_compatibility_layer::art::async_spawn; -use async_compatibility_layer::channel::oneshot; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use cdn_broker::{Broker, Config as BrokerConfig, ConfigBuilder as BrokerConfigBuilder}; +use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; +use hotshot::traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}; +use hotshot::types::SignatureKey; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; -use hotshot_types::constants::WebServerVersion; +use hotshot_types::traits::node_implementation::NodeType; +use rand::{rngs::StdRng, RngCore, SeedableRng}; use std::net::{IpAddr, Ipv4Addr}; -use std::sync::Arc; -use surf_disco::Url; +use std::path::Path; use tracing::{error, instrument}; -use versioned_binary_serialization::version::StaticVersionType; /// general infra used for this example #[path = "../infra/mod.rs"] @@ -34,38 +36,83 @@ async fn main() { let (config, orchestrator_url) = read_orchestrator_init_config::(); - // spawn web servers - let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); - let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); - let _sender = Arc::new(server_shutdown_sender_cdn); - let _sender = Arc::new(server_shutdown_sender_da); + // The configuration we are using for testing is 2 brokers & 1 marshal + // A keypair shared between brokers + let (broker_public_key, broker_private_key) = + ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); + // Get the OS temporary directory + let temp_dir = std::env::temp_dir(); + + // Create an SQLite file inside of the temporary directory + let discovery_endpoint = temp_dir + .join(Path::new(&format!( + "test-{}.sqlite", + StdRng::from_entropy().next_u64() + ))) + .to_string_lossy() + .into_owned(); + + // 2 brokers + for _ in 0..2 { + // Get the ports to bind to + let private_port = portpicker::pick_unused_port().expect("could not find an open port"); + let public_port = portpicker::pick_unused_port().expect("could not find an open port"); + + // Extrapolate addresses + let private_address = format!("127.0.0.1:{private_port}"); + let public_address = format!("127.0.0.1:{public_port}"); + + let config: BrokerConfig::SignatureKey>> = + BrokerConfigBuilder::default() + .discovery_endpoint(discovery_endpoint.clone()) + .keypair(KeyPair { + public_key: WrappedSignatureKey(broker_public_key), + private_key: broker_private_key.clone(), + }) + .metrics_enabled(false) + .private_bind_address(private_address.clone()) + .public_bind_address(public_address.clone()) + .private_advertise_address(private_address) + .public_advertise_address(public_address) + .build() + .expect("failed to build broker config"); + + // Create and spawn the broker + async_spawn(async move { + let broker: Broker> = + Broker::new(config).await.expect("broker failed to start"); + + // Error if we stopped unexpectedly + if let Err(err) = broker.start().await { + error!("broker stopped: {err}"); + } + }); + } + + // Get the port to use for the marshal + let marshal_endpoint = config + .cdn_marshal_address + .clone() + .expect("CDN marshal address must be specified"); + + // Configure the marshal + let marshal_config = MarshalConfigBuilder::default() + .bind_address(marshal_endpoint.clone()) + .metrics_enabled(false) + .discovery_endpoint(discovery_endpoint) + .build() + .expect("failed to build marshal config"); + + // Spawn the marshal async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown_cdn), - Url::parse("http://localhost:9000").unwrap(), - WebServerVersion::instance(), - ) - .await - { - error!("Problem starting cdn web server: {:?}", e); - } - }); - async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown_da), - Url::parse("http://localhost:9001").unwrap(), - WebServerVersion::instance(), - ) - .await - { - error!("Problem starting da web server: {:?}", e); + let marshal: Marshal> = Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); + + // Error if we stopped unexpectedly + if let Err(err) = marshal.start().await { + error!("broker stopped: {err}"); } }); diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index f5364bc530..4b4c155db1 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -3,13 +3,11 @@ use super::{push_cdn_network::PushCdnNetwork, NetworkError}; use crate::traits::implementations::Libp2pNetwork; use async_lock::RwLock; -use hotshot_types::{ - constants::{ - COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, - COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, - }, - traits::network::AsyncGenerator, +use hotshot_types::constants::{ + COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, + COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }; + use lru::LruCache; use std::{ collections::BTreeSet, @@ -25,7 +23,9 @@ use futures::{channel::mpsc, join, select, FutureExt}; use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; +use hotshot_types::traits::network::{ + AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, +}; use hotshot_types::{ boxed_sync, data::ViewNumber, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index e6e23eaec0..c2109bd145 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -15,15 +15,15 @@ use futures::{ FutureExt, StreamExt, }; #[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::NetworkReliability; +use hotshot_types::traits::network::{AsyncGenerator, NetworkReliability}; use hotshot_types::{ boxed_sync, constants::{Version01, LOOK_AHEAD, STATIC_VER_0_1, VERSION_0_1}, data::ViewNumber, traits::{ network::{ - self, AsyncGenerator, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, - NetworkError, NetworkMsg, ResponseMessage, + self, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, + NetworkMsg, ResponseMessage, }, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 6e03c9c004..67e4dc7147 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -20,9 +20,10 @@ use cdn_client::{ }; #[cfg(feature = "hotshot-testing")] use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; -use hotshot_types::traits::network::AsyncGenerator; #[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; +use hotshot_types::traits::network::{ + AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, +}; use hotshot_types::{ boxed_sync, constants::{Version01, VERSION_0_1}, @@ -37,13 +38,12 @@ use hotshot_types::{ BoxSyncFuture, }; #[cfg(feature = "hotshot-testing")] -use rand::rngs::StdRng; -#[cfg(feature = "hotshot-testing")] -use rand::{RngCore, SeedableRng}; +use rand::{rngs::StdRng, RngCore, SeedableRng}; use std::collections::BTreeSet; use std::marker::PhantomData; #[cfg(feature = "hotshot-testing")] use std::sync::atomic::{AtomicBool, Ordering}; +#[cfg(feature = "hotshot-testing")] use std::{path::Path, sync::Arc, time::Duration}; use tracing::{error, warn}; use versioned_binary_serialization::{ diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 69dd92e285..8fe8767bdb 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -66,7 +66,7 @@ mesh_n_low = 4 mesh_outbound_min = 2 mesh_n = 4 online_time = 10 -base_port = 9000 +base_port = 8000 [web_server_config] url = "http://localhost:9000" @@ -74,6 +74,9 @@ url = "http://localhost:9000" [da_web_server_config] url = "http://localhost:9001" +[combined_network_config.delay_duration] +secs = 1 +nanos = 0 [web_server_config.wait_between_polls] secs = 0 From 25fbccfa5346548ca3398254d56084cdcabd718e Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 29 Mar 2024 17:45:09 -0400 Subject: [PATCH 0905/1393] Fix block builder test (#2870) --- testing/tests/block_builder.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/testing/tests/block_builder.rs b/testing/tests/block_builder.rs index 1f7c268bde..5749a7703c 100644 --- a/testing/tests/block_builder.rs +++ b/testing/tests/block_builder.rs @@ -46,7 +46,20 @@ async fn test_random_block_builder() { .await .expect("Failed to get available blocks"); - assert!(!blocks.is_empty()); + { + let mut attempt = 0; + + while blocks.is_empty() && attempt < 50 { + blocks = client + .get_available_blocks(vid_commitment(&vec![], 1)) + .await + .expect("Failed to get available blocks"); + attempt += 1; + async_sleep(Duration::from_millis(100)).await; + } + + assert!(!blocks.is_empty()); + } // Test claiming available block let signature = { From 8964b2318157c1a93035ccac4fb17e3804047f10 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 29 Mar 2024 19:01:32 -0400 Subject: [PATCH 0906/1393] prepare for sequencer release (#2867) --- examples/infra/mod.rs | 3 +-- types/src/data.rs | 7 ++++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index bc00f83461..11ffad6498 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -335,10 +335,9 @@ fn webserver_network_from_config( - config: NetworkConfig, + mut config: NetworkConfig, pub_key: TYPES::SignatureKey, ) -> Libp2pNetwork, TYPES::SignatureKey> { - let mut config = config; let libp2p_config = config .libp2p_config .take() diff --git a/types/src/data.rs b/types/src/data.rs index 75c6ebbe4e..f0f3664a44 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -477,10 +477,15 @@ impl Leaf { pub fn set_parent_commitment(&mut self, commitment: Commitment) { self.parent_commitment = commitment; } - /// The block header contained in this leaf. + /// Get a reference to the block header contained in this leaf. pub fn get_block_header(&self) -> &::BlockHeader { &self.block_header } + + /// Get a mutable reference to the block header contained in this leaf. + pub fn get_block_header_mut(&mut self) -> &mut ::BlockHeader { + &mut self.block_header + } /// Fill this leaf with the block payload. /// /// # Errors From c6e4a48404b9e4947cbf86782bd36fe3bfd6de75 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 1 Apr 2024 10:23:27 -0400 Subject: [PATCH 0907/1393] Add autonat protocol to Libp2p (#2872) * add autonat protocol * Use non global as well * Some autonat logging * add known peers to probe for autonat * lint --- libp2p-networking/src/network/def.rs | 14 ++++++++++ libp2p-networking/src/network/mod.rs | 2 ++ libp2p-networking/src/network/node.rs | 40 +++++++++++++++++++++++++-- 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 55fd1d19f7..146b666187 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -1,5 +1,6 @@ use futures::channel::oneshot::Sender; use libp2p::{ + autonat, gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, request_response::{cbor, OutboundRequestId, ResponseChannel}, @@ -51,6 +52,11 @@ pub struct NetworkDef { /// Behaviour for requesting and receiving data #[debug(skip)] pub request_response: libp2p::request_response::cbor::Behaviour, + + /// Auto NAT behaviour to determine if we are publically reachable and + /// by which address + #[debug(skip)] + pub autonat: libp2p::autonat::Behaviour, } impl NetworkDef { @@ -62,6 +68,7 @@ impl NetworkDef { identify: IdentifyBehaviour, direct_message: cbor::Behaviour, Vec>, request_response: cbor::Behaviour, + autonat: autonat::Behaviour, ) -> NetworkDef { Self { gossipsub, @@ -69,6 +76,7 @@ impl NetworkDef { identify, direct_message, request_response, + autonat, } } } @@ -181,3 +189,9 @@ impl From> for NetworkEventIn Self::RequestResponseEvent(event) } } + +impl From for NetworkEventInternal { + fn from(event: libp2p::autonat::Event) -> Self { + Self::AutonatEvent(event) + } +} diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 1f0b836561..fe60aeea66 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -190,6 +190,8 @@ pub enum NetworkEventInternal { DMEvent(libp2p::request_response::Event, Vec>), /// a request response event RequestResponseEvent(libp2p::request_response::Event), + /// a autonat event + AutonatEvent(libp2p::autonat::Event), } /// Bind all interfaces on port `port` diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 013722dead..dfd2833d9f 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -33,7 +33,7 @@ use async_compatibility_layer::{ }; use futures::{select, FutureExt, StreamExt}; use hotshot_types::constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; -use libp2p::{core::transport::ListenerId, StreamProtocol}; +use libp2p::{autonat, core::transport::ListenerId, StreamProtocol}; use libp2p::{ gossipsub::{ Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, Event as GossipEvent, @@ -138,6 +138,7 @@ impl NetworkNode { Some(peer_id) => { // if we know the peerid, add address. if *peer_id != self.peer_id { + behaviour.autonat.add_server(*peer_id, Some(addr.clone())); behaviour.dht.add_address(peer_id, addr.clone()); bs_nodes.insert(*peer_id, iter::once(addr.clone()).collect()); } @@ -295,6 +296,11 @@ impl NetworkNode { rrconfig.clone(), ); + let autonat_config = autonat::Config { + only_global_ips: false, + ..Default::default() + }; + let network = NetworkDef::new( gossipsub, DHTBehaviour::new( @@ -307,6 +313,7 @@ impl NetworkNode { identify, direct_message, request_response, + autonat::Behaviour::new(peer_id, autonat_config), ); // build swarm @@ -554,7 +561,6 @@ impl NetworkNode { address: _, } | SwarmEvent::NewExternalAddrCandidate { .. } - | SwarmEvent::ExternalAddrConfirmed { .. } | SwarmEvent::ExternalAddrExpired { .. } | SwarmEvent::IncomingConnection { connection_id: _, @@ -622,6 +628,29 @@ impl NetworkNode { NetworkEventInternal::RequestResponseEvent(e) => { self.request_response_state.handle_request_response(e) } + NetworkEventInternal::AutonatEvent(e) => { + match e { + autonat::Event::InboundProbe(_) => {} + autonat::Event::OutboundProbe(e) => match e { + autonat::OutboundProbeEvent::Request { .. } + | autonat::OutboundProbeEvent::Response { .. } => {} + autonat::OutboundProbeEvent::Error { + probe_id: _, + peer, + error, + } => { + warn!( + "Autonat Probe failed to peer {:?}, with error: {:?}", + peer, error + ); + } + }, + autonat::Event::StatusChanged { old, new } => { + info!("autonat Status changed. Old: {:?}, New: {:?}", old, new); + } + }; + None + } }; if let Some(event) = maybe_event { @@ -653,6 +682,13 @@ impl NetworkNode { SwarmEvent::ListenerError { listener_id, error } => { info!("LISTENER ERROR {:?} {:?}", listener_id, error); } + SwarmEvent::ExternalAddrConfirmed { address } => { + let my_id = *self.swarm.local_peer_id(); + self.swarm + .behaviour_mut() + .dht + .add_address(&my_id, address.clone()); + } _ => { error!( "Unhandled swarm event {:?}. This should not be possible.", From 35721c8db8467acababd24a8f5636e0cfc9b3654 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 1 Apr 2024 13:43:52 -0400 Subject: [PATCH 0908/1393] [CX_CLEANUP] - Implement `QuorumProposalTask` - Testing (#2832) * add new file * add qc * starter * filling out the trait * switching branch * start adding the different payload requirements * adding context to the events * fix build, clippy next * make clippy happy, stub out the rest of the steps * cosmetic improvements * Init testing structures and requisite consruction * Implement task state * add injections and the ability to construct the task * test is passing for the basic construction * fix build * Define a custom re-broadcast type for the quorum proposal (half impl) * Test now works as expected * implement a working test for one case, and the failing test * Complete task and event impl with dummy proposal * fix clippy lints * propage delay change throughout * fix some logical bugs * remove log * update proposal task to combine dependent events * clarifying comment * clippy: remove unused import * remove overuse of validation events * remove sleep in favor of timeout * fix test runner to broadcast messages * remove commented code * remove dead code and events * roll changes down from above branch, remove logs * remove useless event * make timeout duration a global * remove some noisy logs * add proposal validated event * add back the validated event * Perform unholy sacriment to get dependencies working * Perform unholy sacrament to get dependencies working * shave a layer off * combine tasks, remove dead test * fix off-by-one * Put back old changes * use commit_only * fix comparison * change view check --- hotshot/src/tasks/mod.rs | 42 +++++ hotshot/src/tasks/task_state.rs | 23 +++ task-impls/src/quorum_proposal.rs | 21 ++- task/src/dependency.rs | 4 + testing/src/script.rs | 25 ++- testing/src/view_generator.rs | 11 ++ testing/tests/quorum_proposal_task.rs | 227 ++++++++++++++++++++++++++ 7 files changed, 331 insertions(+), 22 deletions(-) create mode 100644 testing/tests/quorum_proposal_task.rs diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 7e0d23245d..99a3f9838f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -17,6 +17,7 @@ use hotshot_task_impls::{ da::DATaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, + quorum_proposal::QuorumProposalTaskState, request::NetworkRequestState, response::{run_response_task, NetworkResponseState, RequestReceiver}, transactions::TransactionTaskState, @@ -196,6 +197,34 @@ pub async fn inject_consensus_polls< } } +/// Setup polls for the given `quorum_proposal`. +pub async fn inject_quorum_proposal_polls>( + quorum_proposal_task_state: &QuorumProposalTaskState, +) { + // Poll (forever) for the latest quorum proposal + quorum_proposal_task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) + .await; + + // Poll (forever) for the latest view sync certificate + quorum_proposal_task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) + .await; + + // Start polling for proposals for the first view + quorum_proposal_task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) + .await; + + quorum_proposal_task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(1)) + .await; +} + /// add the consensus task pub async fn add_consensus_task>( task_reg: Arc, @@ -274,3 +303,16 @@ pub async fn add_view_sync_task>( let task = Task::new(tx, rx, task_reg.clone(), view_sync_state); task_reg.run_task(task).await; } + +/// add the quorum proposal task +pub async fn add_quorum_proposal_task>( + task_reg: Arc, + tx: Sender>>, + rx: Receiver>>, + handle: &SystemContextHandle, +) { + let quorum_proposal_task_state = QuorumProposalTaskState::create_from(handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + let task = Task::new(tx, rx, task_reg.clone(), quorum_proposal_task_state); + task_reg.run_task(task).await; +} diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index e59b5d5a6d..4bb4f2e44d 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,6 +1,7 @@ use crate::types::SystemContextHandle; use async_trait::async_trait; +use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; use hotshot_task_impls::{ consensus::ConsensusTaskState, da::DATaskState, request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, @@ -207,3 +208,25 @@ impl> CreateTaskState } } } + +#[async_trait] +impl> CreateTaskState + for QuorumProposalTaskState +{ + async fn create_from( + handle: &SystemContextHandle, + ) -> QuorumProposalTaskState { + let consensus = handle.hotshot.get_consensus(); + QuorumProposalTaskState { + latest_proposed_view: handle.get_cur_view().await, + propose_dependencies: HashMap::new(), + quorum_network: handle.hotshot.networks.quorum_network.clone(), + committee_network: handle.hotshot.networks.da_network.clone(), + output_event_stream: handle.hotshot.output_event_stream.0.clone(), + consensus, + timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + id: handle.hotshot.id, + } + } +} diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index b4b360f194..db2c64836f 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -68,7 +68,7 @@ enum ProposalDependency { TimeoutCert, /// For the `QuroumProposalRecv` event. - ProposalCertificate, + Proposal, } /// Handler for the proposal dependency @@ -197,7 +197,6 @@ impl> QuorumProposalTaskState { if let HotShotEvent::QCFormed(either::Left(qc)) = event { @@ -223,7 +222,7 @@ impl> QuorumProposalTaskState { + ProposalDependency::Proposal => { if let HotShotEvent::QuorumProposalRecv(proposal, _) = event { proposal.data.view_number } else { @@ -243,7 +242,11 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState { - proposal_cert_validated_dependency.mark_as_completed(event); + proposal_dependency.mark_as_completed(event); } HotShotEvent::QCFormed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { @@ -324,7 +327,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState Dependency for EventDependency { return Some(dependency); } loop { + if let Some(dependency) = self.completed_dependency { + return Some(dependency); + } + match self.event_rx.recv_direct().await { Ok(event) => { if (self.match_fn)(&event) { diff --git a/testing/src/script.rs b/testing/src/script.rs index 01434bad65..af762650e3 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -7,6 +7,8 @@ use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::{sync::Arc, time::Duration}; +const RECV_TIMEOUT_SEC: Duration = Duration::from_secs(1); + pub struct TestScriptStage>>> { pub inputs: Vec>, pub outputs: Vec>>>, @@ -82,18 +84,10 @@ pub async fn run_test_script { let registry = Arc::new(TaskRegistry::default()); - let (test_input, task_receiver) = broadcast(1024); - // let (task_input, mut test_receiver) = broadcast(1024); - - let task_input = test_input.clone(); - let mut test_receiver = task_receiver.clone(); + let (to_task, from_test) = broadcast(1024); + let (to_test, mut from_task) = broadcast(1024); - let mut task = Task::new( - task_input.clone(), - task_receiver.clone(), - registry.clone(), - state, - ); + let mut task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); for (stage_number, stage) in script.iter_mut().enumerate() { tracing::debug!("Beginning test stage {}", stage_number); @@ -101,6 +95,11 @@ pub async fn run_test_script if !task.state_mut().filter(&Arc::new(input.clone())) { tracing::debug!("Test sent: {:?}", input.clone()); + to_task + .broadcast(input.clone().into()) + .await + .expect("Failed to broadcast input message"); + if let Some(res) = S::handle_event(input.clone().into(), &mut task).await { task.state_mut().handle_result(&res).await; } @@ -109,7 +108,7 @@ pub async fn run_test_script for assert in &stage.outputs { if let Ok(Ok(received_output)) = - async_timeout(Duration::from_millis(250), test_receiver.recv_direct()).await + async_timeout(RECV_TIMEOUT_SEC, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); validate_output_or_panic(stage_number, &received_output, assert); @@ -129,7 +128,7 @@ pub async fn run_test_script validate_task_state_or_panic(stage_number, task.state(), assert); } - if let Ok(received_output) = test_receiver.try_recv() { + if let Ok(received_output) = from_task.try_recv() { panic_extra_output(stage_number, &received_output); } } diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index f963d69b56..6ba50fc46e 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -425,6 +425,17 @@ impl TestViewGenerator { } } + pub fn add_timeout(&mut self, timeout_data: TimeoutData) { + if let Some(ref view) = self.current_view { + self.current_view = Some(TestView { + timeout_cert_data: Some(timeout_data), + ..view.clone() + }); + } else { + tracing::error!("Cannot attach timeout cert to the genesis view.") + } + } + /// Advances to the next view by skipping the current view and not adding it to the state tree. /// This is useful when simulating that a timeout has occurred. pub fn advance_view_number_by(&mut self, n: u64) { diff --git a/testing/tests/quorum_proposal_task.rs b/testing/tests/quorum_proposal_task.rs new file mode 100644 index 0000000000..e990433c42 --- /dev/null +++ b/testing/tests/quorum_proposal_task.rs @@ -0,0 +1,227 @@ +use hotshot::tasks::inject_quorum_proposal_polls; +use hotshot::tasks::task_state::CreateTaskState; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::events::HotShotEvent::*; +use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; +use hotshot_testing::predicates::exact; +use hotshot_testing::task_helpers::vid_scheme_from_view_number; +use hotshot_testing::{ + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, +}; +use hotshot_types::data::{ViewChangeEvidence, ViewNumber}; +use hotshot_types::simple_vote::ViewSyncFinalizeData; +use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; +use hotshot_types::vid::VidSchemeType; +use jf_primitives::vid::VidScheme; + +fn make_payload_commitment( + membership: &::Membership, + view: ViewNumber, +) -> ::Commit { + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. We need the VID commitment to be able to propose later. + let mut vid = vid_scheme_from_view_number::(membership, view); + let encoded_transactions = Vec::new(); + vid.commit_only(&encoded_transactions).unwrap() +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_quorum_proposal() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // We need to propose as the leader for view 2, otherwise we get caught up with the special + // case in the genesis view. + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + let cert = proposals[1].data.justify_qc.clone(); + + // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct + let view_2 = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QCFormed(either::Left(cert.clone())), + SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + ], + outputs: vec![ + exact(QuorumProposalValidated(proposals[1].data.clone())), + exact(QuorumProposalDependenciesValidated(ViewNumber::new(2))), + exact(DummyQuorumProposalSend(ViewNumber::new(2))), + ], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + + let script = vec![view_2]; + run_test_script(script, quorum_proposal_task_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_qc_timeout() { + use hotshot_types::simple_vote::TimeoutData; + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + let timeout_data = TimeoutData { + view: ViewNumber::new(1), + }; + generator.add_timeout(timeout_data); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + + // Get the proposal cert out for the view sync input + let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { + ViewChangeEvidence::Timeout(tc) => tc, + _ => panic!("Found a View Sync Cert when there should have been a Timeout cert"), + }; + + // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct + let view_2 = TestScriptStage { + inputs: vec![ + QCFormed(either::Right(cert.clone())), + SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + ], + outputs: vec![ + exact(QuorumProposalDependenciesValidated(ViewNumber::new(2))), + exact(DummyQuorumProposalSend(ViewNumber::new(2))), + ], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + + let script = vec![view_2]; + run_test_script(script, quorum_proposal_task_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_view_sync() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // We need to propose as the leader for view 2, otherwise we get caught up with the special + // case in the genesis view. + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + + let view_sync_finalize_data = ViewSyncFinalizeData { + relay: 2, + round: ViewNumber::new(2), + }; + generator.add_view_sync_finalize(view_sync_finalize_data); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + + // Get the proposal cert out for the view sync input + let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { + ViewChangeEvidence::ViewSync(vsc) => vsc, + _ => panic!("Found a TC when there should have been a view sync cert"), + }; + + // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct + let view_2 = TestScriptStage { + inputs: vec![ + ViewSyncFinalizeCertificate2Recv(cert.clone()), + SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + ], + outputs: vec![ + exact(QuorumProposalDependenciesValidated(ViewNumber::new(2))), + exact(DummyQuorumProposalSend(ViewNumber::new(2))), + ], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + + let script = vec![view_2]; + run_test_script(script, quorum_proposal_task_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_with_incomplete_events() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // We need to propose as the leader for view 2, otherwise we get caught up with the special + // case in the genesis view. + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + + // We run the task here at view 2, but this time we ignore the crucial piece of evidence: the + // payload commitment and metadata. Instead we send only one of the three "OR" required fields. + // This should result in the proposal failing to be sent. + let view_2 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![exact(QuorumProposalValidated(proposals[1].data.clone()))], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + + let script = vec![view_2]; + run_test_script(script, quorum_proposal_task_state).await; +} From aeb54d27bbddb986010a9690bcba100576bebdda Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 1 Apr 2024 16:10:18 -0400 Subject: [PATCH 0909/1393] Ensure blank blocks between versions (#2765) --- task-impls/Cargo.toml | 1 + task-impls/src/consensus.rs | 114 ++++++++++++- testing/src/predicates.rs | 27 +++- testing/src/view_generator.rs | 2 - testing/tests/da_task.rs | 26 ++- testing/tests/upgrade_task.rs | 293 +++++++++++++++++++++++++++++++++- 6 files changed, 453 insertions(+), 10 deletions(-) diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 7886a22388..95fa746578 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -20,6 +20,7 @@ hotshot-task = { path = "../task" } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } jf-primitives = { workspace = true } +memoize = { workspace = true } rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index efcf02020b..466df2dc6e 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -18,7 +18,7 @@ use hotshot_types::{ event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, + simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote, UpgradeProposalData}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -293,6 +293,16 @@ impl, A: ConsensusApi + return false; } + if let Some(upgrade_cert) = &self.decided_upgrade_cert { + if view_is_between_versions(self.cur_view, &upgrade_cert.data) + && Some(proposal.block_header.payload_commitment()) + != null_block::commitment(self.quorum_membership.total_nodes()) + { + info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(self.quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); + return false; + } + } + // Only vote if you have the DA cert // ED Need to update the view number this is stored under? if let Some(cert) = consensus.saved_da_certs.get(&(proposal.get_view_number())) { @@ -1384,6 +1394,76 @@ impl, A: ConsensusApi + // TODO do some sort of sanity check on the view number that it matches decided } + // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. + if let Some(upgrade_cert) = &self.decided_upgrade_cert { + if view_is_between_versions(self.cur_view, &upgrade_cert.data) { + let Ok((_payload, metadata)) = + ::from_transactions(Vec::new()) + else { + error!("Failed to build null block payload and metadata"); + return false; + }; + + let Some(null_block_commitment) = + null_block::commitment(self.quorum_membership.total_nodes()) + else { + // This should never happen. + error!("Failed to calculate null block commitment"); + return false; + }; + + let block_header = TYPES::BlockHeader::new( + state, + &consensus.instance_state, + &parent_leaf, + null_block_commitment, + metadata, + ) + .await; + + let proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: consensus.high_qc.clone(), + proposal_certificate: None, + upgrade_certificate: None, + }; + + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); + proposed_leaf.set_parent_commitment(parent_leaf.commit()); + + let Ok(signature) = + TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) + else { + // This should never happen. + error!("Failed to sign proposed_leaf.commit()!"); + return false; + }; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!( + "Sending null proposal for view {:?} \n {:?}", + proposed_leaf.get_view_number(), + "" + ); + + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + self.public_key.clone(), + )), + event_stream, + ) + .await; + + return true; + } + } + if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { let block_header = TYPES::BlockHeader::new( state, @@ -1495,3 +1575,35 @@ impl, A: ConsensusApi + matches!(event.as_ref(), HotShotEvent::Shutdown) } } + +pub mod null_block { + #![allow(missing_docs)] + use hotshot_types::vid::{vid_scheme, VidCommitment}; + use jf_primitives::vid::VidScheme; + use memoize::memoize; + + /// The commitment for a null block payload. + /// + /// Note: the commitment depends on the network (via `num_storage_nodes`), + /// and may change (albeit rarely) during execution. + /// + /// We memoize the result to avoid having to recalculate it. + #[memoize(SharedCache, Capacity: 10)] + #[must_use] + pub fn commitment(num_storage_nodes: usize) -> Option { + let vid_result = vid_scheme(num_storage_nodes).commit_only(&Vec::new()); + + match vid_result { + Ok(r) => Some(r), + Err(_) => None, + } + } +} + +/// Test whether a view is in the range defined by an upgrade certificate. +fn view_is_between_versions( + view: TYPES::Time, + upgrade_data: &UpgradeProposalData, +) -> bool { + view > upgrade_data.old_version_last_block && view < upgrade_data.new_version_first_block +} diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index 3b2b1c4fbf..67f42d7440 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -1,9 +1,11 @@ use std::sync::Arc; use hotshot_task_impls::{ - consensus::ConsensusTaskState, events::HotShotEvent, events::HotShotEvent::*, + consensus::{null_block, ConsensusTaskState}, + events::HotShotEvent, + events::HotShotEvent::*, }; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; use hotshot::types::SystemContextHandle; @@ -146,6 +148,27 @@ where } } +pub fn quorum_proposal_send_with_null_block( + num_storage_nodes: usize, +) -> Predicate>> +where + TYPES: NodeType, +{ + let info = "QuorumProposalSend with null block payload".to_string(); + let function = move |e: &Arc>| match e.as_ref() { + QuorumProposalSend(proposal, _) => { + Some(proposal.data.block_header.payload_commitment()) + == null_block::commitment(num_storage_nodes) + } + _ => false, + }; + + Predicate { + function: Box::new(function), + info, + } +} + pub fn timeout_vote_send() -> Predicate>> where TYPES: NodeType, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 6ba50fc46e..25fbe34e45 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -94,7 +94,6 @@ impl TestView { proposal_certificate: None, }; - let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let block_payload_signature = @@ -291,7 +290,6 @@ impl TestView { _pd: PhantomData, }; - let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let block_payload_signature = diff --git a/testing/tests/da_task.rs b/testing/tests/da_task.rs index 79aa97f343..3e94173638 100644 --- a/testing/tests/da_task.rs +++ b/testing/tests/da_task.rs @@ -45,7 +45,18 @@ async fn test_da_task() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + + for view in (&mut generator).take(1) { + proposals.push(view.da_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + generator.add_transactions(vec![TestTransaction(vec![0])]); + + for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); @@ -108,7 +119,18 @@ async fn test_da_task_storage_failure() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + + for view in (&mut generator).take(1) { + proposals.push(view.da_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + generator.add_transactions(transactions); + + for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 4fe56192a6..7e2decb321 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -1,6 +1,9 @@ use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot::types::SystemContextHandle; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::{ + block_types::TestTransaction, + node_types::{MemoryImpl, TestTypes}, +}; use hotshot_macros::test_scripts; use hotshot_task_impls::{ consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, @@ -11,13 +14,16 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::ViewNumber, simple_vote::UpgradeProposalData, traits::node_implementation::ConsensusTime, + data::ViewNumber, + simple_vote::UpgradeProposalData, + traits::{election::Membership, node_implementation::ConsensusTime}, }; use versioned_binary_serialization::version::Version; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_upgrade_task() { +/// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. +async fn test_consensus_task_upgrade() { use hotshot_testing::script::{run_test_script, TestScriptStage}; use hotshot_testing::task_helpers::build_system_handle; @@ -298,3 +304,284 @@ async fn test_upgrade_and_consensus_task() { test_scripts![inputs, consensus_script, upgrade_script]; } + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// Test that we correctly handle blank blocks between versions. +/// Specifically, this test schedules an upgrade between views 4 and 8, +/// and ensures that: +/// - we correctly vote affirmatively on a QuorumProposal with a null block payload in view 5 +/// - we correctly propose with a null block payload in view 6, even if we have indications to do otherwise (via SendPayloadCommitmentAndMetadata, VID etc). +/// - we correctly reject a QuorumProposal with a non-null block payload in view 7. +async fn test_upgrade_and_consensus_task_blank_blocks() { + use hotshot_testing::task_helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(6).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let old_version = Version { major: 0, minor: 1 }; + let new_version = Version { major: 0, minor: 2 }; + + let upgrade_data: UpgradeProposalData = UpgradeProposalData { + old_version, + new_version, + new_version_hash: [0u8; 12].to_vec(), + old_version_last_block: ViewNumber::new(4), + new_version_first_block: ViewNumber::new(8), + }; + + let mut proposals = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + let mut leaders = Vec::new(); + let mut views = Vec::new(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + generator.add_upgrade(upgrade_data.clone()); + + for view in (&mut generator).take(3) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + // We are now in the upgrade period, and set the transactions to null for the QuorumProposalRecv in view 5. + // Our node should vote affirmatively on this. + generator.add_transactions(vec![]); + + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + // We set the transactions to something not null for view 6, but we expect the node to emit a quorum proposal where they are still null. + generator.add_transactions(vec![TestTransaction(vec![0])]); + + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + // For view 7, we set the transactions to something not null. The node should fail to vote on this. + generator.add_transactions(vec![TestTransaction(vec![0])]); + + for view in generator.take(1) { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + } + + let consensus_state = ConsensusTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + let mut upgrade_state = UpgradeTaskState::< + TestTypes, + MemoryImpl, + SystemContextHandle, + >::create_from(&handle) + .await; + + upgrade_state.should_vote = |_| true; + + inject_consensus_polls(&consensus_state).await; + + let inputs = vec![ + vec![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + VidDisperseRecv(vids[0].0[0].clone()), + DACRecv(dacs[0].clone()), + ], + vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + VidDisperseRecv(vids[1].0[0].clone()), + DACRecv(dacs[1].clone()), + SendPayloadCommitmentAndMetadata( + vids[1].0[0].data.payload_commitment, + (), + ViewNumber::new(2), + ), + ], + vec![ + DACRecv(dacs[2].clone()), + VidDisperseRecv(vids[2].0[0].clone()), + SendPayloadCommitmentAndMetadata( + vids[2].0[0].data.payload_commitment, + (), + ViewNumber::new(3), + ), + QuorumProposalRecv(proposals[2].clone(), leaders[2]), + ], + vec![ + DACRecv(dacs[3].clone()), + VidDisperseRecv(vids[3].0[0].clone()), + SendPayloadCommitmentAndMetadata( + vids[3].0[0].data.payload_commitment, + (), + ViewNumber::new(4), + ), + QuorumProposalRecv(proposals[3].clone(), leaders[3]), + ], + vec![ + DACRecv(dacs[4].clone()), + VidDisperseRecv(vids[4].0[0].clone()), + SendPayloadCommitmentAndMetadata( + vids[4].0[0].data.payload_commitment, + (), + ViewNumber::new(5), + ), + QuorumProposalRecv(proposals[4].clone(), leaders[4]), + ], + vec![ + DACRecv(dacs[5].clone()), + SendPayloadCommitmentAndMetadata( + vids[5].0[0].data.payload_commitment, + (), + ViewNumber::new(6), + ), + QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), + ], + vec![ + DACRecv(dacs[6].clone()), + VidDisperseRecv(vids[6].0[0].clone()), + SendPayloadCommitmentAndMetadata( + vids[6].0[0].data.payload_commitment, + (), + ViewNumber::new(7), + ), + QuorumProposalRecv(proposals[6].clone(), leaders[6]), + ], + ]; + + let consensus_script = TaskScript { + state: consensus_state, + expectations: vec![ + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + quorum_vote_send(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), + quorum_vote_send(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(3))), + quorum_proposal_validated(), + leaf_decided(), + quorum_vote_send(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(4))), + quorum_proposal_validated(), + leaf_decided(), + quorum_vote_send(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(5))), + quorum_proposal_validated(), + leaf_decided(), + // This is between versions, but we are receiving a null block and hence should vote affirmatively on it. + quorum_vote_send(), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![quorum_proposal_send_with_null_block( + quorum_membership.total_nodes(), + )], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(7))), + // We do NOT expect a quorum_vote_send() because we have set the block to be non-null in this view. + ], + task_state_asserts: vec![], + }, + ], + }; + + let upgrade_script = TaskScript { + state: upgrade_state, + expectations: vec![ + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![], + task_state_asserts: vec![], + }, + ], + }; + + test_scripts![inputs, consensus_script, upgrade_script]; +} From f437f84227309d1f0284d8a2906a437c0cd56004 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 1 Apr 2024 16:41:52 -0400 Subject: [PATCH 0910/1393] [Sequencer] Libp2p config changes (#2875) * libp2p sequencer changes, config overhaul * broker build for async_std * don't check number of bootstrap nodes during tests * local IP replacement * use the local IP if none supplied * lint * update pcdn --- example-types/src/node_types.rs | 71 ++--- examples/Cargo.toml | 2 +- examples/combined/all.rs | 12 +- examples/combined/validator.rs | 16 +- examples/infra/mod.rs | 298 +++++------------- examples/libp2p/all.rs | 11 +- examples/libp2p/validator.rs | 15 +- examples/push-cdn/all.rs | 3 +- examples/push-cdn/broker.rs | 42 +-- examples/push-cdn/marshal.rs | 24 +- examples/push-cdn/whitelist-adapter.rs | 13 +- examples/webserver/all.rs | 3 +- hotshot/Cargo.toml | 2 + hotshot/src/traits.rs | 5 +- .../src/traits/networking/combined_network.rs | 12 +- .../src/traits/networking/libp2p_network.rs | 155 ++++++++- libp2p-networking/src/network/mod.rs | 2 +- libp2p-networking/src/network/node.rs | 26 +- libp2p-networking/src/network/node/config.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 2 +- libp2p-networking/tests/common/mod.rs | 7 +- orchestrator/Cargo.toml | 3 + orchestrator/api.toml | 3 +- orchestrator/run-config.toml | 2 - orchestrator/src/client.rs | 59 ++-- orchestrator/src/config.rs | 83 ++--- orchestrator/src/lib.rs | 89 +++--- types/src/traits/network.rs | 2 + 28 files changed, 500 insertions(+), 464 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index b9c97208d0..e4827b1320 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -73,63 +73,32 @@ pub struct CombinedImpl; /// static committee type alias pub type StaticMembership = StaticCommittee; -// Push CDN communication channels -type StaticPushCdnQuorumComm = PushCdnNetwork; -type StaticPushCdnDAComm = PushCdnNetwork; - -/// memory network -pub type StaticMemoryDAComm = - MemoryNetwork, ::SignatureKey>; - -/// libp2p network -type StaticLibp2pDAComm = Libp2pNetwork, ::SignatureKey>; - -/// web server network communication channel -type StaticWebDAComm = WebServerNetwork; - -/// combined network -type StaticCombinedDAComm = CombinedNetworks; - -/// memory comm channel -pub type StaticMemoryQuorumComm = - MemoryNetwork, ::SignatureKey>; - -/// libp2p comm channel -type StaticLibp2pQuorumComm = - Libp2pNetwork, ::SignatureKey>; - -/// web server comm channel -type StaticWebQuorumComm = WebServerNetwork; - -/// combined network (libp2p + web server) -type StaticCombinedQuorumComm = CombinedNetworks; - -impl NodeImplementation for PushCdnImpl { - type QuorumNetwork = StaticPushCdnQuorumComm; - type CommitteeNetwork = StaticPushCdnDAComm; - type Storage = TestStorage; +impl NodeImplementation for PushCdnImpl { + type QuorumNetwork = PushCdnNetwork; + type CommitteeNetwork = PushCdnNetwork; + type Storage = TestStorage; } -impl NodeImplementation for Libp2pImpl { - type QuorumNetwork = StaticLibp2pQuorumComm; - type CommitteeNetwork = StaticLibp2pDAComm; - type Storage = TestStorage; +impl NodeImplementation for MemoryImpl { + type QuorumNetwork = MemoryNetwork, TYPES::SignatureKey>; + type CommitteeNetwork = MemoryNetwork, TYPES::SignatureKey>; + type Storage = TestStorage; } -impl NodeImplementation for MemoryImpl { - type QuorumNetwork = StaticMemoryQuorumComm; - type CommitteeNetwork = StaticMemoryDAComm; - type Storage = TestStorage; +impl NodeImplementation for WebImpl { + type QuorumNetwork = WebServerNetwork; + type CommitteeNetwork = WebServerNetwork; + type Storage = TestStorage; } -impl NodeImplementation for WebImpl { - type QuorumNetwork = StaticWebQuorumComm; - type CommitteeNetwork = StaticWebDAComm; - type Storage = TestStorage; +impl NodeImplementation for CombinedImpl { + type QuorumNetwork = CombinedNetworks; + type CommitteeNetwork = CombinedNetworks; + type Storage = TestStorage; } -impl NodeImplementation for CombinedImpl { - type QuorumNetwork = StaticCombinedQuorumComm; - type CommitteeNetwork = StaticCombinedDAComm; - type Storage = TestStorage; +impl NodeImplementation for Libp2pImpl { + type QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>; + type CommitteeNetwork = Libp2pNetwork, TYPES::SignatureKey>; + type Storage = TestStorage; } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 8ec544f863..a371e0f458 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -132,6 +132,7 @@ hotshot-example-types = { path = "../example-types" } chrono = "0.4" versioned-binary-serialization = { workspace = true } sha2.workspace = true +local-ip-address = "0.6" tracing = { workspace = true } @@ -158,7 +159,6 @@ cdn-marshal = { workspace = true, features = ["runtime-async-std"] } clap = { version = "4.5", features = ["derive", "env"] } toml = { workspace = true } blake3 = { workspace = true } -local-ip-address = "0.6.1" anyhow.workspace = true tracing-subscriber = "0.3.18" diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 30d9290814..a06e9efa47 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -19,6 +19,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use rand::{rngs::StdRng, RngCore, SeedableRng}; +use std::net::SocketAddr; use std::net::{IpAddr, Ipv4Addr}; use std::path::Path; use tracing::{error, instrument}; @@ -129,13 +130,20 @@ async fn main() { // nodes let mut nodes = Vec::new(); - for _ in 0..config.config.num_nodes_with_stake.into() { + for i in 0..config.config.num_nodes_with_stake.into() { + // Calculate our libp2p advertise address, which we will later derive the + // bind address from for example purposes. + let advertise_address = SocketAddr::new( + IpAddr::V4(Ipv4Addr::LOCALHOST), + 8000 + (u16::try_from(i).expect("failed to create advertise address")), + ); + let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + advertise_address: Some(advertise_address), network_config_file: None, }, ) diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 8d72b3cdf0..7574d44ca0 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -1,7 +1,10 @@ //! A validator using both the web server and libp2p +use std::{net::SocketAddr, str::FromStr}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use local_ip_address::local_ip; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; @@ -21,7 +24,18 @@ pub mod infra; async fn main() { setup_logging(); setup_backtrace(); - let args = ValidatorArgs::parse(); + + let mut args = ValidatorArgs::parse(); + + // If we did not set the advertise address, use our local IP and port 8000 + let local_ip = local_ip().expect("failed to get local IP"); + args.advertise_address = Some( + args.advertise_address.unwrap_or( + SocketAddr::from_str(&format!("{local_ip}:8000")) + .expect("failed to convert local IP to socket address"), + ), + ); + info!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 11ffad6498..2ef68c617c 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -1,7 +1,6 @@ #![allow(clippy::panic)] use async_compatibility_layer::art::async_sleep; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use async_lock::RwLock; use async_trait::async_trait; use cdn_broker::reexports::crypto::signature::KeyPair; use cdn_broker::reexports::message::Topic; @@ -10,17 +9,18 @@ use clap::Parser; use clap::{Arg, Command}; use futures::StreamExt; use hotshot::traits::implementations::{ - CombinedNetworks, PushCdnNetwork, UnderlyingCombinedNetworks, WrappedSignatureKey, + derive_libp2p_peer_id, CombinedNetworks, PushCdnNetwork, WrappedSignatureKey, }; use hotshot::traits::BlockPayload; use hotshot::{ traits::{ - implementations::{Libp2pNetwork, NetworkingMetricsValue, WebServerNetwork}, + implementations::{Libp2pNetwork, WebServerNetwork}, NodeImplementation, }, - types::{SignatureKey, SystemContextHandle}, + types::SystemContextHandle, Memberships, Networks, SystemContext, }; +use hotshot_example_types::node_types::{Libp2pImpl, PushCdnImpl}; use hotshot_example_types::storage_types::TestStorage; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, @@ -46,23 +46,15 @@ use hotshot_types::{ }, HotShotConfig, PeerConfig, ValidatorConfig, }; -use libp2p_identity::PeerId; -use libp2p_identity::{ - ed25519::{self, SecretKey}, - Keypair, -}; -use libp2p_networking::{ - network::{MeshParams, NetworkNodeConfigBuilder, NetworkNodeType}, - reexport::Multiaddr, -}; + use rand::rngs::StdRng; use rand::SeedableRng; use std::fmt::Debug; use std::marker::PhantomData; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::num::NonZeroUsize; use std::time::Duration; -use std::{collections::BTreeSet, sync::Arc}; use std::{fs, time::Instant}; -use std::{num::NonZeroUsize, str::FromStr}; use surf_disco::Url; use tracing::{debug, error, info, warn}; use versioned_binary_serialization::version::StaticVersionType; @@ -328,130 +320,6 @@ fn webserver_network_from_config( - mut config: NetworkConfig, - pub_key: TYPES::SignatureKey, -) -> Libp2pNetwork, TYPES::SignatureKey> { - let libp2p_config = config - .libp2p_config - .take() - .expect("Configuration is not for a Libp2p network"); - let bs_len = libp2p_config.bootstrap_nodes.len(); - let bootstrap_nodes: Vec<(PeerId, Multiaddr)> = libp2p_config - .bootstrap_nodes - .iter() - .map(|(addr, pair)| { - let kp = Keypair::from_protobuf_encoding(pair).unwrap(); - let peer_id = PeerId::from_public_key(&kp.public()); - let multiaddr = - Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", addr.ip(), addr.port())) - .unwrap(); - (peer_id, multiaddr) - }) - .collect(); - let identity = libp2p_generate_indexed_identity(config.seed, config.node_index); - let node_type = if (config.node_index as usize) < bs_len { - NetworkNodeType::Bootstrap - } else { - NetworkNodeType::Regular - }; - let node_index = config.node_index; - let port_index = if libp2p_config.index_ports { - node_index - } else { - 0 - }; - let bound_addr: Multiaddr = format!( - "/{}/{}/udp/{}/quic-v1", - if libp2p_config.public_ip.is_ipv4() { - "ip4" - } else { - "ip6" - }, - libp2p_config.public_ip, - libp2p_config.base_port as u64 + port_index - ) - .parse() - .unwrap(); - - // generate network - let mut config_builder = NetworkNodeConfigBuilder::default(); - assert!(config.config.num_nodes_with_stake.get() > 2); - let replicated_nodes = NonZeroUsize::new(config.config.num_nodes_with_stake.get() - 2).unwrap(); - config_builder.replication_factor(replicated_nodes); - config_builder.identity(identity.clone()); - - config_builder.bound_addr(Some(bound_addr.clone())); - - let to_connect_addrs = bootstrap_nodes - .iter() - .map(|(peer_id, multiaddr)| (Some(*peer_id), multiaddr.clone())) - .collect(); - - config_builder.to_connect_addrs(to_connect_addrs); - - let mesh_params = - // NOTE I'm arbitrarily choosing these. - match node_type { - NetworkNodeType::Bootstrap => MeshParams { - mesh_n_high: libp2p_config.bootstrap_mesh_n_high, - mesh_n_low: libp2p_config.bootstrap_mesh_n_low, - mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, - mesh_n: libp2p_config.bootstrap_mesh_n, - }, - NetworkNodeType::Regular => MeshParams { - mesh_n_high: libp2p_config.mesh_n_high, - mesh_n_low: libp2p_config.mesh_n_low, - mesh_outbound_min: libp2p_config.mesh_outbound_min, - mesh_n: libp2p_config.mesh_n, - }, - NetworkNodeType::Conductor => unreachable!(), - }; - config_builder.mesh_params(Some(mesh_params)); - - let mut all_keys = BTreeSet::new(); - let mut da_keys = BTreeSet::new(); - for i in 0..config.config.num_nodes_with_stake.get() as u64 { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pub_key = TYPES::SignatureKey::from_private(&privkey); - if i < config.config.da_staked_committee_size as u64 { - da_keys.insert(pub_key.clone()); - } - all_keys.insert(pub_key); - } - let node_config = config_builder.build().unwrap(); - - #[allow(clippy::cast_possible_truncation)] - Libp2pNetwork::new( - NetworkingMetricsValue::default(), - node_config, - pub_key.clone(), - Arc::new(RwLock::new( - bootstrap_nodes - .iter() - .map(|(peer_id, addr)| (Some(*peer_id), addr.clone())) - .collect(), - )), - bs_len, - config.node_index as usize, - // NOTE: this introduces an invariant that the keys are assigned using this indexed - // function - all_keys, - #[cfg(feature = "hotshot-testing")] - None, - da_keys.clone(), - da_keys.contains(&pub_key), - ) - .await - .unwrap() -} - /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] pub trait RunDA< @@ -474,6 +342,7 @@ pub trait RunDA< /// Initializes networking, returns self async fn initialize_networking( config: NetworkConfig, + libp2p_advertise_address: Option, ) -> Self; /// Initializes the genesis state and HotShot instance; does not start HotShot consensus @@ -761,6 +630,7 @@ where { async fn initialize_networking( config: NetworkConfig, + _libp2p_advertise_address: Option, ) -> WebServerDARun { // Get our own key let pub_key = config.config.my_own_validator_config.public_key.clone(); @@ -835,6 +705,7 @@ where { async fn initialize_networking( config: NetworkConfig, + _libp2p_advertise_address: Option, ) -> PushCdnDaRun { // Get our own key let key = config.config.my_own_validator_config.clone(); @@ -924,19 +795,47 @@ where { async fn initialize_networking( config: NetworkConfig, + libp2p_advertise_address: Option, ) -> Libp2pDARun { - let pub_key = config.config.my_own_validator_config.public_key.clone(); + // Extrapolate keys for ease of use + let keys = config.clone().config.my_own_validator_config; + let public_key = keys.public_key; + let private_key = keys.private_key; + + // In an example, we can calculate the libp2p bind address as a function + // of the advertise address. + let bind_address = if let Some(libp2p_advertise_address) = libp2p_advertise_address { + // If we have supplied one, use it + SocketAddr::new( + IpAddr::V4(Ipv4Addr::UNSPECIFIED), + libp2p_advertise_address.port(), + ) + } else { + // If not, index a base port with our node index + SocketAddr::new( + IpAddr::V4(Ipv4Addr::UNSPECIFIED), + 8000 + (u16::try_from(config.node_index) + .expect("failed to create advertise address")), + ) + }; - // create and wait for underlying network - let quorum_channel = libp2p_network_from_config::(config.clone(), pub_key).await; + // Create the Libp2p network + let libp2p_network = Libp2pNetwork::from_config::( + config.clone(), + bind_address, + &public_key, + &private_key, + ) + .await + .expect("failed to create libp2p network"); - let da_channel = quorum_channel.clone(); - quorum_channel.wait_for_ready().await; + // Wait for the network to be ready + libp2p_network.wait_for_ready().await; Libp2pDARun { config, - quorum_channel, - da_channel, + quorum_channel: libp2p_network.clone(), + da_channel: libp2p_network, } } @@ -988,59 +887,47 @@ where { async fn initialize_networking( config: NetworkConfig, + libp2p_advertise_address: Option, ) -> CombinedDARun { - // Get our own key - let key = config.config.my_own_validator_config.clone(); - - // Create and wait for libp2p network - let libp2p_underlying_quorum_network = - libp2p_network_from_config::(config.clone(), key.public_key.clone()).await; - - libp2p_underlying_quorum_network.wait_for_ready().await; - - let CombinedNetworkConfig { delay_duration }: CombinedNetworkConfig = - config.clone().combined_network_config.unwrap(); + // Initialize our Libp2p network + let libp2p_da_run: Libp2pDARun = + as RunDA< + TYPES, + Libp2pNetwork, TYPES::SignatureKey>, + Libp2pNetwork, TYPES::SignatureKey>, + Libp2pImpl, + >>::initialize_networking(config.clone(), libp2p_advertise_address) + .await; - // Convert the keys to the CDN-compatible type - let keypair = KeyPair { - public_key: WrappedSignatureKey(key.public_key), - private_key: key.private_key, - }; + // Initialize our CDN network + let cdn_da_run: PushCdnDaRun = + as RunDA< + TYPES, + PushCdnNetwork, + PushCdnNetwork, + PushCdnImpl, + >>::initialize_networking(config.clone(), libp2p_advertise_address) + .await; - // See if we should be DA - let mut topics = vec![Topic::Global]; - if config.node_index < config.config.da_staked_committee_size as u64 { - topics.push(Topic::DA); - } - - // Create the network and await the initial connection - let cdn_network = PushCdnNetwork::new( - config - .cdn_marshal_address - .clone() - .expect("`cdn_marshal_address` needs to be supplied for a CDN run"), - topics.iter().map(ToString::to_string).collect(), - keypair, - ) - .await - .expect("failed to perform intiail client connection"); + // Create our combined network config + let CombinedNetworkConfig { delay_duration }: CombinedNetworkConfig = config + .clone() + .combined_network_config + .expect("combined network config not specified"); // Combine the two communication channels let da_channel = CombinedNetworks::new( - Arc::new(UnderlyingCombinedNetworks( - cdn_network.clone(), - libp2p_underlying_quorum_network.clone(), - )), + cdn_da_run.da_channel, + libp2p_da_run.da_channel, delay_duration, ); let quorum_channel = CombinedNetworks::new( - Arc::new(UnderlyingCombinedNetworks( - cdn_network, - libp2p_underlying_quorum_network.clone(), - )), + cdn_da_run.quorum_channel, + libp2p_da_run.quorum_channel, delay_duration, ); + // Return the run configuration CombinedDARun { config, quorum_channel, @@ -1092,20 +979,18 @@ pub async fn main_entry_point< debug!("Starting validator"); - // see what our public identity will be - let public_ip = match args.public_ip { - Some(ip) => ip, - None => local_ip_address::local_ip().unwrap(), - }; - - let orchestrator_client: OrchestratorClient = - OrchestratorClient::new(args.clone(), public_ip.to_string()); + let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.clone()); // We assume one node will not call this twice to generate two validator_config-s with same identity. let my_own_validator_config = NetworkConfig::::generate_init_validator_config( &orchestrator_client, ).await; + // Derives our Libp2p private key from our private key, and then returns the public key of that key + let libp2p_public_key = + derive_libp2p_peer_id::(&my_own_validator_config.private_key) + .expect("failed to derive Libp2p keypair"); + // conditionally save/load config from file or orchestrator // This is a function that will return correct complete config from orchestrator. // It takes in a valid args.network_config_file when loading from file, or valid validator_config when loading from orchestrator, the invalid one will be ignored. @@ -1115,13 +1000,16 @@ pub async fn main_entry_point< let (run_config, source) = NetworkConfig::::get_complete_config( &orchestrator_client, - my_own_validator_config, args.clone().network_config_file, + my_own_validator_config, + args.advertise_address, + Some(libp2p_public_key), ) - .await; + .await + .expect("failed to get config"); error!("Initializing networking"); - let run = RUNDA::initialize_networking(run_config.clone()).await; + let run = RUNDA::initialize_networking(run_config.clone(), args.advertise_address).await; let hotshot = run.initialize_state_and_hotshot().await; // pre-generate transactions @@ -1160,7 +1048,6 @@ pub async fn main_entry_point< transactions.push(txn); } } - if let NetworkConfigSource::Orchestrator = source { debug!("Waiting for the start command from orchestrator"); orchestrator_client @@ -1179,16 +1066,3 @@ pub async fn main_entry_point< .await; orchestrator_client.post_bench_results(bench_results).await; } - -/// generate a libp2p identity based on a seed and idx -/// # Panics -/// if unable to create a secret key out of bytes -#[must_use] -pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { - let mut hasher = blake3::Hasher::new(); - hasher.update(&seed); - hasher.update(&index.to_le_bytes()); - let new_seed = *hasher.finalize().as_bytes(); - let sk_bytes = SecretKey::try_from_bytes(new_seed).unwrap(); - >::from(sk_bytes).into() -} diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index dbe486d07f..50f03d63a3 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -13,6 +13,7 @@ use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; +use std::net::SocketAddr; use std::net::{IpAddr, Ipv4Addr}; use tracing::instrument; @@ -43,13 +44,19 @@ async fn main() { // nodes let mut nodes = Vec::new(); - for _ in 0..config.config.num_nodes_with_stake.into() { + for i in 0..config.config.num_nodes_with_stake.into() { + // Calculate our libp2p advertise address, which we will later derive the + // bind address from for example purposes. + let advertise_address = SocketAddr::new( + IpAddr::V4(Ipv4Addr::LOCALHOST), + 8000 + (u16::try_from(i).expect("failed to create advertise address")), + ); let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + advertise_address: Some(advertise_address), network_config_file: None, }, ) diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 6e3ca1db41..2cf4a4c8b0 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -1,7 +1,10 @@ //! A validator using libp2p +use std::{net::SocketAddr, str::FromStr}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use local_ip_address::local_ip; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; @@ -21,7 +24,17 @@ pub mod infra; async fn main() { setup_logging(); setup_backtrace(); - let args = ValidatorArgs::parse(); + let mut args = ValidatorArgs::parse(); + + // If we did not set the advertise address, use our local IP and port 8000 + let local_ip = local_ip().expect("failed to get local IP"); + args.advertise_address = Some( + args.advertise_address.unwrap_or( + SocketAddr::from_str(&format!("{local_ip}:8000")) + .expect("failed to convert local IP to socket address"), + ), + ); + info!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 7b3f8d4e79..3ba22e6e27 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -13,7 +13,6 @@ use hotshot::types::SignatureKey; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; -use std::net::{IpAddr, Ipv4Addr}; /// The infra implementation #[path = "../infra/mod.rs"] @@ -120,7 +119,7 @@ async fn main() { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + advertise_address: None, network_config_file: None, }, ) diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index c4f1249c92..98c3292a96 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -2,13 +2,13 @@ //! a `Broker` object. use anyhow::{Context, Result}; -use cdn_broker::{reexports::crypto::signature::KeyPair, Broker, Config, ConfigBuilder}; +use cdn_broker::{Broker, Config, ConfigBuilder}; use clap::Parser; -use hotshot::traits::implementations::{ProductionDef, WrappedSignatureKey}; -use hotshot::types::SignatureKey; + +use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey}; use hotshot_example_types::node_types::TestTypes; use hotshot_types::traits::node_implementation::NodeType; -use local_ip_address::local_ip; +use hotshot_types::traits::signature_key::SignatureKey; use sha2::Digest; #[derive(Parser, Debug)] @@ -22,7 +22,7 @@ struct Args { discovery_endpoint: String, /// Whether or not metric collection and serving is enabled - #[arg(long, default_value_t = true)] + #[arg(long, default_value_t = false)] metrics_enabled: bool, /// The IP to bind to for externalizing metrics @@ -33,17 +33,22 @@ struct Args { #[arg(long, default_value_t = 9090)] metrics_port: u16, - /// The port to bind to for connections from users - #[arg(long, default_value = "127.0.0.1:1738")] + /// The user-facing address to bind to for connections from users + #[arg(long, default_value = "0.0.0.0:1738")] + public_bind_address: String, + + /// The user-facing address to advertise + #[arg(long, default_value = "local_ip:1738")] public_advertise_address: String, - /// The (public) port to bind to for connections from users - #[arg(long, default_value_t = 1738)] - public_bind_port: u16, + /// The broker-facing address to bind to for connections from + /// other brokers + #[arg(long, default_value = "0.0.0.0:1739")] + private_bind_address: String, - /// The (private) port to bind to for connections from other brokers - #[arg(long, default_value_t = 1739)] - private_bind_port: u16, + /// The broker-facing address to advertise + #[arg(long, default_value = "local_ip:1739")] + private_advertise_address: String, /// The seed for broker key generation #[arg(long, default_value_t = 0)] @@ -59,22 +64,17 @@ async fn main() -> Result<()> { // Initialize tracing tracing_subscriber::fmt::init(); - // Get our local IP address - let private_ip_address = local_ip().with_context(|| "failed to get local IP address")?; - let private_address = format!("{}:{}", private_ip_address, args.private_bind_port); - // Generate the broker key from the supplied seed let key_hash = sha2::Sha256::digest(args.key_seed.to_le_bytes()); let (public_key, private_key) = ::SignatureKey::generated_from_seed_indexed(key_hash.into(), 1337); - // Create a broker configuration with all the supplied arguments let broker_config: Config::SignatureKey>> = ConfigBuilder::default() + .public_bind_address(args.public_bind_address) .public_advertise_address(args.public_advertise_address) - .public_bind_address(format!("0.0.0.0:{}", args.public_bind_port)) - .private_advertise_address(private_address.clone()) - .private_bind_address(private_address) + .private_bind_address(args.private_bind_address) + .private_advertise_address(args.private_advertise_address) .metrics_enabled(args.metrics_enabled) .metrics_ip(args.metrics_ip) .discovery_endpoint(args.discovery_endpoint) diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index 6150542a78..42c0758549 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -1,6 +1,6 @@ //! The following is the main `Marshal` binary, which just instantiates and runs -//! a `Marshal` object with the `HotShot` types. -//! +//! a `Marshal` object. + use anyhow::{Context, Result}; use cdn_marshal::{ConfigBuilder, Marshal}; use clap::Parser; @@ -11,14 +11,27 @@ use hotshot_example_types::node_types::TestTypes; #[command(author, version, about, long_about = None)] /// The main component of the push CDN. struct Args { - /// The discovery client endpoint (including scheme) to connect to. + /// The discovery client endpoint (including scheme) to connect to /// With the local discovery feature, this is a file path. /// With the remote (redis) discovery feature, this is a redis URL (e.g. `redis://127.0.0.1:6789`). + #[arg(short, long)] discovery_endpoint: String, + /// Whether or not metric collection and serving is enabled + #[arg(long, default_value_t = false)] + metrics_enabled: bool, + + /// The IP to bind to for externalizing metrics + #[arg(long, default_value = "127.0.0.1")] + metrics_ip: String, + + /// The port to bind to for externalizing metrics + #[arg(long, default_value_t = 9090)] + metrics_port: u16, + /// The port to bind to for connections (from users) - #[arg(short, long, default_value_t = 8082)] + #[arg(short, long, default_value_t = 1737)] bind_port: u16, } @@ -34,6 +47,9 @@ async fn main() -> Result<()> { // Create a new `Config` let config = ConfigBuilder::default() .bind_address(format!("0.0.0.0:{}", args.bind_port)) + .metrics_enabled(args.metrics_enabled) + .metrics_ip(args.metrics_ip) + .metrics_port(args.metrics_port) .discovery_endpoint(args.discovery_endpoint) .build() .with_context(|| "failed to build Marshal config")?; diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index 9ecd134b96..1cc376a493 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -47,14 +47,11 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); // Create a new `OrchestratorClient` from the supplied URL - let orchestrator_client = OrchestratorClient::new( - ValidatorArgs { - url: Url::from_str(&args.orchestrator_url).with_context(|| "Invalid URL")?, - public_ip: None, - network_config_file: None, - }, - "whitelist-adapter".to_string(), - ); + let orchestrator_client = OrchestratorClient::new(ValidatorArgs { + url: Url::from_str(&args.orchestrator_url).with_context(|| "Invalid URL")?, + advertise_address: None, + network_config_file: None, + }); // Attempt to get the config from the orchestrator. // Loops internally until the config is received. diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index ecdc445a50..c6f73ad7d2 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -9,7 +9,6 @@ use crate::{ infra::run_orchestrator, types::{DANetwork, NodeImpl, QuorumNetwork}, }; -use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; /// general infra used for this example @@ -87,7 +86,7 @@ async fn main() { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, - public_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + advertise_address: None, network_config_file: None, }, ) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index c51c0b8d16..4c8178a7f8 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -52,6 +52,8 @@ time = { workspace = true } tracing = { workspace = true } versioned-binary-serialization = { workspace = true } jf-primitives.workspace = true +hotshot-orchestrator = { path = "../orchestrator" } +blake3.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index ffc4136a1b..84b2b9051a 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -4,6 +4,7 @@ mod networking; mod node_implementation; pub use hotshot_types::traits::{BlockPayload, ValidatedState}; +pub use libp2p_networking::network::NetworkNodeConfigBuilder; pub use networking::{NetworkError, NetworkReliability}; pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; @@ -11,7 +12,9 @@ pub use node_implementation::{NodeImplementation, TestableNodeImplementation}; pub mod implementations { pub use super::networking::{ combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, - libp2p_network::{Libp2pNetwork, PeerInfoVec}, + libp2p_network::{ + derive_libp2p_keypair, derive_libp2p_peer_id, Libp2pNetwork, PeerInfoVec, + }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ KeyPair, ProductionDef, PushCdnNetwork, TestingDef, WrappedSignatureKey, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 4b4c155db1..5f7ec9de90 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -92,7 +92,17 @@ impl CombinedNetworks { /// /// Panics if `COMBINED_NETWORK_CACHE_SIZE` is 0 #[must_use] - pub fn new(networks: Arc>, delay_duration: Duration) -> Self { + pub fn new( + primary_network: PushCdnNetwork, + secondary_network: Libp2pNetwork, TYPES::SignatureKey>, + delay_duration: Duration, + ) -> Self { + // Create networks from the ones passed in + let networks = Arc::from(UnderlyingCombinedNetworks( + primary_network, + secondary_network, + )); + Self { networks, message_cache: Arc::new(RwLock::new(LruCache::new( diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c2109bd145..f61e90d982 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -2,6 +2,7 @@ //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network use super::NetworkingMetricsValue; +use anyhow::anyhow; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, channel::{self, bounded, unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, @@ -14,6 +15,7 @@ use futures::{ future::{join_all, Either}, FutureExt, StreamExt, }; +use hotshot_orchestrator::config::NetworkConfig; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{AsyncGenerator, NetworkReliability}; use hotshot_types::{ @@ -35,9 +37,12 @@ use hotshot_types::{ message::{Message, MessageKind}, traits::network::{TestableNetworkingImplementation, ViewMessage}, }; -use libp2p_identity::PeerId; -#[cfg(feature = "hotshot-testing")] -use libp2p_networking::network::{MeshParams, NetworkNodeConfigBuilder}; +use libp2p_identity::{ + ed25519::{self, SecretKey}, + Keypair, PeerId, +}; +use libp2p_networking::network::NetworkNodeConfigBuilder; +use libp2p_networking::{network::MeshParams, reexport::Multiaddr}; use libp2p_networking::{ network::{ behaviours::request_response::{Request, Response}, @@ -46,10 +51,14 @@ use libp2p_networking::{ NetworkNodeConfig, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, NetworkNodeType, }, - reexport::{Multiaddr, ResponseChannel}, + reexport::ResponseChannel, }; +use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; use snafu::ResultExt; +use std::num::NonZeroUsize; +#[cfg(feature = "hotshot-testing")] +use std::str::FromStr; use std::{ collections::BTreeSet, fmt::Debug, @@ -59,8 +68,7 @@ use std::{ }, time::Duration, }; -#[cfg(feature = "hotshot-testing")] -use std::{collections::HashSet, num::NonZeroUsize, str::FromStr}; +use std::{collections::HashSet, net::SocketAddr}; use tracing::{debug, error, info, instrument, warn}; use versioned_binary_serialization::{ version::{StaticVersionType, Version}, @@ -69,7 +77,7 @@ use versioned_binary_serialization::{ /// convenience alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests -pub type BootstrapAddrs = Arc, Multiaddr)>>>; +pub type BootstrapAddrs = Arc>>; /// hardcoded topic of QC used pub const QC_TOPIC: &str = "global"; @@ -99,7 +107,7 @@ impl Debug for Libp2pNetwork { type TakeReceiver = Mutex)>>>; /// Type alias for a shared collection of peerid, multiaddrs -pub type PeerInfoVec = Arc, Multiaddr)>>>; +pub type PeerInfoVec = Arc>>; /// The underlying state of the libp2p network #[derive(Debug)] @@ -271,7 +279,6 @@ where config, pubkey.clone(), bootstrap_addrs_ref, - num_bootstrap, usize::try_from(node_id).unwrap(), keys, #[cfg(feature = "hotshot-testing")] @@ -298,7 +305,125 @@ where } } -impl Libp2pNetwork { +/// Derive a Libp2p keypair from a given private key +/// +/// # Errors +/// If we are unable to derive a new `SecretKey` from the `blake3`-derived +/// bytes. +pub fn derive_libp2p_keypair( + private_key: &K::PrivateKey, +) -> anyhow::Result { + // Derive a secondary key from our primary private key + let derived_key = blake3::derive_key("libp2p key", &(bincode::serialize(&private_key)?)); + let derived_key = SecretKey::try_from_bytes(derived_key)?; + + // Create an `ed25519` keypair from the derived key + Ok(ed25519::Keypair::from(derived_key).into()) +} + +/// Derive a Libp2p Peer ID from a given private key +/// +/// # Errors +/// If we are unable to derive a Libp2p keypair +pub fn derive_libp2p_peer_id( + private_key: &K::PrivateKey, +) -> anyhow::Result { + // Get the derived keypair + let keypair = derive_libp2p_keypair::(private_key)?; + + // Return the PeerID derived from the public key + Ok(PeerId::from_public_key(&keypair.public())) +} + +impl Libp2pNetwork { + /// Create and return a Libp2p network from a network config file + /// and various other configuration-specific values. + /// + /// # Errors + /// If we are unable to parse a Multiaddress + /// + /// # Panics + /// If we are unable to calculate the replication factor + pub async fn from_config( + mut config: NetworkConfig, + bind_address: SocketAddr, + pub_key: &K, + priv_key: &K::PrivateKey, + ) -> anyhow::Result { + // Try to take our Libp2p config from our broader network config + let libp2p_config = config + .libp2p_config + .take() + .ok_or(anyhow!("Libp2p config not supplied"))?; + + // Derive our Libp2p keypair from our supplied private key + let keypair = derive_libp2p_keypair::(priv_key)?; + + // Convert our bind address to a `Multiaddr` + let bind_address: Multiaddr = format!( + "/{}/{}/udp/{}/quic-v1", + if bind_address.is_ipv4() { "ip4" } else { "ip6" }, + bind_address.ip(), + bind_address.port() + ) + .parse()?; + + // Build our libp2p configuration from our global, network configuration + let mut config_builder = NetworkNodeConfigBuilder::default(); + + config_builder + .replication_factor( + NonZeroUsize::new(config.config.num_nodes_with_stake.get() - 2) + .expect("failed to calculate replication factor"), + ) + .identity(keypair) + .bound_addr(Some(bind_address.clone())) + .mesh_params(Some(MeshParams { + mesh_n_high: libp2p_config.mesh_n_high, + mesh_n_low: libp2p_config.mesh_n_low, + mesh_outbound_min: libp2p_config.mesh_outbound_min, + mesh_n: libp2p_config.mesh_n, + })); + + // Choose `mesh_n` random nodes to connect to for bootstrap + let bootstrap_nodes = libp2p_config + .bootstrap_nodes + .into_iter() + .choose_multiple(&mut StdRng::from_entropy(), libp2p_config.mesh_n); + config_builder.to_connect_addrs(HashSet::from_iter(bootstrap_nodes.clone())); + + // Build the node's configuration + let node_config = config_builder.build()?; + + // Calculate all keys so we can keep track of direct message recipients + let mut all_keys = BTreeSet::new(); + let mut da_keys = BTreeSet::new(); + + // Make a node DA if it is under the staked committee size + for (i, node) in config.config.known_nodes_with_stake.into_iter().enumerate() { + if i < config.config.da_staked_committee_size { + da_keys.insert(K::get_public_key(&node.stake_table_entry)); + } + all_keys.insert(K::get_public_key(&node.stake_table_entry)); + } + + Ok(Libp2pNetwork::new( + NetworkingMetricsValue::default(), + node_config, + pub_key.clone(), + Arc::new(RwLock::new(bootstrap_nodes)), + usize::try_from(config.node_index)?, + // NOTE: this introduces an invariant that the keys are assigned using this indexed + // function + all_keys, + #[cfg(feature = "hotshot-testing")] + None, + da_keys.clone(), + da_keys.contains(pub_key), + ) + .await?) + } + /// Returns when network is ready pub async fn wait_for_ready(&self) { loop { @@ -307,7 +432,6 @@ impl Libp2pNetwork { } async_sleep(Duration::from_secs(1)).await; } - info!("LIBP2P: IS READY GOT TRIGGERED!!"); } /// Constructs new network for a node. Note that this network is unconnected. @@ -328,7 +452,6 @@ impl Libp2pNetwork { config: NetworkNodeConfig, pk: K, bootstrap_addrs: BootstrapAddrs, - bootstrap_addrs_len: usize, id: usize, // HACK committee_pks: BTreeSet, @@ -336,7 +459,11 @@ impl Libp2pNetwork { da_pks: BTreeSet, is_da: bool, ) -> Result, NetworkError> { - assert!(bootstrap_addrs_len > 4, "Need at least 5 bootstrap nodes"); + // Error if there were no bootstrap nodes specified + #[cfg(not(feature = "hotshot-testing"))] + if bootstrap_addrs.read().await.len() == 0 { + return Err(NetworkError::NoBootstrapNodesSpecified); + } let (mut rx, network_handle) = spawn_network_node(config.clone(), id) .await .map_err(Into::::into)?; @@ -348,7 +475,7 @@ impl Libp2pNetwork { let addr = network_handle.listen_addr(); let pid = network_handle.peer_id(); let mut bs_cp = bootstrap_addrs.write().await; - bs_cp.push((Some(pid), addr)); + bs_cp.push((pid, addr)); drop(bs_cp); } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index fe60aeea66..45f02ad2c5 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -126,7 +126,7 @@ pub enum ClientRequest { /// prune a peer Prune(PeerId), /// add vec of known peers or addresses - AddKnownPeers(Vec<(Option, Multiaddr)>), + AddKnownPeers(Vec<(PeerId, Multiaddr)>), /// Ignore peers. Only here for debugging purposes. /// Allows us to have nodes that are never pruned IgnorePeers(Vec), diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index dfd2833d9f..eea4bf71d7 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -127,27 +127,17 @@ impl NetworkNode { /// the `spawn_listeners` function /// will start connecting to peers #[instrument(skip(self))] - pub fn add_known_peers(&mut self, known_peers: &[(Option, Multiaddr)]) { + pub fn add_known_peers(&mut self, known_peers: &[(PeerId, Multiaddr)]) { info!("Adding nodes {:?} to {:?}", known_peers, self.peer_id); let behaviour = self.swarm.behaviour_mut(); let mut bs_nodes = HashMap::>::new(); let mut shuffled = known_peers.iter().collect::>(); shuffled.shuffle(&mut thread_rng()); for (peer_id, addr) in shuffled { - match peer_id { - Some(peer_id) => { - // if we know the peerid, add address. - if *peer_id != self.peer_id { - behaviour.autonat.add_server(*peer_id, Some(addr.clone())); - behaviour.dht.add_address(peer_id, addr.clone()); - bs_nodes.insert(*peer_id, iter::once(addr.clone()).collect()); - } - } - None => { - // - // TODO actually implement this part - // if we don't know the peerid, dial to find out what the peerid is - } + if *peer_id != self.peer_id { + behaviour.dht.add_address(peer_id, addr.clone()); + behaviour.autonat.add_server(*peer_id, Some(addr.clone())); + bs_nodes.insert(*peer_id, iter::once(addr.clone()).collect()); } } behaviour.dht.add_bootstrap_nodes(bs_nodes); @@ -331,10 +321,8 @@ impl NetworkNode { .build() }; for (peer, addr) in &config.to_connect_addrs { - if let Some(peer) = peer { - if peer != swarm.local_peer_id() { - swarm.behaviour_mut().add_address(peer, addr.clone()); - } + if peer != swarm.local_peer_id() { + swarm.behaviour_mut().add_address(peer, addr.clone()); } } diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 4d19b6f516..9e03632ae1 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -29,7 +29,7 @@ pub struct NetworkNodeConfig { pub mesh_params: Option, /// list of addresses to connect to at initialization - pub to_connect_addrs: HashSet<(Option, Multiaddr)>, + pub to_connect_addrs: HashSet<(PeerId, Multiaddr)>, /// republication interval in DHT, must be much less than `ttl` #[builder(default)] pub republication_interval: Option, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index f58f502fd4..111ff6a89e 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -456,7 +456,7 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed pub async fn add_known_peers( &self, - known_peers: Vec<(Option, Multiaddr)>, + known_peers: Vec<(PeerId, Multiaddr)>, ) -> Result<(), NetworkNodeHandleError> { info!("ADDING KNOWN PEERS TO {:?}", self.peer_id); let req = ClientRequest::AddKnownPeers(known_peers); diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index e887a50b1b..cda6438354 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -273,12 +273,7 @@ pub async fn spin_up_swarms( let to_share = bootstrap_addrs.clone(); handle .handle - .add_known_peers( - to_share - .iter() - .map(|(a, b)| (Some(*a), b.clone())) - .collect::>(), - ) + .add_known_peers(to_share) .await .context(HandleSnafu)?; } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 001d0ea41d..b08a60c312 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -21,6 +21,9 @@ thiserror = "1.0.50" serde-inline-default = "0.1.1" csv = "1.3.0" versioned-binary-serialization = { workspace = true } +multiaddr = "0.18.1" +anyhow.workspace = true +bincode.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 468137ce63..21bac03d8c 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -5,9 +5,8 @@ FORMAT_VERSION = "0.1.0" # POST node's identity [route.post_identity] -PATH = ["identity/:identity"] +PATH = ["identity"] METHOD = "POST" -":identity" = "Literal" DOC = """ POST a node's identity (IP address) to the orchestrator. Returns the node's node_index. """ diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 8fe8767bdb..77884d58a5 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -56,7 +56,6 @@ start_delay = 1 num_bootstrap = 5 [libp2p_config] -index_ports = true bootstrap_mesh_n_high = 4 bootstrap_mesh_n_low = 4 bootstrap_mesh_outbound_min = 2 @@ -66,7 +65,6 @@ mesh_n_low = 4 mesh_outbound_min = 2 mesh_n = 4 online_time = 10 -base_port = 8000 [web_server_config] url = "http://localhost:9000" diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index bac6d08af9..4807a98fdc 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -1,4 +1,4 @@ -use std::{net::IpAddr, time::Duration}; +use std::{net::SocketAddr, time::Duration}; use crate::{config::NetworkConfig, OrchestratorVersion}; use async_compatibility_layer::art::async_sleep; @@ -6,17 +6,18 @@ use clap::Parser; use futures::{Future, FutureExt}; use hotshot_types::{ + constants::Version01, traits::{election::ElectionConfig, signature_key::SignatureKey}, PeerConfig, }; +use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; +use versioned_binary_serialization::BinarySerializer; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client client: surf_disco::Client, - /// the identity - pub identity: String, } /// Struct describing a benchmark result @@ -115,9 +116,8 @@ pub struct BenchResultsDownloadConfig { pub struct ValidatorArgs { /// The address the orchestrator runs on pub url: Url, - /// This node's public IP address, for libp2p - /// If no IP address is passed in, it will default to 127.0.0.1 - pub public_ip: Option, + /// The optional advertise address to use for Libp2p + pub advertise_address: Option, /// An optional network config file to save to/load from /// Allows for rejoining the network on a complete state loss #[arg(short, long)] @@ -131,9 +131,8 @@ pub struct MultiValidatorArgs { pub num_nodes: u16, /// The address the orchestrator runs on pub url: Url, - /// This node's public IP address, for libp2p - /// If no IP address is passed in, it will default to 127.0.0.1 - pub public_ip: Option, + /// The optional advertise address to use for Libp2p + pub advertise_address: Option, /// An optional network config file to save to/load from /// Allows for rejoining the network on a complete state loss #[arg(short, long)] @@ -168,7 +167,7 @@ impl ValidatorArgs { pub fn from_multi_args(multi_args: MultiValidatorArgs, node_index: u16) -> Self { Self { url: multi_args.url, - public_ip: multi_args.public_ip, + advertise_address: multi_args.advertise_address, network_config_file: multi_args .network_config_file .map(|s| format!("{s}-{node_index}")), @@ -179,10 +178,10 @@ impl ValidatorArgs { impl OrchestratorClient { /// Creates the client that will connect to the orchestrator #[must_use] - pub fn new(args: ValidatorArgs, identity: String) -> Self { + pub fn new(args: ValidatorArgs) -> Self { let client = surf_disco::Client::::new(args.url); // TODO ED: Add healthcheck wait here - OrchestratorClient { client, identity } + OrchestratorClient { client } } /// Get the config from the orchestrator. @@ -192,19 +191,43 @@ impl OrchestratorClient { /// # Panics /// if unable to convert the node index from usize into u64 /// (only applicable on 32 bit systems) + /// + /// # Errors + /// If we were unable to serialize the Libp2p data #[allow(clippy::type_complexity)] pub async fn get_config_without_peer( &self, - identity: String, - ) -> NetworkConfig { - // get the node index - let identity = identity.as_str(); + libp2p_address: Option, + libp2p_public_key: Option, + ) -> anyhow::Result> { + // Get the (possible) Libp2p advertise address from our args + let libp2p_address = libp2p_address.map(|f| { + Multiaddr::try_from(format!( + "/{}/{}/udp/{}/quic-v1", + if f.is_ipv4() { "ip4" } else { "ip6" }, + f.ip(), + f.port() + )) + .expect("failed to create multiaddress") + }); + + // Serialize our (possible) libp2p-specific data + let request_body = versioned_binary_serialization::Serializer::::serialize(&( + libp2p_address, + libp2p_public_key, + ))?; + let identity = |client: Client| { + // We need to clone here to move it into the closure + let request_body = request_body.clone(); async move { let node_index: Result = client - .post(&format!("api/identity/{identity}")) + .post("api/identity") + .body_binary(&request_body) + .expect("failed to set request body") .send() .await; + node_index } .boxed() @@ -226,7 +249,7 @@ impl OrchestratorClient { let mut config = self.wait_for_fn_from_orchestrator(f).await; config.node_index = From::::from(node_index); - config + Ok(config) } /// Post to the orchestrator and get the latest `node_index` diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 5ee35aaf3e..c50d29fa31 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -2,15 +2,9 @@ use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; +use libp2p::{Multiaddr, PeerId}; use serde_inline_default::serde_inline_default; -use std::{ - env, - net::{IpAddr, Ipv4Addr, SocketAddr}, - num::NonZeroUsize, - path::PathBuf, - time::Duration, - vec, -}; +use std::{env, net::SocketAddr, num::NonZeroUsize, path::PathBuf, time::Duration, vec}; use std::{fs, path::Path}; use surf_disco::Url; use thiserror::Error; @@ -22,18 +16,10 @@ use crate::client::OrchestratorClient; /// Configuration describing a libp2p node #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { - /// bootstrap nodes (socket, serialized public key) - pub bootstrap_nodes: Vec<(SocketAddr, Vec)>, - /// number of bootstrap nodes - pub num_bootstrap_nodes: usize, - /// public ip of this node - pub public_ip: IpAddr, - /// port to run libp2p on - pub base_port: u16, + /// bootstrap nodes (multiaddress, serialized public key) + pub bootstrap_nodes: Vec<(PeerId, Multiaddr)>, /// global index of node (for testing purposes a uid) pub node_index: u64, - /// whether or not to index ports - pub index_ports: bool, /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n_high: usize, /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes @@ -65,8 +51,6 @@ pub struct Libp2pConfig { /// configuration serialized into a file #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfigFile { - /// whether or not to index ports - pub index_ports: bool, /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes pub bootstrap_mesh_n_high: usize, /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes @@ -85,8 +69,6 @@ pub struct Libp2pConfigFile { pub mesh_n: usize, /// time node has been running pub online_time: u64, - /// port to run libp2p on - pub base_port: u16, } /// configuration for a web server @@ -184,7 +166,7 @@ pub enum NetworkConfigSource { impl NetworkConfig { /// Asynchronously retrieves a `NetworkConfig` either from a file or from an orchestrator. /// - /// This function takes an `OrchestratorClient`, an identity string, and an optional file path. + /// This function takes an `OrchestratorClient`, an optional file path, and Libp2p-specific parameters. /// /// If a file path is provided, the function will first attempt to load the `NetworkConfig` from the file. /// If the file does not exist or cannot be read, the function will fall back to retrieving the `NetworkConfig` from the orchestrator. @@ -193,61 +175,57 @@ impl NetworkConfig { /// /// If no file path is provided, the function will directly retrieve the `NetworkConfig` from the orchestrator. /// + /// # Errors + /// If we were unable to load the configuration. + /// /// # Arguments /// /// * `client` - An `OrchestratorClient` used to retrieve the `NetworkConfig` from the orchestrator. /// * `identity` - A string representing the identity for which to retrieve the `NetworkConfig`. /// * `file` - An optional string representing the path to the file from which to load the `NetworkConfig`. + /// * `libp2p_address` - An optional address specifying where other Libp2p nodes can reach us + /// * `libp2p_public_key` - The public key in which other Libp2p nodes can reach us with /// /// # Returns /// /// This function returns a tuple containing a `NetworkConfig` and a `NetworkConfigSource`. The `NetworkConfigSource` indicates whether the `NetworkConfig` was loaded from a file or retrieved from the orchestrator. - /// - /// # Examples - /// - /// ```ignore - /// # use hotshot_orchestrator::config::NetworkConfig; - /// # use hotshot_orchestrator::client::OrchestratorClient; - /// let client = OrchestratorClient::new(); - /// let identity = "my_identity".to_string(); - /// let file = Some("/path/to/my/config".to_string()); - /// let (config, source) = NetworkConfig::from_file_or_orchestrator(client, file); - /// ``` pub async fn from_file_or_orchestrator( client: &OrchestratorClient, file: Option, - ) -> (NetworkConfig, NetworkConfigSource) { + libp2p_address: Option, + libp2p_public_key: Option, + ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { if let Some(file) = file { error!("Retrieving config from the file"); // if we pass in file, try there first match Self::from_file(file.clone()) { - Ok(config) => (config, NetworkConfigSource::File), + Ok(config) => Ok((config, NetworkConfigSource::File)), Err(e) => { // fallback to orchestrator error!("{e}, falling back to orchestrator"); let config = client - .get_config_without_peer(client.identity.clone()) - .await; + .get_config_without_peer(libp2p_address, libp2p_public_key) + .await?; // save to file if we fell back if let Err(e) = config.to_file(file) { error!("{e}"); }; - (config, NetworkConfigSource::File) + Ok((config, NetworkConfigSource::File)) } } } else { error!("Retrieving config from the orchestrator"); // otherwise just get from orchestrator - ( + Ok(( client - .get_config_without_peer(client.identity.clone()) - .await, + .get_config_without_peer(libp2p_address, libp2p_public_key) + .await?, NetworkConfigSource::Orchestrator, - ) + )) } } @@ -261,12 +239,19 @@ impl NetworkConfig { /// Asynchronously retrieves a `NetworkConfig` from an orchestrator. /// The retrieved one includes correct `node_index` and peer's public config. + /// + /// # Errors + /// If we are unable to get the configuration from the orchestrator pub async fn get_complete_config( client: &OrchestratorClient, - my_own_validator_config: ValidatorConfig, file: Option, - ) -> (NetworkConfig, NetworkConfigSource) { - let (mut run_config, source) = Self::from_file_or_orchestrator(client, file).await; + my_own_validator_config: ValidatorConfig, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { + let (mut run_config, source) = + Self::from_file_or_orchestrator(client, file, libp2p_address, libp2p_public_key) + .await?; let node_index = run_config.node_index; // Assign my_own_validator_config to the run_config if not loading from file @@ -292,7 +277,7 @@ impl NetworkConfig { run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; error!("Retrieved config; our node index is {node_index}."); - (run_config, source) + Ok((run_config, source)) } /// Loads a `NetworkConfig` from a file. @@ -474,11 +459,7 @@ impl From> for NetworkC seed: val.seed, transaction_size: val.transaction_size, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { - num_bootstrap_nodes: val.config.num_bootstrap, - index_ports: libp2p_config.index_ports, bootstrap_nodes: Vec::new(), - public_ip: IpAddr::V4(Ipv4Addr::UNSPECIFIED), - base_port: libp2p_config.base_port, node_index: 0, bootstrap_mesh_n_high: libp2p_config.bootstrap_mesh_n_high, bootstrap_mesh_n_low: libp2p_config.bootstrap_mesh_n_low, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index b1af27e658..d9fec75375 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -8,16 +8,12 @@ pub mod config; use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; use hotshot_types::{ + constants::Version01, traits::{election::ElectionConfig, signature_key::SignatureKey}, PeerConfig, }; use std::fs::OpenOptions; -use std::{ - collections::HashSet, - io, - io::ErrorKind, - net::{IpAddr, SocketAddr}, -}; +use std::{collections::HashSet, io, io::ErrorKind}; use tide_disco::{Api, App, RequestError}; use surf_disco::Url; @@ -32,11 +28,17 @@ use futures::FutureExt; use crate::config::NetworkConfig; use csv::Writer; -use libp2p::identity::{ - ed25519::{Keypair as EdKeypair, SecretKey}, - Keypair, +use libp2p::{ + identity::{ + ed25519::{Keypair as EdKeypair, SecretKey}, + Keypair, + }, + Multiaddr, PeerId, +}; +use versioned_binary_serialization::{ + version::{StaticVersion, StaticVersionType}, + BinarySerializer, }; -use versioned_binary_serialization::version::{StaticVersion, StaticVersionType}; /// Orchestrator is not, strictly speaking, bound to the network; it can have its own versioning. /// Orchestrator Version (major) @@ -142,10 +144,15 @@ impl /// An api exposed by the orchestrator pub trait OrchestratorApi { - /// post endpoint for identity + /// Post an identity to the orchestrator. Takes in optional + /// arguments so others can identify us on the Libp2p network. /// # Errors - /// if unable to serve - fn post_identity(&mut self, identity: IpAddr) -> Result; + /// If we were unable to serve the request + fn post_identity( + &mut self, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result; /// post endpoint for each node's config /// # Errors /// if unable to serve @@ -192,7 +199,15 @@ where KEY: serde::Serialize + Clone + SignatureKey + 'static, ELECTION: serde::Serialize + Clone + Send + ElectionConfig + 'static, { - fn post_identity(&mut self, identity: IpAddr) -> Result { + /// Post an identity to the orchestrator. Takes in optional + /// arguments so others can identify us on the Libp2p network. + /// # Errors + /// If we were unable to serve the request + fn post_identity( + &mut self, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result { let node_index = self.latest_index; self.latest_index += 1; @@ -203,24 +218,19 @@ where }); } + // If the orchestrator is set up for libp2p and we have supplied the proper + // Libp2p data, add our node to the list of bootstrap nodes. if self.config.libp2p_config.clone().is_some() { - let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); - // Designate node as bootstrap node and store its identity information - if libp2p_config_clone.bootstrap_nodes.len() < libp2p_config_clone.num_bootstrap_nodes { - let port_index = if libp2p_config_clone.index_ports { - node_index - } else { - 0 - }; - let socketaddr = - SocketAddr::new(identity, libp2p_config_clone.base_port + port_index); - let keypair = libp2p_generate_indexed_identity(self.config.seed, node_index.into()); + if let (Some(libp2p_public_key), Some(libp2p_address)) = + (libp2p_public_key, libp2p_address) + { + // Push to our bootstrap nodes self.config .libp2p_config .as_mut() .unwrap() .bootstrap_nodes - .push((socketaddr, keypair.to_protobuf_encoding().unwrap())); + .push((libp2p_public_key, libp2p_address)); } } Ok(node_index) @@ -232,15 +242,6 @@ where &mut self, _node_index: u16, ) -> Result, ServerError> { - if self.config.libp2p_config.is_some() { - let libp2p_config = self.config.clone().libp2p_config.unwrap(); - if libp2p_config.bootstrap_nodes.len() < libp2p_config.num_bootstrap_nodes { - return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, - message: "Not enough bootstrap nodes have registered".to_string(), - }); - } - } Ok(self.config.clone()) } @@ -397,14 +398,22 @@ where let mut api = Api::::new(api_toml)?; api.post("post_identity", |req, state| { async move { - let identity = req.string_param("identity")?.parse::(); - if identity.is_err() { + // Read the bytes from the body + let mut body_bytes = req.body_bytes(); + body_bytes.drain(..12); + + // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) + let Ok((libp2p_address, libp2p_public_key)) = + versioned_binary_serialization::Serializer::::deserialize(&body_bytes) + else { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, - message: "Identity is not a properly formed IP address".to_string(), + message: "Malformed body".to_string(), }); - } - state.post_identity(identity.unwrap()) + }; + + // Call our state function to process the request + state.post_identity(libp2p_address, libp2p_public_key) } .boxed() })? diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index d99a04a7d5..4519f186d1 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -132,6 +132,8 @@ pub enum NetworkError { CouldNotDeliver, /// Attempted to deliver a message to an unknown node NoSuchNode, + /// No bootstrap nodes were specified on network creation + NoBootstrapNodesSpecified, /// Failed to serialize a network message FailedToSerialize { /// Originating bincode error From 4995260680a456a993cbbe1a72270dc408e2fcef Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 2 Apr 2024 16:13:23 +0200 Subject: [PATCH 0911/1393] Do not send VID Dispersal through the fallback network (#2866) * Calculate VID shared locally * Adjust to tokio and adjust tests * Split DAProsal validation and voting * Store only successfully signed VID shares * Do not send VID through the fallback network * Use the same serialiser * Fix use statements --- hotshot/src/tasks/mod.rs | 6 ++- hotshot/src/tasks/task_state.rs | 1 + .../src/traits/networking/combined_network.rs | 13 ++++++- .../src/traits/networking/libp2p_network.rs | 20 ++++++---- task-impls/src/consensus.rs | 38 ++++++++++++++----- task-impls/src/request.rs | 6 ++- task-impls/src/response.rs | 26 ++++++++----- 7 files changed, 79 insertions(+), 31 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 99a3f9838f..0e7d3b0991 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -69,13 +69,15 @@ pub async fn add_response_task>( rx: RequestReceiver, handle: &SystemContextHandle, ) { - let state = NetworkResponseState::new( + let state = NetworkResponseState::::new( handle.hotshot.get_consensus(), rx, handle.hotshot.memberships.quorum_membership.clone(), handle.public_key().clone(), ); - task_reg.register(run_response_task(state, hs_rx)).await; + task_reg + .register(run_response_task::(state, hs_rx)) + .await; } /// Add the network task to handle messages and publish events. pub async fn add_network_message_task< diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 4bb4f2e44d..43f2ea3fd1 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -47,6 +47,7 @@ impl, V: StaticVersionType> Create public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), _phantom: PhantomData, + id: handle.hotshot.id, } } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 5f7ec9de90..bcd928cbfa 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -36,7 +36,7 @@ use hotshot_types::{ }, BoxSyncFuture, }; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::future::Future; use std::{collections::hash_map::DefaultHasher, sync::Arc}; @@ -410,6 +410,17 @@ impl ConnectedNetwork, TYPES::SignatureKey> .await } + async fn vid_broadcast_message( + &self, + messages: HashMap>, + bind_version: VER, + ) -> Result<(), NetworkError> { + self.networks + .0 + .vid_broadcast_message(messages, bind_version) + .await + } + /// Receive one or many messages from the underlying network. /// /// # Errors diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index f61e90d982..8f2c2c38d5 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -17,11 +17,14 @@ use futures::{ }; use hotshot_orchestrator::config::NetworkConfig; #[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::{AsyncGenerator, NetworkReliability}; +use hotshot_types::traits::network::{ + AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, ViewMessage, +}; use hotshot_types::{ boxed_sync, constants::{Version01, LOOK_AHEAD, STATIC_VER_0_1, VERSION_0_1}, data::ViewNumber, + message::{DataMessage::DataResponse, Message, MessageKind}, traits::{ network::{ self, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, @@ -32,11 +35,6 @@ use hotshot_types::{ }, BoxSyncFuture, }; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::{ - message::{Message, MessageKind}, - traits::network::{TestableNetworkingImplementation, ViewMessage}, -}; use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, PeerId, @@ -820,9 +818,15 @@ impl ConnectedNetwork for Libp2p { Ok(response) => match response { Some(msg) => { - let res = Serializer::::deserialize(&msg.0) + let res: Message = Serializer::::deserialize(&msg.0) .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; - Ok(ResponseMessage::Found(res)) + let DataResponse(res) = (match res.kind { + MessageKind::Data(data) => data, + MessageKind::Consensus(_) => return Ok(ResponseMessage::NotFound), + }) else { + return Ok(ResponseMessage::NotFound); + }; + Ok(res) } None => Ok(ResponseMessage::NotFound), }, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 466df2dc6e..be38867519 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -481,6 +481,34 @@ impl, A: ConsensusApi + false } + /// Validates whether the VID Dispersal Proposal is correctly signed + fn validate_disperse(&self, disperse: &Proposal>) -> bool { + let view = disperse.data.get_view_number(); + let payload_commitment = disperse.data.payload_commitment; + // Check whether the data comes from the right leader for this view + if self + .quorum_membership + .get_leader(view) + .validate(&disperse.signature, payload_commitment.as_ref()) + { + return true; + } + // or the data was calculated and signed by the current node + if self + .public_key + .validate(&disperse.signature, payload_commitment.as_ref()) + { + return true; + } + // or the data was signed by one of the staked DA committee members + for da_member in self.committee_membership.get_staked_committee(view) { + if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { + return true; + } + } + false + } + /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] pub async fn handle( @@ -1117,16 +1145,8 @@ impl, A: ConsensusApi + } debug!("VID disperse data is not more than one view older."); - let payload_commitment = disperse.data.payload_commitment; - // Check whether the data comes from the right leader for this view or - // the data was calculated and signed by the current node - let view_leader_key = self.quorum_membership.get_leader(view); - if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) - && !self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) - { + if !self.validate_disperse(disperse) { warn!("Could not verify VID dispersal/share sig."); return; } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 0064d0c5d2..cc8943b19d 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -22,7 +22,7 @@ use hotshot_types::{ }; use rand::{prelude::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, instrument, warn}; use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; /// Amount of time to try for a request before timing out. @@ -56,6 +56,8 @@ pub struct NetworkRequestState< pub private_key: ::PrivateKey, /// Version discrimination pub _phantom: PhantomData, + /// The node's id + pub id: u64, } /// Alias for a signature @@ -141,6 +143,7 @@ impl, Ver: StaticVersionType + 'st /// run a delayed request task for a request. The first response /// received will be sent over `sender` + #[instrument(skip_all, fields(id = self.id, view = *self.view), name = "NetworkRequestState run_delay", level = "error")] fn run_delay( &self, request: RequestKind, @@ -172,6 +175,7 @@ impl, Ver: StaticVersionType + 'st error!("Failed to sign Data Request"); return; }; + debug!("Requesting data: {:?}", request); async_spawn(requester.run::(request, signature)); } } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 15d74b0cf7..9c8619caaa 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -6,7 +6,6 @@ use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use bincode::config::Options; use either::Either::Right; use futures::{channel::mpsc, FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; @@ -22,11 +21,11 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, }, - utils::bincode_opts, }; use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; use crate::events::HotShotEvent; @@ -68,13 +67,16 @@ impl NetworkResponseState { /// Run the request response loop until a `HotShotEvent::Shutdown` is received. /// Or the stream is closed. - async fn run_loop(mut self, shutdown: EventDependency>>) { + async fn run_loop( + mut self, + shutdown: EventDependency>>, + ) { let mut shutdown = Box::pin(shutdown.completed().fuse()); loop { futures::select! { req = self.receiver.next() => { match req { - Some((msg, chan)) => self.handle_message(msg, chan).await, + Some((msg, chan)) => self.handle_message::(msg, chan).await, None => return, } }, @@ -87,7 +89,11 @@ impl NetworkResponseState { /// Handle an incoming message. First validates the sender, then handles the contained request. /// Sends the response via `chan` - async fn handle_message(&self, req: Message, chan: ResponseChannel>) { + async fn handle_message( + &self, + req: Message, + chan: ResponseChannel>, + ) { let sender = req.sender.clone(); if !self.valid_sender(&sender) { let _ = chan.0.send(self.make_msg(ResponseMessage::Denied)); @@ -96,7 +102,7 @@ impl NetworkResponseState { match req.kind { MessageKind::Data(DataMessage::RequestData(req)) => { - if !valid_signature(&req, &sender) { + if !valid_signature::(&req, &sender) { let _ = chan.0.send(self.make_msg(ResponseMessage::Denied)); return; } @@ -158,11 +164,11 @@ impl NetworkResponseState { } /// Check the signature -fn valid_signature( +fn valid_signature( req: &DataRequest, sender: &TYPES::SignatureKey, ) -> bool { - let Ok(data) = bincode_opts().serialize(&req.request) else { + let Ok(data) = Serializer::::serialize(&req.request) else { return false; }; sender.validate(&req.signature, &Sha256::digest(data)) @@ -171,7 +177,7 @@ fn valid_signature( /// Spawn the network response task to handle incoming request for data /// from other nodes. It will shutdown when it gets `HotshotEvent::Shutdown` /// on the `event_stream` arg. -pub fn run_response_task( +pub fn run_response_task( task_state: NetworkResponseState, event_stream: Receiver>>, ) -> JoinHandle<()> { @@ -179,5 +185,5 @@ pub fn run_response_task( event_stream, Box::new(|e| matches!(e.as_ref(), HotShotEvent::Shutdown)), ); - async_spawn(task_state.run_loop(dep)) + async_spawn(task_state.run_loop::(dep)) } From 4613f9e4e3d1cd00443ac1876a067476ca6d440a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:34:01 +0200 Subject: [PATCH 0912/1393] Bump syn from 2.0.55 to 2.0.57 (#2876) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.55 to 2.0.57. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.55...2.0.57) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- macros/Cargo.toml | 2 +- testing-macros/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 2b75d20e1e..02c845457d 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -7,7 +7,7 @@ description = "Macros for hotshot tests" [dependencies] # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.55", features = ["full", "extra-traits"] } +syn = { version = "2.0.57", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" [lib] diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 0d890e14b9..5d0ba925cb 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -25,7 +25,7 @@ tracing = { workspace = true } serde = { workspace = true } # proc macro stuff quote = "1.0.33" -syn = { version = "2.0.55", features = ["full", "extra-traits"] } +syn = { version = "2.0.57", features = ["full", "extra-traits"] } proc-macro2 = "1.0.79" derive_builder = "0.20.0" From d5a8fd83ccc1689a0853bd8e57c7d11c384d1c17 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 2 Apr 2024 12:04:53 -0400 Subject: [PATCH 0913/1393] Allow unordered test events (#2879) --- macros/src/lib.rs | 10 ++-- testing/src/predicates.rs | 96 +++++++++++++++++++++++++++++++-------- testing/src/script.rs | 81 ++++++++++++++++++++++----------- 3 files changed, 137 insertions(+), 50 deletions(-) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 91e2dee0eb..b2d4d12cc7 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -106,13 +106,13 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { while let Ok(received_output) = test_receiver.try_recv() { tracing::debug!("Test received: {:?}", received_output); - let output_asserts = &#task_expectations[stage_number].output_asserts; + let output_asserts = &mut #task_expectations[stage_number].output_asserts; if #output_index_names >= output_asserts.len() { panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); }; - let assert = &output_asserts[#output_index_names]; + let assert = &mut output_asserts[#output_index_names]; validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, assert); @@ -134,13 +134,13 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { while let Ok(Ok(received_output)) = async_timeout(Duration::from_millis(250), test_receiver.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); - let output_asserts = &#task_expectations[stage_number].output_asserts; + let output_asserts = &mut #task_expectations[stage_number].output_asserts; if #output_index_names >= output_asserts.len() { panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); }; - let assert = &output_asserts[#output_index_names]; + let mut assert = &mut output_asserts[#output_index_names]; validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, assert); @@ -157,7 +157,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { panic_missing_output_in_script(stage_number, #script_names.to_string(), &output_asserts[#output_index_names]); } - let task_state_asserts = &#task_expectations[stage_number].task_state_asserts; + let task_state_asserts = &mut #task_expectations[stage_number].task_state_asserts; for assert in task_state_asserts { validate_task_state_or_panic_in_script(stage_number, #script_names.to_string(), #task_names.state(), assert); diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index 67f42d7440..f43838af90 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -1,21 +1,63 @@ -use std::sync::Arc; - use hotshot_task_impls::{ consensus::{null_block, ConsensusTaskState}, events::HotShotEvent, events::HotShotEvent::*, }; use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use std::collections::HashSet; +use std::sync::Arc; use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +#[derive(Eq, PartialEq, Copy, Clone)] +pub enum PredicateResult { + Pass, + + Fail, + + Incomplete, +} + +impl From for PredicateResult { + fn from(boolean: bool) -> Self { + match boolean { + true => PredicateResult::Pass, + false => PredicateResult::Fail, + } + } +} + pub struct Predicate { - pub function: Box bool>, + pub function: Box PredicateResult>, pub info: String, } +pub fn all(events: Vec>) -> Predicate>> +where + TYPES: NodeType, +{ + let info = format!("{:?}", events); + let mut set: HashSet<_> = events.into_iter().collect(); + + let function = move |e: &Arc>| match set.take(e.as_ref()) { + Some(_) => { + if set.is_empty() { + PredicateResult::Pass + } else { + PredicateResult::Incomplete + } + } + None => PredicateResult::Fail, + }; + + Predicate { + function: Box::new(function), + info, + } +} + impl std::fmt::Debug for Predicate { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { write!(f, "{}", self.info) @@ -30,7 +72,7 @@ where let event = Arc::new(event); Predicate { - function: Box::new(move |e| e == &event), + function: Box::new(move |e| PredicateResult::from(e == &event)), info, } } @@ -47,7 +89,7 @@ where let event = Arc::new(event); let info = format!("{:?}", event); Predicate { - function: Box::new(move |e| e == &event), + function: Box::new(move |e| PredicateResult::from(e == &event)), info, } }) @@ -59,7 +101,8 @@ where TYPES: NodeType, { let info = "LeafDecided".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), LeafDecided(_)); + let function = + |e: &Arc>| PredicateResult::from(matches!(e.as_ref(), LeafDecided(_))); Predicate { function: Box::new(function), @@ -72,7 +115,9 @@ where TYPES: NodeType, { let info = "QuorumVoteSend".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), QuorumVoteSend(_)); + let function = |e: &Arc>| { + PredicateResult::from(matches!(e.as_ref(), QuorumVoteSend(_))) + }; Predicate { function: Box::new(function), @@ -85,7 +130,8 @@ where TYPES: NodeType, { let info = "ViewChange".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), ViewChange(_)); + let function = + |e: &Arc>| PredicateResult::from(matches!(e.as_ref(), ViewChange(_))); Predicate { function: Box::new(function), @@ -98,7 +144,9 @@ where TYPES: NodeType, { let info = "UpgradeCertificateFormed".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), UpgradeCertificateFormed(_)); + let function = |e: &Arc>| { + PredicateResult::from(matches!(e.as_ref(), UpgradeCertificateFormed(_))) + }; Predicate { function: Box::new(function), @@ -112,8 +160,10 @@ where { let info = "QuorumProposalSend with UpgradeCertificate attached".to_string(); let function = |e: &Arc>| match e.as_ref() { - QuorumProposalSend(proposal, _) => proposal.data.upgrade_certificate.is_some(), - _ => false, + QuorumProposalSend(proposal, _) => { + PredicateResult::from(proposal.data.upgrade_certificate.is_some()) + } + _ => PredicateResult::Fail, }; Predicate { @@ -127,7 +177,9 @@ where TYPES: NodeType, { let info = "QuorumProposalValidated".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), QuorumProposalValidated(_)); + let function = |e: &Arc>| { + PredicateResult::from(matches!(e.as_ref(), QuorumProposalValidated(_))) + }; Predicate { function: Box::new(function), @@ -140,7 +192,9 @@ where TYPES: NodeType, { let info = "QuorumProposalSend".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), QuorumProposalSend(_, _)); + let function = |e: &Arc>| { + PredicateResult::from(matches!(e.as_ref(), QuorumProposalSend(_, _))) + }; Predicate { function: Box::new(function), @@ -156,11 +210,11 @@ where { let info = "QuorumProposalSend with null block payload".to_string(); let function = move |e: &Arc>| match e.as_ref() { - QuorumProposalSend(proposal, _) => { + QuorumProposalSend(proposal, _) => PredicateResult::from( Some(proposal.data.block_header.payload_commitment()) - == null_block::commitment(num_storage_nodes) - } - _ => false, + == null_block::commitment(num_storage_nodes), + ), + _ => PredicateResult::Fail, }; Predicate { @@ -174,7 +228,9 @@ where TYPES: NodeType, { let info = "TimeoutVoteSend".to_string(); - let function = |e: &Arc>| matches!(e.as_ref(), TimeoutVoteSend(_)); + let function = |e: &Arc>| { + PredicateResult::from(matches!(e.as_ref(), TimeoutVoteSend(_))) + }; Predicate { function: Box::new(function), @@ -189,8 +245,10 @@ pub fn consensus_predicate( function: Box Fn(&'a ConsensusTaskTestState) -> bool>, info: &str, ) -> Predicate { + let wrapped_function = move |e: &ConsensusTaskTestState| PredicateResult::from(function(e)); + Predicate { - function, + function: Box::new(wrapped_function), info: info.to_string(), } } diff --git a/testing/src/script.rs b/testing/src/script.rs index af762650e3..a92e346d59 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -1,4 +1,4 @@ -use crate::predicates::Predicate; +use crate::predicates::{Predicate, PredicateResult}; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; @@ -42,26 +42,36 @@ where panic!("{}", output_missing_error); } -pub fn validate_task_state_or_panic(stage_number: usize, state: &S, assert: &Predicate) { +pub fn validate_task_state_or_panic(stage_number: usize, state: &S, assert: &mut Predicate) { assert!( - (assert.function)(state), + (assert.function)(state) == PredicateResult::Pass, "Stage {} | Task state failed to satisfy: {:?}", stage_number, assert ); } -pub fn validate_output_or_panic(stage_number: usize, output: &S, assert: &Predicate) +pub fn validate_output_or_panic( + stage_number: usize, + output: &S, + assert: &mut Predicate, +) -> PredicateResult where S: std::fmt::Debug, { - assert!( - (assert.function)(output), - "Stage {} | Output failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", - stage_number, - assert, - output - ); + let result = (assert.function)(output); + + match result { + PredicateResult::Pass => result, + PredicateResult::Incomplete => result, + PredicateResult::Fail => { + format!( + "Stage {} | Output failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", + stage_number, assert, output + ); + result + } + } } /// `run_test_script` reads a triple (inputs, outputs, asserts) in a `TestScript`, @@ -84,7 +94,7 @@ pub async fn run_test_script { let registry = Arc::new(TaskRegistry::default()); - let (to_task, from_test) = broadcast(1024); + let (to_task, mut from_test) = broadcast(1024); let (to_test, mut from_task) = broadcast(1024); let mut task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); @@ -92,26 +102,37 @@ pub async fn run_test_script for (stage_number, stage) in script.iter_mut().enumerate() { tracing::debug!("Beginning test stage {}", stage_number); for input in &stage.inputs { + to_task + .broadcast(input.clone().into()) + .await + .expect("Failed to broadcast input message"); + if !task.state_mut().filter(&Arc::new(input.clone())) { tracing::debug!("Test sent: {:?}", input.clone()); - to_task - .broadcast(input.clone().into()) - .await - .expect("Failed to broadcast input message"); - if let Some(res) = S::handle_event(input.clone().into(), &mut task).await { task.state_mut().handle_result(&res).await; } } + + while from_test.try_recv().is_ok() {} } - for assert in &stage.outputs { - if let Ok(Ok(received_output)) = + for assert in &mut stage.outputs { + let mut result = PredicateResult::Incomplete; + + while let Ok(Ok(received_output)) = async_timeout(RECV_TIMEOUT_SEC, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); - validate_output_or_panic(stage_number, &received_output, assert); + + result = validate_output_or_panic(stage_number, &received_output, assert); + + to_task + .broadcast(received_output.clone()) + .await + .expect("Failed to re-broadcast output message"); + if !task.state_mut().filter(&received_output.clone()) { tracing::debug!("Test sent: {:?}", received_output.clone()); @@ -119,12 +140,20 @@ pub async fn run_test_script task.state_mut().handle_result(&res).await; } } - } else { + + while from_test.try_recv().is_ok() {} + + if result == PredicateResult::Pass { + break; + } + } + + if result == PredicateResult::Incomplete { panic_missing_output(stage_number, assert); } } - for assert in &stage.asserts { + for assert in &mut stage.asserts { validate_task_state_or_panic(stage_number, task.state(), assert); } @@ -172,10 +201,10 @@ pub fn validate_task_state_or_panic_in_script( stage_number: usize, script_name: String, state: &S, - assert: &Predicate, + assert: &mut Predicate, ) { assert!( - (assert.function)(state), + (assert.function)(state) == PredicateResult::Pass, "Stage {} | Task state in {} failed to satisfy: {:?}", stage_number, script_name, @@ -187,10 +216,10 @@ pub fn validate_output_or_panic_in_script( stage_number: usize, script_name: String, output: &S, - assert: &Predicate, + assert: &mut Predicate, ) { assert!( - (assert.function)(output), + (assert.function)(output) == PredicateResult::Pass, "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", stage_number, script_name, From 1efb80ec4129ecd8a961adb479457585db4713d5 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 2 Apr 2024 12:20:26 -0400 Subject: [PATCH 0914/1393] deps: remove nonbreaking pins (#2880) --- builder-api/Cargo.toml | 2 +- examples/Cargo.toml | 16 ++++++++-------- hotshot-qc/Cargo.toml | 6 +++--- hotshot-stake-table/Cargo.toml | 6 +++--- hotshot/Cargo.toml | 14 +++++++------- libp2p-networking/Cargo.toml | 6 +++--- macros/Cargo.toml | 6 +++--- orchestrator/Cargo.toml | 10 +++++----- task/Cargo.toml | 4 ++-- testing-macros/Cargo.toml | 8 ++++---- types/Cargo.toml | 2 +- web_server/Cargo.toml | 2 +- 12 files changed, 41 insertions(+), 41 deletions(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 9fc9147a33..868229ab77 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] async-trait = { workspace = true } -clap = { version = "4.5", features = ["derive", "env"] } +clap.workspace = true derive_more = "0.99" futures = "0.3" hotshot-types = { path = "../types" } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index a371e0f458..0a477cbc90 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -105,11 +105,11 @@ async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } -bimap = "0.6.3" -clap = { version = "4.5", features = ["derive", "env"], optional = true } +bimap = "0.6" +clap = { workspace = true, optional = true } commit = { workspace = true } custom_debug = { workspace = true } -dashmap = "5.5.1" +dashmap = "5" either = { workspace = true } futures = { workspace = true } hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } @@ -123,9 +123,9 @@ serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -derive_more = "0.99.17" -portpicker = "0.1.1" -lru = "0.12.3" +derive_more = "0.99" +portpicker = "0.1" +lru = "0.12" hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } @@ -156,11 +156,11 @@ cdn-broker = { workspace = true, features = [ cdn-marshal = { workspace = true, features = ["runtime-async-std"] } [dev-dependencies] -clap = { version = "4.5", features = ["derive", "env"] } +clap.workspace = true toml = { workspace = true } blake3 = { workspace = true } anyhow.workspace = true -tracing-subscriber = "0.3.18" +tracing-subscriber = "0.3" [lints] workspace = true diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index f5bd2cdd73..094bd3e9d6 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -7,10 +7,10 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] -ark-bls12-377 = "0.4.0" -ark-bn254 = "0.4.0" +ark-bls12-377 = "0.4" +ark-bn254 = "0.4" ark-ec = { workspace = true } -ark-ff = "0.4.0" +ark-ff = "0.4" ark-std = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 601d6c1f63..40d7507c77 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -7,9 +7,9 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] -ark-bn254 = "0.4.0" -ark-ed-on-bn254 = "0.4.0" -ark-ff = "0.4.0" +ark-bn254 = "0.4" +ark-ed-on-bn254 = "0.4" +ark-ff = "0.4" ark-serialize = { workspace = true } ark-std = { workspace = true } digest = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 4c8178a7f8..81136726ec 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -26,13 +26,13 @@ async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } -bimap = "0.6.3" +bimap = "0.6" bincode = { workspace = true } -clap = { version = "4.5", features = ["derive", "env"], optional = true } +clap = { workspace = true, optional = true } commit = { workspace = true } custom_debug = { workspace = true } -dashmap = "5.5.1" -derive_more = "0.99.17" +dashmap = "5" +derive_more = "0.99" either = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } @@ -42,8 +42,8 @@ hotshot-types = { path = "../types" } hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } -lru = "0.12.3" -portpicker = "0.1.1" +lru = "0.12" +portpicker = "0.1" rand = { workspace = true } serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } @@ -76,7 +76,7 @@ cdn-marshal = { workspace = true, features = ["runtime-async-std"] } [dev-dependencies] blake3 = { workspace = true } -clap = { version = "4.5", features = ["derive", "env"] } +clap.workspace = true toml = { workspace = true } [lints] diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ae5ada4a94..8685999874 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -18,7 +18,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } custom_debug = { workspace = true } -derive_builder = "0.20.0" +derive_builder = "0.20" either = { workspace = true } futures = { workspace = true } hotshot-types = { path = "../types" } @@ -34,13 +34,13 @@ tide = { version = "0.16", optional = true, default-features = false, features = ] } tracing = { workspace = true } versioned-binary-serialization = { workspace = true } -void = "1.0.2" +void = "1" lazy_static = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } -tokio-stream = "0.1.14" +tokio-stream = "0.1" [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] libp2p = { workspace = true, features = ["async-std"] } async-std = { workspace = true } diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 02c845457d..40604461e1 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -6,9 +6,9 @@ description = "Macros for hotshot tests" [dependencies] # proc macro stuff -quote = "1.0.33" -syn = { version = "2.0.57", features = ["full", "extra-traits"] } -proc-macro2 = "1.0.79" +quote = "1" +syn = { version = "2", features = ["full", "extra-traits"] } +proc-macro2 = "1" [lib] proc-macro = true diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index b08a60c312..ff9d45125e 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } -clap = { version = "4.0", features = ["derive", "env"], optional = false } +clap.workspace = true futures = { workspace = true } libp2p = { workspace = true } blake3 = { workspace = true } @@ -17,11 +17,11 @@ tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } toml = { workspace = true } -thiserror = "1.0.50" -serde-inline-default = "0.1.1" -csv = "1.3.0" +thiserror = "1" +serde-inline-default = "0.1" +csv = "1" versioned-binary-serialization = { workspace = true } -multiaddr = "0.18.1" +multiaddr = "0.18" anyhow.workspace = true bincode.workspace = true diff --git a/task/Cargo.toml b/task/Cargo.toml index 869fe29760..7e4dadd3a4 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -8,8 +8,8 @@ edition = { workspace = true } [dependencies] -futures = "0.3.30" -async-broadcast = "0.7.0" +futures = "0.3" +async-broadcast = "0.7" tracing = { workspace = true } async-compatibility-layer = { workspace = true } diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml index 5d0ba925cb..282c8feb67 100644 --- a/testing-macros/Cargo.toml +++ b/testing-macros/Cargo.toml @@ -24,10 +24,10 @@ snafu = { workspace = true } tracing = { workspace = true } serde = { workspace = true } # proc macro stuff -quote = "1.0.33" -syn = { version = "2.0.57", features = ["full", "extra-traits"] } -proc-macro2 = "1.0.79" -derive_builder = "0.20.0" +quote = "1" +syn = { version = "2", features = ["full", "extra-traits"] } +proc-macro2 = "1" +derive_builder = "0.20" [dev-dependencies] async-lock = { workspace = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index 720932342b..8282ef1e2a 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -41,7 +41,7 @@ snafu = { workspace = true } time = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } -derivative = "2.2.0" +derivative = "2" jf-primitives = { workspace = true } jf-plonk = { workspace = true } jf-utils = { workspace = true } diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index 3a641c3a91..fa1bf01e36 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -8,7 +8,7 @@ edition = { workspace = true } [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } -clap = { version = "4.0", features = ["derive", "env"], optional = false } +clap.workspace = true futures = { workspace = true } hotshot-types = { path = "../types" } tide-disco = { workspace = true } From d570281e63377e50b242a97867a5c6ea9e196bb9 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 2 Apr 2024 14:55:32 -0400 Subject: [PATCH 0915/1393] Example upgrade (#2792) --- hotshot/Cargo.toml | 1 + hotshot/src/lib.rs | 20 +- hotshot/src/tasks/mod.rs | 14 + hotshot/src/tasks/task_state.rs | 7 +- .../src/traits/networking/combined_network.rs | 9 +- .../traits/networking/web_server_network.rs | 93 ++++- orchestrator/run-config.toml | 2 +- task-impls/Cargo.toml | 8 +- task-impls/src/consensus.rs | 42 +- task-impls/src/events.rs | 6 +- task-impls/src/network.rs | 380 ++++++++++-------- task-impls/src/request.rs | 9 +- task-impls/src/response.rs | 9 +- task-impls/src/upgrade.rs | 98 ++++- testing/tests/consensus_task.rs | 2 +- testing/tests/network_task.rs | 3 + testing/tests/proposal_ordering.rs | 2 +- testing/tests/unit/message.rs | 5 +- types/Cargo.toml | 6 +- types/src/constants.rs | 3 + types/src/message.rs | 33 +- types/src/traits/network.rs | 12 +- types/src/vote.rs | 9 +- web_server/api.toml | 36 ++ web_server/src/config.rs | 24 +- web_server/src/lib.rs | 151 +++++++ 26 files changed, 712 insertions(+), 272 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 81136726ec..a70c2ff16f 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -9,6 +9,7 @@ rust-version = { workspace = true } [features] default = ["docs", "doc-images"] +example-upgrade = ["hotshot-task-impls/example-upgrade"] gpu-vid = ["hotshot-task-impls/gpu-vid"] # Features required for binaries diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index afb440be43..4724bcc648 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -29,7 +29,8 @@ use futures::join; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; -use hotshot_types::constants::{EVENT_CHANNEL_SIZE, STATIC_VER_0_1}; +use hotshot_types::constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, STATIC_VER_0_1}; +use versioned_binary_serialization::version::Version; use hotshot_task::task::TaskRegistry; use hotshot_types::{ @@ -135,6 +136,9 @@ pub struct SystemContext> { /// The hotstuff implementation consensus: Arc>>, + /// The network version + version: Arc>, + // global_registry: GlobalRegistry, /// Access to the output event stream. pub output_event_stream: (Sender>, InactiveReceiver>), @@ -239,6 +243,7 @@ impl> SystemContext { metrics: consensus_metrics.clone(), }; let consensus = Arc::new(RwLock::new(consensus)); + let version = Arc::new(RwLock::new(BASE_VERSION)); let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); let (mut external_tx, external_rx) = broadcast(EVENT_CHANNEL_SIZE); @@ -253,6 +258,7 @@ impl> SystemContext { public_key, private_key, config, + version, networks: Arc::new(networks), memberships: Arc::new(memberships), _metrics: consensus_metrics.clone(), @@ -503,11 +509,21 @@ impl> SystemContext { event_tx.clone(), event_rx.activate_cloned(), quorum_network.clone(), - quorum_membership, + quorum_membership.clone(), network::quorum_filter, handle.get_storage().clone(), ) .await; + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + quorum_network.clone(), + quorum_membership, + network::upgrade_filter, + handle.get_storage().clone(), + ) + .await; add_network_event_task( registry.clone(), event_tx.clone(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 0e7d3b0991..5140f44529 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -25,6 +25,7 @@ use hotshot_task_impls::{ vid::VIDTaskState, view_sync::ViewSyncTaskState, }; +use hotshot_types::constants::VERSION_0_1; use hotshot_types::{ constants::Version01, message::Message, @@ -133,6 +134,7 @@ pub async fn add_network_event_task< let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState { channel, view: TYPES::Time::genesis(), + version: VERSION_0_1, membership, filter, storage, @@ -155,6 +157,18 @@ pub async fn inject_consensus_polls< .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) .await; + // Poll (forever) for upgrade proposals + consensus_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForUpgradeProposal(0)) + .await; + + // Poll (forever) for upgrade votes + consensus_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForUpgradeVotes(0)) + .await; + // See if we're in the DA committee // This will not work for epochs (because dynamic subscription // With the Push CDN, we are _always_ polling for latest anyway. diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 43f2ea3fd1..d0f9cc7754 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -7,7 +7,6 @@ use hotshot_task_impls::{ transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, view_sync::ViewSyncTaskState, }; -use hotshot_types::constants::VERSION_0_1; use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -64,7 +63,10 @@ impl> CreateTaskState cur_view: handle.get_cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_network: handle.hotshot.networks.quorum_network.clone(), + #[cfg(not(feature = "example-upgrade"))] should_vote: |_upgrade_proposal| false, + #[cfg(feature = "example-upgrade")] + should_vote: |_upgrade_proposal| true, vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -184,6 +186,7 @@ impl> CreateTaskState ConsensusTaskState { consensus, timeout: handle.hotshot.config.next_view_timeout, + round_start_delay: handle.hotshot.config.round_start_delay, cur_view: handle.get_cur_view().await, payload_commitment_and_metadata: None, api: handle.clone(), @@ -194,7 +197,7 @@ impl> CreateTaskState upgrade_cert: None, proposal_cert: None, decided_upgrade_cert: None, - current_network_version: VERSION_0_1, + version: handle.hotshot.version.clone(), output_event_stream: handle.hotshot.output_event_stream.0.clone(), current_proposal: None, id: handle.hotshot.id, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index bcd928cbfa..8580cf356b 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -29,7 +29,7 @@ use hotshot_types::traits::network::{ use hotshot_types::{ boxed_sync, data::ViewNumber, - message::Message, + message::{Message, SequencingMessage}, traits::{ network::{ConnectedNetwork, ConsensusIntentEvent, ResponseChannel, ResponseMessage}, node_implementation::NodeType, @@ -43,7 +43,6 @@ use std::{collections::hash_map::DefaultHasher, sync::Arc}; use async_compatibility_layer::art::{async_sleep, async_spawn}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use either::Either; use futures::future::join_all; use hotshot_task_impls::helpers::cancel_task; use hotshot_types::message::{GeneralConsensusMessage, MessageKind}; @@ -129,11 +128,11 @@ impl CombinedNetworks { /// a helper function returning a bool whether a given message is of delayable type fn should_delay(message: &Message) -> bool { match &message.kind { - MessageKind::Consensus(consensus_message) => match &consensus_message.0 { - Either::Left(general_consensus_message) => { + MessageKind::Consensus(consensus_message) => match &consensus_message { + SequencingMessage::General(general_consensus_message) => { matches!(general_consensus_message, GeneralConsensusMessage::Vote(_)) } - Either::Right(_) => true, + SequencingMessage::Committee(_) => true, }, MessageKind::Data(_) => false, } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 31c698f27e..6384a0cdb9 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -88,8 +88,11 @@ impl WebServerNetwork { #[allow(clippy::type_complexity)] /// A handle on the task polling for latest quorum propsal latest_proposal_task: Arc>>>, + /// A handle on the task polling for an upgrade propsal + upgrade_proposal_task: Arc>>>, + /// A handle on the task polling for an upgrade vote + upgrade_vote_task: Arc>>>, #[allow(clippy::type_complexity)] /// A handle on the task polling for the latest view sync certificate latest_view_sync_certificate_task: Arc>>>, @@ -255,6 +262,7 @@ impl Inner, seen_view_sync_certificates: &mut LruCache, ) -> bool { @@ -303,6 +311,13 @@ impl Inner { + let vote = deserialized_message.clone(); + *upgrade_vote_index += 1; + poll_queue.write().await.push(vote); + + return false; + } MessagePurpose::DAC => { debug!( "Received DAC from web server for view {} {}", @@ -333,7 +348,7 @@ impl Inner { + MessagePurpose::UpgradeProposal => { poll_queue.write().await.push(deserialized_message.clone()); return true; @@ -355,6 +370,7 @@ impl Inner Result<(), NetworkError> { let mut vote_index = 0; let mut tx_index = 0; + let mut upgrade_vote_index = 0; let mut seen_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); let mut seen_view_sync_certificates = LruCache::new(NonZeroUsize::new(100).unwrap()); @@ -383,7 +399,10 @@ impl Inner config::get_da_certificate_route(view_number), MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` - MessagePurpose::Upgrade => config::get_upgrade_route(view_number), + MessagePurpose::UpgradeProposal => config::get_upgrade_proposal_route(0), + MessagePurpose::UpgradeVote => { + config::get_upgrade_vote_route(0, upgrade_vote_index) + } }; if let MessagePurpose::Data = message_purpose { @@ -437,6 +456,7 @@ impl Inner view_sync_vote_task_map: Arc::default(), txn_task_map: Arc::default(), latest_proposal_task: Arc::default(), + upgrade_proposal_task: Arc::default(), + upgrade_vote_task: Arc::default(), latest_view_sync_certificate_task: Arc::default(), }); @@ -617,7 +639,8 @@ impl MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), MessagePurpose::DAC => config::post_da_certificate_route(*view_number), MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), - MessagePurpose::Upgrade => config::post_upgrade_route(*view_number), + MessagePurpose::UpgradeProposal => config::post_upgrade_proposal_route(0), + MessagePurpose::UpgradeVote => config::post_upgrade_vote_route(0), }; let network_msg: SendMsg> = SendMsg { @@ -930,6 +953,66 @@ impl }); } } + ConsensusIntentEvent::PollForUpgradeProposal(view_number) => { + // Only start this task if we haven't already started it. + let mut cancel_handle = self.inner.upgrade_proposal_task.write().await; + if cancel_handle.is_none() { + error!("Starting poll for upgrade proposals!"); + let inner = self.inner.clone(); + + // Create sender and receiver for cancelling the task + let (sender, receiver) = unbounded(); + *cancel_handle = Some(sender); + + // Create the new task + async_spawn(async move { + if let Err(e) = inner + .poll_web_server( + receiver, + MessagePurpose::UpgradeProposal, + view_number, + Duration::from_millis(500), + ) + .await + { + warn!( + "Background receive latest upgrade proposal polling encountered an error: {:?}", + e + ); + } + }); + } + } + ConsensusIntentEvent::PollForUpgradeVotes(view_number) => { + // Only start this task if we haven't already started it. + let mut cancel_handle = self.inner.upgrade_vote_task.write().await; + if cancel_handle.is_none() { + error!("Starting poll for upgrade proposals!"); + let inner = self.inner.clone(); + + // Create sender and receiver for cancelling the task + let (sender, receiver) = unbounded(); + *cancel_handle = Some(sender); + + // Create the new task + async_spawn(async move { + if let Err(e) = inner + .poll_web_server( + receiver, + MessagePurpose::UpgradeVote, + view_number, + Duration::from_millis(500), + ) + .await + { + warn!( + "Background receive latest upgrade proposal polling encountered an error: {:?}", + e + ); + } + }); + } + } ConsensusIntentEvent::PollForLatestViewSyncCertificate => { // Only start this task if we haven't already started it. let mut cancel_handle = self.inner.latest_view_sync_certificate_task.write().await; diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 77884d58a5..a1700cdff5 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,4 +1,4 @@ -rounds = 10 +rounds = 100 transactions_per_round = 10 transaction_size = 1000 node_index = 0 diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 95fa746578..9596195cf3 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -5,6 +5,11 @@ edition = { workspace = true } name = "hotshot-task-impls" version = { workspace = true } +[features] +example-upgrade = [] +gpu-vid = ["hotshot-types/gpu-vid"] + + [dependencies] async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } @@ -31,9 +36,6 @@ time = { workspace = true } tracing = { workspace = true } versioned-binary-serialization = { workspace = true } -[features] -gpu-vid = ["hotshot-types/gpu-vid"] - [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index be38867519..8da6d84734 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -35,7 +35,6 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use hotshot_types::{constants::LOOK_AHEAD, data::ViewChangeEvidence}; -use tracing::warn; use versioned_binary_serialization::version::Version; use crate::vote_collection::HandleVoteEvent; @@ -48,7 +47,7 @@ use std::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument}; +use tracing::{debug, error, info, instrument, warn}; /// Alias for the block payload commitment and the associated metadata. pub struct CommitmentAndMetadata { @@ -200,6 +199,8 @@ pub struct ConsensusTaskState< pub consensus: Arc>>, /// View timeout from config. pub timeout: u64, + /// Round start delay from config, in milliseconds. + pub round_start_delay: u64, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -247,9 +248,8 @@ pub struct ConsensusTaskState< /// most recent decided upgrade certificate pub decided_upgrade_cert: Option>, - /// The current version of the network. - /// Updated on view change based on the most recent decided upgrade certificate. - pub current_network_version: Version, + /// Globally shared reference to the current network version. + pub version: Arc>, /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -1102,6 +1102,7 @@ impl, A: ConsensusApi + // Update our current upgrade_cert as long as it's still relevant. if cert.view_number >= self.cur_view { + debug!("Updating current upgrade_cert"); self.upgrade_cert = Some(cert.clone()); } } @@ -1192,23 +1193,36 @@ impl, A: ConsensusApi + )) .await; - // update the view in state to the one in the message - // Publish a view change event to the application - if !self.update_view(new_view, &event_stream).await { - debug!("view not updated"); - return; - } - // If we have a decided upgrade certificate, // we may need to upgrade the protocol version on a view change. if let Some(ref cert) = self.decided_upgrade_cert { if new_view >= cert.data.new_version_first_block { - self.current_network_version = cert.data.new_version; + warn!( + "Updating version based on a decided upgrade cert: {:?}", + cert + ); + let mut version = self.version.write().await; + *version = cert.data.new_version; + + broadcast_event( + Arc::new(HotShotEvent::VersionUpgrade(cert.data.new_version)), + &event_stream, + ) + .await; + // Discard the old upgrade certificate, which is no longer relevant. self.decided_upgrade_cert = None; } } + // update the view in state to the one in the message + // Publish a view change event to the application + // Returns if the view does not need updating. + if !self.update_view(new_view, &event_stream).await { + debug!("view not updated"); + return; + } + broadcast_event( Event { view_number: old_view_number, @@ -1498,6 +1512,7 @@ impl, A: ConsensusApi + .as_ref() .is_some_and(|cert| cert.view_number == view) { + debug!("Attaching upgrade certificate to proposal."); // If the cert view number matches, set upgrade_cert to self.upgrade_cert // and set self.upgrade_cert to None. // @@ -1542,6 +1557,7 @@ impl, A: ConsensusApi + }; debug!("Sending proposal for view {:?}", view); + async_sleep(Duration::from_millis(self.round_start_delay)).await; broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( message.clone(), diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 31d6475b06..b651131f86 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -16,6 +16,7 @@ use hotshot_types::{ traits::{node_implementation::NodeType, BlockPayload}, vid::VidCommitment, }; +use versioned_binary_serialization::version::Version; /// Marker that the task completed #[derive(Eq, Hash, PartialEq, Debug, Clone)] @@ -132,14 +133,15 @@ pub enum HotShotEvent { /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network - UpgradeProposalSend(UpgradeProposal), + UpgradeProposalSend(Proposal>, TYPES::SignatureKey), /// Upgrade vote has been received from the network UpgradeVoteRecv(UpgradeVote), /// Upgrade vote has been sent to the network UpgradeVoteSend(UpgradeVote), /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), - + /// HotShot was upgraded, with a new network version. + VersionUpgrade(Version), /** Quorum Proposal Task **/ /// Dummy quorum proposal to test if the quorum proposal dependency task works. DummyQuorumProposalSend(TYPES::Time), diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ade3b1297f..857e149865 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -5,13 +5,12 @@ use crate::{ use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use either::Either::{self, Left, Right}; use hotshot_types::{event::HotShotAction, traits::storage::Storage}; use std::collections::HashMap; use std::sync::Arc; use hotshot_task::task::{Task, TaskState}; -use hotshot_types::constants::STATIC_VER_0_1; +use hotshot_types::constants::{BASE_VERSION, STATIC_VER_0_1}; use hotshot_types::{ data::{VidDisperse, VidDisperseShare}, message::{ @@ -26,7 +25,8 @@ use hotshot_types::{ vote::{HasViewNumber, Vote}, }; use tracing::instrument; -use tracing::{error, warn}; +use tracing::{debug, error, warn}; +use versioned_binary_serialization::version::Version; /// quorum filter pub fn quorum_filter(event: &Arc>) -> bool { @@ -34,30 +34,30 @@ pub fn quorum_filter(event: &Arc>) -> bool event.as_ref(), HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) - | HotShotEvent::Shutdown | HotShotEvent::DACSend(_, _) - | HotShotEvent::ViewChange(_) | HotShotEvent::TimeoutVoteSend(_) ) } +/// upgrade filter +pub fn upgrade_filter(event: &Arc>) -> bool { + !matches!( + event.as_ref(), + HotShotEvent::UpgradeProposalSend(_, _) | HotShotEvent::UpgradeVoteSend(_) + ) +} + /// committee filter pub fn committee_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::DAProposalSend(_, _) - | HotShotEvent::DAVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) + HotShotEvent::DAProposalSend(_, _) | HotShotEvent::DAVoteSend(_) ) } /// vid filter pub fn vid_filter(event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::Shutdown | HotShotEvent::VidDisperseSend(_, _) | HotShotEvent::ViewChange(_) - ) + !matches!(event.as_ref(), HotShotEvent::VidDisperseSend(_, _)) } /// view sync filter @@ -70,8 +70,6 @@ pub fn view_sync_filter(event: &Arc>) -> bo | HotShotEvent::ViewSyncPreCommitVoteSend(_) | HotShotEvent::ViewSyncCommitVoteSend(_) | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) ) } /// the network message task state @@ -111,8 +109,8 @@ impl NetworkMessageTaskState { let sender = message.sender; match message.kind { MessageKind::Consensus(consensus_message) => { - let event = match consensus_message.0 { - Either::Left(general_message) => match general_message { + let event = match consensus_message { + SequencingMessage::General(general_message) => match general_message { GeneralConsensusMessage::Proposal(proposal) => { HotShotEvent::QuorumProposalRecv(proposal, sender) } @@ -147,23 +145,26 @@ impl NetworkMessageTaskState { HotShotEvent::UpgradeProposalRecv(message, sender) } GeneralConsensusMessage::UpgradeVote(message) => { + error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } }, - Either::Right(committee_message) => match committee_message { - CommitteeConsensusMessage::DAProposal(proposal) => { - HotShotEvent::DAProposalRecv(proposal, sender) + SequencingMessage::Committee(committee_message) => { + match committee_message { + CommitteeConsensusMessage::DAProposal(proposal) => { + HotShotEvent::DAProposalRecv(proposal, sender) + } + CommitteeConsensusMessage::DAVote(vote) => { + HotShotEvent::DAVoteRecv(vote.clone()) + } + CommitteeConsensusMessage::DACertificate(cert) => { + HotShotEvent::DACRecv(cert) + } + CommitteeConsensusMessage::VidDisperseMsg(proposal) => { + HotShotEvent::VidDisperseRecv(proposal) + } } - CommitteeConsensusMessage::DAVote(vote) => { - HotShotEvent::DAVoteRecv(vote.clone()) - } - CommitteeConsensusMessage::DACertificate(cert) => { - HotShotEvent::DACRecv(cert) - } - CommitteeConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VidDisperseRecv(proposal) - } - }, + } }; // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. @@ -171,7 +172,7 @@ impl NetworkMessageTaskState { broadcast_event(Arc::new(event), &self.event_stream).await; } MessageKind::Data(message) => match message { - hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { + DataMessage::SubmitTransaction(transaction, _) => { transactions.push(transaction); } DataMessage::DataResponse(_) | DataMessage::RequestData(_) => { @@ -200,6 +201,8 @@ pub struct NetworkEventTaskState< pub channel: Arc, /// view number pub view: TYPES::Time, + /// version + pub version: Version, /// membership for the channel pub membership: TYPES::Membership, // TODO ED Need to add exchange so we can get the recipient key and our own key? @@ -237,6 +240,12 @@ impl< fn filter(&self, event: &Self::Event) -> bool { (self.filter)(event) + && !matches!( + event.as_ref(), + HotShotEvent::VersionUpgrade(_) + | HotShotEvent::ViewChange(_) + | HotShotEvent::Shutdown + ) } } @@ -249,8 +258,6 @@ impl< /// Handle the given event. /// /// Returns the completion status. - /// # Panics - /// Panic sif a direct message event is received with no recipient #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 #[instrument(skip_all, fields(view = *self.view), name = "Network Task", level = "error")] pub async fn handle_event( @@ -259,139 +266,155 @@ impl< membership: &TYPES::Membership, ) -> Option { let mut maybe_action = None; - let (sender, message_kind, transmit_type, recipient) = match event.as_ref().clone() { - HotShotEvent::QuorumProposalSend(proposal, sender) => { - maybe_action = Some(HotShotAction::Propose); - ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::Proposal(proposal), - ))), - TransmitType::Broadcast, - None, - ) - } + let (sender, message_kind, transmit): (_, _, TransmitType) = + match event.as_ref().clone() { + HotShotEvent::QuorumProposalSend(proposal, sender) => { + maybe_action = Some(HotShotAction::Propose); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Proposal(proposal), + )), + TransmitType::Broadcast, + ) + } - // ED Each network task is subscribed to all these message types. Need filters per network task - HotShotEvent::QuorumVoteSend(vote) => { - maybe_action = Some(HotShotAction::Vote); - ( + // ED Each network task is subscribed to all these message types. Need filters per network task + HotShotEvent::QuorumVoteSend(vote) => { + maybe_action = Some(HotShotAction::Vote); + ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote(vote.clone()), + )), + TransmitType::Direct(membership.get_leader(vote.get_view_number() + 1)), + ) + } + HotShotEvent::VidDisperseSend(proposal, sender) => { + return self.handle_vid_disperse_proposal(proposal, &sender); + } + HotShotEvent::DAProposalSend(proposal, sender) => { + maybe_action = Some(HotShotAction::DAPropose); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage::Committee( + CommitteeConsensusMessage::DAProposal(proposal), + )), + TransmitType::DACommitteeBroadcast, + ) + } + HotShotEvent::DAVoteSend(vote) => { + maybe_action = Some(HotShotAction::DAVote); + ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::Committee( + CommitteeConsensusMessage::DAVote(vote.clone()), + )), + TransmitType::Direct(membership.get_leader(vote.get_view_number())), + ) + } + // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee + HotShotEvent::DACSend(certificate, sender) => { + maybe_action = Some(HotShotAction::DACert); + ( + sender, + MessageKind::::from_consensus_message(SequencingMessage::Committee( + CommitteeConsensusMessage::DACertificate(certificate), + )), + TransmitType::Broadcast, + ) + } + HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::Vote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number() + 1)), - ) - } - HotShotEvent::VidDisperseSend(proposal, sender) => { - return self.handle_vid_disperse_proposal(proposal, &sender); - } - HotShotEvent::DAProposalSend(proposal, sender) => { - maybe_action = Some(HotShotAction::DAPropose); - ( + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), + )), + TransmitType::Direct( + membership.get_leader(vote.get_view_number() + vote.get_data().relay), + ), + ), + HotShotEvent::ViewSyncCommitVoteSend(vote) => ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), + )), + TransmitType::Direct( + membership.get_leader(vote.get_view_number() + vote.get_data().relay), + ), + ), + HotShotEvent::ViewSyncFinalizeVoteSend(vote) => ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), + )), + TransmitType::Direct( + membership.get_leader(vote.get_view_number() + vote.get_data().relay), + ), + ), + HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::DAProposal(proposal), - ))), - TransmitType::DACommitteeBroadcast, - None, - ) - } - HotShotEvent::DAVoteSend(vote) => { - maybe_action = Some(HotShotAction::DAVote); - ( + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), + )), + TransmitType::Broadcast, + ), + HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), + )), + TransmitType::Broadcast, + ), + HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => ( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), + )), + TransmitType::Broadcast, + ), + HotShotEvent::TimeoutVoteSend(vote) => ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::DAVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number())), - ) - } - // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee - HotShotEvent::DACSend(certificate, sender) => { - maybe_action = Some(HotShotAction::DACert); - ( + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::TimeoutVote(vote.clone()), + )), + TransmitType::Direct(membership.get_leader(vote.get_view_number() + 1)), + ), + HotShotEvent::UpgradeProposalSend(proposal, sender) => ( sender, - MessageKind::::from_consensus_message(SequencingMessage(Right( - CommitteeConsensusMessage::DACertificate(certificate), - ))), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::UpgradeProposal(proposal), + )), TransmitType::Broadcast, - None, - ) - } - HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number() + vote.get_data().relay)), - ), - HotShotEvent::ViewSyncCommitVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number() + vote.get_data().relay)), - ), - HotShotEvent::ViewSyncFinalizeVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number() + vote.get_data().relay)), - ), - HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), - ))), - TransmitType::Broadcast, - None, - ), - HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), - ))), - TransmitType::Broadcast, - None, - ), - - HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), - ))), - TransmitType::Broadcast, - None, - ), - HotShotEvent::TimeoutVoteSend(vote) => ( - vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage(Left( - GeneralConsensusMessage::TimeoutVote(vote.clone()), - ))), - TransmitType::Direct, - Some(membership.get_leader(vote.get_view_number() + 1)), - ), - HotShotEvent::ViewChange(view) => { - self.view = view; - self.channel.update_view(self.view.get_u64()); - return None; - } - HotShotEvent::Shutdown => { - error!("Networking task shutting down"); - return Some(HotShotTaskCompleted); - } - event => { - error!("Receieved unexpected message in network task {:?}", event); - return None; - } - }; + ), + HotShotEvent::UpgradeVoteSend(vote) => { + error!("Sending upgrade vote!"); + ( + vote.get_signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::UpgradeVote(vote.clone()), + )), + TransmitType::Direct(membership.get_leader(vote.get_view_number())), + ) + } + HotShotEvent::ViewChange(view) => { + self.view = view; + self.channel.update_view(self.view.get_u64()); + return None; + } + HotShotEvent::VersionUpgrade(version) => { + debug!("Updating internal version in network task to {:?}", version); + self.version = version; + return None; + } + HotShotEvent::Shutdown => { + error!("Networking task shutting down"); + return Some(HotShotTaskCompleted); + } + event => { + error!("Receieved unexpected message in network task {:?}", event); + return None; + } + }; let message = Message { sender, kind: message_kind, @@ -400,6 +423,7 @@ impl< let committee = membership.get_whole_committee(view); let net = self.channel.clone(); let storage = self.storage.clone(); + let version = self.version; async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, @@ -412,19 +436,23 @@ impl< return; } - let transmit_result = match transmit_type { - TransmitType::Direct => { - net.direct_message(message, recipient.unwrap(), STATIC_VER_0_1) - .await - } - TransmitType::Broadcast => { - net.broadcast_message(message, committee, STATIC_VER_0_1) - .await - } - TransmitType::DACommitteeBroadcast => { - net.da_broadcast_message(message, committee, STATIC_VER_0_1) - .await + let transmit_result = if version == BASE_VERSION { + match transmit { + TransmitType::Direct(recipient) => { + net.direct_message(message, recipient, STATIC_VER_0_1).await + } + TransmitType::Broadcast => { + net.broadcast_message(message, committee, STATIC_VER_0_1) + .await + } + TransmitType::DACommitteeBroadcast => { + net.da_broadcast_message(message, committee, STATIC_VER_0_1) + .await + } } + } else { + error!("The network has upgraded to {:?}, which is not implemented in this instance of HotShot.", version); + return; }; match transmit_result { @@ -451,9 +479,11 @@ impl< proposal.data.recipient_key.clone(), Message { sender: sender.clone(), - kind: MessageKind::::from_consensus_message(SequencingMessage( - Right(CommitteeConsensusMessage::VidDisperseMsg(proposal)), - )), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + kind: MessageKind::::from_consensus_message( + SequencingMessage::Committee( + CommitteeConsensusMessage::VidDisperseMsg(proposal), + ), + ), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 }, ) }) diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index cc8943b19d..0518f86d1c 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -7,7 +7,6 @@ use crate::{ use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; -use either::Either; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, @@ -269,13 +268,13 @@ impl> DelayedRequester { /// Transform a response into a `HotShotEvent` async fn handle_response_message(&self, message: SequencingMessage) { - let event = match message.0 { - Either::Right(CommitteeConsensusMessage::VidDisperseMsg(prop)) => { - Arc::new(HotShotEvent::VidDisperseRecv(prop)) + let event = match message { + SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(prop)) => { + HotShotEvent::VidDisperseRecv(prop) } _ => return, }; - broadcast_event(event, &self.sender).await; + broadcast_event(Arc::new(event), &self.sender).await; } } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 9c8619caaa..b26e18e29a 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; use std::sync::Arc; +use crate::events::HotShotEvent; use async_broadcast::Receiver; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use either::Either::Right; use futures::{channel::mpsc, FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ @@ -27,8 +27,6 @@ use sha2::{Digest, Sha256}; use tokio::task::JoinHandle; use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; -use crate::events::HotShotEvent; - /// Type alias for consensus state wrapped in a lock. type LockedConsensusState = Arc>>; @@ -143,9 +141,10 @@ impl NetworkResponseState { if !proposals_map.contains_key(key) { return self.make_msg(ResponseMessage::NotFound); } - let seq_msg = SequencingMessage(Right(CommitteeConsensusMessage::VidDisperseMsg( + + let seq_msg = SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg( proposals_map.get(key).unwrap().clone(), - ))); + )); self.make_msg(ResponseMessage::Found(seq_msg)) } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index a7c7d5f635..842c4aca8a 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -22,7 +22,7 @@ use hotshot_types::{ use crate::vote_collection::HandleVoteEvent; use std::sync::Arc; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; @@ -72,29 +72,37 @@ impl, A: ConsensusApi + ) -> Option { match event.as_ref() { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { - let should_vote = self.should_vote; + info!("Received upgrade proposal: {:?}", proposal); + // If the proposal does not match our upgrade target, we immediately exit. - if !should_vote(proposal.data.upgrade_proposal.clone()) { - warn!("Received unexpected upgrade proposal:\n{:?}", proposal.data); + if !(self.should_vote)(proposal.data.upgrade_proposal.clone()) { + info!("Received unexpected upgrade proposal:\n{:?}", proposal.data); return None; } // If we have an upgrade target, we validate that the proposal is relevant for the current view. - - debug!( + info!( "Upgrade proposal received for view: {:?}", proposal.data.get_view_number() ); - // NOTE: Assuming that the next view leader is the one who sends an upgrade proposal for this view + let view = proposal.data.get_view_number(); + // At this point, we could choose to validate + // that the proposal was issued by the correct leader + // for the indiciated view. + // + // We choose not to, because we don't gain that much from it. + // The certificate itself is only useful to the leader for that view anyway, + // and from the node's perspective it doesn't matter who the sender is. + // All we'd save is the cost of signing the vote, and we'd lose some flexibility. + // Allow an upgrade proposal that is one view older, in case we have voted on a quorum // proposal and updated the view. // `self.cur_view` should be at least 1 since there is a view change before getting // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, @@ -113,7 +121,6 @@ impl, A: ConsensusApi + // At this point, we've checked that: // * the proposal was expected, // * the proposal is valid, and - // * the proposal is recent, // so we notify the application layer self.api .send_event(Event { @@ -140,16 +147,20 @@ impl, A: ConsensusApi + } HotShotEvent::UpgradeVoteRecv(ref vote) => { debug!("Upgrade vote recv, Main Task {:?}", vote.get_view_number()); + // Check if we are the leader. - let view = vote.get_view_number(); - if self.quorum_membership.get_leader(view) != self.public_key { - error!( - "We are not the leader for view {} are we leader for next view? {}", - *view, - self.quorum_membership.get_leader(view + 1) == self.public_key - ); - return None; + { + let view = vote.get_view_number(); + if self.quorum_membership.get_leader(view) != self.public_key { + error!( + "We are not the leader for view {} are we leader for next view? {}", + *view, + self.quorum_membership.get_leader(view + 1) == self.public_key + ); + return None; + } } + let mut collector = self.vote_collector.write().await; if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view @@ -181,6 +192,9 @@ impl, A: ConsensusApi + } } } + HotShotEvent::VersionUpgrade(version) => { + error!("The network was upgraded to {:?}. This instance of HotShot did not expect an upgrade.", version); + } HotShotEvent::ViewChange(view) => { let view = *view; if *self.cur_view >= *view { @@ -192,6 +206,55 @@ impl, A: ConsensusApi + } self.cur_view = view; + #[cfg(feature = "example-upgrade")] + { + use commit::Committable; + use std::marker::PhantomData; + + use hotshot_types::{ + data::UpgradeProposal, message::Proposal, + traits::node_implementation::ConsensusTime, + }; + use versioned_binary_serialization::version::Version; + + if *view == 5 && self.quorum_membership.get_leader(view + 5) == self.public_key + { + let upgrade_proposal_data = UpgradeProposalData { + old_version: Version { major: 0, minor: 1 }, + new_version: Version { major: 1, minor: 0 }, + new_version_hash: vec![1, 1, 0, 0, 1], + old_version_last_block: TYPES::Time::new(15), + new_version_first_block: TYPES::Time::new(18), + }; + + let upgrade_proposal = UpgradeProposal { + upgrade_proposal: upgrade_proposal_data.clone(), + view_number: view + 5, + }; + + let signature = TYPES::SignatureKey::sign( + &self.private_key, + upgrade_proposal_data.commit().as_ref(), + ) + .expect("Failed to sign upgrade proposal commitment!"); + + let message = Proposal { + data: upgrade_proposal, + signature, + _pd: PhantomData, + }; + + broadcast_event( + Arc::new(HotShotEvent::UpgradeProposalSend( + message, + self.public_key.clone(), + )), + &tx, + ) + .await; + } + } + return None; } HotShotEvent::Shutdown => { @@ -234,6 +297,7 @@ impl, A: ConsensusApi + | HotShotEvent::UpgradeVoteRecv(_) | HotShotEvent::Shutdown | HotShotEvent::ViewChange(_) + | HotShotEvent::VersionUpgrade(_) ) } } diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index eb5420567c..4a02e8532d 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -78,8 +78,8 @@ async fn test_consensus_task() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_send(), exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_send(), ], asserts: vec![is_at_view_number(2)], }; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index a840d23b26..d9e51ce796 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -10,6 +10,7 @@ use hotshot_task_impls::network::{self, NetworkEventTaskState}; use hotshot_testing::test_builder::TestMetadata; use hotshot_testing::view_generator::TestViewGenerator; use hotshot_types::{ + constants::BASE_VERSION, data::ViewNumber, traits::{ election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, @@ -57,6 +58,7 @@ async fn test_network_task() { view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, + version: BASE_VERSION, storage, }; let (tx, rx) = async_broadcast::broadcast(10); @@ -124,6 +126,7 @@ async fn test_network_storage_fail() { view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, + version: BASE_VERSION, storage, }; let (tx, rx) = async_broadcast::broadcast(10); diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index de5c2c2a31..be6db3b3c5 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -82,8 +82,8 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { } else { vec![ exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_send(), exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_send(), ] }; diff --git a/testing/tests/unit/message.rs b/testing/tests/unit/message.rs index 90046710f2..d4b3f561e5 100644 --- a/testing/tests/unit/message.rs +++ b/testing/tests/unit/message.rs @@ -2,7 +2,6 @@ use std::marker::PhantomData; use commit::Committable; -use either::Left; use hotshot_example_types::node_types::TestTypes; @@ -48,9 +47,9 @@ fn version_number_at_start_of_serialization() { }; let message = Message { sender, - kind: MessageKind::Consensus(SequencingMessage(Left( + kind: MessageKind::Consensus(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitCertificate(simple_certificate), - ))), + )), }; let serialized_message: Vec = Serializer::::serialize(&message).unwrap(); // The versions we've read from the message diff --git a/types/Cargo.toml b/types/Cargo.toml index 8282ef1e2a..996ecf9428 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" name = "hotshot-types" version = "0.1.11" +[features] +gpu-vid = ["jf-primitives/gpu-vid"] + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -55,9 +58,6 @@ dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } [dev-dependencies] serde_json = { workspace = true } -[features] -gpu-vid = ["jf-primitives/gpu-vid"] - [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/types/src/constants.rs b/types/src/constants.rs index 645cc39d4f..834af4243d 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -29,6 +29,9 @@ pub const VERSION_0_1: Version = Version { minor: VERSION_MIN, }; +/// Constant for the base protocol version in this instance of HotShot. +pub const BASE_VERSION: Version = VERSION_0_1; + /// Type for protocol static version 0.1. pub type Version01 = StaticVersion; diff --git a/types/src/message.rs b/types/src/message.rs index 76d256cf05..c16f2cd24a 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -24,7 +24,6 @@ use crate::{ }, }; use derivative::Derivative; -use either::Either::{self, Left, Right}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use std::{fmt::Debug, marker::PhantomData}; @@ -80,7 +79,9 @@ pub enum MessagePurpose { /// VID disperse, like [`Proposal`]. VidDisperse, /// Message with an upgrade proposal. - Upgrade, + UpgradeProposal, + /// Upgrade vote. + UpgradeVote, } // TODO (da) make it more customized to the consensus layer, maybe separating the specific message @@ -192,17 +193,19 @@ pub enum CommitteeConsensusMessage { /// Messages for sequencing consensus. #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] -pub struct SequencingMessage( - pub Either, CommitteeConsensusMessage>, -); +pub enum SequencingMessage { + /// Messages related to validating and sequencing consensus + General(GeneralConsensusMessage), + + /// Messages related to the sequencing consensus protocol for the DA committee. + Committee(CommitteeConsensusMessage), +} impl SequencingMessage { - // TODO: Disable panic after the `ViewSync` case is implemented. /// Get the view number this message relates to - #[allow(clippy::panic)] fn view_number(&self) -> TYPES::Time { - match &self.0 { - Left(general_message) => { + match &self { + SequencingMessage::General(general_message) => { match general_message { GeneralConsensusMessage::Proposal(p) => { // view of leader in the leaf when proposal @@ -235,7 +238,7 @@ impl SequencingMessage { GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), } } - Right(committee_message) => { + SequencingMessage::Committee(committee_message) => { match committee_message { CommitteeConsensusMessage::DAProposal(p) => { // view of leader in the leaf when proposal @@ -258,8 +261,8 @@ impl SequencingMessage { /// Get the message purpos #[allow(clippy::panic)] fn purpose(&self) -> MessagePurpose { - match &self.0 { - Left(general_message) => match general_message { + match &self { + SequencingMessage::General(general_message) => match general_message { GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { MessagePurpose::Vote @@ -274,10 +277,10 @@ impl SequencingMessage { MessagePurpose::ViewSyncCertificate } - GeneralConsensusMessage::UpgradeProposal(_) - | GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::Upgrade, + GeneralConsensusMessage::UpgradeProposal(_) => MessagePurpose::UpgradeProposal, + GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::UpgradeVote, }, - Right(committee_message) => match committee_message { + SequencingMessage::Committee(committee_message) => match committee_message { CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 4519f186d1..16ad614497 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -81,10 +81,10 @@ pub enum WebServerNetworkError { } /// the type of transmission -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -pub enum TransmitType { +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum TransmitType { /// directly transmit - Direct, + Direct(TYPES::SignatureKey), /// broadcast the message to all Broadcast, /// broadcast to DA committee @@ -167,8 +167,12 @@ pub enum NetworkError { pub enum ConsensusIntentEvent { /// Poll for votes for a particular view PollForVotes(u64), + /// Poll for upgrade votes for a particular view + PollForUpgradeVotes(u64), /// Poll for a proposal for a particular view PollForProposal(u64), + /// Poll for an upgrade proposal for a particular view + PollForUpgradeProposal(u64), /// Poll for VID disperse data for a particular view PollForVIDDisperse(u64), /// Poll for the most recent [quorum/da] proposal the webserver has @@ -212,6 +216,8 @@ impl ConsensusIntentEvent { match &self { ConsensusIntentEvent::PollForVotes(view_number) | ConsensusIntentEvent::PollForProposal(view_number) + | ConsensusIntentEvent::PollForUpgradeVotes(view_number) + | ConsensusIntentEvent::PollForUpgradeProposal(view_number) | ConsensusIntentEvent::PollForDAC(view_number) | ConsensusIntentEvent::PollForViewSyncVotes(view_number) | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) diff --git a/types/src/vote.rs b/types/src/vote.rs index 8b1df1f0f8..3117ddef03 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -105,9 +105,6 @@ impl, CERT: Certificate Either<(), CERT> { let key = vote.get_signing_key(); @@ -121,10 +118,12 @@ impl, CERT: Certificate::PureAssembledSignatureType = vote.get_signature(); diff --git a/web_server/api.toml b/web_server/api.toml index d6c6363d47..496331cf67 100644 --- a/web_server/api.toml +++ b/web_server/api.toml @@ -11,6 +11,14 @@ DOC = """ Return the proposal for a given view number """ +# GET an upgrade proposal for a view, where the view is passed as an argument +[route.get_upgrade_proposal] +PATH = ["upgrade_proposal/:view_number"] +":view_number" = "Integer" +DOC = """ +Return the upgrade proposal for a given view number +""" + # POST a VID disperse, where the view is passed as an argument [route.getviddisperse] PATH = ["vid_disperse/:view_number"] @@ -42,6 +50,15 @@ DOC = """ Post the proposal for a given view_number """ +# POST an upgrade proposal, where the view is passed as an argument +[route.post_upgrade_proposal] +PATH = ["upgrade_proposal/:view_number"] +METHOD = "POST" +":view_number" = "Integer" +DOC = """ +Post the upgrade proposal for a given view_number +""" + # POST a VID disperse, where the view is passed as an argument [route.postviddisperse] PATH = ["vid_disperse/:view_number"] @@ -80,6 +97,16 @@ DOC = """ Get all votes for a view number """ +# GET all the upgrade votes from a given index for a given view number +[route.get_upgrade_votes] +PATH = ["upgrade_votes/:view_number/:index"] +":view_number" = "Integer" +":index" = "Integer" +METHOD = "GET" +DOC = """ +Get all upgrade votes for a view number +""" + # POST a vote, where the view number is passed as an argument [route.postvote] @@ -90,6 +117,15 @@ DOC = """ Send a vote """ +# POST an upgrade vote, where the view number is passed as an argument +[route.post_upgrade_vote] +PATH = ["upgrade_votes/:view_number"] +":view_number" = "Integer" +METHOD = "POST" +DOC = """ +Send an upgrade vote +""" + # GET all transactions starting at :index [route.gettransactions] diff --git a/web_server/src/config.rs b/web_server/src/config.rs index 0cbf8f47be..7dcfd8de66 100644 --- a/web_server/src/config.rs +++ b/web_server/src/config.rs @@ -60,6 +60,18 @@ pub fn post_vote_route(view_number: u64) -> String { format!("api/votes/{view_number}") } +/// get upgrade votes +#[must_use] +pub fn get_upgrade_vote_route(view_number: u64, index: u64) -> String { + format!("api/upgrade_votes/{view_number}/{index}") +} + +/// post vote +#[must_use] +pub fn post_upgrade_vote_route(view_number: u64) -> String { + format!("api/upgrade_votes/{view_number}") +} + /// get vid dispersal #[must_use] pub fn get_vid_disperse_route(view_number: u64) -> String { @@ -72,16 +84,16 @@ pub fn post_vid_disperse_route(view_number: u64) -> String { format!("api/vid_disperse/{view_number}") } -/// get upgrade route +/// get upgrade proposal #[must_use] -pub fn get_upgrade_route(view_number: u64) -> String { - format!("api/upgrade/{view_number}") +pub fn get_upgrade_proposal_route(view_number: u64) -> String { + format!("api/upgrade_proposal/{view_number}") } -/// post upgrade route +/// post upgrade proposal #[must_use] -pub fn post_upgrade_route(view_number: u64) -> String { - format!("api/upgrade/{view_number}") +pub fn post_upgrade_proposal_route(view_number: u64) -> String { + format!("api/upgrade_proposal/{view_number}") } /// get vid vote route diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 56ed16ea6b..22783710cc 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -37,6 +37,8 @@ type Error = ServerError; struct WebServerState { /// view number -> (secret, proposal) proposals: BTreeMap)>, + /// view number -> (secret, proposal) + upgrade_proposals: BTreeMap)>, /// for view sync: view number -> (relay, certificate) view_sync_certificates: BTreeMap)>>, /// view number -> relay @@ -51,14 +53,20 @@ struct WebServerState { oldest_certificate: u64, /// view number -> Vec(index, vote) votes: HashMap)>>, + /// view number -> Vec(index, vote) + upgrade_votes: HashMap)>>, /// view sync: view number -> Vec(relay, vote) view_sync_votes: HashMap)>>, /// view number -> highest vote index for that view number vote_index: HashMap, + /// view number -> highest vote index for that view number + upgrade_vote_index: HashMap, /// view_sync: view number -> highest vote index for that view number view_sync_vote_index: HashMap, /// view number of oldest votes in memory oldest_vote: u64, + /// view number of oldest votes in memory + oldest_upgrade_vote: u64, /// view sync: view number of oldest votes in memory oldest_view_sync_vote: u64, /// view number -> (secret, string) @@ -98,6 +106,7 @@ impl WebServerState { fn new() -> Self { Self { proposals: BTreeMap::new(), + upgrade_proposals: BTreeMap::new(), da_certificates: HashMap::new(), votes: HashMap::new(), num_txns: 0, @@ -114,6 +123,9 @@ impl WebServerState { view_sync_certificates: BTreeMap::new(), view_sync_votes: HashMap::new(), view_sync_vote_index: HashMap::new(), + upgrade_votes: HashMap::new(), + oldest_upgrade_vote: 0, + upgrade_vote_index: HashMap::new(), vid_disperses: HashMap::new(), oldest_vid_disperse: 0, @@ -151,6 +163,10 @@ pub trait WebServerDataSource { /// # Errors /// Error if unable to serve. fn get_proposal(&self, view_number: u64) -> Result>>, Error>; + /// Get upgrade proposal + /// # Errors + /// Error if unable to serve. + fn get_upgrade_proposal(&self, view_number: u64) -> Result>>, Error>; /// Get latest quanrum proposal /// # Errors /// Error if unable to serve. @@ -172,6 +188,14 @@ pub trait WebServerDataSource { /// # Errors /// Error if unable to serve. fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; + /// Get upgrade votes + /// # Errors + /// Error if unable to serve. + fn get_upgrade_votes( + &self, + view_number: u64, + index: u64, + ) -> Result>>, Error>; /// Get view sync votes /// # Errors /// Error if unable to serve. @@ -194,6 +218,10 @@ pub trait WebServerDataSource { /// # Errors /// Error if unable to serve. fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; + /// Post upgrade vote + /// # Errors + /// Error if unable to serve. + fn post_upgrade_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; /// Post view sync vote /// # Errors /// Error if unable to serve. @@ -203,6 +231,10 @@ pub trait WebServerDataSource { /// # Errors /// Error if unable to serve. fn post_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; + /// Post upgrade proposal + /// # Errors + /// Error if unable to serve. + fn post_upgrade_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; /// Post view sync certificate /// # Errors /// Error if unable to serve. @@ -286,6 +318,26 @@ impl WebServerDataSource for WebServerState { }), } } + /// Return the proposal the server has received for a particular view + fn get_upgrade_proposal(&self, view_number: u64) -> Result>>, Error> { + match self.upgrade_proposals.get(&view_number) { + Some(proposal) => { + if proposal.1.is_empty() { + Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("Proposal empty for view {view_number}"), + }) + } else { + tracing::error!("found proposal"); + Ok(Some(vec![proposal.1.clone()])) + } + } + None => Err(ServerError { + status: StatusCode::NotImplemented, + message: format!("Proposal not found for view {view_number}"), + }), + } + } /// Return the VID disperse data that the server has received for a particular view fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error> { @@ -350,6 +402,26 @@ impl WebServerDataSource for WebServerState { } } + /// Return all votes the server has received for a particular view from provided index to most recent + fn get_upgrade_votes( + &self, + view_number: u64, + index: u64, + ) -> Result>>, Error> { + let votes = self.upgrade_votes.get(&view_number); + let mut ret_votes = vec![]; + if let Some(votes) = votes { + for i in index..*self.upgrade_vote_index.get(&view_number).unwrap() { + ret_votes.push(votes[usize::try_from(i).unwrap()].1.clone()); + } + } + if ret_votes.is_empty() { + Ok(None) + } else { + Ok(Some(ret_votes)) + } + } + /// Return all VID votes the server has received for a particular view from provided index to most recent fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error> { let vid_votes = self.vid_votes.get(&view_number); @@ -494,6 +566,35 @@ impl WebServerDataSource for WebServerState { Ok(()) } + /// Stores a received vote in the `WebServerState` + fn post_upgrade_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { + // Only keep vote history for MAX_VIEWS number of views + if self.upgrade_votes.len() >= MAX_VIEWS { + self.upgrade_votes.remove(&self.oldest_upgrade_vote); + while !self.upgrade_votes.contains_key(&self.oldest_upgrade_vote) { + self.oldest_upgrade_vote += 1; + } + } + + // don't accept the vote if it is too old + if self.oldest_upgrade_vote > view_number { + return Err(ServerError { + status: StatusCode::Gone, + message: "Posted vote is too old".to_string(), + }); + } + + let next_index = self.upgrade_vote_index.entry(view_number).or_insert(0); + self.upgrade_votes + .entry(view_number) + .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) + .or_insert_with(|| vec![(*next_index, vote)]); + self.upgrade_vote_index + .entry(view_number) + .and_modify(|index| *index += 1); + Ok(()) + } + /// Stores a received VID vote in the `WebServerState` fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { // Only keep vote history for MAX_VIEWS number of views @@ -572,6 +673,24 @@ impl WebServerDataSource for WebServerState { Ok(()) } + fn post_upgrade_proposal( + &mut self, + view_number: u64, + mut proposal: Vec, + ) -> Result<(), Error> { + tracing::error!("Received upgrade proposal for view {}", view_number); + + if self.upgrade_proposals.len() >= MAX_VIEWS { + self.upgrade_proposals.pop_first(); + } + + self.upgrade_proposals + .entry(view_number) + .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut proposal)) + .or_insert_with(|| (String::new(), proposal)); + Ok(()) + } + fn post_vid_disperse(&mut self, view_number: u64, mut disperse: Vec) -> Result<(), Error> { info!("Received VID disperse for view {}", view_number); if view_number > self.recent_vid_disperse { @@ -781,6 +900,13 @@ where } .boxed() })? + .get("get_upgrade_proposal", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + state.get_upgrade_proposal(view_number) + } + .boxed() + })? .get("getviddisperse", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -817,6 +943,14 @@ where } .boxed() })? + .get("get_upgrade_votes", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let index: u64 = req.integer_param("index")?; + state.get_upgrade_votes(view_number, index) + } + .boxed() + })? .get("getviewsyncvotes", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -841,6 +975,15 @@ where } .boxed() })? + .post("post_upgrade_vote", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically + let vote = req.body_bytes(); + state.post_upgrade_vote(view_number, vote) + } + .boxed() + })? .post("postviewsyncvote", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; @@ -858,6 +1001,14 @@ where } .boxed() })? + .post("post_upgrade_proposal", |req, state| { + async move { + let view_number: u64 = req.integer_param("view_number")?; + let proposal = req.body_bytes(); + state.post_upgrade_proposal(view_number, proposal) + } + .boxed() + })? .post("postviddisperse", |req, state| { async move { let view_number: u64 = req.integer_param("view_number")?; From 03677e46765b598c8eacc93591b734c52eb7b589 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 2 Apr 2024 13:30:15 -0700 Subject: [PATCH 0916/1393] [CX_CLEANUP] - Test the dependency framework (#2833) * Rename vote to vote_collection, add quorum_vote * Add basic structure * Start fix build * More fixes * Fix build * Restore a comment * Fix doc * Modify function signature, add events * Replace duplicate code with DependencyTask, validate events before creating 3 subdependencies * Fix build after merge * Fix lint * Fix tokio * Wrap events into Arc * Add a simple test * Add negative tests * Lint * Add QuorumVoteDependenciesValidated event, move view changes * Fix test after event change * Add logs, rename based on diagrams * Allow different orders * Move event * Replace try_recv with recv * Fix for tokio * Replace either with enum * Fix merge issues * Fix build after merge * Address comments * Undo a rename * Fix build after merge again * Remove unused checking and const * Drain from_test * Fix sender and receiver setup * Fix build after merge * Replace with const * Comment out unused function --- hotshot/src/lib.rs | 9 ++ hotshot/src/tasks/mod.rs | 14 +++ hotshot/src/tasks/task_state.rs | 22 ++++- macros/src/lib.rs | 5 +- task-impls/src/consensus.rs | 4 +- task-impls/src/events.rs | 13 +-- task-impls/src/network.rs | 2 +- task-impls/src/quorum_vote.rs | 54 +++++++----- testing/src/script.rs | 7 +- testing/tests/consensus_task.rs | 18 ++-- testing/tests/proposal_ordering.rs | 2 +- testing/tests/quorum_vote_task.rs | 132 +++++++++++++++++++++++++++++ testing/tests/upgrade_task.rs | 26 +++--- 13 files changed, 246 insertions(+), 62 deletions(-) create mode 100644 testing/tests/quorum_vote_task.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4724bcc648..a161e9d87f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -561,6 +561,15 @@ impl> SystemContext { &handle, ) .await; + // TODO: [CX_CLEANUP] - Integrate QuorumVoteTask with other tasks. + // + // add_quorum_vote_task( + // registry.clone(), + // event_tx.clone(), + // event_rx.activate_cloned(), + // &handle, + // ) + // .await; add_da_task( registry.clone(), event_tx.clone(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5140f44529..43c19a5c86 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -256,6 +256,20 @@ pub async fn add_consensus_task>( task_reg.run_task(task).await; } +// TODO: [CX_CLEANUP] - Integrate QuorumVoteTask with other tasks. +// +// /// Add the quorum vote task. +// pub async fn add_quorum_vote_task>( +// task_reg: Arc, +// tx: Sender>>, +// rx: Receiver>>, +// handle: &SystemContextHandle, +// ) { +// let quorum_vote_state = QuorumVoteTaskState::create_from(handle).await; +// let task = Task::new(tx, rx, task_reg.clone(), quorum_vote_state); +// task_reg.run_task(task).await; +// } + /// add the VID task pub async fn add_vid_task>( task_reg: Arc, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index d0f9cc7754..39668862d1 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -3,9 +3,9 @@ use crate::types::SystemContextHandle; use async_trait::async_trait; use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; use hotshot_task_impls::{ - consensus::ConsensusTaskState, da::DATaskState, request::NetworkRequestState, - transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, - view_sync::ViewSyncTaskState, + consensus::ConsensusTaskState, da::DATaskState, quorum_vote::QuorumVoteTaskState, + request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, + vid::VIDTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::traits::{ consensus_api::ConsensusApi, @@ -213,6 +213,22 @@ impl> CreateTaskState } } +#[async_trait] +impl> CreateTaskState + for QuorumVoteTaskState +{ + async fn create_from(handle: &SystemContextHandle) -> QuorumVoteTaskState { + QuorumVoteTaskState { + latest_voted_view: handle.get_cur_view().await, + vote_dependencies: HashMap::new(), + quorum_network: handle.hotshot.networks.quorum_network.clone(), + committee_network: handle.hotshot.networks.da_network.clone(), + output_event_stream: handle.hotshot.output_event_stream.0.clone(), + id: handle.hotshot.id, + } + } +} + #[async_trait] impl> CreateTaskState for QuorumProposalTaskState diff --git a/macros/src/lib.rs b/macros/src/lib.rs index b2d4d12cc7..4f2373f4c3 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -65,10 +65,9 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { validate_task_state_or_panic_in_script, }; - use hotshot_testing::predicates::Predicate; + use hotshot_testing::{predicates::Predicate, script::RECV_TIMEOUT}; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; - use std::time::Duration; use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; @@ -131,7 +130,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { #task_names.state().handle_result(&res).await; } - while let Ok(Ok(received_output)) = async_timeout(Duration::from_millis(250), test_receiver.recv_direct()).await { + while let Ok(Ok(received_output)) = async_timeout(RECV_TIMEOUT, test_receiver.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 8da6d84734..13acdda885 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1106,7 +1106,7 @@ impl, A: ConsensusApi + self.upgrade_cert = Some(cert.clone()); } } - HotShotEvent::DACRecv(cert) => { + HotShotEvent::DACertificateRecv(cert) => { debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; @@ -1588,7 +1588,7 @@ impl, A: ConsensusApi + | HotShotEvent::QuorumProposalValidated(_) | HotShotEvent::QCFormed(_) | HotShotEvent::UpgradeCertificateFormed(_) - | HotShotEvent::DACRecv(_) + | HotShotEvent::DACertificateRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::Timeout(_) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index b651131f86..e66734f0b2 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -42,14 +42,17 @@ pub enum HotShotEvent { /// A DA vote has been received by the network; handled by the DA task DAVoteRecv(DAVote), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task - DACRecv(DACertificate), + DACertificateRecv(DACertificate), /// A DAC is validated. - DACValidated(DACertificate), + DACertificateValidated(DACertificate), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), - /// Dummy quorum vote to test if the quorum vote dependency works. + // TODO: Complete the dependency implementation. + // + /// Dummy quorum vote to test if the quorum vote dependency works. Should be removed and + /// replaced by `QuorumVoteSend` once the above TODO is done. DummyQuorumVoteSend(TYPES::Time), /// All dependencies for the quorum vote are validated. QuorumVoteDependenciesValidated(TYPES::Time), @@ -128,8 +131,8 @@ pub enum HotShotEvent { /// /// Like [`HotShotEvent::DAProposalRecv`]. VidDisperseRecv(Proposal>), - /// A VID disperse data is validated. - VidDisperseValidated(VidDisperseShare), + /// VID share data is validated. + VIDShareValidated(VidDisperseShare), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 857e149865..c5f17d0a8d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -158,7 +158,7 @@ impl NetworkMessageTaskState { HotShotEvent::DAVoteRecv(vote.clone()) } CommitteeConsensusMessage::DACertificate(cert) => { - HotShotEvent::DACRecv(cert) + HotShotEvent::DACertificateRecv(cert) } CommitteeConsensusMessage::VidDisperseMsg(proposal) => { HotShotEvent::VidDisperseRecv(proposal) diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 275bb7c391..5e1c446bea 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -26,15 +26,14 @@ use std::collections::HashMap; use std::sync::Arc; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::warn; -use tracing::{debug, instrument}; +use tracing::{debug, error, instrument, warn}; /// Vote dependency types. -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] enum VoteDependency { /// For the `QuorumProposalRecv` event. QuorumProposal, - /// For the `DACRecv` event. + /// For the `DACertificateRecv` event. Dac, /// For the `VidDisperseRecv` event. Vid, @@ -95,16 +94,18 @@ impl HandleDepOutput for VoteDependencyHandle { let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { + error!("Quorum proposal and DAC have inconsistent payload commitment."); return; } } else { payload_commitment = Some(proposal_payload_comm); } } - HotShotEvent::DACValidated(cert) => { + HotShotEvent::DACertificateValidated(cert) => { let cert_payload_comm = cert.get_data().payload_commit; if let Some(comm) = payload_commitment { if cert_payload_comm != comm { + error!("Quorum proposal and DAC have inconsistent payload commitment."); return; } } else { @@ -174,32 +175,37 @@ impl> QuorumVoteTaskState { - if let HotShotEvent::DACValidated(cert) = event { + if let HotShotEvent::DACertificateValidated(cert) = event { cert.view_number } else { return false; } } VoteDependency::Vid => { - if let HotShotEvent::VidDisperseValidated(disperse) = event { + if let HotShotEvent::VIDShareValidated(disperse) = event { disperse.view_number } else { return false; } } }; - event_view == view_number + if event_view == view_number { + debug!("Vote dependency {:?} completed", dependency_type); + return true; + } + false }), ) } /// Create and store an [`AndDependency`] combining [`EventDependency`]s associated with the /// given view number if it doesn't exist. + #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote crete dependency task if new", level = "error")] fn create_dependency_task_if_new( &mut self, view_number: TYPES::Time, event_receiver: Receiver>>, - event_sender: Sender>>, + event_sender: &Sender>>, ) { if self.vote_dependencies.get(&view_number).is_some() { return; @@ -218,7 +224,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState> QuorumVoteTaskState> QuorumVoteTaskState { let view = proposal.data.view_number; + debug!("Received Quorum Proposal for view {}", *view); if view <= self.latest_voted_view { return; } - debug!("Received Quorum Proposal for view {}", *view); // stop polling for the received proposal self.quorum_network @@ -277,11 +284,11 @@ impl> QuorumVoteTaskState { - debug!("DAC Received for view {}!", *cert.view_number); + HotShotEvent::DACertificateRecv(cert) => { let view = cert.view_number; + debug!("Received DAC for view {}", *view); if view <= self.latest_voted_view { return; } @@ -298,14 +305,15 @@ impl> QuorumVoteTaskState { let view = disperse.data.get_view_number(); + debug!("Received VID share for view {}", *view); if view <= self.latest_voted_view { return; } @@ -321,13 +329,14 @@ impl> QuorumVoteTaskState { + debug!("All vote dependencies verified for view {:?}", view); if !self.update_latest_voted_view(*view).await { debug!("view not updated"); return; @@ -335,7 +344,10 @@ impl> QuorumVoteTaskState { let new_view = *new_view; - debug!("View Change event for view {} in consensus task", *new_view); + debug!( + "View Change event for view {} in quorum vote task", + *new_view + ); let old_voted_view = self.latest_voted_view; @@ -358,7 +370,7 @@ impl> TaskState for QuorumVoteTask !matches!( event.as_ref(), HotShotEvent::QuorumProposalRecv(_, _) - | HotShotEvent::DACRecv(_) + | HotShotEvent::DACertificateRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::VidDisperseRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) diff --git a/testing/src/script.rs b/testing/src/script.rs index a92e346d59..ca433d83bc 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -1,13 +1,12 @@ use crate::predicates::{Predicate, PredicateResult}; use async_broadcast::broadcast; -use hotshot_task_impls::events::HotShotEvent; - use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::NodeType; use std::{sync::Arc, time::Duration}; -const RECV_TIMEOUT_SEC: Duration = Duration::from_secs(1); +pub const RECV_TIMEOUT: Duration = Duration::from_millis(250); pub struct TestScriptStage>>> { pub inputs: Vec>, @@ -122,7 +121,7 @@ pub async fn run_test_script let mut result = PredicateResult::Incomplete; while let Ok(Ok(received_output)) = - async_timeout(RECV_TIMEOUT_SEC, from_task.recv_direct()).await + async_timeout(RECV_TIMEOUT, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); diff --git a/testing/tests/consensus_task.rs b/testing/tests/consensus_task.rs index 4a02e8532d..19f83fd6f8 100644 --- a/testing/tests/consensus_task.rs +++ b/testing/tests/consensus_task.rs @@ -55,7 +55,7 @@ async fn test_consensus_task() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ @@ -134,7 +134,7 @@ async fn test_consensus_vote() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), QuorumVoteRecv(votes[0].clone()), ], @@ -186,7 +186,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ @@ -200,7 +200,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). VidDisperseRecv(vids[1].0[0].clone()), - DACRecv(dacs[1].clone()), + DACertificateRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); @@ -233,7 +233,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote_with_permuted_dac() { - // These tests verify that a vote is indeed sent no matter when it receives a DACRecv + // These tests verify that a vote is indeed sent no matter when it receives a DACertificateRecv // event. In particular, we want to verify that receiving events in an unexpected (but still // valid) order allows the system to proceed as it normally would. test_vote_with_specific_order(vec![0, 1, 2]).await; @@ -298,7 +298,7 @@ async fn test_view_sync_finalize_propose() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ @@ -421,7 +421,7 @@ async fn test_view_sync_finalize_vote() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ @@ -526,7 +526,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ @@ -622,7 +622,7 @@ async fn test_vid_disperse_storage_failure() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/proposal_ordering.rs index be6db3b3c5..72895343a6 100644 --- a/testing/tests/proposal_ordering.rs +++ b/testing/tests/proposal_ordering.rs @@ -51,7 +51,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), VidDisperseRecv(vids[0].0[0].clone()), ], outputs: vec![ diff --git a/testing/tests/quorum_vote_task.rs b/testing/tests/quorum_vote_task.rs new file mode 100644 index 0000000000..970409204f --- /dev/null +++ b/testing/tests/quorum_vote_task.rs @@ -0,0 +1,132 @@ +#![allow(clippy::panic)] +use hotshot::tasks::task_state::CreateTaskState; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_vote_task_success() { + use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; + use hotshot_testing::{ + predicates::exact, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + generator.next(); + let view = generator.current_view.clone().unwrap(); + + // Send the quorum proposal, DAC, and VID disperse data, in which case a dummy vote can be + // formed and the view number will be updated. + let view_success = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(view.quorum_proposal.clone(), view.leader_public_key), + DACertificateRecv(view.da_certificate.clone()), + VidDisperseRecv(view.vid_proposal.0[0].clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(QuorumProposalValidated(view.quorum_proposal.data.clone())), + exact(DACertificateValidated(view.da_certificate.clone())), + exact(VIDShareValidated(view.vid_proposal.0[0].data.clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), + exact(DummyQuorumVoteSend(ViewNumber::new(1))), + ], + asserts: vec![], + }; + + let quorum_vote_state = + QuorumVoteTaskState::::create_from(&handle).await; + + run_test_script(vec![view_success], quorum_vote_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_vote_task_miss_dependency() { + use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; + use hotshot_testing::{ + predicates::exact, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(3) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Send two of quorum proposal, DAC, and VID disperse data, in which case there's no vote. + let view_no_quorum_proposal = TestScriptStage { + inputs: vec![ + DACertificateRecv(dacs[0].clone()), + VidDisperseRecv(vids[0].0[0].clone()), + ], + outputs: vec![ + exact(DACertificateValidated(dacs[0].clone())), + exact(VIDShareValidated(vids[0].0[0].data.clone())), + ], + asserts: vec![], + }; + let view_no_dac = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + VidDisperseRecv(vids[1].0[0].clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(3))), + exact(QuorumProposalValidated(proposals[1].data.clone())), + exact(VIDShareValidated(vids[1].0[0].data.clone())), + ], + asserts: vec![], + }; + let view_no_vid = TestScriptStage { + inputs: vec![ + QuorumProposalRecv(proposals[2].clone(), leaders[2]), + DACertificateRecv(dacs[2].clone()), + ], + outputs: vec![ + exact(ViewChange(ViewNumber::new(4))), + exact(QuorumProposalValidated(proposals[2].data.clone())), + exact(DACertificateValidated(dacs[2].clone())), + ], + asserts: vec![], + }; + + let quorum_vote_state = + QuorumVoteTaskState::::create_from(&handle).await; + + run_test_script( + vec![view_no_quorum_proposal, view_no_dac, view_no_vid], + quorum_vote_state, + ) + .await; +} diff --git a/testing/tests/upgrade_task.rs b/testing/tests/upgrade_task.rs index 7e2decb321..0b52471177 100644 --- a/testing/tests/upgrade_task.rs +++ b/testing/tests/upgrade_task.rs @@ -74,7 +74,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VidDisperseRecv(vids[0].0[0].clone()), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -88,7 +88,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), VidDisperseRecv(vids[1].0[0].clone()), - DACRecv(dacs[1].clone()), + DACertificateRecv(dacs[1].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), @@ -101,7 +101,7 @@ async fn test_consensus_task_upgrade() { let view_3 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), - DACRecv(dacs[2].clone()), + DACertificateRecv(dacs[2].clone()), VidDisperseRecv(vids[2].0[0].clone()), ], outputs: vec![ @@ -116,7 +116,7 @@ async fn test_consensus_task_upgrade() { let view_4 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), - DACRecv(dacs[3].clone()), + DACertificateRecv(dacs[3].clone()), VidDisperseRecv(vids[3].0[0].clone()), ], outputs: vec![ @@ -236,12 +236,12 @@ async fn test_upgrade_and_consensus_task() { vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VidDisperseRecv(vids[0].0[0].clone()), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], vec![ - DACRecv(dacs[1].clone()), + DACertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, (), @@ -423,12 +423,12 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VidDisperseRecv(vids[0].0[0].clone()), - DACRecv(dacs[0].clone()), + DACertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), VidDisperseRecv(vids[1].0[0].clone()), - DACRecv(dacs[1].clone()), + DACertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, (), @@ -436,7 +436,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ), ], vec![ - DACRecv(dacs[2].clone()), + DACertificateRecv(dacs[2].clone()), VidDisperseRecv(vids[2].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, @@ -446,7 +446,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], vec![ - DACRecv(dacs[3].clone()), + DACertificateRecv(dacs[3].clone()), VidDisperseRecv(vids[3].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, @@ -456,7 +456,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], vec![ - DACRecv(dacs[4].clone()), + DACertificateRecv(dacs[4].clone()), VidDisperseRecv(vids[4].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, @@ -466,7 +466,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], vec![ - DACRecv(dacs[5].clone()), + DACertificateRecv(dacs[5].clone()), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, (), @@ -475,7 +475,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], vec![ - DACRecv(dacs[6].clone()), + DACertificateRecv(dacs[6].clone()), VidDisperseRecv(vids[6].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, From ef995c2b0570847324cc71be62055ac6089fab9a Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 2 Apr 2024 15:59:46 -0700 Subject: [PATCH 0917/1393] [Auto Bench] HotShot changes with GPU VID (#2858) * update setting * update webserver pub ip * updated dockerfile * dockerfile updated * clean up log and scripts running on ecs * real script added * fix * update script and config file * parameterizing webserver url * add to ci * to pass ci docker test * to pass ci docker test again * test ci with script, just and ecs * fix duplicate part for ci test * fix ci test * today's last try with ci * fix merge * really last ci try * commented out ci * fix scripts * add file * fix config * fix config * fix bug after upgradability? * init try on parameterization in script * better parameterization for script * clean up and more parameterization * fix lint * finish parameterization for scripts * preserve yml for ci * add blank line * fix less than or equal in script * upload results for init run * last blank line * nit for nano representation * change back transaction size * remove autobench on ci * remove autobench on ci * remove ci_ecs_benchmarks.sh * remove ci autobench script * nginx init * update dockerfile * script of using nginx * script of using nginx * try a larger next_view_timeout * some results and more println * fmt * init commit * config changes and README for examples * should have finished config changes...move to testing * lint * fix after test * clean output * nit * some log level changes * update webserver README * remove useless comment * lint * example_gpuvid_validator to example_fixed_leader * Update aws_ecs_nginx_benchmarks.sh to hide our servers' ip --- examples/Cargo.toml | 1 + examples/infra/mod.rs | 32 ++++++++--- examples/webserver/README.md | 55 +++++++++++-------- hotshot/Cargo.toml | 1 + .../src/traits/election/static_committee.rs | 22 +++++++- orchestrator/run-config.toml | 1 + orchestrator/src/config.rs | 12 ++-- testing/src/task_helpers.rs | 4 ++ testing/src/test_builder.rs | 1 + testing/src/test_runner.rs | 4 ++ testing/tests/network_task.rs | 2 + types/src/lib.rs | 2 + types/src/traits/election.rs | 1 + 13 files changed, 103 insertions(+), 35 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 0a477cbc90..31df23b950 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -19,6 +19,7 @@ docs = [] doc-images = [] hotshot-testing = ["hotshot/hotshot-testing"] randomized-leader-election = [] +fixed-leader-election = [] # libp2p [[example]] diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 2ef68c617c..fb9fcbb4b3 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -56,7 +56,7 @@ use std::num::NonZeroUsize; use std::time::Duration; use std::{fs, time::Instant}; use surf_disco::Url; -use tracing::{debug, error, info, warn}; +use tracing::{error, info, warn}; use versioned_binary_serialization::version::StaticVersionType; #[derive(Debug, Clone)] @@ -180,6 +180,14 @@ pub fn read_orchestrator_init_config() -> ( .help("Sets the url of the da webserver") .required(false), ) + .arg( + Arg::new("fixed_leader_for_gpuvid") + .short('f') + .long("fixed_leader_for_gpuvid") + .value_name("BOOL") + .help("Sets the number of fixed leader for gpu vid, only be used when leaders running on gpu") + .required(false), + ) .get_matches(); if let Some(config_file_string) = matches.get_one::("config_file") { @@ -204,6 +212,12 @@ pub fn read_orchestrator_init_config() -> ( if let Some(da_committee_size_string) = matches.get_one::("da_committee_size") { config.config.da_staked_committee_size = da_committee_size_string.parse::().unwrap(); } + if let Some(fixed_leader_for_gpuvid_string) = + matches.get_one::("fixed_leader_for_gpuvid") + { + config.config.fixed_leader_for_gpuvid = + fixed_leader_for_gpuvid_string.parse::().unwrap(); + } if let Some(transactions_per_round_string) = matches.get_one::("transactions_per_round") { config.transactions_per_round = transactions_per_round_string.parse::().unwrap(); @@ -385,18 +399,22 @@ pub trait RunDA< quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), committee_election_config, + config.config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config, + config.config.fixed_leader_for_gpuvid, ), }; @@ -439,10 +457,10 @@ pub trait RunDA< let mut total_latency = 0; let mut num_latency = 0; - debug!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); + info!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); async_sleep(Duration::from_secs(start_delay_seconds)).await; - debug!("Starting HotShot example!"); + info!("Starting HotShot example!"); let start = Instant::now(); let mut event_stream = context.get_event_stream(); @@ -977,7 +995,7 @@ pub async fn main_entry_point< setup_logging(); setup_backtrace(); - debug!("Starting validator"); + info!("Starting validator"); let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.clone()); @@ -1008,7 +1026,7 @@ pub async fn main_entry_point< .await .expect("failed to get config"); - error!("Initializing networking"); + info!("Initializing networking"); let run = RUNDA::initialize_networking(run_config.clone(), args.advertise_address).await; let hotshot = run.initialize_state_and_hotshot().await; @@ -1049,13 +1067,13 @@ pub async fn main_entry_point< } } if let NetworkConfigSource::Orchestrator = source { - debug!("Waiting for the start command from orchestrator"); + info!("Waiting for the start command from orchestrator"); orchestrator_client .wait_for_all_nodes_ready(run_config.clone().node_index) .await; } - println!("Starting HotShot"); + info!("Starting HotShot"); let bench_results = run .run_hotshot( hotshot, diff --git a/examples/webserver/README.md b/examples/webserver/README.md index b1371130ab..d782070a77 100644 --- a/examples/webserver/README.md +++ b/examples/webserver/README.md @@ -1,54 +1,65 @@ Commands to run da examples: 1a)Start web servers by either running 3 servers: ``` -just async_std example webserver -- -just async_std example webserver -- -``` - -1b)Or use multi-webserver to spin up all three: -``` -just async_std example multi-webserver -- +just async_std example webserver -- +just async_std example webserver -- ``` 2) Start orchestrator: ``` -just async_std example orchestrator-webserver -- +just async_std example orchestrator-webserver -- --orchestrator_url --config_file ``` 3a) Start validator: ``` -just async_std example validator-webserver -- +just async_std example validator-webserver -- ``` 3b) Or start multiple validators: ``` -just async_std example multi-validator-webserver -- +just async_std example multi-validator-webserver -- ``` I.e. ``` just async_std example webserver -- http://127.0.0.1:9000 just async_std example webserver -- http://127.0.0.1:9001 -just async_std example webserver -- http://127.0.0.1:9002 -just async_std example orchestrator-webserver -- http://127.0.0.1:4444 ./crates/orchestrator/run-config.toml -just async_std example validator-webserver -- 2 http://127.0.0.1:4444 -``` - -OR: -``` -just async_std example multi-webserver -- 9000 9001 9002 -just async_std example orchestrator-webserver -- http://127.0.0.1:4444 ./crates/orchestrator/run-config.toml +just async_std example orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --transactions_per_round 1 --transaction_size 512 --rounds 10 --fixed_leader_for_gpuvid 0 --webserver_url http://127.0.0.1:9000 --da_webserver_url http://127.0.0.1:9001 just async_std example multi-validator-webserver -- 10 http://127.0.0.1:4444 ``` -================All of the above are out-dated================ OR: `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://localhost:4444` -For other argument setting, checkout `read_orchestrator_initialization_config` in `crates/examples/infra/mod.rs`. +For other argument setting, checkout `read_orchestrator_init_config` in `crates/examples/infra/mod.rs`. One example is: `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 15`. -Another example is `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 20 --da_committee_size 5 --transactions_per_round 10 --transaction_size 512 --rounds 100`, I'll get throughput `0.29M/s` for this one. \ No newline at end of file +Another example is `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 20 --da_committee_size 5 --transactions_per_round 10 --transaction_size 512 --rounds 100`, I'll get throughput `0.29M/s` locally for this one. + +If using gpu-vid, you have to run: +``` +just async_std example webserver -- http://127.0.0.1:9000 +just async_std example webserver -- http://127.0.0.1:9001 +just async_std example orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --fixed_leader_for_gpuvid 1 +just async_std example_gpuvid_leader multi-validator-webserver -- 1 http://127.0.0.1:4444 +sleep 1m +just async_std example_fixed_leader multi-validator-webserver -- 9 http://127.0.0.1:4444 +``` + +Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. + + +If you don't have a gpu but want to test out fixed leader, you can run: +``` +just async_std example webserver -- http://127.0.0.1:9000 +just async_std example webserver -- http://127.0.0.1:9001 +just async_std example orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --transactions_per_round 1 --transaction_size 512 --rounds 10 --fixed_leader_for_gpuvid 2 --webserver_url http://127.0.0.1:9000 --da_webserver_url http://127.0.0.1:9001 +just async_std example_fixed_leader multi-validator-webserver -- 2 http://127.0.0.1:4444 +sleep 1m +just async_std example_fixed_leader multi-validator-webserver -- 8 http://127.0.0.1:4444 +``` + +Remember, you have to run leaders first, then other validators, so that leaders will have lower index. \ No newline at end of file diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index a70c2ff16f..bb68353fb9 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -20,6 +20,7 @@ docs = [] doc-images = [] hotshot-testing = [] randomized-leader-election = [] +fixed-leader-election = [] [dependencies] anyhow = { workspace = true } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index a95a9895f3..437cd8019a 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -24,6 +24,8 @@ pub struct GeneralStaticCommittee { committee_nodes_with_stake: Vec, /// builder nodes committee_nodes_without_stake: Vec, + /// the number of fixed leader for gpuvid + fixed_leader_for_gpuvid: usize, /// Node type phantom _type_phantom: PhantomData, } @@ -38,11 +40,13 @@ impl GeneralStaticCommittee { _nodes: &[PUBKEY], nodes_with_stake: Vec, nodes_without_stake: Vec, + fixed_leader_for_gpuvid: usize, ) -> Self { Self { nodes_with_stake: nodes_with_stake.clone(), committee_nodes_with_stake: nodes_with_stake, committee_nodes_without_stake: nodes_without_stake, + fixed_leader_for_gpuvid, _type_phantom: PhantomData, } } @@ -69,7 +73,10 @@ where self.committee_nodes_with_stake.clone() } - #[cfg(not(feature = "randomized-leader-election"))] + #[cfg(not(any( + feature = "randomized-leader-election", + feature = "fixed-leader-election" + )))] /// Index the vector of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { let index = usize::try_from(*view_number % self.nodes_with_stake.len() as u64).unwrap(); @@ -77,6 +84,15 @@ where TYPES::SignatureKey::get_public_key(&res) } + #[cfg(feature = "fixed-leader-election")] + /// Only get leader in fixed set + /// Index the fixed vector (first fixed_leader_for_gpuvid element) of public keys with the current view number + fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + let index = usize::try_from(*view_number % self.fixed_leader_for_gpuvid as u64).unwrap(); + let res = self.nodes_with_stake[index].clone(); + TYPES::SignatureKey::get_public_key(&res) + } + #[cfg(feature = "randomized-leader-election")] /// Index the vector of public keys with a random number generated using the current view number as a seed fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { @@ -117,6 +133,7 @@ where fn create_election( entries: Vec>, config: TYPES::ElectionConfigType, + fixed_leader_for_gpuvid: usize, ) -> Self { let nodes_with_stake: Vec = entries .iter() @@ -135,7 +152,7 @@ where } } debug!("Election Membership Size: {}", config.num_nodes_with_stake); - // truncate committee_nodes_with_stake to only `num_nodes` + // truncate committee_nodes_with_stake to only `num_nodes` with lower index // since the `num_nodes_without_stake` are not part of the committee, committee_nodes_with_stake.truncate(config.num_nodes_with_stake.try_into().unwrap()); committee_nodes_without_stake.truncate(config.num_nodes_without_stake.try_into().unwrap()); @@ -143,6 +160,7 @@ where nodes_with_stake, committee_nodes_with_stake, committee_nodes_without_stake, + fixed_leader_for_gpuvid, _type_phantom: PhantomData, } } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index a1700cdff5..9362b1a14c 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -44,6 +44,7 @@ num_nodes_with_stake = 10 num_nodes_without_stake = 0 staked_committee_nodes = 10 non_staked_committee_nodes = 0 +fixed_leader_for_gpuvid = 0 max_transactions = 1 min_transactions = 1 next_view_timeout = 30000 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index c50d29fa31..798858b208 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -9,7 +9,7 @@ use std::{fs, path::Path}; use surf_disco::Url; use thiserror::Error; use toml; -use tracing::error; +use tracing::{error, info}; use crate::client::OrchestratorClient; @@ -196,7 +196,7 @@ impl NetworkConfig { libp2p_public_key: Option, ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { if let Some(file) = file { - error!("Retrieving config from the file"); + info!("Retrieving config from the file"); // if we pass in file, try there first match Self::from_file(file.clone()) { Ok(config) => Ok((config, NetworkConfigSource::File)), @@ -217,7 +217,7 @@ impl NetworkConfig { } } } else { - error!("Retrieving config from the orchestrator"); + info!("Retrieving config from the orchestrator"); // otherwise just get from orchestrator Ok(( @@ -276,7 +276,7 @@ impl NetworkConfig { .await; run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; - error!("Retrieved config; our node index is {node_index}."); + info!("Retrieved config; our node index is {node_index}."); Ok((run_config, source)) } @@ -509,6 +509,8 @@ pub struct HotShotConfigFile { pub staked_committee_nodes: usize, /// Number of non-staking committee nodes pub non_staked_committee_nodes: usize, + /// Number of fixed leaders for GPU VID + pub fixed_leader_for_gpuvid: usize, /// Maximum transactions per block pub max_transactions: NonZeroUsize, /// Minimum transactions per block @@ -598,6 +600,7 @@ impl From> for HotS my_own_validator_config: val.my_own_validator_config, da_staked_committee_size: val.staked_committee_nodes, da_non_staked_committee_size: val.non_staked_committee_nodes, + fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, next_view_timeout: val.next_view_timeout, view_sync_timeout: val.view_sync_timeout, timeout_ratio: val.timeout_ratio, @@ -651,6 +654,7 @@ impl Default for HotShotConfigFile { known_nodes_without_stake: vec![], staked_committee_nodes: 5, non_staked_committee_nodes: 0, + fixed_leader_for_gpuvid: 0, max_transactions: NonZeroUsize::new(100).unwrap(), min_transactions: 1, next_view_timeout: 10000, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 2845c98211..fdf3dab009 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -98,18 +98,22 @@ pub async fn build_system_handle( quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), committee_election_config, + config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config, + config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index db51786bbf..feca1f3de1 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -287,6 +287,7 @@ impl TestMetadata { my_own_validator_config, da_staked_committee_size, da_non_staked_committee_size, + fixed_leader_for_gpuvid: 0, next_view_timeout: 500, view_sync_timeout: Duration::from_millis(250), timeout_ratio: (11, 10), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 3d52f794dd..0da1ed74c3 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -346,6 +346,7 @@ where quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), @@ -353,14 +354,17 @@ where config.da_staked_committee_size as u64, config.num_nodes_without_stake as u64, ), + config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config, + config.fixed_leader_for_gpuvid, ), }; let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; diff --git a/testing/tests/network_task.rs b/testing/tests/network_task.rs index d9e51ce796..1d7518e3a0 100644 --- a/testing/tests/network_task.rs +++ b/testing/tests/network_task.rs @@ -50,6 +50,7 @@ async fn test_network_task() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, ); let channel = networks.0.clone(); let network_state: NetworkEventTaskState, _> = @@ -118,6 +119,7 @@ async fn test_network_storage_fail() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, ); let channel = networks.0.clone(); let network_state: NetworkEventTaskState, _> = diff --git a/types/src/lib.rs b/types/src/lib.rs index fd23ad5a86..36e91fdbf6 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -167,6 +167,8 @@ pub struct HotShotConfig { pub da_staked_committee_size: usize, /// List of DA committee nodes (non-staking)nodes for static DA committee pub da_non_staked_committee_size: usize, + /// Number of fixed leaders for GPU VID, normally it will be 0, it's only used when running GPU VID + pub fixed_leader_for_gpuvid: usize, /// Base duration for next-view timeout, in milliseconds pub next_view_timeout: u64, /// Duration of view sync round timeouts diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 0f3eec9b53..4c5ded2953 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -48,6 +48,7 @@ pub trait Membership: fn create_election( entries: Vec>, config: TYPES::ElectionConfigType, + fixed_leader_for_gpuvid: usize, ) -> Self; /// Clone the public key and corresponding stake table for current elected committee From 736b235618a45a3af2dc9cf4015a99b09b820659 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 3 Apr 2024 09:18:16 -0400 Subject: [PATCH 0918/1393] [Hotfix] Fix VID supplied in leaf (#2886) * fix VID supplied in leaf * remove iteration * remove map from leaf type * update committable * Revert "update committable" This reverts commit 54c5b1977507956536a18bbbadd65dd66ea3e35d. --- task-impls/src/consensus.rs | 12 ++++++------ types/src/event.rs | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 13acdda885..b9dc6e601d 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -844,16 +844,16 @@ impl, A: ConsensusApi + leaf.fill_block_payload_unchecked(payload); } - let vid = VidDisperseShare::to_vid_disperse( - consensus + // Get the VID share at the leaf's view number, corresponding to our key + // (if one exists) + let vid_share = consensus .vid_shares .get(&leaf.get_view_number()) .unwrap_or(&HashMap::new()) - .iter() - .map(|(_key, proposal)| &proposal.data) - ); + .get(&self.public_key).map(|proposal| proposal.data.clone()); - leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid)); + // Add our data into a new `LeafInfo` + leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid_share)); leafs_decided.push(leaf.clone()); if let Some(ref payload) = leaf.get_block_payload() { for txn in payload diff --git a/types/src/event.rs b/types/src/event.rs index 97543c23d5..224adad9a5 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate, @@ -34,8 +34,8 @@ pub struct LeafInfo { pub state: Arc<::ValidatedState>, /// Optional application-specific state delta. pub delta: Option::ValidatedState as ValidatedState>::Delta>>, - /// Optional VID disperse data. - pub vid: Option>, + /// Optional VID share data. + pub vid_share: Option>, } impl LeafInfo { @@ -44,13 +44,13 @@ impl LeafInfo { leaf: Leaf, state: Arc<::ValidatedState>, delta: Option::ValidatedState as ValidatedState>::Delta>>, - vid: Option>, + vid_share: Option>, ) -> Self { Self { leaf, state, delta, - vid, + vid_share, } } } From 915cf3e5aa52930318eb68a8cf7522991d0560e5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 3 Apr 2024 13:04:47 -0400 Subject: [PATCH 0919/1393] Split tests (#2891) --- macros/Cargo.toml | 5 +- macros/src/lib.rs | 252 +++++++++++++++++ testing-macros/Cargo.toml | 44 --- testing-macros/src/lib.rs | 256 ------------------ testing-macros/tests/tests.rs | 191 ------------- testing/Cargo.toml | 1 + testing/README.md | 4 +- testing/tests/lossy.rs | 118 -------- testing/tests/tests_1.rs | 3 + testing/tests/{ => tests_1}/block_builder.rs | 0 testing/tests/{ => tests_1}/consensus_task.rs | 0 testing/tests/{ => tests_1}/da_task.rs | 0 testing/tests/{ => tests_1}/gen_key_pair.rs | 0 testing/tests/{ => tests_1}/libp2p.rs | 0 testing/tests/{unit => tests_1}/message.rs | 0 testing/tests/{ => tests_1}/network_task.rs | 0 .../tests/{ => tests_1}/proposal_ordering.rs | 0 .../{ => tests_1}/quorum_proposal_task.rs | 0 .../tests/{ => tests_1}/quorum_vote_task.rs | 0 testing/tests/tests_1/test_success.rs | 27 ++ testing/tests/tests_1/test_with_failures_2.rs | 48 ++++ testing/tests/{ => tests_1}/upgrade_task.rs | 0 testing/tests/{ => tests_1}/vid_task.rs | 0 testing/tests/{ => tests_1}/view_sync_task.rs | 0 testing/tests/tests_2.rs | 3 + testing/tests/{ => tests_2}/catchup.rs | 0 testing/tests/{ => tests_2}/push_cdn.rs | 0 .../tests/tests_2/test_with_failures_one.rs | 36 +++ testing/tests/tests_3.rs | 3 + testing/tests/{ => tests_3}/memory_network.rs | 3 - .../tests_3/test_with_failures_half_f.rs | 48 ++++ testing/tests/tests_4.rs | 3 + testing/tests/tests_4/test_with_failures_f.rs | 61 +++++ testing/tests/tests_5.rs | 3 + .../tests/{ => tests_5}/combined_network.rs | 0 testing/tests/{ => tests_5}/timeout.rs | 0 .../tests/{ => tests_5}/unreliable_network.rs | 0 testing/tests/{ => tests_5}/web_server.rs | 0 testing/tests/unit.rs | 3 - 39 files changed, 493 insertions(+), 619 deletions(-) delete mode 100644 testing-macros/Cargo.toml delete mode 100644 testing-macros/src/lib.rs delete mode 100644 testing-macros/tests/tests.rs delete mode 100644 testing/tests/lossy.rs create mode 100644 testing/tests/tests_1.rs rename testing/tests/{ => tests_1}/block_builder.rs (100%) rename testing/tests/{ => tests_1}/consensus_task.rs (100%) rename testing/tests/{ => tests_1}/da_task.rs (100%) rename testing/tests/{ => tests_1}/gen_key_pair.rs (100%) rename testing/tests/{ => tests_1}/libp2p.rs (100%) rename testing/tests/{unit => tests_1}/message.rs (100%) rename testing/tests/{ => tests_1}/network_task.rs (100%) rename testing/tests/{ => tests_1}/proposal_ordering.rs (100%) rename testing/tests/{ => tests_1}/quorum_proposal_task.rs (100%) rename testing/tests/{ => tests_1}/quorum_vote_task.rs (100%) create mode 100644 testing/tests/tests_1/test_success.rs create mode 100644 testing/tests/tests_1/test_with_failures_2.rs rename testing/tests/{ => tests_1}/upgrade_task.rs (100%) rename testing/tests/{ => tests_1}/vid_task.rs (100%) rename testing/tests/{ => tests_1}/view_sync_task.rs (100%) create mode 100644 testing/tests/tests_2.rs rename testing/tests/{ => tests_2}/catchup.rs (100%) rename testing/tests/{ => tests_2}/push_cdn.rs (100%) create mode 100644 testing/tests/tests_2/test_with_failures_one.rs create mode 100644 testing/tests/tests_3.rs rename testing/tests/{ => tests_3}/memory_network.rs (97%) create mode 100644 testing/tests/tests_3/test_with_failures_half_f.rs create mode 100644 testing/tests/tests_4.rs create mode 100644 testing/tests/tests_4/test_with_failures_f.rs create mode 100644 testing/tests/tests_5.rs rename testing/tests/{ => tests_5}/combined_network.rs (100%) rename testing/tests/{ => tests_5}/timeout.rs (100%) rename testing/tests/{ => tests_5}/unreliable_network.rs (100%) rename testing/tests/{ => tests_5}/web_server.rs (100%) delete mode 100644 testing/tests/unit.rs diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 40604461e1..84a5909c87 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "hotshot-macros" -version = "0.1.0" -edition = "2021" +version = { workspace = true } +edition = { workspace = true } description = "Macros for hotshot tests" [dependencies] @@ -9,6 +9,7 @@ description = "Macros for hotshot tests" quote = "1" syn = { version = "2", features = ["full", "extra-traits"] } proc-macro2 = "1" +derive_builder = "0.20" [lib] proc-macro = true diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 4f2373f4c3..931102763c 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -1,7 +1,259 @@ //! Macros for use in testing. use proc_macro::TokenStream; +use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; +use syn::parse::Result; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, +}; + +/// description of a crosstest +#[derive(derive_builder::Builder, Debug, Clone)] +struct CrossTestData { + /// imlementations + impls: ExprArray, + /// types + types: ExprArray, + /// name of the test + test_name: Ident, + /// test description/spec + metadata: Expr, + /// whether or not to ignore + ignore: LitBool, +} + +impl CrossTestDataBuilder { + /// if we've extracted all the metadata + fn is_ready(&self) -> bool { + self.impls.is_some() + && self.types.is_some() + && self.test_name.is_some() + && self.metadata.is_some() + && self.test_name.is_some() + && self.ignore.is_some() + } +} + +/// requisite data to generate a single test +#[derive(derive_builder::Builder, Debug, Clone)] +struct TestData { + /// type + ty: ExprPath, + /// impl + imply: ExprPath, + /// name of test + test_name: Ident, + /// test description + metadata: Expr, + /// whether or not to ignore the test + ignore: LitBool, +} + +/// trait make a string lower and snake case +trait ToLowerSnakeStr { + /// make a lower and snake case string + fn to_lower_snake_str(&self) -> String; +} + +impl ToLowerSnakeStr for ExprPath { + fn to_lower_snake_str(&self) -> String { + self.path + .segments + .iter() + .fold(String::new(), |mut acc, s| { + acc.push_str(&s.ident.to_string().to_lowercase()); + acc.push('_'); + acc + }) + .to_lowercase() + } +} + +impl ToLowerSnakeStr for ExprTuple { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn to_lower_snake_str(&self) -> String { + self.elems + .iter() + .map(|x| { + let Expr::Path(expr_path) = x else { + panic!("Expected path expr, got {x:?}"); + }; + expr_path + }) + .fold(String::new(), |mut acc, s| { + acc.push_str(&s.to_lower_snake_str()); + acc + }) + } +} + +impl TestData { + /// generate the code for a single test + fn generate_test(&self) -> TokenStream2 { + let TestData { + ty, + imply, + test_name, + metadata, + ignore, + } = self; + + let slow_attribute = if ignore.value() { + // quote! { #[cfg(feature = "slow-tests")] } + quote! { #[ignore] } + } else { + quote! {} + }; + quote! { + #[cfg(test)] + #slow_attribute + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread") + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tracing::instrument] + async fn #test_name() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test().await; + } + } + } +} + +/// macro specific custom keywords +mod keywords { + syn::custom_keyword!(Metadata); + syn::custom_keyword!(Ignore); + syn::custom_keyword!(TestName); + syn::custom_keyword!(Types); + syn::custom_keyword!(Impls); +} + +impl Parse for CrossTestData { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn parse(input: ParseStream<'_>) -> Result { + let mut description = CrossTestDataBuilder::create_empty(); + + while !description.is_ready() { + if input.peek(keywords::Types) { + let _ = input.parse::()?; + input.parse::()?; + let types = input.parse::()?; + description.types(types); + } else if input.peek(keywords::Impls) { + let _ = input.parse::()?; + input.parse::()?; + let impls = input.parse::()?; + description.impls(impls); + } else if input.peek(keywords::TestName) { + let _ = input.parse::()?; + input.parse::()?; + let test_name = input.parse::()?; + description.test_name(test_name); + } else if input.peek(keywords::Metadata) { + let _ = input.parse::()?; + input.parse::()?; + let metadata = input.parse::()?; + description.metadata(metadata); + } else if input.peek(keywords::Ignore) { + let _ = input.parse::()?; + input.parse::()?; + let ignore = input.parse::()?; + description.ignore(ignore); + } else { + panic!( + "Unexpected token. Expected one of: Metadata, Ignore, Impls, Types, Testname" + ); + } + if input.peek(Token![,]) { + input.parse::()?; + } + } + description + .build() + .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{e}"))) + } +} + +/// Helper function to do the actual code gen +/// allow panic because this is a compiler error +#[allow(clippy::panic)] +fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { + let impls = test_spec.impls.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Impl! Got {t:?}"); + }; + p + }); + // + let types = test_spec.types.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Type! Got {t:?}"); + }; + p + }); + + let mut result = quote! {}; + for ty in types.clone() { + let mut type_mod = quote! {}; + for imp in impls.clone() { + let test_data = TestDataBuilder::create_empty() + .test_name(test_spec.test_name.clone()) + .metadata(test_spec.metadata.clone()) + .ignore(test_spec.ignore.clone()) + .imply(imp.clone()) + .ty(ty.clone()) + .build() + .unwrap(); + let test = test_data.generate_test(); + + let impl_str = format_ident!("{}", imp.to_lower_snake_str()); + let impl_result = quote! { + pub mod #impl_str { + use super::*; + #test + } + }; + type_mod.extend(impl_result); + } + let ty_str = format_ident!("{}", ty.to_lower_snake_str()); + let typ_result = quote! { + pub mod #ty_str { + use super::*; + #type_mod + } + }; + result.extend(typ_result); + } + let name = test_spec.test_name; + quote! { + pub mod #name { + use super::*; + #result + } + } + .into() +} + +/// Generate a cartesian product of tests across all types +/// Arguments: +/// - `Impls: []` - a list of types that implement nodetype +/// - `Metadata`: `TestMetadata::default()` - test metadata +/// - `Types: []` - a list types that implement `NodeImplementation` over the types in `Impls` +/// - `TestName: example_test` - the name of the test +/// - `Ignore`: whether or not this set of tests are ignored +/// Example usage: see tests in this module +#[proc_macro] +pub fn cross_tests(input: TokenStream) -> TokenStream { + let test_spec = parse_macro_input!(input as CrossTestData); + cross_tests_internal(test_spec) +} /// Macro to test multiple `TaskState` scripts at once. /// diff --git a/testing-macros/Cargo.toml b/testing-macros/Cargo.toml deleted file mode 100644 index 282c8feb67..0000000000 --- a/testing-macros/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "hotshot-testing-macros" -version = { workspace = true } -edition = { workspace = true } -description = "Macros for creating hotshot tests" - -[dependencies] -ark-bls12-381 = { workspace = true } -async-compatibility-layer = { workspace = true } -async-trait = { workspace = true } -# needed for vrf demo -# so non-optional for now -blake3 = { workspace = true } -commit = { workspace = true } -either = { workspace = true } -futures = { workspace = true } -hotshot = { path = "../hotshot", default-features = false } -hotshot-types = { path = "../types" } -hotshot-testing = { path = "../testing", default-features = false } -hotshot-example-types = { path = "../example-types" } -jf-primitives = { workspace = true } -rand = { workspace = true } -snafu = { workspace = true } -tracing = { workspace = true } -serde = { workspace = true } -# proc macro stuff -quote = "1" -syn = { version = "2", features = ["full", "extra-traits"] } -proc-macro2 = "1" -derive_builder = "0.20" - -[dev-dependencies] -async-lock = { workspace = true } - -[lib] -proc-macro = true - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints] -workspace = true diff --git a/testing-macros/src/lib.rs b/testing-macros/src/lib.rs deleted file mode 100644 index 77459c00e1..0000000000 --- a/testing-macros/src/lib.rs +++ /dev/null @@ -1,256 +0,0 @@ -//! Macros for testing over all network implementations and nodetype implementations - -use proc_macro::TokenStream; -use proc_macro2::TokenStream as TokenStream2; -use quote::{format_ident, quote}; -use syn::parse::Result; -use syn::{ - parse::{Parse, ParseStream}, - parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, -}; - -/// description of a crosstest -#[derive(derive_builder::Builder, Debug, Clone)] -struct CrossTestData { - /// imlementations - impls: ExprArray, - /// types - types: ExprArray, - /// name of the test - test_name: Ident, - /// test description/spec - metadata: Expr, - /// whether or not to ignore - ignore: LitBool, -} - -impl CrossTestDataBuilder { - /// if we've extracted all the metadata - fn is_ready(&self) -> bool { - self.impls.is_some() - && self.types.is_some() - && self.test_name.is_some() - && self.metadata.is_some() - && self.test_name.is_some() - && self.ignore.is_some() - } -} - -/// requisite data to generate a single test -#[derive(derive_builder::Builder, Debug, Clone)] -struct TestData { - /// type - ty: ExprPath, - /// impl - imply: ExprPath, - /// name of test - test_name: Ident, - /// test description - metadata: Expr, - /// whether or not to ignore the test - ignore: LitBool, -} - -/// trait make a string lower and snake case -trait ToLowerSnakeStr { - /// make a lower and snake case string - fn to_lower_snake_str(&self) -> String; -} - -impl ToLowerSnakeStr for ExprPath { - fn to_lower_snake_str(&self) -> String { - self.path - .segments - .iter() - .fold(String::new(), |mut acc, s| { - acc.push_str(&s.ident.to_string().to_lowercase()); - acc.push('_'); - acc - }) - .to_lowercase() - } -} - -impl ToLowerSnakeStr for ExprTuple { - /// allow panic because this is a compiler error - #[allow(clippy::panic)] - fn to_lower_snake_str(&self) -> String { - self.elems - .iter() - .map(|x| { - let Expr::Path(expr_path) = x else { - panic!("Expected path expr, got {x:?}"); - }; - expr_path - }) - .fold(String::new(), |mut acc, s| { - acc.push_str(&s.to_lower_snake_str()); - acc - }) - } -} - -impl TestData { - /// generate the code for a single test - fn generate_test(&self) -> TokenStream2 { - let TestData { - ty, - imply, - test_name, - metadata, - ignore, - } = self; - - let slow_attribute = if ignore.value() { - // quote! { #[cfg(feature = "slow-tests")] } - quote! { #[ignore] } - } else { - quote! {} - }; - quote! { - #[cfg(test)] - #slow_attribute - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread") - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[tracing::instrument] - async fn #test_name() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test().await; - } - } - } -} - -/// macro specific custom keywords -mod keywords { - syn::custom_keyword!(Metadata); - syn::custom_keyword!(Ignore); - syn::custom_keyword!(TestName); - syn::custom_keyword!(Types); - syn::custom_keyword!(Impls); -} - -impl Parse for CrossTestData { - /// allow panic because this is a compiler error - #[allow(clippy::panic)] - fn parse(input: ParseStream<'_>) -> Result { - let mut description = CrossTestDataBuilder::create_empty(); - - while !description.is_ready() { - if input.peek(keywords::Types) { - let _ = input.parse::()?; - input.parse::()?; - let types = input.parse::()?; - description.types(types); - } else if input.peek(keywords::Impls) { - let _ = input.parse::()?; - input.parse::()?; - let impls = input.parse::()?; - description.impls(impls); - } else if input.peek(keywords::TestName) { - let _ = input.parse::()?; - input.parse::()?; - let test_name = input.parse::()?; - description.test_name(test_name); - } else if input.peek(keywords::Metadata) { - let _ = input.parse::()?; - input.parse::()?; - let metadata = input.parse::()?; - description.metadata(metadata); - } else if input.peek(keywords::Ignore) { - let _ = input.parse::()?; - input.parse::()?; - let ignore = input.parse::()?; - description.ignore(ignore); - } else { - panic!( - "Unexpected token. Expected one of: Metadata, Ignore, Impls, Types, Testname" - ); - } - if input.peek(Token![,]) { - input.parse::()?; - } - } - description - .build() - .map_err(|e| syn::Error::new(proc_macro2::Span::call_site(), format!("{e}"))) - } -} - -/// Helper function to do the actual code gen -/// allow panic because this is a compiler error -#[allow(clippy::panic)] -fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { - let impls = test_spec.impls.elems.iter().map(|t| { - let Expr::Path(p) = t else { - panic!("Expected Path for Impl! Got {t:?}"); - }; - p - }); - // - let types = test_spec.types.elems.iter().map(|t| { - let Expr::Path(p) = t else { - panic!("Expected Path for Type! Got {t:?}"); - }; - p - }); - - let mut result = quote! {}; - for ty in types.clone() { - let mut type_mod = quote! {}; - for imp in impls.clone() { - let test_data = TestDataBuilder::create_empty() - .test_name(test_spec.test_name.clone()) - .metadata(test_spec.metadata.clone()) - .ignore(test_spec.ignore.clone()) - .imply(imp.clone()) - .ty(ty.clone()) - .build() - .unwrap(); - let test = test_data.generate_test(); - - let impl_str = format_ident!("{}", imp.to_lower_snake_str()); - let impl_result = quote! { - pub mod #impl_str { - use super::*; - #test - } - }; - type_mod.extend(impl_result); - } - let ty_str = format_ident!("{}", ty.to_lower_snake_str()); - let typ_result = quote! { - pub mod #ty_str { - use super::*; - #type_mod - } - }; - result.extend(typ_result); - } - let name = test_spec.test_name; - quote! { - pub mod #name { - use super::*; - #result - } - } - .into() -} - -/// Generate a cartesian product of tests across all types -/// Arguments: -/// - `Impls: []` - a list of types that implement nodetype -/// - `Metadata`: `TestMetadata::default()` - test metadata -/// - `Types: []` - a list types that implement `NodeImplementation` over the types in `Impls` -/// - `TestName: example_test` - the name of the test -/// - `Ignore`: whether or not this set of tests are ignored -/// Example usage: see tests in this module -#[proc_macro] -pub fn cross_tests(input: TokenStream) -> TokenStream { - let test_spec = parse_macro_input!(input as CrossTestData); - cross_tests_internal(test_spec) -} diff --git a/testing-macros/tests/tests.rs b/testing-macros/tests/tests.rs deleted file mode 100644 index 8c2be38260..0000000000 --- a/testing-macros/tests/tests.rs +++ /dev/null @@ -1,191 +0,0 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; -use hotshot_example_types::state_types::TestTypes; -use hotshot_testing::completion_task::{ - CompletionTaskDescription, TimeBasedCompletionTaskDescription, -}; -use hotshot_testing::spinning_task::ChangeNode; -use hotshot_testing::spinning_task::SpinningTaskDescription; -use hotshot_testing::spinning_task::UpDown; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing_macros::cross_tests; -use std::time::Duration; - -cross_tests!( - TestName: test_success, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Ignore: false, - Metadata: { - TestMetadata { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - ..TestMetadata::default() - } - }, -); - -// Test one node leaving the network. -cross_tests!( - TestName: test_with_failures_one, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Ignore: false, - Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); - metadata.num_bootstrap_nodes = 19; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ChangeNode { - idx: 19, - updown: UpDown::Down, - }]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)] - }; - metadata.overall_safety_properties.num_failed_views = 3; - metadata.overall_safety_properties.num_successful_views = 25; - metadata - } -); - -// Test f/2 nodes leaving the network. -cross_tests!( - TestName: test_with_failures_half_f, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Ignore: false, - Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); - metadata.num_bootstrap_nodes = 17; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)] - }; - - metadata.overall_safety_properties.num_failed_views = 3; - // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - metadata - } -); - -// Test f nodes leaving the network. -cross_tests!( - TestName: test_with_failures_f, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Ignore: false, - Metadata: { - - let mut metadata = TestMetadata::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 6; - // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - metadata.num_bootstrap_nodes = 14; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 14, - updown: UpDown::Down, - }, - ChangeNode { - idx: 15, - updown: UpDown::Down, - }, - ChangeNode { - idx: 16, - updown: UpDown::Down, - }, - ChangeNode { - idx: 17, - updown: UpDown::Down, - }, - ChangeNode { - idx: 18, - updown: UpDown::Down, - }, - ChangeNode { - idx: 19, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)] - }; - - metadata - } -); - -// Test that a good leader can succeed in the view directly after view sync -cross_tests!( - TestName: test_with_failures_2, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Ignore: false, - Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); - metadata.num_bootstrap_nodes = 10; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - // TODO: Update message broadcasting to avoid hanging - // - let dead_nodes = vec![ - ChangeNode { - idx: 10, - updown: UpDown::Down, - }, - ChangeNode { - idx: 11, - updown: UpDown::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)] - }; - - // 2 nodes fail triggering view sync, expect no other timeouts - metadata.overall_safety_properties.num_failed_views = 2; - // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 15; - - metadata - } -); diff --git a/testing/Cargo.toml b/testing/Cargo.toml index c446189679..04c4f98b70 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -12,6 +12,7 @@ slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] [dependencies] +automod = "1.0.14" async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } diff --git a/testing/README.md b/testing/README.md index 67551acc79..96016cd427 100644 --- a/testing/README.md +++ b/testing/README.md @@ -1,6 +1,6 @@ # Purpose -Infrastructure and integration tests for hotshot. +Infrastructure and integration tests for hotshot. Since a lot of our tests can take a while to run, they've been split into groups to allow for parallelization in CI. # Usage @@ -69,4 +69,4 @@ async { }; ``` -See TODO for examples. \ No newline at end of file +See TODO for examples. diff --git a/testing/tests/lossy.rs b/testing/tests/lossy.rs deleted file mode 100644 index 0559423b29..0000000000 --- a/testing/tests/lossy.rs +++ /dev/null @@ -1,118 +0,0 @@ -// TODO these should be config options for lossy network -// #![allow(clippy::type_complexity)] -// use hotshot_example_types::{ -// network_reliability::{AsynchronousNetwork, PartiallySynchronousNetwork, SynchronousNetwork}, -// test_builder::{TestBuilder, TestMetadata}, -// test_types::{StaticCommitteeTestTypes, StaticNodeImplType}, -// }; -// use std::sync::Arc; -// use tracing::instrument; -// -// // tests base level of working synchronous network -// #[cfg_attr( -// feature = "tokio-executor", -// tokio::test(flavor = "multi_thread") -// )] -// #[cfg_attr(feature = "async-std-executor", async_std::test)] -// #[instrument] -// async fn test_no_loss_network() { -// let builder = TestBuilder { -// metadata: TestMetadata { -// total_nodes: 10, -// start_nodes: 10, -// network_reliability: Some(Arc::new(SynchronousNetwork::default())), -// ..TestMetadata::default() -// }, -// ..Default::default() -// }; -// builder -// .build::() -// .launch() -// .run_test() -// .await -// .unwrap(); -// } -// -// // // tests network with forced packet delay -// #[cfg_attr( -// feature = "tokio-executor", -// tokio::test(flavor = "multi_thread") -// )] -// #[cfg_attr(feature = "async-std-executor", async_std::test)] -// #[instrument] -// async fn test_synchronous_network() { -// let builder = TestBuilder { -// metadata: TestMetadata { -// total_nodes: 5, -// start_nodes: 5, -// num_succeeds: 2, -// ..TestMetadata::default() -// }, -// ..Default::default() -// }; -// builder -// .build::() -// .launch() -// .run_test() -// .await -// .unwrap(); -// } -// -// // tests network with small packet delay and dropped packets -// #[cfg_attr( -// feature = "tokio-executor", -// tokio::test(flavor = "multi_thread") -// )] -// #[cfg_attr(feature = "async-std-executor", async_std::test)] -// #[instrument] -// #[ignore] -// async fn test_asynchronous_network() { -// let builder = TestBuilder { -// metadata: TestMetadata { -// total_nodes: 5, -// start_nodes: 5, -// num_succeeds: 2, -// failure_threshold: 5, -// network_reliability: Some(Arc::new(AsynchronousNetwork::new(97, 100, 0, 5))), -// ..TestMetadata::default() -// }, -// ..Default::default() -// }; -// builder -// .build::() -// .launch() -// .run_test() -// .await -// .unwrap(); -// } -// -// /// tests network with asynchronous patch that eventually becomes synchronous -// #[cfg_attr( -// feature = "tokio-executor", -// tokio::test(flavor = "multi_thread") -// )] -// #[cfg_attr(feature = "async-std-executor", async_std::test)] -// #[instrument] -// #[ignore] -// async fn test_partially_synchronous_network() { -// let asn = AsynchronousNetwork::new(90, 100, 0, 0); -// let sn = SynchronousNetwork::new(10, 0); -// let gst = std::time::Duration::new(10, 0); -// -// let builder = TestBuilder { -// metadata: TestMetadata { -// total_nodes: 5, -// start_nodes: 5, -// num_succeeds: 2, -// network_reliability: Some(Arc::new(PartiallySynchronousNetwork::new(asn, sn, gst))), -// ..TestMetadata::default() -// }, -// ..Default::default() -// }; -// builder -// .build::() -// .launch() -// .run_test() -// .await -// .unwrap(); -// } diff --git a/testing/tests/tests_1.rs b/testing/tests/tests_1.rs new file mode 100644 index 0000000000..a856dca727 --- /dev/null +++ b/testing/tests/tests_1.rs @@ -0,0 +1,3 @@ +mod tests_1 { + automod::dir!("tests/tests_1"); +} diff --git a/testing/tests/block_builder.rs b/testing/tests/tests_1/block_builder.rs similarity index 100% rename from testing/tests/block_builder.rs rename to testing/tests/tests_1/block_builder.rs diff --git a/testing/tests/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs similarity index 100% rename from testing/tests/consensus_task.rs rename to testing/tests/tests_1/consensus_task.rs diff --git a/testing/tests/da_task.rs b/testing/tests/tests_1/da_task.rs similarity index 100% rename from testing/tests/da_task.rs rename to testing/tests/tests_1/da_task.rs diff --git a/testing/tests/gen_key_pair.rs b/testing/tests/tests_1/gen_key_pair.rs similarity index 100% rename from testing/tests/gen_key_pair.rs rename to testing/tests/tests_1/gen_key_pair.rs diff --git a/testing/tests/libp2p.rs b/testing/tests/tests_1/libp2p.rs similarity index 100% rename from testing/tests/libp2p.rs rename to testing/tests/tests_1/libp2p.rs diff --git a/testing/tests/unit/message.rs b/testing/tests/tests_1/message.rs similarity index 100% rename from testing/tests/unit/message.rs rename to testing/tests/tests_1/message.rs diff --git a/testing/tests/network_task.rs b/testing/tests/tests_1/network_task.rs similarity index 100% rename from testing/tests/network_task.rs rename to testing/tests/tests_1/network_task.rs diff --git a/testing/tests/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs similarity index 100% rename from testing/tests/proposal_ordering.rs rename to testing/tests/tests_1/proposal_ordering.rs diff --git a/testing/tests/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs similarity index 100% rename from testing/tests/quorum_proposal_task.rs rename to testing/tests/tests_1/quorum_proposal_task.rs diff --git a/testing/tests/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs similarity index 100% rename from testing/tests/quorum_vote_task.rs rename to testing/tests/tests_1/quorum_vote_task.rs diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs new file mode 100644 index 0000000000..65e07fdba6 --- /dev/null +++ b/testing/tests/tests_1/test_success.rs @@ -0,0 +1,27 @@ +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; +use hotshot_example_types::state_types::TestTypes; +use hotshot_macros::cross_tests; +use hotshot_testing::completion_task::{ + CompletionTaskDescription, TimeBasedCompletionTaskDescription, +}; +use hotshot_testing::test_builder::TestMetadata; +use std::time::Duration; + +cross_tests!( + TestName: test_success, + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + TestMetadata { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestMetadata::default() + } + }, +); + diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs new file mode 100644 index 0000000000..67806d82bb --- /dev/null +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -0,0 +1,48 @@ +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; +use hotshot_example_types::state_types::TestTypes; +use hotshot_macros::cross_tests; +use hotshot_testing::spinning_task::ChangeNode; +use hotshot_testing::spinning_task::SpinningTaskDescription; +use hotshot_testing::spinning_task::UpDown; +use hotshot_testing::test_builder::TestMetadata; + +// Test that a good leader can succeed in the view directly after view sync +cross_tests!( + TestName: test_with_failures_2, + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestMetadata::default_more_nodes(); + metadata.num_bootstrap_nodes = 10; + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 10, + updown: UpDown::Down, + }, + ChangeNode { + idx: 11, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 2; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 15; + + metadata + } +); diff --git a/testing/tests/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs similarity index 100% rename from testing/tests/upgrade_task.rs rename to testing/tests/tests_1/upgrade_task.rs diff --git a/testing/tests/vid_task.rs b/testing/tests/tests_1/vid_task.rs similarity index 100% rename from testing/tests/vid_task.rs rename to testing/tests/tests_1/vid_task.rs diff --git a/testing/tests/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs similarity index 100% rename from testing/tests/view_sync_task.rs rename to testing/tests/tests_1/view_sync_task.rs diff --git a/testing/tests/tests_2.rs b/testing/tests/tests_2.rs new file mode 100644 index 0000000000..517ed4c2ca --- /dev/null +++ b/testing/tests/tests_2.rs @@ -0,0 +1,3 @@ +mod tests_2 { + automod::dir!("tests/tests_2"); +} diff --git a/testing/tests/catchup.rs b/testing/tests/tests_2/catchup.rs similarity index 100% rename from testing/tests/catchup.rs rename to testing/tests/tests_2/catchup.rs diff --git a/testing/tests/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs similarity index 100% rename from testing/tests/push_cdn.rs rename to testing/tests/tests_2/push_cdn.rs diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs new file mode 100644 index 0000000000..79730a840a --- /dev/null +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -0,0 +1,36 @@ +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; +use hotshot_example_types::state_types::TestTypes; +use hotshot_macros::cross_tests; +use hotshot_testing::spinning_task::ChangeNode; +use hotshot_testing::spinning_task::SpinningTaskDescription; +use hotshot_testing::spinning_task::UpDown; +use hotshot_testing::test_builder::TestMetadata; + + +// Test one node leaving the network. +cross_tests!( + TestName: test_with_failures_one, + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestMetadata::default_more_nodes(); + metadata.num_bootstrap_nodes = 19; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ChangeNode { + idx: 19, + updown: UpDown::Down, + }]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + metadata.overall_safety_properties.num_failed_views = 3; + metadata.overall_safety_properties.num_successful_views = 25; + metadata + } +); diff --git a/testing/tests/tests_3.rs b/testing/tests/tests_3.rs new file mode 100644 index 0000000000..c24caf7c74 --- /dev/null +++ b/testing/tests/tests_3.rs @@ -0,0 +1,3 @@ +mod tests_3 { + automod::dir!("tests/tests_3"); +} diff --git a/testing/tests/memory_network.rs b/testing/tests/tests_3/memory_network.rs similarity index 97% rename from testing/tests/memory_network.rs rename to testing/tests/tests_3/memory_network.rs index 1b60b9117c..5212c21c53 100644 --- a/testing/tests/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -59,11 +59,8 @@ impl NodeType for Test { #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct TestImpl {} -pub type ThisMembership = GeneralStaticCommittee::SignatureKey>; pub type DANetwork = MemoryNetwork, ::SignatureKey>; pub type QuorumNetwork = MemoryNetwork, ::SignatureKey>; -pub type ViewSyncNetwork = MemoryNetwork, ::SignatureKey>; -pub type VIDNetwork = MemoryNetwork, ::SignatureKey>; impl NodeImplementation for TestImpl { type QuorumNetwork = QuorumNetwork; diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs new file mode 100644 index 0000000000..a732a8b0cd --- /dev/null +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -0,0 +1,48 @@ +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; +use hotshot_example_types::state_types::TestTypes; +use hotshot_macros::cross_tests; +use hotshot_testing::spinning_task::ChangeNode; +use hotshot_testing::spinning_task::SpinningTaskDescription; +use hotshot_testing::spinning_task::UpDown; +use hotshot_testing::test_builder::TestMetadata; + +// Test f/2 nodes leaving the network. +cross_tests!( + TestName: test_with_failures_half_f, + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestMetadata::default_more_nodes(); + metadata.num_bootstrap_nodes = 17; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + metadata.overall_safety_properties.num_failed_views = 3; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + metadata + } +); + diff --git a/testing/tests/tests_4.rs b/testing/tests/tests_4.rs new file mode 100644 index 0000000000..e8de159afc --- /dev/null +++ b/testing/tests/tests_4.rs @@ -0,0 +1,3 @@ +mod tests_4 { + automod::dir!("tests/tests_4"); +} diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs new file mode 100644 index 0000000000..1623649307 --- /dev/null +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -0,0 +1,61 @@ +use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; +use hotshot_example_types::state_types::TestTypes; +use hotshot_macros::cross_tests; +use hotshot_testing::spinning_task::ChangeNode; +use hotshot_testing::spinning_task::SpinningTaskDescription; +use hotshot_testing::spinning_task::UpDown; +use hotshot_testing::test_builder::TestMetadata; + +// Test f nodes leaving the network. +cross_tests!( + TestName: test_with_failures_f, + Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + + let mut metadata = TestMetadata::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + metadata.num_bootstrap_nodes = 14; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + // TODO: Update message broadcasting to avoid hanging + // + let dead_nodes = vec![ + ChangeNode { + idx: 14, + updown: UpDown::Down, + }, + ChangeNode { + idx: 15, + updown: UpDown::Down, + }, + ChangeNode { + idx: 16, + updown: UpDown::Down, + }, + ChangeNode { + idx: 17, + updown: UpDown::Down, + }, + ChangeNode { + idx: 18, + updown: UpDown::Down, + }, + ChangeNode { + idx: 19, + updown: UpDown::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + metadata + } +); + diff --git a/testing/tests/tests_5.rs b/testing/tests/tests_5.rs new file mode 100644 index 0000000000..040fc1e199 --- /dev/null +++ b/testing/tests/tests_5.rs @@ -0,0 +1,3 @@ +mod tests_5 { + automod::dir!("tests/tests_5"); +} diff --git a/testing/tests/combined_network.rs b/testing/tests/tests_5/combined_network.rs similarity index 100% rename from testing/tests/combined_network.rs rename to testing/tests/tests_5/combined_network.rs diff --git a/testing/tests/timeout.rs b/testing/tests/tests_5/timeout.rs similarity index 100% rename from testing/tests/timeout.rs rename to testing/tests/tests_5/timeout.rs diff --git a/testing/tests/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs similarity index 100% rename from testing/tests/unreliable_network.rs rename to testing/tests/tests_5/unreliable_network.rs diff --git a/testing/tests/web_server.rs b/testing/tests/tests_5/web_server.rs similarity index 100% rename from testing/tests/web_server.rs rename to testing/tests/tests_5/web_server.rs diff --git a/testing/tests/unit.rs b/testing/tests/unit.rs deleted file mode 100644 index ff857ae443..0000000000 --- a/testing/tests/unit.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod unit { - mod message; -} From b46cbdef202bb4f8761083512fa53d4eb25cf96a Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 3 Apr 2024 15:24:48 -0400 Subject: [PATCH 0920/1393] update push CDN (#2882) --- examples/infra/mod.rs | 3 +-- .../src/traits/networking/combined_network.rs | 4 ---- .../src/traits/networking/libp2p_network.rs | 5 ---- .../src/traits/networking/memory_network.rs | 5 ---- .../src/traits/networking/push_cdn_network.rs | 24 +++++++------------ .../traits/networking/web_server_network.rs | 5 ---- types/src/traits/network.rs | 4 ---- 7 files changed, 10 insertions(+), 40 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index fb9fcbb4b3..e56f7cc615 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -749,8 +749,7 @@ where topics.iter().map(ToString::to_string).collect(), keypair, ) - .await - .expect("failed to perform initial client connection"); + .expect("failed to create network"); PushCdnDaRun { config, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 8580cf356b..e0a1b05e02 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -313,10 +313,6 @@ impl ConnectedNetwork, TYPES::SignatureKey> ); } - async fn is_ready(&self) -> bool { - self.primary().is_ready().await && self.secondary().is_ready().await - } - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> where 'a: 'b, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 8f2c2c38d5..43030e565a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -872,11 +872,6 @@ impl ConnectedNetwork for Libp2p unimplemented!("Resuming not implemented for the Libp2p network"); } - #[instrument(name = "Libp2pNetwork::ready_nonblocking", skip_all)] - async fn is_ready(&self) -> bool { - self.inner.is_ready.load(Ordering::Relaxed) - } - #[instrument(name = "Libp2pNetwork::shut_down", skip_all)] fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> where diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 8f9e634f9c..847052d173 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -224,11 +224,6 @@ impl ConnectedNetwork for Memory unimplemented!("Resuming not implemented for the Memory network"); } - #[instrument(name = "MemoryNetwork::ready_nonblocking")] - async fn is_ready(&self) -> bool { - true - } - #[instrument(name = "MemoryNetwork::shut_down")] fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> where diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 67e4dc7147..5df45ed62b 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -143,8 +143,8 @@ impl PushCdnNetwork { /// marshal. /// /// # Errors - /// If we fail the initial connection - pub async fn new( + /// If we fail to build the config + pub fn new( marshal_endpoint: String, topics: Vec, keypair: KeyPair>, @@ -162,8 +162,8 @@ impl PushCdnNetwork { .keypair(keypair) .build()?; - // Create the client, performing the initial connection - let client = Client::new(config).await?; + // Create the client from the config + let client = Client::new(config); Ok(Self { client, @@ -338,9 +338,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Create our client let client = Arc::new(PushCdnNetwork { - client: Client::new(client_config) - .await - .expect("failed to create client"), + client: Client::new(client_config), #[cfg(feature = "hotshot-testing")] is_paused: Arc::from(AtomicBool::new(false)), }); @@ -367,19 +365,15 @@ impl ConnectedNetwork, TYPES::SignatureKey> self.is_paused.store(true, Ordering::Relaxed); } - /// Resumse sending and receiving on the PushCDN network. + /// Resume sending and receiving on the PushCDN network. fn resume(&self) { #[cfg(feature = "hotshot-testing")] self.is_paused.store(false, Ordering::Relaxed); } - /// The clients form an initial connection when created, so we don't have to wait. - async fn wait_for_ready(&self) {} - - /// The clients form an initial connection when created, so we can return `true` here - /// always. - async fn is_ready(&self) -> bool { - true + /// Wait for the client to initialize the connection + async fn wait_for_ready(&self) { + self.client.ensure_initialized().await; } /// TODO: shut down the networks. Unneeded for testing. diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 6384a0cdb9..87bccefa60 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -726,11 +726,6 @@ impl error!("Resuming CDN network"); self.inner.running.store(true, Ordering::Relaxed); } - /// checks if the network is ready - /// nonblocking - async fn is_ready(&self) -> bool { - self.inner.connected.load(Ordering::Relaxed) - } /// Blocks until the network is shut down /// then returns true diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 16ad614497..a0237f2b5e 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -315,10 +315,6 @@ pub trait ConnectedNetwork: /// Blocks until the network is successfully initialized async fn wait_for_ready(&self); - /// checks if the network is ready - /// nonblocking - async fn is_ready(&self) -> bool; - /// Blocks until the network is shut down /// then returns true fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> From 2eff0b4edabf400620da430cc01455710cbf004d Mon Sep 17 00:00:00 2001 From: Anders Konring Date: Thu, 4 Apr 2024 17:14:22 +0200 Subject: [PATCH 0921/1393] remove as_ref on commitment (#2892) --- task-impls/src/vid.rs | 2 +- types/src/data.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 07c1aefd0b..a8a9b2f2c5 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -108,7 +108,7 @@ impl, A: ConsensusApi + let view_number = *view_number; let Ok(signature) = TYPES::SignatureKey::sign( &self.private_key, - vid_disperse.payload_commitment.as_ref().as_ref(), + vid_disperse.payload_commitment.as_ref(), ) else { error!("VID: failed to sign dispersal payload"); return None; diff --git a/types/src/data.rs b/types/src/data.rs index f0f3664a44..42f61ce16a 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -229,7 +229,7 @@ impl VidDisperseShare { private_key: &::PrivateKey, ) -> Option> { let Ok(signature) = - TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref().as_ref()) + TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) else { error!("VID: failed to sign dispersal share payload"); return None; @@ -589,9 +589,9 @@ impl Committable for Leaf { .u64_field("view number", *self.view_number) .u64_field("block number", self.get_height()) .field("parent Leaf commitment", self.parent_commitment) - .fixed_size_field( + .var_size_field( "block payload commitment", - self.get_payload_commitment().as_ref().as_ref(), + self.get_payload_commitment().as_ref(), ) .field("justify qc", self.justify_qc.commit()) .optional("upgrade certificate", &self.upgrade_certificate) From a0fcf0f6eb5be7d4ef380671864e7557e497f299 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 4 Apr 2024 23:22:05 +0200 Subject: [PATCH 0922/1393] [BUILDER] Builder integration (#2837) * WIP: Builder integration * Fix random_builder test (again) * Add decided_transactions set to simple builder * Pick the biggest block * Skip re-claiming identical blocks * Sort blocks by fee * Parametrize Version in transaction task * Merge fix * sleep in wait loop * increase round_start_delay * Spawn only 1 builder task and server (#2907) --------- Co-authored-by: Brendon Fish Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- hotshot/src/tasks/mod.rs | 2 +- hotshot/src/tasks/task_state.rs | 17 +- macros/src/lib.rs | 2 +- orchestrator/src/config.rs | 4 + task-impls/src/builder.rs | 35 +- task-impls/src/da.rs | 4 +- task-impls/src/events.rs | 2 +- task-impls/src/transactions.rs | 309 +++++++----------- task-impls/src/vid.rs | 4 +- testing/src/block_builder.rs | 157 ++++++--- testing/src/spinning_task.rs | 4 +- testing/src/test_builder.rs | 7 +- testing/src/test_runner.rs | 50 ++- testing/tests/tests_1/block_builder.rs | 141 ++------ testing/tests/tests_1/da_task.rs | 4 +- testing/tests/tests_1/libp2p.rs | 9 +- testing/tests/tests_1/test_success.rs | 2 +- testing/tests/tests_1/test_with_failures_2.rs | 2 +- testing/tests/tests_1/vid_task.rs | 2 +- testing/tests/tests_2/catchup.rs | 15 +- testing/tests/tests_2/push_cdn.rs | 3 +- .../tests/tests_2/test_with_failures_one.rs | 2 +- .../tests_3/test_with_failures_half_f.rs | 2 +- testing/tests/tests_4/test_with_failures_f.rs | 2 +- testing/tests/tests_5/combined_network.rs | 13 +- testing/tests/tests_5/timeout.rs | 6 +- testing/tests/tests_5/unreliable_network.rs | 17 +- testing/tests/tests_5/web_server.rs | 3 +- types/Cargo.toml | 1 + types/src/lib.rs | 3 + types/src/traits/signature_key.rs | 3 +- 31 files changed, 396 insertions(+), 431 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 43c19a5c86..4da8d9a0be 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -315,7 +315,7 @@ pub async fn add_transaction_task> rx: Receiver>>, handle: &SystemContextHandle, ) { - let transactions_state = TransactionTaskState::create_from(handle).await; + let transactions_state = TransactionTaskState::<_, _, _, Version01>::create_from(handle).await; let task = Task::new(tx, rx, task_reg.clone(), transactions_state); task_reg.run_task(task).await; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 39668862d1..eea538ff48 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,6 +1,7 @@ use crate::types::SystemContextHandle; use async_trait::async_trait; +use hotshot_task_impls::builder::BuilderClient; use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; use hotshot_task_impls::{ consensus::ConsensusTaskState, da::DATaskState, quorum_vote::QuorumVoteTaskState, @@ -11,11 +12,7 @@ use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; +use std::{collections::HashMap, marker::PhantomData}; use versioned_binary_serialization::version::StaticVersionType; /// Trait for creating task states. @@ -153,23 +150,23 @@ impl> CreateTaskState } #[async_trait] -impl> CreateTaskState - for TransactionTaskState> +impl, Ver: StaticVersionType> + CreateTaskState + for TransactionTaskState, Ver> { async fn create_from( handle: &SystemContextHandle, - ) -> TransactionTaskState> { + ) -> TransactionTaskState, Ver> { TransactionTaskState { api: handle.clone(), consensus: handle.hotshot.get_consensus(), - transactions: Arc::default(), - seen_transactions: HashSet::new(), cur_view: handle.get_cur_view().await, network: handle.hotshot.networks.quorum_network.clone(), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, + builder_client: BuilderClient::new(handle.hotshot.config.builder_url.clone()), } } } diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 931102763c..f179c8a94e 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -119,7 +119,7 @@ impl TestData { async fn #test_name() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test().await; + (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test::>().await; } } } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 798858b208..344ecd80d4 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -533,6 +533,8 @@ pub struct HotShotConfigFile { pub propose_max_round_time: Duration, /// Time to wait until we request data associated with a proposal pub data_request_delay: Duration, + /// Builder API base URL + pub builder_url: Url, } /// Holds configuration for a validator node @@ -611,6 +613,7 @@ impl From> for HotS propose_max_round_time: val.propose_max_round_time, data_request_delay: val.data_request_delay, election_config: None, + builder_url: val.builder_url, } } } @@ -666,6 +669,7 @@ impl Default for HotShotConfigFile { propose_min_round_time: Duration::from_secs(0), propose_max_round_time: Duration::from_secs(10), data_request_delay: Duration::from_millis(200), + builder_url: Url::parse("http://localhost:3311").unwrap(), } } } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index ddcbebb49e..5ec595727b 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -1,7 +1,10 @@ use async_compatibility_layer::art::async_sleep; use std::time::{Duration, Instant}; -use hotshot_builder_api::builder::{BuildError, Error as BuilderApiError}; +use hotshot_builder_api::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{BuildError, Error as BuilderApiError}, +}; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, @@ -62,11 +65,7 @@ pub struct BuilderClient { _marker: std::marker::PhantomData, } -impl BuilderClient -where - <::SignatureKey as SignatureKey>::PureAssembledSignatureType: - for<'a> TryFrom<&'a TaggedBase64> + Into, -{ +impl BuilderClient { /// Construct a new client from base url pub fn new(base_url: impl Into) -> Self { Self { @@ -102,7 +101,7 @@ where pub async fn get_available_blocks( &self, parent: VidCommitment, - ) -> Result, BuilderClientError> { + ) -> Result>, BuilderClientError> { self.inner .get(&format!("availableblocks/{parent}")) .send() @@ -119,7 +118,7 @@ where &self, block_hash: BuilderCommitment, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result { + ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner .get(&format!("claimblock/{block_hash}/{encoded_signature}")) @@ -127,4 +126,24 @@ where .await .map_err(Into::into) } + + /// Claim block header input + /// + /// # Errors + /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block_header_input( + &self, + block_hash: BuilderCommitment, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.inner + .get(&format!( + "claimheaderinput/{block_hash}/{encoded_signature}" + )) + .send() + .await + .map_err(Into::into) + } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 07576e819a..4445bb1b9c 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -294,7 +294,7 @@ impl, A: ConsensusApi + return None; } - HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view) => { + HotShotEvent::BlockRecv(encoded_transactions, metadata, view) => { let view = *view; self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) @@ -367,7 +367,7 @@ impl, A: ConsensusApi + HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown - | HotShotEvent::TransactionsSequenced(_, _, _) + | HotShotEvent::BlockRecv(_, _, _) | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) | HotShotEvent::DAProposalValidated(_, _) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index e66734f0b2..26a5cd0be7 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -114,7 +114,7 @@ pub enum HotShotEvent { TYPES::Time, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number - TransactionsSequenced( + BlockRecv( Vec, ::Metadata, TYPES::Time, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7bd2e696a5..5d74d259f6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,16 +1,12 @@ use crate::{ + builder::BuilderClient, events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, }; use async_broadcast::Sender; -use async_compatibility_layer::{ - art::async_timeout, - async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, -}; +use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; -use bincode::config::Options; -use commit::{Commitment, Committable}; - +use hotshot_builder_api::block_info::{AvailableBlockData, AvailableBlockHeaderInput}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, @@ -23,23 +19,20 @@ use hotshot_types::{ signature_key::SignatureKey, BlockPayload, }, - utils::bincode_opts, }; use std::{ - collections::{HashMap, HashSet}, sync::Arc, - time::Instant, + time::{Duration, Instant}, }; -use tracing::{debug, error, instrument, warn}; - -/// A type alias for `HashMap, T>` -type CommitmentMap = HashMap, T>; +use tracing::{debug, error, instrument}; +use versioned_binary_serialization::version::StaticVersionType; /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, + Ver: StaticVersionType, > { /// The state's api pub api: A, @@ -50,18 +43,15 @@ pub struct TransactionTaskState< /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>, - /// A list of undecided transactions - pub transactions: Arc>>, - - /// A list of transactions we've seen decided, but didn't receive - pub seen_transactions: HashSet>, - /// Network for all nodes pub network: Arc, /// Membership for the quorum pub membership: Arc, + /// Builder API client + pub builder_client: BuilderClient, + /// This Nodes Public Key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -70,12 +60,15 @@ pub struct TransactionTaskState< pub id: u64, } -impl, A: ConsensusApi + 'static> - TransactionTaskState +impl< + TYPES: NodeType, + I: NodeImplementation, + A: ConsensusApi + 'static, + Ver: StaticVersionType, + > TransactionTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - pub async fn handle( &mut self, event: Arc>, @@ -83,93 +76,14 @@ impl, A: ConsensusApi + ) -> Option { match event.as_ref() { HotShotEvent::TransactionsRecv(transactions) => { - futures::join! { - self.api - .send_event(Event { - view_number: self.cur_view, - event: EventType::Transactions { - transactions: transactions.clone(), - }, - }), - async { - let consensus = self.consensus.read().await; - self.transactions - .modify(|txns| { - for transaction in transactions { - let size = - bincode_opts().serialized_size(&transaction).unwrap_or(0); - - // If we didn't already know about this transaction, update our mempool metrics. - if !self.seen_transactions.remove(&transaction.commit()) - && txns.insert(transaction.commit(), transaction.clone()).is_none() - { - consensus.metrics.outstanding_transactions.update(1); - consensus - .metrics - .outstanding_transactions_memory_size - .update(i64::try_from(size).unwrap_or_else(|e| { - warn!( - "Conversion failed: {e}. Using the max value." - ); - i64::MAX - })); - } - } - }) - .await; - } - }; - - return None; - } - HotShotEvent::LeafDecided(leaf_chain) => { - let mut included_txns = HashSet::new(); - let mut included_txn_size = 0; - let mut included_txn_count = 0; - for leaf in leaf_chain { - if let Some(ref payload) = leaf.get_block_payload() { - for txn in - payload.transaction_commitments(leaf.get_block_header().metadata()) - { - included_txns.insert(txn); - } - } - } - let consensus = self.consensus.read().await; - let txns = self.transactions.cloned().await; - - let _ = included_txns.iter().map(|hash| { - if !txns.contains_key(hash) { - self.seen_transactions.insert(*hash); - } - }); - drop(txns); - self.transactions - .modify(|txns| { - *txns = txns - .drain() - .filter(|(txn_hash, txn)| { - if included_txns.contains(txn_hash) { - included_txn_count += 1; - included_txn_size += - bincode_opts().serialized_size(txn).unwrap_or_default(); - false - } else { - true - } - }) - .collect(); + self.api + .send_event(Event { + view_number: self.cur_view, + event: EventType::Transactions { + transactions: transactions.clone(), + }, }) .await; - - consensus - .metrics - .outstanding_transactions - .update(-included_txn_count); - consensus - .metrics - .outstanding_transactions_memory_size - .update(-(i64::try_from(included_txn_size).unwrap_or(i64::MAX))); return None; } HotShotEvent::ViewChange(view) => { @@ -191,39 +105,30 @@ impl, A: ConsensusApi + debug!("Not next leader for view {:?}", self.cur_view); return None; } - // TODO (Keyao) Determine whether to allow empty blocks. - // - let txns = self.wait_for_transactions().await?; - let (payload, metadata) = - match ::from_transactions(txns) { - Ok((payload, metadata)) => (payload, metadata), + + if let Some((block, _)) = self.wait_for_block().await { + // send the sequenced transactions to VID and DA tasks + let block_view = if make_block { view } else { view + 1 }; + let encoded_transactions = match block.block_payload.encode() { + Ok(encoded) => encoded.into_iter().collect::>(), Err(e) => { - error!("Failed to build the block payload: {:?}.", e); + error!("Failed to encode the block payload: {:?}.", e); return None; } }; - - // encode the transactions - let encoded_transactions = match payload.encode() { - Ok(encoded) => encoded.into_iter().collect::>(), - Err(e) => { - error!("Failed to encode the block payload: {:?}.", e); - return None; - } + broadcast_event( + Arc::new(HotShotEvent::BlockRecv( + encoded_transactions, + block.metadata, + block_view, + )), + &event_stream, + ) + .await; + } else { + error!("Failed to get a block from the builder"); }; - // send the sequenced transactions to VID and DA tasks - let block_view = if make_block { view } else { view + 1 }; - broadcast_event( - Arc::new(HotShotEvent::TransactionsSequenced( - encoded_transactions, - metadata, - block_view, - )), - &event_stream, - ) - .await; - return None; } HotShotEvent::Shutdown => { @@ -235,74 +140,95 @@ impl, A: ConsensusApi + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - async fn wait_for_transactions(&self) -> Option> { + async fn wait_for_block( + &self, + ) -> Option<(AvailableBlockData, AvailableBlockHeaderInput)> { let task_start_time = Instant::now(); - // TODO (Keyao) Investigate the use of transaction hash - // - // let parent_leaf = self.parent_leaf().await?; - // let previous_used_txns = match parent_leaf.tarnsaction_commitments { - // Some(txns) => txns, - // None => HashSet::new(), - // }; + let last_leaf = self.consensus.read().await.get_decided_leaf(); + let mut latest_block: Option<( + AvailableBlockData, + AvailableBlockHeaderInput, + )> = None; + while task_start_time.elapsed() < self.api.propose_max_round_time() + && latest_block.as_ref().map_or(true, |(data, _)| { + data.block_payload.get_transactions(&data.metadata).len() + < self.api.min_transactions() + }) + { + let mut available_blocks = match self + .builder_client + .get_available_blocks(last_leaf.get_block_header().payload_commitment()) + .await + { + Ok(blocks) => blocks, + Err(err) => { + error!(%err, "Couldn't get available blocks"); + continue; + } + }; - let receiver = self.transactions.subscribe().await; + available_blocks.sort_by_key(|block_info| block_info.offered_fee); - loop { - let all_txns = self.transactions.cloned().await; - debug!("Size of transactions: {}", all_txns.len()); - // TODO (Keyao) Investigate the use of transaction hash - // - // let unclaimed_txns: Vec<_> = all_txns - // .iter() - // .filter(|(txn_hash, _txn)| !previous_used_txns.contains(txn_hash)) - // .collect(); - let unclaimed_txns = all_txns; + let Some(block_info) = available_blocks.pop() else { + continue; + }; - let time_past = task_start_time.elapsed(); - if unclaimed_txns.len() < self.api.min_transactions() - && (time_past < self.api.propose_max_round_time()) - { - let duration = self.api.propose_max_round_time() - time_past; - let result = async_timeout(duration, receiver.recv()).await; - match result { - Err(_) => { - // Fall through below to updating new block - debug!( - "propose_max_round_time passed, sending transactions we have so far" - ); - } - Ok(Err(e)) => { - // Something unprecedented is wrong, and `transactions` has been dropped - error!("Channel receiver error for SubscribableRwLock {:?}", e); - return None; - } - Ok(Ok(_)) => continue, + // Don't try to re-claim the same block if builder advertises it again + if latest_block.as_ref().map_or(false, |block| { + block.0.block_payload.builder_commitment(&block.0.metadata) == block_info.block_hash + }) { + continue; + } + + let Ok(signature) = <::SignatureKey as SignatureKey>::sign( + &self.private_key, + block_info.block_hash.as_ref(), + ) else { + error!("Failed to sign block hash"); + continue; + }; + + let (block, header_input) = futures::join! { + self.builder_client.claim_block(block_info.block_hash.clone(), &signature), + self.builder_client.claim_block_header_input(block_info.block_hash, &signature) + }; + + let block = match block { + Ok(val) => val, + Err(err) => { + error!(%err, "Failed to claim block"); + continue; } + }; + + let header_input = match header_input { + Ok(val) => val, + Err(err) => { + error!(%err, "Failed to claim block"); + continue; + } + }; + + let num_txns = block.block_payload.get_transactions(&block.metadata).len(); + + latest_block = Some((block, header_input)); + if num_txns >= self.api.min_transactions() { + return latest_block; } - break; + async_sleep(Duration::from_millis(100)).await; } - let all_txns = self.transactions.cloned().await; - // TODO (Keyao) Investigate the use of transaction hash - // - let txns: Vec = all_txns.values().cloned().collect(); - // let txns: Vec = all_txns - // .iter() - // .filter_map(|(txn_hash, txn)| { - // if previous_used_txns.contains(txn_hash) { - // None - // } else { - // Some(txn.clone()) - // } - // }) - // .collect(); - Some(txns) + latest_block } } /// task state implementation for Transactions Task -impl, A: ConsensusApi + 'static> TaskState - for TransactionTaskState +impl< + TYPES: NodeType, + I: NodeImplementation, + A: ConsensusApi + 'static, + Ver: StaticVersionType + 'static, + > TaskState for TransactionTaskState { type Event = Arc>; @@ -312,7 +238,6 @@ impl, A: ConsensusApi + !matches!( event.as_ref(), HotShotEvent::TransactionsRecv(_) - | HotShotEvent::LeafDecided(_) | HotShotEvent::Shutdown | HotShotEvent::ViewChange(_) ) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index a8a9b2f2c5..8cabb08878 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -67,7 +67,7 @@ impl, A: ConsensusApi + event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { + HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number) => { let encoded_transactions = encoded_transactions.clone(); // get the number of quorum committee members to be used for VID calculation let num_storage_nodes = self.membership.total_nodes(); @@ -217,7 +217,7 @@ impl, A: ConsensusApi + !matches!( event.as_ref(), HotShotEvent::Shutdown - | HotShotEvent::TransactionsSequenced(_, _, _) + | HotShotEvent::BlockRecv(_, _, _) | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) | HotShotEvent::DAProposalValidated(_, _) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index bdacbcee6e..dd0df2dbcf 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -10,8 +10,11 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use commit::{Commitment, Committable}; -use futures::future::BoxFuture; -use hotshot::{traits::BlockPayload, types::SignatureKey}; +use futures::{future::BoxFuture, Stream, StreamExt}; +use hotshot::{ + traits::{BlockPayload, TestableNodeImplementation}, + types::{Event, EventType, SignatureKey}, +}; use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Options}, @@ -21,8 +24,6 @@ use hotshot_example_types::{ block_types::{TestBlockPayload, TestTransaction}, node_types::TestTypes, }; -use hotshot_task::task::{Task, TaskState}; -use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{block_contents::vid_commitment, election::Membership, node_implementation::NodeType}, @@ -33,7 +34,68 @@ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, App, Url}; -use crate::test_runner::HotShotTaskCompleted; +#[async_trait] +pub trait TestBuilderImplementation { + type TYPES: NodeType; + type I: TestableNodeImplementation; + async fn start( + membership: Arc<::Membership>, + ) -> (Option>>, Url); +} + +pub struct RandomBuilderImplementation> { + _marker: std::marker::PhantomData, +} + +#[async_trait] +impl> TestBuilderImplementation + for RandomBuilderImplementation +{ + type TYPES = TestTypes; + type I = I; + + async fn start( + _membership: Arc<::Membership>, + ) -> (Option>>, Url) { + let port = portpicker::pick_unused_port().expect("No free ports"); + let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); + run_random_builder(url.clone()); + (None, url) + } +} + +pub struct SimpleBuilderImplementation> { + _marker: std::marker::PhantomData, +} + +#[async_trait] +impl> TestBuilderImplementation + for SimpleBuilderImplementation +{ + type TYPES = TestTypes; + type I = I; + + async fn start( + membership: Arc<::Membership>, + ) -> (Option>>, Url) { + let port = portpicker::pick_unused_port().expect("No free ports"); + let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); + let (source, task) = make_simple_builder(membership).await; + + let builder_api = + hotshot_builder_api::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let mut app: App = + App::with_state(source); + app.register_module("/", builder_api) + .expect("Failed to register the builder API"); + + async_spawn(app.serve(url.clone(), STATIC_VER_0_1)); + (Some(Box::new(task)), url) + } +} /// Entry for a built block struct BlockEntry { @@ -326,55 +388,63 @@ impl SimpleBuilderSource { } } +#[derive(Clone)] pub struct SimpleBuilderTask { transactions: Arc, TestTransaction>>>, blocks: Arc>>, + decided_transactions: LruCache, ()>, } -impl TaskState for SimpleBuilderTask { - type Event = Arc>; +pub trait BuilderTask: Send + Sync { + type TYPES: NodeType; - type Output = HotShotTaskCompleted; + fn start( + self: Box, + stream: Box> + std::marker::Unpin + Send + 'static>, + ); +} - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::TransactionsRecv(_) - | HotShotEvent::LeafDecided(_) - | HotShotEvent::Shutdown - ) - } +impl BuilderTask for SimpleBuilderTask { + type TYPES = TestTypes; - async fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> Option { - let this = task.state_mut(); - match event.as_ref() { - HotShotEvent::TransactionsRecv(transactions) => { - let mut queue = this.transactions.write().await; - for transaction in transactions { - queue.insert(transaction.commit(), transaction.clone()); - } - } - HotShotEvent::LeafDecided(leaf_chain) => { - let mut queue = this.transactions.write().await; - for leaf in leaf_chain.iter() { - if let Some(ref payload) = leaf.get_block_payload() { - for txn in payload.transaction_commitments(&()) { - queue.remove(&txn); - } + fn start( + mut self: Box, + mut stream: Box< + dyn Stream> + std::marker::Unpin + Send + 'static, + >, + ) { + async_spawn(async move { + loop { + match stream.next().await { + None => { + break; } + Some(evt) => match evt.event { + EventType::Decide { leaf_chain, .. } => { + let mut queue = self.transactions.write().await; + for leaf_info in leaf_chain.iter() { + if let Some(ref payload) = leaf_info.leaf.get_block_payload() { + for txn in payload.transaction_commitments(&()) { + self.decided_transactions.put(txn, ()); + queue.remove(&txn); + } + } + } + self.blocks.write().await.clear(); + } + EventType::Transactions { transactions } => { + let mut queue = self.transactions.write().await; + for transaction in transactions { + if !self.decided_transactions.contains(&transaction.commit()) { + queue.insert(transaction.commit(), transaction.clone()); + } + } + } + _ => {} + }, } - this.blocks.write().await.clear(); } - _ => {} - }; - None - } - - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + }); } } @@ -398,6 +468,7 @@ pub async fn make_simple_builder( let task = SimpleBuilderTask { transactions, blocks, + decided_transactions: LruCache::new(NonZeroUsize::new(u16::MAX.into()).expect("> 0")), }; (source, task) diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 0772914734..aef4583a2c 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -154,13 +154,15 @@ where } }; + let handle = context.run_tasks().await; + // Create the node and add it to the state, so we can shut them // down properly later to avoid the overflow error in the overall // safety task. let node = Node { node_id, networks: node.networks, - handle: context.run_tasks().await, + handle, }; state.handles.push(node.clone()); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index feca1f3de1..7d5a224ce3 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -3,6 +3,7 @@ use hotshot_example_types::storage_types::TestStorage; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::traits::election::Membership; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; +use tide_disco::Url; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; @@ -87,7 +88,7 @@ impl Default for TimingData { round_start_delay: 100, start_delay: 100, propose_min_round_time: Duration::new(0, 0), - propose_max_round_time: Duration::from_millis(100), + propose_max_round_time: Duration::from_millis(1000), data_request_delay: Duration::from_millis(200), secondary_network_delay: Duration::from_millis(1000), view_sync_timeout: Duration::from_millis(2000), @@ -291,7 +292,7 @@ impl TestMetadata { next_view_timeout: 500, view_sync_timeout: Duration::from_millis(250), timeout_ratio: (11, 10), - round_start_delay: 1, + round_start_delay: 25, start_delay: 1, // TODO do we use these fields?? propose_min_round_time: Duration::from_millis(0), @@ -302,6 +303,8 @@ impl TestMetadata { num_nodes_with_stake as u64, 0, )), + // Placeholder until we spin up the builder + builder_url: Url::parse("http://localhost:9999").expect("Valid URL"), }; let TimingData { next_view_timeout, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0da1ed74c3..2b0a676e6e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -5,6 +5,7 @@ use super::{ txn_task::TxnTask, }; use crate::{ + block_builder::TestBuilderImplementation, completion_task::CompletionTaskDescription, spinning_task::{ChangeNode, SpinningTask, UpDown}, test_launcher::{Networks, TestLauncher}, @@ -65,7 +66,6 @@ pub type LateNodeContext = Either< >; /// A yet-to-be-started node that participates in tests -#[derive(Clone)] pub struct LateStartNode> { /// The underlying networks belonging to the node pub networks: Networks, @@ -132,7 +132,7 @@ where /// # Panics /// if the test fails #[allow(clippy::too_many_lines)] - pub async fn run_test(mut self) { + pub async fn run_test>(mut self) { let (tx, rx) = broadcast(EVENT_CHANNEL_SIZE); let spinning_changes = self .launcher @@ -150,7 +150,7 @@ where } } - self.add_nodes( + self.add_nodes::( self.launcher.metadata.num_nodes_with_stake, &late_start_nodes, ) @@ -327,20 +327,33 @@ where /// /// # Panics /// Panics if unable to create a [`HotShotInitializer`] - pub async fn add_nodes(&mut self, total: usize, late_start: &HashSet) -> Vec { + pub async fn add_nodes>( + &mut self, + total: usize, + late_start: &HashSet, + ) -> Vec { let mut results = vec![]; + let config = self.launcher.resource_generator.config.clone(); + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { + TYPES::Membership::default_election_config( + config.num_nodes_with_stake.get() as u64, + config.num_nodes_without_stake as u64, + ) + }); + let (mut builder_task, builder_url) = + B::start(Arc::new(::Membership::create_election( + known_nodes_with_stake.clone(), + quorum_election_config.clone(), + config.fixed_leader_for_gpuvid, + ))) + .await; for i in 0..total { + let mut config = config.clone(); let node_id = self.next_node_id; self.next_node_id += 1; tracing::debug!("launch node {}", i); - let config = self.launcher.resource_generator.config.clone(); - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - TYPES::Membership::default_election_config( - config.num_nodes_with_stake.get() as u64, - config.num_nodes_without_stake as u64, - ) - }); + let committee_election_config = I::committee_election_config_generator(); let memberships = Memberships { quorum_membership: ::Membership::create_election( @@ -363,10 +376,12 @@ where ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config, + quorum_election_config.clone(), config.fixed_leader_for_gpuvid, ), }; + config.builder_url = builder_url.clone(); + let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); @@ -403,10 +418,17 @@ where }, ); } else { + let handle = hotshot.run_tasks().await; + if node_id == 1 { + if let Some(task) = builder_task.take() { + task.start(Box::new(handle.get_event_stream())) + } + } + self.nodes.push(Node { node_id, networks, - handle: hotshot.run_tasks().await, + handle, }); } } diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 5749a7703c..5c3635c33b 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -3,15 +3,8 @@ use hotshot_example_types::{ block_types::{TestBlockPayload, TestTransaction}, node_types::TestTypes, }; -use hotshot_task_impls::{ - builder::{BuilderClient, BuilderClientError}, - events::HotShotEvent, -}; -use hotshot_testing::{ - block_builder::{make_simple_builder, run_random_builder}, - script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, -}; +use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; +use hotshot_testing::block_builder::run_random_builder; use hotshot_types::{ constants::Version01, traits::{ @@ -29,37 +22,37 @@ use tide_disco::Url; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_random_block_builder() { + use std::time::Instant; + + use hotshot_builder_api::block_info::AvailableBlockData; + let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); run_random_builder(api_url.clone()); + let builder_started = Instant::now(); let client: BuilderClient = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); - // Wait for at least one block to be built - async_sleep(Duration::from_millis(30)).await; + let mut blocks = loop { + // Test getting blocks + let blocks = client + .get_available_blocks(vid_commitment(&vec![], 1)) + .await + .expect("Failed to get available blocks"); - // Test getting blocks - let mut blocks = client - .get_available_blocks(vid_commitment(&vec![], 1)) - .await - .expect("Failed to get available blocks"); + if !blocks.is_empty() { + break blocks; + }; - { - let mut attempt = 0; + // Wait for at least one block to be built + async_sleep(Duration::from_millis(20)).await; - while blocks.is_empty() && attempt < 50 { - blocks = client - .get_available_blocks(vid_commitment(&vec![], 1)) - .await - .expect("Failed to get available blocks"); - attempt += 1; - async_sleep(Duration::from_millis(100)).await; + if builder_started.elapsed() > Duration::from_secs(2) { + panic!("Builder failed to provide blocks in two seconds"); } - - assert!(!blocks.is_empty()); - } + }; // Test claiming available block let signature = { @@ -69,8 +62,8 @@ async fn test_random_block_builder() { .expect("Failed to create dummy signature") }; - let _: TestBlockPayload = client - .claim_block(blocks.pop().unwrap(), &signature) + let _: AvailableBlockData = client + .claim_block(blocks.pop().unwrap().block_hash, &signature) .await .expect("Failed to claim block"); @@ -84,91 +77,3 @@ async fn test_random_block_builder() { .await; assert!(matches!(result, Err(BuilderClientError::NotFound))); } - -#[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_simple_block_builder() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle(2).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - - let (source, task) = make_simple_builder(quorum_membership.into()).await; - - let port = portpicker::pick_unused_port().expect("Could not find an open port"); - let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - - source.run(api_url.clone()).await; - - let client: BuilderClient = BuilderClient::new(api_url); - assert!(client.connect(Duration::from_millis(100)).await); - - // Before block-building task is spun up, should only have an empty block - { - let mut blocks = client - .get_available_blocks(vid_commitment(&vec![], 1)) - .await - .expect("Failed to get available blocks"); - - assert_eq!(blocks.len(), 1); - - let signature = { - let (_key, private_key) = - ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); - ::SignatureKey::sign(&private_key, &[0_u8; 32]) - .expect("Failed to create dummy signature") - }; - - let payload: TestBlockPayload = client - .claim_block(blocks.pop().unwrap(), &signature) - .await - .expect("Failed to claim block"); - - assert_eq!(payload.transactions.len(), 0); - } - - { - let stage_1 = TestScriptStage { - inputs: vec![ - HotShotEvent::TransactionsRecv(vec![ - TestTransaction(vec![1]), - TestTransaction(vec![2]), - ]), - HotShotEvent::TransactionsRecv(vec![TestTransaction(vec![3])]), - ], - outputs: vec![], - asserts: vec![], - }; - - let stages = vec![stage_1]; - - run_test_script(stages, task).await; - - // Test getting blocks - let mut blocks = client - .get_available_blocks(vid_commitment(&vec![], 1)) - .await - .expect("Failed to get available blocks"); - - assert!(!blocks.is_empty()); - - let signature = { - let (_key, private_key) = - ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); - ::SignatureKey::sign(&private_key, &[0_u8; 32]) - .expect("Failed to create dummy signature") - }; - - let payload: TestBlockPayload = client - .claim_block(blocks.pop().unwrap(), &signature) - .await - .expect("Failed to claim block"); - - assert_eq!(payload.transactions.len(), 3); - } -} diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 3e94173638..9be4932065 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -69,7 +69,7 @@ async fn test_da_task() { inputs: vec![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), - TransactionsSequenced(encoded_transactions.clone(), (), ViewNumber::new(2)), + BlockRecv(encoded_transactions.clone(), (), ViewNumber::new(2)), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], asserts: vec![], @@ -143,7 +143,7 @@ async fn test_da_task_storage_failure() { inputs: vec![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), - TransactionsSequenced(encoded_transactions.clone(), (), ViewNumber::new(2)), + BlockRecv(encoded_transactions.clone(), (), ViewNumber::new(2)), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], asserts: vec![], diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index e03e5ed36d..ed07389b44 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -2,6 +2,7 @@ use std::time::Duration; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -28,7 +29,6 @@ async fn libp2p_network() { ), timing_data: TimingData { next_view_timeout: 4000, - propose_max_round_time: Duration::from_millis(300), ..Default::default() }, ..TestMetadata::default_multiple_rounds() @@ -37,7 +37,7 @@ async fn libp2p_network() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -60,7 +60,6 @@ async fn libp2p_network_failures_2() { ), timing_data: TimingData { next_view_timeout: 4000, - propose_max_round_time: Duration::from_millis(100), ..Default::default() }, ..TestMetadata::default_multiple_rounds() @@ -85,7 +84,7 @@ async fn libp2p_network_failures_2() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -101,6 +100,6 @@ async fn test_stress_libp2p_network() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 65e07fdba6..c95280b941 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -6,7 +6,7 @@ use hotshot_testing::completion_task::{ }; use hotshot_testing::test_builder::TestMetadata; use std::time::Duration; - +use hotshot_testing::block_builder::SimpleBuilderImplementation; cross_tests!( TestName: test_success, Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 67806d82bb..a00c8ce2bc 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -5,7 +5,7 @@ use hotshot_testing::spinning_task::ChangeNode; use hotshot_testing::spinning_task::SpinningTaskDescription; use hotshot_testing::spinning_task::UpDown; use hotshot_testing::test_builder::TestMetadata; - +use hotshot_testing::block_builder::SimpleBuilderImplementation; // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index ab9e0689e4..c23914f9b6 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -82,7 +82,7 @@ async fn test_vid_task() { // In view 1, node 2 is the next leader. input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::TransactionsSequenced( + input.push(HotShotEvent::BlockRecv( encoded_transactions.clone(), (), ViewNumber::new(2), diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 5a2dda7d1c..74be76f0d7 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -6,6 +6,7 @@ async fn test_catchup() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -51,7 +52,7 @@ async fn test_catchup() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -63,6 +64,7 @@ async fn test_catchup_cdn() { use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -103,7 +105,7 @@ async fn test_catchup_cdn() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -116,6 +118,7 @@ async fn test_catchup_one_node() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -157,7 +160,7 @@ async fn test_catchup_one_node() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -170,6 +173,7 @@ async fn test_catchup_in_view_sync() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -217,7 +221,7 @@ async fn test_catchup_in_view_sync() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -231,6 +235,7 @@ async fn test_catchup_reload() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -277,6 +282,6 @@ async fn test_catchup_reload() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index 8ec6564180..7a37aa42b9 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -3,6 +3,7 @@ use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, @@ -39,7 +40,7 @@ async fn push_cdn_network() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; shutdown_logging(); } diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index 79730a840a..a77702dbc3 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -5,7 +5,7 @@ use hotshot_testing::spinning_task::ChangeNode; use hotshot_testing::spinning_task::SpinningTaskDescription; use hotshot_testing::spinning_task::UpDown; use hotshot_testing::test_builder::TestMetadata; - +use hotshot_testing::block_builder::SimpleBuilderImplementation; // Test one node leaving the network. cross_tests!( diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index a732a8b0cd..461fce36a0 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -5,7 +5,7 @@ use hotshot_testing::spinning_task::ChangeNode; use hotshot_testing::spinning_task::SpinningTaskDescription; use hotshot_testing::spinning_task::UpDown; use hotshot_testing::test_builder::TestMetadata; - +use hotshot_testing::block_builder::SimpleBuilderImplementation; // Test f/2 nodes leaving the network. cross_tests!( TestName: test_with_failures_half_f, diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 1623649307..97ed8f70ce 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -5,7 +5,7 @@ use hotshot_testing::spinning_task::ChangeNode; use hotshot_testing::spinning_task::SpinningTaskDescription; use hotshot_testing::spinning_task::UpDown; use hotshot_testing::test_builder::TestMetadata; - +use hotshot_testing::block_builder::SimpleBuilderImplementation; // Test f nodes leaving the network. cross_tests!( TestName: test_with_failures_f, diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index e4f30ccc3a..156ea07d5f 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -2,6 +2,7 @@ use std::time::Duration; use hotshot_example_types::node_types::{CombinedImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -16,6 +17,8 @@ use tracing::instrument; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_combined_network() { + use hotshot_testing::block_builder::SimpleBuilderImplementation; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let metadata: TestMetadata = TestMetadata { @@ -42,7 +45,7 @@ async fn test_combined_network() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -90,7 +93,7 @@ async fn test_combined_network_cdn_crash() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -144,7 +147,7 @@ async fn test_combined_network_reup() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -192,7 +195,7 @@ async fn test_combined_network_half_dc() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -262,6 +265,6 @@ async fn test_stress_combined_network_fuzzy() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index ee56cb2fc5..f2e4466afc 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -9,6 +9,7 @@ async fn test_timeout_web() { use hotshot_example_types::node_types::WebImpl; use hotshot_example_types::node_types::TestTypes; + use hotshot_testing::block_builder::SimpleBuilderImplementation; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, @@ -56,7 +57,7 @@ async fn test_timeout_web() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -70,6 +71,7 @@ async fn test_timeout_libp2p() { use hotshot_example_types::node_types::Libp2pImpl; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, @@ -122,6 +124,6 @@ async fn test_timeout_libp2p() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 4705419fb6..0ee858019d 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -1,3 +1,4 @@ +use hotshot_testing::block_builder::SimpleBuilderImplementation; use hotshot_testing::test_builder::TimingData; use hotshot_types::traits::network::AsynchronousNetwork; use hotshot_types::traits::network::ChaosNetwork; @@ -40,7 +41,7 @@ async fn libp2p_network_sync() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -73,7 +74,7 @@ async fn test_memory_network_sync() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -112,7 +113,7 @@ async fn libp2p_network_async() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -159,7 +160,7 @@ async fn test_memory_network_async() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -211,7 +212,7 @@ async fn test_memory_network_partially_sync() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -251,7 +252,7 @@ async fn libp2p_network_partially_sync() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -289,7 +290,7 @@ async fn test_memory_network_chaos() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } @@ -324,6 +325,6 @@ async fn libp2p_network_chaos() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; } diff --git a/testing/tests/tests_5/web_server.rs b/testing/tests/tests_5/web_server.rs index 786229abda..e9ba618a0e 100644 --- a/testing/tests/tests_5/web_server.rs +++ b/testing/tests/tests_5/web_server.rs @@ -3,6 +3,7 @@ use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; use hotshot_example_types::node_types::{TestTypes, WebImpl}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, test_builder::{TestMetadata, TimingData}, @@ -39,7 +40,7 @@ async fn web_server_network() { metadata .gen_launcher::(0) .launch() - .run_test() + .run_test::>() .await; shutdown_logging(); } diff --git a/types/Cargo.toml b/types/Cargo.toml index 996ecf9428..32ee10c43f 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -54,6 +54,7 @@ tagged-base64 = { workspace = true } versioned-binary-serialization = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } +url = "2.5.0" [dev-dependencies] serde_json = { workspace = true } diff --git a/types/src/lib.rs b/types/src/lib.rs index 36e91fdbf6..fc07d1f60e 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -7,6 +7,7 @@ use std::fmt::Debug; use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use tracing::error; use traits::{election::ElectionConfig, signature_key::SignatureKey}; +use url::Url; pub mod consensus; pub mod constants; pub mod data; @@ -189,4 +190,6 @@ pub struct HotShotConfig { pub data_request_delay: Duration, /// the election configuration pub election_config: Option, + /// Builder API base URL + pub builder_url: Url, } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 3b36e0ed31..38fbe41a7f 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -67,7 +67,8 @@ pub trait SignatureKey: + PartialEq + Eq + Serialize - + for<'a> Deserialize<'a>; + + for<'a> Deserialize<'a> + + Into; /// The type of the assembled qc: assembled signature + `BitVec` type QCType: Send + Sync From 075490980734851cb4d1840cfb2b712831a29333 Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Thu, 4 Apr 2024 19:30:14 -0400 Subject: [PATCH 0923/1393] fix license for builder api (#2908) * remove GPL license * Add MIT license * whoops fix comment * remove old cargo.lock --- builder-api/src/api.rs | 12 ------------ builder-api/src/lib.rs | 22 ++++++++++++++++++++++ builder-api/src/query_data.rs | 5 ----- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/builder-api/src/api.rs b/builder-api/src/api.rs index 62f2edf08f..524708eb9c 100644 --- a/builder-api/src/api.rs +++ b/builder-api/src/api.rs @@ -1,15 +1,3 @@ -// Copyright (c) 2022 Espresso Systems (espressosys.com) -// This file is part of the HotShot Query Service library. -// -// This program is free software: you can redistribute it and/or modify it under the terms of the GNU -// General Public License as published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without -// even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// General Public License for more details. -// You should have received a copy of the GNU General Public License along with this program. If not, -// see . - use std::fs; use std::path::Path; use tide_disco::api::{Api, ApiError}; diff --git a/builder-api/src/lib.rs b/builder-api/src/lib.rs index c89608d85c..152dc4a9ec 100644 --- a/builder-api/src/lib.rs +++ b/builder-api/src/lib.rs @@ -1,3 +1,25 @@ +// MIT License + +// Copyright (c) 2024 Espresso Systems + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + mod api; pub mod block_info; pub mod builder; diff --git a/builder-api/src/query_data.rs b/builder-api/src/query_data.rs index 44b2d1c24f..795eabecf1 100644 --- a/builder-api/src/query_data.rs +++ b/builder-api/src/query_data.rs @@ -1,8 +1,3 @@ -// Copyright (c) 2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot HotShot Builder Protocol. -// -// TODO: License - use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; From 718b2ea4877bfca7efb0528f82e9f498363f86f2 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 5 Apr 2024 12:02:55 -0400 Subject: [PATCH 0924/1393] fix test script (#2915) --- testing/src/script.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testing/src/script.rs b/testing/src/script.rs index ca433d83bc..8db5f14ba1 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -64,11 +64,10 @@ where PredicateResult::Pass => result, PredicateResult::Incomplete => result, PredicateResult::Fail => { - format!( + panic!( "Stage {} | Output failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", stage_number, assert, output - ); - result + ) } } } From 0cfc130a49a69c778a427b5f2dee4059cd3aa949 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Sat, 6 Apr 2024 07:04:26 -0400 Subject: [PATCH 0925/1393] Only calculate VID shares when Asked (#2902) * Only calculate VID shares when asked * fix comment * change back type --- hotshot/src/tasks/mod.rs | 3 +- task-impls/src/consensus.rs | 4 +- task-impls/src/helpers.rs | 33 +++++++++++++- task-impls/src/response.rs | 76 +++++++++++++++++++------------ task-impls/src/vid.rs | 67 ++++----------------------- testing/src/task_helpers.rs | 2 +- testing/tests/tests_1/vid_task.rs | 14 +----- types/src/consensus.rs | 5 +- types/src/data.rs | 2 +- 9 files changed, 97 insertions(+), 109 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4da8d9a0be..3d16e8c98b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -73,8 +73,9 @@ pub async fn add_response_task>( let state = NetworkResponseState::::new( handle.hotshot.get_consensus(), rx, - handle.hotshot.memberships.quorum_membership.clone(), + handle.hotshot.memberships.quorum_membership.clone().into(), handle.public_key().clone(), + handle.private_key().clone(), ); task_reg .register(run_response_task::(state, hs_rx)) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index b9dc6e601d..4cb9121081 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -850,7 +850,7 @@ impl, A: ConsensusApi + .vid_shares .get(&leaf.get_view_number()) .unwrap_or(&HashMap::new()) - .get(&self.public_key).map(|proposal| proposal.data.clone()); + .get(&self.public_key).cloned(); // Add our data into a new `LeafInfo` leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid_share)); @@ -1174,7 +1174,7 @@ impl, A: ConsensusApi + .vid_shares .entry(view) .or_default() - .insert(disperse.data.recipient_key.clone(), disperse.clone()); + .insert(disperse.data.recipient_key.clone(), disperse.data.clone()); if self.vote_if_able(&event_stream).await { self.current_proposal = None; diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 93376f7086..32492e4dfb 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -1,8 +1,16 @@ +use std::sync::Arc; + use async_broadcast::{SendError, Sender}; #[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; +use async_std::task::{spawn_blocking, JoinHandle}; +use hotshot_types::{ + data::VidDisperse, + traits::{election::Membership, node_implementation::NodeType}, + vid::vid_scheme, +}; +use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::task::{spawn_blocking, JoinHandle}; /// Cancel a task pub async fn cancel_task(task: JoinHandle) { @@ -30,3 +38,24 @@ pub async fn broadcast_event(event: E, sender: &Send } } } +/// Calculate the vid disperse information from the payload given a view and membership +/// +/// # Panics +/// Panics if the VID calculation fails, this should not happen. +pub async fn calculate_vid_disperse( + txns: Vec, + membership: Arc, + view: TYPES::Time, +) -> VidDisperse { + let num_nodes = membership.total_nodes(); + let vid_disperse = spawn_blocking(move || { + #[allow(clippy::panic)] + vid_scheme(num_nodes).disperse(&txns).unwrap_or_else(|err|panic!("VID disperse failure:\n\t(num_storage nodes,payload_byte_len)=({num_nodes},{})\n\terror: : {err}", txns.len())) + }) + .await; + #[cfg(async_executor_impl = "tokio")] + // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. + let vid_disperse = vid_disperse.unwrap(); + + VidDisperse::from_membership(view, vid_disperse, membership) +} diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index b26e18e29a..ea12d8644a 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,10 +1,9 @@ -use std::collections::HashMap; use std::sync::Arc; -use crate::events::HotShotEvent; +use crate::{events::HotShotEvent, helpers::calculate_vid_disperse}; use async_broadcast::Receiver; use async_compatibility_layer::art::async_spawn; -use async_lock::RwLock; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use futures::{channel::mpsc, FutureExt, StreamExt}; @@ -12,9 +11,7 @@ use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::Consensus, data::VidDisperseShare, - message::{ - CommitteeConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage, - }, + message::{CommitteeConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, traits::{ election::Membership, network::{DataRequest, RequestKind, ResponseChannel, ResponseMessage}, @@ -42,9 +39,11 @@ pub struct NetworkResponseState { /// Receiver for requests receiver: RequestReceiver, /// Quorum membership for checking if requesters have state - quorum: TYPES::Membership, + quorum: Arc, /// This replicas public key pub_key: TYPES::SignatureKey, + /// This replicas private key + private_key: ::PrivateKey, } impl NetworkResponseState { @@ -52,14 +51,16 @@ impl NetworkResponseState { pub fn new( consensus: LockedConsensusState, receiver: RequestReceiver, - quorum: TYPES::Membership, + quorum: Arc, pub_key: TYPES::SignatureKey, + private_key: ::PrivateKey, ) -> Self { Self { consensus, receiver, quorum, pub_key, + private_key, } } @@ -114,40 +115,59 @@ impl NetworkResponseState { } } + /// Get the VID share from conensus storage, or calculate it from a the payload for + /// the view, if we have the payload. Stores all the shares calculated from the payload + /// if the calculation was done + async fn get_or_calc_vid_share( + &self, + view: TYPES::Time, + key: &TYPES::SignatureKey, + ) -> Option> { + let consensus = self.consensus.upgradable_read().await; + let contained = consensus + .vid_shares + .get(&view) + .is_some_and(|m| m.contains_key(key)); + if !contained { + let txns = consensus.saved_payloads.get(&view)?; + let vid = calculate_vid_disperse(txns.clone(), self.quorum.clone(), view).await; + let shares = VidDisperseShare::from_vid_disperse(vid); + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + for share in shares { + let s = share.clone(); + let key: ::SignatureKey = s.recipient_key; + consensus + .vid_shares + .entry(view) + .or_default() + .insert(key, share); + } + return consensus.vid_shares.get(&view)?.get(key).cloned(); + } + consensus.vid_shares.get(&view)?.get(key).cloned() + } + /// Handle the request contained in the message. Returns the response we should send /// First parses the kind and passes to the appropriate handler for the specific type /// of the request. async fn handle_request(&self, req: DataRequest) -> Message { match req.request { RequestKind::VID(view, pub_key) => { - let state = self.consensus.read().await; - let Some(proposals_map) = state.vid_shares.get(&view) else { + let Some(share) = self.get_or_calc_vid_share(view, &pub_key).await else { return self.make_msg(ResponseMessage::NotFound); }; - self.handle_vid(proposals_map, &pub_key) + let Some(prop) = share.to_proposal(&self.private_key) else { + return self.make_msg(ResponseMessage::NotFound); + }; + let seq_msg = + SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(prop)); + self.make_msg(ResponseMessage::Found(seq_msg)) } // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 RequestKind::DAProposal(_view) => self.make_msg(ResponseMessage::NotFound), } } - /// Handle a vid request by looking up the the share for the key. If a share is found - /// build the response and return it - fn handle_vid( - &self, - proposals_map: &HashMap>>, - key: &TYPES::SignatureKey, - ) -> Message { - if !proposals_map.contains_key(key) { - return self.make_msg(ResponseMessage::NotFound); - } - - let seq_msg = SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg( - proposals_map.get(key).unwrap().clone(), - )); - self.make_msg(ResponseMessage::Found(seq_msg)) - } - /// Helper to turn a `ResponseMessage` into a `Message` by filling /// in the surrounding feilds and creating the `MessageKind` fn make_msg(&self, msg: ResponseMessage) -> Message { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 8cabb08878..5bdb9a7808 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,15 +1,11 @@ use crate::events::{HotShotEvent, HotShotTaskCompleted}; -use crate::helpers::broadcast_event; +use crate::helpers::{broadcast_event, calculate_vid_disperse}; use async_broadcast::Sender; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::spawn_blocking; -use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, - data::{VidDisperse, VidDisperseShare}, message::Proposal, traits::{ consensus_api::ConsensusApi, @@ -18,11 +14,7 @@ use hotshot_types::{ node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, - vid::vid_scheme, }; -use jf_primitives::vid::VidScheme; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::spawn_blocking; use std::marker::PhantomData; use std::sync::Arc; @@ -68,24 +60,16 @@ impl, A: ConsensusApi + ) -> Option { match event.as_ref() { HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number) => { - let encoded_transactions = encoded_transactions.clone(); - // get the number of quorum committee members to be used for VID calculation - let num_storage_nodes = self.membership.total_nodes(); - - // calculate vid shares - let vid_disperse = spawn_blocking(move || { - #[allow(clippy::panic)] - vid_scheme(num_storage_nodes).disperse(&encoded_transactions).unwrap_or_else(|err|panic!("VID disperse failure:\n\t(num_storage nodes,payload_byte_len)=({num_storage_nodes},{})\n\terror: : {err}", encoded_transactions.len())) - }) + let vid_disperse = calculate_vid_disperse( + encoded_transactions.clone(), + self.membership.clone(), + *view_number, + ) .await; - - #[cfg(async_executor_impl = "tokio")] - // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. - let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building broadcast_event( Arc::new(HotShotEvent::SendPayloadCommitmentAndMetadata( - vid_disperse.commit, + vid_disperse.payload_commitment, metadata.clone(), *view_number, )), @@ -95,10 +79,7 @@ impl, A: ConsensusApi + // send the block to the VID dispersal function broadcast_event( - Arc::new(HotShotEvent::BlockReady( - VidDisperse::from_membership(*view_number, vid_disperse, &self.membership), - *view_number, - )), + Arc::new(HotShotEvent::BlockReady(vid_disperse, *view_number)), &event_stream, ) .await; @@ -155,37 +136,6 @@ impl, A: ConsensusApi + return None; } - HotShotEvent::DAProposalValidated(proposal, _sender) => { - let txns = proposal.data.encoded_transactions.clone(); - let num_nodes = self.membership.total_nodes(); - let vid_disperse = spawn_blocking(move || { - #[allow(clippy::panic)] - vid_scheme(num_nodes).disperse(&txns).unwrap_or_else(|err|panic!("VID disperse failure:\n\t(num_storage nodes,payload_byte_len)=({num_nodes},{})\n\terror: : {err}", txns.len())) - }) - .await; - #[cfg(async_executor_impl = "tokio")] - let vid_disperse = vid_disperse.unwrap(); - - let vid_disperse = VidDisperse::from_membership( - proposal.data.view_number, - vid_disperse, - &self.membership, - ); - - let vid_disperse_tasks = VidDisperseShare::from_vid_disperse(vid_disperse) - .into_iter() - .filter_map(|vid_share| { - Some(broadcast_event( - Arc::new(HotShotEvent::VidDisperseRecv( - vid_share.to_proposal(&self.private_key)?, - )), - &event_stream, - )) - }); - - join_all(vid_disperse_tasks).await; - } - HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted); } @@ -220,7 +170,6 @@ impl, A: ConsensusApi + | HotShotEvent::BlockRecv(_, _, _) | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) - | HotShotEvent::DAProposalValidated(_, _) ) } fn should_shutdown(event: &Self::Event) -> bool { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index fdf3dab009..a7ca49f7c0 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -398,7 +398,7 @@ pub fn build_vid_proposal( let vid_disperse = VidDisperse::from_membership( view_number, vid.disperse(encoded_transactions).unwrap(), - &quorum_membership.clone().into(), + quorum_membership.clone().into(), ); VidDisperseShare::from_vid_disperse(vid_disperse) diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index c23914f9b6..1d6d71bb02 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -54,7 +54,7 @@ async fn test_vid_task() { let vid_disperse = VidDisperse::from_membership( message.data.view_number, vid_disperse, - &quorum_membership.clone().into(), + quorum_membership.clone().into(), ); let vid_proposal = Proposal { @@ -71,10 +71,6 @@ async fn test_vid_task() { }) .collect(); let vid_share_proposal = vid_share_proposals[0].clone(); - let disperse_receives: Vec<_> = vid_share_proposals - .into_iter() - .map(HotShotEvent::VidDisperseRecv) - .collect(); let mut input = Vec::new(); let mut output = HashMap::new(); @@ -91,10 +87,7 @@ async fn test_vid_task() { vid_disperse.clone(), ViewNumber::new(2), )); - input.push(HotShotEvent::DAProposalValidated( - message, - *handle.public_key(), - )); + input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::VidDisperseRecv(vid_share_proposal.clone())); input.push(HotShotEvent::Shutdown); @@ -112,9 +105,6 @@ async fn test_vid_task() { HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), 1, ); - for disperse_receive in disperse_receives { - output.insert(disperse_receive, 1); - } let vid_state = VIDTaskState { api: handle.clone(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 52a92d3c45..d06fbe6c8e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -6,7 +6,6 @@ use displaydoc::Display; use crate::{ data::{Leaf, VidDisperseShare}, error::HotShotError, - message::Proposal, simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, @@ -27,9 +26,9 @@ use tracing::error; pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` -type VidShares = BTreeMap< +pub type VidShares = BTreeMap< ::Time, - HashMap<::SignatureKey, Proposal>>, + HashMap<::SignatureKey, VidDisperseShare>, >; /// A reference to the consensus algorithm diff --git a/types/src/data.rs b/types/src/data.rs index 42f61ce16a..bf185f1bd4 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -156,7 +156,7 @@ impl VidDisperse { pub fn from_membership( view_number: TYPES::Time, mut vid_disperse: JfVidDisperse, - membership: &Arc, + membership: Arc, ) -> Self { let shares = membership .get_staked_committee(view_number) From 0dd64f10b63dad2ecc53483a981d696a6b43a30c Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 8 Apr 2024 04:40:38 -0400 Subject: [PATCH 0926/1393] update the cdn (#2904) --- examples/Cargo.toml | 7 ++++--- hotshot/Cargo.toml | 6 ++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 31df23b950..39a58f3772 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -141,9 +141,9 @@ tracing = { workspace = true } tokio = { workspace = true } cdn-client = { workspace = true, features = ["runtime-tokio"] } cdn-broker = { workspace = true, features = [ - "runtime-tokio", - "strong_consistency", + "strong-consistency", + "global-permits", ] } cdn-marshal = { workspace = true, features = ["runtime-tokio"] } @@ -152,7 +152,8 @@ async-std = { workspace = true } cdn-client = { workspace = true, features = ["runtime-async-std"] } cdn-broker = { workspace = true, features = [ "runtime-async-std", - "strong_consistency", + "strong-consistency", + "global-permits", ] } cdn-marshal = { workspace = true, features = ["runtime-async-std"] } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index bb68353fb9..b69542e435 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -62,7 +62,8 @@ tokio = { workspace = true } cdn-client = { workspace = true, features = ["runtime-tokio"] } cdn-broker = { workspace = true, features = [ "runtime-tokio", - "strong_consistency", + "strong-consistency", + "global-permits", ] } cdn-marshal = { workspace = true, features = ["runtime-tokio"] } @@ -71,7 +72,8 @@ async-std = { workspace = true } cdn-client = { workspace = true, features = ["runtime-async-std"] } cdn-broker = { workspace = true, features = [ "runtime-async-std", - "strong_consistency", + "strong-consistency", + "global-permits", ] } cdn-marshal = { workspace = true, features = ["runtime-async-std"] } From 4da919b4b98bda9fb0389b7ba6abf0592c98f6c1 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 8 Apr 2024 13:19:48 +0300 Subject: [PATCH 0927/1393] [CHORE] Update tide-disco (and others) to crates.io (#2893) * Update tide-disco (and others) to crates.io * Fix test builder API * Fix panic docs * Update h2; patching vulnerability * Revert "Merge remote-tracking branch 'origin/main' into update/tide-disco" This reverts commit e62b762a01f4b2c60f255b3d7d8a2672192033f7, reversing changes made to 039adf56d7198f251fe87c34503fb089d7a35e84. * Revert "Revert "Merge remote-tracking branch 'origin/main' into update/tide-disco"" This reverts commit de74d481c29bd46b8d2ac062deacfb902587f25a. * update to api base --------- Co-authored-by: Rob --- builder-api/Cargo.toml | 4 ++-- builder-api/src/api.rs | 4 ++-- builder-api/src/builder.rs | 4 ++-- example-types/Cargo.toml | 2 +- example-types/src/block_types.rs | 6 +++--- example-types/src/state_types.rs | 4 ++-- examples/Cargo.toml | 4 ++-- examples/infra/mod.rs | 2 +- examples/webserver/all.rs | 2 +- examples/webserver/multi-webserver.rs | 2 +- examples/webserver/webserver.rs | 2 +- hotshot/Cargo.toml | 4 ++-- hotshot/src/lib.rs | 4 ++-- hotshot/src/tasks/task_state.rs | 2 +- .../src/traits/networking/combined_network.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- .../src/traits/networking/memory_network.rs | 2 +- .../src/traits/networking/push_cdn_network.rs | 2 +- .../traits/networking/web_server_network.rs | 2 +- libp2p-networking/Cargo.toml | 2 +- libp2p-networking/src/network/node/handle.rs | 2 +- libp2p-networking/tests/counter.rs | 2 +- orchestrator/Cargo.toml | 2 +- orchestrator/src/client.rs | 8 +++----- orchestrator/src/lib.rs | 11 ++++------- task-impls/Cargo.toml | 4 ++-- task-impls/src/builder.rs | 8 ++++++-- task-impls/src/consensus.rs | 4 ++-- task-impls/src/events.rs | 2 +- task-impls/src/network.rs | 2 +- task-impls/src/request.rs | 2 +- task-impls/src/response.rs | 2 +- task-impls/src/transactions.rs | 3 ++- task-impls/src/upgrade.rs | 4 ++-- testing/Cargo.toml | 4 ++-- testing/src/block_builder.rs | 18 ++++++++---------- testing/src/task_helpers.rs | 2 +- testing/src/view_generator.rs | 2 +- testing/tests/tests_1/message.rs | 4 ++-- testing/tests/tests_1/upgrade_task.rs | 2 +- types/Cargo.toml | 4 ++-- types/src/consensus.rs | 2 +- types/src/constants.rs | 2 +- types/src/data.rs | 4 ++-- types/src/qc.rs | 2 +- types/src/simple_certificate.rs | 4 ++-- types/src/simple_vote.rs | 18 +++++++++--------- types/src/traits/block_contents.rs | 2 +- types/src/traits/network.rs | 2 +- types/src/traits/node_implementation.rs | 2 +- types/src/utils.rs | 2 +- types/src/vid.rs | 19 ++++++++++--------- types/src/vote.rs | 2 +- web_server/Cargo.toml | 2 +- web_server/src/lib.rs | 7 ++++--- 55 files changed, 110 insertions(+), 110 deletions(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 868229ab77..7a83c79990 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -16,5 +16,5 @@ snafu = { workspace = true } tagged-base64 = { workspace = true } tide-disco = { workspace = true } toml = { workspace = true } -commit = { workspace = true } -versioned-binary-serialization = { workspace = true } +committable = { workspace = true } +vbs = { workspace = true } diff --git a/builder-api/src/api.rs b/builder-api/src/api.rs index 524708eb9c..587638301c 100644 --- a/builder-api/src/api.rs +++ b/builder-api/src/api.rs @@ -2,9 +2,9 @@ use std::fs; use std::path::Path; use tide_disco::api::{Api, ApiError}; use toml::{map::Entry, Value}; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; -pub(crate) fn load_api( +pub(crate) fn load_api( path: Option>, default: &str, extensions: impl IntoIterator, diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index b210e8fba0..68f6b52ab2 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -5,7 +5,7 @@ use crate::{ data_source::{AcceptsTxnSubmits, BuilderDataSource}, }; use clap::Args; -use commit::Committable; +use committable::Committable; use derive_more::From; use futures::FutureExt; use hotshot_types::{ @@ -20,7 +20,7 @@ use tide_disco::{ method::{ReadState, WriteState}, Api, RequestError, StatusCode, }; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; #[derive(Args, Default)] pub struct Options { diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index de8a42da07..f90c9da8b3 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -17,7 +17,7 @@ async-compatibility-layer = { workspace = true } async-trait = { workspace = true } anyhow = { workspace = true } sha3 = "^0.10" -commit = { workspace = true } +committable = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index af1c37116d..5d2cae0c87 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -4,7 +4,7 @@ use std::{ }; use crate::node_types::TestTypes; -use commit::{Commitment, Committable, RawCommitmentBuilder}; +use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, traits::{ @@ -52,7 +52,7 @@ impl TestTransaction { impl Committable for TestTransaction { fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("Txn Comm"); + let builder = committable::RawCommitmentBuilder::new("Txn Comm"); let mut hasher = Keccak256::new(); hasher.update(&self.0); let generic_array = hasher.finalize(); @@ -159,7 +159,7 @@ impl BlockPayload for TestBlockPayload { ) -> Vec> { self.transactions .iter() - .map(commit::Committable::commit) + .map(committable::Committable::commit) .collect() } diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 8c32573cb5..c6383c8f72 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -1,5 +1,5 @@ //! Implementations for examples and tests only -use commit::{Commitment, Committable}; +use committable::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, Leaf, ViewNumber}, @@ -40,7 +40,7 @@ pub struct TestValidatedState { impl Committable for TestValidatedState { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Test State Commit") + committable::RawCommitmentBuilder::new("Test State Commit") .u64_field("block_height", self.block_height) .field("prev_state_commitment", self.prev_state_commitment) .finalize() diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 39a58f3772..739552c085 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -108,7 +108,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" clap = { workspace = true, optional = true } -commit = { workspace = true } +committable = { workspace = true } custom_debug = { workspace = true } dashmap = "5" either = { workspace = true } @@ -131,7 +131,7 @@ hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } chrono = "0.4" -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } sha2.workspace = true local-ip-address = "0.6" diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index e56f7cc615..350d0cdd21 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -57,7 +57,7 @@ use std::time::Duration; use std::{fs, time::Instant}; use surf_disco::Url; use tracing::{error, info, warn}; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; #[derive(Debug, Clone)] /// Arguments passed to the orchestrator diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index c6f73ad7d2..8585764d98 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -21,7 +21,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::constants::WebServerVersion; use surf_disco::Url; use tracing::error; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; #[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] diff --git a/examples/webserver/multi-webserver.rs b/examples/webserver/multi-webserver.rs index edc5ba586e..34af9b9e6b 100644 --- a/examples/webserver/multi-webserver.rs +++ b/examples/webserver/multi-webserver.rs @@ -12,7 +12,7 @@ use hotshot_types::constants::WebServerVersion; use hotshot_types::traits::node_implementation::NodeType; use surf_disco::Url; use tracing::error; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; /// Arguments to run multiple web servers #[derive(Parser, Debug)] diff --git a/examples/webserver/webserver.rs b/examples/webserver/webserver.rs index 60669de6d9..4f4a2a5820 100644 --- a/examples/webserver/webserver.rs +++ b/examples/webserver/webserver.rs @@ -3,7 +3,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_types::constants::WebServerVersion; use std::sync::Arc; use surf_disco::Url; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; use async_compatibility_layer::{ channel::oneshot, diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index b69542e435..d1235f3be9 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -31,7 +31,7 @@ async-trait = { workspace = true } bimap = "0.6" bincode = { workspace = true } clap = { workspace = true, optional = true } -commit = { workspace = true } +committable = { workspace = true } custom_debug = { workspace = true } dashmap = "5" derive_more = "0.99" @@ -52,7 +52,7 @@ snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } jf-primitives.workspace = true hotshot-orchestrator = { path = "../orchestrator" } blake3.workspace = true diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a161e9d87f..04d0a78803 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -24,13 +24,13 @@ use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; -use commit::Committable; +use committable::Committable; use futures::join; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; use hotshot_types::constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, STATIC_VER_0_1}; -use versioned_binary_serialization::version::Version; +use vbs::version::Version; use hotshot_task::task::TaskRegistry; use hotshot_types::{ diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index eea538ff48..fa5bf90fd9 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -13,7 +13,7 @@ use hotshot_types::traits::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; use std::{collections::HashMap, marker::PhantomData}; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; /// Trait for creating task states. #[async_trait] diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index e0a1b05e02..16af13c81c 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -52,7 +52,7 @@ use std::hash::Hash; use std::time::Duration; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; /// Helper function to calculate a hash of a type that implements Hash pub fn calculate_hash_of(t: &T) -> u64 { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 43030e565a..6c3b82fcc8 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -68,7 +68,7 @@ use std::{ }; use std::{collections::HashSet, net::SocketAddr}; use tracing::{debug, error, info, instrument, warn}; -use versioned_binary_serialization::{ +use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, }; diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 847052d173..de6df4be35 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -35,7 +35,7 @@ use std::{ }, }; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; -use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; +use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; /// Shared state for in-memory mock networking. /// diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 5df45ed62b..315e334bc7 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -46,7 +46,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, sync::Arc, time::Duration}; use tracing::{error, warn}; -use versioned_binary_serialization::{ +use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, }; diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 87bccefa60..ed4d7a68c2 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -45,7 +45,7 @@ use std::{ use surf_disco::error::ClientError; use surf_disco::Url; use tracing::{debug, error, info, warn}; -use versioned_binary_serialization::{ +use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, }; diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 8685999874..b982b437dd 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -33,7 +33,7 @@ tide = { version = "0.16", optional = true, default-features = false, features = "h1-server", ] } tracing = { workspace = true } -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } void = "1" lazy_static = { workspace = true } diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 111ff6a89e..1e63f15540 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -21,7 +21,7 @@ use std::{ time::{Duration, Instant}, }; use tracing::{debug, info, instrument}; -use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; +use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; /// A handle containing: /// - A reference to the state diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index d6318602a2..9d8a37dabc 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use snafu::ResultExt; use std::{fmt::Debug, sync::Arc, time::Duration}; use tracing::{debug, error, info, instrument, warn}; -use versioned_binary_serialization::{BinarySerializer, Serializer}; +use vbs::{BinarySerializer, Serializer}; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index ff9d45125e..532a2e09f1 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -20,7 +20,7 @@ toml = { workspace = true } thiserror = "1" serde-inline-default = "0.1" csv = "1" -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } multiaddr = "0.18" anyhow.workspace = true bincode.workspace = true diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 4807a98fdc..531a922f07 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -13,7 +13,7 @@ use hotshot_types::{ use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; -use versioned_binary_serialization::BinarySerializer; +use vbs::BinarySerializer; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client @@ -212,10 +212,8 @@ impl OrchestratorClient { }); // Serialize our (possible) libp2p-specific data - let request_body = versioned_binary_serialization::Serializer::::serialize(&( - libp2p_address, - libp2p_public_key, - ))?; + let request_body = + vbs::Serializer::::serialize(&(libp2p_address, libp2p_public_key))?; let identity = |client: Client| { // We need to clone here to move it into the closure diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index d9fec75375..73dcdf3cd9 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -35,7 +35,7 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use versioned_binary_serialization::{ +use vbs::{ version::{StaticVersion, StaticVersionType}, BinarySerializer, }; @@ -404,7 +404,7 @@ where // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) let Ok((libp2p_address, libp2p_public_key)) = - versioned_binary_serialization::Serializer::::deserialize(&body_bytes) + vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, @@ -477,11 +477,8 @@ where let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); - let mut app = App::< - RwLock>, - ServerError, OrchestratorVersion - >::with_state(state); - app.register_module("api", web_api.unwrap()) + let mut app = App::>, ServerError>::with_state(state); + app.register_module::("api", web_api.unwrap()) .expect("Error registering api"); tracing::error!("listening on {:?}", url); app.serve(url, ORCHESTRATOR_VERSION).await diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 9596195cf3..2f9b918b25 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -18,7 +18,7 @@ async-trait = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } chrono = "0.4" -commit = { workspace = true } +committable = { workspace = true } either = { workspace = true } futures = { workspace = true } hotshot-task = { path = "../task" } @@ -34,7 +34,7 @@ surf-disco = { workspace = true } tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 5ec595727b..674aa73ed6 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use snafu::Snafu; use surf_disco::{client::HealthStatus, Client, Url}; use tagged_base64::TaggedBase64; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; #[derive(Debug, Snafu, Serialize, Deserialize)] /// Represents errors than builder client may return @@ -67,9 +67,13 @@ pub struct BuilderClient { impl BuilderClient { /// Construct a new client from base url + /// + /// # Panics + /// + /// If the URL is malformed. pub fn new(base_url: impl Into) -> Self { Self { - inner: Client::new(base_url.into()), + inner: Client::new(base_url.into().join("api").unwrap()), _marker: std::marker::PhantomData, } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 4cb9121081..51060dbce8 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -8,7 +8,7 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use commit::Committable; +use committable::Committable; use core::time::Duration; use hotshot_task::task::{Task, TaskState}; use hotshot_types::event::LeafInfo; @@ -35,7 +35,7 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use hotshot_types::{constants::LOOK_AHEAD, data::ViewChangeEvidence}; -use versioned_binary_serialization::version::Version; +use vbs::version::Version; use crate::vote_collection::HandleVoteEvent; use chrono::Utc; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 26a5cd0be7..4f0355cca9 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -16,7 +16,7 @@ use hotshot_types::{ traits::{node_implementation::NodeType, BlockPayload}, vid::VidCommitment, }; -use versioned_binary_serialization::version::Version; +use vbs::version::Version; /// Marker that the task completed #[derive(Eq, Hash, PartialEq, Debug, Clone)] diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index c5f17d0a8d..792aa0a307 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -26,7 +26,7 @@ use hotshot_types::{ }; use tracing::instrument; use tracing::{debug, error, warn}; -use versioned_binary_serialization::version::Version; +use vbs::version::Version; /// quorum filter pub fn quorum_filter(event: &Arc>) -> bool { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 0518f86d1c..a9408b418d 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -22,7 +22,7 @@ use hotshot_types::{ use rand::{prelude::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; use tracing::{debug, error, info, instrument, warn}; -use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; +use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; /// Amount of time to try for a request before timing out. const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index ea12d8644a..6858db657f 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -22,7 +22,7 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use versioned_binary_serialization::{version::StaticVersionType, BinarySerializer, Serializer}; +use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; /// Type alias for consensus state wrapped in a lock. type LockedConsensusState = Arc>>; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 5d74d259f6..bbfecee84b 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -6,6 +6,7 @@ use crate::{ use async_broadcast::Sender; use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; + use hotshot_builder_api::block_info::{AvailableBlockData, AvailableBlockHeaderInput}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ @@ -25,7 +26,7 @@ use std::{ time::{Duration, Instant}, }; use tracing::{debug, error, instrument}; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; /// Tracks state of a Transaction task pub struct TransactionTaskState< diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 842c4aca8a..8ca04a955c 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -208,14 +208,14 @@ impl, A: ConsensusApi + #[cfg(feature = "example-upgrade")] { - use commit::Committable; + use committable::Committable; use std::marker::PhantomData; use hotshot_types::{ data::UpgradeProposal, message::Proposal, traits::node_implementation::ConsensusTime, }; - use versioned_binary_serialization::version::Version; + use vbs::version::Version; if *view == 5 && self.quorum_membership.get_leader(view + 5) == self.public_key { diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 04c4f98b70..573ccad2f1 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -18,7 +18,7 @@ async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bitvec = { workspace = true } -commit = { workspace = true } +committable = { workspace = true } either = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } @@ -39,7 +39,7 @@ sha3 = "^0.10" snafu = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } lru = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index dd0df2dbcf..708a2f3272 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -9,7 +9,7 @@ use std::{ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; -use commit::{Commitment, Committable}; +use committable::{Commitment, Committable}; use futures::{future::BoxFuture, Stream, StreamExt}; use hotshot::{ traits::{BlockPayload, TestableNodeImplementation}, @@ -17,7 +17,7 @@ use hotshot::{ }; use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::{BuildError, Options}, + builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; use hotshot_example_types::{ @@ -87,9 +87,9 @@ impl> TestBuilderImplementation &Options::default(), ) .expect("Failed to construct the builder API"); - let mut app: App = + let mut app: App = App::with_state(source); - app.register_module("/", builder_api) + app.register_module("api", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url.clone(), STATIC_VER_0_1)); @@ -286,9 +286,8 @@ pub fn run_random_builder(url: Url) { &Options::default(), ) .expect("Failed to construct the builder API"); - let mut app: App = - App::with_state(source); - app.register_module("/", builder_api) + let mut app: App = App::with_state(source); + app.register_module::("api", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, STATIC_VER_0_1)); @@ -379,9 +378,8 @@ impl SimpleBuilderSource { &Options::default(), ) .expect("Failed to construct the builder API"); - let mut app: App = - App::with_state(self); - app.register_module("/", builder_api) + let mut app: App = App::with_state(self); + app.register_module::("api", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, STATIC_VER_0_1)); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index a7ca49f7c0..eac39c9ba6 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -8,7 +8,7 @@ use hotshot_example_types::{ }; use crate::test_builder::TestMetadata; -use commit::Committable; +use committable::Committable; use ethereum_types::U256; use hotshot::{ types::{BLSPubKey, SignatureKey, SystemContextHandle}, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 25fbe34e45..935bb57d41 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -10,7 +10,7 @@ use sha2::{Digest, Sha256}; use crate::task_helpers::{ build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, }; -use commit::Committable; +use committable::Committable; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index d4b3f561e5..6dd7ae1c29 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -1,7 +1,7 @@ #[cfg(test)] use std::marker::PhantomData; -use commit::Committable; +use committable::Committable; use hotshot_example_types::node_types::TestTypes; @@ -12,7 +12,7 @@ use hotshot_types::{ simple_vote::ViewSyncCommitData, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey}, }; -use versioned_binary_serialization::{ +use vbs::{ version::{StaticVersion, Version}, BinarySerializer, Serializer, }; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 0b52471177..301f1cdafa 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -18,7 +18,7 @@ use hotshot_types::{ simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, }; -use versioned_binary_serialization::version::Version; +use vbs::version::Version; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/types/Cargo.toml b/types/Cargo.toml index 32ee10c43f..72287849d2 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -25,7 +25,7 @@ async-trait = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } blake3 = { workspace = true } -commit = { workspace = true } +committable = { workspace = true } custom_debug = { workspace = true } digest = { workspace = true } either = { workspace = true } @@ -51,7 +51,7 @@ jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } tagged-base64 = { workspace = true } -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = "2.5.0" diff --git a/types/src/consensus.rs b/types/src/consensus.rs index d06fbe6c8e..8cffa11a20 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -14,7 +14,7 @@ use crate::{ }, utils::{StateAndDelta, Terminator}, }; -use commit::Commitment; +use committable::Commitment; use std::{ collections::{BTreeMap, HashMap}, diff --git a/types/src/constants.rs b/types/src/constants.rs index 834af4243d..06b9adf24b 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -1,6 +1,6 @@ //! configurable constants for hotshot -use versioned_binary_serialization::version::{StaticVersion, Version}; +use vbs::version::{StaticVersion, Version}; /// the number of views to gather information for ahead of time pub const LOOK_AHEAD: u64 = 5; diff --git a/types/src/data.rs b/types/src/data.rs index bf185f1bd4..ae42d8748f 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -25,7 +25,7 @@ use crate::{ }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; -use commit::{Commitment, Committable, RawCommitmentBuilder}; +use committable::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use jf_primitives::vid::VidDisperse as JfVidDisperse; use rand::Rng; @@ -583,7 +583,7 @@ pub fn serialize_signature2( } impl Committable for Leaf { - fn commit(&self) -> commit::Commitment { + fn commit(&self) -> committable::Commitment { // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) diff --git a/types/src/qc.rs b/types/src/qc.rs index cef6cdd2c0..52d540ce53 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -189,7 +189,7 @@ mod tests { bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, SignatureScheme, }; - use versioned_binary_serialization::{version::StaticVersion, BinarySerializer, Serializer}; + use vbs::{version::StaticVersion, BinarySerializer, Serializer}; type Version = StaticVersion<0, 1>; macro_rules! test_quorum_certificate { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 73ee8f4c95..c977b32c4c 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -6,7 +6,7 @@ use std::{ marker::PhantomData, }; -use commit::{Commitment, CommitmentBoundsArkless, Committable}; +use committable::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use crate::{ @@ -85,7 +85,7 @@ impl serialize_signature2::(sigs), None => vec![], }; - commit::RawCommitmentBuilder::new("Certificate") + committable::RawCommitmentBuilder::new("Certificate") .field("data", self.data.commit()) .field("vote_commitment", self.vote_commitment) .field("view number", self.view_number.commit()) diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 516e2338b0..bf9b85ada2 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -2,7 +2,7 @@ use std::{fmt::Debug, hash::Hash}; -use commit::{Commitment, Committable}; +use committable::{Commitment, Committable}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; @@ -12,7 +12,7 @@ use crate::{ vid::VidCommitment, vote::{HasViewNumber, Vote}, }; -use versioned_binary_serialization::version::Version; +use vbs::version::Version; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. @@ -90,7 +90,7 @@ pub trait Voteable: /// All simple voteable types should be implemented here. This prevents us from /// creating/using improper types when using the vote types. mod sealed { - use commit::Committable; + use committable::Committable; /// Only structs in this file can impl `Sealed` pub trait Sealed {} @@ -162,7 +162,7 @@ impl SimpleVote { impl Committable for QuorumData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Quorum data") + committable::RawCommitmentBuilder::new("Quorum data") .var_size_bytes(self.leaf_commit.as_ref()) .finalize() } @@ -170,7 +170,7 @@ impl Committable for QuorumData { impl Committable for TimeoutData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("Timeout data") + committable::RawCommitmentBuilder::new("Timeout data") .u64(*self.view) .finalize() } @@ -178,7 +178,7 @@ impl Committable for TimeoutData { impl Committable for DAData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("DA data") + committable::RawCommitmentBuilder::new("DA data") .var_size_bytes(self.payload_commit.as_ref()) .finalize() } @@ -186,7 +186,7 @@ impl Committable for DAData { impl Committable for VIDData { fn commit(&self) -> Commitment { - commit::RawCommitmentBuilder::new("VID data") + committable::RawCommitmentBuilder::new("VID data") .var_size_bytes(self.payload_commit.as_ref()) .finalize() } @@ -194,7 +194,7 @@ impl Committable for VIDData { impl Committable for UpgradeProposalData { fn commit(&self) -> Commitment { - let builder = commit::RawCommitmentBuilder::new("Upgrade data"); + let builder = committable::RawCommitmentBuilder::new("Upgrade data"); builder .u64(*self.new_version_first_block) .u64(*self.old_version_last_block) @@ -213,7 +213,7 @@ fn view_and_relay_commit( relay: u64, tag: &str, ) -> Commitment { - let builder = commit::RawCommitmentBuilder::new(tag); + let builder = committable::RawCommitmentBuilder::new(tag); builder.u64(*view).u64(relay).finalize() } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 24080dbb18..13818a2820 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -9,7 +9,7 @@ use crate::{ utils::BuilderCommitment, vid::{vid_scheme, VidCommitment, VidSchemeType}, }; -use commit::{Commitment, Committable}; +use committable::{Commitment, Committable}; use jf_primitives::vid::VidScheme; use serde::{de::DeserializeOwned, Serialize}; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index a0237f2b5e..40e1045625 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -38,7 +38,7 @@ use std::{ sync::Arc, time::Duration, }; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; /// for any errors we decide to add to memory network #[derive(Debug, Snafu, Serialize, Deserialize)] diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index bb501b64e3..c59755de62 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -21,7 +21,7 @@ use crate::{ }, }; use async_trait::async_trait; -use commit::Committable; +use committable::Committable; use serde::{Deserialize, Serialize}; use std::{ fmt::Debug, diff --git a/types/src/utils.rs b/types/src/utils.rs index 99af91d328..54a5d11e72 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -13,7 +13,7 @@ use bincode::{ }, DefaultOptions, Options, }; -use commit::Commitment; +use committable::Commitment; use digest::OutputSizeUser; use sha2::Digest; use std::{ops::Deref, sync::Arc}; diff --git a/types/src/vid.rs b/types/src/vid.rs index e6d2c9a5f2..a424227852 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -50,17 +50,18 @@ use std::{fmt::Debug, ops::Range}; /// When the construction fails for the underlying VID scheme. #[must_use] pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { - // chunk_size is currently num_storage_nodes rounded down to a power of two - // TODO chunk_size should be a function of the desired erasure code rate + // recovery_threshold is currently num_storage_nodes rounded down to a power of two + // TODO recovery_threshold should be a function of the desired erasure code rate // https://github.com/EspressoSystems/HotShot/issues/2152 - let chunk_size = 1 << num_storage_nodes.ilog2(); + let recovery_threshold = 1 << num_storage_nodes.ilog2(); - // TODO intelligent choice of multiplicity - let multiplicity = 1; + let num_storage_nodes = u32::try_from(num_storage_nodes).unwrap_or_else(|err| { + panic!("num_storage_nodes {num_storage_nodes} should fit into u32\n\terror: : {err}") + }); // TODO panic, return `Result`, or make `new` infallible upstream (eg. by panicking)? #[allow(clippy::panic)] - VidSchemeType(Advz::new(chunk_size, num_storage_nodes, multiplicity, &*KZG_SRS).unwrap_or_else(|err| panic!("advz construction failure:\n\t(num_storage nodes,chunk_size,multiplicity)=({num_storage_nodes},{chunk_size},{multiplicity})\n\terror: : {err}"))) + VidSchemeType(Advz::new(num_storage_nodes, recovery_threshold, &*KZG_SRS).unwrap_or_else(|err| panic!("advz construction failure:\n\t(num_storage nodes,recovery_threshold)=({num_storage_nodes},{recovery_threshold})\n\terror: : {err}"))) } /// VID commitment type @@ -173,15 +174,15 @@ impl VidScheme for VidSchemeType { ::is_consistent(commit, common) } - fn get_payload_byte_len(common: &Self::Common) -> usize { + fn get_payload_byte_len(common: &Self::Common) -> u32 { ::get_payload_byte_len(common) } - fn get_num_storage_nodes(common: &Self::Common) -> usize { + fn get_num_storage_nodes(common: &Self::Common) -> u32 { ::get_num_storage_nodes(common) } - fn get_multiplicity(common: &Self::Common) -> usize { + fn get_multiplicity(common: &Self::Common) -> u32 { ::get_multiplicity(common) } } diff --git a/types/src/vote.rs b/types/src/vote.rs index 3117ddef03..db62bdc8c0 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -6,7 +6,7 @@ use std::{ }; use bitvec::{bitvec, vec::BitVec}; -use commit::Commitment; +use committable::Commitment; use either::Either; use ethereum_types::U256; use tracing::error; diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml index fa1bf01e36..29213c9b67 100644 --- a/web_server/Cargo.toml +++ b/web_server/Cargo.toml @@ -15,7 +15,7 @@ tide-disco = { workspace = true } tracing = { workspace = true } rand = { workspace = true } toml = { workspace = true } -versioned-binary-serialization = { workspace = true } +vbs = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index 22783710cc..f41f89cdc9 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -23,7 +23,7 @@ use tide_disco::{ Api, App, StatusCode, Url, }; use tracing::{debug, info}; -use versioned_binary_serialization::version::StaticVersionType; +use vbs::version::StaticVersionType; /// Convience alias for a lock over the state of the app /// TODO this is used in two places. It might be clearer to just inline @@ -1110,9 +1110,10 @@ pub async fn run_web_server< let web_api = define_api(&options).unwrap(); let state = State::new(WebServerState::new().with_shutdown_signal(shutdown_listener)); - let mut app = App::, Error, NetworkVersion>::with_state(state); + let mut app = App::, Error>::with_state(state); - app.register_module("api", web_api).unwrap(); + app.register_module::("api", web_api) + .unwrap(); let app_future = app.serve(url, bind_version); From 1f08e62ebc14401bfb8ddb1f665fd493644c4eb0 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 9 Apr 2024 02:19:40 -0400 Subject: [PATCH 0928/1393] Attach certificate throughout upgrade (#2903) --- hotshot/src/lib.rs | 1 - hotshot/src/tasks/task_state.rs | 2 +- task-impls/Cargo.toml | 2 +- task-impls/src/consensus.rs | 228 ++++++++++++-------------- task-impls/src/helpers.rs | 12 ++ testing/src/predicates.rs | 7 +- testing/src/view_generator.rs | 9 +- testing/tests/tests_1/upgrade_task.rs | 15 +- types/Cargo.toml | 1 + types/src/consensus.rs | 8 +- types/src/data.rs | 64 ++++++++ types/src/message.rs | 23 ++- types/src/simple_certificate.rs | 39 +++++ types/src/simple_vote.rs | 12 +- 14 files changed, 277 insertions(+), 146 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 04d0a78803..a7469e7ade 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -235,7 +235,6 @@ impl> SystemContext { saved_leaves, saved_payloads, saved_da_certs: HashMap::new(), - saved_upgrade_certs: HashMap::new(), // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index fa5bf90fd9..df1f4c3add 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -191,7 +191,7 @@ impl> CreateTaskState vote_collector: None.into(), timeout_vote_collector: None.into(), timeout_task: None, - upgrade_cert: None, + formed_upgrade_certificate: None, proposal_cert: None, decided_upgrade_cert: None, version: handle.hotshot.version.clone(), diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 2f9b918b25..2994a7308b 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -11,6 +11,7 @@ gpu-vid = ["hotshot-types/gpu-vid"] [dependencies] +anyhow = { workspace = true } async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } @@ -25,7 +26,6 @@ hotshot-task = { path = "../task" } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } jf-primitives = { workspace = true } -memoize = { workspace = true } rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 51060dbce8..6b23d9bafc 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,8 +1,9 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, + helpers::{broadcast_event, cancel_task, AnyhowTracing}, vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; +use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; @@ -10,7 +11,9 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use async_std::task::JoinHandle; use committable::Committable; use core::time::Duration; +use futures::future::FutureExt; use hotshot_task::task::{Task, TaskState}; +use hotshot_types::data::null_block; use hotshot_types::event::LeafInfo; use hotshot_types::{ consensus::{Consensus, View}, @@ -18,7 +21,7 @@ use hotshot_types::{ event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote, UpgradeProposalData}, + simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -63,40 +66,54 @@ type VoteCollectorOption = Option( proposal: Proposal>, parent_leaf: Leaf, consensus: Arc>>, + decided_upgrade_certificate: Option>, + quorum_membership: Arc, parent_state: Arc, view_leader_key: TYPES::SignatureKey, event_stream: Sender>>, sender: TYPES::SignatureKey, event_sender: Sender>, storage: Arc>>, -) { - let Ok((validated_state, state_delta)) = parent_state +) -> Result<()> { + let (validated_state, state_delta) = parent_state .validate_and_apply_header( &consensus.read().await.instance_state, &parent_leaf, &proposal.data.block_header.clone(), ) .await - else { - error!("Block header doesn't extend the proposal",); - return; - }; + .context("Block header doesn't extend the proposal!")?; + let state = Arc::new(validated_state); let delta = Arc::new(state_delta); let parent_commitment = parent_leaf.commit(); let view = proposal.data.get_view_number(); + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); proposed_leaf.set_parent_commitment(parent_commitment); - // Validate the signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - if !view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()) { - error!(?proposal.signature, "Could not verify proposal."); - return; - } + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + // + // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: + // + // proposal.validate_signature(&quorum_membership)?; + // + // in a future PR. + ensure!( + view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), + "Could not verify proposal." + ); + + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + + // Validate that the upgrade certificate is re-attached, if we saw one on the parent + proposed_leaf.extends_upgrade(parent_leaf, decided_upgrade_certificate)?; + let justify_qc = proposal.data.justify_qc.clone(); // Create a positive vote if either liveness or safety check // passes. @@ -119,9 +136,7 @@ async fn validate_proposal( ); let safety_check = outcome.is_ok(); - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view); + ensure!(safety_check || liveness_check, { if let Err(e) = outcome { broadcast_event( Event { @@ -132,8 +147,9 @@ async fn validate_proposal( ) .await; } - return; - } + + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view) + }); // We accept the proposal, notify the application layer @@ -182,6 +198,8 @@ async fn validate_proposal( { warn!("Couldn't store undecided state. Error: {:?}", e); } + + Ok(()) } /// The state for the consensus task. Contains all of the information for the implementation @@ -239,8 +257,13 @@ pub struct ConsensusTaskState< /// timeout task handle pub timeout_task: Option>, - /// last Upgrade Certificate this node formed - pub upgrade_cert: Option>, + /// The most recent upgrade certificate this node formed. + /// Note: this is ONLY for certificates that have been formed internally, + /// so that we can propose with them. + /// + /// Certificates received from other nodes will get reattached regardless of this fields, + /// since they will be present in the leaf we propose off of. + pub formed_upgrade_certificate: Option>, /// last View Sync Certificate or Timeout Certificate this node formed. pub proposal_cert: Option>, @@ -294,7 +317,7 @@ impl, A: ConsensusApi + } if let Some(upgrade_cert) = &self.decided_upgrade_cert { - if view_is_between_versions(self.cur_view, &upgrade_cert.data) + if upgrade_cert.in_interim(self.cur_view) && Some(proposal.block_header.payload_commitment()) != null_block::commitment(self.quorum_membership.total_nodes()) { @@ -516,6 +539,10 @@ impl, A: ConsensusApi + event: Arc>, event_stream: Sender>>, ) { + error!( + "self.decided_upgrade_cert is {:?}", + self.decided_upgrade_cert.clone() + ); match event.as_ref() { HotShotEvent::QuorumProposalRecv(proposal, sender) => { let sender = sender.clone(); @@ -592,32 +619,15 @@ impl, A: ConsensusApi + return; } - // Validate the upgrade certificate, if one is attached. - // Continue unless the certificate is invalid. - // - // Note: we are *not* directly voting on the upgrade certificate here. - // Once a certificate has been (allegedly) formed, it has already been voted on. - // The certificate is either valid or invalid, and we are simply validating it. - // - // SS: It is possible that we may wish to vote against any quorum proposal - // if it attaches an upgrade certificate that we cannot support. - // But I don't think there's much point in this -- if the UpgradeCertificate - // threshold (90%) has been reached, voting against the QuorumProposal on that basis - // will probably be completely symbolic anyway. - // - // We should just make sure we don't *sign* an UpgradeCertificate for an upgrade - // that we do not support. - if let Some(ref upgrade_cert) = proposal.data.upgrade_certificate { - if upgrade_cert.is_valid_cert(self.quorum_membership.as_ref()) { - self.consensus - .write() - .await - .saved_upgrade_certs - .insert(view, upgrade_cert.clone()); - } else { - error!("Invalid upgrade_cert in proposal for view {}", *view); - return; - } + // Validate the upgrade certificate -- this is just a signature validation. + // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. + if let Err(e) = UpgradeCertificate::validate( + &proposal.data.upgrade_certificate, + &self.quorum_membership, + ) { + warn!("{:?}", e); + + return; } // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here @@ -764,17 +774,23 @@ impl, A: ConsensusApi + return; }; - async_spawn(validate_proposal( - proposal.clone(), - parent_leaf, - self.consensus.clone(), - parent_state.clone(), - view_leader_key, - event_stream.clone(), - sender, - self.output_event_stream.clone(), - self.storage.clone(), - )); + + async_spawn( + validate_proposal( + proposal.clone(), + parent_leaf, + self.consensus.clone(), + self.decided_upgrade_cert.clone(), + self.quorum_membership.clone(), + parent_state.clone(), + view_leader_key, + event_stream.clone(), + sender, + self.output_event_stream.clone(), + self.storage.clone(), + ) + .map(AnyhowTracing::err_as_debug), + ); } HotShotEvent::QuorumProposalValidated(proposal) => { let consensus = self.consensus.upgradable_read().await; @@ -827,9 +843,13 @@ impl, A: ConsensusApi + .last_synced_block_height .set(usize::try_from(leaf.get_height()).unwrap_or(0)); } - if let Some(upgrade_cert) = consensus.saved_upgrade_certs.get(&leaf.get_view_number()) { - info!("Updating consensus state with decided upgrade certificate: {:?}", upgrade_cert); - self.decided_upgrade_cert = Some(upgrade_cert.clone()); + if let Some(cert) = leaf.get_upgrade_certificate() { + if cert.data.decide_by < view { + warn!("Failed to decide an upgrade certificate in time. Ignoring."); + } else { + info!("Updating consensus state with decided upgrade certificate: {:?}", cert); + self.decided_upgrade_cert = Some(cert.clone()); + } } // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. @@ -1100,10 +1120,11 @@ impl, A: ConsensusApi + *cert.view_number ); - // Update our current upgrade_cert as long as it's still relevant. - if cert.view_number >= self.cur_view { - debug!("Updating current upgrade_cert"); - self.upgrade_cert = Some(cert.clone()); + // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. + if cert.data.decide_by >= self.cur_view + 3 { + debug!("Updating current formed_upgrade_certificate"); + + self.formed_upgrade_certificate = Some(cert.clone()); } } HotShotEvent::DACertificateRecv(cert) => { @@ -1196,7 +1217,7 @@ impl, A: ConsensusApi + // If we have a decided upgrade certificate, // we may need to upgrade the protocol version on a view change. if let Some(ref cert) = self.decided_upgrade_cert { - if new_view >= cert.data.new_version_first_block { + if new_view == cert.data.new_version_first_view { warn!( "Updating version based on a decided upgrade cert: {:?}", cert @@ -1209,9 +1230,6 @@ impl, A: ConsensusApi + &event_stream, ) .await; - - // Discard the old upgrade certificate, which is no longer relevant. - self.decided_upgrade_cert = None; } } @@ -1430,7 +1448,7 @@ impl, A: ConsensusApi + // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. if let Some(upgrade_cert) = &self.decided_upgrade_cert { - if view_is_between_versions(self.cur_view, &upgrade_cert.data) { + if upgrade_cert.in_interim(self.cur_view) { let Ok((_payload, metadata)) = ::from_transactions(Vec::new()) else { @@ -1460,7 +1478,7 @@ impl, A: ConsensusApi + view_number: view, justify_qc: consensus.high_qc.clone(), proposal_certificate: None, - upgrade_certificate: None, + upgrade_certificate: self.decided_upgrade_cert.clone(), }; let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); @@ -1507,21 +1525,25 @@ impl, A: ConsensusApi + commit_and_metadata.metadata.clone(), ) .await; - let upgrade_cert = if self - .upgrade_cert - .as_ref() - .is_some_and(|cert| cert.view_number == view) - { - debug!("Attaching upgrade certificate to proposal."); - // If the cert view number matches, set upgrade_cert to self.upgrade_cert - // and set self.upgrade_cert to None. - // - // Note: the certificate is discarded, regardless of whether the vote on the proposal succeeds or not. - self.upgrade_cert.take() - } else { - // Otherwise, set upgrade_cert to None. - None - }; + + // In order of priority, we should try to attach: + // - the parent certificate if it exists, or + // - our own certificate that we formed. + // In either case, we need to ensure that the certificate is still relevant. + // + // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. + // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. + let formed_upgrade_certificate = self.formed_upgrade_certificate.take(); + let mut proposal_upgrade_certificate = parent_leaf + .get_upgrade_certificate() + .or(formed_upgrade_certificate); + + if !proposal_upgrade_certificate.clone().is_some_and(|cert| { + cert.is_relevant(view, self.decided_upgrade_cert.clone()) + .is_ok() + }) { + proposal_upgrade_certificate = None; + } // We only want to proposal to be attached if any of them are valid. let proposal_certificate = self @@ -1536,7 +1558,7 @@ impl, A: ConsensusApi + view_number: view, justify_qc: consensus.high_qc.clone(), proposal_certificate, - upgrade_certificate: upgrade_cert.clone(), + upgrade_certificate: proposal_upgrade_certificate.clone(), }; let mut new_leaf = Leaf::from_quorum_proposal(&proposal); @@ -1611,35 +1633,3 @@ impl, A: ConsensusApi + matches!(event.as_ref(), HotShotEvent::Shutdown) } } - -pub mod null_block { - #![allow(missing_docs)] - use hotshot_types::vid::{vid_scheme, VidCommitment}; - use jf_primitives::vid::VidScheme; - use memoize::memoize; - - /// The commitment for a null block payload. - /// - /// Note: the commitment depends on the network (via `num_storage_nodes`), - /// and may change (albeit rarely) during execution. - /// - /// We memoize the result to avoid having to recalculate it. - #[memoize(SharedCache, Capacity: 10)] - #[must_use] - pub fn commitment(num_storage_nodes: usize) -> Option { - let vid_result = vid_scheme(num_storage_nodes).commit_only(&Vec::new()); - - match vid_result { - Ok(r) => Some(r), - Err(_) => None, - } - } -} - -/// Test whether a view is in the range defined by an upgrade certificate. -fn view_is_between_versions( - view: TYPES::Time, - upgrade_data: &UpgradeProposalData, -) -> bool { - view > upgrade_data.old_version_last_block && view < upgrade_data.new_version_first_block -} diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 32492e4dfb..efc54bca68 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -59,3 +59,15 @@ pub async fn calculate_vid_disperse( VidDisperse::from_membership(view, vid_disperse, membership) } + +/// Utilities to print anyhow logs. +pub trait AnyhowTracing { + /// Print logs as debug + fn err_as_debug(self); +} + +impl AnyhowTracing for anyhow::Result { + fn err_as_debug(self) { + let _ = self.inspect_err(|e| tracing::debug!("{}", format!("{:?}", e))); + } +} diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index f43838af90..ff583f1411 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -1,8 +1,7 @@ use hotshot_task_impls::{ - consensus::{null_block, ConsensusTaskState}, - events::HotShotEvent, - events::HotShotEvent::*, + consensus::ConsensusTaskState, events::HotShotEvent, events::HotShotEvent::*, }; +use hotshot_types::data::null_block; use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; use std::collections::HashSet; use std::sync::Arc; @@ -11,7 +10,7 @@ use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -#[derive(Eq, PartialEq, Copy, Clone)] +#[derive(Eq, PartialEq, Copy, Clone, Debug)] pub enum PredicateResult { Pass, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 935bb57d41..42e0151434 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -49,6 +49,7 @@ pub struct TestView { pub da_certificate: DACertificate, pub transactions: Vec, upgrade_data: Option>, + formed_upgrade_certificate: Option>, view_sync_finalize_data: Option>, timeout_cert_data: Option>, } @@ -137,6 +138,7 @@ impl TestView { transactions, leader_public_key, upgrade_data: None, + formed_upgrade_certificate: None, view_sync_finalize_data: None, timeout_cert_data: None, da_proposal, @@ -215,7 +217,7 @@ impl TestView { Some(cert) } else { - None + self.formed_upgrade_certificate.clone() }; let view_sync_certificate = if let Some(ref data) = self.view_sync_finalize_data { @@ -272,7 +274,7 @@ impl TestView { block_header: block_header.clone(), view_number: next_view, justify_qc: quorum_certificate.clone(), - upgrade_certificate, + upgrade_certificate: upgrade_certificate.clone(), proposal_certificate, }; @@ -320,6 +322,9 @@ impl TestView { // so we reset for the next view. transactions: Vec::new(), upgrade_data: None, + // We preserve the upgrade_certificate once formed, + // and reattach it on every future view until cleared. + formed_upgrade_certificate: upgrade_certificate, view_sync_finalize_data: None, timeout_cert_data: None, da_proposal, diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 301f1cdafa..863c8bc90e 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -39,9 +39,10 @@ async fn test_consensus_task_upgrade() { let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, new_version, + decide_by: ViewNumber::new(5), new_version_hash: [0u8; 12].to_vec(), - old_version_last_block: ViewNumber::new(5), - new_version_first_block: ViewNumber::new(7), + old_version_last_view: ViewNumber::new(5), + new_version_first_view: ViewNumber::new(7), }; let mut proposals = Vec::new(); @@ -175,9 +176,10 @@ async fn test_upgrade_and_consensus_task() { let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, new_version, + decide_by: ViewNumber::new(4), new_version_hash: [0u8; 12].to_vec(), - old_version_last_block: ViewNumber::new(5), - new_version_first_block: ViewNumber::new(7), + old_version_last_view: ViewNumber::new(5), + new_version_first_view: ViewNumber::new(7), }; let mut proposals = Vec::new(); @@ -331,9 +333,10 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, new_version, + decide_by: ViewNumber::new(4), new_version_hash: [0u8; 12].to_vec(), - old_version_last_block: ViewNumber::new(4), - new_version_first_block: ViewNumber::new(8), + old_version_last_view: ViewNumber::new(4), + new_version_first_view: ViewNumber::new(8), }; let mut proposals = Vec::new(); diff --git a/types/Cargo.toml b/types/Cargo.toml index 72287849d2..de15be8858 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -38,6 +38,7 @@ generic-array = { workspace = true } # TODO generic-array should not be a direct dependency # https://github.com/EspressoSystems/HotShot/issues/1850 lazy_static = { workspace = true } +memoize = { workspace = true } rand = { workspace = true } sha2 = { workspace = true } snafu = { workspace = true } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 8cffa11a20..4a88261db7 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -6,7 +6,7 @@ use displaydoc::Display; use crate::{ data::{Leaf, VidDisperseShare}, error::HotShotError, - simple_certificate::{DACertificate, QuorumCertificate, UpgradeCertificate}, + simple_certificate::{DACertificate, QuorumCertificate}, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, node_implementation::NodeType, @@ -49,10 +49,6 @@ pub struct Consensus { /// view -> DA cert pub saved_da_certs: HashMap>, - /// All the upgrade certs we've received for current and future views. - /// view -> upgrade cert - pub saved_upgrade_certs: HashMap>, - /// View number that is currently on. pub cur_view: TYPES::Time, @@ -335,8 +331,6 @@ impl Consensus { // perform gc self.saved_da_certs .retain(|view_number, _| *view_number >= old_anchor_view); - self.saved_upgrade_certs - .retain(|view_number, _| *view_number >= old_anchor_view); self.validated_state_map .range(old_anchor_view..new_anchor_view) .filter_map(|(_view_number, view)| view.get_leaf_commitment()) diff --git a/types/src/data.rs b/types/src/data.rs index ae42d8748f..1d62a7a156 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -23,6 +23,7 @@ use crate::{ vid::{VidCommitment, VidCommon, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; +use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; use committable::{Commitment, Committable, RawCommitmentBuilder}; @@ -526,6 +527,45 @@ impl Leaf { pub fn get_payload_commitment(&self) -> VidCommitment { self.get_block_header().payload_commitment() } + + /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf + /// + /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. + pub fn extends_upgrade( + &self, + parent: Self, + decided_upgrade_certificate: Option>, + ) -> Result<()> { + match ( + self.get_upgrade_certificate(), + parent.get_upgrade_certificate(), + ) { + // No upgrade certificate on either is the most common case, and is always fine. + (None, None) => {} + // If the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade. Again, this is always fine. + (Some(_), None) => {} + // If we no longer see a cert, we have to make sure that we either: + // - no longer care because we have passed new_version_first_view, or + // - no longer care because we have passed `decide_by` without deciding the certificate. + (None, Some(parent_cert)) => { + ensure!(self.get_view_number() > parent_cert.data.new_version_first_view + || (self.get_view_number() > parent_cert.data.decide_by && decided_upgrade_certificate.is_none()), + "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." + ); + } + // If we both have a certificate, they should be identical. + // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. + // I think this is a fairly lax restriction. + (Some(cert), Some(parent_cert)) => { + ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); + } + } + + // This check should be added once we sort out the genesis leaf/justify_qc issue. + // ensure!(self.get_parent_commitment() == parent_leaf.commit(), "The commitment of the parent leaf does not match the specified parent commitment."); + + Ok(()) + } } impl TestableLeaf for Leaf @@ -630,3 +670,27 @@ impl Leaf { Leaf::from_proposal(proposal).commit() } } + +pub mod null_block { + #![allow(missing_docs)] + use crate::vid::{vid_scheme, VidCommitment}; + use jf_primitives::vid::VidScheme; + use memoize::memoize; + + /// The commitment for a null block payload. + /// + /// Note: the commitment depends on the network (via `num_storage_nodes`), + /// and may change (albeit rarely) during execution. + /// + /// We memoize the result to avoid having to recalculate it. + #[memoize(SharedCache, Capacity: 10)] + #[must_use] + pub fn commitment(num_storage_nodes: usize) -> Option { + let vid_result = vid_scheme(num_storage_nodes).commit_only(&Vec::new()); + + match vid_result { + Ok(r) => Some(r), + Err(_) => None, + } + } +} diff --git a/types/src/message.rs b/types/src/message.rs index c16f2cd24a..840b583a81 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,7 +3,7 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use crate::data::{QuorumProposal, UpgradeProposal, VidDisperseShare}; +use crate::data::{Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}; use crate::simple_certificate::{ DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -12,6 +12,7 @@ use crate::simple_vote::{ DAVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }; +use crate::traits::election::Membership; use crate::traits::network::ResponseMessage; use crate::traits::signature_key::SignatureKey; use crate::vote::HasViewNumber; @@ -23,6 +24,8 @@ use crate::{ node_implementation::{ConsensusTime, NodeType}, }, }; +use anyhow::{ensure, Result}; +use committable::Committable; use derivative::Derivative; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; @@ -319,3 +322,21 @@ pub struct Proposal + Deserializ /// Phantom for TYPES pub _pd: PhantomData, } + +impl Proposal> +where + TYPES: NodeType, +{ + pub fn validate_signature(&self, quorum_membership: &TYPES::Membership) -> Result<()> { + let view_number = self.data.get_view_number(); + let view_leader_key = quorum_membership.get_leader(view_number); + let proposed_leaf = Leaf::from_quorum_proposal(&self.data); + + ensure!( + view_leader_key.validate(&self.signature, proposed_leaf.commit().as_ref()), + "Proposal signature is invalid." + ); + + Ok(()) + } +} diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index c977b32c4c..30f6bf0719 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -6,6 +6,8 @@ use std::{ marker::PhantomData, }; +use anyhow::{ensure, Result}; + use committable::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; @@ -177,6 +179,43 @@ impl QuorumCertificate { } } +impl UpgradeCertificate { + pub fn is_relevant( + &self, + view_number: TYPES::Time, + decided_upgrade_certificate: Option, + ) -> Result<()> { + ensure!( + self.data.decide_by >= view_number + || decided_upgrade_certificate.is_some_and(|cert| cert == *self), + "Upgrade certificate is no longer relevant." + ); + + Ok(()) + } + + /// Validate an upgrade certificate + pub fn validate( + upgrade_certificate: &Option, + quorum_membership: &TYPES::Membership, + ) -> Result<()> { + if let Some(ref cert) = upgrade_certificate { + ensure!( + cert.is_valid_cert(quorum_membership), + "Invalid upgrade certificate." + ); + Ok(()) + } else { + Ok(()) + } + } + + /// Test whether a view is in the interim period prior to the new version taking effect. + pub fn in_interim(&self, view: TYPES::Time) -> bool { + view > self.data.old_version_last_view && view < self.data.new_version_first_view + } +} + /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; /// Type alias for a DA certificate over `DAData` diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index bf9b85ada2..293ebc6307 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -70,12 +70,15 @@ pub struct UpgradeProposalData { pub old_version: Version, /// The new version that we are upgrading to. pub new_version: Version, + /// The last view in which we are allowed to reach a decide on this upgrade. + /// If it is not decided by that view, we discard it. + pub decide_by: TYPES::Time, /// A unique identifier for the specific protocol being voted on. pub new_version_hash: Vec, /// The last block for which the old version will be in effect. - pub old_version_last_block: TYPES::Time, + pub old_version_last_view: TYPES::Time, /// The first block for which the new version will be in effect. - pub new_version_first_block: TYPES::Time, + pub new_version_first_view: TYPES::Time, } /// Marker trait for data or commitments that can be voted on. @@ -196,8 +199,9 @@ impl Committable for UpgradeProposalData { fn commit(&self) -> Commitment { let builder = committable::RawCommitmentBuilder::new("Upgrade data"); builder - .u64(*self.new_version_first_block) - .u64(*self.old_version_last_block) + .u64(*self.decide_by) + .u64(*self.new_version_first_view) + .u64(*self.old_version_last_view) .var_size_bytes(self.new_version_hash.as_slice()) .u16(self.new_version.minor) .u16(self.new_version.major) From 3c44e231a59f87a8d2e5b2e8016ceb8064e7aea1 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 9 Apr 2024 09:26:48 +0300 Subject: [PATCH 0929/1393] fix random builder panic (#2926) --- testing/src/block_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 708a2f3272..7e247f627b 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -200,7 +200,7 @@ impl RandomBuilderSource { ) { tracing::warn!("Block {} evicted", hash); }; - async_sleep(time_per_block - start.elapsed()).await; + async_sleep(time_per_block.saturating_sub(start.elapsed())).await; } }); } From 2764c9c4c14ae80076ab4e0eaa56dfd9da2783a5 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 9 Apr 2024 11:30:17 +0300 Subject: [PATCH 0930/1393] Spawn task for proposing (#2920) * Spawn task for proposing * Fix tests, return after blank proposal * fmt and lint --- macros/src/lib.rs | 2 +- task-impls/src/consensus.rs | 242 +++++++++++---------- testing/tests/tests_1/proposal_ordering.rs | 14 +- testing/tests/tests_1/upgrade_task.rs | 1 + 4 files changed, 128 insertions(+), 131 deletions(-) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index f179c8a94e..29ea7d19f9 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -354,7 +354,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { #task_names.state().handle_result(&res).await; } - while let Ok(received_output) = test_receiver.try_recv() { + while let Ok(Ok(received_output)) = async_timeout(Duration::from_millis(35), test_receiver.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 6b23d9bafc..9580502d22 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -202,6 +202,69 @@ async fn validate_proposal( Ok(()) } +/// Create the header for a proposal, build the proposal, and broadcast +/// the proposal send evnet. +#[allow(clippy::too_many_arguments)] +async fn create_and_send_proposal( + pub_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + event_stream: Sender>>, + view: TYPES::Time, + commitment: VidCommitment, + metadata: ::Metadata, + parent_leaf: Leaf, + state: Arc, + upgrade_cert: Option>, + proposal_cert: Option>, + round_start_delay: u64, +) { + let block_header = TYPES::BlockHeader::new( + state.as_ref(), + &consensus.read().await.instance_state, + &parent_leaf, + commitment, + metadata, + ) + .await; + + let proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: consensus.read().await.high_qc.clone(), + proposal_certificate: proposal_cert, + upgrade_certificate: upgrade_cert, + }; + + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); + proposed_leaf.set_parent_commitment(parent_leaf.commit()); + + let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) + else { + // This should never happen. + error!("Failed to sign proposed_leaf.commit()!"); + return; + }; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!( + "Sending null proposal for view {:?} \n {:?}", + proposed_leaf.get_view_number(), + "" + ); + + async_sleep(Duration::from_millis(round_start_delay)).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend(message.clone(), pub_key)), + &event_stream, + ) + .await; +} + /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState< @@ -539,10 +602,6 @@ impl, A: ConsensusApi + event: Arc>, event_stream: Sender>>, ) { - error!( - "self.decided_upgrade_cert is {:?}", - self.decided_upgrade_cert.clone() - ); match event.as_ref() { HotShotEvent::QuorumProposalRecv(proposal, sender) => { let sender = sender.clone(); @@ -1079,9 +1138,7 @@ impl, A: ConsensusApi + let view = qc.view_number + 1; - if !self.publish_proposal_if_able(view, &event_stream).await { - warn!("Wasn't able to publish proposal"); - } + self.publish_proposal_if_able(view, &event_stream).await; } if let either::Left(qc) = cert { if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { @@ -1104,14 +1161,8 @@ impl, A: ConsensusApi + *qc.view_number ); - if !self - .publish_proposal_if_able(qc.view_number + 1, &event_stream) - .await - { - debug!( - "Wasn't able to publish proposal when QC was formed, still may publish" - ); - } + self.publish_proposal_if_able(qc.view_number + 1, &event_stream) + .await; } } HotShotEvent::UpgradeCertificateFormed(cert) => { @@ -1381,7 +1432,7 @@ impl, A: ConsensusApi + &mut self, view: TYPES::Time, event_stream: &Sender>>, - ) -> bool { + ) { if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. if view != TYPES::Time::new(1) { @@ -1390,7 +1441,7 @@ impl, A: ConsensusApi + view ); } - return false; + return; } let consensus = self.consensus.read().await; @@ -1400,7 +1451,7 @@ impl, A: ConsensusApi + let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { // This should have been added by the replica? error!("Couldn't find parent view in state map, waiting for replica to see proposal\n parent view number: {}", **parent_view_number); - return false; + return; }; // Leaf hash in view inner does not match high qc hash - Why? let Some((leaf_commitment, state)) = parent_view.get_leaf_and_state() else { @@ -1409,7 +1460,7 @@ impl, A: ConsensusApi + ?parent_view, "Parent of high QC points to a view without a proposal" ); - return false; + return; }; if leaf_commitment != consensus.high_qc.get_data().leaf_commit { // NOTE: This happens on the genesis block @@ -1421,7 +1472,7 @@ impl, A: ConsensusApi + } let Some(leaf) = consensus.saved_leaves.get(&leaf_commitment) else { error!("Failed to find high QC of parent."); - return false; + return; }; if leaf.get_view_number() == consensus.last_decided_view { reached_decided = true; @@ -1453,7 +1504,7 @@ impl, A: ConsensusApi + ::from_transactions(Vec::new()) else { error!("Failed to build null block payload and metadata"); - return false; + return; }; let Some(null_block_commitment) = @@ -1461,71 +1512,39 @@ impl, A: ConsensusApi + else { // This should never happen. error!("Failed to calculate null block commitment"); - return false; - }; - - let block_header = TYPES::BlockHeader::new( - state, - &consensus.instance_state, - &parent_leaf, - null_block_commitment, - metadata, - ) - .await; - - let proposal = QuorumProposal { - block_header, - view_number: view, - justify_qc: consensus.high_qc.clone(), - proposal_certificate: None, - upgrade_certificate: self.decided_upgrade_cert.clone(), - }; - - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); - proposed_leaf.set_parent_commitment(parent_leaf.commit()); - - let Ok(signature) = - TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) - else { - // This should never happen. - error!("Failed to sign proposed_leaf.commit()!"); - return false; - }; - - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, + return; }; - debug!( - "Sending null proposal for view {:?} \n {:?}", - proposed_leaf.get_view_number(), - "" - ); - - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend( - message.clone(), - self.public_key.clone(), - )), - event_stream, - ) - .await; - return true; + let pub_key = self.public_key.clone(); + let priv_key = self.private_key.clone(); + let consensus = self.consensus.clone(); + let sender = event_stream.clone(); + let delay = self.round_start_delay; + let parent = parent_leaf.clone(); + let state = state.clone(); + let upgrade_cert = self.decided_upgrade_cert.clone(); + async_spawn(async move { + create_and_send_proposal( + pub_key, + priv_key, + consensus, + sender, + view, + null_block_commitment, + metadata, + parent, + state, + upgrade_cert, + None, + delay, + ) + .await; + }); + return; } } if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { - let block_header = TYPES::BlockHeader::new( - state, - &consensus.instance_state, - &parent_leaf, - commit_and_metadata.commitment, - commit_and_metadata.metadata.clone(), - ) - .await; - // In order of priority, we should try to attach: // - the parent certificate if it exists, or // - our own certificate that we formed. @@ -1551,49 +1570,36 @@ impl, A: ConsensusApi + .as_ref() .filter(|cert| cert.is_valid_for_view(&view)) .cloned(); - - // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. - let proposal = QuorumProposal { - block_header: block_header.clone(), - view_number: view, - justify_qc: consensus.high_qc.clone(), - proposal_certificate, - upgrade_certificate: proposal_upgrade_certificate.clone(), - }; - - let mut new_leaf = Leaf::from_quorum_proposal(&proposal); - new_leaf.set_parent_commitment(parent_leaf.commit()); - - let Ok(signature) = - TYPES::SignatureKey::sign(&self.private_key, new_leaf.commit().as_ref()) - else { - error!("Failed to sign new_leaf.commit()!"); - return false; - }; + let pub_key = self.public_key.clone(); + let priv_key = self.private_key.clone(); + let consensus = self.consensus.clone(); + let sender = event_stream.clone(); + let commit = commit_and_metadata.commitment; + let metadata = commit_and_metadata.metadata.clone(); + let state = state.clone(); + let delay = self.round_start_delay; + async_spawn(async move { + create_and_send_proposal( + pub_key, + priv_key, + consensus, + sender, + view, + commit, + metadata, + parent_leaf.clone(), + state, + proposal_upgrade_certificate, + proposal_certificate, + delay, + ) + .await; + }); self.proposal_cert = None; - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, - }; - debug!("Sending proposal for view {:?}", view); - - async_sleep(Duration::from_millis(self.round_start_delay)).await; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend( - message.clone(), - self.public_key.clone(), - )), - event_stream, - ) - .await; - self.payload_commitment_and_metadata = None; - return true; } debug!("Cannot propose because we don't have the VID payload commitment and metadata"); - false } } diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 72895343a6..54952bfc7a 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -70,22 +70,12 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(node_id)), ]; - // The consensus task does not like it when the proposal received is the last thing to happen, - // at least not while keeping an arbitrary ordering. The testing framework does not allow us to - // check events out of order, so we instead just give the test what it wants, but this should - // still be okay. - let view_2_outputs = if input_permutation[2] == 0 { - vec![ - quorum_proposal_send(), - exact(ViewChange(ViewNumber::new(2))), - ] - } else { + let view_2_outputs = vec![ exact(ViewChange(ViewNumber::new(2))), exact(QuorumProposalValidated(proposals[1].data.clone())), quorum_proposal_send(), - ] - }; + ]; let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 863c8bc90e..a5e62b6fd2 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -18,6 +18,7 @@ use hotshot_types::{ simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, }; +use std::time::Duration; use vbs::version::Version; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] From bd0c9619c3387240ac26a9964a1a8a4b4e242229 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 9 Apr 2024 13:46:00 +0300 Subject: [PATCH 0931/1393] Cancel Spawned Tasks after Decide in Consensus Task (#2929) * Spawn task for proposing * Fix tests, return after blank proposal * fmt and lint * Keep track of spawned consensus tasks; GC on decide * Store tasks by view, actually cancel them --- hotshot/src/tasks/task_state.rs | 2 + task-impls/src/consensus.rs | 131 +++++++++++++++++++------------- 2 files changed, 81 insertions(+), 52 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index df1f4c3add..ac4e260b71 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -12,6 +12,7 @@ use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; +use std::collections::BTreeMap; use std::{collections::HashMap, marker::PhantomData}; use vbs::version::StaticVersionType; @@ -191,6 +192,7 @@ impl> CreateTaskState vote_collector: None.into(), timeout_vote_collector: None.into(), timeout_task: None, + spawned_tasks: BTreeMap::new(), formed_upgrade_certificate: None, proposal_cert: None, decided_upgrade_cert: None, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 9580502d22..4669bafbc9 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -11,7 +11,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use async_std::task::JoinHandle; use committable::Committable; use core::time::Duration; -use futures::future::FutureExt; +use futures::future::{join_all, FutureExt}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::data::null_block; use hotshot_types::event::LeafInfo; @@ -44,7 +44,7 @@ use crate::vote_collection::HandleVoteEvent; use chrono::Utc; use hotshot_types::data::VidDisperseShare; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, sync::Arc, }; @@ -320,6 +320,10 @@ pub struct ConsensusTaskState< /// timeout task handle pub timeout_task: Option>, + /// Spawned tasks related to a specific view, so we can cancel them when + /// they are stale + pub spawned_tasks: BTreeMap>>, + /// The most recent upgrade certificate this node formed. /// Note: this is ONLY for certificates that have been formed internally, /// so that we can propose with them. @@ -355,6 +359,17 @@ pub struct ConsensusTaskState< impl, A: ConsensusApi + 'static> ConsensusTaskState { + /// Cancel all tasks the consensus tasks has spawned before the given view + async fn cancel_tasks(&mut self, view: TYPES::Time) { + let keep = self.spawned_tasks.split_off(&view); + let mut cancel = Vec::new(); + while let Some((_, tasks)) = self.spawned_tasks.pop_first() { + let mut to_cancel = tasks.into_iter().map(cancel_task).collect(); + cancel.append(&mut to_cancel); + } + self.spawned_tasks = keep; + join_all(cancel).await; + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. @@ -834,22 +849,25 @@ impl, A: ConsensusApi + return; }; - async_spawn( - validate_proposal( - proposal.clone(), - parent_leaf, - self.consensus.clone(), - self.decided_upgrade_cert.clone(), - self.quorum_membership.clone(), - parent_state.clone(), - view_leader_key, - event_stream.clone(), - sender, - self.output_event_stream.clone(), - self.storage.clone(), - ) - .map(AnyhowTracing::err_as_debug), - ); + self.spawned_tasks + .entry(proposal.data.get_view_number()) + .or_default() + .push(async_spawn( + validate_proposal( + proposal.clone(), + parent_leaf, + self.consensus.clone(), + self.decided_upgrade_cert.clone(), + self.quorum_membership.clone(), + parent_state.clone(), + view_leader_key, + event_stream.clone(), + sender, + self.output_event_stream.clone(), + self.storage.clone(), + ) + .map(AnyhowTracing::err_as_debug), + )); } HotShotEvent::QuorumProposalValidated(proposal) => { let consensus = self.consensus.upgradable_read().await; @@ -1012,6 +1030,9 @@ impl, A: ConsensusApi + let qc = consensus.high_qc.clone(); drop(consensus); + if new_decide_reached { + self.cancel_tasks(new_anchor_view).await; + } if should_propose { debug!( "Attempting to publish proposal after voting; now in view: {}", @@ -1523,23 +1544,26 @@ impl, A: ConsensusApi + let parent = parent_leaf.clone(); let state = state.clone(); let upgrade_cert = self.decided_upgrade_cert.clone(); - async_spawn(async move { - create_and_send_proposal( - pub_key, - priv_key, - consensus, - sender, - view, - null_block_commitment, - metadata, - parent, - state, - upgrade_cert, - None, - delay, - ) - .await; - }); + self.spawned_tasks + .entry(view) + .or_default() + .push(async_spawn(async move { + create_and_send_proposal( + pub_key, + priv_key, + consensus, + sender, + view, + null_block_commitment, + metadata, + parent, + state, + upgrade_cert, + None, + delay, + ) + .await; + })); return; } } @@ -1578,23 +1602,26 @@ impl, A: ConsensusApi + let metadata = commit_and_metadata.metadata.clone(); let state = state.clone(); let delay = self.round_start_delay; - async_spawn(async move { - create_and_send_proposal( - pub_key, - priv_key, - consensus, - sender, - view, - commit, - metadata, - parent_leaf.clone(), - state, - proposal_upgrade_certificate, - proposal_certificate, - delay, - ) - .await; - }); + self.spawned_tasks + .entry(view) + .or_default() + .push(async_spawn(async move { + create_and_send_proposal( + pub_key, + priv_key, + consensus, + sender, + view, + commit, + metadata, + parent_leaf.clone(), + state, + proposal_upgrade_certificate, + proposal_certificate, + delay, + ) + .await; + })); self.proposal_cert = None; self.payload_commitment_and_metadata = None; From ebfb40bd4d7396e03d3df876041a2b5cbafea505 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 9 Apr 2024 14:11:35 +0300 Subject: [PATCH 0932/1393] [Builder] Make simple builder generic over `NodeType` (#2931) * make simple builder generic over nodetype * lint --- builder-api/src/block_info.rs | 11 +- builder-api/src/builder.rs | 4 +- builder-api/src/data_source.rs | 20 +- example-types/src/block_types.rs | 17 +- macros/src/lib.rs | 2 +- testing/Cargo.toml | 1 + testing/src/block_builder.rs | 260 ++++++++++---------- testing/src/test_runner.rs | 4 +- testing/tests/tests_1/block_builder.rs | 2 +- testing/tests/tests_1/libp2p.rs | 6 +- testing/tests/tests_2/catchup.rs | 10 +- testing/tests/tests_2/push_cdn.rs | 2 +- testing/tests/tests_5/combined_network.rs | 10 +- testing/tests/tests_5/timeout.rs | 4 +- testing/tests/tests_5/unreliable_network.rs | 16 +- testing/tests/tests_5/web_server.rs | 2 +- types/src/traits/block_contents.rs | 8 + types/src/traits/signature_key.rs | 3 +- 18 files changed, 198 insertions(+), 184 deletions(-) diff --git a/builder-api/src/block_info.rs b/builder-api/src/block_info.rs index 0afb546e4b..236bb4af83 100644 --- a/builder-api/src/block_info.rs +++ b/builder-api/src/block_info.rs @@ -20,12 +20,11 @@ pub struct AvailableBlockInfo { #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] -pub struct AvailableBlockData { - pub block_payload: ::BlockPayload, - pub metadata: <::BlockPayload as BlockPayload>::Metadata, - pub signature: <::SignatureKey as SignatureKey>::PureAssembledSignatureType, - pub sender: ::SignatureKey, - pub _phantom: PhantomData, +pub struct AvailableBlockData { + pub block_payload: TYPES::BlockPayload, + pub metadata: ::Metadata, + pub signature: ::PureAssembledSignatureType, + pub sender: TYPES::SignatureKey, } #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 68f6b52ab2..6c08cb68c7 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -122,9 +122,7 @@ where State: 'static + Send + Sync + ReadState, ::State: Send + Sync + BuilderDataSource, Types: NodeType, - <::SignatureKey as SignatureKey>::PureAssembledSignatureType: - for<'a> TryFrom<&'a TaggedBase64> + Into + Display, - for<'a> <<::SignatureKey as SignatureKey>::PureAssembledSignatureType as TryFrom< + for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, { diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index f95748b4ec..3ec3ee0c1f 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -4,7 +4,6 @@ use hotshot_types::{ utils::BuilderCommitment, vid::VidCommitment, }; -use tagged_base64::TaggedBase64; use crate::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, @@ -12,34 +11,29 @@ use crate::{ }; #[async_trait] -pub trait BuilderDataSource -where - I: NodeType, - <::SignatureKey as SignatureKey>::PureAssembledSignatureType: - for<'a> TryFrom<&'a TaggedBase64> + Into, -{ +pub trait BuilderDataSource { // To get the list of available blocks async fn get_available_blocks( &self, for_parent: &VidCommitment, - ) -> Result>, BuildError>; + ) -> Result>, BuildError>; // to claim a block from the list of provided available blocks async fn claim_block( &self, block_hash: &BuilderCommitment, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError>; + signature: &::PureAssembledSignatureType, + ) -> Result, BuildError>; // To claim a block header input async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError>; + signature: &::PureAssembledSignatureType, + ) -> Result, BuildError>; // To get the builder address - async fn get_builder_address(&self) -> Result<::SignatureKey, BuildError>; + async fn get_builder_address(&self) -> Result; } #[async_trait] diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 5d2cae0c87..d89ebf2401 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -64,7 +64,22 @@ impl Committable for TestTransaction { } } -impl Transaction for TestTransaction {} +impl Transaction for TestTransaction { + /// Create a transaction from bytes + fn from_bytes(bytes: &[u8]) -> Self { + Self(bytes.to_vec()) + } + + /// Get the length of the transaction in bytes + fn len(&self) -> usize { + self.0.len() + } + + /// Returns whether or not the transaction is empty + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} /// A [`BlockPayload`] that contains a list of `TestTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 29ea7d19f9..62cb8bee7c 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -119,7 +119,7 @@ impl TestData { async fn #test_name() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test::>().await; + (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test::().await; } } } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 573ccad2f1..ed741d8ac4 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -41,6 +41,7 @@ tide-disco = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } lru = { workspace = true } +tagged-base64.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 7e247f627b..e73af5a116 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -1,5 +1,6 @@ use std::{ collections::HashMap, + fmt::Display, num::NonZeroUsize, ops::{Deref, Range}, sync::Arc, @@ -12,7 +13,7 @@ use async_trait::async_trait; use committable::{Commitment, Committable}; use futures::{future::BoxFuture, Stream, StreamExt}; use hotshot::{ - traits::{BlockPayload, TestableNodeImplementation}, + traits::BlockPayload, types::{Event, EventType, SignatureKey}, }; use hotshot_builder_api::{ @@ -20,10 +21,8 @@ use hotshot_builder_api::{ builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; -use hotshot_example_types::{ - block_types::{TestBlockPayload, TestTransaction}, - node_types::TestTypes, -}; +use hotshot_types::traits::block_contents::BlockHeader; +use hotshot_types::traits::block_contents::Transaction; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{block_contents::vid_commitment, election::Membership, node_implementation::NodeType}, @@ -32,62 +31,58 @@ use hotshot_types::{ }; use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; +use tagged_base64::TaggedBase64; use tide_disco::{method::ReadState, App, Url}; #[async_trait] -pub trait TestBuilderImplementation { - type TYPES: NodeType; - type I: TestableNodeImplementation; +pub trait TestBuilderImplementation { async fn start( - membership: Arc<::Membership>, - ) -> (Option>>, Url); + membership: Arc<::Membership>, + ) -> (Option>>, Url); } -pub struct RandomBuilderImplementation> { - _marker: std::marker::PhantomData, -} +pub struct RandomBuilderImplementation; #[async_trait] -impl> TestBuilderImplementation - for RandomBuilderImplementation +impl TestBuilderImplementation for RandomBuilderImplementation +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, { - type TYPES = TestTypes; - type I = I; - async fn start( - _membership: Arc<::Membership>, - ) -> (Option>>, Url) { + _membership: Arc, + ) -> (Option>>, Url) { let port = portpicker::pick_unused_port().expect("No free ports"); let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); - run_random_builder(url.clone()); + run_random_builder::(url.clone()); (None, url) } } -pub struct SimpleBuilderImplementation> { - _marker: std::marker::PhantomData, -} +pub struct SimpleBuilderImplementation; #[async_trait] -impl> TestBuilderImplementation - for SimpleBuilderImplementation +impl TestBuilderImplementation for SimpleBuilderImplementation +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, { - type TYPES = TestTypes; - type I = I; - async fn start( - membership: Arc<::Membership>, - ) -> (Option>>, Url) { + membership: Arc, + ) -> (Option>>, Url) { let port = portpicker::pick_unused_port().expect("No free ports"); let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); let (source, task) = make_simple_builder(membership).await; - let builder_api = - hotshot_builder_api::builder::define_api::( - &Options::default(), - ) - .expect("Failed to construct the builder API"); - let mut app: App = + let builder_api = hotshot_builder_api::builder::define_api::< + SimpleBuilderSource, + TYPES, + Version01, + >(&Options::default()) + .expect("Failed to construct the builder API"); + let mut app: App, hotshot_builder_api::builder::Error> = App::with_state(source); app.register_module("api", builder_api) .expect("Failed to register the builder API"); @@ -98,10 +93,10 @@ impl> TestBuilderImplementation } /// Entry for a built block -struct BlockEntry { - metadata: AvailableBlockInfo, - payload: Option>, - header_input: Option>, +struct BlockEntry { + metadata: AvailableBlockInfo, + payload: Option>, + header_input: Option>, } /// Options controlling how the random builder generates blocks @@ -132,26 +127,26 @@ impl Default for RandomBuilderOptions { /// Builds random blocks, doesn't track HotShot state at all. /// Evicts old available blocks if HotShot doesn't keep up. #[derive(Clone, Debug)] -pub struct RandomBuilderSource { +pub struct RandomBuilderSource { /// Built blocks blocks: Arc< RwLock< // Isn't strictly speaking used as a cache, // just as a HashMap that evicts old blocks - LruCache, + LruCache>, >, >, - pub_key: ::SignatureKey, - priv_key: <::SignatureKey as SignatureKey>::PrivateKey, + pub_key: TYPES::SignatureKey, + priv_key: ::PrivateKey, } -impl RandomBuilderSource { +impl RandomBuilderSource { /// Create new [`RandomBuilderSource`] #[must_use] #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 pub fn new( - pub_key: ::SignatureKey, - priv_key: <::SignatureKey as SignatureKey>::PrivateKey, + pub_key: TYPES::SignatureKey, + priv_key: ::PrivateKey, ) -> Self { Self { blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), @@ -164,13 +159,13 @@ impl RandomBuilderSource { #[allow(clippy::missing_panics_doc)] // ony panics on 16-bit platforms pub fn run(&self, options: RandomBuilderOptions) { let blocks = self.blocks.clone(); - let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key); + let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key.clone()); async_spawn(async move { let mut rng = SmallRng::from_entropy(); let time_per_block = Duration::from_secs(1) / options.blocks_per_second; loop { let start = std::time::Instant::now(); - let transactions: Vec = (0..options.txn_in_block) + let transactions: Vec = (0..options.txn_in_block) .map(|_| { let mut bytes = vec![ 0; @@ -179,14 +174,14 @@ impl RandomBuilderSource { .expect("We are NOT running on a 16-bit platform") ]; rng.fill_bytes(&mut bytes); - TestTransaction(bytes) + TYPES::Transaction::from_bytes(&bytes) }) .collect(); let (metadata, payload, header_input) = build_block( transactions, options.num_storage_nodes, - pub_key, + pub_key.clone(), priv_key.clone(), ); @@ -207,7 +202,7 @@ impl RandomBuilderSource { } #[async_trait] -impl ReadState for RandomBuilderSource { +impl ReadState for RandomBuilderSource { type State = Self; async fn read( @@ -219,11 +214,11 @@ impl ReadState for RandomBuilderSource { } #[async_trait] -impl BuilderDataSource for RandomBuilderSource { +impl BuilderDataSource for RandomBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, - ) -> Result>, BuildError> { + ) -> Result>, BuildError> { Ok(self .blocks .deref() @@ -237,8 +232,8 @@ impl BuilderDataSource for RandomBuilderSource { async fn claim_block( &self, block_hash: &BuilderCommitment, - _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError> { + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; let payload = entry.payload.take().ok_or(BuildError::Missing)?; @@ -252,8 +247,8 @@ impl BuilderDataSource for RandomBuilderSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, - _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError> { + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; let header_input = entry.header_input.take().ok_or(BuildError::Missing)?; @@ -264,10 +259,8 @@ impl BuilderDataSource for RandomBuilderSource { Ok(header_input) } - async fn get_builder_address( - &self, - ) -> Result<::SignatureKey, BuildError> { - Ok(self.pub_key) + async fn get_builder_address(&self) -> Result { + Ok(self.pub_key.clone()) } } @@ -275,34 +268,39 @@ impl BuilderDataSource for RandomBuilderSource { /// /// # Panics /// If constructing and launching the builder fails for any reason -pub fn run_random_builder(url: Url) { - let (pub_key, priv_key) = - ::SignatureKey::generated_from_seed_indexed([1; 32], 0); +pub fn run_random_builder(url: Url) +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, +{ + let (pub_key, priv_key) = TYPES::SignatureKey::generated_from_seed_indexed([1; 32], 0); let source = RandomBuilderSource::new(pub_key, priv_key); source.run(RandomBuilderOptions::default()); let builder_api = - hotshot_builder_api::builder::define_api::( + hotshot_builder_api::builder::define_api::, TYPES, Version01>( &Options::default(), ) .expect("Failed to construct the builder API"); - let mut app: App = App::with_state(source); + let mut app: App, Error> = App::with_state(source); app.register_module::("api", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, STATIC_VER_0_1)); } -pub struct SimpleBuilderSource { - pub_key: ::SignatureKey, - priv_key: <::SignatureKey as SignatureKey>::PrivateKey, - membership: Arc<::Membership>, - transactions: Arc, TestTransaction>>>, - blocks: Arc>>, +pub struct SimpleBuilderSource { + pub_key: TYPES::SignatureKey, + priv_key: ::PrivateKey, + membership: Arc, + #[allow(clippy::type_complexity)] + transactions: Arc, TYPES::Transaction>>>, + blocks: Arc>>>, } #[async_trait] -impl ReadState for SimpleBuilderSource { +impl ReadState for SimpleBuilderSource { type State = Self; async fn read( @@ -314,21 +312,21 @@ impl ReadState for SimpleBuilderSource { } #[async_trait] -impl BuilderDataSource for SimpleBuilderSource { +impl BuilderDataSource for SimpleBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, - ) -> Result>, BuildError> { + ) -> Result>, BuildError> { let transactions = self .transactions .read(|txns| { - Box::pin(async { txns.values().cloned().collect::>() }) + Box::pin(async { txns.values().cloned().collect::>() }) }) .await; let (metadata, payload, header_input) = build_block( transactions, self.membership.total_nodes(), - self.pub_key, + self.pub_key.clone(), self.priv_key.clone(), ); @@ -347,8 +345,8 @@ impl BuilderDataSource for SimpleBuilderSource { async fn claim_block( &self, block_hash: &BuilderCommitment, - _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError> { + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; entry.payload.take().ok_or(BuildError::Missing) @@ -357,28 +355,32 @@ impl BuilderDataSource for SimpleBuilderSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, - _signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError> { + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; entry.header_input.take().ok_or(BuildError::Missing) } - async fn get_builder_address( - &self, - ) -> Result<::SignatureKey, BuildError> { - Ok(self.pub_key) + async fn get_builder_address(&self) -> Result { + Ok(self.pub_key.clone()) } } -impl SimpleBuilderSource { +impl SimpleBuilderSource +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, +{ pub async fn run(self, url: Url) { - let builder_api = - hotshot_builder_api::builder::define_api::( - &Options::default(), - ) - .expect("Failed to construct the builder API"); - let mut app: App = App::with_state(self); + let builder_api = hotshot_builder_api::builder::define_api::< + SimpleBuilderSource, + TYPES, + Version01, + >(&Options::default()) + .expect("Failed to construct the builder API"); + let mut app: App, Error> = App::with_state(self); app.register_module::("api", builder_api) .expect("Failed to register the builder API"); @@ -387,29 +389,24 @@ impl SimpleBuilderSource { } #[derive(Clone)] -pub struct SimpleBuilderTask { - transactions: Arc, TestTransaction>>>, - blocks: Arc>>, - decided_transactions: LruCache, ()>, +pub struct SimpleBuilderTask { + #[allow(clippy::type_complexity)] + transactions: Arc, TYPES::Transaction>>>, + blocks: Arc>>>, + decided_transactions: LruCache, ()>, } -pub trait BuilderTask: Send + Sync { - type TYPES: NodeType; - +pub trait BuilderTask: Send + Sync { fn start( self: Box, - stream: Box> + std::marker::Unpin + Send + 'static>, + stream: Box> + std::marker::Unpin + Send + 'static>, ); } -impl BuilderTask for SimpleBuilderTask { - type TYPES = TestTypes; - +impl BuilderTask for SimpleBuilderTask { fn start( mut self: Box, - mut stream: Box< - dyn Stream> + std::marker::Unpin + Send + 'static, - >, + mut stream: Box> + std::marker::Unpin + Send + 'static>, ) { async_spawn(async move { loop { @@ -422,7 +419,9 @@ impl BuilderTask for SimpleBuilderTask { let mut queue = self.transactions.write().await; for leaf_info in leaf_chain.iter() { if let Some(ref payload) = leaf_info.leaf.get_block_payload() { - for txn in payload.transaction_commitments(&()) { + for txn in payload.transaction_commitments( + leaf_info.leaf.get_block_header().metadata(), + ) { self.decided_transactions.put(txn, ()); queue.remove(&txn); } @@ -446,11 +445,10 @@ impl BuilderTask for SimpleBuilderTask { } } -pub async fn make_simple_builder( - membership: Arc<::Membership>, -) -> (SimpleBuilderSource, SimpleBuilderTask) { - let (pub_key, priv_key) = - ::SignatureKey::generated_from_seed_indexed([1; 32], 0); +pub async fn make_simple_builder( + membership: Arc, +) -> (SimpleBuilderSource, SimpleBuilderTask) { + let (pub_key, priv_key) = TYPES::SignatureKey::generated_from_seed_indexed([1; 32], 0); let transactions = Arc::new(RwLock::new(HashMap::new())); let blocks = Arc::new(RwLock::new(HashMap::new())); @@ -473,21 +471,22 @@ pub async fn make_simple_builder( } /// Helper function to construct all builder data structures from a list of transactions -fn build_block( - transactions: Vec, +fn build_block( + transactions: Vec, num_storage_nodes: usize, - pub_key: ::SignatureKey, - priv_key: <::SignatureKey as SignatureKey>::PrivateKey, + pub_key: TYPES::SignatureKey, + priv_key: ::PrivateKey, ) -> ( - AvailableBlockInfo, - AvailableBlockData, - AvailableBlockHeaderInput, + AvailableBlockInfo, + AvailableBlockData, + AvailableBlockHeaderInput, ) { - let block_size = transactions.iter().map(|t| t.0.len() as u64).sum::(); + let block_size = transactions.iter().map(|t| t.len() as u64).sum::(); - let block_payload = TestBlockPayload { transactions }; + let (block_payload, metadata) = TYPES::BlockPayload::from_transactions(transactions) + .expect("failed to build block payload from transactions"); - let commitment = block_payload.builder_commitment(&()); + let commitment = block_payload.builder_commitment(&metadata); let vid_commitment = vid_commitment( &block_payload.encode().unwrap().collect(), @@ -499,7 +498,7 @@ fn build_block( block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); block_info.extend_from_slice(commitment.as_ref()); - match ::SignatureKey::sign(&priv_key, &block_info) { + match TYPES::SignatureKey::sign(&priv_key, &block_info) { Ok(sig) => sig, Err(e) => { panic!("Failed to sign block: {}", e); @@ -508,7 +507,7 @@ fn build_block( }; let signature_over_builder_commitment = - match ::SignatureKey::sign(&priv_key, commitment.as_ref()) { + match TYPES::SignatureKey::sign(&priv_key, commitment.as_ref()) { Ok(sig) => sig, Err(e) => { panic!("Failed to sign block: {}", e); @@ -516,7 +515,7 @@ fn build_block( }; let signature_over_vid_commitment = - match ::SignatureKey::sign(&priv_key, vid_commitment.as_ref()) { + match TYPES::SignatureKey::sign(&priv_key, vid_commitment.as_ref()) { Ok(sig) => sig, Err(e) => { panic!("Failed to sign block: {}", e); @@ -525,13 +524,12 @@ fn build_block( let block = AvailableBlockData { block_payload, - metadata: (), - sender: pub_key, + metadata, + sender: pub_key.clone(), signature: signature_over_block_info, - _phantom: std::marker::PhantomData, }; let metadata = AvailableBlockInfo { - sender: pub_key, + sender: pub_key.clone(), signature: signature_over_builder_commitment, block_hash: commitment, block_size, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 2b0a676e6e..2a1be42d89 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -132,7 +132,7 @@ where /// # Panics /// if the test fails #[allow(clippy::too_many_lines)] - pub async fn run_test>(mut self) { + pub async fn run_test>(mut self) { let (tx, rx) = broadcast(EVENT_CHANNEL_SIZE); let spinning_changes = self .launcher @@ -327,7 +327,7 @@ where /// /// # Panics /// Panics if unable to create a [`HotShotInitializer`] - pub async fn add_nodes>( + pub async fn add_nodes>( &mut self, total: usize, late_start: &HashSet, diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 5c3635c33b..333450a16d 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -29,7 +29,7 @@ async fn test_random_block_builder() { let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - run_random_builder(api_url.clone()); + run_random_builder::(api_url.clone()); let builder_started = Instant::now(); let client: BuilderClient = BuilderClient::new(api_url); diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index ed07389b44..eabc560b23 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -37,7 +37,7 @@ async fn libp2p_network() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -84,7 +84,7 @@ async fn libp2p_network_failures_2() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -100,6 +100,6 @@ async fn test_stress_libp2p_network() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 74be76f0d7..2ba6cbf8d6 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -52,7 +52,7 @@ async fn test_catchup() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -105,7 +105,7 @@ async fn test_catchup_cdn() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -160,7 +160,7 @@ async fn test_catchup_one_node() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -221,7 +221,7 @@ async fn test_catchup_in_view_sync() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -282,6 +282,6 @@ async fn test_catchup_reload() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index 7a37aa42b9..b63bb15048 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -40,7 +40,7 @@ async fn push_cdn_network() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; shutdown_logging(); } diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 156ea07d5f..93ed12e866 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -45,7 +45,7 @@ async fn test_combined_network() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -93,7 +93,7 @@ async fn test_combined_network_cdn_crash() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -147,7 +147,7 @@ async fn test_combined_network_reup() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -195,7 +195,7 @@ async fn test_combined_network_half_dc() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -265,6 +265,6 @@ async fn test_stress_combined_network_fuzzy() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index f2e4466afc..0fa4cd72ed 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -57,7 +57,7 @@ async fn test_timeout_web() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -124,6 +124,6 @@ async fn test_timeout_libp2p() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 0ee858019d..308abdcaf6 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -41,7 +41,7 @@ async fn libp2p_network_sync() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -74,7 +74,7 @@ async fn test_memory_network_sync() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -113,7 +113,7 @@ async fn libp2p_network_async() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -160,7 +160,7 @@ async fn test_memory_network_async() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -212,7 +212,7 @@ async fn test_memory_network_partially_sync() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -252,7 +252,7 @@ async fn libp2p_network_partially_sync() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -290,7 +290,7 @@ async fn test_memory_network_chaos() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } @@ -325,6 +325,6 @@ async fn libp2p_network_chaos() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; } diff --git a/testing/tests/tests_5/web_server.rs b/testing/tests/tests_5/web_server.rs index e9ba618a0e..2c551247c2 100644 --- a/testing/tests/tests_5/web_server.rs +++ b/testing/tests/tests_5/web_server.rs @@ -40,7 +40,7 @@ async fn web_server_network() { metadata .gen_launcher::(0) .launch() - .run_test::>() + .run_test::() .await; shutdown_logging(); } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 13818a2820..e97890bd22 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -24,6 +24,14 @@ use std::{ pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { + /// Build a transaction from bytes + fn from_bytes(bytes: &[u8]) -> Self; + + /// Get the length of the transaction + fn len(&self) -> usize; + + /// Whether or not the transaction is empty + fn is_empty(&self) -> bool; } /// Abstraction over the full contents of a block diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 38fbe41a7f..484721b0c0 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -68,7 +68,8 @@ pub trait SignatureKey: + Eq + Serialize + for<'a> Deserialize<'a> - + Into; + + Into + + for<'a> TryFrom<&'a TaggedBase64>; /// The type of the assembled qc: assembled signature + `BitVec` type QCType: Send + Sync From 8b9569d22f78c3482a14d4c33583c8a75d3d5c50 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 9 Apr 2024 09:44:34 -0400 Subject: [PATCH 0933/1393] Add workspace lints for hotshot-types (#2923) --- task-impls/src/consensus.rs | 2 +- task-impls/src/helpers.rs | 2 +- testing/src/task_helpers.rs | 2 +- testing/tests/tests_1/vid_task.rs | 2 +- types/Cargo.toml | 3 +++ types/src/data.rs | 17 +++++------------ types/src/event.rs | 23 +++++++++++++++++++++-- types/src/simple_certificate.rs | 2 +- types/src/traits/network.rs | 5 ++++- types/src/traits/storage.rs | 4 ++++ types/src/vid.rs | 1 + 11 files changed, 43 insertions(+), 20 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 4669bafbc9..4fde098064 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -780,7 +780,7 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - let leaf = Leaf::from_proposal(proposal); + let leaf = Leaf::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header( diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index efc54bca68..bbf6f9e204 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -57,7 +57,7 @@ pub async fn calculate_vid_disperse( // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); - VidDisperse::from_membership(view, vid_disperse, membership) + VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) } /// Utilities to print anyhow logs. diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index eac39c9ba6..287d51b499 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -398,7 +398,7 @@ pub fn build_vid_proposal( let vid_disperse = VidDisperse::from_membership( view_number, vid.disperse(encoded_transactions).unwrap(), - quorum_membership.clone().into(), + quorum_membership, ); VidDisperseShare::from_vid_disperse(vid_disperse) diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 1d6d71bb02..535204d15a 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -54,7 +54,7 @@ async fn test_vid_task() { let vid_disperse = VidDisperse::from_membership( message.data.view_number, vid_disperse, - quorum_membership.clone().into(), + &quorum_membership, ); let vid_proposal = Proposal { diff --git a/types/Cargo.toml b/types/Cargo.toml index de15be8858..5ad087f24f 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -65,3 +65,6 @@ async-std = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } + +[lints] +workspace = true diff --git a/types/src/data.rs b/types/src/data.rs index 1d62a7a156..18cbf05cdd 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -37,7 +37,6 @@ use std::{ fmt::{Debug, Display}, hash::Hash, marker::PhantomData, - sync::Arc, }; use tracing::error; @@ -157,7 +156,7 @@ impl VidDisperse { pub fn from_membership( view_number: TYPES::Time, mut vid_disperse: JfVidDisperse, - membership: Arc, + membership: &TYPES::Membership, ) -> Self { let shares = membership .get_staked_committee(view_number) @@ -186,6 +185,7 @@ pub enum ViewChangeEvidence { } impl ViewChangeEvidence { + /// Check that the given ViewChangeEvidence is relevant to the current view. pub fn is_valid_for_view(&self, view: &TYPES::Time) -> bool { match self { ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.get_data().view == *view - 1, @@ -195,6 +195,7 @@ impl ViewChangeEvidence { } #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +/// VID share and associated metadata for a single node pub struct VidDisperseShare { /// The view number for which this VID data is intended pub view_number: TYPES::Time, @@ -268,6 +269,7 @@ impl VidDisperseShare { Some(vid_disperse) } + /// Split a VID share proposal into a proposal for each recipient. pub fn to_vid_share_proposals( vid_disperse_proposal: Proposal>, ) -> Vec>> { @@ -640,10 +642,7 @@ impl Committable for Leaf { } impl Leaf { - pub fn from_proposal(proposal: &Proposal>) -> Self { - Self::from_quorum_proposal(&proposal.data) - } - + /// Constructs a leaf from a given quorum proposal. pub fn from_quorum_proposal(quorum_proposal: &QuorumProposal) -> Self { // WARNING: Do NOT change this to a wildcard match, or reference the fields directly in the construction of the leaf. // The point of this match is that we will get a compile-time error if we add a field without updating this. @@ -663,12 +662,6 @@ impl Leaf { block_payload: None, } } - - pub fn commit_from_proposal( - proposal: &Proposal>, - ) -> Commitment { - Leaf::from_proposal(proposal).commit() - } } pub mod null_block { diff --git a/types/src/event.rs b/types/src/event.rs index 224adad9a5..244bd4001c 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -58,16 +58,26 @@ impl LeafInfo { /// The chain of decided leaves with its corresponding state and VID info. pub type LeafChain = Vec>; +/// Utilities for converting between HotShotError and a string. pub mod error_adaptor { - use super::*; + use super::{Arc, Deserialize, HotShotError, NodeType}; use serde::{de::Deserializer, ser::Serializer}; + + /// Convert a HotShotError into a string + /// + /// # Errors + /// Returns `Err` if the serializer fails. pub fn serialize( elem: &Arc>, serializer: S, ) -> Result { - serializer.serialize_str(&format!("{}", elem)) + serializer.serialize_str(&format!("{elem}")) } + /// Convert a string into a HotShotError + /// + /// # Errors + /// Returns `Err` if the string cannot be deserialized. pub fn deserialize<'de, D: Deserializer<'de>, TYPES: NodeType>( deserializer: D, ) -> Result>, D::Error> { @@ -159,13 +169,22 @@ pub enum EventType { }, } #[derive(Debug, Serialize, Deserialize, Clone)] +/// A list of actions that we track for nodes pub enum HotShotAction { + /// A quorum vote was sent Vote, + /// A quorum proposal was sent Propose, + /// DA proposal was sent DAPropose, + /// DA vote was sent DAVote, + /// DA certificate was sent DACert, + /// VID shares were sent VidDisperse, + /// An upgrade vote was sent UpgradeVote, + /// An upgrade proposal was sent UpgradePropose, } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 30f6bf0719..c49f7ccab6 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -92,7 +92,7 @@ impl> }, + MultipleErrors { + /// vec of errors + errors: Vec>, + }, } #[derive(Clone, Debug)] // Storing view number as a u64 to avoid the need TYPES generic diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index bb62857b66..ecc1905bcd 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -21,9 +21,13 @@ use super::node_implementation::NodeType; /// Abstraction for storing a variety of consensus payload datum. #[async_trait] pub trait Storage: Send + Sync + Clone { + /// Add a proposal to the stored VID proposals. async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; + /// Add a proposal to the stored DA proposals. async fn append_da(&self, proposal: &Proposal>) -> Result<()>; + /// Record a HotShotAction taken. async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; + /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, /// and the undecided state. diff --git a/types/src/vid.rs b/types/src/vid.rs index a424227852..71ef81d85a 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -55,6 +55,7 @@ pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { // https://github.com/EspressoSystems/HotShot/issues/2152 let recovery_threshold = 1 << num_storage_nodes.ilog2(); + #[allow(clippy::panic)] let num_storage_nodes = u32::try_from(num_storage_nodes).unwrap_or_else(|err| { panic!("num_storage_nodes {num_storage_nodes} should fit into u32\n\terror: : {err}") }); From 3414612f838d0f153c5cf3f3df8edd91a36c2187 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 9 Apr 2024 21:33:26 +0300 Subject: [PATCH 0934/1393] [CX_CLEANUP] - Complete the dependency implementation (#2930) * Start adding validations * Create vote * Complete validations for DAC and VID * Add ProposalRecv validation * Rename and update comments * Fix proposal tests * Add storage when parent is missing --- hotshot/src/tasks/task_state.rs | 8 + task-impls/src/consensus.rs | 15 +- task-impls/src/events.rs | 13 +- task-impls/src/network.rs | 2 +- task-impls/src/quorum_proposal.rs | 12 - task-impls/src/quorum_vote.rs | 435 +++++++++++++++--- task-impls/src/request.rs | 6 +- testing/src/predicates.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 44 +- testing/tests/tests_1/proposal_ordering.rs | 8 +- testing/tests/tests_1/quorum_proposal_task.rs | 7 +- testing/tests/tests_1/quorum_vote_task.rs | 36 +- testing/tests/tests_1/upgrade_task.rs | 32 +- testing/tests/tests_1/vid_task.rs | 2 +- 14 files changed, 456 insertions(+), 166 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index ac4e260b71..72a2d16d0a 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -217,13 +217,21 @@ impl> CreateTaskState for QuorumVoteTaskState { async fn create_from(handle: &SystemContextHandle) -> QuorumVoteTaskState { + let consensus = handle.hotshot.get_consensus(); + QuorumVoteTaskState { + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + consensus, latest_voted_view: handle.get_cur_view().await, vote_dependencies: HashMap::new(), quorum_network: handle.hotshot.networks.quorum_network.clone(), committee_network: handle.hotshot.networks.da_network.clone(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + da_membership: handle.hotshot.memberships.da_membership.clone().into(), output_event_stream: handle.hotshot.output_event_stream.0.clone(), id: handle.hotshot.id, + storage: handle.storage.clone(), } } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 4fde098064..519250698a 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -112,7 +112,7 @@ async fn validate_proposal( UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.extends_upgrade(parent_leaf, decided_upgrade_certificate)?; + proposed_leaf.extends_upgrade(parent_leaf.clone(), decided_upgrade_certificate)?; let justify_qc = proposal.data.justify_qc.clone(); // Create a positive vote if either liveness or safety check @@ -166,7 +166,10 @@ async fn validate_proposal( .await; // Notify other tasks broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), &event_stream, ) .await; @@ -869,7 +872,7 @@ impl, A: ConsensusApi + .map(AnyhowTracing::err_as_debug), )); } - HotShotEvent::QuorumProposalValidated(proposal) => { + HotShotEvent::QuorumProposalValidated(proposal, _) => { let consensus = self.consensus.upgradable_read().await; let view = proposal.get_view_number(); self.current_proposal = Some(proposal.clone()); @@ -1221,7 +1224,7 @@ impl, A: ConsensusApi + self.current_proposal = None; } } - HotShotEvent::VidDisperseRecv(disperse) => { + HotShotEvent::VIDShareRecv(disperse) => { let view = disperse.data.get_view_number(); debug!( @@ -1640,7 +1643,7 @@ impl, A: ConsensusApi + event.as_ref(), HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::QuorumVoteRecv(_) - | HotShotEvent::QuorumProposalValidated(_) + | HotShotEvent::QuorumProposalValidated(..) | HotShotEvent::QCFormed(_) | HotShotEvent::UpgradeCertificateFormed(_) | HotShotEvent::DACertificateRecv(_) @@ -1648,7 +1651,7 @@ impl, A: ConsensusApi + | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::Timeout(_) | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::VidDisperseRecv(..) + | HotShotEvent::VIDShareRecv(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) | HotShotEvent::Shutdown, ) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 4f0355cca9..7d30b69703 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -49,15 +49,10 @@ pub enum HotShotEvent { QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), - // TODO: Complete the dependency implementation. - // - /// Dummy quorum vote to test if the quorum vote dependency works. Should be removed and - /// replaced by `QuorumVoteSend` once the above TODO is done. - DummyQuorumVoteSend(TYPES::Time), /// All dependencies for the quorum vote are validated. QuorumVoteDependenciesValidated(TYPES::Time), - /// A proposal was validated. This means it comes from the correct leader and has a correct QC. - QuorumProposalValidated(QuorumProposal), + /// A quorum proposal with the given parent leaf is validated. + QuorumProposalValidated(QuorumProposal, Leaf), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DAProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal @@ -127,10 +122,10 @@ pub enum HotShotEvent { /// /// Like [`HotShotEvent::DAProposalSend`]. VidDisperseSend(Proposal>, TYPES::SignatureKey), - /// Vid disperse data has been received from the network; handled by the DA task + /// Vid disperse share has been received from the network; handled by the consensus task /// /// Like [`HotShotEvent::DAProposalRecv`]. - VidDisperseRecv(Proposal>), + VIDShareRecv(Proposal>), /// VID share data is validated. VIDShareValidated(VidDisperseShare), /// Upgrade proposal has been received from the network diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 792aa0a307..490f6f5324 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -161,7 +161,7 @@ impl NetworkMessageTaskState { HotShotEvent::DACertificateRecv(cert) } CommitteeConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VidDisperseRecv(proposal) + HotShotEvent::VIDShareRecv(proposal) } } } diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index db2c64836f..0b56200e04 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -139,13 +139,6 @@ impl HandleDepOutput for ProposalDependencyHandle { return; } - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalDependenciesValidated( - self.view_number, - )), - &self.sender, - ) - .await; broadcast_event( Arc::new(HotShotEvent::DummyQuorumProposalSend(self.view_number)), &self.sender, @@ -488,11 +481,6 @@ impl> QuorumProposalTaskState -#[allow(clippy::needless_pass_by_value)] -fn validate_quorum_proposal( - _quorum_proposal: Proposal>, - _event_sender: Sender>>, -) -> bool { - true -} - -/// Validate the DAC. -// TODO: Complete the dependency implementation. -// -#[allow(clippy::needless_pass_by_value)] -fn validate_dac( - _dac: DACertificate, - _event_sender: Sender>>, -) -> bool { - true -} - -/// Validate the VID share. -// TODO: Complete the dependency implementation. -// -#[allow(clippy::needless_pass_by_value)] -fn validate_vid( - _disperse: Proposal>, - _event_sender: Sender>>, -) -> bool { - true -} - /// Handler for the vote dependency. struct VoteDependencyHandle { + /// Public key. + pub public_key: TYPES::SignatureKey, + /// Private Key. + pub private_key: ::PrivateKey, /// View number to vote on. view_number: TYPES::Time, /// Event sender. @@ -82,36 +61,47 @@ struct VoteDependencyHandle { impl HandleDepOutput for VoteDependencyHandle { type Output = Vec>>; async fn handle_dep_result(self, res: Self::Output) { - // Add this commitment check to test if the handler works, but this isn't the only thing - // that we'll need to check. E.g., we also need to check that VID commitment matches - // `payload_commitment`. - // TODO: Complete the dependency implementation. - // let mut payload_commitment = None; + let mut leaf = None; for event in res { match event.as_ref() { - HotShotEvent::QuorumProposalValidated(proposal) => { + HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { - error!("Quorum proposal and DAC have inconsistent payload commitment."); + error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); return; } } else { payload_commitment = Some(proposal_payload_comm); } + let parent_commitment = parent_leaf.commit(); + let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); + proposed_leaf.set_parent_commitment(parent_commitment); + leaf = Some(proposed_leaf); } HotShotEvent::DACertificateValidated(cert) => { let cert_payload_comm = cert.get_data().payload_commit; if let Some(comm) = payload_commitment { - if cert_payload_comm != comm { - error!("Quorum proposal and DAC have inconsistent payload commitment."); + if !cert.is_genesis && cert_payload_comm != comm { + error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } } else { payload_commitment = Some(cert_payload_comm); } } + HotShotEvent::VIDShareValidated(share) => { + let vid_payload_commitment = share.payload_commitment; + if let Some(comm) = payload_commitment { + if vid_payload_commitment != comm { + error!("VID has inconsistent payload commitment with quorum proposal or DAC."); + return; + } + } else { + payload_commitment = Some(vid_payload_commitment); + } + } _ => {} } } @@ -122,11 +112,32 @@ impl HandleDepOutput for VoteDependencyHandle { &self.sender, ) .await; - broadcast_event( - Arc::new(HotShotEvent::DummyQuorumVoteSend(self.view_number)), - &self.sender, - ) - .await; + + // Create and send the vote. + let Some(leaf) = leaf else { + error!("Quorum proposal isn't validated, but it should be."); + return; + }; + let message = if let Ok(vote) = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + self.view_number, + &self.public_key, + &self.private_key, + ) { + GeneralConsensusMessage::::Vote(vote) + } else { + error!("Unable to sign quorum vote!"); + return; + }; + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + 1 + ); + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; + } } } @@ -134,6 +145,15 @@ impl HandleDepOutput for VoteDependencyHandle { /// /// Contains all of the information for the quorum vote. pub struct QuorumVoteTaskState> { + /// Public key. + pub public_key: TYPES::SignatureKey, + + /// Private Key. + pub private_key: ::PrivateKey, + + /// Reference to consensus. The replica will require a write lock on this. + pub consensus: Arc>>, + /// Latest view number that has been voted for. pub latest_voted_view: TYPES::Time, @@ -146,11 +166,20 @@ pub struct QuorumVoteTaskState> { /// Network for DA committee pub committee_network: Arc, + /// Membership for Quorum certs/votes. + pub quorum_membership: Arc, + + /// Membership for DA committee certs/votes. + pub da_membership: Arc, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, /// The node's id pub id: u64, + + /// Reference to the storage. + pub storage: Arc>, } impl> QuorumVoteTaskState { @@ -168,7 +197,7 @@ impl> QuorumVoteTaskState { - if let HotShotEvent::QuorumProposalValidated(proposal) = event { + if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { proposal.view_number } else { return false; @@ -223,6 +252,8 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState>>, ) { match event.as_ref() { - HotShotEvent::QuorumProposalRecv(proposal, _sender) => { + HotShotEvent::QuorumProposalRecv(proposal, sender) => { let view = proposal.data.view_number; debug!("Received Quorum Proposal for view {}", *view); if view <= self.latest_voted_view { return; } - // stop polling for the received proposal - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal(*view)) - .await; + // TODO (Keyao) Add validations for view change evidence and upgrade cert. + + // Vaildate the justify QC. + let justify_qc = proposal.data.justify_qc.clone(); + if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { + error!("Invalid justify_qc in proposal for view {}", *view); + let consensus = self.consensus.write().await; + consensus.metrics.invalid_qc.update(1); + return; + } broadcast_event(Arc::new(HotShotEvent::ViewChange(view + 1)), &event_sender).await; - if !validate_quorum_proposal(proposal.clone(), event_sender.clone()) { + + let consensus = self.consensus.upgradable_read().await; + // Get the parent leaf and state. + let parent = if justify_qc.is_genesis { + // Send the `Decide` event for the genesis block if the justify QC is genesis. + let leaf = Leaf::genesis(&consensus.instance_state); + let (validated_state, state_delta) = + TYPES::ValidatedState::genesis(&consensus.instance_state); + let state = Arc::new(validated_state); + broadcast_event( + Event { + view_number: TYPES::Time::genesis(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![LeafInfo::new( + leaf.clone(), + state.clone(), + Some(Arc::new(state_delta)), + None, + )]), + qc: Arc::new(justify_qc.clone()), + block_size: None, + }, + }, + &self.output_event_stream, + ) + .await; + Some((leaf, state)) + } else { + match consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = + consensus.get_state_and_delta(leaf.get_view_number()) + { + Some((leaf, state.clone())) + } else { + error!("Parent state not found! Consensus internally inconsistent"); + return; + } + } + None => None, + } + }; + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + if justify_qc.get_view_number() > consensus.high_qc.view_number { + debug!("Updating high QC"); + + if let Err(e) = self + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + warn!("Failed to store High QC not voting. Error: {:?}", e); + return; + } + + consensus.high_qc = justify_qc.clone(); + } + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some((parent_leaf, parent_state)) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.get_data().leaf_commit, + *view, + ); + + let leaf = Leaf::from_proposal(proposal); + + let state = Arc::new( + >::from_header( + &proposal.data.block_header, + ), + ); + + consensus.validated_state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }, + ); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + + if let Err(e) = self + .storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + drop(consensus); + + return; + }; + + drop(consensus); + + // Validate the state. + let Ok((validated_state, state_delta)) = parent_state + .validate_and_apply_header( + &self.consensus.read().await.instance_state, + &parent_leaf, + &proposal.data.block_header.clone(), + ) + .await + else { + error!("Block header doesn't extend the proposal",); + return; + }; + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + let parent_commitment = parent_leaf.commit(); + let view = proposal.data.get_view_number(); + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + proposed_leaf.set_parent_commitment(parent_commitment); + + // Validate the signature. This should also catch if `leaf_commitment`` does not + // equal our calculated parent commitment. + let view_leader_key = self.quorum_membership.get_leader(view); + if view_leader_key != *sender { + warn!("Leader key does not match key in proposal"); + return; + } + if !view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()) { + error!(?proposal.signature, "Could not verify proposal."); return; } + + // Liveness and safety checks. + let consensus = self.consensus.upgradable_read().await; + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.get_view_number(), + Terminator::Inclusive(consensus.locked_view), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.get_view_number() != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + // Skip if both saftey and liveness checks fail. + if !safety_check && !liveness_check { + error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view); + if let Err(e) = outcome { + broadcast_event( + Event { + view_number: view, + event: EventType::Error { error: Arc::new(e) }, + }, + &self.output_event_stream, + ) + .await; + } + return; + } + + // Stop polling for the received proposal. + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal(*view)) + .await; + + // Notify the application layer and other tasks. broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated(proposal.data.clone())), - &event_sender.clone(), + Event { + view_number: view, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender: sender.clone(), + }, + }, + &self.output_event_stream, + ) + .await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), + &event_sender, ) .await; + + // Add to the storage. + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.validated_state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: state.clone(), + delta: Some(delta.clone()), + }, + }, + ); + consensus + .saved_leaves + .insert(proposed_leaf.commit(), proposed_leaf.clone()); + if let Err(e) = self + .storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + drop(consensus); + self.create_dependency_task_if_new(view, event_receiver, &event_sender); } HotShotEvent::DACertificateRecv(cert) => { @@ -293,6 +549,11 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState> QuorumVoteTaskState { + HotShotEvent::VIDShareRecv(disperse) => { let view = disperse.data.get_view_number(); debug!("Received VID share for view {}", *view); if view <= self.latest_voted_view { return; } + // Validate the VID share. + let payload_commitment = disperse.data.payload_commitment; + // Check whether the data satisfies one of the following. + // * From the right leader for this view. + // * Calculated and signed by the current node. + // * Signed by one of the staked DA committee members. + if !self + .quorum_membership + .get_leader(view) + .validate(&disperse.signature, payload_commitment.as_ref()) + && !self + .public_key + .validate(&disperse.signature, payload_commitment.as_ref()) + { + let mut validated = false; + for da_member in self.da_membership.get_staked_committee(view) { + if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { + validated = true; + break; + } + } + if !validated { + return; + } + } + // stop polling for the received disperse after verifying it's valid self.quorum_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( @@ -325,9 +616,19 @@ impl> QuorumVoteTaskState> TaskState for QuorumVoteTask HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::DACertificateRecv(_) | HotShotEvent::ViewChange(_) - | HotShotEvent::VidDisperseRecv(..) + | HotShotEvent::VIDShareRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) | HotShotEvent::Shutdown, ) diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index a9408b418d..bbbed4d1c1 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -75,7 +75,7 @@ impl, Ver: StaticVersionType + 'st task: &mut hotshot_task::task::Task, ) -> Option { match event.as_ref() { - HotShotEvent::QuorumProposalValidated(proposal) => { + HotShotEvent::QuorumProposalValidated(proposal, _) => { let state = task.state(); let prop_view = proposal.get_view_number(); if prop_view >= state.view { @@ -105,7 +105,7 @@ impl, Ver: StaticVersionType + 'st !matches!( event.as_ref(), HotShotEvent::Shutdown - | HotShotEvent::QuorumProposalValidated(_) + | HotShotEvent::QuorumProposalValidated(..) | HotShotEvent::ViewChange(_) ) } @@ -270,7 +270,7 @@ impl> DelayedRequester { async fn handle_response_message(&self, message: SequencingMessage) { let event = match message { SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(prop)) => { - HotShotEvent::VidDisperseRecv(prop) + HotShotEvent::VIDShareRecv(prop) } _ => return, }; diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index ff583f1411..b9ad705144 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -177,7 +177,7 @@ where { let info = "QuorumProposalValidated".to_string(); let function = |e: &Arc>| { - PredicateResult::from(matches!(e.as_ref(), QuorumProposalValidated(_))) + PredicateResult::from(matches!(e.as_ref(), QuorumProposalValidated(..))) }; Predicate { diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 19f83fd6f8..bc0ad97c05 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -6,7 +6,8 @@ use hotshot_testing::task_helpers::key_pair_for_id; use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_testing::{ predicates::{ - exact, is_at_view_number, quorum_proposal_send, quorum_vote_send, timeout_vote_send, + exact, is_at_view_number, quorum_proposal_send, quorum_proposal_validated, + quorum_vote_send, timeout_vote_send, }, script::{run_test_script, TestScriptStage}, task_helpers::{build_system_handle, vid_scheme_from_view_number}, @@ -56,11 +57,11 @@ async fn test_consensus_task() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![is_at_view_number(1)], @@ -78,7 +79,7 @@ async fn test_consensus_task() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_validated(), quorum_proposal_send(), ], asserts: vec![is_at_view_number(2)], @@ -135,12 +136,12 @@ async fn test_consensus_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![], @@ -187,11 +188,11 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![is_at_view_number(1)], @@ -199,7 +200,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VidDisperseRecv(vids[1].0[0].clone()), + VIDShareRecv(vids[1].0[0].clone()), DACertificateRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; @@ -207,7 +208,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let view_2_outputs = vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[1].clone())), ]; @@ -299,11 +300,11 @@ async fn test_view_sync_finalize_propose() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![is_at_view_number(1)], @@ -355,7 +356,7 @@ async fn test_view_sync_finalize_propose() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), - exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_validated(), quorum_proposal_send(), ], asserts: vec![is_at_view_number(4)], @@ -422,11 +423,11 @@ async fn test_view_sync_finalize_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![is_at_view_number(1)], @@ -459,10 +460,7 @@ async fn test_view_sync_finalize_vote() { // Multiple timeouts in a row, so we call for a view sync ViewSyncFinalizeCertificate2Recv(cert), ], - outputs: vec![ - exact(QuorumProposalValidated(proposals[0].data.clone())), - quorum_vote_send(), - ], + outputs: vec![quorum_proposal_validated(), quorum_vote_send()], asserts: vec![], }; @@ -527,11 +525,11 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![is_at_view_number(1)], @@ -623,11 +621,11 @@ async fn test_vid_disperse_storage_failure() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), /* Does not vote */ ], asserts: vec![is_at_view_number(1)], diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 54952bfc7a..5463bae541 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -2,7 +2,7 @@ use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ - predicates::{exact, is_at_view_number, quorum_proposal_send}, + predicates::{exact, is_at_view_number, quorum_proposal_send, quorum_proposal_validated}, task_helpers::vid_scheme_from_view_number, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, @@ -52,11 +52,11 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![is_at_view_number(1)], @@ -73,7 +73,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let view_2_outputs = vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_validated(), quorum_proposal_send(), ]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index e990433c42..3b430b8481 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -58,8 +58,6 @@ async fn test_quorum_proposal_task_quorum_proposal() { SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), ], outputs: vec![ - exact(QuorumProposalValidated(proposals[1].data.clone())), - exact(QuorumProposalDependenciesValidated(ViewNumber::new(2))), exact(DummyQuorumProposalSend(ViewNumber::new(2))), ], asserts: vec![], @@ -115,7 +113,6 @@ async fn test_quorum_proposal_task_qc_timeout() { SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), ], outputs: vec![ - exact(QuorumProposalDependenciesValidated(ViewNumber::new(2))), exact(DummyQuorumProposalSend(ViewNumber::new(2))), ], asserts: vec![], @@ -174,7 +171,6 @@ async fn test_quorum_proposal_task_view_sync() { SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), ], outputs: vec![ - exact(QuorumProposalDependenciesValidated(ViewNumber::new(2))), exact(DummyQuorumProposalSend(ViewNumber::new(2))), ], asserts: vec![], @@ -192,6 +188,7 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_with_incomplete_events() { + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -214,7 +211,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { // This should result in the proposal failing to be sent. let view_2 = TestScriptStage { inputs: vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![exact(QuorumProposalValidated(proposals[1].data.clone()))], + outputs: vec![], asserts: vec![], }; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 970409204f..fb9cdea7de 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -9,7 +9,7 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - predicates::exact, + predicates::{exact, quorum_proposal_validated,quorum_vote_send}, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -32,15 +32,15 @@ async fn test_quorum_vote_task_success() { inputs: vec![ QuorumProposalRecv(view.quorum_proposal.clone(), view.leader_public_key), DACertificateRecv(view.da_certificate.clone()), - VidDisperseRecv(view.vid_proposal.0[0].clone()), + VIDShareRecv(view.vid_proposal.0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(view.quorum_proposal.data.clone())), + quorum_proposal_validated(), exact(DACertificateValidated(view.da_certificate.clone())), exact(VIDShareValidated(view.vid_proposal.0[0].data.clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), - exact(DummyQuorumVoteSend(ViewNumber::new(1))), + quorum_vote_send(), ], asserts: vec![], }; @@ -57,7 +57,7 @@ async fn test_quorum_vote_task_success() { async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - predicates::exact, + predicates::{exact, quorum_proposal_validated}, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -85,38 +85,38 @@ async fn test_quorum_vote_task_miss_dependency() { } // Send two of quorum proposal, DAC, and VID disperse data, in which case there's no vote. - let view_no_quorum_proposal = TestScriptStage { + let view_no_dac = TestScriptStage { inputs: vec![ - DACertificateRecv(dacs[0].clone()), - VidDisperseRecv(vids[0].0[0].clone()), + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + VIDShareRecv(vids[0].0[0].clone()), ], outputs: vec![ - exact(DACertificateValidated(dacs[0].clone())), + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), exact(VIDShareValidated(vids[0].0[0].data.clone())), ], asserts: vec![], }; - let view_no_dac = TestScriptStage { + let view_no_vid = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidDisperseRecv(vids[1].0[0].clone()), + DACertificateRecv(dacs[1].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), - exact(QuorumProposalValidated(proposals[1].data.clone())), - exact(VIDShareValidated(vids[1].0[0].data.clone())), + quorum_proposal_validated(), + exact(DACertificateValidated(dacs[1].clone())), ], asserts: vec![], }; - let view_no_vid = TestScriptStage { + let view_no_quorum_proposal = TestScriptStage { inputs: vec![ - QuorumProposalRecv(proposals[2].clone(), leaders[2]), DACertificateRecv(dacs[2].clone()), + VIDShareRecv(vids[2].0[0].clone()), ], outputs: vec![ - exact(ViewChange(ViewNumber::new(4))), - exact(QuorumProposalValidated(proposals[2].data.clone())), exact(DACertificateValidated(dacs[2].clone())), + exact(VIDShareValidated(vids[2].0[0].data.clone())), ], asserts: vec![], }; @@ -125,7 +125,7 @@ async fn test_quorum_vote_task_miss_dependency() { QuorumVoteTaskState::::create_from(&handle).await; run_test_script( - vec![view_no_quorum_proposal, view_no_dac, view_no_vid], + vec![ view_no_dac, view_no_vid, view_no_quorum_proposal], quorum_vote_state, ) .await; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index a5e62b6fd2..570e9887c2 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -75,12 +75,12 @@ async fn test_consensus_task_upgrade() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), DACertificateRecv(dacs[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - exact(QuorumProposalValidated(proposals[0].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], asserts: vec![], @@ -89,12 +89,12 @@ async fn test_consensus_task_upgrade() { let view_2 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidDisperseRecv(vids[1].0[0].clone()), + VIDShareRecv(vids[1].0[0].clone()), DACertificateRecv(dacs[1].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - exact(QuorumProposalValidated(proposals[1].data.clone())), + quorum_proposal_validated(), exact(QuorumVoteSend(votes[1].clone())), ], asserts: vec![no_decided_upgrade_cert()], @@ -104,11 +104,11 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DACertificateRecv(dacs[2].clone()), - VidDisperseRecv(vids[2].0[0].clone()), + VIDShareRecv(vids[2].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), - exact(QuorumProposalValidated(proposals[2].data.clone())), + quorum_proposal_validated(), leaf_decided(), exact(QuorumVoteSend(votes[2].clone())), ], @@ -119,11 +119,11 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DACertificateRecv(dacs[3].clone()), - VidDisperseRecv(vids[3].0[0].clone()), + VIDShareRecv(vids[3].0[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), - exact(QuorumProposalValidated(proposals[3].data.clone())), + quorum_proposal_validated(), leaf_decided(), exact(QuorumVoteSend(votes[3].clone())), ], @@ -134,7 +134,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], outputs: vec![ exact(ViewChange(ViewNumber::new(5))), - exact(QuorumProposalValidated(proposals[4].data.clone())), + quorum_proposal_validated(), leaf_decided(), ], asserts: vec![decided_upgrade_cert()], @@ -238,7 +238,7 @@ async fn test_upgrade_and_consensus_task() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), DACertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, @@ -426,12 +426,12 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidDisperseRecv(vids[0].0[0].clone()), + VIDShareRecv(vids[0].0[0].clone()), DACertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidDisperseRecv(vids[1].0[0].clone()), + VIDShareRecv(vids[1].0[0].clone()), DACertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, @@ -441,7 +441,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[2].clone()), - VidDisperseRecv(vids[2].0[0].clone()), + VIDShareRecv(vids[2].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, (), @@ -451,7 +451,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[3].clone()), - VidDisperseRecv(vids[3].0[0].clone()), + VIDShareRecv(vids[3].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, (), @@ -461,7 +461,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[4].clone()), - VidDisperseRecv(vids[4].0[0].clone()), + VIDShareRecv(vids[4].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, (), @@ -480,7 +480,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[6].clone()), - VidDisperseRecv(vids[6].0[0].clone()), + VIDShareRecv(vids[6].0[0].clone()), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, (), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 535204d15a..adf14fb706 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -89,7 +89,7 @@ async fn test_vid_task() { )); input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidDisperseRecv(vid_share_proposal.clone())); + input.push(HotShotEvent::VIDShareRecv(vid_share_proposal.clone())); input.push(HotShotEvent::Shutdown); output.insert( From 1194d5b70810118c338b8d28b702f3eea0da92f3 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 9 Apr 2024 22:23:49 +0300 Subject: [PATCH 0935/1393] fix lints (#2932) --- task-impls/src/consensus.rs | 2 +- task-impls/src/quorum_vote.rs | 2 +- types/src/data.rs | 13 ++++++++----- types/src/message.rs | 3 +++ types/src/simple_certificate.rs | 9 ++++++++- 5 files changed, 21 insertions(+), 8 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 519250698a..d1dcf88752 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -112,7 +112,7 @@ async fn validate_proposal( UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.extends_upgrade(parent_leaf.clone(), decided_upgrade_certificate)?; + proposed_leaf.extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; let justify_qc = proposal.data.justify_qc.clone(); // Create a positive vote if either liveness or safety check diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 593b67ae75..68ee36a4a6 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -384,7 +384,7 @@ impl> QuorumVoteTaskState>::from_header( diff --git a/types/src/data.rs b/types/src/data.rs index 18cbf05cdd..4abcdef577 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -533,19 +533,22 @@ impl Leaf { /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf /// /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. + /// + /// # Errors + /// Returns an error if the certificates are not identical, or that when we no longer see a + /// cert, it's for the right reason. pub fn extends_upgrade( &self, - parent: Self, - decided_upgrade_certificate: Option>, + parent: &Self, + decided_upgrade_certificate: &Option>, ) -> Result<()> { match ( self.get_upgrade_certificate(), parent.get_upgrade_certificate(), ) { - // No upgrade certificate on either is the most common case, and is always fine. - (None, None) => {} // If the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade. Again, this is always fine. - (Some(_), None) => {} + // But, if we have no upgrade certificate on either is the most common case, and is always fine. + (Some(_) | None, None) => {} // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. diff --git a/types/src/message.rs b/types/src/message.rs index 840b583a81..b591db4dfa 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -327,6 +327,9 @@ impl Proposal> where TYPES: NodeType, { + /// Checks that the signature of the quorum proposal is valid. + /// # Errors + /// Returns an error when the proposal signature is invalid. pub fn validate_signature(&self, quorum_membership: &TYPES::Membership) -> Result<()> { let view_number = self.data.get_view_number(); let view_leader_key = quorum_membership.get_leader(view_number); diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index c49f7ccab6..8f2261380b 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -180,6 +180,11 @@ impl QuorumCertificate { } impl UpgradeCertificate { + /// Determines whether or not a certificate is relevant (i.e. we still have time to reach a + /// decide) + /// + /// # Errors + /// Returns an error when the certificate is no longer relevant pub fn is_relevant( &self, view_number: TYPES::Time, @@ -194,7 +199,9 @@ impl UpgradeCertificate { Ok(()) } - /// Validate an upgrade certificate + /// Validate an upgrade certificate. + /// # Errors + /// Returns an error when the upgrade certificate is invalid. pub fn validate( upgrade_certificate: &Option, quorum_membership: &TYPES::Membership, From 89dce04c254950e03b745ba0893fbb47064e52a9 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 9 Apr 2024 15:45:01 -0400 Subject: [PATCH 0936/1393] Enable import grouping in rustfmt (#2794) --- builder-api/src/api.rs | 4 +- builder-api/src/builder.rs | 9 +-- example-types/src/block_types.rs | 3 +- example-types/src/node_types.rs | 25 +++---- example-types/src/state_types.rs | 5 +- example-types/src/storage_types.rs | 7 +- examples/combined/all.rs | 30 +++++---- examples/combined/orchestrator.rs | 8 +-- examples/combined/types.rs | 6 +- examples/combined/validator.rs | 3 +- examples/infra/mod.rs | 52 ++++++++------- examples/libp2p/all.rs | 20 +++--- examples/libp2p/orchestrator.rs | 9 +-- examples/libp2p/types.rs | 6 +- examples/libp2p/validator.rs | 3 +- examples/push-cdn/all.rs | 16 +++-- examples/push-cdn/broker.rs | 4 +- examples/push-cdn/types.rs | 3 +- examples/push-cdn/validator.rs | 3 +- examples/push-cdn/whitelist-adapter.rs | 19 +++--- examples/webserver/all.rs | 10 ++- examples/webserver/multi-webserver.rs | 3 +- examples/webserver/orchestrator.rs | 9 +-- examples/webserver/types.rs | 9 +-- examples/webserver/validator.rs | 3 +- examples/webserver/webserver.rs | 8 +-- hotshot-stake-table/src/mt_based.rs | 6 +- hotshot-stake-table/src/mt_based/config.rs | 3 +- hotshot-stake-table/src/mt_based/internal.rs | 8 ++- hotshot-stake-table/src/vec_based.rs | 21 +++--- hotshot-stake-table/src/vec_based/config.rs | 6 +- hotshot/src/lib.rs | 50 +++++++------- hotshot/src/tasks/mod.rs | 21 +++--- hotshot/src/tasks/task_state.rs | 14 ++-- .../src/traits/election/static_committee.rs | 17 +++-- .../src/traits/networking/combined_network.rs | 65 +++++++++---------- .../src/traits/networking/libp2p_network.rs | 40 ++++++------ .../src/traits/networking/memory_network.rs | 22 ++++--- .../src/traits/networking/push_cdn_network.rs | 23 ++++--- .../traits/networking/web_server_network.rs | 34 +++++----- hotshot/src/types/handle.rs | 20 +++--- libp2p-networking/src/lib.rs | 3 +- .../src/network/behaviours/dht/mod.rs | 3 +- .../src/network/behaviours/direct_message.rs | 12 ++-- libp2p-networking/src/network/def.rs | 6 +- libp2p-networking/src/network/error.rs | 3 +- libp2p-networking/src/network/mod.rs | 42 ++++++------ libp2p-networking/src/network/node.rs | 57 ++++++++-------- libp2p-networking/src/network/node/config.rs | 6 +- libp2p-networking/src/network/node/handle.rs | 23 +++---- libp2p-networking/tests/common/mod.rs | 20 +++--- libp2p-networking/tests/counter.rs | 13 ++-- macros/src/lib.rs | 3 +- orchestrator/src/client.rs | 4 +- orchestrator/src/config.rs | 11 +++- orchestrator/src/lib.rs | 29 ++++----- task-impls/src/builder.rs | 2 +- task-impls/src/consensus.rs | 39 ++++++----- task-impls/src/da.rs | 21 +++--- task-impls/src/events.rs | 7 +- task-impls/src/harness.rs | 7 +- task-impls/src/network.rs | 22 +++---- task-impls/src/quorum_proposal.rs | 5 +- task-impls/src/quorum_vote.rs | 13 ++-- task-impls/src/request.rs | 9 +-- task-impls/src/response.rs | 3 +- task-impls/src/transactions.rs | 19 +++--- task-impls/src/upgrade.rs | 21 +++--- task-impls/src/vid.rs | 13 ++-- task-impls/src/view_sync.rs | 46 ++++++------- task-impls/src/vote_collection.rs | 10 +-- task/src/dependency.rs | 15 +++-- task/src/dependency_task.rs | 6 +- task/src/task.rs | 14 ++-- testing/src/block_builder.rs | 8 ++- testing/src/completion_task.rs | 11 ++-- testing/src/overall_safety_task.rs | 10 +-- testing/src/predicates.rs | 17 ++--- testing/src/script.rs | 6 +- testing/src/spinning_task.rs | 18 +++-- testing/src/task_helpers.rs | 40 +++++------- testing/src/test_builder.rs | 20 +++--- testing/src/test_runner.rs | 56 ++++++++-------- testing/src/txn_task.rs | 6 +- testing/src/view_generator.rs | 21 +++--- testing/src/view_sync_task.rs | 3 +- types/src/consensus.rs | 16 ++--- types/src/data.rs | 37 ++++++----- types/src/error.rs | 6 +- types/src/event.rs | 7 +- types/src/lib.rs | 7 +- types/src/light_client.rs | 3 +- types/src/message.rs | 42 ++++++------ types/src/qc.rs | 12 ++-- types/src/signature_key.rs | 11 ++-- types/src/simple_certificate.rs | 7 +- types/src/simple_vote.rs | 5 +- types/src/stake_table.rs | 3 +- types/src/traits/block_contents.rs | 21 +++--- types/src/traits/consensus_api.rs | 7 +- types/src/traits/election.rs | 8 +-- types/src/traits/metrics.rs | 6 +- types/src/traits/network.rs | 28 ++++---- types/src/traits/node_implementation.rs | 22 ++++--- types/src/traits/signature_key.rs | 9 +-- types/src/traits/states.rs | 6 +- types/src/traits/storage.rs | 3 +- types/src/utils.rs | 14 ++-- types/src/vid.rs | 3 +- web_server/src/lib.rs | 15 +++-- 110 files changed, 841 insertions(+), 807 deletions(-) diff --git a/builder-api/src/api.rs b/builder-api/src/api.rs index 587638301c..b1ad3f380b 100644 --- a/builder-api/src/api.rs +++ b/builder-api/src/api.rs @@ -1,5 +1,5 @@ -use std::fs; -use std::path::Path; +use std::{fs, path::Path}; + use tide_disco::api::{Api, ApiError}; use toml::{map::Entry, Value}; use vbs::version::StaticVersionType; diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 6c08cb68c7..2ddfe55540 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -1,9 +1,5 @@ use std::{fmt::Display, path::PathBuf}; -use crate::{ - api::load_api, - data_source::{AcceptsTxnSubmits, BuilderDataSource}, -}; use clap::Args; use committable::Committable; use derive_more::From; @@ -22,6 +18,11 @@ use tide_disco::{ }; use vbs::version::StaticVersionType; +use crate::{ + api::load_api, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, +}; + #[derive(Args, Default)] pub struct Options { #[arg(long = "builder-api-path", env = "HOTSHOT_BUILDER_API_PATH")] diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index d89ebf2401..c053e758ea 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -3,7 +3,6 @@ use std::{ mem::size_of, }; -use crate::node_types::TestTypes; use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, @@ -19,6 +18,8 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; use time::OffsetDateTime; +use crate::node_types::TestTypes; + /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] pub struct TestTransaction(pub Vec); diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index e4827b1320..8da0ef4351 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -1,6 +1,15 @@ use hotshot::traits::{ - election::static_committee::GeneralStaticCommittee, implementations::PushCdnNetwork, + election::static_committee::{GeneralStaticCommittee, StaticCommittee, StaticElectionConfig}, + implementations::{ + CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork, WebServerNetwork, + }, + NodeImplementation, +}; +use hotshot_types::{ + constants::WebServerVersion, data::ViewNumber, message::Message, signature_key::BLSPubKey, + traits::node_implementation::NodeType, }; +use serde::{Deserialize, Serialize}; use crate::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, @@ -8,20 +17,6 @@ use crate::{ storage_types::TestStorage, }; -use hotshot::traits::{ - election::static_committee::{StaticCommittee, StaticElectionConfig}, - implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, WebServerNetwork}, - NodeImplementation, -}; - -use hotshot_types::constants::WebServerVersion; - -use hotshot_types::{ - data::ViewNumber, message::Message, signature_key::BLSPubKey, - traits::node_implementation::NodeType, -}; -use serde::{Deserialize, Serialize}; - #[derive( Copy, Clone, diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index c6383c8f72..ac477c30f0 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -1,6 +1,7 @@ //! Implementations for examples and tests only -use committable::{Commitment, Committable}; +use std::fmt::Debug; +use committable::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, Leaf, ViewNumber}, traits::{ @@ -10,9 +11,7 @@ use hotshot_types::{ BlockPayload, }, }; - use serde::{Deserialize, Serialize}; -use std::fmt::Debug; use crate::block_types::{TestBlockPayload, TestTransaction}; pub use crate::node_types::TestTypes; diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 3d9c59c422..b446cbdc90 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -1,3 +1,8 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; @@ -8,8 +13,6 @@ use hotshot_types::{ traits::{node_implementation::NodeType, storage::Storage}, utils::View, }; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; type VidShares = HashMap< ::Time, diff --git a/examples/combined/all.rs b/examples/combined/all.rs index a06e9efa47..d06f3522ce 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -2,28 +2,32 @@ /// types used for this example pub mod types; -use crate::infra::read_orchestrator_init_config; -use crate::infra::OrchestratorArgs; -use crate::types::ThisRun; -use crate::{ - infra::run_orchestrator, - types::{DANetwork, NodeImpl, QuorumNetwork}, +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::Path, +}; + +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, }; -use async_compatibility_layer::art::async_spawn; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use cdn_broker::{Broker, Config as BrokerConfig, ConfigBuilder as BrokerConfigBuilder}; use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; -use hotshot::traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}; -use hotshot::types::SignatureKey; +use hotshot::{ + traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}, + types::SignatureKey, +}; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use rand::{rngs::StdRng, RngCore, SeedableRng}; -use std::net::SocketAddr; -use std::net::{IpAddr, Ipv4Addr}; -use std::path::Path; use tracing::{error, instrument}; +use crate::{ + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, +}; + /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index 409fc4f16c..067c827efb 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -6,10 +6,10 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; -use crate::infra::read_orchestrator_init_config; -use crate::infra::run_orchestrator; -use crate::infra::OrchestratorArgs; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; +use crate::{ + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork}, +}; /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/examples/combined/types.rs b/examples/combined/types.rs index dbbc1b2e80..05507b2327 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,9 +1,11 @@ -use crate::infra::CombinedDARun; +use std::fmt::Debug; + use hotshot::traits::implementations::CombinedNetworks; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; + +use crate::infra::CombinedDARun; /// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 7574d44ca0..43f202c974 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -4,13 +4,12 @@ use std::{net::SocketAddr, str::FromStr}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; -use hotshot_orchestrator::client::ValidatorArgs; - /// types used for this example pub mod types; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 350d0cdd21..bfd7b485f2 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -1,36 +1,46 @@ #![allow(clippy::panic)] -use async_compatibility_layer::art::async_sleep; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use std::{ + fmt::Debug, + fs, + marker::PhantomData, + net::{IpAddr, Ipv4Addr, SocketAddr}, + num::NonZeroUsize, + time::{Duration, Instant}, +}; + +use async_compatibility_layer::{ + art::async_sleep, + logging::{setup_backtrace, setup_logging}, +}; use async_trait::async_trait; -use cdn_broker::reexports::crypto::signature::KeyPair; -use cdn_broker::reexports::message::Topic; +use cdn_broker::reexports::{crypto::signature::KeyPair, message::Topic}; use chrono::Utc; -use clap::Parser; -use clap::{Arg, Command}; +use clap::{Arg, Command, Parser}; use futures::StreamExt; -use hotshot::traits::implementations::{ - derive_libp2p_peer_id, CombinedNetworks, PushCdnNetwork, WrappedSignatureKey, -}; -use hotshot::traits::BlockPayload; use hotshot::{ traits::{ - implementations::{Libp2pNetwork, WebServerNetwork}, - NodeImplementation, + implementations::{ + derive_libp2p_peer_id, CombinedNetworks, Libp2pNetwork, PushCdnNetwork, + WebServerNetwork, WrappedSignatureKey, + }, + BlockPayload, NodeImplementation, }, types::SystemContextHandle, Memberships, Networks, SystemContext, }; -use hotshot_example_types::node_types::{Libp2pImpl, PushCdnImpl}; -use hotshot_example_types::storage_types::TestStorage; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + node_types::{Libp2pImpl, PushCdnImpl}, state_types::TestInstanceState, + storage_types::TestStorage, }; -use hotshot_orchestrator::config::NetworkConfigSource; use hotshot_orchestrator::{ self, client::{BenchResults, OrchestratorClient, ValidatorArgs}, - config::{CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, WebServerConfig}, + config::{ + CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, NetworkConfigSource, + WebServerConfig, + }, }; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -46,15 +56,7 @@ use hotshot_types::{ }, HotShotConfig, PeerConfig, ValidatorConfig, }; - -use rand::rngs::StdRng; -use rand::SeedableRng; -use std::fmt::Debug; -use std::marker::PhantomData; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::num::NonZeroUsize; -use std::time::Duration; -use std::{fs, time::Instant}; +use rand::{rngs::StdRng, SeedableRng}; use surf_disco::Url; use tracing::{error, info, warn}; use vbs::version::StaticVersionType; diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 50f03d63a3..09ffa3aacb 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -2,21 +2,21 @@ /// types used for this example pub mod types; -use crate::infra::read_orchestrator_init_config; -use crate::infra::OrchestratorArgs; -use crate::types::ThisRun; -use crate::{ - infra::run_orchestrator, - types::{DANetwork, NodeImpl, QuorumNetwork}, +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, }; -use async_compatibility_layer::art::async_spawn; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; -use std::net::SocketAddr; -use std::net::{IpAddr, Ipv4Addr}; use tracing::instrument; +use crate::{ + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, +}; + /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs index 8e677abdbe..0a8761b95f 100644 --- a/examples/libp2p/orchestrator.rs +++ b/examples/libp2p/orchestrator.rs @@ -3,14 +3,15 @@ /// types used for this example pub mod types; -use crate::infra::read_orchestrator_init_config; -use crate::infra::run_orchestrator; -use crate::infra::OrchestratorArgs; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; +use crate::{ + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork}, +}; + /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index 73daa27b1e..cd46c71f9c 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,4 +1,5 @@ -use crate::infra::Libp2pDARun; +use std::fmt::Debug; + use hotshot::traits::implementations::Libp2pNetwork; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::{ @@ -6,7 +7,8 @@ use hotshot_types::{ traits::node_implementation::{NodeImplementation, NodeType}, }; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; + +use crate::infra::Libp2pDARun; /// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 2cf4a4c8b0..9b46a591fb 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -4,13 +4,12 @@ use std::{net::SocketAddr, str::FromStr}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; -use hotshot_orchestrator::client::ValidatorArgs; - /// types used for this example pub mod types; diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 3ba22e6e27..0a010941f4 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -2,18 +2,22 @@ /// The types we're importing pub mod types; -use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; use async_compatibility_layer::art::async_spawn; -use cdn_broker::reexports::crypto::signature::KeyPair; -use cdn_broker::Broker; +use cdn_broker::{reexports::crypto::signature::KeyPair, Broker}; use cdn_marshal::Marshal; -use hotshot::traits::implementations::{TestingDef, WrappedSignatureKey}; -use hotshot::types::SignatureKey; +use hotshot::{ + traits::implementations::{TestingDef, WrappedSignatureKey}, + types::SignatureKey, +}; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; +use crate::{ + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, +}; + /// The infra implementation #[path = "../infra/mod.rs"] pub mod infra; diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 98c3292a96..900e2697c5 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -4,11 +4,9 @@ use anyhow::{Context, Result}; use cdn_broker::{Broker, Config, ConfigBuilder}; use clap::Parser; - use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey}; use hotshot_example_types::node_types::TestTypes; -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use sha2::Digest; #[derive(Parser, Debug)] diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 5f488d740c..c62eec2c28 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -1,8 +1,9 @@ -use crate::infra::PushCdnDaRun; use hotshot::traits::{implementations::PushCdnNetwork, NodeImplementation}; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use serde::{Deserialize, Serialize}; +use crate::infra::PushCdnDaRun; + #[derive(Clone, Deserialize, Serialize, Hash, PartialEq, Eq)] /// Convenience type alias pub struct NodeImpl {} diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index 886847da66..1991c0e857 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -2,12 +2,11 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use hotshot_orchestrator::client::ValidatorArgs; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; -use hotshot_orchestrator::client::ValidatorArgs; - /// types used for this example pub mod types; diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index 1cc376a493..14ffa8dc91 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -2,20 +2,17 @@ //! all brokers. Right now, we do this by asking the orchestrator for the list of //! allowed public keys. In the future, we will pull the stake table from the L1. -use std::str::FromStr; -use std::sync::Arc; +use std::{str::FromStr, sync::Arc}; -use anyhow::Context; -use anyhow::Result; -use cdn_broker::reexports::discovery::DiscoveryClient; -use cdn_broker::reexports::discovery::{Embedded, Redis}; +use anyhow::{Context, Result}; +use cdn_broker::reexports::discovery::{DiscoveryClient, Embedded, Redis}; use clap::Parser; use hotshot_example_types::node_types::TestTypes; -use hotshot_orchestrator::client::OrchestratorClient; -use hotshot_orchestrator::client::ValidatorArgs; -use hotshot_orchestrator::config::NetworkConfig; -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_orchestrator::{ + client::{OrchestratorClient, ValidatorArgs}, + config::NetworkConfig, +}; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use surf_disco::Url; #[derive(Parser, Debug)] diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs index 8585764d98..7425915558 100644 --- a/examples/webserver/all.rs +++ b/examples/webserver/all.rs @@ -2,14 +2,12 @@ /// types used for this example pub mod types; -use crate::infra::read_orchestrator_init_config; -use crate::infra::OrchestratorArgs; -use crate::types::ThisRun; +use std::sync::Arc; + use crate::{ - infra::run_orchestrator, - types::{DANetwork, NodeImpl, QuorumNetwork}, + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, }; -use std::sync::Arc; /// general infra used for this example #[path = "../infra/mod.rs"] diff --git a/examples/webserver/multi-webserver.rs b/examples/webserver/multi-webserver.rs index 34af9b9e6b..51b4f69021 100644 --- a/examples/webserver/multi-webserver.rs +++ b/examples/webserver/multi-webserver.rs @@ -8,8 +8,7 @@ use async_compatibility_layer::{ }; use clap::Parser; use hotshot_example_types::state_types::TestTypes; -use hotshot_types::constants::WebServerVersion; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{constants::WebServerVersion, traits::node_implementation::NodeType}; use surf_disco::Url; use tracing::error; use vbs::version::StaticVersionType; diff --git a/examples/webserver/orchestrator.rs b/examples/webserver/orchestrator.rs index b033e89fb5..8d2bcff5d8 100644 --- a/examples/webserver/orchestrator.rs +++ b/examples/webserver/orchestrator.rs @@ -3,14 +3,15 @@ /// types used for this example pub mod types; -use crate::infra::read_orchestrator_init_config; -use crate::infra::run_orchestrator; -use crate::infra::OrchestratorArgs; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; +use crate::{ + infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, + types::{DANetwork, NodeImpl, QuorumNetwork}, +}; + /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; diff --git a/examples/webserver/types.rs b/examples/webserver/types.rs index 34ce2fa9a1..6d57090b76 100644 --- a/examples/webserver/types.rs +++ b/examples/webserver/types.rs @@ -1,10 +1,11 @@ -use crate::infra::WebServerDARun; +use std::fmt::Debug; + use hotshot::traits::implementations::WebServerNetwork; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; -use hotshot_types::constants::WebServerVersion; -use hotshot_types::traits::node_implementation::NodeImplementation; +use hotshot_types::{constants::WebServerVersion, traits::node_implementation::NodeImplementation}; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; + +use crate::infra::WebServerDARun; /// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] diff --git a/examples/webserver/validator.rs b/examples/webserver/validator.rs index 886847da66..1991c0e857 100644 --- a/examples/webserver/validator.rs +++ b/examples/webserver/validator.rs @@ -2,12 +2,11 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; +use hotshot_orchestrator::client::ValidatorArgs; use tracing::{info, instrument}; use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; -use hotshot_orchestrator::client::ValidatorArgs; - /// types used for this example pub mod types; diff --git a/examples/webserver/webserver.rs b/examples/webserver/webserver.rs index 4f4a2a5820..e8f6e40b42 100644 --- a/examples/webserver/webserver.rs +++ b/examples/webserver/webserver.rs @@ -1,15 +1,15 @@ //! web server example -use hotshot_example_types::state_types::TestTypes; -use hotshot_types::constants::WebServerVersion; use std::sync::Arc; -use surf_disco::Url; -use vbs::version::StaticVersionType; use async_compatibility_layer::{ channel::oneshot, logging::{setup_backtrace, setup_logging}, }; use clap::Parser; +use hotshot_example_types::state_types::TestTypes; +use hotshot_types::constants::WebServerVersion; +use surf_disco::Url; +use vbs::version::StaticVersionType; /// web server arguments #[derive(Parser, Debug)] diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index af5df675cd..15897e0f8d 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -3,13 +3,14 @@ mod config; mod internal; -use self::internal::{to_merkle_path, Key, MerkleCommitment, MerkleProof, PersistentMerkleNode}; use ark_std::{collections::HashMap, rand::SeedableRng, sync::Arc}; use digest::crypto_common::rand_core::CryptoRngCore; use ethereum_types::{U256, U512}; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; use serde::{Deserialize, Serialize}; +use self::internal::{to_merkle_path, Key, MerkleCommitment, MerkleProof, PersistentMerkleNode}; + /// Locally maintained stake table, generic over public key type `K`. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "K: Key")] @@ -222,11 +223,12 @@ impl StakeTable { #[cfg(test)] mod tests { - use super::StakeTable; use ark_std::{rand::SeedableRng, vec::Vec}; use ethereum_types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; + use super::StakeTable; + // Hotshot use bn254::Fq as key type. type Key = ark_bn254::Fq; diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs index a41bf5a66b..3048471c2c 100644 --- a/hotshot-stake-table/src/mt_based/config.rs +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -1,9 +1,10 @@ //! Config file for stake table -use crate::utils::ToFields; use ark_ff::PrimeField; use ark_std::vec; use jf_primitives::{crhf::FixedLengthRescueCRHF, signatures::bls_over_bn254}; +use crate::utils::ToFields; + /// Branch of merkle tree. /// Set to 3 because we are currently using RATE-3 rescue hash function pub(crate) const TREE_BRANCH: usize = 3; diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 4fb066c014..01e9e78d56 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -1,7 +1,5 @@ //! Utilities and internals for maintaining a local stake table -use super::config::{Digest, FieldType, TREE_BRANCH}; -use crate::utils::{u256_to_field, ToFields}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{hash::Hash, sync::Arc, vec, vec::Vec}; use ethereum_types::U256; @@ -11,6 +9,9 @@ use jf_utils::canonical; use serde::{Deserialize, Serialize}; use tagged_base64::tagged; +use super::config::{Digest, FieldType, TREE_BRANCH}; +use crate::utils::{u256_to_field, ToFields}; + /// Common trait bounds for generic key type `K` for [`PersistentMerkleNode`] pub trait Key: Clone + CanonicalSerialize + CanonicalDeserialize + PartialEq + Eq + ToFields + Hash @@ -618,7 +619,6 @@ pub fn from_merkle_path(path: &[usize]) -> usize { #[cfg(test)] mod tests { - use super::{super::config, to_merkle_path, PersistentMerkleNode}; use ark_std::{ rand::{Rng, RngCore}, sync::Arc, @@ -628,6 +628,8 @@ mod tests { use ethereum_types::U256; use jf_utils::test_rng; + use super::{super::config, to_merkle_path, PersistentMerkleNode}; + type Key = ark_bn254::Fq; #[test] diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 49c46e76be..1d8d72d0f6 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -1,9 +1,5 @@ //! A vector based stake table implementation. The commitment is the rescue hash of the list of (key, amount) pairs; -use crate::{ - config::STAKE_TABLE_CAPACITY, - utils::{u256_to_field, ToFields}, -}; use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; use digest::crypto_common::rand_core::CryptoRngCore; use ethereum_types::{U256, U512}; @@ -14,6 +10,11 @@ use jf_primitives::{ }; use serde::{Deserialize, Serialize}; +use crate::{ + config::STAKE_TABLE_CAPACITY, + utils::{u256_to_field, ToFields}, +}; + pub mod config; /// a snapshot of the stake table @@ -381,13 +382,17 @@ where #[cfg(test)] mod tests { - use super::config::{FieldType as F, QCVerKey, StateVerKey}; - use super::StakeTable; use ark_std::{rand::SeedableRng, vec::Vec}; use ethereum_types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; - use jf_primitives::signatures::bls_over_bn254::BLSOverBN254CurveSignatureScheme; - use jf_primitives::signatures::{SchnorrSignatureScheme, SignatureScheme}; + use jf_primitives::signatures::{ + bls_over_bn254::BLSOverBN254CurveSignatureScheme, SchnorrSignatureScheme, SignatureScheme, + }; + + use super::{ + config::{FieldType as F, QCVerKey, StateVerKey}, + StakeTable, + }; #[test] fn crypto_test_stake_table() -> Result<(), StakeTableError> { diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index ad81af094f..c4341090f1 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -1,13 +1,13 @@ //! Config file for stake table -use crate::utils::ToFields; use ark_ff::PrimeField; use ark_std::vec; -use jf_utils::to_bytes; - /// Schnorr verification key as auxiliary information pub use hotshot_types::light_client::StateVerKey; /// BLS verification key as indexing key pub use jf_primitives::signatures::bls_over_bn254::VerKey as QCVerKey; +use jf_utils::to_bytes; + +use crate::utils::ToFields; /// Type for commitment pub type FieldType = ark_ed_on_bn254::Fq; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a7469e7ade..ee660ea325 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -12,29 +12,28 @@ pub mod types; pub mod tasks; -use crate::{ - tasks::{ - add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_transaction_task, add_upgrade_task, add_view_sync_task, - }, - traits::NodeImplementation, - types::{Event, SystemContextHandle}, +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + num::NonZeroUsize, + sync::Arc, + time::Duration, }; + use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; use futures::join; -use hotshot_task_impls::events::HotShotEvent; -use hotshot_task_impls::helpers::broadcast_event; -use hotshot_task_impls::network; -use hotshot_types::constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, STATIC_VER_0_1}; -use vbs::version::Version; - use hotshot_task::task::TaskRegistry; +use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network}; +// Internal +/// Reexport error type +pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, + constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, STATIC_VER_0_1}, data::Leaf, event::EventType, message::{DataMessage, Message, MessageKind}, @@ -50,23 +49,22 @@ use hotshot_types::{ }, HotShotConfig, }; -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, - num::NonZeroUsize, - sync::Arc, - time::Duration, -}; -use tasks::{add_request_network_task, add_response_task, add_vid_task}; -use tracing::{debug, instrument, trace}; - // -- Rexports // External /// Reexport rand crate pub use rand; -// Internal -/// Reexport error type -pub use hotshot_types::error::HotShotError; +use tasks::{add_request_network_task, add_response_task, add_vid_task}; +use tracing::{debug, instrument, trace}; +use vbs::version::Version; + +use crate::{ + tasks::{ + add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, + add_transaction_task, add_upgrade_task, add_view_sync_task, + }, + traits::NodeImplementation, + types::{Event, SystemContextHandle}, +}; /// Length, in bytes, of a 512 bit hash pub const H_512: usize = 64; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3d16e8c98b..4c86f2f411 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -3,13 +3,10 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use crate::tasks::task_state::CreateTaskState; -use crate::ConsensusApi; +use std::{sync::Arc, time::Duration}; -use crate::types::SystemContextHandle; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; - use async_lock::RwLock; use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ @@ -25,22 +22,20 @@ use hotshot_task_impls::{ vid::VIDTaskState, view_sync::ViewSyncTaskState, }; -use hotshot_types::constants::VERSION_0_1; -use hotshot_types::{ - constants::Version01, - message::Message, - traits::{election::Membership, network::ConnectedNetwork, storage::Storage}, -}; use hotshot_types::{ - message::Messages, + constants::{Version01, VERSION_0_1}, + message::{Message, Messages}, traits::{ - network::ConsensusIntentEvent, + election::Membership, + network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + storage::Storage, }, }; -use std::{sync::Arc, time::Duration}; use tracing::error; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; + /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 72a2d16d0a..218f2feae0 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,10 +1,12 @@ -use crate::types::SystemContextHandle; +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, +}; use async_trait::async_trait; -use hotshot_task_impls::builder::BuilderClient; -use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; use hotshot_task_impls::{ - consensus::ConsensusTaskState, da::DATaskState, quorum_vote::QuorumVoteTaskState, + builder::BuilderClient, consensus::ConsensusTaskState, da::DATaskState, + quorum_proposal::QuorumProposalTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, view_sync::ViewSyncTaskState, }; @@ -12,10 +14,10 @@ use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; -use std::collections::BTreeMap; -use std::{collections::HashMap, marker::PhantomData}; use vbs::version::StaticVersionType; +use crate::types::SystemContextHandle; + /// Trait for creating task states. #[async_trait] pub trait CreateTaskState diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 437cd8019a..d2c01455c8 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,17 +1,20 @@ // use ark_bls12_381::Parameters as Param381; +use std::{marker::PhantomData, num::NonZeroU64}; + use ethereum_types::U256; -use hotshot_types::signature_key::BLSPubKey; -use hotshot_types::traits::{ - election::{ElectionConfig, Membership}, - node_implementation::NodeType, - signature_key::{SignatureKey, StakeTableEntryType}, +use hotshot_types::{ + signature_key::BLSPubKey, + traits::{ + election::{ElectionConfig, Membership}, + node_implementation::NodeType, + signature_key::{SignatureKey, StakeTableEntryType}, + }, + PeerConfig, }; -use hotshot_types::PeerConfig; #[cfg(feature = "randomized-leader-election")] use rand::{rngs::StdRng, Rng}; #[allow(deprecated)] use serde::{Deserialize, Serialize}; -use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; /// Dummy implementation of [`Membership`] diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 16af13c81c..bd5b704646 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -1,59 +1,56 @@ //! Networking Implementation that has a primary and a fallback network. If the primary //! Errors we will use the backup to send or receive -use super::{push_cdn_network::PushCdnNetwork, NetworkError}; -use crate::traits::implementations::Libp2pNetwork; -use async_lock::RwLock; -use hotshot_types::constants::{ - COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, - COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, -}; - -use lru::LruCache; use std::{ - collections::BTreeSet, - hash::Hasher, + collections::{hash_map::DefaultHasher, BTreeMap, BTreeSet, HashMap}, + future::Future, + hash::{Hash, Hasher}, num::NonZeroUsize, - sync::atomic::{AtomicU64, Ordering}, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, }; -use tracing::warn; +use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + channel::UnboundedSendError, +}; +use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use async_trait::async_trait; - -use futures::{channel::mpsc, join, select, FutureExt}; - -use async_compatibility_layer::channel::UnboundedSendError; +use futures::{channel::mpsc, future::join_all, join, select, FutureExt}; +use hotshot_task_impls::helpers::cancel_task; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, }; use hotshot_types::{ boxed_sync, + constants::{ + COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, + COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, + }, data::ViewNumber, - message::{Message, SequencingMessage}, + message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, traits::{ - network::{ConnectedNetwork, ConsensusIntentEvent, ResponseChannel, ResponseMessage}, - node_implementation::NodeType, + network::{ + ConnectedNetwork, ConsensusIntentEvent, ResponseChannel, ResponseMessage, ViewMessage, + }, + node_implementation::{ConsensusTime, NodeType}, }, BoxSyncFuture, }; -use std::collections::{BTreeMap, HashMap}; -use std::future::Future; -use std::{collections::hash_map::DefaultHasher, sync::Arc}; - -use async_compatibility_layer::art::{async_sleep, async_spawn}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use futures::future::join_all; -use hotshot_task_impls::helpers::cancel_task; -use hotshot_types::message::{GeneralConsensusMessage, MessageKind}; -use hotshot_types::traits::network::ViewMessage; -use hotshot_types::traits::node_implementation::ConsensusTime; -use std::hash::Hash; -use std::time::Duration; +use lru::LruCache; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tracing::warn; use vbs::version::StaticVersionType; +use super::{push_cdn_network::PushCdnNetwork, NetworkError}; +use crate::traits::implementations::Libp2pNetwork; + /// Helper function to calculate a hash of a type that implements Hash pub fn calculate_hash_of(t: &T) -> u64 { let mut s = DefaultHasher::new(); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6c3b82fcc8..47f85669ea 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1,7 +1,20 @@ //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network -use super::NetworkingMetricsValue; +#[cfg(feature = "hotshot-testing")] +use std::str::FromStr; +use std::{ + collections::{BTreeSet, HashSet}, + fmt::Debug, + net::SocketAddr, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + use anyhow::anyhow; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, @@ -39,40 +52,27 @@ use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, PeerId, }; -use libp2p_networking::network::NetworkNodeConfigBuilder; -use libp2p_networking::{network::MeshParams, reexport::Multiaddr}; use libp2p_networking::{ network::{ behaviours::request_response::{Request, Response}, - spawn_network_node, + spawn_network_node, MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, - NetworkNodeConfig, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, - NetworkNodeType, + NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, + NetworkNodeReceiver, NetworkNodeType, }, - reexport::ResponseChannel, + reexport::{Multiaddr, ResponseChannel}, }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; use snafu::ResultExt; -use std::num::NonZeroUsize; -#[cfg(feature = "hotshot-testing")] -use std::str::FromStr; -use std::{ - collections::BTreeSet, - fmt::Debug, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; -use std::{collections::HashSet, net::SocketAddr}; use tracing::{debug, error, info, instrument, warn}; use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, }; +use super::NetworkingMetricsValue; + /// convenience alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests pub type BootstrapAddrs = Arc>>; diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index de6df4be35..06fd7a318c 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -3,14 +3,22 @@ //! This module provides an in-memory only simulation of an actual network, useful for unit and //! integration tests. -use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; +use core::time::Duration; +use std::{ + collections::BTreeSet, + fmt::Debug, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; + use async_compatibility_layer::{ art::async_spawn, channel::{bounded, BoundedStream, Receiver, SendError, Sender}, }; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; -use core::time::Duration; use dashmap::DashMap; use futures::StreamExt; use hotshot_types::{ @@ -26,17 +34,11 @@ use hotshot_types::{ }; use rand::Rng; use snafu::ResultExt; -use std::{ - collections::BTreeSet, - fmt::Debug, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; +use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; + /// Shared state for in-memory mock networking. /// /// This type is responsible for keeping track of the channels to each [`MemoryNetwork`], and is diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 315e334bc7..ba96c081f1 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,12 +1,19 @@ -use super::NetworkError; +#[cfg(feature = "hotshot-testing")] +use std::sync::atomic::{AtomicBool, Ordering}; +use std::{collections::BTreeSet, marker::PhantomData}; +#[cfg(feature = "hotshot-testing")] +use std::{path::Path, sync::Arc, time::Duration}; + #[cfg(feature = "hotshot-testing")] use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use bincode::config::Options; -use cdn_broker::reexports::connection::protocols::Tcp; -use cdn_broker::reexports::def::RunDef; -use cdn_broker::reexports::discovery::{Embedded, Redis}; +use cdn_broker::reexports::{ + connection::protocols::Tcp, + def::RunDef, + discovery::{Embedded, Redis}, +}; #[cfg(feature = "hotshot-testing")] use cdn_broker::{Broker, Config, ConfigBuilder as BrokerConfigBuilder}; pub use cdn_client::reexports::crypto::signature::KeyPair; @@ -39,18 +46,14 @@ use hotshot_types::{ }; #[cfg(feature = "hotshot-testing")] use rand::{rngs::StdRng, RngCore, SeedableRng}; -use std::collections::BTreeSet; -use std::marker::PhantomData; -#[cfg(feature = "hotshot-testing")] -use std::sync::atomic::{AtomicBool, Ordering}; -#[cfg(feature = "hotshot-testing")] -use std::{path::Path, sync::Arc, time::Duration}; use tracing::{error, warn}; use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, }; +use super::NetworkError; + /// A wrapped `SignatureKey`. We need to implement the Push CDN's `SignatureScheme` /// trait in order to sign and verify messages to/from the CDN. #[derive(Clone, Eq, PartialEq)] diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index ed4d7a68c2..8db761c836 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -3,24 +3,33 @@ //! To run the web server, see the `./web_server/` folder in this repo. //! -use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; +use std::{ + collections::{btree_map::Entry, hash_map::DefaultHasher, BTreeMap, BTreeSet}, + hash::{Hash, Hasher}, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, - channel::{oneshot, OneShotSender}, + channel::{oneshot, unbounded, OneShotSender, UnboundedReceiver, UnboundedSender}, }; use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; -use hotshot_types::traits::network::AsyncGenerator; use hotshot_types::{ boxed_sync, constants::{Version01, VERSION_0_1}, message::{Message, MessagePurpose}, traits::{ network::{ - ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, NetworkReliability, - TestableNetworkingImplementation, ViewMessage, WebServerNetworkError, + AsyncGenerator, ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, + NetworkReliability, TestableNetworkingImplementation, ViewMessage, + WebServerNetworkError, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -30,20 +39,7 @@ use hotshot_types::{ use hotshot_web_server::{self, config}; use lru::LruCache; use serde::{Deserialize, Serialize}; -use std::collections::hash_map::DefaultHasher; -use std::collections::BTreeMap; -use std::hash::{Hash, Hasher}; -use std::num::NonZeroUsize; -use std::{ - collections::{btree_map::Entry, BTreeSet}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; -use surf_disco::error::ClientError; -use surf_disco::Url; +use surf_disco::{error::ClientError, Url}; use tracing::{debug, error, info, warn}; use vbs::{ version::{StaticVersionType, Version}, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 13dba0e411..5cfe8a1c52 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,20 +1,22 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background -use crate::{traits::NodeImplementation, types::Event, SystemContext}; -use async_broadcast::{InactiveReceiver, Receiver, Sender}; +use std::sync::Arc; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use futures::Stream; - -use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::traits::election::Membership; - use hotshot_task::task::TaskRegistry; -use hotshot_types::{boxed_sync, BoxSyncFuture}; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ - consensus::Consensus, data::Leaf, error::HotShotError, traits::node_implementation::NodeType, + boxed_sync, + consensus::Consensus, + data::Leaf, + error::HotShotError, + traits::{election::Membership, node_implementation::NodeType}, + BoxSyncFuture, }; -use std::sync::Arc; + +use crate::{traits::NodeImplementation, types::Event, SystemContext}; /// Event streaming handle for a [`SystemContext`] instance running in the background /// diff --git a/libp2p-networking/src/lib.rs b/libp2p-networking/src/lib.rs index b536cb49f3..6af2522019 100644 --- a/libp2p-networking/src/lib.rs +++ b/libp2p-networking/src/lib.rs @@ -5,7 +5,6 @@ pub mod network; /// symbols needed to implement a networking instance over libp2p-netorking pub mod reexport { - pub use libp2p::request_response::ResponseChannel; - pub use libp2p::Multiaddr; + pub use libp2p::{request_response::ResponseChannel, Multiaddr}; pub use libp2p_identity::PeerId; } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index c1885b2b9c..c435696e86 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -8,8 +8,7 @@ use std::{ /// a local caching layer for the DHT key value pairs use futures::channel::oneshot::Sender; use lazy_static::lazy_static; -use libp2p::kad::Event as KademliaEvent; -use libp2p::kad::{store::RecordStore, Behaviour as KademliaBehaviour}; +use libp2p::kad::{store::RecordStore, Behaviour as KademliaBehaviour, Event as KademliaEvent}; use libp2p::{ kad::{ /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapError, BootstrapOk, diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index df4502c578..0d84faff3c 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -1,13 +1,15 @@ -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_compatibility_layer::channel::UnboundedSender; +use std::collections::HashMap; + +use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + channel::UnboundedSender, +}; use libp2p::request_response::{Event, Message, OutboundRequestId, ResponseChannel}; use libp2p_identity::PeerId; -use std::collections::HashMap; use tracing::{error, info}; -use crate::network::{ClientRequest, NetworkEvent}; - use super::exponential_backoff::ExponentialBackoff; +use crate::network::{ClientRequest, NetworkEvent}; /// Request to direct message a peert #[derive(Debug)] diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 146b666187..b63c0d4a88 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroUsize; + use futures::channel::oneshot::Sender; use libp2p::{ autonat, @@ -7,7 +9,7 @@ use libp2p::{ Multiaddr, }; use libp2p_identity::PeerId; -use std::num::NonZeroUsize; +use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; use super::{ @@ -19,8 +21,6 @@ use super::{ NetworkEventInternal, }; -use libp2p_swarm_derive::NetworkBehaviour; - /// Overarching network behaviour performing: /// - network topology discovoery /// - direct messaging diff --git a/libp2p-networking/src/network/error.rs b/libp2p-networking/src/network/error.rs index dd83e06584..bc4dd5f2ca 100644 --- a/libp2p-networking/src/network/error.rs +++ b/libp2p-networking/src/network/error.rs @@ -1,5 +1,7 @@ //! Contains the [`NetworkError`] snafu types +use std::fmt::{Debug, Display}; + use futures::channel::oneshot::Canceled; use libp2p::{ gossipsub::PublishError, @@ -8,7 +10,6 @@ use libp2p::{ TransportError, }; use snafu::Snafu; -use std::fmt::{Debug, Display}; /// wrapper type for errors generated by the `Network` #[derive(Debug, Snafu)] diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 45f02ad2c5..fd9f8147f1 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -7,21 +7,13 @@ pub mod error; /// functionality of a libp2p network node mod node; -pub use self::{ - def::NetworkDef, - error::NetworkError, - node::{ - network_node_handle_error, spawn_network_node, MeshParams, NetworkNode, NetworkNodeConfig, - NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, - NetworkNodeHandleError, NetworkNodeReceiver, - }, -}; +use std::{collections::HashSet, fmt::Debug, str::FromStr}; -use self::behaviours::{ - dht::DHTEvent, - request_response::{Request, Response}, -}; use futures::channel::oneshot::{self, Sender}; +#[cfg(async_executor_impl = "async-std")] +use libp2p::dns::async_std::Transport as DnsTransport; +#[cfg(async_executor_impl = "tokio")] +use libp2p::dns::tokio::Transport as DnsTransport; use libp2p::{ build_multiaddr, core::{muxing::StreamMuxerBox, transport::Boxed}, @@ -33,18 +25,26 @@ use libp2p::{ Multiaddr, Transport, }; use libp2p_identity::PeerId; -use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, fmt::Debug, str::FromStr}; -use tracing::instrument; - -#[cfg(async_executor_impl = "async-std")] -use libp2p::dns::async_std::Transport as DnsTransport; -#[cfg(async_executor_impl = "tokio")] -use libp2p::dns::tokio::Transport as DnsTransport; #[cfg(async_executor_impl = "async-std")] use quic::async_std::Transport as QuicTransport; #[cfg(async_executor_impl = "tokio")] use quic::tokio::Transport as QuicTransport; +use serde::{Deserialize, Serialize}; +use tracing::instrument; + +use self::behaviours::{ + dht::DHTEvent, + request_response::{Request, Response}, +}; +pub use self::{ + def::NetworkDef, + error::NetworkError, + node::{ + network_node_handle_error, spawn_network_node, MeshParams, NetworkNode, NetworkNodeConfig, + NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, + NetworkNodeHandleError, NetworkNodeReceiver, + }, +}; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index eea4bf71d7..9d16a1bc8e 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -5,36 +5,22 @@ mod config; /// allows for control over the libp2p network mod handle; -pub use self::{ - config::{ - MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, - }, - handle::{ - network_node_handle_error, spawn_network_node, NetworkNodeHandle, NetworkNodeHandleError, - NetworkNodeReceiver, - }, -}; - -use super::{ - error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, - gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, - NetworkNodeType, +use std::{ + collections::{HashMap, HashSet}, + iter, + num::{NonZeroU32, NonZeroUsize}, + time::Duration, }; -use crate::network::behaviours::{ - dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, - direct_message::{DMBehaviour, DMRequest}, - exponential_backoff::ExponentialBackoff, - request_response::{Request, RequestResponseState, Response}, -}; use async_compatibility_layer::{ art::async_spawn, channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use futures::{select, FutureExt, StreamExt}; use hotshot_types::constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; -use libp2p::{autonat, core::transport::ListenerId, StreamProtocol}; use libp2p::{ + autonat, + core::transport::ListenerId, gossipsub::{ Behaviour as Gossipsub, ConfigBuilder as GossipsubConfigBuilder, Event as GossipEvent, Message as GossipsubMessage, MessageAuthenticity, MessageId, Topic, ValidationMode, @@ -49,19 +35,34 @@ use libp2p::{ Behaviour as RequestResponse, Config as RequestResponseConfig, ProtocolSupport, }, swarm::SwarmEvent, - Multiaddr, Swarm, SwarmBuilder, + Multiaddr, StreamProtocol, Swarm, SwarmBuilder, }; use libp2p_identity::PeerId; use rand::{prelude::SliceRandom, thread_rng}; use snafu::ResultExt; -use std::{ - collections::{HashMap, HashSet}, - iter, - num::{NonZeroU32, NonZeroUsize}, - time::Duration, -}; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; +pub use self::{ + config::{ + MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, + }, + handle::{ + network_node_handle_error, spawn_network_node, NetworkNodeHandle, NetworkNodeHandleError, + NetworkNodeReceiver, + }, +}; +use super::{ + error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, + gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, + NetworkNodeType, +}; +use crate::network::behaviours::{ + dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, + direct_message::{DMBehaviour, DMRequest}, + exponential_backoff::ExponentialBackoff, + request_response::{Request, RequestResponseState, Response}, +}; + /// Maximum size of a message pub const MAX_GOSSIP_MSG_SIZE: usize = 2_000_000_000; diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 9e03632ae1..183b58d0ef 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -1,7 +1,9 @@ -use crate::network::NetworkNodeType; +use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; + use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; -use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; + +use crate::network::NetworkNodeType; /// replication factor for kademlia pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(20); diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 1e63f15540..c1537080ab 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -1,28 +1,29 @@ -use crate::network::{ - behaviours::request_response::{Request, Response}, - error::{CancelledRequestSnafu, DHTError}, - gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, - NetworkNodeConfigBuilderError, +use std::{ + collections::HashSet, + fmt::Debug, + time::{Duration, Instant}, }; + use async_compatibility_layer::{ art::{async_sleep, async_timeout, future::to}, channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use futures::channel::oneshot; - use hotshot_types::traits::network::NetworkError as HotshotNetworkError; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; -use std::{ - collections::HashSet, - fmt::Debug, - time::{Duration, Instant}, -}; use tracing::{debug, info, instrument}; use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; +use crate::network::{ + behaviours::request_response::{Request, Response}, + error::{CancelledRequestSnafu, DHTError}, + gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, + NetworkNodeConfigBuilderError, +}; + /// A handle containing: /// - A reference to the state /// - Controls for the swarm diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index cda6438354..741cd20060 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -1,6 +1,14 @@ +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + num::NonZeroUsize, + str::FromStr, + sync::Arc, + time::Duration, +}; + use async_compatibility_layer::{ - art::async_sleep, - art::async_spawn, + art::{async_sleep, async_spawn}, async_primitives::subscribable_mutex::SubscribableMutex, channel::{bounded, RecvError}, logging::{setup_backtrace, setup_logging}, @@ -14,14 +22,6 @@ use libp2p_networking::network::{ NetworkNodeType, }; use snafu::{ResultExt, Snafu}; -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, - num::NonZeroUsize, - str::FromStr, - sync::Arc, - time::Duration, -}; use tracing::{info, instrument, warn}; #[derive(Clone, Debug)] diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 9d8a37dabc..a6787c0bfc 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -1,23 +1,24 @@ #![allow(clippy::panic)] mod common; -use crate::common::print_connections; +use std::{fmt::Debug, sync::Arc, time::Duration}; + use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::prelude::StreamExt; use common::{test_bed, HandleSnafu, HandleWithState, TestError}; use hotshot_types::constants::{Version01, STATIC_VER_0_1}; use libp2p_networking::network::{NetworkEvent, NetworkNodeHandleError}; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; use snafu::ResultExt; -use std::{fmt::Debug, sync::Arc, time::Duration}; +#[cfg(async_executor_impl = "tokio")] +use tokio_stream::StreamExt; use tracing::{debug, error, info, instrument, warn}; use vbs::{BinarySerializer, Serializer}; -#[cfg(async_executor_impl = "async-std")] -use async_std::prelude::StreamExt; -#[cfg(async_executor_impl = "tokio")] -use tokio_stream::StreamExt; +use crate::common::print_connections; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 62cb8bee7c..9e03360037 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -3,9 +3,8 @@ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; -use syn::parse::Result; use syn::{ - parse::{Parse, ParseStream}, + parse::{Parse, ParseStream, Result}, parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, }; diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 531a922f07..4a59968e53 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -1,10 +1,8 @@ use std::{net::SocketAddr, time::Duration}; -use crate::{config::NetworkConfig, OrchestratorVersion}; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; - use hotshot_types::{ constants::Version01, traits::{election::ElectionConfig, signature_key::SignatureKey}, @@ -14,6 +12,8 @@ use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; use vbs::BinarySerializer; + +use crate::{config::NetworkConfig, OrchestratorVersion}; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 344ecd80d4..f27aad427a 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,11 +1,18 @@ +use std::{ + env, fs, + net::SocketAddr, + num::NonZeroUsize, + path::{Path, PathBuf}, + time::Duration, + vec, +}; + use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; use libp2p::{Multiaddr, PeerId}; use serde_inline_default::serde_inline_default; -use std::{env, net::SocketAddr, num::NonZeroUsize, path::PathBuf, time::Duration, vec}; -use std::{fs, path::Path}; use surf_disco::Url; use thiserror::Error; use toml; diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 73dcdf3cd9..b11ee78ccb 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -5,29 +5,17 @@ pub mod client; /// Configuration for the orchestrator pub mod config; +use std::{collections::HashSet, fs::OpenOptions, io, io::ErrorKind}; + use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; +use csv::Writer; +use futures::FutureExt; use hotshot_types::{ constants::Version01, traits::{election::ElectionConfig, signature_key::SignatureKey}, PeerConfig, }; -use std::fs::OpenOptions; -use std::{collections::HashSet, io, io::ErrorKind}; -use tide_disco::{Api, App, RequestError}; - -use surf_disco::Url; -use tide_disco::{ - api::ApiError, - error::ServerError, - method::{ReadState, WriteState}, -}; - -use futures::FutureExt; - -use crate::config::NetworkConfig; - -use csv::Writer; use libp2p::{ identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, @@ -35,11 +23,20 @@ use libp2p::{ }, Multiaddr, PeerId, }; +use surf_disco::Url; +use tide_disco::{ + api::ApiError, + error::ServerError, + method::{ReadState, WriteState}, + Api, App, RequestError, +}; use vbs::{ version::{StaticVersion, StaticVersionType}, BinarySerializer, }; +use crate::config::NetworkConfig; + /// Orchestrator is not, strictly speaking, bound to the network; it can have its own versioning. /// Orchestrator Version (major) pub const ORCHESTRATOR_MAJOR_VERSION: u16 = 0; diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 674aa73ed6..0e4ca78686 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -1,6 +1,6 @@ -use async_compatibility_layer::art::async_sleep; use std::time::{Duration, Instant}; +use async_compatibility_layer::art::async_sleep; use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Error as BuilderApiError}, diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d1dcf88752..21e4195031 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,24 +1,25 @@ -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task, AnyhowTracing}, - vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, +use core::time::Duration; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + sync::Arc, }; + use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use chrono::Utc; use committable::Committable; -use core::time::Duration; use futures::future::{join_all, FutureExt}; use hotshot_task::task::{Task, TaskState}; -use hotshot_types::data::null_block; -use hotshot_types::event::LeafInfo; use hotshot_types::{ consensus::{Consensus, View}, - data::{Leaf, QuorumProposal}, - event::{Event, EventType}, + constants::LOOK_AHEAD, + data::{null_block, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence}, + event::{Event, EventType, LeafInfo}, message::{GeneralConsensusMessage, Proposal}, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, @@ -37,20 +38,18 @@ use hotshot_types::{ vid::VidCommitment, vote::{Certificate, HasViewNumber}, }; -use hotshot_types::{constants::LOOK_AHEAD, data::ViewChangeEvidence}; -use vbs::version::Version; - -use crate::vote_collection::HandleVoteEvent; -use chrono::Utc; -use hotshot_types::data::VidDisperseShare; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; +use vbs::version::Version; + +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task, AnyhowTracing}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; /// Alias for the block payload commitment and the associated metadata. pub struct CommitmentAndMetadata { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 4445bb1b9c..a43f452b89 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,13 +1,9 @@ -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, - vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, -}; +use std::{marker::PhantomData, sync::Arc}; + use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; - use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, View}, @@ -17,6 +13,7 @@ use hotshot_types::{ simple_certificate::DACertificate, simple_vote::{DAData, DAVote}, traits::{ + block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, @@ -28,14 +25,18 @@ use hotshot_types::{ vote::HasViewNumber, }; use sha2::{Digest, Sha256}; - -use crate::vote_collection::HandleVoteEvent; -use hotshot_types::traits::block_contents::vid_commitment; -use std::{marker::PhantomData, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::{debug, error, instrument, warn}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; + /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 7d30b69703..1ef2684c51 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,9 +1,6 @@ -use crate::view_sync::ViewSyncPhase; - use either::Either; -use hotshot_types::data::VidDisperseShare; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse}, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, message::Proposal, simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -18,6 +15,8 @@ use hotshot_types::{ }; use vbs::version::Version; +use crate::view_sync::ViewSyncPhase; + /// Marker that the task completed #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub struct HotShotTaskCompleted; diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 3712752bbe..d4da652ab1 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -1,10 +1,11 @@ -use crate::events::{HotShotEvent, HotShotTaskCompleted}; -use async_broadcast::broadcast; +use std::{collections::HashMap, sync::Arc, time::Duration}; +use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; -use std::{collections::HashMap, sync::Arc, time::Duration}; + +use crate::events::{HotShotEvent, HotShotTaskCompleted}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 490f6f5324..5509c0e713 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,18 +1,13 @@ -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; +use std::{collections::HashMap, sync::Arc}; + use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use hotshot_types::{event::HotShotAction, traits::storage::Storage}; -use std::collections::HashMap; -use std::sync::Arc; - use hotshot_task::task::{Task, TaskState}; -use hotshot_types::constants::{BASE_VERSION, STATIC_VER_0_1}; use hotshot_types::{ + constants::{BASE_VERSION, STATIC_VER_0_1}, data::{VidDisperse, VidDisperseShare}, + event::HotShotAction, message::{ CommitteeConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, @@ -21,13 +16,18 @@ use hotshot_types::{ election::Membership, network::{ConnectedNetwork, TransmitType, ViewMessage}, node_implementation::{ConsensusTime, NodeType}, + storage::Storage, }, vote::{HasViewNumber, Vote}, }; -use tracing::instrument; -use tracing::{debug, error, warn}; +use tracing::{debug, error, instrument, warn}; use vbs::version::Version; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; + /// quorum filter pub fn quorum_filter(event: &Arc>) -> bool { !matches!( diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 0b56200e04..e5dbe47d6a 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -2,6 +2,8 @@ use std::{collections::HashMap, sync::Arc}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use either::Either; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, @@ -21,9 +23,6 @@ use hotshot_types::{ }, vote::HasViewNumber, }; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument}; diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 68ee36a4a6..1bfa6e3ce6 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,7 +1,5 @@ -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; +use std::{collections::HashMap, sync::Arc}; + use async_broadcast::{Receiver, Sender}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] @@ -30,12 +28,15 @@ use hotshot_types::{ utils::{Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use std::collections::HashMap; -use std::sync::Arc; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; + /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index bbbed4d1c1..5f61dc82b2 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,9 +1,5 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; @@ -24,6 +20,11 @@ use sha2::{Digest, Sha256}; use tracing::{debug, error, info, instrument, warn}; use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; + /// Amount of time to try for a request before timing out. const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 6858db657f..dacd731388 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use crate::{events::HotShotEvent, helpers::calculate_vid_disperse}; use async_broadcast::Receiver; use async_compatibility_layer::art::async_spawn; use async_lock::{RwLock, RwLockUpgradableReadGuard}; @@ -24,6 +23,8 @@ use sha2::{Digest, Sha256}; use tokio::task::JoinHandle; use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; +use crate::{events::HotShotEvent, helpers::calculate_vid_disperse}; + /// Type alias for consensus state wrapped in a lock. type LockedConsensusState = Arc>>; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index bbfecee84b..9ae88cb95f 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,12 +1,11 @@ -use crate::{ - builder::BuilderClient, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, +use std::{ + sync::Arc, + time::{Duration, Instant}, }; + use async_broadcast::Sender; use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; - use hotshot_builder_api::block_info::{AvailableBlockData, AvailableBlockHeaderInput}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ @@ -21,13 +20,15 @@ use hotshot_types::{ BlockPayload, }, }; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; use tracing::{debug, error, instrument}; use vbs::version::StaticVersionType; +use crate::{ + builder::BuilderClient, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; + /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 8ca04a955c..6986bbcd86 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,11 +1,7 @@ -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, - vote_collection::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, -}; +use std::sync::Arc; + use async_broadcast::Sender; use async_lock::RwLock; - use hotshot_task::task::TaskState; use hotshot_types::{ event::{Event, EventType}, @@ -19,11 +15,16 @@ use hotshot_types::{ }, vote::HasViewNumber, }; - -use crate::vote_collection::HandleVoteEvent; -use std::sync::Arc; use tracing::{debug, error, info, instrument, warn}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; + /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; @@ -208,9 +209,9 @@ impl, A: ConsensusApi + #[cfg(feature = "example-upgrade")] { - use committable::Committable; use std::marker::PhantomData; + use committable::Committable; use hotshot_types::{ data::UpgradeProposal, message::Proposal, traits::node_implementation::ConsensusTime, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 5bdb9a7808..0fb77cecf6 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,8 +1,7 @@ -use crate::events::{HotShotEvent, HotShotTaskCompleted}; -use crate::helpers::{broadcast_event, calculate_vid_disperse}; +use std::{marker::PhantomData, sync::Arc}; + use async_broadcast::Sender; use async_lock::RwLock; - use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, @@ -15,11 +14,13 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; - -use std::marker::PhantomData; -use std::sync::Arc; use tracing::{debug, error, instrument, warn}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, calculate_vid_disperse}, +}; + /// Tracks state of a VID task pub struct VIDTaskState< TYPES: NodeType, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 942fbb9fea..ade64a8534 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,46 +1,46 @@ #![allow(clippy::module_name_repetitions)] -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, +use std::{ + collections::{BTreeMap, HashMap}, + fmt::Debug, + sync::Arc, + time::Duration, }; + use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ + message::GeneralConsensusMessage, simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, - simple_vote::ViewSyncFinalizeData, - traits::signature_key::SignatureKey, -}; -use hotshot_types::{ simple_vote::{ - ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitData, - ViewSyncPreCommitVote, + ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, + ViewSyncPreCommitData, ViewSyncPreCommitVote, }, - traits::network::ConsensusIntentEvent, - vote::{Certificate, HasViewNumber, Vote}, -}; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use hotshot_task::task::{Task, TaskState}; -use hotshot_types::{ - message::GeneralConsensusMessage, traits::{ consensus_api::ConsensusApi, election::Membership, - network::ConnectedNetwork, + network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, }, + vote::{Certificate, HasViewNumber, Vote}, }; -use std::{collections::BTreeMap, collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; + +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; #[derive(PartialEq, PartialOrd, Clone, Debug, Eq, Hash)] /// Phases of view sync pub enum ViewSyncPhase { diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 60c0401bd6..6b90f9f14e 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -1,13 +1,8 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; use async_broadcast::Sender; use async_trait::async_trait; use either::Either::{self, Left, Right}; - use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ simple_certificate::{ @@ -23,6 +18,11 @@ use hotshot_types::{ }; use tracing::{debug, error}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; + /// Task state for collecting votes of one type and emitting a certificate pub struct VoteCollectionTaskState< TYPES: NodeType, diff --git a/task/src/dependency.rs b/task/src/dependency.rs index ecbe25612f..7ff6f653f2 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -1,10 +1,12 @@ -use async_broadcast::{Receiver, RecvError}; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; -use futures::stream::StreamExt; -use futures::FutureExt; use std::future::Future; +use async_broadcast::{Receiver, RecvError}; +use futures::{ + future::BoxFuture, + stream::{FuturesUnordered, StreamExt}, + FutureExt, +}; + /// Type which describes the idea of waiting for a dependency to complete pub trait Dependency { /// Complete will wait until it gets some value `T` then return the value @@ -175,9 +177,10 @@ impl Dependency for EventDependency { #[cfg(test)] mod tests { - use super::{AndDependency, Dependency, EventDependency, OrDependency}; use async_broadcast::{broadcast, Receiver}; + use super::{AndDependency, Dependency, EventDependency, OrDependency}; + fn eq_dep(rx: Receiver, val: usize) -> EventDependency { EventDependency { event_rx: rx, diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs index e5ff1e0179..281e858a06 100644 --- a/task/src/dependency_task.rs +++ b/task/src/dependency_task.rs @@ -1,10 +1,9 @@ #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; +use futures::Future; #[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; -use futures::Future; - use crate::dependency::Dependency; /// Defines a type that can handle the result of a dependency @@ -52,10 +51,9 @@ mod test { use std::time::Duration; use async_broadcast::{broadcast, Receiver, Sender}; - use futures::{stream::FuturesOrdered, StreamExt}; - #[cfg(async_executor_impl = "async-std")] use async_std::task::sleep; + use futures::{stream::FuturesOrdered, StreamExt}; #[cfg(async_executor_impl = "tokio")] use tokio::time::sleep; diff --git a/task/src/task.rs b/task/src/task.rs index 4cd446802d..f1e1f5afa0 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -1,5 +1,4 @@ -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use async_broadcast::{Receiver, SendError, Sender}; use async_compatibility_layer::art::async_timeout; @@ -8,14 +7,11 @@ use async_std::{ sync::RwLock, task::{spawn, JoinHandle}, }; -use futures::{future::select_all, Future}; - #[cfg(async_executor_impl = "async-std")] use futures::future::join_all; - #[cfg(async_executor_impl = "tokio")] use futures::future::try_join_all; - +use futures::{future::select_all, Future}; #[cfg(async_executor_impl = "tokio")] use tokio::{ sync::RwLock, @@ -335,14 +331,16 @@ impl TaskRegistry { #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, time::Duration}; + use async_broadcast::broadcast; #[cfg(async_executor_impl = "async-std")] use async_std::task::sleep; - use std::{collections::HashSet, time::Duration}; #[cfg(async_executor_impl = "tokio")] use tokio::time::sleep; + use super::*; + #[derive(Default)] pub struct DummyHandle { val: usize, diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index e73af5a116..dd524bb580 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -21,11 +21,13 @@ use hotshot_builder_api::{ builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; -use hotshot_types::traits::block_contents::BlockHeader; -use hotshot_types::traits::block_contents::Transaction; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, - traits::{block_contents::vid_commitment, election::Membership, node_implementation::NodeType}, + traits::{ + block_contents::{vid_commitment, BlockHeader, Transaction}, + election::Membership, + node_implementation::NodeType, + }, utils::BuilderCommitment, vid::VidCommitment, }; diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 94efb83b3c..c0e5d77e67 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -1,19 +1,18 @@ -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use std::time::Duration; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_spawn, async_timeout}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; use hotshot_task_impls::helpers::broadcast_event; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; - -use crate::test_runner::{HotShotTaskCompleted, Node}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use super::GlobalTestEvent; +use crate::test_runner::{HotShotTaskCompleted, Node}; /// the idea here is to run as long as we want diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index af77f42c99..9a6f637520 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -1,5 +1,9 @@ -use hotshot::{traits::TestableNodeImplementation, HotShotError}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, +}; +use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ data::Leaf, @@ -10,10 +14,6 @@ use hotshot_types::{ vid::VidCommitment, }; use snafu::Snafu; -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - sync::Arc, -}; use tracing::error; use crate::test_runner::{HotShotTaskCompleted, Node}; diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs index b9ad705144..46dec6a558 100644 --- a/testing/src/predicates.rs +++ b/testing/src/predicates.rs @@ -1,14 +1,15 @@ -use hotshot_task_impls::{ - consensus::ConsensusTaskState, events::HotShotEvent, events::HotShotEvent::*, -}; -use hotshot_types::data::null_block; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; -use std::collections::HashSet; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use hotshot::types::SystemContextHandle; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::{ + consensus::ConsensusTaskState, + events::{HotShotEvent, HotShotEvent::*}, +}; +use hotshot_types::{ + data::null_block, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; #[derive(Eq, PartialEq, Copy, Clone, Debug)] pub enum PredicateResult { diff --git a/testing/src/script.rs b/testing/src/script.rs index 8db5f14ba1..d9fb02383e 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -1,10 +1,12 @@ -use crate::predicates::{Predicate, PredicateResult}; +use std::{sync::Arc, time::Duration}; + use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::NodeType; -use std::{sync::Arc, time::Duration}; + +use crate::predicates::{Predicate, PredicateResult}; pub const RECV_TIMEOUT: Duration = Duration::from_millis(250); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index aef4583a2c..cfd99fed50 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,26 +1,24 @@ -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; -use crate::test_runner::HotShotTaskCompleted; -use crate::test_runner::{LateStartNode, Node, TestRunner}; use either::{Left, Right}; -use hotshot::types::EventType; -use hotshot::{traits::TestableNodeImplementation, HotShotInitializer}; -use hotshot_example_types::state_types::TestInstanceState; -use hotshot_example_types::storage_types::TestStorage; +use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; +use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; use hotshot_task::task::{Task, TaskState, TestTaskState}; -use hotshot_types::simple_certificate::QuorumCertificate; -use hotshot_types::{data::Leaf, ValidatorConfig}; use hotshot_types::{ + data::Leaf, event::Event, message::Message, + simple_certificate::QuorumCertificate, traits::{ network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType}, }, vote::HasViewNumber, + ValidatorConfig, }; use snafu::Snafu; -use std::collections::BTreeMap; + +use crate::test_runner::{HotShotTaskCompleted, LateStartNode, Node, TestRunner}; /// convience type for state and block pub type StateAndBlock = (Vec, Vec); diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 287d51b499..36e2bc4043 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -1,26 +1,27 @@ #![allow(clippy::panic)] -use std::marker::PhantomData; +use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; -use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, - state_types::{TestInstanceState, TestValidatedState}, -}; - -use crate::test_builder::TestMetadata; +use async_broadcast::{Receiver, Sender}; +use async_lock::RwLockUpgradableReadGuard; +use bitvec::bitvec; use committable::Committable; use ethereum_types::U256; use hotshot::{ types::{BLSPubKey, SignatureKey, SystemContextHandle}, HotShotInitializer, Memberships, Networks, SystemContext, }; +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + node_types::{MemoryImpl, TestTypes}, + state_types::{TestInstanceState, TestValidatedState}, +}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, VidDisperse, ViewNumber}, + data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewNumber}, message::{GeneralConsensusMessage, Proposal}, simple_certificate::{DACertificate, QuorumCertificate}, - simple_vote::{DAData, DAVote, SimpleVote}, + simple_vote::{DAData, DAVote, QuorumData, QuorumVote, SimpleVote}, traits::{ block_contents::{vid_commitment, BlockHeader, TestableBlock}, consensus_api::ConsensusApi, @@ -29,25 +30,14 @@ use hotshot_types::{ states::ValidatedState, BlockPayload, }, + utils::{View, ViewInner}, vid::{vid_scheme, VidCommitment, VidSchemeType}, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber, Vote}, }; - -use async_broadcast::{Receiver, Sender}; -use async_lock::RwLockUpgradableReadGuard; -use bitvec::bitvec; -use hotshot_types::simple_vote::QuorumData; -use hotshot_types::simple_vote::QuorumVote; -use hotshot_types::utils::View; -use hotshot_types::utils::ViewInner; -use hotshot_types::vote::Certificate; -use hotshot_types::vote::Vote; - use jf_primitives::vid::VidScheme; - -use hotshot_types::data::VidDisperseShare; use serde::Serialize; -use std::{fmt::Debug, hash::Hash, sync::Arc}; + +use crate::test_builder::TestMetadata; /// create the [`SystemContextHandle`] from a node id /// # Panics diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 7d5a224ce3..41b0bedf59 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,26 +1,24 @@ -use hotshot::traits::NetworkReliability; -use hotshot_example_types::storage_types::TestStorage; -use hotshot_orchestrator::config::ValidatorConfigFile; -use hotshot_types::traits::election::Membership; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; -use tide_disco::Url; - -use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; +use hotshot::traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}; +use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; +use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::{ - traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, + traits::{election::Membership, node_implementation::NodeType}, + ExecutionType, HotShotConfig, ValidatorConfig, }; +use tide_disco::Url; -use super::completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}; use super::{ - overall_safety_task::OverallSafetyPropertiesDescription, txn_task::TxnTaskDescription, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + txn_task::TxnTaskDescription, }; use crate::{ spinning_task::SpinningTaskDescription, test_launcher::{ResourceGenerators, TestLauncher}, view_sync_task::ViewSyncTaskDescription, }; -use hotshot_example_types::state_types::TestInstanceState; /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 2a1be42d89..14a1bff776 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -1,49 +1,49 @@ #![allow(clippy::panic)] -use super::{ - completion_task::CompletionTask, - overall_safety_task::{OverallSafetyTask, RoundCtx}, - txn_task::TxnTask, -}; -use crate::{ - block_builder::TestBuilderImplementation, - completion_task::CompletionTaskDescription, - spinning_task::{ChangeNode, SpinningTask, UpDown}, - test_launcher::{Networks, TestLauncher}, - txn_task::TxnTaskDescription, - view_sync_task::ViewSyncTask, +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + sync::Arc, }; + use async_broadcast::broadcast; use either::Either::{self, Left, Right}; use futures::future::join_all; -use hotshot::{types::SystemContextHandle, Memberships}; +use hotshot::{ + traits::TestableNodeImplementation, types::SystemContextHandle, HotShotInitializer, + Memberships, SystemContext, +}; use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; - -use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; - use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::{ consensus::ConsensusMetricsValue, + constants::EVENT_CHANNEL_SIZE, data::Leaf, + message::Message, + simple_certificate::QuorumCertificate, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeType}, + network::ConnectedNetwork, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, HotShotConfig, ValidatorConfig, }; -use hotshot_types::{constants::EVENT_CHANNEL_SIZE, simple_certificate::QuorumCertificate}; -use hotshot_types::{ - message::Message, - traits::{network::ConnectedNetwork, node_implementation::NodeImplementation}, -}; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; - #[allow(deprecated)] use tracing::info; +use super::{ + completion_task::CompletionTask, + overall_safety_task::{OverallSafetyTask, RoundCtx}, + txn_task::TxnTask, +}; +use crate::{ + block_builder::TestBuilderImplementation, + completion_task::CompletionTaskDescription, + spinning_task::{ChangeNode, SpinningTask, UpDown}, + test_launcher::{Networks, TestLauncher}, + txn_task::TxnTaskDescription, + view_sync_task::ViewSyncTask, +}; + /// a node participating in a test #[derive(Clone)] pub struct Node> { diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 4e510cc50a..f02cdd8b59 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -1,4 +1,5 @@ -use crate::test_runner::{HotShotTaskCompleted, Node}; +use std::time::Duration; + use async_broadcast::{Receiver, TryRecvError}; use async_compatibility_layer::art::{async_sleep, async_spawn}; #[cfg(async_executor_impl = "async-std")] @@ -10,9 +11,8 @@ use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use std::time::Duration; - use super::GlobalTestEvent; +use crate::test_runner::{HotShotTaskCompleted, Node}; // the obvious idea here is to pass in a "stream" that completes every `n` seconds // the stream construction can definitely be fancier but that's the baseline idea diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 42e0151434..2d6004b173 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -1,19 +1,12 @@ use std::{cmp::max, marker::PhantomData}; +use committable::Committable; +use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; -use sha2::{Digest, Sha256}; - -use crate::task_helpers::{ - build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, -}; -use committable::Committable; - -use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; - use hotshot_types::{ data::{DAProposal, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence, ViewNumber}, message::Proposal, @@ -22,17 +15,19 @@ use hotshot_types::{ ViewSyncFinalizeCertificate2, }, simple_vote::{ - DAData, DAVote, TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, - ViewSyncFinalizeData, ViewSyncFinalizeVote, + DAData, DAVote, QuorumData, QuorumVote, TimeoutData, TimeoutVote, UpgradeProposalData, + UpgradeVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, }, traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeType}, }, }; +use sha2::{Digest, Sha256}; -use hotshot_types::simple_vote::QuorumData; -use hotshot_types::simple_vote::QuorumVote; +use crate::task_helpers::{ + build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, +}; #[derive(Clone)] pub struct TestView { diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index 8d91042f2b..648d840dc3 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -1,8 +1,9 @@ +use std::{collections::HashSet, marker::PhantomData, sync::Arc}; + use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use crate::{test_runner::HotShotTaskCompleted, GlobalTestEvent}; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 4a88261db7..7e09165e5a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -1,8 +1,15 @@ //! Provides the core consensus types -pub use crate::utils::{View, ViewInner}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, Mutex}, +}; + +use committable::Commitment; use displaydoc::Display; +use tracing::error; +pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf, VidDisperseShare}, error::HotShotError, @@ -14,13 +21,6 @@ use crate::{ }, utils::{StateAndDelta, Terminator}, }; -use committable::Commitment; - -use std::{ - collections::{BTreeMap, HashMap}, - sync::{Arc, Mutex}, -}; -use tracing::error; /// A type alias for `HashMap, T>` pub type CommitmentMap = HashMap, T>; diff --git a/types/src/data.rs b/types/src/data.rs index 4abcdef577..17ced1f262 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -3,6 +3,24 @@ //! This module provides types for representing consensus internal state, such as leaves, //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. +use std::{ + collections::BTreeMap, + fmt::{Debug, Display}, + hash::Hash, + marker::PhantomData, +}; + +use anyhow::{ensure, Result}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use bincode::Options; +use committable::{Commitment, Committable, RawCommitmentBuilder}; +use derivative::Derivative; +use jf_primitives::vid::VidDisperse as JfVidDisperse; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use snafu::Snafu; +use tracing::error; + use crate::{ message::Proposal, simple_certificate::{ @@ -23,22 +41,6 @@ use crate::{ vid::{VidCommitment, VidCommon, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; -use anyhow::{ensure, Result}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use bincode::Options; -use committable::{Commitment, Committable, RawCommitmentBuilder}; -use derivative::Derivative; -use jf_primitives::vid::VidDisperse as JfVidDisperse; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use snafu::Snafu; -use std::{ - collections::BTreeMap, - fmt::{Debug, Display}, - hash::Hash, - marker::PhantomData, -}; -use tracing::error; /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. #[derive( @@ -669,10 +671,11 @@ impl Leaf { pub mod null_block { #![allow(missing_docs)] - use crate::vid::{vid_scheme, VidCommitment}; use jf_primitives::vid::VidScheme; use memoize::memoize; + use crate::vid::{vid_scheme, VidCommitment}; + /// The commitment for a null block payload. /// /// Note: the commitment depends on the network (via `num_storage_nodes`), diff --git a/types/src/error.rs b/types/src/error.rs index a9209237c4..0797f726ed 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -4,14 +4,16 @@ //! occur while interacting with this crate. //use crate::traits::network::TimeoutErr; -use crate::traits::{block_contents::BlockPayload, node_implementation::NodeType}; +use std::num::NonZeroU64; + #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::num::NonZeroU64; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; + +use crate::traits::{block_contents::BlockPayload, node_implementation::NodeType}; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} diff --git a/types/src/event.rs b/types/src/event.rs index 244bd4001c..a66a1559a7 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,5 +1,7 @@ //! Events that a `HotShot` instance can emit +use std::sync::Arc; + use serde::{Deserialize, Serialize}; use crate::{ @@ -9,8 +11,6 @@ use crate::{ simple_certificate::QuorumCertificate, traits::{node_implementation::NodeType, ValidatedState}, }; - -use std::sync::Arc; /// A status event emitted by a `HotShot` instance /// /// This includes some metadata, such as the stage and view number that the event was generated in, @@ -60,9 +60,10 @@ pub type LeafChain = Vec>; /// Utilities for converting between HotShotError and a string. pub mod error_adaptor { - use super::{Arc, Deserialize, HotShotError, NodeType}; use serde::{de::Deserializer, ser::Serializer}; + use super::{Arc, Deserialize, HotShotError, NodeType}; + /// Convert a HotShotError into a string /// /// # Errors diff --git a/types/src/lib.rs b/types/src/lib.rs index fc07d1f60e..0b604038f7 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,13 +1,14 @@ //! Types and Traits for the `HotShot` consensus module -use crate::utils::bincode_opts; +use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; + use bincode::Options; use displaydoc::Display; use light_client::StateVerKey; -use std::fmt::Debug; -use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use tracing::error; use traits::{election::ElectionConfig, signature_key::SignatureKey}; use url::Url; + +use crate::utils::bincode_opts; pub mod consensus; pub mod constants; pub mod data; diff --git a/types/src/light_client.rs b/types/src/light_client.rs index ce98eedd3e..3a0aa8f5c4 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -1,5 +1,7 @@ //! Types and structs associated with light client state +use std::collections::HashMap; + use ark_ed_on_bn254::EdwardsConfig as Config; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -8,7 +10,6 @@ use jf_primitives::signatures::schnorr; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; use tagged_base64::tagged; /// Base field in the prover circuit diff --git a/types/src/message.rs b/types/src/message.rs index b591db4dfa..2a061366ae 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,33 +3,31 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use crate::data::{Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}; -use crate::simple_certificate::{ - DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, -}; -use crate::simple_vote::{ - DAVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, -}; -use crate::traits::election::Membership; -use crate::traits::network::ResponseMessage; -use crate::traits::signature_key::SignatureKey; -use crate::vote::HasViewNumber; +use std::{fmt::Debug, marker::PhantomData}; + +use anyhow::{ensure, Result}; +use committable::Committable; +use derivative::Derivative; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + use crate::{ - data::DAProposal, - simple_vote::QuorumVote, + data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, + simple_certificate::{ + DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + ViewSyncPreCommitCertificate2, + }, + simple_vote::{ + DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + ViewSyncPreCommitVote, + }, traits::{ - network::{DataRequest, NetworkMsg, ViewMessage}, + election::Membership, + network::{DataRequest, NetworkMsg, ResponseMessage, ViewMessage}, node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, }, + vote::HasViewNumber, }; -use anyhow::{ensure, Result}; -use committable::Committable; -use derivative::Derivative; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; -use std::{fmt::Debug, marker::PhantomData}; /// Incoming message #[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] diff --git a/types/src/qc.rs b/types/src/qc.rs index 52d540ce53..ca3295f1bb 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -1,10 +1,6 @@ //! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. //! See more details in hotshot paper. -use crate::{ - stake_table::StakeTableEntry, - traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, -}; use ark_std::{ fmt::Debug, format, @@ -23,6 +19,11 @@ use jf_primitives::{ use serde::{Deserialize, Serialize}; use typenum::U32; +use crate::{ + stake_table::StakeTableEntry, + traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, +}; + /// An implementation of QC using BLS signature and a bit-vector. #[derive(Serialize, Deserialize)] pub struct BitVectorQC Deserialize<'a>>( @@ -184,12 +185,13 @@ where #[cfg(test)] mod tests { - use super::*; use jf_primitives::signatures::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, SignatureScheme, }; use vbs::{version::StaticVersion, BinarySerializer, Serializer}; + + use super::*; type Version = StaticVersion<0, 1>; macro_rules! test_quorum_certificate { diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 55f0a9c8b5..a413a328a5 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -1,10 +1,5 @@ //! Types and structs for the hotshot signature keys -use crate::{ - qc::{BitVectorQC, QCParams}, - stake_table::StakeTableEntry, - traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, -}; use bitvec::{slice::BitSlice, vec::BitVec}; use ethereum_types::U256; use generic_array::GenericArray; @@ -19,6 +14,12 @@ use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use tracing::instrument; +use crate::{ + qc::{BitVectorQC, QCParams}, + stake_table::StakeTableEntry, + traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, +}; + /// BLS private key used to sign a message pub type BLSPrivKey = SignKey; /// BLS public key used to verify a signature diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 8f2261380b..5fd4ff04a1 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -7,9 +7,9 @@ use std::{ }; use anyhow::{ensure, Result}; - use committable::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; +use serde::{Deserialize, Serialize}; use crate::{ data::{serialize_signature2, Leaf}, @@ -18,14 +18,13 @@ use crate::{ ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ - election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, vote::{Certificate, HasViewNumber}, }; -use serde::{Deserialize, Serialize}; - /// Trait which allows use to inject different threshold calculations into a Certificate type pub trait Threshold { /// Calculate a threshold based on the membership diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 293ebc6307..97ab6e398f 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -3,8 +3,8 @@ use std::{fmt::Debug, hash::Hash}; use committable::{Commitment, Committable}; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use vbs::version::Version; use crate::{ data::Leaf, @@ -12,7 +12,6 @@ use crate::{ vid::VidCommitment, vote::{HasViewNumber, Vote}, }; -use vbs::version::Version; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs index 7c6525e0eb..3884a7eee6 100644 --- a/types/src/stake_table.rs +++ b/types/src/stake_table.rs @@ -1,9 +1,10 @@ //! Types and structs related to the stake table -use crate::traits::signature_key::{SignatureKey, StakeTableEntryType}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; +use crate::traits::signature_key::{SignatureKey, StakeTableEntryType}; + /// Stake table entry #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash, Eq)] #[serde(bound(deserialize = ""))] diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index e97890bd22..33bc14a0d7 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -3,16 +3,6 @@ //! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which //! describe the behaviors that a block is expected to have. -use crate::{ - data::Leaf, - traits::{node_implementation::NodeType, ValidatedState}, - utils::BuilderCommitment, - vid::{vid_scheme, VidCommitment, VidSchemeType}, -}; -use committable::{Commitment, Committable}; -use jf_primitives::vid::VidScheme; -use serde::{de::DeserializeOwned, Serialize}; - use std::{ error::Error, fmt::{Debug, Display}, @@ -20,6 +10,17 @@ use std::{ hash::Hash, }; +use committable::{Commitment, Committable}; +use jf_primitives::vid::VidScheme; +use serde::{de::DeserializeOwned, Serialize}; + +use crate::{ + data::Leaf, + traits::{node_implementation::NodeType, ValidatedState}, + utils::BuilderCommitment, + vid::{vid_scheme, VidCommitment, VidSchemeType}, +}; + /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index b596695f56..40484812cb 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -1,5 +1,9 @@ //! Contains the [`ConsensusApi`] trait. +use std::{num::NonZeroUsize, time::Duration}; + +use async_trait::async_trait; + use crate::{ event::Event, traits::{ @@ -7,9 +11,6 @@ use crate::{ signature_key::SignatureKey, }, }; -use async_trait::async_trait; - -use std::{num::NonZeroUsize, time::Duration}; /// The API that tasks use to talk to the system /// TODO we plan to drop this diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 4c5ded2953..956f6654e2 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -3,12 +3,12 @@ // Needed to avoid the non-binding `let` warning. #![allow(clippy::let_underscore_untyped)] -use super::node_implementation::NodeType; - -use crate::{traits::signature_key::SignatureKey, PeerConfig}; +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; use snafu::Snafu; -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; + +use super::node_implementation::NodeType; +use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// Error for election problems #[derive(Snafu, Debug)] diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index fc69b5c077..685673615c 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -6,9 +6,10 @@ //! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) //! - [`Label`]: Stores the last string (example usage: current version, network online/offline) -use dyn_clone::DynClone; use std::fmt::Debug; +use dyn_clone::DynClone; + /// The metrics type. pub trait Metrics: Send + Sync + DynClone + Debug { /// Create a [`Counter`] with an optional `unit_label`. @@ -111,12 +112,13 @@ dyn_clone::clone_trait_object!(Label); #[cfg(test)] mod test { - use super::*; use std::{ collections::HashMap, sync::{Arc, Mutex}, }; + use super::*; + #[derive(Debug, Clone)] struct TestMetrics { prefix: String, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 8349aa651a..18db5d447e 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -15,12 +15,15 @@ use futures::{ use tokio::time::error::Elapsed as TimeoutError; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} -use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{ - data::ViewNumber, - message::{MessagePurpose, SequencingMessage}, - BoxSyncFuture, +use std::{ + collections::{BTreeSet, HashMap}, + fmt::Debug, + hash::Hash, + pin::Pin, + sync::Arc, + time::Duration, }; + use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use futures::future::join_all; @@ -30,16 +33,15 @@ use rand::{ }; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::{ - collections::{BTreeSet, HashMap}, - fmt::Debug, - hash::Hash, - pin::Pin, - sync::Arc, - time::Duration, -}; use vbs::version::StaticVersionType; +use super::{node_implementation::NodeType, signature_key::SignatureKey}; +use crate::{ + data::ViewNumber, + message::{MessagePurpose, SequencingMessage}, + BoxSyncFuture, +}; + /// for any errors we decide to add to memory network #[derive(Debug, Snafu, Serialize, Deserialize)] #[snafu(visibility(pub))] diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index c59755de62..affc5383db 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -3,6 +3,18 @@ //! This module defines the [`NodeImplementation`] trait, which is a composite trait used for //! describing the overall behavior of a node, as a composition of implementations of the node trait. +use std::{ + fmt::Debug, + hash::Hash, + ops::{self, Deref, Sub}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; +use committable::Committable; +use serde::{Deserialize, Serialize}; + use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, election::ElectionConfig, @@ -20,16 +32,6 @@ use crate::{ election::Membership, signature_key::SignatureKey, states::InstanceState, BlockPayload, }, }; -use async_trait::async_trait; -use committable::Committable; -use serde::{Deserialize, Serialize}; -use std::{ - fmt::Debug, - hash::Hash, - ops::{self, Deref, Sub}, - sync::Arc, - time::Duration, -}; /// Node implementation aggregate trait /// diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 484721b0c0..ad274d515c 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -1,12 +1,13 @@ //! Minimal compatibility over public key signatures -use bitvec::prelude::*; -use ethereum_types::U256; -use jf_primitives::errors::PrimitivesError; -use serde::{Deserialize, Serialize}; use std::{ fmt::{Debug, Display}, hash::Hash, }; + +use bitvec::prelude::*; +use ethereum_types::U256; +use jf_primitives::errors::PrimitivesError; +use serde::{Deserialize, Serialize}; use tagged_base64::TaggedBase64; /// Type representing stake table entries in a `StakeTable` diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index db4505f7f6..63b6bb6a69 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -4,6 +4,10 @@ //! compatibilities over the current network state, which is modified by the transactions contained //! within blocks. +use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; + +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + use super::block_contents::TestableBlock; use crate::{ data::Leaf, @@ -12,8 +16,6 @@ use crate::{ BlockPayload, }, }; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; /// Instance-level state, which allows us to fetch missing validated state. pub trait InstanceState: Debug + Send + Sync {} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index ecc1905bcd..497b1b0857 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -8,6 +8,7 @@ use std::collections::BTreeMap; use anyhow::Result; use async_trait::async_trait; +use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, data::{DAProposal, Leaf, VidDisperseShare}, @@ -16,8 +17,6 @@ use crate::{ simple_certificate::QuorumCertificate, }; -use super::node_implementation::NodeType; - /// Abstraction for storing a variety of consensus payload datum. #[async_trait] pub trait Storage: Send + Sync + Clone { diff --git a/types/src/utils.rs b/types/src/utils.rs index 54a5d11e72..ad8544179c 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -1,10 +1,7 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use crate::{ - data::Leaf, - traits::{node_implementation::NodeType, ValidatedState}, - vid::VidCommitment, -}; +use std::{ops::Deref, sync::Arc}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ config::{ @@ -16,10 +13,15 @@ use bincode::{ use committable::Commitment; use digest::OutputSizeUser; use sha2::Digest; -use std::{ops::Deref, sync::Arc}; use tagged_base64::tagged; use typenum::Unsigned; +use crate::{ + data::Leaf, + traits::{node_implementation::NodeType, ValidatedState}, + vid::VidCommitment, +}; + /// A view's state #[derive(Debug)] pub enum ViewInner { diff --git a/types/src/vid.rs b/types/src/vid.rs index 71ef81d85a..7b50c06bf7 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -8,6 +8,8 @@ //! This crate and all downstream crates should talk to the VID scheme only //! via the traits exposed here. +use std::{fmt::Debug, ops::Range}; + use ark_bn254::Bn254; use jf_primitives::{ pcs::{ @@ -28,7 +30,6 @@ use jf_primitives::{ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use sha2::Sha256; -use std::{fmt::Debug, ops::Range}; /// VID scheme constructor. /// diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index f41f89cdc9..ca294063de 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -3,19 +3,18 @@ /// Configuration for the webserver pub mod config; -use crate::config::{MAX_TXNS, MAX_VIEWS, TX_BATCH_SIZE}; +use std::{ + collections::{BTreeMap, HashMap}, + io, + path::PathBuf, +}; + use async_compatibility_layer::channel::OneShotReceiver; use async_lock::RwLock; use clap::Args; use futures::FutureExt; - use hotshot_types::traits::signature_key::SignatureKey; use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; -use std::{ - collections::{BTreeMap, HashMap}, - io, - path::PathBuf, -}; use tide_disco::{ api::ApiError, error::ServerError, @@ -25,6 +24,8 @@ use tide_disco::{ use tracing::{debug, info}; use vbs::version::StaticVersionType; +use crate::config::{MAX_TXNS, MAX_VIEWS, TX_BATCH_SIZE}; + /// Convience alias for a lock over the state of the app /// TODO this is used in two places. It might be clearer to just inline type State = RwLock>; From 227fa9081f0855da0041097a6ce91d947097559d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 10 Apr 2024 13:54:03 +0300 Subject: [PATCH 0937/1393] DHT not impl `NetworkBehaviour` (#2844) * Removing behaviour impl for DM * retries and fixes * don't loop forever in DHT poll * remove proposal_id (#2828) * [Auto Bench] Scripts of Running Benchmarks in AWS (#2793) * update setting * update webserver pub ip * updated dockerfile * dockerfile updated * clean up log and scripts running on ecs * real script added * fix * update script and config file * parameterizing webserver url * add to ci * to pass ci docker test * to pass ci docker test again * test ci with script, just and ecs * fix duplicate part for ci test * fix ci test * today's last try with ci * fix merge * really last ci try * commented out ci * fix scripts * add file * fix config * fix config * fix bug after upgradability? * init try on parameterization in script * better parameterization for script * clean up and more parameterization * fix lint * finish parameterization for scripts * preserve yml for ci * add blank line * fix less than or equal in script * upload results for init run * last blank line * nit for nano representation * change back transaction size * remove autobench on ci * remove autobench on ci * remove ci_ecs_benchmarks.sh * remove ci autobench script * working towards making dht not impl net behaviour * adding bootstrap task * tests failing now * test logging * wip * Cancel bootstrap task when `NetworkNode` shuts down * Cleanup * Make lint happy * Fix for tokio * Server mode is still needed apparently * Set nodes to server mode only in the relevant tests * Server mode is configurable through config file * Wait until task is actually cancelled in tokio * Fix lint issue * Fix debug messages * Fix more lint issues * Bootstrap task shutdown --------- Co-authored-by: Jarred Parr Co-authored-by: Sishan Long Co-authored-by: Lukasz Rzasik --- .../src/traits/networking/libp2p_network.rs | 1 + .../src/network/behaviours/dht/bootstrap.rs | 88 ++++ .../src/network/behaviours/dht/mod.rs | 424 +++++------------- libp2p-networking/src/network/def.rs | 48 +- libp2p-networking/src/network/mod.rs | 7 +- libp2p-networking/src/network/node.rs | 91 +++- libp2p-networking/src/network/node/config.rs | 3 + libp2p-networking/tests/common/mod.rs | 4 +- orchestrator/run-config.toml | 1 + orchestrator/src/config.rs | 5 + 10 files changed, 286 insertions(+), 386 deletions(-) create mode 100644 libp2p-networking/src/network/behaviours/dht/bootstrap.rs diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 47f85669ea..9bcf4edffb 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -374,6 +374,7 @@ impl Libp2pNetwork { NonZeroUsize::new(config.config.num_nodes_with_stake.get() - 2) .expect("failed to calculate replication factor"), ) + .server_mode(libp2p_config.server_mode) .identity(keypair) .bound_addr(Some(bind_address.clone())) .mesh_params(Some(MeshParams { diff --git a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs new file mode 100644 index 0000000000..d209132f0b --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -0,0 +1,88 @@ +use std::time::Duration; + +use async_compatibility_layer::{art, channel::UnboundedSender}; +use futures::{channel::mpsc, StreamExt}; + +use crate::network::ClientRequest; + +/// Internal bootstrap events +pub enum InputEvent { + /// Start bootstrap + StartBootstrap, + /// Bootstrap has finished + BootstrapFinished, + /// Shutdown bootstrap + ShutdownBootstrap, +} +/// Bootstrap task's state +pub struct DHTBootstrapTask { + /// Task's receiver + rx: mpsc::Receiver, + /// Task's sender + network_tx: UnboundedSender, + /// Field indicating progress state + in_progress: bool, +} + +impl DHTBootstrapTask { + /// Run bootstrap task + pub fn run(rx: mpsc::Receiver, tx: UnboundedSender) { + art::async_spawn(async move { + let state = Self { + rx, + network_tx: tx, + in_progress: false, + }; + state.run_loop().await; + }); + } + /// Task's loop + async fn run_loop(mut self) { + loop { + tracing::debug!("looping bootstrap"); + if self.in_progress { + match self.rx.next().await { + Some(InputEvent::BootstrapFinished) => { + tracing::debug!("Bootstrap finished"); + self.in_progress = false; + } + Some(InputEvent::ShutdownBootstrap) => { + tracing::debug!("ShutdownBootstrap received, shutting down"); + break; + } + Some(_) => {} + None => { + tracing::debug!("Bootstrap channel closed, exiting loop"); + break; + } + } + } else if let Ok(maybe_event) = + art::async_timeout(Duration::from_secs(120), self.rx.next()).await + { + match maybe_event { + Some(InputEvent::StartBootstrap) => { + tracing::debug!("Start bootstrap in bootstrap task"); + self.bootstrap().await; + } + Some(InputEvent::ShutdownBootstrap) => { + tracing::debug!("ShutdownBootstrap received, shutting down"); + break; + } + Some(_) => {} + None => { + tracing::debug!("Bootstrap channel closed, exiting loop"); + break; + } + } + } else { + tracing::debug!("Start bootstrap in bootstrap task after timeout"); + self.bootstrap().await; + } + } + } + /// Start bootstrap + async fn bootstrap(&mut self) { + self.in_progress = true; + let _ = self.network_tx.send(ClientRequest::BeginBootstrap).await; + } +} diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index c435696e86..f49af0de4e 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -1,23 +1,20 @@ -use std::{ - collections::{HashMap, HashSet, VecDeque}, - num::NonZeroUsize, - task::Poll, - time::Duration, -}; +/// Task for doing bootstraps at a regular interval +pub mod bootstrap; +use std::{collections::HashMap, num::NonZeroUsize, time::Duration}; +use async_compatibility_layer::{art, channel::UnboundedSender}; /// a local caching layer for the DHT key value pairs -use futures::channel::oneshot::Sender; +use futures::{ + channel::{mpsc, oneshot::Sender}, + SinkExt, +}; use lazy_static::lazy_static; -use libp2p::kad::{store::RecordStore, Behaviour as KademliaBehaviour, Event as KademliaEvent}; -use libp2p::{ - kad::{ - /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapError, BootstrapOk, - GetClosestPeersOk, GetRecordOk, GetRecordResult, Mode, ProgressStep, PutRecordResult, - QueryId, QueryResult, Quorum, Record, - }, - swarm::{NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm}, - Multiaddr, +use libp2p::kad::{ + /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapOk, GetClosestPeersOk, + GetRecordOk, GetRecordResult, ProgressStep, PutRecordResult, QueryId, QueryResult, Record, }; +use libp2p::kad::{store::RecordStore, Behaviour as KademliaBehaviour}; +use libp2p::kad::{BootstrapError, Event as KademliaEvent}; use libp2p_identity::PeerId; use tracing::{error, info, warn}; @@ -30,6 +27,8 @@ lazy_static! { static ref MAX_DHT_QUERY_SIZE: NonZeroUsize = NonZeroUsize::new(50).unwrap(); } +use crate::network::{ClientRequest, NetworkEvent}; + use super::exponential_backoff::ExponentialBackoff; /// Behaviour wrapping libp2p's kademlia @@ -38,31 +37,24 @@ use super::exponential_backoff::ExponentialBackoff; /// - Request API /// - bootstrapping into the network /// - peer discovery +#[derive(Debug)] pub struct DHTBehaviour { - /// client approval to begin bootstrap - pub begin_bootstrap: bool, /// in progress queries for nearby peers pub in_progress_get_closest_peers: HashMap>, - /// bootstrap nodes - pub bootstrap_nodes: HashMap>, - /// List of kademlia events - pub event_queue: Vec, /// List of in-progress get requests in_progress_get_record_queries: HashMap, /// List of in-progress put requests in_progress_put_record_queries: HashMap, - /// List of previously failed get requests - queued_get_record_queries: VecDeque, - /// List of previously failed put requests - queued_put_record_queries: VecDeque, - /// Kademlia behaviour - pub kadem: KademliaBehaviour, /// State of bootstrapping pub bootstrap_state: Bootstrap, /// the peer id (useful only for debugging right now) pub peer_id: PeerId, /// replication factor pub replication_factor: NonZeroUsize, + /// Sender to retry requests. + retry_tx: Option>, + /// Sender to the bootstrap task + bootstrap_tx: Option>, } /// State of bootstrapping @@ -91,58 +83,41 @@ pub enum DHTEvent { } impl DHTBehaviour { - /// Begin the bootstrap process - pub fn begin_bootstrap(&mut self) { - self.begin_bootstrap = true; + /// Give the handler a way to retry requests. + pub fn set_retry(&mut self, tx: UnboundedSender) { + self.retry_tx = Some(tx); } - - /// Start a query for the closest peers - pub fn query_closest_peers(&mut self, random_peer: PeerId) { - self.kadem.get_closest_peers(random_peer); + /// Sets a sender to bootstrap task + pub fn set_bootstrap_sender(&mut self, tx: mpsc::Sender) { + self.bootstrap_tx = Some(tx); } - /// Create a new DHT behaviour #[must_use] - pub fn new( - mut kadem: KademliaBehaviour, - pid: PeerId, - replication_factor: NonZeroUsize, - ) -> Self { + pub fn new(pid: PeerId, replication_factor: NonZeroUsize) -> Self { // needed because otherwise we stay in client mode when testing locally // and don't publish keys stuff // e.g. dht just doesn't work. We'd need to add mdns and that doesn't seem worth it since // we won't have a local network // - kadem.set_mode(Some(Mode::Server)); Self { - begin_bootstrap: false, - bootstrap_nodes: HashMap::default(), peer_id: pid, - event_queue: Vec::default(), in_progress_get_record_queries: HashMap::default(), in_progress_put_record_queries: HashMap::default(), - queued_get_record_queries: VecDeque::default(), - queued_put_record_queries: VecDeque::default(), - kadem, bootstrap_state: Bootstrap { state: State::NotStarted, backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), }, in_progress_get_closest_peers: HashMap::default(), replication_factor, + retry_tx: None, + bootstrap_tx: None, } } - /// query a peer (e.g. obtain its address if it exists) - pub fn lookup_peer(&mut self, peer_id: PeerId, chan: Sender<()>) { - let qid = self.kadem.get_closest_peers(peer_id); - self.in_progress_get_closest_peers.insert(qid, chan); - } - /// print out the routing table to stderr - pub fn print_routing_table(&mut self) { + pub fn print_routing_table(&mut self, kadem: &mut KademliaBehaviour) { let mut err = format!("KBUCKETS: PID: {:?}, ", self.peer_id); - let v = self.kadem.kbuckets().collect::>(); + let v = kadem.kbuckets().collect::>(); for i in v { for j in i.iter() { let s = format!( @@ -155,47 +130,17 @@ impl DHTBehaviour { error!("{:?}", err); } - /// Passthru to kademlia - /// Associate address with kademlia peer - pub fn add_address(&mut self, peer_id: &PeerId, addr: Multiaddr) { - // add address to kademlia - self.kadem.add_address(peer_id, addr); - } - - /// Save in case kademlia forgets about bootstrap nodes - pub fn add_bootstrap_nodes(&mut self, nodes: HashMap>) { - for (k, v) in nodes { - self.bootstrap_nodes.insert(k, v); - } + /// Get the replication factor for queries + #[must_use] + pub fn get_replication_factor(&self) -> NonZeroUsize { + self.replication_factor } - /// Publish a key/value to the kv store. /// Once replicated upon all nodes, the caller is notified over /// `chan`. If there is an error, a [`crate::network::error::DHTError`] is /// sent instead. - pub fn put_record(&mut self, mut query: KadPutQuery) { - let record = Record::new(query.key.clone(), query.value.clone()); - - match self - .kadem - .put_record(record, Quorum::N(self.replication_factor)) - { - Err(e) => { - // failed try again later - query.progress = DHTProgress::NotStarted; - query.backoff.start_next(false); - error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); - self.queued_put_record_queries.push_back(query); - } - Ok(qid) => { - info!("Success publishing {:?} to DHT", qid); - let query = KadPutQuery { - progress: DHTProgress::InProgress(qid), - ..query - }; - self.in_progress_put_record_queries.insert(qid, query); - } - } + pub fn put_record(&mut self, id: QueryId, query: KadPutQuery) { + self.in_progress_put_record_queries.insert(id, query); } /// Retrieve a value for a key from the DHT. @@ -209,6 +154,7 @@ impl DHTBehaviour { factor: NonZeroUsize, backoff: ExponentialBackoff, retry_count: u8, + kad: &mut KademliaBehaviour, ) { // noop if retry_count == 0 { @@ -216,7 +162,7 @@ impl DHTBehaviour { } // check cache before making the request - if let Some(entry) = self.kadem.store_mut().get(&key.clone().into()) { + if let Some(entry) = kad.store_mut().get(&key.clone().into()) { // exists in cache if chan.send(entry.value.clone()).is_err() { error!("Get DHT: channel closed before get record request result could be sent"); @@ -224,7 +170,7 @@ impl DHTBehaviour { } else { tracing::debug!("DHT cache miss, key: {:?}", key); // doesn't exist in cache, actually propagate request - let qid = self.kadem.get_record(key.clone().into()); + let qid = kad.get_record(key.clone().into()); let query = KadGetQuery { backoff, progress: DHTProgress::InProgress(qid), @@ -238,8 +184,47 @@ impl DHTBehaviour { } } + /// Spawn a task which will retry the query after a backoff. + fn retry_get(&self, mut query: KadGetQuery) { + let Some(tx) = self.retry_tx.clone() else { + return; + }; + let req = ClientRequest::GetDHT { + key: query.key, + notify: query.notify, + retry_count: query.retry_count, + }; + let backoff = query.backoff.next_timeout(false); + art::async_spawn(async move { + art::async_sleep(backoff).await; + let _ = tx.send(req).await; + }); + } + + /// Spawn a task which will retry the query after a backoff. + fn retry_put(&self, mut query: KadPutQuery) { + let Some(tx) = self.retry_tx.clone() else { + return; + }; + let req = ClientRequest::PutDHT { + key: query.key, + value: query.value, + notify: query.notify, + }; + art::async_spawn(async move { + art::async_sleep(query.backoff.next_timeout(false)).await; + let _ = tx.send(req).await; + }); + } + /// update state based on recv-ed get query - fn handle_get_query(&mut self, record_results: GetRecordResult, id: QueryId, mut last: bool) { + fn handle_get_query( + &mut self, + store: &mut MemoryStore, + record_results: GetRecordResult, + id: QueryId, + mut last: bool, + ) { let num = match self.in_progress_get_record_queries.get_mut(&id) { Some(query) => match record_results { Ok(results) => match results { @@ -311,9 +296,7 @@ impl DHTBehaviour { publisher: None, expires: None, }; - if self.kadem.store_mut().put(record).is_err() { - error!("Error putting DHT Get result into Record Store"); - } + let _ = store.put(record); // return value if notify.send(r).is_err() { error!("Get DHT: channel closed before get record request result could be sent"); @@ -330,7 +313,7 @@ impl DHTBehaviour { NonZeroUsize::new(num_replicas.get() + 1).unwrap_or(num_replicas), *MAX_DHT_QUERY_SIZE, ); - self.queued_get_record_queries.push_back(KadGetQuery { + self.retry_get(KadGetQuery { backoff, progress: DHTProgress::NotStarted, notify, @@ -369,27 +352,39 @@ impl DHTBehaviour { e, self.peer_id ); // push back onto the queue - self.queued_put_record_queries.push_back(query); + self.retry_put(query); } } } else { warn!("Put DHT: completed DHT query that is no longer tracked."); } } -} -impl DHTBehaviour { - #![allow(clippy::too_many_lines)] + /// Send that the bootsrap suceeded + fn finsish_bootstrap(&mut self) { + if let Some(mut tx) = self.bootstrap_tx.clone() { + art::async_spawn( + async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await }, + ); + } + } + #[allow(clippy::too_many_lines)] /// handle a DHT event - fn dht_handle_event(&mut self, event: KademliaEvent) { + pub fn dht_handle_event( + &mut self, + event: KademliaEvent, + store: &mut MemoryStore, + ) -> Option { match event { KademliaEvent::OutboundQueryProgressed { result: QueryResult::PutRecord(record_results), id, - step: ProgressStep { last: true, .. }, + step: ProgressStep { last, .. }, .. } => { - self.handle_put_query(record_results, id); + if last { + self.handle_put_query(record_results, id); + } } KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(r), @@ -425,7 +420,7 @@ impl DHTBehaviour { step: ProgressStep { last, .. }, .. } => { - self.handle_get_query(record_results, id, last); + self.handle_get_query(store, record_results, id, last); } KademliaEvent::OutboundQueryProgressed { result: @@ -438,18 +433,14 @@ impl DHTBehaviour { } => { if num_remaining == 0 { info!("Finished bootstrap for peer {:?}", self.peer_id); - self.bootstrap_state.state = State::NotStarted; - self.event_queue.push(DHTEvent::IsBootstrapped); - // After initial bootstrap succeeds do it every 2 minutes to maintain routing. - self.bootstrap_state.backoff = - ExponentialBackoff::new(1, Duration::from_secs(120)); - self.bootstrap_state.backoff.start_next(true); + self.finsish_bootstrap(); } else { warn!( "Bootstrap in progress: num remaining nodes to ping {:?}", num_remaining ); } + return Some(NetworkEvent::IsBootstrapped); } KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Err(e)), @@ -458,17 +449,11 @@ impl DHTBehaviour { let BootstrapError::Timeout { num_remaining, .. } = e; if num_remaining.is_none() { error!( - "Peer {:?} failed bootstrap with error {:?}. This should not happen and means all bootstrap nodes are down or were evicted from our local DHT. Readding bootstrap nodes {:?}", - self.peer_id, e, self.bootstrap_nodes + "Peer {:?} failed bootstrap with error {:?}. This should not happen and means all bootstrap nodes are down or were evicted from our local DHT.", + self.peer_id, e, ); - for (peer, addrs) in self.bootstrap_nodes.clone() { - for addr in addrs { - self.kadem.add_address(&peer, addr); - } - } } - self.bootstrap_state.state = State::NotStarted; - self.bootstrap_state.backoff.start_next(true); + self.finsish_bootstrap(); } KademliaEvent::RoutablePeer { peer, address: _ } => { info!("on peer {:?} found routable peer {:?}", self.peer_id, peer); @@ -490,13 +475,7 @@ impl DHTBehaviour { bucket_range: _, old_peer: _, } => { - // Trigger a new bootstrap when our table changes, if it's not running - // We do this to refresh our peers when we know routing has changed - // For more info see: https://github.com/libp2p/rust-libp2p/pull/4838 - // TODO: Remove once that pr is in a libp2p release - if self.bootstrap_state.state == State::NotStarted { - self.bootstrap_state.backoff.expire(); - } + info!("Routing table update"); } e @ KademliaEvent::OutboundQueryProgressed { .. } => { info!("Not handling dht event {:?}", e); @@ -505,6 +484,7 @@ impl DHTBehaviour { error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); } } + None } } @@ -550,189 +530,3 @@ pub enum DHTProgress { /// The query has not been started NotStarted, } - -// Diagnostics: -// 1. use of deprecated associated function `libp2p::libp2p_swarm::NetworkBehaviour::inject_event`: Implement `NetworkBehaviour::on_connection_handler_event` instead. The default implementation of this `inject_*` method delegates to it. - -impl NetworkBehaviour for DHTBehaviour { - type ConnectionHandler = - as NetworkBehaviour>::ConnectionHandler; - - type ToSwarm = DHTEvent; - - fn poll( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> Poll>> { - if matches!(self.bootstrap_state.state, State::NotStarted) - && self.bootstrap_state.backoff.is_expired() - && self.begin_bootstrap - { - match self.kadem.bootstrap() { - Ok(_) => { - self.bootstrap_state.state = State::Started; - info!("Starting bootstrap"); - } - Err(e) => { - warn!( - "peer id {:?} FAILED TO START BOOTSTRAP {:?} adding peers {:?}", - self.peer_id, e, self.bootstrap_nodes - ); - for (peer, addrs) in self.bootstrap_nodes.clone() { - for addr in addrs { - self.kadem.add_address(&peer, addr); - } - } - } - } - } - - // retry put/gets if they are ready - for _i in 0..self.queued_get_record_queries.len() { - let Some(req) = self.queued_get_record_queries.pop_front() else { - continue; - }; - if req.backoff.is_expired() { - self.get_record( - req.key, - req.notify, - req.num_replicas, - req.backoff, - req.retry_count, - ); - } else { - self.queued_get_record_queries.push_back(req); - } - } - - for _i in 0..self.queued_put_record_queries.len() { - let Some(req) = self.queued_put_record_queries.pop_front() else { - continue; - }; - if req.backoff.is_expired() { - self.put_record(req); - } else { - self.queued_put_record_queries.push_back(req); - } - } - - // poll behaviour which is a passthrough and call inject event - while let Poll::Ready(ready) = NetworkBehaviour::poll(&mut self.kadem, cx) { - match ready { - ToSwarm::GenerateEvent(e) => { - self.dht_handle_event(e); - } - ToSwarm::Dial { opts } => { - return Poll::Ready(ToSwarm::Dial { opts }); - } - ToSwarm::NotifyHandler { - peer_id, - handler, - event, - } => { - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event, - }); - } - ToSwarm::CloseConnection { - peer_id, - connection, - } => { - return Poll::Ready(ToSwarm::CloseConnection { - peer_id, - connection, - }); - } - ToSwarm::ListenOn { opts } => { - return Poll::Ready(ToSwarm::ListenOn { opts }); - } - ToSwarm::RemoveListener { id } => { - return Poll::Ready(ToSwarm::RemoveListener { id }); - } - ToSwarm::NewExternalAddrCandidate(c) => { - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(c)); - } - ToSwarm::ExternalAddrConfirmed(c) => { - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(c)); - } - ToSwarm::ExternalAddrExpired(c) => { - return Poll::Ready(ToSwarm::ExternalAddrExpired(c)); - } - e => { - error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); - } - } - } - if !self.event_queue.is_empty() { - return Poll::Ready(ToSwarm::GenerateEvent(self.event_queue.remove(0))); - } - Poll::Pending - } - - fn on_swarm_event(&mut self, event: libp2p::swarm::derive_prelude::FromSwarm<'_>) { - self.kadem.on_swarm_event(event); - } - - fn on_connection_handler_event( - &mut self, - peer_id: PeerId, - connection_id: libp2p::swarm::derive_prelude::ConnectionId, - event: THandlerOutEvent, - ) { - self.kadem - .on_connection_handler_event(peer_id, connection_id, event); - } - - fn handle_pending_inbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result<(), libp2p::swarm::ConnectionDenied> { - self.kadem - .handle_pending_inbound_connection(connection_id, local_addr, remote_addr) - } - - fn handle_established_inbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.kadem.handle_established_inbound_connection( - connection_id, - peer, - local_addr, - remote_addr, - ) - } - - fn handle_pending_outbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - maybe_peer: Option, - addresses: &[Multiaddr], - effective_role: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.kadem.handle_pending_outbound_connection( - connection_id, - maybe_peer, - addresses, - effective_role, - ) - } - - fn handle_established_outbound_connection( - &mut self, - connection_id: libp2p::swarm::ConnectionId, - peer: PeerId, - addr: &Multiaddr, - role_override: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - self.kadem - .handle_established_outbound_connection(connection_id, peer, addr, role_override) - } -} diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index b63c0d4a88..c81a22b734 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -1,10 +1,8 @@ -use std::num::NonZeroUsize; - -use futures::channel::oneshot::Sender; use libp2p::{ autonat, gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, + kad::store::MemoryStore, request_response::{cbor, OutboundRequestId, ResponseChannel}, Multiaddr, }; @@ -13,11 +11,7 @@ use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; use super::{ - behaviours::{ - dht::{DHTBehaviour, DHTEvent, KadPutQuery}, - exponential_backoff::ExponentialBackoff, - request_response::{Request, Response}, - }, + behaviours::request_response::{Request, Response}, NetworkEventInternal, }; @@ -39,7 +33,7 @@ pub struct NetworkDef { /// purpose: peer routing /// purpose: storing pub key <-> peer id bijection #[debug(skip)] - pub dht: DHTBehaviour, + pub dht: libp2p::kad::Behaviour, /// purpose: identifying the addresses from an outside POV #[debug(skip)] @@ -64,7 +58,7 @@ impl NetworkDef { #[must_use] pub fn new( gossipsub: GossipBehaviour, - dht: DHTBehaviour, + dht: libp2p::kad::Behaviour, identify: IdentifyBehaviour, direct_message: cbor::Behaviour, Vec>, request_response: cbor::Behaviour, @@ -118,36 +112,6 @@ impl NetworkDef { } } -/// DHT functions -impl NetworkDef { - /// Publish a key/value to the kv store. - /// Once replicated upon all nodes, the caller is notified over - /// `chan`. If there is an error, a [`super::error::DHTError`] is - /// sent instead. - pub fn put_record(&mut self, query: KadPutQuery) { - self.dht.put_record(query); - } - - /// Retrieve a value for a key from the DHT. - /// Value (serialized) is sent over `chan`, and if a value is not found, - /// a [`super::error::DHTError`] is sent instead. - pub fn get_record( - &mut self, - key: Vec, - chan: Sender>, - factor: NonZeroUsize, - retry_count: u8, - ) { - self.dht.get_record( - key, - chan, - factor, - ExponentialBackoff::default(), - retry_count, - ); - } -} - /// Request/response functions impl NetworkDef { /// Add a direct request for a given peer @@ -167,8 +131,8 @@ impl From for NetworkEventInternal { } } -impl From for NetworkEventInternal { - fn from(event: DHTEvent) -> Self { +impl From for NetworkEventInternal { + fn from(event: libp2p::kad::Event) -> Self { Self::DHTEvent(event) } } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index fd9f8147f1..ad148bfddc 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -32,10 +32,7 @@ use quic::tokio::Transport as QuicTransport; use serde::{Deserialize, Serialize}; use tracing::instrument; -use self::behaviours::{ - dht::DHTEvent, - request_response::{Request, Response}, -}; +use self::behaviours::request_response::{Request, Response}; pub use self::{ def::NetworkDef, error::NetworkError, @@ -180,7 +177,7 @@ pub enum NetworkEvent { /// only used for event processing before relaying to client pub enum NetworkEventInternal { /// a DHT event - DHTEvent(DHTEvent), + DHTEvent(libp2p::kad::Event), /// a identify event. Is boxed because this event is much larger than the other ones so we want /// to store it on the heap. IdentifyEvent(Box), diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 9d16a1bc8e..2ffd868161 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -16,7 +16,7 @@ use async_compatibility_layer::{ art::async_spawn, channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; -use futures::{select, FutureExt, StreamExt}; +use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; use hotshot_types::constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; use libp2p::{ autonat, @@ -30,7 +30,7 @@ use libp2p::{ Info as IdentifyInfo, }, identity::Keypair, - kad::{store::MemoryStore, Behaviour, Config}, + kad::{store::MemoryStore, Behaviour, Config, Mode, Record}, request_response::{ Behaviour as RequestResponse, Config as RequestResponseConfig, ProtocolSupport, }, @@ -52,12 +52,13 @@ pub use self::{ }, }; use super::{ + behaviours::dht::bootstrap::{self, DHTBootstrapTask, InputEvent}, error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, NetworkNodeType, }; use crate::network::behaviours::{ - dht::{DHTBehaviour, DHTEvent, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, + dht::{DHTBehaviour, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMRequest}, exponential_backoff::ExponentialBackoff, request_response::{Request, RequestResponseState, Response}, @@ -90,8 +91,12 @@ pub struct NetworkNode { request_response_state: RequestResponseState, /// Handler for direct messages direct_message_state: DMBehaviour, + /// Handler for DHT Events + dht_handler: DHTBehaviour, /// Channel to resend requests, set to Some when we call `spawn_listeners` resend_tx: Option>, + /// Send to the bootstrap task to tell it to start a bootstrap + bootstrap_tx: Option>, } impl NetworkNode { @@ -141,7 +146,6 @@ impl NetworkNode { bs_nodes.insert(*peer_id, iter::once(addr.clone()).collect()); } } - behaviour.dht.add_bootstrap_nodes(bs_nodes); } /// Creates a new `Network` with the given settings. @@ -264,7 +268,10 @@ impl NetworkNode { panic!("Replication factor not set"); } - let kadem = Behaviour::with_config(peer_id, MemoryStore::new(peer_id), kconfig); + let mut kadem = Behaviour::with_config(peer_id, MemoryStore::new(peer_id), kconfig); + if config.server_mode { + kadem.set_mode(Some(Mode::Server)); + } let rrconfig = RequestResponseConfig::default(); @@ -294,13 +301,7 @@ impl NetworkNode { let network = NetworkDef::new( gossipsub, - DHTBehaviour::new( - kadem, - peer_id, - config - .replication_factor - .unwrap_or_else(|| NonZeroUsize::new(4).unwrap()), - ), + kadem, identify, direct_message, request_response, @@ -331,14 +332,48 @@ impl NetworkNode { identity, peer_id, swarm, - config, + config: config.clone(), listener_id: None, request_response_state: RequestResponseState::default(), direct_message_state: DMBehaviour::default(), + dht_handler: DHTBehaviour::new( + peer_id, + config + .replication_factor + .unwrap_or(NonZeroUsize::new(4).unwrap()), + ), resend_tx: None, + bootstrap_tx: None, }) } + /// Publish a key/value to the kv store. + /// Once replicated upon all nodes, the caller is notified over + /// `chan`. If there is an error, a [`super::error::DHTError`] is + /// sent instead. + pub fn put_record(&mut self, mut query: KadPutQuery) { + let record = Record::new(query.key.clone(), query.value.clone()); + match self.swarm.behaviour_mut().dht.put_record( + record, + libp2p::kad::Quorum::N(self.dht_handler.get_replication_factor()), + ) { + Err(e) => { + // failed try again later + query.progress = DHTProgress::NotStarted; + query.backoff.start_next(false); + error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); + } + Ok(qid) => { + info!("Success publishing {:?} to DHT", qid); + let query = KadPutQuery { + progress: DHTProgress::InProgress(qid), + ..query + }; + self.dht_handler.put_record(qid, query); + } + } + } + /// event handler for client events /// currectly supported actions include /// - shutting down the swarm @@ -357,13 +392,18 @@ impl NetworkNode { Ok(msg) => { match msg { ClientRequest::BeginBootstrap => { - self.swarm.behaviour_mut().dht.begin_bootstrap(); + debug!("begin bootstrap"); + let _ = self.swarm.behaviour_mut().dht.bootstrap(); } ClientRequest::LookupPeer(pid, chan) => { - self.swarm.behaviour_mut().dht.lookup_peer(pid, chan); + let id = self.swarm.behaviour_mut().dht.get_closest_peers(pid); + self.dht_handler + .in_progress_get_closest_peers + .insert(id, chan); } ClientRequest::GetRoutingTable(chan) => { - self.swarm.behaviour_mut().dht.print_routing_table(); + self.dht_handler + .print_routing_table(&mut self.swarm.behaviour_mut().dht); if chan.send(()).is_err() { warn!("Tried to notify client but client not tracking anymore"); } @@ -376,7 +416,7 @@ impl NetworkNode { value, backoff: ExponentialBackoff::default(), }; - self.swarm.behaviour_mut().put_record(query); + self.put_record(query); } ClientRequest::GetConnectedPeerNum(s) => { if s.send(self.num_connected()).is_err() { @@ -393,11 +433,13 @@ impl NetworkNode { notify, retry_count, } => { - self.swarm.behaviour_mut().get_record( + self.dht_handler.get_record( key, notify, NonZeroUsize::new(NUM_REPLICATED_TO_TRUST).unwrap(), + ExponentialBackoff::default(), retry_count, + &mut self.swarm.behaviour_mut().dht, ); } ClientRequest::IgnorePeers(_peers) => { @@ -558,9 +600,9 @@ impl NetworkNode { } => {} SwarmEvent::Behaviour(b) => { let maybe_event = match b { - NetworkEventInternal::DHTEvent(e) => match e { - DHTEvent::IsBootstrapped => Some(NetworkEvent::IsBootstrapped), - }, + NetworkEventInternal::DHTEvent(e) => self + .dht_handler + .dht_handle_event(e, self.swarm.behaviour_mut().dht.store_mut()), NetworkEventInternal::IdentifyEvent(e) => { // NOTE feed identified peers into kademlia's routing table for peer discovery. if let IdentifyEvent::Received { @@ -583,7 +625,7 @@ impl NetworkNode { // with autonat info!( "local peer {:?} IDENTIFY ADDRS LISTEN: {:?} for peer {:?}, ADDRS OBSERVED: {:?} ", - behaviour.dht.peer_id, peer_id, listen_addrs, observed_addr + self.dht_handler.peer_id , peer_id, listen_addrs, observed_addr ); // into hashset to delete duplicates (I checked: there are duplicates) for addr in listen_addrs.iter().collect::>() { @@ -702,9 +744,11 @@ impl NetworkNode { > { let (s_input, s_output) = unbounded::(); let (r_input, r_output) = unbounded::(); - + let (mut bootstrap_tx, bootstrap_rx) = mpsc::channel(100); self.resend_tx = Some(s_input.clone()); + self.dht_handler.set_bootstrap_sender(bootstrap_tx.clone()); + DHTBootstrapTask::run(bootstrap_rx, s_input.clone()); async_spawn( async move { let mut fuse = s_output.recv().boxed().fuse(); @@ -721,6 +765,7 @@ impl NetworkNode { debug!("peerid {:?}\t\thandling msg {:?}", self.peer_id, msg); let shutdown = self.handle_client_requests(msg).await?; if shutdown { + let _ = bootstrap_tx.send(InputEvent::ShutdownBootstrap).await; break } fuse = s_output.recv().boxed().fuse(); diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 183b58d0ef..147da7f363 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -38,6 +38,9 @@ pub struct NetworkNodeConfig { /// expiratiry for records in DHT #[builder(default)] pub ttl: Option, + /// whether to start in libp2p::kad::Mode::Server mode + #[builder(default = "false")] + pub server_mode: bool, } /// NOTE: `mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high` diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 741cd20060..8ff0dcfb92 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -200,7 +200,8 @@ pub async fn spin_up_swarms( .to_connect_addrs(HashSet::default()) .bound_addr(Some(addr)) .ttl(None) - .republication_interval(None); + .republication_interval(None) + .server_mode(true); let config = config .build() .context(NodeConfigSnafu) @@ -237,6 +238,7 @@ pub async fn spin_up_swarms( .replication_factor(replication_factor) .bound_addr(Some(addr.clone())) .to_connect_addrs(HashSet::default()) + .server_mode(true) .build() .context(NodeConfigSnafu) .context(HandleSnafu)?; diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 9362b1a14c..dce2b74f86 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -66,6 +66,7 @@ mesh_n_low = 4 mesh_outbound_min = 2 mesh_n = 4 online_time = 10 +server_mode = true [web_server_config] url = "http://localhost:9000" diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index f27aad427a..ad22fa25af 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -53,6 +53,8 @@ pub struct Libp2pConfig { pub online_time: u64, /// number of transactions per view pub num_txn_per_round: usize, + /// whether to start in libp2p::kad::Mode::Server mode + pub server_mode: bool, } /// configuration serialized into a file @@ -76,6 +78,8 @@ pub struct Libp2pConfigFile { pub mesh_n: usize, /// time node has been running pub online_time: u64, + /// whether to start in libp2p::kad::Mode::Server mode + pub server_mode: bool, } /// configuration for a web server @@ -481,6 +485,7 @@ impl From> for NetworkC propose_max_round_time: val.config.propose_max_round_time, online_time: libp2p_config.online_time, num_txn_per_round: val.transactions_per_round, + server_mode: libp2p_config.server_mode, }), config: val.config.into(), key_type_name: std::any::type_name::().to_string(), From 011fb0b7d9b4daca092d69028485339dc3ed4903 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:40:24 -0400 Subject: [PATCH 0938/1393] Remove special treatment of view 1 (#2873) --- hotshot/src/lib.rs | 6 +- task-impls/src/consensus.rs | 99 ++++++++++++-------------------- task-impls/src/quorum_vote.rs | 42 ++++---------- testing/src/task_helpers.rs | 2 +- testing/src/test_runner.rs | 2 +- testing/src/view_generator.rs | 3 +- testing/tests/tests_1/message.rs | 1 - types/src/data.rs | 52 +++++++++++++---- types/src/simple_certificate.rs | 35 ++--------- 9 files changed, 99 insertions(+), 143 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ee660ea325..4efc247e87 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -281,7 +281,7 @@ impl> SystemContext { self.internal_event_stream .0 .broadcast_direct(Arc::new(HotShotEvent::QCFormed(either::Left( - QuorumCertificate::genesis(), + self.consensus.read().await.high_qc.clone(), )))) .await .expect("Genesis Broadcast failed"); @@ -684,13 +684,13 @@ impl HotShotInitializer { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); Ok(Self { inner: Leaf::genesis(&instance_state), - instance_state, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), - high_qc: QuorumCertificate::genesis(), + high_qc: QuorumCertificate::genesis(&instance_state), undecided_leafs: Vec::new(), undecided_state: BTreeMap::new(), + instance_state, }) } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 21e4195031..e66fe71e61 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -90,11 +90,13 @@ async fn validate_proposal( let state = Arc::new(validated_state); let delta = Arc::new(state_delta); - let parent_commitment = parent_leaf.commit(); let view = proposal.data.get_view_number(); - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - proposed_leaf.set_parent_commitment(parent_commitment); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + ensure!( + proposed_leaf.get_parent_commitment() == parent_leaf.commit(), + "Proposed leaf does not extend the parent leaf." + ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment // @@ -238,8 +240,10 @@ async fn create_and_send_proposal( upgrade_certificate: upgrade_cert, }; - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); - proposed_leaf.set_parent_commitment(parent_leaf.commit()); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.get_parent_commitment() != parent_leaf.commit() { + return; + } let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) else { @@ -412,14 +416,10 @@ impl, A: ConsensusApi + let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis { - Some(Leaf::genesis(&consensus.instance_state)) - } else { - consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - }; + let parent = consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned(); // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { @@ -432,15 +432,15 @@ impl, A: ConsensusApi + }; let parent_commitment = parent.commit(); - let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); - proposed_leaf.set_parent_commitment(parent_commitment); + let proposed_leaf = Leaf::from_quorum_proposal(proposal); + if proposed_leaf.get_parent_commitment() != parent_commitment { + return false; + } // Validate the DAC. let message = if cert.is_valid_cert(self.committee_membership.as_ref()) { // Validate the block payload commitment for non-genesis DAC. - if !cert.is_genesis - && cert.get_data().payload_commit - != proposal.block_header.payload_commitment() + if cert.get_data().payload_commit != proposal.block_header.payload_commitment() { error!("Block payload commitment does not equal da cert payload commitment. View = {}", *view); return false; @@ -712,48 +712,22 @@ impl, A: ConsensusApi + let consensus = self.consensus.upgradable_read().await; // Get the parent leaf and state. - let parent = if justify_qc.is_genesis { - // Send the `Decide` event for the genesis block if the justify QC is genesis. - let leaf = Leaf::genesis(&consensus.instance_state); - let (validated_state, state_delta) = - TYPES::ValidatedState::genesis(&consensus.instance_state); - let state = Arc::new(validated_state); - broadcast_event( - Event { - view_number: TYPES::Time::genesis(), - event: EventType::Decide { - leaf_chain: Arc::new(vec![LeafInfo::new( - leaf.clone(), - state.clone(), - Some(Arc::new(state_delta)), - None, - )]), - qc: Arc::new(justify_qc.clone()), - block_size: None, - }, - }, - &self.output_event_stream, - ) - .await; - Some((leaf, state)) - } else { - match consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - { - Some(leaf) => { - if let (Some(state), _) = - consensus.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, state.clone())) - } else { - error!("Parent state not found! Consensus internally inconsistent"); - return; - } + let parent = match consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = + consensus.get_state_and_delta(leaf.get_view_number()) + { + Some((leaf, state.clone())) + } else { + error!("Parent state not found! Consensus internally inconsistent"); + return; } - None => None, } + None => None, }; if justify_qc.get_view_number() > consensus.high_qc.view_number { @@ -782,7 +756,7 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - let leaf = Leaf::from_quorum_proposal(&proposal.data); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header( @@ -794,13 +768,15 @@ impl, A: ConsensusApi + view, View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(), + leaf: proposed_leaf.commit(), state, delta: None, }, }, ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); + consensus + .saved_leaves + .insert(proposed_leaf.commit(), proposed_leaf.clone()); if let Err(e) = self .storage @@ -1596,6 +1572,7 @@ impl, A: ConsensusApi + .as_ref() .filter(|cert| cert.is_valid_for_view(&view)) .cloned(); + let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); let consensus = self.consensus.clone(); diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 1bfa6e3ce6..e6e66ca388 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -13,7 +13,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::Consensus, data::Leaf, - event::{Event, EventType, LeafInfo}, + event::{Event, EventType}, message::GeneralConsensusMessage, simple_vote::{QuorumData, QuorumVote}, traits::{ @@ -77,14 +77,16 @@ impl HandleDepOutput for VoteDependencyHandle { payload_commitment = Some(proposal_payload_comm); } let parent_commitment = parent_leaf.commit(); - let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); - proposed_leaf.set_parent_commitment(parent_commitment); + let proposed_leaf = Leaf::from_quorum_proposal(proposal); + if proposed_leaf.get_parent_commitment() != parent_commitment { + return; + } leaf = Some(proposed_leaf); } HotShotEvent::DACertificateValidated(cert) => { let cert_payload_comm = cert.get_data().payload_commit; if let Some(comm) = payload_commitment { - if !cert.is_genesis && cert_payload_comm != comm { + if cert_payload_comm != comm { error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } @@ -317,31 +319,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState { block_header: block_header.clone(), view_number: ViewNumber::new(1), - justify_qc: QuorumCertificate::genesis(), + justify_qc: QuorumCertificate::genesis(&TestInstanceState {}), upgrade_certificate: None, proposal_certificate: None, }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 14a1bff776..b4867629b8 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -219,7 +219,7 @@ where latest_view: None, changes, last_decided_leaf: Leaf::genesis(&TestInstanceState {}), - high_qc: QuorumCertificate::genesis(), + high_qc: QuorumCertificate::genesis(&TestInstanceState {}), }; let spinning_task = TestTask::, SpinningTask>::new( Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 2d6004b173..02a00642d1 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -85,7 +85,7 @@ impl TestView { let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), view_number: genesis_view, - justify_qc: QuorumCertificate::genesis(), + justify_qc: QuorumCertificate::genesis(&TestInstanceState {}), upgrade_certificate: None, proposal_certificate: None, }; @@ -112,7 +112,6 @@ impl TestView { leaf.fill_block_payload_unchecked(TestBlockPayload { transactions: transactions.clone(), }); - leaf.set_parent_commitment(Leaf::genesis(&TestInstanceState {}).commit()); let signature = ::sign(&private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 6dd7ae1c29..b531218b0d 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -42,7 +42,6 @@ fn version_number_at_start_of_serialization() { vote_commitment: data.commit(), view_number, signatures: None, - is_genesis: false, _pd: PhantomData, }; let message = Message { diff --git a/types/src/data.rs b/types/src/data.rs index 17ced1f262..eb64df0f70 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -13,7 +13,7 @@ use std::{ use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; -use committable::{Commitment, Committable, RawCommitmentBuilder}; +use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; use derivative::Derivative; use jf_primitives::vid::VidDisperse as JfVidDisperse; use rand::Rng; @@ -26,7 +26,7 @@ use crate::{ simple_certificate::{ QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, - simple_vote::UpgradeProposalData, + simple_vote::{QuorumData, UpgradeProposalData}, traits::{ block_contents::{ vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, @@ -429,6 +429,24 @@ impl Display for Leaf { } } +impl QuorumCertificate { + #[must_use] + /// Creat the Genesis certificate + pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { + let data = QuorumData { + leaf_commit: Leaf::genesis(instance_state).commit(), + }; + let commit = data.commit(); + Self { + data, + vote_commitment: commit, + view_number: ::genesis(), + signatures: None, + _pd: PhantomData, + } + } +} + impl Leaf { /// Create a new leaf from its components. /// @@ -446,10 +464,23 @@ impl Leaf { let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); let block_header = TYPES::BlockHeader::genesis(instance_state, payload_commitment, metadata); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate { + data: null_quorum_data.clone(), + vote_commitment: null_quorum_data.commit(), + view_number: ::genesis(), + signatures: None, + _pd: PhantomData, + }; + Self { view_number: TYPES::Time::genesis(), - justify_qc: QuorumCertificate::::genesis(), - parent_commitment: fake_commitment(), + justify_qc, + parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, block_header: block_header.clone(), block_payload: Some(payload), @@ -478,11 +509,7 @@ impl Leaf { pub fn get_parent_commitment(&self) -> Commitment { self.parent_commitment } - /// Commitment to this leaf's parent. - pub fn set_parent_commitment(&mut self, commitment: Commitment) { - self.parent_commitment = commitment; - } - /// Get a reference to the block header contained in this leaf. + /// The block header contained in this leaf. pub fn get_block_header(&self) -> &::BlockHeader { &self.block_header } @@ -548,9 +575,10 @@ impl Leaf { self.get_upgrade_certificate(), parent.get_upgrade_certificate(), ) { - // If the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade. Again, this is always fine. - // But, if we have no upgrade certificate on either is the most common case, and is always fine. - (Some(_) | None, None) => {} + // Easiest cases are: + // - no upgrade certificate on either: this is the most common case, and is always fine. + // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. + (None | Some(_), None) => {} // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 5fd4ff04a1..fe18f9d599 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -7,12 +7,12 @@ use std::{ }; use anyhow::{ensure, Result}; -use committable::{Commitment, CommitmentBoundsArkless, Committable}; +use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; use crate::{ - data::{serialize_signature2, Leaf}, + data::serialize_signature2, simple_vote::{ DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, @@ -72,8 +72,6 @@ pub struct SimpleCertificate::QCType>, - /// If this QC is for the genesis block - pub is_genesis: bool, /// phantom data for `THRESHOLD` and `TYPES` pub _pd: PhantomData<(TYPES, THRESHOLD)>, } @@ -91,7 +89,6 @@ impl> vote_commitment, view_number: view, signatures: Some(sig), - is_genesis: false, _pd: PhantomData, } } fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { - if self.is_genesis && self.view_number == TYPES::Time::genesis() { + if self.view_number == TYPES::Time::genesis() { return true; } let real_qc_pp = ::get_public_parameter( @@ -151,30 +147,7 @@ impl> } impl Display for QuorumCertificate { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "view: {:?}, is_genesis: {:?}", - self.view_number, self.is_genesis - ) - } -} - -impl QuorumCertificate { - #[must_use] - /// Creat the Genisis certificate - pub fn genesis() -> Self { - let data = QuorumData { - leaf_commit: Commitment::>::default_commitment_no_preimage(), - }; - let commit = data.commit(); - Self { - data, - vote_commitment: commit, - view_number: ::genesis(), - signatures: None, - is_genesis: true, - _pd: PhantomData, - } + write!(f, "view: {:?}", self.view_number) } } From d3e3c2e89d5ab0ca50086d396f2fc66b80b26c6b Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 11 Apr 2024 09:53:27 +0300 Subject: [PATCH 0939/1393] [CX_CLEANUP] - Integrate QuorumProposalTask with other tasks (#2927) * gated, fully clone the behavior * add propose function, not integrated yet * finalize the proposal send functionality, test next * fix simple failing bugs, debugging main test next * checkpoint * swap view number * quick merge * all success test cases passing * re-add feature gates * remove extra log * tests now passing * satisfy clippy * more clippy fixes * some cleanup * fix compilation * let consensus do what it needs to do * fix build * feature gate quorum proposal recv * fix view number checks * revert --- hotshot/Cargo.toml | 1 + hotshot/src/lib.rs | 29 +- hotshot/src/tasks/task_state.rs | 8 +- task-impls/Cargo.toml | 2 +- task-impls/src/consensus.rs | 17 +- task-impls/src/da.rs | 1 - task-impls/src/events.rs | 6 - task-impls/src/quorum_proposal.rs | 637 +++++++++++++++--- testing/Cargo.toml | 1 + testing/tests/tests_1/quorum_proposal_task.rs | 66 +- 10 files changed, 631 insertions(+), 137 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index d1235f3be9..c8e6a18db0 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -11,6 +11,7 @@ rust-version = { workspace = true } default = ["docs", "doc-images"] example-upgrade = ["hotshot-task-impls/example-upgrade"] gpu-vid = ["hotshot-task-impls/gpu-vid"] +proposal-task = ["hotshot-task-impls/proposal-task"] # Features required for binaries bin-orchestrator = ["clap"] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4efc247e87..dc1924c124 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -12,6 +12,18 @@ pub mod types; pub mod tasks; +#[cfg(feature = "proposal-task")] +use crate::tasks::add_quorum_proposal_task; + +use crate::{ + tasks::{ + add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, + add_transaction_task, add_upgrade_task, add_view_sync_task, + }, + traits::NodeImplementation, + types::{Event, SystemContextHandle}, +}; + use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, @@ -57,15 +69,6 @@ use tasks::{add_request_network_task, add_response_task, add_vid_task}; use tracing::{debug, instrument, trace}; use vbs::version::Version; -use crate::{ - tasks::{ - add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_transaction_task, add_upgrade_task, add_view_sync_task, - }, - traits::NodeImplementation, - types::{Event, SystemContextHandle}, -}; - /// Length, in bytes, of a 512 bit hash pub const H_512: usize = 64; /// Length, in bytes, of a 256 bit hash @@ -602,6 +605,14 @@ impl> SystemContext { &handle, ) .await; + #[cfg(feature = "proposal-task")] + add_quorum_proposal_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; handle } } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 218f2feae0..4af385d152 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -190,7 +190,6 @@ impl> CreateTaskState cur_view: handle.get_cur_view().await, payload_commitment_and_metadata: None, api: handle.clone(), - _pd: PhantomData, vote_collector: None.into(), timeout_vote_collector: None.into(), timeout_task: None, @@ -255,6 +254,13 @@ impl> CreateTaskState consensus, timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + cur_view: handle.get_cur_view().await, + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + storage: handle.storage.clone(), + timeout: handle.hotshot.config.next_view_timeout, + timeout_task: None, + round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, } } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 2994a7308b..7aa3237c99 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -8,7 +8,7 @@ version = { workspace = true } [features] example-upgrade = [] gpu-vid = ["hotshot-types/gpu-vid"] - +proposal-task = [] [dependencies] anyhow = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index e66fe71e61..d689909306 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -66,7 +66,7 @@ type VoteCollectorOption = Option( +pub(crate) async fn validate_proposal( proposal: Proposal>, parent_leaf: Leaf, consensus: Arc>>, @@ -312,9 +312,6 @@ pub struct ConsensusTaskState< /// Consensus api pub api: A, - /// needed to typecheck - pub _pd: PhantomData, - /// Current Vote collection task, with it's view. pub vote_collector: RwLock, QuorumCertificate>>, @@ -491,7 +488,6 @@ impl, A: ConsensusApi + /// Must only update the view and GC if the view actually changes #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus update view", level = "error")] - async fn update_view( &mut self, new_view: TYPES::Time, @@ -620,6 +616,7 @@ impl, A: ConsensusApi + event_stream: Sender>>, ) { match event.as_ref() { + #[cfg(not(feature = "proposal-task"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { let sender = sender.clone(); debug!( @@ -1425,8 +1422,18 @@ impl, A: ConsensusApi + } } + /// Ignores old propose behavior and lets QuorumProposalTask take over. + #[cfg(feature = "proposal-task")] + pub async fn publish_proposal_if_able( + &mut self, + _view: TYPES::Time, + _event_stream: &Sender>>, + ) { + } + /// Sends a proposal if possible from the high qc we have #[allow(clippy::too_many_lines)] + #[cfg(not(feature = "proposal-task"))] pub async fn publish_proposal_if_able( &mut self, view: TYPES::Time, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index a43f452b89..2fc51f7a46 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -318,7 +318,6 @@ impl, A: ConsensusApi + // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number: view, }; - debug!("Sending DA proposal for view {:?}", data.view_number); let message = Proposal { data, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 1ef2684c51..dc67932082 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -139,10 +139,4 @@ pub enum HotShotEvent { UpgradeCertificateFormed(UpgradeCertificate), /// HotShot was upgraded, with a new network version. VersionUpgrade(Version), - /** Quorum Proposal Task **/ - /// Dummy quorum proposal to test if the quorum proposal dependency task works. - DummyQuorumProposalSend(TYPES::Time), - /// All required dependencies of the quorum proposal have been validated and the task is ready - /// to propose. - QuorumProposalDependenciesValidated(TYPES::Time), } diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index e5dbe47d6a..0ff4dbcfff 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -1,10 +1,12 @@ -use std::{collections::HashMap, sync::Arc}; +use crate::{consensus::validate_proposal, helpers::AnyhowTracing}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use committable::Committable; use either::Either; +use futures::future::FutureExt; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::{DependencyTask, HandleDepOutput}, @@ -12,20 +14,28 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::Consensus, - data::QuorumProposal, - event::Event, + constants::LOOK_AHEAD, + data::{Leaf, QuorumProposal}, + event::{Event, EventType, LeafInfo}, message::Proposal, - simple_certificate::ViewSyncFinalizeCertificate2, + simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, + election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, + states::ValidatedState, + storage::Storage, }, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; + +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, info, instrument, warn}; use crate::{ consensus::CommitmentAndMetadata, @@ -33,24 +43,6 @@ use crate::{ helpers::{broadcast_event, cancel_task}, }; -/// Validate a quorum proposal. -#[allow(clippy::needless_pass_by_value)] -fn validate_quorum_proposal( - _quorum_proposal: Proposal>, - _event_sender: Sender>>, -) -> bool { - true -} - -/// Validate a view sync cert or a timeout cert. -#[allow(clippy::needless_pass_by_value)] -fn validate_view_sync_finalize_certificate( - _certificate: ViewSyncFinalizeCertificate2, - _event_sender: Sender>>, -) -> bool { - true -} - /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] enum ProposalDependency { @@ -79,8 +71,160 @@ struct ProposalDependencyHandle { sender: Sender>>, /// Reference to consensus. The replica will require a write lock on this. - #[allow(dead_code)] consensus: Arc>>, + + /// Output events to application + #[allow(dead_code)] + output_event_stream: async_broadcast::Sender>, + + /// Membership for Timeout votes/certs + #[allow(dead_code)] + timeout_membership: Arc, + + /// Membership for Quorum Certs/votes + quorum_membership: Arc, + + /// Our public key + public_key: TYPES::SignatureKey, + + /// Our Private Key + private_key: ::PrivateKey, + + /// View timeout from config. + #[allow(dead_code)] + timeout: u64, + + /// Round start delay from config, in milliseconds. + round_start_delay: u64, + + /// The node's id + #[allow(dead_code)] + id: u64, +} + +impl ProposalDependencyHandle { + /// Sends a proposal if possible from the high qc we have + #[allow(clippy::too_many_lines)] + pub async fn publish_proposal_if_able( + &self, + view: TYPES::Time, + event_stream: &Sender>>, + commit_and_metadata: CommitmentAndMetadata, + ) -> bool { + if self.quorum_membership.get_leader(view) != self.public_key { + // This is expected for view 1, so skipping the logging. + if view != TYPES::Time::new(1) { + error!( + "Somehow we formed a QC but are not the leader for the next view {:?}", + view + ); + } + return false; + } + + let consensus = self.consensus.read().await; + let parent_view_number = &consensus.high_qc.get_view_number(); + let mut reached_decided = false; + + let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { + // This should have been added by the replica? + error!("Couldn't find parent view in state map, waiting for replica to see proposal\n parent view number: {}", **parent_view_number); + return false; + }; + // Leaf hash in view inner does not match high qc hash - Why? + let Some((leaf_commitment, state)) = parent_view.get_leaf_and_state() else { + error!( + ?parent_view_number, + ?parent_view, + "Parent of high QC points to a view without a proposal" + ); + return false; + }; + if leaf_commitment != consensus.high_qc.get_data().leaf_commit { + // NOTE: This happens on the genesis block + debug!( + "They don't equal: {:?} {:?}", + leaf_commitment, + consensus.high_qc.get_data().leaf_commit + ); + } + let Some(leaf) = consensus.saved_leaves.get(&leaf_commitment) else { + error!("Failed to find high QC of parent."); + return false; + }; + if leaf.get_view_number() == consensus.last_decided_view { + reached_decided = true; + } + + let parent_leaf = leaf.clone(); + + let original_parent_hash = parent_leaf.commit(); + + let mut next_parent_hash = original_parent_hash; + + // Walk back until we find a decide + if !reached_decided { + debug!( + "We have not reached decide from view {:?}", + self.view_number + ); + while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { + if next_parent_leaf.get_view_number() <= consensus.last_decided_view { + break; + } + next_parent_hash = next_parent_leaf.get_parent_commitment(); + } + debug!("updated saved leaves"); + // TODO do some sort of sanity check on the view number that it matches decided + } + + // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. + let block_header = TYPES::BlockHeader::new( + state, + &consensus.instance_state, + &parent_leaf, + commit_and_metadata.commitment, + commit_and_metadata.metadata.clone(), + ) + .await; + + // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. + let proposal = QuorumProposal { + block_header: block_header.clone(), + view_number: view, + justify_qc: consensus.high_qc.clone(), + proposal_certificate: None, + upgrade_certificate: None, + }; + + let mut new_leaf = Leaf::from_quorum_proposal(&proposal); + new_leaf.set_parent_commitment(parent_leaf.commit()); + + let Ok(signature) = + TYPES::SignatureKey::sign(&self.private_key, new_leaf.commit().as_ref()) + else { + error!("Failed to sign new_leaf.commit()!"); + return false; + }; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!("Sending proposal for view {:?}", view); + + async_sleep(Duration::from_millis(self.round_start_delay)).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + self.public_key.clone(), + )), + event_stream, + ) + .await; + true + } } impl HandleDepOutput for ProposalDependencyHandle { @@ -95,8 +239,8 @@ impl HandleDepOutput for ProposalDependencyHandle { let mut _view_sync_finalize_cert = None; for event in res.iter().flatten() { match event.as_ref() { - HotShotEvent::QuorumProposalRecv(proposal, _) => { - let proposal_payload_comm = proposal.data.block_header.payload_commitment(); + HotShotEvent::QuorumProposalValidated(proposal, _) => { + let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { return; @@ -138,11 +282,8 @@ impl HandleDepOutput for ProposalDependencyHandle { return; } - broadcast_event( - Arc::new(HotShotEvent::DummyQuorumProposalSend(self.view_number)), - &self.sender, - ) - .await; + self.publish_proposal_if_able(self.view_number, &self.sender, commit_and_metadata.unwrap()) + .await; } } @@ -172,13 +313,36 @@ pub struct QuorumProposalTaskState /// Membership for Quorum Certs/votes pub quorum_membership: Arc, + /// Our public key + pub public_key: TYPES::SignatureKey, + + /// Our Private Key + pub private_key: ::PrivateKey, + + /// View timeout from config. + pub timeout: u64, + + /// Round start delay from config, in milliseconds. + pub round_start_delay: u64, + + /// The view number that this node is executing in. + pub cur_view: TYPES::Time, + + /// timeout task handle + pub timeout_task: Option>, + + /// This node's storage ref + pub storage: Arc>, + + // /// most recent decided upgrade certificate + // pub decided_upgrade_cert: Option>, /// The node's id pub id: u64, } impl> QuorumProposalTaskState { /// Create an event dependency - #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Quorum proposal create event dependency", level = "error")] + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view, view = *self.cur_view), name = "Quorum proposal create event dependency", level = "error")] fn create_event_dependency( &self, dependency_type: ProposalDependency, @@ -192,7 +356,11 @@ impl> QuorumProposalTaskState { if let HotShotEvent::QCFormed(either::Left(qc)) = event { - qc.view_number + warn!( + "QC View number {} View number {}", + *qc.view_number, *view_number + ); + qc.view_number + 1 } else { return false; } @@ -215,8 +383,8 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::QuorumProposalRecv(proposal, _) = event { - proposal.data.view_number + if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { + proposal.view_number } else { return false; } @@ -236,7 +404,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState>>, event: Arc>, ) { - debug!("Attempting to make dependency task for event {:?}", event); + info!("Attempting to make dependency task for event {:?}", event); if self.propose_dependencies.get(&view_number).is_some() { debug!("Task already exists"); return; @@ -292,47 +461,91 @@ impl> QuorumProposalTaskState { + HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { payload_commitment_dependency.mark_as_completed(event.clone()); + info!( + "Node {} Dependency PayloadAndMetadata is complete for view {}!", + self.id, *view_number + ); } - HotShotEvent::QuorumProposalRecv(_, _) => { + HotShotEvent::QuorumProposalValidated(..) => { proposal_dependency.mark_as_completed(event); + info!( + "Node {} Dependency Proposal is complete for view {}!", + self.id, *view_number + ); } HotShotEvent::QCFormed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); + info!( + "Node {} Dependency TimeoutCert is complete for view {}!", + self.id, *view_number + ); } Either::Left(_) => { qc_dependency.mark_as_completed(event); + info!( + "Node {} Dependency QC is complete for view {}!", + self.id, *view_number + ); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { view_sync_dependency.mark_as_completed(event); + info!( + "Node {} Dependency ViewSyncCert is complete for view {}!", + self.id, *view_number + ); } _ => {} }; // We have three cases to consider: - let combined = AndDependency::from_deps(vec![ - OrDependency::from_deps(vec![AndDependency::from_deps(vec![ - payload_commitment_dependency, - ])]), - OrDependency::from_deps(vec![ - // 1. A QCFormed event and QuorumProposalRecv event - AndDependency::from_deps(vec![qc_dependency, proposal_dependency]), - // 2. A timeout cert was received - AndDependency::from_deps(vec![timeout_dependency]), - // 3. A view sync cert was received. - AndDependency::from_deps(vec![view_sync_dependency]), - ]), - ]); + let combined = if *view_number > 1 { + AndDependency::from_deps(vec![ + OrDependency::from_deps(vec![AndDependency::from_deps(vec![ + payload_commitment_dependency, + ])]), + OrDependency::from_deps(vec![ + // 1. A QCFormed event and QuorumProposalValidated event + AndDependency::from_deps(vec![qc_dependency, proposal_dependency]), + // 2. A timeout cert was received + AndDependency::from_deps(vec![timeout_dependency]), + // 3. A view sync cert was received. + AndDependency::from_deps(vec![view_sync_dependency]), + ]), + ]) + } else { + AndDependency::from_deps(vec![ + OrDependency::from_deps(vec![AndDependency::from_deps(vec![ + payload_commitment_dependency, + ])]), + OrDependency::from_deps(vec![ + // 1. A QCFormed event and QuorumProposalValidated event + AndDependency::from_deps(vec![qc_dependency]), + // 2. A timeout cert was received + AndDependency::from_deps(vec![timeout_dependency]), + // 3. A view sync cert was received. + AndDependency::from_deps(vec![view_sync_dependency]), + ]), + ]) + }; let dependency_task = DependencyTask::new( combined, ProposalDependencyHandle { view_number, - sender: event_sender, + sender: event_sender.clone(), consensus: self.consensus.clone(), + output_event_stream: self.output_event_stream.clone(), + timeout_membership: self.timeout_membership.clone(), + quorum_membership: self.quorum_membership.clone(), + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), + timeout: self.timeout, + round_start_delay: self.round_start_delay, + id: self.id, }, ); self.propose_dependencies @@ -343,7 +556,7 @@ impl> QuorumProposalTaskState bool { if *self.latest_proposed_view < *new_view { - debug!( + info!( "Updating next proposal view from {} to {} in the quorum proposal task", *self.latest_proposed_view, *new_view ); @@ -383,6 +596,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { + if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await + { + warn!("Failed to store High QC of QC we formed. Error: {:?}", e); + } + let mut consensus = self.consensus.write().await; consensus.high_qc = qc.clone(); @@ -404,12 +623,9 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { let view = *view; - if view < self.latest_proposed_view { - debug!( - "Payload commitment is from an older view {:?}", - view.clone() - ); - return; - } - - debug!("Got payload commitment and meta {:?}", payload_commitment); + debug!( + "Got payload commitment {:?} for view {view:?}", + payload_commitment + ); self.create_dependency_task_if_new( view, @@ -439,32 +650,34 @@ impl> QuorumProposalTaskState { - let view = view_sync_finalize_cert.view_number; - if view < self.latest_proposed_view { - debug!( - "View sync certificate is from an old view {:?}", - view.clone() + HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { + warn!( + "View Sync Finalize certificate {:?} was invalid", + certificate.get_data() ); return; } - if !validate_view_sync_finalize_certificate( - view_sync_finalize_cert.clone(), - event_sender.clone(), - ) { - return; - } + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *certificate.view_number - 1, + )) + .await; - self.create_dependency_task_if_new(view, event_receiver, event_sender, event); - } - HotShotEvent::QuorumProposalRecv(proposal, _sender) => { - let view = proposal.data.get_view_number(); - if view < self.latest_proposed_view { - debug!("Proposal is from an older view {:?}", proposal.data.clone()); - return; - } + let view = certificate.view_number; + if self.quorum_membership.get_leader(view) == self.public_key { + debug!( + "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", + *certificate.view_number + ); + self.create_dependency_task_if_new(view, event_receiver, event_sender, event); + } + } + HotShotEvent::QuorumProposalRecv(proposal, sender) => { + let sender = sender.clone(); debug!( "Received Quorum Proposal for view {}", *proposal.data.view_number @@ -477,27 +690,256 @@ impl> QuorumProposalTaskState { + if let (Some(state), _) = + consensus.get_state_and_delta(leaf.get_view_number()) + { + Some((leaf, state.clone())) + } else { + error!("Parent state not found! Consensus internally inconsistent"); + return; + } + } + None => None, + } + }; + + if justify_qc.get_view_number() > consensus.high_qc.view_number { + if let Err(e) = self + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + warn!("Failed to store High QC not voting. Error: {:?}", e); + return; + } + } + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + + if justify_qc.get_view_number() > consensus.high_qc.view_number { + debug!("Updating high QC"); + consensus.high_qc = justify_qc.clone(); + } + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some((parent_leaf, parent_state)) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.get_data().leaf_commit + ); + return; + }; + async_spawn( + validate_proposal( + proposal.clone(), + parent_leaf, + self.consensus.clone(), + None, + self.quorum_membership.clone(), + parent_state.clone(), + view_leader_key, + event_sender.clone(), + sender, + self.output_event_stream.clone(), + self.storage.clone(), + ) + .map(AnyhowTracing::err_as_debug), + ); + } + HotShotEvent::QuorumProposalValidated(proposal, _) => { + let current_proposal = Some(proposal.clone()); + let new_view = current_proposal.clone().unwrap().view_number + 1; + + info!( + "Node {} creating dependency task for view {:?} from QuorumProposalRecv", + self.id, new_view + ); + self.create_dependency_task_if_new( - view + 1, + new_view, event_receiver, event_sender, event.clone(), ); } - HotShotEvent::QuorumProposalDependenciesValidated(view) => { - debug!("All proposal dependencies verified for view {:?}", view); - if !self.update_latest_proposed_view(*view).await { - debug!("proposal not updated"); + HotShotEvent::QuorumProposalSend(proposal, _) => { + let view = proposal.data.view_number; + if !self.update_latest_proposed_view(view).await { + warn!("Failed to update latest proposed view"); return; } } _ => {} } } + + /// Must only update the view and GC if the view actually changes + #[instrument(skip_all, fields( + id = self.id, + view = *self.cur_view + ), name = "Consensus update view", level = "error")] + async fn update_view( + &mut self, + new_view: TYPES::Time, + event_stream: &Sender>>, + ) -> bool { + if *self.cur_view < *new_view { + debug!( + "Updating view from {} to {} in consensus task", + *self.cur_view, *new_view + ); + + if *self.cur_view / 100 != *new_view / 100 { + // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): + // switch to info! when INFO logs become less cluttered + error!("Progress: entered view {:>6}", *new_view); + } + + // cancel the old timeout task + if let Some(timeout_task) = self.timeout_task.take() { + cancel_task(timeout_task).await; + } + self.cur_view = new_view; + + // Poll the future leader for lookahead + let lookahead_view = new_view + LOOK_AHEAD; + if self.quorum_membership.get_leader(lookahead_view) != self.public_key { + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( + *lookahead_view, + self.quorum_membership.get_leader(lookahead_view), + )) + .await; + } + + // Start polling for proposals for the new view + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view + 1)) + .await; + + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view + 1)) + .await; + + if self.quorum_membership.get_leader(self.cur_view + 1) == self.public_key { + debug!("Polling for quorum votes for view {}", *self.cur_view); + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view)) + .await; + } + + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; + + // Spawn a timeout task if we did actually update view + let timeout = self.timeout; + self.timeout_task = Some(async_spawn({ + let stream = event_stream.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = self.cur_view + 1; + async move { + async_sleep(Duration::from_millis(timeout)).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + })); + let consensus = self.consensus.upgradable_read().await; + consensus + .metrics + .current_view + .set(usize::try_from(self.cur_view.get_u64()).unwrap()); + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(self.cur_view.get_u64()).unwrap() + > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(self.cur_view.get_u64()).unwrap() + - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), + ); + } + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.update_view(new_view); + drop(consensus); + + return true; + } + false + } } impl> TaskState @@ -509,6 +951,7 @@ impl> TaskState !matches!( event.as_ref(), HotShotEvent::QuorumProposalRecv(_, _) + | HotShotEvent::QuorumProposalValidated(..) | HotShotEvent::QCFormed(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) diff --git a/testing/Cargo.toml b/testing/Cargo.toml index ed741d8ac4..3a5f75aa37 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -10,6 +10,7 @@ default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] +proposal-task = ["hotshot/proposal-task"] [dependencies] automod = "1.0.14" diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 3b430b8481..756c39952e 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,19 +1,23 @@ use hotshot::tasks::inject_quorum_proposal_polls; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::state_types::TestValidatedState; use hotshot_task_impls::events::HotShotEvent::*; use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; -use hotshot_testing::predicates::exact; +use hotshot_testing::predicates::quorum_proposal_send; use hotshot_testing::task_helpers::vid_scheme_from_view_number; use hotshot_testing::{ script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, }; -use hotshot_types::data::{ViewChangeEvidence, ViewNumber}; -use hotshot_types::simple_vote::ViewSyncFinalizeData; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; -use hotshot_types::vid::VidSchemeType; +use hotshot_types::{ + data::{ViewChangeEvidence, ViewNumber}, + simple_vote::ViewSyncFinalizeData, + traits::node_implementation::{ConsensusTime, NodeType}, + utils::{View, ViewInner}, + vid::VidSchemeType, +}; use jf_primitives::vid::VidScheme; fn make_payload_commitment( @@ -44,22 +48,50 @@ async fn test_quorum_proposal_task_quorum_proposal() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut leaves = Vec::new(); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); } + let consensus = handle.get_consensus(); + let mut consensus = consensus.write().await; + + // `find_parent_leaf_and_state` depends on the existence of prior values in the consensus + // state, but since we do not spin up the consensus task, these values must be manually filled + // out. + + // First, insert a parent view whose leaf commitment will be returned in the lower function + // call. + consensus.validated_state_map.insert( + ViewNumber::new(1), + View { + view_inner: ViewInner::Leaf { + leaf: leaves[1].get_parent_commitment(), + state: TestValidatedState::default().into(), + delta: None, + }, + }, + ); + + // Match an entry into the saved leaves for the parent commitment, returning the generated leaf + // for this call. + consensus + .saved_leaves + .insert(leaves[1].get_parent_commitment(), leaves[1].clone()); + + // Release the write lock before proceeding with the test + drop(consensus); let cert = proposals[1].data.justify_qc.clone(); // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct let view_2 = TestScriptStage { inputs: vec![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), QCFormed(either::Left(cert.clone())), SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), ], - outputs: vec![ - exact(DummyQuorumProposalSend(ViewNumber::new(2))), - ], + outputs: vec![quorum_proposal_send()], asserts: vec![], }; @@ -112,9 +144,7 @@ async fn test_quorum_proposal_task_qc_timeout() { QCFormed(either::Right(cert.clone())), SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), ], - outputs: vec![ - exact(DummyQuorumProposalSend(ViewNumber::new(2))), - ], + outputs: vec![quorum_proposal_send()], asserts: vec![], }; @@ -170,9 +200,7 @@ async fn test_quorum_proposal_task_view_sync() { ViewSyncFinalizeCertificate2Recv(cert.clone()), SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), ], - outputs: vec![ - exact(DummyQuorumProposalSend(ViewNumber::new(2))), - ], + outputs: vec![quorum_proposal_send()], asserts: vec![], }; @@ -188,7 +216,6 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_with_incomplete_events() { - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -201,16 +228,21 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut leaves = Vec::new(); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); } // We run the task here at view 2, but this time we ignore the crucial piece of evidence: the // payload commitment and metadata. Instead we send only one of the three "OR" required fields. // This should result in the proposal failing to be sent. let view_2 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], + inputs: vec![QuorumProposalValidated( + proposals[1].data.clone(), + leaves[1].clone(), + )], outputs: vec![], asserts: vec![], }; From fcf12532077cc59910518504cfd0dbb1af664ccd Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 11 Apr 2024 10:19:18 +0300 Subject: [PATCH 0940/1393] update the push cdn (#2937) --- examples/Cargo.toml | 5 ++--- hotshot/Cargo.toml | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 739552c085..a52b60b3e1 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -139,13 +139,12 @@ tracing = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -cdn-client = { workspace = true, features = ["runtime-tokio"] } +cdn-client = { workspace = true } cdn-broker = { workspace = true, features = [ - "runtime-tokio", "strong-consistency", "global-permits", ] } -cdn-marshal = { workspace = true, features = ["runtime-tokio"] } +cdn-marshal = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index c8e6a18db0..1f4e594ea4 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -60,13 +60,12 @@ blake3.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -cdn-client = { workspace = true, features = ["runtime-tokio"] } +cdn-client = { workspace = true } cdn-broker = { workspace = true, features = [ - "runtime-tokio", "strong-consistency", "global-permits", ] } -cdn-marshal = { workspace = true, features = ["runtime-tokio"] } +cdn-marshal = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } From 047095ee80678677deff1d202955d6a7b00d6daa Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 11 Apr 2024 10:52:25 +0300 Subject: [PATCH 0941/1393] fix build (#2947) --- task-impls/src/quorum_proposal.rs | 68 ++++++++++--------------------- 1 file changed, 22 insertions(+), 46 deletions(-) diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 0ff4dbcfff..49efb0675d 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -16,7 +16,7 @@ use hotshot_types::{ consensus::Consensus, constants::LOOK_AHEAD, data::{Leaf, QuorumProposal}, - event::{Event, EventType, LeafInfo}, + event::Event, message::Proposal, simple_certificate::UpgradeCertificate, traits::{ @@ -25,7 +25,6 @@ use hotshot_types::{ network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, - states::ValidatedState, storage::Storage, }, vote::{Certificate, HasViewNumber}, @@ -197,11 +196,10 @@ impl ProposalDependencyHandle { upgrade_certificate: None, }; - let mut new_leaf = Leaf::from_quorum_proposal(&proposal); - new_leaf.set_parent_commitment(parent_leaf.commit()); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); let Ok(signature) = - TYPES::SignatureKey::sign(&self.private_key, new_leaf.commit().as_ref()) + TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) else { error!("Failed to sign new_leaf.commit()!"); return false; @@ -212,7 +210,11 @@ impl ProposalDependencyHandle { signature, _pd: PhantomData, }; - debug!("Sending proposal for view {:?}", view); + debug!( + "Sending null proposal for view {:?} \n {:?}", + proposed_leaf.get_view_number(), + "" + ); async_sleep(Duration::from_millis(self.round_start_delay)).await; broadcast_event( @@ -728,48 +730,22 @@ impl> QuorumProposalTaskState { - if let (Some(state), _) = - consensus.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, state.clone())) - } else { - error!("Parent state not found! Consensus internally inconsistent"); - return; - } + let parent = match consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = + consensus.get_state_and_delta(leaf.get_view_number()) + { + Some((leaf, state.clone())) + } else { + error!("Parent state not found! Consensus internally inconsistent"); + return; } - None => None, } + None => None, }; if justify_qc.get_view_number() > consensus.high_qc.view_number { From 412f8fcd6267436e7d77dc43547497eebf6bc8c2 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 11 Apr 2024 11:40:39 +0300 Subject: [PATCH 0942/1393] Set sever mode for tests, retry bootstrap on startup (#2949) --- hotshot/src/traits/networking/libp2p_network.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 9bcf4edffb..0a81337d94 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -236,6 +236,7 @@ where // the worst case of 7/2+3 > 5 mesh_n: (expected_node_count / 2 + 3), })) + .server_mode(true) .replication_factor(replication_factor) .node_type(NetworkNodeType::Bootstrap) .bound_addr(Some(addr)) @@ -256,6 +257,7 @@ where mesh_outbound_min: 4, mesh_n: 8, })) + .server_mode(true) .replication_factor(replication_factor) .node_type(NetworkNodeType::Regular) .bound_addr(Some(addr)) @@ -586,6 +588,7 @@ impl Libp2pNetwork { while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; + handle.begin_bootstrap().await?; } handle.subscribe(QC_TOPIC.to_string()).await.unwrap(); From 5c002c1bb89ae2de3bb51abb3e541b6caa84c0d6 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Thu, 11 Apr 2024 12:04:50 +0300 Subject: [PATCH 0943/1393] Have BlockPayload::get_transactions return an iterator (#2946) This means that the payload is no longer required to explicitly store a list of transactions as a field. We also default-impl a couple other trait functions using this iterator. --- example-types/src/block_types.rs | 17 +++++------------ task-impls/src/transactions.rs | 5 ++--- types/src/traits/block_contents.rs | 16 ++++++++++++++-- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index c053e758ea..6fc29f4a20 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -169,16 +169,6 @@ impl BlockPayload for TestBlockPayload { Ok(TestTransaction::encode(self.transactions.clone())?.into_iter()) } - fn transaction_commitments( - &self, - _metadata: &Self::Metadata, - ) -> Vec> { - self.transactions - .iter() - .map(committable::Committable::commit) - .collect() - } - fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { let mut digest = sha2::Sha256::new(); for txn in &self.transactions { @@ -187,8 +177,11 @@ impl BlockPayload for TestBlockPayload { BuilderCommitment::from_raw_digest(digest.finalize()) } - fn get_transactions(&self, _metadata: &Self::Metadata) -> &Vec { - &self.transactions + fn get_transactions<'a>( + &'a self, + _metadata: &'a Self::Metadata, + ) -> impl 'a + Iterator { + self.transactions.iter().cloned() } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 9ae88cb95f..6adfaff3c6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -154,8 +154,7 @@ impl< )> = None; while task_start_time.elapsed() < self.api.propose_max_round_time() && latest_block.as_ref().map_or(true, |(data, _)| { - data.block_payload.get_transactions(&data.metadata).len() - < self.api.min_transactions() + data.block_payload.num_transactions(&data.metadata) < self.api.min_transactions() }) { let mut available_blocks = match self @@ -212,7 +211,7 @@ impl< } }; - let num_txns = block.block_payload.get_transactions(&block.metadata).len(); + let num_txns = block.block_payload.num_transactions(&block.metadata); latest_block = Some((block, header_input)); if num_txns >= self.api.min_transactions() { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 33bc14a0d7..78f974cbe9 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -89,13 +89,25 @@ pub trait BlockPayload: fn transaction_commitments( &self, metadata: &Self::Metadata, - ) -> Vec>; + ) -> Vec> { + self.get_transactions(metadata) + .map(|tx| tx.commit()) + .collect() + } + + /// Number of transactions in the block. + fn num_transactions(&self, metadata: &Self::Metadata) -> usize { + self.get_transactions(metadata).count() + } /// Generate commitment that builders use to sign block options. fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; /// Get the transactions in the payload. - fn get_transactions(&self, metadata: &Self::Metadata) -> &Vec; + fn get_transactions<'a>( + &'a self, + metadata: &'a Self::Metadata, + ) -> impl 'a + Iterator; } /// extra functions required on block to be usable by hotshot-testing From b24e200b06fe04d75b5da22144414ce5c4ab9f96 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 11 Apr 2024 12:18:49 +0300 Subject: [PATCH 0944/1393] Only Append VID and Payload once Right Before Voting (#2934) * append VID only right before vote * fix webserver for multi vid * append da to storage right when we vote * merge fixes * fix unit tests * lint * revert chnages to saved payloads * Move append_vid in quorum vote task * Only accept own share in quorum_vote task * fix polling --- hotshot/Cargo.toml | 1 + hotshot/src/lib.rs | 4 +- .../traits/networking/web_server_network.rs | 40 +++++++++++---- task-impls/src/consensus.rs | 48 ++++++++++-------- task-impls/src/da.rs | 15 ++++-- task-impls/src/events.rs | 2 +- task-impls/src/quorum_vote.rs | 50 ++++++++++++------- task-impls/src/response.rs | 23 +++++---- testing/src/task_helpers.rs | 14 ++++++ testing/tests/tests_1/consensus_task.rs | 17 ++++--- testing/tests/tests_1/proposal_ordering.rs | 4 +- testing/tests/tests_1/quorum_vote_task.rs | 11 ++-- testing/tests/tests_1/upgrade_task.rs | 23 +++++---- types/src/consensus.rs | 3 +- web_server/src/lib.rs | 13 +++-- 15 files changed, 165 insertions(+), 103 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 1f4e594ea4..7222af901e 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -57,6 +57,7 @@ vbs = { workspace = true } jf-primitives.workspace = true hotshot-orchestrator = { path = "../orchestrator" } blake3.workspace = true +sha2 = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index dc1924c124..f4a625ef13 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -61,6 +61,7 @@ use hotshot_types::{ }, HotShotConfig, }; + // -- Rexports // External /// Reexport rand crate @@ -222,7 +223,8 @@ impl> SystemContext { return Err(HotShotError::BlockError { source: e }); } }; - saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns.clone()); + + saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); } let start_view = initializer.start_view; diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 8db761c836..15a03622f0 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -24,7 +24,7 @@ use derive_more::{Deref, DerefMut}; use hotshot_types::{ boxed_sync, constants::{Version01, VERSION_0_1}, - message::{Message, MessagePurpose}, + message::{CommitteeConsensusMessage, Message, MessageKind, MessagePurpose, SequencingMessage}, traits::{ network::{ AsyncGenerator, ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, @@ -171,7 +171,7 @@ impl TaskMap { #[derive(Debug)] struct Inner { /// Our own key - _own_key: TYPES::SignatureKey, + own_key: TYPES::SignatureKey, /// Queue for messages poll_queue_0_1: Arc>>>>, /// Client is running @@ -327,14 +327,30 @@ impl Inner { - // TODO copy-pasted from `MessagePurpose::Proposal` https://github.com/EspressoSystems/HotShot/issues/1690 - + let RecvMsg { + message: Some(message), + } = deserialized_message.clone() + else { + return false; + }; + let Message { + sender: _, + kind: + MessageKind::Consensus(SequencingMessage::Committee( + CommitteeConsensusMessage::VidDisperseMsg(vid), + )), + } = message + else { + return false; + }; + if vid.data.recipient_key != self.own_key { + // error!("Key {:?} does not match ours for VID", vid.data.recipient_key); + return false; + } self.poll_queue_0_1 .write() .await .push(deserialized_message.clone()); - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view return true; } @@ -588,7 +604,7 @@ impl connected: AtomicBool::new(false), client, wait_between_polls, - _own_key: key, + own_key: key, is_da: is_da_server, tx_index: Arc::default(), proposal_task_map: Arc::default(), @@ -911,9 +927,11 @@ impl } // Cancel old, stale tasks - task_map - .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVIDDisperse) - .await; + if view_number > 2 { + task_map + .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVIDDisperse) + .await; + } } ConsensusIntentEvent::PollForLatestProposal => { // Only start this task if we haven't already started it. @@ -978,7 +996,7 @@ impl // Only start this task if we haven't already started it. let mut cancel_handle = self.inner.upgrade_vote_task.write().await; if cancel_handle.is_none() { - error!("Starting poll for upgrade proposals!"); + debug!("Starting poll for upgrade proposals!"); let inner = self.inner.clone(); // Create sender and receiver for cancelling the task diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index d689909306..20320f8e21 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -388,14 +388,13 @@ impl, A: ConsensusApi + if let Some(proposal) = &self.current_proposal { let consensus = self.consensus.read().await; // Only vote if you has seen the VID share for this view - if let Some(_vid_share) = consensus.vid_shares.get(&proposal.view_number) { - } else { + let Some(vid_shares) = consensus.vid_shares.get(&proposal.view_number) else { debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", proposal.view_number ); return false; - } + }; if let Some(upgrade_cert) = &self.decided_upgrade_cert { if upgrade_cert.in_interim(self.cur_view) @@ -468,6 +467,19 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); + // Add to the storage that we have received the VID disperse for a specific view + if let Some(vid_share) = vid_shares.get(&self.public_key) { + if let Err(e) = self.storage.write().await.append_vid(vid_share).await { + error!( + "Failed to store VID Disperse Proposal with error {:?}, aborting vote", + e + ); + return false; + } + } else { + error!("Did not get a VID share for our public key, aborting vote"); + return false; + } broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), event_stream) .await; return true; @@ -922,7 +934,7 @@ impl, A: ConsensusApi + .vid_shares .get(&leaf.get_view_number()) .unwrap_or(&HashMap::new()) - .get(&self.public_key).cloned(); + .get(&self.public_key).cloned().map(|prop| prop.data); // Add our data into a new `LeafInfo` leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid_share)); @@ -1220,30 +1232,22 @@ impl, A: ConsensusApi + return; } - // stop polling for the received disperse after verifying it's valid - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( - *disperse.data.view_number, - )) - .await; - - // Add to the storage that we have received the VID disperse for a specific view - if let Err(e) = self.storage.write().await.append_vid(disperse).await { - error!( - "Failed to store VID Disperse Proposal with error {:?}, aborting vote", - e - ); - return; - } - self.consensus .write() .await .vid_shares .entry(view) .or_default() - .insert(disperse.data.recipient_key.clone(), disperse.data.clone()); - + .insert(disperse.data.recipient_key.clone(), disperse.clone()); + if disperse.data.recipient_key != self.public_key { + return; + } + // stop polling for the received disperse after verifying it's valid + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( + *disperse.data.view_number, + )) + .await; if self.vote_if_able(&event_stream).await { self.current_proposal = None; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 2fc51f7a46..58555df1f3 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -121,6 +121,17 @@ impl, A: ConsensusApi + return None; } + if self + .consensus + .read() + .await + .saved_payloads + .contains_key(&view) + { + warn!("Received DA proposal for view {:?} but we already have a payload for that view. Throwing it away", view); + return None; + } + let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? @@ -160,7 +171,6 @@ impl, A: ConsensusApi + ); return None; } - if let Err(e) = self.storage.write().await.append_da(proposal).await { error!( "Failed to store DA Proposal with error {:?}, aborting vote", @@ -189,9 +199,6 @@ impl, A: ConsensusApi + return None; }; - // ED Don't think this is necessary? - // self.cur_view = view; - debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); broadcast_event(Arc::new(HotShotEvent::DAVoteSend(vote)), &event_stream).await; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index dc67932082..8d59fbdf26 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -126,7 +126,7 @@ pub enum HotShotEvent { /// Like [`HotShotEvent::DAProposalRecv`]. VIDShareRecv(Proposal>), /// VID share data is validated. - VIDShareValidated(VidDisperseShare), + VIDShareValidated(Proposal>), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index e6e66ca388..682802b8bf 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -49,21 +49,26 @@ enum VoteDependency { } /// Handler for the vote dependency. -struct VoteDependencyHandle { +struct VoteDependencyHandle> { /// Public key. pub public_key: TYPES::SignatureKey, /// Private Key. pub private_key: ::PrivateKey, + /// Reference to the storage. + pub storage: Arc>, /// View number to vote on. view_number: TYPES::Time, /// Event sender. sender: Sender>>, } -impl HandleDepOutput for VoteDependencyHandle { +impl + 'static> HandleDepOutput + for VoteDependencyHandle +{ type Output = Vec>>; async fn handle_dep_result(self, res: Self::Output) { let mut payload_commitment = None; let mut leaf = None; + let mut disperse_share = None; for event in res { match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { @@ -95,7 +100,8 @@ impl HandleDepOutput for VoteDependencyHandle { } } HotShotEvent::VIDShareValidated(share) => { - let vid_payload_commitment = share.payload_commitment; + let vid_payload_commitment = share.data.payload_commitment; + disperse_share = Some(share.clone()); if let Some(comm) = payload_commitment { if vid_payload_commitment != comm { error!("VID has inconsistent payload commitment with quorum proposal or DAC."); @@ -139,6 +145,14 @@ impl HandleDepOutput for VoteDependencyHandle { "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); + // Add to the storage. + let Some(disperse) = disperse_share else { + return; + }; + if let Err(e) = self.storage.write().await.append_vid(&disperse).await { + error!("Failed to store VID share with error {:?}", e); + return; + } broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; } } @@ -215,7 +229,7 @@ impl> QuorumVoteTaskState { if let HotShotEvent::VIDShareValidated(disperse) = event { - disperse.view_number + disperse.data.view_number } else { return false; } @@ -257,6 +271,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState NetworkResponseState { &self, view: TYPES::Time, key: &TYPES::SignatureKey, - ) -> Option> { + ) -> Option>> { let consensus = self.consensus.upgradable_read().await; let contained = consensus .vid_shares @@ -137,11 +139,13 @@ impl NetworkResponseState { for share in shares { let s = share.clone(); let key: ::SignatureKey = s.recipient_key; - consensus - .vid_shares - .entry(view) - .or_default() - .insert(key, share); + if let Some(prop) = share.to_proposal(&self.private_key) { + consensus + .vid_shares + .entry(view) + .or_default() + .insert(key, prop); + } } return consensus.vid_shares.get(&view)?.get(key).cloned(); } @@ -157,11 +161,8 @@ impl NetworkResponseState { let Some(share) = self.get_or_calc_vid_share(view, &pub_key).await else { return self.make_msg(ResponseMessage::NotFound); }; - let Some(prop) = share.to_proposal(&self.private_key) else { - return self.make_msg(ResponseMessage::NotFound); - }; let seq_msg = - SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(prop)); + SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(share)); self.make_msg(ResponseMessage::Found(seq_msg)) } // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index e56f4adf9f..5edec9e75d 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -151,6 +151,20 @@ pub fn build_cert< cert } +pub fn get_vid_share( + shares: &[Proposal>], + pub_key: TYPES::SignatureKey, +) -> Proposal> { + shares + .iter() + .filter(|s| s.data.recipient_key == pub_key) + .cloned() + .collect::>() + .first() + .expect("No VID for key") + .clone() +} + /// create signature /// # Panics /// if fails to convert node id into keypair diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index bc0ad97c05..2564f307ff 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -3,6 +3,7 @@ use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::task_helpers::key_pair_for_id; +use hotshot_testing::task_helpers::get_vid_share; use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_testing::{ predicates::{ @@ -57,7 +58,7 @@ async fn test_consensus_task() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -136,7 +137,7 @@ async fn test_consensus_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ @@ -188,7 +189,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -200,7 +201,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VIDShareRecv(vids[1].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), DACertificateRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; @@ -300,7 +301,7 @@ async fn test_view_sync_finalize_propose() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -423,7 +424,7 @@ async fn test_view_sync_finalize_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -525,7 +526,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -621,7 +622,7 @@ async fn test_vid_disperse_storage_failure() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 5463bae541..02e5470ff0 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -3,7 +3,7 @@ use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::{exact, is_at_view_number, quorum_proposal_send, quorum_proposal_validated}, - task_helpers::vid_scheme_from_view_number, + task_helpers::{vid_scheme_from_view_number, get_vid_share}, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; @@ -52,7 +52,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DACertificateRecv(dacs[0].clone()), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index fb9cdea7de..173eb78f53 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -2,6 +2,7 @@ use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_testing::task_helpers::get_vid_share; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -38,7 +39,7 @@ async fn test_quorum_vote_task_success() { exact(ViewChange(ViewNumber::new(2))), quorum_proposal_validated(), exact(DACertificateValidated(view.da_certificate.clone())), - exact(VIDShareValidated(view.vid_proposal.0[0].data.clone())), + exact(VIDShareValidated(view.vid_proposal.0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), quorum_vote_send(), ], @@ -88,12 +89,12 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_dac = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), quorum_proposal_validated(), - exact(VIDShareValidated(vids[0].0[0].data.clone())), + exact(VIDShareValidated(vids[0].0[0].clone())), ], asserts: vec![], }; @@ -112,11 +113,11 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_quorum_proposal = TestScriptStage { inputs: vec![ DACertificateRecv(dacs[2].clone()), - VIDShareRecv(vids[2].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), ], outputs: vec![ exact(DACertificateValidated(dacs[2].clone())), - exact(VIDShareValidated(vids[2].0[0].data.clone())), + exact(VIDShareValidated(vids[2].0[0].clone())), ], asserts: vec![], }; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 570e9887c2..fee835763e 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -13,6 +13,7 @@ use hotshot_testing::{ script::{Expectations, TaskScript}, view_generator::TestViewGenerator, }; +use hotshot_testing::task_helpers::get_vid_share; use hotshot_types::{ data::ViewNumber, simple_vote::UpgradeProposalData, @@ -75,7 +76,7 @@ async fn test_consensus_task_upgrade() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), DACertificateRecv(dacs[0].clone()), ], outputs: vec![ @@ -89,7 +90,7 @@ async fn test_consensus_task_upgrade() { let view_2 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VIDShareRecv(vids[1].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), DACertificateRecv(dacs[1].clone()), ], outputs: vec![ @@ -104,7 +105,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DACertificateRecv(dacs[2].clone()), - VIDShareRecv(vids[2].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), @@ -119,7 +120,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DACertificateRecv(dacs[3].clone()), - VIDShareRecv(vids[3].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), @@ -238,7 +239,7 @@ async fn test_upgrade_and_consensus_task() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), DACertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, @@ -426,12 +427,12 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vids[0].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), DACertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VIDShareRecv(vids[1].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), DACertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, @@ -441,7 +442,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[2].clone()), - VIDShareRecv(vids[2].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, (), @@ -451,7 +452,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[3].clone()), - VIDShareRecv(vids[3].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, (), @@ -461,7 +462,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[4].clone()), - VIDShareRecv(vids[4].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[4].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, (), @@ -480,7 +481,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[6].clone()), - VIDShareRecv(vids[6].0[0].clone()), + VIDShareRecv(get_vid_share(&vids[6].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, (), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 7e09165e5a..62509b9065 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -13,6 +13,7 @@ pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf, VidDisperseShare}, error::HotShotError, + message::Proposal, simple_certificate::{DACertificate, QuorumCertificate}, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, @@ -28,7 +29,7 @@ pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` pub type VidShares = BTreeMap< ::Time, - HashMap<::SignatureKey, VidDisperseShare>, + HashMap<::SignatureKey, Proposal>>, >; /// A reference to the consensus algorithm diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index ca294063de..d13ce63b79 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -71,7 +71,7 @@ struct WebServerState { /// view sync: view number of oldest votes in memory oldest_view_sync_vote: u64, /// view number -> (secret, string) - vid_disperses: HashMap)>, + vid_disperses: HashMap>>, /// view for the oldest vid disperal oldest_vid_disperse: u64, /// view of most recent vid dispersal @@ -344,13 +344,13 @@ impl WebServerDataSource for WebServerState { fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error> { match self.vid_disperses.get(&view_number) { Some(disperse) => { - if disperse.1.is_empty() { + if disperse.is_empty() { Err(ServerError { status: StatusCode::NotImplemented, message: format!("VID disperse not found for view {view_number}"), }) } else { - Ok(Some(vec![disperse.1.clone()])) + Ok(Some(disperse.clone())) } } None => Err(ServerError { @@ -692,8 +692,7 @@ impl WebServerDataSource for WebServerState { Ok(()) } - fn post_vid_disperse(&mut self, view_number: u64, mut disperse: Vec) -> Result<(), Error> { - info!("Received VID disperse for view {}", view_number); + fn post_vid_disperse(&mut self, view_number: u64, disperse: Vec) -> Result<(), Error> { if view_number > self.recent_vid_disperse { self.recent_vid_disperse = view_number; } @@ -707,8 +706,8 @@ impl WebServerDataSource for WebServerState { } self.vid_disperses .entry(view_number) - .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut disperse)) - .or_insert_with(|| (String::new(), disperse)); + .or_default() + .push(disperse); Ok(()) } From 1bbe470d9ebf9ee230c8cdad9553290582b25cac Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Thu, 11 Apr 2024 13:30:53 +0300 Subject: [PATCH 0945/1393] Builder Key Type changes (#2948) * add builder signature trait * impl fn * remove unused imports * add params to blockheader * fix lint * introduce builder fee const * update comments * solving MC * update realted changes * remove header changes * add builder message sender and signature * fix test --- builder-api/api/builder.toml | 10 ++-- builder-api/src/block_info.rs | 25 +++++----- builder-api/src/builder.rs | 11 +++-- builder-api/src/data_source.rs | 14 ++++-- example-types/src/block_types.rs | 9 ++++ example-types/src/node_types.rs | 6 ++- task-impls/src/builder.rs | 15 ++++-- task-impls/src/consensus.rs | 1 - task-impls/src/quorum_proposal.rs | 11 ++--- task-impls/src/transactions.rs | 19 ++++++-- testing/src/block_builder.rs | 57 ++++++++++++++-------- testing/tests/tests_1/block_builder.rs | 19 +++----- testing/tests/tests_1/proposal_ordering.rs | 11 ++--- testing/tests/tests_3/memory_network.rs | 3 +- types/src/signature_key.rs | 41 +++++++++++++++- types/src/traits/block_contents.rs | 6 +++ types/src/traits/node_implementation.rs | 4 ++ types/src/traits/signature_key.rs | 56 +++++++++++++++++++++ 18 files changed, 241 insertions(+), 77 deletions(-) diff --git a/builder-api/api/builder.toml b/builder-api/api/builder.toml index b906856e0d..6716742d17 100644 --- a/builder-api/api/builder.toml +++ b/builder-api/api/builder.toml @@ -27,8 +27,10 @@ DESCRIPTION = "" FORMAT_VERSION = "0.1.0" [route.available_blocks] -PATH = ["availableblocks/:parent_hash"] +PATH = ["availableblocks/:parent_hash/:sender/:signature"] ":parent_hash" = "TaggedBase64" +":sender" = "TaggedBase64" +":signature" = "TaggedBase64" DOC = """ Get descriptions for all block candidates based on a specific parent block. @@ -45,8 +47,9 @@ Returns """ [route.claim_block] -PATH = ["claimblock/:block_hash/:signature"] +PATH = ["claimblock/:block_hash/:sender/:signature"] ":block_hash" = "TaggedBase64" +":sender" = "TaggedBase64" ":signature" = "TaggedBase64" DOC = """ Get the specified block candidate. @@ -55,8 +58,9 @@ Returns application-specific encoded transactions type """ [route.claim_header_input] -PATH = ["claimheaderinput/:block_hash/:signature"] +PATH = ["claimheaderinput/:block_hash/:sender/:signature"] ":block_hash" = "TaggedBase64" +":sender" = "TaggedBase64" ":signature" = "TaggedBase64" DOC = """ Get the specified block candidate. diff --git a/builder-api/src/block_info.rs b/builder-api/src/block_info.rs index 236bb4af83..df5bdcbe69 100644 --- a/builder-api/src/block_info.rs +++ b/builder-api/src/block_info.rs @@ -1,7 +1,7 @@ use std::{hash::Hash, marker::PhantomData}; use hotshot_types::{ - traits::{node_implementation::NodeType, signature_key::SignatureKey, BlockPayload}, + traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey, BlockPayload}, utils::BuilderCommitment, vid::VidCommitment, }; @@ -9,13 +9,14 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] -pub struct AvailableBlockInfo { +pub struct AvailableBlockInfo { pub block_hash: BuilderCommitment, pub block_size: u64, pub offered_fee: u64, - pub signature: <::SignatureKey as SignatureKey>::PureAssembledSignatureType, - pub sender: ::SignatureKey, - pub _phantom: PhantomData, + pub signature: + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, + pub sender: ::BuilderSignatureKey, + pub _phantom: PhantomData, } #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] @@ -23,15 +24,17 @@ pub struct AvailableBlockInfo { pub struct AvailableBlockData { pub block_payload: TYPES::BlockPayload, pub metadata: ::Metadata, - pub signature: ::PureAssembledSignatureType, - pub sender: TYPES::SignatureKey, + pub signature: + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, + pub sender: ::BuilderSignatureKey, } #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] -pub struct AvailableBlockHeaderInput { +pub struct AvailableBlockHeaderInput { pub vid_commitment: VidCommitment, - pub signature: <::SignatureKey as SignatureKey>::PureAssembledSignatureType, - pub sender: ::SignatureKey, - pub _phantom: PhantomData, + pub signature: + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, + pub sender: ::BuilderSignatureKey, + pub _phantom: PhantomData, } diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 2ddfe55540..592dc4262b 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -123,6 +123,7 @@ where State: 'static + Send + Sync + ReadState, ::State: Send + Sync + BuilderDataSource, Types: NodeType, + for<'a> >::Error: Display, for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, @@ -136,8 +137,10 @@ where .get("available_blocks", |req, state| { async move { let hash = req.blob_param("parent_hash")?; + let sender = req.blob_param("sender")?; + let signature = req.blob_param("signature")?; state - .get_available_blocks(&hash) + .get_available_blocks(&hash, sender, &signature) .await .context(BlockAvailableSnafu { resource: hash.to_string(), @@ -148,9 +151,10 @@ where .get("claim_block", |req, state| { async move { let hash: BuilderCommitment = req.blob_param("block_hash")?; + let sender = req.blob_param("sender")?; let signature = req.blob_param("signature")?; state - .claim_block(&hash, &signature) + .claim_block(&hash, sender, &signature) .await .context(BlockClaimSnafu { resource: hash.to_string(), @@ -161,9 +165,10 @@ where .get("claim_header_input", |req, state| { async move { let hash: BuilderCommitment = req.blob_param("block_hash")?; + let sender = req.blob_param("sender")?; let signature = req.blob_param("signature")?; state - .claim_block_header_input(&hash, &signature) + .claim_block_header_input(&hash, sender, &signature) .await .context(BlockClaimSnafu { resource: hash.to_string(), diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index 3ec3ee0c1f..90641e1358 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -12,28 +12,32 @@ use crate::{ #[async_trait] pub trait BuilderDataSource { - // To get the list of available blocks + /// To get the list of available blocks async fn get_available_blocks( &self, for_parent: &VidCommitment, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, ) -> Result>, BuildError>; - // to claim a block from the list of provided available blocks + /// to claim a block from the list of provided available blocks async fn claim_block( &self, block_hash: &BuilderCommitment, + sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; - // To claim a block header input + /// To claim a block header input async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, + sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; - // To get the builder address - async fn get_builder_address(&self) -> Result; + /// To get the builder address + async fn get_builder_address(&self) -> Result; } #[async_trait] diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 6fc29f4a20..35312c2fde 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -244,6 +244,15 @@ impl> Block fn metadata(&self) -> &::Metadata { &() } + + fn builder_commitment( + &self, + _metadata: &::Metadata, + ) -> BuilderCommitment { + let mut digest = sha2::Sha256::new(); + digest.update(self.payload_commitment.as_ref()); + BuilderCommitment::from_raw_digest(digest.finalize()) + } } impl Committable for TestBlockHeader { diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 8da0ef4351..0f45946c6f 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -6,7 +6,10 @@ use hotshot::traits::{ NodeImplementation, }; use hotshot_types::{ - constants::WebServerVersion, data::ViewNumber, message::Message, signature_key::BLSPubKey, + constants::WebServerVersion, + data::ViewNumber, + message::Message, + signature_key::{BLSPubKey, BuilderKey}, traits::node_implementation::NodeType, }; use serde::{Deserialize, Serialize}; @@ -43,6 +46,7 @@ impl NodeType for TestTypes { type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; + type BuilderSignatureKey = BuilderKey; } /// The Push CDN implementation diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 0e4ca78686..b15aa6e267 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -105,9 +105,14 @@ impl BuilderClient { pub async fn get_available_blocks( &self, parent: VidCommitment, + sender: TYPES::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result>, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner - .get(&format!("availableblocks/{parent}")) + .get(&format!( + "availableblocks/{parent}/{sender}/{encoded_signature}" + )) .send() .await .map_err(Into::into) @@ -121,11 +126,14 @@ impl BuilderClient { pub async fn claim_block( &self, block_hash: BuilderCommitment, + sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner - .get(&format!("claimblock/{block_hash}/{encoded_signature}")) + .get(&format!( + "claimblock/{block_hash}/{sender}/{encoded_signature}" + )) .send() .await .map_err(Into::into) @@ -139,12 +147,13 @@ impl BuilderClient { pub async fn claim_block_header_input( &self, block_hash: BuilderCommitment, + sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner .get(&format!( - "claimheaderinput/{block_hash}/{encoded_signature}" + "claimheaderinput/{block_hash}/{sender}/{encoded_signature}" )) .send() .await diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 20320f8e21..69850a7392 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -50,7 +50,6 @@ use crate::{ create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, }, }; - /// Alias for the block payload commitment and the associated metadata. pub struct CommitmentAndMetadata { /// Vid Commitment diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 49efb0675d..de64c54979 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -30,17 +30,16 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; - use crate::{ consensus::CommitmentAndMetadata, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, info, instrument, warn}; /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 6adfaff3c6..e8c1c447fc 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -157,9 +157,20 @@ impl< data.block_payload.num_transactions(&data.metadata) < self.api.min_transactions() }) { + let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( + &self.private_key, + last_leaf.get_block_header().payload_commitment().as_ref(), + ) else { + error!("Failed to sign block hash"); + continue; + }; let mut available_blocks = match self .builder_client - .get_available_blocks(last_leaf.get_block_header().payload_commitment()) + .get_available_blocks( + last_leaf.get_block_header().payload_commitment(), + self.public_key.clone(), + &request_signature, + ) .await { Ok(blocks) => blocks, @@ -182,7 +193,7 @@ impl< continue; } - let Ok(signature) = <::SignatureKey as SignatureKey>::sign( + let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( &self.private_key, block_info.block_hash.as_ref(), ) else { @@ -191,8 +202,8 @@ impl< }; let (block, header_input) = futures::join! { - self.builder_client.claim_block(block_info.block_hash.clone(), &signature), - self.builder_client.claim_block_header_input(block_info.block_hash, &signature) + self.builder_client.claim_block(block_info.block_hash.clone(), self.public_key.clone(), &request_signature), + self.builder_client.claim_block_header_input(block_info.block_hash, self.public_key.clone(), &request_signature) }; let block = match block { diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index dd524bb580..2c47da52e9 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -27,6 +27,7 @@ use hotshot_types::{ block_contents::{vid_commitment, BlockHeader, Transaction}, election::Membership, node_implementation::NodeType, + signature_key::BuilderSignatureKey, }, utils::BuilderCommitment, vid::VidCommitment, @@ -51,6 +52,7 @@ where for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, + for<'a> >::Error: Display, { async fn start( _membership: Arc, @@ -70,6 +72,7 @@ where for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, + for<'a> >::Error: Display, { async fn start( membership: Arc, @@ -138,8 +141,8 @@ pub struct RandomBuilderSource { LruCache>, >, >, - pub_key: TYPES::SignatureKey, - priv_key: ::PrivateKey, + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, } impl RandomBuilderSource { @@ -147,8 +150,8 @@ impl RandomBuilderSource { #[must_use] #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 pub fn new( - pub_key: TYPES::SignatureKey, - priv_key: ::PrivateKey, + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, ) -> Self { Self { blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), @@ -220,6 +223,8 @@ impl BuilderDataSource for RandomBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { Ok(self .blocks @@ -234,6 +239,7 @@ impl BuilderDataSource for RandomBuilderSource { async fn claim_block( &self, block_hash: &BuilderCommitment, + _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; @@ -249,6 +255,7 @@ impl BuilderDataSource for RandomBuilderSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, + _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; @@ -261,7 +268,7 @@ impl BuilderDataSource for RandomBuilderSource { Ok(header_input) } - async fn get_builder_address(&self) -> Result { + async fn get_builder_address(&self) -> Result { Ok(self.pub_key.clone()) } } @@ -275,8 +282,9 @@ where for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, + for<'a> >::Error: Display, { - let (pub_key, priv_key) = TYPES::SignatureKey::generated_from_seed_indexed([1; 32], 0); + let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); let source = RandomBuilderSource::new(pub_key, priv_key); source.run(RandomBuilderOptions::default()); @@ -293,8 +301,8 @@ where } pub struct SimpleBuilderSource { - pub_key: TYPES::SignatureKey, - priv_key: ::PrivateKey, + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, membership: Arc, #[allow(clippy::type_complexity)] transactions: Arc, TYPES::Transaction>>>, @@ -318,6 +326,8 @@ impl BuilderDataSource for SimpleBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { let transactions = self .transactions @@ -347,6 +357,7 @@ impl BuilderDataSource for SimpleBuilderSource { async fn claim_block( &self, block_hash: &BuilderCommitment, + _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; @@ -357,6 +368,7 @@ impl BuilderDataSource for SimpleBuilderSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, + _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { let mut blocks = self.blocks.write().await; @@ -364,7 +376,7 @@ impl BuilderDataSource for SimpleBuilderSource { entry.header_input.take().ok_or(BuildError::Missing) } - async fn get_builder_address(&self) -> Result { + async fn get_builder_address(&self) -> Result { Ok(self.pub_key.clone()) } } @@ -374,6 +386,7 @@ where for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, + for<'a> >::Error: Display, { pub async fn run(self, url: Url) { let builder_api = hotshot_builder_api::builder::define_api::< @@ -450,7 +463,7 @@ impl BuilderTask for SimpleBuilderTask { pub async fn make_simple_builder( membership: Arc, ) -> (SimpleBuilderSource, SimpleBuilderTask) { - let (pub_key, priv_key) = TYPES::SignatureKey::generated_from_seed_indexed([1; 32], 0); + let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); let transactions = Arc::new(RwLock::new(HashMap::new())); let blocks = Arc::new(RwLock::new(HashMap::new())); @@ -476,8 +489,8 @@ pub async fn make_simple_builder( fn build_block( transactions: Vec, num_storage_nodes: usize, - pub_key: TYPES::SignatureKey, - priv_key: ::PrivateKey, + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, ) -> ( AvailableBlockInfo, AvailableBlockData, @@ -500,7 +513,7 @@ fn build_block( block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); block_info.extend_from_slice(commitment.as_ref()); - match TYPES::SignatureKey::sign(&priv_key, &block_info) { + match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, &block_info) { Ok(sig) => sig, Err(e) => { panic!("Failed to sign block: {}", e); @@ -509,20 +522,22 @@ fn build_block( }; let signature_over_builder_commitment = - match TYPES::SignatureKey::sign(&priv_key, commitment.as_ref()) { + match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, commitment.as_ref()) { Ok(sig) => sig, Err(e) => { panic!("Failed to sign block: {}", e); } }; - let signature_over_vid_commitment = - match TYPES::SignatureKey::sign(&priv_key, vid_commitment.as_ref()) { - Ok(sig) => sig, - Err(e) => { - panic!("Failed to sign block: {}", e); - } - }; + let signature_over_vid_commitment = match TYPES::BuilderSignatureKey::sign_builder_message( + &priv_key, + vid_commitment.as_ref(), + ) { + Ok(sig) => sig, + Err(e) => { + panic!("Failed to sign block: {}", e); + } + }; let block = AvailableBlockData { block_payload, diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 333450a16d..7aad10cd32 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -35,10 +35,15 @@ async fn test_random_block_builder() { let client: BuilderClient = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); + let (pub_key, private_key) = + ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); + let signature = ::SignatureKey::sign(&private_key, &[0_u8; 32]) + .expect("Failed to create dummy signature"); + let mut blocks = loop { // Test getting blocks let blocks = client - .get_available_blocks(vid_commitment(&vec![], 1)) + .get_available_blocks(vid_commitment(&vec![], 1), pub_key, &signature) .await .expect("Failed to get available blocks"); @@ -54,16 +59,8 @@ async fn test_random_block_builder() { } }; - // Test claiming available block - let signature = { - let (_key, private_key) = - ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); - ::SignatureKey::sign(&private_key, &[0_u8; 32]) - .expect("Failed to create dummy signature") - }; - let _: AvailableBlockData = client - .claim_block(blocks.pop().unwrap().block_hash, &signature) + .claim_block(blocks.pop().unwrap().block_hash, pub_key, &signature) .await .expect("Failed to claim block"); @@ -73,7 +70,7 @@ async fn test_random_block_builder() { } .builder_commitment(&()); let result = client - .claim_block(commitment_for_non_existent_block, &signature) + .claim_block(commitment_for_non_existent_block, pub_key, &signature) .await; assert!(matches!(result, Err(BuilderClientError::NotFound))); } diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 02e5470ff0..4d88365e0b 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -70,12 +70,11 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(node_id)), ]; - let view_2_outputs = - vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - quorum_proposal_send(), - ]; + let view_2_outputs = vec![ + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), + quorum_proposal_send(), + ]; let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 5212c21c53..15bc954497 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -15,7 +15,7 @@ use hotshot_example_types::{ }; use hotshot_types::constants::STATIC_VER_0_1; use hotshot_types::message::Message; -use hotshot_types::signature_key::BLSPubKey; +use hotshot_types::signature_key::{BLSPubKey, BuilderKey}; use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; @@ -54,6 +54,7 @@ impl NodeType for Test { type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; + type BuilderSignatureKey = BuilderKey; } #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index a413a328a5..c8aa93a673 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -17,7 +17,10 @@ use tracing::instrument; use crate::{ qc::{BitVectorQC, QCParams}, stake_table::StakeTableEntry, - traits::{qc::QuorumCertificateScheme, signature_key::SignatureKey}, + traits::{ + qc::QuorumCertificateScheme, + signature_key::{BuilderSignatureKey, SignatureKey}, + }, }; /// BLS private key used to sign a message @@ -126,3 +129,39 @@ impl SignatureKey for BLSPubKey { kp.ver_key() } } + +// Currently implement builder signature key for BLS +// So copy pasta here, but actually Sequencer will implement the same trait for ethereum types +/// Builder signature key +pub type BuilderKey = BLSPubKey; + +impl BuilderSignatureKey for BuilderKey { + type BuilderPrivateKey = BLSPrivKey; + type BuilderSignature = ::Signature; + type SignError = PrimitivesError; + + fn sign_builder_message( + private_key: &Self::BuilderPrivateKey, + data: &[u8], + ) -> Result { + BitVectorQC::::sign( + &(), + private_key, + data, + &mut rand::thread_rng(), + ) + } + + fn validate_builder_signature(&self, signature: &Self::BuilderSignature, data: &[u8]) -> bool { + BLSOverBN254CurveSignatureScheme::verify(&(), self, data, signature).is_ok() + } + + fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::BuilderPrivateKey) { + let mut hasher = blake3::Hasher::new(); + hasher.update(&seed); + hasher.update(&index.to_le_bytes()); + let new_seed = *hasher.finalize().as_bytes(); + let kp = KeyPair::generate(&mut ChaCha20Rng::from_seed(new_seed)); + (kp.ver_key(), kp.sign_key_ref().clone()) + } +} diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 78f974cbe9..2c86d67483 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -167,4 +167,10 @@ pub trait BlockHeader: /// Get the metadata. fn metadata(&self) -> &::Metadata; + + /// Get the builder commitment + fn builder_commitment( + &self, + metadata: &::Metadata, + ) -> BuilderCommitment; } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index affc5383db..4f11e66dba 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -21,6 +21,7 @@ use super::{ network::{ AsyncGenerator, ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation, }, + signature_key::BuilderSignatureKey, states::TestableState, storage::Storage, ValidatedState, @@ -238,4 +239,7 @@ pub trait NodeType: /// Membership used for this implementation type Membership: Membership; + + /// The type builder uses to sign its messages + type BuilderSignatureKey: BuilderSignatureKey; } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index ad274d515c..0c921e0c4d 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -141,3 +141,59 @@ pub trait SignatureKey: #[must_use] fn genesis_proposer_pk() -> Self; } + +/// Builder Signature Key trait with minimal requirements +pub trait BuilderSignatureKey: + Send + + Sync + + Clone + + Sized + + Debug + + Hash + + Serialize + + for<'a> Deserialize<'a> + + PartialEq + + Eq + + PartialOrd + + Ord + + Display +{ + /// The type of the keys builder would use to sign its messages + type BuilderPrivateKey: Send + + Sync + + Sized + + Clone + + Debug + + Eq + + Serialize + + for<'a> Deserialize<'a> + + Hash; + + /// The type of the signature builder would use to sign its messages + type BuilderSignature: Send + + Sync + + Sized + + Clone + + Debug + + Eq + + Serialize + + for<'a> Deserialize<'a> + + Hash; + + /// Type of error that can occur when signing data + type SignError: std::error::Error + Send + Sync; + + /// validate the message with the builder's public key + fn validate_builder_signature(&self, signature: &Self::BuilderSignature, data: &[u8]) -> bool; + + /// sign the message with the builder's private key + /// # Errors + /// If unable to sign the data with the key + fn sign_builder_message( + private_key: &Self::BuilderPrivateKey, + data: &[u8], + ) -> Result; + + /// Generate a new key pair + fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::BuilderPrivateKey); +} From d53dbded8cf70305d723f79c81c4d879d91977b3 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 11 Apr 2024 16:47:36 +0300 Subject: [PATCH 0946/1393] Revert "Remove special treatment of view 1 (#2873)" (#2952) This reverts commit c5f85f4e6683daee0413c0f449e49ce3cb470cd2. --- hotshot/src/lib.rs | 6 +- task-impls/src/consensus.rs | 99 ++++++++++++++++++++------------ task-impls/src/quorum_vote.rs | 42 ++++++++++---- testing/src/task_helpers.rs | 2 +- testing/src/test_runner.rs | 2 +- testing/src/view_generator.rs | 3 +- testing/tests/tests_1/message.rs | 1 + types/src/data.rs | 52 ++++------------- types/src/simple_certificate.rs | 35 +++++++++-- 9 files changed, 143 insertions(+), 99 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f4a625ef13..3ce65bf6fc 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -286,7 +286,7 @@ impl> SystemContext { self.internal_event_stream .0 .broadcast_direct(Arc::new(HotShotEvent::QCFormed(either::Left( - self.consensus.read().await.high_qc.clone(), + QuorumCertificate::genesis(), )))) .await .expect("Genesis Broadcast failed"); @@ -697,13 +697,13 @@ impl HotShotInitializer { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); Ok(Self { inner: Leaf::genesis(&instance_state), + instance_state, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), - high_qc: QuorumCertificate::genesis(&instance_state), + high_qc: QuorumCertificate::genesis(), undecided_leafs: Vec::new(), undecided_state: BTreeMap::new(), - instance_state, }) } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 69850a7392..c5017cf59c 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -89,13 +89,11 @@ pub(crate) async fn validate_proposal( let state = Arc::new(validated_state); let delta = Arc::new(state_delta); + let parent_commitment = parent_leaf.commit(); let view = proposal.data.get_view_number(); - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - ensure!( - proposed_leaf.get_parent_commitment() == parent_leaf.commit(), - "Proposed leaf does not extend the parent leaf." - ); + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + proposed_leaf.set_parent_commitment(parent_commitment); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment // @@ -239,10 +237,8 @@ async fn create_and_send_proposal( upgrade_certificate: upgrade_cert, }; - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.get_parent_commitment() != parent_leaf.commit() { - return; - } + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); + proposed_leaf.set_parent_commitment(parent_leaf.commit()); let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) else { @@ -411,10 +407,14 @@ impl, A: ConsensusApi + let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. let justify_qc = proposal.justify_qc.clone(); - let parent = consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned(); + let parent = if justify_qc.is_genesis { + Some(Leaf::genesis(&consensus.instance_state)) + } else { + consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + }; // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { @@ -427,15 +427,15 @@ impl, A: ConsensusApi + }; let parent_commitment = parent.commit(); - let proposed_leaf = Leaf::from_quorum_proposal(proposal); - if proposed_leaf.get_parent_commitment() != parent_commitment { - return false; - } + let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); + proposed_leaf.set_parent_commitment(parent_commitment); // Validate the DAC. let message = if cert.is_valid_cert(self.committee_membership.as_ref()) { // Validate the block payload commitment for non-genesis DAC. - if cert.get_data().payload_commit != proposal.block_header.payload_commitment() + if !cert.is_genesis + && cert.get_data().payload_commit + != proposal.block_header.payload_commitment() { error!("Block payload commitment does not equal da cert payload commitment. View = {}", *view); return false; @@ -720,22 +720,48 @@ impl, A: ConsensusApi + let consensus = self.consensus.upgradable_read().await; // Get the parent leaf and state. - let parent = match consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - { - Some(leaf) => { - if let (Some(state), _) = - consensus.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, state.clone())) - } else { - error!("Parent state not found! Consensus internally inconsistent"); - return; + let parent = if justify_qc.is_genesis { + // Send the `Decide` event for the genesis block if the justify QC is genesis. + let leaf = Leaf::genesis(&consensus.instance_state); + let (validated_state, state_delta) = + TYPES::ValidatedState::genesis(&consensus.instance_state); + let state = Arc::new(validated_state); + broadcast_event( + Event { + view_number: TYPES::Time::genesis(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![LeafInfo::new( + leaf.clone(), + state.clone(), + Some(Arc::new(state_delta)), + None, + )]), + qc: Arc::new(justify_qc.clone()), + block_size: None, + }, + }, + &self.output_event_stream, + ) + .await; + Some((leaf, state)) + } else { + match consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = + consensus.get_state_and_delta(leaf.get_view_number()) + { + Some((leaf, state.clone())) + } else { + error!("Parent state not found! Consensus internally inconsistent"); + return; + } } + None => None, } - None => None, }; if justify_qc.get_view_number() > consensus.high_qc.view_number { @@ -764,7 +790,7 @@ impl, A: ConsensusApi + "Proposal's parent missing from storage with commitment: {:?}", justify_qc.get_data().leaf_commit ); - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + let leaf = Leaf::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header( @@ -776,15 +802,13 @@ impl, A: ConsensusApi + view, View { view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), + leaf: leaf.commit(), state, delta: None, }, }, ); - consensus - .saved_leaves - .insert(proposed_leaf.commit(), proposed_leaf.clone()); + consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); if let Err(e) = self .storage @@ -1582,7 +1606,6 @@ impl, A: ConsensusApi + .as_ref() .filter(|cert| cert.is_valid_for_view(&view)) .cloned(); - let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); let consensus = self.consensus.clone(); diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 682802b8bf..13c55dd400 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -13,7 +13,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::Consensus, data::Leaf, - event::{Event, EventType}, + event::{Event, EventType, LeafInfo}, message::GeneralConsensusMessage, simple_vote::{QuorumData, QuorumVote}, traits::{ @@ -82,16 +82,14 @@ impl + 'static> HandleDepOutput payload_commitment = Some(proposal_payload_comm); } let parent_commitment = parent_leaf.commit(); - let proposed_leaf = Leaf::from_quorum_proposal(proposal); - if proposed_leaf.get_parent_commitment() != parent_commitment { - return; - } + let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); + proposed_leaf.set_parent_commitment(parent_commitment); leaf = Some(proposed_leaf); } HotShotEvent::DACertificateValidated(cert) => { let cert_payload_comm = cert.get_data().payload_commit; if let Some(comm) = payload_commitment { - if cert_payload_comm != comm { + if !cert.is_genesis && cert_payload_comm != comm { error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } @@ -334,7 +332,31 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState { block_header: block_header.clone(), view_number: ViewNumber::new(1), - justify_qc: QuorumCertificate::genesis(&TestInstanceState {}), + justify_qc: QuorumCertificate::genesis(), upgrade_certificate: None, proposal_certificate: None, }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index b4867629b8..14a1bff776 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -219,7 +219,7 @@ where latest_view: None, changes, last_decided_leaf: Leaf::genesis(&TestInstanceState {}), - high_qc: QuorumCertificate::genesis(&TestInstanceState {}), + high_qc: QuorumCertificate::genesis(), }; let spinning_task = TestTask::, SpinningTask>::new( Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 02a00642d1..2d6004b173 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -85,7 +85,7 @@ impl TestView { let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), view_number: genesis_view, - justify_qc: QuorumCertificate::genesis(&TestInstanceState {}), + justify_qc: QuorumCertificate::genesis(), upgrade_certificate: None, proposal_certificate: None, }; @@ -112,6 +112,7 @@ impl TestView { leaf.fill_block_payload_unchecked(TestBlockPayload { transactions: transactions.clone(), }); + leaf.set_parent_commitment(Leaf::genesis(&TestInstanceState {}).commit()); let signature = ::sign(&private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index b531218b0d..6dd7ae1c29 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -42,6 +42,7 @@ fn version_number_at_start_of_serialization() { vote_commitment: data.commit(), view_number, signatures: None, + is_genesis: false, _pd: PhantomData, }; let message = Message { diff --git a/types/src/data.rs b/types/src/data.rs index eb64df0f70..17ced1f262 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -13,7 +13,7 @@ use std::{ use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; -use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; +use committable::{Commitment, Committable, RawCommitmentBuilder}; use derivative::Derivative; use jf_primitives::vid::VidDisperse as JfVidDisperse; use rand::Rng; @@ -26,7 +26,7 @@ use crate::{ simple_certificate::{ QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, - simple_vote::{QuorumData, UpgradeProposalData}, + simple_vote::UpgradeProposalData, traits::{ block_contents::{ vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, @@ -429,24 +429,6 @@ impl Display for Leaf { } } -impl QuorumCertificate { - #[must_use] - /// Creat the Genesis certificate - pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { - let data = QuorumData { - leaf_commit: Leaf::genesis(instance_state).commit(), - }; - let commit = data.commit(); - Self { - data, - vote_commitment: commit, - view_number: ::genesis(), - signatures: None, - _pd: PhantomData, - } - } -} - impl Leaf { /// Create a new leaf from its components. /// @@ -464,23 +446,10 @@ impl Leaf { let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); let block_header = TYPES::BlockHeader::genesis(instance_state, payload_commitment, metadata); - - let null_quorum_data = QuorumData { - leaf_commit: Commitment::>::default_commitment_no_preimage(), - }; - - let justify_qc = QuorumCertificate { - data: null_quorum_data.clone(), - vote_commitment: null_quorum_data.commit(), - view_number: ::genesis(), - signatures: None, - _pd: PhantomData, - }; - Self { view_number: TYPES::Time::genesis(), - justify_qc, - parent_commitment: null_quorum_data.leaf_commit, + justify_qc: QuorumCertificate::::genesis(), + parent_commitment: fake_commitment(), upgrade_certificate: None, block_header: block_header.clone(), block_payload: Some(payload), @@ -509,7 +478,11 @@ impl Leaf { pub fn get_parent_commitment(&self) -> Commitment { self.parent_commitment } - /// The block header contained in this leaf. + /// Commitment to this leaf's parent. + pub fn set_parent_commitment(&mut self, commitment: Commitment) { + self.parent_commitment = commitment; + } + /// Get a reference to the block header contained in this leaf. pub fn get_block_header(&self) -> &::BlockHeader { &self.block_header } @@ -575,10 +548,9 @@ impl Leaf { self.get_upgrade_certificate(), parent.get_upgrade_certificate(), ) { - // Easiest cases are: - // - no upgrade certificate on either: this is the most common case, and is always fine. - // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} + // If the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade. Again, this is always fine. + // But, if we have no upgrade certificate on either is the most common case, and is always fine. + (Some(_) | None, None) => {} // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index fe18f9d599..5fd4ff04a1 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -7,12 +7,12 @@ use std::{ }; use anyhow::{ensure, Result}; -use committable::{Commitment, Committable}; +use committable::{Commitment, CommitmentBoundsArkless, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; use crate::{ - data::serialize_signature2, + data::{serialize_signature2, Leaf}, simple_vote::{ DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, @@ -72,6 +72,8 @@ pub struct SimpleCertificate::QCType>, + /// If this QC is for the genesis block + pub is_genesis: bool, /// phantom data for `THRESHOLD` and `TYPES` pub _pd: PhantomData<(TYPES, THRESHOLD)>, } @@ -89,6 +91,7 @@ impl> vote_commitment, view_number: view, signatures: Some(sig), + is_genesis: false, _pd: PhantomData, } } fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { - if self.view_number == TYPES::Time::genesis() { + if self.is_genesis && self.view_number == TYPES::Time::genesis() { return true; } let real_qc_pp = ::get_public_parameter( @@ -147,7 +151,30 @@ impl> } impl Display for QuorumCertificate { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "view: {:?}", self.view_number) + write!( + f, + "view: {:?}, is_genesis: {:?}", + self.view_number, self.is_genesis + ) + } +} + +impl QuorumCertificate { + #[must_use] + /// Creat the Genisis certificate + pub fn genesis() -> Self { + let data = QuorumData { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + }; + let commit = data.commit(); + Self { + data, + vote_commitment: commit, + view_number: ::genesis(), + signatures: None, + is_genesis: true, + _pd: PhantomData, + } } } From 639474798bf49f13d2cc07c5949970a97c006b54 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 12 Apr 2024 09:07:44 +0300 Subject: [PATCH 0947/1393] Update Testing Framework to Support Async (#2945) * New async testing framwork * clippy lints, check tests * fix some names, fix lints, move to separate folders and update imports * some renaming * fix build * fix build actually --- macros/src/lib.rs | 6 +- testing/src/predicates.rs | 275 ------------------ testing/src/predicates/event.rs | 159 ++++++++++ testing/src/predicates/mod.rs | 28 ++ testing/src/predicates/upgrade.rs | 49 ++++ testing/src/script.rs | 45 +-- testing/tests/tests_1/consensus_task.rs | 48 ++- testing/tests/tests_1/da_task.rs | 2 +- testing/tests/tests_1/proposal_ordering.rs | 22 +- testing/tests/tests_1/quorum_proposal_task.rs | 2 +- testing/tests/tests_1/quorum_vote_task.rs | 6 +- testing/tests/tests_1/upgrade_task.rs | 19 +- 12 files changed, 313 insertions(+), 348 deletions(-) delete mode 100644 testing/src/predicates.rs create mode 100644 testing/src/predicates/event.rs create mode 100644 testing/src/predicates/mod.rs create mode 100644 testing/src/predicates/upgrade.rs diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 9e03360037..06b9c0dcae 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -364,7 +364,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { let assert = &mut output_asserts[#output_index_names]; - validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, assert); + validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, &**assert).await; #output_index_names += 1; } @@ -392,7 +392,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { let mut assert = &mut output_asserts[#output_index_names]; - validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, assert); + validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, &**assert).await; #output_index_names += 1; } @@ -410,7 +410,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { let task_state_asserts = &mut #task_expectations[stage_number].task_state_asserts; for assert in task_state_asserts { - validate_task_state_or_panic_in_script(stage_number, #script_names.to_string(), #task_names.state(), assert); + validate_task_state_or_panic_in_script(stage_number, #script_names.to_string(), #task_names.state(), &**assert).await; } )* } } diff --git a/testing/src/predicates.rs b/testing/src/predicates.rs deleted file mode 100644 index 46dec6a558..0000000000 --- a/testing/src/predicates.rs +++ /dev/null @@ -1,275 +0,0 @@ -use std::{collections::HashSet, sync::Arc}; - -use hotshot::types::SystemContextHandle; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_task_impls::{ - consensus::ConsensusTaskState, - events::{HotShotEvent, HotShotEvent::*}, -}; -use hotshot_types::{ - data::null_block, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, -}; - -#[derive(Eq, PartialEq, Copy, Clone, Debug)] -pub enum PredicateResult { - Pass, - - Fail, - - Incomplete, -} - -impl From for PredicateResult { - fn from(boolean: bool) -> Self { - match boolean { - true => PredicateResult::Pass, - false => PredicateResult::Fail, - } - } -} - -pub struct Predicate { - pub function: Box PredicateResult>, - pub info: String, -} - -pub fn all(events: Vec>) -> Predicate>> -where - TYPES: NodeType, -{ - let info = format!("{:?}", events); - let mut set: HashSet<_> = events.into_iter().collect(); - - let function = move |e: &Arc>| match set.take(e.as_ref()) { - Some(_) => { - if set.is_empty() { - PredicateResult::Pass - } else { - PredicateResult::Incomplete - } - } - None => PredicateResult::Fail, - }; - - Predicate { - function: Box::new(function), - info, - } -} - -impl std::fmt::Debug for Predicate { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - write!(f, "{}", self.info) - } -} - -pub fn exact(event: HotShotEvent) -> Predicate>> -where - TYPES: NodeType, -{ - let info = format!("{:?}", event); - let event = Arc::new(event); - - Predicate { - function: Box::new(move |e| PredicateResult::from(e == &event)), - info, - } -} - -pub fn multi_exact( - events: Vec>, -) -> Vec>>> -where - TYPES: NodeType, -{ - events - .into_iter() - .map(|event| { - let event = Arc::new(event); - let info = format!("{:?}", event); - Predicate { - function: Box::new(move |e| PredicateResult::from(e == &event)), - info, - } - }) - .collect() -} - -pub fn leaf_decided() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "LeafDecided".to_string(); - let function = - |e: &Arc>| PredicateResult::from(matches!(e.as_ref(), LeafDecided(_))); - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn quorum_vote_send() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "QuorumVoteSend".to_string(); - let function = |e: &Arc>| { - PredicateResult::from(matches!(e.as_ref(), QuorumVoteSend(_))) - }; - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn view_change() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "ViewChange".to_string(); - let function = - |e: &Arc>| PredicateResult::from(matches!(e.as_ref(), ViewChange(_))); - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn upgrade_certificate_formed() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "UpgradeCertificateFormed".to_string(); - let function = |e: &Arc>| { - PredicateResult::from(matches!(e.as_ref(), UpgradeCertificateFormed(_))) - }; - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn quorum_proposal_send_with_upgrade_certificate() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "QuorumProposalSend with UpgradeCertificate attached".to_string(); - let function = |e: &Arc>| match e.as_ref() { - QuorumProposalSend(proposal, _) => { - PredicateResult::from(proposal.data.upgrade_certificate.is_some()) - } - _ => PredicateResult::Fail, - }; - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn quorum_proposal_validated() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "QuorumProposalValidated".to_string(); - let function = |e: &Arc>| { - PredicateResult::from(matches!(e.as_ref(), QuorumProposalValidated(..))) - }; - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn quorum_proposal_send() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "QuorumProposalSend".to_string(); - let function = |e: &Arc>| { - PredicateResult::from(matches!(e.as_ref(), QuorumProposalSend(_, _))) - }; - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn quorum_proposal_send_with_null_block( - num_storage_nodes: usize, -) -> Predicate>> -where - TYPES: NodeType, -{ - let info = "QuorumProposalSend with null block payload".to_string(); - let function = move |e: &Arc>| match e.as_ref() { - QuorumProposalSend(proposal, _) => PredicateResult::from( - Some(proposal.data.block_header.payload_commitment()) - == null_block::commitment(num_storage_nodes), - ), - _ => PredicateResult::Fail, - }; - - Predicate { - function: Box::new(function), - info, - } -} - -pub fn timeout_vote_send() -> Predicate>> -where - TYPES: NodeType, -{ - let info = "TimeoutVoteSend".to_string(); - let function = |e: &Arc>| { - PredicateResult::from(matches!(e.as_ref(), TimeoutVoteSend(_))) - }; - - Predicate { - function: Box::new(function), - info, - } -} - -type ConsensusTaskTestState = - ConsensusTaskState>; - -pub fn consensus_predicate( - function: Box Fn(&'a ConsensusTaskTestState) -> bool>, - info: &str, -) -> Predicate { - let wrapped_function = move |e: &ConsensusTaskTestState| PredicateResult::from(function(e)); - - Predicate { - function: Box::new(wrapped_function), - info: info.to_string(), - } -} - -pub fn no_decided_upgrade_cert() -> Predicate { - consensus_predicate( - Box::new(|state| state.decided_upgrade_cert.is_none()), - "expected decided_upgrade_cert to be None", - ) -} - -pub fn decided_upgrade_cert() -> Predicate { - consensus_predicate( - Box::new(|state| state.decided_upgrade_cert.is_some()), - "expected decided_upgrade_cert to be Some(_)", - ) -} - -pub fn is_at_view_number(n: u64) -> Predicate { - consensus_predicate( - Box::new(move |state| *state.cur_view == n), - format!("expected cur view to be {}", n).as_str(), - ) -} diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs new file mode 100644 index 0000000000..bdef99986c --- /dev/null +++ b/testing/src/predicates/event.rs @@ -0,0 +1,159 @@ +use async_trait::async_trait; +use hotshot_task_impls::events::{HotShotEvent, HotShotEvent::*}; +use hotshot_types::{ + data::null_block, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; +use std::sync::Arc; + +use crate::predicates::{Predicate, PredicateResult}; + +type EventCallback = Arc>) -> bool + Send + Sync>; + +pub struct EventPredicate +where + TYPES: NodeType + Send + Sync, +{ + check: EventCallback, + info: String, +} + +impl std::fmt::Debug for EventPredicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.info) + } +} + +#[async_trait] +impl Predicate>> for EventPredicate +where + TYPES: NodeType + Send + Sync + 'static, +{ + async fn evaluate(&self, input: &Arc>) -> PredicateResult { + PredicateResult::from((self.check)(input.clone())) + } + + async fn info(&self) -> String { + self.info.clone() + } +} + +pub fn exact(event: HotShotEvent) -> Box> +where + TYPES: NodeType, +{ + let info = format!("{:?}", event); + let event = Arc::new(event); + + let check: EventCallback = Arc::new(move |e: Arc>| { + let event_clone = event.clone(); + *e == *event_clone + }); + + Box::new(EventPredicate { check, info }) +} + +pub fn leaf_decided() -> Box> +where + TYPES: NodeType, +{ + let info = "LeafDecided".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), LeafDecided(_))); + + Box::new(EventPredicate { check, info }) +} + +pub fn quorum_vote_send() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumVoteSend".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), QuorumVoteSend(_))); + + Box::new(EventPredicate { check, info }) +} + +pub fn view_change() -> Box> +where + TYPES: NodeType, +{ + let info = "ViewChange".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), ViewChange(_))); + Box::new(EventPredicate { check, info }) +} + +pub fn upgrade_certificate_formed() -> Box> +where + TYPES: NodeType, +{ + let info = "UpgradeCertificateFormed".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(e.as_ref(), UpgradeCertificateFormed(_)) + }); + Box::new(EventPredicate { check, info }) +} + +pub fn quorum_proposal_send_with_upgrade_certificate() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumProposalSend with UpgradeCertificate attached".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| match e.as_ref() { + QuorumProposalSend(proposal, _) => proposal.data.upgrade_certificate.is_some(), + _ => false, + }); + Box::new(EventPredicate { info, check }) +} + +pub fn quorum_proposal_validated() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumProposalValidated".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(*e.clone(), QuorumProposalValidated(..)) + }); + Box::new(EventPredicate { check, info }) +} + +pub fn quorum_proposal_send() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumProposalSend".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), QuorumProposalSend(..))); + Box::new(EventPredicate { check, info }) +} + +pub fn quorum_proposal_send_with_null_block( + num_storage_nodes: usize, +) -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumProposalSend with null block payload".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| match e.as_ref() { + QuorumProposalSend(proposal, _) => { + Some(proposal.data.block_header.payload_commitment()) + == null_block::commitment(num_storage_nodes) + } + _ => false, + }); + Box::new(EventPredicate { check, info }) +} + +pub fn timeout_vote_send() -> Box> +where + TYPES: NodeType, +{ + let info = "TimeoutVoteSend".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), TimeoutVoteSend(..))); + Box::new(EventPredicate { check, info }) +} diff --git a/testing/src/predicates/mod.rs b/testing/src/predicates/mod.rs new file mode 100644 index 0000000000..9042b41068 --- /dev/null +++ b/testing/src/predicates/mod.rs @@ -0,0 +1,28 @@ +pub mod event; +pub mod upgrade; + +use async_trait::async_trait; + +#[derive(Eq, PartialEq, Copy, Clone, Debug)] +pub enum PredicateResult { + Pass, + + Fail, + + Incomplete, +} + +impl From for PredicateResult { + fn from(boolean: bool) -> Self { + match boolean { + true => PredicateResult::Pass, + false => PredicateResult::Fail, + } + } +} + +#[async_trait] +pub trait Predicate: std::fmt::Debug { + async fn evaluate(&self, input: &INPUT) -> PredicateResult; + async fn info(&self) -> String; +} diff --git a/testing/src/predicates/upgrade.rs b/testing/src/predicates/upgrade.rs new file mode 100644 index 0000000000..4fcae5483b --- /dev/null +++ b/testing/src/predicates/upgrade.rs @@ -0,0 +1,49 @@ +use async_trait::async_trait; +use hotshot::types::SystemContextHandle; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::consensus::ConsensusTaskState; +use hotshot_types::simple_certificate::UpgradeCertificate; +use std::sync::Arc; + +use crate::predicates::{Predicate, PredicateResult}; + +type ConsensusTaskTestState = + ConsensusTaskState>; + +type UpgradeCertCallback = + Arc>>) -> bool + Send + Sync>; + +pub struct UpgradeCertPredicate { + check: UpgradeCertCallback, + info: String, +} + +impl std::fmt::Debug for UpgradeCertPredicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.info) + } +} + +#[async_trait] +impl Predicate for UpgradeCertPredicate { + async fn evaluate(&self, input: &ConsensusTaskTestState) -> PredicateResult { + let upgrade_cert = input.decided_upgrade_cert.clone(); + PredicateResult::from((self.check)(upgrade_cert.into())) + } + + async fn info(&self) -> String { + self.info.clone() + } +} + +pub fn no_decided_upgrade_cert() -> Box { + let info = "expected decided_upgrade_cert to be None".to_string(); + let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); + Box::new(UpgradeCertPredicate { info, check }) +} + +pub fn decided_upgrade_cert() -> Box { + let info = "expected decided_upgrade_cert to be Some(_)".to_string(); + let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); + Box::new(UpgradeCertPredicate { info, check }) +} diff --git a/testing/src/script.rs b/testing/src/script.rs index d9fb02383e..7573bf8aa7 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -12,8 +12,8 @@ pub const RECV_TIMEOUT: Duration = Duration::from_millis(250); pub struct TestScriptStage>>> { pub inputs: Vec>, - pub outputs: Vec>>>, - pub asserts: Vec>, + pub outputs: Vec>>>>, + pub asserts: Vec>>, } /// A `TestScript` is a sequence of triples (input sequence, output sequence, assertions). @@ -43,24 +43,28 @@ where panic!("{}", output_missing_error); } -pub fn validate_task_state_or_panic(stage_number: usize, state: &S, assert: &mut Predicate) { +pub async fn validate_task_state_or_panic( + stage_number: usize, + state: &S, + assert: &dyn Predicate, +) { assert!( - (assert.function)(state) == PredicateResult::Pass, + assert.evaluate(state).await == PredicateResult::Pass, "Stage {} | Task state failed to satisfy: {:?}", stage_number, assert ); } -pub fn validate_output_or_panic( +pub async fn validate_output_or_panic( stage_number: usize, output: &S, - assert: &mut Predicate, + assert: &(dyn Predicate + 'static), ) -> PredicateResult where S: std::fmt::Debug, { - let result = (assert.function)(output); + let result = assert.evaluate(output).await; match result { PredicateResult::Pass => result, @@ -126,7 +130,14 @@ pub async fn run_test_script { tracing::debug!("Test received: {:?}", received_output); - result = validate_output_or_panic(stage_number, &received_output, assert); + result = validate_output_or_panic( + stage_number, + &received_output, + // The first * dereferences &Box to Box, the second one then dereferences the Box to the + // trait object itself and then we're good to go. + &**assert, + ) + .await; to_task .broadcast(received_output.clone()) @@ -154,7 +165,7 @@ pub async fn run_test_script } for assert in &mut stage.asserts { - validate_task_state_or_panic(stage_number, task.state(), assert); + validate_task_state_or_panic(stage_number, task.state(), &**assert).await; } if let Ok(received_output) = from_task.try_recv() { @@ -169,8 +180,8 @@ pub struct TaskScript { } pub struct Expectations { - pub output_asserts: Vec>>>, - pub task_state_asserts: Vec>, + pub output_asserts: Vec>>>>, + pub task_state_asserts: Vec>>, } pub fn panic_extra_output_in_script(stage_number: usize, script_name: String, output: &S) @@ -197,14 +208,14 @@ where panic!("{}", output_missing_error); } -pub fn validate_task_state_or_panic_in_script( +pub async fn validate_task_state_or_panic_in_script( stage_number: usize, script_name: String, state: &S, - assert: &mut Predicate, + assert: &dyn Predicate, ) { assert!( - (assert.function)(state) == PredicateResult::Pass, + assert.evaluate(state).await == PredicateResult::Pass, "Stage {} | Task state in {} failed to satisfy: {:?}", stage_number, script_name, @@ -212,14 +223,14 @@ pub fn validate_task_state_or_panic_in_script( ); } -pub fn validate_output_or_panic_in_script( +pub async fn validate_output_or_panic_in_script( stage_number: usize, script_name: String, output: &S, - assert: &mut Predicate, + assert: &dyn Predicate, ) { assert!( - (assert.function)(output) == PredicateResult::Pass, + assert.evaluate(output).await == PredicateResult::Pass, "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", stage_number, script_name, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 2564f307ff..c0079a706b 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -6,9 +6,8 @@ use hotshot_testing::task_helpers::key_pair_for_id; use hotshot_testing::task_helpers::get_vid_share; use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_testing::{ - predicates::{ - exact, is_at_view_number, quorum_proposal_send, quorum_proposal_validated, - quorum_vote_send, timeout_vote_send, + predicates::event::{ + exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, timeout_vote_send, }, script::{run_test_script, TestScriptStage}, task_helpers::{build_system_handle, vid_scheme_from_view_number}, @@ -65,7 +64,7 @@ async fn test_consensus_task() { quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; let cert = proposals[1].data.justify_qc.clone(); @@ -83,7 +82,7 @@ async fn test_consensus_task() { quorum_proposal_validated(), quorum_proposal_send(), ], - asserts: vec![is_at_view_number(2)], + asserts: vec![], }; let consensus_state = ConsensusTaskState::< @@ -105,7 +104,6 @@ async fn test_consensus_vote() { use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ - predicates::exact, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -196,7 +194,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; let inputs = vec![ @@ -207,17 +205,15 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { ]; let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); - let view_2_outputs = vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[1].clone())), - ]; - // Use the permuted inputs for view 2 depending on the provided index ordering. let view_2 = TestScriptStage { inputs: view_2_inputs, - outputs: view_2_outputs, - asserts: vec![is_at_view_number(2)], + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[1].clone())), + ], + asserts: vec![], }; let consensus_state = ConsensusTaskState::< @@ -308,7 +304,7 @@ async fn test_view_sync_finalize_propose() { quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; // Fail twice here to "trigger" a view sync event. This is accomplished above by advancing the @@ -317,7 +313,7 @@ async fn test_view_sync_finalize_propose() { inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], outputs: vec![timeout_vote_send(), timeout_vote_send()], // Times out, so we now have a delayed view - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; // Handle the view sync finalize cert, get the requisite data, propose. @@ -360,7 +356,7 @@ async fn test_view_sync_finalize_propose() { quorum_proposal_validated(), quorum_proposal_send(), ], - asserts: vec![is_at_view_number(4)], + asserts: vec![], }; let consensus_state = ConsensusTaskState::< @@ -382,8 +378,6 @@ async fn test_view_sync_finalize_propose() { /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task /// will indeed vote if the cert is valid and matches the correct view number. async fn test_view_sync_finalize_vote() { - use hotshot_testing::predicates::timeout_vote_send; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -431,14 +425,14 @@ async fn test_view_sync_finalize_vote() { quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; let view_2 = TestScriptStage { inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], outputs: vec![timeout_vote_send(), timeout_vote_send()], // Times out, so we now have a delayed view - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; // Now we're on the latest view. We want to set the quorum @@ -484,8 +478,6 @@ async fn test_view_sync_finalize_vote() { /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task /// will NOT vote when the certificate matches a different view number. async fn test_view_sync_finalize_vote_fail_view_number() { - use hotshot_testing::predicates::timeout_vote_send; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -533,14 +525,14 @@ async fn test_view_sync_finalize_vote_fail_view_number() { quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; let view_2 = TestScriptStage { inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], outputs: vec![timeout_vote_send(), timeout_vote_send()], // Times out, so we now have a delayed view - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; // Now we're on the latest view. We want to set the quorum @@ -574,7 +566,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { outputs: vec![ /* No outputs make it through. We never got a valid proposal, so we never vote */ ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; let consensus_state = ConsensusTaskState::< @@ -629,7 +621,7 @@ async fn test_vid_disperse_storage_failure() { quorum_proposal_validated(), /* Does not vote */ ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; let consensus_state = ConsensusTaskState::< diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 9be4932065..ca18e25335 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -7,7 +7,7 @@ use hotshot_example_types::{ use hotshot_task_impls::da::DATaskState; use hotshot_task_impls::events::HotShotEvent::*; use hotshot_testing::{ - predicates::exact, + predicates::event::exact, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 4d88365e0b..afd75586f1 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -2,8 +2,10 @@ use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ - predicates::{exact, is_at_view_number, quorum_proposal_send, quorum_proposal_validated}, - task_helpers::{vid_scheme_from_view_number, get_vid_share}, + predicates::event::{ + exact, quorum_proposal_send, quorum_proposal_validated, + }, + task_helpers::{get_vid_share, vid_scheme_from_view_number}, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; @@ -59,7 +61,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], - asserts: vec![is_at_view_number(1)], + asserts: vec![], }; // Node 2 is the leader up next, so we form the QC for it. @@ -70,20 +72,18 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(node_id)), ]; - let view_2_outputs = vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - quorum_proposal_send(), - ]; - let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); // This stage transitions from view 1 to view 2. let view_2 = TestScriptStage { inputs: view_2_inputs, - outputs: view_2_outputs, + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), + quorum_proposal_send(), + ], // We should end on view 2. - asserts: vec![is_at_view_number(2)], + asserts: vec![], }; let script = vec![view_1, view_2]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 756c39952e..a50daba851 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -4,7 +4,7 @@ use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_example_types::state_types::TestValidatedState; use hotshot_task_impls::events::HotShotEvent::*; use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; -use hotshot_testing::predicates::quorum_proposal_send; +use hotshot_testing::predicates::event::quorum_proposal_send; use hotshot_testing::task_helpers::vid_scheme_from_view_number; use hotshot_testing::{ script::{run_test_script, TestScriptStage}, diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 173eb78f53..ad6b4ebd4c 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -10,7 +10,7 @@ use hotshot_testing::task_helpers::get_vid_share; async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - predicates::{exact, quorum_proposal_validated,quorum_vote_send}, + predicates::event::{exact, quorum_proposal_validated, quorum_vote_send}, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -58,7 +58,7 @@ async fn test_quorum_vote_task_success() { async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - predicates::{exact, quorum_proposal_validated}, + predicates::event::{exact, quorum_proposal_validated}, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -126,7 +126,7 @@ async fn test_quorum_vote_task_miss_dependency() { QuorumVoteTaskState::::create_from(&handle).await; run_test_script( - vec![ view_no_dac, view_no_vid, view_no_quorum_proposal], + vec![view_no_dac, view_no_vid, view_no_quorum_proposal], quorum_vote_state, ) .await; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index fee835763e..b6d4f9076f 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -9,7 +9,8 @@ use hotshot_task_impls::{ consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, }; use hotshot_testing::{ - predicates::*, + predicates::event::*, + predicates::upgrade::*, script::{Expectations, TaskScript}, view_generator::TestViewGenerator, }; @@ -260,9 +261,9 @@ async fn test_upgrade_and_consensus_task() { expectations: vec![ Expectations { output_asserts: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - quorum_vote_send(), + exact::(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated::(), + quorum_vote_send::(), ], task_state_asserts: vec![], }, @@ -272,13 +273,13 @@ async fn test_upgrade_and_consensus_task() { }, Expectations { output_asserts: vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), + exact::(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated::(), ], task_state_asserts: vec![], }, Expectations { - output_asserts: vec![quorum_proposal_send_with_upgrade_certificate()], + output_asserts: vec![quorum_proposal_send_with_upgrade_certificate::()], task_state_asserts: vec![], }, ], @@ -292,7 +293,7 @@ async fn test_upgrade_and_consensus_task() { task_state_asserts: vec![], }, Expectations { - output_asserts: vec![upgrade_certificate_formed()], + output_asserts: vec![upgrade_certificate_formed::()], task_state_asserts: vec![], }, Expectations { @@ -496,7 +497,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { expectations: vec![ Expectations { output_asserts: vec![ - exact(ViewChange(ViewNumber::new(1))), + exact::(ViewChange(ViewNumber::new(1))), quorum_proposal_validated(), quorum_vote_send(), ], From fd9280afe8d43646ee3685e59eb0a1d459c1fa9f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 12 Apr 2024 09:49:09 +0300 Subject: [PATCH 0948/1393] constrain random builder to test transactions only (#2933) --- example-types/src/block_types.rs | 17 +---------------- testing/src/block_builder.rs | 23 +++++++++++++++++------ types/src/traits/block_contents.rs | 8 -------- 3 files changed, 18 insertions(+), 30 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 35312c2fde..15436d5f92 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -65,22 +65,7 @@ impl Committable for TestTransaction { } } -impl Transaction for TestTransaction { - /// Create a transaction from bytes - fn from_bytes(bytes: &[u8]) -> Self { - Self(bytes.to_vec()) - } - - /// Get the length of the transaction in bytes - fn len(&self) -> usize { - self.0.len() - } - - /// Returns whether or not the transaction is empty - fn is_empty(&self) -> bool { - self.0.is_empty() - } -} +impl Transaction for TestTransaction {} /// A [`BlockPayload`] that contains a list of `TestTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 2c47da52e9..b711b45958 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -21,10 +21,11 @@ use hotshot_builder_api::{ builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; +use hotshot_example_types::block_types::TestTransaction; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{ - block_contents::{vid_commitment, BlockHeader, Transaction}, + block_contents::{vid_commitment, BlockHeader}, election::Membership, node_implementation::NodeType, signature_key::BuilderSignatureKey, @@ -52,6 +53,7 @@ where for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, + TYPES: NodeType, for<'a> >::Error: Display, { async fn start( @@ -145,7 +147,10 @@ pub struct RandomBuilderSource { priv_key: ::BuilderPrivateKey, } -impl RandomBuilderSource { +impl RandomBuilderSource +where + TYPES: NodeType, +{ /// Create new [`RandomBuilderSource`] #[must_use] #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 @@ -170,7 +175,7 @@ impl RandomBuilderSource { let time_per_block = Duration::from_secs(1) / options.blocks_per_second; loop { let start = std::time::Instant::now(); - let transactions: Vec = (0..options.txn_in_block) + let transactions: Vec = (0..options.txn_in_block) .map(|_| { let mut bytes = vec![ 0; @@ -179,7 +184,7 @@ impl RandomBuilderSource { .expect("We are NOT running on a 16-bit platform") ]; rng.fill_bytes(&mut bytes); - TYPES::Transaction::from_bytes(&bytes) + TestTransaction(bytes) }) .collect(); @@ -282,6 +287,7 @@ where for<'a> <::PureAssembledSignatureType as TryFrom< &'a TaggedBase64, >>::Error: Display, + TYPES: NodeType, for<'a> >::Error: Display, { let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); @@ -496,8 +502,6 @@ fn build_block( AvailableBlockData, AvailableBlockHeaderInput, ) { - let block_size = transactions.iter().map(|t| t.len() as u64).sum::(); - let (block_payload, metadata) = TYPES::BlockPayload::from_transactions(transactions) .expect("failed to build block payload from transactions"); @@ -508,6 +512,13 @@ fn build_block( num_storage_nodes, ); + // Get block size from the encoded payload + let block_size = block_payload + .encode() + .expect("failed to encode block") + .collect::>() + .len() as u64; + let signature_over_block_info = { let mut block_info: Vec = Vec::new(); block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 2c86d67483..d0bc6a4fc6 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -25,14 +25,6 @@ use crate::{ pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { - /// Build a transaction from bytes - fn from_bytes(bytes: &[u8]) -> Self; - - /// Get the length of the transaction - fn len(&self) -> usize; - - /// Whether or not the transaction is empty - fn is_empty(&self) -> bool; } /// Abstraction over the full contents of a block From 3839bce01d7b5328a00934e488dc7483a26dfba1 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 15 Apr 2024 12:06:00 +0200 Subject: [PATCH 0949/1393] Lr/fallback switch (#2939) * Fallback network algorith refactoring * Reset primary network fail counter correctly * First check if primary network is down --- .../src/traits/networking/combined_network.rs | 68 ++++++++++++------- task-impls/src/request.rs | 6 +- types/src/traits/network.rs | 5 ++ 3 files changed, 51 insertions(+), 28 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index bd5b704646..3a56909a2b 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -6,7 +6,7 @@ use std::{ hash::{Hash, Hasher}, num::NonZeroUsize, sync::{ - atomic::{AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, }, time::Duration, @@ -28,10 +28,7 @@ use hotshot_types::traits::network::{ }; use hotshot_types::{ boxed_sync, - constants::{ - COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, - COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, - }, + constants::{COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES}, data::ViewNumber, message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, traits::{ @@ -45,7 +42,7 @@ use hotshot_types::{ use lru::LruCache; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::warn; +use tracing::{info, warn}; use vbs::version::StaticVersionType; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; @@ -71,8 +68,11 @@ pub struct CombinedNetworks { /// Last n seen messages to prevent processing duplicates message_cache: Arc>>, - /// If the primary network is down (0) or not, and for how many messages - primary_down: Arc, + /// How many times primary failed to deliver + primary_fail_counter: Arc, + + /// Whether primary is considered down + primary_down: Arc, /// delayed, cancelable tasks for secondary network delayed_tasks: DelayedTasksLockedMap, @@ -104,7 +104,8 @@ impl CombinedNetworks { message_cache: Arc::new(RwLock::new(LruCache::new( NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), ))), - primary_down: Arc::new(AtomicU64::new(0)), + primary_fail_counter: Arc::new(AtomicU64::new(0)), + primary_down: Arc::new(AtomicBool::new(false)), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(delay_duration)), } @@ -142,27 +143,31 @@ impl CombinedNetworks { primary_future: impl Future> + Send + 'static, secondary_future: impl Future> + Send + 'static, ) -> Result<(), NetworkError> { - // send optimistically on both networks, but if the primary network is down, skip it - let primary_down = self.primary_down.load(Ordering::Relaxed); + // Check if primary is down let mut primary_failed = false; - if primary_down < COMBINED_NETWORK_MIN_PRIMARY_FAILURES - || primary_down % COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL == 0 + if self.primary_down.load(Ordering::Relaxed) { + primary_failed = true; + } else if self.primary_fail_counter.load(Ordering::Relaxed) + > COMBINED_NETWORK_MIN_PRIMARY_FAILURES { - // send on the primary network as it is not down, or we are checking if it is back up - match primary_future.await { - Ok(()) => { - self.primary_down.store(0, Ordering::Relaxed); - } - Err(e) => { - warn!("Error on primary network: {}", e); - self.primary_down.fetch_add(1, Ordering::Relaxed); - primary_failed = true; - } - }; + warn!( + "Primary failed more than {} times and is considered down now", + COMBINED_NETWORK_MIN_PRIMARY_FAILURES + ); + self.primary_down.store(true, Ordering::Relaxed); + primary_failed = true; } + // always send on the primary network + if let Err(e) = primary_future.await { + warn!("Error on primary network: {}", e); + self.primary_fail_counter.fetch_add(1, Ordering::Relaxed); + primary_failed = true; + }; + if !primary_failed && Self::should_delay(&message) { let duration = *self.delay_duration.read().await; + let primary_fail_counter = self.primary_fail_counter.clone(); self.delayed_tasks .write() .await @@ -170,6 +175,8 @@ impl CombinedNetworks { .or_default() .push(async_spawn(async move { async_sleep(duration).await; + info!("Sending on secondary after delay, message possibly has not reached recipient on primary"); + primary_fail_counter.fetch_add(1, Ordering::Relaxed); secondary_future.await })); Ok(()) @@ -245,7 +252,8 @@ impl TestableNetworkingImplementation for CombinedNetwor message_cache: Arc::new(RwLock::new(LruCache::new( NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), ))), - primary_down: Arc::new(AtomicU64::new(0)), + primary_fail_counter: Arc::new(AtomicU64::new(0)), + primary_down: Arc::new(AtomicBool::new(false)), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(secondary_network_delay)), }; @@ -254,7 +262,8 @@ impl TestableNetworkingImplementation for CombinedNetwor message_cache: Arc::new(RwLock::new(LruCache::new( NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), ))), - primary_down: Arc::new(AtomicU64::new(0)), + primary_fail_counter: Arc::new(AtomicU64::new(0)), + primary_down: Arc::new(AtomicBool::new(false)), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(secondary_network_delay)), }; @@ -491,5 +500,12 @@ impl ConnectedNetwork, TYPES::SignatureKey> } join_all(cancel_tasks).await; }); + // View changed, let's start primary again + self.primary_down.store(false, Ordering::Relaxed); + self.primary_fail_counter.store(0, Ordering::Relaxed); + } + + fn is_primary_down(&self) -> bool { + self.primary_down.load(Ordering::Relaxed) } } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 5f61dc82b2..53866170a7 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -207,8 +207,10 @@ impl> DelayedRequester { request: RequestKind, signature: Signature, ) { - // Do the delay then start sending - async_sleep(self.delay).await; + // Do the delay only if primary is up and then start sending + if !self.network.is_primary_down() { + async_sleep(self.delay).await; + } match request { RequestKind::VID(view, key) => { self.do_vid::(VidRequest(view, key), signature).await; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 18db5d447e..570c7e660f 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -427,6 +427,11 @@ pub trait ConnectedNetwork: /// handles view update fn update_view(&self, _view: u64) {} + + /// Is primary network down? Makes sense only for combined network + fn is_primary_down(&self) -> bool { + false + } } /// A channel generator for types that need asynchronous execution From b35a52851e8485bd5ee9ce30532243939c76c7b2 Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Tue, 16 Apr 2024 07:39:03 -0400 Subject: [PATCH 0950/1393] Miscellaneous Builder changes (#2956) --- builder-api/src/block_info.rs | 9 +- builder-api/src/data_source.rs | 3 +- task-impls/src/builder.rs | 3 +- task-impls/src/helpers.rs | 41 ++++--- task-impls/src/response.rs | 2 +- task-impls/src/transactions.rs | 141 ++++++++++++++++++++----- task-impls/src/vid.rs | 7 +- testing/src/block_builder.rs | 31 ++++-- testing/tests/tests_1/block_builder.rs | 8 +- types/src/traits/block_contents.rs | 21 +++- types/src/vid.rs | 2 + 11 files changed, 202 insertions(+), 66 deletions(-) diff --git a/builder-api/src/block_info.rs b/builder-api/src/block_info.rs index df5bdcbe69..59404c4bb8 100644 --- a/builder-api/src/block_info.rs +++ b/builder-api/src/block_info.rs @@ -3,7 +3,7 @@ use std::{hash::Hash, marker::PhantomData}; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey, BlockPayload}, utils::BuilderCommitment, - vid::VidCommitment, + vid::{VidCommitment, VidPrecomputeData}, }; use serde::{Deserialize, Serialize}; @@ -33,7 +33,12 @@ pub struct AvailableBlockData { #[serde(bound = "")] pub struct AvailableBlockHeaderInput { pub vid_commitment: VidCommitment, - pub signature: + pub vid_precompute_data: VidPrecomputeData, + // signature over vid_commitment, BlockPayload::Metadata, and offered_fee + pub fee_signature: + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, + // signature over the current response + pub message_signature: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, pub sender: ::BuilderSignatureKey, pub _phantom: PhantomData, diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index 90641e1358..d168ba6ab9 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, - vid::VidCommitment, }; use crate::{ @@ -15,7 +14,7 @@ pub trait BuilderDataSource { /// To get the list of available blocks async fn get_available_blocks( &self, - for_parent: &VidCommitment, + for_parent: &BuilderCommitment, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result>, BuildError>; diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index b15aa6e267..6266b188e9 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -8,7 +8,6 @@ use hotshot_builder_api::{ use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, - vid::VidCommitment, }; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -104,7 +103,7 @@ impl BuilderClient { /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly pub async fn get_available_blocks( &self, - parent: VidCommitment, + parent: BuilderCommitment, sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result>, BuilderClientError> { diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index bbf6f9e204..5c6d552b56 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -2,15 +2,15 @@ use std::sync::Arc; use async_broadcast::{SendError, Sender}; #[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn_blocking, JoinHandle}; +use async_std::task::JoinHandle; use hotshot_types::{ data::VidDisperse, traits::{election::Membership, node_implementation::NodeType}, - vid::vid_scheme, + vid::{vid_scheme, VidPrecomputeData}, }; -use jf_primitives::vid::VidScheme; +use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; #[cfg(async_executor_impl = "tokio")] -use tokio::task::{spawn_blocking, JoinHandle}; +use tokio::task::JoinHandle; /// Cancel a task pub async fn cancel_task(task: JoinHandle) { @@ -42,20 +42,31 @@ pub async fn broadcast_event(event: E, sender: &Send /// /// # Panics /// Panics if the VID calculation fails, this should not happen. -pub async fn calculate_vid_disperse( - txns: Vec, - membership: Arc, +#[allow(clippy::panic)] +pub fn calculate_vid_disperse( + txns: &[u8], + membership: &Arc, view: TYPES::Time, ) -> VidDisperse { let num_nodes = membership.total_nodes(); - let vid_disperse = spawn_blocking(move || { - #[allow(clippy::panic)] - vid_scheme(num_nodes).disperse(&txns).unwrap_or_else(|err|panic!("VID disperse failure:\n\t(num_storage nodes,payload_byte_len)=({num_nodes},{})\n\terror: : {err}", txns.len())) - }) - .await; - #[cfg(async_executor_impl = "tokio")] - // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. - let vid_disperse = vid_disperse.unwrap(); + let vid_disperse = vid_scheme(num_nodes).disperse(txns).unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())); + + VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) +} + +/// Calculate the vid disperse information from the payload given a view and membership, and precompute data from builder +/// +/// # Panics +/// Panics if the VID calculation fails, this should not happen. +#[allow(clippy::panic)] +pub fn calculate_vid_disperse_using_precompute_data( + txns: &Vec, + membership: &Arc, + view: TYPES::Time, + pre_compute_data: &VidPrecomputeData, +) -> VidDisperse { + let num_nodes = membership.total_nodes(); + let vid_disperse = vid_scheme(num_nodes).disperse_precompute(txns, pre_compute_data).unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())); VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index c267631fd2..b7299cc5c2 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -133,7 +133,7 @@ impl NetworkResponseState { .is_some_and(|m| m.contains_key(key)); if !contained { let txns = consensus.saved_payloads.get(&view)?; - let vid = calculate_vid_disperse(txns.clone(), self.quorum.clone(), view).await; + let vid = calculate_vid_disperse(&txns.clone(), &self.quorum.clone(), view); let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; for share in shares { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e8c1c447fc..f4fa24bd5b 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -6,7 +6,9 @@ use std::{ use async_broadcast::Sender; use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; -use hotshot_builder_api::block_info::{AvailableBlockData, AvailableBlockHeaderInput}; +use hotshot_builder_api::block_info::{ + AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo, +}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, @@ -16,7 +18,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, node_implementation::{NodeImplementation, NodeType}, - signature_key::SignatureKey, + signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, }, }; @@ -29,6 +31,18 @@ use crate::{ helpers::broadcast_event, }; +/// Builder Provided Responses +pub struct BuilderResponses { + /// Initial API response + /// It contains information about the available blocks + pub blocks_initial_info: AvailableBlockInfo, + /// Second API response + /// It contains information about the chosen blocks + pub block_data: AvailableBlockData, + /// Third API response + /// It contains the final block information + pub block_header: AvailableBlockHeaderInput, +} /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, @@ -108,10 +122,10 @@ impl< return None; } - if let Some((block, _)) = self.wait_for_block().await { + if let Some(BuilderResponses { block_data, .. }) = self.wait_for_block().await { // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; - let encoded_transactions = match block.block_payload.encode() { + let encoded_transactions = match block_data.block_payload.encode() { Ok(encoded) => encoded.into_iter().collect::>(), Err(e) => { error!("Failed to encode the block payload: {:?}.", e); @@ -121,7 +135,7 @@ impl< broadcast_event( Arc::new(HotShotEvent::BlockRecv( encoded_transactions, - block.metadata, + block_data.metadata, block_view, )), &event_stream, @@ -142,32 +156,37 @@ impl< } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - async fn wait_for_block( - &self, - ) -> Option<(AvailableBlockData, AvailableBlockHeaderInput)> { + async fn wait_for_block(&self) -> Option> { let task_start_time = Instant::now(); let last_leaf = self.consensus.read().await.get_decided_leaf(); - let mut latest_block: Option<( - AvailableBlockData, - AvailableBlockHeaderInput, - )> = None; + let mut latest_block: Option> = None; while task_start_time.elapsed() < self.api.propose_max_round_time() - && latest_block.as_ref().map_or(true, |(data, _)| { - data.block_payload.num_transactions(&data.metadata) < self.api.min_transactions() + && latest_block.as_ref().map_or(true, |builder_response| { + builder_response + .block_data + .block_payload + .num_transactions(&builder_response.block_data.metadata) + < self.api.min_transactions() }) { let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( &self.private_key, - last_leaf.get_block_header().payload_commitment().as_ref(), + last_leaf + .get_block_header() + .builder_commitment(last_leaf.get_block_header().metadata()) + .as_ref(), ) else { error!("Failed to sign block hash"); continue; }; + let mut available_blocks = match self .builder_client .get_available_blocks( - last_leaf.get_block_header().payload_commitment(), + last_leaf + .get_block_header() + .builder_commitment(last_leaf.get_block_header().metadata()), self.public_key.clone(), &request_signature, ) @@ -186,9 +205,32 @@ impl< continue; }; + // Verify signature over chosen block instead of + // verifying the signature over all the blocks received from builder + let combined_message_bytes = { + let mut combined_response_bytes: Vec = Vec::new(); + combined_response_bytes + .extend_from_slice(block_info.block_size.to_be_bytes().as_ref()); + combined_response_bytes + .extend_from_slice(block_info.offered_fee.to_be_bytes().as_ref()); + combined_response_bytes.extend_from_slice(block_info.block_hash.as_ref()); + combined_response_bytes + }; + if !block_info + .sender + .validate_builder_signature(&block_info.signature, &combined_message_bytes) + { + error!("Failed to verify available block info response message signature"); + continue; + } + // Don't try to re-claim the same block if builder advertises it again - if latest_block.as_ref().map_or(false, |block| { - block.0.block_payload.builder_commitment(&block.0.metadata) == block_info.block_hash + if latest_block.as_ref().map_or(false, |builder_response| { + builder_response + .block_data + .block_payload + .builder_commitment(&builder_response.block_data.metadata) + == block_info.block_hash }) { continue; } @@ -203,11 +245,24 @@ impl< let (block, header_input) = futures::join! { self.builder_client.claim_block(block_info.block_hash.clone(), self.public_key.clone(), &request_signature), - self.builder_client.claim_block_header_input(block_info.block_hash, self.public_key.clone(), &request_signature) + self.builder_client.claim_block_header_input(block_info.block_hash.clone(), self.public_key.clone(), &request_signature) }; - let block = match block { - Ok(val) => val, + let block_data = match block { + Ok(block_data) => { + // verify the signature over the message, construct the builder commitment + let builder_commitment = block_data + .block_payload + .builder_commitment(&block_data.metadata); + if !block_data.sender.validate_builder_signature( + &block_data.signature, + builder_commitment.as_ref(), + ) { + error!("Failed to verify available block data response message signature"); + continue; + } + block_data + } Err(err) => { error!(%err, "Failed to claim block"); continue; @@ -215,16 +270,54 @@ impl< }; let header_input = match header_input { - Ok(val) => val, + Ok(header_input) => { + // first verify the message signature and later verify the fee_signature + if !header_input.sender.validate_builder_signature( + &header_input.message_signature, + header_input.vid_commitment.as_ref(), + ) { + error!("Failed to verify available block header input data response message signature"); + continue; + } + + let offered_fee = block_info.offered_fee; + let builder_commitment = block_data + .block_payload + .builder_commitment(&block_data.metadata); + let vid_commitment = header_input.vid_commitment; + let combined_response_bytes = { + let mut combined_response_bytes: Vec = Vec::new(); + combined_response_bytes + .extend_from_slice(offered_fee.to_be_bytes().as_ref()); + combined_response_bytes.extend_from_slice(builder_commitment.as_ref()); + combined_response_bytes.extend_from_slice(vid_commitment.as_ref()); + combined_response_bytes + }; + // verify the signature over the message + if !header_input.sender.validate_builder_signature( + &header_input.fee_signature, + combined_response_bytes.as_ref(), + ) { + error!("Failed to verify fee signature"); + continue; + } + header_input + } Err(err) => { error!(%err, "Failed to claim block"); continue; } }; - let num_txns = block.block_payload.num_transactions(&block.metadata); + let num_txns = block_data + .block_payload + .num_transactions(&block_data.metadata); - latest_block = Some((block, header_input)); + latest_block = Some(BuilderResponses { + blocks_initial_info: block_info, + block_data, + block_header: header_input, + }); if num_txns >= self.api.min_transactions() { return latest_block; } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 0fb77cecf6..bcc1218dce 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -62,11 +62,10 @@ impl, A: ConsensusApi + match event.as_ref() { HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number) => { let vid_disperse = calculate_vid_disperse( - encoded_transactions.clone(), - self.membership.clone(), + &encoded_transactions.clone(), + &self.membership.clone(), *view_number, - ) - .await; + ); // send the commitment and metadata to consensus for block building broadcast_event( Arc::new(HotShotEvent::SendPayloadCommitmentAndMetadata( diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index b711b45958..85275a1c63 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -25,13 +25,12 @@ use hotshot_example_types::block_types::TestTransaction; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{ - block_contents::{vid_commitment, BlockHeader}, + block_contents::{precompute_vid_commitment, BlockHeader}, election::Membership, node_implementation::NodeType, signature_key::BuilderSignatureKey, }, utils::BuilderCommitment, - vid::VidCommitment, }; use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; @@ -227,7 +226,7 @@ impl ReadState for RandomBuilderSource { impl BuilderDataSource for RandomBuilderSource { async fn get_available_blocks( &self, - _for_parent: &VidCommitment, + _for_parent: &BuilderCommitment, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { @@ -331,7 +330,7 @@ impl ReadState for SimpleBuilderSource { impl BuilderDataSource for SimpleBuilderSource { async fn get_available_blocks( &self, - _for_parent: &VidCommitment, + _for_parent: &BuilderCommitment, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { @@ -507,7 +506,7 @@ fn build_block( let commitment = block_payload.builder_commitment(&metadata); - let vid_commitment = vid_commitment( + let (vid_commitment, precompute_data) = precompute_vid_commitment( &block_payload.encode().unwrap().collect(), num_storage_nodes, ); @@ -524,6 +523,7 @@ fn build_block( block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); block_info.extend_from_slice(commitment.as_ref()); + match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, &block_info) { Ok(sig) => sig, Err(e) => { @@ -550,15 +550,28 @@ fn build_block( } }; + let signature_over_fee_info = { + let mut fee_info: Vec = Vec::new(); + fee_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); + fee_info.extend_from_slice(commitment.as_ref()); + fee_info.extend_from_slice(vid_commitment.as_ref()); + match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, &fee_info) { + Ok(sig) => sig, + Err(e) => { + panic!("Failed to sign block: {}", e); + } + } + }; + let block = AvailableBlockData { block_payload, metadata, sender: pub_key.clone(), - signature: signature_over_block_info, + signature: signature_over_builder_commitment, }; let metadata = AvailableBlockInfo { sender: pub_key.clone(), - signature: signature_over_builder_commitment, + signature: signature_over_block_info, block_hash: commitment, block_size, offered_fee: 123, @@ -566,7 +579,9 @@ fn build_block( }; let header_input = AvailableBlockHeaderInput { vid_commitment, - signature: signature_over_vid_commitment, + vid_precompute_data: precompute_data, + message_signature: signature_over_vid_commitment.clone(), + fee_signature: signature_over_fee_info, sender: pub_key, _phantom: std::marker::PhantomData, }; diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 7aad10cd32..f0dda37bd4 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -7,10 +7,7 @@ use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; use hotshot_testing::block_builder::run_random_builder; use hotshot_types::{ constants::Version01, - traits::{ - block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, - BlockPayload, - }, + traits::{node_implementation::NodeType, signature_key::SignatureKey, BlockPayload}, }; use std::time::Duration; use tide_disco::Url; @@ -25,6 +22,7 @@ async fn test_random_block_builder() { use std::time::Instant; use hotshot_builder_api::block_info::AvailableBlockData; + use hotshot_types::utils::BuilderCommitment; let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); @@ -43,7 +41,7 @@ async fn test_random_block_builder() { let mut blocks = loop { // Test getting blocks let blocks = client - .get_available_blocks(vid_commitment(&vec![], 1), pub_key, &signature) + .get_available_blocks(BuilderCommitment::from_bytes(&vec![]), pub_key, &signature) .await .expect("Failed to get available blocks"); diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index d0bc6a4fc6..c7bdd2aa67 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -11,7 +11,7 @@ use std::{ }; use committable::{Commitment, Committable}; -use jf_primitives::vid::VidScheme; +use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Serialize}; use crate::{ @@ -116,12 +116,27 @@ pub trait TestableBlock: BlockPayload + Debug { /// # Panics /// If the VID computation fails. #[must_use] +#[allow(clippy::panic)] pub fn vid_commitment( encoded_transactions: &Vec, num_storage_nodes: usize, ) -> ::Commit { - #[allow(clippy::panic)] - vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:\n\t(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}\n\t{err}", encoded_transactions.len())) + vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}) error: {err}", encoded_transactions.len())) +} + +/// Compute the VID payload commitment along with precompute data reducing time in VID Disperse +/// # Panics +/// If the VID computation fails. +#[must_use] +#[allow(clippy::panic)] +pub fn precompute_vid_commitment( + encoded_transactions: &Vec, + num_storage_nodes: usize, +) -> ( + ::Commit, + ::PrecomputeData, +) { + vid_scheme(num_storage_nodes).commit_only_precompute(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}) error: {err}", encoded_transactions.len())) } /// The number of storage nodes to use when computing the genesis VID commitment. diff --git a/types/src/vid.rs b/types/src/vid.rs index 7b50c06bf7..a744b227ef 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -72,6 +72,8 @@ pub type VidCommitment = ::Commit; pub type VidCommon = ::Common; /// VID share type pub type VidShare = ::Share; +/// VID PrecomputeData type +pub type VidPrecomputeData = ::PrecomputeData; #[cfg(not(feature = "gpu-vid"))] /// Internal Jellyfish VID scheme From 41d85e549360faeead56b0a77a03a69b34b235b2 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 16 Apr 2024 18:18:49 +0300 Subject: [PATCH 0951/1393] Update HS tag (#2955) * update HS tag * update HS tag (again) --- example-types/Cargo.toml | 2 +- examples/Cargo.toml | 6 +++--- hotshot/Cargo.toml | 4 ++-- testing/Cargo.toml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index f90c9da8b3..3973b724ef 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -22,7 +22,7 @@ either = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } hotshot-types = { path = "../types" } -hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } rand = { workspace = true } snafu = { workspace = true } tracing = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index a52b60b3e1..6f21040ea9 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -113,10 +113,10 @@ custom_debug = { workspace = true } dashmap = "5" either = { workspace = true } futures = { workspace = true } -hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } -hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } +hotshot-web-server = { version = "0.5.36", path = "../web_server", default-features = false } +hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types" } -hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 7222af901e..2bf27848bf 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -40,9 +40,9 @@ either = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } hotshot-task = { path = "../task" } -hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-types = { path = "../types" } -hotshot-web-server = { version = "0.5.26", path = "../web_server", default-features = false } +hotshot-web-server = { version = "0.5.36", path = "../web_server", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } lru = "0.12" diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 3a5f75aa37..b4b49a3794 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -26,9 +26,9 @@ futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } hotshot-example-types = { path = "../example-types" } hotshot-macros = { path = "../macros" } -hotshot-orchestrator = { version = "0.5.26", path = "../orchestrator", default-features = false } +hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } -hotshot-task-impls = { path = "../task-impls", version = "0.5.26", default-features = false } +hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } jf-primitives = { workspace = true } From 0579294fff45aadb29865aeab555c02f047a7f87 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 16 Apr 2024 15:40:27 +0000 Subject: [PATCH 0952/1393] Add (integrated) builder support to examples (#2935) * Add (integrated) builder support to examples * Improve builder configurability in examples * Reduce duplicate transactions with simple builder --- builder-api/src/builder.rs | 39 ++-- examples/Cargo.toml | 1 + examples/infra/mod.rs | 53 +++++- hotshot/src/lib.rs | 24 ++- .../src/network/behaviours/dht/mod.rs | 8 +- orchestrator/run-config.toml | 5 + orchestrator/src/config.rs | 58 +++++- task-impls/src/quorum_proposal.rs | 15 +- testing/src/block_builder.rs | 167 ++++++++++-------- testing/src/predicates/event.rs | 3 +- testing/src/predicates/upgrade.rs | 3 +- testing/src/test_runner.rs | 7 +- testing/tests/tests_1/block_builder.rs | 6 +- 13 files changed, 261 insertions(+), 128 deletions(-) diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 592dc4262b..96898a3ad1 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -1,20 +1,17 @@ -use std::{fmt::Display, path::PathBuf}; +use std::path::PathBuf; use clap::Args; use committable::Committable; use derive_more::From; use futures::FutureExt; -use hotshot_types::{ - traits::{node_implementation::NodeType, signature_key::SignatureKey}, - utils::BuilderCommitment, -}; +use hotshot_types::{traits::node_implementation::NodeType, utils::BuilderCommitment}; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use tagged_base64::TaggedBase64; use tide_disco::{ api::ApiError, method::{ReadState, WriteState}, - Api, RequestError, StatusCode, + Api, RequestError, RequestParams, StatusCode, }; use vbs::version::StaticVersionType; @@ -116,6 +113,20 @@ impl tide_disco::error::Error for Error { } } +fn try_extract_param TryFrom<&'a TaggedBase64>>( + params: &RequestParams, + param_name: &str, +) -> Result { + params + .param(param_name)? + .as_tagged_base64()? + .try_into() + .map_err(|_| Error::Custom { + message: format!("Invalid {param_name}"), + status: StatusCode::UnprocessableEntity, + }) +} + pub fn define_api( options: &Options, ) -> Result, ApiError> @@ -123,10 +134,6 @@ where State: 'static + Send + Sync + ReadState, ::State: Send + Sync + BuilderDataSource, Types: NodeType, - for<'a> >::Error: Display, - for<'a> <::PureAssembledSignatureType as TryFrom< - &'a TaggedBase64, - >>::Error: Display, { let mut api = load_api::( options.api_path.as_ref(), @@ -137,8 +144,8 @@ where .get("available_blocks", |req, state| { async move { let hash = req.blob_param("parent_hash")?; - let sender = req.blob_param("sender")?; - let signature = req.blob_param("signature")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; state .get_available_blocks(&hash, sender, &signature) .await @@ -151,8 +158,8 @@ where .get("claim_block", |req, state| { async move { let hash: BuilderCommitment = req.blob_param("block_hash")?; - let sender = req.blob_param("sender")?; - let signature = req.blob_param("signature")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; state .claim_block(&hash, sender, &signature) .await @@ -165,8 +172,8 @@ where .get("claim_header_input", |req, state| { async move { let hash: BuilderCommitment = req.blob_param("block_hash")?; - let sender = req.blob_param("sender")?; - let signature = req.blob_param("signature")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; state .claim_block_header_input(&hash, sender, &signature) .await diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 6f21040ea9..43ea461944 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -116,6 +116,7 @@ futures = { workspace = true } hotshot-web-server = { version = "0.5.36", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types" } +hotshot-testing = { path = "../testing" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index bfd7b485f2..d5e8d453e1 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -15,7 +15,7 @@ use async_compatibility_layer::{ use async_trait::async_trait; use cdn_broker::reexports::{crypto::signature::KeyPair, message::Topic}; use chrono::Utc; -use clap::{Arg, Command, Parser}; +use clap::{value_parser, Arg, Command, Parser}; use futures::StreamExt; use hotshot::{ traits::{ @@ -38,10 +38,13 @@ use hotshot_orchestrator::{ self, client::{BenchResults, OrchestratorClient, ValidatorArgs}, config::{ - CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, NetworkConfigSource, + BuilderType, CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, NetworkConfigSource, WebServerConfig, }, }; +use hotshot_testing::block_builder::{ + RandomBuilderImplementation, SimpleBuilderImplementation, TestBuilderImplementation, +}; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, @@ -190,6 +193,15 @@ pub fn read_orchestrator_init_config() -> ( .help("Sets the number of fixed leader for gpu vid, only be used when leaders running on gpu") .required(false), ) + .arg( + Arg::new("builder") + .short('b') + .long("builder") + .value_name("BUILDER_TYPE") + .value_parser(value_parser!(BuilderType)) + .help("Sets type of builder. `simple` or `random` to run corresponding integrated builder, `external` to use the one specified by `[config.builder_url]` in config") + .required(false), + ) .get_matches(); if let Some(config_file_string) = matches.get_one::("config_file") { @@ -250,6 +262,9 @@ pub fn read_orchestrator_init_config() -> ( }; config.da_web_server_config = Some(updated_da_web_server_config); } + if let Some(builder_type) = matches.get_one::("builder") { + config.builder = *builder_type; + } (config, orchestrator_url) } @@ -1016,7 +1031,7 @@ pub async fn main_entry_point< // It returns the complete config which also includes peer's public key and public config. // This function will be taken solely by sequencer right after OrchestratorClient::new, // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. - let (run_config, source) = + let (mut run_config, source) = NetworkConfig::::get_complete_config( &orchestrator_client, args.clone().network_config_file, @@ -1027,10 +1042,42 @@ pub async fn main_entry_point< .await .expect("failed to get config"); + let builder_task = match run_config.builder { + BuilderType::External => None, + BuilderType::Random => { + let (builder_task, builder_url) = + >::start( + run_config.config.num_nodes_with_stake.into(), + run_config.random_builder.clone().unwrap_or_default(), + ) + .await; + + run_config.config.builder_url = builder_url; + + builder_task + } + BuilderType::Simple => { + let (builder_task, builder_url) = + >::start( + run_config.config.num_nodes_with_stake.into(), + (), + ) + .await; + + run_config.config.builder_url = builder_url; + + builder_task + } + }; + info!("Initializing networking"); let run = RUNDA::initialize_networking(run_config.clone(), args.advertise_address).await; let hotshot = run.initialize_state_and_hotshot().await; + if let Some(task) = builder_task { + task.start(Box::new(hotshot.get_event_stream())); + } + // pre-generate transactions let NetworkConfig { transaction_size, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 3ce65bf6fc..4c25109860 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -12,18 +12,6 @@ pub mod types; pub mod tasks; -#[cfg(feature = "proposal-task")] -use crate::tasks::add_quorum_proposal_task; - -use crate::{ - tasks::{ - add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_transaction_task, add_upgrade_task, add_view_sync_task, - }, - traits::NodeImplementation, - types::{Event, SystemContextHandle}, -}; - use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, @@ -61,7 +49,6 @@ use hotshot_types::{ }, HotShotConfig, }; - // -- Rexports // External /// Reexport rand crate @@ -70,6 +57,17 @@ use tasks::{add_request_network_task, add_response_task, add_vid_task}; use tracing::{debug, instrument, trace}; use vbs::version::Version; +#[cfg(feature = "proposal-task")] +use crate::tasks::add_quorum_proposal_task; +use crate::{ + tasks::{ + add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, + add_transaction_task, add_upgrade_task, add_view_sync_task, + }, + traits::NodeImplementation, + types::{Event, SystemContextHandle}, +}; + /// Length, in bytes, of a 512 bit hash pub const H_512: usize = 64; /// Length, in bytes, of a 256 bit hash diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index f49af0de4e..25d65dc5af 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -13,8 +13,9 @@ use libp2p::kad::{ /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapOk, GetClosestPeersOk, GetRecordOk, GetRecordResult, ProgressStep, PutRecordResult, QueryId, QueryResult, Record, }; -use libp2p::kad::{store::RecordStore, Behaviour as KademliaBehaviour}; -use libp2p::kad::{BootstrapError, Event as KademliaEvent}; +use libp2p::kad::{ + store::RecordStore, Behaviour as KademliaBehaviour, BootstrapError, Event as KademliaEvent, +}; use libp2p_identity::PeerId; use tracing::{error, info, warn}; @@ -27,9 +28,8 @@ lazy_static! { static ref MAX_DHT_QUERY_SIZE: NonZeroUsize = NonZeroUsize::new(50).unwrap(); } -use crate::network::{ClientRequest, NetworkEvent}; - use super::exponential_backoff::ExponentialBackoff; +use crate::network::{ClientRequest, NetworkEvent}; /// Behaviour wrapping libp2p's kademlia /// included: diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index dce2b74f86..2ccc11dcdc 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -68,6 +68,11 @@ mesh_n = 4 online_time = 10 server_mode = true +[random_builder] +txn_in_block = 100 +blocks_per_second = 1 +txn_size = { start = 20, end = 100 } + [web_server_config] url = "http://localhost:9000" diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index ad22fa25af..0da0b88078 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -2,11 +2,13 @@ use std::{ env, fs, net::SocketAddr, num::NonZeroUsize, + ops::Range, path::{Path, PathBuf}, time::Duration, vec, }; +use clap::ValueEnum; use hotshot_types::{ traits::{election::ElectionConfig, signature_key::SignatureKey}, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, @@ -118,6 +120,40 @@ pub enum NetworkConfigError { FailedToCreatePath(std::io::Error), } +#[derive(Clone, Copy, Debug, serde::Serialize, serde::Deserialize, Default, ValueEnum)] +/// configuration for builder type to use +pub enum BuilderType { + /// Use external builder, [config.builder_url] must be + /// set to correct builder address + External, + #[default] + /// Simple integrated builder will be started and used by each hotshot node + Simple, + /// Random integrated builder will be started and used by each hotshot node + Random, +} + +/// Options controlling how the random builder generates blocks +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct RandomBuilderConfig { + /// How many transactions to include in a block + pub txn_in_block: u64, + /// How many blocks to generate per second + pub blocks_per_second: u32, + /// Range of how big a transaction can be (in bytes) + pub txn_size: Range, +} + +impl Default for RandomBuilderConfig { + fn default() -> Self { + Self { + txn_in_block: 100, + blocks_per_second: 1, + txn_size: 20..100, + } + } +} + /// a network configuration #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] #[serde(bound(deserialize = ""))] @@ -164,6 +200,10 @@ pub struct NetworkConfig { pub combined_network_config: Option, /// the commit this run is based on pub commit_sha: String, + /// builder to use + pub builder: BuilderType, + /// random builder config + pub random_builder: Option, } /// the source of the network config @@ -408,6 +448,8 @@ impl Default for NetworkConfig { propose_max_round_time: Duration::from_secs(10), data_request_delay: Duration::from_millis(2500), commit_sha: String::new(), + builder: BuilderType::default(), + random_builder: None, } } } @@ -453,6 +495,12 @@ pub struct NetworkConfigFile { /// combined network config #[serde(default)] pub combined_network_config: Option, + /// builder to use + #[serde(default)] + pub builder: BuilderType, + /// random builder configuration + #[serde(default)] + pub random_builder: Option, } impl From> for NetworkConfig { @@ -496,10 +544,17 @@ impl From> for NetworkC da_web_server_config: val.da_web_server_config, combined_network_config: val.combined_network_config, commit_sha: String::new(), + builder: val.builder, + random_builder: val.random_builder, } } } +/// Default builder URL, used as placeholder +fn default_builder_url() -> Url { + Url::parse("http://localhost:3311").unwrap() +} + /// Holds configuration for a `HotShot` #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] @@ -546,6 +601,7 @@ pub struct HotShotConfigFile { /// Time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// Builder API base URL + #[serde(default = "default_builder_url")] pub builder_url: Url, } @@ -681,7 +737,7 @@ impl Default for HotShotConfigFile { propose_min_round_time: Duration::from_secs(0), propose_max_round_time: Duration::from_secs(10), data_request_delay: Duration::from_millis(200), - builder_url: Url::parse("http://localhost:3311").unwrap(), + builder_url: default_builder_url(), } } } diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index de64c54979..e1d737408f 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -1,9 +1,10 @@ -use crate::{consensus::validate_proposal, helpers::AnyhowTracing}; use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use committable::Committable; use either::Either; use futures::future::FutureExt; @@ -29,17 +30,15 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, info, instrument, warn}; use crate::{ - consensus::CommitmentAndMetadata, + consensus::{validate_proposal, CommitmentAndMetadata}, events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + helpers::{broadcast_event, cancel_task, AnyhowTracing}, }; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 85275a1c63..0e27852b77 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -1,10 +1,9 @@ use std::{ collections::HashMap, - fmt::Display, num::NonZeroUsize, - ops::{Deref, Range}, + ops::Deref, sync::Arc, - time::Duration, + time::{Duration, Instant}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -22,11 +21,11 @@ use hotshot_builder_api::{ data_source::BuilderDataSource, }; use hotshot_example_types::block_types::TestTransaction; +use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{ block_contents::{precompute_vid_commitment, BlockHeader}, - election::Membership, node_implementation::NodeType, signature_key::BuilderSignatureKey, }, @@ -34,33 +33,34 @@ use hotshot_types::{ }; use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; -use tagged_base64::TaggedBase64; use tide_disco::{method::ReadState, App, Url}; #[async_trait] pub trait TestBuilderImplementation { + type Config: Default; + async fn start( - membership: Arc<::Membership>, + num_storage_nodes: usize, + options: Self::Config, ) -> (Option>>, Url); } pub struct RandomBuilderImplementation; #[async_trait] -impl TestBuilderImplementation for RandomBuilderImplementation +impl TestBuilderImplementation for RandomBuilderImplementation where - for<'a> <::PureAssembledSignatureType as TryFrom< - &'a TaggedBase64, - >>::Error: Display, TYPES: NodeType, - for<'a> >::Error: Display, { + type Config = RandomBuilderConfig; + async fn start( - _membership: Arc, + num_storage_nodes: usize, + config: RandomBuilderConfig, ) -> (Option>>, Url) { let port = portpicker::pick_unused_port().expect("No free ports"); let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); - run_random_builder::(url.clone()); + run_random_builder::(url.clone(), num_storage_nodes, config); (None, url) } } @@ -68,19 +68,16 @@ where pub struct SimpleBuilderImplementation; #[async_trait] -impl TestBuilderImplementation for SimpleBuilderImplementation -where - for<'a> <::PureAssembledSignatureType as TryFrom< - &'a TaggedBase64, - >>::Error: Display, - for<'a> >::Error: Display, -{ +impl TestBuilderImplementation for SimpleBuilderImplementation { + type Config = (); + async fn start( - membership: Arc, + num_storage_nodes: usize, + _config: Self::Config, ) -> (Option>>, Url) { let port = portpicker::pick_unused_port().expect("No free ports"); let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); - let (source, task) = make_simple_builder(membership).await; + let (source, task) = make_simple_builder(num_storage_nodes).await; let builder_api = hotshot_builder_api::builder::define_api::< SimpleBuilderSource, @@ -105,30 +102,6 @@ struct BlockEntry { header_input: Option>, } -/// Options controlling how the random builder generates blocks -#[derive(Clone, Debug)] -pub struct RandomBuilderOptions { - /// How many transactions to include in a block - pub txn_in_block: u64, - /// How many blocks to generate per second - pub blocks_per_second: u32, - /// Range of how big a transaction can be (in bytes) - pub txn_size: Range, - /// Number of storage nodes for VID commitment - pub num_storage_nodes: usize, -} - -impl Default for RandomBuilderOptions { - fn default() -> Self { - Self { - txn_in_block: 100, - blocks_per_second: 1, - txn_size: 20..100, - num_storage_nodes: 1, - } - } -} - /// A mock implementation of the builder data source. /// Builds random blocks, doesn't track HotShot state at all. /// Evicts old available blocks if HotShot doesn't keep up. @@ -146,7 +119,7 @@ pub struct RandomBuilderSource { priv_key: ::BuilderPrivateKey, } -impl RandomBuilderSource +impl RandomBuilderSource where TYPES: NodeType, { @@ -166,7 +139,7 @@ where /// Spawn a task building blocks, configured with given options #[allow(clippy::missing_panics_doc)] // ony panics on 16-bit platforms - pub fn run(&self, options: RandomBuilderOptions) { + pub fn run(&self, num_storage_nodes: usize, options: RandomBuilderConfig) { let blocks = self.blocks.clone(); let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key.clone()); async_spawn(async move { @@ -189,7 +162,7 @@ where let (metadata, payload, header_input) = build_block( transactions, - options.num_storage_nodes, + num_storage_nodes, pub_key.clone(), priv_key.clone(), ); @@ -281,17 +254,13 @@ impl BuilderDataSource for RandomBuilderSource { /// /// # Panics /// If constructing and launching the builder fails for any reason -pub fn run_random_builder(url: Url) +pub fn run_random_builder(url: Url, num_storage_nodes: usize, options: RandomBuilderConfig) where - for<'a> <::PureAssembledSignatureType as TryFrom< - &'a TaggedBase64, - >>::Error: Display, TYPES: NodeType, - for<'a> >::Error: Display, { let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); let source = RandomBuilderSource::new(pub_key, priv_key); - source.run(RandomBuilderOptions::default()); + source.run(num_storage_nodes, options); let builder_api = hotshot_builder_api::builder::define_api::, TYPES, Version01>( @@ -305,12 +274,18 @@ where async_spawn(app.serve(url, STATIC_VER_0_1)); } +#[derive(Debug, Clone)] +struct SubmittedTransaction { + claimed: Option, + transaction: TYPES::Transaction, +} + pub struct SimpleBuilderSource { pub_key: TYPES::BuilderSignatureKey, priv_key: ::BuilderPrivateKey, - membership: Arc, + num_storage_nodes: usize, #[allow(clippy::type_complexity)] - transactions: Arc, TYPES::Transaction>>>, + transactions: Arc, SubmittedTransaction>>>, blocks: Arc>>>, } @@ -337,12 +312,25 @@ impl BuilderDataSource for SimpleBuilderSource { let transactions = self .transactions .read(|txns| { - Box::pin(async { txns.values().cloned().collect::>() }) + Box::pin(async { + txns.values() + .filter(|txn| { + // We want transactions that are either unclaimed, or claimed long ago + // and thus probably not included, or they would've been decided on + // already and removed from the queue + txn.claimed + .map(|claim_time| claim_time.elapsed() > Duration::from_secs(30)) + .unwrap_or(true) + }) + .cloned() + .map(|txn| txn.transaction) + .collect::>() + }) }) .await; let (metadata, payload, header_input) = build_block( transactions, - self.membership.total_nodes(), + self.num_storage_nodes, self.pub_key.clone(), self.priv_key.clone(), ); @@ -365,9 +353,26 @@ impl BuilderDataSource for SimpleBuilderSource { _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { - let mut blocks = self.blocks.write().await; - let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; - entry.payload.take().ok_or(BuildError::Missing) + let payload = { + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + entry.payload.take().ok_or(BuildError::Missing)? + }; + + let now = Instant::now(); + + let claimed_transactions = payload + .block_payload + .transaction_commitments(&payload.metadata); + + let mut transactions = self.transactions.write().await; + for txn_hash in claimed_transactions { + if let Some(txn) = transactions.get_mut(&txn_hash) { + txn.claimed = Some(now); + } + } + + Ok(payload) } async fn claim_block_header_input( @@ -386,13 +391,7 @@ impl BuilderDataSource for SimpleBuilderSource { } } -impl SimpleBuilderSource -where - for<'a> <::PureAssembledSignatureType as TryFrom< - &'a TaggedBase64, - >>::Error: Display, - for<'a> >::Error: Display, -{ +impl SimpleBuilderSource { pub async fn run(self, url: Url) { let builder_api = hotshot_builder_api::builder::define_api::< SimpleBuilderSource, @@ -411,7 +410,7 @@ where #[derive(Clone)] pub struct SimpleBuilderTask { #[allow(clippy::type_complexity)] - transactions: Arc, TYPES::Transaction>>>, + transactions: Arc, SubmittedTransaction>>>, blocks: Arc>>>, decided_transactions: LruCache, ()>, } @@ -449,11 +448,33 @@ impl BuilderTask for SimpleBuilderTask { } self.blocks.write().await.clear(); } + EventType::DAProposal { proposal, .. } => { + let payload = TYPES::BlockPayload::from_bytes( + proposal.data.encoded_transactions.into_iter(), + &proposal.data.metadata, + ); + let now = Instant::now(); + + let mut queue = self.transactions.write().await; + for commitment in + payload.transaction_commitments(&proposal.data.metadata) + { + if let Some(txn) = queue.get_mut(&commitment) { + txn.claimed = Some(now); + } + } + } EventType::Transactions { transactions } => { let mut queue = self.transactions.write().await; for transaction in transactions { if !self.decided_transactions.contains(&transaction.commit()) { - queue.insert(transaction.commit(), transaction.clone()); + queue.insert( + transaction.commit(), + SubmittedTransaction { + claimed: None, + transaction: transaction.clone(), + }, + ); } } } @@ -466,7 +487,7 @@ impl BuilderTask for SimpleBuilderTask { } pub async fn make_simple_builder( - membership: Arc, + num_storage_nodes: usize, ) -> (SimpleBuilderSource, SimpleBuilderTask) { let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); @@ -478,7 +499,7 @@ pub async fn make_simple_builder( priv_key, transactions: transactions.clone(), blocks: blocks.clone(), - membership, + num_storage_nodes, }; let task = SimpleBuilderTask { diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index bdef99986c..8ef401c95e 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -1,10 +1,11 @@ +use std::sync::Arc; + use async_trait::async_trait; use hotshot_task_impls::events::{HotShotEvent, HotShotEvent::*}; use hotshot_types::{ data::null_block, traits::{block_contents::BlockHeader, node_implementation::NodeType}, }; -use std::sync::Arc; use crate::predicates::{Predicate, PredicateResult}; diff --git a/testing/src/predicates/upgrade.rs b/testing/src/predicates/upgrade.rs index 4fcae5483b..0b74342fd0 100644 --- a/testing/src/predicates/upgrade.rs +++ b/testing/src/predicates/upgrade.rs @@ -1,9 +1,10 @@ +use std::sync::Arc; + use async_trait::async_trait; use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::consensus::ConsensusTaskState; use hotshot_types::simple_certificate::UpgradeCertificate; -use std::sync::Arc; use crate::predicates::{Predicate, PredicateResult}; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 14a1bff776..08a8952b23 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -342,12 +342,7 @@ where ) }); let (mut builder_task, builder_url) = - B::start(Arc::new(::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config.clone(), - config.fixed_leader_for_gpuvid, - ))) - .await; + B::start(config.num_nodes_with_stake.into(), B::Config::default()).await; for i in 0..total { let mut config = config.clone(); let node_id = self.next_node_id; diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index f0dda37bd4..f9d29d7258 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use async_compatibility_layer::art::async_sleep; use hotshot_example_types::{ block_types::{TestBlockPayload, TestTransaction}, @@ -9,7 +11,6 @@ use hotshot_types::{ constants::Version01, traits::{node_implementation::NodeType, signature_key::SignatureKey, BlockPayload}, }; -use std::time::Duration; use tide_disco::Url; #[cfg(test)] @@ -22,12 +23,13 @@ async fn test_random_block_builder() { use std::time::Instant; use hotshot_builder_api::block_info::AvailableBlockData; + use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::utils::BuilderCommitment; let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - run_random_builder::(api_url.clone()); + run_random_builder::(api_url.clone(), 1, RandomBuilderConfig::default()); let builder_started = Instant::now(); let client: BuilderClient = BuilderClient::new(api_url); From f9918190804189cf07b913f43ca094fbffb35015 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 17 Apr 2024 12:41:01 +0800 Subject: [PATCH 0953/1393] [CX_CLEANUP] - Integrate` QuorumVoteTask` (#2971) * Update feature flags * Group imports --- hotshot/Cargo.toml | 2 +- hotshot/src/lib.rs | 23 +++++++------- hotshot/src/tasks/mod.rs | 30 +++++++++---------- task-impls/Cargo.toml | 2 +- task-impls/src/consensus.rs | 60 ++++++++++++++++++++++++++----------- testing/Cargo.toml | 2 +- 6 files changed, 70 insertions(+), 49 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 2bf27848bf..73fbabe732 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -11,7 +11,7 @@ rust-version = { workspace = true } default = ["docs", "doc-images"] example-upgrade = ["hotshot-task-impls/example-upgrade"] gpu-vid = ["hotshot-task-impls/gpu-vid"] -proposal-task = ["hotshot-task-impls/proposal-task"] +dependency-tasks = ["hotshot-task-impls/dependency-tasks"] # Features required for binaries bin-orchestrator = ["clap"] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4c25109860..6dfce57713 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -57,8 +57,8 @@ use tasks::{add_request_network_task, add_response_task, add_vid_task}; use tracing::{debug, instrument, trace}; use vbs::version::Version; -#[cfg(feature = "proposal-task")] -use crate::tasks::add_quorum_proposal_task; +#[cfg(feature = "dependency-tasks")] +use crate::tasks::{add_quorum_proposal_task, add_quorum_vote_task}; use crate::{ tasks::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, @@ -561,15 +561,6 @@ impl> SystemContext { &handle, ) .await; - // TODO: [CX_CLEANUP] - Integrate QuorumVoteTask with other tasks. - // - // add_quorum_vote_task( - // registry.clone(), - // event_tx.clone(), - // event_rx.activate_cloned(), - // &handle, - // ) - // .await; add_da_task( registry.clone(), event_tx.clone(), @@ -605,7 +596,7 @@ impl> SystemContext { &handle, ) .await; - #[cfg(feature = "proposal-task")] + #[cfg(feature = "dependency-tasks")] add_quorum_proposal_task( registry.clone(), event_tx.clone(), @@ -613,6 +604,14 @@ impl> SystemContext { &handle, ) .await; + #[cfg(feature = "dependency-tasks")] + add_quorum_vote_task( + registry.clone(), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4c86f2f411..6067f297a6 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,6 +5,7 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -15,6 +16,7 @@ use hotshot_task_impls::{ events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, quorum_proposal::QuorumProposalTaskState, + quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, response::{run_response_task, NetworkResponseState, RequestReceiver}, transactions::TransactionTaskState, @@ -34,8 +36,6 @@ use hotshot_types::{ }; use tracing::error; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; - /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { @@ -252,20 +252,6 @@ pub async fn add_consensus_task>( task_reg.run_task(task).await; } -// TODO: [CX_CLEANUP] - Integrate QuorumVoteTask with other tasks. -// -// /// Add the quorum vote task. -// pub async fn add_quorum_vote_task>( -// task_reg: Arc, -// tx: Sender>>, -// rx: Receiver>>, -// handle: &SystemContextHandle, -// ) { -// let quorum_vote_state = QuorumVoteTaskState::create_from(handle).await; -// let task = Task::new(tx, rx, task_reg.clone(), quorum_vote_state); -// task_reg.run_task(task).await; -// } - /// add the VID task pub async fn add_vid_task>( task_reg: Arc, @@ -342,3 +328,15 @@ pub async fn add_quorum_proposal_task>( + task_reg: Arc, + tx: Sender>>, + rx: Receiver>>, + handle: &SystemContextHandle, +) { + let quorum_vote_state = QuorumVoteTaskState::create_from(handle).await; + let task = Task::new(tx, rx, task_reg.clone(), quorum_vote_state); + task_reg.run_task(task).await; +} diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 7aa3237c99..3a80ea8691 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -8,7 +8,7 @@ version = { workspace = true } [features] example-upgrade = [] gpu-vid = ["hotshot-types/gpu-vid"] -proposal-task = [] +dependency-tasks = [] [dependencies] anyhow = { workspace = true } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c5017cf59c..c70de94692 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,28 +1,32 @@ -use core::time::Duration; use std::{ collections::{BTreeMap, HashMap, HashSet}, - marker::PhantomData, sync::Arc, }; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; -use futures::future::{join_all, FutureExt}; +use core::time::Duration; +use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, View}, constants::LOOK_AHEAD, - data::{null_block, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence}, + data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, - message::{GeneralConsensusMessage, Proposal}, + message::Proposal, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, + simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, consensus_api::ConsensusApi, @@ -38,18 +42,26 @@ use hotshot_types::{ vid::VidCommitment, vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task, AnyhowTracing}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; + +#[cfg(not(feature = "dependency-tasks"))] +use { + crate::helpers::AnyhowTracing, + futures::FutureExt, + hotshot_types::{ + data::{null_block, VidDisperseShare}, + message::GeneralConsensusMessage, + simple_vote::QuorumData, }, + std::marker::PhantomData, }; + /// Alias for the block payload commitment and the associated metadata. pub struct CommitmentAndMetadata { /// Vid Commitment @@ -206,6 +218,7 @@ pub(crate) async fn validate_proposal( /// Create the header for a proposal, build the proposal, and broadcast /// the proposal send evnet. #[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] async fn create_and_send_proposal( pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -368,7 +381,15 @@ impl, A: ConsensusApi + self.spawned_tasks = keep; join_all(cancel).await; } + + /// Ignores old vote behavior and lets `QuorumVoteTask` take over. + #[cfg(feature = "dependency-tasks")] + async fn vote_if_able(&mut self, _event_stream: &Sender>>) -> bool { + false + } + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] + #[cfg(not(feature = "dependency-tasks"))] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. async fn vote_if_able(&mut self, event_stream: &Sender>>) -> bool { @@ -592,6 +613,7 @@ impl, A: ConsensusApi + } /// Validates whether the VID Dispersal Proposal is correctly signed + #[cfg(not(feature = "dependency-tasks"))] fn validate_disperse(&self, disperse: &Proposal>) -> bool { let view = disperse.data.get_view_number(); let payload_commitment = disperse.data.payload_commitment; @@ -627,7 +649,7 @@ impl, A: ConsensusApi + event_stream: Sender>>, ) { match event.as_ref() { - #[cfg(not(feature = "proposal-task"))] + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { let sender = sender.clone(); debug!( @@ -1209,6 +1231,7 @@ impl, A: ConsensusApi + self.formed_upgrade_certificate = Some(cert.clone()); } } + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::DACertificateRecv(cert) => { debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; @@ -1231,6 +1254,7 @@ impl, A: ConsensusApi + self.current_proposal = None; } } + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::VIDShareRecv(disperse) => { let view = disperse.data.get_view_number(); @@ -1450,7 +1474,7 @@ impl, A: ConsensusApi + } /// Ignores old propose behavior and lets QuorumProposalTask take over. - #[cfg(feature = "proposal-task")] + #[cfg(feature = "dependency-tasks")] pub async fn publish_proposal_if_able( &mut self, _view: TYPES::Time, @@ -1460,7 +1484,7 @@ impl, A: ConsensusApi + /// Sends a proposal if possible from the high qc we have #[allow(clippy::too_many_lines)] - #[cfg(not(feature = "proposal-task"))] + #[cfg(not(feature = "dependency-tasks"))] pub async fn publish_proposal_if_able( &mut self, view: TYPES::Time, diff --git a/testing/Cargo.toml b/testing/Cargo.toml index b4b49a3794..9d19b3b488 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -10,7 +10,7 @@ default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] -proposal-task = ["hotshot/proposal-task"] +proposal-dependency-tasks = ["hotshot/dependency-tasks"] [dependencies] automod = "1.0.14" From 2d21e65161cb450c3a744deb99dbe8dbd9e71d2a Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Wed, 17 Apr 2024 07:23:42 -0400 Subject: [PATCH 0954/1393] Add The `ProposeNow` and `VoteNow` events. (#2963) * add propose now * initiate vote now event * support now types * fix lint * one more fix * make linter happy * tmp commit * finish up testing * rename * allow large enum variations * fix comments * Remove payload_commitment when handling VoteNow --------- Co-authored-by: Keyao Shen --- task-impls/src/consensus.rs | 10 +- task-impls/src/events.rs | 7 + task-impls/src/quorum_proposal.rs | 167 +++++++++++------- task-impls/src/quorum_vote.rs | 76 ++++++-- testing/tests/tests_1/quorum_proposal_task.rs | 121 ++++++++++++- testing/tests/tests_1/quorum_vote_task.rs | 49 ++++- types/src/consensus.rs | 39 +++- types/src/vote.rs | 21 ++- 8 files changed, 394 insertions(+), 96 deletions(-) diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index c70de94692..24fa0f183b 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -20,7 +20,7 @@ use core::time::Duration; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ - consensus::{Consensus, View}, + consensus::{CommitmentAndMetadata, Consensus, View}, constants::LOOK_AHEAD, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, @@ -62,14 +62,6 @@ use { std::marker::PhantomData, }; -/// Alias for the block payload commitment and the associated metadata. -pub struct CommitmentAndMetadata { - /// Vid Commitment - pub commitment: VidCommitment, - /// Metadata for the block payload - pub metadata: ::Metadata, -} - /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8d59fbdf26..744cd729fd 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,5 +1,6 @@ use either::Either; use hotshot_types::{ + consensus::ProposalDependencyData, data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, message::Proposal, simple_certificate::{ @@ -12,6 +13,7 @@ use hotshot_types::{ }, traits::{node_implementation::NodeType, BlockPayload}, vid::VidCommitment, + vote::VoteDependencyData, }; use vbs::version::Version; @@ -23,6 +25,7 @@ pub struct HotShotTaskCompleted; /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] +#[allow(clippy::large_enum_variant)] pub enum HotShotEvent { /// Shutdown the task Shutdown, @@ -139,4 +142,8 @@ pub enum HotShotEvent { UpgradeCertificateFormed(UpgradeCertificate), /// HotShot was upgraded, with a new network version. VersionUpgrade(Version), + /// Initiate a proposal right now for a provided view. + ProposeNow(TYPES::Time, ProposalDependencyData), + /// Initiate a vote right now for the designated view. + VoteNow(TYPES::Time, VoteDependencyData), } diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index e1d737408f..fa55bd7a48 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -14,7 +14,7 @@ use hotshot_task::{ task::{Task, TaskState}, }; use hotshot_types::{ - consensus::Consensus, + consensus::{CommitmentAndMetadata, Consensus}, constants::LOOK_AHEAD, data::{Leaf, QuorumProposal}, event::Event, @@ -30,12 +30,13 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; + #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use crate::{ - consensus::{validate_proposal, CommitmentAndMetadata}, + consensus::validate_proposal, events::HotShotEvent, helpers::{broadcast_event, cancel_task, AnyhowTracing}, }; @@ -57,6 +58,9 @@ enum ProposalDependency { /// For the `QuroumProposalRecv` event. Proposal, + + /// For the `ProposeNow` event. + ProposeNow, } /// Handler for the proposal dependency @@ -228,7 +232,7 @@ impl ProposalDependencyHandle { } impl HandleDepOutput for ProposalDependencyHandle { - type Output = Vec>>>; + type Output = Vec>>>>; #[allow(clippy::no_effect_underscore_binding)] async fn handle_dep_result(self, res: Self::Output) { @@ -237,7 +241,7 @@ impl HandleDepOutput for ProposalDependencyHandle { let mut _quorum_certificate = None; let mut _timeout_certificate = None; let mut _view_sync_finalize_cert = None; - for event in res.iter().flatten() { + for event in res.iter().flatten().flatten() { match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let proposal_payload_comm = proposal.block_header.payload_commitment(); @@ -271,6 +275,21 @@ impl HandleDepOutput for ProposalDependencyHandle { HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { _view_sync_finalize_cert = Some(cert.clone()); } + HotShotEvent::ProposeNow(_, pdd) => { + commit_and_metadata = Some(pdd.commitment_and_metadata.clone()); + match &pdd.secondary_proposal_information { + hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate(quorum_proposal, quorum_certificate) => { + _quorum_certificate = Some(quorum_certificate.clone()); + payload_commitment = Some(quorum_proposal.block_header.payload_commitment()); + }, + hotshot_types::consensus::SecondaryProposalInformation::Timeout(tc) => { + _timeout_certificate = Some(tc.clone()); + } + hotshot_types::consensus::SecondaryProposalInformation::ViewSync(vsc) => { + _view_sync_finalize_cert = Some(vsc.clone()); + }, + } + } _ => {} } } @@ -401,6 +420,13 @@ impl> QuorumProposalTaskState { + if let HotShotEvent::ProposeNow(view, _) = event { + *view + } else { + return false; + } + } }; let valid = event_view == view_number; if valid { @@ -411,25 +437,13 @@ impl> QuorumProposalTaskState>>, - event_sender: Sender>>, event: Arc>, - ) { - info!("Attempting to make dependency task for event {:?}", event); - if self.propose_dependencies.get(&view_number).is_some() { - debug!("Task already exists"); - return; - } - + ) -> AndDependency>>>> { let mut proposal_dependency = self.create_event_dependency( ProposalDependency::Proposal, view_number, @@ -457,86 +471,98 @@ impl> QuorumProposalTaskState propose_now_dependency.mark_as_completed(event.clone()), HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { payload_commitment_dependency.mark_as_completed(event.clone()); - info!( - "Node {} Dependency PayloadAndMetadata is complete for view {}!", - self.id, *view_number - ); } HotShotEvent::QuorumProposalValidated(..) => { proposal_dependency.mark_as_completed(event); - info!( - "Node {} Dependency Proposal is complete for view {}!", - self.id, *view_number - ); } HotShotEvent::QCFormed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); - info!( - "Node {} Dependency TimeoutCert is complete for view {}!", - self.id, *view_number - ); } Either::Left(_) => { qc_dependency.mark_as_completed(event); - info!( - "Node {} Dependency QC is complete for view {}!", - self.id, *view_number - ); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { view_sync_dependency.mark_as_completed(event); - info!( - "Node {} Dependency ViewSyncCert is complete for view {}!", - self.id, *view_number - ); } _ => {} }; // We have three cases to consider: - let combined = if *view_number > 1 { - AndDependency::from_deps(vec![ - OrDependency::from_deps(vec![AndDependency::from_deps(vec![ - payload_commitment_dependency, - ])]), - OrDependency::from_deps(vec![ - // 1. A QCFormed event and QuorumProposalValidated event - AndDependency::from_deps(vec![qc_dependency, proposal_dependency]), - // 2. A timeout cert was received - AndDependency::from_deps(vec![timeout_dependency]), - // 3. A view sync cert was received. - AndDependency::from_deps(vec![view_sync_dependency]), - ]), - ]) + let mut secondary_deps = vec![ + // 2. A timeout cert was received + AndDependency::from_deps(vec![timeout_dependency]), + // 3. A view sync cert was received. + AndDependency::from_deps(vec![view_sync_dependency]), + ]; + + // 1. A QCFormed event and QuorumProposalValidated event + if *view_number > 1 { + secondary_deps.push(AndDependency::from_deps(vec![ + qc_dependency, + proposal_dependency, + ])); } else { + secondary_deps.push(AndDependency::from_deps(vec![qc_dependency])); + } + + AndDependency::from_deps(vec![OrDependency::from_deps(vec![ + AndDependency::from_deps(vec![AndDependency::from_deps(vec![propose_now_dependency])]), AndDependency::from_deps(vec![ OrDependency::from_deps(vec![AndDependency::from_deps(vec![ payload_commitment_dependency, ])]), - OrDependency::from_deps(vec![ - // 1. A QCFormed event and QuorumProposalValidated event - AndDependency::from_deps(vec![qc_dependency]), - // 2. A timeout cert was received - AndDependency::from_deps(vec![timeout_dependency]), - // 3. A view sync cert was received. - AndDependency::from_deps(vec![view_sync_dependency]), - ]), - ]) - }; + OrDependency::from_deps(secondary_deps), + ]), + ])]) + } + + /// Create and store an [`AndDependency`] combining [`EventDependency`]s associated with the + /// given view number if it doesn't exist. Also takes in the received `event` to seed a + /// dependency as already completed. This allows for the task to receive a proposable event + /// without losing the data that it received, as the dependency task would otherwise have no + /// ability to receive the event and, thus, would never propose. + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view, view = *self.cur_view), name = "Quorum proposal create dependency task", level = "error")] + fn create_dependency_task_if_new( + &mut self, + view_number: TYPES::Time, + event_receiver: Receiver>>, + event_sender: Sender>>, + event: Arc>, + ) { + info!("Attempting to make dependency task for event {:?}", event); + if self.propose_dependencies.get(&view_number).is_some() { + debug!("Task already exists"); + return; + } + + let dependency_chain = + self.create_and_complete_dependencies(view_number, event_receiver, event); let dependency_task = DependencyTask::new( - combined, + dependency_chain, ProposalDependencyHandle { view_number, - sender: event_sender.clone(), + sender: event_sender, consensus: self.consensus.clone(), output_event_stream: self.output_event_stream.clone(), timeout_membership: self.timeout_membership.clone(), @@ -585,6 +611,14 @@ impl> QuorumProposalTaskState>>, ) { match event.as_ref() { + HotShotEvent::ProposeNow(view, _) => { + self.create_dependency_task_if_new( + *view, + event_receiver, + event_sender, + event.clone(), + ); + } HotShotEvent::QCFormed(cert) => { match cert.clone() { either::Right(timeout_cert) => { @@ -929,6 +963,7 @@ impl> TaskState | HotShotEvent::QCFormed(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ProposeNow(..) | HotShotEvent::Shutdown, ) } diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 13c55dd400..4e5af7f215 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -6,7 +6,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use async_std::task::JoinHandle; use committable::Committable; use hotshot_task::{ - dependency::{AndDependency, EventDependency}, + dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::{DependencyTask, HandleDepOutput}, task::{Task, TaskState}, }; @@ -46,6 +46,8 @@ enum VoteDependency { Dac, /// For the `VIDShareRecv` event. Vid, + /// For the `VoteNow` event. + VoteNow, } /// Handler for the vote dependency. @@ -109,6 +111,10 @@ impl + 'static> HandleDepOutput payload_commitment = Some(vid_payload_commitment); } } + HotShotEvent::VoteNow(_, vote_dependency_data) => { + leaf = Some(vote_dependency_data.leaf.clone()); + disperse_share = Some(vote_dependency_data.disperse_share.clone()); + } _ => {} } } @@ -232,6 +238,13 @@ impl> QuorumVoteTaskState { + if let HotShotEvent::VoteNow(view, _) = event { + *view + } else { + return false; + } + } }; if event_view == view_number { debug!("Vote dependency {:?} completed", dependency_type); @@ -250,22 +263,46 @@ impl> QuorumVoteTaskState>>, event_sender: &Sender>>, + event: Option>>, ) { if self.vote_dependencies.get(&view_number).is_some() { return; } - let deps = vec![ - self.create_event_dependency( - VoteDependency::QuorumProposal, - view_number, - event_receiver.clone(), - ), - self.create_event_dependency(VoteDependency::Dac, view_number, event_receiver.clone()), - self.create_event_dependency(VoteDependency::Vid, view_number, event_receiver), - ]; - let vote_dependency = AndDependency::from_deps(deps); + + let quorum_proposal_dependency = self.create_event_dependency( + VoteDependency::QuorumProposal, + view_number, + event_receiver.clone(), + ); + let dac_dependency = + self.create_event_dependency(VoteDependency::Dac, view_number, event_receiver.clone()); + let vid_dependency = + self.create_event_dependency(VoteDependency::Vid, view_number, event_receiver.clone()); + let mut vote_now_dependency = self.create_event_dependency( + VoteDependency::VoteNow, + view_number, + event_receiver.clone(), + ); + + // If we have an event provided to us + if let Some(event) = event { + // Match on the type of event + if let HotShotEvent::VoteNow(..) = event.as_ref() { + tracing::info!("Completing all events"); + vote_now_dependency.mark_as_completed(event); + }; + } + + let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; + let dependency_chain = OrDependency::from_deps(vec![ + // Either we fulfull the dependencies individiaully. + AndDependency::from_deps(deps), + // Or we fulfill the single dependency that contains all the info that we need. + AndDependency::from_deps(vec![vote_now_dependency]), + ]); + let dependency_task = DependencyTask::new( - vote_dependency, + dependency_chain, VoteDependencyHandle { public_key: self.public_key.clone(), private_key: self.private_key.clone(), @@ -311,6 +348,14 @@ impl> QuorumVoteTaskState>>, ) { match event.as_ref() { + HotShotEvent::VoteNow(view, ..) => { + self.create_dependency_task_if_new( + *view, + event_receiver, + &event_sender, + Some(event), + ); + } HotShotEvent::QuorumProposalRecv(proposal, sender) => { let view = proposal.data.view_number; debug!("Received Quorum Proposal for view {}", *view); @@ -556,7 +601,7 @@ impl> QuorumVoteTaskState { let view = cert.view_number; @@ -590,7 +635,7 @@ impl> QuorumVoteTaskState { let view = disperse.data.get_view_number(); @@ -647,7 +692,7 @@ impl> QuorumVoteTaskState { debug!("All vote dependencies verified for view {:?}", view); @@ -688,6 +733,7 @@ impl> TaskState for QuorumVoteTask | HotShotEvent::ViewChange(_) | HotShotEvent::VIDShareRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) + | HotShotEvent::VoteNow(..) | HotShotEvent::Shutdown, ) } diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index a50daba851..0acd685584 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -84,7 +84,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { drop(consensus); let cert = proposals[1].data.justify_qc.clone(); - // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct + // Run at view 2, the quorum proposal task shouldn't care as long as the bookkeeping is correct let view_2 = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), @@ -212,6 +212,125 @@ async fn test_quorum_proposal_task_view_sync() { run_test_script(script, quorum_proposal_task_state).await; } +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_propose_now() { + use hotshot_testing::task_helpers::{build_cert, key_pair_for_id}; + use hotshot_types::{ + consensus::{CommitmentAndMetadata, ProposalDependencyData}, + simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, + simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeVote}, + }; + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let (private_key, public_key) = key_pair_for_id(2); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + } + + // proposal dependency data - quorum proposal and cert + let pdd_qp = ProposalDependencyData { + commitment_and_metadata: CommitmentAndMetadata { + commitment: payload_commitment, + metadata: (), + }, + secondary_proposal_information: + hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate( + proposals[1].data.clone(), + proposals[1].data.justify_qc.clone(), + ), + }; + + // proposal dependency data - timeout cert + let pdd_timeout = ProposalDependencyData { + commitment_and_metadata: CommitmentAndMetadata { + commitment: payload_commitment, + metadata: (), + }, + secondary_proposal_information: + hotshot_types::consensus::SecondaryProposalInformation::Timeout(build_cert::< + TestTypes, + TimeoutData, + TimeoutVote, + TimeoutCertificate, + >( + TimeoutData { + view: ViewNumber::new(1), + }, + &quorum_membership, + ViewNumber::new(2), + &public_key, + &private_key, + )), + }; + + // proposal dependency data - view sync cert + let pdd_view_sync = ProposalDependencyData { + commitment_and_metadata: CommitmentAndMetadata { + commitment: payload_commitment, + metadata: (), + }, + secondary_proposal_information: + hotshot_types::consensus::SecondaryProposalInformation::ViewSync(build_cert::< + TestTypes, + ViewSyncFinalizeData, + ViewSyncFinalizeVote, + ViewSyncFinalizeCertificate2, + >( + ViewSyncFinalizeData { + relay: 1, + round: ViewNumber::new(1), + }, + &quorum_membership, + ViewNumber::new(2), + &public_key, + &private_key, + )), + }; + + let view_qp = TestScriptStage { + inputs: vec![ProposeNow(ViewNumber::new(2), pdd_qp)], + outputs: vec![quorum_proposal_send()], + asserts: vec![], + }; + + let view_timeout = TestScriptStage { + inputs: vec![ProposeNow(ViewNumber::new(2), pdd_timeout)], + outputs: vec![quorum_proposal_send()], + asserts: vec![], + }; + + let view_view_sync = TestScriptStage { + inputs: vec![ProposeNow(ViewNumber::new(2), pdd_view_sync)], + outputs: vec![quorum_proposal_send()], + asserts: vec![], + }; + + for stage in vec![view_qp, view_timeout, view_view_sync] { + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + + let script = vec![stage]; + run_test_script(script, quorum_proposal_task_state).await; + } +} + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index ad6b4ebd4c..09f4fbadc6 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,8 +1,8 @@ #![allow(clippy::panic)] use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use hotshot_testing::task_helpers::get_vid_share; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -52,6 +52,53 @@ async fn test_quorum_vote_task_success() { run_test_script(vec![view_success], quorum_vote_state).await; } +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_vote_task_vote_now() { + use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; + use hotshot_testing::{ + predicates::event::{exact, quorum_vote_send}, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, + }; + use hotshot_types::vote::VoteDependencyData; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + generator.next(); + let view = generator.current_view.clone().unwrap(); + + let vote_dependency_data = VoteDependencyData { + quorum_proposal: view.quorum_proposal.data.clone(), + leaf: view.leaf.clone(), + disperse_share: view.vid_proposal.0[0].clone(), + da_cert: view.da_certificate.clone(), + }; + + // Submit an event with just the `VoteNow` event which should successfully send a vote. + let view_vote_now = TestScriptStage { + inputs: vec![VoteNow(view.view_number, vote_dependency_data)], + outputs: vec![ + exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), + quorum_vote_send(), + ], + asserts: vec![], + }; + + let quorum_vote_state = + QuorumVoteTaskState::::create_from(&handle).await; + + run_test_script(vec![view_vote_now], quorum_vote_state).await; +} + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 62509b9065..56d2a1f02f 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,16 +11,19 @@ use tracing::error; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf, VidDisperseShare}, + data::{Leaf, QuorumProposal, VidDisperseShare}, error::HotShotError, message::Proposal, - simple_certificate::{DACertificate, QuorumCertificate}, + simple_certificate::{ + DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncFinalizeCertificate2, + }, traits::{ metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, node_implementation::NodeType, - ValidatedState, + BlockPayload, ValidatedState, }, utils::{StateAndDelta, Terminator}, + vid::VidCommitment, }; /// A type alias for `HashMap, T>` @@ -389,3 +392,33 @@ impl Consensus { .expect("Decided state not found! Consensus internally inconsistent") } } + +/// Alias for the block payload commitment and the associated metadata. The primary data +/// needed in order to submit a proposal. +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct CommitmentAndMetadata { + /// Vid Commitment + pub commitment: VidCommitment, + /// Metadata for the block payload + pub metadata: ::Metadata, +} + +/// Helper type to hold the optional secondary information required to propose. +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub enum SecondaryProposalInformation { + /// The quorum proposal and certificate needed to propose. + QuorumProposalAndCertificate(QuorumProposal, QuorumCertificate), + /// The timeout certificate which we can propose from. + Timeout(TimeoutCertificate), + /// The view sync certificate which we can propose from. + ViewSync(ViewSyncFinalizeCertificate2), +} + +/// Dependency data required to submit a proposal +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct ProposalDependencyData { + /// The primary data in a proposal. + pub commitment_and_metadata: CommitmentAndMetadata, + /// The secondary data in a proposal + pub secondary_proposal_information: SecondaryProposalInformation, +} diff --git a/types/src/vote.rs b/types/src/vote.rs index db62bdc8c0..fe49d2f0fe 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -12,7 +12,9 @@ use ethereum_types::U256; use tracing::error; use crate::{ - simple_certificate::Threshold, + data::{Leaf, QuorumProposal, VidDisperseShare}, + message::Proposal, + simple_certificate::{DACertificate, Threshold}, simple_vote::Voteable, traits::{ election::Membership, @@ -180,3 +182,20 @@ impl, CERT: Certificate = HashMap)>; + +/// Payload for the `HotShotEvents::VoteNow` event type. The proposal and leaf are +/// obtained via a `QuorumProposalValidated` event being processed. +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct VoteDependencyData { + /// The quorum proposal (not necessarily valid). + pub quorum_proposal: QuorumProposal, + + /// The leaf we've obtained from the `QuorumProposalValidated` event. + pub leaf: Leaf, + + /// The Vid disperse proposal. + pub disperse_share: Proposal>, + + /// The DA certificate. + pub da_cert: DACertificate, +} From 496557ca90a35727fc1582da0e9fc20480dfc3eb Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 17 Apr 2024 11:55:49 +0000 Subject: [PATCH 0955/1393] Propagate fee info to `BlockHeader::new` (#2967) * Propagate fee info to `BlockHeader::new` * Address comments * Fix a lint * Fix failed merge --- builder-api/src/block_info.rs | 1 - example-types/src/block_types.rs | 3 +- hotshot/src/tasks/mod.rs | 3 +- task-impls/src/consensus.rs | 67 ++++---- task-impls/src/da.rs | 4 +- task-impls/src/events.rs | 4 +- task-impls/src/quorum_proposal.rs | 18 ++- task-impls/src/transactions.rs | 13 +- task-impls/src/vid.rs | 5 +- testing/src/block_builder.rs | 52 ++----- testing/src/task_helpers.rs | 147 +----------------- testing/tests/tests_1/consensus_task.rs | 36 +++-- testing/tests/tests_1/da_task.rs | 22 ++- testing/tests/tests_1/proposal_ordering.rs | 22 ++- testing/tests/tests_1/quorum_proposal_task.rs | 53 +++++-- testing/tests/tests_1/upgrade_task.rs | 29 ++-- testing/tests/tests_1/vid_task.rs | 22 +-- types/src/consensus.rs | 9 +- types/src/data.rs | 36 ++++- types/src/traits/block_contents.rs | 11 ++ types/src/traits/signature_key.rs | 36 ++++- 21 files changed, 306 insertions(+), 287 deletions(-) diff --git a/builder-api/src/block_info.rs b/builder-api/src/block_info.rs index 59404c4bb8..9d196984ab 100644 --- a/builder-api/src/block_info.rs +++ b/builder-api/src/block_info.rs @@ -41,5 +41,4 @@ pub struct AvailableBlockHeaderInput { pub message_signature: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, pub sender: ::BuilderSignatureKey, - pub _phantom: PhantomData, } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 15436d5f92..df834b424f 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -7,7 +7,7 @@ use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, traits::{ - block_contents::{BlockHeader, TestableBlock, Transaction}, + block_contents::{BlockHeader, BuilderFee, TestableBlock, Transaction}, node_implementation::NodeType, BlockPayload, ValidatedState, }, @@ -190,6 +190,7 @@ impl> Block parent_leaf: &Leaf, payload_commitment: VidCommitment, _metadata: ::Metadata, + _builder_fee: BuilderFee, ) -> Self { let parent = parent_leaf.get_block_header(); diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 6067f297a6..77db227891 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,7 +5,6 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -36,6 +35,8 @@ use hotshot_types::{ }; use tracing::error; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; + /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index 24fa0f183b..f74df235dc 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -1,22 +1,17 @@ +use core::time::Duration; use std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; -use core::time::Duration; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ @@ -39,17 +34,12 @@ use hotshot_types::{ BlockPayload, }, utils::{Terminator, ViewInner}, - vid::VidCommitment, vote::{Certificate, HasViewNumber}, }; -use tracing::{debug, error, info, instrument, warn}; -use vbs::version::Version; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; - +use tracing::{debug, error, info, instrument, warn}; +use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] use { crate::helpers::AnyhowTracing, @@ -62,6 +52,14 @@ use { std::marker::PhantomData, }; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; + /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; @@ -217,8 +215,7 @@ async fn create_and_send_proposal( consensus: Arc>>, event_stream: Sender>>, view: TYPES::Time, - commitment: VidCommitment, - metadata: ::Metadata, + commitment_and_metadata: CommitmentAndMetadata, parent_leaf: Leaf, state: Arc, upgrade_cert: Option>, @@ -229,8 +226,9 @@ async fn create_and_send_proposal( state.as_ref(), &consensus.read().await.instance_state, &parent_leaf, - commitment, - metadata, + commitment_and_metadata.commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fee, ) .await; @@ -292,7 +290,7 @@ pub struct ConsensusTaskState< pub cur_view: TYPES::Time, /// The commitment to the current block payload and its metadata submitted to DA. - pub payload_commitment_and_metadata: Option>, + pub payload_commitment_and_metadata: Option>, /// Network for all nodes pub quorum_network: Arc, @@ -1401,12 +1399,18 @@ impl, A: ConsensusApi + let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, metadata, view) => { + HotShotEvent::SendPayloadCommitmentAndMetadata( + payload_commitment, + metadata, + view, + fee, + ) => { let view = *view; debug!("got commit and meta {:?}", payload_commitment); self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, metadata: metadata.clone(), + fee: fee.clone(), }); if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view @@ -1564,6 +1568,14 @@ impl, A: ConsensusApi + return; }; + let Some(null_block_fee) = + null_block::builder_fee::(self.quorum_membership.total_nodes()) + else { + // This should never happen. + error!("Failed to calculate null block fee info"); + return; + }; + let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); let consensus = self.consensus.clone(); @@ -1582,8 +1594,11 @@ impl, A: ConsensusApi + consensus, sender, view, - null_block_commitment, - metadata, + CommitmentAndMetadata { + commitment: null_block_commitment, + metadata, + fee: null_block_fee, + }, parent, state, upgrade_cert, @@ -1626,10 +1641,9 @@ impl, A: ConsensusApi + let priv_key = self.private_key.clone(); let consensus = self.consensus.clone(); let sender = event_stream.clone(); - let commit = commit_and_metadata.commitment; - let metadata = commit_and_metadata.metadata.clone(); let state = state.clone(); let delay = self.round_start_delay; + let commitment_and_metadata = commit_and_metadata.clone(); self.spawned_tasks .entry(view) .or_default() @@ -1640,8 +1654,7 @@ impl, A: ConsensusApi + consensus, sender, view, - commit, - metadata, + commitment_and_metadata, parent_leaf.clone(), state, proposal_upgrade_certificate, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 58555df1f3..4c47f2617d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -302,7 +302,7 @@ impl, A: ConsensusApi + return None; } - HotShotEvent::BlockRecv(encoded_transactions, metadata, view) => { + HotShotEvent::BlockRecv(encoded_transactions, metadata, view, _fee) => { let view = *view; self.da_network .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) @@ -374,7 +374,7 @@ impl, A: ConsensusApi + HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown - | HotShotEvent::BlockRecv(_, _, _) + | HotShotEvent::BlockRecv(_, _, _, _) | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) | HotShotEvent::DAProposalValidated(_, _) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 744cd729fd..bd3a2927e2 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -11,7 +11,7 @@ use hotshot_types::{ DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, - traits::{node_implementation::NodeType, BlockPayload}, + traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, vid::VidCommitment, vote::VoteDependencyData, }; @@ -109,12 +109,14 @@ pub enum HotShotEvent { VidCommitment, ::Metadata, TYPES::Time, + BuilderFee, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv( Vec, ::Metadata, TYPES::Time, + BuilderFee, ), /// Event when the transactions task has a block formed BlockReady(VidDisperse, TYPES::Time), diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index fa55bd7a48..fdb4fe55f2 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -30,7 +30,6 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; - #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; @@ -110,7 +109,7 @@ impl ProposalDependencyHandle { &self, view: TYPES::Time, event_stream: &Sender>>, - commit_and_metadata: CommitmentAndMetadata, + commit_and_metadata: CommitmentAndMetadata, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. @@ -185,7 +184,8 @@ impl ProposalDependencyHandle { &consensus.instance_state, &parent_leaf, commit_and_metadata.commitment, - commit_and_metadata.metadata.clone(), + commit_and_metadata.metadata, + commit_and_metadata.fee, ) .await; @@ -237,7 +237,7 @@ impl HandleDepOutput for ProposalDependencyHandle { #[allow(clippy::no_effect_underscore_binding)] async fn handle_dep_result(self, res: Self::Output) { let mut payload_commitment = None; - let mut commit_and_metadata: Option> = None; + let mut commit_and_metadata: Option> = None; let mut _quorum_certificate = None; let mut _timeout_certificate = None; let mut _view_sync_finalize_cert = None; @@ -257,11 +257,13 @@ impl HandleDepOutput for ProposalDependencyHandle { payload_commitment, metadata, _view, + fee, ) => { debug!("Got commit and meta {:?}", payload_commitment); commit_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, metadata: metadata.clone(), + fee: fee.clone(), }); } HotShotEvent::QCFormed(cert) => match cert { @@ -413,6 +415,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { + HotShotEvent::SendPayloadCommitmentAndMetadata( + payload_commitment, + _metadata, + view, + _fee, + ) => { let view = *view; debug!( "Got payload commitment {:?} for view {view:?}", diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index f4fa24bd5b..3778e6e06c 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -14,7 +14,7 @@ use hotshot_types::{ consensus::Consensus, event::{Event, EventType}, traits::{ - block_contents::BlockHeader, + block_contents::{BlockHeader, BuilderFee}, consensus_api::ConsensusApi, election::Membership, node_implementation::{NodeImplementation, NodeType}, @@ -122,7 +122,12 @@ impl< return None; } - if let Some(BuilderResponses { block_data, .. }) = self.wait_for_block().await { + if let Some(BuilderResponses { + block_data, + blocks_initial_info, + block_header, + }) = self.wait_for_block().await + { // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; let encoded_transactions = match block_data.block_payload.encode() { @@ -137,6 +142,10 @@ impl< encoded_transactions, block_data.metadata, block_view, + BuilderFee { + fee_amount: blocks_initial_info.offered_fee, + fee_signature: block_header.fee_signature, + }, )), &event_stream, ) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index bcc1218dce..4e44618bdd 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -60,7 +60,7 @@ impl, A: ConsensusApi + event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number) => { + HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number, fee) => { let vid_disperse = calculate_vid_disperse( &encoded_transactions.clone(), &self.membership.clone(), @@ -72,6 +72,7 @@ impl, A: ConsensusApi + vid_disperse.payload_commitment, metadata.clone(), *view_number, + fee.clone(), )), &event_stream, ) @@ -167,7 +168,7 @@ impl, A: ConsensusApi + !matches!( event.as_ref(), HotShotEvent::Shutdown - | HotShotEvent::BlockRecv(_, _, _) + | HotShotEvent::BlockRecv(_, _, _, _) | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) ) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 0e27852b77..df3e3e293a 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -539,50 +539,21 @@ fn build_block( .collect::>() .len() as u64; - let signature_over_block_info = { - let mut block_info: Vec = Vec::new(); - block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); - block_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); - block_info.extend_from_slice(commitment.as_ref()); - - match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, &block_info) { - Ok(sig) => sig, - Err(e) => { - panic!("Failed to sign block: {}", e); - } - } - }; + let signature_over_block_info = + TYPES::BuilderSignatureKey::sign_block_info(&priv_key, block_size, 123, &commitment) + .expect("Failed to sign block info"); let signature_over_builder_commitment = - match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, commitment.as_ref()) { - Ok(sig) => sig, - Err(e) => { - panic!("Failed to sign block: {}", e); - } - }; + TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, commitment.as_ref()) + .expect("Failed to sign commitment"); - let signature_over_vid_commitment = match TYPES::BuilderSignatureKey::sign_builder_message( - &priv_key, - vid_commitment.as_ref(), - ) { - Ok(sig) => sig, - Err(e) => { - panic!("Failed to sign block: {}", e); - } - }; + let signature_over_vid_commitment = + TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, vid_commitment.as_ref()) + .expect("Failed to sign block vid commitment"); - let signature_over_fee_info = { - let mut fee_info: Vec = Vec::new(); - fee_info.extend_from_slice(123_u64.to_be_bytes().as_ref()); - fee_info.extend_from_slice(commitment.as_ref()); - fee_info.extend_from_slice(vid_commitment.as_ref()); - match TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, &fee_info) { - Ok(sig) => sig, - Err(e) => { - panic!("Failed to sign block: {}", e); - } - } - }; + let signature_over_fee_info = + TYPES::BuilderSignatureKey::sign_fee(&priv_key, 123_u64, &commitment, &vid_commitment) + .expect("Failed to sign fee info"); let block = AvailableBlockData { block_payload, @@ -604,7 +575,6 @@ fn build_block( message_signature: signature_over_vid_commitment.clone(), fee_signature: signature_over_fee_info, sender: pub_key, - _phantom: std::marker::PhantomData, }; (metadata, block, header_input) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 9da30b02ab..d306d13963 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -2,7 +2,6 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLockUpgradableReadGuard; use bitvec::bitvec; use committable::Committable; use ethereum_types::U256; @@ -11,26 +10,23 @@ use hotshot::{ HotShotInitializer, Memberships, Networks, SystemContext, }; use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, - state_types::{TestInstanceState, TestValidatedState}, + state_types::TestInstanceState, }; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewNumber}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::{DACertificate, QuorumCertificate}, + simple_certificate::DACertificate, simple_vote::{DAData, DAVote, QuorumData, QuorumVote, SimpleVote}, traits::{ - block_contents::{vid_commitment, BlockHeader, TestableBlock}, + block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeType}, - states::ValidatedState, - BlockPayload, }, - utils::{View, ViewInner}, vid::{vid_scheme, VidCommitment, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, }; @@ -212,141 +208,6 @@ pub fn build_assembled_sig< real_qc_sig } -/// build a quorum proposal and signature -#[allow(clippy::too_many_lines)] -async fn build_quorum_proposal_and_signature( - handle: &SystemContextHandle, - private_key: &::PrivateKey, - public_key: &BLSPubKey, - view: u64, -) -> ( - QuorumProposal, - ::PureAssembledSignatureType, -) { - // build the genesis view - let genesis_consensus = handle.get_consensus(); - let cur_consensus = genesis_consensus.upgradable_read().await; - let mut consensus = RwLockUpgradableReadGuard::upgrade(cur_consensus).await; - // parent_view_number should be equal to 0 - let parent_view_number = &consensus.high_qc.get_view_number(); - assert_eq!(parent_view_number.get_u64(), 0); - let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { - panic!("Couldn't find high QC parent in state map."); - }; - let Some(leaf_view_0) = parent_view.get_leaf_commitment() else { - panic!("Parent of high QC points to a view without a proposal"); - }; - let Some(leaf_view_0) = consensus.saved_leaves.get(&leaf_view_0) else { - panic!("Failed to find high QC parent."); - }; - let parent_leaf = leaf_view_0.clone(); - - // every event input is seen on the event stream in the output. - let block = ::genesis(); - let payload_commitment = vid_commitment( - &block.encode().unwrap().collect(), - handle.hotshot.memberships.quorum_membership.total_nodes(), - ); - let mut parent_state = Arc::new( - >::from_header( - parent_leaf.get_block_header(), - ), - ); - let block_header = TestBlockHeader::new( - &*parent_state, - &TestInstanceState {}, - &parent_leaf, - payload_commitment, - (), - ) - .await; - let mut proposal = QuorumProposal:: { - block_header: block_header.clone(), - view_number: ViewNumber::new(1), - justify_qc: QuorumCertificate::genesis(), - upgrade_certificate: None, - proposal_certificate: None, - }; - // current leaf that can be re-assigned everytime when entering a new view - let mut leaf = Leaf::from_quorum_proposal(&proposal); - - let mut signature = ::sign(private_key, leaf.commit().as_ref()) - .expect("Failed to sign leaf commitment!"); - - // Only view 2 is tested, higher views are not tested - for cur_view in 2..=view { - let (state_new_view, delta_new_view) = parent_state - .validate_and_apply_header(&TestInstanceState {}, &parent_leaf, &block_header) - .await - .unwrap(); - let state_new_view = Arc::new(state_new_view); - // save states for the previous view to pass all the qc checks - // In the long term, we want to get rid of this, do not manually update consensus state - consensus.validated_state_map.insert( - ViewNumber::new(cur_view - 1), - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state: state_new_view.clone(), - delta: Some(Arc::new(delta_new_view)), - }, - }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - // create a qc by aggregate signatures on the previous view (the data signed is last leaf commitment) - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let quorum_data = QuorumData { - leaf_commit: leaf.commit(), - }; - let created_qc = build_cert::< - TestTypes, - QuorumData, - QuorumVote, - QuorumCertificate, - >( - quorum_data, - &quorum_membership, - ViewNumber::new(cur_view - 1), - public_key, - private_key, - ); - // create a new leaf for the current view - let proposal_new_view = QuorumProposal:: { - block_header: block_header.clone(), - view_number: ViewNumber::new(cur_view), - justify_qc: created_qc, - upgrade_certificate: None, - proposal_certificate: None, - }; - let leaf_new_view = Leaf::from_quorum_proposal(&proposal_new_view); - let signature_new_view = - ::sign(private_key, leaf_new_view.commit().as_ref()) - .expect("Failed to sign leaf commitment!"); - proposal = proposal_new_view; - signature = signature_new_view; - leaf = leaf_new_view; - parent_state = state_new_view; - } - - (proposal, signature) -} - -/// create a quorum proposal -pub async fn build_quorum_proposal( - handle: &SystemContextHandle, - private_key: &::PrivateKey, - view: u64, -) -> Proposal> { - let public_key = &BLSPubKey::from_private(private_key); - let (proposal, signature) = - build_quorum_proposal_and_signature(handle, private_key, public_key, view).await; - Proposal { - data: proposal, - signature, - _pd: PhantomData, - } -} - /// get the keypair for a node id #[must_use] pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index c0079a706b..eefcd9e9fe 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,22 +1,24 @@ -use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; -use hotshot::types::SystemContextHandle; +use hotshot::{ + tasks::{inject_consensus_polls, task_state::CreateTaskState}, + types::SystemContextHandle, +}; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; -use hotshot_testing::task_helpers::key_pair_for_id; -use hotshot_testing::task_helpers::get_vid_share; -use hotshot_testing::test_helpers::permute_input_with_index_order; use hotshot_testing::{ predicates::event::{ exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, timeout_vote_send, }, script::{run_test_script, TestScriptStage}, - task_helpers::{build_system_handle, vid_scheme_from_view_number}, + task_helpers::{ + build_system_handle, get_vid_share, key_pair_for_id, vid_scheme_from_view_number, + }, + test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; -use hotshot_types::simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}; use hotshot_types::{ data::{ViewChangeEvidence, ViewNumber}, - traits::node_implementation::ConsensusTime, + simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, + traits::{election::Membership, node_implementation::ConsensusTime}, }; use jf_primitives::vid::VidScheme; @@ -24,6 +26,8 @@ use jf_primitives::vid::VidScheme; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use hotshot_types::data::null_block; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -75,7 +79,12 @@ async fn test_consensus_task() { QuorumProposalRecv(proposals[1].clone(), leaders[1]), QCFormed(either::Left(cert)), // We must have a payload commitment and metadata to propose. - SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), @@ -246,6 +255,8 @@ async fn test_consensus_vote_with_permuted_dac() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { + use hotshot_types::data::null_block; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -349,7 +360,12 @@ async fn test_view_sync_finalize_propose() { TimeoutVoteRecv(timeout_vote_view_2), TimeoutVoteRecv(timeout_vote_view_3), ViewSyncFinalizeCertificate2Recv(cert), - SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(4)), + SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(4), + null_block::builder_fee(4).unwrap(), + ), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ca18e25335..026f3e424e 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,11 +1,9 @@ -use hotshot::tasks::task_state::CreateTaskState; -use hotshot::types::SystemContextHandle; +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, }; -use hotshot_task_impls::da::DATaskState; -use hotshot_task_impls::events::HotShotEvent::*; +use hotshot_task_impls::{da::DATaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::exact, script::{run_test_script, TestScriptStage}, @@ -13,7 +11,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::ViewNumber, + data::{null_block, ViewNumber}, simple_vote::DAData, traits::{ block_contents::vid_commitment, election::Membership, node_implementation::ConsensusTime, @@ -69,7 +67,12 @@ async fn test_da_task() { inputs: vec![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), - BlockRecv(encoded_transactions.clone(), (), ViewNumber::new(2)), + BlockRecv( + encoded_transactions.clone(), + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], asserts: vec![], @@ -143,7 +146,12 @@ async fn test_da_task_storage_failure() { inputs: vec![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), - BlockRecv(encoded_transactions.clone(), (), ViewNumber::new(2)), + BlockRecv( + encoded_transactions.clone(), + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], asserts: vec![], diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index afd75586f1..4086f95e19 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -2,21 +2,24 @@ use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ - predicates::event::{ - exact, quorum_proposal_send, quorum_proposal_validated, - }, + predicates::event::{exact, quorum_proposal_send, quorum_proposal_validated}, task_helpers::{get_vid_share, vid_scheme_from_view_number}, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_types::{ + data::{null_block, ViewNumber}, + traits::{election::Membership, node_implementation::ConsensusTime}, +}; use jf_primitives::vid::VidScheme; /// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). /// This proposal should happen no matter how the `input_permutation` is specified. async fn test_ordering_with_specific_order(input_permutation: Vec) { - use hotshot_testing::script::{run_test_script, TestScriptStage}; - use hotshot_testing::task_helpers::build_system_handle; + use hotshot_testing::{ + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -69,7 +72,12 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let inputs = vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), QCFormed(either::Left(cert)), - SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(node_id)), + SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(node_id), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ]; let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 0acd685584..8e95efb3c9 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,20 +1,22 @@ -use hotshot::tasks::inject_quorum_proposal_polls; -use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_example_types::state_types::TestValidatedState; -use hotshot_task_impls::events::HotShotEvent::*; -use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; -use hotshot_testing::predicates::event::quorum_proposal_send; -use hotshot_testing::task_helpers::vid_scheme_from_view_number; +use hotshot::tasks::{inject_quorum_proposal_polls, task_state::CreateTaskState}; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes}, + state_types::TestValidatedState, +}; +use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ + predicates::event::quorum_proposal_send, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + task_helpers::{build_system_handle, vid_scheme_from_view_number}, view_generator::TestViewGenerator, }; use hotshot_types::{ data::{ViewChangeEvidence, ViewNumber}, simple_vote::ViewSyncFinalizeData, - traits::node_implementation::{ConsensusTime, NodeType}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + }, utils::{View, ViewInner}, vid::VidSchemeType, }; @@ -35,6 +37,8 @@ fn make_payload_commitment( #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal() { + use hotshot_types::data::null_block; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -89,7 +93,12 @@ async fn test_quorum_proposal_task_quorum_proposal() { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), QCFormed(either::Left(cert.clone())), - SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ], outputs: vec![quorum_proposal_send()], asserts: vec![], @@ -107,7 +116,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_qc_timeout() { - use hotshot_types::simple_vote::TimeoutData; + use hotshot_types::{data::null_block, simple_vote::TimeoutData}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -142,7 +151,12 @@ async fn test_quorum_proposal_task_qc_timeout() { let view_2 = TestScriptStage { inputs: vec![ QCFormed(either::Right(cert.clone())), - SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ], outputs: vec![quorum_proposal_send()], asserts: vec![], @@ -160,6 +174,8 @@ async fn test_quorum_proposal_task_qc_timeout() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_view_sync() { + use hotshot_types::data::null_block; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -198,7 +214,12 @@ async fn test_quorum_proposal_task_view_sync() { let view_2 = TestScriptStage { inputs: vec![ ViewSyncFinalizeCertificate2Recv(cert.clone()), - SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), ], outputs: vec![quorum_proposal_send()], asserts: vec![], @@ -219,6 +240,7 @@ async fn test_quorum_proposal_task_propose_now() { use hotshot_testing::task_helpers::{build_cert, key_pair_for_id}; use hotshot_types::{ consensus::{CommitmentAndMetadata, ProposalDependencyData}, + data::null_block, simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeVote}, }; @@ -248,6 +270,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, metadata: (), + fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, secondary_proposal_information: hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate( @@ -261,6 +284,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, metadata: (), + fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, secondary_proposal_information: hotshot_types::consensus::SecondaryProposalInformation::Timeout(build_cert::< @@ -284,6 +308,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, metadata: (), + fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, secondary_proposal_information: hotshot_types::consensus::SecondaryProposalInformation::ViewSync(build_cert::< diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index b6d4f9076f..153adeefd1 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -1,5 +1,9 @@ -use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; -use hotshot::types::SystemContextHandle; +use std::time::Duration; + +use hotshot::{ + tasks::{inject_consensus_polls, task_state::CreateTaskState}, + types::SystemContextHandle, +}; use hotshot_example_types::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, @@ -9,26 +13,26 @@ use hotshot_task_impls::{ consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, }; use hotshot_testing::{ - predicates::event::*, - predicates::upgrade::*, + predicates::{event::*, upgrade::*}, script::{Expectations, TaskScript}, + task_helpers::get_vid_share, view_generator::TestViewGenerator, }; -use hotshot_testing::task_helpers::get_vid_share; use hotshot_types::{ - data::ViewNumber, + data::{null_block, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, }; -use std::time::Duration; use vbs::version::Version; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. async fn test_consensus_task_upgrade() { - use hotshot_testing::script::{run_test_script, TestScriptStage}; - use hotshot_testing::task_helpers::build_system_handle; + use hotshot_testing::{ + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -251,6 +255,7 @@ async fn test_upgrade_and_consensus_task() { vids[2].0[0].data.payload_commitment, (), ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QCFormed(either::Either::Left(proposals[1].data.justify_qc.clone())), ], @@ -439,6 +444,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vids[1].0[0].data.payload_commitment, (), ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), ], vec![ @@ -448,6 +454,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vids[2].0[0].data.payload_commitment, (), ViewNumber::new(3), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -458,6 +465,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vids[3].0[0].data.payload_commitment, (), ViewNumber::new(4), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -468,6 +476,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vids[4].0[0].data.payload_commitment, (), ViewNumber::new(5), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -477,6 +486,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vids[5].0[0].data.payload_commitment, (), ViewNumber::new(6), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], @@ -487,6 +497,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vids[6].0[0].data.payload_commitment, (), ViewNumber::new(7), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index adf14fb706..b989e6aadd 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,17 +1,18 @@ +use std::{collections::HashMap, marker::PhantomData}; + use hotshot::types::SignatureKey; use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; use hotshot_types::{ - data::{DAProposal, VidDisperse, VidDisperseShare, ViewNumber}, + data::{null_block, DAProposal, VidDisperse, VidDisperseShare, ViewNumber}, traits::{ consensus_api::ConsensusApi, + election::Membership, node_implementation::{ConsensusTime, NodeType}, }, }; use jf_primitives::vid::VidScheme; -use std::collections::HashMap; -use std::marker::PhantomData; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -51,11 +52,8 @@ async fn test_vid_task() { _pd: PhantomData, }; - let vid_disperse = VidDisperse::from_membership( - message.data.view_number, - vid_disperse, - &quorum_membership, - ); + let vid_disperse = + VidDisperse::from_membership(message.data.view_number, vid_disperse, &quorum_membership); let vid_proposal = Proposal { data: vid_disperse.clone(), @@ -82,6 +80,7 @@ async fn test_vid_task() { encoded_transactions.clone(), (), ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), )); input.push(HotShotEvent::BlockReady( vid_disperse.clone(), @@ -98,7 +97,12 @@ async fn test_vid_task() { ); output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata(payload_commitment, (), ViewNumber::new(2)), + HotShotEvent::SendPayloadCommitmentAndMetadata( + payload_commitment, + (), + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), 1, ); output.insert( diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 56d2a1f02f..5bb448af8a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -18,6 +18,7 @@ use crate::{ DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncFinalizeCertificate2, }, traits::{ + block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, node_implementation::NodeType, BlockPayload, ValidatedState, @@ -396,11 +397,13 @@ impl Consensus { /// Alias for the block payload commitment and the associated metadata. The primary data /// needed in order to submit a proposal. #[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub struct CommitmentAndMetadata { +pub struct CommitmentAndMetadata { /// Vid Commitment pub commitment: VidCommitment, /// Metadata for the block payload - pub metadata: ::Metadata, + pub metadata: ::Metadata, + /// Builder fee data + pub fee: BuilderFee, } /// Helper type to hold the optional secondary information required to propose. @@ -418,7 +421,7 @@ pub enum SecondaryProposalInformation { #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub struct ProposalDependencyData { /// The primary data in a proposal. - pub commitment_and_metadata: CommitmentAndMetadata, + pub commitment_and_metadata: CommitmentAndMetadata, /// The secondary data in a proposal pub secondary_proposal_information: SecondaryProposalInformation, } diff --git a/types/src/data.rs b/types/src/data.rs index 17ced1f262..563d0fc5d0 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -674,7 +674,13 @@ pub mod null_block { use jf_primitives::vid::VidScheme; use memoize::memoize; - use crate::vid::{vid_scheme, VidCommitment}; + use crate::{ + traits::{ + block_contents::BuilderFee, node_implementation::NodeType, + signature_key::BuilderSignatureKey, BlockPayload, + }, + vid::{vid_scheme, VidCommitment}, + }; /// The commitment for a null block payload. /// @@ -692,4 +698,32 @@ pub mod null_block { Err(_) => None, } } + + /// Builder fee data for a null block payload + #[must_use] + pub fn builder_fee(num_storage_nodes: usize) -> Option> { + /// Arbitrary fee amount, this block doesn't actually come from a builder + const FEE_AMOUNT: u64 = 0; + + let (_, priv_key) = + ::generated_from_seed_indexed( + [0_u8; 32], 0, + ); + + let (null_block, null_block_metadata) = + ::from_transactions([]).ok()?; + + match TYPES::BuilderSignatureKey::sign_fee( + &priv_key, + FEE_AMOUNT, + &null_block.builder_commitment(&null_block_metadata), + &commitment(num_storage_nodes)?, + ) { + Ok(sig) => Some(BuilderFee { + fee_amount: FEE_AMOUNT, + fee_signature: sig, + }), + Err(_) => None, + } + } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index c7bdd2aa67..63cdb2392a 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -14,6 +14,7 @@ use committable::{Commitment, Committable}; use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Serialize}; +use super::signature_key::BuilderSignatureKey; use crate::{ data::Leaf, traits::{node_implementation::NodeType, ValidatedState}, @@ -145,6 +146,15 @@ pub fn precompute_vid_commitment( /// do dispersal for the genesis block. For simplicity and performance, we use 1. pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +/// Information about builder fee for proposed block +pub struct BuilderFee { + /// Proposed fee amount + pub fee_amount: u64, + /// Signature over fee amount + pub fee_signature: ::BuilderSignature, +} + /// Header of a block, which commits to a [`BlockPayload`]. pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable @@ -157,6 +167,7 @@ pub trait BlockHeader: parent_leaf: &Leaf, payload_commitment: VidCommitment, metadata: ::Metadata, + builder_fee: BuilderFee, ) -> impl Future + Send; /// Build the genesis header, payload, and metadata. diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 0c921e0c4d..379951d655 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -6,10 +6,12 @@ use std::{ use bitvec::prelude::*; use ethereum_types::U256; -use jf_primitives::errors::PrimitivesError; +use jf_primitives::{errors::PrimitivesError, vid::VidScheme}; use serde::{Deserialize, Serialize}; use tagged_base64::TaggedBase64; +use crate::{utils::BuilderCommitment, vid::VidSchemeType}; + /// Type representing stake table entries in a `StakeTable` pub trait StakeTableEntryType { /// Get the stake value @@ -194,6 +196,38 @@ pub trait BuilderSignatureKey: data: &[u8], ) -> Result; + /// sign fee offer for proposed payload + /// # Errors + /// If unable to sign the data with the key + fn sign_fee( + private_key: &Self::BuilderPrivateKey, + fee_amount: u64, + payload_commitment: &BuilderCommitment, + vid_commitment: &::Commit, + ) -> Result { + let mut fee_info: Vec = Vec::new(); + fee_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); + fee_info.extend_from_slice(payload_commitment.as_ref()); + fee_info.extend_from_slice(vid_commitment.as_ref()); + Self::sign_builder_message(private_key, &fee_info) + } + + /// sign information about offered block + /// # Errors + /// If unable to sign the data with the key + fn sign_block_info( + private_key: &Self::BuilderPrivateKey, + block_size: u64, + fee_amount: u64, + payload_commitment: &BuilderCommitment, + ) -> Result { + let mut block_info: Vec = Vec::new(); + block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); + block_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); + block_info.extend_from_slice(payload_commitment.as_ref()); + Self::sign_builder_message(private_key, &block_info) + } + /// Generate a new key pair fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::BuilderPrivateKey); } From 26913c298e456d5b66f1e5f2c07a2ef9e82dc7d2 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 17 Apr 2024 09:29:40 -0400 Subject: [PATCH 0956/1393] fix CDN bug (#2973) --- examples/Cargo.toml | 11 +++++------ hotshot/Cargo.toml | 8 ++------ 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 43ea461944..3afa51f8c9 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -141,10 +141,7 @@ tracing = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } cdn-client = { workspace = true } -cdn-broker = { workspace = true, features = [ - "strong-consistency", - "global-permits", -] } +cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] @@ -152,10 +149,12 @@ async-std = { workspace = true } cdn-client = { workspace = true, features = ["runtime-async-std"] } cdn-broker = { workspace = true, features = [ "runtime-async-std", - "strong-consistency", "global-permits", ] } -cdn-marshal = { workspace = true, features = ["runtime-async-std"] } +cdn-marshal = { workspace = true, features = [ + "runtime-async-std", + "global-permits", +] } [dev-dependencies] clap.workspace = true diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 73fbabe732..42b878267d 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -62,10 +62,7 @@ sha2 = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } cdn-client = { workspace = true } -cdn-broker = { workspace = true, features = [ - "strong-consistency", - "global-permits", -] } +cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] @@ -73,10 +70,9 @@ async-std = { workspace = true } cdn-client = { workspace = true, features = ["runtime-async-std"] } cdn-broker = { workspace = true, features = [ "runtime-async-std", - "strong-consistency", "global-permits", ] } -cdn-marshal = { workspace = true, features = ["runtime-async-std"] } +cdn-marshal = { workspace = true, features = ["runtime-async-std", "global-permits"] } [dev-dependencies] From f0f2930eec4b12859874dc11c23eed45372874ae Mon Sep 17 00:00:00 2001 From: Nathan F Yospe Date: Wed, 17 Apr 2024 11:26:22 -0400 Subject: [PATCH 0957/1393] builder_commitment support (#2972) * Including builder_commitment in the construction of * Version bump * naming correction * fix build --------- Co-authored-by: Himanshu Goyal --- example-types/src/block_types.rs | 15 ++++++++------- task-impls/src/consensus.rs | 7 ++++++- task-impls/src/events.rs | 2 ++ task-impls/src/quorum_proposal.rs | 5 +++++ task-impls/src/transactions.rs | 9 ++------- task-impls/src/vid.rs | 7 +++++++ testing/src/view_generator.rs | 11 +++++++++++ testing/tests/tests_1/consensus_task.rs | 6 ++++++ testing/tests/tests_1/proposal_ordering.rs | 4 ++++ testing/tests/tests_1/quorum_proposal_task.rs | 14 ++++++++++++-- testing/tests/tests_1/upgrade_task.rs | 7 +++++++ testing/tests/tests_1/vid_task.rs | 9 ++++++++- types/src/consensus.rs | 4 +++- types/src/data.rs | 9 +++++++-- types/src/traits/block_contents.rs | 7 +++---- 15 files changed, 91 insertions(+), 25 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index df834b424f..22024035b5 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -177,6 +177,8 @@ pub struct TestBlockHeader { pub block_number: u64, /// VID commitment to the payload. pub payload_commitment: VidCommitment, + /// Fast commitment for builder verification + pub builder_commitment: BuilderCommitment, /// Timestamp when this header was created. pub timestamp: u64, } @@ -189,6 +191,7 @@ impl> Block _instance_state: &>::Instance, parent_leaf: &Leaf, payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, _metadata: ::Metadata, _builder_fee: BuilderFee, ) -> Self { @@ -203,6 +206,7 @@ impl> Block Self { block_number: parent.block_number + 1, payload_commitment, + builder_commitment, timestamp, } } @@ -210,11 +214,13 @@ impl> Block fn genesis( _instance_state: &>::Instance, payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, _metadata: ::Metadata, ) -> Self { Self { block_number: 0, payload_commitment, + builder_commitment, timestamp: 0, } } @@ -231,13 +237,8 @@ impl> Block &() } - fn builder_commitment( - &self, - _metadata: &::Metadata, - ) -> BuilderCommitment { - let mut digest = sha2::Sha256::new(); - digest.update(self.payload_commitment.as_ref()); - BuilderCommitment::from_raw_digest(digest.finalize()) + fn builder_commitment(&self) -> BuilderCommitment { + self.builder_commitment.clone() } } diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus.rs index f74df235dc..b21efb96d7 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus.rs @@ -227,6 +227,7 @@ async fn create_and_send_proposal( &consensus.read().await.instance_state, &parent_leaf, commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fee, ) @@ -1401,6 +1402,7 @@ impl, A: ConsensusApi + } HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, metadata, view, fee, @@ -1409,6 +1411,7 @@ impl, A: ConsensusApi + debug!("got commit and meta {:?}", payload_commitment); self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, + builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), fee: fee.clone(), }); @@ -1553,13 +1556,14 @@ impl, A: ConsensusApi + // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. if let Some(upgrade_cert) = &self.decided_upgrade_cert { if upgrade_cert.in_interim(self.cur_view) { - let Ok((_payload, metadata)) = + let Ok((payload, metadata)) = ::from_transactions(Vec::new()) else { error!("Failed to build null block payload and metadata"); return; }; + let builder_commitment = payload.builder_commitment(&metadata); let Some(null_block_commitment) = null_block::commitment(self.quorum_membership.total_nodes()) else { @@ -1596,6 +1600,7 @@ impl, A: ConsensusApi + view, CommitmentAndMetadata { commitment: null_block_commitment, + builder_commitment, metadata, fee: null_block_fee, }, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index bd3a2927e2..d5aa57015e 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -12,6 +12,7 @@ use hotshot_types::{ ViewSyncPreCommitVote, }, traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, + utils::BuilderCommitment, vid::VidCommitment, vote::VoteDependencyData, }; @@ -107,6 +108,7 @@ pub enum HotShotEvent { /// Event to send block payload commitment and metadata from DA leader to the quorum; internal event only SendPayloadCommitmentAndMetadata( VidCommitment, + BuilderCommitment, ::Metadata, TYPES::Time, BuilderFee, diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index fdb4fe55f2..b4e93abe63 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -184,6 +184,7 @@ impl ProposalDependencyHandle { &consensus.instance_state, &parent_leaf, commit_and_metadata.commitment, + commit_and_metadata.builder_commitment, commit_and_metadata.metadata, commit_and_metadata.fee, ) @@ -255,6 +256,7 @@ impl HandleDepOutput for ProposalDependencyHandle { } HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, metadata, _view, fee, @@ -262,6 +264,7 @@ impl HandleDepOutput for ProposalDependencyHandle { debug!("Got commit and meta {:?}", payload_commitment); commit_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, + builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), fee: fee.clone(), }); @@ -413,6 +416,7 @@ impl> QuorumProposalTaskState { if let HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, + _builder_commitment, _metadata, view, _fee, @@ -675,6 +679,7 @@ impl> QuorumProposalTaskState::SignatureKey as SignatureKey>::sign( &self.private_key, - last_leaf - .get_block_header() - .builder_commitment(last_leaf.get_block_header().metadata()) - .as_ref(), + last_leaf.get_block_header().builder_commitment().as_ref(), ) else { error!("Failed to sign block hash"); continue; @@ -193,9 +190,7 @@ impl< let mut available_blocks = match self .builder_client .get_available_blocks( - last_leaf - .get_block_header() - .builder_commitment(last_leaf.get_block_header().metadata()), + last_leaf.get_block_header().builder_commitment(), self.public_key.clone(), &request_signature, ) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 4e44618bdd..e5e7a13937 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -12,6 +12,7 @@ use hotshot_types::{ network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, + BlockPayload, }, }; use tracing::{debug, error, instrument, warn}; @@ -61,6 +62,11 @@ impl, A: ConsensusApi + ) -> Option { match event.as_ref() { HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number, fee) => { + let payload = ::BlockPayload::from_bytes( + encoded_transactions.clone().into_iter(), + metadata, + ); + let builder_commitment = payload.builder_commitment(metadata); let vid_disperse = calculate_vid_disperse( &encoded_transactions.clone(), &self.membership.clone(), @@ -70,6 +76,7 @@ impl, A: ConsensusApi + broadcast_event( Arc::new(HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.payload_commitment, + builder_commitment, metadata.clone(), *view_number, fee.clone(), diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 2d6004b173..f0e12d58a3 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -21,6 +21,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeType}, + BlockPayload, }, }; use sha2::{Digest, Sha256}; @@ -55,6 +56,10 @@ impl TestView { let transactions = Vec::new(); + let (block_payload, metadata) = + TestBlockPayload::from_transactions(transactions.clone()).unwrap(); + let builder_commitment = block_payload.builder_commitment(&metadata); + let (private_key, public_key) = key_pair_for_id(*genesis_view); let leader_public_key = public_key; @@ -80,6 +85,7 @@ impl TestView { block_number: 1, timestamp: 1, payload_commitment, + builder_commitment, }; let quorum_proposal_inner = QuorumProposal:: { @@ -166,6 +172,10 @@ impl TestView { let leader_public_key = public_key; + let (block_payload, metadata) = + TestBlockPayload::from_transactions(transactions.clone()).unwrap(); + let builder_commitment = block_payload.builder_commitment(&metadata); + let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); let vid_proposal = build_vid_proposal( @@ -263,6 +273,7 @@ impl TestView { block_number: *next_view, timestamp: *next_view, payload_commitment, + builder_commitment, }; let proposal = QuorumProposal:: { diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index eefcd9e9fe..a06936182a 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -19,8 +19,10 @@ use hotshot_types::{ data::{ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, traits::{election::Membership, node_implementation::ConsensusTime}, + utils::BuilderCommitment, }; use jf_primitives::vid::VidScheme; +use sha2::Digest; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -72,6 +74,7 @@ async fn test_consensus_task() { }; let cert = proposals[1].data.justify_qc.clone(); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); // Run view 2 and propose. let view_2 = TestScriptStage { @@ -81,6 +84,7 @@ async fn test_consensus_task() { // We must have a payload commitment and metadata to propose. SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -354,6 +358,7 @@ async fn test_view_sync_finalize_propose() { ) .unwrap(); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let view_4 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), @@ -362,6 +367,7 @@ async fn test_view_sync_finalize_propose() { ViewSyncFinalizeCertificate2Recv(cert), SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(4), null_block::builder_fee(4).unwrap(), diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 4086f95e19..4c4bbc6bc9 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -10,8 +10,10 @@ use hotshot_testing::{ use hotshot_types::{ data::{null_block, ViewNumber}, traits::{election::Membership, node_implementation::ConsensusTime}, + utils::BuilderCommitment, }; use jf_primitives::vid::VidScheme; +use sha2::Digest; /// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). /// This proposal should happen no matter how the `input_permutation` is specified. @@ -69,11 +71,13 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { // Node 2 is the leader up next, so we form the QC for it. let cert = proposals[1].data.justify_qc.clone(); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let inputs = vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), QCFormed(either::Left(cert)), SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(node_id), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 8e95efb3c9..610ed619a3 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -17,10 +17,11 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, }, - utils::{View, ViewInner}, + utils::{BuilderCommitment, View, ViewInner}, vid::VidSchemeType, }; use jf_primitives::vid::VidScheme; +use sha2::Digest; fn make_payload_commitment( membership: &::Membership, @@ -87,6 +88,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { // Release the write lock before proceeding with the test drop(consensus); let cert = proposals[1].data.justify_qc.clone(); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); // Run at view 2, the quorum proposal task shouldn't care as long as the bookkeeping is correct let view_2 = TestScriptStage { @@ -95,6 +97,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { QCFormed(either::Left(cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -123,6 +126,7 @@ async fn test_quorum_proposal_task_qc_timeout() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone()); @@ -153,6 +157,7 @@ async fn test_quorum_proposal_task_qc_timeout() { QCFormed(either::Right(cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -184,6 +189,7 @@ async fn test_quorum_proposal_task_view_sync() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone()); @@ -216,6 +222,7 @@ async fn test_quorum_proposal_task_view_sync() { ViewSyncFinalizeCertificate2Recv(cert.clone()), SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -264,11 +271,12 @@ async fn test_quorum_proposal_task_propose_now() { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); } - + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); // proposal dependency data - quorum proposal and cert let pdd_qp = ProposalDependencyData { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, + builder_commitment: builder_commitment.clone(), metadata: (), fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, @@ -283,6 +291,7 @@ async fn test_quorum_proposal_task_propose_now() { let pdd_timeout = ProposalDependencyData { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, + builder_commitment: builder_commitment.clone(), metadata: (), fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, @@ -307,6 +316,7 @@ async fn test_quorum_proposal_task_propose_now() { let pdd_view_sync = ProposalDependencyData { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, + builder_commitment, metadata: (), fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 153adeefd1..8d6c88a1d5 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -253,6 +253,7 @@ async fn test_upgrade_and_consensus_task() { DACertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, + proposals[2].data.block_header.builder_commitment.clone(), (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -442,6 +443,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { DACertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, + proposals[1].data.block_header.builder_commitment.clone(), (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -452,6 +454,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, + proposals[2].data.block_header.builder_commitment.clone(), (), ViewNumber::new(3), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -463,6 +466,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, + proposals[3].data.block_header.builder_commitment.clone(), (), ViewNumber::new(4), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -474,6 +478,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { VIDShareRecv(get_vid_share(&vids[4].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, + proposals[4].data.block_header.builder_commitment.clone(), (), ViewNumber::new(5), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -484,6 +489,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { DACertificateRecv(dacs[5].clone()), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, + proposals[5].data.block_header.builder_commitment.clone(), (), ViewNumber::new(6), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -495,6 +501,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { VIDShareRecv(get_vid_share(&vids[6].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, + proposals[6].data.block_header.builder_commitment.clone(), (), ViewNumber::new(7), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index b989e6aadd..a10f1d181e 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,7 +1,10 @@ use std::{collections::HashMap, marker::PhantomData}; use hotshot::types::SignatureKey; -use hotshot_example_types::{block_types::TestTransaction, node_types::TestTypes}; +use hotshot_example_types::{ + block_types::{TestBlockPayload, TestTransaction}, + node_types::TestTypes, +}; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; use hotshot_types::{ @@ -10,6 +13,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeType}, + BlockPayload, }, }; use jf_primitives::vid::VidScheme; @@ -32,6 +36,8 @@ async fn test_vid_task() { let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; + let (payload, metadata) = TestBlockPayload::from_transactions(transactions.clone()).unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; @@ -99,6 +105,7 @@ async fn test_vid_task() { output.insert( HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, + builder_commitment, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 5bb448af8a..e0d1b7437a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -23,7 +23,7 @@ use crate::{ node_implementation::NodeType, BlockPayload, ValidatedState, }, - utils::{StateAndDelta, Terminator}, + utils::{BuilderCommitment, StateAndDelta, Terminator}, vid::VidCommitment, }; @@ -400,6 +400,8 @@ impl Consensus { pub struct CommitmentAndMetadata { /// Vid Commitment pub commitment: VidCommitment, + /// Builder Commitment + pub builder_commitment: BuilderCommitment, /// Metadata for the block payload pub metadata: ::Metadata, /// Builder fee data diff --git a/types/src/data.rs b/types/src/data.rs index 563d0fc5d0..5df12a75af 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -439,13 +439,18 @@ impl Leaf { #[must_use] pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { let (payload, metadata) = TYPES::BlockPayload::genesis(); + let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload .encode() .expect("unable to encode genesis payload") .collect(); let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); - let block_header = - TYPES::BlockHeader::genesis(instance_state, payload_commitment, metadata); + let block_header = TYPES::BlockHeader::genesis( + instance_state, + payload_commitment, + builder_commitment, + metadata, + ); Self { view_number: TYPES::Time::genesis(), justify_qc: QuorumCertificate::::genesis(), diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 63cdb2392a..3af3f8c89a 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -166,6 +166,7 @@ pub trait BlockHeader: instance_state: &>::Instance, parent_leaf: &Leaf, payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, metadata: ::Metadata, builder_fee: BuilderFee, ) -> impl Future + Send; @@ -174,6 +175,7 @@ pub trait BlockHeader: fn genesis( instance_state: &>::Instance, payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, metadata: ::Metadata, ) -> Self; @@ -187,8 +189,5 @@ pub trait BlockHeader: fn metadata(&self) -> &::Metadata; /// Get the builder commitment - fn builder_commitment( - &self, - metadata: &::Metadata, - ) -> BuilderCommitment; + fn builder_commitment(&self) -> BuilderCommitment; } From 5b526edaf469e15a9fb021ce93a5a214036eb734 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 17 Apr 2024 19:59:00 -0400 Subject: [PATCH 0958/1393] Minor rustfmt fixes (#2940) --- hotshot-qc/src/bit_vector.rs | 3 ++- hotshot-qc/src/bit_vector_old.rs | 3 ++- hotshot-qc/src/snarked/circuit.rs | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index a4035abb27..432c84d882 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -201,7 +201,6 @@ where #[cfg(test)] mod tests { - use super::*; use hotshot_stake_table::mt_based::StakeTable; use hotshot_types::traits::stake_table::StakeTableScheme; use jf_primitives::signatures::{ @@ -209,6 +208,8 @@ mod tests { SignatureScheme, }; + use super::*; + macro_rules! test_quorum_certificate { ($aggsig:tt) => { type ST = StakeTable<<$aggsig as SignatureScheme>::VerificationKey>; diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 855d7dd3e3..07dc988237 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -193,12 +193,13 @@ where #[cfg(test)] mod tests { - use super::*; use jf_primitives::signatures::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, SignatureScheme, }; + use super::*; + macro_rules! test_quorum_certificate { ($aggsig:tt) => { let mut rng = jf_utils::test_rng(); diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index 5ec3ce068a..a1605d0685 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -321,7 +321,6 @@ where #[cfg(test)] mod tests { - use super::*; use ark_bls12_377::{g1::Config as Param377, Fq as Fq377}; use ark_bn254::{g1::Config as Param254, Fq as Fq254, Fr as Fr254}; use ark_ec::{ @@ -334,6 +333,8 @@ mod tests { errors::CircuitError, gadgets::ecc::SWToTEConParam, Circuit, PlonkCircuit, Variable, }; + use super::*; + #[test] fn crypto_test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { let a_ecc = Fq377::zero(); From 0890efc91dcb040e13cd0bf09b035473887247a7 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 17 Apr 2024 20:27:18 -0400 Subject: [PATCH 0959/1393] Add sleep in builder loop (#2983) * add sleep * change where sleep is * better sleep --- task-impls/src/transactions.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index c709df9995..db2129c302 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -170,6 +170,7 @@ impl< let last_leaf = self.consensus.read().await.get_decided_leaf(); let mut latest_block: Option> = None; + let mut first_iteration = true; while task_start_time.elapsed() < self.api.propose_max_round_time() && latest_block.as_ref().map_or(true, |builder_response| { builder_response @@ -179,6 +180,13 @@ impl< < self.api.min_transactions() }) { + // Sleep if this isn't the first iteration + if first_iteration { + first_iteration = false; + } else { + async_sleep(Duration::from_millis(100)).await; + } + let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( &self.private_key, last_leaf.get_block_header().builder_commitment().as_ref(), @@ -325,7 +333,6 @@ impl< if num_txns >= self.api.min_transactions() { return latest_block; } - async_sleep(Duration::from_millis(100)).await; } latest_block } From 981c666e4cc496dfd406bb3fafb6d835982fd2a3 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 18 Apr 2024 08:27:15 -0400 Subject: [PATCH 0960/1393] [CX_CLEANUP] - Decompose Consensus Task into a Smaller Module (#2980) * separate update view and move consensus to a split package * unused import * finish moving files * improve docs * update lockfile --- .../src/{consensus.rs => consensus/mod.rs} | 364 +++--------------- task-impls/src/consensus/proposal.rs | 233 +++++++++++ task-impls/src/consensus/view_change.rs | 132 +++++++ task-impls/src/quorum_proposal.rs | 7 +- 4 files changed, 415 insertions(+), 321 deletions(-) rename task-impls/src/{consensus.rs => consensus/mod.rs} (84%) create mode 100644 task-impls/src/consensus/proposal.rs create mode 100644 task-impls/src/consensus/view_change.rs diff --git a/task-impls/src/consensus.rs b/task-impls/src/consensus/mod.rs similarity index 84% rename from task-impls/src/consensus.rs rename to task-impls/src/consensus/mod.rs index b21efb96d7..fbc7131eb6 100644 --- a/task-impls/src/consensus.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,22 +1,27 @@ -use core::time::Duration; use std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; -use anyhow::{ensure, Context, Result}; +use crate::{ + consensus::{proposal::validate_proposal, view_change::update_view}, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; use async_broadcast::Sender; -use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_compatibility_layer::art::async_spawn; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; -use futures::future::join_all; +use futures::{future::join_all, FutureExt}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, - constants::LOOK_AHEAD, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::Proposal, @@ -43,233 +48,22 @@ use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] use { crate::helpers::AnyhowTracing, - futures::FutureExt, hotshot_types::{ data::{null_block, VidDisperseShare}, message::GeneralConsensusMessage, simple_vote::QuorumData, }, - std::marker::PhantomData, }; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; +/// Handles proposal-related functionality. +pub(crate) mod proposal; + +/// Handles view-change related functionality. +pub(crate) mod view_change; /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; -/// Validate the state and safety and liveness of a proposal then emit -/// a `QuorumProposalValidated` event. -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -pub(crate) async fn validate_proposal( - proposal: Proposal>, - parent_leaf: Leaf, - consensus: Arc>>, - decided_upgrade_certificate: Option>, - quorum_membership: Arc, - parent_state: Arc, - view_leader_key: TYPES::SignatureKey, - event_stream: Sender>>, - sender: TYPES::SignatureKey, - event_sender: Sender>, - storage: Arc>>, -) -> Result<()> { - let (validated_state, state_delta) = parent_state - .validate_and_apply_header( - &consensus.read().await.instance_state, - &parent_leaf, - &proposal.data.block_header.clone(), - ) - .await - .context("Block header doesn't extend the proposal!")?; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - let parent_commitment = parent_leaf.commit(); - let view = proposal.data.get_view_number(); - - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - proposed_leaf.set_parent_commitment(parent_commitment); - - // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - // - // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: - // - // proposal.validate_signature(&quorum_membership)?; - // - // in a future PR. - ensure!( - view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), - "Could not verify proposal." - ); - - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; - - // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; - - let justify_qc = proposal.data.justify_qc.clone(); - // Create a positive vote if either liveness or safety check - // passes. - - // Liveness check. - let consensus = consensus.upgradable_read().await; - let liveness_check = justify_qc.get_view_number() > consensus.locked_view; - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.get_view_number(), - Terminator::Inclusive(consensus.locked_view), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.get_view_number() != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - - ensure!(safety_check || liveness_check, { - if let Err(e) = outcome { - broadcast_event( - Event { - view_number: view, - event: EventType::Error { error: Arc::new(e) }, - }, - &event_sender, - ) - .await; - } - - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view) - }); - - // We accept the proposal, notify the application layer - - broadcast_event( - Event { - view_number: view, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender, - }, - }, - &event_sender, - ) - .await; - // Notify other tasks - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), - parent_leaf, - )), - &event_stream, - ) - .await; - - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - - consensus.validated_state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state: state.clone(), - delta: Some(delta.clone()), - }, - }, - ); - consensus - .saved_leaves - .insert(proposed_leaf.commit(), proposed_leaf.clone()); - - if let Err(e) = storage - .write() - .await - .update_undecided_state( - consensus.saved_leaves.clone(), - consensus.validated_state_map.clone(), - ) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - - Ok(()) -} - -/// Create the header for a proposal, build the proposal, and broadcast -/// the proposal send evnet. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -async fn create_and_send_proposal( - pub_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: Arc>>, - event_stream: Sender>>, - view: TYPES::Time, - commitment_and_metadata: CommitmentAndMetadata, - parent_leaf: Leaf, - state: Arc, - upgrade_cert: Option>, - proposal_cert: Option>, - round_start_delay: u64, -) { - let block_header = TYPES::BlockHeader::new( - state.as_ref(), - &consensus.read().await.instance_state, - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fee, - ) - .await; - - let proposal = QuorumProposal { - block_header, - view_number: view, - justify_qc: consensus.read().await.high_qc.clone(), - proposal_certificate: proposal_cert, - upgrade_certificate: upgrade_cert, - }; - - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); - proposed_leaf.set_parent_commitment(parent_leaf.commit()); - - let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) - else { - // This should never happen. - error!("Failed to sign proposed_leaf.commit()!"); - return; - }; - - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, - }; - debug!( - "Sending null proposal for view {:?} \n {:?}", - proposed_leaf.get_view_number(), - "" - ); - - async_sleep(Duration::from_millis(round_start_delay)).await; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend(message.clone(), pub_key)), - &event_stream, - ) - .await; -} - /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState< @@ -509,100 +303,6 @@ impl, A: ConsensusApi + false } - /// Must only update the view and GC if the view actually changes - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus update view", level = "error")] - async fn update_view( - &mut self, - new_view: TYPES::Time, - event_stream: &Sender>>, - ) -> bool { - if *self.cur_view < *new_view { - debug!( - "Updating view from {} to {} in consensus task", - *self.cur_view, *new_view - ); - - if *self.cur_view / 100 != *new_view / 100 { - // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): - // switch to info! when INFO logs become less cluttered - error!("Progress: entered view {:>6}", *new_view); - } - - // cancel the old timeout task - if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; - } - self.cur_view = new_view; - - // Poll the future leader for lookahead - let lookahead_view = new_view + LOOK_AHEAD; - if self.quorum_membership.get_leader(lookahead_view) != self.public_key { - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( - *lookahead_view, - self.quorum_membership.get_leader(lookahead_view), - )) - .await; - } - - // Start polling for proposals for the new view - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view + 1)) - .await; - - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view + 1)) - .await; - - if self.quorum_membership.get_leader(self.cur_view + 1) == self.public_key { - debug!("Polling for quorum votes for view {}", *self.cur_view); - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view)) - .await; - } - - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; - - // Spawn a timeout task if we did actually update view - let timeout = self.timeout; - self.timeout_task = Some(async_spawn({ - let stream = event_stream.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = self.cur_view + 1; - async move { - async_sleep(Duration::from_millis(timeout)).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), - &stream, - ) - .await; - } - })); - let consensus = self.consensus.upgradable_read().await; - consensus - .metrics - .current_view - .set(usize::try_from(self.cur_view.get_u64()).unwrap()); - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(self.cur_view.get_u64()).unwrap() - > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() - { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(self.cur_view.get_u64()).unwrap() - - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), - ); - } - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.update_view(new_view); - drop(consensus); - - return true; - } - false - } - /// Validates whether the VID Dispersal Proposal is correctly signed #[cfg(not(feature = "dependency-tasks"))] fn validate_disperse(&self, disperse: &Proposal>) -> bool { @@ -728,7 +428,21 @@ impl, A: ConsensusApi + } // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - self.update_view(view, &event_stream).await; + if let Err(e) = update_view::( + self.public_key.clone(), + view, + &event_stream, + self.quorum_membership.clone(), + self.quorum_network.clone(), + self.timeout, + self.consensus.clone(), + &mut self.cur_view, + &mut self.timeout_task, + ) + .await + { + warn!("Failed to update view; error = {e:?}"); + } let consensus = self.consensus.upgradable_read().await; @@ -1325,8 +1039,20 @@ impl, A: ConsensusApi + // update the view in state to the one in the message // Publish a view change event to the application // Returns if the view does not need updating. - if !self.update_view(new_view, &event_stream).await { - debug!("view not updated"); + if let Err(e) = update_view::( + self.public_key.clone(), + new_view, + &event_stream, + self.quorum_membership.clone(), + self.quorum_network.clone(), + self.timeout, + self.consensus.clone(), + &mut self.cur_view, + &mut self.timeout_task, + ) + .await + { + warn!("Failed to update view; error = {e:?}"); return; } @@ -1489,6 +1215,8 @@ impl, A: ConsensusApi + view: TYPES::Time, event_stream: &Sender>>, ) { + use crate::consensus::proposal::create_and_send_proposal; + if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. if view != TYPES::Time::new(1) { diff --git a/task-impls/src/consensus/proposal.rs b/task-impls/src/consensus/proposal.rs new file mode 100644 index 0000000000..63be7ee082 --- /dev/null +++ b/task-impls/src/consensus/proposal.rs @@ -0,0 +1,233 @@ +use std::sync::Arc; + +use crate::{events::HotShotEvent, helpers::broadcast_event}; +use anyhow::{ensure, Context, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::async_sleep; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use committable::Committable; +use core::time::Duration; +use hotshot_types::{ + consensus::{CommitmentAndMetadata, Consensus, View}, + data::{Leaf, QuorumProposal, ViewChangeEvidence}, + event::{Event, EventType}, + message::Proposal, + simple_certificate::UpgradeCertificate, + traits::{ + block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, + states::ValidatedState, storage::Storage, + }, + utils::{Terminator, ViewInner}, + vote::HasViewNumber, +}; +use tracing::{debug, error, warn}; + +#[cfg(not(feature = "dependency-tasks"))] +use std::marker::PhantomData; + +/// Validate the state and safety and liveness of a proposal then emit +/// a `QuorumProposalValidated` event. +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +pub async fn validate_proposal( + proposal: Proposal>, + parent_leaf: Leaf, + consensus: Arc>>, + decided_upgrade_certificate: Option>, + quorum_membership: Arc, + parent_state: Arc, + view_leader_key: TYPES::SignatureKey, + event_stream: Sender>>, + sender: TYPES::SignatureKey, + event_sender: Sender>, + storage: Arc>>, +) -> Result<()> { + let (validated_state, state_delta) = parent_state + .validate_and_apply_header( + &consensus.read().await.instance_state, + &parent_leaf, + &proposal.data.block_header.clone(), + ) + .await + .context("Block header doesn't extend the proposal!")?; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + let parent_commitment = parent_leaf.commit(); + let view = proposal.data.get_view_number(); + + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + proposed_leaf.set_parent_commitment(parent_commitment); + + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + // + // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: + // + // proposal.validate_signature(&quorum_membership)?; + // + // in a future PR. + ensure!( + view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), + "Could not verify proposal." + ); + + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + + // Validate that the upgrade certificate is re-attached, if we saw one on the parent + proposed_leaf.extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; + + let justify_qc = proposal.data.justify_qc.clone(); + // Create a positive vote if either liveness or safety check + // passes. + + // Liveness check. + let consensus = consensus.upgradable_read().await; + let liveness_check = justify_qc.get_view_number() > consensus.locked_view; + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = consensus.visit_leaf_ancestors( + justify_qc.get_view_number(), + Terminator::Inclusive(consensus.locked_view), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.get_view_number() != consensus.locked_view + }, + ); + let safety_check = outcome.is_ok(); + + ensure!(safety_check || liveness_check, { + if let Err(e) = outcome { + broadcast_event( + Event { + view_number: view, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } + + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view) + }); + + // We accept the proposal, notify the application layer + + broadcast_event( + Event { + view_number: view, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }, + &event_sender, + ) + .await; + // Notify other tasks + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), + &event_stream, + ) + .await; + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + + consensus.validated_state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: state.clone(), + delta: Some(delta.clone()), + }, + }, + ); + consensus + .saved_leaves + .insert(proposed_leaf.commit(), proposed_leaf.clone()); + + if let Err(e) = storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + + Ok(()) +} + +/// Create the header for a proposal, build the proposal, and broadcast +/// the proposal send evnet. +#[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] +pub async fn create_and_send_proposal( + pub_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + event_stream: Sender>>, + view: TYPES::Time, + commitment_and_metadata: CommitmentAndMetadata, + parent_leaf: Leaf, + state: Arc, + upgrade_cert: Option>, + proposal_cert: Option>, + round_start_delay: u64, +) { + let block_header = TYPES::BlockHeader::new( + state.as_ref(), + &consensus.read().await.instance_state, + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fee, + ) + .await; + + let proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: consensus.read().await.high_qc.clone(), + proposal_certificate: proposal_cert, + upgrade_certificate: upgrade_cert, + }; + + let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); + proposed_leaf.set_parent_commitment(parent_leaf.commit()); + + let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) + else { + // This should never happen. + error!("Failed to sign proposed_leaf.commit()!"); + return; + }; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!( + "Sending null proposal for view {:?} \n {:?}", + proposed_leaf.get_view_number(), + "" + ); + + async_sleep(Duration::from_millis(round_start_delay)).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend(message.clone(), pub_key)), + &event_stream, + ) + .await; +} diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs new file mode 100644 index 0000000000..a0ac090024 --- /dev/null +++ b/task-impls/src/consensus/view_change.rs @@ -0,0 +1,132 @@ +use std::sync::Arc; + +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; +use anyhow::{ensure, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use core::time::Duration; +use hotshot_types::{ + consensus::Consensus, + constants::LOOK_AHEAD, + traits::{ + election::Membership, + network::{ConnectedNetwork, ConsensusIntentEvent}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, +}; +use tracing::{debug, error}; + +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; + +/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the +/// `timeout_task` which are updated during the operation of the function. +/// +/// # Errors +/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. +#[allow(clippy::too_many_arguments)] +pub(crate) async fn update_view>( + public_key: TYPES::SignatureKey, + new_view: TYPES::Time, + event_stream: &Sender>>, + quorum_membership: Arc, + quorum_network: Arc, + timeout: u64, + consensus: Arc>>, + cur_view: &mut TYPES::Time, + timeout_task: &mut Option>, +) -> Result<()> { + ensure!( + new_view > *cur_view, + "New view is not greater than our current view" + ); + + debug!("Updating view from {} to {}", **cur_view, *new_view); + + if **cur_view / 100 != *new_view / 100 { + // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): + // switch to info! when INFO logs become less cluttered + error!("Progress: entered view {:>6}", *new_view); + } + + // cancel the old timeout task + if let Some(timeout_task) = timeout_task.take() { + cancel_task(timeout_task).await; + } + + *cur_view = new_view; + + // Poll the future leader for lookahead + let lookahead_view = new_view + LOOK_AHEAD; + if quorum_membership.get_leader(lookahead_view) != public_key { + quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( + *lookahead_view, + quorum_membership.get_leader(lookahead_view), + )) + .await; + } + + // The next view is just the current view + 1 + let next_view = *cur_view + 1; + + // Start polling for proposals for the new view + quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*next_view)) + .await; + + quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*next_view)) + .await; + + if quorum_membership.get_leader(next_view) == public_key { + debug!("Polling for quorum votes for view {}", **cur_view); + quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForVotes(**cur_view)) + .await; + } + + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; + + // Spawn a timeout task if we did actually update view + *timeout_task = Some(async_spawn({ + let stream = event_stream.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = next_view; + async move { + async_sleep(Duration::from_millis(timeout)).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + })); + let consensus = consensus.upgradable_read().await; + consensus + .metrics + .current_view + .set(usize::try_from(cur_view.get_u64()).unwrap()); + + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(cur_view.get_u64()).unwrap() + > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(cur_view.get_u64()).unwrap() + - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), + ); + } + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.update_view(new_view); + + Ok(()) +} diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index b4e93abe63..d38f52238c 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -3,8 +3,6 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use committable::Committable; use either::Either; use futures::future::FutureExt; @@ -30,12 +28,15 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; + +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use crate::{ - consensus::validate_proposal, + consensus::proposal::validate_proposal, events::HotShotEvent, helpers::{broadcast_event, cancel_task, AnyhowTracing}, }; From 6f43ef7f726bd229c496960b01eecca61b7c99ce Mon Sep 17 00:00:00 2001 From: Alex Xiong Date: Thu, 18 Apr 2024 21:55:31 +0800 Subject: [PATCH 0961/1393] feat: add aztec-srs (#2854) * add aztec-srs * use upgraded ark-srs api, and only cached srs upon nix-shell entry * address comments * update ci to download srs * fix linting and quiet wget * use KZG_TEST and vid_scheme_for_test() instead * minor update on err msg --- types/Cargo.toml | 10 ++++---- types/src/constants.rs | 7 +++++ types/src/vid.rs | 58 ++++++++++++++++++++++++++++++++++-------- 3 files changed, 60 insertions(+), 15 deletions(-) diff --git a/types/Cargo.toml b/types/Cargo.toml index 5ad087f24f..4fc5621445 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -5,11 +5,6 @@ edition = "2021" name = "hotshot-types" version = "0.1.11" -[features] -gpu-vid = ["jf-primitives/gpu-vid"] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] anyhow = { workspace = true } ark-bls12-381 = { workspace = true } @@ -18,6 +13,7 @@ ark-ec = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true } +ark-srs = { version = "0.2.0" } ark-std = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } @@ -60,6 +56,10 @@ url = "2.5.0" [dev-dependencies] serde_json = { workspace = true } +[features] +gpu-vid = ["jf-primitives/gpu-vid"] +test-srs = ["jf-primitives/test-srs"] + [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/types/src/constants.rs b/types/src/constants.rs index 06b9adf24b..8f93667ca8 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -53,3 +53,10 @@ pub type WebServerVersion = StaticVersion VidSchemeType { #[allow(clippy::panic)] let num_storage_nodes = u32::try_from(num_storage_nodes).unwrap_or_else(|err| { - panic!("num_storage_nodes {num_storage_nodes} should fit into u32\n\terror: : {err}") + panic!( + "num_storage_nodes {num_storage_nodes} should fit into u32; \ + error: {err}" + ) }); // TODO panic, return `Result`, or make `new` infallible upstream (eg. by panicking)? #[allow(clippy::panic)] - VidSchemeType(Advz::new(num_storage_nodes, recovery_threshold, &*KZG_SRS).unwrap_or_else(|err| panic!("advz construction failure:\n\t(num_storage nodes,recovery_threshold)=({num_storage_nodes},{recovery_threshold})\n\terror: : {err}"))) + VidSchemeType( + Advz::new(num_storage_nodes, recovery_threshold, &*KZG_SRS).unwrap_or_else(|err| { + panic!("advz construction failure: (num_storage nodes,recovery_threshold)=({num_storage_nodes},{recovery_threshold}); \ + error: {err}") + }) + ) +} + +/// Similar to [`vid_scheme()`], but with `KZG_SRS_TEST` for testing purpose only. +#[cfg(feature = "test-srs")] +pub fn vid_scheme_for_test(num_storage_nodes: usize) -> VidSchemeType { + let recovery_threshold = 1 << num_storage_nodes.ilog2(); + #[allow(clippy::panic)] + let num_storage_nodes = u32::try_from(num_storage_nodes).unwrap_or_else(|err| { + panic!("num_storage_nodes {num_storage_nodes} should fit into u32; error: {err}") + }); + #[allow(clippy::panic)] + VidSchemeType( + Advz::new(num_storage_nodes, recovery_threshold, &*KZG_SRS_TEST).unwrap_or_else(|err| { + panic!("advz construction failure: (num_storage nodes,recovery_threshold)=({num_storage_nodes},{recovery_threshold});\ + error: {err}") + }) + ) } /// VID commitment type @@ -116,22 +142,34 @@ pub struct SmallRangeProofType( SmallRangeProof< as PolynomialCommitmentScheme>::Proof>, ); +#[cfg(feature = "test-srs")] lazy_static! { - /// SRS comment - /// - /// TODO use a proper SRS - /// https://github.com/EspressoSystems/HotShot/issues/1686 - static ref KZG_SRS: UnivariateUniversalParams = { + /// SRS for testing only + static ref KZG_SRS_TEST: UnivariateUniversalParams = { let mut rng = jf_utils::test_rng(); UnivariateKzgPCS::::gen_srs_for_testing( &mut rng, - // TODO what's the maximum possible SRS size? - checked_fft_size(200).unwrap(), + SRS_DEGREE, ) .unwrap() }; } +// By default, use SRS from Aztec's ceremony +lazy_static! { + /// SRS comment + static ref KZG_SRS: UnivariateUniversalParams = { + let srs = ark_srs::kzg10::aztec20::setup(SRS_DEGREE) + .expect("Aztec SRS failed to load"); + UnivariateUniversalParams { + powers_of_g: srs.powers_of_g, + h: srs.h, + beta_h: srs.beta_h, + powers_of_h: vec![srs.h, srs.beta_h], + } + }; +} + /// Private type alias for the EC pairing type parameter for [`Advz`]. type E = Bn254; /// Private type alias for the hash type parameter for [`Advz`]. From d029e961fcd9ba89c0879bc6d4b494016562252c Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Thu, 18 Apr 2024 15:52:21 -0400 Subject: [PATCH 0962/1393] revert spawn_blocking (#2982) * add spawn_blocking * remove clone * fix cargo lock * fix lockfile --------- Co-authored-by: Jarred Parr --- task-impls/src/helpers.rs | 30 +++++++++++++++++++++--------- task-impls/src/response.rs | 2 +- task-impls/src/vid.rs | 5 +++-- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 5c6d552b56..37cb9932a7 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use async_broadcast::{SendError, Sender}; #[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; +use async_std::task::{spawn_blocking, JoinHandle}; use hotshot_types::{ data::VidDisperse, traits::{election::Membership, node_implementation::NodeType}, @@ -10,7 +10,7 @@ use hotshot_types::{ }; use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; #[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::task::{spawn_blocking, JoinHandle}; /// Cancel a task pub async fn cancel_task(task: JoinHandle) { @@ -43,13 +43,19 @@ pub async fn broadcast_event(event: E, sender: &Send /// # Panics /// Panics if the VID calculation fails, this should not happen. #[allow(clippy::panic)] -pub fn calculate_vid_disperse( - txns: &[u8], +pub async fn calculate_vid_disperse( + txns: Vec, membership: &Arc, view: TYPES::Time, ) -> VidDisperse { let num_nodes = membership.total_nodes(); - let vid_disperse = vid_scheme(num_nodes).disperse(txns).unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())); + let vid_disperse = spawn_blocking(move || { + vid_scheme(num_nodes).disperse(&txns).unwrap_or_else(|err| panic!("VID precompute disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) + }) + .await; + #[cfg(async_executor_impl = "tokio")] + // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. + let vid_disperse = vid_disperse.unwrap(); VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) } @@ -59,14 +65,20 @@ pub fn calculate_vid_disperse( /// # Panics /// Panics if the VID calculation fails, this should not happen. #[allow(clippy::panic)] -pub fn calculate_vid_disperse_using_precompute_data( - txns: &Vec, +pub async fn calculate_vid_disperse_using_precompute_data( + txns: Vec, membership: &Arc, view: TYPES::Time, - pre_compute_data: &VidPrecomputeData, + pre_compute_data: VidPrecomputeData, ) -> VidDisperse { let num_nodes = membership.total_nodes(); - let vid_disperse = vid_scheme(num_nodes).disperse_precompute(txns, pre_compute_data).unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())); + let vid_disperse = spawn_blocking(move || { + vid_scheme(num_nodes).disperse_precompute(&txns, &pre_compute_data).unwrap_or_else(|err| panic!("VID precompute disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) + }) + .await; + #[cfg(async_executor_impl = "tokio")] + // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. + let vid_disperse = vid_disperse.unwrap(); VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index b7299cc5c2..026bfce7ab 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -133,7 +133,7 @@ impl NetworkResponseState { .is_some_and(|m| m.contains_key(key)); if !contained { let txns = consensus.saved_payloads.get(&view)?; - let vid = calculate_vid_disperse(&txns.clone(), &self.quorum.clone(), view); + let vid = calculate_vid_disperse(txns.clone(), &self.quorum.clone(), view).await; let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; for share in shares { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index e5e7a13937..e77c118c87 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -68,10 +68,11 @@ impl, A: ConsensusApi + ); let builder_commitment = payload.builder_commitment(metadata); let vid_disperse = calculate_vid_disperse( - &encoded_transactions.clone(), + encoded_transactions.clone(), &self.membership.clone(), *view_number, - ); + ) + .await; // send the commitment and metadata to consensus for block building broadcast_event( Arc::new(HotShotEvent::SendPayloadCommitmentAndMetadata( From 1028e7e974e82fbb5e55fd53af0a0876b8b63e26 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 18 Apr 2024 23:11:16 +0200 Subject: [PATCH 0963/1393] Use correct commitment when querying builder (#2987) --- task-impls/src/consensus/mod.rs | 17 ++++----- task-impls/src/consensus/proposal.rs | 8 ++--- task-impls/src/consensus/view_change.rs | 18 +++++----- task-impls/src/quorum_proposal.rs | 5 ++- task-impls/src/transactions.rs | 46 ++++++++++++++++++++++--- 5 files changed, 66 insertions(+), 28 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index fbc7131eb6..9be045b921 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -3,14 +3,6 @@ use std::{ sync::Arc, }; -use crate::{ - consensus::{proposal::validate_proposal, view_change::update_view}, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; use async_lock::{RwLock, RwLockUpgradableReadGuard}; @@ -55,6 +47,15 @@ use { }, }; +use crate::{ + consensus::{proposal::validate_proposal, view_change::update_view}, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; + /// Handles proposal-related functionality. pub(crate) mod proposal; diff --git a/task-impls/src/consensus/proposal.rs b/task-impls/src/consensus/proposal.rs index 63be7ee082..bb39197c44 100644 --- a/task-impls/src/consensus/proposal.rs +++ b/task-impls/src/consensus/proposal.rs @@ -1,12 +1,13 @@ +use core::time::Duration; +#[cfg(not(feature = "dependency-tasks"))] +use std::marker::PhantomData; use std::sync::Arc; -use crate::{events::HotShotEvent, helpers::broadcast_event}; use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::async_sleep; use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::Committable; -use core::time::Duration; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, @@ -22,8 +23,7 @@ use hotshot_types::{ }; use tracing::{debug, error, warn}; -#[cfg(not(feature = "dependency-tasks"))] -use std::marker::PhantomData; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index a0ac090024..6c82e8c82c 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,14 +1,12 @@ +use core::time::Duration; use std::sync::Arc; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; use anyhow::{ensure, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; -use core::time::Duration; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use hotshot_types::{ consensus::Consensus, constants::LOOK_AHEAD, @@ -18,12 +16,14 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; -use tracing::{debug, error}; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tracing::{debug, error}; + +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; /// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the /// `timeout_task` which are updated during the operation of the function. diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index d38f52238c..770987f004 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -3,6 +3,8 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use committable::Committable; use either::Either; use futures::future::FutureExt; @@ -28,9 +30,6 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index db2129c302..66edb4317e 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -12,15 +12,17 @@ use hotshot_builder_api::block_info::{ use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, + data::Leaf, event::{Event, EventType}, traits::{ block_contents::{BlockHeader, BuilderFee}, consensus_api::ConsensusApi, election::Membership, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, }, + utils::BuilderCommitment, }; use tracing::{debug, error, instrument}; use vbs::version::StaticVersionType; @@ -164,11 +166,47 @@ impl< None } + /// Get last known builder commitment from consensus. + async fn latest_known_builder_commitment(&self) -> BuilderCommitment { + let consensus = self.consensus.read().await; + + let mut prev_view = TYPES::Time::new(self.cur_view.saturating_sub(1)); + + // Search through all previous views... + while prev_view != TYPES::Time::genesis() { + if let Some(commitment) = + consensus + .validated_state_map + .get(&prev_view) + .and_then(|view| match view.view_inner { + // For a view for which we have a Leaf stored + hotshot_types::utils::ViewInner::Leaf { leaf, .. } => consensus + .saved_leaves + .get(&leaf) + .map(Leaf::get_block_header) + .map(BlockHeader::builder_commitment), // and return it's commitment + _ => None, + }) + { + return commitment; + } + prev_view = prev_view - 1; + } + + // If not found, return commitment for last decided block + consensus + .get_decided_leaf() + .get_block_header() + .builder_commitment() + } + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] async fn wait_for_block(&self) -> Option> { let task_start_time = Instant::now(); - let last_leaf = self.consensus.read().await.get_decided_leaf(); + // Find commitment to the block we want to build upon + let parent_commitment = self.latest_known_builder_commitment().await; + let mut latest_block: Option> = None; let mut first_iteration = true; while task_start_time.elapsed() < self.api.propose_max_round_time() @@ -189,7 +227,7 @@ impl< let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( &self.private_key, - last_leaf.get_block_header().builder_commitment().as_ref(), + parent_commitment.as_ref(), ) else { error!("Failed to sign block hash"); continue; @@ -198,7 +236,7 @@ impl< let mut available_blocks = match self .builder_client .get_available_blocks( - last_leaf.get_block_header().builder_commitment(), + parent_commitment.clone(), self.public_key.clone(), &request_signature, ) From 904ac0fb66ab2d279271f1968b8ccbd938d010e8 Mon Sep 17 00:00:00 2001 From: Mathis Date: Fri, 19 Apr 2024 20:43:08 +0800 Subject: [PATCH 0964/1393] Update ark-srs 0.2 -> 0.3.1 (#2994) * Update ark-srs 0.2 -> 0.3 Remove manual downloading of SRS files. * Test CI with new request lib * ark-srs: 0.3.1 --- types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/Cargo.toml b/types/Cargo.toml index 4fc5621445..c15ab430e4 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -13,7 +13,7 @@ ark-ec = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true } -ark-srs = { version = "0.2.0" } +ark-srs = { version = "0.3.1" } ark-std = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } From 8cb82762abd2f2c3fd1511382e791c947ca83978 Mon Sep 17 00:00:00 2001 From: Mathis Date: Fri, 19 Apr 2024 22:53:54 +0800 Subject: [PATCH 0965/1393] Fix builder client URL (#2979) * Fix builder client URL * Fix builder client URL in test builder * [ci skip] debug log successful blocks fetched --- task-impls/src/builder.rs | 2 +- task-impls/src/transactions.rs | 7 ++++++- testing/src/block_builder.rs | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 6266b188e9..aecbf4ee2e 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -72,7 +72,7 @@ impl BuilderClient { /// If the URL is malformed. pub fn new(base_url: impl Into) -> Self { Self { - inner: Client::new(base_url.into().join("api").unwrap()), + inner: Client::new(base_url.into().join("block_info").unwrap()), _marker: std::marker::PhantomData, } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 66edb4317e..8702dc8902 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -242,9 +242,14 @@ impl< ) .await { - Ok(blocks) => blocks, + Ok(blocks) => { + tracing::debug!("Got available blocks: {:?}", blocks); + blocks + }, Err(err) => { error!(%err, "Couldn't get available blocks"); + // pause a bit + async_sleep(Duration::from_millis(100)).await; continue; } }; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index df3e3e293a..013e32dfbb 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -87,7 +87,7 @@ impl TestBuilderImplementation for SimpleBuilderImplemen .expect("Failed to construct the builder API"); let mut app: App, hotshot_builder_api::builder::Error> = App::with_state(source); - app.register_module("api", builder_api) + app.register_module("block_info", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url.clone(), STATIC_VER_0_1)); From 13baf26c58675968e71d172df55e31ed7f4c2cb6 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 19 Apr 2024 19:00:37 +0200 Subject: [PATCH 0966/1393] Revert use of BuilderCommitment in available blocks call (#3001) * Revert use of BuilderCommitment in avaliable blocks call * Serialize + Deserialize for BuilderFee --- builder-api/src/data_source.rs | 3 ++- task-impls/src/builder.rs | 3 ++- task-impls/src/transactions.rs | 26 ++++++++++++-------------- testing/src/block_builder.rs | 5 +++-- testing/tests/tests_1/block_builder.rs | 4 ++-- types/src/traits/block_contents.rs | 4 ++-- 6 files changed, 23 insertions(+), 22 deletions(-) diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index d168ba6ab9..90641e1358 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -2,6 +2,7 @@ use async_trait::async_trait; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, + vid::VidCommitment, }; use crate::{ @@ -14,7 +15,7 @@ pub trait BuilderDataSource { /// To get the list of available blocks async fn get_available_blocks( &self, - for_parent: &BuilderCommitment, + for_parent: &VidCommitment, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result>, BuildError>; diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index aecbf4ee2e..4f4b036ba3 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -8,6 +8,7 @@ use hotshot_builder_api::{ use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, + vid::VidCommitment, }; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -103,7 +104,7 @@ impl BuilderClient { /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly pub async fn get_available_blocks( &self, - parent: BuilderCommitment, + parent: VidCommitment, sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result>, BuilderClientError> { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 8702dc8902..98c1980126 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -15,14 +15,15 @@ use hotshot_types::{ data::Leaf, event::{Event, EventType}, traits::{ - block_contents::{BlockHeader, BuilderFee}, + block_contents::BuilderFee, consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, }, - utils::BuilderCommitment, + utils::ViewInner, + vid::VidCommitment, }; use tracing::{debug, error, instrument}; use vbs::version::StaticVersionType; @@ -167,7 +168,7 @@ impl< } /// Get last known builder commitment from consensus. - async fn latest_known_builder_commitment(&self) -> BuilderCommitment { + async fn latest_known_vid_commitment(&self) -> VidCommitment { let consensus = self.consensus.read().await; let mut prev_view = TYPES::Time::new(self.cur_view.saturating_sub(1)); @@ -180,12 +181,12 @@ impl< .get(&prev_view) .and_then(|view| match view.view_inner { // For a view for which we have a Leaf stored - hotshot_types::utils::ViewInner::Leaf { leaf, .. } => consensus + ViewInner::DA { payload_commitment } => Some(payload_commitment), + ViewInner::Leaf { leaf, .. } => consensus .saved_leaves .get(&leaf) - .map(Leaf::get_block_header) - .map(BlockHeader::builder_commitment), // and return it's commitment - _ => None, + .map(Leaf::get_payload_commitment), + ViewInner::Failed => None, }) { return commitment; @@ -194,10 +195,7 @@ impl< } // If not found, return commitment for last decided block - consensus - .get_decided_leaf() - .get_block_header() - .builder_commitment() + consensus.get_decided_leaf().get_payload_commitment() } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] @@ -205,7 +203,7 @@ impl< let task_start_time = Instant::now(); // Find commitment to the block we want to build upon - let parent_commitment = self.latest_known_builder_commitment().await; + let parent_commitment = self.latest_known_vid_commitment().await; let mut latest_block: Option> = None; let mut first_iteration = true; @@ -236,7 +234,7 @@ impl< let mut available_blocks = match self .builder_client .get_available_blocks( - parent_commitment.clone(), + parent_commitment, self.public_key.clone(), &request_signature, ) @@ -245,7 +243,7 @@ impl< Ok(blocks) => { tracing::debug!("Got available blocks: {:?}", blocks); blocks - }, + } Err(err) => { error!(%err, "Couldn't get available blocks"); // pause a bit diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 013e32dfbb..31e2ddc028 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -30,6 +30,7 @@ use hotshot_types::{ signature_key::BuilderSignatureKey, }, utils::BuilderCommitment, + vid::VidCommitment, }; use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; @@ -199,7 +200,7 @@ impl ReadState for RandomBuilderSource { impl BuilderDataSource for RandomBuilderSource { async fn get_available_blocks( &self, - _for_parent: &BuilderCommitment, + _for_parent: &VidCommitment, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { @@ -305,7 +306,7 @@ impl ReadState for SimpleBuilderSource { impl BuilderDataSource for SimpleBuilderSource { async fn get_available_blocks( &self, - _for_parent: &BuilderCommitment, + _for_parent: &VidCommitment, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index f9d29d7258..0e0759446f 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -24,7 +24,7 @@ async fn test_random_block_builder() { use hotshot_builder_api::block_info::AvailableBlockData; use hotshot_orchestrator::config::RandomBuilderConfig; - use hotshot_types::utils::BuilderCommitment; + use hotshot_types::traits::block_contents::vid_commitment; let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); @@ -43,7 +43,7 @@ async fn test_random_block_builder() { let mut blocks = loop { // Test getting blocks let blocks = client - .get_available_blocks(BuilderCommitment::from_bytes(&vec![]), pub_key, &signature) + .get_available_blocks(vid_commitment(&vec![], 1), pub_key, &signature) .await .expect("Failed to get available blocks"); diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 3af3f8c89a..6ee11e2e9c 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -12,7 +12,7 @@ use std::{ use committable::{Commitment, Committable}; use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use super::signature_key::BuilderSignatureKey; use crate::{ @@ -146,7 +146,7 @@ pub fn precompute_vid_commitment( /// do dispersal for the genesis block. For simplicity and performance, we use 1. pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] /// Information about builder fee for proposed block pub struct BuilderFee { /// Proposed fee amount From c46c7afaacfe463d875e99aa4882c698e458d78d Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 19 Apr 2024 19:07:48 +0200 Subject: [PATCH 0967/1393] Fix builder URL, part 2 (#3000) * Fix builder url 2.0 * Restore Cargo.lock * Revert use of spawn_blocking in builders --- testing/src/block_builder.rs | 19 ++++++++++--------- testing/tests/tests_1/block_builder.rs | 11 ++++++++++- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 31e2ddc028..01a908fa9b 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -166,7 +166,8 @@ where num_storage_nodes, pub_key.clone(), priv_key.clone(), - ); + ) + .await; if let Some((hash, _)) = blocks.write().await.push( metadata.block_hash.clone(), @@ -269,7 +270,7 @@ where ) .expect("Failed to construct the builder API"); let mut app: App, Error> = App::with_state(source); - app.register_module::("api", builder_api) + app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, STATIC_VER_0_1)); @@ -334,7 +335,8 @@ impl BuilderDataSource for SimpleBuilderSource { self.num_storage_nodes, self.pub_key.clone(), self.priv_key.clone(), - ); + ) + .await; self.blocks.write().await.insert( metadata.block_hash.clone(), @@ -401,7 +403,7 @@ impl SimpleBuilderSource { >(&Options::default()) .expect("Failed to construct the builder API"); let mut app: App, Error> = App::with_state(self); - app.register_module::("api", builder_api) + app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, STATIC_VER_0_1)); @@ -513,7 +515,7 @@ pub async fn make_simple_builder( } /// Helper function to construct all builder data structures from a list of transactions -fn build_block( +async fn build_block( transactions: Vec, num_storage_nodes: usize, pub_key: TYPES::BuilderSignatureKey, @@ -528,10 +530,9 @@ fn build_block( let commitment = block_payload.builder_commitment(&metadata); - let (vid_commitment, precompute_data) = precompute_vid_commitment( - &block_payload.encode().unwrap().collect(), - num_storage_nodes, - ); + let encoded_payload = block_payload.encode().unwrap().collect(); + let (vid_commitment, precompute_data) = + precompute_vid_commitment(&encoded_payload, num_storage_nodes); // Get block size from the encoded payload let block_size = block_payload diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 0e0759446f..c4cca251f6 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -29,7 +29,16 @@ async fn test_random_block_builder() { let port = portpicker::pick_unused_port().expect("Could not find an open port"); let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - run_random_builder::(api_url.clone(), 1, RandomBuilderConfig::default()); + run_random_builder::( + api_url.clone(), + 1, + RandomBuilderConfig { + // Essentially removes delays so that builder doesn't slow + // down the test + blocks_per_second: u32::MAX, + ..Default::default() + }, + ); let builder_started = Instant::now(); let client: BuilderClient = BuilderClient::new(api_url); From b95a68da6880306fdea4af53cb3c069c564a8468 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 19 Apr 2024 19:21:33 +0200 Subject: [PATCH 0968/1393] Version bump (#3003) --- builder-api/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 7a83c79990..97d171b3be 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hotshot-builder-api" -version = "0.1.6" +version = "0.1.7" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From bf4965ec233e6e01c6ba8886efbef56ff64e4e36 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 22 Apr 2024 09:10:17 -0400 Subject: [PATCH 0969/1393] Propose empty block (#3006) * propose empty block * improve logging * format and lint * PR changes * break instead of returning * only print log if we didn't get a latest block --- task-impls/src/transactions.rs | 69 ++++++++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 12 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 98c1980126..edbcc20fd9 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -12,7 +12,7 @@ use hotshot_builder_api::block_info::{ use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, - data::Leaf, + data::{null_block, Leaf}, event::{Event, EventType}, traits::{ block_contents::BuilderFee, @@ -124,6 +124,7 @@ impl< debug!("Not next leader for view {:?}", self.cur_view); return None; } + let block_view = if make_block { view } else { view + 1 }; if let Some(BuilderResponses { block_data, @@ -132,7 +133,6 @@ impl< }) = self.wait_for_block().await { // send the sequenced transactions to VID and DA tasks - let block_view = if make_block { view } else { view + 1 }; let encoded_transactions = match block_data.block_payload.encode() { Ok(encoded) => encoded.into_iter().collect::>(), Err(e) => { @@ -154,7 +154,38 @@ impl< ) .await; } else { - error!("Failed to get a block from the builder"); + // If we couldn't get a block, send an empty block + error!( + "Failed to get a block for view {:?} proposing empty block", + view + ); + + // Calculate the builder fee for the empty block + let Some(builder_fee) = null_block::builder_fee(self.membership.total_nodes()) + else { + error!("Failed to get builder fee"); + return None; + }; + + // Create an empty block payload and metadata + let Ok((_, metadata)) = + ::BlockPayload::from_transactions(vec![]) + else { + error!("Failed to create empty block payload"); + return None; + }; + + // Broadcast the empty block + broadcast_event( + Arc::new(HotShotEvent::BlockRecv( + vec![], + metadata, + block_view, + builder_fee, + )), + &event_stream, + ) + .await; }; return None; @@ -231,25 +262,39 @@ impl< continue; }; - let mut available_blocks = match self - .builder_client - .get_available_blocks( + let mut available_blocks = match async_compatibility_layer::art::async_timeout( + self.api + .propose_max_round_time() + .saturating_sub(task_start_time.elapsed()), + self.builder_client.get_available_blocks( parent_commitment, self.public_key.clone(), &request_signature, - ) - .await + ), + ) + .await { - Ok(blocks) => { + // We got available blocks + Ok(Ok(blocks)) => { tracing::debug!("Got available blocks: {:?}", blocks); blocks } - Err(err) => { + + // We failed to get available blocks + Ok(Err(err)) => { error!(%err, "Couldn't get available blocks"); // pause a bit async_sleep(Duration::from_millis(100)).await; continue; } + + // We timed out while getting available blocks + Err(err) => { + if latest_block.is_none() { + error!(%err, "Timeout while getting available blocks"); + } + break; + } }; available_blocks.sort_by_key(|block_info| block_info.offered_fee); @@ -317,7 +362,7 @@ impl< block_data } Err(err) => { - error!(%err, "Failed to claim block"); + error!(%err, "Failed to claim block data"); continue; } }; @@ -357,7 +402,7 @@ impl< header_input } Err(err) => { - error!(%err, "Failed to claim block"); + error!(%err, "Failed to claim block header input"); continue; } }; From f91eca6d9523d1e47b1b6cca443ff5176fd37703 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 22 Apr 2024 09:27:30 -0400 Subject: [PATCH 0970/1393] [RESTARTABILITY] Send proper QC and view change events when starting consensus (#3004) * Send proper QC and view change events when starting consensus * Initialize consensus in anchor view, rather than always in genesis view --- hotshot/src/lib.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6dfce57713..e041a06d57 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -139,6 +139,9 @@ pub struct SystemContext> { /// The network version version: Arc>, + /// The view to enter when first starting consensus + start_view: TYPES::Time, + // global_registry: GlobalRegistry, /// Access to the output event stream. pub output_event_stream: (Sender>, InactiveReceiver>), @@ -225,13 +228,11 @@ impl> SystemContext { saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); } - let start_view = initializer.start_view; - let consensus = Consensus { instance_state, validated_state_map, vid_shares: BTreeMap::new(), - cur_view: start_view, + cur_view: anchored_leaf.get_view_number(), last_decided_view: anchored_leaf.get_view_number(), saved_leaves, saved_payloads, @@ -259,6 +260,7 @@ impl> SystemContext { private_key, config, version, + start_view: initializer.start_view, networks: Arc::new(networks), memberships: Arc::new(memberships), _metrics: consensus_metrics.clone(), @@ -276,15 +278,16 @@ impl> SystemContext { /// Panics if sending genesis fails pub async fn start_consensus(&self) { debug!("Starting Consensus"); + let consensus = self.consensus.read().await; self.internal_event_stream .0 - .broadcast_direct(Arc::new(HotShotEvent::ViewChange(TYPES::Time::new(0)))) + .broadcast_direct(Arc::new(HotShotEvent::ViewChange(self.start_view))) .await .expect("Genesis Broadcast failed"); self.internal_event_stream .0 .broadcast_direct(Arc::new(HotShotEvent::QCFormed(either::Left( - QuorumCertificate::genesis(), + consensus.high_qc.clone(), )))) .await .expect("Genesis Broadcast failed"); From c01b22632445f2a57f5218c69c53aaf762deaec2 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 22 Apr 2024 10:41:52 -0400 Subject: [PATCH 0971/1393] Update the Push CDN to 0.2.1 (#2997) * update the push CDN to 0.2.0 * lint * short param for key seed * fix argument name * update to 0.2.1 --- examples/combined/all.rs | 47 +++--- examples/push-cdn/all.rs | 43 +++--- examples/push-cdn/broker.rs | 97 ++++++------ examples/push-cdn/marshal.rs | 57 +++---- .../src/traits/networking/push_cdn_network.rs | 142 ++++++++++-------- 5 files changed, 210 insertions(+), 176 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index d06f3522ce..55f20a570a 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -11,8 +11,8 @@ use async_compatibility_layer::{ art::async_spawn, logging::{setup_backtrace, setup_logging}, }; -use cdn_broker::{Broker, Config as BrokerConfig, ConfigBuilder as BrokerConfigBuilder}; -use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; +use cdn_broker::Broker; +use cdn_marshal::Marshal; use hotshot::{ traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}, types::SignatureKey, @@ -68,20 +68,22 @@ async fn main() { let private_address = format!("127.0.0.1:{private_port}"); let public_address = format!("127.0.0.1:{public_port}"); - let config: BrokerConfig::SignatureKey>> = - BrokerConfigBuilder::default() - .discovery_endpoint(discovery_endpoint.clone()) - .keypair(KeyPair { - public_key: WrappedSignatureKey(broker_public_key), - private_key: broker_private_key.clone(), - }) - .metrics_enabled(false) - .private_bind_address(private_address.clone()) - .public_bind_address(public_address.clone()) - .private_advertise_address(private_address) - .public_advertise_address(public_address) - .build() - .expect("failed to build broker config"); + let config: cdn_broker::Config> = cdn_broker::Config { + discovery_endpoint: discovery_endpoint.clone(), + public_advertise_endpoint: public_address.clone(), + public_bind_endpoint: public_address, + private_advertise_endpoint: private_address.clone(), + private_bind_endpoint: private_address, + + keypair: KeyPair { + public_key: WrappedSignatureKey(broker_public_key), + private_key: broker_private_key.clone(), + }, + + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + }; // Create and spawn the broker async_spawn(async move { @@ -102,12 +104,13 @@ async fn main() { .expect("CDN marshal address must be specified"); // Configure the marshal - let marshal_config = MarshalConfigBuilder::default() - .bind_address(marshal_endpoint.clone()) - .metrics_enabled(false) - .discovery_endpoint(discovery_endpoint) - .build() - .expect("failed to build marshal config"); + let marshal_config = cdn_marshal::Config { + bind_endpoint: marshal_endpoint.clone(), + discovery_endpoint, + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + }; // Spawn the marshal async_spawn(async move { diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 0a010941f4..137a99f41c 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -64,20 +64,22 @@ async fn main() { let private_address = format!("127.0.0.1:{private_port}"); let public_address = format!("127.0.0.1:{public_port}"); - let config: cdn_broker::Config::SignatureKey>> = - cdn_broker::ConfigBuilder::default() - .discovery_endpoint(discovery_endpoint.clone()) - .keypair(KeyPair { - public_key: WrappedSignatureKey(broker_public_key), - private_key: broker_private_key.clone(), - }) - .metrics_enabled(false) - .private_bind_address(private_address.clone()) - .public_bind_address(public_address.clone()) - .private_advertise_address(private_address) - .public_advertise_address(public_address) - .build() - .expect("failed to build broker config"); + let config: cdn_broker::Config> = cdn_broker::Config { + discovery_endpoint: discovery_endpoint.clone(), + public_advertise_endpoint: public_address.clone(), + public_bind_endpoint: public_address, + private_advertise_endpoint: private_address.clone(), + private_bind_endpoint: private_address, + + keypair: KeyPair { + public_key: WrappedSignatureKey(broker_public_key), + private_key: broker_private_key.clone(), + }, + + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + }; // Create and spawn the broker async_spawn(async move { @@ -96,12 +98,13 @@ async fn main() { // Configure the marshal let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); - let marshal_config = cdn_marshal::ConfigBuilder::default() - .bind_address(marshal_endpoint.clone()) - .discovery_endpoint("test.sqlite".to_string()) - .metrics_enabled(false) - .build() - .expect("failed to build marshal config"); + let marshal_config = cdn_marshal::Config { + bind_endpoint: marshal_endpoint.clone(), + discovery_endpoint, + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + }; // Spawn the marshal async_spawn(async move { diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 900e2697c5..2559b29357 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -1,12 +1,12 @@ //! The following is the main `Broker` binary, which just instantiates and runs //! a `Broker` object. - -use anyhow::{Context, Result}; -use cdn_broker::{Broker, Config, ConfigBuilder}; +use anyhow::Result; +use cdn_broker::{Broker, Config}; use clap::Parser; use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey}; use hotshot_example_types::node_types::TestTypes; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::signature_key::SignatureKey; use sha2::Digest; #[derive(Parser, Debug)] @@ -19,74 +19,81 @@ struct Args { #[arg(short, long)] discovery_endpoint: String, - /// Whether or not metric collection and serving is enabled - #[arg(long, default_value_t = false)] - metrics_enabled: bool, - - /// The IP to bind to for externalizing metrics - #[arg(long, default_value = "127.0.0.1")] - metrics_ip: String, - - /// The port to bind to for externalizing metrics - #[arg(long, default_value_t = 9090)] - metrics_port: u16, - - /// The user-facing address to bind to for connections from users + /// The user-facing endpoint in `IP:port` form to bind to for connections from users #[arg(long, default_value = "0.0.0.0:1738")] - public_bind_address: String, + public_bind_endpoint: String, - /// The user-facing address to advertise + /// The user-facing endpoint in `IP:port` form to advertise #[arg(long, default_value = "local_ip:1738")] - public_advertise_address: String, + public_advertise_endpoint: String, - /// The broker-facing address to bind to for connections from + /// The broker-facing endpoint in `IP:port` form to bind to for connections from /// other brokers #[arg(long, default_value = "0.0.0.0:1739")] - private_bind_address: String, + private_bind_endpoint: String, - /// The broker-facing address to advertise + /// The broker-facing endpoint in `IP:port` form to advertise #[arg(long, default_value = "local_ip:1739")] - private_advertise_address: String, + private_advertise_endpoint: String, + + /// The endpoint to bind to for externalizing metrics (in `IP:port` form). If not provided, + /// metrics are not exposed. + #[arg(short, long)] + metrics_bind_endpoint: Option, + + /// The path to the CA certificate + /// If not provided, a local, pinned CA is used + #[arg(long)] + ca_cert_path: Option, + + /// The path to the CA key + /// If not provided, a local, pinned CA is used + #[arg(long)] + ca_key_path: Option, /// The seed for broker key generation - #[arg(long, default_value_t = 0)] + #[arg(short, long, default_value_t = 0)] key_seed: u64, } - -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] async fn main() -> Result<()> { // Parse command line arguments let args = Args::parse(); // Initialize tracing - tracing_subscriber::fmt::init(); + if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { + tracing_subscriber::fmt().json().init(); + } else { + tracing_subscriber::fmt().init(); + } // Generate the broker key from the supplied seed let key_hash = sha2::Sha256::digest(args.key_seed.to_le_bytes()); let (public_key, private_key) = ::SignatureKey::generated_from_seed_indexed(key_hash.into(), 1337); - let broker_config: Config::SignatureKey>> = - ConfigBuilder::default() - .public_bind_address(args.public_bind_address) - .public_advertise_address(args.public_advertise_address) - .private_bind_address(args.private_bind_address) - .private_advertise_address(args.private_advertise_address) - .metrics_enabled(args.metrics_enabled) - .metrics_ip(args.metrics_ip) - .discovery_endpoint(args.discovery_endpoint) - .metrics_port(args.metrics_port) - .keypair(KeyPair { - public_key: WrappedSignatureKey(public_key), - private_key, - }) - .build() - .with_context(|| "failed to build broker config")?; + // Create config + let broker_config: Config> = Config { + ca_cert_path: args.ca_cert_path, + ca_key_path: args.ca_key_path, + + discovery_endpoint: args.discovery_endpoint, + metrics_bind_endpoint: args.metrics_bind_endpoint, + keypair: KeyPair { + public_key: WrappedSignatureKey(public_key), + private_key, + }, + + public_bind_endpoint: args.public_bind_endpoint, + public_advertise_endpoint: args.public_advertise_endpoint, + private_bind_endpoint: args.private_bind_endpoint, + private_advertise_endpoint: args.private_advertise_endpoint, + }; // Create new `Broker` // Uses TCP from broker connections and Quic for user connections. - let broker = Broker::>::new(broker_config).await?; + let broker = Broker::new(broker_config).await?; // Start the main loop, consuming it broker.start().await?; diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index 42c0758549..b2288a9a49 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -1,38 +1,40 @@ //! The following is the main `Marshal` binary, which just instantiates and runs //! a `Marshal` object. -use anyhow::{Context, Result}; -use cdn_marshal::{ConfigBuilder, Marshal}; +use anyhow::Result; +use cdn_marshal::{Config, Marshal}; use clap::Parser; use hotshot::traits::implementations::ProductionDef; use hotshot_example_types::node_types::TestTypes; +// TODO: forall, add logging where we need it + #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] /// The main component of the push CDN. struct Args { /// The discovery client endpoint (including scheme) to connect to - /// With the local discovery feature, this is a file path. - /// With the remote (redis) discovery feature, this is a redis URL (e.g. `redis://127.0.0.1:6789`). - #[arg(short, long)] discovery_endpoint: String, - /// Whether or not metric collection and serving is enabled - #[arg(long, default_value_t = false)] - metrics_enabled: bool, - - /// The IP to bind to for externalizing metrics - #[arg(long, default_value = "127.0.0.1")] - metrics_ip: String, - - /// The port to bind to for externalizing metrics - #[arg(long, default_value_t = 9090)] - metrics_port: u16, - /// The port to bind to for connections (from users) #[arg(short, long, default_value_t = 1737)] bind_port: u16, + + /// The endpoint to bind to for externalizing metrics (in `IP:port` form). If not provided, + /// metrics are not exposed. + #[arg(short, long)] + metrics_bind_endpoint: Option, + + /// The path to the CA certificate + /// If not provided, a local, pinned CA is used + #[arg(long)] + ca_cert_path: Option, + + /// The path to the CA key + /// If not provided, a local, pinned CA is used + #[arg(long)] + ca_key_path: Option, } #[cfg_attr(async_executor_impl = "tokio", tokio::main)] @@ -42,17 +44,20 @@ async fn main() -> Result<()> { let args = Args::parse(); // Initialize tracing - tracing_subscriber::fmt::init(); + if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { + tracing_subscriber::fmt().json().init(); + } else { + tracing_subscriber::fmt().init(); + } // Create a new `Config` - let config = ConfigBuilder::default() - .bind_address(format!("0.0.0.0:{}", args.bind_port)) - .metrics_enabled(args.metrics_enabled) - .metrics_ip(args.metrics_ip) - .metrics_port(args.metrics_port) - .discovery_endpoint(args.discovery_endpoint) - .build() - .with_context(|| "failed to build Marshal config")?; + let config = Config { + discovery_endpoint: args.discovery_endpoint, + bind_endpoint: format!("0.0.0.0:{}", args.bind_port), + metrics_bind_endpoint: args.metrics_bind_endpoint, + ca_cert_path: args.ca_cert_path, + ca_key_path: args.ca_key_path, + }; // Create new `Marshal` from the config let marshal = Marshal::>::new(config).await?; diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index ba96c081f1..03febebd81 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -10,12 +10,12 @@ use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ - connection::protocols::Tcp, - def::RunDef, + connection::{protocols::Tcp, NoMiddleware, TrustedMiddleware, UntrustedMiddleware}, + def::{ConnectionDef, RunDef}, discovery::{Embedded, Redis}, }; #[cfg(feature = "hotshot-testing")] -use cdn_broker::{Broker, Config, ConfigBuilder as BrokerConfigBuilder}; +use cdn_broker::{Broker, Config as BrokerConfig}; pub use cdn_client::reexports::crypto::signature::KeyPair; use cdn_client::{ reexports::{ @@ -23,10 +23,10 @@ use cdn_client::{ crypto::signature::{Serializable, SignatureScheme}, message::{Broadcast, Direct, Message as PushCdnMessage, Topic}, }, - Client, ConfigBuilder as ClientConfigBuilder, + Client, Config as ClientConfig, }; #[cfg(feature = "hotshot-testing")] -use cdn_marshal::{ConfigBuilder as MarshalConfigBuilder, Marshal}; +use cdn_marshal::{Config as MarshalConfig, Marshal}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -94,38 +94,51 @@ impl Serializable for WrappedSignatureKey { } } -/// The testing run definition for the Push CDN. -/// Uses the real protocols, but with an embedded discovery client. -pub struct TestingDef { - /// Phantom data to hold the type - pd: PhantomData, +/// The production run definition for the Push CDN. +/// Uses the real protocols and a Redis discovery client. +pub struct ProductionDef(PhantomData); +impl RunDef for ProductionDef { + type User = UserDef; + type Broker = BrokerDef; + type DiscoveryClientType = Redis; } -impl RunDef for TestingDef { - type BrokerScheme = WrappedSignatureKey; - type BrokerProtocol = Tcp; - - type UserScheme = WrappedSignatureKey; - type UserProtocol = Quic; - - type DiscoveryClientType = Embedded; +/// The user definition for the Push CDN. +/// Uses the Quic protocol and untrusted middleware. +pub struct UserDef(PhantomData); +impl ConnectionDef for UserDef { + type Scheme = WrappedSignatureKey; + type Protocol = Quic; + type Middleware = UntrustedMiddleware; } -/// The production run definition for the Push CDN. -/// Uses the real protocols and a Redis discovery client. -pub struct ProductionDef { - /// Phantom data to hold the type - pd: PhantomData, +/// The broker definition for the Push CDN. +/// Uses the TCP protocol and trusted middleware. +pub struct BrokerDef(PhantomData); +impl ConnectionDef for BrokerDef { + type Scheme = WrappedSignatureKey; + type Protocol = Tcp; + type Middleware = TrustedMiddleware; } -impl RunDef for ProductionDef { - type BrokerScheme = WrappedSignatureKey; - type BrokerProtocol = Tcp; - - type UserScheme = WrappedSignatureKey; - type UserProtocol = Quic; +/// The client definition for the Push CDN. Uses the Quic +/// protocol and no middleware. Differs from the user +/// definition in that is on the client-side. +#[derive(Clone)] +pub struct ClientDef(PhantomData); +impl ConnectionDef for ClientDef { + type Scheme = WrappedSignatureKey; + type Protocol = Quic; + type Middleware = NoMiddleware; +} - type DiscoveryClientType = Redis; +/// The testing run definition for the Push CDN. +/// Uses the real protocols, but with an embedded discovery client. +pub struct TestingDef(PhantomData); +impl RunDef for TestingDef { + type User = UserDef; + type Broker = BrokerDef; + type DiscoveryClientType = Embedded; } /// A communication channel to the Push CDN, which is a collection of brokers and a marshal @@ -134,7 +147,7 @@ impl RunDef for ProductionDef { /// Is generic over both the type of key and the network protocol. pub struct PushCdnNetwork { /// The underlying client - client: Client, Quic>, + client: Client>, /// Whether or not the underlying network is supposed to be paused #[cfg(feature = "hotshot-testing")] is_paused: Arc, @@ -159,11 +172,12 @@ impl PushCdnNetwork { } // Build config - let config = ClientConfigBuilder::default() - .endpoint(marshal_endpoint) - .subscribed_topics(computed_topics) - .keypair(keypair) - .build()?; + let config = ClientConfig { + endpoint: marshal_endpoint, + subscribed_topics: computed_topics, + keypair, + use_local_authority: true, + }; // Create the client from the config let client = Client::new(config); @@ -258,20 +272,21 @@ impl TestableNetworkingImplementation for PushCdnNetwork let private_address = format!("127.0.0.1:{private_port}"); let public_address = format!("127.0.0.1:{public_port}"); - let config: Config> = - BrokerConfigBuilder::default() - .discovery_endpoint(discovery_endpoint.clone()) - .keypair(KeyPair { - public_key: WrappedSignatureKey(broker_public_key.clone()), - private_key: broker_private_key.clone(), - }) - .metrics_enabled(false) - .private_bind_address(private_address.clone()) - .public_bind_address(public_address.clone()) - .private_advertise_address(private_address) - .public_advertise_address(public_address) - .build() - .expect("failed to build broker config"); + // Configure the broker + let config: BrokerConfig> = BrokerConfig { + public_advertise_endpoint: public_address.clone(), + public_bind_endpoint: public_address, + private_advertise_endpoint: private_address.clone(), + private_bind_endpoint: private_address, + metrics_bind_endpoint: None, + keypair: KeyPair { + public_key: WrappedSignatureKey(broker_public_key.clone()), + private_key: broker_private_key.clone(), + }, + discovery_endpoint: discovery_endpoint.clone(), + ca_cert_path: None, + ca_key_path: None, + }; // Create and spawn the broker async_spawn(async move { @@ -290,12 +305,13 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Configure the marshal let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); - let marshal_config = MarshalConfigBuilder::default() - .bind_address(marshal_endpoint.clone()) - .metrics_enabled(false) - .discovery_endpoint(discovery_endpoint) - .build() - .expect("failed to build marshal config"); + let marshal_config = MarshalConfig { + bind_endpoint: marshal_endpoint.clone(), + discovery_endpoint, + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + }; // Spawn the marshal async_spawn(async move { @@ -329,15 +345,15 @@ impl TestableNetworkingImplementation for PushCdnNetwork }; // Configure our client - let client_config = ClientConfigBuilder::default() - .keypair(KeyPair { + let client_config: ClientConfig> = ClientConfig { + keypair: KeyPair { public_key: WrappedSignatureKey(public_key), private_key, - }) - .subscribed_topics(topics) - .endpoint(marshal_endpoint) - .build() - .expect("failed to build client config"); + }, + subscribed_topics: topics, + endpoint: marshal_endpoint, + use_local_authority: true, + }; // Create our client let client = Arc::new(PushCdnNetwork { From 4d376df0f7208ae243f19cf8c87f327e1119c1b0 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 22 Apr 2024 11:05:17 -0400 Subject: [PATCH 0972/1393] [CX_CLEANUP] - Decompose `QuorumProposalRecv` into module functions (#2988) * moving functions over * complete refactor * fix lint * fix build and lint * new method for proposing * move feature gate * tmp revert * fix test failures --- task-impls/src/consensus/mod.rs | 454 ++++++++------------------- task-impls/src/consensus/proposal.rs | 339 +++++++++++++++++++- task-impls/src/quorum_proposal.rs | 4 +- 3 files changed, 466 insertions(+), 331 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 9be045b921..60a24f5a5a 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,8 +1,23 @@ +use anyhow::Result; use std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; +use crate::{ + consensus::{ + proposal::{ + publish_proposal_if_able, validate_proposal_safety_and_liveness, + validate_proposal_view_and_certs, + }, + view_change::update_view, + }, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; use async_broadcast::Sender; use async_compatibility_layer::art::async_spawn; use async_lock::{RwLock, RwLockUpgradableReadGuard}; @@ -47,15 +62,6 @@ use { }, }; -use crate::{ - consensus::{proposal::validate_proposal, view_change::update_view}, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; - /// Handles proposal-related functionality. pub(crate) mod proposal; @@ -333,6 +339,46 @@ impl, A: ConsensusApi + false } + #[cfg(feature = "dependency-tasks")] + async fn publish_proposal( + &mut self, + view: TYPES::Time, + event_stream: Sender>>, + ) -> Result<()> { + Ok(()) + } + + /// Publishes a proposal + #[cfg(not(feature = "dependency-tasks"))] + async fn publish_proposal( + &mut self, + view: TYPES::Time, + event_stream: Sender>>, + ) -> Result<()> { + let create_and_send_proposal_handle = publish_proposal_if_able( + self.cur_view, + view, + event_stream, + self.quorum_membership.clone(), + self.public_key.clone(), + self.private_key.clone(), + self.consensus.clone(), + self.round_start_delay, + self.formed_upgrade_certificate.clone(), + self.decided_upgrade_cert.clone(), + &mut self.payload_commitment_and_metadata, + &mut self.proposal_cert, + ) + .await?; + + self.spawned_tasks + .entry(view) + .or_default() + .push(create_and_send_proposal_handle); + + Ok(()) + } + /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] pub async fn handle( @@ -356,58 +402,19 @@ impl, A: ConsensusApi + )) .await; - let view = proposal.data.get_view_number(); - if view < self.cur_view { - debug!("Proposal is from an older view {:?}", proposal.data.clone()); + if let Err(e) = validate_proposal_view_and_certs( + proposal, + &sender, + self.cur_view, + &self.quorum_membership, + &self.timeout_membership, + ) { + warn!("Failed to validate proposal view and attached certs; error = {e:?}"); return; } + let view = proposal.data.get_view_number(); let view_leader_key = self.quorum_membership.get_leader(view); - if view_leader_key != sender { - warn!("Leader key does not match key in proposal"); - return; - } - - // Verify a timeout certificate OR a view sync certificate exists and is valid. - if proposal.data.justify_qc.get_view_number() != view - 1 { - if let Some(received_proposal_cert) = proposal.data.proposal_certificate.clone() - { - match received_proposal_cert { - ViewChangeEvidence::Timeout(timeout_cert) => { - if timeout_cert.get_data().view != view - 1 { - warn!("Timeout certificate for view {} was not for the immediately preceding view", *view); - return; - } - - if !timeout_cert.is_valid_cert(self.timeout_membership.as_ref()) { - warn!("Timeout certificate for view {} was invalid", *view); - return; - } - } - ViewChangeEvidence::ViewSync(view_sync_cert) => { - if view_sync_cert.view_number != view { - debug!( - "Cert view number {:?} does not match proposal view number {:?}", - view_sync_cert.view_number, view - ); - return; - } - - // View sync certs must also be valid. - if !view_sync_cert.is_valid_cert(self.quorum_membership.as_ref()) { - debug!("Invalid ViewSyncFinalize cert provided"); - return; - } - } - } - } else { - warn!( - "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", - *view); - return; - }; - } - let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { @@ -417,17 +424,6 @@ impl, A: ConsensusApi + return; } - // Validate the upgrade certificate -- this is just a signature validation. - // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. - if let Err(e) = UpgradeCertificate::validate( - &proposal.data.upgrade_certificate, - &self.quorum_membership, - ) { - warn!("{:?}", e); - - return; - } - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here if let Err(e) = update_view::( self.public_key.clone(), @@ -569,14 +565,19 @@ impl, A: ConsensusApi + == self.public_key && high_qc.view_number == self.current_proposal.clone().unwrap().view_number; + let qc = high_qc.clone(); if should_propose { debug!( "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, &event_stream) - .await; + if let Err(e) = self + .publish_proposal(qc.view_number + 1, event_stream.clone()) + .await + { + warn!("Failed to propose; error = {e:?}"); + }; } if self.vote_if_able(&event_stream).await { self.current_proposal = None; @@ -591,7 +592,7 @@ impl, A: ConsensusApi + .entry(proposal.data.get_view_number()) .or_default() .push(async_spawn( - validate_proposal( + validate_proposal_safety_and_liveness( proposal.clone(), parent_leaf, self.consensus.clone(), @@ -776,8 +777,12 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, &event_stream) - .await; + if let Err(e) = self + .publish_proposal(qc.view_number + 1, event_stream.clone()) + .await + { + warn!("Failed to propose; error = {e:?}"); + }; } if !self.vote_if_able(&event_stream).await { @@ -879,49 +884,57 @@ impl, A: ConsensusApi + } } HotShotEvent::QCFormed(cert) => { - debug!("QC Formed event happened!"); - - if let either::Right(qc) = cert.clone() { - self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; - - debug!( - "Attempting to publish proposal after forming a TC for view {}", - *qc.view_number - ); - - let view = qc.view_number + 1; + match cert { + either::Right(qc) => { + self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; + + debug!( + "Attempting to publish proposal after forming a TC for view {}", + *qc.view_number + ); - self.publish_proposal_if_able(view, &event_stream).await; - } - if let either::Left(qc) = cert { - if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { - warn!("Failed to store High QC of QC we formed. Error: {:?}", e); + if let Err(e) = self + .publish_proposal(qc.view_number + 1, event_stream) + .await + { + warn!("Failed to propose; error = {e:?}"); + }; } + either::Left(qc) => { + if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await + { + warn!("Failed to store High QC of QC we formed. Error: {:?}", e); + } - let mut consensus = self.consensus.write().await; - consensus.high_qc = qc.clone(); + let mut consensus = self.consensus.write().await; + consensus.high_qc = qc.clone(); - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; + // cancel poll for votes + self.quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( + *qc.view_number, + )) + .await; - drop(consensus); - debug!( - "Attempting to publish proposal after forming a QC for view {}", - *qc.view_number - ); + drop(consensus); + debug!( + "Attempting to publish proposal after forming a QC for view {}", + *qc.view_number + ); - self.publish_proposal_if_able(qc.view_number + 1, &event_stream) - .await; + if let Err(e) = self + .publish_proposal(qc.view_number + 1, event_stream) + .await + { + warn!("Failed to propose; error = {e:?}"); + }; + } } } HotShotEvent::UpgradeCertificateFormed(cert) => { @@ -1145,7 +1158,9 @@ impl, A: ConsensusApi + if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view { - self.publish_proposal_if_able(view, &event_stream).await; + if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { + warn!("Failed to propose; error = {e:?}"); + }; } if let Some(cert) = &self.proposal_cert { @@ -1154,14 +1169,18 @@ impl, A: ConsensusApi + if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { - self.publish_proposal_if_able(view, &event_stream).await; + if let Err(e) = self.publish_proposal(view, event_stream).await { + warn!("Failed to propose; error = {e:?}"); + }; } } ViewChangeEvidence::ViewSync(vsc) => { if self.quorum_membership.get_leader(vsc.get_view_number()) == self.public_key { - self.publish_proposal_if_able(view, &event_stream).await; + if let Err(e) = self.publish_proposal(view, event_stream).await { + warn!("Failed to propose; error = {e:?}"); + }; } } } @@ -1192,217 +1211,14 @@ impl, A: ConsensusApi + "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", *certificate.view_number ); - self.publish_proposal_if_able(view, &event_stream).await; + if let Err(e) = self.publish_proposal(view, event_stream).await { + warn!("Failed to propose; error = {e:?}"); + }; } } _ => {} } } - - /// Ignores old propose behavior and lets QuorumProposalTask take over. - #[cfg(feature = "dependency-tasks")] - pub async fn publish_proposal_if_able( - &mut self, - _view: TYPES::Time, - _event_stream: &Sender>>, - ) { - } - - /// Sends a proposal if possible from the high qc we have - #[allow(clippy::too_many_lines)] - #[cfg(not(feature = "dependency-tasks"))] - pub async fn publish_proposal_if_able( - &mut self, - view: TYPES::Time, - event_stream: &Sender>>, - ) { - use crate::consensus::proposal::create_and_send_proposal; - - if self.quorum_membership.get_leader(view) != self.public_key { - // This is expected for view 1, so skipping the logging. - if view != TYPES::Time::new(1) { - error!( - "Somehow we formed a QC but are not the leader for the next view {:?}", - view - ); - } - return; - } - - let consensus = self.consensus.read().await; - let parent_view_number = &consensus.high_qc.get_view_number(); - let mut reached_decided = false; - - let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { - // This should have been added by the replica? - error!("Couldn't find parent view in state map, waiting for replica to see proposal\n parent view number: {}", **parent_view_number); - return; - }; - // Leaf hash in view inner does not match high qc hash - Why? - let Some((leaf_commitment, state)) = parent_view.get_leaf_and_state() else { - error!( - ?parent_view_number, - ?parent_view, - "Parent of high QC points to a view without a proposal" - ); - return; - }; - if leaf_commitment != consensus.high_qc.get_data().leaf_commit { - // NOTE: This happens on the genesis block - debug!( - "They don't equal: {:?} {:?}", - leaf_commitment, - consensus.high_qc.get_data().leaf_commit - ); - } - let Some(leaf) = consensus.saved_leaves.get(&leaf_commitment) else { - error!("Failed to find high QC of parent."); - return; - }; - if leaf.get_view_number() == consensus.last_decided_view { - reached_decided = true; - } - - let parent_leaf = leaf.clone(); - - let original_parent_hash = parent_leaf.commit(); - - let mut next_parent_hash = original_parent_hash; - - // Walk back until we find a decide - if !reached_decided { - debug!("We have not reached decide from view {:?}", self.cur_view); - while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { - if next_parent_leaf.get_view_number() <= consensus.last_decided_view { - break; - } - next_parent_hash = next_parent_leaf.get_parent_commitment(); - } - debug!("updated saved leaves"); - // TODO do some sort of sanity check on the view number that it matches decided - } - - // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. - if let Some(upgrade_cert) = &self.decided_upgrade_cert { - if upgrade_cert.in_interim(self.cur_view) { - let Ok((payload, metadata)) = - ::from_transactions(Vec::new()) - else { - error!("Failed to build null block payload and metadata"); - return; - }; - - let builder_commitment = payload.builder_commitment(&metadata); - let Some(null_block_commitment) = - null_block::commitment(self.quorum_membership.total_nodes()) - else { - // This should never happen. - error!("Failed to calculate null block commitment"); - return; - }; - - let Some(null_block_fee) = - null_block::builder_fee::(self.quorum_membership.total_nodes()) - else { - // This should never happen. - error!("Failed to calculate null block fee info"); - return; - }; - - let pub_key = self.public_key.clone(); - let priv_key = self.private_key.clone(); - let consensus = self.consensus.clone(); - let sender = event_stream.clone(); - let delay = self.round_start_delay; - let parent = parent_leaf.clone(); - let state = state.clone(); - let upgrade_cert = self.decided_upgrade_cert.clone(); - self.spawned_tasks - .entry(view) - .or_default() - .push(async_spawn(async move { - create_and_send_proposal( - pub_key, - priv_key, - consensus, - sender, - view, - CommitmentAndMetadata { - commitment: null_block_commitment, - builder_commitment, - metadata, - fee: null_block_fee, - }, - parent, - state, - upgrade_cert, - None, - delay, - ) - .await; - })); - return; - } - } - - if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { - // In order of priority, we should try to attach: - // - the parent certificate if it exists, or - // - our own certificate that we formed. - // In either case, we need to ensure that the certificate is still relevant. - // - // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. - // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. - let formed_upgrade_certificate = self.formed_upgrade_certificate.take(); - let mut proposal_upgrade_certificate = parent_leaf - .get_upgrade_certificate() - .or(formed_upgrade_certificate); - - if !proposal_upgrade_certificate.clone().is_some_and(|cert| { - cert.is_relevant(view, self.decided_upgrade_cert.clone()) - .is_ok() - }) { - proposal_upgrade_certificate = None; - } - - // We only want to proposal to be attached if any of them are valid. - let proposal_certificate = self - .proposal_cert - .as_ref() - .filter(|cert| cert.is_valid_for_view(&view)) - .cloned(); - let pub_key = self.public_key.clone(); - let priv_key = self.private_key.clone(); - let consensus = self.consensus.clone(); - let sender = event_stream.clone(); - let state = state.clone(); - let delay = self.round_start_delay; - let commitment_and_metadata = commit_and_metadata.clone(); - self.spawned_tasks - .entry(view) - .or_default() - .push(async_spawn(async move { - create_and_send_proposal( - pub_key, - priv_key, - consensus, - sender, - view, - commitment_and_metadata, - parent_leaf.clone(), - state, - proposal_upgrade_certificate, - proposal_certificate, - delay, - ) - .await; - })); - - self.proposal_cert = None; - self.payload_commitment_and_metadata = None; - } - debug!("Cannot propose because we don't have the VID payload commitment and metadata"); - } } impl, A: ConsensusApi + 'static> TaskState diff --git a/task-impls/src/consensus/proposal.rs b/task-impls/src/consensus/proposal.rs index bb39197c44..26e604af77 100644 --- a/task-impls/src/consensus/proposal.rs +++ b/task-impls/src/consensus/proposal.rs @@ -1,35 +1,39 @@ use core::time::Duration; -#[cfg(not(feature = "dependency-tasks"))] use std::marker::PhantomData; use std::sync::Arc; use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; -use async_compatibility_layer::art::async_sleep; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::Committable; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, - data::{Leaf, QuorumProposal, ViewChangeEvidence}, + data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, message::Proposal, simple_certificate::UpgradeCertificate, traits::{ - block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, - states::ValidatedState, storage::Storage, + block_contents::BlockHeader, election::Membership, node_implementation::NodeType, + signature_key::SignatureKey, states::ValidatedState, storage::Storage, BlockPayload, }, utils::{Terminator, ViewInner}, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; use tracing::{debug, error, warn}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; + use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] -pub async fn validate_proposal( +pub async fn validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, consensus: Arc>>, @@ -170,9 +174,8 @@ pub async fn validate_proposal( /// Create the header for a proposal, build the proposal, and broadcast /// the proposal send evnet. #[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] pub async fn create_and_send_proposal( - pub_key: TYPES::SignatureKey, + public_key: TYPES::SignatureKey, private_key: ::PrivateKey, consensus: Arc>>, event_stream: Sender>>, @@ -226,8 +229,324 @@ pub async fn create_and_send_proposal( async_sleep(Duration::from_millis(round_start_delay)).await; broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend(message.clone(), pub_key)), + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + public_key, + )), &event_stream, ) .await; } + +/// Validates, from a given `proposal` that the view that it is being submitted for is valid when +/// compared to `cur_view` which is the highest proposed view (so far) for the caller. If the proposal +/// is for a view that's later than expected, that the proposal includes a timeout or view sync certificate. +pub fn validate_proposal_view_and_certs( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + cur_view: TYPES::Time, + quorum_membership: &Arc, + timeout_membership: &Arc, +) -> Result<()> { + let view = proposal.data.get_view_number(); + ensure!( + view >= cur_view, + "Proposal is from an older view {:?}", + proposal.data.clone() + ); + + let view_leader_key = quorum_membership.get_leader(view); + ensure!( + view_leader_key == *sender, + "Leader key does not match key in proposal" + ); + + // Verify a timeout certificate OR a view sync certificate exists and is valid. + if proposal.data.justify_qc.get_view_number() != view - 1 { + let received_proposal_cert = + proposal.data.proposal_certificate.clone().context(format!( +"Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", + *view + ))?; + + match received_proposal_cert { + ViewChangeEvidence::Timeout(timeout_cert) => { + ensure!( + timeout_cert.get_data().view == view - 1, + "Timeout certificate for view {} was not for the immediately preceding view", + *view + ); + ensure!( + timeout_cert.is_valid_cert(timeout_membership.as_ref()), + "Timeout certificate for view {} was invalid", + *view + ); + } + ViewChangeEvidence::ViewSync(view_sync_cert) => { + ensure!( + view_sync_cert.view_number == view, + "View sync cert view number {:?} does not match proposal view number {:?}", + view_sync_cert.view_number, + view + ); + + // View sync certs must also be valid. + ensure!( + view_sync_cert.is_valid_cert(quorum_membership.as_ref()), + "Invalid view sync finalize cert provided" + ); + } + } + } + + // Validate the upgrade certificate -- this is just a signature validation. + // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, quorum_membership)?; + + Ok(()) +} + +/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. +pub async fn get_parent_leaf_and_state( + cur_view: TYPES::Time, + view: TYPES::Time, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + consensus: Arc>>, +) -> Result<(Leaf, Arc<::ValidatedState>)> { + ensure!( + quorum_membership.get_leader(view) == public_key, + "Somehow we formed a QC but are not the leader for the next view {view:?}", + ); + + let consensus = consensus.read().await; + let parent_view_number = &consensus.high_qc.get_view_number(); + let parent_view = consensus.validated_state_map.get(parent_view_number).context( + format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", **parent_view_number) + )?; + + // Leaf hash in view inner does not match high qc hash - Why? + let (leaf_commitment, state) = parent_view.get_leaf_and_state().context( + format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") + )?; + + if leaf_commitment != consensus.high_qc.get_data().leaf_commit { + // NOTE: This happens on the genesis block + debug!( + "They don't equal: {:?} {:?}", + leaf_commitment, + consensus.high_qc.get_data().leaf_commit + ); + } + + let leaf = consensus + .saved_leaves + .get(&leaf_commitment) + .context("Failed to find high QC of parent")?; + + let reached_decided = leaf.get_view_number() == consensus.last_decided_view; + let parent_leaf = leaf.clone(); + let original_parent_hash = parent_leaf.commit(); + let mut next_parent_hash = original_parent_hash; + + // Walk back until we find a decide + if !reached_decided { + debug!("We have not reached decide from view {:?}", cur_view); + while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { + if next_parent_leaf.get_view_number() <= consensus.last_decided_view { + break; + } + next_parent_hash = next_parent_leaf.get_parent_commitment(); + } + debug!("updated saved leaves"); + // TODO do some sort of sanity check on the view number that it matches decided + } + + Ok((parent_leaf, state.clone())) +} + +/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is a special +/// case proposal scenario. +#[allow(clippy::too_many_lines)] +#[allow(clippy::too_many_arguments)] +async fn publish_proposal_from_upgrade_cert( + cur_view: TYPES::Time, + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + upgrade_cert: UpgradeCertificate, + delay: u64, +) -> Result> { + let (parent_leaf, state) = get_parent_leaf_and_state( + cur_view, + view, + quorum_membership.clone(), + public_key.clone(), + consensus.clone(), + ) + .await?; + + // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. + ensure!(upgrade_cert.in_interim(cur_view), "Cert is not in interim"); + let (payload, metadata) = ::from_transactions(Vec::new()) + .context("Failed to build null block payload and metadata")?; + + let builder_commitment = payload.builder_commitment(&metadata); + let null_block_commitment = null_block::commitment(quorum_membership.total_nodes()) + .context("Failed to calculate null block commitment")?; + let null_block_fee = null_block::builder_fee::(quorum_membership.total_nodes()) + .context("Failed to calculate null block fee info")?; + + Ok(async_spawn(async move { + create_and_send_proposal( + public_key, + private_key, + consensus, + sender, + view, + CommitmentAndMetadata { + commitment: null_block_commitment, + builder_commitment, + metadata, + fee: null_block_fee, + }, + parent_leaf, + state, + Some(upgrade_cert), + None, + delay, + ) + .await; + })) +} + +/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the +/// standard case proposal scenario. +#[allow(clippy::too_many_arguments)] +async fn publish_proposal_from_commitment_and_metadata( + cur_view: TYPES::Time, + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + delay: u64, + formed_upgrade_certificate: Option>, + decided_upgrade_cert: Option>, + commitment_and_metadata: &mut Option>, + proposal_cert: &mut Option>, +) -> Result> { + let (parent_leaf, state) = get_parent_leaf_and_state( + cur_view, + view, + quorum_membership, + public_key.clone(), + consensus.clone(), + ) + .await?; + + // In order of priority, we should try to attach: + // - the parent certificate if it exists, or + // - our own certificate that we formed. + // In either case, we need to ensure that the certificate is still relevant. + // + // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. + // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. + let mut proposal_upgrade_certificate = parent_leaf + .get_upgrade_certificate() + .or(formed_upgrade_certificate); + + if !proposal_upgrade_certificate + .clone() + .is_some_and(|cert| cert.is_relevant(view, decided_upgrade_cert).is_ok()) + { + proposal_upgrade_certificate = None; + } + + // We only want to proposal to be attached if any of them are valid. + let proposal_certificate = proposal_cert + .as_ref() + .filter(|cert| cert.is_valid_for_view(&view)) + .cloned(); + + // FIXME - This is not great, and will be fixed later. + // If it's > July, 2024 and this is still here, something has gone horribly wrong. + let cnm = commitment_and_metadata + .clone() + .context("Cannot propose because we don't have the VID payload commitment and metadata")?; + + let create_and_send_proposal_handle = async_spawn(async move { + create_and_send_proposal( + public_key, + private_key, + consensus, + sender, + view, + cnm, + parent_leaf.clone(), + state, + proposal_upgrade_certificate, + proposal_certificate, + delay, + ) + .await; + }); + + *proposal_cert = None; + *commitment_and_metadata = None; + + Ok(create_and_send_proposal_handle) +} + +/// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either +/// `commitment_and_metadata`, or a `decided_upgrade_cert`. +#[allow(clippy::too_many_arguments)] +pub async fn publish_proposal_if_able( + cur_view: TYPES::Time, + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + delay: u64, + formed_upgrade_certificate: Option>, + decided_upgrade_cert: Option>, + commitment_and_metadata: &mut Option>, + proposal_cert: &mut Option>, +) -> Result> { + if let Some(upgrade_cert) = decided_upgrade_cert { + publish_proposal_from_upgrade_cert( + cur_view, + view, + sender, + quorum_membership, + public_key, + private_key, + consensus, + upgrade_cert, + delay, + ) + .await + } else { + publish_proposal_from_commitment_and_metadata( + cur_view, + view, + sender, + quorum_membership, + public_key, + private_key, + consensus, + delay, + formed_upgrade_certificate, + decided_upgrade_cert, + commitment_and_metadata, + proposal_cert, + ) + .await + } +} diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 770987f004..2a800c4757 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -35,7 +35,7 @@ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use crate::{ - consensus::proposal::validate_proposal, + consensus::proposal::validate_proposal_safety_and_liveness, events::HotShotEvent, helpers::{broadcast_event, cancel_task, AnyhowTracing}, }; @@ -822,7 +822,7 @@ impl> QuorumProposalTaskState Date: Tue, 23 Apr 2024 14:32:16 +0200 Subject: [PATCH 0973/1393] avoid collect/copy of payload after encode (#2964) * avoid collect/copy in of payload after encode * fixing fix lint more fix lint reintroduce length in panic msg final lint remove again * fix conflicts * encode into an Arc<[u8]> * fix lint * remove unnecessary clone * use Arc> instead of Arc<[u8]> * minimal use of Arc and Arc> to Arc<[u8]> * fix lint * fix to proper Arc::clone format * revert cargo lock * Update crates/example-types/src/block_types.rs Co-authored-by: Gus Gutoski * fix after rebase to main --------- Co-authored-by: Gus Gutoski --- example-types/src/block_types.rs | 22 +++++++++------------- hotshot/src/lib.rs | 8 +++----- task-impls/src/consensus/mod.rs | 2 +- task-impls/src/da.rs | 2 +- task-impls/src/events.rs | 4 +++- task-impls/src/helpers.rs | 4 ++-- task-impls/src/transactions.rs | 5 +++-- task-impls/src/vid.rs | 6 ++---- testing/src/block_builder.rs | 6 ++---- testing/src/task_helpers.rs | 12 ++++++------ testing/src/view_generator.rs | 6 +++--- testing/tests/tests_1/block_builder.rs | 2 +- testing/tests/tests_1/da_task.rs | 10 ++++++---- testing/tests/tests_1/vid_task.rs | 6 +++--- types/src/consensus.rs | 2 +- types/src/data.rs | 16 ++++++---------- types/src/traits/block_contents.rs | 24 +++++++++--------------- 17 files changed, 61 insertions(+), 76 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 22024035b5..6c35a40cef 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -1,6 +1,7 @@ use std::{ fmt::{Debug, Display}, mem::size_of, + sync::Arc, }; use committable::{Commitment, Committable, RawCommitmentBuilder}; @@ -29,7 +30,7 @@ impl TestTransaction { /// /// # Errors /// If the transaction length conversion fails. - pub fn encode(transactions: Vec) -> Result, BlockError> { + pub fn encode(transactions: &[Self]) -> Result, BlockError> { let mut encoded = Vec::new(); for txn in transactions { @@ -44,7 +45,7 @@ impl TestTransaction { // Concatenate the bytes of the transaction size and the transaction itself. encoded.extend(txn_size); - encoded.extend(txn.0); + encoded.extend(&txn.0); } Ok(encoded) @@ -107,7 +108,6 @@ impl BlockPayload for TestBlockPayload { type Error = BlockError; type Transaction = TestTransaction; type Metadata = (); - type Encode<'a> = as IntoIterator>::IntoIter; fn from_transactions( transactions: impl IntoIterator, @@ -121,24 +121,20 @@ impl BlockPayload for TestBlockPayload { )) } - fn from_bytes(encoded_transactions: E, _metadata: &Self::Metadata) -> Self - where - E: Iterator, - { - let encoded_vec: Vec = encoded_transactions.collect(); + fn from_bytes(encoded_transactions: &[u8], _metadata: &Self::Metadata) -> Self { let mut transactions = Vec::new(); let mut current_index = 0; - while current_index < encoded_vec.len() { + while current_index < encoded_transactions.len() { // Decode the transaction length. let txn_start_index = current_index + size_of::(); let mut txn_len_bytes = [0; size_of::()]; - txn_len_bytes.copy_from_slice(&encoded_vec[current_index..txn_start_index]); + txn_len_bytes.copy_from_slice(&encoded_transactions[current_index..txn_start_index]); let txn_len: usize = u32::from_le_bytes(txn_len_bytes) as usize; // Get the transaction. let next_index = txn_start_index + txn_len; transactions.push(TestTransaction( - encoded_vec[txn_start_index..next_index].to_vec(), + encoded_transactions[txn_start_index..next_index].to_vec(), )); current_index = next_index; } @@ -150,8 +146,8 @@ impl BlockPayload for TestBlockPayload { (Self::genesis(), ()) } - fn encode(&self) -> Result, Self::Error> { - Ok(TestTransaction::encode(self.transactions.clone())?.into_iter()) + fn encode(&self) -> Result, Self::Error> { + TestTransaction::encode(&self.transactions).map(Arc::from) } fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e041a06d57..5d6aaa39a4 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -216,16 +216,14 @@ impl> SystemContext { saved_leaves.insert(leaf.commit(), leaf.clone()); } if let Some(payload) = anchored_leaf.get_block_payload() { - let encoded_txns: Vec = match payload.encode() { - // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. - // - Ok(encoded) => encoded.into_iter().collect(), + let encoded_txns = match payload.encode() { + Ok(encoded) => encoded, Err(e) => { return Err(HotShotError::BlockError { source: e }); } }; - saved_payloads.insert(anchored_leaf.get_view_number(), encoded_txns); + saved_payloads.insert(anchored_leaf.get_view_number(), Arc::clone(&encoded_txns)); } let consensus = Consensus { diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 60a24f5a5a..df3e194f05 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -673,7 +673,7 @@ impl, A: ConsensusApi + consensus.saved_payloads.get(&leaf.get_view_number()) { let payload = BlockPayload::from_bytes( - encoded_txns.clone().into_iter(), + encoded_txns, leaf.get_block_header().metadata(), ); diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 4c47f2617d..bb0b0c232e 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -214,7 +214,7 @@ impl, A: ConsensusApi + // Record the payload we have promised to make available. consensus .saved_payloads - .insert(view, proposal.data.encoded_transactions.clone()); + .insert(view, Arc::clone(&proposal.data.encoded_transactions)); } HotShotEvent::DAVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index d5aa57015e..46f1d75a06 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use either::Either; use hotshot_types::{ consensus::ProposalDependencyData, @@ -115,7 +117,7 @@ pub enum HotShotEvent { ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv( - Vec, + Arc<[u8]>, ::Metadata, TYPES::Time, BuilderFee, diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 37cb9932a7..cc6d8e07dd 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -44,7 +44,7 @@ pub async fn broadcast_event(event: E, sender: &Send /// Panics if the VID calculation fails, this should not happen. #[allow(clippy::panic)] pub async fn calculate_vid_disperse( - txns: Vec, + txns: Arc<[u8]>, membership: &Arc, view: TYPES::Time, ) -> VidDisperse { @@ -66,7 +66,7 @@ pub async fn calculate_vid_disperse( /// Panics if the VID calculation fails, this should not happen. #[allow(clippy::panic)] pub async fn calculate_vid_disperse_using_precompute_data( - txns: Vec, + txns: Arc<[u8]>, membership: &Arc, view: TYPES::Time, pre_compute_data: VidPrecomputeData, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index edbcc20fd9..2a76873e03 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -134,12 +134,13 @@ impl< { // send the sequenced transactions to VID and DA tasks let encoded_transactions = match block_data.block_payload.encode() { - Ok(encoded) => encoded.into_iter().collect::>(), + Ok(encoded) => encoded, Err(e) => { error!("Failed to encode the block payload: {:?}.", e); return None; } }; + broadcast_event( Arc::new(HotShotEvent::BlockRecv( encoded_transactions, @@ -178,7 +179,7 @@ impl< // Broadcast the empty block broadcast_event( Arc::new(HotShotEvent::BlockRecv( - vec![], + vec![].into(), metadata, block_view, builder_fee, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index e77c118c87..b0fc7899a1 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -62,10 +62,8 @@ impl, A: ConsensusApi + ) -> Option { match event.as_ref() { HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number, fee) => { - let payload = ::BlockPayload::from_bytes( - encoded_transactions.clone().into_iter(), - metadata, - ); + let payload = + ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); let vid_disperse = calculate_vid_disperse( encoded_transactions.clone(), diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 01a908fa9b..57f665d508 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -453,7 +453,7 @@ impl BuilderTask for SimpleBuilderTask { } EventType::DAProposal { proposal, .. } => { let payload = TYPES::BlockPayload::from_bytes( - proposal.data.encoded_transactions.into_iter(), + &proposal.data.encoded_transactions, &proposal.data.metadata, ); let now = Instant::now(); @@ -530,15 +530,13 @@ async fn build_block( let commitment = block_payload.builder_commitment(&metadata); - let encoded_payload = block_payload.encode().unwrap().collect(); let (vid_commitment, precompute_data) = - precompute_vid_commitment(&encoded_payload, num_storage_nodes); + precompute_vid_commitment(&block_payload.encode().unwrap(), num_storage_nodes); // Get block size from the encoded payload let block_size = block_payload .encode() .expect("failed to encode block") - .collect::>() .len() as u64; let signature_over_block_info = diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index d306d13963..8240203f12 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -235,8 +235,8 @@ pub fn vid_payload_commitment( transactions: Vec, ) -> VidCommitment { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); - let vid_disperse = vid.disperse(encoded_transactions).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); + let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); vid_disperse.commit } @@ -245,7 +245,7 @@ pub fn da_payload_commitment( quorum_membership: &::Membership, transactions: Vec, ) -> VidCommitment { - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) } @@ -258,11 +258,11 @@ pub fn build_vid_proposal( private_key: &::PrivateKey, ) -> Vec>> { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); let vid_disperse = VidDisperse::from_membership( view_number, - vid.disperse(encoded_transactions).unwrap(), + vid.disperse(&encoded_transactions).unwrap(), quorum_membership, ); @@ -283,7 +283,7 @@ pub fn build_da_certificate( public_key: &::SignatureKey, private_key: &::PrivateKey, ) -> DACertificate { - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); let da_payload_commitment = vid_commitment(&encoded_transactions, quorum_membership.total_nodes()); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index f0e12d58a3..28ba13811c 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -1,4 +1,4 @@ -use std::{cmp::max, marker::PhantomData}; +use std::{cmp::max, marker::PhantomData, sync::Arc}; use committable::Committable; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; @@ -96,7 +96,7 @@ impl TestView { proposal_certificate: None, }; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let block_payload_signature = ::SignatureKey::sign(&private_key, &encoded_transactions_hash) @@ -298,7 +298,7 @@ impl TestView { _pd: PhantomData, }; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = Arc::from(TestTransaction::encode(transactions).unwrap()); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let block_payload_signature = ::SignatureKey::sign(&private_key, &encoded_transactions_hash) diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index c4cca251f6..e67c26299a 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -52,7 +52,7 @@ async fn test_random_block_builder() { let mut blocks = loop { // Test getting blocks let blocks = client - .get_available_blocks(vid_commitment(&vec![], 1), pub_key, &signature) + .get_available_blocks(vid_commitment(&[], 1), pub_key, &signature) .await .expect("Failed to get available blocks"); diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 026f3e424e..07dfb0f269 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::TestTransaction, @@ -30,7 +32,7 @@ async fn test_da_task() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let payload_commit = vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), @@ -68,7 +70,7 @@ async fn test_da_task() { ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( - encoded_transactions.clone(), + encoded_transactions, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), @@ -109,7 +111,7 @@ async fn test_da_task_storage_failure() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let payload_commit = vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), @@ -147,7 +149,7 @@ async fn test_da_task_storage_failure() { ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( - encoded_transactions.clone(), + encoded_transactions, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index a10f1d181e..79aeb5840c 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, marker::PhantomData}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::types::SignatureKey; use hotshot_example_types::{ @@ -38,7 +38,7 @@ async fn test_vid_task() { let transactions = vec![TestTransaction(vec![0])]; let (payload, metadata) = TestBlockPayload::from_transactions(transactions.clone()).unwrap(); let builder_commitment = payload.builder_commitment(&metadata); - let encoded_transactions = TestTransaction::encode(transactions.clone()).unwrap(); + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; @@ -83,7 +83,7 @@ async fn test_vid_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::BlockRecv( - encoded_transactions.clone(), + encoded_transactions, (), ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e0d1b7437a..bf9ab41722 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -68,7 +68,7 @@ pub struct Consensus { /// Saved payloads. /// /// Encoded transactions for every view if we got a payload for that view. - pub saved_payloads: BTreeMap>, + pub saved_payloads: BTreeMap>, /// The `locked_qc` view number pub locked_view: TYPES::Time, diff --git a/types/src/data.rs b/types/src/data.rs index 5df12a75af..bc88e02221 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -8,6 +8,7 @@ use std::{ fmt::{Debug, Display}, hash::Hash, marker::PhantomData, + sync::Arc, }; use anyhow::{ensure, Result}; @@ -114,7 +115,7 @@ impl std::ops::Sub for ViewNumber { #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct DAProposal { /// Encoded transactions in the block to be applied. - pub encoded_transactions: Vec, + pub encoded_transactions: Arc<[u8]>, /// Metadata of the block to be applied. pub metadata: ::Metadata, /// View this proposal applies to @@ -440,10 +441,8 @@ impl Leaf { pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { let (payload, metadata) = TYPES::BlockPayload::genesis(); let builder_commitment = payload.builder_commitment(&metadata); - let payload_bytes = payload - .encode() - .expect("unable to encode genesis payload") - .collect(); + let payload_bytes = payload.encode().expect("unable to encode genesis payload"); + let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); let block_header = TYPES::BlockHeader::genesis( instance_state, @@ -507,11 +506,8 @@ impl Leaf { block_payload: TYPES::BlockPayload, num_storage_nodes: usize, ) -> Result<(), BlockError> { - let encoded_txns = match block_payload.encode() { - // TODO (Keyao) [VALIDATED_STATE] - Avoid collect/copy on the encoded transaction bytes. - // - Ok(encoded) => encoded.into_iter().collect(), - Err(_) => return Err(BlockError::InvalidTransactionLength), + let Ok(encoded_txns) = block_payload.encode() else { + return Err(BlockError::InvalidTransactionLength); }; let commitment = vid_commitment(&encoded_txns, num_storage_nodes); if commitment != self.block_header.payload_commitment() { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 6ee11e2e9c..3e33635495 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -8,6 +8,7 @@ use std::{ fmt::{Debug, Display}, future::Future, hash::Hash, + sync::Arc, }; use committable::{Commitment, Committable}; @@ -48,11 +49,6 @@ pub trait BlockPayload: /// Data created during block building which feeds into the block header type Metadata: Clone + Debug + DeserializeOwned + Eq + Hash + Send + Sync + Serialize; - /// Encoded payload. - type Encode<'a>: 'a + Iterator + Send - where - Self: 'a; - /// Build a payload and associated metadata with the transactions. /// /// # Errors @@ -63,11 +59,7 @@ pub trait BlockPayload: /// Build a payload with the encoded transaction bytes, metadata, /// and the associated number of VID storage nodes - /// - /// `I` may be, but not necessarily is, the `Encode` type directly from `fn encode`. - fn from_bytes(encoded_transactions: I, metadata: &Self::Metadata) -> Self - where - I: Iterator; + fn from_bytes(encoded_transactions: &[u8], metadata: &Self::Metadata) -> Self; /// Build the genesis payload and metadata. fn genesis() -> (Self, Self::Metadata); @@ -76,7 +68,7 @@ pub trait BlockPayload: /// /// # Errors /// If the transaction length conversion fails. - fn encode(&self) -> Result, Self::Error>; + fn encode(&self) -> Result, Self::Error>; /// List of transaction commitments. fn transaction_commitments( @@ -119,10 +111,11 @@ pub trait TestableBlock: BlockPayload + Debug { #[must_use] #[allow(clippy::panic)] pub fn vid_commitment( - encoded_transactions: &Vec, + encoded_transactions: &[u8], num_storage_nodes: usize, ) -> ::Commit { - vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}) error: {err}", encoded_transactions.len())) + let encoded_tx_len = encoded_transactions.len(); + vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{encoded_tx_len}) error: {err}")) } /// Compute the VID payload commitment along with precompute data reducing time in VID Disperse @@ -131,13 +124,14 @@ pub fn vid_commitment( #[must_use] #[allow(clippy::panic)] pub fn precompute_vid_commitment( - encoded_transactions: &Vec, + encoded_transactions: &[u8], num_storage_nodes: usize, ) -> ( ::Commit, ::PrecomputeData, ) { - vid_scheme(num_storage_nodes).commit_only_precompute(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{}) error: {err}", encoded_transactions.len())) + let encoded_tx_len = encoded_transactions.len(); + vid_scheme(num_storage_nodes).commit_only_precompute(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{encoded_tx_len}) error: {err}")) } /// The number of storage nodes to use when computing the genesis VID commitment. From de1e063d1cc80f49df183058fa8ca69d7392e0bd Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 23 Apr 2024 22:22:12 +0800 Subject: [PATCH 0974/1393] [CX_CLEANUP] - Add a standalone function to handle `QuorumProposalRecv` (#2995) * moving functions over * complete refactor * fix lint * fix build and lint * Add proposal event handling, rename file * Fix too many args and lines lints * new method for proposing * move feature gate * tmp revert * fix test failures * Fix build after merge * update implementation to integrate * remove dead code and too many args comments --------- Co-authored-by: Jarred Parr --- task-impls/src/consensus/mod.rs | 249 ++-------------- .../{proposal.rs => proposal_helpers.rs} | 271 +++++++++++++++++- task-impls/src/quorum_proposal.rs | 2 +- 3 files changed, 286 insertions(+), 236 deletions(-) rename task-impls/src/consensus/{proposal.rs => proposal_helpers.rs} (67%) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index df3e194f05..79736a5589 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -6,10 +6,7 @@ use std::{ use crate::{ consensus::{ - proposal::{ - publish_proposal_if_able, validate_proposal_safety_and_liveness, - validate_proposal_view_and_certs, - }, + proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, view_change::update_view, }, events::{HotShotEvent, HotShotTaskCompleted}, @@ -19,16 +16,15 @@ use crate::{ }, }; use async_broadcast::Sender; -use async_compatibility_layer::art::async_spawn; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; -use futures::{future::join_all, FutureExt}; +use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus, View}, + consensus::{CommitmentAndMetadata, Consensus}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::Proposal, @@ -41,29 +37,26 @@ use hotshot_types::{ network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, - states::ValidatedState, storage::Storage, BlockPayload, }, - utils::{Terminator, ViewInner}, + utils::Terminator, vote::{Certificate, HasViewNumber}, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; + #[cfg(not(feature = "dependency-tasks"))] -use { - crate::helpers::AnyhowTracing, - hotshot_types::{ - data::{null_block, VidDisperseShare}, - message::GeneralConsensusMessage, - simple_vote::QuorumData, - }, +use hotshot_types::{ + data::{null_block, VidDisperseShare}, + message::GeneralConsensusMessage, + simple_vote::QuorumData, }; -/// Handles proposal-related functionality. -pub(crate) mod proposal; +/// Helper functions to handler proposal-related functionality. +pub(crate) mod proposal_helpers; /// Handles view-change related functionality. pub(crate) mod view_change; @@ -389,224 +382,18 @@ impl, A: ConsensusApi + match event.as_ref() { #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { - let sender = sender.clone(); - debug!( - "Received Quorum Proposal for view {}", - *proposal.data.view_number - ); - - // stop polling for the received proposal - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( - *proposal.data.view_number, - )) - .await; - - if let Err(e) = validate_proposal_view_and_certs( - proposal, - &sender, - self.cur_view, - &self.quorum_membership, - &self.timeout_membership, - ) { - warn!("Failed to validate proposal view and attached certs; error = {e:?}"); - return; - } - - let view = proposal.data.get_view_number(); - let view_leader_key = self.quorum_membership.get_leader(view); - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { - error!("Invalid justify_qc in proposal for view {}", *view); - let consensus = self.consensus.write().await; - consensus.metrics.invalid_qc.update(1); - return; - } - - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::( - self.public_key.clone(), - view, - &event_stream, - self.quorum_membership.clone(), - self.quorum_network.clone(), - self.timeout, - self.consensus.clone(), - &mut self.cur_view, - &mut self.timeout_task, - ) - .await + match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self) + .await { - warn!("Failed to update view; error = {e:?}"); - } - - let consensus = self.consensus.upgradable_read().await; - - // Get the parent leaf and state. - let parent = if justify_qc.is_genesis { - // Send the `Decide` event for the genesis block if the justify QC is genesis. - let leaf = Leaf::genesis(&consensus.instance_state); - let (validated_state, state_delta) = - TYPES::ValidatedState::genesis(&consensus.instance_state); - let state = Arc::new(validated_state); - broadcast_event( - Event { - view_number: TYPES::Time::genesis(), - event: EventType::Decide { - leaf_chain: Arc::new(vec![LeafInfo::new( - leaf.clone(), - state.clone(), - Some(Arc::new(state_delta)), - None, - )]), - qc: Arc::new(justify_qc.clone()), - block_size: None, - }, - }, - &self.output_event_stream, - ) - .await; - Some((leaf, state)) - } else { - match consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - { - Some(leaf) => { - if let (Some(state), _) = - consensus.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, state.clone())) - } else { - error!("Parent state not found! Consensus internally inconsistent"); - return; - } - } - None => None, - } - }; - - if justify_qc.get_view_number() > consensus.high_qc.view_number { - if let Err(e) = self - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - warn!("Failed to store High QC not voting. Error: {:?}", e); - return; - } - } - - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - - if justify_qc.get_view_number() > consensus.high_qc.view_number { - debug!("Updating high QC"); - consensus.high_qc = justify_qc.clone(); - } - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some((parent_leaf, parent_state)) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.get_data().leaf_commit - ); - let leaf = Leaf::from_quorum_proposal(&proposal.data); - - let state = Arc::new( - >::from_header( - &proposal.data.block_header, - ), - ); - - consensus.validated_state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, - }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - - if let Err(e) = self - .storage - .write() - .await - .update_undecided_state( - consensus.saved_leaves.clone(), - consensus.validated_state_map.clone(), - ) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - - // If we are missing the parent from storage, the safety check will fail. But we can - // still vote if the liveness check succeeds. - let liveness_check = justify_qc.get_view_number() > consensus.locked_view; - - let high_qc = consensus.high_qc.clone(); - let locked_view = consensus.locked_view; - - drop(consensus); - - if liveness_check { - self.current_proposal = Some(proposal.data.clone()); - let new_view = proposal.data.view_number + 1; - - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_membership.get_leader(new_view) - == self.public_key - && high_qc.view_number - == self.current_proposal.clone().unwrap().view_number; - - let qc = high_qc.clone(); - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_stream.clone()) - .await - { - warn!("Failed to propose; error = {e:?}"); - }; - } + Ok(Some(current_proposal)) => { + self.current_proposal = Some(current_proposal); if self.vote_if_able(&event_stream).await { self.current_proposal = None; } } - warn!("Failed liveneess check; cannot find parent either\n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", high_qc, proposal.data.clone(), locked_view); - - return; - }; - - self.spawned_tasks - .entry(proposal.data.get_view_number()) - .or_default() - .push(async_spawn( - validate_proposal_safety_and_liveness( - proposal.clone(), - parent_leaf, - self.consensus.clone(), - self.decided_upgrade_cert.clone(), - self.quorum_membership.clone(), - parent_state.clone(), - view_leader_key, - event_stream.clone(), - sender, - self.output_event_stream.clone(), - self.storage.clone(), - ) - .map(AnyhowTracing::err_as_debug), - )); + Ok(None) => {} + Err(e) => warn!(?e, "Failed to propose"), + } } HotShotEvent::QuorumProposalValidated(proposal, _) => { let consensus = self.consensus.upgradable_read().await; diff --git a/task-impls/src/consensus/proposal.rs b/task-impls/src/consensus/proposal_helpers.rs similarity index 67% rename from task-impls/src/consensus/proposal.rs rename to task-impls/src/consensus/proposal_helpers.rs index 26e604af77..b62d4f0692 100644 --- a/task-impls/src/consensus/proposal.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -2,20 +2,29 @@ use core::time::Duration; use std::marker::PhantomData; use std::sync::Arc; -use anyhow::{ensure, Context, Result}; +use crate::{consensus::update_view, helpers::AnyhowTracing}; +use anyhow::{bail, ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::Committable; +use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType}, + event::{Event, EventType, LeafInfo}, message::Proposal, simple_certificate::UpgradeCertificate, traits::{ - block_contents::BlockHeader, election::Membership, node_implementation::NodeType, - signature_key::SignatureKey, states::ValidatedState, storage::Storage, BlockPayload, + block_contents::BlockHeader, + consensus_api::ConsensusApi, + election::Membership, + network::{ConnectedNetwork, ConsensusIntentEvent}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, + states::ValidatedState, + storage::Storage, + BlockPayload, }, utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, @@ -29,6 +38,8 @@ use tokio::task::JoinHandle; use crate::{events::HotShotEvent, helpers::broadcast_event}; +use super::ConsensusTaskState; + /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. #[allow(clippy::too_many_arguments)] @@ -550,3 +561,255 @@ pub async fn publish_proposal_if_able( .await } } + +// TODO: Fix `clippy::too_many_lines`. +/// Handle the received quorum proposal. +/// +/// Returns the proposal that should be used to set the `cur_proposal` for other tasks. +#[allow(clippy::too_many_lines)] +pub async fn handle_quorum_proposal_recv< + TYPES: NodeType, + I: NodeImplementation, + A: ConsensusApi, +>( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, +) -> Result>> { + let sender = sender.clone(); + debug!( + "Received Quorum Proposal for view {}", + *proposal.data.view_number + ); + + // stop polling for the received proposal + task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( + *proposal.data.view_number, + )) + .await; + + validate_proposal_view_and_certs( + proposal, + &sender, + task_state.cur_view, + &task_state.quorum_membership, + &task_state.timeout_membership, + ) + .context("Failed to validate proposal view and attached certs")?; + + let view = proposal.data.get_view_number(); + let view_leader_key = task_state.quorum_membership.get_leader(view); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { + let consensus = task_state.consensus.write().await; + consensus.metrics.invalid_qc.update(1); + bail!("Invalid justify_qc in proposal for view {}", *view); + } + + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here + if let Err(e) = update_view::( + task_state.public_key.clone(), + view, + &event_stream, + task_state.quorum_membership.clone(), + task_state.quorum_network.clone(), + task_state.timeout, + task_state.consensus.clone(), + &mut task_state.cur_view, + &mut task_state.timeout_task, + ) + .await + { + warn!("Failed to update view; error = {e:?}"); + } + + let consensus_read = task_state.consensus.upgradable_read().await; + + // Get the parent leaf and state. + let parent = if justify_qc.is_genesis { + // Send the `Decide` event for the genesis block if the justify QC is genesis. + let leaf = Leaf::genesis(&consensus_read.instance_state); + let (validated_state, state_delta) = + TYPES::ValidatedState::genesis(&consensus_read.instance_state); + let state = Arc::new(validated_state); + broadcast_event( + Event { + view_number: TYPES::Time::genesis(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![LeafInfo::new( + leaf.clone(), + state.clone(), + Some(Arc::new(state_delta)), + None, + )]), + qc: Arc::new(justify_qc.clone()), + block_size: None, + }, + }, + &task_state.output_event_stream, + ) + .await; + Some((leaf, state)) + } else { + match consensus_read + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = consensus_read.get_state_and_delta(leaf.get_view_number()) + { + Some((leaf, state.clone())) + } else { + bail!("Parent state not found! Consensus internally inconsistent"); + } + } + None => None, + } + }; + + if justify_qc.get_view_number() > consensus_read.high_qc.view_number { + if let Err(e) = task_state + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + bail!("Failed to store High QC not voting. Error: {:?}", e); + } + } + + let mut consensus_write = RwLockUpgradableReadGuard::upgrade(consensus_read).await; + + if justify_qc.get_view_number() > consensus_write.high_qc.view_number { + debug!("Updating high QC"); + consensus_write.high_qc = justify_qc.clone(); + } + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some((parent_leaf, parent_state)) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.get_data().leaf_commit + ); + let leaf = Leaf::from_quorum_proposal(&proposal.data); + + let state = Arc::new( + >::from_header( + &proposal.data.block_header, + ), + ); + + consensus_write.validated_state_map.insert( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }, + ); + consensus_write + .saved_leaves + .insert(leaf.commit(), leaf.clone()); + + if let Err(e) = task_state + .storage + .write() + .await + .update_undecided_state( + consensus_write.saved_leaves.clone(), + consensus_write.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + + // If we are missing the parent from storage, the safety check will fail. But we can + // still vote if the liveness check succeeds. + let liveness_check = justify_qc.get_view_number() > consensus_write.locked_view; + + let high_qc = consensus_write.high_qc.clone(); + let locked_view = consensus_write.locked_view; + + drop(consensus_write); + + let mut current_proposal = None; + + if liveness_check { + current_proposal = Some(proposal.data.clone()); + let new_view = proposal.data.view_number + 1; + + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.get_leader(new_view) + == task_state.public_key + && high_qc.view_number == current_proposal.clone().unwrap().view_number; + + let qc = high_qc.clone(); + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + let create_and_send_proposal_handle = publish_proposal_if_able( + task_state.cur_view, + qc.view_number + 1, + event_stream, + task_state.quorum_membership.clone(), + task_state.public_key.clone(), + task_state.private_key.clone(), + task_state.consensus.clone(), + task_state.round_start_delay, + task_state.formed_upgrade_certificate.clone(), + task_state.decided_upgrade_cert.clone(), + &mut task_state.payload_commitment_and_metadata, + &mut task_state.proposal_cert, + ) + .await?; + + task_state + .spawned_tasks + .entry(view) + .or_default() + .push(create_and_send_proposal_handle); + } + // TODO: Instead of calling `vote_if_able` here, we can call it in the original place + // in the consensus task, and set `current_proposal` accordingly. + // if self.vote_if_able(&event_stream).await { + // current_proposal = None; + // } + } + warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); + + return Ok(current_proposal); + }; + + task_state + .spawned_tasks + .entry(proposal.data.get_view_number()) + .or_default() + .push(async_spawn( + validate_proposal_safety_and_liveness( + proposal.clone(), + parent_leaf, + task_state.consensus.clone(), + task_state.decided_upgrade_cert.clone(), + task_state.quorum_membership.clone(), + parent_state.clone(), + view_leader_key, + event_stream.clone(), + sender, + task_state.output_event_stream.clone(), + task_state.storage.clone(), + ) + .map(AnyhowTracing::err_as_debug), + )); + Ok(None) +} diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 2a800c4757..2802ada50a 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -35,7 +35,7 @@ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use crate::{ - consensus::proposal::validate_proposal_safety_and_liveness, + consensus::proposal_helpers::validate_proposal_safety_and_liveness, events::HotShotEvent, helpers::{broadcast_event, cancel_task, AnyhowTracing}, }; From 812df5afbca2eaad0ffaf73d7ab4b164747b7105 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:25:10 -0400 Subject: [PATCH 0975/1393] add num empty blocks proposed to metrics (#3018) --- task-impls/src/transactions.rs | 10 +++++++++- types/src/consensus.rs | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2a76873e03..30eb5790a1 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -157,10 +157,18 @@ impl< } else { // If we couldn't get a block, send an empty block error!( - "Failed to get a block for view {:?} proposing empty block", + "Failed to get a block for view {:?}, proposing empty block", view ); + // Increment the metric for number of empty blocks proposed + self.consensus + .write() + .await + .metrics + .number_of_empty_blocks_proposed + .add(1); + // Calculate the builder fee for the empty block let Some(builder_fee) = null_block::builder_fee(self.membership.total_nodes()) else { diff --git a/types/src/consensus.rs b/types/src/consensus.rs index bf9ab41722..6ab4d0264c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -103,6 +103,8 @@ pub struct ConsensusMetricsValue { pub outstanding_transactions_memory_size: Box, /// Number of views that timed out pub number_of_timeouts: Box, + /// The number of empty blocks that have been proposed + pub number_of_empty_blocks_proposed: Box, } /// The wrapper with a string name for the networking metrics @@ -239,6 +241,8 @@ impl ConsensusMetricsValue { outstanding_transactions_memory_size: metrics .create_gauge(String::from("outstanding_transactions_memory_size"), None), number_of_timeouts: metrics.create_counter(String::from("number_of_timeouts"), None), + number_of_empty_blocks_proposed: metrics + .create_counter(String::from("number_of_empty_blocks_proposed"), None), } } } From e3b8f56b51e436361aea743193a3ac5348e2f135 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:00:03 -0400 Subject: [PATCH 0976/1393] Remove special casing for view 1 (again) (#2968) --- examples/push-cdn/broker.rs | 3 +- hotshot/src/lib.rs | 50 +++++++++--- hotshot/src/types/handle.rs | 11 ++- task-impls/src/consensus/mod.rs | 56 ++++++------- task-impls/src/consensus/proposal_helpers.rs | 85 +++++++------------- task-impls/src/quorum_vote.rs | 42 +++------- testing/src/test_runner.rs | 2 +- testing/src/view_generator.rs | 3 +- testing/tests/tests_1/message.rs | 1 - types/src/constants.rs | 5 +- types/src/data.rs | 53 +++++++++--- types/src/simple_certificate.rs | 35 +------- 12 files changed, 164 insertions(+), 182 deletions(-) diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 2559b29357..543e8b114f 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -5,8 +5,7 @@ use cdn_broker::{Broker, Config}; use clap::Parser; use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey}; use hotshot_example_types::node_types::TestTypes; -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use sha2::Digest; #[derive(Parser, Debug)] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5d6aaa39a4..06240bfdcb 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -33,9 +33,9 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, - constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, STATIC_VER_0_1}, + constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE, STATIC_VER_0_1}, data::Leaf, - event::EventType, + event::{EventType, LeafInfo}, message::{DataMessage, Message, MessageKind}, simple_certificate::QuorumCertificate, traits::{ @@ -144,7 +144,7 @@ pub struct SystemContext> { // global_registry: GlobalRegistry, /// Access to the output event stream. - pub output_event_stream: (Sender>, InactiveReceiver>), + pub output_event_stream: (Sender>, Receiver>), /// access to the internal event stream, in case we need to, say, shut something down #[allow(clippy::type_complexity)] @@ -184,6 +184,12 @@ impl> SystemContext { let anchored_leaf = initializer.inner; let instance_state = initializer.instance_state; + let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); + let (mut external_tx, mut external_rx) = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); + + // Allow overflow on the channel, otherwise sending to it may block. + external_rx.set_overflow(true); + // Get the validated state from the initializer or construct an incomplete one from the // block header. let validated_state = match initializer.validated_state { @@ -193,6 +199,8 @@ impl> SystemContext { )), }; + let state_delta = initializer.state_delta.as_ref(); + // Insert the validated state to state map. let mut validated_state_map = BTreeMap::default(); validated_state_map.insert( @@ -200,8 +208,8 @@ impl> SystemContext { View { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), - state: validated_state, - delta: initializer.state_delta, + state: validated_state.clone(), + delta: initializer.state_delta.clone(), }, }, ); @@ -212,6 +220,29 @@ impl> SystemContext { let mut saved_leaves = HashMap::new(); let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); + + // Some applications seem to expect a leaf decide event for the genesis leaf, + // which contains only that leaf and nothing else. + if anchored_leaf.get_view_number() == TYPES::Time::genesis() { + broadcast_event( + Event { + view_number: anchored_leaf.get_view_number(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![LeafInfo::new( + anchored_leaf.clone(), + validated_state.clone(), + state_delta.cloned(), + None, + )]), + qc: Arc::new(QuorumCertificate::genesis(&instance_state)), + block_size: None, + }, + }, + &external_tx, + ) + .await; + } + for leaf in initializer.undecided_leafs { saved_leaves.insert(leaf.commit(), leaf.clone()); } @@ -244,9 +275,6 @@ impl> SystemContext { let consensus = Arc::new(RwLock::new(consensus)); let version = Arc::new(RwLock::new(BASE_VERSION)); - let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); - let (mut external_tx, external_rx) = broadcast(EVENT_CHANNEL_SIZE); - // This makes it so we won't block on broadcasting if there is not a receiver // Our own copy of the receiver is inactive so it doesn't count. external_tx.set_await_active(false); @@ -263,7 +291,7 @@ impl> SystemContext { memberships: Arc::new(memberships), _metrics: consensus_metrics.clone(), internal_event_stream: (internal_tx, internal_rx.deactivate()), - output_event_stream: (external_tx, external_rx.deactivate()), + output_event_stream: (external_tx, external_rx), storage: Arc::new(RwLock::new(storage)), }); @@ -695,13 +723,13 @@ impl HotShotInitializer { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); Ok(Self { inner: Leaf::genesis(&instance_state), - instance_state, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), - high_qc: QuorumCertificate::genesis(), + high_qc: QuorumCertificate::genesis(&instance_state), undecided_leafs: Vec::new(), undecided_state: BTreeMap::new(), + instance_state, }) } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 5cfe8a1c52..329d95a201 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -25,10 +25,9 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; /// the underlying storage. #[derive(Clone)] pub struct SystemContextHandle> { - /// The [sender](Sender) and an `InactiveReceiver` to keep the channel open. - /// The Channel will output all the events. Subscribers will get an activated - /// clone of the `Receiver` when they get output stream. - pub(crate) output_event_stream: (Sender>, InactiveReceiver>), + /// The [sender](Sender) and [receiver](Receiver), + /// to allow the application to communicate with HotShot. + pub(crate) output_event_stream: (Sender>, Receiver>), /// access to the internal event stream, in case we need to, say, shut something down #[allow(clippy::type_complexity)] @@ -49,7 +48,7 @@ pub struct SystemContextHandle> { impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user pub fn get_event_stream(&self) -> impl Stream> { - self.output_event_stream.1.activate_cloned() + self.output_event_stream.1.clone() } /// HACK so we can know the types when running tests... @@ -58,7 +57,7 @@ impl + 'static> SystemContextHandl /// - type wrapper #[must_use] pub fn get_event_stream_known_impl(&self) -> Receiver> { - self.output_event_stream.1.activate_cloned() + self.output_event_stream.1.clone() } /// HACK so we can know the types when running tests... diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 79736a5589..268a0b7fbd 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,20 +1,9 @@ -use anyhow::Result; use std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; -use crate::{ - consensus::{ - proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, - view_change::update_view, - }, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; +use anyhow::Result; use async_broadcast::Sender; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] @@ -43,17 +32,28 @@ use hotshot_types::{ utils::Terminator, vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; -use vbs::version::Version; - #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::{ data::{null_block, VidDisperseShare}, message::GeneralConsensusMessage, simple_vote::QuorumData, }; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, info, instrument, warn}; +use vbs::version::Version; + +use crate::{ + consensus::{ + proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, + view_change::update_view, + }, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; /// Helper functions to handler proposal-related functionality. pub(crate) mod proposal_helpers; @@ -213,14 +213,10 @@ impl, A: ConsensusApi + let view = cert.view_number; // TODO: do some of this logic without the vote token check, only do that when voting. let justify_qc = proposal.justify_qc.clone(); - let parent = if justify_qc.is_genesis { - Some(Leaf::genesis(&consensus.instance_state)) - } else { - consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - }; + let parent = consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned(); // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { @@ -233,15 +229,15 @@ impl, A: ConsensusApi + }; let parent_commitment = parent.commit(); - let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); - proposed_leaf.set_parent_commitment(parent_commitment); + let proposed_leaf = Leaf::from_quorum_proposal(proposal); + if proposed_leaf.get_parent_commitment() != parent_commitment { + return false; + } // Validate the DAC. let message = if cert.is_valid_cert(self.committee_membership.as_ref()) { // Validate the block payload commitment for non-genesis DAC. - if !cert.is_genesis - && cert.get_data().payload_commit - != proposal.block_header.payload_commitment() + if cert.get_data().payload_commit != proposal.block_header.payload_commitment() { error!("Block payload commitment does not equal da cert payload commitment. View = {}", *view); return false; diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index b62d4f0692..51a9e2de84 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -1,18 +1,18 @@ use core::time::Duration; -use std::marker::PhantomData; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; -use crate::{consensus::update_view, helpers::AnyhowTracing}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use committable::Committable; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType, LeafInfo}, + event::{Event, EventType}, message::Proposal, simple_certificate::UpgradeCertificate, traits::{ @@ -20,7 +20,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, states::ValidatedState, storage::Storage, @@ -29,16 +29,16 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use tracing::{debug, error, warn}; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; - -use crate::{events::HotShotEvent, helpers::broadcast_event}; +use tracing::{debug, error, warn}; use super::ConsensusTaskState; +use crate::{ + consensus::update_view, + events::HotShotEvent, + helpers::{broadcast_event, AnyhowTracing}, +}; /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. @@ -68,11 +68,13 @@ pub async fn validate_proposal_safety_and_liveness( let state = Arc::new(validated_state); let delta = Arc::new(state_delta); - let parent_commitment = parent_leaf.commit(); let view = proposal.data.get_view_number(); - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - proposed_leaf.set_parent_commitment(parent_commitment); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + ensure!( + proposed_leaf.get_parent_commitment() == parent_leaf.commit(), + "Proposed leaf does not extend the parent leaf." + ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment // @@ -217,8 +219,10 @@ pub async fn create_and_send_proposal( upgrade_certificate: upgrade_cert, }; - let mut proposed_leaf = Leaf::from_quorum_proposal(&proposal); - proposed_leaf.set_parent_commitment(parent_leaf.commit()); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.get_parent_commitment() != parent_leaf.commit() { + return; + } let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) else { @@ -630,46 +634,19 @@ pub async fn handle_quorum_proposal_recv< let consensus_read = task_state.consensus.upgradable_read().await; // Get the parent leaf and state. - let parent = if justify_qc.is_genesis { - // Send the `Decide` event for the genesis block if the justify QC is genesis. - let leaf = Leaf::genesis(&consensus_read.instance_state); - let (validated_state, state_delta) = - TYPES::ValidatedState::genesis(&consensus_read.instance_state); - let state = Arc::new(validated_state); - broadcast_event( - Event { - view_number: TYPES::Time::genesis(), - event: EventType::Decide { - leaf_chain: Arc::new(vec![LeafInfo::new( - leaf.clone(), - state.clone(), - Some(Arc::new(state_delta)), - None, - )]), - qc: Arc::new(justify_qc.clone()), - block_size: None, - }, - }, - &task_state.output_event_stream, - ) - .await; - Some((leaf, state)) - } else { - match consensus_read - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - { - Some(leaf) => { - if let (Some(state), _) = consensus_read.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, state.clone())) - } else { - bail!("Parent state not found! Consensus internally inconsistent"); - } + let parent = match consensus_read + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = consensus_read.get_state_and_delta(leaf.get_view_number()) { + Some((leaf, state.clone())) + } else { + bail!("Parent state not found! Consensus internally inconsistent"); } - None => None, } + None => None, }; if justify_qc.get_view_number() > consensus_read.high_qc.view_number { diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 4e5af7f215..073dc5de79 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -13,7 +13,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::Consensus, data::Leaf, - event::{Event, EventType, LeafInfo}, + event::{Event, EventType}, message::GeneralConsensusMessage, simple_vote::{QuorumData, QuorumVote}, traits::{ @@ -84,14 +84,16 @@ impl + 'static> HandleDepOutput payload_commitment = Some(proposal_payload_comm); } let parent_commitment = parent_leaf.commit(); - let mut proposed_leaf = Leaf::from_quorum_proposal(proposal); - proposed_leaf.set_parent_commitment(parent_commitment); + let proposed_leaf = Leaf::from_quorum_proposal(proposal); + if proposed_leaf.get_parent_commitment() != parent_commitment { + return; + } leaf = Some(proposed_leaf); } HotShotEvent::DACertificateValidated(cert) => { let cert_payload_comm = cert.get_data().payload_commit; if let Some(comm) = payload_commitment { - if !cert.is_genesis && cert_payload_comm != comm { + if cert_payload_comm != comm { error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } @@ -377,31 +379,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState, SpinningTask>::new( Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 28ba13811c..2e8585cac1 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -91,7 +91,7 @@ impl TestView { let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), view_number: genesis_view, - justify_qc: QuorumCertificate::genesis(), + justify_qc: QuorumCertificate::genesis(&TestInstanceState {}), upgrade_certificate: None, proposal_certificate: None, }; @@ -118,7 +118,6 @@ impl TestView { leaf.fill_block_payload_unchecked(TestBlockPayload { transactions: transactions.clone(), }); - leaf.set_parent_commitment(Leaf::genesis(&TestInstanceState {}).commit()); let signature = ::sign(&private_key, leaf.commit().as_ref()) .expect("Failed to sign leaf commitment!"); diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 6dd7ae1c29..b531218b0d 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -42,7 +42,6 @@ fn version_number_at_start_of_serialization() { vote_commitment: data.commit(), view_number, signatures: None, - is_genesis: false, _pd: PhantomData, }; let message = Message { diff --git a/types/src/constants.rs b/types/src/constants.rs index 8f93667ca8..b9e95e4a02 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -38,9 +38,12 @@ pub type Version01 = StaticVersion; /// Constant for protocol static version 0.1. pub const STATIC_VER_0_1: Version01 = StaticVersion {}; -/// Default Channel Size for consensus event sharing +/// Default channel size for consensus event sharing pub const EVENT_CHANNEL_SIZE: usize = 100_000; +/// Default channel size for HotShot -> application communication +pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 100_000; + /// Constants for `WebServerNetwork` and `WebServer` /// The Web CDN is not, strictly speaking, bound to the network; it can have its own versioning. /// Web Server CDN Version (major) diff --git a/types/src/data.rs b/types/src/data.rs index bc88e02221..a55e9f2ac7 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -14,7 +14,7 @@ use std::{ use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; -use committable::{Commitment, Committable, RawCommitmentBuilder}; +use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; use derivative::Derivative; use jf_primitives::vid::VidDisperse as JfVidDisperse; use rand::Rng; @@ -27,7 +27,7 @@ use crate::{ simple_certificate::{ QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, - simple_vote::UpgradeProposalData, + simple_vote::{QuorumData, UpgradeProposalData}, traits::{ block_contents::{ vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, @@ -430,6 +430,24 @@ impl Display for Leaf { } } +impl QuorumCertificate { + #[must_use] + /// Creat the Genesis certificate + pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { + let data = QuorumData { + leaf_commit: Leaf::genesis(instance_state).commit(), + }; + let commit = data.commit(); + Self { + data, + vote_commitment: commit, + view_number: ::genesis(), + signatures: None, + _pd: PhantomData, + } + } +} + impl Leaf { /// Create a new leaf from its components. /// @@ -444,16 +462,30 @@ impl Leaf { let payload_bytes = payload.encode().expect("unable to encode genesis payload"); let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + let block_header = TYPES::BlockHeader::genesis( instance_state, payload_commitment, builder_commitment, metadata, ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate { + data: null_quorum_data.clone(), + vote_commitment: null_quorum_data.commit(), + view_number: ::genesis(), + signatures: None, + _pd: PhantomData, + }; + Self { view_number: TYPES::Time::genesis(), - justify_qc: QuorumCertificate::::genesis(), - parent_commitment: fake_commitment(), + justify_qc, + parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, block_header: block_header.clone(), block_payload: Some(payload), @@ -482,11 +514,7 @@ impl Leaf { pub fn get_parent_commitment(&self) -> Commitment { self.parent_commitment } - /// Commitment to this leaf's parent. - pub fn set_parent_commitment(&mut self, commitment: Commitment) { - self.parent_commitment = commitment; - } - /// Get a reference to the block header contained in this leaf. + /// The block header contained in this leaf. pub fn get_block_header(&self) -> &::BlockHeader { &self.block_header } @@ -549,9 +577,10 @@ impl Leaf { self.get_upgrade_certificate(), parent.get_upgrade_certificate(), ) { - // If the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade. Again, this is always fine. - // But, if we have no upgrade certificate on either is the most common case, and is always fine. - (Some(_) | None, None) => {} + // Easiest cases are: + // - no upgrade certificate on either: this is the most common case, and is always fine. + // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. + (None | Some(_), None) => {} // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 5fd4ff04a1..fe18f9d599 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -7,12 +7,12 @@ use std::{ }; use anyhow::{ensure, Result}; -use committable::{Commitment, CommitmentBoundsArkless, Committable}; +use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; use crate::{ - data::{serialize_signature2, Leaf}, + data::serialize_signature2, simple_vote::{ DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, @@ -72,8 +72,6 @@ pub struct SimpleCertificate::QCType>, - /// If this QC is for the genesis block - pub is_genesis: bool, /// phantom data for `THRESHOLD` and `TYPES` pub _pd: PhantomData<(TYPES, THRESHOLD)>, } @@ -91,7 +89,6 @@ impl> vote_commitment, view_number: view, signatures: Some(sig), - is_genesis: false, _pd: PhantomData, } } fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { - if self.is_genesis && self.view_number == TYPES::Time::genesis() { + if self.view_number == TYPES::Time::genesis() { return true; } let real_qc_pp = ::get_public_parameter( @@ -151,30 +147,7 @@ impl> } impl Display for QuorumCertificate { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "view: {:?}, is_genesis: {:?}", - self.view_number, self.is_genesis - ) - } -} - -impl QuorumCertificate { - #[must_use] - /// Creat the Genisis certificate - pub fn genesis() -> Self { - let data = QuorumData { - leaf_commit: Commitment::>::default_commitment_no_preimage(), - }; - let commit = data.commit(); - Self { - data, - vote_commitment: commit, - view_number: ::genesis(), - signatures: None, - is_genesis: true, - _pd: PhantomData, - } + write!(f, "view: {:?}", self.view_number) } } From 9a6ebfbf4c8ff7b383bad75727475335ac321840 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 23 Apr 2024 22:03:13 +0200 Subject: [PATCH 0977/1393] Add specialized validation functions to BuilderSignature trait (#3017) * Add specialized validation functions to BuilderSignature trait * Add EncodeBytes trait * Replace builder commitment with metadata when signing fee * Rename TestMetadata -> TestDescription * Remove accidental commented code * Fix broken doc link * Replace builder signature validation with new functions --- example-types/src/block_types.rs | 19 +++-- task-impls/src/transactions.rs | 38 +++------ testing/src/block_builder.rs | 2 +- testing/src/task_helpers.rs | 4 +- testing/src/test_builder.rs | 28 +++---- testing/src/test_launcher.rs | 4 +- testing/src/view_generator.rs | 6 +- testing/tests/tests_1/block_builder.rs | 3 +- testing/tests/tests_1/consensus_task.rs | 6 +- testing/tests/tests_1/da_task.rs | 6 +- testing/tests/tests_1/libp2p.rs | 12 +-- testing/tests/tests_1/network_task.rs | 26 +++---- testing/tests/tests_1/proposal_ordering.rs | 7 +- testing/tests/tests_1/quorum_proposal_task.rs | 16 ++-- testing/tests/tests_1/test_success.rs | 22 +++--- testing/tests/tests_1/test_with_failures_2.rs | 18 +++-- testing/tests/tests_1/upgrade_task.rs | 16 ++-- testing/tests/tests_1/vid_task.rs | 8 +- testing/tests/tests_2/catchup.rs | 20 ++--- testing/tests/tests_2/push_cdn.rs | 6 +- .../tests/tests_2/test_with_failures_one.rs | 18 +++-- .../tests_3/test_with_failures_half_f.rs | 19 ++--- testing/tests/tests_4/test_with_failures_f.rs | 19 ++--- testing/tests/tests_5/combined_network.rs | 22 +++--- testing/tests/tests_5/timeout.rs | 19 ++--- testing/tests/tests_5/unreliable_network.rs | 73 +++++++++--------- testing/tests/tests_5/web_server.rs | 6 +- types/src/data.rs | 4 +- types/src/traits.rs | 2 +- types/src/traits/block_contents.rs | 16 +++- types/src/traits/signature_key.rs | 77 ++++++++++++++++--- 31 files changed, 306 insertions(+), 236 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 6c35a40cef..dddb6dcd09 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -8,7 +8,7 @@ use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, traits::{ - block_contents::{BlockHeader, BuilderFee, TestableBlock, Transaction}, + block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, BlockPayload, ValidatedState, }, @@ -104,10 +104,19 @@ impl TestableBlock for TestBlockPayload { } } +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct TestMetadata; + +impl EncodeBytes for TestMetadata { + fn encode(&self) -> Arc<[u8]> { + Arc::new([]) + } +} + impl BlockPayload for TestBlockPayload { type Error = BlockError; type Transaction = TestTransaction; - type Metadata = (); + type Metadata = TestMetadata; fn from_transactions( transactions: impl IntoIterator, @@ -117,7 +126,7 @@ impl BlockPayload for TestBlockPayload { Self { transactions: txns_vec, }, - (), + TestMetadata, )) } @@ -143,7 +152,7 @@ impl BlockPayload for TestBlockPayload { } fn genesis() -> (Self, Self::Metadata) { - (Self::genesis(), ()) + (Self::genesis(), TestMetadata) } fn encode(&self) -> Result, Self::Error> { @@ -230,7 +239,7 @@ impl> Block } fn metadata(&self) -> &::Metadata { - &() + &TestMetadata } fn builder_commitment(&self) -> BuilderCommitment { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 30eb5790a1..4e69911ba4 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -314,19 +314,12 @@ impl< // Verify signature over chosen block instead of // verifying the signature over all the blocks received from builder - let combined_message_bytes = { - let mut combined_response_bytes: Vec = Vec::new(); - combined_response_bytes - .extend_from_slice(block_info.block_size.to_be_bytes().as_ref()); - combined_response_bytes - .extend_from_slice(block_info.offered_fee.to_be_bytes().as_ref()); - combined_response_bytes.extend_from_slice(block_info.block_hash.as_ref()); - combined_response_bytes - }; - if !block_info - .sender - .validate_builder_signature(&block_info.signature, &combined_message_bytes) - { + if !block_info.sender.validate_block_info_signature( + &block_info.signature, + block_info.block_size, + block_info.offered_fee, + &block_info.block_hash, + ) { error!("Failed to verify available block info response message signature"); continue; } @@ -387,23 +380,12 @@ impl< continue; } - let offered_fee = block_info.offered_fee; - let builder_commitment = block_data - .block_payload - .builder_commitment(&block_data.metadata); - let vid_commitment = header_input.vid_commitment; - let combined_response_bytes = { - let mut combined_response_bytes: Vec = Vec::new(); - combined_response_bytes - .extend_from_slice(offered_fee.to_be_bytes().as_ref()); - combined_response_bytes.extend_from_slice(builder_commitment.as_ref()); - combined_response_bytes.extend_from_slice(vid_commitment.as_ref()); - combined_response_bytes - }; // verify the signature over the message - if !header_input.sender.validate_builder_signature( + if !header_input.sender.validate_fee_signature( &header_input.fee_signature, - combined_response_bytes.as_ref(), + block_info.offered_fee, + &block_data.metadata, + &header_input.vid_commitment, ) { error!("Failed to verify fee signature"); continue; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 57f665d508..b7afd78b35 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -552,7 +552,7 @@ async fn build_block( .expect("Failed to sign block vid commitment"); let signature_over_fee_info = - TYPES::BuilderSignatureKey::sign_fee(&priv_key, 123_u64, &commitment, &vid_commitment) + TYPES::BuilderSignatureKey::sign_fee(&priv_key, 123_u64, &metadata, &vid_commitment) .expect("Failed to sign fee info"); let block = AvailableBlockData { diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 8240203f12..e46cac7f9e 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -33,7 +33,7 @@ use hotshot_types::{ use jf_primitives::vid::VidScheme; use serde::Serialize; -use crate::test_builder::TestMetadata; +use crate::test_builder::TestDescription; /// create the [`SystemContextHandle`] from a node id /// # Panics @@ -45,7 +45,7 @@ pub async fn build_system_handle( Sender>>, Receiver>>, ) { - let builder = TestMetadata::default_multiple_rounds(); + let builder = TestDescription::default_multiple_rounds(); let launcher = builder.gen_launcher::(node_id); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 41b0bedf59..a851565343 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -44,7 +44,7 @@ pub struct TimingData { /// metadata describing a test #[derive(Clone, Debug)] -pub struct TestMetadata { +pub struct TestDescription { /// Total number of staked nodes in the test pub num_nodes_with_stake: usize, /// Total number of non-staked nodes in the test @@ -94,7 +94,7 @@ impl Default for TimingData { } } -impl TestMetadata { +impl TestDescription { /// the default metadata for a stress test #[must_use] #[allow(clippy::redundant_field_names)] @@ -102,7 +102,7 @@ impl TestMetadata { let num_nodes_with_stake = 100; let num_nodes_without_stake = 0; - TestMetadata { + TestDescription { num_bootstrap_nodes: num_nodes_with_stake, num_nodes_with_stake: num_nodes_with_stake, num_nodes_without_stake: num_nodes_without_stake, @@ -123,17 +123,17 @@ impl TestMetadata { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestMetadata::default() + ..TestDescription::default() } } /// the default metadata for multiple rounds #[must_use] #[allow(clippy::redundant_field_names)] - pub fn default_multiple_rounds() -> TestMetadata { + pub fn default_multiple_rounds() -> TestDescription { let num_nodes_with_stake = 10; let num_nodes_without_stake = 0; - TestMetadata { + TestDescription { // TODO: remove once we have fixed the DHT timeout issue // https://github.com/EspressoSystems/HotShot/issues/2088 num_bootstrap_nodes: num_nodes_with_stake, @@ -154,17 +154,17 @@ impl TestMetadata { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestMetadata::default() + ..TestDescription::default() } } /// Default setting with 20 nodes and 8 views of successful views. #[must_use] #[allow(clippy::redundant_field_names)] - pub fn default_more_nodes() -> TestMetadata { + pub fn default_more_nodes() -> TestDescription { let num_nodes_with_stake = 20; let num_nodes_without_stake = 0; - TestMetadata { + TestDescription { num_nodes_with_stake: num_nodes_with_stake, num_nodes_without_stake: num_nodes_without_stake, start_nodes: num_nodes_with_stake, @@ -189,12 +189,12 @@ impl TestMetadata { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestMetadata::default() + ..TestDescription::default() } } } -impl Default for TestMetadata { +impl Default for TestDescription { /// by default, just a single round #[allow(clippy::redundant_field_names)] fn default() -> Self { @@ -228,8 +228,8 @@ impl Default for TestMetadata { } } -impl TestMetadata { - /// turn a description of a test (e.g. a [`TestMetadata`]) into +impl TestDescription { + /// turn a description of a test (e.g. a [`TestDescription`]) into /// a [`TestLauncher`] that can be used to launch the test. /// # Panics /// if some of the the configuration values are zero @@ -244,7 +244,7 @@ impl TestMetadata { where I: NodeImplementation, { - let TestMetadata { + let TestDescription { num_nodes_with_stake, num_bootstrap_nodes, min_transactions, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 7b1ef28e43..8f2ed21cc0 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -11,7 +11,7 @@ use hotshot_types::{ HotShotConfig, }; -use super::{test_builder::TestMetadata, test_runner::TestRunner}; +use super::{test_builder::TestDescription, test_runner::TestRunner}; /// convience type alias for the networks available pub type Networks = ( @@ -37,7 +37,7 @@ pub struct TestLauncher> { /// generator for resources pub resource_generator: ResourceGenerators, /// metadasta used for tasks - pub metadata: TestMetadata, + pub metadata: TestDescription, } impl> TestLauncher { diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 2e8585cac1..f4e14f6f22 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -3,7 +3,7 @@ use std::{cmp::max, marker::PhantomData, sync::Arc}; use committable::Committable; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; @@ -104,7 +104,7 @@ impl TestView { let da_proposal_inner = DAProposal:: { encoded_transactions: encoded_transactions.clone(), - metadata: (), + metadata: TestMetadata, view_number: genesis_view, }; @@ -305,7 +305,7 @@ impl TestView { let da_proposal_inner = DAProposal:: { encoded_transactions: encoded_transactions.clone(), - metadata: (), + metadata: TestMetadata, view_number: next_view, }; diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index e67c26299a..02602318c7 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -23,6 +23,7 @@ async fn test_random_block_builder() { use std::time::Instant; use hotshot_builder_api::block_info::AvailableBlockData; + use hotshot_example_types::block_types::TestMetadata; use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::traits::block_contents::vid_commitment; @@ -77,7 +78,7 @@ async fn test_random_block_builder() { let commitment_for_non_existent_block = TestBlockPayload { transactions: vec![TestTransaction(vec![0; 1])], } - .builder_commitment(&()); + .builder_commitment(&TestMetadata); let result = client .claim_block(commitment_for_non_existent_block, pub_key, &signature) .await; diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index a06936182a..82f372b2bf 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -28,6 +28,7 @@ use sha2::Digest; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -85,7 +86,7 @@ async fn test_consensus_task() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -259,6 +260,7 @@ async fn test_consensus_vote_with_permuted_dac() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { + use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -368,7 +370,7 @@ async fn test_view_sync_finalize_propose() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(4), null_block::builder_fee(4).unwrap(), ), diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 07dfb0f269..b6a30800c4 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ - block_types::TestTransaction, + block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, }; use hotshot_task_impls::{da::DATaskState, events::HotShotEvent::*}; @@ -71,7 +71,7 @@ async fn test_da_task() { ViewChange(ViewNumber::new(2)), BlockRecv( encoded_transactions, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -150,7 +150,7 @@ async fn test_da_task_storage_failure() { ViewChange(ViewNumber::new(2)), BlockRecv( encoded_transactions, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index eabc560b23..091e92662d 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; use tracing::instrument; @@ -17,7 +17,7 @@ use tracing::instrument; async fn libp2p_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -31,7 +31,7 @@ async fn libp2p_network() { next_view_timeout: 4000, ..Default::default() }, - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; metadata @@ -48,7 +48,7 @@ async fn libp2p_network() { async fn libp2p_network_failures_2() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata { + let mut metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -62,7 +62,7 @@ async fn libp2p_network_failures_2() { next_view_timeout: 4000, ..Default::default() }, - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; let dead_nodes = vec![ChangeNode { @@ -96,7 +96,7 @@ async fn libp2p_network_failures_2() { async fn test_stress_libp2p_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata::default_stress(); + let metadata = TestDescription::default_stress(); metadata .gen_launcher::(0) .launch() diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 1d7518e3a0..04a7594911 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -1,23 +1,23 @@ +use std::{sync::Arc, time::Duration}; + use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; -use hotshot::tasks::add_network_message_task; -use hotshot::traits::implementations::MemoryNetwork; -use hotshot_example_types::node_types::MemoryImpl; -use hotshot_example_types::node_types::TestTypes; +use hotshot::{tasks::add_network_message_task, traits::implementations::MemoryNetwork}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task::task::{Task, TaskRegistry}; -use hotshot_task_impls::events::HotShotEvent; -use hotshot_task_impls::network::{self, NetworkEventTaskState}; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing::view_generator::TestViewGenerator; +use hotshot_task_impls::{ + events::HotShotEvent, + network::{self, NetworkEventTaskState}, +}; +use hotshot_testing::{test_builder::TestDescription, view_generator::TestViewGenerator}; use hotshot_types::{ constants::BASE_VERSION, data::ViewNumber, traits::{ - election::Membership, node_implementation::ConsensusTime, node_implementation::NodeType, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, }, }; -use std::sync::Arc; -use std::time::Duration; // Test that the event task sends a message, and the message task receives it // and emits the proper event @@ -29,7 +29,7 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder = TestMetadata::default_multiple_rounds(); + let builder = TestDescription::default_multiple_rounds(); let node_id = 1; let launcher = builder.gen_launcher::(node_id); @@ -97,7 +97,7 @@ async fn test_network_storage_fail() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder = TestMetadata::default_multiple_rounds(); + let builder = TestDescription::default_multiple_rounds(); let node_id = 1; let launcher = builder.gen_launcher::(node_id); diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 4c4bbc6bc9..968fd7a444 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -1,5 +1,8 @@ use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::{ + block_types::TestMetadata, + node_types::{MemoryImpl, TestTypes}, +}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::{exact, quorum_proposal_send, quorum_proposal_validated}, @@ -78,7 +81,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(node_id), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 610ed619a3..37d8fd8924 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -38,6 +38,7 @@ fn make_payload_commitment( #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal() { + use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -98,7 +99,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -119,6 +120,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_qc_timeout() { + use hotshot_example_types::block_types::TestMetadata; use hotshot_types::{data::null_block, simple_vote::TimeoutData}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -158,7 +160,7 @@ async fn test_quorum_proposal_task_qc_timeout() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -179,6 +181,7 @@ async fn test_quorum_proposal_task_qc_timeout() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_view_sync() { + use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -223,7 +226,7 @@ async fn test_quorum_proposal_task_view_sync() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -244,6 +247,7 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_propose_now() { + use hotshot_example_types::block_types::TestMetadata; use hotshot_testing::task_helpers::{build_cert, key_pair_for_id}; use hotshot_types::{ consensus::{CommitmentAndMetadata, ProposalDependencyData}, @@ -277,7 +281,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), - metadata: (), + metadata: TestMetadata, fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, secondary_proposal_information: @@ -292,7 +296,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), - metadata: (), + metadata: TestMetadata, fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, secondary_proposal_information: @@ -317,7 +321,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment_and_metadata: CommitmentAndMetadata { commitment: payload_commitment, builder_commitment, - metadata: (), + metadata: TestMetadata, fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), }, secondary_proposal_information: diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index c95280b941..0656474c0c 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -1,27 +1,29 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; -use hotshot_example_types::state_types::TestTypes; +use std::time::Duration; + +use hotshot_example_types::{ + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + state_types::TestTypes, +}; use hotshot_macros::cross_tests; -use hotshot_testing::completion_task::{ - CompletionTaskDescription, TimeBasedCompletionTaskDescription, +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + test_builder::TestDescription, }; -use hotshot_testing::test_builder::TestMetadata; -use std::time::Duration; -use hotshot_testing::block_builder::SimpleBuilderImplementation; cross_tests!( TestName: test_success, Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { - TestMetadata { + TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - ..TestMetadata::default() + ..TestDescription::default() } }, ); - diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index a00c8ce2bc..83a4be337a 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -1,11 +1,13 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{ + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + state_types::TestTypes, +}; use hotshot_macros::cross_tests; -use hotshot_testing::spinning_task::ChangeNode; -use hotshot_testing::spinning_task::SpinningTaskDescription; -use hotshot_testing::spinning_task::UpDown; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing::block_builder::SimpleBuilderImplementation; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestDescription, +}; // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, @@ -13,7 +15,7 @@ cross_tests!( Types: [TestTypes], Ignore: false, Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); + let mut metadata = TestDescription::default_more_nodes(); metadata.num_bootstrap_nodes = 10; metadata.num_nodes_with_stake = 12; metadata.da_staked_committee_size = 12; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 8d6c88a1d5..6de0d932da 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -5,7 +5,7 @@ use hotshot::{ types::SystemContextHandle, }; use hotshot_example_types::{ - block_types::TestTransaction, + block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, }; use hotshot_macros::test_scripts; @@ -254,7 +254,7 @@ async fn test_upgrade_and_consensus_task() { SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -444,7 +444,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, proposals[1].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -455,7 +455,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(3), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -467,7 +467,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, proposals[3].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(4), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -479,7 +479,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, proposals[4].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(5), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -490,7 +490,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, proposals[5].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(6), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), @@ -502,7 +502,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, proposals[6].data.block_header.builder_commitment.clone(), - (), + TestMetadata, ViewNumber::new(7), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 79aeb5840c..a95c4bf7f4 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::types::SignatureKey; use hotshot_example_types::{ - block_types::{TestBlockPayload, TestTransaction}, + block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::TestTypes, }; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; @@ -49,7 +49,7 @@ async fn test_vid_task() { .expect("Failed to sign block payload!"); let proposal: DAProposal = DAProposal { encoded_transactions: encoded_transactions.clone(), - metadata: (), + metadata: TestMetadata, view_number: ViewNumber::new(2), }; let message = Proposal { @@ -84,7 +84,7 @@ async fn test_vid_task() { input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); input.push(HotShotEvent::BlockRecv( encoded_transactions, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), )); @@ -106,7 +106,7 @@ async fn test_vid_task() { HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - (), + TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 2ba6cbf8d6..9f2a250a41 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -10,7 +10,7 @@ async fn test_catchup() { completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -18,7 +18,7 @@ async fn test_catchup() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestMetadata::default(); + let mut metadata = TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, updown: UpDown::Up, @@ -68,7 +68,7 @@ async fn test_catchup_cdn() { completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); @@ -77,7 +77,7 @@ async fn test_catchup_cdn() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestMetadata::default(); + let mut metadata = TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, @@ -122,7 +122,7 @@ async fn test_catchup_one_node() { completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -130,7 +130,7 @@ async fn test_catchup_one_node() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestMetadata::default(); + let mut metadata = TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, @@ -177,7 +177,7 @@ async fn test_catchup_in_view_sync() { completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -185,7 +185,7 @@ async fn test_catchup_in_view_sync() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestMetadata::default(); + let mut metadata = TestDescription::default(); let catchup_nodes = vec![ ChangeNode { idx: 18, @@ -239,7 +239,7 @@ async fn test_catchup_reload() { completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); @@ -248,7 +248,7 @@ async fn test_catchup_reload() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestMetadata::default(); + let mut metadata = TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, updown: UpDown::Up, diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index b63bb15048..b708d91ff3 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; use tracing::instrument; @@ -17,7 +17,7 @@ use tracing::instrument; async fn push_cdn_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -35,7 +35,7 @@ async fn push_cdn_network() { duration: Duration::from_secs(60), }, ), - ..TestMetadata::default() + ..TestDescription::default() }; metadata .gen_launcher::(0) diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index a77702dbc3..0071584586 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -1,11 +1,13 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{ + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + state_types::TestTypes, +}; use hotshot_macros::cross_tests; -use hotshot_testing::spinning_task::ChangeNode; -use hotshot_testing::spinning_task::SpinningTaskDescription; -use hotshot_testing::spinning_task::UpDown; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing::block_builder::SimpleBuilderImplementation; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestDescription, +}; // Test one node leaving the network. cross_tests!( @@ -14,7 +16,7 @@ cross_tests!( Types: [TestTypes], Ignore: false, Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); + let mut metadata = TestDescription::default_more_nodes(); metadata.num_bootstrap_nodes = 19; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 461fce36a0..74c36f2b20 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -1,11 +1,13 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{ + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + state_types::TestTypes, +}; use hotshot_macros::cross_tests; -use hotshot_testing::spinning_task::ChangeNode; -use hotshot_testing::spinning_task::SpinningTaskDescription; -use hotshot_testing::spinning_task::UpDown; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing::block_builder::SimpleBuilderImplementation; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestDescription, +}; // Test f/2 nodes leaving the network. cross_tests!( TestName: test_with_failures_half_f, @@ -13,7 +15,7 @@ cross_tests!( Types: [TestTypes], Ignore: false, Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); + let mut metadata = TestDescription::default_more_nodes(); metadata.num_bootstrap_nodes = 17; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the @@ -45,4 +47,3 @@ cross_tests!( metadata } ); - diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 97ed8f70ce..a46a402e06 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -1,11 +1,13 @@ -use hotshot_example_types::node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{ + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + state_types::TestTypes, +}; use hotshot_macros::cross_tests; -use hotshot_testing::spinning_task::ChangeNode; -use hotshot_testing::spinning_task::SpinningTaskDescription; -use hotshot_testing::spinning_task::UpDown; -use hotshot_testing::test_builder::TestMetadata; -use hotshot_testing::block_builder::SimpleBuilderImplementation; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::TestDescription, +}; // Test f nodes leaving the network. cross_tests!( TestName: test_with_failures_f, @@ -14,7 +16,7 @@ cross_tests!( Ignore: false, Metadata: { - let mut metadata = TestMetadata::default_more_nodes(); + let mut metadata = TestDescription::default_more_nodes(); metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 22; @@ -58,4 +60,3 @@ cross_tests!( metadata } ); - diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 93ed12e866..9bf85792a1 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; use rand::Rng; use tracing::instrument; @@ -21,7 +21,7 @@ async fn test_combined_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestMetadata = TestMetadata { + let metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -39,7 +39,7 @@ async fn test_combined_network() { duration: Duration::from_secs(120), }, ), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; metadata @@ -56,7 +56,7 @@ async fn test_combined_network() { async fn test_combined_network_cdn_crash() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestMetadata = TestMetadata { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -75,7 +75,7 @@ async fn test_combined_network_cdn_crash() { duration: Duration::from_secs(120), }, ), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; let mut all_nodes = vec![]; @@ -105,7 +105,7 @@ async fn test_combined_network_cdn_crash() { async fn test_combined_network_reup() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestMetadata = TestMetadata { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -124,7 +124,7 @@ async fn test_combined_network_reup() { duration: Duration::from_secs(120), }, ), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; let mut all_down = vec![]; @@ -158,7 +158,7 @@ async fn test_combined_network_reup() { async fn test_combined_network_half_dc() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestMetadata = TestMetadata { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -177,7 +177,7 @@ async fn test_combined_network_half_dc() { duration: Duration::from_secs(120), }, ), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; let mut half = vec![]; @@ -234,7 +234,7 @@ fn generate_random_node_changes( async fn test_stress_combined_network_fuzzy() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestMetadata { + let mut metadata = TestDescription { num_bootstrap_nodes: 10, num_nodes_with_stake: 20, start_nodes: 20, @@ -252,7 +252,7 @@ async fn test_stress_combined_network_fuzzy() { duration: Duration::from_secs(120), }, ), - ..TestMetadata::default_stress() + ..TestDescription::default_stress() }; metadata.spinning_properties = SpinningTaskDescription { diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 0fa4cd72ed..7c0f6de2a5 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -6,15 +6,13 @@ async fn test_timeout_web() { use std::time::Duration; - use hotshot_example_types::node_types::WebImpl; - - use hotshot_example_types::node_types::TestTypes; - use hotshot_testing::block_builder::SimpleBuilderImplementation; + use hotshot_example_types::node_types::{TestTypes, WebImpl}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -23,7 +21,7 @@ async fn test_timeout_web() { ..Default::default() }; - let mut metadata = TestMetadata { + let mut metadata = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, ..Default::default() @@ -68,18 +66,15 @@ async fn test_timeout_web() { async fn test_timeout_libp2p() { use std::time::Duration; - use hotshot_example_types::node_types::Libp2pImpl; - + use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; - use hotshot_example_types::node_types::TestTypes; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { @@ -89,7 +84,7 @@ async fn test_timeout_libp2p() { ..Default::default() }; - let mut metadata = TestMetadata { + let mut metadata = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, num_bootstrap_nodes: 10, diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 308abdcaf6..c9203f1081 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -1,17 +1,14 @@ -use hotshot_testing::block_builder::SimpleBuilderImplementation; -use hotshot_testing::test_builder::TimingData; -use hotshot_types::traits::network::AsynchronousNetwork; -use hotshot_types::traits::network::ChaosNetwork; -use hotshot_types::traits::network::PartiallySynchronousNetwork; -use hotshot_types::traits::network::SynchronousNetwork; -use std::time::Duration; -use std::time::Instant; +use std::time::{Duration, Instant}; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - test_builder::TestMetadata, + test_builder::{TestDescription, TimingData}, +}; +use hotshot_types::traits::network::{ + AsynchronousNetwork, ChaosNetwork, PartiallySynchronousNetwork, SynchronousNetwork, }; use tracing::instrument; @@ -21,7 +18,7 @@ use tracing::instrument; async fn libp2p_network_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -35,7 +32,7 @@ async fn libp2p_network_sync() { delay_high_ms: 30, delay_low_ms: 4, })), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; metadata @@ -49,16 +46,17 @@ async fn libp2p_network_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_sync() { + use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::TestMetadata, + test_builder::TestDescription, }; - use std::time::Duration; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -69,7 +67,7 @@ async fn test_memory_network_sync() { delay_high_ms: 30, delay_low_ms: 4, })), - ..TestMetadata::default() + ..TestDescription::default() }; metadata .gen_launcher::(0) @@ -85,7 +83,7 @@ async fn test_memory_network_sync() { async fn libp2p_network_async() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 50, @@ -99,7 +97,7 @@ async fn libp2p_network_async() { timing_data: TimingData { timeout_ratio: (1, 1), next_view_timeout: 25000, - ..TestMetadata::default_multiple_rounds().timing_data + ..TestDescription::default_multiple_rounds().timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 9, @@ -107,7 +105,7 @@ async fn libp2p_network_async() { delay_low_ms: 4, delay_high_ms: 30, })), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; metadata @@ -122,17 +120,17 @@ async fn libp2p_network_async() { #[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_async() { + use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::TestMetadata, + test_builder::TestDescription, }; - use std::time::Duration; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 5000, @@ -147,7 +145,7 @@ async fn test_memory_network_async() { timing_data: TimingData { timeout_ratio: (1, 1), next_view_timeout: 1000, - ..TestMetadata::default_multiple_rounds().timing_data + ..TestDescription::default_multiple_rounds().timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 95, @@ -155,7 +153,7 @@ async fn test_memory_network_async() { delay_low_ms: 4, delay_high_ms: 30, })), - ..TestMetadata::default() + ..TestDescription::default() }; metadata .gen_launcher::(0) @@ -168,17 +166,17 @@ async fn test_memory_network_async() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_partially_sync() { + use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::TestMetadata, + test_builder::TestDescription, }; - use std::time::Duration; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 2, ..Default::default() @@ -207,7 +205,7 @@ async fn test_memory_network_partially_sync() { gst: std::time::Duration::from_millis(1000), start: Instant::now(), })), - ..TestMetadata::default() + ..TestDescription::default() }; metadata .gen_launcher::(0) @@ -222,7 +220,7 @@ async fn test_memory_network_partially_sync() { async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 2, ..Default::default() @@ -246,7 +244,7 @@ async fn libp2p_network_partially_sync() { gst: std::time::Duration::from_millis(1000), start: Instant::now(), })), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; metadata @@ -261,16 +259,17 @@ async fn libp2p_network_partially_sync() { #[ignore] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_memory_network_chaos() { + use std::time::Duration; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::TestMetadata, + test_builder::TestDescription, }; - use std::time::Duration; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -285,7 +284,7 @@ async fn test_memory_network_chaos() { repeat_low: 1, repeat_high: 5, })), - ..TestMetadata::default() + ..TestDescription::default() }; metadata .gen_launcher::(0) @@ -301,7 +300,7 @@ async fn test_memory_network_chaos() { async fn libp2p_network_chaos() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -319,7 +318,7 @@ async fn libp2p_network_chaos() { repeat_low: 1, repeat_high: 5, })), - ..TestMetadata::default_multiple_rounds() + ..TestDescription::default_multiple_rounds() }; metadata diff --git a/testing/tests/tests_5/web_server.rs b/testing/tests/tests_5/web_server.rs index 2c551247c2..99ec3ff64b 100644 --- a/testing/tests/tests_5/web_server.rs +++ b/testing/tests/tests_5/web_server.rs @@ -6,7 +6,7 @@ use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - test_builder::{TestMetadata, TimingData}, + test_builder::{TestDescription, TimingData}, }; use tracing::instrument; @@ -17,7 +17,7 @@ use tracing::instrument; async fn web_server_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestMetadata { + let metadata = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -35,7 +35,7 @@ async fn web_server_network() { duration: Duration::from_secs(60), }, ), - ..TestMetadata::default() + ..TestDescription::default() }; metadata .gen_launcher::(0) diff --git a/types/src/data.rs b/types/src/data.rs index a55e9f2ac7..93dff8b203 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -740,13 +740,13 @@ pub mod null_block { [0_u8; 32], 0, ); - let (null_block, null_block_metadata) = + let (_null_block, null_block_metadata) = ::from_transactions([]).ok()?; match TYPES::BuilderSignatureKey::sign_fee( &priv_key, FEE_AMOUNT, - &null_block.builder_commitment(&null_block_metadata), + &null_block_metadata, &commitment(num_storage_nodes)?, ) { Ok(sig) => Some(BuilderFee { diff --git a/types/src/traits.rs b/types/src/traits.rs index a698d2c158..765b202402 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -11,5 +11,5 @@ pub mod stake_table; pub mod states; pub mod storage; -pub use block_contents::BlockPayload; +pub use block_contents::{BlockPayload, EncodeBytes}; pub use states::ValidatedState; diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 3e33635495..6f692e05f2 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -23,6 +23,12 @@ use crate::{ vid::{vid_scheme, VidCommitment, VidSchemeType}, }; +/// Trait for structures that need to be unambiguously encoded as bytes. +pub trait EncodeBytes { + /// Encode `&self` + fn encode(&self) -> Arc<[u8]>; +} + /// Abstraction over any type of transaction. Used by [`BlockPayload`]. pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash @@ -47,7 +53,15 @@ pub trait BlockPayload: type Transaction: Transaction; /// Data created during block building which feeds into the block header - type Metadata: Clone + Debug + DeserializeOwned + Eq + Hash + Send + Sync + Serialize; + type Metadata: Clone + + Debug + + DeserializeOwned + + Eq + + Hash + + Send + + Sync + + Serialize + + EncodeBytes; /// Build a payload and associated metadata with the transactions. /// diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 379951d655..3c1ac230a6 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -10,6 +10,7 @@ use jf_primitives::{errors::PrimitivesError, vid::VidScheme}; use serde::{Deserialize, Serialize}; use tagged_base64::TaggedBase64; +use super::EncodeBytes; use crate::{utils::BuilderCommitment, vid::VidSchemeType}; /// Type representing stake table entries in a `StakeTable` @@ -188,6 +189,34 @@ pub trait BuilderSignatureKey: /// validate the message with the builder's public key fn validate_builder_signature(&self, signature: &Self::BuilderSignature, data: &[u8]) -> bool; + /// validate signature over fee information with the builder's public key + fn validate_fee_signature( + &self, + signature: &Self::BuilderSignature, + fee_amount: u64, + metadata: &Metadata, + vid_commitment: &::Commit, + ) -> bool { + self.validate_builder_signature( + signature, + &aggregate_fee_data(fee_amount, metadata, vid_commitment), + ) + } + + /// validate signature over block information with the builder's public key + fn validate_block_info_signature( + &self, + signature: &Self::BuilderSignature, + block_size: u64, + fee_amount: u64, + payload_commitment: &BuilderCommitment, + ) -> bool { + self.validate_builder_signature( + signature, + &aggregate_block_info_data(block_size, fee_amount, payload_commitment), + ) + } + /// sign the message with the builder's private key /// # Errors /// If unable to sign the data with the key @@ -199,17 +228,16 @@ pub trait BuilderSignatureKey: /// sign fee offer for proposed payload /// # Errors /// If unable to sign the data with the key - fn sign_fee( + fn sign_fee( private_key: &Self::BuilderPrivateKey, fee_amount: u64, - payload_commitment: &BuilderCommitment, + metadata: &Metadata, vid_commitment: &::Commit, ) -> Result { - let mut fee_info: Vec = Vec::new(); - fee_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); - fee_info.extend_from_slice(payload_commitment.as_ref()); - fee_info.extend_from_slice(vid_commitment.as_ref()); - Self::sign_builder_message(private_key, &fee_info) + Self::sign_builder_message( + private_key, + &aggregate_fee_data(fee_amount, metadata, vid_commitment), + ) } /// sign information about offered block @@ -221,13 +249,38 @@ pub trait BuilderSignatureKey: fee_amount: u64, payload_commitment: &BuilderCommitment, ) -> Result { - let mut block_info: Vec = Vec::new(); - block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); - block_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); - block_info.extend_from_slice(payload_commitment.as_ref()); - Self::sign_builder_message(private_key, &block_info) + Self::sign_builder_message( + private_key, + &aggregate_block_info_data(block_size, fee_amount, payload_commitment), + ) } /// Generate a new key pair fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::BuilderPrivateKey); } + +/// Aggregate all inputs used for signature over fee data +fn aggregate_fee_data( + fee_amount: u64, + metadata: &Metadata, + vid_commitment: &::Commit, +) -> Vec { + let mut fee_info = Vec::new(); + fee_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); + fee_info.extend_from_slice(metadata.encode().as_ref()); + fee_info.extend_from_slice(vid_commitment.as_ref()); + fee_info +} + +/// Aggregate all inputs used for signature over block info +fn aggregate_block_info_data( + block_size: u64, + fee_amount: u64, + payload_commitment: &BuilderCommitment, +) -> Vec { + let mut block_info = Vec::new(); + block_info.extend_from_slice(block_size.to_be_bytes().as_ref()); + block_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); + block_info.extend_from_slice(payload_commitment.as_ref()); + block_info +} From 20650cc38d0f08476cd6ebd7765572dce74a9dc6 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 23 Apr 2024 18:48:28 -0400 Subject: [PATCH 0978/1393] Move instance_state out of lock (#3022) * Move instance_state out of lock * fix metrics name --- hotshot/src/lib.rs | 35 +++++++++++++++++--- hotshot/src/tasks/task_state.rs | 3 ++ task-impls/src/consensus/mod.rs | 3 ++ task-impls/src/consensus/proposal_helpers.rs | 15 +++++++-- task-impls/src/quorum_proposal.rs | 20 +++++++++-- task-impls/src/quorum_vote.rs | 5 ++- types/src/consensus.rs | 3 -- 7 files changed, 71 insertions(+), 13 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 06240bfdcb..a2f79ee80c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -113,7 +113,6 @@ pub struct Memberships { } /// Holds the state needed to participate in `HotShot` consensus -#[derive(Clone)] pub struct SystemContext> { /// The public key of this node public_key: TYPES::SignatureKey, @@ -131,11 +130,14 @@ pub struct SystemContext> { pub memberships: Arc>, /// the metrics that the implementor is using. - _metrics: Arc, + metrics: Arc, /// The hotstuff implementation consensus: Arc>>, + /// Immutable instance state + instance_state: Arc, + /// The network version version: Arc>, @@ -159,6 +161,26 @@ pub struct SystemContext> { /// Reference to the internal storage for consensus datum. pub storage: Arc>, } +impl> Clone for SystemContext { + fn clone(&self) -> Self { + Self { + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), + config: self.config.clone(), + networks: self.networks.clone(), + memberships: self.memberships.clone(), + metrics: self.metrics.clone(), + consensus: self.consensus.clone(), + instance_state: self.instance_state.clone(), + version: self.version.clone(), + start_view: self.start_view, + output_event_stream: self.output_event_stream.clone(), + internal_event_stream: self.internal_event_stream.clone(), + id: self.id, + storage: self.storage.clone(), + } + } +} impl> SystemContext { /// Creates a new [`Arc`] with the given configuration options. @@ -258,7 +280,6 @@ impl> SystemContext { } let consensus = Consensus { - instance_state, validated_state_map, vid_shares: BTreeMap::new(), cur_view: anchored_leaf.get_view_number(), @@ -282,6 +303,7 @@ impl> SystemContext { let inner: Arc> = Arc::new(SystemContext { id: nonce, consensus, + instance_state: Arc::new(instance_state), public_key, private_key, config, @@ -289,7 +311,7 @@ impl> SystemContext { start_view: initializer.start_view, networks: Arc::new(networks), memberships: Arc::new(memberships), - _metrics: consensus_metrics.clone(), + metrics: consensus_metrics.clone(), internal_event_stream: (internal_tx, internal_rx.deactivate()), output_event_stream: (external_tx, external_rx), storage: Arc::new(RwLock::new(storage)), @@ -384,6 +406,11 @@ impl> SystemContext { self.consensus.clone() } + /// Returns a copy of the instance state + pub fn get_instance_state(&self) -> Arc { + self.instance_state.clone() + } + /// Returns a copy of the last decided leaf /// # Panics /// Panics if internal leaf for consensus is inconsistent diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 4af385d152..b1261a2ea4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -185,6 +185,7 @@ impl> CreateTaskState ConsensusTaskState { consensus, + instance_state: handle.hotshot.get_instance_state(), timeout: handle.hotshot.config.next_view_timeout, round_start_delay: handle.hotshot.config.round_start_delay, cur_view: handle.get_cur_view().await, @@ -224,6 +225,7 @@ impl> CreateTaskState public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), consensus, + instance_state: handle.hotshot.get_instance_state(), latest_voted_view: handle.get_cur_view().await, vote_dependencies: HashMap::new(), quorum_network: handle.hotshot.networks.quorum_network.clone(), @@ -252,6 +254,7 @@ impl> CreateTaskState committee_network: handle.hotshot.networks.da_network.clone(), output_event_stream: handle.hotshot.output_event_stream.0.clone(), consensus, + instance_state: handle.hotshot.get_instance_state(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.get_cur_view().await, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 268a0b7fbd..d1223453de 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -77,6 +77,8 @@ pub struct ConsensusTaskState< pub private_key: ::PrivateKey, /// Reference to consensus. The replica will require a write lock on this. pub consensus: Arc>>, + /// Immutable instance state + pub instance_state: Arc, /// View timeout from config. pub timeout: u64, /// Round start delay from config, in milliseconds. @@ -357,6 +359,7 @@ impl, A: ConsensusApi + self.decided_upgrade_cert.clone(), &mut self.payload_commitment_and_metadata, &mut self.proposal_cert, + self.instance_state.clone(), ) .await?; diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 51a9e2de84..4cb393ea3f 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -56,10 +56,11 @@ pub async fn validate_proposal_safety_and_liveness( sender: TYPES::SignatureKey, event_sender: Sender>, storage: Arc>>, + instance_state: Arc, ) -> Result<()> { let (validated_state, state_delta) = parent_state .validate_and_apply_header( - &consensus.read().await.instance_state, + instance_state.as_ref(), &parent_leaf, &proposal.data.block_header.clone(), ) @@ -199,10 +200,11 @@ pub async fn create_and_send_proposal( upgrade_cert: Option>, proposal_cert: Option>, round_start_delay: u64, + instance_state: Arc, ) { let block_header = TYPES::BlockHeader::new( state.as_ref(), - &consensus.read().await.instance_state, + instance_state.as_ref(), &parent_leaf, commitment_and_metadata.commitment, commitment_and_metadata.builder_commitment, @@ -394,6 +396,7 @@ async fn publish_proposal_from_upgrade_cert( consensus: Arc>>, upgrade_cert: UpgradeCertificate, delay: u64, + instance_state: Arc, ) -> Result> { let (parent_leaf, state) = get_parent_leaf_and_state( cur_view, @@ -433,6 +436,7 @@ async fn publish_proposal_from_upgrade_cert( Some(upgrade_cert), None, delay, + instance_state.clone(), ) .await; })) @@ -454,6 +458,7 @@ async fn publish_proposal_from_commitment_and_metadata( decided_upgrade_cert: Option>, commitment_and_metadata: &mut Option>, proposal_cert: &mut Option>, + instance_state: Arc, ) -> Result> { let (parent_leaf, state) = get_parent_leaf_and_state( cur_view, @@ -507,6 +512,7 @@ async fn publish_proposal_from_commitment_and_metadata( proposal_upgrade_certificate, proposal_certificate, delay, + instance_state.clone(), ) .await; }); @@ -533,6 +539,7 @@ pub async fn publish_proposal_if_able( decided_upgrade_cert: Option>, commitment_and_metadata: &mut Option>, proposal_cert: &mut Option>, + instance_state: Arc, ) -> Result> { if let Some(upgrade_cert) = decided_upgrade_cert { publish_proposal_from_upgrade_cert( @@ -545,6 +552,7 @@ pub async fn publish_proposal_if_able( consensus, upgrade_cert, delay, + instance_state, ) .await } else { @@ -561,6 +569,7 @@ pub async fn publish_proposal_if_able( decided_upgrade_cert, commitment_and_metadata, proposal_cert, + instance_state, ) .await } @@ -748,6 +757,7 @@ pub async fn handle_quorum_proposal_recv< task_state.decided_upgrade_cert.clone(), &mut task_state.payload_commitment_and_metadata, &mut task_state.proposal_cert, + task_state.instance_state.clone(), ) .await?; @@ -785,6 +795,7 @@ pub async fn handle_quorum_proposal_recv< sender, task_state.output_event_stream.clone(), task_state.storage.clone(), + task_state.instance_state.clone(), ) .map(AnyhowTracing::err_as_debug), )); diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 2802ada50a..daae47daff 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -73,6 +73,9 @@ struct ProposalDependencyHandle { /// Reference to consensus. The replica will require a write lock on this. consensus: Arc>>, + /// Immutable instance state + instance_state: Arc, + /// Output events to application #[allow(dead_code)] output_event_stream: async_broadcast::Sender>, @@ -110,6 +113,7 @@ impl ProposalDependencyHandle { view: TYPES::Time, event_stream: &Sender>>, commit_and_metadata: CommitmentAndMetadata, + instance_state: Arc, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. @@ -181,7 +185,7 @@ impl ProposalDependencyHandle { // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. let block_header = TYPES::BlockHeader::new( state, - &consensus.instance_state, + instance_state.as_ref(), &parent_leaf, commit_and_metadata.commitment, commit_and_metadata.builder_commitment, @@ -306,8 +310,13 @@ impl HandleDepOutput for ProposalDependencyHandle { return; } - self.publish_proposal_if_able(self.view_number, &self.sender, commit_and_metadata.unwrap()) - .await; + self.publish_proposal_if_able( + self.view_number, + &self.sender, + commit_and_metadata.unwrap(), + self.instance_state.clone(), + ) + .await; } } @@ -331,6 +340,9 @@ pub struct QuorumProposalTaskState /// Reference to consensus. The replica will require a write lock on this. pub consensus: Arc>>, + /// Immutable instance state + pub instance_state: Arc, + /// Membership for Timeout votes/certs pub timeout_membership: Arc, @@ -579,6 +591,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> { /// Reference to consensus. The replica will require a write lock on this. pub consensus: Arc>>, + /// Immutable instance state + pub instance_state: Arc, + /// Latest view number that has been voted for. pub latest_voted_view: TYPES::Time, @@ -465,7 +468,7 @@ impl> QuorumVoteTaskState = BTreeMap< /// This will contain the state of all rounds. #[derive(custom_debug::Debug)] pub struct Consensus { - /// Immutable instance-level state. - pub instance_state: TYPES::InstanceState, - /// The validated states that are currently loaded in memory. pub validated_state_map: BTreeMap>, From 9a3ccb3ae5ca30f349e844e271603e1eaf48cfb1 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 24 Apr 2024 21:37:34 +0800 Subject: [PATCH 0979/1393] Remove consensusApi from consensus task (#3025) --- hotshot/src/tasks/mod.rs | 8 ++------ hotshot/src/tasks/task_state.rs | 7 ++----- task-impls/src/consensus/mod.rs | 18 +++--------------- task-impls/src/consensus/proposal_helpers.rs | 9 ++------- testing/src/predicates/upgrade.rs | 4 +--- testing/tests/tests_1/consensus_task.rs | 8 -------- testing/tests/tests_1/proposal_ordering.rs | 3 +-- testing/tests/tests_1/upgrade_task.rs | 3 --- 8 files changed, 11 insertions(+), 49 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 77db227891..64a293b3c7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -141,12 +141,8 @@ pub async fn add_network_event_task< } /// Setup polls for the given `consensus_state` -pub async fn inject_consensus_polls< - TYPES: NodeType, - I: NodeImplementation, - API: ConsensusApi, ->( - consensus_state: &ConsensusTaskState, +pub async fn inject_consensus_polls>( + consensus_state: &ConsensusTaskState, ) { // Poll (forever) for the latest quorum proposal consensus_state diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index b1261a2ea4..7f0cd06464 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -176,11 +176,9 @@ impl, Ver: StaticVersionType> #[async_trait] impl> CreateTaskState - for ConsensusTaskState> + for ConsensusTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> ConsensusTaskState> { + async fn create_from(handle: &SystemContextHandle) -> ConsensusTaskState { let consensus = handle.hotshot.get_consensus(); ConsensusTaskState { @@ -190,7 +188,6 @@ impl> CreateTaskState round_start_delay: handle.hotshot.config.round_start_delay, cur_view: handle.get_cur_view().await, payload_commitment_and_metadata: None, - api: handle.clone(), vote_collector: None.into(), timeout_vote_collector: None.into(), timeout_task: None, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index d1223453de..5a7850bfe3 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -21,7 +21,6 @@ use hotshot_types::{ simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, - consensus_api::ConsensusApi, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -66,11 +65,7 @@ type VoteCollectorOption = Option, - A: ConsensusApi + 'static, -> { +pub struct ConsensusTaskState> { /// Our public key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -104,9 +99,6 @@ pub struct ConsensusTaskState< /// Membership for DA committee Votes/certs pub committee_membership: Arc, - /// Consensus api - pub api: A, - /// Current Vote collection task, with it's view. pub vote_collector: RwLock, QuorumCertificate>>, @@ -154,9 +146,7 @@ pub struct ConsensusTaskState< pub storage: Arc>, } -impl, A: ConsensusApi + 'static> - ConsensusTaskState -{ +impl> ConsensusTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view async fn cancel_tasks(&mut self, view: TYPES::Time) { let keep = self.spawned_tasks.split_off(&view); @@ -1007,9 +997,7 @@ impl, A: ConsensusApi + } } -impl, A: ConsensusApi + 'static> TaskState - for ConsensusTaskState -{ +impl> TaskState for ConsensusTaskState { type Event = Arc>; type Output = (); fn filter(&self, event: &Arc>) -> bool { diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 4cb393ea3f..be43f49292 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -17,7 +17,6 @@ use hotshot_types::{ simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, - consensus_api::ConsensusApi, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, @@ -580,15 +579,11 @@ pub async fn publish_proposal_if_able( /// /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. #[allow(clippy::too_many_lines)] -pub async fn handle_quorum_proposal_recv< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi, ->( +pub async fn handle_quorum_proposal_recv>( proposal: &Proposal>, sender: &TYPES::SignatureKey, event_stream: Sender>>, - task_state: &mut ConsensusTaskState, + task_state: &mut ConsensusTaskState, ) -> Result>> { let sender = sender.clone(); debug!( diff --git a/testing/src/predicates/upgrade.rs b/testing/src/predicates/upgrade.rs index 0b74342fd0..b5f33ec56c 100644 --- a/testing/src/predicates/upgrade.rs +++ b/testing/src/predicates/upgrade.rs @@ -1,15 +1,13 @@ use std::sync::Arc; use async_trait::async_trait; -use hotshot::types::SystemContextHandle; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::consensus::ConsensusTaskState; use hotshot_types::simple_certificate::UpgradeCertificate; use crate::predicates::{Predicate, PredicateResult}; -type ConsensusTaskTestState = - ConsensusTaskState>; +type ConsensusTaskTestState = ConsensusTaskState; type UpgradeCertCallback = Arc>>) -> bool + Send + Sync>; diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 82f372b2bf..6d8a1f8b02 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,6 +1,5 @@ use hotshot::{ tasks::{inject_consensus_polls, task_state::CreateTaskState}, - types::SystemContextHandle, }; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; @@ -102,7 +101,6 @@ async fn test_consensus_task() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -163,7 +161,6 @@ async fn test_consensus_vote() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -233,7 +230,6 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -386,7 +382,6 @@ async fn test_view_sync_finalize_propose() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -486,7 +481,6 @@ async fn test_view_sync_finalize_vote() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -596,7 +590,6 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -651,7 +644,6 @@ async fn test_vid_disperse_storage_failure() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 968fd7a444..32474d7595 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -1,4 +1,4 @@ -use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot::{tasks::task_state::CreateTaskState}; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, @@ -106,7 +106,6 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 6de0d932da..fcb242edc8 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -151,7 +151,6 @@ async fn test_consensus_task_upgrade() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; @@ -225,7 +224,6 @@ async fn test_upgrade_and_consensus_task() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; let mut upgrade_state = UpgradeTaskState::< @@ -417,7 +415,6 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let consensus_state = ConsensusTaskState::< TestTypes, MemoryImpl, - SystemContextHandle, >::create_from(&handle) .await; let mut upgrade_state = UpgradeTaskState::< From ba352f96b06e7d1ebc53e51c81a7fb544ea6f8f5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 24 Apr 2024 10:31:30 -0400 Subject: [PATCH 0980/1393] Miscellaneous fixes (#3014) --- task-impls/src/consensus/mod.rs | 9 ++++++++- task-impls/src/consensus/proposal_helpers.rs | 10 ++++++++-- task-impls/src/quorum_proposal.rs | 3 ++- task-impls/src/transactions.rs | 4 ++-- testing/tests/tests_1/quorum_proposal_task.rs | 3 +++ types/src/consensus.rs | 2 ++ 6 files changed, 25 insertions(+), 6 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 5a7850bfe3..4e05145dbc 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -826,6 +826,12 @@ impl> ConsensusTaskState } } + if let Some(commitment_and_metadata) = &self.payload_commitment_and_metadata { + if commitment_and_metadata.block_view < old_view_number { + self.payload_commitment_and_metadata = None; + } + } + // update the view in state to the one in the message // Publish a view change event to the application // Returns if the view does not need updating. @@ -842,7 +848,7 @@ impl> ConsensusTaskState ) .await { - warn!("Failed to update view; error = {e:?}"); + tracing::trace!("Failed to update view; error = {e:?}"); return; } @@ -930,6 +936,7 @@ impl> ConsensusTaskState builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), fee: fee.clone(), + block_view: view, }); if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index be43f49292..f6d03c6cc8 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -281,7 +281,7 @@ pub fn validate_proposal_view_and_certs( if proposal.data.justify_qc.get_view_number() != view - 1 { let received_proposal_cert = proposal.data.proposal_certificate.clone().context(format!( -"Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", + "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", *view ))?; @@ -429,6 +429,7 @@ async fn publish_proposal_from_upgrade_cert( builder_commitment, metadata, fee: null_block_fee, + block_view: view, }, parent_leaf, state, @@ -495,9 +496,14 @@ async fn publish_proposal_from_commitment_and_metadata( // FIXME - This is not great, and will be fixed later. // If it's > July, 2024 and this is still here, something has gone horribly wrong. let cnm = commitment_and_metadata - .clone() + .take() .context("Cannot propose because we don't have the VID payload commitment and metadata")?; + ensure!( + cnm.block_view == view, + "Cannot propose because our VID payload commitment and metadata is for an older view." + ); + let create_and_send_proposal_handle = async_spawn(async move { create_and_send_proposal( public_key, diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index daae47daff..7551d7772a 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -262,7 +262,7 @@ impl HandleDepOutput for ProposalDependencyHandle { payload_commitment, builder_commitment, metadata, - _view, + view, fee, ) => { debug!("Got commit and meta {:?}", payload_commitment); @@ -271,6 +271,7 @@ impl HandleDepOutput for ProposalDependencyHandle { builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), fee: fee.clone(), + block_view: *view, }); } HotShotEvent::QCFormed(cert) => match cert { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4e69911ba4..edb77b238c 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -87,7 +87,7 @@ impl< > TransactionTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error")] pub async fn handle( &mut self, event: Arc>, @@ -238,7 +238,7 @@ impl< consensus.get_decided_leaf().get_payload_commitment() } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "wait_for_block", level = "error")] async fn wait_for_block(&self) -> Option> { let task_start_time = Instant::now(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 37d8fd8924..619e427c4b 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -283,6 +283,7 @@ async fn test_quorum_proposal_task_propose_now() { builder_commitment: builder_commitment.clone(), metadata: TestMetadata, fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + block_view: ViewNumber::new(2), }, secondary_proposal_information: hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate( @@ -298,6 +299,7 @@ async fn test_quorum_proposal_task_propose_now() { builder_commitment: builder_commitment.clone(), metadata: TestMetadata, fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + block_view: ViewNumber::new(2), }, secondary_proposal_information: hotshot_types::consensus::SecondaryProposalInformation::Timeout(build_cert::< @@ -323,6 +325,7 @@ async fn test_quorum_proposal_task_propose_now() { builder_commitment, metadata: TestMetadata, fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + block_view: ViewNumber::new(2), }, secondary_proposal_information: hotshot_types::consensus::SecondaryProposalInformation::ViewSync(build_cert::< diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 112ca40cf7..899d8b994a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -407,6 +407,8 @@ pub struct CommitmentAndMetadata { pub metadata: ::Metadata, /// Builder fee data pub fee: BuilderFee, + /// View number this block is for + pub block_view: TYPES::Time, } /// Helper type to hold the optional secondary information required to propose. From f0445a9957058400bc65b77edcca1ab73cf5c091 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 24 Apr 2024 15:56:42 -0400 Subject: [PATCH 0981/1393] Add instrument tracing to the network task (#3031) --- task-impls/src/network.rs | 2 ++ types/Cargo.toml | 1 + types/src/message.rs | 14 ++++++++++++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 5509c0e713..52f6b35e97 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -101,11 +101,13 @@ impl TaskState for NetworkMessageTaskState { } impl NetworkMessageTaskState { + #[instrument(skip_all, name = "Network message task", level = "trace")] /// Handle the message. pub async fn handle_messages(&mut self, messages: Vec>) { // We will send only one event for a vector of transactions. let mut transactions = Vec::new(); for message in messages { + tracing::trace!("Received message from network:\n\n{message:?}"); let sender = message.sender; match message.kind { MessageKind::Consensus(consensus_message) => { diff --git a/types/Cargo.toml b/types/Cargo.toml index c15ab430e4..f5b6f7429d 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -28,6 +28,7 @@ either = { workspace = true } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } +cdn-proto = { workspace = true } generic-array = { workspace = true } diff --git a/types/src/message.rs b/types/src/message.rs index 2a061366ae..42d2e470f1 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -3,9 +3,10 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use std::{fmt::Debug, marker::PhantomData}; +use std::{fmt, fmt::Debug, marker::PhantomData}; use anyhow::{ensure, Result}; +use cdn_proto::mnemonic; use committable::Committable; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -30,7 +31,7 @@ use crate::{ }; /// Incoming message -#[derive(Serialize, Deserialize, Clone, Debug, Derivative, PartialEq, Eq, Hash)] +#[derive(Serialize, Deserialize, Clone, Derivative, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] pub struct Message { /// The sender of this message @@ -40,6 +41,15 @@ pub struct Message { pub kind: MessageKind, } +impl fmt::Debug for Message { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Message") + .field("sender", &mnemonic(&self.sender)) + .field("kind", &self.kind) + .finish() + } +} + impl NetworkMsg for Message {} impl ViewMessage for Message { From 68e5a3e422bb416a9363ab4e0e65a26627943ac3 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 25 Apr 2024 10:27:25 -0400 Subject: [PATCH 0982/1393] Add lint for .clone() on references (#3007) Co-authored-by: Artemii Gerasimovich --- hotshot-stake-table/src/mt_based.rs | 4 +- hotshot/src/lib.rs | 102 ++++++++++-------- hotshot/src/tasks/mod.rs | 24 ++--- hotshot/src/tasks/task_state.rs | 35 +++--- .../src/traits/networking/combined_network.rs | 4 +- .../src/traits/networking/libp2p_network.rs | 30 +++--- .../src/traits/networking/memory_network.rs | 4 +- .../src/traits/networking/push_cdn_network.rs | 2 +- .../traits/networking/web_server_network.rs | 22 ++-- hotshot/src/types/handle.rs | 2 +- libp2p-networking/tests/common/mod.rs | 8 +- libp2p-networking/tests/counter.rs | 2 +- task-impls/src/consensus/mod.rs | 22 ++-- task-impls/src/consensus/proposal_helpers.rs | 40 +++---- task-impls/src/da.rs | 8 +- task-impls/src/harness.rs | 9 +- task-impls/src/network.rs | 8 +- task-impls/src/quorum_proposal.rs | 38 +++---- task-impls/src/quorum_vote.rs | 8 +- task-impls/src/request.rs | 4 +- task-impls/src/response.rs | 3 +- task-impls/src/upgrade.rs | 4 +- task-impls/src/vid.rs | 4 +- task-impls/src/view_sync.rs | 30 ++++-- task-impls/src/vote_collection.rs | 4 +- task/src/task.rs | 6 +- types/src/utils.rs | 4 +- 27 files changed, 230 insertions(+), 201 deletions(-) diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 15897e0f8d..f05a1363f0 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -196,8 +196,8 @@ impl StakeTable { /// Update the stake table when the epoch number advances, should be manually called. pub fn advance(&mut self) { - self.last_epoch_start = self.epoch_start.clone(); - self.epoch_start = self.head.clone(); + self.last_epoch_start = Arc::clone(&self.epoch_start); + self.epoch_start = Arc::clone(&self.head); } /// Set the stake withheld by `key` to be `value`. diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a2f79ee80c..34799857d5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -167,17 +167,17 @@ impl> Clone for SystemContext> SystemContext { View { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), - state: validated_state.clone(), + state: Arc::clone(&validated_state), delta: initializer.state_delta.clone(), }, }, @@ -252,7 +252,7 @@ impl> SystemContext { event: EventType::Decide { leaf_chain: Arc::new(vec![LeafInfo::new( anchored_leaf.clone(), - validated_state.clone(), + Arc::clone(&validated_state), state_delta.cloned(), None, )]), @@ -291,7 +291,7 @@ impl> SystemContext { // https://github.com/EspressoSystems/HotShot/issues/560 locked_view: anchored_leaf.get_view_number(), high_qc: initializer.high_qc, - metrics: consensus_metrics.clone(), + metrics: Arc::clone(&consensus_metrics), }; let consensus = Arc::new(RwLock::new(consensus)); let version = Arc::new(RwLock::new(BASE_VERSION)); @@ -311,7 +311,7 @@ impl> SystemContext { start_view: initializer.start_view, networks: Arc::new(networks), memberships: Arc::new(memberships), - metrics: consensus_metrics.clone(), + metrics: Arc::clone(&consensus_metrics), internal_event_stream: (internal_tx, internal_rx.deactivate()), output_event_stream: (external_tx, external_rx), storage: Arc::new(RwLock::new(storage)), @@ -403,12 +403,12 @@ impl> SystemContext { /// Returns a copy of the consensus struct #[must_use] pub fn get_consensus(&self) -> Arc>> { - self.consensus.clone() + Arc::clone(&self.consensus) } /// Returns a copy of the instance state pub fn get_instance_state(&self) -> Arc { - self.instance_state.clone() + Arc::clone(&self.instance_state) } /// Returns a copy of the last decided leaf @@ -435,7 +435,7 @@ impl> SystemContext { /// # Panics /// Panics if internal state for consensus is inconsistent pub async fn get_decided_state(&self) -> Arc { - self.consensus.read().await.get_decided_state().clone() + Arc::clone(&self.consensus.read().await.get_decided_state()) } /// Get the validated state from a given `view`. @@ -493,7 +493,7 @@ impl> SystemContext { storage, ) .await?; - let handle = hotshot.clone().run_tasks().await; + let handle = Arc::clone(&hotshot).run_tasks().await; let (tx, rx) = hotshot.internal_event_stream.clone(); Ok((handle, tx, rx.activate())) @@ -523,8 +523,8 @@ impl> SystemContext { let output_event_stream = self.output_event_stream.clone(); let internal_event_stream = self.internal_event_stream.clone(); - let quorum_network = self.networks.quorum_network.clone(); - let da_network = self.networks.da_network.clone(); + let quorum_network = Arc::clone(&self.networks.quorum_network); + let da_network = Arc::clone(&self.networks.da_network); let quorum_membership = self.memberships.quorum_membership.clone(); let da_membership = self.memberships.da_membership.clone(); let vid_membership = self.memberships.vid_membership.clone(); @@ -533,26 +533,36 @@ impl> SystemContext { let (event_tx, event_rx) = internal_event_stream.clone(); let handle = SystemContextHandle { - registry: registry.clone(), + registry: Arc::clone(®istry), output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), hotshot: self.clone().into(), - storage: self.storage.clone(), + storage: Arc::clone(&self.storage), }; - add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; - add_network_message_task(registry.clone(), event_tx.clone(), da_network.clone()).await; + add_network_message_task( + Arc::clone(®istry), + event_tx.clone(), + Arc::clone(&quorum_network), + ) + .await; + add_network_message_task( + Arc::clone(®istry), + event_tx.clone(), + Arc::clone(&da_network), + ) + .await; if let Some(request_rx) = da_network.spawn_request_receiver_task(STATIC_VER_0_1).await { add_response_task( - registry.clone(), + Arc::clone(®istry), event_rx.activate_cloned(), request_rx, &handle, ) .await; add_request_network_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, @@ -561,92 +571,92 @@ impl> SystemContext { } add_network_event_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), - quorum_network.clone(), + Arc::clone(&quorum_network), quorum_membership.clone(), network::quorum_filter, - handle.get_storage().clone(), + Arc::clone(&handle.get_storage()), ) .await; add_network_event_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), - quorum_network.clone(), + Arc::clone(&quorum_network), quorum_membership, network::upgrade_filter, - handle.get_storage().clone(), + Arc::clone(&handle.get_storage()), ) .await; add_network_event_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), - da_network.clone(), + Arc::clone(&da_network), da_membership, network::committee_filter, - handle.get_storage().clone(), + Arc::clone(&handle.get_storage()), ) .await; add_network_event_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), - quorum_network.clone(), + Arc::clone(&quorum_network), view_sync_membership, network::view_sync_filter, - handle.get_storage().clone(), + Arc::clone(&handle.get_storage()), ) .await; add_network_event_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), - quorum_network.clone(), + Arc::clone(&quorum_network), vid_membership, network::vid_filter, - handle.get_storage().clone(), + Arc::clone(&handle.get_storage()), ) .await; add_consensus_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, ) .await; add_da_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, ) .await; add_vid_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, ) .await; add_transaction_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, ) .await; add_view_sync_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, ) .await; add_upgrade_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, @@ -654,7 +664,7 @@ impl> SystemContext { .await; #[cfg(feature = "dependency-tasks")] add_quorum_proposal_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, @@ -662,7 +672,7 @@ impl> SystemContext { .await; #[cfg(feature = "dependency-tasks")] add_quorum_vote_task( - registry.clone(), + Arc::clone(®istry), event_tx.clone(), event_rx.activate_cloned(), &handle, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 64a293b3c7..92eb8aeaf7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -55,7 +55,7 @@ pub async fn add_request_network_task::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), state); task_reg.run_task(task).await; } @@ -86,12 +86,12 @@ pub async fn add_network_message_task< event_stream: Sender>>, channel: Arc, ) { - let net = channel.clone(); + let net = Arc::clone(&channel); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { event_stream: event_stream.clone(), }; - let network = net.clone(); + let network = Arc::clone(&net); let mut state = network_state.clone(); let handle = async_spawn(async move { loop { @@ -136,7 +136,7 @@ pub async fn add_network_event_task< filter, storage, }; - let task = Task::new(tx, rx, task_reg.clone(), network_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), network_state); task_reg.run_task(task).await; } @@ -245,7 +245,7 @@ pub async fn add_consensus_task>( inject_consensus_polls(&consensus_state).await; - let task = Task::new(tx, rx, task_reg.clone(), consensus_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), consensus_state); task_reg.run_task(task).await; } @@ -257,7 +257,7 @@ pub async fn add_vid_task>( handle: &SystemContextHandle, ) { let vid_state = VIDTaskState::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), vid_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), vid_state); task_reg.run_task(task).await; } @@ -270,7 +270,7 @@ pub async fn add_upgrade_task>( ) { let upgrade_state = UpgradeTaskState::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), upgrade_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), upgrade_state); task_reg.run_task(task).await; } /// add the Data Availability task @@ -283,7 +283,7 @@ pub async fn add_da_task>( // build the da task let da_state = DATaskState::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), da_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), da_state); task_reg.run_task(task).await; } @@ -296,7 +296,7 @@ pub async fn add_transaction_task> ) { let transactions_state = TransactionTaskState::<_, _, _, Version01>::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), transactions_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), transactions_state); task_reg.run_task(task).await; } @@ -309,7 +309,7 @@ pub async fn add_view_sync_task>( ) { let view_sync_state = ViewSyncTaskState::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), view_sync_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), view_sync_state); task_reg.run_task(task).await; } @@ -322,7 +322,7 @@ pub async fn add_quorum_proposal_task> handle: &SystemContextHandle, ) { let quorum_vote_state = QuorumVoteTaskState::create_from(handle).await; - let task = Task::new(tx, rx, task_reg.clone(), quorum_vote_state); + let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_vote_state); task_reg.run_task(task).await; } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 7f0cd06464..13742ff6d0 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,6 +1,7 @@ use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, + sync::Arc, }; use async_trait::async_trait; @@ -37,7 +38,7 @@ impl, V: StaticVersionType> Create handle: &SystemContextHandle, ) -> NetworkRequestState { NetworkRequestState { - network: handle.hotshot.networks.quorum_network.clone(), + network: Arc::clone(&handle.hotshot.networks.quorum_network), state: handle.hotshot.get_consensus(), view: handle.get_cur_view().await, delay: handle.hotshot.config.data_request_delay, @@ -62,7 +63,7 @@ impl> CreateTaskState api: handle.clone(), cur_view: handle.get_cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - quorum_network: handle.hotshot.networks.quorum_network.clone(), + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), #[cfg(not(feature = "example-upgrade"))] should_vote: |_upgrade_proposal| false, #[cfg(feature = "example-upgrade")] @@ -87,7 +88,7 @@ impl> CreateTaskState consensus: handle.hotshot.get_consensus(), cur_view: handle.get_cur_view().await, vote_collector: None, - network: handle.hotshot.networks.quorum_network.clone(), + network: Arc::clone(&handle.hotshot.networks.quorum_network), membership: handle.hotshot.memberships.vid_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -107,14 +108,14 @@ impl> CreateTaskState api: handle.clone(), consensus: handle.hotshot.get_consensus(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), - da_network: handle.hotshot.networks.da_network.clone(), + da_network: Arc::clone(&handle.hotshot.networks.da_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.get_cur_view().await, vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, - storage: handle.storage.clone(), + storage: Arc::clone(&handle.storage), } } } @@ -130,7 +131,7 @@ impl> CreateTaskState ViewSyncTaskState { current_view: cur_view, next_view: cur_view, - network: handle.hotshot.networks.quorum_network.clone(), + network: Arc::clone(&handle.hotshot.networks.quorum_network), membership: handle .hotshot .memberships @@ -164,7 +165,7 @@ impl, Ver: StaticVersionType> api: handle.clone(), consensus: handle.hotshot.get_consensus(), cur_view: handle.get_cur_view().await, - network: handle.hotshot.networks.quorum_network.clone(), + network: Arc::clone(&handle.hotshot.networks.quorum_network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -195,18 +196,18 @@ impl> CreateTaskState formed_upgrade_certificate: None, proposal_cert: None, decided_upgrade_cert: None, - version: handle.hotshot.version.clone(), + version: Arc::clone(&handle.hotshot.version), output_event_stream: handle.hotshot.output_event_stream.0.clone(), current_proposal: None, id: handle.hotshot.id, public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - quorum_network: handle.hotshot.networks.quorum_network.clone(), - committee_network: handle.hotshot.networks.da_network.clone(), + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + committee_network: Arc::clone(&handle.hotshot.networks.da_network), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), committee_membership: handle.hotshot.memberships.da_membership.clone().into(), - storage: handle.storage.clone(), + storage: Arc::clone(&handle.storage), } } } @@ -225,13 +226,13 @@ impl> CreateTaskState instance_state: handle.hotshot.get_instance_state(), latest_voted_view: handle.get_cur_view().await, vote_dependencies: HashMap::new(), - quorum_network: handle.hotshot.networks.quorum_network.clone(), - committee_network: handle.hotshot.networks.da_network.clone(), + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + committee_network: Arc::clone(&handle.hotshot.networks.da_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), output_event_stream: handle.hotshot.output_event_stream.0.clone(), id: handle.hotshot.id, - storage: handle.storage.clone(), + storage: Arc::clone(&handle.storage), } } } @@ -247,8 +248,8 @@ impl> CreateTaskState QuorumProposalTaskState { latest_proposed_view: handle.get_cur_view().await, propose_dependencies: HashMap::new(), - quorum_network: handle.hotshot.networks.quorum_network.clone(), - committee_network: handle.hotshot.networks.da_network.clone(), + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + committee_network: Arc::clone(&handle.hotshot.networks.da_network), output_event_stream: handle.hotshot.output_event_stream.0.clone(), consensus, instance_state: handle.hotshot.get_instance_state(), @@ -257,7 +258,7 @@ impl> CreateTaskState cur_view: handle.get_cur_view().await, public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - storage: handle.storage.clone(), + storage: Arc::clone(&handle.storage), timeout: handle.hotshot.config.next_view_timeout, timeout_task: None, round_start_delay: handle.hotshot.config.round_start_delay, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 3a56909a2b..73019b2ccf 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -167,7 +167,7 @@ impl CombinedNetworks { if !primary_failed && Self::should_delay(&message) { let duration = *self.delay_duration.read().await; - let primary_fail_counter = self.primary_fail_counter.clone(); + let primary_fail_counter = Arc::clone(&self.primary_fail_counter); self.delayed_tasks .write() .await @@ -480,7 +480,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> } fn update_view(&self, view: u64) { - let delayed_map = self.delayed_tasks.clone(); + let delayed_map = Arc::clone(&self.delayed_tasks); async_spawn(async move { let mut cancel_tasks = Vec::new(); { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 0a81337d94..248ab018a1 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -268,7 +268,7 @@ where .build() .unwrap() }; - let bootstrap_addrs_ref = bootstrap_addrs.clone(); + let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); let keys = all_keys.clone(); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); @@ -294,7 +294,7 @@ where } }, ); - (net.clone(), net) + (Arc::clone(&net), net) }) } }) @@ -538,9 +538,9 @@ impl Libp2pNetwork { node_lookup_recv: UnboundedReceiver>, _: Ver, ) { - let handle = self.inner.handle.clone(); + let handle = Arc::clone(&self.inner.handle); let dht_timeout = self.inner.dht_timeout; - let latest_seen_view = self.inner.latest_seen_view.clone(); + let latest_seen_view = Arc::clone(&self.inner.latest_seen_view); // deals with handling lookup queue. should be infallible async_spawn(async move { @@ -569,15 +569,15 @@ impl Libp2pNetwork { /// Initiates connection to the outside world fn spawn_connect(&mut self, id: usize, bind_version: VER) { let pk = self.inner.pk.clone(); - let bootstrap_ref = self.inner.bootstrap_addrs.clone(); - let handle = self.inner.handle.clone(); - let is_bootstrapped = self.inner.is_bootstrapped.clone(); + let bootstrap_ref = Arc::clone(&self.inner.bootstrap_addrs); + let handle = Arc::clone(&self.inner.handle); + let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); let node_type = self.inner.handle.config().node_type; - let metrics_connected_peers = self.inner.clone(); + let metrics_connected_peers = Arc::clone(&self.inner); let is_da = self.inner.is_da; async_spawn({ - let is_ready = self.inner.is_ready.clone(); + let is_ready = Arc::clone(&self.inner.is_ready); async move { let bs_addrs = bootstrap_ref.read().await.clone(); @@ -719,7 +719,7 @@ impl Libp2pNetwork { mut network_rx: NetworkNodeReceiver, ) { let handle = self.clone(); - let is_bootstrapped = self.inner.is_bootstrapped.clone(); + let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); async_spawn(async move { let Some(mut kill_switch) = network_rx.take_kill_switch() else { tracing::error!( @@ -843,7 +843,7 @@ impl ConnectedNetwork for Libp2p bind_version: VER, ) -> Option)>> { let mut internal_rx = self.inner.requests_rx.lock().await.take()?; - let handle = self.inner.handle.clone(); + let handle = Arc::clone(&self.inner.handle); let (mut tx, rx) = mpsc::channel(100); async_spawn(async move { while let Some((request, chan)) = internal_rx.next().await { @@ -928,7 +928,7 @@ impl ConnectedNetwork for Libp2p { let metrics = self.inner.metrics.clone(); if let Some(ref config) = &self.inner.reliability_config { - let handle = self.inner.handle.clone(); + let handle = Arc::clone(&self.inner.handle); let serialized_msg = Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; @@ -936,7 +936,7 @@ impl ConnectedNetwork for Libp2p serialized_msg, Arc::new(move |msg: Vec| { let topic_2 = topic.clone(); - let handle_2 = handle.clone(); + let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { match handle_2.gossip_no_serialize(topic_2, msg).await { @@ -1043,14 +1043,14 @@ impl ConnectedNetwork for Libp2p { let metrics = self.inner.metrics.clone(); if let Some(ref config) = &self.inner.reliability_config { - let handle = self.inner.handle.clone(); + let handle = Arc::clone(&self.inner.handle); let serialized_msg = Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; let fut = config.clone().chaos_send_msg( serialized_msg, Arc::new(move |msg: Vec| { - let handle_2 = handle.clone(); + let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { match handle_2.direct_request_no_serialize(pid, msg).await { diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 06fd7a318c..6d5f5097b2 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -153,7 +153,7 @@ impl MemoryNetwork { inner: Arc::new(MemoryNetworkInner { input: RwLock::new(Some(input)), output: Mutex::new(output), - master_map: master_map.clone(), + master_map: Arc::clone(&master_map), in_flight_message_count, metrics, reliability_config, @@ -200,7 +200,7 @@ impl TestableNetworkingImplementation let net = MemoryNetwork::new( pubkey, NetworkingMetricsValue::default(), - master.clone(), + Arc::clone(&master), reliability_config.clone(), ); Box::pin(async move { (net.clone().into(), net.into()) }) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 03febebd81..c153c1c56a 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -362,7 +362,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork is_paused: Arc::from(AtomicBool::new(false)), }); - (client.clone(), client) + (Arc::clone(&client), client) }) } }) diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs index 15a03622f0..e9a99e21ab 100644 --- a/hotshot/src/traits/networking/web_server_network.rs +++ b/hotshot/src/traits/networking/web_server_network.rs @@ -866,7 +866,7 @@ impl e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( @@ -904,7 +904,7 @@ impl e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( @@ -937,7 +937,7 @@ impl // Only start this task if we haven't already started it. let mut cancel_handle = self.inner.latest_proposal_task.write().await; if cancel_handle.is_none() { - let inner = self.inner.clone(); + let inner = Arc::clone(&self.inner); // Create sender and receiver for cancelling the task let (sender, receiver) = unbounded(); @@ -967,7 +967,7 @@ impl let mut cancel_handle = self.inner.upgrade_proposal_task.write().await; if cancel_handle.is_none() { error!("Starting poll for upgrade proposals!"); - let inner = self.inner.clone(); + let inner = Arc::clone(&self.inner); // Create sender and receiver for cancelling the task let (sender, receiver) = unbounded(); @@ -997,7 +997,7 @@ impl let mut cancel_handle = self.inner.upgrade_vote_task.write().await; if cancel_handle.is_none() { debug!("Starting poll for upgrade proposals!"); - let inner = self.inner.clone(); + let inner = Arc::clone(&self.inner); // Create sender and receiver for cancelling the task let (sender, receiver) = unbounded(); @@ -1026,7 +1026,7 @@ impl // Only start this task if we haven't already started it. let mut cancel_handle = self.inner.latest_view_sync_certificate_task.write().await; if cancel_handle.is_none() { - let inner = self.inner.clone(); + let inner = Arc::clone(&self.inner); // Create sender and receiver for cancelling the task let (sender, receiver) = unbounded(); @@ -1058,7 +1058,7 @@ impl let (sender, receiver) = unbounded(); e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( @@ -1093,7 +1093,7 @@ impl let (sender, receiver) = unbounded(); e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( @@ -1141,7 +1141,7 @@ impl let (sender, receiver) = unbounded(); e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( @@ -1178,7 +1178,7 @@ impl let (sender, receiver) = unbounded(); e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( @@ -1244,7 +1244,7 @@ impl let (sender, receiver) = unbounded(); e.insert(sender); async_spawn({ - let inner_clone = self.inner.clone(); + let inner_clone = Arc::clone(&self.inner); async move { if let Err(e) = inner_clone .poll_web_server( diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 329d95a201..7b5a466fc9 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -180,6 +180,6 @@ impl + 'static> SystemContextHandl /// historical data #[must_use] pub fn get_storage(&self) -> Arc> { - self.storage.clone() + Arc::clone(&self.storage) } } diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 8ff0dcfb92..c9f4cf4473 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -212,7 +212,7 @@ pub async fn spin_up_swarms( info!("listen addr for {} is {:?}", i, addr); bootstrap_addrs.push((node.peer_id(), addr)); connecting_futs.push({ - let node = node.clone(); + let node = Arc::clone(&node); async move { node.begin_bootstrap().await?; node.lookup_pid(PeerId::random()).await @@ -220,7 +220,7 @@ pub async fn spin_up_swarms( .boxed_local() }); let node_with_state = HandleWithState { - handle: node.clone(), + handle: Arc::clone(&node), state: Arc::default(), }; handles.push((node_with_state, rx)); @@ -248,7 +248,7 @@ pub async fn spin_up_swarms( let node = Arc::new(node); connecting_futs.push({ - let node = node.clone(); + let node = Arc::clone(&node); async move { node.begin_bootstrap().await?; node.lookup_pid(PeerId::random()).await @@ -256,7 +256,7 @@ pub async fn spin_up_swarms( .boxed_local() }); let node_with_state = HandleWithState { - handle: node.clone(), + handle: Arc::clone(&node), state: Arc::default(), }; handles.push((node_with_state, rx)); diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index a6787c0bfc..e89ae4972d 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -450,7 +450,7 @@ async fn run_request_response_increment_all( for _ in 0..futs.len() { let fut = futs.pop().unwrap(); - let results = results.clone(); + let results = Arc::clone(&results); async_spawn(async move { let res = fut.await; results.write().await.push(res); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 4e05145dbc..7e02f1bd3b 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -340,16 +340,16 @@ impl> ConsensusTaskState self.cur_view, view, event_stream, - self.quorum_membership.clone(), + Arc::clone(&self.quorum_membership), self.public_key.clone(), self.private_key.clone(), - self.consensus.clone(), + Arc::clone(&self.consensus), self.round_start_delay, self.formed_upgrade_certificate.clone(), self.decided_upgrade_cert.clone(), &mut self.payload_commitment_and_metadata, &mut self.proposal_cert, - self.instance_state.clone(), + Arc::clone(&self.instance_state), ) .await?; @@ -465,7 +465,7 @@ impl> ConsensusTaskState .get(&self.public_key).cloned().map(|prop| prop.data); // Add our data into a new `LeafInfo` - leaf_views.push(LeafInfo::new(leaf.clone(), state.clone(), delta.clone(), vid_share)); + leaf_views.push(LeafInfo::new(leaf.clone(), Arc::clone(&state), delta.clone(), vid_share)); leafs_decided.push(leaf.clone()); if let Some(ref payload) = leaf.get_block_payload() { for txn in payload @@ -589,7 +589,7 @@ impl> ConsensusTaskState debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.quorum_membership.clone(), + membership: Arc::clone(&self.quorum_membership), view: vote.get_view_number(), id: self.id, }; @@ -603,7 +603,7 @@ impl> ConsensusTaskState let result = collector .as_mut() .unwrap() - .handle_event(event.clone(), &event_stream) + .handle_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -635,7 +635,7 @@ impl> ConsensusTaskState debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.quorum_membership.clone(), + membership: Arc::clone(&self.quorum_membership), view: vote.get_view_number(), id: self.id, }; @@ -649,7 +649,7 @@ impl> ConsensusTaskState let result = collector .as_mut() .unwrap() - .handle_event(event.clone(), &event_stream) + .handle_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -839,10 +839,10 @@ impl> ConsensusTaskState self.public_key.clone(), new_view, &event_stream, - self.quorum_membership.clone(), - self.quorum_network.clone(), + Arc::clone(&self.quorum_membership), + Arc::clone(&self.quorum_network), self.timeout, - self.consensus.clone(), + Arc::clone(&self.consensus), &mut self.cur_view, &mut self.timeout_task, ) diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index f6d03c6cc8..347e2afe45 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -160,8 +160,8 @@ pub async fn validate_proposal_safety_and_liveness( View { view_inner: ViewInner::Leaf { leaf: proposed_leaf.commit(), - state: state.clone(), - delta: Some(delta.clone()), + state: Arc::clone(&state), + delta: Some(Arc::clone(&delta)), }, }, ); @@ -378,7 +378,7 @@ pub async fn get_parent_leaf_and_state( // TODO do some sort of sanity check on the view number that it matches decided } - Ok((parent_leaf, state.clone())) + Ok((parent_leaf, Arc::clone(state))) } /// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is a special @@ -400,9 +400,9 @@ async fn publish_proposal_from_upgrade_cert( let (parent_leaf, state) = get_parent_leaf_and_state( cur_view, view, - quorum_membership.clone(), + Arc::clone(&quorum_membership), public_key.clone(), - consensus.clone(), + Arc::clone(&consensus), ) .await?; @@ -436,7 +436,7 @@ async fn publish_proposal_from_upgrade_cert( Some(upgrade_cert), None, delay, - instance_state.clone(), + Arc::clone(&instance_state), ) .await; })) @@ -465,7 +465,7 @@ async fn publish_proposal_from_commitment_and_metadata( view, quorum_membership, public_key.clone(), - consensus.clone(), + Arc::clone(&consensus), ) .await?; @@ -517,7 +517,7 @@ async fn publish_proposal_from_commitment_and_metadata( proposal_upgrade_certificate, proposal_certificate, delay, - instance_state.clone(), + Arc::clone(&instance_state), ) .await; }); @@ -629,10 +629,10 @@ pub async fn handle_quorum_proposal_recv { if let (Some(state), _) = consensus_read.get_state_and_delta(leaf.get_view_number()) { - Some((leaf, state.clone())) + Some((leaf, Arc::clone(&state))) } else { bail!("Parent state not found! Consensus internally inconsistent"); } @@ -749,16 +749,16 @@ pub async fn handle_quorum_proposal_recv, A: ConsensusApi + ); return None; } - let txns = proposal.data.encoded_transactions.clone(); + let txns = Arc::clone(&proposal.data.encoded_transactions); let num_nodes = self.quorum_membership.total_nodes(); let payload_commitment = spawn_blocking(move || vid_commitment(&txns, num_nodes)).await; @@ -231,7 +231,7 @@ impl, A: ConsensusApi + debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.da_membership.clone(), + membership: Arc::clone(&self.da_membership), view: vote.get_view_number(), id: self.id, }; @@ -245,7 +245,7 @@ impl, A: ConsensusApi + let result = collector .as_mut() .unwrap() - .handle_event(event.clone(), &event_stream) + .handle_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -320,7 +320,7 @@ impl, A: ConsensusApi + }; let data: DAProposal = DAProposal { - encoded_transactions: encoded_transactions.clone(), + encoded_transactions: Arc::clone(encoded_transactions), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number: view, diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index d4da652ab1..1e34d46dae 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -66,10 +66,15 @@ pub async fn run_harness>>>( let test_task = Task::new( to_test.clone(), from_task.clone(), - registry.clone(), + Arc::clone(®istry), test_state, ); - let task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); + let task = Task::new( + to_test.clone(), + from_test.clone(), + Arc::clone(®istry), + state, + ); tasks.push(test_task.run()); tasks.push(task.run()); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 52f6b35e97..ec0c4b6161 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -423,8 +423,8 @@ impl< }; let view = message.kind.get_view_number(); let committee = membership.get_whole_committee(view); - let net = self.channel.clone(); - let storage = self.storage.clone(); + let net = Arc::clone(&self.channel); + let storage = Arc::clone(&self.storage); let version = self.version; async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( @@ -491,8 +491,8 @@ impl< }) .collect(); - let net = self.channel.clone(); - let storage = self.storage.clone(); + let net = Arc::clone(&self.channel); + let storage = Arc::clone(&self.storage); async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 7551d7772a..86a20ed4e2 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -315,7 +315,7 @@ impl HandleDepOutput for ProposalDependencyHandle { self.view_number, &self.sender, commit_and_metadata.unwrap(), - self.instance_state.clone(), + Arc::clone(&self.instance_state), ) .await; } @@ -506,9 +506,11 @@ impl> QuorumProposalTaskState propose_now_dependency.mark_as_completed(event.clone()), + HotShotEvent::ProposeNow(..) => { + propose_now_dependency.mark_as_completed(Arc::clone(&event)); + } HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { - payload_commitment_dependency.mark_as_completed(event.clone()); + payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); } HotShotEvent::QuorumProposalValidated(..) => { proposal_dependency.mark_as_completed(event); @@ -583,16 +585,16 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { @@ -657,7 +659,7 @@ impl> QuorumProposalTaskState { @@ -686,7 +688,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { @@ -798,7 +800,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState { diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 7411eb84fd..814c5348c0 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -311,7 +311,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState> QuorumVoteTaskState, Ver: StaticVersionType + 'st // and so we don't implicitly rely on the same replica all the time. recipients.shuffle(&mut thread_rng()); let requester = DelayedRequester:: { - network: self.network.clone(), - state: self.state.clone(), + network: Arc::clone(&self.network), + state: Arc::clone(&self.state), sender, delay: self.delay, recipients, diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 026bfce7ab..271600b231 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -133,7 +133,8 @@ impl NetworkResponseState { .is_some_and(|m| m.contains_key(key)); if !contained { let txns = consensus.saved_payloads.get(&view)?; - let vid = calculate_vid_disperse(txns.clone(), &self.quorum.clone(), view).await; + let vid = + calculate_vid_disperse(Arc::clone(txns), &Arc::clone(&self.quorum), view).await; let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; for share in shares { diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 6986bbcd86..04d3afd455 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -169,7 +169,7 @@ impl, A: ConsensusApi + debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.quorum_membership.clone(), + membership: Arc::clone(&self.quorum_membership), view: vote.get_view_number(), id: self.id, }; @@ -183,7 +183,7 @@ impl, A: ConsensusApi + let result = collector .as_mut() .unwrap() - .handle_event(event.clone(), &tx) + .handle_event(Arc::clone(&event), &tx) .await; if result == Some(HotShotTaskCompleted) { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index b0fc7899a1..02885ec46d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -66,8 +66,8 @@ impl, A: ConsensusApi + ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); let vid_disperse = calculate_vid_disperse( - encoded_transactions.clone(), - &self.membership.clone(), + Arc::clone(encoded_transactions), + &Arc::clone(&self.membership), *view_number, ) .await; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index ade64a8534..c5ee4da88b 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -236,7 +236,9 @@ impl< if let Some(replica_task) = task_map.get_mut(&view) { // Forward event then return debug!("Forwarding message"); - let result = replica_task.handle(event.clone(), sender.clone()).await; + let result = replica_task + .handle(Arc::clone(&event), sender.clone()) + .await; if result == Some(HotShotTaskCompleted) { // The protocol has finished @@ -255,8 +257,8 @@ impl< finalized: false, sent_view_change_event: false, timeout_task: None, - membership: self.membership.clone(), - network: self.network.clone(), + membership: Arc::clone(&self.membership), + network: Arc::clone(&self.network), public_key: self.public_key.clone(), private_key: self.private_key.clone(), api: self.api.clone(), @@ -264,7 +266,9 @@ impl< id: self.id, }; - let result = replica_state.handle(event.clone(), sender.clone()).await; + let result = replica_state + .handle(Arc::clone(&event), sender.clone()) + .await; if result == Some(HotShotTaskCompleted) { // The protocol has finished @@ -315,7 +319,9 @@ impl< let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone(), &event_stream).await; + let result = relay_task + .handle_event(Arc::clone(&event), &event_stream) + .await; if result == Some(HotShotTaskCompleted) { // The protocol has finished @@ -333,7 +339,7 @@ impl< let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.membership.clone(), + membership: Arc::clone(&self.membership), view: vote_view, id: self.id, }; @@ -351,7 +357,9 @@ impl< let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone(), &event_stream).await; + let result = relay_task + .handle_event(Arc::clone(&event), &event_stream) + .await; if result == Some(HotShotTaskCompleted) { // The protocol has finished @@ -369,7 +377,7 @@ impl< let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.membership.clone(), + membership: Arc::clone(&self.membership), view: vote_view, id: self.id, }; @@ -387,7 +395,9 @@ impl< let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone(), &event_stream).await; + let result = relay_task + .handle_event(Arc::clone(&event), &event_stream) + .await; if result == Some(HotShotTaskCompleted) { // The protocol has finished @@ -405,7 +415,7 @@ impl< let info = AccumulatorInfo { public_key: self.public_key.clone(), - membership: self.membership.clone(), + membership: Arc::clone(&self.membership), view: vote_view, id: self.id, }; diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 6b90f9f14e..360304d2c9 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -204,14 +204,14 @@ where }; let mut state = VoteCollectionTaskState:: { - membership: info.membership.clone(), + membership: Arc::clone(&info.membership), public_key: info.public_key.clone(), accumulator: Some(new_accumulator), view: info.view, id: info.id, }; - let result = state.handle_event(event.clone(), sender).await; + let result = state.handle_event(Arc::clone(&event), sender).await; if result == Some(HotShotTaskCompleted) { // The protocol has finished diff --git a/task/src/task.rs b/task/src/task.rs index f1e1f5afa0..ad7ce9c316 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -185,7 +185,7 @@ impl Task { let task = Task { event_sender: self.clone_sender(), event_receiver: self.subscribe(), - registry: self.registry.clone(), + registry: Arc::clone(&self.registry), state, }; // Note: await here is only awaiting the task to be added to the @@ -400,7 +400,7 @@ mod tests { let task1 = Task:: { event_sender: tx.clone(), event_receiver: rx.clone(), - registry: reg.clone(), + registry: Arc::clone(®), state: DummyHandle::default(), }; tx.broadcast(1).await.unwrap(); @@ -429,7 +429,7 @@ mod tests { let task1 = Task:: { event_sender: tx.clone(), event_receiver: rx.clone(), - registry: reg.clone(), + registry: Arc::clone(®), state: DummyHandle::default(), }; tx.broadcast(1).await.unwrap(); diff --git a/types/src/utils.rs b/types/src/utils.rs index ad8544179c..e628aad883 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -54,7 +54,7 @@ impl Clone for ViewInner { }, Self::Leaf { leaf, state, delta } => Self::Leaf { leaf: *leaf, - state: state.clone(), + state: Arc::clone(state), delta: delta.clone(), }, Self::Failed => Self::Failed, @@ -107,7 +107,7 @@ impl ViewInner { #[must_use] pub fn get_state_and_delta(&self) -> StateAndDelta { if let Self::Leaf { state, delta, .. } = self { - (Some(state.clone()), delta.clone()) + (Some(Arc::clone(state)), delta.clone()) } else { (None, None) } From defdefdc1e2a5ec84ce52f25575e82de80719e25 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 25 Apr 2024 12:04:34 -0400 Subject: [PATCH 0983/1393] Conditional DA (#3021) * conditional DA * lint * add comments --- examples/infra/mod.rs | 16 ++++-- .../src/traits/networking/libp2p_network.rs | 10 ++-- orchestrator/api.toml | 6 ++- orchestrator/src/client.rs | 3 +- orchestrator/src/config.rs | 49 ++++++++++++++++--- orchestrator/src/lib.rs | 16 +++++- testing/src/spinning_task.rs | 6 ++- testing/src/task_helpers.rs | 3 +- testing/src/test_builder.rs | 35 ++++++++++--- testing/src/test_runner.rs | 9 +++- types/src/lib.rs | 22 +++++++-- 11 files changed, 138 insertions(+), 37 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index d5e8d453e1..0d9bac23f8 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -296,7 +296,7 @@ pub fn load_config_from_file( // but its type is too complex to load so we'll generate it from seed now. // Also this function is only used for orchestrator initialization now, so this value doesn't matter config.config.my_own_validator_config = - ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1); + ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1, true); // initialize it with size for better assignment of peers' config config.config.known_nodes_with_stake = vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize]; @@ -403,7 +403,8 @@ pub trait RunDA< }); let committee_election_config = TYPES::Membership::default_election_config( - config.config.da_staked_committee_size.try_into().unwrap(), + // Use the number of _actual_ DA nodes connected for the committee size + config.config.known_da_nodes.len() as u64, config.config.num_nodes_without_stake as u64, ); let networks_bundle = Networks { @@ -751,9 +752,9 @@ where private_key: key.private_key, }; - // See if we should be DA + // See if we should be DA, subscribe to the DA topic if so let mut topics = vec![Topic::Global]; - if config.node_index < config.config.da_staked_committee_size as u64 { + if config.config.my_own_validator_config.is_da { topics.push(Topic::DA); } @@ -768,6 +769,9 @@ where ) .expect("failed to create network"); + // Wait for the network to be ready + network.wait_for_ready().await; + PushCdnDaRun { config, quorum_channel: network.clone(), @@ -1018,6 +1022,8 @@ pub async fn main_entry_point< // We assume one node will not call this twice to generate two validator_config-s with same identity. let my_own_validator_config = NetworkConfig::::generate_init_validator_config( &orchestrator_client, + // This is false for now, we only use it to generate the keypair + false ).await; // Derives our Libp2p private key from our private key, and then returns the public key of that key @@ -1038,6 +1044,8 @@ pub async fn main_entry_point< my_own_validator_config, args.advertise_address, Some(libp2p_public_key), + // If `indexed_da` is true: use the node index to determine if we are a DA node. + true, ) .await .expect("failed to get config"); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 248ab018a1..1dc082c76c 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -401,10 +401,12 @@ impl Libp2pNetwork { let mut da_keys = BTreeSet::new(); // Make a node DA if it is under the staked committee size - for (i, node) in config.config.known_nodes_with_stake.into_iter().enumerate() { - if i < config.config.da_staked_committee_size { - da_keys.insert(K::get_public_key(&node.stake_table_entry)); - } + for node in config.config.known_da_nodes { + da_keys.insert(K::get_public_key(&node.stake_table_entry)); + } + + // Insert all known nodes into the set of all keys + for node in config.config.known_nodes_with_stake { all_keys.insert(K::get_public_key(&node.stake_table_entry)); } diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 21bac03d8c..3f97dfec9b 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -32,11 +32,13 @@ Get the latest temporary node index only for generating validator's key pair for # POST the node's node index to generate public key for pubkey collection [route.post_pubkey] -PATH = ["pubkey/:node_index"] +PATH = ["pubkey/:node_index/:is_da"] METHOD = "POST" ":node_index" = "Integer" +":is_da" = "Boolean" DOC = """ -Post a node's node_index so that its public key could be posted and collected by the orchestrator. +Post a node's node_index so that its public key could be posted and collected by the orchestrator. +Supply whether or not we are DA. """ # GET whether or not the config with all peers' public keys / configs are ready diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 4a59968e53..506a2f48cf 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -298,11 +298,12 @@ impl OrchestratorClient { &self, node_index: u64, my_pub_key: PeerConfig, + is_da: bool, ) -> NetworkConfig { // send my public key let _send_pubkey_ready_f: Result<(), ClientError> = self .client - .post(&format!("api/pubkey/{node_index}")) + .post(&format!("api/pubkey/{node_index}/{is_da}")) .body_binary(&PeerConfig::::to_bytes(&my_pub_key)) .unwrap() .send() diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 0da0b88078..3179c5c11f 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashSet, env, fs, net::SocketAddr, num::NonZeroUsize, @@ -281,11 +282,14 @@ impl NetworkConfig { } /// Get a temporary node index for generating a validator config - pub async fn generate_init_validator_config(client: &OrchestratorClient) -> ValidatorConfig { + pub async fn generate_init_validator_config( + client: &OrchestratorClient, + is_da: bool, + ) -> ValidatorConfig { // This cur_node_index is only used for key pair generation, it's not bound with the node, // lather the node with the generated key pair will get a new node_index from orchestrator. let cur_node_index = client.get_node_index_for_init_validator_config().await; - ValidatorConfig::generated_from_seed_indexed([0u8; 32], cur_node_index.into(), 1) + ValidatorConfig::generated_from_seed_indexed([0u8; 32], cur_node_index.into(), 1, is_da) } /// Asynchronously retrieves a `NetworkConfig` from an orchestrator. @@ -299,6 +303,8 @@ impl NetworkConfig { my_own_validator_config: ValidatorConfig, libp2p_address: Option, libp2p_public_key: Option, + // If true, we will use the node index to determine if we are a DA node + indexed_da: bool, ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { let (mut run_config, source) = Self::from_file_or_orchestrator(client, file, libp2p_address, libp2p_public_key) @@ -308,13 +314,19 @@ impl NetworkConfig { // Assign my_own_validator_config to the run_config if not loading from file match source { NetworkConfigSource::Orchestrator => { - run_config.config.my_own_validator_config = my_own_validator_config; + run_config.config.my_own_validator_config = my_own_validator_config.clone(); } NetworkConfigSource::File => { // do nothing, my_own_validator_config has already been loaded from file } } + // If we've chosen to be DA based on the index, do so + if indexed_da { + run_config.config.my_own_validator_config.is_da = + run_config.node_index < run_config.config.da_staked_committee_size as u64; + } + // one more round of orchestrator here to get peer's public key/config let updated_config: NetworkConfig = client .post_and_wait_all_public_keys::( @@ -323,9 +335,11 @@ impl NetworkConfig { .config .my_own_validator_config .get_public_config(), + run_config.config.my_own_validator_config.is_da, ) .await; run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; + run_config.config.known_da_nodes = updated_config.config.known_da_nodes; info!("Retrieved config; our node index is {node_index}."); Ok((run_config, source)) @@ -570,6 +584,9 @@ pub struct HotShotConfigFile { /// The known nodes' public key and stake value pub known_nodes_with_stake: Vec>, #[serde(skip)] + /// The known DA nodes' public key and stake values + pub known_da_nodes: HashSet>, + #[serde(skip)] /// The known non-staking nodes' pub known_nodes_without_stake: Vec, /// Number of staking committee nodes @@ -615,6 +632,8 @@ pub struct ValidatorConfigFile { pub node_id: u64, // The validator's stake, commented for now // pub stake_value: u64, + /// Whether or not we are DA + pub is_da: bool, } impl ValidatorConfigFile { @@ -663,6 +682,7 @@ impl From> for HotS execution_type: ExecutionType::Continuous, num_nodes_with_stake: val.num_nodes_with_stake, num_nodes_without_stake: val.num_nodes_without_stake, + known_da_nodes: val.known_da_nodes, max_transactions: val.max_transactions, min_transactions: val.min_transactions, known_nodes_with_stake: val.known_nodes_with_stake, @@ -697,7 +717,7 @@ pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS: u64 = 60; impl From for ValidatorConfig { fn from(val: ValidatorConfigFile) -> Self { // here stake_value is set to 1, since we don't input stake_value from ValidatorConfigFile for now - ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1) + ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1, val.is_da) } } impl From for HotShotConfig { @@ -710,20 +730,35 @@ impl From for HotShot impl Default for HotShotConfigFile { fn default() -> Self { + // The default number of nodes is 5 + let staked_committee_nodes: usize = 5; + + // Aggregate the DA nodes + let mut known_da_nodes = HashSet::new(); + let gen_known_nodes_with_stake = (0..10) .map(|node_id| { - let cur_validator_config: ValidatorConfig = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + let mut cur_validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, false); + + // Add to DA nodes based on index + if node_id < staked_committee_nodes as u64 { + known_da_nodes.insert(cur_validator_config.get_public_config()); + cur_validator_config.is_da = true; + } + cur_validator_config.get_public_config() }) .collect(); + Self { num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), num_nodes_without_stake: 0, my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, known_nodes_without_stake: vec![], - staked_committee_nodes: 5, + staked_committee_nodes, + known_da_nodes, non_staked_committee_nodes: 0, fixed_leader_for_gpuvid: 0, max_transactions: NonZeroUsize::new(100).unwrap(), diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index b11ee78ccb..3535f24674 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -168,6 +168,7 @@ pub trait OrchestratorApi { &mut self, node_index: u64, pubkey: &mut Vec, + is_da: bool, ) -> Result<(), ServerError>; /// post endpoint for whether or not all peers public keys are ready /// # Errors @@ -262,6 +263,7 @@ where &mut self, node_index: u64, pubkey: &mut Vec, + is_da: bool, ) -> Result<(), ServerError> { if self.pub_posted.contains(&node_index) { return Err(ServerError { @@ -275,7 +277,16 @@ where pubkey.drain(..12); let register_pub_key_with_stake = PeerConfig::::from_bytes(pubkey).unwrap(); self.config.config.known_nodes_with_stake[node_index as usize] = - register_pub_key_with_stake; + register_pub_key_with_stake.clone(); + + // If the node wants to be DA, add it to the list of known DAs + if is_da { + self.config + .config + .known_da_nodes + .insert(register_pub_key_with_stake); + }; + self.nodes_with_pubkey += 1; println!( "Node {:?} posted public key, now total num posted public key: {:?}", @@ -427,8 +438,9 @@ where .post("post_pubkey", |req, state| { async move { let node_index = req.integer_param("node_index")?; + let is_da = req.boolean_param("is_da")?; let mut pubkey = req.body_bytes(); - state.register_public_key(node_index, &mut pubkey) + state.register_public_key(node_index, &mut pubkey, is_da) } .boxed() })? diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index cfd99fed50..3b87d85dad 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -137,7 +137,11 @@ where // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = ValidatorConfig::generated_from_seed_indexed( - [0u8; 32], node_id, 1, + [0u8; 32], + node_id, + 1, + // For tests, make the node DA based on its index + node_id < config.da_staked_committee_size as u64, ); TestRunner::add_node_with_config( node_id, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index e46cac7f9e..43b1dd642d 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -70,7 +70,8 @@ pub async fn build_system_handle( let committee_election_config = config.election_config.clone().unwrap_or_else(|| { ::Membership::default_election_config( - config.num_nodes_with_stake.get() as u64, + // Use the _actual_ number of known DA nodes instead of the expected number of DA nodes + config.known_da_nodes.len() as u64, config.num_nodes_without_stake as u64, ) }); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index a851565343..84e9318fd1 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,4 +1,4 @@ -use std::{num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{collections::HashSet, num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}; use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; @@ -137,8 +137,8 @@ impl TestDescription { // TODO: remove once we have fixed the DHT timeout issue // https://github.com/EspressoSystems/HotShot/issues/2088 num_bootstrap_nodes: num_nodes_with_stake, - num_nodes_with_stake: num_nodes_with_stake, - num_nodes_without_stake: num_nodes_without_stake, + num_nodes_with_stake, + num_nodes_without_stake, start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 20, @@ -203,8 +203,8 @@ impl Default for TestDescription { Self { timing_data: TimingData::default(), min_transactions: 0, - num_nodes_with_stake: num_nodes_with_stake, - num_nodes_without_stake: num_nodes_without_stake, + num_nodes_with_stake, + num_nodes_without_stake, start_nodes: num_nodes_with_stake, skip_late: false, num_bootstrap_nodes: num_nodes_with_stake, @@ -255,17 +255,35 @@ impl TestDescription { .. } = self.clone(); + let mut known_da_nodes = HashSet::new(); + // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. let known_nodes_with_stake = (0..num_nodes_with_stake) .map(|node_id_| { let cur_validator_config: ValidatorConfig = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id_ as u64, 1); + ValidatorConfig::generated_from_seed_indexed( + [0u8; 32], + node_id_ as u64, + 1, + node_id_ < da_staked_committee_size, + ); + + // Add the node to the known DA nodes based on the index (for tests) + if node_id_ < da_staked_committee_size { + known_da_nodes.insert(cur_validator_config.get_public_config()); + } + cur_validator_config.get_public_config() }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. - let mut my_own_validator_config = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( + [0u8; 32], + node_id, + 1, + // This is the config for node 0 + 0 < da_staked_committee_size, + ); if node_id == 0 { my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file( "config/ValidatorConfigFile.toml", @@ -277,6 +295,7 @@ impl TestDescription { execution_type: ExecutionType::Incremental, num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), // Currently making this zero for simplicity + known_da_nodes, num_nodes_without_stake: 0, num_bootstrap: num_bootstrap_nodes, min_transactions, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 288af890ff..23d06cad82 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -359,7 +359,8 @@ where da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), committee_election_config( - config.da_staked_committee_size as u64, + // Use the _actual_ number of DA nodes instead of expected + config.known_da_nodes.len() as u64, config.num_nodes_without_stake as u64, ), config.fixed_leader_for_gpuvid, @@ -391,9 +392,13 @@ where } else { let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); + + // See whether or not we should be DA + let is_da = node_id < config.da_staked_committee_size as u64; + // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1); + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); let hotshot = Self::add_node_with_config( node_id, networks.clone(), diff --git a/types/src/lib.rs b/types/src/lib.rs index 0b604038f7..cbebe11c16 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,5 +1,7 @@ //! Types and Traits for the `HotShot` consensus module -use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; +use std::{ + collections::HashSet, fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration, +}; use bincode::Options; use displaydoc::Display; @@ -68,12 +70,19 @@ pub struct ValidatorConfig { pub stake_value: u64, /// the validator's key pairs for state signing/verification pub state_key_pair: light_client::StateKeyPair, + /// Whether or not this validator is DA + pub is_da: bool, } impl ValidatorConfig { - /// generate validator config from input seed, index and stake value + /// generate validator config from input seed, index, stake value, and whether it's DA #[must_use] - pub fn generated_from_seed_indexed(seed: [u8; 32], index: u64, stake_value: u64) -> Self { + pub fn generated_from_seed_indexed( + seed: [u8; 32], + index: u64, + stake_value: u64, + is_da: bool, + ) -> Self { let (public_key, private_key) = KEY::generated_from_seed_indexed(seed, index); let state_key_pairs = light_client::StateKeyPair::generate_from_seed_indexed(seed, index); Self { @@ -81,6 +90,7 @@ impl ValidatorConfig { private_key, stake_value, state_key_pair: state_key_pairs, + is_da, } } @@ -95,11 +105,11 @@ impl ValidatorConfig { impl Default for ValidatorConfig { fn default() -> Self { - Self::generated_from_seed_indexed([0u8; 32], 0, 1) + Self::generated_from_seed_indexed([0u8; 32], 0, 1, true) } } -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// structure of peers' config, including public key, stake value, and state key. pub struct PeerConfig { @@ -161,6 +171,8 @@ pub struct HotShotConfig { pub max_transactions: NonZeroUsize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec>, + /// All public keys known to be DA nodes + pub known_da_nodes: HashSet>, /// List of known non-staking nodes' public keys pub known_nodes_without_stake: Vec, /// My own validator config, including my public key, private key, stake value, serving as private parameter From d256ffab0c05b3ed9eade343d01657c4442f149b Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:33:36 -0400 Subject: [PATCH 0984/1393] lower API channel size (#3039) --- types/src/constants.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/constants.rs b/types/src/constants.rs index b9e95e4a02..1016d80776 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -42,7 +42,7 @@ pub const STATIC_VER_0_1: Version01 = StaticVersion {}; pub const EVENT_CHANNEL_SIZE: usize = 100_000; /// Default channel size for HotShot -> application communication -pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 100_000; +pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 1000; /// Constants for `WebServerNetwork` and `WebServer` /// The Web CDN is not, strictly speaking, bound to the network; it can have its own versioning. From ae0a5c4a8d65fdf6104c12bc4ee0731c687f2d1f Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 25 Apr 2024 15:28:57 -0400 Subject: [PATCH 0985/1393] Move `QuorumProposalValidated` to its own function (#3040) * move functionality over * demote log * avoid spammy errors --- task-impls/src/consensus/mod.rs | 203 ++--------------- task-impls/src/consensus/proposal_helpers.rs | 215 ++++++++++++++++++- types/src/consensus.rs | 2 +- 3 files changed, 224 insertions(+), 196 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 7e02f1bd3b..6d326ea7d4 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,21 +1,17 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - sync::Arc, -}; +use std::{collections::BTreeMap, sync::Arc}; use anyhow::Result; use async_broadcast::Sender; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use chrono::Utc; use committable::Committable; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType, LeafInfo}, + event::{Event, EventType}, message::Proposal, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, @@ -23,12 +19,10 @@ use hotshot_types::{ block_contents::BlockHeader, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, - BlockPayload, }, - utils::Terminator, vote::{Certificate, HasViewNumber}, }; #[cfg(not(feature = "dependency-tasks"))] @@ -54,6 +48,8 @@ use crate::{ }, }; +use self::proposal_helpers::handle_quorum_proposal_validated; + /// Helper functions to handler proposal-related functionality. pub(crate) mod proposal_helpers; @@ -381,190 +377,15 @@ impl> ConsensusTaskState } } Ok(None) => {} - Err(e) => warn!(?e, "Failed to propose"), + Err(e) => warn!("Failed to propose {e:#}"), } } HotShotEvent::QuorumProposalValidated(proposal, _) => { - let consensus = self.consensus.upgradable_read().await; - let view = proposal.get_view_number(); - self.current_proposal = Some(proposal.clone()); - let mut new_anchor_view = consensus.last_decided_view; - let mut new_locked_view = consensus.locked_view; - let mut last_view_number_visited = view; - let mut new_commit_reached: bool = false; - let mut new_decide_reached = false; - let mut new_decide_qc = None; - let mut leaf_views = Vec::new(); - let mut leafs_decided = Vec::new(); - let mut included_txns = HashSet::new(); - let old_anchor_view = consensus.last_decided_view; - let parent_view = proposal.justify_qc.get_view_number(); - let mut current_chain_length = 0usize; - if parent_view + 1 == view { - current_chain_length += 1; - if let Err(e) = consensus.visit_leaf_ancestors( - parent_view, - Terminator::Exclusive(old_anchor_view), - true, - |leaf, state, delta| { - if !new_decide_reached { - if last_view_number_visited == leaf.get_view_number() + 1 { - last_view_number_visited = leaf.get_view_number(); - current_chain_length += 1; - if current_chain_length == 2 { - new_locked_view = leaf.get_view_number(); - new_commit_reached = true; - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - new_decide_qc = Some(leaf.get_justify_qc().clone()); - } else if current_chain_length == 3 { - new_anchor_view = leaf.get_view_number(); - new_decide_reached = true; - } - } else { - // nothing more to do here... we don't have a new chain extension - return false; - } - } - // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above - if new_decide_reached { - let mut leaf = leaf.clone(); - if leaf.get_view_number() == new_anchor_view { - consensus - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.get_height()).unwrap_or(0)); - } - if let Some(cert) = leaf.get_upgrade_certificate() { - if cert.data.decide_by < view { - warn!("Failed to decide an upgrade certificate in time. Ignoring."); - } else { - info!("Updating consensus state with decided upgrade certificate: {:?}", cert); - self.decided_upgrade_cert = Some(cert.clone()); - } - } - // If the block payload is available for this leaf, include it in - // the leaf chain that we send to the client. - if let Some(encoded_txns) = - consensus.saved_payloads.get(&leaf.get_view_number()) - { - let payload = BlockPayload::from_bytes( - encoded_txns, - leaf.get_block_header().metadata(), - ); - - leaf.fill_block_payload_unchecked(payload); - } - - // Get the VID share at the leaf's view number, corresponding to our key - // (if one exists) - let vid_share = consensus - .vid_shares - .get(&leaf.get_view_number()) - .unwrap_or(&HashMap::new()) - .get(&self.public_key).cloned().map(|prop| prop.data); - - // Add our data into a new `LeafInfo` - leaf_views.push(LeafInfo::new(leaf.clone(), Arc::clone(&state), delta.clone(), vid_share)); - leafs_decided.push(leaf.clone()); - if let Some(ref payload) = leaf.get_block_payload() { - for txn in payload - .transaction_commitments(leaf.get_block_header().metadata()) - { - included_txns.insert(txn); - } - } - } - true - }, - ) { - error!("view publish error {e}"); - } - } - - let included_txns_set: HashSet<_> = if new_decide_reached { - included_txns - } else { - HashSet::new() - }; - - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - if new_commit_reached { - consensus.locked_view = new_locked_view; - } - #[allow(clippy::cast_precision_loss)] - if new_decide_reached { - broadcast_event( - Arc::new(HotShotEvent::LeafDecided(leafs_decided)), - &event_stream, - ) - .await; - let decide_sent = broadcast_event( - Event { - view_number: consensus.last_decided_view, - event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - qc: Arc::new(new_decide_qc.unwrap()), - block_size: Some(included_txns_set.len().try_into().unwrap()), - }, - }, - &self.output_event_stream, - ); - let old_anchor_view = consensus.last_decided_view; - consensus.collect_garbage(old_anchor_view, new_anchor_view); - consensus.last_decided_view = new_anchor_view; - consensus - .metrics - .last_decided_time - .set(Utc::now().timestamp().try_into().unwrap()); - consensus.metrics.invalid_qc.set(0); - consensus - .metrics - .last_decided_view - .set(usize::try_from(consensus.last_decided_view.get_u64()).unwrap()); - let cur_number_of_views_per_decide_event = - *self.cur_view - consensus.last_decided_view.get_u64(); - consensus - .metrics - .number_of_views_per_decide_event - .add_point(cur_number_of_views_per_decide_event as f64); - - debug!("Sending Decide for view {:?}", consensus.last_decided_view); - debug!("Decided txns len {:?}", included_txns_set.len()); - decide_sent.await; - debug!("decide send succeeded"); - } - - let new_view = self.current_proposal.clone().unwrap().view_number + 1; - // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_membership.get_leader(new_view) == self.public_key - && consensus.high_qc.view_number - == self.current_proposal.clone().unwrap().view_number; - // todo get rid of this clone - let qc = consensus.high_qc.clone(); - - drop(consensus); - if new_decide_reached { - self.cancel_tasks(new_anchor_view).await; - } - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_stream.clone()) - .await - { - warn!("Failed to propose; error = {e:?}"); - }; - } - - if !self.vote_if_able(&event_stream).await { - return; + if let Err(e) = + handle_quorum_proposal_validated(proposal, event_stream.clone(), self).await + { + info!("Failed to handle QuorumProposalValidated event {e:#}"); } - self.current_proposal = None; } HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quorum vote: {:?}", vote.get_view_number()); @@ -848,7 +669,7 @@ impl> ConsensusTaskState ) .await { - tracing::trace!("Failed to update view; error = {e:?}"); + tracing::trace!("Failed to update view; error = {e}"); return; } diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 347e2afe45..de50b0ae91 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -1,5 +1,9 @@ use core::time::Duration; -use std::{marker::PhantomData, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + sync::Arc, +}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::Sender; @@ -7,19 +11,20 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use chrono::Utc; use committable::Committable; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType}, + event::{Event, EventType, LeafInfo}, message::Proposal, simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, election::Membership, network::{ConnectedNetwork, ConsensusIntentEvent}, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, states::ValidatedState, storage::Storage, @@ -30,7 +35,7 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; use super::ConsensusTaskState; use crate::{ @@ -802,3 +807,205 @@ pub async fn handle_quorum_proposal_recv>( + proposal: &QuorumProposal, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, +) -> Result<()> { + let consensus = task_state.consensus.upgradable_read().await; + let view = proposal.get_view_number(); + task_state.current_proposal = Some(proposal.clone()); + let mut new_anchor_view = consensus.last_decided_view; + let mut new_locked_view = consensus.locked_view; + let mut last_view_number_visited = view; + let mut new_commit_reached: bool = false; + let mut new_decide_reached = false; + let mut new_decide_qc = None; + let mut leaf_views = Vec::new(); + let mut leafs_decided = Vec::new(); + let mut included_txns = HashSet::new(); + let old_anchor_view = consensus.last_decided_view; + let parent_view = proposal.justify_qc.get_view_number(); + let mut current_chain_length = 0usize; + if parent_view + 1 == view { + current_chain_length += 1; + if let Err(e) = consensus.visit_leaf_ancestors( + parent_view, + Terminator::Exclusive(old_anchor_view), + true, + |leaf, state, delta| { + if !new_decide_reached { + if last_view_number_visited == leaf.get_view_number() + 1 { + last_view_number_visited = leaf.get_view_number(); + current_chain_length += 1; + if current_chain_length == 2 { + new_locked_view = leaf.get_view_number(); + new_commit_reached = true; + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + new_decide_qc = Some(leaf.get_justify_qc().clone()); + } else if current_chain_length == 3 { + new_anchor_view = leaf.get_view_number(); + new_decide_reached = true; + } + } else { + // nothing more to do here... we don't have a new chain extension + return false; + } + } + // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above + if new_decide_reached { + let mut leaf = leaf.clone(); + if leaf.get_view_number() == new_anchor_view { + consensus + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.get_height()).unwrap_or(0)); + } + if let Some(cert) = leaf.get_upgrade_certificate() { + if cert.data.decide_by < view { + warn!("Failed to decide an upgrade certificate in time. Ignoring."); + } else { + info!( + "Updating consensus state with decided upgrade certificate: {:?}", + cert + ); + task_state.decided_upgrade_cert = Some(cert.clone()); + } + } + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(encoded_txns) = + consensus.saved_payloads.get(&leaf.get_view_number()) + { + let payload = BlockPayload::from_bytes( + encoded_txns, + leaf.get_block_header().metadata(), + ); + + leaf.fill_block_payload_unchecked(payload); + } + + // Get the VID share at the leaf's view number, corresponding to our key + // (if one exists) + let vid_share = consensus + .vid_shares + .get(&leaf.get_view_number()) + .unwrap_or(&HashMap::new()) + .get(&task_state.public_key) + .cloned() + .map(|prop| prop.data); + + // Add our data into a new `LeafInfo` + leaf_views.push(LeafInfo::new( + leaf.clone(), + Arc::clone(&state), + delta.clone(), + vid_share, + )); + leafs_decided.push(leaf.clone()); + if let Some(ref payload) = leaf.get_block_payload() { + for txn in + payload.transaction_commitments(leaf.get_block_header().metadata()) + { + included_txns.insert(txn); + } + } + } + true + }, + ) { + error!("view publish error {e}"); + } + } + + let included_txns_set: HashSet<_> = if new_decide_reached { + included_txns + } else { + HashSet::new() + }; + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + if new_commit_reached { + consensus.locked_view = new_locked_view; + } + #[allow(clippy::cast_precision_loss)] + if new_decide_reached { + broadcast_event( + Arc::new(HotShotEvent::LeafDecided(leafs_decided)), + &event_stream, + ) + .await; + let decide_sent = broadcast_event( + Event { + view_number: consensus.last_decided_view, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(new_decide_qc.unwrap()), + block_size: Some(included_txns_set.len().try_into().unwrap()), + }, + }, + &task_state.output_event_stream, + ); + let old_anchor_view = consensus.last_decided_view; + consensus.collect_garbage(old_anchor_view, new_anchor_view); + consensus.last_decided_view = new_anchor_view; + consensus + .metrics + .last_decided_time + .set(Utc::now().timestamp().try_into().unwrap()); + consensus.metrics.invalid_qc.set(0); + consensus + .metrics + .last_decided_view + .set(usize::try_from(consensus.last_decided_view.get_u64()).unwrap()); + let cur_number_of_views_per_decide_event = + *task_state.cur_view - consensus.last_decided_view.get_u64(); + consensus + .metrics + .number_of_views_per_decide_event + .add_point(cur_number_of_views_per_decide_event as f64); + + debug!("Sending Decide for view {:?}", consensus.last_decided_view); + debug!("Decided txns len {:?}", included_txns_set.len()); + decide_sent.await; + debug!("decide send succeeded"); + } + + let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; + // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.get_leader(new_view) == task_state.public_key + && consensus.high_qc.view_number + == task_state.current_proposal.clone().unwrap().view_number; + // todo get rid of this clone + let qc = consensus.high_qc.clone(); + + drop(consensus); + if new_decide_reached { + task_state.cancel_tasks(new_anchor_view).await; + } + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + if let Err(e) = task_state + .publish_proposal(qc.view_number + 1, event_stream.clone()) + .await + { + warn!("Failed to propose; error = {e:?}"); + }; + } + + ensure!( + task_state.vote_if_able(&event_stream).await, + "Failed to vote" + ); + task_state.current_proposal = None; + + Ok(()) +} diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 899d8b994a..a3d23f0a9b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -256,7 +256,7 @@ impl Consensus { self.cur_view = view_number; } - /// gather information from the parent chain of leafs + /// gather information from the parent chain of leaves /// # Errors /// If the leaf or its ancestors are not found in storage pub fn visit_leaf_ancestors( From 55c24bfbf9192d5205ba806e07e7eb5bed5fffaa Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 25 Apr 2024 16:50:00 -0400 Subject: [PATCH 0986/1393] demote proposal failure log (#3043) --- task-impls/src/consensus/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 6d326ea7d4..47bc02e066 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -377,7 +377,7 @@ impl> ConsensusTaskState } } Ok(None) => {} - Err(e) => warn!("Failed to propose {e:#}"), + Err(e) => debug!("Failed to propose {e:#}"), } } HotShotEvent::QuorumProposalValidated(proposal, _) => { From 65f08116b822ab967835a913f11967ae2dffc6db Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 26 Apr 2024 12:27:58 -0400 Subject: [PATCH 0987/1393] Deactivate receiver (#3042) --- hotshot/src/lib.rs | 76 ++++++++++++++++++++------------- hotshot/src/tasks/task_state.rs | 6 +-- hotshot/src/types/handle.rs | 6 +-- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 34799857d5..0f72afe1d6 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -144,9 +144,17 @@ pub struct SystemContext> { /// The view to enter when first starting consensus start_view: TYPES::Time, - // global_registry: GlobalRegistry, /// Access to the output event stream. - pub output_event_stream: (Sender>, Receiver>), + #[deprecated( + note = "please use the `get_event_stream` method on `SystemContextHandle` instead. This field will be made private in a future release of HotShot" + )] + pub output_event_stream: (Sender>, InactiveReceiver>), + + /// External event stream for communication with the application. + pub(crate) external_event_stream: (Sender>, InactiveReceiver>), + + /// Anchored leaf provided by the initializer. + anchored_leaf: Leaf, /// access to the internal event stream, in case we need to, say, shut something down #[allow(clippy::type_complexity)] @@ -162,6 +170,7 @@ pub struct SystemContext> { pub storage: Arc>, } impl> Clone for SystemContext { + #![allow(deprecated)] fn clone(&self) -> Self { Self { public_key: self.public_key.clone(), @@ -175,6 +184,8 @@ impl> Clone for SystemContext> Clone for SystemContext> SystemContext { + #![allow(deprecated)] /// Creates a new [`Arc`] with the given configuration options. /// /// To do a full initialization, use `fn init` instead, which will set up background tasks as @@ -221,8 +233,6 @@ impl> SystemContext { )), }; - let state_delta = initializer.state_delta.as_ref(); - // Insert the validated state to state map. let mut validated_state_map = BTreeMap::default(); validated_state_map.insert( @@ -243,28 +253,6 @@ impl> SystemContext { let mut saved_payloads = BTreeMap::new(); saved_leaves.insert(anchored_leaf.commit(), anchored_leaf.clone()); - // Some applications seem to expect a leaf decide event for the genesis leaf, - // which contains only that leaf and nothing else. - if anchored_leaf.get_view_number() == TYPES::Time::genesis() { - broadcast_event( - Event { - view_number: anchored_leaf.get_view_number(), - event: EventType::Decide { - leaf_chain: Arc::new(vec![LeafInfo::new( - anchored_leaf.clone(), - Arc::clone(&validated_state), - state_delta.cloned(), - None, - )]), - qc: Arc::new(QuorumCertificate::genesis(&instance_state)), - block_size: None, - }, - }, - &external_tx, - ) - .await; - } - for leaf in initializer.undecided_leafs { saved_leaves.insert(leaf.commit(), leaf.clone()); } @@ -313,7 +301,9 @@ impl> SystemContext { memberships: Arc::new(memberships), metrics: Arc::clone(&consensus_metrics), internal_event_stream: (internal_tx, internal_rx.deactivate()), - output_event_stream: (external_tx, external_rx), + output_event_stream: (external_tx.clone(), external_rx.clone().deactivate()), + external_event_stream: (external_tx, external_rx.deactivate()), + anchored_leaf: anchored_leaf.clone(), storage: Arc::new(RwLock::new(storage)), }); @@ -339,6 +329,32 @@ impl> SystemContext { )))) .await .expect("Genesis Broadcast failed"); + + { + // Some applications seem to expect a leaf decide event for the genesis leaf, + // which contains only that leaf and nothing else. + if self.anchored_leaf.get_view_number() == TYPES::Time::genesis() { + let (validated_state, state_delta) = + TYPES::ValidatedState::genesis(&self.instance_state); + broadcast_event( + Event { + view_number: self.anchored_leaf.get_view_number(), + event: EventType::Decide { + leaf_chain: Arc::new(vec![LeafInfo::new( + self.anchored_leaf.clone(), + Arc::new(validated_state), + Some(Arc::new(state_delta)), + None, + )]), + qc: Arc::new(QuorumCertificate::genesis(self.instance_state.as_ref())), + block_size: None, + }, + }, + &self.external_event_stream.0, + ) + .await; + } + } } /// Emit an external event @@ -346,7 +362,7 @@ impl> SystemContext { // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); - broadcast_event(event, &self.output_event_stream.0).await; + broadcast_event(event, &self.external_event_stream.0).await; } /// Publishes a transaction asynchronously to the network. @@ -520,7 +536,7 @@ impl> SystemContext { // ED Need to set first first number to 1, or properly trigger the change upon start let registry = Arc::new(TaskRegistry::default()); - let output_event_stream = self.output_event_stream.clone(); + let output_event_stream = self.external_event_stream.clone(); let internal_event_stream = self.internal_event_stream.clone(); let quorum_network = Arc::clone(&self.networks.quorum_network); @@ -708,7 +724,7 @@ impl> ConsensusApi async fn send_event(&self, event: Event) { debug!(?event, "send_event"); - broadcast_event(event, &self.hotshot.output_event_stream.0).await; + broadcast_event(event, &self.hotshot.external_event_stream.0).await; } fn public_key(&self) -> &TYPES::SignatureKey { diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 13742ff6d0..83ad461f45 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -197,7 +197,7 @@ impl> CreateTaskState proposal_cert: None, decided_upgrade_cert: None, version: Arc::clone(&handle.hotshot.version), - output_event_stream: handle.hotshot.output_event_stream.0.clone(), + output_event_stream: handle.hotshot.external_event_stream.0.clone(), current_proposal: None, id: handle.hotshot.id, public_key: handle.public_key().clone(), @@ -230,7 +230,7 @@ impl> CreateTaskState committee_network: Arc::clone(&handle.hotshot.networks.da_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), - output_event_stream: handle.hotshot.output_event_stream.0.clone(), + output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), } @@ -250,7 +250,7 @@ impl> CreateTaskState propose_dependencies: HashMap::new(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), committee_network: Arc::clone(&handle.hotshot.networks.da_network), - output_event_stream: handle.hotshot.output_event_stream.0.clone(), + output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus, instance_state: handle.hotshot.get_instance_state(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 7b5a466fc9..b1b0f2a138 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -27,7 +27,7 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; pub struct SystemContextHandle> { /// The [sender](Sender) and [receiver](Receiver), /// to allow the application to communicate with HotShot. - pub(crate) output_event_stream: (Sender>, Receiver>), + pub(crate) output_event_stream: (Sender>, InactiveReceiver>), /// access to the internal event stream, in case we need to, say, shut something down #[allow(clippy::type_complexity)] @@ -48,7 +48,7 @@ pub struct SystemContextHandle> { impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user pub fn get_event_stream(&self) -> impl Stream> { - self.output_event_stream.1.clone() + self.output_event_stream.1.activate_cloned() } /// HACK so we can know the types when running tests... @@ -57,7 +57,7 @@ impl + 'static> SystemContextHandl /// - type wrapper #[must_use] pub fn get_event_stream_known_impl(&self) -> Receiver> { - self.output_event_stream.1.clone() + self.output_event_stream.1.activate_cloned() } /// HACK so we can know the types when running tests... From 5d589eeca3e21981e8f37cc1c0d1b1f243fcc9dd Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 26 Apr 2024 15:48:03 -0400 Subject: [PATCH 0988/1393] add orchestrator logs (#3053) --- orchestrator/src/client.rs | 46 ++++++++++++++++++++++++++------- task-impls/src/consensus/mod.rs | 3 +-- 2 files changed, 38 insertions(+), 11 deletions(-) diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 506a2f48cf..c4c732d39c 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -11,6 +11,7 @@ use hotshot_types::{ use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; +use tracing::instrument; use vbs::BinarySerializer; use crate::{config::NetworkConfig, OrchestratorVersion}; @@ -254,11 +255,16 @@ impl OrchestratorClient { /// Then return it for the init validator config /// # Panics /// if unable to post + #[instrument(skip_all, name = "orchestrator node index for validator config")] pub async fn get_node_index_for_init_validator_config(&self) -> u16 { let cur_node_index = |client: Client| { async move { - let cur_node_index: Result = - client.post("api/get_tmp_node_index").send().await; + let cur_node_index: Result = client + .post("api/get_tmp_node_index") + .send() + .await + .inspect_err(|err| tracing::error!("{err}")); + cur_node_index } .boxed() @@ -270,16 +276,23 @@ impl OrchestratorClient { /// a successful call requires all nodes to be registered. /// /// Does not fail, retries internally until success. + #[instrument(skip_all, name = "orchestrator config")] pub async fn get_config_after_collection( &self, ) -> NetworkConfig { // Define the request for post-register configurations let get_config_after_collection = |client: Client| { async move { - client + let result = client .get("api/get_config_after_peer_collected") .send() - .await + .await; + + if let Err(ref err) = result { + tracing::error!("{err}"); + } + + result } .boxed() }; @@ -294,6 +307,7 @@ impl OrchestratorClient { /// Blocks until the orchestrator collects all peer's public keys/configs /// # Panics /// if unable to post + #[instrument(skip(self), name = "orchestrator public keys")] pub async fn post_and_wait_all_public_keys( &self, node_index: u64, @@ -307,11 +321,19 @@ impl OrchestratorClient { .body_binary(&PeerConfig::::to_bytes(&my_pub_key)) .unwrap() .send() - .await; + .await + .inspect_err(|err| tracing::error!("{err}")); // wait for all nodes' public keys let wait_for_all_nodes_pub_key = |client: Client| { - async move { client.get("api/peer_pub_ready").send().await }.boxed() + async move { + client + .get("api/peer_pub_ready") + .send() + .await + .inspect_err(|err| tracing::error!("{err}")) + } + .boxed() }; self.wait_for_fn_from_orchestrator::<_, _, ()>(wait_for_all_nodes_pub_key) .await; @@ -323,6 +345,7 @@ impl OrchestratorClient { /// Blocks until the orchestrator indicates all nodes are ready to start /// # Panics /// Panics if unable to post. + #[instrument(skip(self), name = "orchestrator ready signal")] pub async fn wait_for_all_nodes_ready(&self, node_index: u64) -> bool { let send_ready_f = |client: Client| { async move { @@ -331,7 +354,8 @@ impl OrchestratorClient { .body_json(&node_index) .unwrap() .send() - .await; + .await + .inspect_err(|err| tracing::error!("{err}")); result } .boxed() @@ -349,6 +373,7 @@ impl OrchestratorClient { /// Sends the benchmark metrics to the orchestrator /// # Panics /// Panics if unable to post + #[instrument(skip_all, name = "orchestrator metrics")] pub async fn post_bench_results(&self, bench_results: BenchResults) { let _send_metrics_f: Result<(), ClientError> = self .client @@ -356,11 +381,13 @@ impl OrchestratorClient { .body_json(&bench_results) .unwrap() .send() - .await; + .await + .inspect_err(|err| tracing::warn!("{err}")); } /// Generic function that waits for the orchestrator to return a non-error /// Returns whatever type the given function returns + #[instrument(skip_all, name = "waiting for orchestrator")] async fn wait_for_fn_from_orchestrator(&self, f: F) -> GEN where F: Fn(Client) -> Fut, @@ -371,7 +398,8 @@ impl OrchestratorClient { let res = f(client).await; match res { Ok(x) => break x, - Err(_x) => { + Err(err) => { + tracing::info!("{err}"); async_sleep(Duration::from_millis(250)).await; } } diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 47bc02e066..ce46520682 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -36,6 +36,7 @@ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; +use self::proposal_helpers::handle_quorum_proposal_validated; use crate::{ consensus::{ proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, @@ -48,8 +49,6 @@ use crate::{ }, }; -use self::proposal_helpers::handle_quorum_proposal_validated; - /// Helper functions to handler proposal-related functionality. pub(crate) mod proposal_helpers; From 28642f36cde9f422000fbb85cb056a8cf52ed8e9 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Fri, 26 Apr 2024 16:11:18 -0400 Subject: [PATCH 0989/1393] Simplify interaction with builder (#3041) * Remove block building parameters (`min_transactions`, `max_transactions`, `propose_min_round_time`, `propose_max_round_time`) from HotShot, let the builder deal with that * Loop until we successfully get blocks (or time out), but no more than that; take a block from the first set that we receive * Choose block by max fee per byte, not max total fee * Apply timeout to entire builder interaction, not just `get_available_blocks` --- hotshot/src/lib.rs | 16 +- orchestrator/run-config.toml | 8 +- orchestrator/src/config.rs | 41 ++--- task-impls/src/builder.rs | 4 +- task-impls/src/transactions.rs | 260 +++++++++++++----------------- testing/src/test_builder.rs | 25 +-- types/src/lib.rs | 10 +- types/src/traits/consensus_api.rs | 14 +- 8 files changed, 140 insertions(+), 238 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0f72afe1d6..86919099ca 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -706,20 +706,8 @@ impl> ConsensusApi self.hotshot.config.num_nodes_with_stake } - fn propose_min_round_time(&self) -> Duration { - self.hotshot.config.propose_min_round_time - } - - fn propose_max_round_time(&self) -> Duration { - self.hotshot.config.propose_max_round_time - } - - fn max_transactions(&self) -> NonZeroUsize { - self.hotshot.config.max_transactions - } - - fn min_transactions(&self) -> usize { - self.hotshot.config.min_transactions + fn builder_timeout(&self) -> Duration { + self.hotshot.config.builder_timeout } async fn send_event(&self, event: Event) { diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 2ccc11dcdc..946e4319aa 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -45,8 +45,6 @@ num_nodes_without_stake = 0 staked_committee_nodes = 10 non_staked_committee_nodes = 0 fixed_leader_for_gpuvid = 0 -max_transactions = 1 -min_transactions = 1 next_view_timeout = 30000 timeout_ratio = [ 11, @@ -100,12 +98,8 @@ nanos = 0 secs = 0 nanos = 200_000_000 -[config.propose_min_round_time] -secs = 0 -nanos = 0 - # TODO (Keyao) Clean up configuration parameters. # -[config.propose_max_round_time] +[config.builder_timeout] secs = 2 nanos = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 3179c5c11f..e23a2d8856 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -48,10 +48,8 @@ pub struct Libp2pConfig { pub mesh_n: usize, /// timeout before starting the next view pub next_view_timeout: u64, - /// minimum time to wait for a view - pub propose_min_round_time: Duration, - /// maximum time to wait for a view - pub propose_max_round_time: Duration, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, /// time node has been running pub online_time: u64, /// number of transactions per view @@ -169,10 +167,8 @@ pub struct NetworkConfig { pub next_view_timeout: u64, /// timeout before starting next view sync round pub view_sync_timeout: Duration, - /// minimum time to wait for a view - pub propose_min_round_time: Duration, - /// maximum time to wait for a view - pub propose_max_round_time: Duration, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, /// time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// global index of node (for testing purposes a uid) @@ -458,8 +454,7 @@ impl Default for NetworkConfig { next_view_timeout: 10, view_sync_timeout: Duration::from_secs(2), num_bootrap: 5, - propose_min_round_time: Duration::from_secs(0), - propose_max_round_time: Duration::from_secs(10), + builder_timeout: Duration::from_secs(10), data_request_delay: Duration::from_millis(2500), commit_sha: String::new(), builder: BuilderType::default(), @@ -526,8 +521,7 @@ impl From> for NetworkC num_bootrap: val.config.num_bootstrap, next_view_timeout: val.config.next_view_timeout, view_sync_timeout: val.config.view_sync_timeout, - propose_max_round_time: val.config.propose_max_round_time, - propose_min_round_time: val.config.propose_min_round_time, + builder_timeout: val.config.builder_timeout, data_request_delay: val.config.data_request_delay, seed: val.seed, transaction_size: val.transaction_size, @@ -543,8 +537,7 @@ impl From> for NetworkC mesh_outbound_min: libp2p_config.mesh_outbound_min, mesh_n: libp2p_config.mesh_n, next_view_timeout: val.config.next_view_timeout, - propose_min_round_time: val.config.propose_min_round_time, - propose_max_round_time: val.config.propose_max_round_time, + builder_timeout: val.config.builder_timeout, online_time: libp2p_config.online_time, num_txn_per_round: val.transactions_per_round, server_mode: libp2p_config.server_mode, @@ -595,10 +588,6 @@ pub struct HotShotConfigFile { pub non_staked_committee_nodes: usize, /// Number of fixed leaders for GPU VID pub fixed_leader_for_gpuvid: usize, - /// Maximum transactions per block - pub max_transactions: NonZeroUsize, - /// Minimum transactions per block - pub min_transactions: usize, /// Base duration for next-view timeout, in milliseconds pub next_view_timeout: u64, /// Duration for view sync round timeout @@ -611,10 +600,8 @@ pub struct HotShotConfigFile { pub start_delay: u64, /// Number of network bootstrap nodes pub num_bootstrap: usize, - /// The minimum amount of time a leader has to wait to start a round - pub propose_min_round_time: Duration, - /// The maximum amount of time a leader can wait to start a round - pub propose_max_round_time: Duration, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, /// Time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// Builder API base URL @@ -683,8 +670,6 @@ impl From> for HotS num_nodes_with_stake: val.num_nodes_with_stake, num_nodes_without_stake: val.num_nodes_without_stake, known_da_nodes: val.known_da_nodes, - max_transactions: val.max_transactions, - min_transactions: val.min_transactions, known_nodes_with_stake: val.known_nodes_with_stake, known_nodes_without_stake: val.known_nodes_without_stake, my_own_validator_config: val.my_own_validator_config, @@ -697,8 +682,7 @@ impl From> for HotS round_start_delay: val.round_start_delay, start_delay: val.start_delay, num_bootstrap: val.num_bootstrap, - propose_min_round_time: val.propose_min_round_time, - propose_max_round_time: val.propose_max_round_time, + builder_timeout: val.builder_timeout, data_request_delay: val.data_request_delay, election_config: None, builder_url: val.builder_url, @@ -761,16 +745,13 @@ impl Default for HotShotConfigFile { known_da_nodes, non_staked_committee_nodes: 0, fixed_leader_for_gpuvid: 0, - max_transactions: NonZeroUsize::new(100).unwrap(), - min_transactions: 1, next_view_timeout: 10000, view_sync_timeout: Duration::from_millis(1000), timeout_ratio: (11, 10), round_start_delay: 1, start_delay: 1, num_bootstrap: 5, - propose_min_round_time: Duration::from_secs(0), - propose_max_round_time: Duration::from_secs(10), + builder_timeout: Duration::from_secs(10), data_request_delay: Duration::from_millis(200), builder_url: default_builder_url(), } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 4f4b036ba3..59ef040b12 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -73,7 +73,9 @@ impl BuilderClient { /// If the URL is malformed. pub fn new(base_url: impl Into) -> Self { Self { - inner: Client::new(base_url.into().join("block_info").unwrap()), + inner: Client::builder(base_url.into().join("block_info").unwrap()) + .set_timeout(Some(Duration::from_secs(2))) + .build(), _marker: std::marker::PhantomData, } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index edb77b238c..7216a38961 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -3,6 +3,7 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::{bail, Context}; use async_broadcast::Sender; use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; @@ -243,55 +244,35 @@ impl< let task_start_time = Instant::now(); // Find commitment to the block we want to build upon - let parent_commitment = self.latest_known_vid_commitment().await; - - let mut latest_block: Option> = None; - let mut first_iteration = true; - while task_start_time.elapsed() < self.api.propose_max_round_time() - && latest_block.as_ref().map_or(true, |builder_response| { - builder_response - .block_data - .block_payload - .num_transactions(&builder_response.block_data.metadata) - < self.api.min_transactions() - }) - { - // Sleep if this isn't the first iteration - if first_iteration { - first_iteration = false; - } else { - async_sleep(Duration::from_millis(100)).await; + let parent_comm = self.latest_known_vid_commitment().await; + let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( + &self.private_key, + parent_comm.as_ref(), + ) { + Ok(sig) => sig, + Err(err) => { + error!(%err, "Failed to sign block hash"); + return None; } + }; - let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( - &self.private_key, - parent_commitment.as_ref(), - ) else { - error!("Failed to sign block hash"); - continue; - }; - - let mut available_blocks = match async_compatibility_layer::art::async_timeout( + while task_start_time.elapsed() < self.api.builder_timeout() { + match async_compatibility_layer::art::async_timeout( self.api - .propose_max_round_time() + .builder_timeout() .saturating_sub(task_start_time.elapsed()), - self.builder_client.get_available_blocks( - parent_commitment, - self.public_key.clone(), - &request_signature, - ), + self.get_block_from_builder(parent_comm, &parent_comm_sig), ) .await { - // We got available blocks - Ok(Ok(blocks)) => { - tracing::debug!("Got available blocks: {:?}", blocks); - blocks + // We got a block + Ok(Ok(block)) => { + return Some(block); } - // We failed to get available blocks + // We failed to get a block Ok(Err(err)) => { - error!(%err, "Couldn't get available blocks"); + error!(%err, "Couldn't get a block"); // pause a bit async_sleep(Duration::from_millis(100)).await; continue; @@ -299,119 +280,104 @@ impl< // We timed out while getting available blocks Err(err) => { - if latest_block.is_none() { - error!(%err, "Timeout while getting available blocks"); - } - break; + error!(%err, "Timeout while getting available blocks"); + return None; } - }; - - available_blocks.sort_by_key(|block_info| block_info.offered_fee); - - let Some(block_info) = available_blocks.pop() else { - continue; - }; - - // Verify signature over chosen block instead of - // verifying the signature over all the blocks received from builder - if !block_info.sender.validate_block_info_signature( - &block_info.signature, - block_info.block_size, - block_info.offered_fee, - &block_info.block_hash, - ) { - error!("Failed to verify available block info response message signature"); - continue; } + } - // Don't try to re-claim the same block if builder advertises it again - if latest_block.as_ref().map_or(false, |builder_response| { - builder_response - .block_data - .block_payload - .builder_commitment(&builder_response.block_data.metadata) - == block_info.block_hash - }) { - continue; - } + tracing::warn!("could not get a block from the builder in time"); + None + } - let Ok(request_signature) = <::SignatureKey as SignatureKey>::sign( - &self.private_key, - block_info.block_hash.as_ref(), - ) else { - error!("Failed to sign block hash"); - continue; - }; - - let (block, header_input) = futures::join! { - self.builder_client.claim_block(block_info.block_hash.clone(), self.public_key.clone(), &request_signature), - self.builder_client.claim_block_header_input(block_info.block_hash.clone(), self.public_key.clone(), &request_signature) - }; - - let block_data = match block { - Ok(block_data) => { - // verify the signature over the message, construct the builder commitment - let builder_commitment = block_data - .block_payload - .builder_commitment(&block_data.metadata); - if !block_data.sender.validate_builder_signature( - &block_data.signature, - builder_commitment.as_ref(), - ) { - error!("Failed to verify available block data response message signature"); - continue; - } - block_data - } - Err(err) => { - error!(%err, "Failed to claim block data"); - continue; - } - }; - - let header_input = match header_input { - Ok(header_input) => { - // first verify the message signature and later verify the fee_signature - if !header_input.sender.validate_builder_signature( - &header_input.message_signature, - header_input.vid_commitment.as_ref(), - ) { - error!("Failed to verify available block header input data response message signature"); - continue; - } - - // verify the signature over the message - if !header_input.sender.validate_fee_signature( - &header_input.fee_signature, - block_info.offered_fee, - &block_data.metadata, - &header_input.vid_commitment, - ) { - error!("Failed to verify fee signature"); - continue; - } - header_input - } - Err(err) => { - error!(%err, "Failed to claim block header input"); - continue; - } - }; - - let num_txns = block_data - .block_payload - .num_transactions(&block_data.metadata); - - latest_block = Some(BuilderResponses { - blocks_initial_info: block_info, - block_data, - block_header: header_input, - }); - if num_txns >= self.api.min_transactions() { - return latest_block; - } + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "get_block_from_builder", level = "error")] + async fn get_block_from_builder( + &self, + parent_comm: VidCommitment, + parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> anyhow::Result> { + let available_blocks = self + .builder_client + .get_available_blocks(parent_comm, self.public_key.clone(), parent_comm_sig) + .await + .context("getting available blocks")?; + tracing::debug!("Got available blocks: {available_blocks:?}"); + + let block_info = available_blocks + .into_iter() + .max_by(|l, r| { + // We want the block with the highest fee per byte of data we're going to have to + // process, thus our comparision function is: + // (l.offered_fee / l.block_size) < (r.offered_fee / r.block_size) + // To avoid floating point math (which doesn't even have an `Ord` impl) we multiply + // through by the denominators to get + // l.offered_fee * r.block_size < r.offered_fee * l.block_size + // We cast up to u128 to avoid overflow. + (u128::from(l.offered_fee) * u128::from(r.block_size)) + .cmp(&(u128::from(r.offered_fee) * u128::from(l.block_size))) + }) + .context("no available blocks")?; + tracing::debug!("Selected block: {block_info:?}"); + + // Verify signature over chosen block. + if !block_info.sender.validate_block_info_signature( + &block_info.signature, + block_info.block_size, + block_info.offered_fee, + &block_info.block_hash, + ) { + bail!("Failed to verify available block info response message signature"); } - latest_block + + let request_signature = <::SignatureKey as SignatureKey>::sign( + &self.private_key, + block_info.block_hash.as_ref(), + ) + .context("signing block hash")?; + + let (block, header_input) = futures::join! { + self.builder_client.claim_block(block_info.block_hash.clone(), self.public_key.clone(), &request_signature), + self.builder_client.claim_block_header_input(block_info.block_hash.clone(), self.public_key.clone(), &request_signature) + }; + + let block_data = block.context("claiming block data")?; + + // verify the signature over the message, construct the builder commitment + let builder_commitment = block_data + .block_payload + .builder_commitment(&block_data.metadata); + if !block_data + .sender + .validate_builder_signature(&block_data.signature, builder_commitment.as_ref()) + { + bail!("Failed to verify available block data response message signature"); + } + + let header_input = header_input.context("claiming header input")?; + + // first verify the message signature and later verify the fee_signature + if !header_input.sender.validate_builder_signature( + &header_input.message_signature, + header_input.vid_commitment.as_ref(), + ) { + bail!("Failed to verify available block header input data response message signature"); + } + + // verify the signature over the message + if !header_input.sender.validate_fee_signature( + &header_input.fee_signature, + block_info.offered_fee, + &block_data.metadata, + &header_input.vid_commitment, + ) { + bail!("Failed to verify fee signature"); + } + + Ok(BuilderResponses { + blocks_initial_info: block_info, + block_data, + block_header: header_input, + }) } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 84e9318fd1..d80f0f85c0 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -30,10 +30,8 @@ pub struct TimingData { pub round_start_delay: u64, /// Delay after init before starting consensus, in milliseconds pub start_delay: u64, - /// The minimum amount of time a leader has to wait to start a round - pub propose_min_round_time: Duration, - /// The maximum amount of time a leader can wait to start a round - pub propose_max_round_time: Duration, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, /// time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// Delay before sending through the secondary network in CombinedNetworks @@ -68,8 +66,6 @@ pub struct TestDescription { pub txn_description: TxnTaskDescription, /// completion task pub completion_task_description: CompletionTaskDescription, - /// Minimum transactions required for a block - pub min_transactions: usize, /// timing data pub timing_data: TimingData, /// unrelabile networking metadata @@ -85,8 +81,7 @@ impl Default for TimingData { timeout_ratio: (11, 10), round_start_delay: 100, start_delay: 100, - propose_min_round_time: Duration::new(0, 0), - propose_max_round_time: Duration::from_millis(1000), + builder_timeout: Duration::from_millis(1000), data_request_delay: Duration::from_millis(200), secondary_network_delay: Duration::from_millis(1000), view_sync_timeout: Duration::from_millis(2000), @@ -202,7 +197,6 @@ impl Default for TestDescription { let num_nodes_without_stake = 0; Self { timing_data: TimingData::default(), - min_transactions: 0, num_nodes_with_stake, num_nodes_without_stake, start_nodes: num_nodes_with_stake, @@ -247,7 +241,6 @@ impl TestDescription { let TestDescription { num_nodes_with_stake, num_bootstrap_nodes, - min_transactions, timing_data, da_staked_committee_size, da_non_staked_committee_size, @@ -298,8 +291,6 @@ impl TestDescription { known_da_nodes, num_nodes_without_stake: 0, num_bootstrap: num_bootstrap_nodes, - min_transactions, - max_transactions: NonZeroUsize::new(99999).unwrap(), known_nodes_with_stake, known_nodes_without_stake: vec![], my_own_validator_config, @@ -311,9 +302,7 @@ impl TestDescription { timeout_ratio: (11, 10), round_start_delay: 25, start_delay: 1, - // TODO do we use these fields?? - propose_min_round_time: Duration::from_millis(0), - propose_max_round_time: Duration::from_millis(1000), + builder_timeout: Duration::from_millis(1000), data_request_delay: Duration::from_millis(200), // TODO what's the difference between this and the second config? election_config: Some(TYPES::Membership::default_election_config( @@ -328,8 +317,7 @@ impl TestDescription { timeout_ratio, round_start_delay, start_delay, - propose_min_round_time, - propose_max_round_time, + builder_timeout, data_request_delay, secondary_network_delay, view_sync_timeout, @@ -341,8 +329,7 @@ impl TestDescription { a.timeout_ratio = timeout_ratio; a.round_start_delay = round_start_delay; a.start_delay = start_delay; - a.propose_min_round_time = propose_min_round_time; - a.propose_max_round_time = propose_max_round_time; + a.builder_timeout = builder_timeout; a.data_request_delay = data_request_delay; a.view_sync_timeout = view_sync_timeout; }; diff --git a/types/src/lib.rs b/types/src/lib.rs index cbebe11c16..12e080b1ff 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -165,10 +165,6 @@ pub struct HotShotConfig { pub num_nodes_with_stake: NonZeroUsize, /// Number of nodes without stake pub num_nodes_without_stake: usize, - /// Minimum transactions per block - pub min_transactions: usize, - /// Maximum transactions per block - pub max_transactions: NonZeroUsize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec>, /// All public keys known to be DA nodes @@ -195,10 +191,8 @@ pub struct HotShotConfig { pub start_delay: u64, /// Number of network bootstrap nodes pub num_bootstrap: usize, - /// The minimum amount of time a leader has to wait to start a round - pub propose_min_round_time: Duration, - /// The maximum amount of time a leader can wait to start a round - pub propose_max_round_time: Duration, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, /// time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// the election configuration diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index 40484812cb..a8de5efbf3 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -19,18 +19,8 @@ pub trait ConsensusApi>: Send + Sy /// Total number of nodes in the network. Also known as `n`. fn total_nodes(&self) -> NonZeroUsize; - /// The minimum amount of time a leader has to wait before sending a propose - fn propose_min_round_time(&self) -> Duration; - - /// The maximum amount of time a leader can wait before sending a propose. - /// If this time is reached, the leader has to send a propose without transactions. - fn propose_max_round_time(&self) -> Duration; - - /// Retuns the maximum transactions allowed in a block - fn max_transactions(&self) -> NonZeroUsize; - - /// Returns the minimum transactions that must be in a block - fn min_transactions(&self) -> usize; + /// The maximum amount of time a leader can wait to get a block from a builder. + fn builder_timeout(&self) -> Duration; /// Get a reference to the public key. fn public_key(&self) -> &TYPES::SignatureKey; From e580b982abb53b29ca2c162d12107d04b9b1b954 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Fri, 26 Apr 2024 15:23:39 -0500 Subject: [PATCH 0990/1393] Add `InstanceState` to `TransactionTaskState` (#3044) * add InstanceState to from_transactions --------- Co-authored-by: tbro Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- example-types/src/block_types.rs | 2 ++ hotshot/src/tasks/task_state.rs | 1 + task-impls/src/consensus/proposal_helpers.rs | 15 +++++++++++---- task-impls/src/transactions.rs | 15 ++++++++++----- testing/src/block_builder.rs | 7 ++++--- testing/src/view_generator.rs | 14 ++++++++++---- testing/tests/tests_1/consensus_task.rs | 9 +++++++-- testing/tests/tests_1/da_task.rs | 7 +++++-- testing/tests/tests_1/proposal_ordering.rs | 8 +++++++- testing/tests/tests_1/quorum_proposal_task.rs | 16 ++++++++++------ testing/tests/tests_1/upgrade_task.rs | 17 ++++++++++------- testing/tests/tests_1/vid_task.rs | 8 ++++---- types/src/data.rs | 9 +++++++-- types/src/traits/block_contents.rs | 4 ++-- 14 files changed, 90 insertions(+), 42 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index dddb6dcd09..df18b13429 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -10,6 +10,7 @@ use hotshot_types::{ traits::{ block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, + states::InstanceState, BlockPayload, ValidatedState, }, utils::BuilderCommitment, @@ -120,6 +121,7 @@ impl BlockPayload for TestBlockPayload { fn from_transactions( transactions: impl IntoIterator, + _state: Arc, ) -> Result<(Self, Self::Metadata), Self::Error> { let txns_vec: Vec = transactions.into_iter().collect(); Ok(( diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 83ad461f45..b0d9b72618 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -169,6 +169,7 @@ impl, Ver: StaticVersionType> membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), + instance_state: handle.hotshot.get_instance_state(), id: handle.hotshot.id, builder_client: BuilderClient::new(handle.hotshot.config.builder_url.clone()), } diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index de50b0ae91..923d10b145 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -413,14 +413,21 @@ async fn publish_proposal_from_upgrade_cert( // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. ensure!(upgrade_cert.in_interim(cur_view), "Cert is not in interim"); - let (payload, metadata) = ::from_transactions(Vec::new()) - .context("Failed to build null block payload and metadata")?; + let (payload, metadata) = ::from_transactions( + Vec::new(), + Arc::<::InstanceState>::clone(&instance_state), + ) + .context("Failed to build null block payload and metadata")?; let builder_commitment = payload.builder_commitment(&metadata); let null_block_commitment = null_block::commitment(quorum_membership.total_nodes()) .context("Failed to calculate null block commitment")?; - let null_block_fee = null_block::builder_fee::(quorum_membership.total_nodes()) - .context("Failed to calculate null block fee info")?; + + let null_block_fee = null_block::builder_fee::( + quorum_membership.total_nodes(), + Arc::<::InstanceState>::clone(&instance_state), + ) + .context("Failed to calculate null block fee info")?; Ok(async_spawn(async move { create_and_send_proposal( diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7216a38961..4e768ff730 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -76,6 +76,8 @@ pub struct TransactionTaskState< pub public_key: TYPES::SignatureKey, /// Our Private Key pub private_key: ::PrivateKey, + /// InstanceState + pub instance_state: Arc, /// This state's ID pub id: u64, } @@ -171,16 +173,19 @@ impl< .add(1); // Calculate the builder fee for the empty block - let Some(builder_fee) = null_block::builder_fee(self.membership.total_nodes()) - else { + let Some(builder_fee) = null_block::builder_fee( + self.membership.total_nodes(), + Arc::<::InstanceState>::clone(&self.instance_state), + ) else { error!("Failed to get builder fee"); return None; }; // Create an empty block payload and metadata - let Ok((_, metadata)) = - ::BlockPayload::from_transactions(vec![]) - else { + let Ok((_, metadata)) = ::BlockPayload::from_transactions( + vec![], + Arc::<::InstanceState>::clone(&self.instance_state), + ) else { error!("Failed to create empty block payload"); return None; }; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index b7afd78b35..180843b35b 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -20,7 +20,7 @@ use hotshot_builder_api::{ builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; -use hotshot_example_types::block_types::TestTransaction; +use hotshot_example_types::{block_types::TestTransaction, state_types::TestInstanceState}; use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, @@ -525,8 +525,9 @@ async fn build_block( AvailableBlockData, AvailableBlockHeaderInput, ) { - let (block_payload, metadata) = TYPES::BlockPayload::from_transactions(transactions) - .expect("failed to build block payload from transactions"); + let (block_payload, metadata) = + TYPES::BlockPayload::from_transactions(transactions, Arc::new(TestInstanceState {})) + .expect("failed to build block payload from transactions"); let commitment = block_payload.builder_commitment(&metadata); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index f4e14f6f22..8a43cb980d 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -56,8 +56,11 @@ impl TestView { let transactions = Vec::new(); - let (block_payload, metadata) = - TestBlockPayload::from_transactions(transactions.clone()).unwrap(); + let (block_payload, metadata) = TestBlockPayload::from_transactions( + transactions.clone(), + Arc::new(TestInstanceState {}), + ) + .unwrap(); let builder_commitment = block_payload.builder_commitment(&metadata); let (private_key, public_key) = key_pair_for_id(*genesis_view); @@ -171,8 +174,11 @@ impl TestView { let leader_public_key = public_key; - let (block_payload, metadata) = - TestBlockPayload::from_transactions(transactions.clone()).unwrap(); + let (block_payload, metadata) = TestBlockPayload::from_transactions( + transactions.clone(), + Arc::new(TestInstanceState {}), + ) + .unwrap(); let builder_commitment = block_payload.builder_commitment(&metadata); let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 6d8a1f8b02..1b37aae268 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,6 +1,11 @@ use hotshot::{ tasks::{inject_consensus_polls, task_state::CreateTaskState}, }; + +use std::sync::Arc; + + +use hotshot_example_types::state_types::TestInstanceState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ @@ -87,7 +92,7 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![ @@ -368,7 +373,7 @@ async fn test_view_sync_finalize_propose() { builder_commitment, TestMetadata, ViewNumber::new(4), - null_block::builder_fee(4).unwrap(), + null_block::builder_fee(4, Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![ diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index b6a30800c4..43dba3ae9d 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,5 +1,8 @@ use std::sync::Arc; + +use hotshot_example_types::state_types::TestInstanceState; + use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, @@ -73,7 +76,7 @@ async fn test_da_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], @@ -152,7 +155,7 @@ async fn test_da_task_storage_failure() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 32474d7595..e430362255 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -1,4 +1,10 @@ use hotshot::{tasks::task_state::CreateTaskState}; + +use std::sync::Arc; + + +use hotshot_example_types::state_types::TestInstanceState; + use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, @@ -83,7 +89,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { builder_commitment, TestMetadata, ViewNumber::new(node_id), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 619e427c4b..d977c229b2 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,4 +1,8 @@ use hotshot::tasks::{inject_quorum_proposal_polls, task_state::CreateTaskState}; + +use hotshot_example_types::state_types::TestInstanceState; +use std::sync::Arc; + use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, state_types::TestValidatedState, @@ -101,7 +105,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -162,7 +166,7 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -228,7 +232,7 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -282,7 +286,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + fee: null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -298,7 +302,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + fee: null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -324,7 +328,7 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment, metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + fee: null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index fcb242edc8..0ed62a373b 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -1,4 +1,7 @@ use std::time::Duration; +use hotshot_example_types::state_types::TestInstanceState; +use std::sync::Arc; + use hotshot::{ tasks::{inject_consensus_polls, task_state::CreateTaskState}, @@ -254,7 +257,7 @@ async fn test_upgrade_and_consensus_task() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), QCFormed(either::Either::Left(proposals[1].data.justify_qc.clone())), ], @@ -443,7 +446,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[1].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), ], vec![ @@ -454,7 +457,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -466,7 +469,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[3].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -478,7 +481,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[4].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -489,7 +492,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[5].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(6), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], @@ -501,7 +504,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[6].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(7), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index a95c4bf7f4..9730b7a459 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,5 +1,5 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; - +use hotshot_example_types::state_types::TestInstanceState; use hotshot::types::SignatureKey; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, @@ -36,7 +36,7 @@ async fn test_vid_task() { let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; - let (payload, metadata) = TestBlockPayload::from_transactions(transactions.clone()).unwrap(); + let (payload, metadata) = TestBlockPayload::from_transactions(transactions.clone(), Arc::new(TestInstanceState {})).unwrap(); let builder_commitment = payload.builder_commitment(&metadata); let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -86,7 +86,7 @@ async fn test_vid_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), )); input.push(HotShotEvent::BlockReady( vid_disperse.clone(), @@ -108,7 +108,7 @@ async fn test_vid_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), ), 1, ); diff --git a/types/src/data.rs b/types/src/data.rs index 93dff8b203..0777906923 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -701,6 +701,8 @@ impl Leaf { pub mod null_block { #![allow(missing_docs)] + use std::sync::Arc; + use jf_primitives::vid::VidScheme; use memoize::memoize; @@ -731,7 +733,10 @@ pub mod null_block { /// Builder fee data for a null block payload #[must_use] - pub fn builder_fee(num_storage_nodes: usize) -> Option> { + pub fn builder_fee( + num_storage_nodes: usize, + state: Arc, + ) -> Option> { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; @@ -741,7 +746,7 @@ pub mod null_block { ); let (_null_block, null_block_metadata) = - ::from_transactions([]).ok()?; + ::from_transactions([], state).ok()?; match TYPES::BuilderSignatureKey::sign_fee( &priv_key, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 6f692e05f2..84e57f0ac7 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -18,7 +18,7 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use super::signature_key::BuilderSignatureKey; use crate::{ data::Leaf, - traits::{node_implementation::NodeType, ValidatedState}, + traits::{node_implementation::NodeType, states::InstanceState, ValidatedState}, utils::BuilderCommitment, vid::{vid_scheme, VidCommitment, VidSchemeType}, }; @@ -51,7 +51,6 @@ pub trait BlockPayload: /// The type of the transitions we are applying type Transaction: Transaction; - /// Data created during block building which feeds into the block header type Metadata: Clone + Debug @@ -69,6 +68,7 @@ pub trait BlockPayload: /// If the transaction length conversion fails. fn from_transactions( transactions: impl IntoIterator, + state: Arc, ) -> Result<(Self, Self::Metadata), Self::Error>; /// Build a payload with the encoded transaction bytes, metadata, From 0a5bdbd33a3c5700b9dcc8f0d3015117826bf447 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 26 Apr 2024 16:24:17 -0400 Subject: [PATCH 0991/1393] add start_threshold for orchestrator (#3054) --- orchestrator/run-config.toml | 4 ++++ orchestrator/src/config.rs | 5 +++++ orchestrator/src/lib.rs | 6 +++++- testing/src/test_builder.rs | 1 + types/src/lib.rs | 3 +++ 5 files changed, 18 insertions(+), 1 deletion(-) diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 946e4319aa..e543d1ef0d 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -42,6 +42,10 @@ cdn_marshal_address = "127.0.0.1:9000" [config] num_nodes_with_stake = 10 num_nodes_without_stake = 0 +start_threshold = [ + 8, + 10, +] staked_committee_nodes = 10 non_staked_committee_nodes = 0 fixed_leader_for_gpuvid = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index e23a2d8856..a7e44f2892 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -566,6 +566,9 @@ fn default_builder_url() -> Url { #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] pub struct HotShotConfigFile { + /// The proportion of nodes required before the orchestrator issues the ready signal, + /// expressed as (numerator, denominator) + pub start_threshold: (u64, u64), /// Total number of staked nodes in the network pub num_nodes_with_stake: NonZeroUsize, /// Total number of non-staked nodes in the network @@ -667,6 +670,7 @@ impl From> for HotS fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, + start_threshold: val.start_threshold, num_nodes_with_stake: val.num_nodes_with_stake, num_nodes_without_stake: val.num_nodes_without_stake, known_da_nodes: val.known_da_nodes, @@ -737,6 +741,7 @@ impl Default for HotShotConfigFile { Self { num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), + start_threshold: (8, 10), num_nodes_without_stake: 0, my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 3535f24674..d7250320ef 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -334,7 +334,11 @@ where fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; println!("Nodes connected: {}", self.nodes_connected); - if self.nodes_connected >= (self.config.config.num_nodes_with_stake.get() as u64) { + // i.e. nodes_connected >= num_nodes_with_stake * (start_threshold.0 / start_threshold.1) + if self.nodes_connected * self.config.config.start_threshold.1 + >= (self.config.config.num_nodes_with_stake.get() as u64) + * self.config.config.start_threshold.0 + { self.start = true; } Ok(()) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index d80f0f85c0..8421de7f8d 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -286,6 +286,7 @@ impl TestDescription { let config = HotShotConfig { // TODO this doesn't exist anymore execution_type: ExecutionType::Incremental, + start_threshold: (1, 1), num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), // Currently making this zero for simplicity known_da_nodes, diff --git a/types/src/lib.rs b/types/src/lib.rs index 12e080b1ff..e9ca937f5d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -160,6 +160,9 @@ impl Default for PeerConfig { pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, + /// The proportion of nodes required before the orchestrator issues the ready signal, + /// expressed as (numerator, denominator) + pub start_threshold: (u64, u64), /// Total number of nodes in the network // Earlier it was total_nodes pub num_nodes_with_stake: NonZeroUsize, From 58a8c2e582589cfb455401e815d345b5f25c3752 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 26 Apr 2024 21:47:36 -0400 Subject: [PATCH 0992/1393] Fix conditional DA: remove node indexing (#3045) * fix conditional DA: remove node indexing * fix genesis DAC by using DA membership in tests * PR comments * PR comment --- example-types/src/node_types.rs | 3 +- examples/infra/mod.rs | 138 ++++++++---------- examples/push-cdn/whitelist-adapter.rs | 6 +- hotshot/src/lib.rs | 6 +- .../src/traits/election/static_committee.rs | 104 +++++-------- .../src/traits/networking/libp2p_network.rs | 2 +- orchestrator/src/client.rs | 22 +-- orchestrator/src/config.rs | 39 ++--- orchestrator/src/lib.rs | 54 +++---- testing/src/spinning_task.rs | 2 +- testing/src/task_helpers.rs | 25 +--- testing/src/test_builder.rs | 18 +-- testing/src/test_launcher.rs | 4 +- testing/src/test_runner.rs | 26 +--- testing/src/view_generator.rs | 24 ++- testing/tests/tests_1/consensus_task.rs | 85 +++++------ testing/tests/tests_1/da_task.rs | 8 +- testing/tests/tests_1/network_task.rs | 20 +-- testing/tests/tests_1/proposal_ordering.rs | 4 +- testing/tests/tests_1/quorum_proposal_task.rs | 20 ++- testing/tests/tests_1/quorum_vote_task.rs | 12 +- testing/tests/tests_1/upgrade_task.rs | 12 +- testing/tests/tests_3/memory_network.rs | 3 +- types/src/lib.rs | 12 +- types/src/traits/election.rs | 22 +-- types/src/traits/node_implementation.rs | 22 --- 26 files changed, 280 insertions(+), 413 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 0f45946c6f..ca0c29e20c 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -1,5 +1,5 @@ use hotshot::traits::{ - election::static_committee::{GeneralStaticCommittee, StaticCommittee, StaticElectionConfig}, + election::static_committee::{GeneralStaticCommittee, StaticCommittee}, implementations::{ CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork, WebServerNetwork, }, @@ -42,7 +42,6 @@ impl NodeType for TestTypes { type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; type Transaction = TestTransaction; - type ElectionConfigType = StaticElectionConfig; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 0d9bac23f8..29ef95abc4 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -70,7 +70,7 @@ pub struct OrchestratorArgs { /// The url the orchestrator runs on; this should be in the form of `http://localhost:5555` or `http://0.0.0.0:5555` pub url: Url, /// The configuration file to be used for this run - pub config: NetworkConfig, + pub config: NetworkConfig, } #[derive(Parser, Debug, Clone)] @@ -96,10 +96,8 @@ impl Default for ConfigArgs { /// # Panics /// If unable to read the config file from the command line #[allow(clippy::too_many_lines)] -pub fn read_orchestrator_init_config() -> ( - NetworkConfig, - Url, -) { +pub fn read_orchestrator_init_config() -> (NetworkConfig, Url) +{ // assign default setting let mut orchestrator_url = Url::parse("http://localhost:4444").unwrap(); let mut args = ConfigArgs::default(); @@ -211,7 +209,7 @@ pub fn read_orchestrator_init_config() -> ( } else { error!("No config file provided, we'll use the default one."); } - let mut config: NetworkConfig = + let mut config: NetworkConfig = load_config_from_file::(&args.config_file); if let Some(total_nodes_string) = matches.get_one::("total_nodes") { @@ -282,15 +280,14 @@ pub fn read_orchestrator_init_config() -> ( #[must_use] pub fn load_config_from_file( config_file: &str, -) -> NetworkConfig { +) -> NetworkConfig { let config_file_as_string: String = fs::read_to_string(config_file) .unwrap_or_else(|_| panic!("Could not read config file located at {config_file}")); let config_toml: NetworkConfigFile = toml::from_str::>(&config_file_as_string) .expect("Unable to convert config file to TOML"); - let mut config: NetworkConfig = - config_toml.into(); + let mut config: NetworkConfig = config_toml.into(); // my_own_validator_config would be best to load from file, // but its type is too complex to load so we'll generate it from seed now. @@ -314,11 +311,7 @@ pub async fn run_orchestrator< OrchestratorArgs { url, config }: OrchestratorArgs, ) { println!("Starting orchestrator",); - let _result = hotshot_orchestrator::run_orchestrator::< - TYPES::SignatureKey, - TYPES::ElectionConfigType, - >(config, url) - .await; + let _ = hotshot_orchestrator::run_orchestrator::(config, url).await; } /// Helper function to calculate the nuymber of transactions to send per node per round @@ -339,7 +332,7 @@ fn calculate_num_tx_per_round( /// # Panics /// Panics if the web server config doesn't exist in `config` fn webserver_network_from_config( - config: NetworkConfig, + config: NetworkConfig, pub_key: TYPES::SignatureKey, ) -> WebServerNetwork { // Get the configuration for the web server @@ -372,7 +365,7 @@ pub trait RunDA< { /// Initializes networking, returns self async fn initialize_networking( - config: NetworkConfig, + config: NetworkConfig, libp2p_advertise_address: Option, ) -> Self; @@ -394,46 +387,32 @@ pub trait RunDA< let da_network = self.get_da_channel(); let quorum_network = self.get_quorum_channel(); - // Since we do not currently pass the election config type in the NetworkConfig, this will always be the default election config - let quorum_election_config = config.config.election_config.clone().unwrap_or_else(|| { - TYPES::Membership::default_election_config( - config.config.num_nodes_with_stake.get() as u64, - config.config.num_nodes_without_stake as u64, - ) - }); - - let committee_election_config = TYPES::Membership::default_election_config( - // Use the number of _actual_ DA nodes connected for the committee size - config.config.known_da_nodes.len() as u64, - config.config.num_nodes_without_stake as u64, - ); let networks_bundle = Networks { quorum_network: quorum_network.clone().into(), da_network: da_network.clone().into(), _pd: PhantomData, }; + // Create the quorum membership from all nodes + let quorum_membership = ::Membership::create_election( + known_nodes_with_stake.clone(), + known_nodes_with_stake.clone(), + config.config.fixed_leader_for_gpuvid, + ); + + // Create the quorum membership from all nodes, specifying the committee + // as the known da nodes + let da_membership = ::Membership::create_election( + known_nodes_with_stake.clone(), + known_nodes_with_stake, + config.config.fixed_leader_for_gpuvid, + ); + let memberships = Memberships { - quorum_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config.clone(), - config.config.fixed_leader_for_gpuvid, - ), - da_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - committee_election_config, - config.config.fixed_leader_for_gpuvid, - ), - vid_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config.clone(), - config.config.fixed_leader_for_gpuvid, - ), - view_sync_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - quorum_election_config, - config.config.fixed_leader_for_gpuvid, - ), + quorum_membership: quorum_membership.clone(), + da_membership, + vid_membership: quorum_membership.clone(), + view_sync_membership: quorum_membership, }; SystemContext::init( @@ -620,7 +599,7 @@ pub trait RunDA< fn get_quorum_channel(&self) -> QUORUMNET; /// Returns the config for this run - fn get_config(&self) -> NetworkConfig; + fn get_config(&self) -> NetworkConfig; } // WEB SERVER @@ -628,7 +607,7 @@ pub trait RunDA< /// Represents a web server-based run pub struct WebServerDARun { /// the network configuration - config: NetworkConfig, + config: NetworkConfig, /// quorum channel quorum_channel: WebServerNetwork, /// data availability channel @@ -665,7 +644,7 @@ where NetworkVersion: 'static, { async fn initialize_networking( - config: NetworkConfig, + config: NetworkConfig, _libp2p_advertise_address: Option, ) -> WebServerDARun { // Get our own key @@ -701,7 +680,7 @@ where self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } @@ -711,7 +690,7 @@ where /// Represents a Push CDN-based run pub struct PushCdnDaRun { /// The underlying configuration - config: NetworkConfig, + config: NetworkConfig, /// The quorum channel quorum_channel: PushCdnNetwork, /// The DA channel @@ -740,7 +719,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig, + config: NetworkConfig, _libp2p_advertise_address: Option, ) -> PushCdnDaRun { // Get our own key @@ -787,7 +766,7 @@ where self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } @@ -797,7 +776,7 @@ where /// Represents a libp2p-based run pub struct Libp2pDARun { /// the network configuration - config: NetworkConfig, + config: NetworkConfig, /// quorum channel quorum_channel: Libp2pNetwork, TYPES::SignatureKey>, /// data availability channel @@ -832,7 +811,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig, + config: NetworkConfig, libp2p_advertise_address: Option, ) -> Libp2pDARun { // Extrapolate keys for ease of use @@ -885,7 +864,7 @@ where self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } @@ -895,7 +874,7 @@ where /// Represents a combined-network-based run pub struct CombinedDARun { /// the network configuration - config: NetworkConfig, + config: NetworkConfig, /// quorum channel quorum_channel: CombinedNetworks, /// data availability channel @@ -924,7 +903,7 @@ where Self: Sync, { async fn initialize_networking( - config: NetworkConfig, + config: NetworkConfig, libp2p_advertise_address: Option, ) -> CombinedDARun { // Initialize our Libp2p network @@ -981,7 +960,7 @@ where self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn get_config(&self) -> NetworkConfig { self.config.clone() } } @@ -1020,11 +999,13 @@ pub async fn main_entry_point< let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.clone()); // We assume one node will not call this twice to generate two validator_config-s with same identity. - let my_own_validator_config = NetworkConfig::::generate_init_validator_config( - &orchestrator_client, - // This is false for now, we only use it to generate the keypair - false - ).await; + let my_own_validator_config = + NetworkConfig::::generate_init_validator_config( + &orchestrator_client, + // This is false for now, we only use it to generate the keypair + false, + ) + .await; // Derives our Libp2p private key from our private key, and then returns the public key of that key let libp2p_public_key = @@ -1037,18 +1018,17 @@ pub async fn main_entry_point< // It returns the complete config which also includes peer's public key and public config. // This function will be taken solely by sequencer right after OrchestratorClient::new, // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. - let (mut run_config, source) = - NetworkConfig::::get_complete_config( - &orchestrator_client, - args.clone().network_config_file, - my_own_validator_config, - args.advertise_address, - Some(libp2p_public_key), - // If `indexed_da` is true: use the node index to determine if we are a DA node. - true, - ) - .await - .expect("failed to get config"); + let (mut run_config, source) = NetworkConfig::::get_complete_config( + &orchestrator_client, + args.clone().network_config_file, + my_own_validator_config, + args.advertise_address, + Some(libp2p_public_key), + // If `indexed_da` is true: use the node index to determine if we are a DA node. + true, + ) + .await + .expect("failed to get config"); let builder_task = match run_config.builder { BuilderType::External => None, diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index 14ffa8dc91..9666e459f7 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -52,10 +52,8 @@ async fn main() -> Result<()> { // Attempt to get the config from the orchestrator. // Loops internally until the config is received. - let config: NetworkConfig< - ::SignatureKey, - ::ElectionConfigType, - > = orchestrator_client.get_config_after_collection().await; + let config: NetworkConfig<::SignatureKey> = + orchestrator_client.get_config_after_collection().await; tracing::info!("Received config from orchestrator"); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 86919099ca..28809ad528 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -121,7 +121,7 @@ pub struct SystemContext> { private_key: ::PrivateKey, /// Configuration items for this hotshot instance - pub config: HotShotConfig, + pub config: HotShotConfig, /// Networks used by the instance of hotshot pub networks: Arc>, @@ -205,7 +205,7 @@ impl> SystemContext { public_key: TYPES::SignatureKey, private_key: ::PrivateKey, nonce: u64, - config: HotShotConfig, + config: HotShotConfig, memberships: Memberships, networks: Networks, initializer: HotShotInitializer, @@ -483,7 +483,7 @@ impl> SystemContext { public_key: TYPES::SignatureKey, private_key: ::PrivateKey, node_id: u64, - config: HotShotConfig, + config: HotShotConfig, memberships: Memberships, networks: Networks, initializer: HotShotInitializer, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index d2c01455c8..1aa4d967ca 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,20 +1,14 @@ -// use ark_bls12_381::Parameters as Param381; -use std::{marker::PhantomData, num::NonZeroU64}; - use ethereum_types::U256; +// use ark_bls12_381::Parameters as Param381; +use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ signature_key::BLSPubKey, - traits::{ - election::{ElectionConfig, Membership}, - node_implementation::NodeType, - signature_key::{SignatureKey, StakeTableEntryType}, - }, + traits::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}, PeerConfig, }; #[cfg(feature = "randomized-leader-election")] use rand::{rngs::StdRng, Rng}; -#[allow(deprecated)] -use serde::{Deserialize, Serialize}; +use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; /// Dummy implementation of [`Membership`] @@ -22,7 +16,7 @@ use tracing::debug; #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct GeneralStaticCommittee { /// All the nodes participating and their stake - nodes_with_stake: Vec, + all_nodes_with_stake: Vec, /// The nodes on the static committee and their stake committee_nodes_with_stake: Vec, /// builder nodes @@ -46,7 +40,7 @@ impl GeneralStaticCommittee { fixed_leader_for_gpuvid: usize, ) -> Self { Self { - nodes_with_stake: nodes_with_stake.clone(), + all_nodes_with_stake: nodes_with_stake.clone(), committee_nodes_with_stake: nodes_with_stake, committee_nodes_without_stake: nodes_without_stake, fixed_leader_for_gpuvid, @@ -55,21 +49,10 @@ impl GeneralStaticCommittee { } } -/// configuration for static committee. stub for now -#[derive(Default, Clone, Serialize, Deserialize, core::fmt::Debug)] -pub struct StaticElectionConfig { - /// Number of nodes on the committee - num_nodes_with_stake: u64, - /// Number of non staking nodes - num_nodes_without_stake: u64, -} - -impl ElectionConfig for StaticElectionConfig {} - impl Membership for GeneralStaticCommittee where - TYPES: NodeType, + TYPES: NodeType, { /// Clone the public key and corresponding stake table for current elected committee fn get_committee_qc_stake_table(&self) -> Vec { @@ -82,8 +65,8 @@ where )))] /// Index the vector of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { - let index = usize::try_from(*view_number % self.nodes_with_stake.len() as u64).unwrap(); - let res = self.nodes_with_stake[index].clone(); + let index = usize::try_from(*view_number % self.all_nodes_with_stake.len() as u64).unwrap(); + let res = self.all_nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } @@ -123,44 +106,41 @@ where } } - fn default_election_config( - num_nodes_with_stake: u64, - num_nodes_without_stake: u64, - ) -> TYPES::ElectionConfigType { - StaticElectionConfig { - num_nodes_with_stake, - num_nodes_without_stake, - } - } - fn create_election( - entries: Vec>, - config: TYPES::ElectionConfigType, + mut all_nodes: Vec>, + committee_members: Vec>, fixed_leader_for_gpuvid: usize, ) -> Self { - let nodes_with_stake: Vec = entries - .iter() - .map(|x| x.stake_table_entry.clone()) - .collect(); + let mut committee_nodes_with_stake = Vec::new(); + let mut committee_nodes_without_stake = Vec::new(); - let mut committee_nodes_with_stake: Vec = Vec::new(); - - let mut committee_nodes_without_stake: Vec = Vec::new(); - // filter out the committee nodes with non-zero state and zero stake - for node in &nodes_with_stake { - if node.get_stake() == U256::from(0) { - committee_nodes_without_stake.push(PUBKEY::get_public_key(node)); + // Iterate over committee members + for entry in committee_members + .iter() + .map(|entry| entry.stake_table_entry.clone()) + { + if entry.get_stake() > U256::from(0) { + // Positive stake + committee_nodes_with_stake.push(entry); } else { - committee_nodes_with_stake.push(node.clone()); + // Zero stake + committee_nodes_without_stake.push(PUBKEY::get_public_key(&entry)); } } - debug!("Election Membership Size: {}", config.num_nodes_with_stake); - // truncate committee_nodes_with_stake to only `num_nodes` with lower index - // since the `num_nodes_without_stake` are not part of the committee, - committee_nodes_with_stake.truncate(config.num_nodes_with_stake.try_into().unwrap()); - committee_nodes_without_stake.truncate(config.num_nodes_without_stake.try_into().unwrap()); + + // Retain all nodes with stake + all_nodes.retain(|entry| entry.stake_table_entry.get_stake() > U256::from(0)); + + debug!( + "Election Membership Size: {}", + committee_nodes_with_stake.len() + ); + Self { - nodes_with_stake, + all_nodes_with_stake: all_nodes + .into_iter() + .map(|entry| entry.stake_table_entry) + .collect(), committee_nodes_with_stake, committee_nodes_without_stake, fixed_leader_for_gpuvid, @@ -188,13 +168,9 @@ where &self, _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { - // Transfer from committee_nodes_with_stake to pure committee_nodes - (0..self.committee_nodes_with_stake.len()) - .map(|node_id| { - ::SignatureKey::get_public_key( - &self.committee_nodes_with_stake[node_id], - ) - }) + self.committee_nodes_with_stake + .iter() + .map(|node| ::SignatureKey::get_public_key(node)) .collect() } @@ -217,7 +193,7 @@ where impl GeneralStaticCommittee where - TYPES: NodeType, + TYPES: NodeType, { #[allow(clippy::must_use_candidate)] /// get the non-staked builder nodes diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 1dc082c76c..3895200a7a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -345,7 +345,7 @@ impl Libp2pNetwork { /// # Panics /// If we are unable to calculate the replication factor pub async fn from_config( - mut config: NetworkConfig, + mut config: NetworkConfig, bind_address: SocketAddr, pub_key: &K, priv_key: &K::PrivateKey, diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index c4c732d39c..988cb22a27 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -3,11 +3,7 @@ use std::{net::SocketAddr, time::Duration}; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::{ - constants::Version01, - traits::{election::ElectionConfig, signature_key::SignatureKey}, - PeerConfig, -}; +use hotshot_types::{constants::Version01, traits::signature_key::SignatureKey, PeerConfig}; use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; @@ -84,8 +80,6 @@ pub struct BenchResultsDownloadConfig { pub transaction_size: u64, /// The number of rounds pub rounds: usize, - /// The type of leader election used - pub leader_election_type: String, // Results starting here /// The average latency of the transactions @@ -196,11 +190,11 @@ impl OrchestratorClient { /// # Errors /// If we were unable to serialize the Libp2p data #[allow(clippy::type_complexity)] - pub async fn get_config_without_peer( + pub async fn get_config_without_peer( &self, libp2p_address: Option, libp2p_public_key: Option, - ) -> anyhow::Result> { + ) -> anyhow::Result> { // Get the (possible) Libp2p advertise address from our args let libp2p_address = libp2p_address.map(|f| { Multiaddr::try_from(format!( @@ -236,7 +230,7 @@ impl OrchestratorClient { // get the corresponding config let f = |client: Client| { async move { - let config: Result, ClientError> = client + let config: Result, ClientError> = client .post(&format!("api/config/{node_index}")) .send() .await; @@ -277,9 +271,7 @@ impl OrchestratorClient { /// /// Does not fail, retries internally until success. #[instrument(skip_all, name = "orchestrator config")] - pub async fn get_config_after_collection( - &self, - ) -> NetworkConfig { + pub async fn get_config_after_collection(&self) -> NetworkConfig { // Define the request for post-register configurations let get_config_after_collection = |client: Client| { async move { @@ -308,12 +300,12 @@ impl OrchestratorClient { /// # Panics /// if unable to post #[instrument(skip(self), name = "orchestrator public keys")] - pub async fn post_and_wait_all_public_keys( + pub async fn post_and_wait_all_public_keys( &self, node_index: u64, my_pub_key: PeerConfig, is_da: bool, - ) -> NetworkConfig { + ) -> NetworkConfig { // send my public key let _send_pubkey_ready_f: Result<(), ClientError> = self .client diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index a7e44f2892..0856b09716 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,5 +1,4 @@ use std::{ - collections::HashSet, env, fs, net::SocketAddr, num::NonZeroUsize, @@ -11,8 +10,7 @@ use std::{ use clap::ValueEnum; use hotshot_types::{ - traits::{election::ElectionConfig, signature_key::SignatureKey}, - ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, + traits::signature_key::SignatureKey, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; use libp2p::{Multiaddr, PeerId}; use serde_inline_default::serde_inline_default; @@ -156,7 +154,7 @@ impl Default for RandomBuilderConfig { /// a network configuration #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] #[serde(bound(deserialize = ""))] -pub struct NetworkConfig { +pub struct NetworkConfig { /// number of views to run pub rounds: usize, /// number of transactions per view @@ -181,12 +179,10 @@ pub struct NetworkConfig { pub start_delay_seconds: u64, /// name of the key type (for debugging) pub key_type_name: String, - /// election config type (for debugging) - pub election_config_type_name: String, /// the libp2p config pub libp2p_config: Option, /// the hotshot config - pub config: HotShotConfig, + pub config: HotShotConfig, /// the webserver config pub web_server_config: Option, /// the data availability web server config @@ -211,7 +207,7 @@ pub enum NetworkConfigSource { File, } -impl NetworkConfig { +impl NetworkConfig { /// Asynchronously retrieves a `NetworkConfig` either from a file or from an orchestrator. /// /// This function takes an `OrchestratorClient`, an optional file path, and Libp2p-specific parameters. @@ -242,7 +238,7 @@ impl NetworkConfig { file: Option, libp2p_address: Option, libp2p_public_key: Option, - ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { + ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { if let Some(file) = file { info!("Retrieving config from the file"); // if we pass in file, try there first @@ -301,7 +297,7 @@ impl NetworkConfig { libp2p_public_key: Option, // If true, we will use the node index to determine if we are a DA node indexed_da: bool, - ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { + ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { let (mut run_config, source) = Self::from_file_or_orchestrator(client, file, libp2p_address, libp2p_public_key) .await?; @@ -324,8 +320,8 @@ impl NetworkConfig { } // one more round of orchestrator here to get peer's public key/config - let updated_config: NetworkConfig = client - .post_and_wait_all_public_keys::( + let updated_config: NetworkConfig = client + .post_and_wait_all_public_keys::( run_config.node_index, run_config .config @@ -434,7 +430,7 @@ impl NetworkConfig { } } -impl Default for NetworkConfig { +impl Default for NetworkConfig { fn default() -> Self { Self { rounds: ORCHESTRATOR_DEFAULT_NUM_ROUNDS, @@ -446,7 +442,6 @@ impl Default for NetworkConfig { config: HotShotConfigFile::default().into(), start_delay_seconds: 60, key_type_name: std::any::type_name::().to_string(), - election_config_type_name: std::any::type_name::().to_string(), web_server_config: None, da_web_server_config: None, cdn_marshal_address: None, @@ -512,7 +507,7 @@ pub struct NetworkConfigFile { pub random_builder: Option, } -impl From> for NetworkConfig { +impl From> for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { rounds: val.rounds, @@ -544,7 +539,6 @@ impl From> for NetworkC }), config: val.config.into(), key_type_name: std::any::type_name::().to_string(), - election_config_type_name: std::any::type_name::().to_string(), start_delay_seconds: val.start_delay_seconds, cdn_marshal_address: val.cdn_marshal_address, web_server_config: val.web_server_config, @@ -581,7 +575,7 @@ pub struct HotShotConfigFile { pub known_nodes_with_stake: Vec>, #[serde(skip)] /// The known DA nodes' public key and stake values - pub known_da_nodes: HashSet>, + pub known_da_nodes: Vec>, #[serde(skip)] /// The known non-staking nodes' pub known_nodes_without_stake: Vec, @@ -666,7 +660,7 @@ impl ValidatorConfigFile { } } -impl From> for HotShotConfig { +impl From> for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { execution_type: ExecutionType::Continuous, @@ -688,7 +682,6 @@ impl From> for HotS num_bootstrap: val.num_bootstrap, builder_timeout: val.builder_timeout, data_request_delay: val.data_request_delay, - election_config: None, builder_url: val.builder_url, } } @@ -708,9 +701,9 @@ impl From for ValidatorConfig { ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1, val.is_da) } } -impl From for HotShotConfig { +impl From for HotShotConfig { fn from(value: ValidatorConfigFile) -> Self { - let mut config: HotShotConfig = HotShotConfigFile::default().into(); + let mut config: HotShotConfig = HotShotConfigFile::default().into(); config.my_own_validator_config = value.into(); config } @@ -722,7 +715,7 @@ impl Default for HotShotConfigFile { let staked_committee_nodes: usize = 5; // Aggregate the DA nodes - let mut known_da_nodes = HashSet::new(); + let mut known_da_nodes = Vec::new(); let gen_known_nodes_with_stake = (0..10) .map(|node_id| { @@ -731,7 +724,7 @@ impl Default for HotShotConfigFile { // Add to DA nodes based on index if node_id < staked_committee_nodes as u64 { - known_da_nodes.insert(cur_validator_config.get_public_config()); + known_da_nodes.push(cur_validator_config.get_public_config()); cur_validator_config.is_da = true; } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index d7250320ef..69d434947f 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -11,11 +11,7 @@ use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; use csv::Writer; use futures::FutureExt; -use hotshot_types::{ - constants::Version01, - traits::{election::ElectionConfig, signature_key::SignatureKey}, - PeerConfig, -}; +use hotshot_types::{constants::Version01, traits::signature_key::SignatureKey, PeerConfig}; use libp2p::{ identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, @@ -63,13 +59,13 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { /// The state of the orchestrator #[derive(Default, Clone)] -struct OrchestratorState { +struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, /// Tracks the latest temporary index we have generated for init validator's key pair tmp_latest_index: u16, /// The network configuration - config: NetworkConfig, + config: NetworkConfig, /// The total nodes that have posted their public keys nodes_with_pubkey: u64, /// Whether the network configuration has been updated with all the peer's public keys/configs @@ -87,11 +83,9 @@ struct OrchestratorState { nodes_post_results: u64, } -impl - OrchestratorState -{ +impl OrchestratorState { /// create a new [`OrchestratorState`] - pub fn new(network_config: NetworkConfig) -> Self { + pub fn new(network_config: NetworkConfig) -> Self { OrchestratorState { latest_index: 0, tmp_latest_index: 0, @@ -115,7 +109,6 @@ impl transactions_per_round: self.config.transactions_per_round, transaction_size: self.bench_results.transaction_size_in_bytes, rounds: self.config.rounds, - leader_election_type: self.config.election_config_type_name.clone(), avg_latency_in_sec: self.bench_results.avg_latency_in_sec, minimum_latency_in_sec: self.bench_results.minimum_latency_in_sec, maximum_latency_in_sec: self.bench_results.maximum_latency_in_sec, @@ -140,7 +133,7 @@ impl } /// An api exposed by the orchestrator -pub trait OrchestratorApi { +pub trait OrchestratorApi { /// Post an identity to the orchestrator. Takes in optional /// arguments so others can identify us on the Libp2p network. /// # Errors @@ -153,10 +146,7 @@ pub trait OrchestratorApi { /// post endpoint for each node's config /// # Errors /// if unable to serve - fn post_getconfig( - &mut self, - _node_index: u16, - ) -> Result, ServerError>; + fn post_getconfig(&mut self, _node_index: u16) -> Result, ServerError>; /// get endpoint for the next available temporary node index /// # Errors /// if unable to serve @@ -177,7 +167,7 @@ pub trait OrchestratorApi { /// get endpoint for the network config after all peers public keys are collected /// # Errors /// if unable to serve - fn get_config_after_peer_collected(&self) -> Result, ServerError>; + fn get_config_after_peer_collected(&self) -> Result, ServerError>; /// get endpoint for whether or not the run has started /// # Errors /// if unable to serve @@ -192,10 +182,9 @@ pub trait OrchestratorApi { fn post_ready(&mut self) -> Result<(), ServerError>; } -impl OrchestratorApi for OrchestratorState +impl OrchestratorApi for OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey + 'static, - ELECTION: serde::Serialize + Clone + Send + ElectionConfig + 'static, { /// Post an identity to the orchestrator. Takes in optional /// arguments so others can identify us on the Libp2p network. @@ -236,10 +225,7 @@ where // Assumes nodes will set their own index that they received from the // 'identity' endpoint - fn post_getconfig( - &mut self, - _node_index: u16, - ) -> Result, ServerError> { + fn post_getconfig(&mut self, _node_index: u16) -> Result, ServerError> { Ok(self.config.clone()) } @@ -284,7 +270,7 @@ where self.config .config .known_da_nodes - .insert(register_pub_key_with_stake); + .push(register_pub_key_with_stake); }; self.nodes_with_pubkey += 1; @@ -308,7 +294,7 @@ where Ok(self.peer_pub_ready) } - fn get_config_after_peer_collected(&self) -> Result, ServerError> { + fn get_config_after_peer_collected(&self) -> Result, ServerError> { if !self.peer_pub_ready { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, @@ -393,13 +379,12 @@ where } /// Sets up all API routes -fn define_api( +fn define_api( ) -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, - ::State: Send + Sync + OrchestratorApi, + ::State: Send + Sync + OrchestratorApi, KEY: serde::Serialize, - ELECTION: serde::Serialize, VER: 'static, { let api_toml = toml::from_str::(include_str!(concat!( @@ -476,21 +461,16 @@ where /// This errors if tide disco runs into an issue during serving /// # Panics /// This panics if unable to register the api with tide disco -pub async fn run_orchestrator( - network_config: NetworkConfig, - url: Url, -) -> io::Result<()> +pub async fn run_orchestrator(network_config: NetworkConfig, url: Url) -> io::Result<()> where KEY: SignatureKey + 'static + serde::Serialize, - ELECTION: ElectionConfig + 'static + serde::Serialize, { let web_api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); - let state: RwLock> = - RwLock::new(OrchestratorState::new(network_config)); + let state: RwLock> = RwLock::new(OrchestratorState::new(network_config)); - let mut app = App::>, ServerError>::with_state(state); + let mut app = App::>, ServerError>::with_state(state); app.register_module::("api", web_api.unwrap()) .expect("Error registering api"); tracing::error!("listening on {:?}", url); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 3b87d85dad..5ca2754c16 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -67,7 +67,7 @@ impl< N: ConnectedNetwork, TYPES::SignatureKey>, > TestTaskState for SpinningTask where - I: TestableNodeImplementation, + I: TestableNodeImplementation, I: NodeImplementation< TYPES, QuorumNetwork = N, diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 43b1dd642d..02442b5648 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -61,20 +61,6 @@ pub async fn build_system_handle( let _known_nodes_without_stake = config.known_nodes_without_stake.clone(); - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - ::Membership::default_election_config( - config.num_nodes_with_stake.get() as u64, - config.num_nodes_without_stake as u64, - ) - }); - - let committee_election_config = config.election_config.clone().unwrap_or_else(|| { - ::Membership::default_election_config( - // Use the _actual_ number of known DA nodes instead of the expected number of DA nodes - config.known_da_nodes.len() as u64, - config.num_nodes_without_stake as u64, - ) - }); let networks_bundle = Networks { quorum_network: networks.0.clone(), da_network: networks.1.clone(), @@ -84,22 +70,22 @@ pub async fn build_system_handle( let memberships = Memberships { quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - committee_election_config, + config.known_da_nodes.clone(), config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config, + known_nodes_with_stake, config.fixed_leader_for_gpuvid, ), }; @@ -279,6 +265,7 @@ pub fn build_vid_proposal( pub fn build_da_certificate( quorum_membership: &::Membership, + da_membership: &::Membership, view_number: ViewNumber, transactions: Vec, public_key: &::SignatureKey, @@ -295,7 +282,7 @@ pub fn build_da_certificate( build_cert::, DACertificate>( da_data, - quorum_membership, + da_membership, view_number, public_key, private_key, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 8421de7f8d..26ebc85922 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,11 +1,10 @@ -use std::{collections::HashSet, num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}; use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::{ - traits::{election::Membership, node_implementation::NodeType}, - ExecutionType, HotShotConfig, ValidatorConfig, + traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, }; use tide_disco::Url; @@ -230,7 +229,7 @@ impl TestDescription { #[must_use] pub fn gen_launcher< TYPES: NodeType, - I: TestableNodeImplementation, + I: TestableNodeImplementation, >( self, node_id: u64, @@ -248,7 +247,7 @@ impl TestDescription { .. } = self.clone(); - let mut known_da_nodes = HashSet::new(); + let mut known_da_nodes = Vec::new(); // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. let known_nodes_with_stake = (0..num_nodes_with_stake) @@ -263,7 +262,7 @@ impl TestDescription { // Add the node to the known DA nodes based on the index (for tests) if node_id_ < da_staked_committee_size { - known_da_nodes.insert(cur_validator_config.get_public_config()); + known_da_nodes.push(cur_validator_config.get_public_config()); } cur_validator_config.get_public_config() @@ -305,11 +304,6 @@ impl TestDescription { start_delay: 1, builder_timeout: Duration::from_millis(1000), data_request_delay: Duration::from_millis(200), - // TODO what's the difference between this and the second config? - election_config: Some(TYPES::Membership::default_election_config( - num_nodes_with_stake as u64, - 0, - )), // Placeholder until we spin up the builder builder_url: Url::parse("http://localhost:9999").expect("Valid URL"), }; @@ -325,7 +319,7 @@ impl TestDescription { } = timing_data; let mod_config = // TODO this should really be using the timing config struct - |a: &mut HotShotConfig| { + |a: &mut HotShotConfig| { a.next_view_timeout = next_view_timeout; a.timeout_ratio = timeout_ratio; a.round_start_delay = round_start_delay; diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 8f2ed21cc0..5845350b97 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -29,7 +29,7 @@ pub struct ResourceGenerators>, /// configuration used to generate each hotshot node - pub config: HotShotConfig, + pub config: HotShotConfig, } /// test launcher @@ -58,7 +58,7 @@ impl> TestLauncher), + mut f: impl FnMut(&mut HotShotConfig), ) -> Self { f(&mut self.resource_generator.config); self diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 23d06cad82..85e5025666 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -61,7 +61,7 @@ pub type LateNodeContext = Either< ( >::Storage, Memberships, - HotShotConfig<::SignatureKey, ::ElectionConfigType>, + HotShotConfig<::SignatureKey>, ), >; @@ -119,7 +119,7 @@ impl< N: ConnectedNetwork, TYPES::SignatureKey>, > TestRunner where - I: TestableNodeImplementation, + I: TestableNodeImplementation, I: NodeImplementation< TYPES, QuorumNetwork = N, @@ -335,12 +335,7 @@ where let mut results = vec![]; let config = self.launcher.resource_generator.config.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - TYPES::Membership::default_election_config( - config.num_nodes_with_stake.get() as u64, - config.num_nodes_without_stake as u64, - ) - }); + let (mut builder_task, builder_url) = B::start(config.num_nodes_with_stake.into(), B::Config::default()).await; for i in 0..total { @@ -349,30 +344,25 @@ where self.next_node_id += 1; tracing::debug!("launch node {}", i); - let committee_election_config = I::committee_election_config_generator(); let memberships = Memberships { quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - committee_election_config( - // Use the _actual_ number of DA nodes instead of expected - config.known_da_nodes.len() as u64, - config.num_nodes_without_stake as u64, - ), + config.known_da_nodes.clone(), config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), }; @@ -446,7 +436,7 @@ where networks: Networks, memberships: Memberships, initializer: HotShotInitializer, - config: HotShotConfig, + config: HotShotConfig, validator_config: ValidatorConfig, storage: I::Storage, ) -> Arc> { diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 8a43cb980d..321e5e6ac9 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -37,6 +37,7 @@ pub struct TestView { pub leaf: Leaf, pub view_number: ViewNumber, pub quorum_membership: ::Membership, + pub da_membership: ::Membership, pub vid_proposal: ( Vec>>, ::SignatureKey, @@ -51,7 +52,10 @@ pub struct TestView { } impl TestView { - pub fn genesis(quorum_membership: &::Membership) -> Self { + pub fn genesis( + quorum_membership: &::Membership, + da_membership: &::Membership, + ) -> Self { let genesis_view = ViewNumber::new(1); let transactions = Vec::new(); @@ -78,6 +82,7 @@ impl TestView { let da_certificate = build_da_certificate( quorum_membership, + da_membership, genesis_view, transactions.clone(), &public_key, @@ -136,6 +141,7 @@ impl TestView { leaf, view_number: genesis_view, quorum_membership: quorum_membership.clone(), + da_membership: da_membership.clone(), vid_proposal: (vid_proposal, public_key), da_certificate, transactions, @@ -162,6 +168,8 @@ impl TestView { let next_view = max(old_view, self.view_number) + 1; let quorum_membership = &self.quorum_membership; + let da_membership = &self.da_membership; + let transactions = &self.transactions; let quorum_data = QuorumData { @@ -192,6 +200,7 @@ impl TestView { let da_certificate = build_da_certificate( quorum_membership, + da_membership, next_view, transactions.clone(), &public_key, @@ -326,6 +335,7 @@ impl TestView { leaf, view_number: next_view, quorum_membership: quorum_membership.clone(), + da_membership: self.da_membership.clone(), vid_proposal: (vid_proposal, public_key), da_certificate, leader_public_key, @@ -393,13 +403,18 @@ impl TestView { pub struct TestViewGenerator { pub current_view: Option, pub quorum_membership: ::Membership, + pub da_membership: ::Membership, } impl TestViewGenerator { - pub fn generate(quorum_membership: ::Membership) -> Self { + pub fn generate( + quorum_membership: ::Membership, + da_membership: ::Membership, + ) -> Self { TestViewGenerator { current_view: None, quorum_membership, + da_membership, } } @@ -479,7 +494,10 @@ impl Iterator for TestViewGenerator { if let Some(view) = &self.current_view { self.current_view = Some(TestView::next_view(view)); } else { - self.current_view = Some(TestView::genesis(&self.quorum_membership)); + self.current_view = Some(TestView::genesis( + &self.quorum_membership, + &self.da_membership, + )); } self.current_view.clone() diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 1b37aae268..05646e60dd 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,12 +1,9 @@ -use hotshot::{ - tasks::{inject_consensus_polls, task_state::CreateTaskState}, -}; +use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use std::sync::Arc; - -use hotshot_example_types::state_types::TestInstanceState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::state_types::TestInstanceState; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::{ @@ -40,6 +37,7 @@ async fn test_consensus_task() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -48,7 +46,8 @@ async fn test_consensus_task() { let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -92,7 +91,11 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), ], outputs: vec![ @@ -103,11 +106,7 @@ async fn test_consensus_task() { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; inject_consensus_polls(&consensus_state).await; @@ -131,8 +130,10 @@ async fn test_consensus_vote() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -163,11 +164,7 @@ async fn test_consensus_vote() { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; inject_consensus_polls(&consensus_state).await; run_test_script(vec![view_1], consensus_state).await; @@ -182,8 +179,10 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -232,11 +231,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; inject_consensus_polls(&consensus_state).await; run_test_script(vec![view_1, view_2], consensus_state).await; @@ -270,6 +265,8 @@ async fn test_view_sync_finalize_propose() { let handle = build_system_handle(4).await.0; let (priv_key, pub_key) = key_pair_for_id(4); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(4)); @@ -282,7 +279,8 @@ async fn test_view_sync_finalize_propose() { round: ViewNumber::new(4), }; - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -384,11 +382,7 @@ async fn test_view_sync_finalize_propose() { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; let stages = vec![view_1, view_2_3, view_4]; @@ -407,13 +401,15 @@ async fn test_view_sync_finalize_vote() { let handle = build_system_handle(5).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { relay: 4, round: ViewNumber::new(5), }; - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -483,11 +479,7 @@ async fn test_view_sync_finalize_vote() { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; let stages = vec![view_1, view_2, view_3]; @@ -506,13 +498,15 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let handle = build_system_handle(5).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { relay: 4, round: ViewNumber::new(10), }; - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -592,11 +586,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; let stages = vec![view_1, view_2, view_3]; @@ -616,7 +606,10 @@ async fn test_vid_disperse_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.get_storage().write().await.should_return_err = true; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -646,11 +639,7 @@ async fn test_vid_disperse_storage_failure() { asserts: vec![], }; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; inject_consensus_polls(&consensus_state).await; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 43dba3ae9d..2aa0a9472f 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -31,6 +31,8 @@ async fn test_da_task() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -41,7 +43,7 @@ async fn test_da_task() { handle.hotshot.memberships.quorum_membership.total_nodes(), ); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -110,6 +112,8 @@ async fn test_da_task_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.get_storage().write().await.should_return_err = true; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -120,7 +124,7 @@ async fn test_da_task_storage_failure() { handle.hotshot.memberships.quorum_membership.total_nodes(), ); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 04a7594911..631c6add2e 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -40,16 +40,10 @@ async fn test_network_task() { let config = launcher.resource_generator.config.clone(); let public_key = config.my_own_validator_config.public_key; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - ::Membership::default_election_config( - config.num_nodes_with_stake.get() as u64, - config.num_nodes_without_stake as u64, - ) - }); let membership = ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake, config.fixed_leader_for_gpuvid, ); let channel = networks.0.clone(); @@ -68,7 +62,7 @@ async fn test_network_task() { let task = Task::new(tx.clone(), rx, task_reg.clone(), network_state); task_reg.run_task(task).await; - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); @@ -109,16 +103,10 @@ async fn test_network_storage_fail() { let config = launcher.resource_generator.config.clone(); let public_key = config.my_own_validator_config.public_key; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let quorum_election_config = config.election_config.clone().unwrap_or_else(|| { - ::Membership::default_election_config( - config.num_nodes_with_stake.get() as u64, - config.num_nodes_without_stake as u64, - ) - }); let membership = ::Membership::create_election( known_nodes_with_stake.clone(), - quorum_election_config.clone(), + known_nodes_with_stake, config.fixed_leader_for_gpuvid, ); let channel = networks.0.clone(); @@ -137,7 +125,7 @@ async fn test_network_storage_fail() { let task = Task::new(tx.clone(), rx, task_reg.clone(), network_state); task_reg.run_task(task).await; - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index e430362255..a5a53fd0f3 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -38,6 +38,8 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let node_id = 2; let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(node_id)); @@ -48,7 +50,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index d977c229b2..6ff624b12f 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -52,9 +52,11 @@ async fn test_quorum_proposal_task_quorum_proposal() { // case in the genesis view. let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -131,10 +133,12 @@ async fn test_quorum_proposal_task_qc_timeout() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -195,10 +199,12 @@ async fn test_quorum_proposal_task_view_sync() { // case in the genesis view. let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -265,9 +271,11 @@ async fn test_quorum_proposal_task_propose_now() { let handle = build_system_handle(2).await.0; let (private_key, public_key) = key_pair_for_id(2); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -388,8 +396,10 @@ async fn test_quorum_proposal_task_with_incomplete_events() { // case in the genesis view. let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 09f4fbadc6..ee7bfdb5ac 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -21,8 +21,10 @@ async fn test_quorum_vote_task_success() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); generator.next(); let view = generator.current_view.clone().unwrap(); @@ -70,8 +72,10 @@ async fn test_quorum_vote_task_vote_now() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); generator.next(); let view = generator.current_view.clone().unwrap(); @@ -116,8 +120,10 @@ async fn test_quorum_vote_task_miss_dependency() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 0ed62a373b..8f321895d9 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -42,6 +42,8 @@ async fn test_consensus_task_upgrade() { let handle = build_system_handle(1).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; @@ -61,7 +63,7 @@ async fn test_consensus_task_upgrade() { let mut vids = Vec::new(); let mut leaders = Vec::new(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); @@ -176,6 +178,8 @@ async fn test_upgrade_and_consensus_task() { let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let other_handles = futures::future::join_all((0..=9).map(build_system_handle)).await; @@ -198,7 +202,7 @@ async fn test_upgrade_and_consensus_task() { let mut leaders = Vec::new(); let mut views = Vec::new(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); @@ -336,6 +340,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let handle = build_system_handle(6).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; @@ -356,7 +362,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let mut leaders = Vec::new(); let mut views = Vec::new(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone()); + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 15bc954497..cd6e04f2e9 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -3,7 +3,7 @@ use std::collections::BTreeSet; use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; -use hotshot::traits::election::static_committee::{GeneralStaticCommittee, StaticElectionConfig}; +use hotshot::traits::election::static_committee::GeneralStaticCommittee; use hotshot::traits::implementations::{MasterMap, MemoryNetwork, NetworkingMetricsValue}; use hotshot::traits::NodeImplementation; use hotshot::types::SignatureKey; @@ -50,7 +50,6 @@ impl NodeType for Test { type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; type Transaction = TestTransaction; - type ElectionConfigType = StaticElectionConfig; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; type Membership = GeneralStaticCommittee; diff --git a/types/src/lib.rs b/types/src/lib.rs index e9ca937f5d..4fe91f8d8f 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,13 +1,11 @@ //! Types and Traits for the `HotShot` consensus module -use std::{ - collections::HashSet, fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration, -}; +use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use bincode::Options; use displaydoc::Display; use light_client::StateVerKey; use tracing::error; -use traits::{election::ElectionConfig, signature_key::SignatureKey}; +use traits::signature_key::SignatureKey; use url::Url; use crate::utils::bincode_opts; @@ -157,7 +155,7 @@ impl Default for PeerConfig { /// Holds configuration for a `HotShot` #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] -pub struct HotShotConfig { +pub struct HotShotConfig { /// Whether to run one view or continuous views pub execution_type: ExecutionType, /// The proportion of nodes required before the orchestrator issues the ready signal, @@ -171,7 +169,7 @@ pub struct HotShotConfig { /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec>, /// All public keys known to be DA nodes - pub known_da_nodes: HashSet>, + pub known_da_nodes: Vec>, /// List of known non-staking nodes' public keys pub known_nodes_without_stake: Vec, /// My own validator config, including my public key, private key, stake value, serving as private parameter @@ -198,8 +196,6 @@ pub struct HotShotConfig { pub builder_timeout: Duration, /// time to wait until we request data associated with a proposal pub data_request_delay: Duration, - /// the election configuration - pub election_config: Option, /// Builder API base URL pub builder_url: Url, } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 956f6654e2..08829b9ae2 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -21,33 +21,15 @@ pub enum ElectionError { MathError, } -/// election config -pub trait ElectionConfig: - Default - + Clone - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + Sync - + Send - + core::fmt::Debug -{ -} - /// A protocol for determining membership in and participating in a committee. pub trait Membership: Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static { - /// generate a default election configuration - fn default_election_config( - num_nodes_with_stake: u64, - num_nodes_without_stake: u64, - ) -> TYPES::ElectionConfigType; - /// create an election /// TODO may want to move this to a testableelection trait fn create_election( - entries: Vec>, - config: TYPES::ElectionConfigType, + all_nodes: Vec>, + committee_members: Vec>, fixed_leader_for_gpuvid: usize, ) -> Self; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 4f11e66dba..6935ca4653 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -17,7 +17,6 @@ use serde::{Deserialize, Serialize}; use super::{ block_contents::{BlockHeader, TestableBlock, Transaction}, - election::ElectionConfig, network::{ AsyncGenerator, ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation, }, @@ -59,13 +58,6 @@ pub trait NodeImplementation: #[allow(clippy::type_complexity)] #[async_trait] pub trait TestableNodeImplementation: NodeImplementation { - /// Election config for the DA committee - type CommitteeElectionConfig; - - /// Generates a committee-specific election - fn committee_election_config_generator( - ) -> Box Self::CommitteeElectionConfig + 'static>; - /// Creates random transaction if possible /// otherwise panics /// `padding` is the bytes of padding to add to the transaction @@ -108,18 +100,6 @@ where I::QuorumNetwork: TestableNetworkingImplementation, I::CommitteeNetwork: TestableNetworkingImplementation, { - type CommitteeElectionConfig = TYPES::ElectionConfigType; - - fn committee_election_config_generator( - ) -> Box Self::CommitteeElectionConfig + 'static> { - Box::new(|num_nodes_with_stake, num_nodes_without_stake| { - ::Membership::default_election_config( - num_nodes_with_stake, - num_nodes_without_stake, - ) - }) - } - fn state_create_random_transaction( state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, @@ -228,8 +208,6 @@ pub trait NodeType: /// /// This should be equal to `BlockPayload::Transaction` type Transaction: Transaction; - /// The election config type that this hotshot setup is using. - type ElectionConfigType: ElectionConfig; /// The instance-level state type that this hotshot setup is using. type InstanceState: InstanceState; From 2c432e3a01f1fe0dc8acd39dd231405160933c90 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 29 Apr 2024 18:02:58 +0800 Subject: [PATCH 0993/1393] [CX_CLEANUP] - Add VID share validation (#3062) * Add vid validation * Fix membership --- task-impls/src/consensus/mod.rs | 76 ++++++++++++++++++++------------- task-impls/src/quorum_vote.rs | 22 +++++++--- 2 files changed, 64 insertions(+), 34 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index ce46520682..4ba2262b04 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,5 +1,17 @@ use std::{collections::BTreeMap, sync::Arc}; +use self::proposal_helpers::handle_quorum_proposal_validated; +use crate::{ + consensus::{ + proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, + view_change::update_view, + }, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; use anyhow::Result; use async_broadcast::Sender; use async_lock::RwLock; @@ -23,6 +35,7 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, + vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; #[cfg(not(feature = "dependency-tasks"))] @@ -31,24 +44,12 @@ use hotshot_types::{ message::GeneralConsensusMessage, simple_vote::QuorumData, }; +use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; -use self::proposal_helpers::handle_quorum_proposal_validated; -use crate::{ - consensus::{ - proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, - view_change::update_view, - }, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; - /// Helper functions to handler proposal-related functionality. pub(crate) mod proposal_helpers; @@ -286,33 +287,50 @@ impl> ConsensusTaskState false } - /// Validates whether the VID Dispersal Proposal is correctly signed + /// Validate the VID disperse is correctly signed and has the correct share. #[cfg(not(feature = "dependency-tasks"))] fn validate_disperse(&self, disperse: &Proposal>) -> bool { let view = disperse.data.get_view_number(); let payload_commitment = disperse.data.payload_commitment; - // Check whether the data comes from the right leader for this view - if self + + // Check whether the data satisfies one of the following. + // * From the right leader for this view. + // * Calculated and signed by the current node. + // * Signed by one of the staked DA committee members. + if !self .quorum_membership .get_leader(view) .validate(&disperse.signature, payload_commitment.as_ref()) + && !self + .public_key + .validate(&disperse.signature, payload_commitment.as_ref()) { - return true; + let mut validated = false; + for da_member in self.committee_membership.get_staked_committee(view) { + if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { + validated = true; + break; + } + } + if !validated { + return false; + } } - // or the data was calculated and signed by the current node - if self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) + + // Validate the VID share. + if vid_scheme(self.quorum_membership.total_nodes()) + .verify_share( + &disperse.data.share, + &disperse.data.common, + &payload_commitment, + ) + .is_err() { - return true; - } - // or the data was signed by one of the staked DA committee members - for da_member in self.committee_membership.get_staked_committee(view) { - if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { - return true; - } + debug!("Invalid VID share."); + return false; } - false + + true } #[cfg(feature = "dependency-tasks")] diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 814c5348c0..80a10844a9 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,5 +1,9 @@ use std::{collections::HashMap, sync::Arc}; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; use async_broadcast::{Receiver, Sender}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] @@ -26,17 +30,14 @@ use hotshot_types::{ ValidatedState, }, utils::{Terminator, View, ViewInner}, + vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; +use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; - /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { @@ -652,6 +653,17 @@ impl> QuorumVoteTaskState Date: Mon, 29 Apr 2024 08:36:36 -0400 Subject: [PATCH 0994/1393] [CX_CLEANUP] - Create a new task for `QuorumProposalRecv` event (#2970) * partial commit * add propose now * initiate vote now event * support now types * fix lint * one more fix * make linter happy * tmp commit * finish up testing * rename * allow large enum variations * fix comments * fix import * swapping branches * Fix some comments * fix build * separate update view and move consensus to a split package * unused import * finish moving files * improve docs * revert * merge lower branch target * update lockfile * paring down proposal recv method * moving functions over * complete refactor * fix lint * fix build and lint * Add proposal event handling, rename file * Fix too many args and lines lints * new method for proposing * move feature gate * tmp revert * fix test failures * Fix build after merge * update implementation to integrate * remove dead code and too many args comments * commit broken option for mutli type inputs * working on getting parent, switching branches * Fix an import * Fix lints * Fix lints for dependency-task feature except ConsensusApi errors * Restore justfile * Restore change to ConsensusApi * build vote now * fix build * fix build * gating more features, remove recv code from qp * lint fail, fix build * move functionality over * roll back inline gating * fix lints * remove dead variable * docs * cancel tasks * separate out consensus types, remove todo log * fix build * rename * demote * demote * demote and do not dump trace * fix name lint * remove handling of QuorumProposalRecv from vote dependency task * clippy * fix tests * remove superfluous log * fix errneous view number * Remove dummy publish_proposal * move gates to block the entire event --------- Co-authored-by: Keyao Shen --- hotshot/src/lib.rs | 3 + task-impls/src/consensus/mod.rs | 106 ++++---- task-impls/src/consensus/proposal_helpers.rs | 133 ++++++---- task-impls/src/lib.rs | 3 + task-impls/src/quorum_proposal.rs | 235 +---------------- task-impls/src/quorum_proposal_recv.rs | 208 +++++++++++++++ task-impls/src/quorum_vote.rs | 260 ++----------------- testing/tests/tests_1/quorum_vote_task.rs | 53 ++-- types/src/consensus.rs | 14 +- types/src/vote.rs | 5 +- 10 files changed, 430 insertions(+), 590 deletions(-) create mode 100644 task-impls/src/quorum_proposal_recv.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 28809ad528..9ff2e20d49 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -280,7 +280,10 @@ impl> SystemContext { locked_view: anchored_leaf.get_view_number(), high_qc: initializer.high_qc, metrics: Arc::clone(&consensus_metrics), + dontuse_decided_upgrade_cert: None, + dontuse_formed_upgrade_certificate: None, }; + let consensus = Arc::new(RwLock::new(consensus)); let version = Arc::new(RwLock::new(BASE_VERSION)); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 4ba2262b04..f073f3f0e7 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,17 +1,16 @@ use std::{collections::BTreeMap, sync::Arc}; -use self::proposal_helpers::handle_quorum_proposal_validated; +#[cfg(not(feature = "dependency-tasks"))] +use crate::consensus::proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}; use crate::{ - consensus::{ - proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}, - view_change::update_view, - }, + consensus::view_change::update_view, events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task}, vote_collection::{ create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, }, }; +#[cfg(not(feature = "dependency-tasks"))] use anyhow::Result; use async_broadcast::Sender; use async_lock::RwLock; @@ -20,13 +19,15 @@ use async_std::task::JoinHandle; use committable::Committable; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::message::Proposal; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, - data::{Leaf, QuorumProposal, ViewChangeEvidence}, + data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, - message::Proposal, + message::GeneralConsensusMessage, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, + simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, traits::{ block_contents::BlockHeader, election::Membership, @@ -38,19 +39,19 @@ use hotshot_types::{ vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; + #[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::{ - data::{null_block, VidDisperseShare}, - message::GeneralConsensusMessage, - simple_vote::QuorumData, -}; +use hotshot_types::data::VidDisperseShare; use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; -/// Helper functions to handler proposal-related functionality. +#[cfg(not(feature = "dependency-tasks"))] +use self::proposal_helpers::handle_quorum_proposal_validated; + +/// Helper functions to handle proposal-related functionality. pub(crate) mod proposal_helpers; /// Handles view-change related functionality. @@ -144,7 +145,7 @@ pub struct ConsensusTaskState> { impl> ConsensusTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view - async fn cancel_tasks(&mut self, view: TYPES::Time) { + pub async fn cancel_tasks(&mut self, view: TYPES::Time) { let keep = self.spawned_tasks.split_off(&view); let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { @@ -155,14 +156,7 @@ impl> ConsensusTaskState join_all(cancel).await; } - /// Ignores old vote behavior and lets `QuorumVoteTask` take over. - #[cfg(feature = "dependency-tasks")] - async fn vote_if_able(&mut self, _event_stream: &Sender>>) -> bool { - false - } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] - #[cfg(not(feature = "dependency-tasks"))] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. async fn vote_if_able(&mut self, event_stream: &Sender>>) -> bool { @@ -333,17 +327,8 @@ impl> ConsensusTaskState true } - #[cfg(feature = "dependency-tasks")] - async fn publish_proposal( - &mut self, - view: TYPES::Time, - event_stream: Sender>>, - ) -> Result<()> { - Ok(()) - } - - /// Publishes a proposal #[cfg(not(feature = "dependency-tasks"))] + /// Publishes a proposal async fn publish_proposal( &mut self, view: TYPES::Time, @@ -397,11 +382,12 @@ impl> ConsensusTaskState Err(e) => debug!("Failed to propose {e:#}"), } } + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalValidated(proposal, _) => { if let Err(e) = handle_quorum_proposal_validated(proposal, event_stream.clone(), self).await { - info!("Failed to handle QuorumProposalValidated event {e:#}"); + debug!("Failed to handle QuorumProposalValidated event {e:#}"); } } HotShotEvent::QuorumVoteRecv(ref vote) => { @@ -497,6 +483,7 @@ impl> ConsensusTaskState } } } + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QCFormed(cert) => { match cert { either::Right(qc) => { @@ -776,37 +763,43 @@ impl> ConsensusTaskState fee: fee.clone(), block_view: view, }); - if self.quorum_membership.get_leader(view) == self.public_key - && self.consensus.read().await.high_qc.get_view_number() + 1 == view + #[cfg(not(feature = "dependency-tasks"))] { - if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { - warn!("Failed to propose; error = {e:?}"); - }; - } + if self.quorum_membership.get_leader(view) == self.public_key + && self.consensus.read().await.high_qc.get_view_number() + 1 == view + { + if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { + warn!("Failed to propose; error = {e:?}"); + }; + } - if let Some(cert) = &self.proposal_cert { - match cert { - ViewChangeEvidence::Timeout(tc) => { - if self.quorum_membership.get_leader(tc.get_view_number() + 1) - == self.public_key - { - if let Err(e) = self.publish_proposal(view, event_stream).await { - warn!("Failed to propose; error = {e:?}"); - }; + if let Some(cert) = &self.proposal_cert { + match cert { + ViewChangeEvidence::Timeout(tc) => { + if self.quorum_membership.get_leader(tc.get_view_number() + 1) + == self.public_key + { + if let Err(e) = self.publish_proposal(view, event_stream).await + { + warn!("Failed to propose; error = {e:?}"); + }; + } } - } - ViewChangeEvidence::ViewSync(vsc) => { - if self.quorum_membership.get_leader(vsc.get_view_number()) - == self.public_key - { - if let Err(e) = self.publish_proposal(view, event_stream).await { - warn!("Failed to propose; error = {e:?}"); - }; + ViewChangeEvidence::ViewSync(vsc) => { + if self.quorum_membership.get_leader(vsc.get_view_number()) + == self.public_key + { + if let Err(e) = self.publish_proposal(view, event_stream).await + { + warn!("Failed to propose; error = {e:?}"); + }; + } } } } } } + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { warn!( @@ -832,6 +825,7 @@ impl> ConsensusTaskState "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", *certificate.view_number ); + if let Err(e) = self.publish_proposal(view, event_stream).await { warn!("Failed to propose; error = {e:?}"); }; diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 923d10b145..3633795bfd 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -1,3 +1,14 @@ +#[cfg(not(feature = "dependency-tasks"))] +use super::ConsensusTaskState; +#[cfg(feature = "dependency-tasks")] +use crate::quorum_proposal::QuorumProposalTaskState; +#[cfg(feature = "dependency-tasks")] +use crate::quorum_proposal_recv::QuorumProposalRecvTaskState; +use crate::{ + consensus::update_view, + events::HotShotEvent, + helpers::{broadcast_event, AnyhowTracing}, +}; use core::time::Duration; use std::{ collections::{HashMap, HashSet}, @@ -37,13 +48,6 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, error, info, warn}; -use super::ConsensusTaskState; -use crate::{ - consensus::update_view, - events::HotShotEvent, - helpers::{broadcast_event, AnyhowTracing}, -}; - /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. #[allow(clippy::too_many_arguments)] @@ -592,6 +596,14 @@ pub async fn publish_proposal_if_able( } } +/// TEMPORARY TYPE: Quorum proposal recv task state when using dependency tasks +#[cfg(feature = "dependency-tasks")] +type TemporaryProposalRecvCombinedType = QuorumProposalRecvTaskState; + +/// TEMPORARY TYPE: Consensus task state when not using dependency tasks +#[cfg(not(feature = "dependency-tasks"))] +type TemporaryProposalRecvCombinedType = ConsensusTaskState; + // TODO: Fix `clippy::too_many_lines`. /// Handle the received quorum proposal. /// @@ -601,7 +613,7 @@ pub async fn handle_quorum_proposal_recv>, sender: &TYPES::SignatureKey, event_stream: Sender>>, - task_state: &mut ConsensusTaskState, + task_state: &mut TemporaryProposalRecvCombinedType, ) -> Result>> { let sender = sender.clone(); debug!( @@ -650,7 +662,7 @@ pub async fn handle_quorum_proposal_recv = QuorumProposalTaskState; + +/// TEMPORARY TYPE: Consensus task state when not using dependency tasks +#[cfg(not(feature = "dependency-tasks"))] +type TemporaryProposalValidatedCombinedType = ConsensusTaskState; + +/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. #[allow(clippy::too_many_lines)] pub async fn handle_quorum_proposal_validated>( proposal: &QuorumProposal, event_stream: Sender>>, - task_state: &mut ConsensusTaskState, + task_state: &mut TemporaryProposalValidatedCombinedType, ) -> Result<()> { let consensus = task_state.consensus.upgradable_read().await; let view = proposal.get_view_number(); - task_state.current_proposal = Some(proposal.clone()); + #[cfg(not(feature = "dependency-tasks"))] + { + task_state.current_proposal = Some(proposal.clone()); + } + + #[allow(unused_mut)] + #[allow(unused_variables)] + let mut decided_upgrade_cert: Option> = None; let mut new_anchor_view = consensus.last_decided_view; let mut new_locked_view = consensus.locked_view; let mut last_view_number_visited = view; @@ -880,7 +902,15 @@ pub async fn handle_quorum_proposal_validated> QuorumProposalTaskState { - let sender = sender.clone(); - debug!( - "Received Quorum Proposal for view {}", - *proposal.data.view_number - ); - - // stop polling for the received proposal - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( - *proposal.data.view_number, - )) - .await; - - let view = proposal.data.get_view_number(); - if view < self.cur_view { - debug!("Proposal is from an older view {:?}", proposal.data.clone()); - return; - } - - let view_leader_key = self.quorum_membership.get_leader(view); - if view_leader_key != sender { - warn!("Leader key does not match key in proposal"); - return; - } - - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { - error!("Invalid justify_qc in proposal for view {}", *view); - let consensus = self.consensus.write().await; - consensus.metrics.invalid_qc.update(1); - return; - } - - // Validate the upgrade certificate -- this is just a signature validation. - // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. - if let Err(e) = UpgradeCertificate::validate( - &proposal.data.upgrade_certificate, - &self.quorum_membership, - ) { - warn!("{:?}", e); - - return; - } - - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - self.update_view(view, &event_sender).await; - - let consensus = self.consensus.upgradable_read().await; + #[cfg(feature = "dependency-tasks")] + HotShotEvent::QuorumProposalValidated(proposal, _) => { + let new_view = proposal.view_number + 1; - // Get the parent leaf and state. - let parent = match consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() + if let Err(e) = + handle_quorum_proposal_validated(proposal, event_sender.clone(), self).await { - Some(leaf) => { - if let (Some(state), _) = - consensus.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, Arc::clone(&state))) - } else { - error!("Parent state not found! Consensus internally inconsistent"); - return; - } - } - None => None, - }; - - if justify_qc.get_view_number() > consensus.high_qc.view_number { - if let Err(e) = self - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - warn!("Failed to store High QC not voting. Error: {:?}", e); - return; - } + debug!("Failed to handle QuorumProposalValidated event; error = {e:#}"); } - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - - if justify_qc.get_view_number() > consensus.high_qc.view_number { - debug!("Updating high QC"); - consensus.high_qc = justify_qc.clone(); - } - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some((parent_leaf, parent_state)) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.get_data().leaf_commit - ); - return; - }; - async_spawn( - validate_proposal_safety_and_liveness( - proposal.clone(), - parent_leaf, - Arc::clone(&self.consensus), - None, - Arc::clone(&self.quorum_membership), - Arc::clone(&parent_state), - view_leader_key, - event_sender.clone(), - sender, - self.output_event_stream.clone(), - Arc::clone(&self.storage), - Arc::clone(&self.instance_state), - ) - .map(AnyhowTracing::err_as_debug), - ); - } - HotShotEvent::QuorumProposalValidated(proposal, _) => { - let current_proposal = Some(proposal.clone()); - let new_view = current_proposal.clone().unwrap().view_number + 1; - info!( "Node {} creating dependency task for view {:?} from QuorumProposalRecv", self.id, new_view @@ -881,103 +770,6 @@ impl> QuorumProposalTaskState {} } } - - /// Must only update the view and GC if the view actually changes - #[instrument(skip_all, fields( - id = self.id, - view = *self.cur_view - ), name = "Consensus update view", level = "error")] - async fn update_view( - &mut self, - new_view: TYPES::Time, - event_stream: &Sender>>, - ) -> bool { - if *self.cur_view < *new_view { - debug!( - "Updating view from {} to {} in consensus task", - *self.cur_view, *new_view - ); - - if *self.cur_view / 100 != *new_view / 100 { - // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): - // switch to info! when INFO logs become less cluttered - error!("Progress: entered view {:>6}", *new_view); - } - - // cancel the old timeout task - if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; - } - self.cur_view = new_view; - - // Poll the future leader for lookahead - let lookahead_view = new_view + LOOK_AHEAD; - if self.quorum_membership.get_leader(lookahead_view) != self.public_key { - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( - *lookahead_view, - self.quorum_membership.get_leader(lookahead_view), - )) - .await; - } - - // Start polling for proposals for the new view - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*self.cur_view + 1)) - .await; - - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*self.cur_view + 1)) - .await; - - if self.quorum_membership.get_leader(self.cur_view + 1) == self.public_key { - debug!("Polling for quorum votes for view {}", *self.cur_view); - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view)) - .await; - } - - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; - - // Spawn a timeout task if we did actually update view - let timeout = self.timeout; - self.timeout_task = Some(async_spawn({ - let stream = event_stream.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = self.cur_view + 1; - async move { - async_sleep(Duration::from_millis(timeout)).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), - &stream, - ) - .await; - } - })); - let consensus = self.consensus.upgradable_read().await; - consensus - .metrics - .current_view - .set(usize::try_from(self.cur_view.get_u64()).unwrap()); - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(self.cur_view.get_u64()).unwrap() - > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() - { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(self.cur_view.get_u64()).unwrap() - - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), - ); - } - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.update_view(new_view); - drop(consensus); - - return true; - } - false - } } impl> TaskState @@ -988,8 +780,7 @@ impl> TaskState fn filter(&self, event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::QuorumProposalRecv(_, _) - | HotShotEvent::QuorumProposalValidated(..) + HotShotEvent::QuorumProposalValidated(..) | HotShotEvent::QCFormed(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs new file mode 100644 index 0000000000..8d96c63c39 --- /dev/null +++ b/task-impls/src/quorum_proposal_recv.rs @@ -0,0 +1,208 @@ +#![allow(unused_imports)] + +use futures::future::join_all; +use std::{collections::BTreeMap, sync::Arc}; + +use crate::{ + consensus::proposal_helpers::{get_parent_leaf_and_state, handle_quorum_proposal_recv}, + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; +use async_broadcast::Sender; +use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use hotshot_task::task::{Task, TaskState}; +use hotshot_types::{ + consensus::{CommitmentAndMetadata, Consensus}, + data::{QuorumProposal, ViewChangeEvidence}, + event::Event, + simple_certificate::UpgradeCertificate, + traits::{ + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + }, + vote::{HasViewNumber, VoteDependencyData}, +}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, instrument, warn}; + +/// The state for the quorum proposal task. Contains all of the information for +/// handling [`HotShotEvent::QuorumProposalRecv`] events. +pub struct QuorumProposalRecvTaskState> { + /// Our public key + pub public_key: TYPES::SignatureKey, + + /// Our Private Key + pub private_key: ::PrivateKey, + + /// Reference to consensus. The replica will require a write lock on this. + pub consensus: Arc>>, + + /// View number this view is executing in. + pub cur_view: TYPES::Time, + + /// The commitment to the current block payload and its metadata submitted to DA. + pub payload_commitment_and_metadata: Option>, + + /// Network for all nodes + pub quorum_network: Arc, + + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + + /// Membership for Timeout votes/certs + pub timeout_membership: Arc, + + /// timeout task handle + pub timeout_task: Option>, + + /// View timeout from config. + pub timeout: u64, + + /// Round start delay from config, in milliseconds. + pub round_start_delay: u64, + + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, + + /// This node's storage ref + pub storage: Arc>, + + /// The most recent upgrade certificate this node formed. + /// Note: this is ONLY for certificates that have been formed internally, + /// so that we can propose with them. + /// + /// Certificates received from other nodes will get reattached regardless of this fields, + /// since they will be present in the leaf we propose off of. + pub formed_upgrade_certificate: Option>, + + /// last View Sync Certificate or Timeout Certificate this node formed. + pub proposal_cert: Option>, + + /// most recent decided upgrade certificate + pub decided_upgrade_cert: Option>, + + /// Spawned tasks related to a specific view, so we can cancel them when + /// they are stale + pub spawned_tasks: BTreeMap>>, + + /// Immutable instance state + pub instance_state: Arc, + + /// The node's id + pub id: u64, +} + +impl> QuorumProposalRecvTaskState { + /// Cancel all tasks the consensus tasks has spawned before the given view + pub async fn cancel_tasks(&mut self, view: TYPES::Time) { + let keep = self.spawned_tasks.split_off(&view); + let mut cancel = Vec::new(); + while let Some((_, tasks)) = self.spawned_tasks.pop_first() { + let mut to_cancel = tasks.into_iter().map(cancel_task).collect(); + cancel.append(&mut to_cancel); + } + self.spawned_tasks = keep; + join_all(cancel).await; + } + + /// Handles all consensus events relating to propose and vote-enabling events. + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] + #[allow(unused_variables)] + pub async fn handle( + &mut self, + event: Arc>, + event_stream: Sender>>, + ) { + #[cfg(feature = "dependency-tasks")] + if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { + match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self).await { + Ok(Some(current_proposal)) => { + self.cancel_tasks(proposal.data.get_view_number() + 1).await; + // Build the parent leaf since we didn't find it during the proposal check. + let parent_leaf = match get_parent_leaf_and_state( + self.cur_view, + proposal.data.get_view_number() + 1, + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + Arc::clone(&self.consensus), + ) + .await + { + Ok((parent_leaf, _ /* state */)) => parent_leaf, + Err(e) => { + warn!(?e, "Failed to get parent leaf and state"); + return; + } + }; + + let consensus = self.consensus.read().await; + let view = current_proposal.get_view_number(); + let Some(vid_shares) = consensus.vid_shares.get(&view) else { + debug!( + "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + view + ); + return; + }; + let Some(disperse_share) = vid_shares.get(&self.public_key) else { + error!("Did not get a VID share for our public key, aborting vote"); + return; + }; + let Some(da_cert) = consensus + .saved_da_certs + .get(¤t_proposal.get_view_number()) + else { + debug!( + "Received VID share, but couldn't find DAC cert for view {:?}", + current_proposal.get_view_number() + ); + return; + }; + broadcast_event( + Arc::new(HotShotEvent::VoteNow( + view, + VoteDependencyData { + quorum_proposal: current_proposal, + parent_leaf, + disperse_share: disperse_share.clone(), + da_cert: da_cert.clone(), + }, + )), + &event_stream, + ) + .await; + } + Ok(None) => { + self.cancel_tasks(proposal.data.get_view_number() + 1).await; + } + Err(e) => warn!(?e, "Failed to propose"), + } + } + } +} + +impl> TaskState + for QuorumProposalRecvTaskState +{ + type Event = Arc>; + type Output = (); + fn filter(&self, event: &Arc>) -> bool { + !matches!(event.as_ref(), HotShotEvent::QuorumProposalRecv(..)) + } + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event.as_ref(), HotShotEvent::Shutdown) + } +} diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 80a10844a9..81c5f98d30 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -5,7 +5,7 @@ use crate::{ helpers::{broadcast_event, cancel_task}, }; use async_broadcast::{Receiver, Sender}; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use committable::Committable; @@ -17,7 +17,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::Consensus, data::Leaf, - event::{Event, EventType}, + event::Event, message::GeneralConsensusMessage, simple_vote::{QuorumData, QuorumVote}, traits::{ @@ -27,9 +27,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, - ValidatedState, }, - utils::{Terminator, View, ViewInner}, vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; @@ -87,6 +85,7 @@ impl + 'static> HandleDepOutput let parent_commitment = parent_leaf.commit(); let proposed_leaf = Leaf::from_quorum_proposal(proposal); if proposed_leaf.get_parent_commitment() != parent_commitment { + warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } leaf = Some(proposed_leaf); @@ -115,7 +114,7 @@ impl + 'static> HandleDepOutput } } HotShotEvent::VoteNow(_, vote_dependency_data) => { - leaf = Some(vote_dependency_data.leaf.clone()); + leaf = Some(vote_dependency_data.parent_leaf.clone()); disperse_share = Some(vote_dependency_data.disperse_share.clone()); } _ => {} @@ -275,7 +274,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState { + vote_now_dependency.mark_as_completed(event); + } + HotShotEvent::QuorumProposalValidated(..) => { + quorum_proposal_dependency.mark_as_completed(event); + } + _ => {} + } } let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; @@ -362,230 +365,17 @@ impl> QuorumVoteTaskState { - let view = proposal.data.view_number; - debug!("Received Quorum Proposal for view {}", *view); - if view <= self.latest_voted_view { - return; - } - - // TODO (Keyao) Add validations for view change evidence and upgrade cert. - - // Vaildate the justify QC. - let justify_qc = proposal.data.justify_qc.clone(); - if !justify_qc.is_valid_cert(self.quorum_membership.as_ref()) { - error!("Invalid justify_qc in proposal for view {}", *view); - let consensus = self.consensus.write().await; - consensus.metrics.invalid_qc.update(1); - return; - } - broadcast_event(Arc::new(HotShotEvent::ViewChange(view + 1)), &event_sender).await; - - let consensus = self.consensus.upgradable_read().await; - // Get the parent leaf and state. - let parent = { - match consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned() - { - Some(leaf) => { - if let (Some(state), _) = - consensus.get_state_and_delta(leaf.get_view_number()) - { - Some((leaf, Arc::clone(&state))) - } else { - error!("Parent state not found! Consensus internally inconsistent"); - return; - } - } - None => None, - } - }; - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - if justify_qc.get_view_number() > consensus.high_qc.view_number { - debug!("Updating high QC"); - - if let Err(e) = self - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - warn!("Failed to store High QC not voting. Error: {:?}", e); - return; - } - - consensus.high_qc = justify_qc.clone(); - } - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some((parent_leaf, parent_state)) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.get_data().leaf_commit, - *view, - ); - - let leaf = Leaf::from_quorum_proposal(&proposal.data); - - let state = Arc::new( - >::from_header( - &proposal.data.block_header, - ), - ); - - consensus.validated_state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, - }, - ); - consensus.saved_leaves.insert(leaf.commit(), leaf.clone()); - - if let Err(e) = self - .storage - .write() - .await - .update_undecided_state( - consensus.saved_leaves.clone(), - consensus.validated_state_map.clone(), - ) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - drop(consensus); - - return; - }; - - drop(consensus); - - // Validate the state. - let Ok((validated_state, state_delta)) = parent_state - .validate_and_apply_header( - self.instance_state.as_ref(), - &parent_leaf, - &proposal.data.block_header.clone(), - ) - .await - else { - error!("Block header doesn't extend the proposal",); - return; - }; - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - let parent_commitment = parent_leaf.commit(); - let view = proposal.data.get_view_number(); - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - if proposed_leaf.get_parent_commitment() != parent_commitment { - return; - } - - // Validate the signature. This should also catch if `leaf_commitment`` does not - // equal our calculated parent commitment. - let view_leader_key = self.quorum_membership.get_leader(view); - if view_leader_key != *sender { - warn!("Leader key does not match key in proposal"); - return; - } - if !view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()) { - error!(?proposal.signature, "Could not verify proposal."); - return; - } - - // Liveness and safety checks. - let consensus = self.consensus.upgradable_read().await; - let liveness_check = justify_qc.get_view_number() > consensus.locked_view; - // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( - justify_qc.get_view_number(), - Terminator::Inclusive(consensus.locked_view), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.get_view_number() != consensus.locked_view - }, - ); - let safety_check = outcome.is_ok(); - // Skip if both saftey and liveness checks fail. - if !safety_check && !liveness_check { - error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view); - if let Err(e) = outcome { - broadcast_event( - Event { - view_number: view, - event: EventType::Error { error: Arc::new(e) }, - }, - &self.output_event_stream, - ) - .await; - } - return; - } - - // Stop polling for the received proposal. - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal(*view)) - .await; - - // Notify the application layer and other tasks. - broadcast_event( - Event { - view_number: view, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender: sender.clone(), - }, - }, - &self.output_event_stream, - ) - .await; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), - parent_leaf, - )), + HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { + // This task simultaneously does not rely on the state updates of the `handle_quorum_proposal_validated` + // function and that function does not return an `Error` unless the propose or vote fails, in which case + // the other would still have been attempted regardless. Therefore, we pass this through as a task and + // eschew validation in lieu of the `QuorumProposal` task doing it for us and updating the internal state. + self.create_dependency_task_if_new( + proposal.view_number, + event_receiver, &event_sender, - ) - .await; - - // Add to the storage. - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.validated_state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }, + Some(Arc::clone(&event)), ); - consensus - .saved_leaves - .insert(proposed_leaf.commit(), proposed_leaf.clone()); - if let Err(e) = self - .storage - .write() - .await - .update_undecided_state( - consensus.saved_leaves.clone(), - consensus.validated_state_map.clone(), - ) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - drop(consensus); - - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } HotShotEvent::DACertificateRecv(cert) => { let view = cert.view_number; @@ -723,12 +513,12 @@ impl> TaskState for QuorumVoteTask fn filter(&self, event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::QuorumProposalRecv(_, _) - | HotShotEvent::DACertificateRecv(_) + HotShotEvent::DACertificateRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::VIDShareRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) | HotShotEvent::VoteNow(..) + | HotShotEvent::QuorumProposalValidated(..) | HotShotEvent::Shutdown, ) } diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index ee7bfdb5ac..8311ee602a 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -10,7 +10,7 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - predicates::event::{exact, quorum_proposal_validated, quorum_vote_send}, + predicates::event::{exact, quorum_vote_send}, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -23,26 +23,31 @@ async fn test_quorum_vote_task_success() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - generator.next(); - let view = generator.current_view.clone().unwrap(); + let mut proposals = Vec::new(); + let mut leaves = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaves.push(view.leaf.clone()); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } // Send the quorum proposal, DAC, and VID disperse data, in which case a dummy vote can be // formed and the view number will be updated. let view_success = TestScriptStage { inputs: vec![ - QuorumProposalRecv(view.quorum_proposal.clone(), view.leader_public_key), - DACertificateRecv(view.da_certificate.clone()), - VIDShareRecv(view.vid_proposal.0[0].clone()), + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + DACertificateRecv(dacs[1].clone()), + VIDShareRecv(vids[1].0[0].clone()), ], outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - exact(DACertificateValidated(view.da_certificate.clone())), - exact(VIDShareValidated(view.vid_proposal.0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), + exact(DACertificateValidated(dacs[1].clone())), + exact(VIDShareValidated(vids[1].0[0].clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), quorum_vote_send(), ], asserts: vec![], @@ -74,7 +79,6 @@ async fn test_quorum_vote_task_vote_now() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); generator.next(); @@ -82,7 +86,7 @@ async fn test_quorum_vote_task_vote_now() { let vote_dependency_data = VoteDependencyData { quorum_proposal: view.quorum_proposal.data.clone(), - leaf: view.leaf.clone(), + parent_leaf: view.leaf.clone(), disperse_share: view.vid_proposal.0[0].clone(), da_cert: view.da_certificate.clone(), }; @@ -109,7 +113,7 @@ async fn test_quorum_vote_task_vote_now() { async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - predicates::event::{exact, quorum_proposal_validated}, + predicates::event::exact, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -122,7 +126,6 @@ async fn test_quorum_vote_task_miss_dependency() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); @@ -130,37 +133,31 @@ async fn test_quorum_vote_task_miss_dependency() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); + let mut leaves = Vec::new(); for view in (&mut generator).take(3) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); } // Send two of quorum proposal, DAC, and VID disperse data, in which case there's no vote. let view_no_dac = TestScriptStage { inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), + QuorumProposalValidated(proposals[0].data.clone(), leaves[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - exact(VIDShareValidated(vids[0].0[0].clone())), - ], + outputs: vec![exact(VIDShareValidated(vids[0].0[0].clone()))], asserts: vec![], }; let view_no_vid = TestScriptStage { inputs: vec![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), DACertificateRecv(dacs[1].clone()), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(3))), - quorum_proposal_validated(), - exact(DACertificateValidated(dacs[1].clone())), - ], + outputs: vec![exact(DACertificateValidated(dacs[1].clone()))], asserts: vec![], }; let view_no_quorum_proposal = TestScriptStage { diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a3d23f0a9b..8233371f6b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -15,7 +15,8 @@ use crate::{ error::HotShotError, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncFinalizeCertificate2, + DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + ViewSyncFinalizeCertificate2, }, traits::{ block_contents::BuilderFee, @@ -75,6 +76,17 @@ pub struct Consensus { /// A reference to the metrics trait pub metrics: Arc, + + /// The most recent upgrade certificate this node formed. + /// Note: this is ONLY for certificates that have been formed internally, + /// so that we can propose with them. + /// + /// Certificates received from other nodes will get reattached regardless of this fields, + /// since they will be present in the leaf we propose off of. + pub dontuse_formed_upgrade_certificate: Option>, + + /// most recent decided upgrade certificate + pub dontuse_decided_upgrade_cert: Option>, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces diff --git a/types/src/vote.rs b/types/src/vote.rs index fe49d2f0fe..53a470c912 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -190,8 +190,9 @@ pub struct VoteDependencyData { /// The quorum proposal (not necessarily valid). pub quorum_proposal: QuorumProposal, - /// The leaf we've obtained from the `QuorumProposalValidated` event. - pub leaf: Leaf, + /// The leaf we've obtained from the `QuorumProposalValidated` event. This is the + /// parent leaf. + pub parent_leaf: Leaf, /// The Vid disperse proposal. pub disperse_share: Proposal>, From 64d98f130c16703d37377cf004f1240e913f151d Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:56:38 -0400 Subject: [PATCH 0995/1393] update the CDN to 0-2-4 (#3064) --- examples/push-cdn/broker.rs | 10 ++++++++-- examples/push-cdn/marshal.rs | 10 ++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 543e8b114f..66d535d7c0 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -7,6 +7,7 @@ use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureK use hotshot_example_types::node_types::TestTypes; use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use sha2::Digest; +use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -62,9 +63,14 @@ async fn main() -> Result<()> { // Initialize tracing if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { - tracing_subscriber::fmt().json().init(); + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .json() + .init(); } else { - tracing_subscriber::fmt().init(); + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .init(); } // Generate the broker key from the supplied seed diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index b2288a9a49..9a9995cc83 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -6,6 +6,7 @@ use cdn_marshal::{Config, Marshal}; use clap::Parser; use hotshot::traits::implementations::ProductionDef; use hotshot_example_types::node_types::TestTypes; +use tracing_subscriber::EnvFilter; // TODO: forall, add logging where we need it @@ -45,9 +46,14 @@ async fn main() -> Result<()> { // Initialize tracing if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { - tracing_subscriber::fmt().json().init(); + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .json() + .init(); } else { - tracing_subscriber::fmt().init(); + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .init(); } // Create a new `Config` From 2873581f4cb6f9bbd15ffe4c5b8d1902e3603c58 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 29 Apr 2024 14:11:57 -0400 Subject: [PATCH 0996/1393] [Auto Bench] Add election config recognition (#3048) * add election config recognition * nit * add feature flag to orchestrator crate * nit * lint * lint --- examples/webserver/README.md | 4 ++-- .../src/traits/election/static_committee.rs | 9 ++++++-- orchestrator/Cargo.toml | 4 ++++ orchestrator/run-config.toml | 4 ++-- orchestrator/src/client.rs | 3 ++- orchestrator/src/config.rs | 4 +++- orchestrator/src/lib.rs | 22 +++++++++++++++++++ testing/src/test_builder.rs | 2 +- 8 files changed, 43 insertions(+), 9 deletions(-) diff --git a/examples/webserver/README.md b/examples/webserver/README.md index d782070a77..bb5c323df6 100644 --- a/examples/webserver/README.md +++ b/examples/webserver/README.md @@ -43,7 +43,7 @@ If using gpu-vid, you have to run: ``` just async_std example webserver -- http://127.0.0.1:9000 just async_std example webserver -- http://127.0.0.1:9001 -just async_std example orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --fixed_leader_for_gpuvid 1 +just async_std example_fixed_leader orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --fixed_leader_for_gpuvid 1 just async_std example_gpuvid_leader multi-validator-webserver -- 1 http://127.0.0.1:4444 sleep 1m just async_std example_fixed_leader multi-validator-webserver -- 9 http://127.0.0.1:4444 @@ -56,7 +56,7 @@ If you don't have a gpu but want to test out fixed leader, you can run: ``` just async_std example webserver -- http://127.0.0.1:9000 just async_std example webserver -- http://127.0.0.1:9001 -just async_std example orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --transactions_per_round 1 --transaction_size 512 --rounds 10 --fixed_leader_for_gpuvid 2 --webserver_url http://127.0.0.1:9000 --da_webserver_url http://127.0.0.1:9001 +just async_std example_fixed_leader orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --transactions_per_round 1 --transaction_size 512 --rounds 10 --fixed_leader_for_gpuvid 2 --webserver_url http://127.0.0.1:9000 --da_webserver_url http://127.0.0.1:9001 just async_std example_fixed_leader multi-validator-webserver -- 2 http://127.0.0.1:4444 sleep 1m just async_std example_fixed_leader multi-validator-webserver -- 8 http://127.0.0.1:4444 diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 1aa4d967ca..2d4024d34a 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -74,8 +74,13 @@ where /// Only get leader in fixed set /// Index the fixed vector (first fixed_leader_for_gpuvid element) of public keys with the current view number fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + if self.fixed_leader_for_gpuvid <= 0 + || self.fixed_leader_for_gpuvid > self.all_nodes_with_stake.len() + { + panic!("fixed_leader_for_gpuvid is not set correctly."); + } let index = usize::try_from(*view_number % self.fixed_leader_for_gpuvid as u64).unwrap(); - let res = self.nodes_with_stake[index].clone(); + let res = self.all_nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } @@ -85,7 +90,7 @@ where let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: usize = rng.gen(); let index = randomized_view_number % self.nodes_with_stake.len(); - let res = self.nodes_with_stake[index].clone(); + let res = self.all_nodes_with_stake[index].clone(); TYPES::SignatureKey::get_public_key(&res) } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 532a2e09f1..202a70241e 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -3,6 +3,10 @@ name = "hotshot-orchestrator" version = { workspace = true } edition = { workspace = true } +[features] +randomized-leader-election = [] +fixed-leader-election = [] + [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index e543d1ef0d..0f210cdb66 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,4 +1,4 @@ -rounds = 100 +rounds = 10 transactions_per_round = 10 transaction_size = 1000 node_index = 0 @@ -48,7 +48,7 @@ start_threshold = [ ] staked_committee_nodes = 10 non_staked_committee_nodes = 0 -fixed_leader_for_gpuvid = 0 +fixed_leader_for_gpuvid = 1 next_view_timeout = 30000 timeout_ratio = [ 11, diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 988cb22a27..b44fde363d 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -80,7 +80,8 @@ pub struct BenchResultsDownloadConfig { pub transaction_size: u64, /// The number of rounds pub rounds: usize, - + /// The type of leader election: static, fixed, random + pub leader_election_type: String, // Results starting here /// The average latency of the transactions pub avg_latency_in_sec: i64, diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 0856b09716..568179bcb5 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -507,6 +507,8 @@ pub struct NetworkConfigFile { pub random_builder: Option, } +impl NetworkConfigFile {} + impl From> for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { @@ -742,7 +744,7 @@ impl Default for HotShotConfigFile { staked_committee_nodes, known_da_nodes, non_staked_committee_nodes: 0, - fixed_leader_for_gpuvid: 0, + fixed_leader_for_gpuvid: 1, next_view_timeout: 10000, view_sync_timeout: Duration::from_millis(1000), timeout_ratio: (11, 10), diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 69d434947f..9c6670d9ef 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -100,6 +100,27 @@ impl OrchestratorState { } } + /// get election type in use + #[must_use] + pub fn get_election_type() -> String { + // leader is chosen in index order + #[cfg(not(any( + feature = "randomized-leader-election", + feature = "fixed-leader-election" + )))] + let election_type = "static-leader-selection".to_string(); + + // leader is from a fixed set + #[cfg(feature = "fixed-leader-election")] + let election_type = "fixed-leader-election".to_string(); + + // leader is randomly chosen + #[cfg(feature = "randomized-leader-election")] + let election_type = "randomized-leader-election".to_string(); + + election_type + } + /// Output the results to a csv file according to orchestrator state pub fn output_to_csv(&self) { let output_csv = BenchResultsDownloadConfig { @@ -109,6 +130,7 @@ impl OrchestratorState { transactions_per_round: self.config.transactions_per_round, transaction_size: self.bench_results.transaction_size_in_bytes, rounds: self.config.rounds, + leader_election_type: OrchestratorState::::get_election_type(), avg_latency_in_sec: self.bench_results.avg_latency_in_sec, minimum_latency_in_sec: self.bench_results.minimum_latency_in_sec, maximum_latency_in_sec: self.bench_results.maximum_latency_in_sec, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 26ebc85922..a5c175485c 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -296,7 +296,7 @@ impl TestDescription { my_own_validator_config, da_staked_committee_size, da_non_staked_committee_size, - fixed_leader_for_gpuvid: 0, + fixed_leader_for_gpuvid: 1, next_view_timeout: 500, view_sync_timeout: Duration::from_millis(250), timeout_ratio: (11, 10), From ede0a9592f3569792fb6867a2dacd17e6b2c0871 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 29 Apr 2024 18:42:27 -0400 Subject: [PATCH 0997/1393] Add `all` predicate (#3070) --- .../src/traits/election/static_committee.rs | 3 +- task-impls/src/consensus/mod.rs | 25 +++--- task-impls/src/consensus/proposal_helpers.rs | 23 ++--- task-impls/src/quorum_proposal.rs | 1 - task-impls/src/quorum_proposal_recv.rs | 13 +-- task-impls/src/quorum_vote.rs | 9 +- testing/src/predicates/event.rs | 86 ++++++++++++++++++- testing/tests/tests_1/proposal_ordering.rs | 31 +++---- 8 files changed, 137 insertions(+), 54 deletions(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 2d4024d34a..9195cbf7f7 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,3 +1,5 @@ +use std::{marker::PhantomData, num::NonZeroU64}; + use ethereum_types::U256; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; @@ -8,7 +10,6 @@ use hotshot_types::{ }; #[cfg(feature = "randomized-leader-election")] use rand::{rngs::StdRng, Rng}; -use std::{marker::PhantomData, num::NonZeroU64}; use tracing::debug; /// Dummy implementation of [`Membership`] diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index f073f3f0e7..d7f96a197e 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,15 +1,5 @@ use std::{collections::BTreeMap, sync::Arc}; -#[cfg(not(feature = "dependency-tasks"))] -use crate::consensus::proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}; -use crate::{ - consensus::view_change::update_view, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, -}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::Result; use async_broadcast::Sender; @@ -20,6 +10,8 @@ use committable::Committable; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; #[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::data::VidDisperseShare; +#[cfg(not(feature = "dependency-tasks"))] use hotshot_types::message::Proposal; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, @@ -39,9 +31,6 @@ use hotshot_types::{ vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; - -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::data::VidDisperseShare; use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; @@ -50,6 +39,16 @@ use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] use self::proposal_helpers::handle_quorum_proposal_validated; +#[cfg(not(feature = "dependency-tasks"))] +use crate::consensus::proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}; +use crate::{ + consensus::view_change::update_view, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{ + create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, + }, +}; /// Helper functions to handle proposal-related functionality. pub(crate) mod proposal_helpers; diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 3633795bfd..48979b5986 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -1,14 +1,3 @@ -#[cfg(not(feature = "dependency-tasks"))] -use super::ConsensusTaskState; -#[cfg(feature = "dependency-tasks")] -use crate::quorum_proposal::QuorumProposalTaskState; -#[cfg(feature = "dependency-tasks")] -use crate::quorum_proposal_recv::QuorumProposalRecvTaskState; -use crate::{ - consensus::update_view, - events::HotShotEvent, - helpers::{broadcast_event, AnyhowTracing}, -}; use core::time::Duration; use std::{ collections::{HashMap, HashSet}, @@ -48,6 +37,18 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, error, info, warn}; +#[cfg(not(feature = "dependency-tasks"))] +use super::ConsensusTaskState; +#[cfg(feature = "dependency-tasks")] +use crate::quorum_proposal::QuorumProposalTaskState; +#[cfg(feature = "dependency-tasks")] +use crate::quorum_proposal_recv::QuorumProposalRecvTaskState; +use crate::{ + consensus::update_view, + events::HotShotEvent, + helpers::{broadcast_event, AnyhowTracing}, +}; + /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. #[allow(clippy::too_many_arguments)] diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 9d4650616a..c966245c49 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -33,7 +33,6 @@ use tracing::{debug, error, info, instrument, warn}; #[cfg(feature = "dependency-tasks")] use crate::consensus::proposal_helpers::handle_quorum_proposal_validated; - use crate::{ events::HotShotEvent, helpers::{broadcast_event, cancel_task}, diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs index 8d96c63c39..be1d00c1aa 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv.rs @@ -1,17 +1,12 @@ #![allow(unused_imports)] -use futures::future::join_all; use std::{collections::BTreeMap, sync::Arc}; -use crate::{ - consensus::proposal_helpers::{get_parent_leaf_and_state, handle_quorum_proposal_recv}, - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, @@ -28,6 +23,12 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; +use crate::{ + consensus::proposal_helpers::{get_parent_leaf_and_state, handle_quorum_proposal_recv}, + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; + /// The state for the quorum proposal task. Contains all of the information for /// handling [`HotShotEvent::QuorumProposalRecv`] events. pub struct QuorumProposalRecvTaskState> { diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 81c5f98d30..f4d9611791 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,9 +1,5 @@ use std::{collections::HashMap, sync::Arc}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -36,6 +32,11 @@ use jf_primitives::vid::VidScheme; use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; + /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 8ef401c95e..e044c18581 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -1,5 +1,6 @@ -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; +use async_lock::RwLock; use async_trait::async_trait; use hotshot_task_impls::events::{HotShotEvent, HotShotEvent::*}; use hotshot_types::{ @@ -25,6 +26,89 @@ impl std::fmt::Debug for EventPredicate { } } +#[allow(clippy::type_complexity)] +pub struct TestPredicate { + pub function: Arc PredicateResult + Send + Sync>>, + pub info: String, +} + +impl std::fmt::Debug for TestPredicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.info) + } +} + +#[async_trait] +impl Predicate for TestPredicate +where + INPUT: Send + Sync, +{ + async fn evaluate(&self, input: &INPUT) -> PredicateResult { + let mut function = self.function.write().await; + function(input) + } + + async fn info(&self) -> String { + self.info.clone() + } +} + +pub fn all(events: Vec>) -> Box>>> +where + TYPES: NodeType, +{ + let info = format!("{:?}", events); + let mut set: HashSet<_> = events.into_iter().collect(); + + let function = move |e: &Arc>| match set.take(e.as_ref()) { + Some(_) => { + if set.is_empty() { + PredicateResult::Pass + } else { + PredicateResult::Incomplete + } + } + None => PredicateResult::Fail, + }; + + Box::new(TestPredicate { + function: Arc::new(RwLock::new(function)), + info, + }) +} + +pub fn all_predicates( + predicates: Vec>>, +) -> Box>>> { + let info = format!("{:?}", predicates); + + let mut unsatisfied: Vec<_> = predicates.into_iter().map(Arc::new).collect(); + + let function = move |e: &Arc>| { + if !unsatisfied + .clone() + .into_iter() + .map(|pred| (pred.check)(e.clone())) + .any(|val| val) + { + return PredicateResult::Fail; + } + + unsatisfied.retain(|pred| !(pred.check)(e.clone())); + + if unsatisfied.is_empty() { + PredicateResult::Pass + } else { + PredicateResult::Incomplete + } + }; + + Box::new(TestPredicate { + function: Arc::new(RwLock::new(function)), + info, + }) +} + #[async_trait] impl Predicate>> for EventPredicate where diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index a5a53fd0f3..e174603076 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -1,17 +1,14 @@ -use hotshot::{tasks::task_state::CreateTaskState}; - use std::sync::Arc; - -use hotshot_example_types::state_types::TestInstanceState; - +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, }; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ - predicates::event::{exact, quorum_proposal_send, quorum_proposal_validated}, + predicates::event::{all_predicates, exact, quorum_proposal_send, quorum_proposal_validated}, task_helpers::{get_vid_share, vid_scheme_from_view_number}, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, @@ -40,7 +37,6 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(node_id)); @@ -74,8 +70,10 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), + all_predicates(vec![ + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[0].clone())), + ]), ], asserts: vec![], }; @@ -91,7 +89,11 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { builder_commitment, TestMetadata, ViewNumber::new(node_id), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), ]; @@ -102,8 +104,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: view_2_inputs, outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - quorum_proposal_send(), + all_predicates(vec![quorum_proposal_validated(), quorum_proposal_send()]), ], // We should end on view 2. asserts: vec![], @@ -111,11 +112,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let script = vec![view_1, view_2]; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; run_test_script(script, consensus_state).await; } From 865373d45804580fceee64e9f931b4f248317e46 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 29 Apr 2024 20:50:11 -0400 Subject: [PATCH 0998/1393] Don't return empty blocks from simple builder (#3069) * Don't return empty blocks from simple builder * Decrease builder timeout to make views go faster in tests --- testing/src/block_builder.rs | 10 ++++++++++ testing/src/test_builder.rs | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 180843b35b..1d07fd494c 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -330,6 +330,16 @@ impl BuilderDataSource for SimpleBuilderSource { }) }) .await; + + if transactions.is_empty() { + // We don't want to return an empty block, as we will end up driving consensus to + // produce empty blocks extremely quickly. Instead, we return no blocks, so that + // consensus will keep asking for blocks until either we have something non-trivial to + // propose, or a timeout, in which case consensus will finally propose an empty block + // anyways. + return Ok(vec![]); + } + let (metadata, payload, header_input) = build_block( transactions, self.num_storage_nodes, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index a5c175485c..5ebdb87b34 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -80,7 +80,7 @@ impl Default for TimingData { timeout_ratio: (11, 10), round_start_delay: 100, start_delay: 100, - builder_timeout: Duration::from_millis(1000), + builder_timeout: Duration::from_millis(500), data_request_delay: Duration::from_millis(200), secondary_network_delay: Duration::from_millis(1000), view_sync_timeout: Duration::from_millis(2000), From 6dbeda15bd65ebf4f17f8bc6310c872854d13c56 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 1 May 2024 09:41:32 -0400 Subject: [PATCH 0999/1393] Update the CDN (#3073) * update the CDN * add sleep for smaller broker --- .../src/traits/networking/push_cdn_network.rs | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index c153c1c56a..3b5c7c1899 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -4,6 +4,7 @@ use std::{collections::BTreeSet, marker::PhantomData}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, sync::Arc, time::Duration}; +use async_compatibility_layer::art::async_sleep; #[cfg(feature = "hotshot-testing")] use async_compatibility_layer::art::async_spawn; use async_compatibility_layer::channel::UnboundedSendError; @@ -262,15 +263,32 @@ impl TestableNetworkingImplementation for PushCdnNetwork .to_string_lossy() .into_owned(); + // Pick some unused public ports + let public_address_1 = format!( + "127.0.0.1:{}", + portpicker::pick_unused_port().expect("could not find an open port") + ); + let public_address_2 = format!( + "127.0.0.1:{}", + portpicker::pick_unused_port().expect("could not find an open port") + ); + // 2 brokers - for _ in 0..2 { + for i in 0..2 { // Get the ports to bind to let private_port = portpicker::pick_unused_port().expect("could not find an open port"); - let public_port = portpicker::pick_unused_port().expect("could not find an open port"); // Extrapolate addresses let private_address = format!("127.0.0.1:{private_port}"); - let public_address = format!("127.0.0.1:{public_port}"); + let (public_address, other_public_address) = if i == 0 { + (public_address_1.clone(), public_address_2.clone()) + } else { + (public_address_2.clone(), public_address_1.clone()) + }; + + // Calculate the broker identifiers + let broker_identifier = format!("{public_address}/{public_address}"); + let other_broker_identifier = format!("{other_public_address}/{other_public_address}"); // Configure the broker let config: BrokerConfig> = BrokerConfig { @@ -293,6 +311,12 @@ impl TestableNetworkingImplementation for PushCdnNetwork let broker: Broker> = Broker::new(config).await.expect("broker failed to start"); + // If we are the first broker by identifier, we need to sleep a bit + // for discovery to happen first + if other_broker_identifier > broker_identifier { + async_sleep(Duration::from_secs(2)).await; + } + // Error if we stopped unexpectedly if let Err(err) = broker.start().await { error!("broker stopped: {err}"); From 085bdb6f7462a73751433e21076687fe9bb0828c Mon Sep 17 00:00:00 2001 From: Himanshu Goyal Date: Wed, 1 May 2024 11:24:33 -0400 Subject: [PATCH 1000/1393] include `view_number` inside api signatures: (#3076) * include parent_hash inside api signatures: * fix test * replace parent_vid_commitment with view_number --- builder-api/api/builder.toml | 11 ++++++++--- builder-api/api/submit.toml | 6 +++++- builder-api/src/builder.rs | 17 ++++++++++------- builder-api/src/data_source.rs | 18 ++++++++++++------ task-impls/src/builder.rs | 9 ++++++--- task-impls/src/transactions.rs | 25 +++++++++++++++++-------- testing/src/block_builder.rs | 6 ++++++ testing/tests/tests_1/block_builder.rs | 22 +++++++++++++++++++--- 8 files changed, 83 insertions(+), 31 deletions(-) diff --git a/builder-api/api/builder.toml b/builder-api/api/builder.toml index 6716742d17..8a2b6fe152 100644 --- a/builder-api/api/builder.toml +++ b/builder-api/api/builder.toml @@ -27,8 +27,9 @@ DESCRIPTION = "" FORMAT_VERSION = "0.1.0" [route.available_blocks] -PATH = ["availableblocks/:parent_hash/:sender/:signature"] +PATH = ["availableblocks/:parent_hash/:view_number/:sender/:signature"] ":parent_hash" = "TaggedBase64" +":view_number" = "Integer" ":sender" = "TaggedBase64" ":signature" = "TaggedBase64" DOC = """ @@ -47,8 +48,9 @@ Returns """ [route.claim_block] -PATH = ["claimblock/:block_hash/:sender/:signature"] +PATH = ["claimblock/:block_hash/:view_number/:sender/:signature"] ":block_hash" = "TaggedBase64" +":view_number" = "Integer" ":sender" = "TaggedBase64" ":signature" = "TaggedBase64" DOC = """ @@ -58,8 +60,9 @@ Returns application-specific encoded transactions type """ [route.claim_header_input] -PATH = ["claimheaderinput/:block_hash/:sender/:signature"] +PATH = ["claimheaderinput/:block_hash/:view_number/:sender/:signature"] ":block_hash" = "TaggedBase64" +":view_number" = "Integer" ":sender" = "TaggedBase64" ":signature" = "TaggedBase64" DOC = """ @@ -72,4 +75,6 @@ Returns application-specific block header type PATH = ["builderaddress"] DOC = """ Get the builder address. + +Returns the builder's public key """ diff --git a/builder-api/api/submit.toml b/builder-api/api/submit.toml index a9d1db4b46..5d4354aa43 100644 --- a/builder-api/api/submit.toml +++ b/builder-api/api/submit.toml @@ -30,4 +30,8 @@ FORMAT_VERSION = "0.1.0" [route.submit_txn] PATH = ["/submit"] METHOD = "POST" -DOC = "Submit a transaction to builder's private mempool." +DOC = """ +Submit a transaction to the Builder + +Returns transaction hash +""" diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 96898a3ad1..14cef81b3e 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -144,10 +144,11 @@ where .get("available_blocks", |req, state| { async move { let hash = req.blob_param("parent_hash")?; + let view_number = req.integer_param("view_number")?; let signature = try_extract_param(&req, "signature")?; let sender = try_extract_param(&req, "sender")?; state - .get_available_blocks(&hash, sender, &signature) + .get_available_blocks(&hash, view_number, sender, &signature) .await .context(BlockAvailableSnafu { resource: hash.to_string(), @@ -157,28 +158,30 @@ where })? .get("claim_block", |req, state| { async move { - let hash: BuilderCommitment = req.blob_param("block_hash")?; + let block_hash: BuilderCommitment = req.blob_param("block_hash")?; + let view_number = req.integer_param("view_number")?; let signature = try_extract_param(&req, "signature")?; let sender = try_extract_param(&req, "sender")?; state - .claim_block(&hash, sender, &signature) + .claim_block(&block_hash, view_number, sender, &signature) .await .context(BlockClaimSnafu { - resource: hash.to_string(), + resource: block_hash.to_string(), }) } .boxed() })? .get("claim_header_input", |req, state| { async move { - let hash: BuilderCommitment = req.blob_param("block_hash")?; + let block_hash: BuilderCommitment = req.blob_param("block_hash")?; + let view_number = req.integer_param("view_number")?; let signature = try_extract_param(&req, "signature")?; let sender = try_extract_param(&req, "sender")?; state - .claim_block_header_input(&hash, sender, &signature) + .claim_block_header_input(&block_hash, view_number, sender, &signature) .await .context(BlockClaimSnafu { - resource: hash.to_string(), + resource: block_hash.to_string(), }) } .boxed() diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index 90641e1358..57e8a44663 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -1,21 +1,22 @@ +use crate::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::BuildError, +}; use async_trait::async_trait; +use committable::Commitment; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, vid::VidCommitment, }; -use crate::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::BuildError, -}; - #[async_trait] pub trait BuilderDataSource { /// To get the list of available blocks async fn get_available_blocks( &self, for_parent: &VidCommitment, + view_number: u64, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result>, BuildError>; @@ -24,6 +25,7 @@ pub trait BuilderDataSource { async fn claim_block( &self, block_hash: &BuilderCommitment, + view_number: u64, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; @@ -32,6 +34,7 @@ pub trait BuilderDataSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, + view_number: u64, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; @@ -45,5 +48,8 @@ pub trait AcceptsTxnSubmits where I: NodeType, { - async fn submit_txn(&mut self, txn: ::Transaction) -> Result<(), BuildError>; + async fn submit_txn( + &mut self, + txn: ::Transaction, + ) -> Result::Transaction>, BuildError>; } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 59ef040b12..f1530593c6 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -107,13 +107,14 @@ impl BuilderClient { pub async fn get_available_blocks( &self, parent: VidCommitment, + view_number: u64, sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result>, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner .get(&format!( - "availableblocks/{parent}/{sender}/{encoded_signature}" + "availableblocks/{parent}/{view_number}/{sender}/{encoded_signature}" )) .send() .await @@ -128,13 +129,14 @@ impl BuilderClient { pub async fn claim_block( &self, block_hash: BuilderCommitment, + view_number: u64, sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner .get(&format!( - "claimblock/{block_hash}/{sender}/{encoded_signature}" + "claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" )) .send() .await @@ -149,13 +151,14 @@ impl BuilderClient { pub async fn claim_block_header_input( &self, block_hash: BuilderCommitment, + view_number: u64, sender: TYPES::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); self.inner .get(&format!( - "claimheaderinput/{block_hash}/{sender}/{encoded_signature}" + "claimheaderinput/{block_hash}/{view_number}/{sender}/{encoded_signature}" )) .send() .await diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4e768ff730..5c38b4a014 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -214,7 +214,7 @@ impl< } /// Get last known builder commitment from consensus. - async fn latest_known_vid_commitment(&self) -> VidCommitment { + async fn latest_known_vid_commitment(&self) -> (TYPES::Time, VidCommitment) { let consensus = self.consensus.read().await; let mut prev_view = TYPES::Time::new(self.cur_view.saturating_sub(1)); @@ -235,13 +235,16 @@ impl< ViewInner::Failed => None, }) { - return commitment; + return (prev_view, commitment); } prev_view = prev_view - 1; } // If not found, return commitment for last decided block - consensus.get_decided_leaf().get_payload_commitment() + ( + prev_view, + consensus.get_decided_leaf().get_payload_commitment(), + ) } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "wait_for_block", level = "error")] @@ -249,7 +252,7 @@ impl< let task_start_time = Instant::now(); // Find commitment to the block we want to build upon - let parent_comm = self.latest_known_vid_commitment().await; + let (view_num, parent_comm) = self.latest_known_vid_commitment().await; let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( &self.private_key, parent_comm.as_ref(), @@ -266,7 +269,7 @@ impl< self.api .builder_timeout() .saturating_sub(task_start_time.elapsed()), - self.get_block_from_builder(parent_comm, &parent_comm_sig), + self.get_block_from_builder(parent_comm, view_num, &parent_comm_sig), ) .await { @@ -299,11 +302,17 @@ impl< async fn get_block_from_builder( &self, parent_comm: VidCommitment, + view_number: TYPES::Time, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> anyhow::Result> { let available_blocks = self .builder_client - .get_available_blocks(parent_comm, self.public_key.clone(), parent_comm_sig) + .get_available_blocks( + parent_comm, + view_number.get_u64(), + self.public_key.clone(), + parent_comm_sig, + ) .await .context("getting available blocks")?; tracing::debug!("Got available blocks: {available_blocks:?}"); @@ -341,8 +350,8 @@ impl< .context("signing block hash")?; let (block, header_input) = futures::join! { - self.builder_client.claim_block(block_info.block_hash.clone(), self.public_key.clone(), &request_signature), - self.builder_client.claim_block_header_input(block_info.block_hash.clone(), self.public_key.clone(), &request_signature) + self.builder_client.claim_block(block_info.block_hash.clone(), view_number.get_u64(), self.public_key.clone(), &request_signature), + self.builder_client.claim_block_header_input(block_info.block_hash.clone(), view_number.get_u64(), self.public_key.clone(), &request_signature) }; let block_data = block.context("claiming block data")?; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 1d07fd494c..84e6efbf19 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -202,6 +202,7 @@ impl BuilderDataSource for RandomBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, + _view_number: u64, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { @@ -218,6 +219,7 @@ impl BuilderDataSource for RandomBuilderSource { async fn claim_block( &self, block_hash: &BuilderCommitment, + _view_number: u64, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { @@ -234,6 +236,7 @@ impl BuilderDataSource for RandomBuilderSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, + _view_number: u64, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { @@ -308,6 +311,7 @@ impl BuilderDataSource for SimpleBuilderSource { async fn get_available_blocks( &self, _for_parent: &VidCommitment, + _view_number: u64, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result>, BuildError> { @@ -363,6 +367,7 @@ impl BuilderDataSource for SimpleBuilderSource { async fn claim_block( &self, block_hash: &BuilderCommitment, + _view_number: u64, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { @@ -391,6 +396,7 @@ impl BuilderDataSource for SimpleBuilderSource { async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, + _view_number: u64, _sender: TYPES::SignatureKey, _signature: &::PureAssembledSignatureType, ) -> Result, BuildError> { diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 02602318c7..444c0977cc 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -49,11 +49,17 @@ async fn test_random_block_builder() { ::SignatureKey::generated_from_seed_indexed([0_u8; 32], 0); let signature = ::SignatureKey::sign(&private_key, &[0_u8; 32]) .expect("Failed to create dummy signature"); + let dummy_view_number = 0u64; let mut blocks = loop { // Test getting blocks let blocks = client - .get_available_blocks(vid_commitment(&[], 1), pub_key, &signature) + .get_available_blocks( + vid_commitment(&[], 1), + dummy_view_number, + pub_key, + &signature, + ) .await .expect("Failed to get available blocks"); @@ -70,7 +76,12 @@ async fn test_random_block_builder() { }; let _: AvailableBlockData = client - .claim_block(blocks.pop().unwrap().block_hash, pub_key, &signature) + .claim_block( + blocks.pop().unwrap().block_hash, + dummy_view_number, + pub_key, + &signature, + ) .await .expect("Failed to claim block"); @@ -80,7 +91,12 @@ async fn test_random_block_builder() { } .builder_commitment(&TestMetadata); let result = client - .claim_block(commitment_for_non_existent_block, pub_key, &signature) + .claim_block( + commitment_for_non_existent_block, + dummy_view_number, + pub_key, + &signature, + ) .await; assert!(matches!(result, Err(BuilderClientError::NotFound))); } From f8c36729cc7050902c7e0889db2291e7da08715b Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 2 May 2024 08:18:39 -0400 Subject: [PATCH 1001/1393] [CX_CLEANUP] - Add Testing for QuorumProposalRecv Task (#3052) * partial commit * add propose now * initiate vote now event * support now types * fix lint * one more fix * make linter happy * tmp commit * finish up testing * rename * allow large enum variations * fix comments * fix import * swapping branches * Fix some comments * fix build * separate update view and move consensus to a split package * unused import * finish moving files * improve docs * revert * merge lower branch target * update lockfile * paring down proposal recv method * moving functions over * complete refactor * fix lint * fix build and lint * Add proposal event handling, rename file * Fix too many args and lines lints * new method for proposing * move feature gate * tmp revert * fix test failures * Fix build after merge * update implementation to integrate * remove dead code and too many args comments * commit broken option for mutli type inputs * working on getting parent, switching branches * Fix an import * Fix lints * Fix lints for dependency-task feature except ConsensusApi errors * Restore justfile * Restore change to ConsensusApi * build vote now * fix build * fix build * gating more features, remove recv code from qp * lint fail, fix build * move functionality over * roll back inline gating * fix lints * remove dead variable * docs * cancel tasks * separate out consensus types, remove todo log * fix build * rename * demote * demote * new workflow, remove noisy file * add create_from * fix compiler error * update test * demote and do not dump trace * remove gate * fix name lint * passing test * fix lints * fix build lints off feature branch * remove handling of QuorumProposalRecv from vote dependency task * clippy * fix tests * fix build, add justfile command * remove superfluous log * debugging tests * fix errneous view number * fix lints * check build * actually spawn task for CI test * tracking down proposal halting * remove the need to modify state tree for view 2 now that genesis behaves normally * incorporate keyaos PR * reduce the noise of a lot of logs, fix a couple off by one errors, debugging lack of peoposal * fix build * fix some more off by ones * incorporate feedback * merge main * fix merge conflict breakage * fix libp2p failing test and halting issues * fix all build and lint errors * actually fix lint * re-enable failure test * remove noisy logs, re-add failure test * lints * docs, cleanup, constants * last fixes * gate tests with excluded behavior * remove failing jobs * remove problematic view change handler --------- Co-authored-by: Keyao Shen --- hotshot/src/lib.rs | 10 +- hotshot/src/tasks/mod.rs | 60 ++++- hotshot/src/tasks/task_state.rs | 39 ++- task-impls/src/consensus/mod.rs | 66 ++--- task-impls/src/consensus/proposal_helpers.rs | 122 +++++---- task-impls/src/consensus/view_change.rs | 14 +- task-impls/src/quorum_proposal.rs | 246 ++++-------------- task-impls/src/quorum_proposal_recv.rs | 18 +- task-impls/src/quorum_vote.rs | 27 +- testing/Cargo.toml | 2 +- testing/tests/tests_1/consensus_task.rs | 11 + testing/tests/tests_1/libp2p.rs | 4 +- testing/tests/tests_1/proposal_ordering.rs | 4 + .../tests_1/quorum_proposal_recv_task.rs | 52 ++++ testing/tests/tests_1/quorum_proposal_task.rs | 129 +++++++-- testing/tests/tests_1/test_with_failures_2.rs | 4 + testing/tests/tests_1/upgrade_task.rs | 72 +++-- types/src/constants.rs | 2 +- 18 files changed, 504 insertions(+), 378 deletions(-) create mode 100644 testing/tests/tests_1/quorum_proposal_recv_task.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 9ff2e20d49..f8b88720d6 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -58,7 +58,7 @@ use tracing::{debug, instrument, trace}; use vbs::version::Version; #[cfg(feature = "dependency-tasks")] -use crate::tasks::{add_quorum_proposal_task, add_quorum_vote_task}; +use crate::tasks::{add_quorum_proposal_recv_task, add_quorum_proposal_task, add_quorum_vote_task}; use crate::{ tasks::{ add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, @@ -697,6 +697,14 @@ impl> SystemContext { &handle, ) .await; + #[cfg(feature = "dependency-tasks")] + add_quorum_proposal_recv_task( + Arc::clone(®istry), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 92eb8aeaf7..70cd33cfa4 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -15,6 +15,7 @@ use hotshot_task_impls::{ events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, response::{run_response_task, NetworkResponseState, RequestReceiver}, @@ -210,30 +211,52 @@ pub async fn inject_consensus_polls>( quorum_proposal_task_state: &QuorumProposalTaskState, ) { - // Poll (forever) for the latest quorum proposal + // Poll (forever) for the latest view sync certificate quorum_proposal_task_state .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) + .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) .await; +} - // Poll (forever) for the latest view sync certificate - quorum_proposal_task_state +/// Setup polls for the [`QuorumVoteTaskState`]. +pub async fn inject_quorum_vote_polls>( + quorum_vote_task_state: &QuorumVoteTaskState, +) { + // Poll (forever) for the latest quorum proposal + quorum_vote_task_state .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) + .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) .await; // Start polling for proposals for the first view - quorum_proposal_task_state + quorum_vote_task_state .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) .await; - quorum_proposal_task_state + quorum_vote_task_state .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForDAC(1)) .await; } +/// Setup polls for the [`QuorumProposalRecvTaskState`]. +pub async fn inject_quorum_proposal_recv_polls>( + quorum_proposal_recv_task_state: &QuorumProposalRecvTaskState, +) { + // Poll (forever) for the latest quorum proposal + quorum_proposal_recv_task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) + .await; + + // Start polling for proposals for the first view + quorum_proposal_recv_task_state + .quorum_network + .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) + .await; +} + /// add the consensus task pub async fn add_consensus_task>( task_reg: Arc, @@ -333,7 +356,26 @@ pub async fn add_quorum_vote_task> rx: Receiver>>, handle: &SystemContextHandle, ) { - let quorum_vote_state = QuorumVoteTaskState::create_from(handle).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_vote_state); + let quorum_vote_task_state = QuorumVoteTaskState::create_from(handle).await; + inject_quorum_vote_polls(&quorum_vote_task_state).await; + let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_vote_task_state); + task_reg.run_task(task).await; +} + +/// Add the quorum proposal recv task. +pub async fn add_quorum_proposal_recv_task>( + task_reg: Arc, + tx: Sender>>, + rx: Receiver>>, + handle: &SystemContextHandle, +) { + let quorum_proposal_recv_task_state = QuorumProposalRecvTaskState::create_from(handle).await; + inject_quorum_proposal_recv_polls(&quorum_proposal_recv_task_state).await; + let task = Task::new( + tx, + rx, + Arc::clone(&task_reg), + quorum_proposal_recv_task_state, + ); task_reg.run_task(task).await; } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index b0d9b72618..3fab51de3d 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -7,9 +7,10 @@ use std::{ use async_trait::async_trait; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, da::DATaskState, - quorum_proposal::QuorumProposalTaskState, quorum_vote::QuorumVoteTaskState, - request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, - vid::VIDTaskState, view_sync::ViewSyncTaskState, + quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, + quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, + transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, + view_sync::ViewSyncTaskState, }; use hotshot_types::traits::{ consensus_api::ConsensusApi, @@ -256,7 +257,6 @@ impl> CreateTaskState instance_state: handle.hotshot.get_instance_state(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - cur_view: handle.get_cur_view().await, public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), @@ -267,3 +267,34 @@ impl> CreateTaskState } } } + +#[async_trait] +impl> CreateTaskState + for QuorumProposalRecvTaskState +{ + async fn create_from( + handle: &SystemContextHandle, + ) -> QuorumProposalRecvTaskState { + let consensus = handle.hotshot.get_consensus(); + QuorumProposalRecvTaskState { + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + consensus, + cur_view: handle.get_cur_view().await, + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + timeout_task: None, + timeout: handle.hotshot.config.next_view_timeout, + round_start_delay: handle.hotshot.config.round_start_delay, + output_event_stream: handle.hotshot.external_event_stream.0.clone(), + storage: Arc::clone(&handle.storage), + formed_upgrade_certificate: None, + proposal_cert: None, + decided_upgrade_cert: None, + spawned_tasks: BTreeMap::new(), + instance_state: handle.hotshot.get_instance_state(), + id: handle.hotshot.id, + } + } +} diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index d7f96a197e..6b37cf44a3 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -2,6 +2,12 @@ use std::{collections::BTreeMap, sync::Arc}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::Result; + +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::vid::vid_scheme; +#[cfg(not(feature = "dependency-tasks"))] +use jf_primitives::vid::VidScheme; + use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -28,10 +34,9 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, - vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; -use jf_primitives::vid::VidScheme; + #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; @@ -42,7 +47,7 @@ use self::proposal_helpers::handle_quorum_proposal_validated; #[cfg(not(feature = "dependency-tasks"))] use crate::consensus::proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}; use crate::{ - consensus::view_change::update_view, + consensus::view_change::{update_view, DONT_SEND_VIEW_CHANGE_EVENT}, events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task}, vote_collection::{ @@ -620,7 +625,7 @@ impl> ConsensusTaskState } HotShotEvent::ViewChange(new_view) => { let new_view = *new_view; - debug!("View Change event for view {} in consensus task", *new_view); + tracing::trace!("View Change event for view {} in consensus task", *new_view); let old_view_number = self.cur_view; @@ -669,6 +674,7 @@ impl> ConsensusTaskState Arc::clone(&self.consensus), &mut self.cur_view, &mut self.timeout_task, + DONT_SEND_VIEW_CHANGE_EVENT, ) .await { @@ -746,6 +752,7 @@ impl> ConsensusTaskState let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -762,37 +769,32 @@ impl> ConsensusTaskState fee: fee.clone(), block_view: view, }); - #[cfg(not(feature = "dependency-tasks"))] + if self.quorum_membership.get_leader(view) == self.public_key + && self.consensus.read().await.high_qc.get_view_number() + 1 == view { - if self.quorum_membership.get_leader(view) == self.public_key - && self.consensus.read().await.high_qc.get_view_number() + 1 == view - { - if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { - warn!("Failed to propose; error = {e:?}"); - }; - } + if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { + warn!("Failed to propose; error = {e:?}"); + }; + } - if let Some(cert) = &self.proposal_cert { - match cert { - ViewChangeEvidence::Timeout(tc) => { - if self.quorum_membership.get_leader(tc.get_view_number() + 1) - == self.public_key - { - if let Err(e) = self.publish_proposal(view, event_stream).await - { - warn!("Failed to propose; error = {e:?}"); - }; - } + if let Some(cert) = &self.proposal_cert { + match cert { + ViewChangeEvidence::Timeout(tc) => { + if self.quorum_membership.get_leader(tc.get_view_number() + 1) + == self.public_key + { + if let Err(e) = self.publish_proposal(view, event_stream).await { + warn!("Failed to propose; error = {e:?}"); + }; } - ViewChangeEvidence::ViewSync(vsc) => { - if self.quorum_membership.get_leader(vsc.get_view_number()) - == self.public_key - { - if let Err(e) = self.publish_proposal(view, event_stream).await - { - warn!("Failed to propose; error = {e:?}"); - }; - } + } + ViewChangeEvidence::ViewSync(vsc) => { + if self.quorum_membership.get_leader(vsc.get_view_number()) + == self.public_key + { + if let Err(e) = self.publish_proposal(view, event_stream).await { + warn!("Failed to propose; error = {e:?}"); + }; } } } diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 48979b5986..24fd60f8ac 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -44,7 +44,7 @@ use crate::quorum_proposal::QuorumProposalTaskState; #[cfg(feature = "dependency-tasks")] use crate::quorum_proposal_recv::QuorumProposalRecvTaskState; use crate::{ - consensus::update_view, + consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, events::HotShotEvent, helpers::{broadcast_event, AnyhowTracing}, }; @@ -53,7 +53,7 @@ use crate::{ /// a `QuorumProposalValidated` event. #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] -pub async fn validate_proposal_safety_and_liveness( +async fn validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, consensus: Arc>>, @@ -248,9 +248,8 @@ pub async fn create_and_send_proposal( _pd: PhantomData, }; debug!( - "Sending null proposal for view {:?} \n {:?}", + "Sending null proposal for view {:?}", proposed_leaf.get_view_number(), - "" ); async_sleep(Duration::from_millis(round_start_delay)).await; @@ -384,7 +383,6 @@ pub async fn get_parent_leaf_and_state( } next_parent_hash = next_parent_leaf.get_parent_commitment(); } - debug!("updated saved leaves"); // TODO do some sort of sanity check on the view number that it matches decided } @@ -660,6 +658,7 @@ pub async fn handle_quorum_proposal_recv consensus_write.locked_view; - - let high_qc = consensus_write.high_qc.clone(); - let locked_view = consensus_write.locked_view; - - drop(consensus_write); - - let mut current_proposal = None; - - if liveness_check { - current_proposal = Some(proposal.data.clone()); - let new_view = proposal.data.view_number + 1; + #[cfg(not(feature = "dependency-tasks"))] + { + let liveness_check = justify_qc.get_view_number() > consensus_write.locked_view; + + let high_qc = consensus_write.high_qc.clone(); + let locked_view = consensus_write.locked_view; + + drop(consensus_write); + + let mut current_proposal = None; + if liveness_check { + current_proposal = Some(proposal.data.clone()); + let new_view = proposal.data.view_number + 1; + + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.get_leader(new_view) + == task_state.public_key + && high_qc.view_number == current_proposal.clone().unwrap().view_number; + + let qc = high_qc.clone(); + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + let create_and_send_proposal_handle = publish_proposal_if_able( + task_state.cur_view, + qc.view_number + 1, + event_stream, + Arc::clone(&task_state.quorum_membership), + task_state.public_key.clone(), + task_state.private_key.clone(), + Arc::clone(&task_state.consensus), + task_state.round_start_delay, + task_state.formed_upgrade_certificate.clone(), + task_state.decided_upgrade_cert.clone(), + &mut task_state.payload_commitment_and_metadata, + &mut task_state.proposal_cert, + Arc::clone(&task_state.instance_state), + ) + .await?; + + task_state + .spawned_tasks + .entry(view) + .or_default() + .push(create_and_send_proposal_handle); + } + } - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.get_leader(new_view) - == task_state.public_key - && high_qc.view_number == current_proposal.clone().unwrap().view_number; + warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); - let qc = high_qc.clone(); - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - let create_and_send_proposal_handle = publish_proposal_if_able( - task_state.cur_view, - qc.view_number + 1, - event_stream, - Arc::clone(&task_state.quorum_membership), - task_state.public_key.clone(), - task_state.private_key.clone(), - Arc::clone(&task_state.consensus), - task_state.round_start_delay, - task_state.formed_upgrade_certificate.clone(), - task_state.decided_upgrade_cert.clone(), - &mut task_state.payload_commitment_and_metadata, - &mut task_state.proposal_cert, - Arc::clone(&task_state.instance_state), - ) - .await?; - - task_state - .spawned_tasks - .entry(view) - .or_default() - .push(create_and_send_proposal_handle); - } + return Ok(current_proposal); } - warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); - return Ok(current_proposal); + #[cfg(feature = "dependency-tasks")] + return Ok(None); }; task_state @@ -1007,8 +1012,17 @@ pub async fn handle_quorum_proposal_validated>( consensus: Arc>>, cur_view: &mut TYPES::Time, timeout_task: &mut Option>, + send_view_change_event: bool, ) -> Result<()> { ensure!( new_view > *cur_view, @@ -86,14 +93,14 @@ pub(crate) async fn update_view>( .await; if quorum_membership.get_leader(next_view) == public_key { - debug!("Polling for quorum votes for view {}", **cur_view); quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForVotes(**cur_view)) .await; } - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; - + if send_view_change_event { + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; + } // Spawn a timeout task if we did actually update view *timeout_task = Some(async_spawn({ let stream = event_stream.clone(); @@ -127,6 +134,7 @@ pub(crate) async fn update_view>( } let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; consensus.update_view(new_view); + tracing::trace!("View updated successfully"); Ok(()) } diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index c966245c49..dfc9d223f8 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -1,11 +1,9 @@ -use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use committable::Committable; use either::Either; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, @@ -14,9 +12,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, - data::{Leaf, QuorumProposal}, event::Event, - message::Proposal, traits::{ block_contents::BlockHeader, election::Membership, @@ -25,17 +21,17 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, - vote::{Certificate, HasViewNumber}, + vote::Certificate, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, error, instrument, warn}; #[cfg(feature = "dependency-tasks")] use crate::consensus::proposal_helpers::handle_quorum_proposal_validated; use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + consensus::proposal_helpers::publish_proposal_if_able, events::HotShotEvent, + helpers::cancel_task, }; /// Proposal dependency types. These types represent events that precipitate a proposal. @@ -53,7 +49,7 @@ enum ProposalDependency { /// For the `QCFormed` event timeout branch. TimeoutCert, - /// For the `QuroumProposalRecv` event. + /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. Proposal, /// For the `ProposeNow` event. @@ -62,6 +58,9 @@ enum ProposalDependency { /// Handler for the proposal dependency struct ProposalDependencyHandle { + /// Latest view number that has been proposed for. + latest_proposed_view: TYPES::Time, + /// The view number to propose for. view_number: TYPES::Time, @@ -103,137 +102,6 @@ struct ProposalDependencyHandle { id: u64, } -impl ProposalDependencyHandle { - /// Sends a proposal if possible from the high qc we have - #[allow(clippy::too_many_lines)] - pub async fn publish_proposal_if_able( - &self, - view: TYPES::Time, - event_stream: &Sender>>, - commit_and_metadata: CommitmentAndMetadata, - instance_state: Arc, - ) -> bool { - if self.quorum_membership.get_leader(view) != self.public_key { - // This is expected for view 1, so skipping the logging. - if view != TYPES::Time::new(1) { - error!( - "Somehow we formed a QC but are not the leader for the next view {:?}", - view - ); - } - return false; - } - - let consensus = self.consensus.read().await; - let parent_view_number = &consensus.high_qc.get_view_number(); - let mut reached_decided = false; - - let Some(parent_view) = consensus.validated_state_map.get(parent_view_number) else { - // This should have been added by the replica? - error!("Couldn't find parent view in state map, waiting for replica to see proposal\n parent view number: {}", **parent_view_number); - return false; - }; - // Leaf hash in view inner does not match high qc hash - Why? - let Some((leaf_commitment, state)) = parent_view.get_leaf_and_state() else { - error!( - ?parent_view_number, - ?parent_view, - "Parent of high QC points to a view without a proposal" - ); - return false; - }; - if leaf_commitment != consensus.high_qc.get_data().leaf_commit { - // NOTE: This happens on the genesis block - debug!( - "They don't equal: {:?} {:?}", - leaf_commitment, - consensus.high_qc.get_data().leaf_commit - ); - } - let Some(leaf) = consensus.saved_leaves.get(&leaf_commitment) else { - error!("Failed to find high QC of parent."); - return false; - }; - if leaf.get_view_number() == consensus.last_decided_view { - reached_decided = true; - } - - let parent_leaf = leaf.clone(); - - let original_parent_hash = parent_leaf.commit(); - - let mut next_parent_hash = original_parent_hash; - - // Walk back until we find a decide - if !reached_decided { - debug!( - "We have not reached decide from view {:?}", - self.view_number - ); - while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { - if next_parent_leaf.get_view_number() <= consensus.last_decided_view { - break; - } - next_parent_hash = next_parent_leaf.get_parent_commitment(); - } - debug!("updated saved leaves"); - // TODO do some sort of sanity check on the view number that it matches decided - } - - // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. - let block_header = TYPES::BlockHeader::new( - state, - instance_state.as_ref(), - &parent_leaf, - commit_and_metadata.commitment, - commit_and_metadata.builder_commitment, - commit_and_metadata.metadata, - commit_and_metadata.fee, - ) - .await; - - // TODO: DA cert is sent as part of the proposal here, we should split this out so we don't have to wait for it. - let proposal = QuorumProposal { - block_header: block_header.clone(), - view_number: view, - justify_qc: consensus.high_qc.clone(), - proposal_certificate: None, - upgrade_certificate: None, - }; - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - - let Ok(signature) = - TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) - else { - error!("Failed to sign new_leaf.commit()!"); - return false; - }; - - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, - }; - debug!( - "Sending null proposal for view {:?} \n {:?}", - proposed_leaf.get_view_number(), - "" - ); - - async_sleep(Duration::from_millis(self.round_start_delay)).await; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend( - message.clone(), - self.public_key.clone(), - )), - event_stream, - ) - .await; - true - } -} - impl HandleDepOutput for ProposalDependencyHandle { type Output = Vec>>>>; @@ -263,7 +131,6 @@ impl HandleDepOutput for ProposalDependencyHandle { view, fee, ) => { - debug!("Got commit and meta {:?}", payload_commitment); commit_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, builder_commitment: builder_commitment.clone(), @@ -309,13 +176,25 @@ impl HandleDepOutput for ProposalDependencyHandle { return; } - self.publish_proposal_if_able( + if let Err(e) = publish_proposal_if_able( + self.latest_proposed_view, self.view_number, - &self.sender, - commit_and_metadata.unwrap(), + self.sender, + self.quorum_membership, + self.public_key, + self.private_key, + Arc::clone(&self.consensus), + self.round_start_delay, + None, + None, + &mut commit_and_metadata, + &mut None, Arc::clone(&self.instance_state), ) - .await; + .await + { + error!(?e, "Failed to publish proposal"); + } } } @@ -360,9 +239,6 @@ pub struct QuorumProposalTaskState /// Round start delay from config, in milliseconds. pub round_start_delay: u64, - /// The view number that this node is executing in. - pub cur_view: TYPES::Time, - /// timeout task handle pub timeout_task: Option>, @@ -377,7 +253,7 @@ pub struct QuorumProposalTaskState impl> QuorumProposalTaskState { /// Create an event dependency - #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view, view = *self.cur_view), name = "Quorum proposal create event dependency", level = "error")] + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Create event dependency", level = "info")] fn create_event_dependency( &self, dependency_type: ProposalDependency, @@ -391,10 +267,6 @@ impl> QuorumProposalTaskState { if let HotShotEvent::QCFormed(either::Left(qc)) = event { - warn!( - "QC View number {} View number {}", - *qc.view_number, *view_number - ); qc.view_number + 1 } else { return false; @@ -448,7 +320,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { propose_now_dependency.mark_as_completed(Arc::clone(&event)); @@ -561,7 +428,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState>>, event: Arc>, ) { - info!("Attempting to make dependency task for event {:?}", event); + // Don't even bother making the task if we are not entitled to propose anyay. + if self.quorum_membership.get_leader(view_number) != self.public_key { + return; + } + + // Don't try to propose twice for the same view. + if view_number <= self.latest_proposed_view { + return; + } + + debug!("Attempting to make dependency task for view {view_number:?} and event {event:?}"); if self.propose_dependencies.get(&view_number).is_some() { debug!("Task already exists"); return; @@ -581,6 +458,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState bool { if *self.latest_proposed_view < *new_view { - info!( - "Updating next proposal view from {} to {} in the quorum proposal task", + debug!( + "Updating latest proposed view from {} to {}", *self.latest_proposed_view, *new_view ); @@ -624,7 +502,7 @@ impl> QuorumProposalTaskState>, @@ -651,7 +529,6 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { - warn!("Failed to store High QC of QC we formed. Error: {:?}", e); + warn!("Failed to store High QC of QC we formed; error = {:?}", e); } let mut consensus = self.consensus.write().await; @@ -680,7 +557,6 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { let view = *view; - debug!( - "Got payload commitment {:?} for view {view:?}", - payload_commitment - ); self.create_dependency_task_if_new( view, @@ -729,17 +601,16 @@ impl> QuorumProposalTaskState { - let new_view = proposal.view_number + 1; + let new_view = proposal.view_number; + + if !self.update_latest_proposed_view(new_view).await { + tracing::trace!("Failed to update latest proposed view"); + return; + } if let Err(e) = handle_quorum_proposal_validated(proposal, event_sender.clone(), self).await @@ -747,13 +618,8 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { let view = proposal.data.view_number; if !self.update_latest_proposed_view(view).await { - warn!("Failed to update latest proposed view"); + tracing::trace!("Failed to update latest proposed view"); return; } } @@ -784,6 +650,7 @@ impl> TaskState | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) | HotShotEvent::ProposeNow(..) + | HotShotEvent::QuorumProposalSend(..) | HotShotEvent::Shutdown, ) } @@ -793,7 +660,6 @@ impl> TaskState { let receiver = task.subscribe(); let sender = task.clone_sender(); - tracing::trace!("sender queue len {}", sender.len()); task.state_mut().handle(event, receiver, sender).await; None } diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs index be1d00c1aa..8219aa4d17 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv.rs @@ -44,9 +44,6 @@ pub struct QuorumProposalRecvTaskState>, - /// Network for all nodes pub quorum_network: Arc, @@ -121,7 +118,6 @@ impl> QuorumProposalRecvTaskState< if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self).await { Ok(Some(current_proposal)) => { - self.cancel_tasks(proposal.data.get_view_number() + 1).await; // Build the parent leaf since we didn't find it during the proposal check. let parent_leaf = match get_parent_leaf_and_state( self.cur_view, @@ -133,14 +129,15 @@ impl> QuorumProposalRecvTaskState< .await { Ok((parent_leaf, _ /* state */)) => parent_leaf, - Err(e) => { - warn!(?e, "Failed to get parent leaf and state"); + Err(error) => { + warn!(?error, "Failed to get parent leaf and state"); return; } }; - let consensus = self.consensus.read().await; let view = current_proposal.get_view_number(); + self.cancel_tasks(proposal.data.get_view_number()).await; + let consensus = self.consensus.read().await; let Some(vid_shares) = consensus.vid_shares.get(&view) else { debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", @@ -177,7 +174,7 @@ impl> QuorumProposalRecvTaskState< .await; } Ok(None) => { - self.cancel_tasks(proposal.data.get_view_number() + 1).await; + self.cancel_tasks(proposal.data.get_view_number()).await; } Err(e) => warn!(?e, "Failed to propose"), } @@ -191,7 +188,10 @@ impl> TaskState type Event = Arc>; type Output = (); fn filter(&self, event: &Arc>) -> bool { - !matches!(event.as_ref(), HotShotEvent::QuorumProposalRecv(..)) + !matches!( + event.as_ref(), + HotShotEvent::QuorumProposalRecv(..) | HotShotEvent::Shutdown + ) } async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index f4d9611791..352ca9e179 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -30,7 +30,7 @@ use hotshot_types::{ use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, instrument, trace, warn}; use crate::{ events::HotShotEvent, @@ -40,7 +40,7 @@ use crate::{ /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { - /// For the `QuorumProposalRecv` event. + /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. QuorumProposal, /// For the `DACertificateRecv` event. Dac, @@ -253,7 +253,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState { let view = cert.view_number; - debug!("Received DAC for view {}", *view); + trace!("Received DAC for view {}", *view); if view <= self.latest_voted_view { return; } @@ -414,7 +414,7 @@ impl> QuorumVoteTaskState { let view = disperse.data.get_view_number(); - debug!("Received VID share for view {}", *view); + trace!("Received VID share for view {}", *view); if view <= self.latest_voted_view { return; } @@ -487,22 +487,6 @@ impl> QuorumVoteTaskState { - let new_view = *new_view; - debug!( - "View Change event for view {} in quorum vote task", - *new_view - ); - - let old_voted_view = self.latest_voted_view; - - // Start polling for VID disperse for the new view - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( - *old_voted_view + 1, - )) - .await; - } _ => {} } } @@ -515,7 +499,6 @@ impl> TaskState for QuorumVoteTask !matches!( event.as_ref(), HotShotEvent::DACertificateRecv(_) - | HotShotEvent::ViewChange(_) | HotShotEvent::VIDShareRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) | HotShotEvent::VoteNow(..) diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 9d19b3b488..63f2a58d43 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -10,7 +10,7 @@ default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] -proposal-dependency-tasks = ["hotshot/dependency-tasks"] +dependency-tasks = ["hotshot/dependency-tasks"] [dependencies] automod = "1.0.14" diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 05646e60dd..d673780a7c 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,3 +1,6 @@ +// TODO: Remove after integration of dependency-tasks +#![allow(unused_imports)] + use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; use std::sync::Arc; @@ -26,6 +29,7 @@ use jf_primitives::vid::VidScheme; use sha2::Digest; #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { @@ -114,6 +118,7 @@ async fn test_consensus_task() { } #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { @@ -173,6 +178,7 @@ async fn test_consensus_vote() { /// Tests the voting behavior by allowing the input to be permuted in any order desired. This /// assures that, no matter what, a vote is indeed sent no matter what order the precipitating /// events occur. The permutation is specified as `input_permutation` and is a vector of indices. +#[cfg(not(feature = "dependency-tasks"))] async fn test_vote_with_specific_order(input_permutation: Vec) { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -238,6 +244,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { } #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote_with_permuted_dac() { @@ -253,6 +260,7 @@ async fn test_consensus_vote_with_permuted_dac() { } #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { @@ -391,6 +399,7 @@ async fn test_view_sync_finalize_propose() { } #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task @@ -488,6 +497,7 @@ async fn test_view_sync_finalize_vote() { } #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task @@ -595,6 +605,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { } #[cfg(test)] +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_vid_disperse_storage_failure() { diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index 091e92662d..f4fa98636e 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -1,11 +1,12 @@ use std::time::Duration; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_testing::spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestDescription, TimingData}, }; use tracing::instrument; @@ -42,6 +43,7 @@ async fn libp2p_network() { } /// libp2p network test with failures +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index e174603076..1adde8799b 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -1,3 +1,5 @@ +// TODO: Remove this after integration +#![allow(unused_imports)] use std::sync::Arc; use hotshot::tasks::task_state::CreateTaskState; @@ -23,6 +25,7 @@ use sha2::Digest; /// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). /// This proposal should happen no matter how the `input_permutation` is specified. +#[cfg(not(feature = "dependency-tasks"))] async fn test_ordering_with_specific_order(input_permutation: Vec) { use hotshot_testing::{ script::{run_test_script, TestScriptStage}, @@ -117,6 +120,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { run_test_script(script, consensus_state).await; } +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs new file mode 100644 index 0000000000..5eb00f5543 --- /dev/null +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -0,0 +1,52 @@ +// TODO: Remove after integration +#![allow(unused_imports)] + +use hotshot::tasks::task_state::CreateTaskState; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::{ + events::HotShotEvent::*, quorum_proposal_recv::QuorumProposalRecvTaskState, +}; +use hotshot_testing::{ + predicates::event::exact, + script::{run_test_script, TestScriptStage}, + task_helpers::build_system_handle, + view_generator::TestViewGenerator, +}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; + +#[cfg(test)] +#[cfg(feature = "dependency-tasks")] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_recv_task() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Run view 2 and propose. + let view_2 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![exact(ViewChange(ViewNumber::new(2)))], + asserts: vec![], + }; + + let state = QuorumProposalRecvTaskState::::create_from(&handle).await; + run_test_script(vec![view_2], state).await; +} diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 6ff624b12f..a79f002c50 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -3,10 +3,7 @@ use hotshot::tasks::{inject_quorum_proposal_polls, task_state::CreateTaskState}; use hotshot_example_types::state_types::TestInstanceState; use std::sync::Arc; -use hotshot_example_types::{ - node_types::{MemoryImpl, TestTypes}, - state_types::TestValidatedState, -}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ predicates::event::quorum_proposal_send, @@ -21,7 +18,7 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, }, - utils::{BuilderCommitment, View, ViewInner}, + utils::BuilderCommitment, vid::VidSchemeType, }; use jf_primitives::vid::VidScheme; @@ -38,23 +35,22 @@ fn make_payload_commitment( vid.commit_only(&encoded_transactions).unwrap() } +#[cfg(feature = "dependency-tasks")] #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_task_quorum_proposal() { +async fn test_quorum_proposal_task_quorum_proposal_view_1() { use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - // We need to propose as the leader for view 2, otherwise we get caught up with the special - // case in the genesis view. - let handle = build_system_handle(2).await.0; + let handle = build_system_handle(1).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(1)); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -66,17 +62,79 @@ async fn test_quorum_proposal_task_quorum_proposal() { leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); } + + let cert = proposals[0].data.justify_qc.clone(); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + + let view = TestScriptStage { + inputs: vec![ + QCFormed(either::Left(cert.clone())), + SendPayloadCommitmentAndMetadata( + payload_commitment, + builder_commitment, + TestMetadata, + ViewNumber::new(1), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), + ), + ], + outputs: vec![quorum_proposal_send()], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + inject_quorum_proposal_polls(&quorum_proposal_task_state).await; + + let script = vec![view]; + run_test_script(script, quorum_proposal_task_state).await; +} + +#[cfg(feature = "dependency-tasks")] +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { + use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; + use hotshot_types::{ + data::null_block, + utils::{View, ViewInner}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let node_id = 3; + let handle = build_system_handle(node_id).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut leaves = Vec::new(); + for view in (&mut generator).take(3) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); + } let consensus = handle.get_consensus(); let mut consensus = consensus.write().await; - // `find_parent_leaf_and_state` depends on the existence of prior values in the consensus + // `validate_proposal_safety_and_liveness` depends on the existence of prior values in the consensus // state, but since we do not spin up the consensus task, these values must be manually filled // out. // First, insert a parent view whose leaf commitment will be returned in the lower function // call. consensus.validated_state_map.insert( - ViewNumber::new(1), + ViewNumber::new(2), View { view_inner: ViewInner::Leaf { leaf: leaves[1].get_parent_commitment(), @@ -94,11 +152,11 @@ async fn test_quorum_proposal_task_quorum_proposal() { // Release the write lock before proceeding with the test drop(consensus); - let cert = proposals[1].data.justify_qc.clone(); + + let cert = proposals[2].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - // Run at view 2, the quorum proposal task shouldn't care as long as the bookkeeping is correct - let view_2 = TestScriptStage { + let view = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), QCFormed(either::Left(cert.clone())), @@ -106,8 +164,12 @@ async fn test_quorum_proposal_task_quorum_proposal() { payload_commitment, builder_commitment, TestMetadata, - ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + ViewNumber::new(node_id), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -118,7 +180,7 @@ async fn test_quorum_proposal_task_quorum_proposal() { QuorumProposalTaskState::::create_from(&handle).await; inject_quorum_proposal_polls(&quorum_proposal_task_state).await; - let script = vec![view_2]; + let script = vec![view]; run_test_script(script, quorum_proposal_task_state).await; } @@ -170,7 +232,11 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -238,7 +304,11 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -294,7 +364,11 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + fee: null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -310,7 +384,11 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + fee: null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -336,7 +414,11 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment, metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + fee: null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -398,7 +480,6 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 83a4be337a..e477394601 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -1,3 +1,6 @@ +// TODO: Remove this after integration +#![allow(unused_imports)] + use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, state_types::TestTypes, @@ -9,6 +12,7 @@ use hotshot_testing::{ test_builder::TestDescription, }; // Test that a good leader can succeed in the view directly after view sync +#[cfg(not(feature = "dependency-tasks"))] cross_tests!( TestName: test_with_failures_2, Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 8f321895d9..75a555caf8 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -1,7 +1,9 @@ -use std::time::Duration; +// TODO: Remove after integration of dependency-tasks +#![allow(unused_imports)] + use hotshot_example_types::state_types::TestInstanceState; use std::sync::Arc; - +use std::time::Duration; use hotshot::{ tasks::{inject_consensus_polls, task_state::CreateTaskState}, @@ -28,6 +30,7 @@ use hotshot_types::{ }; use vbs::version::Version; +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. @@ -44,7 +47,6 @@ async fn test_consensus_task_upgrade() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; @@ -153,17 +155,14 @@ async fn test_consensus_task_upgrade() { let script = vec![view_1, view_2, view_3, view_4, view_5]; - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; inject_consensus_polls(&consensus_state).await; run_test_script(script, consensus_state).await; } +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) @@ -180,7 +179,6 @@ async fn test_upgrade_and_consensus_task() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let other_handles = futures::future::join_all((0..=9).map(build_system_handle)).await; let old_version = Version { major: 0, minor: 1 }; @@ -228,11 +226,7 @@ async fn test_upgrade_and_consensus_task() { .iter() .map(|h| views[1].create_upgrade_vote(upgrade_data.clone(), &h.0)); - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; let mut upgrade_state = UpgradeTaskState::< TestTypes, MemoryImpl, @@ -261,7 +255,11 @@ async fn test_upgrade_and_consensus_task() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), QCFormed(either::Either::Left(proposals[1].data.justify_qc.clone())), ], @@ -321,6 +319,7 @@ async fn test_upgrade_and_consensus_task() { test_scripts![inputs, consensus_script, upgrade_script]; } +#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) @@ -342,7 +341,6 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; @@ -421,11 +419,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { views.push(view.clone()); } - let consensus_state = ConsensusTaskState::< - TestTypes, - MemoryImpl, - >::create_from(&handle) - .await; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; let mut upgrade_state = UpgradeTaskState::< TestTypes, MemoryImpl, @@ -452,7 +446,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[1].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), ], vec![ @@ -463,7 +461,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -475,7 +477,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[3].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -487,7 +493,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[4].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -498,7 +508,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[5].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(6), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], @@ -510,7 +524,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[6].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(7), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee( + quorum_membership.total_nodes(), + Arc::new(TestInstanceState {}), + ) + .unwrap(), ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], diff --git a/types/src/constants.rs b/types/src/constants.rs index 1016d80776..b9e95e4a02 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -42,7 +42,7 @@ pub const STATIC_VER_0_1: Version01 = StaticVersion {}; pub const EVENT_CHANNEL_SIZE: usize = 100_000; /// Default channel size for HotShot -> application communication -pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 1000; +pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 100_000; /// Constants for `WebServerNetwork` and `WebServer` /// The Web CDN is not, strictly speaking, bound to the network; it can have its own versioning. From 4babd0482eb1fad9e3395d82674ea4664d1e26c8 Mon Sep 17 00:00:00 2001 From: Nathan F Yospe Date: Thu, 2 May 2024 11:12:58 -0400 Subject: [PATCH 1002/1393] Add support for and bypasses a write lock (#3080) * Add support for and bypasses a write lock * Use POST in disco .toml file * APIs move a vec instead of doing vec.clone() --- builder-api/api/submit.toml | 11 ++++++++- builder-api/src/builder.rs | 24 ++++++++++++------- builder-api/src/data_source.rs | 17 ++++++------- .../src/traits/networking/push_cdn_network.rs | 3 +-- task-impls/src/consensus/mod.rs | 11 ++++----- 5 files changed, 39 insertions(+), 27 deletions(-) diff --git a/builder-api/api/submit.toml b/builder-api/api/submit.toml index 5d4354aa43..929ec45854 100644 --- a/builder-api/api/submit.toml +++ b/builder-api/api/submit.toml @@ -31,7 +31,16 @@ FORMAT_VERSION = "0.1.0" PATH = ["/submit"] METHOD = "POST" DOC = """ -Submit a transaction to the Builder +Submit a transaction to builder's private mempool." Returns transaction hash """ + +[route.submit_batch] +PATH = ["/batch"] +METHOD = "POST" +DOC = """ +Submit a list of transactions to builder's private mempool." + +Returns the corresponding list of transaction hashes +""" diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 14cef81b3e..2850c84b4f 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -8,11 +8,7 @@ use hotshot_types::{traits::node_implementation::NodeType, utils::BuilderCommitm use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use tagged_base64::TaggedBase64; -use tide_disco::{ - api::ApiError, - method::{ReadState, WriteState}, - Api, RequestError, RequestParams, StatusCode, -}; +use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, RequestParams, StatusCode}; use vbs::version::StaticVersionType; use crate::{ @@ -202,8 +198,7 @@ pub fn submit_api( options: &Options, ) -> Result, ApiError> where - State: 'static + Send + Sync + WriteState, - ::State: Send + Sync + AcceptsTxnSubmits, + State: 'static + Send + Sync + AcceptsTxnSubmits, Types: NodeType, { let mut api = load_api::( @@ -212,16 +207,27 @@ where options.extensions.clone(), )?; api.with_version("0.0.1".parse().unwrap()) - .post("submit_txn", |req, state| { + .at("submit_txn", |req: RequestParams, state| { async move { let tx = req .body_auto::<::Transaction, Ver>(Ver::instance()) .context(TxnUnpackSnafu)?; let hash = tx.commit(); - state.submit_txn(tx).await.context(TxnSubmitSnafu)?; + state.submit_txns(vec![tx]).await.context(TxnSubmitSnafu)?; Ok(hash) } .boxed() + })? + .at("submit_batch", |req: RequestParams, state| { + async move { + let txns = req + .body_auto::::Transaction>, Ver>(Ver::instance()) + .context(TxnUnpackSnafu)?; + let hashes = txns.iter().map(|tx| tx.commit()).collect::>(); + state.submit_txns(txns).await.context(TxnSubmitSnafu)?; + Ok(hashes) + } + .boxed() })?; Ok(api) } diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index 57e8a44663..5dea99695b 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -1,7 +1,3 @@ -use crate::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::BuildError, -}; use async_trait::async_trait; use committable::Commitment; use hotshot_types::{ @@ -10,6 +6,11 @@ use hotshot_types::{ vid::VidCommitment, }; +use crate::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::BuildError, +}; + #[async_trait] pub trait BuilderDataSource { /// To get the list of available blocks @@ -48,8 +49,8 @@ pub trait AcceptsTxnSubmits where I: NodeType, { - async fn submit_txn( - &mut self, - txn: ::Transaction, - ) -> Result::Transaction>, BuildError>; + async fn submit_txns( + &self, + txns: Vec<::Transaction>, + ) -> Result::Transaction>>, BuildError>; } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 3b5c7c1899..db68676bdd 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -4,10 +4,9 @@ use std::{collections::BTreeSet, marker::PhantomData}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, sync::Arc, time::Duration}; -use async_compatibility_layer::art::async_sleep; #[cfg(feature = "hotshot-testing")] use async_compatibility_layer::art::async_spawn; -use async_compatibility_layer::channel::UnboundedSendError; +use async_compatibility_layer::{art::async_sleep, channel::UnboundedSendError}; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 6b37cf44a3..2c6881f29f 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -2,12 +2,6 @@ use std::{collections::BTreeMap, sync::Arc}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::Result; - -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::vid::vid_scheme; -#[cfg(not(feature = "dependency-tasks"))] -use jf_primitives::vid::VidScheme; - use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -19,6 +13,8 @@ use hotshot_task::task::{Task, TaskState}; use hotshot_types::data::VidDisperseShare; #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::message::Proposal; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::vid::vid_scheme; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, @@ -36,7 +32,8 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; - +#[cfg(not(feature = "dependency-tasks"))] +use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; From dd095ff1963dc5f206284083e1273432f4456b54 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 2 May 2024 15:42:57 -0400 Subject: [PATCH 1003/1393] Add endpoint to force start the orchestrator (#3055) --- examples/infra/mod.rs | 7 +- orchestrator/api.toml | 20 +++- orchestrator/run-config.toml | 1 + orchestrator/src/client.rs | 69 ++++++++--- orchestrator/src/config.rs | 66 +++++------ orchestrator/src/lib.rs | 221 ++++++++++++++++++++++++++++------- 6 files changed, 275 insertions(+), 109 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 29ef95abc4..b7165625bf 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -323,8 +323,8 @@ fn calculate_num_tx_per_round( ) -> usize { transactions_per_round / total_num_nodes + usize::from( - (total_num_nodes - 1 - node_index as usize) - < (transactions_per_round % total_num_nodes), + (total_num_nodes) + < (transactions_per_round % total_num_nodes) + 1 + (node_index as usize), ) } @@ -1020,12 +1020,9 @@ pub async fn main_entry_point< // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. let (mut run_config, source) = NetworkConfig::::get_complete_config( &orchestrator_client, - args.clone().network_config_file, my_own_validator_config, args.advertise_address, Some(libp2p_public_key), - // If `indexed_da` is true: use the node index to determine if we are a DA node. - true, ) .await .expect("failed to get config"); diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 3f97dfec9b..a19d05b85c 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -22,7 +22,7 @@ This must be a POST request so we can update the OrchestratorState in the server received from the 'identity' endpoint """ -# GET the latest temporary node index only for generating validator's key pair +# POST the latest temporary node index only for generating validator's key pair [route.get_tmp_node_index] PATH = ["get_tmp_node_index"] METHOD = "POST" @@ -32,9 +32,8 @@ Get the latest temporary node index only for generating validator's key pair for # POST the node's node index to generate public key for pubkey collection [route.post_pubkey] -PATH = ["pubkey/:node_index/:is_da"] +PATH = ["pubkey/:is_da"] METHOD = "POST" -":node_index" = "Integer" ":is_da" = "Boolean" DOC = """ Post a node's node_index so that its public key could be posted and collected by the orchestrator. @@ -48,9 +47,10 @@ DOC = """ Get whether the node can collect the final config which includs all peer's public config/info like public keys, returns a boolean. """ -# GET the updated config with all peers' public keys / configs -[route.get_config_after_peer_collected] -PATH = ["get_config_after_peer_collected"] +# POST the updated config with all peers' public keys / configs +[route.post_config_after_peer_collected] +PATH = ["post_config_after_peer_collected"] +METHOD = "POST" DOC = """ Get the updated config with all peers' public keys / configs, returns a NetworkConfig. """ @@ -79,3 +79,11 @@ METHOD = "POST" DOC = """ Post run results. """ + +# POST to manually start the run +[route.post_manual_start] +PATH = ["manual_start"] +METHOD = "POST" +DOC = """ +Post whether the orchestrator should start the run immediately, with the nodes that have already registered. +""" diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 0f210cdb66..9086b5f87c 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,4 +1,5 @@ rounds = 10 +indexed_da = true transactions_per_round = 10 transaction_size = 1000 node_index = 0 diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index b44fde363d..2544dc9179 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -3,7 +3,9 @@ use std::{net::SocketAddr, time::Duration}; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::{constants::Version01, traits::signature_key::SignatureKey, PeerConfig}; +use hotshot_types::{ + constants::Version01, traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig, +}; use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; @@ -14,7 +16,7 @@ use crate::{config::NetworkConfig, OrchestratorVersion}; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client - client: surf_disco::Client, + pub client: surf_disco::Client, } /// Struct describing a benchmark result @@ -277,7 +279,7 @@ impl OrchestratorClient { let get_config_after_collection = |client: Client| { async move { let result = client - .get("api/get_config_after_peer_collected") + .post("api/post_config_after_peer_collected") .send() .await; @@ -303,19 +305,49 @@ impl OrchestratorClient { #[instrument(skip(self), name = "orchestrator public keys")] pub async fn post_and_wait_all_public_keys( &self, - node_index: u64, - my_pub_key: PeerConfig, - is_da: bool, + mut validator_config: ValidatorConfig, + libp2p_address: Option, + libp2p_public_key: Option, ) -> NetworkConfig { - // send my public key - let _send_pubkey_ready_f: Result<(), ClientError> = self - .client - .post(&format!("api/pubkey/{node_index}/{is_da}")) - .body_binary(&PeerConfig::::to_bytes(&my_pub_key)) - .unwrap() - .send() - .await - .inspect_err(|err| tracing::error!("{err}")); + // Get the (possible) Libp2p advertise address from our args + let libp2p_address: Option = libp2p_address.map(|f| { + Multiaddr::try_from(format!( + "/{}/{}/udp/{}/quic-v1", + if f.is_ipv4() { "ip4" } else { "ip6" }, + f.ip(), + f.port() + )) + .expect("failed to create multiaddress") + }); + + let pubkey: Vec = + PeerConfig::::to_bytes(&validator_config.get_public_config()).clone(); + let da_requested: bool = validator_config.is_da; + + // Serialize our (possible) libp2p-specific data + let request_body = + vbs::Serializer::::serialize(&(pubkey, libp2p_address, libp2p_public_key)) + .expect("failed to serialize request"); + + // register our public key with the orchestrator + let (node_index, is_da): (u64, bool) = loop { + let result = self + .client + .post(&format!("api/pubkey/{da_requested}")) + .body_binary(&request_body) + .expect("Failed to form request") + .send() + .await + .inspect_err(|err| tracing::error!("{err}")); + + if let Ok((index, is_da)) = result { + break (index, is_da); + } + + async_sleep(Duration::from_millis(250)).await; + }; + + validator_config.is_da = is_da; // wait for all nodes' public keys let wait_for_all_nodes_pub_key = |client: Client| { @@ -331,7 +363,12 @@ impl OrchestratorClient { self.wait_for_fn_from_orchestrator::<_, _, ()>(wait_for_all_nodes_pub_key) .await; - self.get_config_after_collection().await + let mut network_config = self.get_config_after_collection().await; + + network_config.node_index = node_index; + network_config.config.my_own_validator_config = validator_config; + + network_config } /// Tells the orchestrator this validator is ready to start diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 568179bcb5..9febee8532 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -157,8 +157,15 @@ impl Default for RandomBuilderConfig { pub struct NetworkConfig { /// number of views to run pub rounds: usize, + /// whether DA membership is determined by index. + /// if true, the first k nodes to register form the DA committee + /// if false, DA membership is requested by the nodes + pub indexed_da: bool, /// number of transactions per view pub transactions_per_round: usize, + /// password to have the orchestrator start the network, + /// regardless of the number of nodes connected. + pub manual_start_password: Option, /// number of bootstrap nodes pub num_bootrap: usize, /// timeout before starting the next view @@ -291,50 +298,24 @@ impl NetworkConfig { /// If we are unable to get the configuration from the orchestrator pub async fn get_complete_config( client: &OrchestratorClient, - file: Option, my_own_validator_config: ValidatorConfig, libp2p_address: Option, libp2p_public_key: Option, - // If true, we will use the node index to determine if we are a DA node - indexed_da: bool, ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { - let (mut run_config, source) = - Self::from_file_or_orchestrator(client, file, libp2p_address, libp2p_public_key) - .await?; - let node_index = run_config.node_index; - - // Assign my_own_validator_config to the run_config if not loading from file - match source { - NetworkConfigSource::Orchestrator => { - run_config.config.my_own_validator_config = my_own_validator_config.clone(); - } - NetworkConfigSource::File => { - // do nothing, my_own_validator_config has already been loaded from file - } - } - - // If we've chosen to be DA based on the index, do so - if indexed_da { - run_config.config.my_own_validator_config.is_da = - run_config.node_index < run_config.config.da_staked_committee_size as u64; - } - - // one more round of orchestrator here to get peer's public key/config - let updated_config: NetworkConfig = client + // get the configuration from the orchestrator + let run_config: NetworkConfig = client .post_and_wait_all_public_keys::( - run_config.node_index, - run_config - .config - .my_own_validator_config - .get_public_config(), - run_config.config.my_own_validator_config.is_da, + my_own_validator_config, + libp2p_address, + libp2p_public_key, ) .await; - run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake; - run_config.config.known_da_nodes = updated_config.config.known_da_nodes; - info!("Retrieved config; our node index is {node_index}."); - Ok((run_config, source)) + info!( + "Retrieved config; our node index is {}. DA committee member: {}", + run_config.node_index, run_config.config.my_own_validator_config.is_da + ); + Ok((run_config, NetworkConfigSource::Orchestrator)) } /// Loads a `NetworkConfig` from a file. @@ -434,10 +415,12 @@ impl Default for NetworkConfig { fn default() -> Self { Self { rounds: ORCHESTRATOR_DEFAULT_NUM_ROUNDS, + indexed_da: true, transactions_per_round: ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, node_index: 0, seed: [0u8; 32], transaction_size: ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, + manual_start_password: None, libp2p_config: None, config: HotShotConfigFile::default().into(), start_delay_seconds: 60, @@ -466,9 +449,16 @@ pub struct NetworkConfigFile { /// number of views to run #[serde_inline_default(ORCHESTRATOR_DEFAULT_NUM_ROUNDS)] pub rounds: usize, + /// number of views to run + #[serde(default)] + pub indexed_da: bool, /// number of transactions per view #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND)] pub transactions_per_round: usize, + /// password to have the orchestrator start the network, + /// regardless of the number of nodes connected. + #[serde(default)] + pub manual_start_password: Option, /// global index of node (for testing purposes a uid) #[serde(default)] pub node_index: u64, @@ -513,9 +503,11 @@ impl From> for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { rounds: val.rounds, + indexed_da: val.indexed_da, transactions_per_round: val.transactions_per_round, node_index: 0, num_bootrap: val.config.num_bootstrap, + manual_start_password: val.manual_start_password, next_view_timeout: val.config.next_view_timeout, view_sync_timeout: val.config.view_sync_timeout, builder_timeout: val.config.builder_timeout, @@ -736,7 +728,7 @@ impl Default for HotShotConfigFile { Self { num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), - start_threshold: (8, 10), + start_threshold: (1, 1), num_nodes_without_stake: 0, my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 9c6670d9ef..cf05b8d227 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -5,7 +5,7 @@ pub mod client; /// Configuration for the orchestrator pub mod config; -use std::{collections::HashSet, fs::OpenOptions, io, io::ErrorKind}; +use std::{collections::HashMap, fs::OpenOptions, io, io::ErrorKind}; use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; @@ -59,6 +59,7 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair { /// The state of the orchestrator #[derive(Default, Clone)] +#[allow(clippy::struct_excessive_bools)] struct OrchestratorState { /// Tracks the latest node index we have generated a configuration for latest_index: u16, @@ -66,12 +67,10 @@ struct OrchestratorState { tmp_latest_index: u16, /// The network configuration config: NetworkConfig, - /// The total nodes that have posted their public keys - nodes_with_pubkey: u64, /// Whether the network configuration has been updated with all the peer's public keys/configs peer_pub_ready: bool, - /// The set of index for nodes that have posted their public keys/configs - pub_posted: HashSet, + /// A map from public keys to `(node_index, is_da)`. + pub_posted: HashMap, (u64, bool)>, /// Whether nodes should start their HotShot instances /// Will be set to true once all nodes post they are ready to start start: bool, @@ -81,6 +80,10 @@ struct OrchestratorState { bench_results: BenchResults, /// The number of nodes that have posted their results nodes_post_results: u64, + /// Whether the orchestrator can be started manually + manual_start_allowed: bool, + /// Whether we are still accepting new keys for registration + accepting_new_keys: bool, } impl OrchestratorState { @@ -90,13 +93,14 @@ impl OrchestratorState { latest_index: 0, tmp_latest_index: 0, config: network_config, - nodes_with_pubkey: 0, peer_pub_ready: false, - pub_posted: HashSet::new(), + pub_posted: HashMap::new(), nodes_connected: 0, start: false, bench_results: BenchResults::default(), nodes_post_results: 0, + manual_start_allowed: true, + accepting_new_keys: true, } } @@ -178,10 +182,11 @@ pub trait OrchestratorApi { /// if unable to serve fn register_public_key( &mut self, - node_index: u64, pubkey: &mut Vec, is_da: bool, - ) -> Result<(), ServerError>; + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result<(u64, bool), ServerError>; /// post endpoint for whether or not all peers public keys are ready /// # Errors /// if unable to serve @@ -189,7 +194,7 @@ pub trait OrchestratorApi { /// get endpoint for the network config after all peers public keys are collected /// # Errors /// if unable to serve - fn get_config_after_peer_collected(&self) -> Result, ServerError>; + fn post_config_after_peer_collected(&mut self) -> Result, ServerError>; /// get endpoint for whether or not the run has started /// # Errors /// if unable to serve @@ -202,6 +207,10 @@ pub trait OrchestratorApi { /// # Errors /// if unable to serve fn post_ready(&mut self) -> Result<(), ServerError>; + /// post endpoint for manually starting the orchestrator + /// # Errors + /// if unable to serve + fn post_manual_start(&mut self, password_bytes: Vec) -> Result<(), ServerError>; } impl OrchestratorApi for OrchestratorState @@ -269,41 +278,78 @@ where #[allow(clippy::cast_possible_truncation)] fn register_public_key( &mut self, - node_index: u64, pubkey: &mut Vec, - is_da: bool, - ) -> Result<(), ServerError> { - if self.pub_posted.contains(&node_index) { + da_requested: bool, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result<(u64, bool), ServerError> { + if !self.accepting_new_keys { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, - message: "Node has already posted public key".to_string(), + status: tide_disco::StatusCode::Forbidden, + message: + "Network has been started manually, and is no longer registering new keys." + .to_string(), }); } - self.pub_posted.insert(node_index); - - // The guess is the first extra 12 bytes are from orchestrator serialization - pubkey.drain(..12); - let register_pub_key_with_stake = PeerConfig::::from_bytes(pubkey).unwrap(); - self.config.config.known_nodes_with_stake[node_index as usize] = - register_pub_key_with_stake.clone(); - - // If the node wants to be DA, add it to the list of known DAs - if is_da { - self.config - .config - .known_da_nodes - .push(register_pub_key_with_stake); - }; - self.nodes_with_pubkey += 1; - println!( - "Node {:?} posted public key, now total num posted public key: {:?}", - node_index, self.nodes_with_pubkey - ); - if self.nodes_with_pubkey >= (self.config.config.num_nodes_with_stake.get() as u64) { + if let Some((node_index, is_da)) = self.pub_posted.get(pubkey) { + return Ok((*node_index, *is_da)); + } + + let node_index = self.pub_posted.len() as u64; + + let staked_pubkey = PeerConfig::::from_bytes(pubkey).unwrap(); + self.config + .config + .known_nodes_with_stake + .push(staked_pubkey.clone()); + + let mut added_to_da = false; + + let da_full = + self.config.config.known_da_nodes.len() >= self.config.config.da_staked_committee_size; + + #[allow(clippy::nonminimal_bool)] + // We add the node to the DA committee depending on either its node index or whether it requested membership. + // + // Since we issue `node_index` incrementally, if we are deciding DA membership by node_index + // we only need to check that the committee is not yet full. + // + // Note: this logically simplifies to (self.config.indexed_da || da_requested) && !da_full, + // but writing it that way makes it a little less clear to me. + if (self.config.indexed_da || (!self.config.indexed_da && da_requested)) && !da_full { + self.config.config.known_da_nodes.push(staked_pubkey); + added_to_da = true; + } + + self.pub_posted + .insert(pubkey.clone(), (node_index, added_to_da)); + + // If the orchestrator is set up for libp2p and we have supplied the proper + // Libp2p data, add our node to the list of bootstrap nodes. + if self.config.libp2p_config.clone().is_some() { + if let (Some(libp2p_public_key), Some(libp2p_address)) = + (libp2p_public_key, libp2p_address) + { + // Push to our bootstrap nodes + self.config + .libp2p_config + .as_mut() + .unwrap() + .bootstrap_nodes + .push((libp2p_public_key, libp2p_address)); + } + } + + println!("Posted public key for node_index {node_index}"); + + // node_index starts at 0, so once it matches `num_nodes_with_stake` + // we will have registered one node too many. hence, we want `node_index + 1`. + if node_index + 1 >= (self.config.config.num_nodes_with_stake.get() as u64) { self.peer_pub_ready = true; + self.accepting_new_keys = false; } - Ok(()) + Ok((node_index, added_to_da)) } fn peer_pub_ready(&self) -> Result { @@ -316,13 +362,15 @@ where Ok(self.peer_pub_ready) } - fn get_config_after_peer_collected(&self) -> Result, ServerError> { + fn post_config_after_peer_collected(&mut self) -> Result, ServerError> { if !self.peer_pub_ready { return Err(ServerError { status: tide_disco::StatusCode::BadRequest, message: "Peer's public configs are not ready".to_string(), }); } + + self.manual_start_allowed = false; Ok(self.config.clone()) } @@ -341,14 +389,62 @@ where // TODO ED Add a map to verify which nodes have posted they're ready fn post_ready(&mut self) -> Result<(), ServerError> { self.nodes_connected += 1; + println!("Nodes connected: {}", self.nodes_connected); + // i.e. nodes_connected >= num_nodes_with_stake * (start_threshold.0 / start_threshold.1) if self.nodes_connected * self.config.config.start_threshold.1 >= (self.config.config.num_nodes_with_stake.get() as u64) * self.config.config.start_threshold.0 { + self.accepting_new_keys = false; + self.manual_start_allowed = false; self.start = true; } + + Ok(()) + } + + /// Manually start the network + fn post_manual_start(&mut self, password_bytes: Vec) -> Result<(), ServerError> { + if !self.manual_start_allowed { + return Err(ServerError { + status: tide_disco::StatusCode::Forbidden, + message: "Configs have already been distributed to nodes, and the network can no longer be started manually.".to_string(), + }); + } + + let password = String::from_utf8(password_bytes) + .expect("Failed to decode raw password as UTF-8 string."); + + // Check that the password matches + if self.config.manual_start_password != Some(password) { + return Err(ServerError { + status: tide_disco::StatusCode::Forbidden, + message: "Incorrect password.".to_string(), + }); + } + + let registered_nodes_with_stake = self.config.config.known_nodes_with_stake.len(); + let registered_da_nodes = self.config.config.known_da_nodes.len(); + + if registered_da_nodes > 1 { + self.config.config.num_nodes_with_stake = + std::num::NonZeroUsize::new(registered_nodes_with_stake) + .expect("Failed to convert to NonZeroUsize; this should be impossible."); + + self.config.config.da_staked_committee_size = registered_da_nodes; + } else { + return Err(ServerError { + status: tide_disco::StatusCode::Forbidden, + message: format!("We cannot manually start the network, because we only have {registered_nodes_with_stake} nodes with stake registered, with {registered_da_nodes} DA nodes.") + }); + } + + self.accepting_new_keys = false; + self.manual_start_allowed = false; + self.peer_pub_ready = true; + Ok(()) } @@ -448,23 +544,45 @@ where })? .post("post_pubkey", |req, state| { async move { - let node_index = req.integer_param("node_index")?; let is_da = req.boolean_param("is_da")?; - let mut pubkey = req.body_bytes(); - state.register_public_key(node_index, &mut pubkey, is_da) + // Read the bytes from the body + let mut body_bytes = req.body_bytes(); + body_bytes.drain(..12); + + // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) + let Ok((mut pubkey, libp2p_address, libp2p_public_key)) = + vbs::Serializer::::deserialize(&body_bytes) + else { + return Err(ServerError { + status: tide_disco::StatusCode::BadRequest, + message: "Malformed body".to_string(), + }); + }; + + state.register_public_key(&mut pubkey, is_da, libp2p_address, libp2p_public_key) } .boxed() })? .get("peer_pubconfig_ready", |_req, state| { async move { state.peer_pub_ready() }.boxed() })? - .get("get_config_after_peer_collected", |_req, state| { - async move { state.get_config_after_peer_collected() }.boxed() + .post("post_config_after_peer_collected", |_req, state| { + async move { state.post_config_after_peer_collected() }.boxed() })? .post( "post_ready", |_req, state: &mut ::State| async move { state.post_ready() }.boxed(), )? + .post( + "post_manual_start", + |req, state: &mut ::State| { + async move { + let password = req.body_bytes(); + state.post_manual_start(password) + } + .boxed() + }, + )? .get("get_start", |_req, state| { async move { state.get_start() }.boxed() })? @@ -483,10 +601,23 @@ where /// This errors if tide disco runs into an issue during serving /// # Panics /// This panics if unable to register the api with tide disco -pub async fn run_orchestrator(network_config: NetworkConfig, url: Url) -> io::Result<()> +pub async fn run_orchestrator( + mut network_config: NetworkConfig, + url: Url, +) -> io::Result<()> where KEY: SignatureKey + 'static + serde::Serialize, { + let env_password = std::env::var("ORCHESTRATOR_MANUAL_START_PASSWORD"); + + if env_password.is_ok() { + tracing::warn!("Took orchestrator manual start password from the environment variable: ORCHESTRATOR_MANUAL_START_PASSWORD={:?}", env_password); + network_config.manual_start_password = env_password.ok(); + } + + network_config.config.known_nodes_with_stake = vec![]; + network_config.config.known_da_nodes = vec![]; + let web_api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); From cfa42cff5a2fba8a6359085abb5b9a7b9483ed8d Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 3 May 2024 23:42:14 +0800 Subject: [PATCH 1004/1393] Fix linting due to version update (#3099) * linting * More fix * More * More * Use workspace --------- Co-authored-by: Brendon Fish --- builder-api/src/builder.rs | 2 -- examples/Cargo.toml | 2 +- hotshot-stake-table/src/mt_based.rs | 2 +- hotshot-stake-table/src/vec_based.rs | 2 +- libp2p-networking/tests/common/mod.rs | 4 ++-- orchestrator/src/lib.rs | 7 +++---- task-impls/src/harness.rs | 3 +-- task-impls/src/quorum_proposal.rs | 2 +- task-impls/src/quorum_vote.rs | 2 +- task/src/dependency.rs | 6 ------ testing/src/script.rs | 6 ++++-- testing/src/test_builder.rs | 7 ++----- testing/src/txn_task.rs | 2 +- web_server/src/lib.rs | 3 +-- 14 files changed, 19 insertions(+), 31 deletions(-) diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 2850c84b4f..6c58589bb0 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -129,7 +129,6 @@ pub fn define_api( where State: 'static + Send + Sync + ReadState, ::State: Send + Sync + BuilderDataSource, - Types: NodeType, { let mut api = load_api::( options.api_path.as_ref(), @@ -199,7 +198,6 @@ pub fn submit_api( ) -> Result, ApiError> where State: 'static + Send + Sync + AcceptsTxnSubmits, - Types: NodeType, { let mut api = load_api::( options.api_path.as_ref(), diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 3afa51f8c9..903bf90f6a 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -5,7 +5,7 @@ edition = { workspace = true } name = "hotshot-examples" readme = "README.md" version = { workspace = true } -rust-version = "1.65.0" +rust-version = { workspace = true } [features] default = ["docs", "doc-images", "hotshot-testing"] diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index f05a1363f0..f417e19672 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -45,7 +45,7 @@ impl StakeTableScheme for StakeTable { amount: Self::Amount, (): Self::Aux, ) -> Result<(), StakeTableError> { - if self.mapping.get(&new_key).is_some() { + if self.mapping.contains_key(&new_key) { Err(StakeTableError::ExistingKey) } else { let pos = self.mapping.len(); diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 1d8d72d0f6..bc4a345d38 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -103,7 +103,7 @@ where amount: Self::Amount, aux: Self::Aux, ) -> Result<(), StakeTableError> { - if self.bls_mapping.get(&new_key).is_some() { + if self.bls_mapping.contains_key(&new_key) { Err(StakeTableError::ExistingKey) } else { let pos = self.bls_mapping.len(); diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index c9f4cf4473..bdb1aa09bd 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -91,7 +91,7 @@ where /// - Initialize network nodes /// - Kill network nodes /// - A test assertion fails -pub async fn test_bed( +pub async fn test_bed( run_test: F, client_handler: G, num_nodes: usize, @@ -101,7 +101,7 @@ pub async fn test_bed, FutG: Future> + 'static + Send + Sync, F: FnOnce(Vec>, Duration) -> FutF, - G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync, + G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, { setup_logging(); setup_backtrace(); diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index cf05b8d227..c8fb62a199 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -497,13 +497,12 @@ where } /// Sets up all API routes -fn define_api( -) -> Result, ApiError> +fn define_api() -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + OrchestratorApi, - KEY: serde::Serialize, - VER: 'static, + KEY: serde::Serialize + SignatureKey, + VER: StaticVersionType + 'static, { let api_toml = toml::from_str::(include_str!(concat!( env!("CARGO_MANIFEST_DIR"), diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 1e34d46dae..0e1ff590ef 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -44,14 +44,13 @@ impl TaskState for TestHarnessState { /// Panics if any state the test expects is not set. Panicking causes a test failure #[allow(clippy::implicit_hasher)] #[allow(clippy::panic)] -pub async fn run_harness>>>( +pub async fn run_harness>> + Send + 'static>( input: Vec>, expected_output: HashMap, usize>, state: S, allow_extra_output: bool, ) where TYPES: NodeType, - S: Send + 'static, { let registry = Arc::new(TaskRegistry::default()); let mut tasks = vec![]; diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index dfc9d223f8..85cfff7c40 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -447,7 +447,7 @@ impl> QuorumProposalTaskState> QuorumVoteTaskState>>, event: Option>>, ) { - if self.vote_dependencies.get(&view_number).is_some() { + if self.vote_dependencies.contains_key(&view_number) { return; } diff --git a/task/src/dependency.rs b/task/src/dependency.rs index 7ff6f653f2..0921ac6f85 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -33,12 +33,6 @@ pub trait Dependency { } } -/// Used to combine dependencies to create `AndDependency`s or `OrDependency`s -trait CombineDependencies: - Sized + Dependency + Send + 'static -{ -} - /// Defines a dependency that completes when all of its deps complete pub struct AndDependency { /// Dependencies being combined diff --git a/testing/src/script.rs b/testing/src/script.rs index 7573bf8aa7..3edde1b4d9 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -89,12 +89,14 @@ where /// Note: the task is not spawned with an async thread; instead, the harness just calls `handle_event`. /// This has a few implications, e.g. shutting down tasks doesn't really make sense, /// and event ordering is deterministic. -pub async fn run_test_script>>>( +pub async fn run_test_script< + TYPES, + S: TaskState>> + Send + 'static, +>( mut script: TestScript, state: S, ) where TYPES: NodeType, - S: Send + 'static, { let registry = Arc::new(TaskRegistry::default()); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 5ebdb87b34..de2237022f 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,6 +1,6 @@ use std::{num::NonZeroUsize, sync::Arc, time::Duration}; -use hotshot::traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}; +use hotshot::traits::{NetworkReliability, TestableNodeImplementation}; use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::{ @@ -233,10 +233,7 @@ impl TestDescription { >( self, node_id: u64, - ) -> TestLauncher - where - I: NodeImplementation, - { + ) -> TestLauncher { let TestDescription { num_nodes_with_stake, num_bootstrap_nodes, diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index f02cdd8b59..330a5ebafe 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -30,7 +30,7 @@ pub struct TxnTask> { pub next_node_idx: Option, /// time to wait between txns pub duration: Duration, - /// + /// Receiver for the shutdown signal from the testing harness pub shutdown_chan: Receiver, } diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs index d13ce63b79..6f4b110fcd 100644 --- a/web_server/src/lib.rs +++ b/web_server/src/lib.rs @@ -873,14 +873,13 @@ pub struct Options { /// Transport versioning (generic params here) only changes when the web-CDN itself changes. /// When transport versioning changes, the application itself must update its version. #[allow(clippy::too_many_lines)] -fn define_api( +fn define_api( options: &Options, ) -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, ::State: Send + Sync + WebServerDataSource, KEY: SignatureKey, - NetworkVersion: 'static, { let mut api = match &options.api_path { Some(path) => Api::::from_file(path)?, From 0ba764edb707ab053db45313998862d84c2389d0 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 3 May 2024 14:32:52 -0400 Subject: [PATCH 1005/1393] adjust log levels (#3101) --- task-impls/src/consensus/mod.rs | 22 +++++++++++----------- task-impls/src/quorum_proposal_recv.rs | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 2c6881f29f..64cde880ba 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -185,7 +185,7 @@ impl> ConsensusTaskState && Some(proposal.block_header.payload_commitment()) != null_block::commitment(self.quorum_membership.total_nodes()) { - info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(self.quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); + warn!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(self.quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); return false; } } @@ -505,13 +505,13 @@ impl> ConsensusTaskState .publish_proposal(qc.view_number + 1, event_stream) .await { - warn!("Failed to propose; error = {e:?}"); + debug!("Failed to propose; error = {e:?}"); }; } either::Left(qc) => { if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { - warn!("Failed to store High QC of QC we formed. Error: {:?}", e); + error!("Failed to store High QC of QC we formed. Error: {:?}", e); } let mut consensus = self.consensus.write().await; @@ -534,7 +534,7 @@ impl> ConsensusTaskState .publish_proposal(qc.view_number + 1, event_stream) .await { - warn!("Failed to propose; error = {e:?}"); + debug!("Failed to propose; error = {e:?}"); }; } } @@ -589,14 +589,14 @@ impl> ConsensusTaskState // Adding `+ 1` on the LHS rather than `- 1` on the RHS, to avoid the overflow // error due to subtracting the genesis view number. if view + 1 < self.cur_view { - warn!("Throwing away VID disperse data that is more than one view older"); + info!("Throwing away VID disperse data that is more than one view older"); return; } debug!("VID disperse data is not more than one view older."); if !self.validate_disperse(disperse) { - warn!("Could not verify VID dispersal/share sig."); + warn!("Failed to validated the VID dispersal/share sig."); return; } @@ -770,7 +770,7 @@ impl> ConsensusTaskState && self.consensus.read().await.high_qc.get_view_number() + 1 == view { if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { - warn!("Failed to propose; error = {e:?}"); + debug!("Failed to propose; error = {e:?}"); }; } @@ -781,7 +781,7 @@ impl> ConsensusTaskState == self.public_key { if let Err(e) = self.publish_proposal(view, event_stream).await { - warn!("Failed to propose; error = {e:?}"); + debug!("Failed to propose; error = {e:?}"); }; } } @@ -790,7 +790,7 @@ impl> ConsensusTaskState == self.public_key { if let Err(e) = self.publish_proposal(view, event_stream).await { - warn!("Failed to propose; error = {e:?}"); + debug!("Failed to propose; error = {e:?}"); }; } } @@ -800,7 +800,7 @@ impl> ConsensusTaskState #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { - warn!( + error!( "View Sync Finalize certificate {:?} was invalid", certificate.get_data() ); @@ -825,7 +825,7 @@ impl> ConsensusTaskState ); if let Err(e) = self.publish_proposal(view, event_stream).await { - warn!("Failed to propose; error = {e:?}"); + debug!("Failed to propose; error = {e:?}"); }; } } diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs index 8219aa4d17..1164a1ef50 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv.rs @@ -176,7 +176,7 @@ impl> QuorumProposalRecvTaskState< Ok(None) => { self.cancel_tasks(proposal.data.get_view_number()).await; } - Err(e) => warn!(?e, "Failed to propose"), + Err(e) => debug!(?e, "Failed to propose"), } } } From f17db7be6d8803e5288ed51fa9511ec400fa73a8 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Sat, 4 May 2024 03:09:25 -0400 Subject: [PATCH 1006/1393] Remove webserver (#3049) --- example-types/src/node_types.rs | 11 +- examples/Cargo.toml | 26 - examples/infra/mod.rs | 102 +- hotshot/Cargo.toml | 1 - hotshot/src/tasks/mod.rs | 124 +- hotshot/src/traits.rs | 1 - hotshot/src/traits/networking.rs | 1 - .../src/traits/networking/combined_network.rs | 24 +- .../src/traits/networking/libp2p_network.rs | 34 +- .../src/traits/networking/push_cdn_network.rs | 5 +- .../traits/networking/web_server_network.rs | 1333 ----------------- task-impls/src/consensus/mod.rs | 124 +- task-impls/src/consensus/proposal_helpers.rs | 14 +- task-impls/src/consensus/view_change.rs | 40 +- task-impls/src/da.rs | 45 - task-impls/src/network.rs | 4 +- task-impls/src/quorum_proposal.rs | 23 - task-impls/src/quorum_vote.rs | 17 +- task-impls/src/vid.rs | 8 - task-impls/src/view_sync.rs | 51 - testing/tests/tests_1/consensus_task.rs | 20 +- testing/tests/tests_1/quorum_proposal_task.rs | 15 +- testing/tests/tests_1/test_success.rs | 4 +- testing/tests/tests_1/test_with_failures_2.rs | 4 +- testing/tests/tests_1/upgrade_task.rs | 8 +- .../tests/tests_2/test_with_failures_one.rs | 4 +- .../tests_3/test_with_failures_half_f.rs | 4 +- testing/tests/tests_4/test_with_failures_f.rs | 4 +- testing/tests/tests_5/timeout.rs | 6 +- testing/tests/tests_5/web_server.rs | 46 - types/src/traits/network.rs | 91 +- web_server/Cargo.toml | 26 - web_server/README.md | 14 - web_server/api.toml | 210 --- web_server/src/config.rs | 163 -- web_server/src/lib.rs | 1120 -------------- 36 files changed, 101 insertions(+), 3626 deletions(-) delete mode 100644 hotshot/src/traits/networking/web_server_network.rs delete mode 100644 testing/tests/tests_5/web_server.rs delete mode 100644 web_server/Cargo.toml delete mode 100644 web_server/README.md delete mode 100644 web_server/api.toml delete mode 100644 web_server/src/config.rs delete mode 100644 web_server/src/lib.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index ca0c29e20c..f0cf75e3bd 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -1,12 +1,9 @@ use hotshot::traits::{ election::static_committee::{GeneralStaticCommittee, StaticCommittee}, - implementations::{ - CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork, WebServerNetwork, - }, + implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, }; use hotshot_types::{ - constants::WebServerVersion, data::ViewNumber, message::Message, signature_key::{BLSPubKey, BuilderKey}, @@ -83,12 +80,6 @@ impl NodeImplementation for MemoryImpl { type Storage = TestStorage; } -impl NodeImplementation for WebImpl { - type QuorumNetwork = WebServerNetwork; - type CommitteeNetwork = WebServerNetwork; - type Storage = TestStorage; -} - impl NodeImplementation for CombinedImpl { type QuorumNetwork = CombinedNetworks; type CommitteeNetwork = CombinedNetworks; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 903bf90f6a..e10ca3b820 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -38,31 +38,6 @@ path = "libp2p/orchestrator.rs" name = "all-libp2p" path = "libp2p/all.rs" -# webserver -[[example]] -name = "webserver" -path = "webserver/webserver.rs" - -[[example]] -name = "orchestrator-webserver" -path = "webserver/orchestrator.rs" - -[[example]] -name = "validator-webserver" -path = "webserver/validator.rs" - -[[example]] -name = "multi-validator-webserver" -path = "webserver/multi-validator.rs" - -[[example]] -name = "multi-webserver" -path = "webserver/multi-webserver.rs" - -[[example]] -name = "all-webserver" -path = "webserver/all.rs" - # combined [[example]] name = "all-combined" @@ -113,7 +88,6 @@ custom_debug = { workspace = true } dashmap = "5" either = { workspace = true } futures = { workspace = true } -hotshot-web-server = { version = "0.5.36", path = "../web_server", default-features = false } hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types" } hotshot-testing = { path = "../testing" } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index b7165625bf..426afce600 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -21,7 +21,7 @@ use hotshot::{ traits::{ implementations::{ derive_libp2p_peer_id, CombinedNetworks, Libp2pNetwork, PushCdnNetwork, - WebServerNetwork, WrappedSignatureKey, + WrappedSignatureKey, }, BlockPayload, NodeImplementation, }, @@ -62,7 +62,6 @@ use hotshot_types::{ use rand::{rngs::StdRng, SeedableRng}; use surf_disco::Url; use tracing::{error, info, warn}; -use vbs::version::StaticVersionType; #[derive(Debug, Clone)] /// Arguments passed to the orchestrator @@ -328,22 +327,6 @@ fn calculate_num_tx_per_round( ) } -/// create a web server network from a config file + public key -/// # Panics -/// Panics if the web server config doesn't exist in `config` -fn webserver_network_from_config( - config: NetworkConfig, - pub_key: TYPES::SignatureKey, -) -> WebServerNetwork { - // Get the configuration for the web server - let WebServerConfig { - url, - wait_between_polls, - }: WebServerConfig = config.web_server_config.unwrap(); - - WebServerNetwork::create(url, wait_between_polls, pub_key, false) -} - /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] pub trait RunDA< @@ -602,89 +585,6 @@ pub trait RunDA< fn get_config(&self) -> NetworkConfig; } -// WEB SERVER - -/// Represents a web server-based run -pub struct WebServerDARun { - /// the network configuration - config: NetworkConfig, - /// quorum channel - quorum_channel: WebServerNetwork, - /// data availability channel - da_channel: WebServerNetwork, -} - -#[async_trait] -impl< - TYPES: NodeType< - Transaction = TestTransaction, - BlockPayload = TestBlockPayload, - BlockHeader = TestBlockHeader, - InstanceState = TestInstanceState, - >, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = WebServerNetwork, - CommitteeNetwork = WebServerNetwork, - Storage = TestStorage, - >, - NetworkVersion: StaticVersionType, - > - RunDA< - TYPES, - WebServerNetwork, - WebServerNetwork, - NODE, - > for WebServerDARun -where - ::ValidatedState: TestableState, - ::BlockPayload: TestableBlock, - Leaf: TestableLeaf, - Self: Sync, - NetworkVersion: 'static, -{ - async fn initialize_networking( - config: NetworkConfig, - _libp2p_advertise_address: Option, - ) -> WebServerDARun { - // Get our own key - let pub_key = config.config.my_own_validator_config.public_key.clone(); - - // extract values from config (for DA network) - let WebServerConfig { - url, - wait_between_polls, - }: WebServerConfig = config.clone().da_web_server_config.unwrap(); - - // create and wait for underlying network - let underlying_quorum_network = - webserver_network_from_config::(config.clone(), pub_key.clone()); - - underlying_quorum_network.wait_for_ready().await; - - let da_channel: WebServerNetwork = - WebServerNetwork::create(url.clone(), wait_between_polls, pub_key.clone(), true); - - WebServerDARun { - config, - quorum_channel: underlying_quorum_network, - da_channel, - } - } - - fn get_da_channel(&self) -> WebServerNetwork { - self.da_channel.clone() - } - - fn get_quorum_channel(&self) -> WebServerNetwork { - self.quorum_channel.clone() - } - - fn get_config(&self) -> NetworkConfig { - self.config.clone() - } -} - // Push CDN /// Represents a Push CDN-based run diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 42b878267d..a23cbc3f9e 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -42,7 +42,6 @@ futures = { workspace = true } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-types = { path = "../types" } -hotshot-web-server = { version = "0.5.36", path = "../web_server", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } lru = "0.12" diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 70cd33cfa4..cbbcb8dc1e 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -28,8 +28,7 @@ use hotshot_types::{ constants::{Version01, VERSION_0_1}, message::{Message, Messages}, traits::{ - election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, storage::Storage, }, @@ -141,122 +140,6 @@ pub async fn add_network_event_task< task_reg.run_task(task).await; } -/// Setup polls for the given `consensus_state` -pub async fn inject_consensus_polls>( - consensus_state: &ConsensusTaskState, -) { - // Poll (forever) for the latest quorum proposal - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) - .await; - - // Poll (forever) for upgrade proposals - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForUpgradeProposal(0)) - .await; - - // Poll (forever) for upgrade votes - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForUpgradeVotes(0)) - .await; - - // See if we're in the DA committee - // This will not work for epochs (because dynamic subscription - // With the Push CDN, we are _always_ polling for latest anyway. - let is_da = consensus_state - .committee_membership - .get_whole_committee(::Time::new(0)) - .contains(&consensus_state.public_key); - - // If we are, poll for latest DA proposal. - if is_da { - consensus_state - .committee_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) - .await; - } - - // Poll (forever) for the latest view sync certificate - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) - .await; - // Start polling for proposals for the first view - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) - .await; - - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(1)) - .await; - - if consensus_state - .quorum_membership - .get_leader(TYPES::Time::new(1)) - == consensus_state.public_key - { - consensus_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForVotes(0)) - .await; - } -} - -/// Setup polls for the given `quorum_proposal`. -pub async fn inject_quorum_proposal_polls>( - quorum_proposal_task_state: &QuorumProposalTaskState, -) { - // Poll (forever) for the latest view sync certificate - quorum_proposal_task_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) - .await; -} - -/// Setup polls for the [`QuorumVoteTaskState`]. -pub async fn inject_quorum_vote_polls>( - quorum_vote_task_state: &QuorumVoteTaskState, -) { - // Poll (forever) for the latest quorum proposal - quorum_vote_task_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) - .await; - - // Start polling for proposals for the first view - quorum_vote_task_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) - .await; - - quorum_vote_task_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(1)) - .await; -} - -/// Setup polls for the [`QuorumProposalRecvTaskState`]. -pub async fn inject_quorum_proposal_recv_polls>( - quorum_proposal_recv_task_state: &QuorumProposalRecvTaskState, -) { - // Poll (forever) for the latest quorum proposal - quorum_proposal_recv_task_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForLatestProposal) - .await; - - // Start polling for proposals for the first view - quorum_proposal_recv_task_state - .quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(1)) - .await; -} - /// add the consensus task pub async fn add_consensus_task>( task_reg: Arc, @@ -266,8 +149,6 @@ pub async fn add_consensus_task>( ) { let consensus_state = ConsensusTaskState::create_from(handle).await; - inject_consensus_polls(&consensus_state).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), consensus_state); task_reg.run_task(task).await; } @@ -344,7 +225,6 @@ pub async fn add_quorum_proposal_task, ) { let quorum_proposal_task_state = QuorumProposalTaskState::create_from(handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_proposal_task_state); task_reg.run_task(task).await; } @@ -357,7 +237,6 @@ pub async fn add_quorum_vote_task> handle: &SystemContextHandle, ) { let quorum_vote_task_state = QuorumVoteTaskState::create_from(handle).await; - inject_quorum_vote_polls(&quorum_vote_task_state).await; let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_vote_task_state); task_reg.run_task(task).await; } @@ -370,7 +249,6 @@ pub async fn add_quorum_proposal_recv_task, ) { let quorum_proposal_recv_task_state = QuorumProposalRecvTaskState::create_from(handle).await; - inject_quorum_proposal_recv_polls(&quorum_proposal_recv_task_state).await; let task = Task::new( tx, rx, diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 84b2b9051a..64812a37fb 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -19,7 +19,6 @@ pub mod implementations { push_cdn_network::{ KeyPair, ProductionDef, PushCdnNetwork, TestingDef, WrappedSignatureKey, }, - web_server_network::WebServerNetwork, NetworkingMetricsValue, }; } diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 15d686fa93..d0de703de1 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -10,7 +10,6 @@ pub mod libp2p_network; pub mod memory_network; /// The Push CDN network pub mod push_cdn_network; -pub mod web_server_network; use std::{ collections::HashMap, sync::{Arc, Mutex}, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 73019b2ccf..25a2eba845 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -32,9 +32,7 @@ use hotshot_types::{ data::ViewNumber, message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, traits::{ - network::{ - ConnectedNetwork, ConsensusIntentEvent, ResponseChannel, ResponseMessage, ViewMessage, - }, + network::{ConnectedNetwork, ResponseChannel, ResponseMessage, ViewMessage}, node_implementation::{ConsensusTime, NodeType}, }, BoxSyncFuture, @@ -468,18 +466,10 @@ impl ConnectedNetwork, TYPES::SignatureKey> self.secondary().queue_node_lookup(view_number, pk).await } - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - as ConnectedNetwork< - Message, - TYPES::SignatureKey, - >>::inject_consensus_info(self.primary(), event.clone()) - .await; - - as ConnectedNetwork,TYPES::SignatureKey>>:: - inject_consensus_info(self.secondary(), event).await; - } - - fn update_view(&self, view: u64) { + async fn update_view<'a, T>(&'a self, view: u64, membership: &T::Membership) + where + T: NodeType + 'a, + { let delayed_map = Arc::clone(&self.delayed_tasks); async_spawn(async move { let mut cancel_tasks = Vec::new(); @@ -500,9 +490,13 @@ impl ConnectedNetwork, TYPES::SignatureKey> } join_all(cancel_tasks).await; }); + // View changed, let's start primary again self.primary_down.store(false, Ordering::Relaxed); self.primary_fail_counter.store(0, Ordering::Relaxed); + + // Run `update_view` logic for the libp2p network + self.networks.1.update_view::(view, membership).await; } fn is_primary_down(&self) -> bool { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 3895200a7a..4d78727512 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -39,9 +39,10 @@ use hotshot_types::{ data::ViewNumber, message::{DataMessage::DataResponse, Message, MessageKind}, traits::{ + election::Membership, network::{ - self, ConnectedNetwork, ConsensusIntentEvent, FailedToSerializeSnafu, NetworkError, - NetworkMsg, ResponseMessage, + self, ConnectedNetwork, FailedToSerializeSnafu, NetworkError, NetworkMsg, + ResponseMessage, }, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -1112,24 +1113,17 @@ impl ConnectedNetwork for Libp2p .await } - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - match event { - ConsensusIntentEvent::PollFutureLeader(future_view, future_leader) => { - let _ = self - .queue_node_lookup(ViewNumber::new(future_view), future_leader) - .await - .map_err(|err| warn!("failed to process node lookup request: {}", err)); - } - - ConsensusIntentEvent::PollForProposal(new_view) => { - if new_view > self.inner.latest_seen_view.load(Ordering::Relaxed) { - self.inner - .latest_seen_view - .store(new_view, Ordering::Relaxed); - } - } + /// handles view update + async fn update_view<'a, TYPES>(&'a self, view: u64, membership: &TYPES::Membership) + where + TYPES: NodeType + 'a, + { + let future_view = ::Time::new(view) + LOOK_AHEAD; + let future_leader = membership.get_leader(future_view); - _ => {} - } + let _ = self + .queue_node_lookup(ViewNumber::new(*future_view), future_leader) + .await + .map_err(|err| tracing::warn!("failed to process node lookup request: {err}")); } } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index db68676bdd..7f0ebb70aa 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -37,7 +37,7 @@ use hotshot_types::{ data::ViewNumber, message::Message, traits::{ - network::{ConnectedNetwork, ConsensusIntentEvent, PushCdnNetworkError}, + network::{ConnectedNetwork, PushCdnNetworkError}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -560,7 +560,4 @@ impl ConnectedNetwork, TYPES::SignatureKey> ) -> Result<(), UnboundedSendError>> { Ok(()) } - - /// We don't need to poll. - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} } diff --git a/hotshot/src/traits/networking/web_server_network.rs b/hotshot/src/traits/networking/web_server_network.rs deleted file mode 100644 index e9a99e21ab..0000000000 --- a/hotshot/src/traits/networking/web_server_network.rs +++ /dev/null @@ -1,1333 +0,0 @@ -//! A network implementation that connects to a web server. -//! -//! To run the web server, see the `./web_server/` folder in this repo. -//! - -use std::{ - collections::{btree_map::Entry, hash_map::DefaultHasher, BTreeMap, BTreeSet}, - hash::{Hash, Hasher}, - num::NonZeroUsize, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; - -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::{oneshot, unbounded, OneShotSender, UnboundedReceiver, UnboundedSender}, -}; -use async_lock::RwLock; -use async_trait::async_trait; -use derive_more::{Deref, DerefMut}; -use hotshot_types::{ - boxed_sync, - constants::{Version01, VERSION_0_1}, - message::{CommitteeConsensusMessage, Message, MessageKind, MessagePurpose, SequencingMessage}, - traits::{ - network::{ - AsyncGenerator, ConnectedNetwork, ConsensusIntentEvent, NetworkError, NetworkMsg, - NetworkReliability, TestableNetworkingImplementation, ViewMessage, - WebServerNetworkError, - }, - node_implementation::NodeType, - signature_key::SignatureKey, - }, - BoxSyncFuture, -}; -use hotshot_web_server::{self, config}; -use lru::LruCache; -use serde::{Deserialize, Serialize}; -use surf_disco::{error::ClientError, Url}; -use tracing::{debug, error, info, warn}; -use vbs::{ - version::{StaticVersionType, Version}, - BinarySerializer, Serializer, -}; - -/// convenience alias alias for the result of getting transactions from the web server -pub type TxnResult = Result>)>, ClientError>; - -/// # Note -/// -/// This function uses `DefaultHasher` instead of cryptographic hash functions like SHA-256 because of an `AsRef` requirement. -fn hash(t: &T) -> u64 { - let mut s = DefaultHasher::new(); - t.hash(&mut s); - s.finish() -} - -/// The web server network state -#[derive(Clone, Debug)] -pub struct WebServerNetwork { - /// The inner, core state of the web server network - inner: Arc>, - /// An optional shutdown signal. This is only used when this connection is created through the `TestableNetworkingImplementation` API. - server_shutdown_signal: Option>>, -} - -impl WebServerNetwork { - /// Post a message to the web server and return the result - async fn post_message_to_web_server( - &self, - message: SendMsg>, - ) -> Result<(), NetworkError> { - // Note: it should be possible to get the version of Message and choose client_initial or (if available) - // client_new_ver based on Message. But we do always know - let result: Result<(), ClientError> = self - .inner - .client - .post(&message.get_endpoint()) - .body_binary(&message.get_message()) - .unwrap() - .send() - .await; - // error!("POST message error for endpoint {} is {:?}", &message.get_endpoint(), result.clone()); - result.map_err(|_e| { - error!("{}", &message.get_endpoint()); - NetworkError::WebServer { - source: WebServerNetworkError::ClientError, - } - }) - } -} - -/// `TaskChannel` is a type alias for an unbounded sender channel that sends `ConsensusIntentEvent`s. -/// -/// This channel is used to send events to a task. The `K` type parameter is the type of the key used in the `ConsensusIntentEvent`. -/// -/// # Examples -/// -/// ```ignore -/// let (tx, _rx): (TaskChannel, _) = tokio::sync::mpsc::unbounded_channel(); -/// ``` -/// -/// # Note -/// -/// This type alias is used in the context of a `TaskMap`, where each task is represented by a `TaskChannel`. -type TaskChannel = UnboundedSender>; - -/// `TaskMap` is a wrapper around a `BTreeMap` that maps view numbers to tasks. -/// -/// Each task is represented by a `TaskChannel` that can be used to send events to the task. -/// The key `K` is a type that implements the `SignatureKey` trait. -/// -/// # Examples -/// -/// ```ignore -/// # use crate::TaskMap; -/// let mut map: TaskMap = TaskMap::default(); -/// ``` -/// -/// # Note -/// -/// This struct is `Clone`, `Deref`, and `DerefMut`, so it can be used just like a `BTreeMap`. -#[derive(Debug, Clone, Deref, DerefMut)] -struct TaskMap(BTreeMap>); - -impl Default for TaskMap { - fn default() -> Self { - Self(BTreeMap::default()) - } -} - -impl TaskMap { - /// Prunes tasks that are polling for a view less than or equal to `current_view - 2`. - /// - /// This method cancels and removes all entries in the task map that are polling for a view less than or equal to `current_view - 2`. - /// The cancellation is performed by sending a `cancel_event` to the task. - /// - /// # Arguments - /// - /// * `current_view` - The current view number. Tasks polling for a view less than or equal to `current_view - 2` will be pruned. - /// * `cancel_event_fn` - A function that takes a view number and returns a `ConsensusIntentEvent` to be sent to the task for cancellation. - /// - /// # Examples - /// - /// ```ignore - /// # use crate::TaskMap; - /// let mut map: TaskMap = TaskMap::default(); - /// map.prune_tasks(10, ConsensusIntentEvent::CancelPollForProposal(5)).await; - /// ``` - async fn prune_tasks( - &mut self, - current_view: u64, - cancel_event_fn: fn(u64) -> ConsensusIntentEvent, - ) { - let cutoff_view = current_view.saturating_sub(2); - let views_to_remove: Vec<_> = self.range(..cutoff_view).map(|(key, _)| *key).collect(); - - for view in views_to_remove { - let task = self.remove(&view); - if let Some(task) = task { - let _ = task.send(cancel_event_fn(view)).await; - } - } - } -} - -/// Represents the core of web server networking -#[derive(Debug)] -struct Inner { - /// Our own key - own_key: TYPES::SignatureKey, - /// Queue for messages - poll_queue_0_1: Arc>>>>, - /// Client is running - running: AtomicBool, - /// The web server connection is ready - connected: AtomicBool, - /// The connection to the web server - client: surf_disco::Client, - /// The duration to wait between poll attempts - wait_between_polls: Duration, - /// Whether we are connecting to a DA server - is_da: bool, - /// The last tx_index we saw from the web server - tx_index: Arc>, - /// Task map for quorum proposals. - proposal_task_map: Arc>>, - /// Task map for quorum votes. - vote_task_map: Arc>>, - /// Task map for VID disperse data - vid_disperse_task_map: Arc>>, - /// Task map for DACs. - dac_task_map: Arc>>, - /// Task map for view sync certificates. - view_sync_cert_task_map: Arc>>, - /// Task map for view sync votes. - view_sync_vote_task_map: Arc>>, - /// Task map for transactions - txn_task_map: Arc>>, - #[allow(clippy::type_complexity)] - /// A handle on the task polling for latest quorum propsal - latest_proposal_task: Arc>>>, - /// A handle on the task polling for an upgrade propsal - upgrade_proposal_task: Arc>>>, - /// A handle on the task polling for an upgrade vote - upgrade_vote_task: Arc>>>, - #[allow(clippy::type_complexity)] - /// A handle on the task polling for the latest view sync certificate - latest_view_sync_certificate_task: Arc>>>, -} - -impl Inner { - #![allow(clippy::too_many_lines)] - - /// Handle version 0.1 transactions - /// - /// * `first_tx_index` - the index of the first transaction received from the server in the latest batch. - /// * `tx_index` - the last transaction index we saw from the web server. - async fn handle_tx_0_1(&self, tx: Vec, first_tx_index: u64, tx_index: &mut u64) { - let poll_queue = &self.poll_queue_0_1; - if first_tx_index > *tx_index + 1 { - debug!( - "missed txns from {} to {}", - *tx_index + 1, - first_tx_index - 1 - ); - *tx_index = first_tx_index - 1; - } - - *tx_index += 1; - - if let Ok(Some(deserialized_message_inner)) = - Serializer::::deserialize::>>(&tx) - { - let deserialized_message = RecvMsg { - message: Some(deserialized_message_inner), - }; - poll_queue.write().await.push(deserialized_message.clone()); - } else { - async_sleep(self.wait_between_polls).await; - } - - debug!("tx index is {}", tx_index); - } - - /// Handle version 0.1 messages - /// - /// Returns `should_return` as a boolean, which is: - /// * `true` if we've seen enough this round and the `poll_web_server` function should return - /// * `false` if we want to receive further messages from the server. - #[allow(clippy::too_many_arguments)] - async fn handle_message_0_1( - &self, - message: Vec, - view_number: u64, - message_purpose: MessagePurpose, - vote_index: &mut u64, - upgrade_vote_index: &mut u64, - seen_proposals: &mut LruCache, - seen_view_sync_certificates: &mut LruCache, - ) -> bool { - let poll_queue = &self.poll_queue_0_1; - match Serializer::::deserialize::>>(&message) { - Ok(Some(deserialized_message_inner)) => { - let deserialized_message = RecvMsg { - message: Some(deserialized_message_inner), - }; - match message_purpose { - MessagePurpose::Data => { - error!("We should not receive transactions in this function"); - } - MessagePurpose::Proposal => { - let proposal = deserialized_message.clone(); - poll_queue.write().await.push(proposal); - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - return true; - } - MessagePurpose::LatestProposal => { - let proposal = deserialized_message.clone(); - let hash = hash(&proposal); - // Only allow unseen proposals to be pushed to the queue - if seen_proposals.put(hash, ()).is_none() { - poll_queue.write().await.push(proposal); - } - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - return true; - } - MessagePurpose::LatestViewSyncCertificate => { - let cert = deserialized_message.clone(); - let hash = hash(&cert); - if seen_view_sync_certificates.put(hash, ()).is_none() { - poll_queue.write().await.push(cert); - } - return false; - } - MessagePurpose::Vote - | MessagePurpose::ViewSyncVote - | MessagePurpose::ViewSyncCertificate => { - let vote = deserialized_message.clone(); - *vote_index += 1; - poll_queue.write().await.push(vote); - - return false; - } - MessagePurpose::UpgradeVote => { - let vote = deserialized_message.clone(); - *upgrade_vote_index += 1; - poll_queue.write().await.push(vote); - - return false; - } - MessagePurpose::DAC => { - debug!( - "Received DAC from web server for view {} {}", - view_number, self.is_da - ); - poll_queue.write().await.push(deserialized_message.clone()); - - // Only pushing the first proposal since we will soon only be allowing 1 proposal per view - // return if we found a DAC, since there will only be 1 per view - // In future we should check to make sure DAC is valid - return true; - } - MessagePurpose::VidDisperse => { - let RecvMsg { - message: Some(message), - } = deserialized_message.clone() - else { - return false; - }; - let Message { - sender: _, - kind: - MessageKind::Consensus(SequencingMessage::Committee( - CommitteeConsensusMessage::VidDisperseMsg(vid), - )), - } = message - else { - return false; - }; - if vid.data.recipient_key != self.own_key { - // error!("Key {:?} does not match ours for VID", vid.data.recipient_key); - return false; - } - self.poll_queue_0_1 - .write() - .await - .push(deserialized_message.clone()); - return true; - } - - MessagePurpose::Internal => { - error!("Received internal message in web server network"); - - return false; - } - - MessagePurpose::UpgradeProposal => { - poll_queue.write().await.push(deserialized_message.clone()); - - return true; - } - } - } - Ok(None) | Err(_) => {} - } - false - } - - /// Poll a web server. - async fn poll_web_server( - &self, - receiver: UnboundedReceiver>, - message_purpose: MessagePurpose, - view_number: u64, - additional_wait: Duration, - ) -> Result<(), NetworkError> { - let mut vote_index = 0; - let mut tx_index = 0; - let mut upgrade_vote_index = 0; - let mut seen_proposals = LruCache::new(NonZeroUsize::new(100).unwrap()); - let mut seen_view_sync_certificates = LruCache::new(NonZeroUsize::new(100).unwrap()); - - if message_purpose == MessagePurpose::Data { - tx_index = *self.tx_index.read().await; - debug!("Previous tx index was {}", tx_index); - }; - - while self.running.load(Ordering::Relaxed) { - async_sleep(additional_wait).await; - - let endpoint = match message_purpose { - MessagePurpose::Proposal => config::get_proposal_route(view_number), - MessagePurpose::LatestProposal => config::get_latest_proposal_route(), - MessagePurpose::LatestViewSyncCertificate => { - config::get_latest_view_sync_certificate_route() - } - MessagePurpose::Vote => config::get_vote_route(view_number, vote_index), - MessagePurpose::Data => config::get_transactions_route(tx_index), - MessagePurpose::Internal => unimplemented!(), - MessagePurpose::ViewSyncCertificate => { - config::get_view_sync_certificate_route(view_number, vote_index) - } - MessagePurpose::ViewSyncVote => { - config::get_view_sync_vote_route(view_number, vote_index) - } - MessagePurpose::DAC => config::get_da_certificate_route(view_number), - MessagePurpose::VidDisperse => config::get_vid_disperse_route(view_number), // like `Proposal` - MessagePurpose::UpgradeProposal => config::get_upgrade_proposal_route(0), - MessagePurpose::UpgradeVote => { - config::get_upgrade_vote_route(0, upgrade_vote_index) - } - }; - - if let MessagePurpose::Data = message_purpose { - // Note: this should also be polling on client_ - let possible_message: TxnResult = self.client.get(&endpoint).send().await; - // Deserialize and process transactions from the server. - // If something goes wrong at any point, we sleep for wait_between_polls - // then try again next time. - if let Ok(Some((first_tx_index, txs))) = possible_message { - for tx_raw in txs { - let tx_version = Version::deserialize(&tx_raw); - - match tx_version { - Ok((VERSION_0_1, _)) => { - self.handle_tx_0_1(tx_raw, first_tx_index, &mut tx_index) - .await; - } - Ok((version, _)) => { - warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", - version, - tx_raw - ); - } - Err(e) => { - warn!( - "Error {:?}, could not read version number.\n\nPayload:\n\n{:?}", - e, tx_raw - ); - } - } - } - } else { - async_sleep(self.wait_between_polls + additional_wait).await; - } - } else { - let possible_message: Result>>, ClientError> = - self.client.get(&endpoint).send().await; - - if let Ok(Some(messages)) = possible_message { - for message in messages { - let message_version = Version::deserialize(&message); - - let should_return; - - match message_version { - Ok((VERSION_0_1, _)) => { - should_return = self - .handle_message_0_1( - message, - view_number, - message_purpose, - &mut vote_index, - &mut upgrade_vote_index, - &mut seen_proposals, - &mut seen_view_sync_certificates, - ) - .await; - - if should_return { - return Ok(()); - } - } - Ok((version, _)) => { - warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", version, message); - } - Err(e) => { - warn!("Error {:?}, could not read version number.\n\nPayload:\n\n{:?}", e, message); - } - } - } - } else { - async_sleep(self.wait_between_polls).await; - } - } - - if let Ok(event) = receiver.try_recv() { - match event { - // TODO ED Should add extra error checking here to make sure we are intending to cancel a task - ConsensusIntentEvent::CancelPollForVotes(event_view) - | ConsensusIntentEvent::CancelPollForProposal(event_view) - | ConsensusIntentEvent::CancelPollForDAC(event_view) - | ConsensusIntentEvent::CancelPollForViewSyncCertificate(event_view) - | ConsensusIntentEvent::CancelPollForVIDDisperse(event_view) - | ConsensusIntentEvent::CancelPollForLatestProposal(event_view) - | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(event_view) - | ConsensusIntentEvent::CancelPollForViewSyncVotes(event_view) => { - if view_number == event_view { - debug!("Shutting down polling task for view {}", event_view); - return Ok(()); - } - } - ConsensusIntentEvent::CancelPollForTransactions(event_view) => { - // Write the most recent tx index so we can pick up where we left off later - - let mut lock = self.tx_index.write().await; - *lock = tx_index; - - if view_number == event_view { - debug!("Shutting down polling task for view {}", event_view); - return Ok(()); - } - } - - _ => { - unimplemented!() - } - } - } - } - Err(NetworkError::ShutDown) - } -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -#[serde(bound(deserialize = ""))] -/// A message being sent to the web server -pub struct SendMsg { - /// The optional message, or body, to send - message: Option, - /// The endpoint to send the message to - endpoint: String, -} - -/// A message being received from the web server -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash)] -#[serde(bound(deserialize = ""))] -pub struct RecvMsg { - /// The optional message being received - message: Option, -} - -/// Trait for messages being sent to the web server -pub trait SendMsgTrait { - /// Returns the endpoint to send the message to - fn get_endpoint(&self) -> String; - /// Returns the actual message being sent - fn get_message(&self) -> Option; -} - -/// Trait for messages being received from the web server -pub trait RecvMsgTrait { - /// Returns the actual message being received - fn get_message(&self) -> Option; -} - -impl SendMsgTrait for SendMsg { - fn get_endpoint(&self) -> String { - self.endpoint.clone() - } - - fn get_message(&self) -> Option { - self.message.clone() - } -} - -impl RecvMsgTrait for RecvMsg { - fn get_message(&self) -> Option { - self.message.clone() - } -} - -impl NetworkMsg for SendMsg {} -impl NetworkMsg for RecvMsg {} - -impl - WebServerNetwork -{ - /// Creates a new instance of the `WebServerNetwork` - /// # Panics - /// if the web server url is malformed - pub fn create( - url: Url, - wait_between_polls: Duration, - key: TYPES::SignatureKey, - is_da_server: bool, - ) -> Self { - info!("Connecting to web server at {url:?} is da: {is_da_server}"); - - // TODO ED Wait for healthcheck - let client = surf_disco::Client::::new(url); - - let inner = Arc::new(Inner { - poll_queue_0_1: Arc::default(), - running: AtomicBool::new(true), - connected: AtomicBool::new(false), - client, - wait_between_polls, - own_key: key, - is_da: is_da_server, - tx_index: Arc::default(), - proposal_task_map: Arc::default(), - vote_task_map: Arc::default(), - vid_disperse_task_map: Arc::default(), - dac_task_map: Arc::default(), - view_sync_cert_task_map: Arc::default(), - view_sync_vote_task_map: Arc::default(), - txn_task_map: Arc::default(), - latest_proposal_task: Arc::default(), - upgrade_proposal_task: Arc::default(), - upgrade_vote_task: Arc::default(), - latest_view_sync_certificate_task: Arc::default(), - }); - - inner.connected.store(true, Ordering::Relaxed); - - Self { - inner, - server_shutdown_signal: None, - } - } - - /// Parses a message to find the appropriate endpoint - /// Returns a `SendMsg` containing the endpoint - fn parse_post_message( - message: Message, - ) -> Result>, WebServerNetworkError> { - let view_number: TYPES::Time = message.get_view_number(); - - let endpoint = match &message.purpose() { - MessagePurpose::Proposal => config::post_proposal_route(*view_number), - MessagePurpose::Vote => config::post_vote_route(*view_number), - MessagePurpose::Data => config::post_transactions_route(), - MessagePurpose::Internal - | MessagePurpose::LatestProposal - | MessagePurpose::LatestViewSyncCertificate => { - return Err(WebServerNetworkError::EndpointError) - } - MessagePurpose::ViewSyncCertificate => { - // error!("Posting view sync proposal route is: {}", config::post_view_sync_certificate_route(*view_number)); - config::post_view_sync_certificate_route(*view_number) - } - MessagePurpose::ViewSyncVote => config::post_view_sync_vote_route(*view_number), - MessagePurpose::DAC => config::post_da_certificate_route(*view_number), - MessagePurpose::VidDisperse => config::post_vid_disperse_route(*view_number), - MessagePurpose::UpgradeProposal => config::post_upgrade_proposal_route(0), - MessagePurpose::UpgradeVote => config::post_upgrade_vote_route(0), - }; - - let network_msg: SendMsg> = SendMsg { - message: Some(message), - endpoint, - }; - Ok(network_msg) - } - - /// Generates a single webserver network, for use in tests - fn single_generator( - expected_node_count: usize, - _num_bootstrap: usize, - _network_id: usize, - _da_committee_size: usize, - is_da: bool, - _reliability_config: &Option>, - ) -> Box Self + 'static> { - let (server_shutdown_sender, server_shutdown) = oneshot(); - let sender = Arc::new(server_shutdown_sender); - - // pick random, unused port - let port = portpicker::pick_unused_port().expect("Could not find an open port"); - - let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - info!("Launching web server on port {port}"); - // Start web server - async_spawn(async { - match hotshot_web_server::run_web_server::( - Some(server_shutdown), - url, - NetworkVersion::instance(), - ) - .await - { - Ok(()) => error!("Web server future finished unexpectedly"), - Err(e) => error!("Web server task failed: {e}"), - } - }); - - // We assign known_nodes' public key and stake value rather than read from config file since it's a test - let known_nodes = (0..expected_node_count as u64) - .map(|id| { - TYPES::SignatureKey::from_private( - &TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], id).1, - ) - }) - .collect::>(); - - // Start each node's web server client - Box::new(move |id| { - let sender = Arc::clone(&sender); - let url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); - let mut network = WebServerNetwork::create( - url, - Duration::from_millis(100), - known_nodes[usize::try_from(id).unwrap()].clone(), - is_da, - ); - network.server_shutdown_signal = Some(sender); - network - }) - } -} - -#[async_trait] -impl - ConnectedNetwork, TYPES::SignatureKey> - for WebServerNetwork -{ - /// Blocks until the network is successfully initialized - async fn wait_for_ready(&self) { - while !self.inner.connected.load(Ordering::Relaxed) { - async_sleep(Duration::from_secs(1)).await; - } - } - fn pause(&self) { - error!("Pausing CDN network"); - self.inner.running.store(false, Ordering::Relaxed); - } - - fn resume(&self) { - error!("Resuming CDN network"); - self.inner.running.store(true, Ordering::Relaxed); - } - - /// Blocks until the network is shut down - /// then returns true - fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - let closure = async move { - // Cancel poll for latest proposal on shutdown - if let Some(ref sender) = *self.inner.latest_proposal_task.read().await { - let _ = sender - .send(ConsensusIntentEvent::CancelPollForLatestProposal(1)) - .await; - }; - - // Cancel poll for latest view sync certificate on shutdown - if let Some(ref sender) = *self.inner.latest_view_sync_certificate_task.read().await { - let _ = sender - .send(ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(1)) - .await; - }; - self.inner.running.store(false, Ordering::Relaxed); - }; - boxed_sync(closure) - } - - /// broadcast message to some subset of nodes - /// blocking - async fn broadcast_message( - &self, - message: Message, - _recipients: BTreeSet, - _: VER, - ) -> Result<(), NetworkError> { - // short circuit if we are shut down - #[cfg(feature = "hotshot-testing")] - if !self.inner.running.load(Ordering::Relaxed) { - return Err(NetworkError::ShutDown); - } - - let network_msg = Self::parse_post_message(message); - match network_msg { - Ok(network_msg) => self.post_message_to_web_server(network_msg).await, - Err(network_msg) => Err(NetworkError::WebServer { - source: network_msg, - }), - } - } - - /// broadcast a message only to a DA committee - /// blocking - async fn da_broadcast_message( - &self, - message: Message, - recipients: BTreeSet, - bind_version: VER, - ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients, bind_version) - .await - } - - /// Sends a direct message to a specific node - /// blocking - async fn direct_message( - &self, - message: Message, - _recipient: TYPES::SignatureKey, - _: VER, - ) -> Result<(), NetworkError> { - // short circuit if we are shut down - #[cfg(feature = "hotshot-testing")] - if !self.inner.running.load(Ordering::Relaxed) { - return Err(NetworkError::ShutDown); - } - let network_msg = Self::parse_post_message(message); - match network_msg { - Ok(network_msg) => { - // error!("network msg is {:?}", network_msg.clone()); - - self.post_message_to_web_server(network_msg).await - } - Err(network_msg) => Err(NetworkError::WebServer { - source: network_msg, - }), - } - } - - /// Receive one or many messages from the underlying network. - /// - /// # Errors - /// Does not error - async fn recv_msgs(&self) -> Result>, NetworkError> { - let mut queue = self.inner.poll_queue_0_1.write().await; - Ok(queue - .drain(..) - .collect::>() - .iter() - .map(|x| x.get_message().expect("failed to clone message")) - .collect()) - } - - #[allow(clippy::too_many_lines)] - async fn inject_consensus_info(&self, event: ConsensusIntentEvent) { - #[cfg(feature = "hotshot-testing")] - if !self.inner.running.load(Ordering::Relaxed) { - return; - } - - debug!( - "Injecting event: {:?} is da {}", - event.clone(), - self.inner.is_da - ); - - // TODO ED Need to handle canceling tasks that don't receive their expected output (such a proposal that never comes) - match event { - ConsensusIntentEvent::PollForProposal(view_number) => { - // Check if we already have a task for this (we shouldn't) - - // Going to do a write lock since mostly likely we will need it - can change to upgradable read in the future - let mut task_map = self.inner.proposal_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::Proposal, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - task_map - .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForProposal) - .await; - } - ConsensusIntentEvent::PollForVIDDisperse(view_number) => { - // Check if we already have a task for this (we shouldn't) - - // Going to do a write lock since mostly likely we will need it - can change to upgradable read in the future - let mut task_map = self.inner.vid_disperse_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::VidDisperse, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive VID disperse polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - if view_number > 2 { - task_map - .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVIDDisperse) - .await; - } - } - ConsensusIntentEvent::PollForLatestProposal => { - // Only start this task if we haven't already started it. - let mut cancel_handle = self.inner.latest_proposal_task.write().await; - if cancel_handle.is_none() { - let inner = Arc::clone(&self.inner); - - // Create sender and receiver for cancelling the task - let (sender, receiver) = unbounded(); - *cancel_handle = Some(sender); - - // Create the new task - async_spawn(async move { - if let Err(e) = inner - .poll_web_server( - receiver, - MessagePurpose::LatestProposal, - 1, - Duration::from_millis(500), - ) - .await - { - warn!( - "Background receive latest quorum proposal polling encountered an error: {:?}", - e - ); - } - }); - } - } - ConsensusIntentEvent::PollForUpgradeProposal(view_number) => { - // Only start this task if we haven't already started it. - let mut cancel_handle = self.inner.upgrade_proposal_task.write().await; - if cancel_handle.is_none() { - error!("Starting poll for upgrade proposals!"); - let inner = Arc::clone(&self.inner); - - // Create sender and receiver for cancelling the task - let (sender, receiver) = unbounded(); - *cancel_handle = Some(sender); - - // Create the new task - async_spawn(async move { - if let Err(e) = inner - .poll_web_server( - receiver, - MessagePurpose::UpgradeProposal, - view_number, - Duration::from_millis(500), - ) - .await - { - warn!( - "Background receive latest upgrade proposal polling encountered an error: {:?}", - e - ); - } - }); - } - } - ConsensusIntentEvent::PollForUpgradeVotes(view_number) => { - // Only start this task if we haven't already started it. - let mut cancel_handle = self.inner.upgrade_vote_task.write().await; - if cancel_handle.is_none() { - debug!("Starting poll for upgrade proposals!"); - let inner = Arc::clone(&self.inner); - - // Create sender and receiver for cancelling the task - let (sender, receiver) = unbounded(); - *cancel_handle = Some(sender); - - // Create the new task - async_spawn(async move { - if let Err(e) = inner - .poll_web_server( - receiver, - MessagePurpose::UpgradeVote, - view_number, - Duration::from_millis(500), - ) - .await - { - warn!( - "Background receive latest upgrade proposal polling encountered an error: {:?}", - e - ); - } - }); - } - } - ConsensusIntentEvent::PollForLatestViewSyncCertificate => { - // Only start this task if we haven't already started it. - let mut cancel_handle = self.inner.latest_view_sync_certificate_task.write().await; - if cancel_handle.is_none() { - let inner = Arc::clone(&self.inner); - - // Create sender and receiver for cancelling the task - let (sender, receiver) = unbounded(); - *cancel_handle = Some(sender); - - // Create the new task - async_spawn(async move { - if let Err(e) = inner - .poll_web_server( - receiver, - MessagePurpose::LatestViewSyncCertificate, - 1, - Duration::from_millis(500), - ) - .await - { - warn!( - "Background receive latest view sync certificate polling encountered an error: {:?}", - e - ); - } - }); - } - } - ConsensusIntentEvent::PollForVotes(view_number) => { - let mut task_map = self.inner.vote_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::Vote, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - task_map - .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForVotes) - .await; - } - - ConsensusIntentEvent::PollForDAC(view_number) => { - let mut task_map = self.inner.dac_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::DAC, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - task_map - .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForDAC) - .await; - } - - ConsensusIntentEvent::CancelPollForVotes(view_number) => { - let mut task_map = self.inner.vote_task_map.write().await; - - if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForVotes(view_number)) - .await; - } - } - - ConsensusIntentEvent::PollForViewSyncCertificate(view_number) => { - let mut task_map = self.inner.view_sync_cert_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::ViewSyncCertificate, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - task_map - .prune_tasks( - view_number, - ConsensusIntentEvent::CancelPollForViewSyncCertificate, - ) - .await; - } - ConsensusIntentEvent::PollForViewSyncVotes(view_number) => { - let mut task_map = self.inner.view_sync_vote_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::ViewSyncVote, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive proposal polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - task_map - .prune_tasks( - view_number, - ConsensusIntentEvent::CancelPollForViewSyncVotes, - ) - .await; - } - - ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) => { - let mut task_map = self.inner.view_sync_cert_task_map.write().await; - - if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForViewSyncCertificate( - view_number, - )) - .await; - } - } - ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) => { - let mut task_map = self.inner.view_sync_vote_task_map.write().await; - - if let Some((_, sender)) = task_map.remove_entry(&(view_number)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForViewSyncVotes( - view_number, - )) - .await; - } - } - ConsensusIntentEvent::PollForTransactions(view_number) => { - let mut task_map = self.inner.txn_task_map.write().await; - if let Entry::Vacant(e) = task_map.entry(view_number) { - // create new task - let (sender, receiver) = unbounded(); - e.insert(sender); - async_spawn({ - let inner_clone = Arc::clone(&self.inner); - async move { - if let Err(e) = inner_clone - .poll_web_server( - receiver, - MessagePurpose::Data, - view_number, - Duration::ZERO, - ) - .await - { - warn!( - "Background receive transaction polling encountered an error: {:?}", - e - ); - } - } - }); - } else { - debug!("Somehow task already existed!"); - } - - // Cancel old, stale tasks - task_map - .prune_tasks(view_number, ConsensusIntentEvent::CancelPollForTransactions) - .await; - } - ConsensusIntentEvent::CancelPollForTransactions(view_number) => { - let mut task_map = self.inner.txn_task_map.write().await; - - if let Some((_view, sender)) = task_map.remove_entry(&(view_number)) { - // Send task cancel message to old task - - // If task already exited we expect an error - let _res = sender - .send(ConsensusIntentEvent::CancelPollForTransactions(view_number)) - .await; - } else { - info!("Task map entry should have existed"); - }; - } - - _ => {} - } - } -} - -impl - TestableNetworkingImplementation for WebServerNetwork -{ - fn generator( - expected_node_count: usize, - num_bootstrap: usize, - network_id: usize, - da_committee_size: usize, - _is_da: bool, - reliability_config: Option>, - _secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)> { - let da_gen = Self::single_generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - true, - &reliability_config, - ); - let quorum_gen = Self::single_generator( - expected_node_count, - num_bootstrap, - network_id, - da_committee_size, - false, - &reliability_config, - ); - // Start each node's web server client - Box::pin(move |id| { - let da_gen = da_gen(id); - let quorum_gen = quorum_gen(id); - Box::pin(async move { (quorum_gen.into(), da_gen.into()) }) - }) - } - - fn in_flight_message_count(&self) -> Option { - None - } -} diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 64cde880ba..d49674e856 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -25,7 +25,6 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, @@ -485,60 +484,44 @@ impl> ConsensusTaskState } } #[cfg(not(feature = "dependency-tasks"))] - HotShotEvent::QCFormed(cert) => { - match cert { - either::Right(qc) => { - self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; - - debug!( - "Attempting to publish proposal after forming a TC for view {}", - *qc.view_number - ); + HotShotEvent::QCFormed(cert) => match cert { + either::Right(qc) => { + self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); - if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_stream) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - either::Left(qc) => { - if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await - { - error!("Failed to store High QC of QC we formed. Error: {:?}", e); - } + debug!( + "Attempting to publish proposal after forming a TC for view {}", + *qc.view_number + ); - let mut consensus = self.consensus.write().await; - consensus.high_qc = qc.clone(); + if let Err(e) = self + .publish_proposal(qc.view_number + 1, event_stream) + .await + { + debug!("Failed to propose; error = {e:?}"); + }; + } + either::Left(qc) => { + if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { + error!("Failed to store High QC of QC we formed. Error: {:?}", e); + } - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *qc.view_number, - )) - .await; + let mut consensus = self.consensus.write().await; + consensus.high_qc = qc.clone(); - drop(consensus); - debug!( - "Attempting to publish proposal after forming a QC for view {}", - *qc.view_number - ); + drop(consensus); + debug!( + "Attempting to publish proposal after forming a QC for view {}", + *qc.view_number + ); - if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_stream) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } + if let Err(e) = self + .publish_proposal(qc.view_number + 1, event_stream) + .await + { + debug!("Failed to propose; error = {e:?}"); + }; } - } + }, HotShotEvent::UpgradeCertificateFormed(cert) => { debug!( "Upgrade certificate received for view {}!", @@ -557,14 +540,6 @@ impl> ConsensusTaskState debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForDAC(*view)) - .await; - - self.committee_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) - .await; - self.consensus .write() .await @@ -610,12 +585,6 @@ impl> ConsensusTaskState if disperse.data.recipient_key != self.public_key { return; } - // stop polling for the received disperse after verifying it's valid - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse( - *disperse.data.view_number, - )) - .await; if self.vote_if_able(&event_stream).await { self.current_proposal = None; } @@ -626,13 +595,6 @@ impl> ConsensusTaskState let old_view_number = self.cur_view; - // Start polling for VID disperse for the new view - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( - *old_view_number + 1, - )) - .await; - // If we have a decided upgrade certificate, // we may need to upgrade the protocol version on a view change. if let Some(ref cert) = self.decided_upgrade_cert { @@ -661,12 +623,9 @@ impl> ConsensusTaskState // update the view in state to the one in the message // Publish a view change event to the application // Returns if the view does not need updating. - if let Err(e) = update_view::( - self.public_key.clone(), + if let Err(e) = update_view::( new_view, &event_stream, - Arc::clone(&self.quorum_membership), - Arc::clone(&self.quorum_network), self.timeout, Arc::clone(&self.consensus), &mut self.cur_view, @@ -704,16 +663,6 @@ impl> ConsensusTaskState return; } - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view)) - .await; - - // cancel poll for proposal - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal(*view)) - .await; - let Ok(vote) = TimeoutVote::create_signed_vote( TimeoutData { view }, view, @@ -809,13 +758,6 @@ impl> ConsensusTaskState self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *certificate.view_number - 1, - )) - .await; - let view = certificate.view_number; if self.quorum_membership.get_leader(view) == self.public_key { diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 24fd60f8ac..2cc7d4c987 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -23,7 +23,6 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, states::ValidatedState, @@ -620,14 +619,6 @@ pub async fn handle_quorum_proposal_recv( - task_state.public_key.clone(), + if let Err(e) = update_view::( view, &event_stream, - Arc::clone(&task_state.quorum_membership), - Arc::clone(&task_state.quorum_network), task_state.timeout, Arc::clone(&task_state.consensus), &mut task_state.cur_view, diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 8026f56632..426abbd515 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -9,12 +9,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use async_std::task::JoinHandle; use hotshot_types::{ consensus::Consensus, - constants::LOOK_AHEAD, - traits::{ - election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - }, + traits::node_implementation::{ConsensusTime, NodeType}, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; @@ -36,13 +31,9 @@ pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; /// /// # Errors /// Returns an [`anyhow::Error`] when the new view is not greater than the current view. -#[allow(clippy::too_many_arguments)] -pub(crate) async fn update_view>( - public_key: TYPES::SignatureKey, +pub(crate) async fn update_view( new_view: TYPES::Time, event_stream: &Sender>>, - quorum_membership: Arc, - quorum_network: Arc, timeout: u64, consensus: Arc>>, cur_view: &mut TYPES::Time, @@ -69,38 +60,13 @@ pub(crate) async fn update_view>( *cur_view = new_view; - // Poll the future leader for lookahead - let lookahead_view = new_view + LOOK_AHEAD; - if quorum_membership.get_leader(lookahead_view) != public_key { - quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollFutureLeader( - *lookahead_view, - quorum_membership.get_leader(lookahead_view), - )) - .await; - } - // The next view is just the current view + 1 let next_view = *cur_view + 1; - // Start polling for proposals for the new view - quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal(*next_view)) - .await; - - quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(*next_view)) - .await; - - if quorum_membership.get_leader(next_view) == public_key { - quorum_network - .inject_consensus_info(ConsensusIntentEvent::PollForVotes(**cur_view)) - .await; - } - if send_view_change_event { broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; } + // Spawn a timeout task if we did actually update view *timeout_task = Some(async_spawn({ let stream = event_stream.clone(); diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index a56198f71d..bc52039b69 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -16,7 +16,6 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, @@ -109,13 +108,6 @@ impl, A: ConsensusApi + // cause an overflow error. // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block - // stop polling for the received proposal - self.da_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForProposal( - *proposal.data.view_number, - )) - .await; - if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { warn!("Throwing away DA proposal that is more than one view older"); return None; @@ -266,47 +258,16 @@ impl, A: ConsensusApi + } self.cur_view = view; - // Inject view info into network - let is_da = self - .da_membership - .get_whole_committee(self.cur_view + 1) - .contains(&self.public_key); - - if is_da { - debug!("Polling for DA proposals for view {}", *self.cur_view + 1); - self.da_network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal( - *self.cur_view + 1, - )) - .await; - } - if self.da_membership.get_leader(self.cur_view + 3) == self.public_key { - debug!("Polling for transactions for view {}", *self.cur_view + 3); - self.da_network - .inject_consensus_info(ConsensusIntentEvent::PollForTransactions( - *self.cur_view + 3, - )) - .await; - } - // If we are not the next leader (DA leader for this view) immediately exit if self.da_membership.get_leader(self.cur_view + 1) != self.public_key { return None; } debug!("Polling for DA votes for view {}", *self.cur_view + 1); - // Start polling for DA votes for the "next view" - self.da_network - .inject_consensus_info(ConsensusIntentEvent::PollForVotes(*self.cur_view + 1)) - .await; - return None; } HotShotEvent::BlockRecv(encoded_transactions, metadata, view, _fee) => { let view = *view; - self.da_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForTransactions(*view)) - .await; // quick hash the encoded txns with sha256 let encoded_transactions_hash = Sha256::digest(encoded_transactions); @@ -342,12 +303,6 @@ impl, A: ConsensusApi + .await; } - HotShotEvent::Timeout(view) => { - self.da_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(**view)) - .await; - } - HotShotEvent::Shutdown => { error!("Shutting down because of shutdown signal!"); return Some(HotShotTaskCompleted); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ec0c4b6161..dffe342a84 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -400,7 +400,9 @@ impl< } HotShotEvent::ViewChange(view) => { self.view = view; - self.channel.update_view(self.view.get_u64()); + self.channel + .update_view::(self.view.get_u64(), membership) + .await; return None; } HotShotEvent::VersionUpgrade(version) => { diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 85cfff7c40..748f2ffb02 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -16,7 +16,6 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, @@ -521,13 +520,6 @@ impl> QuorumProposalTaskState { match cert.clone() { either::Right(timeout_cert) => { - // cancel poll for votes - self.quorum_network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes( - *timeout_cert.view_number, - )) - .await; - let view = timeout_cert.view_number + 1; self.create_dependency_task_if_new( @@ -545,14 +537,6 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> QuorumVoteTaskState> QuorumVoteTaskState, A: ConsensusApi + } self.cur_view = view; - // Start polling for VID disperse for the new view - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse( - *self.cur_view + 1, - )) - .await; - // If we are not the next leader, we should exit if self.membership.get_leader(self.cur_view + 1) != self.public_key { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c5ee4da88b..aca66bfa1b 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -24,7 +24,6 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, election::Membership, - network::{ConnectedNetwork, ConsensusIntentEvent}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, }, @@ -470,11 +469,6 @@ impl< return; } - // cancel poll for votes - self.network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForVotes(*view_number)) - .await; - self.num_timeouts_tracked += 1; error!( "Num timeouts tracked since last view change is {}. View {} timed out", @@ -487,38 +481,7 @@ impl< if self.num_timeouts_tracked >= 2 { error!("Starting view sync protocol for view {}", *view_number + 1); - // Start polling for view sync certificates - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncCertificate( - *view_number + 1, - )) - .await; - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForViewSyncVotes( - *view_number + 1, - )) - .await; - - // Spawn replica task - let next_view = *view_number + 1; - // Subscribe to the view after we are leader since we know we won't propose in the next view if we are leader. - let subscribe_view = if self.membership.get_leader(TYPES::Time::new(next_view)) - == self.public_key - { - next_view + 1 - } else { - next_view - }; - // Subscribe to the next view just in case there is progress being made - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForProposal( - subscribe_view, - )) - .await; - self.network - .inject_consensus_info(ConsensusIntentEvent::PollForDAC(subscribe_view)) - .await; self.send_to_or_create_replica( Arc::new(HotShotEvent::ViewSyncTrigger(view_number + 1)), view_number + 1, @@ -745,20 +708,6 @@ impl, A: ConsensusApi + return Some(HotShotTaskCompleted); } - // cancel poll for votes - self.network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncVotes( - *certificate.view_number, - )) - .await; - - // cancel poll for view sync cert - self.network - .inject_consensus_info(ConsensusIntentEvent::CancelPollForViewSyncCertificate( - *certificate.view_number, - )) - .await; - if certificate.get_data().relay > self.relay { self.relay = certificate.get_data().relay; } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index d673780a7c..652bd74c26 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,12 +1,13 @@ // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] -use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; - use std::sync::Arc; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_example_types::state_types::TestInstanceState; +use hotshot::tasks::{task_state::CreateTaskState}; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, +}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::{ @@ -112,8 +113,6 @@ async fn test_consensus_task() { let consensus_state = ConsensusTaskState::::create_from(&handle).await; - inject_consensus_polls(&consensus_state).await; - run_test_script(vec![view_1, view_2], consensus_state).await; } @@ -122,7 +121,7 @@ async fn test_consensus_task() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { - use hotshot::tasks::{inject_consensus_polls, task_state::CreateTaskState}; + use hotshot::tasks::task_state::CreateTaskState; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ script::{run_test_script, TestScriptStage}, @@ -171,7 +170,6 @@ async fn test_consensus_vote() { let consensus_state = ConsensusTaskState::::create_from(&handle).await; - inject_consensus_polls(&consensus_state).await; run_test_script(vec![view_1], consensus_state).await; } @@ -239,7 +237,6 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let consensus_state = ConsensusTaskState::::create_from(&handle).await; - inject_consensus_polls(&consensus_state).await; run_test_script(vec![view_1, view_2], consensus_state).await; } @@ -394,7 +391,6 @@ async fn test_view_sync_finalize_propose() { let stages = vec![view_1, view_2_3, view_4]; - inject_consensus_polls(&consensus_state).await; run_test_script(stages, consensus_state).await; } @@ -492,7 +488,6 @@ async fn test_view_sync_finalize_vote() { let stages = vec![view_1, view_2, view_3]; - inject_consensus_polls(&consensus_state).await; run_test_script(stages, consensus_state).await; } @@ -600,7 +595,6 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let stages = vec![view_1, view_2, view_3]; - inject_consensus_polls(&consensus_state).await; run_test_script(stages, consensus_state).await; } @@ -652,7 +646,5 @@ async fn test_vid_disperse_storage_failure() { let consensus_state = ConsensusTaskState::::create_from(&handle).await; - inject_consensus_polls(&consensus_state).await; - run_test_script(vec![view_1], consensus_state).await; } diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index a79f002c50..3034aef250 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,9 +1,10 @@ -use hotshot::tasks::{inject_quorum_proposal_polls, task_state::CreateTaskState}; - -use hotshot_example_types::state_types::TestInstanceState; use std::sync::Arc; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot::tasks::{task_state::CreateTaskState}; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes}, + state_types::{TestInstanceState, }, +}; use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ predicates::event::quorum_proposal_send, @@ -87,7 +88,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let script = vec![view]; run_test_script(script, quorum_proposal_task_state).await; @@ -178,7 +178,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let script = vec![view]; run_test_script(script, quorum_proposal_task_state).await; @@ -245,7 +244,6 @@ async fn test_quorum_proposal_task_qc_timeout() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let script = vec![view_2]; run_test_script(script, quorum_proposal_task_state).await; @@ -317,7 +315,6 @@ async fn test_quorum_proposal_task_view_sync() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let script = vec![view_2]; run_test_script(script, quorum_proposal_task_state).await; @@ -460,7 +457,6 @@ async fn test_quorum_proposal_task_propose_now() { for stage in vec![view_qp, view_timeout, view_view_sync] { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let script = vec![stage]; run_test_script(script, quorum_proposal_task_state).await; @@ -505,7 +501,6 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - inject_quorum_proposal_polls(&quorum_proposal_task_state).await; let script = vec![view_2]; run_test_script(script, quorum_proposal_task_state).await; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 0656474c0c..07a8c7d3b6 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -1,7 +1,7 @@ use std::time::Duration; use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -12,7 +12,7 @@ use hotshot_testing::{ }; cross_tests!( TestName: test_success, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index e477394601..785d2e3c66 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -2,7 +2,7 @@ #![allow(unused_imports)] use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -15,7 +15,7 @@ use hotshot_testing::{ #[cfg(not(feature = "dependency-tasks"))] cross_tests!( TestName: test_with_failures_2, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 75a555caf8..f3331cb24e 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use std::time::Duration; use hotshot::{ - tasks::{inject_consensus_polls, task_state::CreateTaskState}, + tasks::{task_state::CreateTaskState}, types::SystemContextHandle, }; use hotshot_example_types::{ @@ -157,8 +157,6 @@ async fn test_consensus_task_upgrade() { let consensus_state = ConsensusTaskState::::create_from(&handle).await; - inject_consensus_polls(&consensus_state).await; - run_test_script(script, consensus_state).await; } @@ -236,8 +234,6 @@ async fn test_upgrade_and_consensus_task() { upgrade_state.should_vote = |_| true; - inject_consensus_polls(&consensus_state).await; - let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); let inputs = vec![ @@ -429,8 +425,6 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { upgrade_state.should_vote = |_| true; - inject_consensus_polls(&consensus_state).await; - let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index 0071584586..0a6448a410 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -1,5 +1,5 @@ use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -12,7 +12,7 @@ use hotshot_testing::{ // Test one node leaving the network. cross_tests!( TestName: test_with_failures_one, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 74c36f2b20..27383cfdb1 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -1,5 +1,5 @@ use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl }, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -11,7 +11,7 @@ use hotshot_testing::{ // Test f/2 nodes leaving the network. cross_tests!( TestName: test_with_failures_half_f, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index a46a402e06..690269ad11 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -1,5 +1,5 @@ use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, WebImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl }, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -11,7 +11,7 @@ use hotshot_testing::{ // Test f nodes leaving the network. cross_tests!( TestName: test_with_failures_f, - Impls: [MemoryImpl, WebImpl, Libp2pImpl, PushCdnImpl], + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], Ignore: false, Metadata: { diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 7c0f6de2a5..250852defc 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -3,10 +3,10 @@ #[cfg_attr(async_executor_impl = "async-std", async_std::test)] // TODO Add memory network tests after this issue is finished: // https://github.com/EspressoSystems/HotShot/issues/1790 -async fn test_timeout_web() { +async fn test_timeout() { use std::time::Duration; - use hotshot_example_types::node_types::{TestTypes, WebImpl}; + use hotshot_example_types::node_types::{TestTypes, MemoryImpl}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -53,7 +53,7 @@ async fn test_timeout_web() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::(0) + .gen_launcher::(0) .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/web_server.rs b/testing/tests/tests_5/web_server.rs deleted file mode 100644 index 99ec3ff64b..0000000000 --- a/testing/tests/tests_5/web_server.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::time::Duration; - -use async_compatibility_layer::logging::shutdown_logging; -use hotshot_example_types::node_types::{TestTypes, WebImpl}; -use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - test_builder::{TestDescription, TimingData}, -}; -use tracing::instrument; - -/// Web server network test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn web_server_network() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { - timing_data: TimingData { - round_start_delay: 25, - next_view_timeout: 10_000, - start_delay: 120_000, - - ..Default::default() - }, - overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, - num_successful_views: 35, - ..Default::default() - }, - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - ..TestDescription::default() - }; - metadata - .gen_launcher::(0) - .launch() - .run_test::() - .await; - shutdown_logging(); -} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 570c7e660f..60a934c144 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -13,6 +13,7 @@ use futures::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; + #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ @@ -166,83 +167,6 @@ pub enum NetworkError { errors: Vec>, }, } -#[derive(Clone, Debug)] -// Storing view number as a u64 to avoid the need TYPES generic -/// Events to poll or cancel consensus processes. -pub enum ConsensusIntentEvent { - /// Poll for votes for a particular view - PollForVotes(u64), - /// Poll for upgrade votes for a particular view - PollForUpgradeVotes(u64), - /// Poll for a proposal for a particular view - PollForProposal(u64), - /// Poll for an upgrade proposal for a particular view - PollForUpgradeProposal(u64), - /// Poll for VID disperse data for a particular view - PollForVIDDisperse(u64), - /// Poll for the most recent [quorum/da] proposal the webserver has - PollForLatestProposal, - /// Poll for the most recent view sync proposal the webserver has - PollForLatestViewSyncCertificate, - /// Poll for a DAC for a particular view - PollForDAC(u64), - /// Poll for view sync votes starting at a particular view - PollForViewSyncVotes(u64), - /// Poll for view sync proposals (certificates) for a particular view - PollForViewSyncCertificate(u64), - /// Poll for new transactions - PollForTransactions(u64), - /// Poll for future leader - PollFutureLeader(u64, K), - /// Cancel polling for votes - CancelPollForVotes(u64), - /// Cancel polling for view sync votes. - CancelPollForViewSyncVotes(u64), - /// Cancel polling for proposals. - CancelPollForProposal(u64), - /// Cancel polling for the latest proposal. - CancelPollForLatestProposal(u64), - /// Cancel polling for the latest view sync certificate - CancelPollForLatestViewSyncCertificate(u64), - /// Cancal polling for DAC. - CancelPollForDAC(u64), - /// Cancel polling for view sync certificate. - CancelPollForViewSyncCertificate(u64), - /// Cancel polling for VID disperse data - CancelPollForVIDDisperse(u64), - /// Cancel polling for transactions - CancelPollForTransactions(u64), -} - -impl ConsensusIntentEvent { - /// Get the view number of the event. - #[must_use] - pub fn view_number(&self) -> u64 { - match &self { - ConsensusIntentEvent::PollForVotes(view_number) - | ConsensusIntentEvent::PollForProposal(view_number) - | ConsensusIntentEvent::PollForUpgradeVotes(view_number) - | ConsensusIntentEvent::PollForUpgradeProposal(view_number) - | ConsensusIntentEvent::PollForDAC(view_number) - | ConsensusIntentEvent::PollForViewSyncVotes(view_number) - | ConsensusIntentEvent::CancelPollForViewSyncVotes(view_number) - | ConsensusIntentEvent::CancelPollForVotes(view_number) - | ConsensusIntentEvent::CancelPollForProposal(view_number) - | ConsensusIntentEvent::CancelPollForLatestProposal(view_number) - | ConsensusIntentEvent::CancelPollForLatestViewSyncCertificate(view_number) - | ConsensusIntentEvent::PollForVIDDisperse(view_number) - | ConsensusIntentEvent::CancelPollForVIDDisperse(view_number) - | ConsensusIntentEvent::CancelPollForDAC(view_number) - | ConsensusIntentEvent::CancelPollForViewSyncCertificate(view_number) - | ConsensusIntentEvent::PollForViewSyncCertificate(view_number) - | ConsensusIntentEvent::PollForTransactions(view_number) - | ConsensusIntentEvent::CancelPollForTransactions(view_number) - | ConsensusIntentEvent::PollFutureLeader(view_number, _) => *view_number, - ConsensusIntentEvent::PollForLatestProposal - | ConsensusIntentEvent::PollForLatestViewSyncCertificate => 1, - } - } -} /// common traits we would like our network messages to implement pub trait NetworkMsg: @@ -303,11 +227,11 @@ pub enum ResponseMessage { Denied, } +#[async_trait] /// represents a networking implmentration /// exposes low level API for interacting with a network /// intended to be implemented for libp2p, the centralized server, /// and memory network -#[async_trait] pub trait ConnectedNetwork: Clone + Send + Sync + 'static { @@ -420,13 +344,12 @@ pub trait ConnectedNetwork: Ok(()) } - /// Injects consensus data such as view number into the networking implementation - /// blocking - /// Ideally we would pass in the `Time` type, but that requires making the entire trait generic over NodeType - async fn inject_consensus_info(&self, _event: ConsensusIntentEvent) {} - /// handles view update - fn update_view(&self, _view: u64) {} + async fn update_view<'a, TYPES>(&'a self, _view: u64, _membership: &TYPES::Membership) + where + TYPES: NodeType + 'a, + { + } /// Is primary network down? Makes sense only for combined network fn is_primary_down(&self) -> bool { diff --git a/web_server/Cargo.toml b/web_server/Cargo.toml deleted file mode 100644 index 29213c9b67..0000000000 --- a/web_server/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "hotshot-web-server" -description = "HotShot web server" -version = { workspace = true } -readme = "README.md" -edition = { workspace = true } - -[dependencies] -async-compatibility-layer = { workspace = true } -async-lock = { workspace = true } -clap.workspace = true -futures = { workspace = true } -hotshot-types = { path = "../types" } -tide-disco = { workspace = true } -tracing = { workspace = true } -rand = { workspace = true } -toml = { workspace = true } -vbs = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints] -workspace = true diff --git a/web_server/README.md b/web_server/README.md deleted file mode 100644 index 3d616c1bc6..0000000000 --- a/web_server/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Web Server - -This crate implements a web server that HotShot can use to increase its throughput. The web server is designed to be run behind several Nginx instances for high performance. - -The run the web server: `cargo run --features=full-ci --example web_server`. -This will start the web server at `0.0.0.0:9000` - -## How it works -Unlike the other networking implementations in `HotShot` that use a *pushing* paradigm over a TCP connection, the web server uses a *pulling* paradigm over HTTP. In the older centralized server, for example, messages are pushed to nodes from the server. In contrast, in the web server nodes must poll (pull from) the server periodically to download the latest data. The direction of dataflow is reversed. This design allows us to take advantage of Nginx's powerful caching mode, which will improve performance in the large networks `HotShot` is designed for. Keep in mind that `HotShot` will also be using `Libp2p` in tandem with the web server. The web server allows high bandwidth through the network under optimistic conditions while `Libp2p` protects against centralization and censorship. - -The web server is built on [Tide Disco](https://github.com/EspressoSystems/tide-disco), an expansion on the popular [Tide](https://docs.rs/tide/0.16.0/tide/index.html) Rust web application framework. It has several endpoints specified in its `api.toml` file. These endpoints are what nodes use to either POST or GET messages. For example, a replica node will poll the web server periodically through the `proposal` endpoint to ask if there is a proposal for its current view. The server will respond with either an error if there is no proposal for that view yet, or with the proposal. It works similarly for transactions: Nodes can post transactions to the web server at any time. They can also download all the transactions they haven't received yet from the web server. - - - diff --git a/web_server/api.toml b/web_server/api.toml deleted file mode 100644 index 496331cf67..0000000000 --- a/web_server/api.toml +++ /dev/null @@ -1,210 +0,0 @@ -[meta] -NAME = "hotshot_web_server" -DESCRIPTION = "Web server for HotShot" -FORMAT_VERSION = "0.1.0" - -# GET the proposal for a view, where the view is passed as an argument -[route.getproposal] -PATH = ["proposal/:view_number"] -":view_number" = "Integer" -DOC = """ -Return the proposal for a given view number -""" - -# GET an upgrade proposal for a view, where the view is passed as an argument -[route.get_upgrade_proposal] -PATH = ["upgrade_proposal/:view_number"] -":view_number" = "Integer" -DOC = """ -Return the upgrade proposal for a given view number -""" - -# POST a VID disperse, where the view is passed as an argument -[route.getviddisperse] -PATH = ["vid_disperse/:view_number"] -":view_number" = "Integer" -DOC = """ -Return the VID disperse data for a given view number -""" - -# GET the latest quorum proposal -[route.get_latest_proposal] -PATH = ["proposal/latest"] -DOC = """ -Return the proposal for the most recent view the server has -""" - -# GET the latest quorum proposal -[route.get_latest_view_sync_certificate] -PATH = ["view_sync_certificate/latest"] -DOC = """ -Return the proposal for the most recent view the server has -""" - -# POST a proposal, where the view is passed as an argument -[route.postproposal] -PATH = ["proposal/:view_number"] -METHOD = "POST" -":view_number" = "Integer" -DOC = """ -Post the proposal for a given view_number -""" - -# POST an upgrade proposal, where the view is passed as an argument -[route.post_upgrade_proposal] -PATH = ["upgrade_proposal/:view_number"] -METHOD = "POST" -":view_number" = "Integer" -DOC = """ -Post the upgrade proposal for a given view_number -""" - -# POST a VID disperse, where the view is passed as an argument -[route.postviddisperse] -PATH = ["vid_disperse/:view_number"] -METHOD = "POST" -":view_number" = "Integer" -DOC = """ -Post the VID disperse data for a given view number -""" - -# GET the DA certificate for a view, where the view is passed as an argument -[route.getcertificate] -PATH = ["certificate/:view_number"] -":view_number" = "Integer" -DOC = """ -Return the DA certificate for a given view number -""" - - -# POST a DA certificate, where the view is passed as an argument -[route.postcertificate] -PATH = ["certificate/:view_number"] -METHOD = "POST" -":view_number" = "Integer" -DOC = """ -Post the DA certificate for a given view_number -""" - - -# GET all the votes from a given index for a given view number -[route.getvotes] -PATH = ["votes/:view_number/:index"] -":view_number" = "Integer" -":index" = "Integer" -METHOD = "GET" -DOC = """ -Get all votes for a view number -""" - -# GET all the upgrade votes from a given index for a given view number -[route.get_upgrade_votes] -PATH = ["upgrade_votes/:view_number/:index"] -":view_number" = "Integer" -":index" = "Integer" -METHOD = "GET" -DOC = """ -Get all upgrade votes for a view number -""" - - -# POST a vote, where the view number is passed as an argument -[route.postvote] -PATH = ["votes/:view_number"] -":view_number" = "Integer" -METHOD = "POST" -DOC = """ -Send a vote -""" - -# POST an upgrade vote, where the view number is passed as an argument -[route.post_upgrade_vote] -PATH = ["upgrade_votes/:view_number"] -":view_number" = "Integer" -METHOD = "POST" -DOC = """ -Send an upgrade vote -""" - - -# GET all transactions starting at :index -[route.gettransactions] -PATH = ["transactions/:index"] -":index" = "Integer" -METHOD = "GET" -DOC = """ -Get all transactions since given index -""" - - -# POST a transaction -[route.posttransaction] -PATH = ["transactions"] -METHOD = "POST" -DOC = """ -Post a transaction to the web server -""" - -# POST a transaction removal -[route.postcompletedtransaction] -PATH = ["transactionscomplet"] -METHOD = "POST" -DOC = """ -Post a transaction removal to the web server -""" - -# POST stake table -[route.poststaketable] -PATH = ["staketable"] -METHOD = "POST" -DOC = """ -Post the stake table to the web server -""" - -# POST secret proposal -[route.secret] -PATH = ["secret/:view_number/:secret"] -METHOD = "POST" -":view_number" = "Integer" -":secret" = "Literal" -DOC = """ -Secret path for leader to post proposal for a given view -""" - -# POST a view sync vote, where the view number is passed as an argument -[route.postviewsyncvote] -PATH = ["view_sync_vote/:view_number"] -":view_number" = "Integer" -METHOD = "POST" -DOC = """ -Send a view sync vote -""" - -# GET a view sync vote, where the view number is passed as an argument -[route.getviewsyncvotes] -PATH = ["view_sync_vote/:view_number/:index"] -":view_number" = "Integer" -":index" = "Integer" -METHOD = "GET" -DOC = """ -GET a view sync vote -""" - -# POST a view sync proposal, where the view number is passed as an argument -[route.postviewsynccertificate] -PATH = ["view_sync_certificate/:view_number"] -":view_number" = "Integer" -METHOD = "POST" -DOC = """ -Send a view sync vote -""" - -# GET a view sync certificate, where the view number is passed as an argument -[route.getviewsynccertificate] -PATH = ["view_sync_certificate/:view_number/:index"] -":view_number" = "Integer" -":index" = "Integer" -METHOD = "GET" -DOC = """ -GET a view sync proposal -""" diff --git a/web_server/src/config.rs b/web_server/src/config.rs deleted file mode 100644 index 7dcfd8de66..0000000000 --- a/web_server/src/config.rs +++ /dev/null @@ -1,163 +0,0 @@ -/// the default port on which to run the web server -pub const DEFAULT_WEB_SERVER_PORT: u16 = 9000; -/// the default port on which to serve Data availability functionality -pub const DEFAULT_WEB_SERVER_DA_PORT: u16 = 9001; -/// the default port on which to serve View Sync functionality -pub const DEFAULT_WEB_SERVER_VIEW_SYNC_PORT: u16 = 9002; - -/// How many views to keep in memory -pub const MAX_VIEWS: usize = 100; -/// How many transactions to keep in memory -pub const MAX_TXNS: usize = 500; -/// How many transactions to return at once -pub const TX_BATCH_SIZE: u64 = 1; - -/// get proposal -#[must_use] -pub fn get_proposal_route(view_number: u64) -> String { - format!("api/proposal/{view_number}") -} - -/// post proposal -#[must_use] -pub fn post_proposal_route(view_number: u64) -> String { - format!("api/proposal/{view_number}") -} - -/// get latest qc -#[must_use] -pub fn get_latest_proposal_route() -> String { - "api/proposal/latest".to_string() -} - -/// get latest view sync proposal -#[must_use] -pub fn get_latest_view_sync_certificate_route() -> String { - "api/view_sync_certificate/latest".to_string() -} - -/// get latest certificate -#[must_use] -pub fn get_da_certificate_route(view_number: u64) -> String { - format!("api/certificate/{view_number}") -} - -/// post data availability certificate -#[must_use] -pub fn post_da_certificate_route(view_number: u64) -> String { - format!("api/certificate/{view_number}") -} - -/// get vote -#[must_use] -pub fn get_vote_route(view_number: u64, index: u64) -> String { - format!("api/votes/{view_number}/{index}") -} - -/// post vote -#[must_use] -pub fn post_vote_route(view_number: u64) -> String { - format!("api/votes/{view_number}") -} - -/// get upgrade votes -#[must_use] -pub fn get_upgrade_vote_route(view_number: u64, index: u64) -> String { - format!("api/upgrade_votes/{view_number}/{index}") -} - -/// post vote -#[must_use] -pub fn post_upgrade_vote_route(view_number: u64) -> String { - format!("api/upgrade_votes/{view_number}") -} - -/// get vid dispersal -#[must_use] -pub fn get_vid_disperse_route(view_number: u64) -> String { - format!("api/vid_disperse/{view_number}") -} - -/// post vid dispersal -#[must_use] -pub fn post_vid_disperse_route(view_number: u64) -> String { - format!("api/vid_disperse/{view_number}") -} - -/// get upgrade proposal -#[must_use] -pub fn get_upgrade_proposal_route(view_number: u64) -> String { - format!("api/upgrade_proposal/{view_number}") -} - -/// post upgrade proposal -#[must_use] -pub fn post_upgrade_proposal_route(view_number: u64) -> String { - format!("api/upgrade_proposal/{view_number}") -} - -/// get vid vote route -#[must_use] -pub fn get_vid_vote_route(view_number: u64, index: u64) -> String { - format!("api/vid_votes/{view_number}/{index}") -} - -/// post vid vote route -#[must_use] -pub fn post_vid_vote_route(view_number: u64) -> String { - format!("api/vid_votes/{view_number}") -} - -/// get vid certificate -#[must_use] -pub fn get_vid_certificate_route(view_number: u64) -> String { - format!("api/vid_certificate/{view_number}") -} - -/// post vid certificate -#[must_use] -pub fn post_vid_certificate_route(view_number: u64) -> String { - format!("api/vid_certificate/{view_number}") -} - -/// get transactions -#[must_use] -pub fn get_transactions_route(index: u64) -> String { - format!("api/transactions/{index}") -} - -/// post transactions -#[must_use] -pub fn post_transactions_route() -> String { - "api/transactions".to_string() -} - -/// post stake table -#[must_use] -pub fn post_staketable_route() -> String { - "api/staketable".to_string() -} - -/// post view sync proposal -#[must_use] -pub fn post_view_sync_certificate_route(view_number: u64) -> String { - format!("api/view_sync_certificate/{view_number}") -} - -/// get view sync proposal -#[must_use] -pub fn get_view_sync_certificate_route(view_number: u64, index: u64) -> String { - format!("api/view_sync_certificate/{view_number}/{index}") -} - -/// post view sync vote -#[must_use] -pub fn post_view_sync_vote_route(view_number: u64) -> String { - format!("api/view_sync_vote/{view_number}") -} - -/// get view sync vote -#[must_use] -pub fn get_view_sync_vote_route(view_number: u64, index: u64) -> String { - format!("api/view_sync_vote/{view_number}/{index}") -} diff --git a/web_server/src/lib.rs b/web_server/src/lib.rs deleted file mode 100644 index 6f4b110fcd..0000000000 --- a/web_server/src/lib.rs +++ /dev/null @@ -1,1120 +0,0 @@ -//! Web server for `HotShot` - -/// Configuration for the webserver -pub mod config; - -use std::{ - collections::{BTreeMap, HashMap}, - io, - path::PathBuf, -}; - -use async_compatibility_layer::channel::OneShotReceiver; -use async_lock::RwLock; -use clap::Args; -use futures::FutureExt; -use hotshot_types::traits::signature_key::SignatureKey; -use rand::{distributions::Alphanumeric, rngs::StdRng, thread_rng, Rng, SeedableRng}; -use tide_disco::{ - api::ApiError, - error::ServerError, - method::{ReadState, WriteState}, - Api, App, StatusCode, Url, -}; -use tracing::{debug, info}; -use vbs::version::StaticVersionType; - -use crate::config::{MAX_TXNS, MAX_VIEWS, TX_BATCH_SIZE}; - -/// Convience alias for a lock over the state of the app -/// TODO this is used in two places. It might be clearer to just inline -type State = RwLock>; -/// Convience alias for errors in this crate -type Error = ServerError; - -/// State that tracks proposals and votes the server receives -/// Data is stored as a `Vec` to not incur overhead from deserializing -// TODO should the view numbers be generic over time? -struct WebServerState { - /// view number -> (secret, proposal) - proposals: BTreeMap)>, - /// view number -> (secret, proposal) - upgrade_proposals: BTreeMap)>, - /// for view sync: view number -> (relay, certificate) - view_sync_certificates: BTreeMap)>>, - /// view number -> relay - view_sync_certificate_index: HashMap, - /// view number -> (secret, da_certificates) - da_certificates: HashMap)>, - /// view for the most recent proposal to help nodes catchup - latest_proposal: u64, - /// view for the most recent view sync proposal - latest_view_sync_certificate: u64, - /// view for the oldest DA certificate - oldest_certificate: u64, - /// view number -> Vec(index, vote) - votes: HashMap)>>, - /// view number -> Vec(index, vote) - upgrade_votes: HashMap)>>, - /// view sync: view number -> Vec(relay, vote) - view_sync_votes: HashMap)>>, - /// view number -> highest vote index for that view number - vote_index: HashMap, - /// view number -> highest vote index for that view number - upgrade_vote_index: HashMap, - /// view_sync: view number -> highest vote index for that view number - view_sync_vote_index: HashMap, - /// view number of oldest votes in memory - oldest_vote: u64, - /// view number of oldest votes in memory - oldest_upgrade_vote: u64, - /// view sync: view number of oldest votes in memory - oldest_view_sync_vote: u64, - /// view number -> (secret, string) - vid_disperses: HashMap>>, - /// view for the oldest vid disperal - oldest_vid_disperse: u64, - /// view of most recent vid dispersal - recent_vid_disperse: u64, - /// votes that a node got, that is, their VID share - vid_votes: HashMap)>>, - /// oldest vid vote view number - oldest_vid_vote: u64, - /// recent_vid_vote view number - vid_certificates: HashMap)>, - /// oldest vid certificate view number - oldest_vid_certificate: u64, - /// recent_vid_certificate: u64, - vid_vote_index: HashMap, - /// index -> transaction - // TODO ED Make indexable by hash of tx - transactions: HashMap>, - /// tx hash -> tx index, is currently unused - txn_lookup: HashMap, u64>, - /// highest transaction index - num_txns: u64, - - /// shutdown signal - shutdown: Option>, - /// stake table with leader keys - stake_table: Vec, - /// prng for generating endpoint - _prng: StdRng, -} - -impl WebServerState { - /// Create new web server state - fn new() -> Self { - Self { - proposals: BTreeMap::new(), - upgrade_proposals: BTreeMap::new(), - da_certificates: HashMap::new(), - votes: HashMap::new(), - num_txns: 0, - oldest_vote: 0, - latest_proposal: 0, - latest_view_sync_certificate: 0, - oldest_certificate: 0, - shutdown: None, - stake_table: Vec::new(), - vote_index: HashMap::new(), - transactions: HashMap::new(), - txn_lookup: HashMap::new(), - _prng: StdRng::from_entropy(), - view_sync_certificates: BTreeMap::new(), - view_sync_votes: HashMap::new(), - view_sync_vote_index: HashMap::new(), - upgrade_votes: HashMap::new(), - oldest_upgrade_vote: 0, - upgrade_vote_index: HashMap::new(), - - vid_disperses: HashMap::new(), - oldest_vid_disperse: 0, - recent_vid_disperse: 0, - - vid_votes: HashMap::new(), - oldest_vid_vote: 0, - // recent_vid_vote: 0, - vid_certificates: HashMap::new(), - oldest_vid_certificate: 0, - // recent_vid_certificate: 0, - vid_vote_index: HashMap::new(), - - oldest_view_sync_vote: 0, - view_sync_certificate_index: HashMap::new(), - } - } - /// Provide a shutdown signal to the server - /// # Panics - /// Panics if already shut down - #[allow(clippy::panic)] - pub fn with_shutdown_signal(mut self, shutdown_listener: Option>) -> Self { - assert!( - self.shutdown.is_none(), - "A shutdown signal is already registered and can not be registered twice" - ); - self.shutdown = shutdown_listener; - self - } -} - -/// Trait defining methods needed for the `WebServerState` -pub trait WebServerDataSource { - /// Get proposal - /// # Errors - /// Error if unable to serve. - fn get_proposal(&self, view_number: u64) -> Result>>, Error>; - /// Get upgrade proposal - /// # Errors - /// Error if unable to serve. - fn get_upgrade_proposal(&self, view_number: u64) -> Result>>, Error>; - /// Get latest quanrum proposal - /// # Errors - /// Error if unable to serve. - fn get_latest_proposal(&self) -> Result>>, Error>; - /// Get latest view sync proposal - /// # Errors - /// Error if unable to serve. - fn get_latest_view_sync_certificate(&self) -> Result>>, Error>; - /// Get view sync proposal - /// # Errors - /// Error if unable to serve. - fn get_view_sync_certificate( - &self, - view_number: u64, - index: u64, - ) -> Result>>, Error>; - - /// Get vote - /// # Errors - /// Error if unable to serve. - fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; - /// Get upgrade votes - /// # Errors - /// Error if unable to serve. - fn get_upgrade_votes( - &self, - view_number: u64, - index: u64, - ) -> Result>>, Error>; - /// Get view sync votes - /// # Errors - /// Error if unable to serve. - fn get_view_sync_votes( - &self, - view_number: u64, - index: u64, - ) -> Result>>, Error>; - - #[allow(clippy::type_complexity)] - /// Get transactions - /// # Errors - /// Error if unable to serve. - fn get_transactions(&self, index: u64) -> Result>)>, Error>; - /// Get da certificate - /// # Errors - /// Error if unable to serve. - fn get_da_certificate(&self, index: u64) -> Result>>, Error>; - /// Post vote - /// # Errors - /// Error if unable to serve. - fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; - /// Post upgrade vote - /// # Errors - /// Error if unable to serve. - fn post_upgrade_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; - /// Post view sync vote - /// # Errors - /// Error if unable to serve. - fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; - - /// Post proposal - /// # Errors - /// Error if unable to serve. - fn post_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; - /// Post upgrade proposal - /// # Errors - /// Error if unable to serve. - fn post_upgrade_proposal(&mut self, view_number: u64, proposal: Vec) -> Result<(), Error>; - /// Post view sync certificate - /// # Errors - /// Error if unable to serve. - fn post_view_sync_certificate( - &mut self, - view_number: u64, - certificate: Vec, - ) -> Result<(), Error>; - - /// Post data avaiability certificate - /// # Errors - /// Error if unable to serve. - fn post_da_certificate(&mut self, view_number: u64, cert: Vec) -> Result<(), Error>; - /// Post transaction - /// # Errors - /// Error if unable to serve. - fn post_transaction(&mut self, txn: Vec) -> Result<(), Error>; - /// Post staketable - /// # Errors - /// Error if unable to serve. - fn post_staketable(&mut self, key: Vec) -> Result<(), Error>; - /// Post completed transaction - /// # Errors - /// Error if unable to serve. - fn post_completed_transaction(&mut self, block: Vec) -> Result<(), Error>; - /// Post secret proposal - /// # Errors - /// Error if unable to serve. - fn post_secret_proposal(&mut self, _view_number: u64, _proposal: Vec) -> Result<(), Error>; - /// Post proposal - /// # Errors - /// Error if unable to serve. - fn proposal(&self, view_number: u64) -> Option<(String, Vec)>; - /// Post vid disperal - /// # Errors - /// Error if unable to serve. - fn post_vid_disperse(&mut self, view_number: u64, disperse: Vec) -> Result<(), Error>; - /// Post vid vote - /// # Errors - /// Error if unable to serve. - fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error>; - /// post vid certificate - /// # Errors - /// Error if unable to serve. - fn post_vid_certificate(&mut self, view_number: u64, certificate: Vec) - -> Result<(), Error>; - /// Get vid dispersal - /// # Errors - /// Error if unable to serve. - fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error>; - /// Get vid votes - /// # Errors - /// Error if unable to serve. - fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error>; - /// Get vid certificates - /// # Errors - /// Error if unable to serve. - fn get_vid_certificate(&self, index: u64) -> Result>>, Error>; -} - -impl WebServerDataSource for WebServerState { - fn proposal(&self, view_number: u64) -> Option<(String, Vec)> { - self.proposals.get(&view_number).cloned() - } - /// Return the proposal the server has received for a particular view - fn get_proposal(&self, view_number: u64) -> Result>>, Error> { - match self.proposals.get(&view_number) { - Some(proposal) => { - if proposal.1.is_empty() { - Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("Proposal empty for view {view_number}"), - }) - } else { - Ok(Some(vec![proposal.1.clone()])) - } - } - None => Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("Proposal not found for view {view_number}"), - }), - } - } - /// Return the proposal the server has received for a particular view - fn get_upgrade_proposal(&self, view_number: u64) -> Result>>, Error> { - match self.upgrade_proposals.get(&view_number) { - Some(proposal) => { - if proposal.1.is_empty() { - Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("Proposal empty for view {view_number}"), - }) - } else { - tracing::error!("found proposal"); - Ok(Some(vec![proposal.1.clone()])) - } - } - None => Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("Proposal not found for view {view_number}"), - }), - } - } - - /// Return the VID disperse data that the server has received for a particular view - fn get_vid_disperse(&self, view_number: u64) -> Result>>, Error> { - match self.vid_disperses.get(&view_number) { - Some(disperse) => { - if disperse.is_empty() { - Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("VID disperse not found for view {view_number}"), - }) - } else { - Ok(Some(disperse.clone())) - } - } - None => Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("VID disperse not found for view {view_number}"), - }), - } - } - - fn get_latest_proposal(&self) -> Result>>, Error> { - self.get_proposal(self.latest_proposal) - } - - fn get_latest_view_sync_certificate(&self) -> Result>>, Error> { - self.get_view_sync_certificate(self.latest_view_sync_certificate, 0) - } - - fn get_view_sync_certificate( - &self, - view_number: u64, - index: u64, - ) -> Result>>, Error> { - let proposals = self.view_sync_certificates.get(&view_number); - let mut ret_proposals = vec![]; - if let Some(cert) = proposals { - for i in index..*self.view_sync_certificate_index.get(&view_number).unwrap() { - ret_proposals.push(cert[usize::try_from(i).unwrap()].1.clone()); - } - } - if ret_proposals.is_empty() { - Ok(None) - } else { - Ok(Some(ret_proposals)) - } - } - - /// Return all votes the server has received for a particular view from provided index to most recent - fn get_votes(&self, view_number: u64, index: u64) -> Result>>, Error> { - let votes = self.votes.get(&view_number); - let mut ret_votes = vec![]; - if let Some(votes) = votes { - for i in index..*self.vote_index.get(&view_number).unwrap() { - ret_votes.push(votes[usize::try_from(i).unwrap()].1.clone()); - } - } - if ret_votes.is_empty() { - Ok(None) - } else { - Ok(Some(ret_votes)) - } - } - - /// Return all votes the server has received for a particular view from provided index to most recent - fn get_upgrade_votes( - &self, - view_number: u64, - index: u64, - ) -> Result>>, Error> { - let votes = self.upgrade_votes.get(&view_number); - let mut ret_votes = vec![]; - if let Some(votes) = votes { - for i in index..*self.upgrade_vote_index.get(&view_number).unwrap() { - ret_votes.push(votes[usize::try_from(i).unwrap()].1.clone()); - } - } - if ret_votes.is_empty() { - Ok(None) - } else { - Ok(Some(ret_votes)) - } - } - - /// Return all VID votes the server has received for a particular view from provided index to most recent - fn get_vid_votes(&self, view_number: u64, index: u64) -> Result>>, Error> { - let vid_votes = self.vid_votes.get(&view_number); - let mut ret_votes = vec![]; - if let Some(vid_votes) = vid_votes { - for i in index..*self.vid_vote_index.get(&view_number).unwrap() { - ret_votes.push(vid_votes[usize::try_from(i).unwrap()].1.clone()); - } - } - if ret_votes.is_empty() { - Ok(None) - } else { - Ok(Some(ret_votes)) - } - } - - fn get_view_sync_votes( - &self, - view_number: u64, - index: u64, - ) -> Result>>, Error> { - let votes = self.view_sync_votes.get(&view_number); - let mut ret_votes = vec![]; - if let Some(votes) = votes { - for i in index..*self.view_sync_vote_index.get(&view_number).unwrap() { - ret_votes.push(votes[usize::try_from(i).unwrap()].1.clone()); - } - } - if ret_votes.is_empty() { - Ok(None) - } else { - Ok(Some(ret_votes)) - } - } - - #[allow(clippy::type_complexity)] - /// Return the transaction at the specified index (which will help with Nginx caching, but reduce performance otherwise) - /// In the future we will return batches of transactions - fn get_transactions(&self, index: u64) -> Result>)>, Error> { - let mut txns_to_return = vec![]; - - let lowest_in_memory_txs = if self.num_txns < MAX_TXNS.try_into().unwrap() { - 0 - } else { - usize::try_from(self.num_txns).unwrap() - MAX_TXNS - }; - - let starting_index = if (usize::try_from(index).unwrap()) < lowest_in_memory_txs { - lowest_in_memory_txs - } else { - usize::try_from(index).unwrap() - }; - - for idx in starting_index..=self.num_txns.try_into().unwrap() { - if let Some(txn) = self.transactions.get(&(idx as u64)) { - txns_to_return.push(txn.clone()); - } - if txns_to_return.len() >= usize::try_from(TX_BATCH_SIZE).unwrap() { - break; - } - } - - if txns_to_return.is_empty() { - Err(ServerError { - // TODO ED: Why does NoContent status code cause errors? - status: StatusCode::NotImplemented, - message: format!("Transaction not found for index {index}"), - }) - } else { - debug!("Returning this many txs {}", txns_to_return.len()); - //starting_index is the oldest index of the returned txns - Ok(Some((starting_index as u64, txns_to_return))) - } - } - - /// Return the da certificate the server has received for a particular view - fn get_da_certificate(&self, index: u64) -> Result>>, Error> { - match self.da_certificates.get(&index) { - Some(cert) => { - if cert.1.is_empty() { - Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("DA Certificate not found for view {index}"), - }) - } else { - Ok(Some(vec![cert.1.clone()])) - } - } - None => Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("Proposal not found for view {index}"), - }), - } - } - - /// Return the VID certificate the server has received for a particular view - fn get_vid_certificate(&self, index: u64) -> Result>>, Error> { - match self.vid_certificates.get(&index) { - Some(vid_cert) => { - if vid_cert.1.is_empty() { - Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("VID Certificate not found for view {index}"), - }) - } else { - Ok(Some(vec![vid_cert.1.clone()])) - } - } - None => Err(ServerError { - status: StatusCode::NotImplemented, - message: format!("VID certificate not found for view {index}"), - }), - } - } - - /// Stores a received vote in the `WebServerState` - fn post_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { - // Only keep vote history for MAX_VIEWS number of views - if self.votes.len() >= MAX_VIEWS { - self.votes.remove(&self.oldest_vote); - while !self.votes.contains_key(&self.oldest_vote) { - self.oldest_vote += 1; - } - } - - // don't accept the vote if it is too old - if self.oldest_vote > view_number { - return Err(ServerError { - status: StatusCode::Gone, - message: "Posted vote is too old".to_string(), - }); - } - - let next_index = self.vote_index.entry(view_number).or_insert(0); - self.votes - .entry(view_number) - .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) - .or_insert_with(|| vec![(*next_index, vote)]); - self.vote_index - .entry(view_number) - .and_modify(|index| *index += 1); - Ok(()) - } - - /// Stores a received vote in the `WebServerState` - fn post_upgrade_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { - // Only keep vote history for MAX_VIEWS number of views - if self.upgrade_votes.len() >= MAX_VIEWS { - self.upgrade_votes.remove(&self.oldest_upgrade_vote); - while !self.upgrade_votes.contains_key(&self.oldest_upgrade_vote) { - self.oldest_upgrade_vote += 1; - } - } - - // don't accept the vote if it is too old - if self.oldest_upgrade_vote > view_number { - return Err(ServerError { - status: StatusCode::Gone, - message: "Posted vote is too old".to_string(), - }); - } - - let next_index = self.upgrade_vote_index.entry(view_number).or_insert(0); - self.upgrade_votes - .entry(view_number) - .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) - .or_insert_with(|| vec![(*next_index, vote)]); - self.upgrade_vote_index - .entry(view_number) - .and_modify(|index| *index += 1); - Ok(()) - } - - /// Stores a received VID vote in the `WebServerState` - fn post_vid_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { - // Only keep vote history for MAX_VIEWS number of views - if self.vid_votes.len() >= MAX_VIEWS { - self.vid_votes.remove(&self.oldest_vote); - while !self.vid_votes.contains_key(&self.oldest_vid_vote) { - self.oldest_vid_vote += 1; - } - } - - // don't accept the vote if it is too old - if self.oldest_vid_vote > view_number { - return Err(ServerError { - status: StatusCode::Gone, - message: "Posted vid vote is too old".to_string(), - }); - } - - let next_index = self.vid_vote_index.entry(view_number).or_insert(0); - self.vid_votes - .entry(view_number) - .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) - .or_insert_with(|| vec![(*next_index, vote)]); - self.vid_vote_index - .entry(view_number) - .and_modify(|index| *index += 1); - Ok(()) - } - - fn post_view_sync_vote(&mut self, view_number: u64, vote: Vec) -> Result<(), Error> { - // Only keep vote history for MAX_VIEWS number of views - if self.view_sync_votes.len() >= MAX_VIEWS { - self.view_sync_votes.remove(&self.oldest_view_sync_vote); - while !self - .view_sync_votes - .contains_key(&self.oldest_view_sync_vote) - { - self.oldest_view_sync_vote += 1; - } - } - - // don't accept the vote if it is too old - if self.oldest_view_sync_vote > view_number { - return Err(ServerError { - status: StatusCode::Gone, - message: "Posted view sync vote is too old".to_string(), - }); - } - - let next_index = self.view_sync_vote_index.entry(view_number).or_insert(0); - self.view_sync_votes - .entry(view_number) - .and_modify(|current_votes| current_votes.push((*next_index, vote.clone()))) - .or_insert_with(|| vec![(*next_index, vote)]); - self.view_sync_vote_index - .entry(view_number) - .and_modify(|index| *index += 1); - Ok(()) - } - /// Stores a received proposal in the `WebServerState` - fn post_proposal(&mut self, view_number: u64, mut proposal: Vec) -> Result<(), Error> { - info!("Received proposal for view {}", view_number); - - if view_number > self.latest_proposal { - self.latest_proposal = view_number; - } - - // Only keep proposal history for MAX_VIEWS number of view - if self.proposals.len() >= MAX_VIEWS { - self.proposals.pop_first(); - } - self.proposals - .entry(view_number) - .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut proposal)) - .or_insert_with(|| (String::new(), proposal)); - Ok(()) - } - - fn post_upgrade_proposal( - &mut self, - view_number: u64, - mut proposal: Vec, - ) -> Result<(), Error> { - tracing::error!("Received upgrade proposal for view {}", view_number); - - if self.upgrade_proposals.len() >= MAX_VIEWS { - self.upgrade_proposals.pop_first(); - } - - self.upgrade_proposals - .entry(view_number) - .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut proposal)) - .or_insert_with(|| (String::new(), proposal)); - Ok(()) - } - - fn post_vid_disperse(&mut self, view_number: u64, disperse: Vec) -> Result<(), Error> { - if view_number > self.recent_vid_disperse { - self.recent_vid_disperse = view_number; - } - - // Only keep proposal history for MAX_VIEWS number of view - if self.vid_disperses.len() >= MAX_VIEWS { - self.vid_disperses.remove(&self.oldest_vid_disperse); - while !self.vid_disperses.contains_key(&self.oldest_vid_disperse) { - self.oldest_vid_disperse += 1; - } - } - self.vid_disperses - .entry(view_number) - .or_default() - .push(disperse); - Ok(()) - } - - fn post_view_sync_certificate( - &mut self, - view_number: u64, - proposal: Vec, - ) -> Result<(), Error> { - if view_number > self.latest_view_sync_certificate { - self.latest_view_sync_certificate = view_number; - } - - // Only keep proposal history for MAX_VIEWS number of view - if self.view_sync_certificates.len() >= MAX_VIEWS { - self.view_sync_certificates.pop_first(); - } - let next_index = self - .view_sync_certificate_index - .entry(view_number) - .or_insert(0); - self.view_sync_certificates - .entry(view_number) - .and_modify(|current_props| current_props.push((*next_index, proposal.clone()))) - .or_insert_with(|| vec![(*next_index, proposal)]); - self.view_sync_certificate_index - .entry(view_number) - .and_modify(|index| *index += 1); - Ok(()) - } - - /// Stores a received DA certificate in the `WebServerState` - fn post_da_certificate(&mut self, view_number: u64, mut cert: Vec) -> Result<(), Error> { - debug!("Received DA Certificate for view {}", view_number); - - // Only keep proposal history for MAX_VIEWS number of view - if self.da_certificates.len() >= MAX_VIEWS { - self.da_certificates.remove(&self.oldest_certificate); - while !self.da_certificates.contains_key(&self.oldest_certificate) { - self.oldest_certificate += 1; - } - } - self.da_certificates - .entry(view_number) - .and_modify(|(_, empty_cert)| empty_cert.append(&mut cert)) - .or_insert_with(|| (String::new(), cert)); - Ok(()) - } - - fn post_vid_certificate( - &mut self, - view_number: u64, - mut certificate: Vec, - ) -> Result<(), Error> { - info!("Received VID Certificate for view {}", view_number); - - // Only keep proposal history for MAX_VIEWS number of view - if self.vid_certificates.len() >= MAX_VIEWS { - self.vid_certificates.remove(&self.oldest_vid_certificate); - while !self - .vid_certificates - .contains_key(&self.oldest_vid_certificate) - { - self.oldest_vid_certificate += 1; - } - } - self.vid_certificates - .entry(view_number) - .and_modify(|(_, empty_cert)| empty_cert.append(&mut certificate)) - .or_insert_with(|| (String::new(), certificate)); - Ok(()) - } - - /// Stores a received group of transactions in the `WebServerState` - fn post_transaction(&mut self, txn: Vec) -> Result<(), Error> { - if self.transactions.len() >= MAX_TXNS { - let old_txn = self.transactions.remove(&(self.num_txns - MAX_TXNS as u64)); - if let Some(old_txn) = old_txn { - self.txn_lookup.remove(&old_txn); - } - } - self.txn_lookup.insert(txn.clone(), self.num_txns); - self.transactions.insert(self.num_txns, txn); - self.num_txns += 1; - - debug!( - "Received transaction! Number of transactions received is: {}", - self.num_txns - ); - - Ok(()) - } - - fn post_staketable(&mut self, key: Vec) -> Result<(), Error> { - // KALEY TODO: need security checks here - let new_key = KEY::from_bytes(&key).map_err(|_| ServerError { - status: StatusCode::BadRequest, - message: "Only signature keys can be added to stake table".to_string(), - })?; - let node_index = self.stake_table.len() as u64; - //generate secret for leader's first submission endpoint when key is added - let secret = thread_rng() - .sample_iter(&Alphanumeric) - .take(30) - .map(char::from) - .collect(); - self.proposals.insert(node_index, (secret, Vec::new())); - self.stake_table.push(new_key); - Ok(()) - } - - fn post_completed_transaction(&mut self, txn: Vec) -> Result<(), Error> { - if let Some(idx) = self.txn_lookup.remove(&txn) { - self.transactions.remove(&idx); - Ok(()) - } else { - Err(ServerError { - status: StatusCode::BadRequest, - message: "Transaction Not Found".to_string(), - }) - } - } - - //KALEY TODO: this will be merged with post_proposal once it is fully working, - //but keeping it separate to not break things in the meantime - fn post_secret_proposal( - &mut self, - view_number: u64, - mut proposal: Vec, - ) -> Result<(), Error> { - debug!("Received proposal for view {}", view_number); - - // Only keep proposal history for MAX_VIEWS number of views - if self.proposals.len() >= MAX_VIEWS { - self.proposals.pop_first(); - } - self.proposals - .entry(view_number) - .and_modify(|(_, empty_proposal)| empty_proposal.append(&mut proposal)); - - //generate new secret for the next time this node is leader - let secret = thread_rng() - .sample_iter(&Alphanumeric) - .take(30) - .map(char::from) - .collect(); - let next_view_for_leader = view_number + self.stake_table.len() as u64; - self.proposals - .insert(next_view_for_leader, (secret, Vec::new())); - Ok(()) - } -} - -/// configurability options for the web server -#[derive(Args, Default)] -pub struct Options { - #[arg(long = "web-server-api-path", env = "WEB_SERVER_API_PATH")] - /// path to API - pub api_path: Option, -} - -/// Sets up all API routes -/// This web server incorporates the protocol version within each message. -/// Transport versioning (generic params here) only changes when the web-CDN itself changes. -/// When transport versioning changes, the application itself must update its version. -#[allow(clippy::too_many_lines)] -fn define_api( - options: &Options, -) -> Result, ApiError> -where - State: 'static + Send + Sync + ReadState + WriteState, - ::State: Send + Sync + WebServerDataSource, - KEY: SignatureKey, -{ - let mut api = match &options.api_path { - Some(path) => Api::::from_file(path)?, - None => { - let toml: toml::Value = toml::from_str(include_str!("../api.toml")).map_err(|err| { - ApiError::CannotReadToml { - reason: err.to_string(), - } - })?; - Api::::new(toml)? - } - }; - api.get("getproposal", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - state.get_proposal(view_number) - } - .boxed() - })? - .get("get_upgrade_proposal", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - state.get_upgrade_proposal(view_number) - } - .boxed() - })? - .get("getviddisperse", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - state.get_vid_disperse(view_number) - } - .boxed() - })? - .get("get_latest_proposal", |_req, state| { - async move { state.get_latest_proposal() }.boxed() - })? - .get("get_latest_view_sync_certificate", |_req, state| { - async move { state.get_latest_view_sync_certificate() }.boxed() - })? - .get("getviewsynccertificate", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let index: u64 = req.integer_param("index")?; - state.get_view_sync_certificate(view_number, index) - } - .boxed() - })? - .get("getcertificate", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - state.get_da_certificate(view_number) - } - .boxed() - })? - .get("getvotes", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let index: u64 = req.integer_param("index")?; - state.get_votes(view_number, index) - } - .boxed() - })? - .get("get_upgrade_votes", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let index: u64 = req.integer_param("index")?; - state.get_upgrade_votes(view_number, index) - } - .boxed() - })? - .get("getviewsyncvotes", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let index: u64 = req.integer_param("index")?; - state.get_view_sync_votes(view_number, index) - } - .boxed() - })? - .get("gettransactions", |req, state| { - async move { - let index: u64 = req.integer_param("index")?; - state.get_transactions(index) - } - .boxed() - })? - .post("postvote", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically - let vote = req.body_bytes(); - state.post_vote(view_number, vote) - } - .boxed() - })? - .post("post_upgrade_vote", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically - let vote = req.body_bytes(); - state.post_upgrade_vote(view_number, vote) - } - .boxed() - })? - .post("postviewsyncvote", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - // Using body_bytes because we don't want to deserialize; body_auto or body_json deserializes automatically - let vote = req.body_bytes(); - state.post_view_sync_vote(view_number, vote) - } - .boxed() - })? - .post("postproposal", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let proposal = req.body_bytes(); - state.post_proposal(view_number, proposal) - } - .boxed() - })? - .post("post_upgrade_proposal", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let proposal = req.body_bytes(); - state.post_upgrade_proposal(view_number, proposal) - } - .boxed() - })? - .post("postviddisperse", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let disperse = req.body_bytes(); - state.post_vid_disperse(view_number, disperse) - } - .boxed() - })? - .post("postviewsynccertificate", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let proposal = req.body_bytes(); - state.post_view_sync_certificate(view_number, proposal) - } - .boxed() - })? - .post("postcertificate", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let cert = req.body_bytes(); - state.post_da_certificate(view_number, cert) - } - .boxed() - })? - .post("posttransaction", |req, state| { - async move { - let txns = req.body_bytes(); - state.post_transaction(txns) - } - .boxed() - })? - .post("poststaketable", |req, state| { - async move { - //works one key at a time for now - let key = req.body_bytes(); - state.post_staketable(key) - } - .boxed() - })? - .post("postcompletedtransaction", |req, state| { - async move { - //works one txn at a time for now - let txn = req.body_bytes(); - state.post_completed_transaction(txn) - } - .boxed() - })? - .post("secret", |req, state| { - async move { - let view_number: u64 = req.integer_param("view_number")?; - let secret: &str = req.string_param("secret")?; - //if secret is correct and view_number->proposal is empty, proposal is valid - if let Some(prop) = state.proposal(view_number) { - if prop.1.is_empty() { - if prop.0 == secret { - let proposal = req.body_bytes(); - state.post_secret_proposal(view_number, proposal) - } else { - Err(ServerError { - status: StatusCode::BadRequest, - message: format!( - "Wrong secret value for proposal for view {view_number:?}" - ), - }) - } - } else { - Err(ServerError { - status: StatusCode::BadRequest, - message: format!("Proposal already submitted for view {view_number:?}"), - }) - } - } else { - Err(ServerError { - status: StatusCode::BadRequest, - message: format!("No endpoint for view number {view_number:?} yet"), - }) - } - } - .boxed() - })?; - Ok(api) -} - -/// run the web server -/// # Errors -/// TODO -/// this looks like it will panic not error -/// # Panics -/// on errors creating or registering the tide disco api -pub async fn run_web_server< - KEY: SignatureKey + 'static, - NetworkVersion: StaticVersionType + 'static, ->( - shutdown_listener: Option>, - url: Url, - bind_version: NetworkVersion, -) -> io::Result<()> { - let options = Options::default(); - - let web_api = define_api(&options).unwrap(); - let state = State::new(WebServerState::new().with_shutdown_signal(shutdown_listener)); - let mut app = App::, Error>::with_state(state); - - app.register_module::("api", web_api) - .unwrap(); - - let app_future = app.serve(url, bind_version); - - app_future.await -} From b3202a5ab7c1fc626dc6339315578af4594ab04f Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 6 May 2024 16:04:44 +0200 Subject: [PATCH 1007/1393] Make Header::new fallible (#3095) --- example-types/src/block_types.rs | 8 +++++--- task-impls/src/consensus/proposal_helpers.rs | 11 +++++++++-- types/src/traits/block_contents.rs | 5 ++++- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index df18b13429..6de700cfd2 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -193,6 +193,8 @@ pub struct TestBlockHeader { impl> BlockHeader for TestBlockHeader { + type Error = std::convert::Infallible; + async fn new( _parent_state: &TYPES::ValidatedState, _instance_state: &>::Instance, @@ -201,7 +203,7 @@ impl> Block builder_commitment: BuilderCommitment, _metadata: ::Metadata, _builder_fee: BuilderFee, - ) -> Self { + ) -> Result { let parent = parent_leaf.get_block_header(); let mut timestamp = OffsetDateTime::now_utc().unix_timestamp() as u64; @@ -210,12 +212,12 @@ impl> Block timestamp = parent.timestamp; } - Self { + Ok(Self { block_number: parent.block_number + 1, payload_commitment, builder_commitment, timestamp, - } + }) } fn genesis( diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 2cc7d4c987..26dcd54b62 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -210,7 +210,7 @@ pub async fn create_and_send_proposal( round_start_delay: u64, instance_state: Arc, ) { - let block_header = TYPES::BlockHeader::new( + let block_header = match TYPES::BlockHeader::new( state.as_ref(), instance_state.as_ref(), &parent_leaf, @@ -219,7 +219,14 @@ pub async fn create_and_send_proposal( commitment_and_metadata.metadata, commitment_and_metadata.fee, ) - .await; + .await + { + Ok(header) => header, + Err(err) => { + error!(%err, "Failed to construct block header"); + return; + } + }; let proposal = QuorumProposal { block_header, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 84e57f0ac7..ec02232e1c 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -167,6 +167,9 @@ pub struct BuilderFee { pub trait BlockHeader: Serialize + Clone + Debug + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Committable { + /// Error type for this type of block header + type Error: Error + Debug + Send + Sync; + /// Build a header with the parent validate state, instance-level state, parent leaf, payload /// commitment, and metadata. fn new( @@ -177,7 +180,7 @@ pub trait BlockHeader: builder_commitment: BuilderCommitment, metadata: ::Metadata, builder_fee: BuilderFee, - ) -> impl Future + Send; + ) -> impl Future> + Send; /// Build the genesis header, payload, and metadata. fn genesis( From 6bd5ff1a1b908ccf27f5b791ae1d53247f168780 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 6 May 2024 22:06:54 +0800 Subject: [PATCH 1008/1393] [CX_CLEANUP] - Make `high_qc` and `cur_view` private and add functions to avoid accidental overwrite (#3098) * Make view and qc private * Fix lint WIP * linting * More fix * More * More * Use workspace * Fix build after merge --------- Co-authored-by: Brendon Fish --- hotshot/src/lib.rs | 22 +++----- hotshot/src/types/handle.rs | 2 +- task-impls/src/consensus/mod.rs | 8 ++- task-impls/src/consensus/proposal_helpers.rs | 23 ++++---- task-impls/src/consensus/view_change.rs | 2 +- task-impls/src/quorum_proposal.rs | 3 +- task-impls/src/request.rs | 2 +- types/src/consensus.rs | 58 ++++++++++++++++++-- 8 files changed, 82 insertions(+), 38 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index f8b88720d6..233b8b0b74 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -267,22 +267,18 @@ impl> SystemContext { saved_payloads.insert(anchored_leaf.get_view_number(), Arc::clone(&encoded_txns)); } - let consensus = Consensus { + let consensus = Consensus::new( validated_state_map, - vid_shares: BTreeMap::new(), - cur_view: anchored_leaf.get_view_number(), - last_decided_view: anchored_leaf.get_view_number(), + anchored_leaf.get_view_number(), + anchored_leaf.get_view_number(), saved_leaves, saved_payloads, - saved_da_certs: HashMap::new(), // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 - locked_view: anchored_leaf.get_view_number(), - high_qc: initializer.high_qc, - metrics: Arc::clone(&consensus_metrics), - dontuse_decided_upgrade_cert: None, - dontuse_formed_upgrade_certificate: None, - }; + anchored_leaf.get_view_number(), + initializer.high_qc, + Arc::clone(&consensus_metrics), + ); let consensus = Arc::new(RwLock::new(consensus)); let version = Arc::new(RwLock::new(BASE_VERSION)); @@ -328,7 +324,7 @@ impl> SystemContext { self.internal_event_stream .0 .broadcast_direct(Arc::new(HotShotEvent::QCFormed(either::Left( - consensus.high_qc.clone(), + consensus.high_qc().clone(), )))) .await .expect("Genesis Broadcast failed"); @@ -381,7 +377,7 @@ impl> SystemContext { trace!("Adding transaction to our own queue"); let api = self.clone(); - let view_number = api.consensus.read().await.cur_view; + let view_number = api.consensus.read().await.cur_view(); // Wrap up a message let message = DataMessage::SubmitTransaction(transaction.clone(), view_number); diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index b1b0f2a138..30782bc391 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -173,7 +173,7 @@ impl + 'static> SystemContextHandl /// Wrapper to get the view number this node is on. pub async fn get_cur_view(&self) -> TYPES::Time { - self.hotshot.consensus.read().await.cur_view + self.hotshot.consensus.read().await.cur_view() } /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index d49674e856..6a87bc861e 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -35,7 +35,9 @@ use hotshot_types::{ use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +#[cfg(not(feature = "dependency-tasks"))] +use tracing::info; +use tracing::{debug, error, instrument, warn}; use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] @@ -506,7 +508,7 @@ impl> ConsensusTaskState } let mut consensus = self.consensus.write().await; - consensus.high_qc = qc.clone(); + consensus.update_high_qc_if_new(qc.clone()); drop(consensus); debug!( @@ -716,7 +718,7 @@ impl> ConsensusTaskState block_view: view, }); if self.quorum_membership.get_leader(view) == self.public_key - && self.consensus.read().await.high_qc.get_view_number() + 1 == view + && self.consensus.read().await.high_qc().get_view_number() + 1 == view { if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { debug!("Failed to propose; error = {e:?}"); diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 26dcd54b62..567bb78179 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -136,7 +136,7 @@ async fn validate_proposal_safety_and_liveness( .await; } - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc, proposal.data.clone(), consensus.locked_view) + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc(), proposal.data.clone(), consensus.locked_view) }); // We accept the proposal, notify the application layer @@ -231,7 +231,7 @@ pub async fn create_and_send_proposal( let proposal = QuorumProposal { block_header, view_number: view, - justify_qc: consensus.read().await.high_qc.clone(), + justify_qc: consensus.read().await.high_qc().clone(), proposal_certificate: proposal_cert, upgrade_certificate: upgrade_cert, }; @@ -351,7 +351,7 @@ pub async fn get_parent_leaf_and_state( ); let consensus = consensus.read().await; - let parent_view_number = &consensus.high_qc.get_view_number(); + let parent_view_number = &consensus.high_qc().get_view_number(); let parent_view = consensus.validated_state_map.get(parent_view_number).context( format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", **parent_view_number) )?; @@ -361,12 +361,12 @@ pub async fn get_parent_leaf_and_state( format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") )?; - if leaf_commitment != consensus.high_qc.get_data().leaf_commit { + if leaf_commitment != consensus.high_qc().get_data().leaf_commit { // NOTE: This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, - consensus.high_qc.get_data().leaf_commit + consensus.high_qc().get_data().leaf_commit ); } @@ -678,7 +678,7 @@ pub async fn handle_quorum_proposal_recv None, }; - if justify_qc.get_view_number() > consensus_read.high_qc.view_number { + if justify_qc.get_view_number() > consensus_read.high_qc().view_number { if let Err(e) = task_state .storage .write() @@ -692,10 +692,7 @@ pub async fn handle_quorum_proposal_recv consensus_write.high_qc.view_number { - debug!("Updating high QC"); - consensus_write.high_qc = justify_qc.clone(); - } + consensus_write.update_high_qc_if_new(justify_qc.clone()); // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some((parent_leaf, parent_state)) = parent else { @@ -744,7 +741,7 @@ pub async fn handle_quorum_proposal_recv consensus_write.locked_view; - let high_qc = consensus_write.high_qc.clone(); + let high_qc = consensus_write.high_qc().clone(); let locked_view = consensus_write.locked_view; drop(consensus_write); @@ -1036,10 +1033,10 @@ pub async fn handle_quorum_proposal_validated( ); } let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.update_view(new_view); + consensus.update_view_if_new(new_view); tracing::trace!("View updated successfully"); Ok(()) diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 748f2ffb02..9ab6311277 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -536,7 +536,8 @@ impl> QuorumProposalTaskState> DelayedRequester { async fn cancel_vid(&self, req: &VidRequest) -> bool { let view = req.0; let state = self.state.read().await; - state.vid_shares.contains_key(&view) && state.cur_view > view + state.vid_shares.contains_key(&view) && state.cur_view() > view } /// Transform a response into a `HotShotEvent` diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 8233371f6b..987c058e26 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -7,7 +7,7 @@ use std::{ use committable::Commitment; use displaydoc::Display; -use tracing::error; +use tracing::{debug, error}; pub use crate::utils::{View, ViewInner}; use crate::{ @@ -53,7 +53,7 @@ pub struct Consensus { pub saved_da_certs: HashMap>, /// View number that is currently on. - pub cur_view: TYPES::Time, + cur_view: TYPES::Time, /// last view had a successful decide event pub last_decided_view: TYPES::Time, @@ -72,7 +72,7 @@ pub struct Consensus { pub locked_view: TYPES::Time, /// the highqc per spec - pub high_qc: QuorumCertificate, + high_qc: QuorumCertificate, /// A reference to the metrics trait pub metrics: Arc, @@ -263,9 +263,57 @@ impl Default for ConsensusMetricsValue { } impl Consensus { + /// Constructor. + #[allow(clippy::too_many_arguments)] + pub fn new( + validated_state_map: BTreeMap>, + cur_view: TYPES::Time, + last_decided_view: TYPES::Time, + saved_leaves: CommitmentMap>, + saved_payloads: BTreeMap>, + locked_view: TYPES::Time, + high_qc: QuorumCertificate, + metrics: Arc, + ) -> Self { + Consensus { + validated_state_map, + vid_shares: BTreeMap::new(), + saved_da_certs: HashMap::new(), + cur_view, + last_decided_view, + saved_leaves, + saved_payloads, + locked_view, + high_qc, + metrics, + dontuse_decided_upgrade_cert: None, + dontuse_formed_upgrade_certificate: None, + } + } + + /// Get the current view. + pub fn cur_view(&self) -> TYPES::Time { + self.cur_view + } + /// Update the current view. - pub fn update_view(&mut self, view_number: TYPES::Time) { - self.cur_view = view_number; + pub fn update_view_if_new(&mut self, view_number: TYPES::Time) { + if view_number > self.cur_view { + self.cur_view = view_number; + } + } + + /// Get the high QC. + pub fn high_qc(&self) -> &QuorumCertificate { + &self.high_qc + } + + /// Update the high QC if given a newer one. + pub fn update_high_qc_if_new(&mut self, high_qc: QuorumCertificate) { + if high_qc.view_number > self.high_qc.view_number { + debug!("Updating high QC"); + self.high_qc = high_qc; + } } /// gather information from the parent chain of leaves From 961eabc3ab94ad0a76a2ba8cff5bf792dcb8a8d6 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Mon, 6 May 2024 10:49:09 -0500 Subject: [PATCH 1009/1393] pass instance_state to `from_transactions` by ref (#3096) This corrects the mistake of passing Arc to from_transactions. If we pass as reference we can use as an opaque type as we do elsewhere. --------- Co-authored-by: tbro --- example-types/src/block_types.rs | 6 +- task-impls/src/consensus/proposal_helpers.rs | 14 ++--- task-impls/src/transactions.rs | 4 +- testing/src/block_builder.rs | 9 ++- testing/src/view_generator.rs | 16 ++---- testing/tests/tests_1/consensus_task.rs | 11 ++-- testing/tests/tests_1/da_task.rs | 12 ++-- testing/tests/tests_1/proposal_ordering.rs | 7 +-- testing/tests/tests_1/quorum_proposal_task.rs | 55 ++++++------------- testing/tests/tests_1/upgrade_task.rs | 52 +++++------------- testing/tests/tests_1/vid_task.rs | 11 ++-- types/src/data.rs | 8 +-- types/src/traits/block_contents.rs | 4 +- types/src/traits/node_implementation.rs | 2 +- types/src/traits/states.rs | 2 +- 15 files changed, 81 insertions(+), 132 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 6de700cfd2..95b59790a5 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -10,7 +10,6 @@ use hotshot_types::{ traits::{ block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, - states::InstanceState, BlockPayload, ValidatedState, }, utils::BuilderCommitment, @@ -20,7 +19,7 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; use time::OffsetDateTime; -use crate::node_types::TestTypes; +use crate::{node_types::TestTypes, state_types::TestInstanceState}; /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] @@ -116,12 +115,13 @@ impl EncodeBytes for TestMetadata { impl BlockPayload for TestBlockPayload { type Error = BlockError; + type Instance = TestInstanceState; type Transaction = TestTransaction; type Metadata = TestMetadata; fn from_transactions( transactions: impl IntoIterator, - _state: Arc, + _instance_state: &Self::Instance, ) -> Result<(Self, Self::Metadata), Self::Error> { let txns_vec: Vec = transactions.into_iter().collect(); Ok(( diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/proposal_helpers.rs index 567bb78179..39ed2d076f 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/proposal_helpers.rs @@ -424,7 +424,7 @@ async fn publish_proposal_from_upgrade_cert( ensure!(upgrade_cert.in_interim(cur_view), "Cert is not in interim"); let (payload, metadata) = ::from_transactions( Vec::new(), - Arc::<::InstanceState>::clone(&instance_state), + instance_state.as_ref(), ) .context("Failed to build null block payload and metadata")?; @@ -432,11 +432,9 @@ async fn publish_proposal_from_upgrade_cert( let null_block_commitment = null_block::commitment(quorum_membership.total_nodes()) .context("Failed to calculate null block commitment")?; - let null_block_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), - Arc::<::InstanceState>::clone(&instance_state), - ) - .context("Failed to calculate null block fee info")?; + let null_block_fee = + null_block::builder_fee::(quorum_membership.total_nodes(), instance_state.as_ref()) + .context("Failed to calculate null block fee info")?; Ok(async_spawn(async move { create_and_send_proposal( @@ -457,7 +455,7 @@ async fn publish_proposal_from_upgrade_cert( Some(upgrade_cert), None, delay, - Arc::clone(&instance_state), + instance_state, ) .await; })) @@ -538,7 +536,7 @@ async fn publish_proposal_from_commitment_and_metadata( proposal_upgrade_certificate, proposal_certificate, delay, - Arc::clone(&instance_state), + instance_state, ) .await; }); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 5c38b4a014..58d72b014c 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -175,7 +175,7 @@ impl< // Calculate the builder fee for the empty block let Some(builder_fee) = null_block::builder_fee( self.membership.total_nodes(), - Arc::<::InstanceState>::clone(&self.instance_state), + self.instance_state.as_ref(), ) else { error!("Failed to get builder fee"); return None; @@ -184,7 +184,7 @@ impl< // Create an empty block payload and metadata let Ok((_, metadata)) = ::BlockPayload::from_transactions( vec![], - Arc::<::InstanceState>::clone(&self.instance_state), + &self.instance_state, ) else { error!("Failed to create empty block payload"); return None; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 84e6efbf19..bd9c469bd8 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -20,7 +20,7 @@ use hotshot_builder_api::{ builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; -use hotshot_example_types::{block_types::TestTransaction, state_types::TestInstanceState}; +use hotshot_example_types::block_types::TestTransaction; use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, @@ -540,9 +540,12 @@ async fn build_block( AvailableBlockInfo, AvailableBlockData, AvailableBlockHeaderInput, -) { +) +where + ::InstanceState: Default, +{ let (block_payload, metadata) = - TYPES::BlockPayload::from_transactions(transactions, Arc::new(TestInstanceState {})) + TYPES::BlockPayload::from_transactions(transactions, &Default::default()) .expect("failed to build block payload from transactions"); let commitment = block_payload.builder_commitment(&metadata); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 321e5e6ac9..914d58e2ee 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -60,11 +60,9 @@ impl TestView { let transactions = Vec::new(); - let (block_payload, metadata) = TestBlockPayload::from_transactions( - transactions.clone(), - Arc::new(TestInstanceState {}), - ) - .unwrap(); + let (block_payload, metadata) = + TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}) + .unwrap(); let builder_commitment = block_payload.builder_commitment(&metadata); let (private_key, public_key) = key_pair_for_id(*genesis_view); @@ -182,11 +180,9 @@ impl TestView { let leader_public_key = public_key; - let (block_payload, metadata) = TestBlockPayload::from_transactions( - transactions.clone(), - Arc::new(TestInstanceState {}), - ) - .unwrap(); + let (block_payload, metadata) = + TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}) + .unwrap(); let builder_commitment = block_payload.builder_commitment(&metadata); let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 652bd74c26..7433c57ad3 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -3,7 +3,7 @@ use std::sync::Arc; -use hotshot::tasks::{task_state::CreateTaskState}; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, @@ -96,11 +96,8 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![ @@ -376,7 +373,7 @@ async fn test_view_sync_finalize_propose() { builder_commitment, TestMetadata, ViewNumber::new(4), - null_block::builder_fee(4, Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee(4, &TestInstanceState {}).unwrap(), ), ], outputs: vec![ diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 2aa0a9472f..697857344b 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,12 +1,10 @@ use std::sync::Arc; - -use hotshot_example_types::state_types::TestInstanceState; - use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, }; use hotshot_task_impls::{da::DATaskState, events::HotShotEvent::*}; use hotshot_testing::{ @@ -33,7 +31,6 @@ async fn test_da_task() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; @@ -78,7 +75,8 @@ async fn test_da_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], @@ -114,7 +112,6 @@ async fn test_da_task_storage_failure() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; @@ -159,7 +156,8 @@ async fn test_da_task_storage_failure() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 1adde8799b..7cd80bd62f 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -92,11 +92,8 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { builder_commitment, TestMetadata, ViewNumber::new(node_id), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 3034aef250..4badd978e1 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,9 +1,7 @@ -use std::sync::Arc; - -use hotshot::tasks::{task_state::CreateTaskState}; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, - state_types::{TestInstanceState, }, + state_types::TestInstanceState, }; use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ @@ -75,11 +73,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { builder_commitment, TestMetadata, ViewNumber::new(1), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -165,11 +160,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment, TestMetadata, ViewNumber::new(node_id), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -231,11 +223,8 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -302,11 +291,8 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], outputs: vec![quorum_proposal_send()], @@ -361,11 +347,8 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), metadata: TestMetadata, - fee: null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + fee: null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -381,11 +364,8 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment: builder_commitment.clone(), metadata: TestMetadata, - fee: null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + fee: null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: @@ -411,11 +391,8 @@ async fn test_quorum_proposal_task_propose_now() { commitment: payload_commitment, builder_commitment, metadata: TestMetadata, - fee: null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + fee: null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), block_view: ViewNumber::new(2), }, secondary_proposal_information: diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index f3331cb24e..f401d31da2 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -1,8 +1,6 @@ // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] -use hotshot_example_types::state_types::TestInstanceState; -use std::sync::Arc; use std::time::Duration; use hotshot::{ @@ -12,6 +10,7 @@ use hotshot::{ use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, }; use hotshot_macros::test_scripts; use hotshot_task_impls::{ @@ -251,11 +250,8 @@ async fn test_upgrade_and_consensus_task() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QCFormed(either::Either::Left(proposals[1].data.justify_qc.clone())), ], @@ -440,11 +436,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[1].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), ], vec![ @@ -455,11 +448,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -471,11 +461,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[3].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -487,11 +474,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[4].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(5), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -502,11 +486,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[5].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(6), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], @@ -518,11 +499,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[6].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(7), - null_block::builder_fee( - quorum_membership.total_nodes(), - Arc::new(TestInstanceState {}), - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 9730b7a459..78ed8caaf8 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,9 +1,10 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; -use hotshot_example_types::state_types::TestInstanceState; + use hotshot::types::SignatureKey; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::TestTypes, + state_types::TestInstanceState, }; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; @@ -36,7 +37,8 @@ async fn test_vid_task() { let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction(vec![0])]; - let (payload, metadata) = TestBlockPayload::from_transactions(transactions.clone(), Arc::new(TestInstanceState {})).unwrap(); + let (payload, metadata) = + TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}).unwrap(); let builder_commitment = payload.builder_commitment(&metadata); let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -86,7 +88,7 @@ async fn test_vid_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}).unwrap(), )); input.push(HotShotEvent::BlockReady( vid_disperse.clone(), @@ -108,7 +110,8 @@ async fn test_vid_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), Arc::new(TestInstanceState {})).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), 1, ); diff --git a/types/src/data.rs b/types/src/data.rs index 0777906923..d039b52f44 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -457,7 +457,8 @@ impl Leaf { /// interpreted as bytes). #[must_use] pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { - let (payload, metadata) = TYPES::BlockPayload::genesis(); + let (payload, metadata) = + TYPES::BlockPayload::from_transactions([], instance_state).unwrap(); let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload.encode().expect("unable to encode genesis payload"); @@ -701,7 +702,6 @@ impl Leaf { pub mod null_block { #![allow(missing_docs)] - use std::sync::Arc; use jf_primitives::vid::VidScheme; use memoize::memoize; @@ -735,7 +735,7 @@ pub mod null_block { #[must_use] pub fn builder_fee( num_storage_nodes: usize, - state: Arc, + instance_state: &::Instance, ) -> Option> { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; @@ -746,7 +746,7 @@ pub mod null_block { ); let (_null_block, null_block_metadata) = - ::from_transactions([], state).ok()?; + ::from_transactions([], instance_state).ok()?; match TYPES::BuilderSignatureKey::sign_fee( &priv_key, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index ec02232e1c..38091f792b 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -49,6 +49,8 @@ pub trait BlockPayload: /// The error type for this type of block type Error: Error + Debug + Send + Sync + Serialize + DeserializeOwned; + /// The type of the instance-level state this state is associated with + type Instance: InstanceState; /// The type of the transitions we are applying type Transaction: Transaction; /// Data created during block building which feeds into the block header @@ -68,7 +70,7 @@ pub trait BlockPayload: /// If the transaction length conversion fails. fn from_transactions( transactions: impl IntoIterator, - state: Arc, + instance_state: &Self::Instance, ) -> Result<(Self, Self::Metadata), Self::Error>; /// Build a payload with the encoded transaction bytes, metadata, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 6935ca4653..76945dad01 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -201,7 +201,7 @@ pub trait NodeType: /// The block type that this hotshot setup is using. /// /// This should be the same block that `ValidatedState::BlockPayload` is using. - type BlockPayload: BlockPayload; + type BlockPayload: BlockPayload; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; /// The transaction type that this hotshot setup is using. diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 63b6bb6a69..069b6928fe 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -18,7 +18,7 @@ use crate::{ }; /// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Debug + Send + Sync {} +pub trait InstanceState: Debug + Send + Sync + Default {} /// Application-specific state delta, which will be used to store a list of merkle tree entries. pub trait StateDelta: Debug + Send + Sync + Serialize + for<'a> Deserialize<'a> {} From f494aaf8e9441da39ca0033ac4b28522a3081655 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 6 May 2024 11:52:10 -0400 Subject: [PATCH 1010/1393] [Auto Bench] Fix unexpected panic when reporting benchmark results (#3102) * init push * comment update --- examples/infra/mod.rs | 11 +++++------ orchestrator/api.toml | 2 +- orchestrator/run-config.toml | 2 +- orchestrator/src/config.rs | 2 -- testing/src/test_builder.rs | 8 +------- 5 files changed, 8 insertions(+), 17 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 426afce600..5917cd87f5 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -446,7 +446,6 @@ pub trait RunDA< let mut event_stream = context.get_event_stream(); let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; - let mut failed_num_views = 0; context.hotshot.start_consensus().await; @@ -528,11 +527,9 @@ pub trait RunDA< // when we make progress, submit new events } EventType::ReplicaViewTimeout { view_number } => { - failed_num_views += 1; warn!("Timed out as a replicas in view {:?}", view_number); } EventType::ViewTimeout { view_number } => { - failed_num_views += 1; warn!("Timed out in view {:?}", view_number); } _ => {} // mostly DA proposal @@ -543,18 +540,20 @@ pub trait RunDA< let consensus_lock = context.hotshot.get_consensus(); let consensus = consensus_lock.read().await; let total_num_views = usize::try_from(consensus.locked_view.get_u64()).unwrap(); + // `failed_num_views` could include uncommitted views + let failed_num_views = total_num_views - num_successful_commits; // When posting to the orchestrator, note that the total number of views also include un-finalized views. println!("[{node_index}]: Total views: {total_num_views}, Failed views: {failed_num_views}, num_successful_commits: {num_successful_commits}"); - // +2 is for uncommitted views - assert!(total_num_views <= (failed_num_views + num_successful_commits + 2)); // Output run results let total_time_elapsed = start.elapsed(); // in seconds println!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); if total_transactions_committed != 0 { + // prevent devision by 0 + let total_time_elapsed_sec = std::cmp::max(total_time_elapsed.as_secs(), 1u64); // extra 8 bytes for timestamp let throughput_bytes_per_sec = total_transactions_committed * (transaction_size_in_bytes + 8) - / total_time_elapsed.as_secs(); + / total_time_elapsed_sec; let avg_latency_in_sec = total_latency / num_latency; println!("[{node_index}]: throughput: {throughput_bytes_per_sec} bytes/sec, avg_latency: {avg_latency_in_sec} sec."); BenchResults { diff --git a/orchestrator/api.toml b/orchestrator/api.toml index a19d05b85c..4915e0c9e3 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -30,7 +30,7 @@ DOC = """ Get the latest temporary node index only for generating validator's key pair for testing in hotshot, later the generated key pairs might be bound with other node_index. """ -# POST the node's node index to generate public key for pubkey collection +# POST the node's node index for pubkey and is_da collection [route.post_pubkey] PATH = ["pubkey/:is_da"] METHOD = "POST" diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 9086b5f87c..39d3b8b485 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -1,4 +1,4 @@ -rounds = 10 +rounds = 100 indexed_da = true transactions_per_round = 10 transaction_size = 1000 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 9febee8532..1cd8416461 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -497,8 +497,6 @@ pub struct NetworkConfigFile { pub random_builder: Option, } -impl NetworkConfigFile {} - impl From> for NetworkConfig { fn from(val: NetworkConfigFile) -> Self { NetworkConfig { diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index de2237022f..4af8cebcf4 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -2,7 +2,6 @@ use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NetworkReliability, TestableNodeImplementation}; use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; -use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::{ traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, }; @@ -266,18 +265,13 @@ impl TestDescription { }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. - let mut my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( + let my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( [0u8; 32], node_id, 1, // This is the config for node 0 0 < da_staked_committee_size, ); - if node_id == 0 { - my_own_validator_config = ValidatorConfig::from(ValidatorConfigFile::from_file( - "config/ValidatorConfigFile.toml", - )); - } // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { // TODO this doesn't exist anymore From 3d6481662a48fac93b03a22833db006e0f383d23 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Mon, 6 May 2024 11:31:47 -0500 Subject: [PATCH 1011/1393] Remove `Default` bound from `InstanceState` trait (#3106) Instead supply bounds in `testing` crate. This avoids forcing implementation of `Default`. Co-authored-by: tbro --- testing/src/block_builder.rs | 27 ++++++++++++++++++++++----- types/src/traits/states.rs | 2 +- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index bd9c469bd8..2f92bee4d6 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -37,7 +37,10 @@ use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, App, Url}; #[async_trait] -pub trait TestBuilderImplementation { +pub trait TestBuilderImplementation +where + ::InstanceState: Default, +{ type Config: Default; async fn start( @@ -52,6 +55,7 @@ pub struct RandomBuilderImplementation; impl TestBuilderImplementation for RandomBuilderImplementation where TYPES: NodeType, + ::InstanceState: Default, { type Config = RandomBuilderConfig; @@ -69,7 +73,10 @@ where pub struct SimpleBuilderImplementation; #[async_trait] -impl TestBuilderImplementation for SimpleBuilderImplementation { +impl TestBuilderImplementation for SimpleBuilderImplementation +where + ::InstanceState: Default, +{ type Config = (); async fn start( @@ -140,7 +147,10 @@ where /// Spawn a task building blocks, configured with given options #[allow(clippy::missing_panics_doc)] // ony panics on 16-bit platforms - pub fn run(&self, num_storage_nodes: usize, options: RandomBuilderConfig) { + pub fn run(&self, num_storage_nodes: usize, options: RandomBuilderConfig) + where + ::InstanceState: Default, + { let blocks = self.blocks.clone(); let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key.clone()); async_spawn(async move { @@ -262,6 +272,7 @@ impl BuilderDataSource for RandomBuilderSource { pub fn run_random_builder(url: Url, num_storage_nodes: usize, options: RandomBuilderConfig) where TYPES: NodeType, + ::InstanceState: Default, { let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); let source = RandomBuilderSource::new(pub_key, priv_key); @@ -307,7 +318,10 @@ impl ReadState for SimpleBuilderSource { } #[async_trait] -impl BuilderDataSource for SimpleBuilderSource { +impl BuilderDataSource for SimpleBuilderSource +where + ::InstanceState: Default, +{ async fn get_available_blocks( &self, _for_parent: &VidCommitment, @@ -411,7 +425,10 @@ impl BuilderDataSource for SimpleBuilderSource { } impl SimpleBuilderSource { - pub async fn run(self, url: Url) { + pub async fn run(self, url: Url) + where + ::InstanceState: Default, + { let builder_api = hotshot_builder_api::builder::define_api::< SimpleBuilderSource, TYPES, diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 069b6928fe..63b6bb6a69 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -18,7 +18,7 @@ use crate::{ }; /// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Debug + Send + Sync + Default {} +pub trait InstanceState: Debug + Send + Sync {} /// Application-specific state delta, which will be used to store a list of merkle tree entries. pub trait StateDelta: Debug + Send + Sync + Serialize + for<'a> Deserialize<'a> {} From d2076f0a915904ab35ffe863e613074a91041aa8 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 6 May 2024 16:19:33 -0400 Subject: [PATCH 1012/1393] Update the CDN (#3108) * update pCDN, move topics application-side * fix topics --- examples/infra/mod.rs | 6 ++--- hotshot/src/traits.rs | 2 +- .../src/traits/networking/push_cdn_network.rs | 27 ++++++++++--------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 5917cd87f5..3cd06894ef 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -13,14 +13,14 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use async_trait::async_trait; -use cdn_broker::reexports::{crypto::signature::KeyPair, message::Topic}; +use cdn_broker::reexports::crypto::signature::KeyPair; use chrono::Utc; use clap::{value_parser, Arg, Command, Parser}; use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - derive_libp2p_peer_id, CombinedNetworks, Libp2pNetwork, PushCdnNetwork, + derive_libp2p_peer_id, CombinedNetworks, Libp2pNetwork, PushCdnNetwork, Topic, WrappedSignatureKey, }, BlockPayload, NodeImplementation, @@ -642,7 +642,7 @@ where .cdn_marshal_address .clone() .expect("`cdn_marshal_address` needs to be supplied for a push CDN run"), - topics.iter().map(ToString::to_string).collect(), + topics, keypair, ) .expect("failed to create network"); diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 64812a37fb..99d22fa569 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -17,7 +17,7 @@ pub mod implementations { }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ - KeyPair, ProductionDef, PushCdnNetwork, TestingDef, WrappedSignatureKey, + KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic, WrappedSignatureKey, }, NetworkingMetricsValue, }; diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 7f0ebb70aa..a64dd6ff49 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -21,7 +21,7 @@ use cdn_client::{ reexports::{ connection::protocols::Quic, crypto::signature::{Serializable, SignatureScheme}, - message::{Broadcast, Direct, Message as PushCdnMessage, Topic}, + message::{Broadcast, Direct, Message as PushCdnMessage}, }, Client, Config as ClientConfig, }; @@ -153,6 +153,15 @@ pub struct PushCdnNetwork { is_paused: Arc, } +/// The enum for the topics we can subscribe to in the Push CDN +#[repr(u8)] +pub enum Topic { + /// The global topic + Global = 0, + /// The DA topic + DA = 1, +} + impl PushCdnNetwork { /// Create a new `PushCdnNetwork` (really a client) from a marshal endpoint, a list of initial /// topics we are interested in, and our wrapped keypair that we use to authenticate with the @@ -162,19 +171,13 @@ impl PushCdnNetwork { /// If we fail to build the config pub fn new( marshal_endpoint: String, - topics: Vec, + topics: Vec, keypair: KeyPair>, ) -> anyhow::Result { - // Transform topics to our internal representation - let mut computed_topics: Vec = Vec::new(); - for topic in topics { - computed_topics.push(topic.try_into()?); - } - // Build config let config = ClientConfig { endpoint: marshal_endpoint, - subscribed_topics: computed_topics, + subscribed_topics: topics.into_iter().map(|t| t as u8).collect(), keypair, use_local_authority: true, }; @@ -220,7 +223,7 @@ impl PushCdnNetwork { // TODO: check if we need to print this error if self .client - .send_broadcast_message(vec![topic], serialized_message) + .send_broadcast_message(vec![topic as u8], serialized_message) .await .is_err() { @@ -362,9 +365,9 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Calculate if we're DA or not let topics = if node_id < da_committee_size as u64 { - vec![Topic::DA, Topic::Global] + vec![Topic::DA as u8, Topic::Global as u8] } else { - vec![Topic::Global] + vec![Topic::Global as u8] }; // Configure our client From 0449e8b7353419834cd833380048ae38ad4445a4 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 7 May 2024 06:18:41 -0400 Subject: [PATCH 1013/1393] fix inadvertent DA change (#3110) --- examples/infra/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 3cd06894ef..a910fc5de6 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -387,7 +387,7 @@ pub trait RunDA< // as the known da nodes let da_membership = ::Membership::create_election( known_nodes_with_stake.clone(), - known_nodes_with_stake, + config.config.known_da_nodes.clone(), config.config.fixed_leader_for_gpuvid, ); From 8a58f83903c9646f1104c0a31e4db20535b8f0df Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 7 May 2024 10:25:31 -0400 Subject: [PATCH 1014/1393] Fix Dockerfiles, update examples (#3113) * remove webserver dockers, generalize orchestrator * remove webserver from launch.json * change orchestrator in README and scripts --- examples/Cargo.toml | 13 ++-- examples/combined/all.rs | 7 +- examples/combined/orchestrator.rs | 9 +-- examples/infra/mod.rs | 7 +- examples/libp2p/all.rs | 7 +- examples/libp2p/orchestrator.rs | 33 -------- examples/{webserver => }/orchestrator.rs | 14 +--- examples/push-cdn/all.rs | 7 +- examples/webserver/README.md | 65 ---------------- examples/webserver/all.rs | 96 ------------------------ examples/webserver/multi-validator.rs | 41 ---------- examples/webserver/multi-webserver.rs | 67 ----------------- examples/webserver/types.rs | 29 ------- examples/webserver/validator.rs | 26 ------- examples/webserver/webserver.rs | 38 ---------- orchestrator/README.md | 4 +- 16 files changed, 17 insertions(+), 446 deletions(-) delete mode 100644 examples/libp2p/orchestrator.rs rename examples/{webserver => }/orchestrator.rs (65%) delete mode 100644 examples/webserver/README.md delete mode 100644 examples/webserver/all.rs delete mode 100644 examples/webserver/multi-validator.rs delete mode 100644 examples/webserver/multi-webserver.rs delete mode 100644 examples/webserver/types.rs delete mode 100644 examples/webserver/validator.rs delete mode 100644 examples/webserver/webserver.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e10ca3b820..51f6683e03 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -21,7 +21,12 @@ hotshot-testing = ["hotshot/hotshot-testing"] randomized-leader-election = [] fixed-leader-election = [] -# libp2p +# Common +[[example]] +name = "orchestrator" +path = "orchestrator.rs" + +# Libp2p [[example]] name = "validator-libp2p" path = "libp2p/validator.rs" @@ -30,15 +35,11 @@ path = "libp2p/validator.rs" name = "multi-validator-libp2p" path = "libp2p/multi-validator.rs" -[[example]] -name = "orchestrator-libp2p" -path = "libp2p/orchestrator.rs" - [[example]] name = "all-libp2p" path = "libp2p/all.rs" -# combined +# Combined [[example]] name = "all-combined" path = "combined/all.rs" diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 55f20a570a..5d7665697b 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -125,12 +125,7 @@ async fn main() { }); // orchestrator - async_spawn(run_orchestrator::< - TestTypes, - DANetwork, - QuorumNetwork, - NodeImpl, - >(OrchestratorArgs { + async_spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index 067c827efb..12b376628e 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -6,10 +6,7 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; -use crate::{ - infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork}, -}; +use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; /// general infra used for this example #[path = "../infra/mod.rs"] pub mod infra; @@ -21,9 +18,7 @@ async fn main() { setup_logging(); setup_backtrace(); let (config, orchestrator_url) = read_orchestrator_init_config::(); - run_orchestrator::(OrchestratorArgs::< - TestTypes, - > { + run_orchestrator::(OrchestratorArgs:: { url: orchestrator_url.clone(), config: config.clone(), }) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index a910fc5de6..86a40d6313 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -301,12 +301,7 @@ pub fn load_config_from_file( } /// Runs the orchestrator -pub async fn run_orchestrator< - TYPES: NodeType, - DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, - QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, - NODE: NodeImplementation, ->( +pub async fn run_orchestrator( OrchestratorArgs { url, config }: OrchestratorArgs, ) { println!("Starting orchestrator",); diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 09ffa3aacb..36ec5c0258 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -32,12 +32,7 @@ async fn main() { let (config, orchestrator_url) = read_orchestrator_init_config::(); // orchestrator - async_spawn(run_orchestrator::< - TestTypes, - DANetwork, - QuorumNetwork, - NodeImpl, - >(OrchestratorArgs { + async_spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); diff --git a/examples/libp2p/orchestrator.rs b/examples/libp2p/orchestrator.rs deleted file mode 100644 index 0a8761b95f..0000000000 --- a/examples/libp2p/orchestrator.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! An orchestrator using libp2p - -/// types used for this example -pub mod types; - -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use hotshot_example_types::state_types::TestTypes; -use tracing::instrument; - -use crate::{ - infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork}, -}; - -/// general infra used for this example -#[path = "../infra/mod.rs"] -pub mod infra; - -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] -#[instrument] -async fn main() { - setup_logging(); - setup_backtrace(); - let (config, orchestrator_url) = read_orchestrator_init_config::(); - run_orchestrator::(OrchestratorArgs::< - TestTypes, - > { - url: orchestrator_url.clone(), - config: config.clone(), - }) - .await; -} diff --git a/examples/webserver/orchestrator.rs b/examples/orchestrator.rs similarity index 65% rename from examples/webserver/orchestrator.rs rename to examples/orchestrator.rs index 8d2bcff5d8..6e352e9ffc 100644 --- a/examples/webserver/orchestrator.rs +++ b/examples/orchestrator.rs @@ -1,19 +1,13 @@ //! A orchestrator using the web server -/// types used for this example -pub mod types; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; -use crate::{ - infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork}, -}; +use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; /// general infra used for this example -#[path = "../infra/mod.rs"] +#[path = "./infra/mod.rs"] pub mod infra; #[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] @@ -23,9 +17,7 @@ async fn main() { setup_logging(); setup_backtrace(); let (config, orchestrator_url) = read_orchestrator_init_config::(); - run_orchestrator::(OrchestratorArgs::< - TestTypes, - > { + run_orchestrator::(OrchestratorArgs:: { url: orchestrator_url.clone(), config: config.clone(), }) diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 137a99f41c..c5e8989f43 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -35,12 +35,7 @@ async fn main() { let (config, orchestrator_url) = read_orchestrator_init_config::(); // Start the orhcestrator - async_spawn(run_orchestrator::< - TestTypes, - DANetwork, - QuorumNetwork, - NodeImpl, - >(OrchestratorArgs { + async_spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); diff --git a/examples/webserver/README.md b/examples/webserver/README.md deleted file mode 100644 index bb5c323df6..0000000000 --- a/examples/webserver/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Commands to run da examples: -1a)Start web servers by either running 3 servers: -``` -just async_std example webserver -- -just async_std example webserver -- -``` - -2) Start orchestrator: -``` -just async_std example orchestrator-webserver -- --orchestrator_url --config_file -``` - -3a) Start validator: -``` -just async_std example validator-webserver -- -``` - -3b) Or start multiple validators: -``` -just async_std example multi-validator-webserver -- -``` - -I.e. -``` -just async_std example webserver -- http://127.0.0.1:9000 -just async_std example webserver -- http://127.0.0.1:9001 -just async_std example orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --transactions_per_round 1 --transaction_size 512 --rounds 10 --fixed_leader_for_gpuvid 0 --webserver_url http://127.0.0.1:9000 --da_webserver_url http://127.0.0.1:9001 -just async_std example multi-validator-webserver -- 10 http://127.0.0.1:4444 -``` - - -OR: - -`just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://localhost:4444` - -For other argument setting, checkout `read_orchestrator_init_config` in `crates/examples/infra/mod.rs`. - -One example is: `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 15`. - -Another example is `just async_std example all-webserver -- --config_file ./crates/orchestrator/run-config.toml --total_nodes 20 --da_committee_size 5 --transactions_per_round 10 --transaction_size 512 --rounds 100`, I'll get throughput `0.29M/s` locally for this one. - -If using gpu-vid, you have to run: -``` -just async_std example webserver -- http://127.0.0.1:9000 -just async_std example webserver -- http://127.0.0.1:9001 -just async_std example_fixed_leader orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --fixed_leader_for_gpuvid 1 -just async_std example_gpuvid_leader multi-validator-webserver -- 1 http://127.0.0.1:4444 -sleep 1m -just async_std example_fixed_leader multi-validator-webserver -- 9 http://127.0.0.1:4444 -``` - -Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. - - -If you don't have a gpu but want to test out fixed leader, you can run: -``` -just async_std example webserver -- http://127.0.0.1:9000 -just async_std example webserver -- http://127.0.0.1:9001 -just async_std example_fixed_leader orchestrator-webserver -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://127.0.0.1:4444 --total_nodes 10 --da_committee_size 5 --transactions_per_round 1 --transaction_size 512 --rounds 10 --fixed_leader_for_gpuvid 2 --webserver_url http://127.0.0.1:9000 --da_webserver_url http://127.0.0.1:9001 -just async_std example_fixed_leader multi-validator-webserver -- 2 http://127.0.0.1:4444 -sleep 1m -just async_std example_fixed_leader multi-validator-webserver -- 8 http://127.0.0.1:4444 -``` - -Remember, you have to run leaders first, then other validators, so that leaders will have lower index. \ No newline at end of file diff --git a/examples/webserver/all.rs b/examples/webserver/all.rs deleted file mode 100644 index 7425915558..0000000000 --- a/examples/webserver/all.rs +++ /dev/null @@ -1,96 +0,0 @@ -//! A example program using the web server -/// types used for this example -pub mod types; - -use std::sync::Arc; - -use crate::{ - infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, -}; - -/// general infra used for this example -#[path = "../infra/mod.rs"] -pub mod infra; - -use async_compatibility_layer::{art::async_spawn, channel::oneshot}; -use hotshot_example_types::state_types::TestTypes; -use hotshot_orchestrator::client::ValidatorArgs; -use hotshot_types::constants::WebServerVersion; -use surf_disco::Url; -use tracing::error; -use vbs::version::StaticVersionType; - -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] -async fn main() { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; - setup_logging(); - setup_backtrace(); - - let (config, orchestrator_url) = read_orchestrator_init_config::(); - - // spawn web servers - let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); - let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); - let _sender = Arc::new(server_shutdown_sender_cdn); - let _sender = Arc::new(server_shutdown_sender_da); - - async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown_cdn), - Url::parse("http://localhost:9000").unwrap(), - WebServerVersion::instance(), - ) - .await - { - error!("Problem starting cdn web server: {:?}", e); - } - }); - async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown_da), - Url::parse("http://localhost:9001").unwrap(), - WebServerVersion::instance(), - ) - .await - { - error!("Problem starting da web server: {:?}", e); - } - }); - - // web server orchestrator - async_spawn(run_orchestrator::< - TestTypes, - DANetwork, - QuorumNetwork, - NodeImpl, - >(OrchestratorArgs:: { - url: orchestrator_url.clone(), - config: config.clone(), - })); - - // multi validator run - let mut nodes = Vec::new(); - for _ in 0..(config.config.num_nodes_with_stake.get()) { - let orchestrator_url = orchestrator_url.clone(); - let node = async_spawn(async move { - infra::main_entry_point::( - ValidatorArgs { - url: orchestrator_url, - advertise_address: None, - network_config_file: None, - }, - ) - .await; - }); - nodes.push(node); - } - let _result = futures::future::join_all(nodes).await; -} diff --git a/examples/webserver/multi-validator.rs b/examples/webserver/multi-validator.rs deleted file mode 100644 index b93ed63409..0000000000 --- a/examples/webserver/multi-validator.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! A multi-validator using the web server -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; -use clap::Parser; -use hotshot_example_types::state_types::TestTypes; -use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; -use tracing::instrument; - -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; - -/// types used for this example -pub mod types; - -/// general infra used for this example -#[path = "../infra/mod.rs"] -pub mod infra; - -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] -#[instrument] -async fn main() { - setup_logging(); - setup_backtrace(); - let args = MultiValidatorArgs::parse(); - tracing::error!("connecting to orchestrator at {:?}", args.url); - let mut nodes = Vec::new(); - for node_index in 0..args.num_nodes { - let args = args.clone(); - - let node = async_spawn(async move { - infra::main_entry_point::( - ValidatorArgs::from_multi_args(args, node_index), - ) - .await; - }); - nodes.push(node); - } - let _result = futures::future::join_all(nodes).await; -} diff --git a/examples/webserver/multi-webserver.rs b/examples/webserver/multi-webserver.rs deleted file mode 100644 index 51b4f69021..0000000000 --- a/examples/webserver/multi-webserver.rs +++ /dev/null @@ -1,67 +0,0 @@ -//! A multi web server -use std::sync::Arc; - -use async_compatibility_layer::{ - art::async_spawn, - channel::oneshot, - logging::{setup_backtrace, setup_logging}, -}; -use clap::Parser; -use hotshot_example_types::state_types::TestTypes; -use hotshot_types::{constants::WebServerVersion, traits::node_implementation::NodeType}; -use surf_disco::Url; -use tracing::error; -use vbs::version::StaticVersionType; - -/// Arguments to run multiple web servers -#[derive(Parser, Debug)] -struct MultiWebServerArgs { - /// consensus url - consensus_url: Url, - /// data availability server url - da_url: Url, -} - -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] -async fn main() { - setup_backtrace(); - setup_logging(); - - let args = MultiWebServerArgs::parse(); - let (server_shutdown_sender_cdn, server_shutdown_cdn) = oneshot(); - let (server_shutdown_sender_da, server_shutdown_da) = oneshot(); - let _sender = Arc::new(server_shutdown_sender_cdn); - let _sender = Arc::new(server_shutdown_sender_da); - - let consensus_server = async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown_cdn), - args.consensus_url, - WebServerVersion::instance(), - ) - .await - { - error!("Problem starting cdn web server: {:?}", e); - } - }); - let da_server = async_spawn(async move { - if let Err(e) = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown_da), - args.da_url, - WebServerVersion::instance(), - ) - .await - { - error!("Problem starting da web server: {:?}", e); - } - }); - - let _result = futures::future::join_all(vec![consensus_server, da_server]).await; -} diff --git a/examples/webserver/types.rs b/examples/webserver/types.rs deleted file mode 100644 index 6d57090b76..0000000000 --- a/examples/webserver/types.rs +++ /dev/null @@ -1,29 +0,0 @@ -use std::fmt::Debug; - -use hotshot::traits::implementations::WebServerNetwork; -use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; -use hotshot_types::{constants::WebServerVersion, traits::node_implementation::NodeImplementation}; -use serde::{Deserialize, Serialize}; - -use crate::infra::WebServerDARun; - -/// dummy struct so we can choose types -#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] -pub struct NodeImpl {} - -/// convenience type alias -pub type DANetwork = WebServerNetwork; -/// convenience type alias -pub type VIDNetwork = WebServerNetwork; -/// convenience type alias -pub type QuorumNetwork = WebServerNetwork; -/// convenience type alias -pub type ViewSyncNetwork = WebServerNetwork; - -impl NodeImplementation for NodeImpl { - type CommitteeNetwork = DANetwork; - type QuorumNetwork = QuorumNetwork; - type Storage = TestStorage; -} -/// convenience type alias -pub type ThisRun = WebServerDARun; diff --git a/examples/webserver/validator.rs b/examples/webserver/validator.rs deleted file mode 100644 index 1991c0e857..0000000000 --- a/examples/webserver/validator.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! A validator using the web server -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; -use hotshot_example_types::state_types::TestTypes; -use hotshot_orchestrator::client::ValidatorArgs; -use tracing::{info, instrument}; - -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; - -/// types used for this example -pub mod types; - -/// general infra used for this example -#[path = "../infra/mod.rs"] -pub mod infra; - -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] -#[instrument] -async fn main() { - setup_logging(); - setup_backtrace(); - let args = ValidatorArgs::parse(); - info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; -} diff --git a/examples/webserver/webserver.rs b/examples/webserver/webserver.rs deleted file mode 100644 index e8f6e40b42..0000000000 --- a/examples/webserver/webserver.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! web server example -use std::sync::Arc; - -use async_compatibility_layer::{ - channel::oneshot, - logging::{setup_backtrace, setup_logging}, -}; -use clap::Parser; -use hotshot_example_types::state_types::TestTypes; -use hotshot_types::constants::WebServerVersion; -use surf_disco::Url; -use vbs::version::StaticVersionType; - -/// web server arguments -#[derive(Parser, Debug)] -struct WebServerArgs { - /// url to run on - url: Url, -} - -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] -async fn main() { - setup_backtrace(); - setup_logging(); - let args = WebServerArgs::parse(); - let (server_shutdown_sender, server_shutdown) = oneshot(); - let _sender = Arc::new(server_shutdown_sender); - let _result = hotshot_web_server::run_web_server::< - ::SignatureKey, - WebServerVersion, - >( - Some(server_shutdown), - args.url, - WebServerVersion::instance(), - ) - .await; -} diff --git a/orchestrator/README.md b/orchestrator/README.md index 979dceb64c..09df305c34 100644 --- a/orchestrator/README.md +++ b/orchestrator/README.md @@ -2,6 +2,4 @@ This crate implements an orchestrator that coordinates starting the network with a particular configuration. It is useful for testing and benchmarking. Like the web server, the orchestrator is built using [Tide Disco](https://github.com/EspressoSystems/tide-disco). -To run the orchestrator for a libp2p network: `just async_std example orchestrator-libp2p http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` - -To run the orchestrator for a webserver network: `just async_std example orchestrator-webserver http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml ` \ No newline at end of file +To run the orchestrator: `just async_std example orchestrator http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` \ No newline at end of file From 99e297a40bb8c293b10420db9bc903e954438da0 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 7 May 2024 10:25:40 -0400 Subject: [PATCH 1015/1393] Add VidCommon to `validate_and_apply_header` and `Header::new` (#3066) * spawn vote if able * fixes * Add vid common to Header::new * fix tests * one fix * Fix proposal issue * Fixes * fmt * WIP fixes * [CX_CLEANUP] - Support state update for dependency tasks (#3093) * Move vote_if_able to helper * Fix lint * More lints * More lints * working now * reduce a log * linting * fix tests, lower log level * tweeking * Address some comments * fix build * merge changes * Fix Tests in bf/vid-common (#3109) * all tests passing on branch * gate tests * remove gates and gate file instead * address review comments * fix lint --------- Co-authored-by: Keyao Shen Co-authored-by: Jarred Parr --- example-types/src/block_types.rs | 3 +- example-types/src/state_types.rs | 2 + .../{proposal_helpers.rs => helpers.rs} | 307 ++++++++++++---- task-impls/src/consensus/mod.rs | 346 ++++++++++-------- task-impls/src/quorum_proposal.rs | 9 +- task-impls/src/quorum_proposal_recv.rs | 2 +- task-impls/src/quorum_vote.rs | 63 +++- task-impls/src/transactions.rs | 6 +- task-impls/src/vid.rs | 18 +- testing/src/overall_safety_task.rs | 9 +- testing/src/test_runner.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 3 + testing/tests/tests_1/proposal_ordering.rs | 13 +- testing/tests/tests_1/quorum_proposal_task.rs | 205 ++++++++--- testing/tests/tests_1/upgrade_task.rs | 26 +- testing/tests/tests_5/combined_network.rs | 6 +- types/src/consensus.rs | 4 + types/src/traits/block_contents.rs | 4 +- types/src/traits/states.rs | 2 + 19 files changed, 720 insertions(+), 310 deletions(-) rename task-impls/src/consensus/{proposal_helpers.rs => helpers.rs} (82%) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 95b59790a5..660f00f995 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -13,7 +13,7 @@ use hotshot_types::{ BlockPayload, ValidatedState, }, utils::BuilderCommitment, - vid::VidCommitment, + vid::{VidCommitment, VidCommon}, }; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; @@ -203,6 +203,7 @@ impl> Block builder_commitment: BuilderCommitment, _metadata: ::Metadata, _builder_fee: BuilderFee, + _vid_common: VidCommon, ) -> Result { let parent = parent_leaf.get_block_header(); diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index ac477c30f0..0a68d9a602 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -10,6 +10,7 @@ use hotshot_types::{ states::{InstanceState, StateDelta, TestableState, ValidatedState}, BlockPayload, }, + vid::VidCommon, }; use serde::{Deserialize, Serialize}; @@ -73,6 +74,7 @@ impl ValidatedState for TestValidatedState { _instance: &Self::Instance, _parent_leaf: &Leaf, _proposed_header: &TYPES::BlockHeader, + _vid_common: VidCommon, ) -> Result<(Self, Self::Delta), Self::Error> { Ok(( TestValidatedState { diff --git a/task-impls/src/consensus/proposal_helpers.rs b/task-impls/src/consensus/helpers.rs similarity index 82% rename from task-impls/src/consensus/proposal_helpers.rs rename to task-impls/src/consensus/helpers.rs index 39ed2d076f..3595852791 100644 --- a/task-impls/src/consensus/proposal_helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -14,11 +14,13 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; use futures::FutureExt; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, - message::Proposal, + message::{GeneralConsensusMessage, Proposal}, simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, @@ -58,25 +60,11 @@ async fn validate_proposal_safety_and_liveness( consensus: Arc>>, decided_upgrade_certificate: Option>, quorum_membership: Arc, - parent_state: Arc, view_leader_key: TYPES::SignatureKey, event_stream: Sender>>, sender: TYPES::SignatureKey, event_sender: Sender>, - storage: Arc>>, - instance_state: Arc, ) -> Result<()> { - let (validated_state, state_delta) = parent_state - .validate_and_apply_header( - instance_state.as_ref(), - &parent_leaf, - &proposal.data.block_header.clone(), - ) - .await - .context("Block header doesn't extend the proposal!")?; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); let view = proposal.data.get_view_number(); let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); @@ -164,32 +152,10 @@ async fn validate_proposal_safety_and_liveness( let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.validated_state_map.insert( - view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }, - ); consensus .saved_leaves .insert(proposed_leaf.commit(), proposed_leaf.clone()); - if let Err(e) = storage - .write() - .await - .update_undecided_state( - consensus.saved_leaves.clone(), - consensus.validated_state_map.clone(), - ) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - Ok(()) } @@ -210,6 +176,15 @@ pub async fn create_and_send_proposal( round_start_delay: u64, instance_state: Arc, ) { + let consensus_read = consensus.read().await; + let Some(Some(vid_share)) = consensus_read + .vid_shares + .get(&view) + .map(|shares| shares.get(&public_key)) + else { + error!("Cannot propopse without our VID share, view {:?}", view); + return; + }; let block_header = match TYPES::BlockHeader::new( state.as_ref(), instance_state.as_ref(), @@ -218,6 +193,7 @@ pub async fn create_and_send_proposal( commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fee, + vid_share.data.common.clone(), ) .await { @@ -227,6 +203,7 @@ pub async fn create_and_send_proposal( return; } }; + drop(consensus_read); let proposal = QuorumProposal { block_header, @@ -257,7 +234,10 @@ pub async fn create_and_send_proposal( "Sending null proposal for view {:?}", proposed_leaf.get_view_number(), ); - + if consensus.read().await.last_proposed_view >= view { + return; + } + consensus.write().await.last_proposed_view = view; async_sleep(Duration::from_millis(round_start_delay)).await; broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( @@ -338,7 +318,7 @@ pub fn validate_proposal_view_and_certs( } /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. -pub async fn get_parent_leaf_and_state( +pub(crate) async fn get_parent_leaf_and_state( cur_view: TYPES::Time, view: TYPES::Time, quorum_membership: Arc, @@ -399,7 +379,7 @@ pub async fn get_parent_leaf_and_state( /// case proposal scenario. #[allow(clippy::too_many_lines)] #[allow(clippy::too_many_arguments)] -async fn publish_proposal_from_upgrade_cert( +pub(crate) async fn publish_proposal_from_upgrade_cert( cur_view: TYPES::Time, view: TYPES::Time, sender: Sender>>, @@ -464,7 +444,7 @@ async fn publish_proposal_from_upgrade_cert( /// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the /// standard case proposal scenario. #[allow(clippy::too_many_arguments)] -async fn publish_proposal_from_commitment_and_metadata( +pub async fn publish_proposal_from_commitment_and_metadata( cur_view: TYPES::Time, view: TYPES::Time, sender: Sender>>, @@ -475,8 +455,8 @@ async fn publish_proposal_from_commitment_and_metadata( delay: u64, formed_upgrade_certificate: Option>, decided_upgrade_cert: Option>, - commitment_and_metadata: &mut Option>, - proposal_cert: &mut Option>, + commitment_and_metadata: Option>, + proposal_cert: Option>, instance_state: Arc, ) -> Result> { let (parent_leaf, state) = get_parent_leaf_and_state( @@ -515,7 +495,7 @@ async fn publish_proposal_from_commitment_and_metadata( // FIXME - This is not great, and will be fixed later. // If it's > July, 2024 and this is still here, something has gone horribly wrong. let cnm = commitment_and_metadata - .take() + .clone() .context("Cannot propose because we don't have the VID payload commitment and metadata")?; ensure!( @@ -541,9 +521,6 @@ async fn publish_proposal_from_commitment_and_metadata( .await; }); - *proposal_cert = None; - *commitment_and_metadata = None; - Ok(create_and_send_proposal_handle) } @@ -561,8 +538,8 @@ pub async fn publish_proposal_if_able( delay: u64, formed_upgrade_certificate: Option>, decided_upgrade_cert: Option>, - commitment_and_metadata: &mut Option>, - proposal_cert: &mut Option>, + commitment_and_metadata: Option>, + proposal_cert: Option>, instance_state: Arc, ) -> Result> { if let Some(upgrade_cert) = decided_upgrade_cert { @@ -693,7 +670,7 @@ pub async fn handle_quorum_proposal_recv = PhantomData; + +/// TEMPORARY TYPE: Private key, latest decided upgrade certificate, committee membership, and +/// event stream, for sending the vote. +#[cfg(not(feature = "dependency-tasks"))] +type TemporaryVoteInfo = ( + <::SignatureKey as SignatureKey>::PrivateKey, + Option>, + Arc<::Membership>, + Sender>>, +); + +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +#[allow(unused_variables)] +/// Check if we are able to vote, like whether the proposal is valid, +/// whether we have DAC and VID share, and if so, vote. +pub async fn update_state_and_vote_if_able>( + cur_view: TYPES::Time, + proposal: QuorumProposal, + public_key: TYPES::SignatureKey, + consensus: Arc>>, + storage: Arc>, + quorum_membership: Arc, + instance_state: Arc, + vote_info: TemporaryVoteInfo, +) -> bool { + #[cfg(not(feature = "dependency-tasks"))] + use hotshot_types::simple_vote::QuorumVote; + + if !quorum_membership.has_stake(&public_key) { + debug!( + "We were not chosen for consensus committee on {:?}", + cur_view + ); + return false; + } + + let consensus = consensus.upgradable_read().await; + // Only vote if you has seen the VID share for this view + let Some(vid_shares) = consensus.vid_shares.get(&proposal.view_number) else { + debug!( + "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + proposal.view_number + ); + return false; + }; + let Some(vid_share) = vid_shares.get(&public_key).cloned() else { + debug!("we have not seen our VID share yet"); + return false; + }; + + #[cfg(not(feature = "dependency-tasks"))] + { + if let Some(upgrade_cert) = &vote_info.1 { + if upgrade_cert.in_interim(cur_view) + && Some(proposal.block_header.payload_commitment()) + != null_block::commitment(quorum_membership.total_nodes()) { - debug!("Failed to propose; error = {e:#}"); - }; + info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); + return false; + } } + } + + // Only vote if you have the DA cert + // ED Need to update the view number this is stored under? + let Some(cert) = consensus.saved_da_certs.get(&cur_view) else { + return false; + }; + + let view = cert.view_number; + // TODO: do some of this logic without the vote token check, only do that when voting. + let justify_qc = proposal.justify_qc.clone(); + let parent = consensus + .saved_leaves + .get(&justify_qc.get_data().leaf_commit) + .cloned(); - ensure!( - task_state.vote_if_able(&event_stream).await, - "Failed to vote" + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.get_data().leaf_commit, + proposal.view_number, ); - task_state.current_proposal = None; + return false; + }; + let (Some(parent_state), _) = consensus.get_state_and_delta(parent.get_view_number()) else { + warn!("Parent state not found! Consensus internally inconsistent"); + return false; + }; + let Ok((validated_state, state_delta)) = parent_state + .validate_and_apply_header( + instance_state.as_ref(), + &parent, + &proposal.block_header.clone(), + vid_share.data.common.clone(), + ) + .await + else { + warn!("Block header doesn't extend the proposal!"); + return false; + }; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + let parent_commitment = parent.commit(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.get_parent_commitment() != parent_commitment { + return false; } - Ok(()) + let message: GeneralConsensusMessage; + + #[cfg(not(feature = "dependency-tasks"))] + { + // Validate the DAC. + message = if cert.is_valid_cert(vote_info.2.as_ref()) { + // Validate the block payload commitment for non-genesis DAC. + if cert.get_data().payload_commit != proposal.block_header.payload_commitment() { + error!( + "Block payload commitment does not equal da cert payload commitment. View = {}", + *view + ); + return false; + } + if let Ok(vote) = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: proposed_leaf.commit(), + }, + view, + &public_key, + &vote_info.0, + ) { + GeneralConsensusMessage::::Vote(vote) + } else { + error!("Unable to sign quorum vote!"); + return false; + } + } else { + error!( + "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", + cert, cur_view + ); + return false; + }; + } + + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + consensus.validated_state_map.insert( + cur_view, + View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: Arc::clone(&state), + delta: Some(Arc::clone(&delta)), + }, + }, + ); + + if let Err(e) = storage + .write() + .await + .update_undecided_state( + consensus.saved_leaves.clone(), + consensus.validated_state_map.clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + + #[cfg(not(feature = "dependency-tasks"))] + { + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.get_view_number() + 1 + ); + // Add to the storage that we have received the VID disperse for a specific view + if let Err(e) = storage.write().await.append_vid(&vid_share).await { + error!( + "Failed to store VID Disperse Proposal with error {:?}, aborting vote", + e + ); + return false; + } + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &vote_info.3).await; + return true; + } + debug!( + "Received VID share, but couldn't find DAC cert for view {:?}", + *proposal.get_view_number(), + ); + } + false } diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 6a87bc861e..fbd43bf0f3 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -3,10 +3,11 @@ use std::{collections::BTreeMap, sync::Arc}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::Result; use async_broadcast::Sender; +#[cfg(not(feature = "dependency-tasks"))] +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use committable::Committable; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; #[cfg(not(feature = "dependency-tasks"))] @@ -17,33 +18,31 @@ use hotshot_types::message::Proposal; use hotshot_types::vid::vid_scheme; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, - data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, + data::{QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, - message::GeneralConsensusMessage, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote}, + simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, traits::{ - block_contents::BlockHeader, election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, - storage::Storage, }, - vote::{Certificate, HasViewNumber}, + vote::HasViewNumber, }; #[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::{traits::storage::Storage, vote::Certificate}; +#[cfg(not(feature = "dependency-tasks"))] use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -#[cfg(not(feature = "dependency-tasks"))] -use tracing::info; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] -use self::proposal_helpers::handle_quorum_proposal_validated; -#[cfg(not(feature = "dependency-tasks"))] -use crate::consensus::proposal_helpers::{handle_quorum_proposal_recv, publish_proposal_if_able}; +use crate::consensus::helpers::{ + handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, + update_state_and_vote_if_able, +}; use crate::{ consensus::view_change::{update_view, DONT_SEND_VIEW_CHANGE_EVENT}, events::{HotShotEvent, HotShotTaskCompleted}, @@ -54,7 +53,7 @@ use crate::{ }; /// Helper functions to handle proposal-related functionality. -pub(crate) mod proposal_helpers; +pub(crate) mod helpers; /// Handles view-change related functionality. pub(crate) mod view_change; @@ -158,131 +157,6 @@ impl> ConsensusTaskState join_all(cancel).await; } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] - // Check if we are able to vote, like whether the proposal is valid, - // whether we have DAC and VID share, and if so, vote. - async fn vote_if_able(&mut self, event_stream: &Sender>>) -> bool { - if !self.quorum_membership.has_stake(&self.public_key) { - debug!( - "We were not chosen for consensus committee on {:?}", - self.cur_view - ); - return false; - } - - if let Some(proposal) = &self.current_proposal { - let consensus = self.consensus.read().await; - // Only vote if you has seen the VID share for this view - let Some(vid_shares) = consensus.vid_shares.get(&proposal.view_number) else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - proposal.view_number - ); - return false; - }; - - if let Some(upgrade_cert) = &self.decided_upgrade_cert { - if upgrade_cert.in_interim(self.cur_view) - && Some(proposal.block_header.payload_commitment()) - != null_block::commitment(self.quorum_membership.total_nodes()) - { - warn!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(self.quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); - return false; - } - } - - // Only vote if you have the DA cert - // ED Need to update the view number this is stored under? - if let Some(cert) = consensus.saved_da_certs.get(&(proposal.get_view_number())) { - let view = cert.view_number; - // TODO: do some of this logic without the vote token check, only do that when voting. - let justify_qc = proposal.justify_qc.clone(); - let parent = consensus - .saved_leaves - .get(&justify_qc.get_data().leaf_commit) - .cloned(); - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.get_data().leaf_commit, - proposal.view_number, - ); - return false; - }; - let parent_commitment = parent.commit(); - - let proposed_leaf = Leaf::from_quorum_proposal(proposal); - if proposed_leaf.get_parent_commitment() != parent_commitment { - return false; - } - - // Validate the DAC. - let message = if cert.is_valid_cert(self.committee_membership.as_ref()) { - // Validate the block payload commitment for non-genesis DAC. - if cert.get_data().payload_commit != proposal.block_header.payload_commitment() - { - error!("Block payload commitment does not equal da cert payload commitment. View = {}", *view); - return false; - } - if let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: proposed_leaf.commit(), - }, - view, - &self.public_key, - &self.private_key, - ) { - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Unable to sign quorum vote!"); - return false; - } - } else { - error!( - "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", - cert, self.cur_view - ); - return false; - }; - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.get_view_number() + 1 - ); - // Add to the storage that we have received the VID disperse for a specific view - if let Some(vid_share) = vid_shares.get(&self.public_key) { - if let Err(e) = self.storage.write().await.append_vid(vid_share).await { - error!( - "Failed to store VID Disperse Proposal with error {:?}, aborting vote", - e - ); - return false; - } - } else { - error!("Did not get a VID share for our public key, aborting vote"); - return false; - } - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), event_stream) - .await; - return true; - } - } - debug!( - "Received VID share, but couldn't find DAC cert for view {:?}", - *proposal.get_view_number(), - ); - return false; - } - debug!( - "Could not vote because we don't have a proposal yet for view {}", - *self.cur_view - ); - false - } - /// Validate the VID disperse is correctly signed and has the correct share. #[cfg(not(feature = "dependency-tasks"))] fn validate_disperse(&self, disperse: &Proposal>) -> bool { @@ -347,8 +221,8 @@ impl> ConsensusTaskState self.round_start_delay, self.formed_upgrade_certificate.clone(), self.decided_upgrade_cert.clone(), - &mut self.payload_commitment_and_metadata, - &mut self.proposal_cert, + self.payload_commitment_and_metadata.clone(), + self.proposal_cert.clone(), Arc::clone(&self.instance_state), ) .await?; @@ -361,6 +235,124 @@ impl> ConsensusTaskState Ok(()) } + /// Spawn a vote task for the given view. Will try to vote + /// and emit a `QuorumVoteSend` event we should vote on the current proposal + fn spawn_vote_task( + &mut self, + view: TYPES::Time, + event_stream: Sender>>, + ) { + let Some(proposal) = self.current_proposal.clone() else { + return; + }; + let upgrade = self.decided_upgrade_cert.clone(); + let pub_key = self.public_key.clone(); + let priv_key = self.private_key.clone(); + let consensus = Arc::clone(&self.consensus); + let storage = Arc::clone(&self.storage); + let quorum_mem = Arc::clone(&self.quorum_membership); + let committee_mem = Arc::clone(&self.committee_membership); + let instance_state = Arc::clone(&self.instance_state); + let handle = async_spawn(async move { + update_state_and_vote_if_able::( + view, + proposal, + pub_key, + consensus, + storage, + quorum_mem, + instance_state, + (priv_key, upgrade, committee_mem, event_stream), + ) + .await; + }); + self.spawned_tasks.entry(view).or_default().push(handle); + } + + #[cfg(not(feature = "dependency-tasks"))] + /// Tries to vote then Publishes a proposal + fn vote_and_publish_proposal( + &mut self, + vote_view: TYPES::Time, + propose_view: TYPES::Time, + proposal: QuorumProposal, + event_stream: Sender>>, + ) { + use crate::consensus::helpers::publish_proposal_from_commitment_and_metadata; + + use self::helpers::publish_proposal_from_upgrade_cert; + + let upgrade = self.decided_upgrade_cert.clone(); + let pub_key = self.public_key.clone(); + let priv_key = self.private_key.clone(); + let consensus = Arc::clone(&self.consensus); + let storage = Arc::clone(&self.storage); + let quorum_mem = Arc::clone(&self.quorum_membership); + let committee_mem = Arc::clone(&self.committee_membership); + let instance_state = Arc::clone(&self.instance_state); + let commitment_and_metadata = self.payload_commitment_and_metadata.clone(); + let cur_view = self.cur_view; + let sender = event_stream.clone(); + let decided_upgrade_cert = self.decided_upgrade_cert.clone(); + let delay = self.round_start_delay; + let formed_upgrade_certificate = self.formed_upgrade_certificate.clone(); + let proposal_cert = self.proposal_cert.clone(); + let handle = async_spawn(async move { + update_state_and_vote_if_able::( + vote_view, + proposal, + pub_key.clone(), + Arc::clone(&consensus), + storage, + Arc::clone(&quorum_mem), + Arc::clone(&instance_state), + (priv_key.clone(), upgrade, committee_mem, event_stream), + ) + .await; + if let Some(upgrade_cert) = decided_upgrade_cert { + if let Err(e) = publish_proposal_from_upgrade_cert( + cur_view, + propose_view, + sender, + quorum_mem, + pub_key, + priv_key, + consensus, + upgrade_cert, + delay, + instance_state, + ) + .await + { + debug!("Couldn't propose with Error: {}", e); + } + } else if let Err(e) = publish_proposal_from_commitment_and_metadata( + cur_view, + propose_view, + sender, + quorum_mem, + pub_key, + priv_key, + consensus, + delay, + formed_upgrade_certificate, + decided_upgrade_cert, + commitment_and_metadata, + proposal_cert, + instance_state, + ) + .await + { + debug!("Couldn't propose with Error: {}", e); + } + }); + + self.spawned_tasks + .entry(propose_view) + .or_default() + .push(handle); + } + /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] pub async fn handle( @@ -375,10 +367,9 @@ impl> ConsensusTaskState .await { Ok(Some(current_proposal)) => { + let view = current_proposal.get_view_number(); self.current_proposal = Some(current_proposal); - if self.vote_if_able(&event_stream).await { - self.current_proposal = None; - } + self.spawn_vote_task(view, event_stream); } Ok(None) => {} Err(e) => debug!("Failed to propose {e:#}"), @@ -389,7 +380,7 @@ impl> ConsensusTaskState if let Err(e) = handle_quorum_proposal_validated(proposal, event_stream.clone(), self).await { - debug!("Failed to handle QuorumProposalValidated event {e:#}"); + warn!("Failed to handle QuorumProposalValidated event {e:#}"); } } HotShotEvent::QuorumVoteRecv(ref vote) => { @@ -547,10 +538,13 @@ impl> ConsensusTaskState .await .saved_da_certs .insert(view, cert.clone()); - - if self.vote_if_able(&event_stream).await { - self.current_proposal = None; + let Some(proposal) = self.current_proposal.clone() else { + return; + }; + if proposal.get_view_number() != view { + return; } + self.spawn_vote_task(view, event_stream); } #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::VIDShareRecv(disperse) => { @@ -587,9 +581,7 @@ impl> ConsensusTaskState if disperse.data.recipient_key != self.public_key { return; } - if self.vote_if_able(&event_stream).await { - self.current_proposal = None; - } + self.spawn_vote_task(view, event_stream); } HotShotEvent::ViewChange(new_view) => { let new_view = *new_view; @@ -709,7 +701,10 @@ impl> ConsensusTaskState fee, ) => { let view = *view; - debug!("got commit and meta {:?}", payload_commitment); + debug!( + "got commit and meta {:?}, view {:?}", + payload_commitment, view + ); self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, builder_commitment: builder_commitment.clone(), @@ -758,11 +753,11 @@ impl> ConsensusTaskState return; } - self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); - let view = certificate.view_number; if self.quorum_membership.get_leader(view) == self.public_key { + self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); + debug!( "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", *certificate.view_number @@ -773,6 +768,47 @@ impl> ConsensusTaskState }; } } + #[cfg(not(feature = "dependency-tasks"))] + HotShotEvent::QuorumVoteSend(vote) => { + let Some(proposal) = self.current_proposal.clone() else { + return; + }; + let new_view = proposal.get_view_number() + 1; + // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = self.quorum_membership.get_leader(new_view) == self.public_key + && self.consensus.read().await.high_qc().view_number + == proposal.get_view_number(); + + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + let _ = self.publish_proposal(new_view, event_stream.clone()).await; + } + if proposal.get_view_number() <= vote.get_view_number() { + self.current_proposal = None; + } + } + HotShotEvent::QuorumProposalSend(proposal, _) => { + if self + .payload_commitment_and_metadata + .as_ref() + .is_some_and(|p| p.block_view <= proposal.data.get_view_number()) + { + self.payload_commitment_and_metadata = None; + } + if let Some(cert) = &self.proposal_cert { + let view = match cert { + ViewChangeEvidence::Timeout(tc) => tc.get_view_number() + 1, + ViewChangeEvidence::ViewSync(vsc) => vsc.get_view_number(), + }; + if view < proposal.data.get_view_number() { + self.proposal_cert = None; + } + } + } _ => {} } } @@ -796,6 +832,8 @@ impl> TaskState for ConsensusTaskS | HotShotEvent::TimeoutVoteRecv(_) | HotShotEvent::VIDShareRecv(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::QuorumVoteSend(_) + | HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::Shutdown, ) } diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 9ab6311277..babd1bc785 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -27,10 +27,9 @@ use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; #[cfg(feature = "dependency-tasks")] -use crate::consensus::proposal_helpers::handle_quorum_proposal_validated; +use crate::consensus::helpers::handle_quorum_proposal_validated; use crate::{ - consensus::proposal_helpers::publish_proposal_if_able, events::HotShotEvent, - helpers::cancel_task, + consensus::helpers::publish_proposal_if_able, events::HotShotEvent, helpers::cancel_task, }; /// Proposal dependency types. These types represent events that precipitate a proposal. @@ -186,8 +185,8 @@ impl HandleDepOutput for ProposalDependencyHandle { self.round_start_delay, None, None, - &mut commit_and_metadata, - &mut None, + commit_and_metadata, + None, Arc::clone(&self.instance_state), ) .await diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs index 1164a1ef50..4f508fb9bb 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv.rs @@ -24,7 +24,7 @@ use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; use crate::{ - consensus::proposal_helpers::{get_parent_leaf_and_state, handle_quorum_proposal_recv}, + consensus::helpers::{get_parent_leaf_and_state, handle_quorum_proposal_recv}, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index ae596536a6..b4c80d776e 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,5 +1,9 @@ -use std::{collections::HashMap, sync::Arc}; - +#[cfg(feature = "dependency-tasks")] +use crate::consensus::helpers::update_state_and_vote_if_able; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -27,15 +31,13 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use jf_primitives::vid::VidScheme; +#[cfg(feature = "dependency-tasks")] +use std::marker::PhantomData; +use std::{collections::HashMap, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; - /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { @@ -50,29 +52,42 @@ enum VoteDependency { } /// Handler for the vote dependency. -struct VoteDependencyHandle> { +#[allow(dead_code)] +struct VoteDependencyHandle> { /// Public key. pub public_key: TYPES::SignatureKey, /// Private Key. pub private_key: ::PrivateKey, + /// Reference to consensus. The replica will require a write lock on this. + consensus: Arc>>, + /// Immutable instance state + instance_state: Arc, + /// Membership for Quorum certs/votes. + quorum_membership: Arc, /// Reference to the storage. - pub storage: Arc>, + pub storage: Arc>, /// View number to vote on. view_number: TYPES::Time, /// Event sender. sender: Sender>>, } -impl + 'static> HandleDepOutput - for VoteDependencyHandle + +impl + 'static> HandleDepOutput + for VoteDependencyHandle { type Output = Vec>>; + #[allow(clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { + #[allow(unused_variables)] + let mut cur_proposal = None; let mut payload_commitment = None; let mut leaf = None; let mut disperse_share = None; for event in res { match event.as_ref() { + #[allow(unused_assignments)] HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { + cur_proposal = Some(proposal.clone()); let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { @@ -128,6 +143,27 @@ impl + 'static> HandleDepOutput ) .await; + #[cfg(feature = "dependency-tasks")] + { + let Some(proposal) = cur_proposal else { + error!("No proposal received, but it should be."); + return; + }; + // For this vote task, we'll update the state in storage without voting in this function, + // then vote later. + update_state_and_vote_if_able::( + self.view_number, + proposal, + self.public_key.clone(), + self.consensus, + Arc::clone(&self.storage), + self.quorum_membership, + self.instance_state, + PhantomData, + ) + .await; + } + // Create and send the vote. let Some(leaf) = leaf else { error!("Quorum proposal isn't validated, but it should be."); @@ -312,9 +348,12 @@ impl> QuorumVoteTaskState { public_key: self.public_key.clone(), private_key: self.private_key.clone(), + consensus: Arc::clone(&self.consensus), + instance_state: Arc::clone(&self.instance_state), + quorum_membership: Arc::clone(&self.quorum_membership), storage: Arc::clone(&self.storage), view_number, sender: event_sender.clone(), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 58d72b014c..1f3646ff0d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -26,7 +26,7 @@ use hotshot_types::{ utils::ViewInner, vid::VidCommitment, }; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; use vbs::version::StaticVersionType; use crate::{ @@ -159,7 +159,7 @@ impl< .await; } else { // If we couldn't get a block, send an empty block - error!( + warn!( "Failed to get a block for view {:?}, proposing empty block", view ); @@ -280,7 +280,7 @@ impl< // We failed to get a block Ok(Err(err)) => { - error!(%err, "Couldn't get a block"); + tracing::warn!(%err, "Couldn't get a block"); // pause a bit async_sleep(Duration::from_millis(100)).await; continue; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 1e5a1b8950..58368e3116 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -5,6 +5,7 @@ use async_lock::RwLock; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, + data::VidDisperseShare, message::Proposal, traits::{ consensus_api::ConsensusApi, @@ -70,10 +71,25 @@ impl, A: ConsensusApi + *view_number, ) .await; + let payload_commitment = vid_disperse.payload_commitment; + let shares = VidDisperseShare::from_vid_disperse(vid_disperse.clone()); + let mut consensus = self.consensus.write().await; + for share in shares { + let s = share.clone(); + let key: ::SignatureKey = s.recipient_key; + if let Some(prop) = share.to_proposal(&self.private_key) { + consensus + .vid_shares + .entry(*view_number) + .or_default() + .insert(key, prop); + } + } + // send the commitment and metadata to consensus for block building broadcast_event( Arc::new(HotShotEvent::SendPayloadCommitmentAndMetadata( - vid_disperse.payload_commitment, + payload_commitment, builder_commitment, metadata.clone(), *view_number, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 9a6f637520..e6e6ab8d2b 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -361,9 +361,9 @@ impl RoundResult { ) -> Option> { self.success_nodes.insert(idx as u64, result.clone()); - let maybe_leaf = result.0.into_iter().last(); - if let Some(leaf_info) = maybe_leaf.clone() { - let leaf = leaf_info.leaf; + let maybe_leaf = result.0.first(); + if let Some(leaf_info) = maybe_leaf { + let leaf = &leaf_info.leaf; match self.leaf_map.entry(leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; @@ -394,7 +394,7 @@ impl RoundResult { } } } - return Some(leaf); + return Some(leaf.clone()); } None } @@ -441,6 +441,7 @@ impl RoundResult { if check_block && self.block_map.len() != 1 { self.status = ViewStatus::Err(OverallSafetyTaskErr::InconsistentBlocks); + error!("Check blocks failed. Block map IS: {:?}", self.block_map); return; } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 85e5025666..a57f20c098 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -311,7 +311,7 @@ where } } Err(e) => { - panic!("Error Joining the test task {:?}", e); + tracing::error!("Error Joining the test task {:?}", e); } } } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 7433c57ad3..4d18a38212 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -88,6 +88,7 @@ async fn test_consensus_task() { // Run view 2 and propose. let view_2 = TestScriptStage { inputs: vec![ + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), QCFormed(either::Left(cert)), // We must have a payload commitment and metadata to propose. @@ -309,6 +310,7 @@ async fn test_view_sync_finalize_propose() { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); + vids.push(view.vid_proposal); // This is a bog standard view and covers the situation where everything is going normally. let view_1 = TestScriptStage { @@ -364,6 +366,7 @@ async fn test_view_sync_finalize_propose() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let view_4 = TestScriptStage { inputs: vec![ + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), TimeoutVoteRecv(timeout_vote_view_2), TimeoutVoteRecv(timeout_vote_view_3), diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 7cd80bd62f..ec53f21557 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -35,7 +35,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let node_id = 2; + let node_id = 3; let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -56,7 +56,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(3) { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); leaders.push(view.leader_public_key); @@ -82,7 +82,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { }; // Node 2 is the leader up next, so we form the QC for it. - let cert = proposals[1].data.justify_qc.clone(); + let cert = proposals[2].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let inputs = vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), @@ -97,14 +97,17 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { ), ]; - let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); + let mut view_2_inputs = permute_input_with_index_order(inputs, input_permutation); + view_2_inputs.insert(0, DACertificateRecv(dacs[1].clone())); + view_2_inputs.insert(0, VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key()))); + view_2_inputs.insert(0, VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key()))); // This stage transitions from view 1 to view 2. let view_2 = TestScriptStage { inputs: view_2_inputs, outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - all_predicates(vec![quorum_proposal_validated(), quorum_proposal_send()]), + all_predicates(vec![exact(QuorumVoteSend(votes[1].clone())), quorum_proposal_validated(), quorum_proposal_send()]), ], // We should end on view 2. asserts: vec![], diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 4badd978e1..74895d4427 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,5 +1,8 @@ -use hotshot::tasks::task_state::CreateTaskState; +#![cfg(feature = "dependency-tasks")] + +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ + block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; @@ -7,12 +10,18 @@ use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposa use hotshot_testing::{ predicates::event::quorum_proposal_send, script::{run_test_script, TestScriptStage}, + task_helpers::{build_cert, key_pair_for_id}, task_helpers::{build_system_handle, vid_scheme_from_view_number}, view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{ViewChangeEvidence, ViewNumber}, + consensus::{CommitmentAndMetadata, ProposalDependencyData}, + data::null_block, + data::{VidDisperseShare, ViewChangeEvidence, ViewNumber}, + message::Proposal, + simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, simple_vote::ViewSyncFinalizeData, + simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeVote}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, @@ -34,7 +43,26 @@ fn make_payload_commitment( vid.commit_only(&encoded_transactions).unwrap() } -#[cfg(feature = "dependency-tasks")] +async fn insert_vid_shares_for_view( + view: ::Time, + handle: &SystemContextHandle, + vid: ( + Vec>>, + ::SignatureKey, + ), +) { + let consensus = handle.get_consensus(); + let mut consensus = consensus.write().await; + + // `create_and_send_proposal` depends on the `vid_shares` obtaining a vid dispersal. + // to avoid needing to spin up the vote task, we can just insert it in here. + consensus + .vid_shares + .entry(view) + .or_default() + .insert(vid.1, vid.0[0].clone()); +} + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -45,23 +73,28 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(1).await.0; + let node_id = 1; + let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(1)); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut leaves = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); + vids.push(view.vid_proposal.clone()); } + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[0].clone()).await; + let cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); @@ -88,7 +121,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { run_test_script(script, quorum_proposal_task_state).await; } -#[cfg(feature = "dependency-tasks")] #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -114,11 +146,15 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut leaves = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(3) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); + vids.push(view.vid_proposal.clone()); } + + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[2].clone()).await; let consensus = handle.get_consensus(); let mut consensus = consensus.write().await; @@ -184,20 +220,23 @@ async fn test_quorum_proposal_task_qc_timeout() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let node_id = 2; + let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); } let timeout_data = TimeoutData { view: ViewNumber::new(1), @@ -206,8 +245,10 @@ async fn test_quorum_proposal_task_qc_timeout() { for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); } + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; // Get the proposal cert out for the view sync input let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { ViewChangeEvidence::Timeout(tc) => tc, @@ -250,32 +291,37 @@ async fn test_quorum_proposal_task_view_sync() { // We need to propose as the leader for view 2, otherwise we get caught up with the special // case in the genesis view. - let handle = build_system_handle(2).await.0; + let node_id = 2; + let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); } let view_sync_finalize_data = ViewSyncFinalizeData { relay: 2, - round: ViewNumber::new(2), + round: ViewNumber::new(node_id), }; generator.add_view_sync_finalize(view_sync_finalize_data); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); } + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; // Get the proposal cert out for the view sync input let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { ViewChangeEvidence::ViewSync(vsc) => vsc, @@ -310,36 +356,32 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_propose_now() { - use hotshot_example_types::block_types::TestMetadata; - use hotshot_testing::task_helpers::{build_cert, key_pair_for_id}; - use hotshot_types::{ - consensus::{CommitmentAndMetadata, ProposalDependencyData}, - data::null_block, - simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, - simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeVote}, - }; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; - let (private_key, public_key) = key_pair_for_id(2); + let node_id = 2; + let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(2)); + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut vids = Vec::new(); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); } for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); } + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); // proposal dependency data - quorum proposal and cert let pdd_qp = ProposalDependencyData { @@ -358,6 +400,52 @@ async fn test_quorum_proposal_task_propose_now() { ), }; + let view_qp = TestScriptStage { + inputs: vec![ProposeNow(ViewNumber::new(node_id), pdd_qp)], + outputs: vec![quorum_proposal_send()], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; + + let script = vec![view_qp]; + run_test_script(script, quorum_proposal_task_state).await; +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_propose_now_timeout() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let node_id = 2; + let handle = build_system_handle(node_id).await.0; + let (private_key, public_key) = key_pair_for_id(node_id); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); + } + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); + } + + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); // proposal dependency data - timeout cert let pdd_timeout = ProposalDependencyData { commitment_and_metadata: CommitmentAndMetadata { @@ -385,6 +473,52 @@ async fn test_quorum_proposal_task_propose_now() { )), }; + let view_timeout = TestScriptStage { + inputs: vec![ProposeNow(ViewNumber::new(node_id), pdd_timeout)], + outputs: vec![quorum_proposal_send()], + asserts: vec![], + }; + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; + + let script = vec![view_timeout]; + run_test_script(script, quorum_proposal_task_state).await; +} +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_propose_now_view_sync() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let node_id = 2; + let handle = build_system_handle(node_id).await.0; + let (private_key, public_key) = key_pair_for_id(node_id); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); + } + for view in (&mut generator).take(1) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + vids.push(view.vid_proposal.clone()); + } + + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + // proposal dependency data - view sync cert let pdd_view_sync = ProposalDependencyData { commitment_and_metadata: CommitmentAndMetadata { @@ -407,37 +541,24 @@ async fn test_quorum_proposal_task_propose_now() { round: ViewNumber::new(1), }, &quorum_membership, - ViewNumber::new(2), + ViewNumber::new(node_id), &public_key, &private_key, )), }; - let view_qp = TestScriptStage { - inputs: vec![ProposeNow(ViewNumber::new(2), pdd_qp)], - outputs: vec![quorum_proposal_send()], - asserts: vec![], - }; - - let view_timeout = TestScriptStage { - inputs: vec![ProposeNow(ViewNumber::new(2), pdd_timeout)], - outputs: vec![quorum_proposal_send()], - asserts: vec![], - }; - let view_view_sync = TestScriptStage { - inputs: vec![ProposeNow(ViewNumber::new(2), pdd_view_sync)], + inputs: vec![ProposeNow(ViewNumber::new(node_id), pdd_view_sync)], outputs: vec![quorum_proposal_send()], asserts: vec![], }; - for stage in vec![view_qp, view_timeout, view_view_sync] { - let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; - let script = vec![stage]; - run_test_script(script, quorum_proposal_task_state).await; - } + let script = vec![view_view_sync]; + run_test_script(script, quorum_proposal_task_state).await; } #[cfg(test)] diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index f401d31da2..cabb0a5381 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -167,12 +167,14 @@ async fn test_consensus_task_upgrade() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. async fn test_upgrade_and_consensus_task() { + use std::sync::Arc; + use hotshot_testing::task_helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle(3).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -221,7 +223,7 @@ async fn test_upgrade_and_consensus_task() { let upgrade_votes = other_handles .iter() - .map(|h| views[1].create_upgrade_vote(upgrade_data.clone(), &h.0)); + .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); let consensus_state = ConsensusTaskState::::create_from(&handle).await; let mut upgrade_state = UpgradeTaskState::< @@ -242,18 +244,24 @@ async fn test_upgrade_and_consensus_task() { DACertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, - vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], + vec![QuorumProposalRecv(proposals[1].clone(), leaders[1]), + DACertificateRecv(dacs[1].clone()), + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + ], vec![ - DACertificateRecv(dacs[1].clone()), + VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, - ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + ViewNumber::new(3), + null_block::builder_fee( + quorum_membership.total_nodes(), + &TestInstanceState {}, + ) + .unwrap(), ), - QCFormed(either::Either::Left(proposals[1].data.justify_qc.clone())), + QCFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), ], ]; @@ -276,6 +284,7 @@ async fn test_upgrade_and_consensus_task() { output_asserts: vec![ exact::(ViewChange(ViewNumber::new(2))), quorum_proposal_validated::(), + quorum_vote_send(), ], task_state_asserts: vec![], }, @@ -481,6 +490,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DACertificateRecv(dacs[5].clone()), + VIDShareRecv(get_vid_share(&vids[5].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, proposals[5].data.block_header.builder_commitment.clone(), diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 9bf85792a1..82c849b018 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -60,8 +60,6 @@ async fn test_combined_network_cdn_crash() { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, - start_delay: 120_000, - ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { @@ -72,7 +70,7 @@ async fn test_combined_network_cdn_crash() { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(120), + duration: Duration::from_secs(220), }, ), ..TestDescription::default_multiple_rounds() @@ -121,7 +119,7 @@ async fn test_combined_network_reup() { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(120), + duration: Duration::from_secs(220), }, ), ..TestDescription::default_multiple_rounds() diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 987c058e26..3ab5aa2551 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -55,6 +55,9 @@ pub struct Consensus { /// View number that is currently on. cur_view: TYPES::Time, + /// View we proposed in last. To prevent duplicate proposals + pub last_proposed_view: TYPES::Time, + /// last view had a successful decide event pub last_decided_view: TYPES::Time, @@ -281,6 +284,7 @@ impl Consensus { saved_da_certs: HashMap::new(), cur_view, last_decided_view, + last_proposed_view: last_decided_view, saved_leaves, saved_payloads, locked_view, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 38091f792b..ce7d6eaf76 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -20,7 +20,7 @@ use crate::{ data::Leaf, traits::{node_implementation::NodeType, states::InstanceState, ValidatedState}, utils::BuilderCommitment, - vid::{vid_scheme, VidCommitment, VidSchemeType}, + vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType}, }; /// Trait for structures that need to be unambiguously encoded as bytes. @@ -174,6 +174,7 @@ pub trait BlockHeader: /// Build a header with the parent validate state, instance-level state, parent leaf, payload /// commitment, and metadata. + #[allow(clippy::too_many_arguments)] fn new( parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, @@ -182,6 +183,7 @@ pub trait BlockHeader: builder_commitment: BuilderCommitment, metadata: ::Metadata, builder_fee: BuilderFee, + vid_common: VidCommon, ) -> impl Future> + Send; /// Build the genesis header, payload, and metadata. diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 63b6bb6a69..07c38bead6 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -15,6 +15,7 @@ use crate::{ node_implementation::{ConsensusTime, NodeType}, BlockPayload, }, + vid::VidCommon, }; /// Instance-level state, which allows us to fetch missing validated state. @@ -59,6 +60,7 @@ pub trait ValidatedState: instance: &Self::Instance, parent_leaf: &Leaf, proposed_header: &TYPES::BlockHeader, + vid_common: VidCommon, ) -> impl Future> + Send; /// Construct the state with the given block header. From c6421dc5a0a873a3fef904699264deb20ed81d1d Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 7 May 2024 10:44:12 -0400 Subject: [PATCH 1016/1393] Add builder public key to fee info (#3114) --- task-impls/src/transactions.rs | 1 + types/src/data.rs | 3 ++- types/src/traits/block_contents.rs | 4 +++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 1f3646ff0d..bb59ef826a 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -151,6 +151,7 @@ impl< block_view, BuilderFee { fee_amount: blocks_initial_info.offered_fee, + fee_account: block_data.sender, fee_signature: block_header.fee_signature, }, )), diff --git a/types/src/data.rs b/types/src/data.rs index d039b52f44..70140ee3b9 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -740,7 +740,7 @@ pub mod null_block { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; - let (_, priv_key) = + let (pub_key, priv_key) = ::generated_from_seed_indexed( [0_u8; 32], 0, ); @@ -756,6 +756,7 @@ pub mod null_block { ) { Ok(sig) => Some(BuilderFee { fee_amount: FEE_AMOUNT, + fee_account: pub_key, fee_signature: sig, }), Err(_) => None, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index ce7d6eaf76..9db0d9cda9 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -161,7 +161,9 @@ pub const GENESIS_VID_NUM_STORAGE_NODES: usize = 1; pub struct BuilderFee { /// Proposed fee amount pub fee_amount: u64, - /// Signature over fee amount + /// Account authorizing the fee. + pub fee_account: TYPES::BuilderSignatureKey, + /// Signature over fee amount by `fee_account`. pub fee_signature: ::BuilderSignature, } From 0b6073e5c629a049008225074c06ac40b6b2dddf Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 7 May 2024 13:21:27 -0600 Subject: [PATCH 1017/1393] fix build and lint (#3121) --- task-impls/src/consensus/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index fbd43bf0f3..820197c6d0 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -35,7 +35,10 @@ use hotshot_types::{traits::storage::Storage, vote::Certificate}; use jf_primitives::vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +#[cfg(not(feature = "dependency-tasks"))] +use tracing::info; + +use tracing::{debug, error, instrument, warn}; use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] @@ -237,6 +240,7 @@ impl> ConsensusTaskState /// Spawn a vote task for the given view. Will try to vote /// and emit a `QuorumVoteSend` event we should vote on the current proposal + #[cfg(not(feature = "dependency-tasks"))] fn spawn_vote_task( &mut self, view: TYPES::Time, From ee4263604144bd42624d812511bac802a6218858 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 7 May 2024 15:35:53 -0400 Subject: [PATCH 1018/1393] Allow nodes to join late if they have already registered (#3118) By switching the order of two checks (for manual start and duplicate keys) we can allow nodes that have already posted their keys, but restarted, to successfully get a config even after the network has started. --- orchestrator/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index c8fb62a199..28784747f4 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -283,6 +283,10 @@ where libp2p_address: Option, libp2p_public_key: Option, ) -> Result<(u64, bool), ServerError> { + if let Some((node_index, is_da)) = self.pub_posted.get(pubkey) { + return Ok((*node_index, *is_da)); + } + if !self.accepting_new_keys { return Err(ServerError { status: tide_disco::StatusCode::Forbidden, @@ -292,10 +296,6 @@ where }); } - if let Some((node_index, is_da)) = self.pub_posted.get(pubkey) { - return Ok((*node_index, *is_da)); - } - let node_index = self.pub_posted.len() as u64; let staked_pubkey = PeerConfig::::from_bytes(pubkey).unwrap(); From bd5642c2a5888d61d662add556be8b6d668d872c Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 7 May 2024 14:22:30 -0600 Subject: [PATCH 1019/1393] Sanitize Consensus State Variables and Updates (#3120) * privatize variables * errors * fix run for dependency tasks * actually fix dependency tasks build this time --- task-impls/src/consensus/helpers.rs | 43 +++++---- task-impls/src/consensus/mod.rs | 12 ++- task-impls/src/consensus/view_change.rs | 4 +- task-impls/src/da.rs | 11 ++- task-impls/src/quorum_proposal.rs | 4 +- task-impls/src/quorum_proposal_recv.rs | 4 +- task-impls/src/quorum_vote.rs | 8 +- task-impls/src/request.rs | 4 +- task-impls/src/response.rs | 14 +-- task-impls/src/transactions.rs | 4 +- task-impls/src/vid.rs | 10 +-- testing/tests/tests_1/quorum_proposal_task.rs | 12 +-- types/src/consensus.rs | 87 +++++++++++++++---- 13 files changed, 127 insertions(+), 90 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 3595852791..335899875c 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -152,9 +152,7 @@ async fn validate_proposal_safety_and_liveness( let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus - .saved_leaves - .insert(proposed_leaf.commit(), proposed_leaf.clone()); + consensus.update_saved_leaves(proposed_leaf); Ok(()) } @@ -178,7 +176,7 @@ pub async fn create_and_send_proposal( ) { let consensus_read = consensus.read().await; let Some(Some(vid_share)) = consensus_read - .vid_shares + .vid_shares() .get(&view) .map(|shares| shares.get(&public_key)) else { @@ -332,7 +330,7 @@ pub(crate) async fn get_parent_leaf_and_state( let consensus = consensus.read().await; let parent_view_number = &consensus.high_qc().get_view_number(); - let parent_view = consensus.validated_state_map.get(parent_view_number).context( + let parent_view = consensus.validated_state_map().get(parent_view_number).context( format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", **parent_view_number) )?; @@ -351,7 +349,7 @@ pub(crate) async fn get_parent_leaf_and_state( } let leaf = consensus - .saved_leaves + .saved_leaves() .get(&leaf_commitment) .context("Failed to find high QC of parent")?; @@ -363,7 +361,7 @@ pub(crate) async fn get_parent_leaf_and_state( // Walk back until we find a decide if !reached_decided { debug!("We have not reached decide from view {:?}", cur_view); - while let Some(next_parent_leaf) = consensus.saved_leaves.get(&next_parent_hash) { + while let Some(next_parent_leaf) = consensus.saved_leaves().get(&next_parent_hash) { if next_parent_leaf.get_view_number() <= consensus.last_decided_view { break; } @@ -639,7 +637,7 @@ pub async fn handle_quorum_proposal_recv> ConsensusTaskState } let mut consensus = self.consensus.write().await; - consensus.update_high_qc_if_new(qc.clone()); + if let Err(e) = consensus.update_high_qc(qc.clone()) { + tracing::trace!("{e:?}"); + } drop(consensus); debug!( @@ -540,8 +542,7 @@ impl> ConsensusTaskState self.consensus .write() .await - .saved_da_certs - .insert(view, cert.clone()); + .update_saved_da_certs(view, cert.clone()); let Some(proposal) = self.current_proposal.clone() else { return; }; @@ -578,10 +579,7 @@ impl> ConsensusTaskState self.consensus .write() .await - .vid_shares - .entry(view) - .or_default() - .insert(disperse.data.recipient_key.clone(), disperse.clone()); + .update_vid_shares(view, disperse.clone()); if disperse.data.recipient_key != self.public_key { return; } diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index c7f2be2e41..de0bc9436e 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -99,7 +99,9 @@ pub(crate) async fn update_view( ); } let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus.update_view_if_new(new_view); + if let Err(e) = consensus.update_view(new_view) { + tracing::trace!("{e:?}"); + } tracing::trace!("View updated successfully"); Ok(()) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index bc52039b69..26792a87a3 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -199,9 +199,14 @@ impl, A: ConsensusApi + // Ensure this view is in the view map for garbage collection, but do not overwrite if // there is already a view there: the replica task may have inserted a `Leaf` view which // contains strictly more information. - consensus.validated_state_map.entry(view).or_insert(View { - view_inner: ViewInner::DA { payload_commitment }, - }); + if !consensus.validated_state_map().contains_key(&view) { + consensus.update_validated_state_map( + view, + View { + view_inner: ViewInner::DA { payload_commitment }, + }, + ); + } // Record the payload we have promised to make available. consensus diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index babd1bc785..ec152b54d7 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -535,7 +535,9 @@ impl> QuorumProposalTaskState> QuorumProposalRecvTaskState< let view = current_proposal.get_view_number(); self.cancel_tasks(proposal.data.get_view_number()).await; let consensus = self.consensus.read().await; - let Some(vid_shares) = consensus.vid_shares.get(&view) else { + let Some(vid_shares) = consensus.vid_shares().get(&view) else { debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", view @@ -150,7 +150,7 @@ impl> QuorumProposalRecvTaskState< return; }; let Some(da_cert) = consensus - .saved_da_certs + .saved_da_certs() .get(¤t_proposal.get_view_number()) else { debug!( diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index b4c80d776e..94bb5890b5 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -432,8 +432,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState, Ver: StaticVersionType + 'st /// Creates the srequest structures for all types that are needed. async fn build_requests(&self, view: TYPES::Time, _: Ver) -> Vec> { let mut reqs = Vec::new(); - if !self.state.read().await.vid_shares.contains_key(&view) { + if !self.state.read().await.vid_shares().contains_key(&view) { reqs.push(RequestKind::VID(view, self.public_key.clone())); } // TODO request other things @@ -266,7 +266,7 @@ impl> DelayedRequester { async fn cancel_vid(&self, req: &VidRequest) -> bool { let view = req.0; let state = self.state.read().await; - state.vid_shares.contains_key(&view) && state.cur_view() > view + state.vid_shares().contains_key(&view) && state.cur_view() > view } /// Transform a response into a `HotShotEvent` diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 271600b231..8fc22298c5 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -128,7 +128,7 @@ impl NetworkResponseState { ) -> Option>> { let consensus = self.consensus.upgradable_read().await; let contained = consensus - .vid_shares + .vid_shares() .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { @@ -138,19 +138,13 @@ impl NetworkResponseState { let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; for share in shares { - let s = share.clone(); - let key: ::SignatureKey = s.recipient_key; if let Some(prop) = share.to_proposal(&self.private_key) { - consensus - .vid_shares - .entry(view) - .or_default() - .insert(key, prop); + consensus.update_vid_shares(view, prop); } } - return consensus.vid_shares.get(&view)?.get(key).cloned(); + return consensus.vid_shares().get(&view)?.get(key).cloned(); } - consensus.vid_shares.get(&view)?.get(key).cloned() + consensus.vid_shares().get(&view)?.get(key).cloned() } /// Handle the request contained in the message. Returns the response we should send diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index bb59ef826a..d1ff7d4db8 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -224,13 +224,13 @@ impl< while prev_view != TYPES::Time::genesis() { if let Some(commitment) = consensus - .validated_state_map + .validated_state_map() .get(&prev_view) .and_then(|view| match view.view_inner { // For a view for which we have a Leaf stored ViewInner::DA { payload_commitment } => Some(payload_commitment), ViewInner::Leaf { leaf, .. } => consensus - .saved_leaves + .saved_leaves() .get(&leaf) .map(Leaf::get_payload_commitment), ViewInner::Failed => None, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 58368e3116..4609037973 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -75,14 +75,8 @@ impl, A: ConsensusApi + let shares = VidDisperseShare::from_vid_disperse(vid_disperse.clone()); let mut consensus = self.consensus.write().await; for share in shares { - let s = share.clone(); - let key: ::SignatureKey = s.recipient_key; - if let Some(prop) = share.to_proposal(&self.private_key) { - consensus - .vid_shares - .entry(*view_number) - .or_default() - .insert(key, prop); + if let Some(disperse) = share.to_proposal(&self.private_key) { + consensus.update_vid_shares(*view_number, disperse); } } diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 74895d4427..bfae7ca062 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -56,11 +56,7 @@ async fn insert_vid_shares_for_view( // `create_and_send_proposal` depends on the `vid_shares` obtaining a vid dispersal. // to avoid needing to spin up the vote task, we can just insert it in here. - consensus - .vid_shares - .entry(view) - .or_default() - .insert(vid.1, vid.0[0].clone()); + consensus.update_vid_shares(view, vid.0[0].clone()); } #[cfg(test)] @@ -164,7 +160,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // First, insert a parent view whose leaf commitment will be returned in the lower function // call. - consensus.validated_state_map.insert( + consensus.update_validated_state_map( ViewNumber::new(2), View { view_inner: ViewInner::Leaf { @@ -177,9 +173,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // Match an entry into the saved leaves for the parent commitment, returning the generated leaf // for this call. - consensus - .saved_leaves - .insert(leaves[1].get_parent_commitment(), leaves[1].clone()); + consensus.update_saved_leaves(leaves[1].clone()); // Release the write lock before proceeding with the test drop(consensus); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 3ab5aa2551..9d04b71c9a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -5,7 +5,8 @@ use std::{ sync::{Arc, Mutex}, }; -use committable::Commitment; +use anyhow::{ensure, Result}; +use committable::{Commitment, Committable}; use displaydoc::Display; use tracing::{debug, error}; @@ -43,14 +44,14 @@ pub type VidShares = BTreeMap< #[derive(custom_debug::Debug)] pub struct Consensus { /// The validated states that are currently loaded in memory. - pub validated_state_map: BTreeMap>, + validated_state_map: BTreeMap>, /// All the VID shares we've received for current and future views. - pub vid_shares: VidShares, + vid_shares: VidShares, /// All the DA certs we've received for current and future views. /// view -> DA cert - pub saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. cur_view: TYPES::Time, @@ -64,7 +65,7 @@ pub struct Consensus { /// Map of leaf hash -> leaf /// - contains undecided leaves /// - includes the MOST RECENT decided leaf - pub saved_leaves: CommitmentMap>, + saved_leaves: CommitmentMap>, /// Saved payloads. /// @@ -300,24 +301,76 @@ impl Consensus { self.cur_view } - /// Update the current view. - pub fn update_view_if_new(&mut self, view_number: TYPES::Time) { - if view_number > self.cur_view { - self.cur_view = view_number; - } - } - /// Get the high QC. pub fn high_qc(&self) -> &QuorumCertificate { &self.high_qc } + /// Get the validated state map. + pub fn validated_state_map(&self) -> &BTreeMap> { + &self.validated_state_map + } + + /// Get the saved leaves. + pub fn saved_leaves(&self) -> &CommitmentMap> { + &self.saved_leaves + } + + /// Get the vid shares. + pub fn vid_shares(&self) -> &VidShares { + &self.vid_shares + } + + /// Get the saved DA certs. + pub fn saved_da_certs(&self) -> &HashMap> { + &self.saved_da_certs + } + + /// Update the current view. + /// # Errors + /// Can return an error when the new view_number is not higher than the existing view number. + pub fn update_view(&mut self, view_number: TYPES::Time) -> Result<()> { + ensure!(view_number > self.cur_view); + self.cur_view = view_number; + Ok(()) + } + + /// Update the validated state map with a new view_number/view combo. + pub fn update_validated_state_map(&mut self, view_number: TYPES::Time, view: View) { + self.validated_state_map.insert(view_number, view); + } + + /// Update the saved leaves with a new leaf. + pub fn update_saved_leaves(&mut self, leaf: Leaf) { + self.saved_leaves.insert(leaf.commit(), leaf); + } + /// Update the high QC if given a newer one. - pub fn update_high_qc_if_new(&mut self, high_qc: QuorumCertificate) { - if high_qc.view_number > self.high_qc.view_number { - debug!("Updating high QC"); - self.high_qc = high_qc; - } + /// # Errors + /// Can return an error when the provided high_qc is not newer than the existing entry. + pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { + ensure!(high_qc.view_number > self.high_qc.view_number); + debug!("Updating high QC"); + self.high_qc = high_qc; + + Ok(()) + } + + /// Add a new entry to the vid_shares map. + pub fn update_vid_shares( + &mut self, + view_number: TYPES::Time, + disperse: Proposal>, + ) { + self.vid_shares + .entry(view_number) + .or_default() + .insert(disperse.data.recipient_key.clone(), disperse); + } + + /// Add a new entry to the da_certs map. + pub fn update_saved_da_certs(&mut self, view_number: TYPES::Time, cert: DACertificate) { + self.saved_da_certs.insert(view_number, cert); } /// gather information from the parent chain of leaves From 09f1957a32f9428a1dea5cd578973f10462c1936 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 7 May 2024 22:41:46 -0400 Subject: [PATCH 1020/1393] Move url to workspace and enabled serde feature (#3117) --- types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/Cargo.toml b/types/Cargo.toml index f5b6f7429d..77239fee5a 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -52,7 +52,7 @@ tagged-base64 = { workspace = true } vbs = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } -url = "2.5.0" +url = { workspace = true } [dev-dependencies] serde_json = { workspace = true } From 8a9605376e08a833d1e7917f7c30b3d7a8be998d Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 8 May 2024 09:31:51 -0400 Subject: [PATCH 1021/1393] update the CDN (again) (#3122) --- hotshot/Cargo.toml | 1 + hotshot/src/traits/networking/push_cdn_network.rs | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index a23cbc3f9e..1b0a387056 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -57,6 +57,7 @@ jf-primitives.workspace = true hotshot-orchestrator = { path = "../orchestrator" } blake3.workspace = true sha2 = { workspace = true } +num_enum = "0.7" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index a64dd6ff49..e7b61dafba 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -11,7 +11,7 @@ use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ connection::{protocols::Tcp, NoMiddleware, TrustedMiddleware, UntrustedMiddleware}, - def::{ConnectionDef, RunDef}, + def::{ConnectionDef, RunDef, Topic as TopicTrait}, discovery::{Embedded, Redis}, }; #[cfg(feature = "hotshot-testing")] @@ -44,6 +44,7 @@ use hotshot_types::{ utils::bincode_opts, BoxSyncFuture, }; +use num_enum::{IntoPrimitive, TryFromPrimitive}; #[cfg(feature = "hotshot-testing")] use rand::{rngs::StdRng, RngCore, SeedableRng}; use tracing::{error, warn}; @@ -101,6 +102,7 @@ impl RunDef for ProductionDef { type User = UserDef; type Broker = BrokerDef; type DiscoveryClientType = Redis; + type Topic = Topic; } /// The user definition for the Push CDN. @@ -139,6 +141,7 @@ impl RunDef for TestingDef { type User = UserDef; type Broker = BrokerDef; type DiscoveryClientType = Embedded; + type Topic = Topic; } /// A communication channel to the Push CDN, which is a collection of brokers and a marshal @@ -155,6 +158,7 @@ pub struct PushCdnNetwork { /// The enum for the topics we can subscribe to in the Push CDN #[repr(u8)] +#[derive(IntoPrimitive, TryFromPrimitive, Clone, PartialEq, Eq)] pub enum Topic { /// The global topic Global = 0, @@ -162,6 +166,10 @@ pub enum Topic { DA = 1, } +/// Implement the `TopicTrait` for our `Topic` enum. We need this to filter +/// topics that are not implemented at the application level. +impl TopicTrait for Topic {} + impl PushCdnNetwork { /// Create a new `PushCdnNetwork` (really a client) from a marshal endpoint, a list of initial /// topics we are interested in, and our wrapped keypair that we use to authenticate with the From 1084946040447391a6d6e8d29cfe88f22e1ca203 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Wed, 8 May 2024 13:11:38 -0400 Subject: [PATCH 1022/1393] Improve view timeout log (#3131) * Improve view timeout log * Include pub key of leader for that view * Put relevant information in attributes so it is easier to parse and query * Add leader pub key mnemonic to view timeout log --- task-impls/Cargo.toml | 1 + task-impls/src/view_sync.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 3a80ea8691..32726004e6 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -18,6 +18,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } +cdn-proto = { workspace = true } chrono = "0.4" committable = { workspace = true } either = { workspace = true } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index aca66bfa1b..5068442734 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -470,9 +470,13 @@ impl< } self.num_timeouts_tracked += 1; + let leader = self.membership.get_leader(view_number); error!( - "Num timeouts tracked since last view change is {}. View {} timed out", - self.num_timeouts_tracked, *view_number + %leader, + leader_mnemonic = cdn_proto::mnemonic(&leader), + view_number = *view_number, + num_timeouts_tracked = self.num_timeouts_tracked, + "view timed out", ); if self.num_timeouts_tracked >= 3 { From fd003260b8c4a7fb7cc8d3e00343eaef8dea0e34 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Thu, 9 May 2024 08:18:20 -0500 Subject: [PATCH 1023/1393] Change `BuilderSignatureKey` bound to `DeserializeOwned` (#3134) Co-authored-by: tbro --- types/src/traits/signature_key.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 3c1ac230a6..3d8fccc71e 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -7,7 +7,7 @@ use std::{ use bitvec::prelude::*; use ethereum_types::U256; use jf_primitives::{errors::PrimitivesError, vid::VidScheme}; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tagged_base64::TaggedBase64; use super::EncodeBytes; @@ -154,7 +154,7 @@ pub trait BuilderSignatureKey: + Debug + Hash + Serialize - + for<'a> Deserialize<'a> + + DeserializeOwned + PartialEq + Eq + PartialOrd From cb96fc434c02972ac7b6bcd4608ed50d3139d095 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 9 May 2024 20:26:14 +0200 Subject: [PATCH 1024/1393] Cycle through recipients until ready to cancel (#3072) * Cycle through recipients until ready to cancel * Finish `DelayedRequester` tasks when `NetworkRequestState` finishes All tasks now finish cooperatively * Fix getter * Add docs * Bring back task registry * Simplify the flags * Remove unnecessary mutability --- hotshot/src/tasks/task_state.rs | 3 +- .../src/traits/networking/combined_network.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 2 +- hotshot/src/types/handle.rs | 8 +++++ task-impls/src/network.rs | 4 +-- task-impls/src/request.rs | 33 +++++++++++++++---- types/src/traits/network.rs | 2 +- 7 files changed, 42 insertions(+), 12 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 3fab51de3d..e1bc5b1497 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,7 +1,7 @@ use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, - sync::Arc, + sync::{atomic::AtomicBool, Arc}, }; use async_trait::async_trait; @@ -49,6 +49,7 @@ impl, V: StaticVersionType> Create private_key: handle.private_key().clone(), _phantom: PhantomData, id: handle.hotshot.id, + shutdown_flag: Arc::new(AtomicBool::new(false)), } } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 25a2eba845..e828354ef3 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -285,7 +285,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> async fn request_data( &self, request: Message, - recipient: TYPES::SignatureKey, + recipient: &TYPES::SignatureKey, bind_version: VER, ) -> Result, NetworkError> { self.secondary() diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 4d78727512..25f1d21ef0 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -794,7 +794,7 @@ impl ConnectedNetwork for Libp2p async fn request_data( &self, request: M, - recipient: K, + recipient: &K, bind_version: VER, ) -> Result, NetworkError> { self.wait_for_ready().await; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 30782bc391..e93cfd42f7 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -144,6 +144,14 @@ impl + 'static> SystemContextHandl { boxed_sync(async move { self.hotshot.networks.shut_down_networks().await; + // this is required because `SystemContextHandle` holds an inactive receiver and + // `broadcast_direct` below can wait indefinitely + self.internal_event_stream.0.set_await_active(false); + let _ = self + .internal_event_stream + .0 + .broadcast_direct(Arc::new(HotShotEvent::Shutdown)) + .await; self.registry.shutdown().await; }) } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index dffe342a84..96d1b84a90 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -20,7 +20,7 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; use crate::{ @@ -234,7 +234,7 @@ impl< fn should_shutdown(event: &Self::Event) -> bool { if matches!(event.as_ref(), HotShotEvent::Shutdown) { - error!("Network Task received Shutdown event"); + info!("Network Task received Shutdown event"); return true; } false diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 4f97a42364..cd63be6e3c 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,3 +1,4 @@ +use std::sync::atomic::{AtomicBool, Ordering}; use std::{marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::Sender; @@ -58,6 +59,8 @@ pub struct NetworkRequestState< pub _phantom: PhantomData, /// The node's id pub id: u64, + /// A flag indicating that `HotShotEvent::Shutdown` has been received + pub shutdown_flag: Arc, } /// Alias for a signature @@ -93,7 +96,10 @@ impl, Ver: StaticVersionType + 'st } None } - HotShotEvent::Shutdown => Some(HotShotTaskCompleted), + HotShotEvent::Shutdown => { + task.state().set_shutdown_flag(); + Some(HotShotTaskCompleted) + } _ => None, } } @@ -110,6 +116,10 @@ impl, Ver: StaticVersionType + 'st | HotShotEvent::ViewChange(_) ) } + + async fn shutdown(&mut self) { + self.set_shutdown_flag(); + } } impl, Ver: StaticVersionType + 'static> @@ -165,6 +175,7 @@ impl, Ver: StaticVersionType + 'st sender, delay: self.delay, recipients, + shutdown_flag: Arc::clone(&self.shutdown_flag), }; let Ok(data) = Serializer::::serialize(&request) else { tracing::error!("Failed to serialize request!"); @@ -178,6 +189,11 @@ impl, Ver: StaticVersionType + 'st debug!("Requesting data: {:?}", request); async_spawn(requester.run::(request, signature)); } + + /// Signals delayed requesters to finish + fn set_shutdown_flag(&self) { + self.shutdown_flag.store(true, Ordering::Relaxed); + } } /// A short lived task that waits a delay and starts trying peers until it completes @@ -194,6 +210,8 @@ struct DelayedRequester> { delay: Duration, /// The peers we will request in a random order recipients: Vec, + /// A flag indicating that `HotShotEvent::Shutdown` has been received + shutdown_flag: Arc, } /// Wrapper for the info in a VID request @@ -203,7 +221,7 @@ impl> DelayedRequester { /// Wait the delay, then try to complete the request. Iterates over peers /// until the request is completed, or the data is no longer needed. async fn run( - mut self, + self, request: RequestKind, signature: Signature, ) { @@ -221,18 +239,19 @@ impl> DelayedRequester { /// Handle sending a VID Share request, runs the loop until the data exists async fn do_vid( - &mut self, + &self, req: VidRequest, signature: Signature, ) { let message = make_vid(&req, signature); + let mut recipients_it = self.recipients.iter().cycle(); - while !self.recipients.is_empty() && !self.cancel_vid(&req).await { + while !self.cancel_vid(&req).await { match async_timeout( REQUEST_TIMEOUT, self.network.request_data::( message.clone(), - self.recipients.pop().unwrap(), + recipients_it.next().unwrap(), Ver::instance(), ), ) @@ -266,7 +285,9 @@ impl> DelayedRequester { async fn cancel_vid(&self, req: &VidRequest) -> bool { let view = req.0; let state = self.state.read().await; - state.vid_shares().contains_key(&view) && state.cur_view() > view + self.shutdown_flag.load(Ordering::Relaxed) + || state.vid_shares().contains_key(&view) + || state.cur_view() > view } /// Transform a response into a `HotShotEvent` diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 60a934c144..10a3f6965f 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -316,7 +316,7 @@ pub trait ConnectedNetwork: async fn request_data( &self, _request: M, - _recipient: K, + _recipient: &K, _bind_version: VER, ) -> Result, NetworkError> { Err(NetworkError::UnimplementedFeature) From bfa57ddaddcc86655c05a9bc72d991c362a11bf7 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 9 May 2024 16:30:54 -0400 Subject: [PATCH 1025/1393] Bf/debug (#3136) * Cycle through recipients until ready to cancel * Finish `DelayedRequester` tasks when `NetworkRequestState` finishes All tasks now finish cooperatively * Fix getter * Add docs * reduce locking * fix * properly update leaves * less locking, don't propse after vid recv * add toolchain back * fmt * Bring back task registry * regular proposal spawning * Simplify the flags * logging, lint, fmt * Remove unnecessary mutability * revert logging change * Reduce logging to original level * comment out dep tests --------- Co-authored-by: Lukasz Rzasik --- task-impls/src/consensus/helpers.rs | 131 ++++++++++++++-------------- task-impls/src/consensus/mod.rs | 101 ++------------------- task-impls/src/da.rs | 1 - task-impls/src/vid.rs | 1 + 4 files changed, 77 insertions(+), 157 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 335899875c..c08c7bfa0f 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -95,19 +95,19 @@ async fn validate_proposal_safety_and_liveness( // passes. // Liveness check. - let consensus = consensus.upgradable_read().await; - let liveness_check = justify_qc.get_view_number() > consensus.locked_view; + let read_consensus = consensus.read().await; + let liveness_check = justify_qc.get_view_number() > read_consensus.locked_view; // Safety check. // Check if proposal extends from the locked leaf. - let outcome = consensus.visit_leaf_ancestors( + let outcome = read_consensus.visit_leaf_ancestors( justify_qc.get_view_number(), - Terminator::Inclusive(consensus.locked_view), + Terminator::Inclusive(read_consensus.locked_view), false, |leaf, _, _| { // if leaf view no == locked view no then we're done, report success by // returning true - leaf.get_view_number() != consensus.locked_view + leaf.get_view_number() != read_consensus.locked_view }, ); let safety_check = outcome.is_ok(); @@ -124,7 +124,7 @@ async fn validate_proposal_safety_and_liveness( .await; } - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus.high_qc(), proposal.data.clone(), consensus.locked_view) + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view) }); // We accept the proposal, notify the application layer @@ -150,10 +150,6 @@ async fn validate_proposal_safety_and_liveness( ) .await; - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - - consensus.update_saved_leaves(proposed_leaf); - Ok(()) } @@ -613,7 +609,7 @@ pub async fn handle_quorum_proposal_recv>>, task_state: &mut TemporaryProposalValidatedCombinedType, ) -> Result<()> { - let consensus = task_state.consensus.upgradable_read().await; + let consensus = task_state.consensus.read().await; let view = proposal.get_view_number(); #[cfg(not(feature = "dependency-tasks"))] { @@ -927,6 +923,7 @@ pub async fn handle_quorum_proposal_validated = if new_decide_reached { included_txns @@ -934,7 +931,7 @@ pub async fn handle_quorum_proposal_validated> ConsensusTaskState self.spawned_tasks.entry(view).or_default().push(handle); } - #[cfg(not(feature = "dependency-tasks"))] - /// Tries to vote then Publishes a proposal - fn vote_and_publish_proposal( - &mut self, - vote_view: TYPES::Time, - propose_view: TYPES::Time, - proposal: QuorumProposal, - event_stream: Sender>>, - ) { - use crate::consensus::helpers::publish_proposal_from_commitment_and_metadata; - - use self::helpers::publish_proposal_from_upgrade_cert; - - let upgrade = self.decided_upgrade_cert.clone(); - let pub_key = self.public_key.clone(); - let priv_key = self.private_key.clone(); - let consensus = Arc::clone(&self.consensus); - let storage = Arc::clone(&self.storage); - let quorum_mem = Arc::clone(&self.quorum_membership); - let committee_mem = Arc::clone(&self.committee_membership); - let instance_state = Arc::clone(&self.instance_state); - let commitment_and_metadata = self.payload_commitment_and_metadata.clone(); - let cur_view = self.cur_view; - let sender = event_stream.clone(); - let decided_upgrade_cert = self.decided_upgrade_cert.clone(); - let delay = self.round_start_delay; - let formed_upgrade_certificate = self.formed_upgrade_certificate.clone(); - let proposal_cert = self.proposal_cert.clone(); - let handle = async_spawn(async move { - update_state_and_vote_if_able::( - vote_view, - proposal, - pub_key.clone(), - Arc::clone(&consensus), - storage, - Arc::clone(&quorum_mem), - Arc::clone(&instance_state), - (priv_key.clone(), upgrade, committee_mem, event_stream), - ) - .await; - if let Some(upgrade_cert) = decided_upgrade_cert { - if let Err(e) = publish_proposal_from_upgrade_cert( - cur_view, - propose_view, - sender, - quorum_mem, - pub_key, - priv_key, - consensus, - upgrade_cert, - delay, - instance_state, - ) - .await - { - debug!("Couldn't propose with Error: {}", e); - } - } else if let Err(e) = publish_proposal_from_commitment_and_metadata( - cur_view, - propose_view, - sender, - quorum_mem, - pub_key, - priv_key, - consensus, - delay, - formed_upgrade_certificate, - decided_upgrade_cert, - commitment_and_metadata, - proposal_cert, - instance_state, - ) - .await - { - debug!("Couldn't propose with Error: {}", e); - } - }); - - self.spawned_tasks - .entry(propose_view) - .or_default() - .push(handle); - } - /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] pub async fn handle( @@ -367,6 +283,7 @@ impl> ConsensusTaskState match event.as_ref() { #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { + debug!("proposal recv view: {:?}", proposal.data.get_view_number()); match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self) .await { @@ -381,6 +298,7 @@ impl> ConsensusTaskState } #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalValidated(proposal, _) => { + debug!("proposal validated view: {:?}", proposal.get_view_number()); if let Err(e) = handle_quorum_proposal_validated(proposal, event_stream.clone(), self).await { @@ -502,12 +420,9 @@ impl> ConsensusTaskState error!("Failed to store High QC of QC we formed. Error: {:?}", e); } - let mut consensus = self.consensus.write().await; - if let Err(e) = consensus.update_high_qc(qc.clone()) { - tracing::trace!("{e:?}"); + if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { + tracing::error!("{e:?}"); } - - drop(consensus); debug!( "Attempting to publish proposal after forming a QC for view {}", *qc.view_number @@ -583,7 +498,7 @@ impl> ConsensusTaskState if disperse.data.recipient_key != self.public_key { return; } - self.spawn_vote_task(view, event_stream); + self.spawn_vote_task(view, event_stream.clone()); } HotShotEvent::ViewChange(new_view) => { let new_view = *new_view; @@ -718,7 +633,7 @@ impl> ConsensusTaskState && self.consensus.read().await.high_qc().get_view_number() + 1 == view { if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { - debug!("Failed to propose; error = {e:?}"); + error!("Failed to propose; error = {e:?}"); }; } @@ -787,7 +702,9 @@ impl> ConsensusTaskState "Attempting to publish proposal after voting; now in view: {}", *new_view ); - let _ = self.publish_proposal(new_view, event_stream.clone()).await; + if let Err(e) = self.publish_proposal(new_view, event_stream.clone()).await { + debug!("failed to propose e = {:?}", e); + } } if proposal.get_view_number() <= vote.get_view_number() { self.current_proposal = None; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 26792a87a3..67e8cb382f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -335,7 +335,6 @@ impl, A: ConsensusApi + | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown | HotShotEvent::BlockRecv(_, _, _, _) - | HotShotEvent::Timeout(_) | HotShotEvent::ViewChange(_) | HotShotEvent::DAProposalValidated(_, _) ) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 4609037973..1cbcbdf44f 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -79,6 +79,7 @@ impl, A: ConsensusApi + consensus.update_vid_shares(*view_number, disperse); } } + drop(consensus); // send the commitment and metadata to consensus for block building broadcast_event( From 199838d38bd17570b7ff1e78fba8e6843cf8af09 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 10 May 2024 11:52:41 +0800 Subject: [PATCH 1026/1393] [CX_CLEANUP] - Make all fields in `Consensus` private to avoid accidental state update (#3130) * Make most fields private * Add error messages * Update loggings * fmt --- examples/infra/mod.rs | 2 +- hotshot/src/lib.rs | 4 +- task-impls/src/consensus/helpers.rs | 53 +++++++----- task-impls/src/consensus/view_change.rs | 4 +- task-impls/src/da.rs | 10 ++- task-impls/src/response.rs | 2 +- types/src/consensus.rs | 106 +++++++++++++++++++++--- 7 files changed, 136 insertions(+), 45 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 86a40d6313..050799c86c 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -534,7 +534,7 @@ pub trait RunDA< } let consensus_lock = context.hotshot.get_consensus(); let consensus = consensus_lock.read().await; - let total_num_views = usize::try_from(consensus.locked_view.get_u64()).unwrap(); + let total_num_views = usize::try_from(consensus.locked_view().get_u64()).unwrap(); // `failed_num_views` could include uncommitted views let failed_num_views = total_num_views - num_successful_commits; // When posting to the orchestrator, note that the total number of views also include un-finalized views. diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 233b8b0b74..ffac267cd9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -271,11 +271,11 @@ impl> SystemContext { validated_state_map, anchored_leaf.get_view_number(), anchored_leaf.get_view_number(), - saved_leaves, - saved_payloads, // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 anchored_leaf.get_view_number(), + saved_leaves, + saved_payloads, initializer.high_qc, Arc::clone(&consensus_metrics), ); diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index c08c7bfa0f..ad48527d38 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -96,18 +96,18 @@ async fn validate_proposal_safety_and_liveness( // Liveness check. let read_consensus = consensus.read().await; - let liveness_check = justify_qc.get_view_number() > read_consensus.locked_view; + let liveness_check = justify_qc.get_view_number() > read_consensus.locked_view(); // Safety check. // Check if proposal extends from the locked leaf. let outcome = read_consensus.visit_leaf_ancestors( justify_qc.get_view_number(), - Terminator::Inclusive(read_consensus.locked_view), + Terminator::Inclusive(read_consensus.locked_view()), false, |leaf, _, _| { // if leaf view no == locked view no then we're done, report success by // returning true - leaf.get_view_number() != read_consensus.locked_view + leaf.get_view_number() != read_consensus.locked_view() }, ); let safety_check = outcome.is_ok(); @@ -124,7 +124,7 @@ async fn validate_proposal_safety_and_liveness( .await; } - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view) + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) }); // We accept the proposal, notify the application layer @@ -228,10 +228,10 @@ pub async fn create_and_send_proposal( "Sending null proposal for view {:?}", proposed_leaf.get_view_number(), ); - if consensus.read().await.last_proposed_view >= view { + if let Err(e) = consensus.write().await.update_last_proposed_view(view) { + tracing::trace!("{e:?}"); return; } - consensus.write().await.last_proposed_view = view; async_sleep(Duration::from_millis(round_start_delay)).await; broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( @@ -349,7 +349,7 @@ pub(crate) async fn get_parent_leaf_and_state( .get(&leaf_commitment) .context("Failed to find high QC of parent")?; - let reached_decided = leaf.get_view_number() == consensus.last_decided_view; + let reached_decided = leaf.get_view_number() == consensus.last_decided_view(); let parent_leaf = leaf.clone(); let original_parent_hash = parent_leaf.commit(); let mut next_parent_hash = original_parent_hash; @@ -358,7 +358,7 @@ pub(crate) async fn get_parent_leaf_and_state( if !reached_decided { debug!("We have not reached decide from view {:?}", cur_view); while let Some(next_parent_leaf) = consensus.saved_leaves().get(&next_parent_hash) { - if next_parent_leaf.get_view_number() <= consensus.last_decided_view { + if next_parent_leaf.get_view_number() <= consensus.last_decided_view() { break; } next_parent_hash = next_parent_leaf.get_parent_commitment(); @@ -709,10 +709,10 @@ pub async fn handle_quorum_proposal_recv consensus_write.locked_view; + let liveness_check = justify_qc.get_view_number() > consensus_write.locked_view(); let high_qc = consensus_write.high_qc().clone(); - let locked_view = consensus_write.locked_view; + let locked_view = consensus_write.locked_view(); drop(consensus_write); @@ -812,8 +812,8 @@ pub async fn handle_quorum_proposal_validated> = None; - let mut new_anchor_view = consensus.last_decided_view; - let mut new_locked_view = consensus.locked_view; + let mut new_anchor_view = consensus.last_decided_view(); + let mut new_locked_view = consensus.locked_view(); let mut last_view_number_visited = view; let mut new_commit_reached: bool = false; let mut new_decide_reached = false; @@ -821,7 +821,7 @@ pub async fn handle_quorum_proposal_validated( // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. if usize::try_from(cur_view.get_u64()).unwrap() - > usize::try_from(consensus.last_decided_view.get_u64()).unwrap() + > usize::try_from(consensus.last_decided_view().get_u64()).unwrap() { consensus.metrics.number_of_views_since_last_decide.set( usize::try_from(cur_view.get_u64()).unwrap() - - usize::try_from(consensus.last_decided_view.get_u64()).unwrap(), + - usize::try_from(consensus.last_decided_view().get_u64()).unwrap(), ); } let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 67e8cb382f..3732bc8231 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -117,7 +117,7 @@ impl, A: ConsensusApi + .consensus .read() .await - .saved_payloads + .saved_payloads() .contains_key(&view) { warn!("Received DA proposal for view {:?} but we already have a payload for that view. Throwing it away", view); @@ -209,9 +209,11 @@ impl, A: ConsensusApi + } // Record the payload we have promised to make available. - consensus - .saved_payloads - .insert(view, Arc::clone(&proposal.data.encoded_transactions)); + if let Err(e) = consensus + .update_saved_payloads(view, Arc::clone(&proposal.data.encoded_transactions)) + { + tracing::trace!("{e:?}"); + } } HotShotEvent::DAVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 8fc22298c5..1cd4bcb4d6 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -132,7 +132,7 @@ impl NetworkResponseState { .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { - let txns = consensus.saved_payloads.get(&view)?; + let txns = consensus.saved_payloads().get(&view)?; let vid = calculate_vid_disperse(Arc::clone(txns), &Arc::clone(&self.quorum), view).await; let shares = VidDisperseShare::from_vid_disperse(vid); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 9d04b71c9a..2e5ed666cd 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -57,10 +57,13 @@ pub struct Consensus { cur_view: TYPES::Time, /// View we proposed in last. To prevent duplicate proposals - pub last_proposed_view: TYPES::Time, + last_proposed_view: TYPES::Time, /// last view had a successful decide event - pub last_decided_view: TYPES::Time, + last_decided_view: TYPES::Time, + + /// The `locked_qc` view number + locked_view: TYPES::Time, /// Map of leaf hash -> leaf /// - contains undecided leaves @@ -70,10 +73,7 @@ pub struct Consensus { /// Saved payloads. /// /// Encoded transactions for every view if we got a payload for that view. - pub saved_payloads: BTreeMap>, - - /// The `locked_qc` view number - pub locked_view: TYPES::Time, + saved_payloads: BTreeMap>, /// the highqc per spec high_qc: QuorumCertificate, @@ -87,10 +87,10 @@ pub struct Consensus { /// /// Certificates received from other nodes will get reattached regardless of this fields, /// since they will be present in the leaf we propose off of. - pub dontuse_formed_upgrade_certificate: Option>, + dontuse_formed_upgrade_certificate: Option>, /// most recent decided upgrade certificate - pub dontuse_decided_upgrade_cert: Option>, + dontuse_decided_upgrade_cert: Option>, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces @@ -272,10 +272,10 @@ impl Consensus { pub fn new( validated_state_map: BTreeMap>, cur_view: TYPES::Time, + locked_view: TYPES::Time, last_decided_view: TYPES::Time, saved_leaves: CommitmentMap>, saved_payloads: BTreeMap>, - locked_view: TYPES::Time, high_qc: QuorumCertificate, metrics: Arc, ) -> Self { @@ -286,9 +286,9 @@ impl Consensus { cur_view, last_decided_view, last_proposed_view: last_decided_view, + locked_view, saved_leaves, saved_payloads, - locked_view, high_qc, metrics, dontuse_decided_upgrade_cert: None, @@ -301,6 +301,16 @@ impl Consensus { self.cur_view } + /// Get the last decided view. + pub fn last_decided_view(&self) -> TYPES::Time { + self.last_decided_view + } + + /// Get the locked view. + pub fn locked_view(&self) -> TYPES::Time { + self.locked_view + } + /// Get the high QC. pub fn high_qc(&self) -> &QuorumCertificate { &self.high_qc @@ -316,6 +326,11 @@ impl Consensus { &self.saved_leaves } + /// Get the saved payloads. + pub fn saved_payloads(&self) -> &BTreeMap> { + &self.saved_payloads + } + /// Get the vid shares. pub fn vid_shares(&self) -> &VidShares { &self.vid_shares @@ -330,11 +345,53 @@ impl Consensus { /// # Errors /// Can return an error when the new view_number is not higher than the existing view number. pub fn update_view(&mut self, view_number: TYPES::Time) -> Result<()> { - ensure!(view_number > self.cur_view); + ensure!( + view_number > self.cur_view, + "New view isn't newer than the current view." + ); self.cur_view = view_number; Ok(()) } + /// Update the last proposed view. + /// + /// # Errors + /// Can return an error when the new view_number is not higher than the existing proposed view number. + pub fn update_last_proposed_view(&mut self, view_number: TYPES::Time) -> Result<()> { + ensure!( + view_number > self.last_proposed_view, + "New view isn't newer than the previously proposed view." + ); + self.last_proposed_view = view_number; + Ok(()) + } + + /// Update the last decided view. + /// + /// # Errors + /// Can return an error when the new view_number is not higher than the existing decided view number. + pub fn update_last_decided_view(&mut self, view_number: TYPES::Time) -> Result<()> { + ensure!( + view_number > self.last_decided_view, + "New view isn't newer than the previously decided view." + ); + self.last_decided_view = view_number; + Ok(()) + } + + /// Update the locked view. + /// + /// # Errors + /// Can return an error when the new view_number is not higher than the existing locked view number. + pub fn update_locked_view(&mut self, view_number: TYPES::Time) -> Result<()> { + ensure!( + view_number > self.locked_view, + "New view isn't newer than the previously locked view." + ); + self.locked_view = view_number; + Ok(()) + } + /// Update the validated state map with a new view_number/view combo. pub fn update_validated_state_map(&mut self, view_number: TYPES::Time, view: View) { self.validated_state_map.insert(view_number, view); @@ -345,11 +402,31 @@ impl Consensus { self.saved_leaves.insert(leaf.commit(), leaf); } + /// Update the saved payloads with a new encoded transaction. + /// + /// # Errors + /// Can return an error when there's an existing payload corresponding to the same view number. + pub fn update_saved_payloads( + &mut self, + view_number: TYPES::Time, + encoded_transaction: Arc<[u8]>, + ) -> Result<()> { + ensure!( + !self.saved_payloads.contains_key(&view_number), + "Payload with the same view already exists." + ); + self.saved_payloads.insert(view_number, encoded_transaction); + Ok(()) + } + /// Update the high QC if given a newer one. /// # Errors /// Can return an error when the provided high_qc is not newer than the existing entry. pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { - ensure!(high_qc.view_number > self.high_qc.view_number); + ensure!( + high_qc.view_number > self.high_qc.view_number, + "High QC with an equal or higher view exists." + ); debug!("Updating high QC"); self.high_qc = high_qc; @@ -373,6 +450,11 @@ impl Consensus { self.saved_da_certs.insert(view_number, cert); } + /// Update the most recent decided upgrade certificate. + pub fn update_dontuse_decided_upgrade_cert(&mut self, cert: Option>) { + self.dontuse_decided_upgrade_cert = cert; + } + /// gather information from the parent chain of leaves /// # Errors /// If the leaf or its ancestors are not found in storage From 73b6e92c99ebc0b924587e08149c988b483eaf28 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Fri, 10 May 2024 12:31:26 +0800 Subject: [PATCH 1027/1393] update jf deps (#3144) --- hotshot-qc/Cargo.toml | 10 ++++--- hotshot-qc/src/bit_vector.rs | 7 ++--- hotshot-qc/src/bit_vector_old.rs | 7 ++--- hotshot-qc/src/snarked/circuit.rs | 5 +--- hotshot-stake-table/Cargo.toml | 6 +++-- hotshot-stake-table/src/mt_based/config.rs | 3 ++- hotshot-stake-table/src/mt_based/internal.rs | 2 +- hotshot-stake-table/src/vec_based.rs | 11 ++++---- hotshot-stake-table/src/vec_based/config.rs | 2 +- hotshot/Cargo.toml | 2 +- task-impls/Cargo.toml | 3 ++- task-impls/src/consensus/mod.rs | 2 +- task-impls/src/helpers.rs | 2 +- task-impls/src/quorum_vote.rs | 2 +- testing/Cargo.toml | 3 ++- testing/src/task_helpers.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 2 +- testing/tests/tests_1/proposal_ordering.rs | 18 ++++++++++--- testing/tests/tests_1/quorum_proposal_task.rs | 2 +- testing/tests/tests_1/vid_task.rs | 2 +- types/Cargo.toml | 11 ++++---- types/src/data.rs | 4 +-- types/src/light_client.rs | 4 +-- types/src/qc.rs | 27 +++++++++---------- types/src/signature_key.rs | 20 ++++++-------- types/src/traits/block_contents.rs | 2 +- types/src/traits/qc.rs | 10 +++---- types/src/traits/signature_key.rs | 5 ++-- types/src/traits/stake_table.rs | 17 +----------- types/src/vid.rs | 24 ++++++++--------- 30 files changed, 102 insertions(+), 115 deletions(-) diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml index 094bd3e9d6..6ab2f9d0bf 100644 --- a/hotshot-qc/Cargo.toml +++ b/hotshot-qc/Cargo.toml @@ -15,8 +15,9 @@ ark-std = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } -hotshot-types = { workspace = true } -jf-primitives = { workspace = true } +hotshot-types = { path = "../types" } +jf-signature = { workspace = true } +jf-rescue = { workspace = true, features = ["gadgets"] } jf-relation = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true } @@ -28,7 +29,10 @@ hotshot-stake-table = { path = "../hotshot-stake-table" } [features] default = ["parallel"] std = ["ark-std/std"] -parallel = ["jf-primitives/parallel", "jf-utils/parallel"] +parallel = [ + "jf-utils/parallel", "jf-rescue/parallel", + "jf-signature/parallel", "jf-relation/parallel" +] [lints] workspace = true diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index 432c84d882..fff3faa8d2 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -16,10 +16,7 @@ use hotshot_types::traits::{ qc::QuorumCertificate, stake_table::{SnapshotVersion, StakeTableScheme}, }; -use jf_primitives::{ - errors::{PrimitivesError, PrimitivesError::ParameterError}, - signatures::AggregateableSignatureSchemes, -}; +use jf_signature::AggregateableSignatureSchemes; use serde::{Deserialize, Serialize}; use typenum::U32; @@ -203,7 +200,7 @@ where mod tests { use hotshot_stake_table::mt_based::StakeTable; use hotshot_types::traits::stake_table::StakeTableScheme; - use jf_primitives::signatures::{ + use jf_signature::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, SignatureScheme, }; diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 07dc988237..7e59161dcc 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -13,10 +13,7 @@ use bitvec::prelude::*; use ethereum_types::U256; use generic_array::GenericArray; use hotshot_types::traits::{qc::QuorumCertificate, signature_key::StakeTableEntryType}; -use jf_primitives::{ - errors::{PrimitivesError, PrimitivesError::ParameterError}, - signatures::AggregateableSignatureSchemes, -}; +use jf_signature::AggregateableSignatureSchemes; use serde::{Deserialize, Serialize}; use typenum::U32; @@ -193,7 +190,7 @@ where #[cfg(test)] mod tests { - use jf_primitives::signatures::{ + use jf_signature::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, SignatureScheme, }; diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index a1605d0685..6b299ac3ff 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -2,10 +2,6 @@ use ark_ff::PrimeField; use ark_std::{format, vec, vec::Vec}; -use jf_primitives::{ - circuit::rescue::RescueNativeGadget, - rescue::{sponge::RescueCRHF, RescueParameter}, -}; use jf_relation::{ errors::CircuitError, gadgets::{ @@ -17,6 +13,7 @@ use jf_relation::{ }, BoolVar, Circuit, PlonkCircuit, Variable, }; +use jf_rescue::{crhf::RescueCRHF, gadgets::RescueNativeGadget, RescueParameter}; /// Digest a list of verification keys and their associated stake amounts /// * `stack_amts` - stake amounts diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 40d7507c77..ef3d9bca62 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -15,7 +15,9 @@ ark-std = { workspace = true } digest = { workspace = true } ethereum-types = { workspace = true } hotshot-types = { path = "../types" } -jf-primitives = { workspace = true } +jf-crhf = { workspace = true } +jf-signature = { workspace = true, features = ["bls", "schnorr"] } +jf-rescue = { workspace = true } jf-utils = { workspace = true } serde = { workspace = true, features = ["rc"] } tagged-base64 = { workspace = true } @@ -26,7 +28,7 @@ rand_chacha = { workspace = true } [features] default = ["parallel"] std = ["ark-std/std", "ark-serialize/std", "ark-ff/std"] -parallel = ["jf-primitives/parallel", "jf-utils/parallel", "ark-ff/parallel"] +parallel = ["jf-utils/parallel", "ark-ff/parallel"] [lints] workspace = true diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs index 3048471c2c..6b720850a9 100644 --- a/hotshot-stake-table/src/mt_based/config.rs +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -1,7 +1,8 @@ //! Config file for stake table use ark_ff::PrimeField; use ark_std::vec; -use jf_primitives::{crhf::FixedLengthRescueCRHF, signatures::bls_over_bn254}; +use jf_rescue::crhf::FixedLengthRescueCRHF; +use jf_signature::bls_over_bn254; use crate::utils::ToFields; diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 01e9e78d56..d426c2c9a0 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -4,7 +4,7 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{hash::Hash, sync::Arc, vec, vec::Vec}; use ethereum_types::U256; use hotshot_types::traits::stake_table::StakeTableError; -use jf_primitives::crhf::CRHF; +use jf_crhf::CRHF; use jf_utils::canonical; use serde::{Deserialize, Serialize}; use tagged_base64::tagged; diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index bc4a345d38..a4e8426af3 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -4,10 +4,8 @@ use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; use digest::crypto_common::rand_core::CryptoRngCore; use ethereum_types::{U256, U512}; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; -use jf_primitives::{ - crhf::{VariableLengthRescueCRHF, CRHF}, - rescue::RescueParameter, -}; +use jf_crhf::CRHF; +use jf_rescue::{crhf::VariableLengthRescueCRHF, RescueParameter}; use serde::{Deserialize, Serialize}; use crate::{ @@ -385,8 +383,9 @@ mod tests { use ark_std::{rand::SeedableRng, vec::Vec}; use ethereum_types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; - use jf_primitives::signatures::{ - bls_over_bn254::BLSOverBN254CurveSignatureScheme, SchnorrSignatureScheme, SignatureScheme, + use jf_signature::{ + bls_over_bn254::BLSOverBN254CurveSignatureScheme, schnorr::SchnorrSignatureScheme, + SignatureScheme, }; use super::{ diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index c4341090f1..83ea7f8358 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -4,7 +4,7 @@ use ark_std::vec; /// Schnorr verification key as auxiliary information pub use hotshot_types::light_client::StateVerKey; /// BLS verification key as indexing key -pub use jf_primitives::signatures::bls_over_bn254::VerKey as QCVerKey; +pub use jf_signature::bls_over_bn254::VerKey as QCVerKey; use jf_utils::to_bytes; use crate::utils::ToFields; diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 1b0a387056..eb3fd6b052 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -53,7 +53,7 @@ surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } -jf-primitives.workspace = true +jf-signature.workspace = true hotshot-orchestrator = { path = "../orchestrator" } blake3.workspace = true sha2 = { workspace = true } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 32726004e6..120a8108e4 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -26,7 +26,8 @@ futures = { workspace = true } hotshot-task = { path = "../task" } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } -jf-primitives = { workspace = true } +jf-signature = { workspace = true } +jf-vid = { workspace = true } rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index cddc15485b..99dfd59e13 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -32,7 +32,7 @@ use hotshot_types::{ #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::{traits::storage::Storage, vote::Certificate}; #[cfg(not(feature = "dependency-tasks"))] -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; #[cfg(not(feature = "dependency-tasks"))] diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index cc6d8e07dd..b430303ef6 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -8,7 +8,7 @@ use hotshot_types::{ traits::{election::Membership, node_implementation::NodeType}, vid::{vid_scheme, VidPrecomputeData}, }; -use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; +use jf_vid::{precomputable::Precomputable, VidScheme}; #[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn_blocking, JoinHandle}; diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 94bb5890b5..dfa9e81719 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -30,7 +30,7 @@ use hotshot_types::{ vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; #[cfg(feature = "dependency-tasks")] use std::marker::PhantomData; use std::{collections::HashMap, sync::Arc}; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 63f2a58d43..ad2757fdb5 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -31,7 +31,8 @@ hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } -jf-primitives = { workspace = true } +jf-signature = { workspace = true } +jf-vid = { workspace = true } portpicker = { workspace = true } rand = { workspace = true } serde = { workspace = true } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 02442b5648..6f416ca0ba 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -30,7 +30,7 @@ use hotshot_types::{ vid::{vid_scheme, VidCommitment, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, }; -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; use serde::Serialize; use crate::test_builder::TestDescription; diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 4d18a38212..5c9594a04c 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -26,7 +26,7 @@ use hotshot_types::{ traits::{election::Membership, node_implementation::ConsensusTime}, utils::BuilderCommitment, }; -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; use sha2::Digest; #[cfg(test)] diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index ec53f21557..2467493001 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -20,7 +20,7 @@ use hotshot_types::{ traits::{election::Membership, node_implementation::ConsensusTime}, utils::BuilderCommitment, }; -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; use sha2::Digest; /// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). @@ -99,15 +99,25 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let mut view_2_inputs = permute_input_with_index_order(inputs, input_permutation); view_2_inputs.insert(0, DACertificateRecv(dacs[1].clone())); - view_2_inputs.insert(0, VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key()))); - view_2_inputs.insert(0, VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key()))); + view_2_inputs.insert( + 0, + VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), + ); + view_2_inputs.insert( + 0, + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + ); // This stage transitions from view 1 to view 2. let view_2 = TestScriptStage { inputs: view_2_inputs, outputs: vec![ exact(ViewChange(ViewNumber::new(2))), - all_predicates(vec![exact(QuorumVoteSend(votes[1].clone())), quorum_proposal_validated(), quorum_proposal_send()]), + all_predicates(vec![ + exact(QuorumVoteSend(votes[1].clone())), + quorum_proposal_validated(), + quorum_proposal_send(), + ]), ], // We should end on view 2. asserts: vec![], diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index bfae7ca062..a3e9701b9b 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -29,7 +29,7 @@ use hotshot_types::{ utils::BuilderCommitment, vid::VidSchemeType, }; -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; use sha2::Digest; fn make_payload_commitment( diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 78ed8caaf8..e732c4046d 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -17,7 +17,7 @@ use hotshot_types::{ BlockPayload, }, }; -use jf_primitives::vid::VidScheme; +use jf_vid::VidScheme; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/types/Cargo.toml b/types/Cargo.toml index 77239fee5a..ef7a60860b 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -23,7 +23,7 @@ bitvec = { workspace = true } blake3 = { workspace = true } committable = { workspace = true } custom_debug = { workspace = true } -digest = { workspace = true } +digest = { workspace = true, features = ["rand_core"] } either = { workspace = true } espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } @@ -43,8 +43,9 @@ time = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } derivative = "2" -jf-primitives = { workspace = true } -jf-plonk = { workspace = true } +jf-vid = { workspace = true } +jf-pcs = { workspace = true } +jf-signature = { workspace = true, features = ["schnorr"] } jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } @@ -58,8 +59,8 @@ url = { workspace = true } serde_json = { workspace = true } [features] -gpu-vid = ["jf-primitives/gpu-vid"] -test-srs = ["jf-primitives/test-srs"] +gpu-vid = ["jf-vid/gpu-vid"] +test-srs = ["jf-vid/test-srs"] [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index 70140ee3b9..fbe6b627d5 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -16,7 +16,7 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; use derivative::Derivative; -use jf_primitives::vid::VidDisperse as JfVidDisperse; +use jf_vid::VidDisperse as JfVidDisperse; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; @@ -703,7 +703,7 @@ impl Leaf { pub mod null_block { #![allow(missing_docs)] - use jf_primitives::vid::VidScheme; + use jf_vid::VidScheme; use memoize::memoize; use crate::{ diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 3a0aa8f5c4..14481a1181 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -6,7 +6,7 @@ use ark_ed_on_bn254::EdwardsConfig as Config; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ethereum_types::U256; -use jf_primitives::signatures::schnorr; +use jf_signature::schnorr; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use serde::{Deserialize, Serialize}; @@ -18,7 +18,7 @@ pub type CircuitField = ark_ed_on_bn254::Fq; pub type LightClientState = GenericLightClientState; /// Signature scheme pub type StateSignatureScheme = - jf_primitives::signatures::schnorr::SchnorrSignatureScheme; + jf_signature::schnorr::SchnorrSignatureScheme; /// Signatures pub type StateSignature = schnorr::Signature; /// Verification key for verifying state signatures diff --git a/types/src/qc.rs b/types/src/qc.rs index ca3295f1bb..df27c211fa 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -12,10 +12,7 @@ use ark_std::{ use bitvec::prelude::*; use ethereum_types::U256; use generic_array::GenericArray; -use jf_primitives::{ - errors::{PrimitivesError, PrimitivesError::ParameterError}, - signatures::AggregateableSignatureSchemes, -}; +use jf_signature::{AggregateableSignatureSchemes, SignatureError}; use serde::{Deserialize, Serialize}; use typenum::U32; @@ -62,7 +59,7 @@ where sk: &A::SigningKey, msg: M, prng: &mut R, - ) -> Result { + ) -> Result { A::sign(pp, sk, msg, prng) } @@ -70,9 +67,9 @@ where qc_pp: &Self::QCProverParams, signers: &BitSlice, sigs: &[A::Signature], - ) -> Result { + ) -> Result { if signers.len() != qc_pp.stake_entries.len() { - return Err(ParameterError(format!( + return Err(SignatureError::ParameterError(format!( "bit vector len {} != the number of stake entries {}", signers.len(), qc_pp.stake_entries.len(), @@ -91,7 +88,7 @@ where } }); if total_weight < qc_pp.threshold { - return Err(ParameterError(format!( + return Err(SignatureError::ParameterError(format!( "total_weight {} less than threshold {}", total_weight, qc_pp.threshold, ))); @@ -103,7 +100,7 @@ where } } if ver_keys.len() != sigs.len() { - return Err(ParameterError(format!( + return Err(SignatureError::ParameterError(format!( "the number of ver_keys {} != the number of partial signatures {}", ver_keys.len(), sigs.len(), @@ -118,10 +115,10 @@ where qc_vp: &Self::QCVerifierParams, message: &GenericArray, qc: &Self::QC, - ) -> Result { + ) -> Result { let (sig, signers) = qc; if signers.len() != qc_vp.stake_entries.len() { - return Err(ParameterError(format!( + return Err(SignatureError::ParameterError(format!( "signers bit vector len {} != the number of stake entries {}", signers.len(), qc_vp.stake_entries.len(), @@ -140,7 +137,7 @@ where } }); if total_weight < qc_vp.threshold { - return Err(ParameterError(format!( + return Err(SignatureError::ParameterError(format!( "total_weight {} less than threshold {}", total_weight, qc_vp.threshold, ))); @@ -160,10 +157,10 @@ where qc_vp: &Self::QCVerifierParams, message: &GenericArray<::MessageUnit, Self::MessageLength>, qc: &Self::QC, - ) -> Result::VerificationKey>, PrimitivesError> { + ) -> Result::VerificationKey>, SignatureError> { let (_sig, signers) = qc; if signers.len() != qc_vp.stake_entries.len() { - return Err(ParameterError(format!( + return Err(SignatureError::ParameterError(format!( "signers bit vector len {} != the number of stake entries {}", signers.len(), qc_vp.stake_entries.len(), @@ -185,7 +182,7 @@ where #[cfg(test)] mod tests { - use jf_primitives::signatures::{ + use jf_signature::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, SignatureScheme, }; diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index c8aa93a673..56e1aa7cf7 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -1,14 +1,12 @@ //! Types and structs for the hotshot signature keys +use ark_serialize::SerializationError; use bitvec::{slice::BitSlice, vec::BitVec}; use ethereum_types::U256; use generic_array::GenericArray; -use jf_primitives::{ - errors::PrimitivesError, - signatures::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, - SignatureScheme, - }, +use jf_signature::{ + bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, + SignatureError, SignatureScheme, }; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; @@ -38,7 +36,7 @@ impl SignatureKey for BLSPubKey { type PureAssembledSignatureType = ::Signature; type QCType = (Self::PureAssembledSignatureType, BitVec); - type SignError = PrimitivesError; + type SignError = SignatureError; #[instrument(skip(self))] fn validate(&self, signature: &Self::PureAssembledSignatureType, data: &[u8]) -> bool { @@ -69,10 +67,8 @@ impl SignatureKey for BLSPubKey { buf } - fn from_bytes(bytes: &[u8]) -> Result { - Ok(ark_serialize::CanonicalDeserialize::deserialize_compressed( - bytes, - )?) + fn from_bytes(bytes: &[u8]) -> Result { + ark_serialize::CanonicalDeserialize::deserialize_compressed(bytes) } fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey) { @@ -138,7 +134,7 @@ pub type BuilderKey = BLSPubKey; impl BuilderSignatureKey for BuilderKey { type BuilderPrivateKey = BLSPrivKey; type BuilderSignature = ::Signature; - type SignError = PrimitivesError; + type SignError = SignatureError; fn sign_builder_message( private_key: &Self::BuilderPrivateKey, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 9db0d9cda9..06d115162b 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -12,7 +12,7 @@ use std::{ }; use committable::{Commitment, Committable}; -use jf_primitives::vid::{precomputable::Precomputable, VidScheme}; +use jf_vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use super::signature_key::BuilderSignatureKey; diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 6151af4eb0..0ab0659d89 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -7,7 +7,7 @@ use ark_std::{ }; use bitvec::prelude::*; use generic_array::{ArrayLength, GenericArray}; -use jf_primitives::{errors::PrimitivesError, signatures::AggregateableSignatureSchemes}; +use jf_signature::{AggregateableSignatureSchemes, SignatureError}; use serde::{Deserialize, Serialize}; /// Trait for validating a QC built from different signatures on the same message @@ -47,7 +47,7 @@ pub trait QuorumCertificateScheme< sk: &A::SigningKey, msg: M, prng: &mut R, - ) -> Result { + ) -> Result { A::sign(pp, sk, msg, prng) } @@ -64,7 +64,7 @@ pub trait QuorumCertificateScheme< qc_pp: &Self::QCProverParams, signers: &BitSlice, sigs: &[A::Signature], - ) -> Result; + ) -> Result; /// Checks an aggregated signature over some message provided as input /// * `qc_vp` - public parameters for validating the QC @@ -80,7 +80,7 @@ pub trait QuorumCertificateScheme< qc_vp: &Self::QCVerifierParams, message: &GenericArray, qc: &Self::QC, - ) -> Result; + ) -> Result; /// Trace the list of signers given a qc. /// @@ -91,5 +91,5 @@ pub trait QuorumCertificateScheme< qc_vp: &Self::QCVerifierParams, message: &GenericArray, qc: &Self::QC, - ) -> Result, PrimitivesError>; + ) -> Result, SignatureError>; } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 3d8fccc71e..9de1675063 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -4,9 +4,10 @@ use std::{ hash::Hash, }; +use ark_serialize::SerializationError; use bitvec::prelude::*; use ethereum_types::U256; -use jf_primitives::{errors::PrimitivesError, vid::VidScheme}; +use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tagged_base64::TaggedBase64; @@ -110,7 +111,7 @@ pub trait SignatureKey: /// # Errors /// /// Will return `Err` if deserialization fails - fn from_bytes(bytes: &[u8]) -> Result; + fn from_bytes(bytes: &[u8]) -> Result; /// Generate a new key pair fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey); diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index 598a662650..eced8e30b9 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -1,10 +1,8 @@ //! Trait for stake table data structures -use ark_std::{rand::SeedableRng, string::ToString, vec::Vec}; +use ark_std::{rand::SeedableRng, vec::Vec}; use digest::crypto_common::rand_core::CryptoRngCore; use displaydoc::Display; -use jf_plonk::errors::PlonkError; -use jf_primitives::errors::PrimitivesError; /// Snapshots of the stake table pub enum SnapshotVersion { @@ -220,16 +218,3 @@ pub enum StakeTableError { } impl ark_std::error::Error for StakeTableError {} - -impl From for PrimitivesError { - fn from(value: StakeTableError) -> Self { - // FIXME: (alex) should we define a PrimitivesError::General()? - Self::ParameterError(value.to_string()) - } -} - -impl From for PlonkError { - fn from(value: StakeTableError) -> Self { - Self::PrimitiveError(PrimitivesError::ParameterError(value.to_string())) - } -} diff --git a/types/src/vid.rs b/types/src/vid.rs index b2dbd2c004..3df852b9b9 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -11,20 +11,18 @@ use std::{fmt::Debug, ops::Range}; use ark_bn254::Bn254; -use jf_primitives::{ - pcs::{ - prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, - PolynomialCommitmentScheme, - }, - vid::{ - advz::{ - self, - payload_prover::{LargeRangeProof, SmallRangeProof}, - }, - payload_prover::{PayloadProver, Statement}, - precomputable::Precomputable, - VidDisperse, VidResult, VidScheme, +use jf_pcs::{ + prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, + PolynomialCommitmentScheme, +}; +use jf_vid::{ + advz::{ + self, + payload_prover::{LargeRangeProof, SmallRangeProof}, }, + payload_prover::{PayloadProver, Statement}, + precomputable::Precomputable, + VidDisperse, VidResult, VidScheme, }; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; From df9c8b37871f9e78624e2039fc2b5d254eb90a66 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 10 May 2024 16:54:32 +0200 Subject: [PATCH 1028/1393] Use precomputed data for VID disperse (#2996) --- task-impls/src/consensus/mod.rs | 1 - task-impls/src/da.rs | 4 +-- task-impls/src/events.rs | 3 ++- task-impls/src/helpers.rs | 38 +++++++++------------------ task-impls/src/quorum_vote.rs | 18 +++++++------ task-impls/src/request.rs | 10 +++++-- task-impls/src/response.rs | 3 ++- task-impls/src/transactions.rs | 7 ++++- task-impls/src/vid.rs | 11 ++++++-- testing/tests/tests_1/da_task.rs | 9 ++++--- testing/tests/tests_1/upgrade_task.rs | 19 +++++--------- testing/tests/tests_1/vid_task.rs | 4 ++- 12 files changed, 67 insertions(+), 60 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 99dfd59e13..bc6c1f813d 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -37,7 +37,6 @@ use jf_vid::VidScheme; use tokio::task::JoinHandle; #[cfg(not(feature = "dependency-tasks"))] use tracing::info; - use tracing::{debug, error, instrument, warn}; use vbs::version::Version; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 3732bc8231..1738cca715 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -273,7 +273,7 @@ impl, A: ConsensusApi + return None; } - HotShotEvent::BlockRecv(encoded_transactions, metadata, view, _fee) => { + HotShotEvent::BlockRecv(encoded_transactions, metadata, view, _fee, _vid_precomp) => { let view = *view; // quick hash the encoded txns with sha256 @@ -336,7 +336,7 @@ impl, A: ConsensusApi + HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) | HotShotEvent::Shutdown - | HotShotEvent::BlockRecv(_, _, _, _) + | HotShotEvent::BlockRecv(_, _, _, _, _) | HotShotEvent::ViewChange(_) | HotShotEvent::DAProposalValidated(_, _) ) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 46f1d75a06..4811bdfd75 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -15,7 +15,7 @@ use hotshot_types::{ }, traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, utils::BuilderCommitment, - vid::VidCommitment, + vid::{VidCommitment, VidPrecomputeData}, vote::VoteDependencyData, }; use vbs::version::Version; @@ -121,6 +121,7 @@ pub enum HotShotEvent { ::Metadata, TYPES::Time, BuilderFee, + VidPrecomputeData, ), /// Event when the transactions task has a block formed BlockReady(VidDisperse, TYPES::Time), diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index b430303ef6..8deeea29ef 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -38,7 +38,9 @@ pub async fn broadcast_event(event: E, sender: &Send } } } -/// Calculate the vid disperse information from the payload given a view and membership + +/// Calculate the vid disperse information from the payload given a view and membership, +/// optionally using precompute data from builder /// /// # Panics /// Panics if the VID calculation fails, this should not happen. @@ -47,36 +49,20 @@ pub async fn calculate_vid_disperse( txns: Arc<[u8]>, membership: &Arc, view: TYPES::Time, + precompute_data: Option, ) -> VidDisperse { let num_nodes = membership.total_nodes(); - let vid_disperse = spawn_blocking(move || { - vid_scheme(num_nodes).disperse(&txns).unwrap_or_else(|err| panic!("VID precompute disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) - }) - .await; - #[cfg(async_executor_impl = "tokio")] - // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. - let vid_disperse = vid_disperse.unwrap(); - VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) -} - -/// Calculate the vid disperse information from the payload given a view and membership, and precompute data from builder -/// -/// # Panics -/// Panics if the VID calculation fails, this should not happen. -#[allow(clippy::panic)] -pub async fn calculate_vid_disperse_using_precompute_data( - txns: Arc<[u8]>, - membership: &Arc, - view: TYPES::Time, - pre_compute_data: VidPrecomputeData, -) -> VidDisperse { - let num_nodes = membership.total_nodes(); let vid_disperse = spawn_blocking(move || { - vid_scheme(num_nodes).disperse_precompute(&txns, &pre_compute_data).unwrap_or_else(|err| panic!("VID precompute disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) - }) - .await; + precompute_data + .map_or_else( + || vid_scheme(num_nodes).disperse(Arc::clone(&txns)), + |data| vid_scheme(num_nodes).disperse_precompute(Arc::clone(&txns), &data) + ) + .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) + }).await; #[cfg(async_executor_impl = "tokio")] + // Tokio's JoinHandle's `Output` is `Result`, while in async-std it's just `T` // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index dfa9e81719..e790f2b0fe 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,9 +1,7 @@ #[cfg(feature = "dependency-tasks")] -use crate::consensus::helpers::update_state_and_vote_if_able; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; +use std::marker::PhantomData; +use std::{collections::HashMap, sync::Arc}; + use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -31,13 +29,17 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use jf_vid::VidScheme; -#[cfg(feature = "dependency-tasks")] -use std::marker::PhantomData; -use std::{collections::HashMap, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; +#[cfg(feature = "dependency-tasks")] +use crate::consensus::helpers::update_state_and_vote_if_able; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; + /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index cd63be6e3c..50afffa761 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,5 +1,11 @@ -use std::sync::atomic::{AtomicBool, Ordering}; -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::{ + marker::PhantomData, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 1cd4bcb4d6..39f292481a 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -134,7 +134,8 @@ impl NetworkResponseState { if !contained { let txns = consensus.saved_payloads().get(&view)?; let vid = - calculate_vid_disperse(Arc::clone(txns), &Arc::clone(&self.quorum), view).await; + calculate_vid_disperse(Arc::clone(txns), &Arc::clone(&self.quorum), view, None) + .await; let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; for share in shares { diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index d1ff7d4db8..e25b140c73 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -16,7 +16,7 @@ use hotshot_types::{ data::{null_block, Leaf}, event::{Event, EventType}, traits::{ - block_contents::BuilderFee, + block_contents::{precompute_vid_commitment, BuilderFee}, consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -154,6 +154,7 @@ impl< fee_account: block_data.sender, fee_signature: block_header.fee_signature, }, + block_header.vid_precompute_data, )), &event_stream, ) @@ -191,6 +192,9 @@ impl< return None; }; + let (_, precompute_data) = + precompute_vid_commitment(&[], self.membership.total_nodes()); + // Broadcast the empty block broadcast_event( Arc::new(HotShotEvent::BlockRecv( @@ -198,6 +202,7 @@ impl< metadata, block_view, builder_fee, + precompute_data, )), &event_stream, ) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 1cbcbdf44f..fe5af6d66a 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -61,7 +61,13 @@ impl, A: ConsensusApi + event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::BlockRecv(encoded_transactions, metadata, view_number, fee) => { + HotShotEvent::BlockRecv( + encoded_transactions, + metadata, + view_number, + fee, + precompute_data, + ) => { let payload = ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); @@ -69,6 +75,7 @@ impl, A: ConsensusApi + Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, + Some(precompute_data.clone()), ) .await; let payload_commitment = vid_disperse.payload_commitment; @@ -177,7 +184,7 @@ impl, A: ConsensusApi + !matches!( event.as_ref(), HotShotEvent::Shutdown - | HotShotEvent::BlockRecv(_, _, _, _) + | HotShotEvent::BlockRecv(_, _, _, _, _) | HotShotEvent::BlockReady(_, _) | HotShotEvent::ViewChange(_) ) diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 697857344b..9eead78ad5 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -17,7 +17,8 @@ use hotshot_types::{ data::{null_block, ViewNumber}, simple_vote::DAData, traits::{ - block_contents::vid_commitment, election::Membership, node_implementation::ConsensusTime, + block_contents::precompute_vid_commitment, election::Membership, + node_implementation::ConsensusTime, }, }; @@ -35,7 +36,7 @@ async fn test_da_task() { // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); - let payload_commit = vid_commitment( + let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), ); @@ -77,6 +78,7 @@ async fn test_da_task() { ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), + precompute, ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], @@ -116,7 +118,7 @@ async fn test_da_task_storage_failure() { // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction(vec![0])]; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); - let payload_commit = vid_commitment( + let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), ); @@ -158,6 +160,7 @@ async fn test_da_task_storage_failure() { ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), + precompute, ), ], outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index cabb0a5381..e60f7247f0 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -3,10 +3,7 @@ use std::time::Duration; -use hotshot::{ - tasks::{task_state::CreateTaskState}, - types::SystemContextHandle, -}; +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, @@ -244,9 +241,10 @@ async fn test_upgrade_and_consensus_task() { DACertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, - vec![QuorumProposalRecv(proposals[1].clone(), leaders[1]), - DACertificateRecv(dacs[1].clone()), - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + vec![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + DACertificateRecv(dacs[1].clone()), + VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), ], vec![ VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), @@ -255,11 +253,8 @@ async fn test_upgrade_and_consensus_task() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee( - quorum_membership.total_nodes(), - &TestInstanceState {}, - ) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), ), QCFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), ], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index e732c4046d..802d54b0ec 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -17,7 +17,7 @@ use hotshot_types::{ BlockPayload, }, }; -use jf_vid::VidScheme; +use jf_vid::{precomputable::Precomputable, VidScheme}; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -42,6 +42,7 @@ async fn test_vid_task() { let builder_commitment = payload.builder_commitment(&metadata); let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); + let (_, vid_precompute) = vid.commit_only_precompute(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let signature = ::SignatureKey::sign( @@ -89,6 +90,7 @@ async fn test_vid_task() { TestMetadata, ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}).unwrap(), + vid_precompute, )); input.push(HotShotEvent::BlockReady( vid_disperse.clone(), From e8ecb7cc67c1149912f747c5c48ce4435bd6f35f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 10 May 2024 14:18:43 -0400 Subject: [PATCH 1029/1393] Maybe fix CDN tests (#3146) * maybe fix tests * give last crash test a bit more time too * Never spawn a vote task for the wrong view --- task-impls/src/consensus/mod.rs | 9 +++++++++ testing/tests/tests_5/combined_network.rs | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index bc6c1f813d..b0969a8088 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -248,6 +248,9 @@ impl> ConsensusTaskState let Some(proposal) = self.current_proposal.clone() else { return; }; + if proposal.get_view_number() != view { + return; + } let upgrade = self.decided_upgrade_cert.clone(); let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); @@ -497,6 +500,12 @@ impl> ConsensusTaskState if disperse.data.recipient_key != self.public_key { return; } + let Some(proposal) = self.current_proposal.clone() else { + return; + }; + if proposal.get_view_number() != view { + return; + } self.spawn_vote_task(view, event_stream.clone()); } HotShotEvent::ViewChange(new_view) => { diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 82c849b018..5be931e554 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -172,7 +172,7 @@ async fn test_combined_network_half_dc() { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(120), + duration: Duration::from_secs(220), }, ), ..TestDescription::default_multiple_rounds() From 8d9b15125d76cca379799bceb4c557cc8da25b3c Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Fri, 10 May 2024 14:20:58 -0400 Subject: [PATCH 1030/1393] Add serialization for View type (#3147) --- types/src/utils.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/types/src/utils.rs b/types/src/utils.rs index e628aad883..d6bb2a1eb3 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -12,6 +12,7 @@ use bincode::{ }; use committable::Commitment; use digest::OutputSizeUser; +use serde::{Deserialize, Serialize}; use sha2::Digest; use tagged_base64::tagged; use typenum::Unsigned; @@ -23,7 +24,8 @@ use crate::{ }; /// A view's state -#[derive(Debug)] +#[derive(Debug, Deserialize, Serialize)] +#[serde(bound = "")] pub enum ViewInner { /// A pending view with an available block but not leaf proposal yet. /// @@ -133,7 +135,8 @@ impl Deref for View { } /// This exists so we can perform state transitions mutably -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(bound = "")] pub struct View { /// The view data. Wrapped in a struct so we can mutate pub view_inner: ViewInner, From 6a3568b52a165375d99dd3cd9db1cda18f4c6b12 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 10 May 2024 14:24:35 -0400 Subject: [PATCH 1031/1393] Fix failure to attach view change evidence (#3148) --- task-impls/src/consensus/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index b0969a8088..0bcb603da5 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -646,6 +646,11 @@ impl> ConsensusTaskState } if let Some(cert) = &self.proposal_cert { + if !cert.is_valid_for_view(&view) { + self.proposal_cert = None; + info!("Failed to propose off SendPayloadCommitmentAndMetadata because we had view change evidence, but it was not current."); + return; + } match cert { ViewChangeEvidence::Timeout(tc) => { if self.quorum_membership.get_leader(tc.get_view_number() + 1) From 1134c128fff1a61c89c0ebafe2f7253e8feac9d2 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 13 May 2024 10:32:46 -0400 Subject: [PATCH 1032/1393] Unify committee/da terminology (#3137) * unify committee/da terminology * capitalization * update DA to Da * revert changes to diagrams --- example-types/src/node_types.rs | 8 +-- example-types/src/storage_types.rs | 6 +- examples/combined/all.rs | 4 +- examples/combined/multi-validator.rs | 4 +- examples/combined/types.rs | 8 +-- examples/combined/validator.rs | 4 +- examples/infra/mod.rs | 40 +++++------ examples/libp2p/all.rs | 4 +- examples/libp2p/multi-validator.rs | 4 +- examples/libp2p/types.rs | 8 +-- examples/libp2p/validator.rs | 4 +- examples/push-cdn/all.rs | 4 +- examples/push-cdn/types.rs | 4 +- examples/push-cdn/validator.rs | 4 +- hotshot/src/lib.rs | 4 +- hotshot/src/tasks/mod.rs | 4 +- hotshot/src/tasks/task_state.rs | 16 ++--- .../src/traits/networking/combined_network.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 8 +-- hotshot/src/types/handle.rs | 2 +- orchestrator/run-config.toml | 4 +- orchestrator/src/config.rs | 20 +++--- orchestrator/src/lib.rs | 2 +- task-impls/HotShot_event_architecture.drawio | 8 +-- task-impls/src/consensus/helpers.rs | 5 +- task-impls/src/consensus/mod.rs | 14 ++-- task-impls/src/da.rs | 52 +++++++------- task-impls/src/events.rs | 26 +++---- task-impls/src/network.rs | 72 +++++++++---------- task-impls/src/quorum_proposal.rs | 2 +- task-impls/src/quorum_vote.rs | 14 ++-- task-impls/src/request.rs | 10 +-- task-impls/src/response.rs | 9 +-- task-impls/src/transactions.rs | 2 +- task-impls/src/vote_collection.rs | 22 +++--- testing/src/block_builder.rs | 2 +- testing/src/spinning_task.rs | 7 +- testing/src/task_helpers.rs | 10 +-- testing/src/test_runner.rs | 7 +- testing/src/view_generator.rs | 22 +++--- testing/tests/tests_1/consensus_task.rs | 18 ++--- testing/tests/tests_1/da_task.rs | 32 ++++----- testing/tests/tests_1/proposal_ordering.rs | 4 +- testing/tests/tests_1/quorum_vote_task.rs | 12 ++-- testing/tests/tests_1/upgrade_task.rs | 26 +++---- testing/tests/tests_1/vid_task.rs | 4 +- testing/tests/tests_3/memory_network.rs | 4 +- types/src/consensus.rs | 8 +-- types/src/data.rs | 6 +- types/src/event.rs | 8 +-- types/src/message.rs | 46 ++++++------ types/src/simple_certificate.rs | 6 +- types/src/simple_vote.rs | 6 +- types/src/traits/network.rs | 4 +- types/src/traits/node_implementation.rs | 4 +- types/src/traits/storage.rs | 4 +- types/src/utils.rs | 6 +- types/src/vote.rs | 4 +- 58 files changed, 315 insertions(+), 339 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index f0cf75e3bd..822ff21053 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -70,24 +70,24 @@ pub type StaticMembership = StaticCommittee; impl NodeImplementation for PushCdnImpl { type QuorumNetwork = PushCdnNetwork; - type CommitteeNetwork = PushCdnNetwork; + type DaNetwork = PushCdnNetwork; type Storage = TestStorage; } impl NodeImplementation for MemoryImpl { type QuorumNetwork = MemoryNetwork, TYPES::SignatureKey>; - type CommitteeNetwork = MemoryNetwork, TYPES::SignatureKey>; + type DaNetwork = MemoryNetwork, TYPES::SignatureKey>; type Storage = TestStorage; } impl NodeImplementation for CombinedImpl { type QuorumNetwork = CombinedNetworks; - type CommitteeNetwork = CombinedNetworks; + type DaNetwork = CombinedNetworks; type Storage = TestStorage; } impl NodeImplementation for Libp2pImpl { type QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>; - type CommitteeNetwork = Libp2pNetwork, TYPES::SignatureKey>; + type DaNetwork = Libp2pNetwork, TYPES::SignatureKey>; type Storage = TestStorage; } diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index b446cbdc90..f473c61420 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -8,7 +8,7 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, - data::{DAProposal, Leaf, VidDisperseShare}, + data::{DaProposal, Leaf, VidDisperseShare}, message::Proposal, traits::{node_implementation::NodeType, storage::Storage}, utils::View, @@ -22,7 +22,7 @@ type VidShares = HashMap< #[derive(Clone, Debug)] pub struct TestStorageState { vids: VidShares, - das: HashMap>>, + das: HashMap>>, } impl Default for TestStorageState { @@ -65,7 +65,7 @@ impl Storage for TestStorage { Ok(()) } - async fn append_da(&self, proposal: &Proposal>) -> Result<()> { + async fn append_da(&self, proposal: &Proposal>) -> Result<()> { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 5d7665697b..8f82011985 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -25,7 +25,7 @@ use tracing::{error, instrument}; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, + types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}, }; /// general infra used for this example @@ -142,7 +142,7 @@ async fn main() { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, advertise_address: Some(advertise_address), diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index 44a8943051..9d12549eee 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -30,7 +30,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/combined/types.rs b/examples/combined/types.rs index 05507b2327..568629b8d1 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -5,14 +5,14 @@ use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; -use crate::infra::CombinedDARun; +use crate::infra::CombinedDaRun; /// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = CombinedNetworks; +pub type DaNetwork = CombinedNetworks; /// convenience type alias pub type VIDNetwork = CombinedNetworks; /// convenience type alias @@ -22,8 +22,8 @@ pub type ViewSyncNetwork = CombinedNetworks; impl NodeImplementation for NodeImpl { type QuorumNetwork = QuorumNetwork; - type CommitteeNetwork = DANetwork; + type DaNetwork = DaNetwork; type Storage = TestStorage; } /// convenience type alias -pub type ThisRun = CombinedDARun; +pub type ThisRun = CombinedDaRun; diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 43f202c974..36ee261377 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -8,7 +8,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{info, instrument}; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -36,5 +36,5 @@ async fn main() { ); info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 050799c86c..6a62d1cc01 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -324,14 +324,14 @@ fn calculate_num_tx_per_round( /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] -pub trait RunDA< +pub trait RunDa< TYPES: NodeType, DANET: ConnectedNetwork, TYPES::SignatureKey>, QUORUMNET: ConnectedNetwork, TYPES::SignatureKey>, NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMNET, - CommitteeNetwork = DANET, + DaNetwork = DANET, Storage = TestStorage, >, > where @@ -602,10 +602,10 @@ impl< NODE: NodeImplementation< TYPES, QuorumNetwork = PushCdnNetwork, - CommitteeNetwork = PushCdnNetwork, + DaNetwork = PushCdnNetwork, Storage = TestStorage, >, - > RunDA, PushCdnNetwork, NODE> for PushCdnDaRun + > RunDa, PushCdnNetwork, NODE> for PushCdnDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -668,7 +668,7 @@ where // Libp2p /// Represents a libp2p-based run -pub struct Libp2pDARun { +pub struct Libp2pDaRun { /// the network configuration config: NetworkConfig, /// quorum channel @@ -688,16 +688,16 @@ impl< NODE: NodeImplementation< TYPES, QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>, - CommitteeNetwork = Libp2pNetwork, TYPES::SignatureKey>, + DaNetwork = Libp2pNetwork, TYPES::SignatureKey>, Storage = TestStorage, >, > - RunDA< + RunDa< TYPES, Libp2pNetwork, TYPES::SignatureKey>, Libp2pNetwork, TYPES::SignatureKey>, NODE, - > for Libp2pDARun + > for Libp2pDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -707,7 +707,7 @@ where async fn initialize_networking( config: NetworkConfig, libp2p_advertise_address: Option, - ) -> Libp2pDARun { + ) -> Libp2pDaRun { // Extrapolate keys for ease of use let keys = config.clone().config.my_own_validator_config; let public_key = keys.public_key; @@ -743,7 +743,7 @@ where // Wait for the network to be ready libp2p_network.wait_for_ready().await; - Libp2pDARun { + Libp2pDaRun { config, quorum_channel: libp2p_network.clone(), da_channel: libp2p_network, @@ -766,7 +766,7 @@ where // Combined network /// Represents a combined-network-based run -pub struct CombinedDARun { +pub struct CombinedDaRun { /// the network configuration config: NetworkConfig, /// quorum channel @@ -786,10 +786,10 @@ impl< NODE: NodeImplementation< TYPES, QuorumNetwork = CombinedNetworks, - CommitteeNetwork = CombinedNetworks, + DaNetwork = CombinedNetworks, Storage = TestStorage, >, - > RunDA, CombinedNetworks, NODE> for CombinedDARun + > RunDa, CombinedNetworks, NODE> for CombinedDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -799,10 +799,10 @@ where async fn initialize_networking( config: NetworkConfig, libp2p_advertise_address: Option, - ) -> CombinedDARun { + ) -> CombinedDaRun { // Initialize our Libp2p network - let libp2p_da_run: Libp2pDARun = - as RunDA< + let libp2p_da_run: Libp2pDaRun = + as RunDa< TYPES, Libp2pNetwork, TYPES::SignatureKey>, Libp2pNetwork, TYPES::SignatureKey>, @@ -812,7 +812,7 @@ where // Initialize our CDN network let cdn_da_run: PushCdnDaRun = - as RunDA< + as RunDa< TYPES, PushCdnNetwork, PushCdnNetwork, @@ -839,7 +839,7 @@ where ); // Return the run configuration - CombinedDARun { + CombinedDaRun { config, quorum_channel, da_channel, @@ -874,10 +874,10 @@ pub async fn main_entry_point< NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMCHANNEL, - CommitteeNetwork = DACHANNEL, + DaNetwork = DACHANNEL, Storage = TestStorage, >, - RUNDA: RunDA, + RUNDA: RunDa, >( args: ValidatorArgs, ) where diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 36ec5c0258..500e90992d 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -14,7 +14,7 @@ use tracing::instrument; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, + types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}, }; /// general infra used for this example @@ -48,7 +48,7 @@ async fn main() { ); let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, advertise_address: Some(advertise_address), diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index 36527bf848..3b212a5da1 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -30,7 +30,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index cd46c71f9c..6823611656 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -8,21 +8,21 @@ use hotshot_types::{ }; use serde::{Deserialize, Serialize}; -use crate::infra::Libp2pDARun; +use crate::infra::Libp2pDaRun; /// dummy struct so we can choose types #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} /// convenience type alias -pub type DANetwork = Libp2pNetwork, ::SignatureKey>; +pub type DaNetwork = Libp2pNetwork, ::SignatureKey>; /// convenience type alias pub type QuorumNetwork = Libp2pNetwork, ::SignatureKey>; impl NodeImplementation for NodeImpl { type QuorumNetwork = QuorumNetwork; - type CommitteeNetwork = DANetwork; + type DaNetwork = DaNetwork; type Storage = TestStorage; } /// convenience type alias -pub type ThisRun = Libp2pDARun; +pub type ThisRun = Libp2pDaRun; diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 9b46a591fb..9c0b8b60f8 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -8,7 +8,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{info, instrument}; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -35,5 +35,5 @@ async fn main() { ); info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index c5e8989f43..24f89b874e 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -15,7 +15,7 @@ use hotshot_types::traits::node_implementation::NodeType; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}, + types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}, }; /// The infra implementation @@ -118,7 +118,7 @@ async fn main() { for _ in 0..(config.config.num_nodes_with_stake.get()) { let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, advertise_address: None, diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index c62eec2c28..09bd0ef8c2 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -9,7 +9,7 @@ use crate::infra::PushCdnDaRun; pub struct NodeImpl {} /// Convenience type alias -pub type DANetwork = PushCdnNetwork; +pub type DaNetwork = PushCdnNetwork; /// Convenience type alias pub type VIDNetwork = PushCdnNetwork; /// Convenience type alias @@ -18,7 +18,7 @@ pub type QuorumNetwork = PushCdnNetwork; pub type ViewSyncNetwork = PushCdnNetwork; impl NodeImplementation for NodeImpl { - type CommitteeNetwork = DANetwork; + type DaNetwork = DaNetwork; type QuorumNetwork = QuorumNetwork; type Storage = TestStorage; } diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index 1991c0e857..35d0b4470b 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -5,7 +5,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use tracing::{info, instrument}; -use crate::types::{DANetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; /// types used for this example pub mod types; @@ -22,5 +22,5 @@ async fn main() { setup_backtrace(); let args = ValidatorArgs::parse(); info!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ffac267cd9..028d5402a2 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -79,7 +79,7 @@ pub struct Networks> { pub quorum_network: Arc, /// Network for reaching the DA committee - pub da_network: Arc, + pub da_network: Arc, /// Phantom for TYPES and I pub _pd: PhantomData<(TYPES, I)>, @@ -611,7 +611,7 @@ impl> SystemContext { event_rx.activate_cloned(), Arc::clone(&da_network), da_membership, - network::committee_filter, + network::da_filter, Arc::clone(&handle.get_storage()), ) .await; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index cbbcb8dc1e..2073982a5d 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -11,7 +11,7 @@ use async_lock::RwLock; use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ consensus::ConsensusTaskState, - da::DATaskState, + da::DaTaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, quorum_proposal::QuorumProposalTaskState, @@ -185,7 +185,7 @@ pub async fn add_da_task>( handle: &SystemContextHandle, ) { // build the da task - let da_state = DATaskState::create_from(handle).await; + let da_state = DaTaskState::create_from(handle).await; let task = Task::new(tx, rx, Arc::clone(&task_reg), da_state); task_reg.run_task(task).await; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index e1bc5b1497..80089f2df0 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -6,7 +6,7 @@ use std::{ use async_trait::async_trait; use hotshot_task_impls::{ - builder::BuilderClient, consensus::ConsensusTaskState, da::DATaskState, + builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, @@ -101,12 +101,12 @@ impl> CreateTaskState #[async_trait] impl> CreateTaskState - for DATaskState> + for DaTaskState> { async fn create_from( handle: &SystemContextHandle, - ) -> DATaskState> { - DATaskState { + ) -> DaTaskState> { + DaTaskState { api: handle.clone(), consensus: handle.hotshot.get_consensus(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), @@ -206,10 +206,10 @@ impl> CreateTaskState public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - committee_network: Arc::clone(&handle.hotshot.networks.da_network), + da_network: Arc::clone(&handle.hotshot.networks.da_network), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - committee_membership: handle.hotshot.memberships.da_membership.clone().into(), + da_membership: handle.hotshot.memberships.da_membership.clone().into(), storage: Arc::clone(&handle.storage), } } @@ -230,7 +230,7 @@ impl> CreateTaskState latest_voted_view: handle.get_cur_view().await, vote_dependencies: HashMap::new(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - committee_network: Arc::clone(&handle.hotshot.networks.da_network), + da_network: Arc::clone(&handle.hotshot.networks.da_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), @@ -252,7 +252,7 @@ impl> CreateTaskState latest_proposed_view: handle.get_cur_view().await, propose_dependencies: HashMap::new(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - committee_network: Arc::clone(&handle.hotshot.networks.da_network), + da_network: Arc::clone(&handle.hotshot.networks.da_network), output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus, instance_state: handle.hotshot.get_instance_state(), diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index e828354ef3..9893ed8f7b 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -128,7 +128,7 @@ impl CombinedNetworks { SequencingMessage::General(general_consensus_message) => { matches!(general_consensus_message, GeneralConsensusMessage::Vote(_)) } - SequencingMessage::Committee(_) => true, + SequencingMessage::Da(_) => true, }, MessageKind::Data(_) => false, } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 25f1d21ef0..27d0706298 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -458,9 +458,9 @@ impl Libp2pNetwork { bootstrap_addrs: BootstrapAddrs, id: usize, // HACK - committee_pks: BTreeSet, + quorum_public_keys: BTreeSet, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, - da_pks: BTreeSet, + da_public_keys: BTreeSet, is_da: bool, ) -> Result, NetworkError> { // Error if there were no bootstrap nodes specified @@ -487,8 +487,8 @@ impl Libp2pNetwork { pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); let mut topic_map = BiHashMap::new(); - topic_map.insert(committee_pks, QC_TOPIC.to_string()); - topic_map.insert(da_pks, "DA".to_string()); + topic_map.insert(quorum_public_keys, QC_TOPIC.to_string()); + topic_map.insert(da_public_keys, "DA".to_string()); let topic_map = RwLock::new(topic_map); diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index e93cfd42f7..3a1f7f44bd 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -128,7 +128,7 @@ impl + 'static> SystemContextHandl self.hotshot.get_consensus() } - /// Block the underlying quorum (and committee) networking interfaces until node is + /// Block the underlying quorum (and DA) networking interfaces until node is /// successfully initialized into the networks. pub async fn wait_for_networks_ready(&self) { self.hotshot.networks.wait_for_networks_ready().await; diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 39d3b8b485..f6f68b2e19 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -47,8 +47,8 @@ start_threshold = [ 8, 10, ] -staked_committee_nodes = 10 -non_staked_committee_nodes = 0 +staked_da_nodes = 10 +non_staked_da_nodes = 0 fixed_leader_for_gpuvid = 1 next_view_timeout = 30000 timeout_ratio = [ diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 1cd8416461..738f3541ee 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -571,10 +571,10 @@ pub struct HotShotConfigFile { #[serde(skip)] /// The known non-staking nodes' pub known_nodes_without_stake: Vec, - /// Number of staking committee nodes - pub staked_committee_nodes: usize, - /// Number of non-staking committee nodes - pub non_staked_committee_nodes: usize, + /// Number of staking DA nodes + pub staked_da_nodes: usize, + /// Number of non-staking DA nodes + pub non_staked_da_nodes: usize, /// Number of fixed leaders for GPU VID pub fixed_leader_for_gpuvid: usize, /// Base duration for next-view timeout, in milliseconds @@ -663,8 +663,8 @@ impl From> for HotShotConfig { known_nodes_with_stake: val.known_nodes_with_stake, known_nodes_without_stake: val.known_nodes_without_stake, my_own_validator_config: val.my_own_validator_config, - da_staked_committee_size: val.staked_committee_nodes, - da_non_staked_committee_size: val.non_staked_committee_nodes, + da_staked_committee_size: val.staked_da_nodes, + da_non_staked_committee_size: val.non_staked_da_nodes, fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, next_view_timeout: val.next_view_timeout, view_sync_timeout: val.view_sync_timeout, @@ -704,7 +704,7 @@ impl From for HotShotConfig { impl Default for HotShotConfigFile { fn default() -> Self { // The default number of nodes is 5 - let staked_committee_nodes: usize = 5; + let staked_da_nodes: usize = 5; // Aggregate the DA nodes let mut known_da_nodes = Vec::new(); @@ -715,7 +715,7 @@ impl Default for HotShotConfigFile { ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, false); // Add to DA nodes based on index - if node_id < staked_committee_nodes as u64 { + if node_id < staked_da_nodes as u64 { known_da_nodes.push(cur_validator_config.get_public_config()); cur_validator_config.is_da = true; } @@ -731,9 +731,9 @@ impl Default for HotShotConfigFile { my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, known_nodes_without_stake: vec![], - staked_committee_nodes, + staked_da_nodes, known_da_nodes, - non_staked_committee_nodes: 0, + non_staked_da_nodes: 0, fixed_leader_for_gpuvid: 1, next_view_timeout: 10000, view_sync_timeout: Duration::from_millis(1000), diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 28784747f4..93c99418fd 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -313,7 +313,7 @@ where // We add the node to the DA committee depending on either its node index or whether it requested membership. // // Since we issue `node_index` incrementally, if we are deciding DA membership by node_index - // we only need to check that the committee is not yet full. + // we only need to check that the DA committee is not yet full. // // Note: this logically simplifies to (self.config.indexed_da || da_requested) && !da_full, // but writing it that way makes it a little less clear to me. diff --git a/task-impls/HotShot_event_architecture.drawio b/task-impls/HotShot_event_architecture.drawio index ee5702aa4e..970a8db0c9 100644 --- a/task-impls/HotShot_event_architecture.drawio +++ b/task-impls/HotShot_event_architecture.drawio @@ -111,7 +111,7 @@ - + @@ -124,7 +124,7 @@ - + @@ -136,7 +136,7 @@ - + @@ -162,7 +162,7 @@ - + diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index ad48527d38..98e4ec9b74 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -1071,10 +1071,7 @@ pub async fn update_state_and_vote_if_able> { pub quorum_network: Arc, /// Network for DA committee - pub committee_network: Arc, + pub da_network: Arc, /// Membership for Timeout votes/certs pub timeout_membership: Arc, @@ -97,7 +97,7 @@ pub struct ConsensusTaskState> { pub quorum_membership: Arc, /// Membership for DA committee Votes/certs - pub committee_membership: Arc, + pub da_membership: Arc, /// Current Vote collection task, with it's view. pub vote_collector: @@ -178,7 +178,7 @@ impl> ConsensusTaskState .validate(&disperse.signature, payload_commitment.as_ref()) { let mut validated = false; - for da_member in self.committee_membership.get_staked_committee(view) { + for da_member in self.da_membership.get_staked_committee(view) { if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { validated = true; break; @@ -257,7 +257,7 @@ impl> ConsensusTaskState let consensus = Arc::clone(&self.consensus); let storage = Arc::clone(&self.storage); let quorum_mem = Arc::clone(&self.quorum_membership); - let committee_mem = Arc::clone(&self.committee_membership); + let da_mem = Arc::clone(&self.da_membership); let instance_state = Arc::clone(&self.instance_state); let handle = async_spawn(async move { update_state_and_vote_if_able::( @@ -268,7 +268,7 @@ impl> ConsensusTaskState storage, quorum_mem, instance_state, - (priv_key, upgrade, committee_mem, event_stream), + (priv_key, upgrade, da_mem, event_stream), ) .await; }); @@ -452,7 +452,7 @@ impl> ConsensusTaskState } } #[cfg(not(feature = "dependency-tasks"))] - HotShotEvent::DACertificateRecv(cert) => { + HotShotEvent::DaCertificateRecv(cert) => { debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; @@ -757,7 +757,7 @@ impl> TaskState for ConsensusTaskS | HotShotEvent::QuorumProposalValidated(..) | HotShotEvent::QCFormed(_) | HotShotEvent::UpgradeCertificateFormed(_) - | HotShotEvent::DACertificateRecv(_) + | HotShotEvent::DaCertificateRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::Timeout(_) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 1738cca715..ce253ce3f3 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -7,11 +7,11 @@ use async_std::task::spawn_blocking; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, View}, - data::DAProposal, + data::DaProposal, event::{Event, EventType}, message::Proposal, - simple_certificate::DACertificate, - simple_vote::{DAData, DAVote}, + simple_certificate::DaCertificate, + simple_vote::{DaData, DaVote}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, @@ -40,7 +40,7 @@ use crate::{ type VoteCollectorOption = Option>; /// Tracks state of a DA task -pub struct DATaskState< +pub struct DaTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, @@ -63,10 +63,10 @@ pub struct DATaskState< pub quorum_membership: Arc, /// Network for DA - pub da_network: Arc, + pub da_network: Arc, /// The current vote collection task, if there is one. - pub vote_collector: RwLock, DACertificate>>, + pub vote_collector: RwLock, DaCertificate>>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -82,7 +82,7 @@ pub struct DATaskState< } impl, A: ConsensusApi + 'static> - DATaskState + DaTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] @@ -92,7 +92,7 @@ impl, A: ConsensusApi + event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::DAProposalRecv(proposal, sender) => { + HotShotEvent::DaProposalRecv(proposal, sender) => { let sender = sender.clone(); debug!( "DA proposal received for view: {:?}", @@ -104,7 +104,7 @@ impl, A: ConsensusApi + // Allow a DA proposal that is one view older, in case we have voted on a quorum // proposal and updated the view. // `self.cur_view` should be at least 1 since there is a view change before getting - // the `DAProposalRecv` event. Otherwise, the view number subtraction below will + // the `DaProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block @@ -139,17 +139,17 @@ impl, A: ConsensusApi + } broadcast_event( - Arc::new(HotShotEvent::DAProposalValidated(proposal.clone(), sender)), + Arc::new(HotShotEvent::DaProposalValidated(proposal.clone(), sender)), &event_stream, ) .await; } - HotShotEvent::DAProposalValidated(proposal, sender) => { + HotShotEvent::DaProposalValidated(proposal, sender) => { // Proposal is fresh and valid, notify the application layer self.api .send_event(Event { view_number: self.cur_view, - event: EventType::DAProposal { + event: EventType::DaProposal { proposal: proposal.clone(), sender: sender.clone(), }, @@ -179,8 +179,8 @@ impl, A: ConsensusApi + let view = proposal.data.get_view_number(); // Generate and send vote - let Ok(vote) = DAVote::create_signed_vote( - DAData { + let Ok(vote) = DaVote::create_signed_vote( + DaData { payload_commit: payload_commitment, }, view, @@ -193,7 +193,7 @@ impl, A: ConsensusApi + debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - broadcast_event(Arc::new(HotShotEvent::DAVoteSend(vote)), &event_stream).await; + broadcast_event(Arc::new(HotShotEvent::DaVoteSend(vote)), &event_stream).await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -203,7 +203,7 @@ impl, A: ConsensusApi + consensus.update_validated_state_map( view, View { - view_inner: ViewInner::DA { payload_commitment }, + view_inner: ViewInner::Da { payload_commitment }, }, ); } @@ -215,12 +215,12 @@ impl, A: ConsensusApi + tracing::trace!("{e:?}"); } } - HotShotEvent::DAVoteRecv(ref vote) => { + HotShotEvent::DaVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); // Check if we are the leader and the vote is from the sender. let view = vote.get_view_number(); if self.da_membership.get_leader(view) != self.public_key { - error!("We are not the committee leader for view {} are we leader for next view? {}", *view, self.da_membership.get_leader(view + 1) == self.public_key); + error!("We are not the DA committee leader for view {} are we leader for next view? {}", *view, self.da_membership.get_leader(view + 1) == self.public_key); return None; } let mut collector = self.vote_collector.write().await; @@ -236,8 +236,8 @@ impl, A: ConsensusApi + }; *collector = create_vote_accumulator::< TYPES, - DAVote, - DACertificate, + DaVote, + DaCertificate, >(&info, vote.clone(), event, &event_stream) .await; } else { @@ -287,7 +287,7 @@ impl, A: ConsensusApi + return None; }; - let data: DAProposal = DAProposal { + let data: DaProposal = DaProposal { encoded_transactions: Arc::clone(encoded_transactions), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? @@ -301,7 +301,7 @@ impl, A: ConsensusApi + }; broadcast_event( - Arc::new(HotShotEvent::DAProposalSend( + Arc::new(HotShotEvent::DaProposalSend( message.clone(), self.public_key.clone(), )), @@ -324,7 +324,7 @@ impl, A: ConsensusApi + /// task state implementation for DA Task impl, A: ConsensusApi + 'static> TaskState - for DATaskState + for DaTaskState { type Event = Arc>; @@ -333,12 +333,12 @@ impl, A: ConsensusApi + fn filter(&self, event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::DAProposalRecv(_, _) - | HotShotEvent::DAVoteRecv(_) + HotShotEvent::DaProposalRecv(_, _) + | HotShotEvent::DaVoteRecv(_) | HotShotEvent::Shutdown | HotShotEvent::BlockRecv(_, _, _, _, _) | HotShotEvent::ViewChange(_) - | HotShotEvent::DAProposalValidated(_, _) + | HotShotEvent::DaProposalValidated(_, _) ) } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 4811bdfd75..f0fa8cc8be 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -3,14 +3,14 @@ use std::sync::Arc; use either::Either; use hotshot_types::{ consensus::ProposalDependencyData, - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, + data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, @@ -41,15 +41,15 @@ pub enum HotShotEvent { /// Send a timeout vote to the network; emitted by consensus task replicas TimeoutVoteSend(TimeoutVote), /// A DA proposal has been received from the network; handled by the DA task - DAProposalRecv(Proposal>, TYPES::SignatureKey), + DaProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA proposal has been validated; handled by the DA task and VID task - DAProposalValidated(Proposal>, TYPES::SignatureKey), + DaProposalValidated(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task - DAVoteRecv(DAVote), + DaVoteRecv(DaVote), /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task - DACertificateRecv(DACertificate), + DaCertificateRecv(DaCertificate), /// A DAC is validated. - DACertificateValidated(DACertificate), + DaCertificateValidated(DaCertificate), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal @@ -59,13 +59,13 @@ pub enum HotShotEvent { /// A quorum proposal with the given parent leaf is validated. QuorumProposalValidated(QuorumProposal, Leaf), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task - DAProposalSend(Proposal>, TYPES::SignatureKey), + DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal - DAVoteSend(DAVote), + DaVoteSend(DaVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QCFormed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task - DACSend(DACertificate, TYPES::SignatureKey), + DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks ViewChange(TYPES::Time), /// Timeout for the view sync protocol; emitted by a replica in the view sync task @@ -129,11 +129,11 @@ pub enum HotShotEvent { LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader /// - /// Like [`HotShotEvent::DAProposalSend`]. + /// Like [`HotShotEvent::DaProposalSend`]. VidDisperseSend(Proposal>, TYPES::SignatureKey), /// Vid disperse share has been received from the network; handled by the consensus task /// - /// Like [`HotShotEvent::DAProposalRecv`]. + /// Like [`HotShotEvent::DaProposalRecv`]. VIDShareRecv(Proposal>), /// VID share data is validated. VIDShareValidated(Proposal>), diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 96d1b84a90..64460d7b42 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -9,8 +9,8 @@ use hotshot_types::{ data::{VidDisperse, VidDisperseShare}, event::HotShotAction, message::{ - CommitteeConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, - Proposal, SequencingMessage, + DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, + SequencingMessage, }, traits::{ election::Membership, @@ -34,7 +34,7 @@ pub fn quorum_filter(event: &Arc>) -> bool event.as_ref(), HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) - | HotShotEvent::DACSend(_, _) + | HotShotEvent::DacSend(_, _) | HotShotEvent::TimeoutVoteSend(_) ) } @@ -47,11 +47,11 @@ pub fn upgrade_filter(event: &Arc>) -> bool ) } -/// committee filter -pub fn committee_filter(event: &Arc>) -> bool { +/// DA filter +pub fn da_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::DAProposalSend(_, _) | HotShotEvent::DAVoteSend(_) + HotShotEvent::DaProposalSend(_, _) | HotShotEvent::DaVoteSend(_) ) } @@ -151,22 +151,20 @@ impl NetworkMessageTaskState { HotShotEvent::UpgradeVoteRecv(message) } }, - SequencingMessage::Committee(committee_message) => { - match committee_message { - CommitteeConsensusMessage::DAProposal(proposal) => { - HotShotEvent::DAProposalRecv(proposal, sender) - } - CommitteeConsensusMessage::DAVote(vote) => { - HotShotEvent::DAVoteRecv(vote.clone()) - } - CommitteeConsensusMessage::DACertificate(cert) => { - HotShotEvent::DACertificateRecv(cert) - } - CommitteeConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VIDShareRecv(proposal) - } + SequencingMessage::Da(da_message) => match da_message { + DaConsensusMessage::DaProposal(proposal) => { + HotShotEvent::DaProposalRecv(proposal, sender) } - } + DaConsensusMessage::DaVote(vote) => { + HotShotEvent::DaVoteRecv(vote.clone()) + } + DaConsensusMessage::DaCertificate(cert) => { + HotShotEvent::DaCertificateRecv(cert) + } + DaConsensusMessage::VidDisperseMsg(proposal) => { + HotShotEvent::VIDShareRecv(proposal) + } + }, }; // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. @@ -295,33 +293,33 @@ impl< HotShotEvent::VidDisperseSend(proposal, sender) => { return self.handle_vid_disperse_proposal(proposal, &sender); } - HotShotEvent::DAProposalSend(proposal, sender) => { + HotShotEvent::DaProposalSend(proposal, sender) => { maybe_action = Some(HotShotAction::DAPropose); ( sender, - MessageKind::::from_consensus_message(SequencingMessage::Committee( - CommitteeConsensusMessage::DAProposal(proposal), + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaProposal(proposal), )), - TransmitType::DACommitteeBroadcast, + TransmitType::DaCommitteeBroadcast, ) } - HotShotEvent::DAVoteSend(vote) => { - maybe_action = Some(HotShotAction::DAVote); + HotShotEvent::DaVoteSend(vote) => { + maybe_action = Some(HotShotAction::DaVote); ( vote.get_signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::Committee( - CommitteeConsensusMessage::DAVote(vote.clone()), + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaVote(vote.clone()), )), TransmitType::Direct(membership.get_leader(vote.get_view_number())), ) } // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee - HotShotEvent::DACSend(certificate, sender) => { + HotShotEvent::DacSend(certificate, sender) => { maybe_action = Some(HotShotAction::DACert); ( sender, - MessageKind::::from_consensus_message(SequencingMessage::Committee( - CommitteeConsensusMessage::DACertificate(certificate), + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaCertificate(certificate), )), TransmitType::Broadcast, ) @@ -449,7 +447,7 @@ impl< net.broadcast_message(message, committee, STATIC_VER_0_1) .await } - TransmitType::DACommitteeBroadcast => { + TransmitType::DaCommitteeBroadcast => { net.da_broadcast_message(message, committee, STATIC_VER_0_1) .await } @@ -483,11 +481,9 @@ impl< proposal.data.recipient_key.clone(), Message { sender: sender.clone(), - kind: MessageKind::::from_consensus_message( - SequencingMessage::Committee( - CommitteeConsensusMessage::VidDisperseMsg(proposal), - ), - ), // TODO not a CommitteeConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + kind: MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::VidDisperseMsg(proposal), + )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 }, ) }) diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index ec152b54d7..2be2109eb0 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -208,7 +208,7 @@ pub struct QuorumProposalTaskState pub quorum_network: Arc, /// Network for DA committee - pub committee_network: Arc, + pub da_network: Arc, /// Output events to application pub output_event_stream: async_broadcast::Sender>, diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index e790f2b0fe..dce9608f24 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -45,7 +45,7 @@ use crate::{ enum VoteDependency { /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. QuorumProposal, - /// For the `DACertificateRecv` event. + /// For the `DaCertificateRecv` event. Dac, /// For the `VIDShareRecv` event. Vid, @@ -107,7 +107,7 @@ impl + 'static> HandleDepOutput } leaf = Some(proposed_leaf); } - HotShotEvent::DACertificateValidated(cert) => { + HotShotEvent::DaCertificateValidated(cert) => { let cert_payload_comm = cert.get_data().payload_commit; if let Some(comm) = payload_commitment { if cert_payload_comm != comm { @@ -228,7 +228,7 @@ pub struct QuorumVoteTaskState> { pub quorum_network: Arc, /// Network for DA committee - pub committee_network: Arc, + pub da_network: Arc, /// Membership for Quorum certs/votes. pub quorum_membership: Arc, @@ -268,7 +268,7 @@ impl> QuorumVoteTaskState { - if let HotShotEvent::DACertificateValidated(cert) = event { + if let HotShotEvent::DaCertificateValidated(cert) = event { cert.view_number } else { return false; @@ -418,7 +418,7 @@ impl> QuorumVoteTaskState { + HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; trace!("Received DAC for view {}", *view); if view <= self.latest_voted_view { @@ -437,7 +437,7 @@ impl> QuorumVoteTaskState> TaskState for QuorumVoteTask fn filter(&self, event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::DACertificateRecv(_) + HotShotEvent::DaCertificateRecv(_) | HotShotEvent::VIDShareRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) | HotShotEvent::VoteNow(..) diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 50afffa761..1b90c96b6f 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -13,7 +13,7 @@ use async_lock::RwLock; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, - message::{CommitteeConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, + message::{DaConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, traits::{ election::Membership, network::{ConnectedNetwork, DataRequest, RequestKind, ResponseMessage}, @@ -53,9 +53,9 @@ pub struct NetworkRequestState< pub view: TYPES::Time, /// Delay before requesting peers pub delay: Duration, - /// Committee + /// DA Membership pub da_membership: TYPES::Membership, - /// Quorum + /// Quorum Membership pub quorum_membership: TYPES::Membership, /// This nodes public key pub public_key: TYPES::SignatureKey, @@ -239,7 +239,7 @@ impl> DelayedRequester { RequestKind::VID(view, key) => { self.do_vid::(VidRequest(view, key), signature).await; } - RequestKind::DAProposal(..) => {} + RequestKind::DaProposal(..) => {} } } @@ -299,7 +299,7 @@ impl> DelayedRequester { /// Transform a response into a `HotShotEvent` async fn handle_response_message(&self, message: SequencingMessage) { let event = match message { - SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(prop)) => { + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(prop)) => { HotShotEvent::VIDShareRecv(prop) } _ => return, diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 39f292481a..e5881fe77c 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -10,9 +10,7 @@ use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::Consensus, data::VidDisperseShare, - message::{ - CommitteeConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage, - }, + message::{DaConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage}, traits::{ election::Membership, network::{DataRequest, RequestKind, ResponseChannel, ResponseMessage}, @@ -157,12 +155,11 @@ impl NetworkResponseState { let Some(share) = self.get_or_calc_vid_share(view, &pub_key).await else { return self.make_msg(ResponseMessage::NotFound); }; - let seq_msg = - SequencingMessage::Committee(CommitteeConsensusMessage::VidDisperseMsg(share)); + let seq_msg = SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(share)); self.make_msg(ResponseMessage::Found(seq_msg)) } // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 - RequestKind::DAProposal(_view) => self.make_msg(ResponseMessage::NotFound), + RequestKind::DaProposal(_view) => self.make_msg(ResponseMessage::NotFound), } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e25b140c73..65509a43c1 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -233,7 +233,7 @@ impl< .get(&prev_view) .and_then(|view| match view.view_inner { // For a view for which we have a Leaf stored - ViewInner::DA { payload_commitment } => Some(payload_commitment), + ViewInner::Da { payload_commitment } => Some(payload_commitment), ViewInner::Leaf { leaf, .. } => consensus .saved_leaves() .get(&leaf) diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 360304d2c9..c5a14e75e2 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -6,11 +6,11 @@ use either::Either::{self, Left, Right}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{election::Membership, node_implementation::NodeType}, @@ -225,7 +225,7 @@ where type QuorumVoteState = VoteCollectionTaskState, QuorumCertificate>; /// Alias for DA vote accumulator -type DAVoteState = VoteCollectionTaskState, DACertificate>; +type DaVoteState = VoteCollectionTaskState, DaCertificate>; /// Alias for Timeout vote accumulator type TimeoutVoteState = VoteCollectionTaskState, TimeoutCertificate>; @@ -276,17 +276,17 @@ impl AggregatableVote, UpgradeCertifi } } -impl AggregatableVote, DACertificate> - for DAVote +impl AggregatableVote, DaCertificate> + for DaVote { fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { membership.get_leader(self.get_view_number()) } fn make_cert_event( - certificate: DACertificate, + certificate: DaCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::DACSend(certificate, key.clone()) + HotShotEvent::DacSend(certificate, key.clone()) } } @@ -390,8 +390,8 @@ impl HandleVoteEvent, UpgradeCertific } #[async_trait] -impl HandleVoteEvent, DACertificate> - for DAVoteState +impl HandleVoteEvent, DaCertificate> + for DaVoteState { async fn handle_event( &mut self, @@ -399,12 +399,12 @@ impl HandleVoteEvent, DACertificate sender: &Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(vote, sender).await, + HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => None, } } fn filter(event: Arc>) -> bool { - matches!(event.as_ref(), HotShotEvent::DAVoteRecv(_)) + matches!(event.as_ref(), HotShotEvent::DaVoteRecv(_)) } } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 2f92bee4d6..20dcb77a38 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -484,7 +484,7 @@ impl BuilderTask for SimpleBuilderTask { } self.blocks.write().await.clear(); } - EventType::DAProposal { proposal, .. } => { + EventType::DaProposal { proposal, .. } => { let payload = TYPES::BlockPayload::from_bytes( &proposal.data.encoded_transactions, &proposal.data.metadata, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 5ca2754c16..96b7028a4a 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -68,12 +68,7 @@ impl< > TestTaskState for SpinningTask where I: TestableNodeImplementation, - I: NodeImplementation< - TYPES, - QuorumNetwork = N, - CommitteeNetwork = N, - Storage = TestStorage, - >, + I: NodeImplementation>, { type Message = Event; diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 6f416ca0ba..967762fcc8 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -19,8 +19,8 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewNumber}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::DACertificate, - simple_vote::{DAData, DAVote, QuorumData, QuorumVote, SimpleVote}, + simple_certificate::DaCertificate, + simple_vote::{DaData, DaVote, QuorumData, QuorumVote, SimpleVote}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, @@ -270,17 +270,17 @@ pub fn build_da_certificate( transactions: Vec, public_key: &::SignatureKey, private_key: &::PrivateKey, -) -> DACertificate { +) -> DaCertificate { let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); let da_payload_commitment = vid_commitment(&encoded_transactions, quorum_membership.total_nodes()); - let da_data = DAData { + let da_data = DaData { payload_commit: da_payload_commitment, }; - build_cert::, DACertificate>( + build_cert::, DaCertificate>( da_data, da_membership, view_number, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index a57f20c098..2aceeb8493 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -120,12 +120,7 @@ impl< > TestRunner where I: TestableNodeImplementation, - I: NodeImplementation< - TYPES, - QuorumNetwork = N, - CommitteeNetwork = N, - Storage = TestStorage, - >, + I: NodeImplementation>, { /// execute test /// diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 914d58e2ee..3d09029ac5 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -8,14 +8,14 @@ use hotshot_example_types::{ state_types::TestInstanceState, }; use hotshot_types::{ - data::{DAProposal, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence, ViewNumber}, + data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence, ViewNumber}, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, simple_vote::{ - DAData, DAVote, QuorumData, QuorumVote, TimeoutData, TimeoutVote, UpgradeProposalData, + DaData, DaVote, QuorumData, QuorumVote, TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, }, traits::{ @@ -32,7 +32,7 @@ use crate::task_helpers::{ #[derive(Clone)] pub struct TestView { - pub da_proposal: Proposal>, + pub da_proposal: Proposal>, pub quorum_proposal: Proposal>, pub leaf: Leaf, pub view_number: ViewNumber, @@ -43,7 +43,7 @@ pub struct TestView { ::SignatureKey, ), pub leader_public_key: ::SignatureKey, - pub da_certificate: DACertificate, + pub da_certificate: DaCertificate, pub transactions: Vec, upgrade_data: Option>, formed_upgrade_certificate: Option>, @@ -108,7 +108,7 @@ impl TestView { ::SignatureKey::sign(&private_key, &encoded_transactions_hash) .expect("Failed to sign block payload"); - let da_proposal_inner = DAProposal:: { + let da_proposal_inner = DaProposal:: { encoded_transactions: encoded_transactions.clone(), metadata: TestMetadata, view_number: genesis_view, @@ -314,7 +314,7 @@ impl TestView { ::SignatureKey::sign(&private_key, &encoded_transactions_hash) .expect("Failed to sign block payload"); - let da_proposal_inner = DAProposal:: { + let da_proposal_inner = DaProposal:: { encoded_transactions: encoded_transactions.clone(), metadata: TestMetadata, view_number: next_view, @@ -383,16 +383,16 @@ impl TestView { pub fn create_da_vote( &self, - data: DAData, + data: DaData, handle: &SystemContextHandle, - ) -> DAVote { - DAVote::create_signed_vote( + ) -> DaVote { + DaVote::create_signed_vote( data, self.view_number, handle.public_key(), handle.private_key(), ) - .expect("Failed to sign DAData") + .expect("Failed to sign DaData") } } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 5c9594a04c..9675040444 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -71,7 +71,7 @@ async fn test_consensus_task() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ @@ -154,7 +154,7 @@ async fn test_consensus_vote() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), QuorumVoteRecv(votes[0].clone()), ], @@ -203,7 +203,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ @@ -217,7 +217,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), - DACertificateRecv(dacs[1].clone()), + DaCertificateRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); @@ -243,7 +243,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote_with_permuted_dac() { - // These tests verify that a vote is indeed sent no matter when it receives a DACertificateRecv + // These tests verify that a vote is indeed sent no matter when it receives a DaCertificateRecv // event. In particular, we want to verify that receiving events in an unexpected (but still // valid) order allows the system to proceed as it normally would. test_vote_with_specific_order(vec![0, 1, 2]).await; @@ -316,7 +316,7 @@ async fn test_view_sync_finalize_propose() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ @@ -442,7 +442,7 @@ async fn test_view_sync_finalize_vote() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ @@ -539,7 +539,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ @@ -633,7 +633,7 @@ async fn test_vid_disperse_storage_failure() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 9eead78ad5..f2c8cea784 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -6,7 +6,7 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; -use hotshot_task_impls::{da::DATaskState, events::HotShotEvent::*}; +use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::exact, script::{run_test_script, TestScriptStage}, @@ -15,7 +15,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{null_block, ViewNumber}, - simple_vote::DAData, + simple_vote::DaData, traits::{ block_contents::precompute_vid_commitment, election::Membership, node_implementation::ConsensusTime, @@ -52,7 +52,7 @@ async fn test_da_task() { for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -62,7 +62,7 @@ async fn test_da_task() { for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -81,21 +81,21 @@ async fn test_da_task() { precompute, ), ], - outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], + outputs: vec![exact(DaProposalSend(proposals[1].clone(), leaders[1]))], asserts: vec![], }; // Run view 2 and validate proposal. let view_2 = TestScriptStage { - inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], + inputs: vec![DaProposalRecv(proposals[1].clone(), leaders[1])], outputs: vec![ - exact(DAProposalValidated(proposals[1].clone(), leaders[1])), - exact(DAVoteSend(votes[1].clone())), + exact(DaProposalValidated(proposals[1].clone(), leaders[1])), + exact(DaVoteSend(votes[1].clone())), ], asserts: vec![], }; - let da_state = DATaskState::>::create_from(&handle).await; + let da_state = DaTaskState::>::create_from(&handle).await; let stages = vec![view_1, view_2]; run_test_script(stages, da_state).await; @@ -134,7 +134,7 @@ async fn test_da_task_storage_failure() { for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -144,7 +144,7 @@ async fn test_da_task_storage_failure() { for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DAData { payload_commit }, &handle)); + votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -163,27 +163,27 @@ async fn test_da_task_storage_failure() { precompute, ), ], - outputs: vec![exact(DAProposalSend(proposals[1].clone(), leaders[1]))], + outputs: vec![exact(DaProposalSend(proposals[1].clone(), leaders[1]))], asserts: vec![], }; // Run view 2 and validate proposal. let view_2 = TestScriptStage { - inputs: vec![DAProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![exact(DAProposalValidated(proposals[1].clone(), leaders[1]))], + inputs: vec![DaProposalRecv(proposals[1].clone(), leaders[1])], + outputs: vec![exact(DaProposalValidated(proposals[1].clone(), leaders[1]))], asserts: vec![], }; // Run view 3 and propose. let view_3 = TestScriptStage { - inputs: vec![DAProposalValidated(proposals[1].clone(), leaders[1])], + inputs: vec![DaProposalValidated(proposals[1].clone(), leaders[1])], outputs: vec![ /* No vote was sent due to the storage failure */ ], asserts: vec![], }; - let da_state = DATaskState::>::create_from(&handle).await; + let da_state = DaTaskState::>::create_from(&handle).await; let stages = vec![view_1, view_2, view_3]; run_test_script(stages, da_state).await; diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 2467493001..c16b2f41bb 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -68,7 +68,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), ], outputs: vec![ @@ -98,7 +98,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { ]; let mut view_2_inputs = permute_input_with_index_order(inputs, input_permutation); - view_2_inputs.insert(0, DACertificateRecv(dacs[1].clone())); + view_2_inputs.insert(0, DaCertificateRecv(dacs[1].clone())); view_2_inputs.insert( 0, VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 8311ee602a..15393d9397 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -41,11 +41,11 @@ async fn test_quorum_vote_task_success() { let view_success = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), - DACertificateRecv(dacs[1].clone()), + DaCertificateRecv(dacs[1].clone()), VIDShareRecv(vids[1].0[0].clone()), ], outputs: vec![ - exact(DACertificateValidated(dacs[1].clone())), + exact(DaCertificateValidated(dacs[1].clone())), exact(VIDShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), quorum_vote_send(), @@ -155,18 +155,18 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_vid = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), - DACertificateRecv(dacs[1].clone()), + DaCertificateRecv(dacs[1].clone()), ], - outputs: vec![exact(DACertificateValidated(dacs[1].clone()))], + outputs: vec![exact(DaCertificateValidated(dacs[1].clone()))], asserts: vec![], }; let view_no_quorum_proposal = TestScriptStage { inputs: vec![ - DACertificateRecv(dacs[2].clone()), + DaCertificateRecv(dacs[2].clone()), VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), ], outputs: vec![ - exact(DACertificateValidated(dacs[2].clone())), + exact(DaCertificateValidated(dacs[2].clone())), exact(VIDShareValidated(vids[2].0[0].clone())), ], asserts: vec![], diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index e60f7247f0..9f12408f58 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -85,7 +85,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -99,7 +99,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), - DACertificateRecv(dacs[1].clone()), + DaCertificateRecv(dacs[1].clone()), ], outputs: vec![ exact(ViewChange(ViewNumber::new(2))), @@ -112,7 +112,7 @@ async fn test_consensus_task_upgrade() { let view_3 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), - DACertificateRecv(dacs[2].clone()), + DaCertificateRecv(dacs[2].clone()), VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), ], outputs: vec![ @@ -127,7 +127,7 @@ async fn test_consensus_task_upgrade() { let view_4 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), - DACertificateRecv(dacs[3].clone()), + DaCertificateRecv(dacs[3].clone()), VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), ], outputs: vec![ @@ -238,12 +238,12 @@ async fn test_upgrade_and_consensus_task() { vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - DACertificateRecv(dacs[1].clone()), + DaCertificateRecv(dacs[1].clone()), VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), ], vec![ @@ -429,12 +429,12 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), - DACertificateRecv(dacs[0].clone()), + DaCertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), - DACertificateRecv(dacs[1].clone()), + DaCertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, proposals[1].data.block_header.builder_commitment.clone(), @@ -445,7 +445,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ), ], vec![ - DACertificateRecv(dacs[2].clone()), + DaCertificateRecv(dacs[2].clone()), VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, @@ -458,7 +458,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], vec![ - DACertificateRecv(dacs[3].clone()), + DaCertificateRecv(dacs[3].clone()), VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, @@ -471,7 +471,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], vec![ - DACertificateRecv(dacs[4].clone()), + DaCertificateRecv(dacs[4].clone()), VIDShareRecv(get_vid_share(&vids[4].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, @@ -484,7 +484,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], vec![ - DACertificateRecv(dacs[5].clone()), + DaCertificateRecv(dacs[5].clone()), VIDShareRecv(get_vid_share(&vids[5].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, @@ -497,7 +497,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], vec![ - DACertificateRecv(dacs[6].clone()), + DaCertificateRecv(dacs[6].clone()), VIDShareRecv(get_vid_share(&vids[6].0, handle.get_public_key())), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 802d54b0ec..ce8ab85cee 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -9,7 +9,7 @@ use hotshot_example_types::{ use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; use hotshot_types::{ - data::{null_block, DAProposal, VidDisperse, VidDisperseShare, ViewNumber}, + data::{null_block, DaProposal, VidDisperse, VidDisperseShare, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -50,7 +50,7 @@ async fn test_vid_task() { payload_commitment.as_ref(), ) .expect("Failed to sign block payload!"); - let proposal: DAProposal = DAProposal { + let proposal: DaProposal = DaProposal { encoded_transactions: encoded_transactions.clone(), metadata: TestMetadata, view_number: ViewNumber::new(2), diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index cd6e04f2e9..668b75e055 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -59,12 +59,12 @@ impl NodeType for Test { #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct TestImpl {} -pub type DANetwork = MemoryNetwork, ::SignatureKey>; +pub type DaNetwork = MemoryNetwork, ::SignatureKey>; pub type QuorumNetwork = MemoryNetwork, ::SignatureKey>; impl NodeImplementation for TestImpl { type QuorumNetwork = QuorumNetwork; - type CommitteeNetwork = DANetwork; + type DaNetwork = DaNetwork; type Storage = TestStorage; } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 2e5ed666cd..831757c054 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -16,7 +16,7 @@ use crate::{ error::HotShotError, message::Proposal, simple_certificate::{ - DACertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, + DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, traits::{ @@ -51,7 +51,7 @@ pub struct Consensus { /// All the DA certs we've received for current and future views. /// view -> DA cert - saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. cur_view: TYPES::Time, @@ -337,7 +337,7 @@ impl Consensus { } /// Get the saved DA certs. - pub fn saved_da_certs(&self) -> &HashMap> { + pub fn saved_da_certs(&self) -> &HashMap> { &self.saved_da_certs } @@ -446,7 +446,7 @@ impl Consensus { } /// Add a new entry to the da_certs map. - pub fn update_saved_da_certs(&mut self, view_number: TYPES::Time, cert: DACertificate) { + pub fn update_saved_da_certs(&mut self, view_number: TYPES::Time, cert: DaCertificate) { self.saved_da_certs.insert(view_number, cert); } diff --git a/types/src/data.rs b/types/src/data.rs index fbe6b627d5..fd38ae20d2 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -113,7 +113,7 @@ impl std::ops::Sub for ViewNumber { /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct DAProposal { +pub struct DaProposal { /// Encoded transactions in the block to be applied. pub encoded_transactions: Arc<[u8]>, /// Metadata of the block to be applied. @@ -137,7 +137,7 @@ where /// VID dispersal data /// -/// Like [`DAProposal`]. +/// Like [`DaProposal`]. /// /// TODO move to vid.rs? #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] @@ -318,7 +318,7 @@ pub struct QuorumProposal { pub proposal_certificate: Option>, } -impl HasViewNumber for DAProposal { +impl HasViewNumber for DaProposal { fn get_view_number(&self) -> TYPES::Time { self.view_number } diff --git a/types/src/event.rs b/types/src/event.rs index a66a1559a7..44dcda5de6 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, + data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate, @@ -146,9 +146,9 @@ pub enum EventType { }, /// DA proposal was received from the network /// or submitted to the network by us - DAProposal { + DaProposal { /// Contents of the proposal - proposal: Proposal>, + proposal: Proposal>, /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, @@ -179,7 +179,7 @@ pub enum HotShotAction { /// DA proposal was sent DAPropose, /// DA vote was sent - DAVote, + DaVote, /// DA certificate was sent DACert, /// VID shares were sent diff --git a/types/src/message.rs b/types/src/message.rs index 42d2e470f1..1499b25b36 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -12,13 +12,13 @@ use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - data::{DAProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, + data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, simple_certificate::{ - DACertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + DaCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DAVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ @@ -184,20 +184,20 @@ pub enum GeneralConsensusMessage { #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to the sequencing consensus protocol for the DA committee. -pub enum CommitteeConsensusMessage { +pub enum DaConsensusMessage { /// Proposal for data availability committee - DAProposal(Proposal>), + DaProposal(Proposal>), /// vote for data availability committee - DAVote(DAVote), + DaVote(DaVote), /// Certificate data is available - DACertificate(DACertificate), + DaCertificate(DaCertificate), /// Initiate VID dispersal. /// - /// Like [`DAProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. - /// TODO this variant should not be a [`CommitteeConsensusMessage`] because + /// Like [`DaProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. + /// TODO this variant should not be a [`DaConsensusMessage`] because VidDisperseMsg(Proposal>), } @@ -209,7 +209,7 @@ pub enum SequencingMessage { General(GeneralConsensusMessage), /// Messages related to the sequencing consensus protocol for the DA committee. - Committee(CommitteeConsensusMessage), + Da(DaConsensusMessage), } impl SequencingMessage { @@ -249,20 +249,16 @@ impl SequencingMessage { GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), } } - SequencingMessage::Committee(committee_message) => { - match committee_message { - CommitteeConsensusMessage::DAProposal(p) => { + SequencingMessage::Da(da_message) => { + match da_message { + DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.get_view_number() } - CommitteeConsensusMessage::DAVote(vote_message) => { - vote_message.get_view_number() - } - CommitteeConsensusMessage::DACertificate(cert) => cert.view_number, - CommitteeConsensusMessage::VidDisperseMsg(disperse) => { - disperse.data.get_view_number() - } + DaConsensusMessage::DaVote(vote_message) => vote_message.get_view_number(), + DaConsensusMessage::DaCertificate(cert) => cert.view_number, + DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.get_view_number(), } } } @@ -291,11 +287,11 @@ impl SequencingMessage { GeneralConsensusMessage::UpgradeProposal(_) => MessagePurpose::UpgradeProposal, GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::UpgradeVote, }, - SequencingMessage::Committee(committee_message) => match committee_message { - CommitteeConsensusMessage::DAProposal(_) => MessagePurpose::Proposal, - CommitteeConsensusMessage::DAVote(_) => MessagePurpose::Vote, - CommitteeConsensusMessage::DACertificate(_) => MessagePurpose::DAC, - CommitteeConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, + SequencingMessage::Da(da_message) => match da_message { + DaConsensusMessage::DaProposal(_) => MessagePurpose::Proposal, + DaConsensusMessage::DaVote(_) => MessagePurpose::Vote, + DaConsensusMessage::DaCertificate(_) => MessagePurpose::DAC, + DaConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, }, } } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index fe18f9d599..92d1d2fef2 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use crate::{ data::serialize_signature2, simple_vote::{ - DAData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, + DaData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ @@ -197,8 +197,8 @@ impl UpgradeCertificate { /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; -/// Type alias for a DA certificate over `DAData` -pub type DACertificate = SimpleCertificate; +/// Type alias for a DA certificate over `DaData` +pub type DaCertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; /// Type alias for a `ViewSyncPreCommit` certificate over a view number diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 97ab6e398f..7fa71a27df 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -22,7 +22,7 @@ pub struct QuorumData { } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. -pub struct DAData { +pub struct DaData { /// Commitment to a block payload pub payload_commit: VidCommitment, } @@ -178,7 +178,7 @@ impl Committable for TimeoutData { } } -impl Committable for DAData { +impl Committable for DaData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("DA data") .var_size_bytes(self.payload_commit.as_ref()) @@ -248,7 +248,7 @@ impl = SimpleVote>; /// DA vote type alias -pub type DAVote = SimpleVote; +pub type DaVote = SimpleVote; /// Timeout Vote type alias pub type TimeoutVote = SimpleVote>; /// View Sync Commit Vote type alias diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 10a3f6965f..15e4e32d0c 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -91,7 +91,7 @@ pub enum TransmitType { /// broadcast the message to all Broadcast, /// broadcast to DA committee - DACommitteeBroadcast, + DaCommitteeBroadcast, } /// Error type for networking @@ -209,7 +209,7 @@ pub enum RequestKind { /// Request VID data by our key and the VID commitment VID(TYPES::Time, TYPES::SignatureKey), /// Request a DA proposal for a certain view - DAProposal(TYPES::Time), + DaProposal(TYPES::Time), } /// A response for a request. `SequencingMessage` is the same as other network messages diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 76945dad01..9a8504d7bb 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -48,7 +48,7 @@ pub trait NodeImplementation: type QuorumNetwork: ConnectedNetwork, TYPES::SignatureKey>; /// Network for those in the DA committee - type CommitteeNetwork: ConnectedNetwork, TYPES::SignatureKey>; + type DaNetwork: ConnectedNetwork, TYPES::SignatureKey>; /// Storage for DA layer interactions type Storage: Storage; @@ -98,7 +98,7 @@ where TYPES::ValidatedState: TestableState, TYPES::BlockPayload: TestableBlock, I::QuorumNetwork: TestableNetworkingImplementation, - I::CommitteeNetwork: TestableNetworkingImplementation, + I::DaNetwork: TestableNetworkingImplementation, { fn state_create_random_transaction( state: Option<&TYPES::ValidatedState>, diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 497b1b0857..4feb880e85 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -11,7 +11,7 @@ use async_trait::async_trait; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::{DAProposal, Leaf, VidDisperseShare}, + data::{DaProposal, Leaf, VidDisperseShare}, event::HotShotAction, message::Proposal, simple_certificate::QuorumCertificate, @@ -23,7 +23,7 @@ pub trait Storage: Send + Sync + Clone { /// Add a proposal to the stored VID proposals. async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; /// Add a proposal to the stored DA proposals. - async fn append_da(&self, proposal: &Proposal>) -> Result<()>; + async fn append_da(&self, proposal: &Proposal>) -> Result<()>; /// Record a HotShotAction taken. async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. diff --git a/types/src/utils.rs b/types/src/utils.rs index d6bb2a1eb3..73cacb7b47 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -32,7 +32,7 @@ pub enum ViewInner { /// Storing this state allows us to garbage collect blocks for views where a proposal is never /// made. This saves memory when a leader fails and subverts a DoS attack where malicious /// leaders repeatedly request availability for blocks that they never propose. - DA { + Da { /// Payload commitment to the available block. payload_commitment: VidCommitment, }, @@ -51,7 +51,7 @@ pub enum ViewInner { impl Clone for ViewInner { fn clone(&self) -> Self { match self { - Self::DA { payload_commitment } => Self::DA { + Self::Da { payload_commitment } => Self::Da { payload_commitment: *payload_commitment, }, Self::Leaf { leaf, state, delta } => Self::Leaf { @@ -118,7 +118,7 @@ impl ViewInner { /// return the underlying block paylod commitment if it exists #[must_use] pub fn get_payload_commitment(&self) -> Option { - if let Self::DA { payload_commitment } = self { + if let Self::Da { payload_commitment } = self { Some(*payload_commitment) } else { None diff --git a/types/src/vote.rs b/types/src/vote.rs index 53a470c912..b633eef058 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -14,7 +14,7 @@ use tracing::error; use crate::{ data::{Leaf, QuorumProposal, VidDisperseShare}, message::Proposal, - simple_certificate::{DACertificate, Threshold}, + simple_certificate::{DaCertificate, Threshold}, simple_vote::Voteable, traits::{ election::Membership, @@ -198,5 +198,5 @@ pub struct VoteDependencyData { pub disperse_share: Proposal>, /// The DA certificate. - pub da_cert: DACertificate, + pub da_cert: DaCertificate, } From a89d36509f31405326ac13d7605bcd2f40298af3 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 14 May 2024 16:27:10 -0400 Subject: [PATCH 1033/1393] Make metrics labels more compatible with Prometheus (#3158) * Add `MetricsFamily` trait to group related metrics that are partitioned by values of their labels. This gives us a way to create metrics that actually use Prometheus labels, a capability we haven't had until now. * Rename `Label` to `Text` to distinguish it from the Prometheus concept of a label * Change the way `Text` works. Prometheus doesn't support top-level key-value pairs with text values, so implementing the old `Label` trait for Prometheus, we were forced to hack it by putting the value in a comment. A better approach is to have a "text" metric where the value of the metric is the name (this can be implemented, e.g., as a gauge with a dummy value of 1). Additional key-value pairs can be added using families/labels as described above. * Remove dead code `Metrics` implementations in consensus and networking The motivation for this is a request from node operators to make the version metrics populated by the sequencer more parseable. For this, we only need a family with a single instance, whose label values contain information like git revision and whatnot. However, this implementation is more general, and will allow us to fully take advantage of Prometheus labels if we want to in the future. Examples of both the simple "version label" usage and a more complex use of labels can be found in the doc comments. --- hotshot/src/traits/networking.rs | 119 +---------------- types/src/consensus.rs | 119 +---------------- types/src/traits/metrics.rs | 221 +++++++++++++++++++++++++------ 3 files changed, 187 insertions(+), 272 deletions(-) diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index d0de703de1..05292465ad 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -10,13 +10,9 @@ pub mod libp2p_network; pub mod memory_network; /// The Push CDN network pub mod push_cdn_network; -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; use custom_debug::Debug; -use hotshot_types::traits::metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}; +use hotshot_types::traits::metrics::{Counter, Gauge, Metrics, NoMetrics}; pub use hotshot_types::traits::network::{ FailedToSerializeSnafu, NetworkError, NetworkReliability, }; @@ -43,119 +39,6 @@ pub struct NetworkingMetricsValue { // pub kademlia_buckets: Box, } -/// The wrapper with a string name for the networking metrics -#[derive(Clone, Debug)] -pub struct NetworkingMetrics { - /// a prefix which tracks the name of the metric - prefix: String, - /// a map of values - values: Arc>, -} - -/// the set of counters and gauges for the networking metrics -#[derive(Clone, Debug, Default)] -pub struct InnerNetworkingMetrics { - /// All the counters of the networking metrics - counters: HashMap, - /// All the gauges of the networking metrics - gauges: HashMap, - /// All the histograms of the networking metrics - histograms: HashMap>, - /// All the labels of the networking metrics - labels: HashMap, -} - -impl NetworkingMetrics { - /// For the creation and naming of gauge, counter, histogram and label. - pub fn sub(&self, name: String) -> Self { - let prefix = if self.prefix.is_empty() { - name - } else { - format!("{}-{name}", self.prefix) - }; - Self { - prefix, - values: Arc::clone(&self.values), - } - } -} - -impl Metrics for NetworkingMetrics { - fn create_counter(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_gauge(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_histogram(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_label(&self, label: String) -> Box { - Box::new(self.sub(label)) - } - - fn subgroup(&self, subgroup_name: String) -> Box { - Box::new(self.sub(subgroup_name)) - } -} - -impl Counter for NetworkingMetrics { - fn add(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .counters - .entry(self.prefix.clone()) - .or_default() += amount; - } -} - -impl Gauge for NetworkingMetrics { - fn set(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .gauges - .entry(self.prefix.clone()) - .or_default() = amount; - } - fn update(&self, delta: i64) { - let mut values = self.values.lock().unwrap(); - let value = values.gauges.entry(self.prefix.clone()).or_default(); - let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); - *value = usize::try_from(signed_value + delta).unwrap_or(0); - } -} - -impl Histogram for NetworkingMetrics { - fn add_point(&self, point: f64) { - self.values - .lock() - .unwrap() - .histograms - .entry(self.prefix.clone()) - .or_default() - .push(point); - } -} - -impl Label for NetworkingMetrics { - fn set(&self, value: String) { - *self - .values - .lock() - .unwrap() - .labels - .entry(self.prefix.clone()) - .or_default() = value; - } -} - impl NetworkingMetricsValue { /// Create a new instance of this [`NetworkingMetricsValue`] struct, setting all the counters and gauges #[must_use] diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 831757c054..0d05fbbf9b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -2,12 +2,11 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::{Arc, Mutex}, + sync::Arc, }; use anyhow::{ensure, Result}; use committable::{Commitment, Committable}; -use displaydoc::Display; use tracing::{debug, error}; pub use crate::utils::{View, ViewInner}; @@ -21,7 +20,7 @@ use crate::{ }, traits::{ block_contents::BuilderFee, - metrics::{Counter, Gauge, Histogram, Label, Metrics, NoMetrics}, + metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, node_implementation::NodeType, BlockPayload, ValidatedState, }, @@ -120,120 +119,6 @@ pub struct ConsensusMetricsValue { pub number_of_empty_blocks_proposed: Box, } -/// The wrapper with a string name for the networking metrics -#[derive(Clone, Debug)] -pub struct ConsensusMetrics { - /// a prefix which tracks the name of the metric - prefix: String, - /// a map of values - values: Arc>, -} - -/// the set of counters and gauges for the networking metrics -#[derive(Clone, Debug, Default, Display)] -pub struct InnerConsensusMetrics { - /// All the counters of the networking metrics - pub counters: HashMap, - /// All the gauges of the networking metrics - pub gauges: HashMap, - /// All the histograms of the networking metrics - pub histograms: HashMap>, - /// All the labels of the networking metrics - pub labels: HashMap, -} - -impl ConsensusMetrics { - #[must_use] - /// For the creation and naming of gauge, counter, histogram and label. - pub fn sub(&self, name: String) -> Self { - let prefix = if self.prefix.is_empty() { - name - } else { - format!("{}-{name}", self.prefix) - }; - Self { - prefix, - values: Arc::clone(&self.values), - } - } -} - -impl Metrics for ConsensusMetrics { - fn create_counter(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_gauge(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_histogram(&self, label: String, _unit_label: Option) -> Box { - Box::new(self.sub(label)) - } - - fn create_label(&self, label: String) -> Box { - Box::new(self.sub(label)) - } - - fn subgroup(&self, subgroup_name: String) -> Box { - Box::new(self.sub(subgroup_name)) - } -} - -impl Counter for ConsensusMetrics { - fn add(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .counters - .entry(self.prefix.clone()) - .or_default() += amount; - } -} - -impl Gauge for ConsensusMetrics { - fn set(&self, amount: usize) { - *self - .values - .lock() - .unwrap() - .gauges - .entry(self.prefix.clone()) - .or_default() = amount; - } - fn update(&self, delta: i64) { - let mut values = self.values.lock().unwrap(); - let value = values.gauges.entry(self.prefix.clone()).or_default(); - let signed_value = i64::try_from(*value).unwrap_or(i64::MAX); - *value = usize::try_from(signed_value + delta).unwrap_or(0); - } -} - -impl Histogram for ConsensusMetrics { - fn add_point(&self, point: f64) { - self.values - .lock() - .unwrap() - .histograms - .entry(self.prefix.clone()) - .or_default() - .push(point); - } -} - -impl Label for ConsensusMetrics { - fn set(&self, value: String) { - *self - .values - .lock() - .unwrap() - .labels - .entry(self.prefix.clone()) - .or_default() = value; - } -} - impl ConsensusMetricsValue { /// Create a new instance of this [`ConsensusMetricsValue`] struct, setting all the counters and gauges #[must_use] diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 685673615c..af2ed32dee 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -4,7 +4,7 @@ //! - [`Counter`]: an ever-increasing value (example usage: total bytes send/received) //! - [`Gauge`]: a value that store the latest value, and can go up and down (example usage: amount of users logged in) //! - [`Histogram`]: stores multiple float values based for a graph (example usage: CPU %) -//! - [`Label`]: Stores the last string (example usage: current version, network online/offline) +//! - text: stores a constant string in the collected metrics use std::fmt::Debug; @@ -15,22 +15,115 @@ pub trait Metrics: Send + Sync + DynClone + Debug { /// Create a [`Counter`] with an optional `unit_label`. /// /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" - fn create_counter(&self, label: String, unit_label: Option) -> Box; + fn create_counter(&self, name: String, unit_label: Option) -> Box; /// Create a [`Gauge`] with an optional `unit_label`. /// /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" - fn create_gauge(&self, label: String, unit_label: Option) -> Box; + fn create_gauge(&self, name: String, unit_label: Option) -> Box; /// Create a [`Histogram`] with an optional `unit_label`. /// /// The `unit_label` can be used to indicate what the unit of the value is, e.g. "kb" or "seconds" - fn create_histogram(&self, label: String, unit_label: Option) -> Box; - /// Create a [`Label`]. - fn create_label(&self, label: String) -> Box; + fn create_histogram(&self, name: String, unit_label: Option) -> Box; + + /// Create a text metric. + /// + /// Unlike other metrics, a textmetric does not have a value. It exists only to record a text + /// string in the collected metrics, and possibly to care other key-value pairs as part of a + /// [`TextFamily`]. Thus, the act of creating the text itself is sufficient to populate the text + /// in the collect metrics; no setter function needs to be called. + fn create_text(&self, name: String); + + /// Create a family of related counters, partitioned by their label values. + fn counter_family(&self, name: String, labels: Vec) -> Box; + + /// Create a family of related gauges, partitioned by their label values. + fn gauge_family(&self, name: String, labels: Vec) -> Box; + + /// Create a family of related histograms, partitioned by their label values. + fn histogram_family(&self, name: String, labels: Vec) -> Box; + + /// Create a family of related text metricx, partitioned by their label values. + fn text_family(&self, name: String, labels: Vec) -> Box; /// Create a subgroup with a specified prefix. fn subgroup(&self, subgroup_name: String) -> Box; } +/// A family of related metrics, partitioned by their label values. +/// +/// All metrics in a family have the same name. They are distinguished by a vector of strings +/// called labels. Each label has a name and a value, and each distinct vector of lable values +/// within a family acts like a distinct metric. +/// +/// The family object is used to instantiate individual metrics within the family via the +/// [`create`](Self::create) method. +/// +/// # Examples +/// +/// ## Counting HTTP requests, partitioned by method. +/// +/// ``` +/// # use hotshot_types::traits::metrics::{Metrics, MetricsFamily, Counter}; +/// # fn doc(_metrics: Box) { +/// let metrics: Box; +/// # metrics = _metrics; +/// let http_count = metrics.counter_family("http".into(), vec!["method".into()]); +/// let get_count = http_count.create(vec!["GET".into()]); +/// let post_count = http_count.create(vec!["POST".into()]); +/// +/// get_count.add(1); +/// post_count.add(2); +/// # } +/// ``` +/// +/// This creates Prometheus metrics like +/// ```text +/// http{method="GET"} 1 +/// http{method="POST"} 2 +/// ``` +/// +/// ## Using labels to store key-value text pairs. +/// +/// ``` +/// # use hotshot_types::traits::metrics::{Metrics, MetricsFamily}; +/// # fn doc(_metrics: Box) { +/// let metrics: Box; +/// # metrics = _metrics; +/// metrics +/// .text_family("version".into(), vec!["semver".into(), "rev".into()]) +/// .create(vec!["0.1.0".into(), "891c5baa5".into()]); +/// # } +/// ``` +/// +/// This creates Prometheus metrics like +/// ```text +/// version{semver="0.1.0", rev="891c5baa5"} 1 +/// ``` +pub trait MetricsFamily: Send + Sync + DynClone + Debug { + /// Instantiate a metric in this family with a specific label vector. + /// + /// The given values of `labels` are used to identify this metric within its family. It must + /// contain exactly one value for each label name defined when the family was created, in the + /// same order. + fn create(&self, labels: Vec) -> M; +} + +/// A family of related counters, partitioned by their label values. +pub trait CounterFamily: MetricsFamily> {} +impl>> CounterFamily for T {} + +/// A family of related gauges, partitioned by their label values. +pub trait GaugeFamily: MetricsFamily> {} +impl>> GaugeFamily for T {} + +/// A family of related histograms, partitioned by their label values. +pub trait HistogramFamily: MetricsFamily> {} +impl>> HistogramFamily for T {} + +/// A family of related text metrics, partitioned by their label values. +pub trait TextFamily: MetricsFamily<()> {} +impl> TextFamily for T {} + /// Use this if you're not planning to use any metrics. All methods are implemented as a no-op #[derive(Clone, Copy, Debug, Default)] pub struct NoMetrics; @@ -56,7 +149,21 @@ impl Metrics for NoMetrics { Box::new(NoMetrics) } - fn create_label(&self, _: String) -> Box { + fn create_text(&self, _: String) {} + + fn counter_family(&self, _: String, _: Vec) -> Box { + Box::new(NoMetrics) + } + + fn gauge_family(&self, _: String, _: Vec) -> Box { + Box::new(NoMetrics) + } + + fn histogram_family(&self, _: String, _: Vec) -> Box { + Box::new(NoMetrics) + } + + fn text_family(&self, _: String, _: Vec) -> Box { Box::new(NoMetrics) } @@ -75,8 +182,23 @@ impl Gauge for NoMetrics { impl Histogram for NoMetrics { fn add_point(&self, _: f64) {} } -impl Label for NoMetrics { - fn set(&self, _: String) {} +impl MetricsFamily> for NoMetrics { + fn create(&self, _: Vec) -> Box { + Box::new(NoMetrics) + } +} +impl MetricsFamily> for NoMetrics { + fn create(&self, _: Vec) -> Box { + Box::new(NoMetrics) + } +} +impl MetricsFamily> for NoMetrics { + fn create(&self, _: Vec) -> Box { + Box::new(NoMetrics) + } +} +impl MetricsFamily<()> for NoMetrics { + fn create(&self, _: Vec) {} } /// An ever-incrementing counter @@ -99,16 +221,10 @@ pub trait Histogram: Send + Sync + Debug + DynClone { fn add_point(&self, point: f64); } -/// A label that stores the last string value. -pub trait Label: Send + Sync + DynClone { - /// Set the label value - fn set(&self, value: String); -} dyn_clone::clone_trait_object!(Metrics); dyn_clone::clone_trait_object!(Gauge); dyn_clone::clone_trait_object!(Counter); dyn_clone::clone_trait_object!(Histogram); -dyn_clone::clone_trait_object!(Label); #[cfg(test)] mod test { @@ -137,35 +253,55 @@ mod test { values: Arc::clone(&self.values), } } + + fn family(&self, labels: Vec) -> Self { + let mut curr = self.clone(); + for label in labels { + curr = curr.sub(label); + } + curr + } } impl Metrics for TestMetrics { fn create_counter( &self, - label: String, + name: String, _unit_label: Option, ) -> Box { - Box::new(self.sub(label)) + Box::new(self.sub(name)) } - fn create_gauge( - &self, - label: String, - _unit_label: Option, - ) -> Box { - Box::new(self.sub(label)) + fn create_gauge(&self, name: String, _unit_label: Option) -> Box { + Box::new(self.sub(name)) } fn create_histogram( &self, - label: String, + name: String, _unit_label: Option, ) -> Box { - Box::new(self.sub(label)) + Box::new(self.sub(name)) + } + + fn create_text(&self, name: String) { + self.create_gauge(name, None).set(1); + } + + fn counter_family(&self, name: String, _: Vec) -> Box { + Box::new(self.sub(name)) + } + + fn gauge_family(&self, name: String, _: Vec) -> Box { + Box::new(self.sub(name)) + } + + fn histogram_family(&self, name: String, _: Vec) -> Box { + Box::new(self.sub(name)) } - fn create_label(&self, label: String) -> Box { - Box::new(self.sub(label)) + fn text_family(&self, name: String, _: Vec) -> Box { + Box::new(self.sub(name)) } fn subgroup(&self, subgroup_name: String) -> Box { @@ -215,15 +351,27 @@ mod test { } } - impl Label for TestMetrics { - fn set(&self, value: String) { - *self - .values - .lock() - .unwrap() - .labels - .entry(self.prefix.clone()) - .or_default() = value; + impl MetricsFamily> for TestMetrics { + fn create(&self, labels: Vec) -> Box { + Box::new(self.family(labels)) + } + } + + impl MetricsFamily> for TestMetrics { + fn create(&self, labels: Vec) -> Box { + Box::new(self.family(labels)) + } + } + + impl MetricsFamily> for TestMetrics { + fn create(&self, labels: Vec) -> Box { + Box::new(self.family(labels)) + } + } + + impl MetricsFamily<()> for TestMetrics { + fn create(&self, labels: Vec) { + self.family(labels).set(1); } } @@ -232,7 +380,6 @@ mod test { counters: HashMap, gauges: HashMap, histograms: HashMap>, - labels: HashMap, } #[test] From ac0461a0cb13e3d0d7e170fd68b7076ba0941551 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 15 May 2024 15:36:00 +0200 Subject: [PATCH 1034/1393] Make BlockPayload EncodeBytes (#3119) --- example-types/src/block_types.rs | 67 ++++++++++++++++++++----- example-types/src/state_types.rs | 2 +- examples/infra/mod.rs | 13 ++--- hotshot/src/lib.rs | 9 +--- task-impls/src/transactions.rs | 13 +---- testing/src/block_builder.rs | 11 ++-- testing/src/task_helpers.rs | 8 +-- testing/src/view_generator.rs | 4 +- testing/tests/tests_1/block_builder.rs | 2 +- testing/tests/tests_1/da_task.rs | 10 ++-- testing/tests/tests_1/upgrade_task.rs | 4 +- testing/tests/tests_1/vid_task.rs | 4 +- testing/tests/tests_3/memory_network.rs | 42 ++++++++-------- types/src/data.rs | 8 ++- types/src/traits/block_contents.rs | 18 ++++--- 15 files changed, 121 insertions(+), 94 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 660f00f995..9cdf7f5ef4 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -17,38 +17,77 @@ use hotshot_types::{ }; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; +use snafu::Snafu; use time::OffsetDateTime; use crate::{node_types::TestTypes, state_types::TestInstanceState}; /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] -pub struct TestTransaction(pub Vec); +#[serde(try_from = "Vec")] +pub struct TestTransaction(Vec); + +#[derive(Debug, Snafu)] +pub struct TransactionTooLong; + +impl TryFrom> for TestTransaction { + type Error = TransactionTooLong; + + fn try_from(value: Vec) -> Result { + Self::try_new(value).ok_or(TransactionTooLong) + } +} impl TestTransaction { + /// Construct new transaction + /// + /// # Panics + /// If `bytes.len()` > `u32::MAX` + pub fn new(bytes: Vec) -> Self { + Self::try_new(bytes).expect("Vector too long") + } + + /// Construct a new transaction. + /// Returns `None` if `bytes.len()` > `u32::MAX` + /// for cross-platform compatibility + pub fn try_new(bytes: Vec) -> Option { + if u32::try_from(bytes.len()).is_err() { + None + } else { + Some(Self(bytes)) + } + } + + /// Get reference to raw bytes of transaction + pub fn bytes(&self) -> &Vec { + &self.0 + } + + /// Convert transaction to raw vector of bytes + pub fn into_bytes(self) -> Vec { + self.0 + } + /// Encode a list of transactions into bytes. /// /// # Errors /// If the transaction length conversion fails. - pub fn encode(transactions: &[Self]) -> Result, BlockError> { + pub fn encode(transactions: &[Self]) -> Vec { let mut encoded = Vec::new(); for txn in transactions { // The transaction length is converted from `usize` to `u32` to ensure consistent // number of bytes on different platforms. - let txn_size = match u32::try_from(txn.0.len()) { - Ok(len) => len.to_le_bytes(), - Err(_) => { - return Err(BlockError::InvalidTransactionLength); - } - }; + let txn_size = u32::try_from(txn.0.len()) + .expect("Invalid transaction length") + .to_le_bytes(); // Concatenate the bytes of the transaction size and the transaction itself. encoded.extend(txn_size); encoded.extend(&txn.0); } - Ok(encoded) + encoded } } @@ -113,6 +152,12 @@ impl EncodeBytes for TestMetadata { } } +impl EncodeBytes for TestBlockPayload { + fn encode(&self) -> Arc<[u8]> { + TestTransaction::encode(&self.transactions).into() + } +} + impl BlockPayload for TestBlockPayload { type Error = BlockError; type Instance = TestInstanceState; @@ -157,10 +202,6 @@ impl BlockPayload for TestBlockPayload { (Self::genesis(), TestMetadata) } - fn encode(&self) -> Result, Self::Error> { - TestTransaction::encode(&self.transactions).map(Arc::from) - } - fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { let mut digest = sha2::Sha256::new(); for txn in &self.transactions { diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 0a68d9a602..43d097e825 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -107,7 +107,7 @@ impl> TestableState for ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; - TestTransaction(vec![ + TestTransaction::new(vec![ 0; RANDOM_TX_BASE_SIZE + usize::try_from(padding).unwrap() ]) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 6a62d1cc01..1c6cf9c649 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -472,7 +472,7 @@ pub trait RunDa< .get_transactions(leaf.get_block_header().metadata()) { let restored_timestamp_vec = - tx.0[tx.0.len() - 8..].to_vec(); + tx.bytes()[tx.bytes().len() - 8..].to_vec(); let restored_timestamp = i64::from_be_bytes( restored_timestamp_vec.as_slice().try_into().unwrap(), ); @@ -495,12 +495,12 @@ pub trait RunDa< for _ in 0..transactions_to_send_per_round { // append current timestamp to the tx to calc latency let timestamp = Utc::now().timestamp(); - let mut tx = transactions.remove(0).0; + let mut tx = transactions.remove(0).into_bytes(); let mut timestamp_vec = timestamp.to_be_bytes().to_vec(); tx.append(&mut timestamp_vec); () = context - .submit_transaction(TestTransaction(tx)) + .submit_transaction(TestTransaction::new(tx)) .await .unwrap(); total_transactions_sent += 1; @@ -980,7 +980,7 @@ pub async fn main_entry_point< for round in 0..rounds { for _ in 0..transactions_to_send_per_round { - let mut txn = ::create_random_transaction( + let txn = ::create_random_transaction( None, &mut txn_rng, transaction_size as u64, @@ -988,9 +988,10 @@ pub async fn main_entry_point< // prepend destined view number to transaction let view_execute_number: u64 = round as u64 + 4; - txn.0[0..8].copy_from_slice(&view_execute_number.to_be_bytes()); + let mut bytes = txn.into_bytes(); + bytes[0..8].copy_from_slice(&view_execute_number.to_be_bytes()); - transactions.push(txn); + transactions.push(TestTransaction::new(bytes)); } } if let NetworkConfigSource::Orchestrator = source { diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 028d5402a2..891849662f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -45,7 +45,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::ValidatedState, - BlockPayload, + EncodeBytes, }, HotShotConfig, }; @@ -257,12 +257,7 @@ impl> SystemContext { saved_leaves.insert(leaf.commit(), leaf.clone()); } if let Some(payload) = anchored_leaf.get_block_payload() { - let encoded_txns = match payload.encode() { - Ok(encoded) => encoded, - Err(e) => { - return Err(HotShotError::BlockError { source: e }); - } - }; + let encoded_txns = payload.encode(); saved_payloads.insert(anchored_leaf.get_view_number(), Arc::clone(&encoded_txns)); } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 65509a43c1..fd853ac8ce 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -16,7 +16,7 @@ use hotshot_types::{ data::{null_block, Leaf}, event::{Event, EventType}, traits::{ - block_contents::{precompute_vid_commitment, BuilderFee}, + block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -135,18 +135,9 @@ impl< block_header, }) = self.wait_for_block().await { - // send the sequenced transactions to VID and DA tasks - let encoded_transactions = match block_data.block_payload.encode() { - Ok(encoded) => encoded, - Err(e) => { - error!("Failed to encode the block payload: {:?}.", e); - return None; - } - }; - broadcast_event( Arc::new(HotShotEvent::BlockRecv( - encoded_transactions, + block_data.block_payload.encode(), block_data.metadata, block_view, BuilderFee { diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 20dcb77a38..74598e89c3 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -25,7 +25,7 @@ use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::{ constants::{Version01, STATIC_VER_0_1}, traits::{ - block_contents::{precompute_vid_commitment, BlockHeader}, + block_contents::{precompute_vid_commitment, BlockHeader, EncodeBytes}, node_implementation::NodeType, signature_key::BuilderSignatureKey, }, @@ -167,7 +167,7 @@ where .expect("We are NOT running on a 16-bit platform") ]; rng.fill_bytes(&mut bytes); - TestTransaction(bytes) + TestTransaction::new(bytes) }) .collect(); @@ -568,13 +568,10 @@ where let commitment = block_payload.builder_commitment(&metadata); let (vid_commitment, precompute_data) = - precompute_vid_commitment(&block_payload.encode().unwrap(), num_storage_nodes); + precompute_vid_commitment(&block_payload.encode(), num_storage_nodes); // Get block size from the encoded payload - let block_size = block_payload - .encode() - .expect("failed to encode block") - .len() as u64; + let block_size = block_payload.encode().len() as u64; let signature_over_block_info = TYPES::BuilderSignatureKey::sign_block_info(&priv_key, block_size, 123, &commitment) diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index 967762fcc8..e4c4a2ae9c 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -222,7 +222,7 @@ pub fn vid_payload_commitment( transactions: Vec, ) -> VidCommitment { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); - let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); vid_disperse.commit @@ -232,7 +232,7 @@ pub fn da_payload_commitment( quorum_membership: &::Membership, transactions: Vec, ) -> VidCommitment { - let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions); vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) } @@ -245,7 +245,7 @@ pub fn build_vid_proposal( private_key: &::PrivateKey, ) -> Vec>> { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); - let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = VidDisperse::from_membership( view_number, @@ -271,7 +271,7 @@ pub fn build_da_certificate( public_key: &::SignatureKey, private_key: &::PrivateKey, ) -> DaCertificate { - let encoded_transactions = TestTransaction::encode(&transactions).unwrap(); + let encoded_transactions = TestTransaction::encode(&transactions); let da_payload_commitment = vid_commitment(&encoded_transactions, quorum_membership.total_nodes()); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 3d09029ac5..11a5e4cd4e 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -102,7 +102,7 @@ impl TestView { proposal_certificate: None, }; - let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let block_payload_signature = ::SignatureKey::sign(&private_key, &encoded_transactions_hash) @@ -308,7 +308,7 @@ impl TestView { _pd: PhantomData, }; - let encoded_transactions = Arc::from(TestTransaction::encode(transactions).unwrap()); + let encoded_transactions = Arc::from(TestTransaction::encode(transactions)); let encoded_transactions_hash = Sha256::digest(&encoded_transactions); let block_payload_signature = ::SignatureKey::sign(&private_key, &encoded_transactions_hash) diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 444c0977cc..cb4e96824f 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -87,7 +87,7 @@ async fn test_random_block_builder() { // Test claiming non-existent block let commitment_for_non_existent_block = TestBlockPayload { - transactions: vec![TestTransaction(vec![0; 1])], + transactions: vec![TestTransaction::new(vec![0; 1])], } .builder_commitment(&TestMetadata); let result = client diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index f2c8cea784..804ab2a153 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -34,8 +34,8 @@ async fn test_da_task() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); + let transactions = vec![TestTransaction::new(vec![0])]; + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), @@ -57,7 +57,7 @@ async fn test_da_task() { vids.push(view.vid_proposal.clone()); } - generator.add_transactions(vec![TestTransaction(vec![0])]); + generator.add_transactions(vec![TestTransaction::new(vec![0])]); for view in (&mut generator).take(1) { proposals.push(view.da_proposal.clone()); @@ -116,8 +116,8 @@ async fn test_da_task_storage_failure() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let transactions = vec![TestTransaction(vec![0])]; - let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); + let transactions = vec![TestTransaction::new(vec![0])]; + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, handle.hotshot.memberships.quorum_membership.total_nodes(), diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 9f12408f58..a6ef590bf1 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -392,7 +392,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { } // We set the transactions to something not null for view 6, but we expect the node to emit a quorum proposal where they are still null. - generator.add_transactions(vec![TestTransaction(vec![0])]); + generator.add_transactions(vec![TestTransaction::new(vec![0])]); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); @@ -404,7 +404,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { } // For view 7, we set the transactions to something not null. The node should fail to vote on this. - generator.add_transactions(vec![TestTransaction(vec![0])]); + generator.add_transactions(vec![TestTransaction::new(vec![0])]); for view in generator.take(1) { proposals.push(view.quorum_proposal.clone()); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index ce8ab85cee..776548619a 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -36,11 +36,11 @@ async fn test_vid_task() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); - let transactions = vec![TestTransaction(vec![0])]; + let transactions = vec![TestTransaction::new(vec![0])]; let (payload, metadata) = TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}).unwrap(); let builder_commitment = payload.builder_commitment(&metadata); - let encoded_transactions = Arc::from(TestTransaction::encode(&transactions).unwrap()); + let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let (_, vid_precompute) = vid.commit_only_precompute(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 668b75e055..e6307df437 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,33 +1,33 @@ #![allow(clippy::panic)] -use std::collections::BTreeSet; -use std::sync::Arc; +use std::{collections::BTreeSet, sync::Arc}; use async_compatibility_layer::logging::setup_logging; -use hotshot::traits::election::static_committee::GeneralStaticCommittee; -use hotshot::traits::implementations::{MasterMap, MemoryNetwork, NetworkingMetricsValue}; -use hotshot::traits::NodeImplementation; -use hotshot::types::SignatureKey; -use hotshot_example_types::state_types::TestInstanceState; -use hotshot_example_types::storage_types::TestStorage; +use hotshot::{ + traits::{ + election::static_committee::GeneralStaticCommittee, + implementations::{MasterMap, MemoryNetwork, NetworkingMetricsValue}, + NodeImplementation, + }, + types::SignatureKey, +}; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::TestValidatedState, + state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, }; -use hotshot_types::constants::STATIC_VER_0_1; -use hotshot_types::message::Message; -use hotshot_types::signature_key::{BLSPubKey, BuilderKey}; -use hotshot_types::traits::network::ConnectedNetwork; -use hotshot_types::traits::network::TestableNetworkingImplementation; -use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::{ + constants::STATIC_VER_0_1, data::ViewNumber, - message::{DataMessage, MessageKind}, + message::{DataMessage, Message, MessageKind}, + signature_key::{BLSPubKey, BuilderKey}, + traits::{ + network::{ConnectedNetwork, TestableNetworkingImplementation}, + node_implementation::{ConsensusTime, NodeType}, + }, }; -use rand::rngs::StdRng; -use rand::{RngCore, SeedableRng}; +use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::{Deserialize, Serialize}; -use tracing::instrument; -use tracing::trace; +use tracing::{instrument, trace}; #[derive( Copy, @@ -104,7 +104,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec::new(0), )), }; diff --git a/types/src/data.rs b/types/src/data.rs index fd38ae20d2..e07a83e3b9 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -30,7 +30,7 @@ use crate::{ simple_vote::{QuorumData, UpgradeProposalData}, traits::{ block_contents::{ - vid_commitment, BlockHeader, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, + vid_commitment, BlockHeader, EncodeBytes, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, }, election::Membership, node_implementation::{ConsensusTime, NodeType}, @@ -460,7 +460,7 @@ impl Leaf { let (payload, metadata) = TYPES::BlockPayload::from_transactions([], instance_state).unwrap(); let builder_commitment = payload.builder_commitment(&metadata); - let payload_bytes = payload.encode().expect("unable to encode genesis payload"); + let payload_bytes = payload.encode(); let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); @@ -535,9 +535,7 @@ impl Leaf { block_payload: TYPES::BlockPayload, num_storage_nodes: usize, ) -> Result<(), BlockError> { - let Ok(encoded_txns) = block_payload.encode() else { - return Err(BlockError::InvalidTransactionLength); - }; + let encoded_txns = block_payload.encode(); let commitment = vid_commitment(&encoded_txns, num_storage_nodes); if commitment != self.block_header.payload_commitment() { return Err(BlockError::InconsistentPayloadCommitment); diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 06d115162b..2e64666e49 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -44,7 +44,17 @@ pub trait Transaction: /// sent between threads, and can have a hash produced of it /// * Must be hashable pub trait BlockPayload: - Serialize + Clone + Debug + Display + Hash + PartialEq + Eq + Send + Sync + DeserializeOwned + Serialize + + Clone + + Debug + + Display + + Hash + + PartialEq + + Eq + + Send + + Sync + + DeserializeOwned + + EncodeBytes { /// The error type for this type of block type Error: Error + Debug + Send + Sync + Serialize + DeserializeOwned; @@ -80,12 +90,6 @@ pub trait BlockPayload: /// Build the genesis payload and metadata. fn genesis() -> (Self, Self::Metadata); - /// Encode the payload - /// - /// # Errors - /// If the transaction length conversion fails. - fn encode(&self) -> Result, Self::Error>; - /// List of transaction commitments. fn transaction_commitments( &self, From b2df6603821fd9822b648a34073bfeb4f596812e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 16 May 2024 11:49:11 +0800 Subject: [PATCH 1035/1393] [CLEANUP] - Remove `get_` from all getters (#3154) * Remove get_ * Make output_event_stream private and revert changes to orchestrator * More naming fixes for orchestrator * Fix names --- builder-api/src/builder.rs | 10 +- builder-api/src/data_source.rs | 4 +- example-types/src/block_types.rs | 4 +- examples/infra/mod.rs | 52 ++++---- hotshot-qc/src/bit_vector_old.rs | 2 +- hotshot-stake-table/src/mt_based.rs | 18 +-- hotshot-stake-table/src/mt_based/internal.rs | 18 +-- hotshot-stake-table/src/vec_based.rs | 10 +- hotshot/src/lib.rs | 67 ++++------ hotshot/src/tasks/mod.rs | 2 +- hotshot/src/tasks/task_state.rs | 46 +++---- .../src/traits/election/static_committee.rs | 40 +++--- .../src/traits/networking/combined_network.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 6 +- hotshot/src/types/handle.rs | 44 +++--- .../src/network/behaviours/dht/mod.rs | 14 +- libp2p-networking/src/network/node.rs | 4 +- libp2p-networking/src/network/node/handle.rs | 8 +- libp2p-networking/tests/counter.rs | 10 +- orchestrator/src/client.rs | 3 +- orchestrator/src/config.rs | 4 +- orchestrator/src/lib.rs | 4 +- task-impls/src/builder.rs | 2 +- task-impls/src/consensus/helpers.rs | 125 +++++++++--------- task-impls/src/consensus/mod.rs | 91 ++++++------- task-impls/src/consensus/view_change.rs | 10 +- task-impls/src/da.rs | 27 ++-- task-impls/src/network.rs | 40 +++--- task-impls/src/quorum_proposal.rs | 4 +- task-impls/src/quorum_proposal_recv.rs | 16 +-- task-impls/src/quorum_vote.rs | 12 +- task-impls/src/request.rs | 4 +- task-impls/src/transactions.rs | 25 ++-- task-impls/src/upgrade.rs | 26 ++-- task-impls/src/vid.rs | 2 +- task-impls/src/view_sync.rs | 60 ++++----- task-impls/src/vote_collection.rs | 40 +++--- testing/src/block_builder.rs | 12 +- testing/src/overall_safety_task.rs | 8 +- testing/src/spinning_task.rs | 4 +- testing/src/task_helpers.rs | 18 +-- testing/src/test_builder.rs | 4 +- testing/src/test_runner.rs | 6 +- testing/src/txn_task.rs | 2 +- testing/src/view_generator.rs | 6 +- testing/tests/tests_1/block_builder.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 24 ++-- testing/tests/tests_1/da_task.rs | 2 +- testing/tests/tests_1/proposal_ordering.rs | 8 +- testing/tests/tests_1/quorum_proposal_task.rs | 6 +- testing/tests/tests_1/quorum_vote_task.rs | 6 +- testing/tests/tests_1/upgrade_task.rs | 30 ++--- testing/tests/tests_1/vid_task.rs | 6 +- testing/tests/tests_3/memory_network.rs | 20 +-- types/src/consensus.rs | 26 ++-- types/src/data.rs | 57 ++++---- types/src/lib.rs | 6 +- types/src/message.rs | 44 +++--- types/src/signature_key.rs | 8 +- types/src/simple_certificate.rs | 10 +- types/src/simple_vote.rs | 10 +- types/src/stake_table.rs | 4 +- types/src/traits/block_contents.rs | 8 +- types/src/traits/election.rs | 12 +- types/src/traits/network.rs | 2 +- types/src/traits/node_implementation.rs | 2 +- types/src/traits/signature_key.rs | 10 +- types/src/utils.rs | 12 +- types/src/vote.rs | 40 +++--- 69 files changed, 602 insertions(+), 669 deletions(-) diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 6c58589bb0..4e7544c81d 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -143,7 +143,7 @@ where let signature = try_extract_param(&req, "signature")?; let sender = try_extract_param(&req, "sender")?; state - .get_available_blocks(&hash, view_number, sender, &signature) + .available_blocks(&hash, view_number, sender, &signature) .await .context(BlockAvailableSnafu { resource: hash.to_string(), @@ -182,13 +182,7 @@ where .boxed() })? .get("builder_address", |_req, state| { - async move { - state - .get_builder_address() - .await - .context(BuilderAddressSnafu) - } - .boxed() + async move { state.builder_address().await.context(BuilderAddressSnafu) }.boxed() })?; Ok(api) } diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index 5dea99695b..eaa6f01860 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -14,7 +14,7 @@ use crate::{ #[async_trait] pub trait BuilderDataSource { /// To get the list of available blocks - async fn get_available_blocks( + async fn available_blocks( &self, for_parent: &VidCommitment, view_number: u64, @@ -41,7 +41,7 @@ pub trait BuilderDataSource { ) -> Result, BuildError>; /// To get the builder address - async fn get_builder_address(&self) -> Result; + async fn builder_address(&self) -> Result; } #[async_trait] diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 9cdf7f5ef4..a2d516355e 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -210,7 +210,7 @@ impl BlockPayload for TestBlockPayload { BuilderCommitment::from_raw_digest(digest.finalize()) } - fn get_transactions<'a>( + fn transactions<'a>( &'a self, _metadata: &'a Self::Metadata, ) -> impl 'a + Iterator { @@ -246,7 +246,7 @@ impl> Block _builder_fee: BuilderFee, _vid_common: VidCommon, ) -> Result { - let parent = parent_leaf.get_block_header(); + let parent = parent_leaf.block_header(); let mut timestamp = OffsetDateTime::now_utc().unix_timestamp() as u64; if timestamp < parent.timestamp { diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 1c6cf9c649..8cad0b0d4d 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -355,15 +355,15 @@ pub trait RunDa< let initializer = hotshot::HotShotInitializer::::from_genesis(TestInstanceState {}) .expect("Couldn't generate genesis block"); - let config = self.get_config(); + let config = self.config(); // Get KeyPair for certificate Aggregation let pk = config.config.my_own_validator_config.public_key.clone(); let sk = config.config.my_own_validator_config.private_key.clone(); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let da_network = self.get_da_channel(); - let quorum_network = self.get_quorum_channel(); + let da_network = self.da_channel(); + let quorum_network = self.quorum_channel(); let networks_bundle = Networks { quorum_network: quorum_network.clone().into(), @@ -423,7 +423,7 @@ pub trait RunDa< node_index, start_delay_seconds, .. - } = self.get_config(); + } = self.config(); let mut total_transactions_committed = 0; let mut total_transactions_sent = 0; @@ -438,7 +438,7 @@ pub trait RunDa< info!("Starting HotShot example!"); let start = Instant::now(); - let mut event_stream = context.get_event_stream(); + let mut event_stream = context.event_stream(); let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; @@ -464,12 +464,12 @@ pub trait RunDa< // this might be a obob if let Some(leaf_info) = leaf_chain.first() { let leaf = &leaf_info.leaf; - info!("Decide event for leaf: {}", *leaf.get_view_number()); + info!("Decide event for leaf: {}", *leaf.view_number()); // iterate all the decided transactions to calculate latency - if let Some(block_payload) = &leaf.get_block_payload() { - for tx in block_payload - .get_transactions(leaf.get_block_header().metadata()) + if let Some(block_payload) = &leaf.block_payload() { + for tx in + block_payload.transactions(leaf.block_header().metadata()) { let restored_timestamp_vec = tx.bytes()[tx.bytes().len() - 8..].to_vec(); @@ -486,9 +486,9 @@ pub trait RunDa< } } - let new_anchor = leaf.get_view_number(); + let new_anchor = leaf.view_number(); if new_anchor >= anchor_view { - anchor_view = leaf.get_view_number(); + anchor_view = leaf.view_number(); } // send transactions @@ -532,9 +532,9 @@ pub trait RunDa< } } } - let consensus_lock = context.hotshot.get_consensus(); + let consensus_lock = context.hotshot.consensus(); let consensus = consensus_lock.read().await; - let total_num_views = usize::try_from(consensus.locked_view().get_u64()).unwrap(); + let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); // `failed_num_views` could include uncommitted views let failed_num_views = total_num_views - num_successful_commits; // When posting to the orchestrator, note that the total number of views also include un-finalized views. @@ -570,13 +570,13 @@ pub trait RunDa< } /// Returns the da network for this run - fn get_da_channel(&self) -> DANET; + fn da_channel(&self) -> DANET; /// Returns the quorum network for this run - fn get_quorum_channel(&self) -> QUORUMNET; + fn quorum_channel(&self) -> QUORUMNET; /// Returns the config for this run - fn get_config(&self) -> NetworkConfig; + fn config(&self) -> NetworkConfig; } // Push CDN @@ -652,15 +652,15 @@ where } } - fn get_da_channel(&self) -> PushCdnNetwork { + fn da_channel(&self) -> PushCdnNetwork { self.da_channel.clone() } - fn get_quorum_channel(&self) -> PushCdnNetwork { + fn quorum_channel(&self) -> PushCdnNetwork { self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn config(&self) -> NetworkConfig { self.config.clone() } } @@ -750,15 +750,15 @@ where } } - fn get_da_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { + fn da_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { self.da_channel.clone() } - fn get_quorum_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { + fn quorum_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn config(&self) -> NetworkConfig { self.config.clone() } } @@ -846,15 +846,15 @@ where } } - fn get_da_channel(&self) -> CombinedNetworks { + fn da_channel(&self) -> CombinedNetworks { self.da_channel.clone() } - fn get_quorum_channel(&self) -> CombinedNetworks { + fn quorum_channel(&self) -> CombinedNetworks { self.quorum_channel.clone() } - fn get_config(&self) -> NetworkConfig { + fn config(&self) -> NetworkConfig { self.config.clone() } } @@ -954,7 +954,7 @@ pub async fn main_entry_point< let hotshot = run.initialize_state_and_hotshot().await; if let Some(task) = builder_task { - task.start(Box::new(hotshot.get_event_stream())); + task.start(Box::new(hotshot.event_stream())); } // pre-generate transactions diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 7e59161dcc..328d047042 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -33,7 +33,7 @@ pub struct StakeTableEntry { } impl StakeTableEntryType for StakeTableEntry { - fn get_stake(&self) -> U256 { + fn stake(&self) -> U256 { self.stake_amount } } diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index f417e19672..b870478628 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -66,7 +66,7 @@ impl StakeTableScheme for StakeTable { } fn commitment(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, &version)?; + let root = Self::root(self, &version)?; Ok(MerkleCommitment::new( root.commitment(), self.height, @@ -75,12 +75,12 @@ impl StakeTableScheme for StakeTable { } fn total_stake(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, &version)?; + let root = Self::root(self, &version)?; Ok(root.total_stakes()) } fn len(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, &version)?; + let root = Self::root(self, &version)?; Ok(root.num_keys()) } @@ -89,7 +89,7 @@ impl StakeTableScheme for StakeTable { } fn lookup(&self, version: SnapshotVersion, key: &K) -> Result { - let root = Self::get_root(self, &version)?; + let root = Self::root(self, &version)?; match self.mapping.get(key) { Some(index) => { let branches = to_merkle_path(*index, self.height); @@ -104,7 +104,7 @@ impl StakeTableScheme for StakeTable { version: SnapshotVersion, key: &Self::Key, ) -> Result<(Self::Amount, Self::LookupProof), StakeTableError> { - let root = Self::get_root(self, &version)?; + let root = Self::root(self, &version)?; let proof = match self.mapping.get(key) { Some(index) => { @@ -113,7 +113,7 @@ impl StakeTableScheme for StakeTable { } None => Err(StakeTableError::KeyNotFound), }?; - let amount = *proof.get_value().ok_or(StakeTableError::KeyNotFound)?; + let amount = *proof.value().ok_or(StakeTableError::KeyNotFound)?; Ok((amount, proof)) } @@ -159,11 +159,11 @@ impl StakeTableScheme for StakeTable { let r = U512::from_big_endian(&bytes); let m = U512::from(self.last_epoch_start.total_stakes()); let pos: U256 = (r % m).try_into().unwrap(); // won't fail - self.last_epoch_start.get_key_by_stake(pos) + self.last_epoch_start.key_by_stake(pos) } fn try_iter(&self, version: SnapshotVersion) -> Result { - let root = Self::get_root(self, &version)?; + let root = Self::root(self, &version)?; Ok(internal::IntoIter::new(root)) } } @@ -182,7 +182,7 @@ impl StakeTable { } /// returns the root of stake table at `version` - fn get_root( + fn root( &self, version: &SnapshotVersion, ) -> Result>, StakeTableError> { diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index d426c2c9a0..50440cc4b3 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -103,7 +103,7 @@ impl MerkleProof { } /// Returns the public key of the associated stake table entry, if there's any. - pub fn get_key(&self) -> Option<&K> { + pub fn key(&self) -> Option<&K> { match self.path.first() { Some(MerklePathEntry::Leaf { key, value: _ }) => Some(key), _ => None, @@ -111,7 +111,7 @@ impl MerkleProof { } /// Returns the stake amount of the associated stake table entry, if there's any. - pub fn get_value(&self) -> Option<&U256> { + pub fn value(&self) -> Option<&U256> { match self.path.first() { Some(MerklePathEntry::Leaf { key: _, value }) => Some(value), _ => None, @@ -119,7 +119,7 @@ impl MerkleProof { } /// Returns the associated stake table entry, if there's any. - pub fn get_key_value(&self) -> Option<(&K, &U256)> { + pub fn key_value(&self) -> Option<(&K, &U256)> { match self.path.first() { Some(MerklePathEntry::Leaf { key, value }) => Some((key, value)), _ => None, @@ -317,7 +317,7 @@ impl PersistentMerkleNode { /// Imagine that the keys in this subtree is sorted, returns the first key such that /// the prefix sum of withholding stakes is greater or equal the given `stake_number`. /// Useful for key sampling weighted by withholding stakes - pub fn get_key_by_stake(&self, mut stake_number: U256) -> Option<(&K, &U256)> { + pub fn key_by_stake(&self, mut stake_number: U256) -> Option<(&K, &U256)> { if stake_number >= self.total_stakes() { None } else { @@ -334,7 +334,7 @@ impl PersistentMerkleNode { stake_number -= children[ptr].total_stakes(); ptr += 1; } - children[ptr].get_key_by_stake(stake_number) + children[ptr].key_by_stake(stake_number) } PersistentMerkleNode::Leaf { comm: _, @@ -663,14 +663,14 @@ mod tests { roots[i + 1].simple_lookup(height, &path[i]).unwrap() ); } - // test get_key_by_stake + // test key_by_stake keys.iter().enumerate().for_each(|(i, key)| { assert_eq!( key, roots .last() .unwrap() - .get_key_by_stake(U256::from(i as u64 * 100 + i as u64 + 1)) + .key_by_stake(U256::from(i as u64 * 100 + i as u64 + 1)) .unwrap() .0 ); @@ -680,8 +680,8 @@ mod tests { for i in 0..10 { let proof = roots.last().unwrap().lookup(height, &path[i]).unwrap(); assert_eq!(height, proof.tree_height()); - assert_eq!(&keys[i], proof.get_key().unwrap()); - assert_eq!(&U256::from(100), proof.get_value().unwrap()); + assert_eq!(&keys[i], proof.key().unwrap()); + assert_eq!(&U256::from(100), proof.value().unwrap()); assert_eq!( roots.last().unwrap().commitment(), proof.compute_root().unwrap() diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index a4e8426af3..78b9e326ee 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -144,7 +144,7 @@ where } fn len(&self, version: SnapshotVersion) -> Result { - Ok(self.get_version(&version)?.bls_keys.len()) + Ok(self.version(&version)?.bls_keys.len()) } fn contains_key(&self, key: &Self::Key) -> bool { @@ -156,7 +156,7 @@ where version: SnapshotVersion, key: &Self::Key, ) -> Result { - let table = self.get_version(&version)?; + let table = self.version(&version)?; let pos = self.lookup_pos(key)?; if pos >= table.bls_keys.len() { Err(StakeTableError::KeyNotFound) @@ -179,7 +179,7 @@ where version: SnapshotVersion, key: &Self::Key, ) -> Result<(Self::Amount, Self::Aux, Self::LookupProof), StakeTableError> { - let table = self.get_version(&version)?; + let table = self.version(&version)?; let pos = self.lookup_pos(key)?; if pos >= table.bls_keys.len() { Err(StakeTableError::KeyNotFound) @@ -228,7 +228,7 @@ where } fn try_iter(&self, version: SnapshotVersion) -> Result { - let table = self.get_version(&version)?; + let table = self.version(&version)?; let owned = (0..table.bls_keys.len()) .map(|i| { ( @@ -354,7 +354,7 @@ where } /// returns the snapshot version - fn get_version( + fn version( &self, version: &SnapshotVersion, ) -> Result<&StakeTableSnapshot, StakeTableError> { diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 891849662f..1592c8b996 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -145,10 +145,7 @@ pub struct SystemContext> { start_view: TYPES::Time, /// Access to the output event stream. - #[deprecated( - note = "please use the `get_event_stream` method on `SystemContextHandle` instead. This field will be made private in a future release of HotShot" - )] - pub output_event_stream: (Sender>, InactiveReceiver>), + output_event_stream: (Sender>, InactiveReceiver>), /// External event stream for communication with the application. pub(crate) external_event_stream: (Sender>, InactiveReceiver>), @@ -229,14 +226,14 @@ impl> SystemContext { let validated_state = match initializer.validated_state { Some(state) => state, None => Arc::new(TYPES::ValidatedState::from_header( - anchored_leaf.get_block_header(), + anchored_leaf.block_header(), )), }; // Insert the validated state to state map. let mut validated_state_map = BTreeMap::default(); validated_state_map.insert( - anchored_leaf.get_view_number(), + anchored_leaf.view_number(), View { view_inner: ViewInner::Leaf { leaf: anchored_leaf.commit(), @@ -256,19 +253,19 @@ impl> SystemContext { for leaf in initializer.undecided_leafs { saved_leaves.insert(leaf.commit(), leaf.clone()); } - if let Some(payload) = anchored_leaf.get_block_payload() { + if let Some(payload) = anchored_leaf.block_payload() { let encoded_txns = payload.encode(); - saved_payloads.insert(anchored_leaf.get_view_number(), Arc::clone(&encoded_txns)); + saved_payloads.insert(anchored_leaf.view_number(), Arc::clone(&encoded_txns)); } let consensus = Consensus::new( validated_state_map, - anchored_leaf.get_view_number(), - anchored_leaf.get_view_number(), + anchored_leaf.view_number(), + anchored_leaf.view_number(), // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 - anchored_leaf.get_view_number(), + anchored_leaf.view_number(), saved_leaves, saved_payloads, initializer.high_qc, @@ -327,12 +324,12 @@ impl> SystemContext { { // Some applications seem to expect a leaf decide event for the genesis leaf, // which contains only that leaf and nothing else. - if self.anchored_leaf.get_view_number() == TYPES::Time::genesis() { + if self.anchored_leaf.view_number() == TYPES::Time::genesis() { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&self.instance_state); broadcast_event( Event { - view_number: self.anchored_leaf.get_view_number(), + view_number: self.anchored_leaf.view_number(), event: EventType::Decide { leaf_chain: Arc::new(vec![LeafInfo::new( self.anchored_leaf.clone(), @@ -395,7 +392,7 @@ impl> SystemContext { sender: api.public_key.clone(), kind: MessageKind::from(message), }, - da_membership.get_whole_committee(view_number), + da_membership.whole_committee(view_number), STATIC_VER_0_1, ), api @@ -412,20 +409,20 @@ impl> SystemContext { /// Returns a copy of the consensus struct #[must_use] - pub fn get_consensus(&self) -> Arc>> { + pub fn consensus(&self) -> Arc>> { Arc::clone(&self.consensus) } /// Returns a copy of the instance state - pub fn get_instance_state(&self) -> Arc { + pub fn instance_state(&self) -> Arc { Arc::clone(&self.instance_state) } /// Returns a copy of the last decided leaf /// # Panics /// Panics if internal leaf for consensus is inconsistent - pub async fn get_decided_leaf(&self) -> Leaf { - self.consensus.read().await.get_decided_leaf() + pub async fn decided_leaf(&self) -> Leaf { + self.consensus.read().await.decided_leaf() } /// [Non-blocking] instantly returns a copy of the last decided leaf if @@ -434,18 +431,16 @@ impl> SystemContext { /// # Panics /// Panics if internal state for consensus is inconsistent #[must_use] - pub fn try_get_decided_leaf(&self) -> Option> { - self.consensus - .try_read() - .map(|guard| guard.get_decided_leaf()) + pub fn try_decided_leaf(&self) -> Option> { + self.consensus.try_read().map(|guard| guard.decided_leaf()) } /// Returns the last decided validated state. /// /// # Panics /// Panics if internal state for consensus is inconsistent - pub async fn get_decided_state(&self) -> Arc { - Arc::clone(&self.consensus.read().await.get_decided_state()) + pub async fn decided_state(&self) -> Arc { + Arc::clone(&self.consensus.read().await.decided_state()) } /// Get the validated state from a given `view`. @@ -453,10 +448,10 @@ impl> SystemContext { /// Returns the requested state, if the [`SystemContext`] is tracking this view. Consensus /// tracks views that have not yet been decided but could be in the future. This function may /// return [`None`] if the requested view has already been decided (but see - /// [`get_decided_state`](Self::get_decided_state)) or if there is no path for the requested + /// [`decided_state`](Self::decided_state)) or if there is no path for the requested /// view to ever be decided. - pub async fn get_state(&self, view: TYPES::Time) -> Option> { - self.consensus.read().await.get_state(view).cloned() + pub async fn state(&self, view: TYPES::Time) -> Option> { + self.consensus.read().await.state(view).cloned() } /// Initializes a new [`SystemContext`] and does the work of setting up all the background tasks @@ -510,18 +505,12 @@ impl> SystemContext { } /// return the timeout for a view for `self` #[must_use] - pub fn get_next_view_timeout(&self) -> u64 { + pub fn next_view_timeout(&self) -> u64 { self.config.next_view_timeout } } impl> SystemContext { - /// Get access to [`Consensus`] - #[must_use] - pub fn consensus(&self) -> &Arc>> { - &self.consensus - } - /// Spawn all tasks that operate on [`SystemContextHandle`]. /// /// For a list of which tasks are being spawned, see this module's documentation. @@ -587,7 +576,7 @@ impl> SystemContext { Arc::clone(&quorum_network), quorum_membership.clone(), network::quorum_filter, - Arc::clone(&handle.get_storage()), + Arc::clone(&handle.storage()), ) .await; add_network_event_task( @@ -597,7 +586,7 @@ impl> SystemContext { Arc::clone(&quorum_network), quorum_membership, network::upgrade_filter, - Arc::clone(&handle.get_storage()), + Arc::clone(&handle.storage()), ) .await; add_network_event_task( @@ -607,7 +596,7 @@ impl> SystemContext { Arc::clone(&da_network), da_membership, network::da_filter, - Arc::clone(&handle.get_storage()), + Arc::clone(&handle.storage()), ) .await; add_network_event_task( @@ -617,7 +606,7 @@ impl> SystemContext { Arc::clone(&quorum_network), view_sync_membership, network::view_sync_filter, - Arc::clone(&handle.get_storage()), + Arc::clone(&handle.storage()), ) .await; add_network_event_task( @@ -627,7 +616,7 @@ impl> SystemContext { Arc::clone(&quorum_network), vid_membership, network::vid_filter, - Arc::clone(&handle.get_storage()), + Arc::clone(&handle.storage()), ) .await; add_consensus_task( diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 2073982a5d..8273432108 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -67,7 +67,7 @@ pub async fn add_response_task>( handle: &SystemContextHandle, ) { let state = NetworkResponseState::::new( - handle.hotshot.get_consensus(), + handle.hotshot.consensus(), rx, handle.hotshot.memberships.quorum_membership.clone().into(), handle.public_key().clone(), diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 80089f2df0..cabecde622 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -40,8 +40,8 @@ impl, V: StaticVersionType> Create ) -> NetworkRequestState { NetworkRequestState { network: Arc::clone(&handle.hotshot.networks.quorum_network), - state: handle.hotshot.get_consensus(), - view: handle.get_cur_view().await, + state: handle.hotshot.consensus(), + view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, da_membership: handle.hotshot.memberships.da_membership.clone(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone(), @@ -63,7 +63,7 @@ impl> CreateTaskState ) -> UpgradeTaskState> { UpgradeTaskState { api: handle.clone(), - cur_view: handle.get_cur_view().await, + cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), #[cfg(not(feature = "example-upgrade"))] @@ -87,8 +87,8 @@ impl> CreateTaskState ) -> VIDTaskState> { VIDTaskState { api: handle.clone(), - consensus: handle.hotshot.get_consensus(), - cur_view: handle.get_cur_view().await, + consensus: handle.hotshot.consensus(), + cur_view: handle.cur_view().await, vote_collector: None, network: Arc::clone(&handle.hotshot.networks.quorum_network), membership: handle.hotshot.memberships.vid_membership.clone().into(), @@ -108,11 +108,11 @@ impl> CreateTaskState ) -> DaTaskState> { DaTaskState { api: handle.clone(), - consensus: handle.hotshot.get_consensus(), + consensus: handle.hotshot.consensus(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), da_network: Arc::clone(&handle.hotshot.networks.da_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - cur_view: handle.get_cur_view().await, + cur_view: handle.cur_view().await, vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -129,7 +129,7 @@ impl> CreateTaskState async fn create_from( handle: &SystemContextHandle, ) -> ViewSyncTaskState> { - let cur_view = handle.get_cur_view().await; + let cur_view = handle.cur_view().await; ViewSyncTaskState { current_view: cur_view, next_view: cur_view, @@ -165,13 +165,13 @@ impl, Ver: StaticVersionType> ) -> TransactionTaskState, Ver> { TransactionTaskState { api: handle.clone(), - consensus: handle.hotshot.get_consensus(), - cur_view: handle.get_cur_view().await, + consensus: handle.hotshot.consensus(), + cur_view: handle.cur_view().await, network: Arc::clone(&handle.hotshot.networks.quorum_network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - instance_state: handle.hotshot.get_instance_state(), + instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, builder_client: BuilderClient::new(handle.hotshot.config.builder_url.clone()), } @@ -183,14 +183,14 @@ impl> CreateTaskState for ConsensusTaskState { async fn create_from(handle: &SystemContextHandle) -> ConsensusTaskState { - let consensus = handle.hotshot.get_consensus(); + let consensus = handle.hotshot.consensus(); ConsensusTaskState { consensus, - instance_state: handle.hotshot.get_instance_state(), + instance_state: handle.hotshot.instance_state(), timeout: handle.hotshot.config.next_view_timeout, round_start_delay: handle.hotshot.config.round_start_delay, - cur_view: handle.get_cur_view().await, + cur_view: handle.cur_view().await, payload_commitment_and_metadata: None, vote_collector: None.into(), timeout_vote_collector: None.into(), @@ -220,14 +220,14 @@ impl> CreateTaskState for QuorumVoteTaskState { async fn create_from(handle: &SystemContextHandle) -> QuorumVoteTaskState { - let consensus = handle.hotshot.get_consensus(); + let consensus = handle.hotshot.consensus(); QuorumVoteTaskState { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), consensus, - instance_state: handle.hotshot.get_instance_state(), - latest_voted_view: handle.get_cur_view().await, + instance_state: handle.hotshot.instance_state(), + latest_voted_view: handle.cur_view().await, vote_dependencies: HashMap::new(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), da_network: Arc::clone(&handle.hotshot.networks.da_network), @@ -247,15 +247,15 @@ impl> CreateTaskState async fn create_from( handle: &SystemContextHandle, ) -> QuorumProposalTaskState { - let consensus = handle.hotshot.get_consensus(); + let consensus = handle.hotshot.consensus(); QuorumProposalTaskState { - latest_proposed_view: handle.get_cur_view().await, + latest_proposed_view: handle.cur_view().await, propose_dependencies: HashMap::new(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), da_network: Arc::clone(&handle.hotshot.networks.da_network), output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus, - instance_state: handle.hotshot.get_instance_state(), + instance_state: handle.hotshot.instance_state(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -276,12 +276,12 @@ impl> CreateTaskState async fn create_from( handle: &SystemContextHandle, ) -> QuorumProposalRecvTaskState { - let consensus = handle.hotshot.get_consensus(); + let consensus = handle.hotshot.consensus(); QuorumProposalRecvTaskState { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), consensus, - cur_view: handle.get_cur_view().await, + cur_view: handle.cur_view().await, quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -294,7 +294,7 @@ impl> CreateTaskState proposal_cert: None, decided_upgrade_cert: None, spawned_tasks: BTreeMap::new(), - instance_state: handle.hotshot.get_instance_state(), + instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, } } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 9195cbf7f7..00f6f55b60 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -56,7 +56,7 @@ where TYPES: NodeType, { /// Clone the public key and corresponding stake table for current elected committee - fn get_committee_qc_stake_table(&self) -> Vec { + fn committee_qc_stake_table(&self) -> Vec { self.committee_nodes_with_stake.clone() } @@ -65,16 +65,16 @@ where feature = "fixed-leader-election" )))] /// Index the vector of public keys with the current view number - fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + fn leader(&self, view_number: TYPES::Time) -> PUBKEY { let index = usize::try_from(*view_number % self.all_nodes_with_stake.len() as u64).unwrap(); let res = self.all_nodes_with_stake[index].clone(); - TYPES::SignatureKey::get_public_key(&res) + TYPES::SignatureKey::public_key(&res) } #[cfg(feature = "fixed-leader-election")] /// Only get leader in fixed set /// Index the fixed vector (first fixed_leader_for_gpuvid element) of public keys with the current view number - fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + fn leader(&self, view_number: TYPES::Time) -> PUBKEY { if self.fixed_leader_for_gpuvid <= 0 || self.fixed_leader_for_gpuvid > self.all_nodes_with_stake.len() { @@ -82,29 +82,29 @@ where } let index = usize::try_from(*view_number % self.fixed_leader_for_gpuvid as u64).unwrap(); let res = self.all_nodes_with_stake[index].clone(); - TYPES::SignatureKey::get_public_key(&res) + TYPES::SignatureKey::public_key(&res) } #[cfg(feature = "randomized-leader-election")] /// Index the vector of public keys with a random number generated using the current view number as a seed - fn get_leader(&self, view_number: TYPES::Time) -> PUBKEY { + fn leader(&self, view_number: TYPES::Time) -> PUBKEY { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: usize = rng.gen(); let index = randomized_view_number % self.nodes_with_stake.len(); let res = self.all_nodes_with_stake[index].clone(); - TYPES::SignatureKey::get_public_key(&res) + TYPES::SignatureKey::public_key(&res) } fn has_stake(&self, pub_key: &PUBKEY) -> bool { - let entry = pub_key.get_stake_table_entry(1u64); + let entry = pub_key.stake_table_entry(1u64); self.committee_nodes_with_stake.contains(&entry) } - fn get_stake( + fn stake( &self, pub_key: &::SignatureKey, ) -> Option<::StakeTableEntry> { - let entry = pub_key.get_stake_table_entry(1u64); + let entry = pub_key.stake_table_entry(1u64); if self.committee_nodes_with_stake.contains(&entry) { Some(entry) } else { @@ -125,17 +125,17 @@ where .iter() .map(|entry| entry.stake_table_entry.clone()) { - if entry.get_stake() > U256::from(0) { + if entry.stake() > U256::from(0) { // Positive stake committee_nodes_with_stake.push(entry); } else { // Zero stake - committee_nodes_without_stake.push(PUBKEY::get_public_key(&entry)); + committee_nodes_without_stake.push(PUBKEY::public_key(&entry)); } } // Retain all nodes with stake - all_nodes.retain(|entry| entry.stake_table_entry.get_stake() > U256::from(0)); + all_nodes.retain(|entry| entry.stake_table_entry.stake() > U256::from(0)); debug!( "Election Membership Size: {}", @@ -170,29 +170,29 @@ where NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 9) / 10) + 1).unwrap() } - fn get_staked_committee( + fn staked_committee( &self, _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { self.committee_nodes_with_stake .iter() - .map(|node| ::SignatureKey::get_public_key(node)) + .map(|node| ::SignatureKey::public_key(node)) .collect() } - fn get_non_staked_committee( + fn non_staked_committee( &self, _view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { self.committee_nodes_without_stake.iter().cloned().collect() } - fn get_whole_committee( + fn whole_committee( &self, view_number: ::Time, ) -> std::collections::BTreeSet<::SignatureKey> { - let mut committee = self.get_staked_committee(view_number); - committee.extend(self.get_non_staked_committee(view_number)); + let mut committee = self.staked_committee(view_number); + committee.extend(self.non_staked_committee(view_number)); committee } } @@ -208,7 +208,7 @@ where } #[allow(clippy::must_use_candidate)] /// get all the non-staked nodes - pub fn get_non_staked_nodes(&self) -> Vec { + pub fn non_staked_nodes(&self) -> Vec { self.committee_nodes_without_stake.clone() } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 9893ed8f7b..50dfe13250 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -169,7 +169,7 @@ impl CombinedNetworks { self.delayed_tasks .write() .await - .entry(message.kind.get_view_number().get_u64()) + .entry(message.kind.view_number().u64()) .or_default() .push(async_spawn(async move { async_sleep(duration).await; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 27d0706298..23ee8f304d 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -403,12 +403,12 @@ impl Libp2pNetwork { // Make a node DA if it is under the staked committee size for node in config.config.known_da_nodes { - da_keys.insert(K::get_public_key(&node.stake_table_entry)); + da_keys.insert(K::public_key(&node.stake_table_entry)); } // Insert all known nodes into the set of all keys for node in config.config.known_nodes_with_stake { - all_keys.insert(K::get_public_key(&node.stake_table_entry)); + all_keys.insert(K::public_key(&node.stake_table_entry)); } Ok(Libp2pNetwork::new( @@ -1119,7 +1119,7 @@ impl ConnectedNetwork for Libp2p TYPES: NodeType + 'a, { let future_view = ::Time::new(view) + LOOK_AHEAD; - let future_leader = membership.get_leader(future_view); + let future_leader = membership.leader(future_view); let _ = self .queue_node_lookup(ViewNumber::new(*future_view), future_leader) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 3a1f7f44bd..a35fc43e76 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -47,7 +47,7 @@ pub struct SystemContextHandle> { impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user - pub fn get_event_stream(&self) -> impl Stream> { + pub fn event_stream(&self) -> impl Stream> { self.output_event_stream.1.activate_cloned() } @@ -56,7 +56,7 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper #[must_use] - pub fn get_event_stream_known_impl(&self) -> Receiver> { + pub fn event_stream_known_impl(&self) -> Receiver> { self.output_event_stream.1.activate_cloned() } @@ -66,7 +66,7 @@ impl + 'static> SystemContextHandl /// - type wrapper /// NOTE: this is only used for sanity checks in our tests #[must_use] - pub fn get_internal_event_stream_known_impl(&self) -> Receiver>> { + pub fn internal_event_stream_known_impl(&self) -> Receiver>> { self.internal_event_stream.1.activate_cloned() } @@ -74,8 +74,8 @@ impl + 'static> SystemContextHandl /// /// # Panics /// If the internal consensus is in an inconsistent state. - pub async fn get_decided_state(&self) -> Arc { - self.hotshot.get_decided_state().await + pub async fn decided_state(&self) -> Arc { + self.hotshot.decided_state().await } /// Get the validated state from a given `view`. @@ -83,18 +83,18 @@ impl + 'static> SystemContextHandl /// Returns the requested state, if the [`SystemContext`] is tracking this view. Consensus /// tracks views that have not yet been decided but could be in the future. This function may /// return [`None`] if the requested view has already been decided (but see - /// [`get_decided_state`](Self::get_decided_state)) or if there is no path for the requested + /// [`decided_state`](Self::decided_state)) or if there is no path for the requested /// view to ever be decided. - pub async fn get_state(&self, view: TYPES::Time) -> Option> { - self.hotshot.get_state(view).await + pub async fn state(&self, view: TYPES::Time) -> Option> { + self.hotshot.state(view).await } /// Get the last decided leaf of the [`SystemContext`] instance. /// /// # Panics /// If the internal consensus is in an inconsistent state. - pub async fn get_decided_leaf(&self) -> Leaf { - self.hotshot.get_decided_leaf().await + pub async fn decided_leaf(&self) -> Leaf { + self.hotshot.decided_leaf().await } /// Tries to get the most recent decided leaf, returning instantly @@ -103,8 +103,8 @@ impl + 'static> SystemContextHandl /// # Panics /// Panics if internal consensus is in an inconsistent state. #[must_use] - pub fn try_get_decided_leaf(&self) -> Option> { - self.hotshot.try_get_decided_leaf() + pub fn try_decided_leaf(&self) -> Option> { + self.hotshot.try_decided_leaf() } /// Submits a transaction to the backing [`SystemContext`] instance. @@ -124,8 +124,8 @@ impl + 'static> SystemContextHandl /// Get the underlying consensus state for this [`SystemContext`] #[must_use] - pub fn get_consensus(&self) -> Arc>> { - self.hotshot.get_consensus() + pub fn consensus(&self) -> Arc>> { + self.hotshot.consensus() } /// Block the underlying quorum (and DA) networking interfaces until node is @@ -158,36 +158,36 @@ impl + 'static> SystemContextHandl /// return the timeout for a view of the underlying `SystemContext` #[must_use] - pub fn get_next_view_timeout(&self) -> u64 { - self.hotshot.get_next_view_timeout() + pub fn next_view_timeout(&self) -> u64 { + self.hotshot.next_view_timeout() } - /// Wrapper for `HotShotConsensusApi`'s `get_leader` function + /// Wrapper for `HotShotConsensusApi`'s `leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + pub async fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { self.hotshot .memberships .quorum_membership - .get_leader(view_number) + .leader(view_number) } // Below is for testing only: /// Wrapper to get this node's public key #[cfg(feature = "hotshot-testing")] #[must_use] - pub fn get_public_key(&self) -> TYPES::SignatureKey { + pub fn public_key(&self) -> TYPES::SignatureKey { self.hotshot.public_key.clone() } /// Wrapper to get the view number this node is on. - pub async fn get_cur_view(&self) -> TYPES::Time { + pub async fn cur_view(&self) -> TYPES::Time { self.hotshot.consensus.read().await.cur_view() } /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data #[must_use] - pub fn get_storage(&self) -> Arc> { + pub fn storage(&self) -> Arc> { Arc::clone(&self.storage) } } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 25d65dc5af..9a1ae61303 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -42,7 +42,7 @@ pub struct DHTBehaviour { /// in progress queries for nearby peers pub in_progress_get_closest_peers: HashMap>, /// List of in-progress get requests - in_progress_get_record_queries: HashMap, + in_progress_record_queries: HashMap, /// List of in-progress put requests in_progress_put_record_queries: HashMap, /// State of bootstrapping @@ -101,7 +101,7 @@ impl DHTBehaviour { // Self { peer_id: pid, - in_progress_get_record_queries: HashMap::default(), + in_progress_record_queries: HashMap::default(), in_progress_put_record_queries: HashMap::default(), bootstrap_state: Bootstrap { state: State::NotStarted, @@ -132,7 +132,7 @@ impl DHTBehaviour { /// Get the replication factor for queries #[must_use] - pub fn get_replication_factor(&self) -> NonZeroUsize { + pub fn replication_factor(&self) -> NonZeroUsize { self.replication_factor } /// Publish a key/value to the kv store. @@ -147,7 +147,7 @@ impl DHTBehaviour { /// Value (serialized) is sent over `chan`, and if a value is not found, /// a [`crate::network::error::DHTError`] is sent instead. /// NOTE: noop if `retry_count` is 0 - pub fn get_record( + pub fn record( &mut self, key: Vec, chan: Sender>, @@ -180,7 +180,7 @@ impl DHTBehaviour { retry_count: retry_count - 1, records: HashMap::default(), }; - self.in_progress_get_record_queries.insert(qid, query); + self.in_progress_record_queries.insert(qid, query); } } @@ -225,7 +225,7 @@ impl DHTBehaviour { id: QueryId, mut last: bool, ) { - let num = match self.in_progress_get_record_queries.get_mut(&id) { + let num = match self.in_progress_record_queries.get_mut(&id) { Some(query) => match record_results { Ok(results) => match results { GetRecordOk::FoundRecord(record) => { @@ -273,7 +273,7 @@ impl DHTBehaviour { key, retry_count, records, - }) = self.in_progress_get_record_queries.remove(&id) + }) = self.in_progress_record_queries.remove(&id) { // if channel has been dropped, cancel request if notify.is_canceled() { diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 2ffd868161..917fba34f7 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -355,7 +355,7 @@ impl NetworkNode { let record = Record::new(query.key.clone(), query.value.clone()); match self.swarm.behaviour_mut().dht.put_record( record, - libp2p::kad::Quorum::N(self.dht_handler.get_replication_factor()), + libp2p::kad::Quorum::N(self.dht_handler.replication_factor()), ) { Err(e) => { // failed try again later @@ -433,7 +433,7 @@ impl NetworkNode { notify, retry_count, } => { - self.dht_handler.get_record( + self.dht_handler.record( key, notify, NonZeroUsize::new(NUM_REPLICATED_TO_TRUST).unwrap(), diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index c1537080ab..88cb63b834 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -242,7 +242,7 @@ impl NetworkNodeHandle { ) -> Result { // get record (from DHT) let pid = self - .get_record_timeout::(&key, dht_timeout, bind_version) + .record_timeout::(&key, dht_timeout, bind_version) .await?; // pid lookup for routing @@ -279,7 +279,7 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn get_record Deserialize<'a>, VER: StaticVersionType>( + pub async fn record Deserialize<'a>, VER: StaticVersionType>( &self, key: &impl Serialize, retry_count: u8, @@ -305,13 +305,13 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn get_record_timeout Deserialize<'a>, VER: StaticVersionType>( + pub async fn record_timeout Deserialize<'a>, VER: StaticVersionType>( &self, key: &impl Serialize, timeout: Duration, bind_version: VER, ) -> Result { - let result = async_timeout(timeout, self.get_record(key, 3, bind_version)).await; + let result = async_timeout(timeout, self.record(key, 3, bind_version)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index e89ae4972d..d51530b0e2 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -55,7 +55,7 @@ pub enum CounterMessage { /// chooses one /// # Panics /// panics if handles is of length 0 -fn get_random_handle( +fn random_handle( handles: &[HandleWithState], rng: &mut dyn rand::RngCore, ) -> HandleWithState { @@ -208,7 +208,7 @@ async fn run_gossip_round( timeout_duration: Duration, ) -> Result<(), TestError> { let mut rng = rand::thread_rng(); - let msg_handle = get_random_handle(handles, &mut rng); + let msg_handle = random_handle(handles, &mut rng); msg_handle.state.modify(|s| *s = new_state).await; let mut futs = Vec::new(); @@ -351,7 +351,7 @@ async fn run_dht_rounds( let mut rng = rand::thread_rng(); for i in 0..num_rounds { debug!("begin round {}", i); - let msg_handle = get_random_handle(handles, &mut rng); + let msg_handle = random_handle(handles, &mut rng); let mut key = vec![0; DHT_KV_PADDING]; let inc_val = u8::try_from(starting_val + i).unwrap(); key.push(inc_val); @@ -369,7 +369,7 @@ async fn run_dht_rounds( for handle in handles { let result: Result, NetworkNodeHandleError> = handle .handle - .get_record_timeout(&key, timeout, STATIC_VER_0_1) + .record_timeout(&key, timeout, STATIC_VER_0_1) .await; match result { Err(e) => { @@ -416,7 +416,7 @@ async fn run_request_response_increment_all( timeout: Duration, ) { let mut rng = rand::thread_rng(); - let requestee_handle = get_random_handle(handles, &mut rng); + let requestee_handle = random_handle(handles, &mut rng); requestee_handle.state.modify(|s| *s += 1).await; info!("RR REQUESTEE IS {:?}", requestee_handle.handle.peer_id()); let mut futs = Vec::new(); diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 2544dc9179..5f42d57712 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -320,8 +320,7 @@ impl OrchestratorClient { .expect("failed to create multiaddress") }); - let pubkey: Vec = - PeerConfig::::to_bytes(&validator_config.get_public_config()).clone(); + let pubkey: Vec = PeerConfig::::to_bytes(&validator_config.public_config()).clone(); let da_requested: bool = validator_config.is_da; // Serialize our (possible) libp2p-specific data diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 738f3541ee..b8291b792d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -716,11 +716,11 @@ impl Default for HotShotConfigFile { // Add to DA nodes based on index if node_id < staked_da_nodes as u64 { - known_da_nodes.push(cur_validator_config.get_public_config()); + known_da_nodes.push(cur_validator_config.public_config()); cur_validator_config.is_da = true; } - cur_validator_config.get_public_config() + cur_validator_config.public_config() }) .collect(); diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 93c99418fd..eda3b51ba9 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -106,7 +106,7 @@ impl OrchestratorState { /// get election type in use #[must_use] - pub fn get_election_type() -> String { + pub fn election_type() -> String { // leader is chosen in index order #[cfg(not(any( feature = "randomized-leader-election", @@ -134,7 +134,7 @@ impl OrchestratorState { transactions_per_round: self.config.transactions_per_round, transaction_size: self.bench_results.transaction_size_in_bytes, rounds: self.config.rounds, - leader_election_type: OrchestratorState::::get_election_type(), + leader_election_type: OrchestratorState::::election_type(), avg_latency_in_sec: self.bench_results.avg_latency_in_sec, minimum_latency_in_sec: self.bench_results.minimum_latency_in_sec, maximum_latency_in_sec: self.bench_results.maximum_latency_in_sec, diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index f1530593c6..d69748bc85 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -104,7 +104,7 @@ impl BuilderClient { /// # Errors /// - [`BuilderClientError::NotFound`] if blocks aren't available for this parent /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly - pub async fn get_available_blocks( + pub async fn available_blocks( &self, parent: VidCommitment, view_number: u64, diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 98e4ec9b74..aa8d7729ee 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -65,11 +65,11 @@ async fn validate_proposal_safety_and_liveness( sender: TYPES::SignatureKey, event_sender: Sender>, ) -> Result<()> { - let view = proposal.data.get_view_number(); + let view = proposal.data.view_number(); let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); ensure!( - proposed_leaf.get_parent_commitment() == parent_leaf.commit(), + proposed_leaf.parent_commitment() == parent_leaf.commit(), "Proposed leaf does not extend the parent leaf." ); @@ -96,18 +96,18 @@ async fn validate_proposal_safety_and_liveness( // Liveness check. let read_consensus = consensus.read().await; - let liveness_check = justify_qc.get_view_number() > read_consensus.locked_view(); + let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); // Safety check. // Check if proposal extends from the locked leaf. let outcome = read_consensus.visit_leaf_ancestors( - justify_qc.get_view_number(), + justify_qc.view_number(), Terminator::Inclusive(read_consensus.locked_view()), false, |leaf, _, _| { // if leaf view no == locked view no then we're done, report success by // returning true - leaf.get_view_number() != read_consensus.locked_view() + leaf.view_number() != read_consensus.locked_view() }, ); let safety_check = outcome.is_ok(); @@ -208,7 +208,7 @@ pub async fn create_and_send_proposal( }; let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.get_parent_commitment() != parent_leaf.commit() { + if proposed_leaf.parent_commitment() != parent_leaf.commit() { return; } @@ -226,7 +226,7 @@ pub async fn create_and_send_proposal( }; debug!( "Sending null proposal for view {:?}", - proposed_leaf.get_view_number(), + proposed_leaf.view_number(), ); if let Err(e) = consensus.write().await.update_last_proposed_view(view) { tracing::trace!("{e:?}"); @@ -253,21 +253,21 @@ pub fn validate_proposal_view_and_certs( quorum_membership: &Arc, timeout_membership: &Arc, ) -> Result<()> { - let view = proposal.data.get_view_number(); + let view = proposal.data.view_number(); ensure!( view >= cur_view, "Proposal is from an older view {:?}", proposal.data.clone() ); - let view_leader_key = quorum_membership.get_leader(view); + let view_leader_key = quorum_membership.leader(view); ensure!( view_leader_key == *sender, "Leader key does not match key in proposal" ); // Verify a timeout certificate OR a view sync certificate exists and is valid. - if proposal.data.justify_qc.get_view_number() != view - 1 { + if proposal.data.justify_qc.view_number() != view - 1 { let received_proposal_cert = proposal.data.proposal_certificate.clone().context(format!( "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", @@ -277,7 +277,7 @@ pub fn validate_proposal_view_and_certs( match received_proposal_cert { ViewChangeEvidence::Timeout(timeout_cert) => { ensure!( - timeout_cert.get_data().view == view - 1, + timeout_cert.date().view == view - 1, "Timeout certificate for view {} was not for the immediately preceding view", *view ); @@ -312,7 +312,7 @@ pub fn validate_proposal_view_and_certs( } /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. -pub(crate) async fn get_parent_leaf_and_state( +pub(crate) async fn parent_leaf_and_state( cur_view: TYPES::Time, view: TYPES::Time, quorum_membership: Arc, @@ -320,27 +320,27 @@ pub(crate) async fn get_parent_leaf_and_state( consensus: Arc>>, ) -> Result<(Leaf, Arc<::ValidatedState>)> { ensure!( - quorum_membership.get_leader(view) == public_key, + quorum_membership.leader(view) == public_key, "Somehow we formed a QC but are not the leader for the next view {view:?}", ); let consensus = consensus.read().await; - let parent_view_number = &consensus.high_qc().get_view_number(); + let parent_view_number = &consensus.high_qc().view_number(); let parent_view = consensus.validated_state_map().get(parent_view_number).context( format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", **parent_view_number) )?; // Leaf hash in view inner does not match high qc hash - Why? - let (leaf_commitment, state) = parent_view.get_leaf_and_state().context( + let (leaf_commitment, state) = parent_view.leaf_and_state().context( format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") )?; - if leaf_commitment != consensus.high_qc().get_data().leaf_commit { + if leaf_commitment != consensus.high_qc().date().leaf_commit { // NOTE: This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, - consensus.high_qc().get_data().leaf_commit + consensus.high_qc().date().leaf_commit ); } @@ -349,7 +349,7 @@ pub(crate) async fn get_parent_leaf_and_state( .get(&leaf_commitment) .context("Failed to find high QC of parent")?; - let reached_decided = leaf.get_view_number() == consensus.last_decided_view(); + let reached_decided = leaf.view_number() == consensus.last_decided_view(); let parent_leaf = leaf.clone(); let original_parent_hash = parent_leaf.commit(); let mut next_parent_hash = original_parent_hash; @@ -358,10 +358,10 @@ pub(crate) async fn get_parent_leaf_and_state( if !reached_decided { debug!("We have not reached decide from view {:?}", cur_view); while let Some(next_parent_leaf) = consensus.saved_leaves().get(&next_parent_hash) { - if next_parent_leaf.get_view_number() <= consensus.last_decided_view() { + if next_parent_leaf.view_number() <= consensus.last_decided_view() { break; } - next_parent_hash = next_parent_leaf.get_parent_commitment(); + next_parent_hash = next_parent_leaf.parent_commitment(); } // TODO do some sort of sanity check on the view number that it matches decided } @@ -385,7 +385,7 @@ pub(crate) async fn publish_proposal_from_upgrade_cert( delay: u64, instance_state: Arc, ) -> Result> { - let (parent_leaf, state) = get_parent_leaf_and_state( + let (parent_leaf, state) = parent_leaf_and_state( cur_view, view, Arc::clone(&quorum_membership), @@ -453,7 +453,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( proposal_cert: Option>, instance_state: Arc, ) -> Result> { - let (parent_leaf, state) = get_parent_leaf_and_state( + let (parent_leaf, state) = parent_leaf_and_state( cur_view, view, quorum_membership, @@ -470,7 +470,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. let mut proposal_upgrade_certificate = parent_leaf - .get_upgrade_certificate() + .upgrade_certificate() .or(formed_upgrade_certificate); if !proposal_upgrade_certificate @@ -604,8 +604,8 @@ pub async fn handle_quorum_proposal_recv { - if let (Some(state), _) = consensus_read.get_state_and_delta(leaf.get_view_number()) { + if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { Some((leaf, Arc::clone(&state))) } else { bail!("Parent state not found! Consensus internally inconsistent"); @@ -647,7 +647,7 @@ pub async fn handle_quorum_proposal_recv None, }; - if justify_qc.get_view_number() > consensus_read.high_qc().view_number { + if justify_qc.view_number() > consensus_read.high_qc().view_number { if let Err(e) = task_state .storage .write() @@ -669,7 +669,7 @@ pub async fn handle_quorum_proposal_recv consensus_write.locked_view(); + let liveness_check = justify_qc.view_number() > consensus_write.locked_view(); let high_qc = consensus_write.high_qc().clone(); let locked_view = consensus_write.locked_view(); @@ -722,7 +722,7 @@ pub async fn handle_quorum_proposal_recv, ) -> Result<()> { let consensus = task_state.consensus.read().await; - let view = proposal.get_view_number(); + let view = proposal.view_number(); #[cfg(not(feature = "dependency-tasks"))] { task_state.current_proposal = Some(proposal.clone()); @@ -822,7 +822,7 @@ pub async fn handle_quorum_proposal_validated> ConsensusTaskState /// Validate the VID disperse is correctly signed and has the correct share. #[cfg(not(feature = "dependency-tasks"))] fn validate_disperse(&self, disperse: &Proposal>) -> bool { - let view = disperse.data.get_view_number(); + let view = disperse.data.view_number(); let payload_commitment = disperse.data.payload_commitment; // Check whether the data satisfies one of the following. @@ -171,14 +171,14 @@ impl> ConsensusTaskState // * Signed by one of the staked DA committee members. if !self .quorum_membership - .get_leader(view) + .leader(view) .validate(&disperse.signature, payload_commitment.as_ref()) && !self .public_key .validate(&disperse.signature, payload_commitment.as_ref()) { let mut validated = false; - for da_member in self.da_membership.get_staked_committee(view) { + for da_member in self.da_membership.staked_committee(view) { if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { validated = true; break; @@ -248,7 +248,7 @@ impl> ConsensusTaskState let Some(proposal) = self.current_proposal.clone() else { return; }; - if proposal.get_view_number() != view { + if proposal.view_number() != view { return; } let upgrade = self.decided_upgrade_cert.clone(); @@ -285,12 +285,12 @@ impl> ConsensusTaskState match event.as_ref() { #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { - debug!("proposal recv view: {:?}", proposal.data.get_view_number()); + debug!("proposal recv view: {:?}", proposal.data.view_number()); match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self) .await { Ok(Some(current_proposal)) => { - let view = current_proposal.get_view_number(); + let view = current_proposal.view_number(); self.current_proposal = Some(current_proposal); self.spawn_vote_task(view, event_stream); } @@ -300,7 +300,7 @@ impl> ConsensusTaskState } #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalValidated(proposal, _) => { - debug!("proposal validated view: {:?}", proposal.get_view_number()); + debug!("proposal validated view: {:?}", proposal.view_number()); if let Err(e) = handle_quorum_proposal_validated(proposal, event_stream.clone(), self).await { @@ -308,30 +308,23 @@ impl> ConsensusTaskState } } HotShotEvent::QuorumVoteRecv(ref vote) => { - debug!("Received quorum vote: {:?}", vote.get_view_number()); - if self - .quorum_membership - .get_leader(vote.get_view_number() + 1) - != self.public_key - { + debug!("Received quorum vote: {:?}", vote.view_number()); + if self.quorum_membership.leader(vote.view_number() + 1) != self.public_key { error!( "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.get_view_number() + 1, - self.quorum_membership - .get_leader(vote.get_view_number() + 2) - == self.public_key + *vote.view_number() + 1, + self.quorum_membership.leader(vote.view_number() + 2) == self.public_key ); return; } let mut collector = self.vote_collector.write().await; - if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view - { - debug!("Starting vote handle for view {:?}", vote.get_view_number()); + if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { + debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: Arc::clone(&self.quorum_membership), - view: vote.get_view_number(), + view: vote.view_number(), id: self.id, }; *collector = create_vote_accumulator::< @@ -355,29 +348,22 @@ impl> ConsensusTaskState } } HotShotEvent::TimeoutVoteRecv(ref vote) => { - if self - .timeout_membership - .get_leader(vote.get_view_number() + 1) - != self.public_key - { + if self.timeout_membership.leader(vote.view_number() + 1) != self.public_key { error!( "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.get_view_number() + 1, - self.timeout_membership - .get_leader(vote.get_view_number() + 2) - == self.public_key + *vote.view_number() + 1, + self.timeout_membership.leader(vote.view_number() + 2) == self.public_key ); return; } let mut collector = self.timeout_vote_collector.write().await; - if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view - { - debug!("Starting vote handle for view {:?}", vote.get_view_number()); + if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { + debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: Arc::clone(&self.quorum_membership), - view: vote.get_view_number(), + view: vote.view_number(), id: self.id, }; *collector = create_vote_accumulator::< @@ -463,14 +449,14 @@ impl> ConsensusTaskState let Some(proposal) = self.current_proposal.clone() else { return; }; - if proposal.get_view_number() != view { + if proposal.view_number() != view { return; } self.spawn_vote_task(view, event_stream); } #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::VIDShareRecv(disperse) => { - let view = disperse.data.get_view_number(); + let view = disperse.data.view_number(); debug!( "VID disperse received for view: {:?} in consensus task", @@ -503,7 +489,7 @@ impl> ConsensusTaskState let Some(proposal) = self.current_proposal.clone() else { return; }; - if proposal.get_view_number() != view { + if proposal.view_number() != view { return; } self.spawn_vote_task(view, event_stream.clone()); @@ -637,8 +623,8 @@ impl> ConsensusTaskState fee: fee.clone(), block_view: view, }); - if self.quorum_membership.get_leader(view) == self.public_key - && self.consensus.read().await.high_qc().get_view_number() + 1 == view + if self.quorum_membership.leader(view) == self.public_key + && self.consensus.read().await.high_qc().view_number() + 1 == view { if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { error!("Failed to propose; error = {e:?}"); @@ -653,7 +639,7 @@ impl> ConsensusTaskState } match cert { ViewChangeEvidence::Timeout(tc) => { - if self.quorum_membership.get_leader(tc.get_view_number() + 1) + if self.quorum_membership.leader(tc.view_number() + 1) == self.public_key { if let Err(e) = self.publish_proposal(view, event_stream).await { @@ -662,9 +648,7 @@ impl> ConsensusTaskState } } ViewChangeEvidence::ViewSync(vsc) => { - if self.quorum_membership.get_leader(vsc.get_view_number()) - == self.public_key - { + if self.quorum_membership.leader(vsc.view_number()) == self.public_key { if let Err(e) = self.publish_proposal(view, event_stream).await { debug!("Failed to propose; error = {e:?}"); }; @@ -678,14 +662,14 @@ impl> ConsensusTaskState if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { error!( "View Sync Finalize certificate {:?} was invalid", - certificate.get_data() + certificate.date() ); return; } let view = certificate.view_number; - if self.quorum_membership.get_leader(view) == self.public_key { + if self.quorum_membership.leader(view) == self.public_key { self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); debug!( @@ -703,12 +687,11 @@ impl> ConsensusTaskState let Some(proposal) = self.current_proposal.clone() else { return; }; - let new_view = proposal.get_view_number() + 1; + let new_view = proposal.view_number() + 1; // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_membership.get_leader(new_view) == self.public_key - && self.consensus.read().await.high_qc().view_number - == proposal.get_view_number(); + let should_propose = self.quorum_membership.leader(new_view) == self.public_key + && self.consensus.read().await.high_qc().view_number == proposal.view_number(); if should_propose { debug!( @@ -719,7 +702,7 @@ impl> ConsensusTaskState debug!("failed to propose e = {:?}", e); } } - if proposal.get_view_number() <= vote.get_view_number() { + if proposal.view_number() <= vote.view_number() { self.current_proposal = None; } } @@ -727,16 +710,16 @@ impl> ConsensusTaskState if self .payload_commitment_and_metadata .as_ref() - .is_some_and(|p| p.block_view <= proposal.data.get_view_number()) + .is_some_and(|p| p.block_view <= proposal.data.view_number()) { self.payload_commitment_and_metadata = None; } if let Some(cert) = &self.proposal_cert { let view = match cert { - ViewChangeEvidence::Timeout(tc) => tc.get_view_number() + 1, - ViewChangeEvidence::ViewSync(vsc) => vsc.get_view_number(), + ViewChangeEvidence::Timeout(tc) => tc.view_number() + 1, + ViewChangeEvidence::ViewSync(vsc) => vsc.view_number(), }; - if view < proposal.data.get_view_number() { + if view < proposal.data.view_number() { self.proposal_cert = None; } } diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 7bb37fc912..ec8a56b831 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -86,16 +86,16 @@ pub(crate) async fn update_view( consensus .metrics .current_view - .set(usize::try_from(cur_view.get_u64()).unwrap()); + .set(usize::try_from(cur_view.u64()).unwrap()); // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(cur_view.get_u64()).unwrap() - > usize::try_from(consensus.last_decided_view().get_u64()).unwrap() + if usize::try_from(cur_view.u64()).unwrap() + > usize::try_from(consensus.last_decided_view().u64()).unwrap() { consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(cur_view.get_u64()).unwrap() - - usize::try_from(consensus.last_decided_view().get_u64()).unwrap(), + usize::try_from(cur_view.u64()).unwrap() + - usize::try_from(consensus.last_decided_view().u64()).unwrap(), ); } let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ce253ce3f3..4bcbbd5cf0 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -96,10 +96,10 @@ impl, A: ConsensusApi + let sender = sender.clone(); debug!( "DA proposal received for view: {:?}", - proposal.data.get_view_number() + proposal.data.view_number() ); // ED NOTE: Assuming that the next view leader is the one who sends DA proposal for this view - let view = proposal.data.get_view_number(); + let view = proposal.data.view_number(); // Allow a DA proposal that is one view older, in case we have voted on a quorum // proposal and updated the view. @@ -127,7 +127,7 @@ impl, A: ConsensusApi + let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? - let view_leader_key = self.da_membership.get_leader(view); + let view_leader_key = self.da_membership.leader(view); if view_leader_key != sender { error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); return None; @@ -177,7 +177,7 @@ impl, A: ConsensusApi + #[cfg(async_executor_impl = "tokio")] let payload_commitment = payload_commitment.unwrap(); - let view = proposal.data.get_view_number(); + let view = proposal.data.view_number(); // Generate and send vote let Ok(vote) = DaVote::create_signed_vote( DaData { @@ -191,7 +191,7 @@ impl, A: ConsensusApi + return None; }; - debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); + debug!("Sending vote to the DA leader {:?}", vote.view_number()); broadcast_event(Arc::new(HotShotEvent::DaVoteSend(vote)), &event_stream).await; let mut consensus = self.consensus.write().await; @@ -216,22 +216,21 @@ impl, A: ConsensusApi + } } HotShotEvent::DaVoteRecv(ref vote) => { - debug!("DA vote recv, Main Task {:?}", vote.get_view_number()); + debug!("DA vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader and the vote is from the sender. - let view = vote.get_view_number(); - if self.da_membership.get_leader(view) != self.public_key { - error!("We are not the DA committee leader for view {} are we leader for next view? {}", *view, self.da_membership.get_leader(view + 1) == self.public_key); + let view = vote.view_number(); + if self.da_membership.leader(view) != self.public_key { + error!("We are not the DA committee leader for view {} are we leader for next view? {}", *view, self.da_membership.leader(view + 1) == self.public_key); return None; } let mut collector = self.vote_collector.write().await; - if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view - { - debug!("Starting vote handle for view {:?}", vote.get_view_number()); + if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { + debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: Arc::clone(&self.da_membership), - view: vote.get_view_number(), + view: vote.view_number(), id: self.id, }; *collector = create_vote_accumulator::< @@ -266,7 +265,7 @@ impl, A: ConsensusApi + self.cur_view = view; // If we are not the next leader (DA leader for this view) immediately exit - if self.da_membership.get_leader(self.cur_view + 1) != self.public_key { + if self.da_membership.leader(self.cur_view + 1) != self.public_key { return None; } debug!("Polling for DA votes for view {}", *self.cur_view + 1); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 64460d7b42..3d88afec1d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -283,11 +283,11 @@ impl< HotShotEvent::QuorumVoteSend(vote) => { maybe_action = Some(HotShotAction::Vote); ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct(membership.get_leader(vote.get_view_number() + 1)), + TransmitType::Direct(membership.leader(vote.view_number() + 1)), ) } HotShotEvent::VidDisperseSend(proposal, sender) => { @@ -306,11 +306,11 @@ impl< HotShotEvent::DaVoteSend(vote) => { maybe_action = Some(HotShotAction::DaVote); ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Direct(membership.get_leader(vote.get_view_number())), + TransmitType::Direct(membership.leader(vote.view_number())), ) } // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee @@ -325,31 +325,25 @@ impl< ) } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), )), - TransmitType::Direct( - membership.get_leader(vote.get_view_number() + vote.get_data().relay), - ), + TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), ), HotShotEvent::ViewSyncCommitVoteSend(vote) => ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), )), - TransmitType::Direct( - membership.get_leader(vote.get_view_number() + vote.get_data().relay), - ), + TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), ), HotShotEvent::ViewSyncFinalizeVoteSend(vote) => ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), )), - TransmitType::Direct( - membership.get_leader(vote.get_view_number() + vote.get_data().relay), - ), + TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), ), HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( sender, @@ -373,11 +367,11 @@ impl< TransmitType::Broadcast, ), HotShotEvent::TimeoutVoteSend(vote) => ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote(vote.clone()), )), - TransmitType::Direct(membership.get_leader(vote.get_view_number() + 1)), + TransmitType::Direct(membership.leader(vote.view_number() + 1)), ), HotShotEvent::UpgradeProposalSend(proposal, sender) => ( sender, @@ -389,17 +383,17 @@ impl< HotShotEvent::UpgradeVoteSend(vote) => { error!("Sending upgrade vote!"); ( - vote.get_signing_key(), + vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Direct(membership.get_leader(vote.get_view_number())), + TransmitType::Direct(membership.leader(vote.view_number())), ) } HotShotEvent::ViewChange(view) => { self.view = view; self.channel - .update_view::(self.view.get_u64(), membership) + .update_view::(self.view.u64(), membership) .await; return None; } @@ -421,8 +415,8 @@ impl< sender, kind: message_kind, }; - let view = message.kind.get_view_number(); - let committee = membership.get_whole_committee(view); + let view = message.kind.view_number(); + let committee = membership.whole_committee(view); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); let version = self.version; diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 2be2109eb0..fc6a4153f6 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -435,7 +435,7 @@ impl> QuorumProposalTaskState>, ) { // Don't even bother making the task if we are not entitled to propose anyay. - if self.quorum_membership.get_leader(view_number) != self.public_key { + if self.quorum_membership.leader(view_number) != self.public_key { return; } @@ -573,7 +573,7 @@ impl> QuorumProposalTaskState> QuorumProposalRecvTaskState< match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self).await { Ok(Some(current_proposal)) => { // Build the parent leaf since we didn't find it during the proposal check. - let parent_leaf = match get_parent_leaf_and_state( + let parent_leaf = match parent_leaf_and_state( self.cur_view, - proposal.data.get_view_number() + 1, + proposal.data.view_number() + 1, Arc::clone(&self.quorum_membership), self.public_key.clone(), Arc::clone(&self.consensus), @@ -135,8 +135,8 @@ impl> QuorumProposalRecvTaskState< } }; - let view = current_proposal.get_view_number(); - self.cancel_tasks(proposal.data.get_view_number()).await; + let view = current_proposal.view_number(); + self.cancel_tasks(proposal.data.view_number()).await; let consensus = self.consensus.read().await; let Some(vid_shares) = consensus.vid_shares().get(&view) else { debug!( @@ -151,11 +151,11 @@ impl> QuorumProposalRecvTaskState< }; let Some(da_cert) = consensus .saved_da_certs() - .get(¤t_proposal.get_view_number()) + .get(¤t_proposal.view_number()) else { debug!( "Received VID share, but couldn't find DAC cert for view {:?}", - current_proposal.get_view_number() + current_proposal.view_number() ); return; }; @@ -174,7 +174,7 @@ impl> QuorumProposalRecvTaskState< .await; } Ok(None) => { - self.cancel_tasks(proposal.data.get_view_number()).await; + self.cancel_tasks(proposal.data.view_number()).await; } Err(e) => debug!(?e, "Failed to propose"), } diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index dce9608f24..403cfd3db3 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -101,14 +101,14 @@ impl + 'static> HandleDepOutput } let parent_commitment = parent_leaf.commit(); let proposed_leaf = Leaf::from_quorum_proposal(proposal); - if proposed_leaf.get_parent_commitment() != parent_commitment { + if proposed_leaf.parent_commitment() != parent_commitment { warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } leaf = Some(proposed_leaf); } HotShotEvent::DaCertificateValidated(cert) => { - let cert_payload_comm = cert.get_data().payload_commit; + let cert_payload_comm = cert.date().payload_commit; if let Some(comm) = payload_commitment { if cert_payload_comm != comm { error!("DAC has inconsistent payload commitment with quorum proposal or VID."); @@ -187,7 +187,7 @@ impl + 'static> HandleDepOutput if let GeneralConsensusMessage::Vote(vote) = message { debug!( "Sending vote to next quorum leader {:?}", - vote.get_view_number() + 1 + vote.view_number() + 1 ); // Add to the storage. let Some(disperse) = disperse_share else { @@ -444,7 +444,7 @@ impl> QuorumVoteTaskState { - let view = disperse.data.get_view_number(); + let view = disperse.data.view_number(); trace!("Received VID share for view {}", *view); if view <= self.latest_voted_view { return; @@ -458,14 +458,14 @@ impl> QuorumVoteTaskState, Ver: StaticVersionType + 'st match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let state = task.state(); - let prop_view = proposal.get_view_number(); + let prop_view = proposal.view_number(); if prop_view >= state.view { state .spawn_requests(prop_view, task.clone_sender(), Ver::instance()) @@ -169,7 +169,7 @@ impl, Ver: StaticVersionType + 'st ) { let mut recipients: Vec<_> = self .da_membership - .get_whole_committee(view) + .whole_committee(view) .into_iter() .collect(); // Randomize the recipients so all replicas don't overload the same 1 recipients diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index fd853ac8ce..966f0c8517 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -118,12 +118,12 @@ impl< let mut make_block = false; if *view - *self.cur_view > 1 { error!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.get_leader(view) == self.public_key; + make_block = self.membership.leader(view) == self.public_key; } self.cur_view = view; // return if we aren't the next leader or we skipped last view and aren't the current leader. - if !make_block && self.membership.get_leader(self.cur_view + 1) != self.public_key { + if !make_block && self.membership.leader(self.cur_view + 1) != self.public_key { debug!("Not next leader for view {:?}", self.cur_view); return None; } @@ -228,7 +228,7 @@ impl< ViewInner::Leaf { leaf, .. } => consensus .saved_leaves() .get(&leaf) - .map(Leaf::get_payload_commitment), + .map(Leaf::payload_commitment), ViewInner::Failed => None, }) { @@ -238,10 +238,7 @@ impl< } // If not found, return commitment for last decided block - ( - prev_view, - consensus.get_decided_leaf().get_payload_commitment(), - ) + (prev_view, consensus.decided_leaf().payload_commitment()) } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "wait_for_block", level = "error")] @@ -266,7 +263,7 @@ impl< self.api .builder_timeout() .saturating_sub(task_start_time.elapsed()), - self.get_block_from_builder(parent_comm, view_num, &parent_comm_sig), + self.block_from_builder(parent_comm, view_num, &parent_comm_sig), ) .await { @@ -295,8 +292,8 @@ impl< None } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "get_block_from_builder", level = "error")] - async fn get_block_from_builder( + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "block_from_builder", level = "error")] + async fn block_from_builder( &self, parent_comm: VidCommitment, view_number: TYPES::Time, @@ -304,9 +301,9 @@ impl< ) -> anyhow::Result> { let available_blocks = self .builder_client - .get_available_blocks( + .available_blocks( parent_comm, - view_number.get_u64(), + view_number.u64(), self.public_key.clone(), parent_comm_sig, ) @@ -347,8 +344,8 @@ impl< .context("signing block hash")?; let (block, header_input) = futures::join! { - self.builder_client.claim_block(block_info.block_hash.clone(), view_number.get_u64(), self.public_key.clone(), &request_signature), - self.builder_client.claim_block_header_input(block_info.block_hash.clone(), view_number.get_u64(), self.public_key.clone(), &request_signature) + self.builder_client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), + self.builder_client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) }; let block_data = block.context("claiming block data")?; diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 04d3afd455..a3d7a1a72e 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -84,10 +84,10 @@ impl, A: ConsensusApi + // If we have an upgrade target, we validate that the proposal is relevant for the current view. info!( "Upgrade proposal received for view: {:?}", - proposal.data.get_view_number() + proposal.data.view_number() ); - let view = proposal.data.get_view_number(); + let view = proposal.data.view_number(); // At this point, we could choose to validate // that the proposal was issued by the correct leader @@ -113,7 +113,7 @@ impl, A: ConsensusApi + } // We then validate that the proposal was issued by the leader for the view. - let view_leader_key = self.quorum_membership.get_leader(view); + let view_leader_key = self.quorum_membership.leader(view); if &view_leader_key != sender { error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); return None; @@ -143,20 +143,20 @@ impl, A: ConsensusApi + error!("Failed to sign UpgradeVote!"); return None; }; - debug!("Sending upgrade vote {:?}", vote.get_view_number()); + debug!("Sending upgrade vote {:?}", vote.view_number()); broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; } HotShotEvent::UpgradeVoteRecv(ref vote) => { - debug!("Upgrade vote recv, Main Task {:?}", vote.get_view_number()); + debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader. { - let view = vote.get_view_number(); - if self.quorum_membership.get_leader(view) != self.public_key { + let view = vote.view_number(); + if self.quorum_membership.leader(view) != self.public_key { error!( "We are not the leader for view {} are we leader for next view? {}", *view, - self.quorum_membership.get_leader(view + 1) == self.public_key + self.quorum_membership.leader(view + 1) == self.public_key ); return None; } @@ -164,13 +164,12 @@ impl, A: ConsensusApi + let mut collector = self.vote_collector.write().await; - if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view - { - debug!("Starting vote handle for view {:?}", vote.get_view_number()); + if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { + debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: Arc::clone(&self.quorum_membership), - view: vote.get_view_number(), + view: vote.view_number(), id: self.id, }; *collector = create_vote_accumulator::< @@ -218,8 +217,7 @@ impl, A: ConsensusApi + }; use vbs::version::Version; - if *view == 5 && self.quorum_membership.get_leader(view + 5) == self.public_key - { + if *view == 5 && self.quorum_membership.leader(view + 5) == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: Version { major: 0, minor: 1 }, new_version: Version { major: 1, minor: 0 }, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index fe5af6d66a..234336a557 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -145,7 +145,7 @@ impl, A: ConsensusApi + self.cur_view = view; // If we are not the next leader, we should exit - if self.membership.get_leader(self.cur_view + 1) != self.public_key { + if self.membership.leader(self.cur_view + 1) != self.public_key { // panic!("We are not the DA leader for view {}", *self.cur_view + 1); return None; } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 5068442734..36fd1424f2 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -288,19 +288,19 @@ impl< match event.as_ref() { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); - let view = certificate.get_view_number(); + let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); - let view = certificate.get_view_number(); + let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); - let view = certificate.get_view_number(); + let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } @@ -313,8 +313,8 @@ impl< HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let mut map = self.pre_commit_relay_map.write().await; - let vote_view = vote.get_view_number(); - let relay = vote.get_data().relay; + let vote_view = vote.view_number(); + let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); @@ -330,7 +330,7 @@ impl< } // We do not have a relay task already running, so start one - if self.membership.get_leader(vote_view + relay) != self.public_key { + if self.membership.leader(vote_view + relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; @@ -351,8 +351,8 @@ impl< HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let mut map = self.commit_relay_map.write().await; - let vote_view = vote.get_view_number(); - let relay = vote.get_data().relay; + let vote_view = vote.view_number(); + let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); @@ -368,7 +368,7 @@ impl< } // We do not have a relay task already running, so start one - if self.membership.get_leader(vote_view + relay) != self.public_key { + if self.membership.leader(vote_view + relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; @@ -389,8 +389,8 @@ impl< HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { let mut map = self.finalize_relay_map.write().await; - let vote_view = vote.get_view_number(); - let relay = vote.get_data().relay; + let vote_view = vote.view_number(); + let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); @@ -406,7 +406,7 @@ impl< } // We do not have a relay task already running, so start one - if self.membership.get_leader(vote_view + relay) != self.public_key { + if self.membership.leader(vote_view + relay) != self.public_key { // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; @@ -470,7 +470,7 @@ impl< } self.num_timeouts_tracked += 1; - let leader = self.membership.get_leader(view_number); + let leader = self.membership.leader(view_number); error!( %leader, leader_mnemonic = cdn_proto::mnemonic(&leader), @@ -525,7 +525,7 @@ impl, A: ConsensusApi + let last_seen_certificate = ViewSyncPhase::PreCommit; // Ignore certificate if it is for an older round - if certificate.get_view_number() < self.next_view { + if certificate.view_number() < self.next_view { warn!("We're already in a higher round"); return None; @@ -533,24 +533,24 @@ impl, A: ConsensusApi + // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { - error!("Not valid view sync cert! {:?}", certificate.get_data()); + error!("Not valid view sync cert! {:?}", certificate.date()); return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round - if certificate.get_view_number() > self.next_view { + if certificate.view_number() > self.next_view { return Some(HotShotTaskCompleted); } - if certificate.get_data().relay > self.relay { - self.relay = certificate.get_data().relay; + if certificate.date().relay > self.relay { + self.relay = certificate.date().relay; } let Ok(vote) = ViewSyncCommitVote::::create_signed_vote( ViewSyncCommitData { - relay: certificate.get_data().relay, + relay: certificate.date().relay, round: self.next_view, }, self.next_view, @@ -601,7 +601,7 @@ impl, A: ConsensusApi + let last_seen_certificate = ViewSyncPhase::Commit; // Ignore certificate if it is for an older round - if certificate.get_view_number() < self.next_view { + if certificate.view_number() < self.next_view { warn!("We're already in a higher round"); return None; @@ -609,24 +609,24 @@ impl, A: ConsensusApi + // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { - error!("Not valid view sync cert! {:?}", certificate.get_data()); + error!("Not valid view sync cert! {:?}", certificate.date()); return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round - if certificate.get_view_number() > self.next_view { + if certificate.view_number() > self.next_view { return Some(HotShotTaskCompleted); } - if certificate.get_data().relay > self.relay { - self.relay = certificate.get_data().relay; + if certificate.date().relay > self.relay { + self.relay = certificate.date().relay; } let Ok(vote) = ViewSyncFinalizeVote::::create_signed_vote( ViewSyncFinalizeData { - relay: certificate.get_data().relay, + relay: certificate.date().relay, round: self.next_view, }, self.next_view, @@ -693,7 +693,7 @@ impl, A: ConsensusApi + HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { // Ignore certificate if it is for an older round - if certificate.get_view_number() < self.next_view { + if certificate.view_number() < self.next_view { warn!("We're already in a higher round"); return None; @@ -701,19 +701,19 @@ impl, A: ConsensusApi + // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { - error!("Not valid view sync cert! {:?}", certificate.get_data()); + error!("Not valid view sync cert! {:?}", certificate.date()); return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round - if certificate.get_view_number() > self.next_view { + if certificate.view_number() > self.next_view { return Some(HotShotTaskCompleted); } - if certificate.get_data().relay > self.relay { - self.relay = certificate.get_data().relay; + if certificate.date().relay > self.relay { + self.relay = certificate.date().relay; } if let Some(timeout_task) = self.timeout_task.take() { diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index c5a14e75e2..da11a98617 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -53,7 +53,7 @@ pub trait AggregatableVote< > { /// return the leader for this votes - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey; + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -73,15 +73,15 @@ impl< vote: &VOTE, event_stream: &Sender>>, ) -> Option { - if vote.get_leader(&self.membership) != self.public_key { + if vote.leader(&self.membership) != self.public_key { error!("Received vote for a view in which we were not the leader."); return None; } - if vote.get_view_number() != self.view { + if vote.view_number() != self.view { error!( "Vote view does not match! vote view is {} current view is {}", - *vote.get_view_number(), + *vote.view_number(), *self.view ); return None; @@ -189,10 +189,10 @@ where + 'static, VoteCollectionTaskState: HandleVoteEvent, { - if vote.get_view_number() != info.view { + if vote.view_number() != info.view { error!( "Vote view does not match! vote view is {} current view is {}", - *vote.get_view_number(), + *vote.view_number(), *info.view ); return None; @@ -251,8 +251,8 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number() + 1) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.view_number() + 1) } fn make_cert_event( certificate: QuorumCertificate, @@ -265,8 +265,8 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.view_number()) } fn make_cert_event( certificate: UpgradeCertificate, @@ -279,8 +279,8 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate> for DaVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number()) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.view_number()) } fn make_cert_event( certificate: DaCertificate, @@ -293,8 +293,8 @@ impl AggregatableVote, DaCertificate AggregatableVote, TimeoutCertificate> for TimeoutVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_view_number() + 1) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.view_number() + 1) } fn make_cert_event( certificate: TimeoutCertificate, @@ -308,8 +308,8 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_data().round + self.get_data().relay) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay) } fn make_cert_event( certificate: ViewSyncCommitCertificate2, @@ -323,8 +323,8 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_data().round + self.get_data().relay) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay) } fn make_cert_event( certificate: ViewSyncPreCommitCertificate2, @@ -338,8 +338,8 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote { - fn get_leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.get_leader(self.get_data().round + self.get_data().relay) + fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay) } fn make_cert_event( certificate: ViewSyncFinalizeCertificate2, diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 74598e89c3..c2d1f0c4ef 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -209,7 +209,7 @@ impl ReadState for RandomBuilderSource { #[async_trait] impl BuilderDataSource for RandomBuilderSource { - async fn get_available_blocks( + async fn available_blocks( &self, _for_parent: &VidCommitment, _view_number: u64, @@ -260,7 +260,7 @@ impl BuilderDataSource for RandomBuilderSource { Ok(header_input) } - async fn get_builder_address(&self) -> Result { + async fn builder_address(&self) -> Result { Ok(self.pub_key.clone()) } } @@ -322,7 +322,7 @@ impl BuilderDataSource for SimpleBuilderSource where ::InstanceState: Default, { - async fn get_available_blocks( + async fn available_blocks( &self, _for_parent: &VidCommitment, _view_number: u64, @@ -419,7 +419,7 @@ where entry.header_input.take().ok_or(BuildError::Missing) } - async fn get_builder_address(&self) -> Result { + async fn builder_address(&self) -> Result { Ok(self.pub_key.clone()) } } @@ -473,9 +473,9 @@ impl BuilderTask for SimpleBuilderTask { EventType::Decide { leaf_chain, .. } => { let mut queue = self.transactions.write().await; for leaf_info in leaf_chain.iter() { - if let Some(ref payload) = leaf_info.leaf.get_block_payload() { + if let Some(ref payload) = leaf_info.leaf.block_payload() { for txn in payload.transaction_commitments( - leaf_info.leaf.get_block_header().metadata(), + leaf_info.leaf.block_header().metadata(), ) { self.decided_transactions.put(txn, ()); queue.remove(&txn); diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index e6e6ab8d2b..635c277539 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -160,7 +160,7 @@ impl> TestTaskState block_size: maybe_block_size, } => { // Skip the genesis leaf. - if leaf_chain.last().unwrap().leaf.get_view_number() == TYPES::Time::genesis() { + if leaf_chain.last().unwrap().leaf.view_number() == TYPES::Time::genesis() { return None; } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); @@ -373,7 +373,7 @@ impl RoundResult { } } - let payload_commitment = leaf.get_payload_commitment(); + let payload_commitment = leaf.payload_commitment(); match self.block_map.entry(payload_commitment) { std::collections::hash_map::Entry::Occupied(mut o) => { @@ -430,7 +430,7 @@ impl RoundResult { .unwrap(); if *count >= threshold { for leaf in self.leaf_map.keys() { - if leaf.get_view_number() > quorum_leaf.get_view_number() { + if leaf.view_number() > quorum_leaf.view_number() { error!("LEAF MAP (that is mismatched) IS: {:?}", self.leaf_map); self.status = ViewStatus::Err(OverallSafetyTaskErr::MismatchedLeaf); return; @@ -465,7 +465,7 @@ impl RoundResult { // if not, return error // if neither, continue through - let block_key = key.get_payload_commitment(); + let block_key = key.payload_commitment(); if *self.block_map.get(&block_key).unwrap() == threshold && *self.leaf_map.get(key).unwrap() == threshold diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 96b7028a4a..1a3cb7d43b 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -92,7 +92,7 @@ where } = event { let leaf = leaf_chain.first().unwrap().leaf.clone(); - if leaf.get_view_number() > state.last_decided_leaf.get_view_number() { + if leaf.view_number() > state.last_decided_leaf.view_number() { state.last_decided_leaf = leaf; } } else if let EventType::QuorumProposal { @@ -100,7 +100,7 @@ where sender: _, } = event { - if proposal.data.justify_qc.get_view_number() > state.high_qc.get_view_number() { + if proposal.data.justify_qc.view_number() > state.high_qc.view_number() { state.high_qc = proposal.data.justify_qc; } } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index e4c4a2ae9c..e34050f05e 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -126,15 +126,15 @@ pub fn build_cert< SimpleVote::::create_signed_vote(data, view, public_key, private_key) .expect("Failed to sign data!"); let cert = CERT::create_signed_certificate( - vote.get_data_commitment(), - vote.get_data().clone(), + vote.date_commitment(), + vote.date().clone(), real_qc_sig, - vote.get_view_number(), + vote.view_number(), ); cert } -pub fn get_vid_share( +pub fn vid_share( shares: &[Proposal>], pub_key: TYPES::SignatureKey, ) -> Proposal> { @@ -161,9 +161,9 @@ pub fn build_assembled_sig< membership: &TYPES::Membership, view: TYPES::Time, ) -> ::QCType { - let stake_table = membership.get_committee_qc_stake_table(); + let stake_table = membership.committee_qc_stake_table(); let real_qc_pp: ::QCParams = - ::get_public_parameter( + ::public_parameter( stake_table.clone(), U256::from(CERT::threshold(membership)), ); @@ -182,7 +182,7 @@ pub fn build_assembled_sig< ) .expect("Failed to sign data!"); let original_signature: ::PureAssembledSignatureType = - vote.get_signature(); + vote.signature(); sig_lists.push(original_signature); } @@ -212,7 +212,7 @@ pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, view_number: TYPES::Time, ) -> VidSchemeType { - let num_storage_nodes = membership.get_staked_committee(view_number).len(); + let num_storage_nodes = membership.staked_committee(view_number).len(); vid_scheme(num_storage_nodes) } @@ -301,7 +301,7 @@ pub async fn build_vote( leaf_commit: leaf.commit(), }, view, - handle.public_key(), + &handle.public_key(), handle.private_key(), ) .expect("Failed to create quorum vote"); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 4af8cebcf4..3d04c99f94 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -258,10 +258,10 @@ impl TestDescription { // Add the node to the known DA nodes based on the index (for tests) if node_id_ < da_staked_committee_size { - known_da_nodes.push(cur_validator_config.get_public_config()); + known_da_nodes.push(cur_validator_config.public_config()); } - cur_validator_config.get_public_config() + cur_validator_config.public_config() }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 2aceeb8493..2d197a3d80 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -154,11 +154,11 @@ where let mut internal_event_rxs = vec![]; for node in &self.nodes { - let r = node.handle.get_event_stream_known_impl(); + let r = node.handle.event_stream_known_impl(); event_rxs.push(r); } for node in &self.nodes { - let r = node.handle.get_internal_event_stream_known_impl(); + let r = node.handle.internal_event_stream_known_impl(); internal_event_rxs.push(r); } @@ -406,7 +406,7 @@ where let handle = hotshot.run_tasks().await; if node_id == 1 { if let Some(task) = builder_task.take() { - task.start(Box::new(handle.get_event_stream())) + task.start(Box::new(handle.event_stream())) } } diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 330a5ebafe..dcf72dcfb5 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -69,7 +69,7 @@ impl> TxnTask { // we're assuming all nodes have the same leaf. // If they don't match, this is probably fine since // it should be caught by an assertion (and the txn will be rejected anyway) - let leaf = node.handle.get_decided_leaf().await; + let leaf = node.handle.decided_leaf().await; let txn = I::leaf_create_random_transaction(&leaf, &mut thread_rng(), 0); node.handle .submit_transaction(txn.clone()) diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 11a5e4cd4e..67ca740af9 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -361,7 +361,7 @@ impl TestView { leaf_commit: self.leaf.commit(), }, self.view_number, - handle.public_key(), + &handle.public_key(), handle.private_key(), ) .expect("Failed to generate a signature on QuorumVote") @@ -375,7 +375,7 @@ impl TestView { UpgradeVote::::create_signed_vote( data, self.view_number, - handle.public_key(), + &handle.public_key(), handle.private_key(), ) .expect("Failed to generate a signature on UpgradVote") @@ -389,7 +389,7 @@ impl TestView { DaVote::create_signed_vote( data, self.view_number, - handle.public_key(), + &handle.public_key(), handle.private_key(), ) .expect("Failed to sign DaData") diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index cb4e96824f..c7d1aba333 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -54,7 +54,7 @@ async fn test_random_block_builder() { let mut blocks = loop { // Test getting blocks let blocks = client - .get_available_blocks( + .available_blocks( vid_commitment(&[], 1), dummy_view_number, pub_key, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 9675040444..284308b13d 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -15,7 +15,7 @@ use hotshot_testing::{ }, script::{run_test_script, TestScriptStage}, task_helpers::{ - build_system_handle, get_vid_share, key_pair_for_id, vid_scheme_from_view_number, + build_system_handle, vid_share, key_pair_for_id, vid_scheme_from_view_number, }, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, @@ -72,7 +72,7 @@ async fn test_consensus_task() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -88,7 +88,7 @@ async fn test_consensus_task() { // Run view 2 and propose. let view_2 = TestScriptStage { inputs: vec![ - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), QCFormed(either::Left(cert)), // We must have a payload commitment and metadata to propose. @@ -155,7 +155,7 @@ async fn test_consensus_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ @@ -204,7 +204,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -216,7 +216,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; @@ -317,7 +317,7 @@ async fn test_view_sync_finalize_propose() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -366,7 +366,7 @@ async fn test_view_sync_finalize_propose() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let view_4 = TestScriptStage { inputs: vec![ - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), TimeoutVoteRecv(timeout_vote_view_2), TimeoutVoteRecv(timeout_vote_view_3), @@ -443,7 +443,7 @@ async fn test_view_sync_finalize_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -540,7 +540,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -609,7 +609,7 @@ async fn test_vid_disperse_storage_failure() { let handle = build_system_handle(2).await.0; // Set the error flag here for the system handle. This causes it to emit an error on append. - handle.get_storage().write().await.should_return_err = true; + handle.storage().write().await.should_return_err = true; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -634,7 +634,7 @@ async fn test_vid_disperse_storage_failure() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 804ab2a153..cee01f6ff9 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -110,7 +110,7 @@ async fn test_da_task_storage_failure() { let handle = build_system_handle(2).await.0; // Set the error flag here for the system handle. This causes it to emit an error on append. - handle.get_storage().write().await.should_return_err = true; + handle.storage().write().await.should_return_err = true; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index c16b2f41bb..d22eadc825 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -11,7 +11,7 @@ use hotshot_example_types::{ use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::{all_predicates, exact, quorum_proposal_send, quorum_proposal_validated}, - task_helpers::{get_vid_share, vid_scheme_from_view_number}, + task_helpers::{vid_share, vid_scheme_from_view_number}, test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; @@ -69,7 +69,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -101,11 +101,11 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { view_2_inputs.insert(0, DaCertificateRecv(dacs[1].clone())); view_2_inputs.insert( 0, - VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), ); view_2_inputs.insert( 0, - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), ); // This stage transitions from view 1 to view 2. diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index a3e9701b9b..a3737681b1 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -51,7 +51,7 @@ async fn insert_vid_shares_for_view( ::SignatureKey, ), ) { - let consensus = handle.get_consensus(); + let consensus = handle.consensus(); let mut consensus = consensus.write().await; // `create_and_send_proposal` depends on the `vid_shares` obtaining a vid dispersal. @@ -151,7 +151,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { } insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[2].clone()).await; - let consensus = handle.get_consensus(); + let consensus = handle.consensus(); let mut consensus = consensus.write().await; // `validate_proposal_safety_and_liveness` depends on the existence of prior values in the consensus @@ -164,7 +164,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ViewNumber::new(2), View { view_inner: ViewInner::Leaf { - leaf: leaves[1].get_parent_commitment(), + leaf: leaves[1].parent_commitment(), state: TestValidatedState::default().into(), delta: None, }, diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 15393d9397..43265bd93c 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,7 +1,7 @@ #![allow(clippy::panic)] use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_testing::task_helpers::get_vid_share; +use hotshot_testing::task_helpers::vid_share; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; #[cfg(test)] @@ -147,7 +147,7 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_dac = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[0].data.clone(), leaves[0].clone()), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![exact(VIDShareValidated(vids[0].0[0].clone()))], asserts: vec![], @@ -163,7 +163,7 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_quorum_proposal = TestScriptStage { inputs: vec![ DaCertificateRecv(dacs[2].clone()), - VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), ], outputs: vec![ exact(DaCertificateValidated(dacs[2].clone())), diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index a6ef590bf1..5452df6217 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -16,7 +16,7 @@ use hotshot_task_impls::{ use hotshot_testing::{ predicates::{event::*, upgrade::*}, script::{Expectations, TaskScript}, - task_helpers::get_vid_share, + task_helpers::vid_share, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -84,7 +84,7 @@ async fn test_consensus_task_upgrade() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], outputs: vec![ @@ -98,7 +98,7 @@ async fn test_consensus_task_upgrade() { let view_2 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), ], outputs: vec![ @@ -113,7 +113,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DaCertificateRecv(dacs[2].clone()), - VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), @@ -128,7 +128,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DaCertificateRecv(dacs[3].clone()), - VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[3].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), @@ -237,17 +237,17 @@ async fn test_upgrade_and_consensus_task() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), DaCertificateRecv(dacs[1].clone()), - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), ], vec![ - VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), @@ -428,12 +428,12 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(get_vid_share(&vids[0].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VIDShareRecv(get_vid_share(&vids[1].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, @@ -446,7 +446,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[2].clone()), - VIDShareRecv(get_vid_share(&vids[2].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), @@ -459,7 +459,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[3].clone()), - VIDShareRecv(get_vid_share(&vids[3].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[3].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, proposals[3].data.block_header.builder_commitment.clone(), @@ -472,7 +472,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[4].clone()), - VIDShareRecv(get_vid_share(&vids[4].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[4].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, proposals[4].data.block_header.builder_commitment.clone(), @@ -485,7 +485,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[5].clone()), - VIDShareRecv(get_vid_share(&vids[5].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[5].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, proposals[5].data.block_header.builder_commitment.clone(), @@ -498,7 +498,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[6].clone()), - VIDShareRecv(get_vid_share(&vids[6].0, handle.get_public_key())), + VIDShareRecv(vid_share(&vids[6].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, proposals[6].data.block_header.builder_commitment.clone(), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 776548619a..819d32248e 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -30,7 +30,7 @@ async fn test_vid_task() { // Build the API for node 2. let handle = build_system_handle(2).await.0; - let pub_key = *handle.public_key(); + let pub_key = handle.public_key(); // quorum membership for VID share distribution let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -124,12 +124,12 @@ async fn test_vid_task() { let vid_state = VIDTaskState { api: handle.clone(), - consensus: handle.hotshot.get_consensus(), + consensus: handle.hotshot.consensus(), cur_view: ViewNumber::new(0), vote_collector: None, network: handle.hotshot.networks.quorum_network.clone(), membership: handle.hotshot.memberships.vid_membership.clone().into(), - public_key: *handle.public_key(), + public_key: handle.public_key(), private_key: handle.private_key().clone(), id: handle.hotshot.id, }; diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index e6307df437..e5fb2827e3 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -85,7 +85,7 @@ fn fake_message_eq(message_1: Message, message_2: Message) { } #[instrument] -fn get_pubkey() -> BLSPubKey { +fn pubkey() -> BLSPubKey { // random 32 bytes let mut bytes = [0; 32]; rand::thread_rng().fill_bytes(&mut bytes); @@ -121,7 +121,7 @@ async fn memory_network_spawn_single() { setup_logging(); let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let _pub_key = get_pubkey(); + let _pub_key = pubkey(); } // // Spawning a two MemoryNetworks and connecting them should produce no errors @@ -132,8 +132,8 @@ async fn memory_network_spawn_double() { setup_logging(); let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let _pub_key_1 = get_pubkey(); - let _pub_key_2 = get_pubkey(); + let _pub_key_1 = pubkey(); + let _pub_key_2 = pubkey(); } // Check to make sure direct queue works @@ -148,7 +148,7 @@ async fn memory_network_direct_queue() { let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let pub_key_1 = get_pubkey(); + let pub_key_1 = pubkey(); let network1 = MemoryNetwork::new( pub_key_1, NetworkingMetricsValue::default(), @@ -156,7 +156,7 @@ async fn memory_network_direct_queue() { Option::None, ); - let pub_key_2 = get_pubkey(); + let pub_key_2 = pubkey(); let network2 = MemoryNetwork::new( pub_key_2, NetworkingMetricsValue::default(), @@ -210,14 +210,14 @@ async fn memory_network_broadcast_queue() { // Make and connect the networking instances let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let pub_key_1 = get_pubkey(); + let pub_key_1 = pubkey(); let network1 = MemoryNetwork::new( pub_key_1, NetworkingMetricsValue::default(), group.clone(), Option::None, ); - let pub_key_2 = get_pubkey(); + let pub_key_2 = pubkey(); let network2 = MemoryNetwork::new( pub_key_2, NetworkingMetricsValue::default(), @@ -279,14 +279,14 @@ async fn memory_network_test_in_flight_message_count() { let group: Arc, ::SignatureKey>> = MasterMap::new(); trace!(?group); - let pub_key_1 = get_pubkey(); + let pub_key_1 = pubkey(); let network1 = MemoryNetwork::new( pub_key_1, NetworkingMetricsValue::default(), group.clone(), Option::None, ); - let pub_key_2 = get_pubkey(); + let pub_key_2 = pubkey(); let network2 = MemoryNetwork::new( pub_key_2, NetworkingMetricsValue::default(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 0d05fbbf9b..24187d52ce 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -358,7 +358,7 @@ impl Consensus { ) -> bool, { let mut next_leaf = if let Some(view) = self.validated_state_map.get(&start_from) { - view.get_leaf_commitment() + view.leaf_commitment() .ok_or_else(|| HotShotError::InvalidState { context: format!( "Visited failed view {start_from:?} leaf. Expected successfuil leaf" @@ -371,8 +371,8 @@ impl Consensus { }; while let Some(leaf) = self.saved_leaves.get(&next_leaf) { - let view = leaf.get_view_number(); - if let (Some(state), delta) = self.get_state_and_delta(view) { + let view = leaf.view_number(); + if let (Some(state), delta) = self.state_and_delta(view) { if let Terminator::Exclusive(stop_before) = terminator { if stop_before == view { if ok_when_finished { @@ -381,7 +381,7 @@ impl Consensus { break; } } - next_leaf = leaf.get_parent_commitment(); + next_leaf = leaf.parent_commitment(); if !f(leaf, state, delta) { return Ok(()); } @@ -423,7 +423,7 @@ impl Consensus { .retain(|view_number, _| *view_number >= old_anchor_view); self.validated_state_map .range(old_anchor_view..new_anchor_view) - .filter_map(|(_view_number, view)| view.get_leaf_commitment()) + .filter_map(|(_view_number, view)| view.leaf_commitment()) .for_each(|leaf| { self.saved_leaves.remove(&leaf); }); @@ -438,29 +438,29 @@ impl Consensus { /// if the last decided view's leaf does not exist in the state map or saved leaves, which /// should never happen. #[must_use] - pub fn get_decided_leaf(&self) -> Leaf { + pub fn decided_leaf(&self) -> Leaf { let decided_view_num = self.last_decided_view; let view = self.validated_state_map.get(&decided_view_num).unwrap(); let leaf = view - .get_leaf_commitment() + .leaf_commitment() .expect("Decided leaf not found! Consensus internally inconsistent"); self.saved_leaves.get(&leaf).unwrap().clone() } /// Gets the validated state with the given view number, if in the state map. #[must_use] - pub fn get_state(&self, view_number: TYPES::Time) -> Option<&Arc> { + pub fn state(&self, view_number: TYPES::Time) -> Option<&Arc> { match self.validated_state_map.get(&view_number) { - Some(view) => view.get_state(), + Some(view) => view.state(), None => None, } } /// Gets the validated state and state delta with the given view number, if in the state map. #[must_use] - pub fn get_state_and_delta(&self, view_number: TYPES::Time) -> StateAndDelta { + pub fn state_and_delta(&self, view_number: TYPES::Time) -> StateAndDelta { match self.validated_state_map.get(&view_number) { - Some(view) => view.get_state_and_delta(), + Some(view) => view.state_and_delta(), None => (None, None), } } @@ -471,9 +471,9 @@ impl Consensus { /// If the last decided view's state does not exist in the state map, which should never /// happen. #[must_use] - pub fn get_decided_state(&self) -> Arc { + pub fn decided_state(&self) -> Arc { let decided_view_num = self.last_decided_view; - self.get_state_and_delta(decided_view_num) + self.state_and_delta(decided_view_num) .0 .expect("Decided state not found! Consensus internally inconsistent") } diff --git a/types/src/data.rs b/types/src/data.rs index e07a83e3b9..8d30fcbfeb 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -70,7 +70,7 @@ impl ConsensusTime for ViewNumber { Self(n) } /// Returen the u64 format - fn get_u64(&self) -> u64 { + fn u64(&self) -> u64 { self.0 } } @@ -162,7 +162,7 @@ impl VidDisperse { membership: &TYPES::Membership, ) -> Self { let shares = membership - .get_staked_committee(view_number) + .staked_committee(view_number) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); @@ -191,7 +191,7 @@ impl ViewChangeEvidence { /// Check that the given ViewChangeEvidence is relevant to the current view. pub fn is_valid_for_view(&self, view: &TYPES::Time) -> bool { match self { - ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.get_data().view == *view - 1, + ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.date().view == *view - 1, ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, } } @@ -319,31 +319,31 @@ pub struct QuorumProposal { } impl HasViewNumber for DaProposal { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { self.view_number } } impl HasViewNumber for VidDisperse { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { self.view_number } } impl HasViewNumber for VidDisperseShare { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { self.view_number } } impl HasViewNumber for QuorumProposal { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { self.view_number } } impl HasViewNumber for UpgradeProposal { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { self.view_number } } @@ -424,7 +424,7 @@ impl Display for Leaf { f, "view: {:?}, height: {:?}, justify: {}", self.view_number, - self.get_height(), + self.height(), self.justify_qc ) } @@ -494,34 +494,34 @@ impl Leaf { } /// Time when this leaf was created. - pub fn get_view_number(&self) -> TYPES::Time { + pub fn view_number(&self) -> TYPES::Time { self.view_number } /// Height of this leaf in the chain. /// /// Equivalently, this is the number of leaves before this one in the chain. - pub fn get_height(&self) -> u64 { + pub fn height(&self) -> u64 { self.block_header.block_number() } /// The QC linking this leaf to its parent in the chain. - pub fn get_justify_qc(&self) -> QuorumCertificate { + pub fn justify_qc(&self) -> QuorumCertificate { self.justify_qc.clone() } /// The QC linking this leaf to its parent in the chain. - pub fn get_upgrade_certificate(&self) -> Option> { + pub fn upgrade_certificate(&self) -> Option> { self.upgrade_certificate.clone() } /// Commitment to this leaf's parent. - pub fn get_parent_commitment(&self) -> Commitment { + pub fn parent_commitment(&self) -> Commitment { self.parent_commitment } /// The block header contained in this leaf. - pub fn get_block_header(&self) -> &::BlockHeader { + pub fn block_header(&self) -> &::BlockHeader { &self.block_header } /// Get a mutable reference to the block header contained in this leaf. - pub fn get_block_header_mut(&mut self) -> &mut ::BlockHeader { + pub fn block_header_mut(&mut self) -> &mut ::BlockHeader { &mut self.block_header } /// Fill this leaf with the block payload. @@ -551,13 +551,13 @@ impl Leaf { } /// Optional block payload. - pub fn get_block_payload(&self) -> Option { + pub fn block_payload(&self) -> Option { self.block_payload.clone() } /// A commitment to the block payload contained in this leaf. - pub fn get_payload_commitment(&self) -> VidCommitment { - self.get_block_header().payload_commitment() + pub fn payload_commitment(&self) -> VidCommitment { + self.block_header().payload_commitment() } /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf @@ -572,10 +572,7 @@ impl Leaf { parent: &Self, decided_upgrade_certificate: &Option>, ) -> Result<()> { - match ( - self.get_upgrade_certificate(), - parent.get_upgrade_certificate(), - ) { + match (self.upgrade_certificate(), parent.upgrade_certificate()) { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. @@ -584,8 +581,8 @@ impl Leaf { // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. (None, Some(parent_cert)) => { - ensure!(self.get_view_number() > parent_cert.data.new_version_first_view - || (self.get_view_number() > parent_cert.data.decide_by && decided_upgrade_certificate.is_none()), + ensure!(self.view_number() > parent_cert.data.new_version_first_view + || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); } @@ -598,7 +595,7 @@ impl Leaf { } // This check should be added once we sort out the genesis leaf/justify_qc issue. - // ensure!(self.get_parent_commitment() == parent_leaf.commit(), "The commitment of the parent leaf does not match the specified parent commitment."); + // ensure!(self.parent_commitment() == parent_leaf.commit(), "The commitment of the parent leaf does not match the specified parent commitment."); Ok(()) } @@ -644,7 +641,7 @@ pub fn serialize_signature2( let mut signatures_bytes = vec![]; signatures_bytes.extend("Yes".as_bytes()); - let (sig, proof) = TYPES::SignatureKey::get_sig_proof(signatures); + let (sig, proof) = TYPES::SignatureKey::sig_proof(signatures); let proof_bytes = bincode_opts() .serialize(&proof.as_bitslice()) .expect("This serialization shouldn't be able to fail"); @@ -663,11 +660,11 @@ impl Committable for Leaf { // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) - .u64_field("block number", self.get_height()) + .u64_field("block number", self.height()) .field("parent Leaf commitment", self.parent_commitment) .var_size_field( "block payload commitment", - self.get_payload_commitment().as_ref(), + self.payload_commitment().as_ref(), ) .field("justify qc", self.justify_qc.commit()) .optional("upgrade certificate", &self.upgrade_certificate) @@ -690,7 +687,7 @@ impl Leaf { Leaf { view_number: *view_number, justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.get_data().leaf_commit, + parent_commitment: justify_qc.date().leaf_commit, block_header: block_header.clone(), upgrade_certificate: upgrade_certificate.clone(), block_payload: None, diff --git a/types/src/lib.rs b/types/src/lib.rs index 4fe91f8d8f..64bee0a46d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -93,9 +93,9 @@ impl ValidatorConfig { } /// get the public config of the validator - pub fn get_public_config(&self) -> PeerConfig { + pub fn public_config(&self) -> PeerConfig { PeerConfig { - stake_table_entry: self.public_key.get_stake_table_entry(self.stake_value), + stake_table_entry: self.public_key.stake_table_entry(self.stake_value), state_ver_key: self.state_key_pair.0.ver_key(), } } @@ -148,7 +148,7 @@ impl PeerConfig { impl Default for PeerConfig { fn default() -> Self { let default_validator_config = ValidatorConfig::::default(); - default_validator_config.get_public_config() + default_validator_config.public_config() } } diff --git a/types/src/message.rs b/types/src/message.rs index 1499b25b36..933aa692c8 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -54,8 +54,8 @@ impl NetworkMsg for Message {} impl ViewMessage for Message { /// get the view number out of a message - fn get_view_number(&self) -> TYPES::Time { - self.kind.get_view_number() + fn view_number(&self) -> TYPES::Time { + self.kind.view_number() } fn purpose(&self) -> MessagePurpose { self.kind.purpose() @@ -123,7 +123,7 @@ impl From> for MessageKind { } impl ViewMessage for MessageKind { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { match &self { MessageKind::Consensus(message) => message.view_number(), MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, @@ -221,32 +221,26 @@ impl SequencingMessage { GeneralConsensusMessage::Proposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt - p.data.get_view_number() + p.data.view_number() } - GeneralConsensusMessage::Vote(vote_message) => vote_message.get_view_number(), - GeneralConsensusMessage::TimeoutVote(message) => message.get_view_number(), + GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), + GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncCommitVote(message) => { - message.get_view_number() - } - GeneralConsensusMessage::ViewSyncFinalizeVote(message) => { - message.get_view_number() + message.view_number() } + GeneralConsensusMessage::ViewSyncCommitVote(message) => message.view_number(), + GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { - message.get_view_number() + message.view_number() } GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { - message.get_view_number() + message.view_number() } GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { - message.get_view_number() - } - GeneralConsensusMessage::UpgradeProposal(message) => { - message.data.get_view_number() + message.view_number() } - GeneralConsensusMessage::UpgradeVote(message) => message.get_view_number(), + GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), + GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), } } SequencingMessage::Da(da_message) => { @@ -254,11 +248,11 @@ impl SequencingMessage { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt - p.data.get_view_number() + p.data.view_number() } - DaConsensusMessage::DaVote(vote_message) => vote_message.get_view_number(), + DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, - DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.get_view_number(), + DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), } } } @@ -335,8 +329,8 @@ where /// # Errors /// Returns an error when the proposal signature is invalid. pub fn validate_signature(&self, quorum_membership: &TYPES::Membership) -> Result<()> { - let view_number = self.data.get_view_number(); - let view_leader_key = quorum_membership.get_leader(view_number); + let view_number = self.data.view_number(); + let view_leader_key = quorum_membership.leader(view_number); let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 56e1aa7cf7..6e1852f748 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -80,18 +80,18 @@ impl SignatureKey for BLSPubKey { (kp.ver_key(), kp.sign_key_ref().clone()) } - fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry { + fn stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry { StakeTableEntry { stake_key: *self, stake_amount: U256::from(stake), } } - fn get_public_key(entry: &Self::StakeTableEntry) -> Self { + fn public_key(entry: &Self::StakeTableEntry) -> Self { entry.stake_key } - fn get_public_parameter( + fn public_parameter( stake_entries: Vec, threshold: U256, ) -> Self::QCParams { @@ -107,7 +107,7 @@ impl SignatureKey for BLSPubKey { BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() } - fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { + fn sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { signature.clone() } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 92d1d2fef2..2785e13a8a 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -117,8 +117,8 @@ impl> if self.view_number == TYPES::Time::genesis() { return true; } - let real_qc_pp = ::get_public_parameter( - membership.get_committee_qc_stake_table(), + let real_qc_pp = ::public_parameter( + membership.committee_qc_stake_table(), U256::from(Self::threshold(membership)), ); ::check( @@ -130,10 +130,10 @@ impl> fn threshold>(membership: &MEMBERSHIP) -> u64 { THRESHOLD::threshold(membership) } - fn get_data(&self) -> &Self::Voteable { + fn date(&self) -> &Self::Voteable { &self.data } - fn get_data_commitment(&self) -> Commitment { + fn date_commitment(&self) -> Commitment { self.vote_commitment } } @@ -141,7 +141,7 @@ impl> impl> HasViewNumber for SimpleCertificate { - fn get_view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::Time { self.view_number } } diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 7fa71a27df..cc65c2aa17 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -116,7 +116,7 @@ pub struct SimpleVote { } impl HasViewNumber for SimpleVote { - fn get_view_number(&self) -> ::Time { + fn view_number(&self) -> ::Time { self.view_number } } @@ -124,19 +124,19 @@ impl HasViewNumber for SimpleV impl Vote for SimpleVote { type Commitment = DATA; - fn get_signing_key(&self) -> ::SignatureKey { + fn signing_key(&self) -> ::SignatureKey { self.signature.0.clone() } - fn get_signature(&self) -> ::PureAssembledSignatureType { + fn signature(&self) -> ::PureAssembledSignatureType { self.signature.1.clone() } - fn get_data(&self) -> &DATA { + fn date(&self) -> &DATA { &self.data } - fn get_data_commitment(&self) -> Commitment { + fn date_commitment(&self) -> Commitment { self.data.commit() } } diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs index 3884a7eee6..feac5a38da 100644 --- a/types/src/stake_table.rs +++ b/types/src/stake_table.rs @@ -17,14 +17,14 @@ pub struct StakeTableEntry { impl StakeTableEntryType for StakeTableEntry { /// Get the stake amount - fn get_stake(&self) -> U256 { + fn stake(&self) -> U256 { self.stake_amount } } impl StakeTableEntry { /// Get the public key - pub fn get_key(&self) -> &K { + pub fn key(&self) -> &K { &self.stake_key } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 2e64666e49..4f83e74ee8 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -95,21 +95,19 @@ pub trait BlockPayload: &self, metadata: &Self::Metadata, ) -> Vec> { - self.get_transactions(metadata) - .map(|tx| tx.commit()) - .collect() + self.transactions(metadata).map(|tx| tx.commit()).collect() } /// Number of transactions in the block. fn num_transactions(&self, metadata: &Self::Metadata) -> usize { - self.get_transactions(metadata).count() + self.transactions(metadata).count() } /// Generate commitment that builders use to sign block options. fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment; /// Get the transactions in the payload. - fn get_transactions<'a>( + fn transactions<'a>( &'a self, metadata: &'a Self::Metadata, ) -> impl 'a + Iterator; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 08829b9ae2..c192e94680 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -34,28 +34,28 @@ pub trait Membership: ) -> Self; /// Clone the public key and corresponding stake table for current elected committee - fn get_committee_qc_stake_table( + fn committee_qc_stake_table( &self, ) -> Vec<::StakeTableEntry>; /// The leader of the committee for view `view_number`. - fn get_leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; /// The staked members of the committee for view `view_number`. - fn get_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + fn staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; /// The non-staked members of the committee for view `view_number`. - fn get_non_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + fn non_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; /// Get whole (staked + non-staked) committee for view `view_number`. - fn get_whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; + fn whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; /// Check if a key has stake fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; /// Get the stake table entry for a public key, returns `None` if the /// key is not in the table - fn get_stake( + fn stake( &self, pub_key: &TYPES::SignatureKey, ) -> Option<::StakeTableEntry>; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 15e4e32d0c..80379770f6 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -181,7 +181,7 @@ impl NetworkMsg for Vec {} /// a message pub trait ViewMessage { /// get the view out of the message - fn get_view_number(&self) -> TYPES::Time; + fn view_number(&self) -> TYPES::Time; // TODO move out of this trait. /// get the purpose of the message fn purpose(&self) -> MessagePurpose; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 9a8504d7bb..280c8e00de 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -172,7 +172,7 @@ pub trait ConsensusTime: /// Create a new instance of this time unit fn new(val: u64) -> Self; /// Get the u64 format of time - fn get_u64(&self) -> u64; + fn u64(&self) -> u64; } /// Trait with all the type definitions that are used in the current hotshot setup. diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 9de1675063..9c460d5c3f 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -17,7 +17,7 @@ use crate::{utils::BuilderCommitment, vid::VidSchemeType}; /// Type representing stake table entries in a `StakeTable` pub trait StakeTableEntryType { /// Get the stake value - fn get_stake(&self) -> U256; + fn stake(&self) -> U256; } /// Trait for abstracting public key signatures @@ -117,13 +117,13 @@ pub trait SignatureKey: fn generated_from_seed_indexed(seed: [u8; 32], index: u64) -> (Self, Self::PrivateKey); /// get the stake table entry from the public key and stake value - fn get_stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; + fn stake_table_entry(&self, stake: u64) -> Self::StakeTableEntry; /// only get the public key from the stake table entry - fn get_public_key(entry: &Self::StakeTableEntry) -> Self; + fn public_key(entry: &Self::StakeTableEntry) -> Self; /// get the public parameter for the assembled signature checking - fn get_public_parameter( + fn public_parameter( stake_entries: Vec, threshold: U256, ) -> Self::QCParams; @@ -132,7 +132,7 @@ pub trait SignatureKey: fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; /// get the assembled signature and the `BitVec` separately from the assembled signature - fn get_sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); + fn sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); /// assemble the signature from the partial signature and the indication of signers in `BitVec` fn assemble( diff --git a/types/src/utils.rs b/types/src/utils.rs index 73cacb7b47..98014a5b0d 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -75,9 +75,7 @@ pub type StateAndDelta = ( impl ViewInner { /// Return the underlying undecide leaf commitment and validated state if they exist. #[must_use] - pub fn get_leaf_and_state( - &self, - ) -> Option<(LeafCommitment, &Arc)> { + pub fn leaf_and_state(&self) -> Option<(LeafCommitment, &Arc)> { if let Self::Leaf { leaf, state, .. } = self { Some((*leaf, state)) } else { @@ -87,7 +85,7 @@ impl ViewInner { /// return the underlying leaf hash if it exists #[must_use] - pub fn get_leaf_commitment(&self) -> Option> { + pub fn leaf_commitment(&self) -> Option> { if let Self::Leaf { leaf, .. } = self { Some(*leaf) } else { @@ -97,7 +95,7 @@ impl ViewInner { /// return the underlying validated state if it exists #[must_use] - pub fn get_state(&self) -> Option<&Arc> { + pub fn state(&self) -> Option<&Arc> { if let Self::Leaf { state, .. } = self { Some(state) } else { @@ -107,7 +105,7 @@ impl ViewInner { /// Return the underlying validated state and state delta if they exist. #[must_use] - pub fn get_state_and_delta(&self) -> StateAndDelta { + pub fn state_and_delta(&self) -> StateAndDelta { if let Self::Leaf { state, delta, .. } = self { (Some(Arc::clone(state)), delta.clone()) } else { @@ -117,7 +115,7 @@ impl ViewInner { /// return the underlying block paylod commitment if it exists #[must_use] - pub fn get_payload_commitment(&self) -> Option { + pub fn payload_commitment(&self) -> Option { if let Self::Da { payload_commitment } = self { Some(*payload_commitment) } else { diff --git a/types/src/vote.rs b/types/src/vote.rs index b633eef058..73920d6417 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -29,20 +29,20 @@ pub trait Vote: HasViewNumber { type Commitment: Voteable; /// Get the signature of the vote sender - fn get_signature(&self) -> ::PureAssembledSignatureType; + fn signature(&self) -> ::PureAssembledSignatureType; /// Gets the data which was voted on by this vote - fn get_data(&self) -> &Self::Commitment; + fn date(&self) -> &Self::Commitment; /// Gets the Data commitment of the vote - fn get_data_commitment(&self) -> Commitment; + fn date_commitment(&self) -> Commitment; /// Gets the public signature key of the votes creator/sender - fn get_signing_key(&self) -> TYPES::SignatureKey; + fn signing_key(&self) -> TYPES::SignatureKey; } /// Any type that is associated with a view pub trait HasViewNumber { /// Returns the view number the type refers to. - fn get_view_number(&self) -> TYPES::Time; + fn view_number(&self) -> TYPES::Time; } /** @@ -71,9 +71,9 @@ pub trait Certificate: HasViewNumber { // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>(membership: &MEMBERSHIP) -> u64; /// Get the commitment which was voted on - fn get_data(&self) -> &Self::Voteable; + fn date(&self) -> &Self::Voteable; /// Get the vote commitment which the votes commit to - fn get_data_commitment(&self) -> Commitment; + fn date_commitment(&self) -> Commitment; } /// Mapping of vote commitment to signatures and bitvec type SignersMap = HashMap< @@ -108,18 +108,18 @@ impl, CERT: Certificate Either<(), CERT> { - let key = vote.get_signing_key(); + let key = vote.signing_key(); - let vote_commitment = vote.get_data_commitment(); - if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { - error!("Invalid vote! Vote Data {:?}", vote.get_data()); + let vote_commitment = vote.date_commitment(); + if !key.validate(&vote.signature(), vote_commitment.as_ref()) { + error!("Invalid vote! Vote Data {:?}", vote.date()); return Either::Left(()); } - let Some(stake_table_entry) = membership.get_stake(&key) else { + let Some(stake_table_entry) = membership.stake(&key) else { return Either::Left(()); }; - let stake_table = membership.get_committee_qc_stake_table(); + let stake_table = membership.committee_qc_stake_table(); let Some(vote_node_id) = stake_table .iter() .position(|x| *x == stake_table_entry.clone()) @@ -128,7 +128,7 @@ impl, CERT: Certificate::PureAssembledSignatureType = - vote.get_signature(); + vote.signature(); let (total_stake_casted, total_vote_map) = self .vote_outcomes @@ -151,13 +151,13 @@ impl, CERT: Certificate= CERT::threshold(membership).into() { // Assemble QC let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = - ::get_public_parameter( + ::public_parameter( stake_table, U256::from(CERT::threshold(membership)), ); @@ -169,10 +169,10 @@ impl, CERT: Certificate Date: Thu, 16 May 2024 10:56:50 -0400 Subject: [PATCH 1036/1393] Don't log private key (#3176) --- types/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/types/src/lib.rs b/types/src/lib.rs index 64bee0a46d..31f9c45f76 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -2,6 +2,7 @@ use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use bincode::Options; +use derivative::Derivative; use displaydoc::Display; use light_client::StateVerKey; use tracing::error; @@ -56,13 +57,15 @@ pub enum ExecutionType { Incremental, } -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Display)] +#[derive(serde::Serialize, serde::Deserialize, Clone, Derivative, Display)] #[serde(bound(deserialize = ""))] +#[derivative(Debug(bound = ""))] /// config for validator, including public key, private key, stake value pub struct ValidatorConfig { /// The validator's public key and stake value pub public_key: KEY, /// The validator's private key, should be in the mempool, not public + #[derivative(Debug = "ignore")] pub private_key: KEY::PrivateKey, /// The validator's stake pub stake_value: u64, From e9b470798ec6b749a38d713bedae00105d0b568b Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 16 May 2024 12:27:15 -0400 Subject: [PATCH 1037/1393] Add version as an argument to block header (#3123) --- example-types/Cargo.toml | 1 + example-types/src/block_types.rs | 2 ++ example-types/src/state_types.rs | 2 ++ hotshot/src/tasks/task_state.rs | 1 + task-impls/src/consensus/helpers.rs | 16 +++++++++++++- task-impls/src/consensus/mod.rs | 22 ++++++++++++++----- task-impls/src/quorum_proposal.rs | 12 ++++++++++ testing/tests/tests_1/gen_key_pair.rs | 5 ++--- testing/tests/tests_1/message.rs | 2 -- testing/tests/tests_1/quorum_proposal_task.rs | 9 +++----- testing/tests/tests_1/view_sync_task.rs | 9 ++++---- .../tests_3/test_with_failures_half_f.rs | 2 +- testing/tests/tests_4/test_with_failures_f.rs | 2 +- testing/tests/tests_5/timeout.rs | 2 +- types/src/traits/block_contents.rs | 2 ++ types/src/traits/states.rs | 2 ++ 16 files changed, 65 insertions(+), 26 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 3973b724ef..d958b6d8f9 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -33,6 +33,7 @@ async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } hotshot-task = { path = "../task" } +vbs = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index a2d516355e..0412f92e4d 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -19,6 +19,7 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; use snafu::Snafu; use time::OffsetDateTime; +use vbs::version::Version; use crate::{node_types::TestTypes, state_types::TestInstanceState}; @@ -245,6 +246,7 @@ impl> Block _metadata: ::Metadata, _builder_fee: BuilderFee, _vid_common: VidCommon, + _version: Version, ) -> Result { let parent = parent_leaf.block_header(); diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 43d097e825..f02a48c046 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -13,6 +13,7 @@ use hotshot_types::{ vid::VidCommon, }; use serde::{Deserialize, Serialize}; +use vbs::version::Version; use crate::block_types::{TestBlockPayload, TestTransaction}; pub use crate::node_types::TestTypes; @@ -75,6 +76,7 @@ impl ValidatedState for TestValidatedState { _parent_leaf: &Leaf, _proposed_header: &TYPES::BlockHeader, _vid_common: VidCommon, + _version: Version, ) -> Result<(Self, Self::Delta), Self::Error> { Ok(( TestValidatedState { diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index cabecde622..aa2a3efe5c 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -265,6 +265,7 @@ impl> CreateTaskState timeout_task: None, round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, + version: *handle.hotshot.version.read().await, } } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index aa8d7729ee..9ea5651fd8 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -37,6 +37,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, warn}; +use vbs::version::Version; #[cfg(not(feature = "dependency-tasks"))] use super::ConsensusTaskState; @@ -169,6 +170,7 @@ pub async fn create_and_send_proposal( proposal_cert: Option>, round_start_delay: u64, instance_state: Arc, + version: Version, ) { let consensus_read = consensus.read().await; let Some(Some(vid_share)) = consensus_read @@ -188,6 +190,7 @@ pub async fn create_and_send_proposal( commitment_and_metadata.metadata, commitment_and_metadata.fee, vid_share.data.common.clone(), + version, ) .await { @@ -384,6 +387,7 @@ pub(crate) async fn publish_proposal_from_upgrade_cert( upgrade_cert: UpgradeCertificate, delay: u64, instance_state: Arc, + version: Version, ) -> Result> { let (parent_leaf, state) = parent_leaf_and_state( cur_view, @@ -430,6 +434,7 @@ pub(crate) async fn publish_proposal_from_upgrade_cert( None, delay, instance_state, + version, ) .await; })) @@ -452,6 +457,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, + version: Version, ) -> Result> { let (parent_leaf, state) = parent_leaf_and_state( cur_view, @@ -511,6 +517,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( proposal_certificate, delay, instance_state, + version, ) .await; }); @@ -535,6 +542,7 @@ pub async fn publish_proposal_if_able( commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, + version: Version, ) -> Result> { if let Some(upgrade_cert) = decided_upgrade_cert { publish_proposal_from_upgrade_cert( @@ -548,6 +556,7 @@ pub async fn publish_proposal_if_able( upgrade_cert, delay, instance_state, + version, ) .await } else { @@ -565,6 +574,7 @@ pub async fn publish_proposal_if_able( commitment_and_metadata, proposal_cert, instance_state, + version, ) .await } @@ -588,6 +598,7 @@ pub async fn handle_quorum_proposal_recv>>, task_state: &mut TemporaryProposalRecvCombinedType, + version: Version, ) -> Result>> { let sender = sender.clone(); debug!( @@ -746,6 +757,7 @@ pub async fn handle_quorum_proposal_recv, instance_state: Arc, vote_info: TemporaryVoteInfo, + version: Version, ) -> bool { #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::simple_vote::QuorumVote; @@ -1130,6 +1143,7 @@ pub async fn update_state_and_vote_if_able> ConsensusTaskState self.payload_commitment_and_metadata.clone(), self.proposal_cert.clone(), Arc::clone(&self.instance_state), + *self.version.read().await, ) .await?; @@ -240,7 +241,7 @@ impl> ConsensusTaskState /// Spawn a vote task for the given view. Will try to vote /// and emit a `QuorumVoteSend` event we should vote on the current proposal #[cfg(not(feature = "dependency-tasks"))] - fn spawn_vote_task( + async fn spawn_vote_task( &mut self, view: TYPES::Time, event_stream: Sender>>, @@ -259,6 +260,7 @@ impl> ConsensusTaskState let quorum_mem = Arc::clone(&self.quorum_membership); let da_mem = Arc::clone(&self.da_membership); let instance_state = Arc::clone(&self.instance_state); + let version = *self.version.read().await; let handle = async_spawn(async move { update_state_and_vote_if_able::( view, @@ -269,6 +271,7 @@ impl> ConsensusTaskState quorum_mem, instance_state, (priv_key, upgrade, da_mem, event_stream), + version, ) .await; }); @@ -282,17 +285,24 @@ impl> ConsensusTaskState event: Arc>, event_stream: Sender>>, ) { + let version = *self.version.read().await; match event.as_ref() { #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!("proposal recv view: {:?}", proposal.data.view_number()); - match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self) - .await + match handle_quorum_proposal_recv( + proposal, + sender, + event_stream.clone(), + self, + version, + ) + .await { Ok(Some(current_proposal)) => { let view = current_proposal.view_number(); self.current_proposal = Some(current_proposal); - self.spawn_vote_task(view, event_stream); + self.spawn_vote_task(view, event_stream).await; } Ok(None) => {} Err(e) => debug!("Failed to propose {e:#}"), @@ -452,7 +462,7 @@ impl> ConsensusTaskState if proposal.view_number() != view { return; } - self.spawn_vote_task(view, event_stream); + self.spawn_vote_task(view, event_stream).await; } #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::VIDShareRecv(disperse) => { @@ -492,7 +502,7 @@ impl> ConsensusTaskState if proposal.view_number() != view { return; } - self.spawn_vote_task(view, event_stream.clone()); + self.spawn_vote_task(view, event_stream.clone()).await; } HotShotEvent::ViewChange(new_view) => { let new_view = *new_view; diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index fc6a4153f6..3a21f60ff5 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -25,6 +25,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; +use vbs::version::Version; #[cfg(feature = "dependency-tasks")] use crate::consensus::helpers::handle_quorum_proposal_validated; @@ -98,6 +99,9 @@ struct ProposalDependencyHandle { /// The node's id #[allow(dead_code)] id: u64, + + /// Current version of consensus + version: Version, } impl HandleDepOutput for ProposalDependencyHandle { @@ -188,6 +192,7 @@ impl HandleDepOutput for ProposalDependencyHandle { commit_and_metadata, None, Arc::clone(&self.instance_state), + self.version, ) .await { @@ -247,6 +252,9 @@ pub struct QuorumProposalTaskState // pub decided_upgrade_cert: Option>, /// The node's id pub id: u64, + + /// Current version of consensus + pub version: Version, } impl> QuorumProposalTaskState { @@ -469,6 +477,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState>>, ) { match event.as_ref() { + HotShotEvent::VersionUpgrade(version) => { + self.version = *version; + } HotShotEvent::ProposeNow(view, _) => { self.create_dependency_task_if_new( *view, diff --git a/testing/tests/tests_1/gen_key_pair.rs b/testing/tests/tests_1/gen_key_pair.rs index eadb20575d..2a7637b33c 100644 --- a/testing/tests/tests_1/gen_key_pair.rs +++ b/testing/tests/tests_1/gen_key_pair.rs @@ -3,12 +3,11 @@ #[cfg(test)] mod tests { use core::panic; + use std::{env, fs::File, io::prelude::*}; + use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_orchestrator::config::ValidatorConfigFile; use hotshot_types::ValidatorConfig; - use std::env; - use std::fs::File; - use std::io::prelude::*; #[test] fn gen_key_pair_gen_from_config_file() { let config_file = ValidatorConfigFile::from_file("config/ValidatorConfigFile.toml"); diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index b531218b0d..83cbc00bb3 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -2,9 +2,7 @@ use std::marker::PhantomData; use committable::Committable; - use hotshot_example_types::node_types::TestTypes; - use hotshot_types::{ message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, signature_key::BLSPubKey, diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index a3737681b1..de880fe69a 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -10,18 +10,15 @@ use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposa use hotshot_testing::{ predicates::event::quorum_proposal_send, script::{run_test_script, TestScriptStage}, - task_helpers::{build_cert, key_pair_for_id}, - task_helpers::{build_system_handle, vid_scheme_from_view_number}, + task_helpers::{build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number}, view_generator::TestViewGenerator, }; use hotshot_types::{ consensus::{CommitmentAndMetadata, ProposalDependencyData}, - data::null_block, - data::{VidDisperseShare, ViewChangeEvidence, ViewNumber}, + data::{null_block, VidDisperseShare, ViewChangeEvidence, ViewNumber}, message::Proposal, simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, - simple_vote::ViewSyncFinalizeData, - simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeVote}, + simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData, ViewSyncFinalizeVote}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 34141310cd..ffa23b76a5 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -1,16 +1,15 @@ -use hotshot::tasks::task_state::CreateTaskState; -use hotshot::types::SystemContextHandle; +use std::collections::HashMap; + +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -use std::collections::HashMap; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { - use hotshot_task_impls::harness::run_harness; - use hotshot_task_impls::view_sync::ViewSyncTaskState; + use hotshot_task_impls::{harness::run_harness, view_sync::ViewSyncTaskState}; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_vote::ViewSyncPreCommitData; diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 27383cfdb1..a79cc99920 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -1,5 +1,5 @@ use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl }, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, }; use hotshot_macros::cross_tests; diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 690269ad11..43a4fe51b5 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -1,5 +1,5 @@ use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl }, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, }; use hotshot_macros::cross_tests; diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 250852defc..189342f4f2 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -6,7 +6,7 @@ async fn test_timeout() { use std::time::Duration; - use hotshot_example_types::node_types::{TestTypes, MemoryImpl}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 4f83e74ee8..3d3f2241a5 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -14,6 +14,7 @@ use std::{ use committable::{Commitment, Committable}; use jf_vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use vbs::version::Version; use super::signature_key::BuilderSignatureKey; use crate::{ @@ -188,6 +189,7 @@ pub trait BlockHeader: metadata: ::Metadata, builder_fee: BuilderFee, vid_common: VidCommon, + version: Version, ) -> impl Future> + Send; /// Build the genesis header, payload, and metadata. diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 07c38bead6..624dbdf248 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -7,6 +7,7 @@ use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use vbs::version::Version; use super::block_contents::TestableBlock; use crate::{ @@ -61,6 +62,7 @@ pub trait ValidatedState: parent_leaf: &Leaf, proposed_header: &TYPES::BlockHeader, vid_common: VidCommon, + version: Version, ) -> impl Future> + Send; /// Construct the state with the given block header. From 6859bec41cec32028b14e2d982899a1a6c46e542 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 17 May 2024 11:19:31 -0400 Subject: [PATCH 1038/1393] Don't lock storage (#3184) * Don't lock storage * don't lock header::new * one more place * logging * revert change --- task-impls/src/consensus/helpers.rs | 39 +++++++++++++++-------------- task-impls/src/da.rs | 6 ++++- 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 9ea5651fd8..57795b4feb 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -8,7 +8,7 @@ use std::{ use anyhow::{bail, ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; @@ -176,11 +176,12 @@ pub async fn create_and_send_proposal( let Some(Some(vid_share)) = consensus_read .vid_shares() .get(&view) - .map(|shares| shares.get(&public_key)) + .map(|shares| shares.get(&public_key).cloned()) else { error!("Cannot propopse without our VID share, view {:?}", view); return; }; + drop(consensus_read); let block_header = match TYPES::BlockHeader::new( state.as_ref(), instance_state.as_ref(), @@ -189,7 +190,7 @@ pub async fn create_and_send_proposal( commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fee, - vid_share.data.common.clone(), + vid_share.data.common, version, ) .await @@ -200,7 +201,6 @@ pub async fn create_and_send_proposal( return; } }; - drop(consensus_read); let proposal = QuorumProposal { block_header, @@ -640,7 +640,7 @@ pub async fn handle_quorum_proposal_recv consensus_write.locked_view(); + let consensus_read = task_state.consensus.read().await; + let liveness_check = justify_qc.view_number() > consensus_read.locked_view(); - let high_qc = consensus_write.high_qc().clone(); - let locked_view = consensus_write.locked_view(); + let high_qc = consensus_read.high_qc().clone(); + let locked_view = consensus_read.locked_view(); - drop(consensus_write); + drop(consensus_read); let mut current_proposal = None; if liveness_check { @@ -1207,19 +1209,18 @@ pub async fn update_state_and_vote_if_able, A: ConsensusApi + // the `DaProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO ED Come back to this - we probably don't need this, but we should also never receive a DAC where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { warn!("Throwing away DA proposal that is more than one view older"); return None; @@ -145,6 +144,11 @@ impl, A: ConsensusApi + .await; } HotShotEvent::DaProposalValidated(proposal, sender) => { + let curr_view = self.consensus.read().await.cur_view(); + if curr_view > proposal.data.view_number() + 1 { + tracing::debug!("Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", curr_view, proposal.data.view_number()); + return None; + } // Proposal is fresh and valid, notify the application layer self.api .send_event(Event { From cc0cbedcdc073d4645004e5e5647b598b1a7d65e Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Sat, 18 May 2024 00:16:16 +0800 Subject: [PATCH 1039/1393] [CLEANUP] - Capitalize acronyms (#3155) * Remove get_ * Rename acronyms * Make output_event_stream private and revert changes to orchestrator * Fix conflict * Revert changes to orchestrator * Revert a name change * Rename Dac --- examples/combined/types.rs | 2 +- examples/infra/mod.rs | 2 +- examples/push-cdn/types.rs | 2 +- hotshot-qc/src/bit_vector.rs | 60 +++++++++---------- hotshot-qc/src/bit_vector_old.rs | 58 +++++++++--------- hotshot-qc/src/snarked/circuit.rs | 4 +- hotshot/src/lib.rs | 4 +- hotshot/src/tasks/mod.rs | 4 +- hotshot/src/tasks/task_state.rs | 8 +-- .../src/traits/networking/push_cdn_network.rs | 6 +- task-impls/HotShot_event_architecture.drawio | 2 +- task-impls/src/consensus/mod.rs | 8 +-- task-impls/src/events.rs | 6 +- task-impls/src/network.rs | 6 +- task-impls/src/quorum_proposal.rs | 18 +++--- task-impls/src/quorum_vote.rs | 12 ++-- task-impls/src/request.rs | 8 +-- task-impls/src/response.rs | 2 +- task-impls/src/vid.rs | 6 +- task-impls/src/vote_collection.rs | 4 +- testing/src/task_helpers.rs | 4 +- testing/tests/tests_1/consensus_task.rs | 22 +++---- testing/tests/tests_1/proposal_ordering.rs | 8 +-- testing/tests/tests_1/quorum_proposal_task.rs | 6 +- testing/tests/tests_1/quorum_vote_task.rs | 12 ++-- testing/tests/tests_1/upgrade_task.rs | 32 +++++----- testing/tests/tests_1/vid_task.rs | 6 +- types/src/data.rs | 2 +- types/src/event.rs | 4 +- types/src/message.rs | 4 +- types/src/qc.rs | 58 +++++++++--------- types/src/signature_key.rs | 28 ++++----- types/src/simple_certificate.rs | 4 +- types/src/simple_vote.rs | 4 +- types/src/traits/network.rs | 2 +- types/src/traits/qc.rs | 18 +++--- types/src/traits/signature_key.rs | 14 ++--- types/src/vote.rs | 4 +- 38 files changed, 227 insertions(+), 227 deletions(-) diff --git a/examples/combined/types.rs b/examples/combined/types.rs index 568629b8d1..90b5bff2d8 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -14,7 +14,7 @@ pub struct NodeImpl {} /// convenience type alias pub type DaNetwork = CombinedNetworks; /// convenience type alias -pub type VIDNetwork = CombinedNetworks; +pub type VidNetwork = CombinedNetworks; /// convenience type alias pub type QuorumNetwork = CombinedNetworks; /// convenience type alias diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 8cad0b0d4d..559c64bbce 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -628,7 +628,7 @@ where // See if we should be DA, subscribe to the DA topic if so let mut topics = vec![Topic::Global]; if config.config.my_own_validator_config.is_da { - topics.push(Topic::DA); + topics.push(Topic::Da); } // Create the network and await the initial connection diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 09bd0ef8c2..7930207365 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -11,7 +11,7 @@ pub struct NodeImpl {} /// Convenience type alias pub type DaNetwork = PushCdnNetwork; /// Convenience type alias -pub type VIDNetwork = PushCdnNetwork; +pub type VidNetwork = PushCdnNetwork; /// Convenience type alias pub type QuorumNetwork = PushCdnNetwork; /// Convenience type alias diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs index fff3faa8d2..2dfb10e2f4 100644 --- a/hotshot-qc/src/bit_vector.rs +++ b/hotshot-qc/src/bit_vector.rs @@ -1,4 +1,4 @@ -//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! Implementation for `BitVectorQc` that uses BLS signature + Bit vector. //! See more details in `HotShot` paper. use ark_std::{ @@ -21,14 +21,14 @@ use serde::{Deserialize, Serialize}; use typenum::U32; /// An implementation of QC using BLS signature and a bit-vector. -pub struct BitVectorQC( +pub struct BitVectorQc( PhantomData, PhantomData, ); -/// Public parameters of [`BitVectorQC`] +/// Public parameters of [`BitVectorQc`] #[derive(Serialize, Deserialize, PartialEq, Debug)] -pub struct QCParams { +pub struct QcParams { /// the stake table (snapshot) this QC is verified against pub stake_table: ST, /// threshold for the accumulated "weight" of votes to form a QC @@ -37,7 +37,7 @@ pub struct QCParams { pub agg_sig_pp: A::PublicParameter, } -impl QuorumCertificate for BitVectorQC +impl QuorumCertificate for BitVectorQc where A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a> + PartialEq, ST: StakeTableScheme @@ -45,12 +45,12 @@ where + for<'a> Deserialize<'a> + PartialEq, { - type QCProverParams = QCParams; + type QcProverParams = QcParams; // TODO: later with SNARKs we'll use a smaller verifier parameter - type QCVerifierParams = QCParams; + type QcVerifierParams = QcParams; - type QC = (A::Signature, BitVec); + type Qc = (A::Signature, BitVec); type MessageLength = U32; type QuorumSize = U256; @@ -65,10 +65,10 @@ where } fn assemble( - qc_pp: &Self::QCProverParams, + qc_pp: &Self::QcProverParams, signers: &BitSlice, sigs: &[A::Signature], - ) -> Result { + ) -> Result { let st_len = qc_pp.stake_table.len(SnapshotVersion::LastEpochStart)?; if signers.len() != st_len { return Err(ParameterError(format!( @@ -120,9 +120,9 @@ where } fn check( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result { let (sig, signers) = qc; let st_len = qc_vp.stake_table.len(SnapshotVersion::LastEpochStart)?; @@ -169,9 +169,9 @@ where } fn trace( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray<::MessageUnit, Self::MessageLength>, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result::VerificationKey>, PrimitivesError> { let (_sig, signers) = qc; let st_len = qc_vp.stake_table.len(SnapshotVersion::LastEpochStart)?; @@ -227,28 +227,28 @@ mod tests { st.advance(); st.advance(); - let qc_pp = QCParams { + let qc_pp = QcParams { stake_table: st, threshold: U256::from(10u8), agg_sig_pp, }; let msg = [72u8; 32]; - let sig1 = BitVectorQC::<$aggsig, ST>::sign( + let sig1 = BitVectorQc::<$aggsig, ST>::sign( &agg_sig_pp, &msg.into(), key_pair1.sign_key_ref(), &mut rng, ) .unwrap(); - let sig2 = BitVectorQC::<$aggsig, ST>::sign( + let sig2 = BitVectorQc::<$aggsig, ST>::sign( &agg_sig_pp, &msg.into(), key_pair2.sign_key_ref(), &mut rng, ) .unwrap(); - let sig3 = BitVectorQC::<$aggsig, ST>::sign( + let sig3 = BitVectorQc::<$aggsig, ST>::sign( &agg_sig_pp, &msg.into(), key_pair3.sign_key_ref(), @@ -258,19 +258,19 @@ mod tests { // happy path let signers = bitvec![0, 1, 1]; - let qc = BitVectorQC::<$aggsig, ST>::assemble( + let qc = BitVectorQc::<$aggsig, ST>::assemble( &qc_pp, signers.as_bitslice(), &[sig2.clone(), sig3.clone()], ) .unwrap(); - assert!(BitVectorQC::<$aggsig, ST>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert!(BitVectorQc::<$aggsig, ST>::check(&qc_pp, &msg.into(), &qc).is_ok()); assert_eq!( - BitVectorQC::<$aggsig, ST>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + BitVectorQc::<$aggsig, ST>::trace(&qc_pp, &msg.into(), &qc).unwrap(), vec![key_pair2.ver_key(), key_pair3.ver_key()], ); - // Check the QC and the QCParams can be serialized / deserialized + // Check the QC and the QcParams can be serialized / deserialized assert_eq!( qc, Serializer::::deserialize( @@ -282,7 +282,7 @@ mod tests { // (alex) since deserialized stake table's leaf would contain normalized projective // points with Z=1, which differs from the original projective representation. // We compare individual fields for equivalence instead. - let de_qc_pp: QCParams<$aggsig, ST> = Serializer::::deserialize( + let de_qc_pp: QcParams<$aggsig, ST> = Serializer::::deserialize( &Serializer::::serialize(&qc_pp).unwrap(), ) .unwrap(); @@ -308,7 +308,7 @@ mod tests { // bad paths // number of signatures unmatch - assert!(BitVectorQC::<$aggsig, ST>::assemble( + assert!(BitVectorQc::<$aggsig, ST>::assemble( &qc_pp, signers.as_bitslice(), &[sig2.clone()] @@ -316,7 +316,7 @@ mod tests { .is_err()); // total weight under threshold let active_bad = bitvec![1, 1, 0]; - assert!(BitVectorQC::<$aggsig, ST>::assemble( + assert!(BitVectorQc::<$aggsig, ST>::assemble( &qc_pp, active_bad.as_bitslice(), &[sig1.clone(), sig2.clone()] @@ -324,30 +324,30 @@ mod tests { .is_err()); // wrong bool vector length let active_bad_2 = bitvec![0, 1, 1, 0]; - assert!(BitVectorQC::<$aggsig, ST>::assemble( + assert!(BitVectorQc::<$aggsig, ST>::assemble( &qc_pp, active_bad_2.as_bitslice(), &[sig2, sig3], ) .is_err()); - assert!(BitVectorQC::<$aggsig, ST>::check( + assert!(BitVectorQc::<$aggsig, ST>::check( &qc_pp, &msg.into(), &(qc.0.clone(), active_bad) ) .is_err()); - assert!(BitVectorQC::<$aggsig, ST>::check( + assert!(BitVectorQc::<$aggsig, ST>::check( &qc_pp, &msg.into(), &(qc.0.clone(), active_bad_2) ) .is_err()); let bad_msg = [70u8; 32]; - assert!(BitVectorQC::<$aggsig, ST>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + assert!(BitVectorQc::<$aggsig, ST>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); let bad_sig = &sig1; - assert!(BitVectorQC::<$aggsig, ST>::check( + assert!(BitVectorQc::<$aggsig, ST>::check( &qc_pp, &msg.into(), &(bad_sig.clone(), qc.1) diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs index 328d047042..938f1e5ca7 100644 --- a/hotshot-qc/src/bit_vector_old.rs +++ b/hotshot-qc/src/bit_vector_old.rs @@ -1,4 +1,4 @@ -//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! Implementation for `BitVectorQc` that uses BLS signature + Bit vector. //! See more details in `HotShot` paper. use ark_std::{ @@ -19,7 +19,7 @@ use typenum::U32; /// An implementation of QC using BLS signature and a bit-vector. #[derive(Serialize, Deserialize)] -pub struct BitVectorQC Deserialize<'a>>( +pub struct BitVectorQc Deserialize<'a>>( PhantomData, ); @@ -38,9 +38,9 @@ impl StakeTableEntryType for StakeTableEntry { } } -/// Public parameters of [`BitVectorQC`] +/// Public parameters of [`BitVectorQc`] #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] -pub struct QCParams { +pub struct QcParams { /// the stake table (snapshot) this QC is verified against pub stake_entries: Vec>, /// threshold for the accumulated "weight" of votes to form a QC @@ -49,16 +49,16 @@ pub struct QCParams { pub agg_sig_pp: P, } -impl QuorumCertificate for BitVectorQC +impl QuorumCertificate for BitVectorQc where A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, { - type QCProverParams = QCParams; + type QcProverParams = QcParams; // TODO: later with SNARKs we'll use a smaller verifier parameter - type QCVerifierParams = QCParams; + type QcVerifierParams = QcParams; - type QC = (A::Signature, BitVec); + type Qc = (A::Signature, BitVec); type MessageLength = U32; type QuorumSize = U256; @@ -72,10 +72,10 @@ where } fn assemble( - qc_pp: &Self::QCProverParams, + qc_pp: &Self::QcProverParams, signers: &BitSlice, sigs: &[A::Signature], - ) -> Result { + ) -> Result { if signers.len() != qc_pp.stake_entries.len() { return Err(ParameterError(format!( "bit vector len {} != the number of stake entries {}", @@ -120,9 +120,9 @@ where } fn check( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result { let (sig, signers) = qc; if signers.len() != qc_vp.stake_entries.len() { @@ -162,9 +162,9 @@ where } fn trace( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray<::MessageUnit, Self::MessageLength>, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result::VerificationKey>, PrimitivesError> { let (_sig, signers) = qc; if signers.len() != qc_vp.stake_entries.len() { @@ -216,27 +216,27 @@ mod tests { stake_key: key_pair3.ver_key(), stake_amount: U256::from(7u8), }; - let qc_pp = QCParams { + let qc_pp = QcParams { stake_entries: vec![entry1, entry2, entry3], threshold: U256::from(10u8), agg_sig_pp, }; let msg = [72u8; 32]; - let sig1 = BitVectorQC::<$aggsig>::sign( + let sig1 = BitVectorQc::<$aggsig>::sign( &agg_sig_pp, &msg.into(), key_pair1.sign_key_ref(), &mut rng, ) .unwrap(); - let sig2 = BitVectorQC::<$aggsig>::sign( + let sig2 = BitVectorQc::<$aggsig>::sign( &agg_sig_pp, &msg.into(), key_pair2.sign_key_ref(), &mut rng, ) .unwrap(); - let sig3 = BitVectorQC::<$aggsig>::sign( + let sig3 = BitVectorQc::<$aggsig>::sign( &agg_sig_pp, &msg.into(), key_pair3.sign_key_ref(), @@ -246,19 +246,19 @@ mod tests { // happy path let signers = bitvec![0, 1, 1]; - let qc = BitVectorQC::<$aggsig>::assemble( + let qc = BitVectorQc::<$aggsig>::assemble( &qc_pp, signers.as_bitslice(), &[sig2.clone(), sig3.clone()], ) .unwrap(); - assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert!(BitVectorQc::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); assert_eq!( - BitVectorQC::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + BitVectorQc::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), vec![key_pair2.ver_key(), key_pair3.ver_key()], ); - // Check the QC and the QCParams can be serialized / deserialized + // Check the QC and the QcParams can be serialized / deserialized assert_eq!( qc, Serializer::::deserialize( @@ -277,7 +277,7 @@ mod tests { // bad paths // number of signatures unmatch - assert!(BitVectorQC::<$aggsig>::assemble( + assert!(BitVectorQc::<$aggsig>::assemble( &qc_pp, signers.as_bitslice(), &[sig2.clone()] @@ -285,7 +285,7 @@ mod tests { .is_err()); // total weight under threshold let active_bad = bitvec![1, 1, 0]; - assert!(BitVectorQC::<$aggsig>::assemble( + assert!(BitVectorQc::<$aggsig>::assemble( &qc_pp, active_bad.as_bitslice(), &[sig1.clone(), sig2.clone()] @@ -293,31 +293,31 @@ mod tests { .is_err()); // wrong bool vector length let active_bad_2 = bitvec![0, 1, 1, 0]; - assert!(BitVectorQC::<$aggsig>::assemble( + assert!(BitVectorQc::<$aggsig>::assemble( &qc_pp, active_bad_2.as_bitslice(), &[sig2, sig3], ) .is_err()); - assert!(BitVectorQC::<$aggsig>::check( + assert!(BitVectorQc::<$aggsig>::check( &qc_pp, &msg.into(), &(qc.0.clone(), active_bad) ) .is_err()); - assert!(BitVectorQC::<$aggsig>::check( + assert!(BitVectorQc::<$aggsig>::check( &qc_pp, &msg.into(), &(qc.0.clone(), active_bad_2) ) .is_err()); let bad_msg = [70u8; 32]; - assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + assert!(BitVectorQc::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); let bad_sig = &sig1; assert!( - BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) + BitVectorQc::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) .is_err() ); }; diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs index 6b299ac3ff..d278896c25 100644 --- a/hotshot-qc/src/snarked/circuit.rs +++ b/hotshot-qc/src/snarked/circuit.rs @@ -84,7 +84,7 @@ pub trait VerKeyVar: Sized + Clone { } /// Plonk circuit gadget for stake key aggregation for quorum certificates. -pub trait QCKeyAggregateGadget +pub trait QcKeyAggregateGadget where F: RescueParameter, { @@ -124,7 +124,7 @@ where ) -> Result<(), CircuitError>; } -impl QCKeyAggregateGadget for PlonkCircuit +impl QcKeyAggregateGadget for PlonkCircuit where F: RescueParameter, { diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 1592c8b996..d9be0ac954 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -301,7 +301,7 @@ impl> SystemContext { Ok(inner) } - /// "Starts" consensus by sending a `QCFormed` event + /// "Starts" consensus by sending a `QcFormed` event /// /// # Panics /// Panics if sending genesis fails @@ -315,7 +315,7 @@ impl> SystemContext { .expect("Genesis Broadcast failed"); self.internal_event_stream .0 - .broadcast_direct(Arc::new(HotShotEvent::QCFormed(either::Left( + .broadcast_direct(Arc::new(HotShotEvent::QcFormed(either::Left( consensus.high_qc().clone(), )))) .await diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 8273432108..ea235721aa 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -21,7 +21,7 @@ use hotshot_task_impls::{ response::{run_response_task, NetworkResponseState, RequestReceiver}, transactions::TransactionTaskState, upgrade::UpgradeTaskState, - vid::VIDTaskState, + vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::{ @@ -160,7 +160,7 @@ pub async fn add_vid_task>( rx: Receiver>>, handle: &SystemContextHandle, ) { - let vid_state = VIDTaskState::create_from(handle).await; + let vid_state = VidTaskState::create_from(handle).await; let task = Task::new(tx, rx, Arc::clone(&task_reg), vid_state); task_reg.run_task(task).await; } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index aa2a3efe5c..ecce149d07 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -9,7 +9,7 @@ use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, - transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VIDTaskState, + transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::traits::{ @@ -80,12 +80,12 @@ impl> CreateTaskState #[async_trait] impl> CreateTaskState - for VIDTaskState> + for VidTaskState> { async fn create_from( handle: &SystemContextHandle, - ) -> VIDTaskState> { - VIDTaskState { + ) -> VidTaskState> { + VidTaskState { api: handle.clone(), consensus: handle.hotshot.consensus(), cur_view: handle.cur_view().await, diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index e7b61dafba..0c424c83b9 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -163,7 +163,7 @@ pub enum Topic { /// The global topic Global = 0, /// The DA topic - DA = 1, + Da = 1, } /// Implement the `TopicTrait` for our `Topic` enum. We need this to filter @@ -373,7 +373,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Calculate if we're DA or not let topics = if node_id < da_committee_size as u64 { - vec![Topic::DA as u8, Topic::Global as u8] + vec![Topic::Da as u8, Topic::Global as u8] } else { vec![Topic::Global as u8] }; @@ -464,7 +464,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> _recipients: BTreeSet, bind_version: Ver, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::DA, bind_version) + self.broadcast_message(message, Topic::Da, bind_version) .await } diff --git a/task-impls/HotShot_event_architecture.drawio b/task-impls/HotShot_event_architecture.drawio index 970a8db0c9..c657ef0315 100644 --- a/task-impls/HotShot_event_architecture.drawio +++ b/task-impls/HotShot_event_architecture.drawio @@ -97,7 +97,7 @@ - + diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 0a1edce1a4..e6e4fcc0bf 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -397,7 +397,7 @@ impl> ConsensusTaskState } } #[cfg(not(feature = "dependency-tasks"))] - HotShotEvent::QCFormed(cert) => match cert { + HotShotEvent::QcFormed(cert) => match cert { either::Right(qc) => { self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); @@ -465,7 +465,7 @@ impl> ConsensusTaskState self.spawn_vote_task(view, event_stream).await; } #[cfg(not(feature = "dependency-tasks"))] - HotShotEvent::VIDShareRecv(disperse) => { + HotShotEvent::VidShareRecv(disperse) => { let view = disperse.data.view_number(); debug!( @@ -748,14 +748,14 @@ impl> TaskState for ConsensusTaskS HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::QuorumVoteRecv(_) | HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::QCFormed(_) + | HotShotEvent::QcFormed(_) | HotShotEvent::UpgradeCertificateFormed(_) | HotShotEvent::DaCertificateRecv(_) | HotShotEvent::ViewChange(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::Timeout(_) | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::VIDShareRecv(..) + | HotShotEvent::VidShareRecv(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::QuorumProposalSend(_, _) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f0fa8cc8be..29629d6036 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -63,7 +63,7 @@ pub enum HotShotEvent { /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DaVoteSend(DaVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - QCFormed(Either, TimeoutCertificate>), + QcFormed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks @@ -134,9 +134,9 @@ pub enum HotShotEvent { /// Vid disperse share has been received from the network; handled by the consensus task /// /// Like [`HotShotEvent::DaProposalRecv`]. - VIDShareRecv(Proposal>), + VidShareRecv(Proposal>), /// VID share data is validated. - VIDShareValidated(Proposal>), + VidShareValidated(Proposal>), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 3d88afec1d..59a47ea19f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -162,7 +162,7 @@ impl NetworkMessageTaskState { HotShotEvent::DaCertificateRecv(cert) } DaConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VIDShareRecv(proposal) + HotShotEvent::VidShareRecv(proposal) } }, }; @@ -294,7 +294,7 @@ impl< return self.handle_vid_disperse_proposal(proposal, &sender); } HotShotEvent::DaProposalSend(proposal, sender) => { - maybe_action = Some(HotShotAction::DAPropose); + maybe_action = Some(HotShotAction::DaPropose); ( sender, MessageKind::::from_consensus_message(SequencingMessage::Da( @@ -315,7 +315,7 @@ impl< } // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee HotShotEvent::DacSend(certificate, sender) => { - maybe_action = Some(HotShotAction::DACert); + maybe_action = Some(HotShotAction::DaCert); ( sender, MessageKind::::from_consensus_message(SequencingMessage::Da( diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal.rs index 3a21f60ff5..115a659298 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal.rs @@ -39,13 +39,13 @@ enum ProposalDependency { /// For the `SendPayloadCommitmentAndMetadata` event. PayloadAndMetadata, - /// For the `QCFormed` event. + /// For the `QcFormed` event. QC, /// For the `ViewSyncFinalizeCertificate2Recv` event. ViewSyncCert, - /// For the `QCFormed` event timeout branch. + /// For the `QcFormed` event timeout branch. TimeoutCert, /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. @@ -141,7 +141,7 @@ impl HandleDepOutput for ProposalDependencyHandle { block_view: *view, }); } - HotShotEvent::QCFormed(cert) => match cert { + HotShotEvent::QcFormed(cert) => match cert { either::Right(timeout) => { _timeout_certificate = Some(timeout.clone()); } @@ -272,14 +272,14 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::QCFormed(either::Left(qc)) = event { + if let HotShotEvent::QcFormed(either::Left(qc)) = event { qc.view_number + 1 } else { return false; } } ProposalDependency::TimeoutCert => { - if let HotShotEvent::QCFormed(either::Right(timeout)) = event { + if let HotShotEvent::QcFormed(either::Right(timeout)) = event { timeout.view_number } else { return false; @@ -386,7 +386,7 @@ impl> QuorumProposalTaskState { proposal_dependency.mark_as_completed(event); } - HotShotEvent::QCFormed(quorum_certificate) => match quorum_certificate { + HotShotEvent::QcFormed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); } @@ -408,7 +408,7 @@ impl> QuorumProposalTaskState 1 { secondary_deps.push(AndDependency::from_deps(vec![ qc_dependency, @@ -528,7 +528,7 @@ impl> QuorumProposalTaskState { + HotShotEvent::QcFormed(cert) => { match cert.clone() { either::Right(timeout_cert) => { let view = timeout_cert.view_number + 1; @@ -637,7 +637,7 @@ impl> TaskState !matches!( event.as_ref(), HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::QCFormed(_) + | HotShotEvent::QcFormed(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) | HotShotEvent::ProposeNow(..) diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 403cfd3db3..cc9505bd16 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -47,7 +47,7 @@ enum VoteDependency { QuorumProposal, /// For the `DaCertificateRecv` event. Dac, - /// For the `VIDShareRecv` event. + /// For the `VidShareRecv` event. Vid, /// For the `VoteNow` event. VoteNow, @@ -118,7 +118,7 @@ impl + 'static> HandleDepOutput payload_commitment = Some(cert_payload_comm); } } - HotShotEvent::VIDShareValidated(share) => { + HotShotEvent::VidShareValidated(share) => { let vid_payload_commitment = share.data.payload_commitment; disperse_share = Some(share.clone()); if let Some(comm) = payload_commitment { @@ -275,7 +275,7 @@ impl> QuorumVoteTaskState { - if let HotShotEvent::VIDShareValidated(disperse) = event { + if let HotShotEvent::VidShareValidated(disperse) = event { disperse.data.view_number } else { return false; @@ -443,7 +443,7 @@ impl> QuorumVoteTaskState { + HotShotEvent::VidShareRecv(disperse) => { let view = disperse.data.view_number(); trace!("Received VID share for view {}", *view); if view <= self.latest_voted_view { @@ -498,7 +498,7 @@ impl> QuorumVoteTaskState> TaskState for QuorumVoteTask !matches!( event.as_ref(), HotShotEvent::DaCertificateRecv(_) - | HotShotEvent::VIDShareRecv(..) + | HotShotEvent::VidShareRecv(..) | HotShotEvent::QuorumVoteDependenciesValidated(_) | HotShotEvent::VoteNow(..) | HotShotEvent::QuorumProposalValidated(..) diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 9105b57bf0..ab16dc425e 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -151,7 +151,7 @@ impl, Ver: StaticVersionType + 'st async fn build_requests(&self, view: TYPES::Time, _: Ver) -> Vec> { let mut reqs = Vec::new(); if !self.state.read().await.vid_shares().contains_key(&view) { - reqs.push(RequestKind::VID(view, self.public_key.clone())); + reqs.push(RequestKind::Vid(view, self.public_key.clone())); } // TODO request other things reqs @@ -236,7 +236,7 @@ impl> DelayedRequester { async_sleep(self.delay).await; } match request { - RequestKind::VID(view, key) => { + RequestKind::Vid(view, key) => { self.do_vid::(VidRequest(view, key), signature).await; } RequestKind::DaProposal(..) => {} @@ -300,7 +300,7 @@ impl> DelayedRequester { async fn handle_response_message(&self, message: SequencingMessage) { let event = match message { SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(prop)) => { - HotShotEvent::VIDShareRecv(prop) + HotShotEvent::VidShareRecv(prop) } _ => return, }; @@ -313,7 +313,7 @@ fn make_vid( req: &VidRequest, signature: Signature, ) -> Message { - let kind = RequestKind::VID(req.0, req.1.clone()); + let kind = RequestKind::Vid(req.0, req.1.clone()); let data_request = DataRequest { view: req.0, request: kind, diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index e5881fe77c..889d2862ac 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -151,7 +151,7 @@ impl NetworkResponseState { /// of the request. async fn handle_request(&self, req: DataRequest) -> Message { match req.request { - RequestKind::VID(view, pub_key) => { + RequestKind::Vid(view, pub_key) => { let Some(share) = self.get_or_calc_vid_share(view, &pub_key).await else { return self.make_msg(ResponseMessage::NotFound); }; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 234336a557..82bb316e9f 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -23,7 +23,7 @@ use crate::{ }; /// Tracks state of a VID task -pub struct VIDTaskState< +pub struct VidTaskState< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static, @@ -51,7 +51,7 @@ pub struct VIDTaskState< } impl, A: ConsensusApi + 'static> - VIDTaskState + VidTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] @@ -166,7 +166,7 @@ impl, A: ConsensusApi + /// task state implementation for VID Task impl, A: ConsensusApi + 'static> TaskState - for VIDTaskState + for VidTaskState { type Event = Arc>; diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index da11a98617..62b604cae7 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -258,7 +258,7 @@ impl AggregatableVote, QuorumCertifica certificate: QuorumCertificate, _key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::QCFormed(Left(certificate)) + HotShotEvent::QcFormed(Left(certificate)) } } @@ -300,7 +300,7 @@ impl AggregatableVote, TimeoutCertifi certificate: TimeoutCertificate, _key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::QCFormed(Right(certificate)) + HotShotEvent::QcFormed(Right(certificate)) } } diff --git a/testing/src/task_helpers.rs b/testing/src/task_helpers.rs index e34050f05e..fe573eef16 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/task_helpers.rs @@ -160,9 +160,9 @@ pub fn build_assembled_sig< data: &DATAType, membership: &TYPES::Membership, view: TYPES::Time, -) -> ::QCType { +) -> ::QcType { let stake_table = membership.committee_qc_stake_table(); - let real_qc_pp: ::QCParams = + let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), U256::from(CERT::threshold(membership)), diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 284308b13d..d8c99aae57 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -72,7 +72,7 @@ async fn test_consensus_task() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -88,9 +88,9 @@ async fn test_consensus_task() { // Run view 2 and propose. let view_2 = TestScriptStage { inputs: vec![ - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), - QCFormed(either::Left(cert)), + QcFormed(either::Left(cert)), // We must have a payload commitment and metadata to propose. SendPayloadCommitmentAndMetadata( payload_commitment, @@ -155,7 +155,7 @@ async fn test_consensus_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), QuorumVoteRecv(votes[0].clone()), ], outputs: vec![ @@ -204,7 +204,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -216,7 +216,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let inputs = vec![ // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), QuorumProposalRecv(proposals[1].clone(), leaders[1]), ]; @@ -317,7 +317,7 @@ async fn test_view_sync_finalize_propose() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -366,7 +366,7 @@ async fn test_view_sync_finalize_propose() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let view_4 = TestScriptStage { inputs: vec![ - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), TimeoutVoteRecv(timeout_vote_view_2), TimeoutVoteRecv(timeout_vote_view_3), @@ -443,7 +443,7 @@ async fn test_view_sync_finalize_vote() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -540,7 +540,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -634,7 +634,7 @@ async fn test_vid_disperse_storage_failure() { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index d22eadc825..920f1cd4b2 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -69,7 +69,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(1))), @@ -86,7 +86,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let inputs = vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - QCFormed(either::Left(cert)), + QcFormed(either::Left(cert)), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -101,11 +101,11 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { view_2_inputs.insert(0, DaCertificateRecv(dacs[1].clone())); view_2_inputs.insert( 0, - VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(vid_share(&vids[2].0, handle.public_key())), ); view_2_inputs.insert( 0, - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), ); // This stage transitions from view 1 to view 2. diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index de880fe69a..a618ea9678 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -93,7 +93,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let view = TestScriptStage { inputs: vec![ - QCFormed(either::Left(cert.clone())), + QcFormed(either::Left(cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -181,7 +181,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let view = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), - QCFormed(either::Left(cert.clone())), + QcFormed(either::Left(cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -249,7 +249,7 @@ async fn test_quorum_proposal_task_qc_timeout() { // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct let view_2 = TestScriptStage { inputs: vec![ - QCFormed(either::Right(cert.clone())), + QcFormed(either::Right(cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 43265bd93c..26e210507c 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -42,11 +42,11 @@ async fn test_quorum_vote_task_success() { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), - VIDShareRecv(vids[1].0[0].clone()), + VidShareRecv(vids[1].0[0].clone()), ], outputs: vec![ exact(DaCertificateValidated(dacs[1].clone())), - exact(VIDShareValidated(vids[1].0[0].clone())), + exact(VidShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), quorum_vote_send(), ], @@ -147,9 +147,9 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_dac = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[0].data.clone(), leaves[0].clone()), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], - outputs: vec![exact(VIDShareValidated(vids[0].0[0].clone()))], + outputs: vec![exact(VidShareValidated(vids[0].0[0].clone()))], asserts: vec![], }; let view_no_vid = TestScriptStage { @@ -163,11 +163,11 @@ async fn test_quorum_vote_task_miss_dependency() { let view_no_quorum_proposal = TestScriptStage { inputs: vec![ DaCertificateRecv(dacs[2].clone()), - VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(vid_share(&vids[2].0, handle.public_key())), ], outputs: vec![ exact(DaCertificateValidated(dacs[2].clone())), - exact(VIDShareValidated(vids[2].0[0].clone())), + exact(VidShareValidated(vids[2].0[0].clone())), ], asserts: vec![], }; diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 5452df6217..5cfda6a7d3 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -84,7 +84,7 @@ async fn test_consensus_task_upgrade() { let view_1 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], outputs: vec![ @@ -98,7 +98,7 @@ async fn test_consensus_task_upgrade() { let view_2 = TestScriptStage { inputs: vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), ], outputs: vec![ @@ -113,7 +113,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DaCertificateRecv(dacs[2].clone()), - VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(vid_share(&vids[2].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), @@ -128,7 +128,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DaCertificateRecv(dacs[3].clone()), - VIDShareRecv(vid_share(&vids[3].0, handle.public_key())), + VidShareRecv(vid_share(&vids[3].0, handle.public_key())), ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), @@ -237,17 +237,17 @@ async fn test_upgrade_and_consensus_task() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), DaCertificateRecv(dacs[1].clone()), - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), ], vec![ - VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(vid_share(&vids[2].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), @@ -256,7 +256,7 @@ async fn test_upgrade_and_consensus_task() { null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), ), - QCFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), + QcFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), ], ]; @@ -428,12 +428,12 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VIDShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VIDShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, @@ -446,7 +446,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[2].clone()), - VIDShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(vid_share(&vids[2].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), @@ -459,7 +459,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[3].clone()), - VIDShareRecv(vid_share(&vids[3].0, handle.public_key())), + VidShareRecv(vid_share(&vids[3].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, proposals[3].data.block_header.builder_commitment.clone(), @@ -472,7 +472,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[4].clone()), - VIDShareRecv(vid_share(&vids[4].0, handle.public_key())), + VidShareRecv(vid_share(&vids[4].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, proposals[4].data.block_header.builder_commitment.clone(), @@ -485,7 +485,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[5].clone()), - VIDShareRecv(vid_share(&vids[5].0, handle.public_key())), + VidShareRecv(vid_share(&vids[5].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, proposals[5].data.block_header.builder_commitment.clone(), @@ -494,11 +494,11 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), ), - QCFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), + QcFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], vec![ DaCertificateRecv(dacs[6].clone()), - VIDShareRecv(vid_share(&vids[6].0, handle.public_key())), + VidShareRecv(vid_share(&vids[6].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, proposals[6].data.block_header.builder_commitment.clone(), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 819d32248e..4d47d6d197 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -6,7 +6,7 @@ use hotshot_example_types::{ node_types::TestTypes, state_types::TestInstanceState, }; -use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; +use hotshot_task_impls::{events::HotShotEvent, vid::VidTaskState}; use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; use hotshot_types::{ data::{null_block, DaProposal, VidDisperse, VidDisperseShare, ViewNumber}, @@ -98,7 +98,7 @@ async fn test_vid_task() { )); input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::VIDShareRecv(vid_share_proposal.clone())); + input.push(HotShotEvent::VidShareRecv(vid_share_proposal.clone())); input.push(HotShotEvent::Shutdown); output.insert( @@ -122,7 +122,7 @@ async fn test_vid_task() { 1, ); - let vid_state = VIDTaskState { + let vid_state = VidTaskState { api: handle.clone(), consensus: handle.hotshot.consensus(), cur_view: ViewNumber::new(0), diff --git a/types/src/data.rs b/types/src/data.rs index 8d30fcbfeb..c335c36755 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -636,7 +636,7 @@ pub fn random_commitment(rng: &mut dyn rand::RngCore) -> Commitm /// # Panics /// if serialization fails pub fn serialize_signature2( - signatures: &::QCType, + signatures: &::QcType, ) -> Vec { let mut signatures_bytes = vec![]; signatures_bytes.extend("Yes".as_bytes()); diff --git a/types/src/event.rs b/types/src/event.rs index 44dcda5de6..ebb0814deb 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -177,11 +177,11 @@ pub enum HotShotAction { /// A quorum proposal was sent Propose, /// DA proposal was sent - DAPropose, + DaPropose, /// DA vote was sent DaVote, /// DA certificate was sent - DACert, + DaCert, /// VID shares were sent VidDisperse, /// An upgrade vote was sent diff --git a/types/src/message.rs b/types/src/message.rs index 933aa692c8..85c71f57cd 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -82,7 +82,7 @@ pub enum MessagePurpose { /// Message with a view sync certificate. ViewSyncCertificate, /// Message with a DAC. - DAC, + DaCertificate, /// Message for internal use Internal, /// Data message @@ -284,7 +284,7 @@ impl SequencingMessage { SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(_) => MessagePurpose::Proposal, DaConsensusMessage::DaVote(_) => MessagePurpose::Vote, - DaConsensusMessage::DaCertificate(_) => MessagePurpose::DAC, + DaConsensusMessage::DaCertificate(_) => MessagePurpose::DaCertificate, DaConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, }, } diff --git a/types/src/qc.rs b/types/src/qc.rs index df27c211fa..b999a2a5fe 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -1,4 +1,4 @@ -//! Implementation for `BitVectorQC` that uses BLS signature + Bit vector. +//! Implementation for `BitVectorQc` that uses BLS signature + Bit vector. //! See more details in hotshot paper. use ark_std::{ @@ -23,14 +23,14 @@ use crate::{ /// An implementation of QC using BLS signature and a bit-vector. #[derive(Serialize, Deserialize)] -pub struct BitVectorQC Deserialize<'a>>( +pub struct BitVectorQc Deserialize<'a>>( PhantomData, ); -/// Public parameters of [`BitVectorQC`] +/// Public parameters of [`BitVectorQc`] #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] #[serde(bound(deserialize = ""))] -pub struct QCParams Deserialize<'a>> { +pub struct QcParams Deserialize<'a>> { /// the stake table (snapshot) this QC is verified against pub stake_entries: Vec>, /// threshold for the accumulated "weight" of votes to form a QC @@ -39,17 +39,17 @@ pub struct QCParams Deserialize<'a>> { pub agg_sig_pp: P, } -impl QuorumCertificateScheme for BitVectorQC +impl QuorumCertificateScheme for BitVectorQc where A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, A::VerificationKey: SignatureKey, { - type QCProverParams = QCParams; + type QcProverParams = QcParams; // TODO: later with SNARKs we'll use a smaller verifier parameter - type QCVerifierParams = QCParams; + type QcVerifierParams = QcParams; - type QC = (A::Signature, BitVec); + type Qc = (A::Signature, BitVec); type MessageLength = U32; type QuorumSize = U256; @@ -64,10 +64,10 @@ where } fn assemble( - qc_pp: &Self::QCProverParams, + qc_pp: &Self::QcProverParams, signers: &BitSlice, sigs: &[A::Signature], - ) -> Result { + ) -> Result { if signers.len() != qc_pp.stake_entries.len() { return Err(SignatureError::ParameterError(format!( "bit vector len {} != the number of stake entries {}", @@ -112,9 +112,9 @@ where } fn check( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result { let (sig, signers) = qc; if signers.len() != qc_vp.stake_entries.len() { @@ -154,9 +154,9 @@ where } fn trace( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray<::MessageUnit, Self::MessageLength>, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result::VerificationKey>, SignatureError> { let (_sig, signers) = qc; if signers.len() != qc_vp.stake_entries.len() { @@ -210,37 +210,37 @@ mod tests { stake_key: key_pair3.ver_key(), stake_amount: U256::from(7u8), }; - let qc_pp = QCParams { + let qc_pp = QcParams { stake_entries: vec![entry1, entry2, entry3], threshold: U256::from(10u8), agg_sig_pp, }; let msg = [72u8; 32]; let sig1 = - BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair1.sign_key_ref(), &msg, &mut rng) + BitVectorQc::<$aggsig>::sign(&agg_sig_pp, key_pair1.sign_key_ref(), &msg, &mut rng) .unwrap(); let sig2 = - BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair2.sign_key_ref(), &msg, &mut rng) + BitVectorQc::<$aggsig>::sign(&agg_sig_pp, key_pair2.sign_key_ref(), &msg, &mut rng) .unwrap(); let sig3 = - BitVectorQC::<$aggsig>::sign(&agg_sig_pp, key_pair3.sign_key_ref(), &msg, &mut rng) + BitVectorQc::<$aggsig>::sign(&agg_sig_pp, key_pair3.sign_key_ref(), &msg, &mut rng) .unwrap(); // happy path let signers = bitvec![0, 1, 1]; - let qc = BitVectorQC::<$aggsig>::assemble( + let qc = BitVectorQc::<$aggsig>::assemble( &qc_pp, signers.as_bitslice(), &[sig2.clone(), sig3.clone()], ) .unwrap(); - assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); + assert!(BitVectorQc::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); assert_eq!( - BitVectorQC::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), + BitVectorQc::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), vec![key_pair2.ver_key(), key_pair3.ver_key()], ); - // Check the QC and the QCParams can be serialized / deserialized + // Check the QC and the QcParams can be serialized / deserialized assert_eq!( qc, Serializer::::deserialize(&Serializer::::serialize(&qc).unwrap()) @@ -257,7 +257,7 @@ mod tests { // bad paths // number of signatures unmatch - assert!(BitVectorQC::<$aggsig>::assemble( + assert!(BitVectorQc::<$aggsig>::assemble( &qc_pp, signers.as_bitslice(), &[sig2.clone()] @@ -265,7 +265,7 @@ mod tests { .is_err()); // total weight under threshold let active_bad = bitvec![1, 1, 0]; - assert!(BitVectorQC::<$aggsig>::assemble( + assert!(BitVectorQc::<$aggsig>::assemble( &qc_pp, active_bad.as_bitslice(), &[sig1.clone(), sig2.clone()] @@ -273,31 +273,31 @@ mod tests { .is_err()); // wrong bool vector length let active_bad_2 = bitvec![0, 1, 1, 0]; - assert!(BitVectorQC::<$aggsig>::assemble( + assert!(BitVectorQc::<$aggsig>::assemble( &qc_pp, active_bad_2.as_bitslice(), &[sig2, sig3], ) .is_err()); - assert!(BitVectorQC::<$aggsig>::check( + assert!(BitVectorQc::<$aggsig>::check( &qc_pp, &msg.into(), &(qc.0.clone(), active_bad) ) .is_err()); - assert!(BitVectorQC::<$aggsig>::check( + assert!(BitVectorQc::<$aggsig>::check( &qc_pp, &msg.into(), &(qc.0.clone(), active_bad_2) ) .is_err()); let bad_msg = [70u8; 32]; - assert!(BitVectorQC::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); + assert!(BitVectorQc::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); let bad_sig = &sig1; assert!( - BitVectorQC::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) + BitVectorQc::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) .is_err() ); }; diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 6e1852f748..44eb1b1e21 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -13,7 +13,7 @@ use rand_chacha::ChaCha20Rng; use tracing::instrument; use crate::{ - qc::{BitVectorQC, QCParams}, + qc::{BitVectorQc, QcParams}, stake_table::StakeTableEntry, traits::{ qc::QuorumCertificateScheme, @@ -31,11 +31,11 @@ pub type BLSPublicParam = (); impl SignatureKey for BLSPubKey { type PrivateKey = BLSPrivKey; type StakeTableEntry = StakeTableEntry; - type QCParams = - QCParams::PublicParameter>; + type QcParams = + QcParams::PublicParameter>; type PureAssembledSignatureType = ::Signature; - type QCType = (Self::PureAssembledSignatureType, BitVec); + type QcType = (Self::PureAssembledSignatureType, BitVec); type SignError = SignatureError; #[instrument(skip(self))] @@ -48,7 +48,7 @@ impl SignatureKey for BLSPubKey { sk: &Self::PrivateKey, data: &[u8], ) -> Result { - BitVectorQC::::sign( + BitVectorQc::::sign( &(), sk, data, @@ -94,29 +94,29 @@ impl SignatureKey for BLSPubKey { fn public_parameter( stake_entries: Vec, threshold: U256, - ) -> Self::QCParams { - QCParams { + ) -> Self::QcParams { + QcParams { stake_entries, threshold, agg_sig_pp: (), } } - fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool { + fn check(real_qc_pp: &Self::QcParams, data: &[u8], qc: &Self::QcType) -> bool { let msg = GenericArray::from_slice(data); - BitVectorQC::::check(real_qc_pp, msg, qc).is_ok() + BitVectorQc::::check(real_qc_pp, msg, qc).is_ok() } - fn sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec) { + fn sig_proof(signature: &Self::QcType) -> (Self::PureAssembledSignatureType, BitVec) { signature.clone() } fn assemble( - real_qc_pp: &Self::QCParams, + real_qc_pp: &Self::QcParams, signers: &BitSlice, sigs: &[Self::PureAssembledSignatureType], - ) -> Self::QCType { - BitVectorQC::::assemble(real_qc_pp, signers, sigs) + ) -> Self::QcType { + BitVectorQc::::assemble(real_qc_pp, signers, sigs) .expect("this assembling shouldn't fail") } @@ -140,7 +140,7 @@ impl BuilderSignatureKey for BuilderKey { private_key: &Self::BuilderPrivateKey, data: &[u8], ) -> Result { - BitVectorQC::::sign( + BitVectorQc::::sign( &(), private_key, data, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 2785e13a8a..2a9c48126f 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -71,7 +71,7 @@ pub struct SimpleCertificate::QCType>, + pub signatures: Option<::QcType>, /// phantom data for `THRESHOLD` and `TYPES` pub _pd: PhantomData<(TYPES, THRESHOLD)>, } @@ -102,7 +102,7 @@ impl> fn create_signed_certificate( vote_commitment: Commitment, data: Self::Voteable, - sig: ::QCType, + sig: ::QcType, view: TYPES::Time, ) -> Self { SimpleCertificate { diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index cc65c2aa17..ba18349b20 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -34,7 +34,7 @@ pub struct TimeoutData { } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a VID vote. -pub struct VIDData { +pub struct VidData { /// Commitment to the block payload the VID vote is on. pub payload_commit: VidCommitment, } @@ -186,7 +186,7 @@ impl Committable for DaData { } } -impl Committable for VIDData { +impl Committable for VidData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("VID data") .var_size_bytes(self.payload_commit.as_ref()) diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 80379770f6..bfcf2d3de0 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -207,7 +207,7 @@ pub struct DataRequest { #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] pub enum RequestKind { /// Request VID data by our key and the VID commitment - VID(TYPES::Time, TYPES::SignatureKey), + Vid(TYPES::Time, TYPES::SignatureKey), /// Request a DA proposal for a certain view DaProposal(TYPES::Time), } diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 0ab0659d89..1e72f7d098 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -17,17 +17,17 @@ pub trait QuorumCertificateScheme< { /// Public parameters for generating the QC /// E.g: snark proving/verifying keys, list of (or pointer to) public keys stored in the smart contract. - type QCProverParams: Serialize + for<'a> Deserialize<'a>; + type QcProverParams: Serialize + for<'a> Deserialize<'a>; /// Public parameters for validating the QC /// E.g: verifying keys, stake table commitment - type QCVerifierParams: Serialize + for<'a> Deserialize<'a>; + type QcVerifierParams: Serialize + for<'a> Deserialize<'a>; /// Allows to fix the size of the message at compilation time. type MessageLength: ArrayLength; /// Type of the actual quorum certificate object - type QC; + type Qc; /// Type of the quorum size (e.g. number of votes or accumulated weight of signatures) type QuorumSize; @@ -61,10 +61,10 @@ pub trait QuorumCertificateScheme< /// Will return error if some of the partial signatures provided are invalid or the number of /// partial signatures / verifications keys are different. fn assemble( - qc_pp: &Self::QCProverParams, + qc_pp: &Self::QcProverParams, signers: &BitSlice, sigs: &[A::Signature], - ) -> Result; + ) -> Result; /// Checks an aggregated signature over some message provided as input /// * `qc_vp` - public parameters for validating the QC @@ -77,9 +77,9 @@ pub trait QuorumCertificateScheme< /// Return error if the QC is invalid, either because accumulated weight didn't exceed threshold, /// or some partial signatures are invalid. fn check( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result; /// Trace the list of signers given a qc. @@ -88,8 +88,8 @@ pub trait QuorumCertificateScheme< /// /// Return error if the inputs mismatch (e.g. wrong verifier parameter or original message). fn trace( - qc_vp: &Self::QCVerifierParams, + qc_vp: &Self::QcVerifierParams, message: &GenericArray, - qc: &Self::QC, + qc: &Self::Qc, ) -> Result, SignatureError>; } diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 9c460d5c3f..fd4ece125a 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -61,7 +61,7 @@ pub trait SignatureKey: + Serialize + for<'a> Deserialize<'a>; /// The type of the quorum certificate parameters used for assembled signature - type QCParams: Send + Sync + Sized + Clone + Debug + Hash; + type QcParams: Send + Sync + Sized + Clone + Debug + Hash; /// The type of the assembled signature, without `BitVec` type PureAssembledSignatureType: Send + Sync @@ -76,7 +76,7 @@ pub trait SignatureKey: + Into + for<'a> TryFrom<&'a TaggedBase64>; /// The type of the assembled qc: assembled signature + `BitVec` - type QCType: Send + type QcType: Send + Sync + Sized + Clone @@ -126,20 +126,20 @@ pub trait SignatureKey: fn public_parameter( stake_entries: Vec, threshold: U256, - ) -> Self::QCParams; + ) -> Self::QcParams; /// check the quorum certificate for the assembled signature - fn check(real_qc_pp: &Self::QCParams, data: &[u8], qc: &Self::QCType) -> bool; + fn check(real_qc_pp: &Self::QcParams, data: &[u8], qc: &Self::QcType) -> bool; /// get the assembled signature and the `BitVec` separately from the assembled signature - fn sig_proof(signature: &Self::QCType) -> (Self::PureAssembledSignatureType, BitVec); + fn sig_proof(signature: &Self::QcType) -> (Self::PureAssembledSignatureType, BitVec); /// assemble the signature from the partial signature and the indication of signers in `BitVec` fn assemble( - real_qc_pp: &Self::QCParams, + real_qc_pp: &Self::QcParams, signers: &BitSlice, sigs: &[Self::PureAssembledSignatureType], - ) -> Self::QCType; + ) -> Self::QcType; /// generates the genesis public key. Meant to be dummy/filler #[must_use] diff --git a/types/src/vote.rs b/types/src/vote.rs index 73920d6417..bfb175d310 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -61,7 +61,7 @@ pub trait Certificate: HasViewNumber { fn create_signed_certificate( vote_commitment: Commitment, data: Self::Voteable, - sig: ::QCType, + sig: ::QcType, view: TYPES::Time, ) -> Self; @@ -156,7 +156,7 @@ impl, CERT: Certificate= CERT::threshold(membership).into() { // Assemble QC - let real_qc_pp: <::SignatureKey as SignatureKey>::QCParams = + let real_qc_pp: <::SignatureKey as SignatureKey>::QcParams = ::public_parameter( stake_table, U256::from(CERT::threshold(membership)), From d5bd42de5d7d4f26c0f6813d9616848e79a6f2b6 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Fri, 17 May 2024 17:05:11 -0500 Subject: [PATCH 1040/1393] Add default `genesis` to trait (#3164) Avoids downstream implementations. Co-authored-by: tbro --- example-types/src/block_types.rs | 4 ---- types/src/traits/block_contents.rs | 8 +++++++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 0412f92e4d..1c0e9dbb8a 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -199,10 +199,6 @@ impl BlockPayload for TestBlockPayload { Self { transactions } } - fn genesis() -> (Self, Self::Metadata) { - (Self::genesis(), TestMetadata) - } - fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { let mut digest = sha2::Sha256::new(); for txn in &self.transactions { diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 3d3f2241a5..61375d5171 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -89,7 +89,13 @@ pub trait BlockPayload: fn from_bytes(encoded_transactions: &[u8], metadata: &Self::Metadata) -> Self; /// Build the genesis payload and metadata. - fn genesis() -> (Self, Self::Metadata); + #[must_use] + fn genesis() -> (Self, Self::Metadata) + where + ::Instance: Default, + { + Self::from_transactions([], &Default::default()).unwrap() + } /// List of transaction commitments. fn transaction_commitments( From 042b1e8aed06db766216752e681c70963ea63563 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 20 May 2024 08:40:26 -0600 Subject: [PATCH 1041/1393] [Consensus2] - Init new consensus module (#3149) * init consensus2 from main * fix naming * fix error messages * naming changes --- example-types/src/state_types.rs | 2 +- hotshot/src/lib.rs | 22 ++- hotshot/src/tasks/mod.rs | 13 ++ hotshot/src/tasks/task_state.rs | 39 +++- task-impls/src/consensus2/handlers.rs | 247 ++++++++++++++++++++++++++ task-impls/src/consensus2/mod.rs | 174 ++++++++++++++++++ task-impls/src/events.rs | 9 +- task-impls/src/lib.rs | 3 + types/src/traits/states.rs | 5 +- types/src/utils.rs | 4 +- 10 files changed, 505 insertions(+), 13 deletions(-) create mode 100644 task-impls/src/consensus2/handlers.rs create mode 100644 task-impls/src/consensus2/mod.rs diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index f02a48c046..2b95f10c7f 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -25,7 +25,7 @@ pub struct TestInstanceState {} impl InstanceState for TestInstanceState {} /// Application-specific state delta implementation for testing purposes. -#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct TestStateDelta {} impl StateDelta for TestStateDelta {} diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index d9be0ac954..4a56ed6c93 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -58,11 +58,18 @@ use tracing::{debug, instrument, trace}; use vbs::version::Version; #[cfg(feature = "dependency-tasks")] -use crate::tasks::{add_quorum_proposal_recv_task, add_quorum_proposal_task, add_quorum_vote_task}; +use crate::tasks::{ + add_consensus2_task, add_quorum_proposal_recv_task, add_quorum_proposal_task, + add_quorum_vote_task, +}; + +#[cfg(not(feature = "dependency-tasks"))] +use crate::tasks::add_consensus_task; + use crate::{ tasks::{ - add_consensus_task, add_da_task, add_network_event_task, add_network_message_task, - add_transaction_task, add_upgrade_task, add_view_sync_task, + add_da_task, add_network_event_task, add_network_message_task, add_transaction_task, + add_upgrade_task, add_view_sync_task, }, traits::NodeImplementation, types::{Event, SystemContextHandle}, @@ -619,6 +626,7 @@ impl> SystemContext { Arc::clone(&handle.storage()), ) .await; + #[cfg(not(feature = "dependency-tasks"))] add_consensus_task( Arc::clone(®istry), event_tx.clone(), @@ -685,6 +693,14 @@ impl> SystemContext { &handle, ) .await; + #[cfg(feature = "dependency-tasks")] + add_consensus2_task( + Arc::clone(®istry), + event_tx.clone(), + event_rx.activate_cloned(), + &handle, + ) + .await; handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index ea235721aa..71cf00a5ec 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -11,6 +11,7 @@ use async_lock::RwLock; use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ consensus::ConsensusTaskState, + consensus2::Consensus2TaskState, da::DaTaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, @@ -257,3 +258,15 @@ pub async fn add_quorum_proposal_recv_task>( + task_reg: Arc, + tx: Sender>>, + rx: Receiver>>, + handle: &SystemContextHandle, +) { + let consensus2_task_state = Consensus2TaskState::create_from(handle).await; + let task = Task::new(tx, rx, Arc::clone(&task_reg), consensus2_task_state); + task_reg.run_task(task).await; +} diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index ecce149d07..57810f82f3 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -6,11 +6,11 @@ use std::{ use async_trait::async_trait; use hotshot_task_impls::{ - builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, - quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, - quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, - transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, - view_sync::ViewSyncTaskState, + builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, + da::DaTaskState, quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, + request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, + vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::traits::{ consensus_api::ConsensusApi, @@ -300,3 +300,32 @@ impl> CreateTaskState } } } + +#[async_trait] +impl> CreateTaskState + for Consensus2TaskState +{ + async fn create_from(handle: &SystemContextHandle) -> Consensus2TaskState { + let consensus = handle.hotshot.consensus(); + Consensus2TaskState { + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + instance_state: handle.hotshot.instance_state(), + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + da_network: Arc::clone(&handle.hotshot.networks.da_network), + timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + committee_membership: handle.hotshot.memberships.da_membership.clone().into(), + vote_collector: None.into(), + timeout_vote_collector: None.into(), + storage: Arc::clone(&handle.storage), + cur_view: handle.cur_view().await, + output_event_stream: handle.hotshot.external_event_stream.0.clone(), + timeout_task: None, + timeout: handle.hotshot.config.next_view_timeout, + consensus, + last_decided_view: handle.cur_view().await, + id: handle.hotshot.id, + } + } +} diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs new file mode 100644 index 0000000000..2a1f751043 --- /dev/null +++ b/task-impls/src/consensus2/handlers.rs @@ -0,0 +1,247 @@ +use std::{sync::Arc, time::Duration}; + +use anyhow::{ensure, Context, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use hotshot_types::{ + event::{Event, EventType}, + simple_certificate::{QuorumCertificate, TimeoutCertificate}, + simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, + vote::HasViewNumber, +}; +use tracing::debug; + +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, + vote_collection::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent}, +}; + +use super::Consensus2TaskState; + +/// Handle a `QuorumVoteRecv` event. +pub(crate) async fn handle_quorum_vote_recv>( + vote: &QuorumVote, + event: Arc>, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + // Are we the leader for this view? + ensure!( + task_state.quorum_membership.leader(vote.view_number() + 1) == task_state.public_key, + format!( + "We are not the leader for view {:?}", + vote.view_number() + 1 + ) + ); + + let mut collector = task_state.vote_collector.write().await; + + if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { + let info = AccumulatorInfo { + public_key: task_state.public_key.clone(), + membership: Arc::clone(&task_state.quorum_membership), + view: vote.view_number(), + id: task_state.id, + }; + *collector = create_vote_accumulator::, QuorumCertificate>( + &info, + vote.clone(), + event, + sender, + ) + .await; + } else { + let result = collector + .as_mut() + .unwrap() + .handle_event(Arc::clone(&event), sender) + .await; + + if result == Some(HotShotTaskCompleted) { + *collector = None; + } + } + Ok(()) +} + +/// Handle a `TimeoutVoteRecv` event. +pub(crate) async fn handle_timeout_vote_recv>( + vote: &TimeoutVote, + event: Arc>, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + // Are we the leader for this view? + ensure!( + task_state.timeout_membership.leader(vote.view_number() + 1) == task_state.public_key, + format!( + "We are not the leader for view {:?}", + vote.view_number() + 1 + ) + ); + + let mut collector = task_state.timeout_vote_collector.write().await; + + if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { + let info = AccumulatorInfo { + public_key: task_state.public_key.clone(), + membership: Arc::clone(&task_state.quorum_membership), + view: vote.view_number(), + id: task_state.id, + }; + *collector = + create_vote_accumulator::, TimeoutCertificate>( + &info, + vote.clone(), + event, + sender, + ) + .await; + } else { + let result = collector + .as_mut() + .unwrap() + .handle_event(Arc::clone(&event), sender) + .await; + + if result == Some(HotShotTaskCompleted) { + *collector = None; + } + } + Ok(()) +} + +/// Handle a `ViewChange` event. +pub(crate) async fn handle_view_change>( + new_view_number: TYPES::Time, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + ensure!( + new_view_number > task_state.cur_view, + "New view is not larger than the current view" + ); + + let old_view_number = task_state.cur_view; + debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + + // Cancel the old timeout task + if let Some(timeout_task) = task_state.timeout_task.take() { + cancel_task(timeout_task).await; + } + + // Move this node to the next view + task_state.cur_view = new_view_number; + + // Spawn a timeout task if we did actually update view + let timeout = task_state.timeout; + task_state.timeout_task = Some(async_spawn({ + let stream = sender.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = new_view_number + 1; + async move { + async_sleep(Duration::from_millis(timeout)).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + })); + + let consensus = task_state.consensus.read().await; + consensus + .metrics + .current_view + .set(usize::try_from(task_state.cur_view.u64()).unwrap()); + + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(task_state.cur_view.u64()).unwrap() + > usize::try_from(task_state.last_decided_view.u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(task_state.cur_view.u64()).unwrap() + - usize::try_from(task_state.last_decided_view.u64()).unwrap(), + ); + } + + broadcast_event( + Event { + view_number: old_view_number, + event: EventType::ViewFinished { + view_number: old_view_number, + }, + }, + &task_state.output_event_stream, + ) + .await; + Ok(()) +} + +/// Handle a `Timeout` event. +pub(crate) async fn handle_timeout>( + view_number: TYPES::Time, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + ensure!( + task_state.cur_view < view_number, + "Timeout event is for an old view" + ); + + ensure!( + task_state + .timeout_membership + .has_stake(&task_state.public_key), + format!("We were not chosen for the consensus committee for view {view_number:?}") + ); + + let vote = TimeoutVote::create_signed_vote( + TimeoutData:: { view: view_number }, + view_number, + &task_state.public_key, + &task_state.private_key, + ) + .context("Failed to sign TimeoutData")?; + + broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; + broadcast_event( + Event { + view_number, + event: EventType::ViewTimeout { view_number }, + }, + &task_state.output_event_stream, + ) + .await; + + debug!( + "We did not receive evidence for view {} in time, sending timeout vote for that view!", + *view_number + ); + + broadcast_event( + Event { + view_number, + event: EventType::ReplicaViewTimeout { view_number }, + }, + &task_state.output_event_stream, + ) + .await; + + task_state + .consensus + .read() + .await + .metrics + .number_of_timeouts + .add(1); + + Ok(()) +} diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs new file mode 100644 index 0000000000..a20ff872ca --- /dev/null +++ b/task-impls/src/consensus2/mod.rs @@ -0,0 +1,174 @@ +use async_broadcast::Sender; +use hotshot_task::task::Task; +use std::sync::Arc; +use tracing::instrument; + +use async_lock::RwLock; +use hotshot_task::task::TaskState; +use hotshot_types::{ + consensus::Consensus, + event::Event, + simple_certificate::{QuorumCertificate, TimeoutCertificate}, + simple_vote::{QuorumVote, TimeoutVote}, + traits::{ + node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, + }, +}; + +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; + +use crate::{events::HotShotEvent, vote_collection::VoteCollectionTaskState}; + +use self::handlers::{ + handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, +}; + +/// Alias for Optional type for Vote Collectors +type VoteCollectorOption = Option>; + +/// Event handlers for use in the `handle` method. +mod handlers; + +/// Task state for the Consensus task. +pub struct Consensus2TaskState> { + /// Our public key + pub public_key: TYPES::SignatureKey, + + /// Our Private Key + pub private_key: ::PrivateKey, + + /// Immutable instance state + pub instance_state: Arc, + + /// Network for all nodes + pub quorum_network: Arc, + + /// Network for DA committee + pub da_network: Arc, + + /// Membership for Timeout votes/certs + pub timeout_membership: Arc, + + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + + /// Membership for DA committee Votes/certs + pub committee_membership: Arc, + + /// Current Vote collection task, with it's view. + pub vote_collector: + RwLock, QuorumCertificate>>, + + /// Current timeout vote collection task with its view + pub timeout_vote_collector: + RwLock, TimeoutCertificate>>, + + /// This node's storage ref + pub storage: Arc>, + + /// The view number that this node is currently executing in. + pub cur_view: TYPES::Time, + + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, + + /// Timeout task handle + pub timeout_task: Option>, + + /// View timeout from config. + pub timeout: u64, + + /// A reference to the metrics trait. + pub consensus: Arc>>, + + /// The last decided view + pub last_decided_view: TYPES::Time, + + /// The node's id + pub id: u64, +} +impl> Consensus2TaskState { + /// Handles a consensus event received on the event stream + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error")] + pub async fn handle( + &mut self, + event: Arc>, + sender: Sender>>, + ) { + match event.as_ref() { + HotShotEvent::QuorumVoteRecv(ref vote) => { + if let Err(e) = + handle_quorum_vote_recv(vote, Arc::clone(&event), &sender, self).await + { + tracing::debug!("Failed to handle QuorumVoteRecv event; error = {e}"); + } + } + HotShotEvent::TimeoutVoteRecv(ref vote) => { + if let Err(e) = + handle_timeout_vote_recv(vote, Arc::clone(&event), &sender, self).await + { + tracing::debug!("Failed to handle TimeoutVoteRecv event; error = {e}"); + } + } + HotShotEvent::ViewChange(new_view_number) => { + if let Err(e) = handle_view_change(*new_view_number, &sender, self).await { + tracing::trace!("Failed to handle ViewChange event; error = {e}"); + } + } + HotShotEvent::Timeout(view_number) => { + if let Err(e) = handle_timeout(*view_number, &sender, self).await { + tracing::debug!("Failed to handle Timeout event; error = {e}"); + } + } + HotShotEvent::LastDecidedViewUpdated(view_number) => { + if *view_number < self.last_decided_view { + tracing::debug!("New decided view is not newer than ours"); + } else { + self.last_decided_view = *view_number; + if let Err(e) = self + .consensus + .write() + .await + .update_last_decided_view(*view_number) + { + tracing::trace!("{e:?}"); + } + } + } + _ => {} + } + } +} + +impl> TaskState for Consensus2TaskState { + type Event = Arc>; + type Output = (); + + fn filter(&self, event: &Arc>) -> bool { + !matches!( + event.as_ref(), + HotShotEvent::QuorumVoteRecv(_) + | HotShotEvent::TimeoutVoteRecv(_) + | HotShotEvent::ViewChange(_) + | HotShotEvent::Timeout(_) + | HotShotEvent::LastDecidedViewUpdated(_) + | HotShotEvent::Shutdown + ) + } + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event.as_ref(), HotShotEvent::Shutdown) + } +} diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 29629d6036..9718e92fd3 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -14,7 +14,7 @@ use hotshot_types::{ ViewSyncPreCommitVote, }, traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, - utils::BuilderCommitment, + utils::{BuilderCommitment, View}, vid::{VidCommitment, VidPrecomputeData}, vote::VoteDependencyData, }; @@ -153,4 +153,11 @@ pub enum HotShotEvent { ProposeNow(TYPES::Time, ProposalDependencyData), /// Initiate a vote right now for the designated view. VoteNow(TYPES::Time, VoteDependencyData), + + /* Consensus State Update Events */ + /// A undecided view has been created and added to the validated state storage. + ValidatedStateUpdate(TYPES::Time, View), + + /// A new anchor view has been successfully reached by this node. + LastDecidedViewUpdated(TYPES::Time), } diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index a7f92ac64b..58b75422b0 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -4,6 +4,9 @@ /// the task which implements the main parts of consensus pub mod consensus; +/// The task which implements the core state logic of consensus. +pub mod consensus2; + /// The task which handles the logic for the quorum vote. pub mod quorum_vote; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 624dbdf248..f555ca304e 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -23,7 +23,10 @@ use crate::{ pub trait InstanceState: Debug + Send + Sync {} /// Application-specific state delta, which will be used to store a list of merkle tree entries. -pub trait StateDelta: Debug + Send + Sync + Serialize + for<'a> Deserialize<'a> {} +pub trait StateDelta: + Debug + PartialEq + Eq + Hash + Send + Sync + Serialize + for<'a> Deserialize<'a> +{ +} /// Abstraction over the state that blocks modify /// diff --git a/types/src/utils.rs b/types/src/utils.rs index 98014a5b0d..0bec71ea42 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -24,7 +24,7 @@ use crate::{ }; /// A view's state -#[derive(Debug, Deserialize, Serialize)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] pub enum ViewInner { /// A pending view with an available block but not leaf proposal yet. @@ -133,7 +133,7 @@ impl Deref for View { } /// This exists so we can perform state transitions mutably -#[derive(Debug, Clone, Deserialize, Serialize)] +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] pub struct View { /// The view data. Wrapped in a struct so we can mutate From 15d4419a40e9227c311e484f220dc7c67df8f16c Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 21 May 2024 20:32:23 +0200 Subject: [PATCH 1042/1393] Reliably broadcast ViewFinished event (#3201) * Reliably broadcast ViewFinished event * Reduce number of args to update_view --- task-impls/src/consensus/helpers.rs | 10 ++--- task-impls/src/consensus/mod.rs | 17 +------- task-impls/src/consensus/view_change.rs | 52 +++++++++++++++---------- 3 files changed, 37 insertions(+), 42 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 57795b4feb..9c14f75c9c 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -582,11 +582,11 @@ pub async fn publish_proposal_if_able( /// TEMPORARY TYPE: Quorum proposal recv task state when using dependency tasks #[cfg(feature = "dependency-tasks")] -type TemporaryProposalRecvCombinedType = QuorumProposalRecvTaskState; +pub(crate) type TemporaryProposalRecvCombinedType = QuorumProposalRecvTaskState; /// TEMPORARY TYPE: Consensus task state when not using dependency tasks #[cfg(not(feature = "dependency-tasks"))] -type TemporaryProposalRecvCombinedType = ConsensusTaskState; +pub(crate) type TemporaryProposalRecvCombinedType = ConsensusTaskState; // TODO: Fix `clippy::too_many_lines`. /// Handle the received quorum proposal. @@ -626,13 +626,11 @@ pub async fn handle_quorum_proposal_recv( + if let Err(e) = update_view::( + task_state, view, &event_stream, - task_state.timeout, Arc::clone(&task_state.consensus), - &mut task_state.cur_view, - &mut task_state.timeout_task, SEND_VIEW_CHANGE_EVENT, ) .await diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index e6e4fcc0bf..31eb95ec3e 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -538,13 +538,11 @@ impl> ConsensusTaskState // update the view in state to the one in the message // Publish a view change event to the application // Returns if the view does not need updating. - if let Err(e) = update_view::( + if let Err(e) = update_view::( + self, new_view, &event_stream, - self.timeout, Arc::clone(&self.consensus), - &mut self.cur_view, - &mut self.timeout_task, DONT_SEND_VIEW_CHANGE_EVENT, ) .await @@ -552,17 +550,6 @@ impl> ConsensusTaskState tracing::trace!("Failed to update view; error = {e}"); return; } - - broadcast_event( - Event { - view_number: old_view_number, - event: EventType::ViewFinished { - view_number: old_view_number, - }, - }, - &self.output_event_stream, - ) - .await; } HotShotEvent::Timeout(view) => { let view = *view; diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index ec8a56b831..0269cb997d 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -5,17 +5,15 @@ use anyhow::{ensure, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use hotshot_types::{ consensus::Consensus, - traits::node_implementation::{ConsensusTime, NodeType}, + event::{Event, EventType}, + traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; use tracing::{debug, error}; use crate::{ + consensus::helpers::TemporaryProposalRecvCombinedType, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; @@ -31,50 +29,62 @@ pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; /// /// # Errors /// Returns an [`anyhow::Error`] when the new view is not greater than the current view. -pub(crate) async fn update_view( +pub(crate) async fn update_view>( + task_state: &mut TemporaryProposalRecvCombinedType, new_view: TYPES::Time, event_stream: &Sender>>, - timeout: u64, consensus: Arc>>, - cur_view: &mut TYPES::Time, - timeout_task: &mut Option>, send_view_change_event: bool, ) -> Result<()> { ensure!( - new_view > *cur_view, + new_view > task_state.cur_view, "New view is not greater than our current view" ); - debug!("Updating view from {} to {}", **cur_view, *new_view); + let old_view = task_state.cur_view; - if **cur_view / 100 != *new_view / 100 { + debug!("Updating view from {} to {}", *old_view, *new_view); + + if *old_view / 100 != *new_view / 100 { // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): // switch to info! when INFO logs become less cluttered error!("Progress: entered view {:>6}", *new_view); } // cancel the old timeout task - if let Some(timeout_task) = timeout_task.take() { + if let Some(timeout_task) = task_state.timeout_task.take() { cancel_task(timeout_task).await; } - *cur_view = new_view; + task_state.cur_view = new_view; // The next view is just the current view + 1 - let next_view = *cur_view + 1; + let next_view = task_state.cur_view + 1; if send_view_change_event { - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream).await; + futures::join! { + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), + broadcast_event( + Event { + view_number: old_view, + event: EventType::ViewFinished { + view_number: old_view, + }, + }, + &task_state.output_event_stream, + ) + }; } // Spawn a timeout task if we did actually update view - *timeout_task = Some(async_spawn({ + task_state.timeout_task = Some(async_spawn({ let stream = event_stream.clone(); // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = next_view; + let timeout = Duration::from_millis(task_state.timeout); async move { - async_sleep(Duration::from_millis(timeout)).await; + async_sleep(timeout).await; broadcast_event( Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), &stream, @@ -86,15 +96,15 @@ pub(crate) async fn update_view( consensus .metrics .current_view - .set(usize::try_from(cur_view.u64()).unwrap()); + .set(usize::try_from(task_state.cur_view.u64()).unwrap()); // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(cur_view.u64()).unwrap() + if usize::try_from(task_state.cur_view.u64()).unwrap() > usize::try_from(consensus.last_decided_view().u64()).unwrap() { consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(cur_view.u64()).unwrap() + usize::try_from(task_state.cur_view.u64()).unwrap() - usize::try_from(consensus.last_decided_view().u64()).unwrap(), ); } From c4e414d95d9496d59eb9cae68fcb86e998bdb6bd Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Tue, 21 May 2024 15:26:46 -0400 Subject: [PATCH 1043/1393] Use alternate display to log anyhow error (#3204) --- task-impls/src/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 966f0c8517..7e87c826df 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -274,7 +274,7 @@ impl< // We failed to get a block Ok(Err(err)) => { - tracing::warn!(%err, "Couldn't get a block"); + tracing::warn!("Couldn't get a block: {err:#}"); // pause a bit async_sleep(Duration::from_millis(100)).await; continue; From 87518f5b4c33d5e514025390185037be4acedbb6 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Wed, 22 May 2024 08:35:09 -0600 Subject: [PATCH 1044/1393] [Consensus2] - Decouple quorum proposal task (#3151) * init consensus2 from main * rollback, fix types, re-create module * add new parent leaf and state method * leaf chain traversal, new fields * fix build * partial quorum proposal validated event * clippy, fix build, add method * remove high qc blocking requirement * some working tests, almost done * migrate to using consensus internal state * test passes but doesnt decide * fix naming * fix task and get decide event * fix timeout test * fix view sync cert * temporarily ignore propose now * lint * remove error logs, actually propose correctly for view 1 * fix doc * fix all tests * fix error messages * naming changes * fix build * remove comment * fix build and tests * move storage update * pr feedback * more pr comments, collect garbage, fix bug in view update * fix bug with early return * comments, move function, remove log * comment * walk leaves by number instead * fill out context message * fix lints --- hotshot/src/tasks/task_state.rs | 1 + task-impls/src/consensus/helpers.rs | 62 +-- task-impls/src/events.rs | 13 +- .../src/quorum_proposal/dependency_handle.rs | 274 ++++++++++ task-impls/src/quorum_proposal/handlers.rs | 325 ++++++++++++ .../mod.rs} | 393 ++++++--------- task-impls/src/quorum_proposal_recv.rs | 14 +- task-impls/src/quorum_vote.rs | 41 +- testing/tests/tests_1/quorum_proposal_task.rs | 472 +++++++++++++++--- 9 files changed, 1208 insertions(+), 387 deletions(-) create mode 100644 task-impls/src/quorum_proposal/dependency_handle.rs create mode 100644 task-impls/src/quorum_proposal/handlers.rs rename task-impls/src/{quorum_proposal.rs => quorum_proposal/mod.rs} (63%) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 57810f82f3..0d6217ec35 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -297,6 +297,7 @@ impl> CreateTaskState spawned_tasks: BTreeMap::new(), instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, + version: *handle.hotshot.version.read().await, } } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 9c14f75c9c..630849d393 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -39,11 +39,8 @@ use tokio::task::JoinHandle; use tracing::{debug, error, info, warn}; use vbs::version::Version; -#[cfg(not(feature = "dependency-tasks"))] use super::ConsensusTaskState; #[cfg(feature = "dependency-tasks")] -use crate::quorum_proposal::QuorumProposalTaskState; -#[cfg(feature = "dependency-tasks")] use crate::quorum_proposal_recv::QuorumProposalRecvTaskState; use crate::{ consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, @@ -317,20 +314,20 @@ pub fn validate_proposal_view_and_certs( /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. pub(crate) async fn parent_leaf_and_state( cur_view: TYPES::Time, - view: TYPES::Time, + view_number: TYPES::Time, quorum_membership: Arc, public_key: TYPES::SignatureKey, consensus: Arc>>, ) -> Result<(Leaf, Arc<::ValidatedState>)> { ensure!( - quorum_membership.leader(view) == public_key, - "Somehow we formed a QC but are not the leader for the next view {view:?}", + quorum_membership.leader(view_number) == public_key, + "Somehow we formed a QC but are not the leader for the next view {view_number:?}", ); - let consensus = consensus.read().await; - let parent_view_number = &consensus.high_qc().view_number(); - let parent_view = consensus.validated_state_map().get(parent_view_number).context( - format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", **parent_view_number) + let consensus_reader = consensus.read().await; + let parent_view_number = consensus_reader.high_qc().view_number(); + let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( + format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) )?; // Leaf hash in view inner does not match high qc hash - Why? @@ -338,21 +335,21 @@ pub(crate) async fn parent_leaf_and_state( format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") )?; - if leaf_commitment != consensus.high_qc().date().leaf_commit { + if leaf_commitment != consensus_reader.high_qc().date().leaf_commit { // NOTE: This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, - consensus.high_qc().date().leaf_commit + consensus_reader.high_qc().date().leaf_commit ); } - let leaf = consensus + let leaf = consensus_reader .saved_leaves() .get(&leaf_commitment) .context("Failed to find high QC of parent")?; - let reached_decided = leaf.view_number() == consensus.last_decided_view(); + let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); let parent_leaf = leaf.clone(); let original_parent_hash = parent_leaf.commit(); let mut next_parent_hash = original_parent_hash; @@ -360,8 +357,8 @@ pub(crate) async fn parent_leaf_and_state( // Walk back until we find a decide if !reached_decided { debug!("We have not reached decide from view {:?}", cur_view); - while let Some(next_parent_leaf) = consensus.saved_leaves().get(&next_parent_hash) { - if next_parent_leaf.view_number() <= consensus.last_decided_view() { + while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { + if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { break; } next_parent_hash = next_parent_leaf.parent_commitment(); @@ -799,20 +796,12 @@ pub async fn handle_quorum_proposal_recv = QuorumProposalTaskState; - -/// TEMPORARY TYPE: Consensus task state when not using dependency tasks -#[cfg(not(feature = "dependency-tasks"))] -type TemporaryProposalValidatedCombinedType = ConsensusTaskState; - /// Handle `QuorumProposalValidated` event content and submit a proposal if possible. #[allow(clippy::too_many_lines)] pub async fn handle_quorum_proposal_validated>( proposal: &QuorumProposal, event_stream: Sender>>, - task_state: &mut TemporaryProposalValidatedCombinedType, + task_state: &mut ConsensusTaskState, ) -> Result<()> { let consensus = task_state.consensus.read().await; let view = proposal.view_number(); @@ -879,15 +868,7 @@ pub async fn handle_quorum_proposal_validated { /* Consensus State Update Events */ /// A undecided view has been created and added to the validated state storage. - ValidatedStateUpdate(TYPES::Time, View), + ValidatedStateUpdated(TYPES::Time, View), - /// A new anchor view has been successfully reached by this node. + /// A new locked view has been created (2-chain) + LockedViewUpdated(TYPES::Time), + + /// A new anchor view has been successfully reached by this node (3-chain). LastDecidedViewUpdated(TYPES::Time), + + /// A new high_qc has been reached by this node. + UpdateHighQc(QuorumCertificate), + + /// A new undecided view has been proposed. + NewUndecidedView(Leaf), } diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs new file mode 100644 index 0000000000..450791a21a --- /dev/null +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -0,0 +1,274 @@ +//! This module holds the dependency task for the QuorumProposalTask. It is spawned whenever an event that could +//! initiate a proposal occurs. + +use std::{marker::PhantomData, sync::Arc, time::Duration}; + +use anyhow::{ensure, Context, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::async_sleep; +use async_lock::RwLock; +use committable::Committable; +use hotshot_task::dependency_task::HandleDepOutput; +use hotshot_types::{ + consensus::{CommitmentAndMetadata, Consensus}, + data::{Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence}, + message::Proposal, + traits::{ + block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, + }, +}; +use tracing::{debug, error}; +use vbs::version::Version; + +use crate::{ + consensus::helpers::parent_leaf_and_state, events::HotShotEvent, helpers::broadcast_event, +}; + +/// Proposal dependency types. These types represent events that precipitate a proposal. +#[derive(PartialEq, Debug)] +pub(crate) enum ProposalDependency { + /// For the `SendPayloadCommitmentAndMetadata` event. + PayloadAndMetadata, + + /// For the `QcFormed` event. + QC, + + /// For the `ViewSyncFinalizeCertificate2Recv` event. + ViewSyncCert, + + /// For the `QcFormed` event timeout branch. + TimeoutCert, + + /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. + Proposal, + + /// For the `ProposeNow` event. + ProposeNow, + + /// For the `VidShareValidated` event. + VidShare, + + /// For the `ValidatedStateUpdated` event. + ValidatedState, +} + +/// Handler for the proposal dependency +pub(crate) struct ProposalDependencyHandle { + /// Latest view number that has been proposed for (proxy for cur_view). + pub latest_proposed_view: TYPES::Time, + + /// The view number to propose for. + pub view_number: TYPES::Time, + + /// The event sender. + pub sender: Sender>>, + + /// Immutable instance state + pub instance_state: Arc, + + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + + /// Our public key + pub public_key: TYPES::SignatureKey, + + /// Our Private Key + pub private_key: ::PrivateKey, + + /// Round start delay from config, in milliseconds. + pub round_start_delay: u64, + + /// Shared consensus task state + pub consensus: Arc>>, + + /// The current version of consensus + pub version: Version, +} + +impl ProposalDependencyHandle { + /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperseShare`] + /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], + /// with optional [`ViewChangeEvidence`]. + async fn publish_proposal( + &self, + commitment_and_metadata: CommitmentAndMetadata, + vid_share: Proposal>, + view_change_evidence: Option>, + ) -> Result<()> { + let (parent_leaf, state) = parent_leaf_and_state( + self.latest_proposed_view, + self.view_number, + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + Arc::clone(&self.consensus), + ) + .await?; + + let proposal_certificate = view_change_evidence + .as_ref() + .filter(|cert| cert.is_valid_for_view(&self.view_number)) + .cloned(); + + ensure!( + commitment_and_metadata.block_view == self.view_number, + "Cannot propose because our VID payload commitment and metadata is for an older view." + ); + + let block_header = TYPES::BlockHeader::new( + state.as_ref(), + self.instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fee, + vid_share.data.common.clone(), + self.version, + ) + .await + .context("Failed to construct block header")?; + + let proposal = QuorumProposal { + block_header, + view_number: self.view_number, + justify_qc: self.consensus.read().await.high_qc().clone(), + proposal_certificate, + upgrade_certificate: None, + }; + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + ensure!( + proposed_leaf.parent_commitment() == parent_leaf.commit(), + "Proposed leaf parent does not equal high qc" + ); + + let signature = + TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) + .context("Failed to compute proposed_leaf.commit()")?; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!( + "Sending proposal for view {:?}", + proposed_leaf.view_number(), + ); + + self.consensus + .write() + .await + .update_last_proposed_view(self.view_number)?; + async_sleep(Duration::from_millis(self.round_start_delay)).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + self.public_key.clone(), + )), + &self.sender, + ) + .await; + + Ok(()) + } +} +impl HandleDepOutput for ProposalDependencyHandle { + type Output = Vec>>>>; + + #[allow(clippy::no_effect_underscore_binding)] + async fn handle_dep_result(self, res: Self::Output) { + let mut payload_commitment = None; + let mut commit_and_metadata: Option> = None; + let mut timeout_certificate = None; + let mut view_sync_finalize_cert = None; + let mut vid_share = None; + for event in res.iter().flatten().flatten() { + match event.as_ref() { + HotShotEvent::QuorumProposalValidated(proposal, _) => { + let proposal_payload_comm = proposal.block_header.payload_commitment(); + if let Some(comm) = payload_commitment { + if proposal_payload_comm != comm { + return; + } + } else { + payload_commitment = Some(proposal_payload_comm); + } + } + HotShotEvent::SendPayloadCommitmentAndMetadata( + payload_commitment, + builder_commitment, + metadata, + view, + fee, + ) => { + commit_and_metadata = Some(CommitmentAndMetadata { + commitment: *payload_commitment, + builder_commitment: builder_commitment.clone(), + metadata: metadata.clone(), + fee: fee.clone(), + block_view: *view, + }); + } + HotShotEvent::QcFormed(cert) => match cert { + either::Right(timeout) => { + timeout_certificate = Some(timeout.clone()); + } + either::Left(_) => { + // Handled by the HighQcUpdated event. + } + }, + HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + view_sync_finalize_cert = Some(cert.clone()); + } + HotShotEvent::ProposeNow(_, pdd) => { + commit_and_metadata = Some(pdd.commitment_and_metadata.clone()); + match &pdd.secondary_proposal_information { + hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate(quorum_proposal, _) => { + payload_commitment = Some(quorum_proposal.block_header.payload_commitment()); + }, + hotshot_types::consensus::SecondaryProposalInformation::Timeout(tc) => { + timeout_certificate = Some(tc.clone()); + } + hotshot_types::consensus::SecondaryProposalInformation::ViewSync(vsc) => { + view_sync_finalize_cert = Some(vsc.clone()); + }, + } + } + HotShotEvent::VidShareValidated(share) => { + vid_share = Some(share.clone()); + } + _ => {} + } + } + + if commit_and_metadata.is_none() { + error!( + "Somehow completed the proposal dependency task without a commitment and metadata" + ); + return; + } + + if vid_share.is_none() { + error!("Somehow completed the proposal dependency task without a VID share"); + return; + } + + let proposal_cert = if let Some(view_sync_cert) = view_sync_finalize_cert { + Some(ViewChangeEvidence::ViewSync(view_sync_cert)) + } else { + timeout_certificate.map(ViewChangeEvidence::Timeout) + }; + + if let Err(e) = self + .publish_proposal( + commit_and_metadata.unwrap(), + vid_share.unwrap(), + proposal_cert, + ) + .await + { + error!("Failed to publish proposal; error = {e}"); + } + } +} diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs new file mode 100644 index 0000000000..0ca678841e --- /dev/null +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -0,0 +1,325 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use anyhow::{bail, Context, Result}; +use async_broadcast::Sender; +use chrono::Utc; +use committable::Commitment; +use hotshot_types::{ + data::{Leaf, QuorumProposal}, + event::{Event, EventType, LeafInfo}, + simple_certificate::QuorumCertificate, + traits::{ + block_contents::BlockHeader, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + BlockPayload, + }, + vote::HasViewNumber, +}; +use tracing::debug; + +use crate::{events::HotShotEvent, helpers::broadcast_event}; + +use super::QuorumProposalTaskState; + +/// Helper type to give names and to the output values of the leaf chain traversal operation. +#[derive(Debug)] +struct LeafChainTraversalOutcome { + /// The new locked view obtained from a 2 chain starting from the proposal's parent. + pub new_locked_view_number: Option, + + /// The new decided view obtained from a 3 chain starting from the proposal's parent. + pub new_decided_view_number: Option, + + /// The qc for the decided chain. + pub new_decide_qc: Option>, + + /// The decided leaves with corresponding validated state and VID info. + pub leaf_views: Vec>, + + /// The decided leaves. + pub leaves_decided: Vec>, + + /// The transactions in the block payload for each leaf. + pub included_txns: HashSet::Transaction>>, + // TODO - add upgrade cert here and fill +} + +impl Default for LeafChainTraversalOutcome { + /// The default method for this type is to set all of the returned values to `None`. + fn default() -> Self { + Self { + new_locked_view_number: None, + new_decided_view_number: None, + new_decide_qc: None, + leaf_views: Vec::new(), + leaves_decided: Vec::new(), + included_txns: HashSet::new(), + } + } +} + +/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin +/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is +/// one view newer), then we begin attempting to form the chain. This is a direct impl from +/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: +/// +/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, +/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, +/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. +/// It forms a Three-Chain, if b'' forms a Two-Chain. +/// +/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit +/// is reached when we have a two chain, and a decide is reached when we have a three chain. +/// +/// # Example +/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, +/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the +/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes +/// 2-3-5. +/// +/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This +/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the +/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at +/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, +/// and out new locked view will be 6. +/// +/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and +/// the anchor view will be set to view 6, with the locked view as view 7. +async fn visit_leaf_chain>( + proposal: &QuorumProposal, + task_state: &QuorumProposalTaskState, +) -> Result> { + let proposal_view_number = proposal.view_number(); + let proposal_parent_view_number = proposal.justify_qc.view_number(); + + // This is the output return type object whose members will be mutated as we traverse. + let mut ret = LeafChainTraversalOutcome::default(); + + // Are these views consecutive (1-chain) + if proposal_parent_view_number + 1 != proposal_view_number { + // Since they aren't we can return early before we do anything else. + return Ok(ret); + } + + // Unpacking here prevents the need to endlessly call the function. These values don't change during + // the execution of this code. + let consensus_reader = task_state.consensus.read().await; + let validated_state_map = consensus_reader.validated_state_map(); + let saved_leaves = consensus_reader.saved_leaves(); + let last_decided_view = consensus_reader.last_decided_view(); + let saved_payloads = consensus_reader.saved_payloads(); + let vid_shares = consensus_reader.vid_shares(); + + // We are in at least a 1-chain, so we start from here. + let mut current_chain_length: usize = 1; + + // Get the state so we can traverse the chain to see if we have a 2 or 3 chain. + let mut view_number = proposal_parent_view_number; + + // The most recently seen view number (the view number of the last leaf we saw). + let mut last_seen_view_number = proposal_view_number; + + while let Some(leaf_state) = validated_state_map.get(&view_number) { + let leaf_commitment = leaf_state + .leaf_commitment() + .context("Failed to find the leaf commitment")?; + let leaf = saved_leaves + .get(&leaf_commitment) + .context("Failed to find the saved leaf")?; + + // These are all just checks to make sure we have what we need to proceed. + let current_leaf_view_number = leaf.view_number(); + + if let (Some(state), delta) = leaf_state.state_and_delta() { + // Exit if we've reached the last anchor view. + if current_leaf_view_number == last_decided_view { + return Ok(ret); + } + + // IMPORTANT: This is the logic from the paper, and is the most critical part of this function. + if ret.new_decided_view_number.is_none() { + // Does this leaf extend the chain? + if last_seen_view_number == leaf.view_number() + 1 { + last_seen_view_number = leaf.view_number(); + current_chain_length += 1; + + // We've got a 2 chain, update the locked view. + if current_chain_length == 2 { + ret.new_locked_view_number = Some(leaf.view_number()); + + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + ret.new_decide_qc = Some(leaf.justify_qc().clone()); + } else if current_chain_length == 3 { + // We've got the 3-chain, which means we can successfully decide on this leaf. + ret.new_decided_view_number = Some(leaf.view_number()); + } + } else { + // Bail out with empty values, but this is not necessarily an error, but we don't have a + // new chain extension. + return Ok(ret); + } + } + + // If we got a 3-chain, we can start our state updates, garbage collection, etc + if let Some(decided_view) = ret.new_decided_view_number { + let mut leaf = leaf.clone(); + if leaf.view_number() == decided_view { + consensus_reader + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height()).unwrap_or(0)); + } + + // TODO - Upgrade certificates + // if let Some(cert) = leaf.upgrade_certificate() { + // ensure!( + // cert.data.decide_by >= proposal_view_number, + // "Failed to decide an upgrade certificate in time. Ignoring." + // ); + // task_state.decided_upgrade_cert = Some(cert.clone()); + // } + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(encoded_txns) = saved_payloads.get(&leaf.view_number()) { + let payload = + BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); + + leaf.fill_block_payload_unchecked(payload); + } + + let vid_share = vid_shares + .get(&leaf.view_number()) + .unwrap_or(&HashMap::new()) + .get(&task_state.public_key) + .cloned() + .map(|prop| prop.data); + + // Add our data into a new `LeafInfo` + ret.leaf_views.push(LeafInfo::new( + leaf.clone(), + Arc::clone(&state), + delta.clone(), + vid_share, + )); + ret.leaves_decided.push(leaf.clone()); + if let Some(ref payload) = leaf.block_payload() { + for txn in payload.transaction_commitments(leaf.block_header().metadata()) { + ret.included_txns.insert(txn); + } + } + } + } else { + bail!( + "Validated state and delta do not exist for the leaf for view {current_leaf_view_number:?}" + ) + }; + + // Move on to the next leaf at the end. + view_number = leaf.justify_qc().view_number(); + } + + Ok(ret) +} + +/// Handles the `QuorumProposalValidated` event. +pub(crate) async fn handle_quorum_proposal_validated< + TYPES: NodeType, + I: NodeImplementation, +>( + proposal: &QuorumProposal, + sender: &Sender>>, + task_state: &mut QuorumProposalTaskState, +) -> Result<()> { + let LeafChainTraversalOutcome { + new_locked_view_number, + new_decided_view_number, + new_decide_qc, + leaf_views, + leaves_decided, + included_txns, + } = visit_leaf_chain(proposal, task_state).await?; + + let included_txns = if new_decided_view_number.is_some() { + included_txns + } else { + HashSet::new() + }; + + let mut consensus_writer = task_state.consensus.write().await; + if let Some(locked_view_number) = new_locked_view_number { + // Broadcast the locked view update. + broadcast_event( + HotShotEvent::LockedViewUpdated(locked_view_number).into(), + sender, + ) + .await; + + consensus_writer.update_locked_view(locked_view_number)?; + } + + // TODO - update decided upgrade cert + + #[allow(clippy::cast_precision_loss)] + if let Some(decided_view_number) = new_decided_view_number { + // Bring in the cleanup crew. When a new decide is indeed valid, we need to clear out old memory. + + let old_decided_view = consensus_writer.last_decided_view(); + consensus_writer.collect_garbage(old_decided_view, decided_view_number); + + // Set the new decided view. + consensus_writer.update_last_decided_view(decided_view_number)?; + broadcast_event( + HotShotEvent::LastDecidedViewUpdated(decided_view_number).into(), + sender, + ) + .await; + + consensus_writer + .metrics + .last_decided_time + .set(Utc::now().timestamp().try_into().unwrap()); + consensus_writer.metrics.invalid_qc.set(0); + consensus_writer + .metrics + .last_decided_view + .set(usize::try_from(consensus_writer.last_decided_view().u64()).unwrap()); + let cur_number_of_views_per_decide_event = + *task_state.latest_proposed_view - consensus_writer.last_decided_view().u64(); + consensus_writer + .metrics + .number_of_views_per_decide_event + .add_point(cur_number_of_views_per_decide_event as f64); + + debug!( + "Sending Decide for view {:?}", + consensus_writer.last_decided_view() + ); + + // We don't need to hold this while we broadcast + drop(consensus_writer); + + // First, send an update to everyone saying that we've reached a decide + broadcast_event( + Event { + view_number: decided_view_number, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + // This is never *not* none if we've reached a new decide, so this is safe to unwrap. + qc: Arc::new(new_decide_qc.unwrap()), + block_size: Some(included_txns.len().try_into().unwrap()), + }, + }, + &task_state.output_event_stream, + ) + .await; + + broadcast_event(Arc::new(HotShotEvent::LeafDecided(leaves_decided)), sender).await; + debug!("Successfully sent decide event"); + } + + Ok(()) +} diff --git a/task-impls/src/quorum_proposal.rs b/task-impls/src/quorum_proposal/mod.rs similarity index 63% rename from task-impls/src/quorum_proposal.rs rename to task-impls/src/quorum_proposal/mod.rs index 115a659298..1ff461fe5d 100644 --- a/task-impls/src/quorum_proposal.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -7,199 +7,39 @@ use async_std::task::JoinHandle; use either::Either; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, - dependency_task::{DependencyTask, HandleDepOutput}, + dependency_task::DependencyTask, task::{Task, TaskState}, }; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus}, + consensus::Consensus, event::Event, traits::{ - block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, }, - vote::Certificate, + vote::{Certificate, HasViewNumber}, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, instrument, warn}; use vbs::version::Version; -#[cfg(feature = "dependency-tasks")] -use crate::consensus::helpers::handle_quorum_proposal_validated; use crate::{ - consensus::helpers::publish_proposal_if_able, events::HotShotEvent, helpers::cancel_task, + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, }; -/// Proposal dependency types. These types represent events that precipitate a proposal. -#[derive(PartialEq, Debug)] -enum ProposalDependency { - /// For the `SendPayloadCommitmentAndMetadata` event. - PayloadAndMetadata, - - /// For the `QcFormed` event. - QC, - - /// For the `ViewSyncFinalizeCertificate2Recv` event. - ViewSyncCert, - - /// For the `QcFormed` event timeout branch. - TimeoutCert, - - /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. - Proposal, - - /// For the `ProposeNow` event. - ProposeNow, -} - -/// Handler for the proposal dependency -struct ProposalDependencyHandle { - /// Latest view number that has been proposed for. - latest_proposed_view: TYPES::Time, - - /// The view number to propose for. - view_number: TYPES::Time, - - /// The event sender. - sender: Sender>>, - - /// Reference to consensus. The replica will require a write lock on this. - consensus: Arc>>, - - /// Immutable instance state - instance_state: Arc, - - /// Output events to application - #[allow(dead_code)] - output_event_stream: async_broadcast::Sender>, - - /// Membership for Timeout votes/certs - #[allow(dead_code)] - timeout_membership: Arc, - - /// Membership for Quorum Certs/votes - quorum_membership: Arc, - - /// Our public key - public_key: TYPES::SignatureKey, - - /// Our Private Key - private_key: ::PrivateKey, - - /// View timeout from config. - #[allow(dead_code)] - timeout: u64, - - /// Round start delay from config, in milliseconds. - round_start_delay: u64, - - /// The node's id - #[allow(dead_code)] - id: u64, - - /// Current version of consensus - version: Version, -} - -impl HandleDepOutput for ProposalDependencyHandle { - type Output = Vec>>>>; - - #[allow(clippy::no_effect_underscore_binding)] - async fn handle_dep_result(self, res: Self::Output) { - let mut payload_commitment = None; - let mut commit_and_metadata: Option> = None; - let mut _quorum_certificate = None; - let mut _timeout_certificate = None; - let mut _view_sync_finalize_cert = None; - for event in res.iter().flatten().flatten() { - match event.as_ref() { - HotShotEvent::QuorumProposalValidated(proposal, _) => { - let proposal_payload_comm = proposal.block_header.payload_commitment(); - if let Some(comm) = payload_commitment { - if proposal_payload_comm != comm { - return; - } - } else { - payload_commitment = Some(proposal_payload_comm); - } - } - HotShotEvent::SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - metadata, - view, - fee, - ) => { - commit_and_metadata = Some(CommitmentAndMetadata { - commitment: *payload_commitment, - builder_commitment: builder_commitment.clone(), - metadata: metadata.clone(), - fee: fee.clone(), - block_view: *view, - }); - } - HotShotEvent::QcFormed(cert) => match cert { - either::Right(timeout) => { - _timeout_certificate = Some(timeout.clone()); - } - either::Left(qc) => { - _quorum_certificate = Some(qc.clone()); - } - }, - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { - _view_sync_finalize_cert = Some(cert.clone()); - } - HotShotEvent::ProposeNow(_, pdd) => { - commit_and_metadata = Some(pdd.commitment_and_metadata.clone()); - match &pdd.secondary_proposal_information { - hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate(quorum_proposal, quorum_certificate) => { - _quorum_certificate = Some(quorum_certificate.clone()); - payload_commitment = Some(quorum_proposal.block_header.payload_commitment()); - }, - hotshot_types::consensus::SecondaryProposalInformation::Timeout(tc) => { - _timeout_certificate = Some(tc.clone()); - } - hotshot_types::consensus::SecondaryProposalInformation::ViewSync(vsc) => { - _view_sync_finalize_cert = Some(vsc.clone()); - }, - } - } - _ => {} - } - } +use self::{ + dependency_handle::{ProposalDependency, ProposalDependencyHandle}, + handlers::handle_quorum_proposal_validated, +}; - if commit_and_metadata.is_none() { - error!( - "Somehow completed the proposal dependency task without a commitment and metadata" - ); - return; - } +mod dependency_handle; - if let Err(e) = publish_proposal_if_able( - self.latest_proposed_view, - self.view_number, - self.sender, - self.quorum_membership, - self.public_key, - self.private_key, - Arc::clone(&self.consensus), - self.round_start_delay, - None, - None, - commit_and_metadata, - None, - Arc::clone(&self.instance_state), - self.version, - ) - .await - { - error!(?e, "Failed to publish proposal"); - } - } -} +/// Event handlers for [`QuorumProposalTaskState`]. +mod handlers; /// The state for the quorum proposal task. pub struct QuorumProposalTaskState> { @@ -218,9 +58,6 @@ pub struct QuorumProposalTaskState /// Output events to application pub output_event_stream: async_broadcast::Sender>, - /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>, - /// Immutable instance state pub instance_state: Arc, @@ -248,8 +85,9 @@ pub struct QuorumProposalTaskState /// This node's storage ref pub storage: Arc>, - // /// most recent decided upgrade certificate - // pub decided_upgrade_cert: Option>, + /// Shared consensus task state + pub consensus: Arc>>, + /// The node's id pub id: u64, @@ -272,15 +110,15 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::QcFormed(either::Left(qc)) = event { - qc.view_number + 1 + if let HotShotEvent::UpdateHighQc(qc) = event { + qc.view_number() + 1 } else { return false; } } ProposalDependency::TimeoutCert => { if let HotShotEvent::QcFormed(either::Right(timeout)) = event { - timeout.view_number + timeout.view_number() + 1 } else { return false; } @@ -289,7 +127,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { - proposal.view_number + proposal.view_number() + 1 } else { return false; } @@ -307,18 +145,32 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::ProposeNow(view, _) = event { - *view + if let HotShotEvent::ProposeNow(view_number, _) = event { + *view_number + } else { + return false; + } + } + ProposalDependency::VidShare => { + if let HotShotEvent::VidShareValidated(vid_share) = event { + vid_share.data.view_number() + } else { + return false; + } + } + ProposalDependency::ValidatedState => { + if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { + *view_number + 1 } else { return false; } @@ -373,6 +225,18 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { - qc_dependency.mark_as_completed(event); + // qc_dependency.mark_as_completed(event); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { view_sync_dependency.mark_as_completed(event); } + HotShotEvent::VidShareValidated(_) => { + vid_share_dependency.mark_as_completed(event); + } + HotShotEvent::ValidatedStateUpdated(_, _) => { + validated_state_update_dependency.mark_as_completed(event); + } + HotShotEvent::UpdateHighQc(_) => { + qc_dependency.mark_as_completed(event); + } _ => {} }; @@ -423,6 +296,8 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState { self.version = *version; } - HotShotEvent::ProposeNow(view, _) => { + HotShotEvent::ProposeNow(view_number, _) => { self.create_dependency_task_if_new( - *view, + *view_number, event_receiver, event_sender, Arc::clone(&event), ); } - HotShotEvent::QcFormed(cert) => { - match cert.clone() { - either::Right(timeout_cert) => { - let view = timeout_cert.view_number + 1; - - self.create_dependency_task_if_new( - view, - event_receiver, - event_sender, - Arc::clone(&event), + HotShotEvent::QcFormed(cert) => match cert.clone() { + either::Right(timeout_cert) => { + let view_number = timeout_cert.view_number + 1; + + self.create_dependency_task_if_new( + view_number, + event_receiver, + event_sender, + Arc::clone(&event), + ); + } + either::Left(qc) => { + // Only update if the qc is from a newer view + let consensus_reader = self.consensus.read().await; + if qc.view_number <= consensus_reader.high_qc().view_number { + tracing::trace!( + "Received a QC for a view that was not > than our current high QC" ); } - either::Left(qc) => { - if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await - { - warn!("Failed to store High QC of QC we formed; error = {:?}", e); - } - let mut consensus = self.consensus.write().await; - if let Err(e) = consensus.update_high_qc(qc.clone()) { - tracing::trace!("{e:?}"); - } - - // We need to drop our handle here to make the borrow checker happy. - drop(consensus); - - let view = qc.view_number + 1; - - self.create_dependency_task_if_new( - view, - event_receiver, - event_sender, - Arc::clone(&event), - ); - } + // We need to gate on this data actually existing in the consensus shared state. + // So we broadcast here and handle *before* we make the task. + broadcast_event(HotShotEvent::UpdateHighQc(qc).into(), &event_sender).await; } - } + }, HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, _builder_commitment, _metadata, - view, + view_number, _fee, ) => { - let view = *view; + let view_number = *view_number; self.create_dependency_task_if_new( - view, + view_number, event_receiver, event_sender, Arc::clone(&event), @@ -590,21 +451,27 @@ impl> QuorumProposalTaskState { let new_view = proposal.view_number; + // All nodes get the latest proposed view as a proxy of `cur_view` of olde. if !self.update_latest_proposed_view(new_view).await { tracing::trace!("Failed to update latest proposed view"); return; } + // Handle the event before creating the dependency task. if let Err(e) = - handle_quorum_proposal_validated(proposal, event_sender.clone(), self).await + handle_quorum_proposal_validated(proposal, &event_sender, self).await { debug!("Failed to handle QuorumProposalValidated event; error = {e:#}"); } @@ -623,6 +490,55 @@ impl> QuorumProposalTaskState { + let view_number = vid_share.data.view_number(); + + // Update the vid shares map if we need to include the new value. + let share = vid_share.clone(); + self.consensus + .write() + .await + .update_vid_shares(view_number, share.clone()); + + self.create_dependency_task_if_new( + view_number, + event_receiver, + event_sender, + Arc::clone(&event), + ); + } + HotShotEvent::ValidatedStateUpdated(view_number, view) => { + // Update the internal validated state map. + self.consensus + .write() + .await + .update_validated_state_map(*view_number, view.clone()); + + self.create_dependency_task_if_new( + *view_number + 1, + event_receiver, + event_sender, + Arc::clone(&event), + ); + } + HotShotEvent::UpdateHighQc(qc) => { + // First, update the high QC. + if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { + tracing::trace!("Failed to update high qc; error = {e}"); + } + + if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { + warn!("Failed to store High QC of QC we formed; error = {:?}", e); + } + + let view_number = qc.view_number() + 1; + self.create_dependency_task_if_new( + view_number, + event_receiver, + event_sender, + Arc::clone(&event), + ); + } _ => {} } } @@ -633,7 +549,7 @@ impl> TaskState { type Event = Arc>; type Output = (); - fn filter(&self, event: &Arc>) -> bool { + fn filter(&self, event: &Self::Event) -> bool { !matches!( event.as_ref(), HotShotEvent::QuorumProposalValidated(..) @@ -642,7 +558,10 @@ impl> TaskState | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) | HotShotEvent::ProposeNow(..) | HotShotEvent::QuorumProposalSend(..) - | HotShotEvent::Shutdown, + | HotShotEvent::VidShareValidated(_) + | HotShotEvent::ValidatedStateUpdated(..) + | HotShotEvent::UpdateHighQc(_) + | HotShotEvent::Shutdown ) } async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs index 35f70c1f97..e9cca7f355 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv.rs @@ -22,6 +22,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; +use vbs::version::Version; use crate::{ consensus::helpers::{handle_quorum_proposal_recv, parent_leaf_and_state}, @@ -91,6 +92,9 @@ pub struct QuorumProposalRecvTaskState> QuorumProposalRecvTaskState { @@ -116,7 +120,15 @@ impl> QuorumProposalRecvTaskState< ) { #[cfg(feature = "dependency-tasks")] if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { - match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self).await { + match handle_quorum_proposal_recv( + proposal, + sender, + event_stream.clone(), + self, + self.version, + ) + .await + { Ok(Some(current_proposal)) => { // Build the parent leaf since we didn't find it during the proposal check. let parent_leaf = match parent_leaf_and_state( diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index cc9505bd16..622cb99309 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -145,26 +145,27 @@ impl + 'static> HandleDepOutput ) .await; - #[cfg(feature = "dependency-tasks")] - { - let Some(proposal) = cur_proposal else { - error!("No proposal received, but it should be."); - return; - }; - // For this vote task, we'll update the state in storage without voting in this function, - // then vote later. - update_state_and_vote_if_able::( - self.view_number, - proposal, - self.public_key.clone(), - self.consensus, - Arc::clone(&self.storage), - self.quorum_membership, - self.instance_state, - PhantomData, - ) - .await; - } + // TODO + // #[cfg(feature = "dependency-tasks")] + // { + // let Some(proposal) = cur_proposal else { + // error!("No proposal received, but it should be."); + // return; + // }; + // // For this vote task, we'll update the state in storage without voting in this function, + // // then vote later. + // update_state_and_vote_if_able::( + // self.view_number, + // proposal, + // self.public_key.clone(), + // self.consensus, + // Arc::clone(&self.storage), + // self.quorum_membership, + // self.instance_state, + // PhantomData, + // ) + // .await; + // } // Create and send the vote. let Some(leaf) = leaf else { diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index a618ea9678..fba97696f5 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,29 +1,39 @@ #![cfg(feature = "dependency-tasks")] -use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use std::sync::Arc; + +use committable::Committable; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, +}; +use hotshot_task_impls::{ + events::HotShotEvent::{self, *}, + quorum_proposal::QuorumProposalTaskState, }; -use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ - predicates::event::quorum_proposal_send, + predicates::{ + event::{exact, leaf_decided, quorum_proposal_send}, + Predicate, + }, script::{run_test_script, TestScriptStage}, - task_helpers::{build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number}, + task_helpers::{ + build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number, vid_share, + }, view_generator::TestViewGenerator, }; use hotshot_types::{ consensus::{CommitmentAndMetadata, ProposalDependencyData}, - data::{null_block, VidDisperseShare, ViewChangeEvidence, ViewNumber}, - message::Proposal, + data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData, ViewSyncFinalizeVote}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, }, - utils::BuilderCommitment, + utils::{BuilderCommitment, View, ViewInner}, vid::VidSchemeType, }; use jf_vid::VidScheme; @@ -40,28 +50,21 @@ fn make_payload_commitment( vid.commit_only(&encoded_transactions).unwrap() } -async fn insert_vid_shares_for_view( - view: ::Time, - handle: &SystemContextHandle, - vid: ( - Vec>>, - ::SignatureKey, - ), -) { - let consensus = handle.consensus(); - let mut consensus = consensus.write().await; - - // `create_and_send_proposal` depends on the `vid_shares` obtaining a vid dispersal. - // to avoid needing to spin up the vote task, we can just insert it in here. - consensus.update_vid_shares(view, vid.0[0].clone()); +fn create_fake_view_with_leaf(leaf: Leaf) -> View { + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state: TestValidatedState::default().into(), + delta: None, + }, + } } #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_1() { - use hotshot_example_types::block_types::TestMetadata; - use hotshot_types::data::null_block; + use hotshot_types::simple_certificate::QuorumCertificate; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -79,21 +82,29 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let mut leaders = Vec::new(); let mut leaves = Vec::new(); let mut vids = Vec::new(); + let consensus = handle.hotshot.consensus(); + let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); vids.push(view.vid_proposal.clone()); - } - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[0].clone()).await; + // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals + // to make sure they show up during tests. + consensus_writer + .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + } + drop(consensus_writer); - let cert = proposals[0].data.justify_qc.clone(); + // We must send the genesis cert here to initialize hotshot successfully. + let genesis_cert = QuorumCertificate::genesis(&*handle.hotshot.instance_state()); + let genesis_leaf = Leaf::genesis(&*handle.hotshot.instance_state()); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let view = TestScriptStage { inputs: vec![ - QcFormed(either::Left(cert.clone())), + QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -102,8 +113,13 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), ), + VidShareValidated(vid_share(&vids[0].0, handle.public_key())), + ValidatedStateUpdated(ViewNumber::new(0), create_fake_view_with_leaf(genesis_leaf)), + ], + outputs: vec![ + exact(UpdateHighQc(genesis_cert.clone())), + quorum_proposal_send(), ], - outputs: vec![quorum_proposal_send()], asserts: vec![], }; @@ -118,11 +134,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { - use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; - use hotshot_types::{ - data::null_block, - utils::{View, ViewInner}, - }; + use hotshot_types::vote::HasViewNumber; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -132,73 +144,160 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut leaves = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(3) { + let consensus = handle.hotshot.consensus(); + let mut consensus_writer = consensus.write().await; + for view in (&mut generator).take(5) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); vids.push(view.vid_proposal.clone()); + + // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals + // to make sure they show up during tests. + consensus_writer + .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); } + drop(consensus_writer); - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[2].clone()).await; - let consensus = handle.consensus(); - let mut consensus = consensus.write().await; - - // `validate_proposal_safety_and_liveness` depends on the existence of prior values in the consensus - // state, but since we do not spin up the consensus task, these values must be manually filled - // out. - - // First, insert a parent view whose leaf commitment will be returned in the lower function - // call. - consensus.update_validated_state_map( - ViewNumber::new(2), - View { - view_inner: ViewInner::Leaf { - leaf: leaves[1].parent_commitment(), - state: TestValidatedState::default().into(), - delta: None, - }, - }, - ); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - // Match an entry into the saved leaves for the parent commitment, returning the generated leaf - // for this call. - consensus.update_saved_leaves(leaves[1].clone()); + // We need to handle the views where we aren't the leader to ensure that the states are + // updated properly. - // Release the write lock before proceeding with the test - drop(consensus); + let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_leaf = Leaf::genesis(&*handle.hotshot.instance_state()); - let cert = proposals[2].data.justify_qc.clone(); - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + let genesis_view = TestScriptStage { + inputs: vec![ + QcFormed(either::Left(genesis_cert.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(1)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(1), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[0].0, handle.public_key())), + ValidatedStateUpdated( + ViewNumber::new(0), + create_fake_view_with_leaf(genesis_leaf.clone()), + ), + ], + outputs: vec![exact(UpdateHighQc(genesis_cert.clone()))], + asserts: vec![], + }; - let view = TestScriptStage { + // We send all the events that we'd have otherwise received to ensure the states are updated. + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[0].data.clone(), leaves[0].clone()), + QcFormed(either::Left(proposals[1].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(2)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[1].0, handle.public_key())), + ValidatedStateUpdated( + proposals[0].data.view_number(), + create_fake_view_with_leaf(leaves[0].clone()), + ), + ], + outputs: vec![exact(UpdateHighQc(proposals[1].data.justify_qc.clone()))], + asserts: vec![], + }; + + // Proposing for this view since we've received a proposal for view 2. + let view_2 = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), - QcFormed(either::Left(cert.clone())), + QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - payload_commitment, + make_payload_commitment(&quorum_membership, ViewNumber::new(3)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(3), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[2].0, handle.public_key())), + ValidatedStateUpdated( + proposals[1].data.view_number(), + create_fake_view_with_leaf(leaves[1].clone()), + ), + ], + outputs: vec![ + exact(LockedViewUpdated(ViewNumber::new(1))), + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + quorum_proposal_send(), + ], + asserts: vec![], + }; + + // Now, let's verify that we get the decide on the 3-chain. + let view_3 = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[2].data.clone(), leaves[2].clone()), + QcFormed(either::Left(proposals[3].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(4)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(4), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[3].0, handle.public_key())), + ValidatedStateUpdated( + proposals[2].data.view_number(), + create_fake_view_with_leaf(leaves[2].clone()), + ), + ], + outputs: vec![exact(UpdateHighQc(proposals[3].data.justify_qc.clone()))], + asserts: vec![], + }; + + let view_4 = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[3].data.clone(), leaves[3].clone()), + QcFormed(either::Left(proposals[4].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(5)), builder_commitment, TestMetadata, - ViewNumber::new(node_id), + ViewNumber::new(5), null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), ), + VidShareValidated(vid_share(&vids[4].0, handle.public_key())), + ValidatedStateUpdated( + proposals[3].data.view_number(), + create_fake_view_with_leaf(leaves[3].clone()), + ), + ], + outputs: vec![ + exact(LockedViewUpdated(ViewNumber::new(3))), + exact(LastDecidedViewUpdated(ViewNumber::new(2))), + leaf_decided(), + exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), ], - outputs: vec![quorum_proposal_send()], asserts: vec![], }; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view]; + let script = vec![genesis_view, view_1, view_2, view_3, view_4]; run_test_script(script, quorum_proposal_task_state).await; } @@ -206,12 +305,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_qc_timeout() { - use hotshot_example_types::block_types::TestMetadata; - use hotshot_types::{data::null_block, simple_vote::TimeoutData}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let node_id = 2; + let node_id = 3; let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -224,29 +321,31 @@ async fn test_quorum_proposal_task_qc_timeout() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut vids = Vec::new(); + let mut leaves = Vec::new(); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); } let timeout_data = TimeoutData { view: ViewNumber::new(1), }; generator.add_timeout(timeout_data); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); } - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; // Get the proposal cert out for the view sync input let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { ViewChangeEvidence::Timeout(tc) => tc, _ => panic!("Found a View Sync Cert when there should have been a Timeout cert"), }; - // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct + // Run at view 2, propose at view 3. let view_2 = TestScriptStage { inputs: vec![ QcFormed(either::Right(cert.clone())), @@ -254,10 +353,15 @@ async fn test_quorum_proposal_task_qc_timeout() { payload_commitment, builder_commitment, TestMetadata, - ViewNumber::new(2), + ViewNumber::new(3), null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), ), + VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), + ValidatedStateUpdated( + ViewNumber::new(2), + create_fake_view_with_leaf(leaves[1].clone()), + ), ], outputs: vec![quorum_proposal_send()], asserts: vec![], @@ -295,10 +399,12 @@ async fn test_quorum_proposal_task_view_sync() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut vids = Vec::new(); + let mut leaves = Vec::new(); for view in (&mut generator).take(1) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); } let view_sync_finalize_data = ViewSyncFinalizeData { @@ -306,13 +412,13 @@ async fn test_quorum_proposal_task_view_sync() { round: ViewNumber::new(node_id), }; generator.add_view_sync_finalize(view_sync_finalize_data); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); } - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; // Get the proposal cert out for the view sync input let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { ViewChangeEvidence::ViewSync(vsc) => vsc, @@ -331,6 +437,11 @@ async fn test_quorum_proposal_task_view_sync() { null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) .unwrap(), ), + VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), + ValidatedStateUpdated( + ViewNumber::new(1), + create_fake_view_with_leaf(leaves[1].clone()), + ), ], outputs: vec![quorum_proposal_send()], asserts: vec![], @@ -343,6 +454,7 @@ async fn test_quorum_proposal_task_view_sync() { run_test_script(script, quorum_proposal_task_state).await; } +#[ignore] #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -399,12 +511,12 @@ async fn test_quorum_proposal_task_propose_now() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; let script = vec![view_qp]; run_test_script(script, quorum_proposal_task_state).await; } +#[ignore] #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -472,11 +584,12 @@ async fn test_quorum_proposal_task_propose_now_timeout() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; let script = vec![view_timeout]; run_test_script(script, quorum_proposal_task_state).await; } + +#[ignore] #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -546,7 +659,6 @@ async fn test_quorum_proposal_task_propose_now_view_sync() { let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - insert_vid_shares_for_view(ViewNumber::new(node_id), &handle, vids[1].clone()).await; let script = vec![view_view_sync]; run_test_script(script, quorum_proposal_task_state).await; @@ -594,3 +706,199 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let script = vec![view_2]; run_test_script(script, quorum_proposal_task_state).await; } + +fn generate_outputs( + chain_length: i32, + current_view_number: u64, +) -> Vec>>>> { + match chain_length { + // This is not - 2 because we start from the parent + 2 => vec![exact(LockedViewUpdated(ViewNumber::new( + current_view_number - 1, + )))], + // This is not - 3 because we start from the parent + 3 => vec![ + exact(LockedViewUpdated(ViewNumber::new(current_view_number - 1))), + exact(LastDecidedViewUpdated(ViewNumber::new( + current_view_number - 2, + ))), + leaf_decided(), + ], + _ => vec![], + } +} + +/// This test validates the the ascension of the leaf chain across a large input space with +/// consistently increasing inputs to ensure that decides and locked view updates +/// occur as expected. +/// +/// This test will never propose, instead, we focus exclusively on the processing of the +/// [`HotShotEvent::QuorumProposalValidated`] event in a number of different circumstances. We want to +/// guarantee that a particular space of outputs is generated. +/// +/// These outputs should be easy to run since we'll be deterministically incrementing our iterator from +/// 0..100 proposals, inserting the valid state into the map (hence "happy path"). Since we'll know ahead +/// of time, we can essentially anticipate the formation of a valid chain. +/// +/// The output sequence is essentially: +/// view 0/1 = No outputs +/// view 2 +/// ```rust +/// LockedViewUpdated(1) +/// ``` +/// +/// view 3 +/// ```rust +/// LockedViewUpdated(2) +/// LastDecidedViewUpdated(1) +/// LeafDecided() +/// ``` +/// +/// view i in 4..n +/// ```rust +/// LockedViewUpdated(i - 1) +/// LastDecidedViewUpdated(i - 2) +/// LeafDecided() +/// ``` +/// +/// Because we've inserted all of the valid data, the traversals should go exactly as we expect them to. +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_happy_path_leaf_ascension() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let node_id: usize = 1; + let handle = build_system_handle(node_id.try_into().unwrap()).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let mut generator = TestViewGenerator::generate(quorum_membership, da_membership); + + let mut current_chain_length = 0; + let mut script = Vec::new(); + for view_number in 1..100u64 { + current_chain_length += 1; + if current_chain_length > 3 { + current_chain_length = 3; + } + // This unwrap is safe here + let view = generator.next().unwrap(); + let proposal = view.quorum_proposal.clone(); + let leaf = view.leaf.clone(); + + // update the consensus shared state + { + let consensus = handle.consensus(); + let mut consensus_writer = consensus.write().await; + consensus_writer.update_validated_state_map( + ViewNumber::new(view_number), + create_fake_view_with_leaf(leaf.clone()), + ); + consensus_writer.update_saved_leaves(leaf.clone()); + consensus_writer.update_vid_shares( + ViewNumber::new(view_number), + view.vid_proposal.0[node_id].clone(), + ); + } + + let view = TestScriptStage { + inputs: vec![QuorumProposalValidated(proposal.data, leaf)], + outputs: generate_outputs(current_chain_length, view_number.try_into().unwrap()), + asserts: vec![], + }; + script.push(view); + } + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + run_test_script(script, quorum_proposal_task_state).await; +} + +/// This test non-deterministically injects faults into the leaf ascension process where we randomly +/// drop states, views, etc from the proposals to ensure that we get decide events only when a three +/// chain is detected. This is accomplished by simply looking up in the state map and checking if the +/// parents for a given node indeed exist and, if so, updating the current chain depending on how recent +/// the dropped parent was. +/// +/// We utilize the same method to generate the outputs in both cases since it's quite easy to get a predictable +/// output set depending on where we are in the chain. Again, we do *not* propose in this method and instead +/// verify that the leaf ascension is reliable. We also use non-determinism to make sure that our fault +/// injection is randomized to some degree. This helps smoke out corner cases (i.e. the start and end). +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let node_id: usize = 1; + let handle = build_system_handle(node_id.try_into().unwrap()).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let mut generator = TestViewGenerator::generate(quorum_membership, da_membership); + + let mut current_chain_length = 0; + let mut script = Vec::new(); + let mut dropped_views = Vec::new(); + for view_number in 1..15u64 { + current_chain_length += 1; + // If the chain keeps going, then let it keep going + if current_chain_length > 3 { + current_chain_length = 3; + } + // This unwrap is safe here + let view = generator.next().unwrap(); + let proposal = view.quorum_proposal.clone(); + let leaf = view.leaf.clone(); + + { + let consensus = handle.consensus(); + let mut consensus_writer = consensus.write().await; + + // Break the chain depending on the prior state. If the immediate parent is not found, we have a chain of + // 1, but, if it is, and the parent 2 away is not found, we have a 2 chain. + if consensus_writer + .validated_state_map() + .get(&ViewNumber::new(view_number - 1)) + .is_none() + { + current_chain_length = 1; + } else if view_number > 2 + && consensus_writer + .validated_state_map() + .get(&ViewNumber::new(view_number - 2)) + .is_none() + { + current_chain_length = 2; + } + + // Update the consensus shared state with a 10% failure rate + if rand::random::() < 0.9 { + // if view_number != 7 && view_number != 13 { + consensus_writer.update_validated_state_map( + ViewNumber::new(view_number), + create_fake_view_with_leaf(leaf.clone()), + ); + consensus_writer.update_saved_leaves(leaf.clone()); + consensus_writer.update_vid_shares( + ViewNumber::new(view_number), + view.vid_proposal.0[node_id].clone(), + ); + } else { + dropped_views.push(view_number); + } + } + + let view = TestScriptStage { + inputs: vec![QuorumProposalValidated(proposal.data, leaf)], + outputs: generate_outputs(current_chain_length, view_number.try_into().unwrap()), + asserts: vec![], + }; + script.push(view); + } + + let quorum_proposal_task_state = + QuorumProposalTaskState::::create_from(&handle).await; + run_test_script(script, quorum_proposal_task_state).await; +} From 92cb8d852906e6a35a10f5f1fce2f845ab1efa09 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 23 May 2024 12:39:57 -0400 Subject: [PATCH 1045/1393] Allow genesis to time out (#3206) * allow genesis to time out * some PR comments * timeout task to helper function * use start_view+1 * wait for networks to be ready before starting tasks * move initial timeout task spawn --- hotshot/src/tasks/task_state.rs | 27 +++++++++----- hotshot/src/types/handle.rs | 30 ++++++++++++++- task-impls/src/consensus/mod.rs | 2 +- task-impls/src/consensus/view_change.rs | 17 +++++---- task-impls/src/consensus2/handlers.rs | 16 ++++---- task-impls/src/consensus2/mod.rs | 2 +- task-impls/src/quorum_proposal/mod.rs | 2 +- task-impls/src/quorum_proposal_recv.rs | 2 +- testing/src/test_runner.rs | 49 +++++++++++++++++++------ 9 files changed, 106 insertions(+), 41 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 0d6217ec35..56ec2489c0 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,9 +1,3 @@ -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, - sync::{atomic::AtomicBool, Arc}, -}; - use async_trait::async_trait; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, @@ -16,6 +10,12 @@ use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + sync::{atomic::AtomicBool, Arc}, +}; + use vbs::version::StaticVersionType; use crate::types::SystemContextHandle; @@ -184,6 +184,7 @@ impl> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> ConsensusTaskState { let consensus = handle.hotshot.consensus(); + let timeout_task = handle.spawn_initial_timeout_task(); ConsensusTaskState { consensus, @@ -194,7 +195,7 @@ impl> CreateTaskState payload_commitment_and_metadata: None, vote_collector: None.into(), timeout_vote_collector: None.into(), - timeout_task: None, + timeout_task, spawned_tasks: BTreeMap::new(), formed_upgrade_certificate: None, proposal_cert: None, @@ -248,6 +249,8 @@ impl> CreateTaskState handle: &SystemContextHandle, ) -> QuorumProposalTaskState { let consensus = handle.hotshot.consensus(); + let timeout_task = handle.spawn_initial_timeout_task(); + QuorumProposalTaskState { latest_proposed_view: handle.cur_view().await, propose_dependencies: HashMap::new(), @@ -262,7 +265,7 @@ impl> CreateTaskState private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), timeout: handle.hotshot.config.next_view_timeout, - timeout_task: None, + timeout_task, round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, version: *handle.hotshot.version.read().await, @@ -278,6 +281,8 @@ impl> CreateTaskState handle: &SystemContextHandle, ) -> QuorumProposalRecvTaskState { let consensus = handle.hotshot.consensus(); + let timeout_task = handle.spawn_initial_timeout_task(); + QuorumProposalRecvTaskState { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -286,7 +291,7 @@ impl> CreateTaskState quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - timeout_task: None, + timeout_task, timeout: handle.hotshot.config.next_view_timeout, round_start_delay: handle.hotshot.config.round_start_delay, output_event_stream: handle.hotshot.external_event_stream.0.clone(), @@ -308,6 +313,8 @@ impl> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> Consensus2TaskState { let consensus = handle.hotshot.consensus(); + let timeout_task = handle.spawn_initial_timeout_task(); + Consensus2TaskState { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -322,7 +329,7 @@ impl> CreateTaskState storage: Arc::clone(&handle.storage), cur_view: handle.cur_view().await, output_event_stream: handle.hotshot.external_event_stream.0.clone(), - timeout_task: None, + timeout_task, timeout: handle.hotshot.config.next_view_timeout, consensus, last_decided_view: handle.cur_view().await, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index a35fc43e76..cb859b659a 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -3,10 +3,13 @@ use std::sync::Arc; use async_broadcast::{InactiveReceiver, Receiver, Sender}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use futures::Stream; use hotshot_task::task::TaskRegistry; -use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ boxed_sync, consensus::Consensus, @@ -15,6 +18,9 @@ use hotshot_types::{ traits::{election::Membership, node_implementation::NodeType}, BoxSyncFuture, }; +use std::time::Duration; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use crate::{traits::NodeImplementation, types::Event, SystemContext}; @@ -190,4 +196,26 @@ impl + 'static> SystemContextHandl pub fn storage(&self) -> Arc> { Arc::clone(&self.storage) } + + /// A helper function to spawn the initial timeout task from a given `SystemContextHandle`. + #[must_use] + pub fn spawn_initial_timeout_task(&self) -> JoinHandle<()> { + // Clone the event stream that we send the timeout event to + let event_stream = self.internal_event_stream.0.clone(); + let next_view_timeout = self.hotshot.config.next_view_timeout; + let start_view = self.hotshot.start_view; + + // Spawn a task that will sleep for the next view timeout and then send a timeout event + // if not cancelled + async_spawn({ + async move { + async_sleep(Duration::from_millis(next_view_timeout)).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(start_view + 1)), + &event_stream, + ) + .await; + } + }) + } } diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 31eb95ec3e..6a3a43bbc7 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -108,7 +108,7 @@ pub struct ConsensusTaskState> { RwLock, TimeoutCertificate>>, /// timeout task handle - pub timeout_task: Option>, + pub timeout_task: JoinHandle<()>, /// Spawned tasks related to a specific view, so we can cancel them when /// they are stale diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 0269cb997d..4037693a4f 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -51,11 +51,6 @@ pub(crate) async fn update_view>( error!("Progress: entered view {:>6}", *new_view); } - // cancel the old timeout task - if let Some(timeout_task) = task_state.timeout_task.take() { - cancel_task(timeout_task).await; - } - task_state.cur_view = new_view; // The next view is just the current view + 1 @@ -77,7 +72,7 @@ pub(crate) async fn update_view>( } // Spawn a timeout task if we did actually update view - task_state.timeout_task = Some(async_spawn({ + let new_timeout_task = async_spawn({ let stream = event_stream.clone(); // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view @@ -91,7 +86,15 @@ pub(crate) async fn update_view>( ) .await; } - })); + }); + + // Cancel the old timeout task + cancel_task(std::mem::replace( + &mut task_state.timeout_task, + new_timeout_task, + )) + .await; + let consensus = consensus.upgradable_read().await; consensus .metrics diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 2a1f751043..673f49079c 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -130,17 +130,12 @@ pub(crate) async fn handle_view_change> { pub output_event_stream: async_broadcast::Sender>, /// Timeout task handle - pub timeout_task: Option>, + pub timeout_task: JoinHandle<()>, /// View timeout from config. pub timeout: u64, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 1ff461fe5d..4f261346ae 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -80,7 +80,7 @@ pub struct QuorumProposalTaskState pub round_start_delay: u64, /// timeout task handle - pub timeout_task: Option>, + pub timeout_task: JoinHandle<()>, /// This node's storage ref pub storage: Arc>, diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv.rs index e9cca7f355..9405bed8b1 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv.rs @@ -55,7 +55,7 @@ pub struct QuorumProposalRecvTaskState, /// timeout task handle - pub timeout_task: Option>, + pub timeout_task: JoinHandle<()>, /// View timeout from config. pub timeout: u64, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 2d197a3d80..83905e5e9c 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -333,6 +333,11 @@ where let (mut builder_task, builder_url) = B::start(config.num_nodes_with_stake.into(), B::Config::default()).await; + + // Collect uninitialized nodes because we need to wait for all networks to be ready before starting the tasks + let mut uninitialized_nodes = Vec::new(); + let mut networks_ready = Vec::new(); + for i in 0..total { let mut config = config.clone(); let node_id = self.next_node_id; @@ -366,6 +371,17 @@ where let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); + // Create a future that waits for the networks to be ready + let network0 = networks.0.clone(); + let network1 = networks.1.clone(); + let networks_ready_future = async move { + network0.wait_for_ready().await; + network1.wait_for_ready().await; + }; + + // Collect it so we can wait for all networks to be ready before starting the tasks + networks_ready.push(networks_ready_future); + if self.launcher.metadata.skip_late && late_start.contains(&node_id) { self.late_start.insert( node_id, @@ -403,23 +419,32 @@ where }, ); } else { - let handle = hotshot.run_tasks().await; - if node_id == 1 { - if let Some(task) = builder_task.take() { - task.start(Box::new(handle.event_stream())) - } - } - - self.nodes.push(Node { - node_id, - networks, - handle, - }); + uninitialized_nodes.push((node_id, networks, hotshot)); } } + results.push(node_id); } + // Wait for all networks to be ready + join_all(networks_ready).await; + + // Then start the necessary tasks + for (node_id, networks, hotshot) in uninitialized_nodes { + let handle = hotshot.run_tasks().await; + if node_id == 1 { + if let Some(task) = builder_task.take() { + task.start(Box::new(handle.event_stream())) + } + } + + self.nodes.push(Node { + node_id, + networks, + handle, + }); + } + results } From df1914705907bfa388426b1ac3edc4c8bbab64df Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 23 May 2024 12:40:20 -0400 Subject: [PATCH 1046/1393] Randomize transactions (#3163) * add random transactions as opposed to zeroed * fix message cache * lints * change round start delay * set correct parent view number * Revert "set correct parent view number" This reverts commit 463548354d403095c6fe262038728ffc1cb7de1d. * decide event view number * revert to using normal round start delay --- example-types/src/state_types.rs | 17 ++++++--- .../src/traits/networking/combined_network.rs | 37 ++++++++++--------- task-impls/src/consensus/helpers.rs | 2 +- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 2b95f10c7f..9bc37c35ba 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -12,6 +12,7 @@ use hotshot_types::{ }, vid::VidCommon, }; +use rand::Rng; use serde::{Deserialize, Serialize}; use vbs::version::Version; @@ -104,14 +105,20 @@ impl ValidatedState for TestValidatedState { impl> TestableState for TestValidatedState { fn create_random_transaction( _state: Option<&Self>, - _rng: &mut dyn rand::RngCore, + rng: &mut dyn rand::RngCore, padding: u64, ) -> ::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; - TestTransaction::new(vec![ - 0; - RANDOM_TX_BASE_SIZE + usize::try_from(padding).unwrap() - ]) + + // Generate a random transaction + let mut tx = rng.gen::<[u8; RANDOM_TX_BASE_SIZE]>().to_vec(); + + // Create and add padding to the transaction + let padding = vec![0; padding.try_into().expect("transaction padding too large")]; + tx.extend(padding); + + // Return the transaction + TestTransaction::new(tx) } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 50dfe13250..884886cfed 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -245,21 +245,24 @@ impl TestableNetworkingImplementation for CombinedNetwor quorum_p2p, ), ); + + // We want to the message cache between the two networks + let message_cache = Arc::new(RwLock::new(LruCache::new( + NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), + ))); + + // Create the quorum and da networks let quorum_net = Self { networks: Arc::new(quorum_networks), - message_cache: Arc::new(RwLock::new(LruCache::new( - NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), - ))), primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), + message_cache: Arc::clone(&message_cache), delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(secondary_network_delay)), }; let da_net = Self { networks: Arc::new(da_networks), - message_cache: Arc::new(RwLock::new(LruCache::new( - NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), - ))), + message_cache, primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), delayed_tasks: Arc::default(), @@ -436,19 +439,19 @@ impl ConnectedNetwork, TYPES::SignatureKey> }; let mut filtered_msgs = Vec::with_capacity(msgs.len()); + + // For each message, for msg in msgs { - // see if we've already seen this message - if !self - .message_cache - .read() - .await - .contains(&calculate_hash_of(&msg)) - { + // Calculate hash of the message + let message_hash = calculate_hash_of(&msg); + + // Add the hash to the cache + if !self.message_cache.read().await.contains(&message_hash) { + // If the message is not in the cache, process it filtered_msgs.push(msg.clone()); - self.message_cache - .write() - .await - .put(calculate_hash_of(&msg), ()); + + // Add it to the cache + self.message_cache.write().await.put(message_hash, ()); } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 630849d393..7ec7c721ff 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -966,7 +966,7 @@ pub async fn handle_quorum_proposal_validated Date: Sat, 25 May 2024 19:58:35 -0600 Subject: [PATCH 1047/1393] [Consensus2] Decouple quorum proposal recv task (#3156) * init consensus2 from main * rollback, fix types, re-create module * add new parent leaf and state method * leaf chain traversal, new fields * fix build * partial quorum proposal validated event * clippy, fix build, add method * remove high qc blocking requirement * some working tests, almost done * migrate to using consensus internal state * test passes but doesnt decide * fix naming * fix task and get decide event * fix timeout test * fix view sync cert * temporarily ignore propose now * lint * make new folder * remove error logs, actually propose correctly for view 1 * fix doc * add quorum proposal recv task without liveness check * receive all events in quorum proposal recv task * fix all tests * add partial liveness check * note for later * fix error messages * naming changes * tmp push * fix build * remove comment * fix build and tests * move storage update * pr feedback * move the liveness proposal out, fix all lints * add replacement to ProposeNow event * new tests, validate vote now event * fix validated event data, make helper less redundant * remove unused import * more pr comments, collect garbage, fix bug in view update * fix bug with early return * comments, move function, remove log * comment * walk leaves by number instead * fill out context message * fix lints * remove accidentally checked in comment * remove error when validated state not found * feedback * I guess clippy is particular about comments now... * feedback * better name * fix build * fix docs --- hotshot/src/lib.rs | 40 ++- task-impls/src/consensus/helpers.rs | 24 +- task-impls/src/consensus/mod.rs | 7 +- task-impls/src/consensus/view_change.rs | 43 ++- task-impls/src/events.rs | 10 +- .../src/quorum_proposal/dependency_handle.rs | 36 +- task-impls/src/quorum_proposal/mod.rs | 41 +-- .../src/quorum_proposal_recv/handlers.rs | 241 +++++++++++++ .../mod.rs} | 23 +- testing/src/predicates/event.rs | 10 + testing/src/test_helpers.rs | 26 ++ .../tests_1/quorum_proposal_recv_task.rs | 124 ++++++- testing/tests/tests_1/quorum_proposal_task.rs | 334 ++++++++---------- types/src/consensus.rs | 27 +- 14 files changed, 661 insertions(+), 325 deletions(-) create mode 100644 task-impls/src/quorum_proposal_recv/handlers.rs rename task-impls/src/{quorum_proposal_recv.rs => quorum_proposal_recv/mod.rs} (93%) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4a56ed6c93..b90b084e5a 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -308,25 +308,59 @@ impl> SystemContext { Ok(inner) } - /// "Starts" consensus by sending a `QcFormed` event + /// "Starts" consensus by sending a `QcFormed`, `ViewChange`, and `ValidatedStateUpdated` events /// /// # Panics /// Panics if sending genesis fails pub async fn start_consensus(&self) { + #[cfg(feature = "dependncy-tasks")] + error!("HotShot is running with the dependency tasks feature enabled!!"); debug!("Starting Consensus"); let consensus = self.consensus.read().await; + + #[allow(clippy::panic)] self.internal_event_stream .0 .broadcast_direct(Arc::new(HotShotEvent::ViewChange(self.start_view))) .await - .expect("Genesis Broadcast failed"); + .unwrap_or_else(|_| { + panic!( + "Genesis Broadcast failed; event = ViewChange({:?})", + self.start_view + ) + }); + #[cfg(feature = "dependency-tasks")] + { + if let Some(validated_state) = consensus.validated_state_map().get(&self.start_view) { + #[allow(clippy::panic)] + self.internal_event_stream + .0 + .broadcast_direct(Arc::new(HotShotEvent::ValidatedStateUpdated( + TYPES::Time::new(*self.start_view), + validated_state.clone(), + ))) + .await + .unwrap_or_else(|_| { + panic!( + "Genesis Broadcast failed; event = ValidatedStateUpdated({:?})", + self.start_view, + ) + }); + } + } + #[allow(clippy::panic)] self.internal_event_stream .0 .broadcast_direct(Arc::new(HotShotEvent::QcFormed(either::Left( consensus.high_qc().clone(), )))) .await - .expect("Genesis Broadcast failed"); + .unwrap_or_else(|_| { + panic!( + "Genesis Broadcast failed; event = QcFormed(either::Left({:?}))", + consensus.high_qc() + ) + }); { // Some applications seem to expect a leaf decide event for the genesis leaf, diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 7ec7c721ff..22836d56f4 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -50,9 +50,12 @@ use crate::{ /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. +/// +/// TODO - This should just take the QuorumProposalRecv task state after +/// we merge the dependency tasks. #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] -async fn validate_proposal_safety_and_liveness( +pub async fn validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, consensus: Arc>>, @@ -577,24 +580,16 @@ pub async fn publish_proposal_if_able( } } -/// TEMPORARY TYPE: Quorum proposal recv task state when using dependency tasks -#[cfg(feature = "dependency-tasks")] -pub(crate) type TemporaryProposalRecvCombinedType = QuorumProposalRecvTaskState; - -/// TEMPORARY TYPE: Consensus task state when not using dependency tasks -#[cfg(not(feature = "dependency-tasks"))] -pub(crate) type TemporaryProposalRecvCombinedType = ConsensusTaskState; - // TODO: Fix `clippy::too_many_lines`. /// Handle the received quorum proposal. /// /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. #[allow(clippy::too_many_lines)] -pub async fn handle_quorum_proposal_recv>( +pub(crate) async fn handle_quorum_proposal_recv>( proposal: &Proposal>, sender: &TYPES::SignatureKey, event_stream: Sender>>, - task_state: &mut TemporaryProposalRecvCombinedType, + task_state: &mut ConsensusTaskState, version: Version, ) -> Result>> { let sender = sender.clone(); @@ -623,11 +618,14 @@ pub async fn handle_quorum_proposal_recv( - task_state, + if let Err(e) = update_view::( view, &event_stream, + task_state.timeout, Arc::clone(&task_state.consensus), + &mut task_state.cur_view, + &mut task_state.timeout_task, + &task_state.output_event_stream, SEND_VIEW_CHANGE_EVENT, ) .await diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 6a3a43bbc7..92695a59d3 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -538,11 +538,14 @@ impl> ConsensusTaskState // update the view in state to the one in the message // Publish a view change event to the application // Returns if the view does not need updating. - if let Err(e) = update_view::( - self, + if let Err(e) = update_view::( new_view, &event_stream, + self.timeout, Arc::clone(&self.consensus), + &mut self.cur_view, + &mut self.timeout_task, + &self.output_event_stream, DONT_SEND_VIEW_CHANGE_EVENT, ) .await diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 4037693a4f..5394d5e40c 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,6 +1,11 @@ use core::time::Duration; use std::sync::Arc; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; + use anyhow::{ensure, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -8,12 +13,11 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use hotshot_types::{ consensus::Consensus, event::{Event, EventType}, - traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + traits::node_implementation::{ConsensusTime, NodeType}, }; use tracing::{debug, error}; use crate::{ - consensus::helpers::TemporaryProposalRecvCombinedType, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; @@ -29,19 +33,24 @@ pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; /// /// # Errors /// Returns an [`anyhow::Error`] when the new view is not greater than the current view. -pub(crate) async fn update_view>( - task_state: &mut TemporaryProposalRecvCombinedType, +/// TODO: Remove args when we merge dependency tasks. +#[allow(clippy::too_many_arguments)] +pub(crate) async fn update_view( new_view: TYPES::Time, event_stream: &Sender>>, + timeout: u64, consensus: Arc>>, + cur_view: &mut TYPES::Time, + timeout_task: &mut JoinHandle<()>, + output_event_stream: &Sender>, send_view_change_event: bool, ) -> Result<()> { ensure!( - new_view > task_state.cur_view, + new_view > *cur_view, "New view is not greater than our current view" ); - let old_view = task_state.cur_view; + let old_view = *cur_view; debug!("Updating view from {} to {}", *old_view, *new_view); @@ -51,10 +60,10 @@ pub(crate) async fn update_view>( error!("Progress: entered view {:>6}", *new_view); } - task_state.cur_view = new_view; + *cur_view = new_view; // The next view is just the current view + 1 - let next_view = task_state.cur_view + 1; + let next_view = *cur_view + 1; if send_view_change_event { futures::join! { @@ -66,7 +75,7 @@ pub(crate) async fn update_view>( view_number: old_view, }, }, - &task_state.output_event_stream, + output_event_stream, ) }; } @@ -77,7 +86,7 @@ pub(crate) async fn update_view>( // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = next_view; - let timeout = Duration::from_millis(task_state.timeout); + let timeout = Duration::from_millis(timeout); async move { async_sleep(timeout).await; broadcast_event( @@ -88,26 +97,22 @@ pub(crate) async fn update_view>( } }); - // Cancel the old timeout task - cancel_task(std::mem::replace( - &mut task_state.timeout_task, - new_timeout_task, - )) - .await; + // cancel the old timeout task + cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await; let consensus = consensus.upgradable_read().await; consensus .metrics .current_view - .set(usize::try_from(task_state.cur_view.u64()).unwrap()); + .set(usize::try_from(cur_view.u64()).unwrap()); // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(task_state.cur_view.u64()).unwrap() + if usize::try_from(cur_view.u64()).unwrap() > usize::try_from(consensus.last_decided_view().u64()).unwrap() { consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(task_state.cur_view.u64()).unwrap() + usize::try_from(cur_view.u64()).unwrap() - usize::try_from(consensus.last_decided_view().u64()).unwrap(), ); } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 75d56871b0..90fe75887a 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use either::Either; use hotshot_types::{ - consensus::ProposalDependencyData, data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, message::Proposal, simple_certificate::{ @@ -149,8 +148,13 @@ pub enum HotShotEvent { UpgradeCertificateFormed(UpgradeCertificate), /// HotShot was upgraded, with a new network version. VersionUpgrade(Version), - /// Initiate a proposal right now for a provided view. - ProposeNow(TYPES::Time, ProposalDependencyData), + + /// Initiate a proposal for a proposal without a parent, but passing the liveness check. + /// This is distinct from `QuorumProposalValidated` due to the fact that it is in a + /// different state than what we'd typically see with a fully validated proposal and, + /// as a result, it need to be its own event. + QuorumProposalLivenessValidated(QuorumProposal), + /// Initiate a vote right now for the designated view. VoteNow(TYPES::Time, VoteDependencyData), diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index 450791a21a..5d00baabd5 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -39,12 +39,10 @@ pub(crate) enum ProposalDependency { /// For the `QcFormed` event timeout branch. TimeoutCert, - /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. + /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv` or the + /// `LivenessCheckProposalRecv` event during the liveness check in `QuorumProposalRecv`. Proposal, - /// For the `ProposeNow` event. - ProposeNow, - /// For the `VidShareValidated` event. VidShare, @@ -178,23 +176,12 @@ impl HandleDepOutput for ProposalDependencyHandle { #[allow(clippy::no_effect_underscore_binding)] async fn handle_dep_result(self, res: Self::Output) { - let mut payload_commitment = None; let mut commit_and_metadata: Option> = None; let mut timeout_certificate = None; let mut view_sync_finalize_cert = None; let mut vid_share = None; for event in res.iter().flatten().flatten() { match event.as_ref() { - HotShotEvent::QuorumProposalValidated(proposal, _) => { - let proposal_payload_comm = proposal.block_header.payload_commitment(); - if let Some(comm) = payload_commitment { - if proposal_payload_comm != comm { - return; - } - } else { - payload_commitment = Some(proposal_payload_comm); - } - } HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -221,24 +208,13 @@ impl HandleDepOutput for ProposalDependencyHandle { HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { view_sync_finalize_cert = Some(cert.clone()); } - HotShotEvent::ProposeNow(_, pdd) => { - commit_and_metadata = Some(pdd.commitment_and_metadata.clone()); - match &pdd.secondary_proposal_information { - hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate(quorum_proposal, _) => { - payload_commitment = Some(quorum_proposal.block_header.payload_commitment()); - }, - hotshot_types::consensus::SecondaryProposalInformation::Timeout(tc) => { - timeout_certificate = Some(tc.clone()); - } - hotshot_types::consensus::SecondaryProposalInformation::ViewSync(vsc) => { - view_sync_finalize_cert = Some(vsc.clone()); - }, - } - } HotShotEvent::VidShareValidated(share) => { vid_share = Some(share.clone()); } - _ => {} + _ => { + // LivenessCheckProposalRecv and QuorumProposalValidated are implicitly + // handled here. + } } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 4f261346ae..1d6aa45303 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -136,6 +136,10 @@ impl> QuorumProposalTaskState { if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { proposal.view_number() + 1 + } else if let HotShotEvent::QuorumProposalLivenessValidated(proposal) = + event + { + proposal.view_number() + 1 } else { return false; } @@ -154,13 +158,6 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::ProposeNow(view_number, _) = event { - *view_number - } else { - return false; - } - } ProposalDependency::VidShare => { if let HotShotEvent::VidShareValidated(vid_share) = event { vid_share.data.view_number() @@ -222,12 +219,6 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { - propose_now_dependency.mark_as_completed(Arc::clone(&event)); - } HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); } - HotShotEvent::QuorumProposalValidated(..) => { + // All proposals are equivalent in this case. + HotShotEvent::QuorumProposalValidated(..) + | HotShotEvent::QuorumProposalLivenessValidated(_) => { proposal_dependency.mark_as_completed(event); } HotShotEvent::QcFormed(quorum_certificate) => match quorum_certificate { @@ -292,7 +282,6 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { self.version = *version; } - HotShotEvent::ProposeNow(view_number, _) => { + HotShotEvent::QuorumProposalLivenessValidated(proposal) => { + // We may not be able to propose off of this, but we still spin up an event just in case + // the other data is already here, we're still proposing for view + 1 here. self.create_dependency_task_if_new( - *view_number, + proposal.view_number() + 1, event_receiver, event_sender, Arc::clone(&event), @@ -461,10 +452,10 @@ impl> QuorumProposalTaskState { - let new_view = proposal.view_number; + let view_number = proposal.view_number(); // All nodes get the latest proposed view as a proxy of `cur_view` of olde. - if !self.update_latest_proposed_view(new_view).await { + if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); return; } @@ -477,14 +468,14 @@ impl> QuorumProposalTaskState { - let view = proposal.data.view_number; + let view = proposal.data.view_number(); if !self.update_latest_proposed_view(view).await { tracing::trace!("Failed to update latest proposed view"); return; @@ -556,7 +547,7 @@ impl> TaskState | HotShotEvent::QcFormed(_) | HotShotEvent::SendPayloadCommitmentAndMetadata(..) | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ProposeNow(..) + | HotShotEvent::QuorumProposalLivenessValidated(..) | HotShotEvent::QuorumProposalSend(..) | HotShotEvent::VidShareValidated(_) | HotShotEvent::ValidatedStateUpdated(..) diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs new file mode 100644 index 0000000000..593760ef82 --- /dev/null +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -0,0 +1,241 @@ +#![allow(dead_code)] + +use anyhow::{bail, Context, Result}; +use async_lock::RwLockUpgradableReadGuard; +use committable::Committable; +use std::sync::Arc; +use tracing::{debug, warn}; + +use async_broadcast::{broadcast, Sender}; +use hotshot_types::{ + data::{Leaf, QuorumProposal}, + message::Proposal, + simple_certificate::QuorumCertificate, + traits::{ + election::Membership, + node_implementation::{NodeImplementation, NodeType}, + storage::Storage, + ValidatedState, + }, + utils::{View, ViewInner}, + vote::{Certificate, HasViewNumber}, +}; + +use crate::{ + consensus::{ + helpers::{validate_proposal_safety_and_liveness, validate_proposal_view_and_certs}, + view_change::{update_view, SEND_VIEW_CHANGE_EVENT}, + }, + events::HotShotEvent, + helpers::broadcast_event, +}; + +use super::QuorumProposalRecvTaskState; + +/// Broadcast the proposal in the event that the parent state is not found for +/// a given `proposal`, but it still passes the liveness check. Optionally return +/// the inner [`QuorumProposal`] if the liveness check passes. +async fn validate_proposal_liveness>( + proposal: &Proposal>, + event_sender: &Sender>>, + justify_qc: &QuorumCertificate, + task_state: &mut QuorumProposalRecvTaskState, +) -> Option> { + let view_number = proposal.data.view_number(); + let mut consensus_write = task_state.consensus.write().await; + + let leaf = Leaf::from_quorum_proposal(&proposal.data); + + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + let view = View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }; + + consensus_write.update_validated_state_map(view_number, view.clone()); + consensus_write.update_saved_leaves(leaf.clone()); + + if let Err(e) = task_state + .storage + .write() + .await + .update_undecided_state( + consensus_write.saved_leaves().clone(), + consensus_write.validated_state_map().clone(), + ) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + + let liveness_check = justify_qc.view_number() > consensus_write.locked_view(); + + let high_qc = consensus_write.high_qc().clone(); + drop(consensus_write); + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + HotShotEvent::ValidatedStateUpdated(view_number, view).into(), + event_sender, + ) + .await; + broadcast_event( + HotShotEvent::NewUndecidedView(leaf.clone()).into(), + event_sender, + ) + .await; + + if liveness_check { + let new_view = proposal.data.view_number + 1; + + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key + && high_qc.view_number() == proposal.data.view_number(); + + if should_propose { + debug!( + "Attempting to publish proposal after voting for liveness; now in view: {}", + *new_view + ); + broadcast_event( + HotShotEvent::QuorumProposalLivenessValidated(proposal.data.clone()).into(), + event_sender, + ) + .await; + } + + return Some(proposal.data.clone()); + } + + None +} + +/// Handles the `QuorumProposalRecv` event by first validating the cert itself for the view, and then +/// evaluating if a liveness check is needed for the proposal, which runs when the proposal cannot be +/// found in the internal state map. +/// +/// This code can fail when: +/// - The justify qc is invalid. +/// - The task is internally inconsistent. +/// - The sequencer storage update fails. +pub(crate) async fn handle_quorum_proposal_recv>( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + event_sender: &Sender>>, + task_state: &mut QuorumProposalRecvTaskState, +) -> Result>> { + let sender = sender.clone(); + + validate_proposal_view_and_certs( + proposal, + &sender, + task_state.cur_view, + &task_state.quorum_membership, + &task_state.timeout_membership, + ) + .context("Failed to validate proposal view and attached certs")?; + + let view_number = proposal.data.view_number(); + let view_leader_key = task_state.quorum_membership.leader(view_number); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { + let consensus = task_state.consensus.read().await; + consensus.metrics.invalid_qc.update(1); + bail!("Invalid justify_qc in proposal for view {}", *view_number); + } + + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here + if let Err(e) = update_view::( + view_number, + event_sender, + task_state.timeout, + Arc::clone(&task_state.consensus), + &mut task_state.cur_view, + &mut task_state.timeout_task, + &task_state.output_event_stream, + SEND_VIEW_CHANGE_EVENT, + ) + .await + { + debug!("Failed to update view; error = {e:#}"); + } + + let parent = { + let consensus_read = task_state.consensus.read().await; + + // Get the parent leaf and state. + let parent = match consensus_read + .saved_leaves() + .get(&justify_qc.data.leaf_commit) + .cloned() + { + Some(leaf) => { + if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { + Some((leaf, Arc::clone(&state))) + } else { + bail!("Parent state not found! Consensus internally inconsistent"); + } + } + None => None, + }; + + if justify_qc.view_number() > consensus_read.high_qc().view_number { + if let Err(e) = task_state + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + bail!("Failed to store High QC, not voting; error = {:?}", e); + } + } + + parent + }; + + { + let mut consensus_write = task_state.consensus.write().await; + if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { + tracing::trace!("{e:?}"); + } + } + + let Some((parent_leaf, _parent_state)) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.data.leaf_commit + ); + return Ok( + validate_proposal_liveness(proposal, event_sender, &justify_qc, task_state).await, + ); + }; + + broadcast_event( + HotShotEvent::UpdateHighQc(justify_qc.clone()).into(), + event_sender, + ) + .await; + + // Validate the proposal + validate_proposal_safety_and_liveness( + proposal.clone(), + parent_leaf, + Arc::clone(&task_state.consensus), + None, + Arc::clone(&task_state.quorum_membership), + view_leader_key, + event_sender.clone(), + sender, + task_state.output_event_stream.clone(), + ) + .await?; + + Ok(None) +} diff --git a/task-impls/src/quorum_proposal_recv.rs b/task-impls/src/quorum_proposal_recv/mod.rs similarity index 93% rename from task-impls/src/quorum_proposal_recv.rs rename to task-impls/src/quorum_proposal_recv/mod.rs index 9405bed8b1..17eebf4438 100644 --- a/task-impls/src/quorum_proposal_recv.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -9,8 +9,8 @@ use async_std::task::JoinHandle; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus}, - data::{QuorumProposal, ViewChangeEvidence}, + consensus::Consensus, + data::ViewChangeEvidence, event::Event, simple_certificate::UpgradeCertificate, traits::{ @@ -25,11 +25,16 @@ use tracing::{debug, error, instrument, warn}; use vbs::version::Version; use crate::{ - consensus::helpers::{handle_quorum_proposal_recv, parent_leaf_and_state}, + consensus::helpers::parent_leaf_and_state, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; +use self::handlers::handle_quorum_proposal_recv; + +/// Event handlers for this task. +mod handlers; + /// The state for the quorum proposal task. Contains all of the information for /// handling [`HotShotEvent::QuorumProposalRecv`] events. pub struct QuorumProposalRecvTaskState> { @@ -120,15 +125,7 @@ impl> QuorumProposalRecvTaskState< ) { #[cfg(feature = "dependency-tasks")] if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { - match handle_quorum_proposal_recv( - proposal, - sender, - event_stream.clone(), - self, - self.version, - ) - .await - { + match handle_quorum_proposal_recv(proposal, sender, &event_stream, self).await { Ok(Some(current_proposal)) => { // Build the parent leaf since we didn't find it during the proposal check. let parent_leaf = match parent_leaf_and_state( @@ -142,7 +139,7 @@ impl> QuorumProposalRecvTaskState< { Ok((parent_leaf, _ /* state */)) => parent_leaf, Err(error) => { - warn!(?error, "Failed to get parent leaf and state"); + warn!("Failed to get parent leaf and state during VoteNow data construction; error = {error:#}"); return; } }; diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index e044c18581..87f134a667 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -242,3 +242,13 @@ where Arc::new(move |e: Arc>| matches!(e.as_ref(), TimeoutVoteSend(..))); Box::new(EventPredicate { check, info }) } + +pub fn vote_now() -> Box> +where + TYPES: NodeType, +{ + let info = "VoteNow".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), VoteNow(..))); + Box::new(EventPredicate { check, info }) +} diff --git a/testing/src/test_helpers.rs b/testing/src/test_helpers.rs index 7c98d5d8a7..a25c32ec9a 100644 --- a/testing/src/test_helpers.rs +++ b/testing/src/test_helpers.rs @@ -1,3 +1,10 @@ +use committable::Committable; +use hotshot_example_types::{node_types::TestTypes, state_types::TestValidatedState}; +use hotshot_types::{ + data::Leaf, + utils::{View, ViewInner}, +}; + /// This function permutes the provided input vector `inputs`, given some order provided within the /// `order` vector. /// @@ -14,3 +21,22 @@ where } ordered_inputs } + +/// This function will create a fake [`View`] from a provided [`Leaf`]. +pub fn create_fake_view_with_leaf(leaf: Leaf) -> View { + create_fake_view_with_leaf_and_state(leaf, TestValidatedState::default()) +} + +/// This function will create a fake [`View`] from a provided [`Leaf`] and `state`. +pub fn create_fake_view_with_leaf_and_state( + leaf: Leaf, + state: TestValidatedState, +) -> View { + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state: state.into(), + delta: None, + }, + } +} diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 5eb00f5543..85495dbab1 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -7,7 +7,7 @@ use hotshot_task_impls::{ events::HotShotEvent::*, quorum_proposal_recv::QuorumProposalRecvTaskState, }; use hotshot_testing::{ - predicates::event::exact, + predicates::event::{exact, vote_now}, script::{run_test_script, TestScriptStage}, task_helpers::build_system_handle, view_generator::TestViewGenerator, @@ -19,12 +19,17 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_recv_task() { + use hotshot_testing::test_helpers::create_fake_view_with_leaf; + use hotshot_types::data::Leaf; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); + let consensus = handle.hotshot.consensus(); + let mut consensus_writer = consensus.write().await; let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); @@ -32,18 +37,133 @@ async fn test_quorum_proposal_recv_task() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); + let mut leaves = Vec::new(); for view in (&mut generator).take(2) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); + + // These are both updated when we vote. Since we don't have access + // to that, we'll just put them in here. + consensus_writer + .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + consensus_writer.update_validated_state_map( + view.quorum_proposal.data.view_number, + create_fake_view_with_leaf(view.leaf.clone()), + ); } + drop(consensus_writer); // Run view 2 and propose. let view_2 = TestScriptStage { inputs: vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![exact(ViewChange(ViewNumber::new(2)))], + outputs: vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(QuorumProposalValidated( + proposals[1].data.clone(), + leaves[0].clone(), + )), + ], + asserts: vec![], + }; + + let state = QuorumProposalRecvTaskState::::create_from(&handle).await; + run_test_script(vec![view_2], state).await; +} + +#[cfg(test)] +#[cfg(feature = "dependency-tasks")] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_proposal_recv_task_liveness_check() { + use hotshot::traits::ValidatedState; + use hotshot_example_types::state_types::TestValidatedState; + use hotshot_testing::test_helpers::{ + create_fake_view_with_leaf, create_fake_view_with_leaf_and_state, + }; + use hotshot_types::{ + data::Leaf, + vote::{HasViewNumber, VoteDependencyData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(4).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + let consensus = handle.hotshot.consensus(); + let mut consensus_writer = consensus.write().await; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + let mut leaves = Vec::new(); + for view in (&mut generator).take(4) { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaves.push(view.leaf.clone()); + + // It's not explicitly required to insert an entry for every generated view, but + // there's no reason not to. + let inserted_view_number = view.quorum_proposal.data.view_number(); + + // These are both updated when we'd have voted previously. However, since + // we don't have access to that, we'll just put them in here. We + // specifically ignore writing the saved leaves so that way + // the parent lookup fails and we trigger a view liveness check. + consensus_writer.update_validated_state_map( + inserted_view_number, + create_fake_view_with_leaf(view.leaf.clone()), + ); + + // The index here is important. Since we're proposing for view 4, we need the + // value from entry 2 to align the public key from the shares map. + consensus_writer.update_vid_shares(inserted_view_number, view.vid_proposal.0[2].clone()); + + // We need there to be a DA certificate for us to be able to vote, so we grab + // this from the generator as well since we don't have the running task that'd + // insert the value ordinarily. + consensus_writer.update_saved_da_certs(inserted_view_number, view.da_certificate.clone()); + } + + // We can only propose if we've seen a QcFormed event already, so we just insert it + // ourselves here instead. This is a bit cheesy, but it'll work as we expect for the + // purposes of the test. + consensus_writer + .update_high_qc(proposals[3].data.justify_qc.clone()) + .unwrap(); + + drop(consensus_writer); + + // Run view 2 and propose. + let view_2 = TestScriptStage { + inputs: vec![QuorumProposalRecv(proposals[2].clone(), leaders[2])], + outputs: vec![ + exact(ViewChange(ViewNumber::new(3))), + exact(ValidatedStateUpdated( + ViewNumber::new(3), + create_fake_view_with_leaf_and_state( + leaves[2].clone(), + >::from_header( + &proposals[2].data.block_header, + ), + ), + )), + exact(NewUndecidedView(leaves[2].clone())), + exact(QuorumProposalLivenessValidated(proposals[2].data.clone())), + vote_now(), + ], asserts: vec![], }; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index fba97696f5..9f67c2f46a 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,7 +1,5 @@ #![cfg(feature = "dependency-tasks")] -use std::sync::Arc; - use committable::Committable; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ @@ -22,22 +20,24 @@ use hotshot_testing::{ task_helpers::{ build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number, vid_share, }, + test_helpers::create_fake_view_with_leaf, view_generator::TestViewGenerator, }; use hotshot_types::{ - consensus::{CommitmentAndMetadata, ProposalDependencyData}, data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, - simple_certificate::{TimeoutCertificate, ViewSyncFinalizeCertificate2}, - simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData, ViewSyncFinalizeVote}, + simple_certificate::QuorumCertificate, + simple_vote::{TimeoutData, ViewSyncFinalizeData}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, }, utils::{BuilderCommitment, View, ViewInner}, vid::VidSchemeType, + vote::HasViewNumber, }; use jf_vid::VidScheme; use sha2::Digest; +use std::sync::Arc; fn make_payload_commitment( membership: &::Membership, @@ -50,22 +50,10 @@ fn make_payload_commitment( vid.commit_only(&encoded_transactions).unwrap() } -fn create_fake_view_with_leaf(leaf: Leaf) -> View { - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state: TestValidatedState::default().into(), - delta: None, - }, - } -} - #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_1() { - use hotshot_types::simple_certificate::QuorumCertificate; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -197,7 +185,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // We send all the events that we'd have otherwise received to ensure the states are updated. let view_1 = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[0].data.clone(), leaves[0].clone()), + QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -220,7 +208,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // Proposing for this view since we've received a proposal for view 2. let view_2 = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(3)), @@ -247,7 +235,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // Now, let's verify that we get the decide on the 3-chain. let view_3 = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[2].data.clone(), leaves[2].clone()), + QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(4)), @@ -269,7 +257,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let view_4 = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[3].data.clone(), leaves[3].clone()), + QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(5)), @@ -458,209 +446,170 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_task_propose_now() { +async fn test_quorum_proposal_livness_check_proposal() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let node_id = 2; + let node_id = 3; let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut leaves = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(1) { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - vids.push(view.vid_proposal.clone()); - } - for view in (&mut generator).take(1) { + let consensus = handle.hotshot.consensus(); + let mut consensus_writer = consensus.write().await; + for view in (&mut generator).take(5) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); vids.push(view.vid_proposal.clone()); + + // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals + // to make sure they show up during tests. + consensus_writer + .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); } + drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - // proposal dependency data - quorum proposal and cert - let pdd_qp = ProposalDependencyData { - commitment_and_metadata: CommitmentAndMetadata { - commitment: payload_commitment, - builder_commitment: builder_commitment.clone(), - metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), - block_view: ViewNumber::new(2), - }, - secondary_proposal_information: - hotshot_types::consensus::SecondaryProposalInformation::QuorumProposalAndCertificate( - proposals[1].data.clone(), - proposals[1].data.justify_qc.clone(), - ), - }; - - let view_qp = TestScriptStage { - inputs: vec![ProposeNow(ViewNumber::new(node_id), pdd_qp)], - outputs: vec![quorum_proposal_send()], - asserts: vec![], - }; - - let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view_qp]; - run_test_script(script, quorum_proposal_task_state).await; -} - -#[ignore] -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_task_propose_now_timeout() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let node_id = 2; - let handle = build_system_handle(node_id).await.0; - let (private_key, public_key) = key_pair_for_id(node_id); - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + // We need to handle the views where we aren't the leader to ensure that the states are + // updated properly. - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_leaf = Leaf::genesis(&*handle.hotshot.instance_state()); - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(1) { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - vids.push(view.vid_proposal.clone()); - } - for view in (&mut generator).take(1) { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - vids.push(view.vid_proposal.clone()); - } + let genesis_view = TestScriptStage { + inputs: vec![ + QcFormed(either::Left(genesis_cert.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(1)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(1), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[0].0, handle.public_key())), + ValidatedStateUpdated( + ViewNumber::new(0), + create_fake_view_with_leaf(genesis_leaf.clone()), + ), + ], + outputs: vec![exact(UpdateHighQc(genesis_cert.clone()))], + asserts: vec![], + }; - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - // proposal dependency data - timeout cert - let pdd_timeout = ProposalDependencyData { - commitment_and_metadata: CommitmentAndMetadata { - commitment: payload_commitment, - builder_commitment: builder_commitment.clone(), - metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), - block_view: ViewNumber::new(2), - }, - secondary_proposal_information: - hotshot_types::consensus::SecondaryProposalInformation::Timeout(build_cert::< - TestTypes, - TimeoutData, - TimeoutVote, - TimeoutCertificate, - >( - TimeoutData { - view: ViewNumber::new(1), - }, - &quorum_membership, + // We send all the events that we'd have otherwise received to ensure the states are updated. + let view_1 = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf), + QcFormed(either::Left(proposals[1].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(2)), + builder_commitment.clone(), + TestMetadata, ViewNumber::new(2), - &public_key, - &private_key, - )), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[1].0, handle.public_key())), + ValidatedStateUpdated( + proposals[0].data.view_number(), + create_fake_view_with_leaf(leaves[0].clone()), + ), + ], + outputs: vec![exact(UpdateHighQc(proposals[1].data.justify_qc.clone()))], + asserts: vec![], }; - let view_timeout = TestScriptStage { - inputs: vec![ProposeNow(ViewNumber::new(node_id), pdd_timeout)], - outputs: vec![quorum_proposal_send()], + // This is a little hokey, and may not reflect reality, but we are only testing, + // for this specific task, that it will propose when it receives this event. See + // the QuorumProposalRecv task tests. + let view_2 = TestScriptStage { + inputs: vec![ + LivenessCheckProposalRecv(proposals[1].data.clone()), + QcFormed(either::Left(proposals[2].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(3)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(3), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[2].0, handle.public_key())), + ValidatedStateUpdated( + proposals[1].data.view_number(), + create_fake_view_with_leaf(leaves[1].clone()), + ), + ], + outputs: vec![ + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + quorum_proposal_send(), + ], asserts: vec![], }; - let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; - - let script = vec![view_timeout]; - run_test_script(script, quorum_proposal_task_state).await; -} - -#[ignore] -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_task_propose_now_view_sync() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let node_id = 2; - let handle = build_system_handle(node_id).await.0; - let (private_key, public_key) = key_pair_for_id(node_id); - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(1) { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - vids.push(view.vid_proposal.clone()); - } - for view in (&mut generator).take(1) { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - vids.push(view.vid_proposal.clone()); - } - - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - - // proposal dependency data - view sync cert - let pdd_view_sync = ProposalDependencyData { - commitment_and_metadata: CommitmentAndMetadata { - commitment: payload_commitment, - builder_commitment, - metadata: TestMetadata, - fee: null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), - block_view: ViewNumber::new(2), - }, - secondary_proposal_information: - hotshot_types::consensus::SecondaryProposalInformation::ViewSync(build_cert::< - TestTypes, - ViewSyncFinalizeData, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, - >( - ViewSyncFinalizeData { - relay: 1, - round: ViewNumber::new(1), - }, - &quorum_membership, - ViewNumber::new(node_id), - &public_key, - &private_key, - )), + // Now, let's verify that we get the decide on the 3-chain. + let view_3 = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + QcFormed(either::Left(proposals[3].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(4)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(4), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[3].0, handle.public_key())), + ValidatedStateUpdated( + proposals[2].data.view_number(), + create_fake_view_with_leaf(leaves[2].clone()), + ), + ], + outputs: vec![exact(UpdateHighQc(proposals[3].data.justify_qc.clone()))], + asserts: vec![], }; - let view_view_sync = TestScriptStage { - inputs: vec![ProposeNow(ViewNumber::new(node_id), pdd_view_sync)], - outputs: vec![quorum_proposal_send()], + let view_4 = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), + QcFormed(either::Left(proposals[4].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + make_payload_commitment(&quorum_membership, ViewNumber::new(5)), + builder_commitment, + TestMetadata, + ViewNumber::new(5), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + ), + VidShareValidated(vid_share(&vids[4].0, handle.public_key())), + ValidatedStateUpdated( + proposals[3].data.view_number(), + create_fake_view_with_leaf(leaves[3].clone()), + ), + ], + outputs: vec![ + exact(LockedViewUpdated(ViewNumber::new(3))), + exact(LastDecidedViewUpdated(ViewNumber::new(2))), + leaf_decided(), + exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), + ], asserts: vec![], }; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view_view_sync]; + let script = vec![genesis_view, view_1, view_2, view_3, view_4]; run_test_script(script, quorum_proposal_task_state).await; } @@ -694,7 +643,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let view_2 = TestScriptStage { inputs: vec![QuorumProposalValidated( proposals[1].data.clone(), - leaves[1].clone(), + leaves[0].clone(), )], outputs: vec![], asserts: vec![], @@ -785,6 +734,9 @@ async fn test_quorum_proposal_task_happy_path_leaf_ascension() { // This unwrap is safe here let view = generator.next().unwrap(); let proposal = view.quorum_proposal.clone(); + + // This intentionally grabs the wrong leaf since it *really* doesn't + // matter. For the record, this should be view - 1's leaf. let leaf = view.leaf.clone(); // update the consensus shared state @@ -850,6 +802,9 @@ async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { // This unwrap is safe here let view = generator.next().unwrap(); let proposal = view.quorum_proposal.clone(); + + // This intentionally grabs the wrong leaf since it *really* doesn't + // matter. For the record, this should be view - 1's leaf. let leaf = view.leaf.clone(); { @@ -875,7 +830,6 @@ async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { // Update the consensus shared state with a 10% failure rate if rand::random::() < 0.9 { - // if view_number != 7 && view_number != 13 { consensus_writer.update_validated_state_map( ViewNumber::new(view_number), create_fake_view_with_leaf(leaf.clone()), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 24187d52ce..3e98a37a8b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,13 +11,10 @@ use tracing::{debug, error}; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf, QuorumProposal, VidDisperseShare}, + data::{Leaf, VidDisperseShare}, error::HotShotError, message::Proposal, - simple_certificate::{ - DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, - ViewSyncFinalizeCertificate2, - }, + simple_certificate::{DaCertificate, QuorumCertificate, UpgradeCertificate}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, @@ -494,23 +491,3 @@ pub struct CommitmentAndMetadata { /// View number this block is for pub block_view: TYPES::Time, } - -/// Helper type to hold the optional secondary information required to propose. -#[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub enum SecondaryProposalInformation { - /// The quorum proposal and certificate needed to propose. - QuorumProposalAndCertificate(QuorumProposal, QuorumCertificate), - /// The timeout certificate which we can propose from. - Timeout(TimeoutCertificate), - /// The view sync certificate which we can propose from. - ViewSync(ViewSyncFinalizeCertificate2), -} - -/// Dependency data required to submit a proposal -#[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub struct ProposalDependencyData { - /// The primary data in a proposal. - pub commitment_and_metadata: CommitmentAndMetadata, - /// The secondary data in a proposal - pub secondary_proposal_information: SecondaryProposalInformation, -} From 4b9d20855750a83814c053ec450c206245c737e9 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 27 May 2024 10:09:19 -0600 Subject: [PATCH 1048/1393] [CX_HARDENING] - Modernize and Audit Vid Task Tests (#3223) * finish test * merge upstream change --- hotshot/src/tasks/task_state.rs | 7 +- task-impls/src/vid.rs | 18 +---- testing/tests/tests_1/vid_task.rs | 113 ++++++++++++------------------ 3 files changed, 49 insertions(+), 89 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 56ec2489c0..d8bf712ec0 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -80,13 +80,10 @@ impl> CreateTaskState #[async_trait] impl> CreateTaskState - for VidTaskState> + for VidTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> VidTaskState> { + async fn create_from(handle: &SystemContextHandle) -> VidTaskState { VidTaskState { - api: handle.clone(), consensus: handle.hotshot.consensus(), cur_view: handle.cur_view().await, vote_collector: None, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 82bb316e9f..8b37bf27fc 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -8,7 +8,6 @@ use hotshot_types::{ data::VidDisperseShare, message::Proposal, traits::{ - consensus_api::ConsensusApi, election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -23,14 +22,7 @@ use crate::{ }; /// Tracks state of a VID task -pub struct VidTaskState< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, -> { - /// The state's api - pub api: A, - +pub struct VidTaskState> { /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -50,9 +42,7 @@ pub struct VidTaskState< pub id: u64, } -impl, A: ConsensusApi + 'static> - VidTaskState -{ +impl> VidTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] pub async fn handle( @@ -165,9 +155,7 @@ impl, A: ConsensusApi + } /// task state implementation for VID Task -impl, A: ConsensusApi + 'static> TaskState - for VidTaskState -{ +impl> TaskState for VidTaskState { type Event = Arc>; type Output = HotShotTaskCompleted; diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 4d47d6d197..cd9eea0aa2 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,15 +1,19 @@ -use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; -use hotshot::types::SignatureKey; +use hotshot::{tasks::task_state::CreateTaskState, types::SignatureKey}; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, - node_types::TestTypes, + node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; -use hotshot_task_impls::{events::HotShotEvent, vid::VidTaskState}; -use hotshot_testing::task_helpers::{build_system_handle, vid_scheme_from_view_number}; +use hotshot_task_impls::{events::HotShotEvent::*, vid::VidTaskState}; +use hotshot_testing::{ + predicates::event::exact, + script::{run_test_script, TestScriptStage}, + task_helpers::{build_system_handle, vid_scheme_from_view_number}, +}; use hotshot_types::{ - data::{null_block, DaProposal, VidDisperse, VidDisperseShare, ViewNumber}, + data::{null_block, DaProposal, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -22,7 +26,6 @@ use jf_vid::{precomputable::Precomputable, VidScheme}; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_vid_task() { - use hotshot_task_impls::harness::run_harness; use hotshot_types::message::Proposal; async_compatibility_layer::logging::setup_logging(); @@ -69,69 +72,41 @@ async fn test_vid_task() { signature: message.signature.clone(), _pd: PhantomData, }; - let vid_share_proposals: Vec<_> = VidDisperseShare::from_vid_disperse(vid_disperse.clone()) - .into_iter() - .map(|vid_disperse_share| { - vid_disperse_share - .to_proposal(handle.private_key()) - .expect("Failed to sign block payload!") - }) - .collect(); - let vid_share_proposal = vid_share_proposals[0].clone(); - - let mut input = Vec::new(); - let mut output = HashMap::new(); - - // In view 1, node 2 is the next leader. - input.push(HotShotEvent::ViewChange(ViewNumber::new(1))); - input.push(HotShotEvent::ViewChange(ViewNumber::new(2))); - input.push(HotShotEvent::BlockRecv( - encoded_transactions, - TestMetadata, - ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}).unwrap(), - vid_precompute, - )); - input.push(HotShotEvent::BlockReady( - vid_disperse.clone(), - ViewNumber::new(2), - )); - - input.push(HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key)); - input.push(HotShotEvent::VidShareRecv(vid_share_proposal.clone())); - input.push(HotShotEvent::Shutdown); - output.insert( - HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), - 1, - ); + let view_1 = TestScriptStage { + inputs: vec![ViewChange(ViewNumber::new(1))], + outputs: vec![], + asserts: vec![], + }; + let view_2 = TestScriptStage { + inputs: vec![ + ViewChange(ViewNumber::new(2)), + BlockRecv( + encoded_transactions, + TestMetadata, + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + vid_precompute, + ), + ], + outputs: vec![ + exact(SendPayloadCommitmentAndMetadata( + payload_commitment, + builder_commitment, + TestMetadata, + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) + .unwrap(), + )), + exact(BlockReady(vid_disperse, ViewNumber::new(2))), + exact(VidDisperseSend(vid_proposal.clone(), pub_key)), + ], + asserts: vec![], + }; - output.insert( - HotShotEvent::SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - TestMetadata, - ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), - ), - 1, - ); - output.insert( - HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 1, - ); + let vid_state = VidTaskState::::create_from(&handle).await; + let script = vec![view_1, view_2]; - let vid_state = VidTaskState { - api: handle.clone(), - consensus: handle.hotshot.consensus(), - cur_view: ViewNumber::new(0), - vote_collector: None, - network: handle.hotshot.networks.quorum_network.clone(), - membership: handle.hotshot.memberships.vid_membership.clone().into(), - public_key: handle.public_key(), - private_key: handle.private_key().clone(), - id: handle.hotshot.id, - }; - run_harness(input, output, vid_state, false).await; + run_test_script(script, vid_state).await; } From ee82a1fb5774e431e337367b47577c3583558c59 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 27 May 2024 10:28:00 -0600 Subject: [PATCH 1049/1393] [CX_HARDENING] - Modernize and Audit View Sync Task Tests (#3229) * finish test * merge upstream change * tmp * merge lower branch * revert name --- task-impls/src/harness.rs | 23 +++++++++++++---------- testing/tests/tests_1/view_sync_task.rs | 22 +++++++++++----------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 0e1ff590ef..01a944543b 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; @@ -10,7 +10,7 @@ use crate::events::{HotShotEvent, HotShotTaskCompleted}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { /// The expected events we get from the test. Maps an event to the number of times we expect to see it - expected_output: HashMap, usize>, + expected_output: Vec>, /// If true we won't fail the test if extra events come in allow_extra_output: bool, } @@ -46,7 +46,7 @@ impl TaskState for TestHarnessState { #[allow(clippy::panic)] pub async fn run_harness>> + Send + 'static>( input: Vec>, - expected_output: HashMap, usize>, + expected_output: Vec>, state: S, allow_extra_output: bool, ) where @@ -111,17 +111,20 @@ pub fn handle_event( // * We haven't received all expected outputs yet. if !allow_extra_output || !state.expected_output.is_empty() { assert!( - state.expected_output.contains_key(&event), + state.expected_output.contains(&event), "Got an unexpected event: {event:?}", ); } - let num_expected = state.expected_output.get_mut(&event).unwrap(); - if *num_expected == 1 { - state.expected_output.remove(&event); - } else { - *num_expected -= 1; - } + // NOTE: We only care about finding a single instance of the output event, and we just + // iteratively remove the entries until they're gone. + let idx = state + .expected_output + .iter() + .position(|x| *x == *event) + .unwrap(); + + state.expected_output.remove(idx); if state.expected_output.is_empty() { tracing::info!("test harness task completed"); diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index ffa23b76a5..eaf3d5a11f 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -1,18 +1,18 @@ -use std::collections::HashMap; - use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_task_impls::events::HotShotEvent; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_task_impls::{ + events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, +}; +use hotshot_testing::task_helpers::build_system_handle; +use hotshot_types::{ + data::ViewNumber, simple_vote::ViewSyncPreCommitData, + traits::node_implementation::ConsensusTime, +}; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { - use hotshot_task_impls::{harness::run_harness, view_sync::ViewSyncTaskState}; - use hotshot_testing::task_helpers::build_system_handle; - use hotshot_types::simple_vote::ViewSyncPreCommitData; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -34,15 +34,15 @@ async fn test_view_sync_task() { tracing::error!("Vote in test is {:?}", vote.clone()); let mut input = Vec::new(); - let mut output = HashMap::new(); + let mut output = Vec::new(); input.push(HotShotEvent::Timeout(ViewNumber::new(2))); input.push(HotShotEvent::Timeout(ViewNumber::new(3))); input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); + output.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); let view_sync_state = ViewSyncTaskState::< TestTypes, From cfd30a6601b91be7bcf9c27dca2bebe2c11e57ac Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 27 May 2024 11:05:01 -0600 Subject: [PATCH 1050/1393] Remove Hash from Recently Edited Types in `HotShotEvent` (#3230) * finish test * merge upstream change * tmp * merge lower branch * revert name * remove hash from recently added types * fix all predicate --- task-impls/src/events.rs | 4 ++-- testing/src/predicates/event.rs | 21 ++------------------- types/src/traits/states.rs | 6 +++--- types/src/utils.rs | 4 ++-- 4 files changed, 9 insertions(+), 26 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 90fe75887a..2ba373868d 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -22,11 +22,11 @@ use vbs::version::Version; use crate::view_sync::ViewSyncPhase; /// Marker that the task completed -#[derive(Eq, Hash, PartialEq, Debug, Clone)] +#[derive(Eq, PartialEq, Debug, Clone)] pub struct HotShotTaskCompleted; /// All of the possible events that can be passed between Sequecning `HotShot` tasks -#[derive(Eq, Hash, PartialEq, Debug, Clone)] +#[derive(Eq, PartialEq, Debug, Clone)] #[allow(clippy::large_enum_variant)] pub enum HotShotEvent { /// Shutdown the task diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 87f134a667..86ee569d27 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; use async_lock::RwLock; use async_trait::async_trait; @@ -57,24 +57,7 @@ pub fn all(events: Vec>) -> Box = events.into_iter().collect(); - - let function = move |e: &Arc>| match set.take(e.as_ref()) { - Some(_) => { - if set.is_empty() { - PredicateResult::Pass - } else { - PredicateResult::Incomplete - } - } - None => PredicateResult::Fail, - }; - - Box::new(TestPredicate { - function: Arc::new(RwLock::new(function)), - info, - }) + all_predicates(events.into_iter().map(exact).collect()) } pub fn all_predicates( diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index f555ca304e..392a9dbe16 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -4,7 +4,7 @@ //! compatibilities over the current network state, which is modified by the transactions contained //! within blocks. -use std::{error::Error, fmt::Debug, future::Future, hash::Hash}; +use std::{error::Error, fmt::Debug, future::Future}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::version::Version; @@ -24,7 +24,7 @@ pub trait InstanceState: Debug + Send + Sync {} /// Application-specific state delta, which will be used to store a list of merkle tree entries. pub trait StateDelta: - Debug + PartialEq + Eq + Hash + Send + Sync + Serialize + for<'a> Deserialize<'a> + Debug + PartialEq + Eq + Send + Sync + Serialize + for<'a> Deserialize<'a> { } @@ -38,7 +38,7 @@ pub trait StateDelta: /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) pub trait ValidatedState: - Serialize + DeserializeOwned + Debug + Default + Hash + PartialEq + Eq + Send + Sync + Serialize + DeserializeOwned + Debug + Default + PartialEq + Eq + Send + Sync { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; diff --git a/types/src/utils.rs b/types/src/utils.rs index 0bec71ea42..8265f997df 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -24,7 +24,7 @@ use crate::{ }; /// A view's state -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(bound = "")] pub enum ViewInner { /// A pending view with an available block but not leaf proposal yet. @@ -133,7 +133,7 @@ impl Deref for View { } /// This exists so we can perform state transitions mutably -#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] #[serde(bound = "")] pub struct View { /// The view data. Wrapped in a struct so we can mutate From e1f9ed8c7e65953f3276bcbf4f097ff4e9656c97 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 28 May 2024 15:36:29 +0200 Subject: [PATCH 1051/1393] Lr/optimistic vid (#3221) * Optimistically calculate VID when the primary network is down * Lints * Tokio fixes * Remove trace * Don't block DA task when calculating VID * Lint --- task-impls/src/da.rs | 15 ++++++++++++++ task-impls/src/helpers.rs | 42 ++------------------------------------ task-impls/src/response.rs | 17 +++++---------- task-impls/src/vid.rs | 6 +++--- types/src/consensus.rs | 24 +++++++++++++++++++++- types/src/data.rs | 38 ++++++++++++++++++++++++++++++++-- 6 files changed, 84 insertions(+), 58 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 55a1ca4f73..ff2b238173 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,6 +1,7 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::Sender; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; @@ -16,6 +17,7 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, @@ -218,6 +220,19 @@ impl, A: ConsensusApi + { tracing::trace!("{e:?}"); } + // Optimistically calculate and update VID if we know that the primary network is down. + if self.da_network.is_primary_down() { + let consensus = Arc::clone(&self.consensus); + let membership = Arc::clone(&self.quorum_membership); + let pk = self.private_key.clone(); + async_spawn(async move { + consensus + .write() + .await + .calculate_and_update_vid(view, membership, &pk) + .await; + }); + } } HotShotEvent::DaVoteRecv(ref vote) => { debug!("DA vote recv, Main Task {:?}", vote.view_number()); diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 8deeea29ef..94cf5fed59 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -1,16 +1,8 @@ -use std::sync::Arc; - use async_broadcast::{SendError, Sender}; #[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn_blocking, JoinHandle}; -use hotshot_types::{ - data::VidDisperse, - traits::{election::Membership, node_implementation::NodeType}, - vid::{vid_scheme, VidPrecomputeData}, -}; -use jf_vid::{precomputable::Precomputable, VidScheme}; +use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] -use tokio::task::{spawn_blocking, JoinHandle}; +use tokio::task::JoinHandle; /// Cancel a task pub async fn cancel_task(task: JoinHandle) { @@ -39,36 +31,6 @@ pub async fn broadcast_event(event: E, sender: &Send } } -/// Calculate the vid disperse information from the payload given a view and membership, -/// optionally using precompute data from builder -/// -/// # Panics -/// Panics if the VID calculation fails, this should not happen. -#[allow(clippy::panic)] -pub async fn calculate_vid_disperse( - txns: Arc<[u8]>, - membership: &Arc, - view: TYPES::Time, - precompute_data: Option, -) -> VidDisperse { - let num_nodes = membership.total_nodes(); - - let vid_disperse = spawn_blocking(move || { - precompute_data - .map_or_else( - || vid_scheme(num_nodes).disperse(Arc::clone(&txns)), - |data| vid_scheme(num_nodes).disperse_precompute(Arc::clone(&txns), &data) - ) - .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) - }).await; - #[cfg(async_executor_impl = "tokio")] - // Tokio's JoinHandle's `Output` is `Result`, while in async-std it's just `T` - // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. - let vid_disperse = vid_disperse.unwrap(); - - VidDisperse::from_membership(view, vid_disperse, membership.as_ref()) -} - /// Utilities to print anyhow logs. pub trait AnyhowTracing { /// Print logs as debug diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 889d2862ac..ee25dbdf93 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -23,7 +23,7 @@ use sha2::{Digest, Sha256}; use tokio::task::JoinHandle; use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; -use crate::{events::HotShotEvent, helpers::calculate_vid_disperse}; +use crate::events::HotShotEvent; /// Type alias for consensus state wrapped in a lock. type LockedConsensusState = Arc>>; @@ -116,7 +116,7 @@ impl NetworkResponseState { } } - /// Get the VID share from conensus storage, or calculate it from a the payload for + /// Get the VID share from consensus storage, or calculate it from the payload for /// the view, if we have the payload. Stores all the shares calculated from the payload /// if the calculation was done async fn get_or_calc_vid_share( @@ -130,17 +130,10 @@ impl NetworkResponseState { .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { - let txns = consensus.saved_payloads().get(&view)?; - let vid = - calculate_vid_disperse(Arc::clone(txns), &Arc::clone(&self.quorum), view, None) - .await; - let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - for share in shares { - if let Some(prop) = share.to_proposal(&self.private_key) { - consensus.update_vid_shares(view, prop); - } - } + consensus + .calculate_and_update_vid(view, Arc::clone(&self.quorum), &self.private_key) + .await; return consensus.vid_shares().get(&view)?.get(key).cloned(); } consensus.vid_shares().get(&view)?.get(key).cloned() diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 8b37bf27fc..f7dd6d4045 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -5,7 +5,7 @@ use async_lock::RwLock; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, - data::VidDisperseShare, + data::{VidDisperse, VidDisperseShare}, message::Proposal, traits::{ election::Membership, @@ -18,7 +18,7 @@ use tracing::{debug, error, instrument, warn}; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, calculate_vid_disperse}, + helpers::broadcast_event, }; /// Tracks state of a VID task @@ -61,7 +61,7 @@ impl> VidTaskState { let payload = ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); - let vid_disperse = calculate_vid_disperse( + let vid_disperse = VidDisperse::calculate_vid_disperse( Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 3e98a37a8b..bc7a02a637 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,7 +11,7 @@ use tracing::{debug, error}; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf, VidDisperseShare}, + data::{Leaf, VidDisperse, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::{DaCertificate, QuorumCertificate, UpgradeCertificate}, @@ -19,6 +19,7 @@ use crate::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, node_implementation::NodeType, + signature_key::SignatureKey, BlockPayload, ValidatedState, }, utils::{BuilderCommitment, StateAndDelta, Terminator}, @@ -474,6 +475,27 @@ impl Consensus { .0 .expect("Decided state not found! Consensus internally inconsistent") } + + /// Calculates `VidDisperse` based on the view, the txns and the membership, + /// and updates `vid_shares` map with the signed `VidDisperseShare` proposals. + /// Returned `Option` indicates whether the update has actually happened or not. + pub async fn calculate_and_update_vid( + &mut self, + view: ::Time, + membership: Arc, + private_key: &::PrivateKey, + ) -> Option<()> { + let txns = self.saved_payloads().get(&view)?; + let vid = + VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, None).await; + let shares = VidDisperseShare::from_vid_disperse(vid); + for share in shares { + if let Some(prop) = share.to_proposal(private_key) { + self.update_vid_shares(view, prop); + } + } + Some(()) + } } /// Alias for the block payload commitment and the associated metadata. The primary data diff --git a/types/src/data.rs b/types/src/data.rs index c335c36755..e91717768b 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -13,13 +13,17 @@ use std::{ use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::spawn_blocking; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; use derivative::Derivative; -use jf_vid::VidDisperse as JfVidDisperse; +use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; use snafu::Snafu; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::spawn_blocking; use tracing::error; use crate::{ @@ -39,7 +43,7 @@ use crate::{ BlockPayload, }, utils::bincode_opts, - vid::{VidCommitment, VidCommon, VidSchemeType, VidShare}, + vid::{vid_scheme, VidCommitment, VidCommon, VidPrecomputeData, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; @@ -174,6 +178,36 @@ impl VidDisperse { payload_commitment: vid_disperse.commit, } } + + /// Calculate the vid disperse information from the payload given a view and membership, + /// optionally using precompute data from builder + /// + /// # Panics + /// Panics if the VID calculation fails, this should not happen. + #[allow(clippy::panic)] + pub async fn calculate_vid_disperse( + txns: Arc<[u8]>, + membership: &Arc, + view: TYPES::Time, + precompute_data: Option, + ) -> Self { + let num_nodes = membership.total_nodes(); + + let vid_disperse = spawn_blocking(move || { + precompute_data + .map_or_else( + || vid_scheme(num_nodes).disperse(Arc::clone(&txns)), + |data| vid_scheme(num_nodes).disperse_precompute(Arc::clone(&txns), &data) + ) + .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) + }).await; + #[cfg(async_executor_impl = "tokio")] + // Tokio's JoinHandle's `Output` is `Result`, while in async-std it's just `T` + // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. + let vid_disperse = vid_disperse.unwrap(); + + Self::from_membership(view, vid_disperse, membership.as_ref()) + } } /// Helper type to encapsulate the various ways that proposal certificates can be captured and From d32da7f2882805b5aa1d6f6772c1a0124aaa9a61 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 28 May 2024 22:34:55 +0800 Subject: [PATCH 1052/1393] [CX_CLEANUP] - Diverge the Quorum Vote Task (#3211) * init consensus2 from main * rollback, fix types, re-create module * add new parent leaf and state method * leaf chain traversal, new fields * fix build * partial quorum proposal validated event * clippy, fix build, add method * remove high qc blocking requirement * some working tests, almost done * migrate to using consensus internal state * test passes but doesnt decide * fix naming * fix task and get decide event * fix timeout test * fix view sync cert * temporarily ignore propose now * lint * make new folder * remove error logs, actually propose correctly for view 1 * fix doc * add quorum proposal recv task without liveness check * receive all events in quorum proposal recv task * fix all tests * add partial liveness check * note for later * fix error messages * naming changes * tmp push * fix build * remove comment * fix build and tests * move storage update * pr feedback * move the liveness proposal out, fix all lints * add replacement to ProposeNow event * new tests, validate vote now event * fix validated event data, make helper less redundant * remove unused import * more pr comments, collect garbage, fix bug in view update * fix bug with early return * comments, move function, remove log * comment * walk leaves by number instead * fill out context message * fix lints * Refactor vote task and fix tests * remove accidentally checked in comment * Fix build after merging * remove error when validated state not found * Add tests * Fix after merge * Rename temp type * Combine helper files * Typo * Change logging level --------- Co-authored-by: Jarred Parr --- task-impls/src/consensus/helpers.rs | 136 ++++++++---------- task-impls/src/quorum_proposal_recv/mod.rs | 4 +- task-impls/src/quorum_vote.rs | 90 +++++++----- testing/src/{task_helpers.rs => helpers.rs} | 39 ++++- testing/src/lib.rs | 5 +- testing/src/view_generator.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 7 +- testing/tests/tests_1/da_task.rs | 2 +- testing/tests/tests_1/proposal_ordering.rs | 5 +- .../tests_1/quorum_proposal_recv_task.rs | 16 +-- testing/tests/tests_1/quorum_proposal_task.rs | 38 ++--- testing/tests/tests_1/quorum_vote_task.rs | 128 ++++++++++++++--- testing/tests/tests_1/upgrade_task.rs | 8 +- testing/tests/tests_1/vid_task.rs | 2 +- testing/tests/tests_1/view_sync_task.rs | 2 +- types/src/vote.rs | 4 +- 16 files changed, 306 insertions(+), 182 deletions(-) rename testing/src/{task_helpers.rs => helpers.rs} (89%) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 22836d56f4..06b9e1c2a2 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -14,13 +14,11 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; use futures::FutureExt; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::simple_vote::QuorumData; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, - message::{GeneralConsensusMessage, Proposal}, + message::Proposal, simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, @@ -34,6 +32,8 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, }; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::{message::GeneralConsensusMessage, simple_vote::QuorumData}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, warn}; @@ -1014,14 +1014,10 @@ pub async fn handle_quorum_proposal_validated = PhantomData; - -/// TEMPORARY TYPE: Private key, latest decided upgrade certificate, committee membership, and -/// event stream, for sending the vote. +/// Private key, latest decided upgrade certificate, committee membership, and event stream, for +/// sending the vote. #[cfg(not(feature = "dependency-tasks"))] -type TemporaryVoteInfo = ( +type VoteInfo = ( <::SignatureKey as SignatureKey>::PrivateKey, Option>, Arc<::Membership>, @@ -1031,6 +1027,7 @@ type TemporaryVoteInfo = ( #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] #[allow(unused_variables)] +#[cfg(not(feature = "dependency-tasks"))] /// Check if we are able to vote, like whether the proposal is valid, /// whether we have DAC and VID share, and if so, vote. pub async fn update_state_and_vote_if_able>( @@ -1041,10 +1038,9 @@ pub async fn update_state_and_vote_if_able>, quorum_membership: Arc, instance_state: Arc, - vote_info: TemporaryVoteInfo, + vote_info: VoteInfo, version: Version, ) -> bool { - #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::simple_vote::QuorumVote; if !quorum_membership.has_stake(&public_key) { @@ -1066,16 +1062,13 @@ pub async fn update_state_and_vote_if_able; - - #[cfg(not(feature = "dependency-tasks"))] - { - // Validate the DAC. - message = if cert.is_valid_cert(vote_info.2.as_ref()) { - // Validate the block payload commitment for non-genesis DAC. - if cert.date().payload_commit != proposal.block_header.payload_commitment() { - warn!( - "Block payload commitment does not equal da cert payload commitment. View = {}", - *view - ); - return false; - } - if let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: proposed_leaf.commit(), - }, - view, - &public_key, - &vote_info.0, - ) { - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Unable to sign quorum vote!"); - return false; - } - } else { - error!( - "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", - cert, cur_view + // Validate the DAC. + let message = if cert.is_valid_cert(vote_info.2.as_ref()) { + // Validate the block payload commitment for non-genesis DAC. + if cert.date().payload_commit != proposal.block_header.payload_commitment() { + warn!( + "Block payload commitment does not equal da cert payload commitment. View = {}", + *view ); return false; - }; - } + } + if let Ok(vote) = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: proposed_leaf.commit(), + }, + view, + &public_key, + &vote_info.0, + ) { + GeneralConsensusMessage::::Vote(vote) + } else { + error!("Unable to sign quorum vote!"); + return false; + } + } else { + error!( + "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", + cert, cur_view + ); + return false; + }; + let mut consensus = consensus.write().await; consensus.update_validated_state_map( cur_view, @@ -1190,28 +1179,25 @@ pub async fn update_state_and_vote_if_able> QuorumProposalRecvTaskState< ); return; }; - let Some(disperse_share) = vid_shares.get(&self.public_key) else { + let Some(vid_share) = vid_shares.get(&self.public_key) else { error!("Did not get a VID share for our public key, aborting vote"); return; }; @@ -174,7 +174,7 @@ impl> QuorumProposalRecvTaskState< VoteDependencyData { quorum_proposal: current_proposal, parent_leaf, - disperse_share: disperse_share.clone(), + vid_share: vid_share.clone(), da_cert: da_cert.clone(), }, )), diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 622cb99309..db60d70433 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "dependency-tasks")] -use std::marker::PhantomData; use std::{collections::HashMap, sync::Arc}; use async_broadcast::{Receiver, Sender}; @@ -33,8 +31,6 @@ use jf_vid::VidScheme; use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; -#[cfg(feature = "dependency-tasks")] -use crate::consensus::helpers::update_state_and_vote_if_able; use crate::{ events::HotShotEvent, helpers::{broadcast_event, cancel_task}, @@ -51,6 +47,8 @@ enum VoteDependency { Vid, /// For the `VoteNow` event. VoteNow, + /// For the `ValidatedStateUpdated` event. + ValidatedState, } /// Handler for the vote dependency. @@ -84,7 +82,7 @@ impl + 'static> HandleDepOutput let mut cur_proposal = None; let mut payload_commitment = None; let mut leaf = None; - let mut disperse_share = None; + let mut vid_share = None; for event in res { match event.as_ref() { #[allow(unused_assignments)] @@ -120,7 +118,7 @@ impl + 'static> HandleDepOutput } HotShotEvent::VidShareValidated(share) => { let vid_payload_commitment = share.data.payload_commitment; - disperse_share = Some(share.clone()); + vid_share = Some(share.clone()); if let Some(comm) = payload_commitment { if vid_payload_commitment != comm { error!("VID has inconsistent payload commitment with quorum proposal or DAC."); @@ -132,7 +130,7 @@ impl + 'static> HandleDepOutput } HotShotEvent::VoteNow(_, vote_dependency_data) => { leaf = Some(vote_dependency_data.parent_leaf.clone()); - disperse_share = Some(vote_dependency_data.disperse_share.clone()); + vid_share = Some(vote_dependency_data.vid_share.clone()); } _ => {} } @@ -145,31 +143,20 @@ impl + 'static> HandleDepOutput ) .await; - // TODO - // #[cfg(feature = "dependency-tasks")] - // { - // let Some(proposal) = cur_proposal else { - // error!("No proposal received, but it should be."); - // return; - // }; - // // For this vote task, we'll update the state in storage without voting in this function, - // // then vote later. - // update_state_and_vote_if_able::( - // self.view_number, - // proposal, - // self.public_key.clone(), - // self.consensus, - // Arc::clone(&self.storage), - // self.quorum_membership, - // self.instance_state, - // PhantomData, - // ) - // .await; - // } + if !self.quorum_membership.has_stake(&self.public_key) { + debug!( + "We were not chosen for quorum committee on {:?}", + self.view_number + ); + return; + } // Create and send the vote. let Some(leaf) = leaf else { - error!("Quorum proposal isn't validated, but it should be."); + error!( + "We don't have the leaf for this view {:?}, but we should, because the vote dependencies have completed.", + self.view_number + ); return; }; let message = if let Ok(vote) = QuorumVote::::create_signed_vote( @@ -191,10 +178,14 @@ impl + 'static> HandleDepOutput vote.view_number() + 1 ); // Add to the storage. - let Some(disperse) = disperse_share else { + let Some(vid_share) = vid_share else { + error!( + "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", + self.view_number + ); return; }; - if let Err(e) = self.storage.write().await.append_vid(&disperse).await { + if let Err(e) = self.storage.write().await.append_vid(&vid_share).await { error!("Failed to store VID share with error {:?}", e); return; } @@ -282,6 +273,13 @@ impl> QuorumVoteTaskState { + if let HotShotEvent::ValidatedStateUpdated(view, _) = event { + *view + } else { + return false; + } + } VoteDependency::VoteNow => { if let HotShotEvent::VoteNow(view, _) = event { *view @@ -322,6 +320,11 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState { quorum_proposal_dependency.mark_as_completed(event); } + HotShotEvent::ValidatedStateUpdated(..) => { + validated_state_dependency.mark_as_completed(event); + } _ => {} } } - let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; + let deps = vec![ + quorum_proposal_dependency, + dac_dependency, + vid_dependency, + validated_state_dependency, + ]; let dependency_chain = OrDependency::from_deps(vec![ // Either we fulfull the dependencies individiaully. AndDependency::from_deps(deps), @@ -505,9 +516,17 @@ impl> QuorumVoteTaskState { - debug!("All vote dependencies verified for view {:?}", view); - if !self.update_latest_voted_view(*view).await { + HotShotEvent::ValidatedStateUpdated(view_number, _) => { + self.create_dependency_task_if_new( + *view_number, + event_receiver, + &event_sender, + Some(event), + ); + } + HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { + debug!("All vote dependencies verified for view {:?}", view_number); + if !self.update_latest_voted_view(*view_number).await { debug!("view not updated"); return; } @@ -528,6 +547,7 @@ impl> TaskState for QuorumVoteTask | HotShotEvent::QuorumVoteDependenciesValidated(_) | HotShotEvent::VoteNow(..) | HotShotEvent::QuorumProposalValidated(..) + | HotShotEvent::ValidatedStateUpdated(..) | HotShotEvent::Shutdown, ) } diff --git a/testing/src/task_helpers.rs b/testing/src/helpers.rs similarity index 89% rename from testing/src/task_helpers.rs rename to testing/src/helpers.rs index fe573eef16..05b06621b5 100644 --- a/testing/src/task_helpers.rs +++ b/testing/src/helpers.rs @@ -12,7 +12,7 @@ use hotshot::{ use hotshot_example_types::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ @@ -27,6 +27,7 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, }, + utils::{View, ViewInner}, vid::{vid_scheme, VidCommitment, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, }; @@ -307,3 +308,39 @@ pub async fn build_vote( .expect("Failed to create quorum vote"); GeneralConsensusMessage::::Vote(vote) } + +/// This function permutes the provided input vector `inputs`, given some order provided within the +/// `order` vector. +/// +/// # Examples +/// let output = permute_input_with_index_order(vec![1, 2, 3], vec![2, 1, 0]); +/// // Output is [3, 2, 1] now +pub fn permute_input_with_index_order(inputs: Vec, order: Vec) -> Vec +where + T: Clone, +{ + let mut ordered_inputs = Vec::with_capacity(inputs.len()); + for &index in &order { + ordered_inputs.push(inputs[index].clone()); + } + ordered_inputs +} + +/// This function will create a fake [`View`] from a provided [`Leaf`]. +pub fn build_fake_view_with_leaf(leaf: Leaf) -> View { + build_fake_view_with_leaf_and_state(leaf, TestValidatedState::default()) +} + +/// This function will create a fake [`View`] from a provided [`Leaf`] and `state`. +pub fn build_fake_view_with_leaf_and_state( + leaf: Leaf, + state: TestValidatedState, +) -> View { + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state: state.into(), + delta: None, + }, + } +} diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 6810620e5d..e1bb38e034 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -7,7 +7,7 @@ )] /// Helpers for initializing system context handle and building tasks. -pub mod task_helpers; +pub mod helpers; /// builder pub mod test_builder; @@ -45,9 +45,6 @@ pub mod script; /// view generator for tests pub mod view_generator; -/// helper functions for test scripts -pub mod test_helpers; - /// global event at the test level #[derive(Clone, Debug)] pub enum GlobalTestEvent { diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 67ca740af9..1a53144fb1 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -26,7 +26,7 @@ use hotshot_types::{ }; use sha2::{Digest, Sha256}; -use crate::task_helpers::{ +use crate::helpers::{ build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, }; diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index d8c99aae57..780dca5b80 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -14,10 +14,9 @@ use hotshot_testing::{ exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, timeout_vote_send, }, script::{run_test_script, TestScriptStage}, - task_helpers::{ - build_system_handle, vid_share, key_pair_for_id, vid_scheme_from_view_number, + helpers::{ + build_system_handle, vid_share, key_pair_for_id, vid_scheme_from_view_number, permute_input_with_index_order }, - test_helpers::permute_input_with_index_order, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -123,7 +122,7 @@ async fn test_consensus_vote() { use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, view_generator::TestViewGenerator, }; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index cee01f6ff9..72acde3fd6 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -10,7 +10,7 @@ use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::exact, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, view_generator::TestViewGenerator, }; use hotshot_types::{ diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 920f1cd4b2..3064c907f0 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -11,8 +11,7 @@ use hotshot_example_types::{ use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ predicates::event::{all_predicates, exact, quorum_proposal_send, quorum_proposal_validated}, - task_helpers::{vid_share, vid_scheme_from_view_number}, - test_helpers::permute_input_with_index_order, + helpers::{vid_share, vid_scheme_from_view_number, permute_input_with_index_order}, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -29,7 +28,7 @@ use sha2::Digest; async fn test_ordering_with_specific_order(input_permutation: Vec) { use hotshot_testing::{ script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, }; async_compatibility_layer::logging::setup_logging(); diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 85495dbab1..71c9093a7e 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -9,7 +9,7 @@ use hotshot_task_impls::{ use hotshot_testing::{ predicates::event::{exact, vote_now}, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, view_generator::TestViewGenerator, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; @@ -19,7 +19,7 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_recv_task() { - use hotshot_testing::test_helpers::create_fake_view_with_leaf; + use hotshot_testing::helpers::build_fake_view_with_leaf; use hotshot_types::data::Leaf; async_compatibility_layer::logging::setup_logging(); @@ -52,7 +52,7 @@ async fn test_quorum_proposal_recv_task() { .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); consensus_writer.update_validated_state_map( view.quorum_proposal.data.view_number, - create_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone()), ); } drop(consensus_writer); @@ -82,8 +82,8 @@ async fn test_quorum_proposal_recv_task() { async fn test_quorum_proposal_recv_task_liveness_check() { use hotshot::traits::ValidatedState; use hotshot_example_types::state_types::TestValidatedState; - use hotshot_testing::test_helpers::{ - create_fake_view_with_leaf, create_fake_view_with_leaf_and_state, + use hotshot_testing::helpers::{ + build_fake_view_with_leaf, build_fake_view_with_leaf_and_state, }; use hotshot_types::{ data::Leaf, @@ -124,7 +124,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { // the parent lookup fails and we trigger a view liveness check. consensus_writer.update_validated_state_map( inserted_view_number, - create_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone()), ); // The index here is important. Since we're proposing for view 4, we need the @@ -153,7 +153,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { exact(ViewChange(ViewNumber::new(3))), exact(ValidatedStateUpdated( ViewNumber::new(3), - create_fake_view_with_leaf_and_state( + build_fake_view_with_leaf_and_state( leaves[2].clone(), >::from_header( &proposals[2].data.block_header, @@ -161,7 +161,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), )), exact(NewUndecidedView(leaves[2].clone())), - exact(QuorumProposalLivenessValidated(proposals[2].data.clone())), + exact(LivenessCheckProposalRecv(proposals[2].data.clone())), vote_now(), ], asserts: vec![], diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 9f67c2f46a..63cfefc2b2 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -5,7 +5,7 @@ use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, - state_types::{TestInstanceState, TestValidatedState}, + state_types::{TestInstanceState}, }; use hotshot_task_impls::{ events::HotShotEvent::{self, *}, @@ -17,8 +17,8 @@ use hotshot_testing::{ Predicate, }, script::{run_test_script, TestScriptStage}, - task_helpers::{ - build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number, vid_share, + helpers::{ + build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number, vid_share, build_fake_view_with_leaf, }, test_helpers::create_fake_view_with_leaf, view_generator::TestViewGenerator, @@ -31,7 +31,7 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, }, - utils::{BuilderCommitment, View, ViewInner}, + utils::BuilderCommitment, vid::VidSchemeType, vote::HasViewNumber, }; @@ -102,7 +102,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { .unwrap(), ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), - ValidatedStateUpdated(ViewNumber::new(0), create_fake_view_with_leaf(genesis_leaf)), + ValidatedStateUpdated(ViewNumber::new(0), build_fake_view_with_leaf(genesis_leaf)), ], outputs: vec![ exact(UpdateHighQc(genesis_cert.clone())), @@ -175,7 +175,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( ViewNumber::new(0), - create_fake_view_with_leaf(genesis_leaf.clone()), + build_fake_view_with_leaf(genesis_leaf.clone()), ), ], outputs: vec![exact(UpdateHighQc(genesis_cert.clone()))], @@ -198,7 +198,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidShareValidated(vid_share(&vids[1].0, handle.public_key())), ValidatedStateUpdated( proposals[0].data.view_number(), - create_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone()), ), ], outputs: vec![exact(UpdateHighQc(proposals[1].data.justify_qc.clone()))], @@ -221,7 +221,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidShareValidated(vid_share(&vids[2].0, handle.public_key())), ValidatedStateUpdated( proposals[1].data.view_number(), - create_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone()), ), ], outputs: vec![ @@ -248,7 +248,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidShareValidated(vid_share(&vids[3].0, handle.public_key())), ValidatedStateUpdated( proposals[2].data.view_number(), - create_fake_view_with_leaf(leaves[2].clone()), + build_fake_view_with_leaf(leaves[2].clone()), ), ], outputs: vec![exact(UpdateHighQc(proposals[3].data.justify_qc.clone()))], @@ -270,7 +270,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidShareValidated(vid_share(&vids[4].0, handle.public_key())), ValidatedStateUpdated( proposals[3].data.view_number(), - create_fake_view_with_leaf(leaves[3].clone()), + build_fake_view_with_leaf(leaves[3].clone()), ), ], outputs: vec![ @@ -348,7 +348,7 @@ async fn test_quorum_proposal_task_qc_timeout() { VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), ValidatedStateUpdated( ViewNumber::new(2), - create_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone()), ), ], outputs: vec![quorum_proposal_send()], @@ -428,7 +428,7 @@ async fn test_quorum_proposal_task_view_sync() { VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), ValidatedStateUpdated( ViewNumber::new(1), - create_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone()), ), ], outputs: vec![quorum_proposal_send()], @@ -498,7 +498,7 @@ async fn test_quorum_proposal_livness_check_proposal() { VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( ViewNumber::new(0), - create_fake_view_with_leaf(genesis_leaf.clone()), + build_fake_view_with_leaf(genesis_leaf.clone()), ), ], outputs: vec![exact(UpdateHighQc(genesis_cert.clone()))], @@ -521,7 +521,7 @@ async fn test_quorum_proposal_livness_check_proposal() { VidShareValidated(vid_share(&vids[1].0, handle.public_key())), ValidatedStateUpdated( proposals[0].data.view_number(), - create_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone()), ), ], outputs: vec![exact(UpdateHighQc(proposals[1].data.justify_qc.clone()))], @@ -546,7 +546,7 @@ async fn test_quorum_proposal_livness_check_proposal() { VidShareValidated(vid_share(&vids[2].0, handle.public_key())), ValidatedStateUpdated( proposals[1].data.view_number(), - create_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone()), ), ], outputs: vec![ @@ -572,7 +572,7 @@ async fn test_quorum_proposal_livness_check_proposal() { VidShareValidated(vid_share(&vids[3].0, handle.public_key())), ValidatedStateUpdated( proposals[2].data.view_number(), - create_fake_view_with_leaf(leaves[2].clone()), + build_fake_view_with_leaf(leaves[2].clone()), ), ], outputs: vec![exact(UpdateHighQc(proposals[3].data.justify_qc.clone()))], @@ -594,7 +594,7 @@ async fn test_quorum_proposal_livness_check_proposal() { VidShareValidated(vid_share(&vids[4].0, handle.public_key())), ValidatedStateUpdated( proposals[3].data.view_number(), - create_fake_view_with_leaf(leaves[3].clone()), + build_fake_view_with_leaf(leaves[3].clone()), ), ], outputs: vec![ @@ -745,7 +745,7 @@ async fn test_quorum_proposal_task_happy_path_leaf_ascension() { let mut consensus_writer = consensus.write().await; consensus_writer.update_validated_state_map( ViewNumber::new(view_number), - create_fake_view_with_leaf(leaf.clone()), + build_fake_view_with_leaf(leaf.clone()), ); consensus_writer.update_saved_leaves(leaf.clone()); consensus_writer.update_vid_shares( @@ -832,7 +832,7 @@ async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { if rand::random::() < 0.9 { consensus_writer.update_validated_state_map( ViewNumber::new(view_number), - create_fake_view_with_leaf(leaf.clone()), + build_fake_view_with_leaf(leaf.clone()), ); consensus_writer.update_saved_leaves(leaf.clone()); consensus_writer.update_vid_shares( diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 26e210507c..bb68f6f376 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,8 +1,8 @@ #![allow(clippy::panic)] use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_testing::task_helpers::vid_share; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_testing::helpers::{build_fake_view_with_leaf,vid_share}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime,vote::HasViewNumber}; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -12,7 +12,7 @@ async fn test_quorum_vote_task_success() { use hotshot_testing::{ predicates::event::{exact, quorum_vote_send}, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, view_generator::TestViewGenerator, }; @@ -36,13 +36,17 @@ async fn test_quorum_vote_task_success() { vids.push(view.vid_proposal.clone()); } - // Send the quorum proposal, DAC, and VID disperse data, in which case a dummy vote can be - // formed and the view number will be updated. + // Send the quorum proposal, DAC, VID share data, and validated state, in which case a dummy + // vote can be formed and the view number will be updated. let view_success = TestScriptStage { inputs: vec![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), VidShareRecv(vids[1].0[0].clone()), + ValidatedStateUpdated( + proposals[1].data.view_number(), + build_fake_view_with_leaf(leaves[1].clone()), + ), ], outputs: vec![ exact(DaCertificateValidated(dacs[1].clone())), @@ -67,7 +71,7 @@ async fn test_quorum_vote_task_vote_now() { use hotshot_testing::{ predicates::event::{exact, quorum_vote_send}, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, view_generator::TestViewGenerator, }; use hotshot_types::vote::VoteDependencyData; @@ -87,7 +91,7 @@ async fn test_quorum_vote_task_vote_now() { let vote_dependency_data = VoteDependencyData { quorum_proposal: view.quorum_proposal.data.clone(), parent_leaf: view.leaf.clone(), - disperse_share: view.vid_proposal.0[0].clone(), + vid_share: view.vid_proposal.0[0].clone(), da_cert: view.da_certificate.clone(), }; @@ -115,7 +119,7 @@ async fn test_quorum_vote_task_miss_dependency() { use hotshot_testing::{ predicates::event::exact, script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, view_generator::TestViewGenerator, }; @@ -134,7 +138,7 @@ async fn test_quorum_vote_task_miss_dependency() { let mut dacs = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(3) { + for view in (&mut generator).take(5) { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -143,31 +147,56 @@ async fn test_quorum_vote_task_miss_dependency() { leaves.push(view.leaf.clone()); } - // Send two of quorum proposal, DAC, and VID disperse data, in which case there's no vote. + // Send three of quorum proposal, DAC, VID share data, and validated state, in which case + // there's no vote. let view_no_dac = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[0].data.clone(), leaves[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + ValidatedStateUpdated( + proposals[1].data.view_number(), + build_fake_view_with_leaf(leaves[1].clone()), + ), ], - outputs: vec![exact(VidShareValidated(vids[0].0[0].clone()))], + outputs: vec![exact(VidShareValidated(vids[1].0[0].clone()))], asserts: vec![], }; let view_no_vid = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[1].clone()), - DaCertificateRecv(dacs[1].clone()), + QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + DaCertificateRecv(dacs[2].clone()), + ValidatedStateUpdated( + proposals[2].data.view_number(), + build_fake_view_with_leaf(leaves[2].clone()), + ), ], - outputs: vec![exact(DaCertificateValidated(dacs[1].clone()))], + outputs: vec![exact(DaCertificateValidated(dacs[2].clone()))], asserts: vec![], }; let view_no_quorum_proposal = TestScriptStage { inputs: vec![ - DaCertificateRecv(dacs[2].clone()), - VidShareRecv(vid_share(&vids[2].0, handle.public_key())), + DaCertificateRecv(dacs[3].clone()), + VidShareRecv(vid_share(&vids[3].0, handle.public_key())), + ValidatedStateUpdated( + proposals[3].data.view_number(), + build_fake_view_with_leaf(leaves[3].clone()), + ), ], outputs: vec![ - exact(DaCertificateValidated(dacs[2].clone())), - exact(VidShareValidated(vids[2].0[0].clone())), + exact(DaCertificateValidated(dacs[3].clone())), + exact(VidShareValidated(vids[3].0[0].clone())), + ], + asserts: vec![], + }; + let view_no_validated_state = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[4].data.clone(), leaves[3].clone()), + DaCertificateRecv(dacs[4].clone()), + VidShareRecv(vid_share(&vids[4].0, handle.public_key())), + ], + outputs: vec![ + exact(DaCertificateValidated(dacs[4].clone())), + exact(VidShareValidated(vids[4].0[0].clone())), ], asserts: vec![], }; @@ -176,8 +205,65 @@ async fn test_quorum_vote_task_miss_dependency() { QuorumVoteTaskState::::create_from(&handle).await; run_test_script( - vec![view_no_dac, view_no_vid, view_no_quorum_proposal], + vec![view_no_dac, view_no_vid, view_no_quorum_proposal, view_no_validated_state], quorum_vote_state, ) .await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_vote_task_incorrect_dependency() { + use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; + use hotshot_testing::{ + predicates::event::exact, + script::{run_test_script, TestScriptStage}, + helpers::build_system_handle, + view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + let mut proposals = Vec::new(); + let mut leaves = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2) { + proposals.push(view.quorum_proposal.clone()); + leaves.push(view.leaf.clone()); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Send the correct quorum proposal, DAC, and VID share data, and incorrect validated state. + let view_incorrect_dependency = TestScriptStage { + inputs: vec![ + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + DaCertificateRecv(dacs[1].clone()), + VidShareRecv(vids[1].0[0].clone()), + // The validated state is for an earlier view. + ValidatedStateUpdated( + proposals[0].data.view_number(), + build_fake_view_with_leaf(leaves[0].clone()), + ), + ], + outputs: vec![ + exact(DaCertificateValidated(dacs[1].clone())), + exact(VidShareValidated(vids[1].0[0].clone())), + ], + asserts: vec![], + }; + + let quorum_vote_state = + QuorumVoteTaskState::::create_from(&handle).await; + + run_test_script(vec![view_incorrect_dependency], quorum_vote_state).await; +} diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 5cfda6a7d3..9a2e88f6a3 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -16,7 +16,7 @@ use hotshot_task_impls::{ use hotshot_testing::{ predicates::{event::*, upgrade::*}, script::{Expectations, TaskScript}, - task_helpers::vid_share, + helpers::vid_share, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -33,7 +33,7 @@ use vbs::version::Version; async fn test_consensus_task_upgrade() { use hotshot_testing::{ script::{run_test_script, TestScriptStage}, - task_helpers::build_system_handle, + helpers::build_system_handle, }; async_compatibility_layer::logging::setup_logging(); @@ -166,7 +166,7 @@ async fn test_consensus_task_upgrade() { async fn test_upgrade_and_consensus_task() { use std::sync::Arc; - use hotshot_testing::task_helpers::build_system_handle; + use hotshot_testing::helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -328,7 +328,7 @@ async fn test_upgrade_and_consensus_task() { /// - we correctly propose with a null block payload in view 6, even if we have indications to do otherwise (via SendPayloadCommitmentAndMetadata, VID etc). /// - we correctly reject a QuorumProposal with a non-null block payload in view 7. async fn test_upgrade_and_consensus_task_blank_blocks() { - use hotshot_testing::task_helpers::build_system_handle; + use hotshot_testing::helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index cd9eea0aa2..c51180b8ae 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -10,7 +10,7 @@ use hotshot_task_impls::{events::HotShotEvent::*, vid::VidTaskState}; use hotshot_testing::{ predicates::event::exact, script::{run_test_script, TestScriptStage}, - task_helpers::{build_system_handle, vid_scheme_from_view_number}, + helpers::{build_system_handle, vid_scheme_from_view_number}, }; use hotshot_types::{ data::{null_block, DaProposal, VidDisperse, ViewNumber}, diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index eaf3d5a11f..811f74b675 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -3,7 +3,7 @@ use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, }; -use hotshot_testing::task_helpers::build_system_handle; +use hotshot_testing::helpers::build_system_handle; use hotshot_types::{ data::ViewNumber, simple_vote::ViewSyncPreCommitData, traits::node_implementation::ConsensusTime, diff --git a/types/src/vote.rs b/types/src/vote.rs index bfb175d310..8c683e2812 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -194,8 +194,8 @@ pub struct VoteDependencyData { /// parent leaf. pub parent_leaf: Leaf, - /// The Vid disperse proposal. - pub disperse_share: Proposal>, + /// The VID share proposal. + pub vid_share: Proposal>, /// The DA certificate. pub da_cert: DaCertificate, From 24f025ec01aa6b1d69c4983e527a79a11b9d73a6 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Tue, 28 May 2024 21:22:07 +0500 Subject: [PATCH 1053/1393] Make BlockPayload::from_transactions async and pass ValidatedState (#3219) * pass validated state to BlockPayload::from_transactions * clippy * make it async * impl stream for TestViewGenerator * clippy * replace boxfuture * async_trait and add comment * move builder fee into async_spawn tasks * rename genesis() to empty() and use empty() for null block builder fee --- builder-api/src/block_info.rs | 2 +- example-types/src/block_types.rs | 27 ++++-- example-types/src/state_types.rs | 2 +- examples/infra/mod.rs | 12 +-- hotshot/src/lib.rs | 24 ++++-- hotshot/src/tasks/task_state.rs | 12 +-- hotshot/src/types/handle.rs | 3 +- task-impls/src/consensus/helpers.rs | 10 ++- task-impls/src/consensus2/handlers.rs | 3 +- task-impls/src/consensus2/mod.rs | 15 ++-- task-impls/src/events.rs | 4 +- task-impls/src/quorum_proposal/handlers.rs | 3 +- task-impls/src/quorum_proposal/mod.rs | 9 +- task-impls/src/transactions.rs | 17 ++-- testing/src/block_builder.rs | 10 ++- testing/src/helpers.rs | 4 +- testing/src/spinning_task.rs | 7 +- testing/src/test_runner.rs | 21 +++-- testing/src/view_generator.rs | 85 +++++++++++++------ testing/tests/tests_1/block_builder.rs | 12 ++- testing/tests/tests_1/consensus_task.rs | 30 +++---- testing/tests/tests_1/da_task.rs | 16 ++-- testing/tests/tests_1/network_task.rs | 8 +- testing/tests/tests_1/proposal_ordering.rs | 17 ++-- .../tests_1/quorum_proposal_recv_task.rs | 6 +- testing/tests/tests_1/quorum_proposal_task.rs | 62 ++++++-------- testing/tests/tests_1/quorum_vote_task.rs | 30 ++++--- testing/tests/tests_1/upgrade_task.rs | 40 ++++----- testing/tests/tests_1/vid_task.rs | 21 +++-- types/src/consensus.rs | 2 +- types/src/data.rs | 35 +++++--- types/src/error.rs | 2 +- types/src/traits/block_contents.rs | 31 ++++--- types/src/traits/node_implementation.rs | 21 +++-- types/src/traits/states.rs | 4 +- 35 files changed, 338 insertions(+), 269 deletions(-) diff --git a/builder-api/src/block_info.rs b/builder-api/src/block_info.rs index 9d196984ab..51474fcb76 100644 --- a/builder-api/src/block_info.rs +++ b/builder-api/src/block_info.rs @@ -23,7 +23,7 @@ pub struct AvailableBlockInfo { #[serde(bound = "")] pub struct AvailableBlockData { pub block_payload: TYPES::BlockPayload, - pub metadata: ::Metadata, + pub metadata: >::Metadata, pub signature: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, pub sender: ::BuilderSignatureKey, diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 1c0e9dbb8a..b7e7350b51 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -4,6 +4,7 @@ use std::{ sync::Arc, }; +use async_trait::async_trait; use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, @@ -21,7 +22,10 @@ use snafu::Snafu; use time::OffsetDateTime; use vbs::version::Version; -use crate::{node_types::TestTypes, state_types::TestInstanceState}; +use crate::{ + node_types::TestTypes, + state_types::{TestInstanceState, TestValidatedState}, +}; /// The transaction in a [`TestBlockPayload`]. #[derive(Default, PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] @@ -134,7 +138,7 @@ impl Display for TestBlockPayload { } } -impl TestableBlock for TestBlockPayload { +impl TestableBlock for TestBlockPayload { fn genesis() -> Self { Self::genesis() } @@ -159,14 +163,17 @@ impl EncodeBytes for TestBlockPayload { } } -impl BlockPayload for TestBlockPayload { +#[async_trait] +impl BlockPayload for TestBlockPayload { type Error = BlockError; type Instance = TestInstanceState; type Transaction = TestTransaction; type Metadata = TestMetadata; + type ValidatedState = TestValidatedState; - fn from_transactions( - transactions: impl IntoIterator, + async fn from_transactions( + transactions: impl IntoIterator + Send, + _validated_state: &Self::ValidatedState, _instance_state: &Self::Instance, ) -> Result<(Self, Self::Metadata), Self::Error> { let txns_vec: Vec = transactions.into_iter().collect(); @@ -199,6 +206,10 @@ impl BlockPayload for TestBlockPayload { Self { transactions } } + fn empty() -> (Self, Self::Metadata) { + (Self::genesis(), TestMetadata) + } + fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { let mut digest = sha2::Sha256::new(); for txn in &self.transactions { @@ -239,7 +250,7 @@ impl> Block parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, - _metadata: ::Metadata, + _metadata: >::Metadata, _builder_fee: BuilderFee, _vid_common: VidCommon, _version: Version, @@ -264,7 +275,7 @@ impl> Block _instance_state: &>::Instance, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, - _metadata: ::Metadata, + _metadata: >::Metadata, ) -> Self { Self { block_number: 0, @@ -282,7 +293,7 @@ impl> Block self.payload_commitment } - fn metadata(&self) -> &::Metadata { + fn metadata(&self) -> &>::Metadata { &TestMetadata } diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 9bc37c35ba..51e973320d 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -107,7 +107,7 @@ impl> TestableState for _state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> >::Transaction { /// clippy appeasement for `RANDOM_TX_BASE_SIZE` const RANDOM_TX_BASE_SIZE: usize = 8; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 559c64bbce..cd205149ee 100644 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -336,7 +336,7 @@ pub trait RunDa< >, > where ::ValidatedState: TestableState, - ::BlockPayload: TestableBlock, + ::BlockPayload: TestableBlock, TYPES: NodeType, Leaf: TestableLeaf, Self: Sync, @@ -353,6 +353,7 @@ pub trait RunDa< /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { let initializer = hotshot::HotShotInitializer::::from_genesis(TestInstanceState {}) + .await .expect("Couldn't generate genesis block"); let config = self.config(); @@ -608,7 +609,7 @@ impl< > RunDa, PushCdnNetwork, NODE> for PushCdnDaRun where ::ValidatedState: TestableState, - ::BlockPayload: TestableBlock, + ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, { @@ -700,7 +701,7 @@ impl< > for Libp2pDaRun where ::ValidatedState: TestableState, - ::BlockPayload: TestableBlock, + ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, { @@ -792,7 +793,7 @@ impl< > RunDa, CombinedNetworks, NODE> for CombinedDaRun where ::ValidatedState: TestableState, - ::BlockPayload: TestableBlock, + ::BlockPayload: TestableBlock, Leaf: TestableLeaf, Self: Sync, { @@ -865,7 +866,6 @@ where pub async fn main_entry_point< TYPES: NodeType< Transaction = TestTransaction, - BlockPayload = TestBlockPayload, BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, @@ -882,7 +882,7 @@ pub async fn main_entry_point< args: ValidatorArgs, ) where ::ValidatedState: TestableState, - ::BlockPayload: TestableBlock, + ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { setup_logging(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b90b084e5a..43fee91d06 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -57,15 +57,13 @@ use tasks::{add_request_network_task, add_response_task, add_vid_task}; use tracing::{debug, instrument, trace}; use vbs::version::Version; +#[cfg(not(feature = "dependency-tasks"))] +use crate::tasks::add_consensus_task; #[cfg(feature = "dependency-tasks")] use crate::tasks::{ add_consensus2_task, add_quorum_proposal_recv_task, add_quorum_proposal_task, add_quorum_vote_task, }; - -#[cfg(not(feature = "dependency-tasks"))] -use crate::tasks::add_consensus_task; - use crate::{ tasks::{ add_da_task, add_network_event_task, add_network_message_task, add_transaction_task, @@ -368,6 +366,12 @@ impl> SystemContext { if self.anchored_leaf.view_number() == TYPES::Time::genesis() { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&self.instance_state); + + let qc = Arc::new( + QuorumCertificate::genesis(&validated_state, self.instance_state.as_ref()) + .await, + ); + broadcast_event( Event { view_number: self.anchored_leaf.view_number(), @@ -378,7 +382,7 @@ impl> SystemContext { Some(Arc::new(state_delta)), None, )]), - qc: Arc::new(QuorumCertificate::genesis(self.instance_state.as_ref())), + qc, block_size: None, }, }, @@ -801,14 +805,18 @@ impl HotShotInitializer { /// initialize from genesis /// # Errors /// If we are unable to apply the genesis block to the default state - pub fn from_genesis(instance_state: TYPES::InstanceState) -> Result> { + pub async fn from_genesis( + instance_state: TYPES::InstanceState, + ) -> Result> { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); + let high_qc = QuorumCertificate::genesis(&validated_state, &instance_state).await; + Ok(Self { - inner: Leaf::genesis(&instance_state), + inner: Leaf::genesis(&validated_state, &instance_state).await, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), - high_qc: QuorumCertificate::genesis(&instance_state), + high_qc, undecided_leafs: Vec::new(), undecided_state: BTreeMap::new(), instance_state, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index d8bf712ec0..1bfe5aa75c 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,3 +1,9 @@ +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + sync::{atomic::AtomicBool, Arc}, +}; + use async_trait::async_trait; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, @@ -10,12 +16,6 @@ use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, - sync::{atomic::AtomicBool, Arc}, -}; - use vbs::version::StaticVersionType; use crate::types::SystemContextHandle; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index cb859b659a..38cd3d8a5c 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,6 +1,6 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -18,7 +18,6 @@ use hotshot_types::{ traits::{election::Membership, node_implementation::NodeType}, BoxSyncFuture, }; -use std::time::Duration; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 06b9e1c2a2..1afbe30a0d 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -398,21 +398,23 @@ pub(crate) async fn publish_proposal_from_upgrade_cert( ) .await?; + let validated_state = consensus.read().await.decided_state(); // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. ensure!(upgrade_cert.in_interim(cur_view), "Cert is not in interim"); - let (payload, metadata) = ::from_transactions( + let (payload, metadata) = >::from_transactions( Vec::new(), + validated_state.as_ref(), instance_state.as_ref(), ) + .await .context("Failed to build null block payload and metadata")?; let builder_commitment = payload.builder_commitment(&metadata); let null_block_commitment = null_block::commitment(quorum_membership.total_nodes()) .context("Failed to calculate null block commitment")?; - let null_block_fee = - null_block::builder_fee::(quorum_membership.total_nodes(), instance_state.as_ref()) - .context("Failed to calculate null block fee info")?; + let null_block_fee = null_block::builder_fee::(quorum_membership.total_nodes()) + .context("Failed to calculate null block fee info")?; Ok(async_spawn(async move { create_and_send_proposal( diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 673f49079c..f210af96db 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -15,14 +15,13 @@ use hotshot_types::{ }; use tracing::debug; +use super::Consensus2TaskState; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task}, vote_collection::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent}, }; -use super::Consensus2TaskState; - /// Handle a `QuorumVoteRecv` event. pub(crate) async fn handle_quorum_vote_recv>( vote: &QuorumVote, diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index a2b99bfce7..8b47ffc878 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -1,10 +1,10 @@ -use async_broadcast::Sender; -use hotshot_task::task::Task; use std::sync::Arc; -use tracing::instrument; +use async_broadcast::Sender; use async_lock::RwLock; -use hotshot_task::task::TaskState; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, event::Event, @@ -15,17 +15,14 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; - -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; - -use crate::{events::HotShotEvent, vote_collection::VoteCollectionTaskState}; +use tracing::instrument; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, }; +use crate::{events::HotShotEvent, vote_collection::VoteCollectionTaskState}; /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 2ba373868d..25f5695db0 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -110,14 +110,14 @@ pub enum HotShotEvent { SendPayloadCommitmentAndMetadata( VidCommitment, BuilderCommitment, - ::Metadata, + >::Metadata, TYPES::Time, BuilderFee, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv( Arc<[u8]>, - ::Metadata, + >::Metadata, TYPES::Time, BuilderFee, VidPrecomputeData, diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 0ca678841e..50810e2513 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -20,9 +20,8 @@ use hotshot_types::{ }; use tracing::debug; -use crate::{events::HotShotEvent, helpers::broadcast_event}; - use super::QuorumProposalTaskState; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Helper type to give names and to the output values of the leaf chain traversal operation. #[derive(Debug)] diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 1d6aa45303..aca27eb4f0 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -26,15 +26,14 @@ use tokio::task::JoinHandle; use tracing::{debug, instrument, warn}; use vbs::version::Version; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; - use self::{ dependency_handle::{ProposalDependency, ProposalDependencyHandle}, handlers::handle_quorum_proposal_validated, }; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; mod dependency_handle; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7e87c826df..ed7db18ad6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -165,26 +165,19 @@ impl< .number_of_empty_blocks_proposed .add(1); + let membership_total_nodes = self.membership.total_nodes(); + // Calculate the builder fee for the empty block - let Some(builder_fee) = null_block::builder_fee( - self.membership.total_nodes(), - self.instance_state.as_ref(), - ) else { + let Some(builder_fee) = null_block::builder_fee(membership_total_nodes) else { error!("Failed to get builder fee"); return None; }; // Create an empty block payload and metadata - let Ok((_, metadata)) = ::BlockPayload::from_transactions( - vec![], - &self.instance_state, - ) else { - error!("Failed to create empty block payload"); - return None; - }; + let (_, metadata) = ::BlockPayload::empty(); let (_, precompute_data) = - precompute_vid_commitment(&[], self.membership.total_nodes()); + precompute_vid_commitment(&[], membership_total_nodes); // Broadcast the empty block broadcast_event( diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index c2d1f0c4ef..9e296ff27c 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -561,9 +561,13 @@ async fn build_block( where ::InstanceState: Default, { - let (block_payload, metadata) = - TYPES::BlockPayload::from_transactions(transactions, &Default::default()) - .expect("failed to build block payload from transactions"); + let (block_payload, metadata) = TYPES::BlockPayload::from_transactions( + transactions, + &Default::default(), + &Default::default(), + ) + .await + .expect("failed to build block payload from transactions"); let commitment = block_payload.builder_commitment(&metadata); diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 05b06621b5..6ffaf0b3a7 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -54,7 +54,9 @@ pub async fn build_system_handle( let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); + let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) + .await + .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 1a3cb7d43b..40466bc004 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -2,7 +2,10 @@ use std::collections::{BTreeMap, HashMap}; use either::{Left, Right}; use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; -use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; +use hotshot_example_types::{ + state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, +}; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ data::Leaf, @@ -62,7 +65,7 @@ impl> TaskState for Spinni } impl< - TYPES: NodeType, + TYPES: NodeType, I: TestableNodeImplementation, N: ConnectedNetwork, TYPES::SignatureKey>, > TestTaskState for SpinningTask diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 83905e5e9c..c3289297c3 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -12,7 +12,10 @@ use hotshot::{ traits::TestableNodeImplementation, types::SystemContextHandle, HotShotInitializer, Memberships, SystemContext, }; -use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; +use hotshot_example_types::{ + state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, +}; use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -114,7 +117,7 @@ pub trait TaskErr: std::error::Error + Sync + Send + 'static {} impl TaskErr for T {} impl< - TYPES: NodeType, + TYPES: NodeType, I: TestableNodeImplementation, N: ConnectedNetwork, TYPES::SignatureKey>, > TestRunner @@ -213,8 +216,13 @@ where late_start, latest_view: None, changes, - last_decided_leaf: Leaf::genesis(&TestInstanceState {}), - high_qc: QuorumCertificate::genesis(&TestInstanceState {}), + last_decided_leaf: Leaf::genesis(&TestValidatedState::default(), &TestInstanceState {}) + .await, + high_qc: QuorumCertificate::genesis( + &TestValidatedState::default(), + &TestInstanceState {}, + ) + .await, }; let spinning_task = TestTask::, SpinningTask>::new( Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), @@ -391,8 +399,9 @@ where }, ); } else { - let initializer = - HotShotInitializer::::from_genesis(TestInstanceState {}).unwrap(); + let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) + .await + .unwrap(); // See whether or not we should be DA let is_da = node_id < config.da_staked_committee_size as u64; diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 1a53144fb1..9911cb6933 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -1,11 +1,18 @@ -use std::{cmp::max, marker::PhantomData, sync::Arc}; +use std::{ + cmp::max, + marker::PhantomData, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use committable::Committable; +use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_types::{ data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence, ViewNumber}, @@ -52,7 +59,7 @@ pub struct TestView { } impl TestView { - pub fn genesis( + pub async fn genesis( quorum_membership: &::Membership, da_membership: &::Membership, ) -> Self { @@ -61,9 +68,18 @@ impl TestView { let transactions = Vec::new(); let (block_payload, metadata) = - TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}) - .unwrap(); - let builder_commitment = block_payload.builder_commitment(&metadata); + >::from_transactions( + transactions.clone(), + &TestValidatedState::default(), + &TestInstanceState {}, + ) + .await + .unwrap(); + + let builder_commitment = >::builder_commitment( + &block_payload, + &metadata, + ); let (private_key, public_key) = key_pair_for_id(*genesis_view); @@ -97,7 +113,11 @@ impl TestView { let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), view_number: genesis_view, - justify_qc: QuorumCertificate::genesis(&TestInstanceState {}), + justify_qc: QuorumCertificate::genesis( + &TestValidatedState::default(), + &TestInstanceState {}, + ) + .await, upgrade_certificate: None, proposal_certificate: None, }; @@ -157,7 +177,7 @@ impl TestView { /// this method can be used to start from an ancestor (whose view is at least one view older /// than the current view) and construct valid views without the data structures in the task /// failing by expecting views that they has never seen. - pub fn next_view_from_ancestor(&self, ancestor: TestView) -> Self { + pub async fn next_view_from_ancestor(&self, ancestor: TestView) -> Self { let old = ancestor; let old_view = old.view_number; @@ -181,9 +201,17 @@ impl TestView { let leader_public_key = public_key; let (block_payload, metadata) = - TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}) - .unwrap(); - let builder_commitment = block_payload.builder_commitment(&metadata); + >::from_transactions( + transactions.clone(), + &TestValidatedState::default(), + &TestInstanceState {}, + ) + .await + .unwrap(); + let builder_commitment = >::builder_commitment( + &block_payload, + &metadata, + ); let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); @@ -348,8 +376,8 @@ impl TestView { } } - pub fn next_view(&self) -> Self { - self.next_view_from_ancestor(self.clone()) + pub async fn next_view(&self) -> Self { + self.next_view_from_ancestor(self.clone()).await } pub fn create_quorum_vote( @@ -474,28 +502,35 @@ impl TestViewGenerator { } } - pub fn next_from_anscestor_view(&mut self, ancestor: TestView) { + pub async fn next_from_anscestor_view(&mut self, ancestor: TestView) { if let Some(ref view) = self.current_view { - self.current_view = Some(view.next_view_from_ancestor(ancestor)) + self.current_view = Some(view.next_view_from_ancestor(ancestor).await) } else { tracing::error!("Cannot attach ancestor to genesis view."); } } } -impl Iterator for TestViewGenerator { +impl Stream for TestViewGenerator { type Item = TestView; - fn next(&mut self) -> Option { - if let Some(view) = &self.current_view { - self.current_view = Some(TestView::next_view(view)); + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let qm = &self.quorum_membership.clone(); + let da = &self.da_membership.clone(); + let curr_view = &self.current_view.clone(); + + let mut fut = if let Some(ref view) = curr_view { + async move { TestView::next_view(view).await }.boxed() } else { - self.current_view = Some(TestView::genesis( - &self.quorum_membership, - &self.da_membership, - )); - } + async move { TestView::genesis(qm, da).await }.boxed() + }; - self.current_view.clone() + match fut.as_mut().poll(cx) { + Poll::Ready(test_view) => { + self.current_view = Some(test_view.clone()); + Poll::Ready(Some(test_view)) + } + Poll::Pending => Poll::Pending, + } } } diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index c7d1aba333..5cf387ac53 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -86,10 +86,14 @@ async fn test_random_block_builder() { .expect("Failed to claim block"); // Test claiming non-existent block - let commitment_for_non_existent_block = TestBlockPayload { - transactions: vec![TestTransaction::new(vec![0; 1])], - } - .builder_commitment(&TestMetadata); + let commitment_for_non_existent_block = + >::builder_commitment( + &TestBlockPayload { + transactions: vec![TestTransaction::new(vec![0; 1])], + }, + &TestMetadata, + ); + let result = client .claim_block( commitment_for_non_existent_block, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 780dca5b80..73d9279bf7 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -3,6 +3,7 @@ use std::sync::Arc; +use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, @@ -33,7 +34,7 @@ use sha2::Digest; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { - use hotshot_example_types::block_types::TestMetadata; + use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -58,7 +59,7 @@ async fn test_consensus_task() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -96,8 +97,7 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), ], outputs: vec![ @@ -141,7 +141,7 @@ async fn test_consensus_vote() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -190,7 +190,7 @@ async fn test_vote_with_specific_order(input_permutation: Vec) { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -258,7 +258,7 @@ async fn test_consensus_vote_with_permuted_dac() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { - use hotshot_example_types::block_types::TestMetadata; + use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -289,7 +289,7 @@ async fn test_view_sync_finalize_propose() { let mut vids = Vec::new(); let mut dacs = Vec::new(); - generator.next(); + generator.next().await; let view = generator.current_view.clone().unwrap(); proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); @@ -304,7 +304,7 @@ async fn test_view_sync_finalize_propose() { generator.add_view_sync_finalize(view_sync_finalize_data); // Build the next proposal from view 1 - generator.next_from_anscestor_view(view.clone()); + generator.next_from_anscestor_view(view.clone()).await; let view = generator.current_view.unwrap(); proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); @@ -375,7 +375,7 @@ async fn test_view_sync_finalize_propose() { builder_commitment, TestMetadata, ViewNumber::new(4), - null_block::builder_fee(4, &TestInstanceState {}).unwrap(), + null_block::builder_fee(4).unwrap(), ), ], outputs: vec![ @@ -419,7 +419,7 @@ async fn test_view_sync_finalize_vote() { let mut votes = Vec::new(); let mut vids = Vec::new(); let mut dacs = Vec::new(); - for view in (&mut generator).take(3) { + for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -430,7 +430,7 @@ async fn test_view_sync_finalize_vote() { // Each call to `take` moves us to the next generated view. We advance to view // 3 and then add the finalize cert for checking there. generator.add_view_sync_finalize(view_sync_finalize_data); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -516,7 +516,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let mut votes = Vec::new(); let mut vids = Vec::new(); let mut dacs = Vec::new(); - for view in (&mut generator).take(3) { + for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -527,7 +527,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { // Each call to `take` moves us to the next generated view. We advance to view // 3 and then add the finalize cert for checking there. generator.add_view_sync_finalize(view_sync_finalize_data); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -620,7 +620,7 @@ async fn test_vid_disperse_storage_failure() { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 72acde3fd6..178a8c187d 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,10 +1,10 @@ use std::sync::Arc; +use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, }; use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ @@ -49,7 +49,7 @@ async fn test_da_task() { let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); @@ -59,7 +59,7 @@ async fn test_da_task() { generator.add_transactions(vec![TestTransaction::new(vec![0])]); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); @@ -76,8 +76,7 @@ async fn test_da_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), precompute, ), ], @@ -131,7 +130,7 @@ async fn test_da_task_storage_failure() { let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); @@ -141,7 +140,7 @@ async fn test_da_task_storage_failure() { generator.add_transactions(transactions); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); @@ -158,8 +157,7 @@ async fn test_da_task_storage_failure() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), precompute, ), ], diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 631c6add2e..ef3ce59143 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -26,6 +26,8 @@ use hotshot_types::{ #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[allow(clippy::too_many_lines)] async fn test_network_task() { + use futures::StreamExt; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -63,7 +65,7 @@ async fn test_network_task() { task_reg.run_task(task).await; let mut generator = TestViewGenerator::generate(membership.clone(), membership); - let view = generator.next().unwrap(); + let view = generator.next().await.unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); add_network_message_task(task_reg, out_tx.clone(), channel.clone()).await; @@ -88,6 +90,8 @@ async fn test_network_task() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_network_storage_fail() { + use futures::StreamExt; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -126,7 +130,7 @@ async fn test_network_storage_fail() { task_reg.run_task(task).await; let mut generator = TestViewGenerator::generate(membership.clone(), membership); - let view = generator.next().unwrap(); + let view = generator.next().await.unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); add_network_message_task(task_reg, out_tx.clone(), channel.clone()).await; diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 3064c907f0..4ca6f36225 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -26,6 +26,8 @@ use sha2::Digest; /// This proposal should happen no matter how the `input_permutation` is specified. #[cfg(not(feature = "dependency-tasks"))] async fn test_ordering_with_specific_order(input_permutation: Vec) { + use futures::StreamExt; + use hotshot_example_types::state_types::TestValidatedState; use hotshot_testing::{ script::{run_test_script, TestScriptStage}, helpers::build_system_handle, @@ -55,7 +57,7 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(3) { + for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); leaders.push(view.leader_public_key); @@ -91,21 +93,14 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { builder_commitment, TestMetadata, ViewNumber::new(node_id), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), ]; let mut view_2_inputs = permute_input_with_index_order(inputs, input_permutation); view_2_inputs.insert(0, DaCertificateRecv(dacs[1].clone())); - view_2_inputs.insert( - 0, - VidShareRecv(vid_share(&vids[2].0, handle.public_key())), - ); - view_2_inputs.insert( - 0, - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), - ); + view_2_inputs.insert(0, VidShareRecv(vid_share(&vids[2].0, handle.public_key()))); + view_2_inputs.insert(0, VidShareRecv(vid_share(&vids[1].0, handle.public_key()))); // This stage transitions from view 1 to view 2. let view_2 = TestScriptStage { diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 71c9093a7e..66f57f4d80 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -7,9 +7,9 @@ use hotshot_task_impls::{ events::HotShotEvent::*, quorum_proposal_recv::QuorumProposalRecvTaskState, }; use hotshot_testing::{ + helpers::build_system_handle, predicates::event::{exact, vote_now}, script::{run_test_script, TestScriptStage}, - helpers::build_system_handle, view_generator::TestViewGenerator, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; @@ -38,7 +38,7 @@ async fn test_quorum_proposal_recv_task() { let mut dacs = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -106,7 +106,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let mut dacs = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(4) { + for view in (&mut generator).take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 63cfefc2b2..19fecf5e0d 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -5,21 +5,22 @@ use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, - state_types::{TestInstanceState}, + state_types::TestInstanceState, }; use hotshot_task_impls::{ events::HotShotEvent::{self, *}, quorum_proposal::QuorumProposalTaskState, }; use hotshot_testing::{ + helpers::{ + build_cert, build_fake_view_with_leaf, build_system_handle, key_pair_for_id, + vid_scheme_from_view_number, vid_share, + }, predicates::{ event::{exact, leaf_decided, quorum_proposal_send}, Predicate, }, script::{run_test_script, TestScriptStage}, - helpers::{ - build_cert, build_system_handle, key_pair_for_id, vid_scheme_from_view_number, vid_share, build_fake_view_with_leaf, - }, test_helpers::create_fake_view_with_leaf, view_generator::TestViewGenerator, }; @@ -72,7 +73,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let mut vids = Vec::new(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); @@ -98,8 +99,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { builder_commitment, TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated(ViewNumber::new(0), build_fake_view_with_leaf(genesis_leaf)), @@ -169,8 +169,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( @@ -192,8 +191,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[1].0, handle.public_key())), ValidatedStateUpdated( @@ -215,8 +213,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[2].0, handle.public_key())), ValidatedStateUpdated( @@ -242,8 +239,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[3].0, handle.public_key())), ValidatedStateUpdated( @@ -264,8 +260,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment, TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[4].0, handle.public_key())), ValidatedStateUpdated( @@ -310,7 +305,7 @@ async fn test_quorum_proposal_task_qc_timeout() { let mut leaders = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); @@ -320,7 +315,7 @@ async fn test_quorum_proposal_task_qc_timeout() { view: ViewNumber::new(1), }; generator.add_timeout(timeout_data); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); @@ -342,8 +337,7 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), ValidatedStateUpdated( @@ -388,7 +382,7 @@ async fn test_quorum_proposal_task_view_sync() { let mut leaders = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); @@ -400,7 +394,7 @@ async fn test_quorum_proposal_task_view_sync() { round: ViewNumber::new(node_id), }; generator.add_view_sync_finalize(view_sync_finalize_data); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); @@ -422,8 +416,7 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), ValidatedStateUpdated( @@ -463,7 +456,7 @@ async fn test_quorum_proposal_livness_check_proposal() { let mut vids = Vec::new(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - for view in (&mut generator).take(5) { + for view in (&mut generator).take(5).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); @@ -492,8 +485,7 @@ async fn test_quorum_proposal_livness_check_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( @@ -515,8 +507,7 @@ async fn test_quorum_proposal_livness_check_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[1].0, handle.public_key())), ValidatedStateUpdated( @@ -540,8 +531,7 @@ async fn test_quorum_proposal_livness_check_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[2].0, handle.public_key())), ValidatedStateUpdated( @@ -566,8 +556,7 @@ async fn test_quorum_proposal_livness_check_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[3].0, handle.public_key())), ValidatedStateUpdated( @@ -588,8 +577,7 @@ async fn test_quorum_proposal_livness_check_proposal() { builder_commitment, TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[4].0, handle.public_key())), ValidatedStateUpdated( @@ -631,7 +619,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index bb68f6f376..b838edb310 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,8 +1,11 @@ #![allow(clippy::panic)] +use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_testing::helpers::{build_fake_view_with_leaf,vid_share}; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime,vote::HasViewNumber}; +use hotshot_testing::helpers::{build_fake_view_with_leaf, vid_share}; +use hotshot_types::{ + data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, +}; #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -10,9 +13,9 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ + helpers::build_system_handle, predicates::event::{exact, quorum_vote_send}, script::{run_test_script, TestScriptStage}, - helpers::build_system_handle, view_generator::TestViewGenerator, }; @@ -29,7 +32,7 @@ async fn test_quorum_vote_task_success() { let mut leaves = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); @@ -69,9 +72,9 @@ async fn test_quorum_vote_task_success() { async fn test_quorum_vote_task_vote_now() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ + helpers::build_system_handle, predicates::event::{exact, quorum_vote_send}, script::{run_test_script, TestScriptStage}, - helpers::build_system_handle, view_generator::TestViewGenerator, }; use hotshot_types::vote::VoteDependencyData; @@ -85,7 +88,7 @@ async fn test_quorum_vote_task_vote_now() { let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - generator.next(); + generator.next().await; let view = generator.current_view.clone().unwrap(); let vote_dependency_data = VoteDependencyData { @@ -117,9 +120,9 @@ async fn test_quorum_vote_task_vote_now() { async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ + helpers::build_system_handle, predicates::event::exact, script::{run_test_script, TestScriptStage}, - helpers::build_system_handle, view_generator::TestViewGenerator, }; @@ -138,7 +141,7 @@ async fn test_quorum_vote_task_miss_dependency() { let mut dacs = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); - for view in (&mut generator).take(5) { + for view in (&mut generator).take(5).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); votes.push(view.create_quorum_vote(&handle)); @@ -205,7 +208,12 @@ async fn test_quorum_vote_task_miss_dependency() { QuorumVoteTaskState::::create_from(&handle).await; run_test_script( - vec![view_no_dac, view_no_vid, view_no_quorum_proposal, view_no_validated_state], + vec![ + view_no_dac, + view_no_vid, + view_no_quorum_proposal, + view_no_validated_state, + ], quorum_vote_state, ) .await; @@ -217,9 +225,9 @@ async fn test_quorum_vote_task_miss_dependency() { async fn test_quorum_vote_task_incorrect_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ + helpers::build_system_handle, predicates::event::exact, script::{run_test_script, TestScriptStage}, - helpers::build_system_handle, view_generator::TestViewGenerator, }; @@ -236,7 +244,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { let mut leaves = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 9a2e88f6a3..229868c279 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -3,6 +3,7 @@ use std::time::Duration; +use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, @@ -63,7 +64,7 @@ async fn test_consensus_task_upgrade() { let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - for view in (&mut generator).take(2) { + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -73,7 +74,7 @@ async fn test_consensus_task_upgrade() { generator.add_upgrade(upgrade_data); - for view in generator.take(4) { + for view in generator.take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -198,7 +199,7 @@ async fn test_upgrade_and_consensus_task() { let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -209,7 +210,7 @@ async fn test_upgrade_and_consensus_task() { generator.add_upgrade(upgrade_data.clone()); - for view in generator.take(4) { + for view in generator.take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -253,8 +254,7 @@ async fn test_upgrade_and_consensus_task() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QcFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), ], @@ -358,7 +358,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -369,7 +369,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { generator.add_upgrade(upgrade_data.clone()); - for view in (&mut generator).take(3) { + for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -382,7 +382,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { // Our node should vote affirmatively on this. generator.add_transactions(vec![]); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -394,7 +394,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { // We set the transactions to something not null for view 6, but we expect the node to emit a quorum proposal where they are still null. generator.add_transactions(vec![TestTransaction::new(vec![0])]); - for view in (&mut generator).take(1) { + for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -406,7 +406,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { // For view 7, we set the transactions to something not null. The node should fail to vote on this. generator.add_transactions(vec![TestTransaction::new(vec![0])]); - for view in generator.take(1) { + for view in generator.take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); @@ -440,8 +440,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[1].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), ], vec![ @@ -452,8 +451,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -465,8 +463,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[3].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -478,8 +475,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[4].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -491,8 +487,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[5].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(6), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QcFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], @@ -504,8 +499,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { proposals[6].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(7), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index c51180b8ae..e78418c045 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -4,7 +4,7 @@ use hotshot::{tasks::task_state::CreateTaskState, types::SignatureKey}; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_task_impls::{events::HotShotEvent::*, vid::VidTaskState}; use hotshot_testing::{ @@ -40,9 +40,16 @@ async fn test_vid_task() { let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); let transactions = vec![TestTransaction::new(vec![0])]; - let (payload, metadata) = - TestBlockPayload::from_transactions(transactions.clone(), &TestInstanceState {}).unwrap(); - let builder_commitment = payload.builder_commitment(&metadata); + + let (payload, metadata) = >::from_transactions( + transactions.clone(), + &TestValidatedState::default(), + &TestInstanceState {}, + ) + .await + .unwrap(); + let builder_commitment = + >::builder_commitment(&payload, &metadata); let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let (_, vid_precompute) = vid.commit_only_precompute(&encoded_transactions).unwrap(); @@ -85,8 +92,7 @@ async fn test_vid_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), vid_precompute, ), ], @@ -96,8 +102,7 @@ async fn test_vid_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), &TestInstanceState {}) - .unwrap(), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), )), exact(BlockReady(vid_disperse, ViewNumber::new(2))), exact(VidDisperseSend(vid_proposal.clone(), pub_key)), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index bc7a02a637..79d24f418b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -507,7 +507,7 @@ pub struct CommitmentAndMetadata { /// Builder Commitment pub builder_commitment: BuilderCommitment, /// Metadata for the block payload - pub metadata: ::Metadata, + pub metadata: >::Metadata, /// Builder fee data pub fee: BuilderFee, /// View number this block is for diff --git a/types/src/data.rs b/types/src/data.rs index e91717768b..f1e470df17 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -117,11 +117,12 @@ impl std::ops::Sub for ViewNumber { /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound = "TYPES: NodeType")] pub struct DaProposal { /// Encoded transactions in the block to be applied. pub encoded_transactions: Arc<[u8]>, /// Metadata of the block to be applied. - pub metadata: ::Metadata, + pub metadata: >::Metadata, /// View this proposal applies to pub view_number: TYPES::Time, } @@ -403,7 +404,7 @@ pub trait TestableLeaf { &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockPayload as BlockPayload>::Transaction; + ) -> <::BlockPayload as BlockPayload>::Transaction; } /// This is the consensus-internal analogous concept to a block, and it contains the block proper, @@ -467,9 +468,14 @@ impl Display for Leaf { impl QuorumCertificate { #[must_use] /// Creat the Genesis certificate - pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { + pub async fn genesis( + validated_state: &TYPES::ValidatedState, + instance_state: &TYPES::InstanceState, + ) -> Self { let data = QuorumData { - leaf_commit: Leaf::genesis(instance_state).commit(), + leaf_commit: Leaf::genesis(validated_state, instance_state) + .await + .commit(), }; let commit = data.commit(); Self { @@ -490,9 +496,14 @@ impl Leaf { /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be /// interpreted as bytes). #[must_use] - pub fn genesis(instance_state: &TYPES::InstanceState) -> Self { + pub async fn genesis( + validated_state: &TYPES::ValidatedState, + instance_state: &TYPES::InstanceState, + ) -> Self { let (payload, metadata) = - TYPES::BlockPayload::from_transactions([], instance_state).unwrap(); + TYPES::BlockPayload::from_transactions([], validated_state, instance_state) + .await + .unwrap(); let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload.encode(); @@ -638,7 +649,7 @@ impl Leaf { impl TestableLeaf for Leaf where TYPES::ValidatedState: TestableState, - TYPES::BlockPayload: TestableBlock, + TYPES::BlockPayload: TestableBlock, { type NodeType = TYPES; @@ -646,7 +657,8 @@ where &self, rng: &mut dyn rand::RngCore, padding: u64, - ) -> <::BlockPayload as BlockPayload>::Transaction { + ) -> <::BlockPayload as BlockPayload>::Transaction + { TYPES::ValidatedState::create_random_transaction(None, rng, padding) } } @@ -762,10 +774,7 @@ pub mod null_block { /// Builder fee data for a null block payload #[must_use] - pub fn builder_fee( - num_storage_nodes: usize, - instance_state: &::Instance, - ) -> Option> { + pub fn builder_fee(num_storage_nodes: usize) -> Option> { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; @@ -775,7 +784,7 @@ pub mod null_block { ); let (_null_block, null_block_metadata) = - ::from_transactions([], instance_state).ok()?; + >::empty(); match TYPES::BuilderSignatureKey::sign_fee( &priv_key, diff --git a/types/src/error.rs b/types/src/error.rs index 0797f726ed..9a0c0365f7 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -38,7 +38,7 @@ pub enum HotShotError { #[snafu(display("Failed to build or verify a block: {source}"))] BlockError { /// The underlying block error. - source: ::Error, + source: >::Error, }, /// Failure in networking layer #[snafu(display("Failure in networking layer: {source}"))] diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 61375d5171..a065e334b4 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -11,6 +11,7 @@ use std::{ sync::Arc, }; +use async_trait::async_trait; use committable::{Commitment, Committable}; use jf_vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -44,7 +45,8 @@ pub trait Transaction: /// * Must have a transaction type that can be compared for equality, serialized and serialized, /// sent between threads, and can have a hash produced of it /// * Must be hashable -pub trait BlockPayload: +#[async_trait] +pub trait BlockPayload: Serialize + Clone + Debug @@ -64,6 +66,8 @@ pub trait BlockPayload: type Instance: InstanceState; /// The type of the transitions we are applying type Transaction: Transaction; + /// Validated State + type ValidatedState: ValidatedState; /// Data created during block building which feeds into the block header type Metadata: Clone + Debug @@ -76,11 +80,12 @@ pub trait BlockPayload: + EncodeBytes; /// Build a payload and associated metadata with the transactions. - /// + /// This function is asynchronous because it may need to request updated state from the peers via GET requests. /// # Errors /// If the transaction length conversion fails. - fn from_transactions( - transactions: impl IntoIterator, + async fn from_transactions( + transactions: impl IntoIterator + Send, + validated_state: &Self::ValidatedState, instance_state: &Self::Instance, ) -> Result<(Self, Self::Metadata), Self::Error>; @@ -88,14 +93,8 @@ pub trait BlockPayload: /// and the associated number of VID storage nodes fn from_bytes(encoded_transactions: &[u8], metadata: &Self::Metadata) -> Self; - /// Build the genesis payload and metadata. - #[must_use] - fn genesis() -> (Self, Self::Metadata) - where - ::Instance: Default, - { - Self::from_transactions([], &Default::default()).unwrap() - } + /// Build the payload and metadata for genesis/null block. + fn empty() -> (Self, Self::Metadata); /// List of transaction commitments. fn transaction_commitments( @@ -121,7 +120,7 @@ pub trait BlockPayload: } /// extra functions required on block to be usable by hotshot-testing -pub trait TestableBlock: BlockPayload + Debug { +pub trait TestableBlock: BlockPayload + Debug { /// generate a genesis block fn genesis() -> Self; @@ -192,7 +191,7 @@ pub trait BlockHeader: parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, - metadata: ::Metadata, + metadata: >::Metadata, builder_fee: BuilderFee, vid_common: VidCommon, version: Version, @@ -203,7 +202,7 @@ pub trait BlockHeader: instance_state: &>::Instance, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, - metadata: ::Metadata, + metadata: >::Metadata, ) -> Self; /// Get the block number. @@ -213,7 +212,7 @@ pub trait BlockHeader: fn payload_commitment(&self) -> VidCommitment; /// Get the metadata. - fn metadata(&self) -> &::Metadata; + fn metadata(&self) -> &>::Metadata; /// Get the builder commitment fn builder_commitment(&self) -> BuilderCommitment; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 280c8e00de..f4f1de3599 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -65,7 +65,7 @@ pub trait TestableNodeImplementation: NodeImplementation state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> >::Transaction; /// Creates random transaction if possible /// otherwise panics @@ -74,7 +74,7 @@ pub trait TestableNodeImplementation: NodeImplementation leaf: &Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> >::Transaction; /// generate a genesis block fn block_genesis() -> TYPES::BlockPayload; @@ -96,7 +96,7 @@ pub trait TestableNodeImplementation: NodeImplementation impl> TestableNodeImplementation for I where TYPES::ValidatedState: TestableState, - TYPES::BlockPayload: TestableBlock, + TYPES::BlockPayload: TestableBlock, I::QuorumNetwork: TestableNetworkingImplementation, I::DaNetwork: TestableNetworkingImplementation, { @@ -104,7 +104,7 @@ where state: Option<&TYPES::ValidatedState>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> >::Transaction { >::create_random_transaction( state, rng, padding, ) @@ -114,16 +114,16 @@ where leaf: &Leaf, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction { + ) -> >::Transaction { Leaf::create_random_transaction(leaf, rng, padding) } fn block_genesis() -> TYPES::BlockPayload { - ::genesis() + >::genesis() } fn txn_count(block: &TYPES::BlockPayload) -> u64 { - ::txn_count(block) + >::txn_count(block) } fn gen_networks( @@ -201,7 +201,12 @@ pub trait NodeType: /// The block type that this hotshot setup is using. /// /// This should be the same block that `ValidatedState::BlockPayload` is using. - type BlockPayload: BlockPayload; + type BlockPayload: BlockPayload< + Self, + Instance = Self::InstanceState, + Transaction = Self::Transaction, + ValidatedState = Self::ValidatedState, + >; /// The signature key that this hotshot setup is using. type SignatureKey: SignatureKey; /// The transaction type that this hotshot setup is using. diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 392a9dbe16..d7974b8588 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -85,7 +85,7 @@ pub trait ValidatedState: pub trait TestableState: ValidatedState where TYPES: NodeType, - TYPES::BlockPayload: TestableBlock, + TYPES::BlockPayload: TestableBlock, { /// Creates random transaction if possible /// otherwise panics @@ -94,5 +94,5 @@ where state: Option<&Self>, rng: &mut dyn rand::RngCore, padding: u64, - ) -> ::Transaction; + ) -> >::Transaction; } From 7f5a97eba0a018fb048f2f11f2a00ba1120788f1 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 28 May 2024 19:17:59 -0400 Subject: [PATCH 1054/1393] Upgrade fixes (#3232) --- hotshot/src/tasks/task_state.rs | 1 + task-impls/src/consensus/helpers.rs | 142 ++++-------------- task-impls/src/consensus/view_change.rs | 9 +- task-impls/src/events.rs | 2 + .../src/quorum_proposal_recv/handlers.rs | 11 +- task-impls/src/quorum_proposal_recv/mod.rs | 3 +- task-impls/src/transactions.rs | 22 ++- task-impls/src/upgrade.rs | 5 +- testing/src/predicates/event.rs | 11 ++ testing/tests/tests_1/upgrade_task.rs | 7 +- types/src/simple_certificate.rs | 5 +- 11 files changed, 88 insertions(+), 130 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 1bfe5aa75c..a7017699bc 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -171,6 +171,7 @@ impl, Ver: StaticVersionType> instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, builder_client: BuilderClient::new(handle.hotshot.config.builder_url.clone()), + decided_upgrade_certificate: None, } } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 1afbe30a0d..ca3a806ceb 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -372,76 +372,6 @@ pub(crate) async fn parent_leaf_and_state( Ok((parent_leaf, Arc::clone(state))) } -/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is a special -/// case proposal scenario. -#[allow(clippy::too_many_lines)] -#[allow(clippy::too_many_arguments)] -pub(crate) async fn publish_proposal_from_upgrade_cert( - cur_view: TYPES::Time, - view: TYPES::Time, - sender: Sender>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: Arc>>, - upgrade_cert: UpgradeCertificate, - delay: u64, - instance_state: Arc, - version: Version, -) -> Result> { - let (parent_leaf, state) = parent_leaf_and_state( - cur_view, - view, - Arc::clone(&quorum_membership), - public_key.clone(), - Arc::clone(&consensus), - ) - .await?; - - let validated_state = consensus.read().await.decided_state(); - // Special case: if we have a decided upgrade certificate AND it does not apply a version to the current view, we MUST propose with a null block. - ensure!(upgrade_cert.in_interim(cur_view), "Cert is not in interim"); - let (payload, metadata) = >::from_transactions( - Vec::new(), - validated_state.as_ref(), - instance_state.as_ref(), - ) - .await - .context("Failed to build null block payload and metadata")?; - - let builder_commitment = payload.builder_commitment(&metadata); - let null_block_commitment = null_block::commitment(quorum_membership.total_nodes()) - .context("Failed to calculate null block commitment")?; - - let null_block_fee = null_block::builder_fee::(quorum_membership.total_nodes()) - .context("Failed to calculate null block fee info")?; - - Ok(async_spawn(async move { - create_and_send_proposal( - public_key, - private_key, - consensus, - sender, - view, - CommitmentAndMetadata { - commitment: null_block_commitment, - builder_commitment, - metadata, - fee: null_block_fee, - block_view: view, - }, - parent_leaf, - state, - Some(upgrade_cert), - None, - delay, - instance_state, - version, - ) - .await; - })) -} - /// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the /// standard case proposal scenario. #[allow(clippy::too_many_arguments)] @@ -546,40 +476,23 @@ pub async fn publish_proposal_if_able( instance_state: Arc, version: Version, ) -> Result> { - if let Some(upgrade_cert) = decided_upgrade_cert { - publish_proposal_from_upgrade_cert( - cur_view, - view, - sender, - quorum_membership, - public_key, - private_key, - consensus, - upgrade_cert, - delay, - instance_state, - version, - ) - .await - } else { - publish_proposal_from_commitment_and_metadata( - cur_view, - view, - sender, - quorum_membership, - public_key, - private_key, - consensus, - delay, - formed_upgrade_certificate, - decided_upgrade_cert, - commitment_and_metadata, - proposal_cert, - instance_state, - version, - ) - .await - } + publish_proposal_from_commitment_and_metadata( + cur_view, + view, + sender, + quorum_membership, + public_key, + private_key, + consensus, + delay, + formed_upgrade_certificate, + decided_upgrade_cert, + commitment_and_metadata, + proposal_cert, + instance_state, + version, + ) + .await } // TODO: Fix `clippy::too_many_lines`. @@ -861,14 +774,17 @@ pub async fn handle_quorum_proposal_validated = if new_decide_reached { included_txns } else { @@ -1065,7 +987,7 @@ pub async fn update_state_and_vote_if_able { UpgradeVoteSend(UpgradeVote), /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), + /// A HotShot upgrade was decided + UpgradeDecided(UpgradeCertificate), /// HotShot was upgraded, with a new network version. VersionUpgrade(Version), diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 593760ef82..3b488132ff 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -1,12 +1,11 @@ #![allow(dead_code)] -use anyhow::{bail, Context, Result}; -use async_lock::RwLockUpgradableReadGuard; -use committable::Committable; use std::sync::Arc; -use tracing::{debug, warn}; +use anyhow::{bail, Context, Result}; use async_broadcast::{broadcast, Sender}; +use async_lock::RwLockUpgradableReadGuard; +use committable::Committable; use hotshot_types::{ data::{Leaf, QuorumProposal}, message::Proposal, @@ -20,7 +19,9 @@ use hotshot_types::{ utils::{View, ViewInner}, vote::{Certificate, HasViewNumber}, }; +use tracing::{debug, warn}; +use super::QuorumProposalRecvTaskState; use crate::{ consensus::{ helpers::{validate_proposal_safety_and_liveness, validate_proposal_view_and_certs}, @@ -30,8 +31,6 @@ use crate::{ helpers::broadcast_event, }; -use super::QuorumProposalRecvTaskState; - /// Broadcast the proposal in the event that the parent state is not found for /// a given `proposal`, but it still passes the liveness check. Optionally return /// the inner [`QuorumProposal`] if the liveness check passes. diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 2518bd6bcc..efc3c08774 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -24,14 +24,13 @@ use tokio::task::JoinHandle; use tracing::{debug, error, instrument, warn}; use vbs::version::Version; +use self::handlers::handle_quorum_proposal_recv; use crate::{ consensus::helpers::parent_leaf_and_state, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; -use self::handlers::handle_quorum_proposal_recv; - /// Event handlers for this task. mod handlers; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index ed7db18ad6..b74f4771ec 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -15,6 +15,7 @@ use hotshot_types::{ consensus::Consensus, data::{null_block, Leaf}, event::{Event, EventType}, + simple_certificate::UpgradeCertificate, traits::{ block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, consensus_api::ConsensusApi, @@ -80,6 +81,8 @@ pub struct TransactionTaskState< pub instance_state: Arc, /// This state's ID pub id: u64, + /// Decided upgrade certificate + pub decided_upgrade_certificate: Option>, } impl< @@ -108,6 +111,9 @@ impl< .await; return None; } + HotShotEvent::UpgradeDecided(cert) => { + self.decided_upgrade_certificate = Some(cert.clone()); + } HotShotEvent::ViewChange(view) => { let view = *view; debug!("view change in transactions to view {:?}", view); @@ -129,11 +135,24 @@ impl< } let block_view = if make_block { view } else { view + 1 }; + // Request a block from the builder unless we are between versions. + let block = { + if self + .decided_upgrade_certificate + .as_ref() + .is_some_and(|cert| cert.upgrading_in(block_view)) + { + None + } else { + self.wait_for_block().await + } + }; + if let Some(BuilderResponses { block_data, blocks_initial_info, block_header, - }) = self.wait_for_block().await + }) = block { broadcast_event( Arc::new(HotShotEvent::BlockRecv( @@ -400,6 +419,7 @@ impl< HotShotEvent::TransactionsRecv(_) | HotShotEvent::Shutdown | HotShotEvent::ViewChange(_) + | HotShotEvent::UpgradeDecided(_) ) } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index a3d7a1a72e..c72e9d842d 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -222,8 +222,9 @@ impl, A: ConsensusApi + old_version: Version { major: 0, minor: 1 }, new_version: Version { major: 1, minor: 0 }, new_version_hash: vec![1, 1, 0, 0, 1], - old_version_last_block: TYPES::Time::new(15), - new_version_first_block: TYPES::Time::new(18), + old_version_last_view: TYPES::Time::new(15), + new_version_first_view: TYPES::Time::new(18), + decide_by: TYPES::Time::new(12), }; let upgrade_proposal = UpgradeProposal { diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 86ee569d27..e5a2c15d60 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -132,6 +132,17 @@ where Box::new(EventPredicate { check, info }) } +pub fn upgrade_decided() -> Box> +where + TYPES: NodeType, +{ + let info = "UpgradeDecided".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), UpgradeDecided(_))); + + Box::new(EventPredicate { check, info }) +} + pub fn quorum_vote_send() -> Box> where TYPES: NodeType, diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 229868c279..94b88b0177 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -145,6 +145,7 @@ async fn test_consensus_task_upgrade() { outputs: vec![ exact(ViewChange(ViewNumber::new(5))), quorum_proposal_validated(), + upgrade_decided(), leaf_decided(), ], asserts: vec![decided_upgrade_cert()], @@ -391,8 +392,9 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { views.push(view.clone()); } - // We set the transactions to something not null for view 6, but we expect the node to emit a quorum proposal where they are still null. - generator.add_transactions(vec![TestTransaction::new(vec![0])]); + // The transactions task generates an empty transaction set in this view, + // because we are proposing between versions. + generator.add_transactions(vec![]); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -537,6 +539,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { output_asserts: vec![ exact(ViewChange(ViewNumber::new(4))), quorum_proposal_validated(), + upgrade_decided(), leaf_decided(), quorum_vote_send(), ], diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 2a9c48126f..b03aae3629 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -189,8 +189,9 @@ impl UpgradeCertificate { } } - /// Test whether a view is in the interim period prior to the new version taking effect. - pub fn in_interim(&self, view: TYPES::Time) -> bool { + /// Given an upgrade certificate and a view, tests whether the view is in the period + /// where we are upgrading, which requires that we propose with null blocks. + pub fn upgrading_in(&self, view: TYPES::Time) -> bool { view > self.data.old_version_last_view && view < self.data.new_version_first_view } } From 5bbca392beb2ec4bef83e05761d4ef1a091056f8 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 29 May 2024 13:08:37 -0400 Subject: [PATCH 1055/1393] General task architecture refactor (#3133) --- hotshot/src/lib.rs | 165 +----- hotshot/src/tasks/mod.rs | 216 ++------ hotshot/src/tasks/task_state.rs | 32 +- .../src/traits/networking/push_cdn_network.rs | 4 +- hotshot/src/types/handle.rs | 60 ++- macros/src/lib.rs | 67 +-- task-impls/src/consensus/mod.rs | 67 ++- task-impls/src/consensus2/handlers.rs | 4 +- task-impls/src/consensus2/mod.rs | 44 +- task-impls/src/da.rs | 75 +-- task-impls/src/events.rs | 7 + task-impls/src/harness.rs | 65 +-- task-impls/src/network.rs | 67 +-- task-impls/src/quorum_proposal/mod.rs | 55 +- task-impls/src/quorum_proposal_recv/mod.rs | 45 +- task-impls/src/quorum_vote.rs | 49 +- task-impls/src/request.rs | 96 ++-- task-impls/src/transactions.rs | 79 ++- task-impls/src/upgrade.rs | 72 +-- task-impls/src/vid.rs | 43 +- task-impls/src/view_sync.rs | 127 ++--- task-impls/src/vote_collection.rs | 49 +- task/Cargo.toml | 6 +- task/src/task.rs | 487 ++++-------------- testing/Cargo.toml | 1 + testing/src/completion_task.rs | 24 +- testing/src/lib.rs | 10 +- testing/src/overall_safety_task.rs | 203 ++++---- testing/src/predicates/event.rs | 21 + testing/src/script.rs | 41 +- testing/src/spinning_task.rs | 90 ++-- testing/src/test_runner.rs | 224 ++++---- testing/src/test_task.rs | 137 +++++ testing/src/txn_task.rs | 29 +- testing/src/view_sync_task.rs | 69 +-- testing/tests/tests_1/consensus_task.rs | 9 +- testing/tests/tests_1/da_task.rs | 8 +- testing/tests/tests_1/network_task.rs | 25 +- testing/tests/tests_1/proposal_ordering.rs | 4 +- testing/tests/tests_1/quorum_proposal_task.rs | 3 +- testing/tests/tests_1/upgrade_task.rs | 54 +- testing/tests/tests_1/vid_task.rs | 2 +- testing/tests/tests_1/view_sync_task.rs | 9 +- testing/tests/tests_2/catchup.rs | 1 + types/src/traits/network.rs | 1 - 45 files changed, 1173 insertions(+), 1773 deletions(-) create mode 100644 testing/src/test_task.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 43fee91d06..49ed7030cf 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -26,14 +26,16 @@ use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; use futures::join; -use hotshot_task::task::TaskRegistry; +use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network}; // Internal /// Reexport error type pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, - constants::{BASE_VERSION, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE, STATIC_VER_0_1}, + constants::{ + Version01, BASE_VERSION, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE, STATIC_VER_0_1, + }, data::Leaf, event::{EventType, LeafInfo}, message::{DataMessage, Message, MessageKind}, @@ -53,22 +55,12 @@ use hotshot_types::{ // External /// Reexport rand crate pub use rand; -use tasks::{add_request_network_task, add_response_task, add_vid_task}; +use tasks::{add_request_network_task, add_response_task}; use tracing::{debug, instrument, trace}; use vbs::version::Version; -#[cfg(not(feature = "dependency-tasks"))] -use crate::tasks::add_consensus_task; -#[cfg(feature = "dependency-tasks")] -use crate::tasks::{ - add_consensus2_task, add_quorum_proposal_recv_task, add_quorum_proposal_task, - add_quorum_vote_task, -}; use crate::{ - tasks::{ - add_da_task, add_network_event_task, add_network_message_task, add_transaction_task, - add_upgrade_task, add_view_sync_task, - }, + tasks::{add_consensus_tasks, add_network_event_task, add_network_message_task}, traits::NodeImplementation, types::{Event, SystemContextHandle}, }; @@ -561,8 +553,8 @@ impl> SystemContext { /// For a list of which tasks are being spawned, see this module's documentation. #[allow(clippy::too_many_lines)] pub async fn run_tasks(&self) -> SystemContextHandle { - // ED Need to set first first number to 1, or properly trigger the change upon start - let registry = Arc::new(TaskRegistry::default()); + let consensus_registry = ConsensusTaskRegistry::new(); + let network_registry = NetworkTaskRegistry::new(); let output_event_stream = self.external_event_stream.clone(); let internal_event_stream = self.internal_event_stream.clone(); @@ -574,171 +566,60 @@ impl> SystemContext { let vid_membership = self.memberships.vid_membership.clone(); let view_sync_membership = self.memberships.view_sync_membership.clone(); - let (event_tx, event_rx) = internal_event_stream.clone(); - - let handle = SystemContextHandle { - registry: Arc::clone(®istry), + let mut handle = SystemContextHandle { + consensus_registry, + network_registry, output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), hotshot: self.clone().into(), storage: Arc::clone(&self.storage), }; - add_network_message_task( - Arc::clone(®istry), - event_tx.clone(), - Arc::clone(&quorum_network), - ) - .await; - add_network_message_task( - Arc::clone(®istry), - event_tx.clone(), - Arc::clone(&da_network), - ) - .await; + add_network_message_task(&mut handle, Arc::clone(&quorum_network)).await; + add_network_message_task(&mut handle, Arc::clone(&da_network)).await; - if let Some(request_rx) = da_network.spawn_request_receiver_task(STATIC_VER_0_1).await { - add_response_task( - Arc::clone(®istry), - event_rx.activate_cloned(), - request_rx, - &handle, - ) - .await; - add_request_network_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; + if let Some(request_receiver) = da_network.spawn_request_receiver_task(STATIC_VER_0_1).await + { + add_response_task(&mut handle, request_receiver).await; + add_request_network_task(&mut handle).await; } add_network_event_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), + &mut handle, Arc::clone(&quorum_network), quorum_membership.clone(), network::quorum_filter, - Arc::clone(&handle.storage()), ) .await; add_network_event_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), + &mut handle, Arc::clone(&quorum_network), quorum_membership, network::upgrade_filter, - Arc::clone(&handle.storage()), ) .await; add_network_event_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), + &mut handle, Arc::clone(&da_network), da_membership, network::da_filter, - Arc::clone(&handle.storage()), ) .await; add_network_event_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), + &mut handle, Arc::clone(&quorum_network), view_sync_membership, network::view_sync_filter, - Arc::clone(&handle.storage()), ) .await; add_network_event_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), + &mut handle, Arc::clone(&quorum_network), vid_membership, network::vid_filter, - Arc::clone(&handle.storage()), - ) - .await; - #[cfg(not(feature = "dependency-tasks"))] - add_consensus_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - add_da_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - add_vid_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - add_transaction_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - add_view_sync_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - add_upgrade_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - #[cfg(feature = "dependency-tasks")] - add_quorum_proposal_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - #[cfg(feature = "dependency-tasks")] - add_quorum_vote_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - #[cfg(feature = "dependency-tasks")] - add_quorum_proposal_recv_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, - ) - .await; - #[cfg(feature = "dependency-tasks")] - add_consensus2_task( - Arc::clone(®istry), - event_tx.clone(), - event_rx.activate_cloned(), - &handle, ) .await; + add_consensus_tasks::(&mut handle).await; handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 71cf00a5ec..7e50849795 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,19 +5,13 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; -use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::RwLock; -use hotshot_task::task::{Task, TaskRegistry}; +use hotshot_task::task::Task; use hotshot_task_impls::{ consensus::ConsensusTaskState, - consensus2::Consensus2TaskState, da::DaTaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, - quorum_proposal::QuorumProposalTaskState, - quorum_proposal_recv::QuorumProposalRecvTaskState, - quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, response::{run_response_task, NetworkResponseState, RequestReceiver}, transactions::TransactionTaskState, @@ -31,10 +25,9 @@ use hotshot_types::{ traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - storage::Storage, }, }; -use tracing::error; +use vbs::version::StaticVersionType; use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; @@ -49,57 +42,59 @@ pub enum GlobalEvent { /// Add tasks for network requests and responses pub async fn add_request_network_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, + handle: &mut SystemContextHandle, ) { let state = NetworkRequestState::::create_from(handle).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), state); - task_reg.run_task(task).await; + let task = Task::new( + state, + handle.internal_event_stream.0.clone(), + handle.internal_event_stream.1.activate_cloned(), + ); + handle.consensus_registry.run_task(task); } /// Add a task which responds to requests on the network. pub async fn add_response_task>( - task_reg: Arc, - hs_rx: Receiver>>, - rx: RequestReceiver, - handle: &SystemContextHandle, + handle: &mut SystemContextHandle, + request_receiver: RequestReceiver, ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - rx, + request_receiver, handle.hotshot.memberships.quorum_membership.clone().into(), handle.public_key().clone(), handle.private_key().clone(), ); - task_reg - .register(run_response_task::(state, hs_rx)) - .await; + handle + .network_registry + .register(run_response_task::( + state, + handle.internal_event_stream.1.activate_cloned(), + )); } /// Add the network task to handle messages and publish events. pub async fn add_network_message_task< TYPES: NodeType, + I: NodeImplementation, NET: ConnectedNetwork, TYPES::SignatureKey>, >( - task_reg: Arc, - event_stream: Sender>>, + handle: &mut SystemContextHandle, channel: Arc, ) { let net = Arc::clone(&channel); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { - event_stream: event_stream.clone(), + event_stream: handle.internal_event_stream.0.clone(), }; let network = Arc::clone(&net); let mut state = network_state.clone(); - let handle = async_spawn(async move { + let task_handle = async_spawn(async move { loop { let msgs = match network.recv_msgs().await { Ok(msgs) => Messages(msgs), Err(err) => { - error!("failed to receive messages: {err}"); + tracing::error!("failed to receive messages: {err}"); // return zero messages so we sleep and try again Messages(vec![]) @@ -113,21 +108,18 @@ pub async fn add_network_message_task< } } }); - task_reg.register(handle).await; + handle.network_registry.register(task_handle); } /// Add the network task to handle events and send messages. pub async fn add_network_event_task< TYPES: NodeType, + I: NodeImplementation, NET: ConnectedNetwork, TYPES::SignatureKey>, - S: Storage + 'static, >( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, + handle: &mut SystemContextHandle, channel: Arc, membership: TYPES::Membership, filter: fn(&Arc>) -> bool, - storage: Arc>, ) { let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState { channel, @@ -135,138 +127,38 @@ pub async fn add_network_event_task< version: VERSION_0_1, membership, filter, - storage, + storage: Arc::clone(&handle.storage()), }; - let task = Task::new(tx, rx, Arc::clone(&task_reg), network_state); - task_reg.run_task(task).await; -} - -/// add the consensus task -pub async fn add_consensus_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let consensus_state = ConsensusTaskState::create_from(handle).await; - - let task = Task::new(tx, rx, Arc::clone(&task_reg), consensus_state); - task_reg.run_task(task).await; -} - -/// add the VID task -pub async fn add_vid_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let vid_state = VidTaskState::create_from(handle).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), vid_state); - task_reg.run_task(task).await; -} - -/// add the Upgrade task. -pub async fn add_upgrade_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let upgrade_state = UpgradeTaskState::create_from(handle).await; - - let task = Task::new(tx, rx, Arc::clone(&task_reg), upgrade_state); - task_reg.run_task(task).await; -} -/// add the Data Availability task -pub async fn add_da_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - // build the da task - let da_state = DaTaskState::create_from(handle).await; - - let task = Task::new(tx, rx, Arc::clone(&task_reg), da_state); - task_reg.run_task(task).await; -} - -/// add the Transaction Handling task -pub async fn add_transaction_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let transactions_state = TransactionTaskState::<_, _, _, Version01>::create_from(handle).await; - - let task = Task::new(tx, rx, Arc::clone(&task_reg), transactions_state); - task_reg.run_task(task).await; -} - -/// add the view sync task -pub async fn add_view_sync_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let view_sync_state = ViewSyncTaskState::create_from(handle).await; - - let task = Task::new(tx, rx, Arc::clone(&task_reg), view_sync_state); - task_reg.run_task(task).await; -} - -/// add the quorum proposal task -pub async fn add_quorum_proposal_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let quorum_proposal_task_state = QuorumProposalTaskState::create_from(handle).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_proposal_task_state); - task_reg.run_task(task).await; -} - -/// Add the quorum vote task. -pub async fn add_quorum_vote_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let quorum_vote_task_state = QuorumVoteTaskState::create_from(handle).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), quorum_vote_task_state); - task_reg.run_task(task).await; -} - -/// Add the quorum proposal recv task. -pub async fn add_quorum_proposal_recv_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, -) { - let quorum_proposal_recv_task_state = QuorumProposalRecvTaskState::create_from(handle).await; let task = Task::new( - tx, - rx, - Arc::clone(&task_reg), - quorum_proposal_recv_task_state, + network_state, + handle.internal_event_stream.0.clone(), + handle.internal_event_stream.1.activate_cloned(), ); - task_reg.run_task(task).await; + handle.consensus_registry.run_task(task); } -/// Add the Consensus2 task. -pub async fn add_consensus2_task>( - task_reg: Arc, - tx: Sender>>, - rx: Receiver>>, - handle: &SystemContextHandle, +/// Adds consensus-related tasks to a `SystemContextHandle`. +pub async fn add_consensus_tasks< + TYPES: NodeType, + I: NodeImplementation, + VERSION: StaticVersionType + 'static, +>( + handle: &mut SystemContextHandle, ) { - let consensus2_task_state = Consensus2TaskState::create_from(handle).await; - let task = Task::new(tx, rx, Arc::clone(&task_reg), consensus2_task_state); - task_reg.run_task(task).await; + handle.add_task(ViewSyncTaskState::::create_from(handle).await); + handle.add_task(VidTaskState::::create_from(handle).await); + handle.add_task(DaTaskState::::create_from(handle).await); + handle.add_task(TransactionTaskState::::create_from(handle).await); + handle.add_task(UpgradeTaskState::::create_from(handle).await); + { + #![cfg(not(feature = "dependency-tasks"))] + handle.add_task(ConsensusTaskState::::create_from(handle).await); + } + { + #![cfg(feature = "dependency-tasks")] + handle.add_task(QuorumProposalTaskState::::create_from(handle).await); + handle.add_task(QuorumVoteTaskState::::create_from(handle).await); + handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); + handle.add_task(Consensus2TaskState::::create_from(handle).await); + } } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a7017699bc..a07887d297 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -50,19 +50,18 @@ impl, V: StaticVersionType> Create _phantom: PhantomData, id: handle.hotshot.id, shutdown_flag: Arc::new(AtomicBool::new(false)), + spawned_tasks: BTreeMap::new(), } } } #[async_trait] impl> CreateTaskState - for UpgradeTaskState> + for UpgradeTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> UpgradeTaskState> { + async fn create_from(handle: &SystemContextHandle) -> UpgradeTaskState { UpgradeTaskState { - api: handle.clone(), + output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), @@ -98,14 +97,12 @@ impl> CreateTaskState #[async_trait] impl> CreateTaskState - for DaTaskState> + for DaTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> DaTaskState> { + async fn create_from(handle: &SystemContextHandle) -> DaTaskState { DaTaskState { - api: handle.clone(), consensus: handle.hotshot.consensus(), + output_event_stream: handle.hotshot.external_event_stream.0.clone(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), da_network: Arc::clone(&handle.hotshot.networks.da_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -121,11 +118,9 @@ impl> CreateTaskState #[async_trait] impl> CreateTaskState - for ViewSyncTaskState> + for ViewSyncTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> ViewSyncTaskState> { + async fn create_from(handle: &SystemContextHandle) -> ViewSyncTaskState { let cur_view = handle.cur_view().await; ViewSyncTaskState { current_view: cur_view, @@ -139,7 +134,6 @@ impl> CreateTaskState .into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - api: handle.clone(), num_timeouts_tracked: 0, replica_task_map: HashMap::default().into(), pre_commit_relay_map: HashMap::default().into(), @@ -154,14 +148,14 @@ impl> CreateTaskState #[async_trait] impl, Ver: StaticVersionType> - CreateTaskState - for TransactionTaskState, Ver> + CreateTaskState for TransactionTaskState { async fn create_from( handle: &SystemContextHandle, - ) -> TransactionTaskState, Ver> { + ) -> TransactionTaskState { TransactionTaskState { - api: handle.clone(), + builder_timeout: handle.builder_timeout(), + output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: handle.hotshot.consensus(), cur_view: handle.cur_view().await, network: Arc::clone(&handle.hotshot.networks.quorum_network), diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 0c424c83b9..11e020d224 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -4,9 +4,9 @@ use std::{collections::BTreeSet, marker::PhantomData}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, sync::Arc, time::Duration}; +use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] -use async_compatibility_layer::art::async_spawn; -use async_compatibility_layer::{art::async_sleep, channel::UnboundedSendError}; +use async_compatibility_layer::{art::async_sleep, art::async_spawn}; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 38cd3d8a5c..e092238282 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -8,15 +8,13 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use futures::Stream; -use hotshot_task::task::TaskRegistry; +use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry, Task, TaskState}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ - boxed_sync, consensus::Consensus, data::Leaf, error::HotShotError, traits::{election::Membership, node_implementation::NodeType}, - BoxSyncFuture, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; @@ -28,7 +26,6 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; /// This type provides the means to message and interact with a background [`SystemContext`] instance, /// allowing the ability to receive [`Event`]s from it, send transactions to it, and interact with /// the underlying storage. -#[derive(Clone)] pub struct SystemContextHandle> { /// The [sender](Sender) and [receiver](Receiver), /// to allow the application to communicate with HotShot. @@ -40,8 +37,11 @@ pub struct SystemContextHandle> { Sender>>, InactiveReceiver>>, ), - /// registry for controlling tasks - pub(crate) registry: Arc, + /// registry for controlling consensus tasks + pub(crate) consensus_registry: ConsensusTaskRegistry>, + + /// registry for controlling network tasks + pub(crate) network_registry: NetworkTaskRegistry, /// Internal reference to the underlying [`SystemContext`] pub hotshot: Arc>, @@ -51,6 +51,17 @@ pub struct SystemContextHandle> { } impl + 'static> SystemContextHandle { + /// Adds a hotshot consensus-related task to the `SystemContextHandle`. + pub fn add_task> + 'static>(&mut self, task_state: S) { + let task = Task::new( + task_state, + self.internal_event_stream.0.clone(), + self.internal_event_stream.1.activate_cloned(), + ); + + self.consensus_registry.run_task(task); + } + /// obtains a stream to expose to the user pub fn event_stream(&self) -> impl Stream> { self.output_event_stream.1.activate_cloned() @@ -140,25 +151,24 @@ impl + 'static> SystemContextHandl } /// Shut down the the inner hotshot and wait until all background threads are closed. - // pub async fn shut_down(mut self) { - // self.registry.shutdown_all().await - pub fn shut_down<'a, 'b>(&'a mut self) -> BoxSyncFuture<'b, ()> - where - 'a: 'b, - Self: 'b, - { - boxed_sync(async move { - self.hotshot.networks.shut_down_networks().await; - // this is required because `SystemContextHandle` holds an inactive receiver and - // `broadcast_direct` below can wait indefinitely - self.internal_event_stream.0.set_await_active(false); - let _ = self - .internal_event_stream - .0 - .broadcast_direct(Arc::new(HotShotEvent::Shutdown)) - .await; - self.registry.shutdown().await; - }) + pub async fn shut_down(&mut self) { + // this is required because `SystemContextHandle` holds an inactive receiver and + // `broadcast_direct` below can wait indefinitely + self.internal_event_stream.0.set_await_active(false); + let _ = self + .internal_event_stream + .0 + .broadcast_direct(Arc::new(HotShotEvent::Shutdown)) + .await + .inspect_err(|err| tracing::error!("Failed to send shutdown event: {err}")); + tracing::error!("Shutting down network tasks!"); + self.network_registry.shutdown().await; + + tracing::error!("Shutting down networks!"); + self.hotshot.networks.shut_down_networks().await; + + tracing::error!("Shutting down consensus!"); + self.consensus_registry.shutdown().await; } /// return the timeout for a view of the underlying `SystemContext` diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 06b9c0dcae..b866a7ed02 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -294,11 +294,6 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { .map(|i| format_ident!("{}_output_index", quote::quote!(#i).to_string())) .collect(); - let task_names: Vec<_> = scripts - .iter() - .map(|i| format_ident!("{}_task", quote::quote!(#i).to_string())) - .collect(); - let task_expectations: Vec<_> = scripts .iter() .map(|i| format_ident!("{}_expectations", quote::quote!(#i).to_string())) @@ -316,25 +311,20 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { validate_task_state_or_panic_in_script, }; - use hotshot_testing::{predicates::Predicate, script::RECV_TIMEOUT}; + use hotshot_testing::{predicates::Predicate}; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; use async_compatibility_layer::art::async_timeout; - use hotshot_task::task::{Task, TaskRegistry, TaskState}; + use hotshot_task::task::{Task, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::sync::Arc; - let registry = Arc::new(TaskRegistry::default()); - - let (test_input, task_receiver) = broadcast(1024); - // let (task_input, mut test_receiver) = broadcast(1024); + async { - let task_input = test_input.clone(); - let mut test_receiver = task_receiver.clone(); + let (to_task, mut from_test) = broadcast(1024); + let (to_test, mut from_task) = broadcast(1024); - let mut loop_receiver = task_receiver.clone(); - - #(let mut #task_names = Task::new(task_input.clone(), task_receiver.clone(), registry.clone(), #scripts.state);)* + let mut loop_receiver = from_task.clone(); #(let mut #task_expectations = #scripts.expectations;)* @@ -346,20 +336,28 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { for input in &input_group { #( - if !#task_names.state().filter(&input.clone().into()) { tracing::debug!("Test sent: {:?}", input); - if let Some(res) = #task_names.handle_event(input.clone().into()).await { - #task_names.state().handle_result(&res).await; - } + to_task + .broadcast(input.clone().into()) + .await + .expect("Failed to broadcast input message"); + + + let _ = #scripts.state + .handle_event(input.clone().into(), &to_test, &from_test) + .await + .inspect_err(|e| tracing::info!("{e}")); - while let Ok(Ok(received_output)) = async_timeout(Duration::from_millis(35), test_receiver.recv_direct()).await { + while from_test.try_recv().is_ok() {} + + while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; if #output_index_names >= output_asserts.len() { - panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); + panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); }; let assert = &mut output_asserts[#output_index_names]; @@ -368,26 +366,32 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { #output_index_names += 1; } - } )* } while let Ok(input) = loop_receiver.try_recv() { #( - if !#task_names.state().filter(&input) { tracing::debug!("Test sent: {:?}", input); - if let Some(res) = #task_names.handle_event(input.clone()).await { - #task_names.state().handle_result(&res).await; - } + to_task + .broadcast(input.clone().into()) + .await + .expect("Failed to broadcast input message"); + + let _ = #scripts.state + .handle_event(input.clone().into(), &to_test, &from_test) + .await + .inspect_err(|e| tracing::info!("{e}")); - while let Ok(Ok(received_output)) = async_timeout(RECV_TIMEOUT, test_receiver.recv_direct()).await { + while from_test.try_recv().is_ok() {} + + while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; if #output_index_names >= output_asserts.len() { - panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); + panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); }; let mut assert = &mut output_asserts[#output_index_names]; @@ -396,7 +400,6 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { #output_index_names += 1; } - } )* } @@ -410,11 +413,13 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { let task_state_asserts = &mut #task_expectations[stage_number].task_state_asserts; for assert in task_state_asserts { - validate_task_state_or_panic_in_script(stage_number, #script_names.to_string(), #task_names.state(), &**assert).await; + validate_task_state_or_panic_in_script(stage_number, #script_names.to_string(), &#scripts.state, &**assert).await; } )* } } + } + }; expanded.into() diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 92695a59d3..77f5728260 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -2,14 +2,15 @@ use std::{collections::BTreeMap, sync::Arc}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::Result; -use async_broadcast::Sender; +use async_broadcast::{Receiver, Sender}; #[cfg(not(feature = "dependency-tasks"))] use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use async_trait::async_trait; use futures::future::join_all; -use hotshot_task::task::{Task, TaskState}; +use hotshot_task::task::TaskState; #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::data::VidDisperseShare; #[cfg(not(feature = "dependency-tasks"))] @@ -347,7 +348,7 @@ impl> ConsensusTaskState let result = collector .as_mut() .unwrap() - .handle_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -386,7 +387,7 @@ impl> ConsensusTaskState let result = collector .as_mut() .unwrap() - .handle_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -729,39 +730,33 @@ impl> ConsensusTaskState } } +#[async_trait] impl> TaskState for ConsensusTaskState { - type Event = Arc>; - type Output = (); - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::QuorumProposalRecv(_, _) - | HotShotEvent::QuorumVoteRecv(_) - | HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::QcFormed(_) - | HotShotEvent::UpgradeCertificateFormed(_) - | HotShotEvent::DaCertificateRecv(_) - | HotShotEvent::ViewChange(_) - | HotShotEvent::SendPayloadCommitmentAndMetadata(..) - | HotShotEvent::Timeout(_) - | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::VidShareRecv(..) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::QuorumVoteSend(_) - | HotShotEvent::QuorumProposalSend(_, _) - | HotShotEvent::Shutdown, - ) - } - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> - where - Self: Sized, - { - let sender = task.clone_sender(); - tracing::trace!("sender queue len {}", sender.len()); - task.state_mut().handle(event, sender).await; - None + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; + + Ok(()) } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + + async fn cancel_subtasks(&mut self) { + while !self.spawned_tasks.is_empty() { + let Some((_, handles)) = self.spawned_tasks.pop_first() else { + break; + }; + + for handle in handles { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + } + } } } diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index f210af96db..ee8c8db214 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -58,7 +58,7 @@ pub(crate) async fn handle_quorum_vote_recv> Consensus2TaskState> TaskState for Consensus2TaskState { - type Event = Arc>; - type Output = (); - - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::QuorumVoteRecv(_) - | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::ViewChange(_) - | HotShotEvent::Timeout(_) - | HotShotEvent::LastDecidedViewUpdated(_) - | HotShotEvent::Shutdown - ) - } - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> - where - Self: Sized, - { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await; - None - } + type Event = HotShotEvent; - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; + + Ok(()) } + + /// Joins all subtasks. + async fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ff2b238173..1c4e596406 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,11 +1,13 @@ use std::{marker::PhantomData, sync::Arc}; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; -use hotshot_task::task::{Task, TaskState}; +use async_trait::async_trait; +use hotshot_task::task::TaskState; use hotshot_types::{ consensus::{Consensus, View}, data::DaProposal, @@ -15,7 +17,6 @@ use hotshot_types::{ simple_vote::{DaData, DaVote}, traits::{ block_contents::vid_commitment, - consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -42,13 +43,9 @@ use crate::{ type VoteCollectorOption = Option>; /// Tracks state of a DA task -pub struct DaTaskState< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, -> { - /// The state's api - pub api: A, +pub struct DaTaskState> { + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -83,9 +80,7 @@ pub struct DaTaskState< pub storage: Arc>, } -impl, A: ConsensusApi + 'static> - DaTaskState -{ +impl> DaTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] pub async fn handle( @@ -152,15 +147,17 @@ impl, A: ConsensusApi + return None; } // Proposal is fresh and valid, notify the application layer - self.api - .send_event(Event { + broadcast_event( + Event { view_number: self.cur_view, event: EventType::DaProposal { proposal: proposal.clone(), sender: sender.clone(), }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; if !self.da_membership.has_stake(&self.public_key) { debug!( @@ -262,7 +259,7 @@ impl, A: ConsensusApi + let result = collector .as_mut() .unwrap() - .handle_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -332,43 +329,27 @@ impl, A: ConsensusApi + error!("Shutting down because of shutdown signal!"); return Some(HotShotTaskCompleted); } - _ => { - error!("unexpected event {:?}", event); - } + _ => {} } None } } +#[async_trait] /// task state implementation for DA Task -impl, A: ConsensusApi + 'static> TaskState - for DaTaskState -{ - type Event = Arc>; - - type Output = HotShotTaskCompleted; - - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::DaProposalRecv(_, _) - | HotShotEvent::DaVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::BlockRecv(_, _, _, _, _) - | HotShotEvent::ViewChange(_) - | HotShotEvent::DaProposalValidated(_, _) - ) - } +impl> TaskState for DaTaskState { + type Event = HotShotEvent; async fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> Option { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await - } + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + Ok(()) } + + async fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index e3a116f00b..e1b4127ed0 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use either::Either; +use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, message::Proposal, @@ -21,6 +22,12 @@ use vbs::version::Version; use crate::view_sync::ViewSyncPhase; +impl TaskEvent for HotShotEvent { + fn shutdown_event() -> Self { + HotShotEvent::Shutdown + } +} + /// Marker that the task completed #[derive(Eq, PartialEq, Debug, Clone)] pub struct HotShotTaskCompleted; diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 01a944543b..8c2f732587 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; -use hotshot_task::task::{Task, TaskRegistry, TaskState}; +use hotshot_task::task::{ConsensusTaskRegistry, Task, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use crate::events::{HotShotEvent, HotShotTaskCompleted}; @@ -15,23 +15,6 @@ pub struct TestHarnessState { allow_extra_output: bool, } -impl TaskState for TestHarnessState { - type Event = Arc>; - type Output = HotShotTaskCompleted; - - async fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> Option { - let extra = task.state_mut().allow_extra_output; - handle_event(event, task, extra) - } - - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) - } -} - /// Runs a test by building the task using `build_fn` and then passing it the `input` events /// and testing the make sure all of the `expected_output` events are seen /// @@ -44,7 +27,7 @@ impl TaskState for TestHarnessState { /// Panics if any state the test expects is not set. Panicking causes a test failure #[allow(clippy::implicit_hasher)] #[allow(clippy::panic)] -pub async fn run_harness>> + Send + 'static>( +pub async fn run_harness> + Send + 'static>( input: Vec>, expected_output: Vec>, state: S, @@ -52,37 +35,35 @@ pub async fn run_harness>> + ) where TYPES: NodeType, { - let registry = Arc::new(TaskRegistry::default()); - let mut tasks = vec![]; + let mut registry = ConsensusTaskRegistry::new(); // set up two broadcast channels so the test sends to the task and the task back to the test let (to_task, from_test) = broadcast(1024); - let (to_test, from_task) = broadcast(1024); - let test_state = TestHarnessState { + let (to_test, mut from_task) = broadcast(1024); + let mut test_state = TestHarnessState { expected_output, allow_extra_output, }; - let test_task = Task::new( - to_test.clone(), - from_task.clone(), - Arc::clone(®istry), - test_state, - ); - let task = Task::new( - to_test.clone(), - from_test.clone(), - Arc::clone(®istry), - state, - ); + let task = Task::new(state, to_test.clone(), from_test.clone()); + + let handle = task.run(); + let test_future = async move { + loop { + if let Ok(event) = from_task.recv_direct().await { + if let Some(HotShotTaskCompleted) = check_event(event, &mut test_state) { + break; + } + } + } + }; - tasks.push(test_task.run()); - tasks.push(task.run()); + registry.register(handle); for event in input { to_task.broadcast_direct(Arc::new(event)).await.unwrap(); } - if async_timeout(Duration::from_secs(2), futures::future::join_all(tasks)) + if async_timeout(Duration::from_secs(2), test_future) .await .is_err() { @@ -100,16 +81,14 @@ pub async fn run_harness>> + /// # Panics /// Will panic to fail the test when it receives and unexpected event #[allow(clippy::needless_pass_by_value)] -pub fn handle_event( +fn check_event( event: Arc>, - task: &mut Task>, - allow_extra_output: bool, + state: &mut TestHarnessState, ) -> Option { - let state = task.state_mut(); // Check the output in either case: // * We allow outputs only in our expected output set. // * We haven't received all expected outputs yet. - if !allow_extra_output || !state.expected_output.is_empty() { + if !state.allow_extra_output || !state.expected_output.is_empty() { assert!( state.expected_output.contains(&event), "Got an unexpected event: {event:?}", diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 59a47ea19f..bcc5d59d9f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,9 +1,11 @@ use std::{collections::HashMap, sync::Arc}; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use hotshot_task::task::{Task, TaskState}; +use async_trait::async_trait; +use hotshot_task::task::TaskState; use hotshot_types::{ constants::{BASE_VERSION, STATIC_VER_0_1}, data::{VidDisperse, VidDisperseShare}, @@ -20,7 +22,7 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, error, instrument, warn}; use vbs::version::Version; use crate::{ @@ -79,27 +81,6 @@ pub struct NetworkMessageTaskState { pub event_stream: Sender>>, } -impl TaskState for NetworkMessageTaskState { - type Event = Vec>; - type Output = (); - - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> - where - Self: Sized, - { - task.state_mut().handle_messages(event).await; - None - } - - fn filter(&self, _event: &Self::Event) -> bool { - false - } - - fn should_shutdown(_event: &Self::Event) -> bool { - false - } -} - impl NetworkMessageTaskState { #[instrument(skip_all, name = "Network message task", level = "trace")] /// Handle the message. @@ -212,41 +193,31 @@ pub struct NetworkEventTaskState< pub storage: Arc>, } +#[async_trait] impl< TYPES: NodeType, COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, S: Storage + 'static, > TaskState for NetworkEventTaskState { - type Event = Arc>; - - type Output = HotShotTaskCompleted; + type Event = HotShotEvent; async fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> Option { - let membership = task.state_mut().membership.clone(); - task.state_mut().handle_event(event, &membership).await - } + &mut self, + event: Arc, + _sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + let membership = self.membership.clone(); - fn should_shutdown(event: &Self::Event) -> bool { - if matches!(event.as_ref(), HotShotEvent::Shutdown) { - info!("Network Task received Shutdown event"); - return true; + if !(self.filter)(&event) { + self.handle(event, &membership).await; } - false - } - fn filter(&self, event: &Self::Event) -> bool { - (self.filter)(event) - && !matches!( - event.as_ref(), - HotShotEvent::VersionUpgrade(_) - | HotShotEvent::ViewChange(_) - | HotShotEvent::Shutdown - ) + Ok(()) } + + async fn cancel_subtasks(&mut self) {} } impl< @@ -260,7 +231,7 @@ impl< /// Returns the completion status. #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 #[instrument(skip_all, fields(view = *self.view), name = "Network Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: Arc>, membership: &TYPES::Membership, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index aca27eb4f0..b4931c1082 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -1,14 +1,16 @@ use std::{collections::HashMap, sync::Arc}; +use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use async_trait::async_trait; use either::Either; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::DependencyTask, - task::{Task, TaskState}, + task::TaskState, }; use hotshot_types::{ consensus::Consensus, @@ -534,36 +536,33 @@ impl> QuorumProposalTaskState> TaskState for QuorumProposalTaskState { - type Event = Arc>; - type Output = (); - fn filter(&self, event: &Self::Event) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::QcFormed(_) - | HotShotEvent::SendPayloadCommitmentAndMetadata(..) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::QuorumProposalLivenessValidated(..) - | HotShotEvent::QuorumProposalSend(..) - | HotShotEvent::VidShareValidated(_) - | HotShotEvent::ValidatedStateUpdated(..) - | HotShotEvent::UpdateHighQc(_) - | HotShotEvent::Shutdown - ) - } - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> - where - Self: Sized, - { - let receiver = task.subscribe(); - let sender = task.clone_sender(); - task.state_mut().handle(event, receiver, sender).await; - None + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, receiver.clone(), sender.clone()).await; + + Ok(()) } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + + async fn cancel_subtasks(&mut self) { + for handle in self + .propose_dependencies + .drain() + .map(|(_view, handle)| handle) + { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + } } } diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index efc3c08774..fde3d7e056 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -2,10 +2,12 @@ use std::{collections::BTreeMap, sync::Arc}; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use async_trait::async_trait; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ @@ -190,28 +192,35 @@ impl> QuorumProposalRecvTaskState< } } +#[async_trait] impl> TaskState for QuorumProposalRecvTaskState { - type Event = Arc>; - type Output = (); - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::QuorumProposalRecv(..) | HotShotEvent::Shutdown - ) - } + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> - where - Self: Sized, - { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await; - None + Ok(()) } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + async fn cancel_subtasks(&mut self) { + while !self.spawned_tasks.is_empty() { + let Some((_, handles)) = self.spawned_tasks.pop_first() else { + break; + }; + + for handle in handles { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + } + } } } diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index db60d70433..13557871f5 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,14 +1,16 @@ use std::{collections::HashMap, sync::Arc}; +use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use async_trait::async_trait; use committable::Committable; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::{DependencyTask, HandleDepOutput}, - task::{Task, TaskState}, + task::TaskState, }; use hotshot_types::{ consensus::Consensus, @@ -536,32 +538,27 @@ impl> QuorumVoteTaskState> TaskState for QuorumVoteTaskState { - type Event = Arc>; - type Output = (); - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::DaCertificateRecv(_) - | HotShotEvent::VidShareRecv(..) - | HotShotEvent::QuorumVoteDependenciesValidated(_) - | HotShotEvent::VoteNow(..) - | HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::ValidatedStateUpdated(..) - | HotShotEvent::Shutdown, - ) - } - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> - where - Self: Sized, - { - let receiver = task.subscribe(); - let sender = task.clone_sender(); - tracing::trace!("sender queue len {}", sender.len()); - task.state_mut().handle(event, receiver, sender).await; - None + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, receiver.clone(), sender.clone()).await; + + Ok(()) } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + + async fn cancel_subtasks(&mut self) { + for handle in self.vote_dependencies.drain().map(|(_view, handle)| handle) { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + } } } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index ab16dc425e..43faddb5d4 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,4 +1,5 @@ use std::{ + collections::BTreeMap, marker::PhantomData, sync::{ atomic::{AtomicBool, Ordering}, @@ -7,9 +8,13 @@ use std::{ time::Duration, }; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, @@ -24,13 +29,12 @@ use hotshot_types::{ }; use rand::{prelude::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Amount of time to try for a request before timing out. const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); @@ -42,7 +46,7 @@ const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); pub struct NetworkRequestState< TYPES: NodeType, I: NodeImplementation, - Ver: StaticVersionType, + Ver: StaticVersionType + 'static, > { /// Network to send requests over pub network: Arc, @@ -67,64 +71,69 @@ pub struct NetworkRequestState< pub id: u64, /// A flag indicating that `HotShotEvent::Shutdown` has been received pub shutdown_flag: Arc, + /// A flag indicating that `HotShotEvent::Shutdown` has been received + pub spawned_tasks: BTreeMap>>, +} + +impl, Ver: StaticVersionType + 'static> Drop + for NetworkRequestState +{ + fn drop(&mut self) { + futures::executor::block_on(async move { self.cancel_subtasks().await }); + } } /// Alias for a signature type Signature = <::SignatureKey as SignatureKey>::PureAssembledSignatureType; +#[async_trait] impl, Ver: StaticVersionType + 'static> TaskState for NetworkRequestState { - type Event = Arc>; - - type Output = HotShotTaskCompleted; + type Event = HotShotEvent; async fn handle_event( - event: Self::Event, - task: &mut hotshot_task::task::Task, - ) -> Option { + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { - let state = task.state(); let prop_view = proposal.view_number(); - if prop_view >= state.view { - state - .spawn_requests(prop_view, task.clone_sender(), Ver::instance()) + if prop_view >= self.view { + self.spawn_requests(prop_view, sender.clone(), Ver::instance()) .await; } - None + Ok(()) } HotShotEvent::ViewChange(view) => { let view = *view; - if view > task.state().view { - task.state_mut().view = view; + if view > self.view { + self.view = view; } - None - } - HotShotEvent::Shutdown => { - task.state().set_shutdown_flag(); - Some(HotShotTaskCompleted) + Ok(()) } - _ => None, + _ => Ok(()), } } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) - } + async fn cancel_subtasks(&mut self) { + self.set_shutdown_flag(); - fn filter(&self, event: &Self::Event) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::Shutdown - | HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::ViewChange(_) - ) - } + while !self.spawned_tasks.is_empty() { + let Some((_, handles)) = self.spawned_tasks.pop_first() else { + break; + }; - async fn shutdown(&mut self) { - self.set_shutdown_flag(); + for handle in handles { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + } + } } } @@ -133,7 +142,7 @@ impl, Ver: StaticVersionType + 'st { /// Spawns tasks for a given view to retrieve any data needed. async fn spawn_requests( - &self, + &mut self, view: TYPES::Time, sender: Sender>>, bind_version: Ver, @@ -161,7 +170,7 @@ impl, Ver: StaticVersionType + 'st /// received will be sent over `sender` #[instrument(skip_all, fields(id = self.id, view = *self.view), name = "NetworkRequestState run_delay", level = "error")] fn run_delay( - &self, + &mut self, request: RequestKind, sender: Sender>>, view: TYPES::Time, @@ -193,11 +202,13 @@ impl, Ver: StaticVersionType + 'st return; }; debug!("Requesting data: {:?}", request); - async_spawn(requester.run::(request, signature)); + let handle = async_spawn(requester.run::(request, signature)); + + self.spawned_tasks.entry(view).or_default().push(handle); } /// Signals delayed requesters to finish - fn set_shutdown_flag(&self) { + pub fn set_shutdown_flag(&self) { self.shutdown_flag.store(true, Ordering::Relaxed); } } @@ -280,6 +291,7 @@ impl> DelayedRequester { } Ok(Err(e)) => { warn!("Error Sending request. Error: {:?}", e); + async_sleep(REQUEST_TIMEOUT).await; } Err(_) => { warn!("Request to other node timed out"); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index b74f4771ec..7a24915cb4 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -3,14 +3,15 @@ use std::{ time::{Duration, Instant}, }; -use anyhow::{bail, Context}; -use async_broadcast::Sender; +use anyhow::{bail, Context, Result}; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; +use async_trait::async_trait; use hotshot_builder_api::block_info::{ AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo, }; -use hotshot_task::task::{Task, TaskState}; +use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, data::{null_block, Leaf}, @@ -18,7 +19,6 @@ use hotshot_types::{ simple_certificate::UpgradeCertificate, traits::{ block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, - consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, @@ -48,15 +48,18 @@ pub struct BuilderResponses { /// It contains the final block information pub block_header: AvailableBlockHeaderInput, } + /// Tracks state of a Transaction task pub struct TransactionTaskState< TYPES: NodeType, I: NodeImplementation, - A: ConsensusApi + 'static, Ver: StaticVersionType, > { /// The state's api - pub api: A, + pub builder_timeout: Duration, + + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -85,12 +88,8 @@ pub struct TransactionTaskState< pub decided_upgrade_certificate: Option>, } -impl< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, - Ver: StaticVersionType, - > TransactionTaskState +impl, Ver: StaticVersionType> + TransactionTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error")] @@ -101,14 +100,17 @@ impl< ) -> Option { match event.as_ref() { HotShotEvent::TransactionsRecv(transactions) => { - self.api - .send_event(Event { + broadcast_event( + Event { view_number: self.cur_view, event: EventType::Transactions { transactions: transactions.clone(), }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; + return None; } HotShotEvent::UpgradeDecided(cert) => { @@ -270,10 +272,9 @@ impl< } }; - while task_start_time.elapsed() < self.api.builder_timeout() { + while task_start_time.elapsed() < self.builder_timeout { match async_compatibility_layer::art::async_timeout( - self.api - .builder_timeout() + self.builder_timeout .saturating_sub(task_start_time.elapsed()), self.block_from_builder(parent_comm, view_num, &parent_comm_sig), ) @@ -401,37 +402,23 @@ impl< } } +#[async_trait] /// task state implementation for Transactions Task -impl< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, - Ver: StaticVersionType + 'static, - > TaskState for TransactionTaskState +impl, Ver: StaticVersionType + 'static> TaskState + for TransactionTaskState { - type Event = Arc>; - - type Output = HotShotTaskCompleted; - - fn filter(&self, event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::TransactionsRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - | HotShotEvent::UpgradeDecided(_) - ) - } + type Event = HotShotEvent; async fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> Option { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await - } + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + Ok(()) } + + async fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index c72e9d842d..656b6dc0c7 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,14 +1,15 @@ use std::sync::Arc; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; +use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ event::{Event, EventType}, simple_certificate::UpgradeCertificate, simple_vote::{UpgradeProposalData, UpgradeVote}, traits::{ - consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -29,13 +30,10 @@ use crate::{ type VoteCollectorOption = Option>; /// Tracks state of a DA task -pub struct UpgradeTaskState< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, -> { - /// The state's api - pub api: A, +pub struct UpgradeTaskState> { + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, + /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -61,9 +59,7 @@ pub struct UpgradeTaskState< pub id: u64, } -impl, A: ConsensusApi + 'static> - UpgradeTaskState -{ +impl> UpgradeTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] pub async fn handle( @@ -123,15 +119,17 @@ impl, A: ConsensusApi + // * the proposal was expected, // * the proposal is valid, and // so we notify the application layer - self.api - .send_event(Event { + broadcast_event( + Event { view_number: self.cur_view, event: EventType::UpgradeProposal { proposal: proposal.clone(), sender: sender.clone(), }, - }) - .await; + }, + &self.output_event_stream, + ) + .await; // If everything is fine up to here, we generate and send a vote on the proposal. let Ok(vote) = UpgradeVote::create_signed_vote( @@ -182,7 +180,7 @@ impl, A: ConsensusApi + let result = collector .as_mut() .unwrap() - .handle_event(Arc::clone(&event), &tx) + .handle_vote_event(Arc::clone(&event), &tx) .await; if result == Some(HotShotTaskCompleted) { @@ -261,43 +259,27 @@ impl, A: ConsensusApi + error!("Shutting down because of shutdown signal!"); return Some(HotShotTaskCompleted); } - _ => { - error!("unexpected event {:?}", event); - } + _ => {} } None } } +#[async_trait] /// task state implementation for the upgrade task -impl, A: ConsensusApi + 'static> TaskState - for UpgradeTaskState -{ - type Event = Arc>; - - type Output = HotShotTaskCompleted; +impl> TaskState for UpgradeTaskState { + type Event = HotShotEvent; async fn handle_event( - event: Self::Event, - task: &mut hotshot_task::task::Task, - ) -> Option { - let sender = task.clone_sender(); - tracing::trace!("sender queue len {}", sender.len()); - task.state_mut().handle(event, sender).await - } + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + Ok(()) } - fn filter(&self, event: &Self::Event) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::UpgradeProposalRecv(_, _) - | HotShotEvent::UpgradeVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - | HotShotEvent::VersionUpgrade(_) - ) - } + async fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index f7dd6d4045..25750939ac 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,8 +1,10 @@ use std::{marker::PhantomData, sync::Arc}; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; -use hotshot_task::task::{Task, TaskState}; +use async_trait::async_trait; +use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, data::{VidDisperse, VidDisperseShare}, @@ -25,7 +27,6 @@ use crate::{ pub struct VidTaskState> { /// View number this view is executing in. pub cur_view: TYPES::Time, - /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>, /// Network for all nodes @@ -146,38 +147,26 @@ impl> VidTaskState { HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted); } - _ => { - error!("unexpected event {:?}", event); - } + _ => {} } None } } +#[async_trait] /// task state implementation for VID Task impl> TaskState for VidTaskState { - type Event = Arc>; - - type Output = HotShotTaskCompleted; + type Event = HotShotEvent; async fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> Option { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await; - None - } - fn filter(&self, event: &Self::Event) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::Shutdown - | HotShotEvent::BlockRecv(_, _, _, _, _) - | HotShotEvent::BlockReady(_, _) - | HotShotEvent::ViewChange(_) - ) - } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; + Ok(()) } + + async fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 36fd1424f2..0fa9e60224 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -6,12 +6,14 @@ use std::{ time::Duration, }; -use async_broadcast::Sender; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use hotshot_task::task::{Task, TaskState}; +use async_trait::async_trait; +use hotshot_task::task::TaskState; use hotshot_types::{ message::GeneralConsensusMessage, simple_certificate::{ @@ -22,7 +24,6 @@ use hotshot_types::{ ViewSyncPreCommitData, ViewSyncPreCommitVote, }, traits::{ - consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -58,11 +59,7 @@ type RelayMap = HashMap<::Time, BTreeMap>>; /// Main view sync task state -pub struct ViewSyncTaskState< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static + std::clone::Clone, -> { +pub struct ViewSyncTaskState> { /// View HotShot is currently in pub current_view: TYPES::Time, /// View HotShot wishes to be in @@ -75,8 +72,6 @@ pub struct ViewSyncTaskState< pub public_key: TYPES::SignatureKey, /// Our Private Key pub private_key: ::PrivateKey, - /// HotShot consensus API - pub api: A, /// Our node id; for logging pub id: u64, @@ -84,7 +79,7 @@ pub struct ViewSyncTaskState< pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: RwLock>>, + pub replica_task_map: RwLock>>, /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: @@ -103,49 +98,26 @@ pub struct ViewSyncTaskState< pub last_garbage_collected_view: TYPES::Time, } -impl< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static + std::clone::Clone, - > TaskState for ViewSyncTaskState -{ - type Event = Arc>; - - type Output = (); +#[async_trait] +impl> TaskState for ViewSyncTaskState { + type Event = HotShotEvent; - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await; - None - } + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; - fn filter(&self, event: &Self::Event) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) - | HotShotEvent::ViewSyncCommitVoteRecv(_) - | HotShotEvent::ViewSyncFinalizeVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::Timeout(_) - | HotShotEvent::ViewSyncTimeout(_, _, _) - | HotShotEvent::ViewChange(_) - ) + Ok(()) } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) - } + async fn cancel_subtasks(&mut self) {} } /// State of a view sync replica task -pub struct ViewSyncReplicaTaskState< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, -> { +pub struct ViewSyncReplicaTaskState> { /// Timeout for view sync rounds pub view_sync_timeout: Duration, /// Current round HotShot is in @@ -171,49 +143,29 @@ pub struct ViewSyncReplicaTaskState< pub public_key: TYPES::SignatureKey, /// Our Private Key pub private_key: ::PrivateKey, - /// HotShot consensus API - pub api: A, } -impl, A: ConsensusApi + 'static> TaskState - for ViewSyncReplicaTaskState +#[async_trait] +impl> TaskState + for ViewSyncReplicaTaskState { - type Event = Arc>; + type Event = HotShotEvent; - type Output = (); + async fn handle_event( + &mut self, + event: Arc, + sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event, sender.clone()).await; - async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { - let sender = task.clone_sender(); - task.state_mut().handle(event, sender).await; - None - } - fn filter(&self, event: &Self::Event) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) - | HotShotEvent::ViewSyncCommitVoteRecv(_) - | HotShotEvent::ViewSyncFinalizeVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::Timeout(_) - | HotShotEvent::ViewSyncTimeout(_, _, _) - | HotShotEvent::ViewChange(_) - ) + Ok(()) } - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) - } + async fn cancel_subtasks(&mut self) {} } -impl< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static + std::clone::Clone, - > ViewSyncTaskState -{ +impl> ViewSyncTaskState { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task @@ -249,7 +201,7 @@ impl< } // We do not have a replica task already running, so start one - let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { + let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { current_view: view, next_view: view, relay: 0, @@ -260,7 +212,6 @@ impl< network: Arc::clone(&self.network), public_key: self.public_key.clone(), private_key: self.private_key.clone(), - api: self.api.clone(), view_sync_timeout: self.view_sync_timeout, id: self.id, }; @@ -319,7 +270,7 @@ impl< if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); let result = relay_task - .handle_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -357,7 +308,7 @@ impl< if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); let result = relay_task - .handle_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -395,7 +346,7 @@ impl< if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); let result = relay_task - .handle_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_stream) .await; if result == Some(HotShotTaskCompleted) { @@ -510,9 +461,7 @@ impl< } } -impl, A: ConsensusApi + 'static> - ViewSyncReplicaTaskState -{ +impl> ViewSyncReplicaTaskState { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 62b604cae7..dda367a840 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -3,7 +3,6 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; use async_broadcast::Sender; use async_trait::async_trait; use either::Either::{self, Left, Right}; -use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ simple_certificate::{ DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -105,36 +104,6 @@ impl< } } -impl< - TYPES: NodeType, - VOTE: Vote - + AggregatableVote - + std::marker::Send - + std::marker::Sync - + 'static, - CERT: Certificate - + Debug - + std::marker::Send - + std::marker::Sync - + 'static, - > TaskState for VoteCollectionTaskState -where - VoteCollectionTaskState: HandleVoteEvent, -{ - type Event = Arc>; - - type Output = HotShotTaskCompleted; - - async fn handle_event(event: Self::Event, task: &mut Task) -> Option { - let sender = task.clone_sender(); - task.state_mut().handle_event(event, &sender).await - } - - fn should_shutdown(event: &Self::Event) -> bool { - matches!(event.as_ref(), HotShotEvent::Shutdown) - } -} - /// Trait for types which will handle a vote event. #[async_trait] pub trait HandleVoteEvent @@ -144,7 +113,7 @@ where CERT: Certificate + Debug, { /// Handle a vote event - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -211,7 +180,7 @@ where id: info.id, }; - let result = state.handle_event(Arc::clone(&event), sender).await; + let result = state.handle_vote_event(Arc::clone(&event), sender).await; if result == Some(HotShotTaskCompleted) { // The protocol has finished @@ -354,7 +323,7 @@ impl impl HandleVoteEvent, QuorumCertificate> for QuorumVoteState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -374,7 +343,7 @@ impl HandleVoteEvent, QuorumCertificat impl HandleVoteEvent, UpgradeCertificate> for UpgradeVoteState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -393,7 +362,7 @@ impl HandleVoteEvent, UpgradeCertific impl HandleVoteEvent, DaCertificate> for DaVoteState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -412,7 +381,7 @@ impl HandleVoteEvent, DaCertificate impl HandleVoteEvent, TimeoutCertificate> for TimeoutVoteState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -432,7 +401,7 @@ impl HandleVoteEvent, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -454,7 +423,7 @@ impl HandleVoteEvent, ViewSyncCommitCertificate2> for ViewSyncCommitVoteState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, @@ -474,7 +443,7 @@ impl HandleVoteEvent, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVoteState { - async fn handle_event( + async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, diff --git a/task/Cargo.toml b/task/Cargo.toml index 7e4dadd3a4..3983158a31 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -8,10 +8,12 @@ edition = { workspace = true } [dependencies] -futures = "0.3" -async-broadcast = "0.7" +futures = { workspace = true } +async-broadcast = { workspace = true } tracing = { workspace = true } async-compatibility-layer = { workspace = true } +anyhow = { workspace = true } +async-trait = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true, features = [ diff --git a/task/src/task.rs b/task/src/task.rs index ad7ce9c316..1daebeaa1c 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -1,466 +1,187 @@ -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; -use async_broadcast::{Receiver, SendError, Sender}; -use async_compatibility_layer::art::async_timeout; +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; #[cfg(async_executor_impl = "async-std")] -use async_std::{ - sync::RwLock, - task::{spawn, JoinHandle}, -}; +use async_std::task::{spawn, JoinHandle}; +use async_trait::async_trait; #[cfg(async_executor_impl = "async-std")] use futures::future::join_all; #[cfg(async_executor_impl = "tokio")] use futures::future::try_join_all; -use futures::{future::select_all, Future}; #[cfg(async_executor_impl = "tokio")] -use tokio::{ - sync::RwLock, - task::{spawn, JoinHandle}, -}; -use tracing::{error, warn}; +use tokio::task::{spawn, JoinHandle}; -use crate::{ - dependency::Dependency, - dependency_task::{DependencyTask, HandleDepOutput}, -}; +/// Trait for events that long-running tasks handle +pub trait TaskEvent: PartialEq { + /// The shutdown signal for this event type + /// + /// Note that this is necessarily uniform across all tasks. + /// Exiting the task loop is handled by the task spawner, rather than the task individually. + fn shutdown_event() -> Self; +} +#[async_trait] /// Type for mutable task state that can be used as the state for a `Task` pub trait TaskState: Send { /// Type of event sent and received by the task - type Event: Clone + Send + Sync + 'static; - /// The result returned when this task completes - type Output: Send; - /// Handle event and update state. Return true if the task is finished - /// false otherwise. The handler can access the state through `Task::state_mut` - fn handle_event( - event: Self::Event, - task: &mut Task, - ) -> impl Future> + Send - where - Self: Sized; + type Event: TaskEvent + Clone + Send + Sync; - /// Return true if the event should be filtered - fn filter(&self, _event: &Self::Event) -> bool { - // default doesn't filter - false - } - /// Do something with the result of the task before it shuts down - fn handle_result(&self, _res: &Self::Output) -> impl std::future::Future + Send { - async {} - } - /// Return true if the event should shut the task down - fn should_shutdown(event: &Self::Event) -> bool; - /// Handle anything before the task is completely shutdown - fn shutdown(&mut self) -> impl std::future::Future + Send { - async {} - } -} + /// Joins all subtasks. + async fn cancel_subtasks(&mut self); -/// Task state for a test. Similar to `TaskState` but it handles -/// messages as well as events. Messages are events that are -/// external to this task. (i.e. a test message would be an event from non test task) -/// This is used as state for `TestTask` and messages can come from many -/// different input streams. -pub trait TestTaskState: Send { - /// Message type handled by the task - type Message: Clone + Send + Sync + 'static; - /// Result returned by the test task on completion - type Output: Send; - /// The state type - type State: TaskState; - /// Handle and incoming message and return `Some` if the task is finished - fn handle_message( - message: Self::Message, - id: usize, - task: &mut TestTask, - ) -> impl Future> + Send - where - Self: Sized; + /// Handles an event, providing direct access to the specific channel we received the event on. + async fn handle_event( + &mut self, + event: Arc, + _sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()>; } /// A basic task which loops waiting for events to come from `event_receiver` -/// and then handles them using it's state -/// It sends events to other `Task`s through `event_sender` +/// and then handles them using its state +/// It sends events to other `Task`s through `sender` /// This should be used as the primary building block for long running /// or medium running tasks (i.e. anything that can't be described as a dependency task) pub struct Task { + /// The state of the task. It is fed events from `receiver` + /// and mutated via `handle_event`. + state: S, /// Sends events all tasks including itself - event_sender: Sender, + sender: Sender>, /// Receives events that are broadcast from any task, including itself - event_receiver: Receiver, - /// Contains this task, used to register any spawned tasks - registry: Arc, - /// The state of the task. It is fed events from `event_sender` - /// and mutates it state ocordingly. Also it signals the task - /// if it is complete/should shutdown - state: S, + receiver: Receiver>, } impl Task { /// Create a new task - pub fn new( - tx: Sender, - rx: Receiver, - registry: Arc, - state: S, - ) -> Self { + pub fn new(state: S, sender: Sender>, receiver: Receiver>) -> Self { Task { - event_sender: tx, - event_receiver: rx, - registry, state, + sender, + receiver, } } - /// The Task analog of `TaskState::handle_event`. - pub fn handle_event( - &mut self, - event: S::Event, - ) -> impl Future> + Send + '_ - where - Self: Sized, - { - S::handle_event(event, self) + /// The state of the task, as a boxed dynamic trait object. + fn boxed_state(self) -> Box> { + Box::new(self.state) as Box> } /// Spawn the task loop, consuming self. Will continue until /// the task reaches some shutdown condition - pub fn run(mut self) -> JoinHandle<()> { - spawn(async move { - loop { - match self.event_receiver.recv_direct().await { - Ok(event) => { - if S::should_shutdown(&event) { - self.state.shutdown().await; - break; - } - if self.state.filter(&event) { - continue; - } - if let Some(res) = S::handle_event(event, &mut self).await { - self.state.handle_result(&res).await; - self.state.shutdown().await; - break; - } - } - Err(e) => { - tracing::error!("Failed to receiving from event stream Error: {}", e); - } - } - } - }) - } - - /// Create a new event `Receiver` from this Task's receiver. - /// The returned receiver will get all messages not yet seen by this task - pub fn subscribe(&self) -> Receiver { - self.event_receiver.clone() - } - /// Get a new sender handle for events - pub fn sender(&self) -> &Sender { - &self.event_sender - } - /// Clone the sender handle - pub fn clone_sender(&self) -> Sender { - self.event_sender.clone() - } - /// Broadcast a message to all listening tasks - /// # Errors - /// Errors if the broadcast fails - pub async fn send(&self, event: S::Event) -> Result, SendError> { - self.event_sender.broadcast(event).await - } - /// Get a mutable reference to this tasks state - pub fn state_mut(&mut self) -> &mut S { - &mut self.state - } - /// Get an immutable reference to this tasks state - pub fn state(&self) -> &S { - &self.state - } - - /// Spawn a new task and register it. It will get all events not seend - /// by the task creating it. - pub async fn run_sub_task(&self, state: S) { - let task = Task { - event_sender: self.clone_sender(), - event_receiver: self.subscribe(), - registry: Arc::clone(&self.registry), - state, - }; - // Note: await here is only awaiting the task to be added to the - // registry, not for the task to run. - self.registry.run_task(task).await; - } -} - -/// Similar to `Task` but adds functionality for testing. Notably -/// it adds message receivers to collect events from many non-test tasks -pub struct TestTask { - /// Task which handles test events - task: Task, - /// Receivers for outside events - message_receivers: Vec>, -} - -impl< - S: TaskState + Send + 'static, - T: TestTaskState + Send + Sync + 'static, - > TestTask -{ - /// Create a test task - pub fn new(task: Task, rxs: Vec>) -> Self { - Self { - task, - message_receivers: rxs, - } - } - /// Runs the task, taking events from the the test events and the message receivers. - /// Consumes self and runs until some shutdown condition is met. - /// The join handle will return the result of the task, useful for deciding if the test - /// passed or not. - pub fn run(mut self) -> JoinHandle { + pub fn run(mut self) -> JoinHandle>> { spawn(async move { loop { - let mut futs = vec![]; + match self.receiver.recv_direct().await { + Ok(input) => { + if *input == S::Event::shutdown_event() { + self.state.cancel_subtasks().await; - if let Ok(event) = self.task.event_receiver.try_recv() { - if S::should_shutdown(&event) { - self.task.state.shutdown().await; - tracing::error!("Shutting down test task TODO!"); - todo!(); - } - if !self.state().filter(&event) { - if let Some(res) = S::handle_event(event, &mut self.task).await { - self.task.state.handle_result(&res).await; - self.task.state.shutdown().await; - return res; + break self.boxed_state(); } - } - } - for rx in &mut self.message_receivers { - futs.push(rx.recv()); - } - // if let Ok((Ok(msg), id, _)) = - match async_timeout(Duration::from_secs(1), select_all(futs)).await { - Ok((Ok(msg), id, _)) => { - if let Some(res) = T::handle_message(msg, id, &mut self).await { - self.task.state.handle_result(&res).await; - self.task.state.shutdown().await; - return res; - } + let _ = + S::handle_event(&mut self.state, input, &self.sender, &self.receiver) + .await + .inspect_err(|e| tracing::info!("{e}")); } Err(e) => { - warn!("Failed to get event from task. Error: {:?}", e); - } - Ok((Err(e), _, _)) => { - error!("A task channel returned an Error: {:?}", e); + tracing::error!("Failed to receive from event stream Error: {}", e); } } } }) } - - /// Get a ref to state - pub fn state(&self) -> &S { - &self.task.state - } - /// Get a mutable ref to state - pub fn state_mut(&mut self) -> &mut S { - self.task.state_mut() - } - /// Send an event to other listening test tasks - /// - /// # Panics - /// panics if the event can't be sent (ok to panic in test) - pub async fn send_event(&self, event: S::Event) { - self.task.send(event).await.unwrap(); - } } #[derive(Default)] /// A collection of tasks which can handle shutdown -pub struct TaskRegistry { +pub struct ConsensusTaskRegistry { /// Tasks this registry controls - task_handles: RwLock>>, + task_handles: Vec>>>, } -impl TaskRegistry { +impl ConsensusTaskRegistry { + #[must_use] + /// Create a new task registry + pub fn new() -> Self { + ConsensusTaskRegistry { + task_handles: vec![], + } + } /// Add a task to the registry - pub async fn register(&self, handle: JoinHandle<()>) { - self.task_handles.write().await.push(handle); + pub fn register(&mut self, handle: JoinHandle>>) { + self.task_handles.push(handle); } /// Try to cancel/abort the task this registry has - pub async fn shutdown(&self) { - let mut handles = self.task_handles.write().await; + /// + /// # Panics + /// + /// Should not panic, unless awaiting on the JoinHandle in tokio fails. + pub async fn shutdown(&mut self) { + let handles = &mut self.task_handles; + while let Some(handle) = handles.pop() { #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; + let mut task_state = handle.await; #[cfg(async_executor_impl = "tokio")] - handle.abort(); + let mut task_state = handle.await.unwrap(); + + task_state.cancel_subtasks().await; } } /// Take a task, run it, and register it - pub async fn run_task(&self, task: Task) + pub fn run_task(&mut self, task: Task) where - S: TaskState + Send + 'static, + S: TaskState + Send + 'static, { - self.register(task.run()).await; - } - /// Create a new `DependencyTask` run it, and register it - pub async fn spawn_dependency_task( - &self, - dep: impl Dependency + Send + 'static, - handle: impl HandleDepOutput, - ) { - let join_handle = DependencyTask { dep, handle }.run(); - self.register(join_handle).await; + self.register(task.run()); } + /// Wait for the results of all the tasks registered /// # Panics /// Panics if one of the tasks panicked - pub async fn join_all(self) -> Vec<()> { + pub async fn join_all(self) -> Vec>> { #[cfg(async_executor_impl = "async-std")] - let ret = join_all(self.task_handles.into_inner()).await; + let states = join_all(self.task_handles).await; #[cfg(async_executor_impl = "tokio")] - let ret = try_join_all(self.task_handles.into_inner()).await.unwrap(); - ret + let states = try_join_all(self.task_handles).await.unwrap(); + + states } } -#[cfg(test)] -mod tests { - use std::{collections::HashSet, time::Duration}; - - use async_broadcast::broadcast; - #[cfg(async_executor_impl = "async-std")] - use async_std::task::sleep; - #[cfg(async_executor_impl = "tokio")] - use tokio::time::sleep; - - use super::*; - - #[derive(Default)] - pub struct DummyHandle { - val: usize, - seen: HashSet, - } +#[derive(Default)] +/// A collection of tasks which can handle shutdown +pub struct NetworkTaskRegistry { + /// Tasks this registry controls + pub handles: Vec>, +} - #[allow(clippy::panic)] - impl TaskState for DummyHandle { - type Event = usize; - type Output = (); - async fn handle_event(event: usize, task: &mut Task) -> Option<()> { - sleep(Duration::from_millis(10)).await; - let state = task.state_mut(); - state.seen.insert(event); - if event > state.val { - state.val = event; - assert!( - state.val < 100, - "Test should shutdown before getting an event for 100" - ); - task.send(event + 1).await.unwrap(); - } - None - } - fn should_shutdown(event: &usize) -> bool { - *event >= 98 - } - async fn shutdown(&mut self) { - for i in 1..98 { - assert!(self.seen.contains(&i)); - } - } +impl NetworkTaskRegistry { + #[must_use] + /// Create a new task registry + pub fn new() -> Self { + NetworkTaskRegistry { handles: vec![] } } - impl TestTaskState for DummyHandle { - type Message = String; - type Output = (); - type State = Self; + #[allow(clippy::unused_async)] + /// Shuts down all tasks in the registry, performing any associated cleanup. + pub async fn shutdown(&mut self) { + let handles = &mut self.handles; - async fn handle_message( - message: Self::Message, - _: usize, - _: &mut TestTask, - ) -> Option<()> { - if message == *"done".to_string() { - return Some(()); - } - None + while let Some(handle) = handles.pop() { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); } } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[allow(unused_must_use)] - async fn it_works() { - let reg = Arc::new(TaskRegistry::default()); - let (tx, rx) = broadcast(10); - let task1 = Task:: { - event_sender: tx.clone(), - event_receiver: rx.clone(), - registry: Arc::clone(®), - state: DummyHandle::default(), - }; - tx.broadcast(1).await.unwrap(); - let task2 = Task:: { - event_sender: tx.clone(), - event_receiver: rx, - registry: reg, - state: DummyHandle::default(), - }; - let handle = task2.run(); - let _res = task1.run().await; - handle.await; - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 10) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[allow(clippy::should_panic_without_expect)] - #[should_panic] - async fn test_works() { - let reg = Arc::new(TaskRegistry::default()); - let (tx, rx) = broadcast(10); - let (msg_tx, msg_rx) = broadcast(10); - let task1 = Task:: { - event_sender: tx.clone(), - event_receiver: rx.clone(), - registry: Arc::clone(®), - state: DummyHandle::default(), - }; - tx.broadcast(1).await.unwrap(); - let task2 = Task:: { - event_sender: tx.clone(), - event_receiver: rx, - registry: reg, - state: DummyHandle::default(), - }; - let test1 = TestTask::<_, DummyHandle> { - task: task1, - message_receivers: vec![msg_rx.clone()], - }; - let test2 = TestTask::<_, DummyHandle> { - task: task2, - message_receivers: vec![msg_rx.clone()], - }; - let handle = test1.run(); - let handle2 = test2.run(); - sleep(Duration::from_millis(30)).await; - msg_tx.broadcast("done".into()).await.unwrap(); - #[cfg(async_executor_impl = "tokio")] - { - handle.await.unwrap(); - handle2.await.unwrap(); - } - #[cfg(async_executor_impl = "async-std")] - { - handle.await; - handle2.await; - } + /// Add a task to the registry + pub fn register(&mut self, handle: JoinHandle<()>) { + self.handles.push(handle); } } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index ad2757fdb5..af0bb6b6d4 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -14,6 +14,7 @@ dependency-tasks = ["hotshot/dependency-tasks"] [dependencies] automod = "1.0.14" +anyhow = { workspace = true } async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index c0e5d77e67..3f8f711454 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -1,7 +1,8 @@ -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_spawn, async_timeout}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; @@ -11,8 +12,7 @@ use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use super::GlobalTestEvent; -use crate::test_runner::{HotShotTaskCompleted, Node}; +use crate::{test_runner::Node, test_task::TestEvent}; /// the idea here is to run as long as we want @@ -22,33 +22,33 @@ pub struct CompletionTaskErr {} /// Completion task state pub struct CompletionTask> { - pub tx: Sender, + pub tx: Sender, - pub rx: Receiver, + pub rx: Receiver, /// handles to the nodes in the test - pub(crate) handles: Vec>, + pub(crate) handles: Arc>>>, /// Duration of the task. pub duration: Duration, } impl> CompletionTask { - pub fn run(mut self) -> JoinHandle { + pub fn run(mut self) -> JoinHandle<()> { async_spawn(async move { if async_timeout(self.duration, self.wait_for_shutdown()) .await .is_err() { - broadcast_event(GlobalTestEvent::ShutDown, &self.tx).await; + broadcast_event(TestEvent::Shutdown, &self.tx).await; } - for node in &self.handles { - node.handle.clone().shut_down().await; + + for node in &mut self.handles.write().await.iter_mut() { + node.handle.shut_down().await; } - HotShotTaskCompleted::ShutDown }) } async fn wait_for_shutdown(&mut self) { while let Ok(event) = self.rx.recv_direct().await { - if matches!(event, GlobalTestEvent::ShutDown) { + if matches!(event, TestEvent::Shutdown) { tracing::error!("Completion Task shutting down"); return; } diff --git a/testing/src/lib.rs b/testing/src/lib.rs index e1bb38e034..43ce6ebe5b 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -30,6 +30,9 @@ pub mod completion_task; /// task to spin nodes up and down pub mod spinning_task; +/// the `TestTask` struct and associated trait/functions +pub mod test_task; + /// task for checking if view sync got activated pub mod view_sync_task; @@ -44,10 +47,3 @@ pub mod script; /// view generator for tests pub mod view_generator; - -/// global event at the test level -#[derive(Clone, Debug)] -pub enum GlobalTestEvent { - /// the test is shutting down - ShutDown, -} diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 635c277539..0827e57fd6 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -3,8 +3,11 @@ use std::{ sync::Arc, }; +use anyhow::Result; +use async_broadcast::Sender; +use async_lock::RwLock; +use async_trait::async_trait; use hotshot::{traits::TestableNodeImplementation, HotShotError}; -use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ data::Leaf, error::RoundTimedoutState, @@ -16,12 +19,13 @@ use hotshot_types::{ use snafu::Snafu; use tracing::error; -use crate::test_runner::{HotShotTaskCompleted, Node}; +use crate::{ + test_runner::Node, + test_task::{TestEvent, TestResult, TestTaskState}, +}; /// convenience type alias for state and block pub type StateAndBlock = (Vec, Vec); -use super::GlobalTestEvent; - /// the status of a view #[derive(Debug, Clone)] pub enum ViewStatus { @@ -66,78 +70,25 @@ pub enum OverallSafetyTaskErr { /// Data availability task state pub struct OverallSafetyTask> { /// handles - pub handles: Vec>, + pub handles: Arc>>>, /// ctx pub ctx: RoundCtx, /// configure properties pub properties: OverallSafetyPropertiesDescription, + /// error + pub error: Option>>, + /// sender to test event channel + pub test_sender: Sender, } -impl> TaskState - for OverallSafetyTask -{ - type Event = GlobalTestEvent; - - type Output = HotShotTaskCompleted; - - async fn handle_event(event: Self::Event, task: &mut Task) -> Option { - match event { - GlobalTestEvent::ShutDown => { - tracing::error!("Shutting down SafetyTask"); - let state = task.state_mut(); - let OverallSafetyPropertiesDescription { - check_leaf: _, - check_block: _, - num_failed_views: num_failed_rounds_total, - num_successful_views, - threshold_calculator: _, - transaction_threshold: _, - }: OverallSafetyPropertiesDescription = state.properties.clone(); - - let num_incomplete_views = state.ctx.round_results.len() - - state.ctx.successful_views.len() - - state.ctx.failed_views.len(); - - if state.ctx.successful_views.len() < num_successful_views { - return Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::NotEnoughDecides { - got: state.ctx.successful_views.len(), - expected: num_successful_views, - }, - ))); - } - - if state.ctx.failed_views.len() + num_incomplete_views >= num_failed_rounds_total { - return Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))); - } - Some(HotShotTaskCompleted::ShutDown) - } - } - } - - fn should_shutdown(_event: &Self::Event) -> bool { - false - } -} - +#[async_trait] impl> TestTaskState for OverallSafetyTask { - type Message = Event; - - type Output = HotShotTaskCompleted; + type Event = Event; - type State = Self; - - async fn handle_message( - message: Self::Message, - idx: usize, - task: &mut hotshot_task::task::TestTask, - ) -> Option { + /// Handles an event from one of multiple receivers. + async fn handle_event(&mut self, (message, id): (Self::Event, usize)) -> Result<()> { let OverallSafetyPropertiesDescription { check_leaf, check_block, @@ -145,13 +96,12 @@ impl> TestTaskState num_successful_views, threshold_calculator, transaction_threshold, - }: OverallSafetyPropertiesDescription = task.state().properties.clone(); + }: OverallSafetyPropertiesDescription = self.properties.clone(); let Event { view_number, event } = message; let key = match event { EventType::Error { error } => { - task.state_mut() - .ctx - .insert_error_to_context(view_number, idx, error); + self.ctx + .insert_error_to_context(view_number, id, error.clone()); None } EventType::Decide { @@ -161,17 +111,17 @@ impl> TestTaskState } => { // Skip the genesis leaf. if leaf_chain.last().unwrap().leaf.view_number() == TYPES::Time::genesis() { - return None; + return Ok(()); } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); - match task.state_mut().ctx.round_results.entry(view_number) { + match self.ctx.round_results.entry(view_number) { Entry::Occupied(mut o) => { o.get_mut() - .insert_into_result(idx, paired_up, maybe_block_size) + .insert_into_result(id, paired_up, maybe_block_size) } Entry::Vacant(v) => { let mut round_result = RoundResult::default(); - let key = round_result.insert_into_result(idx, paired_up, maybe_block_size); + let key = round_result.insert_into_result(id, paired_up, maybe_block_size); v.insert(round_result); key } @@ -182,25 +132,18 @@ impl> TestTaskState view_number, state: RoundTimedoutState::TestCollectRoundEventsTimedOut, }); - task.state_mut() - .ctx - .insert_error_to_context(view_number, idx, error); + self.ctx.insert_error_to_context(view_number, id, error); None } - _ => return None, + _ => return Ok(()), }; + let len = self.handles.read().await.len(); + // update view count - let threshold = - (threshold_calculator)(task.state().handles.len(), task.state().handles.len()); - - let len = task.state().handles.len(); - let view = task - .state_mut() - .ctx - .round_results - .get_mut(&view_number) - .unwrap(); + let threshold = (threshold_calculator)(len, len); + + let view = self.ctx.round_results.get_mut(&view_number).unwrap(); if let Some(key) = key { view.update_status( threshold, @@ -212,47 +155,77 @@ impl> TestTaskState ); match view.status.clone() { ViewStatus::Ok => { - task.state_mut().ctx.successful_views.insert(view_number); - if task.state_mut().ctx.successful_views.len() >= num_successful_views { - task.send_event(GlobalTestEvent::ShutDown).await; - return Some(HotShotTaskCompleted::ShutDown); + self.ctx.successful_views.insert(view_number); + if self.ctx.successful_views.len() >= num_successful_views { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; } - return None; + return Ok(()); } ViewStatus::Failed => { - task.state_mut().ctx.failed_views.insert(view_number); - if task.state_mut().ctx.failed_views.len() > num_failed_views { - task.send_event(GlobalTestEvent::ShutDown).await; - return Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: task.state_mut().ctx.failed_views.clone(), - }, - ))); + self.ctx.failed_views.insert(view_number); + if self.ctx.failed_views.len() > num_failed_views { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + self.error = + Some(Box::new(OverallSafetyTaskErr::::TooManyFailures { + failed_views: self.ctx.failed_views.clone(), + })); } - return None; + return Ok(()); } ViewStatus::Err(e) => { - task.send_event(GlobalTestEvent::ShutDown).await; - return Some(HotShotTaskCompleted::Error(Box::new(e))); + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + self.error = Some(Box::new(e)); + return Ok(()); } ViewStatus::InProgress => { - return None; + return Ok(()); } } } else if view.check_if_failed(threshold, len) { view.status = ViewStatus::Failed; - task.state_mut().ctx.failed_views.insert(view_number); - if task.state_mut().ctx.failed_views.len() > num_failed_views { - task.send_event(GlobalTestEvent::ShutDown).await; - return Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: task.state_mut().ctx.failed_views.clone(), - }, - ))); + self.ctx.failed_views.insert(view_number); + if self.ctx.failed_views.len() > num_failed_views { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + self.error = Some(Box::new(OverallSafetyTaskErr::::TooManyFailures { + failed_views: self.ctx.failed_views.clone(), + })); } - return None; + return Ok(()); } - None + Ok(()) + } + + fn check(&self) -> TestResult { + if let Some(e) = &self.error { + return TestResult::Fail(e.clone()); + } + + let OverallSafetyPropertiesDescription { + check_leaf: _, + check_block: _, + num_failed_views: num_failed_rounds_total, + num_successful_views, + threshold_calculator: _, + transaction_threshold: _, + }: OverallSafetyPropertiesDescription = self.properties.clone(); + + let num_incomplete_views = self.ctx.round_results.len() + - self.ctx.successful_views.len() + - self.ctx.failed_views.len(); + + if self.ctx.successful_views.len() < num_successful_views { + return TestResult::Fail(Box::new(OverallSafetyTaskErr::::NotEnoughDecides { + got: self.ctx.successful_views.len(), + expected: num_successful_views, + })); + } + + if self.ctx.failed_views.len() + num_incomplete_views > num_failed_rounds_total { + return TestResult::Fail(Box::new(OverallSafetyTaskErr::::TooManyFailures { + failed_views: self.ctx.failed_views.clone(), + })); + } + TestResult::Pass } } diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index e5a2c15d60..69c40db865 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -237,6 +237,27 @@ where Box::new(EventPredicate { check, info }) } +pub fn view_sync_timeout() -> Box> +where + TYPES: NodeType, +{ + let info = "ViewSyncTimeout".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), ViewSyncTimeout(..))); + Box::new(EventPredicate { check, info }) +} + +pub fn view_sync_precommit_vote_send() -> Box> +where + TYPES: NodeType, +{ + let info = "ViewSyncPreCommitVoteSend".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(e.as_ref(), ViewSyncPreCommitVoteSend(..)) + }); + Box::new(EventPredicate { check, info }) +} + pub fn vote_now() -> Box> where TYPES: NodeType, diff --git a/testing/src/script.rs b/testing/src/script.rs index 3edde1b4d9..2721c87846 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; -use hotshot_task::task::{Task, TaskRegistry, TaskState}; +use hotshot_task::task::TaskState; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::NodeType; @@ -10,7 +10,7 @@ use crate::predicates::{Predicate, PredicateResult}; pub const RECV_TIMEOUT: Duration = Duration::from_millis(250); -pub struct TestScriptStage>>> { +pub struct TestScriptStage>> { pub inputs: Vec>, pub outputs: Vec>>>>, pub asserts: Vec>>, @@ -89,22 +89,15 @@ where /// Note: the task is not spawned with an async thread; instead, the harness just calls `handle_event`. /// This has a few implications, e.g. shutting down tasks doesn't really make sense, /// and event ordering is deterministic. -pub async fn run_test_script< - TYPES, - S: TaskState>> + Send + 'static, ->( +pub async fn run_test_script> + Send + 'static>( mut script: TestScript, - state: S, + mut state: S, ) where TYPES: NodeType, { - let registry = Arc::new(TaskRegistry::default()); - let (to_task, mut from_test) = broadcast(1024); let (to_test, mut from_task) = broadcast(1024); - let mut task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); - for (stage_number, stage) in script.iter_mut().enumerate() { tracing::debug!("Beginning test stage {}", stage_number); for input in &stage.inputs { @@ -113,13 +106,12 @@ pub async fn run_test_script< .await .expect("Failed to broadcast input message"); - if !task.state_mut().filter(&Arc::new(input.clone())) { - tracing::debug!("Test sent: {:?}", input.clone()); + tracing::debug!("Test sent: {:?}", input.clone()); - if let Some(res) = S::handle_event(input.clone().into(), &mut task).await { - task.state_mut().handle_result(&res).await; - } - } + let _ = state + .handle_event(input.clone().into(), &to_test, &from_test) + .await + .inspect_err(|e| tracing::info!("{e}")); while from_test.try_recv().is_ok() {} } @@ -146,13 +138,12 @@ pub async fn run_test_script< .await .expect("Failed to re-broadcast output message"); - if !task.state_mut().filter(&received_output.clone()) { - tracing::debug!("Test sent: {:?}", received_output.clone()); + tracing::debug!("Test sent: {:?}", received_output.clone()); - if let Some(res) = S::handle_event(received_output.clone(), &mut task).await { - task.state_mut().handle_result(&res).await; - } - } + let _ = state + .handle_event(received_output.clone(), &to_test, &from_test) + .await + .inspect_err(|e| tracing::info!("{e}")); while from_test.try_recv().is_ok() {} @@ -167,7 +158,7 @@ pub async fn run_test_script< } for assert in &mut stage.asserts { - validate_task_state_or_panic(stage_number, task.state(), &**assert).await; + validate_task_state_or_panic(stage_number, &state, &**assert).await; } if let Ok(received_output) = from_task.try_recv() { @@ -177,6 +168,8 @@ pub async fn run_test_script< } pub struct TaskScript { + /// The time to wait on the receiver for this script. + pub timeout: Duration, pub state: S, pub expectations: Vec>, } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 40466bc004..9a99b5fdd1 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,12 +1,17 @@ -use std::collections::{BTreeMap, HashMap}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; -use either::{Left, Right}; +use anyhow::Result; +use async_lock::RwLock; +use async_trait::async_trait; +use futures::future::Either::{Left, Right}; use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; -use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ data::Leaf, event::Event, @@ -21,12 +26,14 @@ use hotshot_types::{ }; use snafu::Snafu; -use crate::test_runner::{HotShotTaskCompleted, LateStartNode, Node, TestRunner}; +use crate::{ + test_runner::{LateStartNode, Node, TestRunner}, + test_task::{TestResult, TestTaskState}, +}; + /// convience type for state and block pub type StateAndBlock = (Vec, Vec); -use super::GlobalTestEvent; - /// error for the spinning task #[derive(Snafu, Debug)] pub struct SpinningTaskErr {} @@ -34,7 +41,7 @@ pub struct SpinningTaskErr {} /// Spinning task state pub struct SpinningTask> { /// handle to the nodes - pub(crate) handles: Vec>, + pub(crate) handles: Arc>>>, /// late start nodes pub(crate) late_start: HashMap>, /// time based changes @@ -47,23 +54,7 @@ pub struct SpinningTask> { pub(crate) high_qc: QuorumCertificate, } -impl> TaskState for SpinningTask { - type Event = GlobalTestEvent; - - type Output = HotShotTaskCompleted; - - async fn handle_event(event: Self::Event, _task: &mut Task) -> Option { - if matches!(event, GlobalTestEvent::ShutDown) { - return Some(HotShotTaskCompleted::ShutDown); - } - None - } - - fn should_shutdown(_event: &Self::Event) -> bool { - false - } -} - +#[async_trait] impl< TYPES: NodeType, I: TestableNodeImplementation, @@ -73,21 +64,11 @@ where I: TestableNodeImplementation, I: NodeImplementation>, { - type Message = Event; - - type Output = HotShotTaskCompleted; + type Event = Event; - type State = Self; - - async fn handle_message( - message: Self::Message, - _id: usize, - task: &mut hotshot_task::task::TestTask, - ) -> Option { + async fn handle_event(&mut self, (message, _id): (Self::Event, usize)) -> Result<()> { let Event { view_number, event } = message; - let state = &mut task.state_mut(); - if let EventType::Decide { leaf_chain, qc: _, @@ -95,27 +76,28 @@ where } = event { let leaf = leaf_chain.first().unwrap().leaf.clone(); - if leaf.view_number() > state.last_decided_leaf.view_number() { - state.last_decided_leaf = leaf; + if leaf.view_number() > self.last_decided_leaf.view_number() { + self.last_decided_leaf = leaf; } } else if let EventType::QuorumProposal { proposal, sender: _, } = event { - if proposal.data.justify_qc.view_number() > state.high_qc.view_number() { - state.high_qc = proposal.data.justify_qc; + if proposal.data.justify_qc.view_number() > self.high_qc.view_number() { + self.high_qc = proposal.data.justify_qc.clone(); } } + // if we have not seen this view before - if state.latest_view.is_none() || view_number > state.latest_view.unwrap() { + if self.latest_view.is_none() || view_number > self.latest_view.unwrap() { // perform operations on the nodes - if let Some(operations) = state.changes.remove(&view_number) { + if let Some(operations) = self.changes.remove(&view_number) { for ChangeNode { idx, updown } in operations { match updown { UpDown::Up => { let node_id = idx.try_into().unwrap(); - if let Some(node) = state.late_start.remove(&node_id) { + if let Some(node) = self.late_start.remove(&node_id) { tracing::error!("Node {} spinning up late", idx); let node_id = idx.try_into().unwrap(); let context = match node.context { @@ -124,11 +106,11 @@ where // based on the received leaf. Right((storage, memberships, config)) => { let initializer = HotShotInitializer::::from_reload( - state.last_decided_leaf.clone(), + self.last_decided_leaf.clone(), TestInstanceState {}, None, view_number, - state.high_qc.clone(), + self.high_qc.clone(), Vec::new(), BTreeMap::new(), ); @@ -164,26 +146,26 @@ where networks: node.networks, handle, }; - state.handles.push(node.clone()); - node.handle.hotshot.start_consensus().await; + + self.handles.write().await.push(node); } } UpDown::Down => { - if let Some(node) = state.handles.get_mut(idx) { + if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); node.handle.shut_down().await; } } UpDown::NetworkUp => { - if let Some(handle) = state.handles.get(idx) { + if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); handle.networks.0.resume(); handle.networks.1.resume(); } } UpDown::NetworkDown => { - if let Some(handle) = state.handles.get(idx) { + if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks pausing", idx); handle.networks.0.pause(); handle.networks.1.pause(); @@ -194,10 +176,14 @@ where } // update our latest view - state.latest_view = Some(view_number); + self.latest_view = Some(view_number); } - None + Ok(()) + } + + fn check(&self) -> TestResult { + TestResult::Pass } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index c3289297c3..e84baa58c4 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -6,8 +6,11 @@ use std::{ }; use async_broadcast::broadcast; -use either::Either::{self, Left, Right}; -use futures::future::join_all; +use async_lock::RwLock; +use futures::future::{ + join_all, Either, + Either::{Left, Right}, +}; use hotshot::{ traits::TestableNodeImplementation, types::SystemContextHandle, HotShotInitializer, Memberships, SystemContext, @@ -16,7 +19,6 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; -use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, @@ -43,76 +45,11 @@ use crate::{ completion_task::CompletionTaskDescription, spinning_task::{ChangeNode, SpinningTask, UpDown}, test_launcher::{Networks, TestLauncher}, + test_task::{TestResult, TestTask}, txn_task::TxnTaskDescription, view_sync_task::ViewSyncTask, }; -/// a node participating in a test -#[derive(Clone)] -pub struct Node> { - /// The node's unique identifier - pub node_id: u64, - /// The underlying networks belonging to the node - pub networks: Networks, - /// The handle to the node's internals - pub handle: SystemContextHandle, -} - -/// Either the node context or the parameters to construct the context for nodes that start late. -pub type LateNodeContext = Either< - Arc>, - ( - >::Storage, - Memberships, - HotShotConfig<::SignatureKey>, - ), ->; - -/// A yet-to-be-started node that participates in tests -pub struct LateStartNode> { - /// The underlying networks belonging to the node - pub networks: Networks, - /// Either the context to which we will use to launch HotShot for initialized node when it's - /// time, or the parameters that will be used to initialize the node and launch HotShot. - pub context: LateNodeContext, -} - -/// The runner of a test network -/// spin up and down nodes, execute rounds -pub struct TestRunner< - TYPES: NodeType, - I: TestableNodeImplementation, - N: ConnectedNetwork, TYPES::SignatureKey>, -> { - /// test launcher, contains a bunch of useful metadata and closures - pub(crate) launcher: TestLauncher, - /// nodes in the test - pub(crate) nodes: Vec>, - /// nodes with a late start - pub(crate) late_start: HashMap>, - /// the next node unique identifier - pub(crate) next_node_id: u64, - /// Phantom for N - pub(crate) _pd: PhantomData, -} - -/// enum describing how the tasks completed -pub enum HotShotTaskCompleted { - /// the task shut down successfully - ShutDown, - /// the task encountered an error - Error(Box), - /// the streams the task was listening for died - StreamsDied, - /// we somehow lost the state - /// this is definitely a bug. - LostState, - /// lost the return value somehow - LostReturnValue, - /// Stream exists but missing handler - MissingHandler, -} - pub trait TaskErr: std::error::Error + Sync + Send + 'static {} impl TaskErr for T {} @@ -131,7 +68,7 @@ where /// if the test fails #[allow(clippy::too_many_lines)] pub async fn run_test>(mut self) { - let (tx, rx) = broadcast(EVENT_CHANNEL_SIZE); + let (test_sender, test_receiver) = broadcast(EVENT_CHANNEL_SIZE); let spinning_changes = self .launcher .metadata @@ -165,8 +102,6 @@ where internal_event_rxs.push(r); } - let reg = Arc::new(TaskRegistry::default()); - let TestRunner { ref launcher, nodes, @@ -178,13 +113,15 @@ where let mut task_futs = vec![]; let meta = launcher.metadata.clone(); + let handles = Arc::new(RwLock::new(nodes)); + let txn_task = if let TxnTaskDescription::RoundRobinTimeBased(duration) = meta.txn_description { let txn_task = TxnTask { - handles: nodes.clone(), + handles: Arc::clone(&handles), next_node_idx: Some(0), duration, - shutdown_chan: rx.clone(), + shutdown_chan: test_receiver.clone(), }; Some(txn_task) } else { @@ -195,9 +132,9 @@ where let CompletionTaskDescription::TimeBasedCompletionTaskBuilder(time_based) = meta.completion_task_description; let completion_task = CompletionTask { - tx: tx.clone(), - rx: rx.clone(), - handles: nodes.clone(), + tx: test_sender.clone(), + rx: test_receiver.clone(), + handles: Arc::clone(&handles), duration: time_based.duration, }; @@ -212,7 +149,7 @@ where } let spinning_task_state = SpinningTask { - handles: nodes.clone(), + handles: Arc::clone(&handles), late_start, latest_view: None, changes, @@ -224,25 +161,24 @@ where ) .await, }; - let spinning_task = TestTask::, SpinningTask>::new( - Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), + let spinning_task = TestTask::>::new( + spinning_task_state, event_rxs.clone(), + test_receiver.clone(), ); // add safety task let overall_safety_task_state = OverallSafetyTask { - handles: nodes.clone(), + handles: Arc::clone(&handles), ctx: RoundCtx::default(), properties: self.launcher.metadata.overall_safety_properties, + error: None, + test_sender, }; - let safety_task = TestTask::, OverallSafetyTask>::new( - Task::new( - tx.clone(), - rx.clone(), - reg.clone(), - overall_safety_task_state, - ), + let safety_task = TestTask::>::new( + overall_safety_task_state, event_rxs.clone(), + test_receiver.clone(), ); // add view sync task @@ -252,47 +188,55 @@ where _pd: PhantomData, }; - let view_sync_task = TestTask::, ViewSyncTask>::new( - Task::new(tx.clone(), rx.clone(), reg.clone(), view_sync_task_state), + let view_sync_task = TestTask::>::new( + view_sync_task_state, internal_event_rxs, + test_receiver.clone(), ); + let nodes = handles.read().await; + // wait for networks to be ready - for node in &nodes { + for node in &*nodes { node.networks.0.wait_for_ready().await; node.networks.1.wait_for_ready().await; } // Start hotshot - for node in nodes { + for node in &*nodes { if !late_start_nodes.contains(&node.node_id) { node.handle.hotshot.start_consensus().await; } } + + drop(nodes); + task_futs.push(safety_task.run()); task_futs.push(view_sync_task.run()); - if let Some(txn) = txn_task { - task_futs.push(txn.run()); - } - task_futs.push(completion_task.run()); task_futs.push(spinning_task.run()); + + // `generator` tasks that do not process events. + let txn_handle = txn_task.map(|txn| txn.run()); + let completion_handle = completion_task.run(); + let mut error_list = vec![]; #[cfg(async_executor_impl = "async-std")] { let results = join_all(task_futs).await; - tracing::info!("test tasks joined"); + tracing::error!("test tasks joined"); for result in results { match result { - HotShotTaskCompleted::ShutDown => { + TestResult::Pass => { info!("Task shut down successfully"); } - HotShotTaskCompleted::Error(e) => error_list.push(e), - _ => { - panic!("Future impl for task abstraction failed! This should never happen"); - } + TestResult::Fail(e) => error_list.push(e), } } + if let Some(handle) = txn_handle { + handle.cancel().await; + } + completion_handle.cancel().await; } #[cfg(async_executor_impl = "tokio")] @@ -302,28 +246,34 @@ where tracing::error!("test tasks joined"); for result in results { match result { - Ok(res) => { - match res { - HotShotTaskCompleted::ShutDown => { - info!("Task shut down successfully"); - } - HotShotTaskCompleted::Error(e) => error_list.push(e), - _ => { - panic!("Future impl for task abstraction failed! This should never happen"); - } + Ok(res) => match res { + TestResult::Pass => { + info!("Task shut down successfully"); } - } + TestResult::Fail(e) => error_list.push(e), + }, Err(e) => { tracing::error!("Error Joining the test task {:?}", e); } } } + + if let Some(handle) = txn_handle { + handle.abort(); + } + completion_handle.abort(); } assert!( error_list.is_empty(), "TEST FAILED! Results: {error_list:?}" ); + + let mut nodes = handles.write().await; + + for node in &mut *nodes { + node.handle.shut_down().await; + } } /// Add nodes. @@ -379,7 +329,6 @@ where let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); - // Create a future that waits for the networks to be ready let network0 = networks.0.clone(); let network1 = networks.1.clone(); let networks_ready_future = async move { @@ -387,7 +336,6 @@ where network1.wait_for_ready().await; }; - // Collect it so we can wait for all networks to be ready before starting the tasks networks_ready.push(networks_ready_future); if self.launcher.metadata.skip_late && late_start.contains(&node_id) { @@ -494,3 +442,51 @@ where .expect("Could not init hotshot") } } + +/// a node participating in a test +pub struct Node> { + /// The node's unique identifier + pub node_id: u64, + /// The underlying networks belonging to the node + pub networks: Networks, + /// The handle to the node's internals + pub handle: SystemContextHandle, +} + +/// Either the node context or the parameters to construct the context for nodes that start late. +pub type LateNodeContext = Either< + Arc>, + ( + >::Storage, + Memberships, + HotShotConfig<::SignatureKey>, + ), +>; + +/// A yet-to-be-started node that participates in tests +pub struct LateStartNode> { + /// The underlying networks belonging to the node + pub networks: Networks, + /// Either the context to which we will use to launch HotShot for initialized node when it's + /// time, or the parameters that will be used to initialize the node and launch HotShot. + pub context: LateNodeContext, +} + +/// The runner of a test network +/// spin up and down nodes, execute rounds +pub struct TestRunner< + TYPES: NodeType, + I: TestableNodeImplementation, + N: ConnectedNetwork, TYPES::SignatureKey>, +> { + /// test launcher, contains a bunch of useful metadata and closures + pub(crate) launcher: TestLauncher, + /// nodes in the test + pub(crate) nodes: Vec>, + /// nodes with a late start + pub(crate) late_start: HashMap>, + /// the next node unique identifier + pub(crate) next_node_id: u64, + /// Phantom for N + pub(crate) _pd: PhantomData, +} diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs new file mode 100644 index 0000000000..8346eec0cf --- /dev/null +++ b/testing/src/test_task.rs @@ -0,0 +1,137 @@ +use std::{sync::Arc, time::Duration}; + +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::{spawn, JoinHandle}; +use async_trait::async_trait; +use futures::future::select_all; +use hotshot_task_impls::{events::HotShotEvent, network::NetworkMessageTaskState}; +use hotshot_types::{ + message::{Message, Messages}, + traits::{network::ConnectedNetwork, node_implementation::NodeType}, +}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::{spawn, JoinHandle}; +use tracing::error; + +/// enum describing how the tasks completed +pub enum TestResult { + /// the test task passed + Pass, + /// the test task failed with an error + Fail(Box), +} + +#[async_trait] +/// Type for mutable task state that can be used as the state for a `Task` +pub trait TestTaskState: Send { + /// Type of event sent and received by the task + type Event: Clone + Send + Sync; + + /// Handles an event from one of multiple receivers. + async fn handle_event(&mut self, (event, id): (Self::Event, usize)) -> Result<()>; + + /// Check the result of the test. + fn check(&self) -> TestResult; +} + +/// A basic task which loops waiting for events to come from `event_receiver` +/// and then handles them using it's state +/// It sends events to other `Task`s through `event_sender` +/// This should be used as the primary building block for long running +/// or medium running tasks (i.e. anything that can't be described as a dependency task) +pub struct TestTask { + /// The state of the task. It is fed events from `event_sender` + /// and mutates it state ocordingly. Also it signals the task + /// if it is complete/should shutdown + state: S, + /// Receives events that are broadcast from any task, including itself + receivers: Vec>, + /// Receiver for test events, used for communication between test tasks. + test_receiver: Receiver, +} + +#[derive(Clone, Debug)] +pub enum TestEvent { + Shutdown, +} + +impl TestTask { + /// Create a new task + pub fn new( + state: S, + receivers: Vec>, + test_receiver: Receiver, + ) -> Self { + TestTask { + state, + receivers, + test_receiver, + } + } + + /// Spawn the task loop, consuming self. Will continue until + /// the task reaches some shutdown condition + pub fn run(mut self) -> JoinHandle { + spawn(async move { + loop { + if let Ok(TestEvent::Shutdown) = self.test_receiver.try_recv() { + break self.state.check(); + } + + let mut messages = Vec::new(); + + for receiver in &mut self.receivers { + messages.push(receiver.recv()); + } + + if let Ok((Ok(input), id, _)) = + async_timeout(Duration::from_millis(50), select_all(messages)).await + { + let _ = S::handle_event(&mut self.state, (input, id)) + .await + .inspect_err(|e| tracing::error!("{e}")); + } + } + }) + } +} + +/// Add the network task to handle messages and publish events. +pub async fn add_network_message_test_task< + TYPES: NodeType, + NET: ConnectedNetwork, TYPES::SignatureKey>, +>( + event_stream: Sender>>, + channel: Arc, +) -> JoinHandle<()> { + let net = Arc::clone(&channel); + let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { + event_stream: event_stream.clone(), + }; + + let network = Arc::clone(&net); + let mut state = network_state.clone(); + + async_spawn(async move { + loop { + let msgs = match network.recv_msgs().await { + Ok(msgs) => Messages(msgs), + Err(err) => { + error!("failed to receive messages: {err}"); + + // return zero messages so we sleep and try again + Messages(vec![]) + } + }; + if msgs.0.is_empty() { + // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 + async_sleep(Duration::from_millis(100)).await; + } else { + state.handle_messages(msgs.0).await; + } + } + }) +} diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index dcf72dcfb5..2dfabc0446 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -1,7 +1,8 @@ -use std::time::Duration; +use std::{sync::Arc, time::Duration}; -use async_broadcast::{Receiver, TryRecvError}; +use async_broadcast::Receiver; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; @@ -11,8 +12,7 @@ use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use super::GlobalTestEvent; -use crate::test_runner::{HotShotTaskCompleted, Node}; +use crate::{test_runner::Node, test_task::TestEvent}; // the obvious idea here is to pass in a "stream" that completes every `n` seconds // the stream construction can definitely be fancier but that's the baseline idea @@ -25,29 +25,23 @@ pub struct TxnTaskErr {} pub struct TxnTask> { // TODO should this be in a rwlock? Or maybe a similar abstraction to the registry is in order /// Handles for all nodes. - pub handles: Vec>, + pub handles: Arc>>>, /// Optional index of the next node. pub next_node_idx: Option, /// time to wait between txns pub duration: Duration, /// Receiver for the shutdown signal from the testing harness - pub shutdown_chan: Receiver, + pub shutdown_chan: Receiver, } impl> TxnTask { - pub fn run(mut self) -> JoinHandle { + pub fn run(mut self) -> JoinHandle<()> { async_spawn(async move { async_sleep(Duration::from_millis(100)).await; loop { async_sleep(self.duration).await; - match self.shutdown_chan.try_recv() { - Ok(_event) => { - return HotShotTaskCompleted::ShutDown; - } - Err(TryRecvError::Empty) => {} - Err(_) => { - return HotShotTaskCompleted::StreamsDied; - } + if let Ok(TestEvent::Shutdown) = self.shutdown_chan.try_recv() { + break; } self.submit_tx().await; } @@ -55,10 +49,11 @@ impl> TxnTask { } async fn submit_tx(&mut self) { if let Some(idx) = self.next_node_idx { + let handles = &self.handles.read().await; // submit to idx handle // increment state - self.next_node_idx = Some((idx + 1) % self.handles.len()); - match self.handles.get(idx) { + self.next_node_idx = Some((idx + 1) % handles.len()); + match handles.get(idx) { None => { tracing::error!("couldn't get node in txn task"); // should do error diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index 648d840dc3..9a885beec8 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -1,11 +1,12 @@ use std::{collections::HashSet, marker::PhantomData, sync::Arc}; -use hotshot_task::task::{Task, TaskState, TestTaskState}; +use anyhow::Result; +use async_trait::async_trait; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use crate::{test_runner::HotShotTaskCompleted, GlobalTestEvent}; +use crate::test_task::{TestResult, TestTaskState}; /// `ViewSync` Task error #[derive(Snafu, Debug, Clone)] @@ -24,49 +25,15 @@ pub struct ViewSyncTask> { pub(crate) _pd: PhantomData<(TYPES, I)>, } -impl> TaskState for ViewSyncTask { - type Event = GlobalTestEvent; - - type Output = HotShotTaskCompleted; - - async fn handle_event(event: Self::Event, task: &mut Task) -> Option { - let state = task.state_mut(); - match event { - GlobalTestEvent::ShutDown => match state.description.clone() { - ViewSyncTaskDescription::Threshold(min, max) => { - let num_hits = state.hit_view_sync.len(); - if min <= num_hits && num_hits <= max { - Some(HotShotTaskCompleted::ShutDown) - } else { - Some(HotShotTaskCompleted::Error(Box::new(ViewSyncTaskErr { - hit_view_sync: state.hit_view_sync.clone(), - }))) - } - } - }, - } - } - - fn should_shutdown(_event: &Self::Event) -> bool { - false - } -} - +#[async_trait] impl> TestTaskState for ViewSyncTask { - type Message = Arc>; + type Event = Arc>; - type Output = HotShotTaskCompleted; - - type State = Self; - - async fn handle_message( - message: Self::Message, - id: usize, - task: &mut hotshot_task::task::TestTask, - ) -> Option { - match message.as_ref() { + /// Handles an event from one of multiple receivers. + async fn handle_event(&mut self, (event, id): (Self::Event, usize)) -> Result<()> { + match event.as_ref() { // all the view sync events HotShotEvent::ViewSyncTimeout(_, _, _) | HotShotEvent::ViewSyncPreCommitVoteRecv(_) @@ -82,11 +49,27 @@ impl> TestTaskState | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) | HotShotEvent::ViewSyncTrigger(_) => { - task.state_mut().hit_view_sync.insert(id); + self.hit_view_sync.insert(id); } _ => (), } - None + + Ok(()) + } + + fn check(&self) -> TestResult { + match self.description.clone() { + ViewSyncTaskDescription::Threshold(min, max) => { + let num_hits = self.hit_view_sync.len(); + if min <= num_hits && num_hits <= max { + TestResult::Pass + } else { + TestResult::Fail(Box::new(ViewSyncTaskErr { + hit_view_sync: self.hit_view_sync.clone(), + })) + } + } + } } } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 73d9279bf7..34c86e38d5 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -11,13 +11,14 @@ use hotshot_example_types::{ }; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ + helpers::{ + build_system_handle, key_pair_for_id, permute_input_with_index_order, + vid_scheme_from_view_number, vid_share, + }, predicates::event::{ exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, timeout_vote_send, }, script::{run_test_script, TestScriptStage}, - helpers::{ - build_system_handle, vid_share, key_pair_for_id, vid_scheme_from_view_number, permute_input_with_index_order - }, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -121,8 +122,8 @@ async fn test_consensus_vote() { use hotshot::tasks::task_state::CreateTaskState; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ - script::{run_test_script, TestScriptStage}, helpers::build_system_handle, + script::{run_test_script, TestScriptStage}, view_generator::TestViewGenerator, }; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 178a8c187d..992e3fd2b9 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,16 +1,16 @@ use std::sync::Arc; use futures::StreamExt; -use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, }; use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ + helpers::build_system_handle, predicates::event::exact, script::{run_test_script, TestScriptStage}, - helpers::build_system_handle, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -94,7 +94,7 @@ async fn test_da_task() { asserts: vec![], }; - let da_state = DaTaskState::>::create_from(&handle).await; + let da_state = DaTaskState::::create_from(&handle).await; let stages = vec![view_1, view_2]; run_test_script(stages, da_state).await; @@ -181,7 +181,7 @@ async fn test_da_task_storage_failure() { asserts: vec![], }; - let da_state = DaTaskState::>::create_from(&handle).await; + let da_state = DaTaskState::::create_from(&handle).await; let stages = vec![view_1, view_2, view_3]; run_test_script(stages, da_state).await; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index ef3ce59143..ccb591b047 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -2,14 +2,17 @@ use std::{sync::Arc, time::Duration}; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; -use hotshot::{tasks::add_network_message_task, traits::implementations::MemoryNetwork}; +use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_task::task::{Task, TaskRegistry}; +use hotshot_task::task::{ConsensusTaskRegistry, Task}; use hotshot_task_impls::{ events::HotShotEvent, network::{self, NetworkEventTaskState}, }; -use hotshot_testing::{test_builder::TestDescription, view_generator::TestViewGenerator}; +use hotshot_testing::{ + test_builder::TestDescription, test_task::add_network_message_test_task, + view_generator::TestViewGenerator, +}; use hotshot_types::{ constants::BASE_VERSION, data::ViewNumber, @@ -59,16 +62,16 @@ async fn test_network_task() { storage, }; let (tx, rx) = async_broadcast::broadcast(10); - let task_reg = Arc::new(TaskRegistry::default()); + let mut task_reg = ConsensusTaskRegistry::new(); - let task = Task::new(tx.clone(), rx, task_reg.clone(), network_state); - task_reg.run_task(task).await; + let task = Task::new(network_state, tx.clone(), rx); + task_reg.run_task(task); let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().await.unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); - add_network_message_task(task_reg, out_tx.clone(), channel.clone()).await; + add_network_message_test_task(out_tx.clone(), channel.clone()).await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( view.quorum_proposal, @@ -124,16 +127,16 @@ async fn test_network_storage_fail() { storage, }; let (tx, rx) = async_broadcast::broadcast(10); - let task_reg = Arc::new(TaskRegistry::default()); + let mut task_reg = ConsensusTaskRegistry::new(); - let task = Task::new(tx.clone(), rx, task_reg.clone(), network_state); - task_reg.run_task(task).await; + let task = Task::new(network_state, tx.clone(), rx); + task_reg.run_task(task); let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().await.unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); - add_network_message_task(task_reg, out_tx.clone(), channel.clone()).await; + add_network_message_test_task(out_tx.clone(), channel.clone()).await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( view.quorum_proposal, diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs index 4ca6f36225..3faf7b470d 100644 --- a/testing/tests/tests_1/proposal_ordering.rs +++ b/testing/tests/tests_1/proposal_ordering.rs @@ -10,8 +10,8 @@ use hotshot_example_types::{ }; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ + helpers::{permute_input_with_index_order, vid_scheme_from_view_number, vid_share}, predicates::event::{all_predicates, exact, quorum_proposal_send, quorum_proposal_validated}, - helpers::{vid_share, vid_scheme_from_view_number, permute_input_with_index_order}, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -29,8 +29,8 @@ async fn test_ordering_with_specific_order(input_permutation: Vec) { use futures::StreamExt; use hotshot_example_types::state_types::TestValidatedState; use hotshot_testing::{ - script::{run_test_script, TestScriptStage}, helpers::build_system_handle, + script::{run_test_script, TestScriptStage}, }; async_compatibility_layer::logging::setup_logging(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 19fecf5e0d..9b8a37a529 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,5 +1,7 @@ #![cfg(feature = "dependency-tasks")] +use std::sync::Arc; + use committable::Committable; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ @@ -38,7 +40,6 @@ use hotshot_types::{ }; use jf_vid::VidScheme; use sha2::Digest; -use std::sync::Arc; fn make_payload_commitment( membership: &::Membership, diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 94b88b0177..35d7094bcc 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -15,9 +15,9 @@ use hotshot_task_impls::{ consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, }; use hotshot_testing::{ + helpers::vid_share, predicates::{event::*, upgrade::*}, script::{Expectations, TaskScript}, - helpers::vid_share, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -33,8 +33,8 @@ use vbs::version::Version; /// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. async fn test_consensus_task_upgrade() { use hotshot_testing::{ - script::{run_test_script, TestScriptStage}, helpers::build_system_handle, + script::{run_test_script, TestScriptStage}, }; async_compatibility_layer::logging::setup_logging(); @@ -118,9 +118,11 @@ async fn test_consensus_task_upgrade() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(3))), - quorum_proposal_validated(), - leaf_decided(), - exact(QuorumVoteSend(votes[2].clone())), + all_predicates(vec![ + quorum_proposal_validated(), + leaf_decided(), + exact(QuorumVoteSend(votes[2].clone())), + ]), ], asserts: vec![no_decided_upgrade_cert()], }; @@ -133,9 +135,11 @@ async fn test_consensus_task_upgrade() { ], outputs: vec![ exact(ViewChange(ViewNumber::new(4))), - quorum_proposal_validated(), - leaf_decided(), - exact(QuorumVoteSend(votes[3].clone())), + all_predicates(vec![ + quorum_proposal_validated(), + leaf_decided(), + exact(QuorumVoteSend(votes[3].clone())), + ]), ], asserts: vec![no_decided_upgrade_cert()], }; @@ -144,9 +148,7 @@ async fn test_consensus_task_upgrade() { inputs: vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], outputs: vec![ exact(ViewChange(ViewNumber::new(5))), - quorum_proposal_validated(), - upgrade_decided(), - leaf_decided(), + all_predicates(vec![quorum_proposal_validated(), upgrade_decided(), leaf_decided()]), ], asserts: vec![decided_upgrade_cert()], }; @@ -225,12 +227,7 @@ async fn test_upgrade_and_consensus_task() { .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); let consensus_state = ConsensusTaskState::::create_from(&handle).await; - let mut upgrade_state = UpgradeTaskState::< - TestTypes, - MemoryImpl, - SystemContextHandle, - >::create_from(&handle) - .await; + let mut upgrade_state = UpgradeTaskState::::create_from(&handle).await; upgrade_state.should_vote = |_| true; @@ -261,7 +258,8 @@ async fn test_upgrade_and_consensus_task() { ], ]; - let consensus_script = TaskScript { + let mut consensus_script = TaskScript { + timeout: Duration::from_millis(35), state: consensus_state, expectations: vec![ Expectations { @@ -291,7 +289,8 @@ async fn test_upgrade_and_consensus_task() { ], }; - let upgrade_script = TaskScript { + let mut upgrade_script = TaskScript { + timeout: Duration::from_millis(35), state: upgrade_state, expectations: vec![ Expectations { @@ -313,7 +312,7 @@ async fn test_upgrade_and_consensus_task() { ], }; - test_scripts![inputs, consensus_script, upgrade_script]; + test_scripts![inputs, consensus_script, upgrade_script].await; } #[cfg(not(feature = "dependency-tasks"))] @@ -418,12 +417,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { } let consensus_state = ConsensusTaskState::::create_from(&handle).await; - let mut upgrade_state = UpgradeTaskState::< - TestTypes, - MemoryImpl, - SystemContextHandle, - >::create_from(&handle) - .await; + let mut upgrade_state = UpgradeTaskState::::create_from(&handle).await; upgrade_state.should_vote = |_| true; @@ -507,7 +501,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], ]; - let consensus_script = TaskScript { + let mut consensus_script = TaskScript { + timeout: Duration::from_millis(35), state: consensus_state, expectations: vec![ Expectations { @@ -571,7 +566,8 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], }; - let upgrade_script = TaskScript { + let mut upgrade_script = TaskScript { + timeout: Duration::from_millis(35), state: upgrade_state, expectations: vec![ Expectations { @@ -605,5 +601,5 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ], }; - test_scripts![inputs, consensus_script, upgrade_script]; + test_scripts![inputs, consensus_script, upgrade_script].await; } diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index e78418c045..0375385089 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -8,9 +8,9 @@ use hotshot_example_types::{ }; use hotshot_task_impls::{events::HotShotEvent::*, vid::VidTaskState}; use hotshot_testing::{ + helpers::{build_system_handle, vid_scheme_from_view_number}, predicates::event::exact, script::{run_test_script, TestScriptStage}, - helpers::{build_system_handle, vid_scheme_from_view_number}, }; use hotshot_types::{ data::{null_block, DaProposal, VidDisperse, ViewNumber}, diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 811f74b675..72c2cdfbea 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -1,4 +1,4 @@ -use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, @@ -44,11 +44,6 @@ async fn test_view_sync_task() { output.push(HotShotEvent::ViewChange(ViewNumber::new(2))); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); - let view_sync_state = ViewSyncTaskState::< - TestTypes, - MemoryImpl, - SystemContextHandle, - >::create_from(&handle) - .await; + let view_sync_state = ViewSyncTaskState::::create_from(&handle).await; run_harness(input, output, view_sync_state, false).await; } diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 9f2a250a41..a11fb4a0e9 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -12,6 +12,7 @@ async fn test_catchup() { spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestDescription, TimingData}, }; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index bfcf2d3de0..01248b55d0 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -245,7 +245,6 @@ pub trait ConnectedNetwork: async fn wait_for_ready(&self); /// Blocks until the network is shut down - /// then returns true fn shut_down<'a, 'b>(&'a self) -> BoxSyncFuture<'b, ()> where 'a: 'b, From 0c07616f8f08ea748e56fb863e7460eb918355b2 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 30 May 2024 08:56:56 +0200 Subject: [PATCH 1056/1393] Don't send `ResponseMessage::NotFound` right away, wait for txns (#3231) * Don't send `ResponseMessage::NotFound` right away, wait for txns * Lints * Lint * Lock `Consensus` for writes only when it's really needed * Lints --- task-impls/src/da.rs | 10 ++----- task-impls/src/response.rs | 58 +++++++++++++++++++++++++++++--------- types/src/consensus.rs | 14 +++++++-- 3 files changed, 58 insertions(+), 24 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 1c4e596406..7f4ccdad9a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -9,7 +9,7 @@ use async_std::task::spawn_blocking; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::{Consensus, View}, + consensus::{Consensus, LockedConsensusState, View}, data::DaProposal, event::{Event, EventType}, message::Proposal, @@ -51,7 +51,7 @@ pub struct DaTaskState> { pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>, + pub consensus: LockedConsensusState, /// Membership for the DA committee pub da_membership: Arc, @@ -223,11 +223,7 @@ impl> DaTaskState { let membership = Arc::clone(&self.quorum_membership); let pk = self.private_key.clone(); async_spawn(async move { - consensus - .write() - .await - .calculate_and_update_vid(view, membership, &pk) - .await; + Consensus::calculate_and_update_vid(consensus, view, membership, &pk).await; }); } } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index ee25dbdf93..cdc26639ee 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,14 +1,14 @@ use std::sync::Arc; +use std::time::Duration; use async_broadcast::Receiver; -use async_compatibility_layer::art::async_spawn; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use futures::{channel::mpsc, FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ - consensus::Consensus, + consensus::{Consensus, LockedConsensusState}, data::VidDisperseShare, message::{DaConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage}, traits::{ @@ -25,12 +25,12 @@ use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; use crate::events::HotShotEvent; -/// Type alias for consensus state wrapped in a lock. -type LockedConsensusState = Arc>>; - /// Type alias for the channel that we receive requests from the network on. pub type RequestReceiver = mpsc::Receiver<(Message, ResponseChannel>)>; +/// Time to wait for txns before sending `ResponseMessage::NotFound` +const TXNS_TIMEOUT: Duration = Duration::from_millis(100); + /// Task state for the Network Request Task. The task is responsible for handling /// requests sent to this node by the network. It will validate the sender, /// parse the request, and try to find the data request in the consensus stores. @@ -124,19 +124,49 @@ impl NetworkResponseState { view: TYPES::Time, key: &TYPES::SignatureKey, ) -> Option>> { - let consensus = self.consensus.upgradable_read().await; - let contained = consensus + let contained = self + .consensus + .read() + .await .vid_shares() .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - consensus - .calculate_and_update_vid(view, Arc::clone(&self.quorum), &self.private_key) - .await; - return consensus.vid_shares().get(&view)?.get(key).cloned(); + if Consensus::calculate_and_update_vid( + Arc::clone(&self.consensus), + view, + Arc::clone(&self.quorum), + &self.private_key, + ) + .await + .is_none() + { + // Sleep in hope we receive txns in the meantime + async_sleep(TXNS_TIMEOUT).await; + Consensus::calculate_and_update_vid( + Arc::clone(&self.consensus), + view, + Arc::clone(&self.quorum), + &self.private_key, + ) + .await?; + } + return self + .consensus + .read() + .await + .vid_shares() + .get(&view)? + .get(key) + .cloned(); } - consensus.vid_shares().get(&view)?.get(key).cloned() + self.consensus + .read() + .await + .vid_shares() + .get(&view)? + .get(key) + .cloned() } /// Handle the request contained in the message. Returns the response we should send diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 79d24f418b..42f29ebaaf 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -6,6 +6,7 @@ use std::{ }; use anyhow::{ensure, Result}; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::{Commitment, Committable}; use tracing::{debug, error}; @@ -35,6 +36,9 @@ pub type VidShares = BTreeMap< HashMap<::SignatureKey, Proposal>>, >; +/// Type alias for consensus state wrapped in a lock. +pub type LockedConsensusState = Arc>>; + /// A reference to the consensus algorithm /// /// This will contain the state of all rounds. @@ -476,22 +480,26 @@ impl Consensus { .expect("Decided state not found! Consensus internally inconsistent") } + /// Associated helper function: + /// Takes `LockedConsensusState` which will be updated; locks it for read and write accordingly. /// Calculates `VidDisperse` based on the view, the txns and the membership, /// and updates `vid_shares` map with the signed `VidDisperseShare` proposals. /// Returned `Option` indicates whether the update has actually happened or not. pub async fn calculate_and_update_vid( - &mut self, + consensus: LockedConsensusState, view: ::Time, membership: Arc, private_key: &::PrivateKey, ) -> Option<()> { - let txns = self.saved_payloads().get(&view)?; + let consensus = consensus.upgradable_read().await; + let txns = consensus.saved_payloads().get(&view)?; let vid = VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, None).await; let shares = VidDisperseShare::from_vid_disperse(vid); + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; for share in shares { if let Some(prop) = share.to_proposal(private_key) { - self.update_vid_shares(view, prop); + consensus.update_vid_shares(view, prop); } } Some(()) From a87e4e8fbe15b663557b179003665ef0d49dc968 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 30 May 2024 08:38:44 -0600 Subject: [PATCH 1057/1393] [Dependency Tasks] Fix Test Build, Update State in Vote Task (#3233) * fix simple build failures, test inconsistencies, and lack of state updates * remove logs * lint * fix inconsistent view number * remove erroneous view decrement * PR feedback, drop held lock * use single data parameter * clippy * new view number * remove comment about upgrades * remove useless if statement * clippy --- hotshot/src/tasks/mod.rs | 7 + hotshot/src/tasks/task_state.rs | 1 + task-impls/src/consensus/helpers.rs | 6 - task-impls/src/consensus/mod.rs | 1 - task-impls/src/da.rs | 36 ++-- .../src/quorum_proposal/dependency_handle.rs | 4 +- task-impls/src/quorum_proposal/mod.rs | 12 +- .../src/quorum_proposal_recv/handlers.rs | 21 +- task-impls/src/quorum_vote.rs | 190 ++++++++++++++---- testing/src/predicates/event.rs | 11 + testing/tests/tests_1/da_task.rs | 3 +- .../tests_1/quorum_proposal_recv_task.rs | 3 +- testing/tests/tests_1/quorum_proposal_task.rs | 71 ++++--- testing/tests/tests_1/quorum_vote_task.rs | 14 +- 14 files changed, 277 insertions(+), 103 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 7e50849795..3145524422 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -19,6 +19,13 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; + +#[cfg(feature = "dependency-tasks")] +use hotshot_task_impls::{ + consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, +}; + use hotshot_types::{ constants::{Version01, VERSION_0_1}, message::{Message, Messages}, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a07887d297..73be2c31a4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -229,6 +229,7 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), + version: *handle.hotshot.version.read().await, } } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index ca3a806ceb..65ea9f7c57 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -848,12 +848,6 @@ pub async fn handle_quorum_proposal_validated> DaTaskState { #[cfg(async_executor_impl = "tokio")] let payload_commitment = payload_commitment.unwrap(); - let view = proposal.data.view_number(); + let view_number = proposal.data.view_number(); // Generate and send vote let Ok(vote) = DaVote::create_signed_vote( DaData { payload_commit: payload_commitment, }, - view, + view_number, &self.public_key, &self.private_key, ) else { @@ -202,19 +202,23 @@ impl> DaTaskState { // Ensure this view is in the view map for garbage collection, but do not overwrite if // there is already a view there: the replica task may have inserted a `Leaf` view which // contains strictly more information. - if !consensus.validated_state_map().contains_key(&view) { - consensus.update_validated_state_map( - view, - View { - view_inner: ViewInner::Da { payload_commitment }, - }, - ); + if !consensus.validated_state_map().contains_key(&view_number) { + let view = View { + view_inner: ViewInner::Da { payload_commitment }, + }; + consensus.update_validated_state_map(view_number, view.clone()); + broadcast_event( + HotShotEvent::ValidatedStateUpdated(view_number, view).into(), + &event_stream, + ) + .await; } // Record the payload we have promised to make available. - if let Err(e) = consensus - .update_saved_payloads(view, Arc::clone(&proposal.data.encoded_transactions)) - { + if let Err(e) = consensus.update_saved_payloads( + view_number, + Arc::clone(&proposal.data.encoded_transactions), + ) { tracing::trace!("{e:?}"); } // Optimistically calculate and update VID if we know that the primary network is down. @@ -223,7 +227,13 @@ impl> DaTaskState { let membership = Arc::clone(&self.quorum_membership); let pk = self.private_key.clone(); async_spawn(async move { - Consensus::calculate_and_update_vid(consensus, view, membership, &pk).await; + Consensus::calculate_and_update_vid( + consensus, + view_number, + membership, + &pk, + ) + .await; }); } } diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index 5d00baabd5..7c8093f4f6 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -40,7 +40,7 @@ pub(crate) enum ProposalDependency { TimeoutCert, /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv` or the - /// `LivenessCheckProposalRecv` event during the liveness check in `QuorumProposalRecv`. + /// `QuorumProposalLivenessValidated` event during the liveness check in `QuorumProposalRecv`. Proposal, /// For the `VidShareValidated` event. @@ -212,7 +212,7 @@ impl HandleDepOutput for ProposalDependencyHandle { vid_share = Some(share.clone()); } _ => { - // LivenessCheckProposalRecv and QuorumProposalValidated are implicitly + // QuorumProposalLivenessValidated and QuorumProposalValidated are implicitly // handled here. } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index b4931c1082..b649d234d5 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -282,13 +282,15 @@ impl> QuorumProposalTaskState 1 { + primary_deps.push(validated_state_update_dependency); + } + AndDependency::from_deps(vec![OrDependency::from_deps(vec![ AndDependency::from_deps(vec![ - OrDependency::from_deps(vec![AndDependency::from_deps(vec![ - payload_commitment_dependency, - vid_share_dependency, - validated_state_update_dependency, - ])]), + OrDependency::from_deps(vec![AndDependency::from_deps(primary_deps)]), OrDependency::from_deps(secondary_deps), ]), ])]) diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 3b488132ff..4613b60178 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -199,12 +199,17 @@ pub(crate) async fn handle_quorum_proposal_recv> { view_number: TYPES::Time, /// Event sender. sender: Sender>>, + /// The current version of HotShot + version: Version, +} + +impl + 'static> VoteDependencyHandle { + /// Updates the shared consensus state with the new voting data. + async fn update_shared_state( + &self, + proposed_leaf: &Leaf, + vid_share: &Proposal>, + ) -> Result<()> { + let consensus_reader = self.consensus.read().await; + let justify_qc = &proposed_leaf.justify_qc(); + + // Justify qc's leaf commitment should be the same as the parent's leaf commitment. + let parent = consensus_reader + .saved_leaves() + .get(&justify_qc.date().leaf_commit) + .cloned() + .context(format!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.date().leaf_commit, + proposed_leaf.view_number(), + ))?; + + let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { + bail!("Parent state not found! Consensus internally inconsistent") + }; + + drop(consensus_reader); + + let (validated_state, state_delta) = parent_state + .validate_and_apply_header( + &self.instance_state, + &parent, + &proposed_leaf.block_header().clone(), + vid_share.data.common.clone(), + self.version, + ) + .await + .context("Block header doesn't extend the proposal!")?; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + + // Now that we've rounded everyone up, we need to update the shared state and broadcast our events. + // We will defer broadcast until all states are updated to avoid holding onto the lock during a network call. + let mut consensus_writer = self.consensus.write().await; + + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: Arc::clone(&state), + delta: Some(Arc::clone(&delta)), + }, + }; + consensus_writer.update_validated_state_map(proposed_leaf.view_number(), view.clone()); + consensus_writer.update_saved_leaves(proposed_leaf.clone()); + + // Kick back our updated structures for downstream usage. + let new_leaves = consensus_writer.saved_leaves().clone(); + let new_state = consensus_writer.validated_state_map().clone(); + drop(consensus_writer); + + // Broadcast now that the lock is dropped. + broadcast_event( + HotShotEvent::ValidatedStateUpdated(proposed_leaf.view_number(), view).into(), + &self.sender, + ) + .await; + + // Send the new state up to the sequencer. + self.storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await?; + + Ok(()) + } + + /// Submits the `QuorumVoteSend` event if all the dependencies are met. + async fn submit_vote( + &self, + leaf: Leaf, + vid_share: Proposal>, + ) -> Result<()> { + ensure!( + self.quorum_membership.has_stake(&self.public_key), + format!( + "We were not chosen for quorum committee on {:?}", + self.view_number + ), + ); + + // Create and send the vote. + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(), + }, + self.view_number, + &self.public_key, + &self.private_key, + ) + .context("Failed to sign vote")?; + debug!( + "Sending vote to next quorum leader {:?}", + vote.view_number() + 1 + ); + // Add to the storage. + self.storage + .write() + .await + .append_vid(&vid_share) + .await + .context("Failed to store VID share")?; + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; + + Ok(()) + } } impl + 'static> HandleDepOutput for VoteDependencyHandle { type Output = Vec>>; + #[allow(clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { #[allow(unused_variables)] @@ -145,15 +269,14 @@ impl + 'static> HandleDepOutput ) .await; - if !self.quorum_membership.has_stake(&self.public_key) { - debug!( - "We were not chosen for quorum committee on {:?}", + let Some(vid_share) = vid_share else { + error!( + "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", self.view_number ); return; - } + }; - // Create and send the vote. let Some(leaf) = leaf else { error!( "We don't have the leaf for this view {:?}, but we should, because the vote dependencies have completed.", @@ -161,37 +284,15 @@ impl + 'static> HandleDepOutput ); return; }; - let message = if let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(), - }, - self.view_number, - &self.public_key, - &self.private_key, - ) { - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Unable to sign quorum vote!"); + + // Update internal state + if let Err(e) = self.update_shared_state(&leaf, &vid_share).await { + error!("Failed to update shared consensus state; error = {e:#}"); return; - }; - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); - // Add to the storage. - let Some(vid_share) = vid_share else { - error!( - "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", - self.view_number - ); - return; - }; - if let Err(e) = self.storage.write().await.append_vid(&vid_share).await { - error!("Failed to store VID share with error {:?}", e); - return; - } - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; + } + + if let Err(e) = self.submit_vote(leaf, vid_share).await { + debug!("Failed to vote; error = {e:#}"); } } } @@ -238,6 +339,9 @@ pub struct QuorumVoteTaskState> { /// Reference to the storage. pub storage: Arc>, + + /// The curent version of HotShot + pub version: Version, } impl> QuorumVoteTaskState { @@ -309,7 +413,16 @@ impl> QuorumVoteTaskState>>, event: Option>>, ) { + if view_number <= self.latest_voted_view { + tracing::trace!("We have already voted for this view"); + return; + } + + debug!( + "Attempting to make vote dependency task for view {view_number:?} and event {event:?}" + ); if self.vote_dependencies.contains_key(&view_number) { + debug!("Task already exists"); return; } @@ -373,6 +486,7 @@ impl> QuorumVoteTaskState>| matches!(e.as_ref(), VoteNow(..))); Box::new(EventPredicate { check, info }) } + +pub fn validated_state_updated() -> Box> +where + TYPES: NodeType, +{ + let info = "ValidatedStateUpdated".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(e.as_ref(), ValidatedStateUpdated(..)) + }); + Box::new(EventPredicate { check, info }) +} diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 992e3fd2b9..710da5fcdd 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -9,7 +9,7 @@ use hotshot_example_types::{ use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ helpers::build_system_handle, - predicates::event::exact, + predicates::event::{exact, validated_state_updated}, script::{run_test_script, TestScriptStage}, view_generator::TestViewGenerator, }; @@ -90,6 +90,7 @@ async fn test_da_task() { outputs: vec![ exact(DaProposalValidated(proposals[1].clone(), leaders[1])), exact(DaVoteSend(votes[1].clone())), + validated_state_updated(), ], asserts: vec![], }; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 66f57f4d80..75af361b5d 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -1,6 +1,7 @@ // TODO: Remove after integration #![allow(unused_imports)] +use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{ @@ -161,7 +162,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), )), exact(NewUndecidedView(leaves[2].clone())), - exact(LivenessCheckProposalRecv(proposals[2].data.clone())), + exact(QuorumProposalLivenessValidated(proposals[2].data.clone())), vote_now(), ], asserts: vec![], diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 9b8a37a529..cbb05daeac 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -3,7 +3,10 @@ use std::sync::Arc; use committable::Committable; +use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; +use hotshot::traits::ValidatedState; +use hotshot_example_types::state_types::TestValidatedState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, @@ -23,7 +26,6 @@ use hotshot_testing::{ Predicate, }, script::{run_test_script, TestScriptStage}, - test_helpers::create_fake_view_with_leaf, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -85,12 +87,21 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { consensus_writer .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); } - drop(consensus_writer); // We must send the genesis cert here to initialize hotshot successfully. - let genesis_cert = QuorumCertificate::genesis(&*handle.hotshot.instance_state()); - let genesis_leaf = Leaf::genesis(&*handle.hotshot.instance_state()); + let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); + let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + // Special case: the genesis validated state is already + // present + consensus_writer.update_validated_state_map( + ViewNumber::new(0), + build_fake_view_with_leaf(genesis_leaf.clone()), + ); + drop(consensus_writer); let view = TestScriptStage { inputs: vec![ @@ -103,7 +114,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), - ValidatedStateUpdated(ViewNumber::new(0), build_fake_view_with_leaf(genesis_leaf)), + ValidatedStateUpdated( + proposals[0].data.view_number(), + build_fake_view_with_leaf(leaves[0].clone()), + ), ], outputs: vec![ exact(UpdateHighQc(genesis_cert.clone())), @@ -123,8 +137,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { - use hotshot_types::vote::HasViewNumber; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -141,7 +153,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let mut vids = Vec::new(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - for view in (&mut generator).take(5) { + for view in (&mut generator).take(5).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); @@ -152,15 +164,26 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { consensus_writer .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); } - drop(consensus_writer); - - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. + let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&*handle.hotshot.instance_state()); + let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; + + // Special case: the genesis validated state is already + // present + consensus_writer.update_validated_state_map( + ViewNumber::new(0), + build_fake_view_with_leaf(genesis_leaf.clone()), + ); + + drop(consensus_writer); + + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let genesis_view = TestScriptStage { inputs: vec![ @@ -174,7 +197,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( - ViewNumber::new(0), + genesis_cert.view_number(), build_fake_view_with_leaf(genesis_leaf.clone()), ), ], @@ -342,7 +365,7 @@ async fn test_quorum_proposal_task_qc_timeout() { ), VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), ValidatedStateUpdated( - ViewNumber::new(2), + proposals[1].data.view_number(), build_fake_view_with_leaf(leaves[1].clone()), ), ], @@ -421,8 +444,8 @@ async fn test_quorum_proposal_task_view_sync() { ), VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), ValidatedStateUpdated( - ViewNumber::new(1), - build_fake_view_with_leaf(leaves[1].clone()), + proposals[0].data.view_number(), + build_fake_view_with_leaf(leaves[0].clone()), ), ], outputs: vec![quorum_proposal_send()], @@ -436,7 +459,6 @@ async fn test_quorum_proposal_task_view_sync() { run_test_script(script, quorum_proposal_task_state).await; } -#[ignore] #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -475,8 +497,11 @@ async fn test_quorum_proposal_livness_check_proposal() { // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. + let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&*handle.hotshot.instance_state()); + let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let genesis_view = TestScriptStage { inputs: vec![ @@ -490,7 +515,7 @@ async fn test_quorum_proposal_livness_check_proposal() { ), VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( - ViewNumber::new(0), + genesis_cert.view_number(), build_fake_view_with_leaf(genesis_leaf.clone()), ), ], @@ -501,7 +526,7 @@ async fn test_quorum_proposal_livness_check_proposal() { // We send all the events that we'd have otherwise received to ensure the states are updated. let view_1 = TestScriptStage { inputs: vec![ - QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf), + QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf.clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -525,7 +550,7 @@ async fn test_quorum_proposal_livness_check_proposal() { // the QuorumProposalRecv task tests. let view_2 = TestScriptStage { inputs: vec![ - LivenessCheckProposalRecv(proposals[1].data.clone()), + QuorumProposalLivenessValidated(proposals[1].data.clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(3)), @@ -721,7 +746,7 @@ async fn test_quorum_proposal_task_happy_path_leaf_ascension() { current_chain_length = 3; } // This unwrap is safe here - let view = generator.next().unwrap(); + let view = generator.next().await.unwrap(); let proposal = view.quorum_proposal.clone(); // This intentionally grabs the wrong leaf since it *really* doesn't @@ -789,7 +814,7 @@ async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { current_chain_length = 3; } // This unwrap is safe here - let view = generator.next().unwrap(); + let view = generator.next().await.unwrap(); let proposal = view.quorum_proposal.clone(); // This intentionally grabs the wrong leaf since it *really* doesn't diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b838edb310..d732f5f9ed 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -14,7 +14,7 @@ async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, - predicates::event::{exact, quorum_vote_send}, + predicates::event::{exact, quorum_vote_send, validated_state_updated}, script::{run_test_script, TestScriptStage}, view_generator::TestViewGenerator, }; @@ -32,12 +32,20 @@ async fn test_quorum_vote_task_success() { let mut leaves = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); + let consensus = handle.hotshot.consensus().clone(); + let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); + consensus_writer.update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ); + consensus_writer.update_saved_leaves(view.leaf.clone()); } + drop(consensus_writer); // Send the quorum proposal, DAC, VID share data, and validated state, in which case a dummy // vote can be formed and the view number will be updated. @@ -55,6 +63,7 @@ async fn test_quorum_vote_task_success() { exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + validated_state_updated(), quorum_vote_send(), ], asserts: vec![], @@ -73,7 +82,7 @@ async fn test_quorum_vote_task_vote_now() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, - predicates::event::{exact, quorum_vote_send}, + predicates::event::{exact, quorum_vote_send, validated_state_updated}, script::{run_test_script, TestScriptStage}, view_generator::TestViewGenerator, }; @@ -103,6 +112,7 @@ async fn test_quorum_vote_task_vote_now() { inputs: vec![VoteNow(view.view_number, vote_dependency_data)], outputs: vec![ exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), + validated_state_updated(), quorum_vote_send(), ], asserts: vec![], From d30b9603acb35d63d43d7f4c734165752aa12e72 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 30 May 2024 10:38:53 -0400 Subject: [PATCH 1058/1393] Fix network task filters and revert CI changes (#3246) --- task-impls/src/network.rs | 21 ++++++++++++++++++--- task-impls/src/response.rs | 3 +-- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index bcc5d59d9f..fc663d6fe1 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -38,6 +38,8 @@ pub fn quorum_filter(event: &Arc>) -> bool | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::DacSend(_, _) | HotShotEvent::TimeoutVoteSend(_) + | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::ViewChange(_) ) } @@ -45,7 +47,10 @@ pub fn quorum_filter(event: &Arc>) -> bool pub fn upgrade_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::UpgradeProposalSend(_, _) | HotShotEvent::UpgradeVoteSend(_) + HotShotEvent::UpgradeProposalSend(_, _) + | HotShotEvent::UpgradeVoteSend(_) + | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::ViewChange(_) ) } @@ -53,13 +58,21 @@ pub fn upgrade_filter(event: &Arc>) -> bool pub fn da_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::DaProposalSend(_, _) | HotShotEvent::DaVoteSend(_) + HotShotEvent::DaProposalSend(_, _) + | HotShotEvent::DaVoteSend(_) + | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::ViewChange(_) ) } /// vid filter pub fn vid_filter(event: &Arc>) -> bool { - !matches!(event.as_ref(), HotShotEvent::VidDisperseSend(_, _)) + !matches!( + event.as_ref(), + HotShotEvent::VidDisperseSend(_, _) + | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::ViewChange(_) + ) } /// view sync filter @@ -72,6 +85,8 @@ pub fn view_sync_filter(event: &Arc>) -> bo | HotShotEvent::ViewSyncPreCommitVoteSend(_) | HotShotEvent::ViewSyncCommitVoteSend(_) | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::ViewChange(_) ) } /// the network message task state diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index cdc26639ee..a7e5eaa57d 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,5 +1,4 @@ -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use async_broadcast::Receiver; use async_compatibility_layer::art::{async_sleep, async_spawn}; From f5c7395f0a3ab7d3af26d1787b3ded35a7d9074e Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Thu, 30 May 2024 12:34:34 -0400 Subject: [PATCH 1059/1393] add SimpleBuilderConfig w/ default impl (#3252) Co-authored-by: tbro --- examples/infra/mod.rs | 5 +++-- testing/src/block_builder.rs | 20 ++++++++++++++++---- 2 files changed, 19 insertions(+), 6 deletions(-) mode change 100644 => 100755 examples/infra/mod.rs diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs old mode 100644 new mode 100755 index cd205149ee..0ef93338bf --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -43,7 +43,8 @@ use hotshot_orchestrator::{ }, }; use hotshot_testing::block_builder::{ - RandomBuilderImplementation, SimpleBuilderImplementation, TestBuilderImplementation, + RandomBuilderImplementation, SimpleBuilderConfig, SimpleBuilderImplementation, + TestBuilderImplementation, }; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -939,7 +940,7 @@ pub async fn main_entry_point< let (builder_task, builder_url) = >::start( run_config.config.num_nodes_with_stake.into(), - (), + SimpleBuilderConfig::default(), ) .await; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 9e296ff27c..046159abff 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -70,6 +70,19 @@ where } } +/// Configuration for `SimpleBuilder` +pub struct SimpleBuilderConfig { + port: u16, +} + +impl Default for SimpleBuilderConfig { + fn default() -> Self { + Self { + port: portpicker::pick_unused_port().expect("No free ports"), + } + } +} + pub struct SimpleBuilderImplementation; #[async_trait] @@ -77,14 +90,13 @@ impl TestBuilderImplementation for SimpleBuilderImplemen where ::InstanceState: Default, { - type Config = (); + type Config = SimpleBuilderConfig; async fn start( num_storage_nodes: usize, - _config: Self::Config, + config: Self::Config, ) -> (Option>>, Url) { - let port = portpicker::pick_unused_port().expect("No free ports"); - let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); + let url = Url::parse(&format!("http://localhost:{0}", config.port)).expect("Valid URL"); let (source, task) = make_simple_builder(num_storage_nodes).await; let builder_api = hotshot_builder_api::builder::define_api::< From a8e81759a7a432e81911ef024af4c4799cf785e7 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Fri, 31 May 2024 01:41:01 +0500 Subject: [PATCH 1060/1393] make SimpleBuilderConfig port public (#3254) --- testing/src/block_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index 046159abff..afc295ab1e 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -72,7 +72,7 @@ where /// Configuration for `SimpleBuilder` pub struct SimpleBuilderConfig { - port: u16, + pub port: u16, } impl Default for SimpleBuilderConfig { From 2e33537538b25a13cb8cde48f925902b4a398dd4 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 31 May 2024 08:32:39 +0800 Subject: [PATCH 1061/1393] [METRICS] - Individual node health (#3235) * Add metrics * Fix build * fmt * Use workspace version --- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 1 + hotshot/src/tasks/task_state.rs | 7 +++++-- task-impls/Cargo.toml | 2 +- task-impls/src/consensus/helpers.rs | 4 ++++ task-impls/src/consensus/mod.rs | 8 ++++++++ task-impls/src/consensus/view_change.rs | 14 +++++++++++++- task-impls/src/consensus2/handlers.rs | 19 +++++++++++++++++++ task-impls/src/consensus2/mod.rs | 3 +++ .../src/quorum_proposal_recv/handlers.rs | 3 +++ task-impls/src/quorum_proposal_recv/mod.rs | 3 +++ types/src/consensus.rs | 8 ++++++++ types/src/event.rs | 5 ----- 13 files changed, 69 insertions(+), 10 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 51f6683e03..7b2b7b547d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -106,7 +106,7 @@ lru = "0.12" hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } -chrono = "0.4" +chrono = { workspace = true } vbs = { workspace = true } sha2.workspace = true local-ip-address = "0.6" diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index eb3fd6b052..71877a2dfd 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -31,6 +31,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" bincode = { workspace = true } +chrono = { workspace = true } clap = { workspace = true, optional = true } committable = { workspace = true } custom_debug = { workspace = true } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 73be2c31a4..f4ed651a21 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -4,7 +4,9 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; +use crate::types::SystemContextHandle; use async_trait::async_trait; +use chrono::Utc; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, da::DaTaskState, quorum_proposal::QuorumProposalTaskState, @@ -18,8 +20,6 @@ use hotshot_types::traits::{ }; use vbs::version::StaticVersionType; -use crate::types::SystemContextHandle; - /// Trait for creating task states. #[async_trait] pub trait CreateTaskState @@ -184,6 +184,7 @@ impl> CreateTaskState timeout: handle.hotshot.config.next_view_timeout, round_start_delay: handle.hotshot.config.round_start_delay, cur_view: handle.cur_view().await, + cur_view_time: Utc::now().timestamp(), payload_commitment_and_metadata: None, vote_collector: None.into(), timeout_vote_collector: None.into(), @@ -281,6 +282,7 @@ impl> CreateTaskState private_key: handle.private_key().clone(), consensus, cur_view: handle.cur_view().await, + cur_view_time: Utc::now().timestamp(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -321,6 +323,7 @@ impl> CreateTaskState timeout_vote_collector: None.into(), storage: Arc::clone(&handle.storage), cur_view: handle.cur_view().await, + cur_view_time: Utc::now().timestamp(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), timeout_task, timeout: handle.hotshot.config.next_view_timeout, diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 120a8108e4..4cc0f9587b 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -19,7 +19,7 @@ async-trait = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } cdn-proto = { workspace = true } -chrono = "0.4" +chrono = { workspace = true } committable = { workspace = true } either = { workspace = true } futures = { workspace = true } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 65ea9f7c57..d4caddc39c 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -513,6 +513,8 @@ pub(crate) async fn handle_quorum_proposal_recv> { /// View number this view is executing in. pub cur_view: TYPES::Time, + /// Timestamp this view starts at. + pub cur_view_time: i64, + /// The commitment to the current block payload and its metadata submitted to DA. pub payload_commitment_and_metadata: Option>, @@ -544,9 +547,11 @@ impl> ConsensusTaskState self.timeout, Arc::clone(&self.consensus), &mut self.cur_view, + &mut self.cur_view_time, &mut self.timeout_task, &self.output_event_stream, DONT_SEND_VIEW_CHANGE_EVENT, + self.quorum_membership.leader(old_view_number) == self.public_key, ) .await { @@ -602,6 +607,9 @@ impl> ConsensusTaskState .await; let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); + if self.quorum_membership.leader(view) == self.public_key { + consensus.metrics.number_of_timeouts_as_leader.add(1); + } } #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::SendPayloadCommitmentAndMetadata( diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index e9d067beb4..5796d0e862 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,4 +1,3 @@ -use core::time::Duration; use std::sync::Arc; use anyhow::{ensure, Result}; @@ -7,6 +6,8 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use chrono::Utc; +use core::time::Duration; use hotshot_types::{ consensus::Consensus, event::{Event, EventType}, @@ -40,9 +41,11 @@ pub(crate) async fn update_view( timeout: u64, consensus: Arc>>, cur_view: &mut TYPES::Time, + cur_view_time: &mut i64, timeout_task: &mut JoinHandle<()>, output_event_stream: &Sender>, send_view_change_event: bool, + is_old_view_leader: bool, ) -> Result<()> { ensure!( new_view > *cur_view, @@ -104,6 +107,15 @@ pub(crate) async fn update_view( .metrics .current_view .set(usize::try_from(cur_view.u64()).unwrap()); + let new_view_time = Utc::now().timestamp(); + if is_old_view_leader { + #[allow(clippy::cast_precision_loss)] + consensus + .metrics + .view_duration_as_leader + .add_point((new_view_time - *cur_view_time) as f64); + } + *cur_view_time = new_view_time; // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index ee8c8db214..d37bbb6e8c 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -3,6 +3,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, simple_certificate::{QuorumCertificate, TimeoutCertificate}, @@ -161,6 +162,15 @@ pub(crate) async fn handle_view_change .metrics .number_of_timeouts .add(1); + if task_state.quorum_membership.leader(view_number) == task_state.public_key { + task_state + .consensus + .read() + .await + .metrics + .number_of_timeouts_as_leader + .add(1); + } Ok(()) } diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 81aab4052c..fe01566aa5 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -72,6 +72,9 @@ pub struct Consensus2TaskState> { /// The view number that this node is currently executing in. pub cur_view: TYPES::Time, + /// Timestamp this view starts at. + pub cur_view_time: i64, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 4613b60178..89d08b0dd5 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -129,6 +129,7 @@ pub(crate) async fn handle_quorum_proposal_recv, ) -> Result>> { let sender = sender.clone(); + let cur_view = task_state.cur_view; validate_proposal_view_and_certs( proposal, @@ -156,9 +157,11 @@ pub(crate) async fn handle_quorum_proposal_recv, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 42f29ebaaf..f7da6c7325 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -109,6 +109,8 @@ pub struct ConsensusMetricsValue { pub number_of_views_since_last_decide: Box, /// Number of views that are in-flight since the last anchor view pub number_of_views_per_decide_event: Box, + /// Duration of views as leader + pub view_duration_as_leader: Box, /// Number of invalid QCs we've seen since the last commit. pub invalid_qc: Box, /// Number of outstanding transactions @@ -117,6 +119,8 @@ pub struct ConsensusMetricsValue { pub outstanding_transactions_memory_size: Box, /// Number of views that timed out pub number_of_timeouts: Box, + /// Number of views that timed out as leader + pub number_of_timeouts_as_leader: Box, /// The number of empty blocks that have been proposed pub number_of_empty_blocks_proposed: Box, } @@ -135,12 +139,16 @@ impl ConsensusMetricsValue { .create_gauge(String::from("number_of_views_since_last_decide"), None), number_of_views_per_decide_event: metrics .create_histogram(String::from("number_of_views_per_decide_event"), None), + view_duration_as_leader: metrics + .create_histogram(String::from("view_duration_as_leader"), None), invalid_qc: metrics.create_gauge(String::from("invalid_qc"), None), outstanding_transactions: metrics .create_gauge(String::from("outstanding_transactions"), None), outstanding_transactions_memory_size: metrics .create_gauge(String::from("outstanding_transactions_memory_size"), None), number_of_timeouts: metrics.create_counter(String::from("number_of_timeouts"), None), + number_of_timeouts_as_leader: metrics + .create_counter(String::from("number_of_timeouts_as_leader"), None), number_of_empty_blocks_proposed: metrics .create_counter(String::from("number_of_empty_blocks_proposed"), None), } diff --git a/types/src/event.rs b/types/src/event.rs index ebb0814deb..293839d52d 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -123,11 +123,6 @@ pub enum EventType { /// The view that timed out view_number: TYPES::Time, }, - /// A next leader task was canceled by a timeout interrupt - NextLeaderViewTimeout { - /// The view that timed out - view_number: TYPES::Time, - }, /// The view has finished. If values were decided on, a `Decide` event will also be emitted. ViewFinished { /// The view number that has just finished From baaeb1e5234b8915e82a9dd9973ed56fef9a5da8 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 31 May 2024 13:31:39 -0600 Subject: [PATCH 1062/1393] [CX_HARDENING] - Develop A Better Framework for Input Permutation (#3207) * moving to macro * introduce helper * upgrade task * support all predicates * tmp * done with impl and macro * docs * new all predicates macro --- macros/src/lib.rs | 172 +++++++++++++++++++----- testing/src/predicates/event.rs | 9 ++ testing/src/script.rs | 56 ++++++-- testing/tests/tests_1/consensus_task.rs | 56 ++++---- testing/tests/tests_1/da_task.rs | 89 ++++++------ testing/tests/tests_1/upgrade_task.rs | 113 ++++++++-------- 6 files changed, 326 insertions(+), 169 deletions(-) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index b866a7ed02..d6f8adb162 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -311,7 +311,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { validate_task_state_or_panic_in_script, }; - use hotshot_testing::{predicates::Predicate}; + use hotshot_testing::{predicates::{Predicate, PredicateResult}}; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; use async_compatibility_layer::art::async_timeout; @@ -336,70 +336,89 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { for input in &input_group { #( - tracing::debug!("Test sent: {:?}", input); + tracing::debug!("Test sent: {:?}", input); - to_task - .broadcast(input.clone().into()) - .await - .expect("Failed to broadcast input message"); + to_task + .broadcast(input.clone().into()) + .await + .expect("Failed to broadcast input message"); - let _ = #scripts.state - .handle_event(input.clone().into(), &to_test, &from_test) - .await - .inspect_err(|e| tracing::info!("{e}")); + let _ = #scripts.state + .handle_event(input.clone().into(), &to_test, &from_test) + .await + .inspect_err(|e| tracing::info!("{e}")); - while from_test.try_recv().is_ok() {} + while from_test.try_recv().is_ok() {} - while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { - tracing::debug!("Test received: {:?}", received_output); + let mut result = PredicateResult::Incomplete; - let output_asserts = &mut #task_expectations[stage_number].output_asserts; + while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { + tracing::debug!("Test received: {:?}", received_output); - if #output_index_names >= output_asserts.len() { - panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); - }; + let output_asserts = &mut #task_expectations[stage_number].output_asserts; - let assert = &mut output_asserts[#output_index_names]; + if #output_index_names >= output_asserts.len() { + panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); + }; - validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, &**assert).await; + let assert = &mut output_asserts[#output_index_names]; + result = validate_output_or_panic_in_script( + stage_number, + #script_names.to_string(), + &received_output, + &**assert + ) + .await; + + if result == PredicateResult::Pass { #output_index_names += 1; } + } )* } while let Ok(input) = loop_receiver.try_recv() { #( - tracing::debug!("Test sent: {:?}", input); + tracing::debug!("Test sent: {:?}", input); - to_task - .broadcast(input.clone().into()) - .await - .expect("Failed to broadcast input message"); + to_task + .broadcast(input.clone().into()) + .await + .expect("Failed to broadcast input message"); - let _ = #scripts.state - .handle_event(input.clone().into(), &to_test, &from_test) - .await - .inspect_err(|e| tracing::info!("{e}")); + let _ = #scripts.state + .handle_event(input.clone().into(), &to_test, &from_test) + .await + .inspect_err(|e| tracing::info!("{e}")); - while from_test.try_recv().is_ok() {} + while from_test.try_recv().is_ok() {} - while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { - tracing::debug!("Test received: {:?}", received_output); + let mut result = PredicateResult::Incomplete; + while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { + tracing::debug!("Test received: {:?}", received_output); - let output_asserts = &mut #task_expectations[stage_number].output_asserts; + let output_asserts = &mut #task_expectations[stage_number].output_asserts; - if #output_index_names >= output_asserts.len() { - panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); - }; + if #output_index_names >= output_asserts.len() { + panic_extra_output_in_script(stage_number, #script_names.to_string(), &received_output); + }; - let mut assert = &mut output_asserts[#output_index_names]; + let mut assert = &mut output_asserts[#output_index_names]; - validate_output_or_panic_in_script(stage_number, #script_names.to_string(), &received_output, &**assert).await; + result = validate_output_or_panic_in_script( + stage_number, + #script_names.to_string(), + &received_output, + &**assert + ) + .await; + if result == PredicateResult::Pass { #output_index_names += 1; } + } )* } @@ -424,3 +443,82 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { expanded.into() } + +/// Macro to run the test suite with `TaskState` scripts at once with the ability to. +/// Randomize the input values using a consistent seed value. +/// +/// **Note** When using `random!` you should use `all_predicates` in the output to ensure +/// that the test does not fail due to events happening out of order! +/// +/// Usage: +/// +/// `run_test[inputs, script1, script2, ...]` +/// +/// The generated test will: +/// - take the first entry of `inputs`, which should be a `Vec>`, +/// - if the input is random, it'll generate a `seed` and shuffle the inputs. +/// - if the input is serial, it will execute in order. +/// - print the seed being used. +/// - feed this to each task in order, validating any output received before moving on to the next one, +/// - repeat the previous steps, with the aggregated outputs received from all tasks used as the new input set, +/// - repeat until no more output has been generated by any task, and finally +/// - proceed to the next entry of inputs. +/// - print the seed being used (again) to make finding it a bit easier. +/// +/// # Panics +/// +/// The macro panics if the input stream cannot be parsed. +/// The test will panic if the any of the scripts has a different number of stages from the input. +#[proc_macro] +pub fn run_test(input: TokenStream) -> TokenStream { + // Parse the input as an iter of Expr + let inputs: Vec<_> = syn::parse::Parser::parse2( + syn::punctuated::Punctuated::::parse_terminated, + input.into(), + ) + .unwrap() + .into_iter() + .collect(); + + // Separate the first input (which should be the InputOrder enum) + let test_inputs = &inputs[0]; + let scripts = &inputs[1..]; + + // Generate code for shuffling and flattening inputs + let expanded = quote! { + { + use rand::{ + SeedableRng, rngs::StdRng, + seq::SliceRandom + }; + use hotshot_task_impls::events::HotShotEvent; + use hotshot_task::task::TaskState; + use hotshot_types::traits::node_implementation::NodeType; + use hotshot_testing::script::InputOrder; + + async { + let seed: u64 = rand::random(); + tracing::info!("Running test with seed {seed}"); + let mut rng = StdRng::seed_from_u64(seed); + let mut shuffled_inputs = Vec::new(); + + for (stage_number, input_order) in #test_inputs.into_iter().enumerate() { + match input_order { + InputOrder::Random(mut events) => { + events.shuffle(&mut rng); + shuffled_inputs.push(events); + }, + InputOrder::Serial(events) => { + shuffled_inputs.push(events); + } + } + } + + test_scripts![shuffled_inputs, #(#scripts),*].await; + tracing::info!("Suite used seed {seed}"); + } + } + }; + + expanded.into() +} diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index babe97234f..3db76afbe1 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -92,6 +92,15 @@ pub fn all_predicates( }) } +#[macro_export] +macro_rules! all_predicates { + ($($x:expr),* $(,)?) => { + { + vec![all_predicates(vec![$($x),*])] + } + }; +} + #[async_trait] impl Predicate>> for EventPredicate where diff --git a/testing/src/script.rs b/testing/src/script.rs index 2721c87846..c63cf40148 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -167,6 +167,31 @@ pub async fn run_test_script> + } } +pub enum InputOrder { + Random(Vec>), + Serial(Vec>), +} + +#[macro_export] +macro_rules! random { + ($($x:expr),* $(,)?) => { + { + let inputs = vec![$($x),*]; + InputOrder::Random(inputs) + } + }; +} + +#[macro_export] +macro_rules! serial { + ($($x:expr),* $(,)?) => { + { + let inputs = vec![$($x),*]; + InputOrder::Serial(inputs) + } + }; +} + pub struct TaskScript { /// The time to wait on the receiver for this script. pub timeout: Duration, @@ -179,6 +204,15 @@ pub struct Expectations { pub task_state_asserts: Vec>>, } +impl Expectations { + pub fn from_outputs(output_asserts: Vec>>>>) -> Self { + Self { + output_asserts, + task_state_asserts: vec![], + } + } +} + pub fn panic_extra_output_in_script(stage_number: usize, script_name: String, output: &S) where S: std::fmt::Debug, @@ -223,13 +257,17 @@ pub async fn validate_output_or_panic_in_script( script_name: String, output: &S, assert: &dyn Predicate, -) { - assert!( - assert.evaluate(output).await == PredicateResult::Pass, - "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", - stage_number, - script_name, - assert, - output - ); +) -> PredicateResult { + let result = assert.evaluate(output).await; + + match result { + PredicateResult::Pass => result, + PredicateResult::Incomplete => result, + PredicateResult::Fail => { + panic!( + "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", + stage_number, script_name, assert, output + ) + } + } } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 34c86e38d5..7a2a58b624 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -35,7 +35,17 @@ use sha2::Digest; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use std::time::Duration; + use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; + use hotshot_macros::{run_test, test_scripts}; + use hotshot_testing::{ + all_predicates, + predicates::event::all_predicates, + random, + script::{Expectations, InputOrder, TaskScript}, + serial, + }; use hotshot_types::data::null_block; async_compatibility_layer::logging::setup_logging(); @@ -68,31 +78,19 @@ async fn test_consensus_task() { vids.push(view.vid_proposal.clone()); } - // Run view 1 (the genesis stage). - let view_1 = TestScriptStage { - inputs: vec![ + let cert = proposals[1].data.justify_qc.clone(); + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + + let inputs = vec![ + random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), VidShareRecv(vid_share(&vids[0].0, handle.public_key())), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; - - let cert = proposals[1].data.justify_qc.clone(); - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - - // Run view 2 and propose. - let view_2 = TestScriptStage { - inputs: vec![ + serial![ VidShareRecv(vid_share(&vids[1].0, handle.public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), QcFormed(either::Left(cert)), - // We must have a payload commitment and metadata to propose. SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -101,17 +99,29 @@ async fn test_consensus_task() { null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(all_predicates![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[0].clone())), + ]), + Expectations::from_outputs(all_predicates![ exact(ViewChange(ViewNumber::new(2))), quorum_proposal_validated(), quorum_proposal_send(), - ], - asserts: vec![], - }; + ]), + ]; let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: Duration::from_millis(35), + state: consensus_state, + expectations, + }; - run_test_script(vec![view_1, view_2], consensus_state).await; + run_test![inputs, consensus_script].await; } #[cfg(test)] diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 710da5fcdd..ed2bc169a0 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; @@ -6,11 +6,12 @@ use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, }; +use hotshot_macros::test_scripts; use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ helpers::build_system_handle, predicates::event::{exact, validated_state_updated}, - script::{run_test_script, TestScriptStage}, + script::{Expectations, TaskScript}, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -67,9 +68,8 @@ async fn test_da_task() { vids.push(view.vid_proposal.clone()); } - // Run view 1 (the genesis stage). - let view_1 = TestScriptStage { - inputs: vec![ + let inputs = vec![ + vec![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( @@ -80,25 +80,28 @@ async fn test_da_task() { precompute, ), ], - outputs: vec![exact(DaProposalSend(proposals[1].clone(), leaders[1]))], - asserts: vec![], - }; + vec![DaProposalRecv(proposals[1].clone(), leaders[1])], + ]; - // Run view 2 and validate proposal. - let view_2 = TestScriptStage { - inputs: vec![DaProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![ - exact(DaProposalValidated(proposals[1].clone(), leaders[1])), - exact(DaVoteSend(votes[1].clone())), - validated_state_updated(), + let da_state = DaTaskState::::create_from(&handle).await; + let mut da_script = TaskScript { + timeout: Duration::from_millis(35), + state: da_state, + expectations: vec![ + Expectations::from_outputs(vec![exact(DaProposalSend( + proposals[1].clone(), + leaders[1], + ))]), + Expectations::from_outputs(vec![ + exact(DaProposalValidated(proposals[1].clone(), leaders[1])), + exact(DaVoteSend(votes[1].clone())), + validated_state_updated(), + ]), ], - asserts: vec![], }; - let da_state = DaTaskState::::create_from(&handle).await; - let stages = vec![view_1, view_2]; - - run_test_script(stages, da_state).await; + // run_test_script(stages, da_state).await; + test_scripts![inputs, da_script].await; } #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -149,9 +152,8 @@ async fn test_da_task_storage_failure() { vids.push(view.vid_proposal.clone()); } - // Run view 1 (the genesis stage). - let view_1 = TestScriptStage { - inputs: vec![ + let inputs = vec![ + vec![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( @@ -162,28 +164,27 @@ async fn test_da_task_storage_failure() { precompute, ), ], - outputs: vec![exact(DaProposalSend(proposals[1].clone(), leaders[1]))], - asserts: vec![], - }; - - // Run view 2 and validate proposal. - let view_2 = TestScriptStage { - inputs: vec![DaProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![exact(DaProposalValidated(proposals[1].clone(), leaders[1]))], - asserts: vec![], - }; - - // Run view 3 and propose. - let view_3 = TestScriptStage { - inputs: vec![DaProposalValidated(proposals[1].clone(), leaders[1])], - outputs: vec![ - /* No vote was sent due to the storage failure */ - ], - asserts: vec![], - }; + vec![DaProposalRecv(proposals[1].clone(), leaders[1])], + vec![DaProposalValidated(proposals[1].clone(), leaders[1])], + ]; + let expectations = vec![ + Expectations::from_outputs(vec![exact(DaProposalSend( + proposals[1].clone(), + leaders[1], + ))]), + Expectations::from_outputs(vec![exact(DaProposalValidated( + proposals[1].clone(), + leaders[1], + ))]), + Expectations::from_outputs(vec![]), + ]; let da_state = DaTaskState::::create_from(&handle).await; - let stages = vec![view_1, view_2, view_3]; + let mut da_script = TaskScript { + timeout: Duration::from_millis(35), + state: da_state, + expectations, + }; - run_test_script(stages, da_state).await; + test_scripts![inputs, da_script].await; } diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 35d7094bcc..4861d3ca53 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -81,83 +81,84 @@ async fn test_consensus_task_upgrade() { vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); } - - let view_1 = TestScriptStage { - inputs: vec![ + let inputs = vec![ + vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), VidShareRecv(vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; - - let view_2 = TestScriptStage { - inputs: vec![ + vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), VidShareRecv(vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[1].clone())), - ], - asserts: vec![no_decided_upgrade_cert()], - }; - - let view_3 = TestScriptStage { - inputs: vec![ + vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DaCertificateRecv(dacs[2].clone()), VidShareRecv(vid_share(&vids[2].0, handle.public_key())), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(3))), - all_predicates(vec![ - quorum_proposal_validated(), - leaf_decided(), - exact(QuorumVoteSend(votes[2].clone())), - ]), - ], - asserts: vec![no_decided_upgrade_cert()], - }; - - let view_4 = TestScriptStage { - inputs: vec![ + vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DaCertificateRecv(dacs[3].clone()), VidShareRecv(vid_share(&vids[3].0, handle.public_key())), ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(4))), - all_predicates(vec![ + vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], + ]; + + let expectations = vec![ + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[0].clone())), + ], + task_state_asserts: vec![], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(2))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[1].clone())), + ], + task_state_asserts: vec![no_decided_upgrade_cert()], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(3))), + quorum_proposal_validated(), + leaf_decided(), + exact(QuorumVoteSend(votes[2].clone())), + ], + task_state_asserts: vec![no_decided_upgrade_cert()], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(4))), quorum_proposal_validated(), leaf_decided(), exact(QuorumVoteSend(votes[3].clone())), - ]), - ], - asserts: vec![no_decided_upgrade_cert()], - }; - - let view_5 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], - outputs: vec![ - exact(ViewChange(ViewNumber::new(5))), - all_predicates(vec![quorum_proposal_validated(), upgrade_decided(), leaf_decided()]), - ], - asserts: vec![decided_upgrade_cert()], - }; - - let script = vec![view_1, view_2, view_3, view_4, view_5]; + ], + task_state_asserts: vec![no_decided_upgrade_cert()], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(5))), + quorum_proposal_validated(), + upgrade_decided(), + leaf_decided(), + ], + task_state_asserts: vec![decided_upgrade_cert()], + }, + ]; let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: Duration::from_millis(65), + state: consensus_state, + expectations, + }; - run_test_script(script, consensus_state).await; + test_scripts![inputs, consensus_script].await; } #[cfg(not(feature = "dependency-tasks"))] @@ -391,7 +392,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { views.push(view.clone()); } - // The transactions task generates an empty transaction set in this view, + // The transactions task generates an empty transaction set in this view, // because we are proposing between versions. generator.add_transactions(vec![]); From 00e4c2dce97ee4d7bc9223b7af9e9f01219e9aa8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 4 Jun 2024 08:36:54 -0400 Subject: [PATCH 1063/1393] [CATCHUP] Add Types for Proposal Fetching (#3267) * Add request types and logic for proposals * Fix build and lint --- task-impls/src/events.rs | 2 ++ task-impls/src/request.rs | 65 +++++++++++++++++++++++++++++++++---- task-impls/src/response.rs | 8 +++++ types/src/traits/network.rs | 2 ++ 4 files changed, 71 insertions(+), 6 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index e1b4127ed0..f6e89e8d55 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -64,6 +64,8 @@ pub enum HotShotEvent { QuorumVoteDependenciesValidated(TYPES::Time), /// A quorum proposal with the given parent leaf is validated. QuorumProposalValidated(QuorumProposal, Leaf), + /// A quorum proposal is missing for a view that we meed + QuorumProposalMissing(TYPES::Time), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 43faddb5d4..d0e92fe567 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -115,6 +115,15 @@ impl, Ver: StaticVersionType + 'st } Ok(()) } + HotShotEvent::QuorumProposalMissing(view) => { + self.run_delay( + RequestKind::Proposal(*view), + sender.clone(), + *view, + Ver::instance(), + ); + Ok(()) + } _ => Ok(()), } } @@ -181,6 +190,7 @@ impl, Ver: StaticVersionType + 'st .whole_committee(view) .into_iter() .collect(); + let leader = self.da_membership.leader(view); // Randomize the recipients so all replicas don't overload the same 1 recipients // and so we don't implicitly rely on the same replica all the time. recipients.shuffle(&mut thread_rng()); @@ -190,6 +200,7 @@ impl, Ver: StaticVersionType + 'st sender, delay: self.delay, recipients, + leader, shutdown_flag: Arc::clone(&self.shutdown_flag), }; let Ok(data) = Serializer::::serialize(&request) else { @@ -202,7 +213,7 @@ impl, Ver: StaticVersionType + 'st return; }; debug!("Requesting data: {:?}", request); - let handle = async_spawn(requester.run::(request, signature)); + let handle = async_spawn(requester.run::(request, signature, self.public_key.clone())); self.spawned_tasks.entry(view).or_default().push(handle); } @@ -227,6 +238,8 @@ struct DelayedRequester> { delay: Duration, /// The peers we will request in a random order recipients: Vec, + /// Leader for the view of the request + leader: TYPES::SignatureKey, /// A flag indicating that `HotShotEvent::Shutdown` has been received shutdown_flag: Arc, } @@ -234,6 +247,9 @@ struct DelayedRequester> { /// Wrapper for the info in a VID request struct VidRequest(TYPES::Time, TYPES::SignatureKey); +/// Wrapper for the info in a Proposal fetch request +struct ProposalRequest(TYPES::Time, TYPES::SignatureKey); + impl> DelayedRequester { /// Wait the delay, then try to complete the request. Iterates over peers /// until the request is completed, or the data is no longer needed. @@ -241,19 +257,39 @@ impl> DelayedRequester { self, request: RequestKind, signature: Signature, + pub_key: TYPES::SignatureKey, ) { - // Do the delay only if primary is up and then start sending - if !self.network.is_primary_down() { - async_sleep(self.delay).await; - } match request { RequestKind::Vid(view, key) => { + // Do the delay only if primary is up and then start sending + if !self.network.is_primary_down() { + async_sleep(self.delay).await; + } self.do_vid::(VidRequest(view, key), signature).await; } + RequestKind::Proposal(view) => { + self.do_proposal::(ProposalRequest(view, pub_key), signature) + .await; + } RequestKind::DaProposal(..) => {} } } - + /// Handle sending a request for proposal for a view, does + /// not delay + async fn do_proposal( + &self, + req: ProposalRequest, + signature: Signature, + ) { + let _ = self + .network + .request_data::( + make_proposal_req(&req, signature), + &self.leader, + Ver::instance(), + ) + .await; + } /// Handle sending a VID Share request, runs the loop until the data exists async fn do_vid( &self, @@ -336,3 +372,20 @@ fn make_vid( kind: MessageKind::Data(DataMessage::RequestData(data_request)), } } + +/// Build a request for a Proposal +fn make_proposal_req( + req: &ProposalRequest, + signature: Signature, +) -> Message { + let kind = RequestKind::Proposal(req.0); + let data_request = DataRequest { + view: req.0, + request: kind, + signature, + }; + Message { + sender: req.1.clone(), + kind: MessageKind::Data(DataMessage::RequestData(data_request)), + } +} diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index a7e5eaa57d..2c7333c659 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -182,6 +182,7 @@ impl NetworkResponseState { } // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 RequestKind::DaProposal(_view) => self.make_msg(ResponseMessage::NotFound), + RequestKind::Proposal(view) => self.make_msg(self.respond_with_proposal(view).await), } } @@ -197,6 +198,13 @@ impl NetworkResponseState { fn valid_sender(&self, sender: &TYPES::SignatureKey) -> bool { self.quorum.has_stake(sender) } + /// Lookup the proposal for the view and respond if it's found/not found + async fn respond_with_proposal(&self, _view: TYPES::Time) -> ResponseMessage { + // Complete after we are storing our last proposed view: + // https://github.com/EspressoSystems/HotShot/issues/3240 + async {}.await; + todo!(); + } } /// Check the signature diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 01248b55d0..4d9f2837fe 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -210,6 +210,8 @@ pub enum RequestKind { Vid(TYPES::Time, TYPES::SignatureKey), /// Request a DA proposal for a certain view DaProposal(TYPES::Time), + /// Request for quorum proposal for a view + Proposal(TYPES::Time), } /// A response for a request. `SequencingMessage` is the same as other network messages From a79f205378989422b65480d8cff6b046092d441b Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 4 Jun 2024 08:31:29 -0600 Subject: [PATCH 1064/1393] [CX_HARDENING] - Move Test Suite To New Macro (#3269) * consensus task migration * almost done * vote task * check build, removing commented code next * rm commented code --- testing/src/script.rs | 162 -------- testing/src/test_helpers.rs | 18 - testing/tests/tests_1/consensus_task.rs | 380 +++++++----------- testing/tests/tests_1/da_task.rs | 20 +- testing/tests/tests_1/proposal_ordering.rs | 144 ------- .../tests_1/quorum_proposal_recv_task.rs | 100 +++-- testing/tests/tests_1/quorum_proposal_task.rs | 338 ++++++++-------- testing/tests/tests_1/quorum_vote_task.rs | 186 ++++----- testing/tests/tests_1/upgrade_task.rs | 5 +- testing/tests/tests_1/vid_task.rs | 34 +- 10 files changed, 499 insertions(+), 888 deletions(-) delete mode 100644 testing/tests/tests_1/proposal_ordering.rs diff --git a/testing/src/script.rs b/testing/src/script.rs index c63cf40148..84be2d6d9e 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -1,172 +1,10 @@ use std::{sync::Arc, time::Duration}; -use async_broadcast::broadcast; -use async_compatibility_layer::art::async_timeout; -use hotshot_task::task::TaskState; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::NodeType; use crate::predicates::{Predicate, PredicateResult}; -pub const RECV_TIMEOUT: Duration = Duration::from_millis(250); - -pub struct TestScriptStage>> { - pub inputs: Vec>, - pub outputs: Vec>>>>, - pub asserts: Vec>>, -} - -/// A `TestScript` is a sequence of triples (input sequence, output sequence, assertions). -type TestScript = Vec>; - -pub fn panic_extra_output(stage_number: usize, output: &S) -where - S: std::fmt::Debug, -{ - let extra_output_error = format!( - "Stage {} | Received unexpected additional output:\n\n{:?}", - stage_number, output - ); - - panic!("{}", extra_output_error); -} - -pub fn panic_missing_output(stage_number: usize, output: &S) -where - S: std::fmt::Debug, -{ - let output_missing_error = format!( - "Stage {} | Failed to receive output for predicate: {:?}", - stage_number, output - ); - - panic!("{}", output_missing_error); -} - -pub async fn validate_task_state_or_panic( - stage_number: usize, - state: &S, - assert: &dyn Predicate, -) { - assert!( - assert.evaluate(state).await == PredicateResult::Pass, - "Stage {} | Task state failed to satisfy: {:?}", - stage_number, - assert - ); -} - -pub async fn validate_output_or_panic( - stage_number: usize, - output: &S, - assert: &(dyn Predicate + 'static), -) -> PredicateResult -where - S: std::fmt::Debug, -{ - let result = assert.evaluate(output).await; - - match result { - PredicateResult::Pass => result, - PredicateResult::Incomplete => result, - PredicateResult::Fail => { - panic!( - "Stage {} | Output failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", - stage_number, assert, output - ) - } - } -} - -/// `run_test_script` reads a triple (inputs, outputs, asserts) in a `TestScript`, -/// It broadcasts all given inputs (in order) and waits to receive all outputs (in order). -/// Once the expected outputs have been received, it validates the task state at that stage -/// against the given assertions. -/// -/// If all assertions pass, it moves onto the next stage. If it receives an unexpected output -/// or fails to receive an output, the test fails immediately with a panic. -/// -/// Note: the task is not spawned with an async thread; instead, the harness just calls `handle_event`. -/// This has a few implications, e.g. shutting down tasks doesn't really make sense, -/// and event ordering is deterministic. -pub async fn run_test_script> + Send + 'static>( - mut script: TestScript, - mut state: S, -) where - TYPES: NodeType, -{ - let (to_task, mut from_test) = broadcast(1024); - let (to_test, mut from_task) = broadcast(1024); - - for (stage_number, stage) in script.iter_mut().enumerate() { - tracing::debug!("Beginning test stage {}", stage_number); - for input in &stage.inputs { - to_task - .broadcast(input.clone().into()) - .await - .expect("Failed to broadcast input message"); - - tracing::debug!("Test sent: {:?}", input.clone()); - - let _ = state - .handle_event(input.clone().into(), &to_test, &from_test) - .await - .inspect_err(|e| tracing::info!("{e}")); - - while from_test.try_recv().is_ok() {} - } - - for assert in &mut stage.outputs { - let mut result = PredicateResult::Incomplete; - - while let Ok(Ok(received_output)) = - async_timeout(RECV_TIMEOUT, from_task.recv_direct()).await - { - tracing::debug!("Test received: {:?}", received_output); - - result = validate_output_or_panic( - stage_number, - &received_output, - // The first * dereferences &Box to Box, the second one then dereferences the Box to the - // trait object itself and then we're good to go. - &**assert, - ) - .await; - - to_task - .broadcast(received_output.clone()) - .await - .expect("Failed to re-broadcast output message"); - - tracing::debug!("Test sent: {:?}", received_output.clone()); - - let _ = state - .handle_event(received_output.clone(), &to_test, &from_test) - .await - .inspect_err(|e| tracing::info!("{e}")); - - while from_test.try_recv().is_ok() {} - - if result == PredicateResult::Pass { - break; - } - } - - if result == PredicateResult::Incomplete { - panic_missing_output(stage_number, assert); - } - } - - for assert in &mut stage.asserts { - validate_task_state_or_panic(stage_number, &state, &**assert).await; - } - - if let Ok(received_output) = from_task.try_recv() { - panic_extra_output(stage_number, &received_output); - } - } -} - pub enum InputOrder { Random(Vec>), Serial(Vec>), diff --git a/testing/src/test_helpers.rs b/testing/src/test_helpers.rs index a25c32ec9a..a28c76bdd6 100644 --- a/testing/src/test_helpers.rs +++ b/testing/src/test_helpers.rs @@ -4,24 +4,6 @@ use hotshot_types::{ data::Leaf, utils::{View, ViewInner}, }; - -/// This function permutes the provided input vector `inputs`, given some order provided within the -/// `order` vector. -/// -/// # Examples -/// let output = permute_input_with_index_order(vec![1, 2, 3], vec![2, 1, 0]); -/// // Output is [3, 2, 1] now -pub fn permute_input_with_index_order(inputs: Vec, order: Vec) -> Vec -where - T: Clone, -{ - let mut ordered_inputs = Vec::with_capacity(inputs.len()); - for &index in &order { - ordered_inputs.push(inputs[index].clone()); - } - ordered_inputs -} - /// This function will create a fake [`View`] from a provided [`Leaf`]. pub fn create_fake_view_with_leaf(leaf: Leaf) -> View { create_fake_view_with_leaf_and_state(leaf, TestValidatedState::default()) diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 7a2a58b624..a1e1c249b8 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,28 +1,34 @@ // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ + block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; +use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; use hotshot_testing::{ + all_predicates, helpers::{ build_system_handle, key_pair_for_id, permute_input_with_index_order, vid_scheme_from_view_number, vid_share, }, predicates::event::{ - exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, timeout_vote_send, + all_predicates, exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, + timeout_vote_send, }, - script::{run_test_script, TestScriptStage}, + random, + script::{Expectations, InputOrder, TaskScript}, + serial, view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{ViewChangeEvidence, ViewNumber}, + data::{null_block, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, traits::{election::Membership, node_implementation::ConsensusTime}, utils::BuilderCommitment, @@ -30,24 +36,13 @@ use hotshot_types::{ use jf_vid::VidScheme; use sha2::Digest; +const TIMEOUT: Duration = Duration::from_millis(35); + #[cfg(test)] #[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { - use std::time::Duration; - - use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; - use hotshot_macros::{run_test, test_scripts}; - use hotshot_testing::{ - all_predicates, - predicates::event::all_predicates, - random, - script::{Expectations, InputOrder, TaskScript}, - serial, - }; - use hotshot_types::data::null_block; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -116,7 +111,7 @@ async fn test_consensus_task() { let consensus_state = ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { - timeout: Duration::from_millis(35), + timeout: TIMEOUT, state: consensus_state, expectations, }; @@ -129,14 +124,6 @@ async fn test_consensus_task() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { - use hotshot::tasks::task_state::CreateTaskState; - use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; - use hotshot_testing::{ - helpers::build_system_handle, - script::{run_test_script, TestScriptStage}, - view_generator::TestViewGenerator, - }; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -161,107 +148,27 @@ async fn test_consensus_vote() { } // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - QuorumVoteRecv(votes[0].clone()), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; + let inputs = vec![random![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DaCertificateRecv(dacs[0].clone()), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + QuorumVoteRecv(votes[0].clone()), + ]]; + + let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[0].clone())), + ])]; let consensus_state = ConsensusTaskState::::create_from(&handle).await; - - run_test_script(vec![view_1], consensus_state).await; -} - -/// Tests the voting behavior by allowing the input to be permuted in any order desired. This -/// assures that, no matter what, a vote is indeed sent no matter what order the precipitating -/// events occur. The permutation is specified as `input_permutation` and is a vector of indices. -#[cfg(not(feature = "dependency-tasks"))] -async fn test_vote_with_specific_order(input_permutation: Vec) { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle(2).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(2).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - // Get out of the genesis view first - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; - - let inputs = vec![ - // We need a VID share for view 2 otherwise we cannot vote at view 2 (as node 2). - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), - DaCertificateRecv(dacs[1].clone()), - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - ]; - let view_2_inputs = permute_input_with_index_order(inputs, input_permutation); - - // Use the permuted inputs for view 2 depending on the provided index ordering. - let view_2 = TestScriptStage { - inputs: view_2_inputs, - outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[1].clone())), - ], - asserts: vec![], + let mut consensus_script = TaskScript { + timeout: TIMEOUT, + state: consensus_state, + expectations, }; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; - - run_test_script(vec![view_1, view_2], consensus_state).await; -} - -#[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_vote_with_permuted_dac() { - // These tests verify that a vote is indeed sent no matter when it receives a DaCertificateRecv - // event. In particular, we want to verify that receiving events in an unexpected (but still - // valid) order allows the system to proceed as it normally would. - test_vote_with_specific_order(vec![0, 1, 2]).await; - test_vote_with_specific_order(vec![0, 2, 1]).await; - test_vote_with_specific_order(vec![1, 0, 2]).await; - test_vote_with_specific_order(vec![2, 0, 1]).await; - test_vote_with_specific_order(vec![1, 2, 0]).await; - test_vote_with_specific_order(vec![2, 1, 0]).await; + run_test![inputs, consensus_script].await; } #[cfg(test)] @@ -322,37 +229,13 @@ async fn test_view_sync_finalize_propose() { votes.push(view.create_quorum_vote(&handle)); vids.push(view.vid_proposal); - // This is a bog standard view and covers the situation where everything is going normally. - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; - - // Fail twice here to "trigger" a view sync event. This is accomplished above by advancing the - // view number in the generator. - let view_2_3 = TestScriptStage { - inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - outputs: vec![timeout_vote_send(), timeout_vote_send()], - // Times out, so we now have a delayed view - asserts: vec![], - }; - // Handle the view sync finalize cert, get the requisite data, propose. let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { ViewChangeEvidence::ViewSync(vsc) => vsc, _ => panic!("Found a TC when there should have been a view sync cert"), }; - // Generate the timeout votes for the timeouts that just occurred. + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let timeout_vote_view_2 = TimeoutVote::create_signed_vote( TimeoutData { view: ViewNumber::new(2), @@ -373,10 +256,15 @@ async fn test_view_sync_finalize_propose() { ) .unwrap(); - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let view_4 = TestScriptStage { - inputs: vec![ - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + let inputs = vec![ + serial![VidShareRecv(vid_share(&vids[0].0, handle.public_key()))], + random![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DaCertificateRecv(dacs[0].clone()), + ], + serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], + serial![VidShareRecv(vid_share(&vids[1].0, handle.public_key()))], + random![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), TimeoutVoteRecv(timeout_vote_view_2), TimeoutVoteRecv(timeout_vote_view_3), @@ -389,19 +277,32 @@ async fn test_view_sync_finalize_propose() { null_block::builder_fee(4).unwrap(), ), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[0].clone())), + ]), + Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![ exact(ViewChange(ViewNumber::new(4))), quorum_proposal_validated(), quorum_proposal_send(), - ], - asserts: vec![], - }; + ]), + ]; let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: TIMEOUT, + state: consensus_state, + expectations, + }; - let stages = vec![view_1, view_2_3, view_4]; - - run_test_script(stages, consensus_state).await; + run_test![inputs, consensus_script].await; } #[cfg(test)] @@ -449,28 +350,7 @@ async fn test_view_sync_finalize_vote() { dacs.push(view.da_certificate.clone()); } - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; - - let view_2 = TestScriptStage { - inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - outputs: vec![timeout_vote_send(), timeout_vote_send()], - // Times out, so we now have a delayed view - asserts: vec![], - }; - - // Now we're on the latest view. We want to set the quorum + // When we're on the latest view. We want to set the quorum // certificate to be the previous highest QC (before the timeouts). This will be distinct from // the view sync cert, which is saying "hey, I'm _actually_ at view 4, but my highest QC is // only for view 1." This forces the QC to be for view 1, and we can move on under this @@ -482,23 +362,41 @@ async fn test_view_sync_finalize_vote() { _ => panic!("Found a TC when there should have been a view sync cert"), }; - // Now at view 3 we receive the proposal received response. - let view_3 = TestScriptStage { - inputs: vec![ - // Receive a proposal for view 4, but with the highest qc being from view 1. + let inputs = vec![ + serial![VidShareRecv(vid_share(&vids[0].0, handle.public_key()))], + random![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DaCertificateRecv(dacs[0].clone()), + ], + serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], + random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - // Multiple timeouts in a row, so we call for a view sync ViewSyncFinalizeCertificate2Recv(cert), ], - outputs: vec![quorum_proposal_validated(), quorum_vote_send()], - asserts: vec![], - }; + ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let expectations = vec![ + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + exact(QuorumVoteSend(votes[0].clone())) + ]), + Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), + Expectations::from_outputs(all_predicates![ + quorum_proposal_validated(), + quorum_vote_send() + ]), + ]; - let stages = vec![view_1, view_2, view_3]; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: TIMEOUT, + state: consensus_state, + expectations, + }; - run_test_script(stages, consensus_state).await; + run_test![inputs, consensus_script].await; } #[cfg(test)] @@ -546,28 +444,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { dacs.push(view.da_certificate.clone()); } - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - asserts: vec![], - }; - - let view_2 = TestScriptStage { - inputs: vec![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - outputs: vec![timeout_vote_send(), timeout_vote_send()], - // Times out, so we now have a delayed view - asserts: vec![], - }; - - // Now we're on the latest view. We want to set the quorum + // When we're on the latest view. We want to set the quorum // certificate to be the previous highest QC (before the timeouts). This will be distinct from // the view sync cert, which is saying "hey, I'm _actually_ at view 4, but my highest QC is // only for view 1." This forces the QC to be for view 1, and we can move on under this @@ -582,30 +459,46 @@ async fn test_view_sync_finalize_vote_fail_view_number() { // intentionally skip the proposal for this node so we can get the proposal and fail to vote. cert.view_number = ViewNumber::new(10); - // We introduce an error by setting a different view number as well, this makes the task check + // Get a good proposal first. + let good_proposal = proposals[0].clone(); + + // Now We introduce an error by setting a different view number as well, this makes the task check // for a view sync or timeout cert. This value could be anything as long as it is not the // previous view number. proposals[0].data.justify_qc.view_number = proposals[3].data.justify_qc.view_number; - // Now at view 3 we receive the proposal received response. - let view_3 = TestScriptStage { - inputs: vec![ - // Multiple timeouts in a row, so we call for a view sync + let inputs = vec![ + random![ + QuorumProposalRecv(good_proposal, leaders[0]), + DaCertificateRecv(dacs[0].clone()), + ], + serial![VidShareRecv(vid_share(&vids[0].0, handle.public_key()))], + serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], + random![ ViewSyncFinalizeCertificate2Recv(cert), - // Receive a proposal for view 4, but with the highest qc being from view 1. QuorumProposalRecv(proposals[0].clone(), leaders[0]), ], - outputs: vec![ - /* No outputs make it through. We never got a valid proposal, so we never vote */ - ], - asserts: vec![], - }; + ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let expectations = vec![ + Expectations::from_outputs(all_predicates![ + quorum_proposal_validated(), + exact(ViewChange(ViewNumber::new(1))), + ]), + Expectations::from_outputs(vec![exact(QuorumVoteSend(votes[0].clone()))]), + Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), + // We get no output here due to the invalid view number. + Expectations::from_outputs(vec![]), + ]; - let stages = vec![view_1, view_2, view_3]; + let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: TIMEOUT, + state: consensus_state, + expectations, + }; - run_test_script(stages, consensus_state).await; + run_test![inputs, consensus_script].await; } #[cfg(test)] @@ -639,22 +532,23 @@ async fn test_vid_disperse_storage_failure() { vids.push(view.vid_proposal.clone()); } - // Run view 1 (the genesis stage). - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - /* Does not vote */ - ], - asserts: vec![], - }; + let inputs = vec![random![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + DaCertificateRecv(dacs[0].clone()), + VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + ]]; + + let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + ])]; let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: TIMEOUT, + state: consensus_state, + expectations, + }; - run_test_script(vec![view_1], consensus_state).await; + run_test![inputs, consensus_script].await; } diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ed2bc169a0..8b61cb2f1c 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -6,12 +6,13 @@ use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, }; -use hotshot_macros::test_scripts; +use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ helpers::build_system_handle, predicates::event::{exact, validated_state_updated}, - script::{Expectations, TaskScript}, + script::{Expectations, InputOrder, TaskScript}, + serial, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -69,7 +70,7 @@ async fn test_da_task() { } let inputs = vec![ - vec![ + serial![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( @@ -80,7 +81,7 @@ async fn test_da_task() { precompute, ), ], - vec![DaProposalRecv(proposals[1].clone(), leaders[1])], + serial![DaProposalRecv(proposals[1].clone(), leaders[1])], ]; let da_state = DaTaskState::::create_from(&handle).await; @@ -100,8 +101,7 @@ async fn test_da_task() { ], }; - // run_test_script(stages, da_state).await; - test_scripts![inputs, da_script].await; + run_test![inputs, da_script].await; } #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -153,7 +153,7 @@ async fn test_da_task_storage_failure() { } let inputs = vec![ - vec![ + serial![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( @@ -164,8 +164,8 @@ async fn test_da_task_storage_failure() { precompute, ), ], - vec![DaProposalRecv(proposals[1].clone(), leaders[1])], - vec![DaProposalValidated(proposals[1].clone(), leaders[1])], + serial![DaProposalRecv(proposals[1].clone(), leaders[1])], + serial![DaProposalValidated(proposals[1].clone(), leaders[1])], ]; let expectations = vec![ Expectations::from_outputs(vec![exact(DaProposalSend( @@ -186,5 +186,5 @@ async fn test_da_task_storage_failure() { expectations, }; - test_scripts![inputs, da_script].await; + run_test![inputs, da_script].await; } diff --git a/testing/tests/tests_1/proposal_ordering.rs b/testing/tests/tests_1/proposal_ordering.rs deleted file mode 100644 index 3faf7b470d..0000000000 --- a/testing/tests/tests_1/proposal_ordering.rs +++ /dev/null @@ -1,144 +0,0 @@ -// TODO: Remove this after integration -#![allow(unused_imports)] -use std::sync::Arc; - -use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::{ - block_types::TestMetadata, - node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, -}; -use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; -use hotshot_testing::{ - helpers::{permute_input_with_index_order, vid_scheme_from_view_number, vid_share}, - predicates::event::{all_predicates, exact, quorum_proposal_send, quorum_proposal_validated}, - view_generator::TestViewGenerator, -}; -use hotshot_types::{ - data::{null_block, ViewNumber}, - traits::{election::Membership, node_implementation::ConsensusTime}, - utils::BuilderCommitment, -}; -use jf_vid::VidScheme; -use sha2::Digest; - -/// Runs a basic test where a qualified proposal occurs (i.e. not initiated by the genesis view or node 1). -/// This proposal should happen no matter how the `input_permutation` is specified. -#[cfg(not(feature = "dependency-tasks"))] -async fn test_ordering_with_specific_order(input_permutation: Vec) { - use futures::StreamExt; - use hotshot_example_types::state_types::TestValidatedState; - use hotshot_testing::{ - helpers::build_system_handle, - script::{run_test_script, TestScriptStage}, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let node_id = 3; - let handle = build_system_handle(node_id).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut vid = - vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(node_id)); - - // Make some empty encoded transactions, we just care about having a commitment handy for the - // later calls. - let encoded_transactions = Vec::new(); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(3).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); - leaders.push(view.leader_public_key); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - // This stage transitions from the initial view to view 1 - let view_1 = TestScriptStage { - inputs: vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), - ], - outputs: vec![ - exact(ViewChange(ViewNumber::new(1))), - all_predicates(vec![ - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ]), - ], - asserts: vec![], - }; - - // Node 2 is the leader up next, so we form the QC for it. - let cert = proposals[2].data.justify_qc.clone(); - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let inputs = vec![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - QcFormed(either::Left(cert)), - SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - TestMetadata, - ViewNumber::new(node_id), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), - ), - ]; - - let mut view_2_inputs = permute_input_with_index_order(inputs, input_permutation); - view_2_inputs.insert(0, DaCertificateRecv(dacs[1].clone())); - view_2_inputs.insert(0, VidShareRecv(vid_share(&vids[2].0, handle.public_key()))); - view_2_inputs.insert(0, VidShareRecv(vid_share(&vids[1].0, handle.public_key()))); - - // This stage transitions from view 1 to view 2. - let view_2 = TestScriptStage { - inputs: view_2_inputs, - outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - all_predicates(vec![ - exact(QuorumVoteSend(votes[1].clone())), - quorum_proposal_validated(), - quorum_proposal_send(), - ]), - ], - // We should end on view 2. - asserts: vec![], - }; - - let script = vec![view_1, view_2]; - - let consensus_state = ConsensusTaskState::::create_from(&handle).await; - - run_test_script(script, consensus_state).await; -} - -#[cfg(not(feature = "dependency-tasks"))] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -/// A leader node may receive one of a couple of possible events which can trigger a proposal. This -/// test ensures that, no matter what order these events are received in, the node will still -/// trigger the proposal event regardless. This is to catch a regression in which -/// `SendPayloadCommitmentAndMetadata`, when received last, resulted in no proposal occurring. -async fn test_proposal_ordering() { - test_ordering_with_specific_order(vec![0, 1, 2]).await; - test_ordering_with_specific_order(vec![0, 2, 1]).await; - test_ordering_with_specific_order(vec![1, 0, 2]).await; - test_ordering_with_specific_order(vec![2, 0, 1]).await; - test_ordering_with_specific_order(vec![1, 2, 0]).await; - test_ordering_with_specific_order(vec![2, 1, 0]).await; -} diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 75af361b5d..6022d44d89 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -4,13 +4,15 @@ use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ events::HotShotEvent::*, quorum_proposal_recv::QuorumProposalRecvTaskState, }; use hotshot_testing::{ helpers::build_system_handle, - predicates::event::{exact, vote_now}, - script::{run_test_script, TestScriptStage}, + predicates::event::{all_predicates, exact, vote_now}, + script::InputOrder, + serial, view_generator::TestViewGenerator, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; @@ -20,7 +22,12 @@ use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_recv_task() { - use hotshot_testing::helpers::build_fake_view_with_leaf; + use std::time::Duration; + + use hotshot_testing::{ + helpers::build_fake_view_with_leaf, + script::{Expectations, TaskScript}, + }; use hotshot_types::data::Leaf; async_compatibility_layer::logging::setup_logging(); @@ -58,22 +65,27 @@ async fn test_quorum_proposal_recv_task() { } drop(consensus_writer); - // Run view 2 and propose. - let view_2 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[1].clone(), leaders[1])], - outputs: vec![ - exact(ViewChange(ViewNumber::new(2))), - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(QuorumProposalValidated( - proposals[1].data.clone(), - leaves[0].clone(), - )), - ], - asserts: vec![], - }; + let inputs = vec![serial![QuorumProposalRecv( + proposals[1].clone(), + leaders[1] + )]]; + + let expectations = vec![Expectations::from_outputs(vec![ + exact(ViewChange(ViewNumber::new(2))), + exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(QuorumProposalValidated( + proposals[1].data.clone(), + leaves[0].clone(), + )), + ])]; let state = QuorumProposalRecvTaskState::::create_from(&handle).await; - run_test_script(vec![view_2], state).await; + let mut script = TaskScript { + timeout: Duration::from_millis(35), + state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -81,10 +93,14 @@ async fn test_quorum_proposal_recv_task() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_recv_task_liveness_check() { + use std::time::Duration; + use hotshot::traits::ValidatedState; use hotshot_example_types::state_types::TestValidatedState; - use hotshot_testing::helpers::{ - build_fake_view_with_leaf, build_fake_view_with_leaf_and_state, + use hotshot_testing::{ + all_predicates, + helpers::{build_fake_view_with_leaf, build_fake_view_with_leaf_and_state}, + script::{Expectations, TaskScript}, }; use hotshot_types::{ data::Leaf, @@ -147,27 +163,33 @@ async fn test_quorum_proposal_recv_task_liveness_check() { drop(consensus_writer); - // Run view 2 and propose. - let view_2 = TestScriptStage { - inputs: vec![QuorumProposalRecv(proposals[2].clone(), leaders[2])], - outputs: vec![ - exact(ViewChange(ViewNumber::new(3))), - exact(ValidatedStateUpdated( - ViewNumber::new(3), - build_fake_view_with_leaf_and_state( - leaves[2].clone(), - >::from_header( - &proposals[2].data.block_header, - ), + let inputs = vec![serial![QuorumProposalRecv( + proposals[2].clone(), + leaders[2] + )]]; + + let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(ViewChange(ViewNumber::new(3))), + exact(ValidatedStateUpdated( + ViewNumber::new(3), + build_fake_view_with_leaf_and_state( + leaves[2].clone(), + >::from_header( + &proposals[2].data.block_header, ), - )), - exact(NewUndecidedView(leaves[2].clone())), - exact(QuorumProposalLivenessValidated(proposals[2].data.clone())), - vote_now(), - ], - asserts: vec![], - }; + ), + )), + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + exact(NewUndecidedView(leaves[2].clone())), + exact(QuorumProposalLivenessValidated(proposals[2].data.clone())), + vote_now(), + ])]; let state = QuorumProposalRecvTaskState::::create_from(&handle).await; - run_test_script(vec![view_2], state).await; + let mut script = TaskScript { + timeout: Duration::from_millis(35), + state, + expectations, + }; + run_test![inputs, script].await; } diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index cbb05daeac..b566be3ba6 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,6 +1,7 @@ #![cfg(feature = "dependency-tasks")] use std::sync::Arc; +use std::time::Duration; use committable::Committable; use futures::StreamExt; @@ -12,20 +13,24 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, state_types::TestInstanceState, }; +use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ events::HotShotEvent::{self, *}, quorum_proposal::QuorumProposalTaskState, }; use hotshot_testing::{ + all_predicates, helpers::{ build_cert, build_fake_view_with_leaf, build_system_handle, key_pair_for_id, vid_scheme_from_view_number, vid_share, }, predicates::{ - event::{exact, leaf_decided, quorum_proposal_send}, + event::{all_predicates, exact, leaf_decided, quorum_proposal_send}, Predicate, }, - script::{run_test_script, TestScriptStage}, + random, + script::{Expectations, InputOrder, TaskScript}, + serial, view_generator::TestViewGenerator, }; use hotshot_types::{ @@ -43,6 +48,8 @@ use hotshot_types::{ use jf_vid::VidScheme; use sha2::Digest; +const TIMEOUT: Duration = Duration::from_millis(35); + fn make_payload_commitment( membership: &::Membership, view: ViewNumber, @@ -58,6 +65,8 @@ fn make_payload_commitment( #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_1() { + use hotshot_testing::script::{Expectations, TaskScript}; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -103,8 +112,12 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { ); drop(consensus_writer); - let view = TestScriptStage { - inputs: vec![ + let inputs = vec![ + serial![VidShareValidated(vid_share( + &vids[0].0, + handle.public_key() + )),], + random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, @@ -113,24 +126,30 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { ViewNumber::new(1), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[0].0, handle.public_key())), ValidatedStateUpdated( proposals[0].data.view_number(), build_fake_view_with_leaf(leaves[0].clone()), ), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(genesis_cert.clone())), quorum_proposal_send(), - ], - asserts: vec![], - }; + ]), + ]; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view]; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -185,8 +204,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let genesis_view = TestScriptStage { - inputs: vec![ + let inputs = vec![ + random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(1)), @@ -201,13 +220,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { build_fake_view_with_leaf(genesis_leaf.clone()), ), ], - outputs: vec![exact(UpdateHighQc(genesis_cert.clone()))], - asserts: vec![], - }; - - // We send all the events that we'd have otherwise received to ensure the states are updated. - let view_1 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -223,13 +236,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { build_fake_view_with_leaf(leaves[0].clone()), ), ], - outputs: vec![exact(UpdateHighQc(proposals[1].data.justify_qc.clone()))], - asserts: vec![], - }; - - // Proposing for this view since we've received a proposal for view 2. - let view_2 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -245,17 +252,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { build_fake_view_with_leaf(leaves[1].clone()), ), ], - outputs: vec![ - exact(LockedViewUpdated(ViewNumber::new(1))), - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - quorum_proposal_send(), - ], - asserts: vec![], - }; - - // Now, let's verify that we get the decide on the 3-chain. - let view_3 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -271,12 +268,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { build_fake_view_with_leaf(leaves[2].clone()), ), ], - outputs: vec![exact(UpdateHighQc(proposals[3].data.justify_qc.clone()))], - asserts: vec![], - }; - - let view_4 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -292,20 +284,39 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { build_fake_view_with_leaf(leaves[3].clone()), ), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(all_predicates![exact(UpdateHighQc(genesis_cert.clone()))]), + Expectations::from_outputs(all_predicates![exact(UpdateHighQc( + proposals[1].data.justify_qc.clone(), + ))]), + Expectations::from_outputs(all_predicates![ + exact(LockedViewUpdated(ViewNumber::new(1))), + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + quorum_proposal_send(), + ]), + Expectations::from_outputs(all_predicates![exact(UpdateHighQc( + proposals[3].data.justify_qc.clone(), + ))]), + Expectations::from_outputs(all_predicates![ exact(LockedViewUpdated(ViewNumber::new(3))), exact(LastDecidedViewUpdated(ViewNumber::new(2))), leaf_decided(), exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), - ], - asserts: vec![], - }; + ]), + ]; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![genesis_view, view_1, view_2, view_3, view_4]; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + + run_test![inputs, script].await; } #[cfg(test)] @@ -352,32 +363,33 @@ async fn test_quorum_proposal_task_qc_timeout() { _ => panic!("Found a View Sync Cert when there should have been a Timeout cert"), }; - // Run at view 2, propose at view 3. - let view_2 = TestScriptStage { - inputs: vec![ - QcFormed(either::Right(cert.clone())), - SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - TestMetadata, - ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), - ), - VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), - ), - ], - outputs: vec![quorum_proposal_send()], - asserts: vec![], - }; + let inputs = vec![random![ + QcFormed(either::Right(cert.clone())), + SendPayloadCommitmentAndMetadata( + payload_commitment, + builder_commitment, + TestMetadata, + ViewNumber::new(3), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), + VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), + ValidatedStateUpdated( + proposals[1].data.view_number(), + build_fake_view_with_leaf(leaves[1].clone()), + ), + ]]; + + let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view_2]; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -431,32 +443,33 @@ async fn test_quorum_proposal_task_view_sync() { _ => panic!("Found a TC when there should have been a view sync cert"), }; - // Run at view 2, the quorum vote task shouldn't care as long as the bookkeeping is correct - let view_2 = TestScriptStage { - inputs: vec![ - ViewSyncFinalizeCertificate2Recv(cert.clone()), - SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - TestMetadata, - ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), - ), - VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), - ), - ], - outputs: vec![quorum_proposal_send()], - asserts: vec![], - }; + let inputs = vec![random![ + ViewSyncFinalizeCertificate2Recv(cert.clone()), + SendPayloadCommitmentAndMetadata( + payload_commitment, + builder_commitment, + TestMetadata, + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), + VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), + ValidatedStateUpdated( + proposals[0].data.view_number(), + build_fake_view_with_leaf(leaves[0].clone()), + ), + ]]; + + let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view_2]; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -503,8 +516,8 @@ async fn test_quorum_proposal_livness_check_proposal() { let genesis_cert = proposals[0].data.justify_qc.clone(); let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; - let genesis_view = TestScriptStage { - inputs: vec![ + let inputs = vec![ + random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(1)), @@ -519,13 +532,7 @@ async fn test_quorum_proposal_livness_check_proposal() { build_fake_view_with_leaf(genesis_leaf.clone()), ), ], - outputs: vec![exact(UpdateHighQc(genesis_cert.clone()))], - asserts: vec![], - }; - - // We send all the events that we'd have otherwise received to ensure the states are updated. - let view_1 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf.clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -541,15 +548,7 @@ async fn test_quorum_proposal_livness_check_proposal() { build_fake_view_with_leaf(leaves[0].clone()), ), ], - outputs: vec![exact(UpdateHighQc(proposals[1].data.justify_qc.clone()))], - asserts: vec![], - }; - - // This is a little hokey, and may not reflect reality, but we are only testing, - // for this specific task, that it will propose when it receives this event. See - // the QuorumProposalRecv task tests. - let view_2 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalLivenessValidated(proposals[1].data.clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -565,16 +564,7 @@ async fn test_quorum_proposal_livness_check_proposal() { build_fake_view_with_leaf(leaves[1].clone()), ), ], - outputs: vec![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - quorum_proposal_send(), - ], - asserts: vec![], - }; - - // Now, let's verify that we get the decide on the 3-chain. - let view_3 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -590,12 +580,7 @@ async fn test_quorum_proposal_livness_check_proposal() { build_fake_view_with_leaf(leaves[2].clone()), ), ], - outputs: vec![exact(UpdateHighQc(proposals[3].data.justify_qc.clone()))], - asserts: vec![], - }; - - let view_4 = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( @@ -611,20 +596,37 @@ async fn test_quorum_proposal_livness_check_proposal() { build_fake_view_with_leaf(leaves[3].clone()), ), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(vec![exact(UpdateHighQc(genesis_cert.clone()))]), + Expectations::from_outputs(vec![exact(UpdateHighQc( + proposals[1].data.justify_qc.clone(), + ))]), + Expectations::from_outputs(vec![ + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + quorum_proposal_send(), + ]), + Expectations::from_outputs(vec![exact(UpdateHighQc( + proposals[3].data.justify_qc.clone(), + ))]), + Expectations::from_outputs(vec![ exact(LockedViewUpdated(ViewNumber::new(3))), exact(LastDecidedViewUpdated(ViewNumber::new(2))), leaf_decided(), exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), - ], - asserts: vec![], - }; + ]), + ]; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![genesis_view, view_1, view_2, view_3, view_4]; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -654,22 +656,26 @@ async fn test_quorum_proposal_task_with_incomplete_events() { // We run the task here at view 2, but this time we ignore the crucial piece of evidence: the // payload commitment and metadata. Instead we send only one of the three "OR" required fields. // This should result in the proposal failing to be sent. - let view_2 = TestScriptStage { - inputs: vec![QuorumProposalValidated( - proposals[1].data.clone(), - leaves[0].clone(), - )], - outputs: vec![], - asserts: vec![], - }; + let inputs = vec![serial![QuorumProposalValidated( + proposals[1].data.clone(), + leaves[0].clone(), + )]]; + + let expectations = vec![Expectations::from_outputs(vec![])]; let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - let script = vec![view_2]; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } +/// This function generates the outputs to the quorum proposal task (i.e. the emitted events). +/// This happens depending on the view and chain length. fn generate_outputs( chain_length: i32, current_view_number: u64, @@ -739,7 +745,8 @@ async fn test_quorum_proposal_task_happy_path_leaf_ascension() { let mut generator = TestViewGenerator::generate(quorum_membership, da_membership); let mut current_chain_length = 0; - let mut script = Vec::new(); + let mut inputs = Vec::new(); + let mut expectations = Vec::new(); for view_number in 1..100u64 { current_chain_length += 1; if current_chain_length > 3 { @@ -768,17 +775,22 @@ async fn test_quorum_proposal_task_happy_path_leaf_ascension() { ); } - let view = TestScriptStage { - inputs: vec![QuorumProposalValidated(proposal.data, leaf)], - outputs: generate_outputs(current_chain_length, view_number.try_into().unwrap()), - asserts: vec![], - }; - script.push(view); + inputs.push(serial![QuorumProposalValidated(proposal.data, leaf)]); + expectations.push(Expectations::from_outputs(generate_outputs( + current_chain_length, + view_number.try_into().unwrap(), + ))); } let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - run_test_script(script, quorum_proposal_task_state).await; + + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } /// This test non-deterministically injects faults into the leaf ascension process where we randomly @@ -805,8 +817,10 @@ async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { let mut generator = TestViewGenerator::generate(quorum_membership, da_membership); let mut current_chain_length = 0; - let mut script = Vec::new(); let mut dropped_views = Vec::new(); + + let mut inputs = Vec::new(); + let mut expectations = Vec::new(); for view_number in 1..15u64 { current_chain_length += 1; // If the chain keeps going, then let it keep going @@ -858,15 +872,19 @@ async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { } } - let view = TestScriptStage { - inputs: vec![QuorumProposalValidated(proposal.data, leaf)], - outputs: generate_outputs(current_chain_length, view_number.try_into().unwrap()), - asserts: vec![], - }; - script.push(view); + inputs.push(serial![QuorumProposalValidated(proposal.data, leaf)]); + expectations.push(Expectations::from_outputs(generate_outputs( + current_chain_length, + view_number.try_into().unwrap(), + ))); } let quorum_proposal_task_state = QuorumProposalTaskState::::create_from(&handle).await; - run_test_script(script, quorum_proposal_task_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_proposal_task_state, + expectations, + }; + run_test![inputs, script].await; } diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index d732f5f9ed..8a5be741ce 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -2,10 +2,21 @@ use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_testing::helpers::{build_fake_view_with_leaf, vid_share}; +use hotshot_macros::{run_test, test_scripts}; +use hotshot_testing::{ + all_predicates, + helpers::{build_fake_view_with_leaf, vid_share}, + predicates::event::all_predicates, + random, + script::{Expectations, InputOrder, TaskScript}, + serial, +}; use hotshot_types::{ data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, }; +use std::time::Duration; + +const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] @@ -15,7 +26,6 @@ async fn test_quorum_vote_task_success() { use hotshot_testing::{ helpers::build_system_handle, predicates::event::{exact, quorum_vote_send, validated_state_updated}, - script::{run_test_script, TestScriptStage}, view_generator::TestViewGenerator, }; @@ -49,30 +59,33 @@ async fn test_quorum_vote_task_success() { // Send the quorum proposal, DAC, VID share data, and validated state, in which case a dummy // vote can be formed and the view number will be updated. - let view_success = TestScriptStage { - inputs: vec![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), - DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vids[1].0[0].clone()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), - ), - ], - outputs: vec![ - exact(DaCertificateValidated(dacs[1].clone())), - exact(VidShareValidated(vids[1].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), - validated_state_updated(), - quorum_vote_send(), - ], - asserts: vec![], - }; + let inputs = vec![random![ + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + DaCertificateRecv(dacs[1].clone()), + VidShareRecv(vids[1].0[0].clone()), + ValidatedStateUpdated( + proposals[1].data.view_number(), + build_fake_view_with_leaf(leaves[1].clone()), + ), + ]]; + + let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(DaCertificateValidated(dacs[1].clone())), + exact(VidShareValidated(vids[1].0[0].clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + validated_state_updated(), + quorum_vote_send(), + ])]; let quorum_vote_state = QuorumVoteTaskState::::create_from(&handle).await; - run_test_script(vec![view_success], quorum_vote_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_vote_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -83,7 +96,6 @@ async fn test_quorum_vote_task_vote_now() { use hotshot_testing::{ helpers::build_system_handle, predicates::event::{exact, quorum_vote_send, validated_state_updated}, - script::{run_test_script, TestScriptStage}, view_generator::TestViewGenerator, }; use hotshot_types::vote::VoteDependencyData; @@ -108,20 +120,23 @@ async fn test_quorum_vote_task_vote_now() { }; // Submit an event with just the `VoteNow` event which should successfully send a vote. - let view_vote_now = TestScriptStage { - inputs: vec![VoteNow(view.view_number, vote_dependency_data)], - outputs: vec![ - exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), - validated_state_updated(), - quorum_vote_send(), - ], - asserts: vec![], - }; + let inputs = vec![serial![VoteNow(view.view_number, vote_dependency_data),]]; + + let expectations = vec![Expectations::from_outputs(vec![ + exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), + validated_state_updated(), + quorum_vote_send(), + ])]; let quorum_vote_state = QuorumVoteTaskState::::create_from(&handle).await; - run_test_script(vec![view_vote_now], quorum_vote_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_vote_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -130,10 +145,7 @@ async fn test_quorum_vote_task_vote_now() { async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - helpers::build_system_handle, - predicates::event::exact, - script::{run_test_script, TestScriptStage}, - view_generator::TestViewGenerator, + helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, }; async_compatibility_layer::logging::setup_logging(); @@ -162,8 +174,8 @@ async fn test_quorum_vote_task_miss_dependency() { // Send three of quorum proposal, DAC, VID share data, and validated state, in which case // there's no vote. - let view_no_dac = TestScriptStage { - inputs: vec![ + let inputs = vec![ + random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), VidShareRecv(vid_share(&vids[1].0, handle.public_key())), ValidatedStateUpdated( @@ -171,11 +183,7 @@ async fn test_quorum_vote_task_miss_dependency() { build_fake_view_with_leaf(leaves[1].clone()), ), ], - outputs: vec![exact(VidShareValidated(vids[1].0[0].clone()))], - asserts: vec![], - }; - let view_no_vid = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), DaCertificateRecv(dacs[2].clone()), ValidatedStateUpdated( @@ -183,11 +191,7 @@ async fn test_quorum_vote_task_miss_dependency() { build_fake_view_with_leaf(leaves[2].clone()), ), ], - outputs: vec![exact(DaCertificateValidated(dacs[2].clone()))], - asserts: vec![], - }; - let view_no_quorum_proposal = TestScriptStage { - inputs: vec![ + random![ DaCertificateRecv(dacs[3].clone()), VidShareRecv(vid_share(&vids[3].0, handle.public_key())), ValidatedStateUpdated( @@ -195,38 +199,35 @@ async fn test_quorum_vote_task_miss_dependency() { build_fake_view_with_leaf(leaves[3].clone()), ), ], - outputs: vec![ - exact(DaCertificateValidated(dacs[3].clone())), - exact(VidShareValidated(vids[3].0[0].clone())), - ], - asserts: vec![], - }; - let view_no_validated_state = TestScriptStage { - inputs: vec![ + random![ QuorumProposalValidated(proposals[4].data.clone(), leaves[3].clone()), DaCertificateRecv(dacs[4].clone()), VidShareRecv(vid_share(&vids[4].0, handle.public_key())), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(vec![exact(VidShareValidated(vids[1].0[0].clone()))]), + Expectations::from_outputs(vec![exact(DaCertificateValidated(dacs[2].clone()))]), + Expectations::from_outputs(all_predicates![ + exact(DaCertificateValidated(dacs[3].clone())), + exact(VidShareValidated(vids[3].0[0].clone())), + ]), + Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[4].clone())), exact(VidShareValidated(vids[4].0[0].clone())), - ], - asserts: vec![], - }; + ]), + ]; let quorum_vote_state = QuorumVoteTaskState::::create_from(&handle).await; - run_test_script( - vec![ - view_no_dac, - view_no_vid, - view_no_quorum_proposal, - view_no_validated_state, - ], - quorum_vote_state, - ) - .await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_vote_state, + expectations, + }; + run_test![inputs, script].await; } #[cfg(test)] @@ -235,10 +236,7 @@ async fn test_quorum_vote_task_miss_dependency() { async fn test_quorum_vote_task_incorrect_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ - helpers::build_system_handle, - predicates::event::exact, - script::{run_test_script, TestScriptStage}, - view_generator::TestViewGenerator, + helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, }; async_compatibility_layer::logging::setup_logging(); @@ -262,26 +260,28 @@ async fn test_quorum_vote_task_incorrect_dependency() { } // Send the correct quorum proposal, DAC, and VID share data, and incorrect validated state. - let view_incorrect_dependency = TestScriptStage { - inputs: vec![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), - DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vids[1].0[0].clone()), - // The validated state is for an earlier view. - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), - ), - ], - outputs: vec![ - exact(DaCertificateValidated(dacs[1].clone())), - exact(VidShareValidated(vids[1].0[0].clone())), - ], - asserts: vec![], - }; + let inputs = vec![random![ + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + DaCertificateRecv(dacs[1].clone()), + VidShareRecv(vids[1].0[0].clone()), + ValidatedStateUpdated( + proposals[0].data.view_number(), + build_fake_view_with_leaf(leaves[0].clone()), + ), + ]]; + + let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(DaCertificateValidated(dacs[1].clone())), + exact(VidShareValidated(vids[1].0[0].clone())), + ])]; let quorum_vote_state = QuorumVoteTaskState::::create_from(&handle).await; - run_test_script(vec![view_incorrect_dependency], quorum_vote_state).await; + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_vote_state, + expectations, + }; + run_test![inputs, script].await; } diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 4861d3ca53..a51eec3a4a 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -32,10 +32,7 @@ use vbs::version::Version; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. async fn test_consensus_task_upgrade() { - use hotshot_testing::{ - helpers::build_system_handle, - script::{run_test_script, TestScriptStage}, - }; + use hotshot_testing::helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 0375385089..97444fd1c5 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -6,11 +6,13 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, state_types::{TestInstanceState, TestValidatedState}, }; +use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{events::HotShotEvent::*, vid::VidTaskState}; use hotshot_testing::{ helpers::{build_system_handle, vid_scheme_from_view_number}, predicates::event::exact, - script::{run_test_script, TestScriptStage}, + script::{Expectations, InputOrder, TaskScript}, + serial, }; use hotshot_types::{ data::{null_block, DaProposal, VidDisperse, ViewNumber}, @@ -79,14 +81,9 @@ async fn test_vid_task() { signature: message.signature.clone(), _pd: PhantomData, }; - - let view_1 = TestScriptStage { - inputs: vec![ViewChange(ViewNumber::new(1))], - outputs: vec![], - asserts: vec![], - }; - let view_2 = TestScriptStage { - inputs: vec![ + let inputs = vec![ + serial![ViewChange(ViewNumber::new(1))], + serial![ ViewChange(ViewNumber::new(2)), BlockRecv( encoded_transactions, @@ -96,7 +93,11 @@ async fn test_vid_task() { vid_precompute, ), ], - outputs: vec![ + ]; + + let expectations = vec![ + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![ exact(SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -106,12 +107,15 @@ async fn test_vid_task() { )), exact(BlockReady(vid_disperse, ViewNumber::new(2))), exact(VidDisperseSend(vid_proposal.clone(), pub_key)), - ], - asserts: vec![], - }; + ]), + ]; let vid_state = VidTaskState::::create_from(&handle).await; - let script = vec![view_1, view_2]; + let mut script = TaskScript { + timeout: std::time::Duration::from_millis(35), + state: vid_state, + expectations, + }; - run_test_script(script, vid_state).await; + run_test![inputs, script].await; } From 251bfcb4fd93a07c2efb985d599b1d7ce41d532b Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 5 Jun 2024 16:15:12 +0200 Subject: [PATCH 1065/1393] Do not cancel a dependency task for the current view (#3278) --- task-impls/src/quorum_vote.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 03e18467ad..cdadf7cd80 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -503,7 +503,7 @@ impl> QuorumVoteTaskState Date: Wed, 5 Jun 2024 20:53:10 +0200 Subject: [PATCH 1066/1393] New fallback switching algorithm, first draft (#3258) * New fallback switching algorithm, first draft * Code cleanup * Lints * Fix the variable naming * Lint * Add comments explaining the fallback algorithm * Fix the comment --- .../src/traits/networking/combined_network.rs | 150 ++++++++++++------ types/src/constants.rs | 4 +- 2 files changed, 107 insertions(+), 47 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 884886cfed..fc9aba7bb5 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -12,23 +12,24 @@ use std::{ time::Duration, }; +use async_broadcast::{broadcast, InactiveReceiver, Sender}; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, channel::UnboundedSendError, }; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; -use futures::{channel::mpsc, future::join_all, join, select, FutureExt}; -use hotshot_task_impls::helpers::cancel_task; +use futures::{channel::mpsc, join, select, FutureExt}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, }; use hotshot_types::{ boxed_sync, - constants::{COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES}, + constants::{ + COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, + COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, + }, data::ViewNumber, message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, traits::{ @@ -38,9 +39,7 @@ use hotshot_types::{ BoxSyncFuture, }; use lru::LruCache; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{info, warn}; +use tracing::{debug, warn}; use vbs::version::StaticVersionType; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; @@ -53,8 +52,8 @@ pub fn calculate_hash_of(t: &T) -> u64 { s.finish() } -/// thread-safe ref counted lock to a map of delayed tasks -type DelayedTasksLockedMap = Arc>>>>>; +/// Thread-safe ref counted lock to a map of channels to the delayed tasks +type DelayedTasksChannelsMap = Arc, InactiveReceiver<()>)>>>; /// A communication channel with 2 networks, where we can fall back to the slower network if the /// primary fails @@ -72,11 +71,14 @@ pub struct CombinedNetworks { /// Whether primary is considered down primary_down: Arc, - /// delayed, cancelable tasks for secondary network - delayed_tasks: DelayedTasksLockedMap, - - /// how long to delay + /// How long to delay delay_duration: Arc>, + + /// Channels to the delayed tasks + delayed_tasks_channels: DelayedTasksChannelsMap, + + /// How many times messages were sent on secondary without delay because primary is down + no_delay_counter: Arc, } impl CombinedNetworks { @@ -104,8 +106,9 @@ impl CombinedNetworks { ))), primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), - delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(delay_duration)), + delayed_tasks_channels: Arc::default(), + no_delay_counter: Arc::new(AtomicU64::new(0)), } } @@ -141,13 +144,16 @@ impl CombinedNetworks { primary_future: impl Future> + Send + 'static, secondary_future: impl Future> + Send + 'static, ) -> Result<(), NetworkError> { - // Check if primary is down + // A local variable used to decide whether to delay this message or not let mut primary_failed = false; if self.primary_down.load(Ordering::Relaxed) { + // If the primary is considered down, we don't want to delay primary_failed = true; } else if self.primary_fail_counter.load(Ordering::Relaxed) > COMBINED_NETWORK_MIN_PRIMARY_FAILURES { + // If the primary failed more than `COMBINED_NETWORK_MIN_PRIMARY_FAILURES` times, + // we don't want to delay this message, and from now on we consider the primary as down warn!( "Primary failed more than {} times and is considered down now", COMBINED_NETWORK_MIN_PRIMARY_FAILURES @@ -156,29 +162,90 @@ impl CombinedNetworks { primary_failed = true; } - // always send on the primary network + // Always send on the primary network if let Err(e) = primary_future.await { + // If the primary failed right away, we don't want to delay this message warn!("Error on primary network: {}", e); self.primary_fail_counter.fetch_add(1, Ordering::Relaxed); primary_failed = true; }; if !primary_failed && Self::should_delay(&message) { + // We are delaying this message let duration = *self.delay_duration.read().await; + let primary_down = Arc::clone(&self.primary_down); let primary_fail_counter = Arc::clone(&self.primary_fail_counter); - self.delayed_tasks + // Each delayed task gets its own receiver clone to get a signal cancelling all tasks + // related to the given view. + let mut receiver = self + .delayed_tasks_channels .write() .await .entry(message.kind.view_number().u64()) - .or_default() - .push(async_spawn(async move { - async_sleep(duration).await; - info!("Sending on secondary after delay, message possibly has not reached recipient on primary"); - primary_fail_counter.fetch_add(1, Ordering::Relaxed); - secondary_future.await - })); + .or_insert_with(|| { + let (s, r) = broadcast(1); + (s, r.deactivate()) + }) + .1 + .activate_cloned(); + // Spawn a task that sleeps for `duration` and then sends the message if it wasn't cancelled + async_spawn(async move { + async_sleep(duration).await; + if receiver.try_recv().is_ok() { + // The task has been cancelled because the view progressed, it means the primary is working fine + debug!( + "Not sending on secondary after delay, task was canceled in view update" + ); + match primary_fail_counter.load(Ordering::Relaxed) { + 0u64 => { + // The primary fail counter reached 0, the primary is now considered up + primary_down.store(false, Ordering::Relaxed); + debug!("primary_fail_counter reached zero, primary_down set to false"); + } + c => { + // Decrement the primary fail counter + primary_fail_counter.store(c - 1, Ordering::Relaxed); + debug!("primary_fail_counter set to {:?}", c - 1); + } + } + return Ok(()); + } + // The task hasn't been cancelled, the primary probably failed. + // Increment the primary fail counter and send the message. + debug!("Sending on secondary after delay, message possibly has not reached recipient on primary"); + primary_fail_counter.fetch_add(1, Ordering::Relaxed); + secondary_future.await + }); Ok(()) } else { + // We will send without delay + if self.primary_down.load(Ordering::Relaxed) { + // If the primary is considered down, we want to periodically delay sending + // on the secondary to check whether the primary is able to deliver. + // This message will be sent without delay but the next might be delayed. + match self.no_delay_counter.load(Ordering::Relaxed) { + c if c < COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL => { + // Just increment the 'no delay counter' + self.no_delay_counter.store(c + 1, Ordering::Relaxed); + } + _ => { + // The 'no delay counter' reached the threshold + debug!( + "Sent on secondary without delay more than {} times,\ + try delaying to check primary", + COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL + ); + // Reset the 'no delay counter' + self.no_delay_counter.store(0u64, Ordering::Relaxed); + // The primary is not considered down for the moment + self.primary_down.store(false, Ordering::Relaxed); + // The primary fail counter is set just below the threshold to delay the next message + self.primary_fail_counter + .store(COMBINED_NETWORK_MIN_PRIMARY_FAILURES, Ordering::Relaxed); + } + } + } + // Send the message secondary_future.await } } @@ -257,16 +324,18 @@ impl TestableNetworkingImplementation for CombinedNetwor primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), message_cache: Arc::clone(&message_cache), - delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(secondary_network_delay)), + delayed_tasks_channels: Arc::default(), + no_delay_counter: Arc::new(AtomicU64::new(0)), }; let da_net = Self { networks: Arc::new(da_networks), message_cache, primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), - delayed_tasks: Arc::default(), delay_duration: Arc::new(RwLock::new(secondary_network_delay)), + delayed_tasks_channels: Arc::default(), + no_delay_counter: Arc::new(AtomicU64::new(0)), }; (quorum_net.into(), da_net.into()) }) @@ -473,31 +542,22 @@ impl ConnectedNetwork, TYPES::SignatureKey> where T: NodeType + 'a, { - let delayed_map = Arc::clone(&self.delayed_tasks); + let delayed_tasks_channels = Arc::clone(&self.delayed_tasks_channels); async_spawn(async move { - let mut cancel_tasks = Vec::new(); - { - let mut map_lock = delayed_map.write().await; - while let Some((first_view, _tasks)) = map_lock.first_key_value() { - if *first_view < view { - if let Some((_view, tasks)) = map_lock.pop_first() { - let mut ctasks = tasks.into_iter().map(cancel_task).collect(); - cancel_tasks.append(&mut ctasks); - } else { - break; - } + let mut map_lock = delayed_tasks_channels.write().await; + while let Some((first_view, _)) = map_lock.first_key_value() { + // Broadcast a cancelling signal to all the tasks related to each view older than the new one + if *first_view < view { + if let Some((_, (sender, _))) = map_lock.pop_first() { + let _ = sender.try_broadcast(()); } else { break; } + } else { + break; } } - join_all(cancel_tasks).await; }); - - // View changed, let's start primary again - self.primary_down.store(false, Ordering::Relaxed); - self.primary_fail_counter.store(0, Ordering::Relaxed); - // Run `update_view` logic for the libp2p network self.networks.1.update_view::(view, membership).await; } diff --git a/types/src/constants.rs b/types/src/constants.rs index b9e95e4a02..b939ffe608 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -14,8 +14,8 @@ pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; /// the number of messages to attempt to send over the primary network before switching to prefer the secondary network pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; -/// the number of messages to send over the secondary network before re-attempting the (presumed down) primary network -pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 5; +/// the number of messages to send over the secondary network without delay before re-attempting the (presumed down) primary network +pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 50; /// CONSTANT for protocol major version pub const VERSION_MAJ: u16 = 0; From 9a31fad9f6fecfc6c6f38a505f4d71e7e2e574c9 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Wed, 5 Jun 2024 17:29:19 -0400 Subject: [PATCH 1067/1393] Update tide-disco (#3280) --- builder-api/src/builder.rs | 18 +++++++++--------- orchestrator/src/lib.rs | 22 +++++++++++----------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/builder-api/src/builder.rs b/builder-api/src/builder.rs index 4e7544c81d..e41caaa018 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/builder.rs @@ -94,17 +94,17 @@ impl tide_disco::error::Error for Error { fn status(&self) -> StatusCode { match self { - Error::Request { .. } => StatusCode::BadRequest, + Error::Request { .. } => StatusCode::BAD_REQUEST, Error::BlockAvailable { source, .. } | Error::BlockClaim { source, .. } => match source { - BuildError::NotFound => StatusCode::NotFound, - BuildError::Missing => StatusCode::NotFound, - BuildError::Error { .. } => StatusCode::InternalServerError, + BuildError::NotFound => StatusCode::NOT_FOUND, + BuildError::Missing => StatusCode::NOT_FOUND, + BuildError::Error { .. } => StatusCode::INTERNAL_SERVER_ERROR, }, - Error::TxnUnpack { .. } => StatusCode::BadRequest, - Error::TxnSubmit { .. } => StatusCode::InternalServerError, - Error::Custom { .. } => StatusCode::InternalServerError, - Error::BuilderAddress { .. } => StatusCode::InternalServerError, + Error::TxnUnpack { .. } => StatusCode::BAD_REQUEST, + Error::TxnSubmit { .. } => StatusCode::INTERNAL_SERVER_ERROR, + Error::Custom { .. } => StatusCode::INTERNAL_SERVER_ERROR, + Error::BuilderAddress { .. } => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -119,7 +119,7 @@ fn try_extract_param TryFrom<&'a TaggedBase64>>( .try_into() .map_err(|_| Error::Custom { message: format!("Invalid {param_name}"), - status: StatusCode::UnprocessableEntity, + status: StatusCode::UNPROCESSABLE_ENTITY, }) } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index eda3b51ba9..ace6e9be6f 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -231,7 +231,7 @@ where if usize::from(node_index) >= self.config.config.num_nodes_with_stake.get() { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Network has reached capacity".to_string(), }); } @@ -267,7 +267,7 @@ where if usize::from(tmp_node_index) >= self.config.config.num_nodes_with_stake.get() { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Node index getter for key pair generation has reached capacity" .to_string(), }); @@ -289,7 +289,7 @@ where if !self.accepting_new_keys { return Err(ServerError { - status: tide_disco::StatusCode::Forbidden, + status: tide_disco::StatusCode::FORBIDDEN, message: "Network has been started manually, and is no longer registering new keys." .to_string(), @@ -355,7 +355,7 @@ where fn peer_pub_ready(&self) -> Result { if !self.peer_pub_ready { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Peer's public configs are not ready".to_string(), }); } @@ -365,7 +365,7 @@ where fn post_config_after_peer_collected(&mut self) -> Result, ServerError> { if !self.peer_pub_ready { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Peer's public configs are not ready".to_string(), }); } @@ -378,7 +378,7 @@ where // println!("{}", self.start); if !self.start { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Network is not ready to start".to_string(), }); } @@ -409,7 +409,7 @@ where fn post_manual_start(&mut self, password_bytes: Vec) -> Result<(), ServerError> { if !self.manual_start_allowed { return Err(ServerError { - status: tide_disco::StatusCode::Forbidden, + status: tide_disco::StatusCode::FORBIDDEN, message: "Configs have already been distributed to nodes, and the network can no longer be started manually.".to_string(), }); } @@ -420,7 +420,7 @@ where // Check that the password matches if self.config.manual_start_password != Some(password) { return Err(ServerError { - status: tide_disco::StatusCode::Forbidden, + status: tide_disco::StatusCode::FORBIDDEN, message: "Incorrect password.".to_string(), }); } @@ -436,7 +436,7 @@ where self.config.config.da_staked_committee_size = registered_da_nodes; } else { return Err(ServerError { - status: tide_disco::StatusCode::Forbidden, + status: tide_disco::StatusCode::FORBIDDEN, message: format!("We cannot manually start the network, because we only have {registered_nodes_with_stake} nodes with stake registered, with {registered_da_nodes} DA nodes.") }); } @@ -521,7 +521,7 @@ where vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Malformed body".to_string(), }); }; @@ -553,7 +553,7 @@ where vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { - status: tide_disco::StatusCode::BadRequest, + status: tide_disco::StatusCode::BAD_REQUEST, message: "Malformed body".to_string(), }); }; From ac53df6819d5b44c4fd16f9266940d487ee57eb9 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 6 Jun 2024 08:34:20 -0600 Subject: [PATCH 1068/1393] Add Rewind Task for Replaying All Node Events (#3279) * add rewind task * kill build in produciton * remove print statement * clippy * comments * add shallow depth cache for non test environment * revert comment changes * remove release build compiler error * fix display * re-add infinte depth --- hotshot/Cargo.toml | 6 +- hotshot/src/lib.rs | 4 + hotshot/src/tasks/mod.rs | 5 + hotshot/src/tasks/task_state.rs | 15 ++ task-impls/Cargo.toml | 1 + task-impls/src/events.rs | 251 +++++++++++++++++++++++++++++++- task-impls/src/lib.rs | 4 + task-impls/src/rewind.rs | 69 +++++++++ testing/Cargo.toml | 1 + testing/src/test_runner.rs | 10 +- 10 files changed, 358 insertions(+), 8 deletions(-) create mode 100644 task-impls/src/rewind.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 71877a2dfd..2d1cea56b4 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -12,6 +12,7 @@ default = ["docs", "doc-images"] example-upgrade = ["hotshot-task-impls/example-upgrade"] gpu-vid = ["hotshot-task-impls/gpu-vid"] dependency-tasks = ["hotshot-task-impls/dependency-tasks"] +rewind = ["hotshot-task-impls/rewind"] # Features required for binaries bin-orchestrator = ["clap"] @@ -73,7 +74,10 @@ cdn-broker = { workspace = true, features = [ "runtime-async-std", "global-permits", ] } -cdn-marshal = { workspace = true, features = ["runtime-async-std", "global-permits"] } +cdn-marshal = { workspace = true, features = [ + "runtime-async-std", + "global-permits", +] } [dev-dependencies] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 49ed7030cf..463fca73d6 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -305,6 +305,10 @@ impl> SystemContext { pub async fn start_consensus(&self) { #[cfg(feature = "dependncy-tasks")] error!("HotShot is running with the dependency tasks feature enabled!!"); + + #[cfg(all(feature = "rewind", not(debug_assertions)))] + compile_error!("Cannot run rewind in production builds!"); + debug!("Starting Consensus"); let consensus = self.consensus.read().await; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3145524422..5ba1cf3a22 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -7,6 +7,8 @@ use std::{sync::Arc, time::Duration}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use hotshot_task::task::Task; +#[cfg(feature = "rewind")] +use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ consensus::ConsensusTaskState, da::DaTaskState, @@ -168,4 +170,7 @@ pub async fn add_consensus_tasks< handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); handle.add_task(Consensus2TaskState::::create_from(handle).await); } + + #[cfg(feature = "rewind")] + handle.add_task(RewindTaskState::::create_from(&handle).await); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index f4ed651a21..a546d6a700 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -7,6 +7,8 @@ use std::{ use crate::types::SystemContextHandle; use async_trait::async_trait; use chrono::Utc; +#[cfg(feature = "rewind")] +use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, da::DaTaskState, quorum_proposal::QuorumProposalTaskState, @@ -333,3 +335,16 @@ impl> CreateTaskState } } } + +#[cfg(feature = "rewind")] +#[async_trait] +impl> CreateTaskState + for RewindTaskState +{ + async fn create_from(handle: &SystemContextHandle) -> RewindTaskState { + RewindTaskState { + events: Vec::new(), + id: handle.hotshot.id, + } + } +} diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 4cc0f9587b..5e894cd199 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -9,6 +9,7 @@ version = { workspace = true } example-upgrade = [] gpu-vid = ["hotshot-types/gpu-vid"] dependency-tasks = [] +rewind = [] [dependencies] anyhow = { workspace = true } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f6e89e8d55..8b7940e4ed 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; use either::Either; use hotshot_task::task::TaskEvent; @@ -16,7 +16,7 @@ use hotshot_types::{ traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, utils::{BuilderCommitment, View}, vid::{VidCommitment, VidPrecomputeData}, - vote::VoteDependencyData, + vote::{HasViewNumber, VoteDependencyData}, }; use vbs::version::Version; @@ -185,3 +185,250 @@ pub enum HotShotEvent { /// A new undecided view has been proposed. NewUndecidedView(Leaf), } + +impl Display for HotShotEvent { + #[allow(clippy::too_many_lines)] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HotShotEvent::Shutdown => write!(f, "Shutdown"), + HotShotEvent::QuorumProposalRecv(proposal, _) => write!( + f, + "QuorumProposalRecv(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::QuorumVoteRecv(v) => { + write!(f, "QuorumVoteRecv(view_number={:?})", v.view_number()) + } + HotShotEvent::TimeoutVoteRecv(v) => { + write!(f, "TimeoutVoteRecv(view_number={:?})", v.view_number()) + } + HotShotEvent::TimeoutVoteSend(v) => { + write!(f, "TimeoutVoteSend(view_number={:?})", v.view_number()) + } + HotShotEvent::DaProposalRecv(proposal, _) => write!( + f, + "DaProposalRecv(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::DaProposalValidated(proposal, _) => write!( + f, + "DaProposalValidated(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::DaVoteRecv(vote) => { + write!(f, "DaVoteRecv(view_number={:?})", vote.view_number()) + } + HotShotEvent::DaCertificateRecv(cert) => { + write!(f, "DaCertificateRecv(view_number={:?})", cert.view_number()) + } + HotShotEvent::DaCertificateValidated(cert) => write!( + f, + "DaCertificateValidated(view_number={:?})", + cert.view_number() + ), + HotShotEvent::QuorumProposalSend(proposal, _) => write!( + f, + "QuorumProposalSend(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::QuorumVoteSend(vote) => { + write!(f, "QuorumVoteSend(view_number={:?})", vote.view_number()) + } + HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { + write!( + f, + "QuorumVoteDependenciesValidated(view_number={view_number:?})" + ) + } + HotShotEvent::QuorumProposalValidated(proposal, _) => write!( + f, + "QuorumProposalValidated(view_number={:?})", + proposal.view_number() + ), + HotShotEvent::DaProposalSend(proposal, _) => write!( + f, + "DaProposalSend(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::DaVoteSend(vote) => { + write!(f, "DaVoteSend(view_number={:?})", vote.view_number()) + } + HotShotEvent::QcFormed(cert) => match cert { + either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), + either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), + }, + HotShotEvent::DacSend(cert, _) => { + write!(f, "DacSend(view_number={:?})", cert.view_number()) + } + HotShotEvent::ViewChange(view_number) => { + write!(f, "ViewChange(view_number={view_number:?})") + } + HotShotEvent::ViewSyncTimeout(view_number, _, _) => { + write!(f, "ViewSyncTimeout(view_number={view_number:?})") + } + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => write!( + f, + "ViewSyncPreCommitVoteRecv(view_nuber={:?})", + vote.view_number() + ), + HotShotEvent::ViewSyncCommitVoteRecv(vote) => write!( + f, + "ViewSyncCommitVoteRecv(view_nuber={:?})", + vote.view_number() + ), + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => write!( + f, + "ViewSyncFinalizeVoteRecv(view_nuber={:?})", + vote.view_number() + ), + HotShotEvent::ViewSyncPreCommitVoteSend(vote) => write!( + f, + "ViewSyncPreCommitVoteSend(view_nuber={:?})", + vote.view_number() + ), + HotShotEvent::ViewSyncCommitVoteSend(vote) => write!( + f, + "ViewSyncCommitVoteSend(view_nuber={:?})", + vote.view_number() + ), + HotShotEvent::ViewSyncFinalizeVoteSend(vote) => write!( + f, + "ViewSyncFinalizeVoteSend(view_nuber={:?})", + vote.view_number() + ), + HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) => { + write!( + f, + "ViewSyncPreCommitCertificate2Recv(view_number={:?})", + cert.view_number() + ) + } + HotShotEvent::ViewSyncCommitCertificate2Recv(cert) => { + write!( + f, + "ViewSyncCommitCertificate2Recv(view_number={:?})", + cert.view_number() + ) + } + HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + write!( + f, + "ViewSyncFinalizeCertificate2Recv(view_number={:?})", + cert.view_number() + ) + } + HotShotEvent::ViewSyncPreCommitCertificate2Send(cert, _) => { + write!( + f, + "ViewSyncPreCommitCertificate2Send(view_number={:?})", + cert.view_number() + ) + } + HotShotEvent::ViewSyncCommitCertificate2Send(cert, _) => { + write!( + f, + "ViewSyncCommitCertificate2Send(view_number={:?})", + cert.view_number() + ) + } + HotShotEvent::ViewSyncFinalizeCertificate2Send(cert, _) => { + write!( + f, + "ViewSyncFinalizeCertificate2Send(view_number={:?})", + cert.view_number() + ) + } + HotShotEvent::ViewSyncTrigger(view_number) => { + write!(f, "ViewSyncTrigger(view_number={view_number:?})") + } + HotShotEvent::Timeout(view_number) => write!(f, "Timeout(view_number={view_number:?})"), + HotShotEvent::TransactionsRecv(_) => write!(f, "TransactionsRecv"), + HotShotEvent::TransactionSend(_, _) => write!(f, "TransactionSend"), + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _) => { + write!( + f, + "SendPayloadCommitmentAndMetadata(view_number={view_number:?})" + ) + } + HotShotEvent::BlockRecv(_, _, view_number, ..) => { + write!(f, "BlockRecv(view_number={view_number:?})") + } + HotShotEvent::BlockReady(_, view_number) => { + write!(f, "BlockReady(view_number={view_number:?})") + } + HotShotEvent::LeafDecided(leaves) => { + let view_numbers: Vec<::Time> = + leaves.iter().map(Leaf::view_number).collect(); + write!(f, "LeafDecided({view_numbers:?})") + } + HotShotEvent::VidDisperseSend(proposal, _) => write!( + f, + "VidDisperseSend(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::VidShareRecv(proposal) => write!( + f, + "VIDShareRecv(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::VidShareValidated(proposal) => write!( + f, + "VIDShareValidated(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::UpgradeProposalRecv(proposal, _) => write!( + f, + "UpgradeProposalRecv(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::UpgradeProposalSend(proposal, _) => write!( + f, + "UpgradeProposalSend(view_number={:?})", + proposal.data.view_number() + ), + HotShotEvent::UpgradeVoteRecv(vote) => { + write!(f, "UpgradeVoteRecv(view_number={:?})", vote.view_number()) + } + HotShotEvent::UpgradeVoteSend(vote) => { + write!(f, "UpgradeVoteSend(view_number={:?})", vote.view_number()) + } + HotShotEvent::UpgradeCertificateFormed(cert) => write!( + f, + "UpgradeCertificateFormed(view_number={:?})", + cert.view_number() + ), + HotShotEvent::VersionUpgrade(_) => write!(f, "VersionUpgrade"), + HotShotEvent::QuorumProposalLivenessValidated(proposal) => { + write!( + f, + "QuorumProposalLivenessValidated(view_number={:?})", + proposal.view_number() + ) + } + HotShotEvent::UpgradeDecided(cert) => { + write!(f, "UpgradeDecided(view_number{:?})", cert.view_number()) + } + HotShotEvent::QuorumProposalMissing(view_number) => { + write!(f, "QuorumProposalMissing(view_number={view_number:?})") + } + HotShotEvent::VoteNow(view_number, _) => { + write!(f, "VoteNow(view_number={view_number:?})") + } + HotShotEvent::ValidatedStateUpdated(view_number, _) => { + write!(f, "ValidatedStateUpdated(view_number={view_number:?})") + } + HotShotEvent::LockedViewUpdated(view_number) => { + write!(f, "LockedViewUpdated(view_number={view_number:?})") + } + HotShotEvent::LastDecidedViewUpdated(view_number) => { + write!(f, "LastDecidedViewUpdated(view_number={view_number:?})") + } + HotShotEvent::UpdateHighQc(cert) => { + write!(f, "UpdateHighQc(view_number={:?})", cert.view_number()) + } + HotShotEvent::NewUndecidedView(leaf) => { + write!(f, "NewUndecidedView(view_number={:?})", leaf.view_number()) + } + } + } +} diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 58b75422b0..3c27b1f1e4 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -55,3 +55,7 @@ pub mod quorum_proposal; /// Task for handling QuorumProposalRecv events pub mod quorum_proposal_recv; + +/// Task for storing and replaying all received tasks by a node +#[cfg(feature = "rewind")] +pub mod rewind; diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs new file mode 100644 index 0000000000..4e760273b3 --- /dev/null +++ b/task-impls/src/rewind.rs @@ -0,0 +1,69 @@ +use std::sync::Arc; + +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; +use async_trait::async_trait; +use hotshot_task::task::TaskState; +use hotshot_types::traits::node_implementation::NodeType; +use std::fs::OpenOptions; +use std::io::Write; + +use crate::events::HotShotEvent; + +/// The task state for the `Rewind` task is used to capture all events received +/// by a particular node, in the order they've been received. +pub struct RewindTaskState { + /// All events received by this node since the beginning of time. + pub events: Vec>>, + + /// The id of this node + pub id: u64, +} + +impl RewindTaskState { + /// Handles all events, storing them to the private state + pub fn handle(&mut self, event: Arc>) { + self.events.push(Arc::clone(&event)); + } +} + +#[async_trait] +impl TaskState for RewindTaskState { + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + _sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(event); + Ok(()) + } + + async fn cancel_subtasks(&mut self) { + tracing::info!("Node ID {} Recording {} events", self.id, self.events.len()); + let filename = format!("rewind_{}.log", self.id); + let mut file = match OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&filename) + { + Ok(file) => file, + Err(e) => { + tracing::error!("Failed to write file {}; error = {}", filename, e); + return; + } + }; + + for (event_number, event) in self.events.iter().enumerate() { + // We do not want to die here, so we log and move on capturing as many events as we can. + if let Err(e) = writeln!(file, "{event_number}: {event}") { + tracing::error!( + "Failed to write event number {event_number} and event {event}; error = {e}" + ); + } + } + } +} diff --git a/testing/Cargo.toml b/testing/Cargo.toml index af0bb6b6d4..fcb390e46d 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -11,6 +11,7 @@ default = [] slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] dependency-tasks = ["hotshot/dependency-tasks"] +rewind = ["hotshot/rewind"] [dependencies] automod = "1.0.14" diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index e84baa58c4..1f3f2535a6 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -264,16 +264,16 @@ where completion_handle.abort(); } - assert!( - error_list.is_empty(), - "TEST FAILED! Results: {error_list:?}" - ); - let mut nodes = handles.write().await; for node in &mut *nodes { node.handle.shut_down().await; } + + assert!( + error_list.is_empty(), + "TEST FAILED! Results: {error_list:?}" + ); } /// Add nodes. From 6d1a9f2cc4459bd4135991757edc17a59df8c9a4 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 6 Jun 2024 15:38:33 -0400 Subject: [PATCH 1069/1393] Bf/proposal fetch (#3281) * storage interfaces * Store the proposal * add GC * fisrt to last --- example-types/src/storage_types.rs | 17 ++++++++++- hotshot/src/lib.rs | 11 +++++-- task-impls/src/consensus/helpers.rs | 6 +++- task-impls/src/network.rs | 10 ++++++- .../src/quorum_proposal/dependency_handle.rs | 2 +- testing/src/spinning_task.rs | 1 + types/src/consensus.rs | 30 +++++++++++++------ types/src/traits/storage.rs | 7 ++++- 8 files changed, 68 insertions(+), 16 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index f473c61420..2df1d5d3a7 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -8,7 +8,7 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, Leaf, VidDisperseShare}, + data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, message::Proposal, traits::{node_implementation::NodeType, storage::Storage}, utils::View, @@ -23,6 +23,7 @@ type VidShares = HashMap< pub struct TestStorageState { vids: VidShares, das: HashMap>>, + proposals: HashMap>>, } impl Default for TestStorageState { @@ -30,6 +31,7 @@ impl Default for TestStorageState { Self { vids: HashMap::new(), das: HashMap::new(), + proposals: HashMap::new(), } } } @@ -75,6 +77,19 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_proposal( + &self, + proposal: &Proposal>, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + let mut inner = self.inner.write().await; + inner + .proposals + .insert(proposal.data.view_number, proposal.clone()); + Ok(()) + } async fn record_action( &self, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 463fca73d6..76056dde9b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -36,9 +36,9 @@ use hotshot_types::{ constants::{ Version01, BASE_VERSION, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE, STATIC_VER_0_1, }, - data::Leaf, + data::{Leaf, QuorumProposal}, event::{EventType, LeafInfo}, - message::{DataMessage, Message, MessageKind}, + message::{DataMessage, Message, MessageKind, Proposal}, simple_certificate::QuorumCertificate, traits::{ consensus_api::ConsensusApi, @@ -263,6 +263,7 @@ impl> SystemContext { // TODO this is incorrect // https://github.com/EspressoSystems/HotShot/issues/560 anchored_leaf.view_number(), + initializer.saved_proposals, saved_leaves, saved_payloads, initializer.high_qc, @@ -684,6 +685,8 @@ pub struct HotShotInitializer { undecided_leafs: Vec>, /// Not yet decided state undecided_state: BTreeMap>, + /// Proposals we have sent out to provide to others for catchup + saved_proposals: BTreeMap>>, } impl HotShotInitializer { @@ -701,6 +704,7 @@ impl HotShotInitializer { validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), + saved_proposals: BTreeMap::new(), high_qc, undecided_leafs: Vec::new(), undecided_state: BTreeMap::new(), @@ -715,11 +719,13 @@ impl HotShotInitializer { /// after restart. /// * `validated_state` - Optional validated state that if given, will be used to construct the /// `SystemContext`. + #[allow(clippy::too_many_arguments)] pub fn from_reload( anchor_leaf: Leaf, instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::Time, + saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, undecided_leafs: Vec>, undecided_state: BTreeMap>, @@ -730,6 +736,7 @@ impl HotShotInitializer { validated_state, state_delta: None, start_view, + saved_proposals, high_qc, undecided_leafs, undecided_state, diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index d4caddc39c..bf593701f7 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -231,7 +231,11 @@ pub async fn create_and_send_proposal( "Sending null proposal for view {:?}", proposed_leaf.view_number(), ); - if let Err(e) = consensus.write().await.update_last_proposed_view(view) { + if let Err(e) = consensus + .write() + .await + .update_last_proposed_view(message.clone()) + { tracing::trace!("{e:?}"); return; } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index fc663d6fe1..53389f825b 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -409,7 +409,7 @@ impl< async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, - storage, + Arc::clone(&storage), view, ) .await @@ -417,6 +417,14 @@ impl< { return; } + if let MessageKind::Consensus(SequencingMessage::General( + GeneralConsensusMessage::Proposal(prop), + )) = &message.kind + { + if storage.write().await.append_proposal(prop).await.is_err() { + return; + } + } let transmit_result = if version == BASE_VERSION { match transmit { diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index 7c8093f4f6..9e1b5eb8b6 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -157,7 +157,7 @@ impl ProposalDependencyHandle { self.consensus .write() .await - .update_last_proposed_view(self.view_number)?; + .update_last_proposed_view(message.clone())?; async_sleep(Duration::from_millis(self.round_start_delay)).await; broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 9a99b5fdd1..09a1e966b8 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -110,6 +110,7 @@ where TestInstanceState {}, None, view_number, + BTreeMap::new(), self.high_qc.clone(), Vec::new(), BTreeMap::new(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index f7da6c7325..46c3efada8 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -12,19 +12,20 @@ use tracing::{debug, error}; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf, VidDisperse, VidDisperseShare}, + data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::{DaCertificate, QuorumCertificate, UpgradeCertificate}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, - node_implementation::NodeType, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, utils::{BuilderCommitment, StateAndDelta, Terminator}, vid::VidCommitment, + vote::HasViewNumber, }; /// A type alias for `HashMap, T>` @@ -57,8 +58,9 @@ pub struct Consensus { /// View number that is currently on. cur_view: TYPES::Time, - /// View we proposed in last. To prevent duplicate proposals - last_proposed_view: TYPES::Time, + /// Last proposals we sent out, None if we haven't proposed yet. + /// Prevents duplicate proposals, and can be served to those trying to catchup + last_proposals: BTreeMap>>, /// last view had a successful decide event last_decided_view: TYPES::Time, @@ -169,6 +171,7 @@ impl Consensus { cur_view: TYPES::Time, locked_view: TYPES::Time, last_decided_view: TYPES::Time, + last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, saved_payloads: BTreeMap>, high_qc: QuorumCertificate, @@ -180,7 +183,7 @@ impl Consensus { saved_da_certs: HashMap::new(), cur_view, last_decided_view, - last_proposed_view: last_decided_view, + last_proposals, locked_view, saved_leaves, saved_payloads, @@ -248,16 +251,24 @@ impl Consensus { Ok(()) } - /// Update the last proposed view. + /// Update the last proposal. /// /// # Errors /// Can return an error when the new view_number is not higher than the existing proposed view number. - pub fn update_last_proposed_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_last_proposed_view( + &mut self, + proposal: Proposal>, + ) -> Result<()> { ensure!( - view_number > self.last_proposed_view, + proposal.data.view_number() + > self + .last_proposals + .last_key_value() + .map_or(TYPES::Time::genesis(), |(k, _)| { *k }), "New view isn't newer than the previously proposed view." ); - self.last_proposed_view = view_number; + self.last_proposals + .insert(proposal.data.view_number(), proposal); Ok(()) } @@ -440,6 +451,7 @@ impl Consensus { self.validated_state_map = self.validated_state_map.split_off(&new_anchor_view); self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); self.vid_shares = self.vid_shares.split_off(&new_anchor_view); + self.last_proposals = self.last_proposals.split_off(&new_anchor_view); } /// Gets the last decided leaf. diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 4feb880e85..23892d87e7 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -11,7 +11,7 @@ use async_trait::async_trait; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::{DaProposal, Leaf, VidDisperseShare}, + data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, event::HotShotAction, message::Proposal, simple_certificate::QuorumCertificate, @@ -24,6 +24,11 @@ pub trait Storage: Send + Sync + Clone { async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; /// Add a proposal to the stored DA proposals. async fn append_da(&self, proposal: &Proposal>) -> Result<()>; + /// Add a proposal we sent to the store + async fn append_proposal( + &self, + proposal: &Proposal>, + ) -> Result<()>; /// Record a HotShotAction taken. async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. From d3437861b24e5779d05bcec320c68aeb2a061055 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 6 Jun 2024 13:41:57 -0600 Subject: [PATCH 1070/1393] Fix Libp2p Success Test (#3285) * fix test failure * remove state map event from da task * fix tests --- task-impls/src/da.rs | 5 -- task-impls/src/quorum_vote.rs | 42 ++--------- testing/tests/tests_1/da_task.rs | 3 +- testing/tests/tests_1/quorum_vote_task.rs | 90 +++-------------------- 4 files changed, 16 insertions(+), 124 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 2199c26974..c2572f2956 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -207,11 +207,6 @@ impl> DaTaskState { view_inner: ViewInner::Da { payload_commitment }, }; consensus.update_validated_state_map(view_number, view.clone()); - broadcast_event( - HotShotEvent::ValidatedStateUpdated(view_number, view).into(), - &event_stream, - ) - .await; } // Record the payload we have promised to make available. diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index cdadf7cd80..0053cd69b0 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -52,8 +52,6 @@ enum VoteDependency { Vid, /// For the `VoteNow` event. VoteNow, - /// For the `ValidatedStateUpdated` event. - ValidatedState, } /// Handler for the vote dependency. @@ -77,6 +75,8 @@ struct VoteDependencyHandle> { sender: Sender>>, /// The current version of HotShot version: Version, + /// The node's id + id: u64, } impl + 'static> VoteDependencyHandle { @@ -157,6 +157,7 @@ impl + 'static> VoteDependencyHand } /// Submits the `QuorumVoteSend` event if all the dependencies are met. + #[instrument(skip_all, fields(id = self.id, name = "Submit quorum vote", level = "error"))] async fn submit_vote( &self, leaf: Leaf, @@ -181,7 +182,7 @@ impl + 'static> VoteDependencyHand ) .context("Failed to sign vote")?; debug!( - "Sending vote to next quorum leader {:?}", + "sending vote to next quorum leader {:?}", vote.view_number() + 1 ); // Add to the storage. @@ -379,13 +380,6 @@ impl> QuorumVoteTaskState { - if let HotShotEvent::ValidatedStateUpdated(view, _) = event { - *view - } else { - return false; - } - } VoteDependency::VoteNow => { if let HotShotEvent::VoteNow(view, _) = event { *view @@ -418,11 +412,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState> QuorumVoteTaskState { quorum_proposal_dependency.mark_as_completed(event); } - HotShotEvent::ValidatedStateUpdated(..) => { - validated_state_dependency.mark_as_completed(event); - } _ => {} } } - let deps = vec![ - quorum_proposal_dependency, - dac_dependency, - vid_dependency, - validated_state_dependency, - ]; + let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; let dependency_chain = OrDependency::from_deps(vec![ // Either we fulfull the dependencies individiaully. AndDependency::from_deps(deps), @@ -487,6 +464,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState { - self.create_dependency_task_if_new( - *view_number, - event_receiver, - &event_sender, - Some(event), - ); - } HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { debug!("All vote dependencies verified for view {:?}", view_number); if !self.update_latest_voted_view(*view_number).await { diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 8b61cb2f1c..e45b7583e4 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -10,7 +10,7 @@ use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; use hotshot_testing::{ helpers::build_system_handle, - predicates::event::{exact, validated_state_updated}, + predicates::event::exact, script::{Expectations, InputOrder, TaskScript}, serial, view_generator::TestViewGenerator, @@ -96,7 +96,6 @@ async fn test_da_task() { Expectations::from_outputs(vec![ exact(DaProposalValidated(proposals[1].clone(), leaders[1])), exact(DaVoteSend(votes[1].clone())), - validated_state_updated(), ]), ], }; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 8a5be741ce..69a67ed257 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -63,10 +63,6 @@ async fn test_quorum_vote_task_success() { QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), VidShareRecv(vids[1].0[0].clone()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), - ), ]]; let expectations = vec![Expectations::from_outputs(all_predicates![ @@ -163,6 +159,8 @@ async fn test_quorum_vote_task_miss_dependency() { let mut dacs = Vec::new(); let mut vids = Vec::new(); let mut leaves = Vec::new(); + let consensus = handle.hotshot.consensus().clone(); + let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(5).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); @@ -170,7 +168,14 @@ async fn test_quorum_vote_task_miss_dependency() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); + + consensus_writer.update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ); + consensus_writer.update_saved_leaves(view.leaf.clone()); } + drop(consensus_writer); // Send three of quorum proposal, DAC, VID share data, and validated state, in which case // there's no vote. @@ -178,31 +183,14 @@ async fn test_quorum_vote_task_miss_dependency() { random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), VidShareRecv(vid_share(&vids[1].0, handle.public_key())), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), - ), ], random![ QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), DaCertificateRecv(dacs[2].clone()), - ValidatedStateUpdated( - proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone()), - ), ], random![ DaCertificateRecv(dacs[3].clone()), VidShareRecv(vid_share(&vids[3].0, handle.public_key())), - ValidatedStateUpdated( - proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone()), - ), - ], - random![ - QuorumProposalValidated(proposals[4].data.clone(), leaves[3].clone()), - DaCertificateRecv(dacs[4].clone()), - VidShareRecv(vid_share(&vids[4].0, handle.public_key())), ], ]; @@ -213,10 +201,6 @@ async fn test_quorum_vote_task_miss_dependency() { exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), ]), - Expectations::from_outputs(all_predicates![ - exact(DaCertificateValidated(dacs[4].clone())), - exact(VidShareValidated(vids[4].0[0].clone())), - ]), ]; let quorum_vote_state = @@ -229,59 +213,3 @@ async fn test_quorum_vote_task_miss_dependency() { }; run_test![inputs, script].await; } - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_vote_task_incorrect_dependency() { - use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; - use hotshot_testing::{ - helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle(2).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - let mut proposals = Vec::new(); - let mut leaves = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(2).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaves.push(view.leaf.clone()); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - // Send the correct quorum proposal, DAC, and VID share data, and incorrect validated state. - let inputs = vec![random![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), - DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vids[1].0[0].clone()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), - ), - ]]; - - let expectations = vec![Expectations::from_outputs(all_predicates![ - exact(DaCertificateValidated(dacs[1].clone())), - exact(VidShareValidated(vids[1].0[0].clone())), - ])]; - - let quorum_vote_state = - QuorumVoteTaskState::::create_from(&handle).await; - - let mut script = TaskScript { - timeout: TIMEOUT, - state: quorum_vote_state, - expectations, - }; - run_test![inputs, script].await; -} From 7b58e1efe537cb140987fb5d9e284d17e1f33ff7 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 6 Jun 2024 17:58:52 -0600 Subject: [PATCH 1071/1393] Turn CI Tests Back on For Dependency Tasks (#3288) * add back ci test * fix test failure --- testing/tests/tests_1/quorum_proposal_task.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index b566be3ba6..5ea565fe23 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -475,7 +475,7 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_livness_check_proposal() { +async fn test_quorum_proposal_liveness_check_proposal() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -603,14 +603,14 @@ async fn test_quorum_proposal_livness_check_proposal() { Expectations::from_outputs(vec![exact(UpdateHighQc( proposals[1].data.justify_qc.clone(), ))]), - Expectations::from_outputs(vec![ + Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), quorum_proposal_send(), ]), Expectations::from_outputs(vec![exact(UpdateHighQc( proposals[3].data.justify_qc.clone(), ))]), - Expectations::from_outputs(vec![ + Expectations::from_outputs(all_predicates![ exact(LockedViewUpdated(ViewNumber::new(3))), exact(LastDecidedViewUpdated(ViewNumber::new(2))), leaf_decided(), From 914557205982fa66bbd35fc1905cb5b1225d22d9 Mon Sep 17 00:00:00 2001 From: Jeremy Date: Sat, 8 Jun 2024 12:13:12 +0800 Subject: [PATCH 1072/1393] Make SimpleBuilder listen on 0.0.0.0 (#3284) --- testing/src/block_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index afc295ab1e..d0809f982b 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -96,7 +96,7 @@ where num_storage_nodes: usize, config: Self::Config, ) -> (Option>>, Url) { - let url = Url::parse(&format!("http://localhost:{0}", config.port)).expect("Valid URL"); + let url = Url::parse(&format!("http://0.0.0.0:{0}", config.port)).expect("Valid URL"); let (source, task) = make_simple_builder(num_storage_nodes).await; let builder_api = hotshot_builder_api::builder::define_api::< From c845b2a7adbaa9a58aa25ed5d1f6ab51b5c1fe79 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 11 Jun 2024 14:47:39 -0600 Subject: [PATCH 1073/1393] Fix Leaf Ascension (#3307) * fix tests, halfway improve impl * move the last of the variables * fix upgrade tests * move to helper method * lint * fix last suite * use debugged function, remove dead tests * fix tests * fix lint * comments, fix last test --- task-impls/src/consensus/helpers.rs | 323 +++++++++++------- task-impls/src/quorum_proposal/handlers.rs | 236 +------------ testing/tests/tests_1/quorum_proposal_task.rs | 201 +---------- testing/tests/tests_1/test_with_failures_2.rs | 2 +- testing/tests/tests_1/upgrade_task.rs | 28 +- testing/tests/tests_4/test_with_failures_f.rs | 2 +- 6 files changed, 250 insertions(+), 542 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index bf593701f7..408a90ebb3 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -12,14 +12,14 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; -use committable::Committable; +use committable::{Commitment, Committable}; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::Proposal, - simple_certificate::UpgradeCertificate, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ block_contents::BlockHeader, election::Membership, @@ -717,140 +717,229 @@ pub(crate) async fn handle_quorum_proposal_recv>( - proposal: &QuorumProposal, - event_stream: Sender>>, - task_state: &mut ConsensusTaskState, -) -> Result<()> { - let consensus = task_state.consensus.read().await; - let view = proposal.view_number(); - #[cfg(not(feature = "dependency-tasks"))] - { - task_state.current_proposal = Some(proposal.clone()); +/// Helper type to give names and to the output values of the leaf chain traversal operation. +#[derive(Debug)] +pub struct LeafChainTraversalOutcome { + /// The new locked view obtained from a 2 chain starting from the proposal's parent. + pub new_locked_view_number: Option, + + /// The new decided view obtained from a 3 chain starting from the proposal's parent. + pub new_decided_view_number: Option, + + /// The qc for the decided chain. + pub new_decide_qc: Option>, + + /// The decided leaves with corresponding validated state and VID info. + pub leaf_views: Vec>, + + /// The decided leaves. + pub leaves_decided: Vec>, + + /// The transactions in the block payload for each leaf. + pub included_txns: HashSet::Transaction>>, + + /// The most recent upgrade certificate from one of the leaves. + pub decided_upgrade_cert: Option>, +} + +/// We need Default to be implemented because the leaf ascension has very few failure branches, +/// and when they *do* happen, we still return intermediate states. Default makes the burden +/// of filling values easier. +impl Default for LeafChainTraversalOutcome { + /// The default method for this type is to set all of the returned values to `None`. + fn default() -> Self { + Self { + new_locked_view_number: None, + new_decided_view_number: None, + new_decide_qc: None, + leaf_views: Vec::new(), + leaves_decided: Vec::new(), + included_txns: HashSet::new(), + decided_upgrade_cert: None, + } } +} + +/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin +/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is +/// one view newer), then we begin attempting to form the chain. This is a direct impl from +/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: +/// +/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, +/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, +/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. +/// It forms a Three-Chain, if b'' forms a Two-Chain. +/// +/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit +/// is reached when we have a two chain, and a decide is reached when we have a three chain. +/// +/// # Example +/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, +/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the +/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes +/// 2-3-5. +/// +/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This +/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the +/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at +/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, +/// and out new locked view will be 6. +/// +/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and +/// the anchor view will be set to view 6, with the locked view as view 7. +pub async fn decide_from_proposal( + proposal: &QuorumProposal, + consensus: Arc>>, + existing_upgrade_cert: &Option>, + public_key: &TYPES::SignatureKey, +) -> LeafChainTraversalOutcome { + let consensus_reader = consensus.read().await; + let view_number = proposal.view_number(); + let parent_view_number = proposal.justify_qc.view_number(); + let old_anchor_view = consensus_reader.last_decided_view(); - #[allow(unused_mut)] - #[allow(unused_variables)] - let mut decided_upgrade_cert: Option> = None; - let mut new_anchor_view = consensus.last_decided_view(); - let mut new_locked_view = consensus.locked_view(); - let mut last_view_number_visited = view; - let mut new_commit_reached: bool = false; - let mut new_decide_reached = false; - let mut new_decide_qc = None; - let mut leaf_views = Vec::new(); - let mut leafs_decided = Vec::new(); - let mut included_txns = HashSet::new(); - let old_anchor_view = consensus.last_decided_view(); - let parent_view = proposal.justify_qc.view_number(); + let mut last_view_number_visited = view_number; let mut current_chain_length = 0usize; - if parent_view + 1 == view { - current_chain_length += 1; - if let Err(e) = consensus.visit_leaf_ancestors( - parent_view, - Terminator::Exclusive(old_anchor_view), - true, - |leaf, state, delta| { - if !new_decide_reached { - if last_view_number_visited == leaf.view_number() + 1 { - last_view_number_visited = leaf.view_number(); - current_chain_length += 1; - if current_chain_length == 2 { - new_locked_view = leaf.view_number(); - new_commit_reached = true; - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - new_decide_qc = Some(leaf.justify_qc().clone()); - } else if current_chain_length == 3 { - new_anchor_view = leaf.view_number(); - new_decide_reached = true; - } - } else { - // nothing more to do here... we don't have a new chain extension - return false; + let mut res = LeafChainTraversalOutcome::default(); + + if let Err(e) = consensus_reader.visit_leaf_ancestors( + parent_view_number, + Terminator::Exclusive(old_anchor_view), + true, + |leaf, state, delta| { + // This is the core paper logic. We're implementing the chain in chained hotstuff. + if res.new_decided_view_number.is_none() { + // If the last view number is the child of the leaf we've moved to... + if last_view_number_visited == leaf.view_number() + 1 { + last_view_number_visited = leaf.view_number(); + + // The chain grows by one + current_chain_length += 1; + + // We emit a locked view when the chain length is 2 + if current_chain_length == 2 { + res.new_locked_view_number = Some(leaf.view_number()); + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + res.new_decide_qc = Some(leaf.justify_qc().clone()); + } else if current_chain_length == 3 { + // And we decide when the chain length is 3. + res.new_decided_view_number = Some(leaf.view_number()); } + } else { + // There isn't a new chain extension available, so we signal to the callback + // owner that we can exit for now. + return false; } - // starting from the first iteration with a three chain, e.g. right after the else if case nested in the if case above - if new_decide_reached { - let mut leaf = leaf.clone(); - if leaf.view_number() == new_anchor_view { - consensus - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.height()).unwrap_or(0)); - } - if let Some(cert) = leaf.upgrade_certificate() { - if leaf.upgrade_certificate() != task_state.decided_upgrade_cert { - if cert.data.decide_by < view { - warn!("Failed to decide an upgrade certificate in time. Ignoring."); - } else { - info!( + } + + // Now, if we *have* reached a decide, we need to do some state updates. + if let Some(new_decided_view) = res.new_decided_view_number { + // First, get a mutable reference to the provided leaf. + let mut leaf = leaf.clone(); + + // Update the metrics + if leaf.view_number() == new_decided_view { + consensus_reader + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height()).unwrap_or(0)); + } + + // Check if there's a new upgrade certificate available. + if let Some(cert) = leaf.upgrade_certificate() { + if leaf.upgrade_certificate() != *existing_upgrade_cert { + if cert.data.decide_by < view_number { + warn!("Failed to decide an upgrade certificate in time. Ignoring."); + } else { + info!( "Updating consensus state with decided upgrade certificate: {:?}", cert ); - task_state.decided_upgrade_cert = Some(cert.clone()); - decided_upgrade_cert = Some(cert.clone()); - } + res.decided_upgrade_cert = Some(cert.clone()); } } - // If the block payload is available for this leaf, include it in - // the leaf chain that we send to the client. - if let Some(encoded_txns) = consensus.saved_payloads().get(&leaf.view_number()) - { - let payload = - BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); - - leaf.fill_block_payload_unchecked(payload); - } + } + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(encoded_txns) = + consensus_reader.saved_payloads().get(&leaf.view_number()) + { + let payload = + BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); + + leaf.fill_block_payload_unchecked(payload); + } - // Get the VID share at the leaf's view number, corresponding to our key - // (if one exists) - let vid_share = consensus - .vid_shares() - .get(&leaf.view_number()) - .unwrap_or(&HashMap::new()) - .get(&task_state.public_key) - .cloned() - .map(|prop| prop.data); - - // Add our data into a new `LeafInfo` - leaf_views.push(LeafInfo::new( - leaf.clone(), - Arc::clone(&state), - delta.clone(), - vid_share, - )); - leafs_decided.push(leaf.clone()); - if let Some(ref payload) = leaf.block_payload() { - for txn in payload.transaction_commitments(leaf.block_header().metadata()) { - included_txns.insert(txn); - } + // Get the VID share at the leaf's view number, corresponding to our key + // (if one exists) + let vid_share = consensus_reader + .vid_shares() + .get(&leaf.view_number()) + .unwrap_or(&HashMap::new()) + .get(public_key) + .cloned() + .map(|prop| prop.data); + + // Add our data into a new `LeafInfo` + res.leaf_views.push(LeafInfo::new( + leaf.clone(), + Arc::clone(&state), + delta.clone(), + vid_share, + )); + res.leaves_decided.push(leaf.clone()); + if let Some(ref payload) = leaf.block_payload() { + for txn in payload.transaction_commitments(leaf.block_header().metadata()) { + res.included_txns.insert(txn); } } - true - }, - ) { - debug!("view publish error {e}"); - } + } + true + }, + ) { + debug!("Leaf ascension failed; error={e}"); } - drop(consensus); - if let Some(cert) = decided_upgrade_cert { + res +} + +/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. +#[allow(clippy::too_many_lines)] +pub async fn handle_quorum_proposal_validated>( + proposal: &QuorumProposal, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, +) -> Result<()> { + let view = proposal.view_number(); + #[cfg(not(feature = "dependency-tasks"))] + { + task_state.current_proposal = Some(proposal.clone()); + } + + let res = decide_from_proposal( + proposal, + Arc::clone(&task_state.consensus), + &task_state.decided_upgrade_cert, + &task_state.public_key, + ) + .await; + + if let Some(cert) = res.decided_upgrade_cert { + task_state.decided_upgrade_cert = Some(cert.clone()); let _ = event_stream .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) .await; } - let included_txns_set: HashSet<_> = if new_decide_reached { - included_txns + let included_txns_set: HashSet<_> = if res.new_decided_view_number.is_some() { + res.included_txns } else { HashSet::new() }; let mut consensus = task_state.consensus.write().await; - if new_commit_reached { + if let Some(new_locked_view) = res.new_locked_view_number { if let Err(e) = consensus.update_locked_view(new_locked_view) { tracing::trace!("{e:?}"); } @@ -867,8 +956,8 @@ pub async fn handle_quorum_proposal_validated { - /// The new locked view obtained from a 2 chain starting from the proposal's parent. - pub new_locked_view_number: Option, - - /// The new decided view obtained from a 3 chain starting from the proposal's parent. - pub new_decided_view_number: Option, - - /// The qc for the decided chain. - pub new_decide_qc: Option>, - - /// The decided leaves with corresponding validated state and VID info. - pub leaf_views: Vec>, - - /// The decided leaves. - pub leaves_decided: Vec>, - - /// The transactions in the block payload for each leaf. - pub included_txns: HashSet::Transaction>>, - // TODO - add upgrade cert here and fill -} - -impl Default for LeafChainTraversalOutcome { - /// The default method for this type is to set all of the returned values to `None`. - fn default() -> Self { - Self { - new_locked_view_number: None, - new_decided_view_number: None, - new_decide_qc: None, - leaf_views: Vec::new(), - leaves_decided: Vec::new(), - included_txns: HashSet::new(), - } - } -} - -/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin -/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is -/// one view newer), then we begin attempting to form the chain. This is a direct impl from -/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: -/// -/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, -/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, -/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. -/// It forms a Three-Chain, if b'' forms a Two-Chain. -/// -/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit -/// is reached when we have a two chain, and a decide is reached when we have a three chain. -/// -/// # Example -/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, -/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the -/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes -/// 2-3-5. -/// -/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This -/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the -/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at -/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, -/// and out new locked view will be 6. -/// -/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and -/// the anchor view will be set to view 6, with the locked view as view 7. -async fn visit_leaf_chain>( - proposal: &QuorumProposal, - task_state: &QuorumProposalTaskState, -) -> Result> { - let proposal_view_number = proposal.view_number(); - let proposal_parent_view_number = proposal.justify_qc.view_number(); - - // This is the output return type object whose members will be mutated as we traverse. - let mut ret = LeafChainTraversalOutcome::default(); - - // Are these views consecutive (1-chain) - if proposal_parent_view_number + 1 != proposal_view_number { - // Since they aren't we can return early before we do anything else. - return Ok(ret); - } - - // Unpacking here prevents the need to endlessly call the function. These values don't change during - // the execution of this code. - let consensus_reader = task_state.consensus.read().await; - let validated_state_map = consensus_reader.validated_state_map(); - let saved_leaves = consensus_reader.saved_leaves(); - let last_decided_view = consensus_reader.last_decided_view(); - let saved_payloads = consensus_reader.saved_payloads(); - let vid_shares = consensus_reader.vid_shares(); - - // We are in at least a 1-chain, so we start from here. - let mut current_chain_length: usize = 1; - - // Get the state so we can traverse the chain to see if we have a 2 or 3 chain. - let mut view_number = proposal_parent_view_number; - - // The most recently seen view number (the view number of the last leaf we saw). - let mut last_seen_view_number = proposal_view_number; - - while let Some(leaf_state) = validated_state_map.get(&view_number) { - let leaf_commitment = leaf_state - .leaf_commitment() - .context("Failed to find the leaf commitment")?; - let leaf = saved_leaves - .get(&leaf_commitment) - .context("Failed to find the saved leaf")?; - - // These are all just checks to make sure we have what we need to proceed. - let current_leaf_view_number = leaf.view_number(); - - if let (Some(state), delta) = leaf_state.state_and_delta() { - // Exit if we've reached the last anchor view. - if current_leaf_view_number == last_decided_view { - return Ok(ret); - } - - // IMPORTANT: This is the logic from the paper, and is the most critical part of this function. - if ret.new_decided_view_number.is_none() { - // Does this leaf extend the chain? - if last_seen_view_number == leaf.view_number() + 1 { - last_seen_view_number = leaf.view_number(); - current_chain_length += 1; - - // We've got a 2 chain, update the locked view. - if current_chain_length == 2 { - ret.new_locked_view_number = Some(leaf.view_number()); - - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - ret.new_decide_qc = Some(leaf.justify_qc().clone()); - } else if current_chain_length == 3 { - // We've got the 3-chain, which means we can successfully decide on this leaf. - ret.new_decided_view_number = Some(leaf.view_number()); - } - } else { - // Bail out with empty values, but this is not necessarily an error, but we don't have a - // new chain extension. - return Ok(ret); - } - } - - // If we got a 3-chain, we can start our state updates, garbage collection, etc - if let Some(decided_view) = ret.new_decided_view_number { - let mut leaf = leaf.clone(); - if leaf.view_number() == decided_view { - consensus_reader - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.height()).unwrap_or(0)); - } - - // TODO - Upgrade certificates - // if let Some(cert) = leaf.upgrade_certificate() { - // ensure!( - // cert.data.decide_by >= proposal_view_number, - // "Failed to decide an upgrade certificate in time. Ignoring." - // ); - // task_state.decided_upgrade_cert = Some(cert.clone()); - // } - // If the block payload is available for this leaf, include it in - // the leaf chain that we send to the client. - if let Some(encoded_txns) = saved_payloads.get(&leaf.view_number()) { - let payload = - BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); - - leaf.fill_block_payload_unchecked(payload); - } - - let vid_share = vid_shares - .get(&leaf.view_number()) - .unwrap_or(&HashMap::new()) - .get(&task_state.public_key) - .cloned() - .map(|prop| prop.data); - - // Add our data into a new `LeafInfo` - ret.leaf_views.push(LeafInfo::new( - leaf.clone(), - Arc::clone(&state), - delta.clone(), - vid_share, - )); - ret.leaves_decided.push(leaf.clone()); - if let Some(ref payload) = leaf.block_payload() { - for txn in payload.transaction_commitments(leaf.block_header().metadata()) { - ret.included_txns.insert(txn); - } - } - } - } else { - bail!( - "Validated state and delta do not exist for the leaf for view {current_leaf_view_number:?}" - ) - }; - - // Move on to the next leaf at the end. - view_number = leaf.justify_qc().view_number(); - } - - Ok(ret) -} +use crate::{ + consensus::helpers::{decide_from_proposal, LeafChainTraversalOutcome}, + events::HotShotEvent, + helpers::broadcast_event, +}; /// Handles the `QuorumProposalValidated` event. pub(crate) async fn handle_quorum_proposal_validated< @@ -240,7 +33,14 @@ pub(crate) async fn handle_quorum_proposal_validated< leaf_views, leaves_decided, included_txns, - } = visit_leaf_chain(proposal, task_state).await?; + .. + } = decide_from_proposal( + proposal, + Arc::clone(&task_state.consensus), + &None, + &task_state.public_key, + ) + .await; let included_txns = if new_decided_view_number.is_some() { included_txns diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 5ea565fe23..4fb8753adb 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -292,7 +292,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { proposals[1].data.justify_qc.clone(), ))]), Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(1))), exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), quorum_proposal_send(), ]), @@ -300,8 +299,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { proposals[3].data.justify_qc.clone(), ))]), Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(3))), - exact(LastDecidedViewUpdated(ViewNumber::new(2))), + exact(LockedViewUpdated(ViewNumber::new(2))), + exact(LastDecidedViewUpdated(ViewNumber::new(1))), leaf_decided(), exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), ]), @@ -611,8 +610,8 @@ async fn test_quorum_proposal_liveness_check_proposal() { proposals[3].data.justify_qc.clone(), ))]), Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(3))), - exact(LastDecidedViewUpdated(ViewNumber::new(2))), + exact(LockedViewUpdated(ViewNumber::new(2))), + exact(LastDecidedViewUpdated(ViewNumber::new(1))), leaf_decided(), exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), ]), @@ -696,195 +695,3 @@ fn generate_outputs( _ => vec![], } } - -/// This test validates the the ascension of the leaf chain across a large input space with -/// consistently increasing inputs to ensure that decides and locked view updates -/// occur as expected. -/// -/// This test will never propose, instead, we focus exclusively on the processing of the -/// [`HotShotEvent::QuorumProposalValidated`] event in a number of different circumstances. We want to -/// guarantee that a particular space of outputs is generated. -/// -/// These outputs should be easy to run since we'll be deterministically incrementing our iterator from -/// 0..100 proposals, inserting the valid state into the map (hence "happy path"). Since we'll know ahead -/// of time, we can essentially anticipate the formation of a valid chain. -/// -/// The output sequence is essentially: -/// view 0/1 = No outputs -/// view 2 -/// ```rust -/// LockedViewUpdated(1) -/// ``` -/// -/// view 3 -/// ```rust -/// LockedViewUpdated(2) -/// LastDecidedViewUpdated(1) -/// LeafDecided() -/// ``` -/// -/// view i in 4..n -/// ```rust -/// LockedViewUpdated(i - 1) -/// LastDecidedViewUpdated(i - 2) -/// LeafDecided() -/// ``` -/// -/// Because we've inserted all of the valid data, the traversals should go exactly as we expect them to. -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_task_happy_path_leaf_ascension() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let node_id: usize = 1; - let handle = build_system_handle(node_id.try_into().unwrap()).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership, da_membership); - - let mut current_chain_length = 0; - let mut inputs = Vec::new(); - let mut expectations = Vec::new(); - for view_number in 1..100u64 { - current_chain_length += 1; - if current_chain_length > 3 { - current_chain_length = 3; - } - // This unwrap is safe here - let view = generator.next().await.unwrap(); - let proposal = view.quorum_proposal.clone(); - - // This intentionally grabs the wrong leaf since it *really* doesn't - // matter. For the record, this should be view - 1's leaf. - let leaf = view.leaf.clone(); - - // update the consensus shared state - { - let consensus = handle.consensus(); - let mut consensus_writer = consensus.write().await; - consensus_writer.update_validated_state_map( - ViewNumber::new(view_number), - build_fake_view_with_leaf(leaf.clone()), - ); - consensus_writer.update_saved_leaves(leaf.clone()); - consensus_writer.update_vid_shares( - ViewNumber::new(view_number), - view.vid_proposal.0[node_id].clone(), - ); - } - - inputs.push(serial![QuorumProposalValidated(proposal.data, leaf)]); - expectations.push(Expectations::from_outputs(generate_outputs( - current_chain_length, - view_number.try_into().unwrap(), - ))); - } - - let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; - - let mut script = TaskScript { - timeout: TIMEOUT, - state: quorum_proposal_task_state, - expectations, - }; - run_test![inputs, script].await; -} - -/// This test non-deterministically injects faults into the leaf ascension process where we randomly -/// drop states, views, etc from the proposals to ensure that we get decide events only when a three -/// chain is detected. This is accomplished by simply looking up in the state map and checking if the -/// parents for a given node indeed exist and, if so, updating the current chain depending on how recent -/// the dropped parent was. -/// -/// We utilize the same method to generate the outputs in both cases since it's quite easy to get a predictable -/// output set depending on where we are in the chain. Again, we do *not* propose in this method and instead -/// verify that the leaf ascension is reliable. We also use non-determinism to make sure that our fault -/// injection is randomized to some degree. This helps smoke out corner cases (i.e. the start and end). -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_task_fault_injection_leaf_ascension() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let node_id: usize = 1; - let handle = build_system_handle(node_id.try_into().unwrap()).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership, da_membership); - - let mut current_chain_length = 0; - let mut dropped_views = Vec::new(); - - let mut inputs = Vec::new(); - let mut expectations = Vec::new(); - for view_number in 1..15u64 { - current_chain_length += 1; - // If the chain keeps going, then let it keep going - if current_chain_length > 3 { - current_chain_length = 3; - } - // This unwrap is safe here - let view = generator.next().await.unwrap(); - let proposal = view.quorum_proposal.clone(); - - // This intentionally grabs the wrong leaf since it *really* doesn't - // matter. For the record, this should be view - 1's leaf. - let leaf = view.leaf.clone(); - - { - let consensus = handle.consensus(); - let mut consensus_writer = consensus.write().await; - - // Break the chain depending on the prior state. If the immediate parent is not found, we have a chain of - // 1, but, if it is, and the parent 2 away is not found, we have a 2 chain. - if consensus_writer - .validated_state_map() - .get(&ViewNumber::new(view_number - 1)) - .is_none() - { - current_chain_length = 1; - } else if view_number > 2 - && consensus_writer - .validated_state_map() - .get(&ViewNumber::new(view_number - 2)) - .is_none() - { - current_chain_length = 2; - } - - // Update the consensus shared state with a 10% failure rate - if rand::random::() < 0.9 { - consensus_writer.update_validated_state_map( - ViewNumber::new(view_number), - build_fake_view_with_leaf(leaf.clone()), - ); - consensus_writer.update_saved_leaves(leaf.clone()); - consensus_writer.update_vid_shares( - ViewNumber::new(view_number), - view.vid_proposal.0[node_id].clone(), - ); - } else { - dropped_views.push(view_number); - } - } - - inputs.push(serial![QuorumProposalValidated(proposal.data, leaf)]); - expectations.push(Expectations::from_outputs(generate_outputs( - current_chain_length, - view_number.try_into().unwrap(), - ))); - } - - let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; - let mut script = TaskScript { - timeout: TIMEOUT, - state: quorum_proposal_task_state, - expectations, - }; - run_test![inputs, script].await; -} diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 785d2e3c66..cb9acd9dc5 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -47,7 +47,7 @@ cross_tests!( // 2 nodes fail triggering view sync, expect no other timeouts metadata.overall_safety_properties.num_failed_views = 2; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 15; + metadata.overall_safety_properties.num_successful_views = 13; metadata } diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index a51eec3a4a..13d58165a5 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -47,9 +47,9 @@ async fn test_consensus_task_upgrade() { let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, new_version, - decide_by: ViewNumber::new(5), + decide_by: ViewNumber::new(6), new_version_hash: [0u8; 12].to_vec(), - old_version_last_view: ViewNumber::new(5), + old_version_last_view: ViewNumber::new(6), new_version_first_view: ViewNumber::new(7), }; @@ -99,7 +99,12 @@ async fn test_consensus_task_upgrade() { DaCertificateRecv(dacs[3].clone()), VidShareRecv(vid_share(&vids[3].0, handle.public_key())), ], - vec![QuorumProposalRecv(proposals[4].clone(), leaders[4])], + vec![ + QuorumProposalRecv(proposals[4].clone(), leaders[4]), + DaCertificateRecv(dacs[4].clone()), + VidShareRecv(vid_share(&vids[4].0, handle.public_key())), + ], + vec![QuorumProposalRecv(proposals[5].clone(), leaders[5])], ]; let expectations = vec![ @@ -123,7 +128,6 @@ async fn test_consensus_task_upgrade() { output_asserts: vec![ exact(ViewChange(ViewNumber::new(3))), quorum_proposal_validated(), - leaf_decided(), exact(QuorumVoteSend(votes[2].clone())), ], task_state_asserts: vec![no_decided_upgrade_cert()], @@ -141,6 +145,15 @@ async fn test_consensus_task_upgrade() { output_asserts: vec![ exact(ViewChange(ViewNumber::new(5))), quorum_proposal_validated(), + leaf_decided(), + exact(QuorumVoteSend(votes[4].clone())), + ], + task_state_asserts: vec![no_decided_upgrade_cert()], + }, + Expectations { + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(6))), + quorum_proposal_validated(), upgrade_decided(), leaf_decided(), ], @@ -341,9 +354,9 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, new_version, - decide_by: ViewNumber::new(4), + decide_by: ViewNumber::new(7), new_version_hash: [0u8; 12].to_vec(), - old_version_last_view: ViewNumber::new(4), + old_version_last_view: ViewNumber::new(7), new_version_first_view: ViewNumber::new(8), }; @@ -523,7 +536,6 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { output_asserts: vec![ exact(ViewChange(ViewNumber::new(3))), quorum_proposal_validated(), - leaf_decided(), quorum_vote_send(), ], task_state_asserts: vec![], @@ -532,7 +544,6 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { output_asserts: vec![ exact(ViewChange(ViewNumber::new(4))), quorum_proposal_validated(), - upgrade_decided(), leaf_decided(), quorum_vote_send(), ], @@ -542,6 +553,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { output_asserts: vec![ exact(ViewChange(ViewNumber::new(5))), quorum_proposal_validated(), + upgrade_decided(), leaf_decided(), // This is between versions, but we are receiving a null block and hence should vote affirmatively on it. quorum_vote_send(), diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 43a4fe51b5..78411e6f55 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -19,7 +19,7 @@ cross_tests!( let mut metadata = TestDescription::default_more_nodes(); metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; + metadata.overall_safety_properties.num_successful_views = 20; metadata.num_bootstrap_nodes = 14; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the From 06dc272467aa3d68c06928a07683ce1d6ebf9a95 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 12 Jun 2024 09:49:17 -0400 Subject: [PATCH 1074/1393] Minor test fixes (#3256) --- hotshot/src/lib.rs | 4 ++-- hotshot/src/tasks/mod.rs | 2 -- hotshot/src/tasks/task_state.rs | 3 ++- hotshot/src/traits/networking/memory_network.rs | 1 - task-impls/src/consensus/view_change.rs | 2 +- task-impls/src/quorum_vote.rs | 4 ++-- testing/src/overall_safety_task.rs | 13 +++++++++++++ testing/tests/tests_2/catchup.rs | 8 ++++---- testing/tests/tests_2/push_cdn.rs | 2 +- testing/tests/tests_2/test_with_failures_one.rs | 2 +- testing/tests/tests_4/test_with_failures_f.rs | 3 +-- testing/tests/tests_5/combined_network.rs | 8 ++++---- testing/tests/tests_5/timeout.rs | 2 +- testing/tests/tests_5/unreliable_network.rs | 4 ++-- 14 files changed, 34 insertions(+), 24 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 76056dde9b..b49f269070 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -194,7 +194,7 @@ impl> SystemContext { /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. #[allow(clippy::too_many_arguments)] - #[instrument(skip(private_key, memberships, networks, initializer, metrics, storage))] + #[instrument(skip_all)] pub async fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -206,7 +206,7 @@ impl> SystemContext { metrics: ConsensusMetricsValue, storage: I::Storage, ) -> Result, HotShotError> { - debug!("Creating a new hotshot"); + trace!("Creating a new instance of hotshot"); let consensus_metrics = Arc::new(metrics); let anchored_leaf = initializer.inner; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5ba1cf3a22..4f7dae9949 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -21,13 +21,11 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; - #[cfg(feature = "dependency-tasks")] use hotshot_task_impls::{ consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, }; - use hotshot_types::{ constants::{Version01, VERSION_0_1}, message::{Message, Messages}, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a546d6a700..8610957fb0 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -4,7 +4,6 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; -use crate::types::SystemContextHandle; use async_trait::async_trait; use chrono::Utc; #[cfg(feature = "rewind")] @@ -22,6 +21,8 @@ use hotshot_types::traits::{ }; use vbs::version::StaticVersionType; +use crate::types::SystemContextHandle; + /// Trait for creating task states. #[async_trait] pub trait CreateTaskState diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 6d5f5097b2..e88f977e6b 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -142,7 +142,6 @@ impl MemoryNetwork { warn!(?e, "Failed to decode incoming message, skipping"); } } - warn!("Stream shutdown"); } } .instrument(info_span!("MemoryNetwork Background task", map = ?master_map)), diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 5796d0e862..edc25e1965 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,3 +1,4 @@ +use core::time::Duration; use std::sync::Arc; use anyhow::{ensure, Result}; @@ -7,7 +8,6 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; -use core::time::Duration; use hotshot_types::{ consensus::Consensus, event::{Event, EventType}, diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 0053cd69b0..2dbdfc9ec3 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,7 +1,6 @@ -use anyhow::{bail, ensure, Context, Result}; use std::{collections::HashMap, sync::Arc}; -use vbs::version::Version; +use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -35,6 +34,7 @@ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; +use vbs::version::Version; use crate::{ events::HotShotEvent, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 0827e57fd6..4b9ed79f7c 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -65,6 +65,12 @@ pub enum OverallSafetyTaskErr { InconsistentStates, /// mismatched blocks for a view InconsistentBlocks, + /// not enough failures. this likely means there is an issue in the test + NotEnoughFailures { + expected: usize, + + failed_views: HashSet, + }, } /// Data availability task state @@ -225,6 +231,13 @@ impl> TestTaskState failed_views: self.ctx.failed_views.clone(), })); } + + if self.ctx.failed_views.len() < num_failed_rounds_total { + return TestResult::Fail(Box::new(OverallSafetyTaskErr::::NotEnoughFailures { + expected: num_failed_rounds_total, + failed_views: self.ctx.failed_views.clone(), + })); + } TestResult::Pass } } diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index a11fb4a0e9..1e78574d58 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -46,7 +46,7 @@ async fn test_catchup() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 5, + num_failed_views: 0, ..Default::default() }; @@ -99,7 +99,7 @@ async fn test_catchup_cdn() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 5, + num_failed_views: 0, ..Default::default() }; @@ -154,7 +154,7 @@ async fn test_catchup_one_node() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 2, + num_failed_views: 0, ..Default::default() }; @@ -215,7 +215,7 @@ async fn test_catchup_in_view_sync() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 5, + num_failed_views: 0, ..Default::default() }; diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index b708d91ff3..cfdf798570 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -26,7 +26,7 @@ async fn push_cdn_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index 0a6448a410..d0cfde0f78 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -31,7 +31,7 @@ cross_tests!( metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, dead_nodes)] }; - metadata.overall_safety_properties.num_failed_views = 3; + metadata.overall_safety_properties.num_failed_views = 1; metadata.overall_safety_properties.num_successful_views = 25; metadata } diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 78411e6f55..86b1951040 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -15,9 +15,8 @@ cross_tests!( Types: [TestTypes], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 6; + metadata.overall_safety_properties.num_failed_views = 5; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; metadata.num_bootstrap_nodes = 14; diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 5be931e554..add4cdec8a 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -30,7 +30,7 @@ async fn test_combined_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 25, ..Default::default() }, @@ -63,7 +63,7 @@ async fn test_combined_network_cdn_crash() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, @@ -112,7 +112,7 @@ async fn test_combined_network_reup() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, @@ -165,7 +165,7 @@ async fn test_combined_network_half_dc() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 189342f4f2..a42c00fdea 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -34,7 +34,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 25, + num_failed_views: 3, num_successful_views: 25, ..Default::default() }; diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index c9203f1081..68f2d65433 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -178,7 +178,7 @@ async fn test_memory_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 2, + num_failed_views: 0, ..Default::default() }, // allow more time to pass in CI @@ -222,7 +222,7 @@ async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 2, + num_failed_views: 0, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( From 8e03db8e9c2217d4c0285c4c9a0d9c2fa3dd8b75 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 12 Jun 2024 10:37:12 -0400 Subject: [PATCH 1075/1393] Revert "Minor test fixes (#3256)" (#3316) This reverts commit 1932e429d66b2d5005c51661c95d8370e0eb8951. --- hotshot/src/lib.rs | 4 ++-- hotshot/src/tasks/mod.rs | 2 ++ hotshot/src/tasks/task_state.rs | 3 +-- hotshot/src/traits/networking/memory_network.rs | 1 + task-impls/src/consensus/view_change.rs | 2 +- task-impls/src/quorum_vote.rs | 4 ++-- testing/src/overall_safety_task.rs | 13 ------------- testing/tests/tests_2/catchup.rs | 8 ++++---- testing/tests/tests_2/push_cdn.rs | 2 +- testing/tests/tests_2/test_with_failures_one.rs | 2 +- testing/tests/tests_4/test_with_failures_f.rs | 3 ++- testing/tests/tests_5/combined_network.rs | 8 ++++---- testing/tests/tests_5/timeout.rs | 2 +- testing/tests/tests_5/unreliable_network.rs | 4 ++-- 14 files changed, 24 insertions(+), 34 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b49f269070..76056dde9b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -194,7 +194,7 @@ impl> SystemContext { /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. #[allow(clippy::too_many_arguments)] - #[instrument(skip_all)] + #[instrument(skip(private_key, memberships, networks, initializer, metrics, storage))] pub async fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -206,7 +206,7 @@ impl> SystemContext { metrics: ConsensusMetricsValue, storage: I::Storage, ) -> Result, HotShotError> { - trace!("Creating a new instance of hotshot"); + debug!("Creating a new hotshot"); let consensus_metrics = Arc::new(metrics); let anchored_leaf = initializer.inner; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 4f7dae9949..5ba1cf3a22 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -21,11 +21,13 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; + #[cfg(feature = "dependency-tasks")] use hotshot_task_impls::{ consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, }; + use hotshot_types::{ constants::{Version01, VERSION_0_1}, message::{Message, Messages}, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 8610957fb0..a546d6a700 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -4,6 +4,7 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; +use crate::types::SystemContextHandle; use async_trait::async_trait; use chrono::Utc; #[cfg(feature = "rewind")] @@ -21,8 +22,6 @@ use hotshot_types::traits::{ }; use vbs::version::StaticVersionType; -use crate::types::SystemContextHandle; - /// Trait for creating task states. #[async_trait] pub trait CreateTaskState diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index e88f977e6b..6d5f5097b2 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -142,6 +142,7 @@ impl MemoryNetwork { warn!(?e, "Failed to decode incoming message, skipping"); } } + warn!("Stream shutdown"); } } .instrument(info_span!("MemoryNetwork Background task", map = ?master_map)), diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index edc25e1965..5796d0e862 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,4 +1,3 @@ -use core::time::Duration; use std::sync::Arc; use anyhow::{ensure, Result}; @@ -8,6 +7,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; +use core::time::Duration; use hotshot_types::{ consensus::Consensus, event::{Event, EventType}, diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote.rs index 2dbdfc9ec3..0053cd69b0 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote.rs @@ -1,6 +1,7 @@ +use anyhow::{bail, ensure, Context, Result}; use std::{collections::HashMap, sync::Arc}; +use vbs::version::Version; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -34,7 +35,6 @@ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; -use vbs::version::Version; use crate::{ events::HotShotEvent, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 4b9ed79f7c..0827e57fd6 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -65,12 +65,6 @@ pub enum OverallSafetyTaskErr { InconsistentStates, /// mismatched blocks for a view InconsistentBlocks, - /// not enough failures. this likely means there is an issue in the test - NotEnoughFailures { - expected: usize, - - failed_views: HashSet, - }, } /// Data availability task state @@ -231,13 +225,6 @@ impl> TestTaskState failed_views: self.ctx.failed_views.clone(), })); } - - if self.ctx.failed_views.len() < num_failed_rounds_total { - return TestResult::Fail(Box::new(OverallSafetyTaskErr::::NotEnoughFailures { - expected: num_failed_rounds_total, - failed_views: self.ctx.failed_views.clone(), - })); - } TestResult::Pass } } diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 1e78574d58..a11fb4a0e9 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -46,7 +46,7 @@ async fn test_catchup() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 0, + num_failed_views: 5, ..Default::default() }; @@ -99,7 +99,7 @@ async fn test_catchup_cdn() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 5, ..Default::default() }; @@ -154,7 +154,7 @@ async fn test_catchup_one_node() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 0, + num_failed_views: 2, ..Default::default() }; @@ -215,7 +215,7 @@ async fn test_catchup_in_view_sync() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 5, ..Default::default() }; diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index cfdf798570..b708d91ff3 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -26,7 +26,7 @@ async fn push_cdn_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index d0cfde0f78..0a6448a410 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -31,7 +31,7 @@ cross_tests!( metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, dead_nodes)] }; - metadata.overall_safety_properties.num_failed_views = 1; + metadata.overall_safety_properties.num_failed_views = 3; metadata.overall_safety_properties.num_successful_views = 25; metadata } diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 86b1951040..78411e6f55 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -15,8 +15,9 @@ cross_tests!( Types: [TestTypes], Ignore: false, Metadata: { + let mut metadata = TestDescription::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 5; + metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; metadata.num_bootstrap_nodes = 14; diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index add4cdec8a..5be931e554 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -30,7 +30,7 @@ async fn test_combined_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 33, num_successful_views: 25, ..Default::default() }, @@ -63,7 +63,7 @@ async fn test_combined_network_cdn_crash() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, @@ -112,7 +112,7 @@ async fn test_combined_network_reup() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, @@ -165,7 +165,7 @@ async fn test_combined_network_half_dc() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 33, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index a42c00fdea..189342f4f2 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -34,7 +34,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 3, + num_failed_views: 25, num_successful_views: 25, ..Default::default() }; diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 68f2d65433..c9203f1081 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -178,7 +178,7 @@ async fn test_memory_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 2, ..Default::default() }, // allow more time to pass in CI @@ -222,7 +222,7 @@ async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 0, + num_failed_views: 2, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( From 06d900369face88c3f304bbfea1dc2d1e96bdcbc Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 12 Jun 2024 23:46:03 +0800 Subject: [PATCH 1076/1393] [DEPENDENCY_REFACTOR] - Remove proposal validation from the quorum proposal task (#3275) * Move validation * Fix fmt and import * Fix build and recv test * Remove a todo * Restore an event * Fix non-dependency lints * Fix tests * Fix CI * Fix fmt and imports * Fix proposal and proposal recv tasks tests * 3 more tests * Fix doc * Address comments * Add error * Fix Conficts in keyao/remove-proposal-validation (#3308) * fix * remove dead test * fix lint * fix test * Fix vote tests --------- Co-authored-by: Keyao Shen * Fix upgrade test --------- Co-authored-by: Jarred Parr --- hotshot/src/tasks/mod.rs | 18 ++-- task-impls/src/consensus/helpers.rs | 101 ++++++++++++------ task-impls/src/consensus/mod.rs | 1 + task-impls/src/events.rs | 13 --- .../src/quorum_proposal/dependency_handle.rs | 8 +- task-impls/src/quorum_proposal/mod.rs | 49 ++------- .../src/quorum_proposal_recv/handlers.rs | 63 +++++------ task-impls/src/quorum_proposal_recv/mod.rs | 28 +++-- .../handlers.rs | 7 +- .../{quorum_vote.rs => quorum_vote/mod.rs} | 22 ++-- task-impls/src/response.rs | 1 + testing/tests/tests_1/consensus_task.rs | 26 +++-- .../tests_1/quorum_proposal_recv_task.rs | 19 +++- testing/tests/tests_1/quorum_proposal_task.rs | 79 +++++--------- testing/tests/tests_1/quorum_vote_task.rs | 65 ++++++++++- testing/tests/tests_1/upgrade_task.rs | 19 +++- 16 files changed, 287 insertions(+), 232 deletions(-) rename task-impls/src/{quorum_proposal => quorum_vote}/handlers.rs (95%) rename task-impls/src/{quorum_vote.rs => quorum_vote/mod.rs} (98%) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5ba1cf3a22..639775e4d6 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,12 +5,19 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use hotshot_task::task::Task; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_task_impls::consensus::ConsensusTaskState; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; +#[cfg(feature = "dependency-tasks")] +use hotshot_task_impls::{ + consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, +}; use hotshot_task_impls::{ - consensus::ConsensusTaskState, da::DaTaskState, events::HotShotEvent, network::{NetworkEventTaskState, NetworkMessageTaskState}, @@ -21,13 +28,6 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; - -#[cfg(feature = "dependency-tasks")] -use hotshot_task_impls::{ - consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, - quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, -}; - use hotshot_types::{ constants::{Version01, VERSION_0_1}, message::{Message, Messages}, @@ -38,8 +38,6 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; - /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 408a90ebb3..35caeeb308 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -1,53 +1,65 @@ -use core::time::Duration; -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, - sync::Arc, +#[cfg(not(feature = "dependency-tasks"))] +use super::ConsensusTaskState; +#[cfg(not(feature = "dependency-tasks"))] +use crate::{ + consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, + helpers::AnyhowTracing, }; - -use anyhow::{bail, ensure, Context, Result}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; +#[cfg(not(feature = "dependency-tasks"))] +use anyhow::bail; +use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; +#[cfg(not(feature = "dependency-tasks"))] use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; +#[cfg(not(feature = "dependency-tasks"))] #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +#[cfg(not(feature = "dependency-tasks"))] use chrono::Utc; use committable::{Commitment, Committable}; +#[cfg(not(feature = "dependency-tasks"))] +use core::time::Duration; +#[cfg(not(feature = "dependency-tasks"))] use futures::FutureExt; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::{ + consensus::CommitmentAndMetadata, + traits::{ + node_implementation::{ConsensusTime, NodeImplementation}, + storage::Storage, + }, +}; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus, View}, - data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, + consensus::{Consensus, View}, + data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::Proposal, simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ - block_contents::BlockHeader, - election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - signature_key::SignatureKey, - states::ValidatedState, - storage::Storage, - BlockPayload, + block_contents::BlockHeader, election::Membership, node_implementation::NodeType, + signature_key::SignatureKey, states::ValidatedState, BlockPayload, }, utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, }; #[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::{message::GeneralConsensusMessage, simple_vote::QuorumData}; +use hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}; +#[cfg(not(feature = "dependency-tasks"))] +use std::marker::PhantomData; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, warn}; +#[cfg(not(feature = "dependency-tasks"))] +use tracing::error; +use tracing::{debug, info, warn}; +#[cfg(not(feature = "dependency-tasks"))] use vbs::version::Version; -use super::ConsensusTaskState; -#[cfg(feature = "dependency-tasks")] -use crate::quorum_proposal_recv::QuorumProposalRecvTaskState; -use crate::{ - consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, - events::HotShotEvent, - helpers::{broadcast_event, AnyhowTracing}, -}; - /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. /// @@ -66,7 +78,7 @@ pub async fn validate_proposal_safety_and_liveness( sender: TYPES::SignatureKey, event_sender: Sender>, ) -> Result<()> { - let view = proposal.data.view_number(); + let view_number = proposal.data.view_number(); let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); ensure!( @@ -74,6 +86,29 @@ pub async fn validate_proposal_safety_and_liveness( "Proposed leaf does not extend the parent leaf." ); + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state, + delta: None, // May be updated to `Some` in the vote task. + }, + }; + + consensus + .write() + .await + .update_validated_state_map(view_number, view.clone()); + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment // // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: @@ -117,7 +152,7 @@ pub async fn validate_proposal_safety_and_liveness( if let Err(e) = outcome { broadcast_event( Event { - view_number: view, + view_number, event: EventType::Error { error: Arc::new(e) }, }, &event_sender, @@ -132,7 +167,7 @@ pub async fn validate_proposal_safety_and_liveness( broadcast_event( Event { - view_number: view, + view_number, event: EventType::QuorumProposal { proposal: proposal.clone(), sender, @@ -157,6 +192,7 @@ pub async fn validate_proposal_safety_and_liveness( /// Create the header for a proposal, build the proposal, and broadcast /// the proposal send evnet. #[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] pub async fn create_and_send_proposal( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -379,6 +415,7 @@ pub(crate) async fn parent_leaf_and_state( /// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the /// standard case proposal scenario. #[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] pub async fn publish_proposal_from_commitment_and_metadata( cur_view: TYPES::Time, view: TYPES::Time, @@ -464,6 +501,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( /// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either /// `commitment_and_metadata`, or a `decided_upgrade_cert`. #[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] pub async fn publish_proposal_if_able( cur_view: TYPES::Time, view: TYPES::Time, @@ -499,11 +537,11 @@ pub async fn publish_proposal_if_able( .await } -// TODO: Fix `clippy::too_many_lines`. /// Handle the received quorum proposal. /// /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. #[allow(clippy::too_many_lines)] +#[cfg(not(feature = "dependency-tasks"))] pub(crate) async fn handle_quorum_proposal_recv>( proposal: &Proposal>, sender: &TYPES::SignatureKey, @@ -906,6 +944,7 @@ pub async fn decide_from_proposal( /// Handle `QuorumProposalValidated` event content and submit a proposal if possible. #[allow(clippy::too_many_lines)] +#[cfg(not(feature = "dependency-tasks"))] pub async fn handle_quorum_proposal_validated>( proposal: &QuorumProposal, event_stream: Sender>>, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index b4d46419f5..7b5c1912fa 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -288,6 +288,7 @@ impl> ConsensusTaskState event: Arc>, event_stream: Sender>>, ) { + #[cfg(not(feature = "dependency-tasks"))] let version = *self.version.read().await; match event.as_ref() { #[cfg(not(feature = "dependency-tasks"))] diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8b7940e4ed..adcfba6963 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -160,12 +160,6 @@ pub enum HotShotEvent { /// HotShot was upgraded, with a new network version. VersionUpgrade(Version), - /// Initiate a proposal for a proposal without a parent, but passing the liveness check. - /// This is distinct from `QuorumProposalValidated` due to the fact that it is in a - /// different state than what we'd typically see with a fully validated proposal and, - /// as a result, it need to be its own event. - QuorumProposalLivenessValidated(QuorumProposal), - /// Initiate a vote right now for the designated view. VoteNow(TYPES::Time, VoteDependencyData), @@ -398,13 +392,6 @@ impl Display for HotShotEvent { cert.view_number() ), HotShotEvent::VersionUpgrade(_) => write!(f, "VersionUpgrade"), - HotShotEvent::QuorumProposalLivenessValidated(proposal) => { - write!( - f, - "QuorumProposalLivenessValidated(view_number={:?})", - proposal.view_number() - ) - } HotShotEvent::UpgradeDecided(cert) => { write!(f, "UpgradeDecided(view_number{:?})", cert.view_number()) } diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index 9e1b5eb8b6..accd2ee2fe 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -39,8 +39,7 @@ pub(crate) enum ProposalDependency { /// For the `QcFormed` event timeout branch. TimeoutCert, - /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv` or the - /// `QuorumProposalLivenessValidated` event during the liveness check in `QuorumProposalRecv`. + /// For the `QuroumProposalRecv` event. Proposal, /// For the `VidShareValidated` event. @@ -211,10 +210,7 @@ impl HandleDepOutput for ProposalDependencyHandle { HotShotEvent::VidShareValidated(share) => { vid_share = Some(share.clone()); } - _ => { - // QuorumProposalLivenessValidated and QuorumProposalValidated are implicitly - // handled here. - } + _ => {} } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index b649d234d5..fff4dd127d 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -28,10 +28,7 @@ use tokio::task::JoinHandle; use tracing::{debug, instrument, warn}; use vbs::version::Version; -use self::{ - dependency_handle::{ProposalDependency, ProposalDependencyHandle}, - handlers::handle_quorum_proposal_validated, -}; +use self::dependency_handle::{ProposalDependency, ProposalDependencyHandle}; use crate::{ events::HotShotEvent, helpers::{broadcast_event, cancel_task}, @@ -39,9 +36,6 @@ use crate::{ mod dependency_handle; -/// Event handlers for [`QuorumProposalTaskState`]. -mod handlers; - /// The state for the quorum proposal task. pub struct QuorumProposalTaskState> { /// Latest view number that has been proposed for. @@ -133,14 +127,9 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { - proposal.view_number() + 1 - } else if let HotShotEvent::QuorumProposalLivenessValidated(proposal) = - event - { - proposal.view_number() + 1 + if let HotShotEvent::QuorumProposalRecv(proposal, _) = event { + proposal.data.view_number() + 1 } else { return false; } @@ -236,9 +225,7 @@ impl> QuorumProposalTaskState { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); } - // All proposals are equivalent in this case. - HotShotEvent::QuorumProposalValidated(..) - | HotShotEvent::QuorumProposalLivenessValidated(_) => { + HotShotEvent::QuorumProposalRecv(..) => { proposal_dependency.mark_as_completed(event); } HotShotEvent::QcFormed(quorum_certificate) => match quorum_certificate { @@ -266,13 +253,12 @@ impl> QuorumProposalTaskState 1 { secondary_deps.push(AndDependency::from_deps(vec![ qc_dependency, @@ -385,16 +371,6 @@ impl> QuorumProposalTaskState { self.version = *version; } - HotShotEvent::QuorumProposalLivenessValidated(proposal) => { - // We may not be able to propose off of this, but we still spin up an event just in case - // the other data is already here, we're still proposing for view + 1 here. - self.create_dependency_task_if_new( - proposal.view_number() + 1, - event_receiver, - event_sender, - Arc::clone(&event), - ); - } HotShotEvent::QcFormed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; @@ -454,8 +430,8 @@ impl> QuorumProposalTaskState { - let view_number = proposal.view_number(); + HotShotEvent::QuorumProposalRecv(proposal, _) => { + let view_number = proposal.data.view_number(); // All nodes get the latest proposed view as a proxy of `cur_view` of olde. if !self.update_latest_proposed_view(view_number).await { @@ -463,13 +439,6 @@ impl> QuorumProposalTaskState>( proposal: &Proposal>, event_sender: &Sender>>, - justify_qc: &QuorumCertificate, task_state: &mut QuorumProposalRecvTaskState, -) -> Option> { +) -> Result { let view_number = proposal.data.view_number(); let mut consensus_write = task_state.consensus.write().await; @@ -52,7 +59,7 @@ async fn validate_proposal_liveness consensus_write.locked_view(); + let liveness_check = + proposal.data.justify_qc.clone().view_number() > consensus_write.locked_view(); - let high_qc = consensus_write.high_qc().clone(); drop(consensus_write); // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. @@ -89,45 +96,27 @@ async fn validate_proposal_liveness>( proposal: &Proposal>, sender: &TYPES::SignatureKey, event_sender: &Sender>>, task_state: &mut QuorumProposalRecvTaskState, -) -> Result>> { +) -> Result { let sender = sender.clone(); let cur_view = task_state.cur_view; @@ -138,7 +127,7 @@ pub(crate) async fn handle_quorum_proposal_recv> QuorumProposalRecvTaskState< #[cfg(feature = "dependency-tasks")] if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { match handle_quorum_proposal_recv(proposal, sender, &event_stream, self).await { - Ok(Some(current_proposal)) => { + Ok(QuorumProposalValidity::Fully) => { + self.cancel_tasks(proposal.data.view_number()).await; + } + Ok(QuorumProposalValidity::Liveness) => { // Build the parent leaf since we didn't find it during the proposal check. let parent_leaf = match parent_leaf_and_state( self.cur_view, @@ -148,13 +152,13 @@ impl> QuorumProposalRecvTaskState< } }; - let view = current_proposal.view_number(); - self.cancel_tasks(proposal.data.view_number()).await; + let view_number = proposal.data.view_number(); + self.cancel_tasks(view_number).await; let consensus = self.consensus.read().await; - let Some(vid_shares) = consensus.vid_shares().get(&view) else { + let Some(vid_shares) = consensus.vid_shares().get(&view_number) else { debug!( "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - view + view_number ); return; }; @@ -162,21 +166,18 @@ impl> QuorumProposalRecvTaskState< error!("Did not get a VID share for our public key, aborting vote"); return; }; - let Some(da_cert) = consensus - .saved_da_certs() - .get(¤t_proposal.view_number()) - else { + let Some(da_cert) = consensus.saved_da_certs().get(&view_number) else { debug!( "Received VID share, but couldn't find DAC cert for view {:?}", - current_proposal.view_number() + view_number ); return; }; broadcast_event( Arc::new(HotShotEvent::VoteNow( - view, + view_number, VoteDependencyData { - quorum_proposal: current_proposal, + quorum_proposal: proposal.data.clone(), parent_leaf, vid_share: vid_share.clone(), da_cert: da_cert.clone(), @@ -186,9 +187,6 @@ impl> QuorumProposalRecvTaskState< ) .await; } - Ok(None) => { - self.cancel_tasks(proposal.data.view_number()).await; - } Err(e) => debug!(?e, "Failed to propose"), } } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_vote/handlers.rs similarity index 95% rename from task-impls/src/quorum_proposal/handlers.rs rename to task-impls/src/quorum_vote/handlers.rs index 180cf36e5f..9a039f3bb0 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -7,10 +7,11 @@ use hotshot_types::{ data::QuorumProposal, event::{Event, EventType}, traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + vote::HasViewNumber, }; use tracing::debug; -use super::QuorumProposalTaskState; +use super::QuorumVoteTaskState; use crate::{ consensus::helpers::{decide_from_proposal, LeafChainTraversalOutcome}, events::HotShotEvent, @@ -24,7 +25,7 @@ pub(crate) async fn handle_quorum_proposal_validated< >( proposal: &QuorumProposal, sender: &Sender>>, - task_state: &mut QuorumProposalTaskState, + task_state: &mut QuorumVoteTaskState, ) -> Result<()> { let LeafChainTraversalOutcome { new_locked_view_number, @@ -87,7 +88,7 @@ pub(crate) async fn handle_quorum_proposal_validated< .last_decided_view .set(usize::try_from(consensus_writer.last_decided_view().u64()).unwrap()); let cur_number_of_views_per_decide_event = - *task_state.latest_proposed_view - consensus_writer.last_decided_view().u64(); + *proposal.view_number() - consensus_writer.last_decided_view().u64(); consensus_writer .metrics .number_of_views_per_decide_event diff --git a/task-impls/src/quorum_vote.rs b/task-impls/src/quorum_vote/mod.rs similarity index 98% rename from task-impls/src/quorum_vote.rs rename to task-impls/src/quorum_vote/mod.rs index 0053cd69b0..ddb6483a6c 100644 --- a/task-impls/src/quorum_vote.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -2,6 +2,11 @@ use anyhow::{bail, ensure, Context, Result}; use std::{collections::HashMap, sync::Arc}; use vbs::version::Version; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + quorum_vote::handlers::handle_quorum_proposal_validated, +}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -36,10 +41,8 @@ use jf_vid::VidScheme; use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; +/// Event handlers for `QuorumProposalValidated`. +mod handlers; /// Vote dependency types. #[derive(Debug, PartialEq)] @@ -513,10 +516,13 @@ impl> QuorumVoteTaskState { - // This task simultaneously does not rely on the state updates of the `handle_quorum_proposal_validated` - // function and that function does not return an `Error` unless the propose or vote fails, in which case - // the other would still have been attempted regardless. Therefore, we pass this through as a task and - // eschew validation in lieu of the `QuorumProposal` task doing it for us and updating the internal state. + // Handle the event before creating the dependency task. + if let Err(e) = + handle_quorum_proposal_validated(proposal, &event_sender, self).await + { + debug!("Failed to handle QuorumProposalValidated event; error = {e:#}"); + } + self.create_dependency_task_if_new( proposal.view_number, event_receiver, diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 2c7333c659..b54b011f76 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -199,6 +199,7 @@ impl NetworkResponseState { self.quorum.has_stake(sender) } /// Lookup the proposal for the view and respond if it's found/not found + #[allow(clippy::no_effect_underscore_binding)] async fn respond_with_proposal(&self, _view: TYPES::Time) -> ResponseMessage { // Complete after we are storing our last proposed view: // https://github.com/EspressoSystems/HotShot/issues/3240 diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index a1e1c249b8..2177ddeb40 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "dependency-tasks"))] + // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] @@ -16,11 +18,11 @@ use hotshot_testing::{ all_predicates, helpers::{ build_system_handle, key_pair_for_id, permute_input_with_index_order, - vid_scheme_from_view_number, vid_share, + vid_scheme_from_view_number, vid_share, build_fake_view_with_leaf }, predicates::event::{ all_predicates, exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, - timeout_vote_send, + timeout_vote_send, validated_state_updated }, random, script::{Expectations, InputOrder, TaskScript}, @@ -32,6 +34,7 @@ use hotshot_types::{ simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, traits::{election::Membership, node_implementation::ConsensusTime}, utils::BuilderCommitment, + vote::HasViewNumber, }; use jf_vid::VidScheme; use sha2::Digest; @@ -39,7 +42,6 @@ use sha2::Digest; const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { @@ -62,12 +64,14 @@ async fn test_consensus_task() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut leaves = Vec::new(); let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -98,11 +102,13 @@ async fn test_consensus_task() { let expectations = vec![ Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ]), Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(2))), quorum_proposal_validated(), quorum_proposal_send(), @@ -120,7 +126,6 @@ async fn test_consensus_task() { } #[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_vote() { @@ -136,12 +141,14 @@ async fn test_consensus_vote() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); + let mut leaves = Vec::new(); let mut votes = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); votes.push(view.create_quorum_vote(&handle)); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -156,6 +163,7 @@ async fn test_consensus_vote() { ]]; let expectations = vec![Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), @@ -172,7 +180,6 @@ async fn test_consensus_vote() { } #[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { @@ -282,6 +289,7 @@ async fn test_view_sync_finalize_propose() { let expectations = vec![ Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), @@ -289,6 +297,7 @@ async fn test_view_sync_finalize_propose() { Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(4))), quorum_proposal_validated(), quorum_proposal_send(), @@ -306,7 +315,6 @@ async fn test_view_sync_finalize_propose() { } #[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task @@ -378,12 +386,14 @@ async fn test_view_sync_finalize_vote() { let expectations = vec![ Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())) ]), Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), Expectations::from_outputs(all_predicates![ + validated_state_updated(), quorum_proposal_validated(), quorum_vote_send() ]), @@ -400,7 +410,6 @@ async fn test_view_sync_finalize_vote() { } #[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task @@ -483,6 +492,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { let expectations = vec![ Expectations::from_outputs(all_predicates![ quorum_proposal_validated(), + validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), ]), Expectations::from_outputs(vec![exact(QuorumVoteSend(votes[0].clone()))]), @@ -502,7 +512,6 @@ async fn test_view_sync_finalize_vote_fail_view_number() { } #[cfg(test)] -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_vid_disperse_storage_failure() { @@ -539,6 +548,7 @@ async fn test_vid_disperse_storage_failure() { ]]; let expectations = vec![Expectations::from_outputs(all_predicates![ + validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), quorum_proposal_validated(), ])]; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 6022d44d89..5908fcc2fa 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -3,19 +3,22 @@ use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes}, + state_types::TestValidatedState, +}; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ events::HotShotEvent::*, quorum_proposal_recv::QuorumProposalRecvTaskState, }; use hotshot_testing::{ - helpers::build_system_handle, + helpers::{build_fake_view_with_leaf_and_state, build_system_handle}, predicates::event::{all_predicates, exact, vote_now}, script::InputOrder, serial, view_generator::TestViewGenerator, }; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_types::{data::ViewNumber, traits::{node_implementation::ConsensusTime,ValidatedState}}; #[cfg(test)] #[cfg(feature = "dependency-tasks")] @@ -73,6 +76,15 @@ async fn test_quorum_proposal_recv_task() { let expectations = vec![Expectations::from_outputs(vec![ exact(ViewChange(ViewNumber::new(2))), exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(ValidatedStateUpdated( + ViewNumber::new(2), + build_fake_view_with_leaf_and_state( + leaves[1].clone(), + >::from_header( + &proposals[1].data.block_header, + ), + ), + )), exact(QuorumProposalValidated( proposals[1].data.clone(), leaves[0].clone(), @@ -181,7 +193,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { )), exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), exact(NewUndecidedView(leaves[2].clone())), - exact(QuorumProposalLivenessValidated(proposals[2].data.clone())), vote_now(), ])]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 4fb8753adb..a70d74017d 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,8 +1,7 @@ #![cfg(feature = "dependency-tasks")] -use std::sync::Arc; use std::time::Duration; - +#[cfg(not(feature = "dependency-tasks"))] use committable::Committable; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; @@ -11,31 +10,39 @@ use hotshot_example_types::state_types::TestValidatedState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, + }; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_example_types::{state_types::TestInstanceState,}; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_testing::{ + all_predicates, + helpers::{ + build_cert, key_pair_for_id + }}; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ - events::HotShotEvent::{self, *}, + events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState, }; use hotshot_testing::{ all_predicates, helpers::{ - build_cert, build_fake_view_with_leaf, build_system_handle, key_pair_for_id, + build_fake_view_with_leaf, build_system_handle, vid_scheme_from_view_number, vid_share, }, predicates::{ - event::{all_predicates, exact, leaf_decided, quorum_proposal_send}, - Predicate, + event::{all_predicates, exact, quorum_proposal_send}, }, random, script::{Expectations, InputOrder, TaskScript}, serial, view_generator::TestViewGenerator, }; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_types::{simple_certificate::QuorumCertificate,}; use hotshot_types::{ data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, - simple_certificate::QuorumCertificate, simple_vote::{TimeoutData, ViewSyncFinalizeData}, traits::{ election::Membership, @@ -221,7 +228,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf), + QuorumProposalRecv(proposals[0].clone(), leaders[0]), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -237,7 +244,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + QuorumProposalRecv(proposals[1].clone(), leaders[1]), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(3)), @@ -253,7 +260,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + QuorumProposalRecv(proposals[2].clone(), leaders[2]), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(4)), @@ -269,7 +276,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), + QuorumProposalRecv(proposals[3].clone(), leaders[3]), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(5)), @@ -299,9 +306,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { proposals[3].data.justify_qc.clone(), ))]), Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(2))), - exact(LastDecidedViewUpdated(ViewNumber::new(1))), - leaf_decided(), exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), ]), ]; @@ -474,7 +478,7 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_proposal_liveness_check_proposal() { +async fn test_quorum_proposal_task_liveness_check() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -532,7 +536,7 @@ async fn test_quorum_proposal_liveness_check_proposal() { ), ], random![ - QuorumProposalValidated(proposals[0].data.clone(), genesis_leaf.clone()), + QuorumProposalRecv(proposals[0].clone(), leaders[0]), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -548,7 +552,7 @@ async fn test_quorum_proposal_liveness_check_proposal() { ), ], random![ - QuorumProposalLivenessValidated(proposals[1].data.clone()), + QuorumProposalRecv(proposals[1].clone(), leaders[1]), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(3)), @@ -563,8 +567,9 @@ async fn test_quorum_proposal_liveness_check_proposal() { build_fake_view_with_leaf(leaves[1].clone()), ), ], + random![ - QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + QuorumProposalRecv(proposals[2].clone(), leaders[2]), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(4)), @@ -580,7 +585,7 @@ async fn test_quorum_proposal_liveness_check_proposal() { ), ], random![ - QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), + QuorumProposalRecv(proposals[3].clone(), leaders[3]), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( make_payload_commitment(&quorum_membership, ViewNumber::new(5)), @@ -609,10 +614,7 @@ async fn test_quorum_proposal_liveness_check_proposal() { Expectations::from_outputs(vec![exact(UpdateHighQc( proposals[3].data.justify_qc.clone(), ))]), - Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(2))), - exact(LastDecidedViewUpdated(ViewNumber::new(1))), - leaf_decided(), + Expectations::from_outputs(vec![ exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), ]), ]; @@ -655,10 +657,8 @@ async fn test_quorum_proposal_task_with_incomplete_events() { // We run the task here at view 2, but this time we ignore the crucial piece of evidence: the // payload commitment and metadata. Instead we send only one of the three "OR" required fields. // This should result in the proposal failing to be sent. - let inputs = vec![serial![QuorumProposalValidated( - proposals[1].data.clone(), - leaves[0].clone(), - )]]; + let inputs = vec![serial![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), + ]]; let expectations = vec![Expectations::from_outputs(vec![])]; @@ -672,26 +672,3 @@ async fn test_quorum_proposal_task_with_incomplete_events() { }; run_test![inputs, script].await; } - -/// This function generates the outputs to the quorum proposal task (i.e. the emitted events). -/// This happens depending on the view and chain length. -fn generate_outputs( - chain_length: i32, - current_view_number: u64, -) -> Vec>>>> { - match chain_length { - // This is not - 2 because we start from the parent - 2 => vec![exact(LockedViewUpdated(ViewNumber::new( - current_view_number - 1, - )))], - // This is not - 3 because we start from the parent - 3 => vec![ - exact(LockedViewUpdated(ViewNumber::new(current_view_number - 1))), - exact(LastDecidedViewUpdated(ViewNumber::new( - current_view_number - 2, - ))), - leaf_decided(), - ], - _ => vec![], - } -} diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 69a67ed257..1b48e44ed7 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -14,6 +14,7 @@ use hotshot_testing::{ use hotshot_types::{ data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, }; + use std::time::Duration; const TIMEOUT: Duration = Duration::from_millis(35); @@ -177,8 +178,7 @@ async fn test_quorum_vote_task_miss_dependency() { } drop(consensus_writer); - // Send three of quorum proposal, DAC, VID share data, and validated state, in which case - // there's no vote. + // Send two of quorum proposal, DAC, VID share data, in which case there's no vote. let inputs = vec![ random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), @@ -195,8 +195,13 @@ async fn test_quorum_vote_task_miss_dependency() { ]; let expectations = vec![ - Expectations::from_outputs(vec![exact(VidShareValidated(vids[1].0[0].clone()))]), - Expectations::from_outputs(vec![exact(DaCertificateValidated(dacs[2].clone()))]), + Expectations::from_outputs(all_predicates![ + exact(VidShareValidated(vids[1].0[0].clone())) + ]), + Expectations::from_outputs(all_predicates![ + exact(LockedViewUpdated(ViewNumber::new(1))), + exact(DaCertificateValidated(dacs[2].clone())) + ]), Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), @@ -213,3 +218,55 @@ async fn test_quorum_vote_task_miss_dependency() { }; run_test![inputs, script].await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_quorum_vote_task_incorrect_dependency() { + use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; + use hotshot_testing::{ + helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + let mut proposals = Vec::new(); + let mut leaves = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(2).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + leaves.push(view.leaf.clone()); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + // Send the correct quorum proposal and DAC, and incorrect VID share data. + let inputs = vec![random![ + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + DaCertificateRecv(dacs[1].clone()), + VidShareRecv(vids[0].0[0].clone()), + ]]; + + let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(DaCertificateValidated(dacs[1].clone())), + exact(VidShareValidated(vids[0].0[0].clone())), + ])]; + + let quorum_vote_state = + QuorumVoteTaskState::::create_from(&handle).await; + + let mut script = TaskScript { + timeout: TIMEOUT, + state: quorum_vote_state, + expectations, + }; + run_test![inputs, script].await; +} diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 13d58165a5..6f4db6f49d 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -15,7 +15,7 @@ use hotshot_task_impls::{ consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, }; use hotshot_testing::{ - helpers::vid_share, + helpers::{build_fake_view_with_leaf, vid_share}, predicates::{event::*, upgrade::*}, script::{Expectations, TaskScript}, view_generator::TestViewGenerator, @@ -24,6 +24,7 @@ use hotshot_types::{ data::{null_block, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, + vote::HasViewNumber, }; use vbs::version::Version; @@ -58,6 +59,7 @@ async fn test_consensus_task_upgrade() { let mut dacs = Vec::new(); let mut vids = Vec::new(); let mut leaders = Vec::new(); + let mut leaves = Vec::new(); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -67,6 +69,7 @@ async fn test_consensus_task_upgrade() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); } generator.add_upgrade(upgrade_data); @@ -77,6 +80,7 @@ async fn test_consensus_task_upgrade() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); } let inputs = vec![ vec![ @@ -111,6 +115,7 @@ async fn test_consensus_task_upgrade() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(1))), + validated_state_updated(), quorum_proposal_validated(), exact(QuorumVoteSend(votes[0].clone())), ], @@ -119,6 +124,7 @@ async fn test_consensus_task_upgrade() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(2))), + validated_state_updated(), quorum_proposal_validated(), exact(QuorumVoteSend(votes[1].clone())), ], @@ -127,6 +133,7 @@ async fn test_consensus_task_upgrade() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(3))), + validated_state_updated(), quorum_proposal_validated(), exact(QuorumVoteSend(votes[2].clone())), ], @@ -135,6 +142,7 @@ async fn test_consensus_task_upgrade() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(4))), + validated_state_updated(), quorum_proposal_validated(), leaf_decided(), exact(QuorumVoteSend(votes[3].clone())), @@ -144,6 +152,7 @@ async fn test_consensus_task_upgrade() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(5))), + validated_state_updated(), quorum_proposal_validated(), leaf_decided(), exact(QuorumVoteSend(votes[4].clone())), @@ -153,6 +162,7 @@ async fn test_consensus_task_upgrade() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(6))), + validated_state_updated(), quorum_proposal_validated(), upgrade_decided(), leaf_decided(), @@ -276,6 +286,7 @@ async fn test_upgrade_and_consensus_task() { Expectations { output_asserts: vec![ exact::(ViewChange(ViewNumber::new(1))), + validated_state_updated(), quorum_proposal_validated::(), quorum_vote_send::(), ], @@ -288,6 +299,7 @@ async fn test_upgrade_and_consensus_task() { Expectations { output_asserts: vec![ exact::(ViewChange(ViewNumber::new(2))), + validated_state_updated(), quorum_proposal_validated::(), quorum_vote_send(), ], @@ -519,6 +531,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { Expectations { output_asserts: vec![ exact::(ViewChange(ViewNumber::new(1))), + validated_state_updated(), quorum_proposal_validated(), quorum_vote_send(), ], @@ -527,6 +540,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(2))), + validated_state_updated(), quorum_proposal_validated(), quorum_vote_send(), ], @@ -535,6 +549,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(3))), + validated_state_updated(), quorum_proposal_validated(), quorum_vote_send(), ], @@ -543,6 +558,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(4))), + validated_state_updated(), quorum_proposal_validated(), leaf_decided(), quorum_vote_send(), @@ -552,6 +568,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(5))), + validated_state_updated(), quorum_proposal_validated(), upgrade_decided(), leaf_decided(), From 7ee06dd21379a38a40a5e47796294395ee2ef7d7 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 12 Jun 2024 13:17:51 -0400 Subject: [PATCH 1077/1393] Fee upgrade (#3236) --- example-types/src/node_types.rs | 9 +- examples/infra/mod.rs | 34 +-- examples/libp2p/types.rs | 9 +- hotshot/src/lib.rs | 46 ++- hotshot/src/tasks/mod.rs | 50 ++- hotshot/src/tasks/task_state.rs | 43 ++- hotshot/src/traits/networking.rs | 4 +- .../src/traits/networking/combined_network.rs | 114 +++---- .../src/traits/networking/libp2p_network.rs | 284 ++++++++---------- .../src/traits/networking/memory_network.rs | 95 +++--- .../src/traits/networking/push_cdn_network.rs | 87 ++---- hotshot/src/types/handle.rs | 4 +- libp2p-networking/Cargo.toml | 2 +- libp2p-networking/src/network/node/handle.rs | 96 +++--- libp2p-networking/tests/counter.rs | 39 +-- orchestrator/run-config.toml | 6 + orchestrator/src/client.rs | 6 +- orchestrator/src/config.rs | 34 +++ orchestrator/src/lib.rs | 6 +- task-impls/src/consensus/helpers.rs | 37 +-- task-impls/src/consensus/mod.rs | 3 + task-impls/src/consensus/view_change.rs | 2 +- task-impls/src/network.rs | 134 +++++---- task-impls/src/quorum_vote/mod.rs | 15 +- task-impls/src/request.rs | 112 +++---- task-impls/src/response.rs | 66 ++-- task-impls/src/rewind.rs | 4 +- task-impls/src/upgrade.rs | 120 ++++---- testing/src/block_builder.rs | 37 ++- testing/src/spinning_task.rs | 3 +- testing/src/test_builder.rs | 4 + testing/src/test_launcher.rs | 5 +- testing/src/test_runner.rs | 5 +- testing/src/test_task.rs | 23 +- testing/tests/tests_1/block_builder.rs | 4 +- testing/tests/tests_1/network_task.rs | 14 +- testing/tests/tests_1/upgrade_task.rs | 8 +- testing/tests/tests_3/memory_network.rs | 101 ++++--- types/src/constants.rs | 29 +- types/src/error.rs | 2 + types/src/lib.rs | 8 + types/src/message.rs | 117 +++++++- types/src/traits/network.rs | 71 +++-- types/src/traits/node_implementation.rs | 5 +- 44 files changed, 992 insertions(+), 905 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 822ff21053..85388835aa 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -5,7 +5,6 @@ use hotshot::traits::{ }; use hotshot_types::{ data::ViewNumber, - message::Message, signature_key::{BLSPubKey, BuilderKey}, traits::node_implementation::NodeType, }; @@ -75,8 +74,8 @@ impl NodeImplementation for PushCdnImpl { } impl NodeImplementation for MemoryImpl { - type QuorumNetwork = MemoryNetwork, TYPES::SignatureKey>; - type DaNetwork = MemoryNetwork, TYPES::SignatureKey>; + type QuorumNetwork = MemoryNetwork; + type DaNetwork = MemoryNetwork; type Storage = TestStorage; } @@ -87,7 +86,7 @@ impl NodeImplementation for CombinedImpl { } impl NodeImplementation for Libp2pImpl { - type QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>; - type DaNetwork = Libp2pNetwork, TYPES::SignatureKey>; + type QuorumNetwork = Libp2pNetwork; + type DaNetwork = Libp2pNetwork; type Storage = TestStorage; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 0ef93338bf..cf6586cdc9 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -50,7 +50,6 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, event::{Event, EventType}, - message::Message, traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, @@ -327,8 +326,8 @@ fn calculate_num_tx_per_round( #[async_trait] pub trait RunDa< TYPES: NodeType, - DANET: ConnectedNetwork, TYPES::SignatureKey>, - QUORUMNET: ConnectedNetwork, TYPES::SignatureKey>, + DANET: ConnectedNetwork, + QUORUMNET: ConnectedNetwork, NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMNET, @@ -674,9 +673,9 @@ pub struct Libp2pDaRun { /// the network configuration config: NetworkConfig, /// quorum channel - quorum_channel: Libp2pNetwork, TYPES::SignatureKey>, + quorum_channel: Libp2pNetwork, /// data availability channel - da_channel: Libp2pNetwork, TYPES::SignatureKey>, + da_channel: Libp2pNetwork, } #[async_trait] @@ -689,17 +688,12 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = Libp2pNetwork, TYPES::SignatureKey>, - DaNetwork = Libp2pNetwork, TYPES::SignatureKey>, + QuorumNetwork = Libp2pNetwork, + DaNetwork = Libp2pNetwork, Storage = TestStorage, >, - > - RunDa< - TYPES, - Libp2pNetwork, TYPES::SignatureKey>, - Libp2pNetwork, TYPES::SignatureKey>, - NODE, - > for Libp2pDaRun + > RunDa, Libp2pNetwork, NODE> + for Libp2pDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -752,11 +746,11 @@ where } } - fn da_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { + fn da_channel(&self) -> Libp2pNetwork { self.da_channel.clone() } - fn quorum_channel(&self) -> Libp2pNetwork, TYPES::SignatureKey> { + fn quorum_channel(&self) -> Libp2pNetwork { self.quorum_channel.clone() } @@ -806,8 +800,8 @@ where let libp2p_da_run: Libp2pDaRun = as RunDa< TYPES, - Libp2pNetwork, TYPES::SignatureKey>, - Libp2pNetwork, TYPES::SignatureKey>, + Libp2pNetwork, + Libp2pNetwork, Libp2pImpl, >>::initialize_networking(config.clone(), libp2p_advertise_address) .await; @@ -870,8 +864,8 @@ pub async fn main_entry_point< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - DACHANNEL: ConnectedNetwork, TYPES::SignatureKey>, - QUORUMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + DACHANNEL: ConnectedNetwork, + QUORUMCHANNEL: ConnectedNetwork, NODE: NodeImplementation< TYPES, QuorumNetwork = QUORUMCHANNEL, diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index 6823611656..5ec7c6c100 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -2,10 +2,7 @@ use std::fmt::Debug; use hotshot::traits::implementations::Libp2pNetwork; use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; -use hotshot_types::{ - message::Message, - traits::node_implementation::{NodeImplementation, NodeType}, -}; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use crate::infra::Libp2pDaRun; @@ -15,9 +12,9 @@ use crate::infra::Libp2pDaRun; pub struct NodeImpl {} /// convenience type alias -pub type DaNetwork = Libp2pNetwork, ::SignatureKey>; +pub type DaNetwork = Libp2pNetwork<::SignatureKey>; /// convenience type alias -pub type QuorumNetwork = Libp2pNetwork, ::SignatureKey>; +pub type QuorumNetwork = Libp2pNetwork<::SignatureKey>; impl NodeImplementation for NodeImpl { type QuorumNetwork = QuorumNetwork; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 76056dde9b..0b7f697993 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -5,6 +5,9 @@ #[cfg(feature = "docs")] pub mod documentation; +use hotshot_types::traits::network::BroadcastDelay; +use vbs::version::StaticVersionType; + /// Contains traits consumed by [`SystemContext`] pub mod traits; /// Contains types used by the crate @@ -33,13 +36,11 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, - constants::{ - Version01, BASE_VERSION, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE, STATIC_VER_0_1, - }, + constants::{Base, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, data::{Leaf, QuorumProposal}, event::{EventType, LeafInfo}, - message::{DataMessage, Message, MessageKind, Proposal}, - simple_certificate::QuorumCertificate, + message::{DataMessage, Message, MessageKind, Proposal, VersionedMessage}, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -162,6 +163,9 @@ pub struct SystemContext> { /// Reference to the internal storage for consensus datum. pub storage: Arc>, + + /// a potential upgrade certificate that has been decided on by the consensus tasks. + pub decided_upgrade_certificate: Arc>>>, } impl> Clone for SystemContext { #![allow(deprecated)] @@ -183,6 +187,7 @@ impl> Clone for SystemContext> SystemContext { let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); let (mut external_tx, mut external_rx) = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); + let decided_upgrade_certificate = Arc::new(RwLock::new(None)); + // Allow overflow on the channel, otherwise sending to it may block. external_rx.set_overflow(true); @@ -271,7 +278,7 @@ impl> SystemContext { ); let consensus = Arc::new(RwLock::new(consensus)); - let version = Arc::new(RwLock::new(BASE_VERSION)); + let version = Arc::new(RwLock::new(Base::VERSION)); // This makes it so we won't block on broadcasting if there is not a receiver // Our own copy of the receiver is inactive so it doesn't count. @@ -294,6 +301,7 @@ impl> SystemContext { external_event_stream: (external_tx, external_rx.deactivate()), anchored_leaf: anchored_leaf.clone(), storage: Arc::new(RwLock::new(storage)), + decided_upgrade_certificate, }); Ok(inner) @@ -407,6 +415,7 @@ impl> SystemContext { pub async fn publish_transaction_async( &self, transaction: TYPES::Transaction, + decided_upgrade_certificate: Arc>>>, ) -> Result<(), HotShotError> { trace!("Adding transaction to our own queue"); @@ -414,7 +423,18 @@ impl> SystemContext { let view_number = api.consensus.read().await.cur_view(); // Wrap up a message - let message = DataMessage::SubmitTransaction(transaction.clone(), view_number); + let message_kind: DataMessage = + DataMessage::SubmitTransaction(transaction.clone(), view_number); + let message = Message { + sender: api.public_key.clone(), + kind: MessageKind::from(message_kind), + }; + + let cert = decided_upgrade_certificate.read().await.clone(); + + let serialized_message = message + .serialize(&cert) + .map_err(|_| HotShotError::FailedToSerialize)?; async_spawn(async move { let da_membership = &api.memberships.da_membership.clone(); @@ -430,12 +450,9 @@ impl> SystemContext { .networks .da_network .broadcast_message( - Message { - sender: api.public_key.clone(), - kind: MessageKind::from(message), - }, + serialized_message, da_membership.whole_committee(view_number), - STATIC_VER_0_1, + BroadcastDelay::None, ), api .send_external_event(Event { @@ -583,8 +600,7 @@ impl> SystemContext { add_network_message_task(&mut handle, Arc::clone(&quorum_network)).await; add_network_message_task(&mut handle, Arc::clone(&da_network)).await; - if let Some(request_receiver) = da_network.spawn_request_receiver_task(STATIC_VER_0_1).await - { + if let Some(request_receiver) = da_network.spawn_request_receiver_task().await { add_response_task(&mut handle, request_receiver).await; add_request_network_task(&mut handle).await; } @@ -624,7 +640,7 @@ impl> SystemContext { network::vid_filter, ) .await; - add_consensus_tasks::(&mut handle).await; + add_consensus_tasks::(&mut handle).await; handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 639775e4d6..165ca013e5 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,7 +5,6 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use hotshot_task::task::Task; #[cfg(not(feature = "dependency-tasks"))] @@ -29,8 +28,7 @@ use hotshot_task_impls::{ view_sync::ViewSyncTaskState, }; use hotshot_types::{ - constants::{Version01, VERSION_0_1}, - message::{Message, Messages}, + message::{Messages, VersionedMessage}, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -38,6 +36,8 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; + /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { @@ -51,7 +51,7 @@ pub enum GlobalEvent { pub async fn add_request_network_task>( handle: &mut SystemContextHandle, ) { - let state = NetworkRequestState::::create_from(handle).await; + let state = NetworkRequestState::::create_from(handle).await; let task = Task::new( state, @@ -64,7 +64,7 @@ pub async fn add_request_network_task>( handle: &mut SystemContextHandle, - request_receiver: RequestReceiver, + request_receiver: RequestReceiver, ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), @@ -73,18 +73,16 @@ pub async fn add_response_task>( handle.public_key().clone(), handle.private_key().clone(), ); - handle - .network_registry - .register(run_response_task::( - state, - handle.internal_event_stream.1.activate_cloned(), - )); + handle.network_registry.register(run_response_task::( + state, + handle.internal_event_stream.1.activate_cloned(), + )); } /// Add the network task to handle messages and publish events. pub async fn add_network_message_task< TYPES: NodeType, I: NodeImplementation, - NET: ConnectedNetwork, TYPES::SignatureKey>, + NET: ConnectedNetwork, >( handle: &mut SystemContextHandle, channel: Arc, @@ -94,12 +92,34 @@ pub async fn add_network_message_task< event_stream: handle.internal_event_stream.0.clone(), }; + let decided_upgrade_certificate = Arc::clone(&handle.hotshot.decided_upgrade_certificate); + let network = Arc::clone(&net); let mut state = network_state.clone(); let task_handle = async_spawn(async move { loop { + let decided_upgrade_certificate_lock = decided_upgrade_certificate.read().await.clone(); let msgs = match network.recv_msgs().await { - Ok(msgs) => Messages(msgs), + Ok(msgs) => { + let mut deserialized_messages = Vec::new(); + + for msg in msgs { + let deserialized_message = match VersionedMessage::deserialize( + &msg, + &decided_upgrade_certificate_lock, + ) { + Ok(deserialized) => deserialized, + Err(e) => { + tracing::error!("Failed to deserialize message: {}", e); + return; + } + }; + + deserialized_messages.push(deserialized_message); + } + + Messages(deserialized_messages) + } Err(err) => { tracing::error!("failed to receive messages: {err}"); @@ -121,7 +141,7 @@ pub async fn add_network_message_task< pub async fn add_network_event_task< TYPES: NodeType, I: NodeImplementation, - NET: ConnectedNetwork, TYPES::SignatureKey>, + NET: ConnectedNetwork, >( handle: &mut SystemContextHandle, channel: Arc, @@ -131,10 +151,10 @@ pub async fn add_network_event_task< let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState { channel, view: TYPES::Time::genesis(), - version: VERSION_0_1, membership, filter, storage: Arc::clone(&handle.storage()), + decided_upgrade_certificate: None, }; let task = Task::new( network_state, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a546d6a700..f0b481a33e 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,10 +1,8 @@ use std::{ collections::{BTreeMap, HashMap}, - marker::PhantomData, sync::{atomic::AtomicBool, Arc}, }; -use crate::types::SystemContextHandle; use async_trait::async_trait; use chrono::Utc; #[cfg(feature = "rewind")] @@ -22,6 +20,8 @@ use hotshot_types::traits::{ }; use vbs::version::StaticVersionType; +use crate::types::SystemContextHandle; + /// Trait for creating task states. #[async_trait] pub trait CreateTaskState @@ -34,12 +34,10 @@ where } #[async_trait] -impl, V: StaticVersionType> CreateTaskState - for NetworkRequestState +impl> CreateTaskState + for NetworkRequestState { - async fn create_from( - handle: &SystemContextHandle, - ) -> NetworkRequestState { + async fn create_from(handle: &SystemContextHandle) -> NetworkRequestState { NetworkRequestState { network: Arc::clone(&handle.hotshot.networks.quorum_network), state: handle.hotshot.consensus(), @@ -49,7 +47,6 @@ impl, V: StaticVersionType> Create quorum_membership: handle.hotshot.memberships.quorum_membership.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - _phantom: PhantomData, id: handle.hotshot.id, shutdown_flag: Arc::new(AtomicBool::new(false)), spawned_tasks: BTreeMap::new(), @@ -62,20 +59,37 @@ impl> CreateTaskState for UpgradeTaskState { async fn create_from(handle: &SystemContextHandle) -> UpgradeTaskState { - UpgradeTaskState { + #[cfg(not(feature = "example-upgrade"))] + return UpgradeTaskState { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - #[cfg(not(feature = "example-upgrade"))] - should_vote: |_upgrade_proposal| false, - #[cfg(feature = "example-upgrade")] - should_vote: |_upgrade_proposal| true, vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, - } + start_proposing_view: handle.hotshot.config.start_proposing_view, + stop_proposing_view: handle.hotshot.config.stop_proposing_view, + start_voting_view: handle.hotshot.config.start_voting_view, + stop_voting_view: handle.hotshot.config.stop_voting_view, + }; + + #[cfg(feature = "example-upgrade")] + return UpgradeTaskState { + output_event_stream: handle.hotshot.external_event_stream.0.clone(), + cur_view: handle.cur_view().await, + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + vote_collector: None.into(), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + id: handle.hotshot.id, + start_proposing_view: 5, + stop_proposing_view: 10, + start_voting_view: 0, + stop_voting_view: 20, + }; } } @@ -207,6 +221,7 @@ impl> CreateTaskState quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), storage: Arc::clone(&handle.storage), + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } } diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 05292465ad..f47740c718 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -13,9 +13,7 @@ pub mod push_cdn_network; use custom_debug::Debug; use hotshot_types::traits::metrics::{Counter, Gauge, Metrics, NoMetrics}; -pub use hotshot_types::traits::network::{ - FailedToSerializeSnafu, NetworkError, NetworkReliability, -}; +pub use hotshot_types::traits::network::{NetworkError, NetworkReliability}; /// Contains several `NetworkingMetrics` that we're interested in from the networking interfaces #[derive(Clone, Debug)] diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index fc9aba7bb5..39ec59cd5a 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -31,16 +31,14 @@ use hotshot_types::{ COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }, data::ViewNumber, - message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, traits::{ - network::{ConnectedNetwork, ResponseChannel, ResponseMessage, ViewMessage}, - node_implementation::{ConsensusTime, NodeType}, + network::{BroadcastDelay, ConnectedNetwork, ResponseChannel}, + node_implementation::NodeType, }, BoxSyncFuture, }; use lru::LruCache; use tracing::{debug, warn}; -use vbs::version::StaticVersionType; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; use crate::traits::implementations::Libp2pNetwork; @@ -90,7 +88,7 @@ impl CombinedNetworks { #[must_use] pub fn new( primary_network: PushCdnNetwork, - secondary_network: Libp2pNetwork, TYPES::SignatureKey>, + secondary_network: Libp2pNetwork, delay_duration: Duration, ) -> Self { // Create networks from the ones passed in @@ -120,29 +118,17 @@ impl CombinedNetworks { /// Get a ref to the backup network #[must_use] - pub fn secondary(&self) -> &Libp2pNetwork, TYPES::SignatureKey> { + pub fn secondary(&self) -> &Libp2pNetwork { &self.networks.1 } - /// a helper function returning a bool whether a given message is of delayable type - fn should_delay(message: &Message) -> bool { - match &message.kind { - MessageKind::Consensus(consensus_message) => match &consensus_message { - SequencingMessage::General(general_consensus_message) => { - matches!(general_consensus_message, GeneralConsensusMessage::Vote(_)) - } - SequencingMessage::Da(_) => true, - }, - MessageKind::Data(_) => false, - } - } - /// a helper function to send messages through both networks (possibly delayed) async fn send_both_networks( &self, - message: Message, + _message: Vec, primary_future: impl Future> + Send + 'static, secondary_future: impl Future> + Send + 'static, + broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // A local variable used to decide whether to delay this message or not let mut primary_failed = false; @@ -170,7 +156,7 @@ impl CombinedNetworks { primary_failed = true; }; - if !primary_failed && Self::should_delay(&message) { + if let (BroadcastDelay::View(view), false) = (broadcast_delay, primary_failed) { // We are delaying this message let duration = *self.delay_duration.read().await; let primary_down = Arc::clone(&self.primary_down); @@ -181,7 +167,7 @@ impl CombinedNetworks { .delayed_tasks_channels .write() .await - .entry(message.kind.view_number().u64()) + .entry(view) .or_insert_with(|| { let (s, r) = broadcast(1); (s, r.deactivate()) @@ -257,7 +243,7 @@ impl CombinedNetworks { #[derive(Clone)] pub struct UnderlyingCombinedNetworks( pub PushCdnNetwork, - pub Libp2pNetwork, TYPES::SignatureKey>, + pub Libp2pNetwork, ); #[cfg(feature = "hotshot-testing")] @@ -272,7 +258,7 @@ impl TestableNetworkingImplementation for CombinedNetwor secondary_network_delay: Duration, ) -> AsyncGenerator<(Arc, Arc)> { let generators = ( - as TestableNetworkingImplementation<_>>::generator( + as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, network_id, @@ -281,7 +267,7 @@ impl TestableNetworkingImplementation for CombinedNetwor None, Duration::default(), ), - , TYPES::SignatureKey> as TestableNetworkingImplementation<_>>::generator( + as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, network_id, @@ -302,15 +288,11 @@ impl TestableNetworkingImplementation for CombinedNetwor let (quorum_p2p, da_p2p) = gen1.await; let da_networks = UnderlyingCombinedNetworks( cdn.clone(), - Arc::, TYPES::SignatureKey>>::unwrap_or_clone( - da_p2p, - ), + Arc::>::unwrap_or_clone(da_p2p), ); let quorum_networks = UnderlyingCombinedNetworks( cdn, - Arc::, TYPES::SignatureKey>>::unwrap_or_clone( - quorum_p2p, - ), + Arc::>::unwrap_or_clone(quorum_p2p), ); // We want to the message cache between the two networks @@ -351,27 +333,21 @@ impl TestableNetworkingImplementation for CombinedNetwor } #[async_trait] -impl ConnectedNetwork, TYPES::SignatureKey> - for CombinedNetworks -{ - async fn request_data( +impl ConnectedNetwork for CombinedNetworks { + async fn request_data( &self, - request: Message, + request: Vec, recipient: &TYPES::SignatureKey, - bind_version: VER, - ) -> Result, NetworkError> { + ) -> Result, NetworkError> { self.secondary() - .request_data(request, recipient, bind_version) + .request_data::(request, recipient) .await } - async fn spawn_request_receiver_task( + async fn spawn_request_receiver_task( &self, - bind_version: VER, - ) -> Option, ResponseChannel>)>> { - self.secondary() - .spawn_request_receiver_task(bind_version) - .await + ) -> Option, ResponseChannel>)>> { + self.secondary().spawn_request_receiver_task().await } fn pause(&self) { @@ -400,11 +376,11 @@ impl ConnectedNetwork, TYPES::SignatureKey> boxed_sync(closure) } - async fn broadcast_message( + async fn broadcast_message( &self, - message: Message, + message: Vec, recipients: BTreeSet, - bind_version: VER, + broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); @@ -415,23 +391,24 @@ impl ConnectedNetwork, TYPES::SignatureKey> message, async move { primary - .broadcast_message(primary_message, primary_recipients, bind_version) + .broadcast_message(primary_message, primary_recipients, BroadcastDelay::None) .await }, async move { secondary - .broadcast_message(secondary_message, recipients, bind_version) + .broadcast_message(secondary_message, recipients, BroadcastDelay::None) .await }, + broadcast_delay, ) .await } - async fn da_broadcast_message( + async fn da_broadcast_message( &self, - message: Message, + message: Vec, recipients: BTreeSet, - bind_version: VER, + broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); @@ -442,23 +419,23 @@ impl ConnectedNetwork, TYPES::SignatureKey> message, async move { primary - .da_broadcast_message(primary_message, primary_recipients, bind_version) + .da_broadcast_message(primary_message, primary_recipients, BroadcastDelay::None) .await }, async move { secondary - .da_broadcast_message(secondary_message, recipients, bind_version) + .da_broadcast_message(secondary_message, recipients, BroadcastDelay::None) .await }, + broadcast_delay, ) .await } - async fn direct_message( + async fn direct_message( &self, - message: Message, + message: Vec, recipient: TYPES::SignatureKey, - bind_version: VER, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); @@ -469,34 +446,27 @@ impl ConnectedNetwork, TYPES::SignatureKey> message, async move { primary - .direct_message(primary_message, primary_recipient, bind_version) - .await - }, - async move { - secondary - .direct_message(secondary_message, recipient, bind_version) + .direct_message(primary_message, primary_recipient) .await }, + async move { secondary.direct_message(secondary_message, recipient).await }, + BroadcastDelay::None, ) .await } - async fn vid_broadcast_message( + async fn vid_broadcast_message( &self, - messages: HashMap>, - bind_version: VER, + messages: HashMap>, ) -> Result<(), NetworkError> { - self.networks - .0 - .vid_broadcast_message(messages, bind_version) - .await + self.networks.0.vid_broadcast_message(messages).await } /// Receive one or many messages from the underlying network. /// /// # Errors /// Does not error - async fn recv_msgs(&self) -> Result>, NetworkError> { + async fn recv_msgs(&self) -> Result>, NetworkError> { // recv on both networks because nodes may be accessible only on either. discard duplicates // TODO: improve this algorithm: https://github.com/EspressoSystems/HotShot/issues/2089 let mut primary_fut = self.primary().recv_msgs().fuse(); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 23ee8f304d..7379d544d9 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -31,19 +31,16 @@ use futures::{ use hotshot_orchestrator::config::NetworkConfig; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ - AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, ViewMessage, + AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, }; use hotshot_types::{ boxed_sync, - constants::{Version01, LOOK_AHEAD, STATIC_VER_0_1, VERSION_0_1}, + constants::LOOK_AHEAD, data::ViewNumber, message::{DataMessage::DataResponse, Message, MessageKind}, traits::{ election::Membership, - network::{ - self, ConnectedNetwork, FailedToSerializeSnafu, NetworkError, NetworkMsg, - ResponseMessage, - }, + network::{self, ConnectedNetwork, NetworkError, ResponseMessage}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -65,14 +62,10 @@ use libp2p_networking::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; -use snafu::ResultExt; use tracing::{debug, error, info, instrument, warn}; -use vbs::{ - version::{StaticVersionType, Version}, - BinarySerializer, Serializer, -}; use super::NetworkingMetricsValue; +use crate::BroadcastDelay; /// convenience alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests @@ -96,33 +89,33 @@ pub struct Empty { byte: u8, } -impl Debug for Libp2pNetwork { +impl Debug for Libp2pNetwork { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Libp2p").field("inner", &"inner").finish() } } /// Locked Option of a receiver for moving the value out of the option -type TakeReceiver = Mutex)>>>; +type TakeReceiver = Mutex, ResponseChannel)>>>; /// Type alias for a shared collection of peerid, multiaddrs pub type PeerInfoVec = Arc>>; /// The underlying state of the libp2p network #[derive(Debug)] -struct Libp2pNetworkInner { +struct Libp2pNetworkInner { /// this node's public key pk: K, /// handle to control the network handle: Arc, /// Message Receiver - receiver: UnboundedReceiver, + receiver: UnboundedReceiver>, /// Receiver for Requests for Data, includes the request and the response chan /// Lock should only be used once to take the channel and move it into the request /// handler task - requests_rx: TakeReceiver, + requests_rx: TakeReceiver, /// Sender for broadcast messages - sender: UnboundedSender, + sender: UnboundedSender>, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) node_lookup_send: UnboundedSender>, /// this is really cheating to enable local tests @@ -156,16 +149,14 @@ struct Libp2pNetworkInner { /// Networking implementation that uses libp2p /// generic over `M` which is the message type #[derive(Clone)] -pub struct Libp2pNetwork { +pub struct Libp2pNetwork { /// holds the state of the libp2p network - inner: Arc>, + inner: Arc>, } #[cfg(feature = "hotshot-testing")] impl TestableNetworkingImplementation - for Libp2pNetwork, TYPES::SignatureKey> -where - MessageKind: ViewMessage, + for Libp2pNetwork { /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` /// with the purpose of generating libp2p networks. @@ -336,7 +327,7 @@ pub fn derive_libp2p_peer_id( Ok(PeerId::from_public_key(&keypair.public())) } -impl Libp2pNetwork { +impl Libp2pNetwork { /// Create and return a Libp2p network from a network config file /// and various other configuration-specific values. /// @@ -462,7 +453,7 @@ impl Libp2pNetwork { #[cfg(feature = "hotshot-testing")] reliability_config: Option>, da_public_keys: BTreeSet, is_da: bool, - ) -> Result, NetworkError> { + ) -> Result, NetworkError> { // Error if there were no bootstrap nodes specified #[cfg(not(feature = "hotshot-testing"))] if bootstrap_addrs.read().await.len() == 0 { @@ -528,19 +519,15 @@ impl Libp2pNetwork { }; result.handle_event_generator(sender, requests_tx, rx); - result.spawn_node_lookup(node_lookup_recv, STATIC_VER_0_1); - result.spawn_connect(id, STATIC_VER_0_1); + result.spawn_node_lookup(node_lookup_recv); + result.spawn_connect(id); Ok(result) } /// Spawns task for looking up nodes pre-emptively #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - fn spawn_node_lookup( - &self, - node_lookup_recv: UnboundedReceiver>, - _: Ver, - ) { + fn spawn_node_lookup(&self, node_lookup_recv: UnboundedReceiver>) { let handle = Arc::clone(&self.inner.handle); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = Arc::clone(&self.inner.latest_seen_view); @@ -557,11 +544,15 @@ impl Libp2pNetwork { // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { + let pk_bytes = match bincode::serialize(&pk) { + Ok(serialized) => serialized, + Err(e) => { + tracing::error!("Failed to serialize public key; this should never happen. Error: {e}"); + return; + } + }; // look up - if let Err(err) = handle - .lookup_node::(pk.clone(), dht_timeout, Ver::instance()) - .await - { + if let Err(err) = handle.lookup_node(&pk_bytes, dht_timeout).await { error!("Failed to perform lookup for key {:?}: {}", pk, err); }; } @@ -570,7 +561,7 @@ impl Libp2pNetwork { } /// Initiates connection to the outside world - fn spawn_connect(&mut self, id: usize, bind_version: VER) { + fn spawn_connect(&mut self, id: usize) { let pk = self.inner.pk.clone(); let bootstrap_ref = Arc::clone(&self.inner.bootstrap_addrs); let handle = Arc::clone(&self.inner.handle); @@ -612,8 +603,14 @@ impl Libp2pNetwork { // we want our records published before // we begin participating in consensus + // + // Note: this serialization should never fail, + // and if it does the error is unrecoverable. while handle - .put_record(&pk, &handle.peer_id(), bind_version) + .put_record( + &bincode::serialize(&pk).unwrap(), + &bincode::serialize(&handle.peer_id()).unwrap(), + ) .await .is_err() { @@ -626,7 +623,10 @@ impl Libp2pNetwork { ); while handle - .put_record(&handle.peer_id(), &pk, bind_version) + .put_record( + &bincode::serialize(&handle.peer_id()).unwrap(), + &bincode::serialize(&pk).unwrap(), + ) .await .is_err() { @@ -659,55 +659,46 @@ impl Libp2pNetwork { }); } - /// Handle events for Version 0.1 of the protocol. - async fn handle_recvd_events_0_1( + /// Handle events + async fn handle_recvd_events( &self, msg: NetworkEvent, - sender: &UnboundedSender, - mut request_tx: Sender<(M, ResponseChannel)>, + sender: &UnboundedSender>, + mut request_tx: Sender<(Vec, ResponseChannel)>, ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { - let result: Result = Serializer::::deserialize(&msg); - if let Ok(result) = result { - sender - .send(result) - .await - .map_err(|_| NetworkError::ChannelSend)?; - } + sender + .send(msg) + .await + .map_err(|_| NetworkError::ChannelSend)?; } DirectRequest(msg, _pid, chan) => { - let result: Result = - Serializer::::deserialize(&msg).context(FailedToSerializeSnafu); - if let Ok(result) = result { - sender - .send(result) - .await - .map_err(|_| NetworkError::ChannelSend)?; - } + sender + .send(msg) + .await + .map_err(|_| NetworkError::ChannelSend)?; if self .inner .handle - .direct_response(chan, &Empty { byte: 0u8 }, STATIC_VER_0_1) + .direct_response( + chan, + &bincode::serialize(&Empty { byte: 0u8 }) + .map_err(|e| NetworkError::Libp2p { source: e.into() })?, + ) .await .is_err() { error!("failed to ack!"); }; } - DirectResponse(msg, _) => { - let _result: Result = - Serializer::::deserialize(&msg).context(FailedToSerializeSnafu); - } + DirectResponse(_msg, _) => {} NetworkEvent::IsBootstrapped => { - error!("handle_recvd_events_0_1 received `NetworkEvent::IsBootstrapped`, which should be impossible."); + error!("handle_recvd_events received `NetworkEvent::IsBootstrapped`, which should be impossible."); } - NetworkEvent::ResponseRequested(msg, chan) => { - let reqeust = - Serializer::::deserialize(&msg.0).context(FailedToSerializeSnafu)?; - request_tx - .try_send((reqeust, chan)) - .map_err(|_| NetworkError::ChannelSend)?; + NetworkEvent::ResponseRequested(Request(msg), chan) => { + let res = request_tx.try_send((msg, chan)); + res.map_err(|_| NetworkError::ChannelSend)?; } } Ok::<(), NetworkError>(()) @@ -717,8 +708,8 @@ impl Libp2pNetwork { /// terminates on shut down of network fn handle_event_generator( &self, - sender: UnboundedSender, - request_tx: Sender<(M, ResponseChannel)>, + sender: UnboundedSender>, + request_tx: Sender<(Vec, ResponseChannel)>, mut network_rx: NetworkNodeReceiver, ) { let handle = self.clone(); @@ -741,33 +732,13 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } - GossipMsg(raw) - | DirectRequest(raw, _, _) - | DirectResponse(raw, _) - | NetworkEvent::ResponseRequested(Request(raw), _) => { - match Version::deserialize(raw) { - Ok((VERSION_0_1, _rest)) => { - let _ = handle - .handle_recvd_events_0_1( - message, - &sender, - request_tx.clone(), - ) - .await; - } - Ok((version, _)) => { - warn!( - "Received message with unsupported version: {:?}.\n\nPayload:\n\n{:?}", - version, message - ); - } - Err(e) => { - warn!( - "Error recovering version: {:?}.\n\nPayload:\n\n{:?}", - e, message - ); - } - } + GossipMsg(_) + | DirectRequest(_, _, _) + | DirectResponse(_, _) + | NetworkEvent::ResponseRequested(Request(_), _) => { + let _ = handle + .handle_recvd_events(message, &sender, request_tx.clone()) + .await; } } // re-set the `kill_switch` for the next loop @@ -790,19 +761,22 @@ impl Libp2pNetwork { } #[async_trait] -impl ConnectedNetwork for Libp2pNetwork { - async fn request_data( +impl ConnectedNetwork for Libp2pNetwork { + async fn request_data( &self, - request: M, + request: Vec, recipient: &K, - bind_version: VER, - ) -> Result, NetworkError> { + ) -> Result, NetworkError> { self.wait_for_ready().await; let pid = match self .inner .handle - .lookup_node::(recipient.clone(), self.inner.dht_timeout, bind_version) + .lookup_node( + &bincode::serialize(&recipient) + .map_err(|e| NetworkError::Libp2p { source: e.into() })?, + self.inner.dht_timeout, + ) .await { Ok(pid) => pid, @@ -817,34 +791,33 @@ impl ConnectedNetwork for Libp2p }); } }; - match self - .inner - .handle - .request_data(&request, pid, bind_version) - .await - { + let result = match self.inner.handle.request_data(&request, pid).await { Ok(response) => match response { Some(msg) => { - let res: Message = Serializer::::deserialize(&msg.0) - .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; - let DataResponse(res) = (match res.kind { - MessageKind::Data(data) => data, - MessageKind::Consensus(_) => return Ok(ResponseMessage::NotFound), - }) else { - return Ok(ResponseMessage::NotFound); - }; - Ok(res) + if msg.0.len() < 8 { + return Err(NetworkError::FailedToDeserialize { + source: anyhow!("insufficient bytes"), + }); + } + let res: Message = bincode::deserialize(&msg.0) + .map_err(|e| NetworkError::FailedToDeserialize { source: e.into() })?; + + match res.kind { + MessageKind::Data(DataResponse(data)) => data, + _ => ResponseMessage::NotFound, + } } - None => Ok(ResponseMessage::NotFound), + None => ResponseMessage::NotFound, }, - Err(e) => Err(e.into()), - } + Err(e) => return Err(e.into()), + }; + + Ok(bincode::serialize(&result).map_err(|e| NetworkError::Libp2p { source: e.into() })?) } - async fn spawn_request_receiver_task( + async fn spawn_request_receiver_task( &self, - bind_version: VER, - ) -> Option)>> { + ) -> Option, network::ResponseChannel>)>> { let mut internal_rx = self.inner.requests_rx.lock().await.take()?; let handle = Arc::clone(&self.inner.handle); let (mut tx, rx) = mpsc::channel(100); @@ -852,7 +825,12 @@ impl ConnectedNetwork for Libp2p while let Some((request, chan)) = internal_rx.next().await { let (response_tx, response_rx) = futures::channel::oneshot::channel(); if tx - .try_send((request, network::ResponseChannel(response_tx))) + .try_send(( + request, + network::ResponseChannel { + sender: response_tx, + }, + )) .is_err() { continue; @@ -860,7 +838,8 @@ impl ConnectedNetwork for Libp2p let Ok(response) = response_rx.await else { continue; }; - let _ = handle.respond_data(&response, chan, bind_version).await; + + let _ = handle.respond_data(response, chan).await; } }); @@ -894,11 +873,11 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::broadcast_message", skip_all)] - async fn broadcast_message( + async fn broadcast_message( &self, - message: M, + message: Vec, recipients: BTreeSet, - bind_version: VER, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { self.wait_for_ready().await; info!( @@ -933,10 +912,8 @@ impl ConnectedNetwork for Libp2p if let Some(ref config) = &self.inner.reliability_config { let handle = Arc::clone(&self.inner.handle); - let serialized_msg = - Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; let fut = config.clone().chaos_send_msg( - serialized_msg, + message, Arc::new(move |msg: Vec| { let topic_2 = topic.clone(); let handle_2 = Arc::clone(&handle); @@ -959,12 +936,7 @@ impl ConnectedNetwork for Libp2p } } - match self - .inner - .handle - .gossip(topic, &message, bind_version) - .await - { + match self.inner.handle.gossip(topic, &message).await { Ok(()) => { self.inner.metrics.outgoing_broadcast_message_count.add(1); Ok(()) @@ -977,15 +949,15 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::da_broadcast_message", skip_all)] - async fn da_broadcast_message( + async fn da_broadcast_message( &self, - message: M, + message: Vec, recipients: BTreeSet, - bind_version: VER, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let future_results = recipients .into_iter() - .map(|r| self.direct_message(message.clone(), r, bind_version)); + .map(|r| self.direct_message(message.clone(), r)); let results = join_all(future_results).await; let errors: Vec<_> = results @@ -1004,12 +976,7 @@ impl ConnectedNetwork for Libp2p } #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] - async fn direct_message( - &self, - message: M, - recipient: K, - bind_version: VER, - ) -> Result<(), NetworkError> { + async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { // short circuit if we're dming ourselves if recipient == self.inner.pk { // panic if we already shut down? @@ -1026,7 +993,11 @@ impl ConnectedNetwork for Libp2p let pid = match self .inner .handle - .lookup_node::(recipient.clone(), self.inner.dht_timeout, bind_version) + .lookup_node( + &bincode::serialize(&recipient) + .map_err(|e| NetworkError::Libp2p { source: e.into() })?, + self.inner.dht_timeout, + ) .await { Ok(pid) => pid, @@ -1048,10 +1019,8 @@ impl ConnectedNetwork for Libp2p if let Some(ref config) = &self.inner.reliability_config { let handle = Arc::clone(&self.inner.handle); - let serialized_msg = - Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; let fut = config.clone().chaos_send_msg( - serialized_msg, + message, Arc::new(move |msg: Vec| { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); @@ -1073,12 +1042,7 @@ impl ConnectedNetwork for Libp2p } } - match self - .inner - .handle - .direct_request(pid, &message, bind_version) - .await - { + match self.inner.handle.direct_request(pid, &message).await { Ok(()) => Ok(()), Err(e) => Err(e.into()), } @@ -1089,7 +1053,7 @@ impl ConnectedNetwork for Libp2p /// # Errors /// If there is a network-related failure. #[instrument(name = "Libp2pNetwork::recv_msgs", skip_all)] - async fn recv_msgs(&self) -> Result, NetworkError> { + async fn recv_msgs(&self) -> Result>, NetworkError> { let result = self .inner .receiver diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 6d5f5097b2..2c922a2c0a 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -23,39 +23,37 @@ use dashmap::DashMap; use futures::StreamExt; use hotshot_types::{ boxed_sync, - constants::Version01, - message::Message, traits::{ - network::{AsyncGenerator, ConnectedNetwork, NetworkMsg, TestableNetworkingImplementation}, + network::{ + AsyncGenerator, BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, + }, node_implementation::NodeType, signature_key::SignatureKey, }, BoxSyncFuture, }; use rand::Rng; -use snafu::ResultExt; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; -use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; -use super::{FailedToSerializeSnafu, NetworkError, NetworkReliability, NetworkingMetricsValue}; +use super::{NetworkError, NetworkReliability, NetworkingMetricsValue}; /// Shared state for in-memory mock networking. /// /// This type is responsible for keeping track of the channels to each [`MemoryNetwork`], and is /// used to group the [`MemoryNetwork`] instances. #[derive(custom_debug::Debug)] -pub struct MasterMap { +pub struct MasterMap { /// The list of `MemoryNetwork`s #[debug(skip)] - map: DashMap>, + map: DashMap>, /// The id of this `MemoryNetwork` cluster id: u64, } -impl MasterMap { +impl MasterMap { /// Create a new, empty, `MasterMap` #[must_use] - pub fn new() -> Arc> { + pub fn new() -> Arc> { Arc::new(MasterMap { map: DashMap::new(), id: rand::thread_rng().gen(), @@ -65,13 +63,13 @@ impl MasterMap { /// Internal state for a `MemoryNetwork` instance #[derive(Debug)] -struct MemoryNetworkInner { +struct MemoryNetworkInner { /// Input for messages input: RwLock>>>, /// Output for messages - output: Mutex>, + output: Mutex>>, /// The master map - master_map: Arc>, + master_map: Arc>, /// Count of messages that are in-flight (send but not processed yet) in_flight_message_count: AtomicUsize, @@ -91,12 +89,12 @@ struct MemoryNetworkInner { /// Under the hood, this simply maintains mpmc channels to every other `MemoryNetwork` insane of the /// same group. #[derive(Clone)] -pub struct MemoryNetwork { +pub struct MemoryNetwork { /// The actual internal state - inner: Arc>, + inner: Arc>, } -impl Debug for MemoryNetwork { +impl Debug for MemoryNetwork { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("MemoryNetwork") .field("inner", &"inner") @@ -104,15 +102,15 @@ impl Debug for MemoryNetwork { } } -impl MemoryNetwork { +impl MemoryNetwork { /// Creates a new `MemoryNetwork` and hooks it up to the group through the provided `MasterMap` #[instrument(skip(metrics))] pub fn new( pub_key: K, metrics: NetworkingMetricsValue, - master_map: Arc>, + master_map: Arc>, reliability_config: Option>, - ) -> MemoryNetwork { + ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); let (input, task_recv) = bounded(128); let (task_send, output) = bounded(128); @@ -127,20 +125,12 @@ impl MemoryNetwork { while let Some(vec) = task_stream.next().await { trace!(?vec, "Incoming message"); // Attempt to decode message - let x = Serializer::::deserialize(&vec); - match x { - Ok(x) => { - let ts = task_send.clone(); - let res = ts.send(x).await; - if res.is_ok() { - trace!("Passed message to output queue"); - } else { - error!("Output queue receivers are shutdown"); - } - } - Err(e) => { - warn!(?e, "Failed to decode incoming message, skipping"); - } + let ts = task_send.clone(); + let res = ts.send(vec).await; + if res.is_ok() { + trace!("Passed message to output queue"); + } else { + error!("Output queue receivers are shutdown"); } warn!("Stream shutdown"); } @@ -181,7 +171,7 @@ impl MemoryNetwork { } impl TestableNetworkingImplementation - for MemoryNetwork, TYPES::SignatureKey> + for MemoryNetwork { fn generator( _expected_node_count: usize, @@ -214,7 +204,7 @@ impl TestableNetworkingImplementation // TODO instrument these functions #[async_trait] -impl ConnectedNetwork for MemoryNetwork { +impl ConnectedNetwork for MemoryNetwork { #[instrument(name = "MemoryNetwork::ready_blocking")] async fn wait_for_ready(&self) {} @@ -239,16 +229,13 @@ impl ConnectedNetwork for Memory } #[instrument(name = "MemoryNetwork::broadcast_message")] - async fn broadcast_message( + async fn broadcast_message( &self, - message: M, + message: Vec, recipients: BTreeSet, - _: VER, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { trace!(?message, "Broadcasting message"); - // Bincode the message - let vec = Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; - trace!("Message bincoded, sending"); for node in &self.inner.master_map.map { // TODO delay/drop etc here let (key, node) = node.pair(); @@ -260,7 +247,7 @@ impl ConnectedNetwork for Memory { let node2 = node.clone(); let fut = config.chaos_send_msg( - vec.clone(), + message.clone(), Arc::new(move |msg: Vec| { let node3 = (node2).clone(); boxed_sync(async move { @@ -273,7 +260,7 @@ impl ConnectedNetwork for Memory async_spawn(fut); } } else { - let res = node.input(vec.clone()).await; + let res = node.input(message.clone()).await; match res { Ok(()) => { self.inner.metrics.outgoing_broadcast_message_count.add(1); @@ -290,33 +277,27 @@ impl ConnectedNetwork for Memory } #[instrument(name = "MemoryNetwork::da_broadcast_message")] - async fn da_broadcast_message( + async fn da_broadcast_message( &self, - message: M, + message: Vec, recipients: BTreeSet, - bind_version: VER, + broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients, bind_version) + self.broadcast_message(message, recipients, broadcast_delay) .await } #[instrument(name = "MemoryNetwork::direct_message")] - async fn direct_message( - &self, - message: M, - recipient: K, - _: VER, - ) -> Result<(), NetworkError> { + async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { // debug!(?message, ?recipient, "Sending direct message"); // Bincode the message - let vec = Serializer::::serialize(&message).context(FailedToSerializeSnafu)?; trace!("Message bincoded, finding recipient"); if let Some(node) = self.inner.master_map.map.get(&recipient) { let node = node.value().clone(); if let Some(ref config) = &self.inner.reliability_config { { let fut = config.chaos_send_msg( - vec.clone(), + message.clone(), Arc::new(move |msg: Vec| { let node2 = node.clone(); boxed_sync(async move { @@ -330,7 +311,7 @@ impl ConnectedNetwork for Memory } Ok(()) } else { - let res = node.input(vec).await; + let res = node.input(message).await; match res { Ok(()) => { self.inner.metrics.outgoing_direct_message_count.add(1); @@ -359,7 +340,7 @@ impl ConnectedNetwork for Memory /// # Errors /// If the other side of the channel is closed #[instrument(name = "MemoryNetwork::recv_msgs", skip_all)] - async fn recv_msgs(&self) -> Result, NetworkError> { + async fn recv_msgs(&self) -> Result>, NetworkError> { let ret = self .inner .output diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 11e020d224..637b17d8b9 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -33,11 +33,9 @@ use hotshot_types::traits::network::{ }; use hotshot_types::{ boxed_sync, - constants::{Version01, VERSION_0_1}, data::ViewNumber, - message::Message, traits::{ - network::{ConnectedNetwork, PushCdnNetworkError}, + network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -47,11 +45,7 @@ use hotshot_types::{ use num_enum::{IntoPrimitive, TryFromPrimitive}; #[cfg(feature = "hotshot-testing")] use rand::{rngs::StdRng, RngCore, SeedableRng}; -use tracing::{error, warn}; -use vbs::{ - version::{StaticVersionType, Version}, - BinarySerializer, Serializer, -}; +use tracing::error; use super::NetworkError; @@ -206,32 +200,18 @@ impl PushCdnNetwork { /// # Errors /// - If we fail to serialize the message /// - If we fail to send the broadcast message. - async fn broadcast_message( - &self, - message: Message, - topic: Topic, - _: Ver, - ) -> Result<(), NetworkError> { + async fn broadcast_message(&self, message: Vec, topic: Topic) -> Result<(), NetworkError> { // If we're paused, don't send the message #[cfg(feature = "hotshot-testing")] if self.is_paused.load(Ordering::Relaxed) { return Ok(()); } - // Bincode the message - let serialized_message = match Serializer::::serialize(&message) { - Ok(serialized) => serialized, - Err(e) => { - warn!("Failed to serialize message: {}", e); - return Err(NetworkError::FailedToSerialize { source: e }); - } - }; - // Send the message // TODO: check if we need to print this error if self .client - .send_broadcast_message(vec![topic as u8], serialized_message) + .send_broadcast_message(vec![topic as u8], message) .await .is_err() { @@ -409,9 +389,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork } #[async_trait] -impl ConnectedNetwork, TYPES::SignatureKey> - for PushCdnNetwork -{ +impl ConnectedNetwork for PushCdnNetwork { /// Pause sending and receiving on the PushCDN network. fn pause(&self) { #[cfg(feature = "hotshot-testing")] @@ -443,14 +421,13 @@ impl ConnectedNetwork, TYPES::SignatureKey> /// # Errors /// - If we fail to serialize the message /// - If we fail to send the broadcast message. - async fn broadcast_message( + async fn broadcast_message( &self, - message: Message, + message: Vec, _recipients: BTreeSet, - bind_version: Ver, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Global, bind_version) - .await + self.broadcast_message(message, Topic::Global).await } /// Broadcast a message to all members of the DA committee. @@ -458,25 +435,23 @@ impl ConnectedNetwork, TYPES::SignatureKey> /// # Errors /// - If we fail to serialize the message /// - If we fail to send the broadcast message. - async fn da_broadcast_message( + async fn da_broadcast_message( &self, - message: Message, + message: Vec, _recipients: BTreeSet, - bind_version: Ver, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Da, bind_version) - .await + self.broadcast_message(message, Topic::Da).await } /// Send a direct message to a node with a particular key. Does not retry. /// /// - If we fail to serialize the message /// - If we fail to send the direct message - async fn direct_message( + async fn direct_message( &self, - message: Message, + message: Vec, recipient: TYPES::SignatureKey, - _: Ver, ) -> Result<(), NetworkError> { // If we're paused, don't send the message #[cfg(feature = "hotshot-testing")] @@ -484,20 +459,11 @@ impl ConnectedNetwork, TYPES::SignatureKey> return Ok(()); } - // Bincode the message - let serialized_message = match Serializer::::serialize(&message) { - Ok(serialized) => serialized, - Err(e) => { - warn!("Failed to serialize message: {}", e); - return Err(NetworkError::FailedToSerialize { source: e }); - } - }; - // Send the message // TODO: check if we need to print this error if self .client - .send_direct_message(&WrappedSignatureKey(recipient), serialized_message) + .send_direct_message(&WrappedSignatureKey(recipient), message) .await .is_err() { @@ -512,7 +478,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> /// /// # Errors /// - If we fail to receive messages. Will trigger a retry automatically. - async fn recv_msgs(&self) -> Result>, NetworkError> { + async fn recv_msgs(&self) -> Result>, NetworkError> { // Receive a message let message = self.client.receive_message().await; @@ -543,24 +509,7 @@ impl ConnectedNetwork, TYPES::SignatureKey> return Ok(vec![]); }; - let message_version = Version::deserialize(&message) - .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; - if message_version.0 == VERSION_0_1 { - let result: Message = Serializer::::deserialize(&message) - .map_err(|e| NetworkError::FailedToDeserialize { source: e })?; - - // Deserialize it - // Return it - Ok(vec![result]) - } else { - Err(NetworkError::FailedToDeserialize { - source: anyhow::format_err!( - "version mismatch, expected {}, got {}", - VERSION_0_1, - message_version.0 - ), - }) - } + Ok(vec![message]) } /// Do nothing here, as we don't need to look up nodes. diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index e092238282..f68c4f549a 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -135,7 +135,9 @@ impl + 'static> SystemContextHandl &self, tx: TYPES::Transaction, ) -> Result<(), HotShotError> { - self.hotshot.publish_transaction_async(tx).await + self.hotshot + .publish_transaction_async(tx, Arc::clone(&self.hotshot.decided_upgrade_certificate)) + .await } /// Get the underlying consensus state for this [`SystemContext`] diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index b982b437dd..f7e5083823 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -17,6 +17,7 @@ async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } +bincode = { workspace = true } custom_debug = { workspace = true } derive_builder = "0.20" either = { workspace = true } @@ -33,7 +34,6 @@ tide = { version = "0.16", optional = true, default-features = false, features = "h1-server", ] } tracing = { workspace = true } -vbs = { workspace = true } void = "1" lazy_static = { workspace = true } diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 88cb63b834..8437e20e02 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -12,10 +12,8 @@ use futures::channel::oneshot; use hotshot_types::traits::network::NetworkError as HotshotNetworkError; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; -use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use tracing::{debug, info, instrument}; -use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; use crate::network::{ behaviours::request_response::{Request, Response}, @@ -184,16 +182,14 @@ impl NetworkNodeHandle { /// /// Will return a networking error if the channel closes before the result /// can be sent back - pub async fn request_data( + pub async fn request_data( &self, - request: &impl Serialize, + request: &[u8], peer: PeerId, - _: VER, ) -> Result, NetworkNodeHandleError> { let (tx, rx) = oneshot::channel(); - let serialized_msg = Serializer::::serialize(request).context(SerializationSnafu)?; let req = ClientRequest::DataRequest { - request: Request(serialized_msg), + request: Request(request.to_vec()), peer, chan: tx, }; @@ -206,15 +202,13 @@ impl NetworkNodeHandle { /// Send a response to a request with the response channel /// # Errors /// Will error if the client request channel is closed, or serialization fails. - pub async fn respond_data( + pub async fn respond_data( &self, - response: &impl Serialize, + response: Vec, chan: ResponseChannel, - _: VER, ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = Serializer::::serialize(response).context(SerializationSnafu)?; let req = ClientRequest::DataResponse { - response: Response(serialized_msg), + response: Response(response), chan, }; self.send_request(req).await @@ -234,37 +228,30 @@ impl NetworkNodeHandle { /// Looks up a node's `PeerId` and attempts to validate routing /// # Errors /// if the peer was unable to be looked up (did not provide a response, DNE) - pub async fn lookup_node Deserialize<'a> + Serialize, VER: StaticVersionType>( + pub async fn lookup_node( &self, - key: V, + key: &[u8], dht_timeout: Duration, - bind_version: VER, ) -> Result { // get record (from DHT) - let pid = self - .record_timeout::(&key, dht_timeout, bind_version) - .await?; + let pid = self.record_timeout(key, dht_timeout).await?; // pid lookup for routing // self.lookup_pid(pid).await?; - Ok(pid) + bincode::deserialize(&pid) + .map_err(|e| NetworkNodeHandleError::DeserializationError { source: e.into() }) } /// Insert a record into the kademlia DHT /// # Errors /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value - pub async fn put_record( - &self, - key: &impl Serialize, - value: &impl Serialize, - _: VER, - ) -> Result<(), NetworkNodeHandleError> { + pub async fn put_record(&self, key: &[u8], value: &[u8]) -> Result<(), NetworkNodeHandleError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::PutDHT { - key: Serializer::::serialize(key).context(SerializationSnafu)?, - value: Serializer::::serialize(value).context(SerializationSnafu)?, + key: key.to_vec(), + value: value.to_vec(), notify: s, }; @@ -279,22 +266,21 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn record Deserialize<'a>, VER: StaticVersionType>( + pub async fn record( &self, - key: &impl Serialize, + key: &[u8], retry_count: u8, - _: VER, - ) -> Result { + ) -> Result, NetworkNodeHandleError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetDHT { - key: Serializer::::serialize(key).context(SerializationSnafu)?, + key: key.to_vec(), notify: s, retry_count, }; self.send_request(req).await?; match r.await.context(CancelledRequestSnafu) { - Ok(result) => Serializer::::deserialize(&result).context(DeserializationSnafu), + Ok(result) => Ok(result), Err(e) => Err(e).context(DHTSnafu), } } @@ -305,13 +291,12 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn record_timeout Deserialize<'a>, VER: StaticVersionType>( + pub async fn record_timeout( &self, - key: &impl Serialize, + key: &[u8], timeout: Duration, - bind_version: VER, - ) -> Result { - let result = async_timeout(timeout, self.record(key, 3, bind_version)).await; + ) -> Result, NetworkNodeHandleError> { + let result = async_timeout(timeout, self.record(key, 3)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, @@ -324,14 +309,13 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - pub async fn put_record_timeout( + pub async fn put_record_timeout( &self, - key: &impl Serialize, - value: &impl Serialize, + key: &[u8], + value: &[u8], timeout: Duration, - bind_version: VER, ) -> Result<(), NetworkNodeHandleError> { - let result = async_timeout(timeout, self.put_record(key, value, bind_version)).await; + let result = async_timeout(timeout, self.put_record(key, value)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, @@ -371,14 +355,12 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn direct_request( + pub async fn direct_request( &self, pid: PeerId, - msg: &impl Serialize, - _: VER, + msg: &[u8], ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = Serializer::::serialize(msg).context(SerializationSnafu)?; - self.direct_request_no_serialize(pid, serialized_msg).await + self.direct_request_no_serialize(pid, msg.to_vec()).await } /// Make a direct request to `peer_id` containing `msg` without serializing @@ -402,14 +384,12 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn direct_response( + pub async fn direct_response( &self, chan: ResponseChannel>, - msg: &impl Serialize, - _: VER, + msg: &[u8], ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = Serializer::::serialize(msg).context(SerializationSnafu)?; - let req = ClientRequest::DirectResponse(chan, serialized_msg); + let req = ClientRequest::DirectResponse(chan, msg.to_vec()); self.send_request(req).await } @@ -429,14 +409,8 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn gossip( - &self, - topic: String, - msg: &impl Serialize, - _: VER, - ) -> Result<(), NetworkNodeHandleError> { - let serialized_msg = Serializer::::serialize(msg).context(SerializationSnafu)?; - self.gossip_no_serialize(topic, serialized_msg).await + pub async fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkNodeHandleError> { + self.gossip_no_serialize(topic, msg.to_vec()).await } /// Gossip a message to peers without serializing diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index d51530b0e2..a06eecdd2b 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -8,7 +8,6 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; use common::{test_bed, HandleSnafu, HandleWithState, TestError}; -use hotshot_types::constants::{Version01, STATIC_VER_0_1}; use libp2p_networking::network::{NetworkEvent, NetworkNodeHandleError}; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; @@ -16,7 +15,6 @@ use snafu::ResultExt; #[cfg(async_executor_impl = "tokio")] use tokio_stream::StreamExt; use tracing::{debug, error, info, instrument, warn}; -use vbs::{BinarySerializer, Serializer}; use crate::common::print_connections; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] @@ -75,7 +73,7 @@ pub async fn counter_handle_network_event( match event { IsBootstrapped | NetworkEvent::ResponseRequested(..) => {} GossipMsg(m) | DirectResponse(m, _) => { - if let Ok(msg) = Serializer::::deserialize::(&m) { + if let Ok(msg) = bincode::deserialize::(&m) { match msg { // direct message only MyCounterIs(c) => { @@ -100,7 +98,7 @@ pub async fn counter_handle_network_event( } } DirectRequest(m, _, chan) => { - if let Ok(msg) = Serializer::::deserialize::(&m) { + if let Ok(msg) = bincode::deserialize::(&m) { match msg { // direct message request IncrementCounter { from, to, .. } => { @@ -114,7 +112,10 @@ pub async fn counter_handle_network_event( .await; handle .handle - .direct_response(chan, &CounterMessage::Noop, STATIC_VER_0_1) + .direct_response( + chan, + &bincode::serialize(&CounterMessage::Noop).unwrap(), + ) .await?; } // direct message response @@ -122,19 +123,25 @@ pub async fn counter_handle_network_event( let response = MyCounterIs(handle.state.copied().await); handle .handle - .direct_response(chan, &response, STATIC_VER_0_1) + .direct_response(chan, &bincode::serialize(&response).unwrap()) .await?; } MyCounterIs(_) => { handle .handle - .direct_response(chan, &CounterMessage::Noop, STATIC_VER_0_1) + .direct_response( + chan, + &bincode::serialize(&CounterMessage::Noop).unwrap(), + ) .await?; } Noop => { handle .handle - .direct_response(chan, &CounterMessage::Noop, STATIC_VER_0_1) + .direct_response( + chan, + &bincode::serialize(&CounterMessage::Noop).unwrap(), + ) .await?; } } @@ -175,7 +182,7 @@ async fn run_request_response_increment<'a>( std::process::exit(-1)}, } requester_handle.handle - .direct_request(requestee_pid, &CounterMessage::AskForCounter, STATIC_VER_0_1) + .direct_request(requestee_pid, &bincode::serialize(&CounterMessage::AskForCounter).unwrap()) .await .context(HandleSnafu)?; match stream.next().await.unwrap() { @@ -245,7 +252,7 @@ async fn run_gossip_round( msg_handle .handle - .gossip("global".to_string(), &msg, STATIC_VER_0_1) + .gossip("global".to_string(), &bincode::serialize(&msg).unwrap()) .await .context(HandleSnafu)?; @@ -359,18 +366,12 @@ async fn run_dht_rounds( value.push(inc_val); // put the key - msg_handle - .handle - .put_record(&key, &value, STATIC_VER_0_1) - .await - .unwrap(); + msg_handle.handle.put_record(&key, &value).await.unwrap(); // get the key from the other nodes for handle in handles { - let result: Result, NetworkNodeHandleError> = handle - .handle - .record_timeout(&key, timeout, STATIC_VER_0_1) - .await; + let result: Result, NetworkNodeHandleError> = + handle.handle.record_timeout(&key, timeout).await; match result { Err(e) => { error!("DHT error {e:?} during GET"); diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index f6f68b2e19..eb3ed0703d 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -108,3 +108,9 @@ nanos = 200_000_000 [config.builder_timeout] secs = 2 nanos = 0 + +[config.upgrade] +start_proposing_view = 0 +stop_proposing_view = 0 +start_voting_view = 0 +stop_voting_view = 0 diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 5f42d57712..3511c943ea 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -4,7 +4,7 @@ use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; use hotshot_types::{ - constants::Version01, traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig, + constants::Base, traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig, }; use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; @@ -211,7 +211,7 @@ impl OrchestratorClient { // Serialize our (possible) libp2p-specific data let request_body = - vbs::Serializer::::serialize(&(libp2p_address, libp2p_public_key))?; + vbs::Serializer::::serialize(&(libp2p_address, libp2p_public_key))?; let identity = |client: Client| { // We need to clone here to move it into the closure @@ -325,7 +325,7 @@ impl OrchestratorClient { // Serialize our (possible) libp2p-specific data let request_body = - vbs::Serializer::::serialize(&(pubkey, libp2p_address, libp2p_public_key)) + vbs::Serializer::::serialize(&(pubkey, libp2p_address, libp2p_public_key)) .expect("failed to serialize request"); // register our public key with the orchestrator diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index b8291b792d..37e6fff2e1 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -596,6 +596,35 @@ pub struct HotShotConfigFile { /// Builder API base URL #[serde(default = "default_builder_url")] pub builder_url: Url, + /// Upgrade config + pub upgrade: UpgradeConfig, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(bound(deserialize = ""))] +/// Holds configuration for the upgrade task. +pub struct UpgradeConfig { + /// View to start proposing an upgrade + pub start_proposing_view: u64, + /// View to stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_view <= start_proposing_view. + pub stop_proposing_view: u64, + /// View to start voting on an upgrade + pub start_voting_view: u64, + /// View to stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_view <= start_voting_view. + pub stop_voting_view: u64, +} + +// Explicitly implementing `Default` for clarity. +#[allow(clippy::derivable_impls)] +impl Default for UpgradeConfig { + fn default() -> Self { + UpgradeConfig { + start_proposing_view: u64::MAX, + stop_proposing_view: 0, + start_voting_view: u64::MAX, + stop_voting_view: 0, + } + } } /// Holds configuration for a validator node @@ -675,6 +704,10 @@ impl From> for HotShotConfig { builder_timeout: val.builder_timeout, data_request_delay: val.data_request_delay, builder_url: val.builder_url, + start_proposing_view: val.upgrade.start_proposing_view, + stop_proposing_view: val.upgrade.stop_proposing_view, + start_voting_view: val.upgrade.start_voting_view, + stop_voting_view: val.upgrade.stop_voting_view, } } } @@ -744,6 +777,7 @@ impl Default for HotShotConfigFile { builder_timeout: Duration::from_secs(10), data_request_delay: Duration::from_millis(200), builder_url: default_builder_url(), + upgrade: UpgradeConfig::default(), } } } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index ace6e9be6f..17db9f0526 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -11,7 +11,7 @@ use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; use csv::Writer; use futures::FutureExt; -use hotshot_types::{constants::Version01, traits::signature_key::SignatureKey, PeerConfig}; +use hotshot_types::{constants::Base, traits::signature_key::SignatureKey, PeerConfig}; use libp2p::{ identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, @@ -518,7 +518,7 @@ where // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) let Ok((libp2p_address, libp2p_public_key)) = - vbs::Serializer::::deserialize(&body_bytes) + vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { status: tide_disco::StatusCode::BAD_REQUEST, @@ -550,7 +550,7 @@ where // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) let Ok((mut pubkey, libp2p_address, libp2p_public_key)) = - vbs::Serializer::::deserialize(&body_bytes) + vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { status: tide_disco::StatusCode::BAD_REQUEST, diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 35caeeb308..97cc75b2aa 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -1,11 +1,12 @@ #[cfg(not(feature = "dependency-tasks"))] -use super::ConsensusTaskState; +use core::time::Duration; #[cfg(not(feature = "dependency-tasks"))] -use crate::{ - consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, - helpers::AnyhowTracing, +use std::marker::PhantomData; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; -use crate::{events::HotShotEvent, helpers::broadcast_event}; + #[cfg(not(feature = "dependency-tasks"))] use anyhow::bail; use anyhow::{ensure, Context, Result}; @@ -20,8 +21,6 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; #[cfg(not(feature = "dependency-tasks"))] -use core::time::Duration; -#[cfg(not(feature = "dependency-tasks"))] use futures::FutureExt; #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::{ @@ -46,12 +45,6 @@ use hotshot_types::{ }; #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}; -#[cfg(not(feature = "dependency-tasks"))] -use std::marker::PhantomData; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; #[cfg(not(feature = "dependency-tasks"))] @@ -60,6 +53,15 @@ use tracing::{debug, info, warn}; #[cfg(not(feature = "dependency-tasks"))] use vbs::version::Version; +#[cfg(not(feature = "dependency-tasks"))] +use super::ConsensusTaskState; +#[cfg(not(feature = "dependency-tasks"))] +use crate::{ + consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, + helpers::AnyhowTracing, +}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; + /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. /// @@ -890,10 +892,7 @@ pub async fn decide_from_proposal( if cert.data.decide_by < view_number { warn!("Failed to decide an upgrade certificate in time. Ignoring."); } else { - info!( - "Updating consensus state with decided upgrade certificate: {:?}", - cert - ); + info!("Reached decide on upgrade certificate: {:?}", cert); res.decided_upgrade_cert = Some(cert.clone()); } } @@ -966,6 +965,10 @@ pub async fn handle_quorum_proposal_validated> { /// This node's storage ref pub storage: Arc>, + + /// an upgrade certificate that has been decided on, if any + pub decided_upgrade_certificate: Arc>>>, } impl> ConsensusTaskState { diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 5796d0e862..edc25e1965 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,3 +1,4 @@ +use core::time::Duration; use std::sync::Arc; use anyhow::{ensure, Result}; @@ -7,7 +8,6 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; -use core::time::Duration; use hotshot_types::{ consensus::Consensus, event::{Event, EventType}, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 53389f825b..2aca0cb49e 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -7,23 +7,22 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - constants::{BASE_VERSION, STATIC_VER_0_1}, data::{VidDisperse, VidDisperseShare}, event::HotShotAction, message::{ DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, - SequencingMessage, + SequencingMessage, VersionedMessage, }, + simple_certificate::UpgradeCertificate, traits::{ election::Membership, - network::{ConnectedNetwork, TransmitType, ViewMessage}, + network::{BroadcastDelay, ConnectedNetwork, TransmitType, ViewMessage}, node_implementation::{ConsensusTime, NodeType}, storage::Storage, }, vote::{HasViewNumber, Vote}, }; -use tracing::{debug, error, instrument, warn}; -use vbs::version::Version; +use tracing::{error, instrument, warn}; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -38,7 +37,7 @@ pub fn quorum_filter(event: &Arc>) -> bool | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::DacSend(_, _) | HotShotEvent::TimeoutVoteSend(_) - | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -49,7 +48,7 @@ pub fn upgrade_filter(event: &Arc>) -> bool event.as_ref(), HotShotEvent::UpgradeProposalSend(_, _) | HotShotEvent::UpgradeVoteSend(_) - | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -60,7 +59,7 @@ pub fn da_filter(event: &Arc>) -> bool { event.as_ref(), HotShotEvent::DaProposalSend(_, _) | HotShotEvent::DaVoteSend(_) - | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -70,7 +69,7 @@ pub fn vid_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), HotShotEvent::VidDisperseSend(_, _) - | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -85,7 +84,7 @@ pub fn view_sync_filter(event: &Arc>) -> bo | HotShotEvent::ViewSyncPreCommitVoteSend(_) | HotShotEvent::ViewSyncCommitVoteSend(_) | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::VersionUpgrade(_) + | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -190,15 +189,13 @@ impl NetworkMessageTaskState { /// network event task state pub struct NetworkEventTaskState< TYPES: NodeType, - COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + COMMCHANNEL: ConnectedNetwork, S: Storage, > { /// comm channel pub channel: Arc, /// view number pub view: TYPES::Time, - /// version - pub version: Version, /// membership for the channel pub membership: TYPES::Membership, // TODO ED Need to add exchange so we can get the recipient key and our own key? @@ -206,12 +203,14 @@ pub struct NetworkEventTaskState< pub filter: fn(&Arc>) -> bool, /// Storage to store actionable events pub storage: Arc>, + /// Decided upgrade certificate + pub decided_upgrade_certificate: Option>, } #[async_trait] impl< TYPES: NodeType, - COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + COMMCHANNEL: ConnectedNetwork, S: Storage + 'static, > TaskState for NetworkEventTaskState { @@ -237,7 +236,7 @@ impl< impl< TYPES: NodeType, - COMMCHANNEL: ConnectedNetwork, TYPES::SignatureKey>, + COMMCHANNEL: ConnectedNetwork, S: Storage + 'static, > NetworkEventTaskState { @@ -250,7 +249,7 @@ impl< &mut self, event: Arc>, membership: &TYPES::Membership, - ) -> Option { + ) { let mut maybe_action = None; let (sender, message_kind, transmit): (_, _, TransmitType) = match event.as_ref().clone() { @@ -277,7 +276,8 @@ impl< ) } HotShotEvent::VidDisperseSend(proposal, sender) => { - return self.handle_vid_disperse_proposal(proposal, &sender); + self.handle_vid_disperse_proposal(proposal, &sender); + return; } HotShotEvent::DaProposalSend(proposal, sender) => { maybe_action = Some(HotShotAction::DaPropose); @@ -381,22 +381,23 @@ impl< self.channel .update_view::(self.view.u64(), membership) .await; - return None; - } - HotShotEvent::VersionUpgrade(version) => { - debug!("Updating internal version in network task to {:?}", version); - self.version = version; - return None; + return; } - HotShotEvent::Shutdown => { - error!("Networking task shutting down"); - return Some(HotShotTaskCompleted); + HotShotEvent::UpgradeDecided(cert) => { + self.decided_upgrade_certificate = Some(cert.clone()); + return; } - event => { - error!("Receieved unexpected message in network task {:?}", event); - return None; + _ => { + return; } }; + let broadcast_delay = match &message_kind { + MessageKind::Consensus( + SequencingMessage::General(GeneralConsensusMessage::Vote(_)) + | SequencingMessage::Da(_), + ) => BroadcastDelay::View(*message_kind.view_number()), + _ => BroadcastDelay::None, + }; let message = Message { sender, kind: message_kind, @@ -405,7 +406,7 @@ impl< let committee = membership.whole_committee(view); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); - let version = self.version; + let decided_upgrade_certificate = self.decided_upgrade_certificate.clone(); async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, @@ -426,23 +427,26 @@ impl< } } - let transmit_result = if version == BASE_VERSION { - match transmit { - TransmitType::Direct(recipient) => { - net.direct_message(message, recipient, STATIC_VER_0_1).await - } - TransmitType::Broadcast => { - net.broadcast_message(message, committee, STATIC_VER_0_1) - .await - } - TransmitType::DaCommitteeBroadcast => { - net.da_broadcast_message(message, committee, STATIC_VER_0_1) - .await - } + let serialized_message = match message.serialize(&decided_upgrade_certificate) { + Ok(serialized) => serialized, + Err(e) => { + error!("Failed to serialize message: {}", e); + return; + } + }; + + let transmit_result = match transmit { + TransmitType::Direct(recipient) => { + net.direct_message(serialized_message, recipient).await + } + TransmitType::Broadcast => { + net.broadcast_message(serialized_message, committee, broadcast_delay) + .await + } + TransmitType::DaCommitteeBroadcast => { + net.da_broadcast_message(serialized_message, committee, broadcast_delay) + .await } - } else { - error!("The network has upgraded to {:?}, which is not implemented in this instance of HotShot.", version); - return; }; match transmit_result { @@ -450,8 +454,6 @@ impl< Err(e) => error!("Failed to send message from network task: {:?}", e), } }); - - None } /// handle `VidDisperseSend` @@ -462,20 +464,26 @@ impl< ) -> Option { let view = vid_proposal.data.view_number; let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); - let messages: HashMap<_, _> = vid_share_proposals - .into_iter() - .map(|proposal| { - ( - proposal.data.recipient_key.clone(), - Message { - sender: sender.clone(), - kind: MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::VidDisperseMsg(proposal), - )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 - }, - ) - }) - .collect(); + let mut messages = HashMap::new(); + + for proposal in vid_share_proposals { + let recipient = proposal.data.recipient_key.clone(); + let message = Message { + sender: sender.clone(), + kind: MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::VidDisperseMsg(proposal), + )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + }; + let serialized_message = match message.serialize(&self.decided_upgrade_certificate) { + Ok(serialized) => serialized, + Err(e) => { + error!("Failed to serialize message: {}", e); + continue; + } + }; + + messages.insert(recipient, serialized_message); + } let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); @@ -490,7 +498,7 @@ impl< { return; } - match net.vid_broadcast_message(messages, STATIC_VER_0_1).await { + match net.vid_broadcast_message(messages).await { Ok(()) => {} Err(e) => error!("Failed to send message from network task: {:?}", e), } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index ddb6483a6c..3f46ffc0cf 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -1,12 +1,6 @@ -use anyhow::{bail, ensure, Context, Result}; use std::{collections::HashMap, sync::Arc}; -use vbs::version::Version; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, - quorum_vote::handlers::handle_quorum_proposal_validated, -}; +use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -40,6 +34,13 @@ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; +use vbs::version::Version; + +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + quorum_vote::handlers::handle_quorum_proposal_validated, +}; /// Event handlers for `QuorumProposalValidated`. mod handlers; diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index d0e92fe567..bda5ae8831 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,6 +1,5 @@ use std::{ collections::BTreeMap, - marker::PhantomData, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -32,7 +31,6 @@ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; use crate::{events::HotShotEvent, helpers::broadcast_event}; @@ -43,11 +41,7 @@ const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); /// The task will wait a it's `delay` and then send a request iteratively to peers /// for any data they don't have related to the proposal. For now it's just requesting VID /// shares. -pub struct NetworkRequestState< - TYPES: NodeType, - I: NodeImplementation, - Ver: StaticVersionType + 'static, -> { +pub struct NetworkRequestState> { /// Network to send requests over pub network: Arc, /// Consensus shared state so we can check if we've gotten the information @@ -65,8 +59,6 @@ pub struct NetworkRequestState< pub public_key: TYPES::SignatureKey, /// This nodes private/signign key, used to sign requests. pub private_key: ::PrivateKey, - /// Version discrimination - pub _phantom: PhantomData, /// The node's id pub id: u64, /// A flag indicating that `HotShotEvent::Shutdown` has been received @@ -75,9 +67,7 @@ pub struct NetworkRequestState< pub spawned_tasks: BTreeMap>>, } -impl, Ver: StaticVersionType + 'static> Drop - for NetworkRequestState -{ +impl> Drop for NetworkRequestState { fn drop(&mut self) { futures::executor::block_on(async move { self.cancel_subtasks().await }); } @@ -88,9 +78,7 @@ type Signature = <::SignatureKey as SignatureKey>::PureAssembledSignatureType; #[async_trait] -impl, Ver: StaticVersionType + 'static> TaskState - for NetworkRequestState -{ +impl> TaskState for NetworkRequestState { type Event = HotShotEvent; async fn handle_event( @@ -103,8 +91,7 @@ impl, Ver: StaticVersionType + 'st HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.view_number(); if prop_view >= self.view { - self.spawn_requests(prop_view, sender.clone(), Ver::instance()) - .await; + self.spawn_requests(prop_view, sender.clone()).await; } Ok(()) } @@ -116,12 +103,7 @@ impl, Ver: StaticVersionType + 'st Ok(()) } HotShotEvent::QuorumProposalMissing(view) => { - self.run_delay( - RequestKind::Proposal(*view), - sender.clone(), - *view, - Ver::instance(), - ); + self.run_delay(RequestKind::Proposal(*view), sender.clone(), *view); Ok(()) } _ => Ok(()), @@ -146,27 +128,24 @@ impl, Ver: StaticVersionType + 'st } } -impl, Ver: StaticVersionType + 'static> - NetworkRequestState -{ +impl> NetworkRequestState { /// Spawns tasks for a given view to retrieve any data needed. async fn spawn_requests( &mut self, view: TYPES::Time, sender: Sender>>, - bind_version: Ver, ) { - let requests = self.build_requests(view, bind_version).await; + let requests = self.build_requests(view).await; if requests.is_empty() { return; } requests .into_iter() - .for_each(|r| self.run_delay(r, sender.clone(), view, bind_version)); + .for_each(|r| self.run_delay(r, sender.clone(), view)); } /// Creates the srequest structures for all types that are needed. - async fn build_requests(&self, view: TYPES::Time, _: Ver) -> Vec> { + async fn build_requests(&self, view: TYPES::Time) -> Vec> { let mut reqs = Vec::new(); if !self.state.read().await.vid_shares().contains_key(&view) { reqs.push(RequestKind::Vid(view, self.public_key.clone())); @@ -183,7 +162,6 @@ impl, Ver: StaticVersionType + 'st request: RequestKind, sender: Sender>>, view: TYPES::Time, - _: Ver, ) { let mut recipients: Vec<_> = self .da_membership @@ -203,7 +181,7 @@ impl, Ver: StaticVersionType + 'st leader, shutdown_flag: Arc::clone(&self.shutdown_flag), }; - let Ok(data) = Serializer::::serialize(&request) else { + let Ok(data) = bincode::serialize(&request) else { tracing::error!("Failed to serialize request!"); return; }; @@ -213,7 +191,7 @@ impl, Ver: StaticVersionType + 'st return; }; debug!("Requesting data: {:?}", request); - let handle = async_spawn(requester.run::(request, signature, self.public_key.clone())); + let handle = async_spawn(requester.run(request, signature, self.public_key.clone())); self.spawned_tasks.entry(view).or_default().push(handle); } @@ -253,7 +231,7 @@ struct ProposalRequest(TYPES::Time, TYPES::SignatureKey); impl> DelayedRequester { /// Wait the delay, then try to complete the request. Iterates over peers /// until the request is completed, or the data is no longer needed. - async fn run( + async fn run( self, request: RequestKind, signature: Signature, @@ -265,10 +243,10 @@ impl> DelayedRequester { if !self.network.is_primary_down() { async_sleep(self.delay).await; } - self.do_vid::(VidRequest(view, key), signature).await; + self.do_vid(VidRequest(view, key), signature).await; } RequestKind::Proposal(view) => { - self.do_proposal::(ProposalRequest(view, pub_key), signature) + self.do_proposal(ProposalRequest(view, pub_key), signature) .await; } RequestKind::DaProposal(..) => {} @@ -276,53 +254,61 @@ impl> DelayedRequester { } /// Handle sending a request for proposal for a view, does /// not delay - async fn do_proposal( - &self, - req: ProposalRequest, - signature: Signature, - ) { - let _ = self - .network - .request_data::( - make_proposal_req(&req, signature), - &self.leader, - Ver::instance(), - ) - .await; + async fn do_proposal(&self, req: ProposalRequest, signature: Signature) { + match bincode::serialize(&make_proposal_req(&req, signature)) { + Ok(serialized_msg) => { + let _ = self + .network + .request_data::(serialized_msg, &self.leader) + .await; + } + Err(e) => { + tracing::error!( + "Failed to serialize outgoing message: this should never happen. Error: {e}" + ); + } + } } /// Handle sending a VID Share request, runs the loop until the data exists - async fn do_vid( - &self, - req: VidRequest, - signature: Signature, - ) { + async fn do_vid(&self, req: VidRequest, signature: Signature) { let message = make_vid(&req, signature); let mut recipients_it = self.recipients.iter().cycle(); + let serialized_msg = match bincode::serialize(&message) { + Ok(serialized_msg) => serialized_msg, + Err(e) => { + tracing::error!( + "Failed to serialize outgoing message: this should never happen. Error: {e}" + ); + + return; + } + }; + while !self.cancel_vid(&req).await { match async_timeout( REQUEST_TIMEOUT, - self.network.request_data::( - message.clone(), - recipients_it.next().unwrap(), - Ver::instance(), - ), + self.network + .request_data::(serialized_msg.clone(), recipients_it.next().unwrap()), ) .await { Ok(Ok(response)) => { - match response { - ResponseMessage::Found(data) => { + match bincode::deserialize(&response) { + Ok(ResponseMessage::Found(data)) => { self.handle_response_message(data).await; // keep trying, but expect the map to be populated, or view to increase async_sleep(REQUEST_TIMEOUT).await; } - ResponseMessage::NotFound => { + Ok(ResponseMessage::NotFound) => { info!("Peer Responded they did not have the data"); } - ResponseMessage::Denied => { + Ok(ResponseMessage::Denied) => { error!("Request for data was denied by the receiver"); } + Err(e) => { + error!("Failed to deserialize response: {e}"); + } } } Ok(Err(e)) => { diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index b54b011f76..db900d9ac9 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -20,12 +20,11 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use vbs::{version::StaticVersionType, BinarySerializer, Serializer}; use crate::events::HotShotEvent; /// Type alias for the channel that we receive requests from the network on. -pub type RequestReceiver = mpsc::Receiver<(Message, ResponseChannel>)>; +pub type RequestReceiver = mpsc::Receiver<(Vec, ResponseChannel>)>; /// Time to wait for txns before sending `ResponseMessage::NotFound` const TXNS_TIMEOUT: Duration = Duration::from_millis(100); @@ -37,7 +36,7 @@ pub struct NetworkResponseState { /// Locked consensus state consensus: LockedConsensusState, /// Receiver for requests - receiver: RequestReceiver, + receiver: RequestReceiver, /// Quorum membership for checking if requesters have state quorum: Arc, /// This replicas public key @@ -50,7 +49,7 @@ impl NetworkResponseState { /// Create the network request state with the info it needs pub fn new( consensus: LockedConsensusState, - receiver: RequestReceiver, + receiver: RequestReceiver, quorum: Arc, pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -66,16 +65,13 @@ impl NetworkResponseState { /// Run the request response loop until a `HotShotEvent::Shutdown` is received. /// Or the stream is closed. - async fn run_loop( - mut self, - shutdown: EventDependency>>, - ) { + async fn run_loop(mut self, shutdown: EventDependency>>) { let mut shutdown = Box::pin(shutdown.completed().fuse()); loop { futures::select! { req = self.receiver.next() => { match req { - Some((msg, chan)) => self.handle_message::(msg, chan).await, + Some((msg, chan)) => self.handle_message(msg, chan).await, None => return, } }, @@ -88,25 +84,41 @@ impl NetworkResponseState { /// Handle an incoming message. First validates the sender, then handles the contained request. /// Sends the response via `chan` - async fn handle_message( - &self, - req: Message, - chan: ResponseChannel>, - ) { + async fn handle_message(&self, raw_req: Vec, chan: ResponseChannel>) { + let req: Message = match bincode::deserialize(&raw_req) { + Ok(deserialized) => deserialized, + Err(e) => { + tracing::error!("Failed to deserialize message! Error: {e}"); + return; + } + }; let sender = req.sender.clone(); - if !self.valid_sender(&sender) { - let _ = chan.0.send(self.make_msg(ResponseMessage::Denied)); - return; - } match req.kind { - MessageKind::Data(DataMessage::RequestData(req)) => { - if !valid_signature::(&req, &sender) { - let _ = chan.0.send(self.make_msg(ResponseMessage::Denied)); + MessageKind::Data(DataMessage::RequestData(request)) => { + if !self.valid_sender(&sender) || !valid_signature::(&request, &sender) { + let serialized_msg = match bincode::serialize( + &self.make_msg(ResponseMessage::Denied), + ) { + Ok(serialized) => serialized, + Err(e) => { + tracing::error!("Failed to serialize outgoing message: this should never happen. Error: {e}"); + return; + } + }; + let _ = chan.sender.send(serialized_msg); return; } - let response = self.handle_request(req).await; - let _ = chan.0.send(response); + + let response = self.handle_request(request).await; + let serialized_response = match bincode::serialize(&response) { + Ok(serialized) => serialized, + Err(e) => { + tracing::error!("Failed to serialize outgoing message: this should never happen. Error: {e}"); + return; + } + }; + let _ = chan.sender.send(serialized_response); } msg => tracing::error!( "Received message that wasn't a DataRequest in the request task. Message: {:?}", @@ -209,11 +221,11 @@ impl NetworkResponseState { } /// Check the signature -fn valid_signature( +fn valid_signature( req: &DataRequest, sender: &TYPES::SignatureKey, ) -> bool { - let Ok(data) = Serializer::::serialize(&req.request) else { + let Ok(data) = bincode::serialize(&req.request) else { return false; }; sender.validate(&req.signature, &Sha256::digest(data)) @@ -222,7 +234,7 @@ fn valid_signature( /// Spawn the network response task to handle incoming request for data /// from other nodes. It will shutdown when it gets `HotshotEvent::Shutdown` /// on the `event_stream` arg. -pub fn run_response_task( +pub fn run_response_task( task_state: NetworkResponseState, event_stream: Receiver>>, ) -> JoinHandle<()> { @@ -230,5 +242,5 @@ pub fn run_response_task( event_stream, Box::new(|e| matches!(e.as_ref(), HotShotEvent::Shutdown)), ); - async_spawn(task_state.run_loop::(dep)) + async_spawn(task_state.run_loop(dep)) } diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index 4e760273b3..2d40981037 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -1,12 +1,10 @@ -use std::sync::Arc; +use std::{fs::OpenOptions, io::Write, sync::Arc}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::traits::node_implementation::NodeType; -use std::fs::OpenOptions; -use std::io::Write; use crate::events::HotShotEvent; diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 656b6dc0c7..ae7dc4d908 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,12 +1,16 @@ -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; +use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ + constants::{Base, Upgrade, UPGRADE_HASH}, + data::UpgradeProposal, event::{Event, EventType}, + message::Proposal, simple_certificate::UpgradeCertificate, simple_vote::{UpgradeProposalData, UpgradeVote}, traits::{ @@ -17,6 +21,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use tracing::{debug, error, info, instrument, warn}; +use vbs::version::StaticVersionType; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -42,9 +47,6 @@ pub struct UpgradeTaskState> { /// Network for all nodes pub quorum_network: Arc, - /// Whether we should vote affirmatively on a given upgrade proposal (true) or not (false) - pub should_vote: fn(UpgradeProposalData) -> bool, - /// The current vote collection task, if there is one. pub vote_collector: RwLock, UpgradeCertificate>>, @@ -57,6 +59,18 @@ pub struct UpgradeTaskState> { /// This state's ID pub id: u64, + + /// View to start proposing an upgrade + pub start_proposing_view: u64, + + /// View to stop proposing an upgrade + pub stop_proposing_view: u64, + + /// View to start voting on an upgrade + pub start_voting_view: u64, + + /// View to stop voting on an upgrade + pub stop_voting_view: u64, } impl> UpgradeTaskState { @@ -71,9 +85,17 @@ impl> UpgradeTaskState { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { info!("Received upgrade proposal: {:?}", proposal); + if *proposal.data.view_number() < self.start_voting_view + || *proposal.data.view_number() >= self.stop_voting_view + { + return None; + } + // If the proposal does not match our upgrade target, we immediately exit. - if !(self.should_vote)(proposal.data.upgrade_proposal.clone()) { - info!("Received unexpected upgrade proposal:\n{:?}", proposal.data); + if proposal.data.upgrade_proposal.new_version_hash != UPGRADE_HASH + || proposal.data.upgrade_proposal.old_version != Base::VERSION + || proposal.data.upgrade_proposal.new_version != Upgrade::VERSION + { return None; } @@ -190,9 +212,6 @@ impl> UpgradeTaskState { } } } - HotShotEvent::VersionUpgrade(version) => { - error!("The network was upgraded to {:?}. This instance of HotShot did not expect an upgrade.", version); - } HotShotEvent::ViewChange(view) => { let view = *view; if *self.cur_view >= *view { @@ -203,54 +222,47 @@ impl> UpgradeTaskState { warn!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; - - #[cfg(feature = "example-upgrade")] + // We try to form a certificate 5 views before we're leader. + if *view >= self.start_proposing_view + && *view < self.stop_proposing_view + && self.quorum_membership.leader(view + 5) == self.public_key { - use std::marker::PhantomData; + let upgrade_proposal_data = UpgradeProposalData { + old_version: Base::VERSION, + new_version: Upgrade::VERSION, + new_version_hash: UPGRADE_HASH.to_vec(), + // We schedule the upgrade to begin 15 views in the future + old_version_last_view: TYPES::Time::new(*view + 15), + // and end 20 views in the future + new_version_first_view: TYPES::Time::new(*view + 20), + decide_by: TYPES::Time::new(*view + 10), + }; - use committable::Committable; - use hotshot_types::{ - data::UpgradeProposal, message::Proposal, - traits::node_implementation::ConsensusTime, + let upgrade_proposal = UpgradeProposal { + upgrade_proposal: upgrade_proposal_data.clone(), + view_number: view + 5, }; - use vbs::version::Version; - - if *view == 5 && self.quorum_membership.leader(view + 5) == self.public_key { - let upgrade_proposal_data = UpgradeProposalData { - old_version: Version { major: 0, minor: 1 }, - new_version: Version { major: 1, minor: 0 }, - new_version_hash: vec![1, 1, 0, 0, 1], - old_version_last_view: TYPES::Time::new(15), - new_version_first_view: TYPES::Time::new(18), - decide_by: TYPES::Time::new(12), - }; - - let upgrade_proposal = UpgradeProposal { - upgrade_proposal: upgrade_proposal_data.clone(), - view_number: view + 5, - }; - - let signature = TYPES::SignatureKey::sign( - &self.private_key, - upgrade_proposal_data.commit().as_ref(), - ) - .expect("Failed to sign upgrade proposal commitment!"); - - let message = Proposal { - data: upgrade_proposal, - signature, - _pd: PhantomData, - }; - - broadcast_event( - Arc::new(HotShotEvent::UpgradeProposalSend( - message, - self.public_key.clone(), - )), - &tx, - ) - .await; - } + + let signature = TYPES::SignatureKey::sign( + &self.private_key, + upgrade_proposal_data.commit().as_ref(), + ) + .expect("Failed to sign upgrade proposal commitment!"); + + let message = Proposal { + data: upgrade_proposal, + signature, + _pd: PhantomData, + }; + + broadcast_event( + Arc::new(HotShotEvent::UpgradeProposalSend( + message, + self.public_key.clone(), + )), + &tx, + ) + .await; } return None; diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs index d0809f982b..48794508bb 100644 --- a/testing/src/block_builder.rs +++ b/testing/src/block_builder.rs @@ -23,7 +23,7 @@ use hotshot_builder_api::{ use hotshot_example_types::block_types::TestTransaction; use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::{ - constants::{Version01, STATIC_VER_0_1}, + constants::Base, traits::{ block_contents::{precompute_vid_commitment, BlockHeader, EncodeBytes}, node_implementation::NodeType, @@ -35,6 +35,7 @@ use hotshot_types::{ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, App, Url}; +use vbs::version::StaticVersionType; #[async_trait] pub trait TestBuilderImplementation @@ -99,18 +100,17 @@ where let url = Url::parse(&format!("http://0.0.0.0:{0}", config.port)).expect("Valid URL"); let (source, task) = make_simple_builder(num_storage_nodes).await; - let builder_api = hotshot_builder_api::builder::define_api::< - SimpleBuilderSource, - TYPES, - Version01, - >(&Options::default()) - .expect("Failed to construct the builder API"); + let builder_api = + hotshot_builder_api::builder::define_api::, TYPES, Base>( + &Options::default(), + ) + .expect("Failed to construct the builder API"); let mut app: App, hotshot_builder_api::builder::Error> = App::with_state(source); app.register_module("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url.clone(), STATIC_VER_0_1)); + async_spawn(app.serve(url.clone(), Base::instance())); (Some(Box::new(task)), url) } } @@ -291,15 +291,15 @@ where source.run(num_storage_nodes, options); let builder_api = - hotshot_builder_api::builder::define_api::, TYPES, Version01>( + hotshot_builder_api::builder::define_api::, TYPES, Base>( &Options::default(), ) .expect("Failed to construct the builder API"); let mut app: App, Error> = App::with_state(source); - app.register_module::("block_info", builder_api) + app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, STATIC_VER_0_1)); + async_spawn(app.serve(url, Base::instance())); } #[derive(Debug, Clone)] @@ -441,17 +441,16 @@ impl SimpleBuilderSource { where ::InstanceState: Default, { - let builder_api = hotshot_builder_api::builder::define_api::< - SimpleBuilderSource, - TYPES, - Version01, - >(&Options::default()) - .expect("Failed to construct the builder API"); + let builder_api = + hotshot_builder_api::builder::define_api::, TYPES, Base>( + &Options::default(), + ) + .expect("Failed to construct the builder API"); let mut app: App, Error> = App::with_state(self); - app.register_module::("block_info", builder_api) + app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, STATIC_VER_0_1)); + async_spawn(app.serve(url, Base::instance())); } } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 09a1e966b8..6736c18b51 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -15,7 +15,6 @@ use hotshot_example_types::{ use hotshot_types::{ data::Leaf, event::Event, - message::Message, simple_certificate::QuorumCertificate, traits::{ network::ConnectedNetwork, @@ -58,7 +57,7 @@ pub struct SpinningTask> { impl< TYPES: NodeType, I: TestableNodeImplementation, - N: ConnectedNetwork, TYPES::SignatureKey>, + N: ConnectedNetwork, > TestTaskState for SpinningTask where I: TestableNodeImplementation, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 3d04c99f94..7dc3edce56 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -297,6 +297,10 @@ impl TestDescription { data_request_delay: Duration::from_millis(200), // Placeholder until we spin up the builder builder_url: Url::parse("http://localhost:9999").expect("Valid URL"), + start_proposing_view: 0, + stop_proposing_view: 0, + start_voting_view: 0, + stop_voting_view: 0, }; let TimingData { next_view_timeout, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 5845350b97..dd822e3a14 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -3,7 +3,6 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; use hotshot_example_types::storage_types::TestStorage; use hotshot_types::{ - message::Message, traits::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::NodeType, @@ -43,9 +42,7 @@ pub struct TestLauncher> { impl> TestLauncher { /// launch the test #[must_use] - pub fn launch, TYPES::SignatureKey>>( - self, - ) -> TestRunner { + pub fn launch>(self) -> TestRunner { TestRunner:: { launcher: self, nodes: Vec::new(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 1f3f2535a6..3d548a4623 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -23,7 +23,6 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, data::Leaf, - message::Message, simple_certificate::QuorumCertificate, traits::{ election::Membership, @@ -56,7 +55,7 @@ impl TaskErr for T {} impl< TYPES: NodeType, I: TestableNodeImplementation, - N: ConnectedNetwork, TYPES::SignatureKey>, + N: ConnectedNetwork, > TestRunner where I: TestableNodeImplementation, @@ -477,7 +476,7 @@ pub struct LateStartNode> pub struct TestRunner< TYPES: NodeType, I: TestableNodeImplementation, - N: ConnectedNetwork, TYPES::SignatureKey>, + N: ConnectedNetwork, > { /// test launcher, contains a bunch of useful metadata and closures pub(crate) launcher: TestLauncher, diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 8346eec0cf..c934c4c3ff 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use futures::future::select_all; use hotshot_task_impls::{events::HotShotEvent, network::NetworkMessageTaskState}; use hotshot_types::{ - message::{Message, Messages}, + message::{Messages, VersionedMessage}, traits::{network::ConnectedNetwork, node_implementation::NodeType}, }; #[cfg(async_executor_impl = "tokio")] @@ -102,7 +102,7 @@ impl TestTask { /// Add the network task to handle messages and publish events. pub async fn add_network_message_test_task< TYPES: NodeType, - NET: ConnectedNetwork, TYPES::SignatureKey>, + NET: ConnectedNetwork, >( event_stream: Sender>>, channel: Arc, @@ -118,7 +118,24 @@ pub async fn add_network_message_test_task< async_spawn(async move { loop { let msgs = match network.recv_msgs().await { - Ok(msgs) => Messages(msgs), + Ok(msgs) => { + let mut deserialized_messages = Vec::new(); + + for msg in msgs { + let deserialized_message = match VersionedMessage::deserialize(&msg, &None) + { + Ok(deserialized) => deserialized, + Err(e) => { + tracing::error!("Failed to deserialize message: {}", e); + return; + } + }; + + deserialized_messages.push(deserialized_message); + } + + Messages(deserialized_messages) + } Err(err) => { error!("failed to receive messages: {err}"); diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 5cf387ac53..63202690e7 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -8,7 +8,7 @@ use hotshot_example_types::{ use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; use hotshot_testing::block_builder::run_random_builder; use hotshot_types::{ - constants::Version01, + constants::Base, traits::{node_implementation::NodeType, signature_key::SignatureKey, BlockPayload}, }; use tide_disco::Url; @@ -42,7 +42,7 @@ async fn test_random_block_builder() { ); let builder_started = Instant::now(); - let client: BuilderClient = BuilderClient::new(api_url); + let client: BuilderClient = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); let (pub_key, private_key) = diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index ccb591b047..bb2f725312 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -1,5 +1,6 @@ use std::{sync::Arc, time::Duration}; +use async_broadcast::Sender; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; @@ -14,7 +15,6 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - constants::BASE_VERSION, data::ViewNumber, traits::{ election::Membership, @@ -52,13 +52,13 @@ async fn test_network_task() { config.fixed_leader_for_gpuvid, ); let channel = networks.0.clone(); - let network_state: NetworkEventTaskState, _> = + let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { channel: channel.clone(), view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, - version: BASE_VERSION, + decided_upgrade_certificate: None, storage, }; let (tx, rx) = async_broadcast::broadcast(10); @@ -79,7 +79,7 @@ async fn test_network_task() { ))) .await .unwrap(); - let res = async_timeout(Duration::from_millis(100), out_rx.recv_direct()) + let res: Arc> = async_timeout(Duration::from_millis(100), out_rx.recv_direct()) .await .expect("timed out waiting for response") .expect("channel closed"); @@ -117,13 +117,13 @@ async fn test_network_storage_fail() { config.fixed_leader_for_gpuvid, ); let channel = networks.0.clone(); - let network_state: NetworkEventTaskState, _> = + let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { channel: channel.clone(), view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, - version: BASE_VERSION, + decided_upgrade_certificate: None, storage, }; let (tx, rx) = async_broadcast::broadcast(10); @@ -135,7 +135,7 @@ async fn test_network_storage_fail() { let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().await.unwrap(); - let (out_tx, mut out_rx) = async_broadcast::broadcast(10); + let (out_tx, mut out_rx): (Sender>>, _) = async_broadcast::broadcast(10); add_network_message_test_task(out_tx.clone(), channel.clone()).await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index 6f4db6f49d..ea58e3a545 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -248,9 +248,7 @@ async fn test_upgrade_and_consensus_task() { .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); let consensus_state = ConsensusTaskState::::create_from(&handle).await; - let mut upgrade_state = UpgradeTaskState::::create_from(&handle).await; - - upgrade_state.should_vote = |_| true; + let upgrade_state = UpgradeTaskState::::create_from(&handle).await; let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); @@ -440,9 +438,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { } let consensus_state = ConsensusTaskState::::create_from(&handle).await; - let mut upgrade_state = UpgradeTaskState::::create_from(&handle).await; - - upgrade_state.should_vote = |_| true; + let upgrade_state = UpgradeTaskState::::create_from(&handle).await; let inputs = vec![ vec![ diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index e5fb2827e3..da774d6298 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,6 +1,7 @@ #![allow(clippy::panic)] use std::{collections::BTreeSet, sync::Arc}; +use hotshot_types::traits::network::BroadcastDelay; use async_compatibility_layer::logging::setup_logging; use hotshot::{ traits::{ @@ -16,9 +17,8 @@ use hotshot_example_types::{ storage_types::TestStorage, }; use hotshot_types::{ - constants::STATIC_VER_0_1, data::ViewNumber, - message::{DataMessage, Message, MessageKind}, + message::{DataMessage, Message, MessageKind, VersionedMessage}, signature_key::{BLSPubKey, BuilderKey}, traits::{ network::{ConnectedNetwork, TestableNetworkingImplementation}, @@ -59,8 +59,8 @@ impl NodeType for Test { #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct TestImpl {} -pub type DaNetwork = MemoryNetwork, ::SignatureKey>; -pub type QuorumNetwork = MemoryNetwork, ::SignatureKey>; +pub type DaNetwork = MemoryNetwork<::SignatureKey>; +pub type QuorumNetwork = MemoryNetwork<::SignatureKey>; impl NodeImplementation for TestImpl { type QuorumNetwork = QuorumNetwork; @@ -119,7 +119,7 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec, ::SignatureKey>> = MasterMap::new(); + let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key = pubkey(); } @@ -130,7 +130,7 @@ async fn memory_network_spawn_single() { #[instrument] async fn memory_network_spawn_double() { setup_logging(); - let group: Arc, ::SignatureKey>> = MasterMap::new(); + let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key_1 = pubkey(); let _pub_key_2 = pubkey(); @@ -145,7 +145,7 @@ async fn memory_network_direct_queue() { // Create some dummy messages // Make and connect the networking instances - let group: Arc, ::SignatureKey>> = MasterMap::new(); + let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); @@ -169,8 +169,9 @@ async fn memory_network_direct_queue() { // Test 1 -> 2 // Send messages for sent_message in first_messages { + let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network1 - .direct_message(sent_message.clone(), pub_key_2, STATIC_VER_0_1) + .direct_message(serialized_message.clone(), pub_key_2) .await .expect("Failed to message node"); let mut recv_messages = network2 @@ -178,8 +179,9 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); + let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); assert!(recv_messages.is_empty()); - fake_message_eq(sent_message, recv_message); + fake_message_eq(sent_message, deserialized_message); } let second_messages: Vec> = gen_messages(5, 200, pub_key_2); @@ -187,8 +189,9 @@ async fn memory_network_direct_queue() { // Test 2 -> 1 // Send messages for sent_message in second_messages { + let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network2 - .direct_message(sent_message.clone(), pub_key_1, STATIC_VER_0_1) + .direct_message(serialized_message.clone(), pub_key_1) .await .expect("Failed to message node"); let mut recv_messages = network1 @@ -196,8 +199,9 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); + let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); assert!(recv_messages.is_empty()); - fake_message_eq(sent_message, recv_message); + fake_message_eq(sent_message, deserialized_message); } } @@ -208,7 +212,7 @@ async fn memory_network_direct_queue() { async fn memory_network_broadcast_queue() { setup_logging(); // Make and connect the networking instances - let group: Arc, ::SignatureKey>> = MasterMap::new(); + let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); let network1 = MemoryNetwork::new( @@ -230,11 +234,12 @@ async fn memory_network_broadcast_queue() { // Test 1 -> 2 // Send messages for sent_message in first_messages { + let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network1 .broadcast_message( - sent_message.clone(), + serialized_message.clone(), vec![pub_key_2].into_iter().collect::>(), - STATIC_VER_0_1, + BroadcastDelay::None, ) .await .expect("Failed to message node"); @@ -243,8 +248,9 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); + let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); assert!(recv_messages.is_empty()); - fake_message_eq(sent_message, recv_message); + fake_message_eq(sent_message, deserialized_message); } let second_messages: Vec> = gen_messages(5, 200, pub_key_2); @@ -252,11 +258,12 @@ async fn memory_network_broadcast_queue() { // Test 2 -> 1 // Send messages for sent_message in second_messages { + let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network2 .broadcast_message( - sent_message.clone(), + serialized_message.clone(), vec![pub_key_1].into_iter().collect::>(), - STATIC_VER_0_1, + BroadcastDelay::None, ) .await .expect("Failed to message node"); @@ -265,8 +272,9 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); + let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); assert!(recv_messages.is_empty()); - fake_message_eq(sent_message, recv_message); + fake_message_eq(sent_message, deserialized_message); } } @@ -277,7 +285,7 @@ async fn memory_network_broadcast_queue() { async fn memory_network_test_in_flight_message_count() { setup_logging(); - let group: Arc, ::SignatureKey>> = MasterMap::new(); + let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); let network1 = MemoryNetwork::new( @@ -298,44 +306,67 @@ async fn memory_network_test_in_flight_message_count() { let messages: Vec> = gen_messages(5, 100, pub_key_1); let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); - assert_eq!(network1.in_flight_message_count(), Some(0)); - assert_eq!(network2.in_flight_message_count(), Some(0)); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network1), + Some(0) + ); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network2), + Some(0) + ); for (count, message) in messages.iter().enumerate() { + let serialized_message = VersionedMessage::serialize(message, &None).unwrap(); + network1 - .direct_message(message.clone(), pub_key_2, STATIC_VER_0_1) + .direct_message(serialized_message.clone(), pub_key_2) .await .unwrap(); // network 2 has received `count` broadcast messages and `count + 1` direct messages - assert_eq!(network2.in_flight_message_count(), Some(count + count + 1)); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network2), + Some(count + count + 1) + ); network2 - .broadcast_message( - message.clone(), - broadcast_recipients.clone(), - STATIC_VER_0_1, - ) + .broadcast_message(serialized_message.clone(), broadcast_recipients.clone(), BroadcastDelay::None) .await .unwrap(); // network 1 has received `count` broadcast messages - assert_eq!(network1.in_flight_message_count(), Some(count + 1)); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network1), + Some(count + 1) + ); // network 2 has received `count + 1` broadcast messages and `count + 1` direct messages - assert_eq!(network2.in_flight_message_count(), Some((count + 1) * 2)); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network2), + Some((count + 1) * 2) + ); } - while network1.in_flight_message_count().unwrap() > 0 { + while TestableNetworkingImplementation::::in_flight_message_count(&network1).unwrap() > 0 + { network1.recv_msgs().await.unwrap(); } - while network2.in_flight_message_count().unwrap() > messages.len() { + while TestableNetworkingImplementation::::in_flight_message_count(&network2).unwrap() + > messages.len() + { network2.recv_msgs().await.unwrap(); } - while network2.in_flight_message_count().unwrap() > 0 { + while TestableNetworkingImplementation::::in_flight_message_count(&network2).unwrap() > 0 + { network2.recv_msgs().await.unwrap(); } - assert_eq!(network1.in_flight_message_count(), Some(0)); - assert_eq!(network2.in_flight_message_count(), Some(0)); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network1), + Some(0) + ); + assert_eq!( + TestableNetworkingImplementation::::in_flight_message_count(&network2), + Some(0) + ); } diff --git a/types/src/constants.rs b/types/src/constants.rs index b939ffe608..36d4822c60 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -1,6 +1,6 @@ //! configurable constants for hotshot -use vbs::version::{StaticVersion, Version}; +use vbs::version::StaticVersion; /// the number of views to gather information for ahead of time pub const LOOK_AHEAD: u64 = 5; @@ -17,26 +17,15 @@ pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network without delay before re-attempting the (presumed down) primary network pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 50; -/// CONSTANT for protocol major version -pub const VERSION_MAJ: u16 = 0; +/// Base protocol version, set to 0.1 +pub type Base = StaticVersion<0, 1>; +/// Upgraded protocol version, set to 0.2 +pub type Upgrade = StaticVersion<0, 2>; -/// CONSTANT for protocol major version -pub const VERSION_MIN: u16 = 1; - -/// Constant for protocol version 0.1. -pub const VERSION_0_1: Version = Version { - major: VERSION_MAJ, - minor: VERSION_MIN, -}; - -/// Constant for the base protocol version in this instance of HotShot. -pub const BASE_VERSION: Version = VERSION_0_1; - -/// Type for protocol static version 0.1. -pub type Version01 = StaticVersion; - -/// Constant for protocol static version 0.1. -pub const STATIC_VER_0_1: Version01 = StaticVersion {}; +/// Hash for the upgrade from version 0.1 to version 0.2. +pub const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, +]; /// Default channel size for consensus event sharing pub const EVENT_CHANNEL_SIZE: usize = 100_000; diff --git a/types/src/error.rs b/types/src/error.rs index 9a0c0365f7..81426e37c5 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -82,6 +82,8 @@ pub enum HotShotError { /// source of error context: String, }, + /// Failed to serialize message + FailedToSerialize, /// Internal value used to drive the state machine Continue, } diff --git a/types/src/lib.rs b/types/src/lib.rs index 31f9c45f76..c1fa051357 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -201,4 +201,12 @@ pub struct HotShotConfig { pub data_request_delay: Duration, /// Builder API base URL pub builder_url: Url, + /// View to start proposing an upgrade + pub start_proposing_view: u64, + /// View to stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_view <= start_proposing_view. + pub stop_proposing_view: u64, + /// View to start voting on an upgrade + pub start_voting_view: u64, + /// View to stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_view <= start_voting_view. + pub stop_voting_view: u64, } diff --git a/types/src/message.rs b/types/src/message.rs index 85c71f57cd..64185a05d4 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -5,17 +5,22 @@ use std::{fmt, fmt::Debug, marker::PhantomData}; -use anyhow::{ensure, Result}; +use anyhow::{bail, ensure, Context, Result}; use cdn_proto::mnemonic; use committable::Committable; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use vbs::{ + version::{StaticVersionType, Version}, + BinarySerializer, Serializer, +}; use crate::{ + constants::{Base, Upgrade}, data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, simple_certificate::{ - DaCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, + DaCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, @@ -23,7 +28,7 @@ use crate::{ }, traits::{ election::Membership, - network::{DataRequest, NetworkMsg, ResponseMessage, ViewMessage}, + network::{DataRequest, ResponseMessage, ViewMessage}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -41,6 +46,103 @@ pub struct Message { pub kind: MessageKind, } +/// Trait for messages that have a versioned serialization. +pub trait VersionedMessage<'a, TYPES> +where + TYPES: NodeType, + Self: Serialize + Deserialize<'a> + HasViewNumber + Sized, +{ + /// Serialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. + /// + /// # Errors + /// + /// Errors if serialization fails. + fn serialize( + &self, + upgrade_certificate: &Option>, + ) -> Result> { + let view = self.view_number(); + + let version = match upgrade_certificate { + Some(ref cert) => { + if view >= cert.data.new_version_first_view + && cert.data.new_version == Upgrade::VERSION + { + Upgrade::VERSION + } else if view >= cert.data.new_version_first_view + && cert.data.new_version != Upgrade::VERSION + { + bail!("The network has upgraded to a new version that we do not support!"); + } else { + Base::VERSION + } + } + None => Base::VERSION, + }; + + let serialized_message = match version { + Base::VERSION => Serializer::::serialize(&self), + Upgrade::VERSION => Serializer::::serialize(&self), + _ => { + bail!("Attempted to serialize with an incompatible version. This should be impossible."); + } + }; + + serialized_message.context("Failed to serialize message!") + } + + /// Deserialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. This function will fail on improperly versioned messages. + /// + /// # Errors + /// + /// Errors if deserialization fails. + fn deserialize( + message: &'a [u8], + upgrade_certificate: &Option>, + ) -> Result { + let version = Version::deserialize(message) + .context("Failed to read message version!")? + .0; + + let deserialized_message: Self = match version { + Base::VERSION => Serializer::::deserialize(message), + Upgrade::VERSION => Serializer::::deserialize(message), + _ => { + bail!("Cannot deserialize message!"); + } + } + .context("Failed to deserialize message!")?; + + let view = deserialized_message.view_number(); + + let expected_version = match upgrade_certificate { + Some(ref cert) => { + if view >= cert.data.new_version_first_view + && cert.data.new_version == Upgrade::VERSION + { + Upgrade::VERSION + } else if view >= cert.data.new_version_first_view + && cert.data.new_version != Upgrade::VERSION + { + bail!("The network has upgraded to a new version that we do not support!"); + } else { + Base::VERSION + } + } + None => Base::VERSION, + }; + + ensure!( + version == expected_version, + "Message has invalid version number for its view. Expected: {expected_version}, Actual: {version}, View: {view:?}" + ); + + Ok(deserialized_message) + } +} + +impl<'a, TYPES> VersionedMessage<'a, TYPES> for Message where TYPES: NodeType {} + impl fmt::Debug for Message { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Message") @@ -50,16 +152,11 @@ impl fmt::Debug for Message { } } -impl NetworkMsg for Message {} - -impl ViewMessage for Message { +impl HasViewNumber for Message { /// get the view number out of a message fn view_number(&self) -> TYPES::Time { self.kind.view_number() } - fn purpose(&self) -> MessagePurpose { - self.kind.purpose() - } } /// A wrapper type for implementing `PassType` on a vector of `Message`. diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 4d9f2837fe..b186552e4a 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -34,7 +34,6 @@ use rand::{ }; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use vbs::version::StaticVersionType; use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ @@ -174,9 +173,13 @@ pub trait NetworkMsg: { } +impl NetworkMsg for T where + T: Serialize + for<'a> Deserialize<'a> + Clone + Sync + Send + Debug + 'static +{ +} + /// Trait that bundles what we need from a request ID pub trait Id: Eq + PartialEq + Hash {} -impl NetworkMsg for Vec {} /// a message pub trait ViewMessage { @@ -188,7 +191,10 @@ pub trait ViewMessage { } /// Wraps a oneshot channel for responding to requests -pub struct ResponseChannel(pub oneshot::Sender); +pub struct ResponseChannel { + /// underlying sender for this channel + pub sender: oneshot::Sender, +} /// A request for some data that the consensus layer is asking for. #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] @@ -229,14 +235,23 @@ pub enum ResponseMessage { Denied, } +#[derive(Debug, Clone, PartialEq, Eq)] +/// When a message should be broadcast to the network. +/// +/// Network implementations may or may not respect this, at their discretion. +pub enum BroadcastDelay { + /// Broadcast the message immediately + None, + /// Delay the broadcast to a given view. + View(u64), +} + #[async_trait] /// represents a networking implmentration /// exposes low level API for interacting with a network /// intended to be implemented for libp2p, the centralized server, /// and memory network -pub trait ConnectedNetwork: - Clone + Send + Sync + 'static -{ +pub trait ConnectedNetwork: Clone + Send + Sync + 'static { /// Pauses the underlying network fn pause(&self); @@ -254,32 +269,31 @@ pub trait ConnectedNetwork: /// broadcast message to some subset of nodes /// blocking - async fn broadcast_message( + async fn broadcast_message( &self, - message: M, + message: Vec, recipients: BTreeSet, - bind_version: VER, + broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError>; /// broadcast a message only to a DA committee /// blocking - async fn da_broadcast_message( + async fn da_broadcast_message( &self, - message: M, + message: Vec, recipients: BTreeSet, - bind_version: VER, + broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError>; /// send messages with vid shares to its recipients /// blocking - async fn vid_broadcast_message( + async fn vid_broadcast_message( &self, - messages: HashMap, - bind_version: VER, + messages: HashMap>, ) -> Result<(), NetworkError> { - let future_results = messages.into_iter().map(|(recipient_key, message)| { - self.direct_message(message, recipient_key, bind_version) - }); + let future_results = messages + .into_iter() + .map(|(recipient_key, message)| self.direct_message(message, recipient_key)); let results = join_all(future_results).await; let errors: Vec<_> = results @@ -299,27 +313,21 @@ pub trait ConnectedNetwork: /// Sends a direct message to a specific node /// blocking - async fn direct_message( - &self, - message: M, - recipient: K, - bind_version: VER, - ) -> Result<(), NetworkError>; + async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError>; /// Receive one or many messages from the underlying network. /// /// # Errors /// If there is a network-related failure. - async fn recv_msgs(&self) -> Result, NetworkError>; + async fn recv_msgs(&self) -> Result>, NetworkError>; /// Ask request the network for some data. Returns the request ID for that data, /// The ID returned can be used for cancelling the request - async fn request_data( + async fn request_data( &self, - _request: M, + _request: Vec, _recipient: &K, - _bind_version: VER, - ) -> Result, NetworkError> { + ) -> Result, NetworkError> { Err(NetworkError::UnimplementedFeature) } @@ -329,10 +337,9 @@ pub trait ConnectedNetwork: /// with a return channel to send the response back to. /// /// Returns `None`` if network does not support handling requests - async fn spawn_request_receiver_task( + async fn spawn_request_receiver_task( &self, - _bind_version: VER, - ) -> Option)>> { + ) -> Option, ResponseChannel>)>> { None } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index f4f1de3599..d6413a2fe1 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -27,7 +27,6 @@ use super::{ }; use crate::{ data::{Leaf, TestableLeaf}, - message::Message, traits::{ election::Membership, signature_key::SignatureKey, states::InstanceState, BlockPayload, }, @@ -45,10 +44,10 @@ pub trait NodeImplementation: Send + Sync + Clone + Eq + Hash + 'static + Serialize + for<'de> Deserialize<'de> { /// Network for all nodes - type QuorumNetwork: ConnectedNetwork, TYPES::SignatureKey>; + type QuorumNetwork: ConnectedNetwork; /// Network for those in the DA committee - type DaNetwork: ConnectedNetwork, TYPES::SignatureKey>; + type DaNetwork: ConnectedNetwork; /// Storage for DA layer interactions type Storage: Storage; From 6b230508ae3af038f85507db3daa0131c45cda38 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 12 Jun 2024 20:52:13 +0200 Subject: [PATCH 1078/1393] Multiple builders (#3203) * Multiple builders support --- examples/Cargo.toml | 1 + examples/infra/mod.rs | 68 +- hotshot/src/tasks/task_state.rs | 9 +- orchestrator/Cargo.toml | 1 + orchestrator/src/config.rs | 13 +- task-impls/src/consensus/helpers.rs | 24 +- task-impls/src/quorum_vote/handlers.rs | 10 +- task-impls/src/transactions.rs | 280 +++++--- testing/Cargo.toml | 1 + testing/src/block_builder.rs | 630 ------------------ testing/src/block_builder/mod.rs | 178 +++++ testing/src/block_builder/random.rs | 324 +++++++++ testing/src/block_builder/simple.rs | 366 ++++++++++ testing/src/overall_safety_task.rs | 15 +- testing/src/test_builder.rs | 42 +- testing/src/test_runner.rs | 35 +- testing/tests/tests_1/block_builder.rs | 48 +- .../tests_3/test_with_builder_failures.rs | 69 ++ types/Cargo.toml | 1 + types/src/lib.rs | 3 +- 20 files changed, 1314 insertions(+), 804 deletions(-) delete mode 100644 testing/src/block_builder.rs create mode 100644 testing/src/block_builder/mod.rs create mode 100644 testing/src/block_builder/random.rs create mode 100644 testing/src/block_builder/simple.rs create mode 100644 testing/tests/tests_3/test_with_builder_failures.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 7b2b7b547d..3bb65b3f99 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -110,6 +110,7 @@ chrono = { workspace = true } vbs = { workspace = true } sha2.workspace = true local-ip-address = "0.6" +vec1 = { workspace = true } tracing = { workspace = true } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index cf6586cdc9..fff98813ee 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -1,5 +1,6 @@ #![allow(clippy::panic)] use std::{ + collections::HashMap, fmt::Debug, fs, marker::PhantomData, @@ -322,6 +323,39 @@ fn calculate_num_tx_per_round( ) } +/// Helper function to generate transactions a given node should send +fn generate_transactions>( + node_index: u64, + rounds: usize, + transactions_to_send_per_round: usize, + transaction_size: usize, +) -> Vec +where + ::ValidatedState: TestableState, + ::BlockPayload: TestableBlock, +{ + let mut txn_rng = StdRng::seed_from_u64(node_index); + let mut transactions = Vec::new(); + + for round in 0..rounds { + for _ in 0..transactions_to_send_per_round { + let txn = ::create_random_transaction( + None, + &mut txn_rng, + transaction_size as u64, + ); + + // prepend destined view number to transaction + let view_execute_number: u64 = round as u64 + 4; + let mut bytes = txn.into_bytes(); + bytes[0..8].copy_from_slice(&view_execute_number.to_be_bytes()); + + transactions.push(TestTransaction::new(bytes)); + } + } + transactions +} + /// Defines the behavior of a "run" of the network with a given configuration #[async_trait] pub trait RunDa< @@ -923,24 +957,26 @@ pub async fn main_entry_point< >::start( run_config.config.num_nodes_with_stake.into(), run_config.random_builder.clone().unwrap_or_default(), + HashMap::new(), ) .await; - run_config.config.builder_url = builder_url; + run_config.config.builder_urls = vec1::vec1![builder_url]; - builder_task + Some(builder_task) } BuilderType::Simple => { let (builder_task, builder_url) = >::start( run_config.config.num_nodes_with_stake.into(), SimpleBuilderConfig::default(), + HashMap::new(), ) .await; - run_config.config.builder_url = builder_url; + run_config.config.builder_urls = vec1::vec1![builder_url]; - builder_task + Some(builder_task) } }; @@ -965,30 +1001,18 @@ pub async fn main_entry_point< .. } = run_config; - let mut txn_rng = StdRng::seed_from_u64(node_index); let transactions_to_send_per_round = calculate_num_tx_per_round( node_index, num_nodes_with_stake.get(), transactions_per_round, ); - let mut transactions = Vec::new(); - - for round in 0..rounds { - for _ in 0..transactions_to_send_per_round { - let txn = ::create_random_transaction( - None, - &mut txn_rng, - transaction_size as u64, - ); - - // prepend destined view number to transaction - let view_execute_number: u64 = round as u64 + 4; - let mut bytes = txn.into_bytes(); - bytes[0..8].copy_from_slice(&view_execute_number.to_be_bytes()); + let mut transactions: Vec = generate_transactions::( + node_index, + rounds, + transactions_to_send_per_round, + transaction_size, + ); - transactions.push(TestTransaction::new(bytes)); - } - } if let NetworkConfigSource::Orchestrator = source { info!("Waiting for the start command from orchestrator"); orchestrator_client diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index f0b481a33e..f808593b09 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -180,7 +180,14 @@ impl, Ver: StaticVersionType> private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, - builder_client: BuilderClient::new(handle.hotshot.config.builder_url.clone()), + builder_clients: handle + .hotshot + .config + .builder_urls + .iter() + .cloned() + .map(BuilderClient::new) + .collect(), decided_upgrade_certificate: None, } } diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 202a70241e..412a847940 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -25,6 +25,7 @@ thiserror = "1" serde-inline-default = "0.1" csv = "1" vbs = { workspace = true } +vec1 = { workspace = true } multiaddr = "0.18" anyhow.workspace = true bincode.workspace = true diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 37e6fff2e1..7fd930cf5d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -18,6 +18,7 @@ use surf_disco::Url; use thiserror::Error; use toml; use tracing::{error, info}; +use vec1::Vec1; use crate::client::OrchestratorClient; @@ -544,8 +545,8 @@ impl From> for NetworkConfig { } /// Default builder URL, used as placeholder -fn default_builder_url() -> Url { - Url::parse("http://localhost:3311").unwrap() +fn default_builder_urls() -> Vec1 { + vec1::vec1![Url::parse("http://0.0.0.0:3311").unwrap()] } /// Holds configuration for a `HotShot` @@ -594,8 +595,8 @@ pub struct HotShotConfigFile { /// Time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// Builder API base URL - #[serde(default = "default_builder_url")] - pub builder_url: Url, + #[serde(default = "default_builder_urls")] + pub builder_urls: Vec1, /// Upgrade config pub upgrade: UpgradeConfig, } @@ -703,7 +704,7 @@ impl From> for HotShotConfig { num_bootstrap: val.num_bootstrap, builder_timeout: val.builder_timeout, data_request_delay: val.data_request_delay, - builder_url: val.builder_url, + builder_urls: val.builder_urls, start_proposing_view: val.upgrade.start_proposing_view, stop_proposing_view: val.upgrade.stop_proposing_view, start_voting_view: val.upgrade.start_voting_view, @@ -776,7 +777,7 @@ impl Default for HotShotConfigFile { num_bootstrap: 5, builder_timeout: Duration::from_secs(10), data_request_delay: Duration::from_millis(200), - builder_url: default_builder_url(), + builder_urls: default_builder_urls(), upgrade: UpgradeConfig::default(), } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 97cc75b2aa..1922c2ad46 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -776,7 +776,7 @@ pub struct LeafChainTraversalOutcome { pub leaves_decided: Vec>, /// The transactions in the block payload for each leaf. - pub included_txns: HashSet::Transaction>>, + pub included_txns: Option::Transaction>>>, /// The most recent upgrade certificate from one of the leaves. pub decided_upgrade_cert: Option>, @@ -794,7 +794,7 @@ impl Default for LeafChainTraversalOutcome { new_decide_qc: None, leaf_views: Vec::new(), leaves_decided: Vec::new(), - included_txns: HashSet::new(), + included_txns: None, decided_upgrade_cert: None, } } @@ -927,9 +927,12 @@ pub async fn decide_from_proposal( )); res.leaves_decided.push(leaf.clone()); if let Some(ref payload) = leaf.block_payload() { - for txn in payload.transaction_commitments(leaf.block_header().metadata()) { - res.included_txns.insert(txn); - } + res.included_txns = Some( + payload + .transaction_commitments(leaf.block_header().metadata()) + .into_iter() + .collect::>(), + ); } } true @@ -974,12 +977,6 @@ pub async fn handle_quorum_proposal_validated = if res.new_decided_view_number.is_some() { - res.included_txns - } else { - HashSet::new() - }; - let mut consensus = task_state.consensus.write().await; if let Some(new_locked_view) = res.new_locked_view_number { if let Err(e) = consensus.update_locked_view(new_locked_view) { @@ -1019,13 +1016,14 @@ pub async fn handle_quorum_proposal_validated { /// Initial API response @@ -74,7 +89,7 @@ pub struct TransactionTaskState< pub membership: Arc, /// Builder API client - pub builder_client: BuilderClient, + pub builder_clients: Vec>, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -305,100 +320,205 @@ impl, Ver: StaticVersionType> None } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "block_from_builder", level = "error")] - async fn block_from_builder( + /// Query the builders for available blocks. Queries only fraction of the builders + /// based on the response time. + async fn get_available_blocks( &self, parent_comm: VidCommitment, view_number: TYPES::Time, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> anyhow::Result> { - let available_blocks = self - .builder_client - .available_blocks( - parent_comm, - view_number.u64(), - self.public_key.clone(), - parent_comm_sig, - ) - .await - .context("getting available blocks")?; - tracing::debug!("Got available blocks: {available_blocks:?}"); + ) -> Vec<(AvailableBlockInfo, usize)> { + // Create a collection of futures that call available_blocks endpoint for every builder + let tasks = self + .builder_clients + .iter() + .enumerate() + .map(|(builder_idx, client)| async move { + client + .available_blocks( + parent_comm, + view_number.u64(), + self.public_key.clone(), + parent_comm_sig, + ) + .await + .map(move |blocks| { + // Add index into `self.builder_clients` for each block so that we know + // where to claim it from later + blocks + .into_iter() + .map(move |block_info| (block_info, builder_idx)) + }) + }) + .collect::>(); + + // A vector of resolved builder responses + let mut results = Vec::with_capacity(self.builder_clients.len()); + + // Instant we start querying builders for available blocks + let query_start = Instant::now(); + + // First we complete the query to the fastest fraction of the builders + let threshold = (self.builder_clients.len() * BUILDER_MAIN_BATCH_THRESHOLD_DIVIDEND) + .div_ceil(BUILDER_MAIN_BATCH_THRESHOLD_DIVISOR); + let mut tasks = tasks.take(threshold); + while let Some(result) = tasks.next().await { + results.push(result); + if query_start.elapsed() > BUILDER_MAIN_BATCH_CUTOFF { + break; + } + } - let block_info = available_blocks + // Then we query the rest, alotting additional `elapsed * BUILDER_ADDITIONAL_TIME_MULTIPLIER` + // for them to respond. There's a fixed floor of `BUILDER_MINIMUM_QUERY_TIME` for both + // phases + let timeout = async_sleep(std::cmp::max( + query_start + .elapsed() + .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), + BUILDER_MINIMUM_QUERY_TIME - query_start.elapsed(), + )); + futures::pin_mut!(timeout); // Stream::next requires Self::Unpin + let mut tasks = tasks.into_inner().take_until(timeout); + while let Some(result) = tasks.next().await { + results.push(result); + } + + results .into_iter() - .max_by(|l, r| { - // We want the block with the highest fee per byte of data we're going to have to - // process, thus our comparision function is: - // (l.offered_fee / l.block_size) < (r.offered_fee / r.block_size) - // To avoid floating point math (which doesn't even have an `Ord` impl) we multiply - // through by the denominators to get - // l.offered_fee * r.block_size < r.offered_fee * l.block_size - // We cast up to u128 to avoid overflow. - (u128::from(l.offered_fee) * u128::from(r.block_size)) - .cmp(&(u128::from(r.offered_fee) * u128::from(l.block_size))) + .filter_map(|result| match result { + Ok(value) => Some(value), + Err(err) => { + tracing::warn!(%err, "Error getting available blocks"); + None + } }) - .context("no available blocks")?; - tracing::debug!("Selected block: {block_info:?}"); - - // Verify signature over chosen block. - if !block_info.sender.validate_block_info_signature( - &block_info.signature, - block_info.block_size, - block_info.offered_fee, - &block_info.block_hash, - ) { - bail!("Failed to verify available block info response message signature"); + .flatten() + .collect::>() + } + + /// Get a block from builder. + /// Queries the sufficiently fast builders for available blocks and chooses the one with the + /// best fee/byte ratio, re-trying with the next best one in case of failure. + /// + /// # Errors + /// If none of the builder reports any available blocks or claiming block fails for all of the + /// builders. + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "block_from_builder", level = "error")] + async fn block_from_builder( + &self, + parent_comm: VidCommitment, + view_number: TYPES::Time, + parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> anyhow::Result> { + let mut available_blocks = self + .get_available_blocks(parent_comm, view_number, parent_comm_sig) + .await; + + available_blocks.sort_by(|(l, _), (r, _)| { + // We want the block with the highest fee per byte of data we're going to have to + // process, thus our comparision function is: + // (l.offered_fee / l.block_size) < (r.offered_fee / r.block_size) + // To avoid floating point math (which doesn't even have an `Ord` impl) we multiply + // through by the denominators to get + // l.offered_fee * r.block_size < r.offered_fee * l.block_size + // We cast up to u128 to avoid overflow. + (u128::from(l.offered_fee) * u128::from(r.block_size)) + .cmp(&(u128::from(r.offered_fee) * u128::from(l.block_size))) + }); + + if available_blocks.is_empty() { + bail!("No available blocks"); } - let request_signature = <::SignatureKey as SignatureKey>::sign( - &self.private_key, - block_info.block_hash.as_ref(), - ) - .context("signing block hash")?; + for (block_info, builder_idx) in available_blocks { + let client = &self.builder_clients[builder_idx]; + + // Verify signature over chosen block. + if !block_info.sender.validate_block_info_signature( + &block_info.signature, + block_info.block_size, + block_info.offered_fee, + &block_info.block_hash, + ) { + tracing::warn!("Failed to verify available block info response message signature"); + continue; + } - let (block, header_input) = futures::join! { - self.builder_client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), - self.builder_client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) - }; + let request_signature = match <::SignatureKey as SignatureKey>::sign( + &self.private_key, + block_info.block_hash.as_ref(), + ) { + Ok(request_signature) => request_signature, + Err(err) => { + tracing::warn!(%err, "Failed to sign block hash"); + continue; + } + }; - let block_data = block.context("claiming block data")?; - - // verify the signature over the message, construct the builder commitment - let builder_commitment = block_data - .block_payload - .builder_commitment(&block_data.metadata); - if !block_data - .sender - .validate_builder_signature(&block_data.signature, builder_commitment.as_ref()) - { - bail!("Failed to verify available block data response message signature"); - } + let (block, header_input) = futures::join! { + client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + }; + + let block_data = match block { + Ok(block_data) => block_data, + Err(err) => { + tracing::warn!(%err, "Error claiming block data"); + continue; + } + }; + + let header_input = match header_input { + Ok(block_data) => block_data, + Err(err) => { + tracing::warn!(%err, "Error claiming header input"); + continue; + } + }; + + // verify the signature over the message, construct the builder commitment + let builder_commitment = block_data + .block_payload + .builder_commitment(&block_data.metadata); + if !block_data + .sender + .validate_builder_signature(&block_data.signature, builder_commitment.as_ref()) + { + tracing::warn!("Failed to verify available block data response message signature"); + continue; + } - let header_input = header_input.context("claiming header input")?; + // first verify the message signature and later verify the fee_signature + if !header_input.sender.validate_builder_signature( + &header_input.message_signature, + header_input.vid_commitment.as_ref(), + ) { + tracing::warn!( + "Failed to verify available block header input data response message signature" + ); + continue; + } - // first verify the message signature and later verify the fee_signature - if !header_input.sender.validate_builder_signature( - &header_input.message_signature, - header_input.vid_commitment.as_ref(), - ) { - bail!("Failed to verify available block header input data response message signature"); - } + // verify the signature over the message + if !header_input.sender.validate_fee_signature( + &header_input.fee_signature, + block_info.offered_fee, + &block_data.metadata, + &header_input.vid_commitment, + ) { + tracing::warn!("Failed to verify fee signature"); + continue; + } - // verify the signature over the message - if !header_input.sender.validate_fee_signature( - &header_input.fee_signature, - block_info.offered_fee, - &block_data.metadata, - &header_input.vid_commitment, - ) { - bail!("Failed to verify fee signature"); + return Ok(BuilderResponses { + blocks_initial_info: block_info, + block_data, + block_header: header_input, + }); } - Ok(BuilderResponses { - blocks_initial_info: block_info, - block_data, - block_header: header_input, - }) + bail!("Couldn't claim a block from any of the builders"); } } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index fcb390e46d..090c9595fb 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -46,6 +46,7 @@ tracing = { workspace = true } vbs = { workspace = true } lru = { workspace = true } tagged-base64.workspace = true +vec1 = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/block_builder.rs b/testing/src/block_builder.rs deleted file mode 100644 index 48794508bb..0000000000 --- a/testing/src/block_builder.rs +++ /dev/null @@ -1,630 +0,0 @@ -use std::{ - collections::HashMap, - num::NonZeroUsize, - ops::Deref, - sync::Arc, - time::{Duration, Instant}, -}; - -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::RwLock; -use async_trait::async_trait; -use committable::{Commitment, Committable}; -use futures::{future::BoxFuture, Stream, StreamExt}; -use hotshot::{ - traits::BlockPayload, - types::{Event, EventType, SignatureKey}, -}; -use hotshot_builder_api::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::{BuildError, Error, Options}, - data_source::BuilderDataSource, -}; -use hotshot_example_types::block_types::TestTransaction; -use hotshot_orchestrator::config::RandomBuilderConfig; -use hotshot_types::{ - constants::Base, - traits::{ - block_contents::{precompute_vid_commitment, BlockHeader, EncodeBytes}, - node_implementation::NodeType, - signature_key::BuilderSignatureKey, - }, - utils::BuilderCommitment, - vid::VidCommitment, -}; -use lru::LruCache; -use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; -use tide_disco::{method::ReadState, App, Url}; -use vbs::version::StaticVersionType; - -#[async_trait] -pub trait TestBuilderImplementation -where - ::InstanceState: Default, -{ - type Config: Default; - - async fn start( - num_storage_nodes: usize, - options: Self::Config, - ) -> (Option>>, Url); -} - -pub struct RandomBuilderImplementation; - -#[async_trait] -impl TestBuilderImplementation for RandomBuilderImplementation -where - TYPES: NodeType, - ::InstanceState: Default, -{ - type Config = RandomBuilderConfig; - - async fn start( - num_storage_nodes: usize, - config: RandomBuilderConfig, - ) -> (Option>>, Url) { - let port = portpicker::pick_unused_port().expect("No free ports"); - let url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); - run_random_builder::(url.clone(), num_storage_nodes, config); - (None, url) - } -} - -/// Configuration for `SimpleBuilder` -pub struct SimpleBuilderConfig { - pub port: u16, -} - -impl Default for SimpleBuilderConfig { - fn default() -> Self { - Self { - port: portpicker::pick_unused_port().expect("No free ports"), - } - } -} - -pub struct SimpleBuilderImplementation; - -#[async_trait] -impl TestBuilderImplementation for SimpleBuilderImplementation -where - ::InstanceState: Default, -{ - type Config = SimpleBuilderConfig; - - async fn start( - num_storage_nodes: usize, - config: Self::Config, - ) -> (Option>>, Url) { - let url = Url::parse(&format!("http://0.0.0.0:{0}", config.port)).expect("Valid URL"); - let (source, task) = make_simple_builder(num_storage_nodes).await; - - let builder_api = - hotshot_builder_api::builder::define_api::, TYPES, Base>( - &Options::default(), - ) - .expect("Failed to construct the builder API"); - let mut app: App, hotshot_builder_api::builder::Error> = - App::with_state(source); - app.register_module("block_info", builder_api) - .expect("Failed to register the builder API"); - - async_spawn(app.serve(url.clone(), Base::instance())); - (Some(Box::new(task)), url) - } -} - -/// Entry for a built block -struct BlockEntry { - metadata: AvailableBlockInfo, - payload: Option>, - header_input: Option>, -} - -/// A mock implementation of the builder data source. -/// Builds random blocks, doesn't track HotShot state at all. -/// Evicts old available blocks if HotShot doesn't keep up. -#[derive(Clone, Debug)] -pub struct RandomBuilderSource { - /// Built blocks - blocks: Arc< - RwLock< - // Isn't strictly speaking used as a cache, - // just as a HashMap that evicts old blocks - LruCache>, - >, - >, - pub_key: TYPES::BuilderSignatureKey, - priv_key: ::BuilderPrivateKey, -} - -impl RandomBuilderSource -where - TYPES: NodeType, -{ - /// Create new [`RandomBuilderSource`] - #[must_use] - #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 - pub fn new( - pub_key: TYPES::BuilderSignatureKey, - priv_key: ::BuilderPrivateKey, - ) -> Self { - Self { - blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), - priv_key, - pub_key, - } - } - - /// Spawn a task building blocks, configured with given options - #[allow(clippy::missing_panics_doc)] // ony panics on 16-bit platforms - pub fn run(&self, num_storage_nodes: usize, options: RandomBuilderConfig) - where - ::InstanceState: Default, - { - let blocks = self.blocks.clone(); - let (priv_key, pub_key) = (self.priv_key.clone(), self.pub_key.clone()); - async_spawn(async move { - let mut rng = SmallRng::from_entropy(); - let time_per_block = Duration::from_secs(1) / options.blocks_per_second; - loop { - let start = std::time::Instant::now(); - let transactions: Vec = (0..options.txn_in_block) - .map(|_| { - let mut bytes = vec![ - 0; - rng.gen_range(options.txn_size.clone()) - .try_into() - .expect("We are NOT running on a 16-bit platform") - ]; - rng.fill_bytes(&mut bytes); - TestTransaction::new(bytes) - }) - .collect(); - - let (metadata, payload, header_input) = build_block( - transactions, - num_storage_nodes, - pub_key.clone(), - priv_key.clone(), - ) - .await; - - if let Some((hash, _)) = blocks.write().await.push( - metadata.block_hash.clone(), - BlockEntry { - metadata, - payload: Some(payload), - header_input: Some(header_input), - }, - ) { - tracing::warn!("Block {} evicted", hash); - }; - async_sleep(time_per_block.saturating_sub(start.elapsed())).await; - } - }); - } -} - -#[async_trait] -impl ReadState for RandomBuilderSource { - type State = Self; - - async fn read( - &self, - op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, - ) -> T { - op(self).await - } -} - -#[async_trait] -impl BuilderDataSource for RandomBuilderSource { - async fn available_blocks( - &self, - _for_parent: &VidCommitment, - _view_number: u64, - _sender: TYPES::SignatureKey, - _signature: &::PureAssembledSignatureType, - ) -> Result>, BuildError> { - Ok(self - .blocks - .deref() - .read() - .await - .iter() - .map(|(_, BlockEntry { metadata, .. })| metadata.clone()) - .collect()) - } - - async fn claim_block( - &self, - block_hash: &BuilderCommitment, - _view_number: u64, - _sender: TYPES::SignatureKey, - _signature: &::PureAssembledSignatureType, - ) -> Result, BuildError> { - let mut blocks = self.blocks.write().await; - let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; - let payload = entry.payload.take().ok_or(BuildError::Missing)?; - // Check if header input is claimed as well, if yes, then evict block - if entry.header_input.is_none() { - blocks.pop(block_hash); - }; - Ok(payload) - } - - async fn claim_block_header_input( - &self, - block_hash: &BuilderCommitment, - _view_number: u64, - _sender: TYPES::SignatureKey, - _signature: &::PureAssembledSignatureType, - ) -> Result, BuildError> { - let mut blocks = self.blocks.write().await; - let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; - let header_input = entry.header_input.take().ok_or(BuildError::Missing)?; - // Check if payload is claimed as well, if yes, then evict block - if entry.payload.is_none() { - blocks.pop(block_hash); - }; - Ok(header_input) - } - - async fn builder_address(&self) -> Result { - Ok(self.pub_key.clone()) - } -} - -/// Construct a tide disco app that mocks the builder API. -/// -/// # Panics -/// If constructing and launching the builder fails for any reason -pub fn run_random_builder(url: Url, num_storage_nodes: usize, options: RandomBuilderConfig) -where - TYPES: NodeType, - ::InstanceState: Default, -{ - let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); - let source = RandomBuilderSource::new(pub_key, priv_key); - source.run(num_storage_nodes, options); - - let builder_api = - hotshot_builder_api::builder::define_api::, TYPES, Base>( - &Options::default(), - ) - .expect("Failed to construct the builder API"); - let mut app: App, Error> = App::with_state(source); - app.register_module::("block_info", builder_api) - .expect("Failed to register the builder API"); - - async_spawn(app.serve(url, Base::instance())); -} - -#[derive(Debug, Clone)] -struct SubmittedTransaction { - claimed: Option, - transaction: TYPES::Transaction, -} - -pub struct SimpleBuilderSource { - pub_key: TYPES::BuilderSignatureKey, - priv_key: ::BuilderPrivateKey, - num_storage_nodes: usize, - #[allow(clippy::type_complexity)] - transactions: Arc, SubmittedTransaction>>>, - blocks: Arc>>>, -} - -#[async_trait] -impl ReadState for SimpleBuilderSource { - type State = Self; - - async fn read( - &self, - op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, - ) -> T { - op(self).await - } -} - -#[async_trait] -impl BuilderDataSource for SimpleBuilderSource -where - ::InstanceState: Default, -{ - async fn available_blocks( - &self, - _for_parent: &VidCommitment, - _view_number: u64, - _sender: TYPES::SignatureKey, - _signature: &::PureAssembledSignatureType, - ) -> Result>, BuildError> { - let transactions = self - .transactions - .read(|txns| { - Box::pin(async { - txns.values() - .filter(|txn| { - // We want transactions that are either unclaimed, or claimed long ago - // and thus probably not included, or they would've been decided on - // already and removed from the queue - txn.claimed - .map(|claim_time| claim_time.elapsed() > Duration::from_secs(30)) - .unwrap_or(true) - }) - .cloned() - .map(|txn| txn.transaction) - .collect::>() - }) - }) - .await; - - if transactions.is_empty() { - // We don't want to return an empty block, as we will end up driving consensus to - // produce empty blocks extremely quickly. Instead, we return no blocks, so that - // consensus will keep asking for blocks until either we have something non-trivial to - // propose, or a timeout, in which case consensus will finally propose an empty block - // anyways. - return Ok(vec![]); - } - - let (metadata, payload, header_input) = build_block( - transactions, - self.num_storage_nodes, - self.pub_key.clone(), - self.priv_key.clone(), - ) - .await; - - self.blocks.write().await.insert( - metadata.block_hash.clone(), - BlockEntry { - metadata: metadata.clone(), - payload: Some(payload), - header_input: Some(header_input), - }, - ); - - Ok(vec![metadata]) - } - - async fn claim_block( - &self, - block_hash: &BuilderCommitment, - _view_number: u64, - _sender: TYPES::SignatureKey, - _signature: &::PureAssembledSignatureType, - ) -> Result, BuildError> { - let payload = { - let mut blocks = self.blocks.write().await; - let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; - entry.payload.take().ok_or(BuildError::Missing)? - }; - - let now = Instant::now(); - - let claimed_transactions = payload - .block_payload - .transaction_commitments(&payload.metadata); - - let mut transactions = self.transactions.write().await; - for txn_hash in claimed_transactions { - if let Some(txn) = transactions.get_mut(&txn_hash) { - txn.claimed = Some(now); - } - } - - Ok(payload) - } - - async fn claim_block_header_input( - &self, - block_hash: &BuilderCommitment, - _view_number: u64, - _sender: TYPES::SignatureKey, - _signature: &::PureAssembledSignatureType, - ) -> Result, BuildError> { - let mut blocks = self.blocks.write().await; - let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; - entry.header_input.take().ok_or(BuildError::Missing) - } - - async fn builder_address(&self) -> Result { - Ok(self.pub_key.clone()) - } -} - -impl SimpleBuilderSource { - pub async fn run(self, url: Url) - where - ::InstanceState: Default, - { - let builder_api = - hotshot_builder_api::builder::define_api::, TYPES, Base>( - &Options::default(), - ) - .expect("Failed to construct the builder API"); - let mut app: App, Error> = App::with_state(self); - app.register_module::("block_info", builder_api) - .expect("Failed to register the builder API"); - - async_spawn(app.serve(url, Base::instance())); - } -} - -#[derive(Clone)] -pub struct SimpleBuilderTask { - #[allow(clippy::type_complexity)] - transactions: Arc, SubmittedTransaction>>>, - blocks: Arc>>>, - decided_transactions: LruCache, ()>, -} - -pub trait BuilderTask: Send + Sync { - fn start( - self: Box, - stream: Box> + std::marker::Unpin + Send + 'static>, - ); -} - -impl BuilderTask for SimpleBuilderTask { - fn start( - mut self: Box, - mut stream: Box> + std::marker::Unpin + Send + 'static>, - ) { - async_spawn(async move { - loop { - match stream.next().await { - None => { - break; - } - Some(evt) => match evt.event { - EventType::Decide { leaf_chain, .. } => { - let mut queue = self.transactions.write().await; - for leaf_info in leaf_chain.iter() { - if let Some(ref payload) = leaf_info.leaf.block_payload() { - for txn in payload.transaction_commitments( - leaf_info.leaf.block_header().metadata(), - ) { - self.decided_transactions.put(txn, ()); - queue.remove(&txn); - } - } - } - self.blocks.write().await.clear(); - } - EventType::DaProposal { proposal, .. } => { - let payload = TYPES::BlockPayload::from_bytes( - &proposal.data.encoded_transactions, - &proposal.data.metadata, - ); - let now = Instant::now(); - - let mut queue = self.transactions.write().await; - for commitment in - payload.transaction_commitments(&proposal.data.metadata) - { - if let Some(txn) = queue.get_mut(&commitment) { - txn.claimed = Some(now); - } - } - } - EventType::Transactions { transactions } => { - let mut queue = self.transactions.write().await; - for transaction in transactions { - if !self.decided_transactions.contains(&transaction.commit()) { - queue.insert( - transaction.commit(), - SubmittedTransaction { - claimed: None, - transaction: transaction.clone(), - }, - ); - } - } - } - _ => {} - }, - } - } - }); - } -} - -pub async fn make_simple_builder( - num_storage_nodes: usize, -) -> (SimpleBuilderSource, SimpleBuilderTask) { - let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); - - let transactions = Arc::new(RwLock::new(HashMap::new())); - let blocks = Arc::new(RwLock::new(HashMap::new())); - - let source = SimpleBuilderSource { - pub_key, - priv_key, - transactions: transactions.clone(), - blocks: blocks.clone(), - num_storage_nodes, - }; - - let task = SimpleBuilderTask { - transactions, - blocks, - decided_transactions: LruCache::new(NonZeroUsize::new(u16::MAX.into()).expect("> 0")), - }; - - (source, task) -} - -/// Helper function to construct all builder data structures from a list of transactions -async fn build_block( - transactions: Vec, - num_storage_nodes: usize, - pub_key: TYPES::BuilderSignatureKey, - priv_key: ::BuilderPrivateKey, -) -> ( - AvailableBlockInfo, - AvailableBlockData, - AvailableBlockHeaderInput, -) -where - ::InstanceState: Default, -{ - let (block_payload, metadata) = TYPES::BlockPayload::from_transactions( - transactions, - &Default::default(), - &Default::default(), - ) - .await - .expect("failed to build block payload from transactions"); - - let commitment = block_payload.builder_commitment(&metadata); - - let (vid_commitment, precompute_data) = - precompute_vid_commitment(&block_payload.encode(), num_storage_nodes); - - // Get block size from the encoded payload - let block_size = block_payload.encode().len() as u64; - - let signature_over_block_info = - TYPES::BuilderSignatureKey::sign_block_info(&priv_key, block_size, 123, &commitment) - .expect("Failed to sign block info"); - - let signature_over_builder_commitment = - TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, commitment.as_ref()) - .expect("Failed to sign commitment"); - - let signature_over_vid_commitment = - TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, vid_commitment.as_ref()) - .expect("Failed to sign block vid commitment"); - - let signature_over_fee_info = - TYPES::BuilderSignatureKey::sign_fee(&priv_key, 123_u64, &metadata, &vid_commitment) - .expect("Failed to sign fee info"); - - let block = AvailableBlockData { - block_payload, - metadata, - sender: pub_key.clone(), - signature: signature_over_builder_commitment, - }; - let metadata = AvailableBlockInfo { - sender: pub_key.clone(), - signature: signature_over_block_info, - block_hash: commitment, - block_size, - offered_fee: 123, - _phantom: std::marker::PhantomData, - }; - let header_input = AvailableBlockHeaderInput { - vid_commitment, - vid_precompute_data: precompute_data, - message_signature: signature_over_vid_commitment.clone(), - fee_signature: signature_over_fee_info, - sender: pub_key, - }; - - (metadata, block, header_input) -} diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs new file mode 100644 index 0000000000..87cde26c8b --- /dev/null +++ b/testing/src/block_builder/mod.rs @@ -0,0 +1,178 @@ +use std::collections::HashMap; + +use async_broadcast::Receiver; +use async_compatibility_layer::art::async_spawn; +use async_trait::async_trait; +use futures::Stream; +use hotshot::{traits::BlockPayload, types::Event}; +use hotshot_builder_api::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{Error, Options}, + data_source::BuilderDataSource, +}; +use hotshot_example_types::block_types::TestTransaction; +use hotshot_types::{ + constants::Base, + traits::{ + block_contents::{precompute_vid_commitment, EncodeBytes}, + node_implementation::NodeType, + signature_key::BuilderSignatureKey, + }, +}; +use tide_disco::{method::ReadState, App, Url}; +use vbs::version::StaticVersionType; + +use crate::test_builder::BuilderChange; + +pub mod random; +pub use random::RandomBuilderImplementation; + +pub mod simple; +pub use simple::{SimpleBuilderConfig, SimpleBuilderImplementation}; + +#[async_trait] +pub trait TestBuilderImplementation +where + ::InstanceState: Default, +{ + type Config: Default; + + async fn start( + num_storage_nodes: usize, + options: Self::Config, + changes: HashMap, + ) -> (Box>, Url); +} + +pub trait BuilderTask: Send + Sync { + fn start( + self: Box, + stream: Box> + std::marker::Unpin + Send + 'static>, + ); +} + +/// Entry for a built block +#[derive(Debug, Clone)] +struct BlockEntry { + metadata: AvailableBlockInfo, + payload: Option>, + header_input: Option>, +} + +/// Construct a tide disco app that mocks the builder API. +/// +/// # Panics +/// If constructing and launching the builder fails for any reason +pub fn run_builder_source( + url: Url, + mut change_receiver: Receiver, + source: Source, +) where + TYPES: NodeType, + ::InstanceState: Default, + Source: Clone + Send + Sync + tide_disco::method::ReadState + 'static, + ::State: Sync + Send + BuilderDataSource, +{ + async_spawn(async move { + let start_builder = |url: Url, source: Source| -> _ { + let builder_api = hotshot_builder_api::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let mut app: App = App::with_state(source); + app.register_module("block_info", builder_api) + .expect("Failed to register the builder API"); + async_spawn(app.serve(url, Base::instance())) + }; + + let mut handle = Some(start_builder(url.clone(), source.clone())); + + while let Ok(event) = change_receiver.recv().await { + match event { + BuilderChange::Up if handle.is_none() => { + handle = Some(start_builder(url.clone(), source.clone())); + } + BuilderChange::Down => { + if let Some(handle) = handle.take() { + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + } + } + _ => {} + } + } + }); +} + +/// Helper function to construct all builder data structures from a list of transactions +async fn build_block( + transactions: Vec, + num_storage_nodes: usize, + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, +) -> BlockEntry +where + ::InstanceState: Default, +{ + let (block_payload, metadata) = TYPES::BlockPayload::from_transactions( + transactions, + &Default::default(), + &Default::default(), + ) + .await + .expect("failed to build block payload from transactions"); + + let commitment = block_payload.builder_commitment(&metadata); + + let (vid_commitment, precompute_data) = + precompute_vid_commitment(&block_payload.encode(), num_storage_nodes); + + // Get block size from the encoded payload + let block_size = block_payload.encode().len() as u64; + + let signature_over_block_info = + TYPES::BuilderSignatureKey::sign_block_info(&priv_key, block_size, 123, &commitment) + .expect("Failed to sign block info"); + + let signature_over_builder_commitment = + TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, commitment.as_ref()) + .expect("Failed to sign commitment"); + + let signature_over_vid_commitment = + TYPES::BuilderSignatureKey::sign_builder_message(&priv_key, vid_commitment.as_ref()) + .expect("Failed to sign block vid commitment"); + + let signature_over_fee_info = + TYPES::BuilderSignatureKey::sign_fee(&priv_key, 123_u64, &metadata, &vid_commitment) + .expect("Failed to sign fee info"); + + let block = AvailableBlockData { + block_payload, + metadata, + sender: pub_key.clone(), + signature: signature_over_builder_commitment, + }; + let metadata = AvailableBlockInfo { + sender: pub_key.clone(), + signature: signature_over_block_info, + block_hash: commitment, + block_size, + offered_fee: 123, + _phantom: std::marker::PhantomData, + }; + let header_input = AvailableBlockHeaderInput { + vid_commitment, + vid_precompute_data: precompute_data, + message_signature: signature_over_vid_commitment.clone(), + fee_signature: signature_over_fee_info, + sender: pub_key, + }; + + BlockEntry { + metadata, + payload: Some(block), + header_input: Some(header_input), + } +} diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs new file mode 100644 index 0000000000..0bc9a19920 --- /dev/null +++ b/testing/src/block_builder/random.rs @@ -0,0 +1,324 @@ +use std::{ + collections::HashMap, + num::NonZeroUsize, + ops::Deref, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +use async_broadcast::{broadcast, Sender}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; +use async_trait::async_trait; +use futures::{future::BoxFuture, Stream, StreamExt}; +use hotshot::types::{Event, EventType, SignatureKey}; +use hotshot_builder_api::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::BuildError, + data_source::BuilderDataSource, +}; +use hotshot_example_types::block_types::TestTransaction; +use hotshot_orchestrator::config::RandomBuilderConfig; +use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}, + utils::BuilderCommitment, + vid::VidCommitment, +}; +use lru::LruCache; +use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; +use tide_disco::{method::ReadState, Url}; + +use super::{build_block, run_builder_source, BlockEntry, BuilderTask, TestBuilderImplementation}; +use crate::test_builder::BuilderChange; + +pub struct RandomBuilderImplementation; + +impl RandomBuilderImplementation { + pub async fn create>( + num_storage_nodes: usize, + config: RandomBuilderConfig, + changes: HashMap, + change_sender: Sender, + ) -> (RandomBuilderTask, RandomBuilderSource) + where + ::InstanceState: Default, + { + let (pub_key, priv_key) = + TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); + let blocks = Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))); + let source = RandomBuilderSource { + blocks: Arc::clone(&blocks), + pub_key: pub_key.clone(), + should_fail_claims: Arc::new(AtomicBool::new(false)), + }; + let task = RandomBuilderTask { + blocks, + config, + num_storage_nodes, + changes, + change_sender, + pub_key, + priv_key, + }; + (task, source) + } +} + +#[async_trait] +impl TestBuilderImplementation for RandomBuilderImplementation +where + TYPES: NodeType, + ::InstanceState: Default, +{ + type Config = RandomBuilderConfig; + + async fn start( + num_storage_nodes: usize, + config: RandomBuilderConfig, + changes: HashMap, + ) -> (Box>, Url) { + let port = portpicker::pick_unused_port().expect("No free ports"); + let url = Url::parse(&format!("http://0.0.0.0:{port}")).expect("Valid URL"); + let (change_sender, change_receiver) = broadcast(128); + + let (task, source) = Self::create(num_storage_nodes, config, changes, change_sender).await; + run_builder_source(url.clone(), change_receiver, source); + (Box::new(task), url) + } +} + +pub struct RandomBuilderTask> { + num_storage_nodes: usize, + config: RandomBuilderConfig, + changes: HashMap, + change_sender: Sender, + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, + blocks: Arc>>>, +} + +impl> RandomBuilderTask { + async fn build_blocks( + options: RandomBuilderConfig, + num_storage_nodes: usize, + pub_key: ::BuilderSignatureKey, + priv_key: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, + blocks: Arc>>>, + ) where + ::InstanceState: Default, + { + let mut rng = SmallRng::from_entropy(); + let time_per_block = Duration::from_secs(1) / options.blocks_per_second; + loop { + let start = std::time::Instant::now(); + let transactions: Vec = (0..options.txn_in_block) + .map(|_| { + let mut bytes = vec![ + 0; + rng.gen_range(options.txn_size.clone()) + .try_into() + .expect("We are NOT running on a 16-bit platform") + ]; + rng.fill_bytes(&mut bytes); + TestTransaction::new(bytes) + }) + .collect(); + + let block = build_block( + transactions, + num_storage_nodes, + pub_key.clone(), + priv_key.clone(), + ) + .await; + + if let Some((hash, _)) = blocks + .write() + .await + .push(block.metadata.block_hash.clone(), block) + { + tracing::warn!("Block {} evicted", hash); + }; + if time_per_block < start.elapsed() { + tracing::warn!( + "Can't keep up: last block built in {}ms, target time per block: {}", + start.elapsed().as_millis(), + time_per_block.as_millis(), + ); + } + async_sleep(time_per_block.saturating_sub(start.elapsed())).await; + } + } +} + +impl> BuilderTask for RandomBuilderTask +where + ::InstanceState: Default, +{ + fn start( + mut self: Box, + mut stream: Box> + std::marker::Unpin + Send + 'static>, + ) { + let mut task = Some(async_spawn(Self::build_blocks( + self.config.clone(), + self.num_storage_nodes, + self.pub_key.clone(), + self.priv_key.clone(), + self.blocks.clone(), + ))); + + async_spawn(async move { + loop { + match stream.next().await { + None => { + break; + } + Some(evt) => { + if let EventType::ViewFinished { view_number } = evt.event { + if let Some(change) = self.changes.remove(&view_number) { + match change { + BuilderChange::Up => { + if task.is_none() { + task = Some(async_spawn(Self::build_blocks( + self.config.clone(), + self.num_storage_nodes, + self.pub_key.clone(), + self.priv_key.clone(), + self.blocks.clone(), + ))) + } + } + BuilderChange::Down => { + if let Some(handle) = task.take() { + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + } + } + BuilderChange::FailClaims(_) => {} + } + let _ = self.change_sender.broadcast(change).await; + } + } + } + } + } + }); + } +} + +/// A mock implementation of the builder data source. +/// Builds random blocks, doesn't track HotShot state at all. +/// Evicts old available blocks if HotShot doesn't keep up. +#[derive(Clone, Debug)] +pub struct RandomBuilderSource { + /// Built blocks + blocks: Arc< + RwLock< + // Isn't strictly speaking used as a cache, + // just as a HashMap that evicts old blocks + LruCache>, + >, + >, + pub_key: TYPES::BuilderSignatureKey, + should_fail_claims: Arc, +} + +impl RandomBuilderSource +where + TYPES: NodeType, + ::InstanceState: Default, +{ + /// Create new [`RandomBuilderSource`] + #[must_use] + #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 + pub fn new(pub_key: TYPES::BuilderSignatureKey) -> Self { + Self { + blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), + pub_key, + should_fail_claims: Arc::new(AtomicBool::new(false)), + } + } +} + +#[async_trait] +impl ReadState for RandomBuilderSource { + type State = Self; + + async fn read( + &self, + op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, + ) -> T { + op(self).await + } +} + +#[async_trait] +impl BuilderDataSource for RandomBuilderSource { + async fn available_blocks( + &self, + _for_parent: &VidCommitment, + _view_number: u64, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, + ) -> Result>, BuildError> { + Ok(self + .blocks + .deref() + .read() + .await + .iter() + .map(|(_, BlockEntry { metadata, .. })| metadata.clone()) + .collect()) + } + + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + _view_number: u64, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { + if self.should_fail_claims.load(Ordering::Relaxed) { + return Err(BuildError::Missing); + } + + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + let payload = entry.payload.take().ok_or(BuildError::Missing)?; + // Check if header input is claimed as well, if yes, then evict block + if entry.header_input.is_none() { + blocks.pop(block_hash); + }; + Ok(payload) + } + + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + _view_number: u64, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { + if self.should_fail_claims.load(Ordering::Relaxed) { + return Err(BuildError::Missing); + } + + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + let header_input = entry.header_input.take().ok_or(BuildError::Missing)?; + // Check if payload is claimed as well, if yes, then evict block + if entry.payload.is_none() { + blocks.pop(block_hash); + }; + Ok(header_input) + } + + async fn builder_address(&self) -> Result { + Ok(self.pub_key.clone()) + } +} diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs new file mode 100644 index 0000000000..ace5526b6f --- /dev/null +++ b/testing/src/block_builder/simple.rs @@ -0,0 +1,366 @@ +use std::{ + collections::HashMap, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +use async_broadcast::{broadcast, Sender}; +use async_compatibility_layer::art::async_spawn; +use async_lock::RwLock; +use async_trait::async_trait; +use committable::{Commitment, Committable}; +use futures::{future::BoxFuture, Stream, StreamExt}; +use hotshot::{ + traits::BlockPayload, + types::{Event, EventType, SignatureKey}, +}; +use hotshot_builder_api::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{BuildError, Error, Options}, + data_source::BuilderDataSource, +}; +use hotshot_example_types::block_types::TestTransaction; +use hotshot_types::{ + constants::Base, + traits::{ + block_contents::BlockHeader, node_implementation::NodeType, + signature_key::BuilderSignatureKey, + }, + utils::BuilderCommitment, + vid::VidCommitment, +}; +use lru::LruCache; +use tide_disco::{method::ReadState, App, Url}; +use vbs::version::StaticVersionType; + +use super::{build_block, run_builder_source, BlockEntry, BuilderTask, TestBuilderImplementation}; +use crate::test_builder::BuilderChange; + +pub struct SimpleBuilderImplementation; + +impl SimpleBuilderImplementation { + pub async fn create( + num_storage_nodes: usize, + changes: HashMap, + change_sender: Sender, + ) -> (SimpleBuilderSource, SimpleBuilderTask) { + let (pub_key, priv_key) = + TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); + + let transactions = Arc::new(RwLock::new(HashMap::new())); + let blocks = Arc::new(RwLock::new(HashMap::new())); + let should_fail_claims = Arc::new(AtomicBool::new(false)); + + let source = SimpleBuilderSource { + pub_key, + priv_key, + transactions: transactions.clone(), + blocks: blocks.clone(), + num_storage_nodes, + should_fail_claims: Arc::clone(&should_fail_claims), + }; + + let task = SimpleBuilderTask { + transactions, + blocks, + decided_transactions: LruCache::new(NonZeroUsize::new(u16::MAX.into()).expect("> 0")), + should_fail_claims, + change_sender, + changes, + }; + + (source, task) + } +} + +/// Configuration for `SimpleBuilder` +pub struct SimpleBuilderConfig { + pub port: u16, +} + +impl Default for SimpleBuilderConfig { + fn default() -> Self { + Self { + port: portpicker::pick_unused_port().expect("No free ports"), + } + } +} + +#[async_trait] +impl> TestBuilderImplementation + for SimpleBuilderImplementation +where + ::InstanceState: Default, +{ + type Config = SimpleBuilderConfig; + + async fn start( + num_storage_nodes: usize, + config: Self::Config, + changes: HashMap, + ) -> (Box>, Url) { + let url = Url::parse(&format!("http://0.0.0.0:{0}", config.port)).expect("Valid URL"); + + let (change_sender, change_receiver) = broadcast(128); + let (source, task) = Self::create(num_storage_nodes, changes, change_sender).await; + run_builder_source(url.clone(), change_receiver, source); + + (Box::new(task), url) + } +} + +#[derive(Debug, Clone)] +pub struct SimpleBuilderSource { + pub_key: TYPES::BuilderSignatureKey, + priv_key: ::BuilderPrivateKey, + num_storage_nodes: usize, + #[allow(clippy::type_complexity)] + transactions: Arc, SubmittedTransaction>>>, + blocks: Arc>>>, + should_fail_claims: Arc, +} + +#[async_trait] +impl ReadState for SimpleBuilderSource { + type State = Self; + + async fn read( + &self, + op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, + ) -> T { + op(self).await + } +} + +#[async_trait] +impl BuilderDataSource for SimpleBuilderSource +where + ::InstanceState: Default, +{ + async fn available_blocks( + &self, + _for_parent: &VidCommitment, + _view_number: u64, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, + ) -> Result>, BuildError> { + let transactions = self + .transactions + .read(|txns| { + Box::pin(async { + txns.values() + .filter(|txn| { + // We want transactions that are either unclaimed, or claimed long ago + // and thus probably not included, or they would've been decided on + // already and removed from the queue + txn.claimed + .map(|claim_time| claim_time.elapsed() > Duration::from_secs(30)) + .unwrap_or(true) + }) + .cloned() + .map(|txn| txn.transaction) + .collect::>() + }) + }) + .await; + + if transactions.is_empty() { + // We don't want to return an empty block if we have no trasnactions, as we would end up + // driving consensus to produce empty blocks extremely quickly when mempool is empty. + // Instead, we return no blocks, so that view leader will keep asking for blocks until + // either we have something non-trivial to propose, or leader runs out of time to propose, + // in which case view leader will finally propose an empty block themselves. + return Ok(vec![]); + } + + let block_entry = build_block( + transactions, + self.num_storage_nodes, + self.pub_key.clone(), + self.priv_key.clone(), + ) + .await; + + let metadata = block_entry.metadata.clone(); + + self.blocks + .write() + .await + .insert(block_entry.metadata.block_hash.clone(), block_entry); + + Ok(vec![metadata]) + } + + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + _view_number: u64, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { + if self.should_fail_claims.load(Ordering::Relaxed) { + return Err(BuildError::Missing); + } + + let payload = { + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + entry.payload.take().ok_or(BuildError::Missing)? + }; + + let now = Instant::now(); + + let claimed_transactions = payload + .block_payload + .transaction_commitments(&payload.metadata); + + let mut transactions = self.transactions.write().await; + for txn_hash in claimed_transactions { + if let Some(txn) = transactions.get_mut(&txn_hash) { + txn.claimed = Some(now); + } + } + + Ok(payload) + } + + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + _view_number: u64, + _sender: TYPES::SignatureKey, + _signature: &::PureAssembledSignatureType, + ) -> Result, BuildError> { + if self.should_fail_claims.load(Ordering::Relaxed) { + return Err(BuildError::Missing); + } + + let mut blocks = self.blocks.write().await; + let entry = blocks.get_mut(block_hash).ok_or(BuildError::NotFound)?; + entry.header_input.take().ok_or(BuildError::Missing) + } + + async fn builder_address(&self) -> Result { + Ok(self.pub_key.clone()) + } +} + +impl SimpleBuilderSource { + pub async fn run(self, url: Url) + where + ::InstanceState: Default, + { + let builder_api = + hotshot_builder_api::builder::define_api::, TYPES, Base>( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let mut app: App, Error> = App::with_state(self); + app.register_module::("block_info", builder_api) + .expect("Failed to register the builder API"); + + async_spawn(app.serve(url, Base::instance())); + } +} + +#[derive(Debug, Clone)] +struct SubmittedTransaction { + claimed: Option, + transaction: TYPES::Transaction, +} + +#[derive(Clone)] +pub struct SimpleBuilderTask { + #[allow(clippy::type_complexity)] + transactions: Arc, SubmittedTransaction>>>, + blocks: Arc>>>, + decided_transactions: LruCache, ()>, + should_fail_claims: Arc, + changes: HashMap, + change_sender: Sender, +} + +impl BuilderTask for SimpleBuilderTask { + fn start( + mut self: Box, + mut stream: Box> + std::marker::Unpin + Send + 'static>, + ) { + async_spawn(async move { + let mut should_build_blocks = true; + loop { + match stream.next().await { + None => { + break; + } + Some(evt) => match evt.event { + EventType::ViewFinished { view_number } => { + if let Some(change) = self.changes.remove(&view_number) { + match change { + BuilderChange::Up => should_build_blocks = true, + BuilderChange::Down => { + should_build_blocks = false; + self.transactions.write().await.clear(); + self.blocks.write().await.clear(); + } + BuilderChange::FailClaims(value) => { + self.should_fail_claims.store(value, Ordering::Relaxed); + } + } + let _ = self.change_sender.broadcast(change).await; + } + } + EventType::Decide { leaf_chain, .. } if should_build_blocks => { + let mut queue = self.transactions.write().await; + for leaf_info in leaf_chain.iter() { + if let Some(ref payload) = leaf_info.leaf.block_payload() { + for txn in payload.transaction_commitments( + leaf_info.leaf.block_header().metadata(), + ) { + self.decided_transactions.put(txn, ()); + queue.remove(&txn); + } + } + } + self.blocks.write().await.clear(); + } + EventType::DaProposal { proposal, .. } if should_build_blocks => { + let payload = TYPES::BlockPayload::from_bytes( + &proposal.data.encoded_transactions, + &proposal.data.metadata, + ); + let now = Instant::now(); + + let mut queue = self.transactions.write().await; + for commitment in + payload.transaction_commitments(&proposal.data.metadata) + { + if let Some(txn) = queue.get_mut(&commitment) { + txn.claimed = Some(now); + } + } + } + EventType::Transactions { transactions } if should_build_blocks => { + let mut queue = self.transactions.write().await; + for transaction in transactions { + if !self.decided_transactions.contains(&transaction.commit()) { + queue.insert( + transaction.commit(), + SubmittedTransaction { + claimed: None, + transaction: transaction.clone(), + }, + ); + } + } + } + _ => {} + }, + } + } + }); + } +} diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 0827e57fd6..7b9a1bed79 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -44,7 +44,7 @@ pub enum ViewStatus { pub enum OverallSafetyTaskErr { /// inconsistent txn nums InconsistentTxnsNum { - /// node idx -> number transactions + /// number of transactions -> number of nodes reporting that number map: HashMap, }, /// too many failed views @@ -253,7 +253,7 @@ pub struct RoundResult { /// block -> # entries decided on that block pub block_map: HashMap, - /// node idx -> number transactions + /// number of transactions -> number of nodes reporting that number pub num_txns_map: HashMap, } @@ -379,9 +379,6 @@ impl RoundResult { } /// determines whether or not the round passes /// also do a safety check - /// # Panics - /// if the `num_txns_map` is somehow empty - /// This should never happen because this function should never be called in that case #[allow(clippy::too_many_arguments, clippy::let_unit_value)] pub fn update_status( &mut self, @@ -425,9 +422,11 @@ impl RoundResult { }); return; } - if *self.num_txns_map.iter().last().unwrap().0 < transaction_threshold { - self.status = ViewStatus::Failed; - return; + if let Some((n_txn, _)) = self.num_txns_map.iter().last() { + if *n_txn < transaction_threshold { + self.status = ViewStatus::Failed; + return; + } } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 7dc3edce56..662d2fd86b 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,4 +1,4 @@ -use std::{num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NetworkReliability, TestableNodeImplementation}; use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; @@ -6,6 +6,7 @@ use hotshot_types::{ traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, }; use tide_disco::Url; +use vec1::Vec1; use super::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -70,6 +71,27 @@ pub struct TestDescription { pub unreliable_network: Option>, /// view sync check task pub view_sync_properties: ViewSyncTaskDescription, + /// description of builders to run + pub builders: Vec1, +} + +/// Describes a possible change to builder status during test +#[derive(Clone, Debug)] +pub enum BuilderChange { + // Builder should start up + Up, + // Builder should shut down completely + Down, + // Toggles whether builder should always respond + // to claim calls with errors + FailClaims(bool), +} + +/// Metadata describing builder behaviour during a test +#[derive(Clone, Debug)] +pub struct BuilderDescription { + /// view number -> change to builder status + pub changes: HashMap, } impl Default for TimingData { @@ -97,8 +119,8 @@ impl TestDescription { TestDescription { num_bootstrap_nodes: num_nodes_with_stake, - num_nodes_with_stake: num_nodes_with_stake, - num_nodes_without_stake: num_nodes_without_stake, + num_nodes_with_stake, + num_nodes_without_stake, start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription { num_successful_views: 50, @@ -158,8 +180,8 @@ impl TestDescription { let num_nodes_with_stake = 20; let num_nodes_without_stake = 0; TestDescription { - num_nodes_with_stake: num_nodes_with_stake, - num_nodes_without_stake: num_nodes_without_stake, + num_nodes_with_stake, + num_nodes_without_stake, start_nodes: num_nodes_with_stake, num_bootstrap_nodes: num_nodes_with_stake, // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the @@ -216,6 +238,14 @@ impl Default for TestDescription { ), unreliable_network: None, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), + builders: vec1::vec1![ + BuilderDescription { + changes: HashMap::new() + }, + BuilderDescription { + changes: HashMap::new() + } + ], } } } @@ -296,7 +326,7 @@ impl TestDescription { builder_timeout: Duration::from_millis(1000), data_request_delay: Duration::from_millis(200), // Placeholder until we spin up the builder - builder_url: Url::parse("http://localhost:9999").expect("Valid URL"), + builder_urls: vec1::vec1![Url::parse("http://localhost:9999").expect("Valid URL")], start_proposing_view: 0, stop_proposing_view: 0, start_voting_view: 0, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 3d548a4623..aff3e7dbe1 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -288,8 +288,18 @@ where let config = self.launcher.resource_generator.config.clone(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let (mut builder_task, builder_url) = - B::start(config.num_nodes_with_stake.into(), B::Config::default()).await; + let mut builder_tasks = Vec::new(); + let mut builder_urls = Vec::new(); + for metadata in &self.launcher.metadata.builders { + let (builder_task, builder_url) = B::start( + config.num_nodes_with_stake.into(), + B::Config::default(), + metadata.changes.clone(), + ) + .await; + builder_tasks.push(builder_task); + builder_urls.push(builder_url); + } // Collect uninitialized nodes because we need to wait for all networks to be ready before starting the tasks let mut uninitialized_nodes = Vec::new(); @@ -323,7 +333,10 @@ where config.fixed_leader_for_gpuvid, ), }; - config.builder_url = builder_url.clone(); + config.builder_urls = builder_urls + .clone() + .try_into() + .expect("Non-empty by construction"); let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); @@ -388,10 +401,20 @@ where // Then start the necessary tasks for (node_id, networks, hotshot) in uninitialized_nodes { let handle = hotshot.run_tasks().await; - if node_id == 1 { - if let Some(task) = builder_task.take() { - task.start(Box::new(handle.event_stream())) + + match node_id.cmp(&(config.da_staked_committee_size as u64 - 1)) { + std::cmp::Ordering::Less => { + if let Some(task) = builder_tasks.pop() { + task.start(Box::new(handle.event_stream())) + } + } + std::cmp::Ordering::Equal => { + // If we have more builder tasks than DA nodes, pin them all on the last node. + while let Some(task) = builder_tasks.pop() { + task.start(Box::new(handle.event_stream())) + } } + std::cmp::Ordering::Greater => {} } self.nodes.push(Node { diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 63202690e7..3f81352895 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -1,15 +1,25 @@ -use std::time::Duration; +use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; use async_compatibility_layer::art::async_sleep; +use hotshot_builder_api::block_info::AvailableBlockData; use hotshot_example_types::{ - block_types::{TestBlockPayload, TestTransaction}, + block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::TestTypes, }; +use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; -use hotshot_testing::block_builder::run_random_builder; +use hotshot_testing::block_builder::{ + BuilderTask, RandomBuilderImplementation, TestBuilderImplementation, +}; use hotshot_types::{ constants::Base, - traits::{node_implementation::NodeType, signature_key::SignatureKey, BlockPayload}, + traits::{ + block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, + BlockPayload, + }, }; use tide_disco::Url; @@ -20,26 +30,18 @@ use tide_disco::Url; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_random_block_builder() { - use std::time::Instant; - - use hotshot_builder_api::block_info::AvailableBlockData; - use hotshot_example_types::block_types::TestMetadata; - use hotshot_orchestrator::config::RandomBuilderConfig; - use hotshot_types::traits::block_contents::vid_commitment; - - let port = portpicker::pick_unused_port().expect("Could not find an open port"); - let api_url = Url::parse(format!("http://localhost:{port}").as_str()).unwrap(); + let (task, api_url): (Box>, Url) = + RandomBuilderImplementation::start( + 1, + RandomBuilderConfig { + blocks_per_second: u32::MAX, + ..Default::default() + }, + HashMap::new(), + ) + .await; + task.start(Box::new(futures::stream::empty())); - run_random_builder::( - api_url.clone(), - 1, - RandomBuilderConfig { - // Essentially removes delays so that builder doesn't slow - // down the test - blocks_per_second: u32::MAX, - ..Default::default() - }, - ); let builder_started = Instant::now(); let client: BuilderClient = BuilderClient::new(api_url); diff --git a/testing/tests/tests_3/test_with_builder_failures.rs b/testing/tests/tests_3/test_with_builder_failures.rs new file mode 100644 index 0000000000..7be111d04a --- /dev/null +++ b/testing/tests/tests_3/test_with_builder_failures.rs @@ -0,0 +1,69 @@ +use std::time::Duration; + +use hotshot_example_types::node_types::{MemoryImpl, PushCdnImpl, TestTypes}; +use hotshot_macros::cross_tests; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + test_builder::{BuilderChange, BuilderDescription, TestDescription}, + txn_task::TxnTaskDescription, +}; + +// Test one node leaving the network. +cross_tests!( + TestName: test_with_builder_failures, + Impls: [MemoryImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_multiple_rounds(); + // Every block should contain at least one transaction - builders are never offline + // simultaneously + metadata.overall_safety_properties.transaction_threshold = 1; + // Generate a lot of transactions so that freshly restarted builders still have + // transactions + metadata.txn_description = TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(1)); + + // Two builders running as follows: + // view 1st 2nd + // 0 Up Down + // 1 Up Up + // 2 Down Up + // 3 Up Up + // 4 Up Down + // 5 Up Up + // 6 Down Up + // 7 Up Up + // ... + // + // We give each builder a view of uptime before making it the only available builder so that it + // has time to initialize + + // First builder will always respond with available blocks, but will periodically fail claim calls + let first_builder = (0..metadata.overall_safety_properties.num_successful_views as u64).filter_map(|view_num| { + match view_num % 4 { + 2 => Some((view_num, BuilderChange::FailClaims(true))), + 3 => Some((view_num, BuilderChange::FailClaims(false))), + _ => None, + } + }).collect(); + // Second builder will periodically be completely down + #[allow(clippy::unnecessary_filter_map)] // False positive + let second_builder = (0..metadata.overall_safety_properties.num_successful_views as u64).filter_map(|view_num| { + match view_num % 4 { + 0 => Some((view_num, BuilderChange::Down)), + 1 => Some((view_num, BuilderChange::Up)), + _ => None, + } + }).collect(); + + metadata.builders = vec1::vec1![ + BuilderDescription { + changes: first_builder, + }, + BuilderDescription { + changes: second_builder, + }, + ]; + metadata + } +); diff --git a/types/Cargo.toml b/types/Cargo.toml index ef7a60860b..0ea8ef5e34 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -54,6 +54,7 @@ vbs = { workspace = true } displaydoc = { version = "0.2.3", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = { workspace = true } +vec1 = { workspace = true } [dev-dependencies] serde_json = { workspace = true } diff --git a/types/src/lib.rs b/types/src/lib.rs index c1fa051357..7be084f6ec 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -8,6 +8,7 @@ use light_client::StateVerKey; use tracing::error; use traits::signature_key::SignatureKey; use url::Url; +use vec1::Vec1; use crate::utils::bincode_opts; pub mod consensus; @@ -200,7 +201,7 @@ pub struct HotShotConfig { /// time to wait until we request data associated with a proposal pub data_request_delay: Duration, /// Builder API base URL - pub builder_url: Url, + pub builder_urls: Vec1, /// View to start proposing an upgrade pub start_proposing_view: u64, /// View to stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_view <= start_proposing_view. From 2dacf94bb2533b0a842cd8a2b47aa28bade3d1de Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 13 Jun 2024 23:03:13 +0800 Subject: [PATCH 1079/1393] [DEPENDENCY_REFACTOR] - Modify `update_validated_state_map` to not override a view with more info (#3315) * Move validation * Fix fmt and import * Fix build and recv test * Remove a todo * Restore an event * Fix non-dependency lints * Fix tests * Fix CI * Fix fmt and imports * Fix proposal and proposal recv tasks tests * 3 more tests * Fix doc * Address comments * Add error * Fix Conficts in keyao/remove-proposal-validation (#3308) * fix * remove dead test * fix lint * fix test * Fix vote tests --------- Co-authored-by: Keyao Shen * Save changes * Fix upgrade test * Fix build * Add error handling to update_validated_state_map * Fix tests * use ref --------- Co-authored-by: Jarred Parr --- hotshot/src/tasks/mod.rs | 3 +- task-impls/src/consensus/helpers.rs | 36 +++++++++++-------- task-impls/src/da.rs | 14 ++++---- task-impls/src/quorum_proposal/mod.rs | 8 +++-- .../src/quorum_proposal_recv/handlers.rs | 4 ++- task-impls/src/quorum_vote/mod.rs | 17 +++++---- .../tests_1/quorum_proposal_recv_task.rs | 4 +-- testing/tests/tests_1/quorum_proposal_task.rs | 17 --------- testing/tests/tests_1/quorum_vote_task.rs | 4 +-- types/src/consensus.rs | 28 +++++++++++++-- 10 files changed, 77 insertions(+), 58 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 165ca013e5..b55801b828 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,6 +5,7 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use hotshot_task::task::Task; #[cfg(not(feature = "dependency-tasks"))] @@ -36,8 +37,6 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; - /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 1922c2ad46..fbd3d60fa5 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -7,6 +7,14 @@ use std::{ sync::Arc, }; +#[cfg(not(feature = "dependency-tasks"))] +use super::ConsensusTaskState; +#[cfg(not(feature = "dependency-tasks"))] +use crate::{ + consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, + helpers::AnyhowTracing, +}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::bail; use anyhow::{ensure, Context, Result}; @@ -53,15 +61,6 @@ use tracing::{debug, info, warn}; #[cfg(not(feature = "dependency-tasks"))] use vbs::version::Version; -#[cfg(not(feature = "dependency-tasks"))] -use super::ConsensusTaskState; -#[cfg(not(feature = "dependency-tasks"))] -use crate::{ - consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, - helpers::AnyhowTracing, -}; -use crate::{events::HotShotEvent, helpers::broadcast_event}; - /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. /// @@ -99,10 +98,13 @@ pub async fn validate_proposal_safety_and_liveness( }, }; - consensus + if let Err(e) = consensus .write() .await - .update_validated_state_map(view_number, view.clone()); + .update_validated_state_map(view_number, view.clone()) + { + tracing::trace!("{e:?}"); + } // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. broadcast_event( @@ -647,7 +649,7 @@ pub(crate) async fn handle_quorum_proposal_recv> DaTaskState { broadcast_event(Arc::new(HotShotEvent::DaVoteSend(vote)), &event_stream).await; let mut consensus = self.consensus.write().await; - // Ensure this view is in the view map for garbage collection, but do not overwrite if - // there is already a view there: the replica task may have inserted a `Leaf` view which - // contains strictly more information. - if !consensus.validated_state_map().contains_key(&view_number) { - let view = View { - view_inner: ViewInner::Da { payload_commitment }, - }; - consensus.update_validated_state_map(view_number, view.clone()); + // Ensure this view is in the view map for garbage collection. + let view = View { + view_inner: ViewInner::Da { payload_commitment }, + }; + if let Err(e) = consensus.update_validated_state_map(view_number, view.clone()) { + tracing::trace!("{e:?}"); } // Record the payload we have promised to make available. diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index fff4dd127d..4d5005b49c 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -472,10 +472,14 @@ impl> QuorumProposalTaskState { // Update the internal validated state map. - self.consensus + if let Err(e) = self + .consensus .write() .await - .update_validated_state_map(*view_number, view.clone()); + .update_validated_state_map(*view_number, view.clone()) + { + tracing::trace!("{e:?}"); + } self.create_dependency_task_if_new( *view_number + 1, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 21028f7d18..5d732df49a 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -63,7 +63,9 @@ async fn validate_proposal_liveness + 'static> VoteDependencyHand delta: Some(Arc::clone(&delta)), }, }; - consensus_writer.update_validated_state_map(proposed_leaf.view_number(), view.clone()); + if let Err(e) = + consensus_writer.update_validated_state_map(proposed_leaf.view_number(), view.clone()) + { + tracing::trace!("{e:?}"); + } consensus_writer.update_saved_leaves(proposed_leaf.clone()); // Kick back our updated structures for downstream usage. diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 5908fcc2fa..d36f773488 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -64,7 +64,7 @@ async fn test_quorum_proposal_recv_task() { consensus_writer.update_validated_state_map( view.quorum_proposal.data.view_number, build_fake_view_with_leaf(view.leaf.clone()), - ); + ).unwrap(); } drop(consensus_writer); @@ -154,7 +154,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { consensus_writer.update_validated_state_map( inserted_view_number, build_fake_view_with_leaf(view.leaf.clone()), - ); + ).unwrap(); // The index here is important. Since we're proposing for view 4, we need the // value from entry 2 to align the public key from the shares map. diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index a70d74017d..676659b3cd 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -105,18 +105,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { } // We must send the genesis cert here to initialize hotshot successfully. - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - // Special case: the genesis validated state is already - // present - consensus_writer.update_validated_state_map( - ViewNumber::new(0), - build_fake_view_with_leaf(genesis_leaf.clone()), - ); drop(consensus_writer); let inputs = vec![ @@ -200,13 +190,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let genesis_cert = proposals[0].data.justify_qc.clone(); let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; - // Special case: the genesis validated state is already - // present - consensus_writer.update_validated_state_map( - ViewNumber::new(0), - build_fake_view_with_leaf(genesis_leaf.clone()), - ); - drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 1b48e44ed7..625fb6aa05 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -53,7 +53,7 @@ async fn test_quorum_vote_task_success() { consensus_writer.update_validated_state_map( view.quorum_proposal.data.view_number(), build_fake_view_with_leaf(view.leaf.clone()), - ); + ).unwrap(); consensus_writer.update_saved_leaves(view.leaf.clone()); } drop(consensus_writer); @@ -173,7 +173,7 @@ async fn test_quorum_vote_task_miss_dependency() { consensus_writer.update_validated_state_map( view.quorum_proposal.data.view_number(), build_fake_view_with_leaf(view.leaf.clone()), - ); + ).unwrap(); consensus_writer.update_saved_leaves(view.leaf.clone()); } drop(consensus_writer); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 46c3efada8..e4d31e395a 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use anyhow::{ensure, Result}; +use anyhow::{bail, ensure, Result}; use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::{Commitment, Committable}; use tracing::{debug, error}; @@ -299,8 +299,32 @@ impl Consensus { } /// Update the validated state map with a new view_number/view combo. - pub fn update_validated_state_map(&mut self, view_number: TYPES::Time, view: View) { + /// + /// # Errors + /// Can return an error when the new view contains less information than the exisiting view + /// with the same view number. + pub fn update_validated_state_map( + &mut self, + view_number: TYPES::Time, + view: View, + ) -> Result<()> { + if let Some(existing_view) = self.validated_state_map().get(&view_number) { + if let ViewInner::Leaf { .. } = existing_view.view_inner { + match view.view_inner { + ViewInner::Leaf { ref delta, .. } => { + ensure!( + delta.is_some(), + "Skipping the state update to not override a `Leaf` view with `None` state delta." + ); + } + _ => { + bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); + } + } + } + } self.validated_state_map.insert(view_number, view); + Ok(()) } /// Update the saved leaves with a new leaf. From 425b96219d4b841a825e411a5ce128a7ecf55734 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Fri, 14 Jun 2024 03:26:43 +0500 Subject: [PATCH 1080/1393] remove TestTransaction bound from simple builder (#3326) --- testing/src/block_builder/mod.rs | 3 +-- testing/src/block_builder/simple.rs | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index 87cde26c8b..a5ccd51b80 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -10,7 +10,6 @@ use hotshot_builder_api::{ builder::{Error, Options}, data_source::BuilderDataSource, }; -use hotshot_example_types::block_types::TestTransaction; use hotshot_types::{ constants::Base, traits::{ @@ -68,7 +67,7 @@ pub fn run_builder_source( mut change_receiver: Receiver, source: Source, ) where - TYPES: NodeType, + TYPES: NodeType, ::InstanceState: Default, Source: Clone + Send + Sync + tide_disco::method::ReadState + 'static, ::State: Sync + Send + BuilderDataSource, diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index ace5526b6f..ef3dccdd34 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -23,7 +23,6 @@ use hotshot_builder_api::{ builder::{BuildError, Error, Options}, data_source::BuilderDataSource, }; -use hotshot_example_types::block_types::TestTransaction; use hotshot_types::{ constants::Base, traits::{ @@ -91,8 +90,7 @@ impl Default for SimpleBuilderConfig { } #[async_trait] -impl> TestBuilderImplementation - for SimpleBuilderImplementation +impl TestBuilderImplementation for SimpleBuilderImplementation where ::InstanceState: Default, { From c1910c79993a0d9ff5bbcad71191ee54a692810d Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 14 Jun 2024 08:44:11 -0400 Subject: [PATCH 1081/1393] Update the CDN (#3297) * update the CDN * clippy * clippy * sigh... feature unification * command line arg the global memory pool * fix latency calculation --- examples/combined/all.rs | 2 ++ examples/push-cdn/all.rs | 2 ++ examples/push-cdn/broker.rs | 8 ++++++++ examples/push-cdn/marshal.rs | 8 ++++++++ hotshot/src/traits/networking/push_cdn_network.rs | 10 ++++++---- 5 files changed, 26 insertions(+), 4 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 8f82011985..3422b4842a 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -83,6 +83,7 @@ async fn main() { metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, + global_memory_pool_size: Some(1024 * 1024 * 1024), }; // Create and spawn the broker @@ -110,6 +111,7 @@ async fn main() { metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, + global_memory_pool_size: Some(1024 * 1024 * 1024), }; // Spawn the marshal diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 24f89b874e..25fb9beba1 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -74,6 +74,7 @@ async fn main() { metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, + global_memory_pool_size: Some(1024 * 1024 * 1024), }; // Create and spawn the broker @@ -99,6 +100,7 @@ async fn main() { metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, + global_memory_pool_size: Some(1024 * 1024 * 1024), }; // Spawn the marshal diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 66d535d7c0..67e987c31d 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -54,6 +54,13 @@ struct Args { /// The seed for broker key generation #[arg(short, long, default_value_t = 0)] key_seed: u64, + + /// The size of the global memory pool (in bytes). This is the maximum number of bytes that + /// can be allocated at once for all connections. A connection will block if it + /// tries to allocate more than this amount until some memory is freed. + /// Default is 1GB. + #[arg(long, default_value_t = 1_073_741_824)] + global_memory_pool_size: usize, } #[cfg_attr(async_executor_impl = "tokio", tokio::main)] #[cfg_attr(async_executor_impl = "async-std", async_std::main)] @@ -94,6 +101,7 @@ async fn main() -> Result<()> { public_advertise_endpoint: args.public_advertise_endpoint, private_bind_endpoint: args.private_bind_endpoint, private_advertise_endpoint: args.private_advertise_endpoint, + global_memory_pool_size: Some(args.global_memory_pool_size), }; // Create new `Broker` diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index 9a9995cc83..d8e7c83e55 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -36,6 +36,13 @@ struct Args { /// If not provided, a local, pinned CA is used #[arg(long)] ca_key_path: Option, + + /// The size of the global memory pool (in bytes). This is the maximum number of bytes that + /// can be allocated at once for all connections. A connection will block if it + /// tries to allocate more than this amount until some memory is freed. + /// Default is 1GB. + #[arg(long, default_value_t = 1_073_741_824)] + global_memory_pool_size: usize, } #[cfg_attr(async_executor_impl = "tokio", tokio::main)] @@ -63,6 +70,7 @@ async fn main() -> Result<()> { metrics_bind_endpoint: args.metrics_bind_endpoint, ca_cert_path: args.ca_cert_path, ca_key_path: args.ca_key_path, + global_memory_pool_size: Some(args.global_memory_pool_size), }; // Create new `Marshal` from the config diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 637b17d8b9..b36267465f 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -10,7 +10,7 @@ use async_compatibility_layer::{art::async_sleep, art::async_spawn}; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ - connection::{protocols::Tcp, NoMiddleware, TrustedMiddleware, UntrustedMiddleware}, + connection::protocols::Tcp, def::{ConnectionDef, RunDef, Topic as TopicTrait}, discovery::{Embedded, Redis}, }; @@ -105,7 +105,6 @@ pub struct UserDef(PhantomData); impl ConnectionDef for UserDef { type Scheme = WrappedSignatureKey; type Protocol = Quic; - type Middleware = UntrustedMiddleware; } /// The broker definition for the Push CDN. @@ -114,7 +113,6 @@ pub struct BrokerDef(PhantomData); impl ConnectionDef for BrokerDef { type Scheme = WrappedSignatureKey; type Protocol = Tcp; - type Middleware = TrustedMiddleware; } /// The client definition for the Push CDN. Uses the Quic @@ -125,7 +123,6 @@ pub struct ClientDef(PhantomData); impl ConnectionDef for ClientDef { type Scheme = WrappedSignatureKey; type Protocol = Quic; - type Middleware = NoMiddleware; } /// The testing run definition for the Push CDN. @@ -226,6 +223,7 @@ impl PushCdnNetwork { impl TestableNetworkingImplementation for PushCdnNetwork { /// Generate n Push CDN clients, a marshal, and two brokers (that run locally). /// Uses a `SQLite` database instead of Redis. + #[allow(clippy::too_many_lines)] fn generator( _expected_node_count: usize, _num_bootstrap: usize, @@ -294,6 +292,8 @@ impl TestableNetworkingImplementation for PushCdnNetwork discovery_endpoint: discovery_endpoint.clone(), ca_cert_path: None, ca_key_path: None, + // 1GB + global_memory_pool_size: Some(1024 * 1024 * 1024), }; // Create and spawn the broker @@ -325,6 +325,8 @@ impl TestableNetworkingImplementation for PushCdnNetwork metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, + // 1GB + global_memory_pool_size: Some(1024 * 1024 * 1024), }; // Spawn the marshal From 2333f4975dd6e31e84f4863e0b8effc61bd59136 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 14 Jun 2024 11:00:13 -0400 Subject: [PATCH 1082/1393] [Libp2p] Metrics and logging (#3323) * improve libp2p/cdn metrics and logging * remove panic * lint * clippy * fix double counting in `da_broadcast_message` * arc readability Co-authored-by: lukaszrzasik --------- Co-authored-by: lukaszrzasik --- examples/infra/mod.rs | 6 +- hotshot/src/traits.rs | 7 +- hotshot/src/traits/networking.rs | 48 ------- .../src/traits/networking/libp2p_network.rs | 124 ++++++++++-------- .../src/traits/networking/memory_network.rs | 26 +--- .../src/traits/networking/push_cdn_network.rs | 48 ++++++- libp2p-networking/src/network/mod.rs | 2 + libp2p-networking/src/network/node.rs | 12 ++ libp2p-networking/src/network/node/handle.rs | 11 +- libp2p-networking/tests/counter.rs | 4 +- testing/tests/tests_3/memory_network.rs | 52 ++------ 11 files changed, 156 insertions(+), 184 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index fff98813ee..e5d581b0d5 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -21,8 +21,8 @@ use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - derive_libp2p_peer_id, CombinedNetworks, Libp2pNetwork, PushCdnNetwork, Topic, - WrappedSignatureKey, + derive_libp2p_peer_id, CdnMetricsValue, CombinedNetworks, Libp2pMetricsValue, + Libp2pNetwork, PushCdnNetwork, Topic, WrappedSignatureKey, }, BlockPayload, NodeImplementation, }, @@ -674,6 +674,7 @@ where .expect("`cdn_marshal_address` needs to be supplied for a push CDN run"), topics, keypair, + CdnMetricsValue::default(), ) .expect("failed to create network"); @@ -766,6 +767,7 @@ where bind_address, &public_key, &private_key, + Libp2pMetricsValue::default(), ) .await .expect("failed to create libp2p network"); diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 99d22fa569..a70eca75a3 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -13,12 +13,13 @@ pub mod implementations { pub use super::networking::{ combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{ - derive_libp2p_keypair, derive_libp2p_peer_id, Libp2pNetwork, PeerInfoVec, + derive_libp2p_keypair, derive_libp2p_peer_id, Libp2pMetricsValue, Libp2pNetwork, + PeerInfoVec, }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ - KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic, WrappedSignatureKey, + CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic, + WrappedSignatureKey, }, - NetworkingMetricsValue, }; } diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index f47740c718..58fae50bdd 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -11,52 +11,4 @@ pub mod memory_network; /// The Push CDN network pub mod push_cdn_network; -use custom_debug::Debug; -use hotshot_types::traits::metrics::{Counter, Gauge, Metrics, NoMetrics}; pub use hotshot_types::traits::network::{NetworkError, NetworkReliability}; - -/// Contains several `NetworkingMetrics` that we're interested in from the networking interfaces -#[derive(Clone, Debug)] -pub struct NetworkingMetricsValue { - #[allow(dead_code)] - /// A [`Gauge`] which tracks how many peers are connected - pub connected_peers: Box, - /// A [`Counter`] which tracks how many messages have been received - pub incoming_message_count: Box, - /// A [`Counter`] which tracks how many messages have been send directly - pub outgoing_direct_message_count: Box, - /// A [`Counter`] which tracks how many messages have been send by broadcast - pub outgoing_broadcast_message_count: Box, - /// A [`Counter`] which tracks how many messages failed to send - pub message_failed_to_send: Box, - // A [`Gauge`] which tracks how many connected entries there are in the gossipsub mesh - // pub gossipsub_mesh_connected: Box, - // A [`Gauge`] which tracks how many kademlia entries there are - // pub kademlia_entries: Box, - // A [`Gauge`] which tracks how many kademlia buckets there are - // pub kademlia_buckets: Box, -} - -impl NetworkingMetricsValue { - /// Create a new instance of this [`NetworkingMetricsValue`] struct, setting all the counters and gauges - #[must_use] - pub fn new(metrics: &dyn Metrics) -> Self { - Self { - connected_peers: metrics.create_gauge(String::from("connected_peers"), None), - incoming_message_count: metrics - .create_counter(String::from("incoming_message_count"), None), - outgoing_direct_message_count: metrics - .create_counter(String::from("outgoing_direct_message_count"), None), - outgoing_broadcast_message_count: metrics - .create_counter(String::from("outgoing_broadcast_message_count"), None), - message_failed_to_send: metrics - .create_counter(String::from("message_failed_to_send"), None), - } - } -} - -impl Default for NetworkingMetricsValue { - fn default() -> Self { - Self::new(&*NoMetrics::boxed()) - } -} diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 7379d544d9..b17de79788 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -40,6 +40,7 @@ use hotshot_types::{ message::{DataMessage::DataResponse, Message, MessageKind}, traits::{ election::Membership, + metrics::{Counter, Gauge, Metrics, NoMetrics}, network::{self, ConnectedNetwork, NetworkError, ResponseMessage}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -62,11 +63,40 @@ use libp2p_networking::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, error, info, instrument, trace, warn}; -use super::NetworkingMetricsValue; use crate::BroadcastDelay; +/// Libp2p-specific metrics +#[derive(Clone, Debug)] +pub struct Libp2pMetricsValue { + /// The number of currently connected peers + pub num_connected_peers: Box, + /// The number of failed messages + pub num_failed_messages: Box, +} + +impl Libp2pMetricsValue { + /// Populate the metrics with Libp2p-specific metrics + pub fn new(metrics: &dyn Metrics) -> Self { + // Create a `libp2p subgroup + let subgroup = metrics.subgroup("libp2p".into()); + + // Create the metrics + Self { + num_connected_peers: subgroup.create_gauge("num_connected_peers".into(), None), + num_failed_messages: subgroup.create_counter("num_failed_messages".into(), None), + } + } +} + +impl Default for Libp2pMetricsValue { + /// Initialize with empty metrics + fn default() -> Self { + Self::new(&*NoMetrics::boxed()) + } +} + /// convenience alias for the type for bootstrap addresses /// concurrency primitives are needed for having tests pub type BootstrapAddrs = Arc>>; @@ -127,8 +157,8 @@ struct Libp2pNetworkInner { dht_timeout: Duration, /// whether or not we've bootstrapped into the DHT yet is_bootstrapped: Arc, - /// The networking metrics we're keeping track of - metrics: NetworkingMetricsValue, + /// The Libp2p metrics we're managing + metrics: Libp2pMetricsValue, /// topic map /// hash(hashset) -> topic /// btreemap ordered so is hashable @@ -267,7 +297,7 @@ impl TestableNetworkingImplementation Box::pin(async move { let net = Arc::new( match Libp2pNetwork::new( - NetworkingMetricsValue::default(), + Libp2pMetricsValue::default(), config, pubkey.clone(), bootstrap_addrs_ref, @@ -341,6 +371,7 @@ impl Libp2pNetwork { bind_address: SocketAddr, pub_key: &K, priv_key: &K::PrivateKey, + metrics: Libp2pMetricsValue, ) -> anyhow::Result { // Try to take our Libp2p config from our broader network config let libp2p_config = config @@ -403,7 +434,7 @@ impl Libp2pNetwork { } Ok(Libp2pNetwork::new( - NetworkingMetricsValue::default(), + metrics, node_config, pub_key.clone(), Arc::new(RwLock::new(bootstrap_nodes)), @@ -443,7 +474,7 @@ impl Libp2pNetwork { /// This will panic if there are less than 5 bootstrap nodes #[allow(clippy::too_many_arguments)] pub async fn new( - metrics: NetworkingMetricsValue, + metrics: Libp2pMetricsValue, config: NetworkNodeConfig, pk: K, bootstrap_addrs: BootstrapAddrs, @@ -540,7 +571,7 @@ impl Libp2pNetwork { #[allow(clippy::cast_possible_truncation)] const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; - info!("Performing lookup for peer {:?}", pk); + trace!("Performing lookup for peer {:?}", pk); // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { @@ -567,7 +598,6 @@ impl Libp2pNetwork { let handle = Arc::clone(&self.inner.handle); let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); let node_type = self.inner.handle.config().node_type; - let metrics_connected_peers = Arc::clone(&self.inner); let is_da = self.inner.is_da; async_spawn({ @@ -632,28 +662,16 @@ impl Libp2pNetwork { { async_sleep(Duration::from_secs(1)).await; } - // 10 minute timeout - let timeout_duration = Duration::from_secs(600); // perform connection info!("WAITING TO CONNECT ON NODE {:?}", id); - handle - .wait_to_connect(4, id, timeout_duration) - .await - .unwrap(); - info!( - "node {:?} is barring bootstrap, type: {:?}", - handle.peer_id(), - node_type - ); - let connected_num = handle.num_connected().await?; - metrics_connected_peers - .metrics - .connected_peers - .set(connected_num); + // Wait for the network to connect to the required number of peers + if let Err(e) = handle.wait_to_connect(4, id).await { + error!("Failed to connect to peers: {:?}", e); + return Err::<(), NetworkError>(e.into()); + } is_ready.store(true, Ordering::Relaxed); - info!("STARTING CONSENSUS ON {:?}", handle.peer_id()); Ok::<(), NetworkError>(()) } }); @@ -700,6 +718,7 @@ impl Libp2pNetwork { let res = request_tx.try_send((msg, chan)); res.map_err(|_| NetworkError::ChannelSend)?; } + NetworkEvent::ConnectedPeersUpdate(_) => {} } Ok::<(), NetworkError>(()) } @@ -740,6 +759,9 @@ impl Libp2pNetwork { .handle_recvd_events(message, &sender, request_tx.clone()) .await; } + NetworkEvent::ConnectedPeersUpdate(num_peers) => { + handle.inner.metrics.num_connected_peers.set(*num_peers); + } } // re-set the `kill_switch` for the next loop kill_switch = other_stream; @@ -781,7 +803,7 @@ impl ConnectedNetwork for Libp2pNetwork { { Ok(pid) => pid, Err(err) => { - self.inner.metrics.message_failed_to_send.add(1); + self.inner.metrics.num_failed_messages.add(1); error!( "Failed to message {:?} because could not find recipient peer id for pk {:?}", request, recipient @@ -809,7 +831,10 @@ impl ConnectedNetwork for Libp2pNetwork { } None => ResponseMessage::NotFound, }, - Err(e) => return Err(e.into()), + Err(e) => { + self.inner.metrics.num_failed_messages.add(1); + return Err(e.into()); + } }; Ok(bincode::serialize(&result).map_err(|e| NetworkError::Libp2p { source: e.into() })?) @@ -880,7 +905,7 @@ impl ConnectedNetwork for Libp2pNetwork { _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { self.wait_for_ready().await; - info!( + trace!( "broadcasting msg: {:?} with nodes: {:?} connected", message, self.inner.handle.connected_pids().await @@ -893,7 +918,7 @@ impl ConnectedNetwork for Libp2pNetwork { source: Box::new(NetworkNodeHandleError::NoSuchTopic), })? .clone(); - info!("broadcasting to topic: {}", topic); + trace!("broadcasting to topic: {}", topic); // gossip doesn't broadcast from itself, so special case if recipients.contains(&self.inner.pk) { @@ -919,14 +944,9 @@ impl ConnectedNetwork for Libp2pNetwork { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { - match handle_2.gossip_no_serialize(topic_2, msg).await { - Err(e) => { - metrics_2.message_failed_to_send.add(1); - warn!("Failed to broadcast to libp2p: {:?}", e); - } - Ok(()) => { - metrics_2.outgoing_direct_message_count.add(1); - } + if let Err(e) = handle_2.gossip_no_serialize(topic_2, msg).await { + metrics_2.num_failed_messages.add(1); + warn!("Failed to broadcast to libp2p: {:?}", e); } }) }), @@ -936,16 +956,12 @@ impl ConnectedNetwork for Libp2pNetwork { } } - match self.inner.handle.gossip(topic, &message).await { - Ok(()) => { - self.inner.metrics.outgoing_broadcast_message_count.add(1); - Ok(()) - } - Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); - Err(e.into()) - } + if let Err(e) = self.inner.handle.gossip(topic, &message).await { + self.inner.metrics.num_failed_messages.add(1); + return Err(e.into()); } + + Ok(()) } #[instrument(name = "Libp2pNetwork::da_broadcast_message", skip_all)] @@ -1002,7 +1018,7 @@ impl ConnectedNetwork for Libp2pNetwork { { Ok(pid) => pid, Err(err) => { - self.inner.metrics.message_failed_to_send.add(1); + self.inner.metrics.num_failed_messages.add(1); error!( "Failed to message {:?} because could not find recipient peer id for pk {:?}", message, recipient @@ -1025,14 +1041,9 @@ impl ConnectedNetwork for Libp2pNetwork { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { - match handle_2.direct_request_no_serialize(pid, msg).await { - Err(e) => { - metrics_2.message_failed_to_send.add(1); - warn!("Failed to broadcast to libp2p: {:?}", e); - } - Ok(()) => { - metrics_2.outgoing_direct_message_count.add(1); - } + if let Err(e) = handle_2.direct_request_no_serialize(pid, msg).await { + metrics_2.num_failed_messages.add(1); + warn!("Failed to broadcast to libp2p: {:?}", e); } }) }), @@ -1060,7 +1071,6 @@ impl ConnectedNetwork for Libp2pNetwork { .drain_at_least_one() .await .map_err(|_x| NetworkError::ShutDown)?; - self.inner.metrics.incoming_message_count.add(result.len()); Ok(result) } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 2c922a2c0a..4843ebc03a 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -35,7 +35,7 @@ use hotshot_types::{ use rand::Rng; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; -use super::{NetworkError, NetworkReliability, NetworkingMetricsValue}; +use super::{NetworkError, NetworkReliability}; /// Shared state for in-memory mock networking. /// @@ -74,9 +74,6 @@ struct MemoryNetworkInner { /// Count of messages that are in-flight (send but not processed yet) in_flight_message_count: AtomicUsize, - /// The networking metrics we're keeping track of - metrics: NetworkingMetricsValue, - /// config to introduce unreliability to the network reliability_config: Option>, } @@ -104,11 +101,9 @@ impl Debug for MemoryNetwork { impl MemoryNetwork { /// Creates a new `MemoryNetwork` and hooks it up to the group through the provided `MasterMap` - #[instrument(skip(metrics))] pub fn new( pub_key: K, - metrics: NetworkingMetricsValue, - master_map: Arc>, + master_map: &Arc>, reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); @@ -143,9 +138,8 @@ impl MemoryNetwork { inner: Arc::new(MemoryNetworkInner { input: RwLock::new(Some(input)), output: Mutex::new(output), - master_map: Arc::clone(&master_map), + master_map: Arc::clone(master_map), in_flight_message_count, - metrics, reliability_config, }), }; @@ -162,7 +156,6 @@ impl MemoryNetwork { .fetch_add(1, Ordering::Relaxed); let input = self.inner.input.read().await; if let Some(input) = &*input { - self.inner.metrics.outgoing_direct_message_count.add(1); input.send(message).await } else { Err(SendError(message)) @@ -187,12 +180,7 @@ impl TestableNetworkingImplementation Box::pin(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - let net = MemoryNetwork::new( - pubkey, - NetworkingMetricsValue::default(), - Arc::clone(&master), - reliability_config.clone(), - ); + let net = MemoryNetwork::new(pubkey, &master, reliability_config.clone()); Box::pin(async move { (net.clone().into(), net.into()) }) }) } @@ -263,11 +251,9 @@ impl ConnectedNetwork for MemoryNetwork { let res = node.input(message.clone()).await; match res { Ok(()) => { - self.inner.metrics.outgoing_broadcast_message_count.add(1); trace!(?key, "Delivered message to remote"); } Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); warn!(?e, ?key, "Error sending broadcast message to node"); } } @@ -314,19 +300,16 @@ impl ConnectedNetwork for MemoryNetwork { let res = node.input(message).await; match res { Ok(()) => { - self.inner.metrics.outgoing_direct_message_count.add(1); trace!(?recipient, "Delivered message to remote"); Ok(()) } Err(e) => { - self.inner.metrics.message_failed_to_send.add(1); warn!(?e, ?recipient, "Error delivering direct message"); Err(NetworkError::CouldNotDeliver) } } } } else { - self.inner.metrics.message_failed_to_send.add(1); warn!( "{:#?} {:#?} {:#?}", recipient, self.inner.master_map.map, "Node does not exist in map" @@ -352,7 +335,6 @@ impl ConnectedNetwork for MemoryNetwork { self.inner .in_flight_message_count .fetch_sub(ret.len(), Ordering::Relaxed); - self.inner.metrics.incoming_message_count.add(ret.len()); Ok(ret) } } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index b36267465f..4155fe6760 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -35,6 +35,7 @@ use hotshot_types::{ boxed_sync, data::ViewNumber, traits::{ + metrics::{Counter, Metrics, NoMetrics}, network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError}, node_implementation::NodeType, signature_key::SignatureKey, @@ -49,6 +50,33 @@ use tracing::error; use super::NetworkError; +/// CDN-specific metrics +#[derive(Clone)] +pub struct CdnMetricsValue { + /// The number of failed messages + pub num_failed_messages: Box, +} + +impl CdnMetricsValue { + /// Populate the metrics with the CDN-specific ones + pub fn new(metrics: &dyn Metrics) -> Self { + // Create a subgroup for the CDN + let subgroup = metrics.subgroup("cdn".into()); + + // Create the CDN-specific metrics + Self { + num_failed_messages: subgroup.create_counter("num_failed_messages".into(), None), + } + } +} + +impl Default for CdnMetricsValue { + // The default is empty metrics + fn default() -> Self { + Self::new(&*NoMetrics::boxed()) + } +} + /// A wrapped `SignatureKey`. We need to implement the Push CDN's `SignatureScheme` /// trait in order to sign and verify messages to/from the CDN. #[derive(Clone, Eq, PartialEq)] @@ -142,6 +170,8 @@ impl RunDef for TestingDef { pub struct PushCdnNetwork { /// The underlying client client: Client>, + /// The CDN-specific metrics + metrics: Arc, /// Whether or not the underlying network is supposed to be paused #[cfg(feature = "hotshot-testing")] is_paused: Arc, @@ -172,6 +202,7 @@ impl PushCdnNetwork { marshal_endpoint: String, topics: Vec, keypair: KeyPair>, + metrics: CdnMetricsValue, ) -> anyhow::Result { // Build config let config = ClientConfig { @@ -186,6 +217,7 @@ impl PushCdnNetwork { Ok(Self { client, + metrics: Arc::from(metrics), // Start unpaused #[cfg(feature = "hotshot-testing")] is_paused: Arc::from(AtomicBool::new(false)), @@ -374,6 +406,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Create our client let client = Arc::new(PushCdnNetwork { client: Client::new(client_config), + metrics: Arc::new(CdnMetricsValue::default()), #[cfg(feature = "hotshot-testing")] is_paused: Arc::from(AtomicBool::new(false)), }); @@ -429,7 +462,12 @@ impl ConnectedNetwork for PushCdnNetwork, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Global).await + self.broadcast_message(message, Topic::Global) + .await + .map_err(|e| { + self.metrics.num_failed_messages.add(1); + e + }) } /// Broadcast a message to all members of the DA committee. @@ -443,7 +481,12 @@ impl ConnectedNetwork for PushCdnNetwork, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Da).await + self.broadcast_message(message, Topic::Da) + .await + .map_err(|e| { + self.metrics.num_failed_messages.add(1); + e + }) } /// Send a direct message to a node with a particular key. Does not retry. @@ -469,6 +512,7 @@ impl ConnectedNetwork for PushCdnNetwork), /// Report that kademlia has successfully bootstrapped into the network IsBootstrapped, + /// The number of connected peers has possibly changed + ConnectedPeersUpdate(usize), } #[derive(Debug)] diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 917fba34f7..bd3a061c5e 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -555,6 +555,12 @@ impl NetworkNode { } else { info!("peerid {:?} connection is established to {:?} with endpoint {:?} with concurrent dial errors {:?}. {:?} connections left", self.peer_id, peer_id, endpoint, concurrent_dial_errors, num_established); } + + // Send the number of connected peers to the client + send_to_client + .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) + .await + .map_err(|_e| NetworkError::StreamClosed)?; } SwarmEvent::ConnectionClosed { connection_id: _, @@ -571,6 +577,12 @@ impl NetworkNode { } else { info!("peerid {:?} connection is closed to {:?} with endpoint {:?}. {:?} connections left. Cause: {:?}", self.peer_id, peer_id, endpoint, num_established, cause); } + + // Send the number of connected peers to the client + send_to_client + .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) + .await + .map_err(|_e| NetworkError::StreamClosed)?; } SwarmEvent::Dialing { peer_id, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 8437e20e02..aec2dae058 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -1,8 +1,4 @@ -use std::{ - collections::HashSet, - fmt::Debug, - time::{Duration, Instant}, -}; +use std::{collections::HashSet, fmt::Debug, time::Duration}; use async_compatibility_layer::{ art::{async_sleep, async_timeout, future::to}, @@ -156,15 +152,10 @@ impl NetworkNodeHandle { &self, num_peers: usize, node_id: usize, - timeout: Duration, ) -> Result<(), NetworkNodeHandleError> { - let start = Instant::now(); self.begin_bootstrap().await?; let mut connected_ok = false; while !connected_ok { - if start.elapsed() >= timeout { - return Err(NetworkNodeHandleError::ConnectTimeout); - } async_sleep(Duration::from_secs(1)).await; let num_connected = self.num_connected().await?; info!( diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index a06eecdd2b..4f3cdf0fa4 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -71,7 +71,9 @@ pub async fn counter_handle_network_event( use CounterMessage::*; use NetworkEvent::*; match event { - IsBootstrapped | NetworkEvent::ResponseRequested(..) => {} + IsBootstrapped + | NetworkEvent::ResponseRequested(..) + | NetworkEvent::ConnectedPeersUpdate(..) => {} GossipMsg(m) | DirectResponse(m, _) => { if let Ok(msg) = bincode::deserialize::(&m) { match msg { diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index da774d6298..a910fe18e6 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,12 +1,11 @@ #![allow(clippy::panic)] use std::{collections::BTreeSet, sync::Arc}; -use hotshot_types::traits::network::BroadcastDelay; use async_compatibility_layer::logging::setup_logging; use hotshot::{ traits::{ election::static_committee::GeneralStaticCommittee, - implementations::{MasterMap, MemoryNetwork, NetworkingMetricsValue}, + implementations::{MasterMap, MemoryNetwork}, NodeImplementation, }, types::SignatureKey, @@ -16,6 +15,7 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; +use hotshot_types::traits::network::BroadcastDelay; use hotshot_types::{ data::ViewNumber, message::{DataMessage, Message, MessageKind, VersionedMessage}, @@ -149,20 +149,10 @@ async fn memory_network_direct_queue() { trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new( - pub_key_1, - NetworkingMetricsValue::default(), - group.clone(), - Option::None, - ); + let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new( - pub_key_2, - NetworkingMetricsValue::default(), - group, - Option::None, - ); + let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -215,19 +205,9 @@ async fn memory_network_broadcast_queue() { let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new( - pub_key_1, - NetworkingMetricsValue::default(), - group.clone(), - Option::None, - ); + let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new( - pub_key_2, - NetworkingMetricsValue::default(), - group, - Option::None, - ); + let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -288,19 +268,9 @@ async fn memory_network_test_in_flight_message_count() { let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new( - pub_key_1, - NetworkingMetricsValue::default(), - group.clone(), - Option::None, - ); + let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new( - pub_key_2, - NetworkingMetricsValue::default(), - group, - Option::None, - ); + let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); // Create some dummy messages let messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -329,7 +299,11 @@ async fn memory_network_test_in_flight_message_count() { ); network2 - .broadcast_message(serialized_message.clone(), broadcast_recipients.clone(), BroadcastDelay::None) + .broadcast_message( + serialized_message.clone(), + broadcast_recipients.clone(), + BroadcastDelay::None, + ) .await .unwrap(); // network 1 has received `count` broadcast messages From fbcceca25892a24bb1d34032c472d00164797346 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 14 Jun 2024 10:08:30 -0600 Subject: [PATCH 1083/1393] fix import (#3327) --- hotshot/src/traits/networking/push_cdn_network.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 4155fe6760..84fbef4832 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,8 +1,9 @@ #[cfg(feature = "hotshot-testing")] use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use std::{collections::BTreeSet, marker::PhantomData}; #[cfg(feature = "hotshot-testing")] -use std::{path::Path, sync::Arc, time::Duration}; +use std::{path::Path, time::Duration}; use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] From 617b72e0fabcae52d47d7e56ba42811f9dbf83d0 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 14 Jun 2024 20:44:22 +0200 Subject: [PATCH 1084/1393] Fix panicing arithmetics (#3328) --- task-impls/src/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2e6f18dac1..c8f8a023b6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -376,7 +376,7 @@ impl, Ver: StaticVersionType> query_start .elapsed() .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), - BUILDER_MINIMUM_QUERY_TIME - query_start.elapsed(), + BUILDER_MINIMUM_QUERY_TIME.saturating_sub(query_start.elapsed()), )); futures::pin_mut!(timeout); // Stream::next requires Self::Unpin let mut tasks = tasks.into_inner().take_until(timeout); From 9b0eade221ca5b22d2fb6f210c6e7d2f71c0abcf Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 14 Jun 2024 14:58:47 -0400 Subject: [PATCH 1085/1393] [CATCHUP] Fetch Proposal when Missing (#3310) * Respond with proposal if we have it * add response chan to missing proposal event * Send proposal back out to requester * do the fetch in when parent proposal is missing * add timeout to request for data * always send back response to chan * simplify match * spawn request task * update maps, add timeout * don't always fetch * fix test * fix upgrade off by one * merge * fix test again after merge * fix dependency * merge * lint for dep version * Combine Serialize and Sign to one fn * Don't always spawn request * rename --- hotshot/src/lib.rs | 2 +- task-impls/src/consensus/helpers.rs | 116 +++++++++++++++--- task-impls/src/events.rs | 25 +++- task-impls/src/request.rs | 170 ++++++++++++++++++-------- task-impls/src/response.rs | 18 +-- testing/tests/tests_1/upgrade_task.rs | 17 ++- types/src/consensus.rs | 5 + 7 files changed, 273 insertions(+), 80 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0b7f697993..7da7f102f6 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -601,8 +601,8 @@ impl> SystemContext { add_network_message_task(&mut handle, Arc::clone(&da_network)).await; if let Some(request_receiver) = da_network.spawn_request_receiver_task().await { - add_response_task(&mut handle, request_receiver).await; add_request_network_task(&mut handle).await; + add_response_task(&mut handle, request_receiver).await; } add_network_event_task( diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index fbd3d60fa5..369517b7bd 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -1,11 +1,7 @@ +use crate::{events::HotShotEvent, helpers::broadcast_event}; + #[cfg(not(feature = "dependency-tasks"))] -use core::time::Duration; -#[cfg(not(feature = "dependency-tasks"))] -use std::marker::PhantomData; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; +use crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}; #[cfg(not(feature = "dependency-tasks"))] use super::ConsensusTaskState; @@ -14,12 +10,15 @@ use crate::{ consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, helpers::AnyhowTracing, }; -use crate::{events::HotShotEvent, helpers::broadcast_event}; #[cfg(not(feature = "dependency-tasks"))] use anyhow::bail; use anyhow::{ensure, Context, Result}; +#[cfg(not(feature = "dependency-tasks"))] +use async_broadcast::broadcast; use async_broadcast::Sender; #[cfg(not(feature = "dependency-tasks"))] +use async_compatibility_layer::art::async_timeout; +#[cfg(not(feature = "dependency-tasks"))] use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; #[cfg(not(feature = "dependency-tasks"))] @@ -29,6 +28,8 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; #[cfg(not(feature = "dependency-tasks"))] +use core::time::Duration; +#[cfg(not(feature = "dependency-tasks"))] use futures::FutureExt; #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::{ @@ -53,6 +54,12 @@ use hotshot_types::{ }; #[cfg(not(feature = "dependency-tasks"))] use hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}; +#[cfg(not(feature = "dependency-tasks"))] +use std::marker::PhantomData; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; #[cfg(not(feature = "dependency-tasks"))] @@ -541,6 +548,56 @@ pub async fn publish_proposal_if_able( .await } +/// Trigger a request to the network for a proposal for a view and wait for the response +#[cfg(not(feature = "dependency-tasks"))] +async fn fetch_proposal( + view: TYPES::Time, + event_stream: Sender>>, + quorum_membership: Arc, + consensus: Arc>>, +) -> Result> { + let (tx, mut rx) = broadcast(1); + let event = ProposalMissing { + view, + response_chan: tx, + }; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalRequest(event)), + &event_stream, + ) + .await; + let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { + bail!("Request for proposal failed"); + }; + let view = proposal.data.view_number(); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { + bail!("Invalid justify_qc in proposal for view {}", *view); + } + let mut consensus_write = consensus.write().await; + let leaf = Leaf::from_quorum_proposal(&proposal.data); + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + + if let Err(e) = consensus_write.update_validated_state_map( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }, + ) { + tracing::trace!("{e:?}"); + } + + consensus_write.update_saved_leaves(leaf.clone()); + Ok(leaf) +} + /// Handle the received quorum proposal. /// /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. @@ -598,14 +655,29 @@ pub(crate) async fn handle_quorum_proposal_recv Some(p), + None => fetch_proposal( + justify_qc.view_number(), + event_stream.clone(), + Arc::clone(&task_state.quorum_membership), + Arc::clone(&task_state.consensus), + ) + .await + .ok(), + }; let consensus_read = task_state.consensus.read().await; // Get the parent leaf and state. - let parent = match consensus_read - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned() - { + let parent = match parent_leaf { Some(leaf) => { if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { Some((leaf, Arc::clone(&state))) @@ -1136,14 +1208,30 @@ pub async fn update_state_and_vote_if_able Some(p), + None => fetch_proposal( + justify_qc.view_number(), + vote_info.3.clone(), + Arc::clone(&quorum_membership), + Arc::clone(&consensus), + ) + .await + .ok(), + }; + + let read_consnesus = consensus.read().await; // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) let Some(parent) = parent else { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index adcfba6963..10cba4e195 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,5 +1,6 @@ use std::{fmt::Display, sync::Arc}; +use async_broadcast::Sender; use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ @@ -28,6 +29,24 @@ impl TaskEvent for HotShotEvent { } } +/// Wrapper type for the event to notify tasks that a proposal for a view is missing +/// and the channel to send the event back to +#[derive(Debug, Clone)] +pub struct ProposalMissing { + /// View of missing proposal + pub view: TYPES::Time, + /// Channel to send the response back to + pub response_chan: Sender>>>, +} + +impl PartialEq for ProposalMissing { + fn eq(&self, other: &Self) -> bool { + self.view == other.view + } +} + +impl Eq for ProposalMissing {} + /// Marker that the task completed #[derive(Eq, PartialEq, Debug, Clone)] pub struct HotShotTaskCompleted; @@ -65,7 +84,7 @@ pub enum HotShotEvent { /// A quorum proposal with the given parent leaf is validated. QuorumProposalValidated(QuorumProposal, Leaf), /// A quorum proposal is missing for a view that we meed - QuorumProposalMissing(TYPES::Time), + QuorumProposalRequest(ProposalMissing), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal @@ -395,8 +414,8 @@ impl Display for HotShotEvent { HotShotEvent::UpgradeDecided(cert) => { write!(f, "UpgradeDecided(view_number{:?})", cert.view_number()) } - HotShotEvent::QuorumProposalMissing(view_number) => { - write!(f, "QuorumProposalMissing(view_number={view_number:?})") + HotShotEvent::QuorumProposalRequest(view_number) => { + write!(f, "QuorumProposalRequest(view_number={view_number:?})") } HotShotEvent::VoteNow(view_number, _) => { write!(f, "VoteNow(view_number={view_number:?})") diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index bda5ae8831..9d30067b3c 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -17,7 +17,11 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, - message::{DaConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, + data::QuorumProposal, + message::{ + DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, + SequencingMessage, + }, traits::{ election::Membership, network::{ConnectedNetwork, DataRequest, RequestKind, ResponseMessage}, @@ -32,10 +36,13 @@ use sha2::{Digest, Sha256}; use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use crate::{events::HotShotEvent, helpers::broadcast_event}; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + helpers::broadcast_event, +}; /// Amount of time to try for a request before timing out. -const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); +pub const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); /// Long running task which will request information after a proposal is received. /// The task will wait a it's `delay` and then send a request iteratively to peers @@ -102,8 +109,12 @@ impl> TaskState for NetworkRequest } Ok(()) } - HotShotEvent::QuorumProposalMissing(view) => { - self.run_delay(RequestKind::Proposal(*view), sender.clone(), *view); + HotShotEvent::QuorumProposalRequest(missing) => { + let ProposalMissing { + view, + response_chan: chan, + } = missing; + self.run_proposal(&RequestKind::Proposal(*view), chan.clone(), *view); Ok(()) } _ => Ok(()), @@ -154,6 +165,22 @@ impl> NetworkRequestState, + ) -> Option<::PureAssembledSignatureType> { + let Ok(data) = bincode::serialize(&request) else { + tracing::error!("Failed to serialize request!"); + return None; + }; + let Ok(signature) = TYPES::SignatureKey::sign(&self.private_key, &Sha256::digest(data)) + else { + error!("Failed to sign Data Request"); + return None; + }; + Some(signature) + } /// run a delayed request task for a request. The first response /// received will be sent over `sender` #[instrument(skip_all, fields(id = self.id, view = *self.view), name = "NetworkRequestState run_delay", level = "error")] @@ -168,7 +195,6 @@ impl> NetworkRequestState> NetworkRequestState, + response_chan: Sender>>>, + view: TYPES::Time, + ) { + let leader = self.da_membership.leader(view); + let requester = ProposalRequester:: { + network: Arc::clone(&self.network), + sender: response_chan, + leader, + }; + let Some(signature) = self.serialize_and_sign(request) else { + return; + }; + + let pub_key = self.public_key.clone(); + async_spawn(async move { + requester.do_proposal(view, signature, pub_key).await; + }); + } + /// Signals delayed requesters to finish pub fn set_shutdown_flag(&self) { self.shutdown_flag.store(true, Ordering::Relaxed); @@ -216,27 +258,71 @@ struct DelayedRequester> { delay: Duration, /// The peers we will request in a random order recipients: Vec, - /// Leader for the view of the request - leader: TYPES::SignatureKey, /// A flag indicating that `HotShotEvent::Shutdown` has been received shutdown_flag: Arc, } +/// A task the requests some data immediately from one peer + +struct ProposalRequester> { + /// Network to send requests + network: Arc, + /// Channel to send the event when we receive a response + sender: Sender>>>, + /// Leader for the view of the request + leader: TYPES::SignatureKey, +} + +impl> ProposalRequester { + /// Handle sending a request for proposal for a view, does + /// not delay + async fn do_proposal( + &self, + view: TYPES::Time, + signature: Signature, + key: TYPES::SignatureKey, + ) { + let response = match bincode::serialize(&make_proposal_req::(view, signature, key)) { + Ok(serialized_msg) => { + async_timeout( + REQUEST_TIMEOUT, + self.network + .request_data::(serialized_msg, &self.leader), + ) + .await + } + Err(e) => { + tracing::error!( + "Failed to serialize outgoing message: this should never happen. Error: {e}" + ); + broadcast_event(None, &self.sender).await; + return; + } + }; + if let Ok(Ok(serialized_response)) = response { + if let Ok(ResponseMessage::Found(msg)) = bincode::deserialize(&serialized_response) { + let SequencingMessage::General(GeneralConsensusMessage::Proposal(prop)) = msg + else { + error!("Requested Proposal but received a non-proposal in response. Response was {:?}", msg); + broadcast_event(None, &self.sender).await; + return; + }; + broadcast_event(Some(prop), &self.sender).await; + } + broadcast_event(None, &self.sender).await; + } else { + broadcast_event(None, &self.sender).await; + } + } +} + /// Wrapper for the info in a VID request struct VidRequest(TYPES::Time, TYPES::SignatureKey); -/// Wrapper for the info in a Proposal fetch request -struct ProposalRequest(TYPES::Time, TYPES::SignatureKey); - impl> DelayedRequester { /// Wait the delay, then try to complete the request. Iterates over peers /// until the request is completed, or the data is no longer needed. - async fn run( - self, - request: RequestKind, - signature: Signature, - pub_key: TYPES::SignatureKey, - ) { + async fn run(self, request: RequestKind, signature: Signature) { match request { RequestKind::Vid(view, key) => { // Do the delay only if primary is up and then start sending @@ -245,28 +331,7 @@ impl> DelayedRequester { } self.do_vid(VidRequest(view, key), signature).await; } - RequestKind::Proposal(view) => { - self.do_proposal(ProposalRequest(view, pub_key), signature) - .await; - } - RequestKind::DaProposal(..) => {} - } - } - /// Handle sending a request for proposal for a view, does - /// not delay - async fn do_proposal(&self, req: ProposalRequest, signature: Signature) { - match bincode::serialize(&make_proposal_req(&req, signature)) { - Ok(serialized_msg) => { - let _ = self - .network - .request_data::(serialized_msg, &self.leader) - .await; - } - Err(e) => { - tracing::error!( - "Failed to serialize outgoing message: this should never happen. Error: {e}" - ); - } + RequestKind::Proposal(..) | RequestKind::DaProposal(..) => {} } } /// Handle sending a VID Share request, runs the loop until the data exists @@ -361,17 +426,18 @@ fn make_vid( /// Build a request for a Proposal fn make_proposal_req( - req: &ProposalRequest, + view: TYPES::Time, signature: Signature, + key: TYPES::SignatureKey, ) -> Message { - let kind = RequestKind::Proposal(req.0); + let kind = RequestKind::Proposal(view); let data_request = DataRequest { - view: req.0, + view, request: kind, signature, }; Message { - sender: req.1.clone(), + sender: key, kind: MessageKind::Data(DataMessage::RequestData(data_request)), } } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index db900d9ac9..fa5774a3a4 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -9,7 +9,10 @@ use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::{Consensus, LockedConsensusState}, data::VidDisperseShare, - message::{DaConsensusMessage, DataMessage, Message, MessageKind, Proposal, SequencingMessage}, + message::{ + DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, + SequencingMessage, + }, traits::{ election::Membership, network::{DataRequest, RequestKind, ResponseChannel, ResponseMessage}, @@ -211,12 +214,13 @@ impl NetworkResponseState { self.quorum.has_stake(sender) } /// Lookup the proposal for the view and respond if it's found/not found - #[allow(clippy::no_effect_underscore_binding)] - async fn respond_with_proposal(&self, _view: TYPES::Time) -> ResponseMessage { - // Complete after we are storing our last proposed view: - // https://github.com/EspressoSystems/HotShot/issues/3240 - async {}.await; - todo!(); + async fn respond_with_proposal(&self, view: TYPES::Time) -> ResponseMessage { + match self.consensus.read().await.last_proposals().get(&view) { + Some(prop) => ResponseMessage::Found(SequencingMessage::General( + GeneralConsensusMessage::Proposal(prop.clone()), + )), + None => ResponseMessage::NotFound, + } } } diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task.rs index ea58e3a545..61dd5a37c1 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task.rs @@ -366,7 +366,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { new_version, decide_by: ViewNumber::new(7), new_version_hash: [0u8; 12].to_vec(), - old_version_last_view: ViewNumber::new(7), + old_version_last_view: ViewNumber::new(6), new_version_first_view: ViewNumber::new(8), }; @@ -504,6 +504,7 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { ViewNumber::new(6), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), + QuorumProposalRecv(proposals[5].clone(), leaders[5]), QcFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), ], vec![ @@ -574,14 +575,24 @@ async fn test_upgrade_and_consensus_task_blank_blocks() { task_state_asserts: vec![], }, Expectations { - output_asserts: vec![quorum_proposal_send_with_null_block( + output_asserts: vec![ + exact(ViewChange(ViewNumber::new(6))), + validated_state_updated(), + quorum_proposal_validated(), + quorum_proposal_send_with_null_block( quorum_membership.total_nodes(), - )], + ), + leaf_decided(), + quorum_vote_send(), + ], task_state_asserts: vec![], }, Expectations { output_asserts: vec![ exact(ViewChange(ViewNumber::new(7))), + validated_state_updated(), + quorum_proposal_validated(), + leaf_decided(), // We do NOT expect a quorum_vote_send() because we have set the block to be non-null in this view. ], task_state_asserts: vec![], diff --git a/types/src/consensus.rs b/types/src/consensus.rs index e4d31e395a..1add6ba71c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -239,6 +239,11 @@ impl Consensus { &self.saved_da_certs } + /// Get the map of our recent proposals + pub fn last_proposals(&self) -> &BTreeMap>> { + &self.last_proposals + } + /// Update the current view. /// # Errors /// Can return an error when the new view_number is not higher than the existing view number. From 5c81f3fce9ed4f6b2968cd76eb346bc549fa1d04 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 17 Jun 2024 12:02:54 -0400 Subject: [PATCH 1086/1393] Clean up consensus import groups (#3331) --- hotshot/src/tasks/mod.rs | 3 +- .../src/traits/networking/push_cdn_network.rs | 3 +- task-impls/src/consensus/helpers.rs | 77 ++++++++----------- task-impls/src/consensus/mod.rs | 31 ++++---- task-impls/src/quorum_vote/mod.rs | 11 +-- 5 files changed, 54 insertions(+), 71 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b55801b828..165ca013e5 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -5,7 +5,6 @@ pub mod task_state; use std::{sync::Arc, time::Duration}; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use hotshot_task::task::Task; #[cfg(not(feature = "dependency-tasks"))] @@ -37,6 +36,8 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; +use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; + /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 84fbef4832..97d23c613e 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,7 +1,6 @@ #[cfg(feature = "hotshot-testing")] use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::{collections::BTreeSet, marker::PhantomData}; +use std::{collections::BTreeSet, marker::PhantomData, sync::Arc}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, time::Duration}; diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 369517b7bd..8354662009 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -1,44 +1,14 @@ -use crate::{events::HotShotEvent, helpers::broadcast_event}; - -#[cfg(not(feature = "dependency-tasks"))] -use crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}; - -#[cfg(not(feature = "dependency-tasks"))] -use super::ConsensusTaskState; -#[cfg(not(feature = "dependency-tasks"))] -use crate::{ - consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, - helpers::AnyhowTracing, +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; -#[cfg(not(feature = "dependency-tasks"))] -use anyhow::bail; + use anyhow::{ensure, Context, Result}; -#[cfg(not(feature = "dependency-tasks"))] -use async_broadcast::broadcast; use async_broadcast::Sender; -#[cfg(not(feature = "dependency-tasks"))] -use async_compatibility_layer::art::async_timeout; -#[cfg(not(feature = "dependency-tasks"))] -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -#[cfg(not(feature = "dependency-tasks"))] #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -#[cfg(not(feature = "dependency-tasks"))] -use chrono::Utc; use committable::{Commitment, Committable}; -#[cfg(not(feature = "dependency-tasks"))] -use core::time::Duration; -#[cfg(not(feature = "dependency-tasks"))] -use futures::FutureExt; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::{ - consensus::CommitmentAndMetadata, - traits::{ - node_implementation::{ConsensusTime, NodeImplementation}, - storage::Storage, - }, -}; use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, @@ -52,21 +22,38 @@ use hotshot_types::{ utils::{Terminator, ViewInner}, vote::{Certificate, HasViewNumber}, }; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}; -#[cfg(not(feature = "dependency-tasks"))] -use std::marker::PhantomData; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -#[cfg(not(feature = "dependency-tasks"))] -use tracing::error; use tracing::{debug, info, warn}; #[cfg(not(feature = "dependency-tasks"))] -use vbs::version::Version; +use { + super::ConsensusTaskState, + crate::{ + consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, + helpers::AnyhowTracing, + }, + crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}, + anyhow::bail, + async_broadcast::broadcast, + async_compatibility_layer::art::async_timeout, + async_compatibility_layer::art::{async_sleep, async_spawn}, + chrono::Utc, + core::time::Duration, + futures::FutureExt, + hotshot_types::{ + consensus::CommitmentAndMetadata, + traits::{ + node_implementation::{ConsensusTime, NodeImplementation}, + storage::Storage, + }, + }, + hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}, + std::marker::PhantomData, + tracing::error, + vbs::version::Version, +}; + +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 436e20d550..59263b0b6b 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -2,20 +2,12 @@ use std::{collections::BTreeMap, sync::Arc}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -#[cfg(not(feature = "dependency-tasks"))] -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; use futures::future::join_all; use hotshot_task::task::TaskState; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::data::VidDisperseShare; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::message::Proposal; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::vid::vid_scheme; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, data::{QuorumProposal, ViewChangeEvidence}, @@ -29,22 +21,25 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::{traits::storage::Storage, vote::Certificate}; -#[cfg(not(feature = "dependency-tasks"))] -use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -#[cfg(not(feature = "dependency-tasks"))] -use tracing::info; use tracing::{debug, error, instrument, warn}; use vbs::version::Version; - #[cfg(not(feature = "dependency-tasks"))] -use crate::consensus::helpers::{ - handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, - update_state_and_vote_if_able, +use { + crate::consensus::helpers::{ + handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, + update_state_and_vote_if_able, + }, + async_compatibility_layer::art::async_spawn, + hotshot_types::data::VidDisperseShare, + hotshot_types::message::Proposal, + hotshot_types::vid::vid_scheme, + hotshot_types::{traits::storage::Storage, vote::Certificate}, + jf_vid::VidScheme, + tracing::info, }; + use crate::{ consensus::view_change::{update_view, DONT_SEND_VIEW_CHANGE_EVENT}, events::{HotShotEvent, HotShotTaskCompleted}, diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 4d7731beed..3461afc4ab 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -1,10 +1,5 @@ use std::{collections::HashMap, sync::Arc}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, - quorum_vote::handlers::handle_quorum_proposal_validated, -}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; @@ -41,6 +36,12 @@ use tokio::task::JoinHandle; use tracing::{debug, error, instrument, trace, warn}; use vbs::version::Version; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + quorum_vote::handlers::handle_quorum_proposal_validated, +}; + /// Event handlers for `QuorumProposalValidated`. mod handlers; From b1e7febc4234acf19b766bc42cff80c7226dafd7 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 18 Jun 2024 22:15:51 +0800 Subject: [PATCH 1087/1393] [DEPENDENCY_REFACTOR] - Fix the ValidatedStateUpdated dependency in the quorum proposal and vote tasks (#3322) * Move validation * Fix fmt and import * Fix build and recv test * Remove a todo * Restore an event * Fix non-dependency lints * Fix tests * Fix CI * Fix fmt and imports * Fix proposal and proposal recv tasks tests * 3 more tests * Fix doc * Address comments * Add error * Fix Conficts in keyao/remove-proposal-validation (#3308) * fix * remove dead test * fix lint * fix test * Fix vote tests --------- Co-authored-by: Keyao Shen * Save changes * Fix upgrade test * Fix build * Add error handling to update_validated_state_map * Fix tests * use ref * Update the proposal task part * block on state update * implement feature for both tasks * turn on tests * lint * turn ont he rest of the tests * maybe fix test 5 * revert, turn off ci 5 * turn off actual ci 5 * turn on ci 5 * fix ci 1 * fix docs * cosmetic fixes * fix * Add more places to inert into the validated state map, remove explicit handling from quorum proposal * re-add insert on event handler * try without state update * make it official --------- Co-authored-by: Jarred Parr --- hotshot/src/tasks/task_state.rs | 2 +- task-impls/src/consensus/helpers.rs | 18 ++--- task-impls/src/consensus/mod.rs | 1 - task-impls/src/events.rs | 6 -- .../src/quorum_proposal/dependency_handle.rs | 52 +++++++++---- task-impls/src/quorum_proposal/mod.rs | 72 ++++-------------- .../src/quorum_proposal_recv/handlers.rs | 5 -- task-impls/src/quorum_proposal_recv/mod.rs | 1 - task-impls/src/quorum_vote/mod.rs | 32 +++++++- testing/src/helpers.rs | 34 ++++++--- testing/src/view_generator.rs | 12 ++- testing/tests/tests_1/libp2p.rs | 2 - .../tests_1/quorum_proposal_recv_task.rs | 1 - testing/tests/tests_1/quorum_proposal_task.rs | 74 ++++++++++--------- 14 files changed, 163 insertions(+), 149 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index f808593b09..19b415cab9 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -271,7 +271,7 @@ impl> CreateTaskState QuorumProposalTaskState { latest_proposed_view: handle.cur_view().await, - propose_dependencies: HashMap::new(), + proposal_dependencies: HashMap::new(), quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), da_network: Arc::clone(&handle.hotshot.networks.da_network), output_event_stream: handle.hotshot.external_event_stream.0.clone(), diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 8354662009..d2718e8de4 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -99,6 +99,10 @@ pub async fn validate_proposal_safety_and_liveness( { tracing::trace!("{e:?}"); } + consensus + .write() + .await + .update_saved_leaves(proposed_leaf.clone()); // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. broadcast_event( @@ -354,15 +358,14 @@ pub fn validate_proposal_view_and_certs( /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. pub(crate) async fn parent_leaf_and_state( - cur_view: TYPES::Time, - view_number: TYPES::Time, + next_proposal_view_number: TYPES::Time, quorum_membership: Arc, public_key: TYPES::SignatureKey, consensus: Arc>>, ) -> Result<(Leaf, Arc<::ValidatedState>)> { ensure!( - quorum_membership.leader(view_number) == public_key, - "Somehow we formed a QC but are not the leader for the next view {view_number:?}", + quorum_membership.leader(next_proposal_view_number) == public_key, + "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", ); let consensus_reader = consensus.read().await; @@ -397,7 +400,7 @@ pub(crate) async fn parent_leaf_and_state( // Walk back until we find a decide if !reached_decided { - debug!("We have not reached decide from view {:?}", cur_view); + debug!("We have not reached decide"); while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { break; @@ -415,7 +418,6 @@ pub(crate) async fn parent_leaf_and_state( #[allow(clippy::too_many_arguments)] #[cfg(not(feature = "dependency-tasks"))] pub async fn publish_proposal_from_commitment_and_metadata( - cur_view: TYPES::Time, view: TYPES::Time, sender: Sender>>, quorum_membership: Arc, @@ -431,7 +433,6 @@ pub async fn publish_proposal_from_commitment_and_metadata( version: Version, ) -> Result> { let (parent_leaf, state) = parent_leaf_and_state( - cur_view, view, quorum_membership, public_key.clone(), @@ -501,7 +502,6 @@ pub async fn publish_proposal_from_commitment_and_metadata( #[allow(clippy::too_many_arguments)] #[cfg(not(feature = "dependency-tasks"))] pub async fn publish_proposal_if_able( - cur_view: TYPES::Time, view: TYPES::Time, sender: Sender>>, quorum_membership: Arc, @@ -517,7 +517,6 @@ pub async fn publish_proposal_if_able( version: Version, ) -> Result> { publish_proposal_from_commitment_and_metadata( - cur_view, view, sender, quorum_membership, @@ -765,7 +764,6 @@ pub(crate) async fn handle_quorum_proposal_recv> ConsensusTaskState event_stream: Sender>>, ) -> Result<()> { let create_and_send_proposal_handle = publish_proposal_if_able( - self.cur_view, view, event_stream, Arc::clone(&self.quorum_membership), diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 10cba4e195..0cd13789ab 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -194,9 +194,6 @@ pub enum HotShotEvent { /// A new high_qc has been reached by this node. UpdateHighQc(QuorumCertificate), - - /// A new undecided view has been proposed. - NewUndecidedView(Leaf), } impl Display for HotShotEvent { @@ -432,9 +429,6 @@ impl Display for HotShotEvent { HotShotEvent::UpdateHighQc(cert) => { write!(f, "UpdateHighQc(view_number={:?})", cert.view_number()) } - HotShotEvent::NewUndecidedView(leaf) => { - write!(f, "NewUndecidedView(view_number={:?})", leaf.view_number()) - } } } } diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index accd2ee2fe..d2d5adf442 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -4,14 +4,17 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use anyhow::{ensure, Context, Result}; -use async_broadcast::Sender; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; use committable::Committable; -use hotshot_task::dependency_task::HandleDepOutput; +use hotshot_task::{ + dependency::{Dependency, EventDependency}, + dependency_task::HandleDepOutput, +}; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, - data::{Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence}, + data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, @@ -31,7 +34,7 @@ pub(crate) enum ProposalDependency { PayloadAndMetadata, /// For the `QcFormed` event. - QC, + Qc, /// For the `ViewSyncFinalizeCertificate2Recv` event. ViewSyncCert, @@ -44,13 +47,10 @@ pub(crate) enum ProposalDependency { /// For the `VidShareValidated` event. VidShare, - - /// For the `ValidatedStateUpdated` event. - ValidatedState, } /// Handler for the proposal dependency -pub(crate) struct ProposalDependencyHandle { +pub struct ProposalDependencyHandle { /// Latest view number that has been proposed for (proxy for cur_view). pub latest_proposed_view: TYPES::Time, @@ -60,6 +60,9 @@ pub(crate) struct ProposalDependencyHandle { /// The event sender. pub sender: Sender>>, + /// The event receiver. + pub receiver: Receiver>>, + /// Immutable instance state pub instance_state: Arc, @@ -83,17 +86,16 @@ pub(crate) struct ProposalDependencyHandle { } impl ProposalDependencyHandle { - /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperseShare`] + /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. async fn publish_proposal( &self, commitment_and_metadata: CommitmentAndMetadata, - vid_share: Proposal>, + vid_share: Proposal>, view_change_evidence: Option>, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( - self.latest_proposed_view, self.view_number, Arc::clone(&self.quorum_membership), self.public_key.clone(), @@ -175,6 +177,30 @@ impl HandleDepOutput for ProposalDependencyHandle { #[allow(clippy::no_effect_underscore_binding)] async fn handle_dep_result(self, res: Self::Output) { + let high_qc_view_number = self.consensus.read().await.high_qc().view_number; + if !self + .consensus + .read() + .await + .validated_state_map() + .contains_key(&high_qc_view_number) + { + // Block on receiving the event from the event stream. + EventDependency::new( + self.receiver.clone(), + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { + *view_number == high_qc_view_number + } else { + false + } + }), + ) + .completed() + .await; + } + let mut commit_and_metadata: Option> = None; let mut timeout_certificate = None; let mut view_sync_finalize_cert = None; @@ -201,13 +227,13 @@ impl HandleDepOutput for ProposalDependencyHandle { timeout_certificate = Some(timeout.clone()); } either::Left(_) => { - // Handled by the HighQcUpdated event. + // Handled by the UpdateHighQc event. } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { view_sync_finalize_cert = Some(cert.clone()); } - HotShotEvent::VidShareValidated(share) => { + HotShotEvent::VidDisperseSend(share, _) => { vid_share = Some(share.clone()); } _ => {} diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 4d5005b49c..cb0036516f 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -42,7 +42,7 @@ pub struct QuorumProposalTaskState pub latest_proposed_view: TYPES::Time, /// Table for the in-progress proposal depdencey tasks. - pub propose_dependencies: HashMap>, + pub proposal_dependencies: HashMap>, /// Network for all nodes pub quorum_network: Arc, @@ -104,7 +104,7 @@ impl> QuorumProposalTaskState { + ProposalDependency::Qc => { if let HotShotEvent::UpdateHighQc(qc) = event { qc.view_number() + 1 } else { @@ -149,19 +149,12 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::VidShareValidated(vid_share) = event { + if let HotShotEvent::VidDisperseSend(vid_share, _) = event { vid_share.data.view_number() } else { return false; } } - ProposalDependency::ValidatedState => { - if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { - *view_number + 1 - } else { - return false; - } - } }; let valid = event_view == view_number; if valid { @@ -176,7 +169,7 @@ impl> QuorumProposalTaskState>>, + event_receiver: &Receiver>>, event: Arc>, ) -> AndDependency>>>> { let mut proposal_dependency = self.create_event_dependency( @@ -186,7 +179,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); @@ -239,12 +226,9 @@ impl> QuorumProposalTaskState { view_sync_dependency.mark_as_completed(event); } - HotShotEvent::VidShareValidated(_) => { + HotShotEvent::VidDisperseSend(_, _) => { vid_share_dependency.mark_as_completed(event); } - HotShotEvent::ValidatedStateUpdated(_, _) => { - validated_state_update_dependency.mark_as_completed(event); - } HotShotEvent::UpdateHighQc(_) => { qc_dependency.mark_as_completed(event); } @@ -268,11 +252,7 @@ impl> QuorumProposalTaskState 1 { - primary_deps.push(validated_state_update_dependency); - } + let primary_deps = vec![payload_commitment_dependency, vid_share_dependency]; AndDependency::from_deps(vec![OrDependency::from_deps(vec![ AndDependency::from_deps(vec![ @@ -308,13 +288,13 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState> QuorumProposalTaskState { + HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); - // Update the vid shares map if we need to include the new value. - let share = vid_share.clone(); - self.consensus - .write() - .await - .update_vid_shares(view_number, share.clone()); - self.create_dependency_task_if_new( view_number, event_receiver, @@ -470,24 +444,6 @@ impl> QuorumProposalTaskState { - // Update the internal validated state map. - if let Err(e) = self - .consensus - .write() - .await - .update_validated_state_map(*view_number, view.clone()) - { - tracing::trace!("{e:?}"); - } - - self.create_dependency_task_if_new( - *view_number + 1, - event_receiver, - event_sender, - Arc::clone(&event), - ); - } HotShotEvent::UpdateHighQc(qc) => { // First, update the high QC. if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { @@ -530,7 +486,7 @@ impl> TaskState async fn cancel_subtasks(&mut self) { for handle in self - .propose_dependencies + .proposal_dependencies .drain() .map(|(_view, handle)| handle) { diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 5d732df49a..2f28139f45 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -92,11 +92,6 @@ async fn validate_proposal_liveness> QuorumProposalRecvTaskState< Ok(QuorumProposalValidity::Liveness) => { // Build the parent leaf since we didn't find it during the proposal check. let parent_leaf = match parent_leaf_and_state( - self.cur_view, proposal.data.view_number() + 1, Arc::clone(&self.quorum_membership), self.public_key.clone(), diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 3461afc4ab..599185f020 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -8,7 +8,7 @@ use async_std::task::JoinHandle; use async_trait::async_trait; use committable::Committable; use hotshot_task::{ - dependency::{AndDependency, EventDependency, OrDependency}, + dependency::{AndDependency, Dependency, EventDependency, OrDependency}, dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; @@ -77,6 +77,8 @@ struct VoteDependencyHandle> { view_number: TYPES::Time, /// Event sender. sender: Sender>>, + /// Event receiver. + receiver: Receiver>>, /// The current version of HotShot version: Version, /// The node's id @@ -213,8 +215,30 @@ impl + 'static> HandleDepOutput #[allow(clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { - #[allow(unused_variables)] - let mut cur_proposal = None; + let high_qc_view_number = self.consensus.read().await.high_qc().view_number; + if !self + .consensus + .read() + .await + .validated_state_map() + .contains_key(&high_qc_view_number) + { + // Block on receiving the event from the event stream. + EventDependency::new( + self.receiver.clone(), + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { + *view_number == high_qc_view_number + } else { + false + } + }), + ) + .completed() + .await; + } + let mut payload_commitment = None; let mut leaf = None; let mut vid_share = None; @@ -222,7 +246,6 @@ impl + 'static> HandleDepOutput match event.as_ref() { #[allow(unused_assignments)] HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { - cur_proposal = Some(proposal.clone()); let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { @@ -471,6 +494,7 @@ impl> QuorumVoteTaskState +#[allow(clippy::type_complexity)] pub fn build_vid_proposal( quorum_membership: &::Membership, view_number: ViewNumber, transactions: Vec, private_key: &::PrivateKey, -) -> Vec>> { +) -> ( + Proposal>, + Vec>>, +) { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(&transactions); @@ -256,14 +260,26 @@ pub fn build_vid_proposal( quorum_membership, ); - VidDisperseShare::from_vid_disperse(vid_disperse) - .into_iter() - .map(|vid_disperse| { - vid_disperse - .to_proposal(private_key) - .expect("Failed to sign payload commitment") - }) - .collect() + let signature = + ::sign(private_key, vid_disperse.payload_commitment.as_ref()) + .expect("Failed to sign VID commitment"); + let vid_disperse_proposal = Proposal { + data: vid_disperse.clone(), + signature, + _pd: PhantomData, + }; + + ( + vid_disperse_proposal, + VidDisperseShare::from_vid_disperse(vid_disperse) + .into_iter() + .map(|vid_disperse| { + vid_disperse + .to_proposal(private_key) + .expect("Failed to sign payload commitment") + }) + .collect(), + ) } pub fn build_da_certificate( diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 9911cb6933..6e582ca738 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -15,7 +15,10 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_types::{ - data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare, ViewChangeEvidence, ViewNumber}, + data::{ + DaProposal, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewChangeEvidence, + ViewNumber, + }, message::Proposal, simple_certificate::{ DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -45,6 +48,7 @@ pub struct TestView { pub view_number: ViewNumber, pub quorum_membership: ::Membership, pub da_membership: ::Membership, + pub vid_disperse: Proposal>, pub vid_proposal: ( Vec>>, ::SignatureKey, @@ -87,7 +91,7 @@ impl TestView { let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); - let vid_proposal = build_vid_proposal( + let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, genesis_view, transactions.clone(), @@ -160,6 +164,7 @@ impl TestView { view_number: genesis_view, quorum_membership: quorum_membership.clone(), da_membership: da_membership.clone(), + vid_disperse, vid_proposal: (vid_proposal, public_key), da_certificate, transactions, @@ -215,7 +220,7 @@ impl TestView { let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); - let vid_proposal = build_vid_proposal( + let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, next_view, transactions.clone(), @@ -360,6 +365,7 @@ impl TestView { view_number: next_view, quorum_membership: quorum_membership.clone(), da_membership: self.da_membership.clone(), + vid_disperse, vid_proposal: (vid_proposal, public_key), da_certificate, leader_public_key, diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index f4fa98636e..6b415191b8 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -1,7 +1,6 @@ use std::time::Duration; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; -#[cfg(not(feature = "dependency-tasks"))] use hotshot_testing::spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, @@ -43,7 +42,6 @@ async fn libp2p_network() { } /// libp2p network test with failures -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index d36f773488..54fadb98c6 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -192,7 +192,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), )), exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(NewUndecidedView(leaves[2].clone())), vote_now(), ])]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 676659b3cd..7e2b49ecde 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,8 +1,6 @@ #![cfg(feature = "dependency-tasks")] use std::time::Duration; -#[cfg(not(feature = "dependency-tasks"))] -use committable::Committable; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot::traits::ValidatedState; @@ -12,14 +10,6 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes}, }; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_example_types::{state_types::TestInstanceState,}; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_testing::{ - all_predicates, - helpers::{ - build_cert, key_pair_for_id - }}; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ events::HotShotEvent::*, @@ -29,7 +19,7 @@ use hotshot_testing::{ all_predicates, helpers::{ build_fake_view_with_leaf, build_system_handle, - vid_scheme_from_view_number, vid_share, + vid_scheme_from_view_number, }, predicates::{ event::{all_predicates, exact, quorum_proposal_send}, @@ -39,8 +29,6 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_types::{simple_certificate::QuorumCertificate,}; use hotshot_types::{ data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, @@ -90,6 +78,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let mut leaders = Vec::new(); let mut leaves = Vec::new(); let mut vids = Vec::new(); + let mut vid_dispersals = Vec::new(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(2).collect::>().await { @@ -97,6 +86,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. @@ -110,10 +100,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { drop(consensus_writer); let inputs = vec![ - serial![VidShareValidated(vid_share( - &vids[0].0, - handle.public_key() - )),], + serial![VidDisperseSend(vid_dispersals[0].clone(), handle.public_key())], random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( @@ -167,6 +154,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let mut leaders = Vec::new(); let mut leaves = Vec::new(); let mut vids = Vec::new(); + let mut vid_dispersals = Vec::new(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(5).collect::>().await { @@ -174,11 +162,18 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ).unwrap(); } // We need to handle the views where we aren't the leader to ensure that the states are @@ -204,7 +199,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ViewNumber::new(1), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[0].0, handle.public_key())), + VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( genesis_cert.view_number(), build_fake_view_with_leaf(genesis_leaf.clone()), @@ -220,7 +215,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[1].0, handle.public_key())), + VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), build_fake_view_with_leaf(leaves[0].clone()), @@ -236,7 +231,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ViewNumber::new(3), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[2].0, handle.public_key())), + VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), build_fake_view_with_leaf(leaves[1].clone()), @@ -252,7 +247,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ViewNumber::new(4), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[3].0, handle.public_key())), + VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( proposals[2].data.view_number(), build_fake_view_with_leaf(leaves[2].clone()), @@ -268,7 +263,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ViewNumber::new(5), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[4].0, handle.public_key())), + VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( proposals[3].data.view_number(), build_fake_view_with_leaf(leaves[3].clone()), @@ -325,11 +320,13 @@ async fn test_quorum_proposal_task_qc_timeout() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut vids = Vec::new(); + let mut vid_dispersals = Vec::new(); let mut leaves = Vec::new(); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); leaves.push(view.leaf.clone()); } let timeout_data = TimeoutData { @@ -340,6 +337,7 @@ async fn test_quorum_proposal_task_qc_timeout() { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); leaves.push(view.leaf.clone()); } @@ -358,7 +356,7 @@ async fn test_quorum_proposal_task_qc_timeout() { ViewNumber::new(3), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[2].0.clone(), handle.public_key())), + VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), build_fake_view_with_leaf(leaves[1].clone()), @@ -388,8 +386,6 @@ async fn test_quorum_proposal_task_view_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - // We need to propose as the leader for view 2, otherwise we get caught up with the special - // case in the genesis view. let node_id = 2; let handle = build_system_handle(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -403,11 +399,13 @@ async fn test_quorum_proposal_task_view_sync() { let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut vids = Vec::new(); + let mut vid_dispersals = Vec::new(); let mut leaves = Vec::new(); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); leaves.push(view.leaf.clone()); } @@ -420,6 +418,7 @@ async fn test_quorum_proposal_task_view_sync() { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); leaves.push(view.leaf.clone()); } @@ -438,7 +437,7 @@ async fn test_quorum_proposal_task_view_sync() { ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[1].0.clone(), handle.public_key())), + VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), build_fake_view_with_leaf(leaves[0].clone()), @@ -476,6 +475,7 @@ async fn test_quorum_proposal_task_liveness_check() { let mut leaders = Vec::new(); let mut leaves = Vec::new(); let mut vids = Vec::new(); + let mut vid_dispersals = Vec::new(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(5).collect::>().await { @@ -483,11 +483,17 @@ async fn test_quorum_proposal_task_liveness_check() { leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ).unwrap(); } drop(consensus_writer); @@ -512,7 +518,7 @@ async fn test_quorum_proposal_task_liveness_check() { ViewNumber::new(1), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[0].0, handle.public_key())), + VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( genesis_cert.view_number(), build_fake_view_with_leaf(genesis_leaf.clone()), @@ -528,7 +534,7 @@ async fn test_quorum_proposal_task_liveness_check() { ViewNumber::new(2), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[1].0, handle.public_key())), + VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), build_fake_view_with_leaf(leaves[0].clone()), @@ -544,7 +550,7 @@ async fn test_quorum_proposal_task_liveness_check() { ViewNumber::new(3), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[2].0, handle.public_key())), + VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), build_fake_view_with_leaf(leaves[1].clone()), @@ -561,7 +567,7 @@ async fn test_quorum_proposal_task_liveness_check() { ViewNumber::new(4), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[3].0, handle.public_key())), + VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( proposals[2].data.view_number(), build_fake_view_with_leaf(leaves[2].clone()), @@ -577,7 +583,7 @@ async fn test_quorum_proposal_task_liveness_check() { ViewNumber::new(5), null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), ), - VidShareValidated(vid_share(&vids[4].0, handle.public_key())), + VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( proposals[3].data.view_number(), build_fake_view_with_leaf(leaves[3].clone()), @@ -620,8 +626,6 @@ async fn test_quorum_proposal_task_with_incomplete_events() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - // We need to propose as the leader for view 2, otherwise we get caught up with the special - // case in the genesis view. let handle = build_system_handle(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -640,8 +644,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { // We run the task here at view 2, but this time we ignore the crucial piece of evidence: the // payload commitment and metadata. Instead we send only one of the three "OR" required fields. // This should result in the proposal failing to be sent. - let inputs = vec![serial![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - ]]; + let inputs = vec![serial![QuorumProposalRecv(proposals[1].clone(), leaders[1])]]; let expectations = vec![Expectations::from_outputs(vec![])]; @@ -655,3 +658,4 @@ async fn test_quorum_proposal_task_with_incomplete_events() { }; run_test![inputs, script].await; } + From 263ac4242108ff7e106c84071fe749693697912f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Jun 2024 10:33:34 -0400 Subject: [PATCH 1088/1393] Bump dashmap from 5.5.3 to 6.0.0 (#3338) Bumps [dashmap](https://github.com/xacrimon/dashmap) from 5.5.3 to 6.0.0. - [Release notes](https://github.com/xacrimon/dashmap/releases) - [Commits](https://github.com/xacrimon/dashmap/compare/v.5.5.3...v6.0.0) --- updated-dependencies: - dependency-name: dashmap dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 3bb65b3f99..06e8b41e50 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -86,7 +86,7 @@ bimap = "0.6" clap = { workspace = true, optional = true } committable = { workspace = true } custom_debug = { workspace = true } -dashmap = "5" +dashmap = "6" either = { workspace = true } futures = { workspace = true } hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 2d1cea56b4..d7c2f2c3b8 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -36,7 +36,7 @@ chrono = { workspace = true } clap = { workspace = true, optional = true } committable = { workspace = true } custom_debug = { workspace = true } -dashmap = "5" +dashmap = "6" derive_more = "0.99" either = { workspace = true } ethereum-types = { workspace = true } From 9809b3471e77cac811424f8b90abe8d7b105eeb1 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 19 Jun 2024 14:55:17 -0400 Subject: [PATCH 1089/1393] [CATCHUP] Fetch Proposals in Dependecny Task (#3339) * fetch proposal from dep task * emit validated event from fetch_proposal * fetch proposal when trying to propose * fix dep build * fix dep unit test --- task-impls/src/consensus/helpers.rs | 37 ++++----- .../src/quorum_proposal/dependency_handle.rs | 13 +++- .../src/quorum_proposal_recv/handlers.rs | 76 +++++++++++-------- task-impls/src/quorum_vote/mod.rs | 31 ++++++-- testing/src/predicates/event.rs | 11 +++ .../tests_1/quorum_proposal_recv_task.rs | 3 +- 6 files changed, 111 insertions(+), 60 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index d2718e8de4..67eb919382 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -3,8 +3,11 @@ use std::{ sync::Arc, }; +use crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}; +use anyhow::bail; use anyhow::{ensure, Context, Result}; -use async_broadcast::Sender; +use async_broadcast::{broadcast, Sender}; +use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -32,10 +35,6 @@ use { consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, helpers::AnyhowTracing, }, - crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}, - anyhow::bail, - async_broadcast::broadcast, - async_compatibility_layer::art::async_timeout, async_compatibility_layer::art::{async_sleep, async_spawn}, chrono::Utc, core::time::Duration, @@ -535,8 +534,7 @@ pub async fn publish_proposal_if_able( } /// Trigger a request to the network for a proposal for a view and wait for the response -#[cfg(not(feature = "dependency-tasks"))] -async fn fetch_proposal( +pub(crate) async fn fetch_proposal( view: TYPES::Time, event_stream: Sender>>, quorum_membership: Arc, @@ -555,11 +553,11 @@ async fn fetch_proposal( let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { bail!("Request for proposal failed"); }; - let view = proposal.data.view_number(); + let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { - bail!("Invalid justify_qc in proposal for view {}", *view); + bail!("Invalid justify_qc in proposal for view {}", *view_number); } let mut consensus_write = consensus.write().await; let leaf = Leaf::from_quorum_proposal(&proposal.data); @@ -567,20 +565,23 @@ async fn fetch_proposal( >::from_header(&proposal.data.block_header), ); - if let Err(e) = consensus_write.update_validated_state_map( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, + let view = View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, }, - ) { + }; + if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { tracing::trace!("{e:?}"); } consensus_write.update_saved_leaves(leaf.clone()); + broadcast_event( + HotShotEvent::ValidatedStateUpdated(view_number, view).into(), + &event_stream, + ) + .await; Ok(leaf) } diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index d2d5adf442..3bbd002083 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -5,7 +5,7 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_sleep; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use committable::Committable; use hotshot_task::{ @@ -24,7 +24,9 @@ use tracing::{debug, error}; use vbs::version::Version; use crate::{ - consensus::helpers::parent_leaf_and_state, events::HotShotEvent, helpers::broadcast_event, + consensus::helpers::{fetch_proposal, parent_leaf_and_state}, + events::HotShotEvent, + helpers::broadcast_event, }; /// Proposal dependency types. These types represent events that precipitate a proposal. @@ -185,6 +187,13 @@ impl HandleDepOutput for ProposalDependencyHandle { .validated_state_map() .contains_key(&high_qc_view_number) { + // The proposal for the high qc view is missing, try to get it asynchronously + let memberhsip = Arc::clone(&self.quorum_membership); + let sender = self.sender.clone(); + let consensus = Arc::clone(&self.consensus); + async_spawn(async move { + fetch_proposal(high_qc_view_number, sender, memberhsip, consensus).await + }); // Block on receiving the event from the event stream. EventDependency::new( self.receiver.clone(), diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 2f28139f45..b191b594c7 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -24,7 +24,9 @@ use tracing::{debug, error, warn}; use super::QuorumProposalRecvTaskState; use crate::{ consensus::{ - helpers::{validate_proposal_safety_and_liveness, validate_proposal_view_and_certs}, + helpers::{ + fetch_proposal, validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, + }, view_change::{update_view, SEND_VIEW_CHANGE_EVENT}, }, events::HotShotEvent, @@ -154,40 +156,52 @@ pub(crate) async fn handle_quorum_proposal_recv { - if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { - Some((leaf, Arc::clone(&state))) - } else { - bail!("Parent state not found! Consensus internally inconsistent"); - } - } - None => None, - }; - - if justify_qc.view_number() > consensus_read.high_qc().view_number { - if let Err(e) = task_state - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - bail!("Failed to store High QC, not voting; error = {:?}", e); + // Get the parent leaf and state. + let mut parent_leaf = task_state + .consensus + .read() + .await + .saved_leaves() + .get(&justify_qc.data.leaf_commit) + .cloned(); + + parent_leaf = match parent_leaf { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + event_sender.clone(), + Arc::clone(&task_state.quorum_membership), + Arc::clone(&task_state.consensus), + ) + .await + .ok(), + }; + let consensus_read = task_state.consensus.read().await; + + let parent = match parent_leaf { + Some(leaf) => { + if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { + Some((leaf, Arc::clone(&state))) + } else { + bail!("Parent state not found! Consensus internally inconsistent"); } } - - parent + None => None, }; + if justify_qc.view_number() > consensus_read.high_qc().view_number { + if let Err(e) = task_state + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + bail!("Failed to store High QC, not voting; error = {:?}", e); + } + } + drop(consensus_read); + let mut consensus_write = task_state.consensus.write().await; if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { tracing::trace!("{e:?}"); diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 599185f020..a4c5f50d94 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -37,6 +37,7 @@ use tracing::{debug, error, instrument, trace, warn}; use vbs::version::Version; use crate::{ + consensus::helpers::fetch_proposal, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, quorum_vote::handlers::handle_quorum_proposal_validated, @@ -92,19 +93,33 @@ impl + 'static> VoteDependencyHand proposed_leaf: &Leaf, vid_share: &Proposal>, ) -> Result<()> { - let consensus_reader = self.consensus.read().await; let justify_qc = &proposed_leaf.justify_qc(); // Justify qc's leaf commitment should be the same as the parent's leaf commitment. - let parent = consensus_reader + let mut maybe_parent = self + .consensus + .read() + .await .saved_leaves() .get(&justify_qc.date().leaf_commit) - .cloned() - .context(format!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.date().leaf_commit, - proposed_leaf.view_number(), - ))?; + .cloned(); + maybe_parent = match maybe_parent { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + self.sender.clone(), + Arc::clone(&self.quorum_membership), + Arc::clone(&self.consensus), + ) + .await + .ok(), + }; + let parent = maybe_parent.context(format!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.date().leaf_commit, + proposed_leaf.view_number(), + ))?; + let consensus_reader = self.consensus.read().await; let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { bail!("Parent state not found! Consensus internally inconsistent") diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 3db76afbe1..81a33fa639 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -208,6 +208,17 @@ where Box::new(EventPredicate { check, info }) } +pub fn quorum_proposal_missing() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumProposalRequest".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(*e.clone(), QuorumProposalRequest(..)) + }); + Box::new(EventPredicate { check, info }) +} + pub fn quorum_proposal_send() -> Box> where TYPES: NodeType, diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 54fadb98c6..a90c4cc566 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -13,7 +13,7 @@ use hotshot_task_impls::{ }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf_and_state, build_system_handle}, - predicates::event::{all_predicates, exact, vote_now}, + predicates::event::{all_predicates, quorum_proposal_missing, exact, vote_now}, script::InputOrder, serial, view_generator::TestViewGenerator, @@ -191,6 +191,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), ), )), + quorum_proposal_missing(), exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), vote_now(), ])]; From f5d0b8c46c03ace09edf2b54f386506444798f68 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 20 Jun 2024 17:07:17 +0200 Subject: [PATCH 1090/1393] Fix view (#3343) --- task-impls/src/quorum_vote/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index a4c5f50d94..34b15a8fc0 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -528,7 +528,7 @@ impl> QuorumVoteTaskState Date: Thu, 20 Jun 2024 12:42:05 -0400 Subject: [PATCH 1091/1393] Declutter CI Logs (#3347) * fix ci logs * remove verbose flag from tests * lints --- hotshot/src/lib.rs | 13 +++++++------ testing/src/test_runner.rs | 2 -- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 7da7f102f6..e5f85fe960 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -198,9 +198,11 @@ impl> SystemContext { /// /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. + /// + /// # Errors + /// - #[allow(clippy::too_many_arguments)] - #[instrument(skip(private_key, memberships, networks, initializer, metrics, storage))] - pub async fn new( + pub fn new( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, nonce: u64, @@ -210,7 +212,7 @@ impl> SystemContext { initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, - ) -> Result, HotShotError> { + ) -> Arc { debug!("Creating a new hotshot"); let consensus_metrics = Arc::new(metrics); @@ -304,7 +306,7 @@ impl> SystemContext { decided_upgrade_certificate, }); - Ok(inner) + inner } /// "Starts" consensus by sending a `QcFormed`, `ViewChange`, and `ValidatedStateUpdated` events @@ -555,8 +557,7 @@ impl> SystemContext { initializer, metrics, storage, - ) - .await?; + ); let handle = Arc::clone(&hotshot).run_tasks().await; let (tx, rx) = hotshot.internal_event_stream.clone(); diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index aff3e7dbe1..e99f852cfc 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -460,8 +460,6 @@ where ConsensusMetricsValue::default(), storage, ) - .await - .expect("Could not init hotshot") } } From c6b86117d84fd99c41118106c85128ec3d3198b1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 20 Jun 2024 14:00:55 -0700 Subject: [PATCH 1092/1393] [Auto Bench] Integrate push-based CDN to scripts of auto benchmarking on AWS (#3135) * README for push-cdn * Readme update * Readme update * update readme * update readme * scripts for benchmarks with cdn * scripts for benchmarks with cdn * script update * add push-cdn related files * add push-cdn related files * edit gitignore * upd readme * cdn script update * config update * restore config * available to run on ecs * scripts done * merge main * fix bug * again..commit for branch switch * script again * fix cdn_marshal_address in script * merge orchestrator and add fixed_leader_number * commit script again * tweak script * adding access to remote server in script * a more efficient script * lint * partially solve comment * partially solve comment * readme for benchmarks * neat * lint * run dockers * rebuild validator-push-cdn docker * more details in README * some fix and update * lint * random tx * debug * more results and better script * [Auto Bench] Scripts for benchmark with CDN + GPU VID (#3209) * init push cdn_gpu auto script * scripts for gpuvid with cdn * a more parameterizable script * more scripts and updated readme * tested use of remote script * more results and twist in scripts * uncomment in scripts * more results for large tx * finish cdn_gpu script * more results * more results * lint * [Auto Benchmarks] Builder only on DA nodes with benchmarks (#3234) * init push cdn_gpu auto script * scripts for gpuvid with cdn * a more parameterizable script * more scripts and updated readme * tested use of remote script * more results and twist in scripts * uncomment in scripts * more results for large tx * builder on da nodes * assign builder_address by fetching local ip ... * solve divergence * retrieve local ip for validators * rename file * minor changes * more results * more runs * script update: allow more brokers now * gpu script update * readme update * readme update * update scripts according to readme * conflict solved * conflict solved * neat * re-orgnaize benchmark related scripts into one sub-folder * re-orgnaize benchmark related scripts into one sub-folder * cargo fmt * more results * remove old results * cargo fmt * clippy * remove webserver script * Choose builder url on orchestrator * Multiple builders * Fix lints * Fix malformed URLs * no second broker needed * more results * Refactor URL generation in examples (#3346) * Refactor URL generation in examples * Lints --------- Co-authored-by: Artemii Gerasimovich --- examples/Cargo.toml | 4 + examples/combined/all.rs | 15 +-- examples/combined/multi-validator.rs | 2 +- examples/combined/validator.rs | 4 +- examples/infra/mod.rs | 174 +++++++++++++++++++------ examples/libp2p/all.rs | 10 +- examples/libp2p/multi-validator.rs | 2 +- examples/libp2p/validator.rs | 4 +- examples/orchestrator.rs | 2 +- examples/push-cdn/README.md | 68 ++++++++++ examples/push-cdn/all.rs | 5 +- examples/push-cdn/multi-validator.rs | 41 ++++++ examples/push-cdn/validator.rs | 6 +- examples/push-cdn/whitelist-adapter.rs | 1 + orchestrator/api.toml | 15 +++ orchestrator/run-config.toml | 2 +- orchestrator/src/client.rs | 60 ++++++++- orchestrator/src/lib.rs | 111 +++++++++++++++- testing/src/block_builder/mod.rs | 5 +- testing/src/block_builder/random.rs | 9 +- testing/src/block_builder/simple.rs | 26 +--- testing/src/test_runner.rs | 7 +- testing/tests/tests_1/block_builder.rs | 22 ++-- 23 files changed, 487 insertions(+), 108 deletions(-) create mode 100644 examples/push-cdn/README.md create mode 100644 examples/push-cdn/multi-validator.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 06e8b41e50..1b328cc452 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -65,6 +65,10 @@ path = "push-cdn/all.rs" name = "validator-push-cdn" path = "push-cdn/validator.rs" +[[example]] +name = "multi-validator-push-cdn" +path = "push-cdn/multi-validator.rs" + [[example]] name = "cdn-broker" path = "push-cdn/broker.rs" diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 3422b4842a..2ec6ede3ce 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -2,10 +2,7 @@ /// types used for this example pub mod types; -use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - path::Path, -}; +use std::path::Path; use async_compatibility_layer::{ art::async_spawn, @@ -20,6 +17,7 @@ use hotshot::{ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; +use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; use rand::{rngs::StdRng, RngCore, SeedableRng}; use tracing::{error, instrument}; @@ -137,17 +135,16 @@ async fn main() { for i in 0..config.config.num_nodes_with_stake.into() { // Calculate our libp2p advertise address, which we will later derive the // bind address from for example purposes. - let advertise_address = SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - 8000 + (u16::try_from(i).expect("failed to create advertise address")), - ); - + let advertise_address = gen_local_address::(i); let orchestrator_url = orchestrator_url.clone(); + let builder_address = gen_local_address::(i); + let node = async_spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, advertise_address: Some(advertise_address), + builder_address: Some(builder_address), network_config_file: None, }, ) diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index 9d12549eee..2c36af9f28 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -24,7 +24,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!("connecting to orchestrator at {:?}", args.url); + tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 36ee261377..b4a09949cd 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -6,7 +6,7 @@ use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; -use tracing::{info, instrument}; +use tracing::{debug, instrument}; use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; @@ -35,6 +35,6 @@ async fn main() { ), ); - info!("connecting to orchestrator at {:?}", args.url); + debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index e5d581b0d5..ca7ee73916 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -44,7 +44,7 @@ use hotshot_orchestrator::{ }, }; use hotshot_testing::block_builder::{ - RandomBuilderImplementation, SimpleBuilderConfig, SimpleBuilderImplementation, + BuilderTask, RandomBuilderImplementation, SimpleBuilderImplementation, TestBuilderImplementation, }; use hotshot_types::{ @@ -62,7 +62,7 @@ use hotshot_types::{ }; use rand::{rngs::StdRng, SeedableRng}; use surf_disco::Url; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; #[derive(Debug, Clone)] /// Arguments passed to the orchestrator @@ -153,7 +153,7 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig() -> (NetworkConfig("config_file") { @@ -263,6 +271,9 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig("builder") { config.builder = *builder_type; } + if let Some(cdn_marshal_address_string) = matches.get_one::("cdn_marshal_address") { + config.cdn_marshal_address = Some(cdn_marshal_address_string.to_string()); + } (config, orchestrator_url) } @@ -309,7 +320,7 @@ pub async fn run_orchestrator( let _ = hotshot_orchestrator::run_orchestrator::(config, url).await; } -/// Helper function to calculate the nuymber of transactions to send per node per round +/// Helper function to calculate the number of transactions to send per node per round #[allow(clippy::cast_possible_truncation)] fn calculate_num_tx_per_round( node_index: u64, @@ -337,7 +348,7 @@ where let mut txn_rng = StdRng::seed_from_u64(node_index); let mut transactions = Vec::new(); - for round in 0..rounds { + for _ in 0..rounds { for _ in 0..transactions_to_send_per_round { let txn = ::create_random_transaction( None, @@ -345,12 +356,7 @@ where transaction_size as u64, ); - // prepend destined view number to transaction - let view_execute_number: u64 = round as u64 + 4; - let mut bytes = txn.into_bytes(); - bytes[0..8].copy_from_slice(&view_execute_number.to_be_bytes()); - - transactions.push(TestTransaction::new(bytes)); + transactions.push(txn); } } transactions @@ -544,6 +550,7 @@ pub trait RunDa< if let Some(size) = block_size { total_transactions_committed += size; + debug!("[{node_index}] got block with size: {:?}", size); } num_successful_commits += leaf_chain.len(); @@ -587,6 +594,7 @@ pub trait RunDa< let avg_latency_in_sec = total_latency / num_latency; println!("[{node_index}]: throughput: {throughput_bytes_per_sec} bytes/sec, avg_latency: {avg_latency_in_sec} sec."); BenchResults { + partial_results: "Unset".to_string(), avg_latency_in_sec, num_latency, minimum_latency_in_sec: minimum_latency, @@ -952,35 +960,24 @@ pub async fn main_entry_point< .await .expect("failed to get config"); - let builder_task = match run_config.builder { - BuilderType::External => None, - BuilderType::Random => { - let (builder_task, builder_url) = - >::start( - run_config.config.num_nodes_with_stake.into(), - run_config.random_builder.clone().unwrap_or_default(), - HashMap::new(), - ) - .await; + let builder_task = initialize_builder(&mut run_config, &args, &orchestrator_client).await; - run_config.config.builder_urls = vec1::vec1![builder_url]; - - Some(builder_task) - } - BuilderType::Simple => { - let (builder_task, builder_url) = - >::start( - run_config.config.num_nodes_with_stake.into(), - SimpleBuilderConfig::default(), - HashMap::new(), - ) - .await; - - run_config.config.builder_urls = vec1::vec1![builder_url]; - - Some(builder_task) - } - }; + run_config.config.builder_urls = orchestrator_client + .get_builder_addresses() + .await + .try_into() + .expect("Orchestrator didn't provide any builder addresses"); + + debug!( + "Assigned urls from orchestrator: {}", + run_config + .config + .builder_urls + .iter() + .map(ToString::to_string) + .collect::>() + .join(",") + ); info!("Initializing networking"); let run = RUNDA::initialize_networking(run_config.clone(), args.advertise_address).await; @@ -1033,3 +1030,102 @@ pub async fn main_entry_point< .await; orchestrator_client.post_bench_results(bench_results).await; } + +/// Sets correct builder_url and registers a builder with orchestrator if this node is running one. +/// Returns a `BuilderTask` if this node is going to be running a builder. +async fn initialize_builder< + TYPES: NodeType< + Transaction = TestTransaction, + BlockHeader = TestBlockHeader, + InstanceState = TestInstanceState, + >, +>( + run_config: &mut NetworkConfig<::SignatureKey>, + args: &ValidatorArgs, + orchestrator_client: &OrchestratorClient, +) -> Option>> +where + ::ValidatedState: TestableState, + ::BlockPayload: TestableBlock, + Leaf: TestableLeaf, +{ + if !run_config.config.my_own_validator_config.is_da { + return None; + } + + let advertise_urls: Vec; + let bind_address: Url; + + match args.builder_address { + None => { + let port = portpicker::pick_unused_port().expect("Failed to pick an unused port"); + advertise_urls = local_ip_address::list_afinet_netifas() + .expect("Couldn't get list of local IP addresses") + .into_iter() + .map(|(_name, ip)| ip) + .filter(|ip| !ip.is_loopback()) + .map(|ip| match ip { + IpAddr::V4(addr) => Url::parse(&format!("http://{addr}:{port}")).unwrap(), + IpAddr::V6(addr) => Url::parse(&format!("http://[{addr}]:{port}")).unwrap(), + }) + .collect(); + bind_address = Url::parse(&format!("http://0.0.0.0:{port}")).unwrap(); + } + Some(ref addr) => { + bind_address = Url::parse(&format!("http://{addr}")).expect("Valid URL"); + advertise_urls = vec![bind_address.clone()]; + } + } + + match run_config.builder { + BuilderType::External => None, + BuilderType::Random => { + let builder_task = + >::start( + run_config.config.num_nodes_with_stake.into(), + bind_address, + run_config.random_builder.clone().unwrap_or_default(), + HashMap::new(), + ) + .await; + + orchestrator_client + .post_builder_addresses(advertise_urls) + .await; + + Some(builder_task) + } + BuilderType::Simple => { + let builder_task = + >::start( + run_config.config.num_nodes_with_stake.into(), + bind_address, + (), + HashMap::new(), + ) + .await; + + orchestrator_client + .post_builder_addresses(advertise_urls) + .await; + + Some(builder_task) + } + } +} + +/// Base port for validator +pub const VALIDATOR_BASE_PORT: u16 = 8000; +/// Base port for builder +pub const BUILDER_BASE_PORT: u16 = 9000; + +/// Generate a local address for node with index `node_index`, offsetting from port `BASE_PORT`. +/// # Panics +/// If `node_index` is too large to fit in a `u16` +#[must_use] +pub fn gen_local_address(node_index: usize) -> SocketAddr { + SocketAddr::new( + IpAddr::V4(Ipv4Addr::LOCALHOST), + BASE_PORT + (u16::try_from(node_index).expect("node index too large")), + ) +} diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 500e90992d..4643019966 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -2,14 +2,13 @@ /// types used for this example pub mod types; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use async_compatibility_layer::{ art::async_spawn, logging::{setup_backtrace, setup_logging}, }; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; +use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; use tracing::instrument; use crate::{ @@ -42,16 +41,15 @@ async fn main() { for i in 0..config.config.num_nodes_with_stake.into() { // Calculate our libp2p advertise address, which we will later derive the // bind address from for example purposes. - let advertise_address = SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - 8000 + (u16::try_from(i).expect("failed to create advertise address")), - ); + let advertise_address = gen_local_address::(i); + let builder_address = gen_local_address::(i); let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, advertise_address: Some(advertise_address), + builder_address: Some(builder_address), network_config_file: None, }, ) diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index 3b212a5da1..2ffbc53ddb 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -24,7 +24,7 @@ async fn main() { setup_logging(); setup_backtrace(); let args = MultiValidatorArgs::parse(); - tracing::error!("connecting to orchestrator at {:?}", args.url); + tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 9c0b8b60f8..810522820f 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -6,7 +6,7 @@ use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; -use tracing::{info, instrument}; +use tracing::{debug, instrument}; use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; @@ -34,6 +34,6 @@ async fn main() { ), ); - info!("connecting to orchestrator at {:?}", args.url); + debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; } diff --git a/examples/orchestrator.rs b/examples/orchestrator.rs index 6e352e9ffc..732264a876 100644 --- a/examples/orchestrator.rs +++ b/examples/orchestrator.rs @@ -1,4 +1,4 @@ -//! A orchestrator using the web server +//! A orchestrator use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot_example_types::state_types::TestTypes; diff --git a/examples/push-cdn/README.md b/examples/push-cdn/README.md new file mode 100644 index 0000000000..c49f7dedb2 --- /dev/null +++ b/examples/push-cdn/README.md @@ -0,0 +1,68 @@ +Steps +--------------- + +KeyDB is the ephemeral database, it's like Redis but with extra features. The only thing we run it with is with `--requirepass` to set a password. + +**Marshals:** +The marshal is the entry point of the push CDN, all users connect there first. It tells users which broker to connect to. + +- `-d` is the "discovery endpoint", which in this case is the URL of KeyDB. +- `-b` is the bind port. This is what you would set in run_config.toml for cdn_broker_marshal_endpoint +- `-m` is metrics stuff. You shouldn't have to use that + + +**Brokers:** +In a run with multiple machines, we want two brokers. With one machine, it's probably fine to do one broker. These are what route the messages. Here are the relevant command line arguments: + +- `-d` is the "discovery endpoint", which in this case is the URL of KeyDB. +- `--public-bind-endpoint`: the endpoint which we bind to locally for users to connect to (e.g. 0.0.0.0:1740) +- `--public-advertise-endpoint`: the endpoint which we advertise to users (e.g. my.public.ip:1740) +- `--private-bind-endpoint`: the endpoint which we bind to locally for brokers to connect to (e.g. 0.0.0.0:1741) +- `--private-advertise-endpoint`: the endpoint which we advertise to brokers (e.g. my.public.ip:1741) +- `-m` is metrics stuff. You shouldn't have to use that +For brokers, there is a magic value called `local_ip`. This resolves to the local IP address, which skips the need for talking to the AWS metadata server. For in-AWS uses, the following configuration is probably fine: +`cdn-broker --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741`. You won't need to put this port or values anywhere, as the marshal does everything for you. + +Examples: +--------------- + +**Run Locally** + +`just async_std example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml` + +OR + +``` +docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb +just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just async_std example orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 +just async_std example multi-validator-push-cdn -- 10 http://127.0.0.1:4444 +``` + +**Run with GPU-VID** +``` +docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb +just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just async_std example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 +just async_std example_gpuvid_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 +sleep 1m +just async_std example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 +``` + +Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. + + +If you don't have a gpu but want to test out fixed leader, you can run: +``` +docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb +just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just async_std example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 +just async_std example_fixed_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 +sleep 1m +just async_std example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 +``` + +Remember, you have to run leaders first, then other validators, so that leaders will have lower index. \ No newline at end of file diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 25fb9beba1..0092889c6f 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -12,6 +12,7 @@ use hotshot::{ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; +use infra::{gen_local_address, BUILDER_BASE_PORT}; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, @@ -117,13 +118,15 @@ async fn main() { // Start the proper number of nodes let mut nodes = Vec::new(); - for _ in 0..(config.config.num_nodes_with_stake.get()) { + for i in 0..(config.config.num_nodes_with_stake.get()) { let orchestrator_url = orchestrator_url.clone(); + let builder_address = gen_local_address::(i); let node = async_spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, advertise_address: None, + builder_address: Some(builder_address), network_config_file: None, }, ) diff --git a/examples/push-cdn/multi-validator.rs b/examples/push-cdn/multi-validator.rs new file mode 100644 index 0000000000..c9c6b1beb1 --- /dev/null +++ b/examples/push-cdn/multi-validator.rs @@ -0,0 +1,41 @@ +//! A multi validator +use async_compatibility_layer::{ + art::async_spawn, + logging::{setup_backtrace, setup_logging}, +}; +use clap::Parser; +use hotshot_example_types::state_types::TestTypes; +use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tracing::instrument; + +use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; + +/// types used for this example +pub mod types; + +/// general infra used for this example +#[path = "../infra/mod.rs"] +pub mod infra; + +#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[instrument] +async fn main() { + setup_logging(); + setup_backtrace(); + let args = MultiValidatorArgs::parse(); + tracing::debug!("connecting to orchestrator at {:?}", args.url); + let mut nodes = Vec::new(); + for node_index in 0..args.num_nodes { + let args = args.clone(); + + let node = async_spawn(async move { + infra::main_entry_point::( + ValidatorArgs::from_multi_args(args, node_index), + ) + .await; + }); + nodes.push(node); + } + let _result = futures::future::join_all(nodes).await; +} diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index 35d0b4470b..e806178549 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -1,9 +1,9 @@ -//! A validator using the web server +//! A validator use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; -use tracing::{info, instrument}; +use tracing::{debug, instrument}; use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; @@ -21,6 +21,6 @@ async fn main() { setup_logging(); setup_backtrace(); let args = ValidatorArgs::parse(); - info!("connecting to orchestrator at {:?}", args.url); + debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; } diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index 9666e459f7..9a80b227aa 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -47,6 +47,7 @@ async fn main() -> Result<()> { let orchestrator_client = OrchestratorClient::new(ValidatorArgs { url: Url::from_str(&args.orchestrator_url).with_context(|| "Invalid URL")?, advertise_address: None, + builder_address: None, network_config_file: None, }); diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 4915e0c9e3..25fcb2b7c2 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -87,3 +87,18 @@ METHOD = "POST" DOC = """ Post whether the orchestrator should start the run immediately, with the nodes that have already registered. """ + +# GET builder URLs +[route.get_builders] +PATH = ["builders"] +DOC = """ +Get list of builder URLs +""" + +# POST builder URL +[route.post_builder] +PATH = ["builder"] +METHOD = "POST" +DOC = """ +Register a builder URL to orchestrator's pool of builder URLs +""" diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index eb3ed0703d..cc49703802 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -106,7 +106,7 @@ nanos = 200_000_000 # TODO (Keyao) Clean up configuration parameters. # [config.builder_timeout] -secs = 2 +secs = 10 nanos = 0 [config.upgrade] diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 3511c943ea..dc604b47ee 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -22,6 +22,8 @@ pub struct OrchestratorClient { /// Struct describing a benchmark result #[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default, PartialEq)] pub struct BenchResults { + /// Whether it's partial collected results + pub partial_results: String, /// The average latency of the transactions pub avg_latency_in_sec: i64, /// The number of transactions that were latency measured @@ -48,7 +50,7 @@ impl BenchResults { /// printout the results of one example run pub fn printout(&self) { println!("====================="); - println!("Benchmark results:"); + println!("{0} Benchmark results:", self.partial_results); println!( "Average latency: {} seconds, Minimum latency: {} seconds, Maximum latency: {} seconds", self.avg_latency_in_sec, self.minimum_latency_in_sec, self.maximum_latency_in_sec @@ -76,6 +78,8 @@ pub struct BenchResultsDownloadConfig { pub total_nodes: usize, /// The size of the da committee pub da_committee_size: usize, + /// The number of fixed_leader_for_gpuvid when we enable the feature [fixed-leader-election] + pub fixed_leader_for_gpuvid: usize, /// Number of transactions submitted per round pub transactions_per_round: usize, /// The size of each transaction in bytes @@ -84,7 +88,13 @@ pub struct BenchResultsDownloadConfig { pub rounds: usize, /// The type of leader election: static, fixed, random pub leader_election_type: String, + // Results starting here + /// Whether the results are partially collected + /// "One" when the results are collected for one node + /// "Half" when the results are collecte for half running nodes if not all nodes terminate successfully + /// "Full" if the results are successfully collected from all nodes + pub partial_results: String, /// The average latency of the transactions pub avg_latency_in_sec: i64, /// The minimum latency of the transactions @@ -116,6 +126,8 @@ pub struct ValidatorArgs { pub url: Url, /// The optional advertise address to use for Libp2p pub advertise_address: Option, + /// Optional address to run builder on. Address must be accessible by other nodes + pub builder_address: Option, /// An optional network config file to save to/load from /// Allows for rejoining the network on a complete state loss #[arg(short, long)] @@ -166,6 +178,7 @@ impl ValidatorArgs { Self { url: multi_args.url, advertise_address: multi_args.advertise_address, + builder_address: None, network_config_file: multi_args .network_config_file .map(|s| format!("{s}-{node_index}")), @@ -297,6 +310,51 @@ impl OrchestratorClient { .await } + /// Registers a builder URL with the orchestrator + /// + /// # Panics + /// if unable to serialize `address` + pub async fn post_builder_addresses(&self, addresses: Vec) { + let send_builder_f = |client: Client| { + let request_body = vbs::Serializer::::serialize(&addresses) + .expect("Failed to serialize request"); + + async move { + let result: Result<_, ClientError> = client + .post("api/builder") + .body_binary(&request_body) + .unwrap() + .send() + .await + .inspect_err(|err| tracing::error!("{err}")); + result + } + .boxed() + }; + self.wait_for_fn_from_orchestrator::<_, _, ()>(send_builder_f) + .await; + } + + /// Requests a builder URL from orchestrator + pub async fn get_builder_addresses(&self) -> Vec { + // Define the request for post-register configurations + let get_builder = |client: Client| { + async move { + let result = client.get("api/builders").send().await; + + if let Err(ref err) = result { + tracing::error!("{err}"); + } + + result + } + .boxed() + }; + + // Loop until successful + self.wait_for_fn_from_orchestrator(get_builder).await + } + /// Sends my public key to the orchestrator so that it can collect all public keys /// And get the updated config /// Blocks until the orchestrator collects all peer's public keys/configs diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 17db9f0526..798923687e 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -5,12 +5,18 @@ pub mod client; /// Configuration for the orchestrator pub mod config; -use std::{collections::HashMap, fs::OpenOptions, io, io::ErrorKind}; +use std::{ + collections::HashMap, + fs::OpenOptions, + io::{self, ErrorKind}, + time::Duration, +}; use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; +use config::BuilderType; use csv::Writer; -use futures::FutureExt; +use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; use hotshot_types::{constants::Base, traits::signature_key::SignatureKey, PeerConfig}; use libp2p::{ identity::{ @@ -84,11 +90,18 @@ struct OrchestratorState { manual_start_allowed: bool, /// Whether we are still accepting new keys for registration accepting_new_keys: bool, + /// Builder address pool + builders: Vec, } impl OrchestratorState { /// create a new [`OrchestratorState`] pub fn new(network_config: NetworkConfig) -> Self { + let builders = if matches!(network_config.builder, BuilderType::External) { + network_config.config.builder_urls.clone().into() + } else { + vec![] + }; OrchestratorState { latest_index: 0, tmp_latest_index: 0, @@ -101,6 +114,7 @@ impl OrchestratorState { nodes_post_results: 0, manual_start_allowed: true, accepting_new_keys: true, + builders, } } @@ -131,10 +145,12 @@ impl OrchestratorState { commit_sha: self.config.commit_sha.clone(), total_nodes: self.config.config.num_nodes_with_stake.into(), da_committee_size: self.config.config.da_staked_committee_size, + fixed_leader_for_gpuvid: self.config.config.fixed_leader_for_gpuvid, transactions_per_round: self.config.transactions_per_round, transaction_size: self.bench_results.transaction_size_in_bytes, rounds: self.config.rounds, leader_election_type: OrchestratorState::::election_type(), + partial_results: self.bench_results.partial_results.clone(), avg_latency_in_sec: self.bench_results.avg_latency_in_sec, minimum_latency_in_sec: self.bench_results.minimum_latency_in_sec, maximum_latency_in_sec: self.bench_results.maximum_latency_in_sec, @@ -211,6 +227,14 @@ pub trait OrchestratorApi { /// # Errors /// if unable to serve fn post_manual_start(&mut self, password_bytes: Vec) -> Result<(), ServerError>; + /// post endpoint for registering a builder with the orchestrator + /// # Errors + /// if unable to serve + fn post_builder(&mut self, builder: Url) -> Result<(), ServerError>; + /// get endpoints for builders + /// # Errors + /// if not all builders are registered yet + fn get_builders(&self) -> Result, ServerError>; } impl OrchestratorApi for OrchestratorState @@ -488,15 +512,55 @@ where } } self.nodes_post_results += 1; - if self.nodes_post_results >= (self.config.config.num_nodes_with_stake.get() as u64) { + if self.bench_results.partial_results == "Unset" { + self.bench_results.partial_results = "One".to_string(); + self.bench_results.printout(); + self.output_to_csv(); + } + if self.bench_results.partial_results == "One" + && self.nodes_post_results >= (self.config.config.da_staked_committee_size as u64 / 2) + { + self.bench_results.partial_results = "HalfDA".to_string(); + self.bench_results.printout(); + self.output_to_csv(); + } + if self.bench_results.partial_results == "HalfDA" + && self.nodes_post_results >= (self.config.config.num_nodes_with_stake.get() as u64 / 2) + { + self.bench_results.partial_results = "Half".to_string(); + self.bench_results.printout(); + self.output_to_csv(); + } + if self.bench_results.partial_results != "Full" + && self.nodes_post_results >= (self.config.config.num_nodes_with_stake.get() as u64) + { + self.bench_results.partial_results = "Full".to_string(); self.bench_results.printout(); self.output_to_csv(); } Ok(()) } + + fn post_builder(&mut self, builder: Url) -> Result<(), ServerError> { + self.builders.push(builder); + Ok(()) + } + + fn get_builders(&self) -> Result, ServerError> { + if !matches!(self.config.builder, BuilderType::External) + && self.builders.len() != self.config.config.da_staked_committee_size + { + return Err(ServerError { + status: tide_disco::StatusCode::NOT_FOUND, + message: "Not all builders are registered yet".to_string(), + }); + } + Ok(self.builders.clone()) + } } /// Sets up all API routes +#[allow(clippy::too_many_lines)] fn define_api() -> Result, ApiError> where State: 'static + Send + Sync + ReadState + WriteState, @@ -591,6 +655,47 @@ where state.post_run_results(metrics.unwrap()) } .boxed() + })? + .post("post_builder", |req, state| { + async move { + // Read the bytes from the body + let mut body_bytes = req.body_bytes(); + body_bytes.drain(..12); + + let Ok(urls) = vbs::Serializer::::deserialize::>(&body_bytes) else { + return Err(ServerError { + status: tide_disco::StatusCode::BAD_REQUEST, + message: "Malformed body".to_string(), + }); + }; + + let mut futures = urls + .into_iter() + .map(|url| async { + let client: surf_disco::Client = + surf_disco::client::Client::builder(url.clone()).build(); + if client.connect(Some(Duration::from_secs(2))).await { + Some(url) + } else { + None + } + }) + .collect::>() + .filter_map(futures::future::ready); + + if let Some(url) = futures.next().await { + state.post_builder(url) + } else { + Err(ServerError { + status: tide_disco::StatusCode::BAD_REQUEST, + message: "No reachable adddresses".to_string(), + }) + } + } + .boxed() + })? + .get("get_builders", |_req, state| { + async move { state.get_builders() }.boxed() })?; Ok(api) } diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index a5ccd51b80..cda850d855 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -27,7 +27,7 @@ pub mod random; pub use random::RandomBuilderImplementation; pub mod simple; -pub use simple::{SimpleBuilderConfig, SimpleBuilderImplementation}; +pub use simple::SimpleBuilderImplementation; #[async_trait] pub trait TestBuilderImplementation @@ -38,9 +38,10 @@ where async fn start( num_storage_nodes: usize, + url: Url, options: Self::Config, changes: HashMap, - ) -> (Box>, Url); + ) -> Box>; } pub trait BuilderTask: Send + Sync { diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index 0bc9a19920..64cc27d3ef 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -77,16 +77,15 @@ where async fn start( num_storage_nodes: usize, + url: Url, config: RandomBuilderConfig, changes: HashMap, - ) -> (Box>, Url) { - let port = portpicker::pick_unused_port().expect("No free ports"); - let url = Url::parse(&format!("http://0.0.0.0:{port}")).expect("Valid URL"); + ) -> Box> { let (change_sender, change_receiver) = broadcast(128); let (task, source) = Self::create(num_storage_nodes, config, changes, change_sender).await; - run_builder_source(url.clone(), change_receiver, source); - (Box::new(task), url) + run_builder_source(url, change_receiver, source); + Box::new(task) } } diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index ef3dccdd34..416002fe7d 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -76,38 +76,24 @@ impl SimpleBuilderImplementation { } } -/// Configuration for `SimpleBuilder` -pub struct SimpleBuilderConfig { - pub port: u16, -} - -impl Default for SimpleBuilderConfig { - fn default() -> Self { - Self { - port: portpicker::pick_unused_port().expect("No free ports"), - } - } -} - #[async_trait] impl TestBuilderImplementation for SimpleBuilderImplementation where ::InstanceState: Default, { - type Config = SimpleBuilderConfig; + type Config = (); async fn start( num_storage_nodes: usize, - config: Self::Config, + url: Url, + _config: Self::Config, changes: HashMap, - ) -> (Box>, Url) { - let url = Url::parse(&format!("http://0.0.0.0:{0}", config.port)).expect("Valid URL"); - + ) -> Box> { let (change_sender, change_receiver) = broadcast(128); let (source, task) = Self::create(num_storage_nodes, changes, change_sender).await; - run_builder_source(url.clone(), change_receiver, source); + run_builder_source(url, change_receiver, source); - (Box::new(task), url) + Box::new(task) } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index e99f852cfc..bd3bc61ea0 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -31,6 +31,7 @@ use hotshot_types::{ }, HotShotConfig, ValidatorConfig, }; +use tide_disco::Url; #[allow(deprecated)] use tracing::info; @@ -291,8 +292,12 @@ where let mut builder_tasks = Vec::new(); let mut builder_urls = Vec::new(); for metadata in &self.launcher.metadata.builders { - let (builder_task, builder_url) = B::start( + let builder_port = portpicker::pick_unused_port().expect("No free ports"); + let builder_url = + Url::parse(&format!("http://localhost:{builder_port}")).expect("Valid URL"); + let builder_task = B::start( config.num_nodes_with_stake.into(), + builder_url.clone(), B::Config::default(), metadata.changes.clone(), ) diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 3f81352895..7d76a6ac04 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -30,16 +30,18 @@ use tide_disco::Url; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_random_block_builder() { - let (task, api_url): (Box>, Url) = - RandomBuilderImplementation::start( - 1, - RandomBuilderConfig { - blocks_per_second: u32::MAX, - ..Default::default() - }, - HashMap::new(), - ) - .await; + let port = portpicker::pick_unused_port().expect("No free ports"); + let api_url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); + let task: Box> = RandomBuilderImplementation::start( + 1, + api_url.clone(), + RandomBuilderConfig { + blocks_per_second: u32::MAX, + ..Default::default() + }, + HashMap::new(), + ) + .await; task.start(Box::new(futures::stream::empty())); let builder_started = Instant::now(); From d692b01458897c7f9015a19719eab127bf58cacf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jun 2024 15:05:29 +0000 Subject: [PATCH 1093/1393] Bump displaydoc from 0.2.4 to 0.2.5 (#3352) --- types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/Cargo.toml b/types/Cargo.toml index 0ea8ef5e34..fcd35bd8d4 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -51,7 +51,7 @@ rand_chacha = { workspace = true } serde = { workspace = true } tagged-base64 = { workspace = true } vbs = { workspace = true } -displaydoc = { version = "0.2.3", default-features = false } +displaydoc = { version = "0.2.5", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = { workspace = true } vec1 = { workspace = true } From 83b74bef7bf2e3db648f9791424595a8f780855a Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 21 Jun 2024 11:40:23 -0400 Subject: [PATCH 1094/1393] Minor test fixes (again) (#3317) --- .../src/traits/networking/memory_network.rs | 1 - task-impls/src/consensus/helpers.rs | 1 + testing/src/consistency_task.rs | 184 ++++++++++++++++++ testing/src/lib.rs | 3 + testing/src/overall_safety_task.rs | 18 ++ testing/src/test_runner.rs | 26 ++- testing/src/test_task.rs | 2 +- testing/tests/tests_2/catchup.rs | 8 +- testing/tests/tests_2/push_cdn.rs | 2 +- .../tests/tests_2/test_with_failures_one.rs | 2 +- testing/tests/tests_4/test_with_failures_f.rs | 3 +- testing/tests/tests_5/combined_network.rs | 8 +- testing/tests/tests_5/timeout.rs | 2 +- testing/tests/tests_5/unreliable_network.rs | 4 +- 14 files changed, 243 insertions(+), 21 deletions(-) create mode 100644 testing/src/consistency_task.rs diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 4843ebc03a..e2e2266917 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -127,7 +127,6 @@ impl MemoryNetwork { } else { error!("Output queue receivers are shutdown"); } - warn!("Stream shutdown"); } } .instrument(info_span!("MemoryNetwork Background task", map = ?master_map)), diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 67eb919382..c1a7bbdbd5 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -9,6 +9,7 @@ use anyhow::{ensure, Context, Result}; use async_broadcast::{broadcast, Sender}; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; +#[cfg(not(feature = "dependency-tasks"))] #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use committable::{Commitment, Committable}; diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs new file mode 100644 index 0000000000..4b9ecc0f22 --- /dev/null +++ b/testing/src/consistency_task.rs @@ -0,0 +1,184 @@ +#![allow(clippy::unwrap_or_default)] +use std::collections::BTreeMap; + +use anyhow::{bail, ensure, Context, Result}; +use async_trait::async_trait; +use committable::Committable; +use hotshot_types::{ + data::Leaf, + event::{Event, EventType}, + traits::node_implementation::NodeType, +}; + +use crate::{ + overall_safety_task::OverallSafetyPropertiesDescription, + test_task::{TestResult, TestTaskState}, +}; + +trait Validatable { + fn valid(&self) -> Result<()>; +} + +/// Map from views to leaves for a single node, allowing multiple leaves for each view (because the node may a priori send us multiple leaves for a given view). +pub type NodeMap = BTreeMap<::Time, Vec>>; + +/// A sanitized map from views to leaves for a single node, with only a single leaf per view. +pub type NodeMapSanitized = BTreeMap<::Time, Leaf>; + +/// Validate that the `NodeMap` only has a single leaf per view. +pub fn sanitize_node_map( + node_map: &NodeMap, +) -> Result> { + let mut result = BTreeMap::new(); + + for (view, leaves) in node_map.iter() { + let mut reduced = leaves.clone(); + + reduced.dedup(); + + match reduced.len() { + 0 => {} + 1 => { + result.insert(*view, reduced[0].clone()); + } + _ => { + bail!( + "We have received inconsistent leaves for view {view:?}. Leaves:\n\n{leaves:?}" + ); + } + } + } + + Ok(result) +} + +/// For a NodeLeafMap, we validate that each leaf extends the preceding leaf. +impl Validatable for NodeMapSanitized { + fn valid(&self) -> Result<()> { + let leaf_pairs = self.values().zip(self.values().skip(1)); + + // Check that the child leaf follows the parent, possibly with a gap. + for (parent, child) in leaf_pairs { + ensure!( + child.justify_qc().view_number >= parent.view_number(), + "The node has provided leaf:\n\n{child:?}\n\nbut its quorum certificate points to a view before the most recent leaf:\n\n{parent:?}" + ); + + if child.justify_qc().view_number == parent.view_number() + && child.justify_qc().data.leaf_commit != parent.commit() + { + bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); + } + } + + Ok(()) + } +} + +/// A map from node ids to `NodeMap`s; note that the latter may have multiple leaves per view in principle. +pub type NetworkMap = BTreeMap>; + +/// A map from node ids to `NodeMapSanitized`s; the latter has been sanitized validated to have a single leaf per view. +pub type NetworkMapSanitized = BTreeMap>; + +/// Validate that each node has only produced one unique leaf per view, and produce a `NetworkMapSanitized`. +pub fn sanitize_network_map( + network_map: &NetworkMap, +) -> Result> { + let mut result = BTreeMap::new(); + + for (node, node_map) in network_map { + result.insert( + *node, + sanitize_node_map(node_map) + .context(format!("Node {node} produced inconsistent leaves."))?, + ); + } + + Ok(result) +} + +impl Validatable for NetworkMap { + fn valid(&self) -> Result<()> { + let sanitized = sanitize_network_map(self)?; + + sanitized.valid() + } +} + +/// For a NetworkLeafMap, we validate that no two nodes have submitted differing leaves for any given view, in addition to the individual NodeLeafMap checks. +impl Validatable for NetworkMapSanitized { + fn valid(&self) -> Result<()> { + // Invert the map by interchanging the roles of the node_id and view number. + let mut inverted_map = BTreeMap::new(); + for (node_id, node_map) in self.iter() { + node_map + .valid() + .context(format!("Node {node_id} has an invalid leaf history"))?; + + // validate each node's leaf map + for (view, leaf) in node_map.iter() { + let view_map = inverted_map.entry(*view).or_insert(BTreeMap::new()); + view_map.insert(*node_id, leaf.clone()); + } + } + + for (view, view_map) in inverted_map.iter() { + let mut leaves: Vec<_> = view_map.iter().collect(); + + leaves.dedup_by(|(_node_a, leaf_a), (_node_b, leaf_b)| leaf_a == leaf_b); + + ensure!( + leaves.len() <= 1, + view_map.iter().fold( + format!("The network does not agree on view {view:?}."), + |acc, (node, leaf)| { + format!("{acc}\n\nNode {node} sent us leaf:\n\n{leaf:?}") + } + ) + ); + } + + Ok(()) + } +} + +/// Data availability task state +pub struct ConsistencyTask { + /// A map from node ids to (leaves keyed on view number) + pub consensus_leaves: NetworkMap, + /// safety task requirements + pub safety_properties: OverallSafetyPropertiesDescription, +} + +#[async_trait] +impl TestTaskState for ConsistencyTask { + type Event = Event; + + /// Handles an event from one of multiple receivers. + async fn handle_event(&mut self, (message, id): (Self::Event, usize)) -> Result<()> { + if let Event { + event: EventType::Decide { leaf_chain, .. }, + .. + } = message + { + let map = &mut self.consensus_leaves.entry(id).or_insert(BTreeMap::new()); + + leaf_chain.iter().for_each(|leaf_info| { + map.entry(leaf_info.leaf.view_number()) + .and_modify(|vec| vec.push(leaf_info.leaf.clone())) + .or_insert(vec![leaf_info.leaf.clone()]); + }); + } + + Ok(()) + } + + fn check(&self) -> TestResult { + if let Err(e) = self.consensus_leaves.valid() { + return TestResult::Fail(Box::new(e)); + } + + TestResult::Pass + } +} diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 43ce6ebe5b..330a40c5f9 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -21,6 +21,9 @@ pub mod test_runner; /// task that's consuming events and asserting safety pub mod overall_safety_task; +/// task that checks leaves received across all nodes from decide events for consistency +pub mod consistency_task; + /// task that's submitting transactions to the stream pub mod txn_task; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 7b9a1bed79..4751518877 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -65,6 +65,12 @@ pub enum OverallSafetyTaskErr { InconsistentStates, /// mismatched blocks for a view InconsistentBlocks, + /// not enough failures. this likely means there is an issue in the test + NotEnoughFailures { + expected: usize, + + failed_views: HashSet, + }, } /// Data availability task state @@ -225,6 +231,18 @@ impl> TestTaskState failed_views: self.ctx.failed_views.clone(), })); } + + // We should really be able to include a check like this: + // + // if self.ctx.failed_views.len() < num_failed_rounds_total { + // return TestResult::Fail(Box::new(OverallSafetyTaskErr::::NotEnoughFailures { + // expected: num_failed_rounds_total, + // failed_views: self.ctx.failed_views.clone(), + // })); + // } + // + // but we have several tests where it's not possible to fail pin down an exact number of failures (just from async timing issues, if nothing else). Ideally, we should refactor some of the failure count logic for this. + TestResult::Pass } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index bd3bc61ea0..0fa3f5cc19 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -37,6 +37,7 @@ use tracing::info; use super::{ completion_task::CompletionTask, + consistency_task::ConsistencyTask, overall_safety_task::{OverallSafetyTask, RoundCtx}, txn_task::TxnTask, }; @@ -170,12 +171,23 @@ where let overall_safety_task_state = OverallSafetyTask { handles: Arc::clone(&handles), ctx: RoundCtx::default(), - properties: self.launcher.metadata.overall_safety_properties, + properties: self.launcher.metadata.overall_safety_properties.clone(), error: None, test_sender, }; - let safety_task = TestTask::>::new( + let consistency_task_state = ConsistencyTask { + consensus_leaves: BTreeMap::new(), + safety_properties: self.launcher.metadata.overall_safety_properties, + }; + + let consistency_task = TestTask::>::new( + consistency_task_state, + event_rxs.clone(), + test_receiver.clone(), + ); + + let overall_safety_task = TestTask::>::new( overall_safety_task_state, event_rxs.clone(), test_receiver.clone(), @@ -211,7 +223,8 @@ where drop(nodes); - task_futs.push(safety_task.run()); + task_futs.push(overall_safety_task.run()); + task_futs.push(consistency_task.run()); task_futs.push(view_sync_task.run()); task_futs.push(spinning_task.run()); @@ -272,7 +285,12 @@ where assert!( error_list.is_empty(), - "TEST FAILED! Results: {error_list:?}" + "{}", + error_list + .iter() + .fold("TEST FAILED! Results:".to_string(), |acc, error| { + format!("{acc}\n\n{error:?}") + }) ); } diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index c934c4c3ff..3b1c7c7248 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -21,7 +21,7 @@ pub enum TestResult { /// the test task passed Pass, /// the test task failed with an error - Fail(Box), + Fail(Box), } #[async_trait] diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index a11fb4a0e9..1e78574d58 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -46,7 +46,7 @@ async fn test_catchup() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 5, + num_failed_views: 0, ..Default::default() }; @@ -99,7 +99,7 @@ async fn test_catchup_cdn() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 5, + num_failed_views: 0, ..Default::default() }; @@ -154,7 +154,7 @@ async fn test_catchup_one_node() { metadata.overall_safety_properties = OverallSafetyPropertiesDescription { // Make sure we keep committing rounds after the catchup, but not the full 50. num_successful_views: 22, - num_failed_views: 2, + num_failed_views: 0, ..Default::default() }; @@ -215,7 +215,7 @@ async fn test_catchup_in_view_sync() { }, ); metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 5, + num_failed_views: 0, ..Default::default() }; diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index b708d91ff3..cfdf798570 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -26,7 +26,7 @@ async fn push_cdn_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index 0a6448a410..d0cfde0f78 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -31,7 +31,7 @@ cross_tests!( metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, dead_nodes)] }; - metadata.overall_safety_properties.num_failed_views = 3; + metadata.overall_safety_properties.num_failed_views = 1; metadata.overall_safety_properties.num_successful_views = 25; metadata } diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 78411e6f55..86b1951040 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -15,9 +15,8 @@ cross_tests!( Types: [TestTypes], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 6; + metadata.overall_safety_properties.num_failed_views = 5; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; metadata.num_bootstrap_nodes = 14; diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 5be931e554..add4cdec8a 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -30,7 +30,7 @@ async fn test_combined_network() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 25, ..Default::default() }, @@ -63,7 +63,7 @@ async fn test_combined_network_cdn_crash() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, @@ -112,7 +112,7 @@ async fn test_combined_network_reup() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, @@ -165,7 +165,7 @@ async fn test_combined_network_half_dc() { ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 33, + num_failed_views: 0, num_successful_views: 35, ..Default::default() }, diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 189342f4f2..dcd6737e4c 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -34,7 +34,7 @@ async fn test_timeout() { metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - num_failed_views: 25, + num_failed_views: 4, num_successful_views: 25, ..Default::default() }; diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index c9203f1081..68f2d65433 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -178,7 +178,7 @@ async fn test_memory_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 2, + num_failed_views: 0, ..Default::default() }, // allow more time to pass in CI @@ -222,7 +222,7 @@ async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_backtrace(); let metadata = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { - num_failed_views: 2, + num_failed_views: 0, ..Default::default() }, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( From d1044acf995567973f2b83bfd05e9788ccb7a8e5 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 24 Jun 2024 17:09:24 +0200 Subject: [PATCH 1095/1393] Lr/fix high qc race2 (#3360) * Fix high qc race * Lints * Fix unit tests --- task-impls/src/events.rs | 6 +++ task-impls/src/quorum_proposal/mod.rs | 11 ++++- testing/tests/tests_1/quorum_proposal_task.rs | 46 +++++++++++++------ 3 files changed, 46 insertions(+), 17 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 0cd13789ab..90d945aec9 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -194,6 +194,9 @@ pub enum HotShotEvent { /// A new high_qc has been reached by this node. UpdateHighQc(QuorumCertificate), + + /// A new high_qc has been updated in `Consensus`. + HighQcUpdated(QuorumCertificate), } impl Display for HotShotEvent { @@ -429,6 +432,9 @@ impl Display for HotShotEvent { HotShotEvent::UpdateHighQc(cert) => { write!(f, "UpdateHighQc(view_number={:?})", cert.view_number()) } + HotShotEvent::HighQcUpdated(cert) => { + write!(f, "HighQcUpdated(view_number={:?})", cert.view_number()) + } } } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index cb0036516f..9145f791c9 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -105,7 +105,7 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::UpdateHighQc(qc) = event { + if let HotShotEvent::HighQcUpdated(qc) = event { qc.view_number() + 1 } else { return false; @@ -229,7 +229,7 @@ impl> QuorumProposalTaskState { vid_share_dependency.mark_as_completed(event); } - HotShotEvent::UpdateHighQc(_) => { + HotShotEvent::HighQcUpdated(_) => { qc_dependency.mark_as_completed(event); } _ => {} @@ -454,6 +454,13 @@ impl> QuorumProposalTaskState { let view_number = qc.view_number() + 1; self.create_dependency_task_if_new( view_number, diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 7e2b49ecde..905bfefcba 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -121,8 +121,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(genesis_cert.clone())), + exact(HighQcUpdated(genesis_cert.clone())), quorum_proposal_send(), ]), + ]; let quorum_proposal_task_state = @@ -272,19 +274,26 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ]; let expectations = vec![ - Expectations::from_outputs(all_predicates![exact(UpdateHighQc(genesis_cert.clone()))]), - Expectations::from_outputs(all_predicates![exact(UpdateHighQc( - proposals[1].data.justify_qc.clone(), - ))]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(genesis_cert.clone())), + exact(HighQcUpdated(genesis_cert.clone())), + ]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), + ]), Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), quorum_proposal_send(), ]), - Expectations::from_outputs(all_predicates![exact(UpdateHighQc( - proposals[3].data.justify_qc.clone(), - ))]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(proposals[3].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[3].data.justify_qc.clone())), + ]), Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[4].data.justify_qc.clone())), ]), ]; @@ -592,19 +601,26 @@ async fn test_quorum_proposal_task_liveness_check() { ]; let expectations = vec![ - Expectations::from_outputs(vec![exact(UpdateHighQc(genesis_cert.clone()))]), - Expectations::from_outputs(vec![exact(UpdateHighQc( - proposals[1].data.justify_qc.clone(), - ))]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(genesis_cert.clone())), + exact(HighQcUpdated(genesis_cert.clone())), + ]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), + ]), Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), quorum_proposal_send(), ]), - Expectations::from_outputs(vec![exact(UpdateHighQc( - proposals[3].data.justify_qc.clone(), - ))]), - Expectations::from_outputs(vec![ + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(proposals[3].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[3].data.justify_qc.clone())), + ]), + Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[4].data.justify_qc.clone())), ]), ]; From 968dafd016f2a68e74c262cf09bc0743527e6b37 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 25 Jun 2024 12:43:02 +0200 Subject: [PATCH 1096/1393] Fix arg (#3367) --- examples/infra/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index ca7ee73916..ac8b731a40 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -153,7 +153,7 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig Date: Tue, 25 Jun 2024 09:02:50 -0700 Subject: [PATCH 1097/1393] Benchmark script adjust and ORCHESTRATOR_DEFAULT_NUM_ROUNDS adjust (#3377) * benchmark script and parameter adjust * fmt * totally remove web_server_url --- examples/infra/mod.rs | 31 ------------------------------- orchestrator/run-config.toml | 17 ----------------- orchestrator/src/config.rs | 16 +--------------- 3 files changed, 1 insertion(+), 63 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index ac8b731a40..14e24dc7db 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -40,7 +40,6 @@ use hotshot_orchestrator::{ client::{BenchResults, OrchestratorClient, ValidatorArgs}, config::{ BuilderType, CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, NetworkConfigSource, - WebServerConfig, }, }; use hotshot_testing::block_builder::{ @@ -167,22 +166,6 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig() -> (NetworkConfig("orchestrator_url") { orchestrator_url = Url::parse(orchestrator_url_string).unwrap(); } - if let Some(webserver_url_string) = matches.get_one::("webserver_url") { - let updated_web_server_config = WebServerConfig { - url: Url::parse(webserver_url_string).unwrap(), - wait_between_polls: config.web_server_config.unwrap().wait_between_polls, - }; - config.web_server_config = Some(updated_web_server_config); - } - if let Some(da_webserver_url_string) = matches.get_one::("da_webserver_url") { - let updated_da_web_server_config = WebServerConfig { - url: Url::parse(da_webserver_url_string).unwrap(), - wait_between_polls: config.da_web_server_config.unwrap().wait_between_polls, - }; - config.da_web_server_config = Some(updated_da_web_server_config); - } if let Some(builder_type) = matches.get_one::("builder") { config.builder = *builder_type; } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index cc49703802..c8030920a3 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -76,25 +76,10 @@ txn_in_block = 100 blocks_per_second = 1 txn_size = { start = 20, end = 100 } -[web_server_config] -url = "http://localhost:9000" - -[da_web_server_config] -url = "http://localhost:9001" - [combined_network_config.delay_duration] secs = 1 nanos = 0 -[web_server_config.wait_between_polls] -secs = 0 -nanos = 10_000_000 - -[da_web_server_config.wait_between_polls] -secs = 0 -nanos = 10_000_000 - - [config.view_sync_timeout] secs = 2 nanos = 0 @@ -103,8 +88,6 @@ nanos = 0 secs = 0 nanos = 200_000_000 -# TODO (Keyao) Clean up configuration parameters. -# [config.builder_timeout] secs = 10 nanos = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 7fd930cf5d..ab55993e63 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -191,10 +191,6 @@ pub struct NetworkConfig { pub libp2p_config: Option, /// the hotshot config pub config: HotShotConfig, - /// the webserver config - pub web_server_config: Option, - /// the data availability web server config - pub da_web_server_config: Option, /// The address for the Push CDN's "marshal", A.K.A. load balancer pub cdn_marshal_address: Option, /// combined network config @@ -426,8 +422,6 @@ impl Default for NetworkConfig { config: HotShotConfigFile::default().into(), start_delay_seconds: 60, key_type_name: std::any::type_name::().to_string(), - web_server_config: None, - da_web_server_config: None, cdn_marshal_address: None, combined_network_config: None, next_view_timeout: 10, @@ -481,12 +475,6 @@ pub struct NetworkConfigFile { /// The address of the Push CDN's "marshal", A.K.A. load balancer #[serde(default)] pub cdn_marshal_address: Option, - /// the webserver config - #[serde(default)] - pub web_server_config: Option, - /// the data availability web server config - #[serde(default)] - pub da_web_server_config: Option, /// combined network config #[serde(default)] pub combined_network_config: Option, @@ -534,8 +522,6 @@ impl From> for NetworkConfig { key_type_name: std::any::type_name::().to_string(), start_delay_seconds: val.start_delay_seconds, cdn_marshal_address: val.cdn_marshal_address, - web_server_config: val.web_server_config, - da_web_server_config: val.da_web_server_config, combined_network_config: val.combined_network_config, commit_sha: String::new(), builder: val.builder, @@ -713,7 +699,7 @@ impl From> for HotShotConfig { } } /// default number of rounds to run -pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS: usize = 10; +pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS: usize = 100; /// default number of transactions per round pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND: usize = 10; /// default size of transactions From b01e23739cabc93bc3bd1bfa7b940ac8f878342a Mon Sep 17 00:00:00 2001 From: Elias Rad <146735585+nnsW3@users.noreply.github.com> Date: Wed, 26 Jun 2024 20:41:29 +0300 Subject: [PATCH 1098/1393] Docs rectify typographical inaccuracies (#3340) * fix EXTERNAL_ISSUE_FORM.yml * fix builder.toml * fix data_source.rs * fix typos block_types.rs * fix all.rs * fix typos mod.rs * fix typos all.rs --- builder-api/api/builder.toml | 2 +- builder-api/src/data_source.rs | 2 +- example-types/src/block_types.rs | 2 +- examples/combined/all.rs | 2 +- examples/infra/mod.rs | 2 +- examples/libp2p/all.rs | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/builder-api/api/builder.toml b/builder-api/api/builder.toml index 8a2b6fe152..f16911a948 100644 --- a/builder-api/api/builder.toml +++ b/builder-api/api/builder.toml @@ -74,7 +74,7 @@ Returns application-specific block header type [route.builder_address] PATH = ["builderaddress"] DOC = """ -Get the builder address. +Get the builder's address. Returns the builder's public key """ diff --git a/builder-api/src/data_source.rs b/builder-api/src/data_source.rs index eaa6f01860..49b8948a3f 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/data_source.rs @@ -40,7 +40,7 @@ pub trait BuilderDataSource { signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; - /// To get the builder address + /// To get the builder's address async fn builder_address(&self) -> Result; } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index b7e7350b51..e02c6582d8 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -44,7 +44,7 @@ impl TryFrom> for TestTransaction { } impl TestTransaction { - /// Construct new transaction + /// Construct a new transaction /// /// # Panics /// If `bytes.len()` > `u32::MAX` diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 2ec6ede3ce..5eff8a4d10 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -1,4 +1,4 @@ -//! A example program using both the web server and libp2p +//! An example program using both the web server and libp2p /// types used for this example pub mod types; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 14e24dc7db..07e0ad85bf 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -100,7 +100,7 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig Date: Wed, 26 Jun 2024 20:51:17 -0400 Subject: [PATCH 1099/1393] unify quorum/da networks (#3382) --- example-types/src/node_types.rs | 12 +- examples/combined/all.rs | 16 +- examples/combined/multi-validator.rs | 4 +- examples/combined/types.rs | 13 +- examples/combined/validator.rs | 4 +- examples/infra/mod.rs | 175 ++++++------------ examples/libp2p/all.rs | 16 +- examples/libp2p/multi-validator.rs | 4 +- examples/libp2p/types.rs | 9 +- examples/libp2p/validator.rs | 4 +- examples/push-cdn/all.rs | 16 +- examples/push-cdn/multi-validator.rs | 4 +- examples/push-cdn/types.rs | 11 +- examples/push-cdn/validator.rs | 4 +- hotshot/src/lib.rs | 70 ++----- hotshot/src/tasks/task_state.rs | 28 ++- .../src/traits/networking/combined_network.rs | 38 ++-- .../src/traits/networking/libp2p_network.rs | 7 +- .../src/traits/networking/memory_network.rs | 4 +- .../src/traits/networking/push_cdn_network.rs | 8 +- hotshot/src/types/handle.rs | 11 +- task-impls/src/consensus/mod.rs | 7 +- task-impls/src/consensus2/mod.rs | 7 +- task-impls/src/da.rs | 6 +- task-impls/src/quorum_proposal/mod.rs | 7 +- task-impls/src/quorum_proposal_recv/mod.rs | 4 +- task-impls/src/quorum_vote/mod.rs | 7 +- task-impls/src/request.rs | 11 +- task-impls/src/transactions.rs | 4 +- task-impls/src/upgrade.rs | 4 +- task-impls/src/vid.rs | 4 +- task-impls/src/view_sync.rs | 8 +- testing/src/helpers.rs | 12 +- testing/src/spinning_task.rs | 12 +- testing/src/test_launcher.rs | 9 +- testing/src/test_runner.rs | 45 ++--- testing/tests/tests_1/network_task.rs | 26 +-- testing/tests/tests_3/memory_network.rs | 6 +- types/src/traits/network.rs | 2 +- types/src/traits/node_implementation.rs | 16 +- 40 files changed, 232 insertions(+), 423 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 85388835aa..45d7684a0c 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -68,25 +68,21 @@ pub struct CombinedImpl; pub type StaticMembership = StaticCommittee; impl NodeImplementation for PushCdnImpl { - type QuorumNetwork = PushCdnNetwork; - type DaNetwork = PushCdnNetwork; + type Network = PushCdnNetwork; type Storage = TestStorage; } impl NodeImplementation for MemoryImpl { - type QuorumNetwork = MemoryNetwork; - type DaNetwork = MemoryNetwork; + type Network = MemoryNetwork; type Storage = TestStorage; } impl NodeImplementation for CombinedImpl { - type QuorumNetwork = CombinedNetworks; - type DaNetwork = CombinedNetworks; + type Network = CombinedNetworks; type Storage = TestStorage; } impl NodeImplementation for Libp2pImpl { - type QuorumNetwork = Libp2pNetwork; - type DaNetwork = Libp2pNetwork; + type Network = Libp2pNetwork; type Storage = TestStorage; } diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 5eff8a4d10..0d8307dc5b 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -23,7 +23,7 @@ use tracing::{error, instrument}; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}, + types::{Network, NodeImpl, ThisRun}, }; /// general infra used for this example @@ -140,14 +140,12 @@ async fn main() { let builder_address = gen_local_address::(i); let node = async_spawn(async move { - infra::main_entry_point::( - ValidatorArgs { - url: orchestrator_url, - advertise_address: Some(advertise_address), - builder_address: Some(builder_address), - network_config_file: None, - }, - ) + infra::main_entry_point::(ValidatorArgs { + url: orchestrator_url, + advertise_address: Some(advertise_address), + builder_address: Some(builder_address), + network_config_file: None, + }) .await; }); nodes.push(node); diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index 2c36af9f28..f894030d59 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{Network, NodeImpl, ThisRun}; /// types used for this example pub mod types; @@ -30,7 +30,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/combined/types.rs b/examples/combined/types.rs index 90b5bff2d8..a7be6c32bb 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -11,18 +11,11 @@ use crate::infra::CombinedDaRun; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -/// convenience type alias -pub type DaNetwork = CombinedNetworks; -/// convenience type alias -pub type VidNetwork = CombinedNetworks; -/// convenience type alias -pub type QuorumNetwork = CombinedNetworks; -/// convenience type alias -pub type ViewSyncNetwork = CombinedNetworks; +/// Convenience type alias +pub type Network = CombinedNetworks; impl NodeImplementation for NodeImpl { - type QuorumNetwork = QuorumNetwork; - type DaNetwork = DaNetwork; + type Network = Network; type Storage = TestStorage; } /// convenience type alias diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index b4a09949cd..eafdd21559 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -8,7 +8,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{debug, instrument}; -use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{Network, NodeImpl, ThisRun}; /// types used for this example pub mod types; @@ -36,5 +36,5 @@ async fn main() { ); debug!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 07e0ad85bf..77af85f8d8 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -3,9 +3,9 @@ use std::{ collections::HashMap, fmt::Debug, fs, - marker::PhantomData, net::{IpAddr, Ipv4Addr, SocketAddr}, num::NonZeroUsize, + sync::Arc, time::{Duration, Instant}, }; @@ -27,7 +27,7 @@ use hotshot::{ BlockPayload, NodeImplementation, }, types::SystemContextHandle, - Memberships, Networks, SystemContext, + Memberships, SystemContext, }; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, @@ -335,14 +335,8 @@ where #[async_trait] pub trait RunDa< TYPES: NodeType, - DANET: ConnectedNetwork, - QUORUMNET: ConnectedNetwork, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = QUORUMNET, - DaNetwork = DANET, - Storage = TestStorage, - >, + NETWORK: ConnectedNetwork, + NODE: NodeImplementation>, > where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -372,14 +366,7 @@ pub trait RunDa< let sk = config.config.my_own_validator_config.private_key.clone(); let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); - let da_network = self.da_channel(); - let quorum_network = self.quorum_channel(); - - let networks_bundle = Networks { - quorum_network: quorum_network.clone().into(), - da_network: da_network.clone().into(), - _pd: PhantomData, - }; + let network = self.network(); // Create the quorum membership from all nodes let quorum_membership = ::Membership::create_election( @@ -409,7 +396,7 @@ pub trait RunDa< config.node_index, config.config, memberships, - networks_bundle, + Arc::from(network), initializer, ConsensusMetricsValue::default(), TestStorage::::default(), @@ -581,11 +568,8 @@ pub trait RunDa< } } - /// Returns the da network for this run - fn da_channel(&self) -> DANET; - - /// Returns the quorum network for this run - fn quorum_channel(&self) -> QUORUMNET; + /// Returns the underlying network for this run + fn network(&self) -> NETWORK; /// Returns the config for this run fn config(&self) -> NetworkConfig; @@ -597,10 +581,8 @@ pub trait RunDa< pub struct PushCdnDaRun { /// The underlying configuration config: NetworkConfig, - /// The quorum channel - quorum_channel: PushCdnNetwork, - /// The DA channel - da_channel: PushCdnNetwork, + /// The underlying network + network: PushCdnNetwork, } #[async_trait] @@ -611,13 +593,8 @@ impl< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = PushCdnNetwork, - DaNetwork = PushCdnNetwork, - Storage = TestStorage, - >, - > RunDa, PushCdnNetwork, NODE> for PushCdnDaRun + NODE: NodeImplementation, Storage = TestStorage>, + > RunDa, NODE> for PushCdnDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -658,19 +635,11 @@ where // Wait for the network to be ready network.wait_for_ready().await; - PushCdnDaRun { - config, - quorum_channel: network.clone(), - da_channel: network, - } - } - - fn da_channel(&self) -> PushCdnNetwork { - self.da_channel.clone() + PushCdnDaRun { config, network } } - fn quorum_channel(&self) -> PushCdnNetwork { - self.quorum_channel.clone() + fn network(&self) -> PushCdnNetwork { + self.network.clone() } fn config(&self) -> NetworkConfig { @@ -682,12 +651,10 @@ where /// Represents a libp2p-based run pub struct Libp2pDaRun { - /// the network configuration + /// The underlying network configuration config: NetworkConfig, - /// quorum channel - quorum_channel: Libp2pNetwork, - /// data availability channel - da_channel: Libp2pNetwork, + /// The underlying network + network: Libp2pNetwork, } #[async_trait] @@ -700,12 +667,10 @@ impl< >, NODE: NodeImplementation< TYPES, - QuorumNetwork = Libp2pNetwork, - DaNetwork = Libp2pNetwork, + Network = Libp2pNetwork, Storage = TestStorage, >, - > RunDa, Libp2pNetwork, NODE> - for Libp2pDaRun + > RunDa, NODE> for Libp2pDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -754,17 +719,12 @@ where Libp2pDaRun { config, - quorum_channel: libp2p_network.clone(), - da_channel: libp2p_network, + network: libp2p_network, } } - fn da_channel(&self) -> Libp2pNetwork { - self.da_channel.clone() - } - - fn quorum_channel(&self) -> Libp2pNetwork { - self.quorum_channel.clone() + fn network(&self) -> Libp2pNetwork { + self.network.clone() } fn config(&self) -> NetworkConfig { @@ -776,12 +736,10 @@ where /// Represents a combined-network-based run pub struct CombinedDaRun { - /// the network configuration + /// The underlying network configuration config: NetworkConfig, - /// quorum channel - quorum_channel: CombinedNetworks, - /// data availability channel - da_channel: CombinedNetworks, + /// The underlying network + network: CombinedNetworks, } #[async_trait] @@ -792,13 +750,8 @@ impl< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = CombinedNetworks, - DaNetwork = CombinedNetworks, - Storage = TestStorage, - >, - > RunDa, CombinedNetworks, NODE> for CombinedDaRun + NODE: NodeImplementation, Storage = TestStorage>, + > RunDa, NODE> for CombinedDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -810,24 +763,24 @@ where libp2p_advertise_address: Option, ) -> CombinedDaRun { // Initialize our Libp2p network - let libp2p_da_run: Libp2pDaRun = - as RunDa< - TYPES, - Libp2pNetwork, - Libp2pNetwork, - Libp2pImpl, - >>::initialize_networking(config.clone(), libp2p_advertise_address) - .await; + let libp2p_network: Libp2pDaRun = as RunDa< + TYPES, + Libp2pNetwork, + Libp2pImpl, + >>::initialize_networking( + config.clone(), libp2p_advertise_address + ) + .await; // Initialize our CDN network - let cdn_da_run: PushCdnDaRun = - as RunDa< - TYPES, - PushCdnNetwork, - PushCdnNetwork, - PushCdnImpl, - >>::initialize_networking(config.clone(), libp2p_advertise_address) - .await; + let cdn_network: PushCdnDaRun = as RunDa< + TYPES, + PushCdnNetwork, + PushCdnImpl, + >>::initialize_networking( + config.clone(), libp2p_advertise_address + ) + .await; // Create our combined network config let CombinedNetworkConfig { delay_duration }: CombinedNetworkConfig = config @@ -835,32 +788,16 @@ where .combined_network_config .expect("combined network config not specified"); - // Combine the two communication channels - let da_channel = CombinedNetworks::new( - cdn_da_run.da_channel, - libp2p_da_run.da_channel, - delay_duration, - ); - let quorum_channel = CombinedNetworks::new( - cdn_da_run.quorum_channel, - libp2p_da_run.quorum_channel, - delay_duration, - ); + // Create our combined network + let network = + CombinedNetworks::new(cdn_network.network, libp2p_network.network, delay_duration); // Return the run configuration - CombinedDaRun { - config, - quorum_channel, - da_channel, - } - } - - fn da_channel(&self) -> CombinedNetworks { - self.da_channel.clone() + CombinedDaRun { config, network } } - fn quorum_channel(&self) -> CombinedNetworks { - self.quorum_channel.clone() + fn network(&self) -> CombinedNetworks { + self.network.clone() } fn config(&self) -> NetworkConfig { @@ -877,15 +814,9 @@ pub async fn main_entry_point< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - DACHANNEL: ConnectedNetwork, - QUORUMCHANNEL: ConnectedNetwork, - NODE: NodeImplementation< - TYPES, - QuorumNetwork = QUORUMCHANNEL, - DaNetwork = DACHANNEL, - Storage = TestStorage, - >, - RUNDA: RunDa, + NETWORK: ConnectedNetwork, + NODE: NodeImplementation>, + RUNDA: RunDa, >( args: ValidatorArgs, ) where diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 95e844d86a..cf1f4cbe38 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -13,7 +13,7 @@ use tracing::instrument; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}, + types::{Network, NodeImpl, ThisRun}, }; /// general infra used for this example @@ -45,14 +45,12 @@ async fn main() { let builder_address = gen_local_address::(i); let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::( - ValidatorArgs { - url: orchestrator_url, - advertise_address: Some(advertise_address), - builder_address: Some(builder_address), - network_config_file: None, - }, - ) + infra::main_entry_point::(ValidatorArgs { + url: orchestrator_url, + advertise_address: Some(advertise_address), + builder_address: Some(builder_address), + network_config_file: None, + }) .await; }); nodes.push(node); diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index 2ffbc53ddb..c802f3da50 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{Network, NodeImpl, ThisRun}; /// types used for this example pub mod types; @@ -30,7 +30,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index 5ec7c6c100..f47a7e5bce 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -11,14 +11,11 @@ use crate::infra::Libp2pDaRun; #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct NodeImpl {} -/// convenience type alias -pub type DaNetwork = Libp2pNetwork<::SignatureKey>; -/// convenience type alias -pub type QuorumNetwork = Libp2pNetwork<::SignatureKey>; +/// Convenience type alias +pub type Network = Libp2pNetwork<::SignatureKey>; impl NodeImplementation for NodeImpl { - type QuorumNetwork = QuorumNetwork; - type DaNetwork = DaNetwork; + type Network = Network; type Storage = TestStorage; } /// convenience type alias diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 810522820f..bfc1a4e387 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -8,7 +8,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{debug, instrument}; -use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{Network, NodeImpl, ThisRun}; /// types used for this example pub mod types; @@ -35,5 +35,5 @@ async fn main() { ); debug!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 0092889c6f..ac3d9c2df1 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -16,7 +16,7 @@ use infra::{gen_local_address, BUILDER_BASE_PORT}; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, - types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}, + types::{Network, NodeImpl, ThisRun}, }; /// The infra implementation @@ -122,14 +122,12 @@ async fn main() { let orchestrator_url = orchestrator_url.clone(); let builder_address = gen_local_address::(i); let node = async_spawn(async move { - infra::main_entry_point::( - ValidatorArgs { - url: orchestrator_url, - advertise_address: None, - builder_address: Some(builder_address), - network_config_file: None, - }, - ) + infra::main_entry_point::(ValidatorArgs { + url: orchestrator_url, + advertise_address: None, + builder_address: Some(builder_address), + network_config_file: None, + }) .await; }); nodes.push(node); diff --git a/examples/push-cdn/multi-validator.rs b/examples/push-cdn/multi-validator.rs index c9c6b1beb1..53598afcb2 100644 --- a/examples/push-cdn/multi-validator.rs +++ b/examples/push-cdn/multi-validator.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; -use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{Network, NodeImpl, ThisRun}; /// types used for this example pub mod types; @@ -30,7 +30,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 7930207365..2d0047f352 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -9,17 +9,10 @@ use crate::infra::PushCdnDaRun; pub struct NodeImpl {} /// Convenience type alias -pub type DaNetwork = PushCdnNetwork; -/// Convenience type alias -pub type VidNetwork = PushCdnNetwork; -/// Convenience type alias -pub type QuorumNetwork = PushCdnNetwork; -/// Convenience type alias -pub type ViewSyncNetwork = PushCdnNetwork; +pub type Network = PushCdnNetwork; impl NodeImplementation for NodeImpl { - type DaNetwork = DaNetwork; - type QuorumNetwork = QuorumNetwork; + type Network = Network; type Storage = TestStorage; } diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index e806178549..3f0497ce02 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -5,7 +5,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use tracing::{debug, instrument}; -use crate::types::{DaNetwork, NodeImpl, QuorumNetwork, ThisRun}; +use crate::types::{Network, NodeImpl, ThisRun}; /// types used for this example pub mod types; @@ -22,5 +22,5 @@ async fn main() { setup_backtrace(); let args = ValidatorArgs::parse(); debug!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e5f85fe960..c8e0a30e3e 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -17,7 +17,6 @@ pub mod tasks; use std::{ collections::{BTreeMap, HashMap}, - marker::PhantomData, num::NonZeroUsize, sync::Arc, time::Duration, @@ -71,32 +70,6 @@ pub const H_512: usize = 64; /// Length, in bytes, of a 256 bit hash pub const H_256: usize = 32; -/// Bundle of the networks used in consensus -pub struct Networks> { - /// Network for reaching all nodes - pub quorum_network: Arc, - - /// Network for reaching the DA committee - pub da_network: Arc, - - /// Phantom for TYPES and I - pub _pd: PhantomData<(TYPES, I)>, -} - -impl> Networks { - /// wait for all networks to be ready - pub async fn wait_for_networks_ready(&self) { - self.quorum_network.wait_for_ready().await; - self.da_network.wait_for_ready().await; - } - - /// shut down all networks - pub async fn shut_down_networks(&self) { - self.quorum_network.shut_down().await; - self.da_network.shut_down().await; - } -} - /// Bundle of all the memberships a consensus instance uses #[derive(Clone)] pub struct Memberships { @@ -121,8 +94,8 @@ pub struct SystemContext> { /// Configuration items for this hotshot instance pub config: HotShotConfig, - /// Networks used by the instance of hotshot - pub networks: Arc>, + /// The underlying network + pub network: Arc, /// Memberships used by consensus pub memberships: Arc>, @@ -174,7 +147,7 @@ impl> Clone for SystemContext> SystemContext { nonce: u64, config: HotShotConfig, memberships: Memberships, - networks: Networks, + network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, @@ -295,7 +268,7 @@ impl> SystemContext { config, version, start_view: initializer.start_view, - networks: Arc::new(networks), + network, memberships: Arc::new(memberships), metrics: Arc::clone(&consensus_metrics), internal_event_stream: (internal_tx, internal_rx.deactivate()), @@ -449,9 +422,7 @@ impl> SystemContext { // and will be updated to be part of SystemContext. I wanted to use associated // constants in NodeType, but that seems to be unavailable in the current Rust. api - .networks - .da_network - .broadcast_message( + .network.broadcast_message( serialized_message, da_membership.whole_committee(view_number), BroadcastDelay::None, @@ -535,7 +506,7 @@ impl> SystemContext { node_id: u64, config: HotShotConfig, memberships: Memberships, - networks: Networks, + network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, @@ -553,7 +524,7 @@ impl> SystemContext { node_id, config, memberships, - networks, + network, initializer, metrics, storage, @@ -582,8 +553,7 @@ impl> SystemContext { let output_event_stream = self.external_event_stream.clone(); let internal_event_stream = self.internal_event_stream.clone(); - let quorum_network = Arc::clone(&self.networks.quorum_network); - let da_network = Arc::clone(&self.networks.da_network); + let network = Arc::clone(&self.network); let quorum_membership = self.memberships.quorum_membership.clone(); let da_membership = self.memberships.da_membership.clone(); let vid_membership = self.memberships.vid_membership.clone(); @@ -598,49 +568,43 @@ impl> SystemContext { storage: Arc::clone(&self.storage), }; - add_network_message_task(&mut handle, Arc::clone(&quorum_network)).await; - add_network_message_task(&mut handle, Arc::clone(&da_network)).await; + add_network_message_task(&mut handle, Arc::clone(&network)).await; + add_network_message_task(&mut handle, Arc::clone(&network)).await; - if let Some(request_receiver) = da_network.spawn_request_receiver_task().await { + if let Some(request_receiver) = network.spawn_request_receiver_task().await { add_request_network_task(&mut handle).await; add_response_task(&mut handle, request_receiver).await; } add_network_event_task( &mut handle, - Arc::clone(&quorum_network), + Arc::clone(&network), quorum_membership.clone(), network::quorum_filter, ) .await; add_network_event_task( &mut handle, - Arc::clone(&quorum_network), + Arc::clone(&network), quorum_membership, network::upgrade_filter, ) .await; add_network_event_task( &mut handle, - Arc::clone(&da_network), + Arc::clone(&network), da_membership, network::da_filter, ) .await; add_network_event_task( &mut handle, - Arc::clone(&quorum_network), + Arc::clone(&network), view_sync_membership, network::view_sync_filter, ) .await; - add_network_event_task( - &mut handle, - Arc::clone(&quorum_network), - vid_membership, - network::vid_filter, - ) - .await; + add_network_event_task(&mut handle, network, vid_membership, network::vid_filter).await; add_consensus_tasks::(&mut handle).await; handle } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 19b415cab9..cde54d27ae 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -39,7 +39,7 @@ impl> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> NetworkRequestState { NetworkRequestState { - network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), state: handle.hotshot.consensus(), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, @@ -64,7 +64,7 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -80,7 +80,7 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), vote_collector: None.into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -102,7 +102,7 @@ impl> CreateTaskState consensus: handle.hotshot.consensus(), cur_view: handle.cur_view().await, vote_collector: None, - network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.vid_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -120,7 +120,7 @@ impl> CreateTaskState consensus: handle.hotshot.consensus(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), - da_network: Arc::clone(&handle.hotshot.networks.da_network), + network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.cur_view().await, vote_collector: None.into(), @@ -141,7 +141,7 @@ impl> CreateTaskState ViewSyncTaskState { current_view: cur_view, next_view: cur_view, - network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), membership: handle .hotshot .memberships @@ -174,7 +174,7 @@ impl, Ver: StaticVersionType> output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: handle.hotshot.consensus(), cur_view: handle.cur_view().await, - network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -222,8 +222,7 @@ impl> CreateTaskState id: handle.hotshot.id, public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - da_network: Arc::clone(&handle.hotshot.networks.da_network), + network: Arc::clone(&handle.hotshot.network), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), @@ -247,8 +246,7 @@ impl> CreateTaskState instance_state: handle.hotshot.instance_state(), latest_voted_view: handle.cur_view().await, vote_dependencies: HashMap::new(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - da_network: Arc::clone(&handle.hotshot.networks.da_network), + network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), @@ -272,8 +270,7 @@ impl> CreateTaskState QuorumProposalTaskState { latest_proposed_view: handle.cur_view().await, proposal_dependencies: HashMap::new(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - da_network: Arc::clone(&handle.hotshot.networks.da_network), + network: Arc::clone(&handle.hotshot.network), output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus, instance_state: handle.hotshot.instance_state(), @@ -307,7 +304,7 @@ impl> CreateTaskState consensus, cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), + network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_task, @@ -338,8 +335,7 @@ impl> CreateTaskState public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), - quorum_network: Arc::clone(&handle.hotshot.networks.quorum_network), - da_network: Arc::clone(&handle.hotshot.networks.da_network), + network: Arc::clone(&handle.hotshot.network), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), committee_membership: handle.hotshot.memberships.da_membership.clone().into(), diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 39ec59cd5a..7d44947f05 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -256,7 +256,7 @@ impl TestableNetworkingImplementation for CombinedNetwor is_da: bool, reliability_config: Option>, secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)> { + ) -> AsyncGenerator> { let generators = ( as TestableNetworkingImplementation>::generator( expected_node_count, @@ -282,27 +282,27 @@ impl TestableNetworkingImplementation for CombinedNetwor let gen1 = generators.1(node_id); Box::pin(async move { - let (cdn, _) = gen0.await; + // Generate the CDN network + let cdn = gen0.await; let cdn = Arc::>::into_inner(cdn).unwrap(); - let (quorum_p2p, da_p2p) = gen1.await; - let da_networks = UnderlyingCombinedNetworks( + // Generate the p2p network + let p2p = gen1.await; + + // Combine the two + let underlying_combined = UnderlyingCombinedNetworks( cdn.clone(), - Arc::>::unwrap_or_clone(da_p2p), - ); - let quorum_networks = UnderlyingCombinedNetworks( - cdn, - Arc::>::unwrap_or_clone(quorum_p2p), + Arc::>::unwrap_or_clone(p2p), ); - // We want to the message cache between the two networks + // We want to use the same message cache between the two networks let message_cache = Arc::new(RwLock::new(LruCache::new( NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), ))); - // Create the quorum and da networks - let quorum_net = Self { - networks: Arc::new(quorum_networks), + // Combine the two networks with the same cache + let combined_network = Self { + networks: Arc::new(underlying_combined), primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), message_cache: Arc::clone(&message_cache), @@ -310,16 +310,8 @@ impl TestableNetworkingImplementation for CombinedNetwor delayed_tasks_channels: Arc::default(), no_delay_counter: Arc::new(AtomicU64::new(0)), }; - let da_net = Self { - networks: Arc::new(da_networks), - message_cache, - primary_fail_counter: Arc::new(AtomicU64::new(0)), - primary_down: Arc::new(AtomicBool::new(false)), - delay_duration: Arc::new(RwLock::new(secondary_network_delay)), - delayed_tasks_channels: Arc::default(), - no_delay_counter: Arc::new(AtomicU64::new(0)), - }; - (quorum_net.into(), da_net.into()) + + Arc::new(combined_network) }) }) } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index b17de79788..3b439e5d52 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -206,7 +206,7 @@ impl TestableNetworkingImplementation _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)> { + ) -> AsyncGenerator> { assert!( da_committee_size <= expected_node_count, "DA committee size must be less than or equal to total # nodes" @@ -295,7 +295,7 @@ impl TestableNetworkingImplementation let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); Box::pin(async move { - let net = Arc::new( + Arc::new( match Libp2pNetwork::new( Libp2pMetricsValue::default(), config, @@ -315,8 +315,7 @@ impl TestableNetworkingImplementation panic!("Failed to create libp2p network: {err:?}"); } }, - ); - (Arc::clone(&net), net) + ) }) } }) diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index e2e2266917..e0b95c0ce8 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -173,14 +173,14 @@ impl TestableNetworkingImplementation _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)> { + ) -> AsyncGenerator> { let master: Arc<_> = MasterMap::new(); // We assign known_nodes' public key and stake value rather than read from config file since it's a test Box::pin(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); let net = MemoryNetwork::new(pubkey, &master, reliability_config.clone()); - Box::pin(async move { (net.clone().into(), net.into()) }) + Box::pin(async move { net.into() }) }) } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 97d23c613e..40de0ed9db 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -264,7 +264,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork _is_da: bool, _reliability_config: Option>, _secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)> { + ) -> AsyncGenerator> { // The configuration we are using for testing is 2 brokers & 1 marshal // A keypair shared between brokers @@ -404,14 +404,12 @@ impl TestableNetworkingImplementation for PushCdnNetwork }; // Create our client - let client = Arc::new(PushCdnNetwork { + Arc::new(PushCdnNetwork { client: Client::new(client_config), metrics: Arc::new(CdnMetricsValue::default()), #[cfg(feature = "hotshot-testing")] is_paused: Arc::from(AtomicBool::new(false)), - }); - - (Arc::clone(&client), client) + }) }) } }) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index f68c4f549a..c859bd070d 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -10,6 +10,7 @@ use async_std::task::JoinHandle; use futures::Stream; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry, Task, TaskState}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; +use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::{ consensus::Consensus, data::Leaf, @@ -146,12 +147,6 @@ impl + 'static> SystemContextHandl self.hotshot.consensus() } - /// Block the underlying quorum (and DA) networking interfaces until node is - /// successfully initialized into the networks. - pub async fn wait_for_networks_ready(&self) { - self.hotshot.networks.wait_for_networks_ready().await; - } - /// Shut down the the inner hotshot and wait until all background threads are closed. pub async fn shut_down(&mut self) { // this is required because `SystemContextHandle` holds an inactive receiver and @@ -166,8 +161,8 @@ impl + 'static> SystemContextHandl tracing::error!("Shutting down network tasks!"); self.network_registry.shutdown().await; - tracing::error!("Shutting down networks!"); - self.hotshot.networks.shut_down_networks().await; + tracing::error!("Shutting down the network!"); + self.hotshot.network.shut_down().await; tracing::error!("Shutting down consensus!"); self.consensus_registry.shutdown().await; diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 2e557a17ac..9c61d3cfb8 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -82,11 +82,8 @@ pub struct ConsensusTaskState> { /// The commitment to the current block payload and its metadata submitted to DA. pub payload_commitment_and_metadata: Option>, - /// Network for all nodes - pub quorum_network: Arc, - - /// Network for DA committee - pub da_network: Arc, + /// The underlying network + pub network: Arc, /// Membership for Timeout votes/certs pub timeout_membership: Arc, diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index fe01566aa5..35fc574190 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -43,11 +43,8 @@ pub struct Consensus2TaskState> { /// Immutable instance state pub instance_state: Arc, - /// Network for all nodes - pub quorum_network: Arc, - - /// Network for DA committee - pub da_network: Arc, + /// The underlying network + pub network: Arc, /// Membership for Timeout votes/certs pub timeout_membership: Arc, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 1bc0e5f818..9e06545937 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -61,8 +61,8 @@ pub struct DaTaskState> { /// from the number of nodes in the quorum. pub quorum_membership: Arc, - /// Network for DA - pub da_network: Arc, + /// The underlying network + pub network: Arc, /// The current vote collection task, if there is one. pub vote_collector: RwLock, DaCertificate>>, @@ -215,7 +215,7 @@ impl> DaTaskState { tracing::trace!("{e:?}"); } // Optimistically calculate and update VID if we know that the primary network is down. - if self.da_network.is_primary_down() { + if self.network.is_primary_down() { let consensus = Arc::clone(&self.consensus); let membership = Arc::clone(&self.quorum_membership); let pk = self.private_key.clone(); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 9145f791c9..f755ccf2b0 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -44,11 +44,8 @@ pub struct QuorumProposalTaskState /// Table for the in-progress proposal depdencey tasks. pub proposal_dependencies: HashMap>, - /// Network for all nodes - pub quorum_network: Arc, - - /// Network for DA committee - pub da_network: Arc, + /// The underlying network + pub network: Arc, /// Output events to application pub output_event_stream: async_broadcast::Sender>, diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 6f24cb933d..89a81c16a5 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -55,8 +55,8 @@ pub struct QuorumProposalRecvTaskState, + /// The underlying network + pub network: Arc, /// Membership for Quorum Certs/votes pub quorum_membership: Arc, diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 34b15a8fc0..9018db610b 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -366,11 +366,8 @@ pub struct QuorumVoteTaskState> { /// Table for the in-progress dependency tasks. pub vote_dependencies: HashMap>, - /// Network for all nodes - pub quorum_network: Arc, - - /// Network for DA committee - pub da_network: Arc, + /// The underlying network + pub network: Arc, /// Membership for Quorum certs/votes. pub quorum_membership: Arc, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 9d30067b3c..b40bf413b6 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -50,7 +50,8 @@ pub const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); /// shares. pub struct NetworkRequestState> { /// Network to send requests over - pub network: Arc, + /// The underlying network + pub network: Arc, /// Consensus shared state so we can check if we've gotten the information /// before sending a request pub state: Arc>>, @@ -248,8 +249,8 @@ impl> NetworkRequestState> { - /// Network to send requests - network: Arc, + /// The underlying network to send requests on + pub network: Arc, /// Shared state to check if the data go populated state: Arc>>, /// Channel to send the event when we receive a response @@ -265,8 +266,8 @@ struct DelayedRequester> { /// A task the requests some data immediately from one peer struct ProposalRequester> { - /// Network to send requests - network: Arc, + /// The underlying network to send requests on + pub network: Arc, /// Channel to send the event when we receive a response sender: Sender>>>, /// Leader for the view of the request diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index c8f8a023b6..efa1f9b543 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -82,8 +82,8 @@ pub struct TransactionTaskState< /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>, - /// Network for all nodes - pub network: Arc, + /// The underlying network + pub network: Arc, /// Membership for the quorum pub membership: Arc, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index ae7dc4d908..75264cc0cd 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -44,8 +44,8 @@ pub struct UpgradeTaskState> { /// Membership for Quorum Certs/votes pub quorum_membership: Arc, - /// Network for all nodes - pub quorum_network: Arc, + /// The underlying network + pub network: Arc, /// The current vote collection task, if there is one. pub vote_collector: diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 25750939ac..97ef0a1704 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -29,8 +29,8 @@ pub struct VidTaskState> { pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. pub consensus: Arc>>, - /// Network for all nodes - pub network: Arc, + /// The underlying network + pub network: Arc, /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 0fa9e60224..c319939d57 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -64,8 +64,8 @@ pub struct ViewSyncTaskState> { pub current_view: TYPES::Time, /// View HotShot wishes to be in pub next_view: TYPES::Time, - /// Network for all nodes - pub network: Arc, + /// The underlying network + pub network: Arc, /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key @@ -135,8 +135,8 @@ pub struct ViewSyncReplicaTaskState, + /// The underlying network + pub network: Arc, /// Membership for the quorum pub membership: Arc, /// This Nodes Public Key diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index d0510373ba..c33fcd7fc9 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -7,7 +7,7 @@ use committable::Committable; use ethereum_types::U256; use hotshot::{ types::{BLSPubKey, SignatureKey, SystemContextHandle}, - HotShotInitializer, Memberships, Networks, SystemContext, + HotShotInitializer, Memberships, SystemContext, }; use hotshot_example_types::{ block_types::TestTransaction, @@ -50,7 +50,7 @@ pub async fn build_system_handle( let launcher = builder.gen_launcher::(node_id); - let networks = (launcher.resource_generator.channel_generator)(node_id).await; + let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); let config = launcher.resource_generator.config.clone(); @@ -64,12 +64,6 @@ pub async fn build_system_handle( let _known_nodes_without_stake = config.known_nodes_without_stake.clone(); - let networks_bundle = Networks { - quorum_network: networks.0.clone(), - da_network: networks.1.clone(), - _pd: PhantomData, - }; - let memberships = Memberships { quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), @@ -99,7 +93,7 @@ pub async fn build_system_handle( node_id, config, memberships, - networks_bundle, + network, initializer, ConsensusMetricsValue::default(), storage, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 6736c18b51..8acf65264b 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -61,7 +61,7 @@ impl< > TestTaskState for SpinningTask where I: TestableNodeImplementation, - I: NodeImplementation>, + I: NodeImplementation>, { type Event = Event; @@ -125,7 +125,7 @@ where ); TestRunner::add_node_with_config( node_id, - node.networks.clone(), + node.network.clone(), memberships, initializer, config, @@ -143,7 +143,7 @@ where // safety task. let node = Node { node_id, - networks: node.networks, + network: node.network, handle, }; node.handle.hotshot.start_consensus().await; @@ -160,15 +160,13 @@ where UpDown::NetworkUp => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); - handle.networks.0.resume(); - handle.networks.1.resume(); + handle.network.resume(); } } UpDown::NetworkDown => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks pausing", idx); - handle.networks.0.pause(); - handle.networks.1.pause(); + handle.network.pause(); } } } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index dd822e3a14..13819c3a33 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -12,11 +12,8 @@ use hotshot_types::{ use super::{test_builder::TestDescription, test_runner::TestRunner}; -/// convience type alias for the networks available -pub type Networks = ( - Arc<>::QuorumNetwork>, - Arc<>::QuorumNetwork>, -); +/// A type alias to help readability +pub type Network = Arc<>::Network>; /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. pub type Generator = Box T + 'static>; @@ -24,7 +21,7 @@ pub type Generator = Box T + 'static>; /// generators for resources used by each node pub struct ResourceGenerators> { /// generate channels - pub channel_generator: AsyncGenerator>, + pub channel_generator: AsyncGenerator>, /// generate new storage for each node pub storage: Generator>, /// configuration used to generate each hotshot node diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0fa3f5cc19..0e39bf2b75 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -45,7 +45,7 @@ use crate::{ block_builder::TestBuilderImplementation, completion_task::CompletionTaskDescription, spinning_task::{ChangeNode, SpinningTask, UpDown}, - test_launcher::{Networks, TestLauncher}, + test_launcher::{Network, TestLauncher}, test_task::{TestResult, TestTask}, txn_task::TxnTaskDescription, view_sync_task::ViewSyncTask, @@ -61,7 +61,7 @@ impl< > TestRunner where I: TestableNodeImplementation, - I: NodeImplementation>, + I: NodeImplementation>, { /// execute test /// @@ -210,8 +210,7 @@ where // wait for networks to be ready for node in &*nodes { - node.networks.0.wait_for_ready().await; - node.networks.1.wait_for_ready().await; + node.network.wait_for_ready().await; } // Start hotshot @@ -361,14 +360,12 @@ where .try_into() .expect("Non-empty by construction"); - let networks = (self.launcher.resource_generator.channel_generator)(node_id).await; + let network = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); - let network0 = networks.0.clone(); - let network1 = networks.1.clone(); + let network_clone = network.clone(); let networks_ready_future = async move { - network0.wait_for_ready().await; - network1.wait_for_ready().await; + network_clone.wait_for_ready().await; }; networks_ready.push(networks_ready_future); @@ -377,7 +374,7 @@ where self.late_start.insert( node_id, LateStartNode { - networks, + network, context: Right((storage, memberships, config)), }, ); @@ -394,7 +391,7 @@ where ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); let hotshot = Self::add_node_with_config( node_id, - networks.clone(), + network.clone(), memberships, initializer, config, @@ -406,12 +403,12 @@ where self.late_start.insert( node_id, LateStartNode { - networks, + network, context: Left(hotshot), }, ); } else { - uninitialized_nodes.push((node_id, networks, hotshot)); + uninitialized_nodes.push((node_id, network, hotshot)); } } @@ -422,7 +419,7 @@ where join_all(networks_ready).await; // Then start the necessary tasks - for (node_id, networks, hotshot) in uninitialized_nodes { + for (node_id, network, hotshot) in uninitialized_nodes { let handle = hotshot.run_tasks().await; match node_id.cmp(&(config.da_staked_committee_size as u64 - 1)) { @@ -442,7 +439,7 @@ where self.nodes.push(Node { node_id, - networks, + network, handle, }); } @@ -455,7 +452,7 @@ where /// if unable to initialize the node's `SystemContext` based on the config pub async fn add_node_with_config( node_id: u64, - networks: Networks, + network: Network, memberships: Memberships, initializer: HotShotInitializer, config: HotShotConfig, @@ -466,19 +463,13 @@ where let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); - let network_bundle = hotshot::Networks { - quorum_network: networks.0.clone(), - da_network: networks.1.clone(), - _pd: PhantomData, - }; - SystemContext::new( public_key, private_key, node_id, config, memberships, - network_bundle, + network, initializer, ConsensusMetricsValue::default(), storage, @@ -490,8 +481,8 @@ where pub struct Node> { /// The node's unique identifier pub node_id: u64, - /// The underlying networks belonging to the node - pub networks: Networks, + /// The underlying network belonging to the node + pub network: Network, /// The handle to the node's internals pub handle: SystemContextHandle, } @@ -508,8 +499,8 @@ pub type LateNodeContext = Either< /// A yet-to-be-started node that participates in tests pub struct LateStartNode> { - /// The underlying networks belonging to the node - pub networks: Networks, + /// The underlying network belonging to the node + pub network: Network, /// Either the context to which we will use to launch HotShot for initialized node when it's /// time, or the parameters that will be used to initialize the node and launch HotShot. pub context: LateNodeContext, diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index bb2f725312..24089edfab 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -39,7 +39,7 @@ async fn test_network_task() { let launcher = builder.gen_launcher::(node_id); - let networks = (launcher.resource_generator.channel_generator)(node_id).await; + let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); let config = launcher.resource_generator.config.clone(); @@ -51,10 +51,9 @@ async fn test_network_task() { known_nodes_with_stake, config.fixed_leader_for_gpuvid, ); - let channel = networks.0.clone(); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { - channel: channel.clone(), + channel: network.clone(), view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, @@ -71,7 +70,7 @@ async fn test_network_task() { let view = generator.next().await.unwrap(); let (out_tx, mut out_rx) = async_broadcast::broadcast(10); - add_network_message_test_task(out_tx.clone(), channel.clone()).await; + add_network_message_test_task(out_tx.clone(), network.clone()).await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( view.quorum_proposal, @@ -79,10 +78,11 @@ async fn test_network_task() { ))) .await .unwrap(); - let res: Arc> = async_timeout(Duration::from_millis(100), out_rx.recv_direct()) - .await - .expect("timed out waiting for response") - .expect("channel closed"); + let res: Arc> = + async_timeout(Duration::from_millis(100), out_rx.recv_direct()) + .await + .expect("timed out waiting for response") + .expect("channel closed"); assert!(matches!( res.as_ref(), HotShotEvent::QuorumProposalRecv(_, _) @@ -103,7 +103,7 @@ async fn test_network_storage_fail() { let launcher = builder.gen_launcher::(node_id); - let networks = (launcher.resource_generator.channel_generator)(node_id).await; + let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); storage.write().await.should_return_err = true; @@ -116,10 +116,9 @@ async fn test_network_storage_fail() { known_nodes_with_stake, config.fixed_leader_for_gpuvid, ); - let channel = networks.0.clone(); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { - channel: channel.clone(), + channel: network.clone(), view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, @@ -135,8 +134,9 @@ async fn test_network_storage_fail() { let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().await.unwrap(); - let (out_tx, mut out_rx): (Sender>>, _) = async_broadcast::broadcast(10); - add_network_message_test_task(out_tx.clone(), channel.clone()).await; + let (out_tx, mut out_rx): (Sender>>, _) = + async_broadcast::broadcast(10); + add_network_message_test_task(out_tx.clone(), network.clone()).await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( view.quorum_proposal, diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index a910fe18e6..5b07e09483 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -59,12 +59,8 @@ impl NodeType for Test { #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] pub struct TestImpl {} -pub type DaNetwork = MemoryNetwork<::SignatureKey>; -pub type QuorumNetwork = MemoryNetwork<::SignatureKey>; - impl NodeImplementation for TestImpl { - type QuorumNetwork = QuorumNetwork; - type DaNetwork = DaNetwork; + type Network = MemoryNetwork<::SignatureKey>; type Storage = TestStorage; } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index b186552e4a..d14482d0c9 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -383,7 +383,7 @@ where is_da: bool, reliability_config: Option>, secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)>; + ) -> AsyncGenerator>; /// Get the number of messages in-flight. /// diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index d6413a2fe1..80370ab2e8 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -43,11 +43,8 @@ use crate::{ pub trait NodeImplementation: Send + Sync + Clone + Eq + Hash + 'static + Serialize + for<'de> Deserialize<'de> { - /// Network for all nodes - type QuorumNetwork: ConnectedNetwork; - - /// Network for those in the DA committee - type DaNetwork: ConnectedNetwork; + /// The underlying network type + type Network: ConnectedNetwork; /// Storage for DA layer interactions type Storage: Storage; @@ -88,7 +85,7 @@ pub trait TestableNodeImplementation: NodeImplementation da_committee_size: usize, reliability_config: Option>, secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)>; + ) -> AsyncGenerator>; } #[async_trait] @@ -96,8 +93,7 @@ impl> TestableNodeImplementation, TYPES::BlockPayload: TestableBlock, - I::QuorumNetwork: TestableNetworkingImplementation, - I::DaNetwork: TestableNetworkingImplementation, + I::Network: TestableNetworkingImplementation, { fn state_create_random_transaction( state: Option<&TYPES::ValidatedState>, @@ -131,8 +127,8 @@ where da_committee_size: usize, reliability_config: Option>, secondary_network_delay: Duration, - ) -> AsyncGenerator<(Arc, Arc)> { - >::generator( + ) -> AsyncGenerator> { + >::generator( expected_node_count, num_bootstrap, 0, From 22e88663c889c941303472faa6e94c64fafe5fce Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 27 Jun 2024 10:26:29 -0400 Subject: [PATCH 1100/1393] remove combined fail counter increment (#3387) --- hotshot/src/traits/networking/combined_network.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 7d44947f05..5af4b07926 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -199,7 +199,6 @@ impl CombinedNetworks { // The task hasn't been cancelled, the primary probably failed. // Increment the primary fail counter and send the message. debug!("Sending on secondary after delay, message possibly has not reached recipient on primary"); - primary_fail_counter.fetch_add(1, Ordering::Relaxed); secondary_future.await }); Ok(()) From 209e700d436cc664f2c0d0b32bbae0b3f7f83f60 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 27 Jun 2024 17:07:01 +0200 Subject: [PATCH 1101/1393] [DEPENDENCY_REFACTOR] - Integrate upgradability into dependency tasks (#3379) * Integrate upgradability * Fix short arg * fix tests * Add tests * Use better test scripts --- hotshot/src/tasks/task_state.rs | 14 +- task-impls/src/consensus/helpers.rs | 169 +++++++++++++- task-impls/src/consensus/mod.rs | 1 + task-impls/src/consensus2/handlers.rs | 19 +- task-impls/src/consensus2/mod.rs | 9 +- .../src/quorum_proposal/dependency_handle.rs | 52 ++++- task-impls/src/quorum_proposal/mod.rs | 34 ++- .../src/quorum_proposal_recv/handlers.rs | 2 +- task-impls/src/quorum_proposal_recv/mod.rs | 18 +- task-impls/src/quorum_vote/handlers.rs | 17 +- task-impls/src/quorum_vote/mod.rs | 32 +-- testing/src/helpers.rs | 11 + testing/src/predicates/mod.rs | 7 +- .../{upgrade.rs => upgrade_with_consensus.rs} | 2 + .../src/predicates/upgrade_with_proposal.rs | 50 ++++ testing/src/predicates/upgrade_with_vote.rs | 50 ++++ testing/src/script.rs | 9 + testing/tests/tests_1/quorum_proposal_task.rs | 43 ++-- ...task.rs => upgrade_task_with_consensus.rs} | 15 +- .../tests_1/upgrade_task_with_proposal.rs | 220 ++++++++++++++++++ .../tests/tests_1/upgrade_task_with_vote.rs | 190 +++++++++++++++ types/src/consensus.rs | 20 +- types/src/data.rs | 46 +++- types/src/simple_certificate.rs | 28 ++- 24 files changed, 941 insertions(+), 117 deletions(-) rename testing/src/predicates/{upgrade.rs => upgrade_with_consensus.rs} (97%) create mode 100644 testing/src/predicates/upgrade_with_proposal.rs create mode 100644 testing/src/predicates/upgrade_with_vote.rs rename testing/tests/tests_1/{upgrade_task.rs => upgrade_task_with_consensus.rs} (98%) create mode 100644 testing/tests/tests_1/upgrade_task_with_proposal.rs create mode 100644 testing/tests/tests_1/upgrade_task_with_vote.rs diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index cde54d27ae..b768b18ae7 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -252,7 +252,8 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), - version: *handle.hotshot.version.read().await, + version: Arc::clone(&handle.hotshot.version), + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } } @@ -283,7 +284,9 @@ impl> CreateTaskState timeout_task, round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, - version: *handle.hotshot.version.read().await, + version: Arc::clone(&handle.hotshot.version), + formed_upgrade_certificate: None, + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } } @@ -312,13 +315,12 @@ impl> CreateTaskState round_start_delay: handle.hotshot.config.round_start_delay, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), - formed_upgrade_certificate: None, proposal_cert: None, - decided_upgrade_cert: None, spawned_tasks: BTreeMap::new(), instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, - version: *handle.hotshot.version.read().await, + version: Arc::clone(&handle.hotshot.version), + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } } @@ -350,6 +352,8 @@ impl> CreateTaskState consensus, last_decided_view: handle.cur_view().await, id: handle.hotshot.id, + version: Arc::clone(&handle.hotshot.version), + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } } diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index c1a7bbdbd5..7498b569f2 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -3,14 +3,13 @@ use std::{ sync::Arc, }; -use crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}; -use anyhow::bail; -use anyhow::{ensure, Context, Result}; +use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{broadcast, Sender}; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; #[cfg(not(feature = "dependency-tasks"))] #[cfg(async_executor_impl = "async-std")] +#[cfg(not(feature = "dependency-tasks"))] use async_std::task::JoinHandle; use committable::{Commitment, Committable}; use hotshot_types::{ @@ -53,8 +52,15 @@ use { vbs::version::Version, }; -use crate::{events::HotShotEvent, helpers::broadcast_event}; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + helpers::broadcast_event, + request::REQUEST_TIMEOUT, +}; +// TODO: Replace this function with `validate_proposal_safety_and_liveness` after the following +// issue is done: +// https://github.com/EspressoSystems/HotShot/issues/3357. /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. /// @@ -62,7 +68,8 @@ use crate::{events::HotShotEvent, helpers::broadcast_event}; /// we merge the dependency tasks. #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] -pub async fn validate_proposal_safety_and_liveness( +#[cfg(not(feature = "dependency-tasks"))] +pub async fn temp_validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, consensus: Arc>>, @@ -126,7 +133,145 @@ pub async fn validate_proposal_safety_and_liveness( UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; + proposed_leaf.temp_extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; + + let justify_qc = proposal.data.justify_qc.clone(); + // Create a positive vote if either liveness or safety check + // passes. + + // Liveness check. + let read_consensus = consensus.read().await; + let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = read_consensus.visit_leaf_ancestors( + justify_qc.view_number(), + Terminator::Inclusive(read_consensus.locked_view()), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number() != read_consensus.locked_view() + }, + ); + let safety_check = outcome.is_ok(); + + ensure!(safety_check || liveness_check, { + if let Err(e) = outcome { + broadcast_event( + Event { + view_number, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } + + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + }); + + // We accept the proposal, notify the application layer + + broadcast_event( + Event { + view_number, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }, + &event_sender, + ) + .await; + // Notify other tasks + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), + &event_stream, + ) + .await; + + Ok(()) +} + +/// Validate the state and safety and liveness of a proposal then emit +/// a `QuorumProposalValidated` event. +/// +/// TODO - This should just take the QuorumProposalRecv task state after +/// we merge the dependency tasks. +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +pub async fn validate_proposal_safety_and_liveness( + proposal: Proposal>, + parent_leaf: Leaf, + consensus: Arc>>, + decided_upgrade_certificate: Arc>>>, + quorum_membership: Arc, + view_leader_key: TYPES::SignatureKey, + event_stream: Sender>>, + sender: TYPES::SignatureKey, + event_sender: Sender>, +) -> Result<()> { + let view_number = proposal.data.view_number(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + ensure!( + proposed_leaf.parent_commitment() == parent_leaf.commit(), + "Proposed leaf does not extend the parent leaf." + ); + + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state, + delta: None, // May be updated to `Some` in the vote task. + }, + }; + + if let Err(e) = consensus + .write() + .await + .update_validated_state_map(view_number, view.clone()) + { + tracing::trace!("{e:?}"); + } + consensus + .write() + .await + .update_saved_leaves(proposed_leaf.clone()); + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + // + // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: + // + // proposal.validate_signature(&quorum_membership)?; + // + // in a future PR. + ensure!( + view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), + "Could not verify proposal." + ); + + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + + // Validate that the upgrade certificate is re-attached, if we saw one on the parent + proposed_leaf + .extends_upgrade(&parent_leaf, &decided_upgrade_certificate) + .await?; let justify_qc = proposal.data.justify_qc.clone(); // Create a positive vote if either liveness or safety check @@ -453,7 +598,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( if !proposal_upgrade_certificate .clone() - .is_some_and(|cert| cert.is_relevant(view, decided_upgrade_cert).is_ok()) + .is_some_and(|cert| cert.temp_is_relevant(view, decided_upgrade_cert).is_ok()) { proposal_upgrade_certificate = None; } @@ -804,7 +949,7 @@ pub(crate) async fn handle_quorum_proposal_recv { pub included_txns: Option::Transaction>>>, /// The most recent upgrade certificate from one of the leaves. - pub decided_upgrade_cert: Option>, + pub decided_upgrade_certificate: Option>, } /// We need Default to be implemented because the leaf ascension has very few failure branches, @@ -858,7 +1003,7 @@ impl Default for LeafChainTraversalOutcome { leaf_views: Vec::new(), leaves_decided: Vec::new(), included_txns: None, - decided_upgrade_cert: None, + decided_upgrade_certificate: None, } } } @@ -956,7 +1101,7 @@ pub async fn decide_from_proposal( warn!("Failed to decide an upgrade certificate in time. Ignoring."); } else { info!("Reached decide on upgrade certificate: {:?}", cert); - res.decided_upgrade_cert = Some(cert.clone()); + res.decided_upgrade_certificate = Some(cert.clone()); } } } @@ -1029,7 +1174,7 @@ pub async fn handle_quorum_proposal_validated> ConsensusTaskState }; } }, + #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::UpgradeCertificateFormed(cert) => { debug!( "Upgrade certificate received for view {}!", diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index d37bbb6e8c..c11c96c3db 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -14,7 +14,7 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::debug; +use tracing::{debug, warn}; use super::Consensus2TaskState; use crate::{ @@ -133,6 +133,23 @@ pub(crate) async fn handle_view_change> { /// The node's id pub id: u64, + + /// Globally shared reference to the current network version. + pub version: Arc>, + + /// An upgrade certificate that has been decided on, if any. + pub decided_upgrade_certificate: Arc>>>, } impl> Consensus2TaskState { /// Handles a consensus event received on the event stream diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/dependency_handle.rs index 3bbd002083..fcf5975141 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/dependency_handle.rs @@ -16,6 +16,7 @@ use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, + simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -83,8 +84,19 @@ pub struct ProposalDependencyHandle { /// Shared consensus task state pub consensus: Arc>>, - /// The current version of consensus - pub version: Version, + /// Globally shared reference to the current network version. + pub version: Arc>, + + /// The most recent upgrade certificate this node formed. + /// Note: this is ONLY for certificates that have been formed internally, + /// so that we can propose with them. + /// + /// Certificates received from other nodes will get reattached regardless of this fields, + /// since they will be present in the leaf we propose off of. + pub formed_upgrade_certificate: Option>, + + /// An upgrade certificate that has been decided on, if any. + pub decided_upgrade_certificate: Arc>>>, } impl ProposalDependencyHandle { @@ -96,6 +108,8 @@ impl ProposalDependencyHandle { commitment_and_metadata: CommitmentAndMetadata, vid_share: Proposal>, view_change_evidence: Option>, + formed_upgrade_certificate: Option>, + decided_upgrade_certificate: Arc>>>, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, @@ -105,6 +119,34 @@ impl ProposalDependencyHandle { ) .await?; + // In order of priority, we should try to attach: + // - the parent certificate if it exists, or + // - our own certificate that we formed. + // In either case, we need to ensure that the certificate is still relevant. + // + // Note: once we reach a point of potentially propose with our formed upgrade certificate, + // we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose + // to discard it. + // + // It is possible that multiple nodes form separate upgrade certificates for the some + // upgrade if we are not careful about voting. But this shouldn't bother us: the first + // leader to propose is the one whose certificate will be used. And if that fails to reach + // a decide for whatever reason, we may lose our own certificate, but something will likely + // have gone wrong there anyway. + let mut upgrade_certificate = parent_leaf + .upgrade_certificate() + .or(formed_upgrade_certificate); + + if let Some(cert) = upgrade_certificate.clone() { + if cert + .is_relevant(self.view_number, Arc::clone(&decided_upgrade_certificate)) + .await + .is_err() + { + upgrade_certificate = None; + } + } + let proposal_certificate = view_change_evidence .as_ref() .filter(|cert| cert.is_valid_for_view(&self.view_number)) @@ -124,7 +166,7 @@ impl ProposalDependencyHandle { commitment_and_metadata.metadata, commitment_and_metadata.fee, vid_share.data.common.clone(), - self.version, + *self.version.read().await, ) .await .context("Failed to construct block header")?; @@ -134,7 +176,7 @@ impl ProposalDependencyHandle { view_number: self.view_number, justify_qc: self.consensus.read().await.high_qc().clone(), proposal_certificate, - upgrade_certificate: None, + upgrade_certificate, }; let proposed_leaf = Leaf::from_quorum_proposal(&proposal); @@ -272,6 +314,8 @@ impl HandleDepOutput for ProposalDependencyHandle { commit_and_metadata.unwrap(), vid_share.unwrap(), proposal_cert, + self.formed_upgrade_certificate.clone(), + Arc::clone(&self.decided_upgrade_certificate), ) .await { diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index f755ccf2b0..2584c16c87 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -15,6 +15,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::Consensus, event::Event, + simple_certificate::UpgradeCertificate, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -83,8 +84,19 @@ pub struct QuorumProposalTaskState /// The node's id pub id: u64, - /// Current version of consensus - pub version: Version, + /// Globally shared reference to the current network version. + pub version: Arc>, + + /// The most recent upgrade certificate this node formed. + /// Note: this is ONLY for certificates that have been formed internally, + /// so that we can propose with them. + /// + /// Certificates received from other nodes will get reattached regardless of this fields, + /// since they will be present in the leaf we propose off of. + pub formed_upgrade_certificate: Option>, + + /// An upgrade certificate that has been decided on, if any. + pub decided_upgrade_certificate: Arc>>>, } impl> QuorumProposalTaskState { @@ -306,7 +318,9 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState>>, ) { match event.as_ref() { - HotShotEvent::VersionUpgrade(version) => { - self.version = *version; + HotShotEvent::UpgradeCertificateFormed(cert) => { + debug!( + "Upgrade certificate received for view {}!", + *cert.view_number + ); + + // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. + if cert.data.decide_by >= self.latest_proposed_view + 3 { + debug!("Updating current formed_upgrade_certificate"); + + self.formed_upgrade_certificate = Some(cert.clone()); + } } HotShotEvent::QcFormed(cert) => match cert.clone() { either::Right(timeout_cert) => { diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index b191b594c7..44dc4ca8b0 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -227,7 +227,7 @@ pub(crate) async fn handle_quorum_proposal_recv>, - /// The most recent upgrade certificate this node formed. - /// Note: this is ONLY for certificates that have been formed internally, - /// so that we can propose with them. - /// - /// Certificates received from other nodes will get reattached regardless of this fields, - /// since they will be present in the leaf we propose off of. - pub formed_upgrade_certificate: Option>, - /// last View Sync Certificate or Timeout Certificate this node formed. pub proposal_cert: Option>, - /// most recent decided upgrade certificate - pub decided_upgrade_cert: Option>, - /// Spawned tasks related to a specific view, so we can cancel them when /// they are stale pub spawned_tasks: BTreeMap>>, @@ -103,8 +92,11 @@ pub struct QuorumProposalRecvTaskState>, + + /// An upgrade certificate that has been decided on, if any. + pub decided_upgrade_certificate: Arc>>>, } impl> QuorumProposalRecvTaskState { diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index ae60330f3e..70577f4523 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -27,6 +27,7 @@ pub(crate) async fn handle_quorum_proposal_validated< sender: &Sender>>, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { + let decided_upgrade_certificate_read = task_state.decided_upgrade_certificate.read().await; let LeafChainTraversalOutcome { new_locked_view_number, new_decided_view_number, @@ -34,14 +35,24 @@ pub(crate) async fn handle_quorum_proposal_validated< leaf_views, leaves_decided, included_txns, - .. + decided_upgrade_certificate, } = decide_from_proposal( proposal, Arc::clone(&task_state.consensus), - &None, + &decided_upgrade_certificate_read, &task_state.public_key, ) .await; + drop(decided_upgrade_certificate_read); + + if let Some(cert) = decided_upgrade_certificate.clone() { + let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; + *decided_certificate_lock = Some(cert.clone()); + drop(decided_certificate_lock); + let _ = sender + .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) + .await; + } let mut consensus_writer = task_state.consensus.write().await; if let Some(locked_view_number) = new_locked_view_number { @@ -55,8 +66,6 @@ pub(crate) async fn handle_quorum_proposal_validated< consensus_writer.update_locked_view(locked_view_number)?; } - // TODO - update decided upgrade cert - #[allow(clippy::cast_precision_loss)] if let Some(decided_view_number) = new_decided_view_number { // Bring in the cleanup crew. When a new decide is indeed valid, we need to clear out old memory. diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 9018db610b..bd0c1c0ace 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -14,9 +14,10 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::Consensus, - data::{Leaf, VidDisperseShare}, + data::{Leaf, VidDisperseShare, ViewNumber}, event::Event, message::Proposal, + simple_certificate::UpgradeCertificate, simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, @@ -80,8 +81,8 @@ struct VoteDependencyHandle> { sender: Sender>>, /// Event receiver. receiver: Receiver>>, - /// The current version of HotShot - version: Version, + /// Globally shared reference to the current network version. + pub version: Arc>, /// The node's id id: u64, } @@ -133,7 +134,7 @@ impl + 'static> VoteDependencyHand &parent, &proposed_leaf.block_header().clone(), vid_share.data.common.clone(), - self.version, + *self.version.read().await, ) .await .context("Block header doesn't extend the proposal!")?; @@ -231,12 +232,14 @@ impl + 'static> HandleDepOutput #[allow(clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { let high_qc_view_number = self.consensus.read().await.high_qc().view_number; - if !self - .consensus - .read() - .await - .validated_state_map() - .contains_key(&high_qc_view_number) + // The validated state of a non-genesis high QC should exist in the state map. + if *high_qc_view_number != *ViewNumber::genesis() + && !self + .consensus + .read() + .await + .validated_state_map() + .contains_key(&high_qc_view_number) { // Block on receiving the event from the event stream. EventDependency::new( @@ -384,8 +387,11 @@ pub struct QuorumVoteTaskState> { /// Reference to the storage. pub storage: Arc>, - /// The curent version of HotShot - pub version: Version, + /// Globally shared reference to the current network version. + pub version: Arc>, + + /// An upgrade certificate that has been decided on, if any. + pub decided_upgrade_certificate: Arc>>>, } impl> QuorumVoteTaskState { @@ -507,7 +513,7 @@ impl> QuorumVoteTaskState::Membership, + view: ViewNumber, +) -> ::Commit { + // Make some empty encoded transactions, we just care about having a commitment handy for the + // later calls. We need the VID commitment to be able to propose later. + let mut vid = vid_scheme_from_view_number::(membership, view); + let encoded_transactions = Vec::new(); + vid.commit_only(&encoded_transactions).unwrap() +} + /// TODO: #[allow(clippy::type_complexity)] pub fn build_vid_proposal( diff --git a/testing/src/predicates/mod.rs b/testing/src/predicates/mod.rs index 9042b41068..7b2acb7bd1 100644 --- a/testing/src/predicates/mod.rs +++ b/testing/src/predicates/mod.rs @@ -1,5 +1,10 @@ pub mod event; -pub mod upgrade; +#[cfg(not(feature = "dependency-tasks"))] +pub mod upgrade_with_consensus; +#[cfg(feature = "dependency-tasks")] +pub mod upgrade_with_proposal; +#[cfg(feature = "dependency-tasks")] +pub mod upgrade_with_vote; use async_trait::async_trait; diff --git a/testing/src/predicates/upgrade.rs b/testing/src/predicates/upgrade_with_consensus.rs similarity index 97% rename from testing/src/predicates/upgrade.rs rename to testing/src/predicates/upgrade_with_consensus.rs index b5f33ec56c..a733c8e56a 100644 --- a/testing/src/predicates/upgrade.rs +++ b/testing/src/predicates/upgrade_with_consensus.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "dependency-tasks"))] + use std::sync::Arc; use async_trait::async_trait; diff --git a/testing/src/predicates/upgrade_with_proposal.rs b/testing/src/predicates/upgrade_with_proposal.rs new file mode 100644 index 0000000000..1af9a8dd42 --- /dev/null +++ b/testing/src/predicates/upgrade_with_proposal.rs @@ -0,0 +1,50 @@ +#![cfg(feature = "dependency-tasks")] + +use std::sync::Arc; + +use async_trait::async_trait; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; +use hotshot_types::simple_certificate::UpgradeCertificate; + +use crate::predicates::{Predicate, PredicateResult}; + +type QuorumProposalTaskTestState = QuorumProposalTaskState; + +type UpgradeCertCallback = + Arc>>) -> bool + Send + Sync>; + +pub struct UpgradeCertPredicate { + check: UpgradeCertCallback, + info: String, +} + +impl std::fmt::Debug for UpgradeCertPredicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.info) + } +} + +#[async_trait] +impl Predicate for UpgradeCertPredicate { + async fn evaluate(&self, input: &QuorumProposalTaskTestState) -> PredicateResult { + let upgrade_cert = input.decided_upgrade_certificate.read().await.clone(); + PredicateResult::from((self.check)(upgrade_cert.into())) + } + + async fn info(&self) -> String { + self.info.clone() + } +} + +pub fn no_decided_upgrade_cert() -> Box { + let info = "expected decided_upgrade_cert to be None".to_string(); + let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); + Box::new(UpgradeCertPredicate { info, check }) +} + +pub fn decided_upgrade_cert() -> Box { + let info = "expected decided_upgrade_cert to be Some(_)".to_string(); + let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); + Box::new(UpgradeCertPredicate { info, check }) +} diff --git a/testing/src/predicates/upgrade_with_vote.rs b/testing/src/predicates/upgrade_with_vote.rs new file mode 100644 index 0000000000..12bcf72c10 --- /dev/null +++ b/testing/src/predicates/upgrade_with_vote.rs @@ -0,0 +1,50 @@ +#![cfg(feature = "dependency-tasks")] + +use std::sync::Arc; + +use async_trait::async_trait; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_task_impls::quorum_vote::QuorumVoteTaskState; +use hotshot_types::simple_certificate::UpgradeCertificate; + +use crate::predicates::{Predicate, PredicateResult}; +type QuorumVoteTaskTestState = QuorumVoteTaskState; + +type UpgradeCertCallback = + Arc>>) -> bool + Send + Sync>; + +pub struct UpgradeCertPredicate { + check: UpgradeCertCallback, + info: String, +} + +impl std::fmt::Debug for UpgradeCertPredicate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.info) + } +} + +#[cfg(feature = "dependency-tasks")] +#[async_trait] +impl Predicate for UpgradeCertPredicate { + async fn evaluate(&self, input: &QuorumVoteTaskTestState) -> PredicateResult { + let upgrade_cert = input.decided_upgrade_certificate.read().await.clone(); + PredicateResult::from((self.check)(upgrade_cert.into())) + } + + async fn info(&self) -> String { + self.info.clone() + } +} + +pub fn no_decided_upgrade_cert() -> Box { + let info = "expected decided_upgrade_cert to be None".to_string(); + let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); + Box::new(UpgradeCertPredicate { info, check }) +} + +pub fn decided_upgrade_cert() -> Box { + let info = "expected decided_upgrade_cert to be Some(_)".to_string(); + let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); + Box::new(UpgradeCertPredicate { info, check }) +} diff --git a/testing/src/script.rs b/testing/src/script.rs index 84be2d6d9e..47ef255f15 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -49,6 +49,15 @@ impl Expectations { task_state_asserts: vec![], } } + pub fn from_outputs_and_task_states( + output_asserts: Vec>>>>, + task_state_asserts: Vec>>, + ) -> Self { + Self { + output_asserts, + task_state_asserts, + } + } } pub fn panic_extra_output_in_script(stage_number: usize, script_name: String, output: &S) diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 905bfefcba..54a2f7767d 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -19,7 +19,7 @@ use hotshot_testing::{ all_predicates, helpers::{ build_fake_view_with_leaf, build_system_handle, - vid_scheme_from_view_number, + build_payload_commitment }, predicates::{ event::{all_predicates, exact, quorum_proposal_send}, @@ -34,28 +34,15 @@ use hotshot_types::{ simple_vote::{TimeoutData, ViewSyncFinalizeData}, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime}, }, utils::BuilderCommitment, - vid::VidSchemeType, vote::HasViewNumber, }; -use jf_vid::VidScheme; use sha2::Digest; const TIMEOUT: Duration = Duration::from_millis(35); -fn make_payload_commitment( - membership: &::Membership, - view: ViewNumber, -) -> ::Commit { - // Make some empty encoded transactions, we just care about having a commitment handy for the - // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(membership, view); - let encoded_transactions = Vec::new(); - vid.commit_only(&encoded_transactions).unwrap() -} - #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -70,7 +57,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -195,7 +182,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), TestMetadata, ViewNumber::new(1), @@ -211,7 +198,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalRecv(proposals[0].clone(), leaders[0]), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), TestMetadata, ViewNumber::new(2), @@ -227,7 +214,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalRecv(proposals[1].clone(), leaders[1]), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), TestMetadata, ViewNumber::new(3), @@ -243,7 +230,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalRecv(proposals[2].clone(), leaders[2]), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment(&quorum_membership, ViewNumber::new(4)), builder_commitment.clone(), TestMetadata, ViewNumber::new(4), @@ -259,7 +246,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalRecv(proposals[3].clone(), leaders[3]), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment(&quorum_membership, ViewNumber::new(5)), builder_commitment, TestMetadata, ViewNumber::new(5), @@ -321,7 +308,7 @@ async fn test_quorum_proposal_task_qc_timeout() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -400,7 +387,7 @@ async fn test_quorum_proposal_task_view_sync() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = make_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -521,7 +508,7 @@ async fn test_quorum_proposal_task_liveness_check() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), TestMetadata, ViewNumber::new(1), @@ -537,7 +524,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalRecv(proposals[0].clone(), leaders[0]), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), TestMetadata, ViewNumber::new(2), @@ -553,7 +540,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalRecv(proposals[1].clone(), leaders[1]), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), TestMetadata, ViewNumber::new(3), @@ -570,7 +557,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalRecv(proposals[2].clone(), leaders[2]), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment(&quorum_membership, ViewNumber::new(4)), builder_commitment.clone(), TestMetadata, ViewNumber::new(4), @@ -586,7 +573,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalRecv(proposals[3].clone(), leaders[3]), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - make_payload_commitment(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment(&quorum_membership, ViewNumber::new(5)), builder_commitment, TestMetadata, ViewNumber::new(5), diff --git a/testing/tests/tests_1/upgrade_task.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs similarity index 98% rename from testing/tests/tests_1/upgrade_task.rs rename to testing/tests/tests_1/upgrade_task_with_consensus.rs index 61dd5a37c1..11027dda70 100644 --- a/testing/tests/tests_1/upgrade_task.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "dependency-tasks"))] + // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] @@ -12,11 +14,11 @@ use hotshot_example_types::{ }; use hotshot_macros::test_scripts; use hotshot_task_impls::{ - consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, + consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf, vid_share}, - predicates::{event::*, upgrade::*}, + predicates::{event::*, upgrade_with_consensus::*}, script::{Expectations, TaskScript}, view_generator::TestViewGenerator, }; @@ -28,11 +30,10 @@ use hotshot_types::{ }; use vbs::version::Version; -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. -async fn test_consensus_task_upgrade() { +async fn test_upgrade_task_vote() { use hotshot_testing::helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); @@ -181,14 +182,13 @@ async fn test_consensus_task_upgrade() { test_scripts![inputs, consensus_script].await; } -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. -async fn test_upgrade_and_consensus_task() { +async fn test_upgrade_task_propose() { use std::sync::Arc; use hotshot_testing::helpers::build_system_handle; @@ -336,7 +336,6 @@ async fn test_upgrade_and_consensus_task() { test_scripts![inputs, consensus_script, upgrade_script].await; } -#[cfg(not(feature = "dependency-tasks"))] #[cfg_attr( async_executor_impl = "tokio", tokio::test(flavor = "multi_thread", worker_threads = 2) @@ -348,7 +347,7 @@ async fn test_upgrade_and_consensus_task() { /// - we correctly vote affirmatively on a QuorumProposal with a null block payload in view 5 /// - we correctly propose with a null block payload in view 6, even if we have indications to do otherwise (via SendPayloadCommitmentAndMetadata, VID etc). /// - we correctly reject a QuorumProposal with a non-null block payload in view 7. -async fn test_upgrade_and_consensus_task_blank_blocks() { +async fn test_upgrade_task_blank_blocks() { use hotshot_testing::helpers::build_system_handle; async_compatibility_layer::logging::setup_logging(); diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs new file mode 100644 index 0000000000..ffdc025753 --- /dev/null +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -0,0 +1,220 @@ +#![cfg(feature = "dependency-tasks")] + +// TODO: Remove after integration of dependency-tasks +#![allow(unused_imports)] + +use std::time::Duration; + +use futures::StreamExt; +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot_example_types::{ + block_types::{TestMetadata, TestTransaction}, + node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, +}; +use sha2::Digest; +use hotshot_macros::{test_scripts, run_test}; +use hotshot_task_impls::{ + quorum_proposal::QuorumProposalTaskState, + consensus::ConsensusTaskState, consensus2::Consensus2TaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState +}; +use hotshot_testing::{ + helpers::{build_fake_view_with_leaf, vid_share,build_payload_commitment}, + predicates::{event::*, upgrade_with_proposal::*}, + script::{Expectations, InputOrder,TaskScript}, + view_generator::TestViewGenerator, + all_predicates, random, serial +}; +use hotshot_types::{ + data::{null_block,Leaf, ViewNumber}, + simple_vote::UpgradeProposalData, + traits::{election::Membership,ValidatedState, node_implementation::ConsensusTime}, + vote::HasViewNumber, + utils::BuilderCommitment +}; +use hotshot_example_types::state_types::TestValidatedState; +use vbs::version::Version; + +const TIMEOUT: Duration = Duration::from_millis(35); + +#[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) +)] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. +async fn test_upgrade_task_with_proposal() { + use std::sync::Arc; + + use hotshot_testing::helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(3).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let other_handles = futures::future::join_all((0..=9).map(build_system_handle)).await; + + let old_version = Version { major: 0, minor: 1 }; + let new_version = Version { major: 0, minor: 2 }; + + let upgrade_data: UpgradeProposalData = UpgradeProposalData { + old_version, + new_version, + decide_by: ViewNumber::new(4), + new_version_hash: [0u8; 12].to_vec(), + old_version_last_view: ViewNumber::new(5), + new_version_first_view: ViewNumber::new(7), + }; + + let mut proposals = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vid_dispersals = Vec::new(); + let mut leaders = Vec::new(); + let mut leaves = Vec::new(); + let mut views = Vec::new(); + let consensus = handle.hotshot.consensus(); + let mut consensus_writer = consensus.write().await; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + for view in (&mut generator).take(1).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vid_dispersals.push(view.vid_disperse.clone()); + leaders.push(view.leader_public_key); + views.push(view.clone()); + consensus_writer + .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ).unwrap(); + } + + generator.add_upgrade(upgrade_data.clone()); + + for view in generator.take(4).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vid_dispersals.push(view.vid_disperse.clone()); + leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); + views.push(view.clone()); + consensus_writer + .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ).unwrap(); + } + drop(consensus_writer); + + let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); + let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; + let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + let upgrade_votes = other_handles + .iter() + .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); + + let proposal_state = QuorumProposalTaskState::::create_from(&handle).await; + let upgrade_state = UpgradeTaskState::::create_from(&handle).await; + + let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); + + let inputs = vec![ + random![ + QcFormed(either::Left(genesis_cert.clone())), + SendPayloadCommitmentAndMetadata( + build_payload_commitment(&quorum_membership, ViewNumber::new(1)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(1), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), + VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), + ValidatedStateUpdated( + genesis_cert.view_number(), + build_fake_view_with_leaf(genesis_leaf.clone()), + ), + ], + random![ + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + QcFormed(either::Left(proposals[1].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + build_payload_commitment(&quorum_membership, ViewNumber::new(2)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(2), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), + VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), + ValidatedStateUpdated( + proposals[0].data.view_number(), + build_fake_view_with_leaf(leaves[0].clone()), + ), + ], + InputOrder::Random(upgrade_vote_recvs), + random![ + QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QcFormed(either::Left(proposals[2].data.justify_qc.clone())), + SendPayloadCommitmentAndMetadata( + build_payload_commitment(&quorum_membership, ViewNumber::new(3)), + builder_commitment.clone(), + TestMetadata, + ViewNumber::new(3), + null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + ), + VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), + ValidatedStateUpdated( + proposals[1].data.view_number(), + build_fake_view_with_leaf(leaves[1].clone()), + ), + ], + ]; + + let mut proposal_script = TaskScript { + timeout: TIMEOUT, + state: proposal_state, + expectations: vec![ + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(genesis_cert.clone())), + exact(HighQcUpdated(genesis_cert.clone())), + ]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), + ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![ + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), + quorum_proposal_send_with_upgrade_certificate::() + ]), + ], + }; + + let mut upgrade_script = TaskScript { + timeout: TIMEOUT, + state: upgrade_state, + expectations: vec![ + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), + Expectations { + output_asserts: vec![upgrade_certificate_formed::()], + task_state_asserts: vec![], + }, + Expectations::from_outputs(vec![]), + ], + }; + + run_test![inputs, proposal_script, upgrade_script].await; +} diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs new file mode 100644 index 0000000000..f22a3f0b52 --- /dev/null +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -0,0 +1,190 @@ +#![cfg(feature = "dependency-tasks")] + +// TODO: Remove after integration of dependency-tasks +#![allow(unused_imports)] + +use std::time::Duration; + +use futures::StreamExt; +use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot_example_types::{ + block_types::{TestMetadata, TestTransaction}, + node_types::{MemoryImpl, TestTypes}, + state_types::TestInstanceState, +}; +use hotshot_macros::{run_test, test_scripts}; +use hotshot_task_impls::{ + consensus::ConsensusTaskState, consensus2::Consensus2TaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, upgrade::UpgradeTaskState +}; +use hotshot_testing::{ + helpers::{build_fake_view_with_leaf, vid_share}, + predicates::{event::*, upgrade_with_vote::*}, + script::{Expectations, TaskScript, InputOrder}, + view_generator::TestViewGenerator, + random, all_predicates +}; +use hotshot_types::{ + data::{null_block, ViewNumber}, + simple_vote::UpgradeProposalData, + traits::{election::Membership, node_implementation::ConsensusTime}, + vote::HasViewNumber, +}; +use vbs::version::Version; + +const TIMEOUT: Duration = Duration::from_millis(65); + +#[cfg(feature = "dependency-tasks")] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +/// Tests that we correctly update our internal quorum vote state when reaching a decided upgrade +/// certificate. +async fn test_upgrade_task_with_vote() { + use hotshot_testing::helpers::build_system_handle; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle(2).await.0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let old_version = Version { major: 0, minor: 1 }; + let new_version = Version { major: 0, minor: 2 }; + + let upgrade_data: UpgradeProposalData = UpgradeProposalData { + old_version, + new_version, + decide_by: ViewNumber::new(6), + new_version_hash: [0u8; 12].to_vec(), + old_version_last_view: ViewNumber::new(6), + new_version_first_view: ViewNumber::new(7), + }; + + let mut proposals = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + let mut leaders = Vec::new(); + let mut leaves = Vec::new(); + let consensus = handle.hotshot.consensus().clone(); + let mut consensus_writer = consensus.write().await; + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + for view in (&mut generator).take(2).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); + consensus_writer.update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ).unwrap(); + consensus_writer.update_saved_leaves(view.leaf.clone()); + } + drop(consensus_writer); + + generator.add_upgrade(upgrade_data); + + for view in generator.take(4).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); + } + + let inputs = vec![ + random![ + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + DaCertificateRecv(dacs[1].clone()), + VidShareRecv(vids[1].0[0].clone()), + ], + random![ + QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + DaCertificateRecv(dacs[2].clone()), + VidShareRecv(vids[2].0[0].clone()), + ], + random![ + QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), + DaCertificateRecv(dacs[3].clone()), + VidShareRecv(vids[3].0[0].clone()), + ], + random![ + QuorumProposalValidated(proposals[4].data.clone(), leaves[3].clone()), + DaCertificateRecv(dacs[4].clone()), + VidShareRecv(vids[4].0[0].clone()), + ], + random![ + QuorumProposalValidated(proposals[5].data.clone(), leaves[5].clone()), + ], + ]; + + let expectations = vec![ + Expectations::from_outputs(all_predicates![ + exact(DaCertificateValidated(dacs[1].clone())), + exact(VidShareValidated(vids[1].0[0].clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + validated_state_updated(), + quorum_vote_send(), + ]), + Expectations::from_outputs_and_task_states( + all_predicates![ + exact(LockedViewUpdated(ViewNumber::new(1))), + exact(DaCertificateValidated(dacs[2].clone())), + exact(VidShareValidated(vids[2].0[0].clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(3))), + validated_state_updated(), + quorum_vote_send(), + ], + vec![no_decided_upgrade_cert()], + ), + Expectations::from_outputs_and_task_states( + all_predicates![ + exact(LockedViewUpdated(ViewNumber::new(2))), + exact(LastDecidedViewUpdated(ViewNumber::new(1))), + leaf_decided(), + exact(DaCertificateValidated(dacs[3].clone())), + exact(VidShareValidated(vids[3].0[0].clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(4))), + validated_state_updated(), + quorum_vote_send(), + ], + vec![no_decided_upgrade_cert()], + ), + Expectations::from_outputs_and_task_states( + all_predicates![ + exact(LockedViewUpdated(ViewNumber::new(3))), + exact(LastDecidedViewUpdated(ViewNumber::new(2))), + leaf_decided(), + exact(DaCertificateValidated(dacs[4].clone())), + exact(VidShareValidated(vids[4].0[0].clone())), + exact(QuorumVoteDependenciesValidated(ViewNumber::new(5))), + validated_state_updated(), + quorum_vote_send(), + ], + vec![no_decided_upgrade_cert()], + ), + Expectations::from_outputs_and_task_states( + all_predicates![ + upgrade_decided(), + exact(LockedViewUpdated(ViewNumber::new(4))), + exact(LastDecidedViewUpdated(ViewNumber::new(3))), + leaf_decided(), + ], + vec![decided_upgrade_cert()], + ), + ]; + + let vote_state = QuorumVoteTaskState::::create_from(&handle).await; + let mut vote_script = TaskScript { + timeout: TIMEOUT, + state: vote_state, + expectations, + }; + + + run_test![inputs, vote_script].await; +} diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 1add6ba71c..6cbc39719c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -15,7 +15,7 @@ use crate::{ data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, message::Proposal, - simple_certificate::{DaCertificate, QuorumCertificate, UpgradeCertificate}, + simple_certificate::{DaCertificate, QuorumCertificate}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, @@ -83,17 +83,6 @@ pub struct Consensus { /// A reference to the metrics trait pub metrics: Arc, - - /// The most recent upgrade certificate this node formed. - /// Note: this is ONLY for certificates that have been formed internally, - /// so that we can propose with them. - /// - /// Certificates received from other nodes will get reattached regardless of this fields, - /// since they will be present in the leaf we propose off of. - dontuse_formed_upgrade_certificate: Option>, - - /// most recent decided upgrade certificate - dontuse_decided_upgrade_cert: Option>, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces @@ -189,8 +178,6 @@ impl Consensus { saved_payloads, high_qc, metrics, - dontuse_decided_upgrade_cert: None, - dontuse_formed_upgrade_certificate: None, } } @@ -385,11 +372,6 @@ impl Consensus { self.saved_da_certs.insert(view_number, cert); } - /// Update the most recent decided upgrade certificate. - pub fn update_dontuse_decided_upgrade_cert(&mut self, cert: Option>) { - self.dontuse_decided_upgrade_cert = cert; - } - /// gather information from the parent chain of leaves /// # Errors /// If the leaf or its ancestors are not found in storage diff --git a/types/src/data.rs b/types/src/data.rs index f1e470df17..c060c0e261 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -13,6 +13,7 @@ use std::{ use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; use bincode::Options; @@ -605,6 +606,8 @@ impl Leaf { self.block_header().payload_commitment() } + // TODO: Replace this function with `extends_upgrade` after the following issue is done: + // https://github.com/EspressoSystems/HotShot/issues/3357. /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf /// /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. @@ -612,7 +615,7 @@ impl Leaf { /// # Errors /// Returns an error if the certificates are not identical, or that when we no longer see a /// cert, it's for the right reason. - pub fn extends_upgrade( + pub fn temp_extends_upgrade( &self, parent: &Self, decided_upgrade_certificate: &Option>, @@ -644,6 +647,47 @@ impl Leaf { Ok(()) } + + /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf + /// + /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. + /// + /// # Errors + /// Returns an error if the certificates are not identical, or that when we no longer see a + /// cert, it's for the right reason. + pub async fn extends_upgrade( + &self, + parent: &Self, + decided_upgrade_certificate: &Arc>>>, + ) -> Result<()> { + match (self.upgrade_certificate(), parent.upgrade_certificate()) { + // Easiest cases are: + // - no upgrade certificate on either: this is the most common case, and is always fine. + // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. + (None | Some(_), None) => {} + // If we no longer see a cert, we have to make sure that we either: + // - no longer care because we have passed new_version_first_view, or + // - no longer care because we have passed `decide_by` without deciding the certificate. + (None, Some(parent_cert)) => { + let decided_upgrade_certificate_read = decided_upgrade_certificate.read().await; + ensure!(self.view_number() > parent_cert.data.new_version_first_view + || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), + "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." + ); + } + // If we both have a certificate, they should be identical. + // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. + // I think this is a fairly lax restriction. + (Some(cert), Some(parent_cert)) => { + ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); + } + } + + // This check should be added once we sort out the genesis leaf/justify_qc issue. + // ensure!(self.parent_commitment() == parent_leaf.commit(), "The commitment of the parent leaf does not match the specified parent commitment."); + + Ok(()) + } } impl TestableLeaf for Leaf diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index b03aae3629..bf80724ab9 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -4,9 +4,11 @@ use std::{ fmt::{self, Debug, Display, Formatter}, hash::Hash, marker::PhantomData, + sync::Arc, }; use anyhow::{ensure, Result}; +use async_lock::RwLock; use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; @@ -152,12 +154,14 @@ impl Display for QuorumCertificate { } impl UpgradeCertificate { + // TODO: Replace this function with `is_relevant` after the following issue is done: + // https://github.com/EspressoSystems/HotShot/issues/3357. /// Determines whether or not a certificate is relevant (i.e. we still have time to reach a /// decide) /// /// # Errors /// Returns an error when the certificate is no longer relevant - pub fn is_relevant( + pub fn temp_is_relevant( &self, view_number: TYPES::Time, decided_upgrade_certificate: Option, @@ -171,6 +175,28 @@ impl UpgradeCertificate { Ok(()) } + /// Determines whether or not a certificate is relevant (i.e. we still have time to reach a + /// decide) + /// + /// # Errors + /// Returns an error when the certificate is no longer relevant + pub async fn is_relevant( + &self, + view_number: TYPES::Time, + decided_upgrade_certificate: Arc>>, + ) -> Result<()> { + let decided_upgrade_certificate_read = decided_upgrade_certificate.read().await; + ensure!( + self.data.decide_by >= view_number + || decided_upgrade_certificate_read + .clone() + .is_some_and(|cert| cert == *self), + "Upgrade certificate is no longer relevant." + ); + + Ok(()) + } + /// Validate an upgrade certificate. /// # Errors /// Returns an error when the upgrade certificate is invalid. From 31ff5c8c2ee000dc564b353acf2f4c56e05657cd Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 27 Jun 2024 11:14:26 -0600 Subject: [PATCH 1102/1393] [CX-Marketplace] - Define the AuctionResults Trait (#3386) * initial change * finish adding type everywhere * more docs, better naming * rename * ugh * I hate docs --- example-types/Cargo.toml | 1 + .../src/auction_results_provider_types.rs | 52 +++++++++++++++ example-types/src/lib.rs | 3 + example-types/src/node_types.rs | 5 ++ examples/combined/types.rs | 6 +- examples/infra/mod.rs | 31 +++++++-- examples/libp2p/types.rs | 6 +- examples/push-cdn/types.rs | 6 +- hotshot/src/lib.rs | 8 +++ testing/src/helpers.rs | 2 + testing/src/spinning_task.rs | 24 +++++-- testing/src/test_builder.rs | 6 +- testing/src/test_launcher.rs | 6 +- testing/src/test_runner.rs | 64 ++++++++++++++----- testing/tests/tests_3/memory_network.rs | 2 + types/src/traits.rs | 1 + types/src/traits/auction_results_provider.rs | 33 ++++++++++ types/src/traits/node_implementation.rs | 4 ++ 18 files changed, 230 insertions(+), 30 deletions(-) create mode 100644 example-types/src/auction_results_provider_types.rs create mode 100644 types/src/traits/auction_results_provider.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index d958b6d8f9..e7877def99 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -34,6 +34,7 @@ bitvec = { workspace = true } ethereum-types = { workspace = true } hotshot-task = { path = "../task" } vbs = { workspace = true } +url = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs new file mode 100644 index 0000000000..120edd073c --- /dev/null +++ b/example-types/src/auction_results_provider_types.rs @@ -0,0 +1,52 @@ +use anyhow::{bail, Result}; +use async_trait::async_trait; +use hotshot_types::traits::{ + auction_results_provider::{AuctionResultsProvider, HasUrl}, + node_implementation::NodeType, +}; +use url::Url; + +/// A mock result for the auction solver. This type is just a pointer to a URL. +#[derive(Debug, Clone)] +pub struct TestAuctionResult { + /// The URL of the builder to reach out to. + pub url: Url, +} + +impl HasUrl for TestAuctionResult { + fn url(&self) -> Url { + self.url.clone() + } +} + +/// The test auction results type is used to mimic the results from the Solver. +#[derive(Debug, Default)] +pub struct TestAuctionResultsProvider { + /// We intentionally allow for the results to be pre-cooked for the unit test to gurantee a + /// particular outcome is met. + pub solver_results: Vec, + + /// A canned type to ensure that an error is thrown in absence of a true fault-injectible + /// system for logical tests. This will guarantee that `fetch_auction_result` always throws an + /// error. + pub should_return_err: bool, +} + +#[async_trait] +impl AuctionResultsProvider for TestAuctionResultsProvider { + type AuctionResult = TestAuctionResult; + + /// Mock fetching the auction results, with optional error injection to simulate failure cases + /// in the solver. + async fn fetch_auction_result( + &self, + _view_number: TYPES::Time, + ) -> Result> { + if self.should_return_err { + bail!("Something went wrong") + } + + // Otherwise, return our pre-made results + Ok(self.solver_results.clone()) + } +} diff --git a/example-types/src/lib.rs b/example-types/src/lib.rs index 4049099852..d2b82cd90c 100644 --- a/example-types/src/lib.rs +++ b/example-types/src/lib.rs @@ -9,3 +9,6 @@ pub mod node_types; /// storage types for hotshot storage pub mod storage_types; + +/// auction types for solver-to-hotshot interactions +pub mod auction_results_provider_types; diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 45d7684a0c..4e196e4df8 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -11,6 +11,7 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use crate::{ + auction_results_provider_types::TestAuctionResultsProvider, block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, @@ -70,19 +71,23 @@ pub type StaticMembership = StaticCommittee; impl NodeImplementation for PushCdnImpl { type Network = PushCdnNetwork; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } impl NodeImplementation for MemoryImpl { type Network = MemoryNetwork; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } impl NodeImplementation for CombinedImpl { type Network = CombinedNetworks; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } impl NodeImplementation for Libp2pImpl { type Network = Libp2pNetwork; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } diff --git a/examples/combined/types.rs b/examples/combined/types.rs index a7be6c32bb..f15f4d8891 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,7 +1,10 @@ use std::fmt::Debug; use hotshot::traits::implementations::CombinedNetworks; -use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; +use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, + storage_types::TestStorage, +}; use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; @@ -17,6 +20,7 @@ pub type Network = CombinedNetworks; impl NodeImplementation for NodeImpl { type Network = Network; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// convenience type alias pub type ThisRun = CombinedDaRun; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 77af85f8d8..41cb3f7306 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -30,6 +30,7 @@ use hotshot::{ Memberships, SystemContext, }; use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, node_types::{Libp2pImpl, PushCdnImpl}, state_types::TestInstanceState, @@ -336,7 +337,12 @@ where pub trait RunDa< TYPES: NodeType, NETWORK: ConnectedNetwork, - NODE: NodeImplementation>, + NODE: NodeImplementation< + TYPES, + Network = NETWORK, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + >, > where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -400,6 +406,7 @@ pub trait RunDa< initializer, ConsensusMetricsValue::default(), TestStorage::::default(), + TestAuctionResultsProvider::default(), ) .await .expect("Could not init hotshot") @@ -593,7 +600,12 @@ impl< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - NODE: NodeImplementation, Storage = TestStorage>, + NODE: NodeImplementation< + TYPES, + Network = PushCdnNetwork, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + >, > RunDa, NODE> for PushCdnDaRun where ::ValidatedState: TestableState, @@ -669,6 +681,7 @@ impl< TYPES, Network = Libp2pNetwork, Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, >, > RunDa, NODE> for Libp2pDaRun where @@ -750,7 +763,12 @@ impl< BlockHeader = TestBlockHeader, InstanceState = TestInstanceState, >, - NODE: NodeImplementation, Storage = TestStorage>, + NODE: NodeImplementation< + TYPES, + Network = CombinedNetworks, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + >, > RunDa, NODE> for CombinedDaRun where ::ValidatedState: TestableState, @@ -815,7 +833,12 @@ pub async fn main_entry_point< InstanceState = TestInstanceState, >, NETWORK: ConnectedNetwork, - NODE: NodeImplementation>, + NODE: NodeImplementation< + TYPES, + Network = NETWORK, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + >, RUNDA: RunDa, >( args: ValidatorArgs, diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index f47a7e5bce..af73237d0b 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,7 +1,10 @@ use std::fmt::Debug; use hotshot::traits::implementations::Libp2pNetwork; -use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; +use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, + storage_types::TestStorage, +}; use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; @@ -17,6 +20,7 @@ pub type Network = Libp2pNetwork<::SignatureKey>; impl NodeImplementation for NodeImpl { type Network = Network; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// convenience type alias pub type ThisRun = Libp2pDaRun; diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 2d0047f352..c9721af3cb 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -1,5 +1,8 @@ use hotshot::traits::{implementations::PushCdnNetwork, NodeImplementation}; -use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; +use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, + storage_types::TestStorage, +}; use serde::{Deserialize, Serialize}; use crate::infra::PushCdnDaRun; @@ -14,6 +17,7 @@ pub type Network = PushCdnNetwork; impl NodeImplementation for NodeImpl { type Network = Network; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// Convenience type alias diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index c8e0a30e3e..5d6ffa0fc3 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -139,6 +139,9 @@ pub struct SystemContext> { /// a potential upgrade certificate that has been decided on by the consensus tasks. pub decided_upgrade_certificate: Arc>>>, + + /// Reference to the AuctionResultsProvider type for acquiring solver results. + pub auction_results_provider: Arc, } impl> Clone for SystemContext { #![allow(deprecated)] @@ -161,6 +164,7 @@ impl> Clone for SystemContext> SystemContext { initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, ) -> Arc { debug!("Creating a new hotshot"); @@ -277,6 +282,7 @@ impl> SystemContext { anchored_leaf: anchored_leaf.clone(), storage: Arc::new(RwLock::new(storage)), decided_upgrade_certificate, + auction_results_provider: Arc::new(auction_results_provider), }); inner @@ -510,6 +516,7 @@ impl> SystemContext { initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, ) -> Result< ( SystemContextHandle, @@ -528,6 +535,7 @@ impl> SystemContext { initializer, metrics, storage, + auction_results_provider, ); let handle = Arc::clone(&hotshot).run_tasks().await; let (tx, rx) = hotshot.internal_event_stream.clone(); diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index d2c30aa575..c7c54bdb55 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -52,6 +52,7 @@ pub async fn build_system_handle( let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); + let auction_results_provider = (launcher.resource_generator.auction_results_provider)(node_id); let config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) @@ -97,6 +98,7 @@ pub async fn build_system_handle( initializer, ConsensusMetricsValue::default(), storage, + auction_results_provider, ) .await .expect("Could not init hotshot") diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 8acf65264b..73c27193c5 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -6,9 +6,9 @@ use std::{ use anyhow::Result; use async_lock::RwLock; use async_trait::async_trait; -use futures::future::Either::{Left, Right}; use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; @@ -26,7 +26,7 @@ use hotshot_types::{ use snafu::Snafu; use crate::{ - test_runner::{LateStartNode, Node, TestRunner}, + test_runner::{LateNodeContext, LateNodeContextParameters, LateStartNode, Node, TestRunner}, test_task::{TestResult, TestTaskState}, }; @@ -61,7 +61,12 @@ impl< > TestTaskState for SpinningTask where I: TestableNodeImplementation, - I: NodeImplementation>, + I: NodeImplementation< + TYPES, + Network = N, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + >, { type Event = Event; @@ -100,10 +105,18 @@ where tracing::error!("Node {} spinning up late", idx); let node_id = idx.try_into().unwrap(); let context = match node.context { - Left(context) => context, + LateNodeContext::InitializedContext(context) => context, // Node not initialized. Initialize it // based on the received leaf. - Right((storage, memberships, config)) => { + LateNodeContext::UninitializedContext(late_context_params) => { + // We'll deconstruct the individual terms here. + let LateNodeContextParameters { + storage, + memberships, + config, + auction_results_provider, + } = late_context_params; + let initializer = HotShotInitializer::::from_reload( self.last_decided_leaf.clone(), TestInstanceState {}, @@ -131,6 +144,7 @@ where config, validator_config, storage, + auction_results_provider, ) .await } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 662d2fd86b..c191ee4fb0 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,7 +1,10 @@ use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, time::Duration}; use hotshot::traits::{NetworkReliability, TestableNodeImplementation}; -use hotshot_example_types::{state_types::TestInstanceState, storage_types::TestStorage}; +use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, + storage_types::TestStorage, +}; use hotshot_types::{ traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, }; @@ -365,6 +368,7 @@ impl TestDescription { ), storage: Box::new(|_| TestStorage::::default()), config, + auction_results_provider: Box::new(|_| TestAuctionResultsProvider::default()), }, metadata: self, } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 13819c3a33..83caf521ea 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -1,7 +1,9 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; -use hotshot_example_types::storage_types::TestStorage; +use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, storage_types::TestStorage, +}; use hotshot_types::{ traits::{ network::{AsyncGenerator, ConnectedNetwork}, @@ -26,6 +28,8 @@ pub struct ResourceGenerators>, /// configuration used to generate each hotshot node pub config: HotShotConfig, + /// generate a new auction results connector for each node + pub auction_results_provider: Generator, } /// test launcher diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0e39bf2b75..32aca14760 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -7,15 +7,13 @@ use std::{ use async_broadcast::broadcast; use async_lock::RwLock; -use futures::future::{ - join_all, Either, - Either::{Left, Right}, -}; +use futures::future::join_all; use hotshot::{ traits::TestableNodeImplementation, types::SystemContextHandle, HotShotInitializer, Memberships, SystemContext, }; use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; @@ -61,7 +59,12 @@ impl< > TestRunner where I: TestableNodeImplementation, - I: NodeImplementation>, + I: NodeImplementation< + TYPES, + Network = N, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + >, { /// execute test /// @@ -362,6 +365,8 @@ where let network = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); + let auction_results_provider = + (self.launcher.resource_generator.auction_results_provider)(node_id); let network_clone = network.clone(); let networks_ready_future = async move { @@ -375,7 +380,12 @@ where node_id, LateStartNode { network, - context: Right((storage, memberships, config)), + context: LateNodeContext::UninitializedContext(LateNodeContextParameters { + storage, + memberships, + config, + auction_results_provider, + }), }, ); } else { @@ -397,6 +407,7 @@ where config, validator_config, storage, + auction_results_provider, ) .await; if late_start.contains(&node_id) { @@ -404,7 +415,7 @@ where node_id, LateStartNode { network, - context: Left(hotshot), + context: LateNodeContext::InitializedContext(hotshot), }, ); } else { @@ -450,6 +461,7 @@ where /// add a specific node with a config /// # Panics /// if unable to initialize the node's `SystemContext` based on the config + #[allow(clippy::too_many_arguments)] pub async fn add_node_with_config( node_id: u64, network: Network, @@ -458,6 +470,7 @@ where config: HotShotConfig, validator_config: ValidatorConfig, storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, ) -> Arc> { // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); @@ -473,6 +486,7 @@ where initializer, ConsensusMetricsValue::default(), storage, + auction_results_provider, ) } } @@ -487,15 +501,33 @@ pub struct Node> { pub handle: SystemContextHandle, } -/// Either the node context or the parameters to construct the context for nodes that start late. -pub type LateNodeContext = Either< - Arc>, - ( - >::Storage, - Memberships, - HotShotConfig<::SignatureKey>, - ), ->; +/// This type combines all of the paramters needed to build the context for a node that started +/// late during a unit test or integration test. +pub struct LateNodeContextParameters> { + /// The storage trait for Sequencer persistence. + pub storage: I::Storage, + + /// The memberships of this particular node. + pub memberships: Memberships, + + /// The config associted with this node. + pub config: HotShotConfig, + + /// The Auction Results handle for this node. + pub auction_results_provider: I::AuctionResultsProvider, +} + +/// The late node context dictates how we're building a node that started late during the test. +#[allow(clippy::large_enum_variant)] +pub enum LateNodeContext> { + /// The system context that we're passing directly to the node, this means the node is already + /// initialized successfully. + InitializedContext(Arc>), + + /// The system context that we're passing to the node when it is not yet initialized, so we're + /// initializing it based on the received leaf and init parameters. + UninitializedContext(LateNodeContextParameters), +} /// A yet-to-be-started node that participates in tests pub struct LateStartNode> { diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 5b07e09483..53bbec0fda 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -14,6 +14,7 @@ use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, + auction_results_provider_types::TestAuctionResultsProvider, }; use hotshot_types::traits::network::BroadcastDelay; use hotshot_types::{ @@ -62,6 +63,7 @@ pub struct TestImpl {} impl NodeImplementation for TestImpl { type Network = MemoryNetwork<::SignatureKey>; type Storage = TestStorage; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// fake Eq diff --git a/types/src/traits.rs b/types/src/traits.rs index 765b202402..fb52674701 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -1,4 +1,5 @@ //! Common traits for the `HotShot` protocol +pub mod auction_results_provider; pub mod block_contents; pub mod consensus_api; pub mod election; diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs new file mode 100644 index 0000000000..c4022b289c --- /dev/null +++ b/types/src/traits/auction_results_provider.rs @@ -0,0 +1,33 @@ +//! This module defines the interaction layer with the Solver via the [`AuctionResultsProvider`] trait, +//! which handles connecting to, and fetching the allocation results from, the Solver. + +use super::node_implementation::NodeType; +use anyhow::Result; +use async_trait::async_trait; +use url::Url; + +/// This trait guarantees that a particular type has a url associated with it. This trait +/// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a URL +/// for the builder that HotShot must request from. +pub trait HasUrl { + /// Returns the builer url associated with the datatype + fn url(&self) -> Url; +} + +/// The AuctionResultsProvider trait is the sole source of Solver-originated state and interaction, +/// and returns the results of the Solver's allocation via the associated type. The associated type, +/// `AuctionResult`, also implements the [`HasUrl`] trait, which requires that the output +/// type has the requisite fields available. +#[async_trait] +pub trait AuctionResultsProvider: Send + Sync { + /// The AuctionSolverResult is a type that holds the data associated with a particular solver + /// run, for a particular view. + type AuctionResult: HasUrl; + + /// Fetches the auction result for a view. Does not cache the result, + /// subsequent calls will invoke additional wasted calls. + async fn fetch_auction_result( + &self, + view_number: TYPES::Time, + ) -> Result>; +} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 80370ab2e8..479cccc659 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -16,6 +16,7 @@ use committable::Committable; use serde::{Deserialize, Serialize}; use super::{ + auction_results_provider::AuctionResultsProvider, block_contents::{BlockHeader, TestableBlock, Transaction}, network::{ AsyncGenerator, ConnectedNetwork, NetworkReliability, TestableNetworkingImplementation, @@ -48,6 +49,9 @@ pub trait NodeImplementation: /// Storage for DA layer interactions type Storage: Storage; + + /// The auction results type for Solver interactions + type AuctionResultsProvider: AuctionResultsProvider; } /// extra functions required on a node implementation to be usable by hotshot-testing From 81cc72811512d2709a0bb8dcca69e3fd24aaa456 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 27 Jun 2024 14:32:18 -0400 Subject: [PATCH 1103/1393] Move versioning out of constants and into `NodeType` (#3391) --- example-types/src/node_types.rs | 7 ++++++ hotshot/src/lib.rs | 6 ++--- hotshot/src/tasks/mod.rs | 15 +++++++------ hotshot/src/tasks/task_state.rs | 9 +++----- hotshot/src/types/handle.rs | 3 +-- orchestrator/src/client.rs | 21 +++++++++-------- orchestrator/src/lib.rs | 12 +++++----- task-impls/src/transactions.rs | 17 ++++---------- task-impls/src/upgrade.rs | 13 +++++------ testing/src/block_builder/mod.rs | 22 +++++++++--------- testing/src/block_builder/simple.rs | 16 ++++++------- testing/tests/tests_1/block_builder.rs | 3 +-- testing/tests/tests_3/memory_network.rs | 7 ++++-- types/src/constants.rs | 10 --------- types/src/message.rs | 30 ++++++++++++------------- types/src/traits/node_implementation.rs | 10 +++++++++ 16 files changed, 100 insertions(+), 101 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 4e196e4df8..57f708b84f 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -9,6 +9,7 @@ use hotshot_types::{ traits::node_implementation::NodeType, }; use serde::{Deserialize, Serialize}; +use vbs::version::StaticVersion; use crate::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -34,6 +35,12 @@ use crate::{ /// to select our traits pub struct TestTypes; impl NodeType for TestTypes { + type Base = StaticVersion<0, 1>; + type Upgrade = StaticVersion<0, 2>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; type Time = ViewNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 5d6ffa0fc3..9a01ebd956 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -35,7 +35,7 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, - constants::{Base, EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, + constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, data::{Leaf, QuorumProposal}, event::{EventType, LeafInfo}, message::{DataMessage, Message, MessageKind, Proposal, VersionedMessage}, @@ -258,7 +258,7 @@ impl> SystemContext { ); let consensus = Arc::new(RwLock::new(consensus)); - let version = Arc::new(RwLock::new(Base::VERSION)); + let version = Arc::new(RwLock::new(TYPES::Base::VERSION)); // This makes it so we won't block on broadcasting if there is not a receiver // Our own copy of the receiver is inactive so it doesn't count. @@ -613,7 +613,7 @@ impl> SystemContext { ) .await; add_network_event_task(&mut handle, network, vid_membership, network::vid_filter).await; - add_consensus_tasks::(&mut handle).await; + add_consensus_tasks::(&mut handle).await; handle } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 165ca013e5..43509c23c0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -165,18 +165,19 @@ pub async fn add_network_event_task< } /// Adds consensus-related tasks to a `SystemContextHandle`. -pub async fn add_consensus_tasks< - TYPES: NodeType, - I: NodeImplementation, - VERSION: StaticVersionType + 'static, ->( +pub async fn add_consensus_tasks>( handle: &mut SystemContextHandle, ) { handle.add_task(ViewSyncTaskState::::create_from(handle).await); handle.add_task(VidTaskState::::create_from(handle).await); handle.add_task(DaTaskState::::create_from(handle).await); - handle.add_task(TransactionTaskState::::create_from(handle).await); - handle.add_task(UpgradeTaskState::::create_from(handle).await); + handle.add_task(TransactionTaskState::::create_from(handle).await); + + // only spawn the upgrade task if we are actually configured to perform an upgrade. + if TYPES::Base::VERSION < TYPES::Upgrade::VERSION { + handle.add_task(UpgradeTaskState::::create_from(handle).await); + } + { #![cfg(not(feature = "dependency-tasks"))] handle.add_task(ConsensusTaskState::::create_from(handle).await); diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index b768b18ae7..0202689c60 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -18,7 +18,6 @@ use hotshot_types::traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }; -use vbs::version::StaticVersionType; use crate::types::SystemContextHandle; @@ -163,12 +162,10 @@ impl> CreateTaskState } #[async_trait] -impl, Ver: StaticVersionType> - CreateTaskState for TransactionTaskState +impl> CreateTaskState + for TransactionTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> TransactionTaskState { + async fn create_from(handle: &SystemContextHandle) -> TransactionTaskState { TransactionTaskState { builder_timeout: handle.builder_timeout(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index c859bd070d..6ab61469f5 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -10,12 +10,11 @@ use async_std::task::JoinHandle; use futures::Stream; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry, Task, TaskState}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; -use hotshot_types::traits::network::ConnectedNetwork; use hotshot_types::{ consensus::Consensus, data::Leaf, error::HotShotError, - traits::{election::Membership, node_implementation::NodeType}, + traits::{election::Membership, network::ConnectedNetwork, node_implementation::NodeType}, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index dc604b47ee..8b29580624 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -3,9 +3,7 @@ use std::{net::SocketAddr, time::Duration}; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::{ - constants::Base, traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig, -}; +use hotshot_types::{traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig}; use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; @@ -223,8 +221,10 @@ impl OrchestratorClient { }); // Serialize our (possible) libp2p-specific data - let request_body = - vbs::Serializer::::serialize(&(libp2p_address, libp2p_public_key))?; + let request_body = vbs::Serializer::::serialize(&( + libp2p_address, + libp2p_public_key, + ))?; let identity = |client: Client| { // We need to clone here to move it into the closure @@ -316,7 +316,7 @@ impl OrchestratorClient { /// if unable to serialize `address` pub async fn post_builder_addresses(&self, addresses: Vec) { let send_builder_f = |client: Client| { - let request_body = vbs::Serializer::::serialize(&addresses) + let request_body = vbs::Serializer::::serialize(&addresses) .expect("Failed to serialize request"); async move { @@ -382,9 +382,12 @@ impl OrchestratorClient { let da_requested: bool = validator_config.is_da; // Serialize our (possible) libp2p-specific data - let request_body = - vbs::Serializer::::serialize(&(pubkey, libp2p_address, libp2p_public_key)) - .expect("failed to serialize request"); + let request_body = vbs::Serializer::::serialize(&( + pubkey, + libp2p_address, + libp2p_public_key, + )) + .expect("failed to serialize request"); // register our public key with the orchestrator let (node_index, is_da): (u64, bool) = loop { diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 798923687e..922898019b 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -17,7 +17,7 @@ use client::{BenchResults, BenchResultsDownloadConfig}; use config::BuilderType; use csv::Writer; use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; -use hotshot_types::{constants::Base, traits::signature_key::SignatureKey, PeerConfig}; +use hotshot_types::{traits::signature_key::SignatureKey, PeerConfig}; use libp2p::{ identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, @@ -582,7 +582,7 @@ where // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) let Ok((libp2p_address, libp2p_public_key)) = - vbs::Serializer::::deserialize(&body_bytes) + vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { status: tide_disco::StatusCode::BAD_REQUEST, @@ -614,7 +614,7 @@ where // Decode the libp2p data so we can add to our bootstrap nodes (if supplied) let Ok((mut pubkey, libp2p_address, libp2p_public_key)) = - vbs::Serializer::::deserialize(&body_bytes) + vbs::Serializer::::deserialize(&body_bytes) else { return Err(ServerError { status: tide_disco::StatusCode::BAD_REQUEST, @@ -662,7 +662,9 @@ where let mut body_bytes = req.body_bytes(); body_bytes.drain(..12); - let Ok(urls) = vbs::Serializer::::deserialize::>(&body_bytes) else { + let Ok(urls) = + vbs::Serializer::::deserialize::>(&body_bytes) + else { return Err(ServerError { status: tide_disco::StatusCode::BAD_REQUEST, message: "Malformed body".to_string(), @@ -672,7 +674,7 @@ where let mut futures = urls .into_iter() .map(|url| async { - let client: surf_disco::Client = + let client: surf_disco::Client = surf_disco::client::Client::builder(url.clone()).build(); if client.connect(Some(Duration::from_secs(2))).await { Some(url) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index efa1f9b543..84fb9bcb09 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -29,7 +29,6 @@ use hotshot_types::{ vid::VidCommitment, }; use tracing::{debug, error, instrument, warn}; -use vbs::version::StaticVersionType; use crate::{ builder::BuilderClient, @@ -65,11 +64,7 @@ pub struct BuilderResponses { } /// Tracks state of a Transaction task -pub struct TransactionTaskState< - TYPES: NodeType, - I: NodeImplementation, - Ver: StaticVersionType, -> { +pub struct TransactionTaskState> { /// The state's api pub builder_timeout: Duration, @@ -89,7 +84,7 @@ pub struct TransactionTaskState< pub membership: Arc, /// Builder API client - pub builder_clients: Vec>, + pub builder_clients: Vec>, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -103,9 +98,7 @@ pub struct TransactionTaskState< pub decided_upgrade_certificate: Option>, } -impl, Ver: StaticVersionType> - TransactionTaskState -{ +impl> TransactionTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error")] pub async fn handle( @@ -524,9 +517,7 @@ impl, Ver: StaticVersionType> #[async_trait] /// task state implementation for Transactions Task -impl, Ver: StaticVersionType + 'static> TaskState - for TransactionTaskState -{ +impl> TaskState for TransactionTaskState { type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 75264cc0cd..cae449c795 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -7,7 +7,6 @@ use async_trait::async_trait; use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ - constants::{Base, Upgrade, UPGRADE_HASH}, data::UpgradeProposal, event::{Event, EventType}, message::Proposal, @@ -92,9 +91,9 @@ impl> UpgradeTaskState { } // If the proposal does not match our upgrade target, we immediately exit. - if proposal.data.upgrade_proposal.new_version_hash != UPGRADE_HASH - || proposal.data.upgrade_proposal.old_version != Base::VERSION - || proposal.data.upgrade_proposal.new_version != Upgrade::VERSION + if proposal.data.upgrade_proposal.new_version_hash != TYPES::UPGRADE_HASH + || proposal.data.upgrade_proposal.old_version != TYPES::Base::VERSION + || proposal.data.upgrade_proposal.new_version != TYPES::Upgrade::VERSION { return None; } @@ -228,9 +227,9 @@ impl> UpgradeTaskState { && self.quorum_membership.leader(view + 5) == self.public_key { let upgrade_proposal_data = UpgradeProposalData { - old_version: Base::VERSION, - new_version: Upgrade::VERSION, - new_version_hash: UPGRADE_HASH.to_vec(), + old_version: TYPES::Base::VERSION, + new_version: TYPES::Upgrade::VERSION, + new_version_hash: TYPES::UPGRADE_HASH.to_vec(), // We schedule the upgrade to begin 15 views in the future old_version_last_view: TYPES::Time::new(*view + 15), // and end 20 views in the future diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index cda850d855..e9b75f4edc 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -10,13 +10,10 @@ use hotshot_builder_api::{ builder::{Error, Options}, data_source::BuilderDataSource, }; -use hotshot_types::{ - constants::Base, - traits::{ - block_contents::{precompute_vid_commitment, EncodeBytes}, - node_implementation::NodeType, - signature_key::BuilderSignatureKey, - }, +use hotshot_types::traits::{ + block_contents::{precompute_vid_commitment, EncodeBytes}, + node_implementation::NodeType, + signature_key::BuilderSignatureKey, }; use tide_disco::{method::ReadState, App, Url}; use vbs::version::StaticVersionType; @@ -75,14 +72,15 @@ pub fn run_builder_source( { async_spawn(async move { let start_builder = |url: Url, source: Source| -> _ { - let builder_api = hotshot_builder_api::builder::define_api::( - &Options::default(), - ) - .expect("Failed to construct the builder API"); + let builder_api = + hotshot_builder_api::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); let mut app: App = App::with_state(source); app.register_module("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, Base::instance())) + async_spawn(app.serve(url, TYPES::Base::instance())) }; let mut handle = Some(start_builder(url.clone(), source.clone())); diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 416002fe7d..f88f3a9044 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -24,7 +24,6 @@ use hotshot_builder_api::{ data_source::BuilderDataSource, }; use hotshot_types::{ - constants::Base, traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::BuilderSignatureKey, @@ -238,16 +237,17 @@ impl SimpleBuilderSource { where ::InstanceState: Default, { - let builder_api = - hotshot_builder_api::builder::define_api::, TYPES, Base>( - &Options::default(), - ) - .expect("Failed to construct the builder API"); + let builder_api = hotshot_builder_api::builder::define_api::< + SimpleBuilderSource, + TYPES, + TYPES::Base, + >(&Options::default()) + .expect("Failed to construct the builder API"); let mut app: App, Error> = App::with_state(self); - app.register_module::("block_info", builder_api) + app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, Base::instance())); + async_spawn(app.serve(url, TYPES::Base::instance())); } } diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 7d76a6ac04..03f13f83de 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -15,7 +15,6 @@ use hotshot_testing::block_builder::{ BuilderTask, RandomBuilderImplementation, TestBuilderImplementation, }; use hotshot_types::{ - constants::Base, traits::{ block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, BlockPayload, @@ -46,7 +45,7 @@ async fn test_random_block_builder() { let builder_started = Instant::now(); - let client: BuilderClient = BuilderClient::new(api_url); + let client: BuilderClient::Base> = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); let (pub_key, private_key) = diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 53bbec0fda..b3dff52722 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -16,19 +16,19 @@ use hotshot_example_types::{ storage_types::TestStorage, auction_results_provider_types::TestAuctionResultsProvider, }; -use hotshot_types::traits::network::BroadcastDelay; use hotshot_types::{ data::ViewNumber, message::{DataMessage, Message, MessageKind, VersionedMessage}, signature_key::{BLSPubKey, BuilderKey}, traits::{ - network::{ConnectedNetwork, TestableNetworkingImplementation}, + network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation}, node_implementation::{ConsensusTime, NodeType}, }, }; use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::{Deserialize, Serialize}; use tracing::{instrument, trace}; +use vbs::version::StaticVersion; #[derive( Copy, @@ -46,6 +46,9 @@ use tracing::{instrument, trace}; pub struct Test; impl NodeType for Test { + type Base = StaticVersion<0, 1>; + type Upgrade = StaticVersion<0, 2>; + const UPGRADE_HASH: [u8; 32] = [1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,]; type Time = ViewNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; diff --git a/types/src/constants.rs b/types/src/constants.rs index 36d4822c60..67faf3b268 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -17,16 +17,6 @@ pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network without delay before re-attempting the (presumed down) primary network pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 50; -/// Base protocol version, set to 0.1 -pub type Base = StaticVersion<0, 1>; -/// Upgraded protocol version, set to 0.2 -pub type Upgrade = StaticVersion<0, 2>; - -/// Hash for the upgrade from version 0.1 to version 0.2. -pub const UPGRADE_HASH: [u8; 32] = [ - 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -]; - /// Default channel size for consensus event sharing pub const EVENT_CHANNEL_SIZE: usize = 100_000; diff --git a/types/src/message.rs b/types/src/message.rs index 64185a05d4..869c08a40e 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -16,7 +16,6 @@ use vbs::{ }; use crate::{ - constants::{Base, Upgrade}, data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, simple_certificate::{ DaCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, @@ -66,23 +65,24 @@ where let version = match upgrade_certificate { Some(ref cert) => { if view >= cert.data.new_version_first_view - && cert.data.new_version == Upgrade::VERSION + && cert.data.new_version == TYPES::Upgrade::VERSION { - Upgrade::VERSION + TYPES::Upgrade::VERSION } else if view >= cert.data.new_version_first_view - && cert.data.new_version != Upgrade::VERSION + && cert.data.new_version != TYPES::Upgrade::VERSION { bail!("The network has upgraded to a new version that we do not support!"); } else { - Base::VERSION + TYPES::Base::VERSION } } - None => Base::VERSION, + None => TYPES::Base::VERSION, }; let serialized_message = match version { - Base::VERSION => Serializer::::serialize(&self), - Upgrade::VERSION => Serializer::::serialize(&self), + // Associated constants cannot be used in pattern matches, so we do this trick instead. + v if v == TYPES::Base::VERSION => Serializer::::serialize(&self), + v if v == TYPES::Upgrade::VERSION => Serializer::::serialize(&self), _ => { bail!("Attempted to serialize with an incompatible version. This should be impossible."); } @@ -105,8 +105,8 @@ where .0; let deserialized_message: Self = match version { - Base::VERSION => Serializer::::deserialize(message), - Upgrade::VERSION => Serializer::::deserialize(message), + v if v == TYPES::Base::VERSION => Serializer::::deserialize(message), + v if v == TYPES::Upgrade::VERSION => Serializer::::deserialize(message), _ => { bail!("Cannot deserialize message!"); } @@ -118,18 +118,18 @@ where let expected_version = match upgrade_certificate { Some(ref cert) => { if view >= cert.data.new_version_first_view - && cert.data.new_version == Upgrade::VERSION + && cert.data.new_version == TYPES::Upgrade::VERSION { - Upgrade::VERSION + TYPES::Upgrade::VERSION } else if view >= cert.data.new_version_first_view - && cert.data.new_version != Upgrade::VERSION + && cert.data.new_version != TYPES::Upgrade::VERSION { bail!("The network has upgraded to a new version that we do not support!"); } else { - Base::VERSION + TYPES::Base::VERSION } } - None => Base::VERSION, + None => TYPES::Base::VERSION, }; ensure!( diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 479cccc659..4dba2104c1 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -14,6 +14,7 @@ use std::{ use async_trait::async_trait; use committable::Committable; use serde::{Deserialize, Serialize}; +use vbs::version::StaticVersionType; use super::{ auction_results_provider::AuctionResultsProvider, @@ -191,6 +192,15 @@ pub trait NodeType: + Sync + 'static { + /// The base version of HotShot this node is instantiated with. + type Base: StaticVersionType; + + /// The version of HotShot this node may be upgraded to. Set equal to `Base` to disable upgrades. + type Upgrade: StaticVersionType; + + /// The hash for the upgrade. + const UPGRADE_HASH: [u8; 32]; + /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. From c07d70f3ee3860eb6a7b6d1a2071802737cef6d1 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 27 Jun 2024 12:33:32 -0700 Subject: [PATCH 1104/1393] orchestrator changes (#3392) --- orchestrator/src/lib.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 922898019b..06dc1abb53 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -498,10 +498,6 @@ where self.bench_results.total_transactions_committed = metrics .total_transactions_committed .max(cur_metrics.total_transactions_committed); - assert_eq!( - metrics.transaction_size_in_bytes, - cur_metrics.transaction_size_in_bytes - ); self.bench_results.total_time_elapsed_in_sec = metrics .total_time_elapsed_in_sec .max(cur_metrics.total_time_elapsed_in_sec); From fb964a75c7e840142b84fa425a3f9db327bdc3e2 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 27 Jun 2024 17:02:57 -0400 Subject: [PATCH 1105/1393] Add helper function to calculate version (#3393) --- types/src/message.rs | 70 ++++++++++---------- types/src/traits/auction_results_provider.rs | 3 +- 2 files changed, 36 insertions(+), 37 deletions(-) diff --git a/types/src/message.rs b/types/src/message.rs index 869c08a40e..4275c0b9c1 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -34,6 +34,34 @@ use crate::{ vote::HasViewNumber, }; +/// Calculate the version applied in a view, based on the provided upgrade certificate. +/// +/// # Errors +/// Returns an error if we do not support the version required by the upgrade certificate. +pub fn version( + view: TYPES::Time, + upgrade_certificate: &Option>, +) -> Result { + let version = match upgrade_certificate { + Some(ref cert) => { + if view >= cert.data.new_version_first_view + && cert.data.new_version == TYPES::Upgrade::VERSION + { + TYPES::Upgrade::VERSION + } else if view >= cert.data.new_version_first_view + && cert.data.new_version != TYPES::Upgrade::VERSION + { + bail!("The network has upgraded to a new version that we do not support!"); + } else { + TYPES::Base::VERSION + } + } + None => TYPES::Base::VERSION, + }; + + Ok(version) +} + /// Incoming message #[derive(Serialize, Deserialize, Clone, Derivative, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] @@ -62,22 +90,7 @@ where ) -> Result> { let view = self.view_number(); - let version = match upgrade_certificate { - Some(ref cert) => { - if view >= cert.data.new_version_first_view - && cert.data.new_version == TYPES::Upgrade::VERSION - { - TYPES::Upgrade::VERSION - } else if view >= cert.data.new_version_first_view - && cert.data.new_version != TYPES::Upgrade::VERSION - { - bail!("The network has upgraded to a new version that we do not support!"); - } else { - TYPES::Base::VERSION - } - } - None => TYPES::Base::VERSION, - }; + let version = version(view, upgrade_certificate)?; let serialized_message = match version { // Associated constants cannot be used in pattern matches, so we do this trick instead. @@ -100,11 +113,11 @@ where message: &'a [u8], upgrade_certificate: &Option>, ) -> Result { - let version = Version::deserialize(message) + let actual_version = Version::deserialize(message) .context("Failed to read message version!")? .0; - let deserialized_message: Self = match version { + let deserialized_message: Self = match actual_version { v if v == TYPES::Base::VERSION => Serializer::::deserialize(message), v if v == TYPES::Upgrade::VERSION => Serializer::::deserialize(message), _ => { @@ -115,26 +128,11 @@ where let view = deserialized_message.view_number(); - let expected_version = match upgrade_certificate { - Some(ref cert) => { - if view >= cert.data.new_version_first_view - && cert.data.new_version == TYPES::Upgrade::VERSION - { - TYPES::Upgrade::VERSION - } else if view >= cert.data.new_version_first_view - && cert.data.new_version != TYPES::Upgrade::VERSION - { - bail!("The network has upgraded to a new version that we do not support!"); - } else { - TYPES::Base::VERSION - } - } - None => TYPES::Base::VERSION, - }; + let expected_version = version(view, upgrade_certificate)?; ensure!( - version == expected_version, - "Message has invalid version number for its view. Expected: {expected_version}, Actual: {version}, View: {view:?}" + actual_version == expected_version, + "Message has invalid version number for its view. Expected: {expected_version}, Actual: {actual_version}, View: {view:?}" ); Ok(deserialized_message) diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index c4022b289c..4b65d4238d 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -1,11 +1,12 @@ //! This module defines the interaction layer with the Solver via the [`AuctionResultsProvider`] trait, //! which handles connecting to, and fetching the allocation results from, the Solver. -use super::node_implementation::NodeType; use anyhow::Result; use async_trait::async_trait; use url::Url; +use super::node_implementation::NodeType; + /// This trait guarantees that a particular type has a url associated with it. This trait /// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a URL /// for the builder that HotShot must request from. From d24ddd7c1ee54e170d434a7b571d0160436a1b4e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 28 Jun 2024 10:44:53 -0400 Subject: [PATCH 1106/1393] [DEPENDENCY_TASK] Fix CDN Crash Tests (#3390) * add logs * build fix * more log * less log * log vid share recv * log all vote deps * build fix * fix? * lint * revert logging changes * lower log level * Update crates/task-impls/src/consensus/helpers.rs Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> * Update crates/task-impls/src/request.rs Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> * Update crates/task-impls/src/request.rs Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --------- Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- task-impls/src/consensus/helpers.rs | 1 + task-impls/src/da.rs | 17 ++++++++++++++++- task-impls/src/quorum_vote/mod.rs | 5 ++++- task-impls/src/request.rs | 27 +++++++++++++++++++++++++-- task-impls/src/transactions.rs | 1 + 5 files changed, 47 insertions(+), 4 deletions(-) diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs index 7498b569f2..f736ddd900 100644 --- a/task-impls/src/consensus/helpers.rs +++ b/task-impls/src/consensus/helpers.rs @@ -686,6 +686,7 @@ pub(crate) async fn fetch_proposal( quorum_membership: Arc, consensus: Arc>>, ) -> Result> { + tracing::debug!("Fetching proposal for view {:?}", view); let (tx, mut rx) = broadcast(1); let event = ProposalMissing { view, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 9e06545937..accb841821 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -219,14 +219,29 @@ impl> DaTaskState { let consensus = Arc::clone(&self.consensus); let membership = Arc::clone(&self.quorum_membership); let pk = self.private_key.clone(); + let public_key = self.public_key.clone(); + let chan = event_stream.clone(); async_spawn(async move { Consensus::calculate_and_update_vid( - consensus, + Arc::clone(&consensus), view_number, membership, &pk, ) .await; + if let Some(Some(vid_share)) = consensus + .read() + .await + .vid_shares() + .get(&view_number) + .map(|shares| shares.get(&public_key).cloned()) + { + broadcast_event( + Arc::new(HotShotEvent::VidShareRecv(vid_share.clone())), + &chan, + ) + .await; + } }); } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index bd0c1c0ace..d232db1153 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -34,7 +34,7 @@ use hotshot_types::{ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, trace, warn}; +use tracing::{debug, error, info, instrument, trace, warn}; use vbs::version::Version; use crate::{ @@ -555,6 +555,7 @@ impl> QuorumVoteTaskState { + info!("Vote NOW for view {:?}", *view); self.create_dependency_task_if_new( *view, event_receiver, @@ -563,6 +564,8 @@ impl> QuorumVoteTaskState { + trace!("Received Proposal for view {}", *proposal.view_number()); + // Handle the event before creating the dependency task. if let Err(e) = handle_quorum_proposal_validated(proposal, &event_sender, self).await diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index b40bf413b6..5fd0254718 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -202,6 +202,7 @@ impl> NetworkRequestState { network: Arc::clone(&self.network), state: Arc::clone(&self.state), + public_key: self.public_key.clone(), sender, delay: self.delay, recipients, @@ -253,6 +254,8 @@ struct DelayedRequester> { pub network: Arc, /// Shared state to check if the data go populated state: Arc>>, + /// our public key + public_key: TYPES::SignatureKey, /// Channel to send the event when we receive a response sender: Sender>>, /// Duration to delay sending the first request @@ -391,15 +394,35 @@ impl> DelayedRequester { async fn cancel_vid(&self, req: &VidRequest) -> bool { let view = req.0; let state = self.state.read().await; - self.shutdown_flag.load(Ordering::Relaxed) + let cancel = self.shutdown_flag.load(Ordering::Relaxed) || state.vid_shares().contains_key(&view) - || state.cur_view() > view + || state.cur_view() > view; + if cancel { + if let Some(Some(vid_share)) = state + .vid_shares() + .get(&view) + .map(|shares| shares.get(&self.public_key).cloned()) + { + broadcast_event( + Arc::new(HotShotEvent::VidShareRecv(vid_share.clone())), + &self.sender, + ) + .await; + } + tracing::debug!( + "Canceling vid request for view {:?}, cur view is {:?}", + view, + state.cur_view() + ); + } + cancel } /// Transform a response into a `HotShotEvent` async fn handle_response_message(&self, message: SequencingMessage) { let event = match message { SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(prop)) => { + tracing::info!("vid req complete, got vid {:?}", prop); HotShotEvent::VidShareRecv(prop) } _ => return, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 84fb9bcb09..5db22ca8fc 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -28,6 +28,7 @@ use hotshot_types::{ utils::ViewInner, vid::VidCommitment, }; + use tracing::{debug, error, instrument, warn}; use crate::{ From b65d19895f102b59985f4ee55be0b886ddd402d3 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 28 Jun 2024 11:46:32 -0400 Subject: [PATCH 1107/1393] ignore random block builder test (#3398) --- testing/tests/tests_1/block_builder.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 03f13f83de..fb60e1a1e2 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -28,6 +28,7 @@ use tide_disco::Url; tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[ignore] async fn test_random_block_builder() { let port = portpicker::pick_unused_port().expect("No free ports"); let api_url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); From e530b498b23a39a2ed890f74b628ddf6090d9552 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 28 Jun 2024 12:38:57 -0400 Subject: [PATCH 1108/1393] Add time-based configurability for upgrades (#3400) --- hotshot/src/tasks/task_state.rs | 8 ++++ orchestrator/run-config.toml | 8 +++- orchestrator/src/config.rs | 16 ++++++++ task-impls/src/upgrade.rs | 68 ++++++++++++++++++++++++--------- testing/src/test_builder.rs | 8 +++- types/src/constants.rs | 12 ++++++ types/src/lib.rs | 8 ++++ 7 files changed, 106 insertions(+), 22 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 0202689c60..6b9002180b 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -72,6 +72,10 @@ impl> CreateTaskState stop_proposing_view: handle.hotshot.config.stop_proposing_view, start_voting_view: handle.hotshot.config.start_voting_view, stop_voting_view: handle.hotshot.config.stop_voting_view, + start_proposing_time: handle.hotshot.config.start_proposing_time, + stop_proposing_time: handle.hotshot.config.stop_proposing_time, + start_voting_time: handle.hotshot.config.start_voting_time, + stop_voting_time: handle.hotshot.config.stop_voting_time, }; #[cfg(feature = "example-upgrade")] @@ -88,6 +92,10 @@ impl> CreateTaskState stop_proposing_view: 10, start_voting_view: 0, stop_voting_view: 20, + start_proposing_time: 0, + stop_proposing_time: u64::MAX, + start_voting_time: 0, + stop_voting_time: u64::MAX, }; } } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index c8030920a3..7fe8bb7e2a 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -93,7 +93,11 @@ secs = 10 nanos = 0 [config.upgrade] -start_proposing_view = 0 +start_proposing_view = 1 stop_proposing_view = 0 -start_voting_view = 0 +start_voting_view = 1 stop_voting_view = 0 +start_proposing_time = 1 +stop_proposing_time = 0 +start_voting_time = 1 +stop_voting_time = 0 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index ab55993e63..e3924482a5 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -599,6 +599,14 @@ pub struct UpgradeConfig { pub start_voting_view: u64, /// View to stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_view <= start_voting_view. pub stop_voting_view: u64, + /// Unix time in seconds at which we start proposing an upgrade + pub start_proposing_time: u64, + /// Unix time in seconds at which we stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_time <= start_proposing_time. + pub stop_proposing_time: u64, + /// Unix time in seconds at which we start voting on an upgrade + pub start_voting_time: u64, + /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. + pub stop_voting_time: u64, } // Explicitly implementing `Default` for clarity. @@ -610,6 +618,10 @@ impl Default for UpgradeConfig { stop_proposing_view: 0, start_voting_view: u64::MAX, stop_voting_view: 0, + start_proposing_time: u64::MAX, + stop_proposing_time: 0, + start_voting_time: u64::MAX, + stop_voting_time: 0, } } } @@ -695,6 +707,10 @@ impl From> for HotShotConfig { stop_proposing_view: val.upgrade.stop_proposing_view, start_voting_view: val.upgrade.start_voting_view, stop_voting_view: val.upgrade.stop_voting_view, + start_proposing_time: val.upgrade.start_proposing_time, + stop_proposing_time: val.upgrade.stop_proposing_time, + start_voting_time: val.upgrade.start_voting_time, + stop_voting_time: val.upgrade.stop_voting_time, } } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index cae449c795..06e7b89214 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc, time::SystemTime}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; @@ -7,6 +7,10 @@ use async_trait::async_trait; use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ + constants::{ + UPGRADE_BEGIN_OFFSET, UPGRADE_DECIDE_BY_OFFSET, UPGRADE_FINISH_OFFSET, + UPGRADE_PROPOSE_OFFSET, + }, data::UpgradeProposal, event::{Event, EventType}, message::Proposal, @@ -70,6 +74,18 @@ pub struct UpgradeTaskState> { /// View to stop voting on an upgrade pub stop_voting_view: u64, + + /// Unix time in seconds at which we start proposing an upgrade + pub start_proposing_time: u64, + + /// Unix time in seconds at which we stop proposing an upgrade + pub stop_proposing_time: u64, + + /// Unix time in seconds at which we start voting on an upgrade + pub start_voting_time: u64, + + /// Unix time in seconds at which we stop voting on an upgrade + pub stop_voting_time: u64, } impl> UpgradeTaskState { @@ -84,9 +100,17 @@ impl> UpgradeTaskState { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { info!("Received upgrade proposal: {:?}", proposal); - if *proposal.data.view_number() < self.start_voting_view - || *proposal.data.view_number() >= self.stop_voting_view - { + let view = *proposal.data.view_number(); + let time = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .ok()? + .as_secs(); + + if time < self.start_voting_time || time >= self.stop_voting_time { + return None; + } + + if view < self.start_voting_view || view >= self.stop_voting_view { return None; } @@ -211,35 +235,43 @@ impl> UpgradeTaskState { } } } - HotShotEvent::ViewChange(view) => { - let view = *view; - if *self.cur_view >= *view { + HotShotEvent::ViewChange(new_view) => { + if self.cur_view >= *new_view { return None; } - if *view - *self.cur_view > 1 { - warn!("View changed by more than 1 going to view {:?}", view); - } - self.cur_view = view; + self.cur_view = *new_view; + + let view: u64 = *self.cur_view; + let time = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .ok()? + .as_secs(); + // We try to form a certificate 5 views before we're leader. - if *view >= self.start_proposing_view - && *view < self.stop_proposing_view - && self.quorum_membership.leader(view + 5) == self.public_key + if view >= self.start_proposing_view + && view < self.stop_proposing_view + && time >= self.start_proposing_time + && time < self.stop_proposing_time + && self + .quorum_membership + .leader(TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET)) + == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: TYPES::Base::VERSION, new_version: TYPES::Upgrade::VERSION, new_version_hash: TYPES::UPGRADE_HASH.to_vec(), // We schedule the upgrade to begin 15 views in the future - old_version_last_view: TYPES::Time::new(*view + 15), + old_version_last_view: TYPES::Time::new(view + UPGRADE_BEGIN_OFFSET), // and end 20 views in the future - new_version_first_view: TYPES::Time::new(*view + 20), - decide_by: TYPES::Time::new(*view + 10), + new_version_first_view: TYPES::Time::new(view + UPGRADE_FINISH_OFFSET), + decide_by: TYPES::Time::new(view + UPGRADE_DECIDE_BY_OFFSET), }; let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), - view_number: view + 5, + view_number: TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET), }; let signature = TYPES::SignatureKey::sign( diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index c191ee4fb0..40257c077f 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -330,10 +330,14 @@ impl TestDescription { data_request_delay: Duration::from_millis(200), // Placeholder until we spin up the builder builder_urls: vec1::vec1![Url::parse("http://localhost:9999").expect("Valid URL")], - start_proposing_view: 0, + start_proposing_view: u64::MAX, stop_proposing_view: 0, - start_voting_view: 0, + start_voting_view: u64::MAX, stop_voting_view: 0, + start_proposing_time: u64::MAX, + stop_proposing_time: 0, + start_voting_time: u64::MAX, + stop_voting_time: 0, }; let TimingData { next_view_timeout, diff --git a/types/src/constants.rs b/types/src/constants.rs index 67faf3b268..9b66b81d19 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -36,6 +36,18 @@ pub type WebServerVersion = StaticVersion { pub start_voting_view: u64, /// View to stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_view <= start_voting_view. pub stop_voting_view: u64, + /// Unix time in seconds at which we start proposing an upgrade + pub start_proposing_time: u64, + /// Unix time in seconds at which we stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_time <= start_proposing_time. + pub stop_proposing_time: u64, + /// Unix time in seconds at which we start voting on an upgrade + pub start_voting_time: u64, + /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. + pub stop_voting_time: u64, } From c4e63ecfc2735e7c0115c2fba284d81b9d0d3607 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 28 Jun 2024 19:25:57 +0200 Subject: [PATCH 1109/1393] [DEPENDENCY_REFACTOR] - Move all the dependency code into consensus2 (#3397) * Fix view * Move files * Fix after merge * Fix doc --- hotshot/src/tasks/task_state.rs | 18 +- task-impls/src/consensus/handlers.rs | 810 +++++++++ task-impls/src/consensus/helpers.rs | 1485 ----------------- task-impls/src/consensus/mod.rs | 49 +- task-impls/src/consensus/view_change.rs | 137 -- task-impls/src/helpers.rs | 801 ++++++++- .../{dependency_handle.rs => handlers.rs} | 5 +- task-impls/src/quorum_proposal/mod.rs | 6 +- .../src/quorum_proposal_recv/handlers.rs | 12 +- task-impls/src/quorum_proposal_recv/mod.rs | 14 +- task-impls/src/quorum_vote/handlers.rs | 5 +- task-impls/src/quorum_vote/mod.rs | 5 +- task-impls/src/transactions.rs | 1 - .../tests_1/quorum_proposal_recv_task.rs | 1 + testing/tests/tests_1/quorum_vote_task.rs | 2 + .../tests_1/upgrade_task_with_consensus.rs | 1 + .../tests_1/upgrade_task_with_proposal.rs | 2 +- .../tests/tests_1/upgrade_task_with_vote.rs | 2 +- 18 files changed, 1670 insertions(+), 1686 deletions(-) create mode 100644 task-impls/src/consensus/handlers.rs delete mode 100644 task-impls/src/consensus/helpers.rs delete mode 100644 task-impls/src/consensus/view_change.rs rename task-impls/src/quorum_proposal/{dependency_handle.rs => handlers.rs} (99%) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 6b9002180b..5ce533682b 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -5,14 +5,19 @@ use std::{ use async_trait::async_trait; use chrono::Utc; +#[cfg(not(feature = "dependency-tasks"))] +use hotshot_task_impls::consensus::ConsensusTaskState; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ - builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, - da::DaTaskState, quorum_proposal::QuorumProposalTaskState, + builder::BuilderClient, da::DaTaskState, request::NetworkRequestState, + transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, + view_sync::ViewSyncTaskState, +}; +#[cfg(feature = "dependency-tasks")] +use hotshot_task_impls::{ + consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, - request::NetworkRequestState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, - vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::traits::{ consensus_api::ConsensusApi, @@ -198,6 +203,7 @@ impl> CreateTaskState } } +#[cfg(not(feature = "dependency-tasks"))] #[async_trait] impl> CreateTaskState for ConsensusTaskState @@ -237,6 +243,7 @@ impl> CreateTaskState } } +#[cfg(feature = "dependency-tasks")] #[async_trait] impl> CreateTaskState for QuorumVoteTaskState @@ -263,6 +270,7 @@ impl> CreateTaskState } } +#[cfg(feature = "dependency-tasks")] #[async_trait] impl> CreateTaskState for QuorumProposalTaskState @@ -296,6 +304,7 @@ impl> CreateTaskState } } +#[cfg(feature = "dependency-tasks")] #[async_trait] impl> CreateTaskState for QuorumProposalRecvTaskState @@ -330,6 +339,7 @@ impl> CreateTaskState } } +#[cfg(feature = "dependency-tasks")] #[async_trait] impl> CreateTaskState for Consensus2TaskState diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs new file mode 100644 index 0000000000..43956c96c7 --- /dev/null +++ b/task-impls/src/consensus/handlers.rs @@ -0,0 +1,810 @@ +#![cfg(not(feature = "dependency-tasks"))] + +use core::time::Duration; +use std::{marker::PhantomData, sync::Arc}; + +use anyhow::{bail, ensure, Context, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use chrono::Utc; +use committable::Committable; +use futures::FutureExt; +use hotshot_types::{ + consensus::{CommitmentAndMetadata, Consensus, View}, + data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, + event::{Event, EventType}, + message::{GeneralConsensusMessage, Proposal}, + simple_certificate::UpgradeCertificate, + simple_vote::QuorumData, + traits::{ + block_contents::BlockHeader, + election::Membership, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, + states::ValidatedState, + storage::Storage, + }, + utils::ViewInner, + vote::{Certificate, HasViewNumber}, +}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, info, warn}; +use vbs::version::Version; + +use super::ConsensusTaskState; +use crate::{ + events::HotShotEvent, + helpers::{ + broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, + temp_validate_proposal_safety_and_liveness, update_view, validate_proposal_view_and_certs, + AnyhowTracing, SEND_VIEW_CHANGE_EVENT, + }, +}; + +/// Create the header for a proposal, build the proposal, and broadcast +/// the proposal send evnet. +#[allow(clippy::too_many_arguments)] +pub async fn create_and_send_proposal( + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + event_stream: Sender>>, + view: TYPES::Time, + commitment_and_metadata: CommitmentAndMetadata, + parent_leaf: Leaf, + state: Arc, + upgrade_cert: Option>, + proposal_cert: Option>, + round_start_delay: u64, + instance_state: Arc, + version: Version, +) { + let consensus_read = consensus.read().await; + let Some(Some(vid_share)) = consensus_read + .vid_shares() + .get(&view) + .map(|shares| shares.get(&public_key).cloned()) + else { + error!("Cannot propopse without our VID share, view {:?}", view); + return; + }; + drop(consensus_read); + let block_header = match TYPES::BlockHeader::new( + state.as_ref(), + instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fee, + vid_share.data.common, + version, + ) + .await + { + Ok(header) => header, + Err(err) => { + error!(%err, "Failed to construct block header"); + return; + } + }; + + let proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: consensus.read().await.high_qc().clone(), + proposal_certificate: proposal_cert, + upgrade_certificate: upgrade_cert, + }; + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.parent_commitment() != parent_leaf.commit() { + return; + } + + let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) + else { + // This should never happen. + error!("Failed to sign proposed_leaf.commit()!"); + return; + }; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!( + "Sending null proposal for view {:?}", + proposed_leaf.view_number(), + ); + if let Err(e) = consensus + .write() + .await + .update_last_proposed_view(message.clone()) + { + tracing::trace!("{e:?}"); + return; + } + async_sleep(Duration::from_millis(round_start_delay)).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + public_key, + )), + &event_stream, + ) + .await; +} + +/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the +/// standard case proposal scenario. +#[allow(clippy::too_many_arguments)] +pub async fn publish_proposal_from_commitment_and_metadata( + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + delay: u64, + formed_upgrade_certificate: Option>, + decided_upgrade_cert: Option>, + commitment_and_metadata: Option>, + proposal_cert: Option>, + instance_state: Arc, + version: Version, +) -> Result> { + let (parent_leaf, state) = parent_leaf_and_state( + view, + quorum_membership, + public_key.clone(), + Arc::clone(&consensus), + ) + .await?; + + // In order of priority, we should try to attach: + // - the parent certificate if it exists, or + // - our own certificate that we formed. + // In either case, we need to ensure that the certificate is still relevant. + // + // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. + // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. + let mut proposal_upgrade_certificate = parent_leaf + .upgrade_certificate() + .or(formed_upgrade_certificate); + + if !proposal_upgrade_certificate + .clone() + .is_some_and(|cert| cert.temp_is_relevant(view, decided_upgrade_cert).is_ok()) + { + proposal_upgrade_certificate = None; + } + + // We only want to proposal to be attached if any of them are valid. + let proposal_certificate = proposal_cert + .as_ref() + .filter(|cert| cert.is_valid_for_view(&view)) + .cloned(); + + // FIXME - This is not great, and will be fixed later. + // If it's > July, 2024 and this is still here, something has gone horribly wrong. + let cnm = commitment_and_metadata + .clone() + .context("Cannot propose because we don't have the VID payload commitment and metadata")?; + + ensure!( + cnm.block_view == view, + "Cannot propose because our VID payload commitment and metadata is for an older view." + ); + + let create_and_send_proposal_handle = async_spawn(async move { + create_and_send_proposal( + public_key, + private_key, + consensus, + sender, + view, + cnm, + parent_leaf.clone(), + state, + proposal_upgrade_certificate, + proposal_certificate, + delay, + instance_state, + version, + ) + .await; + }); + + Ok(create_and_send_proposal_handle) +} + +/// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either +/// `commitment_and_metadata`, or a `decided_upgrade_cert`. +#[allow(clippy::too_many_arguments)] +pub async fn publish_proposal_if_able( + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: Arc>>, + delay: u64, + formed_upgrade_certificate: Option>, + decided_upgrade_cert: Option>, + commitment_and_metadata: Option>, + proposal_cert: Option>, + instance_state: Arc, + version: Version, +) -> Result> { + publish_proposal_from_commitment_and_metadata( + view, + sender, + quorum_membership, + public_key, + private_key, + consensus, + delay, + formed_upgrade_certificate, + decided_upgrade_cert, + commitment_and_metadata, + proposal_cert, + instance_state, + version, + ) + .await +} + +/// Handle the received quorum proposal. +/// +/// Returns the proposal that should be used to set the `cur_proposal` for other tasks. +#[allow(clippy::too_many_lines)] +pub(crate) async fn handle_quorum_proposal_recv>( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, + version: Version, +) -> Result>> { + let sender = sender.clone(); + debug!( + "Received Quorum Proposal for view {}", + *proposal.data.view_number + ); + + let cur_view = task_state.cur_view; + + validate_proposal_view_and_certs( + proposal, + &sender, + task_state.cur_view, + &task_state.quorum_membership, + &task_state.timeout_membership, + ) + .context("Failed to validate proposal view and attached certs")?; + + let view = proposal.data.view_number(); + let view_leader_key = task_state.quorum_membership.leader(view); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { + let consensus = task_state.consensus.read().await; + consensus.metrics.invalid_qc.update(1); + bail!("Invalid justify_qc in proposal for view {}", *view); + } + + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here + if let Err(e) = update_view::( + view, + &event_stream, + task_state.timeout, + Arc::clone(&task_state.consensus), + &mut task_state.cur_view, + &mut task_state.cur_view_time, + &mut task_state.timeout_task, + &task_state.output_event_stream, + SEND_VIEW_CHANGE_EVENT, + task_state.quorum_membership.leader(cur_view) == task_state.public_key, + ) + .await + { + debug!("Failed to update view; error = {e:#}"); + } + + let mut parent_leaf = task_state + .consensus + .read() + .await + .saved_leaves() + .get(&justify_qc.date().leaf_commit) + .cloned(); + + parent_leaf = match parent_leaf { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + event_stream.clone(), + Arc::clone(&task_state.quorum_membership), + Arc::clone(&task_state.consensus), + ) + .await + .ok(), + }; + let consensus_read = task_state.consensus.read().await; + + // Get the parent leaf and state. + let parent = match parent_leaf { + Some(leaf) => { + if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { + Some((leaf, Arc::clone(&state))) + } else { + bail!("Parent state not found! Consensus internally inconsistent"); + } + } + None => None, + }; + + if justify_qc.view_number() > consensus_read.high_qc().view_number { + if let Err(e) = task_state + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + bail!("Failed to store High QC not voting. Error: {:?}", e); + } + } + + drop(consensus_read); + let mut consensus_write = task_state.consensus.write().await; + + if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { + tracing::trace!("{e:?}"); + } + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some((parent_leaf, _parent_state)) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.date().leaf_commit + ); + let leaf = Leaf::from_quorum_proposal(&proposal.data); + + let state = Arc::new( + >::from_header( + &proposal.data.block_header, + ), + ); + + if let Err(e) = consensus_write.update_validated_state_map( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }, + ) { + tracing::trace!("{e:?}"); + } + + consensus_write.update_saved_leaves(leaf.clone()); + let new_leaves = consensus_write.saved_leaves().clone(); + let new_state = consensus_write.validated_state_map().clone(); + drop(consensus_write); + + if let Err(e) = task_state + .storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + + // If we are missing the parent from storage, the safety check will fail. But we can + // still vote if the liveness check succeeds. + let consensus_read = task_state.consensus.read().await; + let liveness_check = justify_qc.view_number() > consensus_read.locked_view(); + + let high_qc = consensus_read.high_qc().clone(); + let locked_view = consensus_read.locked_view(); + + drop(consensus_read); + + let mut current_proposal = None; + if liveness_check { + current_proposal = Some(proposal.data.clone()); + let new_view = proposal.data.view_number + 1; + + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.leader(new_view) + == task_state.public_key + && high_qc.view_number == current_proposal.clone().unwrap().view_number; + + let qc = high_qc.clone(); + if should_propose { + debug!( + "Attempting to publish proposal after voting for liveness; now in view: {}", + *new_view + ); + let create_and_send_proposal_handle = publish_proposal_if_able( + qc.view_number + 1, + event_stream, + Arc::clone(&task_state.quorum_membership), + task_state.public_key.clone(), + task_state.private_key.clone(), + Arc::clone(&task_state.consensus), + task_state.round_start_delay, + task_state.formed_upgrade_certificate.clone(), + task_state.decided_upgrade_cert.clone(), + task_state.payload_commitment_and_metadata.clone(), + task_state.proposal_cert.clone(), + Arc::clone(&task_state.instance_state), + version, + ) + .await?; + + task_state + .spawned_tasks + .entry(view) + .or_default() + .push(create_and_send_proposal_handle); + } + } else { + warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); + } + + return Ok(current_proposal); + }; + + task_state + .spawned_tasks + .entry(proposal.data.view_number()) + .or_default() + .push(async_spawn( + temp_validate_proposal_safety_and_liveness( + proposal.clone(), + parent_leaf, + Arc::clone(&task_state.consensus), + task_state.decided_upgrade_cert.clone(), + Arc::clone(&task_state.quorum_membership), + view_leader_key, + event_stream.clone(), + sender, + task_state.output_event_stream.clone(), + ) + .map(AnyhowTracing::err_as_debug), + )); + Ok(None) +} + +/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. +#[allow(clippy::too_many_lines)] +pub async fn handle_quorum_proposal_validated>( + proposal: &QuorumProposal, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, +) -> Result<()> { + let view = proposal.view_number(); + task_state.current_proposal = Some(proposal.clone()); + + let res = decide_from_proposal( + proposal, + Arc::clone(&task_state.consensus), + &task_state.decided_upgrade_cert, + &task_state.public_key, + ) + .await; + + if let Some(cert) = res.decided_upgrade_certificate { + task_state.decided_upgrade_cert = Some(cert.clone()); + + let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; + *decided_certificate_lock = Some(cert.clone()); + drop(decided_certificate_lock); + let _ = event_stream + .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) + .await; + } + + let mut consensus = task_state.consensus.write().await; + if let Some(new_locked_view) = res.new_locked_view_number { + if let Err(e) = consensus.update_locked_view(new_locked_view) { + tracing::trace!("{e:?}"); + } + } + + drop(consensus); + + let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; + // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key + && task_state.consensus.read().await.high_qc().view_number + == task_state.current_proposal.clone().unwrap().view_number; + + if let Some(new_decided_view) = res.new_decided_view_number { + task_state.cancel_tasks(new_decided_view).await; + } + task_state.current_proposal = Some(proposal.clone()); + task_state.spawn_vote_task(view, event_stream.clone()).await; + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + if let Err(e) = task_state + .publish_proposal(new_view, event_stream.clone()) + .await + { + debug!("Failed to propose; error = {e:?}"); + }; + } + + #[allow(clippy::cast_precision_loss)] + if let Some(new_anchor_view) = res.new_decided_view_number { + let block_size = res.included_txns.map(|set| set.len().try_into().unwrap()); + let decide_sent = broadcast_event( + Event { + view_number: new_anchor_view, + event: EventType::Decide { + leaf_chain: Arc::new(res.leaf_views), + qc: Arc::new(res.new_decide_qc.unwrap()), + block_size, + }, + }, + &task_state.output_event_stream, + ); + let mut consensus = task_state.consensus.write().await; + + let old_anchor_view = consensus.last_decided_view(); + consensus.collect_garbage(old_anchor_view, new_anchor_view); + if let Err(e) = consensus.update_last_decided_view(new_anchor_view) { + tracing::trace!("{e:?}"); + } + consensus + .metrics + .last_decided_time + .set(Utc::now().timestamp().try_into().unwrap()); + consensus.metrics.invalid_qc.set(0); + consensus + .metrics + .last_decided_view + .set(usize::try_from(consensus.last_decided_view().u64()).unwrap()); + let cur_number_of_views_per_decide_event = + *task_state.cur_view - consensus.last_decided_view().u64(); + consensus + .metrics + .number_of_views_per_decide_event + .add_point(cur_number_of_views_per_decide_event as f64); + + debug!( + "Sending Decide for view {:?}", + consensus.last_decided_view() + ); + drop(consensus); + debug!("Decided txns len {:?}", block_size); + decide_sent.await; + broadcast_event( + Arc::new(HotShotEvent::LeafDecided(res.leaves_decided)), + &event_stream, + ) + .await; + debug!("decide send succeeded"); + } + + Ok(()) +} + +/// Private key, latest decided upgrade certificate, committee membership, and event stream, for +/// sending the vote. +type VoteInfo = ( + <::SignatureKey as SignatureKey>::PrivateKey, + Option>, + Arc<::Membership>, + Sender>>, +); + +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +#[allow(unused_variables)] +/// Check if we are able to vote, like whether the proposal is valid, +/// whether we have DAC and VID share, and if so, vote. +pub async fn update_state_and_vote_if_able>( + cur_view: TYPES::Time, + proposal: QuorumProposal, + public_key: TYPES::SignatureKey, + consensus: Arc>>, + storage: Arc>, + quorum_membership: Arc, + instance_state: Arc, + vote_info: VoteInfo, + version: Version, +) -> bool { + use hotshot_types::simple_vote::QuorumVote; + + if !quorum_membership.has_stake(&public_key) { + debug!("We were not chosen for quorum committee on {:?}", cur_view); + return false; + } + + let read_consnesus = consensus.read().await; + // Only vote if you has seen the VID share for this view + let Some(vid_shares) = read_consnesus.vid_shares().get(&proposal.view_number) else { + debug!( + "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + proposal.view_number + ); + return false; + }; + let Some(vid_share) = vid_shares.get(&public_key).cloned() else { + debug!("we have not seen our VID share yet"); + return false; + }; + + if let Some(upgrade_cert) = &vote_info.1 { + if upgrade_cert.upgrading_in(cur_view) + && Some(proposal.block_header.payload_commitment()) + != null_block::commitment(quorum_membership.total_nodes()) + { + info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); + return false; + } + } + + // Only vote if you have the DA cert + // ED Need to update the view number this is stored under? + let Some(cert) = read_consnesus.saved_da_certs().get(&cur_view).cloned() else { + return false; + }; + drop(read_consnesus); + + let view = cert.view_number; + // TODO: do some of this logic without the vote token check, only do that when voting. + let justify_qc = proposal.justify_qc.clone(); + let mut parent = consensus + .read() + .await + .saved_leaves() + .get(&justify_qc.date().leaf_commit) + .cloned(); + parent = match parent { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + vote_info.3.clone(), + Arc::clone(&quorum_membership), + Arc::clone(&consensus), + ) + .await + .ok(), + }; + + let read_consnesus = consensus.read().await; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.date().leaf_commit, + proposal.view_number, + ); + return false; + }; + let (Some(parent_state), _) = read_consnesus.state_and_delta(parent.view_number()) else { + warn!("Parent state not found! Consensus internally inconsistent"); + return false; + }; + drop(read_consnesus); + let Ok((validated_state, state_delta)) = parent_state + .validate_and_apply_header( + instance_state.as_ref(), + &parent, + &proposal.block_header.clone(), + vid_share.data.common.clone(), + version, + ) + .await + else { + warn!("Block header doesn't extend the proposal!"); + return false; + }; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + let parent_commitment = parent.commit(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.parent_commitment() != parent_commitment { + return false; + } + + // Validate the DAC. + let message = if cert.is_valid_cert(vote_info.2.as_ref()) { + // Validate the block payload commitment for non-genesis DAC. + if cert.date().payload_commit != proposal.block_header.payload_commitment() { + warn!( + "Block payload commitment does not equal da cert payload commitment. View = {}", + *view + ); + return false; + } + if let Ok(vote) = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: proposed_leaf.commit(), + }, + view, + &public_key, + &vote_info.0, + ) { + GeneralConsensusMessage::::Vote(vote) + } else { + error!("Unable to sign quorum vote!"); + return false; + } + } else { + error!( + "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", + cert, cur_view + ); + return false; + }; + + let mut consensus = consensus.write().await; + if let Err(e) = consensus.update_validated_state_map( + cur_view, + View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: Arc::clone(&state), + delta: Some(Arc::clone(&delta)), + }, + }, + ) { + tracing::trace!("{e:?}"); + } + consensus.update_saved_leaves(proposed_leaf.clone()); + let new_leaves = consensus.saved_leaves().clone(); + let new_state = consensus.validated_state_map().clone(); + drop(consensus); + + if let Err(e) = storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await + { + error!("Couldn't store undecided state. Error: {:?}", e); + } + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.view_number() + 1 + ); + // Add to the storage that we have received the VID disperse for a specific view + if let Err(e) = storage.write().await.append_vid(&vid_share).await { + warn!( + "Failed to store VID Disperse Proposal with error {:?}, aborting vote", + e + ); + return false; + } + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &vote_info.3).await; + return true; + } + debug!( + "Received VID share, but couldn't find DAC cert for view {:?}", + *proposal.view_number(), + ); + false +} diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs deleted file mode 100644 index f736ddd900..0000000000 --- a/task-impls/src/consensus/helpers.rs +++ /dev/null @@ -1,1485 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use anyhow::{bail, ensure, Context, Result}; -use async_broadcast::{broadcast, Sender}; -use async_compatibility_layer::art::async_timeout; -use async_lock::RwLock; -#[cfg(not(feature = "dependency-tasks"))] -#[cfg(async_executor_impl = "async-std")] -#[cfg(not(feature = "dependency-tasks"))] -use async_std::task::JoinHandle; -use committable::{Commitment, Committable}; -use hotshot_types::{ - consensus::{Consensus, View}, - data::{Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType, LeafInfo}, - message::Proposal, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, - traits::{ - block_contents::BlockHeader, election::Membership, node_implementation::NodeType, - signature_key::SignatureKey, states::ValidatedState, BlockPayload, - }, - utils::{Terminator, ViewInner}, - vote::{Certificate, HasViewNumber}, -}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, info, warn}; -#[cfg(not(feature = "dependency-tasks"))] -use { - super::ConsensusTaskState, - crate::{ - consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, - helpers::AnyhowTracing, - }, - async_compatibility_layer::art::{async_sleep, async_spawn}, - chrono::Utc, - core::time::Duration, - futures::FutureExt, - hotshot_types::{ - consensus::CommitmentAndMetadata, - traits::{ - node_implementation::{ConsensusTime, NodeImplementation}, - storage::Storage, - }, - }, - hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}, - std::marker::PhantomData, - tracing::error, - vbs::version::Version, -}; - -use crate::{ - events::{HotShotEvent, ProposalMissing}, - helpers::broadcast_event, - request::REQUEST_TIMEOUT, -}; - -// TODO: Replace this function with `validate_proposal_safety_and_liveness` after the following -// issue is done: -// https://github.com/EspressoSystems/HotShot/issues/3357. -/// Validate the state and safety and liveness of a proposal then emit -/// a `QuorumProposalValidated` event. -/// -/// TODO - This should just take the QuorumProposalRecv task state after -/// we merge the dependency tasks. -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -#[cfg(not(feature = "dependency-tasks"))] -pub async fn temp_validate_proposal_safety_and_liveness( - proposal: Proposal>, - parent_leaf: Leaf, - consensus: Arc>>, - decided_upgrade_certificate: Option>, - quorum_membership: Arc, - view_leader_key: TYPES::SignatureKey, - event_stream: Sender>>, - sender: TYPES::SignatureKey, - event_sender: Sender>, -) -> Result<()> { - let view_number = proposal.data.view_number(); - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(), - "Proposed leaf does not extend the parent leaf." - ); - - let state = Arc::new( - >::from_header(&proposal.data.block_header), - ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; - - if let Err(e) = consensus - .write() - .await - .update_validated_state_map(view_number, view.clone()) - { - tracing::trace!("{e:?}"); - } - consensus - .write() - .await - .update_saved_leaves(proposed_leaf.clone()); - - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; - - // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - // - // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: - // - // proposal.validate_signature(&quorum_membership)?; - // - // in a future PR. - ensure!( - view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), - "Could not verify proposal." - ); - - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; - - // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.temp_extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; - - let justify_qc = proposal.data.justify_qc.clone(); - // Create a positive vote if either liveness or safety check - // passes. - - // Liveness check. - let read_consensus = consensus.read().await; - let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = read_consensus.visit_leaf_ancestors( - justify_qc.view_number(), - Terminator::Inclusive(read_consensus.locked_view()), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number() != read_consensus.locked_view() - }, - ); - let safety_check = outcome.is_ok(); - - ensure!(safety_check || liveness_check, { - if let Err(e) = outcome { - broadcast_event( - Event { - view_number, - event: EventType::Error { error: Arc::new(e) }, - }, - &event_sender, - ) - .await; - } - - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) - }); - - // We accept the proposal, notify the application layer - - broadcast_event( - Event { - view_number, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender, - }, - }, - &event_sender, - ) - .await; - // Notify other tasks - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), - parent_leaf, - )), - &event_stream, - ) - .await; - - Ok(()) -} - -/// Validate the state and safety and liveness of a proposal then emit -/// a `QuorumProposalValidated` event. -/// -/// TODO - This should just take the QuorumProposalRecv task state after -/// we merge the dependency tasks. -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -pub async fn validate_proposal_safety_and_liveness( - proposal: Proposal>, - parent_leaf: Leaf, - consensus: Arc>>, - decided_upgrade_certificate: Arc>>>, - quorum_membership: Arc, - view_leader_key: TYPES::SignatureKey, - event_stream: Sender>>, - sender: TYPES::SignatureKey, - event_sender: Sender>, -) -> Result<()> { - let view_number = proposal.data.view_number(); - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(), - "Proposed leaf does not extend the parent leaf." - ); - - let state = Arc::new( - >::from_header(&proposal.data.block_header), - ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; - - if let Err(e) = consensus - .write() - .await - .update_validated_state_map(view_number, view.clone()) - { - tracing::trace!("{e:?}"); - } - consensus - .write() - .await - .update_saved_leaves(proposed_leaf.clone()); - - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; - - // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - // - // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: - // - // proposal.validate_signature(&quorum_membership)?; - // - // in a future PR. - ensure!( - view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), - "Could not verify proposal." - ); - - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; - - // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf - .extends_upgrade(&parent_leaf, &decided_upgrade_certificate) - .await?; - - let justify_qc = proposal.data.justify_qc.clone(); - // Create a positive vote if either liveness or safety check - // passes. - - // Liveness check. - let read_consensus = consensus.read().await; - let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = read_consensus.visit_leaf_ancestors( - justify_qc.view_number(), - Terminator::Inclusive(read_consensus.locked_view()), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number() != read_consensus.locked_view() - }, - ); - let safety_check = outcome.is_ok(); - - ensure!(safety_check || liveness_check, { - if let Err(e) = outcome { - broadcast_event( - Event { - view_number, - event: EventType::Error { error: Arc::new(e) }, - }, - &event_sender, - ) - .await; - } - - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) - }); - - // We accept the proposal, notify the application layer - - broadcast_event( - Event { - view_number, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender, - }, - }, - &event_sender, - ) - .await; - // Notify other tasks - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), - parent_leaf, - )), - &event_stream, - ) - .await; - - Ok(()) -} - -/// Create the header for a proposal, build the proposal, and broadcast -/// the proposal send evnet. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -pub async fn create_and_send_proposal( - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: Arc>>, - event_stream: Sender>>, - view: TYPES::Time, - commitment_and_metadata: CommitmentAndMetadata, - parent_leaf: Leaf, - state: Arc, - upgrade_cert: Option>, - proposal_cert: Option>, - round_start_delay: u64, - instance_state: Arc, - version: Version, -) { - let consensus_read = consensus.read().await; - let Some(Some(vid_share)) = consensus_read - .vid_shares() - .get(&view) - .map(|shares| shares.get(&public_key).cloned()) - else { - error!("Cannot propopse without our VID share, view {:?}", view); - return; - }; - drop(consensus_read); - let block_header = match TYPES::BlockHeader::new( - state.as_ref(), - instance_state.as_ref(), - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fee, - vid_share.data.common, - version, - ) - .await - { - Ok(header) => header, - Err(err) => { - error!(%err, "Failed to construct block header"); - return; - } - }; - - let proposal = QuorumProposal { - block_header, - view_number: view, - justify_qc: consensus.read().await.high_qc().clone(), - proposal_certificate: proposal_cert, - upgrade_certificate: upgrade_cert, - }; - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.parent_commitment() != parent_leaf.commit() { - return; - } - - let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) - else { - // This should never happen. - error!("Failed to sign proposed_leaf.commit()!"); - return; - }; - - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, - }; - debug!( - "Sending null proposal for view {:?}", - proposed_leaf.view_number(), - ); - if let Err(e) = consensus - .write() - .await - .update_last_proposed_view(message.clone()) - { - tracing::trace!("{e:?}"); - return; - } - async_sleep(Duration::from_millis(round_start_delay)).await; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend( - message.clone(), - public_key, - )), - &event_stream, - ) - .await; -} - -/// Validates, from a given `proposal` that the view that it is being submitted for is valid when -/// compared to `cur_view` which is the highest proposed view (so far) for the caller. If the proposal -/// is for a view that's later than expected, that the proposal includes a timeout or view sync certificate. -pub fn validate_proposal_view_and_certs( - proposal: &Proposal>, - sender: &TYPES::SignatureKey, - cur_view: TYPES::Time, - quorum_membership: &Arc, - timeout_membership: &Arc, -) -> Result<()> { - let view = proposal.data.view_number(); - ensure!( - view >= cur_view, - "Proposal is from an older view {:?}", - proposal.data.clone() - ); - - let view_leader_key = quorum_membership.leader(view); - ensure!( - view_leader_key == *sender, - "Leader key does not match key in proposal" - ); - - // Verify a timeout certificate OR a view sync certificate exists and is valid. - if proposal.data.justify_qc.view_number() != view - 1 { - let received_proposal_cert = - proposal.data.proposal_certificate.clone().context(format!( - "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", - *view - ))?; - - match received_proposal_cert { - ViewChangeEvidence::Timeout(timeout_cert) => { - ensure!( - timeout_cert.date().view == view - 1, - "Timeout certificate for view {} was not for the immediately preceding view", - *view - ); - ensure!( - timeout_cert.is_valid_cert(timeout_membership.as_ref()), - "Timeout certificate for view {} was invalid", - *view - ); - } - ViewChangeEvidence::ViewSync(view_sync_cert) => { - ensure!( - view_sync_cert.view_number == view, - "View sync cert view number {:?} does not match proposal view number {:?}", - view_sync_cert.view_number, - view - ); - - // View sync certs must also be valid. - ensure!( - view_sync_cert.is_valid_cert(quorum_membership.as_ref()), - "Invalid view sync finalize cert provided" - ); - } - } - } - - // Validate the upgrade certificate -- this is just a signature validation. - // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, quorum_membership)?; - - Ok(()) -} - -/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. -pub(crate) async fn parent_leaf_and_state( - next_proposal_view_number: TYPES::Time, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - consensus: Arc>>, -) -> Result<(Leaf, Arc<::ValidatedState>)> { - ensure!( - quorum_membership.leader(next_proposal_view_number) == public_key, - "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", - ); - - let consensus_reader = consensus.read().await; - let parent_view_number = consensus_reader.high_qc().view_number(); - let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( - format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) - )?; - - // Leaf hash in view inner does not match high qc hash - Why? - let (leaf_commitment, state) = parent_view.leaf_and_state().context( - format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") - )?; - - if leaf_commitment != consensus_reader.high_qc().date().leaf_commit { - // NOTE: This happens on the genesis block - debug!( - "They don't equal: {:?} {:?}", - leaf_commitment, - consensus_reader.high_qc().date().leaf_commit - ); - } - - let leaf = consensus_reader - .saved_leaves() - .get(&leaf_commitment) - .context("Failed to find high QC of parent")?; - - let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); - let parent_leaf = leaf.clone(); - let original_parent_hash = parent_leaf.commit(); - let mut next_parent_hash = original_parent_hash; - - // Walk back until we find a decide - if !reached_decided { - debug!("We have not reached decide"); - while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { - if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { - break; - } - next_parent_hash = next_parent_leaf.parent_commitment(); - } - // TODO do some sort of sanity check on the view number that it matches decided - } - - Ok((parent_leaf, Arc::clone(state))) -} - -/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the -/// standard case proposal scenario. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -pub async fn publish_proposal_from_commitment_and_metadata( - view: TYPES::Time, - sender: Sender>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: Arc>>, - delay: u64, - formed_upgrade_certificate: Option>, - decided_upgrade_cert: Option>, - commitment_and_metadata: Option>, - proposal_cert: Option>, - instance_state: Arc, - version: Version, -) -> Result> { - let (parent_leaf, state) = parent_leaf_and_state( - view, - quorum_membership, - public_key.clone(), - Arc::clone(&consensus), - ) - .await?; - - // In order of priority, we should try to attach: - // - the parent certificate if it exists, or - // - our own certificate that we formed. - // In either case, we need to ensure that the certificate is still relevant. - // - // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. - // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. - let mut proposal_upgrade_certificate = parent_leaf - .upgrade_certificate() - .or(formed_upgrade_certificate); - - if !proposal_upgrade_certificate - .clone() - .is_some_and(|cert| cert.temp_is_relevant(view, decided_upgrade_cert).is_ok()) - { - proposal_upgrade_certificate = None; - } - - // We only want to proposal to be attached if any of them are valid. - let proposal_certificate = proposal_cert - .as_ref() - .filter(|cert| cert.is_valid_for_view(&view)) - .cloned(); - - // FIXME - This is not great, and will be fixed later. - // If it's > July, 2024 and this is still here, something has gone horribly wrong. - let cnm = commitment_and_metadata - .clone() - .context("Cannot propose because we don't have the VID payload commitment and metadata")?; - - ensure!( - cnm.block_view == view, - "Cannot propose because our VID payload commitment and metadata is for an older view." - ); - - let create_and_send_proposal_handle = async_spawn(async move { - create_and_send_proposal( - public_key, - private_key, - consensus, - sender, - view, - cnm, - parent_leaf.clone(), - state, - proposal_upgrade_certificate, - proposal_certificate, - delay, - instance_state, - version, - ) - .await; - }); - - Ok(create_and_send_proposal_handle) -} - -/// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either -/// `commitment_and_metadata`, or a `decided_upgrade_cert`. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -pub async fn publish_proposal_if_able( - view: TYPES::Time, - sender: Sender>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: Arc>>, - delay: u64, - formed_upgrade_certificate: Option>, - decided_upgrade_cert: Option>, - commitment_and_metadata: Option>, - proposal_cert: Option>, - instance_state: Arc, - version: Version, -) -> Result> { - publish_proposal_from_commitment_and_metadata( - view, - sender, - quorum_membership, - public_key, - private_key, - consensus, - delay, - formed_upgrade_certificate, - decided_upgrade_cert, - commitment_and_metadata, - proposal_cert, - instance_state, - version, - ) - .await -} - -/// Trigger a request to the network for a proposal for a view and wait for the response -pub(crate) async fn fetch_proposal( - view: TYPES::Time, - event_stream: Sender>>, - quorum_membership: Arc, - consensus: Arc>>, -) -> Result> { - tracing::debug!("Fetching proposal for view {:?}", view); - let (tx, mut rx) = broadcast(1); - let event = ProposalMissing { - view, - response_chan: tx, - }; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalRequest(event)), - &event_stream, - ) - .await; - let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { - bail!("Request for proposal failed"); - }; - let view_number = proposal.data.view_number(); - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { - bail!("Invalid justify_qc in proposal for view {}", *view_number); - } - let mut consensus_write = consensus.write().await; - let leaf = Leaf::from_quorum_proposal(&proposal.data); - let state = Arc::new( - >::from_header(&proposal.data.block_header), - ); - - let view = View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, - }; - if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { - tracing::trace!("{e:?}"); - } - - consensus_write.update_saved_leaves(leaf.clone()); - broadcast_event( - HotShotEvent::ValidatedStateUpdated(view_number, view).into(), - &event_stream, - ) - .await; - Ok(leaf) -} - -/// Handle the received quorum proposal. -/// -/// Returns the proposal that should be used to set the `cur_proposal` for other tasks. -#[allow(clippy::too_many_lines)] -#[cfg(not(feature = "dependency-tasks"))] -pub(crate) async fn handle_quorum_proposal_recv>( - proposal: &Proposal>, - sender: &TYPES::SignatureKey, - event_stream: Sender>>, - task_state: &mut ConsensusTaskState, - version: Version, -) -> Result>> { - let sender = sender.clone(); - debug!( - "Received Quorum Proposal for view {}", - *proposal.data.view_number - ); - - let cur_view = task_state.cur_view; - - validate_proposal_view_and_certs( - proposal, - &sender, - task_state.cur_view, - &task_state.quorum_membership, - &task_state.timeout_membership, - ) - .context("Failed to validate proposal view and attached certs")?; - - let view = proposal.data.view_number(); - let view_leader_key = task_state.quorum_membership.leader(view); - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { - let consensus = task_state.consensus.read().await; - consensus.metrics.invalid_qc.update(1); - bail!("Invalid justify_qc in proposal for view {}", *view); - } - - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::( - view, - &event_stream, - task_state.timeout, - Arc::clone(&task_state.consensus), - &mut task_state.cur_view, - &mut task_state.cur_view_time, - &mut task_state.timeout_task, - &task_state.output_event_stream, - SEND_VIEW_CHANGE_EVENT, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, - ) - .await - { - debug!("Failed to update view; error = {e:#}"); - } - - let mut parent_leaf = task_state - .consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned(); - - parent_leaf = match parent_leaf { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - event_stream.clone(), - Arc::clone(&task_state.quorum_membership), - Arc::clone(&task_state.consensus), - ) - .await - .ok(), - }; - let consensus_read = task_state.consensus.read().await; - - // Get the parent leaf and state. - let parent = match parent_leaf { - Some(leaf) => { - if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { - Some((leaf, Arc::clone(&state))) - } else { - bail!("Parent state not found! Consensus internally inconsistent"); - } - } - None => None, - }; - - if justify_qc.view_number() > consensus_read.high_qc().view_number { - if let Err(e) = task_state - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - bail!("Failed to store High QC not voting. Error: {:?}", e); - } - } - - drop(consensus_read); - let mut consensus_write = task_state.consensus.write().await; - - if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { - tracing::trace!("{e:?}"); - } - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some((parent_leaf, _parent_state)) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.date().leaf_commit - ); - let leaf = Leaf::from_quorum_proposal(&proposal.data); - - let state = Arc::new( - >::from_header( - &proposal.data.block_header, - ), - ); - - if let Err(e) = consensus_write.update_validated_state_map( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, - }, - ) { - tracing::trace!("{e:?}"); - } - - consensus_write.update_saved_leaves(leaf.clone()); - let new_leaves = consensus_write.saved_leaves().clone(); - let new_state = consensus_write.validated_state_map().clone(); - drop(consensus_write); - - if let Err(e) = task_state - .storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - - // If we are missing the parent from storage, the safety check will fail. But we can - // still vote if the liveness check succeeds. - #[cfg(not(feature = "dependency-tasks"))] - { - let consensus_read = task_state.consensus.read().await; - let liveness_check = justify_qc.view_number() > consensus_read.locked_view(); - - let high_qc = consensus_read.high_qc().clone(); - let locked_view = consensus_read.locked_view(); - - drop(consensus_read); - - let mut current_proposal = None; - if liveness_check { - current_proposal = Some(proposal.data.clone()); - let new_view = proposal.data.view_number + 1; - - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) - == task_state.public_key - && high_qc.view_number == current_proposal.clone().unwrap().view_number; - - let qc = high_qc.clone(); - if should_propose { - debug!( - "Attempting to publish proposal after voting for liveness; now in view: {}", - *new_view - ); - let create_and_send_proposal_handle = publish_proposal_if_able( - qc.view_number + 1, - event_stream, - Arc::clone(&task_state.quorum_membership), - task_state.public_key.clone(), - task_state.private_key.clone(), - Arc::clone(&task_state.consensus), - task_state.round_start_delay, - task_state.formed_upgrade_certificate.clone(), - task_state.decided_upgrade_cert.clone(), - task_state.payload_commitment_and_metadata.clone(), - task_state.proposal_cert.clone(), - Arc::clone(&task_state.instance_state), - version, - ) - .await?; - - task_state - .spawned_tasks - .entry(view) - .or_default() - .push(create_and_send_proposal_handle); - } - } else { - warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); - } - - return Ok(current_proposal); - } - - #[cfg(feature = "dependency-tasks")] - return Ok(None); - }; - - task_state - .spawned_tasks - .entry(proposal.data.view_number()) - .or_default() - .push(async_spawn( - temp_validate_proposal_safety_and_liveness( - proposal.clone(), - parent_leaf, - Arc::clone(&task_state.consensus), - task_state.decided_upgrade_cert.clone(), - Arc::clone(&task_state.quorum_membership), - view_leader_key, - event_stream.clone(), - sender, - task_state.output_event_stream.clone(), - ) - .map(AnyhowTracing::err_as_debug), - )); - Ok(None) -} - -/// Helper type to give names and to the output values of the leaf chain traversal operation. -#[derive(Debug)] -pub struct LeafChainTraversalOutcome { - /// The new locked view obtained from a 2 chain starting from the proposal's parent. - pub new_locked_view_number: Option, - - /// The new decided view obtained from a 3 chain starting from the proposal's parent. - pub new_decided_view_number: Option, - - /// The qc for the decided chain. - pub new_decide_qc: Option>, - - /// The decided leaves with corresponding validated state and VID info. - pub leaf_views: Vec>, - - /// The decided leaves. - pub leaves_decided: Vec>, - - /// The transactions in the block payload for each leaf. - pub included_txns: Option::Transaction>>>, - - /// The most recent upgrade certificate from one of the leaves. - pub decided_upgrade_certificate: Option>, -} - -/// We need Default to be implemented because the leaf ascension has very few failure branches, -/// and when they *do* happen, we still return intermediate states. Default makes the burden -/// of filling values easier. -impl Default for LeafChainTraversalOutcome { - /// The default method for this type is to set all of the returned values to `None`. - fn default() -> Self { - Self { - new_locked_view_number: None, - new_decided_view_number: None, - new_decide_qc: None, - leaf_views: Vec::new(), - leaves_decided: Vec::new(), - included_txns: None, - decided_upgrade_certificate: None, - } - } -} - -/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin -/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is -/// one view newer), then we begin attempting to form the chain. This is a direct impl from -/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: -/// -/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, -/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, -/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. -/// It forms a Three-Chain, if b'' forms a Two-Chain. -/// -/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit -/// is reached when we have a two chain, and a decide is reached when we have a three chain. -/// -/// # Example -/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, -/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the -/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes -/// 2-3-5. -/// -/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This -/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the -/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at -/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, -/// and out new locked view will be 6. -/// -/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and -/// the anchor view will be set to view 6, with the locked view as view 7. -pub async fn decide_from_proposal( - proposal: &QuorumProposal, - consensus: Arc>>, - existing_upgrade_cert: &Option>, - public_key: &TYPES::SignatureKey, -) -> LeafChainTraversalOutcome { - let consensus_reader = consensus.read().await; - let view_number = proposal.view_number(); - let parent_view_number = proposal.justify_qc.view_number(); - let old_anchor_view = consensus_reader.last_decided_view(); - - let mut last_view_number_visited = view_number; - let mut current_chain_length = 0usize; - let mut res = LeafChainTraversalOutcome::default(); - - if let Err(e) = consensus_reader.visit_leaf_ancestors( - parent_view_number, - Terminator::Exclusive(old_anchor_view), - true, - |leaf, state, delta| { - // This is the core paper logic. We're implementing the chain in chained hotstuff. - if res.new_decided_view_number.is_none() { - // If the last view number is the child of the leaf we've moved to... - if last_view_number_visited == leaf.view_number() + 1 { - last_view_number_visited = leaf.view_number(); - - // The chain grows by one - current_chain_length += 1; - - // We emit a locked view when the chain length is 2 - if current_chain_length == 2 { - res.new_locked_view_number = Some(leaf.view_number()); - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - res.new_decide_qc = Some(leaf.justify_qc().clone()); - } else if current_chain_length == 3 { - // And we decide when the chain length is 3. - res.new_decided_view_number = Some(leaf.view_number()); - } - } else { - // There isn't a new chain extension available, so we signal to the callback - // owner that we can exit for now. - return false; - } - } - - // Now, if we *have* reached a decide, we need to do some state updates. - if let Some(new_decided_view) = res.new_decided_view_number { - // First, get a mutable reference to the provided leaf. - let mut leaf = leaf.clone(); - - // Update the metrics - if leaf.view_number() == new_decided_view { - consensus_reader - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.height()).unwrap_or(0)); - } - - // Check if there's a new upgrade certificate available. - if let Some(cert) = leaf.upgrade_certificate() { - if leaf.upgrade_certificate() != *existing_upgrade_cert { - if cert.data.decide_by < view_number { - warn!("Failed to decide an upgrade certificate in time. Ignoring."); - } else { - info!("Reached decide on upgrade certificate: {:?}", cert); - res.decided_upgrade_certificate = Some(cert.clone()); - } - } - } - // If the block payload is available for this leaf, include it in - // the leaf chain that we send to the client. - if let Some(encoded_txns) = - consensus_reader.saved_payloads().get(&leaf.view_number()) - { - let payload = - BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); - - leaf.fill_block_payload_unchecked(payload); - } - - // Get the VID share at the leaf's view number, corresponding to our key - // (if one exists) - let vid_share = consensus_reader - .vid_shares() - .get(&leaf.view_number()) - .unwrap_or(&HashMap::new()) - .get(public_key) - .cloned() - .map(|prop| prop.data); - - // Add our data into a new `LeafInfo` - res.leaf_views.push(LeafInfo::new( - leaf.clone(), - Arc::clone(&state), - delta.clone(), - vid_share, - )); - res.leaves_decided.push(leaf.clone()); - if let Some(ref payload) = leaf.block_payload() { - res.included_txns = Some( - payload - .transaction_commitments(leaf.block_header().metadata()) - .into_iter() - .collect::>(), - ); - } - } - true - }, - ) { - debug!("Leaf ascension failed; error={e}"); - } - - res -} - -/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. -#[allow(clippy::too_many_lines)] -#[cfg(not(feature = "dependency-tasks"))] -pub async fn handle_quorum_proposal_validated>( - proposal: &QuorumProposal, - event_stream: Sender>>, - task_state: &mut ConsensusTaskState, -) -> Result<()> { - let view = proposal.view_number(); - #[cfg(not(feature = "dependency-tasks"))] - { - task_state.current_proposal = Some(proposal.clone()); - } - - let res = decide_from_proposal( - proposal, - Arc::clone(&task_state.consensus), - &task_state.decided_upgrade_cert, - &task_state.public_key, - ) - .await; - - if let Some(cert) = res.decided_upgrade_certificate { - task_state.decided_upgrade_cert = Some(cert.clone()); - - let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; - *decided_certificate_lock = Some(cert.clone()); - drop(decided_certificate_lock); - let _ = event_stream - .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) - .await; - } - - let mut consensus = task_state.consensus.write().await; - if let Some(new_locked_view) = res.new_locked_view_number { - if let Err(e) = consensus.update_locked_view(new_locked_view) { - tracing::trace!("{e:?}"); - } - } - - drop(consensus); - - #[cfg(not(feature = "dependency-tasks"))] - { - let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; - // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key - && task_state.consensus.read().await.high_qc().view_number - == task_state.current_proposal.clone().unwrap().view_number; - - if let Some(new_decided_view) = res.new_decided_view_number { - task_state.cancel_tasks(new_decided_view).await; - } - task_state.current_proposal = Some(proposal.clone()); - task_state.spawn_vote_task(view, event_stream.clone()).await; - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - if let Err(e) = task_state - .publish_proposal(new_view, event_stream.clone()) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - } - - #[allow(clippy::cast_precision_loss)] - if let Some(new_anchor_view) = res.new_decided_view_number { - let block_size = res.included_txns.map(|set| set.len().try_into().unwrap()); - let decide_sent = broadcast_event( - Event { - view_number: new_anchor_view, - event: EventType::Decide { - leaf_chain: Arc::new(res.leaf_views), - qc: Arc::new(res.new_decide_qc.unwrap()), - block_size, - }, - }, - &task_state.output_event_stream, - ); - let mut consensus = task_state.consensus.write().await; - - let old_anchor_view = consensus.last_decided_view(); - consensus.collect_garbage(old_anchor_view, new_anchor_view); - if let Err(e) = consensus.update_last_decided_view(new_anchor_view) { - tracing::trace!("{e:?}"); - } - consensus - .metrics - .last_decided_time - .set(Utc::now().timestamp().try_into().unwrap()); - consensus.metrics.invalid_qc.set(0); - consensus - .metrics - .last_decided_view - .set(usize::try_from(consensus.last_decided_view().u64()).unwrap()); - let cur_number_of_views_per_decide_event = - *task_state.cur_view - consensus.last_decided_view().u64(); - consensus - .metrics - .number_of_views_per_decide_event - .add_point(cur_number_of_views_per_decide_event as f64); - - debug!( - "Sending Decide for view {:?}", - consensus.last_decided_view() - ); - drop(consensus); - debug!("Decided txns len {:?}", block_size); - decide_sent.await; - broadcast_event( - Arc::new(HotShotEvent::LeafDecided(res.leaves_decided)), - &event_stream, - ) - .await; - debug!("decide send succeeded"); - } - - Ok(()) -} - -/// Private key, latest decided upgrade certificate, committee membership, and event stream, for -/// sending the vote. -#[cfg(not(feature = "dependency-tasks"))] -type VoteInfo = ( - <::SignatureKey as SignatureKey>::PrivateKey, - Option>, - Arc<::Membership>, - Sender>>, -); - -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -#[allow(unused_variables)] -#[cfg(not(feature = "dependency-tasks"))] -/// Check if we are able to vote, like whether the proposal is valid, -/// whether we have DAC and VID share, and if so, vote. -pub async fn update_state_and_vote_if_able>( - cur_view: TYPES::Time, - proposal: QuorumProposal, - public_key: TYPES::SignatureKey, - consensus: Arc>>, - storage: Arc>, - quorum_membership: Arc, - instance_state: Arc, - vote_info: VoteInfo, - version: Version, -) -> bool { - use hotshot_types::simple_vote::QuorumVote; - - if !quorum_membership.has_stake(&public_key) { - debug!("We were not chosen for quorum committee on {:?}", cur_view); - return false; - } - - let read_consnesus = consensus.read().await; - // Only vote if you has seen the VID share for this view - let Some(vid_shares) = read_consnesus.vid_shares().get(&proposal.view_number) else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - proposal.view_number - ); - return false; - }; - let Some(vid_share) = vid_shares.get(&public_key).cloned() else { - debug!("we have not seen our VID share yet"); - return false; - }; - - if let Some(upgrade_cert) = &vote_info.1 { - if upgrade_cert.upgrading_in(cur_view) - && Some(proposal.block_header.payload_commitment()) - != null_block::commitment(quorum_membership.total_nodes()) - { - info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); - return false; - } - } - - // Only vote if you have the DA cert - // ED Need to update the view number this is stored under? - let Some(cert) = read_consnesus.saved_da_certs().get(&cur_view).cloned() else { - return false; - }; - drop(read_consnesus); - - let view = cert.view_number; - // TODO: do some of this logic without the vote token check, only do that when voting. - let justify_qc = proposal.justify_qc.clone(); - let mut parent = consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned(); - parent = match parent { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - vote_info.3.clone(), - Arc::clone(&quorum_membership), - Arc::clone(&consensus), - ) - .await - .ok(), - }; - - let read_consnesus = consensus.read().await; - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.date().leaf_commit, - proposal.view_number, - ); - return false; - }; - let (Some(parent_state), _) = read_consnesus.state_and_delta(parent.view_number()) else { - warn!("Parent state not found! Consensus internally inconsistent"); - return false; - }; - drop(read_consnesus); - let Ok((validated_state, state_delta)) = parent_state - .validate_and_apply_header( - instance_state.as_ref(), - &parent, - &proposal.block_header.clone(), - vid_share.data.common.clone(), - version, - ) - .await - else { - warn!("Block header doesn't extend the proposal!"); - return false; - }; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - let parent_commitment = parent.commit(); - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.parent_commitment() != parent_commitment { - return false; - } - - // Validate the DAC. - let message = if cert.is_valid_cert(vote_info.2.as_ref()) { - // Validate the block payload commitment for non-genesis DAC. - if cert.date().payload_commit != proposal.block_header.payload_commitment() { - warn!( - "Block payload commitment does not equal da cert payload commitment. View = {}", - *view - ); - return false; - } - if let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: proposed_leaf.commit(), - }, - view, - &public_key, - &vote_info.0, - ) { - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Unable to sign quorum vote!"); - return false; - } - } else { - error!( - "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", - cert, cur_view - ); - return false; - }; - - let mut consensus = consensus.write().await; - if let Err(e) = consensus.update_validated_state_map( - cur_view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }, - ) { - tracing::trace!("{e:?}"); - } - consensus.update_saved_leaves(proposed_leaf.clone()); - let new_leaves = consensus.saved_leaves().clone(); - let new_state = consensus.validated_state_map().clone(); - drop(consensus); - - if let Err(e) = storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - { - error!("Couldn't store undecided state. Error: {:?}", e); - } - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); - // Add to the storage that we have received the VID disperse for a specific view - if let Err(e) = storage.write().await.append_vid(&vid_share).await { - warn!( - "Failed to store VID Disperse Proposal with error {:?}, aborting vote", - e - ); - return false; - } - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &vote_info.3).await; - return true; - } - debug!( - "Received VID share, but couldn't find DAC cert for view {:?}", - *proposal.view_number(), - ); - false -} diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 43e2fb33bc..5f33243970 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,7 +1,10 @@ +#![cfg(not(feature = "dependency-tasks"))] + use std::{collections::BTreeMap, sync::Arc}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -10,50 +13,40 @@ use futures::future::join_all; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::{CommitmentAndMetadata, Consensus}, - data::{QuorumProposal, ViewChangeEvidence}, + data::{QuorumProposal, VidDisperseShare, ViewChangeEvidence}, event::{Event, EventType}, + message::Proposal, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, traits::{ election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, + storage::Storage, }, - vote::HasViewNumber, + vid::vid_scheme, + vote::{Certificate, HasViewNumber}, }; +use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; -#[cfg(not(feature = "dependency-tasks"))] -use { - crate::consensus::helpers::{ + +use crate::{ + consensus::handlers::{ handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, update_state_and_vote_if_able, }, - async_compatibility_layer::art::async_spawn, - hotshot_types::data::VidDisperseShare, - hotshot_types::message::Proposal, - hotshot_types::vid::vid_scheme, - hotshot_types::{traits::storage::Storage, vote::Certificate}, - jf_vid::VidScheme, - tracing::info, -}; - -use crate::{ - consensus::view_change::{update_view, DONT_SEND_VIEW_CHANGE_EVENT}, events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, + helpers::{broadcast_event, cancel_task, update_view, DONT_SEND_VIEW_CHANGE_EVENT}, vote_collection::{ create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, }, }; /// Helper functions to handle proposal-related functionality. -pub(crate) mod helpers; - -/// Handles view-change related functionality. -pub(crate) mod view_change; +pub(crate) mod handlers; /// Alias for Optional type for Vote Collectors type VoteCollectorOption = Option>; @@ -158,7 +151,6 @@ impl> ConsensusTaskState } /// Validate the VID disperse is correctly signed and has the correct share. - #[cfg(not(feature = "dependency-tasks"))] fn validate_disperse(&self, disperse: &Proposal>) -> bool { let view = disperse.data.view_number(); let payload_commitment = disperse.data.payload_commitment; @@ -203,7 +195,6 @@ impl> ConsensusTaskState true } - #[cfg(not(feature = "dependency-tasks"))] /// Publishes a proposal async fn publish_proposal( &mut self, @@ -237,7 +228,6 @@ impl> ConsensusTaskState /// Spawn a vote task for the given view. Will try to vote /// and emit a `QuorumVoteSend` event we should vote on the current proposal - #[cfg(not(feature = "dependency-tasks"))] async fn spawn_vote_task( &mut self, view: TYPES::Time, @@ -282,10 +272,8 @@ impl> ConsensusTaskState event: Arc>, event_stream: Sender>>, ) { - #[cfg(not(feature = "dependency-tasks"))] let version = *self.version.read().await; match event.as_ref() { - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!("proposal recv view: {:?}", proposal.data.view_number()); match handle_quorum_proposal_recv( @@ -306,7 +294,6 @@ impl> ConsensusTaskState Err(e) => debug!("Failed to propose {e:#}"), } } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumProposalValidated(proposal, _) => { debug!("proposal validated view: {:?}", proposal.view_number()); if let Err(e) = @@ -394,7 +381,6 @@ impl> ConsensusTaskState } } } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QcFormed(cert) => match cert { either::Right(qc) => { self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); @@ -446,7 +432,6 @@ impl> ConsensusTaskState self.formed_upgrade_certificate = Some(cert.clone()); } } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::DaCertificateRecv(cert) => { debug!("DAC Received for view {}!", *cert.view_number); let view = cert.view_number; @@ -463,7 +448,6 @@ impl> ConsensusTaskState } self.spawn_vote_task(view, event_stream).await; } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::VidShareRecv(disperse) => { let view = disperse.data.view_number(); @@ -607,7 +591,6 @@ impl> ConsensusTaskState consensus.metrics.number_of_timeouts_as_leader.add(1); } } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -661,7 +644,6 @@ impl> ConsensusTaskState } } } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { error!( @@ -686,7 +668,6 @@ impl> ConsensusTaskState }; } } - #[cfg(not(feature = "dependency-tasks"))] HotShotEvent::QuorumVoteSend(vote) => { let Some(proposal) = self.current_proposal.clone() else { return; diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs deleted file mode 100644 index edc25e1965..0000000000 --- a/task-impls/src/consensus/view_change.rs +++ /dev/null @@ -1,137 +0,0 @@ -use core::time::Duration; -use std::sync::Arc; - -use anyhow::{ensure, Result}; -use async_broadcast::Sender; -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use chrono::Utc; -use hotshot_types::{ - consensus::Consensus, - event::{Event, EventType}, - traits::node_implementation::{ConsensusTime, NodeType}, -}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error}; - -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; - -/// Constant which tells [`update_view`] to send a view change event when called. -pub(crate) const SEND_VIEW_CHANGE_EVENT: bool = true; - -/// Constant which tells [`update_view`] to not send a view change event when called. -pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; - -/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the -/// `timeout_task` which are updated during the operation of the function. -/// -/// # Errors -/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. -/// TODO: Remove args when we merge dependency tasks. -#[allow(clippy::too_many_arguments)] -pub(crate) async fn update_view( - new_view: TYPES::Time, - event_stream: &Sender>>, - timeout: u64, - consensus: Arc>>, - cur_view: &mut TYPES::Time, - cur_view_time: &mut i64, - timeout_task: &mut JoinHandle<()>, - output_event_stream: &Sender>, - send_view_change_event: bool, - is_old_view_leader: bool, -) -> Result<()> { - ensure!( - new_view > *cur_view, - "New view is not greater than our current view" - ); - - let old_view = *cur_view; - - debug!("Updating view from {} to {}", *old_view, *new_view); - - if *old_view / 100 != *new_view / 100 { - // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): - // switch to info! when INFO logs become less cluttered - error!("Progress: entered view {:>6}", *new_view); - } - - *cur_view = new_view; - - // The next view is just the current view + 1 - let next_view = *cur_view + 1; - - if send_view_change_event { - futures::join! { - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), - broadcast_event( - Event { - view_number: old_view, - event: EventType::ViewFinished { - view_number: old_view, - }, - }, - output_event_stream, - ) - }; - } - - // Spawn a timeout task if we did actually update view - let new_timeout_task = async_spawn({ - let stream = event_stream.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = next_view; - let timeout = Duration::from_millis(timeout); - async move { - async_sleep(timeout).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), - &stream, - ) - .await; - } - }); - - // cancel the old timeout task - cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await; - - let consensus = consensus.upgradable_read().await; - consensus - .metrics - .current_view - .set(usize::try_from(cur_view.u64()).unwrap()); - let new_view_time = Utc::now().timestamp(); - if is_old_view_leader { - #[allow(clippy::cast_precision_loss)] - consensus - .metrics - .view_duration_as_leader - .add_point((new_view_time - *cur_view_time) as f64); - } - *cur_view_time = new_view_time; - - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(cur_view.u64()).unwrap() - > usize::try_from(consensus.last_decided_view().u64()).unwrap() - { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(cur_view.u64()).unwrap() - - usize::try_from(consensus.last_decided_view().u64()).unwrap(), - ); - } - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; - if let Err(e) = consensus.update_view(new_view) { - tracing::trace!("{e:?}"); - } - tracing::trace!("View updated successfully"); - - Ok(()) -} diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 94cf5fed59..4094f37128 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -1,8 +1,807 @@ -use async_broadcast::{SendError, Sender}; +use core::time::Duration; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use anyhow::{bail, ensure, Context, Result}; +use async_broadcast::{broadcast, SendError, Sender}; +use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use chrono::Utc; +use committable::{Commitment, Committable}; +use hotshot_types::{ + consensus::Consensus, + data::{Leaf, QuorumProposal, ViewChangeEvidence}, + event::{Event, EventType, LeafInfo}, + message::Proposal, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, + traits::{ + block_contents::BlockHeader, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, + BlockPayload, ValidatedState, + }, + utils::{Terminator, View, ViewInner}, + vote::{Certificate, HasViewNumber}, +}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tracing::{debug, error, info, warn}; + +use crate::{ + events::{HotShotEvent, ProposalMissing}, + request::REQUEST_TIMEOUT, +}; + +/// Trigger a request to the network for a proposal for a view and wait for the response +pub(crate) async fn fetch_proposal( + view: TYPES::Time, + event_stream: Sender>>, + quorum_membership: Arc, + consensus: Arc>>, +) -> Result> { + tracing::debug!("Fetching proposal for view {:?}", view); + let (tx, mut rx) = broadcast(1); + let event = ProposalMissing { + view, + response_chan: tx, + }; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalRequest(event)), + &event_stream, + ) + .await; + let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { + bail!("Request for proposal failed"); + }; + let view_number = proposal.data.view_number(); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { + bail!("Invalid justify_qc in proposal for view {}", *view_number); + } + let mut consensus_write = consensus.write().await; + let leaf = Leaf::from_quorum_proposal(&proposal.data); + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + + let view = View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }; + if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { + tracing::trace!("{e:?}"); + } + + consensus_write.update_saved_leaves(leaf.clone()); + broadcast_event( + HotShotEvent::ValidatedStateUpdated(view_number, view).into(), + &event_stream, + ) + .await; + Ok(leaf) +} + +/// Helper type to give names and to the output values of the leaf chain traversal operation. +#[derive(Debug)] +pub struct LeafChainTraversalOutcome { + /// The new locked view obtained from a 2 chain starting from the proposal's parent. + pub new_locked_view_number: Option, + + /// The new decided view obtained from a 3 chain starting from the proposal's parent. + pub new_decided_view_number: Option, + + /// The qc for the decided chain. + pub new_decide_qc: Option>, + + /// The decided leaves with corresponding validated state and VID info. + pub leaf_views: Vec>, + + /// The decided leaves. + pub leaves_decided: Vec>, + + /// The transactions in the block payload for each leaf. + pub included_txns: Option::Transaction>>>, + + /// The most recent upgrade certificate from one of the leaves. + pub decided_upgrade_certificate: Option>, +} + +/// We need Default to be implemented because the leaf ascension has very few failure branches, +/// and when they *do* happen, we still return intermediate states. Default makes the burden +/// of filling values easier. +impl Default for LeafChainTraversalOutcome { + /// The default method for this type is to set all of the returned values to `None`. + fn default() -> Self { + Self { + new_locked_view_number: None, + new_decided_view_number: None, + new_decide_qc: None, + leaf_views: Vec::new(), + leaves_decided: Vec::new(), + included_txns: None, + decided_upgrade_certificate: None, + } + } +} + +/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin +/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is +/// one view newer), then we begin attempting to form the chain. This is a direct impl from +/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: +/// +/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, +/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, +/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. +/// It forms a Three-Chain, if b'' forms a Two-Chain. +/// +/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit +/// is reached when we have a two chain, and a decide is reached when we have a three chain. +/// +/// # Example +/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, +/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the +/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes +/// 2-3-5. +/// +/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This +/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the +/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at +/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, +/// and out new locked view will be 6. +/// +/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and +/// the anchor view will be set to view 6, with the locked view as view 7. +pub async fn decide_from_proposal( + proposal: &QuorumProposal, + consensus: Arc>>, + existing_upgrade_cert: &Option>, + public_key: &TYPES::SignatureKey, +) -> LeafChainTraversalOutcome { + let consensus_reader = consensus.read().await; + let view_number = proposal.view_number(); + let parent_view_number = proposal.justify_qc.view_number(); + let old_anchor_view = consensus_reader.last_decided_view(); + + let mut last_view_number_visited = view_number; + let mut current_chain_length = 0usize; + let mut res = LeafChainTraversalOutcome::default(); + + if let Err(e) = consensus_reader.visit_leaf_ancestors( + parent_view_number, + Terminator::Exclusive(old_anchor_view), + true, + |leaf, state, delta| { + // This is the core paper logic. We're implementing the chain in chained hotstuff. + if res.new_decided_view_number.is_none() { + // If the last view number is the child of the leaf we've moved to... + if last_view_number_visited == leaf.view_number() + 1 { + last_view_number_visited = leaf.view_number(); + + // The chain grows by one + current_chain_length += 1; + + // We emit a locked view when the chain length is 2 + if current_chain_length == 2 { + res.new_locked_view_number = Some(leaf.view_number()); + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + res.new_decide_qc = Some(leaf.justify_qc().clone()); + } else if current_chain_length == 3 { + // And we decide when the chain length is 3. + res.new_decided_view_number = Some(leaf.view_number()); + } + } else { + // There isn't a new chain extension available, so we signal to the callback + // owner that we can exit for now. + return false; + } + } + + // Now, if we *have* reached a decide, we need to do some state updates. + if let Some(new_decided_view) = res.new_decided_view_number { + // First, get a mutable reference to the provided leaf. + let mut leaf = leaf.clone(); + + // Update the metrics + if leaf.view_number() == new_decided_view { + consensus_reader + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height()).unwrap_or(0)); + } + + // Check if there's a new upgrade certificate available. + if let Some(cert) = leaf.upgrade_certificate() { + if leaf.upgrade_certificate() != *existing_upgrade_cert { + if cert.data.decide_by < view_number { + warn!("Failed to decide an upgrade certificate in time. Ignoring."); + } else { + info!("Reached decide on upgrade certificate: {:?}", cert); + res.decided_upgrade_certificate = Some(cert.clone()); + } + } + } + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(encoded_txns) = + consensus_reader.saved_payloads().get(&leaf.view_number()) + { + let payload = + BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); + + leaf.fill_block_payload_unchecked(payload); + } + + // Get the VID share at the leaf's view number, corresponding to our key + // (if one exists) + let vid_share = consensus_reader + .vid_shares() + .get(&leaf.view_number()) + .unwrap_or(&HashMap::new()) + .get(public_key) + .cloned() + .map(|prop| prop.data); + + // Add our data into a new `LeafInfo` + res.leaf_views.push(LeafInfo::new( + leaf.clone(), + Arc::clone(&state), + delta.clone(), + vid_share, + )); + res.leaves_decided.push(leaf.clone()); + if let Some(ref payload) = leaf.block_payload() { + res.included_txns = Some( + payload + .transaction_commitments(leaf.block_header().metadata()) + .into_iter() + .collect::>(), + ); + } + } + true + }, + ) { + debug!("Leaf ascension failed; error={e}"); + } + + res +} + +/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. +pub(crate) async fn parent_leaf_and_state( + next_proposal_view_number: TYPES::Time, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + consensus: Arc>>, +) -> Result<(Leaf, Arc<::ValidatedState>)> { + ensure!( + quorum_membership.leader(next_proposal_view_number) == public_key, + "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", + ); + + let consensus_reader = consensus.read().await; + let parent_view_number = consensus_reader.high_qc().view_number(); + let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( + format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) + )?; + + // Leaf hash in view inner does not match high qc hash - Why? + let (leaf_commitment, state) = parent_view.leaf_and_state().context( + format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") + )?; + + if leaf_commitment != consensus_reader.high_qc().date().leaf_commit { + // NOTE: This happens on the genesis block + debug!( + "They don't equal: {:?} {:?}", + leaf_commitment, + consensus_reader.high_qc().date().leaf_commit + ); + } + + let leaf = consensus_reader + .saved_leaves() + .get(&leaf_commitment) + .context("Failed to find high QC of parent")?; + + let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); + let parent_leaf = leaf.clone(); + let original_parent_hash = parent_leaf.commit(); + let mut next_parent_hash = original_parent_hash; + + // Walk back until we find a decide + if !reached_decided { + debug!("We have not reached decide"); + while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { + if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { + break; + } + next_parent_hash = next_parent_leaf.parent_commitment(); + } + // TODO do some sort of sanity check on the view number that it matches decided + } + + Ok((parent_leaf, Arc::clone(state))) +} + +// TODO: Replace this function with `validate_proposal_safety_and_liveness` after the following +// issue is done: +// https://github.com/EspressoSystems/HotShot/issues/3357. +/// Validate the state and safety and liveness of a proposal then emit +/// a `QuorumProposalValidated` event. +/// +/// # Errors +/// If any validation or state update fails. +/// +/// TODO - This should just take the QuorumProposalRecv task state after +/// we merge the dependency tasks. +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +#[cfg(not(feature = "dependency-tasks"))] +pub async fn temp_validate_proposal_safety_and_liveness( + proposal: Proposal>, + parent_leaf: Leaf, + consensus: Arc>>, + decided_upgrade_certificate: Option>, + quorum_membership: Arc, + view_leader_key: TYPES::SignatureKey, + event_stream: Sender>>, + sender: TYPES::SignatureKey, + event_sender: Sender>, +) -> Result<()> { + let view_number = proposal.data.view_number(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + ensure!( + proposed_leaf.parent_commitment() == parent_leaf.commit(), + "Proposed leaf does not extend the parent leaf." + ); + + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state, + delta: None, // May be updated to `Some` in the vote task. + }, + }; + + if let Err(e) = consensus + .write() + .await + .update_validated_state_map(view_number, view.clone()) + { + tracing::trace!("{e:?}"); + } + consensus + .write() + .await + .update_saved_leaves(proposed_leaf.clone()); + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + // + // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: + // + // proposal.validate_signature(&quorum_membership)?; + // + // in a future PR. + ensure!( + view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), + "Could not verify proposal." + ); + + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + + // Validate that the upgrade certificate is re-attached, if we saw one on the parent + proposed_leaf.temp_extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; + + let justify_qc = proposal.data.justify_qc.clone(); + // Create a positive vote if either liveness or safety check + // passes. + + // Liveness check. + let read_consensus = consensus.read().await; + let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = read_consensus.visit_leaf_ancestors( + justify_qc.view_number(), + Terminator::Inclusive(read_consensus.locked_view()), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number() != read_consensus.locked_view() + }, + ); + let safety_check = outcome.is_ok(); + + ensure!(safety_check || liveness_check, { + if let Err(e) = outcome { + broadcast_event( + Event { + view_number, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } + + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + }); + + // We accept the proposal, notify the application layer + + broadcast_event( + Event { + view_number, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }, + &event_sender, + ) + .await; + // Notify other tasks + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), + &event_stream, + ) + .await; + + Ok(()) +} + +/// Validate the state and safety and liveness of a proposal then emit +/// a `QuorumProposalValidated` event. +/// +/// +/// # Errors +/// If any validation or state update fails. +/// TODO - This should just take the QuorumProposalRecv task state after +/// we merge the dependency tasks. +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +pub async fn validate_proposal_safety_and_liveness( + proposal: Proposal>, + parent_leaf: Leaf, + consensus: Arc>>, + decided_upgrade_certificate: Arc>>>, + quorum_membership: Arc, + view_leader_key: TYPES::SignatureKey, + event_stream: Sender>>, + sender: TYPES::SignatureKey, + event_sender: Sender>, +) -> Result<()> { + let view_number = proposal.data.view_number(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + ensure!( + proposed_leaf.parent_commitment() == parent_leaf.commit(), + "Proposed leaf does not extend the parent leaf." + ); + + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state, + delta: None, // May be updated to `Some` in the vote task. + }, + }; + + if let Err(e) = consensus + .write() + .await + .update_validated_state_map(view_number, view.clone()) + { + tracing::trace!("{e:?}"); + } + consensus + .write() + .await + .update_saved_leaves(proposed_leaf.clone()); + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + // + // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: + // + // proposal.validate_signature(&quorum_membership)?; + // + // in a future PR. + ensure!( + view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), + "Could not verify proposal." + ); + + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + + // Validate that the upgrade certificate is re-attached, if we saw one on the parent + proposed_leaf + .extends_upgrade(&parent_leaf, &decided_upgrade_certificate) + .await?; + + let justify_qc = proposal.data.justify_qc.clone(); + // Create a positive vote if either liveness or safety check + // passes. + + // Liveness check. + let read_consensus = consensus.read().await; + let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = read_consensus.visit_leaf_ancestors( + justify_qc.view_number(), + Terminator::Inclusive(read_consensus.locked_view()), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number() != read_consensus.locked_view() + }, + ); + let safety_check = outcome.is_ok(); + + ensure!(safety_check || liveness_check, { + if let Err(e) = outcome { + broadcast_event( + Event { + view_number, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } + + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + }); + + // We accept the proposal, notify the application layer + + broadcast_event( + Event { + view_number, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }, + &event_sender, + ) + .await; + // Notify other tasks + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), + &event_stream, + ) + .await; + + Ok(()) +} + +/// Validates, from a given `proposal` that the view that it is being submitted for is valid when +/// compared to `cur_view` which is the highest proposed view (so far) for the caller. If the proposal +/// is for a view that's later than expected, that the proposal includes a timeout or view sync certificate. +/// +/// # Errors +/// If any validation or view number check fails. +pub fn validate_proposal_view_and_certs( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + cur_view: TYPES::Time, + quorum_membership: &Arc, + timeout_membership: &Arc, +) -> Result<()> { + let view = proposal.data.view_number(); + ensure!( + view >= cur_view, + "Proposal is from an older view {:?}", + proposal.data.clone() + ); + + let view_leader_key = quorum_membership.leader(view); + ensure!( + view_leader_key == *sender, + "Leader key does not match key in proposal" + ); + + // Verify a timeout certificate OR a view sync certificate exists and is valid. + if proposal.data.justify_qc.view_number() != view - 1 { + let received_proposal_cert = + proposal.data.proposal_certificate.clone().context(format!( + "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", + *view + ))?; + + match received_proposal_cert { + ViewChangeEvidence::Timeout(timeout_cert) => { + ensure!( + timeout_cert.date().view == view - 1, + "Timeout certificate for view {} was not for the immediately preceding view", + *view + ); + ensure!( + timeout_cert.is_valid_cert(timeout_membership.as_ref()), + "Timeout certificate for view {} was invalid", + *view + ); + } + ViewChangeEvidence::ViewSync(view_sync_cert) => { + ensure!( + view_sync_cert.view_number == view, + "View sync cert view number {:?} does not match proposal view number {:?}", + view_sync_cert.view_number, + view + ); + + // View sync certs must also be valid. + ensure!( + view_sync_cert.is_valid_cert(quorum_membership.as_ref()), + "Invalid view sync finalize cert provided" + ); + } + } + } + + // Validate the upgrade certificate -- this is just a signature validation. + // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, quorum_membership)?; + + Ok(()) +} + +/// Constant which tells [`update_view`] to send a view change event when called. +pub(crate) const SEND_VIEW_CHANGE_EVENT: bool = true; + +/// Constant which tells `update_view` to not send a view change event when called. +pub const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; + +/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the +/// `timeout_task` which are updated during the operation of the function. +/// +/// # Errors +/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. +/// TODO: Remove args when we merge dependency tasks. +#[allow(clippy::too_many_arguments)] +pub(crate) async fn update_view( + new_view: TYPES::Time, + event_stream: &Sender>>, + timeout: u64, + consensus: Arc>>, + cur_view: &mut TYPES::Time, + cur_view_time: &mut i64, + timeout_task: &mut JoinHandle<()>, + output_event_stream: &Sender>, + send_view_change_event: bool, + is_old_view_leader: bool, +) -> Result<()> { + ensure!( + new_view > *cur_view, + "New view is not greater than our current view" + ); + + let old_view = *cur_view; + + debug!("Updating view from {} to {}", *old_view, *new_view); + + if *old_view / 100 != *new_view / 100 { + // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): + // switch to info! when INFO logs become less cluttered + error!("Progress: entered view {:>6}", *new_view); + } + + *cur_view = new_view; + + // The next view is just the current view + 1 + let next_view = *cur_view + 1; + + if send_view_change_event { + futures::join! { + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), + broadcast_event( + Event { + view_number: old_view, + event: EventType::ViewFinished { + view_number: old_view, + }, + }, + output_event_stream, + ) + }; + } + + // Spawn a timeout task if we did actually update view + let new_timeout_task = async_spawn({ + let stream = event_stream.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = next_view; + let timeout = Duration::from_millis(timeout); + async move { + async_sleep(timeout).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + }); + + // cancel the old timeout task + cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await; + + let consensus = consensus.upgradable_read().await; + consensus + .metrics + .current_view + .set(usize::try_from(cur_view.u64()).unwrap()); + let new_view_time = Utc::now().timestamp(); + if is_old_view_leader { + #[allow(clippy::cast_precision_loss)] + consensus + .metrics + .view_duration_as_leader + .add_point((new_view_time - *cur_view_time) as f64); + } + *cur_view_time = new_view_time; + + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(cur_view.u64()).unwrap() + > usize::try_from(consensus.last_decided_view().u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(cur_view.u64()).unwrap() + - usize::try_from(consensus.last_decided_view().u64()).unwrap(), + ); + } + let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + if let Err(e) = consensus.update_view(new_view) { + tracing::trace!("{e:?}"); + } + tracing::trace!("View updated successfully"); + + Ok(()) +} /// Cancel a task pub async fn cancel_task(task: JoinHandle) { diff --git a/task-impls/src/quorum_proposal/dependency_handle.rs b/task-impls/src/quorum_proposal/handlers.rs similarity index 99% rename from task-impls/src/quorum_proposal/dependency_handle.rs rename to task-impls/src/quorum_proposal/handlers.rs index fcf5975141..a927c5292a 100644 --- a/task-impls/src/quorum_proposal/dependency_handle.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -1,6 +1,8 @@ //! This module holds the dependency task for the QuorumProposalTask. It is spawned whenever an event that could //! initiate a proposal occurs. +#![cfg(feature = "dependency-tasks")] + use std::{marker::PhantomData, sync::Arc, time::Duration}; use anyhow::{ensure, Context, Result}; @@ -25,9 +27,8 @@ use tracing::{debug, error}; use vbs::version::Version; use crate::{ - consensus::helpers::{fetch_proposal, parent_leaf_and_state}, events::HotShotEvent, - helpers::broadcast_event, + helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, }; /// Proposal dependency types. These types represent events that precipitate a proposal. diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 2584c16c87..456267eaa1 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "dependency-tasks")] + use std::{collections::HashMap, sync::Arc}; use anyhow::Result; @@ -29,13 +31,13 @@ use tokio::task::JoinHandle; use tracing::{debug, instrument, warn}; use vbs::version::Version; -use self::dependency_handle::{ProposalDependency, ProposalDependencyHandle}; +use self::handlers::{ProposalDependency, ProposalDependencyHandle}; use crate::{ events::HotShotEvent, helpers::{broadcast_event, cancel_task}, }; -mod dependency_handle; +mod handlers; /// The state for the quorum proposal task. pub struct QuorumProposalTaskState> { diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 44dc4ca8b0..ffcf032a54 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -1,4 +1,5 @@ #![allow(dead_code)] +#![cfg(feature = "dependency-tasks")] use std::sync::Arc; @@ -23,14 +24,11 @@ use tracing::{debug, error, warn}; use super::QuorumProposalRecvTaskState; use crate::{ - consensus::{ - helpers::{ - fetch_proposal, validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, - }, - view_change::{update_view, SEND_VIEW_CHANGE_EVENT}, - }, events::HotShotEvent, - helpers::broadcast_event, + helpers::{ + broadcast_event, fetch_proposal, update_view, validate_proposal_safety_and_liveness, + validate_proposal_view_and_certs, SEND_VIEW_CHANGE_EVENT, + }, }; /// Whether the proposal contained in `QuorumProposalRecv` is fully validated or only the liveness diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 3b07cf4935..ef06c848e2 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -1,9 +1,10 @@ #![allow(unused_imports)] +#![cfg(feature = "dependency-tasks")] use std::{collections::BTreeMap, sync::Arc}; -use anyhow::Result; -use async_broadcast::{Receiver, Sender}; +use anyhow::{bail, Result}; +use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -12,7 +13,7 @@ use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, - data::ViewChangeEvidence, + data::{Leaf, ViewChangeEvidence}, event::Event, simple_certificate::UpgradeCertificate, traits::{ @@ -23,14 +24,13 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; use self::handlers::handle_quorum_proposal_recv; use crate::{ - consensus::helpers::parent_leaf_and_state, - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + events::{HotShotEvent, ProposalMissing}, + helpers::{broadcast_event, cancel_task, parent_leaf_and_state}, quorum_proposal_recv::handlers::QuorumProposalValidity, }; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 70577f4523..6172d3a907 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "dependency-tasks")] + use std::sync::Arc; use anyhow::Result; @@ -13,9 +15,8 @@ use tracing::debug; use super::QuorumVoteTaskState; use crate::{ - consensus::helpers::{decide_from_proposal, LeafChainTraversalOutcome}, events::HotShotEvent, - helpers::broadcast_event, + helpers::{broadcast_event, decide_from_proposal, LeafChainTraversalOutcome}, }; /// Handles the `QuorumProposalValidated` event. diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index d232db1153..9969a04a4a 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "dependency-tasks")] + use std::{collections::HashMap, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; @@ -38,9 +40,8 @@ use tracing::{debug, error, info, instrument, trace, warn}; use vbs::version::Version; use crate::{ - consensus::helpers::fetch_proposal, events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + helpers::{broadcast_event, cancel_task, fetch_proposal}, quorum_vote::handlers::handle_quorum_proposal_validated, }; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 5db22ca8fc..84fb9bcb09 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -28,7 +28,6 @@ use hotshot_types::{ utils::ViewInner, vid::VidCommitment, }; - use tracing::{debug, error, instrument, warn}; use crate::{ diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index a90c4cc566..2f58bd1797 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -1,5 +1,6 @@ // TODO: Remove after integration #![allow(unused_imports)] +#![cfg(feature = "dependency-tasks")] use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 625fb6aa05..4c29165aca 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,4 +1,6 @@ #![allow(clippy::panic)] +#![cfg(feature = "dependency-tasks")] + use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index 11027dda70..8183e37fed 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -1,6 +1,7 @@ #![cfg(not(feature = "dependency-tasks"))] // TODO: Remove after integration of dependency-tasks +#![cfg(not(feature = "dependency-tasks"))] #![allow(unused_imports)] use std::time::Duration; diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index ffdc025753..2a109eb0ed 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -16,7 +16,7 @@ use sha2::Digest; use hotshot_macros::{test_scripts, run_test}; use hotshot_task_impls::{ quorum_proposal::QuorumProposalTaskState, - consensus::ConsensusTaskState, consensus2::Consensus2TaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState + consensus2::Consensus2TaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf, vid_share,build_payload_commitment}, diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index f22a3f0b52..9859b8f9fd 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -14,7 +14,7 @@ use hotshot_example_types::{ }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ - consensus::ConsensusTaskState, consensus2::Consensus2TaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, upgrade::UpgradeTaskState + consensus2::Consensus2TaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, upgrade::UpgradeTaskState }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf, vid_share}, From 4b6296b45798ee7039761b26a62f42bd17a2a58b Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 1 Jul 2024 18:03:16 +0200 Subject: [PATCH 1110/1393] Add OuterConsensus for debugging locks (#3287) * Add OuterConsensus for debugging locks * Fixes after merge * Improve logging around Consensus * Fix formatting * Fix parameters number * Add id and view to some spans * Fix lints * Fix types for dependency-tasks feature --- hotshot/src/lib.rs | 17 +- hotshot/src/tasks/mod.rs | 1 + hotshot/src/tasks/task_state.rs | 27 +- hotshot/src/types/handle.rs | 2 + task-impls/src/consensus/handlers.rs | 56 +- task-impls/src/consensus/helpers.rs | 1357 +++++++++++++++++ task-impls/src/consensus/mod.rs | 17 +- task-impls/src/consensus/view_change.rs | 137 ++ task-impls/src/consensus2/handlers.rs | 4 +- task-impls/src/consensus2/mod.rs | 6 +- task-impls/src/da.rs | 11 +- task-impls/src/helpers.rs | 31 +- task-impls/src/quorum_proposal/handlers.rs | 14 +- task-impls/src/quorum_proposal/mod.rs | 9 +- .../src/quorum_proposal_recv/handlers.rs | 12 +- task-impls/src/quorum_proposal_recv/mod.rs | 6 +- task-impls/src/quorum_vote/handlers.rs | 6 +- task-impls/src/quorum_vote/mod.rs | 13 +- task-impls/src/request.rs | 14 +- task-impls/src/response.rs | 12 +- task-impls/src/transactions.rs | 9 +- task-impls/src/vid.rs | 7 +- types/src/consensus.rs | 192 ++- 23 files changed, 1844 insertions(+), 116 deletions(-) create mode 100644 task-impls/src/consensus/helpers.rs create mode 100644 task-impls/src/consensus/view_change.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 9a01ebd956..0c5f7b742d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -34,7 +34,7 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network /// Reexport error type pub use hotshot_types::error::HotShotError; use hotshot_types::{ - consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, + consensus::{Consensus, ConsensusMetricsValue, OuterConsensus, View, ViewInner}, constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, data::{Leaf, QuorumProposal}, event::{EventType, LeafInfo}, @@ -104,7 +104,7 @@ pub struct SystemContext> { metrics: Arc, /// The hotstuff implementation - consensus: Arc>>, + consensus: OuterConsensus, /// Immutable instance state instance_state: Arc, @@ -153,7 +153,7 @@ impl> Clone for SystemContext> SystemContext { let inner: Arc> = Arc::new(SystemContext { id: nonce, - consensus, + consensus: OuterConsensus::new(consensus), instance_state: Arc::new(instance_state), public_key, private_key, @@ -292,6 +292,7 @@ impl> SystemContext { /// /// # Panics /// Panics if sending genesis fails + #[instrument(skip_all, target = "SystemContext", fields(id = self.id))] pub async fn start_consensus(&self) { #[cfg(feature = "dependncy-tasks")] error!("HotShot is running with the dependency tasks feature enabled!!"); @@ -392,7 +393,7 @@ impl> SystemContext { /// # Errors /// /// Always returns Ok; does not return an error if the transaction couldn't be published to the network - #[instrument(skip(self), err)] + #[instrument(skip(self), err, target = "SystemContext", fields(id = self.id))] pub async fn publish_transaction_async( &self, transaction: TYPES::Transaction, @@ -448,7 +449,7 @@ impl> SystemContext { /// Returns a copy of the consensus struct #[must_use] pub fn consensus(&self) -> Arc>> { - Arc::clone(&self.consensus) + Arc::clone(&self.consensus.inner_consensus) } /// Returns a copy of the instance state @@ -459,6 +460,7 @@ impl> SystemContext { /// Returns a copy of the last decided leaf /// # Panics /// Panics if internal leaf for consensus is inconsistent + #[instrument(skip_all, target = "SystemContext", fields(id = self.id))] pub async fn decided_leaf(&self) -> Leaf { self.consensus.read().await.decided_leaf() } @@ -469,6 +471,7 @@ impl> SystemContext { /// # Panics /// Panics if internal state for consensus is inconsistent #[must_use] + #[instrument(skip_all, target = "SystemContext", fields(id = self.id))] pub fn try_decided_leaf(&self) -> Option> { self.consensus.try_read().map(|guard| guard.decided_leaf()) } @@ -477,6 +480,7 @@ impl> SystemContext { /// /// # Panics /// Panics if internal state for consensus is inconsistent + #[instrument(skip_all, target = "SystemContext", fields(id = self.id))] pub async fn decided_state(&self) -> Arc { Arc::clone(&self.consensus.read().await.decided_state()) } @@ -488,6 +492,7 @@ impl> SystemContext { /// return [`None`] if the requested view has already been decided (but see /// [`decided_state`](Self::decided_state)) or if there is no path for the requested /// view to ever be decided. + #[instrument(skip_all, target = "SystemContext", fields(id = self.id))] pub async fn state(&self, view: TYPES::Time) -> Option> { self.consensus.read().await.state(view).cloned() } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 43509c23c0..5b160263ea 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -72,6 +72,7 @@ pub async fn add_response_task>( handle.hotshot.memberships.quorum_membership.clone().into(), handle.public_key().clone(), handle.private_key().clone(), + handle.hotshot.id, ); handle.network_registry.register(run_response_task::( state, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 5ce533682b..1599f9683d 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -19,9 +19,12 @@ use hotshot_task_impls::{ consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, }; -use hotshot_types::traits::{ - consensus_api::ConsensusApi, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, +use hotshot_types::{ + consensus::OuterConsensus, + traits::{ + consensus_api::ConsensusApi, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, }; use crate::types::SystemContextHandle; @@ -44,7 +47,7 @@ impl> CreateTaskState async fn create_from(handle: &SystemContextHandle) -> NetworkRequestState { NetworkRequestState { network: Arc::clone(&handle.hotshot.network), - state: handle.hotshot.consensus(), + state: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, da_membership: handle.hotshot.memberships.da_membership.clone(), @@ -111,7 +114,7 @@ impl> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> VidTaskState { VidTaskState { - consensus: handle.hotshot.consensus(), + consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, vote_collector: None, network: Arc::clone(&handle.hotshot.network), @@ -129,7 +132,7 @@ impl> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> DaTaskState { DaTaskState { - consensus: handle.hotshot.consensus(), + consensus: OuterConsensus::new(handle.hotshot.consensus()), output_event_stream: handle.hotshot.external_event_stream.0.clone(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), network: Arc::clone(&handle.hotshot.network), @@ -182,7 +185,7 @@ impl> CreateTaskState TransactionTaskState { builder_timeout: handle.builder_timeout(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - consensus: handle.hotshot.consensus(), + consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -213,7 +216,7 @@ impl> CreateTaskState let timeout_task = handle.spawn_initial_timeout_task(); ConsensusTaskState { - consensus, + consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), timeout: handle.hotshot.config.next_view_timeout, round_start_delay: handle.hotshot.config.round_start_delay, @@ -254,7 +257,7 @@ impl> CreateTaskState QuorumVoteTaskState { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - consensus, + consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), latest_voted_view: handle.cur_view().await, vote_dependencies: HashMap::new(), @@ -286,7 +289,7 @@ impl> CreateTaskState proposal_dependencies: HashMap::new(), network: Arc::clone(&handle.hotshot.network), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - consensus, + consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -318,7 +321,7 @@ impl> CreateTaskState QuorumProposalRecvTaskState { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), - consensus, + consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), network: Arc::clone(&handle.hotshot.network), @@ -364,7 +367,7 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), timeout_task, timeout: handle.hotshot.config.next_view_timeout, - consensus, + consensus: OuterConsensus::new(consensus), last_decided_view: handle.cur_view().await, id: handle.hotshot.id, version: Arc::clone(&handle.hotshot.version), diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 6ab61469f5..6342f15f9f 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -18,6 +18,7 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tracing::instrument; use crate::{traits::NodeImplementation, types::Event, SystemContext}; @@ -191,6 +192,7 @@ impl + 'static> SystemContextHandl } /// Wrapper to get the view number this node is on. + #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] pub async fn cur_view(&self) -> TYPES::Time { self.hotshot.consensus.read().await.cur_view() } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 43956c96c7..9578df9671 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -3,6 +3,15 @@ use core::time::Duration; use std::{marker::PhantomData, sync::Arc}; +use super::ConsensusTaskState; +use crate::{ + events::HotShotEvent, + helpers::{ + broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, + temp_validate_proposal_safety_and_liveness, update_view, validate_proposal_view_and_certs, + AnyhowTracing, SEND_VIEW_CHANGE_EVENT, + }, +}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -12,8 +21,9 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; use futures::FutureExt; +use hotshot_types::consensus::OuterConsensus; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus, View}, + consensus::{CommitmentAndMetadata, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, @@ -32,26 +42,17 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; -use super::ConsensusTaskState; -use crate::{ - events::HotShotEvent, - helpers::{ - broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, - temp_validate_proposal_safety_and_liveness, update_view, validate_proposal_view_and_certs, - AnyhowTracing, SEND_VIEW_CHANGE_EVENT, - }, -}; - /// Create the header for a proposal, build the proposal, and broadcast /// the proposal send evnet. #[allow(clippy::too_many_arguments)] +#[instrument(skip_all, fields(id = id, view = *view))] pub async fn create_and_send_proposal( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, - consensus: Arc>>, + consensus: OuterConsensus, event_stream: Sender>>, view: TYPES::Time, commitment_and_metadata: CommitmentAndMetadata, @@ -62,6 +63,7 @@ pub async fn create_and_send_proposal( round_start_delay: u64, instance_state: Arc, version: Version, + id: u64, ) { let consensus_read = consensus.read().await; let Some(Some(vid_share)) = consensus_read @@ -144,13 +146,14 @@ pub async fn create_and_send_proposal( /// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the /// standard case proposal scenario. #[allow(clippy::too_many_arguments)] +#[instrument(skip_all)] pub async fn publish_proposal_from_commitment_and_metadata( view: TYPES::Time, sender: Sender>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, - consensus: Arc>>, + consensus: OuterConsensus, delay: u64, formed_upgrade_certificate: Option>, decided_upgrade_cert: Option>, @@ -158,12 +161,13 @@ pub async fn publish_proposal_from_commitment_and_metadata( proposal_cert: Option>, instance_state: Arc, version: Version, + id: u64, ) -> Result> { let (parent_leaf, state) = parent_leaf_and_state( view, quorum_membership, public_key.clone(), - Arc::clone(&consensus), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), ) .await?; @@ -217,6 +221,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( delay, instance_state, version, + id, ) .await; }); @@ -227,13 +232,14 @@ pub async fn publish_proposal_from_commitment_and_metadata( /// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either /// `commitment_and_metadata`, or a `decided_upgrade_cert`. #[allow(clippy::too_many_arguments)] +#[instrument(skip_all)] pub async fn publish_proposal_if_able( view: TYPES::Time, sender: Sender>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, - consensus: Arc>>, + consensus: OuterConsensus, delay: u64, formed_upgrade_certificate: Option>, decided_upgrade_cert: Option>, @@ -241,6 +247,7 @@ pub async fn publish_proposal_if_able( proposal_cert: Option>, instance_state: Arc, version: Version, + id: u64, ) -> Result> { publish_proposal_from_commitment_and_metadata( view, @@ -256,6 +263,7 @@ pub async fn publish_proposal_if_able( proposal_cert, instance_state, version, + id, ) .await } @@ -303,7 +311,7 @@ pub(crate) async fn handle_quorum_proposal_recv, public_key: TYPES::SignatureKey, - consensus: Arc>>, + consensus: OuterConsensus, storage: Arc>, quorum_membership: Arc, instance_state: Arc, vote_info: VoteInfo, version: Version, + id: u64, ) -> bool { use hotshot_types::simple_vote::QuorumVote; @@ -683,7 +693,7 @@ pub async fn update_state_and_vote_if_able( + proposal: Proposal>, + parent_leaf: Leaf, + consensus: OuterConsensus, + decided_upgrade_certificate: Option>, + quorum_membership: Arc, + view_leader_key: TYPES::SignatureKey, + event_stream: Sender>>, + sender: TYPES::SignatureKey, + event_sender: Sender>, + id: u64, +) -> Result<()> { + let view_number = proposal.data.view_number(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + ensure!( + proposed_leaf.parent_commitment() == parent_leaf.commit(), + "Proposed leaf does not extend the parent leaf." + ); + + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state, + delta: None, // May be updated to `Some` in the vote task. + }, + }; + + if let Err(e) = consensus + .write() + .await + .update_validated_state_map(view_number, view.clone()) + { + tracing::trace!("{e:?}"); + } + consensus + .write() + .await + .update_saved_leaves(proposed_leaf.clone()); + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + // + // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: + // + // proposal.validate_signature(&quorum_membership)?; + // + // in a future PR. + ensure!( + view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), + "Could not verify proposal." + ); + + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + + // Validate that the upgrade certificate is re-attached, if we saw one on the parent + proposed_leaf.extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; + + let justify_qc = proposal.data.justify_qc.clone(); + // Create a positive vote if either liveness or safety check + // passes. + + // Liveness check. + let read_consensus = consensus.read().await; + let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = read_consensus.visit_leaf_ancestors( + justify_qc.view_number(), + Terminator::Inclusive(read_consensus.locked_view()), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number() != read_consensus.locked_view() + }, + ); + let safety_check = outcome.is_ok(); + + ensure!(safety_check || liveness_check, { + if let Err(e) = outcome { + broadcast_event( + Event { + view_number, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } + + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + }); + + // We accept the proposal, notify the application layer + + broadcast_event( + Event { + view_number, + event: EventType::QuorumProposal { + proposal: proposal.clone(), + sender, + }, + }, + &event_sender, + ) + .await; + // Notify other tasks + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalValidated( + proposal.data.clone(), + parent_leaf, + )), + &event_stream, + ) + .await; + + Ok(()) +} + +/// Create the header for a proposal, build the proposal, and broadcast +/// the proposal send evnet. +#[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] +#[instrument(skip_all, fields(id = id, view = *view))] +pub async fn create_and_send_proposal( + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: OuterConsensus, + event_stream: Sender>>, + view: TYPES::Time, + commitment_and_metadata: CommitmentAndMetadata, + parent_leaf: Leaf, + state: Arc, + upgrade_cert: Option>, + proposal_cert: Option>, + round_start_delay: u64, + instance_state: Arc, + version: Version, + id: u64, +) { + let consensus_read = consensus.read().await; + let Some(Some(vid_share)) = consensus_read + .vid_shares() + .get(&view) + .map(|shares| shares.get(&public_key).cloned()) + else { + error!("Cannot propopse without our VID share, view {:?}", view); + return; + }; + drop(consensus_read); + let block_header = match TYPES::BlockHeader::new( + state.as_ref(), + instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fee, + vid_share.data.common, + version, + ) + .await + { + Ok(header) => header, + Err(err) => { + error!(%err, "Failed to construct block header"); + return; + } + }; + + let proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: consensus.read().await.high_qc().clone(), + proposal_certificate: proposal_cert, + upgrade_certificate: upgrade_cert, + }; + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.parent_commitment() != parent_leaf.commit() { + return; + } + + let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) + else { + // This should never happen. + error!("Failed to sign proposed_leaf.commit()!"); + return; + }; + + let message = Proposal { + data: proposal, + signature, + _pd: PhantomData, + }; + debug!( + "Sending null proposal for view {:?}", + proposed_leaf.view_number(), + ); + if let Err(e) = consensus + .write() + .await + .update_last_proposed_view(message.clone()) + { + tracing::trace!("{e:?}"); + return; + } + async_sleep(Duration::from_millis(round_start_delay)).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalSend( + message.clone(), + public_key, + )), + &event_stream, + ) + .await; +} + +/// Validates, from a given `proposal` that the view that it is being submitted for is valid when +/// compared to `cur_view` which is the highest proposed view (so far) for the caller. If the proposal +/// is for a view that's later than expected, that the proposal includes a timeout or view sync certificate. +pub fn validate_proposal_view_and_certs( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + cur_view: TYPES::Time, + quorum_membership: &Arc, + timeout_membership: &Arc, +) -> Result<()> { + let view = proposal.data.view_number(); + ensure!( + view >= cur_view, + "Proposal is from an older view {:?}", + proposal.data.clone() + ); + + let view_leader_key = quorum_membership.leader(view); + ensure!( + view_leader_key == *sender, + "Leader key does not match key in proposal" + ); + + // Verify a timeout certificate OR a view sync certificate exists and is valid. + if proposal.data.justify_qc.view_number() != view - 1 { + let received_proposal_cert = + proposal.data.proposal_certificate.clone().context(format!( + "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", + *view + ))?; + + match received_proposal_cert { + ViewChangeEvidence::Timeout(timeout_cert) => { + ensure!( + timeout_cert.date().view == view - 1, + "Timeout certificate for view {} was not for the immediately preceding view", + *view + ); + ensure!( + timeout_cert.is_valid_cert(timeout_membership.as_ref()), + "Timeout certificate for view {} was invalid", + *view + ); + } + ViewChangeEvidence::ViewSync(view_sync_cert) => { + ensure!( + view_sync_cert.view_number == view, + "View sync cert view number {:?} does not match proposal view number {:?}", + view_sync_cert.view_number, + view + ); + + // View sync certs must also be valid. + ensure!( + view_sync_cert.is_valid_cert(quorum_membership.as_ref()), + "Invalid view sync finalize cert provided" + ); + } + } + } + + // Validate the upgrade certificate -- this is just a signature validation. + // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. + UpgradeCertificate::validate(&proposal.data.upgrade_certificate, quorum_membership)?; + + Ok(()) +} + +/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. +#[instrument(skip_all)] +pub(crate) async fn parent_leaf_and_state( + next_proposal_view_number: TYPES::Time, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + consensus: OuterConsensus, +) -> Result<(Leaf, Arc<::ValidatedState>)> { + ensure!( + quorum_membership.leader(next_proposal_view_number) == public_key, + "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", + ); + + let consensus_reader = consensus.read().await; + let parent_view_number = consensus_reader.high_qc().view_number(); + let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( + format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) + )?; + + // Leaf hash in view inner does not match high qc hash - Why? + let (leaf_commitment, state) = parent_view.leaf_and_state().context( + format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") + )?; + + if leaf_commitment != consensus_reader.high_qc().date().leaf_commit { + // NOTE: This happens on the genesis block + debug!( + "They don't equal: {:?} {:?}", + leaf_commitment, + consensus_reader.high_qc().date().leaf_commit + ); + } + + let leaf = consensus_reader + .saved_leaves() + .get(&leaf_commitment) + .context("Failed to find high QC of parent")?; + + let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); + let parent_leaf = leaf.clone(); + let original_parent_hash = parent_leaf.commit(); + let mut next_parent_hash = original_parent_hash; + + // Walk back until we find a decide + if !reached_decided { + debug!("We have not reached decide"); + while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { + if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { + break; + } + next_parent_hash = next_parent_leaf.parent_commitment(); + } + // TODO do some sort of sanity check on the view number that it matches decided + } + + Ok((parent_leaf, Arc::clone(state))) +} + +/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the +/// standard case proposal scenario. +#[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] +#[instrument(skip_all)] +pub async fn publish_proposal_from_commitment_and_metadata( + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: OuterConsensus, + delay: u64, + formed_upgrade_certificate: Option>, + decided_upgrade_cert: Option>, + commitment_and_metadata: Option>, + proposal_cert: Option>, + instance_state: Arc, + version: Version, + id: u64, +) -> Result> { + let (parent_leaf, state) = parent_leaf_and_state( + view, + quorum_membership, + public_key.clone(), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + ) + .await?; + + // In order of priority, we should try to attach: + // - the parent certificate if it exists, or + // - our own certificate that we formed. + // In either case, we need to ensure that the certificate is still relevant. + // + // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. + // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. + let mut proposal_upgrade_certificate = parent_leaf + .upgrade_certificate() + .or(formed_upgrade_certificate); + + if !proposal_upgrade_certificate + .clone() + .is_some_and(|cert| cert.is_relevant(view, decided_upgrade_cert).is_ok()) + { + proposal_upgrade_certificate = None; + } + + // We only want to proposal to be attached if any of them are valid. + let proposal_certificate = proposal_cert + .as_ref() + .filter(|cert| cert.is_valid_for_view(&view)) + .cloned(); + + // FIXME - This is not great, and will be fixed later. + // If it's > July, 2024 and this is still here, something has gone horribly wrong. + let cnm = commitment_and_metadata + .clone() + .context("Cannot propose because we don't have the VID payload commitment and metadata")?; + + ensure!( + cnm.block_view == view, + "Cannot propose because our VID payload commitment and metadata is for an older view." + ); + + let create_and_send_proposal_handle = async_spawn(async move { + create_and_send_proposal( + public_key, + private_key, + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + sender, + view, + cnm, + parent_leaf.clone(), + state, + proposal_upgrade_certificate, + proposal_certificate, + delay, + instance_state, + version, + id, + ) + .await; + }); + + Ok(create_and_send_proposal_handle) +} + +/// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either +/// `commitment_and_metadata`, or a `decided_upgrade_cert`. +#[allow(clippy::too_many_arguments)] +#[cfg(not(feature = "dependency-tasks"))] +#[instrument(skip_all)] +pub async fn publish_proposal_if_able( + view: TYPES::Time, + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + consensus: OuterConsensus, + delay: u64, + formed_upgrade_certificate: Option>, + decided_upgrade_cert: Option>, + commitment_and_metadata: Option>, + proposal_cert: Option>, + instance_state: Arc, + version: Version, + id: u64, +) -> Result> { + publish_proposal_from_commitment_and_metadata( + view, + sender, + quorum_membership, + public_key, + private_key, + consensus, + delay, + formed_upgrade_certificate, + decided_upgrade_cert, + commitment_and_metadata, + proposal_cert, + instance_state, + version, + id, + ) + .await +} + +/// Trigger a request to the network for a proposal for a view and wait for the response +#[instrument(skip_all)] +pub(crate) async fn fetch_proposal( + view: TYPES::Time, + event_stream: Sender>>, + quorum_membership: Arc, + consensus: OuterConsensus, +) -> Result> { + let (tx, mut rx) = broadcast(1); + let event = ProposalMissing { + view, + response_chan: tx, + }; + broadcast_event( + Arc::new(HotShotEvent::QuorumProposalRequest(event)), + &event_stream, + ) + .await; + let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { + bail!("Request for proposal failed"); + }; + let view_number = proposal.data.view_number(); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { + bail!("Invalid justify_qc in proposal for view {}", *view_number); + } + let mut consensus_write = consensus.write().await; + let leaf = Leaf::from_quorum_proposal(&proposal.data); + let state = Arc::new( + >::from_header(&proposal.data.block_header), + ); + + let view = View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }; + if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { + tracing::trace!("{e:?}"); + } + + consensus_write.update_saved_leaves(leaf.clone()); + broadcast_event( + HotShotEvent::ValidatedStateUpdated(view_number, view).into(), + &event_stream, + ) + .await; + Ok(leaf) +} + +/// Handle the received quorum proposal. +/// +/// Returns the proposal that should be used to set the `cur_proposal` for other tasks. +#[allow(clippy::too_many_lines)] +#[cfg(not(feature = "dependency-tasks"))] +#[instrument(skip_all)] +pub(crate) async fn handle_quorum_proposal_recv>( + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, + version: Version, +) -> Result>> { + let sender = sender.clone(); + debug!( + "Received Quorum Proposal for view {}", + *proposal.data.view_number + ); + + let cur_view = task_state.cur_view; + + validate_proposal_view_and_certs( + proposal, + &sender, + task_state.cur_view, + &task_state.quorum_membership, + &task_state.timeout_membership, + ) + .context("Failed to validate proposal view and attached certs")?; + + let view = proposal.data.view_number(); + let view_leader_key = task_state.quorum_membership.leader(view); + let justify_qc = proposal.data.justify_qc.clone(); + + if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { + let consensus = task_state.consensus.read().await; + consensus.metrics.invalid_qc.update(1); + bail!("Invalid justify_qc in proposal for view {}", *view); + } + + // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here + if let Err(e) = update_view::( + view, + &event_stream, + task_state.timeout, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + &mut task_state.cur_view, + &mut task_state.cur_view_time, + &mut task_state.timeout_task, + &task_state.output_event_stream, + SEND_VIEW_CHANGE_EVENT, + task_state.quorum_membership.leader(cur_view) == task_state.public_key, + ) + .await + { + debug!("Failed to update view; error = {e:#}"); + } + + let mut parent_leaf = task_state + .consensus + .read() + .await + .saved_leaves() + .get(&justify_qc.date().leaf_commit) + .cloned(); + + parent_leaf = match parent_leaf { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + event_stream.clone(), + Arc::clone(&task_state.quorum_membership), + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + ) + .await + .ok(), + }; + let consensus_read = task_state.consensus.read().await; + + // Get the parent leaf and state. + let parent = match parent_leaf { + Some(leaf) => { + if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { + Some((leaf, Arc::clone(&state))) + } else { + bail!("Parent state not found! Consensus internally inconsistent"); + } + } + None => None, + }; + + if justify_qc.view_number() > consensus_read.high_qc().view_number { + if let Err(e) = task_state + .storage + .write() + .await + .update_high_qc(justify_qc.clone()) + .await + { + bail!("Failed to store High QC not voting. Error: {:?}", e); + } + } + + drop(consensus_read); + let mut consensus_write = task_state.consensus.write().await; + + if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { + tracing::trace!("{e:?}"); + } + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some((parent_leaf, _parent_state)) = parent else { + warn!( + "Proposal's parent missing from storage with commitment: {:?}", + justify_qc.date().leaf_commit + ); + let leaf = Leaf::from_quorum_proposal(&proposal.data); + + let state = Arc::new( + >::from_header( + &proposal.data.block_header, + ), + ); + + if let Err(e) = consensus_write.update_validated_state_map( + view, + View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(), + state, + delta: None, + }, + }, + ) { + tracing::trace!("{e:?}"); + } + + consensus_write.update_saved_leaves(leaf.clone()); + let new_leaves = consensus_write.saved_leaves().clone(); + let new_state = consensus_write.validated_state_map().clone(); + drop(consensus_write); + + if let Err(e) = task_state + .storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await + { + warn!("Couldn't store undecided state. Error: {:?}", e); + } + + // If we are missing the parent from storage, the safety check will fail. But we can + // still vote if the liveness check succeeds. + #[cfg(not(feature = "dependency-tasks"))] + { + let consensus_read = task_state.consensus.read().await; + let liveness_check = justify_qc.view_number() > consensus_read.locked_view(); + + let high_qc = consensus_read.high_qc().clone(); + let locked_view = consensus_read.locked_view(); + + drop(consensus_read); + + let mut current_proposal = None; + if liveness_check { + current_proposal = Some(proposal.data.clone()); + let new_view = proposal.data.view_number + 1; + + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.leader(new_view) + == task_state.public_key + && high_qc.view_number == current_proposal.clone().unwrap().view_number; + + let qc = high_qc.clone(); + if should_propose { + debug!( + "Attempting to publish proposal after voting for liveness; now in view: {}", + *new_view + ); + let create_and_send_proposal_handle = publish_proposal_if_able( + qc.view_number + 1, + event_stream, + Arc::clone(&task_state.quorum_membership), + task_state.public_key.clone(), + task_state.private_key.clone(), + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + task_state.round_start_delay, + task_state.formed_upgrade_certificate.clone(), + task_state.decided_upgrade_cert.clone(), + task_state.payload_commitment_and_metadata.clone(), + task_state.proposal_cert.clone(), + Arc::clone(&task_state.instance_state), + version, + task_state.id, + ) + .await?; + + task_state + .spawned_tasks + .entry(view) + .or_default() + .push(create_and_send_proposal_handle); + } + } else { + warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); + } + + return Ok(current_proposal); + } + + #[cfg(feature = "dependency-tasks")] + return Ok(None); + }; + + task_state + .spawned_tasks + .entry(proposal.data.view_number()) + .or_default() + .push(async_spawn( + validate_proposal_safety_and_liveness( + proposal.clone(), + parent_leaf, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + task_state.decided_upgrade_cert.clone(), + Arc::clone(&task_state.quorum_membership), + view_leader_key, + event_stream.clone(), + sender, + task_state.output_event_stream.clone(), + task_state.id, + ) + .map(AnyhowTracing::err_as_debug), + )); + Ok(None) +} + +/// Helper type to give names and to the output values of the leaf chain traversal operation. +#[derive(Debug)] +pub struct LeafChainTraversalOutcome { + /// The new locked view obtained from a 2 chain starting from the proposal's parent. + pub new_locked_view_number: Option, + + /// The new decided view obtained from a 3 chain starting from the proposal's parent. + pub new_decided_view_number: Option, + + /// The qc for the decided chain. + pub new_decide_qc: Option>, + + /// The decided leaves with corresponding validated state and VID info. + pub leaf_views: Vec>, + + /// The decided leaves. + pub leaves_decided: Vec>, + + /// The transactions in the block payload for each leaf. + pub included_txns: Option::Transaction>>>, + + /// The most recent upgrade certificate from one of the leaves. + pub decided_upgrade_cert: Option>, +} + +/// We need Default to be implemented because the leaf ascension has very few failure branches, +/// and when they *do* happen, we still return intermediate states. Default makes the burden +/// of filling values easier. +impl Default for LeafChainTraversalOutcome { + /// The default method for this type is to set all of the returned values to `None`. + fn default() -> Self { + Self { + new_locked_view_number: None, + new_decided_view_number: None, + new_decide_qc: None, + leaf_views: Vec::new(), + leaves_decided: Vec::new(), + included_txns: None, + decided_upgrade_cert: None, + } + } +} + +/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin +/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is +/// one view newer), then we begin attempting to form the chain. This is a direct impl from +/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: +/// +/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, +/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, +/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. +/// It forms a Three-Chain, if b'' forms a Two-Chain. +/// +/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit +/// is reached when we have a two chain, and a decide is reached when we have a three chain. +/// +/// # Example +/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, +/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the +/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes +/// 2-3-5. +/// +/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This +/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the +/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at +/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, +/// and out new locked view will be 6. +/// +/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and +/// the anchor view will be set to view 6, with the locked view as view 7. +#[instrument(skip_all)] +pub async fn decide_from_proposal( + proposal: &QuorumProposal, + consensus: OuterConsensus, + existing_upgrade_cert: &Option>, + public_key: &TYPES::SignatureKey, +) -> LeafChainTraversalOutcome { + let consensus_reader = consensus.read().await; + let view_number = proposal.view_number(); + let parent_view_number = proposal.justify_qc.view_number(); + let old_anchor_view = consensus_reader.last_decided_view(); + + let mut last_view_number_visited = view_number; + let mut current_chain_length = 0usize; + let mut res = LeafChainTraversalOutcome::default(); + + if let Err(e) = consensus_reader.visit_leaf_ancestors( + parent_view_number, + Terminator::Exclusive(old_anchor_view), + true, + |leaf, state, delta| { + // This is the core paper logic. We're implementing the chain in chained hotstuff. + if res.new_decided_view_number.is_none() { + // If the last view number is the child of the leaf we've moved to... + if last_view_number_visited == leaf.view_number() + 1 { + last_view_number_visited = leaf.view_number(); + + // The chain grows by one + current_chain_length += 1; + + // We emit a locked view when the chain length is 2 + if current_chain_length == 2 { + res.new_locked_view_number = Some(leaf.view_number()); + // The next leaf in the chain, if there is one, is decided, so this + // leaf's justify_qc would become the QC for the decided chain. + res.new_decide_qc = Some(leaf.justify_qc().clone()); + } else if current_chain_length == 3 { + // And we decide when the chain length is 3. + res.new_decided_view_number = Some(leaf.view_number()); + } + } else { + // There isn't a new chain extension available, so we signal to the callback + // owner that we can exit for now. + return false; + } + } + + // Now, if we *have* reached a decide, we need to do some state updates. + if let Some(new_decided_view) = res.new_decided_view_number { + // First, get a mutable reference to the provided leaf. + let mut leaf = leaf.clone(); + + // Update the metrics + if leaf.view_number() == new_decided_view { + consensus_reader + .metrics + .last_synced_block_height + .set(usize::try_from(leaf.height()).unwrap_or(0)); + } + + // Check if there's a new upgrade certificate available. + if let Some(cert) = leaf.upgrade_certificate() { + if leaf.upgrade_certificate() != *existing_upgrade_cert { + if cert.data.decide_by < view_number { + warn!("Failed to decide an upgrade certificate in time. Ignoring."); + } else { + info!("Reached decide on upgrade certificate: {:?}", cert); + res.decided_upgrade_cert = Some(cert.clone()); + } + } + } + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(encoded_txns) = + consensus_reader.saved_payloads().get(&leaf.view_number()) + { + let payload = + BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); + + leaf.fill_block_payload_unchecked(payload); + } + + // Get the VID share at the leaf's view number, corresponding to our key + // (if one exists) + let vid_share = consensus_reader + .vid_shares() + .get(&leaf.view_number()) + .unwrap_or(&HashMap::new()) + .get(public_key) + .cloned() + .map(|prop| prop.data); + + // Add our data into a new `LeafInfo` + res.leaf_views.push(LeafInfo::new( + leaf.clone(), + Arc::clone(&state), + delta.clone(), + vid_share, + )); + res.leaves_decided.push(leaf.clone()); + if let Some(ref payload) = leaf.block_payload() { + res.included_txns = Some( + payload + .transaction_commitments(leaf.block_header().metadata()) + .into_iter() + .collect::>(), + ); + } + } + true + }, + ) { + debug!("Leaf ascension failed; error={e}"); + } + + res +} + +/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. +#[allow(clippy::too_many_lines)] +#[cfg(not(feature = "dependency-tasks"))] +#[instrument(skip_all)] +pub async fn handle_quorum_proposal_validated>( + proposal: &QuorumProposal, + event_stream: Sender>>, + task_state: &mut ConsensusTaskState, +) -> Result<()> { + let view = proposal.view_number(); + #[cfg(not(feature = "dependency-tasks"))] + { + task_state.current_proposal = Some(proposal.clone()); + } + + let res = decide_from_proposal( + proposal, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + &task_state.decided_upgrade_cert, + &task_state.public_key, + ) + .await; + + if let Some(cert) = res.decided_upgrade_cert { + task_state.decided_upgrade_cert = Some(cert.clone()); + + let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; + *decided_certificate_lock = Some(cert.clone()); + drop(decided_certificate_lock); + let _ = event_stream + .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) + .await; + } + + let mut consensus = task_state.consensus.write().await; + if let Some(new_locked_view) = res.new_locked_view_number { + if let Err(e) = consensus.update_locked_view(new_locked_view) { + tracing::trace!("{e:?}"); + } + } + + drop(consensus); + + #[cfg(not(feature = "dependency-tasks"))] + { + let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; + // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here + // This is for the case where we form a QC but have not yet seen the previous proposal ourselves + let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key + && task_state.consensus.read().await.high_qc().view_number + == task_state.current_proposal.clone().unwrap().view_number; + + if let Some(new_decided_view) = res.new_decided_view_number { + task_state.cancel_tasks(new_decided_view).await; + } + task_state.current_proposal = Some(proposal.clone()); + task_state.spawn_vote_task(view, event_stream.clone()).await; + if should_propose { + debug!( + "Attempting to publish proposal after voting; now in view: {}", + *new_view + ); + if let Err(e) = task_state + .publish_proposal(new_view, event_stream.clone()) + .await + { + debug!("Failed to propose; error = {e:?}"); + }; + } + } + + #[allow(clippy::cast_precision_loss)] + if let Some(new_anchor_view) = res.new_decided_view_number { + let block_size = res.included_txns.map(|set| set.len().try_into().unwrap()); + let decide_sent = broadcast_event( + Event { + view_number: new_anchor_view, + event: EventType::Decide { + leaf_chain: Arc::new(res.leaf_views), + qc: Arc::new(res.new_decide_qc.unwrap()), + block_size, + }, + }, + &task_state.output_event_stream, + ); + let mut consensus = task_state.consensus.write().await; + + let old_anchor_view = consensus.last_decided_view(); + consensus.collect_garbage(old_anchor_view, new_anchor_view); + if let Err(e) = consensus.update_last_decided_view(new_anchor_view) { + tracing::trace!("{e:?}"); + } + consensus + .metrics + .last_decided_time + .set(Utc::now().timestamp().try_into().unwrap()); + consensus.metrics.invalid_qc.set(0); + consensus + .metrics + .last_decided_view + .set(usize::try_from(consensus.last_decided_view().u64()).unwrap()); + let cur_number_of_views_per_decide_event = + *task_state.cur_view - consensus.last_decided_view().u64(); + consensus + .metrics + .number_of_views_per_decide_event + .add_point(cur_number_of_views_per_decide_event as f64); + + debug!( + "Sending Decide for view {:?}", + consensus.last_decided_view() + ); + drop(consensus); + debug!("Decided txns len {:?}", block_size); + decide_sent.await; + broadcast_event( + Arc::new(HotShotEvent::LeafDecided(res.leaves_decided)), + &event_stream, + ) + .await; + debug!("decide send succeeded"); + } + + Ok(()) +} + +/// Private key, latest decided upgrade certificate, committee membership, and event stream, for +/// sending the vote. +#[cfg(not(feature = "dependency-tasks"))] +type VoteInfo = ( + <::SignatureKey as SignatureKey>::PrivateKey, + Option>, + Arc<::Membership>, + Sender>>, +); + +#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_lines)] +#[allow(unused_variables)] +#[cfg(not(feature = "dependency-tasks"))] +/// Check if we are able to vote, like whether the proposal is valid, +/// whether we have DAC and VID share, and if so, vote. +#[instrument(skip_all, fields(id = id, view = *cur_view))] +pub async fn update_state_and_vote_if_able>( + cur_view: TYPES::Time, + proposal: QuorumProposal, + public_key: TYPES::SignatureKey, + consensus: OuterConsensus, + storage: Arc>, + quorum_membership: Arc, + instance_state: Arc, + vote_info: VoteInfo, + version: Version, + id: u64, +) -> bool { + use hotshot_types::simple_vote::QuorumVote; + + if !quorum_membership.has_stake(&public_key) { + debug!("We were not chosen for quorum committee on {:?}", cur_view); + return false; + } + + let read_consnesus = consensus.read().await; + // Only vote if you has seen the VID share for this view + let Some(vid_shares) = read_consnesus.vid_shares().get(&proposal.view_number) else { + debug!( + "We have not seen the VID share for this view {:?} yet, so we cannot vote.", + proposal.view_number + ); + return false; + }; + let Some(vid_share) = vid_shares.get(&public_key).cloned() else { + debug!("we have not seen our VID share yet"); + return false; + }; + + if let Some(upgrade_cert) = &vote_info.1 { + if upgrade_cert.upgrading_in(cur_view) + && Some(proposal.block_header.payload_commitment()) + != null_block::commitment(quorum_membership.total_nodes()) + { + info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); + return false; + } + } + + // Only vote if you have the DA cert + // ED Need to update the view number this is stored under? + let Some(cert) = read_consnesus.saved_da_certs().get(&cur_view).cloned() else { + return false; + }; + drop(read_consnesus); + + let view = cert.view_number; + // TODO: do some of this logic without the vote token check, only do that when voting. + let justify_qc = proposal.justify_qc.clone(); + let mut parent = consensus + .read() + .await + .saved_leaves() + .get(&justify_qc.date().leaf_commit) + .cloned(); + parent = match parent { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + vote_info.3.clone(), + Arc::clone(&quorum_membership), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + ) + .await + .ok(), + }; + + let read_consnesus = consensus.read().await; + + // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) + let Some(parent) = parent else { + error!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.date().leaf_commit, + proposal.view_number, + ); + return false; + }; + let (Some(parent_state), _) = read_consnesus.state_and_delta(parent.view_number()) else { + warn!("Parent state not found! Consensus internally inconsistent"); + return false; + }; + drop(read_consnesus); + let Ok((validated_state, state_delta)) = parent_state + .validate_and_apply_header( + instance_state.as_ref(), + &parent, + &proposal.block_header.clone(), + vid_share.data.common.clone(), + version, + ) + .await + else { + warn!("Block header doesn't extend the proposal!"); + return false; + }; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + let parent_commitment = parent.commit(); + + let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + if proposed_leaf.parent_commitment() != parent_commitment { + return false; + } + + // Validate the DAC. + let message = if cert.is_valid_cert(vote_info.2.as_ref()) { + // Validate the block payload commitment for non-genesis DAC. + if cert.date().payload_commit != proposal.block_header.payload_commitment() { + warn!( + "Block payload commitment does not equal da cert payload commitment. View = {}", + *view + ); + return false; + } + if let Ok(vote) = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: proposed_leaf.commit(), + }, + view, + &public_key, + &vote_info.0, + ) { + GeneralConsensusMessage::::Vote(vote) + } else { + error!("Unable to sign quorum vote!"); + return false; + } + } else { + error!( + "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", + cert, cur_view + ); + return false; + }; + + let mut consensus = consensus.write().await; + if let Err(e) = consensus.update_validated_state_map( + cur_view, + View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(), + state: Arc::clone(&state), + delta: Some(Arc::clone(&delta)), + }, + }, + ) { + tracing::trace!("{e:?}"); + } + consensus.update_saved_leaves(proposed_leaf.clone()); + let new_leaves = consensus.saved_leaves().clone(); + let new_state = consensus.validated_state_map().clone(); + drop(consensus); + + if let Err(e) = storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await + { + error!("Couldn't store undecided state. Error: {:?}", e); + } + + if let GeneralConsensusMessage::Vote(vote) = message { + debug!( + "Sending vote to next quorum leader {:?}", + vote.view_number() + 1 + ); + // Add to the storage that we have received the VID disperse for a specific view + if let Err(e) = storage.write().await.append_vid(&vid_share).await { + warn!( + "Failed to store VID Disperse Proposal with error {:?}, aborting vote", + e + ); + return false; + } + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &vote_info.3).await; + return true; + } + debug!( + "Received VID share, but couldn't find DAC cert for view {:?}", + *proposal.view_number(), + ); + false +} diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 5f33243970..cdcb596cdf 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -12,7 +12,7 @@ use async_trait::async_trait; use futures::future::join_all; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus}, + consensus::{CommitmentAndMetadata, OuterConsensus}, data::{QuorumProposal, VidDisperseShare, ViewChangeEvidence}, event::{Event, EventType}, message::Proposal, @@ -59,7 +59,7 @@ pub struct ConsensusTaskState> { /// Our Private Key pub private_key: ::PrivateKey, /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// Immutable instance state pub instance_state: Arc, /// View timeout from config. @@ -196,6 +196,7 @@ impl> ConsensusTaskState } /// Publishes a proposal + #[instrument(skip_all, target = "ConsensusTaskState", fields(id = self.id, view = *self.cur_view))] async fn publish_proposal( &mut self, view: TYPES::Time, @@ -207,7 +208,7 @@ impl> ConsensusTaskState Arc::clone(&self.quorum_membership), self.public_key.clone(), self.private_key.clone(), - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.round_start_delay, self.formed_upgrade_certificate.clone(), self.decided_upgrade_cert.clone(), @@ -215,6 +216,7 @@ impl> ConsensusTaskState self.proposal_cert.clone(), Arc::clone(&self.instance_state), *self.version.read().await, + self.id, ) .await?; @@ -228,6 +230,7 @@ impl> ConsensusTaskState /// Spawn a vote task for the given view. Will try to vote /// and emit a `QuorumVoteSend` event we should vote on the current proposal + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), target = "ConsensusTaskState")] async fn spawn_vote_task( &mut self, view: TYPES::Time, @@ -242,12 +245,13 @@ impl> ConsensusTaskState let upgrade = self.decided_upgrade_cert.clone(); let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); - let consensus = Arc::clone(&self.consensus); + let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let storage = Arc::clone(&self.storage); let quorum_mem = Arc::clone(&self.quorum_membership); let da_mem = Arc::clone(&self.da_membership); let instance_state = Arc::clone(&self.instance_state); let version = *self.version.read().await; + let id = self.id; let handle = async_spawn(async move { update_state_and_vote_if_able::( view, @@ -259,6 +263,7 @@ impl> ConsensusTaskState instance_state, (priv_key, upgrade, da_mem, event_stream), version, + id, ) .await; }); @@ -266,7 +271,7 @@ impl> ConsensusTaskState } /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -525,7 +530,7 @@ impl> ConsensusTaskState new_view, &event_stream, self.timeout, - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &mut self.cur_view, &mut self.cur_view_time, &mut self.timeout_task, diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs new file mode 100644 index 0000000000..4f6c7bdac2 --- /dev/null +++ b/task-impls/src/consensus/view_change.rs @@ -0,0 +1,137 @@ +use core::time::Duration; +use std::sync::Arc; + +use anyhow::{ensure, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use chrono::Utc; +use hotshot_types::{ + consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, + event::{Event, EventType}, + traits::node_implementation::{ConsensusTime, NodeType}, +}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; +use tracing::{debug, error, instrument}; + +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, +}; + +/// Constant which tells [`update_view`] to send a view change event when called. +pub(crate) const SEND_VIEW_CHANGE_EVENT: bool = true; + +/// Constant which tells [`update_view`] to not send a view change event when called. +pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; + +/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the +/// `timeout_task` which are updated during the operation of the function. +/// +/// # Errors +/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. +/// TODO: Remove args when we merge dependency tasks. +#[allow(clippy::too_many_arguments)] +#[instrument(skip_all)] +pub(crate) async fn update_view( + new_view: TYPES::Time, + event_stream: &Sender>>, + timeout: u64, + consensus: OuterConsensus, + cur_view: &mut TYPES::Time, + cur_view_time: &mut i64, + timeout_task: &mut JoinHandle<()>, + output_event_stream: &Sender>, + send_view_change_event: bool, + is_old_view_leader: bool, +) -> Result<()> { + ensure!( + new_view > *cur_view, + "New view is not greater than our current view" + ); + + let old_view = *cur_view; + + debug!("Updating view from {} to {}", *old_view, *new_view); + + if *old_view / 100 != *new_view / 100 { + // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): + // switch to info! when INFO logs become less cluttered + error!("Progress: entered view {:>6}", *new_view); + } + + *cur_view = new_view; + + // The next view is just the current view + 1 + let next_view = *cur_view + 1; + + if send_view_change_event { + futures::join! { + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), + broadcast_event( + Event { + view_number: old_view, + event: EventType::ViewFinished { + view_number: old_view, + }, + }, + output_event_stream, + ) + }; + } + + // Spawn a timeout task if we did actually update view + let new_timeout_task = async_spawn({ + let stream = event_stream.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = next_view; + let timeout = Duration::from_millis(timeout); + async move { + async_sleep(timeout).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + }); + + // cancel the old timeout task + cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await; + + let consensus = consensus.upgradable_read().await; + consensus + .metrics + .current_view + .set(usize::try_from(cur_view.u64()).unwrap()); + let new_view_time = Utc::now().timestamp(); + if is_old_view_leader { + #[allow(clippy::cast_precision_loss)] + consensus + .metrics + .view_duration_as_leader + .add_point((new_view_time - *cur_view_time) as f64); + } + *cur_view_time = new_view_time; + + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(cur_view.u64()).unwrap() + > usize::try_from(consensus.last_decided_view().u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(cur_view.u64()).unwrap() + - usize::try_from(consensus.last_decided_view().u64()).unwrap(), + ); + } + let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; + if let Err(e) = consensus.update_view(new_view) { + tracing::trace!("{e:?}"); + } + tracing::trace!("View updated successfully"); + + Ok(()) +} diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index c11c96c3db..f82cb78377 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -14,7 +14,7 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, warn}; +use tracing::{debug, instrument, warn}; use super::Consensus2TaskState; use crate::{ @@ -117,6 +117,7 @@ pub(crate) async fn handle_timeout_vote_recv>( new_view_number: TYPES::Time, sender: &Sender>>, @@ -214,6 +215,7 @@ pub(crate) async fn handle_view_change>( view_number: TYPES::Time, sender: &Sender>>, diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index a0063c0f2c..912a783637 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -8,7 +8,7 @@ use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, event::Event, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::{QuorumVote, TimeoutVote}, @@ -83,7 +83,7 @@ pub struct Consensus2TaskState> { pub timeout: u64, /// A reference to the metrics trait. - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// The last decided view pub last_decided_view: TYPES::Time, @@ -99,7 +99,7 @@ pub struct Consensus2TaskState> { } impl> Consensus2TaskState { /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error")] + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error", target = "Consensus2TaskState")] pub async fn handle( &mut self, event: Arc>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index accb841821..dcd97ae727 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -9,7 +9,7 @@ use async_std::task::spawn_blocking; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::{Consensus, LockedConsensusState, View}, + consensus::{Consensus, OuterConsensus, View}, data::DaProposal, event::{Event, EventType}, message::Proposal, @@ -51,7 +51,7 @@ pub struct DaTaskState> { pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: LockedConsensusState, + pub consensus: OuterConsensus, /// Membership for the DA committee pub da_membership: Arc, @@ -82,7 +82,7 @@ pub struct DaTaskState> { impl> DaTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error", target = "DaTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -216,14 +216,15 @@ impl> DaTaskState { } // Optimistically calculate and update VID if we know that the primary network is down. if self.network.is_primary_down() { - let consensus = Arc::clone(&self.consensus); + let consensus = + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let membership = Arc::clone(&self.quorum_membership); let pk = self.private_key.clone(); let public_key = self.public_key.clone(); let chan = event_stream.clone(); async_spawn(async move { Consensus::calculate_and_update_vid( - Arc::clone(&consensus), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), view_number, membership, &pk, diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 4094f37128..8282729516 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -4,16 +4,20 @@ use std::{ sync::Arc, }; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + request::REQUEST_TIMEOUT, +}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{broadcast, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; use hotshot_types::{ - consensus::Consensus, + consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::Proposal, @@ -30,19 +34,14 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, warn}; - -use crate::{ - events::{HotShotEvent, ProposalMissing}, - request::REQUEST_TIMEOUT, -}; +use tracing::{debug, error, info, instrument, warn}; /// Trigger a request to the network for a proposal for a view and wait for the response pub(crate) async fn fetch_proposal( view: TYPES::Time, event_stream: Sender>>, quorum_membership: Arc, - consensus: Arc>>, + consensus: OuterConsensus, ) -> Result> { tracing::debug!("Fetching proposal for view {:?}", view); let (tx, mut rx) = broadcast(1); @@ -162,7 +161,7 @@ impl Default for LeafChainTraversalOutcome { /// the anchor view will be set to view 6, with the locked view as view 7. pub async fn decide_from_proposal( proposal: &QuorumProposal, - consensus: Arc>>, + consensus: OuterConsensus, existing_upgrade_cert: &Option>, public_key: &TYPES::SignatureKey, ) -> LeafChainTraversalOutcome { @@ -282,7 +281,7 @@ pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::Time, quorum_membership: Arc, public_key: TYPES::SignatureKey, - consensus: Arc>>, + consensus: OuterConsensus, ) -> Result<(Leaf, Arc<::ValidatedState>)> { ensure!( quorum_membership.leader(next_proposal_view_number) == public_key, @@ -351,7 +350,7 @@ pub(crate) async fn parent_leaf_and_state( pub async fn temp_validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, - consensus: Arc>>, + consensus: OuterConsensus, decided_upgrade_certificate: Option>, quorum_membership: Arc, view_leader_key: TYPES::SignatureKey, @@ -487,16 +486,18 @@ pub async fn temp_validate_proposal_safety_and_liveness( /// we merge the dependency tasks. #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] +#[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] pub async fn validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, - consensus: Arc>>, + consensus: OuterConsensus, decided_upgrade_certificate: Arc>>>, quorum_membership: Arc, view_leader_key: TYPES::SignatureKey, event_stream: Sender>>, sender: TYPES::SignatureKey, event_sender: Sender>, + id: u64, ) -> Result<()> { let view_number = proposal.data.view_number(); @@ -706,7 +707,7 @@ pub(crate) async fn update_view( new_view: TYPES::Time, event_stream: &Sender>>, timeout: u64, - consensus: Arc>>, + consensus: OuterConsensus, cur_view: &mut TYPES::Time, cur_view_time: &mut i64, timeout_task: &mut JoinHandle<()>, @@ -794,7 +795,7 @@ pub(crate) async fn update_view( - usize::try_from(consensus.last_decided_view().u64()).unwrap(), ); } - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; if let Err(e) = consensus.update_view(new_view) { tracing::trace!("{e:?}"); } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index a927c5292a..01e2636fa0 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -15,7 +15,7 @@ use hotshot_task::{ dependency_task::HandleDepOutput, }; use hotshot_types::{ - consensus::{CommitmentAndMetadata, Consensus}, + consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, simple_certificate::UpgradeCertificate, @@ -23,7 +23,7 @@ use hotshot_types::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, }; -use tracing::{debug, error}; +use tracing::{debug, error, instrument}; use vbs::version::Version; use crate::{ @@ -83,7 +83,7 @@ pub struct ProposalDependencyHandle { pub round_start_delay: u64, /// Shared consensus task state - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// Globally shared reference to the current network version. pub version: Arc>, @@ -98,12 +98,16 @@ pub struct ProposalDependencyHandle { /// An upgrade certificate that has been decided on, if any. pub decided_upgrade_certificate: Arc>>>, + + /// The node's id + pub id: u64, } impl ProposalDependencyHandle { /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. + #[instrument(skip_all, target = "ProposalDependencyHandle", fields(id = self.id, view_number = *self.view_number, latest_proposed_view = *self.latest_proposed_view))] async fn publish_proposal( &self, commitment_and_metadata: CommitmentAndMetadata, @@ -116,7 +120,7 @@ impl ProposalDependencyHandle { self.view_number, Arc::clone(&self.quorum_membership), self.public_key.clone(), - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), ) .await?; @@ -233,7 +237,7 @@ impl HandleDepOutput for ProposalDependencyHandle { // The proposal for the high qc view is missing, try to get it asynchronously let memberhsip = Arc::clone(&self.quorum_membership); let sender = self.sender.clone(); - let consensus = Arc::clone(&self.consensus); + let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); async_spawn(async move { fetch_proposal(high_qc_view_number, sender, memberhsip, consensus).await }); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 456267eaa1..cc4539da3f 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -15,7 +15,7 @@ use hotshot_task::{ task::TaskState, }; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, event::Event, simple_certificate::UpgradeCertificate, traits::{ @@ -81,7 +81,7 @@ pub struct QuorumProposalTaskState pub storage: Arc>, /// Shared consensus task state - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// The node's id pub id: u64, @@ -319,10 +319,11 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState>, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index ffcf032a54..e64fefa9af 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -8,6 +8,7 @@ use async_broadcast::{broadcast, Sender}; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; use hotshot_types::{ + consensus::OuterConsensus, data::{Leaf, QuorumProposal}, message::Proposal, simple_certificate::QuorumCertificate, @@ -20,7 +21,7 @@ use hotshot_types::{ utils::{View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use tracing::{debug, error, warn}; +use tracing::{debug, error, instrument, warn}; use super::QuorumProposalRecvTaskState; use crate::{ @@ -42,6 +43,7 @@ pub(crate) enum QuorumProposalValidity { } /// Update states in the event that the parent state is not found for a given `proposal`. +#[instrument(skip_all)] async fn validate_proposal_liveness>( proposal: &Proposal>, event_sender: &Sender>>, @@ -108,6 +110,7 @@ async fn validate_proposal_liveness>( proposal: &Proposal>, sender: &TYPES::SignatureKey, @@ -141,7 +144,7 @@ pub(crate) async fn handle_quorum_proposal_recv::PrivateKey, /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -132,7 +132,7 @@ impl> QuorumProposalRecvTaskState< proposal.data.view_number() + 1, Arc::clone(&self.quorum_membership), self.public_key.clone(), - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), ) .await { diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 6172d3a907..7976e5d413 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -5,13 +5,14 @@ use std::sync::Arc; use anyhow::Result; use async_broadcast::Sender; use chrono::Utc; +use hotshot_types::consensus::OuterConsensus; use hotshot_types::{ data::QuorumProposal, event::{Event, EventType}, traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, vote::HasViewNumber, }; -use tracing::debug; +use tracing::{debug, instrument}; use super::QuorumVoteTaskState; use crate::{ @@ -20,6 +21,7 @@ use crate::{ }; /// Handles the `QuorumProposalValidated` event. +#[instrument(skip_all)] pub(crate) async fn handle_quorum_proposal_validated< TYPES: NodeType, I: NodeImplementation, @@ -39,7 +41,7 @@ pub(crate) async fn handle_quorum_proposal_validated< decided_upgrade_certificate, } = decide_from_proposal( proposal, - Arc::clone(&task_state.consensus), + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), &decided_upgrade_certificate_read, &task_state.public_key, ) diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 9969a04a4a..24e2b5f570 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -15,7 +15,7 @@ use hotshot_task::{ task::TaskState, }; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, data::{Leaf, VidDisperseShare, ViewNumber}, event::Event, message::Proposal, @@ -69,7 +69,7 @@ struct VoteDependencyHandle> { /// Private Key. pub private_key: ::PrivateKey, /// Reference to consensus. The replica will require a write lock on this. - consensus: Arc>>, + consensus: OuterConsensus, /// Immutable instance state instance_state: Arc, /// Membership for Quorum certs/votes. @@ -90,6 +90,7 @@ struct VoteDependencyHandle> { impl + 'static> VoteDependencyHandle { /// Updates the shared consensus state with the new voting data. + #[instrument(skip_all, target = "VoteDependencyHandle", fields(id = self.id, view = *self.view_number))] async fn update_shared_state( &self, proposed_leaf: &Leaf, @@ -111,7 +112,7 @@ impl + 'static> VoteDependencyHand justify_qc.view_number(), self.sender.clone(), Arc::clone(&self.quorum_membership), - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), ) .await .ok(), @@ -359,7 +360,7 @@ pub struct QuorumVoteTaskState> { pub private_key: ::PrivateKey, /// Reference to consensus. The replica will require a write lock on this. - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// Immutable instance state pub instance_state: Arc, @@ -507,7 +508,7 @@ impl> QuorumVoteTaskState { public_key: self.public_key.clone(), private_key: self.private_key.clone(), - consensus: Arc::clone(&self.consensus), + consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), instance_state: Arc::clone(&self.instance_state), quorum_membership: Arc::clone(&self.quorum_membership), storage: Arc::clone(&self.storage), @@ -547,7 +548,7 @@ impl> QuorumVoteTaskState>, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 5fd0254718..b41dde76fb 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -10,13 +10,12 @@ use std::{ use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; -use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, data::QuorumProposal, message::{ DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, @@ -54,7 +53,7 @@ pub struct NetworkRequestState> { pub network: Arc, /// Consensus shared state so we can check if we've gotten the information /// before sending a request - pub state: Arc>>, + pub state: OuterConsensus, /// Last seen view, we won't request for proposals before older than this view pub view: TYPES::Time, /// Delay before requesting peers @@ -157,6 +156,7 @@ impl> NetworkRequestState Vec> { let mut reqs = Vec::new(); if !self.state.read().await.vid_shares().contains_key(&view) { @@ -201,12 +201,13 @@ impl> NetworkRequestState { network: Arc::clone(&self.network), - state: Arc::clone(&self.state), + state: OuterConsensus::new(Arc::clone(&self.state.inner_consensus)), public_key: self.public_key.clone(), sender, delay: self.delay, recipients, shutdown_flag: Arc::clone(&self.shutdown_flag), + id: self.id, }; let Some(signature) = self.serialize_and_sign(&request) else { return; @@ -253,7 +254,7 @@ struct DelayedRequester> { /// The underlying network to send requests on pub network: Arc, /// Shared state to check if the data go populated - state: Arc>>, + state: OuterConsensus, /// our public key public_key: TYPES::SignatureKey, /// Channel to send the event when we receive a response @@ -264,6 +265,8 @@ struct DelayedRequester> { recipients: Vec, /// A flag indicating that `HotShotEvent::Shutdown` has been received shutdown_flag: Arc, + /// The node's id + id: u64, } /// A task the requests some data immediately from one peer @@ -391,6 +394,7 @@ impl> DelayedRequester { } } /// Returns true if we got the data we wanted, or the view has moved on. + #[instrument(skip_all, target = "DelayedRequester", fields(id = self.id, view = *req.0))] async fn cancel_vid(&self, req: &VidRequest) -> bool { let view = req.0; let state = self.state.read().await; diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index fa5774a3a4..b6264101ab 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -7,7 +7,7 @@ use async_std::task::JoinHandle; use futures::{channel::mpsc, FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ - consensus::{Consensus, LockedConsensusState}, + consensus::{Consensus, LockedConsensusState, OuterConsensus}, data::VidDisperseShare, message::{ DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, @@ -23,6 +23,7 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tracing::instrument; use crate::events::HotShotEvent; @@ -46,6 +47,8 @@ pub struct NetworkResponseState { pub_key: TYPES::SignatureKey, /// This replicas private key private_key: ::PrivateKey, + /// The node's id + id: u64, } impl NetworkResponseState { @@ -56,6 +59,7 @@ impl NetworkResponseState { quorum: Arc, pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, + id: u64, ) -> Self { Self { consensus, @@ -63,6 +67,7 @@ impl NetworkResponseState { quorum, pub_key, private_key, + id, } } @@ -133,6 +138,7 @@ impl NetworkResponseState { /// Get the VID share from consensus storage, or calculate it from the payload for /// the view, if we have the payload. Stores all the shares calculated from the payload /// if the calculation was done + #[instrument(skip_all, target = "NetworkResponseState", fields(id = self.id))] async fn get_or_calc_vid_share( &self, view: TYPES::Time, @@ -147,7 +153,7 @@ impl NetworkResponseState { .is_some_and(|m| m.contains_key(key)); if !contained { if Consensus::calculate_and_update_vid( - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.quorum), &self.private_key, @@ -158,7 +164,7 @@ impl NetworkResponseState { // Sleep in hope we receive txns in the meantime async_sleep(TXNS_TIMEOUT).await; Consensus::calculate_and_update_vid( - Arc::clone(&self.consensus), + OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.quorum), &self.private_key, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 84fb9bcb09..4396ddeae6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -6,7 +6,6 @@ use std::{ use anyhow::{bail, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_sleep; -use async_lock::RwLock; use async_trait::async_trait; use futures::{stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::block_info::{ @@ -14,7 +13,7 @@ use hotshot_builder_api::block_info::{ }; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, data::{null_block, Leaf}, event::{Event, EventType}, simple_certificate::UpgradeCertificate, @@ -75,7 +74,7 @@ pub struct TransactionTaskState> { pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// The underlying network pub network: Arc, @@ -100,7 +99,7 @@ pub struct TransactionTaskState> { impl> TransactionTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -233,9 +232,9 @@ impl> TransactionTaskState (TYPES::Time, VidCommitment) { let consensus = self.consensus.read().await; - let mut prev_view = TYPES::Time::new(self.cur_view.saturating_sub(1)); // Search through all previous views... diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 97ef0a1704..67b66788bc 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -2,11 +2,10 @@ use std::{marker::PhantomData, sync::Arc}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, data::{VidDisperse, VidDisperseShare}, message::Proposal, traits::{ @@ -28,7 +27,7 @@ pub struct VidTaskState> { /// View number this view is executing in. pub cur_view: TYPES::Time, /// Reference to consensus. Leader will require a read lock on this. - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// The underlying network pub network: Arc, /// Membership for the quorum @@ -45,7 +44,7 @@ pub struct VidTaskState> { impl> VidTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error", target = "VidTaskState")] pub async fn handle( &mut self, event: Arc>, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 6cbc39719c..d21808da01 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -2,13 +2,15 @@ use std::{ collections::{BTreeMap, HashMap}, + mem::ManuallyDrop, + ops::{Deref, DerefMut}, sync::Arc, }; use anyhow::{bail, ensure, Result}; -use async_lock::{RwLock, RwLockUpgradableReadGuard}; +use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use committable::{Commitment, Committable}; -use tracing::{debug, error}; +use tracing::{debug, error, instrument}; pub use crate::utils::{View, ViewInner}; use crate::{ @@ -40,6 +42,187 @@ pub type VidShares = BTreeMap< /// Type alias for consensus state wrapped in a lock. pub type LockedConsensusState = Arc>>; +/// A thin wrapper around `LockedConsensusState` that helps debugging locks +#[derive(Clone, Debug)] +pub struct OuterConsensus { + /// Inner `LockedConsensusState` + pub inner_consensus: LockedConsensusState, +} + +impl OuterConsensus { + /// Create a new instance of `OuterConsensus`, hopefully uniquely named + pub fn new(consensus: LockedConsensusState) -> Self { + Self { + inner_consensus: consensus, + } + } + + /// Locks inner consensus for reading and leaves debug traces + #[instrument(skip_all, target = "OuterConsensus")] + pub async fn read(&self) -> ConsensusReadLockGuard<'_, TYPES> { + debug!("Trying to acquire read lock on consensus"); + let ret = self.inner_consensus.read().await; + debug!("Acquired read lock on consensus"); + ConsensusReadLockGuard::new(ret) + } + + /// Locks inner consensus for writing and leaves debug traces + #[instrument(skip_all, target = "OuterConsensus")] + pub async fn write(&self) -> ConsensusWriteLockGuard<'_, TYPES> { + debug!("Trying to acquire write lock on consensus"); + let ret = self.inner_consensus.write().await; + debug!("Acquired write lock on consensus"); + ConsensusWriteLockGuard::new(ret) + } + + /// Tries to acquire write lock on inner consensus and leaves debug traces + #[instrument(skip_all, target = "OuterConsensus")] + pub fn try_write(&self) -> Option> { + debug!("Trying to acquire write lock on consensus"); + let ret = self.inner_consensus.try_write(); + if let Some(guard) = ret { + debug!("Acquired write lock on consensus"); + Some(ConsensusWriteLockGuard::new(guard)) + } else { + debug!("Failed to acquire write lock"); + None + } + } + + /// Acquires upgradable read lock on inner consensus and leaves debug traces + #[instrument(skip_all, target = "OuterConsensus")] + pub async fn upgradable_read(&self) -> ConsensusUpgradableReadLockGuard<'_, TYPES> { + debug!("Trying to acquire upgradable read lock on consensus"); + let ret = self.inner_consensus.upgradable_read().await; + debug!("Acquired upgradable read lock on consensus"); + ConsensusUpgradableReadLockGuard::new(ret) + } + + /// Tries to acquire read lock on inner consensus and leaves debug traces + #[instrument(skip_all, target = "OuterConsensus")] + pub fn try_read(&self) -> Option> { + debug!("Trying to acquire read lock on consensus"); + let ret = self.inner_consensus.try_read(); + if let Some(guard) = ret { + debug!("Acquired read lock on consensus"); + Some(ConsensusReadLockGuard::new(guard)) + } else { + debug!("Failed to acquire read lock"); + None + } + } +} + +/// A thin wrapper around `RwLockReadGuard` for `Consensus` that leaves debug traces when the lock is freed +pub struct ConsensusReadLockGuard<'a, TYPES: NodeType> { + /// Inner `RwLockReadGuard` + lock_guard: RwLockReadGuard<'a, Consensus>, +} + +impl<'a, TYPES: NodeType> ConsensusReadLockGuard<'a, TYPES> { + /// Creates a new instance of `ConsensusReadLockGuard` with the same name as parent `OuterConsensus` + #[must_use] + pub fn new(lock_guard: RwLockReadGuard<'a, Consensus>) -> Self { + Self { lock_guard } + } +} + +impl<'a, TYPES: NodeType> Deref for ConsensusReadLockGuard<'a, TYPES> { + type Target = Consensus; + fn deref(&self) -> &Self::Target { + &self.lock_guard + } +} + +impl<'a, TYPES: NodeType> Drop for ConsensusReadLockGuard<'a, TYPES> { + #[instrument(skip_all, target = "ConsensusReadLockGuard")] + fn drop(&mut self) { + debug!("Read lock on consensus dropped"); + } +} + +/// A thin wrapper around `RwLockWriteGuard` for `Consensus` that leaves debug traces when the lock is freed +pub struct ConsensusWriteLockGuard<'a, TYPES: NodeType> { + /// Inner `RwLockWriteGuard` + lock_guard: RwLockWriteGuard<'a, Consensus>, +} + +impl<'a, TYPES: NodeType> ConsensusWriteLockGuard<'a, TYPES> { + /// Creates a new instance of `ConsensusWriteLockGuard` with the same name as parent `OuterConsensus` + #[must_use] + pub fn new(lock_guard: RwLockWriteGuard<'a, Consensus>) -> Self { + Self { lock_guard } + } +} + +impl<'a, TYPES: NodeType> Deref for ConsensusWriteLockGuard<'a, TYPES> { + type Target = Consensus; + fn deref(&self) -> &Self::Target { + &self.lock_guard + } +} + +impl<'a, TYPES: NodeType> DerefMut for ConsensusWriteLockGuard<'a, TYPES> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.lock_guard + } +} + +impl<'a, TYPES: NodeType> Drop for ConsensusWriteLockGuard<'a, TYPES> { + #[instrument(skip_all, target = "ConsensusWriteLockGuard")] + fn drop(&mut self) { + debug!("Write lock on consensus dropped"); + } +} + +/// A thin wrapper around `RwLockUpgradableReadGuard` for `Consensus` that leaves debug traces when the lock is freed or upgraded +pub struct ConsensusUpgradableReadLockGuard<'a, TYPES: NodeType> { + /// Inner `RwLockUpgradableReadGuard` + lock_guard: ManuallyDrop>>, + /// A helper bool to indicate whether inner lock has been unsafely taken or not + taken: bool, +} + +impl<'a, TYPES: NodeType> ConsensusUpgradableReadLockGuard<'a, TYPES> { + /// Creates a new instance of `ConsensusUpgradableReadLockGuard` with the same name as parent `OuterConsensus` + #[must_use] + pub fn new(lock_guard: RwLockUpgradableReadGuard<'a, Consensus>) -> Self { + Self { + lock_guard: ManuallyDrop::new(lock_guard), + taken: false, + } + } + + /// Upgrades the inner `RwLockUpgradableReadGuard` and leaves debug traces + #[instrument(skip_all, target = "ConsensusUpgradableReadLockGuard")] + pub async fn upgrade(mut guard: Self) -> ConsensusWriteLockGuard<'a, TYPES> { + let inner_guard = unsafe { ManuallyDrop::take(&mut guard.lock_guard) }; + guard.taken = true; + debug!("Trying to upgrade upgradable read lock on consensus"); + let ret = RwLockUpgradableReadGuard::upgrade(inner_guard).await; + debug!("Upgraded upgradable read lock on consensus"); + ConsensusWriteLockGuard::new(ret) + } +} + +impl<'a, TYPES: NodeType> Deref for ConsensusUpgradableReadLockGuard<'a, TYPES> { + type Target = Consensus; + + fn deref(&self) -> &Self::Target { + &self.lock_guard + } +} + +impl<'a, TYPES: NodeType> Drop for ConsensusUpgradableReadLockGuard<'a, TYPES> { + #[instrument(skip_all, target = "ConsensusUpgradableReadLockGuard")] + fn drop(&mut self) { + if !self.taken { + unsafe { ManuallyDrop::drop(&mut self.lock_guard) } + debug!("Upgradable read lock on consensus dropped"); + } + } +} + /// A reference to the consensus algorithm /// /// This will contain the state of all rounds. @@ -516,8 +699,9 @@ impl Consensus { /// Calculates `VidDisperse` based on the view, the txns and the membership, /// and updates `vid_shares` map with the signed `VidDisperseShare` proposals. /// Returned `Option` indicates whether the update has actually happened or not. + #[instrument(skip_all, target = "Consensus", fields(view = *view))] pub async fn calculate_and_update_vid( - consensus: LockedConsensusState, + consensus: OuterConsensus, view: ::Time, membership: Arc, private_key: &::PrivateKey, @@ -527,7 +711,7 @@ impl Consensus { let vid = VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, None).await; let shares = VidDisperseShare::from_vid_disperse(vid); - let mut consensus = RwLockUpgradableReadGuard::upgrade(consensus).await; + let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; for share in shares { if let Some(prop) = share.to_proposal(private_key) { consensus.update_vid_shares(view, prop); From c57f361936bcd2cec5d43ae215863419091a8930 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 2 Jul 2024 09:08:53 -0600 Subject: [PATCH 1111/1393] [CX-Marketplace] - Add the `Bundle` Complementary Builder Type (#3388) * add new field * fix tests and types * use new when broadcasting * pr feedback * fix build --- task-impls/Cargo.toml | 1 + task-impls/src/da.rs | 14 +++++--- task-impls/src/events.rs | 21 +++++------ task-impls/src/transactions.rs | 59 +++++++++++++++++++++---------- task-impls/src/vid.rs | 22 ++++++------ testing/tests/tests_1/da_task.rs | 12 +++++-- testing/tests/tests_1/vid_task.rs | 16 +++++---- types/src/data.rs | 47 +++++++++++++++++++++++- 8 files changed, 138 insertions(+), 54 deletions(-) diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 5e894cd199..1561c74a7e 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -38,6 +38,7 @@ tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } +vec1 = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index dcd97ae727..2519836c5d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -10,7 +10,7 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::{Consensus, OuterConsensus, View}, - data::DaProposal, + data::{DaProposal, PackedBundle}, event::{Event, EventType}, message::Proposal, simple_certificate::DaCertificate, @@ -303,8 +303,14 @@ impl> DaTaskState { return None; } - HotShotEvent::BlockRecv(encoded_transactions, metadata, view, _fee, _vid_precomp) => { - let view = *view; + HotShotEvent::BlockRecv(packed_bundle) => { + let PackedBundle:: { + encoded_transactions, + metadata, + view_number, + .. + } = packed_bundle; + let view_number = *view_number; // quick hash the encoded txns with sha256 let encoded_transactions_hash = Sha256::digest(encoded_transactions); @@ -321,7 +327,7 @@ impl> DaTaskState { encoded_transactions: Arc::clone(encoded_transactions), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? - view_number: view, + view_number, }; let message = Proposal { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 90d945aec9..8c46229090 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,10 +1,13 @@ -use std::{fmt::Display, sync::Arc}; +use std::fmt::Display; use async_broadcast::Sender; use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ - data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare}, + data::{ + DaProposal, Leaf, PackedBundle, QuorumProposal, UpgradeProposal, VidDisperse, + VidDisperseShare, + }, message::Proposal, simple_certificate::{ DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, @@ -16,7 +19,7 @@ use hotshot_types::{ }, traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, utils::{BuilderCommitment, View}, - vid::{VidCommitment, VidPrecomputeData}, + vid::VidCommitment, vote::{HasViewNumber, VoteDependencyData}, }; use vbs::version::Version; @@ -143,13 +146,7 @@ pub enum HotShotEvent { BuilderFee, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number - BlockRecv( - Arc<[u8]>, - >::Metadata, - TYPES::Time, - BuilderFee, - VidPrecomputeData, - ), + BlockRecv(PackedBundle), /// Event when the transactions task has a block formed BlockReady(VidDisperse, TYPES::Time), /// Event when consensus decided on a leaf @@ -363,8 +360,8 @@ impl Display for HotShotEvent { "SendPayloadCommitmentAndMetadata(view_number={view_number:?})" ) } - HotShotEvent::BlockRecv(_, _, view_number, ..) => { - write!(f, "BlockRecv(view_number={view_number:?})") + HotShotEvent::BlockRecv(packed_bundle) => { + write!(f, "BlockRecv(view_number={:?})", packed_bundle.view_number) } HotShotEvent::BlockReady(_, view_number) => { write!(f, "BlockReady(view_number={view_number:?})") diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4396ddeae6..49fa374e37 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -3,6 +3,11 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + builder::BuilderClient, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; use anyhow::{bail, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_sleep; @@ -14,7 +19,7 @@ use hotshot_builder_api::block_info::{ use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::{null_block, Leaf}, + data::{null_block, Leaf, PackedBundle}, event::{Event, EventType}, simple_certificate::UpgradeCertificate, traits::{ @@ -29,12 +34,6 @@ use hotshot_types::{ }; use tracing::{debug, error, instrument, warn}; -use crate::{ - builder::BuilderClient, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; - // Parameters for builder querying algorithm /// Proportion of builders queried in first batch, dividend @@ -62,6 +61,22 @@ pub struct BuilderResponses { pub block_header: AvailableBlockHeaderInput, } +/// The Bundle for a portion of a block, provided by a downstream builder that exists in a bundle +/// auction. +pub struct Bundle { + /// The bundle transactions sent by the builder. + pub transactions: Vec<>::Transaction>, + + /// The signature over the bundle. + pub signature: TYPES::SignatureKey, + + /// The fee for submitting a bid. + pub bid_fee: BuilderFee, + + /// The fee for sequencing + pub sequencing_fee: BuilderFee, +} + /// Tracks state of a Transaction task pub struct TransactionTaskState> { /// The state's api @@ -163,18 +178,26 @@ impl> TransactionTaskState> TransactionTaskState> TransactionTaskState> VidTaskState { event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::BlockRecv( - encoded_transactions, - metadata, - view_number, - fee, - precompute_data, - ) => { + HotShotEvent::BlockRecv(packed_bundle) => { + let PackedBundle:: { + encoded_transactions, + metadata, + view_number, + bid_fees, + vid_precompute, + .. + } = packed_bundle; let payload = ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); @@ -65,7 +67,7 @@ impl> VidTaskState { Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, - Some(precompute_data.clone()), + Some(vid_precompute.clone()), ) .await; let payload_commitment = vid_disperse.payload_commitment; @@ -85,7 +87,7 @@ impl> VidTaskState { builder_commitment, metadata.clone(), *view_number, - fee.clone(), + bid_fees.first().clone(), )), &event_stream, ) diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index e45b7583e4..ddaed595d6 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -16,7 +16,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, ViewNumber}, + data::{null_block, ViewNumber, PackedBundle}, simple_vote::DaData, traits::{ block_contents::precompute_vid_commitment, election::Membership, @@ -74,11 +74,14 @@ async fn test_da_task() { ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( + PackedBundle::new( encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], + vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], precompute, + ) ), ], serial![DaProposalRecv(proposals[1].clone(), leaders[1])], @@ -156,11 +159,14 @@ async fn test_da_task_storage_failure() { ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv( + PackedBundle::new( encoded_transactions, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], + vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], precompute, + ), ), ], serial![DaProposalRecv(proposals[1].clone(), leaders[1])], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 97444fd1c5..fccb4525b0 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -15,7 +15,7 @@ use hotshot_testing::{ serial, }; use hotshot_types::{ - data::{null_block, DaProposal, VidDisperse, ViewNumber}, + data::{null_block, DaProposal, VidDisperse, ViewNumber,PackedBundle}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -86,12 +86,16 @@ async fn test_vid_task() { serial![ ViewChange(ViewNumber::new(2)), BlockRecv( - encoded_transactions, - TestMetadata, - ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), - vid_precompute, + PackedBundle::new( + encoded_transactions, + TestMetadata, + ViewNumber::new(2), + vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], + vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], + vid_precompute, + ) ), + ], ]; diff --git a/types/src/data.rs b/types/src/data.rs index c060c0e261..7e1847a8e4 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -26,6 +26,7 @@ use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; +use vec1::Vec1; use crate::{ message::Proposal, @@ -35,7 +36,8 @@ use crate::{ simple_vote::{QuorumData, UpgradeProposalData}, traits::{ block_contents::{ - vid_commitment, BlockHeader, EncodeBytes, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, + vid_commitment, BlockHeader, BuilderFee, EncodeBytes, TestableBlock, + GENESIS_VID_NUM_STORAGE_NODES, }, election::Membership, node_implementation::{ConsensusTime, NodeType}, @@ -845,3 +847,46 @@ pub mod null_block { } } } + +/// A packed bundle constructed from a sequence of bundles. +#[derive(Debug, Eq, PartialEq, Clone)] +pub struct PackedBundle { + /// The combined transactions as bytes. + pub encoded_transactions: Arc<[u8]>, + + /// The metadata of the block. + pub metadata: >::Metadata, + + /// The view number that this block is associated with. + pub view_number: TYPES::Time, + + /// The bid fees for submitting the block. + pub bid_fees: Vec1>, + + /// The sequencing fee for submitting bundles. + pub sequencing_fees: Vec1>, + + /// The Vid precompute for the block. + pub vid_precompute: VidPrecomputeData, +} + +impl PackedBundle { + /// Create a new [`PackedBundle`]. + pub fn new( + encoded_transactions: Arc<[u8]>, + metadata: >::Metadata, + view_number: TYPES::Time, + bid_fees: Vec1>, + sequencing_fees: Vec1>, + vid_precompute: VidPrecomputeData, + ) -> Self { + Self { + encoded_transactions, + metadata, + view_number, + bid_fees, + sequencing_fees, + vid_precompute, + } + } +} From f1ee7de5ee84772a6803fdd176d1652e3ab30fe9 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:50:14 -0400 Subject: [PATCH 1112/1393] remove hotshot-qc (#3408) --- hotshot-qc/Cargo.toml | 38 --- hotshot-qc/src/bit_vector.rs | 362 -------------------- hotshot-qc/src/bit_vector_old.rs | 329 ------------------ hotshot-qc/src/lib.rs | 5 - hotshot-qc/src/snarked.rs | 5 - hotshot-qc/src/snarked/circuit.rs | 546 ------------------------------ 6 files changed, 1285 deletions(-) delete mode 100644 hotshot-qc/Cargo.toml delete mode 100644 hotshot-qc/src/bit_vector.rs delete mode 100644 hotshot-qc/src/bit_vector_old.rs delete mode 100644 hotshot-qc/src/lib.rs delete mode 100644 hotshot-qc/src/snarked.rs delete mode 100644 hotshot-qc/src/snarked/circuit.rs diff --git a/hotshot-qc/Cargo.toml b/hotshot-qc/Cargo.toml deleted file mode 100644 index 6ab2f9d0bf..0000000000 --- a/hotshot-qc/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "hotshot-qc" -description = "Quorum certificate instantiations" -version = { workspace = true } -authors = { workspace = true } -edition = { workspace = true } -rust-version = { workspace = true } - -[dependencies] -ark-bls12-377 = "0.4" -ark-bn254 = "0.4" -ark-ec = { workspace = true } -ark-ff = "0.4" -ark-std = { workspace = true } -bitvec = { workspace = true } -ethereum-types = { workspace = true } -generic-array = { workspace = true } -hotshot-types = { path = "../types" } -jf-signature = { workspace = true } -jf-rescue = { workspace = true, features = ["gadgets"] } -jf-relation = { workspace = true } -jf-utils = { workspace = true } -serde = { workspace = true } -typenum = { workspace = true } - -[dev-dependencies] -hotshot-stake-table = { path = "../hotshot-stake-table" } - -[features] -default = ["parallel"] -std = ["ark-std/std"] -parallel = [ - "jf-utils/parallel", "jf-rescue/parallel", - "jf-signature/parallel", "jf-relation/parallel" -] - -[lints] -workspace = true diff --git a/hotshot-qc/src/bit_vector.rs b/hotshot-qc/src/bit_vector.rs deleted file mode 100644 index 2dfb10e2f4..0000000000 --- a/hotshot-qc/src/bit_vector.rs +++ /dev/null @@ -1,362 +0,0 @@ -//! Implementation for `BitVectorQc` that uses BLS signature + Bit vector. -//! See more details in `HotShot` paper. - -use ark_std::{ - fmt::Debug, - format, - marker::PhantomData, - rand::{CryptoRng, RngCore}, - vec, - vec::Vec, -}; -use bitvec::prelude::*; -use ethereum_types::U256; -use generic_array::GenericArray; -use hotshot_types::traits::{ - qc::QuorumCertificate, - stake_table::{SnapshotVersion, StakeTableScheme}, -}; -use jf_signature::AggregateableSignatureSchemes; -use serde::{Deserialize, Serialize}; -use typenum::U32; - -/// An implementation of QC using BLS signature and a bit-vector. -pub struct BitVectorQc( - PhantomData, - PhantomData, -); - -/// Public parameters of [`BitVectorQc`] -#[derive(Serialize, Deserialize, PartialEq, Debug)] -pub struct QcParams { - /// the stake table (snapshot) this QC is verified against - pub stake_table: ST, - /// threshold for the accumulated "weight" of votes to form a QC - pub threshold: U256, - /// public parameter for the aggregated signature scheme - pub agg_sig_pp: A::PublicParameter, -} - -impl QuorumCertificate for BitVectorQc -where - A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a> + PartialEq, - ST: StakeTableScheme - + Serialize - + for<'a> Deserialize<'a> - + PartialEq, -{ - type QcProverParams = QcParams; - - // TODO: later with SNARKs we'll use a smaller verifier parameter - type QcVerifierParams = QcParams; - - type Qc = (A::Signature, BitVec); - type MessageLength = U32; - type QuorumSize = U256; - - /// Sign a message with the signing key - fn sign>( - pp: &A::PublicParameter, - sk: &A::SigningKey, - msg: M, - prng: &mut R, - ) -> Result { - A::sign(pp, sk, msg, prng) - } - - fn assemble( - qc_pp: &Self::QcProverParams, - signers: &BitSlice, - sigs: &[A::Signature], - ) -> Result { - let st_len = qc_pp.stake_table.len(SnapshotVersion::LastEpochStart)?; - if signers.len() != st_len { - return Err(ParameterError(format!( - "bit vector len {} != the number of stake entries {}", - signers.len(), - st_len, - ))); - } - let total_weight: U256 = qc_pp - .stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .zip(signers.iter()) - .fold( - U256::zero(), - |acc, (entry, b)| { - if *b { - acc + entry.1 - } else { - acc - } - }, - ); - if total_weight < qc_pp.threshold { - return Err(ParameterError(format!( - "total_weight {} less than threshold {}", - total_weight, qc_pp.threshold, - ))); - } - let mut ver_keys = vec![]; - for (entry, b) in qc_pp - .stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .zip(signers.iter()) - { - if *b { - ver_keys.push(entry.0.clone()); - } - } - if ver_keys.len() != sigs.len() { - return Err(ParameterError(format!( - "the number of ver_keys {} != the number of partial signatures {}", - ver_keys.len(), - sigs.len(), - ))); - } - let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; - - Ok((sig, signers.into())) - } - - fn check( - qc_vp: &Self::QcVerifierParams, - message: &GenericArray, - qc: &Self::Qc, - ) -> Result { - let (sig, signers) = qc; - let st_len = qc_vp.stake_table.len(SnapshotVersion::LastEpochStart)?; - if signers.len() != st_len { - return Err(ParameterError(format!( - "signers bit vector len {} != the number of stake entries {}", - signers.len(), - st_len, - ))); - } - let total_weight: U256 = qc_vp - .stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .zip(signers.iter()) - .fold( - U256::zero(), - |acc, (entry, b)| { - if *b { - acc + entry.1 - } else { - acc - } - }, - ); - if total_weight < qc_vp.threshold { - return Err(ParameterError(format!( - "total_weight {} less than threshold {}", - total_weight, qc_vp.threshold, - ))); - } - let mut ver_keys = vec![]; - for (entry, b) in qc_vp - .stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .zip(signers.iter()) - { - if *b { - ver_keys.push(entry.0.clone()); - } - } - A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; - - Ok(total_weight) - } - - fn trace( - qc_vp: &Self::QcVerifierParams, - message: &GenericArray<::MessageUnit, Self::MessageLength>, - qc: &Self::Qc, - ) -> Result::VerificationKey>, PrimitivesError> { - let (_sig, signers) = qc; - let st_len = qc_vp.stake_table.len(SnapshotVersion::LastEpochStart)?; - if signers.len() != st_len { - return Err(ParameterError(format!( - "signers bit vector len {} != the number of stake entries {}", - signers.len(), - st_len, - ))); - } - - Self::check(qc_vp, message, qc)?; - - let signer_pks: Vec<_> = qc_vp - .stake_table - .try_iter(SnapshotVersion::LastEpochStart)? - .zip(signers.iter()) - .filter(|(_, b)| **b) - .map(|(pk, _)| pk.0) - .collect(); - Ok(signer_pks) - } -} - -#[cfg(test)] -mod tests { - use hotshot_stake_table::mt_based::StakeTable; - use hotshot_types::traits::stake_table::StakeTableScheme; - use jf_signature::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, - SignatureScheme, - }; - - use super::*; - - macro_rules! test_quorum_certificate { - ($aggsig:tt) => { - type ST = StakeTable<<$aggsig as SignatureScheme>::VerificationKey>; - let mut rng = jf_utils::test_rng(); - - let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); - let key_pair1 = KeyPair::generate(&mut rng); - let key_pair2 = KeyPair::generate(&mut rng); - let key_pair3 = KeyPair::generate(&mut rng); - - let mut st = ST::new(3); - st.register(key_pair1.ver_key(), U256::from(3u8), ()) - .unwrap(); - st.register(key_pair2.ver_key(), U256::from(5u8), ()) - .unwrap(); - st.register(key_pair3.ver_key(), U256::from(7u8), ()) - .unwrap(); - st.advance(); - st.advance(); - - let qc_pp = QcParams { - stake_table: st, - threshold: U256::from(10u8), - agg_sig_pp, - }; - - let msg = [72u8; 32]; - let sig1 = BitVectorQc::<$aggsig, ST>::sign( - &agg_sig_pp, - &msg.into(), - key_pair1.sign_key_ref(), - &mut rng, - ) - .unwrap(); - let sig2 = BitVectorQc::<$aggsig, ST>::sign( - &agg_sig_pp, - &msg.into(), - key_pair2.sign_key_ref(), - &mut rng, - ) - .unwrap(); - let sig3 = BitVectorQc::<$aggsig, ST>::sign( - &agg_sig_pp, - &msg.into(), - key_pair3.sign_key_ref(), - &mut rng, - ) - .unwrap(); - - // happy path - let signers = bitvec![0, 1, 1]; - let qc = BitVectorQc::<$aggsig, ST>::assemble( - &qc_pp, - signers.as_bitslice(), - &[sig2.clone(), sig3.clone()], - ) - .unwrap(); - assert!(BitVectorQc::<$aggsig, ST>::check(&qc_pp, &msg.into(), &qc).is_ok()); - assert_eq!( - BitVectorQc::<$aggsig, ST>::trace(&qc_pp, &msg.into(), &qc).unwrap(), - vec![key_pair2.ver_key(), key_pair3.ver_key()], - ); - - // Check the QC and the QcParams can be serialized / deserialized - assert_eq!( - qc, - Serializer::::deserialize( - &Serializer::::serialize(&qc).unwrap() - ) - .unwrap() - ); - - // (alex) since deserialized stake table's leaf would contain normalized projective - // points with Z=1, which differs from the original projective representation. - // We compare individual fields for equivalence instead. - let de_qc_pp: QcParams<$aggsig, ST> = Serializer::::deserialize( - &Serializer::::serialize(&qc_pp).unwrap(), - ) - .unwrap(); - assert_eq!( - qc_pp.stake_table.commitment(SnapshotVersion::Head).unwrap(), - de_qc_pp - .stake_table - .commitment(SnapshotVersion::Head) - .unwrap(), - ); - assert_eq!( - qc_pp - .stake_table - .commitment(SnapshotVersion::LastEpochStart) - .unwrap(), - de_qc_pp - .stake_table - .commitment(SnapshotVersion::LastEpochStart) - .unwrap(), - ); - assert_eq!(qc_pp.threshold, de_qc_pp.threshold); - assert_eq!(qc_pp.agg_sig_pp, de_qc_pp.agg_sig_pp); - - // bad paths - // number of signatures unmatch - assert!(BitVectorQc::<$aggsig, ST>::assemble( - &qc_pp, - signers.as_bitslice(), - &[sig2.clone()] - ) - .is_err()); - // total weight under threshold - let active_bad = bitvec![1, 1, 0]; - assert!(BitVectorQc::<$aggsig, ST>::assemble( - &qc_pp, - active_bad.as_bitslice(), - &[sig1.clone(), sig2.clone()] - ) - .is_err()); - // wrong bool vector length - let active_bad_2 = bitvec![0, 1, 1, 0]; - assert!(BitVectorQc::<$aggsig, ST>::assemble( - &qc_pp, - active_bad_2.as_bitslice(), - &[sig2, sig3], - ) - .is_err()); - - assert!(BitVectorQc::<$aggsig, ST>::check( - &qc_pp, - &msg.into(), - &(qc.0.clone(), active_bad) - ) - .is_err()); - assert!(BitVectorQc::<$aggsig, ST>::check( - &qc_pp, - &msg.into(), - &(qc.0.clone(), active_bad_2) - ) - .is_err()); - let bad_msg = [70u8; 32]; - assert!(BitVectorQc::<$aggsig, ST>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); - - let bad_sig = &sig1; - assert!(BitVectorQc::<$aggsig, ST>::check( - &qc_pp, - &msg.into(), - &(bad_sig.clone(), qc.1) - ) - .is_err()); - }; - } - #[test] - fn crypto_test_quorum_certificate() { - test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); - } -} diff --git a/hotshot-qc/src/bit_vector_old.rs b/hotshot-qc/src/bit_vector_old.rs deleted file mode 100644 index 938f1e5ca7..0000000000 --- a/hotshot-qc/src/bit_vector_old.rs +++ /dev/null @@ -1,329 +0,0 @@ -//! Implementation for `BitVectorQc` that uses BLS signature + Bit vector. -//! See more details in `HotShot` paper. - -use ark_std::{ - fmt::Debug, - format, - marker::PhantomData, - rand::{CryptoRng, RngCore}, - vec, - vec::Vec, -}; -use bitvec::prelude::*; -use ethereum_types::U256; -use generic_array::GenericArray; -use hotshot_types::traits::{qc::QuorumCertificate, signature_key::StakeTableEntryType}; -use jf_signature::AggregateableSignatureSchemes; -use serde::{Deserialize, Serialize}; -use typenum::U32; - -/// An implementation of QC using BLS signature and a bit-vector. -#[derive(Serialize, Deserialize)] -pub struct BitVectorQc Deserialize<'a>>( - PhantomData, -); - -/// Stake table entry -#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash, Eq)] -pub struct StakeTableEntry { - /// Stake table key - pub stake_key: V, - /// Stake table value - pub stake_amount: U256, -} - -impl StakeTableEntryType for StakeTableEntry { - fn stake(&self) -> U256 { - self.stake_amount - } -} - -/// Public parameters of [`BitVectorQc`] -#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Hash)] -pub struct QcParams { - /// the stake table (snapshot) this QC is verified against - pub stake_entries: Vec>, - /// threshold for the accumulated "weight" of votes to form a QC - pub threshold: U256, - /// public parameter for the aggregated signature scheme - pub agg_sig_pp: P, -} - -impl QuorumCertificate for BitVectorQc -where - A: AggregateableSignatureSchemes + Serialize + for<'a> Deserialize<'a>, -{ - type QcProverParams = QcParams; - - // TODO: later with SNARKs we'll use a smaller verifier parameter - type QcVerifierParams = QcParams; - - type Qc = (A::Signature, BitVec); - type MessageLength = U32; - type QuorumSize = U256; - - fn sign( - agg_sig_pp: &A::PublicParameter, - message: &GenericArray, - sk: &A::SigningKey, - prng: &mut R, - ) -> Result { - A::sign(agg_sig_pp, sk, message, prng) - } - - fn assemble( - qc_pp: &Self::QcProverParams, - signers: &BitSlice, - sigs: &[A::Signature], - ) -> Result { - if signers.len() != qc_pp.stake_entries.len() { - return Err(ParameterError(format!( - "bit vector len {} != the number of stake entries {}", - signers.len(), - qc_pp.stake_entries.len(), - ))); - } - let total_weight: U256 = - qc_pp - .stake_entries - .iter() - .zip(signers.iter()) - .fold(U256::zero(), |acc, (entry, b)| { - if *b { - acc + entry.stake_amount - } else { - acc - } - }); - if total_weight < qc_pp.threshold { - return Err(ParameterError(format!( - "total_weight {} less than threshold {}", - total_weight, qc_pp.threshold, - ))); - } - let mut ver_keys = vec![]; - for (entry, b) in qc_pp.stake_entries.iter().zip(signers.iter()) { - if *b { - ver_keys.push(entry.stake_key.clone()); - } - } - if ver_keys.len() != sigs.len() { - return Err(ParameterError(format!( - "the number of ver_keys {} != the number of partial signatures {}", - ver_keys.len(), - sigs.len(), - ))); - } - let sig = A::aggregate(&qc_pp.agg_sig_pp, &ver_keys[..], sigs)?; - - Ok((sig, signers.into())) - } - - fn check( - qc_vp: &Self::QcVerifierParams, - message: &GenericArray, - qc: &Self::Qc, - ) -> Result { - let (sig, signers) = qc; - if signers.len() != qc_vp.stake_entries.len() { - return Err(ParameterError(format!( - "signers bit vector len {} != the number of stake entries {}", - signers.len(), - qc_vp.stake_entries.len(), - ))); - } - let total_weight: U256 = - qc_vp - .stake_entries - .iter() - .zip(signers.iter()) - .fold(U256::zero(), |acc, (entry, b)| { - if *b { - acc + entry.stake_amount - } else { - acc - } - }); - if total_weight < qc_vp.threshold { - return Err(ParameterError(format!( - "total_weight {} less than threshold {}", - total_weight, qc_vp.threshold, - ))); - } - let mut ver_keys = vec![]; - for (entry, b) in qc_vp.stake_entries.iter().zip(signers.iter()) { - if *b { - ver_keys.push(entry.stake_key.clone()); - } - } - A::multi_sig_verify(&qc_vp.agg_sig_pp, &ver_keys[..], message, sig)?; - - Ok(total_weight) - } - - fn trace( - qc_vp: &Self::QcVerifierParams, - message: &GenericArray<::MessageUnit, Self::MessageLength>, - qc: &Self::Qc, - ) -> Result::VerificationKey>, PrimitivesError> { - let (_sig, signers) = qc; - if signers.len() != qc_vp.stake_entries.len() { - return Err(ParameterError(format!( - "signers bit vector len {} != the number of stake entries {}", - signers.len(), - qc_vp.stake_entries.len(), - ))); - } - - Self::check(qc_vp, message, qc)?; - - let signer_pks: Vec<_> = qc_vp - .stake_entries - .iter() - .zip(signers.iter()) - .filter(|(_, b)| **b) - .map(|(pk, _)| pk.stake_key.clone()) - .collect(); - Ok(signer_pks) - } -} - -#[cfg(test)] -mod tests { - use jf_signature::{ - bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair}, - SignatureScheme, - }; - - use super::*; - - macro_rules! test_quorum_certificate { - ($aggsig:tt) => { - let mut rng = jf_utils::test_rng(); - let agg_sig_pp = $aggsig::param_gen(Some(&mut rng)).unwrap(); - let key_pair1 = KeyPair::generate(&mut rng); - let key_pair2 = KeyPair::generate(&mut rng); - let key_pair3 = KeyPair::generate(&mut rng); - let entry1 = StakeTableEntry { - stake_key: key_pair1.ver_key(), - stake_amount: U256::from(3u8), - }; - let entry2 = StakeTableEntry { - stake_key: key_pair2.ver_key(), - stake_amount: U256::from(5u8), - }; - let entry3 = StakeTableEntry { - stake_key: key_pair3.ver_key(), - stake_amount: U256::from(7u8), - }; - let qc_pp = QcParams { - stake_entries: vec![entry1, entry2, entry3], - threshold: U256::from(10u8), - agg_sig_pp, - }; - let msg = [72u8; 32]; - let sig1 = BitVectorQc::<$aggsig>::sign( - &agg_sig_pp, - &msg.into(), - key_pair1.sign_key_ref(), - &mut rng, - ) - .unwrap(); - let sig2 = BitVectorQc::<$aggsig>::sign( - &agg_sig_pp, - &msg.into(), - key_pair2.sign_key_ref(), - &mut rng, - ) - .unwrap(); - let sig3 = BitVectorQc::<$aggsig>::sign( - &agg_sig_pp, - &msg.into(), - key_pair3.sign_key_ref(), - &mut rng, - ) - .unwrap(); - - // happy path - let signers = bitvec![0, 1, 1]; - let qc = BitVectorQc::<$aggsig>::assemble( - &qc_pp, - signers.as_bitslice(), - &[sig2.clone(), sig3.clone()], - ) - .unwrap(); - assert!(BitVectorQc::<$aggsig>::check(&qc_pp, &msg.into(), &qc).is_ok()); - assert_eq!( - BitVectorQc::<$aggsig>::trace(&qc_pp, &msg.into(), &qc).unwrap(), - vec![key_pair2.ver_key(), key_pair3.ver_key()], - ); - - // Check the QC and the QcParams can be serialized / deserialized - assert_eq!( - qc, - Serializer::::deserialize( - &Serializer::::serialize(&qc).unwrap() - ) - .unwrap() - ); - - assert_eq!( - qc_pp, - Serializer::::deserialize( - &Serializer::::serialize(&qc_pp).unwrap() - ) - .unwrap() - ); - - // bad paths - // number of signatures unmatch - assert!(BitVectorQc::<$aggsig>::assemble( - &qc_pp, - signers.as_bitslice(), - &[sig2.clone()] - ) - .is_err()); - // total weight under threshold - let active_bad = bitvec![1, 1, 0]; - assert!(BitVectorQc::<$aggsig>::assemble( - &qc_pp, - active_bad.as_bitslice(), - &[sig1.clone(), sig2.clone()] - ) - .is_err()); - // wrong bool vector length - let active_bad_2 = bitvec![0, 1, 1, 0]; - assert!(BitVectorQc::<$aggsig>::assemble( - &qc_pp, - active_bad_2.as_bitslice(), - &[sig2, sig3], - ) - .is_err()); - - assert!(BitVectorQc::<$aggsig>::check( - &qc_pp, - &msg.into(), - &(qc.0.clone(), active_bad) - ) - .is_err()); - assert!(BitVectorQc::<$aggsig>::check( - &qc_pp, - &msg.into(), - &(qc.0.clone(), active_bad_2) - ) - .is_err()); - let bad_msg = [70u8; 32]; - assert!(BitVectorQc::<$aggsig>::check(&qc_pp, &bad_msg.into(), &qc).is_err()); - - let bad_sig = &sig1; - assert!( - BitVectorQc::<$aggsig>::check(&qc_pp, &msg.into(), &(bad_sig.clone(), qc.1)) - .is_err() - ); - }; - } - #[test] - fn crypto_test_quorum_certificate() { - test_quorum_certificate!(BLSOverBN254CurveSignatureScheme); - } -} diff --git a/hotshot-qc/src/lib.rs b/hotshot-qc/src/lib.rs deleted file mode 100644 index 3f44a4e766..0000000000 --- a/hotshot-qc/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! This crates offer implementations of quorum certificates used in `HotShot`. -//! Deprecated crate!!! -pub mod bit_vector; -pub mod bit_vector_old; -pub mod snarked; diff --git a/hotshot-qc/src/snarked.rs b/hotshot-qc/src/snarked.rs deleted file mode 100644 index 06f4a7bfaf..0000000000 --- a/hotshot-qc/src/snarked.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! This is a `SNARKed` QC implementation, see more in the `HotShot` paper. - -mod circuit; - -// TODO: add impl: https://github.com/EspressoSystems/hotshot-primitives/issues/5 diff --git a/hotshot-qc/src/snarked/circuit.rs b/hotshot-qc/src/snarked/circuit.rs deleted file mode 100644 index d278896c25..0000000000 --- a/hotshot-qc/src/snarked/circuit.rs +++ /dev/null @@ -1,546 +0,0 @@ -//! Circuit implementation of stake key aggregation for quorum certificates verification. - -use ark_ff::PrimeField; -use ark_std::{format, vec, vec::Vec}; -use jf_relation::{ - errors::CircuitError, - gadgets::{ - ecc::{ - emulated::{EmulatedSWPointVariable, EmulatedTEPointVariable, SWPoint}, - TEPoint, - }, - EmulationConfig, SerializableEmulatedStruct, - }, - BoolVar, Circuit, PlonkCircuit, Variable, -}; -use jf_rescue::{crhf::RescueCRHF, gadgets::RescueNativeGadget, RescueParameter}; - -/// Digest a list of verification keys and their associated stake amounts -/// * `stack_amts` - stake amounts -/// * `keys` - list of verification keys -#[allow(dead_code)] -pub fn compute_stake_table_hash>( - stake_amts: &[F], - keys: &[T], -) -> F { - let mut input_vec = vec![]; - for (&amt, key) in stake_amts.iter().zip(keys.iter()) { - input_vec.extend(key.serialize_to_native_elements()); - input_vec.push(amt); - } - RescueCRHF::sponge_with_bit_padding(&input_vec[..], 1)[0] -} - -/// Traits for verification keys -pub trait VerKeyVar: Sized + Clone { - /// The type of key - type KeyType: Default; - - /// Returns a list of variables associated with this key variable. - fn native_vars(&self) -> Vec; - - /// Aggregate the verification keys with Boolean selectors. - /// * `circuit` - associated Plonk circuit. - /// * `keys` - list of input verification keys. - /// * `selectors` - list of Boolean selectors. - /// * `coef` - the internal curve parameter. - /// * Returns an aggregated key variable. - fn aggregate_with_selectors( - circuit: &mut PlonkCircuit, - keys: &[Self], - selectors: &[BoolVar], - coef: E, - ) -> Result - where - F: PrimeField, - E: EmulationConfig; - - /// Check whether two input verification key variables are equal. - /// * `circuit` - associated Plonk circuit. - /// * `p0` - first verification key variable. - /// * `p1` - second verification key variable. - /// * Returns a Boolean variable indicates whether `p0 == p1`. - fn is_equal( - circuit: &mut PlonkCircuit, - p0: &Self, - p1: &Self, - ) -> Result - where - F: PrimeField, - E: EmulationConfig; - - /// Enforce that two input verification key variables are equal. - /// * `circuit` - associated Plonk circuit. - /// * `p0` - first verification key variable. - /// * `p1` - second verification key variable. - fn enforce_equal( - circuit: &mut PlonkCircuit, - p0: &Self, - p1: &Self, - ) -> Result<(), CircuitError> - where - F: PrimeField, - E: EmulationConfig; -} - -/// Plonk circuit gadget for stake key aggregation for quorum certificates. -pub trait QcKeyAggregateGadget -where - F: RescueParameter, -{ - /// Key aggregation circuit - /// * `vks` - list of stake public keys. - /// * `bit_vec` - the indicator vector for the quorum set, `bit_vec[i] = 1` if `i` is in the quorum set, o/w `bit_vec[i] = 0`. - /// * `agg_vk` - the public aggregated stake key. - /// * `coef` - the internal curve parameter - fn check_aggregate_vk, V: VerKeyVar>( - &mut self, - vks: &[V], - bit_vec: &[BoolVar], - agg_vk: &V, - coef: E, - ) -> Result<(), CircuitError>; - - /// Stake table commitment checking circuit - /// * `vk` - list of stake public keys. - /// * `stake_amts` - list of stake amounts for the corresponding stake keys. - /// * `digest` - the hash of the stake table. - fn check_stake_table_digest, V: VerKeyVar>( - &mut self, - vks: &[V], - stake_amts: &[Variable], - digest: Variable, - ) -> Result<(), CircuitError>; - - /// Quorum threshold checking circuit - /// * `stake_amts` - list of stake amounts for the corresponding stake keys. - /// * `bit_vec` - the indicator vector for the quorum set. - /// * `threshold` - the public quorum threshold. - fn check_threshold( - &mut self, - stake_amts: &[Variable], - bit_vec: &[BoolVar], - threshold: Variable, - ) -> Result<(), CircuitError>; -} - -impl QcKeyAggregateGadget for PlonkCircuit -where - F: RescueParameter, -{ - fn check_aggregate_vk, V: VerKeyVar>( - &mut self, - vks: &[V], - bit_vec: &[BoolVar], - agg_vk: &V, - coef: E, - ) -> Result<(), CircuitError> { - if vks.len() != bit_vec.len() { - return Err(CircuitError::ParameterError(format!( - "bit vector len {} != the number of stake keys {}", - bit_vec.len(), - vks.len(), - ))); - } - let agg_key_var = V::aggregate_with_selectors::(self, vks, bit_vec, coef)?; - V::enforce_equal(self, &agg_key_var, agg_vk) - } - - fn check_stake_table_digest, V: VerKeyVar>( - &mut self, - vks: &[V], - stake_amts: &[Variable], - digest: Variable, - ) -> Result<(), CircuitError> { - if stake_amts.len() != vks.len() { - return Err(CircuitError::ParameterError(format!( - "the number of stake amounts {} != the number of stake verification keys {}", - stake_amts.len(), - vks.len(), - ))); - } - let mut hash_input = vec![]; - for (vk, &stake_amt) in vks.iter().zip(stake_amts.iter()) { - hash_input.append(&mut vk.native_vars()); - hash_input.push(stake_amt); - } - let expected_digest = - RescueNativeGadget::::rescue_sponge_with_padding(self, &hash_input, 1)?[0]; - self.enforce_equal(expected_digest, digest) - } - - fn check_threshold( - &mut self, - stake_amts: &[Variable], - bit_vec: &[BoolVar], - threshold: Variable, - ) -> Result<(), CircuitError> { - if stake_amts.len() != bit_vec.len() { - return Err(CircuitError::ParameterError(format!( - "bit vector len {} != the number of stake entries {}", - bit_vec.len(), - stake_amts.len(), - ))); - } - let mut active_amts = vec![]; - for (&stake_amt, &bit) in stake_amts.iter().zip(bit_vec.iter()) { - active_amts.push(self.mul(stake_amt, bit.into())?); - } - let sum = self.sum(&active_amts[..])?; - self.enforce_geq(sum, threshold) - } -} - -impl VerKeyVar for EmulatedSWPointVariable -where - E: PrimeField, -{ - type KeyType = SWPoint; - - fn native_vars(&self) -> Vec { - let mut ret = self.0.native_vars(); - ret.append(&mut self.1.native_vars()); - ret.push(self.2 .0); - ret - } - - fn aggregate_with_selectors( - circuit: &mut PlonkCircuit, - keys: &[Self], - selectors: &[BoolVar], - coef: E, - ) -> Result - where - F: PrimeField, - E: EmulationConfig, - { - let neutral_point = Self::KeyType::default(); - let emulated_neutral_point_var = - circuit.create_constant_emulated_sw_point_variable(neutral_point)?; - let mut agg_key_var = emulated_neutral_point_var.clone(); - for (key, &bit) in keys.iter().zip(selectors.iter()) { - let point_var = circuit.binary_emulated_sw_point_vars_select( - bit, - &emulated_neutral_point_var, - key, - )?; - agg_key_var = circuit.emulated_sw_ecc_add::(&agg_key_var, &point_var, coef)?; - } - Ok(agg_key_var) - } - - fn is_equal( - circuit: &mut PlonkCircuit, - p0: &Self, - p1: &Self, - ) -> Result - where - F: PrimeField, - E: EmulationConfig, - { - circuit.is_emulated_sw_point_equal(p0, p1) - } - - fn enforce_equal( - circuit: &mut PlonkCircuit, - p0: &Self, - p1: &Self, - ) -> Result<(), CircuitError> - where - F: PrimeField, - E: EmulationConfig, - { - circuit.enforce_emulated_sw_point_equal(p0, p1) - } -} - -impl VerKeyVar for EmulatedTEPointVariable -where - E: PrimeField, -{ - type KeyType = TEPoint; - - fn native_vars(&self) -> Vec { - let mut ret = self.0.native_vars(); - ret.append(&mut self.1.native_vars()); - ret - } - - fn aggregate_with_selectors( - circuit: &mut PlonkCircuit, - keys: &[Self], - selectors: &[BoolVar], - coef: E, - ) -> Result - where - F: PrimeField, - E: EmulationConfig, - { - let neutral_point = Self::KeyType::default(); - let emulated_neutral_point_var = - circuit.create_constant_emulated_te_point_variable(neutral_point)?; - let mut agg_key_var = emulated_neutral_point_var.clone(); - for (key, &bit) in keys.iter().zip(selectors.iter()) { - let point_var = circuit.binary_emulated_te_point_vars_select( - bit, - &emulated_neutral_point_var, - key, - )?; - agg_key_var = circuit.emulated_te_ecc_add::(&agg_key_var, &point_var, coef)?; - } - Ok(agg_key_var) - } - - fn is_equal( - circuit: &mut PlonkCircuit, - p0: &Self, - p1: &Self, - ) -> Result - where - F: PrimeField, - E: EmulationConfig, - { - circuit.is_emulated_te_point_equal(p0, p1) - } - - fn enforce_equal( - circuit: &mut PlonkCircuit, - p0: &Self, - p1: &Self, - ) -> Result<(), CircuitError> - where - F: PrimeField, - E: EmulationConfig, - { - circuit.enforce_emulated_te_point_equal(p0, p1) - } -} - -#[cfg(test)] -mod tests { - use ark_bls12_377::{g1::Config as Param377, Fq as Fq377}; - use ark_bn254::{g1::Config as Param254, Fq as Fq254, Fr as Fr254}; - use ark_ec::{ - short_weierstrass::{Projective, SWCurveConfig}, - CurveGroup, - }; - use ark_ff::MontFp; - use ark_std::{vec::Vec, UniformRand, Zero}; - use jf_relation::{ - errors::CircuitError, gadgets::ecc::SWToTEConParam, Circuit, PlonkCircuit, Variable, - }; - - use super::*; - - #[test] - fn crypto_test_vk_aggregate_sw_circuit() -> Result<(), CircuitError> { - let a_ecc = Fq377::zero(); - test_vk_aggregate_sw_circuit_helper::(a_ecc)?; - let a_ecc = Fq254::zero(); - test_vk_aggregate_sw_circuit_helper::(a_ecc) - } - - // TODO: use Aggregate signature APIs to aggregate the keys outside the circuit - fn test_vk_aggregate_sw_circuit_helper(a_ecc: E) -> Result<(), CircuitError> - where - E: EmulationConfig, - F: RescueParameter, - P: SWCurveConfig, - { - let mut rng = jf_utils::test_rng(); - let vk_points: Vec> = - (0..5).map(|_| Projective::

::rand(&mut rng)).collect(); - let selector = [false, true, false, true, false]; - let agg_vk_point = - vk_points - .iter() - .zip(selector.iter()) - .fold( - Projective::

::zero(), - |acc, (x, &b)| { - if b { - acc + x - } else { - acc - } - }, - ); - let agg_vk_point: SWPoint = agg_vk_point.into_affine().into(); - let vk_points: Vec> = vk_points.iter().map(|p| p.into_affine().into()).collect(); - #[allow(clippy::cast_sign_loss)] - let stake_amts: Vec = (0..5).map(|i| F::from((i + 1) as u32)).collect(); - let threshold = F::from(6u8); - let digest = compute_stake_table_hash::>(&stake_amts[..], &vk_points[..]); - - let mut circuit = PlonkCircuit::::new_ultra_plonk(20); - // public input - let agg_vk_var = circuit.create_public_emulated_sw_point_variable(agg_vk_point)?; - let public_input = agg_vk_point.serialize_to_native_elements(); - let threshold_var = circuit.create_variable(threshold)?; - let digest_var = circuit.create_variable(digest)?; - - // add witness - let vk_vars: Vec> = vk_points - .iter() - .map(|&p| circuit.create_emulated_sw_point_variable(p).unwrap()) - .collect(); - let stake_amt_vars: Vec = stake_amts - .iter() - .map(|&amt| circuit.create_variable(amt).unwrap()) - .collect(); - let selector_vars: Vec = selector - .iter() - .map(|&b| circuit.create_boolean_variable(b).unwrap()) - .collect(); - // add circuit gadgets - circuit.check_aggregate_vk::>( - &vk_vars[..], - &selector_vars[..], - &agg_vk_var, - a_ecc, - )?; - circuit.check_stake_table_digest(&vk_vars[..], &stake_amt_vars[..], digest_var)?; - circuit.check_threshold(&stake_amt_vars[..], &selector_vars[..], threshold_var)?; - assert!(circuit.check_circuit_satisfiability(&public_input).is_ok()); - - // bad path: wrong aggregated vk - let tmp_var = agg_vk_var.native_vars()[0]; - let tmp = circuit.witness(tmp_var)?; - *circuit.witness_mut(tmp_var) = F::zero(); - assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); - *circuit.witness_mut(tmp_var) = tmp; - - // bad path: wrong digest - let tmp = circuit.witness(digest_var)?; - *circuit.witness_mut(digest_var) = F::zero(); - assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); - *circuit.witness_mut(digest_var) = tmp; - - // bad path: bad threshold - *circuit.witness_mut(threshold_var) = F::from(7u8); - assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); - - // check input parameter errors - assert!(circuit - .check_aggregate_vk::>( - &vk_vars[..], - &selector_vars[1..], - &agg_vk_var, - a_ecc - ) - .is_err()); - assert!(circuit - .check_stake_table_digest(&vk_vars[..], &stake_amt_vars[1..], digest_var) - .is_err()); - assert!(circuit - .check_threshold(&stake_amt_vars[..], &selector_vars[1..], threshold_var) - .is_err()); - - Ok(()) - } - - #[test] - fn crypto_test_vk_aggregate_te_circuit() -> Result<(), CircuitError> { - let d_ecc : Fq377 = MontFp!("122268283598675559488486339158635529096981886914877139579534153582033676785385790730042363341236035746924960903179"); - test_vk_aggregate_te_circuit_helper::(d_ecc) - } - - // TODO: use Aggregate signature APIs to aggregate the keys outside the circuit - fn test_vk_aggregate_te_circuit_helper(d_ecc: E) -> Result<(), CircuitError> - where - E: EmulationConfig + SWToTEConParam, - F: RescueParameter, - P: SWCurveConfig, - { - let mut rng = jf_utils::test_rng(); - let vk_points: Vec> = - (0..5).map(|_| Projective::

::rand(&mut rng)).collect(); - let selector = [false, true, false, true, false]; - let agg_vk_point = - vk_points - .iter() - .zip(selector.iter()) - .fold( - Projective::

::zero(), - |acc, (x, &b)| { - if b { - acc + x - } else { - acc - } - }, - ); - let agg_vk_point: TEPoint = agg_vk_point.into_affine().into(); - let vk_points: Vec> = vk_points.iter().map(|p| p.into_affine().into()).collect(); - #[allow(clippy::cast_sign_loss)] - let stake_amts: Vec = (0..5).map(|i| F::from((i + 1) as u32)).collect(); - let threshold = F::from(6u8); - let digest = compute_stake_table_hash::>(&stake_amts[..], &vk_points[..]); - - let mut circuit = PlonkCircuit::::new_ultra_plonk(20); - // public input - let agg_vk_var = circuit.create_public_emulated_te_point_variable(agg_vk_point)?; - let public_input = agg_vk_point.serialize_to_native_elements(); - let threshold_var = circuit.create_variable(threshold)?; - let digest_var = circuit.create_variable(digest)?; - - // add witness - let vk_vars: Vec> = vk_points - .iter() - .map(|&p| circuit.create_emulated_te_point_variable(p).unwrap()) - .collect(); - let stake_amt_vars: Vec = stake_amts - .iter() - .map(|&amt| circuit.create_variable(amt).unwrap()) - .collect(); - let selector_vars: Vec = selector - .iter() - .map(|&b| circuit.create_boolean_variable(b).unwrap()) - .collect(); - // add circuit gadgets - circuit.check_aggregate_vk::>( - &vk_vars[..], - &selector_vars[..], - &agg_vk_var, - d_ecc, - )?; - circuit.check_stake_table_digest(&vk_vars[..], &stake_amt_vars[..], digest_var)?; - circuit.check_threshold(&stake_amt_vars[..], &selector_vars[..], threshold_var)?; - assert!(circuit.check_circuit_satisfiability(&public_input).is_ok()); - - // bad path: wrong aggregated vk - let tmp_var = agg_vk_var.native_vars()[0]; - let tmp = circuit.witness(tmp_var)?; - *circuit.witness_mut(tmp_var) = F::zero(); - assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); - *circuit.witness_mut(tmp_var) = tmp; - - // bad path: wrong digest - let tmp = circuit.witness(digest_var)?; - *circuit.witness_mut(digest_var) = F::zero(); - assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); - *circuit.witness_mut(digest_var) = tmp; - - // bad path: bad threshold - *circuit.witness_mut(threshold_var) = F::from(7u8); - assert!(circuit.check_circuit_satisfiability(&public_input).is_err()); - - // check input parameter errors - assert!(circuit - .check_aggregate_vk::>( - &vk_vars[..], - &selector_vars[1..], - &agg_vk_var, - d_ecc - ) - .is_err()); - assert!(circuit - .check_stake_table_digest(&vk_vars[..], &stake_amt_vars[1..], digest_var) - .is_err()); - assert!(circuit - .check_threshold(&stake_amt_vars[..], &selector_vars[1..], threshold_var) - .is_err()); - - Ok(()) - } -} From b8a7659ce57c2579c1bf554e7e7285210502ce88 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:55:24 -0400 Subject: [PATCH 1113/1393] Allow intercepting network messages for byzantine testing (#3335) --- .../src/auction_results_provider_types.rs | 2 +- hotshot/src/lib.rs | 290 +++++++++++++++--- hotshot/src/tasks/mod.rs | 175 ++++++++++- hotshot/src/types/handle.rs | 8 +- task-impls/src/consensus/handlers.rs | 22 +- task-impls/src/helpers.rs | 9 +- task-impls/src/quorum_vote/handlers.rs | 2 +- types/src/consensus.rs | 2 +- types/src/traits/auction_results_provider.rs | 2 +- types/src/traits/states.rs | 4 +- 10 files changed, 441 insertions(+), 75 deletions(-) diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index 120edd073c..22e84c4189 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -20,7 +20,7 @@ impl HasUrl for TestAuctionResult { } /// The test auction results type is used to mimic the results from the Solver. -#[derive(Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct TestAuctionResultsProvider { /// We intentionally allow for the results to be pre-cooked for the unit test to gurantee a /// particular outcome is met. diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0c5f7b742d..bd5c9fddf4 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -5,7 +5,9 @@ #[cfg(feature = "docs")] pub mod documentation; +use futures::future::{select, Either}; use hotshot_types::traits::network::BroadcastDelay; +use rand::Rng; use vbs::version::StaticVersionType; /// Contains traits consumed by [`SystemContext`] @@ -29,7 +31,7 @@ use async_trait::async_trait; use committable::Committable; use futures::join; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry}; -use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event, network}; +use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; // Internal /// Reexport error type pub use hotshot_types::error::HotShotError; @@ -55,12 +57,11 @@ use hotshot_types::{ // External /// Reexport rand crate pub use rand; -use tasks::{add_request_network_task, add_response_task}; use tracing::{debug, instrument, trace}; use vbs::version::Version; use crate::{ - tasks::{add_consensus_tasks, add_network_event_task, add_network_message_task}, + tasks::{add_consensus_tasks, add_network_tasks}, traits::NodeImplementation, types::{Event, SystemContextHandle}, }; @@ -558,7 +559,6 @@ impl> SystemContext { /// Spawn all tasks that operate on [`SystemContextHandle`]. /// /// For a list of which tasks are being spawned, see this module's documentation. - #[allow(clippy::too_many_lines)] pub async fn run_tasks(&self) -> SystemContextHandle { let consensus_registry = ConsensusTaskRegistry::new(); let network_registry = NetworkTaskRegistry::new(); @@ -566,12 +566,6 @@ impl> SystemContext { let output_event_stream = self.external_event_stream.clone(); let internal_event_stream = self.internal_event_stream.clone(); - let network = Arc::clone(&self.network); - let quorum_membership = self.memberships.quorum_membership.clone(); - let da_membership = self.memberships.da_membership.clone(); - let vid_membership = self.memberships.vid_membership.clone(); - let view_sync_membership = self.memberships.view_sync_membership.clone(); - let mut handle = SystemContextHandle { consensus_registry, network_registry, @@ -579,50 +573,253 @@ impl> SystemContext { internal_event_stream: internal_event_stream.clone(), hotshot: self.clone().into(), storage: Arc::clone(&self.storage), + network: Arc::clone(&self.network), + memberships: Arc::clone(&self.memberships), }; - add_network_message_task(&mut handle, Arc::clone(&network)).await; - add_network_message_task(&mut handle, Arc::clone(&network)).await; + add_network_tasks::(&mut handle).await; + add_consensus_tasks::(&mut handle).await; - if let Some(request_receiver) = network.spawn_request_receiver_task().await { - add_request_network_task(&mut handle).await; - add_response_task(&mut handle, request_receiver).await; - } + handle + } - add_network_event_task( - &mut handle, - Arc::clone(&network), - quorum_membership.clone(), - network::quorum_filter, - ) - .await; - add_network_event_task( - &mut handle, - Arc::clone(&network), - quorum_membership, - network::upgrade_filter, - ) - .await; - add_network_event_task( - &mut handle, - Arc::clone(&network), - da_membership, - network::da_filter, - ) - .await; - add_network_event_task( - &mut handle, + #[allow(clippy::too_many_arguments)] + /// Spawn all tasks that operate on [`SystemContextHandle`]. + /// + /// For a list of which tasks are being spawned, see this module's documentation. + pub async fn spawn_twin_handles( + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + nonce: u64, + config: HotShotConfig, + memberships: Memberships, + network: Arc, + initializer: HotShotInitializer, + metrics: ConsensusMetricsValue, + storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, + ) -> (SystemContextHandle, SystemContextHandle) { + let left_system_context = Self::new( + public_key.clone(), + private_key.clone(), + nonce, + config.clone(), + memberships.clone(), Arc::clone(&network), - view_sync_membership, - network::view_sync_filter, - ) - .await; - add_network_event_task(&mut handle, network, vid_membership, network::vid_filter).await; - add_consensus_tasks::(&mut handle).await; - handle + initializer.clone(), + metrics.clone(), + storage.clone(), + auction_results_provider.clone(), + ); + let right_system_context = Self::new( + public_key, + private_key, + nonce, + config, + memberships, + network, + initializer, + metrics, + storage, + auction_results_provider, + ); + + // create registries for both handles + let left_consensus_registry = ConsensusTaskRegistry::new(); + let left_network_registry = NetworkTaskRegistry::new(); + + let right_consensus_registry = ConsensusTaskRegistry::new(); + let right_network_registry = NetworkTaskRegistry::new(); + + // create external channels for both handles + let (left_external_sender, left_external_receiver) = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); + let left_external_event_stream = + (left_external_sender, left_external_receiver.deactivate()); + + let (right_external_sender, right_external_receiver) = + broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); + let right_external_event_stream = + (right_external_sender, right_external_receiver.deactivate()); + + // create internal channels for both handles + let (left_internal_sender, left_internal_receiver) = broadcast(EVENT_CHANNEL_SIZE); + let left_internal_event_stream = ( + left_internal_sender.clone(), + left_internal_receiver.clone().deactivate(), + ); + + let (right_internal_sender, right_internal_receiver) = broadcast(EVENT_CHANNEL_SIZE); + let right_internal_event_stream = ( + right_internal_sender.clone(), + right_internal_receiver.clone().deactivate(), + ); + + // create each handle + let mut left_handle = SystemContextHandle { + consensus_registry: left_consensus_registry, + network_registry: left_network_registry, + output_event_stream: left_external_event_stream.clone(), + internal_event_stream: left_internal_event_stream.clone(), + hotshot: Arc::clone(&left_system_context), + storage: Arc::clone(&left_system_context.storage), + network: Arc::clone(&left_system_context.network), + memberships: Arc::clone(&left_system_context.memberships), + }; + + let mut right_handle = SystemContextHandle { + consensus_registry: right_consensus_registry, + network_registry: right_network_registry, + output_event_stream: right_external_event_stream.clone(), + internal_event_stream: right_internal_event_stream.clone(), + hotshot: Arc::clone(&right_system_context), + storage: Arc::clone(&right_system_context.storage), + network: Arc::clone(&right_system_context.network), + memberships: Arc::clone(&right_system_context.memberships), + }; + + // add consensus tasks to each handle, using their individual internal event streams + add_consensus_tasks::(&mut left_handle).await; + add_consensus_tasks::(&mut right_handle).await; + + // fuse the event streams from both handles before initializing the network tasks + let fused_internal_event_stream = fuse_channels::, RandomTwinsHandler>( + (left_internal_sender, left_internal_receiver), + (right_internal_sender, right_internal_receiver), + ); + + // swap out the event stream on the left handle + left_handle.internal_event_stream = ( + fused_internal_event_stream.0, + fused_internal_event_stream.1.deactivate(), + ); + + // add the network tasks to the left handle. note: because the left handle has the fused event stream, the network tasks on the left handle will handle messages from both handles. + add_network_tasks::(&mut left_handle).await; + + // revert to the original event stream on the left handle, for any applications that want to listen to it + left_handle.internal_event_stream = left_internal_event_stream.clone(); + + (left_handle, right_handle) + } +} + +/// An async broadcast channel +type Channel = (Sender>, Receiver>); + +#[async_trait] +/// Trait for handling messages for a node with a twin copy of consensus +pub trait TwinsHandlerState +where + Self: Sync, + MESSAGE: Send + Sync, +{ + /// Initialize the state + fn new() -> Self; + + /// Handle a message sent to the twin from the outside, forwarding it to one of the two twins. + async fn send_handler(&mut self, event: &MESSAGE) -> Either; + + /// Wrapper for `send_handler`. + async fn send_handler_arc( + lock: &Arc>, + event: &MESSAGE, + ) -> Either { + let mut state = lock.write().await; + + state.send_handler(event).await + } + + /// Handle a message from either twin, forwarding it to the outside + async fn recv_handler(&mut self, event: &Either) -> MESSAGE; + + /// Wrapper for `recv_handler`. + async fn recv_handler_arc( + lock: &Arc>, + event: &Either, + ) -> MESSAGE { + let mut state = lock.write().await; + + state.recv_handler(event).await } } +/// A `TwinsHandlerState` that randomly forwards a message to either twin, +/// and returns messages from both. +pub struct RandomTwinsHandler; + +#[async_trait] +impl TwinsHandlerState for RandomTwinsHandler { + fn new() -> Self { + RandomTwinsHandler + } + async fn send_handler(&mut self, event: &MESSAGE) -> Either { + let random: bool = rand::thread_rng().gen(); + + #[allow(clippy::match_bool)] + match random { + true => Either::Left(event.clone()), + false => Either::Right(event.clone()), + } + } + + async fn recv_handler(&mut self, event: &Either) -> MESSAGE { + match event { + Either::Left(msg) | Either::Right(msg) => msg.clone(), + } + } +} + +/// Fuse two channels into a single channel, using handlers provided by the `STATE` type. +/// +/// Note: the channels are fused using two async loops, whose `JoinHandle`s are dropped. +fn fuse_channels( + left: Channel, + right: Channel, +) -> Channel +where + MESSAGE: Clone + std::marker::Send + std::marker::Sync + 'static, + STATE: TwinsHandlerState + Send + 'static, +{ + let send_state = Arc::new(RwLock::new(STATE::new())); + let recv_state = Arc::clone(&send_state); + + let (left_sender, mut left_receiver) = (left.0, left.1); + let (right_sender, mut right_receiver) = (right.0, right.1); + + let result: Channel = broadcast(EVENT_CHANNEL_SIZE); + let (result_sender, mut result_receiver) = (result.0.clone(), result.1.clone()); + + let _recv_loop_handle = async_spawn(async move { + loop { + let msg = match select(left_receiver.recv(), right_receiver.recv()).await { + Either::Left(msg) => Either::Left(msg.0.unwrap().as_ref().clone()), + Either::Right(msg) => Either::Right(msg.0.unwrap().as_ref().clone()), + }; + + let _ = result_sender + .broadcast(STATE::recv_handler_arc(&recv_state, &msg).await.into()) + .await; + } + }); + + let _send_loop_handle = async_spawn(async move { + loop { + if let Ok(msg) = result_receiver.recv().await { + match STATE::send_handler_arc(&send_state, &msg).await { + Either::Left(msg) => { + let _ = left_sender.broadcast(msg.into()).await; + } + Either::Right(msg) => { + let _ = right_sender.broadcast(msg.into()).await; + } + } + } + } + }); + + result +} + #[async_trait] impl> ConsensusApi for SystemContextHandle @@ -649,6 +846,7 @@ impl> ConsensusApi } } +#[derive(Clone)] /// initializer struct for creating starting block pub struct HotShotInitializer { /// the leaf specified initialization diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5b160263ea..058d7b5621 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -2,10 +2,12 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; - use std::{sync::Arc, time::Duration}; +use async_broadcast::broadcast; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_lock::RwLock; +use async_trait::async_trait; use hotshot_task::task::Task; #[cfg(not(feature = "dependency-tasks"))] use hotshot_task_impls::consensus::ConsensusTaskState; @@ -19,6 +21,7 @@ use hotshot_task_impls::{ use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, + network, network::{NetworkEventTaskState, NetworkMessageTaskState}, request::NetworkRequestState, response::{run_response_task, NetworkResponseState, RequestReceiver}, @@ -28,6 +31,7 @@ use hotshot_task_impls::{ view_sync::ViewSyncTaskState, }; use hotshot_types::{ + constants::EVENT_CHANNEL_SIZE, message::{Messages, VersionedMessage}, traits::{ network::ConnectedNetwork, @@ -62,7 +66,7 @@ pub async fn add_request_network_task>( +pub fn add_response_task>( handle: &mut SystemContextHandle, request_receiver: RequestReceiver, ) { @@ -79,23 +83,23 @@ pub async fn add_response_task>( handle.internal_event_stream.1.activate_cloned(), )); } + /// Add the network task to handle messages and publish events. -pub async fn add_network_message_task< +pub fn add_network_message_task< TYPES: NodeType, I: NodeImplementation, NET: ConnectedNetwork, >( handle: &mut SystemContextHandle, - channel: Arc, + channel: &Arc, ) { - let net = Arc::clone(&channel); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { event_stream: handle.internal_event_stream.0.clone(), }; let decided_upgrade_certificate = Arc::clone(&handle.hotshot.decided_upgrade_certificate); - let network = Arc::clone(&net); + let network = Arc::clone(channel); let mut state = network_state.clone(); let task_handle = async_spawn(async move { loop { @@ -138,8 +142,9 @@ pub async fn add_network_message_task< }); handle.network_registry.register(task_handle); } + /// Add the network task to handle events and send messages. -pub async fn add_network_event_task< +pub fn add_network_event_task< TYPES: NodeType, I: NodeImplementation, NET: ConnectedNetwork, @@ -194,3 +199,159 @@ pub async fn add_consensus_tasks>( #[cfg(feature = "rewind")] handle.add_task(RewindTaskState::::create_from(&handle).await); } + +#[async_trait] +/// Trait for intercepting and modifying messages between the network and consensus layers. +/// +/// Consensus <-> [Byzantine logic layer] <-> Network +pub trait EventTransformerState> +where + Self: Sized + Send + Sync + 'static, +{ + /// Initialize the state + fn new() -> Self; + + /// modify incoming messages from the network + fn transform_in(&mut self, event: &HotShotEvent) -> HotShotEvent; + + /// modify outgoing messages from the network + fn transform_out(&mut self, event: &HotShotEvent) -> HotShotEvent; + + /// `transform_in`, but wrapping the state in an `Arc` lock + async fn transform_in_arc( + lock: Arc>, + event: &HotShotEvent, + ) -> HotShotEvent { + let mut state = lock.write().await; + + state.transform_in(event) + } + + /// `transform_out`, but wrapping the state in an `Arc` lock + async fn transform_out_arc( + lock: Arc>, + event: &HotShotEvent, + ) -> HotShotEvent { + let mut state = lock.write().await; + + state.transform_out(event) + } + + /// Add byzantine network tasks with the trait + async fn add_network_tasks(handle: &mut SystemContextHandle) { + let state_in = Arc::new(RwLock::new(Self::new())); + let state_out = Arc::clone(&state_in); + + // channel between the task spawned in this function and the network tasks. + // with this, we can control exactly what events the network tasks see. + let (sender, mut receiver) = broadcast(EVENT_CHANNEL_SIZE); + + // replace the internal event stream with the one we just created, + // so that the network tasks are spawned with our channel. + let mut internal_event_stream = (sender.clone(), receiver.clone().deactivate()); + std::mem::swap( + &mut internal_event_stream, + &mut handle.internal_event_stream, + ); + + // spawn the network tasks with our newly-created channel + add_network_tasks::(handle).await; + + // create a copy of the original receiver + let (original_sender, mut original_receiver) = ( + internal_event_stream.0.clone(), + internal_event_stream.1.activate_cloned(), + ); + + // spawn a task to listen on the (original) internal event stream, + // and broadcast the transformed events to the replacement event stream we just created. + let out_handle = async_spawn(async move { + loop { + if let Ok(msg) = original_receiver.recv().await { + let _ = sender + .broadcast( + Self::transform_out_arc(Arc::clone(&state_out), &msg) + .await + .into(), + ) + .await; + } + } + }); + + // spawn a task to listen on the newly created event stream, + // and broadcast the transformed events to the original internal event stream + let in_handle = async_spawn(async move { + loop { + if let Ok(msg) = receiver.recv().await { + let _ = original_sender + .broadcast( + Self::transform_in_arc(Arc::clone(&state_in), &msg) + .await + .into(), + ) + .await; + } + } + }); + + handle.network_registry.register(out_handle); + handle.network_registry.register(in_handle); + + // put the old channel back. + std::mem::swap( + &mut internal_event_stream, + &mut handle.internal_event_stream, + ); + } +} + +/// adds tasks for sending/receiving messages to/from the network. +pub async fn add_network_tasks>( + handle: &mut SystemContextHandle, +) { + let network = Arc::clone(&handle.network); + let quorum_membership = handle.memberships.quorum_membership.clone(); + let da_membership = handle.memberships.da_membership.clone(); + let vid_membership = handle.memberships.vid_membership.clone(); + let view_sync_membership = handle.memberships.view_sync_membership.clone(); + + add_network_message_task(handle, &network); + add_network_message_task(handle, &network); + + if let Some(request_receiver) = network.spawn_request_receiver_task().await { + add_request_network_task(handle).await; + add_response_task(handle, request_receiver); + } + + add_network_event_task( + handle, + Arc::clone(&network), + quorum_membership.clone(), + network::quorum_filter, + ); + add_network_event_task( + handle, + Arc::clone(&network), + quorum_membership, + network::upgrade_filter, + ); + add_network_event_task( + handle, + Arc::clone(&network), + da_membership, + network::da_filter, + ); + add_network_event_task( + handle, + Arc::clone(&network), + view_sync_membership, + network::view_sync_filter, + ); + add_network_event_task( + handle, + Arc::clone(&network), + vid_membership, + network::vid_filter, + ); +} diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 6342f15f9f..58692d7625 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::instrument; -use crate::{traits::NodeImplementation, types::Event, SystemContext}; +use crate::{traits::NodeImplementation, types::Event, Memberships, SystemContext}; /// Event streaming handle for a [`SystemContext`] instance running in the background /// @@ -49,6 +49,12 @@ pub struct SystemContextHandle> { /// Reference to the internal storage for consensus datum. pub(crate) storage: Arc>, + + /// Networks used by the instance of hotshot + pub network: Arc, + + /// Memberships used by consensus + pub memberships: Arc>, } impl + 'static> SystemContextHandle { diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 9578df9671..c57e248321 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -3,15 +3,6 @@ use core::time::Duration; use std::{marker::PhantomData, sync::Arc}; -use super::ConsensusTaskState; -use crate::{ - events::HotShotEvent, - helpers::{ - broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, - temp_validate_proposal_safety_and_liveness, update_view, validate_proposal_view_and_certs, - AnyhowTracing, SEND_VIEW_CHANGE_EVENT, - }, -}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -21,9 +12,8 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::Committable; use futures::FutureExt; -use hotshot_types::consensus::OuterConsensus; use hotshot_types::{ - consensus::{CommitmentAndMetadata, View}, + consensus::{CommitmentAndMetadata, OuterConsensus, View}, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, @@ -45,6 +35,16 @@ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use vbs::version::Version; +use super::ConsensusTaskState; +use crate::{ + events::HotShotEvent, + helpers::{ + broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, + temp_validate_proposal_safety_and_liveness, update_view, validate_proposal_view_and_certs, + AnyhowTracing, SEND_VIEW_CHANGE_EVENT, + }, +}; + /// Create the header for a proposal, build the proposal, and broadcast /// the proposal send evnet. #[allow(clippy::too_many_arguments)] diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 8282729516..ab046687d0 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -4,10 +4,6 @@ use std::{ sync::Arc, }; -use crate::{ - events::{HotShotEvent, ProposalMissing}, - request::REQUEST_TIMEOUT, -}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{broadcast, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; @@ -36,6 +32,11 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + request::REQUEST_TIMEOUT, +}; + /// Trigger a request to the network for a proposal for a view and wait for the response pub(crate) async fn fetch_proposal( view: TYPES::Time, diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 7976e5d413..b8552e0f7d 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -5,8 +5,8 @@ use std::sync::Arc; use anyhow::Result; use async_broadcast::Sender; use chrono::Utc; -use hotshot_types::consensus::OuterConsensus; use hotshot_types::{ + consensus::OuterConsensus, data::QuorumProposal, event::{Event, EventType}, traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index d21808da01..f99fad4852 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -226,7 +226,7 @@ impl<'a, TYPES: NodeType> Drop for ConsensusUpgradableReadLockGuard<'a, TYPES> { /// A reference to the consensus algorithm /// /// This will contain the state of all rounds. -#[derive(custom_debug::Debug)] +#[derive(custom_debug::Debug, Clone)] pub struct Consensus { /// The validated states that are currently loaded in memory. validated_state_map: BTreeMap>, diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index 4b65d4238d..c6cd4c2f51 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -20,7 +20,7 @@ pub trait HasUrl { /// `AuctionResult`, also implements the [`HasUrl`] trait, which requires that the output /// type has the requisite fields available. #[async_trait] -pub trait AuctionResultsProvider: Send + Sync { +pub trait AuctionResultsProvider: Send + Sync + Clone { /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: HasUrl; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index d7974b8588..fb3caeb9ae 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -20,7 +20,7 @@ use crate::{ }; /// Instance-level state, which allows us to fetch missing validated state. -pub trait InstanceState: Debug + Send + Sync {} +pub trait InstanceState: Debug + Clone + Send + Sync {} /// Application-specific state delta, which will be used to store a list of merkle tree entries. pub trait StateDelta: @@ -38,7 +38,7 @@ pub trait StateDelta: /// produce a new state, with the modifications from the block applied /// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) pub trait ValidatedState: - Serialize + DeserializeOwned + Debug + Default + PartialEq + Eq + Send + Sync + Serialize + DeserializeOwned + Debug + Default + PartialEq + Eq + Send + Sync + Clone { /// The error type for this particular type of ledger state type Error: Error + Debug + Send + Sync; From bc5cd65a48f7bb5dd10e58eb6b6385da8dce5a60 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 2 Jul 2024 18:10:32 +0200 Subject: [PATCH 1114/1393] Bring back failure counter increment and adjust warning message (#3409) --- hotshot/src/traits/networking/combined_network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 5af4b07926..b3fd23ba0b 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -141,8 +141,7 @@ impl CombinedNetworks { // If the primary failed more than `COMBINED_NETWORK_MIN_PRIMARY_FAILURES` times, // we don't want to delay this message, and from now on we consider the primary as down warn!( - "Primary failed more than {} times and is considered down now", - COMBINED_NETWORK_MIN_PRIMARY_FAILURES + "View progression is slower than normally, stop delaying messages on the secondary" ); self.primary_down.store(true, Ordering::Relaxed); primary_failed = true; @@ -199,6 +198,7 @@ impl CombinedNetworks { // The task hasn't been cancelled, the primary probably failed. // Increment the primary fail counter and send the message. debug!("Sending on secondary after delay, message possibly has not reached recipient on primary"); + primary_fail_counter.fetch_add(1, Ordering::Relaxed); secondary_future.await }); Ok(()) From 7aa99ed541c0b41bb5e385befc398ea6ca0a36a1 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 8 Jul 2024 15:49:08 +0200 Subject: [PATCH 1115/1393] Use correct view when requesting new block (#3414) --- task-impls/src/transactions.rs | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 49fa374e37..e7aedfce77 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -3,11 +3,6 @@ use std::{ time::{Duration, Instant}, }; -use crate::{ - builder::BuilderClient, - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; use anyhow::{bail, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_sleep; @@ -34,6 +29,12 @@ use hotshot_types::{ }; use tracing::{debug, error, instrument, warn}; +use crate::{ + builder::BuilderClient, + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; + // Parameters for builder querying algorithm /// Proportion of builders queried in first batch, dividend @@ -168,7 +169,7 @@ impl> TransactionTaskState> TransactionTaskState (TYPES::Time, VidCommitment) { + #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] + async fn latest_known_vid_commitment( + &self, + block_view: TYPES::Time, + ) -> (TYPES::Time, VidCommitment) { let consensus = self.consensus.read().await; - let mut prev_view = TYPES::Time::new(self.cur_view.saturating_sub(1)); + let mut prev_view = TYPES::Time::new(block_view.saturating_sub(1)); // Search through all previous views... while prev_view != TYPES::Time::genesis() { @@ -285,12 +289,12 @@ impl> TransactionTaskState Option> { + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view), name = "wait_for_block", level = "error")] + async fn wait_for_block(&self, block_view: TYPES::Time) -> Option> { let task_start_time = Instant::now(); // Find commitment to the block we want to build upon - let (view_num, parent_comm) = self.latest_known_vid_commitment().await; + let (view_num, parent_comm) = self.latest_known_vid_commitment(block_view).await; let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( &self.private_key, parent_comm.as_ref(), From 8f4d8b0efb067cf94ffe13c166c0eef07498355d Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 8 Jul 2024 11:41:20 -0600 Subject: [PATCH 1116/1393] [CX-Marketplace] - Develop Mock Solver API for Testing Suite (#3412) * tmp * first solver api done, implementatio next * clippy * make instance * finally add integration * add simple unit test * fix test to support errors as well * tests passing * fmt * nuke builder toml * move solver type to spawn the server and test when spawned * Update crates/fakeapi/src/fake_solver.rs Co-authored-by: lukaszrzasik * Update crates/fakeapi/src/fake_solver.rs Co-authored-by: lukaszrzasik * feedback addressed * fix lints * fix fmt --------- Co-authored-by: lukaszrzasik --- example-types/Cargo.toml | 1 + .../src/auction_results_provider_types.rs | 30 +- fakeapi/Cargo.toml | 26 ++ fakeapi/apis/solver.toml | 27 ++ fakeapi/src/fake_solver.rs | 194 ++++++++++++ fakeapi/src/lib.rs | 4 + testing/Cargo.toml | 3 + testing/src/test_builder.rs | 12 + testing/src/test_launcher.rs | 3 +- testing/src/test_runner.rs | 84 ++++- testing/tests/tests_5/fake_solver.rs | 297 ++++++++++++++++++ 11 files changed, 659 insertions(+), 22 deletions(-) create mode 100644 fakeapi/Cargo.toml create mode 100644 fakeapi/apis/solver.toml create mode 100644 fakeapi/src/fake_solver.rs create mode 100644 fakeapi/src/lib.rs create mode 100644 testing/tests/tests_5/fake_solver.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index e7877def99..db0cf6c275 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -35,6 +35,7 @@ ethereum-types = { workspace = true } hotshot-task = { path = "../task" } vbs = { workspace = true } url = { workspace = true } +reqwest = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index 22e84c4189..e7c9046f1d 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -4,10 +4,11 @@ use hotshot_types::traits::{ auction_results_provider::{AuctionResultsProvider, HasUrl}, node_implementation::NodeType, }; +use serde::{Deserialize, Serialize}; use url::Url; /// A mock result for the auction solver. This type is just a pointer to a URL. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct TestAuctionResult { /// The URL of the builder to reach out to. pub url: Url, @@ -30,6 +31,11 @@ pub struct TestAuctionResultsProvider { /// system for logical tests. This will guarantee that `fetch_auction_result` always throws an /// error. pub should_return_err: bool, + + /// The broadcast URL that the solver is running on. This type allows for the url to be + /// optional, where `None` means to just return whatever `solver_results` contains, and `Some` + /// means that we have a `FakeSolver` instance available to query. + pub broadcast_url: Option, } #[async_trait] @@ -40,13 +46,23 @@ impl AuctionResultsProvider for TestAuctionResultsProvid /// in the solver. async fn fetch_auction_result( &self, - _view_number: TYPES::Time, + view_number: TYPES::Time, ) -> Result> { - if self.should_return_err { - bail!("Something went wrong") - } + if let Some(url) = &self.broadcast_url { + let resp = + reqwest::get(url.join(&format!("/v0/api/auction_results/{}", *view_number))?) + .await? + .json::>() + .await?; - // Otherwise, return our pre-made results - Ok(self.solver_results.clone()) + Ok(resp) + } else { + if self.should_return_err { + bail!("Something went wrong") + } + + // Otherwise, return our pre-made results + Ok(self.solver_results.clone()) + } } } diff --git a/fakeapi/Cargo.toml b/fakeapi/Cargo.toml new file mode 100644 index 0000000000..764a3077f0 --- /dev/null +++ b/fakeapi/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "hotshot-fakeapi" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +homepage.workspace = true +documentation.workspace = true +repository.workspace = true + +[dependencies] +toml = { workspace = true } +tide-disco = { workspace = true } +anyhow = { workspace = true } +hotshot-types = { path = "../types" } +vbs = { workspace = true } +serde = { workspace = true } +rand = { workspace = true } +hotshot-example-types = { path = "../example-types" } +async-trait = { workspace = true } +futures = { workspace = true } +async-lock = { workspace = true } +tracing = { workspace = true } + +[lints] +workspace = true diff --git a/fakeapi/apis/solver.toml b/fakeapi/apis/solver.toml new file mode 100644 index 0000000000..8c68fcf390 --- /dev/null +++ b/fakeapi/apis/solver.toml @@ -0,0 +1,27 @@ +[meta] +NAME = "fake-solver" +DESCRIPTION = "Fake Solver for testing within hotshot" +FORMAT_VERSION = "0.1.0" + +# GET the auction result - non permissioned +[route.get_auction_results_non_permissioned] +PATH = ["auction_results/:view_number"] +":view_number" = "Integer" +METHOD = "GET" +DOC = """ +GET a fake auction result from the fake Solver. Returns a json object containing a list of +builder URLs corresponding to other running instances of fake-builder, or an empty list if no +values are present. This endpoint is open access. +""" + +# GET the auction result - permissioned +[route.get_auction_results_permissioned] +PATH = ["auction_results/:view_number/:signature"] +":view_number" = "Integer" +":signature" = "TaggedBase64" +METHOD = "GET" +DOC = """ +GET a fake auction result from the fake Solver. Returns a json object containing a list of +builder URLs corresponding to other running instances of fake-builder, or an empty list if no +values are present. This endpoint checks the leader provided in the signature. +""" diff --git a/fakeapi/src/fake_solver.rs b/fakeapi/src/fake_solver.rs new file mode 100644 index 0000000000..ab40cbeefc --- /dev/null +++ b/fakeapi/src/fake_solver.rs @@ -0,0 +1,194 @@ +use anyhow::Result; +use async_lock::RwLock; +use futures::FutureExt; +use hotshot_example_types::auction_results_provider_types::TestAuctionResult; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use std::{ + io::{self, ErrorKind}, + thread, time, +}; +use tide_disco::{ + api::ApiError, + error::ServerError, + method::{ReadState, WriteState}, + Api, App, Url, +}; +use vbs::version::{StaticVersion, StaticVersionType}; + +/// The max time that HotShot will wait for the solver to complete +const SOLVER_MAX_TIMEOUT_S: time::Duration = time::Duration::from_secs(1); + +/// The type of fake solver error +pub enum FakeSolverFaultType { + /// A 500 error + InternalServerFault, + + /// An arbitrary timeout error + TimeoutFault, +} + +/// The state of the fake solver instance +#[derive(Debug, Clone)] +pub struct FakeSolverState { + /// The rate at which an error of any kind occurs + pub error_pct: f32, + + /// The available builder list + pub available_builders: Vec, +} + +impl FakeSolverState { + /// Make a new `FakeSolverState` object + #[must_use] + pub fn new(error_pct: Option, available_builders: Vec) -> Self { + Self { + error_pct: error_pct.unwrap_or(0.0), + available_builders, + } + } + + /// Runs the fake solver + /// # Errors + /// This errors if tide disco runs into an issue during serving + /// # Panics + /// This panics if unable to register the api with tide disco + pub async fn run(self, url: Url) -> io::Result<()> { + let solver_api = define_api::, StaticVersion<0, 1>>() + .map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api"))?; + let state = RwLock::new(self); + let mut app = App::, ServerError>::with_state(state); + app.register_module::>("api", solver_api) + .expect("Error registering api"); + app.serve(url, StaticVersion::<0, 1> {}).await + } + + /// If a random fault event happens, what fault should we send? + #[must_use] + fn should_fault(&self) -> Option { + if rand::random::() < self.error_pct { + // Spin a random number over the fault types + if rand::random::() < 0.5 { + return Some(FakeSolverFaultType::InternalServerFault); + } + + return Some(FakeSolverFaultType::TimeoutFault); + } + + None + } + + /// Dumps back the builders with non deterministic error if the `error_pct` field + /// is nonzero. + /// + /// # Errors + /// Returns an error if the `should_fault` method is `Some`. + fn dump_builders(&self) -> Result, ServerError> { + if let Some(fault) = self.should_fault() { + match fault { + FakeSolverFaultType::InternalServerFault => { + return Err(ServerError { + status: tide_disco::StatusCode::INTERNAL_SERVER_ERROR, + message: "Internal Server Error".to_string(), + }); + } + FakeSolverFaultType::TimeoutFault => { + // Sleep for the preconfigured 1 second timeout interval + thread::sleep(SOLVER_MAX_TIMEOUT_S); + } + } + } + + // Now just send the builder urls + Ok(self + .available_builders + .iter() + .map(|url| TestAuctionResult { url: url.clone() }) + .collect()) + } +} + +/// The `FakeSolverApi` is a mock API which mimics the API contract of the solver and returns +/// custom types that are relevant to HotShot. +#[async_trait::async_trait] +pub trait FakeSolverApi { + /// Get the auction results without checking the signature. + async fn get_auction_results_non_permissioned( + &self, + _view_number: u64, + ) -> Result, ServerError>; + + /// Get the auction results with a valid signature. + async fn get_auction_results_permissioned( + &self, + _view_number: u64, + _signature: &::PureAssembledSignatureType, + ) -> Result, ServerError>; +} + +#[async_trait::async_trait] +impl FakeSolverApi for FakeSolverState { + /// Get the auction results without checking the signature. + async fn get_auction_results_non_permissioned( + &self, + _view_number: u64, + ) -> Result, ServerError> { + self.dump_builders() + } + + /// Get the auction results with a valid signature. + async fn get_auction_results_permissioned( + &self, + _view_number: u64, + _signature: &::PureAssembledSignatureType, + ) -> Result, ServerError> { + self.dump_builders() + } +} + +/// Defines the API for the Fake solver. +/// # Errors +/// Returns an error if any of the initialization operations fail. +/// # Panics +/// Panics when type conversion fails. +pub fn define_api() -> Result, ApiError> +where + TYPES: NodeType, + State: 'static + Send + Sync + ReadState + WriteState, + ::State: Send + Sync + FakeSolverApi, + VER: StaticVersionType + 'static, +{ + let api_toml = toml::from_str::(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/apis", + "/solver.toml" + ))) + .expect("API file is not valid toml"); + + let mut api = Api::::new(api_toml)?; + api.get("get_auction_results_non_permissioned", |req, state| { + async move { + let view_number = req.integer_param("view_number")?; + state + .get_auction_results_non_permissioned(view_number) + .await + } + .boxed() + })? + .get("get_auction_results_permissioned", |req, state| { + async move { + let view_number = req.integer_param("view_number")?; + let signature = req.tagged_base64_param("signature")?; + state + .get_auction_results_permissioned( + view_number, + &signature.try_into().map_err(|_| ServerError { + message: "Invalid signature".to_string(), + status: tide_disco::StatusCode::UNPROCESSABLE_ENTITY, + })?, + ) + .await + } + .boxed() + })?; + Ok(api) +} diff --git a/fakeapi/src/lib.rs b/fakeapi/src/lib.rs new file mode 100644 index 0000000000..a4ebf66a13 --- /dev/null +++ b/fakeapi/src/lib.rs @@ -0,0 +1,4 @@ +//! Fake APIs + +/// Fake solver +pub mod fake_solver; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 090c9595fb..e826349de8 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -31,6 +31,7 @@ hotshot-macros = { path = "../macros" } hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } +hotshot-fakeapi = { path = "../fakeapi" } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } jf-signature = { workspace = true } @@ -47,6 +48,8 @@ vbs = { workspace = true } lru = { workspace = true } tagged-base64.workspace = true vec1 = { workspace = true } +reqwest = { workspace = true } +url = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 40257c077f..37756528bf 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -76,6 +76,8 @@ pub struct TestDescription { pub view_sync_properties: ViewSyncTaskDescription, /// description of builders to run pub builders: Vec1, + /// description of the solver to run + pub solver: FakeSolverApiDescription, } /// Describes a possible change to builder status during test @@ -97,6 +99,12 @@ pub struct BuilderDescription { pub changes: HashMap, } +#[derive(Clone, Debug)] +pub struct FakeSolverApiDescription { + /// The rate at which errors occur in the mock solver API + pub error_pct: f32, +} + impl Default for TimingData { fn default() -> Self { Self { @@ -249,6 +257,10 @@ impl Default for TestDescription { changes: HashMap::new() } ], + solver: FakeSolverApiDescription { + // Default to a 10% error rate. + error_pct: 0.1, + }, } } } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 83caf521ea..5aa17143b5 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -36,7 +36,7 @@ pub struct ResourceGenerators> { /// generator for resources pub resource_generator: ResourceGenerators, - /// metadasta used for tasks + /// metadata used for tasks pub metadata: TestDescription, } @@ -47,6 +47,7 @@ impl> TestLauncher { launcher: self, nodes: Vec::new(), + solver_server: None, late_start: HashMap::new(), next_node_id: 0, _pd: PhantomData, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 32aca14760..0139f870f5 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -1,11 +1,16 @@ #![allow(clippy::panic)] +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use std::{ collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, sync::Arc, }; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use async_broadcast::broadcast; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use futures::future::join_all; use hotshot::{ @@ -17,6 +22,7 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; +use hotshot_fakeapi::fake_solver::FakeSolverState; use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, @@ -40,7 +46,7 @@ use super::{ txn_task::TxnTask, }; use crate::{ - block_builder::TestBuilderImplementation, + block_builder::{BuilderTask, TestBuilderImplementation}, completion_task::CompletionTaskDescription, spinning_task::{ChangeNode, SpinningTask, UpDown}, test_launcher::{Network, TestLauncher}, @@ -109,6 +115,7 @@ where let TestRunner { ref launcher, nodes, + solver_server, late_start, next_node_id: _, _pd: _, @@ -285,6 +292,15 @@ where node.handle.shut_down().await; } + // Shutdown all of the servers at the end + // Aborting here doesn't cause any problems because we don't maintain any state + if let Some(solver_server) = solver_server { + #[cfg(async_executor_impl = "async-std")] + solver_server.1.cancel().await; + #[cfg(async_executor_impl = "tokio")] + solver_server.1.abort(); + } + assert!( error_list.is_empty(), "{}", @@ -296,19 +312,10 @@ where ); } - /// Add nodes. - /// - /// # Panics - /// Panics if unable to create a [`HotShotInitializer`] - pub async fn add_nodes>( - &mut self, - total: usize, - late_start: &HashSet, - ) -> Vec { - let mut results = vec![]; + pub async fn init_builders>( + &self, + ) -> (Vec>>, Vec) { let config = self.launcher.resource_generator.config.clone(); - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let mut builder_tasks = Vec::new(); let mut builder_urls = Vec::new(); for metadata in &self.launcher.metadata.builders { @@ -326,6 +333,50 @@ where builder_urls.push(builder_url); } + (builder_tasks, builder_urls) + } + + /// Add servers. + pub async fn add_servers(&mut self, builder_urls: Vec) { + let solver_error_pct = self.launcher.metadata.solver.error_pct; + let solver_port = portpicker::pick_unused_port().expect("No available ports"); + + // This should basically never fail + let solver_url: Url = format!("http://localhost:{solver_port}") + .parse() + .expect("Failed to parse solver URL"); + + // Initialize the solver API state + let solver_state = FakeSolverState::new(Some(solver_error_pct), builder_urls); + + // Then, fire it up as a background thread. + self.solver_server = Some(( + solver_url.clone(), + async_spawn(async move { + solver_state + .run::(solver_url) + .await + .expect("Unable to run solver api"); + }), + )); + } + + /// Add nodes. + /// + /// # Panics + /// Panics if unable to create a [`HotShotInitializer`] + pub async fn add_nodes>( + &mut self, + total: usize, + late_start: &HashSet, + ) -> Vec { + let mut results = vec![]; + let config = self.launcher.resource_generator.config.clone(); + let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + + let (mut builder_tasks, builder_urls) = self.init_builders::().await; + self.add_servers(builder_urls.clone()).await; + // Collect uninitialized nodes because we need to wait for all networks to be ready before starting the tasks let mut uninitialized_nodes = Vec::new(); let mut networks_ready = Vec::new(); @@ -365,8 +416,11 @@ where let network = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); - let auction_results_provider = + let mut auction_results_provider = (self.launcher.resource_generator.auction_results_provider)(node_id); + if let Some(solver_server) = &self.solver_server { + auction_results_provider.broadcast_url = Some(solver_server.0.clone()); + } let network_clone = network.clone(); let networks_ready_future = async move { @@ -549,6 +603,8 @@ pub struct TestRunner< pub(crate) launcher: TestLauncher, /// nodes in the test pub(crate) nodes: Vec>, + /// the solver server running in the test + pub(crate) solver_server: Option<(Url, JoinHandle<()>)>, /// nodes with a late start pub(crate) late_start: HashMap>, /// the next node unique identifier diff --git a/testing/tests/tests_5/fake_solver.rs b/testing/tests/tests_5/fake_solver.rs new file mode 100644 index 0000000000..a3dca125d6 --- /dev/null +++ b/testing/tests/tests_5/fake_solver.rs @@ -0,0 +1,297 @@ +use async_compatibility_layer::art::async_spawn; +use hotshot_fakeapi::fake_solver::FakeSolverState; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; + +use hotshot_example_types::node_types::TestTypes; +use hotshot_testing::helpers::key_pair_for_id; +use std::collections::HashMap; +use tracing::instrument; +use url::Url; + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_fake_solver_fetch_non_permissioned_no_error() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let solver_state = FakeSolverState::new( + None, /* 0% error rate */ + vec![ + "http://localhost:1111".parse().unwrap(), + "http://localhost:1112".parse().unwrap(), + "http://localhost:1113".parse().unwrap(), + ], + ); + + // Fire up the solver. + let solver_url: Url = format!( + "http://localhost:{}", + portpicker::pick_unused_port().unwrap() + ) + .parse() + .unwrap(); + let url = solver_url.clone(); + let solver_handle = async_spawn(async move { + solver_state.run::(url).await.unwrap(); + }); + + let client = reqwest::Client::new(); + + // Then, hit the API + let resp = client + .get(solver_url.join("v0/api/auction_results/1").unwrap()) + .send() + .await + .unwrap() + .json::>>() + .await + .unwrap(); + + #[cfg(async_executor_impl = "async-std")] + solver_handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + solver_handle.abort(); + + assert_eq!(resp[0]["url"], "http://localhost:1111/"); + assert_eq!(resp[1]["url"], "http://localhost:1112/"); + assert_eq!(resp[2]["url"], "http://localhost:1113/"); +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_fake_solver_fetch_non_permissioned_with_errors() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let solver_state = + FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); + + // Fire up the solver. + let solver_url: Url = format!( + "http://localhost:{}", + portpicker::pick_unused_port().unwrap() + ) + .parse() + .unwrap(); + let url = solver_url.clone(); + let solver_handle = async_spawn(async move { + solver_state.run::(url).await.unwrap(); + }); + + let client = reqwest::Client::new(); + let mut payloads = Vec::new(); + + for i in 0..10 { + // Then, hit the API + let resp = client + .get( + solver_url + .join(&format!("v0/api/auction_results/{i}")) + .unwrap(), + ) + .send() + .await; + + if let Err(ref e) = resp { + // We want to die if we don't get a 500, because that's an unexpected error. + assert!( + e.is_status(), + "Got unexpected error response; error = {e:?}" + ); + + // Otherwise, make sure it's a 500 error + let status = e.status().unwrap(); + + // if it is, we're good to go, and we expect this. + assert_eq!( + status, + reqwest::StatusCode::INTERNAL_SERVER_ERROR, + "Got unexpected error code; code = {status:?}" + ); + } else { + let resp = resp.unwrap(); + + if resp.status() != reqwest::StatusCode::OK { + assert_eq!( + resp.status(), + reqwest::StatusCode::INTERNAL_SERVER_ERROR, + "Got unexpected error code; code = {:?}", + resp.status(), + ); + + // Early return if it's an okay status code + return; + } + + let payload = resp.json::>>().await.unwrap(); + payloads.push(payload); + } + } + + #[cfg(async_executor_impl = "async-std")] + solver_handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + solver_handle.abort(); + + // Assert over the payloads with a 50% error rate. + for payload in payloads { + assert_eq!(payload[0]["url"], "http://localhost:1111/"); + } +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_fake_solver_fetch_permissioned_no_error() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let solver_state = FakeSolverState::new( + None, /* 0% error rate */ + vec![ + "http://localhost:1111".parse().unwrap(), + "http://localhost:1112".parse().unwrap(), + "http://localhost:1113".parse().unwrap(), + ], + ); + + // We need a private key + let (private_key, _) = key_pair_for_id(0); + + // Fire up the solver. + let solver_url: Url = format!( + "http://localhost:{}", + portpicker::pick_unused_port().unwrap() + ) + .parse() + .unwrap(); + let url = solver_url.clone(); + let solver_handle = async_spawn(async move { + solver_state.run::(url).await.unwrap(); + }); + + let client = reqwest::Client::new(); + let encoded_signature: tagged_base64::TaggedBase64 = + ::SignatureKey::sign(&private_key, &[0; 32]) + .unwrap() + .into(); + + // Then, hit the API + let resp = client + .get( + solver_url + .join(&format!("v0/api/auction_results/1/{encoded_signature}")) + .unwrap(), + ) + .send() + .await + .unwrap() + .json::>>() + .await + .unwrap(); + + #[cfg(async_executor_impl = "async-std")] + solver_handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + solver_handle.abort(); + + assert_eq!(resp[0]["url"], "http://localhost:1111/"); + assert_eq!(resp[1]["url"], "http://localhost:1112/"); + assert_eq!(resp[2]["url"], "http://localhost:1113/"); +} + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn test_fake_solver_fetch_permissioned_with_errors() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let solver_state = + FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); + + // We need a private key + let (private_key, _) = key_pair_for_id(0); + + // Fire up the solver. + let solver_url: Url = format!( + "http://localhost:{}", + portpicker::pick_unused_port().unwrap() + ) + .parse() + .unwrap(); + let url = solver_url.clone(); + let solver_handle = async_spawn(async move { + solver_state.run::(url).await.unwrap(); + }); + + let client = reqwest::Client::new(); + let mut payloads = Vec::new(); + let encoded_signature: tagged_base64::TaggedBase64 = + ::SignatureKey::sign(&private_key, &[0; 32]) + .unwrap() + .into(); + + for i in 0..10 { + // Then, hit the API + let resp = client + .get( + solver_url + .join(&format!("v0/api/auction_results/{i}/{encoded_signature}")) + .unwrap(), + ) + .send() + .await; + + if let Err(ref e) = resp { + // We want to die if we don't get a 500, because that's an unexpected error. + assert!( + e.is_status(), + "Got unexpected error response; error = {e:?}" + ); + + // Otherwise, make sure it's a 500 error + let status = e.status().unwrap(); + + // if it is, we're good to go, and we expect this. + assert_eq!( + status, + reqwest::StatusCode::INTERNAL_SERVER_ERROR, + "Got unexpected error code; code = {status:?}" + ); + } else { + let resp = resp.unwrap(); + + if resp.status() != reqwest::StatusCode::OK { + assert_eq!( + resp.status(), + reqwest::StatusCode::INTERNAL_SERVER_ERROR, + "Got unexpected error code; code = {:?}", + resp.status(), + ); + + // Early return if it's an okay status code + return; + } + + let payload = resp.json::>>().await.unwrap(); + payloads.push(payload); + } + } + + #[cfg(async_executor_impl = "async-std")] + solver_handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + solver_handle.abort(); + + // Assert over the payloads with a 50% error rate. + for payload in payloads { + assert_eq!(payload[0]["url"], "http://localhost:1111/"); + } +} From bbd44443282caeae9a2e9e9e548c6c97c6e377a4 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Wed, 10 Jul 2024 12:54:16 +0000 Subject: [PATCH 1117/1393] Split consensus helpers (#3427) --- fakeapi/src/fake_solver.rs | 9 +- task-impls/src/consensus/handlers.rs | 8 +- task-impls/src/consensus/helpers.rs | 1357 ------------------------ task-impls/src/helpers.rs | 11 +- task-impls/src/quorum_vote/handlers.rs | 4 +- testing/src/test_runner.rs | 8 +- 6 files changed, 24 insertions(+), 1373 deletions(-) delete mode 100644 task-impls/src/consensus/helpers.rs diff --git a/fakeapi/src/fake_solver.rs b/fakeapi/src/fake_solver.rs index ab40cbeefc..dfeec834ca 100644 --- a/fakeapi/src/fake_solver.rs +++ b/fakeapi/src/fake_solver.rs @@ -1,12 +1,13 @@ +use std::{ + io::{self, ErrorKind}, + thread, time, +}; + use anyhow::Result; use async_lock::RwLock; use futures::FutureExt; use hotshot_example_types::auction_results_provider_types::TestAuctionResult; use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; -use std::{ - io::{self, ErrorKind}, - thread, time, -}; use tide_disco::{ api::ApiError, error::ServerError, diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index c57e248321..4596a2115e 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -210,7 +210,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( create_and_send_proposal( public_key, private_key, - consensus, + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), sender, view, cnm, @@ -272,6 +272,7 @@ pub async fn publish_proposal_if_able( /// /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. #[allow(clippy::too_many_lines)] +#[instrument(skip_all)] pub(crate) async fn handle_quorum_proposal_recv>( proposal: &Proposal>, sender: &TYPES::SignatureKey, @@ -490,6 +491,7 @@ pub(crate) async fn handle_quorum_proposal_recv>( proposal: &QuorumProposal, event_stream: Sender>>, @@ -514,7 +517,7 @@ pub async fn handle_quorum_proposal_validated = ( #[allow(unused_variables)] /// Check if we are able to vote, like whether the proposal is valid, /// whether we have DAC and VID share, and if so, vote. +#[instrument(skip_all, fields(id = id, view = *cur_view))] pub async fn update_state_and_vote_if_able>( cur_view: TYPES::Time, proposal: QuorumProposal, diff --git a/task-impls/src/consensus/helpers.rs b/task-impls/src/consensus/helpers.rs deleted file mode 100644 index 8aca2b2ef7..0000000000 --- a/task-impls/src/consensus/helpers.rs +++ /dev/null @@ -1,1357 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use crate::{events::ProposalMissing, request::REQUEST_TIMEOUT}; -use anyhow::bail; -use anyhow::{ensure, Context, Result}; -use async_broadcast::{broadcast, Sender}; -use async_compatibility_layer::art::async_timeout; -use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use committable::{Commitment, Committable}; -use hotshot_types::{ - consensus::{OuterConsensus, View}, - data::{Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType, LeafInfo}, - message::Proposal, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, - traits::{ - block_contents::BlockHeader, election::Membership, node_implementation::NodeType, - signature_key::SignatureKey, states::ValidatedState, BlockPayload, - }, - utils::{Terminator, ViewInner}, - vote::{Certificate, HasViewNumber}, -}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, info, instrument, warn}; -#[cfg(not(feature = "dependency-tasks"))] -use { - super::ConsensusTaskState, - crate::{ - consensus::{update_view, view_change::SEND_VIEW_CHANGE_EVENT}, - helpers::AnyhowTracing, - }, - async_compatibility_layer::art::{async_sleep, async_spawn}, - chrono::Utc, - core::time::Duration, - futures::FutureExt, - hotshot_types::{ - consensus::CommitmentAndMetadata, - traits::{ - node_implementation::{ConsensusTime, NodeImplementation}, - storage::Storage, - }, - }, - hotshot_types::{data::null_block, message::GeneralConsensusMessage, simple_vote::QuorumData}, - std::marker::PhantomData, - tracing::error, - vbs::version::Version, -}; - -use crate::{events::HotShotEvent, helpers::broadcast_event}; - -/// Validate the state and safety and liveness of a proposal then emit -/// a `QuorumProposalValidated` event. -/// -/// TODO - This should just take the QuorumProposalRecv task state after -/// we merge the dependency tasks. -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -#[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] -pub async fn validate_proposal_safety_and_liveness( - proposal: Proposal>, - parent_leaf: Leaf, - consensus: OuterConsensus, - decided_upgrade_certificate: Option>, - quorum_membership: Arc, - view_leader_key: TYPES::SignatureKey, - event_stream: Sender>>, - sender: TYPES::SignatureKey, - event_sender: Sender>, - id: u64, -) -> Result<()> { - let view_number = proposal.data.view_number(); - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(), - "Proposed leaf does not extend the parent leaf." - ); - - let state = Arc::new( - >::from_header(&proposal.data.block_header), - ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; - - if let Err(e) = consensus - .write() - .await - .update_validated_state_map(view_number, view.clone()) - { - tracing::trace!("{e:?}"); - } - consensus - .write() - .await - .update_saved_leaves(proposed_leaf.clone()); - - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; - - // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - // - // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: - // - // proposal.validate_signature(&quorum_membership)?; - // - // in a future PR. - ensure!( - view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), - "Could not verify proposal." - ); - - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; - - // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; - - let justify_qc = proposal.data.justify_qc.clone(); - // Create a positive vote if either liveness or safety check - // passes. - - // Liveness check. - let read_consensus = consensus.read().await; - let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = read_consensus.visit_leaf_ancestors( - justify_qc.view_number(), - Terminator::Inclusive(read_consensus.locked_view()), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number() != read_consensus.locked_view() - }, - ); - let safety_check = outcome.is_ok(); - - ensure!(safety_check || liveness_check, { - if let Err(e) = outcome { - broadcast_event( - Event { - view_number, - event: EventType::Error { error: Arc::new(e) }, - }, - &event_sender, - ) - .await; - } - - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) - }); - - // We accept the proposal, notify the application layer - - broadcast_event( - Event { - view_number, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender, - }, - }, - &event_sender, - ) - .await; - // Notify other tasks - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), - parent_leaf, - )), - &event_stream, - ) - .await; - - Ok(()) -} - -/// Create the header for a proposal, build the proposal, and broadcast -/// the proposal send evnet. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -#[instrument(skip_all, fields(id = id, view = *view))] -pub async fn create_and_send_proposal( - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - event_stream: Sender>>, - view: TYPES::Time, - commitment_and_metadata: CommitmentAndMetadata, - parent_leaf: Leaf, - state: Arc, - upgrade_cert: Option>, - proposal_cert: Option>, - round_start_delay: u64, - instance_state: Arc, - version: Version, - id: u64, -) { - let consensus_read = consensus.read().await; - let Some(Some(vid_share)) = consensus_read - .vid_shares() - .get(&view) - .map(|shares| shares.get(&public_key).cloned()) - else { - error!("Cannot propopse without our VID share, view {:?}", view); - return; - }; - drop(consensus_read); - let block_header = match TYPES::BlockHeader::new( - state.as_ref(), - instance_state.as_ref(), - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fee, - vid_share.data.common, - version, - ) - .await - { - Ok(header) => header, - Err(err) => { - error!(%err, "Failed to construct block header"); - return; - } - }; - - let proposal = QuorumProposal { - block_header, - view_number: view, - justify_qc: consensus.read().await.high_qc().clone(), - proposal_certificate: proposal_cert, - upgrade_certificate: upgrade_cert, - }; - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.parent_commitment() != parent_leaf.commit() { - return; - } - - let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) - else { - // This should never happen. - error!("Failed to sign proposed_leaf.commit()!"); - return; - }; - - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, - }; - debug!( - "Sending null proposal for view {:?}", - proposed_leaf.view_number(), - ); - if let Err(e) = consensus - .write() - .await - .update_last_proposed_view(message.clone()) - { - tracing::trace!("{e:?}"); - return; - } - async_sleep(Duration::from_millis(round_start_delay)).await; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend( - message.clone(), - public_key, - )), - &event_stream, - ) - .await; -} - -/// Validates, from a given `proposal` that the view that it is being submitted for is valid when -/// compared to `cur_view` which is the highest proposed view (so far) for the caller. If the proposal -/// is for a view that's later than expected, that the proposal includes a timeout or view sync certificate. -pub fn validate_proposal_view_and_certs( - proposal: &Proposal>, - sender: &TYPES::SignatureKey, - cur_view: TYPES::Time, - quorum_membership: &Arc, - timeout_membership: &Arc, -) -> Result<()> { - let view = proposal.data.view_number(); - ensure!( - view >= cur_view, - "Proposal is from an older view {:?}", - proposal.data.clone() - ); - - let view_leader_key = quorum_membership.leader(view); - ensure!( - view_leader_key == *sender, - "Leader key does not match key in proposal" - ); - - // Verify a timeout certificate OR a view sync certificate exists and is valid. - if proposal.data.justify_qc.view_number() != view - 1 { - let received_proposal_cert = - proposal.data.proposal_certificate.clone().context(format!( - "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", - *view - ))?; - - match received_proposal_cert { - ViewChangeEvidence::Timeout(timeout_cert) => { - ensure!( - timeout_cert.date().view == view - 1, - "Timeout certificate for view {} was not for the immediately preceding view", - *view - ); - ensure!( - timeout_cert.is_valid_cert(timeout_membership.as_ref()), - "Timeout certificate for view {} was invalid", - *view - ); - } - ViewChangeEvidence::ViewSync(view_sync_cert) => { - ensure!( - view_sync_cert.view_number == view, - "View sync cert view number {:?} does not match proposal view number {:?}", - view_sync_cert.view_number, - view - ); - - // View sync certs must also be valid. - ensure!( - view_sync_cert.is_valid_cert(quorum_membership.as_ref()), - "Invalid view sync finalize cert provided" - ); - } - } - } - - // Validate the upgrade certificate -- this is just a signature validation. - // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, quorum_membership)?; - - Ok(()) -} - -/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. -#[instrument(skip_all)] -pub(crate) async fn parent_leaf_and_state( - next_proposal_view_number: TYPES::Time, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - consensus: OuterConsensus, -) -> Result<(Leaf, Arc<::ValidatedState>)> { - ensure!( - quorum_membership.leader(next_proposal_view_number) == public_key, - "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", - ); - - let consensus_reader = consensus.read().await; - let parent_view_number = consensus_reader.high_qc().view_number(); - let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( - format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) - )?; - - // Leaf hash in view inner does not match high qc hash - Why? - let (leaf_commitment, state) = parent_view.leaf_and_state().context( - format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") - )?; - - if leaf_commitment != consensus_reader.high_qc().date().leaf_commit { - // NOTE: This happens on the genesis block - debug!( - "They don't equal: {:?} {:?}", - leaf_commitment, - consensus_reader.high_qc().date().leaf_commit - ); - } - - let leaf = consensus_reader - .saved_leaves() - .get(&leaf_commitment) - .context("Failed to find high QC of parent")?; - - let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); - let parent_leaf = leaf.clone(); - let original_parent_hash = parent_leaf.commit(); - let mut next_parent_hash = original_parent_hash; - - // Walk back until we find a decide - if !reached_decided { - debug!("We have not reached decide"); - while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { - if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { - break; - } - next_parent_hash = next_parent_leaf.parent_commitment(); - } - // TODO do some sort of sanity check on the view number that it matches decided - } - - Ok((parent_leaf, Arc::clone(state))) -} - -/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the -/// standard case proposal scenario. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -#[instrument(skip_all)] -pub async fn publish_proposal_from_commitment_and_metadata( - view: TYPES::Time, - sender: Sender>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - delay: u64, - formed_upgrade_certificate: Option>, - decided_upgrade_cert: Option>, - commitment_and_metadata: Option>, - proposal_cert: Option>, - instance_state: Arc, - version: Version, - id: u64, -) -> Result> { - let (parent_leaf, state) = parent_leaf_and_state( - view, - quorum_membership, - public_key.clone(), - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - ) - .await?; - - // In order of priority, we should try to attach: - // - the parent certificate if it exists, or - // - our own certificate that we formed. - // In either case, we need to ensure that the certificate is still relevant. - // - // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. - // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. - let mut proposal_upgrade_certificate = parent_leaf - .upgrade_certificate() - .or(formed_upgrade_certificate); - - if !proposal_upgrade_certificate - .clone() - .is_some_and(|cert| cert.is_relevant(view, decided_upgrade_cert).is_ok()) - { - proposal_upgrade_certificate = None; - } - - // We only want to proposal to be attached if any of them are valid. - let proposal_certificate = proposal_cert - .as_ref() - .filter(|cert| cert.is_valid_for_view(&view)) - .cloned(); - - // FIXME - This is not great, and will be fixed later. - // If it's > July, 2024 and this is still here, something has gone horribly wrong. - let cnm = commitment_and_metadata - .clone() - .context("Cannot propose because we don't have the VID payload commitment and metadata")?; - - ensure!( - cnm.block_view == view, - "Cannot propose because our VID payload commitment and metadata is for an older view." - ); - - let create_and_send_proposal_handle = async_spawn(async move { - create_and_send_proposal( - public_key, - private_key, - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - sender, - view, - cnm, - parent_leaf.clone(), - state, - proposal_upgrade_certificate, - proposal_certificate, - delay, - instance_state, - version, - id, - ) - .await; - }); - - Ok(create_and_send_proposal_handle) -} - -/// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either -/// `commitment_and_metadata`, or a `decided_upgrade_cert`. -#[allow(clippy::too_many_arguments)] -#[cfg(not(feature = "dependency-tasks"))] -#[instrument(skip_all)] -pub async fn publish_proposal_if_able( - view: TYPES::Time, - sender: Sender>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - delay: u64, - formed_upgrade_certificate: Option>, - decided_upgrade_cert: Option>, - commitment_and_metadata: Option>, - proposal_cert: Option>, - instance_state: Arc, - version: Version, - id: u64, -) -> Result> { - publish_proposal_from_commitment_and_metadata( - view, - sender, - quorum_membership, - public_key, - private_key, - consensus, - delay, - formed_upgrade_certificate, - decided_upgrade_cert, - commitment_and_metadata, - proposal_cert, - instance_state, - version, - id, - ) - .await -} - -/// Trigger a request to the network for a proposal for a view and wait for the response -#[instrument(skip_all)] -pub(crate) async fn fetch_proposal( - view: TYPES::Time, - event_stream: Sender>>, - quorum_membership: Arc, - consensus: OuterConsensus, -) -> Result> { - let (tx, mut rx) = broadcast(1); - let event = ProposalMissing { - view, - response_chan: tx, - }; - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalRequest(event)), - &event_stream, - ) - .await; - let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { - bail!("Request for proposal failed"); - }; - let view_number = proposal.data.view_number(); - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { - bail!("Invalid justify_qc in proposal for view {}", *view_number); - } - let mut consensus_write = consensus.write().await; - let leaf = Leaf::from_quorum_proposal(&proposal.data); - let state = Arc::new( - >::from_header(&proposal.data.block_header), - ); - - let view = View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, - }; - if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { - tracing::trace!("{e:?}"); - } - - consensus_write.update_saved_leaves(leaf.clone()); - broadcast_event( - HotShotEvent::ValidatedStateUpdated(view_number, view).into(), - &event_stream, - ) - .await; - Ok(leaf) -} - -/// Handle the received quorum proposal. -/// -/// Returns the proposal that should be used to set the `cur_proposal` for other tasks. -#[allow(clippy::too_many_lines)] -#[cfg(not(feature = "dependency-tasks"))] -#[instrument(skip_all)] -pub(crate) async fn handle_quorum_proposal_recv>( - proposal: &Proposal>, - sender: &TYPES::SignatureKey, - event_stream: Sender>>, - task_state: &mut ConsensusTaskState, - version: Version, -) -> Result>> { - let sender = sender.clone(); - debug!( - "Received Quorum Proposal for view {}", - *proposal.data.view_number - ); - - let cur_view = task_state.cur_view; - - validate_proposal_view_and_certs( - proposal, - &sender, - task_state.cur_view, - &task_state.quorum_membership, - &task_state.timeout_membership, - ) - .context("Failed to validate proposal view and attached certs")?; - - let view = proposal.data.view_number(); - let view_leader_key = task_state.quorum_membership.leader(view); - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { - let consensus = task_state.consensus.read().await; - consensus.metrics.invalid_qc.update(1); - bail!("Invalid justify_qc in proposal for view {}", *view); - } - - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::( - view, - &event_stream, - task_state.timeout, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &mut task_state.cur_view, - &mut task_state.cur_view_time, - &mut task_state.timeout_task, - &task_state.output_event_stream, - SEND_VIEW_CHANGE_EVENT, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, - ) - .await - { - debug!("Failed to update view; error = {e:#}"); - } - - let mut parent_leaf = task_state - .consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned(); - - parent_leaf = match parent_leaf { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - event_stream.clone(), - Arc::clone(&task_state.quorum_membership), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - ) - .await - .ok(), - }; - let consensus_read = task_state.consensus.read().await; - - // Get the parent leaf and state. - let parent = match parent_leaf { - Some(leaf) => { - if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { - Some((leaf, Arc::clone(&state))) - } else { - bail!("Parent state not found! Consensus internally inconsistent"); - } - } - None => None, - }; - - if justify_qc.view_number() > consensus_read.high_qc().view_number { - if let Err(e) = task_state - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - bail!("Failed to store High QC not voting. Error: {:?}", e); - } - } - - drop(consensus_read); - let mut consensus_write = task_state.consensus.write().await; - - if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { - tracing::trace!("{e:?}"); - } - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some((parent_leaf, _parent_state)) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.date().leaf_commit - ); - let leaf = Leaf::from_quorum_proposal(&proposal.data); - - let state = Arc::new( - >::from_header( - &proposal.data.block_header, - ), - ); - - if let Err(e) = consensus_write.update_validated_state_map( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(), - state, - delta: None, - }, - }, - ) { - tracing::trace!("{e:?}"); - } - - consensus_write.update_saved_leaves(leaf.clone()); - let new_leaves = consensus_write.saved_leaves().clone(); - let new_state = consensus_write.validated_state_map().clone(); - drop(consensus_write); - - if let Err(e) = task_state - .storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - - // If we are missing the parent from storage, the safety check will fail. But we can - // still vote if the liveness check succeeds. - #[cfg(not(feature = "dependency-tasks"))] - { - let consensus_read = task_state.consensus.read().await; - let liveness_check = justify_qc.view_number() > consensus_read.locked_view(); - - let high_qc = consensus_read.high_qc().clone(); - let locked_view = consensus_read.locked_view(); - - drop(consensus_read); - - let mut current_proposal = None; - if liveness_check { - current_proposal = Some(proposal.data.clone()); - let new_view = proposal.data.view_number + 1; - - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) - == task_state.public_key - && high_qc.view_number == current_proposal.clone().unwrap().view_number; - - let qc = high_qc.clone(); - if should_propose { - debug!( - "Attempting to publish proposal after voting for liveness; now in view: {}", - *new_view - ); - let create_and_send_proposal_handle = publish_proposal_if_able( - qc.view_number + 1, - event_stream, - Arc::clone(&task_state.quorum_membership), - task_state.public_key.clone(), - task_state.private_key.clone(), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - task_state.round_start_delay, - task_state.formed_upgrade_certificate.clone(), - task_state.decided_upgrade_cert.clone(), - task_state.payload_commitment_and_metadata.clone(), - task_state.proposal_cert.clone(), - Arc::clone(&task_state.instance_state), - version, - task_state.id, - ) - .await?; - - task_state - .spawned_tasks - .entry(view) - .or_default() - .push(create_and_send_proposal_handle); - } - } else { - warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); - } - - return Ok(current_proposal); - } - - #[cfg(feature = "dependency-tasks")] - return Ok(None); - }; - - task_state - .spawned_tasks - .entry(proposal.data.view_number()) - .or_default() - .push(async_spawn( - validate_proposal_safety_and_liveness( - proposal.clone(), - parent_leaf, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - task_state.decided_upgrade_cert.clone(), - Arc::clone(&task_state.quorum_membership), - view_leader_key, - event_stream.clone(), - sender, - task_state.output_event_stream.clone(), - task_state.id, - ) - .map(AnyhowTracing::err_as_debug), - )); - Ok(None) -} - -/// Helper type to give names and to the output values of the leaf chain traversal operation. -#[derive(Debug)] -pub struct LeafChainTraversalOutcome { - /// The new locked view obtained from a 2 chain starting from the proposal's parent. - pub new_locked_view_number: Option, - - /// The new decided view obtained from a 3 chain starting from the proposal's parent. - pub new_decided_view_number: Option, - - /// The qc for the decided chain. - pub new_decide_qc: Option>, - - /// The decided leaves with corresponding validated state and VID info. - pub leaf_views: Vec>, - - /// The decided leaves. - pub leaves_decided: Vec>, - - /// The transactions in the block payload for each leaf. - pub included_txns: Option::Transaction>>>, - - /// The most recent upgrade certificate from one of the leaves. - pub decided_upgrade_cert: Option>, -} - -/// We need Default to be implemented because the leaf ascension has very few failure branches, -/// and when they *do* happen, we still return intermediate states. Default makes the burden -/// of filling values easier. -impl Default for LeafChainTraversalOutcome { - /// The default method for this type is to set all of the returned values to `None`. - fn default() -> Self { - Self { - new_locked_view_number: None, - new_decided_view_number: None, - new_decide_qc: None, - leaf_views: Vec::new(), - leaves_decided: Vec::new(), - included_txns: None, - decided_upgrade_cert: None, - } - } -} - -/// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin -/// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is -/// one view newer), then we begin attempting to form the chain. This is a direct impl from -/// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: -/// -/// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, -/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, -/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. -/// It forms a Three-Chain, if b'' forms a Two-Chain. -/// -/// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit -/// is reached when we have a two chain, and a decide is reached when we have a three chain. -/// -/// # Example -/// Suppose we have a decide for view 1, and we then move on to get undecided views 2, 3, and 4. Further, -/// suppose that our *next* proposal is for view 5, but this leader did not see info for view 4, so the -/// justify qc of the proposal points to view 3. This is fine, and the undecided chain now becomes -/// 2-3-5. -/// -/// Assuming we continue with honest leaders, we then eventually could get a chain like: 2-3-5-6-7-8. This -/// will prompt a decide event to occur (this code), where the `proposal` is for view 8. Now, since the -/// lowest value in the 3-chain here would be 5 (excluding 8 since we only walk the parents), we begin at -/// the first link in the chain, and walk back through all undecided views, making our new anchor view 5, -/// and out new locked view will be 6. -/// -/// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and -/// the anchor view will be set to view 6, with the locked view as view 7. -#[instrument(skip_all)] -pub async fn decide_from_proposal( - proposal: &QuorumProposal, - consensus: OuterConsensus, - existing_upgrade_cert: &Option>, - public_key: &TYPES::SignatureKey, -) -> LeafChainTraversalOutcome { - let consensus_reader = consensus.read().await; - let view_number = proposal.view_number(); - let parent_view_number = proposal.justify_qc.view_number(); - let old_anchor_view = consensus_reader.last_decided_view(); - - let mut last_view_number_visited = view_number; - let mut current_chain_length = 0usize; - let mut res = LeafChainTraversalOutcome::default(); - - if let Err(e) = consensus_reader.visit_leaf_ancestors( - parent_view_number, - Terminator::Exclusive(old_anchor_view), - true, - |leaf, state, delta| { - // This is the core paper logic. We're implementing the chain in chained hotstuff. - if res.new_decided_view_number.is_none() { - // If the last view number is the child of the leaf we've moved to... - if last_view_number_visited == leaf.view_number() + 1 { - last_view_number_visited = leaf.view_number(); - - // The chain grows by one - current_chain_length += 1; - - // We emit a locked view when the chain length is 2 - if current_chain_length == 2 { - res.new_locked_view_number = Some(leaf.view_number()); - // The next leaf in the chain, if there is one, is decided, so this - // leaf's justify_qc would become the QC for the decided chain. - res.new_decide_qc = Some(leaf.justify_qc().clone()); - } else if current_chain_length == 3 { - // And we decide when the chain length is 3. - res.new_decided_view_number = Some(leaf.view_number()); - } - } else { - // There isn't a new chain extension available, so we signal to the callback - // owner that we can exit for now. - return false; - } - } - - // Now, if we *have* reached a decide, we need to do some state updates. - if let Some(new_decided_view) = res.new_decided_view_number { - // First, get a mutable reference to the provided leaf. - let mut leaf = leaf.clone(); - - // Update the metrics - if leaf.view_number() == new_decided_view { - consensus_reader - .metrics - .last_synced_block_height - .set(usize::try_from(leaf.height()).unwrap_or(0)); - } - - // Check if there's a new upgrade certificate available. - if let Some(cert) = leaf.upgrade_certificate() { - if leaf.upgrade_certificate() != *existing_upgrade_cert { - if cert.data.decide_by < view_number { - warn!("Failed to decide an upgrade certificate in time. Ignoring."); - } else { - info!("Reached decide on upgrade certificate: {:?}", cert); - res.decided_upgrade_cert = Some(cert.clone()); - } - } - } - // If the block payload is available for this leaf, include it in - // the leaf chain that we send to the client. - if let Some(encoded_txns) = - consensus_reader.saved_payloads().get(&leaf.view_number()) - { - let payload = - BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); - - leaf.fill_block_payload_unchecked(payload); - } - - // Get the VID share at the leaf's view number, corresponding to our key - // (if one exists) - let vid_share = consensus_reader - .vid_shares() - .get(&leaf.view_number()) - .unwrap_or(&HashMap::new()) - .get(public_key) - .cloned() - .map(|prop| prop.data); - - // Add our data into a new `LeafInfo` - res.leaf_views.push(LeafInfo::new( - leaf.clone(), - Arc::clone(&state), - delta.clone(), - vid_share, - )); - res.leaves_decided.push(leaf.clone()); - if let Some(ref payload) = leaf.block_payload() { - res.included_txns = Some( - payload - .transaction_commitments(leaf.block_header().metadata()) - .into_iter() - .collect::>(), - ); - } - } - true - }, - ) { - debug!("Leaf ascension failed; error={e}"); - } - - res -} - -/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. -#[allow(clippy::too_many_lines)] -#[cfg(not(feature = "dependency-tasks"))] -#[instrument(skip_all)] -pub async fn handle_quorum_proposal_validated>( - proposal: &QuorumProposal, - event_stream: Sender>>, - task_state: &mut ConsensusTaskState, -) -> Result<()> { - let view = proposal.view_number(); - #[cfg(not(feature = "dependency-tasks"))] - { - task_state.current_proposal = Some(proposal.clone()); - } - - let res = decide_from_proposal( - proposal, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &task_state.decided_upgrade_cert, - &task_state.public_key, - ) - .await; - - if let Some(cert) = res.decided_upgrade_cert { - task_state.decided_upgrade_cert = Some(cert.clone()); - - let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; - *decided_certificate_lock = Some(cert.clone()); - drop(decided_certificate_lock); - let _ = event_stream - .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) - .await; - } - - let mut consensus = task_state.consensus.write().await; - if let Some(new_locked_view) = res.new_locked_view_number { - if let Err(e) = consensus.update_locked_view(new_locked_view) { - tracing::trace!("{e:?}"); - } - } - - drop(consensus); - - #[cfg(not(feature = "dependency-tasks"))] - { - let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; - // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key - && task_state.consensus.read().await.high_qc().view_number - == task_state.current_proposal.clone().unwrap().view_number; - - if let Some(new_decided_view) = res.new_decided_view_number { - task_state.cancel_tasks(new_decided_view).await; - } - task_state.current_proposal = Some(proposal.clone()); - task_state.spawn_vote_task(view, event_stream.clone()).await; - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - if let Err(e) = task_state - .publish_proposal(new_view, event_stream.clone()) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - } - - #[allow(clippy::cast_precision_loss)] - if let Some(new_anchor_view) = res.new_decided_view_number { - let block_size = res.included_txns.map(|set| set.len().try_into().unwrap()); - let decide_sent = broadcast_event( - Event { - view_number: new_anchor_view, - event: EventType::Decide { - leaf_chain: Arc::new(res.leaf_views), - qc: Arc::new(res.new_decide_qc.unwrap()), - block_size, - }, - }, - &task_state.output_event_stream, - ); - let mut consensus = task_state.consensus.write().await; - - let old_anchor_view = consensus.last_decided_view(); - consensus.collect_garbage(old_anchor_view, new_anchor_view); - if let Err(e) = consensus.update_last_decided_view(new_anchor_view) { - tracing::trace!("{e:?}"); - } - consensus - .metrics - .last_decided_time - .set(Utc::now().timestamp().try_into().unwrap()); - consensus.metrics.invalid_qc.set(0); - consensus - .metrics - .last_decided_view - .set(usize::try_from(consensus.last_decided_view().u64()).unwrap()); - let cur_number_of_views_per_decide_event = - *task_state.cur_view - consensus.last_decided_view().u64(); - consensus - .metrics - .number_of_views_per_decide_event - .add_point(cur_number_of_views_per_decide_event as f64); - - debug!( - "Sending Decide for view {:?}", - consensus.last_decided_view() - ); - drop(consensus); - debug!("Decided txns len {:?}", block_size); - decide_sent.await; - broadcast_event( - Arc::new(HotShotEvent::LeafDecided(res.leaves_decided)), - &event_stream, - ) - .await; - debug!("decide send succeeded"); - } - - Ok(()) -} - -/// Private key, latest decided upgrade certificate, committee membership, and event stream, for -/// sending the vote. -#[cfg(not(feature = "dependency-tasks"))] -type VoteInfo = ( - <::SignatureKey as SignatureKey>::PrivateKey, - Option>, - Arc<::Membership>, - Sender>>, -); - -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -#[allow(unused_variables)] -#[cfg(not(feature = "dependency-tasks"))] -/// Check if we are able to vote, like whether the proposal is valid, -/// whether we have DAC and VID share, and if so, vote. -#[instrument(skip_all, fields(id = id, view = *cur_view))] -pub async fn update_state_and_vote_if_able>( - cur_view: TYPES::Time, - proposal: QuorumProposal, - public_key: TYPES::SignatureKey, - consensus: OuterConsensus, - storage: Arc>, - quorum_membership: Arc, - instance_state: Arc, - vote_info: VoteInfo, - version: Version, - id: u64, -) -> bool { - use hotshot_types::simple_vote::QuorumVote; - - if !quorum_membership.has_stake(&public_key) { - debug!("We were not chosen for quorum committee on {:?}", cur_view); - return false; - } - - let read_consnesus = consensus.read().await; - // Only vote if you has seen the VID share for this view - let Some(vid_shares) = read_consnesus.vid_shares().get(&proposal.view_number) else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - proposal.view_number - ); - return false; - }; - let Some(vid_share) = vid_shares.get(&public_key).cloned() else { - debug!("we have not seen our VID share yet"); - return false; - }; - - if let Some(upgrade_cert) = &vote_info.1 { - if upgrade_cert.upgrading_in(cur_view) - && Some(proposal.block_header.payload_commitment()) - != null_block::commitment(quorum_membership.total_nodes()) - { - info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); - return false; - } - } - - // Only vote if you have the DA cert - // ED Need to update the view number this is stored under? - let Some(cert) = read_consnesus.saved_da_certs().get(&cur_view).cloned() else { - return false; - }; - drop(read_consnesus); - - let view = cert.view_number; - // TODO: do some of this logic without the vote token check, only do that when voting. - let justify_qc = proposal.justify_qc.clone(); - let mut parent = consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned(); - parent = match parent { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - vote_info.3.clone(), - Arc::clone(&quorum_membership), - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - ) - .await - .ok(), - }; - - let read_consnesus = consensus.read().await; - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.date().leaf_commit, - proposal.view_number, - ); - return false; - }; - let (Some(parent_state), _) = read_consnesus.state_and_delta(parent.view_number()) else { - warn!("Parent state not found! Consensus internally inconsistent"); - return false; - }; - drop(read_consnesus); - let Ok((validated_state, state_delta)) = parent_state - .validate_and_apply_header( - instance_state.as_ref(), - &parent, - &proposal.block_header.clone(), - vid_share.data.common.clone(), - version, - ) - .await - else { - warn!("Block header doesn't extend the proposal!"); - return false; - }; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - let parent_commitment = parent.commit(); - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.parent_commitment() != parent_commitment { - return false; - } - - // Validate the DAC. - let message = if cert.is_valid_cert(vote_info.2.as_ref()) { - // Validate the block payload commitment for non-genesis DAC. - if cert.date().payload_commit != proposal.block_header.payload_commitment() { - warn!( - "Block payload commitment does not equal da cert payload commitment. View = {}", - *view - ); - return false; - } - if let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: proposed_leaf.commit(), - }, - view, - &public_key, - &vote_info.0, - ) { - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Unable to sign quorum vote!"); - return false; - } - } else { - error!( - "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", - cert, cur_view - ); - return false; - }; - - let mut consensus = consensus.write().await; - if let Err(e) = consensus.update_validated_state_map( - cur_view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }, - ) { - tracing::trace!("{e:?}"); - } - consensus.update_saved_leaves(proposed_leaf.clone()); - let new_leaves = consensus.saved_leaves().clone(); - let new_state = consensus.validated_state_map().clone(); - drop(consensus); - - if let Err(e) = storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - { - error!("Couldn't store undecided state. Error: {:?}", e); - } - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); - // Add to the storage that we have received the VID disperse for a specific view - if let Err(e) = storage.write().await.append_vid(&vid_share).await { - warn!( - "Failed to store VID Disperse Proposal with error {:?}, aborting vote", - e - ); - return false; - } - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &vote_info.3).await; - return true; - } - debug!( - "Received VID share, but couldn't find DAC cert for view {:?}", - *proposal.view_number(), - ); - false -} diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index ab046687d0..203659e59d 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -38,13 +38,13 @@ use crate::{ }; /// Trigger a request to the network for a proposal for a view and wait for the response +#[instrument(skip_all)] pub(crate) async fn fetch_proposal( view: TYPES::Time, event_stream: Sender>>, quorum_membership: Arc, consensus: OuterConsensus, ) -> Result> { - tracing::debug!("Fetching proposal for view {:?}", view); let (tx, mut rx) = broadcast(1); let event = ProposalMissing { view, @@ -112,7 +112,7 @@ pub struct LeafChainTraversalOutcome { pub included_txns: Option::Transaction>>>, /// The most recent upgrade certificate from one of the leaves. - pub decided_upgrade_certificate: Option>, + pub decided_upgrade_cert: Option>, } /// We need Default to be implemented because the leaf ascension has very few failure branches, @@ -128,7 +128,7 @@ impl Default for LeafChainTraversalOutcome { leaf_views: Vec::new(), leaves_decided: Vec::new(), included_txns: None, - decided_upgrade_certificate: None, + decided_upgrade_cert: None, } } } @@ -226,7 +226,7 @@ pub async fn decide_from_proposal( warn!("Failed to decide an upgrade certificate in time. Ignoring."); } else { info!("Reached decide on upgrade certificate: {:?}", cert); - res.decided_upgrade_certificate = Some(cert.clone()); + res.decided_upgrade_cert = Some(cert.clone()); } } } @@ -278,6 +278,7 @@ pub async fn decide_from_proposal( } /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. +#[instrument(skip_all)] pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::Time, quorum_membership: Arc, @@ -347,6 +348,7 @@ pub(crate) async fn parent_leaf_and_state( /// we merge the dependency tasks. #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] +#[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] #[cfg(not(feature = "dependency-tasks"))] pub async fn temp_validate_proposal_safety_and_liveness( proposal: Proposal>, @@ -358,6 +360,7 @@ pub async fn temp_validate_proposal_safety_and_liveness( event_stream: Sender>>, sender: TYPES::SignatureKey, event_sender: Sender>, + id: u64, ) -> Result<()> { let view_number = proposal.data.view_number(); diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index b8552e0f7d..f251eef6ba 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -38,7 +38,7 @@ pub(crate) async fn handle_quorum_proposal_validated< leaf_views, leaves_decided, included_txns, - decided_upgrade_certificate, + decided_upgrade_cert, } = decide_from_proposal( proposal, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), @@ -48,7 +48,7 @@ pub(crate) async fn handle_quorum_proposal_validated< .await; drop(decided_upgrade_certificate_read); - if let Some(cert) = decided_upgrade_certificate.clone() { + if let Some(cert) = decided_upgrade_cert.clone() { let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; *decided_certificate_lock = Some(cert.clone()); drop(decided_certificate_lock); diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0139f870f5..c0849c7485 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -1,17 +1,15 @@ #![allow(clippy::panic)] -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use std::{ collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, sync::Arc, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; use async_broadcast::broadcast; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use futures::future::join_all; use hotshot::{ traits::TestableNodeImplementation, types::SystemContextHandle, HotShotInitializer, @@ -36,6 +34,8 @@ use hotshot_types::{ HotShotConfig, ValidatorConfig, }; use tide_disco::Url; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; #[allow(deprecated)] use tracing::info; From 2791d5ba3c977aad5a23b431c546ee947678e418 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 11 Jul 2024 08:54:59 -0400 Subject: [PATCH 1118/1393] Node lookup `try_send` (#3430) * Node lookup `try_send` * fmt --- .../src/traits/networking/combined_network.rs | 12 +++++------- .../src/traits/networking/libp2p_network.rs | 19 ++++++++++--------- .../src/traits/networking/push_cdn_network.rs | 6 +++--- types/src/traits/network.rs | 9 ++++++--- 4 files changed, 24 insertions(+), 22 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index b3fd23ba0b..30cf0c7f02 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -15,7 +15,7 @@ use std::{ use async_broadcast::{broadcast, InactiveReceiver, Sender}; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, - channel::UnboundedSendError, + channel::TrySendError, }; use async_lock::RwLock; use async_trait::async_trait; @@ -488,15 +488,13 @@ impl ConnectedNetwork for CombinedNetworks Ok(filtered_msgs) } - async fn queue_node_lookup( + fn queue_node_lookup( &self, view_number: ViewNumber, pk: TYPES::SignatureKey, - ) -> Result<(), UnboundedSendError>> { - self.primary() - .queue_node_lookup(view_number, pk.clone()) - .await?; - self.secondary().queue_node_lookup(view_number, pk).await + ) -> Result<(), TrySendError>> { + self.primary().queue_node_lookup(view_number, pk.clone())?; + self.secondary().queue_node_lookup(view_number, pk) } async fn update_view<'a, T>(&'a self, view: u64, membership: &T::Membership) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 3b439e5d52..f5f22c0490 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -18,7 +18,10 @@ use std::{ use anyhow::anyhow; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, - channel::{self, bounded, unbounded, UnboundedReceiver, UnboundedSendError, UnboundedSender}, + channel::{ + self, bounded, unbounded, Receiver as BoundedReceiver, Sender as BoundedSender, + TrySendError, UnboundedReceiver, UnboundedSender, + }, }; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; @@ -147,7 +150,7 @@ struct Libp2pNetworkInner { /// Sender for broadcast messages sender: UnboundedSender>, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) - node_lookup_send: UnboundedSender>, + node_lookup_send: BoundedSender>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -517,7 +520,7 @@ impl Libp2pNetwork { // if bounded figure out a way to log dropped msgs let (sender, receiver) = unbounded(); let (requests_tx, requests_rx) = channel(100); - let (node_lookup_send, node_lookup_recv) = unbounded(); + let (node_lookup_send, node_lookup_recv) = bounded(10); let (kill_tx, kill_rx) = bounded(1); rx.set_kill_switch(kill_rx); @@ -557,7 +560,7 @@ impl Libp2pNetwork { /// Spawns task for looking up nodes pre-emptively #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - fn spawn_node_lookup(&self, node_lookup_recv: UnboundedReceiver>) { + fn spawn_node_lookup(&self, mut node_lookup_recv: BoundedReceiver>) { let handle = Arc::clone(&self.inner.handle); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = Arc::clone(&self.inner.latest_seen_view); @@ -1075,15 +1078,14 @@ impl ConnectedNetwork for Libp2pNetwork { } #[instrument(name = "Libp2pNetwork::queue_node_lookup", skip_all)] - async fn queue_node_lookup( + fn queue_node_lookup( &self, view_number: ViewNumber, pk: K, - ) -> Result<(), UnboundedSendError>> { + ) -> Result<(), TrySendError>> { self.inner .node_lookup_send - .send(Some((view_number, pk))) - .await + .try_send(Some((view_number, pk))) } /// handles view update @@ -1096,7 +1098,6 @@ impl ConnectedNetwork for Libp2pNetwork { let _ = self .queue_node_lookup(ViewNumber::new(*future_view), future_leader) - .await .map_err(|err| tracing::warn!("failed to process node lookup request: {err}")); } } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 40de0ed9db..52a62e0b58 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -4,7 +4,7 @@ use std::{collections::BTreeSet, marker::PhantomData, sync::Arc}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, time::Duration}; -use async_compatibility_layer::channel::UnboundedSendError; +use async_compatibility_layer::channel::TrySendError; #[cfg(feature = "hotshot-testing")] use async_compatibility_layer::{art::async_sleep, art::async_spawn}; use async_trait::async_trait; @@ -557,11 +557,11 @@ impl ConnectedNetwork for PushCdnNetwork Result<(), UnboundedSendError>> { + ) -> Result<(), TrySendError>> { Ok(()) } } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index d14482d0c9..4e01e8eb07 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -25,7 +25,7 @@ use std::{ time::Duration, }; -use async_compatibility_layer::channel::UnboundedSendError; +use async_compatibility_layer::channel::TrySendError; use async_trait::async_trait; use futures::future::join_all; use rand::{ @@ -344,11 +344,14 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st } /// queues lookup of a node - async fn queue_node_lookup( + /// + /// # Errors + /// Does not error. + fn queue_node_lookup( &self, _view_number: ViewNumber, _pk: K, - ) -> Result<(), UnboundedSendError>> { + ) -> Result<(), TrySendError>> { Ok(()) } From 1e31b11c600ed07e97870310b501b02fa817267c Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 11 Jul 2024 10:32:18 -0400 Subject: [PATCH 1119/1393] [Upgradability] - Remove duplicated fields for the decided upgrade cert in `ConsensusTaskState` (#3431) * Save changes * Split consensus helpers * Fix build --- hotshot/src/tasks/task_state.rs | 1 - task-impls/src/consensus/handlers.rs | 41 ++--- task-impls/src/consensus/mod.rs | 9 +- task-impls/src/helpers.rs | 150 +----------------- task-impls/src/quorum_vote/handlers.rs | 4 +- .../src/predicates/upgrade_with_consensus.rs | 10 +- .../src/predicates/upgrade_with_proposal.rs | 8 +- testing/src/predicates/upgrade_with_vote.rs | 8 +- .../tests_1/upgrade_task_with_consensus.rs | 10 +- .../tests/tests_1/upgrade_task_with_vote.rs | 8 +- 10 files changed, 50 insertions(+), 199 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 1599f9683d..1a459771af 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -229,7 +229,6 @@ impl> CreateTaskState spawned_tasks: BTreeMap::new(), formed_upgrade_certificate: None, proposal_cert: None, - decided_upgrade_cert: None, version: Arc::clone(&handle.hotshot.version), output_event_stream: handle.hotshot.external_event_stream.0.clone(), current_proposal: None, diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 4596a2115e..c8ae196f6f 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -39,9 +39,9 @@ use super::ConsensusTaskState; use crate::{ events::HotShotEvent, helpers::{ - broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, - temp_validate_proposal_safety_and_liveness, update_view, validate_proposal_view_and_certs, - AnyhowTracing, SEND_VIEW_CHANGE_EVENT, + broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, update_view, + validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, AnyhowTracing, + SEND_VIEW_CHANGE_EVENT, }, }; @@ -156,7 +156,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( consensus: OuterConsensus, delay: u64, formed_upgrade_certificate: Option>, - decided_upgrade_cert: Option>, + decided_upgrade_certificate: Arc>>>, commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, @@ -182,11 +182,14 @@ pub async fn publish_proposal_from_commitment_and_metadata( .upgrade_certificate() .or(formed_upgrade_certificate); - if !proposal_upgrade_certificate - .clone() - .is_some_and(|cert| cert.temp_is_relevant(view, decided_upgrade_cert).is_ok()) - { - proposal_upgrade_certificate = None; + if let Some(cert) = proposal_upgrade_certificate.clone() { + if cert + .is_relevant(view, Arc::clone(&decided_upgrade_certificate)) + .await + .is_err() + { + proposal_upgrade_certificate = None; + } } // We only want to proposal to be attached if any of them are valid. @@ -230,7 +233,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( } /// Publishes a proposal if there exists a value which we can propose from. Specifically, we must have either -/// `commitment_and_metadata`, or a `decided_upgrade_cert`. +/// `commitment_and_metadata`, or a `decided_upgrade_certificate`. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] pub async fn publish_proposal_if_able( @@ -242,7 +245,7 @@ pub async fn publish_proposal_if_able( consensus: OuterConsensus, delay: u64, formed_upgrade_certificate: Option>, - decided_upgrade_cert: Option>, + decided_upgrade_certificate: Arc>>>, commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, @@ -258,7 +261,7 @@ pub async fn publish_proposal_if_able( consensus, delay, formed_upgrade_certificate, - decided_upgrade_cert, + decided_upgrade_certificate, commitment_and_metadata, proposal_cert, instance_state, @@ -454,7 +457,7 @@ pub(crate) async fn handle_quorum_proposal_recv = ( <::SignatureKey as SignatureKey>::PrivateKey, - Option>, + Arc>>>, Arc<::Membership>, Sender>>, ); @@ -665,7 +666,7 @@ pub async fn update_state_and_vote_if_able> { /// last View Sync Certificate or Timeout Certificate this node formed. pub proposal_cert: Option>, - /// most recent decided upgrade certificate - pub decided_upgrade_cert: Option>, - /// Globally shared reference to the current network version. pub version: Arc>, @@ -211,7 +208,7 @@ impl> ConsensusTaskState OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.round_start_delay, self.formed_upgrade_certificate.clone(), - self.decided_upgrade_cert.clone(), + Arc::clone(&self.decided_upgrade_certificate), self.payload_commitment_and_metadata.clone(), self.proposal_cert.clone(), Arc::clone(&self.instance_state), @@ -242,7 +239,7 @@ impl> ConsensusTaskState if proposal.view_number() != view { return; } - let upgrade = self.decided_upgrade_cert.clone(); + let upgrade = Arc::clone(&self.decided_upgrade_certificate); let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); @@ -500,7 +497,7 @@ impl> ConsensusTaskState // If we have a decided upgrade certificate, // we may need to upgrade the protocol version on a view change. - if let Some(ref cert) = self.decided_upgrade_cert { + if let Some(cert) = self.decided_upgrade_certificate.read().await.clone() { if new_view == cert.data.new_version_first_view { warn!( "Updating version based on a decided upgrade cert: {:?}", diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 203659e59d..324dd65d05 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -163,10 +163,11 @@ impl Default for LeafChainTraversalOutcome { pub async fn decide_from_proposal( proposal: &QuorumProposal, consensus: OuterConsensus, - existing_upgrade_cert: &Option>, + existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, ) -> LeafChainTraversalOutcome { let consensus_reader = consensus.read().await; + let existing_upgrade_cert_reader = existing_upgrade_cert.read().await; let view_number = proposal.view_number(); let parent_view_number = proposal.justify_qc.view_number(); let old_anchor_view = consensus_reader.last_decided_view(); @@ -221,7 +222,7 @@ pub async fn decide_from_proposal( // Check if there's a new upgrade certificate available. if let Some(cert) = leaf.upgrade_certificate() { - if leaf.upgrade_certificate() != *existing_upgrade_cert { + if leaf.upgrade_certificate() != *existing_upgrade_cert_reader { if cert.data.decide_by < view_number { warn!("Failed to decide an upgrade certificate in time. Ignoring."); } else { @@ -335,151 +336,6 @@ pub(crate) async fn parent_leaf_and_state( Ok((parent_leaf, Arc::clone(state))) } -// TODO: Replace this function with `validate_proposal_safety_and_liveness` after the following -// issue is done: -// https://github.com/EspressoSystems/HotShot/issues/3357. -/// Validate the state and safety and liveness of a proposal then emit -/// a `QuorumProposalValidated` event. -/// -/// # Errors -/// If any validation or state update fails. -/// -/// TODO - This should just take the QuorumProposalRecv task state after -/// we merge the dependency tasks. -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -#[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] -#[cfg(not(feature = "dependency-tasks"))] -pub async fn temp_validate_proposal_safety_and_liveness( - proposal: Proposal>, - parent_leaf: Leaf, - consensus: OuterConsensus, - decided_upgrade_certificate: Option>, - quorum_membership: Arc, - view_leader_key: TYPES::SignatureKey, - event_stream: Sender>>, - sender: TYPES::SignatureKey, - event_sender: Sender>, - id: u64, -) -> Result<()> { - let view_number = proposal.data.view_number(); - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(), - "Proposed leaf does not extend the parent leaf." - ); - - let state = Arc::new( - >::from_header(&proposal.data.block_header), - ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; - - if let Err(e) = consensus - .write() - .await - .update_validated_state_map(view_number, view.clone()) - { - tracing::trace!("{e:?}"); - } - consensus - .write() - .await - .update_saved_leaves(proposed_leaf.clone()); - - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; - - // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - // - // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: - // - // proposal.validate_signature(&quorum_membership)?; - // - // in a future PR. - ensure!( - view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), - "Could not verify proposal." - ); - - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; - - // Validate that the upgrade certificate is re-attached, if we saw one on the parent - proposed_leaf.temp_extends_upgrade(&parent_leaf, &decided_upgrade_certificate)?; - - let justify_qc = proposal.data.justify_qc.clone(); - // Create a positive vote if either liveness or safety check - // passes. - - // Liveness check. - let read_consensus = consensus.read().await; - let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = read_consensus.visit_leaf_ancestors( - justify_qc.view_number(), - Terminator::Inclusive(read_consensus.locked_view()), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number() != read_consensus.locked_view() - }, - ); - let safety_check = outcome.is_ok(); - - ensure!(safety_check || liveness_check, { - if let Err(e) = outcome { - broadcast_event( - Event { - view_number, - event: EventType::Error { error: Arc::new(e) }, - }, - &event_sender, - ) - .await; - } - - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) - }); - - // We accept the proposal, notify the application layer - - broadcast_event( - Event { - view_number, - event: EventType::QuorumProposal { - proposal: proposal.clone(), - sender, - }, - }, - &event_sender, - ) - .await; - // Notify other tasks - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), - parent_leaf, - )), - &event_stream, - ) - .await; - - Ok(()) -} - /// Validate the state and safety and liveness of a proposal then emit /// a `QuorumProposalValidated` event. /// diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index f251eef6ba..6f29c5f701 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -30,7 +30,6 @@ pub(crate) async fn handle_quorum_proposal_validated< sender: &Sender>>, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { - let decided_upgrade_certificate_read = task_state.decided_upgrade_certificate.read().await; let LeafChainTraversalOutcome { new_locked_view_number, new_decided_view_number, @@ -42,11 +41,10 @@ pub(crate) async fn handle_quorum_proposal_validated< } = decide_from_proposal( proposal, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &decided_upgrade_certificate_read, + Arc::clone(&task_state.decided_upgrade_certificate), &task_state.public_key, ) .await; - drop(decided_upgrade_certificate_read); if let Some(cert) = decided_upgrade_cert.clone() { let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; diff --git a/testing/src/predicates/upgrade_with_consensus.rs b/testing/src/predicates/upgrade_with_consensus.rs index a733c8e56a..fbeda224fa 100644 --- a/testing/src/predicates/upgrade_with_consensus.rs +++ b/testing/src/predicates/upgrade_with_consensus.rs @@ -28,7 +28,7 @@ impl std::fmt::Debug for UpgradeCertPredicate { #[async_trait] impl Predicate for UpgradeCertPredicate { async fn evaluate(&self, input: &ConsensusTaskTestState) -> PredicateResult { - let upgrade_cert = input.decided_upgrade_cert.clone(); + let upgrade_cert = input.decided_upgrade_certificate.read().await.clone(); PredicateResult::from((self.check)(upgrade_cert.into())) } @@ -37,14 +37,14 @@ impl Predicate for UpgradeCertPredicate { } } -pub fn no_decided_upgrade_cert() -> Box { - let info = "expected decided_upgrade_cert to be None".to_string(); +pub fn no_decided_upgrade_certificate() -> Box { + let info = "expected decided_upgrade_certificate to be None".to_string(); let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); Box::new(UpgradeCertPredicate { info, check }) } -pub fn decided_upgrade_cert() -> Box { - let info = "expected decided_upgrade_cert to be Some(_)".to_string(); +pub fn decided_upgrade_certificate() -> Box { + let info = "expected decided_upgrade_certificate to be Some(_)".to_string(); let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); Box::new(UpgradeCertPredicate { info, check }) } diff --git a/testing/src/predicates/upgrade_with_proposal.rs b/testing/src/predicates/upgrade_with_proposal.rs index 1af9a8dd42..f743ae55bb 100644 --- a/testing/src/predicates/upgrade_with_proposal.rs +++ b/testing/src/predicates/upgrade_with_proposal.rs @@ -37,14 +37,14 @@ impl Predicate for UpgradeCertPredicate { } } -pub fn no_decided_upgrade_cert() -> Box { - let info = "expected decided_upgrade_cert to be None".to_string(); +pub fn no_decided_upgrade_certificate() -> Box { + let info = "expected decided_upgrade_certificate to be None".to_string(); let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); Box::new(UpgradeCertPredicate { info, check }) } -pub fn decided_upgrade_cert() -> Box { - let info = "expected decided_upgrade_cert to be Some(_)".to_string(); +pub fn decided_upgrade_certificate() -> Box { + let info = "expected decided_upgrade_certificate to be Some(_)".to_string(); let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); Box::new(UpgradeCertPredicate { info, check }) } diff --git a/testing/src/predicates/upgrade_with_vote.rs b/testing/src/predicates/upgrade_with_vote.rs index 12bcf72c10..89afe1786d 100644 --- a/testing/src/predicates/upgrade_with_vote.rs +++ b/testing/src/predicates/upgrade_with_vote.rs @@ -37,14 +37,14 @@ impl Predicate for UpgradeCertPredicate { } } -pub fn no_decided_upgrade_cert() -> Box { - let info = "expected decided_upgrade_cert to be None".to_string(); +pub fn no_decided_upgrade_certificate() -> Box { + let info = "expected decided_upgrade_certificate to be None".to_string(); let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); Box::new(UpgradeCertPredicate { info, check }) } -pub fn decided_upgrade_cert() -> Box { - let info = "expected decided_upgrade_cert to be Some(_)".to_string(); +pub fn decided_upgrade_certificate() -> Box { + let info = "expected decided_upgrade_certificate to be Some(_)".to_string(); let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); Box::new(UpgradeCertPredicate { info, check }) } diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index 8183e37fed..d17e9ae859 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -130,7 +130,7 @@ async fn test_upgrade_task_vote() { quorum_proposal_validated(), exact(QuorumVoteSend(votes[1].clone())), ], - task_state_asserts: vec![no_decided_upgrade_cert()], + task_state_asserts: vec![no_decided_upgrade_certificate()], }, Expectations { output_asserts: vec![ @@ -139,7 +139,7 @@ async fn test_upgrade_task_vote() { quorum_proposal_validated(), exact(QuorumVoteSend(votes[2].clone())), ], - task_state_asserts: vec![no_decided_upgrade_cert()], + task_state_asserts: vec![no_decided_upgrade_certificate()], }, Expectations { output_asserts: vec![ @@ -149,7 +149,7 @@ async fn test_upgrade_task_vote() { leaf_decided(), exact(QuorumVoteSend(votes[3].clone())), ], - task_state_asserts: vec![no_decided_upgrade_cert()], + task_state_asserts: vec![no_decided_upgrade_certificate()], }, Expectations { output_asserts: vec![ @@ -159,7 +159,7 @@ async fn test_upgrade_task_vote() { leaf_decided(), exact(QuorumVoteSend(votes[4].clone())), ], - task_state_asserts: vec![no_decided_upgrade_cert()], + task_state_asserts: vec![no_decided_upgrade_certificate()], }, Expectations { output_asserts: vec![ @@ -169,7 +169,7 @@ async fn test_upgrade_task_vote() { upgrade_decided(), leaf_decided(), ], - task_state_asserts: vec![decided_upgrade_cert()], + task_state_asserts: vec![decided_upgrade_certificate()], }, ]; diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 9859b8f9fd..2cc4124da6 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -139,7 +139,7 @@ async fn test_upgrade_task_with_vote() { validated_state_updated(), quorum_vote_send(), ], - vec![no_decided_upgrade_cert()], + vec![no_decided_upgrade_certificate()], ), Expectations::from_outputs_and_task_states( all_predicates![ @@ -152,7 +152,7 @@ async fn test_upgrade_task_with_vote() { validated_state_updated(), quorum_vote_send(), ], - vec![no_decided_upgrade_cert()], + vec![no_decided_upgrade_certificate()], ), Expectations::from_outputs_and_task_states( all_predicates![ @@ -165,7 +165,7 @@ async fn test_upgrade_task_with_vote() { validated_state_updated(), quorum_vote_send(), ], - vec![no_decided_upgrade_cert()], + vec![no_decided_upgrade_certificate()], ), Expectations::from_outputs_and_task_states( all_predicates![ @@ -174,7 +174,7 @@ async fn test_upgrade_task_with_vote() { exact(LastDecidedViewUpdated(ViewNumber::new(3))), leaf_decided(), ], - vec![decided_upgrade_cert()], + vec![decided_upgrade_certificate()], ), ]; From 12bfc8509945bf0c660cbf99fe878d22bc365471 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 11 Jul 2024 16:33:13 +0200 Subject: [PATCH 1120/1393] Fix possible bug in tests (#3434) --- testing/src/overall_safety_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 4751518877..add9853a14 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -393,7 +393,7 @@ impl RoundResult { /// check if the test failed due to not enough nodes getting through enough views pub fn check_if_failed(&mut self, threshold: usize, total_num_nodes: usize) -> bool { let num_failed = self.failed_nodes.len(); - total_num_nodes - num_failed >= threshold + total_num_nodes - num_failed < threshold } /// determines whether or not the round passes /// also do a safety check From 87d4736e76b909614dd1bd5bdd34ed7aabcc71d4 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Fri, 12 Jul 2024 09:27:47 +0200 Subject: [PATCH 1121/1393] Increase delays to 5 seconds (#3333) * Increase delays to 5 seconds * Adjust tests to new delay * Try longer view timeout * Bring back next view timout to 10s * 30 seconds view timeout * Commit staging config with adjusted delays * Revert unwanted change * Add new line * Use a proper toml config file * Set default secondary network delay * Add the default value for data request delay * Adjust `next_view_timeout` and `view_sync_timeout` in the config file --- examples/infra/mod.rs | 10 +- .../src/traits/networking/combined_network.rs | 10 +- orchestrator/src/config.rs | 17 ++-- orchestrator/staging-config.toml | 95 +++++++++++++++++++ types/src/constants.rs | 6 ++ 5 files changed, 122 insertions(+), 16 deletions(-) create mode 100644 orchestrator/staging-config.toml diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 41cb3f7306..c8ca5e626d 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -39,9 +39,7 @@ use hotshot_example_types::{ use hotshot_orchestrator::{ self, client::{BenchResults, OrchestratorClient, ValidatorArgs}, - config::{ - BuilderType, CombinedNetworkConfig, NetworkConfig, NetworkConfigFile, NetworkConfigSource, - }, + config::{BuilderType, NetworkConfig, NetworkConfigFile, NetworkConfigSource}, }; use hotshot_testing::block_builder::{ BuilderTask, RandomBuilderImplementation, SimpleBuilderImplementation, @@ -801,10 +799,10 @@ where .await; // Create our combined network config - let CombinedNetworkConfig { delay_duration }: CombinedNetworkConfig = config - .clone() + let delay_duration = config .combined_network_config - .expect("combined network config not specified"); + .as_ref() + .map(|config| config.delay_duration); // Create our combined network let network = diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 30cf0c7f02..250d0a1757 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -27,8 +27,8 @@ use hotshot_types::traits::network::{ use hotshot_types::{ boxed_sync, constants::{ - COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_MIN_PRIMARY_FAILURES, - COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, + COMBINED_NETWORK_CACHE_SIZE, COMBINED_NETWORK_DELAY_DURATION, + COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }, data::ViewNumber, traits::{ @@ -89,7 +89,7 @@ impl CombinedNetworks { pub fn new( primary_network: PushCdnNetwork, secondary_network: Libp2pNetwork, - delay_duration: Duration, + delay_duration: Option, ) -> Self { // Create networks from the ones passed in let networks = Arc::from(UnderlyingCombinedNetworks( @@ -104,7 +104,9 @@ impl CombinedNetworks { ))), primary_fail_counter: Arc::new(AtomicU64::new(0)), primary_down: Arc::new(AtomicBool::new(false)), - delay_duration: Arc::new(RwLock::new(delay_duration)), + delay_duration: Arc::new(RwLock::new( + delay_duration.unwrap_or(Duration::from_millis(COMBINED_NETWORK_DELAY_DURATION)), + )), delayed_tasks_channels: Arc::default(), no_delay_counter: Arc::new(AtomicU64::new(0)), } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index e3924482a5..c29607f2e8 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -8,7 +8,9 @@ use std::{ vec, }; +use crate::client::OrchestratorClient; use clap::ValueEnum; +use hotshot_types::constants::REQUEST_DATA_DELAY; use hotshot_types::{ traits::signature_key::SignatureKey, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; @@ -20,8 +22,6 @@ use toml; use tracing::{error, info}; use vec1::Vec1; -use crate::client::OrchestratorClient; - /// Configuration describing a libp2p node #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { @@ -498,7 +498,10 @@ impl From> for NetworkConfig { next_view_timeout: val.config.next_view_timeout, view_sync_timeout: val.config.view_sync_timeout, builder_timeout: val.config.builder_timeout, - data_request_delay: val.config.data_request_delay, + data_request_delay: val + .config + .data_request_delay + .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), seed: val.seed, transaction_size: val.transaction_size, libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { @@ -579,7 +582,7 @@ pub struct HotShotConfigFile { /// The maximum amount of time a leader can wait to get a block from a builder pub builder_timeout: Duration, /// Time to wait until we request data associated with a proposal - pub data_request_delay: Duration, + pub data_request_delay: Option, /// Builder API base URL #[serde(default = "default_builder_urls")] pub builder_urls: Vec1, @@ -701,7 +704,9 @@ impl From> for HotShotConfig { start_delay: val.start_delay, num_bootstrap: val.num_bootstrap, builder_timeout: val.builder_timeout, - data_request_delay: val.data_request_delay, + data_request_delay: val + .data_request_delay + .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), builder_urls: val.builder_urls, start_proposing_view: val.upgrade.start_proposing_view, stop_proposing_view: val.upgrade.stop_proposing_view, @@ -778,7 +783,7 @@ impl Default for HotShotConfigFile { start_delay: 1, num_bootstrap: 5, builder_timeout: Duration::from_secs(10), - data_request_delay: Duration::from_millis(200), + data_request_delay: Some(Duration::from_millis(REQUEST_DATA_DELAY)), builder_urls: default_builder_urls(), upgrade: UpgradeConfig::default(), } diff --git a/orchestrator/staging-config.toml b/orchestrator/staging-config.toml new file mode 100644 index 0000000000..a6973ae69b --- /dev/null +++ b/orchestrator/staging-config.toml @@ -0,0 +1,95 @@ +rounds = 10 +indexed_da = false +transactions_per_round = 10 +manual_start_password = "tuktu6-tohnaX-gihxib" +node_index = 0 +seed = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 +] +transaction_size = 100 +start_delay_seconds = 10 +builder = "Simple" + +[libp2p_config] +bootstrap_mesh_n_high = 4 +bootstrap_mesh_n_low = 4 +bootstrap_mesh_outbound_min = 2 +bootstrap_mesh_n = 4 +mesh_n_high = 4 +mesh_n_low = 4 +mesh_outbound_min = 2 +mesh_n = 4 +online_time = 10 +num_txn_per_round = 0 +server_mode = false + +[config] +start_threshold = [ 8, 10 ] +num_nodes_with_stake = 10 +num_nodes_without_stake = 0 +staked_da_nodes = 10 +non_staked_da_nodes = 0 +fixed_leader_for_gpuvid = 1 +next_view_timeout = 15_000 +timeout_ratio = [ 11, 10 ] +round_start_delay = 1 +start_delay = 10_000 +num_bootstrap = 5 +builder_urls = [ "https://builder.staging.testnet.espresso.network/" ] + +[config.view_sync_timeout] +secs = 15 +nanos = 0 + +[config.builder_timeout] +secs = 8 +nanos = 0 + +[config.data_request_delay] +secs = 5 +nanos = 0 + +[config.upgrade] +start_proposing_view = 1 +stop_proposing_view = 0 +start_voting_view = 1 +stop_voting_view = 0 +start_proposing_time = 1 +stop_proposing_time = 0 +start_voting_time = 1 +stop_voting_time = 0 + +[combined_network_config.delay_duration] +secs = 5 +nanos = 0 diff --git a/types/src/constants.rs b/types/src/constants.rs index 9b66b81d19..295b6011dd 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -17,6 +17,12 @@ pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; /// the number of messages to send over the secondary network without delay before re-attempting the (presumed down) primary network pub const COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL: u64 = 50; +/// the default delay duration value in milliseconds of sending on the secondary in the combined networks +pub const COMBINED_NETWORK_DELAY_DURATION: u64 = 5000; + +/// The default network data request delay in milliseconds +pub const REQUEST_DATA_DELAY: u64 = 5000; + /// Default channel size for consensus event sharing pub const EVENT_CHANNEL_SIZE: usize = 100_000; From d03a5bd275353c50211b1c1bd910dae10135ec4c Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 12 Jul 2024 16:42:28 -0400 Subject: [PATCH 1122/1393] Add byzantine integration tests (#3428) --- hotshot/src/lib.rs | 224 +++++++++++--------- hotshot/src/tasks/mod.rs | 177 ++++++++++------ macros/src/lib.rs | 2 +- testing/src/helpers.rs | 5 +- testing/src/test_builder.rs | 142 ++++++++++--- testing/src/test_launcher.rs | 2 +- testing/src/test_runner.rs | 103 +++++---- testing/tests/tests_1/libp2p.rs | 12 +- testing/tests/tests_1/network_task.rs | 8 +- testing/tests/tests_1/test_success.rs | 28 +++ testing/tests/tests_2/catchup.rs | 20 +- testing/tests/tests_2/push_cdn.rs | 4 +- testing/tests/tests_5/combined_network.rs | 20 +- testing/tests/tests_5/timeout.rs | 8 +- testing/tests/tests_5/unreliable_network.rs | 36 ++-- types/src/consensus.rs | 28 +-- 16 files changed, 524 insertions(+), 295 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index bd5c9fddf4..fa57e2a187 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -582,12 +582,97 @@ impl> SystemContext { handle } +} + +/// An async broadcast channel +type Channel = (Sender>, Receiver>); + +#[async_trait] +/// Trait for handling messages for a node with a twin copy of consensus +pub trait TwinsHandlerState +where + Self: std::fmt::Debug + Send + Sync, + TYPES: NodeType, + I: NodeImplementation, +{ + /// Handle a message sent to the twin from the network task, forwarding it to one of the two twins. + async fn send_handler( + &mut self, + event: &HotShotEvent, + ) -> Vec, HotShotEvent>>; + + /// Handle a message from either twin, forwarding it to the network task + async fn recv_handler( + &mut self, + event: &Either, HotShotEvent>, + ) -> Vec>; + + /// Fuse two channels into a single channel + /// + /// Note: the channels are fused using two async loops, whose `JoinHandle`s are dropped. + fn fuse_channels( + &'static mut self, + left: Channel>, + right: Channel>, + ) -> Channel> { + let send_state = Arc::new(RwLock::new(self)); + let recv_state = Arc::clone(&send_state); + + let (left_sender, mut left_receiver) = (left.0, left.1); + let (right_sender, mut right_receiver) = (right.0, right.1); + + // channel to the network task + let (sender_to_network, network_task_receiver) = broadcast(EVENT_CHANNEL_SIZE); + // channel from the network task + let (network_task_sender, mut receiver_from_network): Channel> = + broadcast(EVENT_CHANNEL_SIZE); + + let _recv_loop_handle = async_spawn(async move { + loop { + let msg = match select(left_receiver.recv(), right_receiver.recv()).await { + Either::Left(msg) => Either::Left(msg.0.unwrap().as_ref().clone()), + Either::Right(msg) => Either::Right(msg.0.unwrap().as_ref().clone()), + }; + + let mut state = recv_state.write().await; + let mut result = state.recv_handler(&msg).await; + + while let Some(event) = result.pop() { + let _ = sender_to_network.broadcast(event.into()).await; + } + } + }); + + let _send_loop_handle = async_spawn(async move { + loop { + if let Ok(msg) = receiver_from_network.recv().await { + let mut state = send_state.write().await; + + let mut result = state.send_handler(&msg).await; + + while let Some(event) = result.pop() { + match event { + Either::Left(msg) => { + let _ = left_sender.broadcast(msg.into()).await; + } + Either::Right(msg) => { + let _ = right_sender.broadcast(msg.into()).await; + } + } + } + } + } + }); + + (network_task_sender, network_task_receiver) + } #[allow(clippy::too_many_arguments)] /// Spawn all tasks that operate on [`SystemContextHandle`]. /// /// For a list of which tasks are being spawned, see this module's documentation. - pub async fn spawn_twin_handles( + async fn spawn_twin_handles( + &'static mut self, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, nonce: u64, @@ -599,7 +684,7 @@ impl> SystemContext { storage: I::Storage, auction_results_provider: I::AuctionResultsProvider, ) -> (SystemContextHandle, SystemContextHandle) { - let left_system_context = Self::new( + let left_system_context = SystemContext::new( public_key.clone(), private_key.clone(), nonce, @@ -611,7 +696,7 @@ impl> SystemContext { storage.clone(), auction_results_provider.clone(), ); - let right_system_context = Self::new( + let right_system_context = SystemContext::new( public_key, private_key, nonce, @@ -682,7 +767,7 @@ impl> SystemContext { add_consensus_tasks::(&mut right_handle).await; // fuse the event streams from both handles before initializing the network tasks - let fused_internal_event_stream = fuse_channels::, RandomTwinsHandler>( + let fused_internal_event_stream = self.fuse_channels( (left_internal_sender, left_internal_receiver), (right_internal_sender, right_internal_receiver), ); @@ -703,121 +788,62 @@ impl> SystemContext { } } -/// An async broadcast channel -type Channel = (Sender>, Receiver>); - -#[async_trait] -/// Trait for handling messages for a node with a twin copy of consensus -pub trait TwinsHandlerState -where - Self: Sync, - MESSAGE: Send + Sync, -{ - /// Initialize the state - fn new() -> Self; - - /// Handle a message sent to the twin from the outside, forwarding it to one of the two twins. - async fn send_handler(&mut self, event: &MESSAGE) -> Either; - - /// Wrapper for `send_handler`. - async fn send_handler_arc( - lock: &Arc>, - event: &MESSAGE, - ) -> Either { - let mut state = lock.write().await; - - state.send_handler(event).await - } - - /// Handle a message from either twin, forwarding it to the outside - async fn recv_handler(&mut self, event: &Either) -> MESSAGE; - - /// Wrapper for `recv_handler`. - async fn recv_handler_arc( - lock: &Arc>, - event: &Either, - ) -> MESSAGE { - let mut state = lock.write().await; - - state.recv_handler(event).await - } -} - +#[derive(Debug)] /// A `TwinsHandlerState` that randomly forwards a message to either twin, /// and returns messages from both. pub struct RandomTwinsHandler; #[async_trait] -impl TwinsHandlerState for RandomTwinsHandler { - fn new() -> Self { - RandomTwinsHandler - } - async fn send_handler(&mut self, event: &MESSAGE) -> Either { +impl> TwinsHandlerState + for RandomTwinsHandler +{ + async fn send_handler( + &mut self, + event: &HotShotEvent, + ) -> Vec, HotShotEvent>> { let random: bool = rand::thread_rng().gen(); #[allow(clippy::match_bool)] match random { - true => Either::Left(event.clone()), - false => Either::Right(event.clone()), + true => vec![Either::Left(event.clone())], + false => vec![Either::Right(event.clone())], } } - async fn recv_handler(&mut self, event: &Either) -> MESSAGE { + async fn recv_handler( + &mut self, + event: &Either, HotShotEvent>, + ) -> Vec> { match event { - Either::Left(msg) | Either::Right(msg) => msg.clone(), + Either::Left(msg) | Either::Right(msg) => vec![msg.clone()], } } } -/// Fuse two channels into a single channel, using handlers provided by the `STATE` type. -/// -/// Note: the channels are fused using two async loops, whose `JoinHandle`s are dropped. -fn fuse_channels( - left: Channel, - right: Channel, -) -> Channel -where - MESSAGE: Clone + std::marker::Send + std::marker::Sync + 'static, - STATE: TwinsHandlerState + Send + 'static, -{ - let send_state = Arc::new(RwLock::new(STATE::new())); - let recv_state = Arc::clone(&send_state); - - let (left_sender, mut left_receiver) = (left.0, left.1); - let (right_sender, mut right_receiver) = (right.0, right.1); +#[derive(Debug)] +/// A `TwinsHandlerState` that forwards each message to both twins, +/// and returns messages from each of them. +pub struct DoubleTwinsHandler; - let result: Channel = broadcast(EVENT_CHANNEL_SIZE); - let (result_sender, mut result_receiver) = (result.0.clone(), result.1.clone()); - - let _recv_loop_handle = async_spawn(async move { - loop { - let msg = match select(left_receiver.recv(), right_receiver.recv()).await { - Either::Left(msg) => Either::Left(msg.0.unwrap().as_ref().clone()), - Either::Right(msg) => Either::Right(msg.0.unwrap().as_ref().clone()), - }; +#[async_trait] +impl> TwinsHandlerState + for DoubleTwinsHandler +{ + async fn send_handler( + &mut self, + event: &HotShotEvent, + ) -> Vec, HotShotEvent>> { + vec![Either::Left(event.clone()), Either::Right(event.clone())] + } - let _ = result_sender - .broadcast(STATE::recv_handler_arc(&recv_state, &msg).await.into()) - .await; - } - }); - - let _send_loop_handle = async_spawn(async move { - loop { - if let Ok(msg) = result_receiver.recv().await { - match STATE::send_handler_arc(&send_state, &msg).await { - Either::Left(msg) => { - let _ = left_sender.broadcast(msg.into()).await; - } - Either::Right(msg) => { - let _ = right_sender.broadcast(msg.into()).await; - } - } - } + async fn recv_handler( + &mut self, + event: &Either, HotShotEvent>, + ) -> Vec> { + match event { + Either::Left(msg) | Either::Right(msg) => vec![msg.clone()], } - }); - - result + } } #[async_trait] diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 058d7b5621..6fc25b5b2d 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -40,7 +40,11 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; -use crate::{tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi}; +use crate::{ + tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, Memberships, + NetworkTaskRegistry, SignatureKey, SystemContext, +}; /// event for global event stream #[derive(Clone, Debug)] @@ -206,49 +210,87 @@ pub async fn add_consensus_tasks>( /// Consensus <-> [Byzantine logic layer] <-> Network pub trait EventTransformerState> where - Self: Sized + Send + Sync + 'static, + Self: std::fmt::Debug + Send + Sync + 'static, { - /// Initialize the state - fn new() -> Self; - /// modify incoming messages from the network - fn transform_in(&mut self, event: &HotShotEvent) -> HotShotEvent; + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec>; /// modify outgoing messages from the network - fn transform_out(&mut self, event: &HotShotEvent) -> HotShotEvent; - - /// `transform_in`, but wrapping the state in an `Arc` lock - async fn transform_in_arc( - lock: Arc>, - event: &HotShotEvent, - ) -> HotShotEvent { - let mut state = lock.write().await; - - state.transform_in(event) - } - - /// `transform_out`, but wrapping the state in an `Arc` lock - async fn transform_out_arc( - lock: Arc>, - event: &HotShotEvent, - ) -> HotShotEvent { - let mut state = lock.write().await; - - state.transform_out(event) + async fn send_handler(&mut self, event: &HotShotEvent) -> Vec>; + + #[allow(clippy::too_many_arguments)] + /// Creates a `SystemContextHandle` with the given even transformer + async fn spawn_handle( + &'static mut self, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + nonce: u64, + config: HotShotConfig, + memberships: Memberships, + network: Arc, + initializer: HotShotInitializer, + metrics: ConsensusMetricsValue, + storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, + ) -> SystemContextHandle { + let hotshot = SystemContext::new( + public_key, + private_key, + nonce, + config, + memberships, + network, + initializer, + metrics, + storage, + auction_results_provider, + ); + let consensus_registry = ConsensusTaskRegistry::new(); + let network_registry = NetworkTaskRegistry::new(); + + let output_event_stream = hotshot.external_event_stream.clone(); + let internal_event_stream = hotshot.internal_event_stream.clone(); + + let mut handle = SystemContextHandle { + consensus_registry, + network_registry, + output_event_stream: output_event_stream.clone(), + internal_event_stream: internal_event_stream.clone(), + hotshot: Arc::clone(&hotshot), + storage: Arc::clone(&hotshot.storage), + network: Arc::clone(&hotshot.network), + memberships: Arc::clone(&hotshot.memberships), + }; + + add_consensus_tasks::(&mut handle).await; + self.add_network_tasks(&mut handle).await; + + handle } /// Add byzantine network tasks with the trait - async fn add_network_tasks(handle: &mut SystemContextHandle) { - let state_in = Arc::new(RwLock::new(Self::new())); + async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { + let state_in = Arc::new(RwLock::new(self)); let state_out = Arc::clone(&state_in); - - // channel between the task spawned in this function and the network tasks. + // channels between the task spawned in this function and the network tasks. // with this, we can control exactly what events the network tasks see. - let (sender, mut receiver) = broadcast(EVENT_CHANNEL_SIZE); + + // channel to the network task + let (sender_to_network, network_task_receiver) = broadcast(EVENT_CHANNEL_SIZE); + // channel from the network task + let (network_task_sender, mut receiver_from_network) = broadcast(EVENT_CHANNEL_SIZE); + // create a copy of the original receiver + let (original_sender, mut original_receiver) = ( + handle.internal_event_stream.0.clone(), + handle.internal_event_stream.1.activate_cloned(), + ); // replace the internal event stream with the one we just created, // so that the network tasks are spawned with our channel. - let mut internal_event_stream = (sender.clone(), receiver.clone().deactivate()); + let mut internal_event_stream = ( + network_task_sender.clone(), + network_task_receiver.clone().deactivate(), + ); std::mem::swap( &mut internal_event_stream, &mut handle.internal_event_stream, @@ -257,52 +299,67 @@ where // spawn the network tasks with our newly-created channel add_network_tasks::(handle).await; - // create a copy of the original receiver - let (original_sender, mut original_receiver) = ( - internal_event_stream.0.clone(), - internal_event_stream.1.activate_cloned(), + std::mem::swap( + &mut internal_event_stream, + &mut handle.internal_event_stream, ); // spawn a task to listen on the (original) internal event stream, // and broadcast the transformed events to the replacement event stream we just created. - let out_handle = async_spawn(async move { + let send_handle = async_spawn(async move { loop { if let Ok(msg) = original_receiver.recv().await { - let _ = sender - .broadcast( - Self::transform_out_arc(Arc::clone(&state_out), &msg) - .await - .into(), - ) - .await; + let mut state = state_out.write().await; + + let mut results = state.send_handler(&msg).await; + + while let Some(event) = results.pop() { + let _ = sender_to_network.broadcast(event.into()).await; + } } } }); // spawn a task to listen on the newly created event stream, // and broadcast the transformed events to the original internal event stream - let in_handle = async_spawn(async move { + let recv_handle = async_spawn(async move { loop { - if let Ok(msg) = receiver.recv().await { - let _ = original_sender - .broadcast( - Self::transform_in_arc(Arc::clone(&state_in), &msg) - .await - .into(), - ) - .await; + if let Ok(msg) = receiver_from_network.recv().await { + let mut state = state_in.write().await; + + let mut results = state.recv_handler(&msg).await; + + while let Some(event) = results.pop() { + let _ = original_sender.broadcast(event.into()).await; + } } } }); - handle.network_registry.register(out_handle); - handle.network_registry.register(in_handle); + handle.network_registry.register(send_handle); + handle.network_registry.register(recv_handle); + } +} - // put the old channel back. - std::mem::swap( - &mut internal_event_stream, - &mut handle.internal_event_stream, - ); +#[derive(Debug)] +/// An `EventHandlerState` that doubles the `QuorumVoteSend` and `QuorumProposalSend` events +pub struct DoubleProposeVote; + +#[async_trait] +impl> EventTransformerState + for DoubleProposeVote +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + match event { + HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { + vec![event.clone(), event.clone()] + } + _ => vec![event.clone()], + } } } diff --git a/macros/src/lib.rs b/macros/src/lib.rs index d6f8adb162..8811f8b172 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -118,7 +118,7 @@ impl TestData { async fn #test_name() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - (#metadata).gen_launcher::<#ty, #imply>(0).launch().run_test::().await; + TestDescription::<#ty, #imply>::gen_launcher((#metadata), 0).launch().run_test::().await; } } } diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index c7c54bdb55..66cb0781d8 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -46,9 +46,10 @@ pub async fn build_system_handle( Sender>>, Receiver>>, ) { - let builder = TestDescription::default_multiple_rounds(); + let builder: TestDescription = + TestDescription::default_multiple_rounds(); - let launcher = builder.gen_launcher::(node_id); + let launcher = builder.gen_launcher(node_id); let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 37756528bf..909ccaa898 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,12 +1,18 @@ -use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; -use hotshot::traits::{NetworkReliability, TestableNodeImplementation}; +use hotshot::{ + tasks::EventTransformerState, + traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, + types::SystemContextHandle, + HotShotInitializer, Memberships, SystemContext, TwinsHandlerState, +}; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, storage_types::TestStorage, }; use hotshot_types::{ - traits::node_implementation::NodeType, ExecutionType, HotShotConfig, ValidatorConfig, + consensus::ConsensusMetricsValue, traits::node_implementation::NodeType, ExecutionType, + HotShotConfig, ValidatorConfig, }; use tide_disco::Url; use vec1::Vec1; @@ -18,7 +24,7 @@ use super::{ }; use crate::{ spinning_task::SpinningTaskDescription, - test_launcher::{ResourceGenerators, TestLauncher}, + test_launcher::{Network, ResourceGenerators, TestLauncher}, view_sync_task::ViewSyncTaskDescription, }; /// data describing how a round should be timed. @@ -43,8 +49,8 @@ pub struct TimingData { } /// metadata describing a test -#[derive(Clone, Debug)] -pub struct TestDescription { +#[derive(Clone)] +pub struct TestDescription> { /// Total number of staked nodes in the test pub num_nodes_with_stake: usize, /// Total number of non-staked nodes in the test @@ -78,6 +84,97 @@ pub struct TestDescription { pub builders: Vec1, /// description of the solver to run pub solver: FakeSolverApiDescription, + /// nodes with byzantine behaviour + pub behaviour: Rc Behaviour>, +} + +#[derive(Debug)] +pub enum Behaviour> { + ByzantineTwins(Box>), + Byzantine(Box>), + Standard, +} + +pub async fn create_test_handle< + TYPES: NodeType, + I: NodeImplementation, +>( + behaviour: Behaviour, + node_id: u64, + network: Network, + memberships: Memberships, + config: HotShotConfig, + storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, +) -> SystemContextHandle { + let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) + .await + .unwrap(); + + // See whether or not we should be DA + let is_da = node_id < config.da_staked_committee_size as u64; + + let validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); + + // Get key pair for certificate aggregation + let private_key = validator_config.private_key.clone(); + let public_key = validator_config.public_key.clone(); + + match behaviour { + Behaviour::ByzantineTwins(state) => { + let state = Box::leak(state); + let (left_handle, _right_handle) = state + .spawn_twin_handles( + public_key, + private_key, + node_id, + config, + memberships, + network, + initializer, + ConsensusMetricsValue::default(), + storage, + auction_results_provider, + ) + .await; + + left_handle + } + Behaviour::Byzantine(state) => { + let state = Box::leak(state); + state + .spawn_handle( + public_key, + private_key, + node_id, + config, + memberships, + network, + initializer, + ConsensusMetricsValue::default(), + storage, + auction_results_provider, + ) + .await + } + Behaviour::Standard => { + let hotshot = SystemContext::::new( + public_key, + private_key, + node_id, + config, + memberships, + network, + initializer, + ConsensusMetricsValue::default(), + storage, + auction_results_provider, + ); + + hotshot.run_tasks().await + } + } } /// Describes a possible change to builder status during test @@ -120,7 +217,7 @@ impl Default for TimingData { } } -impl TestDescription { +impl> TestDescription { /// the default metadata for a stress test #[must_use] #[allow(clippy::redundant_field_names)] @@ -128,7 +225,7 @@ impl TestDescription { let num_nodes_with_stake = 100; let num_nodes_without_stake = 0; - TestDescription { + Self { num_bootstrap_nodes: num_nodes_with_stake, num_nodes_with_stake, num_nodes_without_stake, @@ -149,17 +246,17 @@ impl TestDescription { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestDescription::default() + ..Self::default() } } /// the default metadata for multiple rounds #[must_use] #[allow(clippy::redundant_field_names)] - pub fn default_multiple_rounds() -> TestDescription { + pub fn default_multiple_rounds() -> Self { let num_nodes_with_stake = 10; let num_nodes_without_stake = 0; - TestDescription { + TestDescription:: { // TODO: remove once we have fixed the DHT timeout issue // https://github.com/EspressoSystems/HotShot/issues/2088 num_bootstrap_nodes: num_nodes_with_stake, @@ -180,17 +277,17 @@ impl TestDescription { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestDescription::default() + ..TestDescription::::default() } } /// Default setting with 20 nodes and 8 views of successful views. #[must_use] #[allow(clippy::redundant_field_names)] - pub fn default_more_nodes() -> TestDescription { + pub fn default_more_nodes() -> Self { let num_nodes_with_stake = 20; let num_nodes_without_stake = 0; - TestDescription { + Self { num_nodes_with_stake, num_nodes_without_stake, start_nodes: num_nodes_with_stake, @@ -215,12 +312,12 @@ impl TestDescription { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestDescription::default() + ..Self::default() } } } -impl Default for TestDescription { +impl> Default for TestDescription { /// by default, just a single round #[allow(clippy::redundant_field_names)] fn default() -> Self { @@ -261,23 +358,20 @@ impl Default for TestDescription { // Default to a 10% error rate. error_pct: 0.1, }, + behaviour: Rc::new(|_| Behaviour::Standard), } } } -impl TestDescription { +impl, I: TestableNodeImplementation> + TestDescription +{ /// turn a description of a test (e.g. a [`TestDescription`]) into /// a [`TestLauncher`] that can be used to launch the test. /// # Panics /// if some of the the configuration values are zero #[must_use] - pub fn gen_launcher< - TYPES: NodeType, - I: TestableNodeImplementation, - >( - self, - node_id: u64, - ) -> TestLauncher { + pub fn gen_launcher(self, node_id: u64) -> TestLauncher { let TestDescription { num_nodes_with_stake, num_bootstrap_nodes, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 5aa17143b5..0ed2907fe5 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -37,7 +37,7 @@ pub struct TestLauncher> { /// generator for resources pub resource_generator: ResourceGenerators, /// metadata used for tasks - pub metadata: TestDescription, + pub metadata: TestDescription, } impl> TestLauncher { diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index c0849c7485..519621e9d1 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -49,6 +49,7 @@ use crate::{ block_builder::{BuilderTask, TestBuilderImplementation}, completion_task::CompletionTaskDescription, spinning_task::{ChangeNode, SpinningTask, UpDown}, + test_builder::create_test_handle, test_launcher::{Network, TestLauncher}, test_task::{TestResult, TestTask}, txn_task::TxnTaskDescription, @@ -429,52 +430,62 @@ where networks_ready.push(networks_ready_future); - if self.launcher.metadata.skip_late && late_start.contains(&node_id) { - self.late_start.insert( - node_id, - LateStartNode { - network, - context: LateNodeContext::UninitializedContext(LateNodeContextParameters { - storage, - memberships, - config, - auction_results_provider, - }), - }, - ); - } else { - let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) - .await - .unwrap(); - - // See whether or not we should be DA - let is_da = node_id < config.da_staked_committee_size as u64; - - // We assign node's public key and stake value rather than read from config file since it's a test - let validator_config = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); - let hotshot = Self::add_node_with_config( - node_id, - network.clone(), - memberships, - initializer, - config, - validator_config, - storage, - auction_results_provider, - ) - .await; - if late_start.contains(&node_id) { + if late_start.contains(&node_id) { + if self.launcher.metadata.skip_late { self.late_start.insert( node_id, LateStartNode { network, - context: LateNodeContext::InitializedContext(hotshot), + context: LateNodeContext::UninitializedContext( + LateNodeContextParameters { + storage, + memberships, + config, + auction_results_provider, + }, + ), }, ); } else { - uninitialized_nodes.push((node_id, network, hotshot)); + let initializer = + HotShotInitializer::::from_genesis(TestInstanceState {}) + .await + .unwrap(); + + // See whether or not we should be DA + let is_da = node_id < config.da_staked_committee_size as u64; + + // We assign node's public key and stake value rather than read from config file since it's a test + let validator_config = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); + let hotshot = Self::add_node_with_config( + node_id, + network.clone(), + memberships, + initializer, + config, + validator_config, + storage, + auction_results_provider, + ) + .await; + self.late_start.insert( + node_id, + LateStartNode { + network, + context: LateNodeContext::InitializedContext(hotshot), + }, + ); } + } else { + uninitialized_nodes.push(( + node_id, + network, + memberships, + config, + storage, + auction_results_provider, + )); } results.push(node_id); @@ -484,8 +495,20 @@ where join_all(networks_ready).await; // Then start the necessary tasks - for (node_id, network, hotshot) in uninitialized_nodes { - let handle = hotshot.run_tasks().await; + for (node_id, network, memberships, config, storage, auction_results_provider) in + uninitialized_nodes + { + let behaviour = (self.launcher.metadata.behaviour)(node_id); + let handle = create_test_handle( + behaviour, + node_id, + network.clone(), + memberships, + config.clone(), + storage, + auction_results_provider, + ) + .await; match node_id.cmp(&(config.da_staked_committee_size as u64 - 1)) { std::cmp::Ordering::Less => { diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index 6b415191b8..e7cbb8d6f2 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -17,7 +17,7 @@ use tracing::instrument; async fn libp2p_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -35,7 +35,7 @@ async fn libp2p_network() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -48,7 +48,7 @@ async fn libp2p_network() { async fn libp2p_network_failures_2() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -82,7 +82,7 @@ async fn libp2p_network_failures_2() { metadata.overall_safety_properties.num_successful_views = 15; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -96,9 +96,9 @@ async fn libp2p_network_failures_2() { async fn test_stress_libp2p_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription::default_stress(); + let metadata: TestDescription = TestDescription::default_stress(); metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 24089edfab..2b3bd5f6df 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -34,10 +34,10 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder = TestDescription::default_multiple_rounds(); + let builder: TestDescription = TestDescription::default_multiple_rounds(); let node_id = 1; - let launcher = builder.gen_launcher::(node_id); + let launcher = builder.gen_launcher(node_id); let network = (launcher.resource_generator.channel_generator)(node_id).await; @@ -98,10 +98,10 @@ async fn test_network_storage_fail() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder = TestDescription::default_multiple_rounds(); + let builder: TestDescription = TestDescription::default_multiple_rounds(); let node_id = 1; - let launcher = builder.gen_launcher::(node_id); + let launcher = builder.gen_launcher(node_id); let network = (launcher.resource_generator.channel_generator)(node_id).await; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 07a8c7d3b6..fb2ef50d5b 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -10,6 +10,9 @@ use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, test_builder::TestDescription, }; +#[cfg(async_executor_impl = "async-std")] +use {hotshot::tasks::DoubleProposeVote, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; + cross_tests!( TestName: test_success, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -27,3 +30,28 @@ cross_tests!( } }, ); + +#[cfg(async_executor_impl = "async-std")] +cross_tests!( + TestName: twins_test_success, + Impls: [MemoryImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { match node_id { + 1 => Behaviour::Byzantine(Box::new(DoubleProposeVote)), + _ => Behaviour::Standard, + } }); + + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + } + }, +); diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 1e78574d58..69a0774dd5 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -19,7 +19,7 @@ async fn test_catchup() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata: TestDescription = TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, updown: UpDown::Up, @@ -51,7 +51,7 @@ async fn test_catchup() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -78,7 +78,7 @@ async fn test_catchup_cdn() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata: TestDescription = TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, @@ -104,7 +104,7 @@ async fn test_catchup_cdn() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -131,7 +131,7 @@ async fn test_catchup_one_node() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata: TestDescription = TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, @@ -159,7 +159,7 @@ async fn test_catchup_one_node() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -186,7 +186,7 @@ async fn test_catchup_in_view_sync() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata: TestDescription = TestDescription::default(); let catchup_nodes = vec![ ChangeNode { idx: 18, @@ -220,7 +220,7 @@ async fn test_catchup_in_view_sync() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -249,7 +249,7 @@ async fn test_catchup_reload() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata: TestDescription = TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, updown: UpDown::Up, @@ -281,7 +281,7 @@ async fn test_catchup_reload() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index cfdf798570..2959cdbdc8 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -17,7 +17,7 @@ use tracing::instrument; async fn push_cdn_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -38,7 +38,7 @@ async fn push_cdn_network() { ..TestDescription::default() }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index add4cdec8a..fec8124df7 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -21,7 +21,7 @@ async fn test_combined_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -43,7 +43,7 @@ async fn test_combined_network() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -56,7 +56,7 @@ async fn test_combined_network() { async fn test_combined_network_cdn_crash() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -89,7 +89,7 @@ async fn test_combined_network_cdn_crash() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -103,7 +103,7 @@ async fn test_combined_network_cdn_crash() { async fn test_combined_network_reup() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -143,7 +143,7 @@ async fn test_combined_network_reup() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -156,7 +156,7 @@ async fn test_combined_network_reup() { async fn test_combined_network_half_dc() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -191,7 +191,7 @@ async fn test_combined_network_half_dc() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -232,7 +232,7 @@ fn generate_random_node_changes( async fn test_stress_combined_network_fuzzy() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata = TestDescription { + let mut metadata: TestDescription = TestDescription { num_bootstrap_nodes: 10, num_nodes_with_stake: 20, start_nodes: 20, @@ -261,7 +261,7 @@ async fn test_stress_combined_network_fuzzy() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index dcd6737e4c..90b48ed02b 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -21,7 +21,7 @@ async fn test_timeout() { ..Default::default() }; - let mut metadata = TestDescription { + let mut metadata: TestDescription = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, ..Default::default() @@ -53,7 +53,7 @@ async fn test_timeout() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -84,7 +84,7 @@ async fn test_timeout_libp2p() { ..Default::default() }; - let mut metadata = TestDescription { + let mut metadata: TestDescription = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, num_bootstrap_nodes: 10, @@ -117,7 +117,7 @@ async fn test_timeout_libp2p() { // TODO ED Test with memory network once issue is resolved // https://github.com/EspressoSystems/HotShot/issues/1790 metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 68f2d65433..28e29c0fd1 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -18,7 +18,7 @@ use tracing::instrument; async fn libp2p_network_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -36,7 +36,7 @@ async fn libp2p_network_sync() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -56,7 +56,7 @@ async fn test_memory_network_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -70,7 +70,7 @@ async fn test_memory_network_sync() { ..TestDescription::default() }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -83,7 +83,7 @@ async fn test_memory_network_sync() { async fn libp2p_network_async() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 50, @@ -97,7 +97,7 @@ async fn libp2p_network_async() { timing_data: TimingData { timeout_ratio: (1, 1), next_view_timeout: 25000, - ..TestDescription::default_multiple_rounds().timing_data + ..TestDescription::::default_multiple_rounds().timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 9, @@ -109,7 +109,7 @@ async fn libp2p_network_async() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -130,7 +130,7 @@ async fn test_memory_network_async() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 5000, @@ -145,7 +145,7 @@ async fn test_memory_network_async() { timing_data: TimingData { timeout_ratio: (1, 1), next_view_timeout: 1000, - ..TestDescription::default_multiple_rounds().timing_data + ..TestDescription::::default_multiple_rounds().timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 95, @@ -156,7 +156,7 @@ async fn test_memory_network_async() { ..TestDescription::default() }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -176,7 +176,7 @@ async fn test_memory_network_partially_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, ..Default::default() @@ -208,7 +208,7 @@ async fn test_memory_network_partially_sync() { ..TestDescription::default() }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -220,7 +220,7 @@ async fn test_memory_network_partially_sync() { async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, ..Default::default() @@ -248,7 +248,7 @@ async fn libp2p_network_partially_sync() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -269,7 +269,7 @@ async fn test_memory_network_chaos() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -287,7 +287,7 @@ async fn test_memory_network_chaos() { ..TestDescription::default() }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; @@ -300,7 +300,7 @@ async fn test_memory_network_chaos() { async fn libp2p_network_chaos() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -322,7 +322,7 @@ async fn libp2p_network_chaos() { }; metadata - .gen_launcher::(0) + .gen_launcher(0) .launch() .run_test::() .await; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index f99fad4852..9ef89601f6 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -10,7 +10,7 @@ use std::{ use anyhow::{bail, ensure, Result}; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use committable::{Commitment, Committable}; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, trace}; pub use crate::utils::{View, ViewInner}; use crate::{ @@ -60,31 +60,31 @@ impl OuterConsensus { /// Locks inner consensus for reading and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn read(&self) -> ConsensusReadLockGuard<'_, TYPES> { - debug!("Trying to acquire read lock on consensus"); + trace!("Trying to acquire read lock on consensus"); let ret = self.inner_consensus.read().await; - debug!("Acquired read lock on consensus"); + trace!("Acquired read lock on consensus"); ConsensusReadLockGuard::new(ret) } /// Locks inner consensus for writing and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn write(&self) -> ConsensusWriteLockGuard<'_, TYPES> { - debug!("Trying to acquire write lock on consensus"); + trace!("Trying to acquire write lock on consensus"); let ret = self.inner_consensus.write().await; - debug!("Acquired write lock on consensus"); + trace!("Acquired write lock on consensus"); ConsensusWriteLockGuard::new(ret) } /// Tries to acquire write lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub fn try_write(&self) -> Option> { - debug!("Trying to acquire write lock on consensus"); + trace!("Trying to acquire write lock on consensus"); let ret = self.inner_consensus.try_write(); if let Some(guard) = ret { - debug!("Acquired write lock on consensus"); + trace!("Acquired write lock on consensus"); Some(ConsensusWriteLockGuard::new(guard)) } else { - debug!("Failed to acquire write lock"); + trace!("Failed to acquire write lock"); None } } @@ -92,22 +92,22 @@ impl OuterConsensus { /// Acquires upgradable read lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn upgradable_read(&self) -> ConsensusUpgradableReadLockGuard<'_, TYPES> { - debug!("Trying to acquire upgradable read lock on consensus"); + trace!("Trying to acquire upgradable read lock on consensus"); let ret = self.inner_consensus.upgradable_read().await; - debug!("Acquired upgradable read lock on consensus"); + trace!("Acquired upgradable read lock on consensus"); ConsensusUpgradableReadLockGuard::new(ret) } /// Tries to acquire read lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub fn try_read(&self) -> Option> { - debug!("Trying to acquire read lock on consensus"); + trace!("Trying to acquire read lock on consensus"); let ret = self.inner_consensus.try_read(); if let Some(guard) = ret { - debug!("Acquired read lock on consensus"); + trace!("Acquired read lock on consensus"); Some(ConsensusReadLockGuard::new(guard)) } else { - debug!("Failed to acquire read lock"); + trace!("Failed to acquire read lock"); None } } @@ -137,7 +137,7 @@ impl<'a, TYPES: NodeType> Deref for ConsensusReadLockGuard<'a, TYPES> { impl<'a, TYPES: NodeType> Drop for ConsensusReadLockGuard<'a, TYPES> { #[instrument(skip_all, target = "ConsensusReadLockGuard")] fn drop(&mut self) { - debug!("Read lock on consensus dropped"); + trace!("Read lock on consensus dropped"); } } From 385bb3b5a71e961b7dad0f5a8610e2d56399f64f Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 15 Jul 2024 10:35:04 -0400 Subject: [PATCH 1123/1393] [Libp2p] Patches (#3447) * node lookup queue `try_send` * max lookup patch * `replication_factor` patch * clippy * log levels * lint --- .../src/traits/networking/libp2p_network.rs | 128 ++++++++++-------- .../src/network/behaviours/dht/mod.rs | 44 +++--- libp2p-networking/src/network/node.rs | 2 +- libp2p-networking/src/network/node/config.rs | 7 +- libp2p-networking/src/network/node/handle.rs | 28 ++-- types/src/traits/network.rs | 2 + 6 files changed, 124 insertions(+), 87 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index f5f22c0490..a20ef97901 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -66,7 +66,7 @@ use libp2p_networking::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; -use tracing::{debug, error, info, instrument, trace, warn}; +use tracing::{debug, error, info, instrument, warn}; use crate::BroadcastDelay; @@ -77,6 +77,8 @@ pub struct Libp2pMetricsValue { pub num_connected_peers: Box, /// The number of failed messages pub num_failed_messages: Box, + /// Whether or not the network is considered ready + pub is_ready: Box, } impl Libp2pMetricsValue { @@ -89,6 +91,7 @@ impl Libp2pMetricsValue { Self { num_connected_peers: subgroup.create_gauge("num_connected_peers".into(), None), num_failed_messages: subgroup.create_counter("num_failed_messages".into(), None), + is_ready: subgroup.create_gauge("is_ready".into(), None), } } } @@ -397,10 +400,6 @@ impl Libp2pNetwork { let mut config_builder = NetworkNodeConfigBuilder::default(); config_builder - .replication_factor( - NonZeroUsize::new(config.config.num_nodes_with_stake.get() - 2) - .expect("failed to calculate replication factor"), - ) .server_mode(libp2p_config.server_mode) .identity(keypair) .bound_addr(Some(bind_address.clone())) @@ -452,10 +451,16 @@ impl Libp2pNetwork { .await?) } - /// Returns when network is ready + /// Returns whether or not the network is currently ready. + #[must_use] + pub fn is_ready(&self) -> bool { + self.inner.is_ready.load(Ordering::Relaxed) + } + + /// Returns only when the network is ready. pub async fn wait_for_ready(&self) { loop { - if self.inner.is_ready.load(Ordering::Relaxed) { + if self.is_ready() { break; } async_sleep(Duration::from_secs(1)).await; @@ -551,6 +556,9 @@ impl Libp2pNetwork { }), }; + // Set the network as not ready + result.inner.metrics.is_ready.set(0); + result.handle_event_generator(sender, requests_tx, rx); result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id); @@ -573,7 +581,7 @@ impl Libp2pNetwork { #[allow(clippy::cast_possible_truncation)] const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; - trace!("Performing lookup for peer {:?}", pk); + debug!("Performing lookup for peer {:?}", pk); // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { @@ -586,7 +594,7 @@ impl Libp2pNetwork { }; // look up if let Err(err) = handle.lookup_node(&pk_bytes, dht_timeout).await { - error!("Failed to perform lookup for key {:?}: {}", pk, err); + warn!("Failed to perform lookup for key {:?}: {}", pk, err); }; } } @@ -599,7 +607,7 @@ impl Libp2pNetwork { let bootstrap_ref = Arc::clone(&self.inner.bootstrap_addrs); let handle = Arc::clone(&self.inner.handle); let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); - let node_type = self.inner.handle.config().node_type; + let inner = Arc::clone(&self.inner); let is_da = self.inner.is_da; async_spawn({ @@ -607,31 +615,23 @@ impl Libp2pNetwork { async move { let bs_addrs = bootstrap_ref.read().await.clone(); - debug!("Finished adding bootstrap addresses."); + // Add known peers to the network handle.add_known_peers(bs_addrs).await.unwrap(); + // Begin the bootstrap process handle.begin_bootstrap().await?; - while !is_bootstrapped.load(Ordering::Relaxed) { async_sleep(Duration::from_secs(1)).await; handle.begin_bootstrap().await?; } + // Subscribe to the QC topic handle.subscribe(QC_TOPIC.to_string()).await.unwrap(); - // only subscribe to DA events if we are DA + // Only subscribe to DA events if we are DA if is_da { handle.subscribe("DA".to_string()).await.unwrap(); } - // TODO figure out some way of passing in ALL keypairs. That way we can add the - // global topic to the topic map - // NOTE this wont' work without this change - - info!( - "peer {:?} waiting for publishing, type: {:?}", - handle.peer_id(), - node_type - ); // we want our records published before // we begin participating in consensus @@ -648,11 +648,6 @@ impl Libp2pNetwork { { async_sleep(Duration::from_secs(1)).await; } - info!( - "Node {:?} is ready, type: {:?}", - handle.peer_id(), - node_type - ); while handle .put_record( @@ -664,16 +659,20 @@ impl Libp2pNetwork { { async_sleep(Duration::from_secs(1)).await; } - // perform connection - info!("WAITING TO CONNECT ON NODE {:?}", id); + + info!("Finished putting Kademlia records"); // Wait for the network to connect to the required number of peers if let Err(e) = handle.wait_to_connect(4, id).await { error!("Failed to connect to peers: {:?}", e); return Err::<(), NetworkError>(e.into()); } + info!("Connected to required number of peers"); + // Set the network as ready is_ready.store(true, Ordering::Relaxed); + inner.metrics.is_ready.set(1); + Ok::<(), NetworkError>(()) } }); @@ -791,7 +790,11 @@ impl ConnectedNetwork for Libp2pNetwork { request: Vec, recipient: &K, ) -> Result, NetworkError> { - self.wait_for_ready().await; + // If we're not ready, return an error + if !self.is_ready() { + self.inner.metrics.num_failed_messages.add(1); + return Err(NetworkError::NotReady); + }; let pid = match self .inner @@ -806,7 +809,7 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(pid) => pid, Err(err) => { self.inner.metrics.num_failed_messages.add(1); - error!( + debug!( "Failed to message {:?} because could not find recipient peer id for pk {:?}", request, recipient ); @@ -839,7 +842,10 @@ impl ConnectedNetwork for Libp2pNetwork { } }; - Ok(bincode::serialize(&result).map_err(|e| NetworkError::Libp2p { source: e.into() })?) + Ok(bincode::serialize(&result).map_err(|e| { + self.inner.metrics.num_failed_messages.add(1); + NetworkError::Libp2p { source: e.into() } + })?) } async fn spawn_request_receiver_task( @@ -906,30 +912,30 @@ impl ConnectedNetwork for Libp2pNetwork { recipients: BTreeSet, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.wait_for_ready().await; - trace!( - "broadcasting msg: {:?} with nodes: {:?} connected", - message, - self.inner.handle.connected_pids().await - ); + // If we're not ready, return an error + if !self.is_ready() { + self.inner.metrics.num_failed_messages.add(1); + return Err(NetworkError::NotReady); + }; let topic_map = self.inner.topic_map.read().await; let topic = topic_map .get_by_left(&recipients) - .ok_or(NetworkError::Libp2p { - source: Box::new(NetworkNodeHandleError::NoSuchTopic), + .ok_or_else(|| { + self.inner.metrics.num_failed_messages.add(1); + NetworkError::Libp2p { + source: Box::new(NetworkNodeHandleError::NoSuchTopic), + } })? .clone(); - trace!("broadcasting to topic: {}", topic); // gossip doesn't broadcast from itself, so special case if recipients.contains(&self.inner.pk) { // send to self - self.inner - .sender - .send(message.clone()) - .await - .map_err(|_| NetworkError::ShutDown)?; + self.inner.sender.send(message.clone()).await.map_err(|_| { + self.inner.metrics.num_failed_messages.add(1); + NetworkError::ShutDown + })?; } // NOTE: metrics is threadsafe, so clone is fine (and lightweight) @@ -973,6 +979,12 @@ impl ConnectedNetwork for Libp2pNetwork { recipients: BTreeSet, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { + // If we're not ready, return an error + if !self.is_ready() { + self.inner.metrics.num_failed_messages.add(1); + return Err(NetworkError::NotReady); + }; + let future_results = recipients .into_iter() .map(|r| self.direct_message(message.clone(), r)); @@ -995,19 +1007,22 @@ impl ConnectedNetwork for Libp2pNetwork { #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { + // If we're not ready, return an error + if !self.is_ready() { + self.inner.metrics.num_failed_messages.add(1); + return Err(NetworkError::NotReady); + }; + // short circuit if we're dming ourselves if recipient == self.inner.pk { // panic if we already shut down? - self.inner - .sender - .send(message) - .await - .map_err(|_x| NetworkError::ShutDown)?; + self.inner.sender.send(message).await.map_err(|_x| { + self.inner.metrics.num_failed_messages.add(1); + NetworkError::ShutDown + })?; return Ok(()); } - self.wait_for_ready().await; - let pid = match self .inner .handle @@ -1021,7 +1036,7 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(pid) => pid, Err(err) => { self.inner.metrics.num_failed_messages.add(1); - error!( + debug!( "Failed to message {:?} because could not find recipient peer id for pk {:?}", message, recipient ); @@ -1057,7 +1072,10 @@ impl ConnectedNetwork for Libp2pNetwork { match self.inner.handle.direct_request(pid, &message).await { Ok(()) => Ok(()), - Err(e) => Err(e.into()), + Err(e) => { + self.inner.metrics.num_failed_messages.add(1); + Err(e.into()) + } } } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 9a1ae61303..854ce3d295 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -1,6 +1,10 @@ /// Task for doing bootstraps at a regular interval pub mod bootstrap; -use std::{collections::HashMap, num::NonZeroUsize, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + num::NonZeroUsize, + time::Duration, +}; use async_compatibility_layer::{art, channel::UnboundedSender}; /// a local caching layer for the DHT key value pairs @@ -43,6 +47,8 @@ pub struct DHTBehaviour { pub in_progress_get_closest_peers: HashMap>, /// List of in-progress get requests in_progress_record_queries: HashMap, + /// The lookup keys for all outstanding DHT queries + outstanding_dht_query_keys: HashSet>, /// List of in-progress put requests in_progress_put_record_queries: HashMap, /// State of bootstrapping @@ -103,6 +109,7 @@ impl DHTBehaviour { peer_id: pid, in_progress_record_queries: HashMap::default(), in_progress_put_record_queries: HashMap::default(), + outstanding_dht_query_keys: HashSet::default(), bootstrap_state: Bootstrap { state: State::NotStarted, backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), @@ -161,26 +168,28 @@ impl DHTBehaviour { return; } - // check cache before making the request + // Check the cache before making the (expensive) query if let Some(entry) = kad.store_mut().get(&key.clone().into()) { - // exists in cache + // The key already exists in the cache if chan.send(entry.value.clone()).is_err() { error!("Get DHT: channel closed before get record request result could be sent"); } } else { - tracing::debug!("DHT cache miss, key: {:?}", key); - // doesn't exist in cache, actually propagate request - let qid = kad.get_record(key.clone().into()); - let query = KadGetQuery { - backoff, - progress: DHTProgress::InProgress(qid), - notify: chan, - num_replicas: factor, - key, - retry_count: retry_count - 1, - records: HashMap::default(), - }; - self.in_progress_record_queries.insert(qid, query); + // Check if the key is already being queried + if self.outstanding_dht_query_keys.insert(key.clone()) { + // The key was not already being queried and was not in the cache. Start a new query. + let qid = kad.get_record(key.clone().into()); + let query = KadGetQuery { + backoff, + progress: DHTProgress::InProgress(qid), + notify: chan, + num_replicas: factor, + key, + retry_count: retry_count - 1, + records: HashMap::default(), + }; + self.in_progress_record_queries.insert(qid, query); + } } } @@ -275,6 +284,9 @@ impl DHTBehaviour { records, }) = self.in_progress_record_queries.remove(&id) { + // Remove the key from the outstanding queries so we are in sync + self.outstanding_dht_query_keys.remove(&key); + // if channel has been dropped, cancel request if notify.is_canceled() { return; diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index bd3a061c5e..6ad9149576 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -392,7 +392,7 @@ impl NetworkNode { Ok(msg) => { match msg { ClientRequest::BeginBootstrap => { - debug!("begin bootstrap"); + debug!("Beginning Libp2p bootstrap"); let _ = self.swarm.behaviour_mut().dht.bootstrap(); } ClientRequest::LookupPeer(pid, chan) => { diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 147da7f363..aa831f0ce6 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -5,8 +5,8 @@ use libp2p_identity::PeerId; use crate::network::NetworkNodeType; -/// replication factor for kademlia -pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(20); +/// The default Kademlia replication factor +pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(10); /// describe the configuration of the network #[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] @@ -21,8 +21,7 @@ pub struct NetworkNodeConfig { /// address to bind to #[builder(default)] pub bound_addr: Option, - /// replication factor for entries in the DHT - /// default is [`libp2p::kad::K_VALUE`] which is 20 + /// Replication factor for entries in the DHT #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index aec2dae058..b344697430 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -143,27 +143,33 @@ impl NetworkNodeHandle { self.send_request(req).await?; r.await.map_err(|_| NetworkNodeHandleError::RecvError) } - /// Wait until at least `num_peers` have connected, or until `timeout` time has passed. + /// Wait until at least `num_peers` have connected /// /// # Errors - /// - /// Will return any networking error encountered, or `ConnectTimeout` if the `timeout` has elapsed. + /// If the channel closes before the result can be sent back pub async fn wait_to_connect( &self, - num_peers: usize, + num_required_peers: usize, node_id: usize, ) -> Result<(), NetworkNodeHandleError> { - self.begin_bootstrap().await?; - let mut connected_ok = false; - while !connected_ok { - async_sleep(Duration::from_secs(1)).await; + // Wait for the required number of peers to connect + loop { + // Get the number of currently connected peers let num_connected = self.num_connected().await?; + if num_connected >= num_required_peers { + break; + } + + // Log the number of connected peers info!( - "WAITING TO CONNECT, connected to {} / {} peers ON NODE {}", - num_connected, num_peers, node_id + "Node {} connected to {}/{} peers", + node_id, num_connected, num_required_peers ); - connected_ok = num_connected >= num_peers; + + // Sleep for a second before checking again + async_sleep(Duration::from_secs(1)).await; } + Ok(()) } diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 4e01e8eb07..7fecd87c9c 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -165,6 +165,8 @@ pub enum NetworkError { /// vec of errors errors: Vec>, }, + /// The network is not ready yet + NotReady, } /// common traits we would like our network messages to implement From 8ee71dfc14ceeb47154190657cea9666431010b5 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jul 2024 18:18:20 -0400 Subject: [PATCH 1124/1393] [Upgradability] - Avoid tracking the version in task states (#3437) * Save changes * Split consensus helpers * Fix build * Remove version * Improve error handling --- hotshot/src/tasks/task_state.rs | 5 ---- orchestrator/src/config.rs | 7 +++-- task-impls/src/consensus/handlers.rs | 18 +++++++----- task-impls/src/consensus/mod.rs | 32 ++++------------------ task-impls/src/consensus2/handlers.rs | 9 ++---- task-impls/src/consensus2/mod.rs | 4 --- task-impls/src/events.rs | 4 --- task-impls/src/quorum_proposal/handlers.rs | 13 +++++---- task-impls/src/quorum_proposal/mod.rs | 5 ---- task-impls/src/quorum_proposal_recv/mod.rs | 3 -- task-impls/src/quorum_vote/mod.rs | 18 ++++++------ types/src/message.rs | 30 +------------------- types/src/simple_certificate.rs | 29 +++++++++++++++++++- 13 files changed, 67 insertions(+), 110 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 1a459771af..f025cd809e 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -229,7 +229,6 @@ impl> CreateTaskState spawned_tasks: BTreeMap::new(), formed_upgrade_certificate: None, proposal_cert: None, - version: Arc::clone(&handle.hotshot.version), output_event_stream: handle.hotshot.external_event_stream.0.clone(), current_proposal: None, id: handle.hotshot.id, @@ -266,7 +265,6 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), - version: Arc::clone(&handle.hotshot.version), decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } @@ -299,7 +297,6 @@ impl> CreateTaskState timeout_task, round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, - version: Arc::clone(&handle.hotshot.version), formed_upgrade_certificate: None, decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } @@ -335,7 +332,6 @@ impl> CreateTaskState spawned_tasks: BTreeMap::new(), instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, - version: Arc::clone(&handle.hotshot.version), decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } @@ -369,7 +365,6 @@ impl> CreateTaskState consensus: OuterConsensus::new(consensus), last_decided_view: handle.cur_view().await, id: handle.hotshot.id, - version: Arc::clone(&handle.hotshot.version), decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index c29607f2e8..f28c39c823 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -8,11 +8,10 @@ use std::{ vec, }; -use crate::client::OrchestratorClient; use clap::ValueEnum; -use hotshot_types::constants::REQUEST_DATA_DELAY; use hotshot_types::{ - traits::signature_key::SignatureKey, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, + constants::REQUEST_DATA_DELAY, traits::signature_key::SignatureKey, ExecutionType, + HotShotConfig, PeerConfig, ValidatorConfig, }; use libp2p::{Multiaddr, PeerId}; use serde_inline_default::serde_inline_default; @@ -22,6 +21,8 @@ use toml; use tracing::{error, info}; use vec1::Vec1; +use crate::client::OrchestratorClient; + /// Configuration describing a libp2p node #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index c8ae196f6f..3acaa84cd6 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -17,7 +17,7 @@ use hotshot_types::{ data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::UpgradeCertificate, + simple_certificate::{version, UpgradeCertificate}, simple_vote::QuorumData, traits::{ block_contents::BlockHeader, @@ -160,7 +160,6 @@ pub async fn publish_proposal_from_commitment_and_metadata( commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, - version: Version, id: u64, ) -> Result> { let (parent_leaf, state) = parent_leaf_and_state( @@ -209,6 +208,8 @@ pub async fn publish_proposal_from_commitment_and_metadata( "Cannot propose because our VID payload commitment and metadata is for an older view." ); + let version = version(view, &decided_upgrade_certificate.read().await.clone())?; + let create_and_send_proposal_handle = async_spawn(async move { create_and_send_proposal( public_key, @@ -249,7 +250,6 @@ pub async fn publish_proposal_if_able( commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, - version: Version, id: u64, ) -> Result> { publish_proposal_from_commitment_and_metadata( @@ -265,7 +265,6 @@ pub async fn publish_proposal_if_able( commitment_and_metadata, proposal_cert, instance_state, - version, id, ) .await @@ -281,7 +280,6 @@ pub(crate) async fn handle_quorum_proposal_recv>>, task_state: &mut ConsensusTaskState, - version: Version, ) -> Result>> { let sender = sender.clone(); debug!( @@ -461,7 +459,6 @@ pub(crate) async fn handle_quorum_proposal_recv, instance_state: Arc, vote_info: VoteInfo, - version: Version, id: u64, ) -> bool { use hotshot_types::simple_vote::QuorumVote; @@ -720,6 +716,14 @@ pub async fn update_state_and_vote_if_able version, + Err(e) => { + error!("Failed to calculate the version: {e:?}"); + return false; + } + }; let Ok((validated_state, state_delta)) = parent_state .validate_and_apply_header( instance_state.as_ref(), diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 5bfc4571b3..190035baa1 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -31,7 +31,6 @@ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use vbs::version::Version; use crate::{ consensus::handlers::{ @@ -113,9 +112,6 @@ pub struct ConsensusTaskState> { /// last View Sync Certificate or Timeout Certificate this node formed. pub proposal_cert: Option>, - /// Globally shared reference to the current network version. - pub version: Arc>, - /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -212,7 +208,6 @@ impl> ConsensusTaskState self.payload_commitment_and_metadata.clone(), self.proposal_cert.clone(), Arc::clone(&self.instance_state), - *self.version.read().await, self.id, ) .await?; @@ -247,7 +242,6 @@ impl> ConsensusTaskState let quorum_mem = Arc::clone(&self.quorum_membership); let da_mem = Arc::clone(&self.da_membership); let instance_state = Arc::clone(&self.instance_state); - let version = *self.version.read().await; let id = self.id; let handle = async_spawn(async move { update_state_and_vote_if_able::( @@ -259,7 +253,6 @@ impl> ConsensusTaskState quorum_mem, instance_state, (priv_key, upgrade, da_mem, event_stream), - version, id, ) .await; @@ -274,18 +267,11 @@ impl> ConsensusTaskState event: Arc>, event_stream: Sender>>, ) { - let version = *self.version.read().await; match event.as_ref() { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!("proposal recv view: {:?}", proposal.data.view_number()); - match handle_quorum_proposal_recv( - proposal, - sender, - event_stream.clone(), - self, - version, - ) - .await + match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self) + .await { Ok(Some(current_proposal)) => { let view = current_proposal.view_number(); @@ -495,22 +481,14 @@ impl> ConsensusTaskState let old_view_number = self.cur_view; - // If we have a decided upgrade certificate, - // we may need to upgrade the protocol version on a view change. + // If we have a decided upgrade certificate, the protocol version may also have + // been upgraded. if let Some(cert) = self.decided_upgrade_certificate.read().await.clone() { if new_view == cert.data.new_version_first_view { warn!( - "Updating version based on a decided upgrade cert: {:?}", + "Version upgraded based on a decided upgrade cert: {:?}", cert ); - let mut version = self.version.write().await; - *version = cert.data.new_version; - - broadcast_event( - Arc::new(HotShotEvent::VersionUpgrade(cert.data.new_version)), - &event_stream, - ) - .await; } } diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index f82cb78377..8673ede034 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -134,20 +134,15 @@ pub(crate) async fn handle_view_change> { /// The node's id pub id: u64, - /// Globally shared reference to the current network version. - pub version: Arc>, - /// An upgrade certificate that has been decided on, if any. pub decided_upgrade_certificate: Arc>>>, } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8c46229090..f12fe1bd23 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -22,7 +22,6 @@ use hotshot_types::{ vid::VidCommitment, vote::{HasViewNumber, VoteDependencyData}, }; -use vbs::version::Version; use crate::view_sync::ViewSyncPhase; @@ -173,8 +172,6 @@ pub enum HotShotEvent { UpgradeCertificateFormed(UpgradeCertificate), /// A HotShot upgrade was decided UpgradeDecided(UpgradeCertificate), - /// HotShot was upgraded, with a new network version. - VersionUpgrade(Version), /// Initiate a vote right now for the designated view. VoteNow(TYPES::Time, VoteDependencyData), @@ -407,7 +404,6 @@ impl Display for HotShotEvent { "UpgradeCertificateFormed(view_number={:?})", cert.view_number() ), - HotShotEvent::VersionUpgrade(_) => write!(f, "VersionUpgrade"), HotShotEvent::UpgradeDecided(cert) => { write!(f, "UpgradeDecided(view_number{:?})", cert.view_number()) } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 01e2636fa0..5d240fcd8f 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -18,13 +18,12 @@ use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, - simple_certificate::UpgradeCertificate, + simple_certificate::{version, UpgradeCertificate}, traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, }; use tracing::{debug, error, instrument}; -use vbs::version::Version; use crate::{ events::HotShotEvent, @@ -85,9 +84,6 @@ pub struct ProposalDependencyHandle { /// Shared consensus task state pub consensus: OuterConsensus, - /// Globally shared reference to the current network version. - pub version: Arc>, - /// The most recent upgrade certificate this node formed. /// Note: this is ONLY for certificates that have been formed internally, /// so that we can propose with them. @@ -162,6 +158,11 @@ impl ProposalDependencyHandle { "Cannot propose because our VID payload commitment and metadata is for an older view." ); + let version = version( + self.view_number, + &self.decided_upgrade_certificate.read().await.clone(), + )?; + let block_header = TYPES::BlockHeader::new( state.as_ref(), self.instance_state.as_ref(), @@ -171,7 +172,7 @@ impl ProposalDependencyHandle { commitment_and_metadata.metadata, commitment_and_metadata.fee, vid_share.data.common.clone(), - *self.version.read().await, + version, ) .await .context("Failed to construct block header")?; diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index cc4539da3f..ea0059b230 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -29,7 +29,6 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, instrument, warn}; -use vbs::version::Version; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; use crate::{ @@ -86,9 +85,6 @@ pub struct QuorumProposalTaskState /// The node's id pub id: u64, - /// Globally shared reference to the current network version. - pub version: Arc>, - /// The most recent upgrade certificate this node formed. /// Note: this is ONLY for certificates that have been formed internally, /// so that we can propose with them. @@ -320,7 +316,6 @@ impl> QuorumProposalTaskState>, - /// An upgrade certificate that has been decided on, if any. pub decided_upgrade_certificate: Arc>>>, } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 24e2b5f570..6bc60beb08 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -19,7 +19,7 @@ use hotshot_types::{ data::{Leaf, VidDisperseShare, ViewNumber}, event::Event, message::Proposal, - simple_certificate::UpgradeCertificate, + simple_certificate::{version, UpgradeCertificate}, simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, @@ -37,7 +37,6 @@ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, trace, warn}; -use vbs::version::Version; use crate::{ events::HotShotEvent, @@ -82,8 +81,8 @@ struct VoteDependencyHandle> { sender: Sender>>, /// Event receiver. receiver: Receiver>>, - /// Globally shared reference to the current network version. - pub version: Arc>, + /// An upgrade certificate that has been decided on, if any. + pub decided_upgrade_certificate: Arc>>>, /// The node's id id: u64, } @@ -130,13 +129,17 @@ impl + 'static> VoteDependencyHand drop(consensus_reader); + let version = version( + self.view_number, + &self.decided_upgrade_certificate.read().await.clone(), + )?; let (validated_state, state_delta) = parent_state .validate_and_apply_header( &self.instance_state, &parent, &proposed_leaf.block_header().clone(), vid_share.data.common.clone(), - *self.version.read().await, + version, ) .await .context("Block header doesn't extend the proposal!")?; @@ -389,9 +392,6 @@ pub struct QuorumVoteTaskState> { /// Reference to the storage. pub storage: Arc>, - /// Globally shared reference to the current network version. - pub version: Arc>, - /// An upgrade certificate that has been decided on, if any. pub decided_upgrade_certificate: Arc>>>, } @@ -515,7 +515,7 @@ impl> QuorumVoteTaskState( - view: TYPES::Time, - upgrade_certificate: &Option>, -) -> Result { - let version = match upgrade_certificate { - Some(ref cert) => { - if view >= cert.data.new_version_first_view - && cert.data.new_version == TYPES::Upgrade::VERSION - { - TYPES::Upgrade::VERSION - } else if view >= cert.data.new_version_first_view - && cert.data.new_version != TYPES::Upgrade::VERSION - { - bail!("The network has upgraded to a new version that we do not support!"); - } else { - TYPES::Base::VERSION - } - } - None => TYPES::Base::VERSION, - }; - - Ok(version) -} - /// Incoming message #[derive(Serialize, Deserialize, Clone, Derivative, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index bf80724ab9..5184fc79c6 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -7,11 +7,12 @@ use std::{ sync::Arc, }; -use anyhow::{ensure, Result}; +use anyhow::{bail, ensure, Result}; use async_lock::RwLock; use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; +use vbs::version::{StaticVersionType, Version}; use crate::{ data::serialize_signature2, @@ -240,3 +241,29 @@ pub type ViewSyncFinalizeCertificate2 = /// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` pub type UpgradeCertificate = SimpleCertificate, UpgradeThreshold>; + +/// Calculate the version applied in a view, based on the provided upgrade certificate. +/// +/// # Errors +/// Returns an error if we do not support the version required by the upgrade certificate. +pub fn version( + view: TYPES::Time, + upgrade_certificate: &Option>, +) -> Result { + let version = match upgrade_certificate { + Some(ref cert) => { + if view >= cert.data.new_version_first_view { + if cert.data.new_version == TYPES::Upgrade::VERSION { + TYPES::Upgrade::VERSION + } else { + bail!("The network has upgraded to a new version that we do not support!"); + } + } else { + TYPES::Base::VERSION + } + } + None => TYPES::Base::VERSION, + }; + + Ok(version) +} From bc6535779df3290e86fe12110cb65a54fd5869c0 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 15 Jul 2024 18:27:10 -0400 Subject: [PATCH 1125/1393] [Upgradability] - Stop proposing and voting for successfully upgraded versions (#3450) * Save changes * Split consensus helpers * Fix build * Remove version * Improve error handling * Add proposal and vote termination --- hotshot/src/tasks/task_state.rs | 2 ++ task-impls/src/upgrade.rs | 30 ++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index f025cd809e..8d14394455 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -84,6 +84,7 @@ impl> CreateTaskState stop_proposing_time: handle.hotshot.config.stop_proposing_time, start_voting_time: handle.hotshot.config.start_voting_time, stop_voting_time: handle.hotshot.config.stop_voting_time, + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), }; #[cfg(feature = "example-upgrade")] @@ -104,6 +105,7 @@ impl> CreateTaskState stop_proposing_time: u64::MAX, start_voting_time: 0, stop_voting_time: u64::MAX, + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), }; } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 06e7b89214..5fc4b70151 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -86,9 +86,20 @@ pub struct UpgradeTaskState> { /// Unix time in seconds at which we stop voting on an upgrade pub stop_voting_time: u64, + + /// Upgrade certificate that has been decided on, if any + pub decided_upgrade_certificate: Arc>>>, } impl> UpgradeTaskState { + /// Check if the version has been upgraded. + async fn upgraded(&self) -> bool { + if let Some(cert) = self.decided_upgrade_certificate.read().await.clone() { + return cert.data.new_version == TYPES::Upgrade::VERSION; + } + false + } + /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] pub async fn handle( @@ -101,6 +112,16 @@ impl> UpgradeTaskState { info!("Received upgrade proposal: {:?}", proposal); let view = *proposal.data.view_number(); + + // Skip voting if the version has already been upgraded. + if self.upgraded().await { + info!( + "Already upgraded to {:?}, skip voting.", + TYPES::Upgrade::VERSION + ); + return None; + } + let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .ok()? @@ -242,6 +263,15 @@ impl> UpgradeTaskState { self.cur_view = *new_view; + // Skip proposing if the version has already been upgraded. + if self.upgraded().await { + info!( + "Already upgraded to {:?}, skip proposing.", + TYPES::Upgrade::VERSION + ); + return None; + } + let view: u64 = *self.cur_view; let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) From 0c087c53169be47944de657e3851fd526b3489d5 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 16 Jul 2024 12:14:24 -0400 Subject: [PATCH 1126/1393] lower libp2p logging levels (#3451) --- .../src/network/behaviours/dht/mod.rs | 51 +++++------- .../src/network/behaviours/direct_message.rs | 22 ++---- libp2p-networking/src/network/node.rs | 79 +++++++++---------- libp2p-networking/src/network/node/handle.rs | 5 +- 4 files changed, 63 insertions(+), 94 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 854ce3d295..208856d6ab 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -21,7 +21,7 @@ use libp2p::kad::{ store::RecordStore, Behaviour as KademliaBehaviour, BootstrapError, Event as KademliaEvent, }; use libp2p_identity::PeerId; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; /// the number of nodes required to get an answer from /// in order to trust that the answer is correct when retrieving from the DHT @@ -373,7 +373,7 @@ impl DHTBehaviour { } /// Send that the bootsrap suceeded - fn finsish_bootstrap(&mut self) { + fn finish_bootstrap(&mut self) { if let Some(mut tx) = self.bootstrap_tx.clone() { art::async_spawn( async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await }, @@ -401,29 +401,23 @@ impl DHTBehaviour { KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(r), id: query_id, - stats, + stats: _, step: ProgressStep { last: true, .. }, .. } => match r { - Ok(GetClosestPeersOk { key, peers }) => { + Ok(GetClosestPeersOk { key, peers: _ }) => { if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { if chan.send(()).is_err() { - warn!("DHT: finished query but client no longer interested"); + warn!("DHT: finished query but client was no longer interested"); }; }; - info!( - "peer {:?} successfully completed get closest peers for {:?} with peers {:?}", - self.peer_id, key, peers - ); + debug!("Successfully got closest peers for key {:?}", key); } Err(e) => { if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { let _: Result<_, _> = chan.send(()); }; - warn!( - "peer {:?} failed to get closest peers with {:?} and stats {:?}", - self.peer_id, e, stats - ); + warn!("Failed to get closest peers: {:?}", e); } }, KademliaEvent::OutboundQueryProgressed { @@ -444,13 +438,10 @@ impl DHTBehaviour { .. } => { if num_remaining == 0 { - info!("Finished bootstrap for peer {:?}", self.peer_id); - self.finsish_bootstrap(); + info!("Finished bootstrapping"); + self.finish_bootstrap(); } else { - warn!( - "Bootstrap in progress: num remaining nodes to ping {:?}", - num_remaining - ); + debug!("Bootstrap in progress, {} nodes remaining", num_remaining); } return Some(NetworkEvent::IsBootstrapped); } @@ -460,24 +451,18 @@ impl DHTBehaviour { } => { let BootstrapError::Timeout { num_remaining, .. } = e; if num_remaining.is_none() { - error!( - "Peer {:?} failed bootstrap with error {:?}. This should not happen and means all bootstrap nodes are down or were evicted from our local DHT.", - self.peer_id, e, - ); + error!("Failed to bootstrap: {:?}", e); } - self.finsish_bootstrap(); + self.finish_bootstrap(); } KademliaEvent::RoutablePeer { peer, address: _ } => { - info!("on peer {:?} found routable peer {:?}", self.peer_id, peer); + debug!("Found routable peer {:?}", peer); } KademliaEvent::PendingRoutablePeer { peer, address: _ } => { - info!( - "on peer {:?} have pending routable peer {:?}", - self.peer_id, peer - ); + debug!("Found pending routable peer {:?}", peer); } KademliaEvent::UnroutablePeer { peer } => { - info!("on peer {:?} have unroutable peer {:?}", self.peer_id, peer); + debug!("Found unroutable peer {:?}", peer); } KademliaEvent::InboundRequest { request: _r } => {} KademliaEvent::RoutingUpdated { @@ -487,13 +472,13 @@ impl DHTBehaviour { bucket_range: _, old_peer: _, } => { - info!("Routing table update"); + debug!("Routing table updated"); } e @ KademliaEvent::OutboundQueryProgressed { .. } => { - info!("Not handling dht event {:?}", e); + debug!("Not handling dht event {:?}", e); } e => { - error!("UNHANDLED NEW SWARM VARIANT: {e:?}"); + debug!("New unhandled swarm event: {e:?}"); } } None diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index 0d84faff3c..8c633ff992 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -6,7 +6,7 @@ use async_compatibility_layer::{ }; use libp2p::request_response::{Event, Message, OutboundRequestId, ResponseChannel}; use libp2p_identity::PeerId; -use tracing::{error, info}; +use tracing::{debug, error, warn}; use super::exponential_backoff::ExponentialBackoff; use crate::network::{ClientRequest, NetworkEvent}; @@ -54,10 +54,7 @@ impl DMBehaviour { request_id: _, error, } => { - error!( - "inbound failure to send message to {:?} with error {:?}", - peer, error - ); + error!("Inbound message failure from {:?}: {:?}", peer, error); None } Event::OutboundFailure { @@ -65,10 +62,7 @@ impl DMBehaviour { request_id, error, } => { - error!( - "outbound failure to send message to {:?} with error {:?}", - peer, error - ); + error!("Outbound message failure to {:?}: {:?}", peer, error); if let Some(mut req) = self.in_progress_rr.remove(&request_id) { if req.retry_count == 0 { return None; @@ -95,7 +89,7 @@ impl DMBehaviour { channel, .. } => { - info!("recv-ed DIRECT REQUEST {:?}", msg); + debug!("Received direct request {:?}", msg); // receiver, not initiator. // don't track. If we are disconnected, sender will reinitiate Some(NetworkEvent::DirectRequest(msg, peer, channel)) @@ -106,16 +100,16 @@ impl DMBehaviour { } => { // success, finished. if let Some(req) = self.in_progress_rr.remove(&request_id) { - info!("recv-ed DIRECT RESPONSE {:?}", msg); + debug!("Received direct response {:?}", msg); Some(NetworkEvent::DirectResponse(msg, req.peer_id)) } else { - error!("recv-ed a direct response, but is no longer tracking message!"); + warn!("Received response for unknown request id {:?}", request_id); None } } }, e @ Event::ResponseSent { .. } => { - info!(?e, " sending response"); + debug!("Response sent {:?}", e); None } } @@ -131,7 +125,7 @@ impl DMBehaviour { req.retry_count -= 1; - info!("direct message request with id {:?}", request_id); + debug!("Adding direct request {:?}", req); self.in_progress_rr.insert(request_id, req); } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 6ad9149576..c91589f32a 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -40,7 +40,7 @@ use libp2p::{ use libp2p_identity::PeerId; use rand::{prelude::SliceRandom, thread_rng}; use snafu::ResultExt; -use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; +use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ config::{ @@ -124,7 +124,7 @@ impl NetworkNode { break address; } }; - info!("peerid {:?} started on addr: {:?}", self.peer_id, addr); + info!("Libp2p listening on {:?}", addr); Ok(addr) } @@ -134,7 +134,7 @@ impl NetworkNode { /// will start connecting to peers #[instrument(skip(self))] pub fn add_known_peers(&mut self, known_peers: &[(PeerId, Multiaddr)]) { - info!("Adding nodes {:?} to {:?}", known_peers, self.peer_id); + debug!("Adding {} known peers", known_peers.len()); let behaviour = self.swarm.behaviour_mut(); let mut bs_nodes = HashMap::>::new(); let mut shuffled = known_peers.iter().collect::>(); @@ -167,7 +167,7 @@ impl NetworkNode { let peer_id = PeerId::from(identity.public()); debug!(?peer_id); let transport: BoxedTransport = gen_transport(identity.clone()).await?; - trace!("Launched network transport"); + debug!("Launched network transport"); // Generate the swarm let mut swarm: Swarm = { // Use the hash of the message's contents as the ID @@ -364,7 +364,7 @@ impl NetworkNode { error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); } Ok(qid) => { - info!("Success publishing {:?} to DHT", qid); + debug!("Published record to DHT with qid {:?}", qid); let query = KadPutQuery { progress: DHTProgress::InProgress(qid), ..query @@ -476,7 +476,7 @@ impl NetworkNode { contents, retry_count, } => { - info!("pid {:?} adding direct request", self.peer_id); + debug!("Sending direct request to {:?}", pid); let id = behaviour.add_direct_request(pid, contents.clone()); let req = DMRequest { peer_id: pid, @@ -503,7 +503,7 @@ impl NetworkNode { .send_response(chan, response) .is_err() { - info!("Data Response dropped because response peer disconnected"); + debug!("Data response dropped because client is no longer connected"); } } ClientRequest::AddKnownPeers(peers) => { @@ -511,10 +511,7 @@ impl NetworkNode { } ClientRequest::Prune(pid) => { if self.swarm.disconnect_peer_id(pid).is_err() { - error!( - "Peer {:?} could not disconnect from pid {:?}", - self.peer_id, pid - ); + warn!("Could not disconnect from {:?}", pid); } } } @@ -535,7 +532,7 @@ impl NetworkNode { send_to_client: &UnboundedSender, ) -> Result<(), NetworkError> { // Make the match cleaner - info!("event observed {:?}", event); + debug!("Swarm event observed {:?}", event); #[allow(deprecated)] match event { @@ -553,7 +550,10 @@ impl NetworkNode { ESTABLISHED_LIMIT, num_established ); } else { - info!("peerid {:?} connection is established to {:?} with endpoint {:?} with concurrent dial errors {:?}. {:?} connections left", self.peer_id, peer_id, endpoint, concurrent_dial_errors, num_established); + debug!( + "Connection established with {:?} at {:?} with {:?} concurrent dial errors", + peer_id, endpoint, concurrent_dial_errors + ); } // Send the number of connected peers to the client @@ -575,7 +575,10 @@ impl NetworkNode { ESTABLISHED_LIMIT, num_established ); } else { - info!("peerid {:?} connection is closed to {:?} with endpoint {:?}. {:?} connections left. Cause: {:?}", self.peer_id, peer_id, endpoint, num_established, cause); + debug!( + "Connection closed with {:?} at {:?} due to {:?}", + peer_id, endpoint, cause + ); } // Send the number of connected peers to the client @@ -588,7 +591,7 @@ impl NetworkNode { peer_id, connection_id: _, } => { - info!("{:?} is dialing {:?}", self.peer_id, peer_id); + debug!("Attempting to dial {:?}", peer_id); } SwarmEvent::ListenerClosed { listener_id: _, @@ -626,19 +629,12 @@ impl NetworkNode { public_key: _, protocol_version: _, agent_version: _, - observed_addr, + observed_addr: _, }, } = *e { let behaviour = self.swarm.behaviour_mut(); - // NOTE in practice, we will want to NOT include this. E.g. only DNS/non localhost IPs - // NOTE I manually checked and peer_id corresponds to listen_addrs. - // NOTE Once we've tested on DNS addresses, this should be swapped out to play nicely - // with autonat - info!( - "local peer {:?} IDENTIFY ADDRS LISTEN: {:?} for peer {:?}, ADDRS OBSERVED: {:?} ", - self.dht_handler.peer_id , peer_id, listen_addrs, observed_addr - ); + // into hashset to delete duplicates (I checked: there are duplicates) for addr in listen_addrs.iter().collect::>() { behaviour.dht.add_address(&peer_id, addr.clone()); @@ -653,15 +649,15 @@ impl NetworkNode { message, } => Some(NetworkEvent::GossipMsg(message.data)), GossipEvent::Subscribed { peer_id, topic } => { - info!("Peer: {:?}, Subscribed to topic: {:?}", peer_id, topic); + debug!("Peer {:?} subscribed to topic {:?}", peer_id, topic); None } GossipEvent::Unsubscribed { peer_id, topic } => { - info!("Peer: {:?}, Unsubscribed from topic: {:?}", peer_id, topic); + debug!("Peer {:?} unsubscribed from topic {:?}", peer_id, topic); None } GossipEvent::GossipsubNotSupported { peer_id } => { - info!("Peer: {:?}, Does not support Gossip", peer_id); + warn!("Peer {:?} does not support gossipsub", peer_id); None } }, @@ -683,13 +679,13 @@ impl NetworkNode { error, } => { warn!( - "Autonat Probe failed to peer {:?}, with error: {:?}", + "AutoNAT Probe failed to peer {:?} with error: {:?}", peer, error ); } }, autonat::Event::StatusChanged { old, new } => { - info!("autonat Status changed. Old: {:?}, New: {:?}", old, new); + debug!("AutoNAT Status changed. Old: {:?}, New: {:?}", old, new); } }; None @@ -706,24 +702,24 @@ impl NetworkNode { } SwarmEvent::OutgoingConnectionError { connection_id: _, - peer_id: _, + peer_id, error, } => { - info!(?error, "OUTGOING CONNECTION ERROR, {:?}", error); + warn!("Outgoing connection error to {:?}: {:?}", peer_id, error); } SwarmEvent::IncomingConnectionError { connection_id: _, - local_addr, - send_back_addr, + local_addr: _, + send_back_addr: _, error, } => { - info!( - "INCOMING CONNECTION ERROR: {:?} {:?} {:?}", - local_addr, send_back_addr, error - ); + warn!("Incoming connection error: {:?}", error); } - SwarmEvent::ListenerError { listener_id, error } => { - info!("LISTENER ERROR {:?} {:?}", listener_id, error); + SwarmEvent::ListenerError { + listener_id: _, + error, + } => { + warn!("Listener error: {:?}", error); } SwarmEvent::ExternalAddrConfirmed { address } => { let my_id = *self.swarm.local_peer_id(); @@ -733,10 +729,7 @@ impl NetworkNode { .add_address(&my_id, address.clone()); } _ => { - error!( - "Unhandled swarm event {:?}. This should not be possible.", - event - ); + debug!("Unhandled swarm event {:?}", event); } } Ok(()) diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index b344697430..981ec56938 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -97,8 +97,6 @@ pub async fn spawn_network_node( recv_kill: None, }; - info!("LISTEN ADDRESS IS {:?}", listen_addr); - let handle = NetworkNodeHandle { network_config: config, send_network: send_chan, @@ -430,7 +428,7 @@ impl NetworkNodeHandle { &self, known_peers: Vec<(PeerId, Multiaddr)>, ) -> Result<(), NetworkNodeHandleError> { - info!("ADDING KNOWN PEERS TO {:?}", self.peer_id); + debug!("Adding {} known peers", known_peers.len()); let req = ClientRequest::AddKnownPeers(known_peers); self.send_request(req).await } @@ -440,7 +438,6 @@ impl NetworkNodeHandle { /// # Errors /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed async fn send_request(&self, req: ClientRequest) -> Result<(), NetworkNodeHandleError> { - debug!("peerid {:?}\t\tsending message {:?}", self.peer_id, req); self.send_network .send(req) .await From c96a729210a7696593fce6df0c691b65f5a6939a Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 16 Jul 2024 19:18:52 +0200 Subject: [PATCH 1127/1393] Add `broken_3_chain` test (#3448) * node lookup queue `try_send` * max lookup patch * `replication_factor` patch * clippy * Add `broken_3_chain` test (disabled) * Remove unused import --------- Co-authored-by: Rob --- testing/tests/tests_5/broken_3_chain.rs | 68 +++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 testing/tests/tests_5/broken_3_chain.rs diff --git a/testing/tests/tests_5/broken_3_chain.rs b/testing/tests/tests_5/broken_3_chain.rs new file mode 100644 index 0000000000..9d020cb144 --- /dev/null +++ b/testing/tests/tests_5/broken_3_chain.rs @@ -0,0 +1,68 @@ +#![cfg(feature = "broken_3_chain_fixed")] +use std::time::Duration; + +use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestDescription, TimingData}, +}; +use tracing::instrument; + +/// Broken 3-chain test +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[instrument] +async fn broken_3_chain() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata: TestDescription = TestDescription { + overall_safety_properties: OverallSafetyPropertiesDescription { + check_leaf: true, + ..Default::default() + }, + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ), + timing_data: TimingData { + next_view_timeout: 4000, + ..Default::default() + }, + ..TestDescription::default_multiple_rounds() + }; + + let dead_nodes = vec![ + ChangeNode { + idx: 3, + updown: UpDown::NetworkDown, + }, + ChangeNode { + idx: 6, + updown: UpDown::NetworkDown, + }, + ChangeNode { + idx: 9, + updown: UpDown::NetworkDown, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(3, dead_nodes)], + }; + metadata.num_nodes_with_stake = 10; + metadata.da_staked_committee_size = 10; + metadata.start_nodes = 10; + metadata.overall_safety_properties.num_failed_views = 100; + // Check whether we see at least 10 decides + metadata.overall_safety_properties.num_successful_views = 10; + + metadata + .gen_launcher(0) + .launch() + .run_test::() + .await; +} From 8527709004ce1aa0732d304cf3958afd330ebde2 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 17 Jul 2024 18:01:18 +0200 Subject: [PATCH 1128/1393] Versioning for builder API, removal of VID enpoint (#3413) * Versioned builder API * Remove VID endpoint in marketplace * Correct usage of upgrade certificate * Named versions * Fee signature differences * Fix dependency tasks --- builder-api/api/{ => v0_1}/builder.toml | 0 builder-api/api/{ => v0_1}/submit.toml | 0 builder-api/api/v0_3/builder.toml | 68 +++ builder-api/api/v0_3/submit.toml | 46 ++ builder-api/src/lib.rs | 10 +- builder-api/src/{ => v0_1}/block_info.rs | 28 +- builder-api/src/{ => v0_1}/builder.rs | 19 +- builder-api/src/{ => v0_1}/data_source.rs | 2 +- builder-api/src/v0_1/mod.rs | 6 + builder-api/src/{ => v0_1}/query_data.rs | 2 +- builder-api/src/v0_3/block_info.rs | 32 ++ builder-api/src/v0_3/builder.rs | 61 +++ builder-api/src/v0_3/data_source.rs | 37 ++ builder-api/src/v0_3/mod.rs | 7 + hotshot/src/tasks/task_state.rs | 3 +- task-impls/src/builder.rs | 156 +++++-- task-impls/src/transactions.rs | 393 +++++++++++------- task-impls/src/vid.rs | 2 +- testing/src/block_builder/mod.rs | 11 +- testing/src/block_builder/random.rs | 2 +- testing/src/block_builder/simple.rs | 7 +- testing/tests/tests_1/block_builder.rs | 13 +- testing/tests/tests_1/consensus_task.rs | 18 +- testing/tests/tests_1/da_task.rs | 42 +- testing/tests/tests_1/quorum_proposal_task.rs | 93 +++-- .../tests_1/upgrade_task_with_consensus.rs | 28 +- .../tests_1/upgrade_task_with_proposal.rs | 60 +-- testing/tests/tests_1/vid_task.rs | 32 +- types/src/constants.rs | 10 + types/src/data.rs | 52 ++- types/src/traits/signature_key.rs | 30 +- 31 files changed, 901 insertions(+), 369 deletions(-) rename builder-api/api/{ => v0_1}/builder.toml (100%) rename builder-api/api/{ => v0_1}/submit.toml (100%) create mode 100644 builder-api/api/v0_3/builder.toml create mode 100644 builder-api/api/v0_3/submit.toml rename builder-api/src/{ => v0_1}/block_info.rs (64%) rename builder-api/src/{ => v0_1}/builder.rs (94%) rename builder-api/src/{ => v0_1}/data_source.rs (99%) create mode 100644 builder-api/src/v0_1/mod.rs rename builder-api/src/{ => v0_1}/query_data.rs (87%) create mode 100644 builder-api/src/v0_3/block_info.rs create mode 100644 builder-api/src/v0_3/builder.rs create mode 100644 builder-api/src/v0_3/data_source.rs create mode 100644 builder-api/src/v0_3/mod.rs diff --git a/builder-api/api/builder.toml b/builder-api/api/v0_1/builder.toml similarity index 100% rename from builder-api/api/builder.toml rename to builder-api/api/v0_1/builder.toml diff --git a/builder-api/api/submit.toml b/builder-api/api/v0_1/submit.toml similarity index 100% rename from builder-api/api/submit.toml rename to builder-api/api/v0_1/submit.toml diff --git a/builder-api/api/v0_3/builder.toml b/builder-api/api/v0_3/builder.toml new file mode 100644 index 0000000000..76a27deb3d --- /dev/null +++ b/builder-api/api/v0_3/builder.toml @@ -0,0 +1,68 @@ +# Copyright (c) 2024 Espresso Systems (espressosys.com) +# This file is part of the HotShot Builder Protocol. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +[meta] +NAME = "hs-builder-get" +DESCRIPTION = "" +FORMAT_VERSION = "0.1.0" + +[route.available_blocks] +PATH = ["availableblocks/:parent_hash/:view_number/:sender/:signature"] +":parent_hash" = "TaggedBase64" +":view_number" = "Integer" +":sender" = "TaggedBase64" +":signature" = "TaggedBase64" +DOC = """ +Get descriptions for all block candidates based on a specific parent block. + +Returns +``` +[ + "block_metadata": { + "block_hash": TaggedBase64, + "block_size": integer, + "offered_fee": integer, + }, +] +``` +""" + +[route.claim_block] +PATH = ["claimblock/:block_hash/:view_number/:sender/:signature"] +":block_hash" = "TaggedBase64" +":view_number" = "Integer" +":sender" = "TaggedBase64" +":signature" = "TaggedBase64" +DOC = """ +Get the specified block candidate. + +Returns application-specific encoded transactions type +""" + +[route.builder_address] +PATH = ["builderaddress"] +DOC = """ +Get the builder's address. + +Returns the builder's public key +""" diff --git a/builder-api/api/v0_3/submit.toml b/builder-api/api/v0_3/submit.toml new file mode 100644 index 0000000000..929ec45854 --- /dev/null +++ b/builder-api/api/v0_3/submit.toml @@ -0,0 +1,46 @@ +# Copyright (c) 2024 Espresso Systems (espressosys.com) +# This file is part of the HotShot Builder Protocol. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +[meta] +NAME = "hs-builder-submit" +DESCRIPTION = "" +FORMAT_VERSION = "0.1.0" + +[route.submit_txn] +PATH = ["/submit"] +METHOD = "POST" +DOC = """ +Submit a transaction to builder's private mempool." + +Returns transaction hash +""" + +[route.submit_batch] +PATH = ["/batch"] +METHOD = "POST" +DOC = """ +Submit a list of transactions to builder's private mempool." + +Returns the corresponding list of transaction hashes +""" diff --git a/builder-api/src/lib.rs b/builder-api/src/lib.rs index 152dc4a9ec..329a94e2cb 100644 --- a/builder-api/src/lib.rs +++ b/builder-api/src/lib.rs @@ -21,7 +21,9 @@ // SOFTWARE. mod api; -pub mod block_info; -pub mod builder; -pub mod data_source; -pub mod query_data; +pub mod v0_1; +pub mod v0_2 { + pub use super::v0_1::*; + pub type Version = vbs::version::StaticVersion<0, 2>; +} +pub mod v0_3; diff --git a/builder-api/src/block_info.rs b/builder-api/src/v0_1/block_info.rs similarity index 64% rename from builder-api/src/block_info.rs rename to builder-api/src/v0_1/block_info.rs index 51474fcb76..fd14eeb311 100644 --- a/builder-api/src/block_info.rs +++ b/builder-api/src/v0_1/block_info.rs @@ -1,4 +1,4 @@ -use std::{hash::Hash, marker::PhantomData}; +use std::marker::PhantomData; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey, BlockPayload}, @@ -29,6 +29,15 @@ pub struct AvailableBlockData { pub sender: ::BuilderSignatureKey, } +impl AvailableBlockData { + pub fn validate_signature(&self) -> bool { + // verify the signature over the message, construct the builder commitment + let builder_commitment = self.block_payload.builder_commitment(&self.metadata); + self.sender + .validate_builder_signature(&self.signature, builder_commitment.as_ref()) + } +} + #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] pub struct AvailableBlockHeaderInput { @@ -42,3 +51,20 @@ pub struct AvailableBlockHeaderInput { <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, pub sender: ::BuilderSignatureKey, } + +impl AvailableBlockHeaderInput { + pub fn validate_signature( + &self, + offered_fee: u64, + metadata: &>::Metadata, + ) -> bool { + self.sender + .validate_builder_signature(&self.message_signature, self.vid_commitment.as_ref()) + && self.sender.validate_fee_signature( + &self.fee_signature, + offered_fee, + metadata, + &self.vid_commitment, + ) + } +} diff --git a/builder-api/src/builder.rs b/builder-api/src/v0_1/builder.rs similarity index 94% rename from builder-api/src/builder.rs rename to builder-api/src/v0_1/builder.rs index e41caaa018..7af58a4fd5 100644 --- a/builder-api/src/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -11,10 +11,11 @@ use tagged_base64::TaggedBase64; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, RequestParams, StatusCode}; use vbs::version::StaticVersionType; -use crate::{ - api::load_api, +use super::{ data_source::{AcceptsTxnSubmits, BuilderDataSource}, + Version, }; +use crate::api::load_api; #[derive(Args, Default)] pub struct Options { @@ -109,7 +110,7 @@ impl tide_disco::error::Error for Error { } } -fn try_extract_param TryFrom<&'a TaggedBase64>>( +pub(crate) fn try_extract_param TryFrom<&'a TaggedBase64>>( params: &RequestParams, param_name: &str, ) -> Result { @@ -123,19 +124,19 @@ fn try_extract_param TryFrom<&'a TaggedBase64>>( }) } -pub fn define_api( +pub fn define_api( options: &Options, -) -> Result, ApiError> +) -> Result, ApiError> where State: 'static + Send + Sync + ReadState, ::State: Send + Sync + BuilderDataSource, { - let mut api = load_api::( + let mut api = load_api::( options.api_path.as_ref(), - include_str!("../api/builder.toml"), + include_str!("../../api/v0_1/builder.toml"), options.extensions.clone(), )?; - api.with_version("0.0.1".parse().unwrap()) + api.with_version("0.1.0".parse().unwrap()) .get("available_blocks", |req, state| { async move { let hash = req.blob_param("parent_hash")?; @@ -195,7 +196,7 @@ where { let mut api = load_api::( options.api_path.as_ref(), - include_str!("../api/submit.toml"), + include_str!("../../api/v0_1/submit.toml"), options.extensions.clone(), )?; api.with_version("0.0.1".parse().unwrap()) diff --git a/builder-api/src/data_source.rs b/builder-api/src/v0_1/data_source.rs similarity index 99% rename from builder-api/src/data_source.rs rename to builder-api/src/v0_1/data_source.rs index 49b8948a3f..703e15869a 100644 --- a/builder-api/src/data_source.rs +++ b/builder-api/src/v0_1/data_source.rs @@ -6,7 +6,7 @@ use hotshot_types::{ vid::VidCommitment, }; -use crate::{ +use super::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::BuildError, }; diff --git a/builder-api/src/v0_1/mod.rs b/builder-api/src/v0_1/mod.rs new file mode 100644 index 0000000000..1ec31c884c --- /dev/null +++ b/builder-api/src/v0_1/mod.rs @@ -0,0 +1,6 @@ +pub mod block_info; +pub mod builder; +pub mod data_source; +pub mod query_data; + +pub type Version = vbs::version::StaticVersion<0, 1>; diff --git a/builder-api/src/query_data.rs b/builder-api/src/v0_1/query_data.rs similarity index 87% rename from builder-api/src/query_data.rs rename to builder-api/src/v0_1/query_data.rs index 795eabecf1..c06a0bc1f2 100644 --- a/builder-api/src/query_data.rs +++ b/builder-api/src/v0_1/query_data.rs @@ -1,7 +1,7 @@ use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; -use crate::block_info::AvailableBlockInfo; +use super::block_info::AvailableBlockInfo; #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq, Hash)] #[serde(bound = "")] diff --git a/builder-api/src/v0_3/block_info.rs b/builder-api/src/v0_3/block_info.rs new file mode 100644 index 0000000000..56df60864b --- /dev/null +++ b/builder-api/src/v0_3/block_info.rs @@ -0,0 +1,32 @@ +use hotshot_types::traits::{ + node_implementation::NodeType, signature_key::BuilderSignatureKey, BlockPayload, +}; +use serde::{Deserialize, Serialize}; + +/// No changes to these types +pub use crate::v0_1::block_info::AvailableBlockInfo; + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] +#[serde(bound = "")] +pub struct AvailableBlockData { + pub block_payload: TYPES::BlockPayload, + pub metadata: >::Metadata, + pub fee: u64, + pub signature: + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, + pub fee_signature: + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, + pub sender: ::BuilderSignatureKey, +} + +impl AvailableBlockData { + pub fn validate_signature(&self) -> bool { + // verify the signature over the message, construct the builder commitment + let builder_commitment = self.block_payload.builder_commitment(&self.metadata); + self.sender + .validate_builder_signature(&self.signature, builder_commitment.as_ref()) + && self + .sender + .validate_sequencing_fee_signature_marketplace(&self.fee_signature, self.fee) + } +} diff --git a/builder-api/src/v0_3/builder.rs b/builder-api/src/v0_3/builder.rs new file mode 100644 index 0000000000..d5ebad46fa --- /dev/null +++ b/builder-api/src/v0_3/builder.rs @@ -0,0 +1,61 @@ +use futures::FutureExt; +use hotshot_types::{traits::node_implementation::NodeType, utils::BuilderCommitment}; +use snafu::ResultExt; +use tide_disco::{api::ApiError, method::ReadState, Api}; + +use super::{data_source::BuilderDataSource, Version}; +/// No changes to these types +pub use crate::v0_1::builder::{ + submit_api, BlockAvailableSnafu, BlockClaimSnafu, BuildError, BuilderAddressSnafu, Error, + Options, +}; +use crate::{api::load_api, v0_1::builder::try_extract_param}; + +pub fn define_api( + options: &Options, +) -> Result, ApiError> +where + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + BuilderDataSource, +{ + let mut api = load_api::( + options.api_path.as_ref(), + include_str!("../../api/v0_3/builder.toml"), + options.extensions.clone(), + )?; + api.with_version("0.0.3".parse().unwrap()) + .get("available_blocks", |req, state| { + async move { + let hash = req.blob_param("parent_hash")?; + let view_number = req.integer_param("view_number")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; + state + .available_blocks(&hash, view_number, sender, &signature) + .await + .context(BlockAvailableSnafu { + resource: hash.to_string(), + }) + } + .boxed() + })? + .get("claim_block", |req, state| { + async move { + let block_hash: BuilderCommitment = req.blob_param("block_hash")?; + let view_number = req.integer_param("view_number")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; + state + .claim_block(&block_hash, view_number, sender, &signature) + .await + .context(BlockClaimSnafu { + resource: block_hash.to_string(), + }) + } + .boxed() + })? + .get("builder_address", |_req, state| { + async move { state.builder_address().await.context(BuilderAddressSnafu) }.boxed() + })?; + Ok(api) +} diff --git a/builder-api/src/v0_3/data_source.rs b/builder-api/src/v0_3/data_source.rs new file mode 100644 index 0000000000..8face03e42 --- /dev/null +++ b/builder-api/src/v0_3/data_source.rs @@ -0,0 +1,37 @@ +use async_trait::async_trait; +use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + utils::BuilderCommitment, + vid::VidCommitment, +}; + +use super::{ + block_info::{AvailableBlockData, AvailableBlockInfo}, + builder::BuildError, +}; +/// No changes to these types +pub use crate::v0_1::data_source::AcceptsTxnSubmits; + +#[async_trait] +pub trait BuilderDataSource { + /// To get the list of available blocks + async fn available_blocks( + &self, + for_parent: &VidCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + ) -> Result>, BuildError>; + + /// to claim a block from the list of provided available blocks + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + ) -> Result, BuildError>; + + /// To get the builder's address + async fn builder_address(&self) -> Result; +} diff --git a/builder-api/src/v0_3/mod.rs b/builder-api/src/v0_3/mod.rs new file mode 100644 index 0000000000..21b42d83e0 --- /dev/null +++ b/builder-api/src/v0_3/mod.rs @@ -0,0 +1,7 @@ +pub mod block_info; +pub mod builder; +pub mod data_source; +/// No changes to this module +pub use super::v0_1::query_data; + +pub type Version = vbs::version::StaticVersion<0, 3>; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 8d14394455..a0f9a9d611 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -203,7 +203,8 @@ impl> CreateTaskState .cloned() .map(BuilderClient::new) .collect(), - decided_upgrade_certificate: None, + builder_clients_marketplace: Vec::new(), + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), } } } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index d69748bc85..df7fd5f661 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -1,13 +1,12 @@ use std::time::{Duration, Instant}; use async_compatibility_layer::art::async_sleep; -use hotshot_builder_api::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, +use hotshot_builder_api::v0_1::{ + block_info::AvailableBlockInfo, builder::{BuildError, Error as BuilderApiError}, }; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::SignatureKey}, - utils::BuilderCommitment, vid::VidCommitment, }; use serde::{Deserialize, Serialize}; @@ -120,48 +119,119 @@ impl BuilderClient { .await .map_err(Into::into) } +} - /// Claim block - /// - /// # Errors - /// - [`BuilderClientError::NotFound`] if block isn't available - /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly - pub async fn claim_block( - &self, - block_hash: BuilderCommitment, - view_number: u64, - sender: TYPES::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuilderClientError> { - let encoded_signature: TaggedBase64 = signature.clone().into(); - self.inner - .get(&format!( - "claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" - )) - .send() - .await - .map_err(Into::into) +/// Version 0.1 +pub mod v0_1 { + + use hotshot_builder_api::v0_1::block_info::{AvailableBlockData, AvailableBlockHeaderInput}; + pub use hotshot_builder_api::v0_1::Version; + use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + utils::BuilderCommitment, + }; + use tagged_base64::TaggedBase64; + + use super::BuilderClientError; + + /// Client for builder API + pub type BuilderClient = super::BuilderClient; + + impl BuilderClient { + /// Claim block header input + /// + /// # Errors + /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block_header_input( + &self, + block_hash: BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.inner + .get(&format!( + "claimheaderinput/{block_hash}/{view_number}/{sender}/{encoded_signature}" + )) + .send() + .await + .map_err(Into::into) + } + + /// Claim block + /// + /// # Errors + /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block( + &self, + block_hash: BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.inner + .get(&format!( + "claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" + )) + .send() + .await + .map_err(Into::into) + } } +} - /// Claim block header input - /// - /// # Errors - /// - [`BuilderClientError::NotFound`] if block isn't available - /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly - pub async fn claim_block_header_input( - &self, - block_hash: BuilderCommitment, - view_number: u64, - sender: TYPES::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuilderClientError> { - let encoded_signature: TaggedBase64 = signature.clone().into(); - self.inner - .get(&format!( - "claimheaderinput/{block_hash}/{view_number}/{sender}/{encoded_signature}" - )) - .send() - .await - .map_err(Into::into) +/// Version 0.2. No changes in API +pub mod v0_2 { + use vbs::version::StaticVersion; + + pub use super::v0_1::*; + + /// Builder API version + pub type Version = StaticVersion<0, 2>; +} + +/// Version 0.3. Removes `claim_block_header_input` endpoint, adds fee information +/// to `claim_block` endpoint. +pub mod v0_3 { + use hotshot_builder_api::v0_3::block_info::AvailableBlockData; + pub use hotshot_builder_api::v0_3::Version; + use hotshot_types::{ + traits::{node_implementation::NodeType, signature_key::SignatureKey}, + utils::BuilderCommitment, + }; + use tagged_base64::TaggedBase64; + use vbs::version::StaticVersion; + + pub use super::BuilderClientError; + + /// Client for builder API + pub type BuilderClient = super::BuilderClient>; + + impl BuilderClient { + /// Claim block + /// + /// # Errors + /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block( + &self, + block_hash: BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.inner + .get(&format!( + "claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" + )) + .send() + .await + .map_err(Into::into) + } } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e7aedfce77..a21927ff86 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -6,11 +6,10 @@ use std::{ use anyhow::{bail, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_sleep; +use async_lock::RwLock; use async_trait::async_trait; use futures::{stream::FuturesUnordered, StreamExt}; -use hotshot_builder_api::block_info::{ - AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo, -}; +use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, @@ -25,12 +24,15 @@ use hotshot_types::{ BlockPayload, }, utils::ViewInner, - vid::VidCommitment, + vid::{VidCommitment, VidPrecomputeData}, }; use tracing::{debug, error, instrument, warn}; +use vbs::version::{StaticVersionType, Version}; use crate::{ - builder::BuilderClient, + builder::{ + v0_1::BuilderClient as BuilderClientBase, v0_3::BuilderClient as BuilderClientMarketplace, + }, events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, }; @@ -49,17 +51,19 @@ const BUILDER_ADDITIONAL_TIME_MULTIPLIER: f32 = 0.2; /// responds extremely fast. const BUILDER_MINIMUM_QUERY_TIME: Duration = Duration::from_millis(300); +/// The version of the builder API used by the marketplace +type MarketplaceVersion = crate::builder::v0_3::Version; + /// Builder Provided Responses -pub struct BuilderResponses { - /// Initial API response - /// It contains information about the available blocks - pub blocks_initial_info: AvailableBlockInfo, - /// Second API response - /// It contains information about the chosen blocks - pub block_data: AvailableBlockData, - /// Third API response - /// It contains the final block information - pub block_header: AvailableBlockHeaderInput, +pub struct BuilderResponse { + /// Fee information + pub fee: BuilderFee, + /// Block payload + pub block_payload: TYPES::BlockPayload, + /// Block metadata + pub metadata: >::Metadata, + /// Optional precomputed commitment + pub precompute_data: Option, } /// The Bundle for a portion of a block, provided by a downstream builder that exists in a bundle @@ -98,8 +102,11 @@ pub struct TransactionTaskState> { /// Membership for the quorum pub membership: Arc, - /// Builder API client - pub builder_clients: Vec>, + /// Builder 0.1 API clients + pub builder_clients: Vec>, + + /// Builder 0.3 API clients + pub builder_clients_marketplace: Vec>, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -110,7 +117,7 @@ pub struct TransactionTaskState> { /// This state's ID pub id: u64, /// Decided upgrade certificate - pub decided_upgrade_certificate: Option>, + pub decided_upgrade_certificate: Arc>>>, } impl> TransactionTaskState { @@ -136,9 +143,6 @@ impl> TransactionTaskState { - self.decided_upgrade_certificate = Some(cert.clone()); - } HotShotEvent::ViewChange(view) => { let view = *view; debug!("view change in transactions to view {:?}", view); @@ -160,44 +164,59 @@ impl> TransactionTaskState v, + Err(err) => { + error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + return None; + } + }; + // Request a block from the builder unless we are between versions. let block = { if self .decided_upgrade_certificate + .read() + .await .as_ref() .is_some_and(|cert| cert.upgrading_in(block_view)) { None } else { - self.wait_for_block(block_view).await + self.wait_for_block(block_view, version).await } }; - if let Some(BuilderResponses { - block_data, - blocks_initial_info, - block_header, + if let Some(BuilderResponse { + block_payload, + metadata, + fee, + precompute_data, }) = block { - let Some(sequencing_fee) = - null_block::builder_fee(self.membership.total_nodes()) + let Some(bid_fee) = + null_block::builder_fee(self.membership.total_nodes(), version) else { - error!("Failed to get sequencing fee"); + error!("Failed to get bid fee"); return None; }; broadcast_event( Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( - block_data.block_payload.encode(), - block_data.metadata, + block_payload.encode(), + metadata, block_view, - vec1::vec1![BuilderFee { - fee_amount: blocks_initial_info.offered_fee, - fee_account: block_data.sender, - fee_signature: block_header.fee_signature, - },], - vec1::vec1![sequencing_fee], - block_header.vid_precompute_data, + vec1::vec1![bid_fee], + vec1::vec1![fee], + precompute_data, ))), &event_stream, ) @@ -218,7 +237,8 @@ impl> TransactionTaskState> TransactionTaskState> TransactionTaskState Option> { + async fn wait_for_block( + &self, + block_view: TYPES::Time, + version: Version, + ) -> Option> { let task_start_time = Instant::now(); // Find commitment to the block we want to build upon @@ -310,7 +334,7 @@ impl> TransactionTaskState> TransactionTaskState::SignatureKey as SignatureKey>::PureAssembledSignatureType, + version: Version, ) -> Vec<(AvailableBlockInfo, usize)> { - // Create a collection of futures that call available_blocks endpoint for every builder - let tasks = self - .builder_clients - .iter() - .enumerate() - .map(|(builder_idx, client)| async move { - client - .available_blocks( - parent_comm, - view_number.u64(), - self.public_key.clone(), - parent_comm_sig, - ) - .await - .map(move |blocks| { - // Add index into `self.builder_clients` for each block so that we know - // where to claim it from later - blocks - .into_iter() - .map(move |block_info| (block_info, builder_idx)) + /// Implementations between versions are essentially the same except for the builder + /// clients used. The most conscise way to express this is with a macro. + macro_rules! inner_impl { + ($clients:ident) => {{ + // Create a collection of futures that call available_blocks endpoint for every builder + let tasks = self + .$clients + .iter() + .enumerate() + .map(|(builder_idx, client)| async move { + client + .available_blocks( + parent_comm, + view_number.u64(), + self.public_key.clone(), + parent_comm_sig, + ) + .await + .map(move |blocks| { + // Add index into `self.builder_clients` for each block so that we know + // where to claim it from later + blocks + .into_iter() + .map(move |block_info| (block_info, builder_idx)) + }) }) - }) - .collect::>(); - - // A vector of resolved builder responses - let mut results = Vec::with_capacity(self.builder_clients.len()); - - // Instant we start querying builders for available blocks - let query_start = Instant::now(); - - // First we complete the query to the fastest fraction of the builders - let threshold = (self.builder_clients.len() * BUILDER_MAIN_BATCH_THRESHOLD_DIVIDEND) - .div_ceil(BUILDER_MAIN_BATCH_THRESHOLD_DIVISOR); - let mut tasks = tasks.take(threshold); - while let Some(result) = tasks.next().await { - results.push(result); - if query_start.elapsed() > BUILDER_MAIN_BATCH_CUTOFF { - break; - } - } + .collect::>(); + + // A vector of resolved builder responses + let mut results = Vec::with_capacity(self.$clients.len()); + + // Instant we start querying builders for available blocks + let query_start = Instant::now(); + + // First we complete the query to the fastest fraction of the builders + let threshold = (self.$clients.len() * BUILDER_MAIN_BATCH_THRESHOLD_DIVIDEND) + .div_ceil(BUILDER_MAIN_BATCH_THRESHOLD_DIVISOR); + let mut tasks = tasks.take(threshold); + while let Some(result) = tasks.next().await { + results.push(result); + if query_start.elapsed() > BUILDER_MAIN_BATCH_CUTOFF { + break; + } + } - // Then we query the rest, alotting additional `elapsed * BUILDER_ADDITIONAL_TIME_MULTIPLIER` - // for them to respond. There's a fixed floor of `BUILDER_MINIMUM_QUERY_TIME` for both - // phases - let timeout = async_sleep(std::cmp::max( - query_start - .elapsed() - .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), - BUILDER_MINIMUM_QUERY_TIME.saturating_sub(query_start.elapsed()), - )); - futures::pin_mut!(timeout); // Stream::next requires Self::Unpin - let mut tasks = tasks.into_inner().take_until(timeout); - while let Some(result) = tasks.next().await { - results.push(result); + // Then we query the rest, alotting additional `elapsed * BUILDER_ADDITIONAL_TIME_MULTIPLIER` + // for them to respond. There's a fixed floor of `BUILDER_MINIMUM_QUERY_TIME` for both + // phases + let timeout = async_sleep(std::cmp::max( + query_start + .elapsed() + .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), + BUILDER_MINIMUM_QUERY_TIME.saturating_sub(query_start.elapsed()), + )); + futures::pin_mut!(timeout); // Stream::next requires Self::Unpin + let mut tasks = tasks.into_inner().take_until(timeout); + while let Some(result) = tasks.next().await { + results.push(result); + } + + results + .into_iter() + .filter_map(|result| match result { + Ok(value) => Some(value), + Err(err) => { + tracing::warn!(%err, "Error getting available blocks"); + None + } + }) + .flatten() + .collect::>() + }} } - results - .into_iter() - .filter_map(|result| match result { - Ok(value) => Some(value), - Err(err) => { - tracing::warn!(%err, "Error getting available blocks"); - None - } - }) - .flatten() - .collect::>() + if version >= MarketplaceVersion::version() { + inner_impl!(builder_clients_marketplace) + } else { + inner_impl!(builder_clients) + } } /// Get a block from builder. @@ -429,9 +466,10 @@ impl> TransactionTaskState::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> anyhow::Result> { + version: Version, + ) -> anyhow::Result> { let mut available_blocks = self - .get_available_blocks(parent_comm, view_number, parent_comm_sig) + .get_available_blocks(parent_comm, view_number, parent_comm_sig, version) .await; available_blocks.sort_by(|(l, _), (r, _)| { @@ -451,8 +489,6 @@ impl> TransactionTaskState> TransactionTaskState= MarketplaceVersion::version() { + let client = &self.builder_clients_marketplace[builder_idx]; - let block_data = match block { - Ok(block_data) => block_data, - Err(err) => { - tracing::warn!(%err, "Error claiming block data"); + let block = client + .claim_block( + block_info.block_hash.clone(), + view_number.u64(), + self.public_key.clone(), + &request_signature, + ) + .await; + + let block_data = match block { + Ok(block_data) => block_data, + Err(err) => { + tracing::warn!(%err, "Error claiming block data"); + continue; + } + }; + + // verify the signature over the message, construct the builder commitment + let builder_commitment = block_data + .block_payload + .builder_commitment(&block_data.metadata); + if !block_data + .sender + .validate_builder_signature(&block_data.signature, builder_commitment.as_ref()) + { + tracing::warn!( + "Failed to verify available block data response message signature" + ); continue; } - }; - let header_input = match header_input { - Ok(block_data) => block_data, - Err(err) => { - tracing::warn!(%err, "Error claiming header input"); - continue; + let fee = BuilderFee { + fee_amount: block_info.offered_fee, + fee_account: block_data.sender, + fee_signature: block_data.signature, + }; + + BuilderResponse { + fee, + block_payload: block_data.block_payload, + metadata: block_data.metadata, + precompute_data: None, } - }; + } else { + let client = &self.builder_clients[builder_idx]; - // verify the signature over the message, construct the builder commitment - let builder_commitment = block_data - .block_payload - .builder_commitment(&block_data.metadata); - if !block_data - .sender - .validate_builder_signature(&block_data.signature, builder_commitment.as_ref()) - { - tracing::warn!("Failed to verify available block data response message signature"); - continue; - } + let (block, header_input) = futures::join! { + client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + }; - // first verify the message signature and later verify the fee_signature - if !header_input.sender.validate_builder_signature( - &header_input.message_signature, - header_input.vid_commitment.as_ref(), - ) { - tracing::warn!( + let block_data = match block { + Ok(block_data) => block_data, + Err(err) => { + tracing::warn!(%err, "Error claiming block data"); + continue; + } + }; + + let header_input = match header_input { + Ok(block_data) => block_data, + Err(err) => { + tracing::warn!(%err, "Error claiming header input"); + continue; + } + }; + + // verify the signature over the message + if !block_data.validate_signature() { + tracing::warn!( + "Failed to verify available block data response message signature" + ); + continue; + } + + // verify the message signature and the fee_signature + if !header_input.validate_signature(block_info.offered_fee, &block_data.metadata) { + tracing::warn!( "Failed to verify available block header input data response message signature" ); - continue; - } + continue; + } - // verify the signature over the message - if !header_input.sender.validate_fee_signature( - &header_input.fee_signature, - block_info.offered_fee, - &block_data.metadata, - &header_input.vid_commitment, - ) { - tracing::warn!("Failed to verify fee signature"); - continue; - } + let fee = BuilderFee { + fee_amount: block_info.offered_fee, + fee_account: header_input.sender, + fee_signature: header_input.fee_signature, + }; + + BuilderResponse { + fee, + block_payload: block_data.block_payload, + metadata: block_data.metadata, + precompute_data: Some(header_input.vid_precompute_data), + } + }; - return Ok(BuilderResponses { - blocks_initial_info: block_info, - block_data, - block_header: header_input, - }); + return Ok(response); } bail!("Couldn't claim a block from any of the builders"); diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3175c137b6..e5a0b925d5 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -67,7 +67,7 @@ impl> VidTaskState { Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, - Some(vid_precompute.clone()), + vid_precompute.clone(), ) .await; let payload_commitment = vid_disperse.payload_commitment; diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index e9b75f4edc..4d95f435ee 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -5,7 +5,7 @@ use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use futures::Stream; use hotshot::{traits::BlockPayload, types::Event}; -use hotshot_builder_api::{ +use hotshot_builder_api::v0_1::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{Error, Options}, data_source::BuilderDataSource, @@ -72,11 +72,10 @@ pub fn run_builder_source( { async_spawn(async move { let start_builder = |url: Url, source: Source| -> _ { - let builder_api = - hotshot_builder_api::builder::define_api::( - &Options::default(), - ) - .expect("Failed to construct the builder API"); + let builder_api = hotshot_builder_api::v0_1::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); let mut app: App = App::with_state(source); app.register_module("block_info", builder_api) .expect("Failed to register the builder API"); diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index 64cc27d3ef..0121805e67 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -15,7 +15,7 @@ use async_lock::RwLock; use async_trait::async_trait; use futures::{future::BoxFuture, Stream, StreamExt}; use hotshot::types::{Event, EventType, SignatureKey}; -use hotshot_builder_api::{ +use hotshot_builder_api::v0_1::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::BuildError, data_source::BuilderDataSource, diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index f88f3a9044..592ab5b322 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -18,7 +18,7 @@ use hotshot::{ traits::BlockPayload, types::{Event, EventType, SignatureKey}, }; -use hotshot_builder_api::{ +use hotshot_builder_api::v0_1::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Error, Options}, data_source::BuilderDataSource, @@ -237,14 +237,13 @@ impl SimpleBuilderSource { where ::InstanceState: Default, { - let builder_api = hotshot_builder_api::builder::define_api::< + let builder_api = hotshot_builder_api::v0_1::builder::define_api::< SimpleBuilderSource, TYPES, - TYPES::Base, >(&Options::default()) .expect("Failed to construct the builder API"); let mut app: App, Error> = App::with_state(self); - app.register_module::("block_info", builder_api) + app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, TYPES::Base::instance())); diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index fb60e1a1e2..4eb0dcdd2f 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -4,7 +4,7 @@ use std::{ }; use async_compatibility_layer::art::async_sleep; -use hotshot_builder_api::block_info::AvailableBlockData; +use hotshot_builder_api::v0_1::block_info::AvailableBlockData; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::TestTypes, @@ -14,11 +14,9 @@ use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; use hotshot_testing::block_builder::{ BuilderTask, RandomBuilderImplementation, TestBuilderImplementation, }; -use hotshot_types::{ - traits::{ - block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, - BlockPayload, - }, +use hotshot_types::traits::{ + block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, + BlockPayload, }; use tide_disco::Url; @@ -46,7 +44,8 @@ async fn test_random_block_builder() { let builder_started = Instant::now(); - let client: BuilderClient::Base> = BuilderClient::new(api_url); + let client: BuilderClient::Base> = + BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); let (pub_key, private_key) = diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 2177ddeb40..5f0b3e9bec 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,5 +1,4 @@ #![cfg(not(feature = "dependency-tasks"))] - // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] @@ -17,12 +16,12 @@ use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*} use hotshot_testing::{ all_predicates, helpers::{ - build_system_handle, key_pair_for_id, permute_input_with_index_order, - vid_scheme_from_view_number, vid_share, build_fake_view_with_leaf + build_fake_view_with_leaf, build_system_handle, key_pair_for_id, + permute_input_with_index_order, vid_scheme_from_view_number, vid_share, }, predicates::event::{ all_predicates, exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, - timeout_vote_send, validated_state_updated + timeout_vote_send, validated_state_updated, }, random, script::{Expectations, InputOrder, TaskScript}, @@ -45,6 +44,9 @@ const TIMEOUT: Duration = Duration::from_millis(35); #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use hotshot_types::constants::BaseVersion; + use vbs::version::StaticVersionType; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -95,7 +97,8 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap(), ), ], ]; @@ -184,7 +187,8 @@ async fn test_consensus_vote() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; - use hotshot_types::data::null_block; + use hotshot_types::{constants::BaseVersion, data::null_block}; + use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -281,7 +285,7 @@ async fn test_view_sync_finalize_propose() { builder_commitment, TestMetadata, ViewNumber::new(4), - null_block::builder_fee(4).unwrap(), + null_block::builder_fee(4, BaseVersion::version()).unwrap(), ), ], ]; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ddaed595d6..7b79f992d6 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -16,13 +16,15 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, ViewNumber, PackedBundle}, + constants::BaseVersion, + data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData, traits::{ block_contents::precompute_vid_commitment, election::Membership, node_implementation::ConsensusTime, }, }; +use vbs::version::StaticVersionType; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -73,16 +75,22 @@ async fn test_da_task() { serial![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), - BlockRecv( - PackedBundle::new( + BlockRecv(PackedBundle::new( encoded_transactions, TestMetadata, ViewNumber::new(2), - vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], - vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], - precompute, + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() ) - ), + .unwrap()], + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + Some(precompute), + )), ], serial![DaProposalRecv(proposals[1].clone(), leaders[1])], ]; @@ -158,16 +166,22 @@ async fn test_da_task_storage_failure() { serial![ ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), - BlockRecv( - PackedBundle::new( + BlockRecv(PackedBundle::new( encoded_transactions, TestMetadata, ViewNumber::new(2), - vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], - vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], - precompute, - ), - ), + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + Some(precompute), + ),) ], serial![DaProposalRecv(proposals[1].clone(), leaders[1])], serial![DaProposalValidated(proposals[1].clone(), leaders[1])], diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 54a2f7767d..b435d98fa3 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,29 +1,20 @@ #![cfg(feature = "dependency-tasks")] use std::time::Duration; + use futures::StreamExt; -use hotshot::tasks::task_state::CreateTaskState; -use hotshot::traits::ValidatedState; -use hotshot_example_types::state_types::TestValidatedState; +use hotshot::{tasks::task_state::CreateTaskState, traits::ValidatedState}; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes}, - + state_types::TestValidatedState, }; use hotshot_macros::{run_test, test_scripts}; -use hotshot_task_impls::{ - events::HotShotEvent::*, - quorum_proposal::QuorumProposalTaskState, -}; +use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ all_predicates, - helpers::{ - build_fake_view_with_leaf, build_system_handle, - build_payload_commitment - }, - predicates::{ - event::{all_predicates, exact, quorum_proposal_send}, - }, + helpers::{build_fake_view_with_leaf, build_payload_commitment, build_system_handle}, + predicates::event::{all_predicates, exact, quorum_proposal_send}, random, script::{Expectations, InputOrder, TaskScript}, serial, @@ -32,10 +23,7 @@ use hotshot_testing::{ use hotshot_types::{ data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, - traits::{ - election::Membership, - node_implementation::{ConsensusTime}, - }, + traits::{election::Membership, node_implementation::ConsensusTime}, utils::BuilderCommitment, vote::HasViewNumber, }; @@ -48,6 +36,8 @@ const TIMEOUT: Duration = Duration::from_millis(35); #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_1() { use hotshot_testing::script::{Expectations, TaskScript}; + use hotshot_types::constants::BaseVersion; + use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -84,10 +74,15 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { // We must send the genesis cert here to initialize hotshot successfully. let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + let builder_fee = + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); drop(consensus_writer); let inputs = vec![ - serial![VidDisperseSend(vid_dispersals[0].clone(), handle.public_key())], + serial![VidDisperseSend( + vid_dispersals[0].clone(), + handle.public_key() + )], random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( @@ -95,7 +90,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { builder_commitment, TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), ValidatedStateUpdated( proposals[0].data.view_number(), @@ -111,7 +106,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { exact(HighQcUpdated(genesis_cert.clone())), quorum_proposal_send(), ]), - ]; let quorum_proposal_task_state = @@ -129,6 +123,9 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { + use hotshot_types::constants::BaseVersion; + use vbs::version::StaticVersionType; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -162,7 +159,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { .update_validated_state_map( view.quorum_proposal.data.view_number(), build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + ) + .unwrap(); } // We need to handle the views where we aren't the leader to ensure that the states are @@ -177,6 +175,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + let builder_fee = + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); let inputs = vec![ random![ @@ -186,7 +186,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( @@ -202,7 +202,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -218,7 +218,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -234,7 +234,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( @@ -250,7 +250,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment, TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( @@ -300,6 +300,9 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_qc_timeout() { + use hotshot_types::constants::BaseVersion; + use vbs::version::StaticVersionType; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -350,7 +353,8 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap(), ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -377,7 +381,8 @@ async fn test_quorum_proposal_task_qc_timeout() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_view_sync() { use hotshot_example_types::block_types::TestMetadata; - use hotshot_types::data::null_block; + use hotshot_types::{constants::BaseVersion, data::null_block}; + use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -431,7 +436,8 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap(), ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -457,6 +463,9 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_liveness_check() { + use hotshot_types::constants::BaseVersion; + use vbs::version::StaticVersionType; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -489,11 +498,14 @@ async fn test_quorum_proposal_task_liveness_check() { .update_validated_state_map( view.quorum_proposal.data.view_number(), build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + ) + .unwrap(); } drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + let builder_fee = + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. @@ -512,7 +524,7 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( @@ -528,7 +540,7 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -544,7 +556,7 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -552,7 +564,6 @@ async fn test_quorum_proposal_task_liveness_check() { build_fake_view_with_leaf(leaves[1].clone()), ), ], - random![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), @@ -561,7 +572,7 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( @@ -577,7 +588,7 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment, TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( @@ -647,7 +658,10 @@ async fn test_quorum_proposal_task_with_incomplete_events() { // We run the task here at view 2, but this time we ignore the crucial piece of evidence: the // payload commitment and metadata. Instead we send only one of the three "OR" required fields. // This should result in the proposal failing to be sent. - let inputs = vec![serial![QuorumProposalRecv(proposals[1].clone(), leaders[1])]]; + let inputs = vec![serial![QuorumProposalRecv( + proposals[1].clone(), + leaders[1] + )]]; let expectations = vec![Expectations::from_outputs(vec![])]; @@ -661,4 +675,3 @@ async fn test_quorum_proposal_task_with_incomplete_events() { }; run_test![inputs, script].await; } - diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index d17e9ae859..a247d837f1 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -1,5 +1,4 @@ #![cfg(not(feature = "dependency-tasks"))] - // TODO: Remove after integration of dependency-tasks #![cfg(not(feature = "dependency-tasks"))] #![allow(unused_imports)] @@ -15,7 +14,7 @@ use hotshot_example_types::{ }; use hotshot_macros::test_scripts; use hotshot_task_impls::{ - consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState + consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf, vid_share}, @@ -24,12 +23,13 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ + constants::BaseVersion, data::{null_block, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, vote::HasViewNumber, }; -use vbs::version::Version; +use vbs::version::{StaticVersionType, Version}; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -272,7 +272,8 @@ async fn test_upgrade_task_propose() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap(), ), QcFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), ], @@ -361,6 +362,9 @@ async fn test_upgrade_task_blank_blocks() { let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; + let builder_fee = + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); + let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, new_version, @@ -455,7 +459,7 @@ async fn test_upgrade_task_blank_blocks() { proposals[1].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), ], vec![ @@ -466,7 +470,7 @@ async fn test_upgrade_task_blank_blocks() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -478,7 +482,7 @@ async fn test_upgrade_task_blank_blocks() { proposals[3].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -490,7 +494,7 @@ async fn test_upgrade_task_blank_blocks() { proposals[4].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(5), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -502,7 +506,7 @@ async fn test_upgrade_task_blank_blocks() { proposals[5].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(6), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), QuorumProposalRecv(proposals[5].clone(), leaders[5]), QcFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), @@ -515,7 +519,7 @@ async fn test_upgrade_task_blank_blocks() { proposals[6].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(7), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee, ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], @@ -579,9 +583,7 @@ async fn test_upgrade_task_blank_blocks() { exact(ViewChange(ViewNumber::new(6))), validated_state_updated(), quorum_proposal_validated(), - quorum_proposal_send_with_null_block( - quorum_membership.total_nodes(), - ), + quorum_proposal_send_with_null_block(quorum_membership.total_nodes()), leaf_decided(), quorum_vote_send(), ], diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 2a109eb0ed..de852f9824 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -1,5 +1,4 @@ #![cfg(feature = "dependency-tasks")] - // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] @@ -10,30 +9,32 @@ use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, }; -use sha2::Digest; -use hotshot_macros::{test_scripts, run_test}; +use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ - quorum_proposal::QuorumProposalTaskState, - consensus2::Consensus2TaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState + consensus2::Consensus2TaskState, events::HotShotEvent::*, + quorum_proposal::QuorumProposalTaskState, upgrade::UpgradeTaskState, }; use hotshot_testing::{ - helpers::{build_fake_view_with_leaf, vid_share,build_payload_commitment}, + all_predicates, + helpers::{build_fake_view_with_leaf, build_payload_commitment, vid_share}, predicates::{event::*, upgrade_with_proposal::*}, - script::{Expectations, InputOrder,TaskScript}, + random, + script::{Expectations, InputOrder, TaskScript}, + serial, view_generator::TestViewGenerator, - all_predicates, random, serial }; use hotshot_types::{ - data::{null_block,Leaf, ViewNumber}, + constants::BaseVersion, + data::{null_block, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, - traits::{election::Membership,ValidatedState, node_implementation::ConsensusTime}, + traits::{election::Membership, node_implementation::ConsensusTime, ValidatedState}, + utils::BuilderCommitment, vote::HasViewNumber, - utils::BuilderCommitment }; -use hotshot_example_types::state_types::TestValidatedState; -use vbs::version::Version; +use sha2::Digest; +use vbs::version::{StaticVersionType, Version}; const TIMEOUT: Duration = Duration::from_millis(35); @@ -94,7 +95,8 @@ async fn test_upgrade_task_with_proposal() { .update_validated_state_map( view.quorum_proposal.data.view_number(), build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + ) + .unwrap(); } generator.add_upgrade(upgrade_data.clone()); @@ -113,19 +115,25 @@ async fn test_upgrade_task_with_proposal() { .update_validated_state_map( view.quorum_proposal.data.view_number(), build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + ) + .unwrap(); } drop(consensus_writer); - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); + let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); + let builder_fee = + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); let upgrade_votes = other_handles .iter() .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); - let proposal_state = QuorumProposalTaskState::::create_from(&handle).await; + let proposal_state = + QuorumProposalTaskState::::create_from(&handle).await; let upgrade_state = UpgradeTaskState::::create_from(&handle).await; let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); @@ -138,7 +146,7 @@ async fn test_upgrade_task_with_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( @@ -154,7 +162,7 @@ async fn test_upgrade_task_with_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -171,7 +179,7 @@ async fn test_upgrade_task_with_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + builder_fee.clone(), ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -187,17 +195,17 @@ async fn test_upgrade_task_with_proposal() { expectations: vec![ Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), + exact(HighQcUpdated(genesis_cert.clone())), ]), Expectations::from_outputs(all_predicates![ exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), ]), Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), - quorum_proposal_send_with_upgrade_certificate::() + exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), + quorum_proposal_send_with_upgrade_certificate::() ]), ], }; diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index fccb4525b0..c547ea7a5c 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -15,7 +15,8 @@ use hotshot_testing::{ serial, }; use hotshot_types::{ - data::{null_block, DaProposal, VidDisperse, ViewNumber,PackedBundle}, + constants::BaseVersion, + data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -24,6 +25,7 @@ use hotshot_types::{ }, }; use jf_vid::{precomputable::Precomputable, VidScheme}; +use vbs::version::StaticVersionType; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -85,17 +87,22 @@ async fn test_vid_task() { serial![ViewChange(ViewNumber::new(1))], serial![ ViewChange(ViewNumber::new(2)), - BlockRecv( - PackedBundle::new( - encoded_transactions, - TestMetadata, - ViewNumber::new(2), - vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], - vec1::vec1![null_block::builder_fee(quorum_membership.total_nodes()).unwrap()], - vid_precompute, + BlockRecv(PackedBundle::new( + encoded_transactions, + TestMetadata, + ViewNumber::new(2), + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() ) - ), - + .unwrap()], + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + Some(vid_precompute), + )), ], ]; @@ -107,7 +114,8 @@ async fn test_vid_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes()).unwrap(), + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap(), )), exact(BlockReady(vid_disperse, ViewNumber::new(2))), exact(VidDisperseSend(vid_proposal.clone(), pub_key)), diff --git a/types/src/constants.rs b/types/src/constants.rs index 295b6011dd..b5aef3b8cf 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -42,6 +42,16 @@ pub type WebServerVersion = StaticVersion; +/// Constant for semver representation of "Base" version +pub const BASE_VERSION: BaseVersion = StaticVersion {}; + +/// Type for semver representation of "Marketplace" version +pub type MarketplaceVersion = StaticVersion<0, 3>; +/// Constant for semver representation of "Marketplace" version +pub const MARKETPLACE_VERSION: MarketplaceVersion = StaticVersion {}; + /// The offset for how far in the future we will send out a `QuorumProposal` with an `UpgradeCertificate` we form. This is also how far in advance of sending a `QuorumProposal` we begin collecting votes on an `UpgradeProposal`. pub const UPGRADE_PROPOSE_OFFSET: u64 = 5; diff --git a/types/src/data.rs b/types/src/data.rs index 7e1847a8e4..d9953744e8 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -792,6 +792,7 @@ pub mod null_block { use jf_vid::VidScheme; use memoize::memoize; + use vbs::version::StaticVersionType; use crate::{ traits::{ @@ -820,7 +821,10 @@ pub mod null_block { /// Builder fee data for a null block payload #[must_use] - pub fn builder_fee(num_storage_nodes: usize) -> Option> { + pub fn builder_fee( + num_storage_nodes: usize, + version: vbs::version::Version, + ) -> Option> { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; @@ -829,21 +833,33 @@ pub mod null_block { [0_u8; 32], 0, ); - let (_null_block, null_block_metadata) = - >::empty(); - - match TYPES::BuilderSignatureKey::sign_fee( - &priv_key, - FEE_AMOUNT, - &null_block_metadata, - &commitment(num_storage_nodes)?, - ) { - Ok(sig) => Some(BuilderFee { - fee_amount: FEE_AMOUNT, - fee_account: pub_key, - fee_signature: sig, - }), - Err(_) => None, + if version >= crate::constants::MarketplaceVersion::version() { + match TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace(&priv_key, FEE_AMOUNT) + { + Ok(sig) => Some(BuilderFee { + fee_amount: FEE_AMOUNT, + fee_account: pub_key, + fee_signature: sig, + }), + Err(_) => None, + } + } else { + let (_null_block, null_block_metadata) = + >::empty(); + + match TYPES::BuilderSignatureKey::sign_fee( + &priv_key, + FEE_AMOUNT, + &null_block_metadata, + &commitment(num_storage_nodes)?, + ) { + Ok(sig) => Some(BuilderFee { + fee_amount: FEE_AMOUNT, + fee_account: pub_key, + fee_signature: sig, + }), + Err(_) => None, + } } } } @@ -867,7 +883,7 @@ pub struct PackedBundle { pub sequencing_fees: Vec1>, /// The Vid precompute for the block. - pub vid_precompute: VidPrecomputeData, + pub vid_precompute: Option, } impl PackedBundle { @@ -878,7 +894,7 @@ impl PackedBundle { view_number: TYPES::Time, bid_fees: Vec1>, sequencing_fees: Vec1>, - vid_precompute: VidPrecomputeData, + vid_precompute: Option, ) -> Self { Self { encoded_transactions, diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index fd4ece125a..0146e8af0a 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -1,4 +1,8 @@ //! Minimal compatibility over public key signatures + +// data is serialized as big-endian for signing purposes +#![forbid(clippy::little_endian_bytes)] + use std::{ fmt::{Debug, Display}, hash::Hash, @@ -190,7 +194,8 @@ pub trait BuilderSignatureKey: /// validate the message with the builder's public key fn validate_builder_signature(&self, signature: &Self::BuilderSignature, data: &[u8]) -> bool; - /// validate signature over fee information with the builder's public key + /// validate signature over sequencing fee information + /// with the builder's public key fn validate_fee_signature( &self, signature: &Self::BuilderSignature, @@ -204,6 +209,16 @@ pub trait BuilderSignatureKey: ) } + /// validate signature over sequencing fee information + /// with the builder's public key (marketplace version) + fn validate_sequencing_fee_signature_marketplace( + &self, + signature: &Self::BuilderSignature, + fee_amount: u64, + ) -> bool { + self.validate_builder_signature(signature, &fee_amount.to_be_bytes()) + } + /// validate signature over block information with the builder's public key fn validate_block_info_signature( &self, @@ -226,7 +241,7 @@ pub trait BuilderSignatureKey: data: &[u8], ) -> Result; - /// sign fee offer for proposed payload + /// sign sequencing fee offer for proposed payload /// # Errors /// If unable to sign the data with the key fn sign_fee( @@ -241,6 +256,17 @@ pub trait BuilderSignatureKey: ) } + /// sign fee offer for proposed payload (marketplace version) + /// # Errors + /// If unable to sign the data with the key + // TODO: this should include view number + fn sign_sequencing_fee_marketplace( + private_key: &Self::BuilderPrivateKey, + fee_amount: u64, + ) -> Result { + Self::sign_builder_message(private_key, &fee_amount.to_be_bytes()) + } + /// sign information about offered block /// # Errors /// If unable to sign the data with the key From c31508990f182c79c6109c058f6dfa57e383c11d Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Fri, 19 Jul 2024 08:43:45 -0400 Subject: [PATCH 1129/1393] Minor change to AuctionResults trait to allow multiple URLs (#3460) * Minor change to AuctionResults trait to allow multiple URLs * Update versioning import for transactions * Update TestAuctionResults in fake-solver * Change HashSet to Vec * lints * Update doc references * Fix solver tests --- .../src/auction_results_provider_types.rs | 21 ++++++------- fakeapi/src/fake_solver.rs | 19 +++++------- task-impls/src/transactions.rs | 2 +- testing/tests/tests_5/fake_solver.rs | 30 +++++++++++-------- types/src/traits/auction_results_provider.rs | 19 +++++------- 5 files changed, 43 insertions(+), 48 deletions(-) diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index e7c9046f1d..87dbdb9374 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -1,22 +1,22 @@ use anyhow::{bail, Result}; use async_trait::async_trait; use hotshot_types::traits::{ - auction_results_provider::{AuctionResultsProvider, HasUrl}, + auction_results_provider::{AuctionResultsProvider, HasUrls}, node_implementation::NodeType, }; use serde::{Deserialize, Serialize}; use url::Url; /// A mock result for the auction solver. This type is just a pointer to a URL. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct TestAuctionResult { /// The URL of the builder to reach out to. - pub url: Url, + pub urls: Vec, } -impl HasUrl for TestAuctionResult { - fn url(&self) -> Url { - self.url.clone() +impl HasUrls for TestAuctionResult { + fn urls(&self) -> Vec { + self.urls.clone() } } @@ -25,7 +25,7 @@ impl HasUrl for TestAuctionResult { pub struct TestAuctionResultsProvider { /// We intentionally allow for the results to be pre-cooked for the unit test to gurantee a /// particular outcome is met. - pub solver_results: Vec, + pub solver_results: TestAuctionResult, /// A canned type to ensure that an error is thrown in absence of a true fault-injectible /// system for logical tests. This will guarantee that `fetch_auction_result` always throws an @@ -44,15 +44,12 @@ impl AuctionResultsProvider for TestAuctionResultsProvid /// Mock fetching the auction results, with optional error injection to simulate failure cases /// in the solver. - async fn fetch_auction_result( - &self, - view_number: TYPES::Time, - ) -> Result> { + async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result { if let Some(url) = &self.broadcast_url { let resp = reqwest::get(url.join(&format!("/v0/api/auction_results/{}", *view_number))?) .await? - .json::>() + .json::() .await?; Ok(resp) diff --git a/fakeapi/src/fake_solver.rs b/fakeapi/src/fake_solver.rs index dfeec834ca..0a09b7648b 100644 --- a/fakeapi/src/fake_solver.rs +++ b/fakeapi/src/fake_solver.rs @@ -83,7 +83,7 @@ impl FakeSolverState { /// /// # Errors /// Returns an error if the `should_fault` method is `Some`. - fn dump_builders(&self) -> Result, ServerError> { + fn dump_builders(&self) -> Result { if let Some(fault) = self.should_fault() { match fault { FakeSolverFaultType::InternalServerFault => { @@ -99,12 +99,9 @@ impl FakeSolverState { } } - // Now just send the builder urls - Ok(self - .available_builders - .iter() - .map(|url| TestAuctionResult { url: url.clone() }) - .collect()) + Ok(TestAuctionResult { + urls: self.available_builders.clone(), + }) } } @@ -116,14 +113,14 @@ pub trait FakeSolverApi { async fn get_auction_results_non_permissioned( &self, _view_number: u64, - ) -> Result, ServerError>; + ) -> Result; /// Get the auction results with a valid signature. async fn get_auction_results_permissioned( &self, _view_number: u64, _signature: &::PureAssembledSignatureType, - ) -> Result, ServerError>; + ) -> Result; } #[async_trait::async_trait] @@ -132,7 +129,7 @@ impl FakeSolverApi for FakeSolverState { async fn get_auction_results_non_permissioned( &self, _view_number: u64, - ) -> Result, ServerError> { + ) -> Result { self.dump_builders() } @@ -141,7 +138,7 @@ impl FakeSolverApi for FakeSolverState { &self, _view_number: u64, _signature: &::PureAssembledSignatureType, - ) -> Result, ServerError> { + ) -> Result { self.dump_builders() } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index a21927ff86..cdf71241db 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -164,7 +164,7 @@ impl> TransactionTaskState>>() + .json::() .await .unwrap(); @@ -54,9 +55,9 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); - assert_eq!(resp[0]["url"], "http://localhost:1111/"); - assert_eq!(resp[1]["url"], "http://localhost:1112/"); - assert_eq!(resp[2]["url"], "http://localhost:1113/"); + assert_eq!(resp.urls[0], Url::parse("http://localhost:1111/").unwrap()); + assert_eq!(resp.urls[1], Url::parse("http://localhost:1112/").unwrap()); + assert_eq!(resp.urls[2], Url::parse("http://localhost:1113/").unwrap()); } #[cfg(test)] @@ -64,6 +65,7 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_non_permissioned_with_errors() { + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -127,7 +129,7 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { return; } - let payload = resp.json::>>().await.unwrap(); + let payload = resp.json::().await.unwrap(); payloads.push(payload); } } @@ -139,7 +141,7 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { // Assert over the payloads with a 50% error rate. for payload in payloads { - assert_eq!(payload[0]["url"], "http://localhost:1111/"); + assert_eq!(payload.urls[0], Url::parse("http://localhost:1111/").unwrap()); } } @@ -148,6 +150,7 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_permissioned_no_error() { + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -191,7 +194,7 @@ async fn test_fake_solver_fetch_permissioned_no_error() { .send() .await .unwrap() - .json::>>() + .json::() .await .unwrap(); @@ -200,9 +203,9 @@ async fn test_fake_solver_fetch_permissioned_no_error() { #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); - assert_eq!(resp[0]["url"], "http://localhost:1111/"); - assert_eq!(resp[1]["url"], "http://localhost:1112/"); - assert_eq!(resp[2]["url"], "http://localhost:1113/"); + assert_eq!(resp.urls[0], Url::parse("http://localhost:1111/").unwrap()); + assert_eq!(resp.urls[1], Url::parse("http://localhost:1112/").unwrap()); + assert_eq!(resp.urls[2], Url::parse("http://localhost:1113/").unwrap()); } #[cfg(test)] @@ -210,6 +213,7 @@ async fn test_fake_solver_fetch_permissioned_no_error() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_permissioned_with_errors() { + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -280,7 +284,7 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { return; } - let payload = resp.json::>>().await.unwrap(); + let payload = resp.json::().await.unwrap(); payloads.push(payload); } } @@ -292,6 +296,6 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { // Assert over the payloads with a 50% error rate. for payload in payloads { - assert_eq!(payload[0]["url"], "http://localhost:1111/"); + assert_eq!(payload.urls[0], Url::parse("http://localhost:1111/").unwrap()); } } diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index c6cd4c2f51..09bbcb54e4 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -7,28 +7,25 @@ use url::Url; use super::node_implementation::NodeType; -/// This trait guarantees that a particular type has a url associated with it. This trait -/// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a URL -/// for the builder that HotShot must request from. -pub trait HasUrl { +/// This trait guarantees that a particular type has urls that can be extracted from it. This trait +/// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a +/// list of urls for the builders that HotShot must request from. +pub trait HasUrls { /// Returns the builer url associated with the datatype - fn url(&self) -> Url; + fn urls(&self) -> Vec; } /// The AuctionResultsProvider trait is the sole source of Solver-originated state and interaction, /// and returns the results of the Solver's allocation via the associated type. The associated type, -/// `AuctionResult`, also implements the [`HasUrl`] trait, which requires that the output +/// `AuctionResult`, also implements the [`HasUrls`] trait, which requires that the output /// type has the requisite fields available. #[async_trait] pub trait AuctionResultsProvider: Send + Sync + Clone { /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. - type AuctionResult: HasUrl; + type AuctionResult: HasUrls; /// Fetches the auction result for a view. Does not cache the result, /// subsequent calls will invoke additional wasted calls. - async fn fetch_auction_result( - &self, - view_number: TYPES::Time, - ) -> Result>; + async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; } From ac0713f60a10584110440d69ce900be5f67a1a24 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 19 Jul 2024 09:46:50 -0400 Subject: [PATCH 1130/1393] Update CDN (#3466) * update CDN * lint --- task-impls/src/view_sync.rs | 2 +- types/src/message.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c319939d57..c7a358b4c9 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -424,7 +424,7 @@ impl> ViewSyncTaskState let leader = self.membership.leader(view_number); error!( %leader, - leader_mnemonic = cdn_proto::mnemonic(&leader), + leader_mnemonic = cdn_proto::util::mnemonic(&leader), view_number = *view_number, num_timeouts_tracked = self.num_timeouts_tracked, "view timed out", diff --git a/types/src/message.rs b/types/src/message.rs index e1e91f8232..ac755f12cb 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -6,7 +6,7 @@ use std::{fmt, fmt::Debug, marker::PhantomData}; use anyhow::{bail, ensure, Context, Result}; -use cdn_proto::mnemonic; +use cdn_proto::util::mnemonic; use committable::Committable; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; From 018f50daa86da4a8e37f6a7cc788854ed498c865 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 19 Jul 2024 19:14:54 +0200 Subject: [PATCH 1131/1393] Don't request block for older views (#3468) --- task-impls/src/transactions.rs | 80 ++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 29 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index cdf71241db..fb7de6e25f 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,7 +13,7 @@ use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::{null_block, Leaf, PackedBundle}, + data::{null_block, PackedBundle}, event::{Event, EventType}, simple_certificate::UpgradeCertificate, traits::{ @@ -50,6 +50,8 @@ const BUILDER_ADDITIONAL_TIME_MULTIPLIER: f32 = 0.2; /// Minimum amount of time allotted to both batches, cannot be cut shorter if the first batch /// responds extremely fast. const BUILDER_MINIMUM_QUERY_TIME: Duration = Duration::from_millis(300); +/// Delay between re-tries on unsuccessful calls +const RETRY_DELAY: Duration = Duration::from_millis(100); /// The version of the builder API used by the marketplace type MarketplaceVersion = crate::builder::v0_3::Version; @@ -275,38 +277,44 @@ impl> TransactionTaskState (TYPES::Time, VidCommitment) { + ) -> Option<(TYPES::Time, VidCommitment)> { let consensus = self.consensus.read().await; - let mut prev_view = TYPES::Time::new(block_view.saturating_sub(1)); - - // Search through all previous views... - while prev_view != TYPES::Time::genesis() { - if let Some(commitment) = - consensus - .validated_state_map() - .get(&prev_view) - .and_then(|view| match view.view_inner { - // For a view for which we have a Leaf stored - ViewInner::Da { payload_commitment } => Some(payload_commitment), - ViewInner::Leaf { leaf, .. } => consensus - .saved_leaves() - .get(&leaf) - .map(Leaf::payload_commitment), - ViewInner::Failed => None, - }) - { - return (prev_view, commitment); + let mut target_view = TYPES::Time::new(block_view.saturating_sub(1)); + + while target_view != TYPES::Time::genesis() { + let Some(view_data) = consensus.validated_state_map().get(&target_view) else { + tracing::warn!(?target_view, "Missing record for view in validated state"); + return None; + }; + match view_data.view_inner { + ViewInner::Da { payload_commitment } => { + return Some((target_view, payload_commitment)) + } + ViewInner::Leaf { + leaf: leaf_commitment, + .. + } => { + let Some(leaf) = consensus.saved_leaves().get(&leaf_commitment) else { + tracing::warn!(?target_view, %leaf_commitment, "Missing leaf in saved_leaves"); + return None; + }; + return Some((target_view, leaf.payload_commitment())); + } + ViewInner::Failed => { + // For failed views, backtrack + target_view = target_view - 1; + continue; + } } - prev_view = prev_view - 1; } - // If not found, return commitment for last decided block - (prev_view, consensus.decided_leaf().payload_commitment()) + None } #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view), name = "wait_for_block", level = "error")] @@ -318,7 +326,21 @@ impl> TransactionTaskState break (view, comm), + None if task_start_time.elapsed() < self.builder_timeout => { + // We still have time, will re-try in a bit + async_sleep(RETRY_DELAY).await; + continue; + } + _ => { + tracing::warn!("Failed to find commitment in time"); + return None; + } + } + }; + let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( &self.private_key, parent_comm.as_ref(), @@ -334,7 +356,7 @@ impl> TransactionTaskState> TransactionTaskState { tracing::warn!("Couldn't get a block: {err:#}"); // pause a bit - async_sleep(Duration::from_millis(100)).await; + async_sleep(RETRY_DELAY).await; continue; } From 83900d710fbcbc67d17abab67d4cea2c8ce915a9 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 22 Jul 2024 10:55:16 -0400 Subject: [PATCH 1132/1393] [Frontend] External message passthrough (#3457) * sequencer message passthrough * switch to external event * `sequencer` -> `external` * remove passthrough topic --- hotshot/src/tasks/mod.rs | 3 ++- task-impls/src/network.rs | 23 ++++++++++++++++---- testing/src/test_task.rs | 7 +++++-- testing/tests/tests_1/network_task.rs | 30 ++++++++++++++++++++------- types/src/event.rs | 3 +++ types/src/message.rs | 6 ++++++ 6 files changed, 57 insertions(+), 15 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 6fc25b5b2d..a370034a97 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -98,7 +98,8 @@ pub fn add_network_message_task< channel: &Arc, ) { let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { - event_stream: handle.internal_event_stream.0.clone(), + internal_event_stream: handle.internal_event_stream.0.clone(), + external_event_stream: handle.output_event_stream.0.clone(), }; let decided_upgrade_certificate = Arc::clone(&handle.hotshot.decided_upgrade_certificate); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 2aca0cb49e..3573424af2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -8,7 +8,7 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ data::{VidDisperse, VidDisperseShare}, - event::HotShotAction, + event::{Event, EventType, HotShotAction}, message::{ DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, VersionedMessage, @@ -92,7 +92,10 @@ pub fn view_sync_filter(event: &Arc>) -> bo #[derive(Clone)] pub struct NetworkMessageTaskState { /// Sender to send internal events this task generates to other tasks - pub event_stream: Sender>>, + pub internal_event_stream: Sender>>, + + /// Sender to send external events this task generates to the event stream + pub external_event_stream: Sender>, } impl NetworkMessageTaskState { @@ -164,7 +167,7 @@ impl NetworkMessageTaskState { // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. // - broadcast_event(Arc::new(event), &self.event_stream).await; + broadcast_event(Arc::new(event), &self.internal_event_stream).await; } MessageKind::Data(message) => match message { DataMessage::SubmitTransaction(transaction, _) => { @@ -174,12 +177,24 @@ impl NetworkMessageTaskState { warn!("Request and Response messages should not be received in the NetworkMessage task"); } }, + + MessageKind::External(data) => { + // Send the external message to the external event stream so it can be processed + broadcast_event( + Event { + view_number: TYPES::Time::new(1), + event: EventType::ExternalMessageReceived(data), + }, + &self.external_event_stream, + ) + .await; + } }; } if !transactions.is_empty() { broadcast_event( Arc::new(HotShotEvent::TransactionsRecv(transactions)), - &self.event_stream, + &self.internal_event_stream, ) .await; } diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 3b1c7c7248..5c769fe7fc 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -7,6 +7,7 @@ use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; use futures::future::select_all; +use hotshot::types::Event; use hotshot_task_impls::{events::HotShotEvent, network::NetworkMessageTaskState}; use hotshot_types::{ message::{Messages, VersionedMessage}, @@ -104,12 +105,14 @@ pub async fn add_network_message_test_task< TYPES: NodeType, NET: ConnectedNetwork, >( - event_stream: Sender>>, + internal_event_stream: Sender>>, + external_event_stream: Sender>, channel: Arc, ) -> JoinHandle<()> { let net = Arc::clone(&channel); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { - event_stream: event_stream.clone(), + internal_event_stream: internal_event_stream.clone(), + external_event_stream: external_event_stream.clone(), }; let network = Arc::clone(&net); diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 2b3bd5f6df..517ca6cf5d 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -34,7 +34,8 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder: TestDescription = TestDescription::default_multiple_rounds(); + let builder: TestDescription = + TestDescription::default_multiple_rounds(); let node_id = 1; let launcher = builder.gen_launcher(node_id); @@ -69,8 +70,14 @@ async fn test_network_task() { let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().await.unwrap(); - let (out_tx, mut out_rx) = async_broadcast::broadcast(10); - add_network_message_test_task(out_tx.clone(), network.clone()).await; + let (out_tx_internal, mut out_rx_internal) = async_broadcast::broadcast(10); + let (out_tx_external, _) = async_broadcast::broadcast(10); + add_network_message_test_task( + out_tx_internal.clone(), + out_tx_external.clone(), + network.clone(), + ) + .await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( view.quorum_proposal, @@ -79,7 +86,7 @@ async fn test_network_task() { .await .unwrap(); let res: Arc> = - async_timeout(Duration::from_millis(100), out_rx.recv_direct()) + async_timeout(Duration::from_millis(100), out_rx_internal.recv_direct()) .await .expect("timed out waiting for response") .expect("channel closed"); @@ -98,7 +105,8 @@ async fn test_network_storage_fail() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder: TestDescription = TestDescription::default_multiple_rounds(); + let builder: TestDescription = + TestDescription::default_multiple_rounds(); let node_id = 1; let launcher = builder.gen_launcher(node_id); @@ -134,9 +142,15 @@ async fn test_network_storage_fail() { let mut generator = TestViewGenerator::generate(membership.clone(), membership); let view = generator.next().await.unwrap(); - let (out_tx, mut out_rx): (Sender>>, _) = + let (out_tx_internal, mut out_rx_internal): (Sender>>, _) = async_broadcast::broadcast(10); - add_network_message_test_task(out_tx.clone(), network.clone()).await; + let (out_tx_external, _) = async_broadcast::broadcast(10); + add_network_message_test_task( + out_tx_internal.clone(), + out_tx_external.clone(), + network.clone(), + ) + .await; tx.broadcast_direct(Arc::new(HotShotEvent::QuorumProposalSend( view.quorum_proposal, @@ -144,6 +158,6 @@ async fn test_network_storage_fail() { ))) .await .unwrap(); - let res = async_timeout(Duration::from_millis(100), out_rx.recv_direct()).await; + let res = async_timeout(Duration::from_millis(100), out_rx_internal.recv_direct()).await; assert!(res.is_err()); } diff --git a/types/src/event.rs b/types/src/event.rs index 293839d52d..3c07a15782 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -163,6 +163,9 @@ pub enum EventType { /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, + + /// A message destined for external listeners was received + ExternalMessageReceived(Vec), } #[derive(Debug, Serialize, Deserialize, Clone)] /// A list of actions that we track for nodes diff --git a/types/src/message.rs b/types/src/message.rs index ac755f12cb..03c8e50ab7 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -160,6 +160,8 @@ pub enum MessagePurpose { UpgradeProposal, /// Upgrade vote. UpgradeVote, + /// A message to be passed through to external listeners + External, } // TODO (da) make it more customized to the consensus layer, maybe separating the specific message @@ -172,6 +174,8 @@ pub enum MessageKind { Consensus(SequencingMessage), /// Messages relating to sharing data between nodes Data(DataMessage), + /// A (still serialized) message to be passed through to external listeners + External(Vec), } impl MessageKind { @@ -199,6 +203,7 @@ impl ViewMessage for MessageKind { ResponseMessage::Found(m) => m.view_number(), ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::Time::new(1), }, + MessageKind::External(_) => TYPES::Time::new(1), } } @@ -206,6 +211,7 @@ impl ViewMessage for MessageKind { match &self { MessageKind::Consensus(message) => message.purpose(), MessageKind::Data(_) => MessagePurpose::Data, + MessageKind::External(_) => MessagePurpose::External, } } } From 5c6bcad62fa63cce0ac9c9f38f4bfefa60811650 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:56:36 -0400 Subject: [PATCH 1133/1393] change warn to error (#3473) --- task-impls/src/consensus/mod.rs | 2 +- task-impls/src/consensus2/handlers.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 190035baa1..352363e380 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -485,7 +485,7 @@ impl> ConsensusTaskState // been upgraded. if let Some(cert) = self.decided_upgrade_certificate.read().await.clone() { if new_view == cert.data.new_version_first_view { - warn!( + error!( "Version upgraded based on a decided upgrade cert: {:?}", cert ); diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 8673ede034..4d09298ffb 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -14,7 +14,7 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, instrument, warn}; +use tracing::{debug, error, instrument}; use super::Consensus2TaskState; use crate::{ @@ -139,7 +139,7 @@ pub(crate) async fn handle_view_change Date: Tue, 23 Jul 2024 10:32:02 -0400 Subject: [PATCH 1134/1393] [WEEKLY RELEASE] Hotshot - rc-0.5.63 (#3465) * bump version * fix build * Use correct sequencing fee (#3469) --------- Co-authored-by: Artemii Gerasimovich --- task-impls/src/vid.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index e5a0b925d5..775acfd768 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -56,7 +56,7 @@ impl> VidTaskState { encoded_transactions, metadata, view_number, - bid_fees, + sequencing_fees, vid_precompute, .. } = packed_bundle; @@ -87,7 +87,7 @@ impl> VidTaskState { builder_commitment, metadata.clone(), *view_number, - bid_fees.first().clone(), + sequencing_fees.first().clone(), )), &event_stream, ) From 5d6f32b9f040db75650d3a6eec4b1bcd135fd1c5 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 23 Jul 2024 10:45:55 -0400 Subject: [PATCH 1135/1393] prune unused `deps/features` (#3478) --- orchestrator/src/lib.rs | 2 +- types/Cargo.toml | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 06dc1abb53..8ed7bb78a6 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -686,7 +686,7 @@ where } else { Err(ServerError { status: tide_disco::StatusCode::BAD_REQUEST, - message: "No reachable adddresses".to_string(), + message: "No reachable addresses".to_string(), }) } } diff --git a/types/Cargo.toml b/types/Cargo.toml index fcd35bd8d4..c7cc923292 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -7,9 +7,7 @@ version = "0.1.11" [dependencies] anyhow = { workspace = true } -ark-bls12-381 = { workspace = true } ark-bn254 = { workspace = true } -ark-ec = { workspace = true } ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true } From 22952309b28a3d4c9224bb750a177f6397e060cf Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 23 Jul 2024 12:16:03 -0400 Subject: [PATCH 1136/1393] Minor Libp2p changes (#3477) * lower replication factor and quorum required to start * lints --- .../src/traits/networking/libp2p_network.rs | 19 ++++++++++++++++--- libp2p-networking/src/network/mod.rs | 2 +- libp2p-networking/src/network/node.rs | 9 ++++++++- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a20ef97901..c948202c47 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -4,6 +4,7 @@ #[cfg(feature = "hotshot-testing")] use std::str::FromStr; use std::{ + cmp::min, collections::{BTreeSet, HashSet}, fmt::Debug, net::SocketAddr, @@ -15,7 +16,7 @@ use std::{ time::Duration, }; -use anyhow::anyhow; +use anyhow::{anyhow, Context}; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, channel::{ @@ -60,7 +61,7 @@ use libp2p_networking::{ spawn_network_node, MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, - NetworkNodeReceiver, NetworkNodeType, + NetworkNodeReceiver, NetworkNodeType, DEFAULT_REPLICATION_FACTOR, }, reexport::{Multiaddr, ResponseChannel}, }; @@ -397,11 +398,23 @@ impl Libp2pNetwork { .parse()?; // Build our libp2p configuration from our global, network configuration - let mut config_builder = NetworkNodeConfigBuilder::default(); + let mut config_builder: NetworkNodeConfigBuilder = NetworkNodeConfigBuilder::default(); + + // The replication factor is the minimum of [the default and 2/3 the number of nodes] + let Some(default_replication_factor) = DEFAULT_REPLICATION_FACTOR else { + return Err(anyhow!("Default replication factor not supplied")); + }; + + let replication_factor = NonZeroUsize::new(min( + default_replication_factor.get(), + config.config.num_nodes_with_stake.get() * 2 / 3, + )) + .with_context(|| "Failed to calculate replication factor")?; config_builder .server_mode(libp2p_config.server_mode) .identity(keypair) + .replication_factor(replication_factor) .bound_addr(Some(bind_address.clone())) .mesh_params(Some(MeshParams { mesh_n_high: libp2p_config.mesh_n_high, diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 1fa5391f20..94796117a9 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -39,7 +39,7 @@ pub use self::{ node::{ network_node_handle_error, spawn_network_node, MeshParams, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, - NetworkNodeHandleError, NetworkNodeReceiver, + NetworkNodeHandleError, NetworkNodeReceiver, DEFAULT_REPLICATION_FACTOR, }, }; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index c91589f32a..8a782a0e63 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -45,6 +45,7 @@ use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ config::{ MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, + DEFAULT_REPLICATION_FACTOR, }, handle::{ network_node_handle_error, spawn_network_node, NetworkNodeHandle, NetworkNodeHandleError, @@ -351,11 +352,17 @@ impl NetworkNode { /// Once replicated upon all nodes, the caller is notified over /// `chan`. If there is an error, a [`super::error::DHTError`] is /// sent instead. + /// + /// # Panics + /// If the default replication factor is `None` pub fn put_record(&mut self, mut query: KadPutQuery) { let record = Record::new(query.key.clone(), query.value.clone()); match self.swarm.behaviour_mut().dht.put_record( record, - libp2p::kad::Quorum::N(self.dht_handler.replication_factor()), + libp2p::kad::Quorum::N( + NonZeroUsize::try_from(self.dht_handler.replication_factor().get() / 2) + .expect("replication factor should be bigger than 0"), + ), ) { Err(e) => { // failed try again later From 519dbde7a1567edf58342b22607283ba62fe43a6 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 23 Jul 2024 15:11:15 -0400 Subject: [PATCH 1137/1393] Marketplace header upgrades (#3480) * Marketplace header upgrades * Fix typo --- example-types/src/block_types.rs | 21 ++++++++++++++++++- task-impls/src/consensus/handlers.rs | 4 +++- task-impls/src/quorum_proposal/handlers.rs | 2 +- types/src/traits/block_contents.rs | 24 +++++++++++++++++++--- 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index e02c6582d8..33a7850477 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -9,6 +9,7 @@ use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, traits::{ + auction_results_provider::HasUrls, block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, BlockPayload, ValidatedState, @@ -244,7 +245,7 @@ impl> Block { type Error = std::convert::Infallible; - async fn new( + async fn new_legacy( _parent_state: &TYPES::ValidatedState, _instance_state: &>::Instance, parent_leaf: &Leaf, @@ -271,6 +272,20 @@ impl> Block }) } + async fn new_marketplace( + _parent_state: &TYPES::ValidatedState, + _instance_state: &>::Instance, + _parent_leaf: &Leaf, + _payload_commitment: VidCommitment, + _metadata: >::Metadata, + _builder_fee: Vec>, + _vid_common: VidCommon, + _auction_results: Option, + _version: Version, + ) -> Result { + unimplemented!() + } + fn genesis( _instance_state: &>::Instance, payload_commitment: VidCommitment, @@ -300,6 +315,10 @@ impl> Block fn builder_commitment(&self) -> BuilderCommitment { self.builder_commitment.clone() } + + fn get_auction_results(&self) -> Option { + unimplemented!() + } } impl Committable for TestBlockHeader { diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 3acaa84cd6..c8efa8c84f 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -74,8 +74,10 @@ pub async fn create_and_send_proposal( error!("Cannot propopse without our VID share, view {:?}", view); return; }; + // TODO ED: This will need to be version-gated to use the appropriate `BlockHeader::new` function. + // Pre-marketplace versions will use `new_legacy` and post-marketplace versions will use `new_marketplace` drop(consensus_read); - let block_header = match TYPES::BlockHeader::new( + let block_header = match TYPES::BlockHeader::new_legacy( state.as_ref(), instance_state.as_ref(), &parent_leaf, diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 5d240fcd8f..20f8dbbacd 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -163,7 +163,7 @@ impl ProposalDependencyHandle { &self.decided_upgrade_certificate.read().await.clone(), )?; - let block_header = TYPES::BlockHeader::new( + let block_header = TYPES::BlockHeader::new_legacy( state.as_ref(), self.instance_state.as_ref(), &parent_leaf, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index a065e334b4..36e5cd3dca 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -17,7 +17,7 @@ use jf_vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::version::Version; -use super::signature_key::BuilderSignatureKey; +use super::{auction_results_provider::HasUrls, signature_key::BuilderSignatureKey}; use crate::{ data::Leaf, traits::{node_implementation::NodeType, states::InstanceState, ValidatedState}, @@ -183,9 +183,9 @@ pub trait BlockHeader: type Error: Error + Debug + Send + Sync; /// Build a header with the parent validate state, instance-level state, parent leaf, payload - /// commitment, and metadata. + /// commitment, and metadata. This is only used in pre-marketplace versions #[allow(clippy::too_many_arguments)] - fn new( + fn new_legacy( parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, parent_leaf: &Leaf, @@ -197,6 +197,21 @@ pub trait BlockHeader: version: Version, ) -> impl Future> + Send; + /// Build a header with the parent validate state, instance-level state, parent leaf, payload + /// commitment, metadata, and auction results. This is only used in post-marketplace versions + #[allow(clippy::too_many_arguments)] + fn new_marketplace( + parent_state: &TYPES::ValidatedState, + instance_state: &>::Instance, + parent_leaf: &Leaf, + payload_commitment: VidCommitment, + metadata: >::Metadata, + builder_fee: Vec>, + vid_common: VidCommon, + auction_results: Option, + version: Version, + ) -> impl Future> + Send; + /// Build the genesis header, payload, and metadata. fn genesis( instance_state: &>::Instance, @@ -216,4 +231,7 @@ pub trait BlockHeader: /// Get the builder commitment fn builder_commitment(&self) -> BuilderCommitment; + + /// Get the results of the auction for this Header. Only used in post-marketplace versions + fn get_auction_results(&self) -> Option; } From 46b2eefaab5330700340b2fde1f7b1668a19070b Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 25 Jul 2024 11:48:41 -0400 Subject: [PATCH 1138/1393] Add `Bundle` struct and request bundles in transaction task (#3472) --- hotshot/src/tasks/task_state.rs | 1 + task-impls/src/transactions.rs | 345 ++++++++++++------ testing/tests/tests_1/da_task.rs | 10 - testing/tests/tests_1/libp2p.rs | 2 +- .../tests_1/quorum_proposal_recv_task.rs | 27 +- testing/tests/tests_1/quorum_vote_task.rs | 30 +- .../tests/tests_1/upgrade_task_with_vote.rs | 27 +- testing/tests/tests_1/vid_task.rs | 5 - testing/tests/tests_3/memory_network.rs | 7 +- testing/tests/tests_5/fake_solver.rs | 22 +- testing/tests/tests_5/timeout.rs | 2 +- types/Cargo.toml | 1 + types/src/bundle.rs | 21 ++ types/src/constants.rs | 10 +- types/src/data.rs | 5 - types/src/lib.rs | 1 + types/src/traits/auction_results_provider.rs | 39 +- types/src/traits/block_contents.rs | 2 +- 18 files changed, 370 insertions(+), 187 deletions(-) create mode 100644 types/src/bundle.rs diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a0f9a9d611..c74bd654fb 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -205,6 +205,7 @@ impl> CreateTaskState .collect(), builder_clients_marketplace: Vec::new(), decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + auction_results_provider: Arc::clone(&handle.hotshot.auction_results_provider), } } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index fb7de6e25f..e912b5a42d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -13,10 +13,12 @@ use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, + constants::MarketplaceVersion, data::{null_block, PackedBundle}, event::{Event, EventType}, - simple_certificate::UpgradeCertificate, + simple_certificate::{version, UpgradeCertificate}, traits::{ + auction_results_provider::AuctionResultsProvider, block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -28,6 +30,7 @@ use hotshot_types::{ }; use tracing::{debug, error, instrument, warn}; use vbs::version::{StaticVersionType, Version}; +use vec1::Vec1; use crate::{ builder::{ @@ -53,9 +56,6 @@ const BUILDER_MINIMUM_QUERY_TIME: Duration = Duration::from_millis(300); /// Delay between re-tries on unsuccessful calls const RETRY_DELAY: Duration = Duration::from_millis(100); -/// The version of the builder API used by the marketplace -type MarketplaceVersion = crate::builder::v0_3::Version; - /// Builder Provided Responses pub struct BuilderResponse { /// Fee information @@ -68,22 +68,6 @@ pub struct BuilderResponse { pub precompute_data: Option, } -/// The Bundle for a portion of a block, provided by a downstream builder that exists in a bundle -/// auction. -pub struct Bundle { - /// The bundle transactions sent by the builder. - pub transactions: Vec<>::Transaction>, - - /// The signature over the bundle. - pub signature: TYPES::SignatureKey, - - /// The fee for submitting a bid. - pub bid_fee: BuilderFee, - - /// The fee for sequencing - pub sequencing_fee: BuilderFee, -} - /// Tracks state of a Transaction task pub struct TransactionTaskState> { /// The state's api @@ -120,9 +104,226 @@ pub struct TransactionTaskState> { pub id: u64, /// Decided upgrade certificate pub decided_upgrade_certificate: Arc>>>, + /// auction results provider + pub auction_results_provider: Arc, } impl> TransactionTaskState { + /// legacy view change handler + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] + pub async fn handle_view_change_legacy( + &mut self, + event_stream: Sender>>, + block_view: TYPES::Time, + ) -> Option { + let version = match hotshot_types::simple_certificate::version( + block_view, + &self + .decided_upgrade_certificate + .read() + .await + .as_ref() + .cloned(), + ) { + Ok(v) => v, + Err(err) => { + error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + return None; + } + }; + + // Request a block from the builder unless we are between versions. + let block = { + if self + .decided_upgrade_certificate + .read() + .await + .as_ref() + .is_some_and(|cert| cert.upgrading_in(block_view)) + { + None + } else { + self.wait_for_block(block_view, version).await + } + }; + + if let Some(BuilderResponse { + block_payload, + metadata, + fee, + precompute_data, + }) = block + { + broadcast_event( + Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( + block_payload.encode(), + metadata, + block_view, + vec1::vec1![fee], + precompute_data, + ))), + &event_stream, + ) + .await; + } else { + // If we couldn't get a block, send an empty block + warn!( + "Failed to get a block for view {:?}, proposing empty block", + block_view + ); + + // Increment the metric for number of empty blocks proposed + self.consensus + .write() + .await + .metrics + .number_of_empty_blocks_proposed + .add(1); + + let membership_total_nodes = self.membership.total_nodes(); + let Some(null_fee) = null_block::builder_fee(self.membership.total_nodes(), version) + else { + error!("Failed to get null fee"); + return None; + }; + + // Create an empty block payload and metadata + let (_, metadata) = ::BlockPayload::empty(); + + let (_, precompute_data) = precompute_vid_commitment(&[], membership_total_nodes); + + // Broadcast the empty block + broadcast_event( + Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( + vec![].into(), + metadata, + block_view, + vec1::vec1![null_fee], + Some(precompute_data), + ))), + &event_stream, + ) + .await; + }; + + return None; + } + + #[allow(clippy::too_many_lines)] + /// marketplace view change handler + pub async fn handle_view_change_marketplace( + &mut self, + event_stream: Sender>>, + block_view: TYPES::Time, + ) -> Option { + let version = match hotshot_types::simple_certificate::version( + block_view, + &self + .decided_upgrade_certificate + .read() + .await + .as_ref() + .cloned(), + ) { + Ok(v) => v, + Err(err) => { + error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + return None; + } + }; + + // Only request bundles and propose with a nonempty block if we are not between versions. + if !self + .decided_upgrade_certificate + .read() + .await + .as_ref() + .is_some_and(|cert| cert.upgrading_in(block_view)) + { + if let Ok(bundles) = self + .auction_results_provider + .fetch_bundles(block_view) + .await + { + let mut sequencing_fees = Vec::new(); + let mut transactions: Vec< + >::Transaction, + > = Vec::new(); + + for bundle in bundles { + sequencing_fees.push(bundle.sequencing_fee); + transactions.extend(bundle.transactions); + } + + let validated_state = self.consensus.read().await.decided_state(); + + if let (Ok(sequencing_fees), Ok((block_payload, metadata))) = ( + Vec1::try_from_vec(sequencing_fees), + TYPES::BlockPayload::from_transactions( + transactions, + &validated_state, + &Arc::clone(&self.instance_state), + ) + .await, + ) { + broadcast_event( + Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( + block_payload.encode(), + metadata, + block_view, + sequencing_fees, + None, + ))), + &event_stream, + ) + .await; + + return None; + } + } + } + + // If we couldn't get any bundles (due to either the builders or solver failing to return a result), send an empty block + warn!( + "Failed to get a block for view {:?}, proposing empty block", + block_view + ); + + // Increment the metric for number of empty blocks proposed + self.consensus + .write() + .await + .metrics + .number_of_empty_blocks_proposed + .add(1); + + let membership_total_nodes = self.membership.total_nodes(); + let Some(null_fee) = null_block::builder_fee(self.membership.total_nodes(), version) else { + error!("Failed to get null fee"); + return None; + }; + + // Create an empty block payload and metadata + let (_, metadata) = ::BlockPayload::empty(); + + let (_, precompute_data) = precompute_vid_commitment(&[], membership_total_nodes); + + // Broadcast the empty block + broadcast_event( + Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( + vec![].into(), + metadata, + block_view, + vec1::vec1![null_fee], + Some(precompute_data), + ))), + &event_stream, + ) + .await; + + None + } + /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle( @@ -166,108 +367,24 @@ impl> TransactionTaskState v, - Err(err) => { - error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + Err(e) => { + tracing::error!("Failed to calculate version: {:?}", e); return None; } }; - // Request a block from the builder unless we are between versions. - let block = { - if self - .decided_upgrade_certificate - .read() - .await - .as_ref() - .is_some_and(|cert| cert.upgrading_in(block_view)) - { - None - } else { - self.wait_for_block(block_view, version).await - } - }; - - if let Some(BuilderResponse { - block_payload, - metadata, - fee, - precompute_data, - }) = block - { - let Some(bid_fee) = - null_block::builder_fee(self.membership.total_nodes(), version) - else { - error!("Failed to get bid fee"); - return None; - }; - - broadcast_event( - Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( - block_payload.encode(), - metadata, - block_view, - vec1::vec1![bid_fee], - vec1::vec1![fee], - precompute_data, - ))), - &event_stream, - ) - .await; + if version < MarketplaceVersion::VERSION { + self.handle_view_change_legacy(event_stream, block_view) + .await; } else { - // If we couldn't get a block, send an empty block - warn!( - "Failed to get a block for view {:?}, proposing empty block", - view - ); - - // Increment the metric for number of empty blocks proposed - self.consensus - .write() - .await - .metrics - .number_of_empty_blocks_proposed - .add(1); - - let membership_total_nodes = self.membership.total_nodes(); - let Some(null_fee) = - null_block::builder_fee(self.membership.total_nodes(), version) - else { - error!("Failed to get null fee"); - return None; - }; - - // Create an empty block payload and metadata - let (_, metadata) = ::BlockPayload::empty(); - - let (_, precompute_data) = - precompute_vid_commitment(&[], membership_total_nodes); - - // Broadcast the empty block - broadcast_event( - Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( - vec![].into(), - metadata, - block_view, - vec1::vec1![null_fee.clone()], - vec1::vec1![null_fee], - Some(precompute_data), - ))), - &event_stream, - ) - .await; - }; - - return None; + self.handle_view_change_marketplace(event_stream, block_view) + .await; + } } HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted); diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 7b79f992d6..1e9c772d45 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -84,11 +84,6 @@ async fn test_da_task() { BaseVersion::version() ) .unwrap()], - vec1::vec1![null_block::builder_fee( - quorum_membership.total_nodes(), - BaseVersion::version() - ) - .unwrap()], Some(precompute), )), ], @@ -175,11 +170,6 @@ async fn test_da_task_storage_failure() { BaseVersion::version() ) .unwrap()], - vec1::vec1![null_block::builder_fee( - quorum_membership.total_nodes(), - BaseVersion::version() - ) - .unwrap()], Some(precompute), ),) ], diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index e7cbb8d6f2..059dcdb236 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -1,11 +1,11 @@ use std::time::Duration; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; -use hotshot_testing::spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::{TestDescription, TimingData}, }; use tracing::instrument; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 2f58bd1797..29be512551 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -14,12 +14,15 @@ use hotshot_task_impls::{ }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf_and_state, build_system_handle}, - predicates::event::{all_predicates, quorum_proposal_missing, exact, vote_now}, + predicates::event::{all_predicates, exact, quorum_proposal_missing, vote_now}, script::InputOrder, serial, view_generator::TestViewGenerator, }; -use hotshot_types::{data::ViewNumber, traits::{node_implementation::ConsensusTime,ValidatedState}}; +use hotshot_types::{ + data::ViewNumber, + traits::{node_implementation::ConsensusTime, ValidatedState}, +}; #[cfg(test)] #[cfg(feature = "dependency-tasks")] @@ -62,10 +65,12 @@ async fn test_quorum_proposal_recv_task() { // to that, we'll just put them in here. consensus_writer .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); - consensus_writer.update_validated_state_map( - view.quorum_proposal.data.view_number, - build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number, + build_fake_view_with_leaf(view.leaf.clone()), + ) + .unwrap(); } drop(consensus_writer); @@ -152,10 +157,12 @@ async fn test_quorum_proposal_recv_task_liveness_check() { // we don't have access to that, we'll just put them in here. We // specifically ignore writing the saved leaves so that way // the parent lookup fails and we trigger a view liveness check. - consensus_writer.update_validated_state_map( - inserted_view_number, - build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + consensus_writer + .update_validated_state_map( + inserted_view_number, + build_fake_view_with_leaf(view.leaf.clone()), + ) + .unwrap(); // The index here is important. Since we're proposing for view 4, we need the // value from entry 2 to align the public key from the shares map. diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 4c29165aca..015ecedd6a 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,6 +1,8 @@ #![allow(clippy::panic)] #![cfg(feature = "dependency-tasks")] +use std::time::Duration; + use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; @@ -17,8 +19,6 @@ use hotshot_types::{ data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, }; -use std::time::Duration; - const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] @@ -52,10 +52,12 @@ async fn test_quorum_vote_task_success() { leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); - consensus_writer.update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ) + .unwrap(); consensus_writer.update_saved_leaves(view.leaf.clone()); } drop(consensus_writer); @@ -172,10 +174,12 @@ async fn test_quorum_vote_task_miss_dependency() { vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); - consensus_writer.update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ) + .unwrap(); consensus_writer.update_saved_leaves(view.leaf.clone()); } drop(consensus_writer); @@ -197,9 +201,9 @@ async fn test_quorum_vote_task_miss_dependency() { ]; let expectations = vec![ - Expectations::from_outputs(all_predicates![ - exact(VidShareValidated(vids[1].0[0].clone())) - ]), + Expectations::from_outputs(all_predicates![exact(VidShareValidated( + vids[1].0[0].clone() + ))]), Expectations::from_outputs(all_predicates![ exact(LockedViewUpdated(ViewNumber::new(1))), exact(DaCertificateValidated(dacs[2].clone())) diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 2cc4124da6..42b8013108 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -1,5 +1,4 @@ #![cfg(feature = "dependency-tasks")] - // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] @@ -14,14 +13,16 @@ use hotshot_example_types::{ }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ - consensus2::Consensus2TaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, upgrade::UpgradeTaskState + consensus2::Consensus2TaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, + upgrade::UpgradeTaskState, }; use hotshot_testing::{ + all_predicates, helpers::{build_fake_view_with_leaf, vid_share}, predicates::{event::*, upgrade_with_vote::*}, - script::{Expectations, TaskScript, InputOrder}, + random, + script::{Expectations, InputOrder, TaskScript}, view_generator::TestViewGenerator, - random, all_predicates }; use hotshot_types::{ data::{null_block, ViewNumber}, @@ -77,10 +78,12 @@ async fn test_upgrade_task_with_vote() { vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); - consensus_writer.update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), - ).unwrap(); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ) + .unwrap(); consensus_writer.update_saved_leaves(view.leaf.clone()); } drop(consensus_writer); @@ -117,9 +120,10 @@ async fn test_upgrade_task_with_vote() { DaCertificateRecv(dacs[4].clone()), VidShareRecv(vids[4].0[0].clone()), ], - random![ - QuorumProposalValidated(proposals[5].data.clone(), leaves[5].clone()), - ], + random![QuorumProposalValidated( + proposals[5].data.clone(), + leaves[5].clone() + ),], ]; let expectations = vec![ @@ -185,6 +189,5 @@ async fn test_upgrade_task_with_vote() { expectations, }; - run_test![inputs, vote_script].await; } diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index c547ea7a5c..13a4e0741e 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -96,11 +96,6 @@ async fn test_vid_task() { BaseVersion::version() ) .unwrap()], - vec1::vec1![null_block::builder_fee( - quorum_membership.total_nodes(), - BaseVersion::version() - ) - .unwrap()], Some(vid_precompute), )), ], diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index b3dff52722..a392efb25c 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -11,10 +11,10 @@ use hotshot::{ types::SignatureKey, }; use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, - auction_results_provider_types::TestAuctionResultsProvider, }; use hotshot_types::{ data::ViewNumber, @@ -48,7 +48,10 @@ pub struct Test; impl NodeType for Test { type Base = StaticVersion<0, 1>; type Upgrade = StaticVersion<0, 2>; - const UPGRADE_HASH: [u8; 32] = [1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,]; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; type Time = ViewNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; diff --git a/testing/tests/tests_5/fake_solver.rs b/testing/tests/tests_5/fake_solver.rs index 0d40c13657..4e90656a60 100644 --- a/testing/tests/tests_5/fake_solver.rs +++ b/testing/tests/tests_5/fake_solver.rs @@ -1,10 +1,10 @@ use async_compatibility_layer::art::async_spawn; +use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResult, node_types::TestTypes, +}; use hotshot_fakeapi::fake_solver::FakeSolverState; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; - -use hotshot_example_types::node_types::TestTypes; -use hotshot_example_types::auction_results_provider_types::TestAuctionResult; use hotshot_testing::helpers::key_pair_for_id; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use tracing::instrument; use url::Url; @@ -13,7 +13,6 @@ use url::Url; #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_non_permissioned_no_error() { - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -65,7 +64,6 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_non_permissioned_with_errors() { - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -141,7 +139,10 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { // Assert over the payloads with a 50% error rate. for payload in payloads { - assert_eq!(payload.urls[0], Url::parse("http://localhost:1111/").unwrap()); + assert_eq!( + payload.urls[0], + Url::parse("http://localhost:1111/").unwrap() + ); } } @@ -150,7 +151,6 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_permissioned_no_error() { - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -213,7 +213,6 @@ async fn test_fake_solver_fetch_permissioned_no_error() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn test_fake_solver_fetch_permissioned_with_errors() { - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -296,6 +295,9 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { // Assert over the payloads with a 50% error rate. for payload in payloads { - assert_eq!(payload.urls[0], Url::parse("http://localhost:1111/").unwrap()); + assert_eq!( + payload.urls[0], + Url::parse("http://localhost:1111/").unwrap() + ); } } diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 90b48ed02b..22d0e83deb 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -21,7 +21,7 @@ async fn test_timeout() { ..Default::default() }; - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, ..Default::default() diff --git a/types/Cargo.toml b/types/Cargo.toml index c7cc923292..ebce2c9eeb 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -27,6 +27,7 @@ espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } cdn-proto = { workspace = true } +reqwest = { workspace = true } generic-array = { workspace = true } diff --git a/types/src/bundle.rs b/types/src/bundle.rs new file mode 100644 index 0000000000..81bf666b98 --- /dev/null +++ b/types/src/bundle.rs @@ -0,0 +1,21 @@ +//! This module provides the `Bundle` type + +use serde::{Deserialize, Serialize}; + +use crate::traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "TYPES: NodeType")] +/// The Bundle for a portion of a block, provided by a downstream +/// builder that exists in a bundle auction. +/// This type is maintained by HotShot +pub struct Bundle { + /// The bundle transactions sent by the builder. + pub transactions: Vec<>::Transaction>, + + /// The signature over the bundle. + pub signature: TYPES::SignatureKey, + + /// The fee for sequencing + pub sequencing_fee: BuilderFee, +} diff --git a/types/src/constants.rs b/types/src/constants.rs index b5aef3b8cf..ad84f11475 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -1,7 +1,15 @@ //! configurable constants for hotshot +use std::time::Duration; + use vbs::version::StaticVersion; +/// timeout for fetching auction results from the solver +pub const AUCTION_RESULTS_FETCH_TIMEOUT: Duration = Duration::from_millis(500); + +/// timeout for fetching bundles from builders +pub const BUNDLE_FETCH_TIMEOUT: Duration = Duration::from_millis(500); + /// the number of views to gather information for ahead of time pub const LOOK_AHEAD: u64 = 5; @@ -44,8 +52,6 @@ pub const WEB_SERVER_VERSION: WebServerVersion = StaticVersion {}; /// Type for semver representation of "Base" version pub type BaseVersion = StaticVersion<0, 1>; -/// Constant for semver representation of "Base" version -pub const BASE_VERSION: BaseVersion = StaticVersion {}; /// Type for semver representation of "Marketplace" version pub type MarketplaceVersion = StaticVersion<0, 3>; diff --git a/types/src/data.rs b/types/src/data.rs index d9953744e8..c963cb5648 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -876,9 +876,6 @@ pub struct PackedBundle { /// The view number that this block is associated with. pub view_number: TYPES::Time, - /// The bid fees for submitting the block. - pub bid_fees: Vec1>, - /// The sequencing fee for submitting bundles. pub sequencing_fees: Vec1>, @@ -892,7 +889,6 @@ impl PackedBundle { encoded_transactions: Arc<[u8]>, metadata: >::Metadata, view_number: TYPES::Time, - bid_fees: Vec1>, sequencing_fees: Vec1>, vid_precompute: Option, ) -> Self { @@ -900,7 +896,6 @@ impl PackedBundle { encoded_transactions, metadata, view_number, - bid_fees, sequencing_fees, vid_precompute, } diff --git a/types/src/lib.rs b/types/src/lib.rs index ca4d2fa8e4..89c7426953 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -11,6 +11,7 @@ use url::Url; use vec1::Vec1; use crate::utils::bincode_opts; +pub mod bundle; pub mod consensus; pub mod constants; pub mod data; diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index 09bbcb54e4..aa27925510 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -2,10 +2,16 @@ //! which handles connecting to, and fetching the allocation results from, the Solver. use anyhow::Result; +use async_compatibility_layer::art::async_timeout; use async_trait::async_trait; +use futures::future::join_all; use url::Url; use super::node_implementation::NodeType; +use crate::{ + bundle::Bundle, + constants::{AUCTION_RESULTS_FETCH_TIMEOUT, BUNDLE_FETCH_TIMEOUT}, +}; /// This trait guarantees that a particular type has urls that can be extracted from it. This trait /// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a @@ -23,9 +29,40 @@ pub trait HasUrls { pub trait AuctionResultsProvider: Send + Sync + Clone { /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. - type AuctionResult: HasUrls; + type AuctionResult: HasUrls + Send; /// Fetches the auction result for a view. Does not cache the result, /// subsequent calls will invoke additional wasted calls. async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; + + /// Fetches the bundles for a view. + async fn fetch_bundles(&self, view_number: TYPES::Time) -> Result>> { + let result = async_timeout( + AUCTION_RESULTS_FETCH_TIMEOUT, + self.fetch_auction_result(view_number), + ) + .await??; + + let client = reqwest::Client::new(); + + let mut futures = Vec::new(); + + for url in result.urls() { + futures.push(async_timeout( + BUNDLE_FETCH_TIMEOUT, + client.get(url).send().await?.json::>(), + )); + } + + let mut bundles = Vec::new(); + + for bundle in join_all(futures).await { + match bundle { + Ok(Ok(b)) => bundles.push(b), + _ => continue, + } + } + + Ok(bundles) + } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 36e5cd3dca..7b35fe440d 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -65,7 +65,7 @@ pub trait BlockPayload: /// The type of the instance-level state this state is associated with type Instance: InstanceState; /// The type of the transitions we are applying - type Transaction: Transaction; + type Transaction: Transaction + Serialize + DeserializeOwned; /// Validated State type ValidatedState: ValidatedState; /// Data created during block building which feeds into the block header From 30eca6870b0d552bf52ef0a1ceac285d58d071b9 Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Thu, 25 Jul 2024 13:18:15 -0400 Subject: [PATCH 1139/1393] Add AuctionResult associated type to BlockHeader (#3488) * Add AuctionResult associated type to BlockHeader * Change AuctionResults to AuctionResult --- example-types/src/block_types.rs | 9 +++++---- types/src/traits/block_contents.rs | 8 +++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 33a7850477..2820bd0898 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -9,7 +9,6 @@ use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ data::{BlockError, Leaf}, traits::{ - auction_results_provider::HasUrls, block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, BlockPayload, ValidatedState, @@ -24,6 +23,7 @@ use time::OffsetDateTime; use vbs::version::Version; use crate::{ + auction_results_provider_types::TestAuctionResult, node_types::TestTypes, state_types::{TestInstanceState, TestValidatedState}, }; @@ -244,6 +244,7 @@ impl> Block for TestBlockHeader { type Error = std::convert::Infallible; + type AuctionResult = TestAuctionResult; async fn new_legacy( _parent_state: &TYPES::ValidatedState, @@ -272,7 +273,7 @@ impl> Block }) } - async fn new_marketplace( + async fn new_marketplace( _parent_state: &TYPES::ValidatedState, _instance_state: &>::Instance, _parent_leaf: &Leaf, @@ -280,7 +281,7 @@ impl> Block _metadata: >::Metadata, _builder_fee: Vec>, _vid_common: VidCommon, - _auction_results: Option, + _auction_results: Option, _version: Version, ) -> Result { unimplemented!() @@ -316,7 +317,7 @@ impl> Block self.builder_commitment.clone() } - fn get_auction_results(&self) -> Option { + fn get_auction_results(&self) -> Option { unimplemented!() } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 7b35fe440d..253554650f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -181,6 +181,8 @@ pub trait BlockHeader: { /// Error type for this type of block header type Error: Error + Debug + Send + Sync; + /// Type of Auction Results + type AuctionResult: HasUrls + Send; /// Build a header with the parent validate state, instance-level state, parent leaf, payload /// commitment, and metadata. This is only used in pre-marketplace versions @@ -200,7 +202,7 @@ pub trait BlockHeader: /// Build a header with the parent validate state, instance-level state, parent leaf, payload /// commitment, metadata, and auction results. This is only used in post-marketplace versions #[allow(clippy::too_many_arguments)] - fn new_marketplace( + fn new_marketplace( parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, parent_leaf: &Leaf, @@ -208,7 +210,7 @@ pub trait BlockHeader: metadata: >::Metadata, builder_fee: Vec>, vid_common: VidCommon, - auction_results: Option, + auction_results: Option, version: Version, ) -> impl Future> + Send; @@ -233,5 +235,5 @@ pub trait BlockHeader: fn builder_commitment(&self) -> BuilderCommitment; /// Get the results of the auction for this Header. Only used in post-marketplace versions - fn get_auction_results(&self) -> Option; + fn get_auction_results(&self) -> Option; } From cd43f6c73d31dfc805de47b90c70bb4c080d4de8 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 26 Jul 2024 08:55:17 -0400 Subject: [PATCH 1140/1393] adjust libp2p (#3489) --- libp2p-networking/src/network/node.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 8a782a0e63..f80f322835 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -208,16 +208,15 @@ impl NetworkNode { // Create a custom gossipsub let gossipsub_config = GossipsubConfigBuilder::default() - .opportunistic_graft_ticks(3) .heartbeat_interval(Duration::from_secs(1)) // Force all messages to have valid signatures .validation_mode(ValidationMode::Strict) - .history_gossip(50) + .history_gossip(10) .mesh_n_high(params.mesh_n_high) .mesh_n_low(params.mesh_n_low) .mesh_outbound_min(params.mesh_outbound_min) .mesh_n(params.mesh_n) - .history_length(500) + .history_length(10) .max_transmit_size(MAX_GOSSIP_MSG_SIZE) // Use the (blake3) hash of a message as its ID .message_id_fn(message_id_fn) From 3a24741a168ab65e7e1ff6ead5a04a68ac2d5198 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 26 Jul 2024 17:22:01 +0200 Subject: [PATCH 1141/1393] Builder API adjustments (#3493) --- builder-api/api/v0_3/builder.toml | 33 +-- builder-api/src/v0_3/block_info.rs | 32 --- builder-api/src/v0_3/builder.rs | 33 +-- builder-api/src/v0_3/data_source.rs | 28 +-- builder-api/src/v0_3/mod.rs | 1 - task-impls/src/builder.rs | 23 +- task-impls/src/transactions.rs | 231 +++++++------------ types/src/bundle.rs | 7 +- types/src/traits/auction_results_provider.rs | 37 --- 9 files changed, 111 insertions(+), 314 deletions(-) delete mode 100644 builder-api/src/v0_3/block_info.rs diff --git a/builder-api/api/v0_3/builder.toml b/builder-api/api/v0_3/builder.toml index 76a27deb3d..fbdd3a1c98 100644 --- a/builder-api/api/v0_3/builder.toml +++ b/builder-api/api/v0_3/builder.toml @@ -26,37 +26,12 @@ NAME = "hs-builder-get" DESCRIPTION = "" FORMAT_VERSION = "0.1.0" -[route.available_blocks] -PATH = ["availableblocks/:parent_hash/:view_number/:sender/:signature"] -":parent_hash" = "TaggedBase64" +[route.bundle] +PATH = ["bundle/:view_number"] ":view_number" = "Integer" -":sender" = "TaggedBase64" -":signature" = "TaggedBase64" +METHOD = "POST" DOC = """ -Get descriptions for all block candidates based on a specific parent block. - -Returns -``` -[ - "block_metadata": { - "block_hash": TaggedBase64, - "block_size": integer, - "offered_fee": integer, - }, -] -``` -""" - -[route.claim_block] -PATH = ["claimblock/:block_hash/:view_number/:sender/:signature"] -":block_hash" = "TaggedBase64" -":view_number" = "Integer" -":sender" = "TaggedBase64" -":signature" = "TaggedBase64" -DOC = """ -Get the specified block candidate. - -Returns application-specific encoded transactions type +Fetch the bundle from the builder for the specified view. """ [route.builder_address] diff --git a/builder-api/src/v0_3/block_info.rs b/builder-api/src/v0_3/block_info.rs deleted file mode 100644 index 56df60864b..0000000000 --- a/builder-api/src/v0_3/block_info.rs +++ /dev/null @@ -1,32 +0,0 @@ -use hotshot_types::traits::{ - node_implementation::NodeType, signature_key::BuilderSignatureKey, BlockPayload, -}; -use serde::{Deserialize, Serialize}; - -/// No changes to these types -pub use crate::v0_1::block_info::AvailableBlockInfo; - -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] -#[serde(bound = "")] -pub struct AvailableBlockData { - pub block_payload: TYPES::BlockPayload, - pub metadata: >::Metadata, - pub fee: u64, - pub signature: - <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, - pub fee_signature: - <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, - pub sender: ::BuilderSignatureKey, -} - -impl AvailableBlockData { - pub fn validate_signature(&self) -> bool { - // verify the signature over the message, construct the builder commitment - let builder_commitment = self.block_payload.builder_commitment(&self.metadata); - self.sender - .validate_builder_signature(&self.signature, builder_commitment.as_ref()) - && self - .sender - .validate_sequencing_fee_signature_marketplace(&self.fee_signature, self.fee) - } -} diff --git a/builder-api/src/v0_3/builder.rs b/builder-api/src/v0_3/builder.rs index d5ebad46fa..b064fd84af 100644 --- a/builder-api/src/v0_3/builder.rs +++ b/builder-api/src/v0_3/builder.rs @@ -1,15 +1,15 @@ use futures::FutureExt; -use hotshot_types::{traits::node_implementation::NodeType, utils::BuilderCommitment}; +use hotshot_types::traits::node_implementation::NodeType; use snafu::ResultExt; use tide_disco::{api::ApiError, method::ReadState, Api}; use super::{data_source::BuilderDataSource, Version}; +use crate::api::load_api; /// No changes to these types pub use crate::v0_1::builder::{ submit_api, BlockAvailableSnafu, BlockClaimSnafu, BuildError, BuilderAddressSnafu, Error, Options, }; -use crate::{api::load_api, v0_1::builder::try_extract_param}; pub fn define_api( options: &Options, @@ -24,33 +24,12 @@ where options.extensions.clone(), )?; api.with_version("0.0.3".parse().unwrap()) - .get("available_blocks", |req, state| { + .get("bundle", |req, state| { async move { - let hash = req.blob_param("parent_hash")?; let view_number = req.integer_param("view_number")?; - let signature = try_extract_param(&req, "signature")?; - let sender = try_extract_param(&req, "sender")?; - state - .available_blocks(&hash, view_number, sender, &signature) - .await - .context(BlockAvailableSnafu { - resource: hash.to_string(), - }) - } - .boxed() - })? - .get("claim_block", |req, state| { - async move { - let block_hash: BuilderCommitment = req.blob_param("block_hash")?; - let view_number = req.integer_param("view_number")?; - let signature = try_extract_param(&req, "signature")?; - let sender = try_extract_param(&req, "sender")?; - state - .claim_block(&block_hash, view_number, sender, &signature) - .await - .context(BlockClaimSnafu { - resource: block_hash.to_string(), - }) + state.bundle(view_number).await.context(BlockClaimSnafu { + resource: view_number.to_string(), + }) } .boxed() })? diff --git a/builder-api/src/v0_3/data_source.rs b/builder-api/src/v0_3/data_source.rs index 8face03e42..d37acd3dde 100644 --- a/builder-api/src/v0_3/data_source.rs +++ b/builder-api/src/v0_3/data_source.rs @@ -1,36 +1,14 @@ use async_trait::async_trait; -use hotshot_types::{ - traits::{node_implementation::NodeType, signature_key::SignatureKey}, - utils::BuilderCommitment, - vid::VidCommitment, -}; +use hotshot_types::{bundle::Bundle, traits::node_implementation::NodeType}; -use super::{ - block_info::{AvailableBlockData, AvailableBlockInfo}, - builder::BuildError, -}; +use super::builder::BuildError; /// No changes to these types pub use crate::v0_1::data_source::AcceptsTxnSubmits; #[async_trait] pub trait BuilderDataSource { /// To get the list of available blocks - async fn available_blocks( - &self, - for_parent: &VidCommitment, - view_number: u64, - sender: TYPES::SignatureKey, - signature: &::PureAssembledSignatureType, - ) -> Result>, BuildError>; - - /// to claim a block from the list of provided available blocks - async fn claim_block( - &self, - block_hash: &BuilderCommitment, - view_number: u64, - sender: TYPES::SignatureKey, - signature: &::PureAssembledSignatureType, - ) -> Result, BuildError>; + async fn bundle(&self, view_number: u64) -> Result, BuildError>; /// To get the builder's address async fn builder_address(&self) -> Result; diff --git a/builder-api/src/v0_3/mod.rs b/builder-api/src/v0_3/mod.rs index 21b42d83e0..b691543cb9 100644 --- a/builder-api/src/v0_3/mod.rs +++ b/builder-api/src/v0_3/mod.rs @@ -1,4 +1,3 @@ -pub mod block_info; pub mod builder; pub mod data_source; /// No changes to this module diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index df7fd5f661..f5b5b1261d 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -194,16 +194,10 @@ pub mod v0_2 { pub type Version = StaticVersion<0, 2>; } -/// Version 0.3. Removes `claim_block_header_input` endpoint, adds fee information -/// to `claim_block` endpoint. +/// Version 0.3: marketplace. Bundles. pub mod v0_3 { - use hotshot_builder_api::v0_3::block_info::AvailableBlockData; pub use hotshot_builder_api::v0_3::Version; - use hotshot_types::{ - traits::{node_implementation::NodeType, signature_key::SignatureKey}, - utils::BuilderCommitment, - }; - use tagged_base64::TaggedBase64; + use hotshot_types::{bundle::Bundle, traits::node_implementation::NodeType}; use vbs::version::StaticVersion; pub use super::BuilderClientError; @@ -217,18 +211,9 @@ pub mod v0_3 { /// # Errors /// - [`BuilderClientError::NotFound`] if block isn't available /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly - pub async fn claim_block( - &self, - block_hash: BuilderCommitment, - view_number: u64, - sender: TYPES::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuilderClientError> { - let encoded_signature: TaggedBase64 = signature.clone().into(); + pub async fn bundle(&self, view_number: u64) -> Result, BuilderClientError> { self.inner - .get(&format!( - "claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" - )) + .get(&format!("bundle/{view_number}")) .send() .await .map_err(Into::into) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e912b5a42d..1885bdaf3a 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -5,10 +5,10 @@ use std::{ use anyhow::{bail, Result}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_sleep; +use async_compatibility_layer::art::{async_sleep, async_timeout}; use async_lock::RwLock; use async_trait::async_trait; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::{future::join_all, stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -18,7 +18,7 @@ use hotshot_types::{ event::{Event, EventType}, simple_certificate::{version, UpgradeCertificate}, traits::{ - auction_results_provider::AuctionResultsProvider, + auction_results_provider::{AuctionResultsProvider, HasUrls}, block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -29,7 +29,7 @@ use hotshot_types::{ vid::{VidCommitment, VidPrecomputeData}, }; use tracing::{debug, error, instrument, warn}; -use vbs::version::{StaticVersionType, Version}; +use vbs::version::StaticVersionType; use vec1::Vec1; use crate::{ @@ -143,7 +143,7 @@ impl> TransactionTaskState> TransactionTaskState bundles.push(b), + _ => continue, + } + } + let mut sequencing_fees = Vec::new(); let mut transactions: Vec< >::Transaction, @@ -435,11 +460,7 @@ impl> TransactionTaskState Option> { + async fn wait_for_block(&self, block_view: TYPES::Time) -> Option> { let task_start_time = Instant::now(); // Find commitment to the block we want to build upon @@ -470,10 +491,10 @@ impl> TransactionTaskState> TransactionTaskState::SignatureKey as SignatureKey>::PureAssembledSignatureType, - version: Version, ) -> Vec<(AvailableBlockInfo, usize)> { - /// Implementations between versions are essentially the same except for the builder - /// clients used. The most conscise way to express this is with a macro. - macro_rules! inner_impl { - ($clients:ident) => {{ - // Create a collection of futures that call available_blocks endpoint for every builder - let tasks = self - .$clients - .iter() - .enumerate() - .map(|(builder_idx, client)| async move { - client - .available_blocks( - parent_comm, - view_number.u64(), - self.public_key.clone(), - parent_comm_sig, - ) - .await - .map(move |blocks| { - // Add index into `self.builder_clients` for each block so that we know - // where to claim it from later - blocks - .into_iter() - .map(move |block_info| (block_info, builder_idx)) - }) - }) - .collect::>(); - - // A vector of resolved builder responses - let mut results = Vec::with_capacity(self.$clients.len()); - - // Instant we start querying builders for available blocks - let query_start = Instant::now(); - - // First we complete the query to the fastest fraction of the builders - let threshold = (self.$clients.len() * BUILDER_MAIN_BATCH_THRESHOLD_DIVIDEND) - .div_ceil(BUILDER_MAIN_BATCH_THRESHOLD_DIVISOR); - let mut tasks = tasks.take(threshold); - while let Some(result) = tasks.next().await { - results.push(result); - if query_start.elapsed() > BUILDER_MAIN_BATCH_CUTOFF { - break; - } - } - - // Then we query the rest, alotting additional `elapsed * BUILDER_ADDITIONAL_TIME_MULTIPLIER` - // for them to respond. There's a fixed floor of `BUILDER_MINIMUM_QUERY_TIME` for both - // phases - let timeout = async_sleep(std::cmp::max( - query_start - .elapsed() - .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), - BUILDER_MINIMUM_QUERY_TIME.saturating_sub(query_start.elapsed()), - )); - futures::pin_mut!(timeout); // Stream::next requires Self::Unpin - let mut tasks = tasks.into_inner().take_until(timeout); - while let Some(result) = tasks.next().await { - results.push(result); - } - - results - .into_iter() - .filter_map(|result| match result { - Ok(value) => Some(value), - Err(err) => { - tracing::warn!(%err, "Error getting available blocks"); - None - } + let tasks = self + .builder_clients + .iter() + .enumerate() + .map(|(builder_idx, client)| async move { + client + .available_blocks( + parent_comm, + view_number.u64(), + self.public_key.clone(), + parent_comm_sig, + ) + .await + .map(move |blocks| { + blocks + .into_iter() + .map(move |block_info| (block_info, builder_idx)) }) - .flatten() - .collect::>() - }} + }) + .collect::>(); + let mut results = Vec::with_capacity(self.builder_clients.len()); + let query_start = Instant::now(); + let threshold = (self.builder_clients.len() * BUILDER_MAIN_BATCH_THRESHOLD_DIVIDEND) + .div_ceil(BUILDER_MAIN_BATCH_THRESHOLD_DIVISOR); + let mut tasks = tasks.take(threshold); + while let Some(result) = tasks.next().await { + results.push(result); + if query_start.elapsed() > BUILDER_MAIN_BATCH_CUTOFF { + break; + } } - - if version >= MarketplaceVersion::version() { - inner_impl!(builder_clients_marketplace) - } else { - inner_impl!(builder_clients) + let timeout = async_sleep(std::cmp::max( + query_start + .elapsed() + .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), + BUILDER_MINIMUM_QUERY_TIME.saturating_sub(query_start.elapsed()), + )); + futures::pin_mut!(timeout); + let mut tasks = tasks.into_inner().take_until(timeout); + while let Some(result) = tasks.next().await { + results.push(result); } + results + .into_iter() + .filter_map(|result| match result { + Ok(value) => Some(value), + Err(err) => { + tracing::warn!(%err,"Error getting available blocks"); + None + } + }) + .flatten() + .collect::>() } /// Get a block from builder. @@ -605,10 +599,9 @@ impl> TransactionTaskState::SignatureKey as SignatureKey>::PureAssembledSignatureType, - version: Version, ) -> anyhow::Result> { let mut available_blocks = self - .get_available_blocks(parent_comm, view_number, parent_comm_sig, version) + .get_available_blocks(parent_comm, view_number, parent_comm_sig) .await; available_blocks.sort_by(|(l, _), (r, _)| { @@ -650,53 +643,7 @@ impl> TransactionTaskState= MarketplaceVersion::version() { - let client = &self.builder_clients_marketplace[builder_idx]; - - let block = client - .claim_block( - block_info.block_hash.clone(), - view_number.u64(), - self.public_key.clone(), - &request_signature, - ) - .await; - - let block_data = match block { - Ok(block_data) => block_data, - Err(err) => { - tracing::warn!(%err, "Error claiming block data"); - continue; - } - }; - - // verify the signature over the message, construct the builder commitment - let builder_commitment = block_data - .block_payload - .builder_commitment(&block_data.metadata); - if !block_data - .sender - .validate_builder_signature(&block_data.signature, builder_commitment.as_ref()) - { - tracing::warn!( - "Failed to verify available block data response message signature" - ); - continue; - } - - let fee = BuilderFee { - fee_amount: block_info.offered_fee, - fee_account: block_data.sender, - fee_signature: block_data.signature, - }; - - BuilderResponse { - fee, - block_payload: block_data.block_payload, - metadata: block_data.metadata, - precompute_data: None, - } - } else { + let response = { let client = &self.builder_clients[builder_idx]; let (block, header_input) = futures::join! { diff --git a/types/src/bundle.rs b/types/src/bundle.rs index 81bf666b98..e211772557 100644 --- a/types/src/bundle.rs +++ b/types/src/bundle.rs @@ -2,7 +2,10 @@ use serde::{Deserialize, Serialize}; -use crate::traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}; +use crate::traits::{ + block_contents::BuilderFee, node_implementation::NodeType, signature_key::BuilderSignatureKey, + BlockPayload, +}; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "TYPES: NodeType")] @@ -14,7 +17,7 @@ pub struct Bundle { pub transactions: Vec<>::Transaction>, /// The signature over the bundle. - pub signature: TYPES::SignatureKey, + pub signature: ::BuilderSignature, /// The fee for sequencing pub sequencing_fee: BuilderFee, diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index aa27925510..1d373e2af2 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -2,16 +2,10 @@ //! which handles connecting to, and fetching the allocation results from, the Solver. use anyhow::Result; -use async_compatibility_layer::art::async_timeout; use async_trait::async_trait; -use futures::future::join_all; use url::Url; use super::node_implementation::NodeType; -use crate::{ - bundle::Bundle, - constants::{AUCTION_RESULTS_FETCH_TIMEOUT, BUNDLE_FETCH_TIMEOUT}, -}; /// This trait guarantees that a particular type has urls that can be extracted from it. This trait /// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a @@ -34,35 +28,4 @@ pub trait AuctionResultsProvider: Send + Sync + Clone { /// Fetches the auction result for a view. Does not cache the result, /// subsequent calls will invoke additional wasted calls. async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; - - /// Fetches the bundles for a view. - async fn fetch_bundles(&self, view_number: TYPES::Time) -> Result>> { - let result = async_timeout( - AUCTION_RESULTS_FETCH_TIMEOUT, - self.fetch_auction_result(view_number), - ) - .await??; - - let client = reqwest::Client::new(); - - let mut futures = Vec::new(); - - for url in result.urls() { - futures.push(async_timeout( - BUNDLE_FETCH_TIMEOUT, - client.get(url).send().await?.json::>(), - )); - } - - let mut bundles = Vec::new(); - - for bundle in join_all(futures).await { - match bundle { - Ok(Ok(b)) => bundles.push(b), - _ => continue, - } - } - - Ok(bundles) - } } From a9a6beba3f4535eae23fa3a3fb9de10ea6123121 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 26 Jul 2024 14:24:36 -0400 Subject: [PATCH 1142/1393] Remove log for skipping upgrade proposal (#3498) --- task-impls/src/upgrade.rs | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 5fc4b70151..03df3d2b09 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -92,12 +92,9 @@ pub struct UpgradeTaskState> { } impl> UpgradeTaskState { - /// Check if the version has been upgraded. + /// Check if we have decided on an upgrade certificate async fn upgraded(&self) -> bool { - if let Some(cert) = self.decided_upgrade_certificate.read().await.clone() { - return cert.data.new_version == TYPES::Upgrade::VERSION; - } - false + self.decided_upgrade_certificate.read().await.is_some() } /// main task event handler @@ -263,15 +260,6 @@ impl> UpgradeTaskState { self.cur_view = *new_view; - // Skip proposing if the version has already been upgraded. - if self.upgraded().await { - info!( - "Already upgraded to {:?}, skip proposing.", - TYPES::Upgrade::VERSION - ); - return None; - } - let view: u64 = *self.cur_view; let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) @@ -283,6 +271,7 @@ impl> UpgradeTaskState { && view < self.stop_proposing_view && time >= self.start_proposing_time && time < self.stop_proposing_time + && !self.upgraded().await && self .quorum_membership .leader(TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET)) @@ -310,6 +299,8 @@ impl> UpgradeTaskState { ) .expect("Failed to sign upgrade proposal commitment!"); + warn!("Sending upgrade proposal:\n\n {:?}", upgrade_proposal); + let message = Proposal { data: upgrade_proposal, signature, From 58b8a9f3a971d740aa01ea611629aa7f9c696023 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Mon, 29 Jul 2024 09:16:54 -0400 Subject: [PATCH 1143/1393] [Audit] - Fetch Blocks Properly when Leader Consecutively (#3491) * work on fix where we may skip a block if leader two views in a row * add parameterization for build_system_handle so that we can choose which NodeTypes we use * run linter * address comments * fix double reference * cleanup --- example-types/src/node_types.rs | 40 +++- hotshot/src/traits/election.rs | 2 + .../static_committee_leader_two_views.rs | 187 ++++++++++++++++++ task-impls/src/transactions.rs | 65 +++--- testing/src/helpers.rs | 33 ++-- testing/tests/tests_1/consensus_task.rs | 12 +- testing/tests/tests_1/da_task.rs | 4 +- .../tests_1/quorum_proposal_recv_task.rs | 4 +- testing/tests/tests_1/quorum_proposal_task.rs | 12 +- testing/tests/tests_1/quorum_vote_task.rs | 8 +- testing/tests/tests_1/transaction_task.rs | 59 ++++++ .../tests_1/upgrade_task_with_consensus.rs | 6 +- .../tests_1/upgrade_task_with_proposal.rs | 2 +- .../tests/tests_1/upgrade_task_with_vote.rs | 2 +- testing/tests/tests_1/vid_task.rs | 2 +- testing/tests/tests_1/view_sync_task.rs | 2 +- 16 files changed, 375 insertions(+), 65 deletions(-) create mode 100644 hotshot/src/traits/election/static_committee_leader_two_views.rs create mode 100644 testing/tests/tests_1/transaction_task.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 57f708b84f..57b031e0ff 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -1,5 +1,8 @@ use hotshot::traits::{ - election::static_committee::{GeneralStaticCommittee, StaticCommittee}, + election::{ + static_committee::{GeneralStaticCommittee, StaticCommittee}, + static_committee_leader_two_views::StaticCommitteeLeaderForTwoViews, + }, implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, }; @@ -52,6 +55,41 @@ impl NodeType for TestTypes { type BuilderSignatureKey = BuilderKey; } +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +/// filler struct to implement node type and allow us +/// to select our traits +pub struct TestConsecutiveLeaderTypes; +impl NodeType for TestConsecutiveLeaderTypes { + type Base = StaticVersion<0, 1>; + type Upgrade = StaticVersion<0, 2>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + type Time = ViewNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = + StaticCommitteeLeaderForTwoViews; + type BuilderSignatureKey = BuilderKey; +} + /// The Push CDN implementation #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct PushCdnImpl; diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election.rs index b3753bbc09..fdc277477d 100644 --- a/hotshot/src/traits/election.rs +++ b/hotshot/src/traits/election.rs @@ -2,3 +2,5 @@ /// static (round robin) committee election pub mod static_committee; +/// static (round robin leader for 2 consecutive views) committee election +pub mod static_committee_leader_two_views; diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs new file mode 100644 index 0000000000..8d3c0c78e7 --- /dev/null +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -0,0 +1,187 @@ +use std::{marker::PhantomData, num::NonZeroU64}; + +use ethereum_types::U256; +// use ark_bls12_381::Parameters as Param381; +use hotshot_types::traits::signature_key::StakeTableEntryType; +use hotshot_types::{ + signature_key::BLSPubKey, + traits::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}, + PeerConfig, +}; +use tracing::debug; + +/// Dummy implementation of [`Membership`] + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub struct StaticCommitteeLeaderForTwoViews { + /// All the nodes participating and their stake + all_nodes_with_stake: Vec, + /// The nodes on the static committee and their stake + committee_nodes_with_stake: Vec, + /// builder nodes + committee_nodes_without_stake: Vec, + /// the number of fixed leader for gpuvid + fixed_leader_for_gpuvid: usize, + /// Node type phantom + _type_phantom: PhantomData, +} + +/// static committee using a vrf kp +pub type StaticCommittee = StaticCommitteeLeaderForTwoViews; + +impl StaticCommitteeLeaderForTwoViews { + /// Creates a new dummy elector + #[must_use] + pub fn new( + _nodes: &[PUBKEY], + nodes_with_stake: Vec, + nodes_without_stake: Vec, + fixed_leader_for_gpuvid: usize, + ) -> Self { + Self { + all_nodes_with_stake: nodes_with_stake.clone(), + committee_nodes_with_stake: nodes_with_stake, + committee_nodes_without_stake: nodes_without_stake, + fixed_leader_for_gpuvid, + _type_phantom: PhantomData, + } + } +} + +impl Membership + for StaticCommitteeLeaderForTwoViews +where + TYPES: NodeType, +{ + /// Clone the public key and corresponding stake table for current elected committee + fn committee_qc_stake_table(&self) -> Vec { + self.committee_nodes_with_stake.clone() + } + + /// Index the vector of public keys with the current view number + fn leader(&self, view_number: TYPES::Time) -> PUBKEY { + // two connsecutive views will have same index starting with even number. + // eg 0->1, 2->3 ... 10->11 etc + let index = + usize::try_from((*view_number / 2) % self.all_nodes_with_stake.len() as u64).unwrap(); + let res = self.all_nodes_with_stake[index].clone(); + TYPES::SignatureKey::public_key(&res) + } + + fn has_stake(&self, pub_key: &PUBKEY) -> bool { + let entry = pub_key.stake_table_entry(1u64); + self.committee_nodes_with_stake.contains(&entry) + } + + fn stake( + &self, + pub_key: &::SignatureKey, + ) -> Option<::StakeTableEntry> { + let entry = pub_key.stake_table_entry(1u64); + if self.committee_nodes_with_stake.contains(&entry) { + Some(entry) + } else { + None + } + } + + fn create_election( + mut all_nodes: Vec>, + committee_members: Vec>, + fixed_leader_for_gpuvid: usize, + ) -> Self { + let mut committee_nodes_with_stake = Vec::new(); + let mut committee_nodes_without_stake = Vec::new(); + + // Iterate over committee members + for entry in committee_members + .iter() + .map(|entry| entry.stake_table_entry.clone()) + { + if entry.stake() > U256::from(0) { + // Positive stake + committee_nodes_with_stake.push(entry); + } else { + // Zero stake + committee_nodes_without_stake.push(PUBKEY::public_key(&entry)); + } + } + + // Retain all nodes with stake + all_nodes.retain(|entry| entry.stake_table_entry.stake() > U256::from(0)); + + debug!( + "Election Membership Size: {}", + committee_nodes_with_stake.len() + ); + + Self { + all_nodes_with_stake: all_nodes + .into_iter() + .map(|entry| entry.stake_table_entry) + .collect(), + committee_nodes_with_stake, + committee_nodes_without_stake, + fixed_leader_for_gpuvid, + _type_phantom: PhantomData, + } + } + + fn total_nodes(&self) -> usize { + self.committee_nodes_with_stake.len() + } + + fn success_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 2) / 3) + 1).unwrap() + } + + fn failure_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64) / 3) + 1).unwrap() + } + + fn upgrade_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 9) / 10) + 1).unwrap() + } + + fn staked_committee( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.committee_nodes_with_stake + .iter() + .map(|node| ::SignatureKey::public_key(node)) + .collect() + } + + fn non_staked_committee( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.committee_nodes_without_stake.iter().cloned().collect() + } + + fn whole_committee( + &self, + view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + let mut committee = self.staked_committee(view_number); + committee.extend(self.non_staked_committee(view_number)); + committee + } +} + +impl StaticCommitteeLeaderForTwoViews +where + TYPES: NodeType, +{ + #[allow(clippy::must_use_candidate)] + /// get the non-staked builder nodes + pub fn non_staked_nodes_count(&self) -> usize { + self.committee_nodes_without_stake.len() + } + #[allow(clippy::must_use_candidate)] + /// get all the non-staked nodes + pub fn non_staked_nodes(&self) -> Vec { + self.committee_nodes_without_stake.clone() + } +} diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 1885bdaf3a..621ff66077 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -109,11 +109,37 @@ pub struct TransactionTaskState> { } impl> TransactionTaskState { + /// handle view change decide legacy or not + pub async fn handle_view_change( + &mut self, + event_stream: &Sender>>, + block_view: TYPES::Time, + ) -> Option { + let version = match version( + block_view, + &self.decided_upgrade_certificate.read().await.clone(), + ) { + Ok(v) => v, + Err(e) => { + tracing::error!("Failed to calculate version: {:?}", e); + return None; + } + }; + + if version < MarketplaceVersion::VERSION { + self.handle_view_change_legacy(event_stream, block_view) + .await + } else { + self.handle_view_change_marketplace(event_stream, block_view) + .await + } + } + /// legacy view change handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle_view_change_legacy( &mut self, - event_stream: Sender>>, + event_stream: &Sender>>, block_view: TYPES::Time, ) -> Option { let version = match hotshot_types::simple_certificate::version( @@ -162,7 +188,7 @@ impl> TransactionTaskState> TransactionTaskState> TransactionTaskState>>, + event_stream: &Sender>>, block_view: TYPES::Time, ) -> Option { let version = match hotshot_types::simple_certificate::version( @@ -299,7 +325,7 @@ impl> TransactionTaskState> TransactionTaskState> TransactionTaskState v, - Err(e) => { - tracing::error!("Failed to calculate version: {:?}", e); - return None; - } - }; + if make_block { + self.handle_view_change(&event_stream, self.cur_view).await; + } - if version < MarketplaceVersion::VERSION { - self.handle_view_change_legacy(event_stream, block_view) - .await; - } else { - self.handle_view_change_marketplace(event_stream, block_view) - .await; + if next_leader { + self.handle_view_change(&event_stream, next_view).await; } } HotShotEvent::Shutdown => { diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 66cb0781d8..f96e6569bc 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -6,13 +6,16 @@ use bitvec::bitvec; use committable::Committable; use ethereum_types::U256; use hotshot::{ + traits::{NodeImplementation, TestableNodeImplementation}, types::{BLSPubKey, SignatureKey, SystemContextHandle}, HotShotInitializer, Memberships, SystemContext, }; use hotshot_example_types::{ + auction_results_provider_types::TestAuctionResultsProvider, block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, }; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ @@ -39,15 +42,21 @@ use crate::test_builder::TestDescription; /// create the [`SystemContextHandle`] from a node id /// # Panics /// if cannot create a [`HotShotInitializer`] -pub async fn build_system_handle( +pub async fn build_system_handle< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + > + TestableNodeImplementation, +>( node_id: u64, ) -> ( - SystemContextHandle, - Sender>>, - Receiver>>, + SystemContextHandle, + Sender>>, + Receiver>>, ) { - let builder: TestDescription = - TestDescription::default_multiple_rounds(); + let builder: TestDescription = TestDescription::default_multiple_rounds(); let launcher = builder.gen_launcher(node_id); @@ -56,33 +65,33 @@ pub async fn build_system_handle( let auction_results_provider = (launcher.resource_generator.auction_results_provider)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) + let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) .await .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); - let public_key = config.my_own_validator_config.public_key; + let public_key = config.my_own_validator_config.public_key.clone(); let _known_nodes_without_stake = config.known_nodes_without_stake.clone(); let memberships = Memberships { - quorum_membership: ::Membership::create_election( + quorum_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), - da_membership: ::Membership::create_election( + da_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), config.fixed_leader_for_gpuvid, ), - vid_membership: ::Membership::create_election( + vid_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), config.fixed_leader_for_gpuvid, ), - view_sync_membership: ::Membership::create_election( + view_sync_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, config.fixed_leader_for_gpuvid, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 5f0b3e9bec..4a7314ced5 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -50,7 +50,7 @@ async fn test_consensus_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -135,7 +135,7 @@ async fn test_consensus_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -193,7 +193,7 @@ async fn test_view_sync_finalize_propose() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(4).await.0; + let handle = build_system_handle::(4).await.0; let (priv_key, pub_key) = key_pair_for_id(4); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -327,7 +327,7 @@ async fn test_view_sync_finalize_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(5).await.0; + let handle = build_system_handle::(5).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -422,7 +422,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(5).await.0; + let handle = build_system_handle::(5).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -522,7 +522,7 @@ async fn test_vid_disperse_storage_failure() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 1e9c772d45..e98711d5ed 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -32,7 +32,7 @@ async fn test_da_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -115,7 +115,7 @@ async fn test_da_task_storage_failure() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 29be512551..8773aac6e2 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -40,7 +40,7 @@ async fn test_quorum_proposal_recv_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); let consensus = handle.hotshot.consensus(); @@ -128,7 +128,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(4).await.0; + let handle = build_system_handle::(4).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); let consensus = handle.hotshot.consensus(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index b435d98fa3..96c11122f9 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -43,7 +43,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 1; - let handle = build_system_handle(node_id).await.0; + let handle = build_system_handle::(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -130,7 +130,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle(node_id).await.0; + let handle = build_system_handle::(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -307,7 +307,7 @@ async fn test_quorum_proposal_task_qc_timeout() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle(node_id).await.0; + let handle = build_system_handle::(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -388,7 +388,7 @@ async fn test_quorum_proposal_task_view_sync() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 2; - let handle = build_system_handle(node_id).await.0; + let handle = build_system_handle::(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -470,7 +470,7 @@ async fn test_quorum_proposal_task_liveness_check() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle(node_id).await.0; + let handle = build_system_handle::(node_id).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -640,7 +640,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 015ecedd6a..b8b4680bcd 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -35,7 +35,7 @@ async fn test_quorum_vote_task_success() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -104,7 +104,7 @@ async fn test_quorum_vote_task_vote_now() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -152,7 +152,7 @@ async fn test_quorum_vote_task_miss_dependency() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -237,7 +237,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs new file mode 100644 index 0000000000..25b2f53942 --- /dev/null +++ b/testing/tests/tests_1/transaction_task.rs @@ -0,0 +1,59 @@ +use hotshot::tasks::task_state::CreateTaskState; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestConsecutiveLeaderTypes}, + block_types::TestMetadata +}; +use hotshot_task_impls::{ + events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState +}; +use hotshot_testing::helpers::build_system_handle; +use hotshot_types::{ + data::{ViewNumber, null_block, PackedBundle}, + traits::{node_implementation::ConsensusTime, election::Membership, block_contents::precompute_vid_commitment}, + constants::BaseVersion +}; +use vbs::version::StaticVersionType; + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_transaction_task_leader_two_views_in_a_row() { + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // Build the API for node 2. + let node_id = 2; + let handle = build_system_handle::(node_id).await.0; + + let mut input = Vec::new(); + let mut output = Vec::new(); + + let current_view = ViewNumber::new(4); + input.push(HotShotEvent::ViewChange(current_view)); + input.push(HotShotEvent::Shutdown); + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + + let (_, precompute_data) = + precompute_vid_commitment(&[], quorum_membership.total_nodes()); + + // current view + let mut exp_packed_bundle = PackedBundle::new( + vec![].into(), + TestMetadata, + current_view, + vec1::vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + Some(precompute_data.clone()), + ); + output.push(HotShotEvent::BlockRecv(exp_packed_bundle.clone())); + + // next view + exp_packed_bundle.view_number = current_view + 1; + output.push(HotShotEvent::BlockRecv(exp_packed_bundle)); + + let transaction_state = TransactionTaskState::::create_from(&handle).await; + run_harness(input, output, transaction_state, false).await; +} \ No newline at end of file diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index a247d837f1..958ccad63a 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -40,7 +40,7 @@ async fn test_upgrade_task_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(1).await.0; + let handle = build_system_handle::(1).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -197,7 +197,7 @@ async fn test_upgrade_task_propose() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(3).await.0; + let handle = build_system_handle::(3).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -355,7 +355,7 @@ async fn test_upgrade_task_blank_blocks() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(6).await.0; + let handle = build_system_handle::(6).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index de852f9824..3095c4ef25 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -52,7 +52,7 @@ async fn test_upgrade_task_with_proposal() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(3).await.0; + let handle = build_system_handle::(3).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 42b8013108..9ea15925d5 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -45,7 +45,7 @@ async fn test_upgrade_task_with_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 13a4e0741e..7d6ad7bc08 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -36,7 +36,7 @@ async fn test_vid_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 2. - let handle = build_system_handle(2).await.0; + let handle = build_system_handle::(2).await.0; let pub_key = handle.public_key(); // quorum membership for VID share distribution diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 72c2cdfbea..42a4dd6a56 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -17,7 +17,7 @@ async fn test_view_sync_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 5. - let handle = build_system_handle(5).await.0; + let handle = build_system_handle::(5).await.0; let vote_data = ViewSyncPreCommitData { relay: 0, From c6da740f5e2338a49596d8d5a1356bb11021a2b6 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 29 Jul 2024 09:35:39 -0400 Subject: [PATCH 1144/1393] Topic refactor (#3483) * topic refactor * remove topic map --- examples/infra/mod.rs | 12 ++-- hotshot/src/lib.rs | 2 +- hotshot/src/traits.rs | 2 +- .../src/traits/election/static_committee.rs | 13 ++++ .../src/traits/networking/combined_network.rs | 10 +-- .../src/traits/networking/libp2p_network.rs | 54 +++++---------- .../src/traits/networking/memory_network.rs | 68 +++++++++++++++---- .../src/traits/networking/push_cdn_network.rs | 15 +++- task-impls/src/network.rs | 3 +- testing/src/helpers.rs | 5 ++ testing/src/test_runner.rs | 6 +- testing/tests/tests_1/network_task.rs | 4 ++ testing/tests/tests_3/memory_network.rs | 29 +++----- types/src/traits/election.rs | 6 +- types/src/traits/network.rs | 23 ++++++- 15 files changed, 164 insertions(+), 88 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index c8ca5e626d..23350e43bf 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -21,8 +21,8 @@ use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CombinedNetworks, Libp2pMetricsValue, - Libp2pNetwork, PushCdnNetwork, Topic, WrappedSignatureKey, + derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, CombinedNetworks, Libp2pMetricsValue, + Libp2pNetwork, PushCdnNetwork, WrappedSignatureKey, }, BlockPayload, NodeImplementation, }, @@ -52,7 +52,7 @@ use hotshot_types::{ traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, - network::ConnectedNetwork, + network::{ConnectedNetwork, Topic}, node_implementation::{ConsensusTime, NodeType}, states::TestableState, }, @@ -376,6 +376,7 @@ pub trait RunDa< let quorum_membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.config.fixed_leader_for_gpuvid, ); @@ -384,6 +385,7 @@ pub trait RunDa< let da_membership = ::Membership::create_election( known_nodes_with_stake.clone(), config.config.known_da_nodes.clone(), + Topic::Da, config.config.fixed_leader_for_gpuvid, ); @@ -625,9 +627,9 @@ where }; // See if we should be DA, subscribe to the DA topic if so - let mut topics = vec![Topic::Global]; + let mut topics = vec![CdnTopic::Global]; if config.config.my_own_validator_config.is_da { - topics.push(Topic::Da); + topics.push(CdnTopic::Da); } // Create the network and await the initial connection diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa57e2a187..4e47c9d037 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -432,7 +432,7 @@ impl> SystemContext { api .network.broadcast_message( serialized_message, - da_membership.whole_committee(view_number), + da_membership.committee_topic(), BroadcastDelay::None, ), api diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index a70eca75a3..a4d6372a5f 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -18,7 +18,7 @@ pub mod implementations { }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ - CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic, + CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic as CdnTopic, WrappedSignatureKey, }, }; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 00f6f55b60..3163d3b532 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,6 +1,7 @@ use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; +use hotshot_types::traits::network::Topic; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ @@ -26,6 +27,9 @@ pub struct GeneralStaticCommittee { fixed_leader_for_gpuvid: usize, /// Node type phantom _type_phantom: PhantomData, + + /// The network topic of the committee + committee_topic: Topic, } /// static committee using a vrf kp @@ -39,6 +43,7 @@ impl GeneralStaticCommittee { nodes_with_stake: Vec, nodes_without_stake: Vec, fixed_leader_for_gpuvid: usize, + committee_topic: Topic, ) -> Self { Self { all_nodes_with_stake: nodes_with_stake.clone(), @@ -46,6 +51,7 @@ impl GeneralStaticCommittee { committee_nodes_without_stake: nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, + committee_topic, } } } @@ -60,6 +66,11 @@ where self.committee_nodes_with_stake.clone() } + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic { + self.committee_topic.clone() + } + #[cfg(not(any( feature = "randomized-leader-election", feature = "fixed-leader-election" @@ -115,6 +126,7 @@ where fn create_election( mut all_nodes: Vec>, committee_members: Vec>, + committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self { let mut committee_nodes_with_stake = Vec::new(); @@ -151,6 +163,7 @@ where committee_nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, + committee_topic, } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 250d0a1757..7c9b590144 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -32,7 +32,7 @@ use hotshot_types::{ }, data::ViewNumber, traits::{ - network::{BroadcastDelay, ConnectedNetwork, ResponseChannel}, + network::{BroadcastDelay, ConnectedNetwork, ResponseChannel, Topic}, node_implementation::NodeType, }, BoxSyncFuture, @@ -372,24 +372,24 @@ impl ConnectedNetwork for CombinedNetworks async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); let primary_message = message.clone(); let secondary_message = message.clone(); - let primary_recipients = recipients.clone(); + let topic_clone = topic.clone(); self.send_both_networks( message, async move { primary - .broadcast_message(primary_message, primary_recipients, BroadcastDelay::None) + .broadcast_message(primary_message, topic_clone, BroadcastDelay::None) .await }, async move { secondary - .broadcast_message(secondary_message, recipients, BroadcastDelay::None) + .broadcast_message(secondary_message, topic, BroadcastDelay::None) .await }, broadcast_delay, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c948202c47..6af0d9ba81 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -45,7 +45,7 @@ use hotshot_types::{ traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, - network::{self, ConnectedNetwork, NetworkError, ResponseMessage}, + network::{self, ConnectedNetwork, NetworkError, ResponseMessage, Topic}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -60,8 +60,8 @@ use libp2p_networking::{ behaviours::request_response::{Request, Response}, spawn_network_node, MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, - NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, - NetworkNodeReceiver, NetworkNodeType, DEFAULT_REPLICATION_FACTOR, + NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeReceiver, + NetworkNodeType, DEFAULT_REPLICATION_FACTOR, }, reexport::{Multiaddr, ResponseChannel}, }; @@ -166,10 +166,8 @@ struct Libp2pNetworkInner { is_bootstrapped: Arc, /// The Libp2p metrics we're managing metrics: Libp2pMetricsValue, - /// topic map - /// hash(hashset) -> topic - /// btreemap ordered so is hashable - topic_map: RwLock, String>>, + /// The list of topics we're subscribed to + subscribed_topics: HashSet, /// the latest view number (for node lookup purposes) /// NOTE: supposed to represent a ViewNumber but we /// haven't made that atomic yet and we prefer lock-free @@ -298,7 +296,6 @@ impl TestableNetworkingImplementation .unwrap() }; let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); - let keys = all_keys.clone(); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); Box::pin(async move { @@ -309,10 +306,8 @@ impl TestableNetworkingImplementation pubkey.clone(), bootstrap_addrs_ref, usize::try_from(node_id).unwrap(), - keys, #[cfg(feature = "hotshot-testing")] reliability_config_dup, - da.clone(), da.contains(&pubkey), ) .await @@ -453,12 +448,8 @@ impl Libp2pNetwork { pub_key.clone(), Arc::new(RwLock::new(bootstrap_nodes)), usize::try_from(config.node_index)?, - // NOTE: this introduces an invariant that the keys are assigned using this indexed - // function - all_keys, #[cfg(feature = "hotshot-testing")] None, - da_keys.clone(), da_keys.contains(pub_key), ) .await?) @@ -499,10 +490,7 @@ impl Libp2pNetwork { pk: K, bootstrap_addrs: BootstrapAddrs, id: usize, - // HACK - quorum_public_keys: BTreeSet, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, - da_public_keys: BTreeSet, is_da: bool, ) -> Result, NetworkError> { // Error if there were no bootstrap nodes specified @@ -528,11 +516,11 @@ impl Libp2pNetwork { let mut pubkey_pid_map = BiHashMap::new(); pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); - let mut topic_map = BiHashMap::new(); - topic_map.insert(quorum_public_keys, QC_TOPIC.to_string()); - topic_map.insert(da_public_keys, "DA".to_string()); - - let topic_map = RwLock::new(topic_map); + // Subscribe to the relevant topics + let mut subscribed_topics = HashSet::from_iter(vec![QC_TOPIC.to_string()]); + if is_da { + subscribed_topics.insert("DA".to_string()); + } // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs @@ -556,7 +544,7 @@ impl Libp2pNetwork { dht_timeout: Duration::from_secs(120), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, - topic_map, + subscribed_topics, node_lookup_send, // Start the latest view from 0. "Latest" refers to "most recent view we are polling for // proposals on". We need this because to have consensus info injected we need a working @@ -922,7 +910,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // If we're not ready, return an error @@ -931,20 +919,10 @@ impl ConnectedNetwork for Libp2pNetwork { return Err(NetworkError::NotReady); }; - let topic_map = self.inner.topic_map.read().await; - let topic = topic_map - .get_by_left(&recipients) - .ok_or_else(|| { - self.inner.metrics.num_failed_messages.add(1); - NetworkError::Libp2p { - source: Box::new(NetworkNodeHandleError::NoSuchTopic), - } - })? - .clone(); - - // gossip doesn't broadcast from itself, so special case - if recipients.contains(&self.inner.pk) { - // send to self + // If we are subscribed to the topic, + let topic = topic.to_string(); + if self.inner.subscribed_topics.contains(&topic) { + // Short-circuit-send the message to ourselves self.inner.sender.send(message.clone()).await.map_err(|_| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index e0b95c0ce8..d1f6c22bfe 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -26,6 +26,7 @@ use hotshot_types::{ traits::{ network::{ AsyncGenerator, BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, + Topic, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -46,6 +47,10 @@ pub struct MasterMap { /// The list of `MemoryNetwork`s #[debug(skip)] map: DashMap>, + + /// The list of `MemoryNetwork`s aggregated by topic + subscribed_map: DashMap)>>, + /// The id of this `MemoryNetwork` cluster id: u64, } @@ -56,6 +61,7 @@ impl MasterMap { pub fn new() -> Arc> { Arc::new(MasterMap { map: DashMap::new(), + subscribed_map: DashMap::new(), id: rand::thread_rng().gen(), }) } @@ -102,8 +108,9 @@ impl Debug for MemoryNetwork { impl MemoryNetwork { /// Creates a new `MemoryNetwork` and hooks it up to the group through the provided `MasterMap` pub fn new( - pub_key: K, + pub_key: &K, master_map: &Arc>, + subscribed_topics: &[Topic], reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); @@ -142,8 +149,16 @@ impl MemoryNetwork { reliability_config, }), }; - master_map.map.insert(pub_key, mn.clone()); - trace!("Master map updated"); + // Insert our public key into the master map + master_map.map.insert(pub_key.clone(), mn.clone()); + // Insert our subscribed topics into the master map + for topic in subscribed_topics { + master_map + .subscribed_map + .entry(topic.clone()) + .or_default() + .push((pub_key.clone(), mn.clone())); + } mn } @@ -169,7 +184,7 @@ impl TestableNetworkingImplementation _expected_node_count: usize, _num_bootstrap: usize, _network_id: usize, - _da_committee_size: usize, + da_committee_size: usize, _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, @@ -179,7 +194,22 @@ impl TestableNetworkingImplementation Box::pin(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - let net = MemoryNetwork::new(pubkey, &master, reliability_config.clone()); + + // Subscribe to topics based on our index + let subscribed_topics = if node_id < da_committee_size as u64 { + // DA node + vec![Topic::Da, Topic::Global] + } else { + // Non-DA node + vec![Topic::Global] + }; + + let net = MemoryNetwork::new( + &pubkey, + &master, + &subscribed_topics, + reliability_config.clone(), + ); Box::pin(async move { net.into() }) }) } @@ -219,16 +249,20 @@ impl ConnectedNetwork for MemoryNetwork { async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { trace!(?message, "Broadcasting message"); - for node in &self.inner.master_map.map { + for node in self + .inner + .master_map + .subscribed_map + .entry(topic) + .or_default() + .iter() + { // TODO delay/drop etc here - let (key, node) = node.pair(); - if !recipients.contains(key) { - continue; - } + let (key, node) = node; trace!(?key, "Sending message to node"); if let Some(ref config) = &self.inner.reliability_config { { @@ -268,7 +302,17 @@ impl ConnectedNetwork for MemoryNetwork { recipients: BTreeSet, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients, broadcast_delay) + // Iterate over all topics, compare to recipients, and get the `Topic` + let topic = self + .inner + .master_map + .subscribed_map + .iter() + .find(|v| v.value().iter().all(|(k, _)| recipients.contains(k))) + .map(|v| v.key().clone()) + .ok_or(NetworkError::NotFound)?; + + self.broadcast_message(message, topic, broadcast_delay) .await } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 52a62e0b58..9cb0e2b142 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -36,7 +36,7 @@ use hotshot_types::{ data::ViewNumber, traits::{ metrics::{Counter, Metrics, NoMetrics}, - network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError}, + network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError, Topic as HotShotTopic}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -457,10 +457,10 @@ impl ConnectedNetwork for PushCdnNetwork, - _recipients: BTreeSet, + topic: HotShotTopic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Global) + self.broadcast_message(message, topic.into()) .await .map_err(|e| { self.metrics.num_failed_messages.add(1); @@ -565,3 +565,12 @@ impl ConnectedNetwork for PushCdnNetwork for Topic { + fn from(topic: HotShotTopic) -> Self { + match topic { + HotShotTopic::Global => Topic::Global, + HotShotTopic::Da => Topic::Da, + } + } +} diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 3573424af2..26ea2a9249 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -419,6 +419,7 @@ impl< }; let view = message.kind.view_number(); let committee = membership.whole_committee(view); + let committee_topic = membership.committee_topic(); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); let decided_upgrade_certificate = self.decided_upgrade_certificate.clone(); @@ -455,7 +456,7 @@ impl< net.direct_message(serialized_message, recipient).await } TransmitType::Broadcast => { - net.broadcast_message(serialized_message, committee, broadcast_delay) + net.broadcast_message(serialized_message, committee_topic, broadcast_delay) .await } TransmitType::DaCommitteeBroadcast => { diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index f96e6569bc..ac6730b93a 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -28,6 +28,7 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, + network::Topic, node_implementation::{ConsensusTime, NodeType}, }, utils::{View, ViewInner}, @@ -79,21 +80,25 @@ pub async fn build_system_handle< quorum_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), da_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), + Topic::Da, config.fixed_leader_for_gpuvid, ), vid_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), view_sync_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, + Topic::Global, config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 519621e9d1..76ccbc8625 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -28,7 +28,7 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ election::Membership, - network::ConnectedNetwork, + network::{ConnectedNetwork, Topic}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, HotShotConfig, ValidatorConfig, @@ -392,21 +392,25 @@ where quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), + Topic::Da, config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 517ca6cf5d..d362e4f991 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -30,6 +30,7 @@ use hotshot_types::{ #[allow(clippy::too_many_lines)] async fn test_network_task() { use futures::StreamExt; + use hotshot_types::traits::network::Topic; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -50,6 +51,7 @@ async fn test_network_task() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, + Topic::Global, config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = @@ -101,6 +103,7 @@ async fn test_network_task() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_network_storage_fail() { use futures::StreamExt; + use hotshot_types::traits::network::Topic; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -122,6 +125,7 @@ async fn test_network_storage_fail() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, + Topic::Global, config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index a392efb25c..591f7a5147 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,5 +1,5 @@ #![allow(clippy::panic)] -use std::{collections::BTreeSet, sync::Arc}; +use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; use hotshot::{ @@ -21,7 +21,7 @@ use hotshot_types::{ message::{DataMessage, Message, MessageKind, VersionedMessage}, signature_key::{BLSPubKey, BuilderKey}, traits::{ - network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation}, + network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, Topic}, node_implementation::{ConsensusTime, NodeType}, }, }; @@ -153,10 +153,10 @@ async fn memory_network_direct_queue() { trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); + let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); + let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Global], Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -204,14 +204,12 @@ async fn memory_network_direct_queue() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn memory_network_broadcast_queue() { - setup_logging(); // Make and connect the networking instances let group: Arc::SignatureKey>> = MasterMap::new(); - trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); + let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); + let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Da], Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -220,11 +218,7 @@ async fn memory_network_broadcast_queue() { for sent_message in first_messages { let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network1 - .broadcast_message( - serialized_message.clone(), - vec![pub_key_2].into_iter().collect::>(), - BroadcastDelay::None, - ) + .broadcast_message(serialized_message.clone(), Topic::Da, BroadcastDelay::None) .await .expect("Failed to message node"); let mut recv_messages = network2 @@ -246,7 +240,7 @@ async fn memory_network_broadcast_queue() { network2 .broadcast_message( serialized_message.clone(), - vec![pub_key_1].into_iter().collect::>(), + Topic::Global, BroadcastDelay::None, ) .await @@ -272,13 +266,12 @@ async fn memory_network_test_in_flight_message_count() { let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); + let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); + let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Global], Option::None); // Create some dummy messages let messages: Vec> = gen_messages(5, 100, pub_key_1); - let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); assert_eq!( TestableNetworkingImplementation::::in_flight_message_count(&network1), @@ -305,7 +298,7 @@ async fn memory_network_test_in_flight_message_count() { network2 .broadcast_message( serialized_message.clone(), - broadcast_recipients.clone(), + Topic::Global, BroadcastDelay::None, ) .await diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index c192e94680..9a7012bb00 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,7 +7,7 @@ use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; use snafu::Snafu; -use super::node_implementation::NodeType; +use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// Error for election problems @@ -30,6 +30,7 @@ pub trait Membership: fn create_election( all_nodes: Vec>, committee_members: Vec>, + committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self; @@ -50,6 +51,9 @@ pub trait Membership: /// Get whole (staked + non-staked) committee for view `view_number`. fn whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic; + /// Check if a key has stake fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 7fecd87c9c..e602c5598f 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -18,7 +18,7 @@ use tokio::time::error::Elapsed as TimeoutError; compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ collections::{BTreeSet, HashMap}, - fmt::Debug, + fmt::{Debug, Display}, hash::Hash, pin::Pin, sync::Arc, @@ -274,7 +274,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError>; @@ -674,3 +674,22 @@ impl NetworkReliability for ChaosNetwork { Uniform::new_inclusive(self.repeat_low, self.repeat_high).sample(&mut rand::thread_rng()) } } + +/// Used when broadcasting messages +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Topic { + /// The `Global` topic goes out to all nodes + Global, + /// The `Da` topic goes out to only the DA committee + Da, +} + +/// Libp2p topics require a string, so we need to convert our enum to a string +impl Display for Topic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Topic::Global => write!(f, "global"), + Topic::Da => write!(f, "DA"), + } + } +} From 132b00b1491bca3f4378f10796caad8573e23d06 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 29 Jul 2024 09:55:04 -0400 Subject: [PATCH 1145/1393] revert topic refactor to fix merge (#3503) --- examples/infra/mod.rs | 12 ++-- hotshot/src/lib.rs | 2 +- hotshot/src/traits.rs | 2 +- .../src/traits/election/static_committee.rs | 13 ---- .../src/traits/networking/combined_network.rs | 10 +-- .../src/traits/networking/libp2p_network.rs | 54 ++++++++++----- .../src/traits/networking/memory_network.rs | 68 ++++--------------- .../src/traits/networking/push_cdn_network.rs | 15 +--- task-impls/src/network.rs | 3 +- testing/src/helpers.rs | 5 -- testing/src/test_runner.rs | 6 +- testing/tests/tests_1/network_task.rs | 4 -- testing/tests/tests_3/memory_network.rs | 29 +++++--- types/src/traits/election.rs | 6 +- types/src/traits/network.rs | 23 +------ 15 files changed, 88 insertions(+), 164 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 23350e43bf..c8ca5e626d 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -21,8 +21,8 @@ use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, CombinedNetworks, Libp2pMetricsValue, - Libp2pNetwork, PushCdnNetwork, WrappedSignatureKey, + derive_libp2p_peer_id, CdnMetricsValue, CombinedNetworks, Libp2pMetricsValue, + Libp2pNetwork, PushCdnNetwork, Topic, WrappedSignatureKey, }, BlockPayload, NodeImplementation, }, @@ -52,7 +52,7 @@ use hotshot_types::{ traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, - network::{ConnectedNetwork, Topic}, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeType}, states::TestableState, }, @@ -376,7 +376,6 @@ pub trait RunDa< let quorum_membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), - Topic::Global, config.config.fixed_leader_for_gpuvid, ); @@ -385,7 +384,6 @@ pub trait RunDa< let da_membership = ::Membership::create_election( known_nodes_with_stake.clone(), config.config.known_da_nodes.clone(), - Topic::Da, config.config.fixed_leader_for_gpuvid, ); @@ -627,9 +625,9 @@ where }; // See if we should be DA, subscribe to the DA topic if so - let mut topics = vec![CdnTopic::Global]; + let mut topics = vec![Topic::Global]; if config.config.my_own_validator_config.is_da { - topics.push(CdnTopic::Da); + topics.push(Topic::Da); } // Create the network and await the initial connection diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4e47c9d037..fa57e2a187 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -432,7 +432,7 @@ impl> SystemContext { api .network.broadcast_message( serialized_message, - da_membership.committee_topic(), + da_membership.whole_committee(view_number), BroadcastDelay::None, ), api diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index a4d6372a5f..a70eca75a3 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -18,7 +18,7 @@ pub mod implementations { }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ - CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic as CdnTopic, + CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic, WrappedSignatureKey, }, }; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 3163d3b532..00f6f55b60 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,7 +1,6 @@ use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; -use hotshot_types::traits::network::Topic; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ @@ -27,9 +26,6 @@ pub struct GeneralStaticCommittee { fixed_leader_for_gpuvid: usize, /// Node type phantom _type_phantom: PhantomData, - - /// The network topic of the committee - committee_topic: Topic, } /// static committee using a vrf kp @@ -43,7 +39,6 @@ impl GeneralStaticCommittee { nodes_with_stake: Vec, nodes_without_stake: Vec, fixed_leader_for_gpuvid: usize, - committee_topic: Topic, ) -> Self { Self { all_nodes_with_stake: nodes_with_stake.clone(), @@ -51,7 +46,6 @@ impl GeneralStaticCommittee { committee_nodes_without_stake: nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, - committee_topic, } } } @@ -66,11 +60,6 @@ where self.committee_nodes_with_stake.clone() } - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic { - self.committee_topic.clone() - } - #[cfg(not(any( feature = "randomized-leader-election", feature = "fixed-leader-election" @@ -126,7 +115,6 @@ where fn create_election( mut all_nodes: Vec>, committee_members: Vec>, - committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self { let mut committee_nodes_with_stake = Vec::new(); @@ -163,7 +151,6 @@ where committee_nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, - committee_topic, } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 7c9b590144..250d0a1757 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -32,7 +32,7 @@ use hotshot_types::{ }, data::ViewNumber, traits::{ - network::{BroadcastDelay, ConnectedNetwork, ResponseChannel, Topic}, + network::{BroadcastDelay, ConnectedNetwork, ResponseChannel}, node_implementation::NodeType, }, BoxSyncFuture, @@ -372,24 +372,24 @@ impl ConnectedNetwork for CombinedNetworks async fn broadcast_message( &self, message: Vec, - topic: Topic, + recipients: BTreeSet, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); let primary_message = message.clone(); let secondary_message = message.clone(); - let topic_clone = topic.clone(); + let primary_recipients = recipients.clone(); self.send_both_networks( message, async move { primary - .broadcast_message(primary_message, topic_clone, BroadcastDelay::None) + .broadcast_message(primary_message, primary_recipients, BroadcastDelay::None) .await }, async move { secondary - .broadcast_message(secondary_message, topic, BroadcastDelay::None) + .broadcast_message(secondary_message, recipients, BroadcastDelay::None) .await }, broadcast_delay, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6af0d9ba81..c948202c47 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -45,7 +45,7 @@ use hotshot_types::{ traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, - network::{self, ConnectedNetwork, NetworkError, ResponseMessage, Topic}, + network::{self, ConnectedNetwork, NetworkError, ResponseMessage}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -60,8 +60,8 @@ use libp2p_networking::{ behaviours::request_response::{Request, Response}, spawn_network_node, MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, - NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeReceiver, - NetworkNodeType, DEFAULT_REPLICATION_FACTOR, + NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, + NetworkNodeReceiver, NetworkNodeType, DEFAULT_REPLICATION_FACTOR, }, reexport::{Multiaddr, ResponseChannel}, }; @@ -166,8 +166,10 @@ struct Libp2pNetworkInner { is_bootstrapped: Arc, /// The Libp2p metrics we're managing metrics: Libp2pMetricsValue, - /// The list of topics we're subscribed to - subscribed_topics: HashSet, + /// topic map + /// hash(hashset) -> topic + /// btreemap ordered so is hashable + topic_map: RwLock, String>>, /// the latest view number (for node lookup purposes) /// NOTE: supposed to represent a ViewNumber but we /// haven't made that atomic yet and we prefer lock-free @@ -296,6 +298,7 @@ impl TestableNetworkingImplementation .unwrap() }; let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); + let keys = all_keys.clone(); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); Box::pin(async move { @@ -306,8 +309,10 @@ impl TestableNetworkingImplementation pubkey.clone(), bootstrap_addrs_ref, usize::try_from(node_id).unwrap(), + keys, #[cfg(feature = "hotshot-testing")] reliability_config_dup, + da.clone(), da.contains(&pubkey), ) .await @@ -448,8 +453,12 @@ impl Libp2pNetwork { pub_key.clone(), Arc::new(RwLock::new(bootstrap_nodes)), usize::try_from(config.node_index)?, + // NOTE: this introduces an invariant that the keys are assigned using this indexed + // function + all_keys, #[cfg(feature = "hotshot-testing")] None, + da_keys.clone(), da_keys.contains(pub_key), ) .await?) @@ -490,7 +499,10 @@ impl Libp2pNetwork { pk: K, bootstrap_addrs: BootstrapAddrs, id: usize, + // HACK + quorum_public_keys: BTreeSet, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, + da_public_keys: BTreeSet, is_da: bool, ) -> Result, NetworkError> { // Error if there were no bootstrap nodes specified @@ -516,11 +528,11 @@ impl Libp2pNetwork { let mut pubkey_pid_map = BiHashMap::new(); pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); - // Subscribe to the relevant topics - let mut subscribed_topics = HashSet::from_iter(vec![QC_TOPIC.to_string()]); - if is_da { - subscribed_topics.insert("DA".to_string()); - } + let mut topic_map = BiHashMap::new(); + topic_map.insert(quorum_public_keys, QC_TOPIC.to_string()); + topic_map.insert(da_public_keys, "DA".to_string()); + + let topic_map = RwLock::new(topic_map); // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs @@ -544,7 +556,7 @@ impl Libp2pNetwork { dht_timeout: Duration::from_secs(120), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, - subscribed_topics, + topic_map, node_lookup_send, // Start the latest view from 0. "Latest" refers to "most recent view we are polling for // proposals on". We need this because to have consensus info injected we need a working @@ -910,7 +922,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn broadcast_message( &self, message: Vec, - topic: Topic, + recipients: BTreeSet, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // If we're not ready, return an error @@ -919,10 +931,20 @@ impl ConnectedNetwork for Libp2pNetwork { return Err(NetworkError::NotReady); }; - // If we are subscribed to the topic, - let topic = topic.to_string(); - if self.inner.subscribed_topics.contains(&topic) { - // Short-circuit-send the message to ourselves + let topic_map = self.inner.topic_map.read().await; + let topic = topic_map + .get_by_left(&recipients) + .ok_or_else(|| { + self.inner.metrics.num_failed_messages.add(1); + NetworkError::Libp2p { + source: Box::new(NetworkNodeHandleError::NoSuchTopic), + } + })? + .clone(); + + // gossip doesn't broadcast from itself, so special case + if recipients.contains(&self.inner.pk) { + // send to self self.inner.sender.send(message.clone()).await.map_err(|_| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d1f6c22bfe..e0b95c0ce8 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -26,7 +26,6 @@ use hotshot_types::{ traits::{ network::{ AsyncGenerator, BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, - Topic, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -47,10 +46,6 @@ pub struct MasterMap { /// The list of `MemoryNetwork`s #[debug(skip)] map: DashMap>, - - /// The list of `MemoryNetwork`s aggregated by topic - subscribed_map: DashMap)>>, - /// The id of this `MemoryNetwork` cluster id: u64, } @@ -61,7 +56,6 @@ impl MasterMap { pub fn new() -> Arc> { Arc::new(MasterMap { map: DashMap::new(), - subscribed_map: DashMap::new(), id: rand::thread_rng().gen(), }) } @@ -108,9 +102,8 @@ impl Debug for MemoryNetwork { impl MemoryNetwork { /// Creates a new `MemoryNetwork` and hooks it up to the group through the provided `MasterMap` pub fn new( - pub_key: &K, + pub_key: K, master_map: &Arc>, - subscribed_topics: &[Topic], reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); @@ -149,16 +142,8 @@ impl MemoryNetwork { reliability_config, }), }; - // Insert our public key into the master map - master_map.map.insert(pub_key.clone(), mn.clone()); - // Insert our subscribed topics into the master map - for topic in subscribed_topics { - master_map - .subscribed_map - .entry(topic.clone()) - .or_default() - .push((pub_key.clone(), mn.clone())); - } + master_map.map.insert(pub_key, mn.clone()); + trace!("Master map updated"); mn } @@ -184,7 +169,7 @@ impl TestableNetworkingImplementation _expected_node_count: usize, _num_bootstrap: usize, _network_id: usize, - da_committee_size: usize, + _da_committee_size: usize, _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, @@ -194,22 +179,7 @@ impl TestableNetworkingImplementation Box::pin(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - - // Subscribe to topics based on our index - let subscribed_topics = if node_id < da_committee_size as u64 { - // DA node - vec![Topic::Da, Topic::Global] - } else { - // Non-DA node - vec![Topic::Global] - }; - - let net = MemoryNetwork::new( - &pubkey, - &master, - &subscribed_topics, - reliability_config.clone(), - ); + let net = MemoryNetwork::new(pubkey, &master, reliability_config.clone()); Box::pin(async move { net.into() }) }) } @@ -249,20 +219,16 @@ impl ConnectedNetwork for MemoryNetwork { async fn broadcast_message( &self, message: Vec, - topic: Topic, + recipients: BTreeSet, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { trace!(?message, "Broadcasting message"); - for node in self - .inner - .master_map - .subscribed_map - .entry(topic) - .or_default() - .iter() - { + for node in &self.inner.master_map.map { // TODO delay/drop etc here - let (key, node) = node; + let (key, node) = node.pair(); + if !recipients.contains(key) { + continue; + } trace!(?key, "Sending message to node"); if let Some(ref config) = &self.inner.reliability_config { { @@ -302,17 +268,7 @@ impl ConnectedNetwork for MemoryNetwork { recipients: BTreeSet, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - // Iterate over all topics, compare to recipients, and get the `Topic` - let topic = self - .inner - .master_map - .subscribed_map - .iter() - .find(|v| v.value().iter().all(|(k, _)| recipients.contains(k))) - .map(|v| v.key().clone()) - .ok_or(NetworkError::NotFound)?; - - self.broadcast_message(message, topic, broadcast_delay) + self.broadcast_message(message, recipients, broadcast_delay) .await } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 9cb0e2b142..52a62e0b58 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -36,7 +36,7 @@ use hotshot_types::{ data::ViewNumber, traits::{ metrics::{Counter, Metrics, NoMetrics}, - network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError, Topic as HotShotTopic}, + network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -457,10 +457,10 @@ impl ConnectedNetwork for PushCdnNetwork, - topic: HotShotTopic, + _recipients: BTreeSet, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, topic.into()) + self.broadcast_message(message, Topic::Global) .await .map_err(|e| { self.metrics.num_failed_messages.add(1); @@ -565,12 +565,3 @@ impl ConnectedNetwork for PushCdnNetwork for Topic { - fn from(topic: HotShotTopic) -> Self { - match topic { - HotShotTopic::Global => Topic::Global, - HotShotTopic::Da => Topic::Da, - } - } -} diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 26ea2a9249..3573424af2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -419,7 +419,6 @@ impl< }; let view = message.kind.view_number(); let committee = membership.whole_committee(view); - let committee_topic = membership.committee_topic(); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); let decided_upgrade_certificate = self.decided_upgrade_certificate.clone(); @@ -456,7 +455,7 @@ impl< net.direct_message(serialized_message, recipient).await } TransmitType::Broadcast => { - net.broadcast_message(serialized_message, committee_topic, broadcast_delay) + net.broadcast_message(serialized_message, committee, broadcast_delay) .await } TransmitType::DaCommitteeBroadcast => { diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index ac6730b93a..f96e6569bc 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -28,7 +28,6 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - network::Topic, node_implementation::{ConsensusTime, NodeType}, }, utils::{View, ViewInner}, @@ -80,25 +79,21 @@ pub async fn build_system_handle< quorum_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), - Topic::Global, config.fixed_leader_for_gpuvid, ), da_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), - Topic::Da, config.fixed_leader_for_gpuvid, ), vid_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), - Topic::Global, config.fixed_leader_for_gpuvid, ), view_sync_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, - Topic::Global, config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 76ccbc8625..519621e9d1 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -28,7 +28,7 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ election::Membership, - network::{ConnectedNetwork, Topic}, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, HotShotConfig, ValidatorConfig, @@ -392,25 +392,21 @@ where quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), - Topic::Global, config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), - Topic::Da, config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), - Topic::Global, config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), - Topic::Global, config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index d362e4f991..517ca6cf5d 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -30,7 +30,6 @@ use hotshot_types::{ #[allow(clippy::too_many_lines)] async fn test_network_task() { use futures::StreamExt; - use hotshot_types::traits::network::Topic; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -51,7 +50,6 @@ async fn test_network_task() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, - Topic::Global, config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = @@ -103,7 +101,6 @@ async fn test_network_task() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_network_storage_fail() { use futures::StreamExt; - use hotshot_types::traits::network::Topic; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -125,7 +122,6 @@ async fn test_network_storage_fail() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, - Topic::Global, config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 591f7a5147..a392efb25c 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,5 +1,5 @@ #![allow(clippy::panic)] -use std::sync::Arc; +use std::{collections::BTreeSet, sync::Arc}; use async_compatibility_layer::logging::setup_logging; use hotshot::{ @@ -21,7 +21,7 @@ use hotshot_types::{ message::{DataMessage, Message, MessageKind, VersionedMessage}, signature_key::{BLSPubKey, BuilderKey}, traits::{ - network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, Topic}, + network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation}, node_implementation::{ConsensusTime, NodeType}, }, }; @@ -153,10 +153,10 @@ async fn memory_network_direct_queue() { trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); + let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Global], Option::None); + let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -204,12 +204,14 @@ async fn memory_network_direct_queue() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn memory_network_broadcast_queue() { + setup_logging(); // Make and connect the networking instances let group: Arc::SignatureKey>> = MasterMap::new(); + trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); + let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Da], Option::None); + let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -218,7 +220,11 @@ async fn memory_network_broadcast_queue() { for sent_message in first_messages { let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network1 - .broadcast_message(serialized_message.clone(), Topic::Da, BroadcastDelay::None) + .broadcast_message( + serialized_message.clone(), + vec![pub_key_2].into_iter().collect::>(), + BroadcastDelay::None, + ) .await .expect("Failed to message node"); let mut recv_messages = network2 @@ -240,7 +246,7 @@ async fn memory_network_broadcast_queue() { network2 .broadcast_message( serialized_message.clone(), - Topic::Global, + vec![pub_key_1].into_iter().collect::>(), BroadcastDelay::None, ) .await @@ -266,12 +272,13 @@ async fn memory_network_test_in_flight_message_count() { let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); + let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Global], Option::None); + let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); // Create some dummy messages let messages: Vec> = gen_messages(5, 100, pub_key_1); + let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); assert_eq!( TestableNetworkingImplementation::::in_flight_message_count(&network1), @@ -298,7 +305,7 @@ async fn memory_network_test_in_flight_message_count() { network2 .broadcast_message( serialized_message.clone(), - Topic::Global, + broadcast_recipients.clone(), BroadcastDelay::None, ) .await diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 9a7012bb00..c192e94680 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,7 +7,7 @@ use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; use snafu::Snafu; -use super::{network::Topic, node_implementation::NodeType}; +use super::node_implementation::NodeType; use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// Error for election problems @@ -30,7 +30,6 @@ pub trait Membership: fn create_election( all_nodes: Vec>, committee_members: Vec>, - committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self; @@ -51,9 +50,6 @@ pub trait Membership: /// Get whole (staked + non-staked) committee for view `view_number`. fn whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic; - /// Check if a key has stake fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index e602c5598f..7fecd87c9c 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -18,7 +18,7 @@ use tokio::time::error::Elapsed as TimeoutError; compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ collections::{BTreeSet, HashMap}, - fmt::{Debug, Display}, + fmt::Debug, hash::Hash, pin::Pin, sync::Arc, @@ -274,7 +274,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st async fn broadcast_message( &self, message: Vec, - topic: Topic, + recipients: BTreeSet, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError>; @@ -674,22 +674,3 @@ impl NetworkReliability for ChaosNetwork { Uniform::new_inclusive(self.repeat_low, self.repeat_high).sample(&mut rand::thread_rng()) } } - -/// Used when broadcasting messages -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Topic { - /// The `Global` topic goes out to all nodes - Global, - /// The `Da` topic goes out to only the DA committee - Da, -} - -/// Libp2p topics require a string, so we need to convert our enum to a string -impl Display for Topic { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Topic::Global => write!(f, "global"), - Topic::Da => write!(f, "DA"), - } - } -} From 1985634140432e5682ad6d7c77feb2e6c98c48d4 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 29 Jul 2024 16:40:56 +0200 Subject: [PATCH 1146/1393] Fix builder API /bundle method (#3502) --- builder-api/api/v0_3/builder.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/builder-api/api/v0_3/builder.toml b/builder-api/api/v0_3/builder.toml index fbdd3a1c98..5614e38bb8 100644 --- a/builder-api/api/v0_3/builder.toml +++ b/builder-api/api/v0_3/builder.toml @@ -29,7 +29,6 @@ FORMAT_VERSION = "0.1.0" [route.bundle] PATH = ["bundle/:view_number"] ":view_number" = "Integer" -METHOD = "POST" DOC = """ Fetch the bundle from the builder for the specified view. """ From 22b47428e0403bf06f86a3df3276b9fbd2ccc9e8 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 29 Jul 2024 10:45:02 -0400 Subject: [PATCH 1147/1393] Topic refactor (#3504) * topic refactor * remove topic map * lint --- examples/infra/mod.rs | 12 ++-- hotshot/src/lib.rs | 2 +- hotshot/src/traits.rs | 2 +- .../src/traits/election/static_committee.rs | 13 ++++ .../static_committee_leader_two_views.rs | 13 ++++ .../src/traits/networking/combined_network.rs | 10 +-- .../src/traits/networking/libp2p_network.rs | 54 +++++---------- .../src/traits/networking/memory_network.rs | 68 +++++++++++++++---- .../src/traits/networking/push_cdn_network.rs | 15 +++- task-impls/src/network.rs | 3 +- testing/src/helpers.rs | 5 ++ testing/src/test_runner.rs | 6 +- testing/tests/tests_1/network_task.rs | 4 ++ testing/tests/tests_3/memory_network.rs | 29 +++----- types/src/traits/election.rs | 6 +- types/src/traits/network.rs | 23 ++++++- 16 files changed, 177 insertions(+), 88 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index c8ca5e626d..23350e43bf 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -21,8 +21,8 @@ use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CombinedNetworks, Libp2pMetricsValue, - Libp2pNetwork, PushCdnNetwork, Topic, WrappedSignatureKey, + derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, CombinedNetworks, Libp2pMetricsValue, + Libp2pNetwork, PushCdnNetwork, WrappedSignatureKey, }, BlockPayload, NodeImplementation, }, @@ -52,7 +52,7 @@ use hotshot_types::{ traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, - network::ConnectedNetwork, + network::{ConnectedNetwork, Topic}, node_implementation::{ConsensusTime, NodeType}, states::TestableState, }, @@ -376,6 +376,7 @@ pub trait RunDa< let quorum_membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.config.fixed_leader_for_gpuvid, ); @@ -384,6 +385,7 @@ pub trait RunDa< let da_membership = ::Membership::create_election( known_nodes_with_stake.clone(), config.config.known_da_nodes.clone(), + Topic::Da, config.config.fixed_leader_for_gpuvid, ); @@ -625,9 +627,9 @@ where }; // See if we should be DA, subscribe to the DA topic if so - let mut topics = vec![Topic::Global]; + let mut topics = vec![CdnTopic::Global]; if config.config.my_own_validator_config.is_da { - topics.push(Topic::Da); + topics.push(CdnTopic::Da); } // Create the network and await the initial connection diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa57e2a187..4e47c9d037 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -432,7 +432,7 @@ impl> SystemContext { api .network.broadcast_message( serialized_message, - da_membership.whole_committee(view_number), + da_membership.committee_topic(), BroadcastDelay::None, ), api diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index a70eca75a3..a4d6372a5f 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -18,7 +18,7 @@ pub mod implementations { }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ - CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic, + CdnMetricsValue, KeyPair, ProductionDef, PushCdnNetwork, TestingDef, Topic as CdnTopic, WrappedSignatureKey, }, }; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 00f6f55b60..3163d3b532 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,6 +1,7 @@ use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; +use hotshot_types::traits::network::Topic; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ @@ -26,6 +27,9 @@ pub struct GeneralStaticCommittee { fixed_leader_for_gpuvid: usize, /// Node type phantom _type_phantom: PhantomData, + + /// The network topic of the committee + committee_topic: Topic, } /// static committee using a vrf kp @@ -39,6 +43,7 @@ impl GeneralStaticCommittee { nodes_with_stake: Vec, nodes_without_stake: Vec, fixed_leader_for_gpuvid: usize, + committee_topic: Topic, ) -> Self { Self { all_nodes_with_stake: nodes_with_stake.clone(), @@ -46,6 +51,7 @@ impl GeneralStaticCommittee { committee_nodes_without_stake: nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, + committee_topic, } } } @@ -60,6 +66,11 @@ where self.committee_nodes_with_stake.clone() } + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic { + self.committee_topic.clone() + } + #[cfg(not(any( feature = "randomized-leader-election", feature = "fixed-leader-election" @@ -115,6 +126,7 @@ where fn create_election( mut all_nodes: Vec>, committee_members: Vec>, + committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self { let mut committee_nodes_with_stake = Vec::new(); @@ -151,6 +163,7 @@ where committee_nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, + committee_topic, } } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 8d3c0c78e7..882b7565c1 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -1,6 +1,7 @@ use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; +use hotshot_types::traits::network::Topic; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ @@ -24,6 +25,9 @@ pub struct StaticCommitteeLeaderForTwoViews { fixed_leader_for_gpuvid: usize, /// Node type phantom _type_phantom: PhantomData, + + /// The network topic of the committee + committee_topic: Topic, } /// static committee using a vrf kp @@ -37,6 +41,7 @@ impl StaticCommitteeLeaderForTwoViews { nodes_with_stake: Vec, nodes_without_stake: Vec, fixed_leader_for_gpuvid: usize, + committee_topic: Topic, ) -> Self { Self { all_nodes_with_stake: nodes_with_stake.clone(), @@ -44,6 +49,7 @@ impl StaticCommitteeLeaderForTwoViews { committee_nodes_without_stake: nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, + committee_topic, } } } @@ -58,6 +64,11 @@ where self.committee_nodes_with_stake.clone() } + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic { + self.committee_topic.clone() + } + /// Index the vector of public keys with the current view number fn leader(&self, view_number: TYPES::Time) -> PUBKEY { // two connsecutive views will have same index starting with even number. @@ -88,6 +99,7 @@ where fn create_election( mut all_nodes: Vec>, committee_members: Vec>, + committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self { let mut committee_nodes_with_stake = Vec::new(); @@ -124,6 +136,7 @@ where committee_nodes_without_stake, fixed_leader_for_gpuvid, _type_phantom: PhantomData, + committee_topic, } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 250d0a1757..7c9b590144 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -32,7 +32,7 @@ use hotshot_types::{ }, data::ViewNumber, traits::{ - network::{BroadcastDelay, ConnectedNetwork, ResponseChannel}, + network::{BroadcastDelay, ConnectedNetwork, ResponseChannel, Topic}, node_implementation::NodeType, }, BoxSyncFuture, @@ -372,24 +372,24 @@ impl ConnectedNetwork for CombinedNetworks async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); let secondary = self.secondary().clone(); let primary_message = message.clone(); let secondary_message = message.clone(); - let primary_recipients = recipients.clone(); + let topic_clone = topic.clone(); self.send_both_networks( message, async move { primary - .broadcast_message(primary_message, primary_recipients, BroadcastDelay::None) + .broadcast_message(primary_message, topic_clone, BroadcastDelay::None) .await }, async move { secondary - .broadcast_message(secondary_message, recipients, BroadcastDelay::None) + .broadcast_message(secondary_message, topic, BroadcastDelay::None) .await }, broadcast_delay, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index c948202c47..6af0d9ba81 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -45,7 +45,7 @@ use hotshot_types::{ traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, - network::{self, ConnectedNetwork, NetworkError, ResponseMessage}, + network::{self, ConnectedNetwork, NetworkError, ResponseMessage, Topic}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -60,8 +60,8 @@ use libp2p_networking::{ behaviours::request_response::{Request, Response}, spawn_network_node, MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, - NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, - NetworkNodeReceiver, NetworkNodeType, DEFAULT_REPLICATION_FACTOR, + NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeReceiver, + NetworkNodeType, DEFAULT_REPLICATION_FACTOR, }, reexport::{Multiaddr, ResponseChannel}, }; @@ -166,10 +166,8 @@ struct Libp2pNetworkInner { is_bootstrapped: Arc, /// The Libp2p metrics we're managing metrics: Libp2pMetricsValue, - /// topic map - /// hash(hashset) -> topic - /// btreemap ordered so is hashable - topic_map: RwLock, String>>, + /// The list of topics we're subscribed to + subscribed_topics: HashSet, /// the latest view number (for node lookup purposes) /// NOTE: supposed to represent a ViewNumber but we /// haven't made that atomic yet and we prefer lock-free @@ -298,7 +296,6 @@ impl TestableNetworkingImplementation .unwrap() }; let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); - let keys = all_keys.clone(); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); Box::pin(async move { @@ -309,10 +306,8 @@ impl TestableNetworkingImplementation pubkey.clone(), bootstrap_addrs_ref, usize::try_from(node_id).unwrap(), - keys, #[cfg(feature = "hotshot-testing")] reliability_config_dup, - da.clone(), da.contains(&pubkey), ) .await @@ -453,12 +448,8 @@ impl Libp2pNetwork { pub_key.clone(), Arc::new(RwLock::new(bootstrap_nodes)), usize::try_from(config.node_index)?, - // NOTE: this introduces an invariant that the keys are assigned using this indexed - // function - all_keys, #[cfg(feature = "hotshot-testing")] None, - da_keys.clone(), da_keys.contains(pub_key), ) .await?) @@ -499,10 +490,7 @@ impl Libp2pNetwork { pk: K, bootstrap_addrs: BootstrapAddrs, id: usize, - // HACK - quorum_public_keys: BTreeSet, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, - da_public_keys: BTreeSet, is_da: bool, ) -> Result, NetworkError> { // Error if there were no bootstrap nodes specified @@ -528,11 +516,11 @@ impl Libp2pNetwork { let mut pubkey_pid_map = BiHashMap::new(); pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); - let mut topic_map = BiHashMap::new(); - topic_map.insert(quorum_public_keys, QC_TOPIC.to_string()); - topic_map.insert(da_public_keys, "DA".to_string()); - - let topic_map = RwLock::new(topic_map); + // Subscribe to the relevant topics + let mut subscribed_topics = HashSet::from_iter(vec![QC_TOPIC.to_string()]); + if is_da { + subscribed_topics.insert("DA".to_string()); + } // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs @@ -556,7 +544,7 @@ impl Libp2pNetwork { dht_timeout: Duration::from_secs(120), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, - topic_map, + subscribed_topics, node_lookup_send, // Start the latest view from 0. "Latest" refers to "most recent view we are polling for // proposals on". We need this because to have consensus info injected we need a working @@ -922,7 +910,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // If we're not ready, return an error @@ -931,20 +919,10 @@ impl ConnectedNetwork for Libp2pNetwork { return Err(NetworkError::NotReady); }; - let topic_map = self.inner.topic_map.read().await; - let topic = topic_map - .get_by_left(&recipients) - .ok_or_else(|| { - self.inner.metrics.num_failed_messages.add(1); - NetworkError::Libp2p { - source: Box::new(NetworkNodeHandleError::NoSuchTopic), - } - })? - .clone(); - - // gossip doesn't broadcast from itself, so special case - if recipients.contains(&self.inner.pk) { - // send to self + // If we are subscribed to the topic, + let topic = topic.to_string(); + if self.inner.subscribed_topics.contains(&topic) { + // Short-circuit-send the message to ourselves self.inner.sender.send(message.clone()).await.map_err(|_| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index e0b95c0ce8..d1f6c22bfe 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -26,6 +26,7 @@ use hotshot_types::{ traits::{ network::{ AsyncGenerator, BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, + Topic, }, node_implementation::NodeType, signature_key::SignatureKey, @@ -46,6 +47,10 @@ pub struct MasterMap { /// The list of `MemoryNetwork`s #[debug(skip)] map: DashMap>, + + /// The list of `MemoryNetwork`s aggregated by topic + subscribed_map: DashMap)>>, + /// The id of this `MemoryNetwork` cluster id: u64, } @@ -56,6 +61,7 @@ impl MasterMap { pub fn new() -> Arc> { Arc::new(MasterMap { map: DashMap::new(), + subscribed_map: DashMap::new(), id: rand::thread_rng().gen(), }) } @@ -102,8 +108,9 @@ impl Debug for MemoryNetwork { impl MemoryNetwork { /// Creates a new `MemoryNetwork` and hooks it up to the group through the provided `MasterMap` pub fn new( - pub_key: K, + pub_key: &K, master_map: &Arc>, + subscribed_topics: &[Topic], reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); @@ -142,8 +149,16 @@ impl MemoryNetwork { reliability_config, }), }; - master_map.map.insert(pub_key, mn.clone()); - trace!("Master map updated"); + // Insert our public key into the master map + master_map.map.insert(pub_key.clone(), mn.clone()); + // Insert our subscribed topics into the master map + for topic in subscribed_topics { + master_map + .subscribed_map + .entry(topic.clone()) + .or_default() + .push((pub_key.clone(), mn.clone())); + } mn } @@ -169,7 +184,7 @@ impl TestableNetworkingImplementation _expected_node_count: usize, _num_bootstrap: usize, _network_id: usize, - _da_committee_size: usize, + da_committee_size: usize, _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, @@ -179,7 +194,22 @@ impl TestableNetworkingImplementation Box::pin(move |node_id| { let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); - let net = MemoryNetwork::new(pubkey, &master, reliability_config.clone()); + + // Subscribe to topics based on our index + let subscribed_topics = if node_id < da_committee_size as u64 { + // DA node + vec![Topic::Da, Topic::Global] + } else { + // Non-DA node + vec![Topic::Global] + }; + + let net = MemoryNetwork::new( + &pubkey, + &master, + &subscribed_topics, + reliability_config.clone(), + ); Box::pin(async move { net.into() }) }) } @@ -219,16 +249,20 @@ impl ConnectedNetwork for MemoryNetwork { async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { trace!(?message, "Broadcasting message"); - for node in &self.inner.master_map.map { + for node in self + .inner + .master_map + .subscribed_map + .entry(topic) + .or_default() + .iter() + { // TODO delay/drop etc here - let (key, node) = node.pair(); - if !recipients.contains(key) { - continue; - } + let (key, node) = node; trace!(?key, "Sending message to node"); if let Some(ref config) = &self.inner.reliability_config { { @@ -268,7 +302,17 @@ impl ConnectedNetwork for MemoryNetwork { recipients: BTreeSet, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, recipients, broadcast_delay) + // Iterate over all topics, compare to recipients, and get the `Topic` + let topic = self + .inner + .master_map + .subscribed_map + .iter() + .find(|v| v.value().iter().all(|(k, _)| recipients.contains(k))) + .map(|v| v.key().clone()) + .ok_or(NetworkError::NotFound)?; + + self.broadcast_message(message, topic, broadcast_delay) .await } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 52a62e0b58..9cb0e2b142 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -36,7 +36,7 @@ use hotshot_types::{ data::ViewNumber, traits::{ metrics::{Counter, Metrics, NoMetrics}, - network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError}, + network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError, Topic as HotShotTopic}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -457,10 +457,10 @@ impl ConnectedNetwork for PushCdnNetwork, - _recipients: BTreeSet, + topic: HotShotTopic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - self.broadcast_message(message, Topic::Global) + self.broadcast_message(message, topic.into()) .await .map_err(|e| { self.metrics.num_failed_messages.add(1); @@ -565,3 +565,12 @@ impl ConnectedNetwork for PushCdnNetwork for Topic { + fn from(topic: HotShotTopic) -> Self { + match topic { + HotShotTopic::Global => Topic::Global, + HotShotTopic::Da => Topic::Da, + } + } +} diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 3573424af2..26ea2a9249 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -419,6 +419,7 @@ impl< }; let view = message.kind.view_number(); let committee = membership.whole_committee(view); + let committee_topic = membership.committee_topic(); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); let decided_upgrade_certificate = self.decided_upgrade_certificate.clone(); @@ -455,7 +456,7 @@ impl< net.direct_message(serialized_message, recipient).await } TransmitType::Broadcast => { - net.broadcast_message(serialized_message, committee, broadcast_delay) + net.broadcast_message(serialized_message, committee_topic, broadcast_delay) .await } TransmitType::DaCommitteeBroadcast => { diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index f96e6569bc..ac6730b93a 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -28,6 +28,7 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, + network::Topic, node_implementation::{ConsensusTime, NodeType}, }, utils::{View, ViewInner}, @@ -79,21 +80,25 @@ pub async fn build_system_handle< quorum_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), da_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), + Topic::Da, config.fixed_leader_for_gpuvid, ), vid_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), view_sync_membership: TYPES::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, + Topic::Global, config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 519621e9d1..76ccbc8625 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -28,7 +28,7 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ election::Membership, - network::ConnectedNetwork, + network::{ConnectedNetwork, Topic}, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, HotShotConfig, ValidatorConfig, @@ -392,21 +392,25 @@ where quorum_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), da_membership: ::Membership::create_election( known_nodes_with_stake.clone(), config.known_da_nodes.clone(), + Topic::Da, config.fixed_leader_for_gpuvid, ), vid_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), view_sync_membership: ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake.clone(), + Topic::Global, config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 517ca6cf5d..d362e4f991 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -30,6 +30,7 @@ use hotshot_types::{ #[allow(clippy::too_many_lines)] async fn test_network_task() { use futures::StreamExt; + use hotshot_types::traits::network::Topic; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -50,6 +51,7 @@ async fn test_network_task() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, + Topic::Global, config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = @@ -101,6 +103,7 @@ async fn test_network_task() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_network_storage_fail() { use futures::StreamExt; + use hotshot_types::traits::network::Topic; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -122,6 +125,7 @@ async fn test_network_storage_fail() { let membership = ::Membership::create_election( known_nodes_with_stake.clone(), known_nodes_with_stake, + Topic::Global, config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index a392efb25c..591f7a5147 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,5 +1,5 @@ #![allow(clippy::panic)] -use std::{collections::BTreeSet, sync::Arc}; +use std::sync::Arc; use async_compatibility_layer::logging::setup_logging; use hotshot::{ @@ -21,7 +21,7 @@ use hotshot_types::{ message::{DataMessage, Message, MessageKind, VersionedMessage}, signature_key::{BLSPubKey, BuilderKey}, traits::{ - network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation}, + network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, Topic}, node_implementation::{ConsensusTime, NodeType}, }, }; @@ -153,10 +153,10 @@ async fn memory_network_direct_queue() { trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); + let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); + let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Global], Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -204,14 +204,12 @@ async fn memory_network_direct_queue() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[instrument] async fn memory_network_broadcast_queue() { - setup_logging(); // Make and connect the networking instances let group: Arc::SignatureKey>> = MasterMap::new(); - trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); + let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); + let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Da], Option::None); let first_messages: Vec> = gen_messages(5, 100, pub_key_1); @@ -220,11 +218,7 @@ async fn memory_network_broadcast_queue() { for sent_message in first_messages { let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); network1 - .broadcast_message( - serialized_message.clone(), - vec![pub_key_2].into_iter().collect::>(), - BroadcastDelay::None, - ) + .broadcast_message(serialized_message.clone(), Topic::Da, BroadcastDelay::None) .await .expect("Failed to message node"); let mut recv_messages = network2 @@ -246,7 +240,7 @@ async fn memory_network_broadcast_queue() { network2 .broadcast_message( serialized_message.clone(), - vec![pub_key_1].into_iter().collect::>(), + Topic::Global, BroadcastDelay::None, ) .await @@ -272,13 +266,12 @@ async fn memory_network_test_in_flight_message_count() { let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let pub_key_1 = pubkey(); - let network1 = MemoryNetwork::new(pub_key_1, &group.clone(), Option::None); + let network1 = MemoryNetwork::new(&pub_key_1, &group.clone(), &[Topic::Global], Option::None); let pub_key_2 = pubkey(); - let network2 = MemoryNetwork::new(pub_key_2, &group, Option::None); + let network2 = MemoryNetwork::new(&pub_key_2, &group, &[Topic::Global], Option::None); // Create some dummy messages let messages: Vec> = gen_messages(5, 100, pub_key_1); - let broadcast_recipients = BTreeSet::from([pub_key_1, pub_key_2]); assert_eq!( TestableNetworkingImplementation::::in_flight_message_count(&network1), @@ -305,7 +298,7 @@ async fn memory_network_test_in_flight_message_count() { network2 .broadcast_message( serialized_message.clone(), - broadcast_recipients.clone(), + Topic::Global, BroadcastDelay::None, ) .await diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index c192e94680..9a7012bb00 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,7 +7,7 @@ use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; use snafu::Snafu; -use super::node_implementation::NodeType; +use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// Error for election problems @@ -30,6 +30,7 @@ pub trait Membership: fn create_election( all_nodes: Vec>, committee_members: Vec>, + committee_topic: Topic, fixed_leader_for_gpuvid: usize, ) -> Self; @@ -50,6 +51,9 @@ pub trait Membership: /// Get whole (staked + non-staked) committee for view `view_number`. fn whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic; + /// Check if a key has stake fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 7fecd87c9c..e602c5598f 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -18,7 +18,7 @@ use tokio::time::error::Elapsed as TimeoutError; compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ collections::{BTreeSet, HashMap}, - fmt::Debug, + fmt::{Debug, Display}, hash::Hash, pin::Pin, sync::Arc, @@ -274,7 +274,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st async fn broadcast_message( &self, message: Vec, - recipients: BTreeSet, + topic: Topic, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError>; @@ -674,3 +674,22 @@ impl NetworkReliability for ChaosNetwork { Uniform::new_inclusive(self.repeat_low, self.repeat_high).sample(&mut rand::thread_rng()) } } + +/// Used when broadcasting messages +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Topic { + /// The `Global` topic goes out to all nodes + Global, + /// The `Da` topic goes out to only the DA committee + Da, +} + +/// Libp2p topics require a string, so we need to convert our enum to a string +impl Display for Topic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Topic::Global => write!(f, "global"), + Topic::Da => write!(f, "DA"), + } + } +} From 529fb87a4344027ea5050dc2466e31fb37e0cd32 Mon Sep 17 00:00:00 2001 From: osrm <90407222+osrm@users.noreply.github.com> Date: Mon, 29 Jul 2024 23:49:28 +0900 Subject: [PATCH 1148/1393] chore: correction typos (#3499) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: correction typo in CHANGELOG.md "paramaeter" to "parameter" * chore: correction typo README.md "quourm propsoals" → "quorum proposals" * chore: correction typo in QuorumProposalRecv.md QuorumPropsoal => QuorumProposal --- task-impls/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/task-impls/README.md b/task-impls/README.md index 9dd0c2ae81..7c57137183 100644 --- a/task-impls/README.md +++ b/task-impls/README.md @@ -1,6 +1,6 @@ -HotShot uses an event-based architecture. This architecture is made of 4 main tasks: Network Task, View Sync Task, Consensus Task, and DA Task. The Network Task handles all incoming and outgoing messages. It forwards incoming messages to the correct task and listens for outgoing messages from the other tasks. The View Sync Task coordinates the view sync protocol. It listens for timeout events from the Consensus Task. Once a certain threshold of timeouts seen has been reached, the View Sync Task starts the View Sync protocol to bring the network back into agreement on which view it should be in. The Consensus Task handles the core HotShot consensus logic. It manages replicas that listen for quourm propsoals and vote on them, leaders who send quorum proposals, and next leaders who listen for quorum votes and form QCs. The DA task handles the data availability protocol of HotShot. It listens for DA proposals, sends DA proposals, and forms a Data Availability Certificate (DAC) +HotShot uses an event-based architecture. This architecture is made of 4 main tasks: Network Task, View Sync Task, Consensus Task, and DA Task. The Network Task handles all incoming and outgoing messages. It forwards incoming messages to the correct task and listens for outgoing messages from the other tasks. The View Sync Task coordinates the view sync protocol. It listens for timeout events from the Consensus Task. Once a certain threshold of timeouts seen has been reached, the View Sync Task starts the View Sync protocol to bring the network back into agreement on which view it should be in. The Consensus Task handles the core HotShot consensus logic. It manages replicas that listen for quorum proposals and vote on them, leaders who send quorum proposals, and next leaders who listen for quorum votes and form QCs. The DA task handles the data availability protocol of HotShot. It listens for DA proposals, sends DA proposals, and forms a Data Availability Certificate (DAC) A diagram of how events interact with each task is below: ![HotShot Event Architecture](HotShot_event_architecture.png) -For more information about each event see `./src/events.rs` \ No newline at end of file +For more information about each event see `./src/events.rs` From b6a87d759d2aab855eb56665c6742637b693f7fe Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 29 Jul 2024 11:43:54 -0400 Subject: [PATCH 1149/1393] [AUDIT][LOW SEVERITY] Fix Typo in Dependency Task Startup (#3506) * fix typo * fix build --- hotshot/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4e47c9d037..8e67eda4c1 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -295,8 +295,8 @@ impl> SystemContext { /// Panics if sending genesis fails #[instrument(skip_all, target = "SystemContext", fields(id = self.id))] pub async fn start_consensus(&self) { - #[cfg(feature = "dependncy-tasks")] - error!("HotShot is running with the dependency tasks feature enabled!!"); + #[cfg(feature = "dependency-tasks")] + tracing::error!("HotShot is running with the dependency tasks feature enabled!!"); #[cfg(all(feature = "rewind", not(debug_assertions)))] compile_error!("Cannot run rewind in production builds!"); From 67956a0d8529a334d488c62316406b71b5f96a27 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:09:11 -0400 Subject: [PATCH 1150/1393] temp dir CDN (#3509) --- examples/push-cdn/all.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index ac3d9c2df1..d5a2f98906 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -2,6 +2,8 @@ /// The types we're importing pub mod types; +use std::path::Path; + use async_compatibility_layer::art::async_spawn; use cdn_broker::{reexports::crypto::signature::KeyPair, Broker}; use cdn_marshal::Marshal; @@ -13,6 +15,7 @@ use hotshot_example_types::state_types::TestTypes; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT}; +use rand::{rngs::StdRng, RngCore, SeedableRng}; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, @@ -47,8 +50,17 @@ async fn main() { let (broker_public_key, broker_private_key) = ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337); - // The broker (peer) discovery endpoint shall be a local SQLite file - let discovery_endpoint = "test.sqlite".to_string(); + // Get the OS temporary directory + let temp_dir = std::env::temp_dir(); + + // Create an SQLite file inside of the temporary directory + let discovery_endpoint = temp_dir + .join(Path::new(&format!( + "test-{}.sqlite", + StdRng::from_entropy().next_u64() + ))) + .to_string_lossy() + .into_owned(); // 2 brokers for _ in 0..2 { From fd7db49e7cc42d100bf0a470eb9184508b6edfea Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:18:34 -0400 Subject: [PATCH 1151/1393] ungate dependency task modules (#3511) --- hotshot/src/tasks/mod.rs | 14 +++++++------- hotshot/src/traits/election/static_committee.rs | 6 ++++-- .../election/static_committee_leader_two_views.rs | 6 ++++-- task-impls/src/quorum_proposal/handlers.rs | 2 -- task-impls/src/quorum_proposal/mod.rs | 2 -- task-impls/src/quorum_proposal_recv/handlers.rs | 1 - task-impls/src/quorum_proposal_recv/mod.rs | 1 - task-impls/src/quorum_vote/handlers.rs | 2 -- task-impls/src/quorum_vote/mod.rs | 2 -- 9 files changed, 15 insertions(+), 21 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index a370034a97..dbaa964bc8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -9,15 +9,8 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::Task; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_task_impls::consensus::ConsensusTaskState; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; -#[cfg(feature = "dependency-tasks")] -use hotshot_task_impls::{ - consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, - quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, -}; use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, @@ -191,10 +184,17 @@ pub async fn add_consensus_tasks>( { #![cfg(not(feature = "dependency-tasks"))] + use hotshot_task_impls::consensus::ConsensusTaskState; + handle.add_task(ConsensusTaskState::::create_from(handle).await); } { #![cfg(feature = "dependency-tasks")] + use hotshot_task_impls::{ + consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, + }; + handle.add_task(QuorumProposalTaskState::::create_from(handle).await); handle.add_task(QuorumVoteTaskState::::create_from(handle).await); handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 3163d3b532..255c446401 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,12 +1,14 @@ use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; -use hotshot_types::traits::network::Topic; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ signature_key::BLSPubKey, - traits::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}, + traits::{ + election::Membership, network::Topic, node_implementation::NodeType, + signature_key::SignatureKey, + }, PeerConfig, }; #[cfg(feature = "randomized-leader-election")] diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 882b7565c1..d114a5d904 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -1,12 +1,14 @@ use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; -use hotshot_types::traits::network::Topic; // use ark_bls12_381::Parameters as Param381; use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ signature_key::BLSPubKey, - traits::{election::Membership, node_implementation::NodeType, signature_key::SignatureKey}, + traits::{ + election::Membership, network::Topic, node_implementation::NodeType, + signature_key::SignatureKey, + }, PeerConfig, }; use tracing::debug; diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 20f8dbbacd..b344d4679f 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -1,8 +1,6 @@ //! This module holds the dependency task for the QuorumProposalTask. It is spawned whenever an event that could //! initiate a proposal occurs. -#![cfg(feature = "dependency-tasks")] - use std::{marker::PhantomData, sync::Arc, time::Duration}; use anyhow::{ensure, Context, Result}; diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index ea0059b230..596ff0f7d7 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -1,5 +1,3 @@ -#![cfg(feature = "dependency-tasks")] - use std::{collections::HashMap, sync::Arc}; use anyhow::Result; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index e64fefa9af..fdfafb6666 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -1,5 +1,4 @@ #![allow(dead_code)] -#![cfg(feature = "dependency-tasks")] use std::sync::Arc; diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 4d79419c2f..80a53375e6 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -1,5 +1,4 @@ #![allow(unused_imports)] -#![cfg(feature = "dependency-tasks")] use std::{collections::BTreeMap, sync::Arc}; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 6f29c5f701..8de6d2eea6 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -1,5 +1,3 @@ -#![cfg(feature = "dependency-tasks")] - use std::sync::Arc; use anyhow::Result; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 6bc60beb08..5314a27f57 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -1,5 +1,3 @@ -#![cfg(feature = "dependency-tasks")] - use std::{collections::HashMap, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; From d3bcce9fb4994986eeb99b8fddf4d42393ee3956 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 30 Jul 2024 09:52:44 -0400 Subject: [PATCH 1152/1393] remove useless check and remove paramteter (#3507) --- task-impls/src/consensus/mod.rs | 4 ++-- task-impls/src/consensus2/handlers.rs | 10 ++-------- task-impls/src/da.rs | 2 +- task-impls/src/upgrade.rs | 2 +- task-impls/src/view_sync.rs | 9 +++------ task-impls/src/vote_collection.rs | 9 --------- 6 files changed, 9 insertions(+), 27 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 352363e380..be087581f8 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -314,7 +314,7 @@ impl> ConsensusTaskState TYPES, QuorumVote, QuorumCertificate, - >(&info, vote.clone(), event, &event_stream) + >(&info, event, &event_stream) .await; } else { let result = collector @@ -353,7 +353,7 @@ impl> ConsensusTaskState TYPES, TimeoutVote, TimeoutCertificate, - >(&info, vote.clone(), event, &event_stream) + >(&info, event, &event_stream) .await; } else { let result = collector diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 4d09298ffb..6f57703855 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -49,10 +49,7 @@ pub(crate) async fn handle_quorum_vote_recv, QuorumCertificate>( - &info, - vote.clone(), - event, - sender, + &info, event, sender, ) .await; } else { @@ -96,10 +93,7 @@ pub(crate) async fn handle_timeout_vote_recv, TimeoutCertificate>( - &info, - vote.clone(), - event, - sender, + &info, event, sender, ) .await; } else { diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 2519836c5d..4bb6fd15d0 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -268,7 +268,7 @@ impl> DaTaskState { TYPES, DaVote, DaCertificate, - >(&info, vote.clone(), event, &event_stream) + >(&info, event, &event_stream) .await; } else { let result = collector diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 03df3d2b09..34e1aba4b8 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -237,7 +237,7 @@ impl> UpgradeTaskState { TYPES, UpgradeVote, UpgradeCertificate, - >(&info, vote.clone(), event, &tx) + >(&info, event, &tx) .await; } else { let result = collector diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c7a358b4c9..f3b1e596d3 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -293,8 +293,7 @@ impl> ViewSyncTaskState view: vote_view, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; + let vote_collector = create_vote_accumulator(&info, event, &event_stream).await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -331,8 +330,7 @@ impl> ViewSyncTaskState view: vote_view, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; + let vote_collector = create_vote_accumulator(&info, event, &event_stream).await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -369,8 +367,7 @@ impl> ViewSyncTaskState view: vote_view, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; + let vote_collector = create_vote_accumulator(&info, event, &event_stream).await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index dda367a840..72cc982a1a 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -140,7 +140,6 @@ pub struct AccumulatorInfo { /// Calls unwrap but should never panic. pub async fn create_vote_accumulator( info: &AccumulatorInfo, - vote: VOTE, event: Arc>, sender: &Sender>>, ) -> Option> @@ -158,14 +157,6 @@ where + 'static, VoteCollectionTaskState: HandleVoteEvent, { - if vote.view_number() != info.view { - error!( - "Vote view does not match! vote view is {} current view is {}", - *vote.view_number(), - *info.view - ); - return None; - } let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), signers: HashMap::new(), From ca2bacffb1f73d7097a4556f39b268af5e116dac Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 30 Jul 2024 09:53:11 -0400 Subject: [PATCH 1153/1393] fix tests and impl (#3508) --- .../src/quorum_proposal_recv/handlers.rs | 56 ++++++++++++------- .../tests_1/quorum_proposal_recv_task.rs | 2 +- 2 files changed, 38 insertions(+), 20 deletions(-) diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index fdfafb6666..df214463f8 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -94,8 +94,26 @@ async fn validate_proposal_liveness( + view_number, + event_sender, + task_state.timeout, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + &mut task_state.cur_view, + &mut task_state.cur_view_time, + &mut task_state.timeout_task, + &task_state.output_event_stream, + SEND_VIEW_CHANGE_EVENT, + task_state.quorum_membership.leader(cur_view) == task_state.public_key, + ) + .await + { + debug!("Liveness Branch - Failed to update view; error = {e:#}"); + } + if !liveness_check { - bail!("Liveness invalid."); + bail!("Quorum Proposal failed the liveness check"); } Ok(QuorumProposalValidity::Liveness) @@ -138,24 +156,6 @@ pub(crate) async fn handle_quorum_proposal_recv( - view_number, - event_sender, - task_state.timeout, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &mut task_state.cur_view, - &mut task_state.cur_view_time, - &mut task_state.timeout_task, - &task_state.output_event_stream, - SEND_VIEW_CHANGE_EVENT, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, - ) - .await - { - debug!("Failed to update view; error = {e:#}"); - } - // Get the parent leaf and state. let mut parent_leaf = task_state .consensus @@ -237,5 +237,23 @@ pub(crate) async fn handle_quorum_proposal_recv( + view_number, + event_sender, + task_state.timeout, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + &mut task_state.cur_view, + &mut task_state.cur_view_time, + &mut task_state.timeout_task, + &task_state.output_event_stream, + SEND_VIEW_CHANGE_EVENT, + task_state.quorum_membership.leader(cur_view) == task_state.public_key, + ) + .await + { + debug!("Full Branch - Failed to update view; error = {e:#}"); + } + Ok(QuorumProposalValidity::Fully) } diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 8773aac6e2..9930d1de9a 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -80,7 +80,6 @@ async fn test_quorum_proposal_recv_task() { )]]; let expectations = vec![Expectations::from_outputs(vec![ - exact(ViewChange(ViewNumber::new(2))), exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), exact(ValidatedStateUpdated( ViewNumber::new(2), @@ -95,6 +94,7 @@ async fn test_quorum_proposal_recv_task() { proposals[1].data.clone(), leaves[0].clone(), )), + exact(ViewChange(ViewNumber::new(2))), ])]; let state = QuorumProposalRecvTaskState::::create_from(&handle).await; From f65a3002a7c6a36729340fe57ae63473d92b99e5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 30 Jul 2024 13:46:12 -0400 Subject: [PATCH 1154/1393] Add version check for block header creation (#3510) --- .../src/auction_results_provider_types.rs | 18 ++-- example-types/src/block_types.rs | 6 +- example-types/src/node_types.rs | 12 ++- examples/combined/types.rs | 2 +- examples/infra/mod.rs | 12 +-- examples/libp2p/types.rs | 2 +- examples/push-cdn/types.rs | 2 +- task-impls/src/consensus/handlers.rs | 102 ++++++++++-------- task-impls/src/consensus/mod.rs | 6 +- task-impls/src/events.rs | 6 +- task-impls/src/quorum_proposal/handlers.rs | 50 ++++++--- task-impls/src/quorum_proposal/mod.rs | 2 + task-impls/src/transactions.rs | 10 +- task-impls/src/vid.rs | 4 +- testing/src/helpers.rs | 2 +- testing/src/spinning_task.rs | 2 +- testing/src/test_builder.rs | 4 +- testing/src/test_launcher.rs | 2 +- testing/src/test_runner.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 12 ++- testing/tests/tests_1/da_task.rs | 2 + testing/tests/tests_1/quorum_proposal_task.rs | 68 ++++++++---- testing/tests/tests_1/transaction_task.rs | 3 +- .../tests_1/upgrade_task_with_consensus.rs | 27 +++-- .../tests_1/upgrade_task_with_proposal.rs | 10 +- testing/tests/tests_1/vid_task.rs | 10 +- testing/tests/tests_3/memory_network.rs | 5 +- types/src/consensus.rs | 5 +- types/src/data.rs | 5 + types/src/traits/auction_results_provider.rs | 17 +-- types/src/traits/block_contents.rs | 8 +- types/src/traits/node_implementation.rs | 22 +++- 32 files changed, 278 insertions(+), 162 deletions(-) diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index 87dbdb9374..d363446655 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -1,14 +1,14 @@ use anyhow::{bail, Result}; use async_trait::async_trait; use hotshot_types::traits::{ - auction_results_provider::{AuctionResultsProvider, HasUrls}, - node_implementation::NodeType, + auction_results_provider::AuctionResultsProvider, + node_implementation::{HasUrls, NodeType}, }; use serde::{Deserialize, Serialize}; use url::Url; /// A mock result for the auction solver. This type is just a pointer to a URL. -#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, Default)] pub struct TestAuctionResult { /// The URL of the builder to reach out to. pub urls: Vec, @@ -22,10 +22,10 @@ impl HasUrls for TestAuctionResult { /// The test auction results type is used to mimic the results from the Solver. #[derive(Clone, Debug, Default)] -pub struct TestAuctionResultsProvider { +pub struct TestAuctionResultsProvider { /// We intentionally allow for the results to be pre-cooked for the unit test to gurantee a /// particular outcome is met. - pub solver_results: TestAuctionResult, + pub solver_results: TYPES::AuctionResult, /// A canned type to ensure that an error is thrown in absence of a true fault-injectible /// system for logical tests. This will guarantee that `fetch_auction_result` always throws an @@ -39,17 +39,15 @@ pub struct TestAuctionResultsProvider { } #[async_trait] -impl AuctionResultsProvider for TestAuctionResultsProvider { - type AuctionResult = TestAuctionResult; - +impl AuctionResultsProvider for TestAuctionResultsProvider { /// Mock fetching the auction results, with optional error injection to simulate failure cases /// in the solver. - async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result { + async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result { if let Some(url) = &self.broadcast_url { let resp = reqwest::get(url.join(&format!("/v0/api/auction_results/{}", *view_number))?) .await? - .json::() + .json::() .await?; Ok(resp) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 2820bd0898..0768d956bb 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -23,7 +23,6 @@ use time::OffsetDateTime; use vbs::version::Version; use crate::{ - auction_results_provider_types::TestAuctionResult, node_types::TestTypes, state_types::{TestInstanceState, TestValidatedState}, }; @@ -244,7 +243,6 @@ impl> Block for TestBlockHeader { type Error = std::convert::Infallible; - type AuctionResult = TestAuctionResult; async fn new_legacy( _parent_state: &TYPES::ValidatedState, @@ -281,7 +279,7 @@ impl> Block _metadata: >::Metadata, _builder_fee: Vec>, _vid_common: VidCommon, - _auction_results: Option, + _auction_results: Option, _version: Version, ) -> Result { unimplemented!() @@ -317,7 +315,7 @@ impl> Block self.builder_commitment.clone() } - fn get_auction_results(&self) -> Option { + fn get_auction_results(&self) -> Option { unimplemented!() } } diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 57b031e0ff..13ba83a383 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; use crate::{ - auction_results_provider_types::TestAuctionResultsProvider, + auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, @@ -38,6 +38,7 @@ use crate::{ /// to select our traits pub struct TestTypes; impl NodeType for TestTypes { + type AuctionResult = TestAuctionResult; type Base = StaticVersion<0, 1>; type Upgrade = StaticVersion<0, 2>; const UPGRADE_HASH: [u8; 32] = [ @@ -72,6 +73,7 @@ impl NodeType for TestTypes { /// to select our traits pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { + type AuctionResult = TestAuctionResult; type Base = StaticVersion<0, 1>; type Upgrade = StaticVersion<0, 2>; const UPGRADE_HASH: [u8; 32] = [ @@ -116,23 +118,23 @@ pub type StaticMembership = StaticCommittee; impl NodeImplementation for PushCdnImpl { type Network = PushCdnNetwork; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } impl NodeImplementation for MemoryImpl { type Network = MemoryNetwork; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } impl NodeImplementation for CombinedImpl { type Network = CombinedNetworks; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } impl NodeImplementation for Libp2pImpl { type Network = Libp2pNetwork; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } diff --git a/examples/combined/types.rs b/examples/combined/types.rs index f15f4d8891..caf0218621 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -20,7 +20,7 @@ pub type Network = CombinedNetworks; impl NodeImplementation for NodeImpl { type Network = Network; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// convenience type alias pub type ThisRun = CombinedDaRun; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 23350e43bf..00c5c320e2 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -339,7 +339,7 @@ pub trait RunDa< TYPES, Network = NETWORK, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, > where ::ValidatedState: TestableState, @@ -406,7 +406,7 @@ pub trait RunDa< initializer, ConsensusMetricsValue::default(), TestStorage::::default(), - TestAuctionResultsProvider::default(), + TestAuctionResultsProvider::::default(), ) .await .expect("Could not init hotshot") @@ -604,7 +604,7 @@ impl< TYPES, Network = PushCdnNetwork, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, > RunDa, NODE> for PushCdnDaRun where @@ -681,7 +681,7 @@ impl< TYPES, Network = Libp2pNetwork, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, > RunDa, NODE> for Libp2pDaRun where @@ -767,7 +767,7 @@ impl< TYPES, Network = CombinedNetworks, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, > RunDa, NODE> for CombinedDaRun where @@ -837,7 +837,7 @@ pub async fn main_entry_point< TYPES, Network = NETWORK, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, RUNDA: RunDa, >( diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index af73237d0b..c3f2c59d2d 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -20,7 +20,7 @@ pub type Network = Libp2pNetwork<::SignatureKey>; impl NodeImplementation for NodeImpl { type Network = Network; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// convenience type alias pub type ThisRun = Libp2pDaRun; diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index c9721af3cb..17852a99af 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -17,7 +17,7 @@ pub type Network = PushCdnNetwork; impl NodeImplementation for NodeImpl { type Network = Network; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// Convenience type alias diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index c8efa8c84f..c025be9d13 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -14,6 +14,7 @@ use committable::Committable; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus, View}, + constants::MarketplaceVersion, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, @@ -33,7 +34,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use vbs::version::Version; +use vbs::version::{StaticVersionType, Version}; use super::ConsensusTaskState; use crate::{ @@ -64,37 +65,46 @@ pub async fn create_and_send_proposal( instance_state: Arc, version: Version, id: u64, -) { +) -> Result<()> { let consensus_read = consensus.read().await; - let Some(Some(vid_share)) = consensus_read + let vid_share = consensus_read .vid_shares() .get(&view) .map(|shares| shares.get(&public_key).cloned()) - else { - error!("Cannot propopse without our VID share, view {:?}", view); - return; - }; - // TODO ED: This will need to be version-gated to use the appropriate `BlockHeader::new` function. - // Pre-marketplace versions will use `new_legacy` and post-marketplace versions will use `new_marketplace` + .context(format!( + "Cannot propopse without our VID share, view {view:?}" + ))? + .context("Failed to get vid share")?; drop(consensus_read); - let block_header = match TYPES::BlockHeader::new_legacy( - state.as_ref(), - instance_state.as_ref(), - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fee, - vid_share.data.common, - version, - ) - .await - { - Ok(header) => header, - Err(err) => { - error!(%err, "Failed to construct block header"); - return; - } + + let block_header = if version < MarketplaceVersion::VERSION { + TYPES::BlockHeader::new_legacy( + state.as_ref(), + instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fees.first().clone(), + vid_share.data.common, + version, + ) + .await + .context("Failed to construct legacy block header")? + } else { + TYPES::BlockHeader::new_marketplace( + state.as_ref(), + instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fees.to_vec(), + vid_share.data.common, + commitment_and_metadata.auction_result, + version, + ) + .await + .context("Failed to construct marketplace block header")? }; let proposal = QuorumProposal { @@ -106,35 +116,29 @@ pub async fn create_and_send_proposal( }; let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.parent_commitment() != parent_leaf.commit() { - return; - } - let Ok(signature) = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref()) - else { - // This should never happen. - error!("Failed to sign proposed_leaf.commit()!"); - return; - }; + ensure!(proposed_leaf.parent_commitment() == parent_leaf.commit()); + + let signature = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref())?; let message = Proposal { data: proposal, signature, _pd: PhantomData, }; + debug!( - "Sending null proposal for view {:?}", + "Sending proposal for view {:?}", proposed_leaf.view_number(), ); - if let Err(e) = consensus + + consensus .write() .await - .update_last_proposed_view(message.clone()) - { - tracing::trace!("{e:?}"); - return; - } + .update_last_proposed_view(message.clone())?; + async_sleep(Duration::from_millis(round_start_delay)).await; + broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( message.clone(), @@ -143,6 +147,8 @@ pub async fn create_and_send_proposal( &event_stream, ) .await; + + Ok(()) } /// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the @@ -213,7 +219,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( let version = version(view, &decided_upgrade_certificate.read().await.clone())?; let create_and_send_proposal_handle = async_spawn(async move { - create_and_send_proposal( + match create_and_send_proposal( public_key, private_key, OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), @@ -229,7 +235,13 @@ pub async fn publish_proposal_from_commitment_and_metadata( version, id, ) - .await; + .await + { + Ok(()) => {} + Err(e) => { + tracing::error!("Failed to send proposal: {}", e); + } + }; }); Ok(create_and_send_proposal_handle) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index be087581f8..75368139d3 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -576,7 +576,8 @@ impl> ConsensusTaskState builder_commitment, metadata, view, - fee, + fees, + auction_result, ) => { let view = *view; debug!( @@ -587,8 +588,9 @@ impl> ConsensusTaskState commitment: *payload_commitment, builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), - fee: fee.clone(), + fees: fees.clone(), block_view: view, + auction_result: auction_result.clone(), }); if self.quorum_membership.leader(view) == self.public_key && self.consensus.read().await.high_qc().view_number() + 1 == view diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index f12fe1bd23..9eac42273d 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -22,6 +22,7 @@ use hotshot_types::{ vid::VidCommitment, vote::{HasViewNumber, VoteDependencyData}, }; +use vec1::Vec1; use crate::view_sync::ViewSyncPhase; @@ -142,7 +143,8 @@ pub enum HotShotEvent { BuilderCommitment, >::Metadata, TYPES::Time, - BuilderFee, + Vec1>, + Option, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), @@ -351,7 +353,7 @@ impl Display for HotShotEvent { HotShotEvent::Timeout(view_number) => write!(f, "Timeout(view_number={view_number:?})"), HotShotEvent::TransactionsRecv(_) => write!(f, "TransactionsRecv"), HotShotEvent::TransactionSend(_, _) => write!(f, "TransactionSend"), - HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _) => { + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { write!( f, "SendPayloadCommitmentAndMetadata(view_number={view_number:?})" diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index b344d4679f..b03c738445 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -14,6 +14,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, + constants::MarketplaceVersion, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, simple_certificate::{version, UpgradeCertificate}, @@ -22,6 +23,7 @@ use hotshot_types::{ }, }; use tracing::{debug, error, instrument}; +use vbs::version::StaticVersionType; use crate::{ events::HotShotEvent, @@ -161,19 +163,35 @@ impl ProposalDependencyHandle { &self.decided_upgrade_certificate.read().await.clone(), )?; - let block_header = TYPES::BlockHeader::new_legacy( - state.as_ref(), - self.instance_state.as_ref(), - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fee, - vid_share.data.common.clone(), - version, - ) - .await - .context("Failed to construct block header")?; + let block_header = if version < MarketplaceVersion::VERSION { + TYPES::BlockHeader::new_legacy( + state.as_ref(), + self.instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fees.first().clone(), + vid_share.data.common.clone(), + version, + ) + .await + .context("Failed to construct legacy block header")? + } else { + TYPES::BlockHeader::new_marketplace( + state.as_ref(), + self.instance_state.as_ref(), + &parent_leaf, + commitment_and_metadata.commitment, + commitment_and_metadata.metadata, + commitment_and_metadata.fees.to_vec(), + vid_share.data.common.clone(), + commitment_and_metadata.auction_result, + version, + ) + .await + .context("Failed to construct marketplace block header")? + }; let proposal = QuorumProposal { block_header, @@ -267,14 +285,16 @@ impl HandleDepOutput for ProposalDependencyHandle { builder_commitment, metadata, view, - fee, + fees, + auction_result, ) => { commit_and_metadata = Some(CommitmentAndMetadata { commitment: *payload_commitment, builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), - fee: fee.clone(), + fees: fees.clone(), block_view: *view, + auction_result: auction_result.clone(), }); } HotShotEvent::QcFormed(cert) => match cert { diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 596ff0f7d7..dd25f65f0e 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -146,6 +146,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState { let view_number = *view_number; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 621ff66077..eece7205c5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -18,10 +18,10 @@ use hotshot_types::{ event::{Event, EventType}, simple_certificate::{version, UpgradeCertificate}, traits::{ - auction_results_provider::{AuctionResultsProvider, HasUrls}, + auction_results_provider::AuctionResultsProvider, block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, HasUrls, NodeImplementation, NodeType}, signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, }, @@ -187,6 +187,7 @@ impl> TransactionTaskState> TransactionTaskState> TransactionTaskState> TransactionTaskState> TransactionTaskState> VidTaskState { view_number, sequencing_fees, vid_precompute, + auction_result, .. } = packed_bundle; let payload = @@ -87,7 +88,8 @@ impl> VidTaskState { builder_commitment, metadata.clone(), *view_number, - sequencing_fees.first().clone(), + sequencing_fees.clone(), + auction_result.clone(), )), &event_stream, ) diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index ac6730b93a..67480905ff 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -48,7 +48,7 @@ pub async fn build_system_handle< I: NodeImplementation< TYPES, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, > + TestableNodeImplementation, >( node_id: u64, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 73c27193c5..65dffac329 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -65,7 +65,7 @@ where TYPES, Network = N, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, { type Event = Event; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 909ccaa898..d2726aa25f 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -478,7 +478,9 @@ impl, I: TestableNodeImplemen ), storage: Box::new(|_| TestStorage::::default()), config, - auction_results_provider: Box::new(|_| TestAuctionResultsProvider::default()), + auction_results_provider: Box::new(|_| { + TestAuctionResultsProvider::::default() + }), }, metadata: self, } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 0ed2907fe5..077194fad7 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -29,7 +29,7 @@ pub struct ResourceGenerators, /// generate a new auction results connector for each node - pub auction_results_provider: Generator, + pub auction_results_provider: Generator>, } /// test launcher diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 76ccbc8625..e11a343676 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -70,7 +70,7 @@ where TYPES, Network = N, Storage = TestStorage, - AuctionResultsProvider = TestAuctionResultsProvider, + AuctionResultsProvider = TestAuctionResultsProvider, >, { /// execute test diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 4a7314ced5..c3abf8c2ff 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -37,6 +37,7 @@ use hotshot_types::{ }; use jf_vid::VidScheme; use sha2::Digest; +use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); @@ -97,8 +98,12 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap(), + vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + None, ), ], ]; @@ -285,7 +290,8 @@ async fn test_view_sync_finalize_propose() { builder_commitment, TestMetadata, ViewNumber::new(4), - null_block::builder_fee(4, BaseVersion::version()).unwrap(), + vec1![null_block::builder_fee(4, BaseVersion::version()).unwrap()], + None, ), ], ]; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index e98711d5ed..ac550f6a1e 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -85,6 +85,7 @@ async fn test_da_task() { ) .unwrap()], Some(precompute), + None, )), ], serial![DaProposalRecv(proposals[1].clone(), leaders[1])], @@ -171,6 +172,7 @@ async fn test_da_task_storage_failure() { ) .unwrap()], Some(precompute), + None, ),) ], serial![DaProposalRecv(proposals[1].clone(), leaders[1])], diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 96c11122f9..5783498acd 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -28,6 +28,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use sha2::Digest; +use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); @@ -43,7 +44,9 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 1; - let handle = build_system_handle::(node_id).await.0; + let handle = build_system_handle::(node_id) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -90,7 +93,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { builder_commitment, TestMetadata, ViewNumber::new(1), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), ValidatedStateUpdated( proposals[0].data.view_number(), @@ -130,7 +134,9 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle::(node_id).await.0; + let handle = build_system_handle::(node_id) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -186,7 +192,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( @@ -202,7 +209,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -218,7 +226,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -234,7 +243,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( @@ -250,7 +260,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { builder_commitment, TestMetadata, ViewNumber::new(5), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( @@ -307,7 +318,9 @@ async fn test_quorum_proposal_task_qc_timeout() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle::(node_id).await.0; + let handle = build_system_handle::(node_id) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -353,8 +366,11 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap(), + vec1![ + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap() + ], + None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -388,7 +404,9 @@ async fn test_quorum_proposal_task_view_sync() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 2; - let handle = build_system_handle::(node_id).await.0; + let handle = build_system_handle::(node_id) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -436,8 +454,11 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap(), + vec1![ + null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) + .unwrap() + ], + None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -470,7 +491,9 @@ async fn test_quorum_proposal_task_liveness_check() { async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle::(node_id).await.0; + let handle = build_system_handle::(node_id) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -524,7 +547,8 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( @@ -540,7 +564,8 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -556,7 +581,8 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( @@ -572,7 +598,8 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( @@ -588,7 +615,8 @@ async fn test_quorum_proposal_task_liveness_check() { builder_commitment, TestMetadata, ViewNumber::new(5), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 25b2f53942..dc3386162f 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -47,6 +47,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { ) .unwrap()], Some(precompute_data.clone()), + None, ); output.push(HotShotEvent::BlockRecv(exp_packed_bundle.clone())); @@ -56,4 +57,4 @@ async fn test_transaction_task_leader_two_views_in_a_row() { let transaction_state = TransactionTaskState::::create_from(&handle).await; run_harness(input, output, transaction_state, false).await; -} \ No newline at end of file +} diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index 958ccad63a..28b0b5b6a5 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -30,6 +30,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use vbs::version::{StaticVersionType, Version}; +use vec1::vec1; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -272,8 +273,12 @@ async fn test_upgrade_task_propose() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap(), + vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + None, ), QcFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), ], @@ -459,7 +464,8 @@ async fn test_upgrade_task_blank_blocks() { proposals[1].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), ], vec![ @@ -470,7 +476,8 @@ async fn test_upgrade_task_blank_blocks() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), QuorumProposalRecv(proposals[2].clone(), leaders[2]), ], @@ -482,7 +489,8 @@ async fn test_upgrade_task_blank_blocks() { proposals[3].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(4), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), QuorumProposalRecv(proposals[3].clone(), leaders[3]), ], @@ -494,7 +502,8 @@ async fn test_upgrade_task_blank_blocks() { proposals[4].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(5), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), QuorumProposalRecv(proposals[4].clone(), leaders[4]), ], @@ -506,7 +515,8 @@ async fn test_upgrade_task_blank_blocks() { proposals[5].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(6), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), QuorumProposalRecv(proposals[5].clone(), leaders[5]), QcFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), @@ -519,7 +529,8 @@ async fn test_upgrade_task_blank_blocks() { proposals[6].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(7), - builder_fee, + vec1![builder_fee], + None, ), QuorumProposalRecv(proposals[6].clone(), leaders[6]), ], diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 3095c4ef25..40f766b273 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -35,6 +35,7 @@ use hotshot_types::{ }; use sha2::Digest; use vbs::version::{StaticVersionType, Version}; +use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); @@ -146,7 +147,8 @@ async fn test_upgrade_task_with_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(1), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( @@ -162,7 +164,8 @@ async fn test_upgrade_task_with_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(2), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( @@ -179,7 +182,8 @@ async fn test_upgrade_task_with_proposal() { builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - builder_fee.clone(), + vec1![builder_fee.clone()], + None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 7d6ad7bc08..3087293ccc 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -26,6 +26,7 @@ use hotshot_types::{ }; use jf_vid::{precomputable::Precomputable, VidScheme}; use vbs::version::StaticVersionType; +use vec1::vec1; #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -97,6 +98,7 @@ async fn test_vid_task() { ) .unwrap()], Some(vid_precompute), + None, )), ], ]; @@ -109,8 +111,12 @@ async fn test_vid_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap(), + vec1![null_block::builder_fee( + quorum_membership.total_nodes(), + BaseVersion::version() + ) + .unwrap()], + None, )), exact(BlockReady(vid_disperse, ViewNumber::new(2))), exact(VidDisperseSend(vid_proposal.clone(), pub_key)), diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 591f7a5147..e4869e383c 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -11,7 +11,7 @@ use hotshot::{ types::SignatureKey, }; use hotshot_example_types::{ - auction_results_provider_types::TestAuctionResultsProvider, + auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, @@ -46,6 +46,7 @@ use vbs::version::StaticVersion; pub struct Test; impl NodeType for Test { + type AuctionResult = TestAuctionResult; type Base = StaticVersion<0, 1>; type Upgrade = StaticVersion<0, 2>; const UPGRADE_HASH: [u8; 32] = [ @@ -69,7 +70,7 @@ pub struct TestImpl {} impl NodeImplementation for TestImpl { type Network = MemoryNetwork<::SignatureKey>; type Storage = TestStorage; - type AuctionResultsProvider = TestAuctionResultsProvider; + type AuctionResultsProvider = TestAuctionResultsProvider; } /// fake Eq diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 9ef89601f6..33b91cd147 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -11,6 +11,7 @@ use anyhow::{bail, ensure, Result}; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use committable::{Commitment, Committable}; use tracing::{debug, error, instrument, trace}; +use vec1::Vec1; pub use crate::utils::{View, ViewInner}; use crate::{ @@ -732,7 +733,9 @@ pub struct CommitmentAndMetadata { /// Metadata for the block payload pub metadata: >::Metadata, /// Builder fee data - pub fee: BuilderFee, + pub fees: Vec1>, /// View number this block is for pub block_view: TYPES::Time, + /// auction result that the block was produced from, if any + pub auction_result: Option, } diff --git a/types/src/data.rs b/types/src/data.rs index c963cb5648..d2356e1df9 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -881,6 +881,9 @@ pub struct PackedBundle { /// The Vid precompute for the block. pub vid_precompute: Option, + + /// The auction results for the block, if it was produced as the result of an auction + pub auction_result: Option, } impl PackedBundle { @@ -891,6 +894,7 @@ impl PackedBundle { view_number: TYPES::Time, sequencing_fees: Vec1>, vid_precompute: Option, + auction_result: Option, ) -> Self { Self { encoded_transactions, @@ -898,6 +902,7 @@ impl PackedBundle { view_number, sequencing_fees, vid_precompute, + auction_result, } } } diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index 1d373e2af2..e9154151ce 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -3,29 +3,16 @@ use anyhow::Result; use async_trait::async_trait; -use url::Url; use super::node_implementation::NodeType; -/// This trait guarantees that a particular type has urls that can be extracted from it. This trait -/// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a -/// list of urls for the builders that HotShot must request from. -pub trait HasUrls { - /// Returns the builer url associated with the datatype - fn urls(&self) -> Vec; -} - /// The AuctionResultsProvider trait is the sole source of Solver-originated state and interaction, /// and returns the results of the Solver's allocation via the associated type. The associated type, -/// `AuctionResult`, also implements the [`HasUrls`] trait, which requires that the output +/// `AuctionResult`, also implements the `HasUrls` trait, which requires that the output /// type has the requisite fields available. #[async_trait] pub trait AuctionResultsProvider: Send + Sync + Clone { - /// The AuctionSolverResult is a type that holds the data associated with a particular solver - /// run, for a particular view. - type AuctionResult: HasUrls + Send; - /// Fetches the auction result for a view. Does not cache the result, /// subsequent calls will invoke additional wasted calls. - async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; + async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 253554650f..6c1bb1704f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -17,7 +17,7 @@ use jf_vid::{precomputable::Precomputable, VidScheme}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::version::Version; -use super::{auction_results_provider::HasUrls, signature_key::BuilderSignatureKey}; +use super::signature_key::BuilderSignatureKey; use crate::{ data::Leaf, traits::{node_implementation::NodeType, states::InstanceState, ValidatedState}, @@ -181,8 +181,6 @@ pub trait BlockHeader: { /// Error type for this type of block header type Error: Error + Debug + Send + Sync; - /// Type of Auction Results - type AuctionResult: HasUrls + Send; /// Build a header with the parent validate state, instance-level state, parent leaf, payload /// commitment, and metadata. This is only used in pre-marketplace versions @@ -210,7 +208,7 @@ pub trait BlockHeader: metadata: >::Metadata, builder_fee: Vec>, vid_common: VidCommon, - auction_results: Option, + auction_results: Option, version: Version, ) -> impl Future> + Send; @@ -235,5 +233,5 @@ pub trait BlockHeader: fn builder_commitment(&self) -> BuilderCommitment; /// Get the results of the auction for this Header. Only used in post-marketplace versions - fn get_auction_results(&self) -> Option; + fn get_auction_results(&self) -> Option; } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 4dba2104c1..49a93de123 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -13,7 +13,8 @@ use std::{ use async_trait::async_trait; use committable::Committable; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use url::Url; use vbs::version::StaticVersionType; use super::{ @@ -34,6 +35,14 @@ use crate::{ }, }; +/// This trait guarantees that a particular type has urls that can be extracted from it. This trait +/// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a +/// list of urls for the builders that HotShot must request from. +pub trait HasUrls { + /// Returns the builer url associated with the datatype + fn urls(&self) -> Vec; +} + /// Node implementation aggregate trait /// /// This trait exists to collect multiple behavior implementations into one type, to allow @@ -205,6 +214,17 @@ pub trait NodeType: /// /// This should be the same `Time` that `ValidatedState::Time` is using. type Time: ConsensusTime; + /// The AuctionSolverResult is a type that holds the data associated with a particular solver + /// run, for a particular view. + type AuctionResult: Debug + + HasUrls + + DeserializeOwned + + Default + + PartialEq + + Eq + + Clone + + Send + + Sync; /// The block header type that this hotshot setup is using. type BlockHeader: BlockHeader; /// The block type that this hotshot setup is using. From f0361d19557a0456efd64efc3cfb3ad48f4dd2bb Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 31 Jul 2024 09:35:31 +0200 Subject: [PATCH 1155/1393] Lr/split validation (#3495) * Split validation * Remove byzantine code for now * Adjust tests * Fix one more test * Change the broadcast event indiciating correclty the High QC has been updated * Cleanup * Add more verbose description of the `QuorumProposalValidated` event * Fix a call to `validate_signature` * Remove an obsolete comment. --- task-impls/src/consensus/handlers.rs | 3 --- task-impls/src/events.rs | 19 ++++++++++++++++ task-impls/src/helpers.rs | 22 ++----------------- task-impls/src/quorum_proposal/mod.rs | 7 +++--- .../src/quorum_proposal_recv/handlers.rs | 13 +++++++---- task-impls/src/quorum_proposal_recv/mod.rs | 2 +- .../tests_1/quorum_proposal_recv_task.rs | 6 +++-- testing/tests/tests_1/quorum_proposal_task.rs | 16 +++++++------- .../tests_1/upgrade_task_with_proposal.rs | 4 ++-- types/src/consensus.rs | 2 +- 10 files changed, 50 insertions(+), 44 deletions(-) diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index c025be9d13..79c7e9f9b8 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -305,7 +305,6 @@ pub(crate) async fn handle_quorum_proposal_recv { /// All dependencies for the quorum vote are validated. QuorumVoteDependenciesValidated(TYPES::Time), /// A quorum proposal with the given parent leaf is validated. + /// The full validation checks include: + /// 1. The proposal is not for an old view + /// 2. The proposal has been correctly signed by the leader of the current view + /// 3. The justify QC is valid + /// 4. The proposal passes either liveness or safety check. QuorumProposalValidated(QuorumProposal, Leaf), /// A quorum proposal is missing for a view that we meed QuorumProposalRequest(ProposalMissing), @@ -193,6 +198,13 @@ pub enum HotShotEvent { /// A new high_qc has been updated in `Consensus`. HighQcUpdated(QuorumCertificate), + + /// A quorum proposal has been preliminarily validated. + /// The preliminary checks include: + /// 1. The proposal is not for an old view + /// 2. The proposal has been correctly signed by the leader of the current view + /// 3. The justify QC is valid + QuorumProposalPreliminarilyValidated(Proposal>), } impl Display for HotShotEvent { @@ -430,6 +442,13 @@ impl Display for HotShotEvent { HotShotEvent::HighQcUpdated(cert) => { write!(f, "HighQcUpdated(view_number={:?})", cert.view_number()) } + HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { + write!( + f, + "QuorumProposalPreliminarilyValidated(view_number={:?}", + proposal.data.view_number() + ) + } } } } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 324dd65d05..10cd2e7528 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -22,7 +22,6 @@ use hotshot_types::{ block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, BlockPayload, ValidatedState, }, utils::{Terminator, View, ViewInner}, @@ -353,7 +352,6 @@ pub async fn validate_proposal_safety_and_liveness( consensus: OuterConsensus, decided_upgrade_certificate: Arc>>>, quorum_membership: Arc, - view_leader_key: TYPES::SignatureKey, event_stream: Sender>>, sender: TYPES::SignatureKey, event_sender: Sender>, @@ -397,18 +395,6 @@ pub async fn validate_proposal_safety_and_liveness( ) .await; - // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - // - // There is a mistake here originating in the genesis leaf/qc commit. This should be replaced by: - // - // proposal.validate_signature(&quorum_membership)?; - // - // in a future PR. - ensure!( - view_leader_key.validate(&proposal.signature, proposed_leaf.commit().as_ref()), - "Could not verify proposal." - ); - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; // Validate that the upgrade certificate is re-attached, if we saw one on the parent @@ -487,7 +473,6 @@ pub async fn validate_proposal_safety_and_liveness( /// If any validation or view number check fails. pub fn validate_proposal_view_and_certs( proposal: &Proposal>, - sender: &TYPES::SignatureKey, cur_view: TYPES::Time, quorum_membership: &Arc, timeout_membership: &Arc, @@ -499,11 +484,8 @@ pub fn validate_proposal_view_and_certs( proposal.data.clone() ); - let view_leader_key = quorum_membership.leader(view); - ensure!( - view_leader_key == *sender, - "Leader key does not match key in proposal" - ); + // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment + proposal.validate_signature(quorum_membership)?; // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.view_number() != view - 1 { diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index dd25f65f0e..62b199225c 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -133,7 +133,8 @@ impl> QuorumProposalTaskState { - if let HotShotEvent::QuorumProposalRecv(proposal, _) = event { + if let HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) = event + { proposal.data.view_number() + 1 } else { return false; @@ -218,7 +219,7 @@ impl> QuorumProposalTaskState { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); } - HotShotEvent::QuorumProposalRecv(..) => { + HotShotEvent::QuorumProposalPreliminarilyValidated(..) => { proposal_dependency.mark_as_completed(event); } HotShotEvent::QcFormed(quorum_certificate) => match quorum_certificate { @@ -430,7 +431,7 @@ impl> QuorumProposalTaskState { + HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); // All nodes get the latest proposed view as a proxy of `cur_view` of olde. diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index df214463f8..858a293adc 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -139,7 +139,6 @@ pub(crate) async fn handle_quorum_proposal_recv> QuorumProposalRecvTaskState< ) .await; } - Err(e) => debug!(?e, "Failed to propose"), + Err(e) => debug!(?e, "Failed to validate the proposal"), } } } diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 9930d1de9a..6ab29ef35f 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -80,7 +80,8 @@ async fn test_quorum_proposal_recv_task() { )]]; let expectations = vec![Expectations::from_outputs(vec![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), + exact(QuorumProposalPreliminarilyValidated(proposals[1].clone())), + exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), exact(ValidatedStateUpdated( ViewNumber::new(2), build_fake_view_with_leaf_and_state( @@ -189,6 +190,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { )]]; let expectations = vec![Expectations::from_outputs(all_predicates![ + exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), exact(ViewChange(ViewNumber::new(3))), exact(ValidatedStateUpdated( ViewNumber::new(3), @@ -200,7 +202,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), )), quorum_proposal_missing(), - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), + exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), vote_now(), ])]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 5783498acd..488b57ae16 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -202,7 +202,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), + QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -219,7 +219,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(3)), @@ -236,7 +236,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalRecv(proposals[2].clone(), leaders[2]), + QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(4)), @@ -253,7 +253,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ), ], random![ - QuorumProposalRecv(proposals[3].clone(), leaders[3]), + QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(5)), @@ -557,7 +557,7 @@ async fn test_quorum_proposal_task_liveness_check() { ), ], random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), + QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -574,7 +574,7 @@ async fn test_quorum_proposal_task_liveness_check() { ), ], random![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(3)), @@ -591,7 +591,7 @@ async fn test_quorum_proposal_task_liveness_check() { ), ], random![ - QuorumProposalRecv(proposals[2].clone(), leaders[2]), + QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(4)), @@ -608,7 +608,7 @@ async fn test_quorum_proposal_task_liveness_check() { ), ], random![ - QuorumProposalRecv(proposals[3].clone(), leaders[3]), + QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(5)), diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 40f766b273..f92592a099 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -157,7 +157,7 @@ async fn test_upgrade_task_with_proposal() { ), ], random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), + QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(2)), @@ -175,7 +175,7 @@ async fn test_upgrade_task_with_proposal() { ], InputOrder::Random(upgrade_vote_recvs), random![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), + QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment(&quorum_membership, ViewNumber::new(3)), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 33b91cd147..948428ed05 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -577,7 +577,7 @@ impl Consensus { view.leaf_commitment() .ok_or_else(|| HotShotError::InvalidState { context: format!( - "Visited failed view {start_from:?} leaf. Expected successfuil leaf" + "Visited failed view {start_from:?} leaf. Expected successful leaf" ), })? } else { From 9ff5f796c51c4998c8cacd1abfc0461b6c62c1a8 Mon Sep 17 00:00:00 2001 From: Theodore Schnepper Date: Wed, 31 Jul 2024 07:07:25 -0600 Subject: [PATCH 1156/1393] Fix an early return in message loop processing (#3513) --- hotshot/src/tasks/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index dbaa964bc8..49935b5ba3 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -114,7 +114,7 @@ pub fn add_network_message_task< Ok(deserialized) => deserialized, Err(e) => { tracing::error!("Failed to deserialize message: {}", e); - return; + continue; } }; From 575fc6153d4aa1da8f5752387b056d42342bf7c9 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 31 Jul 2024 17:52:34 +0200 Subject: [PATCH 1157/1393] Fix last_vid_commitment (#3518) --- task-impls/src/transactions.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index eece7205c5..92cc1a9006 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -448,7 +448,7 @@ impl> TransactionTaskState> TransactionTaskState { // For failed views, backtrack - target_view = target_view - 1; + target_view = TYPES::Time::new(target_view.checked_sub(1)?); continue; } } } - - None } #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view), name = "wait_for_block", level = "error")] From ea031a819264b68e4b1277853e1d226a2f433e57 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 31 Jul 2024 12:16:53 -0400 Subject: [PATCH 1158/1393] Byzantine test for spamming bad proposals (#3486) --- hotshot/src/tasks/mod.rs | 44 +++++++++++++++++++++++++++ testing/tests/tests_1/test_success.rs | 38 +++++++++++++++++++++-- 2 files changed, 80 insertions(+), 2 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 49935b5ba3..5ed9503891 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -314,6 +314,8 @@ where let mut results = state.send_handler(&msg).await; + results.reverse(); + while let Some(event) = results.pop() { let _ = sender_to_network.broadcast(event.into()).await; } @@ -330,6 +332,8 @@ where let mut results = state.recv_handler(&msg).await; + results.reverse(); + while let Some(event) = results.pop() { let _ = original_sender.broadcast(event.into()).await; } @@ -342,6 +346,46 @@ where } } +#[derive(Debug)] +/// An `EventTransformerState` that multiplies `QuorumProposalSend` events, incrementing the view number of the proposal +pub struct BadProposalViewDos { + /// The number of times to duplicate a `QuorumProposalSend` event + pub multiplier: u64, + /// The view number increment each time it's duplicated + pub increment: u64, +} + +#[async_trait] +impl> EventTransformerState + for BadProposalViewDos +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + match event { + HotShotEvent::QuorumProposalSend(proposal, signature) => { + let mut result = Vec::new(); + + for n in 0..self.multiplier { + let mut modified_proposal = proposal.clone(); + + modified_proposal.data.view_number += n * self.increment; + + result.push(HotShotEvent::QuorumProposalSend( + modified_proposal, + signature.clone(), + )); + } + + result + } + _ => vec![event.clone()], + } + } +} + #[derive(Debug)] /// An `EventHandlerState` that doubles the `QuorumVoteSend` and `QuorumProposalSend` events pub struct DoubleProposeVote; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index fb2ef50d5b..90acc43eb2 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -11,7 +11,11 @@ use hotshot_testing::{ test_builder::TestDescription, }; #[cfg(async_executor_impl = "async-std")] -use {hotshot::tasks::DoubleProposeVote, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; +use { + hotshot::tasks::{BadProposalViewDos, DoubleProposeVote}, + hotshot_testing::test_builder::Behaviour, + std::rc::Rc, +}; cross_tests!( TestName: test_success, @@ -33,7 +37,7 @@ cross_tests!( #[cfg(async_executor_impl = "async-std")] cross_tests!( - TestName: twins_test_success, + TestName: double_propose_vote, Impls: [MemoryImpl], Types: [TestTypes], Ignore: false, @@ -55,3 +59,33 @@ cross_tests!( } }, ); + +// Test where node 4 sends out the correct quorum proposal and additionally spams the network with an extra 99 malformed proposals +#[cfg(async_executor_impl = "async-std")] +cross_tests!( + TestName: multiple_bad_proposals, + Impls: [MemoryImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { match node_id { + 4 => Behaviour::Byzantine(Box::new(BadProposalViewDos { multiplier: 100, increment: 1 })), + _ => Behaviour::Standard, + } }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + + metadata + }, +); From 02f57f0e48b214fa313b51d4fbcbaeb077fa32bd Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 1 Aug 2024 17:07:05 +0200 Subject: [PATCH 1159/1393] Remove builder_clients_marketplace from txn task (#3527) --- hotshot/src/tasks/task_state.rs | 1 - task-impls/src/transactions.rs | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index c74bd654fb..e94a3e0a21 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -203,7 +203,6 @@ impl> CreateTaskState .cloned() .map(BuilderClient::new) .collect(), - builder_clients_marketplace: Vec::new(), decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), auction_results_provider: Arc::clone(&handle.hotshot.auction_results_provider), } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 92cc1a9006..e70c734714 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -91,9 +91,6 @@ pub struct TransactionTaskState> { /// Builder 0.1 API clients pub builder_clients: Vec>, - /// Builder 0.3 API clients - pub builder_clients_marketplace: Vec>, - /// This Nodes Public Key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -283,7 +280,7 @@ impl> TransactionTaskState Date: Thu, 1 Aug 2024 16:45:27 -0400 Subject: [PATCH 1160/1393] [CATCHUP] Test all nodes restarting (#3454) * Adding restart to spinning task * Add restart test * reduce loggin * Fetch on proposal path too * Fix for net and channel, passing sometimes * cleanup/test params * fix lints * remove debug log * adjust replication for tests * remove log, full replication factor * Reset libp2p info after restarting * lint * fmt * remove debug error logs * remove duplicate code * review comments * Fix some comments --- example-types/src/storage_types.rs | 29 +++++- hotshot/src/lib.rs | 51 +++++++++- .../src/traits/networking/libp2p_network.rs | 19 ++-- hotshot/src/types/handle.rs | 14 +++ .../src/network/behaviours/direct_message.rs | 2 +- libp2p-networking/src/network/node.rs | 2 +- task-impls/src/consensus/handlers.rs | 1 + task-impls/src/helpers.rs | 18 +++- task-impls/src/quorum_proposal/handlers.rs | 1 + task-impls/src/quorum_proposal_recv/mod.rs | 1 + task-impls/src/request.rs | 34 ++++--- testing/src/spinning_task.rs | 98 +++++++++++++++++++ testing/src/test_runner.rs | 70 ++++++++++++- testing/tests/tests_2/catchup.rs | 60 ++++++++++++ 14 files changed, 368 insertions(+), 32 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 2df1d5d3a7..5192de3f20 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -10,8 +10,10 @@ use hotshot_types::{ consensus::CommitmentMap, data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, message::Proposal, + simple_certificate::QuorumCertificate, traits::{node_implementation::NodeType, storage::Storage}, utils::View, + vote::HasViewNumber, }; type VidShares = HashMap< @@ -23,7 +25,8 @@ type VidShares = HashMap< pub struct TestStorageState { vids: VidShares, das: HashMap>>, - proposals: HashMap>>, + proposals: BTreeMap>>, + high_qc: Option>, } impl Default for TestStorageState { @@ -31,7 +34,8 @@ impl Default for TestStorageState { Self { vids: HashMap::new(), das: HashMap::new(), - proposals: HashMap::new(), + proposals: BTreeMap::new(), + high_qc: None, } } } @@ -52,6 +56,17 @@ impl Default for TestStorage { } } +impl TestStorage { + pub async fn proposals_cloned( + &self, + ) -> BTreeMap>> { + self.inner.read().await.proposals.clone() + } + pub async fn high_qc_cloned(&self) -> Option> { + self.inner.read().await.high_qc.clone() + } +} + #[async_trait] impl Storage for TestStorage { async fn append_vid(&self, proposal: &Proposal>) -> Result<()> { @@ -104,11 +119,19 @@ impl Storage for TestStorage { async fn update_high_qc( &self, - _high_qc: hotshot_types::simple_certificate::QuorumCertificate, + new_high_qc: hotshot_types::simple_certificate::QuorumCertificate, ) -> Result<()> { if self.should_return_err { bail!("Failed to update high qc to storage"); } + let mut inner = self.inner.write().await; + if let Some(ref current_high_qc) = inner.high_qc { + if new_high_qc.view_number() > current_high_qc.view_number() { + inner.high_qc = Some(new_high_qc); + } + } else { + inner.high_qc = Some(new_high_qc); + } Ok(()) } async fn update_undecided_state( diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 8e67eda4c1..0b3c17514c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -177,8 +177,7 @@ impl> SystemContext { /// To do a full initialization, use `fn init` instead, which will set up background tasks as /// well. /// - /// # Errors - /// - + /// Use this instead of `init` if you want to start the tasks manually #[allow(clippy::too_many_arguments)] pub fn new( public_key: TYPES::SignatureKey, @@ -191,6 +190,50 @@ impl> SystemContext { metrics: ConsensusMetricsValue, storage: I::Storage, auction_results_provider: I::AuctionResultsProvider, + ) -> Arc { + let interal_chan = broadcast(EVENT_CHANNEL_SIZE); + let external_chan = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); + + Self::new_from_channels( + public_key, + private_key, + nonce, + config, + memberships, + network, + initializer, + metrics, + storage, + auction_results_provider, + interal_chan, + external_chan, + ) + } + + /// Creates a new [`Arc`] with the given configuration options. + /// + /// To do a full initialization, use `fn init` instead, which will set up background tasks as + /// well. + /// + /// Use this function if you want to use some prexisting channels and to spin up the tasks + /// and start consensus manually. Mostly useful for tests + #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn new_from_channels( + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + nonce: u64, + config: HotShotConfig, + memberships: Memberships, + network: Arc, + initializer: HotShotInitializer, + metrics: ConsensusMetricsValue, + storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, + internal_channel: ( + Sender>>, + Receiver>>, + ), + external_channel: (Sender>, Receiver>), ) -> Arc { debug!("Creating a new hotshot"); @@ -198,8 +241,8 @@ impl> SystemContext { let anchored_leaf = initializer.inner; let instance_state = initializer.instance_state; - let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); - let (mut external_tx, mut external_rx) = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); + let (internal_tx, internal_rx) = internal_channel; + let (mut external_tx, mut external_rx) = external_channel; let decided_upgrade_certificate = Arc::new(RwLock::new(None)); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6af0d9ba81..f0bcf2fe5b 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -202,7 +202,7 @@ impl TestableNetworkingImplementation /// - An invalid configuration /// (probably an issue with the defaults of this function) /// - An inability to spin up the replica's network - #[allow(clippy::panic)] + #[allow(clippy::panic, clippy::too_many_lines)] fn generator( expected_node_count: usize, num_bootstrap: usize, @@ -217,8 +217,8 @@ impl TestableNetworkingImplementation "DA committee size must be less than or equal to total # nodes" ); let bootstrap_addrs: PeerInfoVec = Arc::default(); + let node_ids: Arc>> = Arc::default(); // We assign known_nodes' public key and stake value rather than read from config file since it's a test - let mut all_keys = BTreeSet::new(); let mut da_keys = BTreeSet::new(); for i in 0u64..(expected_node_count as u64) { @@ -227,7 +227,6 @@ impl TestableNetworkingImplementation if i < da_committee_size as u64 { da_keys.insert(pubkey.clone()); } - all_keys.insert(pubkey); } // NOTE uncomment this for easier debugging @@ -296,9 +295,18 @@ impl TestableNetworkingImplementation .unwrap() }; let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); + let node_ids_ref = Arc::clone(&node_ids); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); Box::pin(async move { + // If it's the second time we are starting this network, clear the bootstrap info + let mut write_ids = node_ids_ref.write().await; + if write_ids.contains(&node_id) { + write_ids.clear(); + bootstrap_addrs_ref.write().await.clear(); + } + write_ids.insert(node_id); + drop(write_ids); Arc::new( match Libp2pNetwork::new( Libp2pMetricsValue::default(), @@ -493,11 +501,6 @@ impl Libp2pNetwork { #[cfg(feature = "hotshot-testing")] reliability_config: Option>, is_da: bool, ) -> Result, NetworkError> { - // Error if there were no bootstrap nodes specified - #[cfg(not(feature = "hotshot-testing"))] - if bootstrap_addrs.read().await.len() == 0 { - return Err(NetworkError::NoBootstrapNodesSpecified); - } let (mut rx, network_handle) = spawn_network_node(config.clone(), id) .await .map_err(Into::::into)?; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 58692d7625..2f609fd311 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -197,6 +197,20 @@ impl + 'static> SystemContextHandl self.hotshot.public_key.clone() } + /// Get the sender side of the external event stream for testing purpose + #[cfg(feature = "hotshot-testing")] + #[must_use] + pub fn external_channel_sender(&self) -> Sender> { + self.output_event_stream.0.clone() + } + + /// Get the sender side of the internal event stream for testing purpose + #[cfg(feature = "hotshot-testing")] + #[must_use] + pub fn internal_channel_sender(&self) -> Sender>> { + self.internal_event_stream.0.clone() + } + /// Wrapper to get the view number this node is on. #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] pub async fn cur_view(&self) -> TYPES::Time { diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index 8c633ff992..a37a3505e7 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -62,7 +62,7 @@ impl DMBehaviour { request_id, error, } => { - error!("Outbound message failure to {:?}: {:?}", peer, error); + warn!("Outbound message failure to {:?}: {:?}", peer, error); if let Some(mut req) = self.in_progress_rr.remove(&request_id) { if req.retry_count == 0 { return None; diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index f80f322835..94cf903020 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -768,7 +768,7 @@ impl NetworkNode { event = self.swarm.next() => { debug!("peerid {:?}\t\thandling maybe event {:?}", self.peer_id, event); if let Some(event) = event { - info!("peerid {:?}\t\thandling event {:?}", self.peer_id, event); + debug!("peerid {:?}\t\thandling event {:?}", self.peer_id, event); self.handle_swarm_events(event, &r_input).await?; } }, diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 79c7e9f9b8..52127a88f4 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -172,6 +172,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( ) -> Result> { let (parent_leaf, state) = parent_leaf_and_state( view, + &sender, quorum_membership, public_key.clone(), OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 10cd2e7528..daa2f5f37e 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -281,6 +281,7 @@ pub async fn decide_from_proposal( #[instrument(skip_all)] pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::Time, + event_stream: &Sender>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, consensus: OuterConsensus, @@ -289,7 +290,22 @@ pub(crate) async fn parent_leaf_and_state( quorum_membership.leader(next_proposal_view_number) == public_key, "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", ); - + let parent_view_number = consensus.read().await.high_qc().view_number(); + if !consensus + .read() + .await + .validated_state_map() + .contains_key(&parent_view_number) + { + let _ = fetch_proposal( + parent_view_number, + event_stream.clone(), + quorum_membership, + consensus.clone(), + ) + .await + .context("Failed to fetch proposal")?; + } let consensus_reader = consensus.read().await; let parent_view_number = consensus_reader.high_qc().view_number(); let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index b03c738445..5d7113a81b 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -114,6 +114,7 @@ impl ProposalDependencyHandle { ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, + &self.sender, Arc::clone(&self.quorum_membership), self.public_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 3435e1f8e7..4475315675 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -126,6 +126,7 @@ impl> QuorumProposalRecvTaskState< // Build the parent leaf since we didn't find it during the proposal check. let parent_leaf = match parent_leaf_and_state( proposal.data.view_number() + 1, + &event_stream, Arc::clone(&self.quorum_membership), self.public_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index b41dde76fb..aa9cd7d73d 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -306,19 +306,31 @@ impl> ProposalRequester return; } }; - if let Ok(Ok(serialized_response)) = response { - if let Ok(ResponseMessage::Found(msg)) = bincode::deserialize(&serialized_response) { - let SequencingMessage::General(GeneralConsensusMessage::Proposal(prop)) = msg - else { - error!("Requested Proposal but received a non-proposal in response. Response was {:?}", msg); - broadcast_event(None, &self.sender).await; + match response { + Ok(Ok(serialized_response)) => { + if let Ok(ResponseMessage::Found(msg)) = bincode::deserialize(&serialized_response) + { + let SequencingMessage::General(GeneralConsensusMessage::Proposal(prop)) = msg + else { + error!("Requested Proposal but received a non-proposal in response. Response was {:?}", msg); + broadcast_event(None, &self.sender).await; + return; + }; + debug!("proposal found {:?}", prop); + broadcast_event(Some(prop), &self.sender).await; return; - }; - broadcast_event(Some(prop), &self.sender).await; + } + debug!("Proposal not found"); + broadcast_event(None, &self.sender).await; + } + Ok(Err(e)) => { + debug!("request for proposal failed with error {:?}", e); + broadcast_event(None, &self.sender).await; + } + Err(e) => { + debug!("request for proposal timed out with error {:?}", e); + broadcast_event(None, &self.sender).await; } - broadcast_event(None, &self.sender).await; - } else { - broadcast_event(None, &self.sender).await; } } } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 65dffac329..653e5096f9 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -6,6 +6,7 @@ use std::{ use anyhow::Result; use async_lock::RwLock; use async_trait::async_trait; +use futures::future::join_all; use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -93,6 +94,8 @@ where } } + let mut new_nodes = vec![]; + let mut new_networks = vec![]; // if we have not seen this view before if self.latest_view.is_none() || view_number > self.latest_view.unwrap() { // perform operations on the nodes @@ -148,6 +151,9 @@ where ) .await } + LateNodeContext::Restart => { + panic!("Cannot spin up a node with Restart context") + } }; let handle = context.run_tasks().await; @@ -171,6 +177,74 @@ where node.handle.shut_down().await; } } + UpDown::Restart => { + let node_id = idx.try_into().unwrap(); + if let Some(node) = self.handles.write().await.get_mut(idx) { + tracing::error!("Node {} shutting down", idx); + node.handle.shut_down().await; + + let Some(LateStartNode { + network, + context: LateNodeContext::Restart, + }) = self.late_start.get(&node_id) + else { + panic!("Restated Nodes must have an unitialized context"); + }; + + let storage = node.handle.storage().clone(); + let memberships = node.handle.memberships.clone(); + let config = node.handle.hotshot.config.clone(); + let auction_results_provider = + node.handle.hotshot.auction_results_provider.clone(); + let read_storage = storage.read().await; + let initializer = HotShotInitializer::::from_reload( + self.last_decided_leaf.clone(), + TestInstanceState {}, + None, + view_number, + read_storage.proposals_cloned().await, + read_storage.high_qc_cloned().await.unwrap_or( + QuorumCertificate::genesis( + &TestValidatedState::default(), + &TestInstanceState {}, + ) + .await, + ), + Vec::new(), + BTreeMap::new(), + ); + // We assign node's public key and stake value rather than read from config file since it's a test + let validator_config = ValidatorConfig::generated_from_seed_indexed( + [0u8; 32], + node_id, + 1, + // For tests, make the node DA based on its index + node_id < config.da_staked_committee_size as u64, + ); + let context = + TestRunner::::add_node_with_config_and_channels( + node_id, + network.clone(), + (*memberships).clone(), + initializer, + config, + validator_config, + (*read_storage).clone(), + (*auction_results_provider).clone(), + ( + node.handle.internal_channel_sender(), + node.handle.internal_event_stream_known_impl(), + ), + ( + node.handle.external_channel_sender(), + node.handle.event_stream_known_impl(), + ), + ) + .await; + new_nodes.push((context, idx)); + new_networks.push(network.clone()); + } + } UpDown::NetworkUp => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); @@ -186,7 +260,29 @@ where } } } + let mut ready_futs = vec![]; + while let Some(net) = new_networks.pop() { + ready_futs.push(async move { + net.wait_for_ready().await; + }); + } + join_all(ready_futs).await; + + while let Some((node, id)) = new_nodes.pop() { + let handle = node.run_tasks().await; + // Create the node and add it to the state, so we can shut them + // down properly later to avoid the overflow error in the overall + // safety task. + let node = Node { + node_id: id.try_into().unwrap(), + network: node.network.clone(), + handle, + }; + node.handle.hotshot.start_consensus().await; + + self.handles.write().await[id] = node; + } // update our latest view self.latest_view = Some(view_number); } @@ -210,6 +306,8 @@ pub enum UpDown { NetworkUp, /// spin the node's network down NetworkDown, + /// restart the node + Restart, } /// denotes a change in node state diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index e11a343676..307ee37fc8 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -5,15 +5,16 @@ use std::{ sync::Arc, }; -use async_broadcast::broadcast; +use async_broadcast::{broadcast, Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use futures::future::join_all; use hotshot::{ - traits::TestableNodeImplementation, types::SystemContextHandle, HotShotInitializer, - Memberships, SystemContext, + traits::TestableNodeImplementation, + types::{Event, SystemContextHandle}, + HotShotInitializer, Memberships, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -21,6 +22,7 @@ use hotshot_example_types::{ storage_types::TestStorage, }; use hotshot_fakeapi::fake_solver::FakeSolverState; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, @@ -88,17 +90,22 @@ where .clone(); let mut late_start_nodes: HashSet = HashSet::new(); + let mut restart_nodes: HashSet = HashSet::new(); for (_, changes) in &spinning_changes { for change in changes { if matches!(change.updown, UpDown::Up) { late_start_nodes.insert(change.idx.try_into().unwrap()); } + if matches!(change.updown, UpDown::Restart) { + restart_nodes.insert(change.idx.try_into().unwrap()); + } } } self.add_nodes::( self.launcher.metadata.num_nodes_with_stake, &late_start_nodes, + &restart_nodes, ) .await; let mut event_rxs = vec![]; @@ -370,6 +377,7 @@ where &mut self, total: usize, late_start: &HashSet, + restart: &HashSet, ) -> Vec { let mut results = vec![]; let config = self.launcher.resource_generator.config.clone(); @@ -495,6 +503,21 @@ where results.push(node_id); } + // Add the restart nodes after the rest. This must be done after all the original networks are + // created because this will reset the bootstrap info for the restarted nodes + for node_id in &results { + if restart.contains(node_id) { + self.late_start.insert( + *node_id, + LateStartNode { + network: (self.launcher.resource_generator.channel_generator)(*node_id) + .await, + context: LateNodeContext::Restart, + }, + ); + } + } + // Wait for all networks to be ready join_all(networks_ready).await; @@ -570,6 +593,45 @@ where auction_results_provider, ) } + + /// add a specific node with a config + /// # Panics + /// if unable to initialize the node's `SystemContext` based on the config + #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub async fn add_node_with_config_and_channels( + node_id: u64, + network: Network, + memberships: Memberships, + initializer: HotShotInitializer, + config: HotShotConfig, + validator_config: ValidatorConfig, + storage: I::Storage, + auction_results_provider: I::AuctionResultsProvider, + internal_channel: ( + Sender>>, + Receiver>>, + ), + external_channel: (Sender>, Receiver>), + ) -> Arc> { + // Get key pair for certificate aggregation + let private_key = validator_config.private_key.clone(); + let public_key = validator_config.public_key.clone(); + + SystemContext::new_from_channels( + public_key, + private_key, + node_id, + config, + memberships, + network, + initializer, + ConsensusMetricsValue::default(), + storage, + auction_results_provider, + internal_channel, + external_channel, + ) + } } /// a node participating in a test @@ -608,6 +670,8 @@ pub enum LateNodeContext> /// The system context that we're passing to the node when it is not yet initialized, so we're /// initializing it based on the received leaf and init parameters. UninitializedContext(LateNodeContextParameters), + /// The node is to be restarted so we will build the context from the node that was already running. + Restart, } /// A yet-to-be-started node that participates in tests diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 69a0774dd5..4a3bd8c8f8 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -286,3 +286,63 @@ async fn test_catchup_reload() { .run_test::() .await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_all_restart() { + use std::time::Duration; + + use hotshot_example_types::node_types::{CombinedImpl, TestTypes}; + use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestDescription, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 2000, + ..Default::default() + }; + let mut metadata: TestDescription = TestDescription::default(); + let mut catchup_nodes = vec![]; + for i in 1..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: UpDown::Restart, + })}; + + metadata.timing_data = timing_data; + metadata.start_nodes = 20; + metadata.num_nodes_with_stake = 20; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(13, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + metadata + .gen_launcher(0) + .launch() + .run_test::() + .await; +} From 1b46c2465b6e8fe60fff5015ce89f1c71055ea4d Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 2 Aug 2024 11:11:37 -0400 Subject: [PATCH 1161/1393] [AUDIT][LOW][CX_HARDENING] - Add Bespoke Tests For VoteDependencyHandle (#3517) * add test * add permutations * better comment * comments * feedback --- hotshot/src/types/handle.rs | 8 +- task-impls/src/quorum_vote/mod.rs | 17 +- testing/Cargo.toml | 1 + testing/src/test_runner.rs | 2 +- testing/tests/tests_1/quorum_vote_task.rs | 1 + .../tests/tests_1/vote_dependency_handle.rs | 145 ++++++++++++++++++ 6 files changed, 163 insertions(+), 11 deletions(-) create mode 100644 testing/tests/tests_1/vote_dependency_handle.rs diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 2f609fd311..0b65b66cec 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -83,13 +83,19 @@ impl + 'static> SystemContextHandl self.output_event_stream.1.activate_cloned() } + /// HACK so we can create dependency tasks when running tests + #[must_use] + pub fn internal_event_stream_sender(&self) -> Sender>> { + self.internal_event_stream.0.clone() + } + /// HACK so we can know the types when running tests... /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper /// NOTE: this is only used for sanity checks in our tests #[must_use] - pub fn internal_event_stream_known_impl(&self) -> Receiver>> { + pub fn internal_event_stream_receiver_known_impl(&self) -> Receiver>> { self.internal_event_stream.1.activate_cloned() } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 5314a27f57..d260bee460 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -59,30 +59,29 @@ enum VoteDependency { } /// Handler for the vote dependency. -#[allow(dead_code)] -struct VoteDependencyHandle> { +pub struct VoteDependencyHandle> { /// Public key. pub public_key: TYPES::SignatureKey, /// Private Key. pub private_key: ::PrivateKey, /// Reference to consensus. The replica will require a write lock on this. - consensus: OuterConsensus, + pub consensus: OuterConsensus, /// Immutable instance state - instance_state: Arc, + pub instance_state: Arc, /// Membership for Quorum certs/votes. - quorum_membership: Arc, + pub quorum_membership: Arc, /// Reference to the storage. pub storage: Arc>, /// View number to vote on. - view_number: TYPES::Time, + pub view_number: TYPES::Time, /// Event sender. - sender: Sender>>, + pub sender: Sender>>, /// Event receiver. - receiver: Receiver>>, + pub receiver: Receiver>>, /// An upgrade certificate that has been decided on, if any. pub decided_upgrade_certificate: Arc>>>, /// The node's id - id: u64, + pub id: u64, } impl + 'static> VoteDependencyHandle { diff --git a/testing/Cargo.toml b/testing/Cargo.toml index e826349de8..cccd8c8f5e 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -50,6 +50,7 @@ tagged-base64.workspace = true vec1 = { workspace = true } reqwest = { workspace = true } url = { workspace = true } +itertools = "0.13.0" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 307ee37fc8..54b683ef33 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -116,7 +116,7 @@ where event_rxs.push(r); } for node in &self.nodes { - let r = node.handle.internal_event_stream_known_impl(); + let r = node.handle.internal_event_stream_receiver_known_impl(); internal_event_rxs.push(r); } diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b8b4680bcd..eea01a8b6b 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -276,3 +276,4 @@ async fn test_quorum_vote_task_incorrect_dependency() { }; run_test![inputs, script].await; } + diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs new file mode 100644 index 0000000000..4a7df73c6c --- /dev/null +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -0,0 +1,145 @@ +#![cfg(feature = "dependency-tasks")] + +use itertools::Itertools; +use std::time::Duration; + +use async_compatibility_layer::art::async_timeout; +use futures::StreamExt; +use hotshot::tasks::task_state::CreateTaskState; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_macros::{run_test, test_scripts}; +use hotshot_task_impls::{ + events::HotShotEvent::*, + quorum_vote::{QuorumVoteTaskState, VoteDependencyHandle}, +}; +use hotshot_testing::{ + all_predicates, + helpers::{build_fake_view_with_leaf, build_system_handle, vid_share}, + predicates::{event::*, Predicate, PredicateResult}, + random, + script::{Expectations, InputOrder, TaskScript}, + serial, + view_generator::TestViewGenerator, +}; +use hotshot_types::{ + consensus::OuterConsensus, data::ViewNumber, traits::node_implementation::ConsensusTime, + vote::HasViewNumber, +}; + +const TIMEOUT: Duration = Duration::from_millis(35); + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_vote_dependency_handle() { + use std::sync::Arc; + + use hotshot_task_impls::helpers::broadcast_event; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + // We use a node ID of 2 here abitrarily. We just need it to build the system handle. + let node_id = 2; + + // Construct the system handle for the node ID to build all of the state objects. + let handle = build_system_handle::(node_id) + .await + .0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + // Generate our state for the test + let mut proposals = Vec::new(); + let mut leaves = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + let consensus = handle.hotshot.consensus().clone(); + let mut consensus_writer = consensus.write().await; + for view in (&mut generator).take(2).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + leaves.push(view.leaf.clone()); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + consensus_writer + .update_validated_state_map( + view.quorum_proposal.data.view_number(), + build_fake_view_with_leaf(view.leaf.clone()), + ) + .unwrap(); + consensus_writer.update_saved_leaves(view.leaf.clone()); + } + drop(consensus_writer); + + // We permute all possible orderings of inputs. Ordinarily we'd use `random!` for this, but + // the dependency handles do not (yet) work with the existing test suite. + let all_inputs = vec![ + DaCertificateValidated(dacs[1].clone()), + QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + VidShareValidated(vids[1].0[0].clone()), + ] + .into_iter() + .permutations(3); + + // For each permutation... + for inputs in all_inputs.into_iter() { + // The outputs are static here, but we re-make them since we use `into_iter` below + let outputs = vec![ + exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + validated_state_updated(), + quorum_vote_send(), + ]; + + // We only need this to be able to make the vote dependency handle state. It's not explicitly necessary, but it's easy. + let qv = QuorumVoteTaskState::::create_from(&handle).await; + + let event_sender = handle.internal_event_stream_sender(); + let mut event_receiver = handle.internal_event_stream_receiver_known_impl(); + let view_number = ViewNumber::new(node_id); + + let vote_dependency_handle_state = VoteDependencyHandle:: { + public_key: qv.public_key.clone(), + private_key: qv.private_key.clone(), + consensus: OuterConsensus::new(Arc::clone(&qv.consensus.inner_consensus)), + instance_state: Arc::clone(&qv.instance_state), + quorum_membership: Arc::clone(&qv.quorum_membership), + storage: Arc::clone(&qv.storage), + view_number, + sender: event_sender.clone(), + receiver: event_receiver.clone(), + decided_upgrade_certificate: Arc::clone(&qv.decided_upgrade_certificate), + id: qv.id, + }; + + let inputs_len = inputs.len(); + for event in inputs.into_iter() { + broadcast_event(event.into(), &event_sender).await; + } + + // We need to avoid re-processing the inputs during our output evaluation. This part here is not + // strictly necessary, but it makes writing the outputs easier. + let mut i = 0; + let mut output_events = vec![]; + while let Ok(Ok(received_output)) = + async_timeout(TIMEOUT, event_receiver.recv_direct()).await + { + // Skip over all inputs (the order is deterministic). + if i < inputs_len { + i += 1; + continue; + } + + output_events.push(received_output); + } + + // Finally, evaluate that the test does what we expected. The control flow of the handle always + // outputs in the same order. + for (check, real) in outputs.into_iter().zip(output_events) { + if check.evaluate(&real).await == PredicateResult::Fail { + panic!("Output {real} did not match expected output {check:?}"); + } + } + } +} From 5a18deecb6d1c31c18ca562be8f5d018b76a9e46 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 2 Aug 2024 11:15:28 -0400 Subject: [PATCH 1162/1393] Add `MarketplaceConfig` to `SystemContext::new/init` (#3529) --- examples/Cargo.toml | 1 + examples/infra/mod.rs | 10 +++++-- hotshot/Cargo.toml | 1 + hotshot/src/lib.rs | 34 ++++++++++++++-------- hotshot/src/tasks/mod.rs | 8 +++--- hotshot/src/tasks/task_state.rs | 9 +++++- task-impls/Cargo.toml | 1 + task-impls/src/transactions.rs | 12 ++++++-- testing/src/helpers.rs | 4 +-- testing/src/spinning_task.rs | 10 +++---- testing/src/test_builder.rs | 18 +++++++----- testing/src/test_launcher.rs | 11 +++---- testing/src/test_runner.rs | 35 +++++++++++++---------- testing/tests/tests_1/transaction_task.rs | 25 +++++++++------- 14 files changed, 113 insertions(+), 66 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 1b328cc452..b51ccc857d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -115,6 +115,7 @@ vbs = { workspace = true } sha2.workspace = true local-ip-address = "0.6" vec1 = { workspace = true } +url = { workspace = true } tracing = { workspace = true } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 00c5c320e2..980afff810 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -27,7 +27,7 @@ use hotshot::{ BlockPayload, NodeImplementation, }, types::SystemContextHandle, - Memberships, SystemContext, + MarketplaceConfig, Memberships, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -396,6 +396,12 @@ pub trait RunDa< view_sync_membership: quorum_membership, }; + let marketplace_config = MarketplaceConfig { + auction_results_provider: TestAuctionResultsProvider::::default().into(), + // TODO: we need to pass a valid generic builder url here somehow + generic_builder_url: url::Url::parse("http://localhost").unwrap(), + }; + SystemContext::init( pk, sk, @@ -406,7 +412,7 @@ pub trait RunDa< initializer, ConsensusMetricsValue::default(), TestStorage::::default(), - TestAuctionResultsProvider::::default(), + marketplace_config, ) .await .expect("Could not init hotshot") diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index d7c2f2c3b8..7bebae6c46 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -59,6 +59,7 @@ jf-signature.workspace = true hotshot-orchestrator = { path = "../orchestrator" } blake3.workspace = true sha2 = { workspace = true } +url = { workspace = true } num_enum = "0.7" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 0b3c17514c..11d4ca29b9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -8,6 +8,7 @@ pub mod documentation; use futures::future::{select, Either}; use hotshot_types::traits::network::BroadcastDelay; use rand::Rng; +use url::Url; use vbs::version::StaticVersionType; /// Contains traits consumed by [`SystemContext`] @@ -71,6 +72,15 @@ pub const H_512: usize = 64; /// Length, in bytes, of a 256 bit hash pub const H_256: usize = 32; +#[derive(Clone)] +/// Wrapper for all marketplace config that needs to be passed when creating a new instance of HotShot +pub struct MarketplaceConfig> { + /// auction results provider + pub auction_results_provider: Arc, + /// generic builder + pub generic_builder_url: Url, +} + /// Bundle of all the memberships a consensus instance uses #[derive(Clone)] pub struct Memberships { @@ -141,8 +151,8 @@ pub struct SystemContext> { /// a potential upgrade certificate that has been decided on by the consensus tasks. pub decided_upgrade_certificate: Arc>>>, - /// Reference to the AuctionResultsProvider type for acquiring solver results. - pub auction_results_provider: Arc, + /// Marketplace config for this instance of HotShot + pub marketplace_config: MarketplaceConfig, } impl> Clone for SystemContext { #![allow(deprecated)] @@ -165,7 +175,7 @@ impl> Clone for SystemContext> SystemContext { initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, ) -> Arc { let interal_chan = broadcast(EVENT_CHANNEL_SIZE); let external_chan = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); @@ -204,7 +214,7 @@ impl> SystemContext { initializer, metrics, storage, - auction_results_provider, + marketplace_config, interal_chan, external_chan, ) @@ -228,7 +238,7 @@ impl> SystemContext { initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, internal_channel: ( Sender>>, Receiver>>, @@ -326,7 +336,7 @@ impl> SystemContext { anchored_leaf: anchored_leaf.clone(), storage: Arc::new(RwLock::new(storage)), decided_upgrade_certificate, - auction_results_provider: Arc::new(auction_results_provider), + marketplace_config, }); inner @@ -565,7 +575,7 @@ impl> SystemContext { initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, ) -> Result< ( SystemContextHandle, @@ -584,7 +594,7 @@ impl> SystemContext { initializer, metrics, storage, - auction_results_provider, + marketplace_config, ); let handle = Arc::clone(&hotshot).run_tasks().await; let (tx, rx) = hotshot.internal_event_stream.clone(); @@ -725,7 +735,7 @@ where initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, ) -> (SystemContextHandle, SystemContextHandle) { let left_system_context = SystemContext::new( public_key.clone(), @@ -737,7 +747,7 @@ where initializer.clone(), metrics.clone(), storage.clone(), - auction_results_provider.clone(), + marketplace_config.clone(), ); let right_system_context = SystemContext::new( public_key, @@ -749,7 +759,7 @@ where initializer, metrics, storage, - auction_results_provider, + marketplace_config, ); // create registries for both handles diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5ed9503891..35fb0b0a3f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -35,8 +35,8 @@ use vbs::version::StaticVersionType; use crate::{ tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, Memberships, - NetworkTaskRegistry, SignatureKey, SystemContext, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, }; /// event for global event stream @@ -232,7 +232,7 @@ where initializer: HotShotInitializer, metrics: ConsensusMetricsValue, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { let hotshot = SystemContext::new( public_key, @@ -244,7 +244,7 @@ where initializer, metrics, storage, - auction_results_provider, + marketplace_config, ); let consensus_registry = ConsensusTaskRegistry::new(); let network_registry = NetworkTaskRegistry::new(); diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index e94a3e0a21..2c5e14f91c 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -204,7 +204,14 @@ impl> CreateTaskState .map(BuilderClient::new) .collect(), decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), - auction_results_provider: Arc::clone(&handle.hotshot.auction_results_provider), + auction_results_provider: Arc::clone( + &handle.hotshot.marketplace_config.auction_results_provider, + ), + generic_builder_url: handle + .hotshot + .marketplace_config + .generic_builder_url + .clone(), } } } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 1561c74a7e..4aea968dff 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -37,6 +37,7 @@ surf-disco = { workspace = true } tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } +url = { workspace = true } vbs = { workspace = true } vec1 = { workspace = true } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e70c734714..1dda8c5a30 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -29,6 +29,7 @@ use hotshot_types::{ vid::{VidCommitment, VidPrecomputeData}, }; use tracing::{debug, error, instrument, warn}; +use url::Url; use vbs::version::StaticVersionType; use vec1::Vec1; @@ -103,6 +104,8 @@ pub struct TransactionTaskState> { pub decided_upgrade_certificate: Arc>>>, /// auction results provider pub auction_results_provider: Arc, + /// generic builder url + pub generic_builder_url: Url, } impl> TransactionTaskState { @@ -267,7 +270,7 @@ impl> TransactionTaskState> TransactionTaskState> TransactionTaskState::from_genesis(TestInstanceState {}) @@ -113,7 +113,7 @@ pub async fn build_system_handle< initializer, ConsensusMetricsValue::default(), storage, - auction_results_provider, + marketplace_config, ) .await .expect("Could not init hotshot") diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 653e5096f9..8d64910351 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -117,7 +117,7 @@ where storage, memberships, config, - auction_results_provider, + marketplace_config, } = late_context_params; let initializer = HotShotInitializer::::from_reload( @@ -147,7 +147,7 @@ where config, validator_config, storage, - auction_results_provider, + marketplace_config, ) .await } @@ -194,8 +194,8 @@ where let storage = node.handle.storage().clone(); let memberships = node.handle.memberships.clone(); let config = node.handle.hotshot.config.clone(); - let auction_results_provider = - node.handle.hotshot.auction_results_provider.clone(); + let marketplace_config = + node.handle.hotshot.marketplace_config.clone(); let read_storage = storage.read().await; let initializer = HotShotInitializer::::from_reload( self.last_decided_leaf.clone(), @@ -230,7 +230,7 @@ where config, validator_config, (*read_storage).clone(), - (*auction_results_provider).clone(), + marketplace_config.clone(), ( node.handle.internal_channel_sender(), node.handle.internal_event_stream_known_impl(), diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index d2726aa25f..d09690eebb 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -4,7 +4,7 @@ use hotshot::{ tasks::EventTransformerState, traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, types::SystemContextHandle, - HotShotInitializer, Memberships, SystemContext, TwinsHandlerState, + HotShotInitializer, MarketplaceConfig, Memberships, SystemContext, TwinsHandlerState, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, @@ -105,7 +105,7 @@ pub async fn create_test_handle< memberships: Memberships, config: HotShotConfig, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) .await @@ -135,7 +135,7 @@ pub async fn create_test_handle< initializer, ConsensusMetricsValue::default(), storage, - auction_results_provider, + marketplace_config, ) .await; @@ -154,7 +154,7 @@ pub async fn create_test_handle< initializer, ConsensusMetricsValue::default(), storage, - auction_results_provider, + marketplace_config, ) .await } @@ -169,7 +169,7 @@ pub async fn create_test_handle< initializer, ConsensusMetricsValue::default(), storage, - auction_results_provider, + marketplace_config, ); hotshot.run_tasks().await @@ -365,6 +365,8 @@ impl> Default for TestDescription< impl, I: TestableNodeImplementation> TestDescription +where + I: NodeImplementation>, { /// turn a description of a test (e.g. a [`TestDescription`]) into /// a [`TestLauncher`] that can be used to launch the test. @@ -478,8 +480,10 @@ impl, I: TestableNodeImplemen ), storage: Box::new(|_| TestStorage::::default()), config, - auction_results_provider: Box::new(|_| { - TestAuctionResultsProvider::::default() + marketplace_config: Box::new(|_| MarketplaceConfig:: { + auction_results_provider: TestAuctionResultsProvider::::default().into(), + // TODO: we need to pass a valid generic builder url here somehow + generic_builder_url: Url::parse("http://localhost").unwrap(), }), }, metadata: self, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 077194fad7..6a1c077b6b 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -1,9 +1,10 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; -use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; -use hotshot_example_types::{ - auction_results_provider_types::TestAuctionResultsProvider, storage_types::TestStorage, +use hotshot::{ + traits::{NodeImplementation, TestableNodeImplementation}, + MarketplaceConfig, }; +use hotshot_example_types::storage_types::TestStorage; use hotshot_types::{ traits::{ network::{AsyncGenerator, ConnectedNetwork}, @@ -28,8 +29,8 @@ pub struct ResourceGenerators>, /// configuration used to generate each hotshot node pub config: HotShotConfig, - /// generate a new auction results connector for each node - pub auction_results_provider: Generator>, + /// generate a new marketplace config for each node + pub marketplace_config: Generator>, } /// test launcher diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 54b683ef33..80ef447efb 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -14,7 +14,7 @@ use futures::future::join_all; use hotshot::{ traits::TestableNodeImplementation, types::{Event, SystemContextHandle}, - HotShotInitializer, Memberships, SystemContext, + HotShotInitializer, MarketplaceConfig, Memberships, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -429,10 +429,15 @@ where let network = (self.launcher.resource_generator.channel_generator)(node_id).await; let storage = (self.launcher.resource_generator.storage)(node_id); - let mut auction_results_provider = - (self.launcher.resource_generator.auction_results_provider)(node_id); + let mut marketplace_config = + (self.launcher.resource_generator.marketplace_config)(node_id); if let Some(solver_server) = &self.solver_server { - auction_results_provider.broadcast_url = Some(solver_server.0.clone()); + let mut new_auction_results_provider = + marketplace_config.auction_results_provider.as_ref().clone(); + + new_auction_results_provider.broadcast_url = Some(solver_server.0.clone()); + + marketplace_config.auction_results_provider = new_auction_results_provider.into(); } let network_clone = network.clone(); @@ -453,7 +458,7 @@ where storage, memberships, config, - auction_results_provider, + marketplace_config, }, ), }, @@ -478,7 +483,7 @@ where config, validator_config, storage, - auction_results_provider, + marketplace_config, ) .await; self.late_start.insert( @@ -496,7 +501,7 @@ where memberships, config, storage, - auction_results_provider, + marketplace_config, )); } @@ -522,7 +527,7 @@ where join_all(networks_ready).await; // Then start the necessary tasks - for (node_id, network, memberships, config, storage, auction_results_provider) in + for (node_id, network, memberships, config, storage, marketplace_config) in uninitialized_nodes { let behaviour = (self.launcher.metadata.behaviour)(node_id); @@ -533,7 +538,7 @@ where memberships, config.clone(), storage, - auction_results_provider, + marketplace_config, ) .await; @@ -574,7 +579,7 @@ where config: HotShotConfig, validator_config: ValidatorConfig, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, ) -> Arc> { // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); @@ -590,7 +595,7 @@ where initializer, ConsensusMetricsValue::default(), storage, - auction_results_provider, + marketplace_config, ) } @@ -606,7 +611,7 @@ where config: HotShotConfig, validator_config: ValidatorConfig, storage: I::Storage, - auction_results_provider: I::AuctionResultsProvider, + marketplace_config: MarketplaceConfig, internal_channel: ( Sender>>, Receiver>>, @@ -627,7 +632,7 @@ where initializer, ConsensusMetricsValue::default(), storage, - auction_results_provider, + marketplace_config, internal_channel, external_channel, ) @@ -656,8 +661,8 @@ pub struct LateNodeContextParameters, - /// The Auction Results handle for this node. - pub auction_results_provider: I::AuctionResultsProvider, + /// The marketplace config for this node. + pub marketplace_config: MarketplaceConfig, } /// The late node context dictates how we're building a node that started late during the test. diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index dc3386162f..14ffd17f61 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -1,16 +1,19 @@ use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ + block_types::TestMetadata, node_types::{MemoryImpl, TestConsecutiveLeaderTypes}, - block_types::TestMetadata }; use hotshot_task_impls::{ - events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState + events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState, }; use hotshot_testing::helpers::build_system_handle; use hotshot_types::{ - data::{ViewNumber, null_block, PackedBundle}, - traits::{node_implementation::ConsensusTime, election::Membership, block_contents::precompute_vid_commitment}, - constants::BaseVersion + constants::BaseVersion, + data::{null_block, PackedBundle, ViewNumber}, + traits::{ + block_contents::precompute_vid_commitment, election::Membership, + node_implementation::ConsensusTime, + }, }; use vbs::version::StaticVersionType; @@ -23,7 +26,9 @@ async fn test_transaction_task_leader_two_views_in_a_row() { // Build the API for node 2. let node_id = 2; - let handle = build_system_handle::(node_id).await.0; + let handle = build_system_handle::(node_id) + .await + .0; let mut input = Vec::new(); let mut output = Vec::new(); @@ -32,9 +37,8 @@ async fn test_transaction_task_leader_two_views_in_a_row() { input.push(HotShotEvent::ViewChange(current_view)); input.push(HotShotEvent::Shutdown); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - - let (_, precompute_data) = - precompute_vid_commitment(&[], quorum_membership.total_nodes()); + + let (_, precompute_data) = precompute_vid_commitment(&[], quorum_membership.total_nodes()); // current view let mut exp_packed_bundle = PackedBundle::new( @@ -55,6 +59,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { exp_packed_bundle.view_number = current_view + 1; output.push(HotShotEvent::BlockRecv(exp_packed_bundle)); - let transaction_state = TransactionTaskState::::create_from(&handle).await; + let transaction_state = + TransactionTaskState::::create_from(&handle).await; run_harness(input, output, transaction_state, false).await; } From f453735f97455f33fa63ea8f55f05302a9a9192d Mon Sep 17 00:00:00 2001 From: 0xkato <106168398+0xkato@users.noreply.github.com> Date: Fri, 2 Aug 2024 17:20:01 +0200 Subject: [PATCH 1163/1393] Tj/typos and ordering (#3524) * correction of typos and ordering of variables * correction of typo * correction of various typos * superfluous code * Remove unused import --- task-impls/src/events.rs | 8 ++++---- task-impls/src/lib.rs | 2 +- task-impls/src/quorum_proposal/handlers.rs | 6 +++--- task-impls/src/quorum_proposal/mod.rs | 6 +++--- task-impls/src/quorum_vote/mod.rs | 2 +- task-impls/src/request.rs | 6 +++--- task-impls/src/response.rs | 2 +- task-impls/src/transactions.rs | 2 +- task-impls/src/upgrade.rs | 2 +- task-impls/src/vid.rs | 7 ------- task-impls/src/vote_collection.rs | 4 ++-- 11 files changed, 20 insertions(+), 27 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 04a2407274..fd103341e3 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -54,7 +54,7 @@ impl Eq for ProposalMissing {} #[derive(Eq, PartialEq, Debug, Clone)] pub struct HotShotTaskCompleted; -/// All of the possible events that can be passed between Sequecning `HotShot` tasks +/// All of the possible events that can be passed between Sequencing `HotShot` tasks #[derive(Eq, PartialEq, Debug, Clone)] #[allow(clippy::large_enum_variant)] pub enum HotShotEvent { @@ -64,7 +64,7 @@ pub enum HotShotEvent { QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote), - /// A timeout vote recevied from the network; handled by consensus task + /// A timeout vote received from the network; handled by consensus task TimeoutVoteRecv(TimeoutVote), /// Send a timeout vote to the network; emitted by consensus task replicas TimeoutVoteSend(TimeoutVote), @@ -74,7 +74,7 @@ pub enum HotShotEvent { DaProposalValidated(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task DaVoteRecv(DaVote), - /// A Data Availability Certificate (DAC) has been recieved by the network; handled by the consensus task + /// A Data Availability Certificate (DAC) has been received by the network; handled by the consensus task DaCertificateRecv(DaCertificate), /// A DAC is validated. DaCertificateValidated(DaCertificate), @@ -91,7 +91,7 @@ pub enum HotShotEvent { /// 3. The justify QC is valid /// 4. The proposal passes either liveness or safety check. QuorumProposalValidated(QuorumProposal, Leaf), - /// A quorum proposal is missing for a view that we meed + /// A quorum proposal is missing for a view that we need QuorumProposalRequest(ProposalMissing), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 3c27b1f1e4..86729d6a1b 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -44,7 +44,7 @@ pub mod builder; /// Helper functions used by any task pub mod helpers; -/// Task which responsds to requests from the network +/// Task which responses to requests from the network pub mod response; /// Task for requesting the network for things diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 5d7113a81b..c6497a6059 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -198,8 +198,8 @@ impl ProposalDependencyHandle { block_header, view_number: self.view_number, justify_qc: self.consensus.read().await.high_qc().clone(), - proposal_certificate, upgrade_certificate, + proposal_certificate, }; let proposed_leaf = Leaf::from_quorum_proposal(&proposal); @@ -253,11 +253,11 @@ impl HandleDepOutput for ProposalDependencyHandle { .contains_key(&high_qc_view_number) { // The proposal for the high qc view is missing, try to get it asynchronously - let memberhsip = Arc::clone(&self.quorum_membership); + let membership = Arc::clone(&self.quorum_membership); let sender = self.sender.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); async_spawn(async move { - fetch_proposal(high_qc_view_number, sender, memberhsip, consensus).await + fetch_proposal(high_qc_view_number, sender, membership, consensus).await }); // Block on receiving the event from the event stream. EventDependency::new( diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 62b199225c..2449e95e09 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -41,7 +41,7 @@ pub struct QuorumProposalTaskState /// Latest view number that has been proposed for. pub latest_proposed_view: TYPES::Time, - /// Table for the in-progress proposal depdencey tasks. + /// Table for the in-progress proposal dependency tasks. pub proposal_dependencies: HashMap>, /// The underlying network @@ -282,7 +282,7 @@ impl> QuorumProposalTaskState>>, event: Arc>, ) { - // Don't even bother making the task if we are not entitled to propose anyay. + // Don't even bother making the task if we are not entitled to propose anyway. if self.quorum_membership.leader(view_number) != self.public_key { tracing::trace!("We are not the leader of the next view"); return; @@ -434,7 +434,7 @@ impl> QuorumProposalTaskState { let view_number = proposal.data.view_number(); - // All nodes get the latest proposed view as a proxy of `cur_view` of olde. + // All nodes get the latest proposed view as a proxy of `cur_view` of old. if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); return; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index d260bee460..7427b17f61 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -494,7 +494,7 @@ impl> QuorumVoteTaskState> { pub quorum_membership: TYPES::Membership, /// This nodes public key pub public_key: TYPES::SignatureKey, - /// This nodes private/signign key, used to sign requests. + /// This nodes private/signing key, used to sign requests. pub private_key: ::PrivateKey, /// The node's id pub id: u64, @@ -155,7 +155,7 @@ impl> NetworkRequestState Vec> { let mut reqs = Vec::new(); @@ -182,7 +182,7 @@ impl> NetworkRequestState NetworkResponseState { } /// Helper to turn a `ResponseMessage` into a `Message` by filling - /// in the surrounding feilds and creating the `MessageKind` + /// in the surrounding fields and creating the `MessageKind` fn make_msg(&self, msg: ResponseMessage) -> Message { Message { sender: self.pub_key.clone(), diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 1dda8c5a30..eba8bd4204 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -626,7 +626,7 @@ impl> TransactionTaskState> UpgradeTaskState { // At this point, we could choose to validate // that the proposal was issued by the correct leader - // for the indiciated view. + // for the indicated view. // // We choose not to, because we don't gain that much from it. // The certificate itself is only useful to the leader for that view anyway, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 702e296965..5e62015bdb 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -9,7 +9,6 @@ use hotshot_types::{ data::{PackedBundle, VidDisperse, VidDisperseShare}, message::Proposal, traits::{ - election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, BlockPayload, @@ -138,12 +137,6 @@ impl> VidTaskState { } self.cur_view = view; - // If we are not the next leader, we should exit - if self.membership.leader(self.cur_view + 1) != self.public_key { - // panic!("We are not the DA leader for view {}", *self.cur_view + 1); - return None; - } - return None; } diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 72cc982a1a..241c62b393 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -64,7 +64,7 @@ impl< CERT: Certificate + Debug, > VoteCollectionTaskState { - /// Take one vote and accumultate it. Returns either the cert or the updated state + /// Take one vote and accumulate it. Returns either the cert or the updated state /// after the vote is accumulated #[allow(clippy::question_mark)] pub async fn accumulate_vote( @@ -135,7 +135,7 @@ pub struct AccumulatorInfo { pub id: u64, } -/// Generic function for spawnnig a vote task. Returns the event stream id of the spawned task if created +/// Generic function for spawning a vote task. Returns the event stream id of the spawned task if created /// # Panics /// Calls unwrap but should never panic. pub async fn create_vote_accumulator( From ca44b56aa30d634c0a80a60ce89d4aac3dd9dc96 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 2 Aug 2024 11:52:48 -0400 Subject: [PATCH 1164/1393] Fix build errors (#3533) --- testing/src/spinning_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 8d64910351..cfcae0bdf9 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -233,7 +233,7 @@ where marketplace_config.clone(), ( node.handle.internal_channel_sender(), - node.handle.internal_event_stream_known_impl(), + node.handle.internal_event_stream_receiver_known_impl(), ), ( node.handle.external_channel_sender(), From d0939affe74b2da03b9e1def1e162845f6ea0721 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Fri, 2 Aug 2024 12:28:33 -0400 Subject: [PATCH 1165/1393] Add Byzantine Test where we have a dishonest leader (#3516) * add a test case where we have dishonest leader * add functionality to fail out if we fail unexpected view * cleanup, use last validated proposal * add a view lookback when getting the QC * address comments * ensure all the views we expect to fail actually fail * remove debug log * fix test --- hotshot/src/tasks/mod.rs | 80 ++++++++++++++++- testing/src/consistency_task.rs | 2 +- testing/src/overall_safety_task.rs | 88 ++++++++++++++----- testing/src/test_builder.rs | 8 +- testing/tests/tests_1/test_with_failures_2.rs | 51 ++++++++++- 5 files changed, 200 insertions(+), 29 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 35fb0b0a3f..58a804c70e 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -2,7 +2,7 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::{sync::Arc, time::Duration}; +use std::{collections::HashSet, sync::Arc, time::Duration}; use async_broadcast::broadcast; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -25,6 +25,8 @@ use hotshot_task_impls::{ }; use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, + data::QuorumProposal, + message::Proposal, message::{Messages, VersionedMessage}, traits::{ network::ConnectedNetwork, @@ -408,6 +410,82 @@ impl> EventTransformerState> { + /// Store events from previous views + pub validated_proposals: Vec>, + /// How many times current node has been elected leader and sent proposal + pub total_proposals_from_node: u64, + /// Which proposals to be dishonest at + pub dishonest_at_proposal_numbers: HashSet, + /// How far back to look for a QC + pub view_look_back: usize, + /// Phantom + pub _phantom: std::marker::PhantomData, +} + +/// Add method that will handle `QuorumProposalSend` events +/// If we have previous proposals stored and the total_proposals_from_node matches a value specified in dishonest_at_proposal_numbers +/// Then send out the event with the modified proposal that has an older QC +impl> DishonestLeader { + /// When a leader is sending a proposal this method will mock a dishonest leader + /// We accomplish this by looking back a number of specified views and using that cached proposals QC + fn handle_proposal_send_event( + &self, + event: &HotShotEvent, + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + ) -> HotShotEvent { + let length = self.validated_proposals.len(); + if !self + .dishonest_at_proposal_numbers + .contains(&self.total_proposals_from_node) + || length == 0 + { + return event.clone(); + } + + // Grab proposal from specified view look back + let proposal_from_look_back = if length - 1 < self.view_look_back { + // If look back is too far just take the first proposal + self.validated_proposals[0].clone() + } else { + let index = (self.validated_proposals.len() - 1) - self.view_look_back; + self.validated_proposals[index].clone() + }; + + // Create a dishonest proposal by using the old proposals qc + let mut dishonest_proposal = proposal.clone(); + dishonest_proposal.data.justify_qc = proposal_from_look_back.justify_qc; + + HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone()) + } +} + +#[async_trait] +impl + std::fmt::Debug> + EventTransformerState for DishonestLeader +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + match event { + HotShotEvent::QuorumProposalSend(proposal, sender) => { + self.total_proposals_from_node += 1; + return vec![self.handle_proposal_send_event(event, proposal, sender)]; + } + HotShotEvent::QuorumProposalValidated(proposal, _) => { + self.validated_proposals.push(proposal.clone()); + } + _ => {} + } + vec![event.clone()] + } +} + /// adds tasks for sending/receiving messages to/from the network. pub async fn add_network_tasks>( handle: &mut SystemContextHandle, diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 4b9ecc0f22..01c868a5a1 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -148,7 +148,7 @@ pub struct ConsistencyTask { /// A map from node ids to (leaves keyed on view number) pub consensus_leaves: NetworkMap, /// safety task requirements - pub safety_properties: OverallSafetyPropertiesDescription, + pub safety_properties: OverallSafetyPropertiesDescription, } #[async_trait] diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index add9853a14..3a27e6edbc 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -71,6 +71,11 @@ pub enum OverallSafetyTaskErr { failed_views: HashSet, }, + /// mismatched expected failed view vs actual failed view + InconsistentFailedViews { + expected_failed_views: HashSet, + actual_failed_views: HashSet, + }, } /// Data availability task state @@ -80,13 +85,42 @@ pub struct OverallSafetyTask, /// configure properties - pub properties: OverallSafetyPropertiesDescription, + pub properties: OverallSafetyPropertiesDescription, /// error pub error: Option>>, /// sender to test event channel pub test_sender: Sender, } +impl> OverallSafetyTask { + async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::Time) { + let expected_view_to_fail = &mut self.properties.expected_views_to_fail; + + self.ctx.failed_views.insert(view_number); + if self.ctx.failed_views.len() > num_failed_views { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + self.error = Some(Box::new(OverallSafetyTaskErr::::TooManyFailures { + failed_views: self.ctx.failed_views.clone(), + })); + } else if !expected_view_to_fail.is_empty() { + match expected_view_to_fail.get(&view_number) { + Some(_) => { + expected_view_to_fail.insert(view_number, true); + } + None => { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + self.error = Some(Box::new( + OverallSafetyTaskErr::::InconsistentFailedViews { + expected_failed_views: expected_view_to_fail.keys().cloned().collect(), + actual_failed_views: self.ctx.failed_views.clone(), + }, + )); + } + } + } + } +} + #[async_trait] impl> TestTaskState for OverallSafetyTask @@ -95,14 +129,15 @@ impl> TestTaskState /// Handles an event from one of multiple receivers. async fn handle_event(&mut self, (message, id): (Self::Event, usize)) -> Result<()> { - let OverallSafetyPropertiesDescription { + let OverallSafetyPropertiesDescription:: { check_leaf, check_block, num_failed_views, num_successful_views, threshold_calculator, transaction_threshold, - }: OverallSafetyPropertiesDescription = self.properties.clone(); + .. + }: OverallSafetyPropertiesDescription = self.properties.clone(); let Event { view_number, event } = message; let key = match event { EventType::Error { error } => { @@ -168,14 +203,9 @@ impl> TestTaskState return Ok(()); } ViewStatus::Failed => { - self.ctx.failed_views.insert(view_number); - if self.ctx.failed_views.len() > num_failed_views { - let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - self.error = - Some(Box::new(OverallSafetyTaskErr::::TooManyFailures { - failed_views: self.ctx.failed_views.clone(), - })); - } + self.handle_view_failure(num_failed_views, view_number) + .await; + return Ok(()); } ViewStatus::Err(e) => { @@ -189,13 +219,8 @@ impl> TestTaskState } } else if view.check_if_failed(threshold, len) { view.status = ViewStatus::Failed; - self.ctx.failed_views.insert(view_number); - if self.ctx.failed_views.len() > num_failed_views { - let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - self.error = Some(Box::new(OverallSafetyTaskErr::::TooManyFailures { - failed_views: self.ctx.failed_views.clone(), - })); - } + self.handle_view_failure(num_failed_views, view_number) + .await; return Ok(()); } Ok(()) @@ -206,14 +231,15 @@ impl> TestTaskState return TestResult::Fail(e.clone()); } - let OverallSafetyPropertiesDescription { + let OverallSafetyPropertiesDescription:: { check_leaf: _, check_block: _, num_failed_views: num_failed_rounds_total, num_successful_views, threshold_calculator: _, transaction_threshold: _, - }: OverallSafetyPropertiesDescription = self.properties.clone(); + expected_views_to_fail, + }: OverallSafetyPropertiesDescription = self.properties.clone(); let num_incomplete_views = self.ctx.round_results.len() - self.ctx.successful_views.len() @@ -232,6 +258,18 @@ impl> TestTaskState })); } + if !expected_views_to_fail + .values() + .all(|&view_failed| view_failed) + { + return TestResult::Fail(Box::new( + OverallSafetyTaskErr::::InconsistentFailedViews { + actual_failed_views: self.ctx.failed_views.clone(), + expected_failed_views: expected_views_to_fail.keys().cloned().collect(), + }, + )); + } + // We should really be able to include a check like this: // // if self.ctx.failed_views.len() < num_failed_rounds_total { @@ -495,7 +533,7 @@ impl RoundResult { /// cross node safety properties #[derive(Clone)] -pub struct OverallSafetyPropertiesDescription { +pub struct OverallSafetyPropertiesDescription { /// required number of successful views pub num_successful_views: usize, /// whether or not to check the leaf @@ -512,9 +550,11 @@ pub struct OverallSafetyPropertiesDescription { /// threshold calculator. Given number of live and total nodes, provide number of successes /// required to mark view as successful pub threshold_calculator: Arc usize + Send + Sync>, + /// pass in the views that we expect to fail + pub expected_views_to_fail: HashMap, } -impl std::fmt::Debug for OverallSafetyPropertiesDescription { +impl std::fmt::Debug for OverallSafetyPropertiesDescription { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("OverallSafetyPropertiesDescription") .field("num successful views", &self.num_successful_views) @@ -522,11 +562,12 @@ impl std::fmt::Debug for OverallSafetyPropertiesDescription { .field("check_block", &self.check_block) .field("num_failed_rounds_total", &self.num_failed_views) .field("transaction_threshold", &self.transaction_threshold) + .field("expected views to fail", &self.expected_views_to_fail) .finish_non_exhaustive() } } -impl Default for OverallSafetyPropertiesDescription { +impl Default for OverallSafetyPropertiesDescription { fn default() -> Self { Self { num_successful_views: 50, @@ -536,6 +577,7 @@ impl Default for OverallSafetyPropertiesDescription { transaction_threshold: 0, // very strict threshold_calculator: Arc::new(|_num_live, num_total| 2 * num_total / 3 + 1), + expected_views_to_fail: HashMap::new(), } } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index d09690eebb..aebae95f13 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -67,7 +67,7 @@ pub struct TestDescription> { /// Size of the non-staked DA committee for the test pub da_non_staked_committee_size: usize, /// overall safety property description - pub overall_safety_properties: OverallSafetyPropertiesDescription, + pub overall_safety_properties: OverallSafetyPropertiesDescription, /// spinning properties pub spinning_properties: SpinningTaskDescription, /// txns timing @@ -230,13 +230,14 @@ impl> TestDescription { num_nodes_with_stake, num_nodes_without_stake, start_nodes: num_nodes_with_stake, - overall_safety_properties: OverallSafetyPropertiesDescription { + overall_safety_properties: OverallSafetyPropertiesDescription:: { num_successful_views: 50, check_leaf: true, check_block: true, num_failed_views: 15, transaction_threshold: 0, threshold_calculator: Arc::new(|_active, total| (2 * total / 3 + 1)), + expected_views_to_fail: HashMap::new(), }, timing_data: TimingData { next_view_timeout: 2000, @@ -263,13 +264,14 @@ impl> TestDescription { num_nodes_with_stake, num_nodes_without_stake, start_nodes: num_nodes_with_stake, - overall_safety_properties: OverallSafetyPropertiesDescription { + overall_safety_properties: OverallSafetyPropertiesDescription:: { num_successful_views: 20, check_leaf: true, check_block: true, num_failed_views: 8, transaction_threshold: 0, threshold_calculator: Arc::new(|_active, total| (2 * total / 3 + 1)), + expected_views_to_fail: HashMap::new(), }, timing_data: TimingData { start_delay: 120_000, diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index cb9acd9dc5..c41d362c24 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -1,6 +1,5 @@ // TODO: Remove this after integration #![allow(unused_imports)] - use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, @@ -8,9 +7,17 @@ use hotshot_example_types::{ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestDescription, }; +use hotshot_types::data::ViewNumber; +use hotshot_types::traits::node_implementation::ConsensusTime; +use std::{collections::HashMap, time::Duration}; +use std::collections::HashSet; + +#[cfg(async_executor_impl = "async-std")] +use {hotshot::tasks::DishonestLeader, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; // Test that a good leader can succeed in the view directly after view sync #[cfg(not(feature = "dependency-tasks"))] cross_tests!( @@ -52,3 +59,45 @@ cross_tests!( metadata } ); + +#[cfg(async_executor_impl = "async-std")] +cross_tests!( + TestName: dishonest_leader, + Impls: [MemoryImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { + let dishonest_leader = DishonestLeader:: { + dishonest_at_proposal_numbers: HashSet::from([2, 3]), + validated_proposals: Vec::new(), + total_proposals_from_node: 0, + view_look_back: 1, + _phantom: std::marker::PhantomData + }; + match node_id { + 2 => Behaviour::Byzantine(Box::new(dishonest_leader)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 2; + metadata.num_nodes_with_stake = 5; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(7), false), + (ViewNumber::new(12), false) + ]); + metadata + }, +); From eb63c6abb5d974017320f692edfe10a37e66eadd Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Mon, 5 Aug 2024 11:38:59 -0400 Subject: [PATCH 1166/1393] [AUDIT] - Investigate Double View Update from View Sync task (#3532) * remove double view update from sync task * add expected views to fail, and set the threshold for how many nodes enter view sync * rename and cleanup * run formatter * add spacing --- task-impls/src/view_sync.rs | 6 --- testing/src/overall_safety_task.rs | 14 +++--- testing/tests/tests_1/test_with_failures_2.rs | 50 ++++++++++++++++++- 3 files changed, 55 insertions(+), 15 deletions(-) diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index f3b1e596d3..c2b74fc03e 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -597,12 +597,6 @@ impl> ViewSyncReplicaTaskState> OverallSafetyTask { async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::Time) { - let expected_view_to_fail = &mut self.properties.expected_views_to_fail; + let expected_views_to_fail = &mut self.properties.expected_views_to_fail; self.ctx.failed_views.insert(view_number); if self.ctx.failed_views.len() > num_failed_views { @@ -102,16 +102,16 @@ impl> OverallSafetyTask::TooManyFailures { failed_views: self.ctx.failed_views.clone(), })); - } else if !expected_view_to_fail.is_empty() { - match expected_view_to_fail.get(&view_number) { - Some(_) => { - expected_view_to_fail.insert(view_number, true); + } else if !expected_views_to_fail.is_empty() { + match expected_views_to_fail.entry(view_number) { + Entry::Occupied(mut view_seen) => { + *view_seen.get_mut() = true; } - None => { + Entry::Vacant(_v) => { let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; self.error = Some(Box::new( OverallSafetyTaskErr::::InconsistentFailedViews { - expected_failed_views: expected_view_to_fail.keys().cloned().collect(), + expected_failed_views: expected_views_to_fail.keys().cloned().collect(), actual_failed_views: self.ctx.failed_views.clone(), }, )); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index c41d362c24..8a7eca8763 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -1,7 +1,7 @@ // TODO: Remove this after integration #![allow(unused_imports)] use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -10,11 +10,12 @@ use hotshot_testing::{ completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, test_builder::TestDescription, + view_sync_task::ViewSyncTaskDescription, }; use hotshot_types::data::ViewNumber; use hotshot_types::traits::node_implementation::ConsensusTime; -use std::{collections::HashMap, time::Duration}; use std::collections::HashSet; +use std::{collections::HashMap, time::Duration}; #[cfg(async_executor_impl = "async-std")] use {hotshot::tasks::DishonestLeader, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; @@ -101,3 +102,48 @@ cross_tests!( metadata }, ); + +#[cfg(not(feature = "dependency-tasks"))] +cross_tests!( + TestName: test_with_double_leader_failures, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestConsecutiveLeaderTypes], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_bootstrap_nodes = 10; + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + let dead_nodes = vec![ + ChangeNode { + idx: 3, + updown: UpDown::Down, + }, + ]; + + // shutdown while node 3 is leader + // we want to trigger `ViewSyncTrigger` + // then ensure we do not fail again as next leader will be leader 2 views also + let view_spin_node_down = 5; + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(view_spin_node_down, dead_nodes)] + }; + + // node 3 is leader twice when we shut down + metadata.overall_safety_properties.num_failed_views = 2; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + // next views after turning node off + (ViewNumber::new(view_spin_node_down + 1), false), + (ViewNumber::new(view_spin_node_down + 2), false) + ]); + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 13; + + // only turning off 1 node, so expected should be num_nodes_with_stake - 1 + let expected_nodes_in_view_sync = metadata.num_nodes_with_stake - 1; + metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(expected_nodes_in_view_sync, expected_nodes_in_view_sync); + + metadata + } +); From aaa916eace48402d2191649aa94ace7306e352a0 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 6 Aug 2024 09:41:07 -0400 Subject: [PATCH 1167/1393] Fix The Justfile Lints (#3536) * fix commands * fix justfile commands, use just in the lint stage --- testing/tests/tests_1/quorum_vote_task.rs | 1 - testing/tests/tests_2/catchup.rs | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index eea01a8b6b..b8b4680bcd 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -276,4 +276,3 @@ async fn test_quorum_vote_task_incorrect_dependency() { }; run_test![inputs, script].await; } - diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 4a3bd8c8f8..433a35a666 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -312,9 +312,10 @@ async fn test_all_restart() { let mut catchup_nodes = vec![]; for i in 1..20 { catchup_nodes.push(ChangeNode { - idx: i, - updown: UpDown::Restart, - })}; + idx: i, + updown: UpDown::Restart, + }) + } metadata.timing_data = timing_data; metadata.start_nodes = 20; From 0ed5313b35e00b2bd54488e358879c71654623fd Mon Sep 17 00:00:00 2001 From: Charles Lu Date: Wed, 7 Aug 2024 12:33:18 -0400 Subject: [PATCH 1168/1393] add MIT license (#3415) Co-authored-by: Jarred Parr --- builder-api/src/api.rs | 6 +++++ builder-api/src/lib.rs | 24 ++++--------------- builder-api/src/v0_1/block_info.rs | 6 +++++ builder-api/src/v0_1/builder.rs | 6 +++++ builder-api/src/v0_1/data_source.rs | 6 +++++ builder-api/src/v0_1/query_data.rs | 6 +++++ .../src/auction_results_provider_types.rs | 6 +++++ example-types/src/block_types.rs | 6 +++++ example-types/src/lib.rs | 6 +++++ example-types/src/node_types.rs | 6 +++++ example-types/src/state_types.rs | 6 +++++ example-types/src/storage_types.rs | 6 +++++ examples/combined/all.rs | 6 +++++ examples/combined/multi-validator.rs | 6 +++++ examples/combined/orchestrator.rs | 6 +++++ examples/combined/types.rs | 6 +++++ examples/combined/validator.rs | 6 +++++ examples/infra/mod.rs | 6 +++++ examples/libp2p/all.rs | 6 +++++ examples/libp2p/multi-validator.rs | 6 +++++ examples/libp2p/types.rs | 6 +++++ examples/libp2p/validator.rs | 6 +++++ examples/orchestrator.rs | 6 +++++ examples/push-cdn/all.rs | 6 +++++ examples/push-cdn/broker.rs | 6 +++++ examples/push-cdn/marshal.rs | 6 +++++ examples/push-cdn/multi-validator.rs | 6 +++++ examples/push-cdn/types.rs | 6 +++++ examples/push-cdn/validator.rs | 6 +++++ examples/push-cdn/whitelist-adapter.rs | 6 +++++ hotshot-stake-table/src/config.rs | 6 +++++ hotshot-stake-table/src/lib.rs | 6 +++++ hotshot-stake-table/src/mt_based.rs | 6 +++++ hotshot-stake-table/src/mt_based/config.rs | 6 +++++ hotshot-stake-table/src/mt_based/internal.rs | 6 +++++ hotshot-stake-table/src/utils.rs | 6 +++++ hotshot-stake-table/src/vec_based.rs | 6 +++++ hotshot-stake-table/src/vec_based/config.rs | 6 +++++ hotshot/src/documentation.rs | 6 +++++ hotshot/src/lib.rs | 6 +++++ hotshot/src/tasks/mod.rs | 6 +++++ hotshot/src/tasks/task_state.rs | 6 +++++ hotshot/src/traits.rs | 6 +++++ hotshot/src/traits/election.rs | 6 +++++ .../src/traits/election/static_committee.rs | 6 +++++ hotshot/src/traits/networking.rs | 6 +++++ .../src/traits/networking/combined_network.rs | 6 +++++ .../src/traits/networking/libp2p_network.rs | 6 +++++ .../src/traits/networking/memory_network.rs | 6 +++++ .../src/traits/networking/push_cdn_network.rs | 6 +++++ hotshot/src/traits/node_implementation.rs | 6 +++++ hotshot/src/types.rs | 6 +++++ hotshot/src/types/event.rs | 6 +++++ hotshot/src/types/handle.rs | 6 +++++ libp2p-networking/src/lib.rs | 6 +++++ .../src/network/behaviours/dht/bootstrap.rs | 6 +++++ .../src/network/behaviours/dht/mod.rs | 6 +++++ .../src/network/behaviours/direct_message.rs | 6 +++++ .../network/behaviours/exponential_backoff.rs | 6 +++++ .../src/network/behaviours/mod.rs | 6 +++++ .../network/behaviours/request_response.rs | 6 +++++ libp2p-networking/src/network/def.rs | 6 +++++ libp2p-networking/src/network/error.rs | 6 +++++ libp2p-networking/src/network/mod.rs | 6 +++++ libp2p-networking/src/network/node.rs | 6 +++++ libp2p-networking/src/network/node/config.rs | 6 +++++ libp2p-networking/src/network/node/handle.rs | 6 +++++ libp2p-networking/tests/common/mod.rs | 6 +++++ libp2p-networking/tests/counter.rs | 6 +++++ macros/src/lib.rs | 6 +++++ orchestrator/src/client.rs | 6 +++++ orchestrator/src/config.rs | 6 +++++ orchestrator/src/lib.rs | 6 +++++ task-impls/src/builder.rs | 6 +++++ task-impls/src/consensus/handlers.rs | 6 +++++ task-impls/src/consensus/mod.rs | 6 +++++ task-impls/src/consensus/view_change.rs | 6 +++++ task-impls/src/consensus2/handlers.rs | 6 +++++ task-impls/src/consensus2/mod.rs | 6 +++++ task-impls/src/da.rs | 6 +++++ task-impls/src/events.rs | 6 +++++ task-impls/src/harness.rs | 6 +++++ task-impls/src/helpers.rs | 6 +++++ task-impls/src/lib.rs | 6 +++++ task-impls/src/network.rs | 6 +++++ task-impls/src/quorum_proposal/handlers.rs | 6 +++++ task-impls/src/quorum_proposal/mod.rs | 6 +++++ .../src/quorum_proposal_recv/handlers.rs | 6 +++++ task-impls/src/quorum_proposal_recv/mod.rs | 6 +++++ task-impls/src/quorum_vote/handlers.rs | 6 +++++ task-impls/src/quorum_vote/mod.rs | 6 +++++ task-impls/src/request.rs | 6 +++++ task-impls/src/response.rs | 6 +++++ task-impls/src/rewind.rs | 6 +++++ task-impls/src/transactions.rs | 6 +++++ task-impls/src/upgrade.rs | 6 +++++ task-impls/src/vid.rs | 6 +++++ task-impls/src/view_sync.rs | 6 +++++ task-impls/src/vote_collection.rs | 6 +++++ task/src/dependency.rs | 6 +++++ task/src/dependency_task.rs | 6 +++++ task/src/lib.rs | 6 +++++ task/src/task.rs | 6 +++++ testing/src/block_builder/mod.rs | 6 +++++ testing/src/block_builder/random.rs | 6 +++++ testing/src/block_builder/simple.rs | 6 +++++ testing/src/completion_task.rs | 6 +++++ testing/src/consistency_task.rs | 6 +++++ testing/src/helpers.rs | 6 +++++ testing/src/lib.rs | 6 +++++ testing/src/node_ctx.rs | 6 +++++ testing/src/overall_safety_task.rs | 6 +++++ testing/src/predicates/event.rs | 6 +++++ testing/src/predicates/mod.rs | 6 +++++ .../src/predicates/upgrade_with_consensus.rs | 6 +++++ .../src/predicates/upgrade_with_proposal.rs | 6 +++++ testing/src/predicates/upgrade_with_vote.rs | 6 +++++ testing/src/script.rs | 6 +++++ testing/src/spinning_task.rs | 6 +++++ testing/src/test_builder.rs | 6 +++++ testing/src/test_helpers.rs | 6 +++++ testing/src/test_launcher.rs | 6 +++++ testing/src/test_runner.rs | 6 +++++ testing/src/test_task.rs | 6 +++++ testing/src/txn_task.rs | 6 +++++ testing/src/view_generator.rs | 6 +++++ testing/src/view_sync_task.rs | 6 +++++ testing/tests/tests_1.rs | 6 +++++ testing/tests/tests_1/block_builder.rs | 6 +++++ testing/tests/tests_1/consensus_task.rs | 6 +++++ testing/tests/tests_1/da_task.rs | 6 +++++ testing/tests/tests_1/gen_key_pair.rs | 6 +++++ testing/tests/tests_1/libp2p.rs | 6 +++++ testing/tests/tests_1/message.rs | 6 +++++ testing/tests/tests_1/network_task.rs | 6 +++++ .../tests_1/quorum_proposal_recv_task.rs | 6 +++++ testing/tests/tests_1/quorum_proposal_task.rs | 6 +++++ testing/tests/tests_1/quorum_vote_task.rs | 6 +++++ testing/tests/tests_1/test_success.rs | 6 +++++ testing/tests/tests_1/test_with_failures_2.rs | 6 +++++ .../tests_1/upgrade_task_with_consensus.rs | 6 +++++ .../tests_1/upgrade_task_with_proposal.rs | 6 +++++ .../tests/tests_1/upgrade_task_with_vote.rs | 6 +++++ testing/tests/tests_1/vid_task.rs | 6 +++++ testing/tests/tests_1/view_sync_task.rs | 6 +++++ testing/tests/tests_2.rs | 6 +++++ testing/tests/tests_2/catchup.rs | 6 +++++ testing/tests/tests_2/push_cdn.rs | 6 +++++ .../tests/tests_2/test_with_failures_one.rs | 6 +++++ testing/tests/tests_3.rs | 6 +++++ testing/tests/tests_3/memory_network.rs | 6 +++++ .../tests_3/test_with_builder_failures.rs | 6 +++++ .../tests_3/test_with_failures_half_f.rs | 6 +++++ testing/tests/tests_4.rs | 6 +++++ testing/tests/tests_4/test_with_failures_f.rs | 6 +++++ testing/tests/tests_5.rs | 6 +++++ testing/tests/tests_5/combined_network.rs | 6 +++++ testing/tests/tests_5/timeout.rs | 6 +++++ testing/tests/tests_5/unreliable_network.rs | 6 +++++ types/src/consensus.rs | 6 +++++ types/src/constants.rs | 6 +++++ types/src/data.rs | 6 +++++ types/src/error.rs | 6 +++++ types/src/event.rs | 6 +++++ types/src/lib.rs | 6 +++++ types/src/light_client.rs | 6 +++++ types/src/message.rs | 6 +++++ types/src/qc.rs | 6 +++++ types/src/signature_key.rs | 6 +++++ types/src/simple_certificate.rs | 6 +++++ types/src/simple_vote.rs | 6 +++++ types/src/stake_table.rs | 6 +++++ types/src/traits.rs | 6 +++++ types/src/traits/auction_results_provider.rs | 6 +++++ types/src/traits/block_contents.rs | 6 +++++ types/src/traits/consensus_api.rs | 6 +++++ types/src/traits/election.rs | 6 +++++ types/src/traits/metrics.rs | 6 +++++ types/src/traits/network.rs | 6 +++++ types/src/traits/node_implementation.rs | 6 +++++ types/src/traits/qc.rs | 6 +++++ types/src/traits/signature_key.rs | 6 +++++ types/src/traits/stake_table.rs | 6 +++++ types/src/traits/states.rs | 6 +++++ types/src/traits/storage.rs | 6 +++++ types/src/utils.rs | 6 +++++ types/src/vid.rs | 6 +++++ types/src/vote.rs | 6 +++++ 188 files changed, 1126 insertions(+), 20 deletions(-) diff --git a/builder-api/src/api.rs b/builder-api/src/api.rs index b1ad3f380b..04042630c1 100644 --- a/builder-api/src/api.rs +++ b/builder-api/src/api.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{fs, path::Path}; use tide_disco::api::{Api, ApiError}; diff --git a/builder-api/src/lib.rs b/builder-api/src/lib.rs index 329a94e2cb..d273528191 100644 --- a/builder-api/src/lib.rs +++ b/builder-api/src/lib.rs @@ -1,24 +1,8 @@ -// MIT License +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. -// Copyright (c) 2024 Espresso Systems - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . mod api; pub mod v0_1; diff --git a/builder-api/src/v0_1/block_info.rs b/builder-api/src/v0_1/block_info.rs index fd14eeb311..9048283140 100644 --- a/builder-api/src/v0_1/block_info.rs +++ b/builder-api/src/v0_1/block_info.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::marker::PhantomData; use hotshot_types::{ diff --git a/builder-api/src/v0_1/builder.rs b/builder-api/src/v0_1/builder.rs index 7af58a4fd5..6d91698fcc 100644 --- a/builder-api/src/v0_1/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::path::PathBuf; use clap::Args; diff --git a/builder-api/src/v0_1/data_source.rs b/builder-api/src/v0_1/data_source.rs index 703e15869a..c36b457623 100644 --- a/builder-api/src/v0_1/data_source.rs +++ b/builder-api/src/v0_1/data_source.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use async_trait::async_trait; use committable::Commitment; use hotshot_types::{ diff --git a/builder-api/src/v0_1/query_data.rs b/builder-api/src/v0_1/query_data.rs index c06a0bc1f2..a9b5da8cbd 100644 --- a/builder-api/src/v0_1/query_data.rs +++ b/builder-api/src/v0_1/query_data.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index d363446655..af2f8b7026 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use anyhow::{bail, Result}; use async_trait::async_trait; use hotshot_types::traits::{ diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 0768d956bb..21e8c64bc6 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ fmt::{Debug, Display}, mem::size_of, diff --git a/example-types/src/lib.rs b/example-types/src/lib.rs index d2b82cd90c..95622b7c21 100644 --- a/example-types/src/lib.rs +++ b/example-types/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + /// block types pub mod block_types; diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 13ba83a383..de74d12b3a 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot::traits::{ election::{ static_committee::{GeneralStaticCommittee, StaticCommittee}, diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 51e973320d..8563dbd3fa 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Implementations for examples and tests only use std::fmt::Debug; diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 5192de3f20..024bfe1a89 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::{BTreeMap, HashMap}, sync::Arc, diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 0d8307dc5b..bd69dc61c1 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! An example program using both the web server and libp2p /// types used for this example pub mod types; diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index f894030d59..9b6e37f516 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A multi-validator using both the web server libp2p use async_compatibility_layer::{ art::async_spawn, diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index 12b376628e..17b6f2dec2 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Orchestrator using the web server /// types used for this example pub mod types; diff --git a/examples/combined/types.rs b/examples/combined/types.rs index caf0218621..1209891b71 100644 --- a/examples/combined/types.rs +++ b/examples/combined/types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::fmt::Debug; use hotshot::traits::implementations::CombinedNetworks; diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index eafdd21559..9f97b7f8db 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A validator using both the web server and libp2p use std::{net::SocketAddr, str::FromStr}; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 980afff810..cda136d090 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] use std::{ collections::HashMap, diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index cf1f4cbe38..770e6ea91d 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! An example program using libp2p /// types used for this example pub mod types; diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index c802f3da50..7f55d9462a 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A multi-validator using libp2p use async_compatibility_layer::{ art::async_spawn, diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index c3f2c59d2d..afcfa236a1 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::fmt::Debug; use hotshot::traits::implementations::Libp2pNetwork; diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index bfc1a4e387..5948ebd107 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A validator using libp2p use std::{net::SocketAddr, str::FromStr}; diff --git a/examples/orchestrator.rs b/examples/orchestrator.rs index 732264a876..f78c0b35b1 100644 --- a/examples/orchestrator.rs +++ b/examples/orchestrator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A orchestrator use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index d5a2f98906..b360170c21 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A example program using the Push CDN /// The types we're importing pub mod types; diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 67e987c31d..00232e771d 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The following is the main `Broker` binary, which just instantiates and runs //! a `Broker` object. use anyhow::Result; diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index d8e7c83e55..fde57cd28d 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The following is the main `Marshal` binary, which just instantiates and runs //! a `Marshal` object. diff --git a/examples/push-cdn/multi-validator.rs b/examples/push-cdn/multi-validator.rs index 53598afcb2..77520cdd25 100644 --- a/examples/push-cdn/multi-validator.rs +++ b/examples/push-cdn/multi-validator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A multi validator use async_compatibility_layer::{ art::async_spawn, diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 17852a99af..963d51bd6e 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot::traits::{implementations::PushCdnNetwork, NodeImplementation}; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index 3f0497ce02..5e6c0e67bc 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A validator use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index 9a80b227aa..9a3d585b18 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The whitelist is an adaptor that is able to update the allowed public keys for //! all brokers. Right now, we do this by asking the orchestrator for the list of //! allowed public keys. In the future, we will pull the stake table from the L1. diff --git a/hotshot-stake-table/src/config.rs b/hotshot-stake-table/src/config.rs index 12f69b2e85..9e7cb3c8cb 100644 --- a/hotshot-stake-table/src/config.rs +++ b/hotshot-stake-table/src/config.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Configuration file for stake table /// Capacity of a stake table diff --git a/hotshot-stake-table/src/lib.rs b/hotshot-stake-table/src/lib.rs index fd4f19e9d9..0c153672db 100644 --- a/hotshot-stake-table/src/lib.rs +++ b/hotshot-stake-table/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! This crate contains some stake table implementations for `HotShot` system. pub mod config; pub mod mt_based; diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index b870478628..0929b78bee 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A stake table implementation that's based on Append-only Merkle Tree. mod config; diff --git a/hotshot-stake-table/src/mt_based/config.rs b/hotshot-stake-table/src/mt_based/config.rs index 6b720850a9..70e6a2a6f3 100644 --- a/hotshot-stake-table/src/mt_based/config.rs +++ b/hotshot-stake-table/src/mt_based/config.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Config file for stake table use ark_ff::PrimeField; use ark_std::vec; diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index 50440cc4b3..ee98af4670 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Utilities and internals for maintaining a local stake table use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; diff --git a/hotshot-stake-table/src/utils.rs b/hotshot-stake-table/src/utils.rs index e4382109fc..e3ef251fd0 100644 --- a/hotshot-stake-table/src/utils.rs +++ b/hotshot-stake-table/src/utils.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Utilities to help building a stake table. use ark_ff::{Field, PrimeField}; diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 78b9e326ee..66b9f23d05 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! A vector based stake table implementation. The commitment is the rescue hash of the list of (key, amount) pairs; use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 83ea7f8358..4415555142 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Config file for stake table use ark_ff::PrimeField; use ark_std::vec; diff --git a/hotshot/src/documentation.rs b/hotshot/src/documentation.rs index 25630e22bc..6015a63d89 100644 --- a/hotshot/src/documentation.rs +++ b/hotshot/src/documentation.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + // This is prosaic documentation, we don't need clippy #![allow( clippy::all, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 11d4ca29b9..40cfea0091 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Provides a generic rust implementation of the `HotShot` BFT protocol //! diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 58a804c70e..f519a28a48 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Provides a number of tasks that run continuously /// Provides trait to create task states from a `SystemContextHandle` diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 2c5e14f91c..6c3746b527 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::{BTreeMap, HashMap}, sync::{atomic::AtomicBool, Arc}, diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index a4d6372a5f..8d26f9e7f0 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + /// Sortition trait pub mod election; mod networking; diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election.rs index fdc277477d..d00e7806b0 100644 --- a/hotshot/src/traits/election.rs +++ b/hotshot/src/traits/election.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! elections used for consensus /// static (round robin) committee election diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 255c446401..842c2378c6 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{marker::PhantomData, num::NonZeroU64}; use ethereum_types::U256; diff --git a/hotshot/src/traits/networking.rs b/hotshot/src/traits/networking.rs index 58fae50bdd..85bd626c15 100644 --- a/hotshot/src/traits/networking.rs +++ b/hotshot/src/traits/networking.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Network access compatibility //! //! This module contains a trait abstracting over network access, as well as implementations of that diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 7c9b590144..74d858e912 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Networking Implementation that has a primary and a fallback network. If the primary //! Errors we will use the backup to send or receive use std::{ diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index f0bcf2fe5b..3646e1de71 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d1f6c22bfe..392dc9c005 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! In memory network simulator //! //! This module provides an in-memory only simulation of an actual network, useful for unit and diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 9cb0e2b142..3a1c68db4f 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #[cfg(feature = "hotshot-testing")] use std::sync::atomic::{AtomicBool, Ordering}; use std::{collections::BTreeSet, marker::PhantomData, sync::Arc}; diff --git a/hotshot/src/traits/node_implementation.rs b/hotshot/src/traits/node_implementation.rs index 5bd8cfbe77..904decc780 100644 --- a/hotshot/src/traits/node_implementation.rs +++ b/hotshot/src/traits/node_implementation.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Composite trait for node behavior //! //! This module defines the [`NodeImplementation`] trait, which is a composite trait used for diff --git a/hotshot/src/types.rs b/hotshot/src/types.rs index b018bf1813..5013e44298 100644 --- a/hotshot/src/types.rs +++ b/hotshot/src/types.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + mod event; mod handle; diff --git a/hotshot/src/types/event.rs b/hotshot/src/types/event.rs index 772bd28bf5..f4311074af 100644 --- a/hotshot/src/types/event.rs +++ b/hotshot/src/types/event.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Events that a [`SystemContext`](crate::SystemContext) instance can emit pub use hotshot_types::event::{Event, EventType}; diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 0b65b66cec..dc7ae4c326 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Provides an event-streaming handle for a [`SystemContext`] running in the background use std::{sync::Arc, time::Duration}; diff --git a/libp2p-networking/src/lib.rs b/libp2p-networking/src/lib.rs index 6af2522019..6b71165223 100644 --- a/libp2p-networking/src/lib.rs +++ b/libp2p-networking/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Library for p2p communication /// Network logic diff --git a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs index d209132f0b..7fa8674558 100644 --- a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::Duration; use async_compatibility_layer::{art, channel::UnboundedSender}; diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 208856d6ab..d7245f8226 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + /// Task for doing bootstraps at a regular interval pub mod bootstrap; use std::{ diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index a37a3505e7..61f64b8b91 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::collections::HashMap; use async_compatibility_layer::{ diff --git a/libp2p-networking/src/network/behaviours/exponential_backoff.rs b/libp2p-networking/src/network/behaviours/exponential_backoff.rs index 04c848035b..ce78d5eb3e 100644 --- a/libp2p-networking/src/network/behaviours/exponential_backoff.rs +++ b/libp2p-networking/src/network/behaviours/exponential_backoff.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::{Duration, Instant}; /// Track (with exponential backoff) diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/libp2p-networking/src/network/behaviours/mod.rs index 18b7504acf..aa40fd5f25 100644 --- a/libp2p-networking/src/network/behaviours/mod.rs +++ b/libp2p-networking/src/network/behaviours/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + /// Wrapper around `RequestResponse` pub mod direct_message; diff --git a/libp2p-networking/src/network/behaviours/request_response.rs b/libp2p-networking/src/network/behaviours/request_response.rs index f7814d1452..7c75e49fc1 100644 --- a/libp2p-networking/src/network/behaviours/request_response.rs +++ b/libp2p-networking/src/network/behaviours/request_response.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::collections::HashMap; use futures::channel::oneshot::Sender; diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index c81a22b734..d4c09743ae 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use libp2p::{ autonat, gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, diff --git a/libp2p-networking/src/network/error.rs b/libp2p-networking/src/network/error.rs index bc4dd5f2ca..05d7857b7f 100644 --- a/libp2p-networking/src/network/error.rs +++ b/libp2p-networking/src/network/error.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Contains the [`NetworkError`] snafu types use std::fmt::{Debug, Display}; diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 94796117a9..48228861f8 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + /// networking behaviours wrapping libp2p's behaviours pub mod behaviours; /// defines the swarm and network definition (internal) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 94cf903020..97353e27ee 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + /// configuration for the libp2p network (e.g. how it should be built) mod config; diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index aa831f0ce6..310814b56a 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; use libp2p::{identity::Keypair, Multiaddr}; diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 981ec56938..fb6fc106ac 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashSet, fmt::Debug, time::Duration}; use async_compatibility_layer::{ diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index bdb1aa09bd..c31e3780a3 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::{HashMap, HashSet}, fmt::Debug, diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 4f3cdf0fa4..0f89efc89d 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] mod common; diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 8811f8b172..cc333dabae 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Macros for use in testing. use proc_macro::TokenStream; diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 8b29580624..0843844da6 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{net::SocketAddr, time::Duration}; use async_compatibility_layer::art::async_sleep; diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index f28c39c823..3e8c1747d8 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ env, fs, net::SocketAddr, diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 8ed7bb78a6..72c48a4117 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Orchestrator for manipulating nodes and recording results during a run of `HotShot` tests /// The orchestrator's clients diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index f5b5b1261d..63927c2f4e 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::{Duration, Instant}; use async_compatibility_layer::art::async_sleep; diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 52127a88f4..40aca7bf23 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(not(feature = "dependency-tasks"))] use core::time::Duration; diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 75368139d3..e87a59438d 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(not(feature = "dependency-tasks"))] use std::{collections::BTreeMap, sync::Arc}; diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs index 4f6c7bdac2..82928afcfc 100644 --- a/task-impls/src/consensus/view_change.rs +++ b/task-impls/src/consensus/view_change.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use core::time::Duration; use std::sync::Arc; diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 6f57703855..a72d4958b7 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use anyhow::{ensure, Context, Result}; diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 8a48ef5b69..a31c97850d 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::sync::Arc; use anyhow::Result; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 4bb6fd15d0..0cf96e4fea 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{marker::PhantomData, sync::Arc}; use anyhow::Result; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index fd103341e3..4349d7969b 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::fmt::Display; use async_broadcast::Sender; diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 8c2f732587..16e8a273b8 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use async_broadcast::broadcast; diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index daa2f5f37e..14b6f0483f 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use core::time::Duration; use std::{ collections::{HashMap, HashSet}, diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 86729d6a1b..5693000636 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The consensus layer for hotshot. This currently implements sequencing //! consensus in an event driven way diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 26ea2a9249..bcef607db7 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, sync::Arc}; use anyhow::Result; diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index c6497a6059..537f6e6c66 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! This module holds the dependency task for the QuorumProposalTask. It is spawned whenever an event that could //! initiate a proposal occurs. diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 2449e95e09..cbc0ef96d6 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, sync::Arc}; use anyhow::Result; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 858a293adc..92eac13eb6 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(dead_code)] use std::sync::Arc; diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 4475315675..f34a59e05c 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(unused_imports)] use std::{collections::BTreeMap, sync::Arc}; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 8de6d2eea6..862f60ca7c 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::sync::Arc; use anyhow::Result; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 7427b17f61..cf6040f3f5 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 6bba07a61f..171cdb7d02 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::BTreeMap, sync::{ diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 8e60c35fe0..6462962190 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use async_broadcast::Receiver; diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index 2d40981037..b61c1a186e 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{fs::OpenOptions, io::Write, sync::Arc}; use anyhow::Result; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index eba8bd4204..e10db5bc1d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ sync::Arc, time::{Duration, Instant}, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 1d2a6fca84..e4463c9076 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{marker::PhantomData, sync::Arc, time::SystemTime}; use anyhow::Result; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 5e62015bdb..3243f356ae 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{marker::PhantomData, sync::Arc}; use anyhow::Result; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c2b74fc03e..e2c4e5cb95 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::module_name_repetitions)] use std::{ collections::{BTreeMap, HashMap}, diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 241c62b393..710676f32d 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; use async_broadcast::Sender; diff --git a/task/src/dependency.rs b/task/src/dependency.rs index 0921ac6f85..c4eee030a8 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::future::Future; use async_broadcast::{Receiver, RecvError}; diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs index 281e858a06..2ebe4fc032 100644 --- a/task/src/dependency_task.rs +++ b/task/src/dependency_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; use futures::Future; diff --git a/task/src/lib.rs b/task/src/lib.rs index f38e568065..5b5fb50243 100644 --- a/task/src/lib.rs +++ b/task/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Task primitives for `HotShot` /// Simple Dependency types diff --git a/task/src/task.rs b/task/src/task.rs index 1daebeaa1c..2c3603f81b 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::sync::Arc; use anyhow::Result; diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index 4d95f435ee..0261a1481f 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::collections::HashMap; use async_broadcast::Receiver; diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index 0121805e67..c3270d5bfb 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::HashMap, num::NonZeroUsize, diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 592ab5b322..54e9c9318b 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::HashMap, num::NonZeroUsize, diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 3f8f711454..5cdd70bbcf 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 01c868a5a1..42c30762c1 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::unwrap_or_default)] use std::collections::BTreeMap; diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 4873582696..5089c6d61e 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 330a40c5f9..1ad73a1ea2 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Testing infrastructure for `HotShot` #![cfg_attr( diff --git a/testing/src/node_ctx.rs b/testing/src/node_ctx.rs index 00e886b479..d9be089cfb 100644 --- a/testing/src/node_ctx.rs +++ b/testing/src/node_ctx.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, sync::Arc}; use hotshot::{traits::TestableNodeImplementation, HotShotError}; diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 7c84ae23dd..ce967ad8d2 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 81a33fa639..61e358d569 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::sync::Arc; use async_lock::RwLock; diff --git a/testing/src/predicates/mod.rs b/testing/src/predicates/mod.rs index 7b2acb7bd1..c4c05e7f11 100644 --- a/testing/src/predicates/mod.rs +++ b/testing/src/predicates/mod.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + pub mod event; #[cfg(not(feature = "dependency-tasks"))] pub mod upgrade_with_consensus; diff --git a/testing/src/predicates/upgrade_with_consensus.rs b/testing/src/predicates/upgrade_with_consensus.rs index fbeda224fa..5cf8de8a4e 100644 --- a/testing/src/predicates/upgrade_with_consensus.rs +++ b/testing/src/predicates/upgrade_with_consensus.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(not(feature = "dependency-tasks"))] use std::sync::Arc; diff --git a/testing/src/predicates/upgrade_with_proposal.rs b/testing/src/predicates/upgrade_with_proposal.rs index f743ae55bb..de39befe9e 100644 --- a/testing/src/predicates/upgrade_with_proposal.rs +++ b/testing/src/predicates/upgrade_with_proposal.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(feature = "dependency-tasks")] use std::sync::Arc; diff --git a/testing/src/predicates/upgrade_with_vote.rs b/testing/src/predicates/upgrade_with_vote.rs index 89afe1786d..1d28914f79 100644 --- a/testing/src/predicates/upgrade_with_vote.rs +++ b/testing/src/predicates/upgrade_with_vote.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(feature = "dependency-tasks")] use std::sync::Arc; diff --git a/testing/src/script.rs b/testing/src/script.rs index 47ef255f15..b25e286f9f 100644 --- a/testing/src/script.rs +++ b/testing/src/script.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use hotshot_task_impls::events::HotShotEvent; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index cfcae0bdf9..0f7eff831e 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::{BTreeMap, HashMap}, sync::Arc, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index aebae95f13..ffe6396f78 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; use hotshot::{ diff --git a/testing/src/test_helpers.rs b/testing/src/test_helpers.rs index a28c76bdd6..218f29f1bf 100644 --- a/testing/src/test_helpers.rs +++ b/testing/src/test_helpers.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use committable::Committable; use hotshot_example_types::{node_types::TestTypes, state_types::TestValidatedState}; use hotshot_types::{ diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 6a1c077b6b..8db6169c75 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use hotshot::{ diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 80ef447efb..2d74d8db86 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] use std::{ collections::{BTreeMap, HashMap, HashSet}, diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 5c769fe7fc..cba6ae4d9d 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use anyhow::Result; diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 2dfabc0446..fa2d839352 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use async_broadcast::Receiver; diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 6e582ca738..6fe5b77a7b 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ cmp::max, marker::PhantomData, diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index 9a885beec8..f788c29af5 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use anyhow::Result; diff --git a/testing/tests/tests_1.rs b/testing/tests/tests_1.rs index a856dca727..ef5d4cd82c 100644 --- a/testing/tests/tests_1.rs +++ b/testing/tests/tests_1.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + mod tests_1 { automod::dir!("tests/tests_1"); } diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 4eb0dcdd2f..ef68a4b9b7 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{ collections::HashMap, time::{Duration, Instant}, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index c3abf8c2ff..4464e15682 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(not(feature = "dependency-tasks"))] // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ac550f6a1e..61a2c1671d 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use futures::StreamExt; diff --git a/testing/tests/tests_1/gen_key_pair.rs b/testing/tests/tests_1/gen_key_pair.rs index 2a7637b33c..3af843264d 100644 --- a/testing/tests/tests_1/gen_key_pair.rs +++ b/testing/tests/tests_1/gen_key_pair.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] #[cfg(test)] diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index 059dcdb236..30239d38bd 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::Duration; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 83cbc00bb3..541e427251 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #[cfg(test)] use std::marker::PhantomData; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index d362e4f991..858e9446ce 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{sync::Arc, time::Duration}; use async_broadcast::Sender; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 6ab29ef35f..ffb4a92541 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + // TODO: Remove after integration #![allow(unused_imports)] #![cfg(feature = "dependency-tasks")] diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 488b57ae16..eda6c459c8 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(feature = "dependency-tasks")] use std::time::Duration; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b8b4680bcd..9d884bf3be 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] #![cfg(feature = "dependency-tasks")] diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 90acc43eb2..639181ee91 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::Duration; use hotshot_example_types::{ diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 8a7eca8763..00b24488af 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + // TODO: Remove this after integration #![allow(unused_imports)] use hotshot_example_types::{ diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index 28b0b5b6a5..a3784da1cb 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(not(feature = "dependency-tasks"))] // TODO: Remove after integration of dependency-tasks #![cfg(not(feature = "dependency-tasks"))] diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index f92592a099..07099f5182 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(feature = "dependency-tasks")] // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 9ea15925d5..9595a0355c 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![cfg(feature = "dependency-tasks")] // TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 3087293ccc..20852633d4 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::{marker::PhantomData, sync::Arc}; use hotshot::{tasks::task_state::CreateTaskState, types::SignatureKey}; diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 42a4dd6a56..671d9506a2 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; use hotshot_task_impls::{ diff --git a/testing/tests/tests_2.rs b/testing/tests/tests_2.rs index 517ed4c2ca..7eee7edf88 100644 --- a/testing/tests/tests_2.rs +++ b/testing/tests/tests_2.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + mod tests_2 { automod::dir!("tests/tests_2"); } diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 433a35a666..f78cc9c580 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index 2959cdbdc8..dbc0d152c1 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index d0cfde0f78..030103a37e 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, diff --git a/testing/tests/tests_3.rs b/testing/tests/tests_3.rs index c24caf7c74..9d43482130 100644 --- a/testing/tests/tests_3.rs +++ b/testing/tests/tests_3.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + mod tests_3 { automod::dir!("tests/tests_3"); } diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index e4869e383c..4fe3ae6d49 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #![allow(clippy::panic)] use std::sync::Arc; diff --git a/testing/tests/tests_3/test_with_builder_failures.rs b/testing/tests/tests_3/test_with_builder_failures.rs index 7be111d04a..4f01d92d94 100644 --- a/testing/tests/tests_3/test_with_builder_failures.rs +++ b/testing/tests/tests_3/test_with_builder_failures.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::Duration; use hotshot_example_types::node_types::{MemoryImpl, PushCdnImpl, TestTypes}; diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index a79cc99920..76f37bde10 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, diff --git a/testing/tests/tests_4.rs b/testing/tests/tests_4.rs index e8de159afc..8332b191ab 100644 --- a/testing/tests/tests_4.rs +++ b/testing/tests/tests_4.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + mod tests_4 { automod::dir!("tests/tests_4"); } diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 86b1951040..778062fc66 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, state_types::TestTypes, diff --git a/testing/tests/tests_5.rs b/testing/tests/tests_5.rs index 040fc1e199..6254cb727f 100644 --- a/testing/tests/tests_5.rs +++ b/testing/tests/tests_5.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + mod tests_5 { automod::dir!("tests/tests_5"); } diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index fec8124df7..45e36e895f 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::Duration; use hotshot_example_types::node_types::{CombinedImpl, TestTypes}; diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 22d0e83deb..b7b31c9e7c 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 28e29c0fd1..f3d4c18d52 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + use std::time::{Duration, Instant}; use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 948428ed05..48ae1145ef 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Provides the core consensus types use std::{ diff --git a/types/src/constants.rs b/types/src/constants.rs index ad84f11475..2d497c0674 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! configurable constants for hotshot use std::time::Duration; diff --git a/types/src/data.rs b/types/src/data.rs index d2356e1df9..9c61f6b9b6 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Provides types useful for representing `HotShot`'s data structures //! //! This module provides types for representing consensus internal state, such as leaves, diff --git a/types/src/error.rs b/types/src/error.rs index 81426e37c5..5127f1d68c 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Error type for `HotShot` //! //! This module provides [`HotShotError`], which is an enum representing possible faults that can diff --git a/types/src/event.rs b/types/src/event.rs index 3c07a15782..88c429d5f0 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Events that a `HotShot` instance can emit use std::sync::Arc; diff --git a/types/src/lib.rs b/types/src/lib.rs index 89c7426953..a5f67c1879 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Types and Traits for the `HotShot` consensus module use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 14481a1181..debb265495 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Types and structs associated with light client state use std::collections::HashMap; diff --git a/types/src/message.rs b/types/src/message.rs index 03c8e50ab7..ccbd53347f 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Network message types //! //! This module contains types used to represent the various types of messages that diff --git a/types/src/qc.rs b/types/src/qc.rs index b999a2a5fe..18a9739347 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Implementation for `BitVectorQc` that uses BLS signature + Bit vector. //! See more details in hotshot paper. diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 44eb1b1e21..562e77cb06 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Types and structs for the hotshot signature keys use ark_serialize::SerializationError; diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 5184fc79c6..637a32d1e7 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Implementations of the simple certificate type. Used for Quorum, DA, and Timeout Certificates use std::{ diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index ba18349b20..c91aac557c 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Implementations of the simple vote types. use std::{fmt::Debug, hash::Hash}; diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs index feac5a38da..9d41931f0c 100644 --- a/types/src/stake_table.rs +++ b/types/src/stake_table.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Types and structs related to the stake table use ethereum_types::U256; diff --git a/types/src/traits.rs b/types/src/traits.rs index fb52674701..314136ad16 100644 --- a/types/src/traits.rs +++ b/types/src/traits.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Common traits for the `HotShot` protocol pub mod auction_results_provider; pub mod block_contents; diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index e9154151ce..283ada9d68 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! This module defines the interaction layer with the Solver via the [`AuctionResultsProvider`] trait, //! which handles connecting to, and fetching the allocation results from, the Solver. diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 6c1bb1704f..5329dd0597 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Abstraction over the contents of a block //! //! This module provides the [`Transaction`], [`BlockPayload`], and [`BlockHeader`] traits, which diff --git a/types/src/traits/consensus_api.rs b/types/src/traits/consensus_api.rs index a8de5efbf3..4c4daff9a7 100644 --- a/types/src/traits/consensus_api.rs +++ b/types/src/traits/consensus_api.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Contains the [`ConsensusApi`] trait. use std::{num::NonZeroUsize, time::Duration}; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 9a7012bb00..f6caa02da9 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The election trait, used to decide which node is the leader and determine if a vote is valid. // Needed to avoid the non-binding `let` warning. diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index af2ed32dee..e81d4d423f 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The [`Metrics`] trait is used to collect information from multiple components in the entire system. //! //! This trait can be used to spawn the following traits: diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index e602c5598f..0287f952b7 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Network access compatibility //! //! Contains types and traits used by `HotShot` to abstract over network access diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 49a93de123..8d9a6a4ffd 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Composite trait for node behavior //! //! This module defines the [`NodeImplementation`] trait, which is a composite trait used for diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 1e72f7d098..439b5bcc89 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! The quorum certificate (QC) trait is a certificate of a sufficient quorum of distinct //! parties voted for a message or statement. diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 0146e8af0a..d764b08163 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Minimal compatibility over public key signatures // data is serialized as big-endian for signing purposes diff --git a/types/src/traits/stake_table.rs b/types/src/traits/stake_table.rs index eced8e30b9..32d8590264 100644 --- a/types/src/traits/stake_table.rs +++ b/types/src/traits/stake_table.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Trait for stake table data structures use ark_std::{rand::SeedableRng, vec::Vec}; diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index fb3caeb9ae..ce93502361 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Abstractions over the immutable instance-level state and the global state that blocks modify. //! //! This module provides the [`InstanceState`] and [`ValidatedState`] traits, which serve as diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 23892d87e7..234135cb43 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Abstract storage type for storing DA proposals and VID shares //! //! This modules provides the [`Storage`] trait. diff --git a/types/src/utils.rs b/types/src/utils.rs index 8265f997df..8795ab028a 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Utility functions, type aliases, helper structs and enum definitions. use std::{ops::Deref, sync::Arc}; diff --git a/types/src/vid.rs b/types/src/vid.rs index 3df852b9b9..b4b63cf7b4 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! This module provides: //! - an opaque constructor [`vid_scheme`] that returns a new instance of a //! VID scheme. diff --git a/types/src/vote.rs b/types/src/vote.rs index 8c683e2812..160c963f50 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -1,3 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + //! Vote, Accumulator, and Certificate Types use std::{ From 48ae38bc568e206bb7828a202a883944d94893a1 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 7 Aug 2024 13:14:04 -0400 Subject: [PATCH 1169/1393] Authenticate Libp2p connections (#3534) * authenticate libp2p connections * clippy * fix lint and build * fmt * fix incompatibility in tests * `anyhow!` to `ensure!` Co-authored-by: Jarred Parr * ensure import * match to `PeerId` * format and lint * namespace everything to `st transport` * move some items * deduplicate some code * rename file --------- Co-authored-by: Jarred Parr --- .../src/traits/networking/libp2p_network.rs | 27 +- libp2p-networking/Cargo.toml | 1 + libp2p-networking/src/network/mod.rs | 32 +- libp2p-networking/src/network/node.rs | 28 +- libp2p-networking/src/network/node/config.rs | 13 +- libp2p-networking/src/network/node/handle.rs | 18 +- libp2p-networking/src/network/transport.rs | 734 ++++++++++++++++++ libp2p-networking/tests/common/mod.rs | 36 +- libp2p-networking/tests/counter.rs | 93 ++- 9 files changed, 899 insertions(+), 83 deletions(-) create mode 100644 libp2p-networking/src/network/transport.rs diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 3646e1de71..a6103213a2 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -64,7 +64,9 @@ use libp2p_identity::{ use libp2p_networking::{ network::{ behaviours::request_response::{Request, Response}, - spawn_network_node, MeshParams, + spawn_network_node, + transport::construct_auth_message, + MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeReceiver, NetworkNodeType, DEFAULT_REPLICATION_FACTOR, @@ -150,7 +152,7 @@ struct Libp2pNetworkInner { /// this node's public key pk: K, /// handle to control the network - handle: Arc, + handle: Arc>, /// Message Receiver receiver: UnboundedReceiver>, /// Receiver for Requests for Data, includes the request and the response chan @@ -407,7 +409,24 @@ impl Libp2pNetwork { .parse()?; // Build our libp2p configuration from our global, network configuration - let mut config_builder: NetworkNodeConfigBuilder = NetworkNodeConfigBuilder::default(); + let mut config_builder = NetworkNodeConfigBuilder::default(); + + // Extrapolate the stake table from the known nodes + let stake_table: HashSet = config + .config + .known_nodes_with_stake + .iter() + .map(|node| K::public_key(&node.stake_table_entry)) + .collect(); + + let auth_message = + construct_auth_message(pub_key, &keypair.public().to_peer_id(), priv_key) + .with_context(|| "Failed to construct auth message")?; + + // Set the auth message and stake table + config_builder + .stake_table(Some(stake_table)) + .auth_message(Some(auth_message)); // The replication factor is the minimum of [the default and 2/3 the number of nodes] let Some(default_replication_factor) = DEFAULT_REPLICATION_FACTOR else { @@ -500,7 +519,7 @@ impl Libp2pNetwork { #[allow(clippy::too_many_arguments)] pub async fn new( metrics: Libp2pMetricsValue, - config: NetworkNodeConfig, + config: NetworkNodeConfig, pk: K, bootstrap_addrs: BootstrapAddrs, id: usize, diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index f7e5083823..5e2d53f5f9 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -36,6 +36,7 @@ tide = { version = "0.16", optional = true, default-features = false, features = tracing = { workspace = true } void = "1" lazy_static = { workspace = true } +pin-project = "1" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 48228861f8..b95acdc79a 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -12,10 +12,13 @@ mod def; pub mod error; /// functionality of a libp2p network node mod node; +/// Alternative Libp2p transport implementations +pub mod transport; use std::{collections::HashSet, fmt::Debug, str::FromStr}; use futures::channel::oneshot::{self, Sender}; +use hotshot_types::traits::signature_key::SignatureKey; #[cfg(async_executor_impl = "async-std")] use libp2p::dns::async_std::Transport as DnsTransport; #[cfg(async_executor_impl = "tokio")] @@ -37,6 +40,7 @@ use quic::async_std::Transport as QuicTransport; use quic::tokio::Transport as QuicTransport; use serde::{Deserialize, Serialize}; use tracing::instrument; +use transport::StakeTableAuthentication; use self::behaviours::request_response::{Request, Response}; pub use self::{ @@ -211,31 +215,43 @@ pub fn gen_multiaddr(port: u16) -> Multiaddr { /// This type is used to represent a transport in the libp2p network framework. The `PeerId` is a unique identifier for each peer in the network, and the `StreamMuxerBox` is a type of multiplexer that can handle multiple substreams over a single connection. type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; -/// Generate authenticated transport +/// Generates an authenticated transport checked against the stake table. +/// If the stake table or authentication message is not provided, the transport will +/// not participate in stake table authentication. +/// /// # Errors -/// could not sign the quic key with `identity` +/// If we could not create a DNS transport #[instrument(skip(identity))] -pub async fn gen_transport(identity: Keypair) -> Result { - let quic_transport = { +pub async fn gen_transport( + identity: Keypair, + stake_table: Option>, + auth_message: Option>, +) -> Result { + // Create the initial `Quic` transport + let transport = { let mut config = quic::Config::new(&identity); config.handshake_timeout = std::time::Duration::from_secs(20); QuicTransport::new(config) }; - let dns_quic = { + // Require authentication against the stake table + let transport = StakeTableAuthentication::new(transport, stake_table, auth_message); + + // Support DNS resolution + let transport = { #[cfg(async_executor_impl = "async-std")] { - DnsTransport::system(quic_transport).await + DnsTransport::system(transport).await } #[cfg(async_executor_impl = "tokio")] { - DnsTransport::system(quic_transport) + DnsTransport::system(transport) } } .map_err(|e| NetworkError::TransportLaunch { source: e })?; - Ok(dns_quic + Ok(transport .map(|(peer_id, connection), _| (peer_id, StreamMuxerBox::new(connection))) .boxed()) } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 97353e27ee..b63c415ebe 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -23,7 +23,9 @@ use async_compatibility_layer::{ channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; -use hotshot_types::constants::KAD_DEFAULT_REPUB_INTERVAL_SEC; +use hotshot_types::{ + constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::signature_key::SignatureKey, +}; use libp2p::{ autonat, core::transport::ListenerId, @@ -82,7 +84,7 @@ pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; /// Network definition #[derive(custom_debug::Debug)] -pub struct NetworkNode { +pub struct NetworkNode { /// pub/private key from with peer_id is derived identity: Keypair, /// peer id of network node @@ -91,7 +93,7 @@ pub struct NetworkNode { #[debug(skip)] swarm: Swarm, /// the configuration parameters of the netework - config: NetworkNodeConfig, + config: NetworkNodeConfig, /// the listener id we are listening on, if it exists listener_id: Option, /// Handler for requests and response behavior events. @@ -106,7 +108,7 @@ pub struct NetworkNode { bootstrap_tx: Option>, } -impl NetworkNode { +impl NetworkNode { /// Returns number of peers this node is connected to pub fn num_connected(&self) -> usize { self.swarm.connected_peers().count() @@ -164,17 +166,25 @@ impl NetworkNode { /// * Generates a connection to the "broadcast" topic /// * Creates a swarm to manage peers and events #[instrument] - pub async fn new(config: NetworkNodeConfig) -> Result { - // Generate a random PeerId + pub async fn new(config: NetworkNodeConfig) -> Result { + // Generate a random `KeyPair` if one is not specified let identity = if let Some(ref kp) = config.identity { kp.clone() } else { Keypair::generate_ed25519() }; + + // Get the `PeerId` from the `KeyPair` let peer_id = PeerId::from(identity.public()); - debug!(?peer_id); - let transport: BoxedTransport = gen_transport(identity.clone()).await?; - debug!("Launched network transport"); + + // Generate the transport from the identity, stake table, and auth message + let transport: BoxedTransport = gen_transport::( + identity.clone(), + config.stake_table.clone(), + config.auth_message.clone(), + ) + .await?; + // Generate the swarm let mut swarm: Swarm = { // Use the hash of the message's contents as the ID diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 310814b56a..a7ea06e86e 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -6,6 +6,7 @@ use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; +use hotshot_types::traits::signature_key::SignatureKey; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; @@ -16,7 +17,7 @@ pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(1 /// describe the configuration of the network #[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] -pub struct NetworkNodeConfig { +pub struct NetworkNodeConfig { #[builder(default)] /// The type of node (bootstrap etc) pub node_type: NetworkNodeType, @@ -46,6 +47,16 @@ pub struct NetworkNodeConfig { /// whether to start in libp2p::kad::Mode::Server mode #[builder(default = "false")] pub server_mode: bool, + + /// The stake table. Used for authenticating other nodes. If not supplied + /// we will not check other nodes against the stake table + #[builder(default)] + pub stake_table: Option>, + + /// The signed authentication message sent to the remote peer + /// If not supplied we will not send an authentication message during the handshake + #[builder(default)] + pub auth_message: Option>, } /// NOTE: `mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high` diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index fb6fc106ac..175d37a558 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -11,7 +11,9 @@ use async_compatibility_layer::{ channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use futures::channel::oneshot; -use hotshot_types::traits::network::NetworkError as HotshotNetworkError; +use hotshot_types::traits::{ + network::NetworkError as HotshotNetworkError, signature_key::SignatureKey, +}; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use snafu::{ResultExt, Snafu}; @@ -28,9 +30,9 @@ use crate::network::{ /// - A reference to the state /// - Controls for the swarm #[derive(Debug, Clone)] -pub struct NetworkNodeHandle { +pub struct NetworkNodeHandle { /// network configuration - network_config: NetworkNodeConfig, + network_config: NetworkNodeConfig, /// send an action to the networkbehaviour send_network: UnboundedSender, @@ -76,10 +78,10 @@ impl NetworkNodeReceiver { /// Spawn a network node task task and return the handle and the receiver for it /// # Errors /// Errors if spawning the task fails -pub async fn spawn_network_node( - config: NetworkNodeConfig, +pub async fn spawn_network_node( + config: NetworkNodeConfig, id: usize, -) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkNodeHandleError> { +) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkNodeHandleError> { let mut network = NetworkNode::new(config.clone()) .await .context(NetworkSnafu)?; @@ -113,7 +115,7 @@ pub async fn spawn_network_node( Ok((receiver, handle)) } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Cleanly shuts down a swarm node /// This is done by sending a message to /// the swarm itself to spin down @@ -493,7 +495,7 @@ impl NetworkNodeHandle { /// Return a reference to the network config #[must_use] - pub fn config(&self) -> &NetworkNodeConfig { + pub fn config(&self) -> &NetworkNodeConfig { &self.network_config } } diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs new file mode 100644 index 0000000000..7c754235c1 --- /dev/null +++ b/libp2p-networking/src/network/transport.rs @@ -0,0 +1,734 @@ +use anyhow::Result as AnyhowResult; +use anyhow::{ensure, Context}; +use async_compatibility_layer::art::async_timeout; +use futures::AsyncRead; +use futures::AsyncWrite; +use serde::Deserialize; +use serde::Serialize; +use std::collections::HashSet; +use std::future::Future; +use std::hash::BuildHasher; +use std::pin::Pin; +use std::sync::Arc; +use std::task::Poll; +use tracing::warn; +use {std::io::Error as IoError, std::io::ErrorKind as IoErrorKind}; + +use futures::future::poll_fn; +use futures::{AsyncReadExt, AsyncWriteExt}; +use hotshot_types::traits::signature_key::SignatureKey; +use libp2p::core::muxing::StreamMuxerExt; +use libp2p::core::transport::TransportEvent; +use libp2p::core::StreamMuxer; +use libp2p::identity::PeerId; +use libp2p::Transport; +use pin_project::pin_project; + +/// The maximum size of an authentication message. This is used to prevent +/// DoS attacks by sending large messages. +const MAX_AUTH_MESSAGE_SIZE: usize = 1024; + +/// The timeout for the authentication handshake. This is used to prevent +/// attacks that keep connections open indefinitely by half-finishing the +/// handshake. +const AUTH_HANDSHAKE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5); + +/// A wrapper for a `Transport` that bidirectionally authenticates connections +/// by performing a handshake that checks if the remote peer is present in the +/// stake table. +#[pin_project] +pub struct StakeTableAuthentication +{ + #[pin] + /// The underlying transport we are wrapping + pub inner: T, + + /// The stake table we check against to authenticate connections + pub stake_table: Arc>>, + + /// A pre-signed message that we send to the remote peer for authentication + pub auth_message: Arc>>, + + /// Phantom data for the connection type + pd: std::marker::PhantomData, +} + +/// A type alias for the future that upgrades a connection to perform the authentication handshake +type UpgradeFuture = + Pin::Output, ::Error>> + Send>>; + +impl StakeTableAuthentication { + /// Create a new `StakeTableAuthentication` transport that wraps the given transport + /// and authenticates connections against the stake table. + pub fn new(inner: T, stake_table: Option>, auth_message: Option>) -> Self { + Self { + inner, + stake_table: Arc::from(stake_table), + auth_message: Arc::from(auth_message), + pd: std::marker::PhantomData, + } + } + + /// Prove to the remote peer that we are in the stake table by sending + /// them our authentication message. + /// + /// # Errors + /// - If we fail to write the message to the stream + pub async fn authenticate_with_remote_peer( + stream: &mut W, + auth_message: Arc>>, + ) -> AnyhowResult<()> { + // If we have an auth message, send it to the remote peer, prefixed with + // the message length + if let Some(auth_message) = auth_message.as_ref() { + // Write the length-delimited message + write_length_delimited(stream, auth_message).await?; + } + + Ok(()) + } + + /// Verify that the remote peer is: + /// - In the stake table + /// - Sending us a valid authentication message + /// - Sending us a valid signature + /// - Matching the peer ID we expect + /// + /// # Errors + /// If the peer fails verification. This can happen if: + /// - We fail to read the message from the stream + /// - The message is too large + /// - The message is invalid + /// - The peer is not in the stake table + /// - The signature is invalid + pub async fn verify_peer_authentication( + stream: &mut R, + stake_table: Arc>>, + required_peer_id: &PeerId, + ) -> AnyhowResult<()> { + // If we have a stake table, check if the remote peer is in it + if let Some(stake_table) = stake_table.as_ref() { + // Read the length-delimited message from the remote peer + let message = read_length_delimited(stream, MAX_AUTH_MESSAGE_SIZE).await?; + + // Deserialize the authentication message + let auth_message: AuthMessage = bincode::deserialize(&message) + .with_context(|| "Failed to deserialize auth message")?; + + // Verify the signature on the public keys + let public_key = auth_message + .validate() + .with_context(|| "Failed to verify authentication message")?; + + // Deserialize the `PeerId` + let peer_id = PeerId::from_bytes(&auth_message.peer_id_bytes) + .with_context(|| "Failed to deserialize peer ID")?; + + // Verify that the peer ID is the same as the remote peer + if peer_id != *required_peer_id { + return Err(anyhow::anyhow!("Peer ID mismatch")); + } + + // Check if the public key is in the stake table + if !stake_table.contains(&public_key) { + return Err(anyhow::anyhow!("Peer not in stake table")); + } + } + + Ok(()) + } + + /// Wrap the supplied future in an upgrade that performs the authentication handshake. + /// + /// `outgoing` is a boolean that indicates if the connection is incoming or outgoing. + /// This is needed because the flow of the handshake is different for each. + fn gen_handshake> + Send + 'static>( + original_future: F, + outgoing: bool, + stake_table: Arc>>, + auth_message: Arc>>, + ) -> UpgradeFuture + where + T::Error: From<::Error> + From, + T::Output: AsOutput + Send, + + C::Substream: Unpin + Send, + { + // Create a new upgrade that performs the authentication handshake on top + Box::pin(async move { + // Wait for the original future to resolve + let mut stream = original_future.await?; + + // Time out the authentication block + async_timeout(AUTH_HANDSHAKE_TIMEOUT, async { + // Open a substream for the handshake. + // The handshake order depends on whether the connection is incoming or outgoing. + let mut substream = if outgoing { + poll_fn(|cx| stream.as_connection().poll_outbound_unpin(cx)).await? + } else { + poll_fn(|cx| stream.as_connection().poll_inbound_unpin(cx)).await? + }; + + if outgoing { + // If the connection is outgoing, authenticate with the remote peer first + Self::authenticate_with_remote_peer(&mut substream, auth_message) + .await + .map_err(|e| { + warn!("Failed to authenticate with remote peer: {:?}", e); + IoError::new(IoErrorKind::Other, e) + })?; + + // Verify the remote peer's authentication + Self::verify_peer_authentication( + &mut substream, + stake_table, + stream.as_peer_id(), + ) + .await + .map_err(|e| { + warn!("Failed to verify remote peer: {:?}", e); + IoError::new(IoErrorKind::Other, e) + })?; + } else { + // If it is incoming, verify the remote peer's authentication first + Self::verify_peer_authentication( + &mut substream, + stake_table, + stream.as_peer_id(), + ) + .await + .map_err(|e| { + warn!("Failed to verify remote peer: {:?}", e); + IoError::new(IoErrorKind::Other, e) + })?; + + // Authenticate with the remote peer + Self::authenticate_with_remote_peer(&mut substream, auth_message) + .await + .map_err(|e| { + warn!("Failed to authenticate with remote peer: {:?}", e); + IoError::new(IoErrorKind::Other, e) + })?; + } + + Ok(stream) + }) + .await + .map_err(|e| { + warn!("Timed out performing authentication handshake: {:?}", e); + IoError::new(IoErrorKind::TimedOut, e) + })? + }) + } +} + +/// The deserialized form of an authentication message that is sent to the remote peer +#[derive(Clone, Serialize, Deserialize)] +struct AuthMessage { + /// The encoded (stake table) public key of the sender. This, along with the peer ID, is + /// signed. It is still encoded here to enable easy verification. + public_key_bytes: Vec, + + /// The encoded peer ID of the sender. This is appended to the public key before signing. + /// It is still encoded here to enable easy verification. + peer_id_bytes: Vec, + + /// The signature on the public key + signature: S::PureAssembledSignatureType, +} + +impl AuthMessage { + /// Validate the signature on the public key and return it if valid + pub fn validate(&self) -> AnyhowResult { + // Deserialize the stake table public key + let public_key = S::from_bytes(&self.public_key_bytes) + .with_context(|| "Failed to deserialize public key")?; + + // Reconstruct the signed message from the public key and peer ID + let mut signed_message = public_key.to_bytes(); + signed_message.extend(self.peer_id_bytes.clone()); + + // Check if the signature is valid across both + if !public_key.validate(&self.signature, &signed_message) { + return Err(anyhow::anyhow!("Invalid signature")); + } + + Ok(public_key) + } +} + +/// Create an sign an authentication message to be sent to the remote peer +/// +/// # Errors +/// - If we fail to sign the public key +/// - If we fail to serialize the authentication message +pub fn construct_auth_message( + public_key: &S, + peer_id: &PeerId, + private_key: &S::PrivateKey, +) -> AnyhowResult> { + // Serialize the stake table public key + let mut public_key_bytes = public_key.to_bytes(); + + // Serialize the peer ID and append it + let peer_id_bytes = peer_id.to_bytes(); + public_key_bytes.extend_from_slice(&peer_id_bytes); + + // Sign our public key + let signature = + S::sign(private_key, &public_key_bytes).with_context(|| "Failed to sign public key")?; + + // Create the auth message + let auth_message = AuthMessage:: { + public_key_bytes, + peer_id_bytes, + signature, + }; + + // Serialize the auth message + bincode::serialize(&auth_message).with_context(|| "Failed to serialize auth message") +} + +impl Transport + for StakeTableAuthentication +where + T::Dial: Future> + Send + 'static, + T::ListenerUpgrade: Send + 'static, + T::Output: AsOutput + Send, + T::Error: From<::Error> + From, + + C::Substream: Unpin + Send, +{ + // `Dial` is for connecting out, `ListenerUpgrade` is for accepting incoming connections + type Dial = Pin> + Send>>; + type ListenerUpgrade = Pin> + Send>>; + + // These are just passed through + type Output = T::Output; + type Error = T::Error; + + /// Dial a remote peer. This function is changed to perform an authentication handshake + /// on top. + fn dial( + &mut self, + addr: libp2p::Multiaddr, + ) -> Result> { + // Perform the inner dial + let res = self.inner.dial(addr); + + // Clone the necessary fields + let auth_message = Arc::clone(&self.auth_message); + let stake_table = Arc::clone(&self.stake_table); + + // If the dial was successful, perform the authentication handshake on top + match res { + Ok(dial) => Ok(Self::gen_handshake(dial, true, stake_table, auth_message)), + Err(err) => Err(err), + } + } + + /// Dial a remote peer as a listener. This function is changed to perform an authentication + /// handshake on top. The flow should be the reverse of the `dial` function and the + /// same as the `poll` function. + fn dial_as_listener( + &mut self, + addr: libp2p::Multiaddr, + ) -> Result> { + // Perform the inner dial + let res = self.inner.dial(addr); + + // Clone the necessary fields + let auth_message = Arc::clone(&self.auth_message); + let stake_table = Arc::clone(&self.stake_table); + + // If the dial was successful, perform the authentication handshake on top + match res { + Ok(dial) => Ok(Self::gen_handshake(dial, false, stake_table, auth_message)), + Err(err) => Err(err), + } + } + + /// This function is where we perform the authentication handshake for _incoming_ connections. + /// The flow in this case is the reverse of the `dial` function: we first verify the remote peer's + /// authentication, and then authenticate with them. + fn poll( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> + { + match self.as_mut().project().inner.poll(cx) { + Poll::Ready(event) => Poll::Ready(match event { + // If we have an incoming connection, we need to perform the authentication handshake + TransportEvent::Incoming { + listener_id, + upgrade, + local_addr, + send_back_addr, + } => { + // Clone the necessary fields + let auth_message = Arc::clone(&self.auth_message); + let stake_table = Arc::clone(&self.stake_table); + + // Generate the handshake upgrade future (inbound) + let auth_upgrade = + Self::gen_handshake(upgrade, false, stake_table, auth_message); + + // Return the new event + TransportEvent::Incoming { + listener_id, + upgrade: auth_upgrade, + local_addr, + send_back_addr, + } + } + + // We need to re-map the other events because we changed the type of the upgrade + TransportEvent::AddressExpired { + listener_id, + listen_addr, + } => TransportEvent::AddressExpired { + listener_id, + listen_addr, + }, + TransportEvent::ListenerClosed { + listener_id, + reason, + } => TransportEvent::ListenerClosed { + listener_id, + reason, + }, + TransportEvent::ListenerError { listener_id, error } => { + TransportEvent::ListenerError { listener_id, error } + } + TransportEvent::NewAddress { + listener_id, + listen_addr, + } => TransportEvent::NewAddress { + listener_id, + listen_addr, + }, + }), + + Poll::Pending => Poll::Pending, + } + } + + /// The below functions just pass through to the inner transport, but we had + /// to define them + fn remove_listener(&mut self, id: libp2p::core::transport::ListenerId) -> bool { + self.inner.remove_listener(id) + } + fn address_translation( + &self, + listen: &libp2p::Multiaddr, + observed: &libp2p::Multiaddr, + ) -> Option { + self.inner.address_translation(listen, observed) + } + fn listen_on( + &mut self, + id: libp2p::core::transport::ListenerId, + addr: libp2p::Multiaddr, + ) -> Result<(), libp2p::TransportError> { + self.inner.listen_on(id, addr) + } +} + +/// A helper trait that allows us to access the underlying connection +/// and `PeerId` from a transport output +trait AsOutput { + /// Get a mutable reference to the underlying connection + fn as_connection(&mut self) -> &mut C; + + /// Get a mutable reference to the underlying `PeerId` + fn as_peer_id(&mut self) -> &mut PeerId; +} + +/// The implementation of the `AsConnection` trait for a tuple of a `PeerId` +/// and a connection. +impl AsOutput for (PeerId, C) { + /// Get a mutable reference to the underlying connection + fn as_connection(&mut self) -> &mut C { + &mut self.1 + } + + /// Get a mutable reference to the underlying `PeerId` + fn as_peer_id(&mut self) -> &mut PeerId { + &mut self.0 + } +} + +/// A helper function to read a length-delimited message from a stream. Takes into +/// account the maximum message size. +/// +/// # Errors +/// - If the message is too big +/// - If we fail to read from the stream +pub async fn read_length_delimited( + stream: &mut S, + max_size: usize, +) -> AnyhowResult> { + // Receive the first 8 bytes of the message, which is the length + let mut len_bytes = [0u8; 4]; + stream + .read_exact(&mut len_bytes) + .await + .with_context(|| "Failed to read message length")?; + + // Parse the length of the message as a `u32` + let len = usize::try_from(u32::from_be_bytes(len_bytes))?; + + // Quit if the message is too large + ensure!(len <= max_size, "Message too large"); + + // Read the actual message + let mut message = vec![0u8; len]; + stream + .read_exact(&mut message) + .await + .with_context(|| "Failed to read message")?; + + Ok(message) +} + +/// A helper function to write a length-delimited message to a stream. +/// +/// # Errors +/// - If we fail to write to the stream +pub async fn write_length_delimited( + stream: &mut S, + message: &[u8], +) -> AnyhowResult<()> { + // Write the length of the message + stream + .write_all(&u32::try_from(message.len())?.to_be_bytes()) + .await + .with_context(|| "Failed to write message length")?; + + // Write the actual message + stream + .write_all(message) + .await + .with_context(|| "Failed to write message")?; + + Ok(()) +} + +#[cfg(test)] +mod test { + use libp2p::{core::transport::dummy::DummyTransport, quic::Connection}; + use rand::Rng; + + use std::{collections::HashSet, sync::Arc}; + + use super::*; + use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + + /// A mock type to help with readability + type MockStakeTableAuth = StakeTableAuthentication; + + // Helper macro for generating a new identity and authentication message + macro_rules! new_identity { + () => {{ + // Gen a new seed + let seed = rand::rngs::OsRng.gen::<[u8; 32]>(); + + // Create a new keypair + let keypair = BLSPubKey::generated_from_seed_indexed(seed, 1337); + + // Create a peer ID + let peer_id = libp2p::identity::Keypair::generate_ed25519() + .public() + .to_peer_id(); + + // Construct an authentication message + let auth_message = + super::construct_auth_message(&keypair.0, &peer_id, &keypair.1).unwrap(); + + (keypair, peer_id, auth_message) + }}; + } + + // Helper macro to generator a cursor from a length-delimited message + macro_rules! cursor_from { + ($auth_message:expr) => {{ + let mut stream = futures::io::Cursor::new(vec![]); + write_length_delimited(&mut stream, &$auth_message) + .await + .expect("Failed to write message"); + stream.set_position(0); + stream + }}; + } + + /// Test valid construction and verification of an authentication message + #[test] + fn signature_verify() { + // Create a new identity + let (_, _, auth_message) = new_identity!(); + + // Verify the authentication message + let public_key = super::AuthMessage::::validate( + &bincode::deserialize(&auth_message).unwrap(), + ); + assert!(public_key.is_ok()); + } + + /// Test invalid construction and verification of an authentication message with + /// an invalid public key. This ensures we are signing over it correctly. + #[test] + fn signature_verify_invalid_public_key() { + // Create a new identity + let (_, _, auth_message) = new_identity!(); + + // Deserialize the authentication message + let mut auth_message: super::AuthMessage = + bincode::deserialize(&auth_message).unwrap(); + + // Change the public key + auth_message.public_key_bytes[0] ^= 0x01; + + // Serialize the message again + let auth_message = bincode::serialize(&auth_message).unwrap(); + + // Verify the authentication message + let public_key = super::AuthMessage::::validate( + &bincode::deserialize(&auth_message).unwrap(), + ); + assert!(public_key.is_err()); + } + + /// Test invalid construction and verification of an authentication message with + /// an invalid peer ID. This ensures we are signing over it correctly. + #[test] + fn signature_verify_invalid_peer_id() { + // Create a new identity + let (_, _, auth_message) = new_identity!(); + + // Deserialize the authentication message + let mut auth_message: super::AuthMessage = + bincode::deserialize(&auth_message).unwrap(); + + // Change the peer ID + auth_message.peer_id_bytes[0] ^= 0x01; + + // Serialize the message again + let auth_message = bincode::serialize(&auth_message).unwrap(); + + // Verify the authentication message + let public_key = super::AuthMessage::::validate( + &bincode::deserialize(&auth_message).unwrap(), + ); + assert!(public_key.is_err()); + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn valid_authentication() { + // Create a new identity + let (keypair, peer_id, auth_message) = new_identity!(); + + // Create a stream and write the message to it + let mut stream = cursor_from!(auth_message); + + // Create a stake table with the key + let mut stake_table = std::collections::HashSet::new(); + stake_table.insert(keypair.0); + + // Verify the authentication message + let result = MockStakeTableAuth::verify_peer_authentication( + &mut stream, + Arc::new(Some(stake_table)), + &peer_id, + ) + .await; + + assert!( + result.is_ok(), + "Should have passed authentication but did not" + ); + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn key_not_in_stake_table() { + // Create a new identity + let (_, peer_id, auth_message) = new_identity!(); + + // Create a stream and write the message to it + let mut stream = cursor_from!(auth_message); + + // Create an empty stake table + let stake_table: HashSet = std::collections::HashSet::new(); + + // Verify the authentication message + let result = MockStakeTableAuth::verify_peer_authentication( + &mut stream, + Arc::new(Some(stake_table)), + &peer_id, + ) + .await; + + // Make sure it errored for the right reason + assert!( + result + .expect_err("Should have failed authentication but did not") + .to_string() + .contains("Peer not in stake table"), + "Did not fail with the correct error" + ); + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn peer_id_mismatch() { + // Create a new identity and authentication message + let (keypair, _, auth_message) = new_identity!(); + + // Create a second (malicious) identity + let (_, malicious_peer_id, _) = new_identity!(); + + // Create a stream and write the message to it + let mut stream = cursor_from!(auth_message); + + // Create a stake table with the key + let mut stake_table: HashSet = std::collections::HashSet::new(); + stake_table.insert(keypair.0); + + // Check against the malicious peer ID + let result = MockStakeTableAuth::verify_peer_authentication( + &mut stream, + Arc::new(Some(stake_table)), + &malicious_peer_id, + ) + .await; + + // Make sure it errored for the right reason + assert!( + result + .expect_err("Should have failed authentication but did not") + .to_string() + .contains("Peer ID mismatch"), + "Did not fail with the correct error" + ); + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn read_and_write_length_delimited() { + // Create a message + let message = b"Hello, world!"; + + // Write the message to a buffer + let mut buffer = Vec::new(); + write_length_delimited(&mut buffer, message).await.unwrap(); + + // Read the message from the buffer + let read_message = read_length_delimited(&mut buffer.as_slice(), 1024) + .await + .unwrap(); + + // Check if the messages are the same + assert_eq!(message, read_message.as_slice()); + } +} diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index c31e3780a3..28631fd4cc 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -20,6 +20,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use futures::{future::join_all, Future, FutureExt}; +use hotshot_types::traits::signature_key::SignatureKey; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; use libp2p_networking::network::{ @@ -31,8 +32,8 @@ use snafu::{ResultExt, Snafu}; use tracing::{info, instrument, warn}; #[derive(Clone, Debug)] -pub(crate) struct HandleWithState { - pub(crate) handle: Arc, +pub(crate) struct HandleWithState { + pub(crate) handle: Arc>, pub(crate) state: Arc>, } @@ -41,13 +42,13 @@ pub(crate) struct HandleWithState { /// # Panics /// /// Will panic if a handler is already spawned -pub fn spawn_handler( - handle_and_state: HandleWithState, +pub fn spawn_handler( + handle_and_state: HandleWithState, mut receiver: NetworkNodeReceiver, cb: F, ) -> impl Future where - F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, + F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, RET: Future> + Send + 'static, S: Debug + Default + Send + Clone + 'static, { @@ -97,7 +98,14 @@ where /// - Initialize network nodes /// - Kill network nodes /// - A test assertion fails -pub async fn test_bed( +pub async fn test_bed< + S: 'static + Send + Default + Debug + Clone, + F, + FutF, + G, + FutG, + K: SignatureKey + 'static, +>( run_test: F, client_handler: G, num_nodes: usize, @@ -106,8 +114,8 @@ pub async fn test_bed, FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>, Duration) -> FutF, - G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, + F: FnOnce(Vec>, Duration) -> FutF, + G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, { setup_logging(); setup_backtrace(); @@ -115,7 +123,7 @@ pub async fn test_bed(num_nodes, timeout, num_of_bootstrap) + let handles_and_receivers = spin_up_swarms::(num_nodes, timeout, num_of_bootstrap) .await .unwrap(); @@ -145,7 +153,9 @@ pub async fn test_bed]) -> HashMap { +fn gen_peerid_map( + handles: &[Arc>], +) -> HashMap { let mut r_val = HashMap::new(); for handle in handles { r_val.insert(handle.peer_id(), handle.id()); @@ -155,7 +165,7 @@ fn gen_peerid_map(handles: &[Arc]) -> HashMap /// print the connections for each handle in `handles` /// useful for debugging -pub async fn print_connections(handles: &[Arc]) { +pub async fn print_connections(handles: &[Arc>]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); for handle in handles { @@ -177,11 +187,11 @@ pub async fn print_connections(handles: &[Arc]) { /// and waits for connections to propagate to all nodes. #[allow(clippy::type_complexity)] #[instrument] -pub async fn spin_up_swarms( +pub async fn spin_up_swarms( num_of_nodes: usize, timeout_len: Duration, num_bootstrap: usize, -) -> Result, NetworkNodeReceiver)>, TestError> { +) -> Result, NetworkNodeReceiver)>, TestError> { let mut handles = Vec::new(); let mut bootstrap_addrs = Vec::<(PeerId, Multiaddr)>::new(); let mut connecting_futs = Vec::new(); diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 0f89efc89d..88f08af159 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -14,6 +14,7 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; use common::{test_bed, HandleSnafu, HandleWithState, TestError}; +use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use libp2p_networking::network::{NetworkEvent, NetworkNodeHandleError}; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; @@ -59,10 +60,10 @@ pub enum CounterMessage { /// chooses one /// # Panics /// panics if handles is of length 0 -fn random_handle( - handles: &[HandleWithState], +fn random_handle( + handles: &[HandleWithState], rng: &mut dyn rand::RngCore, -) -> HandleWithState { +) -> HandleWithState { handles.iter().choose(rng).unwrap().clone() } @@ -70,9 +71,9 @@ fn random_handle( /// - updates state based on events received /// - replies to direct messages #[instrument] -pub async fn counter_handle_network_event( +pub async fn counter_handle_network_event( event: NetworkEvent, - handle: HandleWithState, + handle: HandleWithState, ) -> Result<(), NetworkNodeHandleError> { use CounterMessage::*; use NetworkEvent::*; @@ -164,9 +165,9 @@ pub async fn counter_handle_network_event( /// # Panics /// on error #[allow(clippy::similar_names)] -async fn run_request_response_increment<'a>( - requester_handle: HandleWithState, - requestee_handle: HandleWithState, +async fn run_request_response_increment<'a, K: SignatureKey + 'static>( + requester_handle: HandleWithState, + requestee_handle: HandleWithState, timeout: Duration, ) -> Result<(), TestError> { async move { @@ -216,8 +217,8 @@ async fn run_request_response_increment<'a>( /// broadcasts `msg` from a randomly chosen handle /// then asserts that all nodes match `new_state` -async fn run_gossip_round( - handles: &[HandleWithState], +async fn run_gossip_round( + handles: &[HandleWithState], msg: CounterMessage, new_state: CounterState, timeout_duration: Duration, @@ -291,8 +292,8 @@ async fn run_gossip_round( Ok(()) } -async fn run_intersperse_many_rounds( - handles: Vec>, +async fn run_intersperse_many_rounds( + handles: Vec>, timeout: Duration, ) { for i in 0..u32::try_from(NUM_ROUNDS).unwrap() { @@ -307,16 +308,22 @@ async fn run_intersperse_many_rounds( } } -async fn run_dht_many_rounds(handles: Vec>, timeout: Duration) { +async fn run_dht_many_rounds( + handles: Vec>, + timeout: Duration, +) { run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; } -async fn run_dht_one_round(handles: Vec>, timeout: Duration) { +async fn run_dht_one_round( + handles: Vec>, + timeout: Duration, +) { run_dht_rounds(&handles, timeout, 0, 1).await; } -async fn run_request_response_many_rounds( - handles: Vec>, +async fn run_request_response_many_rounds( + handles: Vec>, timeout: Duration, ) { for _i in 0..NUM_ROUNDS { @@ -330,8 +337,8 @@ async fn run_request_response_many_rounds( /// runs one round of request response /// # Panics /// on error -async fn run_request_response_one_round( - handles: Vec>, +async fn run_request_response_one_round( + handles: Vec>, timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; @@ -343,22 +350,28 @@ async fn run_request_response_one_round( /// runs multiple rounds of gossip /// # Panics /// on error -async fn run_gossip_many_rounds(handles: Vec>, timeout: Duration) { +async fn run_gossip_many_rounds( + handles: Vec>, + timeout: Duration, +) { run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await; } /// runs one round of gossip /// # Panics /// on error -async fn run_gossip_one_round(handles: Vec>, timeout: Duration) { +async fn run_gossip_one_round( + handles: Vec>, + timeout: Duration, +) { run_gossip_rounds(&handles, 1, 0, timeout).await; } /// runs many rounds of dht /// # Panics /// on error -async fn run_dht_rounds( - handles: &[HandleWithState], +async fn run_dht_rounds( + handles: &[HandleWithState], timeout: Duration, starting_val: usize, num_rounds: usize, @@ -394,8 +407,8 @@ async fn run_dht_rounds( } /// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast -async fn run_gossip_rounds( - handles: &[HandleWithState], +async fn run_gossip_rounds( + handles: &[HandleWithState], num_rounds: usize, starting_state: CounterState, timeout: Duration, @@ -420,8 +433,8 @@ async fn run_gossip_rounds( /// then has all other peers request its state /// and update their state to the recv'ed state #[allow(clippy::similar_names)] -async fn run_request_response_increment_all( - handles: &[HandleWithState], +async fn run_request_response_increment_all( + handles: &[HandleWithState], timeout: Duration, ) { let mut rng = rand::thread_rng(); @@ -496,7 +509,7 @@ async fn run_request_response_increment_all( #[instrument] async fn test_coverage_request_response_one_round() { Box::pin(test_bed( - run_request_response_one_round, + run_request_response_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, @@ -511,7 +524,7 @@ async fn test_coverage_request_response_one_round() { #[instrument] async fn test_coverage_request_response_many_rounds() { Box::pin(test_bed( - run_request_response_many_rounds, + run_request_response_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, @@ -526,7 +539,7 @@ async fn test_coverage_request_response_many_rounds() { #[instrument] async fn test_coverage_intersperse_many_rounds() { Box::pin(test_bed( - run_intersperse_many_rounds, + run_intersperse_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, @@ -541,7 +554,7 @@ async fn test_coverage_intersperse_many_rounds() { #[instrument] async fn test_coverage_gossip_many_rounds() { Box::pin(test_bed( - run_gossip_many_rounds, + run_gossip_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, @@ -556,7 +569,7 @@ async fn test_coverage_gossip_many_rounds() { #[instrument] async fn test_coverage_gossip_one_round() { Box::pin(test_bed( - run_gossip_one_round, + run_gossip_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, @@ -572,7 +585,7 @@ async fn test_coverage_gossip_one_round() { #[ignore] async fn test_stress_request_response_one_round() { Box::pin(test_bed( - run_request_response_one_round, + run_request_response_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -588,7 +601,7 @@ async fn test_stress_request_response_one_round() { #[ignore] async fn test_stress_request_response_many_rounds() { Box::pin(test_bed( - run_request_response_many_rounds, + run_request_response_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -604,7 +617,7 @@ async fn test_stress_request_response_many_rounds() { #[ignore] async fn test_stress_intersperse_many_rounds() { Box::pin(test_bed( - run_intersperse_many_rounds, + run_intersperse_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -620,7 +633,7 @@ async fn test_stress_intersperse_many_rounds() { #[ignore] async fn test_stress_gossip_many_rounds() { Box::pin(test_bed( - run_gossip_many_rounds, + run_gossip_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -636,7 +649,7 @@ async fn test_stress_gossip_many_rounds() { #[ignore] async fn test_stress_gossip_one_round() { Box::pin(test_bed( - run_gossip_one_round, + run_gossip_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -652,7 +665,7 @@ async fn test_stress_gossip_one_round() { #[ignore] async fn test_stress_dht_one_round() { Box::pin(test_bed( - run_dht_one_round, + run_dht_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -668,7 +681,7 @@ async fn test_stress_dht_one_round() { #[ignore] async fn test_stress_dht_many_rounds() { Box::pin(test_bed( - run_dht_many_rounds, + run_dht_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, NUM_OF_BOOTSTRAP_STRESS, @@ -683,7 +696,7 @@ async fn test_stress_dht_many_rounds() { #[instrument] async fn test_coverage_dht_one_round() { Box::pin(test_bed( - run_dht_one_round, + run_dht_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, @@ -698,7 +711,7 @@ async fn test_coverage_dht_one_round() { #[instrument] async fn test_coverage_dht_many_rounds() { Box::pin(test_bed( - run_dht_many_rounds, + run_dht_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, NUM_OF_BOOTSTRAP_COVERAGE, From 5896b638df0bea1bfedc3ea365f95a17116ad2b0 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 7 Aug 2024 23:35:26 +0200 Subject: [PATCH 1170/1393] Use AuctionResults::default instead of None (#3539) --- task-impls/src/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e10db5bc1d..ab6b3fb9c6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -379,7 +379,7 @@ impl> TransactionTaskState Date: Thu, 8 Aug 2024 02:39:10 +0200 Subject: [PATCH 1171/1393] Add builder_commitment to new_marketplace (#3549) --- example-types/src/block_types.rs | 49 +++++++++++++------ hotshot/src/tasks/mod.rs | 3 +- libp2p-networking/src/network/transport.rs | 47 +++++++++--------- task-impls/src/consensus/handlers.rs | 1 + task-impls/src/quorum_proposal/handlers.rs | 1 + testing/tests/tests_1/test_with_failures_2.rs | 11 +++-- .../tests/tests_1/vote_dependency_handle.rs | 2 +- types/src/traits/block_contents.rs | 6 ++- 8 files changed, 70 insertions(+), 50 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 21e8c64bc6..46450affe9 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -245,6 +245,29 @@ pub struct TestBlockHeader { pub timestamp: u64, } +impl TestBlockHeader { + fn new>( + parent_leaf: &Leaf, + payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, + ) -> Self { + let parent = parent_leaf.block_header(); + + let mut timestamp = OffsetDateTime::now_utc().unix_timestamp() as u64; + if timestamp < parent.timestamp { + // Prevent decreasing timestamps. + timestamp = parent.timestamp; + } + + Self { + block_number: parent.block_number + 1, + payload_commitment, + builder_commitment, + timestamp, + } + } +} + impl> BlockHeader for TestBlockHeader { @@ -261,34 +284,30 @@ impl> Block _vid_common: VidCommon, _version: Version, ) -> Result { - let parent = parent_leaf.block_header(); - - let mut timestamp = OffsetDateTime::now_utc().unix_timestamp() as u64; - if timestamp < parent.timestamp { - // Prevent decreasing timestamps. - timestamp = parent.timestamp; - } - - Ok(Self { - block_number: parent.block_number + 1, + Ok(Self::new( + parent_leaf, payload_commitment, builder_commitment, - timestamp, - }) + )) } async fn new_marketplace( _parent_state: &TYPES::ValidatedState, _instance_state: &>::Instance, - _parent_leaf: &Leaf, - _payload_commitment: VidCommitment, + parent_leaf: &Leaf, + payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, _metadata: >::Metadata, _builder_fee: Vec>, _vid_common: VidCommon, _auction_results: Option, _version: Version, ) -> Result { - unimplemented!() + Ok(Self::new( + parent_leaf, + payload_commitment, + builder_commitment, + )) } fn genesis( diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index f519a28a48..12ab6c025c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -32,8 +32,7 @@ use hotshot_task_impls::{ use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::QuorumProposal, - message::Proposal, - message::{Messages, VersionedMessage}, + message::{Messages, Proposal, VersionedMessage}, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index 7c754235c1..ea58db2001 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -1,28 +1,25 @@ -use anyhow::Result as AnyhowResult; -use anyhow::{ensure, Context}; +use std::{ + collections::HashSet, + future::Future, + hash::BuildHasher, + io::{Error as IoError, ErrorKind as IoErrorKind}, + pin::Pin, + sync::Arc, + task::Poll, +}; + +use anyhow::{ensure, Context, Result as AnyhowResult}; use async_compatibility_layer::art::async_timeout; -use futures::AsyncRead; -use futures::AsyncWrite; -use serde::Deserialize; -use serde::Serialize; -use std::collections::HashSet; -use std::future::Future; -use std::hash::BuildHasher; -use std::pin::Pin; -use std::sync::Arc; -use std::task::Poll; -use tracing::warn; -use {std::io::Error as IoError, std::io::ErrorKind as IoErrorKind}; - -use futures::future::poll_fn; -use futures::{AsyncReadExt, AsyncWriteExt}; +use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use hotshot_types::traits::signature_key::SignatureKey; -use libp2p::core::muxing::StreamMuxerExt; -use libp2p::core::transport::TransportEvent; -use libp2p::core::StreamMuxer; -use libp2p::identity::PeerId; -use libp2p::Transport; +use libp2p::{ + core::{muxing::StreamMuxerExt, transport::TransportEvent, StreamMuxer}, + identity::PeerId, + Transport, +}; use pin_project::pin_project; +use serde::{Deserialize, Serialize}; +use tracing::warn; /// The maximum size of an authentication message. This is used to prevent /// DoS attacks by sending large messages. @@ -516,13 +513,13 @@ pub async fn write_length_delimited( #[cfg(test)] mod test { + use std::{collections::HashSet, sync::Arc}; + + use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use libp2p::{core::transport::dummy::DummyTransport, quic::Connection}; use rand::Rng; - use std::{collections::HashSet, sync::Arc}; - use super::*; - use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; /// A mock type to help with readability type MockStakeTableAuth = StakeTableAuthentication; diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 40aca7bf23..37a97093f8 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -103,6 +103,7 @@ pub async fn create_and_send_proposal( instance_state.as_ref(), &parent_leaf, commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fees.to_vec(), vid_share.data.common, diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 537f6e6c66..3dbf147c4f 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -190,6 +190,7 @@ impl ProposalDependencyHandle { self.instance_state.as_ref(), &parent_leaf, commitment_and_metadata.commitment, + commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fees.to_vec(), vid_share.data.common.clone(), diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 00b24488af..c9acc5896d 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -6,6 +6,11 @@ // TODO: Remove this after integration #![allow(unused_imports)] +use std::{ + collections::{HashMap, HashSet}, + time::Duration, +}; + use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes}, state_types::TestTypes, @@ -18,11 +23,7 @@ use hotshot_testing::{ test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; -use hotshot_types::data::ViewNumber; -use hotshot_types::traits::node_implementation::ConsensusTime; -use std::collections::HashSet; -use std::{collections::HashMap, time::Duration}; - +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; #[cfg(async_executor_impl = "async-std")] use {hotshot::tasks::DishonestLeader, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; // Test that a good leader can succeed in the view directly after view sync diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 4a7df73c6c..80f4e18192 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -1,6 +1,5 @@ #![cfg(feature = "dependency-tasks")] -use itertools::Itertools; use std::time::Duration; use async_compatibility_layer::art::async_timeout; @@ -25,6 +24,7 @@ use hotshot_types::{ consensus::OuterConsensus, data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, }; +use itertools::Itertools; const TIMEOUT: Duration = Duration::from_millis(35); diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 5329dd0597..c8ba1c3883 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -189,7 +189,7 @@ pub trait BlockHeader: type Error: Error + Debug + Send + Sync; /// Build a header with the parent validate state, instance-level state, parent leaf, payload - /// commitment, and metadata. This is only used in pre-marketplace versions + /// and builder commitments, and metadata. This is only used in pre-marketplace versions #[allow(clippy::too_many_arguments)] fn new_legacy( parent_state: &TYPES::ValidatedState, @@ -204,13 +204,15 @@ pub trait BlockHeader: ) -> impl Future> + Send; /// Build a header with the parent validate state, instance-level state, parent leaf, payload - /// commitment, metadata, and auction results. This is only used in post-marketplace versions + /// and builder commitments, metadata, and auction results. This is only used in post-marketplace + /// versions #[allow(clippy::too_many_arguments)] fn new_marketplace( parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, parent_leaf: &Leaf, payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, metadata: >::Metadata, builder_fee: Vec>, vid_common: VidCommon, From e6b9fecf29925d22f929424c17f314bbae99eee2 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 8 Aug 2024 16:10:56 -0400 Subject: [PATCH 1172/1393] Remove whole vote now path (#3553) --- task-impls/src/events.rs | 8 +-- .../src/quorum_proposal_recv/handlers.rs | 18 ++---- task-impls/src/quorum_proposal_recv/mod.rs | 58 +------------------ task-impls/src/quorum_vote/mod.rs | 49 ++-------------- testing/src/predicates/event.rs | 10 ---- .../tests_1/quorum_proposal_recv_task.rs | 8 +-- testing/tests/tests_1/quorum_vote_task.rs | 51 ---------------- types/src/vote.rs | 22 +------ 8 files changed, 15 insertions(+), 209 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 4349d7969b..ee49a73141 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -26,7 +26,7 @@ use hotshot_types::{ traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, utils::{BuilderCommitment, View}, vid::VidCommitment, - vote::{HasViewNumber, VoteDependencyData}, + vote::HasViewNumber, }; use vec1::Vec1; @@ -186,9 +186,6 @@ pub enum HotShotEvent { /// A HotShot upgrade was decided UpgradeDecided(UpgradeCertificate), - /// Initiate a vote right now for the designated view. - VoteNow(TYPES::Time, VoteDependencyData), - /* Consensus State Update Events */ /// A undecided view has been created and added to the validated state storage. ValidatedStateUpdated(TYPES::Time, View), @@ -430,9 +427,6 @@ impl Display for HotShotEvent { HotShotEvent::QuorumProposalRequest(view_number) => { write!(f, "QuorumProposalRequest(view_number={view_number:?})") } - HotShotEvent::VoteNow(view_number, _) => { - write!(f, "VoteNow(view_number={view_number:?})") - } HotShotEvent::ValidatedStateUpdated(view_number, _) => { write!(f, "ValidatedStateUpdated(view_number={view_number:?})") } diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 92eac13eb6..0b53c96880 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -37,23 +37,13 @@ use crate::{ }, }; -/// Whether the proposal contained in `QuorumProposalRecv` is fully validated or only the liveness -/// is checked. -pub(crate) enum QuorumProposalValidity { - /// Fully validated. - Fully, - /// Not fully validated due to the parent information missing in the internal state, but the - /// liveness is validated. - Liveness, -} - /// Update states in the event that the parent state is not found for a given `proposal`. #[instrument(skip_all)] async fn validate_proposal_liveness>( proposal: &Proposal>, event_sender: &Sender>>, task_state: &mut QuorumProposalRecvTaskState, -) -> Result { +) -> Result<()> { let view_number = proposal.data.view_number(); let mut consensus_write = task_state.consensus.write().await; @@ -122,7 +112,7 @@ async fn validate_proposal_liveness>>, task_state: &mut QuorumProposalRecvTaskState, -) -> Result { +) -> Result<()> { let sender = sender.clone(); let cur_view = task_state.cur_view; @@ -266,5 +256,5 @@ pub(crate) async fn handle_quorum_proposal_recv> QuorumProposalRecvTaskState< #[cfg(feature = "dependency-tasks")] if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { match handle_quorum_proposal_recv(proposal, sender, &event_stream, self).await { - Ok(QuorumProposalValidity::Fully) => { + Ok(()) => { self.cancel_tasks(proposal.data.view_number()).await; } - Ok(QuorumProposalValidity::Liveness) => { - // Build the parent leaf since we didn't find it during the proposal check. - let parent_leaf = match parent_leaf_and_state( - proposal.data.view_number() + 1, - &event_stream, - Arc::clone(&self.quorum_membership), - self.public_key.clone(), - OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), - ) - .await - { - Ok((parent_leaf, _ /* state */)) => parent_leaf, - Err(error) => { - warn!("Failed to get parent leaf and state during VoteNow data construction; error = {error:#}"); - return; - } - }; - - let view_number = proposal.data.view_number(); - self.cancel_tasks(view_number).await; - let consensus = self.consensus.read().await; - let Some(vid_shares) = consensus.vid_shares().get(&view_number) else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - view_number - ); - return; - }; - let Some(vid_share) = vid_shares.get(&self.public_key) else { - error!("Did not get a VID share for our public key, aborting vote"); - return; - }; - let Some(da_cert) = consensus.saved_da_certs().get(&view_number) else { - debug!( - "Received VID share, but couldn't find DAC cert for view {:?}", - view_number - ); - return; - }; - broadcast_event( - Arc::new(HotShotEvent::VoteNow( - view_number, - VoteDependencyData { - quorum_proposal: proposal.data.clone(), - parent_leaf, - vid_share: vid_share.clone(), - da_cert: da_cert.clone(), - }, - )), - &event_stream, - ) - .await; - } Err(e) => debug!(?e, "Failed to validate the proposal"), } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index cf6040f3f5..38117e0d3a 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -14,7 +14,7 @@ use async_std::task::JoinHandle; use async_trait::async_trait; use committable::Committable; use hotshot_task::{ - dependency::{AndDependency, Dependency, EventDependency, OrDependency}, + dependency::{AndDependency, Dependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; @@ -40,7 +40,7 @@ use hotshot_types::{ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, trace, warn}; +use tracing::{debug, error, instrument, trace, warn}; use crate::{ events::HotShotEvent, @@ -60,8 +60,6 @@ enum VoteDependency { Dac, /// For the `VidShareRecv` event. Vid, - /// For the `VoteNow` event. - VoteNow, } /// Handler for the vote dependency. @@ -312,10 +310,6 @@ impl + 'static> HandleDepOutput payload_commitment = Some(vid_payload_commitment); } } - HotShotEvent::VoteNow(_, vote_dependency_data) => { - leaf = Some(vote_dependency_data.parent_leaf.clone()); - vid_share = Some(vote_dependency_data.vid_share.clone()); - } _ => {} } } @@ -434,13 +428,6 @@ impl> QuorumVoteTaskState { - if let HotShotEvent::VoteNow(view, _) = event { - *view - } else { - return false; - } - } }; if event_view == view_number { trace!("Vote dependency {:?} completed", dependency_type); @@ -479,32 +466,15 @@ impl> QuorumVoteTaskState { - vote_now_dependency.mark_as_completed(event); - } - HotShotEvent::QuorumProposalValidated(..) => { - quorum_proposal_dependency.mark_as_completed(event); - } - _ => {} + if let HotShotEvent::QuorumProposalValidated(..) = event.as_ref() { + quorum_proposal_dependency.mark_as_completed(event); } } let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; - let dependency_chain = OrDependency::from_deps(vec![ - // Either we fulfill the dependencies individually. - AndDependency::from_deps(deps), - // Or we fulfill the single dependency that contains all the info that we need. - AndDependency::from_deps(vec![vote_now_dependency]), - ]); + let dependency_chain = AndDependency::from_deps(deps); let dependency_task = DependencyTask::new( dependency_chain, @@ -559,15 +529,6 @@ impl> QuorumVoteTaskState>>, ) { match event.as_ref() { - HotShotEvent::VoteNow(view, ..) => { - info!("Vote NOW for view {:?}", *view); - self.create_dependency_task_if_new( - *view, - event_receiver, - &event_sender, - Some(event), - ); - } HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { trace!("Received Proposal for view {}", *proposal.view_number()); diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 61e358d569..8b740b8482 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -284,16 +284,6 @@ where Box::new(EventPredicate { check, info }) } -pub fn vote_now() -> Box> -where - TYPES: NodeType, -{ - let info = "VoteNow".to_string(); - let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), VoteNow(..))); - Box::new(EventPredicate { check, info }) -} - pub fn validated_state_updated() -> Box> where TYPES: NodeType, diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index ffb4a92541..b737d90927 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -20,7 +20,7 @@ use hotshot_task_impls::{ }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf_and_state, build_system_handle}, - predicates::event::{all_predicates, exact, quorum_proposal_missing, vote_now}, + predicates::event::{all_predicates, exact, quorum_proposal_missing}, script::InputOrder, serial, view_generator::TestViewGenerator, @@ -127,10 +127,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { helpers::{build_fake_view_with_leaf, build_fake_view_with_leaf_and_state}, script::{Expectations, TaskScript}, }; - use hotshot_types::{ - data::Leaf, - vote::{HasViewNumber, VoteDependencyData}, - }; + use hotshot_types::{data::Leaf, vote::HasViewNumber}; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -209,7 +206,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { )), quorum_proposal_missing(), exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), - vote_now(), ])]; let state = QuorumProposalRecvTaskState::::create_from(&handle).await; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 9d884bf3be..d89789a482 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -95,57 +95,6 @@ async fn test_quorum_vote_task_success() { run_test![inputs, script].await; } -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_quorum_vote_task_vote_now() { - use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; - use hotshot_testing::{ - helpers::build_system_handle, - predicates::event::{exact, quorum_vote_send, validated_state_updated}, - view_generator::TestViewGenerator, - }; - use hotshot_types::vote::VoteDependencyData; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(2).await.0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - generator.next().await; - let view = generator.current_view.clone().unwrap(); - - let vote_dependency_data = VoteDependencyData { - quorum_proposal: view.quorum_proposal.data.clone(), - parent_leaf: view.leaf.clone(), - vid_share: view.vid_proposal.0[0].clone(), - da_cert: view.da_certificate.clone(), - }; - - // Submit an event with just the `VoteNow` event which should successfully send a vote. - let inputs = vec![serial![VoteNow(view.view_number, vote_dependency_data),]]; - - let expectations = vec![Expectations::from_outputs(vec![ - exact(QuorumVoteDependenciesValidated(ViewNumber::new(1))), - validated_state_updated(), - quorum_vote_send(), - ])]; - - let quorum_vote_state = - QuorumVoteTaskState::::create_from(&handle).await; - - let mut script = TaskScript { - timeout: TIMEOUT, - state: quorum_vote_state, - expectations, - }; - run_test![inputs, script].await; -} - #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] diff --git a/types/src/vote.rs b/types/src/vote.rs index 160c963f50..14b868d624 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -18,9 +18,7 @@ use ethereum_types::U256; use tracing::error; use crate::{ - data::{Leaf, QuorumProposal, VidDisperseShare}, - message::Proposal, - simple_certificate::{DaCertificate, Threshold}, + simple_certificate::Threshold, simple_vote::Voteable, traits::{ election::Membership, @@ -188,21 +186,3 @@ impl, CERT: Certificate = HashMap)>; - -/// Payload for the `HotShotEvents::VoteNow` event type. The proposal and leaf are -/// obtained via a `QuorumProposalValidated` event being processed. -#[derive(Eq, Hash, PartialEq, Debug, Clone)] -pub struct VoteDependencyData { - /// The quorum proposal (not necessarily valid). - pub quorum_proposal: QuorumProposal, - - /// The leaf we've obtained from the `QuorumProposalValidated` event. This is the - /// parent leaf. - pub parent_leaf: Leaf, - - /// The VID share proposal. - pub vid_share: Proposal>, - - /// The DA certificate. - pub da_cert: DaCertificate, -} From fd805f1c6c0db3fa97055e1b8f8744730b3ad529 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Thu, 8 Aug 2024 16:49:07 -0400 Subject: [PATCH 1173/1393] Consecutive Leader Proposal Fix (#3545) * add fix and UT * remove return, and remove uneeded logic --- task-impls/src/quorum_proposal/mod.rs | 1 - testing/tests/tests_1/test_success.rs | 23 ++++++++++++++++++- testing/tests/tests_1/test_with_failures_2.rs | 2 -- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index cbc0ef96d6..f2179058cb 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -443,7 +443,6 @@ impl> QuorumProposalTaskState Date: Fri, 9 Aug 2024 19:51:11 +0530 Subject: [PATCH 1174/1393] fix:: Implement graceful shutdown for Byzantine integration tests in tokio (#3535) * fix:: Implement graceful shutdown for Byzantine integration tests in tokio and async-std Signed-off-by: shamb0 * Refactor code: reviewed and removed duplicates Signed-off-by: shamb0 * Refactor code: reviewed and removed duplicates Signed-off-by: shamb0 --------- Signed-off-by: shamb0 --- hotshot/src/tasks/mod.rs | 172 +++++++++++++++++++++----- hotshot/src/types/handle.rs | 5 +- task/src/task.rs | 24 ++-- testing/tests/tests_1/test_success.rs | 3 - 4 files changed, 162 insertions(+), 42 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 12ab6c025c..ce9634140c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -14,6 +14,10 @@ use async_broadcast::broadcast; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; +use futures::{ + future::{BoxFuture, FutureExt}, + stream, StreamExt, +}; use hotshot_task::task::Task; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; @@ -106,13 +110,15 @@ pub fn add_network_message_task< let network = Arc::clone(channel); let mut state = network_state.clone(); + let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let task_handle = async_spawn(async move { - loop { + futures::pin_mut!(shutdown_signal); + + let recv_stream = stream::unfold((), |()| async { let decided_upgrade_certificate_lock = decided_upgrade_certificate.read().await.clone(); let msgs = match network.recv_msgs().await { Ok(msgs) => { let mut deserialized_messages = Vec::new(); - for msg in msgs { let deserialized_message = match VersionedMessage::deserialize( &msg, @@ -124,24 +130,42 @@ pub fn add_network_message_task< continue; } }; - deserialized_messages.push(deserialized_message); } - Messages(deserialized_messages) } Err(err) => { tracing::error!("failed to receive messages: {err}"); - - // return zero messages so we sleep and try again Messages(vec![]) } }; - if msgs.0.is_empty() { - // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 - async_sleep(Duration::from_millis(100)).await; - } else { - state.handle_messages(msgs.0).await; + Some((msgs, ())) + }); + + let fused_recv_stream = recv_stream.boxed().fuse(); + futures::pin_mut!(fused_recv_stream); + + loop { + futures::select! { + () = shutdown_signal => { + tracing::error!("Shutting down network message task"); + return; + } + msgs_option = fused_recv_stream.next() => { + if let Some(msgs) = msgs_option { + if msgs.0.is_empty() { + // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 + async_sleep(Duration::from_millis(100)).await; + } else { + state.handle_messages(msgs.0).await; + } + } else { + // Stream has ended, which shouldn't happen in this case. + // You might want to handle this situation, perhaps by breaking the loop or logging an error. + tracing::error!("Network message stream unexpectedly ended"); + return; + } + } } } }); @@ -212,6 +236,38 @@ pub async fn add_consensus_tasks>( handle.add_task(RewindTaskState::::create_from(&handle).await); } +/// Creates a monitor for shutdown events. +/// +/// # Returns +/// A `BoxFuture<'static, ()>` that resolves when a `HotShotEvent::Shutdown` is detected. +/// +/// # Usage +/// Use in `select!` macros or similar constructs for graceful shutdowns: +#[must_use] +pub fn create_shutdown_event_monitor>( + handle: &SystemContextHandle, +) -> BoxFuture<'static, ()> { + // Activate the cloned internal event stream + let mut event_stream = handle.internal_event_stream.1.activate_cloned(); + + // Create a future that completes when the `HotShotEvent::Shutdown` is received + async move { + loop { + match event_stream.recv_direct().await { + Ok(event) => { + if matches!(event.as_ref(), HotShotEvent::Shutdown) { + return; + } + } + Err(e) => { + tracing::error!("Shutdown event monitor channel recv error: {}", e); + } + } + } + } + .boxed() +} + #[async_trait] /// Trait for intercepting and modifying messages between the network and consensus layers. /// @@ -277,6 +333,7 @@ where } /// Add byzantine network tasks with the trait + #[allow(clippy::too_many_lines)] async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { let state_in = Arc::new(RwLock::new(self)); let state_out = Arc::clone(&state_in); @@ -286,9 +343,9 @@ where // channel to the network task let (sender_to_network, network_task_receiver) = broadcast(EVENT_CHANNEL_SIZE); // channel from the network task - let (network_task_sender, mut receiver_from_network) = broadcast(EVENT_CHANNEL_SIZE); + let (network_task_sender, receiver_from_network) = broadcast(EVENT_CHANNEL_SIZE); // create a copy of the original receiver - let (original_sender, mut original_receiver) = ( + let (original_sender, original_receiver) = ( handle.internal_event_stream.0.clone(), handle.internal_event_stream.1.activate_cloned(), ); @@ -314,17 +371,47 @@ where // spawn a task to listen on the (original) internal event stream, // and broadcast the transformed events to the replacement event stream we just created. + let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let send_handle = async_spawn(async move { - loop { - if let Ok(msg) = original_receiver.recv().await { - let mut state = state_out.write().await; + futures::pin_mut!(shutdown_signal); - let mut results = state.send_handler(&msg).await; + let recv_stream = stream::unfold(original_receiver, |mut recv| async move { + match recv.recv().await { + Ok(event) => Some((Ok(event), recv)), + Err(async_broadcast::RecvError::Closed) => None, + Err(e) => Some((Err(e), recv)), + } + }) + .boxed(); - results.reverse(); + let fused_recv_stream = recv_stream.fuse(); + futures::pin_mut!(fused_recv_stream); - while let Some(event) = results.pop() { - let _ = sender_to_network.broadcast(event.into()).await; + loop { + futures::select! { + () = shutdown_signal => { + tracing::error!("Shutting down relay send task"); + let _ = sender_to_network.broadcast(HotShotEvent::::Shutdown.into()).await; + return; + } + event = fused_recv_stream.next() => { + match event { + Some(Ok(msg)) => { + let mut state = state_out.write().await; + let mut results = state.send_handler(&msg).await; + results.reverse(); + while let Some(event) = results.pop() { + let _ = sender_to_network.broadcast(event.into()).await; + } + } + Some(Err(e)) => { + tracing::error!("Relay Task, send_handle, Error receiving event: {:?}", e); + } + None => { + tracing::info!("Relay Task, send_handle, Event stream closed"); + return; + } + } } } } @@ -332,17 +419,46 @@ where // spawn a task to listen on the newly created event stream, // and broadcast the transformed events to the original internal event stream + let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let recv_handle = async_spawn(async move { - loop { - if let Ok(msg) = receiver_from_network.recv().await { - let mut state = state_in.write().await; - - let mut results = state.recv_handler(&msg).await; + futures::pin_mut!(shutdown_signal); + + let network_recv_stream = + stream::unfold(receiver_from_network, |mut recv| async move { + match recv.recv().await { + Ok(event) => Some((Ok(event), recv)), + Err(async_broadcast::RecvError::Closed) => None, + Err(e) => Some((Err(e), recv)), + } + }); - results.reverse(); + let fused_network_recv_stream = network_recv_stream.boxed().fuse(); + futures::pin_mut!(fused_network_recv_stream); - while let Some(event) = results.pop() { - let _ = original_sender.broadcast(event.into()).await; + loop { + futures::select! { + () = shutdown_signal => { + tracing::error!("Shutting down relay receive task"); + return; + } + event = fused_network_recv_stream.next() => { + match event { + Some(Ok(msg)) => { + let mut state = state_in.write().await; + let mut results = state.recv_handler(&msg).await; + results.reverse(); + while let Some(event) = results.pop() { + let _ = original_sender.broadcast(event.into()).await; + } + } + Some(Err(e)) => { + tracing::error!("Relay Task, recv_handle, Error receiving event from network: {:?}", e); + } + None => { + tracing::info!("Relay Task, recv_handle, Network event stream closed"); + return; + } + } } } } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index dc7ae4c326..4587e145aa 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -176,12 +176,13 @@ impl + 'static> SystemContextHandl .broadcast_direct(Arc::new(HotShotEvent::Shutdown)) .await .inspect_err(|err| tracing::error!("Failed to send shutdown event: {err}")); - tracing::error!("Shutting down network tasks!"); - self.network_registry.shutdown().await; tracing::error!("Shutting down the network!"); self.hotshot.network.shut_down().await; + tracing::error!("Shutting down network tasks!"); + self.network_registry.shutdown().await; + tracing::error!("Shutting down consensus!"); self.consensus_registry.shutdown().await; } diff --git a/task/src/task.rs b/task/src/task.rs index 2c3603f81b..af195d1e27 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -174,16 +174,22 @@ impl NetworkTaskRegistry { } #[allow(clippy::unused_async)] - /// Shuts down all tasks in the registry, performing any associated cleanup. + /// Shuts down all tasks managed by this instance. + /// + /// This function waits for all tasks to complete before returning. + /// + /// # Panics + /// + /// When using the tokio executor, this function will panic if any of the + /// tasks being joined return an error. pub async fn shutdown(&mut self) { - let handles = &mut self.handles; - - while let Some(handle) = handles.pop() { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] - handle.abort(); - } + let handles = std::mem::take(&mut self.handles); + #[cfg(async_executor_impl = "async-std")] + join_all(handles).await; + #[cfg(async_executor_impl = "tokio")] + try_join_all(handles) + .await + .expect("Failed to join all tasks during shutdown"); } /// Add a task to the registry diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index fb0fa12266..5f5276f19f 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -17,7 +17,6 @@ use hotshot_testing::{ test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; -#[cfg(async_executor_impl = "async-std")] use { hotshot::tasks::{BadProposalViewDos, DoubleProposeVote}, hotshot_testing::test_builder::Behaviour, @@ -42,7 +41,6 @@ cross_tests!( }, ); -#[cfg(async_executor_impl = "async-std")] cross_tests!( TestName: double_propose_vote, Impls: [MemoryImpl], @@ -68,7 +66,6 @@ cross_tests!( ); // Test where node 4 sends out the correct quorum proposal and additionally spams the network with an extra 99 malformed proposals -#[cfg(async_executor_impl = "async-std")] cross_tests!( TestName: multiple_bad_proposals, Impls: [MemoryImpl], From f6688f89c748abac438e73f475caec6e91412e35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 15:25:23 +0000 Subject: [PATCH 1175/1393] Bump derive_more from 0.99.18 to 1.0.0 (#3552) --- builder-api/Cargo.toml | 6 +++--- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 97d171b3be..9ed37b292f 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -7,9 +7,9 @@ edition = "2021" [dependencies] async-trait = { workspace = true } -clap.workspace = true -derive_more = "0.99" -futures = "0.3" +clap = { workspace = true } +derive_more = { workspace = true } +futures = { workspace = true } hotshot-types = { path = "../types" } serde = { workspace = true } snafu = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b51ccc857d..479b0422bb 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -104,7 +104,7 @@ serde = { workspace = true, features = ["rc"] } snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -derive_more = "0.99" +derive_more = { workspace = true } portpicker = "0.1" lru = "0.12" hotshot-task = { path = "../task" } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 7bebae6c46..b17b060a73 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -37,7 +37,7 @@ clap = { workspace = true, optional = true } committable = { workspace = true } custom_debug = { workspace = true } dashmap = "6" -derive_more = "0.99" +derive_more = { workspace = true } either = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } From 7381fc7773d4a470d65d8c9e9c64ab496ff739c4 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 9 Aug 2024 17:44:10 +0200 Subject: [PATCH 1176/1393] Don't skip fallback builder when solver fails (#3558) --- examples/infra/mod.rs | 4 ++-- hotshot/src/lib.rs | 4 ++-- hotshot/src/tasks/task_state.rs | 4 ++-- task-impls/src/transactions.rs | 16 +++++++++++----- testing/src/test_builder.rs | 3 +-- 5 files changed, 18 insertions(+), 13 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index cda136d090..3afdf5f090 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -404,8 +404,8 @@ pub trait RunDa< let marketplace_config = MarketplaceConfig { auction_results_provider: TestAuctionResultsProvider::::default().into(), - // TODO: we need to pass a valid generic builder url here somehow - generic_builder_url: url::Url::parse("http://localhost").unwrap(), + // TODO: we need to pass a valid fallback builder url here somehow + fallback_builder_url: url::Url::parse("http://localhost").unwrap(), }; SystemContext::init( diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 40cfea0091..7998efc9b1 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -83,8 +83,8 @@ pub const H_256: usize = 32; pub struct MarketplaceConfig> { /// auction results provider pub auction_results_provider: Arc, - /// generic builder - pub generic_builder_url: Url, + /// fallback builder + pub fallback_builder_url: Url, } /// Bundle of all the memberships a consensus instance uses diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 6c3746b527..9d78dfa285 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -213,10 +213,10 @@ impl> CreateTaskState auction_results_provider: Arc::clone( &handle.hotshot.marketplace_config.auction_results_provider, ), - generic_builder_url: handle + fallback_builder_url: handle .hotshot .marketplace_config - .generic_builder_url + .fallback_builder_url .clone(), } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index ab6b3fb9c6..3aaa24b763 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -110,8 +110,8 @@ pub struct TransactionTaskState> { pub decided_upgrade_certificate: Arc>>>, /// auction results provider pub auction_results_provider: Arc, - /// generic builder url - pub generic_builder_url: Url, + /// fallback builder url + pub fallback_builder_url: Url, } impl> TransactionTaskState { @@ -276,17 +276,21 @@ impl> TransactionTaskState> TransactionTaskState { auction_results_provider: TestAuctionResultsProvider::::default().into(), - // TODO: we need to pass a valid generic builder url here somehow - generic_builder_url: Url::parse("http://localhost").unwrap(), + fallback_builder_url: Url::parse("http://localhost").unwrap(), }), }, metadata: self, From d47d8613b32146c8f8892f3805047cac5c860f52 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 9 Aug 2024 12:12:22 -0400 Subject: [PATCH 1177/1393] fix test (#3559) --- task-impls/src/quorum_vote/mod.rs | 1 + testing/tests/tests_1/quorum_vote_task.rs | 1 - .../tests/tests_1/vote_dependency_handle.rs | 66 +++++++------------ 3 files changed, 26 insertions(+), 42 deletions(-) diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 38117e0d3a..2cde7fc04e 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -238,6 +238,7 @@ impl + 'static> HandleDepOutput #[allow(clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { let high_qc_view_number = self.consensus.read().await.high_qc().view_number; + // The validated state of a non-genesis high QC should exist in the state map. if *high_qc_view_number != *ViewNumber::genesis() && !self diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index d89789a482..b9643c3073 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -19,7 +19,6 @@ use hotshot_testing::{ predicates::event::all_predicates, random, script::{Expectations, InputOrder, TaskScript}, - serial, }; use hotshot_types::{ data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 80f4e18192..b831c256bd 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -2,27 +2,20 @@ use std::time::Duration; +use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; use futures::StreamExt; -use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; -use hotshot_macros::{run_test, test_scripts}; -use hotshot_task_impls::{ - events::HotShotEvent::*, - quorum_vote::{QuorumVoteTaskState, VoteDependencyHandle}, -}; +use hotshot_task::dependency_task::HandleDepOutput; +use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::VoteDependencyHandle}; use hotshot_testing::{ - all_predicates, - helpers::{build_fake_view_with_leaf, build_system_handle, vid_share}, + helpers::{build_fake_view_with_leaf, build_system_handle}, predicates::{event::*, Predicate, PredicateResult}, - random, - script::{Expectations, InputOrder, TaskScript}, - serial, view_generator::TestViewGenerator, }; use hotshot_types::{ - consensus::OuterConsensus, data::ViewNumber, traits::node_implementation::ConsensusTime, - vote::HasViewNumber, + consensus::OuterConsensus, data::ViewNumber, traits::consensus_api::ConsensusApi, + traits::node_implementation::ConsensusTime, vote::HasViewNumber, }; use itertools::Itertools; @@ -34,14 +27,11 @@ const TIMEOUT: Duration = Duration::from_millis(35); async fn test_vote_dependency_handle() { use std::sync::Arc; - use hotshot_task_impls::helpers::broadcast_event; - async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); // We use a node ID of 2 here abitrarily. We just need it to build the system handle. let node_id = 2; - // Construct the system handle for the node ID to build all of the state objects. let handle = build_system_handle::(node_id) .await @@ -58,7 +48,7 @@ async fn test_vote_dependency_handle() { let mut vids = Vec::new(); let consensus = handle.hotshot.consensus().clone(); let mut consensus_writer = consensus.write().await; - for view in (&mut generator).take(2).collect::>().await { + for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); @@ -92,48 +82,42 @@ async fn test_vote_dependency_handle() { quorum_vote_send(), ]; - // We only need this to be able to make the vote dependency handle state. It's not explicitly necessary, but it's easy. - let qv = QuorumVoteTaskState::::create_from(&handle).await; - - let event_sender = handle.internal_event_stream_sender(); - let mut event_receiver = handle.internal_event_stream_receiver_known_impl(); + let (event_sender, mut event_receiver) = broadcast(1024); let view_number = ViewNumber::new(node_id); let vote_dependency_handle_state = VoteDependencyHandle:: { - public_key: qv.public_key.clone(), - private_key: qv.private_key.clone(), - consensus: OuterConsensus::new(Arc::clone(&qv.consensus.inner_consensus)), - instance_state: Arc::clone(&qv.instance_state), - quorum_membership: Arc::clone(&qv.quorum_membership), - storage: Arc::clone(&qv.storage), + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + consensus: OuterConsensus::new(consensus.clone()), + instance_state: handle.hotshot.instance_state(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + storage: Arc::clone(&handle.storage()), view_number, sender: event_sender.clone(), receiver: event_receiver.clone(), - decided_upgrade_certificate: Arc::clone(&qv.decided_upgrade_certificate), - id: qv.id, + decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + id: handle.hotshot.id, }; - let inputs_len = inputs.len(); - for event in inputs.into_iter() { - broadcast_event(event.into(), &event_sender).await; - } + vote_dependency_handle_state + .handle_dep_result(inputs.clone().into_iter().map(|i| i.into()).collect()) + .await; // We need to avoid re-processing the inputs during our output evaluation. This part here is not // strictly necessary, but it makes writing the outputs easier. - let mut i = 0; let mut output_events = vec![]; while let Ok(Ok(received_output)) = async_timeout(TIMEOUT, event_receiver.recv_direct()).await { - // Skip over all inputs (the order is deterministic). - if i < inputs_len { - i += 1; - continue; - } - output_events.push(received_output); } + assert_eq!( + output_events.len(), + outputs.len(), + "Output event count differs from expected" + ); + // Finally, evaluate that the test does what we expected. The control flow of the handle always // outputs in the same order. for (check, real) in outputs.into_iter().zip(output_events) { From a260d252f2e243724721659136b7cbee3e4ab3c7 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Mon, 12 Aug 2024 11:38:35 -0400 Subject: [PATCH 1178/1393] Adding delays to trait functions the sequencer implements (#3556) * framework for adding delay options * add delay and write test * refactoring * add support to set delay times * remove async guard * run linter * add default implementation for handle_async_delay * update assert log * fix formatting --- example-types/src/block_types.rs | 27 +++++- example-types/src/lib.rs | 3 + example-types/src/state_types.rs | 35 +++++-- example-types/src/storage_types.rs | 20 ++++ example-types/src/testable_delay.rs | 132 ++++++++++++++++++++++++++ examples/infra/mod.rs | 7 +- testing/src/helpers.rs | 8 +- testing/src/spinning_task.rs | 9 +- testing/src/test_builder.rs | 24 +++-- testing/src/test_runner.rs | 22 +++-- testing/src/view_generator.rs | 6 +- testing/tests/tests_1/test_success.rs | 71 ++++++++++++++ testing/tests/tests_1/vid_task.rs | 2 +- 13 files changed, 328 insertions(+), 38 deletions(-) create mode 100644 example-types/src/testable_delay.rs diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 46450affe9..556312a05f 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -31,6 +31,7 @@ use vbs::version::Version; use crate::{ node_types::TestTypes, state_types::{TestInstanceState, TestValidatedState}, + testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}, }; /// The transaction in a [`TestBlockPayload`]. @@ -268,14 +269,19 @@ impl TestBlockHeader { } } -impl> BlockHeader - for TestBlockHeader +impl< + TYPES: NodeType< + BlockHeader = Self, + BlockPayload = TestBlockPayload, + InstanceState = TestInstanceState, + >, + > BlockHeader for TestBlockHeader { type Error = std::convert::Infallible; async fn new_legacy( _parent_state: &TYPES::ValidatedState, - _instance_state: &>::Instance, + instance_state: &>::Instance, parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, @@ -284,6 +290,7 @@ impl> Block _vid_common: VidCommon, _version: Version, ) -> Result { + Self::run_delay_settings_from_config(&instance_state.delay_config).await; Ok(Self::new( parent_leaf, payload_commitment, @@ -293,7 +300,7 @@ impl> Block async fn new_marketplace( _parent_state: &TYPES::ValidatedState, - _instance_state: &>::Instance, + instance_state: &>::Instance, parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, @@ -303,6 +310,7 @@ impl> Block _auction_results: Option, _version: Version, ) -> Result { + Self::run_delay_settings_from_config(&instance_state.delay_config).await; Ok(Self::new( parent_leaf, payload_commitment, @@ -365,3 +373,14 @@ impl Committable for TestBlockHeader { "TEST_HEADER".to_string() } } + +#[async_trait] +impl TestableDelay for TestBlockHeader { + async fn run_delay_settings_from_config(delay_config: &DelayConfig) { + if let Some(settings) = + delay_config.get_setting(&SupportedTraitTypesForAsyncDelay::BlockHeader) + { + Self::handle_async_delay(settings).await; + } + } +} diff --git a/example-types/src/lib.rs b/example-types/src/lib.rs index 95622b7c21..7815506794 100644 --- a/example-types/src/lib.rs +++ b/example-types/src/lib.rs @@ -18,3 +18,6 @@ pub mod storage_types; /// auction types for solver-to-hotshot interactions pub mod auction_results_provider_types; + +/// add a delay to async functions +pub mod testable_delay; diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 8563dbd3fa..d005084d0f 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -5,8 +5,7 @@ // along with the HotShot repository. If not, see . //! Implementations for examples and tests only -use std::fmt::Debug; - +use async_trait::async_trait; use committable::{Commitment, Committable}; use hotshot_types::{ data::{fake_commitment, BlockError, Leaf, ViewNumber}, @@ -20,17 +19,29 @@ use hotshot_types::{ }; use rand::Rng; use serde::{Deserialize, Serialize}; +use std::fmt::Debug; use vbs::version::Version; -use crate::block_types::{TestBlockPayload, TestTransaction}; pub use crate::node_types::TestTypes; +use crate::{ + block_types::{TestBlockPayload, TestTransaction}, + testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}, +}; /// Instance-level state implementation for testing purposes. -#[derive(Clone, Copy, Debug, Default)] -pub struct TestInstanceState {} +#[derive(Clone, Debug, Default)] +pub struct TestInstanceState { + pub delay_config: DelayConfig, +} impl InstanceState for TestInstanceState {} +impl TestInstanceState { + pub fn new(delay_config: DelayConfig) -> Self { + TestInstanceState { delay_config } + } +} + /// Application-specific state delta implementation for testing purposes. #[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct TestStateDelta {} @@ -68,6 +79,17 @@ impl Default for TestValidatedState { } } +#[async_trait] +impl TestableDelay for TestValidatedState { + async fn run_delay_settings_from_config(delay_config: &DelayConfig) { + if let Some(settings) = + delay_config.get_setting(&SupportedTraitTypesForAsyncDelay::ValidatedState) + { + Self::handle_async_delay(settings).await; + } + } +} + impl ValidatedState for TestValidatedState { type Error = BlockError; @@ -79,12 +101,13 @@ impl ValidatedState for TestValidatedState { async fn validate_and_apply_header( &self, - _instance: &Self::Instance, + instance: &Self::Instance, _parent_leaf: &Leaf, _proposed_header: &TYPES::BlockHeader, _vid_common: VidCommon, _version: Version, ) -> Result<(Self, Self::Delta), Self::Error> { + Self::run_delay_settings_from_config(&instance.delay_config).await; Ok(( TestValidatedState { block_height: self.block_height + 1, diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 024bfe1a89..e6be6ab756 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -22,6 +22,8 @@ use hotshot_types::{ vote::HasViewNumber, }; +use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; + type VidShares = HashMap< ::Time, HashMap<::SignatureKey, Proposal>>, @@ -51,6 +53,7 @@ pub struct TestStorage { inner: Arc>>, /// `should_return_err` is a testing utility to validate negative cases. pub should_return_err: bool, + pub delay_config: DelayConfig, } impl Default for TestStorage { @@ -58,6 +61,17 @@ impl Default for TestStorage { Self { inner: Arc::new(RwLock::new(TestStorageState::default())), should_return_err: false, + delay_config: DelayConfig::default(), + } + } +} + +#[async_trait] +impl TestableDelay for TestStorage { + async fn run_delay_settings_from_config(delay_config: &DelayConfig) { + if let Some(settings) = delay_config.get_setting(&SupportedTraitTypesForAsyncDelay::Storage) + { + Self::handle_async_delay(settings).await; } } } @@ -79,6 +93,7 @@ impl Storage for TestStorage { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } + Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; inner .vids @@ -92,6 +107,7 @@ impl Storage for TestStorage { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } + Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; inner .das @@ -105,6 +121,7 @@ impl Storage for TestStorage { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } + Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; inner .proposals @@ -120,6 +137,7 @@ impl Storage for TestStorage { if self.should_return_err { bail!("Failed to append Action to storage"); } + Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } @@ -130,6 +148,7 @@ impl Storage for TestStorage { if self.should_return_err { bail!("Failed to update high qc to storage"); } + Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; if let Some(ref current_high_qc) = inner.high_qc { if new_high_qc.view_number() > current_high_qc.view_number() { @@ -148,6 +167,7 @@ impl Storage for TestStorage { if self.should_return_err { bail!("Failed to update high qc to storage"); } + Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } } diff --git a/example-types/src/testable_delay.rs b/example-types/src/testable_delay.rs new file mode 100644 index 0000000000..98bb6c7d6b --- /dev/null +++ b/example-types/src/testable_delay.rs @@ -0,0 +1,132 @@ +use std::{collections::HashMap, time::Duration}; + +use async_compatibility_layer::art::async_sleep; +use async_trait::async_trait; +use rand::Rng; + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +/// What type of delay we want to apply to +pub enum DelayOptions { + None, + Random, + Fixed, +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +/// Current implementations that are supported for testing async delays +pub enum SupportedTraitTypesForAsyncDelay { + Storage, + ValidatedState, + BlockHeader, +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +/// Config for each supported type +pub struct DelaySettings { + // Option to tell the async function what to do + pub delay_option: DelayOptions, + // Rng min time + pub min_time_in_milliseconds: u64, + // Rng max time + pub max_time_in_milliseconds: u64, + // Fixed time for fixed delay option + pub fixed_time_in_milliseconds: u64, +} + +impl Default for DelaySettings { + fn default() -> Self { + DelaySettings { + delay_option: DelayOptions::None, + min_time_in_milliseconds: 0, + max_time_in_milliseconds: 0, + fixed_time_in_milliseconds: 0, + } + } +} + +#[derive(Eq, PartialEq, Debug, Clone, Default)] +/// Settings for each type +pub struct DelayConfig { + config: HashMap, +} + +impl DelayConfig { + pub fn new(config: HashMap) -> Self { + DelayConfig { config } + } + + pub fn add_settings_for_all_types(&mut self, settings: DelaySettings) { + let iterator = SupportedTraitTypesForAsyncDelayIterator::new(); + + for supported_type in iterator { + self.config.insert(supported_type, settings.clone()); + } + } + + pub fn add_setting( + &mut self, + supported_type: SupportedTraitTypesForAsyncDelay, + settings: &DelaySettings, + ) { + self.config.insert(supported_type, settings.clone()); + } + + pub fn get_setting( + &self, + supported_type: &SupportedTraitTypesForAsyncDelay, + ) -> Option<&DelaySettings> { + self.config.get(supported_type) + } +} + +#[async_trait] +/// Implement this method to add some delay to async call +pub trait TestableDelay { + /// Add a delay from settings + async fn handle_async_delay(settings: &DelaySettings) { + match settings.delay_option { + DelayOptions::None => {} + DelayOptions::Fixed => { + async_sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; + } + DelayOptions::Random => { + let sleep_in_millis = rand::thread_rng().gen_range( + settings.min_time_in_milliseconds..=settings.max_time_in_milliseconds, + ); + async_sleep(Duration::from_millis(sleep_in_millis)).await; + } + } + } + + /// Look for settings in the config and run it + async fn run_delay_settings_from_config(delay_config: &DelayConfig); +} + +/// Iterator to iterate over enum +struct SupportedTraitTypesForAsyncDelayIterator { + index: usize, +} + +impl SupportedTraitTypesForAsyncDelayIterator { + fn new() -> Self { + SupportedTraitTypesForAsyncDelayIterator { index: 0 } + } +} + +impl Iterator for SupportedTraitTypesForAsyncDelayIterator { + type Item = SupportedTraitTypesForAsyncDelay; + + fn next(&mut self) -> Option { + let supported_type = match self.index { + 0 => Some(SupportedTraitTypesForAsyncDelay::Storage), + 1 => Some(SupportedTraitTypesForAsyncDelay::ValidatedState), + 2 => Some(SupportedTraitTypesForAsyncDelay::BlockHeader), + _ => { + assert_eq!(self.index, 3, "Need to ensure that newly added or removed `SupportedTraitTypesForAsyncDelay` enum is handled in iterator"); + return None; + } + }; + self.index += 1; + supported_type + } +} diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 3afdf5f090..812168bb2c 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -365,9 +365,10 @@ pub trait RunDa< /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { - let initializer = hotshot::HotShotInitializer::::from_genesis(TestInstanceState {}) - .await - .expect("Couldn't generate genesis block"); + let initializer = + hotshot::HotShotInitializer::::from_genesis(TestInstanceState::default()) + .await + .expect("Couldn't generate genesis block"); let config = self.config(); diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 5089c6d61e..dfb5781d64 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -72,9 +72,11 @@ pub async fn build_system_handle< let marketplace_config = (launcher.resource_generator.marketplace_config)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) - .await - .unwrap(); + let initializer = HotShotInitializer::::from_genesis(TestInstanceState::new( + launcher.metadata.async_delay_config, + )) + .await + .unwrap(); let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 0f7eff831e..72611d44d9 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -18,6 +18,7 @@ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, + testable_delay::DelayConfig, }; use hotshot_types::{ data::Leaf, @@ -58,6 +59,8 @@ pub struct SpinningTask> { pub(crate) last_decided_leaf: Leaf, /// Highest qc seen in the test for restarting nodes pub(crate) high_qc: QuorumCertificate, + /// Add specified delay to async calls + pub(crate) async_delay_config: DelayConfig, } #[async_trait] @@ -128,7 +131,7 @@ where let initializer = HotShotInitializer::::from_reload( self.last_decided_leaf.clone(), - TestInstanceState {}, + TestInstanceState::new(self.async_delay_config.clone()), None, view_number, BTreeMap::new(), @@ -205,14 +208,14 @@ where let read_storage = storage.read().await; let initializer = HotShotInitializer::::from_reload( self.last_decided_leaf.clone(), - TestInstanceState {}, + TestInstanceState::new(self.async_delay_config.clone()), None, view_number, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( QuorumCertificate::genesis( &TestValidatedState::default(), - &TestInstanceState {}, + &TestInstanceState::default(), ) .await, ), diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index d90eb6e6c7..bb82847592 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -14,7 +14,7 @@ use hotshot::{ }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, - storage_types::TestStorage, + storage_types::TestStorage, testable_delay::DelayConfig, }; use hotshot_types::{ consensus::ConsensusMetricsValue, traits::node_implementation::NodeType, ExecutionType, @@ -92,6 +92,8 @@ pub struct TestDescription> { pub solver: FakeSolverApiDescription, /// nodes with byzantine behaviour pub behaviour: Rc Behaviour>, + /// Delay config if any to add delays to asynchronous calls + pub async_delay_config: DelayConfig, } #[derive(Debug)] @@ -105,7 +107,7 @@ pub async fn create_test_handle< TYPES: NodeType, I: NodeImplementation, >( - behaviour: Behaviour, + metadata: TestDescription, node_id: u64, network: Network, memberships: Memberships, @@ -113,9 +115,11 @@ pub async fn create_test_handle< storage: I::Storage, marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { - let initializer = HotShotInitializer::::from_genesis(TestInstanceState {}) - .await - .unwrap(); + let initializer = HotShotInitializer::::from_genesis(TestInstanceState::new( + metadata.async_delay_config, + )) + .await + .unwrap(); // See whether or not we should be DA let is_da = node_id < config.da_staked_committee_size as u64; @@ -127,6 +131,7 @@ pub async fn create_test_handle< let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let behaviour = (metadata.behaviour)(node_id); match behaviour { Behaviour::ByzantineTwins(state) => { let state = Box::leak(state); @@ -367,6 +372,7 @@ impl> Default for TestDescription< error_pct: 0.1, }, behaviour: Rc::new(|_| Behaviour::Standard), + async_delay_config: DelayConfig::default(), } } } @@ -477,6 +483,7 @@ where a.view_sync_timeout = view_sync_timeout; }; + let metadata = self.clone(); TestLauncher { resource_generator: ResourceGenerators { channel_generator: >::gen_networks( @@ -486,7 +493,12 @@ where unreliable_network, secondary_network_delay, ), - storage: Box::new(|_| TestStorage::::default()), + storage: Box::new(move |_| { + let mut storage = TestStorage::::default(); + // update storage impl to use settings delay option + storage.delay_config = metadata.async_delay_config.clone(); + storage + }), config, marketplace_config: Box::new(|_| MarketplaceConfig:: { auction_results_provider: TestAuctionResultsProvider::::default().into(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 2d74d8db86..06b9c87579 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -178,13 +178,17 @@ where late_start, latest_view: None, changes, - last_decided_leaf: Leaf::genesis(&TestValidatedState::default(), &TestInstanceState {}) - .await, + last_decided_leaf: Leaf::genesis( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, high_qc: QuorumCertificate::genesis( &TestValidatedState::default(), - &TestInstanceState {}, + &TestInstanceState::default(), ) .await, + async_delay_config: self.launcher.metadata.async_delay_config, }; let spinning_task = TestTask::>::new( spinning_task_state, @@ -470,10 +474,11 @@ where }, ); } else { - let initializer = - HotShotInitializer::::from_genesis(TestInstanceState {}) - .await - .unwrap(); + let initializer = HotShotInitializer::::from_genesis( + TestInstanceState::new(self.launcher.metadata.async_delay_config.clone()), + ) + .await + .unwrap(); // See whether or not we should be DA let is_da = node_id < config.da_staked_committee_size as u64; @@ -536,9 +541,8 @@ where for (node_id, network, memberships, config, storage, marketplace_config) in uninitialized_nodes { - let behaviour = (self.launcher.metadata.behaviour)(node_id); let handle = create_test_handle( - behaviour, + self.launcher.metadata.clone(), node_id, network.clone(), memberships, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 6fe5b77a7b..70d858a01a 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -81,7 +81,7 @@ impl TestView { >::from_transactions( transactions.clone(), &TestValidatedState::default(), - &TestInstanceState {}, + &TestInstanceState::default(), ) .await .unwrap(); @@ -125,7 +125,7 @@ impl TestView { view_number: genesis_view, justify_qc: QuorumCertificate::genesis( &TestValidatedState::default(), - &TestInstanceState {}, + &TestInstanceState::default(), ) .await, upgrade_certificate: None, @@ -215,7 +215,7 @@ impl TestView { >::from_transactions( transactions.clone(), &TestValidatedState::default(), - &TestInstanceState {}, + &TestInstanceState::default(), ) .await .unwrap(); diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 5f5276f19f..07c1f72697 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -9,6 +9,7 @@ use std::time::Duration; use hotshot_example_types::{ node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes}, state_types::TestTypes, + testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; use hotshot_macros::cross_tests; use hotshot_testing::{ @@ -41,6 +42,76 @@ cross_tests!( }, ); +cross_tests!( + TestName: test_success_with_async_delay, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + metadata.overall_safety_properties.num_successful_views = 10; + let mut config = DelayConfig::default(); + let delay_settings = DelaySettings { + delay_option: DelayOptions::Random, + min_time_in_milliseconds: 10, + max_time_in_milliseconds: 100, + fixed_time_in_milliseconds: 0, + }; + config.add_settings_for_all_types(delay_settings); + metadata.async_delay_config = config; + metadata + }, +); + +cross_tests!( + TestName: test_success_with_async_delay_2, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + metadata.overall_safety_properties.num_successful_views = 10; + let mut config = DelayConfig::default(); + let mut delay_settings = DelaySettings { + delay_option: DelayOptions::Random, + min_time_in_milliseconds: 10, + max_time_in_milliseconds: 100, + fixed_time_in_milliseconds: 15, + }; + config.add_setting(SupportedTraitTypesForAsyncDelay::Storage, &delay_settings); + + delay_settings.delay_option = DelayOptions::Fixed; + config.add_setting(SupportedTraitTypesForAsyncDelay::BlockHeader, &delay_settings); + + delay_settings.delay_option = DelayOptions::Random; + delay_settings.min_time_in_milliseconds = 5; + delay_settings.max_time_in_milliseconds = 20; + config.add_setting(SupportedTraitTypesForAsyncDelay::ValidatedState, &delay_settings); + metadata.async_delay_config = config; + metadata + }, +); + cross_tests!( TestName: double_propose_vote, Impls: [MemoryImpl], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 20852633d4..131cfe48e0 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -55,7 +55,7 @@ async fn test_vid_task() { let (payload, metadata) = >::from_transactions( transactions.clone(), &TestValidatedState::default(), - &TestInstanceState {}, + &TestInstanceState::default(), ) .await .unwrap(); From 6d0af6d45f17793a3b60dc99e4792e3b20492541 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:30:09 -0400 Subject: [PATCH 1179/1393] Separate versioning from `NodeType` (#3555) --- example-types/src/node_types.rs | 28 +-- example-types/src/state_types.rs | 3 +- examples/combined/all.rs | 16 +- examples/combined/multi-validator.rs | 4 +- examples/combined/validator.rs | 4 +- examples/infra/mod.rs | 36 ++-- examples/libp2p/all.rs | 16 +- examples/libp2p/multi-validator.rs | 4 +- examples/libp2p/validator.rs | 4 +- examples/push-cdn/all.rs | 16 +- examples/push-cdn/multi-validator.rs | 4 +- examples/push-cdn/validator.rs | 4 +- hotshot/src/lib.rs | 78 ++++---- hotshot/src/tasks/mod.rs | 81 ++++---- hotshot/src/tasks/task_state.rs | 137 ++++++------- hotshot/src/types/handle.rs | 14 +- macros/src/lib.rs | 62 ++++-- task-impls/src/consensus/handlers.rs | 70 ++++--- task-impls/src/consensus/mod.rs | 32 ++-- task-impls/src/consensus2/handlers.rs | 37 +++- task-impls/src/consensus2/mod.rs | 17 +- task-impls/src/lib.rs | 1 - task-impls/src/network.rs | 34 ++-- task-impls/src/quorum_proposal/handlers.rs | 23 +-- task-impls/src/quorum_proposal/mod.rs | 19 +- .../src/quorum_proposal_recv/handlers.rs | 15 +- task-impls/src/quorum_proposal_recv/mod.rs | 17 +- task-impls/src/quorum_vote/handlers.rs | 12 +- task-impls/src/quorum_vote/mod.rs | 41 ++-- task-impls/src/rewind.rs | 6 +- task-impls/src/transactions.rs | 54 ++---- task-impls/src/upgrade.rs | 36 ++-- testing/src/block_builder/mod.rs | 2 +- testing/src/block_builder/simple.rs | 2 +- testing/src/completion_task.rs | 10 +- testing/src/helpers.rs | 11 +- testing/src/overall_safety_task.rs | 14 +- .../src/predicates/upgrade_with_consensus.rs | 11 +- .../src/predicates/upgrade_with_proposal.rs | 13 +- testing/src/predicates/upgrade_with_vote.rs | 14 +- testing/src/spinning_task.rs | 13 +- testing/src/test_builder.rs | 41 ++-- testing/src/test_launcher.rs | 12 +- testing/src/test_runner.rs | 32 ++-- testing/src/test_task.rs | 12 +- testing/src/txn_task.rs | 8 +- testing/src/view_generator.rs | 8 +- testing/tests/tests_1/block_builder.rs | 8 +- testing/tests/tests_1/consensus_task.rs | 62 ++++-- testing/tests/tests_1/da_task.rs | 24 ++- testing/tests/tests_1/libp2p.rs | 9 +- testing/tests/tests_1/network_task.rs | 19 +- .../tests_1/quorum_proposal_recv_task.rs | 18 +- testing/tests/tests_1/quorum_proposal_task.rs | 78 ++++---- testing/tests/tests_1/quorum_vote_task.rs | 20 +- testing/tests/tests_1/test_success.rs | 18 +- testing/tests/tests_1/test_with_failures_2.rs | 5 +- testing/tests/tests_1/transaction_task.rs | 32 ++-- .../tests_1/upgrade_task_with_consensus.rs | 46 +++-- .../tests_1/upgrade_task_with_proposal.rs | 25 ++- .../tests/tests_1/upgrade_task_with_vote.rs | 9 +- testing/tests/tests_1/vid_task.rs | 17 +- testing/tests/tests_1/view_sync_task.rs | 6 +- .../tests/tests_1/vote_dependency_handle.rs | 37 ++-- testing/tests/tests_2/catchup.rs | 30 +-- testing/tests/tests_2/push_cdn.rs | 4 +- .../tests/tests_2/test_with_failures_one.rs | 3 +- testing/tests/tests_3/memory_network.rs | 34 ++-- .../tests_3/test_with_builder_failures.rs | 3 +- .../tests_3/test_with_failures_half_f.rs | 3 +- testing/tests/tests_4/test_with_failures_f.rs | 3 +- testing/tests/tests_5/combined_network.rs | 12 +- testing/tests/tests_5/timeout.rs | 8 +- testing/tests/tests_5/unreliable_network.rs | 24 +-- types/src/constants.rs | 23 --- types/src/data.rs | 10 +- types/src/message.rs | 180 +++++++++++------- types/src/simple_certificate.rs | 29 +-- types/src/traits/node_implementation.rs | 24 ++- 79 files changed, 1082 insertions(+), 869 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index de74d12b3a..a3a657acaf 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -15,7 +15,7 @@ use hotshot::traits::{ use hotshot_types::{ data::ViewNumber, signature_key::{BLSPubKey, BuilderKey}, - traits::node_implementation::NodeType, + traits::node_implementation::{NodeType, Versions}, }; use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; @@ -45,12 +45,6 @@ use crate::{ pub struct TestTypes; impl NodeType for TestTypes { type AuctionResult = TestAuctionResult; - type Base = StaticVersion<0, 1>; - type Upgrade = StaticVersion<0, 2>; - const UPGRADE_HASH: [u8; 32] = [ - 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, - ]; type Time = ViewNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; @@ -80,12 +74,6 @@ impl NodeType for TestTypes { pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { type AuctionResult = TestAuctionResult; - type Base = StaticVersion<0, 1>; - type Upgrade = StaticVersion<0, 2>; - const UPGRADE_HASH: [u8; 32] = [ - 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, - ]; type Time = ViewNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; @@ -144,3 +132,17 @@ impl NodeImplementation for Libp2pImpl { type Storage = TestStorage; type AuctionResultsProvider = TestAuctionResultsProvider; } + +#[derive(Clone, Debug, Copy)] +pub struct TestVersions {} + +impl Versions for TestVersions { + type Base = StaticVersion<0, 1>; + type Upgrade = StaticVersion<0, 2>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + + type Marketplace = StaticVersion<0, 3>; +} diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index d005084d0f..50c40fee73 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -5,6 +5,8 @@ // along with the HotShot repository. If not, see . //! Implementations for examples and tests only +use std::fmt::Debug; + use async_trait::async_trait; use committable::{Commitment, Committable}; use hotshot_types::{ @@ -19,7 +21,6 @@ use hotshot_types::{ }; use rand::Rng; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; use vbs::version::Version; pub use crate::node_types::TestTypes; diff --git a/examples/combined/all.rs b/examples/combined/all.rs index bd69dc61c1..70363db3ec 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -20,7 +20,7 @@ use hotshot::{ traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}, types::SignatureKey, }; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; @@ -146,12 +146,14 @@ async fn main() { let builder_address = gen_local_address::(i); let node = async_spawn(async move { - infra::main_entry_point::(ValidatorArgs { - url: orchestrator_url, - advertise_address: Some(advertise_address), - builder_address: Some(builder_address), - network_config_file: None, - }) + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + advertise_address: Some(advertise_address), + builder_address: Some(builder_address), + network_config_file: None, + }, + ) .await; }); nodes.push(node); diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index 9b6e37f516..71ab7afd91 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -10,7 +10,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; @@ -36,7 +36,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 9f97b7f8db..a73c80f70d 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -9,7 +9,7 @@ use std::{net::SocketAddr, str::FromStr}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{debug, instrument}; @@ -42,5 +42,5 @@ async fn main() { ); debug!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 812168bb2c..9ca8cc127a 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -59,7 +59,7 @@ use hotshot_types::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, network::{ConnectedNetwork, Topic}, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, states::TestableState, }, HotShotConfig, PeerConfig, ValidatorConfig, @@ -347,6 +347,7 @@ pub trait RunDa< Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, + V: Versions, > where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -364,7 +365,7 @@ pub trait RunDa< /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state - async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { + async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { let initializer = hotshot::HotShotInitializer::::from_genesis(TestInstanceState::default()) .await @@ -430,7 +431,7 @@ pub trait RunDa< #[allow(clippy::too_many_lines)] async fn run_hotshot( &self, - context: SystemContextHandle, + context: SystemContextHandle, transactions: &mut Vec, transactions_to_send_per_round: u64, transaction_size_in_bytes: u64, @@ -619,7 +620,8 @@ impl< Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, - > RunDa, NODE> for PushCdnDaRun + V: Versions, + > RunDa, NODE, V> for PushCdnDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -696,7 +698,8 @@ impl< Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, - > RunDa, NODE> for Libp2pDaRun + V: Versions, + > RunDa, NODE, V> for Libp2pDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -782,7 +785,8 @@ impl< Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, - > RunDa, NODE> for CombinedDaRun + V: Versions, + > RunDa, NODE, V> for CombinedDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -794,20 +798,21 @@ where libp2p_advertise_address: Option, ) -> CombinedDaRun { // Initialize our Libp2p network - let libp2p_network: Libp2pDaRun = as RunDa< - TYPES, - Libp2pNetwork, - Libp2pImpl, - >>::initialize_networking( - config.clone(), libp2p_advertise_address - ) - .await; + let libp2p_network: Libp2pDaRun = + as RunDa< + TYPES, + Libp2pNetwork, + Libp2pImpl, + V, + >>::initialize_networking(config.clone(), libp2p_advertise_address) + .await; // Initialize our CDN network let cdn_network: PushCdnDaRun = as RunDa< TYPES, PushCdnNetwork, PushCdnImpl, + V, >>::initialize_networking( config.clone(), libp2p_advertise_address ) @@ -852,7 +857,8 @@ pub async fn main_entry_point< Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, - RUNDA: RunDa, + V: Versions, + RUNDA: RunDa, >( args: ValidatorArgs, ) where diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 770e6ea91d..8f0e0a75a6 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -12,7 +12,7 @@ use async_compatibility_layer::{ art::async_spawn, logging::{setup_backtrace, setup_logging}, }; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; use tracing::instrument; @@ -51,12 +51,14 @@ async fn main() { let builder_address = gen_local_address::(i); let orchestrator_url = orchestrator_url.clone(); let node = async_spawn(async move { - infra::main_entry_point::(ValidatorArgs { - url: orchestrator_url, - advertise_address: Some(advertise_address), - builder_address: Some(builder_address), - network_config_file: None, - }) + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + advertise_address: Some(advertise_address), + builder_address: Some(builder_address), + network_config_file: None, + }, + ) .await; }); nodes.push(node); diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index 7f55d9462a..42621d6946 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -10,7 +10,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; @@ -36,7 +36,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 5948ebd107..41a02d6eb1 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -9,7 +9,7 @@ use std::{net::SocketAddr, str::FromStr}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; use tracing::{debug, instrument}; @@ -41,5 +41,5 @@ async fn main() { ); debug!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index b360170c21..6d97d34d9f 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -17,7 +17,7 @@ use hotshot::{ traits::implementations::{TestingDef, WrappedSignatureKey}, types::SignatureKey, }; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT}; @@ -140,12 +140,14 @@ async fn main() { let orchestrator_url = orchestrator_url.clone(); let builder_address = gen_local_address::(i); let node = async_spawn(async move { - infra::main_entry_point::(ValidatorArgs { - url: orchestrator_url, - advertise_address: None, - builder_address: Some(builder_address), - network_config_file: None, - }) + infra::main_entry_point::( + ValidatorArgs { + url: orchestrator_url, + advertise_address: None, + builder_address: Some(builder_address), + network_config_file: None, + }, + ) .await; }); nodes.push(node); diff --git a/examples/push-cdn/multi-validator.rs b/examples/push-cdn/multi-validator.rs index 77520cdd25..b8070f8b1c 100644 --- a/examples/push-cdn/multi-validator.rs +++ b/examples/push-cdn/multi-validator.rs @@ -10,7 +10,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use clap::Parser; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; use tracing::instrument; @@ -36,7 +36,7 @@ async fn main() { let args = args.clone(); let node = async_spawn(async move { - infra::main_entry_point::( + infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) .await; diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index 5e6c0e67bc..70d53cdc33 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -7,7 +7,7 @@ //! A validator use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; -use hotshot_example_types::state_types::TestTypes; +use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use tracing::{debug, instrument}; @@ -28,5 +28,5 @@ async fn main() { setup_backtrace(); let args = ValidatorArgs::parse(); debug!("connecting to orchestrator at {:?}", args.url); - infra::main_entry_point::(args).await; + infra::main_entry_point::(args).await; } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 7998efc9b1..090a9687de 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -12,10 +12,12 @@ pub mod documentation; use futures::future::{select, Either}; -use hotshot_types::traits::network::BroadcastDelay; +use hotshot_types::{ + message::UpgradeLock, + traits::{network::BroadcastDelay, node_implementation::Versions}, +}; use rand::Rng; use url::Url; -use vbs::version::StaticVersionType; /// Contains traits consumed by [`SystemContext`] pub mod traits; @@ -47,8 +49,8 @@ use hotshot_types::{ constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, data::{Leaf, QuorumProposal}, event::{EventType, LeafInfo}, - message::{DataMessage, Message, MessageKind, Proposal, VersionedMessage}, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + message::{DataMessage, Message, MessageKind, Proposal}, + simple_certificate::QuorumCertificate, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -65,7 +67,6 @@ use hotshot_types::{ /// Reexport rand crate pub use rand; use tracing::{debug, instrument, trace}; -use vbs::version::Version; use crate::{ tasks::{add_consensus_tasks, add_network_tasks}, @@ -101,7 +102,7 @@ pub struct Memberships { } /// Holds the state needed to participate in `HotShot` consensus -pub struct SystemContext> { +pub struct SystemContext, V: Versions> { /// The public key of this node public_key: TYPES::SignatureKey, @@ -126,9 +127,6 @@ pub struct SystemContext> { /// Immutable instance state instance_state: Arc, - /// The network version - version: Arc>, - /// The view to enter when first starting consensus start_view: TYPES::Time, @@ -154,13 +152,15 @@ pub struct SystemContext> { /// Reference to the internal storage for consensus datum. pub storage: Arc>, - /// a potential upgrade certificate that has been decided on by the consensus tasks. - pub decided_upgrade_certificate: Arc>>>, + /// shared lock for upgrade information + pub upgrade_lock: UpgradeLock, /// Marketplace config for this instance of HotShot pub marketplace_config: MarketplaceConfig, } -impl> Clone for SystemContext { +impl, V: Versions> Clone + for SystemContext +{ #![allow(deprecated)] fn clone(&self) -> Self { Self { @@ -172,7 +172,6 @@ impl> Clone for SystemContext> Clone for SystemContext> SystemContext { +impl, V: Versions> SystemContext { #![allow(deprecated)] /// Creates a new [`Arc`] with the given configuration options. /// @@ -260,7 +259,7 @@ impl> SystemContext { let (internal_tx, internal_rx) = internal_channel; let (mut external_tx, mut external_rx) = external_channel; - let decided_upgrade_certificate = Arc::new(RwLock::new(None)); + let upgrade_lock = UpgradeLock::::new(); // Allow overflow on the channel, otherwise sending to it may block. external_rx.set_overflow(true); @@ -318,20 +317,18 @@ impl> SystemContext { ); let consensus = Arc::new(RwLock::new(consensus)); - let version = Arc::new(RwLock::new(TYPES::Base::VERSION)); // This makes it so we won't block on broadcasting if there is not a receiver // Our own copy of the receiver is inactive so it doesn't count. external_tx.set_await_active(false); - let inner: Arc> = Arc::new(SystemContext { + let inner: Arc> = Arc::new(SystemContext { id: nonce, consensus: OuterConsensus::new(consensus), instance_state: Arc::new(instance_state), public_key, private_key, config, - version, start_view: initializer.start_view, network, memberships: Arc::new(memberships), @@ -341,7 +338,7 @@ impl> SystemContext { external_event_stream: (external_tx, external_rx.deactivate()), anchored_leaf: anchored_leaf.clone(), storage: Arc::new(RwLock::new(storage)), - decided_upgrade_certificate, + upgrade_lock, marketplace_config, }); @@ -457,7 +454,6 @@ impl> SystemContext { pub async fn publish_transaction_async( &self, transaction: TYPES::Transaction, - decided_upgrade_certificate: Arc>>>, ) -> Result<(), HotShotError> { trace!("Adding transaction to our own queue"); @@ -472,10 +468,10 @@ impl> SystemContext { kind: MessageKind::from(message_kind), }; - let cert = decided_upgrade_certificate.read().await.clone(); - - let serialized_message = message - .serialize(&cert) + let serialized_message = self + .upgrade_lock + .serialize(&message) + .await .map_err(|_| HotShotError::FailedToSerialize)?; async_spawn(async move { @@ -584,7 +580,7 @@ impl> SystemContext { marketplace_config: MarketplaceConfig, ) -> Result< ( - SystemContextHandle, + SystemContextHandle, Sender>>, Receiver>>, ), @@ -614,11 +610,11 @@ impl> SystemContext { } } -impl> SystemContext { +impl, V: Versions> SystemContext { /// Spawn all tasks that operate on [`SystemContextHandle`]. /// /// For a list of which tasks are being spawned, see this module's documentation. - pub async fn run_tasks(&self) -> SystemContextHandle { + pub async fn run_tasks(&self) -> SystemContextHandle { let consensus_registry = ConsensusTaskRegistry::new(); let network_registry = NetworkTaskRegistry::new(); @@ -636,8 +632,8 @@ impl> SystemContext { memberships: Arc::clone(&self.memberships), }; - add_network_tasks::(&mut handle).await; - add_consensus_tasks::(&mut handle).await; + add_network_tasks::(&mut handle).await; + add_consensus_tasks::(&mut handle).await; handle } @@ -648,11 +644,12 @@ type Channel = (Sender>, Receiver>); #[async_trait] /// Trait for handling messages for a node with a twin copy of consensus -pub trait TwinsHandlerState +pub trait TwinsHandlerState where Self: std::fmt::Debug + Send + Sync, TYPES: NodeType, I: NodeImplementation, + V: Versions, { /// Handle a message sent to the twin from the network task, forwarding it to one of the two twins. async fn send_handler( @@ -742,7 +739,10 @@ where metrics: ConsensusMetricsValue, storage: I::Storage, marketplace_config: MarketplaceConfig, - ) -> (SystemContextHandle, SystemContextHandle) { + ) -> ( + SystemContextHandle, + SystemContextHandle, + ) { let left_system_context = SystemContext::new( public_key.clone(), private_key.clone(), @@ -822,8 +822,8 @@ where }; // add consensus tasks to each handle, using their individual internal event streams - add_consensus_tasks::(&mut left_handle).await; - add_consensus_tasks::(&mut right_handle).await; + add_consensus_tasks::(&mut left_handle).await; + add_consensus_tasks::(&mut right_handle).await; // fuse the event streams from both handles before initializing the network tasks let fused_internal_event_stream = self.fuse_channels( @@ -838,7 +838,7 @@ where ); // add the network tasks to the left handle. note: because the left handle has the fused event stream, the network tasks on the left handle will handle messages from both handles. - add_network_tasks::(&mut left_handle).await; + add_network_tasks::(&mut left_handle).await; // revert to the original event stream on the left handle, for any applications that want to listen to it left_handle.internal_event_stream = left_internal_event_stream.clone(); @@ -853,7 +853,7 @@ where pub struct RandomTwinsHandler; #[async_trait] -impl> TwinsHandlerState +impl, V: Versions> TwinsHandlerState for RandomTwinsHandler { async fn send_handler( @@ -885,7 +885,7 @@ impl> TwinsHandlerState pub struct DoubleTwinsHandler; #[async_trait] -impl> TwinsHandlerState +impl, V: Versions> TwinsHandlerState for DoubleTwinsHandler { async fn send_handler( @@ -906,8 +906,8 @@ impl> TwinsHandlerState } #[async_trait] -impl> ConsensusApi - for SystemContextHandle +impl, V: Versions> ConsensusApi + for SystemContextHandle { fn total_nodes(&self) -> NonZeroUsize { self.hotshot.config.num_nodes_with_stake diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index ce9634140c..d01facfadd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -36,7 +36,7 @@ use hotshot_task_impls::{ use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::QuorumProposal, - message::{Messages, Proposal, VersionedMessage}, + message::{Messages, Proposal}, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -47,7 +47,8 @@ use vbs::version::StaticVersionType; use crate::{ tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, UpgradeLock, + Versions, }; /// event for global event stream @@ -60,8 +61,12 @@ pub enum GlobalEvent { } /// Add tasks for network requests and responses -pub async fn add_request_network_task>( - handle: &mut SystemContextHandle, +pub async fn add_request_network_task< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + handle: &mut SystemContextHandle, ) { let state = NetworkRequestState::::create_from(handle).await; @@ -74,8 +79,8 @@ pub async fn add_request_network_task>( - handle: &mut SystemContextHandle, +pub fn add_response_task, V: Versions>( + handle: &mut SystemContextHandle, request_receiver: RequestReceiver, ) { let state = NetworkResponseState::::new( @@ -97,8 +102,9 @@ pub fn add_network_message_task< TYPES: NodeType, I: NodeImplementation, NET: ConnectedNetwork, + V: Versions, >( - handle: &mut SystemContextHandle, + handle: &mut SystemContextHandle, channel: &Arc, ) { let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { @@ -106,7 +112,7 @@ pub fn add_network_message_task< external_event_stream: handle.output_event_stream.0.clone(), }; - let decided_upgrade_certificate = Arc::clone(&handle.hotshot.decided_upgrade_certificate); + let upgrade_lock = handle.hotshot.upgrade_lock.clone(); let network = Arc::clone(channel); let mut state = network_state.clone(); @@ -115,15 +121,11 @@ pub fn add_network_message_task< futures::pin_mut!(shutdown_signal); let recv_stream = stream::unfold((), |()| async { - let decided_upgrade_certificate_lock = decided_upgrade_certificate.read().await.clone(); let msgs = match network.recv_msgs().await { Ok(msgs) => { let mut deserialized_messages = Vec::new(); for msg in msgs { - let deserialized_message = match VersionedMessage::deserialize( - &msg, - &decided_upgrade_certificate_lock, - ) { + let deserialized_message = match upgrade_lock.deserialize(&msg).await { Ok(deserialized) => deserialized, Err(e) => { tracing::error!("Failed to deserialize message: {}", e); @@ -176,20 +178,21 @@ pub fn add_network_message_task< pub fn add_network_event_task< TYPES: NodeType, I: NodeImplementation, + V: Versions, NET: ConnectedNetwork, >( - handle: &mut SystemContextHandle, + handle: &mut SystemContextHandle, channel: Arc, membership: TYPES::Membership, filter: fn(&Arc>) -> bool, ) { - let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState { + let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { channel, view: TYPES::Time::genesis(), membership, filter, storage: Arc::clone(&handle.storage()), - decided_upgrade_certificate: None, + upgrade_lock: UpgradeLock::new(), }; let task = Task::new( network_state, @@ -200,24 +203,24 @@ pub fn add_network_event_task< } /// Adds consensus-related tasks to a `SystemContextHandle`. -pub async fn add_consensus_tasks>( - handle: &mut SystemContextHandle, +pub async fn add_consensus_tasks, V: Versions>( + handle: &mut SystemContextHandle, ) { handle.add_task(ViewSyncTaskState::::create_from(handle).await); handle.add_task(VidTaskState::::create_from(handle).await); handle.add_task(DaTaskState::::create_from(handle).await); - handle.add_task(TransactionTaskState::::create_from(handle).await); + handle.add_task(TransactionTaskState::::create_from(handle).await); // only spawn the upgrade task if we are actually configured to perform an upgrade. - if TYPES::Base::VERSION < TYPES::Upgrade::VERSION { - handle.add_task(UpgradeTaskState::::create_from(handle).await); + if V::Base::VERSION < V::Upgrade::VERSION { + handle.add_task(UpgradeTaskState::::create_from(handle).await); } { #![cfg(not(feature = "dependency-tasks"))] use hotshot_task_impls::consensus::ConsensusTaskState; - handle.add_task(ConsensusTaskState::::create_from(handle).await); + handle.add_task(ConsensusTaskState::::create_from(handle).await); } { #![cfg(feature = "dependency-tasks")] @@ -226,10 +229,10 @@ pub async fn add_consensus_tasks>( quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, }; - handle.add_task(QuorumProposalTaskState::::create_from(handle).await); - handle.add_task(QuorumVoteTaskState::::create_from(handle).await); - handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); - handle.add_task(Consensus2TaskState::::create_from(handle).await); + handle.add_task(QuorumProposalTaskState::::create_from(handle).await); + handle.add_task(QuorumVoteTaskState::::create_from(handle).await); + handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); + handle.add_task(Consensus2TaskState::::create_from(handle).await); } #[cfg(feature = "rewind")] @@ -244,8 +247,8 @@ pub async fn add_consensus_tasks>( /// # Usage /// Use in `select!` macros or similar constructs for graceful shutdowns: #[must_use] -pub fn create_shutdown_event_monitor>( - handle: &SystemContextHandle, +pub fn create_shutdown_event_monitor, V: Versions>( + handle: &SystemContextHandle, ) -> BoxFuture<'static, ()> { // Activate the cloned internal event stream let mut event_stream = handle.internal_event_stream.1.activate_cloned(); @@ -272,7 +275,7 @@ pub fn create_shutdown_event_monitor [Byzantine logic layer] <-> Network -pub trait EventTransformerState> +pub trait EventTransformerState, V: Versions> where Self: std::fmt::Debug + Send + Sync + 'static, { @@ -296,7 +299,7 @@ where metrics: ConsensusMetricsValue, storage: I::Storage, marketplace_config: MarketplaceConfig, - ) -> SystemContextHandle { + ) -> SystemContextHandle { let hotshot = SystemContext::new( public_key, private_key, @@ -326,7 +329,7 @@ where memberships: Arc::clone(&hotshot.memberships), }; - add_consensus_tasks::(&mut handle).await; + add_consensus_tasks::(&mut handle).await; self.add_network_tasks(&mut handle).await; handle @@ -334,7 +337,7 @@ where /// Add byzantine network tasks with the trait #[allow(clippy::too_many_lines)] - async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { + async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { let state_in = Arc::new(RwLock::new(self)); let state_out = Arc::clone(&state_in); // channels between the task spawned in this function and the network tasks. @@ -362,7 +365,7 @@ where ); // spawn the network tasks with our newly-created channel - add_network_tasks::(handle).await; + add_network_tasks::(handle).await; std::mem::swap( &mut internal_event_stream, @@ -479,7 +482,7 @@ pub struct BadProposalViewDos { } #[async_trait] -impl> EventTransformerState +impl, V: Versions> EventTransformerState for BadProposalViewDos { async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { @@ -514,7 +517,7 @@ impl> EventTransformerState> EventTransformerState +impl, V: Versions> EventTransformerState for DoubleProposeVote { async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { @@ -585,8 +588,8 @@ impl> DishonestLeader { } #[async_trait] -impl + std::fmt::Debug> - EventTransformerState for DishonestLeader +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestLeader { async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { vec![event.clone()] @@ -608,8 +611,8 @@ impl + std::fmt::Debug> } /// adds tasks for sending/receiving messages to/from the network. -pub async fn add_network_tasks>( - handle: &mut SystemContextHandle, +pub async fn add_network_tasks, V: Versions>( + handle: &mut SystemContextHandle, ) { let network = Arc::clone(&handle.network); let quorum_membership = handle.memberships.quorum_membership.clone(); diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 9d78dfa285..6cd8e14077 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -11,19 +11,12 @@ use std::{ use async_trait::async_trait; use chrono::Utc; -#[cfg(not(feature = "dependency-tasks"))] -use hotshot_task_impls::consensus::ConsensusTaskState; -#[cfg(feature = "rewind")] -use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ - builder::BuilderClient, da::DaTaskState, request::NetworkRequestState, - transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, - view_sync::ViewSyncTaskState, -}; -#[cfg(feature = "dependency-tasks")] -use hotshot_task_impls::{ - consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, + builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, + da::DaTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, + request::NetworkRequestState, rewind::RewindTaskState, transactions::TransactionTaskState, + upgrade::UpgradeTaskState, vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::{ consensus::OuterConsensus, @@ -33,25 +26,26 @@ use hotshot_types::{ }, }; -use crate::types::SystemContextHandle; +use crate::{types::SystemContextHandle, Versions}; /// Trait for creating task states. #[async_trait] -pub trait CreateTaskState +pub trait CreateTaskState where TYPES: NodeType, I: NodeImplementation, + V: Versions, { /// Function to create the task state from a given `SystemContextHandle`. - async fn create_from(handle: &SystemContextHandle) -> Self; + async fn create_from(handle: &SystemContextHandle) -> Self; } #[async_trait] -impl> CreateTaskState +impl, V: Versions> CreateTaskState for NetworkRequestState { - async fn create_from(handle: &SystemContextHandle) -> NetworkRequestState { - NetworkRequestState { + async fn create_from(handle: &SystemContextHandle) -> Self { + Self { network: Arc::clone(&handle.hotshot.network), state: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, @@ -68,12 +62,12 @@ impl> CreateTaskState } #[async_trait] -impl> CreateTaskState - for UpgradeTaskState +impl, V: Versions> CreateTaskState + for UpgradeTaskState { - async fn create_from(handle: &SystemContextHandle) -> UpgradeTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { #[cfg(not(feature = "example-upgrade"))] - return UpgradeTaskState { + return Self { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -90,11 +84,11 @@ impl> CreateTaskState stop_proposing_time: handle.hotshot.config.stop_proposing_time, start_voting_time: handle.hotshot.config.start_voting_time, stop_voting_time: handle.hotshot.config.stop_voting_time, - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; #[cfg(feature = "example-upgrade")] - return UpgradeTaskState { + return Self { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -111,17 +105,17 @@ impl> CreateTaskState stop_proposing_time: u64::MAX, start_voting_time: 0, stop_voting_time: u64::MAX, - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; } } #[async_trait] -impl> CreateTaskState +impl, V: Versions> CreateTaskState for VidTaskState { - async fn create_from(handle: &SystemContextHandle) -> VidTaskState { - VidTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { + Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, vote_collector: None, @@ -135,11 +129,11 @@ impl> CreateTaskState } #[async_trait] -impl> CreateTaskState +impl, V: Versions> CreateTaskState for DaTaskState { - async fn create_from(handle: &SystemContextHandle) -> DaTaskState { - DaTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { + Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), output_event_stream: handle.hotshot.external_event_stream.0.clone(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), @@ -156,12 +150,13 @@ impl> CreateTaskState } #[async_trait] -impl> CreateTaskState +impl, V: Versions> CreateTaskState for ViewSyncTaskState { - async fn create_from(handle: &SystemContextHandle) -> ViewSyncTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { let cur_view = handle.cur_view().await; - ViewSyncTaskState { + + Self { current_view: cur_view, next_view: cur_view, network: Arc::clone(&handle.hotshot.network), @@ -186,11 +181,11 @@ impl> CreateTaskState } #[async_trait] -impl> CreateTaskState - for TransactionTaskState +impl, V: Versions> CreateTaskState + for TransactionTaskState { - async fn create_from(handle: &SystemContextHandle) -> TransactionTaskState { - TransactionTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { + Self { builder_timeout: handle.builder_timeout(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: OuterConsensus::new(handle.hotshot.consensus()), @@ -209,7 +204,7 @@ impl> CreateTaskState .cloned() .map(BuilderClient::new) .collect(), - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), auction_results_provider: Arc::clone( &handle.hotshot.marketplace_config.auction_results_provider, ), @@ -222,16 +217,15 @@ impl> CreateTaskState } } -#[cfg(not(feature = "dependency-tasks"))] #[async_trait] -impl> CreateTaskState - for ConsensusTaskState +impl, V: Versions> CreateTaskState + for ConsensusTaskState { - async fn create_from(handle: &SystemContextHandle) -> ConsensusTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); let timeout_task = handle.spawn_initial_timeout_task(); - ConsensusTaskState { + Self { consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), timeout: handle.hotshot.config.next_view_timeout, @@ -255,20 +249,19 @@ impl> CreateTaskState quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), storage: Arc::clone(&handle.storage), - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } -#[cfg(feature = "dependency-tasks")] #[async_trait] -impl> CreateTaskState - for QuorumVoteTaskState +impl, V: Versions> CreateTaskState + for QuorumVoteTaskState { - async fn create_from(handle: &SystemContextHandle) -> QuorumVoteTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); - QuorumVoteTaskState { + Self { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus), @@ -281,23 +274,20 @@ impl> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } -#[cfg(feature = "dependency-tasks")] #[async_trait] -impl> CreateTaskState - for QuorumProposalTaskState +impl, V: Versions> CreateTaskState + for QuorumProposalTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> QuorumProposalTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); let timeout_task = handle.spawn_initial_timeout_task(); - QuorumProposalTaskState { + Self { latest_proposed_view: handle.cur_view().await, proposal_dependencies: HashMap::new(), network: Arc::clone(&handle.hotshot.network), @@ -314,23 +304,20 @@ impl> CreateTaskState round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, formed_upgrade_certificate: None, - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } -#[cfg(feature = "dependency-tasks")] #[async_trait] -impl> CreateTaskState - for QuorumProposalRecvTaskState +impl, V: Versions> CreateTaskState + for QuorumProposalRecvTaskState { - async fn create_from( - handle: &SystemContextHandle, - ) -> QuorumProposalRecvTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); let timeout_task = handle.spawn_initial_timeout_task(); - QuorumProposalRecvTaskState { + Self { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus), @@ -348,21 +335,20 @@ impl> CreateTaskState spawned_tasks: BTreeMap::new(), instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } -#[cfg(feature = "dependency-tasks")] #[async_trait] -impl> CreateTaskState - for Consensus2TaskState +impl, V: Versions> CreateTaskState + for Consensus2TaskState { - async fn create_from(handle: &SystemContextHandle) -> Consensus2TaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); let timeout_task = handle.spawn_initial_timeout_task(); - Consensus2TaskState { + Self { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), @@ -381,18 +367,17 @@ impl> CreateTaskState consensus: OuterConsensus::new(consensus), last_decided_view: handle.cur_view().await, id: handle.hotshot.id, - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } -#[cfg(feature = "rewind")] #[async_trait] -impl> CreateTaskState +impl, V: Versions> CreateTaskState for RewindTaskState { - async fn create_from(handle: &SystemContextHandle) -> RewindTaskState { - RewindTaskState { + async fn create_from(handle: &SystemContextHandle) -> Self { + Self { events: Vec::new(), id: handle.hotshot.id, } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 4587e145aa..0b285b593f 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -26,14 +26,14 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::instrument; -use crate::{traits::NodeImplementation, types::Event, Memberships, SystemContext}; +use crate::{traits::NodeImplementation, types::Event, Memberships, SystemContext, Versions}; /// Event streaming handle for a [`SystemContext`] instance running in the background /// /// This type provides the means to message and interact with a background [`SystemContext`] instance, /// allowing the ability to receive [`Event`]s from it, send transactions to it, and interact with /// the underlying storage. -pub struct SystemContextHandle> { +pub struct SystemContextHandle, V: Versions> { /// The [sender](Sender) and [receiver](Receiver), /// to allow the application to communicate with HotShot. pub(crate) output_event_stream: (Sender>, InactiveReceiver>), @@ -51,7 +51,7 @@ pub struct SystemContextHandle> { pub(crate) network_registry: NetworkTaskRegistry, /// Internal reference to the underlying [`SystemContext`] - pub hotshot: Arc>, + pub hotshot: Arc>, /// Reference to the internal storage for consensus datum. pub(crate) storage: Arc>, @@ -63,7 +63,9 @@ pub struct SystemContextHandle> { pub memberships: Arc>, } -impl + 'static> SystemContextHandle { +impl + 'static, V: Versions> + SystemContextHandle +{ /// Adds a hotshot consensus-related task to the `SystemContextHandle`. pub fn add_task> + 'static>(&mut self, task_state: S) { let task = Task::new( @@ -154,9 +156,7 @@ impl + 'static> SystemContextHandl &self, tx: TYPES::Transaction, ) -> Result<(), HotShotError> { - self.hotshot - .publish_transaction_async(tx, Arc::clone(&self.hotshot.decided_upgrade_certificate)) - .await + self.hotshot.publish_transaction_async(tx).await } /// Get the underlying consensus state for this [`SystemContext`] diff --git a/macros/src/lib.rs b/macros/src/lib.rs index cc333dabae..4a9f921fd2 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -19,6 +19,8 @@ use syn::{ struct CrossTestData { /// imlementations impls: ExprArray, + /// versions + versions: ExprArray, /// types types: ExprArray, /// name of the test @@ -34,6 +36,7 @@ impl CrossTestDataBuilder { fn is_ready(&self) -> bool { self.impls.is_some() && self.types.is_some() + && self.versions.is_some() && self.test_name.is_some() && self.metadata.is_some() && self.test_name.is_some() @@ -48,6 +51,8 @@ struct TestData { ty: ExprPath, /// impl imply: ExprPath, + /// impl + version: ExprPath, /// name of test test_name: Ident, /// test description @@ -101,6 +106,7 @@ impl TestData { let TestData { ty, imply, + version, test_name, metadata, ignore, @@ -124,7 +130,7 @@ impl TestData { async fn #test_name() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - TestDescription::<#ty, #imply>::gen_launcher((#metadata), 0).launch().run_test::().await; + TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata), 0).launch().run_test::().await; } } } @@ -137,6 +143,7 @@ mod keywords { syn::custom_keyword!(TestName); syn::custom_keyword!(Types); syn::custom_keyword!(Impls); + syn::custom_keyword!(Versions); } impl Parse for CrossTestData { @@ -156,6 +163,11 @@ impl Parse for CrossTestData { input.parse::()?; let impls = input.parse::()?; description.impls(impls); + } else if input.peek(keywords::Versions) { + let _ = input.parse::()?; + input.parse::()?; + let versions = input.parse::()?; + description.versions(versions); } else if input.peek(keywords::TestName) { let _ = input.parse::()?; input.parse::()?; @@ -173,7 +185,7 @@ impl Parse for CrossTestData { description.ignore(ignore); } else { panic!( - "Unexpected token. Expected one of: Metadata, Ignore, Impls, Types, Testname" + "Unexpected token. Expected one of: Metadata, Ignore, Impls, Versions, Types, Testname" ); } if input.peek(Token![,]) { @@ -204,28 +216,38 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { p }); + let versions = test_spec.versions.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for Version! Got {t:?}"); + }; + p + }); + let mut result = quote! {}; for ty in types.clone() { let mut type_mod = quote! {}; for imp in impls.clone() { - let test_data = TestDataBuilder::create_empty() - .test_name(test_spec.test_name.clone()) - .metadata(test_spec.metadata.clone()) - .ignore(test_spec.ignore.clone()) - .imply(imp.clone()) - .ty(ty.clone()) - .build() - .unwrap(); - let test = test_data.generate_test(); - - let impl_str = format_ident!("{}", imp.to_lower_snake_str()); - let impl_result = quote! { - pub mod #impl_str { - use super::*; - #test - } - }; - type_mod.extend(impl_result); + for version in versions.clone() { + let test_data = TestDataBuilder::create_empty() + .test_name(test_spec.test_name.clone()) + .metadata(test_spec.metadata.clone()) + .ignore(test_spec.ignore.clone()) + .version(version.clone()) + .imply(imp.clone()) + .ty(ty.clone()) + .build() + .unwrap(); + let test = test_data.generate_test(); + + let impl_str = format_ident!("{}", imp.to_lower_snake_str()); + let impl_result = quote! { + pub mod #impl_str { + use super::*; + #test + } + }; + type_mod.extend(impl_result); + } } let ty_str = format_ident!("{}", ty.to_lower_snake_str()); let typ_result = quote! { diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 37a97093f8..187b486ebb 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -4,8 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(not(feature = "dependency-tasks"))] - use core::time::Duration; use std::{marker::PhantomData, sync::Arc}; @@ -20,11 +18,10 @@ use committable::Committable; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus, View}, - constants::MarketplaceVersion, data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType}, message::{GeneralConsensusMessage, Proposal}, - simple_certificate::{version, UpgradeCertificate}, + simple_certificate::UpgradeCertificate, simple_vote::QuorumData, traits::{ block_contents::BlockHeader, @@ -44,6 +41,7 @@ use vbs::version::{StaticVersionType, Version}; use super::ConsensusTaskState; use crate::{ + consensus::{UpgradeLock, Versions}, events::HotShotEvent, helpers::{ broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, update_view, @@ -56,7 +54,7 @@ use crate::{ /// the proposal send evnet. #[allow(clippy::too_many_arguments)] #[instrument(skip_all, fields(id = id, view = *view))] -pub async fn create_and_send_proposal( +pub async fn create_and_send_proposal( public_key: TYPES::SignatureKey, private_key: ::PrivateKey, consensus: OuterConsensus, @@ -83,7 +81,7 @@ pub async fn create_and_send_proposal( .context("Failed to get vid share")?; drop(consensus_read); - let block_header = if version < MarketplaceVersion::VERSION { + let block_header = if version < V::Marketplace::VERSION { TYPES::BlockHeader::new_legacy( state.as_ref(), instance_state.as_ref(), @@ -162,7 +160,7 @@ pub async fn create_and_send_proposal( /// standard case proposal scenario. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] -pub async fn publish_proposal_from_commitment_and_metadata( +pub async fn publish_proposal_from_commitment_and_metadata( view: TYPES::Time, sender: Sender>>, quorum_membership: Arc, @@ -171,7 +169,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( consensus: OuterConsensus, delay: u64, formed_upgrade_certificate: Option>, - decided_upgrade_certificate: Arc>>>, + upgrade_lock: UpgradeLock, commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, @@ -199,7 +197,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( if let Some(cert) = proposal_upgrade_certificate.clone() { if cert - .is_relevant(view, Arc::clone(&decided_upgrade_certificate)) + .is_relevant(view, Arc::clone(&upgrade_lock.decided_upgrade_certificate)) .await .is_err() { @@ -224,10 +222,10 @@ pub async fn publish_proposal_from_commitment_and_metadata( "Cannot propose because our VID payload commitment and metadata is for an older view." ); - let version = version(view, &decided_upgrade_certificate.read().await.clone())?; + let version = upgrade_lock.version(view).await?; let create_and_send_proposal_handle = async_spawn(async move { - match create_and_send_proposal( + match create_and_send_proposal::( public_key, private_key, OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), @@ -259,7 +257,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( /// `commitment_and_metadata`, or a `decided_upgrade_certificate`. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] -pub async fn publish_proposal_if_able( +pub async fn publish_proposal_if_able( view: TYPES::Time, sender: Sender>>, quorum_membership: Arc, @@ -268,7 +266,7 @@ pub async fn publish_proposal_if_able( consensus: OuterConsensus, delay: u64, formed_upgrade_certificate: Option>, - decided_upgrade_certificate: Arc>>>, + upgrade_lock: UpgradeLock, commitment_and_metadata: Option>, proposal_cert: Option>, instance_state: Arc, @@ -283,7 +281,7 @@ pub async fn publish_proposal_if_able( consensus, delay, formed_upgrade_certificate, - decided_upgrade_certificate, + upgrade_lock, commitment_and_metadata, proposal_cert, instance_state, @@ -297,11 +295,15 @@ pub async fn publish_proposal_if_able( /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. #[allow(clippy::too_many_lines)] #[instrument(skip_all)] -pub(crate) async fn handle_quorum_proposal_recv>( +pub(crate) async fn handle_quorum_proposal_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( proposal: &Proposal>, sender: &TYPES::SignatureKey, event_stream: Sender>>, - task_state: &mut ConsensusTaskState, + task_state: &mut ConsensusTaskState, ) -> Result>> { let sender = sender.clone(); debug!( @@ -475,7 +477,7 @@ pub(crate) async fn handle_quorum_proposal_recv>( +pub async fn handle_quorum_proposal_validated< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( proposal: &QuorumProposal, event_stream: Sender>>, - task_state: &mut ConsensusTaskState, + task_state: &mut ConsensusTaskState, ) -> Result<()> { let view = proposal.view_number(); task_state.current_proposal = Some(proposal.clone()); @@ -531,13 +537,17 @@ pub async fn handle_quorum_proposal_validated = ( +type VoteInfo = ( <::SignatureKey as SignatureKey>::PrivateKey, - Arc>>>, + UpgradeLock, Arc<::Membership>, Sender>>, ); @@ -649,7 +659,11 @@ type VoteInfo = ( /// Check if we are able to vote, like whether the proposal is valid, /// whether we have DAC and VID share, and if so, vote. #[instrument(skip_all, fields(id = id, view = *cur_view))] -pub async fn update_state_and_vote_if_able>( +pub async fn update_state_and_vote_if_able< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( cur_view: TYPES::Time, proposal: QuorumProposal, public_key: TYPES::SignatureKey, @@ -657,7 +671,7 @@ pub async fn update_state_and_vote_if_able>, quorum_membership: Arc, instance_state: Arc, - vote_info: VoteInfo, + vote_info: VoteInfo, id: u64, ) -> bool { use hotshot_types::simple_vote::QuorumVote; @@ -681,7 +695,7 @@ pub async fn update_state_and_vote_if_able version, Err(e) => { error!("Failed to calculate the version: {e:?}"); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index e87a59438d..2b6970f785 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -4,8 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(not(feature = "dependency-tasks"))] - use std::{collections::BTreeMap, sync::Arc}; use anyhow::Result; @@ -21,12 +19,12 @@ use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{QuorumProposal, VidDisperseShare, ViewChangeEvidence}, event::{Event, EventType}, - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, traits::{ election::Membership, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, }, @@ -58,7 +56,7 @@ type VoteCollectorOption = Option> { +pub struct ConsensusTaskState, V: Versions> { /// Our public key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -132,11 +130,11 @@ pub struct ConsensusTaskState> { /// This node's storage ref pub storage: Arc>, - /// an upgrade certificate that has been decided on, if any - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> ConsensusTaskState { +impl, V: Versions> ConsensusTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view pub async fn cancel_tasks(&mut self, view: TYPES::Time) { let keep = self.spawned_tasks.split_off(&view); @@ -210,7 +208,7 @@ impl> ConsensusTaskState OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.round_start_delay, self.formed_upgrade_certificate.clone(), - Arc::clone(&self.decided_upgrade_certificate), + self.upgrade_lock.clone(), self.payload_commitment_and_metadata.clone(), self.proposal_cert.clone(), Arc::clone(&self.instance_state), @@ -240,7 +238,7 @@ impl> ConsensusTaskState if proposal.view_number() != view { return; } - let upgrade = Arc::clone(&self.decided_upgrade_certificate); + let upgrade = self.upgrade_lock.clone(); let pub_key = self.public_key.clone(); let priv_key = self.private_key.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); @@ -250,7 +248,7 @@ impl> ConsensusTaskState let instance_state = Arc::clone(&self.instance_state); let id = self.id; let handle = async_spawn(async move { - update_state_and_vote_if_able::( + update_state_and_vote_if_able::( view, proposal, pub_key, @@ -489,7 +487,13 @@ impl> ConsensusTaskState // If we have a decided upgrade certificate, the protocol version may also have // been upgraded. - if let Some(cert) = self.decided_upgrade_certificate.read().await.clone() { + if let Some(cert) = self + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone() + { if new_view == cert.data.new_version_first_view { error!( "Version upgraded based on a decided upgrade cert: {:?}", @@ -703,7 +707,9 @@ impl> ConsensusTaskState } #[async_trait] -impl> TaskState for ConsensusTaskState { +impl, V: Versions> TaskState + for ConsensusTaskState +{ type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index a72d4958b7..69d10e9788 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -24,17 +24,22 @@ use tracing::{debug, error, instrument}; use super::Consensus2TaskState; use crate::{ + consensus2::Versions, events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task}, vote_collection::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent}, }; /// Handle a `QuorumVoteRecv` event. -pub(crate) async fn handle_quorum_vote_recv>( +pub(crate) async fn handle_quorum_vote_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( vote: &QuorumVote, event: Arc>, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut Consensus2TaskState, ) -> Result<()> { // Are we the leader for this view? ensure!( @@ -73,11 +78,15 @@ pub(crate) async fn handle_quorum_vote_recv>( +pub(crate) async fn handle_timeout_vote_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( vote: &TimeoutVote, event: Arc>, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut Consensus2TaskState, ) -> Result<()> { // Are we the leader for this view? ensure!( @@ -118,10 +127,14 @@ pub(crate) async fn handle_timeout_vote_recv>( +pub(crate) async fn handle_view_change< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( new_view_number: TYPES::Time, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut Consensus2TaskState, ) -> Result<()> { ensure!( new_view_number > task_state.cur_view, @@ -135,8 +148,12 @@ pub(crate) async fn handle_view_change>( +pub(crate) async fn handle_timeout, V: Versions>( view_number: TYPES::Time, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut Consensus2TaskState, ) -> Result<()> { ensure!( task_state.cur_view < view_number, diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index a31c97850d..65c80cab34 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -16,10 +16,11 @@ use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, event::Event, - simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, + message::UpgradeLock, + simple_certificate::{QuorumCertificate, TimeoutCertificate}, simple_vote::{QuorumVote, TimeoutVote}, traits::{ - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, }; @@ -39,7 +40,7 @@ type VoteCollectorOption = Option> { +pub struct Consensus2TaskState, V: Versions> { /// Our public key pub public_key: TYPES::SignatureKey, @@ -96,10 +97,10 @@ pub struct Consensus2TaskState> { /// The node's id pub id: u64, - /// An upgrade certificate that has been decided on, if any. - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> Consensus2TaskState { +impl, V: Versions> Consensus2TaskState { /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error", target = "Consensus2TaskState")] pub async fn handle( @@ -153,7 +154,9 @@ impl> Consensus2TaskState> TaskState for Consensus2TaskState { +impl, V: Versions> TaskState + for Consensus2TaskState +{ type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 5693000636..ed3dc5a0ee 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -63,5 +63,4 @@ pub mod quorum_proposal; pub mod quorum_proposal_recv; /// Task for storing and replaying all received tasks by a node -#[cfg(feature = "rewind")] pub mod rewind; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index bcef607db7..fe6712a1c4 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -17,13 +17,12 @@ use hotshot_types::{ event::{Event, EventType, HotShotAction}, message::{ DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, - SequencingMessage, VersionedMessage, + SequencingMessage, UpgradeLock, }, - simple_certificate::UpgradeCertificate, traits::{ election::Membership, network::{BroadcastDelay, ConnectedNetwork, TransmitType, ViewMessage}, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, storage::Storage, }, vote::{HasViewNumber, Vote}, @@ -210,6 +209,7 @@ impl NetworkMessageTaskState { /// network event task state pub struct NetworkEventTaskState< TYPES: NodeType, + V: Versions, COMMCHANNEL: ConnectedNetwork, S: Storage, > { @@ -224,16 +224,17 @@ pub struct NetworkEventTaskState< pub filter: fn(&Arc>) -> bool, /// Storage to store actionable events pub storage: Arc>, - /// Decided upgrade certificate - pub decided_upgrade_certificate: Option>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } #[async_trait] impl< TYPES: NodeType, + V: Versions, COMMCHANNEL: ConnectedNetwork, S: Storage + 'static, - > TaskState for NetworkEventTaskState + > TaskState for NetworkEventTaskState { type Event = HotShotEvent; @@ -257,9 +258,10 @@ impl< impl< TYPES: NodeType, + V: Versions, COMMCHANNEL: ConnectedNetwork, S: Storage + 'static, - > NetworkEventTaskState + > NetworkEventTaskState { /// Handle the given event. /// @@ -297,7 +299,7 @@ impl< ) } HotShotEvent::VidDisperseSend(proposal, sender) => { - self.handle_vid_disperse_proposal(proposal, &sender); + self.handle_vid_disperse_proposal(proposal, &sender).await; return; } HotShotEvent::DaProposalSend(proposal, sender) => { @@ -404,10 +406,6 @@ impl< .await; return; } - HotShotEvent::UpgradeDecided(cert) => { - self.decided_upgrade_certificate = Some(cert.clone()); - return; - } _ => { return; } @@ -428,9 +426,9 @@ impl< let committee_topic = membership.committee_topic(); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); - let decided_upgrade_certificate = self.decided_upgrade_certificate.clone(); + let upgrade_lock = self.upgrade_lock.clone(); async_spawn(async move { - if NetworkEventTaskState::::maybe_record_action( + if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), view, @@ -449,7 +447,7 @@ impl< } } - let serialized_message = match message.serialize(&decided_upgrade_certificate) { + let serialized_message = match upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, Err(e) => { error!("Failed to serialize message: {}", e); @@ -479,7 +477,7 @@ impl< } /// handle `VidDisperseSend` - fn handle_vid_disperse_proposal( + async fn handle_vid_disperse_proposal( &self, vid_proposal: Proposal>, sender: &::SignatureKey, @@ -496,7 +494,7 @@ impl< DaConsensusMessage::VidDisperseMsg(proposal), )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 }; - let serialized_message = match message.serialize(&self.decided_upgrade_certificate) { + let serialized_message = match self.upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, Err(e) => { error!("Failed to serialize message: {}", e); @@ -510,7 +508,7 @@ impl< let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); async_spawn(async move { - if NetworkEventTaskState::::maybe_record_action( + if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), storage, view, diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 3dbf147c4f..f2386ce826 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -20,10 +20,9 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, - constants::MarketplaceVersion, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, - simple_certificate::{version, UpgradeCertificate}, + simple_certificate::UpgradeCertificate, traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -34,6 +33,7 @@ use vbs::version::StaticVersionType; use crate::{ events::HotShotEvent, helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, + quorum_proposal::{UpgradeLock, Versions}, }; /// Proposal dependency types. These types represent events that precipitate a proposal. @@ -59,7 +59,7 @@ pub(crate) enum ProposalDependency { } /// Handler for the proposal dependency -pub struct ProposalDependencyHandle { +pub struct ProposalDependencyHandle { /// Latest view number that has been proposed for (proxy for cur_view). pub latest_proposed_view: TYPES::Time, @@ -98,14 +98,14 @@ pub struct ProposalDependencyHandle { /// since they will be present in the leaf we propose off of. pub formed_upgrade_certificate: Option>, - /// An upgrade certificate that has been decided on, if any. - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, /// The node's id pub id: u64, } -impl ProposalDependencyHandle { +impl ProposalDependencyHandle { /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. @@ -165,12 +165,9 @@ impl ProposalDependencyHandle { "Cannot propose because our VID payload commitment and metadata is for an older view." ); - let version = version( - self.view_number, - &self.decided_upgrade_certificate.read().await.clone(), - )?; + let version = self.upgrade_lock.version(self.view_number).await?; - let block_header = if version < MarketplaceVersion::VERSION { + let block_header = if version < V::Marketplace::VERSION { TYPES::BlockHeader::new_legacy( state.as_ref(), self.instance_state.as_ref(), @@ -246,7 +243,7 @@ impl ProposalDependencyHandle { Ok(()) } } -impl HandleDepOutput for ProposalDependencyHandle { +impl HandleDepOutput for ProposalDependencyHandle { type Output = Vec>>>>; #[allow(clippy::no_effect_underscore_binding)] @@ -347,7 +344,7 @@ impl HandleDepOutput for ProposalDependencyHandle { vid_share.unwrap(), proposal_cert, self.formed_upgrade_certificate.clone(), - Arc::clone(&self.decided_upgrade_certificate), + Arc::clone(&self.upgrade_lock.decided_upgrade_certificate), ) .await { diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index f2179058cb..cd382c38f5 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -21,10 +21,11 @@ use hotshot_task::{ use hotshot_types::{ consensus::OuterConsensus, event::Event, + message::UpgradeLock, simple_certificate::UpgradeCertificate, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, }, @@ -43,7 +44,7 @@ use crate::{ mod handlers; /// The state for the quorum proposal task. -pub struct QuorumProposalTaskState> { +pub struct QuorumProposalTaskState, V: Versions> { /// Latest view number that has been proposed for. pub latest_proposed_view: TYPES::Time, @@ -97,11 +98,13 @@ pub struct QuorumProposalTaskState /// since they will be present in the leaf we propose off of. pub formed_upgrade_certificate: Option>, - /// An upgrade certificate that has been decided on, if any. - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> QuorumProposalTaskState { +impl, V: Versions> + QuorumProposalTaskState +{ /// Create an event dependency #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Create event dependency", level = "info")] fn create_event_dependency( @@ -323,7 +326,7 @@ impl> QuorumProposalTaskState> QuorumProposalTaskState> TaskState - for QuorumProposalTaskState +impl, V: Versions> TaskState + for QuorumProposalTaskState { type Event = HotShotEvent; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 0b53c96880..9792fa64d6 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -35,14 +35,15 @@ use crate::{ broadcast_event, fetch_proposal, update_view, validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, SEND_VIEW_CHANGE_EVENT, }, + quorum_proposal_recv::{UpgradeLock, Versions}, }; /// Update states in the event that the parent state is not found for a given `proposal`. #[instrument(skip_all)] -async fn validate_proposal_liveness>( +async fn validate_proposal_liveness, V: Versions>( proposal: &Proposal>, event_sender: &Sender>>, - task_state: &mut QuorumProposalRecvTaskState, + task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { let view_number = proposal.data.view_number(); let mut consensus_write = task_state.consensus.write().await; @@ -124,11 +125,15 @@ async fn validate_proposal_liveness>( +pub(crate) async fn handle_quorum_proposal_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( proposal: &Proposal>, sender: &TYPES::SignatureKey, event_sender: &Sender>>, - task_state: &mut QuorumProposalRecvTaskState, + task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { let sender = sender.clone(); let cur_view = task_state.cur_view; @@ -229,7 +234,7 @@ pub(crate) async fn handle_quorum_proposal_recv> { +pub struct QuorumProposalRecvTaskState, V: Versions> { /// Our public key pub public_key: TYPES::SignatureKey, @@ -96,11 +97,13 @@ pub struct QuorumProposalRecvTaskState>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> QuorumProposalRecvTaskState { +impl, V: Versions> + QuorumProposalRecvTaskState +{ /// Cancel all tasks the consensus tasks has spawned before the given view pub async fn cancel_tasks(&mut self, view: TYPES::Time) { let keep = self.spawned_tasks.split_off(&view); @@ -134,8 +137,8 @@ impl> QuorumProposalRecvTaskState< } #[async_trait] -impl> TaskState - for QuorumProposalRecvTaskState +impl, V: Versions> TaskState + for QuorumProposalRecvTaskState { type Event = HotShotEvent; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 862f60ca7c..3a296ad8c7 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -22,6 +22,7 @@ use super::QuorumVoteTaskState; use crate::{ events::HotShotEvent, helpers::{broadcast_event, decide_from_proposal, LeafChainTraversalOutcome}, + quorum_vote::Versions, }; /// Handles the `QuorumProposalValidated` event. @@ -29,10 +30,11 @@ use crate::{ pub(crate) async fn handle_quorum_proposal_validated< TYPES: NodeType, I: NodeImplementation, + V: Versions, >( proposal: &QuorumProposal, sender: &Sender>>, - task_state: &mut QuorumVoteTaskState, + task_state: &mut QuorumVoteTaskState, ) -> Result<()> { let LeafChainTraversalOutcome { new_locked_view_number, @@ -45,13 +47,17 @@ pub(crate) async fn handle_quorum_proposal_validated< } = decide_from_proposal( proposal, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - Arc::clone(&task_state.decided_upgrade_certificate), + Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, ) .await; if let Some(cert) = decided_upgrade_cert.clone() { - let mut decided_certificate_lock = task_state.decided_upgrade_certificate.write().await; + let mut decided_certificate_lock = task_state + .upgrade_lock + .decided_upgrade_certificate + .write() + .await; *decided_certificate_lock = Some(cert.clone()); drop(decided_certificate_lock); let _ = sender diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 2cde7fc04e..6c4f8651e3 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -22,13 +22,12 @@ use hotshot_types::{ consensus::OuterConsensus, data::{Leaf, VidDisperseShare, ViewNumber}, event::Event, - message::Proposal, - simple_certificate::{version, UpgradeCertificate}, + message::{Proposal, UpgradeLock}, simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, ValidatedState, @@ -63,7 +62,7 @@ enum VoteDependency { } /// Handler for the vote dependency. -pub struct VoteDependencyHandle> { +pub struct VoteDependencyHandle, V: Versions> { /// Public key. pub public_key: TYPES::SignatureKey, /// Private Key. @@ -82,13 +81,15 @@ pub struct VoteDependencyHandle> { pub sender: Sender>>, /// Event receiver. pub receiver: Receiver>>, - /// An upgrade certificate that has been decided on, if any. - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, /// The node's id pub id: u64, } -impl + 'static> VoteDependencyHandle { +impl + 'static, V: Versions> + VoteDependencyHandle +{ /// Updates the shared consensus state with the new voting data. #[instrument(skip_all, target = "VoteDependencyHandle", fields(id = self.id, view = *self.view_number))] async fn update_shared_state( @@ -130,10 +131,8 @@ impl + 'static> VoteDependencyHand drop(consensus_reader); - let version = version( - self.view_number, - &self.decided_upgrade_certificate.read().await.clone(), - )?; + let version = self.upgrade_lock.version(self.view_number).await?; + let (validated_state, state_delta) = parent_state .validate_and_apply_header( &self.instance_state, @@ -230,8 +229,8 @@ impl + 'static> VoteDependencyHand } } -impl + 'static> HandleDepOutput - for VoteDependencyHandle +impl + 'static, V: Versions> HandleDepOutput + for VoteDependencyHandle { type Output = Vec>>; @@ -353,7 +352,7 @@ impl + 'static> HandleDepOutput /// The state for the quorum vote task. /// /// Contains all of the information for the quorum vote. -pub struct QuorumVoteTaskState> { +pub struct QuorumVoteTaskState, V: Versions> { /// Public key. pub public_key: TYPES::SignatureKey, @@ -390,11 +389,11 @@ pub struct QuorumVoteTaskState> { /// Reference to the storage. pub storage: Arc>, - /// An upgrade certificate that has been decided on, if any. - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> QuorumVoteTaskState { +impl, V: Versions> QuorumVoteTaskState { /// Create an event dependency. #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote create event dependency", level = "error")] fn create_event_dependency( @@ -479,7 +478,7 @@ impl> QuorumVoteTaskState { + VoteDependencyHandle:: { public_key: self.public_key.clone(), private_key: self.private_key.clone(), consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), @@ -489,7 +488,7 @@ impl> QuorumVoteTaskState> QuorumVoteTaskState> TaskState for QuorumVoteTaskState { +impl, V: Versions> TaskState + for QuorumVoteTaskState +{ type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index b61c1a186e..669b410b52 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -26,8 +26,8 @@ pub struct RewindTaskState { impl RewindTaskState { /// Handles all events, storing them to the private state - pub fn handle(&mut self, event: Arc>) { - self.events.push(Arc::clone(&event)); + pub fn handle(&mut self, event: &Arc>) { + self.events.push(Arc::clone(event)); } } @@ -41,7 +41,7 @@ impl TaskState for RewindTaskState { _sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event); + self.handle(&event); Ok(()) } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 3aaa24b763..ee7203f491 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -12,22 +12,20 @@ use std::{ use anyhow::{bail, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_timeout}; -use async_lock::RwLock; use async_trait::async_trait; use futures::{future::join_all, stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - constants::MarketplaceVersion, data::{null_block, PackedBundle}, event::{Event, EventType}, - simple_certificate::{version, UpgradeCertificate}, + message::UpgradeLock, traits::{ auction_results_provider::AuctionResultsProvider, block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, election::Membership, - node_implementation::{ConsensusTime, HasUrls, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, HasUrls, NodeImplementation, NodeType, Versions}, signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, }, @@ -76,7 +74,7 @@ pub struct BuilderResponse { } /// Tracks state of a Transaction task -pub struct TransactionTaskState> { +pub struct TransactionTaskState, V: Versions> { /// The state's api pub builder_timeout: Duration, @@ -106,25 +104,22 @@ pub struct TransactionTaskState> { pub instance_state: Arc, /// This state's ID pub id: u64, - /// Decided upgrade certificate - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, /// auction results provider pub auction_results_provider: Arc, /// fallback builder url pub fallback_builder_url: Url, } -impl> TransactionTaskState { +impl, V: Versions> TransactionTaskState { /// handle view change decide legacy or not pub async fn handle_view_change( &mut self, event_stream: &Sender>>, block_view: TYPES::Time, ) -> Option { - let version = match version( - block_view, - &self.decided_upgrade_certificate.read().await.clone(), - ) { + let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(e) => { tracing::error!("Failed to calculate version: {:?}", e); @@ -132,7 +127,7 @@ impl> TransactionTaskState> TransactionTaskState>>, block_view: TYPES::Time, ) -> Option { - let version = match hotshot_types::simple_certificate::version( - block_view, - &self - .decided_upgrade_certificate - .read() - .await - .as_ref() - .cloned(), - ) { + let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(err) => { error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); @@ -167,6 +154,7 @@ impl> TransactionTaskState> TransactionTaskState(self.membership.total_nodes(), version) else { error!("Failed to get null fee"); return None; @@ -250,15 +239,7 @@ impl> TransactionTaskState>>, block_view: TYPES::Time, ) -> Option { - let version = match hotshot_types::simple_certificate::version( - block_view, - &self - .decided_upgrade_certificate - .read() - .await - .as_ref() - .cloned(), - ) { + let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(err) => { error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); @@ -268,6 +249,7 @@ impl> TransactionTaskState> TransactionTaskState(self.membership.total_nodes(), version) + else { error!("Failed to get null fee"); return None; }; @@ -738,7 +722,9 @@ impl> TransactionTaskState> TaskState for TransactionTaskState { +impl, V: Versions> TaskState + for TransactionTaskState +{ type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index e4463c9076..680c61d82c 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -19,12 +19,12 @@ use hotshot_types::{ }, data::UpgradeProposal, event::{Event, EventType}, - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::UpgradeCertificate, simple_vote::{UpgradeProposalData, UpgradeVote}, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, vote::HasViewNumber, @@ -44,7 +44,7 @@ use crate::{ type VoteCollectorOption = Option>; /// Tracks state of a DA task -pub struct UpgradeTaskState> { +pub struct UpgradeTaskState, V: Versions> { /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -93,14 +93,18 @@ pub struct UpgradeTaskState> { /// Unix time in seconds at which we stop voting on an upgrade pub stop_voting_time: u64, - /// Upgrade certificate that has been decided on, if any - pub decided_upgrade_certificate: Arc>>>, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> UpgradeTaskState { +impl, V: Versions> UpgradeTaskState { /// Check if we have decided on an upgrade certificate async fn upgraded(&self) -> bool { - self.decided_upgrade_certificate.read().await.is_some() + self.upgrade_lock + .decided_upgrade_certificate + .read() + .await + .is_some() } /// main task event handler @@ -120,7 +124,7 @@ impl> UpgradeTaskState { if self.upgraded().await { info!( "Already upgraded to {:?}, skip voting.", - TYPES::Upgrade::VERSION + V::Upgrade::VERSION ); return None; } @@ -139,9 +143,9 @@ impl> UpgradeTaskState { } // If the proposal does not match our upgrade target, we immediately exit. - if proposal.data.upgrade_proposal.new_version_hash != TYPES::UPGRADE_HASH - || proposal.data.upgrade_proposal.old_version != TYPES::Base::VERSION - || proposal.data.upgrade_proposal.new_version != TYPES::Upgrade::VERSION + if proposal.data.upgrade_proposal.new_version_hash != V::UPGRADE_HASH + || proposal.data.upgrade_proposal.old_version != V::Base::VERSION + || proposal.data.upgrade_proposal.new_version != V::Upgrade::VERSION { return None; } @@ -284,9 +288,9 @@ impl> UpgradeTaskState { == self.public_key { let upgrade_proposal_data = UpgradeProposalData { - old_version: TYPES::Base::VERSION, - new_version: TYPES::Upgrade::VERSION, - new_version_hash: TYPES::UPGRADE_HASH.to_vec(), + old_version: V::Base::VERSION, + new_version: V::Upgrade::VERSION, + new_version_hash: V::UPGRADE_HASH.to_vec(), // We schedule the upgrade to begin 15 views in the future old_version_last_view: TYPES::Time::new(view + UPGRADE_BEGIN_OFFSET), // and end 20 views in the future @@ -337,7 +341,9 @@ impl> UpgradeTaskState { #[async_trait] /// task state implementation for the upgrade task -impl> TaskState for UpgradeTaskState { +impl, V: Versions> TaskState + for UpgradeTaskState +{ type Event = HotShotEvent; async fn handle_event( diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index 0261a1481f..b6cbffd91b 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -85,7 +85,7 @@ pub fn run_builder_source( let mut app: App = App::with_state(source); app.register_module("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, TYPES::Base::instance())) + async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) }; let mut handle = Some(start_builder(url.clone(), source.clone())); diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 54e9c9318b..cd9a320b9b 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -252,7 +252,7 @@ impl SimpleBuilderSource { app.register_module::("block_info", builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, TYPES::Base::instance())); + async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())); } } diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 5cdd70bbcf..b5e61fe1ca 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -13,7 +13,7 @@ use async_lock::RwLock; use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; use hotshot_task_impls::helpers::broadcast_event; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::node_implementation::{NodeType, Versions}; use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; @@ -27,17 +27,19 @@ use crate::{test_runner::Node, test_task::TestEvent}; pub struct CompletionTaskErr {} /// Completion task state -pub struct CompletionTask> { +pub struct CompletionTask, V: Versions> { pub tx: Sender, pub rx: Receiver, /// handles to the nodes in the test - pub(crate) handles: Arc>>>, + pub(crate) handles: Arc>>>, /// Duration of the task. pub duration: Duration, } -impl> CompletionTask { +impl, V: Versions> + CompletionTask +{ pub fn run(mut self) -> JoinHandle<()> { async_spawn(async move { if async_timeout(self.duration, self.wait_for_shutdown()) diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index dfb5781d64..09c3b6e87e 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -19,7 +19,7 @@ use hotshot::{ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, block_types::TestTransaction, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; @@ -35,7 +35,7 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::Topic, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, }, utils::{View, ViewInner}, vid::{vid_scheme, VidCommitment, VidSchemeType}, @@ -56,14 +56,15 @@ pub async fn build_system_handle< Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, > + TestableNodeImplementation, + V: Versions, >( node_id: u64, ) -> ( - SystemContextHandle, + SystemContextHandle, Sender>>, Receiver>>, ) { - let builder: TestDescription = TestDescription::default_multiple_rounds(); + let builder: TestDescription = TestDescription::default_multiple_rounds(); let launcher = builder.gen_launcher(node_id); @@ -339,7 +340,7 @@ pub fn build_da_certificate( } pub async fn build_vote( - handle: &SystemContextHandle, + handle: &SystemContextHandle, proposal: QuorumProposal, ) -> GeneralConsensusMessage { let view = ViewNumber::new(*proposal.view_number); diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index ce967ad8d2..97ea7fcdf1 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -19,7 +19,7 @@ use hotshot_types::{ error::RoundTimedoutState, event::{Event, EventType, LeafChain}, simple_certificate::QuorumCertificate, - traits::node_implementation::{ConsensusTime, NodeType}, + traits::node_implementation::{ConsensusTime, NodeType, Versions}, vid::VidCommitment, }; use snafu::Snafu; @@ -85,9 +85,9 @@ pub enum OverallSafetyTaskErr { } /// Data availability task state -pub struct OverallSafetyTask> { +pub struct OverallSafetyTask, V: Versions> { /// handles - pub handles: Arc>>>, + pub handles: Arc>>>, /// ctx pub ctx: RoundCtx, /// configure properties @@ -98,7 +98,9 @@ pub struct OverallSafetyTask, } -impl> OverallSafetyTask { +impl, V: Versions> + OverallSafetyTask +{ async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::Time) { let expected_views_to_fail = &mut self.properties.expected_views_to_fail; @@ -128,8 +130,8 @@ impl> OverallSafetyTask> TestTaskState - for OverallSafetyTask +impl, V: Versions> TestTaskState + for OverallSafetyTask { type Event = Event; diff --git a/testing/src/predicates/upgrade_with_consensus.rs b/testing/src/predicates/upgrade_with_consensus.rs index 5cf8de8a4e..7a63edd583 100644 --- a/testing/src/predicates/upgrade_with_consensus.rs +++ b/testing/src/predicates/upgrade_with_consensus.rs @@ -9,13 +9,13 @@ use std::sync::Arc; use async_trait::async_trait; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task_impls::consensus::ConsensusTaskState; use hotshot_types::simple_certificate::UpgradeCertificate; use crate::predicates::{Predicate, PredicateResult}; -type ConsensusTaskTestState = ConsensusTaskState; +type ConsensusTaskTestState = ConsensusTaskState; type UpgradeCertCallback = Arc>>) -> bool + Send + Sync>; @@ -34,7 +34,12 @@ impl std::fmt::Debug for UpgradeCertPredicate { #[async_trait] impl Predicate for UpgradeCertPredicate { async fn evaluate(&self, input: &ConsensusTaskTestState) -> PredicateResult { - let upgrade_cert = input.decided_upgrade_certificate.read().await.clone(); + let upgrade_cert = input + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone(); PredicateResult::from((self.check)(upgrade_cert.into())) } diff --git a/testing/src/predicates/upgrade_with_proposal.rs b/testing/src/predicates/upgrade_with_proposal.rs index de39befe9e..8f45414a9a 100644 --- a/testing/src/predicates/upgrade_with_proposal.rs +++ b/testing/src/predicates/upgrade_with_proposal.rs @@ -4,18 +4,16 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(feature = "dependency-tasks")] - use std::sync::Arc; use async_trait::async_trait; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task_impls::quorum_proposal::QuorumProposalTaskState; use hotshot_types::simple_certificate::UpgradeCertificate; use crate::predicates::{Predicate, PredicateResult}; -type QuorumProposalTaskTestState = QuorumProposalTaskState; +type QuorumProposalTaskTestState = QuorumProposalTaskState; type UpgradeCertCallback = Arc>>) -> bool + Send + Sync>; @@ -34,7 +32,12 @@ impl std::fmt::Debug for UpgradeCertPredicate { #[async_trait] impl Predicate for UpgradeCertPredicate { async fn evaluate(&self, input: &QuorumProposalTaskTestState) -> PredicateResult { - let upgrade_cert = input.decided_upgrade_certificate.read().await.clone(); + let upgrade_cert = input + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone(); PredicateResult::from((self.check)(upgrade_cert.into())) } diff --git a/testing/src/predicates/upgrade_with_vote.rs b/testing/src/predicates/upgrade_with_vote.rs index 1d28914f79..20f4102495 100644 --- a/testing/src/predicates/upgrade_with_vote.rs +++ b/testing/src/predicates/upgrade_with_vote.rs @@ -4,17 +4,15 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(feature = "dependency-tasks")] - use std::sync::Arc; use async_trait::async_trait; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task_impls::quorum_vote::QuorumVoteTaskState; use hotshot_types::simple_certificate::UpgradeCertificate; use crate::predicates::{Predicate, PredicateResult}; -type QuorumVoteTaskTestState = QuorumVoteTaskState; +type QuorumVoteTaskTestState = QuorumVoteTaskState; type UpgradeCertCallback = Arc>>) -> bool + Send + Sync>; @@ -30,11 +28,15 @@ impl std::fmt::Debug for UpgradeCertPredicate { } } -#[cfg(feature = "dependency-tasks")] #[async_trait] impl Predicate for UpgradeCertPredicate { async fn evaluate(&self, input: &QuorumVoteTaskTestState) -> PredicateResult { - let upgrade_cert = input.decided_upgrade_certificate.read().await.clone(); + let upgrade_cert = input + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone(); PredicateResult::from((self.check)(upgrade_cert.into())) } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 72611d44d9..5d94297f60 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -26,7 +26,7 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ network::ConnectedNetwork, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType, Versions}, }, vote::HasViewNumber, ValidatorConfig, @@ -46,11 +46,11 @@ pub type StateAndBlock = (Vec, Vec); pub struct SpinningTaskErr {} /// Spinning task state -pub struct SpinningTask> { +pub struct SpinningTask, V: Versions> { /// handle to the nodes - pub(crate) handles: Arc>>>, + pub(crate) handles: Arc>>>, /// late start nodes - pub(crate) late_start: HashMap>, + pub(crate) late_start: HashMap>, /// time based changes pub(crate) changes: BTreeMap>, /// most recent view seen by spinning task @@ -68,7 +68,8 @@ impl< TYPES: NodeType, I: TestableNodeImplementation, N: ConnectedNetwork, - > TestTaskState for SpinningTask + V: Versions, + > TestTaskState for SpinningTask where I: TestableNodeImplementation, I: NodeImplementation< @@ -231,7 +232,7 @@ where node_id < config.da_staked_committee_size as u64, ); let context = - TestRunner::::add_node_with_config_and_channels( + TestRunner::::add_node_with_config_and_channels( node_id, network.clone(), (*memberships).clone(), diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index bb82847592..9a654e619e 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -17,8 +17,9 @@ use hotshot_example_types::{ storage_types::TestStorage, testable_delay::DelayConfig, }; use hotshot_types::{ - consensus::ConsensusMetricsValue, traits::node_implementation::NodeType, ExecutionType, - HotShotConfig, ValidatorConfig, + consensus::ConsensusMetricsValue, + traits::node_implementation::{NodeType, Versions}, + ExecutionType, HotShotConfig, ValidatorConfig, }; use tide_disco::Url; use vec1::Vec1; @@ -56,7 +57,7 @@ pub struct TimingData { /// metadata describing a test #[derive(Clone)] -pub struct TestDescription> { +pub struct TestDescription, V: Versions> { /// Total number of staked nodes in the test pub num_nodes_with_stake: usize, /// Total number of non-staked nodes in the test @@ -91,30 +92,31 @@ pub struct TestDescription> { /// description of the solver to run pub solver: FakeSolverApiDescription, /// nodes with byzantine behaviour - pub behaviour: Rc Behaviour>, + pub behaviour: Rc Behaviour>, /// Delay config if any to add delays to asynchronous calls pub async_delay_config: DelayConfig, } #[derive(Debug)] -pub enum Behaviour> { - ByzantineTwins(Box>), - Byzantine(Box>), +pub enum Behaviour, V: Versions> { + ByzantineTwins(Box>), + Byzantine(Box>), Standard, } pub async fn create_test_handle< TYPES: NodeType, I: NodeImplementation, + V: Versions, >( - metadata: TestDescription, + metadata: TestDescription, node_id: u64, network: Network, memberships: Memberships, config: HotShotConfig, storage: I::Storage, marketplace_config: MarketplaceConfig, -) -> SystemContextHandle { +) -> SystemContextHandle { let initializer = HotShotInitializer::::from_genesis(TestInstanceState::new( metadata.async_delay_config, )) @@ -170,7 +172,7 @@ pub async fn create_test_handle< .await } Behaviour::Standard => { - let hotshot = SystemContext::::new( + let hotshot = SystemContext::::new( public_key, private_key, node_id, @@ -228,7 +230,7 @@ impl Default for TimingData { } } -impl> TestDescription { +impl, V: Versions> TestDescription { /// the default metadata for a stress test #[must_use] #[allow(clippy::redundant_field_names)] @@ -268,7 +270,7 @@ impl> TestDescription { pub fn default_multiple_rounds() -> Self { let num_nodes_with_stake = 10; let num_nodes_without_stake = 0; - TestDescription:: { + TestDescription:: { // TODO: remove once we have fixed the DHT timeout issue // https://github.com/EspressoSystems/HotShot/issues/2088 num_bootstrap_nodes: num_nodes_with_stake, @@ -290,7 +292,7 @@ impl> TestDescription { ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - ..TestDescription::::default() + ..TestDescription::::default() } } @@ -330,7 +332,9 @@ impl> TestDescription { } } -impl> Default for TestDescription { +impl, V: Versions> Default + for TestDescription +{ /// by default, just a single round #[allow(clippy::redundant_field_names)] fn default() -> Self { @@ -377,8 +381,11 @@ impl> Default for TestDescription< } } -impl, I: TestableNodeImplementation> - TestDescription +impl< + TYPES: NodeType, + I: TestableNodeImplementation, + V: Versions, + > TestDescription where I: NodeImplementation>, { @@ -387,7 +394,7 @@ where /// # Panics /// if some of the the configuration values are zero #[must_use] - pub fn gen_launcher(self, node_id: u64) -> TestLauncher { + pub fn gen_launcher(self, node_id: u64) -> TestLauncher { let TestDescription { num_nodes_with_stake, num_bootstrap_nodes, diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 8db6169c75..7f197fd1ea 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -14,7 +14,7 @@ use hotshot_example_types::storage_types::TestStorage; use hotshot_types::{ traits::{ network::{AsyncGenerator, ConnectedNetwork}, - node_implementation::NodeType, + node_implementation::{NodeType, Versions}, }, HotShotConfig, }; @@ -40,18 +40,18 @@ pub struct ResourceGenerators> { +pub struct TestLauncher, V: Versions> { /// generator for resources pub resource_generator: ResourceGenerators, /// metadata used for tasks - pub metadata: TestDescription, + pub metadata: TestDescription, } -impl> TestLauncher { +impl, V: Versions> TestLauncher { /// launch the test #[must_use] - pub fn launch>(self) -> TestRunner { - TestRunner:: { + pub fn launch>(self) -> TestRunner { + TestRunner:: { launcher: self, nodes: Vec::new(), solver_server: None, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 06b9c87579..372e3ece3b 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -37,7 +37,7 @@ use hotshot_types::{ traits::{ election::Membership, network::{ConnectedNetwork, Topic}, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }, HotShotConfig, ValidatorConfig, }; @@ -70,8 +70,9 @@ impl TaskErr for T {} impl< TYPES: NodeType, I: TestableNodeImplementation, + V: Versions, N: ConnectedNetwork, - > TestRunner + > TestRunner where I: TestableNodeImplementation, I: NodeImplementation< @@ -190,7 +191,7 @@ where .await, async_delay_config: self.launcher.metadata.async_delay_config, }; - let spinning_task = TestTask::>::new( + let spinning_task = TestTask::>::new( spinning_task_state, event_rxs.clone(), test_receiver.clone(), @@ -215,7 +216,7 @@ where test_receiver.clone(), ); - let overall_safety_task = TestTask::>::new( + let overall_safety_task = TestTask::>::new( overall_safety_task_state, event_rxs.clone(), test_receiver.clone(), @@ -590,7 +591,7 @@ where validator_config: ValidatorConfig, storage: I::Storage, marketplace_config: MarketplaceConfig, - ) -> Arc> { + ) -> Arc> { // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); @@ -627,7 +628,7 @@ where Receiver>>, ), external_channel: (Sender>, Receiver>), - ) -> Arc> { + ) -> Arc> { // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); @@ -650,13 +651,13 @@ where } /// a node participating in a test -pub struct Node> { +pub struct Node, V: Versions> { /// The node's unique identifier pub node_id: u64, /// The underlying network belonging to the node pub network: Network, /// The handle to the node's internals - pub handle: SystemContextHandle, + pub handle: SystemContextHandle, } /// This type combines all of the paramters needed to build the context for a node that started @@ -677,10 +678,10 @@ pub struct LateNodeContextParameters> { +pub enum LateNodeContext, V: Versions> { /// The system context that we're passing directly to the node, this means the node is already /// initialized successfully. - InitializedContext(Arc>), + InitializedContext(Arc>), /// The system context that we're passing to the node when it is not yet initialized, so we're /// initializing it based on the received leaf and init parameters. @@ -690,12 +691,12 @@ pub enum LateNodeContext> } /// A yet-to-be-started node that participates in tests -pub struct LateStartNode> { +pub struct LateStartNode, V: Versions> { /// The underlying network belonging to the node pub network: Network, /// Either the context to which we will use to launch HotShot for initialized node when it's /// time, or the parameters that will be used to initialize the node and launch HotShot. - pub context: LateNodeContext, + pub context: LateNodeContext, } /// The runner of a test network @@ -703,16 +704,17 @@ pub struct LateStartNode> pub struct TestRunner< TYPES: NodeType, I: TestableNodeImplementation, + V: Versions, N: ConnectedNetwork, > { /// test launcher, contains a bunch of useful metadata and closures - pub(crate) launcher: TestLauncher, + pub(crate) launcher: TestLauncher, /// nodes in the test - pub(crate) nodes: Vec>, + pub(crate) nodes: Vec>, /// the solver server running in the test pub(crate) solver_server: Option<(Url, JoinHandle<()>)>, /// nodes with a late start - pub(crate) late_start: HashMap>, + pub(crate) late_start: HashMap>, /// the next node unique identifier pub(crate) next_node_id: u64, /// Phantom for N diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index cba6ae4d9d..0dcc56c068 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -16,8 +16,11 @@ use futures::future::select_all; use hotshot::types::Event; use hotshot_task_impls::{events::HotShotEvent, network::NetworkMessageTaskState}; use hotshot_types::{ - message::{Messages, VersionedMessage}, - traits::{network::ConnectedNetwork, node_implementation::NodeType}, + message::{Messages, UpgradeLock}, + traits::{ + network::ConnectedNetwork, + node_implementation::{NodeType, Versions}, + }, }; #[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; @@ -109,10 +112,12 @@ impl TestTask { /// Add the network task to handle messages and publish events. pub async fn add_network_message_test_task< TYPES: NodeType, + V: Versions, NET: ConnectedNetwork, >( internal_event_stream: Sender>>, external_event_stream: Sender>, + upgrade_lock: UpgradeLock, channel: Arc, ) -> JoinHandle<()> { let net = Arc::clone(&channel); @@ -131,8 +136,7 @@ pub async fn add_network_message_test_task< let mut deserialized_messages = Vec::new(); for msg in msgs { - let deserialized_message = match VersionedMessage::deserialize(&msg, &None) - { + let deserialized_message = match upgrade_lock.deserialize(&msg).await { Ok(deserialized) => deserialized, Err(e) => { tracing::error!("Failed to deserialize message: {}", e); diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index fa2d839352..b2238db02a 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -12,7 +12,7 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::node_implementation::{NodeType, Versions}; use rand::thread_rng; use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] @@ -28,10 +28,10 @@ use crate::{test_runner::Node, test_task::TestEvent}; pub struct TxnTaskErr {} /// state of task that decides when things are completed -pub struct TxnTask> { +pub struct TxnTask, V: Versions> { // TODO should this be in a rwlock? Or maybe a similar abstraction to the registry is in order /// Handles for all nodes. - pub handles: Arc>>>, + pub handles: Arc>>>, /// Optional index of the next node. pub next_node_idx: Option, /// time to wait between txns @@ -40,7 +40,7 @@ pub struct TxnTask> { pub shutdown_chan: Receiver, } -impl> TxnTask { +impl, V: Versions> TxnTask { pub fn run(mut self) -> JoinHandle<()> { async_spawn(async move { async_sleep(Duration::from_millis(100)).await; diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 70d858a01a..0c335bbc14 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -17,7 +17,7 @@ use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_types::{ @@ -394,7 +394,7 @@ impl TestView { pub fn create_quorum_vote( &self, - handle: &SystemContextHandle, + handle: &SystemContextHandle, ) -> QuorumVote { QuorumVote::::create_signed_vote( QuorumData { @@ -410,7 +410,7 @@ impl TestView { pub fn create_upgrade_vote( &self, data: UpgradeProposalData, - handle: &SystemContextHandle, + handle: &SystemContextHandle, ) -> UpgradeVote { UpgradeVote::::create_signed_vote( data, @@ -424,7 +424,7 @@ impl TestView { pub fn create_da_vote( &self, data: DaData, - handle: &SystemContextHandle, + handle: &SystemContextHandle, ) -> DaVote { DaVote::create_signed_vote( data, diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index ef68a4b9b7..b7099b62d4 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -13,7 +13,7 @@ use async_compatibility_layer::art::async_sleep; use hotshot_builder_api::v0_1::block_info::AvailableBlockData; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, - node_types::TestTypes, + node_types::{TestTypes, TestVersions}, }; use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; @@ -21,7 +21,9 @@ use hotshot_testing::block_builder::{ BuilderTask, RandomBuilderImplementation, TestBuilderImplementation, }; use hotshot_types::traits::{ - block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, + block_contents::vid_commitment, + node_implementation::{NodeType, Versions}, + signature_key::SignatureKey, BlockPayload, }; use tide_disco::Url; @@ -50,7 +52,7 @@ async fn test_random_block_builder() { let builder_started = Instant::now(); - let client: BuilderClient::Base> = + let client: BuilderClient::Base> = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 4464e15682..2e02c9a7b9 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -14,7 +14,7 @@ use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::TestInstanceState, }; use hotshot_macros::{run_test, test_scripts}; @@ -37,7 +37,10 @@ use hotshot_testing::{ use hotshot_types::{ data::{null_block, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, - traits::{election::Membership, node_implementation::ConsensusTime}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, Versions}, + }, utils::BuilderCommitment, vote::HasViewNumber, }; @@ -51,13 +54,14 @@ const TIMEOUT: Duration = Duration::from_millis(35); #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { - use hotshot_types::constants::BaseVersion; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -104,9 +108,9 @@ async fn test_consensus_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - vec1![null_block::builder_fee( + vec1![null_block::builder_fee::( quorum_membership.total_nodes(), - BaseVersion::version() + ::Base::VERSION, ) .unwrap()], None, @@ -129,7 +133,8 @@ async fn test_consensus_task() { ]), ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: TIMEOUT, state: consensus_state, @@ -146,7 +151,9 @@ async fn test_consensus_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -183,7 +190,8 @@ async fn test_consensus_vote() { exact(QuorumVoteSend(votes[0].clone())), ])]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: TIMEOUT, state: consensus_state, @@ -198,13 +206,15 @@ async fn test_consensus_vote() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_finalize_propose() { use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; - use hotshot_types::{constants::BaseVersion, data::null_block}; + use hotshot_types::data::null_block; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(4).await.0; + let handle = build_system_handle::(4) + .await + .0; let (priv_key, pub_key) = key_pair_for_id(4); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -296,7 +306,11 @@ async fn test_view_sync_finalize_propose() { builder_commitment, TestMetadata, ViewNumber::new(4), - vec1![null_block::builder_fee(4, BaseVersion::version()).unwrap()], + vec1![null_block::builder_fee::( + 4, + ::Base::VERSION + ) + .unwrap()], None, ), ], @@ -320,7 +334,8 @@ async fn test_view_sync_finalize_propose() { ]), ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: TIMEOUT, state: consensus_state, @@ -339,7 +354,9 @@ async fn test_view_sync_finalize_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(5).await.0; + let handle = build_system_handle::(5) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -415,7 +432,8 @@ async fn test_view_sync_finalize_vote() { ]), ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: TIMEOUT, state: consensus_state, @@ -434,7 +452,9 @@ async fn test_view_sync_finalize_vote_fail_view_number() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(5).await.0; + let handle = build_system_handle::(5) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -517,7 +537,8 @@ async fn test_view_sync_finalize_vote_fail_view_number() { Expectations::from_outputs(vec![]), ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: TIMEOUT, state: consensus_state, @@ -534,7 +555,9 @@ async fn test_vid_disperse_storage_failure() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; @@ -569,7 +592,8 @@ async fn test_vid_disperse_storage_failure() { quorum_proposal_validated(), ])]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: TIMEOUT, state: consensus_state, diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 61a2c1671d..34dfc406b6 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -10,7 +10,7 @@ use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{da::DaTaskState, events::HotShotEvent::*}; @@ -22,12 +22,12 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - constants::BaseVersion, data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData, traits::{ - block_contents::precompute_vid_commitment, election::Membership, - node_implementation::ConsensusTime, + block_contents::precompute_vid_commitment, + election::Membership, + node_implementation::{ConsensusTime, Versions}, }, }; use vbs::version::StaticVersionType; @@ -38,7 +38,9 @@ async fn test_da_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -85,9 +87,9 @@ async fn test_da_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - vec1::vec1![null_block::builder_fee( + vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(), - BaseVersion::version() + ::Base::VERSION ) .unwrap()], Some(precompute), @@ -122,7 +124,9 @@ async fn test_da_task_storage_failure() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; @@ -172,9 +176,9 @@ async fn test_da_task_storage_failure() { encoded_transactions, TestMetadata, ViewNumber::new(2), - vec1::vec1![null_block::builder_fee( + vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(), - BaseVersion::version() + ::Base::VERSION ) .unwrap()], Some(precompute), diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index 30239d38bd..32f0878999 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -6,7 +6,7 @@ use std::time::Duration; -use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; +use hotshot_example_types::node_types::{Libp2pImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -23,7 +23,7 @@ use tracing::instrument; async fn libp2p_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -54,7 +54,7 @@ async fn libp2p_network() { async fn libp2p_network_failures_2() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -102,7 +102,8 @@ async fn libp2p_network_failures_2() { async fn test_stress_libp2p_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription::default_stress(); + let metadata: TestDescription = + TestDescription::default_stress(); metadata .gen_launcher(0) .launch() diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 858e9446ce..a3b3245533 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -10,7 +10,7 @@ use async_broadcast::Sender; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task::task::{ConsensusTaskRegistry, Task}; use hotshot_task_impls::{ events::HotShotEvent, @@ -22,6 +22,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::ViewNumber, + message::UpgradeLock, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeType}, @@ -41,8 +42,9 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder: TestDescription = + let builder: TestDescription = TestDescription::default_multiple_rounds(); + let upgrade_lock = UpgradeLock::::new(); let node_id = 1; let launcher = builder.gen_launcher(node_id); @@ -60,13 +62,13 @@ async fn test_network_task() { Topic::Global, config.fixed_leader_for_gpuvid, ); - let network_state: NetworkEventTaskState, _> = + let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { channel: network.clone(), view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, - decided_upgrade_certificate: None, + upgrade_lock: upgrade_lock.clone(), storage, }; let (tx, rx) = async_broadcast::broadcast(10); @@ -83,6 +85,7 @@ async fn test_network_task() { add_network_message_test_task( out_tx_internal.clone(), out_tx_external.clone(), + upgrade_lock, network.clone(), ) .await; @@ -114,7 +117,7 @@ async fn test_network_storage_fail() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let builder: TestDescription = + let builder: TestDescription = TestDescription::default_multiple_rounds(); let node_id = 1; @@ -127,6 +130,7 @@ async fn test_network_storage_fail() { let config = launcher.resource_generator.config.clone(); let public_key = config.my_own_validator_config.public_key; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let upgrade_lock = UpgradeLock::::new(); let membership = ::Membership::create_election( known_nodes_with_stake.clone(), @@ -134,13 +138,13 @@ async fn test_network_storage_fail() { Topic::Global, config.fixed_leader_for_gpuvid, ); - let network_state: NetworkEventTaskState, _> = + let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { channel: network.clone(), view: ViewNumber::new(0), membership: membership.clone(), filter: network::quorum_filter, - decided_upgrade_certificate: None, + upgrade_lock: upgrade_lock.clone(), storage, }; let (tx, rx) = async_broadcast::broadcast(10); @@ -158,6 +162,7 @@ async fn test_network_storage_fail() { add_network_message_test_task( out_tx_internal.clone(), out_tx_external.clone(), + upgrade_lock, network.clone(), ) .await; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index b737d90927..d0d727b370 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -11,7 +11,7 @@ use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::TestValidatedState, }; use hotshot_macros::{run_test, test_scripts}; @@ -46,7 +46,9 @@ async fn test_quorum_proposal_recv_task() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); let consensus = handle.hotshot.consensus(); @@ -104,7 +106,9 @@ async fn test_quorum_proposal_recv_task() { exact(ViewChange(ViewNumber::new(2))), ])]; - let state = QuorumProposalRecvTaskState::::create_from(&handle).await; + let state = + QuorumProposalRecvTaskState::::create_from(&handle) + .await; let mut script = TaskScript { timeout: Duration::from_millis(35), state, @@ -132,7 +136,9 @@ async fn test_quorum_proposal_recv_task_liveness_check() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(4).await.0; + let handle = build_system_handle::(4) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); let consensus = handle.hotshot.consensus(); @@ -208,7 +214,9 @@ async fn test_quorum_proposal_recv_task_liveness_check() { exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), ])]; - let state = QuorumProposalRecvTaskState::::create_from(&handle).await; + let state = + QuorumProposalRecvTaskState::::create_from(&handle) + .await; let mut script = TaskScript { timeout: Duration::from_millis(35), state, diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index eda6c459c8..61372110a8 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -12,7 +12,7 @@ use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, traits::ValidatedState}; use hotshot_example_types::{ block_types::TestMetadata, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::TestValidatedState, }; use hotshot_macros::{run_test, test_scripts}; @@ -29,7 +29,10 @@ use hotshot_testing::{ use hotshot_types::{ data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, - traits::{election::Membership, node_implementation::ConsensusTime}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, Versions}, + }, utils::BuilderCommitment, vote::HasViewNumber, }; @@ -43,14 +46,13 @@ const TIMEOUT: Duration = Duration::from_millis(35); #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_1() { use hotshot_testing::script::{Expectations, TaskScript}; - use hotshot_types::constants::BaseVersion; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let node_id = 1; - let handle = build_system_handle::(node_id) + let handle = build_system_handle::(node_id) .await .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -83,8 +85,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { // We must send the genesis cert here to initialize hotshot successfully. let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let builder_fee = - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); + let builder_fee = null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION, + ) + .unwrap(); drop(consensus_writer); let inputs = vec![ @@ -119,7 +124,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { ]; let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -133,14 +138,13 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { - use hotshot_types::constants::BaseVersion; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle::(node_id) + let handle = build_system_handle::(node_id) .await .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -187,8 +191,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let builder_fee = - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); + let builder_fee = null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION, + ) + .unwrap(); let inputs = vec![ random![ @@ -302,7 +309,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ]; let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -317,14 +324,13 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_qc_timeout() { - use hotshot_types::constants::BaseVersion; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle::(node_id) + let handle = build_system_handle::(node_id) .await .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -372,10 +378,11 @@ async fn test_quorum_proposal_task_qc_timeout() { builder_commitment, TestMetadata, ViewNumber::new(3), - vec1![ - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap() - ], + vec1![null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION + ) + .unwrap()], None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), @@ -388,7 +395,7 @@ async fn test_quorum_proposal_task_qc_timeout() { let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -403,14 +410,14 @@ async fn test_quorum_proposal_task_qc_timeout() { #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_view_sync() { use hotshot_example_types::block_types::TestMetadata; - use hotshot_types::{constants::BaseVersion, data::null_block}; + use hotshot_types::data::null_block; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let node_id = 2; - let handle = build_system_handle::(node_id) + let handle = build_system_handle::(node_id) .await .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -460,10 +467,11 @@ async fn test_quorum_proposal_task_view_sync() { builder_commitment, TestMetadata, ViewNumber::new(2), - vec1![ - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()) - .unwrap() - ], + vec1![null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION + ) + .unwrap()], None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), @@ -476,7 +484,7 @@ async fn test_quorum_proposal_task_view_sync() { let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -490,14 +498,13 @@ async fn test_quorum_proposal_task_view_sync() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_task_liveness_check() { - use hotshot_types::constants::BaseVersion; use vbs::version::StaticVersionType; async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); let node_id = 3; - let handle = build_system_handle::(node_id) + let handle = build_system_handle::(node_id) .await .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -533,8 +540,11 @@ async fn test_quorum_proposal_task_liveness_check() { drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let builder_fee = - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); + let builder_fee = null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION, + ) + .unwrap(); // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. @@ -657,7 +667,7 @@ async fn test_quorum_proposal_task_liveness_check() { ]; let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -674,7 +684,9 @@ async fn test_quorum_proposal_task_with_incomplete_events() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -700,7 +712,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let expectations = vec![Expectations::from_outputs(vec![])]; let quorum_proposal_task_state = - QuorumProposalTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b9643c3073..83019caad4 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -11,7 +11,7 @@ use std::time::Duration; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_macros::{run_test, test_scripts}; use hotshot_testing::{ all_predicates, @@ -40,7 +40,9 @@ async fn test_quorum_vote_task_success() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -84,7 +86,7 @@ async fn test_quorum_vote_task_success() { ])]; let quorum_vote_state = - QuorumVoteTaskState::::create_from(&handle).await; + QuorumVoteTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -106,7 +108,9 @@ async fn test_quorum_vote_task_miss_dependency() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -169,7 +173,7 @@ async fn test_quorum_vote_task_miss_dependency() { ]; let quorum_vote_state = - QuorumVoteTaskState::::create_from(&handle).await; + QuorumVoteTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, @@ -191,7 +195,9 @@ async fn test_quorum_vote_task_incorrect_dependency() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -221,7 +227,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { ])]; let quorum_vote_state = - QuorumVoteTaskState::::create_from(&handle).await; + QuorumVoteTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: TIMEOUT, diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 07c1f72697..ef955e13d7 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -4,10 +4,11 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::time::Duration; +use std::{rc::Rc, time::Duration}; +use hotshot::tasks::{BadProposalViewDos, DoubleProposeVote}; use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, state_types::TestTypes, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -15,19 +16,15 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::TestDescription, + test_builder::{Behaviour, TestDescription}, view_sync_task::ViewSyncTaskDescription, }; -use { - hotshot::tasks::{BadProposalViewDos, DoubleProposeVote}, - hotshot_testing::test_builder::Behaviour, - std::rc::Rc, -}; cross_tests!( TestName: test_success, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { TestDescription { @@ -46,6 +43,7 @@ cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription { @@ -77,6 +75,7 @@ cross_tests!( TestName: test_success_with_async_delay_2, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription { @@ -116,6 +115,7 @@ cross_tests!( TestName: double_propose_vote, Impls: [MemoryImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let behaviour = Rc::new(|node_id| { match node_id { @@ -141,6 +141,7 @@ cross_tests!( TestName: multiple_bad_proposals, Impls: [MemoryImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let behaviour = Rc::new(|node_id| { match node_id { @@ -169,6 +170,7 @@ cross_tests!( TestName: test_with_double_leader_no_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestConsecutiveLeaderTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 661f3aa3d0..54d3e5193f 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -12,7 +12,7 @@ use std::{ }; use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -31,6 +31,7 @@ cross_tests!( TestName: test_with_failures_2, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); @@ -72,6 +73,7 @@ cross_tests!( TestName: dishonest_leader, Impls: [MemoryImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let behaviour = Rc::new(|node_id| { @@ -113,6 +115,7 @@ cross_tests!( TestName: test_with_double_leader_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestConsecutiveLeaderTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 14ffd17f61..24764f6a3d 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -1,18 +1,18 @@ use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, - node_types::{MemoryImpl, TestConsecutiveLeaderTypes}, + node_types::{MemoryImpl, TestConsecutiveLeaderTypes, TestVersions}, }; use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState, }; use hotshot_testing::helpers::build_system_handle; use hotshot_types::{ - constants::BaseVersion, data::{null_block, PackedBundle, ViewNumber}, traits::{ - block_contents::precompute_vid_commitment, election::Membership, - node_implementation::ConsensusTime, + block_contents::precompute_vid_commitment, + election::Membership, + node_implementation::{ConsensusTime, Versions}, }, }; use vbs::version::StaticVersionType; @@ -26,9 +26,10 @@ async fn test_transaction_task_leader_two_views_in_a_row() { // Build the API for node 2. let node_id = 2; - let handle = build_system_handle::(node_id) - .await - .0; + let handle = + build_system_handle::(node_id) + .await + .0; let mut input = Vec::new(); let mut output = Vec::new(); @@ -45,11 +46,13 @@ async fn test_transaction_task_leader_two_views_in_a_row() { vec![].into(), TestMetadata, current_view, - vec1::vec1![null_block::builder_fee( - quorum_membership.total_nodes(), - BaseVersion::version() - ) - .unwrap()], + vec1::vec1![ + null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION + ) + .unwrap() + ], Some(precompute_data.clone()), None, ); @@ -60,6 +63,9 @@ async fn test_transaction_task_leader_two_views_in_a_row() { output.push(HotShotEvent::BlockRecv(exp_packed_bundle)); let transaction_state = - TransactionTaskState::::create_from(&handle).await; + TransactionTaskState::::create_from( + &handle, + ) + .await; run_harness(input, output, transaction_state, false).await; } diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index a3784da1cb..d196356455 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -15,7 +15,7 @@ use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::TestInstanceState, }; use hotshot_macros::test_scripts; @@ -29,10 +29,12 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - constants::BaseVersion, data::{null_block, ViewNumber}, simple_vote::UpgradeProposalData, - traits::{election::Membership, node_implementation::ConsensusTime}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, Versions}, + }, vote::HasViewNumber, }; use vbs::version::{StaticVersionType, Version}; @@ -47,7 +49,9 @@ async fn test_upgrade_task_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(1).await.0; + let handle = build_system_handle::(1) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -180,7 +184,8 @@ async fn test_upgrade_task_vote() { }, ]; - let consensus_state = ConsensusTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; let mut consensus_script = TaskScript { timeout: Duration::from_millis(65), state: consensus_state, @@ -204,7 +209,9 @@ async fn test_upgrade_task_propose() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(3).await.0; + let handle = build_system_handle::(3) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -255,8 +262,10 @@ async fn test_upgrade_task_propose() { .iter() .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); - let consensus_state = ConsensusTaskState::::create_from(&handle).await; - let upgrade_state = UpgradeTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; + let upgrade_state = + UpgradeTaskState::::create_from(&handle).await; let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); @@ -279,9 +288,9 @@ async fn test_upgrade_task_propose() { proposals[2].data.block_header.builder_commitment.clone(), TestMetadata, ViewNumber::new(3), - vec1![null_block::builder_fee( + vec1![null_block::builder_fee::( quorum_membership.total_nodes(), - BaseVersion::version() + ::Base::VERSION ) .unwrap()], None, @@ -366,15 +375,20 @@ async fn test_upgrade_task_blank_blocks() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(6).await.0; + let handle = build_system_handle::(6) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; - let builder_fee = - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); + let builder_fee = null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION, + ) + .unwrap(); let upgrade_data: UpgradeProposalData = UpgradeProposalData { old_version, @@ -452,8 +466,10 @@ async fn test_upgrade_task_blank_blocks() { views.push(view.clone()); } - let consensus_state = ConsensusTaskState::::create_from(&handle).await; - let upgrade_state = UpgradeTaskState::::create_from(&handle).await; + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; + let upgrade_state = + UpgradeTaskState::::create_from(&handle).await; let inputs = vec![ vec![ diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 07099f5182..ce30a6f1fd 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -14,7 +14,7 @@ use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_macros::{run_test, test_scripts}; @@ -32,10 +32,13 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - constants::BaseVersion, data::{null_block, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, - traits::{election::Membership, node_implementation::ConsensusTime, ValidatedState}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, Versions}, + ValidatedState, + }, utils::BuilderCommitment, vote::HasViewNumber, }; @@ -59,7 +62,9 @@ async fn test_upgrade_task_with_proposal() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(3).await.0; + let handle = build_system_handle::(3) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -133,15 +138,19 @@ async fn test_upgrade_task_with_proposal() { let genesis_cert = proposals[0].data.justify_qc.clone(); let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let builder_fee = - null_block::builder_fee(quorum_membership.total_nodes(), BaseVersion::version()).unwrap(); + let builder_fee = null_block::builder_fee::( + quorum_membership.total_nodes(), + ::Base::VERSION, + ) + .unwrap(); let upgrade_votes = other_handles .iter() .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); let proposal_state = - QuorumProposalTaskState::::create_from(&handle).await; - let upgrade_state = UpgradeTaskState::::create_from(&handle).await; + QuorumProposalTaskState::::create_from(&handle).await; + let upgrade_state = + UpgradeTaskState::::create_from(&handle).await; let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 9595a0355c..5613f66734 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -14,7 +14,7 @@ use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::TestInstanceState, }; use hotshot_macros::{run_test, test_scripts}; @@ -51,7 +51,9 @@ async fn test_upgrade_task_with_vote() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -188,7 +190,8 @@ async fn test_upgrade_task_with_vote() { ), ]; - let vote_state = QuorumVoteTaskState::::create_from(&handle).await; + let vote_state = + QuorumVoteTaskState::::create_from(&handle).await; let mut vote_script = TaskScript { timeout: TIMEOUT, state: vote_state, diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 131cfe48e0..cf42272d58 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -9,7 +9,7 @@ use std::{marker::PhantomData, sync::Arc}; use hotshot::{tasks::task_state::CreateTaskState, types::SignatureKey}; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{MemoryImpl, TestTypes}, + node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_macros::{run_test, test_scripts}; @@ -21,12 +21,11 @@ use hotshot_testing::{ serial, }; use hotshot_types::{ - constants::BaseVersion, data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, BlockPayload, }, }; @@ -43,7 +42,9 @@ async fn test_vid_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 2. - let handle = build_system_handle::(2).await.0; + let handle = build_system_handle::(2) + .await + .0; let pub_key = handle.public_key(); // quorum membership for VID share distribution @@ -98,9 +99,9 @@ async fn test_vid_task() { encoded_transactions, TestMetadata, ViewNumber::new(2), - vec1::vec1![null_block::builder_fee( + vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(), - BaseVersion::version() + ::Base::VERSION ) .unwrap()], Some(vid_precompute), @@ -117,9 +118,9 @@ async fn test_vid_task() { builder_commitment, TestMetadata, ViewNumber::new(2), - vec1![null_block::builder_fee( + vec1![null_block::builder_fee::( quorum_membership.total_nodes(), - BaseVersion::version() + ::Base::VERSION ) .unwrap()], None, diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 671d9506a2..eba1c2ebdf 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, }; @@ -23,7 +23,9 @@ async fn test_view_sync_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 5. - let handle = build_system_handle::(5).await.0; + let handle = build_system_handle::(5) + .await + .0; let vote_data = ViewSyncPreCommitData { relay: 0, diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index b831c256bd..2938ea6ef9 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -5,7 +5,7 @@ use std::time::Duration; use async_broadcast::broadcast; use async_compatibility_layer::art::async_timeout; use futures::StreamExt; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::VoteDependencyHandle}; use hotshot_testing::{ @@ -14,8 +14,10 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - consensus::OuterConsensus, data::ViewNumber, traits::consensus_api::ConsensusApi, - traits::node_implementation::ConsensusTime, vote::HasViewNumber, + consensus::OuterConsensus, + data::ViewNumber, + traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, + vote::HasViewNumber, }; use itertools::Itertools; @@ -33,7 +35,7 @@ async fn test_vote_dependency_handle() { // We use a node ID of 2 here abitrarily. We just need it to build the system handle. let node_id = 2; // Construct the system handle for the node ID to build all of the state objects. - let handle = build_system_handle::(node_id) + let handle = build_system_handle::(node_id) .await .0; let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -85,19 +87,20 @@ async fn test_vote_dependency_handle() { let (event_sender, mut event_receiver) = broadcast(1024); let view_number = ViewNumber::new(node_id); - let vote_dependency_handle_state = VoteDependencyHandle:: { - public_key: handle.public_key().clone(), - private_key: handle.private_key().clone(), - consensus: OuterConsensus::new(consensus.clone()), - instance_state: handle.hotshot.instance_state(), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - storage: Arc::clone(&handle.storage()), - view_number, - sender: event_sender.clone(), - receiver: event_receiver.clone(), - decided_upgrade_certificate: Arc::clone(&handle.hotshot.decided_upgrade_certificate), - id: handle.hotshot.id, - }; + let vote_dependency_handle_state = + VoteDependencyHandle:: { + public_key: handle.public_key().clone(), + private_key: handle.private_key().clone(), + consensus: OuterConsensus::new(consensus.clone()), + instance_state: handle.hotshot.instance_state(), + quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + storage: Arc::clone(&handle.storage()), + view_number, + sender: event_sender.clone(), + receiver: event_receiver.clone(), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), + id: handle.hotshot.id, + }; vote_dependency_handle_state .handle_dep_result(inputs.clone().into_iter().map(|i| i.into()).collect()) diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index f78cc9c580..33f8af1576 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -10,7 +10,7 @@ async fn test_catchup() { use std::time::Duration; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -25,7 +25,8 @@ async fn test_catchup() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata: TestDescription = TestDescription::default(); + let mut metadata: TestDescription = + TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, updown: UpDown::Up, @@ -69,7 +70,7 @@ async fn test_catchup() { async fn test_catchup_cdn() { use std::time::Duration; - use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; + use hotshot_example_types::node_types::{PushCdnImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -84,7 +85,8 @@ async fn test_catchup_cdn() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata: TestDescription = TestDescription::default(); + let mut metadata: TestDescription = + TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, @@ -123,7 +125,7 @@ async fn test_catchup_cdn() { async fn test_catchup_one_node() { use std::time::Duration; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -137,7 +139,8 @@ async fn test_catchup_one_node() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata: TestDescription = TestDescription::default(); + let mut metadata: TestDescription = + TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, updown: UpDown::Up, @@ -178,7 +181,7 @@ async fn test_catchup_one_node() { async fn test_catchup_in_view_sync() { use std::time::Duration; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -192,7 +195,8 @@ async fn test_catchup_in_view_sync() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata: TestDescription = TestDescription::default(); + let mut metadata: TestDescription = + TestDescription::default(); let catchup_nodes = vec![ ChangeNode { idx: 18, @@ -240,7 +244,7 @@ async fn test_catchup_in_view_sync() { async fn test_catchup_reload() { use std::time::Duration; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -255,7 +259,8 @@ async fn test_catchup_reload() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata: TestDescription = TestDescription::default(); + let mut metadata: TestDescription = + TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, updown: UpDown::Up, @@ -299,7 +304,7 @@ async fn test_catchup_reload() { async fn test_all_restart() { use std::time::Duration; - use hotshot_example_types::node_types::{CombinedImpl, TestTypes}; + use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -314,7 +319,8 @@ async fn test_all_restart() { next_view_timeout: 2000, ..Default::default() }; - let mut metadata: TestDescription = TestDescription::default(); + let mut metadata: TestDescription = + TestDescription::default(); let mut catchup_nodes = vec![]; for i in 1..20 { catchup_nodes.push(ChangeNode { diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_2/push_cdn.rs index dbc0d152c1..5b2dc0d5b1 100644 --- a/testing/tests/tests_2/push_cdn.rs +++ b/testing/tests/tests_2/push_cdn.rs @@ -7,7 +7,7 @@ use std::time::Duration; use async_compatibility_layer::logging::shutdown_logging; -use hotshot_example_types::node_types::{PushCdnImpl, TestTypes}; +use hotshot_example_types::node_types::{PushCdnImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -23,7 +23,7 @@ use tracing::instrument; async fn push_cdn_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index 030103a37e..5408b57eb5 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -20,6 +20,7 @@ cross_tests!( TestName: test_with_failures_one, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 4fe3ae6d49..03a9abe2d1 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -19,12 +19,13 @@ use hotshot::{ use hotshot_example_types::{ auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; use hotshot_types::{ data::ViewNumber, - message::{DataMessage, Message, MessageKind, VersionedMessage}, + message::{DataMessage, Message, MessageKind, UpgradeLock}, signature_key::{BLSPubKey, BuilderKey}, traits::{ network::{BroadcastDelay, ConnectedNetwork, TestableNetworkingImplementation, Topic}, @@ -34,7 +35,6 @@ use hotshot_types::{ use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::{Deserialize, Serialize}; use tracing::{instrument, trace}; -use vbs::version::StaticVersion; #[derive( Copy, @@ -53,12 +53,6 @@ pub struct Test; impl NodeType for Test { type AuctionResult = TestAuctionResult; - type Base = StaticVersion<0, 1>; - type Upgrade = StaticVersion<0, 2>; - const UPGRADE_HASH: [u8; 32] = [ - 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, - ]; type Time = ViewNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; @@ -167,10 +161,12 @@ async fn memory_network_direct_queue() { let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + let upgrade_lock = UpgradeLock::::new(); + // Test 1 -> 2 // Send messages for sent_message in first_messages { - let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); + let serialized_message = upgrade_lock.serialize(&sent_message).await.unwrap(); network1 .direct_message(serialized_message.clone(), pub_key_2) .await @@ -180,7 +176,7 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); - let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); + let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); assert!(recv_messages.is_empty()); fake_message_eq(sent_message, deserialized_message); } @@ -190,7 +186,7 @@ async fn memory_network_direct_queue() { // Test 2 -> 1 // Send messages for sent_message in second_messages { - let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); + let serialized_message = upgrade_lock.serialize(&sent_message).await.unwrap(); network2 .direct_message(serialized_message.clone(), pub_key_1) .await @@ -200,7 +196,7 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); - let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); + let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); assert!(recv_messages.is_empty()); fake_message_eq(sent_message, deserialized_message); } @@ -220,10 +216,12 @@ async fn memory_network_broadcast_queue() { let first_messages: Vec> = gen_messages(5, 100, pub_key_1); + let upgrade_lock = UpgradeLock::::new(); + // Test 1 -> 2 // Send messages for sent_message in first_messages { - let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); + let serialized_message = upgrade_lock.serialize(&sent_message).await.unwrap(); network1 .broadcast_message(serialized_message.clone(), Topic::Da, BroadcastDelay::None) .await @@ -233,7 +231,7 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); - let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); + let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); assert!(recv_messages.is_empty()); fake_message_eq(sent_message, deserialized_message); } @@ -243,7 +241,7 @@ async fn memory_network_broadcast_queue() { // Test 2 -> 1 // Send messages for sent_message in second_messages { - let serialized_message = VersionedMessage::serialize(&sent_message, &None).unwrap(); + let serialized_message = upgrade_lock.serialize(&sent_message).await.unwrap(); network2 .broadcast_message( serialized_message.clone(), @@ -257,7 +255,7 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let recv_message = recv_messages.pop().unwrap(); - let deserialized_message = VersionedMessage::deserialize(&recv_message, &None).unwrap(); + let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); assert!(recv_messages.is_empty()); fake_message_eq(sent_message, deserialized_message); } @@ -289,8 +287,10 @@ async fn memory_network_test_in_flight_message_count() { Some(0) ); + let upgrade_lock = UpgradeLock::::new(); + for (count, message) in messages.iter().enumerate() { - let serialized_message = VersionedMessage::serialize(message, &None).unwrap(); + let serialized_message = upgrade_lock.serialize(message).await.unwrap(); network1 .direct_message(serialized_message.clone(), pub_key_2) diff --git a/testing/tests/tests_3/test_with_builder_failures.rs b/testing/tests/tests_3/test_with_builder_failures.rs index 4f01d92d94..1b2d3ac13c 100644 --- a/testing/tests/tests_3/test_with_builder_failures.rs +++ b/testing/tests/tests_3/test_with_builder_failures.rs @@ -6,7 +6,7 @@ use std::time::Duration; -use hotshot_example_types::node_types::{MemoryImpl, PushCdnImpl, TestTypes}; +use hotshot_example_types::node_types::{MemoryImpl, PushCdnImpl, TestTypes, TestVersions}; use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, @@ -19,6 +19,7 @@ cross_tests!( TestName: test_with_builder_failures, Impls: [MemoryImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_multiple_rounds(); diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 76f37bde10..e4d7f58d68 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -19,6 +19,7 @@ cross_tests!( TestName: test_with_failures_half_f, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 778062fc66..c36b043294 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -19,6 +19,7 @@ cross_tests!( TestName: test_with_failures_f, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], Types: [TestTypes], + Versions: [TestVersions], Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 45e36e895f..bc35b9ad3c 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -6,7 +6,7 @@ use std::time::Duration; -use hotshot_example_types::node_types::{CombinedImpl, TestTypes}; +use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -27,7 +27,7 @@ async fn test_combined_network() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -62,7 +62,7 @@ async fn test_combined_network() { async fn test_combined_network_cdn_crash() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -109,7 +109,7 @@ async fn test_combined_network_cdn_crash() { async fn test_combined_network_reup() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -162,7 +162,7 @@ async fn test_combined_network_reup() { async fn test_combined_network_half_dc() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { round_start_delay: 25, next_view_timeout: 10_000, @@ -238,7 +238,7 @@ fn generate_random_node_changes( async fn test_stress_combined_network_fuzzy() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { num_bootstrap_nodes: 10, num_nodes_with_stake: 20, start_nodes: 20, diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index b7b31c9e7c..2a9cd4e73a 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -12,7 +12,7 @@ async fn test_timeout() { use std::time::Duration; - use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -27,7 +27,7 @@ async fn test_timeout() { ..Default::default() }; - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, ..Default::default() @@ -72,7 +72,7 @@ async fn test_timeout() { async fn test_timeout_libp2p() { use std::time::Duration; - use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; + use hotshot_example_types::node_types::{Libp2pImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -90,7 +90,7 @@ async fn test_timeout_libp2p() { ..Default::default() }; - let mut metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, num_bootstrap_nodes: 10, diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index f3d4c18d52..1525ca55e9 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -6,7 +6,7 @@ use std::time::{Duration, Instant}; -use hotshot_example_types::node_types::{Libp2pImpl, TestTypes}; +use hotshot_example_types::node_types::{Libp2pImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, @@ -24,7 +24,7 @@ use tracing::instrument; async fn libp2p_network_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -62,7 +62,7 @@ async fn test_memory_network_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -89,7 +89,7 @@ async fn test_memory_network_sync() { async fn libp2p_network_async() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 50, @@ -103,7 +103,8 @@ async fn libp2p_network_async() { timing_data: TimingData { timeout_ratio: (1, 1), next_view_timeout: 25000, - ..TestDescription::::default_multiple_rounds().timing_data + ..TestDescription::::default_multiple_rounds() + .timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 9, @@ -136,7 +137,7 @@ async fn test_memory_network_async() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 5000, @@ -151,7 +152,8 @@ async fn test_memory_network_async() { timing_data: TimingData { timeout_ratio: (1, 1), next_view_timeout: 1000, - ..TestDescription::::default_multiple_rounds().timing_data + ..TestDescription::::default_multiple_rounds() + .timing_data }, unreliable_network: Some(Box::new(AsynchronousNetwork { keep_numerator: 95, @@ -182,7 +184,7 @@ async fn test_memory_network_partially_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, ..Default::default() @@ -226,7 +228,7 @@ async fn test_memory_network_partially_sync() { async fn libp2p_network_partially_sync() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, ..Default::default() @@ -275,7 +277,7 @@ async fn test_memory_network_chaos() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -306,7 +308,7 @@ async fn test_memory_network_chaos() { async fn libp2p_network_chaos() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - let metadata: TestDescription = TestDescription { + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() diff --git a/types/src/constants.rs b/types/src/constants.rs index 2d497c0674..51db4cd41e 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -8,8 +8,6 @@ use std::time::Duration; -use vbs::version::StaticVersion; - /// timeout for fetching auction results from the solver pub const AUCTION_RESULTS_FETCH_TIMEOUT: Duration = Duration::from_millis(500); @@ -43,27 +41,6 @@ pub const EVENT_CHANNEL_SIZE: usize = 100_000; /// Default channel size for HotShot -> application communication pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 100_000; -/// Constants for `WebServerNetwork` and `WebServer` -/// The Web CDN is not, strictly speaking, bound to the network; it can have its own versioning. -/// Web Server CDN Version (major) -pub const WEB_SERVER_MAJOR_VERSION: u16 = 0; -/// Web Server CDN Version (minor) -pub const WEB_SERVER_MINOR_VERSION: u16 = 1; - -/// Type for Web Server CDN Version -pub type WebServerVersion = StaticVersion; - -/// Constant for Web Server CDN Version -pub const WEB_SERVER_VERSION: WebServerVersion = StaticVersion {}; - -/// Type for semver representation of "Base" version -pub type BaseVersion = StaticVersion<0, 1>; - -/// Type for semver representation of "Marketplace" version -pub type MarketplaceVersion = StaticVersion<0, 3>; -/// Constant for semver representation of "Marketplace" version -pub const MARKETPLACE_VERSION: MarketplaceVersion = StaticVersion {}; - /// The offset for how far in the future we will send out a `QuorumProposal` with an `UpgradeCertificate` we form. This is also how far in advance of sending a `QuorumProposal` we begin collecting votes on an `UpgradeProposal`. pub const UPGRADE_PROPOSE_OFFSET: u64 = 5; diff --git a/types/src/data.rs b/types/src/data.rs index 9c61f6b9b6..15eb3599e6 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -802,8 +802,10 @@ pub mod null_block { use crate::{ traits::{ - block_contents::BuilderFee, node_implementation::NodeType, - signature_key::BuilderSignatureKey, BlockPayload, + block_contents::BuilderFee, + node_implementation::{NodeType, Versions}, + signature_key::BuilderSignatureKey, + BlockPayload, }, vid::{vid_scheme, VidCommitment}, }; @@ -827,7 +829,7 @@ pub mod null_block { /// Builder fee data for a null block payload #[must_use] - pub fn builder_fee( + pub fn builder_fee( num_storage_nodes: usize, version: vbs::version::Version, ) -> Option> { @@ -839,7 +841,7 @@ pub mod null_block { [0_u8; 32], 0, ); - if version >= crate::constants::MarketplaceVersion::version() { + if version >= V::Marketplace::VERSION { match TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace(&priv_key, FEE_AMOUNT) { Ok(sig) => Some(BuilderFee { diff --git a/types/src/message.rs b/types/src/message.rs index ccbd53347f..dcd653e7b2 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -9,9 +9,10 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use std::{fmt, fmt::Debug, marker::PhantomData}; +use std::{fmt, fmt::Debug, marker::PhantomData, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; +use async_lock::RwLock; use cdn_proto::util::mnemonic; use committable::Committable; use derivative::Derivative; @@ -24,7 +25,7 @@ use vbs::{ use crate::{ data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, simple_certificate::{ - version, DaCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, + DaCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ @@ -34,7 +35,7 @@ use crate::{ traits::{ election::Membership, network::{DataRequest, ResponseMessage, ViewMessage}, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, vote::HasViewNumber, @@ -51,74 +52,6 @@ pub struct Message { pub kind: MessageKind, } -/// Trait for messages that have a versioned serialization. -pub trait VersionedMessage<'a, TYPES> -where - TYPES: NodeType, - Self: Serialize + Deserialize<'a> + HasViewNumber + Sized, -{ - /// Serialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. - /// - /// # Errors - /// - /// Errors if serialization fails. - fn serialize( - &self, - upgrade_certificate: &Option>, - ) -> Result> { - let view = self.view_number(); - - let version = version(view, upgrade_certificate)?; - - let serialized_message = match version { - // Associated constants cannot be used in pattern matches, so we do this trick instead. - v if v == TYPES::Base::VERSION => Serializer::::serialize(&self), - v if v == TYPES::Upgrade::VERSION => Serializer::::serialize(&self), - _ => { - bail!("Attempted to serialize with an incompatible version. This should be impossible."); - } - }; - - serialized_message.context("Failed to serialize message!") - } - - /// Deserialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. This function will fail on improperly versioned messages. - /// - /// # Errors - /// - /// Errors if deserialization fails. - fn deserialize( - message: &'a [u8], - upgrade_certificate: &Option>, - ) -> Result { - let actual_version = Version::deserialize(message) - .context("Failed to read message version!")? - .0; - - let deserialized_message: Self = match actual_version { - v if v == TYPES::Base::VERSION => Serializer::::deserialize(message), - v if v == TYPES::Upgrade::VERSION => Serializer::::deserialize(message), - _ => { - bail!("Cannot deserialize message!"); - } - } - .context("Failed to deserialize message!")?; - - let view = deserialized_message.view_number(); - - let expected_version = version(view, upgrade_certificate)?; - - ensure!( - actual_version == expected_version, - "Message has invalid version number for its view. Expected: {expected_version}, Actual: {actual_version}, View: {view:?}" - ); - - Ok(deserialized_message) - } -} - -impl<'a, TYPES> VersionedMessage<'a, TYPES> for Message where TYPES: NodeType {} - impl fmt::Debug for Message { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Message") @@ -420,3 +353,108 @@ where Ok(()) } } + +#[derive(Clone)] +/// A lock for an upgrade certificate decided by HotShot, which doubles as `PhantomData` for an instance of the `Versions` trait. +pub struct UpgradeLock { + /// a shared lock to an upgrade certificate decided by consensus + pub decided_upgrade_certificate: Arc>>>, + + /// phantom data for the `Versions` trait + pub _pd: PhantomData, +} + +impl UpgradeLock { + #[allow(clippy::new_without_default)] + /// Create a new `UpgradeLock` for a fresh instance of HotShot + pub fn new() -> Self { + Self { + decided_upgrade_certificate: Arc::new(RwLock::new(None)), + _pd: PhantomData::, + } + } + + /// Calculate the version applied in a view, based on the provided upgrade lock. + /// + /// # Errors + /// Returns an error if we do not support the version required by the decided upgrade certificate. + pub async fn version(&self, view: TYPES::Time) -> Result { + let upgrade_certificate = self.decided_upgrade_certificate.read().await; + + let version = match *upgrade_certificate { + Some(ref cert) => { + if view >= cert.data.new_version_first_view { + if cert.data.new_version == V::Upgrade::VERSION { + V::Upgrade::VERSION + } else { + bail!("The network has upgraded to a new version that we do not support!"); + } + } else { + V::Base::VERSION + } + } + None => V::Base::VERSION, + }; + + Ok(version) + } + + /// Serialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. + /// + /// # Errors + /// + /// Errors if serialization fails. + pub async fn serialize + Serialize>( + &self, + message: &M, + ) -> Result> { + let view = message.view_number(); + + let version = self.version(view).await?; + + let serialized_message = match version { + // Associated constants cannot be used in pattern matches, so we do this trick instead. + v if v == V::Base::VERSION => Serializer::::serialize(&message), + v if v == V::Upgrade::VERSION => Serializer::::serialize(&message), + _ => { + bail!("Attempted to serialize with an incompatible version. This should be impossible."); + } + }; + + serialized_message.context("Failed to serialize message!") + } + + /// Deserialize a message with a version number, using `message.view_number()` to determine the message's version. This function will fail on improperly versioned messages. + /// + /// # Errors + /// + /// Errors if deserialization fails. + pub async fn deserialize + for<'a> Deserialize<'a>>( + &self, + message: &[u8], + ) -> Result { + let actual_version = Version::deserialize(message) + .context("Failed to read message version!")? + .0; + + let deserialized_message: M = match actual_version { + v if v == V::Base::VERSION => Serializer::::deserialize(message), + v if v == V::Upgrade::VERSION => Serializer::::deserialize(message), + _ => { + bail!("Cannot deserialize message!"); + } + } + .context("Failed to deserialize message!")?; + + let view = deserialized_message.view_number(); + + let expected_version = self.version(view).await?; + + ensure!( + actual_version == expected_version, + "Message has invalid version number for its view. Expected: {expected_version}, Actual: {actual_version}, View: {view:?}" + ); + + Ok(deserialized_message) + } +} diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 637a32d1e7..4069cb2d98 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -13,12 +13,11 @@ use std::{ sync::Arc, }; -use anyhow::{bail, ensure, Result}; +use anyhow::{ensure, Result}; use async_lock::RwLock; use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; -use vbs::version::{StaticVersionType, Version}; use crate::{ data::serialize_signature2, @@ -247,29 +246,3 @@ pub type ViewSyncFinalizeCertificate2 = /// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` pub type UpgradeCertificate = SimpleCertificate, UpgradeThreshold>; - -/// Calculate the version applied in a view, based on the provided upgrade certificate. -/// -/// # Errors -/// Returns an error if we do not support the version required by the upgrade certificate. -pub fn version( - view: TYPES::Time, - upgrade_certificate: &Option>, -) -> Result { - let version = match upgrade_certificate { - Some(ref cert) => { - if view >= cert.data.new_version_first_view { - if cert.data.new_version == TYPES::Upgrade::VERSION { - TYPES::Upgrade::VERSION - } else { - bail!("The network has upgraded to a new version that we do not support!"); - } - } else { - TYPES::Base::VERSION - } - } - None => TYPES::Base::VERSION, - }; - - Ok(version) -} diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 8d9a6a4ffd..1bb9bcb3fc 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -207,15 +207,6 @@ pub trait NodeType: + Sync + 'static { - /// The base version of HotShot this node is instantiated with. - type Base: StaticVersionType; - - /// The version of HotShot this node may be upgraded to. Set equal to `Base` to disable upgrades. - type Upgrade: StaticVersionType; - - /// The hash for the upgrade. - const UPGRADE_HASH: [u8; 32]; - /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. @@ -261,3 +252,18 @@ pub trait NodeType: /// The type builder uses to sign its messages type BuilderSignatureKey: BuilderSignatureKey; } + +/// Version information for HotShot +pub trait Versions: Clone + Copy + Debug + Send + Sync + 'static { + /// The base version of HotShot this node is instantiated with. + type Base: StaticVersionType; + + /// The version of HotShot this node may be upgraded to. Set equal to `Base` to disable upgrades. + type Upgrade: StaticVersionType; + + /// The hash for the upgrade. + const UPGRADE_HASH: [u8; 32]; + + /// The version at which to switch over to marketplace logic + type Marketplace: StaticVersionType; +} From cf5a1c3432d8a2ea54183806eddd9c032e121d5b Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 13 Aug 2024 11:02:28 -0400 Subject: [PATCH 1180/1393] [Libp2p] Cleanup for PushCDN Request Response Implementation (#3566) * wip first pass * autofix * change pushcdn to use signature key directly instead of TYPES * complete refactor of type namespace --- example-types/src/node_types.rs | 2 +- examples/combined/all.rs | 44 ++++---- examples/infra/mod.rs | 25 ++--- examples/push-cdn/all.rs | 44 ++++---- examples/push-cdn/broker.rs | 4 +- examples/push-cdn/marshal.rs | 4 +- examples/push-cdn/types.rs | 3 +- hotshot/src/tasks/mod.rs | 3 +- .../src/traits/networking/combined_network.rs | 15 +-- .../src/traits/networking/libp2p_network.rs | 35 +++--- .../src/traits/networking/push_cdn_network.rs | 103 ++++++++++-------- .../network/behaviours/request_response.rs | 14 +-- libp2p-networking/src/network/def.rs | 6 +- libp2p-networking/src/network/mod.rs | 6 +- libp2p-networking/src/network/node.rs | 6 +- libp2p-networking/src/network/node/handle.rs | 6 +- task-impls/src/response.rs | 11 +- types/Cargo.toml | 2 + types/src/lib.rs | 1 + types/src/request_response.rs | 41 +++++++ types/src/traits/network.rs | 34 +----- 21 files changed, 225 insertions(+), 184 deletions(-) create mode 100644 types/src/request_response.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index a3a657acaf..90e1394326 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -110,7 +110,7 @@ pub struct CombinedImpl; pub type StaticMembership = StaticCommittee; impl NodeImplementation for PushCdnImpl { - type Network = PushCdnNetwork; + type Network = PushCdnNetwork; type Storage = TestStorage; type AuctionResultsProvider = TestAuctionResultsProvider; } diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 70363db3ec..4ca1e46f3f 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -72,27 +72,28 @@ async fn main() { let private_address = format!("127.0.0.1:{private_port}"); let public_address = format!("127.0.0.1:{public_port}"); - let config: cdn_broker::Config> = cdn_broker::Config { - discovery_endpoint: discovery_endpoint.clone(), - public_advertise_endpoint: public_address.clone(), - public_bind_endpoint: public_address, - private_advertise_endpoint: private_address.clone(), - private_bind_endpoint: private_address, - - keypair: KeyPair { - public_key: WrappedSignatureKey(broker_public_key), - private_key: broker_private_key.clone(), - }, - - metrics_bind_endpoint: None, - ca_cert_path: None, - ca_key_path: None, - global_memory_pool_size: Some(1024 * 1024 * 1024), - }; + let config: cdn_broker::Config::SignatureKey>> = + cdn_broker::Config { + discovery_endpoint: discovery_endpoint.clone(), + public_advertise_endpoint: public_address.clone(), + public_bind_endpoint: public_address, + private_advertise_endpoint: private_address.clone(), + private_bind_endpoint: private_address, + + keypair: KeyPair { + public_key: WrappedSignatureKey(broker_public_key), + private_key: broker_private_key.clone(), + }, + + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + global_memory_pool_size: Some(1024 * 1024 * 1024), + }; // Create and spawn the broker async_spawn(async move { - let broker: Broker> = + let broker: Broker::SignatureKey>> = Broker::new(config).await.expect("broker failed to start"); // Error if we stopped unexpectedly @@ -120,9 +121,10 @@ async fn main() { // Spawn the marshal async_spawn(async move { - let marshal: Marshal> = Marshal::new(marshal_config) - .await - .expect("failed to spawn marshal"); + let marshal: Marshal::SignatureKey>> = + Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); // Error if we stopped unexpectedly if let Err(err) = marshal.start().await { diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 9ca8cc127a..8fe91fa855 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -603,7 +603,7 @@ pub struct PushCdnDaRun { /// The underlying configuration config: NetworkConfig, /// The underlying network - network: PushCdnNetwork, + network: PushCdnNetwork, } #[async_trait] @@ -616,12 +616,12 @@ impl< >, NODE: NodeImplementation< TYPES, - Network = PushCdnNetwork, + Network = PushCdnNetwork, Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, V: Versions, - > RunDa, NODE, V> for PushCdnDaRun + > RunDa, NODE, V> for PushCdnDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -665,7 +665,7 @@ where PushCdnDaRun { config, network } } - fn network(&self) -> PushCdnNetwork { + fn network(&self) -> PushCdnNetwork { self.network.clone() } @@ -808,15 +808,14 @@ where .await; // Initialize our CDN network - let cdn_network: PushCdnDaRun = as RunDa< - TYPES, - PushCdnNetwork, - PushCdnImpl, - V, - >>::initialize_networking( - config.clone(), libp2p_advertise_address - ) - .await; + let cdn_network: PushCdnDaRun = + as RunDa< + TYPES, + PushCdnNetwork, + PushCdnImpl, + V, + >>::initialize_networking(config.clone(), libp2p_advertise_address) + .await; // Create our combined network config let delay_duration = config diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 6d97d34d9f..3d3de5d42e 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -78,27 +78,28 @@ async fn main() { let private_address = format!("127.0.0.1:{private_port}"); let public_address = format!("127.0.0.1:{public_port}"); - let config: cdn_broker::Config> = cdn_broker::Config { - discovery_endpoint: discovery_endpoint.clone(), - public_advertise_endpoint: public_address.clone(), - public_bind_endpoint: public_address, - private_advertise_endpoint: private_address.clone(), - private_bind_endpoint: private_address, - - keypair: KeyPair { - public_key: WrappedSignatureKey(broker_public_key), - private_key: broker_private_key.clone(), - }, - - metrics_bind_endpoint: None, - ca_cert_path: None, - ca_key_path: None, - global_memory_pool_size: Some(1024 * 1024 * 1024), - }; + let config: cdn_broker::Config::SignatureKey>> = + cdn_broker::Config { + discovery_endpoint: discovery_endpoint.clone(), + public_advertise_endpoint: public_address.clone(), + public_bind_endpoint: public_address, + private_advertise_endpoint: private_address.clone(), + private_bind_endpoint: private_address, + + keypair: KeyPair { + public_key: WrappedSignatureKey(broker_public_key), + private_key: broker_private_key.clone(), + }, + + metrics_bind_endpoint: None, + ca_cert_path: None, + ca_key_path: None, + global_memory_pool_size: Some(1024 * 1024 * 1024), + }; // Create and spawn the broker async_spawn(async move { - let broker: Broker> = + let broker: Broker::SignatureKey>> = Broker::new(config).await.expect("broker failed to start"); // Error if we stopped unexpectedly @@ -124,9 +125,10 @@ async fn main() { // Spawn the marshal async_spawn(async move { - let marshal: Marshal> = Marshal::new(marshal_config) - .await - .expect("failed to spawn marshal"); + let marshal: Marshal::SignatureKey>> = + Marshal::new(marshal_config) + .await + .expect("failed to spawn marshal"); // Error if we stopped unexpectedly if let Err(err) = marshal.start().await { diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 00232e771d..7eabbec50f 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -33,7 +33,7 @@ struct Args { #[arg(long, default_value = "local_ip:1738")] public_advertise_endpoint: String, - /// The broker-facing endpoint in `IP:port` form to bind to for connections from + /// The broker-facing endpoint in `IP:port` form to bind to for connections from /// other brokers #[arg(long, default_value = "0.0.0.0:1739")] private_bind_endpoint: String, @@ -92,7 +92,7 @@ async fn main() -> Result<()> { ::SignatureKey::generated_from_seed_indexed(key_hash.into(), 1337); // Create config - let broker_config: Config> = Config { + let broker_config: Config::SignatureKey>> = Config { ca_cert_path: args.ca_cert_path, ca_key_path: args.ca_key_path, diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index fde57cd28d..39d2267bd8 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -12,6 +12,7 @@ use cdn_marshal::{Config, Marshal}; use clap::Parser; use hotshot::traits::implementations::ProductionDef; use hotshot_example_types::node_types::TestTypes; +use hotshot_types::traits::node_implementation::NodeType; use tracing_subscriber::EnvFilter; // TODO: forall, add logging where we need it @@ -80,7 +81,8 @@ async fn main() -> Result<()> { }; // Create new `Marshal` from the config - let marshal = Marshal::>::new(config).await?; + let marshal = + Marshal::::SignatureKey>>::new(config).await?; // Start the main loop, consuming it marshal.start().await?; diff --git a/examples/push-cdn/types.rs b/examples/push-cdn/types.rs index 963d51bd6e..8803c72152 100644 --- a/examples/push-cdn/types.rs +++ b/examples/push-cdn/types.rs @@ -9,6 +9,7 @@ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, storage_types::TestStorage, }; +use hotshot_types::traits::node_implementation::NodeType; use serde::{Deserialize, Serialize}; use crate::infra::PushCdnDaRun; @@ -18,7 +19,7 @@ use crate::infra::PushCdnDaRun; pub struct NodeImpl {} /// Convenience type alias -pub type Network = PushCdnNetwork; +pub type Network = PushCdnNetwork<::SignatureKey>; impl NodeImplementation for NodeImpl { type Network = Network; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index d01facfadd..ea5710dd23 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -27,7 +27,7 @@ use hotshot_task_impls::{ network, network::{NetworkEventTaskState, NetworkMessageTaskState}, request::NetworkRequestState, - response::{run_response_task, NetworkResponseState, RequestReceiver}, + response::{run_response_task, NetworkResponseState}, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, @@ -37,6 +37,7 @@ use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::QuorumProposal, message::{Messages, Proposal}, + request_response::RequestReceiver, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 74d858e912..8403964d60 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -37,8 +37,9 @@ use hotshot_types::{ COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }, data::ViewNumber, + request_response::NetworkMsgResponseChannel, traits::{ - network::{BroadcastDelay, ConnectedNetwork, ResponseChannel, Topic}, + network::{BroadcastDelay, ConnectedNetwork, Topic}, node_implementation::NodeType, }, BoxSyncFuture, @@ -93,7 +94,7 @@ impl CombinedNetworks { /// Panics if `COMBINED_NETWORK_CACHE_SIZE` is 0 #[must_use] pub fn new( - primary_network: PushCdnNetwork, + primary_network: PushCdnNetwork, secondary_network: Libp2pNetwork, delay_duration: Option, ) -> Self { @@ -120,7 +121,7 @@ impl CombinedNetworks { /// Get a ref to the primary network #[must_use] - pub fn primary(&self) -> &PushCdnNetwork { + pub fn primary(&self) -> &PushCdnNetwork { &self.networks.0 } @@ -249,7 +250,7 @@ impl CombinedNetworks { /// on the tuple #[derive(Clone)] pub struct UnderlyingCombinedNetworks( - pub PushCdnNetwork, + pub PushCdnNetwork, pub Libp2pNetwork, ); @@ -265,7 +266,7 @@ impl TestableNetworkingImplementation for CombinedNetwor secondary_network_delay: Duration, ) -> AsyncGenerator> { let generators = ( - as TestableNetworkingImplementation>::generator( + as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, network_id, @@ -291,7 +292,7 @@ impl TestableNetworkingImplementation for CombinedNetwor Box::pin(async move { // Generate the CDN network let cdn = gen0.await; - let cdn = Arc::>::into_inner(cdn).unwrap(); + let cdn = Arc::>::into_inner(cdn).unwrap(); // Generate the p2p network let p2p = gen1.await; @@ -345,7 +346,7 @@ impl ConnectedNetwork for CombinedNetworks async fn spawn_request_receiver_task( &self, - ) -> Option, ResponseChannel>)>> { + ) -> Option, NetworkMsgResponseChannel>)>> { self.secondary().spawn_request_receiver_task().await } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a6103213a2..1aaada8428 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -34,36 +34,37 @@ use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use bimap::BiHashMap; use futures::{ - channel::mpsc::{self, channel, Receiver, Sender}, + channel::mpsc::{self, channel, Sender}, future::{join_all, Either}, FutureExt, StreamExt, }; use hotshot_orchestrator::config::NetworkConfig; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::network::{ - AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, -}; use hotshot_types::{ boxed_sync, constants::LOOK_AHEAD, data::ViewNumber, message::{DataMessage::DataResponse, Message, MessageKind}, + request_response::{NetworkMsgResponseChannel, Request, Response}, traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, - network::{self, ConnectedNetwork, NetworkError, ResponseMessage, Topic}, + network::{ConnectedNetwork, NetworkError, ResponseMessage, Topic}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, BoxSyncFuture, }; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::{ + request_response::TakeReceiver, + traits::network::{AsyncGenerator, NetworkReliability, TestableNetworkingImplementation}, +}; use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, PeerId, }; use libp2p_networking::{ network::{ - behaviours::request_response::{Request, Response}, spawn_network_node, transport::construct_auth_message, MeshParams, @@ -140,9 +141,6 @@ impl Debug for Libp2pNetwork { } } -/// Locked Option of a receiver for moving the value out of the option -type TakeReceiver = Mutex, ResponseChannel)>>>; - /// Type alias for a shared collection of peerid, multiaddrs pub type PeerInfoVec = Arc>>; @@ -879,7 +877,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn spawn_request_receiver_task( &self, - ) -> Option, network::ResponseChannel>)>> { + ) -> Option, NetworkMsgResponseChannel>)>> { let mut internal_rx = self.inner.requests_rx.lock().await.take()?; let handle = Arc::clone(&self.inner.handle); let (mut tx, rx) = mpsc::channel(100); @@ -889,7 +887,7 @@ impl ConnectedNetwork for Libp2pNetwork { if tx .try_send(( request, - network::ResponseChannel { + NetworkMsgResponseChannel { sender: response_tx, }, )) @@ -1125,7 +1123,18 @@ impl ConnectedNetwork for Libp2pNetwork { .try_send(Some((view_number, pk))) } - /// handles view update + /// The libp2p view update is a special operation intrinsic to its internal behavior. + /// + /// Libp2p needs to do a lookup because a libp2p address is not releated to + /// hotshot keys. So in libp2p we store a mapping of HotShot key to libp2p address + /// in a distributed hash table. + /// + /// This means to directly message someone on libp2p we need to lookup in the hash + /// table what their libp2p address is, using their HotShot public key as the key. + /// + /// So the logic with libp2p is to prefetch upcomming leaders libp2p address to + /// save time when we later need to direct message the leader our vote. Hence the + /// use of the future view and leader to queue the lookups. async fn update_view<'a, TYPES>(&'a self, view: u64, membership: &TYPES::Membership) where TYPES: NodeType + 'a, diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 3a1c68db4f..36a3b5138f 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -125,27 +125,27 @@ impl Serializable for WrappedSignatureKey { /// The production run definition for the Push CDN. /// Uses the real protocols and a Redis discovery client. -pub struct ProductionDef(PhantomData); -impl RunDef for ProductionDef { - type User = UserDef; - type Broker = BrokerDef; +pub struct ProductionDef(PhantomData); +impl RunDef for ProductionDef { + type User = UserDef; + type Broker = BrokerDef; type DiscoveryClientType = Redis; type Topic = Topic; } /// The user definition for the Push CDN. /// Uses the Quic protocol and untrusted middleware. -pub struct UserDef(PhantomData); -impl ConnectionDef for UserDef { - type Scheme = WrappedSignatureKey; +pub struct UserDef(PhantomData); +impl ConnectionDef for UserDef { + type Scheme = WrappedSignatureKey; type Protocol = Quic; } /// The broker definition for the Push CDN. /// Uses the TCP protocol and trusted middleware. -pub struct BrokerDef(PhantomData); -impl ConnectionDef for BrokerDef { - type Scheme = WrappedSignatureKey; +pub struct BrokerDef(PhantomData); +impl ConnectionDef for BrokerDef { + type Scheme = WrappedSignatureKey; type Protocol = Tcp; } @@ -153,18 +153,18 @@ impl ConnectionDef for BrokerDef { /// protocol and no middleware. Differs from the user /// definition in that is on the client-side. #[derive(Clone)] -pub struct ClientDef(PhantomData); -impl ConnectionDef for ClientDef { - type Scheme = WrappedSignatureKey; +pub struct ClientDef(PhantomData); +impl ConnectionDef for ClientDef { + type Scheme = WrappedSignatureKey; type Protocol = Quic; } /// The testing run definition for the Push CDN. /// Uses the real protocols, but with an embedded discovery client. -pub struct TestingDef(PhantomData); -impl RunDef for TestingDef { - type User = UserDef; - type Broker = BrokerDef; +pub struct TestingDef(PhantomData); +impl RunDef for TestingDef { + type User = UserDef; + type Broker = BrokerDef; type DiscoveryClientType = Embedded; type Topic = Topic; } @@ -173,14 +173,16 @@ impl RunDef for TestingDef { /// that helps organize them all. #[derive(Clone)] /// Is generic over both the type of key and the network protocol. -pub struct PushCdnNetwork { +pub struct PushCdnNetwork { /// The underlying client - client: Client>, + client: Client>, /// The CDN-specific metrics metrics: Arc, /// Whether or not the underlying network is supposed to be paused #[cfg(feature = "hotshot-testing")] is_paused: Arc, + // The receiver channel for + // request_receiver_channel: TakeReceiver, } /// The enum for the topics we can subscribe to in the Push CDN @@ -197,7 +199,7 @@ pub enum Topic { /// topics that are not implemented at the application level. impl TopicTrait for Topic {} -impl PushCdnNetwork { +impl PushCdnNetwork { /// Create a new `PushCdnNetwork` (really a client) from a marshal endpoint, a list of initial /// topics we are interested in, and our wrapped keypair that we use to authenticate with the /// marshal. @@ -207,7 +209,7 @@ impl PushCdnNetwork { pub fn new( marshal_endpoint: String, topics: Vec, - keypair: KeyPair>, + keypair: KeyPair>, metrics: CdnMetricsValue, ) -> anyhow::Result { // Build config @@ -258,7 +260,9 @@ impl PushCdnNetwork { } #[cfg(feature = "hotshot-testing")] -impl TestableNetworkingImplementation for PushCdnNetwork { +impl TestableNetworkingImplementation + for PushCdnNetwork +{ /// Generate n Push CDN clients, a marshal, and two brokers (that run locally). /// Uses a `SQLite` database instead of Redis. #[allow(clippy::too_many_lines)] @@ -317,7 +321,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork let other_broker_identifier = format!("{other_public_address}/{other_public_address}"); // Configure the broker - let config: BrokerConfig> = BrokerConfig { + let config: BrokerConfig> = BrokerConfig { public_advertise_endpoint: public_address.clone(), public_bind_endpoint: public_address, private_advertise_endpoint: private_address.clone(), @@ -336,7 +340,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Create and spawn the broker async_spawn(async move { - let broker: Broker> = + let broker: Broker> = Broker::new(config).await.expect("broker failed to start"); // If we are the first broker by identifier, we need to sleep a bit @@ -369,7 +373,7 @@ impl TestableNetworkingImplementation for PushCdnNetwork // Spawn the marshal async_spawn(async move { - let marshal: Marshal> = Marshal::new(marshal_config) + let marshal: Marshal> = Marshal::new(marshal_config) .await .expect("failed to spawn marshal"); @@ -399,15 +403,16 @@ impl TestableNetworkingImplementation for PushCdnNetwork }; // Configure our client - let client_config: ClientConfig> = ClientConfig { - keypair: KeyPair { - public_key: WrappedSignatureKey(public_key), - private_key, - }, - subscribed_topics: topics, - endpoint: marshal_endpoint, - use_local_authority: true, - }; + let client_config: ClientConfig> = + ClientConfig { + keypair: KeyPair { + public_key: WrappedSignatureKey(public_key), + private_key, + }, + subscribed_topics: topics, + endpoint: marshal_endpoint, + use_local_authority: true, + }; // Create our client Arc::new(PushCdnNetwork { @@ -428,7 +433,23 @@ impl TestableNetworkingImplementation for PushCdnNetwork } #[async_trait] -impl ConnectedNetwork for PushCdnNetwork { +impl ConnectedNetwork for PushCdnNetwork { + // async fn request_data( + // &self, + // request: Vec, + // recipient: ReqDataK, + // ) -> Result, NetworkError> { + // self.client.send_direct_message(recipient, request).await; + + // Ok(vec![]) + // } + + // async fn spawn_request_receiver_task( + // &self, + // ) -> Option, NetworkMsgResponseChannel>)>> { + // None + // } + /// Pause sending and receiving on the PushCDN network. fn pause(&self) { #[cfg(feature = "hotshot-testing")] @@ -482,7 +503,7 @@ impl ConnectedNetwork for PushCdnNetwork, - _recipients: BTreeSet, + _recipients: BTreeSet, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { self.broadcast_message(message, Topic::Da) @@ -497,11 +518,7 @@ impl ConnectedNetwork for PushCdnNetwork, - recipient: TYPES::SignatureKey, - ) -> Result<(), NetworkError> { + async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { // If we're paused, don't send the message #[cfg(feature = "hotshot-testing")] if self.is_paused.load(Ordering::Relaxed) { @@ -566,8 +583,8 @@ impl ConnectedNetwork for PushCdnNetwork Result<(), TrySendError>> { + _pk: K, + ) -> Result<(), TrySendError>> { Ok(()) } } diff --git a/libp2p-networking/src/network/behaviours/request_response.rs b/libp2p-networking/src/network/behaviours/request_response.rs index 7c75e49fc1..82dd4dab05 100644 --- a/libp2p-networking/src/network/behaviours/request_response.rs +++ b/libp2p-networking/src/network/behaviours/request_response.rs @@ -7,23 +7,11 @@ use std::collections::HashMap; use futures::channel::oneshot::Sender; +use hotshot_types::request_response::{Request, Response}; use libp2p::request_response::{Message, OutboundRequestId}; -use serde::{Deserialize, Serialize}; use crate::network::NetworkEvent; -/// Request for Consenus data -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Request(#[serde(with = "serde_bytes")] pub Vec); - -/// Response for some VID data that we already collected -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Response( - /// Data - #[serde(with = "serde_bytes")] - pub Vec, -); - #[derive(Default, Debug)] /// Handler for request response messages pub(crate) struct RequestResponseState { diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index d4c09743ae..b43db8a04d 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -16,10 +16,8 @@ use libp2p_identity::PeerId; use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; -use super::{ - behaviours::request_response::{Request, Response}, - NetworkEventInternal, -}; +use super::NetworkEventInternal; +use hotshot_types::request_response::{Request, Response}; /// Overarching network behaviour performing: /// - network topology discovoery diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index b95acdc79a..f9dce21c42 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -18,7 +18,10 @@ pub mod transport; use std::{collections::HashSet, fmt::Debug, str::FromStr}; use futures::channel::oneshot::{self, Sender}; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::{ + request_response::{Request, Response}, + traits::signature_key::SignatureKey, +}; #[cfg(async_executor_impl = "async-std")] use libp2p::dns::async_std::Transport as DnsTransport; #[cfg(async_executor_impl = "tokio")] @@ -42,7 +45,6 @@ use serde::{Deserialize, Serialize}; use tracing::instrument; use transport::StakeTableAuthentication; -use self::behaviours::request_response::{Request, Response}; pub use self::{ def::NetworkDef, error::NetworkError, diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index b63c415ebe..ff77c0675b 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -24,7 +24,9 @@ use async_compatibility_layer::{ }; use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; use hotshot_types::{ - constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::signature_key::SignatureKey, + constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, + request_response::{Request, Response}, + traits::signature_key::SignatureKey, }; use libp2p::{ autonat, @@ -70,7 +72,7 @@ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMRequest}, exponential_backoff::ExponentialBackoff, - request_response::{Request, RequestResponseState, Response}, + request_response::RequestResponseState, }; /// Maximum size of a message diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 175d37a558..89e820c12e 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -11,8 +11,9 @@ use async_compatibility_layer::{ channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, }; use futures::channel::oneshot; -use hotshot_types::traits::{ - network::NetworkError as HotshotNetworkError, signature_key::SignatureKey, +use hotshot_types::{ + request_response::{Request, Response}, + traits::{network::NetworkError as HotshotNetworkError, signature_key::SignatureKey}, }; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; @@ -20,7 +21,6 @@ use snafu::{ResultExt, Snafu}; use tracing::{debug, info, instrument}; use crate::network::{ - behaviours::request_response::{Request, Response}, error::{CancelledRequestSnafu, DHTError}, gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilderError, diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 6462962190..e5d3563089 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -10,7 +10,7 @@ use async_broadcast::Receiver; use async_compatibility_layer::art::{async_sleep, async_spawn}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use futures::{channel::mpsc, FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, @@ -19,9 +19,10 @@ use hotshot_types::{ DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, }, + request_response::{NetworkMsgResponseChannel, RequestReceiver}, traits::{ election::Membership, - network::{DataRequest, RequestKind, ResponseChannel, ResponseMessage}, + network::{DataRequest, RequestKind, ResponseMessage}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -32,10 +33,6 @@ use tokio::task::JoinHandle; use tracing::instrument; use crate::events::HotShotEvent; - -/// Type alias for the channel that we receive requests from the network on. -pub type RequestReceiver = mpsc::Receiver<(Vec, ResponseChannel>)>; - /// Time to wait for txns before sending `ResponseMessage::NotFound` const TXNS_TIMEOUT: Duration = Duration::from_millis(100); @@ -98,7 +95,7 @@ impl NetworkResponseState { /// Handle an incoming message. First validates the sender, then handles the contained request. /// Sends the response via `chan` - async fn handle_message(&self, raw_req: Vec, chan: ResponseChannel>) { + async fn handle_message(&self, raw_req: Vec, chan: NetworkMsgResponseChannel>) { let req: Message = match bincode::deserialize(&raw_req) { Ok(deserialized) => deserialized, Err(e) => { diff --git a/types/Cargo.toml b/types/Cargo.toml index ebce2c9eeb..cb0f1faede 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -48,12 +48,14 @@ jf-signature = { workspace = true, features = ["schnorr"] } jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } +serde_bytes = { workspace = true } tagged-base64 = { workspace = true } vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = { workspace = true } vec1 = { workspace = true } +libp2p = { workspace = true } [dev-dependencies] serde_json = { workspace = true } diff --git a/types/src/lib.rs b/types/src/lib.rs index a5f67c1879..566cebeecf 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -26,6 +26,7 @@ pub mod event; pub mod light_client; pub mod message; pub mod qc; +pub mod request_response; pub mod signature_key; pub mod simple_certificate; pub mod simple_vote; diff --git a/types/src/request_response.rs b/types/src/request_response.rs new file mode 100644 index 0000000000..ca65c204c7 --- /dev/null +++ b/types/src/request_response.rs @@ -0,0 +1,41 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +//! Types for the request/response implementations. This module incorporates all +//! of the shared types for all of the network backends. + +use crate::traits::network::NetworkMsg; +use async_lock::Mutex; +use futures::channel::{mpsc::Receiver, oneshot}; +use libp2p::request_response::ResponseChannel; +use serde::{Deserialize, Serialize}; + +/// Request for Consenus data +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Request(#[serde(with = "serde_bytes")] pub Vec); + +/// Response for some VID data that we already collected +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Response( + /// Data + #[serde(with = "serde_bytes")] + pub Vec, +); + +/// Wraps a oneshot channel for responding to requests. This is a +/// specialized version of the libp2p request-response `ResponseChannel` +/// which accepts any generic response. +pub struct NetworkMsgResponseChannel { + /// underlying sender for this channel + pub sender: oneshot::Sender, +} + +/// Type alias for the channel that we receive requests from the network on. +pub type RequestReceiver = Receiver<(Vec, NetworkMsgResponseChannel>)>; + +/// Locked Option of a receiver for moving the value out of the option. This +/// type takes any `Response` type depending on the underlying network impl. +pub type TakeReceiver = Mutex, ResponseChannel)>>>; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 0287f952b7..33fc55df30 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -14,7 +14,7 @@ use async_std::future::TimeoutError; use derivative::Derivative; use dyn_clone::DynClone; use futures::{ - channel::{mpsc, oneshot}, + channel::mpsc::{self}, Future, }; #[cfg(async_executor_impl = "tokio")] @@ -45,6 +45,7 @@ use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ data::ViewNumber, message::{MessagePurpose, SequencingMessage}, + request_response::NetworkMsgResponseChannel, BoxSyncFuture, }; @@ -74,20 +75,6 @@ pub enum PushCdnNetworkError { FailedToSend, } -/// Web server specific errors -#[derive(Debug, Snafu, Serialize, Deserialize)] -#[snafu(visibility(pub))] -pub enum WebServerNetworkError { - /// The injected consensus data is incorrect - IncorrectConsensusData, - /// The client returned an error - ClientError, - /// Endpoint parsed incorrectly - EndpointError, - /// Client disconnected - ClientDisconnected, -} - /// the type of transmission #[derive(Debug, Clone, Serialize, Deserialize)] pub enum TransmitType { @@ -128,12 +115,6 @@ pub enum NetworkError { /// source of error source: CentralizedServerNetworkError, }, - - /// Web server specific errors - WebServer { - /// source of error - source: WebServerNetworkError, - }, /// unimplemented functionality UnimplementedFeature, /// Could not deliver a message to a specified recipient @@ -198,12 +179,6 @@ pub trait ViewMessage { fn purpose(&self) -> MessagePurpose; } -/// Wraps a oneshot channel for responding to requests -pub struct ResponseChannel { - /// underlying sender for this channel - pub sender: oneshot::Sender, -} - /// A request for some data that the consensus layer is asking for. #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] @@ -347,7 +322,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st /// Returns `None`` if network does not support handling requests async fn spawn_request_receiver_task( &self, - ) -> Option, ResponseChannel>)>> { + ) -> Option, NetworkMsgResponseChannel>)>> { None } @@ -363,7 +338,8 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st Ok(()) } - /// handles view update + /// Update view can be used for any reason, but mostly it's for canceling tasks, + /// and looking up the address of the leader of a future view. async fn update_view<'a, TYPES>(&'a self, _view: u64, _membership: &TYPES::Membership) where TYPES: NodeType + 'a, From 6f011149b79bc722473868de56e6dd117dfdd139 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 14 Aug 2024 15:09:36 -0400 Subject: [PATCH 1181/1393] [Libp2p] DHT overhaul (#3548) --- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- .../src/traits/networking/libp2p_network.rs | 84 ++-- libp2p-networking/Cargo.toml | 1 + .../src/network/behaviours/dht/mod.rs | 43 ++- .../src/network/behaviours/dht/record.rs | 358 ++++++++++++++++++ .../src/network/behaviours/dht/store.rs | 168 ++++++++ libp2p-networking/src/network/def.rs | 23 +- libp2p-networking/src/network/node.rs | 24 +- libp2p-networking/src/network/node/handle.rs | 79 ++-- libp2p-networking/tests/counter.rs | 39 +- testing/Cargo.toml | 2 +- 12 files changed, 717 insertions(+), 108 deletions(-) create mode 100644 libp2p-networking/src/network/behaviours/dht/record.rs create mode 100644 libp2p-networking/src/network/behaviours/dht/store.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 479b0422bb..6e0a36da7f 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -106,7 +106,7 @@ surf-disco = { workspace = true } time = { workspace = true } derive_more = { workspace = true } portpicker = "0.1" -lru = "0.12" +lru.workspace = true hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index b17b060a73..696a9cc7c4 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -46,7 +46,7 @@ hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-featu hotshot-types = { path = "../types" } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } -lru = "0.12" +lru.workspace = true portpicker = "0.1" rand = { workspace = true } serde = { workspace = true, features = ["rc"] } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 1aaada8428..072560ea6d 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -65,6 +65,7 @@ use libp2p_identity::{ }; use libp2p_networking::{ network::{ + behaviours::dht::record::{Namespace, RecordKey, RecordValue}, spawn_network_node, transport::construct_auth_message, MeshParams, @@ -76,7 +77,7 @@ use libp2p_networking::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, error, info, instrument, trace, warn}; use crate::BroadcastDelay; @@ -255,6 +256,19 @@ impl TestableNetworkingImplementation let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; let pubkey = TYPES::SignatureKey::from_private(&privkey); + + // Derive the Libp2p keypair from the private key + let libp2p_keypair = derive_libp2p_keypair::(&privkey) + .expect("Failed to derive libp2p keypair"); + + // Sign the lookup record + let lookup_record_value = RecordValue::new_signed( + &RecordKey::new(Namespace::Lookup, pubkey.to_bytes()), + libp2p_keypair.public().to_peer_id().to_bytes(), + &privkey, + ) + .expect("Failed to sign DHT lookup record"); + // we want the majority of peers to have this lying around. let replication_factor = NonZeroUsize::new(2 * expected_node_count / 3).unwrap(); let config = if node_id < num_bootstrap as u64 { @@ -269,6 +283,7 @@ impl TestableNetworkingImplementation mesh_n: (expected_node_count / 2 + 3), })) .server_mode(true) + .identity(libp2p_keypair) .replication_factor(replication_factor) .node_type(NetworkNodeType::Bootstrap) .bound_addr(Some(addr)) @@ -290,6 +305,7 @@ impl TestableNetworkingImplementation mesh_n: 8, })) .server_mode(true) + .identity(libp2p_keypair) .replication_factor(replication_factor) .node_type(NetworkNodeType::Regular) .bound_addr(Some(addr)) @@ -304,6 +320,7 @@ impl TestableNetworkingImplementation let node_ids_ref = Arc::clone(&node_ids); let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); + Box::pin(async move { // If it's the second time we are starting this network, clear the bootstrap info let mut write_ids = node_ids_ref.write().await; @@ -318,6 +335,7 @@ impl TestableNetworkingImplementation Libp2pMetricsValue::default(), config, pubkey.clone(), + lookup_record_value, bootstrap_addrs_ref, usize::try_from(node_id).unwrap(), #[cfg(feature = "hotshot-testing")] @@ -437,6 +455,15 @@ impl Libp2pNetwork { )) .with_context(|| "Failed to calculate replication factor")?; + // Sign our DHT lookup record + let lookup_record_value = RecordValue::new_signed( + &RecordKey::new(Namespace::Lookup, pub_key.to_bytes()), + // The value is our Libp2p Peer ID + keypair.public().to_peer_id().to_bytes(), + priv_key, + ) + .with_context(|| "Failed to sign DHT lookup record")?; + config_builder .server_mode(libp2p_config.server_mode) .identity(keypair) @@ -477,6 +504,7 @@ impl Libp2pNetwork { metrics, node_config, pub_key.clone(), + lookup_record_value, Arc::new(RwLock::new(bootstrap_nodes)), usize::try_from(config.node_index)?, #[cfg(feature = "hotshot-testing")] @@ -519,12 +547,13 @@ impl Libp2pNetwork { metrics: Libp2pMetricsValue, config: NetworkNodeConfig, pk: K, + lookup_record_value: RecordValue, bootstrap_addrs: BootstrapAddrs, id: usize, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, is_da: bool, ) -> Result, NetworkError> { - let (mut rx, network_handle) = spawn_network_node(config.clone(), id) + let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) .await .map_err(Into::::into)?; // Make bootstrap mappings known @@ -588,7 +617,7 @@ impl Libp2pNetwork { result.handle_event_generator(sender, requests_tx, rx); result.spawn_node_lookup(node_lookup_recv); - result.spawn_connect(id); + result.spawn_connect(id, lookup_record_value); Ok(result) } @@ -608,19 +637,12 @@ impl Libp2pNetwork { #[allow(clippy::cast_possible_truncation)] const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; - debug!("Performing lookup for peer {:?}", pk); + trace!("Performing lookup for peer {:?}", pk); // only run if we are not too close to the next view number if latest_seen_view.load(Ordering::Relaxed) + THRESHOLD <= *view_number { - let pk_bytes = match bincode::serialize(&pk) { - Ok(serialized) => serialized, - Err(e) => { - tracing::error!("Failed to serialize public key; this should never happen. Error: {e}"); - return; - } - }; // look up - if let Err(err) = handle.lookup_node(&pk_bytes, dht_timeout).await { + if let Err(err) = handle.lookup_node(&pk.to_bytes(), dht_timeout).await { warn!("Failed to perform lookup for key {:?}: {}", pk, err); }; } @@ -629,7 +651,7 @@ impl Libp2pNetwork { } /// Initiates connection to the outside world - fn spawn_connect(&mut self, id: usize) { + fn spawn_connect(&mut self, id: usize, lookup_record_value: RecordValue) { let pk = self.inner.pk.clone(); let bootstrap_ref = Arc::clone(&self.inner.bootstrap_addrs); let handle = Arc::clone(&self.inner.handle); @@ -660,26 +682,12 @@ impl Libp2pNetwork { handle.subscribe("DA".to_string()).await.unwrap(); } - // we want our records published before - // we begin participating in consensus - // - // Note: this serialization should never fail, - // and if it does the error is unrecoverable. - while handle - .put_record( - &bincode::serialize(&pk).unwrap(), - &bincode::serialize(&handle.peer_id()).unwrap(), - ) - .await - .is_err() - { - async_sleep(Duration::from_secs(1)).await; - } - + // Map our staking key to our Libp2p Peer ID so we can properly + // route direct messages while handle .put_record( - &bincode::serialize(&handle.peer_id()).unwrap(), - &bincode::serialize(&pk).unwrap(), + RecordKey::new(Namespace::Lookup, pk.to_bytes()), + lookup_record_value.clone(), ) .await .is_err() @@ -687,8 +695,6 @@ impl Libp2pNetwork { async_sleep(Duration::from_secs(1)).await; } - info!("Finished putting Kademlia records"); - // Wait for the network to connect to the required number of peers if let Err(e) = handle.wait_to_connect(4, id).await { error!("Failed to connect to peers: {:?}", e); @@ -826,11 +832,7 @@ impl ConnectedNetwork for Libp2pNetwork { let pid = match self .inner .handle - .lookup_node( - &bincode::serialize(&recipient) - .map_err(|e| NetworkError::Libp2p { source: e.into() })?, - self.inner.dht_timeout, - ) + .lookup_node(&recipient.to_bytes(), self.inner.dht_timeout) .await { Ok(pid) => pid, @@ -1043,11 +1045,7 @@ impl ConnectedNetwork for Libp2pNetwork { let pid = match self .inner .handle - .lookup_node( - &bincode::serialize(&recipient) - .map_err(|e| NetworkError::Libp2p { source: e.into() })?, - self.inner.dht_timeout, - ) + .lookup_node(&recipient.to_bytes(), self.inner.dht_timeout) .await { Ok(pid) => pid, diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 5e2d53f5f9..01c46f91d4 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -19,6 +19,7 @@ async-trait = { workspace = true } blake3 = { workspace = true } bincode = { workspace = true } custom_debug = { workspace = true } +delegate = "0.12" derive_builder = "0.20" either = { workspace = true } futures = { workspace = true } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index d7245f8226..c65969c870 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -8,6 +8,7 @@ pub mod bootstrap; use std::{ collections::{HashMap, HashSet}, + marker::PhantomData, num::NonZeroUsize, time::Duration, }; @@ -18,6 +19,7 @@ use futures::{ channel::{mpsc, oneshot::Sender}, SinkExt, }; +use hotshot_types::traits::signature_key::SignatureKey; use lazy_static::lazy_static; use libp2p::kad::{ /* handler::KademliaHandlerIn, */ store::MemoryStore, BootstrapOk, GetClosestPeersOk, @@ -27,8 +29,15 @@ use libp2p::kad::{ store::RecordStore, Behaviour as KademliaBehaviour, BootstrapError, Event as KademliaEvent, }; use libp2p_identity::PeerId; +use store::ValidatedStore; use tracing::{debug, error, info, warn}; +/// Additional DHT record functionality +pub mod record; + +/// Additional DHT store functionality +pub mod store; + /// the number of nodes required to get an answer from /// in order to trust that the answer is correct when retrieving from the DHT pub(crate) const NUM_REPLICATED_TO_TRUST: usize = 2; @@ -48,7 +57,7 @@ use crate::network::{ClientRequest, NetworkEvent}; /// - bootstrapping into the network /// - peer discovery #[derive(Debug)] -pub struct DHTBehaviour { +pub struct DHTBehaviour { /// in progress queries for nearby peers pub in_progress_get_closest_peers: HashMap>, /// List of in-progress get requests @@ -67,6 +76,9 @@ pub struct DHTBehaviour { retry_tx: Option>, /// Sender to the bootstrap task bootstrap_tx: Option>, + + /// Phantom type for the key + phantom: PhantomData, } /// State of bootstrapping @@ -94,7 +106,7 @@ pub enum DHTEvent { IsBootstrapped, } -impl DHTBehaviour { +impl DHTBehaviour { /// Give the handler a way to retry requests. pub fn set_retry(&mut self, tx: UnboundedSender) { self.retry_tx = Some(tx); @@ -124,11 +136,15 @@ impl DHTBehaviour { replication_factor, retry_tx: None, bootstrap_tx: None, + phantom: PhantomData, } } /// print out the routing table to stderr - pub fn print_routing_table(&mut self, kadem: &mut KademliaBehaviour) { + pub fn print_routing_table( + &mut self, + kadem: &mut KademliaBehaviour>, + ) { let mut err = format!("KBUCKETS: PID: {:?}, ", self.peer_id); let v = kadem.kbuckets().collect::>(); for i in v { @@ -160,14 +176,14 @@ impl DHTBehaviour { /// Value (serialized) is sent over `chan`, and if a value is not found, /// a [`crate::network::error::DHTError`] is sent instead. /// NOTE: noop if `retry_count` is 0 - pub fn record( + pub fn get_record( &mut self, key: Vec, chan: Sender>, factor: NonZeroUsize, backoff: ExponentialBackoff, retry_count: u8, - kad: &mut KademliaBehaviour, + kad: &mut KademliaBehaviour>, ) { // noop if retry_count == 0 { @@ -235,7 +251,7 @@ impl DHTBehaviour { /// update state based on recv-ed get query fn handle_get_query( &mut self, - store: &mut MemoryStore, + store: &mut ValidatedStore, record_results: GetRecordResult, id: QueryId, mut last: bool, @@ -314,10 +330,14 @@ impl DHTBehaviour { publisher: None, expires: None, }; - let _ = store.put(record); - // return value - if notify.send(r).is_err() { - error!("Get DHT: channel closed before get record request result could be sent"); + + // Only return the record if we can store it (validation passed) + if store.put(record).is_ok() { + if notify.send(r).is_err() { + error!("Get DHT: channel closed before get record request result could be sent"); + } + } else { + error!("Failed to store record in local store"); } } // disagreement => query more nodes @@ -391,7 +411,7 @@ impl DHTBehaviour { pub fn dht_handle_event( &mut self, event: KademliaEvent, - store: &mut MemoryStore, + store: &mut ValidatedStore, ) -> Option { match event { KademliaEvent::OutboundQueryProgressed { @@ -470,7 +490,6 @@ impl DHTBehaviour { KademliaEvent::UnroutablePeer { peer } => { debug!("Found unroutable peer {:?}", peer); } - KademliaEvent::InboundRequest { request: _r } => {} KademliaEvent::RoutingUpdated { peer: _, is_new_peer: _, diff --git a/libp2p-networking/src/network/behaviours/dht/record.rs b/libp2p-networking/src/network/behaviours/dht/record.rs new file mode 100644 index 0000000000..189f0f5774 --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/record.rs @@ -0,0 +1,358 @@ +use anyhow::{bail, Context, Result}; +use hotshot_types::traits::signature_key::SignatureKey; +use libp2p::kad::Record; +use serde::{Deserialize, Serialize}; +use tracing::warn; + +/// A (signed or unsigned) record value to be stored (serialized) in the DHT. +/// This is a wrapper around a value that includes a possible signature. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] +pub enum RecordValue { + /// A signed record value + Signed(Vec, K::PureAssembledSignatureType), + + /// An unsigned record value + Unsigned(Vec), +} + +/// The namespace of a record. This is included with the key +/// and allows for multiple types of records to be stored in the DHT. +#[repr(u8)] +#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq)] +pub enum Namespace { + /// A namespace for looking up P2P identities + Lookup = 0, + + /// An authenticated namespace useful for testing + #[cfg(test)] + Testing = 254, + + /// An unauthenticated namespace useful for testing + #[cfg(test)] + TestingUnauthenticated = 255, +} + +/// Require certain namespaces to be authenticated +fn requires_authentication(namespace: Namespace) -> bool { + match namespace { + Namespace::Lookup => true, + #[cfg(test)] + Namespace::Testing => true, + #[cfg(test)] + Namespace::TestingUnauthenticated => false, + } +} + +/// Allow fallible conversion from a byte to a namespace +impl TryFrom for Namespace { + type Error = anyhow::Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(Self::Lookup), + #[cfg(test)] + 254 => Ok(Self::Testing), + #[cfg(test)] + 255 => Ok(Self::TestingUnauthenticated), + _ => bail!("Unknown namespace"), + } + } +} + +/// A record's key. This is a concatenation of the namespace and the key. +#[derive(Clone)] +pub struct RecordKey { + /// The namespace of the record key + pub namespace: Namespace, + + /// The actual key + pub key: Vec, +} + +impl RecordKey { + #[must_use] + /// Create and return a new record key in the given namespace + pub fn new(namespace: Namespace, key: Vec) -> Self { + Self { namespace, key } + } + + /// Convert the record key to a byte vector + #[must_use] + pub fn to_bytes(&self) -> Vec { + // Concatenate the namespace and the key + let mut bytes = vec![self.namespace as u8]; + bytes.extend_from_slice(&self.key); + bytes + } + + /// Try to convert a byte vector to a record key + /// + /// # Errors + /// If the provided array is empty + pub fn try_from_bytes(bytes: &[u8]) -> Result { + // Check if the bytes are empty + if bytes.is_empty() { + bail!("Empty record key bytes") + } + + // The first byte is the namespace + let namespace = Namespace::try_from(bytes[0])?; + + // Return the record key + Ok(Self { + namespace, + key: bytes[1..].to_vec(), + }) + } +} + +impl RecordValue { + /// Creates and returns a new signed record by signing the key and value + /// with the private key + /// + /// # Errors + /// - If we fail to sign the value + /// - If we fail to serialize the signature + pub fn new_signed( + record_key: &RecordKey, + value: Vec, + private_key: &K::PrivateKey, + ) -> Result { + // The value to sign should be the record key concatenated with the value + let mut value_to_sign = record_key.to_bytes(); + value_to_sign.extend_from_slice(&value); + + let signature = + K::sign(private_key, &value_to_sign).with_context(|| "Failed to sign record")?; + + // Return the signed record + Ok(Self::Signed(value, signature)) + } + + /// Creates and returns a new unsigned record + #[must_use] + pub fn new(value: Vec) -> Self { + Self::Unsigned(value) + } + + /// If the message requires authentication, validate the record by verifying the signature with the + /// given key + pub fn validate(&self, record_key: &RecordKey) -> bool { + // If the record requires authentication, validate the signature + if !requires_authentication(record_key.namespace) { + return true; + } + + // The record must be signed + let Self::Signed(value, signature) = self else { + warn!("Record should be signed but is not"); + return false; + }; + + // If the request is "signed", the public key is the record's key + let Ok(public_key) = K::from_bytes(record_key.key.as_slice()) else { + warn!("Failed to deserialize signer's public key"); + return false; + }; + + // The value to sign should be the record key concatenated with the value + let mut signed_value = record_key.to_bytes(); + signed_value.extend_from_slice(value); + + // Validate the signature + public_key.validate(signature, &signed_value) + } + + /// Get the underlying value of the record + pub fn value(&self) -> &[u8] { + match self { + Self::Unsigned(value) | Self::Signed(value, _) => value, + } + } +} + +impl TryFrom for RecordValue { + type Error = anyhow::Error; + + fn try_from(record: Record) -> Result { + // Deserialize the record value + let record: RecordValue = bincode::deserialize(&record.value) + .with_context(|| "Failed to deserialize record value")?; + + // Return the record + Ok(record) + } +} + +#[cfg(test)] +mod test { + use hotshot_types::signature_key::BLSPubKey; + + use super::*; + + /// Test that namespace serialization and deserialization is consistent + #[test] + fn test_namespace_serialization_parity() { + // Serialize the namespace + let namespace = Namespace::Lookup; + let bytes = namespace as u8; + + // Deserialize the namespace + let namespace = Namespace::try_from(bytes).expect("Failed to deserialize namespace"); + assert!(namespace == Namespace::Lookup, "Wrong namespace"); + } + + /// Test that record key serialization and deserialization is consistent + #[test] + fn test_record_key_serialization_parity() { + // Create a new record key + let namespace = Namespace::Lookup; + let key = vec![1, 2, 3, 4]; + let record_key = RecordKey::new(namespace, key.clone()); + + // Serialize it + let bytes = record_key.to_bytes(); + + // Deserialize it + let record_key = + RecordKey::try_from_bytes(&bytes).expect("Failed to deserialize record key"); + + // Make sure the deserialized record key is the same as the original + assert!(record_key.namespace == namespace, "Namespace mismatch"); + assert!(record_key.key == key, "Key mismatch"); + } + + /// Test that the validity of a valid, signed record is correct + #[test] + fn test_valid_signature() { + // Generate a staking keypair + let (public_key, private_key) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a value. The key is the public key + let value = vec![5, 6, 7, 8]; + + // Create a record key (as we need to sign both the key and the value) + let record_key = RecordKey::new(Namespace::Lookup, public_key.to_bytes()); + + // Sign the record and value with the private key + let record_value: RecordValue = + RecordValue::new_signed(&record_key, value.clone(), &private_key).unwrap(); + + // Validate the signed record + assert!( + record_value.validate(&record_key), + "Failed to validate signed record" + ); + } + + /// Test that altering the namespace byte causes a validation failure + #[test] + fn test_invalid_namespace() { + // Generate a staking keypair + let (public_key, private_key) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a value. The key is the public key + let value = vec![5, 6, 7, 8]; + + // Create a record key (as we need to sign both the key and the value) + let mut record_key = RecordKey::new(Namespace::Lookup, public_key.to_bytes()); + + // Sign the record and value with the private key + let record_value: RecordValue = + RecordValue::new_signed(&record_key, value.clone(), &private_key).unwrap(); + + // Alter the namespace + record_key.namespace = Namespace::Testing; + + // Validate the signed record + assert!( + !record_value.validate(&record_key), + "Failed to detect invalid namespace" + ); + } + + /// Test that altering the contents of the record key causes a validation failure + #[test] + fn test_invalid_key() { + // Generate a staking keypair + let (public_key, private_key) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a value. The key is the public key + let value = vec![5, 6, 7, 8]; + + // Create a record key (as we need to sign both the key and the value) + let mut record_key = RecordKey::new(Namespace::Lookup, public_key.to_bytes()); + + // Sign the record and value with the private key + let record_value: RecordValue = + RecordValue::new_signed(&record_key, value.clone(), &private_key).unwrap(); + + // Set the key to a different one + record_key.key = BLSPubKey::generated_from_seed_indexed([1; 32], 1338) + .0 + .to_bytes(); + + // Validate the signed record + assert!( + !record_value.validate(&record_key), + "Failed to detect invalid record key" + ); + } + + /// Test that unsigned records are always valid + #[test] + fn test_unsigned_record_is_valid() { + // Create a value + let value = vec![5, 6, 7, 8]; + + // Create a record key + let record_key = RecordKey::new(Namespace::TestingUnauthenticated, vec![1, 2, 3, 4]); + + // Create an unsigned record + let record_value: RecordValue = RecordValue::new(value.clone()); + + // Validate the unsigned record + assert!( + record_value.validate(&record_key), + "Failed to validate unsigned record" + ); + } + + /// Test that unauthenticated namespaces do not require validation for unsigned records + #[test] + fn test_unauthenticated_namespace() { + // Generate a staking keypair + let (public_key, _) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a record key (as we need to sign both the key and the value) + let record_key = RecordKey::new(Namespace::TestingUnauthenticated, public_key.to_bytes()); + + // Created an unsigned record + let record_value: RecordValue = RecordValue::new(vec![5, 6, 7, 8]); + + // Validate it + assert!( + record_value.validate(&record_key), + "Failed to validate unsigned record in unauthenticated namespace" + ); + } + + /// Test that authenticated namespaces do require validation for unsigned records + #[test] + fn test_authenticated_namespace() { + // Generate a staking keypair + let (public_key, _) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a record key (as we need to sign both the key and the value) + let record_key = RecordKey::new(Namespace::Lookup, public_key.to_bytes()); + + // Created an unsigned record + let record_value: RecordValue = RecordValue::new(vec![5, 6, 7, 8]); + + // Validate it + assert!( + !record_value.validate(&record_key), + "Failed to detect invalid unsigned record" + ); + } +} diff --git a/libp2p-networking/src/network/behaviours/dht/store.rs b/libp2p-networking/src/network/behaviours/dht/store.rs new file mode 100644 index 0000000000..af4fd7477f --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/store.rs @@ -0,0 +1,168 @@ +//! This file contains the `ValidatedStore` struct, which is a wrapper around a `RecordStore` that +//! validates records before storing them. +//! +//! The `ValidatedStore` struct is used to ensure that only valid records are stored in the DHT. + +use std::marker::PhantomData; + +use delegate::delegate; +use hotshot_types::traits::signature_key::SignatureKey; +use libp2p::kad::store::{Error, RecordStore, Result}; +use tracing::warn; + +use crate::network::behaviours::dht::record::RecordKey; + +use super::record::RecordValue; + +/// A `RecordStore` wrapper that validates records before storing them. +pub struct ValidatedStore { + /// The underlying store + store: R, + + /// Phantom type for the key + phantom: std::marker::PhantomData, +} + +impl ValidatedStore { + /// Create a new `ValidatedStore` with the given underlying store + pub fn new(store: R) -> Self { + ValidatedStore { + store, + phantom: PhantomData, + } + } +} + +/// Implement the `RecordStore` trait for `ValidatedStore` +impl RecordStore for ValidatedStore +where + K: 'static, +{ + type ProvidedIter<'a> = R::ProvidedIter<'a> where R: 'a, K: 'a; + type RecordsIter<'a> = R::RecordsIter<'a> where R: 'a, K: 'a; + + // Delegate all `RecordStore` methods except `put` to the inner store + delegate! { + to self.store{ + fn add_provider(&mut self, record: libp2p::kad::ProviderRecord) -> libp2p::kad::store::Result<()>; + fn get(&self, k: &libp2p::kad::RecordKey) -> Option>; + fn provided(&self) -> Self::ProvidedIter<'_>; + fn providers(&self, key: &libp2p::kad::RecordKey) -> Vec; + fn records(&self) -> Self::RecordsIter<'_>; + fn remove(&mut self, k: &libp2p::kad::RecordKey); + fn remove_provider(&mut self, k: &libp2p::kad::RecordKey, p: &libp2p::PeerId); + } + } + + /// Overwrite the `put` method to validate the record before storing it + fn put(&mut self, record: libp2p::kad::Record) -> Result<()> { + // Convert the record to the correct type + if let Ok(record_value) = RecordValue::::try_from(record.clone()) { + // Convert the key to the correct type + let Ok(record_key) = RecordKey::try_from_bytes(&record.key.to_vec()) else { + warn!("Failed to convert record key"); + return Err(Error::MaxRecords); + }; + + // If the record is signed by the correct key, + if record_value.validate(&record_key) { + // Store the record + if let Err(err) = self.store.put(record.clone()) { + warn!("Failed to store record: {:?}", err); + return Err(Error::MaxRecords); + } + } else { + warn!("Failed to validate record"); + return Err(Error::MaxRecords); + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use hotshot_types::signature_key::BLSPubKey; + use libp2p::{ + kad::{store::MemoryStore, Record}, + PeerId, + }; + + use crate::network::behaviours::dht::record::Namespace; + + use super::*; + + /// Test that a valid record is stored + #[test] + fn test_valid_stored() { + // Generate a staking keypair + let (public_key, private_key) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a value. The key is the public key + let value = vec![5, 6, 7, 8]; + + // Create a record key (as we need to sign both the key and the value) + let record_key = RecordKey::new(Namespace::Lookup, public_key.to_bytes()); + + // Sign the record and value with the private key + let record_value: RecordValue = + RecordValue::new_signed(&record_key, value.clone(), &private_key).unwrap(); + + // Initialize the store + let mut store: ValidatedStore = + ValidatedStore::new(MemoryStore::new(PeerId::random())); + + // Serialize the record value + let record_value_bytes = + bincode::serialize(&record_value).expect("Failed to serialize record value"); + + // Create and store the record + let record = Record::new(record_key.to_bytes(), record_value_bytes); + store.put(record).expect("Failed to store record"); + + // Check that the record is stored + let libp2p_record_key = libp2p::kad::RecordKey::new(&record_key.to_bytes()); + let stored_record = store.get(&libp2p_record_key).expect("Failed to get record"); + let stored_record_value: RecordValue = + bincode::deserialize(&stored_record.value).expect("Failed to deserialize record value"); + + // Make sure the stored record is the same as the original record + assert_eq!( + record_value, stored_record_value, + "Stored record is not the same as original" + ); + } + + /// Test that an invalid record is not stored + #[test] + fn test_invalid_not_stored() { + // Generate a staking keypair + let (public_key, _) = BLSPubKey::generated_from_seed_indexed([1; 32], 1337); + + // Create a record key (as we need to sign both the key and the value) + let record_key = RecordKey::new(Namespace::Lookup, public_key.to_bytes()); + + // Create a new (unsigned, invalid) record value + let record_value: RecordValue = RecordValue::new(vec![2, 3]); + + // Initialize the store + let mut store: ValidatedStore = + ValidatedStore::new(MemoryStore::new(PeerId::random())); + + // Serialize the record value + let record_value_bytes = + bincode::serialize(&record_value).expect("Failed to serialize record value"); + + // Make sure we are unable to store the record + let record = Record::new(record_key.to_bytes(), record_value_bytes); + assert!(store.put(record).is_err(), "Should not have stored record"); + + // Check that the record is not stored + let libp2p_record_key = libp2p::kad::RecordKey::new(&record_key.to_bytes()); + assert!( + store.get(&libp2p_record_key).is_none(), + "Should not have stored record" + ); + } +} diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index b43db8a04d..9d97db9d75 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -16,8 +16,11 @@ use libp2p_identity::PeerId; use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; -use super::NetworkEventInternal; -use hotshot_types::request_response::{Request, Response}; +use super::{behaviours::dht::store::ValidatedStore, NetworkEventInternal}; +use hotshot_types::{ + request_response::{Request, Response}, + traits::signature_key::SignatureKey, +}; /// Overarching network behaviour performing: /// - network topology discovoery @@ -26,7 +29,7 @@ use hotshot_types::request_response::{Request, Response}; /// - connection management #[derive(NetworkBehaviour, custom_debug::Debug)] #[behaviour(to_swarm = "NetworkEventInternal")] -pub struct NetworkDef { +pub struct NetworkDef { /// purpose: broadcasting messages to many peers /// NOTE gossipsub works ONLY for sharing messages right now /// in the future it may be able to do peer discovery and routing @@ -37,7 +40,7 @@ pub struct NetworkDef { /// purpose: peer routing /// purpose: storing pub key <-> peer id bijection #[debug(skip)] - pub dht: libp2p::kad::Behaviour, + pub dht: libp2p::kad::Behaviour>, /// purpose: identifying the addresses from an outside POV #[debug(skip)] @@ -57,17 +60,17 @@ pub struct NetworkDef { pub autonat: libp2p::autonat::Behaviour, } -impl NetworkDef { +impl NetworkDef { /// Create a new instance of a `NetworkDef` #[must_use] pub fn new( gossipsub: GossipBehaviour, - dht: libp2p::kad::Behaviour, + dht: libp2p::kad::Behaviour>, identify: IdentifyBehaviour, direct_message: cbor::Behaviour, Vec>, request_response: cbor::Behaviour, autonat: autonat::Behaviour, - ) -> NetworkDef { + ) -> NetworkDef { Self { gossipsub, dht, @@ -80,7 +83,7 @@ impl NetworkDef { } /// Address functions -impl NetworkDef { +impl NetworkDef { /// Add an address pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) { // NOTE to get this address to play nice with the other @@ -94,7 +97,7 @@ impl NetworkDef { } /// Gossip functions -impl NetworkDef { +impl NetworkDef { /// Publish a given gossip pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { if let Err(e) = self.gossipsub.publish(topic, contents) { @@ -117,7 +120,7 @@ impl NetworkDef { } /// Request/response functions -impl NetworkDef { +impl NetworkDef { /// Add a direct request for a given peer pub fn add_direct_request(&mut self, peer_id: PeerId, data: Vec) -> OutboundRequestId { self.direct_message.send_request(&peer_id, data) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index ff77c0675b..3ef40fbab8 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -14,6 +14,7 @@ mod handle; use std::{ collections::{HashMap, HashSet}, iter, + marker::PhantomData, num::{NonZeroU32, NonZeroUsize}, time::Duration, }; @@ -63,7 +64,10 @@ pub use self::{ }, }; use super::{ - behaviours::dht::bootstrap::{self, DHTBootstrapTask, InputEvent}, + behaviours::dht::{ + bootstrap::{self, DHTBootstrapTask, InputEvent}, + store::ValidatedStore, + }, error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, NetworkNodeType, @@ -93,7 +97,7 @@ pub struct NetworkNode { peer_id: PeerId, /// the swarm of networkbehaviours #[debug(skip)] - swarm: Swarm, + swarm: Swarm>, /// the configuration parameters of the netework config: NetworkNodeConfig, /// the listener id we are listening on, if it exists @@ -103,11 +107,14 @@ pub struct NetworkNode { /// Handler for direct messages direct_message_state: DMBehaviour, /// Handler for DHT Events - dht_handler: DHTBehaviour, + dht_handler: DHTBehaviour, /// Channel to resend requests, set to Some when we call `spawn_listeners` resend_tx: Option>, /// Send to the bootstrap task to tell it to start a bootstrap bootstrap_tx: Option>, + + /// Phantom data to hold the key type + pd: PhantomData, } impl NetworkNode { @@ -188,7 +195,7 @@ impl NetworkNode { .await?; // Generate the swarm - let mut swarm: Swarm = { + let mut swarm: Swarm> = { // Use the hash of the message's contents as the ID // Use blake3 for much paranoia at very high speeds let message_id_fn = |message: &GossipsubMessage| { @@ -286,7 +293,11 @@ impl NetworkNode { panic!("Replication factor not set"); } - let mut kadem = Behaviour::with_config(peer_id, MemoryStore::new(peer_id), kconfig); + let mut kadem = Behaviour::with_config( + peer_id, + ValidatedStore::new(MemoryStore::new(peer_id)), + kconfig, + ); if config.server_mode { kadem.set_mode(Some(Mode::Server)); } @@ -362,6 +373,7 @@ impl NetworkNode { ), resend_tx: None, bootstrap_tx: None, + pd: PhantomData, }) } @@ -457,7 +469,7 @@ impl NetworkNode { notify, retry_count, } => { - self.dht_handler.record( + self.dht_handler.get_record( key, notify, NonZeroUsize::new(NUM_REPLICATED_TO_TRUST).unwrap(), diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 89e820c12e..6aa1804b6e 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashSet, fmt::Debug, time::Duration}; +use std::{collections::HashSet, fmt::Debug, marker::PhantomData, time::Duration}; use async_compatibility_layer::{ art::{async_sleep, async_timeout, future::to}, @@ -21,6 +21,7 @@ use snafu::{ResultExt, Snafu}; use tracing::{debug, info, instrument}; use crate::network::{ + behaviours::dht::record::{Namespace, RecordKey, RecordValue}, error::{CancelledRequestSnafu, DHTError}, gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilderError, @@ -45,6 +46,9 @@ pub struct NetworkNodeHandle { /// human readable id id: usize, + + /// Phantom data to hold the key type + pd: PhantomData, } /// internal network node receiver @@ -105,12 +109,13 @@ pub async fn spawn_network_node( recv_kill: None, }; - let handle = NetworkNodeHandle { + let handle = NetworkNodeHandle:: { network_config: config, send_network: send_chan, listen_addr, peer_id, id, + pd: PhantomData, }; Ok((receiver, handle)) } @@ -228,33 +233,45 @@ impl NetworkNodeHandle { r.await.map_err(|_| NetworkNodeHandleError::RecvError) } - /// Looks up a node's `PeerId` and attempts to validate routing + /// Looks up a node's `PeerId` by its staking key. Is authenticated through + /// `get_record` assuming each record should be signed. + /// /// # Errors - /// if the peer was unable to be looked up (did not provide a response, DNE) + /// If the DHT lookup fails pub async fn lookup_node( &self, key: &[u8], dht_timeout: Duration, ) -> Result { - // get record (from DHT) - let pid = self.record_timeout(key, dht_timeout).await?; + // Create the record key + let key = RecordKey::new(Namespace::Lookup, key.to_vec()); - // pid lookup for routing - // self.lookup_pid(pid).await?; + // Get the record from the DHT + let pid = self.get_record_timeout(key, dht_timeout).await?; - bincode::deserialize(&pid) - .map_err(|e| NetworkNodeHandleError::DeserializationError { source: e.into() }) + PeerId::from_bytes(&pid).map_err(|_| NetworkNodeHandleError::FailedToDeserialize) } /// Insert a record into the kademlia DHT /// # Errors /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value - pub async fn put_record(&self, key: &[u8], value: &[u8]) -> Result<(), NetworkNodeHandleError> { + pub async fn put_record( + &self, + key: RecordKey, + value: RecordValue, + ) -> Result<(), NetworkNodeHandleError> { + // Serialize the key + let key = key.to_bytes(); + + // Serialize the record + let value = bincode::serialize(&value) + .map_err(|e| NetworkNodeHandleError::SerializationError { source: e.into() })?; + let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::PutDHT { - key: key.to_vec(), - value: value.to_vec(), + key: key.clone(), + value, notify: s, }; @@ -269,23 +286,33 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn record( + pub async fn get_record( &self, - key: &[u8], + key: RecordKey, retry_count: u8, ) -> Result, NetworkNodeHandleError> { + // Serialize the key + let serialized_key = key.to_bytes(); + let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetDHT { - key: key.to_vec(), + key: serialized_key.clone(), notify: s, retry_count, }; self.send_request(req).await?; - match r.await.context(CancelledRequestSnafu) { + // Map the error + let result = match r.await.context(CancelledRequestSnafu) { Ok(result) => Ok(result), Err(e) => Err(e).context(DHTSnafu), - } + }?; + + // Deserialize the record's value + let record: RecordValue = bincode::deserialize(&result) + .map_err(|e| NetworkNodeHandleError::DeserializationError { source: e.into() })?; + + Ok(record.value().to_vec()) } /// Get a record from the kademlia DHT with a timeout @@ -294,12 +321,12 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value - pub async fn record_timeout( + pub async fn get_record_timeout( &self, - key: &[u8], + key: RecordKey, timeout: Duration, ) -> Result, NetworkNodeHandleError> { - let result = async_timeout(timeout, self.record(key, 3)).await; + let result = async_timeout(timeout, self.get_record(key, 3)).await; match result { Err(e) => Err(e).context(TimeoutSnafu), Ok(r) => r, @@ -314,8 +341,8 @@ impl NetworkNodeHandle { /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed pub async fn put_record_timeout( &self, - key: &[u8], - value: &[u8], + key: RecordKey, + value: RecordValue, timeout: Duration, ) -> Result<(), NetworkNodeHandleError> { let result = async_timeout(timeout, self.put_record(key, value)).await; @@ -554,6 +581,12 @@ pub enum NetworkNodeHandleError { }, /// no known topic matches the hashset of keys NoSuchTopic, + + /// Deserialization error + FailedToDeserialize, + + /// Signature verification error + FailedToVerify, } impl From for HotshotNetworkError { diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 88f08af159..036991bea9 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -15,8 +15,11 @@ use async_lock::RwLock; use async_std::prelude::StreamExt; use common::{test_bed, HandleSnafu, HandleWithState, TestError}; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; -use libp2p_networking::network::{NetworkEvent, NetworkNodeHandleError}; -use rand::seq::IteratorRandom; +use libp2p_networking::network::{ + behaviours::dht::record::{Namespace, RecordKey, RecordValue}, + NetworkEvent, NetworkNodeHandleError, +}; +use rand::{rngs::StdRng, seq::IteratorRandom, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; use snafu::ResultExt; #[cfg(async_executor_impl = "tokio")] @@ -373,33 +376,47 @@ async fn run_gossip_one_round( async fn run_dht_rounds( handles: &[HandleWithState], timeout: Duration, - starting_val: usize, + _starting_val: usize, num_rounds: usize, ) { let mut rng = rand::thread_rng(); for i in 0..num_rounds { debug!("begin round {}", i); let msg_handle = random_handle(handles, &mut rng); - let mut key = vec![0; DHT_KV_PADDING]; - let inc_val = u8::try_from(starting_val + i).unwrap(); - key.push(inc_val); - let mut value = vec![0; DHT_KV_PADDING]; - value.push(inc_val); + + // Create a random keypair + let mut rng = StdRng::from_entropy(); + let (public_key, private_key) = K::generated_from_seed_indexed([1; 32], rng.gen::()); + + // Create a random value to sign + let value = (0..DHT_KV_PADDING) + .map(|_| rng.gen::()) + .collect::>(); + + // Create the record key + let key = RecordKey::new(Namespace::Lookup, public_key.to_bytes().clone()); + + // Sign the value + let value = RecordValue::new_signed(&key, value, &private_key).expect("signing failed"); // put the key - msg_handle.handle.put_record(&key, &value).await.unwrap(); + msg_handle + .handle + .put_record(key.clone(), value.clone()) + .await + .unwrap(); // get the key from the other nodes for handle in handles { let result: Result, NetworkNodeHandleError> = - handle.handle.record_timeout(&key, timeout).await; + handle.handle.get_record_timeout(key.clone(), timeout).await; match result { Err(e) => { error!("DHT error {e:?} during GET"); std::process::exit(-1); } Ok(v) => { - assert_eq!(v, value); + assert_eq!(v, value.value()); } } } diff --git a/testing/Cargo.toml b/testing/Cargo.toml index cccd8c8f5e..721f526a08 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -45,7 +45,7 @@ snafu = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } -lru = { workspace = true } +lru.workspace = true tagged-base64.workspace = true vec1 = { workspace = true } reqwest = { workspace = true } From 4c74c106d6d43224619ec2f5b783116d19bae989 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 14 Aug 2024 15:48:25 -0400 Subject: [PATCH 1182/1393] Update builder marketplace API (#3573) --- builder-api/api/v0_3/builder.toml | 4 +- .../src/network/behaviours/dht/store.rs | 6 +- libp2p-networking/src/network/def.rs | 8 +- task-impls/src/builder.rs | 13 +- task-impls/src/transactions.rs | 325 ++++++++++-------- types/src/request_response.rs | 3 +- 6 files changed, 210 insertions(+), 149 deletions(-) diff --git a/builder-api/api/v0_3/builder.toml b/builder-api/api/v0_3/builder.toml index 5614e38bb8..a7f3b92832 100644 --- a/builder-api/api/v0_3/builder.toml +++ b/builder-api/api/v0_3/builder.toml @@ -27,7 +27,9 @@ DESCRIPTION = "" FORMAT_VERSION = "0.1.0" [route.bundle] -PATH = ["bundle/:view_number"] +PATH = ["bundle/:parent_view/:parent_hash/:view_number"] +":parent_view" = "Integer" +":parent_hash" = "TaggedBase64" ":view_number" = "Integer" DOC = """ Fetch the bundle from the builder for the specified view. diff --git a/libp2p-networking/src/network/behaviours/dht/store.rs b/libp2p-networking/src/network/behaviours/dht/store.rs index af4fd7477f..6969ced1ff 100644 --- a/libp2p-networking/src/network/behaviours/dht/store.rs +++ b/libp2p-networking/src/network/behaviours/dht/store.rs @@ -10,9 +10,8 @@ use hotshot_types::traits::signature_key::SignatureKey; use libp2p::kad::store::{Error, RecordStore, Result}; use tracing::warn; -use crate::network::behaviours::dht::record::RecordKey; - use super::record::RecordValue; +use crate::network::behaviours::dht::record::RecordKey; /// A `RecordStore` wrapper that validates records before storing them. pub struct ValidatedStore { @@ -89,9 +88,8 @@ mod test { PeerId, }; - use crate::network::behaviours::dht::record::Namespace; - use super::*; + use crate::network::behaviours::dht::record::Namespace; /// Test that a valid record is stored #[test] diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 9d97db9d75..9c1871c43f 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -4,6 +4,10 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use hotshot_types::{ + request_response::{Request, Response}, + traits::signature_key::SignatureKey, +}; use libp2p::{ autonat, gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, @@ -17,10 +21,6 @@ use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; use super::{behaviours::dht::store::ValidatedStore, NetworkEventInternal}; -use hotshot_types::{ - request_response::{Request, Response}, - traits::signature_key::SignatureKey, -}; /// Overarching network behaviour performing: /// - network topology discovoery diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 63927c2f4e..01c29f7e2c 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -203,7 +203,9 @@ pub mod v0_2 { /// Version 0.3: marketplace. Bundles. pub mod v0_3 { pub use hotshot_builder_api::v0_3::Version; - use hotshot_types::{bundle::Bundle, traits::node_implementation::NodeType}; + use hotshot_types::{ + bundle::Bundle, traits::node_implementation::NodeType, vid::VidCommitment, + }; use vbs::version::StaticVersion; pub use super::BuilderClientError; @@ -217,9 +219,14 @@ pub mod v0_3 { /// # Errors /// - [`BuilderClientError::NotFound`] if block isn't available /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly - pub async fn bundle(&self, view_number: u64) -> Result, BuilderClientError> { + pub async fn bundle( + &self, + parent_view: u64, + parent_hash: VidCommitment, + view_number: u64, + ) -> Result, BuilderClientError> { self.inner - .get(&format!("bundle/{view_number}")) + .get(&format!("bundle/{parent_view}/{parent_hash}/{view_number}")) .send() .await .map_err(Into::into) diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index ee7203f491..fc10b549ee 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -9,7 +9,7 @@ use std::{ time::{Duration, Instant}, }; -use anyhow::{bail, Result}; +use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_timeout}; use async_trait::async_trait; @@ -34,7 +34,7 @@ use hotshot_types::{ }; use tracing::{debug, error, instrument, warn}; use url::Url; -use vbs::version::StaticVersionType; +use vbs::version::{StaticVersionType, Version}; use vec1::Vec1; use crate::{ @@ -232,127 +232,118 @@ impl, V: Versions> TransactionTask return None; } - #[allow(clippy::too_many_lines)] - /// marketplace view change handler - pub async fn handle_view_change_marketplace( + /// Produce a block by fetching auction results from the solver and bundles from builders. + /// + /// # Errors + /// + /// Returns an error if the solver cannot be contacted, or if none of the builders respond. + async fn produce_block_marketplace( &mut self, - event_stream: &Sender>>, block_view: TYPES::Time, - ) -> Option { - let version = match self.upgrade_lock.version(block_view).await { - Ok(v) => v, - Err(err) => { - error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); - return None; - } - }; - - // Only request bundles and propose with a nonempty block if we are not between versions. - if !self - .upgrade_lock - .decided_upgrade_certificate - .read() - .await - .as_ref() - .is_some_and(|cert| cert.upgrading_in(block_view)) - { - let start = Instant::now(); + task_start_time: Instant, + ) -> Result> { + ensure!( + !self + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .as_ref() + .is_some_and(|cert| cert.upgrading_in(block_view)), + "Not requesting block because we are upgrading", + ); - if let Ok(maybe_auction_result) = async_timeout( - self.builder_timeout, - self.auction_results_provider - .fetch_auction_result(block_view), - ) + let (parent_view, parent_hash) = self + .last_vid_commitment_retry(block_view, task_start_time) .await - { - let auction_result = maybe_auction_result - .map_err(|e| warn!("Failed to get auction results: {e:#}")) - .unwrap_or_default(); // We continue here, as we still have fallback builder URL + .context("Failed to find parent hash in time")?; - let mut futures = Vec::new(); + let start = Instant::now(); - let mut builder_urls = auction_result.clone().urls(); - builder_urls.push(self.fallback_builder_url.clone()); - - for url in builder_urls { - futures.push(async_timeout( - self.builder_timeout.saturating_sub(start.elapsed()), - async { - let client = BuilderClientMarketplace::new(url); - client.bundle(*block_view).await - }, - )); - } + let maybe_auction_result = async_timeout( + self.builder_timeout, + self.auction_results_provider + .fetch_auction_result(block_view), + ) + .await + .context("Timeout while getting auction result")?; + + let auction_result = maybe_auction_result + .map_err(|e| warn!("Failed to get auction results: {e:#}")) + .unwrap_or_default(); // We continue here, as we still have fallback builder URL + + let mut futures = Vec::new(); + + let mut builder_urls = auction_result.clone().urls(); + builder_urls.push(self.fallback_builder_url.clone()); + + for url in builder_urls { + futures.push(async_timeout( + self.builder_timeout.saturating_sub(start.elapsed()), + async { + let client = BuilderClientMarketplace::new(url); + client.bundle(*parent_view, parent_hash, *block_view).await + }, + )); + } - let mut bundles = Vec::new(); + let mut bundles = Vec::new(); - for bundle in join_all(futures).await { - match bundle { - Ok(Ok(b)) => bundles.push(b), - _ => continue, - } + for bundle in join_all(futures).await { + match bundle { + Ok(Ok(b)) => bundles.push(b), + Ok(Err(e)) => { + tracing::debug!("Failed to retrieve bundle: {e}"); + continue; } - - let mut sequencing_fees = Vec::new(); - let mut transactions: Vec< - >::Transaction, - > = Vec::new(); - - for bundle in bundles { - sequencing_fees.push(bundle.sequencing_fee); - transactions.extend(bundle.transactions); + Err(e) => { + tracing::debug!("Failed to retrieve bundle: {e}"); + continue; } + } + } - let validated_state = self.consensus.read().await.decided_state(); - - if let (Ok(sequencing_fees), Ok((block_payload, metadata))) = ( - Vec1::try_from_vec(sequencing_fees), - TYPES::BlockPayload::from_transactions( - transactions, - &validated_state, - &Arc::clone(&self.instance_state), - ) - .await, - ) { - broadcast_event( - Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( - block_payload.encode(), - metadata, - block_view, - sequencing_fees, - None, - Some(auction_result), - ))), - event_stream, - ) - .await; + let mut sequencing_fees = Vec::new(); + let mut transactions: Vec<>::Transaction> = + Vec::new(); - return None; - } - } else { - warn!("Timeout while getting auction results"); - } + for bundle in bundles { + sequencing_fees.push(bundle.sequencing_fee); + transactions.extend(bundle.transactions); } - // If we couldn't get any bundles (due to either all of the builders or solver failing to return a result), send an empty block - warn!( - "Failed to get a block for view {:?}, proposing empty block", - block_view - ); + let validated_state = self.consensus.read().await.decided_state(); - // Increment the metric for number of empty blocks proposed - self.consensus - .write() - .await - .metrics - .number_of_empty_blocks_proposed - .add(1); + let sequencing_fees = Vec1::try_from_vec(sequencing_fees) + .context("Failed to receive a bundle from any builder.")?; + let (block_payload, metadata) = TYPES::BlockPayload::from_transactions( + transactions, + &validated_state, + &Arc::clone(&self.instance_state), + ) + .await?; + + Ok(PackedBundle::new( + block_payload.encode(), + metadata, + block_view, + sequencing_fees, + None, + Some(auction_result), + )) + } + /// Produce a null block + pub fn null_block( + &self, + block_view: TYPES::Time, + version: Version, + ) -> Option> { let membership_total_nodes = self.membership.total_nodes(); let Some(null_fee) = null_block::builder_fee::(self.membership.total_nodes(), version) else { - error!("Failed to get null fee"); + error!("Failed to calculate null block fee."); return None; }; @@ -361,16 +352,61 @@ impl, V: Versions> TransactionTask let (_, precompute_data) = precompute_vid_commitment(&[], membership_total_nodes); - // Broadcast the empty block + Some(PackedBundle::new( + vec![].into(), + metadata, + block_view, + vec1::vec1![null_fee], + Some(precompute_data), + Some(TYPES::AuctionResult::default()), + )) + } + + #[allow(clippy::too_many_lines)] + /// marketplace view change handler + pub async fn handle_view_change_marketplace( + &mut self, + event_stream: &Sender>>, + block_view: TYPES::Time, + ) -> Option { + let task_start_time = Instant::now(); + + let version = match self.upgrade_lock.version(block_view).await { + Ok(v) => v, + Err(err) => { + error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + return None; + } + }; + + let packed_bundle = match self + .produce_block_marketplace(block_view, task_start_time) + .await + { + Ok(b) => b, + Err(e) => { + tracing::info!( + "Failed to get a block for view {:?}: {}. Continuing with empty block.", + block_view, + e + ); + + let null_block = self.null_block(block_view, version)?; + + // Increment the metric for number of empty blocks proposed + self.consensus + .write() + .await + .metrics + .number_of_empty_blocks_proposed + .add(1); + + null_block + } + }; + broadcast_event( - Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( - vec![].into(), - metadata, - block_view, - vec1::vec1![null_fee], - Some(precompute_data), - Some(TYPES::AuctionResult::default()), - ))), + Arc::new(HotShotEvent::BlockRecv(packed_bundle)), event_stream, ) .await; @@ -437,38 +473,59 @@ impl, V: Versions> TransactionTask None } + /// Get VID commitment for the last successful view before `block_view`. + /// Returns None if we don't have said commitment recorded. + #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] + async fn last_vid_commitment_retry( + &self, + block_view: TYPES::Time, + task_start_time: Instant, + ) -> Result<(TYPES::Time, VidCommitment)> { + loop { + match self.last_vid_commitment(block_view).await { + Ok((view, comm)) => break Ok((view, comm)), + Err(e) if task_start_time.elapsed() >= self.builder_timeout => break Err(e), + _ => { + // We still have time, will re-try in a bit + async_sleep(RETRY_DELAY).await; + continue; + } + } + } + } + /// Get VID commitment for the last successful view before `block_view`. /// Returns None if we don't have said commitment recorded. #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment( &self, block_view: TYPES::Time, - ) -> Option<(TYPES::Time, VidCommitment)> { + ) -> Result<(TYPES::Time, VidCommitment)> { let consensus = self.consensus.read().await; let mut target_view = TYPES::Time::new(block_view.saturating_sub(1)); loop { - let Some(view_data) = consensus.validated_state_map().get(&target_view) else { - tracing::warn!(?target_view, "Missing record for view in validated state"); - return None; - }; + let view_data = consensus + .validated_state_map() + .get(&target_view) + .context("Missing record for view {?target_view} in validated state")?; + match view_data.view_inner { ViewInner::Da { payload_commitment } => { - return Some((target_view, payload_commitment)) + return Ok((target_view, payload_commitment)) } ViewInner::Leaf { leaf: leaf_commitment, .. } => { - let Some(leaf) = consensus.saved_leaves().get(&leaf_commitment) else { - tracing::warn!(?target_view, %leaf_commitment, "Missing leaf in saved_leaves"); - return None; - }; - return Some((target_view, leaf.payload_commitment())); + let leaf = consensus.saved_leaves().get(&leaf_commitment).context + ("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves")?; + return Ok((target_view, leaf.payload_commitment())); } ViewInner::Failed => { // For failed views, backtrack - target_view = TYPES::Time::new(target_view.checked_sub(1)?); + target_view = + TYPES::Time::new(target_view.checked_sub(1).context("Reached genesis")?); continue; } } @@ -480,18 +537,14 @@ impl, V: Versions> TransactionTask let task_start_time = Instant::now(); // Find commitment to the block we want to build upon - let (parent_view, parent_comm) = loop { - match self.last_vid_commitment(block_view).await { - Some((view, comm)) => break (view, comm), - None if task_start_time.elapsed() < self.builder_timeout => { - // We still have time, will re-try in a bit - async_sleep(RETRY_DELAY).await; - continue; - } - _ => { - tracing::warn!("Failed to find commitment in time"); - return None; - } + let (parent_view, parent_comm) = match self + .last_vid_commitment_retry(block_view, task_start_time) + .await + { + Ok((v, c)) => (v, c), + Err(e) => { + tracing::warn!("Failed to find last vid commitment in time: {e}"); + return None; } }; diff --git a/types/src/request_response.rs b/types/src/request_response.rs index ca65c204c7..3397445935 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -7,12 +7,13 @@ //! Types for the request/response implementations. This module incorporates all //! of the shared types for all of the network backends. -use crate::traits::network::NetworkMsg; use async_lock::Mutex; use futures::channel::{mpsc::Receiver, oneshot}; use libp2p::request_response::ResponseChannel; use serde::{Deserialize, Serialize}; +use crate::traits::network::NetworkMsg; + /// Request for Consenus data #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Request(#[serde(with = "serde_bytes")] pub Vec); From 35eef86e30c896045e79f43bff1198ec4e0f8c6f Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 15 Aug 2024 15:46:14 +0300 Subject: [PATCH 1183/1393] Fix BuilderDataSource for marketplace builder (#3576) * Fix BuilderDataSource for marketplace builder --- builder-api/src/v0_3/builder.rs | 13 ++++++++++--- builder-api/src/v0_3/data_source.rs | 9 +++++++-- hotshot/src/traits/networking/libp2p_network.rs | 11 +++++------ 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/builder-api/src/v0_3/builder.rs b/builder-api/src/v0_3/builder.rs index b064fd84af..6c17dd0cee 100644 --- a/builder-api/src/v0_3/builder.rs +++ b/builder-api/src/v0_3/builder.rs @@ -26,10 +26,17 @@ where api.with_version("0.0.3".parse().unwrap()) .get("bundle", |req, state| { async move { + let parent_view = req.integer_param("parent_view")?; + let parent_hash = req.blob_param("parent_hash")?; let view_number = req.integer_param("view_number")?; - state.bundle(view_number).await.context(BlockClaimSnafu { - resource: view_number.to_string(), - }) + state + .bundle(parent_view, &parent_hash, view_number) + .await + .with_context(|_| BlockClaimSnafu { + resource: format!( + "Block for parent {parent_hash}@{parent_view} and view {view_number}" + ), + }) } .boxed() })? diff --git a/builder-api/src/v0_3/data_source.rs b/builder-api/src/v0_3/data_source.rs index d37acd3dde..60a77c37f3 100644 --- a/builder-api/src/v0_3/data_source.rs +++ b/builder-api/src/v0_3/data_source.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use hotshot_types::{bundle::Bundle, traits::node_implementation::NodeType}; +use hotshot_types::{bundle::Bundle, traits::node_implementation::NodeType, vid::VidCommitment}; use super::builder::BuildError; /// No changes to these types @@ -8,7 +8,12 @@ pub use crate::v0_1::data_source::AcceptsTxnSubmits; #[async_trait] pub trait BuilderDataSource { /// To get the list of available blocks - async fn bundle(&self, view_number: u64) -> Result, BuildError>; + async fn bundle( + &self, + parent_view: u64, + parent_hash: &VidCommitment, + view_number: u64, + ) -> Result, BuildError>; /// To get the builder's address async fn builder_address(&self) -> Result; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 072560ea6d..b334c27fe4 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -39,12 +39,16 @@ use futures::{ FutureExt, StreamExt, }; use hotshot_orchestrator::config::NetworkConfig; +#[cfg(feature = "hotshot-testing")] +use hotshot_types::traits::network::{ + AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, +}; use hotshot_types::{ boxed_sync, constants::LOOK_AHEAD, data::ViewNumber, message::{DataMessage::DataResponse, Message, MessageKind}, - request_response::{NetworkMsgResponseChannel, Request, Response}, + request_response::{NetworkMsgResponseChannel, Request, Response, TakeReceiver}, traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, @@ -54,11 +58,6 @@ use hotshot_types::{ }, BoxSyncFuture, }; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::{ - request_response::TakeReceiver, - traits::network::{AsyncGenerator, NetworkReliability, TestableNetworkingImplementation}, -}; use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, PeerId, From 4d6a1211e06d321b56f283f1f37cc42d3a397274 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 15 Aug 2024 12:34:37 -0400 Subject: [PATCH 1184/1393] Fix upgrade lock in network task (#3580) --- hotshot/src/tasks/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index ea5710dd23..269c771ff0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -48,8 +48,7 @@ use vbs::version::StaticVersionType; use crate::{ tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, UpgradeLock, - Versions, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, }; /// event for global event stream @@ -193,7 +192,7 @@ pub fn add_network_event_task< membership, filter, storage: Arc::clone(&handle.storage()), - upgrade_lock: UpgradeLock::new(), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; let task = Task::new( network_state, From 67effa7fcb63a881867015e9af941255e94fc0da Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 16 Aug 2024 11:52:11 -0400 Subject: [PATCH 1185/1393] Add marketplace integration test (#3569) --- example-types/src/block_types.rs | 4 +- example-types/src/node_types.rs | 14 ++ task-impls/src/consensus/handlers.rs | 3 - task-impls/src/events.rs | 5 - task-impls/src/network.rs | 8 +- task-impls/src/quorum_vote/handlers.rs | 3 - testing/src/consistency_task.rs | 138 +++++++++++------- testing/src/predicates/event.rs | 11 -- testing/src/test_builder.rs | 5 +- testing/src/test_runner.rs | 7 + testing/tests/tests_1/test_success.rs | 27 +++- .../tests_1/upgrade_task_with_consensus.rs | 2 - .../tests/tests_1/upgrade_task_with_vote.rs | 1 - types/src/lib.rs | 14 ++ 14 files changed, 150 insertions(+), 92 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 556312a05f..1547e46b9b 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -29,6 +29,7 @@ use time::OffsetDateTime; use vbs::version::Version; use crate::{ + auction_results_provider_types::TestAuctionResult, node_types::TestTypes, state_types::{TestInstanceState, TestValidatedState}, testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}, @@ -274,6 +275,7 @@ impl< BlockHeader = Self, BlockPayload = TestBlockPayload, InstanceState = TestInstanceState, + AuctionResult = TestAuctionResult, >, > BlockHeader for TestBlockHeader { @@ -349,7 +351,7 @@ impl< } fn get_auction_results(&self) -> Option { - unimplemented!() + Some(TYPES::AuctionResult { urls: vec![] }) } } diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 90e1394326..390b5472d2 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -146,3 +146,17 @@ impl Versions for TestVersions { type Marketplace = StaticVersion<0, 3>; } + +#[derive(Clone, Debug, Copy)] +pub struct MarketplaceUpgradeTestVersions {} + +impl Versions for MarketplaceUpgradeTestVersions { + type Base = StaticVersion<0, 2>; + type Upgrade = StaticVersion<0, 3>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + + type Marketplace = StaticVersion<0, 3>; +} diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 187b486ebb..ffca94fdd8 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -550,9 +550,6 @@ pub async fn handle_quorum_proposal_validated< .await; *decided_certificate_lock = Some(cert.clone()); drop(decided_certificate_lock); - let _ = event_stream - .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) - .await; } let mut consensus = task_state.consensus.write().await; diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index ee49a73141..17a9c4fb9b 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -183,8 +183,6 @@ pub enum HotShotEvent { UpgradeVoteSend(UpgradeVote), /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), - /// A HotShot upgrade was decided - UpgradeDecided(UpgradeCertificate), /* Consensus State Update Events */ /// A undecided view has been created and added to the validated state storage. @@ -421,9 +419,6 @@ impl Display for HotShotEvent { "UpgradeCertificateFormed(view_number={:?})", cert.view_number() ), - HotShotEvent::UpgradeDecided(cert) => { - write!(f, "UpgradeDecided(view_number{:?})", cert.view_number()) - } HotShotEvent::QuorumProposalRequest(view_number) => { write!(f, "QuorumProposalRequest(view_number={view_number:?})") } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index fe6712a1c4..3ca3771fa4 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -42,7 +42,6 @@ pub fn quorum_filter(event: &Arc>) -> bool | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::DacSend(_, _) | HotShotEvent::TimeoutVoteSend(_) - | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -53,7 +52,6 @@ pub fn upgrade_filter(event: &Arc>) -> bool event.as_ref(), HotShotEvent::UpgradeProposalSend(_, _) | HotShotEvent::UpgradeVoteSend(_) - | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -64,7 +62,6 @@ pub fn da_filter(event: &Arc>) -> bool { event.as_ref(), HotShotEvent::DaProposalSend(_, _) | HotShotEvent::DaVoteSend(_) - | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } @@ -73,9 +70,7 @@ pub fn da_filter(event: &Arc>) -> bool { pub fn vid_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), - HotShotEvent::VidDisperseSend(_, _) - | HotShotEvent::UpgradeDecided(_) - | HotShotEvent::ViewChange(_) + HotShotEvent::VidDisperseSend(_, _) | HotShotEvent::ViewChange(_) ) } @@ -89,7 +84,6 @@ pub fn view_sync_filter(event: &Arc>) -> bo | HotShotEvent::ViewSyncPreCommitVoteSend(_) | HotShotEvent::ViewSyncCommitVoteSend(_) | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::UpgradeDecided(_) | HotShotEvent::ViewChange(_) ) } diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 3a296ad8c7..a19a7ce5be 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -60,9 +60,6 @@ pub(crate) async fn handle_quorum_proposal_validated< .await; *decided_certificate_lock = Some(cert.clone()); drop(decided_certificate_lock); - let _ = sender - .broadcast(Arc::new(HotShotEvent::UpgradeDecided(cert.clone()))) - .await; } let mut consensus_writer = task_state.consensus.write().await; diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 42c30762c1..2d05c7c4df 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -21,10 +21,6 @@ use crate::{ test_task::{TestResult, TestTaskState}, }; -trait Validatable { - fn valid(&self) -> Result<()>; -} - /// Map from views to leaves for a single node, allowing multiple leaves for each view (because the node may a priori send us multiple leaves for a given view). pub type NodeMap = BTreeMap<::Time, Vec>>; @@ -32,7 +28,7 @@ pub type NodeMap = BTreeMap<::Time, Vec>>; pub type NodeMapSanitized = BTreeMap<::Time, Leaf>; /// Validate that the `NodeMap` only has a single leaf per view. -pub fn sanitize_node_map( +fn sanitize_node_map( node_map: &NodeMap, ) -> Result> { let mut result = BTreeMap::new(); @@ -58,27 +54,25 @@ pub fn sanitize_node_map( Ok(result) } -/// For a NodeLeafMap, we validate that each leaf extends the preceding leaf. -impl Validatable for NodeMapSanitized { - fn valid(&self) -> Result<()> { - let leaf_pairs = self.values().zip(self.values().skip(1)); +/// For a NodeMapSanitized, we validate that each leaf extends the preceding leaf. +fn validate_node_map(node_map: &NodeMapSanitized) -> Result<()> { + let leaf_pairs = node_map.values().zip(node_map.values().skip(1)); - // Check that the child leaf follows the parent, possibly with a gap. - for (parent, child) in leaf_pairs { - ensure!( + // Check that the child leaf follows the parent, possibly with a gap. + for (parent, child) in leaf_pairs { + ensure!( child.justify_qc().view_number >= parent.view_number(), "The node has provided leaf:\n\n{child:?}\n\nbut its quorum certificate points to a view before the most recent leaf:\n\n{parent:?}" ); - if child.justify_qc().view_number == parent.view_number() - && child.justify_qc().data.leaf_commit != parent.commit() - { - bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); - } + if child.justify_qc().view_number == parent.view_number() + && child.justify_qc().data.leaf_commit != parent.commit() + { + bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); } - - Ok(()) } + + Ok(()) } /// A map from node ids to `NodeMap`s; note that the latter may have multiple leaves per view in principle. @@ -88,7 +82,7 @@ pub type NetworkMap = BTreeMap>; pub type NetworkMapSanitized = BTreeMap>; /// Validate that each node has only produced one unique leaf per view, and produce a `NetworkMapSanitized`. -pub fn sanitize_network_map( +fn sanitize_network_map( network_map: &NetworkMap, ) -> Result> { let mut result = BTreeMap::new(); @@ -104,49 +98,58 @@ pub fn sanitize_network_map( Ok(result) } -impl Validatable for NetworkMap { - fn valid(&self) -> Result<()> { - let sanitized = sanitize_network_map(self)?; - - sanitized.valid() +pub type ViewMap = BTreeMap<::Time, BTreeMap>>; + +// Invert the network map by interchanging the roles of the node_id and view number. +// +// # Errors +// +// Returns an error if any node map is invalid. +fn invert_network_map( + network_map: &NetworkMapSanitized, +) -> Result> { + let mut inverted_map = BTreeMap::new(); + for (node_id, node_map) in network_map.iter() { + validate_node_map(node_map) + .context(format!("Node {node_id} has an invalid leaf history"))?; + + // validate each node's leaf map + for (view, leaf) in node_map.iter() { + let view_map = inverted_map.entry(*view).or_insert(BTreeMap::new()); + view_map.insert(*node_id, leaf.clone()); + } } + + Ok(inverted_map) } -/// For a NetworkLeafMap, we validate that no two nodes have submitted differing leaves for any given view, in addition to the individual NodeLeafMap checks. -impl Validatable for NetworkMapSanitized { - fn valid(&self) -> Result<()> { - // Invert the map by interchanging the roles of the node_id and view number. - let mut inverted_map = BTreeMap::new(); - for (node_id, node_map) in self.iter() { - node_map - .valid() - .context(format!("Node {node_id} has an invalid leaf history"))?; - - // validate each node's leaf map - for (view, leaf) in node_map.iter() { - let view_map = inverted_map.entry(*view).or_insert(BTreeMap::new()); - view_map.insert(*node_id, leaf.clone()); - } - } +/// A view map, sanitized to have exactly one leaf per view. +pub type ViewMapSanitized = BTreeMap<::Time, Leaf>; - for (view, view_map) in inverted_map.iter() { - let mut leaves: Vec<_> = view_map.iter().collect(); +fn sanitize_view_map( + view_map: &ViewMap, +) -> Result> { + let mut result = BTreeMap::new(); - leaves.dedup_by(|(_node_a, leaf_a), (_node_b, leaf_b)| leaf_a == leaf_b); + for (view, leaf_map) in view_map.iter() { + let mut node_leaves: Vec<_> = leaf_map.iter().collect(); - ensure!( - leaves.len() <= 1, - view_map.iter().fold( - format!("The network does not agree on view {view:?}."), - |acc, (node, leaf)| { - format!("{acc}\n\nNode {node} sent us leaf:\n\n{leaf:?}") - } - ) - ); - } + node_leaves.dedup_by(|(_node_a, leaf_a), (_node_b, leaf_b)| leaf_a == leaf_b); - Ok(()) + ensure!( + node_leaves.len() <= 1, + leaf_map.iter().fold( + format!("The network does not agree on view {view:?}."), + |acc, (node, leaf)| { format!("{acc}\n\nNode {node} sent us leaf:\n\n{leaf:?}") } + ) + ); + + if let Some(leaf) = node_leaves.first() { + result.insert(*view, leaf.1.clone()); + } } + + Ok(result) } /// Data availability task state @@ -155,6 +158,29 @@ pub struct ConsistencyTask { pub consensus_leaves: NetworkMap, /// safety task requirements pub safety_properties: OverallSafetyPropertiesDescription, + /// whether we should have seen an upgrade certificate or not + pub ensure_upgrade: bool, +} + +impl ConsistencyTask { + pub fn validate(&self) -> Result<()> { + let sanitized_network_map = sanitize_network_map(&self.consensus_leaves)?; + + let inverted_map = invert_network_map(&sanitized_network_map)?; + + let sanitized_view_map = sanitize_view_map(&inverted_map)?; + + let expected_upgrade = self.ensure_upgrade; + let actual_upgrade = sanitized_view_map.iter().fold(false, |acc, (_view, leaf)| { + acc || leaf.upgrade_certificate().is_some() + }); + + ensure!(expected_upgrade == actual_upgrade, + "Mismatch between expected and actual upgrade. Expected upgrade: {expected_upgrade}. Actual upgrade: {actual_upgrade}" + ); + + Ok(()) + } } #[async_trait] @@ -181,7 +207,7 @@ impl TestTaskState for ConsistencyTask { } fn check(&self) -> TestResult { - if let Err(e) = self.consensus_leaves.valid() { + if let Err(e) = self.validate() { return TestResult::Fail(Box::new(e)); } diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 8b740b8482..d933184c1e 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -147,17 +147,6 @@ where Box::new(EventPredicate { check, info }) } -pub fn upgrade_decided() -> Box> -where - TYPES: NodeType, -{ - let info = "UpgradeDecided".to_string(); - let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), UpgradeDecided(_))); - - Box::new(EventPredicate { check, info }) -} - pub fn quorum_vote_send() -> Box> where TYPES: NodeType, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 9a654e619e..2cb521b08b 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -95,6 +95,8 @@ pub struct TestDescription, V: Ver pub behaviour: Rc Behaviour>, /// Delay config if any to add delays to asynchronous calls pub async_delay_config: DelayConfig, + /// view in which to propose an upgrade + pub upgrade_view: Option, } #[derive(Debug)] @@ -377,6 +379,7 @@ impl, V: Versions> Default }, behaviour: Rc::new(|_| Behaviour::Standard), async_delay_config: DelayConfig::default(), + upgrade_view: None, } } } @@ -509,7 +512,7 @@ where config, marketplace_config: Box::new(|_| MarketplaceConfig:: { auction_results_provider: TestAuctionResultsProvider::::default().into(), - fallback_builder_url: Url::parse("http://localhost").unwrap(), + fallback_builder_url: Url::parse("http://localhost:9999").unwrap(), }), }, metadata: self, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 372e3ece3b..6167d23698 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -208,6 +208,7 @@ where let consistency_task_state = ConsistencyTask { consensus_leaves: BTreeMap::new(), safety_properties: self.launcher.metadata.overall_safety_properties, + ensure_upgrade: self.launcher.metadata.upgrade_view.is_some(), }; let consistency_task = TestTask::>::new( @@ -403,6 +404,9 @@ where for i in 0..total { let mut config = config.clone(); + if let Some(upgrade_view) = self.launcher.metadata.upgrade_view { + config.set_view_upgrade(upgrade_view); + } let node_id = self.next_node_id; self.next_node_id += 1; tracing::debug!("launch node {}", i); @@ -451,6 +455,8 @@ where marketplace_config.auction_results_provider = new_auction_results_provider.into(); } + marketplace_config.fallback_builder_url = builder_urls.first().unwrap().clone(); + let network_clone = network.clone(); let networks_ready_future = async move { network_clone.wait_for_ready().await; @@ -487,6 +493,7 @@ where // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); + let hotshot = Self::add_node_with_config( node_id, network.clone(), diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index ef955e13d7..5a2b141cdd 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -8,7 +8,10 @@ use std::{rc::Rc, time::Duration}; use hotshot::tasks::{BadProposalViewDos, DoubleProposeVote}; use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, + node_types::{ + Libp2pImpl, MarketplaceUpgradeTestVersions, MemoryImpl, PushCdnImpl, + TestConsecutiveLeaderTypes, TestVersions, + }, state_types::TestTypes, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -39,6 +42,26 @@ cross_tests!( }, ); +cross_tests!( + TestName: test_success_marketplace, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [MarketplaceUpgradeTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + upgrade_view: Some(5), + ..TestDescription::default() + } + }, +); + cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -57,7 +80,7 @@ cross_tests!( }; metadata.overall_safety_properties.num_failed_views = 0; - metadata.overall_safety_properties.num_successful_views = 10; + metadata.overall_safety_properties.num_successful_views = 0; let mut config = DelayConfig::default(); let delay_settings = DelaySettings { delay_option: DelayOptions::Random, diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index d196356455..9c4ada9ae4 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -177,7 +177,6 @@ async fn test_upgrade_task_vote() { exact(ViewChange(ViewNumber::new(6))), validated_state_updated(), quorum_proposal_validated(), - upgrade_decided(), leaf_decided(), ], task_state_asserts: vec![decided_upgrade_certificate()], @@ -604,7 +603,6 @@ async fn test_upgrade_task_blank_blocks() { exact(ViewChange(ViewNumber::new(5))), validated_state_updated(), quorum_proposal_validated(), - upgrade_decided(), leaf_decided(), // This is between versions, but we are receiving a null block and hence should vote affirmatively on it. quorum_vote_send(), diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 5613f66734..7fa5d3bf40 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -181,7 +181,6 @@ async fn test_upgrade_task_with_vote() { ), Expectations::from_outputs_and_task_states( all_predicates![ - upgrade_decided(), exact(LockedViewUpdated(ViewNumber::new(4))), exact(LastDecidedViewUpdated(ViewNumber::new(3))), leaf_decided(), diff --git a/types/src/lib.rs b/types/src/lib.rs index 566cebeecf..6206497700 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -227,3 +227,17 @@ pub struct HotShotConfig { /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. pub stop_voting_time: u64, } + +impl HotShotConfig { + /// Update a hotshot config to have a view-based upgrade. + pub fn set_view_upgrade(&mut self, view: u64) { + self.start_proposing_view = view; + self.stop_proposing_view = view + 1; + self.start_voting_view = view.saturating_sub(1); + self.stop_voting_view = view + 10; + self.start_proposing_time = 0; + self.stop_proposing_time = u64::MAX; + self.start_voting_time = 0; + self.stop_voting_time = u64::MAX; + } +} From 6dee5198200cc7ebfb9fd13d06f331b68331146c Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 19 Aug 2024 09:35:30 -0400 Subject: [PATCH 1186/1393] [LibP2P] - Implement Request/Response For The PushCDN Network (#3575) * all tests passing still, wired up secondary event source * revert * working * cleanup, clippy * working test * propagate event streams and remove propser * fix dependency tasks and invalid key * comments * reduce complexity of spawning behavior --- .../src/traits/networking/push_cdn_network.rs | 31 ++-- task-impls/src/consensus/handlers.rs | 77 +++++++--- task-impls/src/consensus/mod.rs | 95 ++++++++---- task-impls/src/events.rs | 27 +++- task-impls/src/helpers.rs | 74 ++++++--- task-impls/src/network.rs | 25 ++++ task-impls/src/quorum_proposal/handlers.rs | 17 ++- .../src/quorum_proposal_recv/handlers.rs | 14 +- task-impls/src/quorum_proposal_recv/mod.rs | 17 ++- task-impls/src/quorum_vote/mod.rs | 2 + task-impls/src/request.rs | 140 +++--------------- testing/src/predicates/event.rs | 11 -- .../tests_1/quorum_proposal_recv_task.rs | 7 +- .../tests/tests_1/vote_dependency_handle.rs | 2 +- testing/tests/tests_2/catchup.rs | 62 ++++++++ types/src/message.rs | 14 ++ 16 files changed, 380 insertions(+), 235 deletions(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 36a3b5138f..3c544a6e61 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -33,6 +33,7 @@ use cdn_client::{ }; #[cfg(feature = "hotshot-testing")] use cdn_marshal::{Config as MarshalConfig, Marshal}; +use futures::channel::mpsc; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -40,6 +41,7 @@ use hotshot_types::traits::network::{ use hotshot_types::{ boxed_sync, data::ViewNumber, + request_response::NetworkMsgResponseChannel, traits::{ metrics::{Counter, Metrics, NoMetrics}, network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError, Topic as HotShotTopic}, @@ -434,21 +436,20 @@ impl TestableNetworkingImplementation #[async_trait] impl ConnectedNetwork for PushCdnNetwork { - // async fn request_data( - // &self, - // request: Vec, - // recipient: ReqDataK, - // ) -> Result, NetworkError> { - // self.client.send_direct_message(recipient, request).await; - - // Ok(vec![]) - // } - - // async fn spawn_request_receiver_task( - // &self, - // ) -> Option, NetworkMsgResponseChannel>)>> { - // None - // } + async fn request_data( + &self, + _request: Vec, + _recipient: &K, + ) -> Result, NetworkError> { + Ok(vec![]) + } + + async fn spawn_request_receiver_task( + &self, + ) -> Option, NetworkMsgResponseChannel>)>> { + let (mut _tx, rx) = mpsc::channel(1); + Some(rx) + } /// Pause sending and receiving on the PushCDN network. fn pause(&self) { diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index ffca94fdd8..340741b5f5 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -8,7 +8,7 @@ use core::time::Duration; use std::{marker::PhantomData, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; -use async_broadcast::Sender; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -163,6 +163,7 @@ pub async fn create_and_send_proposal( pub async fn publish_proposal_from_commitment_and_metadata( view: TYPES::Time, sender: Sender>>, + receiver: Receiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -178,6 +179,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( view: TYPES::Time, sender: Sender>>, + receiver: Receiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -275,6 +278,7 @@ pub async fn publish_proposal_if_able( publish_proposal_from_commitment_and_metadata( view, sender, + receiver, quorum_membership, public_key, private_key, @@ -302,7 +306,8 @@ pub(crate) async fn handle_quorum_proposal_recv< >( proposal: &Proposal>, sender: &TYPES::SignatureKey, - event_stream: Sender>>, + event_sender: Sender>>, + event_receiver: Receiver>>, task_state: &mut ConsensusTaskState, ) -> Result>> { let sender = sender.clone(); @@ -333,7 +338,7 @@ pub(crate) async fn handle_quorum_proposal_recv< // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here if let Err(e) = update_view::( view, - &event_stream, + &event_sender, task_state.timeout, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), &mut task_state.cur_view, @@ -360,9 +365,11 @@ pub(crate) async fn handle_quorum_proposal_recv< Some(p) => Some(p), None => fetch_proposal( justify_qc.view_number(), - event_stream.clone(), + event_sender.clone(), + event_receiver.clone(), Arc::clone(&task_state.quorum_membership), OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + task_state.public_key.clone(), ) .await .ok(), @@ -470,7 +477,8 @@ pub(crate) async fn handle_quorum_proposal_recv< ); let create_and_send_proposal_handle = publish_proposal_if_able( qc.view_number + 1, - event_stream, + event_sender, + event_receiver, Arc::clone(&task_state.quorum_membership), task_state.public_key.clone(), task_state.private_key.clone(), @@ -509,7 +517,7 @@ pub(crate) async fn handle_quorum_proposal_recv< OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), Arc::clone(&task_state.quorum_membership), - event_stream.clone(), + event_sender.clone(), sender, task_state.output_event_stream.clone(), task_state.id, @@ -528,7 +536,8 @@ pub async fn handle_quorum_proposal_validated< V: Versions, >( proposal: &QuorumProposal, - event_stream: Sender>>, + event_sender: Sender>>, + event_receiver: Receiver>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { let view = proposal.view_number(); @@ -572,14 +581,16 @@ pub async fn handle_quorum_proposal_validated< task_state.cancel_tasks(new_decided_view).await; } task_state.current_proposal = Some(proposal.clone()); - task_state.spawn_vote_task(view, event_stream.clone()).await; + task_state + .spawn_vote_task(view, event_sender.clone(), event_receiver.clone()) + .await; if should_propose { debug!( "Attempting to publish proposal after voting; now in view: {}", *new_view ); if let Err(e) = task_state - .publish_proposal(new_view, event_stream.clone()) + .publish_proposal(new_view, event_sender.clone(), event_receiver.clone()) .await { debug!("Failed to propose; error = {e:?}"); @@ -632,7 +643,7 @@ pub async fn handle_quorum_proposal_validated< decide_sent.await; broadcast_event( Arc::new(HotShotEvent::LeafDecided(res.leaves_decided)), - &event_stream, + &event_sender, ) .await; debug!("decide send succeeded"); @@ -643,12 +654,22 @@ pub async fn handle_quorum_proposal_validated< /// Private key, latest decided upgrade certificate, committee membership, and event stream, for /// sending the vote. -type VoteInfo = ( - <::SignatureKey as SignatureKey>::PrivateKey, - UpgradeLock, - Arc<::Membership>, - Sender>>, -); +pub(crate) struct VoteInfo { + /// The private key of the voting node. + pub private_key: <::SignatureKey as SignatureKey>::PrivateKey, + + /// The locked upgrade of the voting node. + pub upgrade_lock: UpgradeLock, + + /// The DA Membership handle + pub da_membership: Arc<::Membership>, + + /// The event sending stream. + pub event_sender: Sender>>, + + /// The event receiver stream. + pub event_receiver: Receiver>>, +} #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] @@ -692,7 +713,13 @@ pub async fn update_state_and_vote_if_able< return false; }; - if let Some(upgrade_cert) = &vote_info.1.decided_upgrade_certificate.read().await.clone() { + if let Some(upgrade_cert) = &vote_info + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone() + { if upgrade_cert.upgrading_in(cur_view) && Some(proposal.block_header.payload_commitment()) != null_block::commitment(quorum_membership.total_nodes()) @@ -722,9 +749,11 @@ pub async fn update_state_and_vote_if_able< Some(p) => Some(p), None => fetch_proposal( justify_qc.view_number(), - vote_info.3.clone(), + vote_info.event_sender.clone(), + vote_info.event_receiver.clone(), Arc::clone(&quorum_membership), OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + public_key.clone(), ) .await .ok(), @@ -747,7 +776,7 @@ pub async fn update_state_and_vote_if_able< }; drop(read_consnesus); - let version = match vote_info.1.version(view).await { + let version = match vote_info.upgrade_lock.version(view).await { Ok(version) => version, Err(e) => { error!("Failed to calculate the version: {e:?}"); @@ -778,7 +807,7 @@ pub async fn update_state_and_vote_if_able< } // Validate the DAC. - let message = if cert.is_valid_cert(vote_info.2.as_ref()) { + let message = if cert.is_valid_cert(vote_info.da_membership.as_ref()) { // Validate the block payload commitment for non-genesis DAC. if cert.date().payload_commit != proposal.block_header.payload_commitment() { warn!( @@ -793,7 +822,7 @@ pub async fn update_state_and_vote_if_able< }, view, &public_key, - &vote_info.0, + &vote_info.private_key, ) { GeneralConsensusMessage::::Vote(vote) } else { @@ -848,7 +877,11 @@ pub async fn update_state_and_vote_if_able< ); return false; } - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &vote_info.3).await; + broadcast_event( + Arc::new(HotShotEvent::QuorumVoteSend(vote)), + &vote_info.event_sender, + ) + .await; return true; } debug!( diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 2b6970f785..71cc267034 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -39,7 +39,7 @@ use tracing::{debug, error, info, instrument, warn}; use crate::{ consensus::handlers::{ handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, - update_state_and_vote_if_able, + update_state_and_vote_if_able, VoteInfo, }, events::{HotShotEvent, HotShotTaskCompleted}, helpers::{broadcast_event, cancel_task, update_view, DONT_SEND_VIEW_CHANGE_EVENT}, @@ -197,11 +197,13 @@ impl, V: Versions> ConsensusTaskSt async fn publish_proposal( &mut self, view: TYPES::Time, - event_stream: Sender>>, + event_sender: Sender>>, + event_receiver: Receiver>>, ) -> Result<()> { let create_and_send_proposal_handle = publish_proposal_if_able( view, - event_stream, + event_sender, + event_receiver, Arc::clone(&self.quorum_membership), self.public_key.clone(), self.private_key.clone(), @@ -230,7 +232,8 @@ impl, V: Versions> ConsensusTaskSt async fn spawn_vote_task( &mut self, view: TYPES::Time, - event_stream: Sender>>, + event_sender: Sender>>, + event_receiver: Receiver>>, ) { let Some(proposal) = self.current_proposal.clone() else { return; @@ -256,7 +259,13 @@ impl, V: Versions> ConsensusTaskSt storage, quorum_mem, instance_state, - (priv_key, upgrade, da_mem, event_stream), + VoteInfo { + private_key: priv_key, + upgrade_lock: upgrade, + da_membership: da_mem, + event_sender, + event_receiver, + }, id, ) .await; @@ -269,18 +278,26 @@ impl, V: Versions> ConsensusTaskSt pub async fn handle( &mut self, event: Arc>, - event_stream: Sender>>, + event_sender: Sender>>, + event_receiver: Receiver>>, ) { match event.as_ref() { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!("proposal recv view: {:?}", proposal.data.view_number()); - match handle_quorum_proposal_recv(proposal, sender, event_stream.clone(), self) - .await + match handle_quorum_proposal_recv( + proposal, + sender, + event_sender.clone(), + event_receiver.clone(), + self, + ) + .await { Ok(Some(current_proposal)) => { let view = current_proposal.view_number(); self.current_proposal = Some(current_proposal); - self.spawn_vote_task(view, event_stream).await; + self.spawn_vote_task(view, event_sender, event_receiver) + .await; } Ok(None) => {} Err(e) => debug!("Failed to propose {e:#}"), @@ -288,8 +305,13 @@ impl, V: Versions> ConsensusTaskSt } HotShotEvent::QuorumProposalValidated(proposal, _) => { debug!("proposal validated view: {:?}", proposal.view_number()); - if let Err(e) = - handle_quorum_proposal_validated(proposal, event_stream.clone(), self).await + if let Err(e) = handle_quorum_proposal_validated( + proposal, + event_sender.clone(), + event_receiver.clone(), + self, + ) + .await { warn!("Failed to handle QuorumProposalValidated event {e:#}"); } @@ -318,13 +340,13 @@ impl, V: Versions> ConsensusTaskSt TYPES, QuorumVote, QuorumCertificate, - >(&info, event, &event_stream) + >(&info, event, &event_sender) .await; } else { let result = collector .as_mut() .unwrap() - .handle_vote_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_sender) .await; if result == Some(HotShotTaskCompleted) { @@ -357,13 +379,13 @@ impl, V: Versions> ConsensusTaskSt TYPES, TimeoutVote, TimeoutCertificate, - >(&info, event, &event_stream) + >(&info, event, &event_sender) .await; } else { let result = collector .as_mut() .unwrap() - .handle_vote_event(Arc::clone(&event), &event_stream) + .handle_vote_event(Arc::clone(&event), &event_sender) .await; if result == Some(HotShotTaskCompleted) { @@ -383,7 +405,7 @@ impl, V: Versions> ConsensusTaskSt ); if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_stream) + .publish_proposal(qc.view_number + 1, event_sender, event_receiver) .await { debug!("Failed to propose; error = {e:?}"); @@ -403,7 +425,7 @@ impl, V: Versions> ConsensusTaskSt ); if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_stream) + .publish_proposal(qc.view_number + 1, event_sender, event_receiver) .await { debug!("Failed to propose; error = {e:?}"); @@ -438,7 +460,8 @@ impl, V: Versions> ConsensusTaskSt if proposal.view_number() != view { return; } - self.spawn_vote_task(view, event_stream).await; + self.spawn_vote_task(view, event_sender, event_receiver) + .await; } HotShotEvent::VidShareRecv(disperse) => { let view = disperse.data.view_number(); @@ -477,7 +500,8 @@ impl, V: Versions> ConsensusTaskSt if proposal.view_number() != view { return; } - self.spawn_vote_task(view, event_stream.clone()).await; + self.spawn_vote_task(view, event_sender.clone(), event_receiver.clone()) + .await; } HotShotEvent::ViewChange(new_view) => { let new_view = *new_view; @@ -513,7 +537,7 @@ impl, V: Versions> ConsensusTaskSt // Returns if the view does not need updating. if let Err(e) = update_view::( new_view, - &event_stream, + &event_sender, self.timeout, OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &mut self.cur_view, @@ -553,7 +577,7 @@ impl, V: Versions> ConsensusTaskSt return; }; - broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), &event_stream).await; + broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), &event_sender).await; broadcast_event( Event { view_number: view, @@ -605,7 +629,10 @@ impl, V: Versions> ConsensusTaskSt if self.quorum_membership.leader(view) == self.public_key && self.consensus.read().await.high_qc().view_number() + 1 == view { - if let Err(e) = self.publish_proposal(view, event_stream.clone()).await { + if let Err(e) = self + .publish_proposal(view, event_sender.clone(), event_receiver.clone()) + .await + { error!("Failed to propose; error = {e:?}"); }; } @@ -621,14 +648,20 @@ impl, V: Versions> ConsensusTaskSt if self.quorum_membership.leader(tc.view_number() + 1) == self.public_key { - if let Err(e) = self.publish_proposal(view, event_stream).await { + if let Err(e) = self + .publish_proposal(view, event_sender, event_receiver) + .await + { debug!("Failed to propose; error = {e:?}"); }; } } ViewChangeEvidence::ViewSync(vsc) => { if self.quorum_membership.leader(vsc.view_number()) == self.public_key { - if let Err(e) = self.publish_proposal(view, event_stream).await { + if let Err(e) = self + .publish_proposal(view, event_sender, event_receiver) + .await + { debug!("Failed to propose; error = {e:?}"); }; } @@ -655,7 +688,10 @@ impl, V: Versions> ConsensusTaskSt *certificate.view_number ); - if let Err(e) = self.publish_proposal(view, event_stream).await { + if let Err(e) = self + .publish_proposal(view, event_sender, event_receiver) + .await + { debug!("Failed to propose; error = {e:?}"); }; } @@ -675,7 +711,10 @@ impl, V: Versions> ConsensusTaskSt "Attempting to publish proposal after voting; now in view: {}", *new_view ); - if let Err(e) = self.publish_proposal(new_view, event_stream.clone()).await { + if let Err(e) = self + .publish_proposal(new_view, event_sender.clone(), event_receiver.clone()) + .await + { debug!("failed to propose e = {:?}", e); } } @@ -716,9 +755,9 @@ impl, V: Versions> TaskState &mut self, event: Arc, sender: &Sender>, - _receiver: &Receiver>, + receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; + self.handle(event, sender.clone(), receiver.clone()).await; Ok(()) } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 17a9c4fb9b..082caef45c 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -97,8 +97,18 @@ pub enum HotShotEvent { /// 3. The justify QC is valid /// 4. The proposal passes either liveness or safety check. QuorumProposalValidated(QuorumProposal, Leaf), - /// A quorum proposal is missing for a view that we need - QuorumProposalRequest(ProposalMissing), + /// A quorum proposal is missing for a view that we need. Also includes the sender key. + QuorumProposalRequestSend(TYPES::Time, TYPES::SignatureKey), + /// A quorum proposal was requested by a node for a view. Also includes the sender key. + QuorumProposalRequestRecv(TYPES::Time, TYPES::SignatureKey), + /// A quorum proposal was missing for a view. As the leader, we send a reply to the recipient with their key. + QuorumProposalResponseSend( + TYPES::Time, + TYPES::SignatureKey, + Proposal>, + ), + /// A quorum proposal was requested by a node for a view. Also includes the sender key. + QuorumProposalResponseRecv(TYPES::Time, Proposal>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal @@ -419,8 +429,17 @@ impl Display for HotShotEvent { "UpgradeCertificateFormed(view_number={:?})", cert.view_number() ), - HotShotEvent::QuorumProposalRequest(view_number) => { - write!(f, "QuorumProposalRequest(view_number={view_number:?})") + HotShotEvent::QuorumProposalRequestSend(view_number, _) => { + write!(f, "QuorumProposalRequestSend(view_number={view_number:?})") + } + HotShotEvent::QuorumProposalRequestRecv(view_number, _) => { + write!(f, "QuorumProposalRequestRecv(view_number={view_number:?})") + } + HotShotEvent::QuorumProposalResponseSend(view_number, _, _) => { + write!(f, "QuorumProposalResponseSend(view_number={view_number:?})") + } + HotShotEvent::QuorumProposalResponseRecv(view_number, _) => { + write!(f, "QuorumProposalResponseRecv(view_number={view_number:?})") } HotShotEvent::ValidatedStateUpdated(view_number, _) => { write!(f, "ValidatedStateUpdated(view_number={view_number:?})") diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 14b6f0483f..4a7903b847 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -11,13 +11,14 @@ use std::{ }; use anyhow::{bail, ensure, Context, Result}; -use async_broadcast::{broadcast, SendError, Sender}; +use async_broadcast::{Receiver, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; +use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, @@ -37,32 +38,64 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use crate::{ - events::{HotShotEvent, ProposalMissing}, - request::REQUEST_TIMEOUT, -}; +use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; -/// Trigger a request to the network for a proposal for a view and wait for the response +/// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] pub(crate) async fn fetch_proposal( - view: TYPES::Time, - event_stream: Sender>>, + view_number: TYPES::Time, + event_sender: Sender>>, + event_receiver: Receiver>>, quorum_membership: Arc, consensus: OuterConsensus, + sender_key: TYPES::SignatureKey, ) -> Result> { - let (tx, mut rx) = broadcast(1); - let event = ProposalMissing { - view, - response_chan: tx, - }; + // First, broadcast that we need a proposal to the current leader broadcast_event( - Arc::new(HotShotEvent::QuorumProposalRequest(event)), - &event_stream, + HotShotEvent::QuorumProposalRequestSend(view_number, sender_key).into(), + &event_sender, ) .await; - let Ok(Ok(Some(proposal))) = async_timeout(REQUEST_TIMEOUT, rx.recv_direct()).await else { + + // Make a background task to await the arrival of the event data. + let Ok(Some(proposal)) = + // We want to explicitly timeout here so we aren't waiting around for the data. + async_timeout(REQUEST_TIMEOUT, async move { + // First, capture the output from the event dependency + let event = EventDependency::new( + event_receiver.clone(), + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::QuorumProposalResponseRecv( + qpr_view_number, + _quorum_proposal, + ) = event + { + *qpr_view_number == view_number + } else { + false + } + }), + ) + .completed() + .await; + + // Then, if it's `Some`, make sure that the data is correct + if let Some(hs_event) = event.as_ref() { + if let HotShotEvent::QuorumProposalResponseRecv(_view_number, quorum_proposal) = + hs_event.as_ref() + { + return Some(quorum_proposal.clone()); + } + } + + None + }) + .await + else { bail!("Request for proposal failed"); }; + let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); @@ -89,7 +122,7 @@ pub(crate) async fn fetch_proposal( consensus_write.update_saved_leaves(leaf.clone()); broadcast_event( HotShotEvent::ValidatedStateUpdated(view_number, view).into(), - &event_stream, + &event_sender, ) .await; Ok(leaf) @@ -287,7 +320,8 @@ pub async fn decide_from_proposal( #[instrument(skip_all)] pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::Time, - event_stream: &Sender>>, + event_sender: &Sender>>, + event_receiver: &Receiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, consensus: OuterConsensus, @@ -305,9 +339,11 @@ pub(crate) async fn parent_leaf_and_state( { let _ = fetch_proposal( parent_view_number, - event_stream.clone(), + event_sender.clone(), + event_receiver.clone(), quorum_membership, consensus.clone(), + public_key.clone(), ) .await .context("Failed to fetch proposal")?; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 3ca3771fa4..4f94f37840 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -39,6 +39,8 @@ pub fn quorum_filter(event: &Arc>) -> bool !matches!( event.as_ref(), HotShotEvent::QuorumProposalSend(_, _) + | HotShotEvent::QuorumProposalRequestSend(..) + | HotShotEvent::QuorumProposalResponseSend(..) | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::DacSend(_, _) | HotShotEvent::TimeoutVoteSend(_) @@ -113,6 +115,15 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::Proposal(proposal) => { HotShotEvent::QuorumProposalRecv(proposal, sender) } + GeneralConsensusMessage::ProposalRequested(view, sender) => { + HotShotEvent::QuorumProposalRequestRecv(view, sender) + } + GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { + HotShotEvent::QuorumProposalResponseRecv( + proposal.data.view_number, + proposal, + ) + } GeneralConsensusMessage::Vote(vote) => { HotShotEvent::QuorumVoteRecv(vote.clone()) } @@ -292,6 +303,20 @@ impl< TransmitType::Direct(membership.leader(vote.view_number() + 1)), ) } + HotShotEvent::QuorumProposalRequestSend(view_number, sender_key) => ( + sender_key.clone(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ProposalRequested(view_number, sender_key), + )), + TransmitType::Direct(membership.leader(view_number)), + ), + HotShotEvent::QuorumProposalResponseSend(_view_number, sender_key, proposal) => ( + sender_key.clone(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::LeaderProposalAvailable(proposal), + )), + TransmitType::Direct(sender_key), + ), HotShotEvent::VidDisperseSend(proposal, sender) => { self.handle_vid_disperse_proposal(proposal, &sender).await; return; diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index f2386ce826..cafb29b597 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -121,6 +121,7 @@ impl ProposalDependencyHandle { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, &self.sender, + &self.receiver, Arc::clone(&self.quorum_membership), self.public_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), @@ -246,7 +247,7 @@ impl ProposalDependencyHandle { impl HandleDepOutput for ProposalDependencyHandle { type Output = Vec>>>>; - #[allow(clippy::no_effect_underscore_binding)] + #[allow(clippy::no_effect_underscore_binding, clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { let high_qc_view_number = self.consensus.read().await.high_qc().view_number; if !self @@ -258,10 +259,20 @@ impl HandleDepOutput for ProposalDependencyHandle< { // The proposal for the high qc view is missing, try to get it asynchronously let membership = Arc::clone(&self.quorum_membership); - let sender = self.sender.clone(); + let event_sender = self.sender.clone(); + let event_receiver = self.receiver.clone(); + let sender_key = self.public_key.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); async_spawn(async move { - fetch_proposal(high_qc_view_number, sender, membership, consensus).await + fetch_proposal( + high_qc_view_number, + event_sender, + event_receiver, + membership, + consensus, + sender_key, + ) + .await }); // Block on receiving the event from the event stream. EventDependency::new( diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 9792fa64d6..c1774e11ee 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use anyhow::{bail, Context, Result}; -use async_broadcast::{broadcast, Sender}; +use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; use hotshot_types::{ @@ -131,11 +131,12 @@ pub(crate) async fn handle_quorum_proposal_recv< V: Versions, >( proposal: &Proposal>, - sender: &TYPES::SignatureKey, + quorum_proposal_sender_key: &TYPES::SignatureKey, event_sender: &Sender>>, + event_receiver: &Receiver>>, task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { - let sender = sender.clone(); + let quorum_proposal_sender_key = quorum_proposal_sender_key.clone(); let cur_view = task_state.cur_view; validate_proposal_view_and_certs( @@ -177,8 +178,13 @@ pub(crate) async fn handle_quorum_proposal_recv< None => fetch_proposal( justify_qc.view_number(), event_sender.clone(), + event_receiver.clone(), Arc::clone(&task_state.quorum_membership), OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + // Note that we explicitly use the node key here instead of the provided key in the signature. + // This is because the key that we receive is for the prior leader, so the payload would be routed + // incorrectly. + task_state.public_key.clone(), ) .await .ok(), @@ -237,7 +243,7 @@ pub(crate) async fn handle_quorum_proposal_recv< Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), Arc::clone(&task_state.quorum_membership), event_sender.clone(), - sender, + quorum_proposal_sender_key, task_state.output_event_stream.clone(), task_state.id, ) diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 688c393c5a..dc0fb5124c 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -122,11 +122,20 @@ impl, V: Versions> pub async fn handle( &mut self, event: Arc>, - event_stream: Sender>>, + event_sender: Sender>>, + event_receiver: Receiver>>, ) { #[cfg(feature = "dependency-tasks")] if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { - match handle_quorum_proposal_recv(proposal, sender, &event_stream, self).await { + match handle_quorum_proposal_recv( + proposal, + sender, + &event_sender, + &event_receiver, + self, + ) + .await + { Ok(()) => { self.cancel_tasks(proposal.data.view_number()).await; } @@ -146,9 +155,9 @@ impl, V: Versions> TaskState &mut self, event: Arc, sender: &Sender>, - _receiver: &Receiver>, + receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; + self.handle(event, sender.clone(), receiver.clone()).await; Ok(()) } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 6c4f8651e3..43c279af2b 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -112,8 +112,10 @@ impl + 'static, V: Versions> None => fetch_proposal( justify_qc.view_number(), self.sender.clone(), + self.receiver.clone(), Arc::clone(&self.quorum_membership), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + self.public_key.clone(), ) .await .ok(), diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 171cdb7d02..ef0faf0ed5 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -22,11 +22,7 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::QuorumProposal, - message::{ - DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, - SequencingMessage, - }, + message::{DaConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, traits::{ election::Membership, network::{ConnectedNetwork, DataRequest, RequestKind, ResponseMessage}, @@ -41,10 +37,7 @@ use sha2::{Digest, Sha256}; use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use crate::{ - events::{HotShotEvent, ProposalMissing}, - helpers::broadcast_event, -}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Amount of time to try for a request before timing out. pub const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); @@ -115,12 +108,22 @@ impl> TaskState for NetworkRequest } Ok(()) } - HotShotEvent::QuorumProposalRequest(missing) => { - let ProposalMissing { - view, - response_chan: chan, - } = missing; - self.run_proposal(&RequestKind::Proposal(*view), chan.clone(), *view); + HotShotEvent::QuorumProposalRequestRecv(view_number, sender_key) => { + if let Some(quorum_proposal) = + self.state.read().await.last_proposals().get(view_number) + { + broadcast_event( + HotShotEvent::QuorumProposalResponseSend( + *view_number, + sender_key.clone(), + quorum_proposal.clone(), + ) + .into(), + sender, + ) + .await; + } + Ok(()) } _ => Ok(()), @@ -224,29 +227,6 @@ impl> NetworkRequestState, - response_chan: Sender>>>, - view: TYPES::Time, - ) { - let leader = self.da_membership.leader(view); - let requester = ProposalRequester:: { - network: Arc::clone(&self.network), - sender: response_chan, - leader, - }; - let Some(signature) = self.serialize_and_sign(request) else { - return; - }; - - let pub_key = self.public_key.clone(); - async_spawn(async move { - requester.do_proposal(view, signature, pub_key).await; - }); - } - /// Signals delayed requesters to finish pub fn set_shutdown_flag(&self) { self.shutdown_flag.store(true, Ordering::Relaxed); @@ -275,72 +255,6 @@ struct DelayedRequester> { id: u64, } -/// A task the requests some data immediately from one peer - -struct ProposalRequester> { - /// The underlying network to send requests on - pub network: Arc, - /// Channel to send the event when we receive a response - sender: Sender>>>, - /// Leader for the view of the request - leader: TYPES::SignatureKey, -} - -impl> ProposalRequester { - /// Handle sending a request for proposal for a view, does - /// not delay - async fn do_proposal( - &self, - view: TYPES::Time, - signature: Signature, - key: TYPES::SignatureKey, - ) { - let response = match bincode::serialize(&make_proposal_req::(view, signature, key)) { - Ok(serialized_msg) => { - async_timeout( - REQUEST_TIMEOUT, - self.network - .request_data::(serialized_msg, &self.leader), - ) - .await - } - Err(e) => { - tracing::error!( - "Failed to serialize outgoing message: this should never happen. Error: {e}" - ); - broadcast_event(None, &self.sender).await; - return; - } - }; - match response { - Ok(Ok(serialized_response)) => { - if let Ok(ResponseMessage::Found(msg)) = bincode::deserialize(&serialized_response) - { - let SequencingMessage::General(GeneralConsensusMessage::Proposal(prop)) = msg - else { - error!("Requested Proposal but received a non-proposal in response. Response was {:?}", msg); - broadcast_event(None, &self.sender).await; - return; - }; - debug!("proposal found {:?}", prop); - broadcast_event(Some(prop), &self.sender).await; - return; - } - debug!("Proposal not found"); - broadcast_event(None, &self.sender).await; - } - Ok(Err(e)) => { - debug!("request for proposal failed with error {:?}", e); - broadcast_event(None, &self.sender).await; - } - Err(e) => { - debug!("request for proposal timed out with error {:?}", e); - broadcast_event(None, &self.sender).await; - } - } - } -} - /// Wrapper for the info in a VID request struct VidRequest(TYPES::Time, TYPES::SignatureKey); @@ -469,21 +383,3 @@ fn make_vid( kind: MessageKind::Data(DataMessage::RequestData(data_request)), } } - -/// Build a request for a Proposal -fn make_proposal_req( - view: TYPES::Time, - signature: Signature, - key: TYPES::SignatureKey, -) -> Message { - let kind = RequestKind::Proposal(view); - let data_request = DataRequest { - view, - request: kind, - signature, - }; - Message { - sender: key, - kind: MessageKind::Data(DataMessage::RequestData(data_request)), - } -} diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index d933184c1e..22be12227d 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -203,17 +203,6 @@ where Box::new(EventPredicate { check, info }) } -pub fn quorum_proposal_missing() -> Box> -where - TYPES: NodeType, -{ - let info = "QuorumProposalRequest".to_string(); - let check: EventCallback = Arc::new(move |e: Arc>| { - matches!(*e.clone(), QuorumProposalRequest(..)) - }); - Box::new(EventPredicate { check, info }) -} - pub fn quorum_proposal_send() -> Box> where TYPES: NodeType, diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index d0d727b370..7479536487 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -20,7 +20,7 @@ use hotshot_task_impls::{ }; use hotshot_testing::{ helpers::{build_fake_view_with_leaf_and_state, build_system_handle}, - predicates::event::{all_predicates, exact, quorum_proposal_missing}, + predicates::event::{all_predicates, exact}, script::InputOrder, serial, view_generator::TestViewGenerator, @@ -210,7 +210,10 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), ), )), - quorum_proposal_missing(), + exact(QuorumProposalRequestSend( + ViewNumber::new(2), + handle.public_key() + )), exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), ])]; diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 2938ea6ef9..13d5e86297 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -89,7 +89,7 @@ async fn test_vote_dependency_handle() { let vote_dependency_handle_state = VoteDependencyHandle:: { - public_key: handle.public_key().clone(), + public_key: handle.public_key(), private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus.clone()), instance_state: handle.hotshot.instance_state(), diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 33f8af1576..9b1fcb4376 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -359,3 +359,65 @@ async fn test_all_restart() { .run_test::() .await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_all_restart_cdn() { + use std::time::Duration; + + use hotshot_example_types::node_types::{PushCdnImpl, TestTypes, TestVersions}; + use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestDescription, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 2000, + ..Default::default() + }; + let mut metadata: TestDescription = + TestDescription::default(); + let mut catchup_nodes = vec![]; + for i in 1..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: UpDown::Restart, + }) + } + + metadata.timing_data = timing_data; + metadata.start_nodes = 20; + metadata.num_nodes_with_stake = 20; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(13, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + metadata + .gen_launcher(0) + .launch() + .run_test::() + .await; +} diff --git a/types/src/message.rs b/types/src/message.rs index dcd653e7b2..e328e4d970 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -162,6 +162,12 @@ pub enum GeneralConsensusMessage { /// Message with a quorum proposal. Proposal(Proposal>), + /// A peer node needs a proposal from the leader. + ProposalRequested(TYPES::Time, TYPES::SignatureKey), + + /// The leader has responded with a valid proposal. + LeaderProposalAvailable(Proposal>), + /// Message with a quorum vote. Vote(QuorumVote), @@ -235,6 +241,10 @@ impl SequencingMessage { // this should match replica upon receipt p.data.view_number() } + GeneralConsensusMessage::ProposalRequested(view_number, _) => *view_number, + GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { + proposal.data.view_number() + } GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { @@ -277,6 +287,10 @@ impl SequencingMessage { match &self { SequencingMessage::General(general_message) => match general_message { GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, + GeneralConsensusMessage::ProposalRequested(_, _) + | GeneralConsensusMessage::LeaderProposalAvailable(_) => { + MessagePurpose::LatestProposal + } GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { MessagePurpose::Vote } From 69b02d69e075212f54a367de5f557009f008475f Mon Sep 17 00:00:00 2001 From: elliedavidson <118024407+elliedavidson@users.noreply.github.com> Date: Tue, 20 Aug 2024 14:07:08 -0400 Subject: [PATCH 1187/1393] Add test for verify_share return type (#3590) * Add test for verify_share return type * Remove patch and ignore test * Fix typo * commit other mod.rs file * just fmt * Remove jellyfish patch from cargo toml * PR comments * Clippy fixes --- task-impls/Cargo.toml | 1 + task-impls/src/consensus/mod.rs | 17 +++--- task-impls/src/quorum_vote/mod.rs | 24 ++++---- testing/Cargo.toml | 1 + testing/tests/tests_1/consensus_task.rs | 75 +++++++++++++++++++++++++ types/src/vid.rs | 6 ++ 6 files changed, 104 insertions(+), 20 deletions(-) diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 4aea968dff..b22adee2fb 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -10,6 +10,7 @@ example-upgrade = [] gpu-vid = ["hotshot-types/gpu-vid"] dependency-tasks = [] rewind = [] +test-srs = ["jf-vid/test-srs"] [dependencies] anyhow = { workspace = true } diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 71cc267034..67ae099035 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -177,19 +177,16 @@ impl, V: Versions> ConsensusTaskSt } // Validate the VID share. - if vid_scheme(self.quorum_membership.total_nodes()) - .verify_share( + // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner + // and outer results + matches!( + vid_scheme(self.quorum_membership.total_nodes()).verify_share( &disperse.data.share, &disperse.data.common, &payload_commitment, - ) - .is_err() - { - debug!("Invalid VID share."); - return false; - } - - true + ), + Ok(Ok(())) + ) } /// Publishes a proposal diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 43c279af2b..4ddd531cab 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -605,16 +605,20 @@ impl, V: Versions> QuorumVoteTaskS return; } } - if vid_scheme(self.quorum_membership.total_nodes()) - .verify_share( - &disperse.data.share, - &disperse.data.common, - &payload_commitment, - ) - .is_err() - { - debug!("Invalid VID share."); - return; + // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner + // and outer results + #[allow(clippy::no_effect)] + match vid_scheme(self.quorum_membership.total_nodes()).verify_share( + &disperse.data.share, + &disperse.data.common, + &payload_commitment, + ) { + Ok(Err(())) | Err(_) => { + return; + } + Ok(Ok(())) => { + (); + } } self.consensus diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 721f526a08..02ed054d5d 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -12,6 +12,7 @@ slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] dependency-tasks = ["hotshot/dependency-tasks"] rewind = ["hotshot/rewind"] +test-srs = ["jf-vid/test-srs"] [dependencies] automod = "1.0.14" diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 2e02c9a7b9..1de54635a8 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -602,3 +602,78 @@ async fn test_vid_disperse_storage_failure() { run_test![inputs, consensus_script].await; } + +/// Tests that VID shares that return validation with an Ok(Err) result +/// are correctly rejected +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[cfg(feature = "test-srs")] +async fn test_invalid_vid_disperse() { + use hotshot_testing::{ + helpers::{build_payload_commitment, build_vid_proposal}, + test_builder::TestDescription, + }; + use hotshot_types::traits::{ + consensus_api::ConsensusApi, network::Topic, node_implementation::NodeType, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let handle = build_system_handle::(0) + .await + .0; + + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = + TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut votes = Vec::new(); + let mut dacs = Vec::new(); + let mut vids = Vec::new(); + for view in (&mut generator).take(1).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + votes.push(view.create_quorum_vote(&handle)); + dacs.push(view.da_certificate.clone()); + vids.push(view.vid_proposal.clone()); + } + + let vid_scheme = + vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); + + let corrupt_share = vid_scheme.corrupt_share_index(vids[0].0[0].data.share.clone()); + + // Corrupt one of the shares + let mut share = vid_share(&vids[0].0, handle.public_key()); + share.data.share = corrupt_share; + + let inputs = vec![random![ + VidShareRecv(share), + DaCertificateRecv(dacs[0].clone()), + QuorumProposalRecv(proposals[0].clone(), leaders[0]), + ]]; + + // If verify_share does not correctly handle this case, a `QuorumVote` + // will be emitted and cause a test failure + let expectations = vec![Expectations::from_outputs(all_predicates![ + validated_state_updated(), + exact(ViewChange(ViewNumber::new(1))), + quorum_proposal_validated(), + ])]; + + let consensus_state = + ConsensusTaskState::::create_from(&handle).await; + let mut consensus_script = TaskScript { + timeout: TIMEOUT, + state: consensus_state, + expectations, + }; + + run_test![inputs, consensus_script].await; +} diff --git a/types/src/vid.rs b/types/src/vid.rs index b4b63cf7b4..1d8afe2091 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -231,6 +231,12 @@ impl VidScheme for VidSchemeType { fn get_multiplicity(common: &Self::Common) -> u32 { ::get_multiplicity(common) } + + /// Helper function for testing only + #[cfg(feature = "test-srs")] + fn corrupt_share_index(&self, share: Self::Share) -> Self::Share { + self.0.corrupt_share_index(share) + } } impl PayloadProver for VidSchemeType { From e9d68f12bcaff4033b28abf6911bd7dcddbc88e4 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Wed, 21 Aug 2024 10:57:51 -0400 Subject: [PATCH 1188/1393] [LOGGING] - Log Errors or Panic When Critical Tasks exit (#3577) * add health check task * add a periodic heart beat for main tasks * rebase and format * cleanup * minor cleanup * change task id to include task name and a number * cleanup * cleanup async-std and tokio scopes in loops * cleanup * remove uneeded helper functions to avoid duplication * minor cleanup * pin fused receive stream on task startup * fix tests * address comments --- hotshot/src/lib.rs | 2 + hotshot/src/tasks/mod.rs | 76 ++++--- hotshot/src/tasks/task_state.rs | 16 +- hotshot/src/types/handle.rs | 20 ++ task-impls/src/consensus/mod.rs | 4 + task-impls/src/consensus2/mod.rs | 4 + task-impls/src/da.rs | 4 + task-impls/src/events.rs | 10 + task-impls/src/harness.rs | 7 +- task-impls/src/health_check.rs | 121 +++++++++++ task-impls/src/lib.rs | 3 + task-impls/src/network.rs | 10 + task-impls/src/quorum_proposal/mod.rs | 4 + task-impls/src/quorum_proposal_recv/mod.rs | 4 + task-impls/src/quorum_vote/mod.rs | 4 + task-impls/src/request.rs | 4 + task-impls/src/response.rs | 44 ++-- task-impls/src/rewind.rs | 4 + task-impls/src/transactions.rs | 4 + task-impls/src/upgrade.rs | 4 + task-impls/src/vid.rs | 4 + task-impls/src/view_sync.rs | 8 + task/src/task.rs | 193 +++++++++++++++--- testing/tests/tests_1/network_task.rs | 14 +- testing/tests/tests_1/test_with_failures_2.rs | 2 - 25 files changed, 490 insertions(+), 80 deletions(-) create mode 100644 task-impls/src/health_check.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 090a9687de..461638444a 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -17,6 +17,7 @@ use hotshot_types::{ traits::{network::BroadcastDelay, node_implementation::Versions}, }; use rand::Rng; +use tasks::add_health_check_task; use url::Url; /// Contains traits consumed by [`SystemContext`] @@ -634,6 +635,7 @@ impl, V: Versions> SystemContext(&mut handle).await; add_consensus_tasks::(&mut handle).await; + add_health_check_task::(&mut handle).await; handle } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 269c771ff0..0d8428469c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,6 +8,7 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; +use hotshot_task::task::{NetworkHandle, Task}; use std::{collections::HashSet, sync::Arc, time::Duration}; use async_broadcast::broadcast; @@ -18,14 +19,14 @@ use futures::{ future::{BoxFuture, FutureExt}, stream, StreamExt, }; -use hotshot_task::task::Task; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, - network, - network::{NetworkEventTaskState, NetworkMessageTaskState}, + health_check::HealthCheckTaskState, + helpers::broadcast_event, + network::{self, NetworkEventTaskState, NetworkMessageTaskState}, request::NetworkRequestState, response::{run_response_task, NetworkResponseState}, transactions::TransactionTaskState, @@ -68,14 +69,7 @@ pub async fn add_request_network_task< >( handle: &mut SystemContextHandle, ) { - let state = NetworkRequestState::::create_from(handle).await; - - let task = Task::new( - state, - handle.internal_event_stream.0.clone(), - handle.internal_event_stream.1.activate_cloned(), - ); - handle.consensus_registry.run_task(task); + handle.add_task(NetworkRequestState::::create_from(handle).await); } /// Add a task which responds to requests on the network. @@ -91,9 +85,12 @@ pub fn add_response_task, V: Versi handle.private_key().clone(), handle.hotshot.id, ); + let task_name = state.get_task_name(); handle.network_registry.register(run_response_task::( state, + handle.internal_event_stream.0.clone(), handle.internal_event_stream.1.activate_cloned(), + handle.generate_task_id(task_name), )); } @@ -117,9 +114,10 @@ pub fn add_network_message_task< let network = Arc::clone(channel); let mut state = network_state.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); + let stream = handle.internal_event_stream.0.clone(); + let task_id = handle.generate_task_id(network_state.get_task_name()); + let handle_task_id = task_id.clone(); let task_handle = async_spawn(async move { - futures::pin_mut!(shutdown_signal); - let recv_stream = stream::unfold((), |()| async { let msgs = match network.recv_msgs().await { Ok(msgs) => { @@ -144,9 +142,10 @@ pub fn add_network_message_task< Some((msgs, ())) }); + let heartbeat_interval = + Task::>::get_periodic_interval_in_secs(); let fused_recv_stream = recv_stream.boxed().fuse(); - futures::pin_mut!(fused_recv_stream); - + futures::pin_mut!(fused_recv_stream, heartbeat_interval, shutdown_signal); loop { futures::select! { () = shutdown_signal => { @@ -168,10 +167,16 @@ pub fn add_network_message_task< return; } } + _ = Task::>::handle_periodic_delay(&mut heartbeat_interval) => { + broadcast_event(Arc::new(HotShotEvent::HeartBeat(handle_task_id.clone())), &stream).await; + } } } }); - handle.network_registry.register(task_handle); + handle.network_registry.register(NetworkHandle { + handle: task_handle, + task_id, + }); } /// Add the network task to handle events and send messages. @@ -194,12 +199,7 @@ pub fn add_network_event_task< storage: Arc::clone(&handle.storage()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; - let task = Task::new( - network_state, - handle.internal_event_stream.0.clone(), - handle.internal_event_stream.1.activate_cloned(), - ); - handle.consensus_registry.run_task(task); + handle.add_task(network_state); } /// Adds consensus-related tasks to a `SystemContextHandle`. @@ -331,6 +331,7 @@ where add_consensus_tasks::(&mut handle).await; self.add_network_tasks(&mut handle).await; + add_health_check_task(&mut handle).await; handle } @@ -338,6 +339,7 @@ where /// Add byzantine network tasks with the trait #[allow(clippy::too_many_lines)] async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { + let task_id = self.get_task_name(); let state_in = Arc::new(RwLock::new(self)); let state_out = Arc::clone(&state_in); // channels between the task spawned in this function and the network tasks. @@ -376,8 +378,6 @@ where // and broadcast the transformed events to the replacement event stream we just created. let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let send_handle = async_spawn(async move { - futures::pin_mut!(shutdown_signal); - let recv_stream = stream::unfold(original_receiver, |mut recv| async move { match recv.recv().await { Ok(event) => Some((Ok(event), recv)), @@ -388,7 +388,7 @@ where .boxed(); let fused_recv_stream = recv_stream.fuse(); - futures::pin_mut!(fused_recv_stream); + futures::pin_mut!(fused_recv_stream, shutdown_signal); loop { futures::select! { @@ -424,8 +424,6 @@ where // and broadcast the transformed events to the original internal event stream let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let recv_handle = async_spawn(async move { - futures::pin_mut!(shutdown_signal); - let network_recv_stream = stream::unfold(receiver_from_network, |mut recv| async move { match recv.recv().await { @@ -436,7 +434,7 @@ where }); let fused_network_recv_stream = network_recv_stream.boxed().fuse(); - futures::pin_mut!(fused_network_recv_stream); + futures::pin_mut!(fused_network_recv_stream, shutdown_signal); loop { futures::select! { @@ -467,8 +465,19 @@ where } }); - handle.network_registry.register(send_handle); - handle.network_registry.register(recv_handle); + handle.network_registry.register(NetworkHandle { + handle: send_handle, + task_id: handle.generate_task_id(task_id), + }); + handle.network_registry.register(NetworkHandle { + handle: recv_handle, + task_id: handle.generate_task_id(task_id), + }); + } + + /// Gets the name of the current task + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() } } @@ -659,3 +668,10 @@ pub async fn add_network_tasks, V: network::vid_filter, ); } + +/// Add the health check task +pub async fn add_health_check_task, V: Versions>( + handle: &mut SystemContextHandle, +) { + handle.add_task(HealthCheckTaskState::::create_from(handle).await); +} diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 6cd8e14077..5391283afb 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -13,7 +13,7 @@ use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, - da::DaTaskState, quorum_proposal::QuorumProposalTaskState, + da::DaTaskState, health_check::HealthCheckTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, view_sync::ViewSyncTaskState, @@ -383,3 +383,17 @@ impl, V: Versions> CreateTaskState } } } + +#[async_trait] +impl, V: Versions> CreateTaskState + for HealthCheckTaskState +{ + async fn create_from(handle: &SystemContextHandle) -> Self { + let heartbeat_timeout_duration_in_secs = 30; + HealthCheckTaskState::new( + handle.hotshot.id, + handle.get_task_ids(), + heartbeat_timeout_duration_in_secs, + ) + } +} diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 0b285b593f..8ba28c6c5e 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,6 +22,7 @@ use hotshot_types::{ error::HotShotError, traits::{election::Membership, network::ConnectedNetwork, node_implementation::NodeType}, }; +use rand::Rng; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; @@ -68,15 +69,34 @@ impl + 'static, V: Versions> { /// Adds a hotshot consensus-related task to the `SystemContextHandle`. pub fn add_task> + 'static>(&mut self, task_state: S) { + let task_name = task_state.get_task_name(); let task = Task::new( task_state, self.internal_event_stream.0.clone(), self.internal_event_stream.1.activate_cloned(), + self.generate_task_id(task_name), ); self.consensus_registry.run_task(task); } + #[must_use] + /// generate a task id for a task + pub fn generate_task_id(&self, task_name: &str) -> String { + let random = rand::thread_rng().gen_range(0..=9999); + let tasks_spawned = + self.consensus_registry.task_handles.len() + self.network_registry.handles.len(); + format!("{task_name}_{tasks_spawned}_{random}") + } + + #[must_use] + /// Get a list of all the running tasks ids + pub fn get_task_ids(&self) -> Vec { + let mut task_ids = self.consensus_registry.get_task_ids(); + task_ids.extend(self.network_registry.get_task_ids()); + task_ids + } + /// obtains a stream to expose to the user pub fn event_stream(&self) -> impl Stream> { self.output_event_stream.1.activate_cloned() diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 67ae099035..a9cf49aa73 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -773,4 +773,8 @@ impl, V: Versions> TaskState } } } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 65c80cab34..1e74508278 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -172,4 +172,8 @@ impl, V: Versions> TaskState /// Joins all subtasks. async fn cancel_subtasks(&mut self) {} + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0cf96e4fea..bc38aff941 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -379,4 +379,8 @@ impl> TaskState for DaTaskState &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 082caef45c..bf535af95e 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -36,6 +36,10 @@ impl TaskEvent for HotShotEvent { fn shutdown_event() -> Self { HotShotEvent::Shutdown } + + fn heartbeat_event(task_id: String) -> Self { + HotShotEvent::HeartBeat(task_id) + } } /// Wrapper type for the event to notify tasks that a proposal for a view is missing @@ -216,6 +220,9 @@ pub enum HotShotEvent { /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid QuorumProposalPreliminarilyValidated(Proposal>), + + /// Periodic heart beat event for health checking + HeartBeat(String), } impl Display for HotShotEvent { @@ -463,6 +470,9 @@ impl Display for HotShotEvent { proposal.data.view_number() ) } + HotShotEvent::HeartBeat(task_id) => { + write!(f, "HeartBeat(task_id={task_id:?}") + } } } } diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 16e8a273b8..5f116dee31 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -50,7 +50,12 @@ pub async fn run_harness> + Send allow_extra_output, }; - let task = Task::new(state, to_test.clone(), from_test.clone()); + let task = Task::new( + state, + to_test.clone(), + from_test.clone(), + "task_0".to_string(), + ); let handle = task.run(); let test_future = async move { diff --git a/task-impls/src/health_check.rs b/task-impls/src/health_check.rs new file mode 100644 index 0000000000..ee00dbbe7f --- /dev/null +++ b/task-impls/src/health_check.rs @@ -0,0 +1,121 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{ + collections::{hash_map::Entry, HashMap}, + marker::PhantomData, + sync::Arc, + time::Instant, +}; + +use anyhow::Result; +use async_broadcast::{Receiver, Sender}; +use async_lock::Mutex; +use async_trait::async_trait; +use hotshot_task::task::TaskState; +use hotshot_types::traits::node_implementation::NodeType; + +use crate::events::{HotShotEvent, HotShotTaskCompleted}; + +/// Health event task, recieve heart beats from other tasks +pub struct HealthCheckTaskState { + /// Node id + pub node_id: u64, + /// Map of the task id to timestamp of last heartbeat + pub task_ids_heartbeat_timestamp: Mutex>, + /// Specify the time we start logging when no heartbeat received + pub heartbeat_timeout_duration_in_secs: u64, + /// phantom + pub _phantom: PhantomData, +} + +impl HealthCheckTaskState { + /// Create a new instance of task state with task ids pre populated + #[must_use] + pub fn new( + node_id: u64, + task_ids: Vec, + heartbeat_timeout_duration_in_secs: u64, + ) -> Self { + let time = Instant::now(); + let mut task_ids_heartbeat_timestamp: HashMap = HashMap::new(); + for task_id in task_ids { + task_ids_heartbeat_timestamp.insert(task_id, time); + } + + HealthCheckTaskState { + node_id, + task_ids_heartbeat_timestamp: Mutex::new(task_ids_heartbeat_timestamp), + heartbeat_timeout_duration_in_secs, + _phantom: std::marker::PhantomData, + } + } + /// Handles only HeartBeats and updates the timestamp for a task + pub async fn handle( + &mut self, + event: &Arc>, + ) -> Option { + match event.as_ref() { + HotShotEvent::HeartBeat(task_id) => { + let mut task_ids_heartbeat_timestamp = + self.task_ids_heartbeat_timestamp.lock().await; + match task_ids_heartbeat_timestamp.entry(task_id.clone()) { + Entry::Occupied(mut heartbeat_timestamp) => { + *heartbeat_timestamp.get_mut() = Instant::now(); + } + Entry::Vacant(_) => { + // On startup of this task we populate the map with all task ids + } + } + } + HotShotEvent::Shutdown => { + return Some(HotShotTaskCompleted); + } + _ => {} + } + None + } +} + +#[async_trait] +impl TaskState for HealthCheckTaskState { + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + _sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + self.handle(&event).await; + + Ok(()) + } + + async fn cancel_subtasks(&mut self) {} + + async fn periodic_task(&self, _sender: &Sender>, _task_id: String) { + let current_time = Instant::now(); + + let task_ids_heartbeat = self.task_ids_heartbeat_timestamp.lock().await; + for (task_id, heartbeat_timestamp) in task_ids_heartbeat.iter() { + if current_time.duration_since(*heartbeat_timestamp).as_secs() + > self.heartbeat_timeout_duration_in_secs + { + tracing::error!( + "Node Id {} has not received a heartbeat for task id {} for {} seconds", + self.node_id, + task_id, + self.heartbeat_timeout_duration_in_secs + ); + } + } + } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } +} diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index ed3dc5a0ee..b97685d9e8 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -64,3 +64,6 @@ pub mod quorum_proposal_recv; /// Task for storing and replaying all received tasks by a node pub mod rewind; + +/// Task for listening to HeartBeat events and logging any task that doesnt broadcast after sometime +pub mod health_check; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 4f94f37840..52b94e2cf5 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -209,6 +209,12 @@ impl NetworkMessageTaskState { .await; } } + + /// Gets the name of the current task + #[must_use] + pub fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } /// network event task state @@ -259,6 +265,10 @@ impl< } async fn cancel_subtasks(&mut self) {} + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } impl< diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index cd382c38f5..8fc4c3141e 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -531,4 +531,8 @@ impl, V: Versions> TaskState handle.abort(); } } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index dc0fb5124c..24436cc450 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -176,4 +176,8 @@ impl, V: Versions> TaskState } } } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 4ddd531cab..969d99ba26 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -675,4 +675,8 @@ impl, V: Versions> TaskState handle.abort(); } } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index ef0faf0ed5..d587a927a7 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -146,6 +146,10 @@ impl> TaskState for NetworkRequest } } } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } impl> NetworkRequestState { diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index e5d3563089..7d0329b140 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -6,12 +6,13 @@ use std::{sync::Arc, time::Duration}; -use async_broadcast::Receiver; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use futures::{FutureExt, StreamExt}; -use hotshot_task::dependency::{Dependency, EventDependency}; +use hotshot_task::{ + dependency::{Dependency, EventDependency}, + task::{NetworkHandle, Task}, +}; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, data::VidDisperseShare, @@ -28,11 +29,9 @@ use hotshot_types::{ }, }; use sha2::{Digest, Sha256}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; use tracing::instrument; -use crate::events::HotShotEvent; +use crate::{events::HotShotEvent, health_check::HealthCheckTaskState, helpers::broadcast_event}; /// Time to wait for txns before sending `ResponseMessage::NotFound` const TXNS_TIMEOUT: Duration = Duration::from_millis(100); @@ -76,8 +75,16 @@ impl NetworkResponseState { /// Run the request response loop until a `HotShotEvent::Shutdown` is received. /// Or the stream is closed. - async fn run_loop(mut self, shutdown: EventDependency>>) { + async fn run_loop( + mut self, + shutdown: EventDependency>>, + sender: Sender>>, + task_name: String, + ) { let mut shutdown = Box::pin(shutdown.completed().fuse()); + let heartbeat_interval = + Task::>::get_periodic_interval_in_secs(); + futures::pin_mut!(heartbeat_interval); loop { futures::select! { req = self.receiver.next() => { @@ -86,6 +93,9 @@ impl NetworkResponseState { None => return, } }, + _ = Task::>::handle_periodic_delay(&mut heartbeat_interval) => { + broadcast_event(Arc::new(HotShotEvent::HeartBeat(task_name.clone())), &sender).await; + }, _ = shutdown => { return; } @@ -231,6 +241,11 @@ impl NetworkResponseState { None => ResponseMessage::NotFound, } } + + /// Get the task name + pub fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } /// Check the signature @@ -249,11 +264,14 @@ fn valid_signature( /// on the `event_stream` arg. pub fn run_response_task( task_state: NetworkResponseState, - event_stream: Receiver>>, -) -> JoinHandle<()> { - let dep = EventDependency::new( - event_stream, + sender: Sender>>, + receiver: Receiver>>, + task_id: String, +) -> NetworkHandle { + let shutdown = EventDependency::new( + receiver, Box::new(|e| matches!(e.as_ref(), HotShotEvent::Shutdown)), ); - async_spawn(task_state.run_loop(dep)) + let handle = async_spawn(task_state.run_loop(shutdown, sender, task_id.clone())); + NetworkHandle { handle, task_id } } diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index 669b410b52..33907f71c8 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -70,4 +70,8 @@ impl TaskState for RewindTaskState { } } } + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index fc10b549ee..e6238d74a7 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -792,4 +792,8 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 680c61d82c..1a05629879 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -358,4 +358,8 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3243f356ae..54e40ef177 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -171,4 +171,8 @@ impl> TaskState for VidTaskState &'static str { + std::any::type_name::>() + } } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index e2c4e5cb95..2e9544a5c6 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -120,6 +120,10 @@ impl> TaskState for ViewSyncTaskSt } async fn cancel_subtasks(&mut self) {} + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } /// State of a view sync replica task @@ -169,6 +173,10 @@ impl> TaskState } async fn cancel_subtasks(&mut self) {} + + fn get_task_name(&self) -> &'static str { + std::any::type_name::>() + } } impl> ViewSyncTaskState { diff --git a/task/src/task.rs b/task/src/task.rs index af195d1e27..4ec0986c91 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; @@ -16,6 +16,9 @@ use futures::future::join_all; #[cfg(async_executor_impl = "tokio")] use futures::future::try_join_all; #[cfg(async_executor_impl = "tokio")] +use futures::FutureExt; +use futures::StreamExt; +#[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; /// Trait for events that long-running tasks handle @@ -25,11 +28,14 @@ pub trait TaskEvent: PartialEq { /// Note that this is necessarily uniform across all tasks. /// Exiting the task loop is handled by the task spawner, rather than the task individually. fn shutdown_event() -> Self; + + /// The heartbeat event + fn heartbeat_event(task_id: String) -> Self; } #[async_trait] /// Type for mutable task state that can be used as the state for a `Task` -pub trait TaskState: Send { +pub trait TaskState: Send + Sync { /// Type of event sent and received by the task type Event: TaskEvent + Clone + Send + Sync; @@ -43,6 +49,27 @@ pub trait TaskState: Send { _sender: &Sender>, _receiver: &Receiver>, ) -> Result<()>; + + /// Runs a specified job in the main task every `Task::PERIODIC_INTERVAL_IN_SECS` + async fn periodic_task(&self, sender: &Sender>, task_id: String) { + match sender + .broadcast_direct(Arc::new(Self::Event::heartbeat_event(task_id))) + .await + { + Ok(None) => (), + Ok(Some(_overflowed)) => { + tracing::error!( + "Event sender queue overflow, Oldest event removed form queue: Heartbeat Event" + ); + } + Err(async_broadcast::SendError(_e)) => { + tracing::warn!("Event: Heartbeat\n Sending failed, event stream probably shutdown"); + } + } + } + + /// Gets the name of the current task + fn get_task_name(&self) -> &'static str; } /// A basic task which loops waiting for events to come from `event_receiver` @@ -58,15 +85,26 @@ pub struct Task { sender: Sender>, /// Receives events that are broadcast from any task, including itself receiver: Receiver>, + /// The generated task id + task_id: String, } impl Task { + /// Constant for how often we run our periodic tasks, such as broadcasting a hearbeat + const PERIODIC_INTERVAL_IN_SECS: u64 = 10; + /// Create a new task - pub fn new(state: S, sender: Sender>, receiver: Receiver>) -> Self { + pub fn new( + state: S, + sender: Sender>, + receiver: Receiver>, + task_id: String, + ) -> Self { Task { state, sender, receiver, + task_id, } } @@ -75,38 +113,100 @@ impl Task { Box::new(self.state) as Box> } + #[cfg(async_executor_impl = "async-std")] + /// Periodic delay + pub fn get_periodic_interval_in_secs() -> futures::stream::Fuse { + async_std::stream::interval(Duration::from_secs(Self::PERIODIC_INTERVAL_IN_SECS)).fuse() + } + + #[cfg(async_executor_impl = "async-std")] + /// Handle periodic delay interval + pub fn handle_periodic_delay( + periodic_interval: &mut futures::stream::Fuse, + ) -> futures::stream::Next<'_, futures::stream::Fuse> { + periodic_interval.next() + } + + #[cfg(async_executor_impl = "tokio")] + #[must_use] + /// Periodic delay + pub fn get_periodic_interval_in_secs() -> tokio::time::Interval { + tokio::time::interval(Duration::from_secs(Self::PERIODIC_INTERVAL_IN_SECS)) + } + + #[cfg(async_executor_impl = "tokio")] + /// Handle periodic delay interval + pub fn handle_periodic_delay( + periodic_interval: &mut tokio::time::Interval, + ) -> futures::future::Fuse + '_> { + periodic_interval.tick().fuse() + } + /// Spawn the task loop, consuming self. Will continue until /// the task reaches some shutdown condition - pub fn run(mut self) -> JoinHandle>> { - spawn(async move { - loop { - match self.receiver.recv_direct().await { - Ok(input) => { - if *input == S::Event::shutdown_event() { - self.state.cancel_subtasks().await; + pub fn run(mut self) -> HotShotTaskHandle { + let task_id = self.task_id.clone(); + let handle = spawn(async move { + let recv_stream = + futures::stream::unfold(self.receiver.clone(), |mut recv| async move { + match recv.recv_direct().await { + Ok(event) => Some((Ok(event), recv)), + Err(e) => Some((Err(e), recv)), + } + }) + .boxed(); - break self.boxed_state(); - } + let fused_recv_stream = recv_stream.fuse(); + let periodic_interval = Self::get_periodic_interval_in_secs(); + futures::pin_mut!(periodic_interval, fused_recv_stream); + loop { + futures::select! { + input = fused_recv_stream.next() => { + match input { + Some(Ok(input)) => { + if *input == S::Event::shutdown_event() { + self.state.cancel_subtasks().await; - let _ = - S::handle_event(&mut self.state, input, &self.sender, &self.receiver) + break self.boxed_state(); + } + let _ = S::handle_event( + &mut self.state, + input, + &self.sender, + &self.receiver, + ) .await .inspect_err(|e| tracing::info!("{e}")); + } + Some(Err(e)) => { + tracing::error!("Failed to receive from event stream Error: {}", e); + } + None => {} + } } - Err(e) => { - tracing::error!("Failed to receive from event stream Error: {}", e); - } + _ = Self::handle_periodic_delay(&mut periodic_interval) => { + self.state.periodic_task(&self.sender, self.task_id.clone()).await; + }, } } - }) + }); + HotShotTaskHandle { handle, task_id } } } +/// Wrapper around handle and task id so we can map +pub struct HotShotTaskHandle { + /// Handle for the task + pub handle: JoinHandle>>, + /// Generated task id + pub task_id: String, +} + #[derive(Default)] /// A collection of tasks which can handle shutdown pub struct ConsensusTaskRegistry { /// Tasks this registry controls - task_handles: Vec>>>, + pub task_handles: Vec>, } impl ConsensusTaskRegistry { @@ -117,10 +217,21 @@ impl ConsensusTaskRegistry { task_handles: vec![], } } + /// Add a task to the registry - pub fn register(&mut self, handle: JoinHandle>>) { + pub fn register(&mut self, handle: HotShotTaskHandle) { self.task_handles.push(handle); } + + #[must_use] + /// Get all task ids from registry + pub fn get_task_ids(&self) -> Vec { + self.task_handles + .iter() + .map(|wrapped_handle| wrapped_handle.task_id.clone()) + .collect() + } + /// Try to cancel/abort the task this registry has /// /// # Panics @@ -129,11 +240,11 @@ impl ConsensusTaskRegistry { pub async fn shutdown(&mut self) { let handles = &mut self.task_handles; - while let Some(handle) = handles.pop() { + while let Some(wrapped_handle) = handles.pop() { #[cfg(async_executor_impl = "async-std")] - let mut task_state = handle.await; + let mut task_state = wrapped_handle.handle.await; #[cfg(async_executor_impl = "tokio")] - let mut task_state = handle.await.unwrap(); + let mut task_state = wrapped_handle.handle.await.unwrap(); task_state.cancel_subtasks().await; } @@ -150,20 +261,33 @@ impl ConsensusTaskRegistry { /// # Panics /// Panics if one of the tasks panicked pub async fn join_all(self) -> Vec>> { + let handles: Vec>>> = self + .task_handles + .into_iter() + .map(|wrapped| wrapped.handle) + .collect(); #[cfg(async_executor_impl = "async-std")] - let states = join_all(self.task_handles).await; + let states = join_all(handles).await; #[cfg(async_executor_impl = "tokio")] - let states = try_join_all(self.task_handles).await.unwrap(); + let states = try_join_all(handles).await.unwrap(); states } } +/// Wrapper around join handle and task id for network tasks +pub struct NetworkHandle { + /// Task handle + pub handle: JoinHandle<()>, + /// Generated task id + pub task_id: String, +} + #[derive(Default)] /// A collection of tasks which can handle shutdown pub struct NetworkTaskRegistry { /// Tasks this registry controls - pub handles: Vec>, + pub handles: Vec, } impl NetworkTaskRegistry { @@ -173,6 +297,15 @@ impl NetworkTaskRegistry { NetworkTaskRegistry { handles: vec![] } } + #[must_use] + /// Get all task ids from registry + pub fn get_task_ids(&self) -> Vec { + self.handles + .iter() + .map(|wrapped_handle| wrapped_handle.task_id.clone()) + .collect() + } + #[allow(clippy::unused_async)] /// Shuts down all tasks managed by this instance. /// @@ -184,16 +317,18 @@ impl NetworkTaskRegistry { /// tasks being joined return an error. pub async fn shutdown(&mut self) { let handles = std::mem::take(&mut self.handles); + let task_handles: Vec> = + handles.into_iter().map(|wrapped| wrapped.handle).collect(); #[cfg(async_executor_impl = "async-std")] - join_all(handles).await; + join_all(task_handles).await; #[cfg(async_executor_impl = "tokio")] - try_join_all(handles) + try_join_all(task_handles) .await .expect("Failed to join all tasks during shutdown"); } /// Add a task to the registry - pub fn register(&mut self, handle: JoinHandle<()>) { + pub fn register(&mut self, handle: NetworkHandle) { self.handles.push(handle); } } diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index a3b3245533..1631d0be19 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -4,13 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{sync::Arc, time::Duration}; - use async_broadcast::Sender; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; +use hotshot_task::task::TaskState; use hotshot_task::task::{ConsensusTaskRegistry, Task}; use hotshot_task_impls::{ events::HotShotEvent, @@ -28,6 +27,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; +use std::{sync::Arc, time::Duration}; // Test that the event task sends a message, and the message task receives it // and emits the proper event @@ -74,7 +74,8 @@ async fn test_network_task() { let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); - let task = Task::new(network_state, tx.clone(), rx); + let task_name = network_state.get_task_name(); + let task = Task::new(network_state, tx.clone(), rx, task_name.to_string()); task_reg.run_task(task); let mut generator = TestViewGenerator::generate(membership.clone(), membership); @@ -150,7 +151,12 @@ async fn test_network_storage_fail() { let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); - let task = Task::new(network_state, tx.clone(), rx); + let task = Task::new( + network_state, + tx.clone(), + rx, + "NetworkEventTaskState_0".to_string(), + ); task_reg.run_task(task); let mut generator = TestViewGenerator::generate(membership.clone(), membership); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 54d3e5193f..9d551629b8 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -24,7 +24,6 @@ use hotshot_testing::{ view_sync_task::ViewSyncTaskDescription, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -#[cfg(async_executor_impl = "async-std")] use {hotshot::tasks::DishonestLeader, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; // Test that a good leader can succeed in the view directly after view sync cross_tests!( @@ -68,7 +67,6 @@ cross_tests!( } ); -#[cfg(async_executor_impl = "async-std")] cross_tests!( TestName: dishonest_leader, Impls: [MemoryImpl], From 85b4892b463599b76ce88ff0b899d42999b21249 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 22 Aug 2024 11:16:32 -0400 Subject: [PATCH 1189/1393] Add versioning to vote commitment calculations (#3584) --- example-types/src/node_types.rs | 129 ++++++++++++++++ hotshot/src/tasks/mod.rs | 43 +++++- hotshot/src/tasks/task_state.rs | 6 +- task-impls/src/consensus/handlers.rs | 25 +++- task-impls/src/consensus/mod.rs | 41 +++-- task-impls/src/consensus2/handlers.rs | 14 +- task-impls/src/consensus2/mod.rs | 7 +- task-impls/src/da.rs | 38 +++-- task-impls/src/helpers.rs | 45 ++++-- task-impls/src/quorum_proposal/handlers.rs | 3 + task-impls/src/quorum_proposal/mod.rs | 5 +- .../src/quorum_proposal_recv/handlers.rs | 12 +- task-impls/src/quorum_vote/mod.rs | 8 +- task-impls/src/upgrade.rs | 18 +-- task-impls/src/view_sync.rs | 101 +++++++++---- task-impls/src/vote_collection.rs | 80 +++++----- testing/src/helpers.rs | 140 +++++++++++------- testing/src/view_generator.rs | 62 ++++++-- testing/tests/tests_1/consensus_task.rs | 24 +-- testing/tests/tests_1/da_task.rs | 24 ++- testing/tests/tests_1/message.rs | 9 +- .../tests_1/quorum_proposal_recv_task.rs | 4 +- testing/tests/tests_1/quorum_proposal_task.rs | 29 ++-- testing/tests/tests_1/quorum_vote_task.rs | 2 +- testing/tests/tests_1/test_with_failures_2.rs | 41 ++++- .../tests_1/upgrade_task_with_consensus.rs | 32 ++-- .../tests_1/upgrade_task_with_proposal.rs | 25 ++-- .../tests/tests_1/upgrade_task_with_vote.rs | 4 +- testing/tests/tests_1/view_sync_task.rs | 5 +- testing/tests/tests_5/fake_solver.rs | 4 +- types/src/data.rs | 26 ++-- types/src/simple_certificate.rs | 65 ++++++-- types/src/simple_vote.rs | 91 ++++++++++-- types/src/vote.rs | 70 +++++++-- 34 files changed, 908 insertions(+), 324 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 390b5472d2..a3ff669ab3 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -160,3 +160,132 @@ impl Versions for MarketplaceUpgradeTestVersions { type Marketplace = StaticVersion<0, 3>; } + +#[derive(Clone, Debug, Copy)] +pub struct MarketplaceTestVersions {} + +impl Versions for MarketplaceTestVersions { + type Base = StaticVersion<0, 3>; + type Upgrade = StaticVersion<0, 3>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + + type Marketplace = StaticVersion<0, 3>; +} + +#[cfg(test)] +mod tests { + use committable::{Commitment, Committable}; + use hotshot_types::{ + message::UpgradeLock, simple_vote::VersionedVoteData, + traits::node_implementation::ConsensusTime, + }; + use serde::{Deserialize, Serialize}; + + use crate::node_types::{MarketplaceTestVersions, NodeType, TestTypes, TestVersions}; + #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Hash, Eq)] + /// Dummy data used for test + struct TestData { + data: u64, + } + + impl Committable for TestData { + fn commit(&self) -> Commitment { + committable::RawCommitmentBuilder::new("Test data") + .u64(self.data) + .finalize() + } + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn test_versioned_commitment() { + let view = ::Time::new(0); + let upgrade_lock = UpgradeLock::new(); + + let data = TestData { data: 10 }; + let data_commitment: [u8; 32] = data.commit().into(); + + let versioned_data = + VersionedVoteData::::new(data, view, &upgrade_lock) + .await + .unwrap(); + let versioned_data_commitment: [u8; 32] = versioned_data.commit().into(); + + assert_eq!(versioned_data_commitment, data_commitment); + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + /// Test that the view number affects the commitment post-marketplace + async fn test_versioned_commitment_includes_view() { + let upgrade_lock = UpgradeLock::new(); + + let data = TestData { data: 10 }; + + let view_0 = ::Time::new(0); + let view_1 = ::Time::new(1); + + let versioned_data_0 = + VersionedVoteData::::new( + data, + view_0, + &upgrade_lock, + ) + .await + .unwrap(); + let versioned_data_1 = + VersionedVoteData::::new( + data, + view_1, + &upgrade_lock, + ) + .await + .unwrap(); + + let versioned_data_commitment_0: [u8; 32] = versioned_data_0.commit().into(); + let versioned_data_commitment_1: [u8; 32] = versioned_data_1.commit().into(); + + assert!( + versioned_data_commitment_0 != versioned_data_commitment_1, + "left: {versioned_data_commitment_0:?}, right: {versioned_data_commitment_1:?}" + ); + } + + #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + /// Test that the view number does not affect the commitment pre-marketplace + async fn test_versioned_commitment_excludes_view() { + let upgrade_lock = UpgradeLock::new(); + + let data = TestData { data: 10 }; + + let view_0 = ::Time::new(0); + let view_1 = ::Time::new(1); + + let versioned_data_0 = VersionedVoteData::::new( + data, + view_0, + &upgrade_lock, + ) + .await + .unwrap(); + let versioned_data_1 = VersionedVoteData::::new( + data, + view_1, + &upgrade_lock, + ) + .await + .unwrap(); + + let versioned_data_commitment_0: [u8; 32] = versioned_data_0.commit().into(); + let versioned_data_commitment_1: [u8; 32] = versioned_data_1.commit().into(); + + assert!( + versioned_data_commitment_0 == versioned_data_commitment_1, + "left: {versioned_data_commitment_0:?}, right: {versioned_data_commitment_1:?}" + ); + } +} diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 0d8428469c..47e99c15db 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -206,9 +206,9 @@ pub fn add_network_event_task< pub async fn add_consensus_tasks, V: Versions>( handle: &mut SystemContextHandle, ) { - handle.add_task(ViewSyncTaskState::::create_from(handle).await); + handle.add_task(ViewSyncTaskState::::create_from(handle).await); handle.add_task(VidTaskState::::create_from(handle).await); - handle.add_task(DaTaskState::::create_from(handle).await); + handle.add_task(DaTaskState::::create_from(handle).await); handle.add_task(TransactionTaskState::::create_from(handle).await); // only spawn the upgrade task if we are actually configured to perform an upgrade. @@ -619,6 +619,45 @@ impl + std::fmt::Debug, V: Version } } +#[derive(Debug)] +/// An `EventHandlerState` that modifies view number on the certificate of `DacSend` event to that of a future view +pub struct DishonestDa { + /// How many times current node has been elected leader and sent Da Cert + pub total_da_certs_sent_from_node: u64, + /// Which proposals to be dishonest at + pub dishonest_at_da_cert_sent_numbers: HashSet, + /// When leader how many times we will send DacSend and increment view number + pub total_views_add_to_cert: u64, +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestDa +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + if let HotShotEvent::DacSend(cert, sender) = event { + self.total_da_certs_sent_from_node += 1; + if self + .dishonest_at_da_cert_sent_numbers + .contains(&self.total_da_certs_sent_from_node) + { + let mut result = vec![HotShotEvent::DacSend(cert.clone(), sender.clone())]; + for i in 1..=self.total_views_add_to_cert { + let mut bad_cert = cert.clone(); + bad_cert.view_number = cert.view_number + i; + result.push(HotShotEvent::DacSend(bad_cert, sender.clone())); + } + return result; + } + } + vec![event.clone()] + } +} + /// adds tasks for sending/receiving messages to/from the network. pub async fn add_network_tasks, V: Versions>( handle: &mut SystemContextHandle, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 5391283afb..7754822716 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -130,7 +130,7 @@ impl, V: Versions> CreateTaskState #[async_trait] impl, V: Versions> CreateTaskState - for DaTaskState + for DaTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { Self { @@ -145,13 +145,14 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } #[async_trait] impl, V: Versions> CreateTaskState - for ViewSyncTaskState + for ViewSyncTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let cur_view = handle.cur_view().await; @@ -176,6 +177,7 @@ impl, V: Versions> CreateTaskState view_sync_timeout: handle.hotshot.config.view_sync_timeout, id: handle.hotshot.id, last_garbage_collected_view: TYPES::Time::new(0), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 340741b5f5..9716ca6998 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -183,6 +183,7 @@ pub async fn publish_proposal_from_commitment_and_metadata, vote_info: VoteInfo, id: u64, + upgrade_lock: &UpgradeLock, ) -> bool { use hotshot_types::simple_vote::QuorumVote; @@ -754,6 +766,7 @@ pub async fn update_state_and_vote_if_able< Arc::clone(&quorum_membership), OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), public_key.clone(), + upgrade_lock, ) .await .ok(), @@ -807,7 +820,10 @@ pub async fn update_state_and_vote_if_able< } // Validate the DAC. - let message = if cert.is_valid_cert(vote_info.da_membership.as_ref()) { + let message = if cert + .is_valid_cert(vote_info.da_membership.as_ref(), upgrade_lock) + .await + { // Validate the block payload commitment for non-genesis DAC. if cert.date().payload_commit != proposal.block_header.payload_commitment() { warn!( @@ -823,7 +839,10 @@ pub async fn update_state_and_vote_if_able< view, &public_key, &vote_info.private_key, - ) { + &vote_info.upgrade_lock, + ) + .await + { GeneralConsensusMessage::::Vote(vote) } else { error!("Unable to sign quorum vote!"); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index a9cf49aa73..445c81fc1f 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -52,7 +52,8 @@ use crate::{ pub(crate) mod handlers; /// Alias for Optional type for Vote Collectors -type VoteCollectorOption = Option>; +type VoteCollectorOption = + Option>; /// The state for the consensus task. Contains all of the information for the implementation /// of consensus @@ -92,11 +93,11 @@ pub struct ConsensusTaskState, V: /// Current Vote collection task, with it's view. pub vote_collector: - RwLock, QuorumCertificate>>, + RwLock, QuorumCertificate, V>>, /// Current timeout vote collection task with its view pub timeout_vote_collector: - RwLock, TimeoutCertificate>>, + RwLock, TimeoutCertificate, V>>, /// timeout task handle pub timeout_task: JoinHandle<()>, @@ -248,6 +249,7 @@ impl, V: Versions> ConsensusTaskSt let instance_state = Arc::clone(&self.instance_state); let id = self.id; let handle = async_spawn(async move { + let upgrade_lock = upgrade.clone(); update_state_and_vote_if_able::( view, proposal, @@ -264,6 +266,7 @@ impl, V: Versions> ConsensusTaskSt event_receiver, }, id, + &upgrade_lock, ) .await; }); @@ -333,11 +336,12 @@ impl, V: Versions> ConsensusTaskSt view: vote.view_number(), id: self.id, }; - *collector = create_vote_accumulator::< - TYPES, - QuorumVote, - QuorumCertificate, - >(&info, event, &event_sender) + *collector = create_vote_accumulator( + &info, + event, + &event_sender, + self.upgrade_lock.clone(), + ) .await; } else { let result = collector @@ -372,11 +376,12 @@ impl, V: Versions> ConsensusTaskSt view: vote.view_number(), id: self.id, }; - *collector = create_vote_accumulator::< - TYPES, - TimeoutVote, - TimeoutCertificate, - >(&info, event, &event_sender) + *collector = create_vote_accumulator( + &info, + event, + &event_sender, + self.upgrade_lock.clone(), + ) .await; } else { let result = collector @@ -569,7 +574,10 @@ impl, V: Versions> ConsensusTaskSt view, &self.public_key, &self.private_key, - ) else { + &self.upgrade_lock, + ) + .await + else { error!("Failed to sign TimeoutData!"); return; }; @@ -667,7 +675,10 @@ impl, V: Versions> ConsensusTaskSt } } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { + if !certificate + .is_valid_cert(self.quorum_membership.as_ref(), &self.upgrade_lock) + .await + { error!( "View Sync Finalize certificate {:?} was invalid", certificate.date() diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 69d10e9788..1c615636e0 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -12,7 +12,6 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, - simple_certificate::{QuorumCertificate, TimeoutCertificate}, simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, traits::{ election::Membership, @@ -59,10 +58,8 @@ pub(crate) async fn handle_quorum_vote_recv< view: vote.view_number(), id: task_state.id, }; - *collector = create_vote_accumulator::, QuorumCertificate>( - &info, event, sender, - ) - .await; + *collector = + create_vote_accumulator(&info, event, sender, task_state.upgrade_lock.clone()).await; } else { let result = collector .as_mut() @@ -107,10 +104,7 @@ pub(crate) async fn handle_timeout_vote_recv< id: task_state.id, }; *collector = - create_vote_accumulator::, TimeoutCertificate>( - &info, event, sender, - ) - .await; + create_vote_accumulator(&info, event, sender, task_state.upgrade_lock.clone()).await; } else { let result = collector .as_mut() @@ -250,7 +244,9 @@ pub(crate) async fn handle_timeout view_number, &task_state.public_key, &task_state.private_key, + &task_state.upgrade_lock, ) + .await .context("Failed to sign TimeoutData")?; broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 1e74508278..4b503f46fc 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -34,7 +34,8 @@ use self::handlers::{ use crate::{events::HotShotEvent, vote_collection::VoteCollectionTaskState}; /// Alias for Optional type for Vote Collectors -type VoteCollectorOption = Option>; +type VoteCollectorOption = + Option>; /// Event handlers for use in the `handle` method. mod handlers; @@ -64,11 +65,11 @@ pub struct Consensus2TaskState, V: /// Current Vote collection task, with it's view. pub vote_collector: - RwLock, QuorumCertificate>>, + RwLock, QuorumCertificate, V>>, /// Current timeout vote collection task with its view pub timeout_vote_collector: - RwLock, TimeoutCertificate>>, + RwLock, TimeoutCertificate, V>>, /// This node's storage ref pub storage: Arc>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index bc38aff941..111dc0d43a 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -18,14 +18,14 @@ use hotshot_types::{ consensus::{Consensus, OuterConsensus, View}, data::{DaProposal, PackedBundle}, event::{Event, EventType}, - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate, simple_vote::{DaData, DaVote}, traits::{ block_contents::vid_commitment, election::Membership, network::ConnectedNetwork, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, }, @@ -46,10 +46,11 @@ use crate::{ }; /// Alias for Optional type for Vote Collectors -type VoteCollectorOption = Option>; +type VoteCollectorOption = + Option>; /// Tracks state of a DA task -pub struct DaTaskState> { +pub struct DaTaskState, V: Versions> { /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -71,7 +72,7 @@ pub struct DaTaskState> { pub network: Arc, /// The current vote collection task, if there is one. - pub vote_collector: RwLock, DaCertificate>>, + pub vote_collector: RwLock, DaCertificate, V>>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -84,9 +85,12 @@ pub struct DaTaskState> { /// This node's storage ref pub storage: Arc>, + + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl> DaTaskState { +impl, V: Versions> DaTaskState { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error", target = "DaTaskState")] pub async fn handle( @@ -195,7 +199,10 @@ impl> DaTaskState { view_number, &self.public_key, &self.private_key, - ) else { + &self.upgrade_lock, + ) + .await + else { error!("Failed to sign DA Vote!"); return None; }; @@ -270,11 +277,12 @@ impl> DaTaskState { view: vote.view_number(), id: self.id, }; - *collector = create_vote_accumulator::< - TYPES, - DaVote, - DaCertificate, - >(&info, event, &event_stream) + *collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + ) .await; } else { let result = collector @@ -364,7 +372,9 @@ impl> DaTaskState { #[async_trait] /// task state implementation for DA Task -impl> TaskState for DaTaskState { +impl, V: Versions> TaskState + for DaTaskState +{ type Event = HotShotEvent; async fn handle_event( @@ -381,6 +391,6 @@ impl> TaskState for DaTaskState &'static str { - std::any::type_name::>() + std::any::type_name::>() } } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 4a7903b847..f8af2e37df 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -10,6 +10,7 @@ use std::{ sync::Arc, }; +use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; @@ -19,6 +20,8 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; +use hotshot_types::message::UpgradeLock; +use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, @@ -38,17 +41,16 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; - /// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] -pub(crate) async fn fetch_proposal( +pub(crate) async fn fetch_proposal( view_number: TYPES::Time, event_sender: Sender>>, event_receiver: Receiver>>, quorum_membership: Arc, consensus: OuterConsensus, sender_key: TYPES::SignatureKey, + upgrade_lock: &UpgradeLock, ) -> Result> { // First, broadcast that we need a proposal to the current leader broadcast_event( @@ -99,7 +101,10 @@ pub(crate) async fn fetch_proposal( let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); - if !justify_qc.is_valid_cert(quorum_membership.as_ref()) { + if !justify_qc + .is_valid_cert(quorum_membership.as_ref(), upgrade_lock) + .await + { bail!("Invalid justify_qc in proposal for view {}", *view_number); } let mut consensus_write = consensus.write().await; @@ -318,13 +323,14 @@ pub async fn decide_from_proposal( /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. #[instrument(skip_all)] -pub(crate) async fn parent_leaf_and_state( +pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::Time, event_sender: &Sender>>, event_receiver: &Receiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, consensus: OuterConsensus, + upgrade_lock: &UpgradeLock, ) -> Result<(Leaf, Arc<::ValidatedState>)> { ensure!( quorum_membership.leader(next_proposal_view_number) == public_key, @@ -344,6 +350,7 @@ pub(crate) async fn parent_leaf_and_state( quorum_membership, consensus.clone(), public_key.clone(), + upgrade_lock, ) .await .context("Failed to fetch proposal")?; @@ -404,7 +411,7 @@ pub(crate) async fn parent_leaf_and_state( #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] #[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] -pub async fn validate_proposal_safety_and_liveness( +pub async fn validate_proposal_safety_and_liveness( proposal: Proposal>, parent_leaf: Leaf, consensus: OuterConsensus, @@ -414,6 +421,7 @@ pub async fn validate_proposal_safety_and_liveness( sender: TYPES::SignatureKey, event_sender: Sender>, id: u64, + upgrade_lock: UpgradeLock, ) -> Result<()> { let view_number = proposal.data.view_number(); @@ -453,7 +461,12 @@ pub async fn validate_proposal_safety_and_liveness( ) .await; - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, &quorum_membership)?; + UpgradeCertificate::validate( + &proposal.data.upgrade_certificate, + &quorum_membership, + &upgrade_lock, + ) + .await?; // Validate that the upgrade certificate is re-attached, if we saw one on the parent proposed_leaf @@ -529,11 +542,12 @@ pub async fn validate_proposal_safety_and_liveness( /// /// # Errors /// If any validation or view number check fails. -pub fn validate_proposal_view_and_certs( +pub async fn validate_proposal_view_and_certs( proposal: &Proposal>, cur_view: TYPES::Time, quorum_membership: &Arc, timeout_membership: &Arc, + upgrade_lock: &UpgradeLock, ) -> Result<()> { let view = proposal.data.view_number(); ensure!( @@ -561,7 +575,9 @@ pub fn validate_proposal_view_and_certs( *view ); ensure!( - timeout_cert.is_valid_cert(timeout_membership.as_ref()), + timeout_cert + .is_valid_cert(timeout_membership.as_ref(), upgrade_lock) + .await, "Timeout certificate for view {} was invalid", *view ); @@ -576,7 +592,9 @@ pub fn validate_proposal_view_and_certs( // View sync certs must also be valid. ensure!( - view_sync_cert.is_valid_cert(quorum_membership.as_ref()), + view_sync_cert + .is_valid_cert(quorum_membership.as_ref(), upgrade_lock) + .await, "Invalid view sync finalize cert provided" ); } @@ -585,7 +603,12 @@ pub fn validate_proposal_view_and_certs( // Validate the upgrade certificate -- this is just a signature validation. // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. - UpgradeCertificate::validate(&proposal.data.upgrade_certificate, quorum_membership)?; + UpgradeCertificate::validate( + &proposal.data.upgrade_certificate, + quorum_membership, + upgrade_lock, + ) + .await?; Ok(()) } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index cafb29b597..a3f9ec5e1b 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -125,6 +125,7 @@ impl ProposalDependencyHandle { Arc::clone(&self.quorum_membership), self.public_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + &self.upgrade_lock, ) .await?; @@ -263,6 +264,7 @@ impl HandleDepOutput for ProposalDependencyHandle< let event_receiver = self.receiver.clone(); let sender_key = self.public_key.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); + let upgrade_lock = self.upgrade_lock.clone(); async_spawn(async move { fetch_proposal( high_qc_view_number, @@ -271,6 +273,7 @@ impl HandleDepOutput for ProposalDependencyHandle< membership, consensus, sender_key, + &upgrade_lock, ) .await }); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 8fc4c3141e..fb146eb058 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -423,7 +423,10 @@ impl, V: Versions> ); } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - if !certificate.is_valid_cert(self.quorum_membership.as_ref()) { + if !certificate + .is_valid_cert(self.quorum_membership.as_ref(), &self.upgrade_lock) + .await + { warn!( "View Sync Finalize certificate {:?} was invalid", certificate.date() diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index c1774e11ee..dcc4eb405c 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -144,13 +144,21 @@ pub(crate) async fn handle_quorum_proposal_recv< task_state.cur_view, &task_state.quorum_membership, &task_state.timeout_membership, + &task_state.upgrade_lock, ) + .await .context("Failed to validate proposal view or attached certs")?; let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); - if !justify_qc.is_valid_cert(task_state.quorum_membership.as_ref()) { + if !justify_qc + .is_valid_cert( + task_state.quorum_membership.as_ref(), + &task_state.upgrade_lock, + ) + .await + { let consensus = task_state.consensus.read().await; consensus.metrics.invalid_qc.update(1); bail!("Invalid justify_qc in proposal for view {}", *view_number); @@ -185,6 +193,7 @@ pub(crate) async fn handle_quorum_proposal_recv< // This is because the key that we receive is for the prior leader, so the payload would be routed // incorrectly. task_state.public_key.clone(), + &task_state.upgrade_lock, ) .await .ok(), @@ -246,6 +255,7 @@ pub(crate) async fn handle_quorum_proposal_recv< quorum_proposal_sender_key, task_state.output_event_stream.clone(), task_state.id, + task_state.upgrade_lock.clone(), ) .await?; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 969d99ba26..18545172ec 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -116,6 +116,7 @@ impl + 'static, V: Versions> Arc::clone(&self.quorum_membership), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.public_key.clone(), + &self.upgrade_lock, ) .await .ok(), @@ -212,7 +213,9 @@ impl + 'static, V: Versions> self.view_number, &self.public_key, &self.private_key, + &self.upgrade_lock, ) + .await .context("Failed to sign vote")?; debug!( "sending vote to next quorum leader {:?}", @@ -556,7 +559,10 @@ impl, V: Versions> QuorumVoteTaskS } // Validate the DAC. - if !cert.is_valid_cert(self.da_membership.as_ref()) { + if !cert + .is_valid_cert(self.da_membership.as_ref(), &self.upgrade_lock) + .await + { return; } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 1a05629879..9b39344cb8 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -41,7 +41,8 @@ use crate::{ }; /// Alias for Optional type for Vote Collectors -type VoteCollectorOption = Option>; +type VoteCollectorOption = + Option>; /// Tracks state of a DA task pub struct UpgradeTaskState, V: Versions> { @@ -58,7 +59,7 @@ pub struct UpgradeTaskState, V: Ve /// The current vote collection task, if there is one. pub vote_collector: - RwLock, UpgradeCertificate>>, + RwLock, UpgradeCertificate, V>>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -210,7 +211,10 @@ impl, V: Versions> UpgradeTaskStat view, &self.public_key, &self.private_key, - ) else { + &self.upgrade_lock, + ) + .await + else { error!("Failed to sign UpgradeVote!"); return None; }; @@ -243,12 +247,8 @@ impl, V: Versions> UpgradeTaskStat view: vote.view_number(), id: self.id, }; - *collector = create_vote_accumulator::< - TYPES, - UpgradeVote, - UpgradeCertificate, - >(&info, event, &tx) - .await; + *collector = + create_vote_accumulator(&info, event, &tx, self.upgrade_lock.clone()).await; } else { let result = collector .as_mut() diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 2e9544a5c6..d6568c48b7 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -21,7 +21,7 @@ use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - message::GeneralConsensusMessage, + message::{GeneralConsensusMessage, UpgradeLock}, simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, @@ -31,7 +31,7 @@ use hotshot_types::{ }, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, vote::{Certificate, HasViewNumber, Vote}, @@ -61,11 +61,13 @@ pub enum ViewSyncPhase { } /// Type alias for a map from View Number to Relay to Vote Task -type RelayMap = - HashMap<::Time, BTreeMap>>; +type RelayMap = HashMap< + ::Time, + BTreeMap>, +>; /// Main view sync task state -pub struct ViewSyncTaskState> { +pub struct ViewSyncTaskState, V: Versions> { /// View HotShot is currently in pub current_view: TYPES::Time, /// View HotShot wishes to be in @@ -85,27 +87,34 @@ pub struct ViewSyncTaskState> { pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: RwLock>>, + pub replica_task_map: RwLock>>, /// Map of pre-commit vote accumulates for the relay - pub pre_commit_relay_map: - RwLock, ViewSyncPreCommitCertificate2>>, + pub pre_commit_relay_map: RwLock< + RelayMap, ViewSyncPreCommitCertificate2, V>, + >, /// Map of commit vote accumulates for the relay pub commit_relay_map: - RwLock, ViewSyncCommitCertificate2>>, + RwLock, ViewSyncCommitCertificate2, V>>, /// Map of finalize vote accumulates for the relay - pub finalize_relay_map: - RwLock, ViewSyncFinalizeCertificate2>>, + pub finalize_relay_map: RwLock< + RelayMap, ViewSyncFinalizeCertificate2, V>, + >, /// Timeout duration for view sync rounds pub view_sync_timeout: Duration, /// Last view we garbage collected old tasks pub last_garbage_collected_view: TYPES::Time, + + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } #[async_trait] -impl> TaskState for ViewSyncTaskState { +impl, V: Versions> TaskState + for ViewSyncTaskState +{ type Event = HotShotEvent; async fn handle_event( @@ -122,12 +131,12 @@ impl> TaskState for ViewSyncTaskSt async fn cancel_subtasks(&mut self) {} fn get_task_name(&self) -> &'static str { - std::any::type_name::>() + std::any::type_name::>() } } /// State of a view sync replica task -pub struct ViewSyncReplicaTaskState> { +pub struct ViewSyncReplicaTaskState, V: Versions> { /// Timeout for view sync rounds pub view_sync_timeout: Duration, /// Current round HotShot is in @@ -153,11 +162,13 @@ pub struct ViewSyncReplicaTaskState::PrivateKey, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } #[async_trait] -impl> TaskState - for ViewSyncReplicaTaskState +impl, V: Versions> TaskState + for ViewSyncReplicaTaskState { type Event = HotShotEvent; @@ -175,11 +186,11 @@ impl> TaskState async fn cancel_subtasks(&mut self) {} fn get_task_name(&self) -> &'static str { - std::any::type_name::>() + std::any::type_name::>() } } -impl> ViewSyncTaskState { +impl, V: Versions> ViewSyncTaskState { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task @@ -215,7 +226,7 @@ impl> ViewSyncTaskState } // We do not have a replica task already running, so start one - let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { + let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { current_view: view, next_view: view, relay: 0, @@ -228,6 +239,7 @@ impl> ViewSyncTaskState private_key: self.private_key.clone(), view_sync_timeout: self.view_sync_timeout, id: self.id, + upgrade_lock: self.upgrade_lock.clone(), }; let result = replica_state @@ -307,7 +319,9 @@ impl> ViewSyncTaskState view: vote_view, id: self.id, }; - let vote_collector = create_vote_accumulator(&info, event, &event_stream).await; + let vote_collector = + create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) + .await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -344,7 +358,9 @@ impl> ViewSyncTaskState view: vote_view, id: self.id, }; - let vote_collector = create_vote_accumulator(&info, event, &event_stream).await; + let vote_collector = + create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) + .await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -381,7 +397,9 @@ impl> ViewSyncTaskState view: vote_view, id: self.id, }; - let vote_collector = create_vote_accumulator(&info, event, &event_stream).await; + let vote_collector = + create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) + .await; if let Some(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } @@ -472,7 +490,9 @@ impl> ViewSyncTaskState } } -impl> ViewSyncReplicaTaskState { +impl, V: Versions> + ViewSyncReplicaTaskState +{ #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( @@ -492,7 +512,10 @@ impl> ViewSyncReplicaTaskState> ViewSyncReplicaTaskState> ViewSyncReplicaTaskState> ViewSyncReplicaTaskState> ViewSyncReplicaTaskState> ViewSyncReplicaTaskState> ViewSyncReplicaTaskState, CERT: Certificate + Debug, + V: Versions, > { /// Public key for this node. pub public_key: TYPES::SignatureKey, @@ -41,7 +46,7 @@ pub struct VoteCollectionTaskState< pub membership: Arc, /// accumulator handles aggregating the votes - pub accumulator: Option>, + pub accumulator: Option>, /// The view which we are collecting votes for pub view: TYPES::Time, @@ -68,7 +73,8 @@ impl< TYPES: NodeType, VOTE: Vote + AggregatableVote, CERT: Certificate + Debug, - > VoteCollectionTaskState + V: Versions, + > VoteCollectionTaskState { /// Take one vote and accumulate it. Returns either the cert or the updated state /// after the vote is accumulated @@ -93,7 +99,7 @@ impl< } let accumulator = self.accumulator.as_mut()?; - match accumulator.accumulate(vote, &self.membership) { + match accumulator.accumulate(vote, &self.membership).await { Either::Left(()) => None, Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); @@ -144,11 +150,12 @@ pub struct AccumulatorInfo { /// Generic function for spawning a vote task. Returns the event stream id of the spawned task if created /// # Panics /// Calls unwrap but should never panic. -pub async fn create_vote_accumulator( +pub async fn create_vote_accumulator( info: &AccumulatorInfo, event: Arc>, sender: &Sender>>, -) -> Option> + upgrade_lock: UpgradeLock, +) -> Option> where TYPES: NodeType, VOTE: Vote @@ -161,15 +168,17 @@ where + std::marker::Send + std::marker::Sync + 'static, - VoteCollectionTaskState: HandleVoteEvent, + V: Versions, + VoteCollectionTaskState: HandleVoteEvent, { let new_accumulator = VoteAccumulator { vote_outcomes: HashMap::new(), signers: HashMap::new(), phantom: PhantomData, + upgrade_lock, }; - let mut state = VoteCollectionTaskState:: { + let mut state = VoteCollectionTaskState:: { membership: Arc::clone(&info.membership), public_key: info.public_key.clone(), accumulator: Some(new_accumulator), @@ -188,30 +197,32 @@ where } /// Alias for Quorum vote accumulator -type QuorumVoteState = - VoteCollectionTaskState, QuorumCertificate>; +type QuorumVoteState = + VoteCollectionTaskState, QuorumCertificate, V>; /// Alias for DA vote accumulator -type DaVoteState = VoteCollectionTaskState, DaCertificate>; +type DaVoteState = VoteCollectionTaskState, DaCertificate, V>; /// Alias for Timeout vote accumulator -type TimeoutVoteState = - VoteCollectionTaskState, TimeoutCertificate>; +type TimeoutVoteState = + VoteCollectionTaskState, TimeoutCertificate, V>; /// Alias for upgrade vote accumulator -type UpgradeVoteState = - VoteCollectionTaskState, UpgradeCertificate>; +type UpgradeVoteState = + VoteCollectionTaskState, UpgradeCertificate, V>; /// Alias for View Sync Pre Commit vote accumulator -type ViewSyncPreCommitState = VoteCollectionTaskState< +type ViewSyncPreCommitState = VoteCollectionTaskState< TYPES, ViewSyncPreCommitVote, ViewSyncPreCommitCertificate2, + V, >; /// Alias for View Sync Commit vote accumulator -type ViewSyncCommitVoteState = - VoteCollectionTaskState, ViewSyncCommitCertificate2>; +type ViewSyncCommitVoteState = + VoteCollectionTaskState, ViewSyncCommitCertificate2, V>; /// Alias for View Sync Finalize vote accumulator -type ViewSyncFinalizeVoteState = VoteCollectionTaskState< +type ViewSyncFinalizeVoteState = VoteCollectionTaskState< TYPES, ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, + V, >; impl AggregatableVote, QuorumCertificate> @@ -317,8 +328,9 @@ impl // Handlers for all vote accumulators #[async_trait] -impl HandleVoteEvent, QuorumCertificate> - for QuorumVoteState +impl + HandleVoteEvent, QuorumCertificate> + for QuorumVoteState { async fn handle_vote_event( &mut self, @@ -337,8 +349,9 @@ impl HandleVoteEvent, QuorumCertificat // Handlers for all vote accumulators #[async_trait] -impl HandleVoteEvent, UpgradeCertificate> - for UpgradeVoteState +impl + HandleVoteEvent, UpgradeCertificate> + for UpgradeVoteState { async fn handle_vote_event( &mut self, @@ -356,8 +369,8 @@ impl HandleVoteEvent, UpgradeCertific } #[async_trait] -impl HandleVoteEvent, DaCertificate> - for DaVoteState +impl HandleVoteEvent, DaCertificate> + for DaVoteState { async fn handle_vote_event( &mut self, @@ -375,8 +388,9 @@ impl HandleVoteEvent, DaCertificate } #[async_trait] -impl HandleVoteEvent, TimeoutCertificate> - for TimeoutVoteState +impl + HandleVoteEvent, TimeoutCertificate> + for TimeoutVoteState { async fn handle_vote_event( &mut self, @@ -394,9 +408,9 @@ impl HandleVoteEvent, TimeoutCertific } #[async_trait] -impl +impl HandleVoteEvent, ViewSyncPreCommitCertificate2> - for ViewSyncPreCommitState + for ViewSyncPreCommitState { async fn handle_vote_event( &mut self, @@ -416,9 +430,9 @@ impl } #[async_trait] -impl +impl HandleVoteEvent, ViewSyncCommitCertificate2> - for ViewSyncCommitVoteState + for ViewSyncCommitVoteState { async fn handle_vote_event( &mut self, @@ -436,9 +450,9 @@ impl } #[async_trait] -impl +impl HandleVoteEvent, ViewSyncFinalizeCertificate2> - for ViewSyncFinalizeVoteState + for ViewSyncFinalizeVoteState { async fn handle_vote_event( &mut self, diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 09c3b6e87e..8ffe9fafe7 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -13,29 +13,29 @@ use committable::Committable; use ethereum_types::U256; use hotshot::{ traits::{NodeImplementation, TestableNodeImplementation}, - types::{BLSPubKey, SignatureKey, SystemContextHandle}, + types::{SignatureKey, SystemContextHandle}, HotShotInitializer, Memberships, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, block_types::TestTransaction, - node_types::{MemoryImpl, TestTypes, TestVersions}, + node_types::TestTypes, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewNumber}, - message::{GeneralConsensusMessage, Proposal}, + data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, + message::{GeneralConsensusMessage, Proposal, UpgradeLock}, simple_certificate::DaCertificate, - simple_vote::{DaData, DaVote, QuorumData, QuorumVote, SimpleVote}, + simple_vote::{DaData, DaVote, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, network::Topic, - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{NodeType, Versions}, }, utils::{View, ViewInner}, vid::{vid_scheme, VidCommitment, VidSchemeType}, @@ -131,8 +131,9 @@ pub async fn build_system_handle< /// create certificate /// # Panics /// if we fail to sign the data -pub fn build_cert< - TYPES: NodeType, +pub async fn build_cert< + TYPES: NodeType, + V: Versions, DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, VOTE: Vote, CERT: Certificate, @@ -142,14 +143,34 @@ pub fn build_cert< view: TYPES::Time, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, ) -> CERT { - let real_qc_sig = build_assembled_sig::(&data, membership, view); + let real_qc_sig = build_assembled_sig::( + &data, + membership, + view, + upgrade_lock, + ) + .await; + + let vote = SimpleVote::::create_signed_vote( + data, + view, + public_key, + private_key, + upgrade_lock, + ) + .await + .expect("Failed to sign data!"); + + let vote_commitment = + VersionedVoteData::new(vote.date().clone(), vote.view_number(), upgrade_lock) + .await + .expect("Failed to create VersionedVoteData!") + .commit(); - let vote = - SimpleVote::::create_signed_vote(data, view, public_key, private_key) - .expect("Failed to sign data!"); let cert = CERT::create_signed_certificate( - vote.date_commitment(), + vote_commitment, vote.date().clone(), real_qc_sig, vote.view_number(), @@ -174,8 +195,9 @@ pub fn vid_share( /// create signature /// # Panics /// if fails to convert node id into keypair -pub fn build_assembled_sig< - TYPES: NodeType, +pub async fn build_assembled_sig< + TYPES: NodeType, + V: Versions, VOTE: Vote, CERT: Certificate, DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, @@ -183,6 +205,7 @@ pub fn build_assembled_sig< data: &DATAType, membership: &TYPES::Membership, view: TYPES::Time, + upgrade_lock: &UpgradeLock, ) -> ::QcType { let stake_table = membership.committee_qc_stake_table(); let real_qc_pp: ::QcParams = @@ -196,13 +219,15 @@ pub fn build_assembled_sig< // assemble the vote for node_id in 0..total_nodes { - let (private_key_i, public_key_i) = key_pair_for_id(node_id.try_into().unwrap()); + let (private_key_i, public_key_i) = key_pair_for_id::(node_id.try_into().unwrap()); let vote: SimpleVote = SimpleVote::::create_signed_vote( data.clone(), view, &public_key_i, &private_key_i, + upgrade_lock, ) + .await .expect("Failed to sign data!"); let original_signature: ::PureAssembledSignatureType = vote.signature(); @@ -220,10 +245,14 @@ pub fn build_assembled_sig< /// get the keypair for a node id #[must_use] -pub fn key_pair_for_id(node_id: u64) -> (::PrivateKey, BLSPubKey) { - let private_key = - ::generated_from_seed_indexed([0u8; 32], node_id).1; - let public_key = ::SignatureKey::from_private(&private_key); +pub fn key_pair_for_id( + node_id: u64, +) -> ( + ::PrivateKey, + TYPES::SignatureKey, +) { + let private_key = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let public_key = ::SignatureKey::from_private(&private_key); (private_key, public_key) } @@ -239,20 +268,20 @@ pub fn vid_scheme_from_view_number( vid_scheme(num_storage_nodes) } -pub fn vid_payload_commitment( - quorum_membership: &::Membership, - view_number: ViewNumber, +pub fn vid_payload_commitment( + quorum_membership: &::Membership, + view_number: TYPES::Time, transactions: Vec, ) -> VidCommitment { - let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); vid_disperse.commit } -pub fn da_payload_commitment( - quorum_membership: &::Membership, +pub fn da_payload_commitment( + quorum_membership: &::Membership, transactions: Vec, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); @@ -260,29 +289,29 @@ pub fn da_payload_commitment( vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) } -pub fn build_payload_commitment( - membership: &::Membership, - view: ViewNumber, +pub fn build_payload_commitment( + membership: &::Membership, + view: TYPES::Time, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(membership, view); + let mut vid = vid_scheme_from_view_number::(membership, view); let encoded_transactions = Vec::new(); vid.commit_only(&encoded_transactions).unwrap() } /// TODO: #[allow(clippy::type_complexity)] -pub fn build_vid_proposal( - quorum_membership: &::Membership, - view_number: ViewNumber, +pub fn build_vid_proposal( + quorum_membership: &::Membership, + view_number: TYPES::Time, transactions: Vec, - private_key: &::PrivateKey, + private_key: &::PrivateKey, ) -> ( - Proposal>, - Vec>>, + Proposal>, + Vec>>, ) { - let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = VidDisperse::from_membership( @@ -292,7 +321,7 @@ pub fn build_vid_proposal( ); let signature = - ::sign(private_key, vid_disperse.payload_commitment.as_ref()) + TYPES::SignatureKey::sign(private_key, vid_disperse.payload_commitment.as_ref()) .expect("Failed to sign VID commitment"); let vid_disperse_proposal = Proposal { data: vid_disperse.clone(), @@ -313,14 +342,15 @@ pub fn build_vid_proposal( ) } -pub fn build_da_certificate( - quorum_membership: &::Membership, - da_membership: &::Membership, - view_number: ViewNumber, +pub async fn build_da_certificate( + quorum_membership: &::Membership, + da_membership: &::Membership, + view_number: TYPES::Time, transactions: Vec, - public_key: &::SignatureKey, - private_key: &::PrivateKey, -) -> DaCertificate { + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, +) -> DaCertificate { let encoded_transactions = TestTransaction::encode(&transactions); let da_payload_commitment = @@ -330,32 +360,36 @@ pub fn build_da_certificate( payload_commit: da_payload_commitment, }; - build_cert::, DaCertificate>( + build_cert::, DaCertificate>( da_data, da_membership, view_number, public_key, private_key, + upgrade_lock, ) + .await } -pub async fn build_vote( - handle: &SystemContextHandle, - proposal: QuorumProposal, -) -> GeneralConsensusMessage { - let view = ViewNumber::new(*proposal.view_number); +pub async fn build_vote, V: Versions>( + handle: &SystemContextHandle, + proposal: QuorumProposal, +) -> GeneralConsensusMessage { + let view = proposal.view_number; let leaf: Leaf<_> = Leaf::from_quorum_proposal(&proposal); - let vote = QuorumVote::::create_signed_vote( + let vote = QuorumVote::::create_signed_vote( QuorumData { leaf_commit: leaf.commit(), }, view, &handle.public_key(), handle.private_key(), + &handle.hotshot.upgrade_lock, ) + .await .expect("Failed to create quorum vote"); - GeneralConsensusMessage::::Vote(vote) + GeneralConsensusMessage::::Vote(vote) } /// This function permutes the provided input vector `inputs`, given some order provided within the diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 0c335bbc14..7c6c8af2ee 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -25,7 +25,7 @@ use hotshot_types::{ DaProposal, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewChangeEvidence, ViewNumber, }, - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::{ DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, @@ -66,6 +66,7 @@ pub struct TestView { formed_upgrade_certificate: Option>, view_sync_finalize_data: Option>, timeout_cert_data: Option>, + upgrade_lock: UpgradeLock, } impl TestView { @@ -74,6 +75,7 @@ impl TestView { da_membership: &::Membership, ) -> Self { let genesis_view = ViewNumber::new(1); + let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -91,11 +93,12 @@ impl TestView { &metadata, ); - let (private_key, public_key) = key_pair_for_id(*genesis_view); + let (private_key, public_key) = key_pair_for_id::(*genesis_view); let leader_public_key = public_key; - let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); + let payload_commitment = + da_payload_commitment::(quorum_membership, transactions.clone()); let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, @@ -111,7 +114,9 @@ impl TestView { transactions.clone(), &public_key, &private_key, - ); + &upgrade_lock, + ) + .await; let block_header = TestBlockHeader { block_number: 1, @@ -180,6 +185,7 @@ impl TestView { view_sync_finalize_data: None, timeout_cert_data: None, da_proposal, + upgrade_lock, } } @@ -205,9 +211,9 @@ impl TestView { leaf_commit: old.leaf.commit(), }; - let (old_private_key, old_public_key) = key_pair_for_id(*old_view); + let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); - let (private_key, public_key) = key_pair_for_id(*next_view); + let (private_key, public_key) = key_pair_for_id::(*next_view); let leader_public_key = public_key; @@ -224,7 +230,8 @@ impl TestView { &metadata, ); - let payload_commitment = da_payload_commitment(quorum_membership, transactions.clone()); + let payload_commitment = + da_payload_commitment::(quorum_membership, transactions.clone()); let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, @@ -233,17 +240,20 @@ impl TestView { &private_key, ); - let da_certificate = build_da_certificate( + let da_certificate = build_da_certificate::( quorum_membership, da_membership, next_view, transactions.clone(), &public_key, &private_key, - ); + &self.upgrade_lock, + ) + .await; let quorum_certificate = build_cert::< TestTypes, + TestVersions, QuorumData, QuorumVote, QuorumCertificate, @@ -253,11 +263,14 @@ impl TestView { old_view, &old_public_key, &old_private_key, - ); + &self.upgrade_lock, + ) + .await; let upgrade_certificate = if let Some(ref data) = self.upgrade_data { let cert = build_cert::< TestTypes, + TestVersions, UpgradeProposalData, UpgradeVote, UpgradeCertificate, @@ -267,7 +280,9 @@ impl TestView { next_view, &public_key, &private_key, - ); + &self.upgrade_lock, + ) + .await; Some(cert) } else { @@ -277,6 +292,7 @@ impl TestView { let view_sync_certificate = if let Some(ref data) = self.view_sync_finalize_data { let cert = build_cert::< TestTypes, + TestVersions, ViewSyncFinalizeData, ViewSyncFinalizeVote, ViewSyncFinalizeCertificate2, @@ -286,7 +302,9 @@ impl TestView { next_view, &public_key, &private_key, - ); + &self.upgrade_lock, + ) + .await; Some(cert) } else { @@ -296,6 +314,7 @@ impl TestView { let timeout_certificate = if let Some(ref data) = self.timeout_cert_data { let cert = build_cert::< TestTypes, + TestVersions, TimeoutData, TimeoutVote, TimeoutCertificate, @@ -305,7 +324,9 @@ impl TestView { next_view, &public_key, &private_key, - ); + &self.upgrade_lock, + ) + .await; Some(cert) } else { @@ -365,6 +386,8 @@ impl TestView { _pd: PhantomData, }; + let upgrade_lock = UpgradeLock::new(); + TestView { quorum_proposal, leaf, @@ -385,6 +408,7 @@ impl TestView { view_sync_finalize_data: None, timeout_cert_data: None, da_proposal, + upgrade_lock, } } @@ -392,7 +416,7 @@ impl TestView { self.next_view_from_ancestor(self.clone()).await } - pub fn create_quorum_vote( + pub async fn create_quorum_vote( &self, handle: &SystemContextHandle, ) -> QuorumVote { @@ -403,11 +427,13 @@ impl TestView { self.view_number, &handle.public_key(), handle.private_key(), + &handle.hotshot.upgrade_lock, ) + .await .expect("Failed to generate a signature on QuorumVote") } - pub fn create_upgrade_vote( + pub async fn create_upgrade_vote( &self, data: UpgradeProposalData, handle: &SystemContextHandle, @@ -417,11 +443,13 @@ impl TestView { self.view_number, &handle.public_key(), handle.private_key(), + &handle.hotshot.upgrade_lock, ) + .await .expect("Failed to generate a signature on UpgradVote") } - pub fn create_da_vote( + pub async fn create_da_vote( &self, data: DaData, handle: &SystemContextHandle, @@ -431,7 +459,9 @@ impl TestView { self.view_number, &handle.public_key(), handle.private_key(), + &handle.hotshot.upgrade_lock, ) + .await .expect("Failed to sign DaData") } } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 1de54635a8..0c2506af47 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -85,7 +85,7 @@ async fn test_consensus_task() { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -170,7 +170,7 @@ async fn test_consensus_vote() { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -215,7 +215,7 @@ async fn test_view_sync_finalize_propose() { let handle = build_system_handle::(4) .await .0; - let (priv_key, pub_key) = key_pair_for_id(4); + let (priv_key, pub_key) = key_pair_for_id::(4); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); @@ -243,7 +243,7 @@ async fn test_view_sync_finalize_propose() { let view = generator.current_view.clone().unwrap(); proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); vids.push(view.vid_proposal.clone()); dacs.push(view.da_certificate.clone()); @@ -258,7 +258,7 @@ async fn test_view_sync_finalize_propose() { let view = generator.current_view.unwrap(); proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); vids.push(view.vid_proposal); // Handle the view sync finalize cert, get the requisite data, propose. @@ -275,7 +275,9 @@ async fn test_view_sync_finalize_propose() { ViewNumber::new(2), &pub_key, &priv_key, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); let timeout_vote_view_3 = TimeoutVote::create_signed_vote( @@ -285,7 +287,9 @@ async fn test_view_sync_finalize_propose() { ViewNumber::new(3), &pub_key, &priv_key, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); let inputs = vec![ @@ -375,7 +379,7 @@ async fn test_view_sync_finalize_vote() { for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); vids.push(view.vid_proposal.clone()); dacs.push(view.da_certificate.clone()); } @@ -386,7 +390,7 @@ async fn test_view_sync_finalize_vote() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); vids.push(view.vid_proposal.clone()); dacs.push(view.da_certificate.clone()); } @@ -473,7 +477,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); vids.push(view.vid_proposal.clone()); dacs.push(view.da_certificate.clone()); } @@ -484,7 +488,7 @@ async fn test_view_sync_finalize_vote_fail_view_number() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); vids.push(view.vid_proposal.clone()); dacs.push(view.da_certificate.clone()); } @@ -575,7 +579,7 @@ async fn test_vid_disperse_storage_failure() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 34dfc406b6..9f8fb0b744 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -64,7 +64,10 @@ async fn test_da_task() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); + votes.push( + view.create_da_vote(DaData { payload_commit }, &handle) + .await, + ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -74,7 +77,10 @@ async fn test_da_task() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); + votes.push( + view.create_da_vote(DaData { payload_commit }, &handle) + .await, + ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -99,7 +105,7 @@ async fn test_da_task() { serial![DaProposalRecv(proposals[1].clone(), leaders[1])], ]; - let da_state = DaTaskState::::create_from(&handle).await; + let da_state = DaTaskState::::create_from(&handle).await; let mut da_script = TaskScript { timeout: Duration::from_millis(35), state: da_state, @@ -153,7 +159,10 @@ async fn test_da_task_storage_failure() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); + votes.push( + view.create_da_vote(DaData { payload_commit }, &handle) + .await, + ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -163,7 +172,10 @@ async fn test_da_task_storage_failure() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_da_vote(DaData { payload_commit }, &handle)); + votes.push( + view.create_da_vote(DaData { payload_commit }, &handle) + .await, + ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); } @@ -200,7 +212,7 @@ async fn test_da_task_storage_failure() { Expectations::from_outputs(vec![]), ]; - let da_state = DaTaskState::::create_from(&handle).await; + let da_state = DaTaskState::::create_from(&handle).await; let mut da_script = TaskScript { timeout: Duration::from_millis(35), state: da_state, diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 541e427251..92c8cb1da7 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -41,13 +41,8 @@ fn version_number_at_start_of_serialization() { relay: 37, round: view_number, }; - let simple_certificate = SimpleCertificate { - data: data.clone(), - vote_commitment: data.commit(), - view_number, - signatures: None, - _pd: PhantomData, - }; + let simple_certificate = + SimpleCertificate::new(data.clone(), data.commit(), view_number, None, PhantomData); let message = Message { sender, kind: MessageKind::Consensus(SequencingMessage::General( diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 7479536487..007d16185d 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -64,7 +64,7 @@ async fn test_quorum_proposal_recv_task() { for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); @@ -154,7 +154,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { for view in (&mut generator).take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 61372110a8..b984c60889 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -58,7 +58,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = build_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = + build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -201,7 +202,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), TestMetadata, ViewNumber::new(1), @@ -218,7 +219,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), TestMetadata, ViewNumber::new(2), @@ -235,7 +236,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), TestMetadata, ViewNumber::new(3), @@ -252,7 +253,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), builder_commitment.clone(), TestMetadata, ViewNumber::new(4), @@ -269,7 +270,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), builder_commitment, TestMetadata, ViewNumber::new(5), @@ -336,7 +337,8 @@ async fn test_quorum_proposal_task_qc_timeout() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = build_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = + build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -423,7 +425,8 @@ async fn test_quorum_proposal_task_view_sync() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = build_payload_commitment(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = + build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -559,7 +562,7 @@ async fn test_quorum_proposal_task_liveness_check() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), TestMetadata, ViewNumber::new(1), @@ -576,7 +579,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), TestMetadata, ViewNumber::new(2), @@ -593,7 +596,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), TestMetadata, ViewNumber::new(3), @@ -610,7 +613,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), builder_commitment.clone(), TestMetadata, ViewNumber::new(4), @@ -627,7 +630,7 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), builder_commitment, TestMetadata, ViewNumber::new(5), diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 83019caad4..265910168d 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -127,7 +127,7 @@ async fn test_quorum_vote_task_miss_dependency() { for view in (&mut generator).take(5).collect::>().await { proposals.push(view.quorum_proposal.clone()); leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 9d551629b8..eedb2e55c2 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -11,8 +11,12 @@ use std::{ time::Duration, }; +use hotshot::tasks::DishonestDa; use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, + node_types::{ + Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, + TestVersions, + }, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -109,6 +113,41 @@ cross_tests!( }, ); +cross_tests!( + TestName: dishonest_da, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { + let dishonest_da = DishonestDa { + dishonest_at_da_cert_sent_numbers: HashSet::from([2]), + total_da_certs_sent_from_node: 0, + total_views_add_to_cert: 4 + }; + match node_id { + 2 => Behaviour::Byzantine(Box::new(dishonest_da)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.num_nodes_with_stake = 10; + metadata + }, +); + cross_tests!( TestName: test_with_double_leader_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index 9c4ada9ae4..bae16c1267 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -78,7 +78,7 @@ async fn test_upgrade_task_vote() { for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -89,7 +89,7 @@ async fn test_upgrade_task_vote() { for view in generator.take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -239,7 +239,7 @@ async fn test_upgrade_task_propose() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -250,23 +250,29 @@ async fn test_upgrade_task_propose() { for view in generator.take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); views.push(view.clone()); } - let upgrade_votes = other_handles - .iter() - .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); + let mut upgrade_votes = Vec::new(); + + for handle in other_handles { + upgrade_votes.push( + views[2] + .create_upgrade_vote(upgrade_data.clone(), &handle.0) + .await, + ); + } let consensus_state = ConsensusTaskState::::create_from(&handle).await; let upgrade_state = UpgradeTaskState::::create_from(&handle).await; - let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); + let upgrade_vote_recvs: Vec<_> = upgrade_votes.into_iter().map(UpgradeVoteRecv).collect(); let inputs = vec![ vec![ @@ -409,7 +415,7 @@ async fn test_upgrade_task_blank_blocks() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -420,7 +426,7 @@ async fn test_upgrade_task_blank_blocks() { for view in (&mut generator).take(3).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -433,7 +439,7 @@ async fn test_upgrade_task_blank_blocks() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -446,7 +452,7 @@ async fn test_upgrade_task_blank_blocks() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -458,7 +464,7 @@ async fn test_upgrade_task_blank_blocks() { for view in generator.take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index ce30a6f1fd..fcb25aaf23 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -96,7 +96,7 @@ async fn test_upgrade_task_with_proposal() { for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vid_dispersals.push(view.vid_disperse.clone()); leaders.push(view.leader_public_key); @@ -115,7 +115,7 @@ async fn test_upgrade_task_with_proposal() { for view in generator.take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vid_dispersals.push(view.vid_disperse.clone()); leaders.push(view.leader_public_key); @@ -143,22 +143,29 @@ async fn test_upgrade_task_with_proposal() { ::Base::VERSION, ) .unwrap(); - let upgrade_votes = other_handles - .iter() - .map(|h| views[2].create_upgrade_vote(upgrade_data.clone(), &h.0)); + + let mut upgrade_votes = Vec::new(); + + for handle in other_handles { + upgrade_votes.push( + views[2] + .create_upgrade_vote(upgrade_data.clone(), &handle.0) + .await, + ); + } let proposal_state = QuorumProposalTaskState::::create_from(&handle).await; let upgrade_state = UpgradeTaskState::::create_from(&handle).await; - let upgrade_vote_recvs: Vec<_> = upgrade_votes.map(UpgradeVoteRecv).collect(); + let upgrade_vote_recvs: Vec<_> = upgrade_votes.into_iter().map(UpgradeVoteRecv).collect(); let inputs = vec![ random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), TestMetadata, ViewNumber::new(1), @@ -175,7 +182,7 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), TestMetadata, ViewNumber::new(2), @@ -193,7 +200,7 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), TestMetadata, ViewNumber::new(3), diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 7fa5d3bf40..37e3134bf1 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -81,7 +81,7 @@ async fn test_upgrade_task_with_vote() { let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); @@ -100,7 +100,7 @@ async fn test_upgrade_task_with_vote() { for view in generator.take(4).collect::>().await { proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle)); + votes.push(view.create_quorum_vote(&handle).await); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); leaders.push(view.leader_public_key); diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index eba1c2ebdf..c0c4913981 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -36,7 +36,9 @@ async fn test_view_sync_task() { ::Time::new(4), hotshot_types::traits::consensus_api::ConsensusApi::public_key(&handle), hotshot_types::traits::consensus_api::ConsensusApi::private_key(&handle), + &handle.hotshot.upgrade_lock, ) + .await .expect("Failed to create a ViewSyncPreCommitVote!"); tracing::error!("Vote in test is {:?}", vote.clone()); @@ -52,6 +54,7 @@ async fn test_view_sync_task() { output.push(HotShotEvent::ViewChange(ViewNumber::new(2))); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); - let view_sync_state = ViewSyncTaskState::::create_from(&handle).await; + let view_sync_state = + ViewSyncTaskState::::create_from(&handle).await; run_harness(input, output, view_sync_state, false).await; } diff --git a/testing/tests/tests_5/fake_solver.rs b/testing/tests/tests_5/fake_solver.rs index 4e90656a60..27f1c91bb8 100644 --- a/testing/tests/tests_5/fake_solver.rs +++ b/testing/tests/tests_5/fake_solver.rs @@ -164,7 +164,7 @@ async fn test_fake_solver_fetch_permissioned_no_error() { ); // We need a private key - let (private_key, _) = key_pair_for_id(0); + let (private_key, _) = key_pair_for_id::(0); // Fire up the solver. let solver_url: Url = format!( @@ -220,7 +220,7 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); // We need a private key - let (private_key, _) = key_pair_for_id(0); + let (private_key, _) = key_pair_for_id::(0); // Fire up the solver. let solver_url: Url = format!( diff --git a/types/src/data.rs b/types/src/data.rs index 15eb3599e6..168ca0669b 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -487,13 +487,13 @@ impl QuorumCertificate { .commit(), }; let commit = data.commit(); - Self { + Self::new( data, - vote_commitment: commit, - view_number: ::genesis(), - signatures: None, - _pd: PhantomData, - } + commit, + ::genesis(), + None, + PhantomData, + ) } } @@ -529,13 +529,13 @@ impl Leaf { leaf_commit: Commitment::>::default_commitment_no_preimage(), }; - let justify_qc = QuorumCertificate { - data: null_quorum_data.clone(), - vote_commitment: null_quorum_data.commit(), - view_number: ::genesis(), - signatures: None, - _pd: PhantomData, - }; + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + ::genesis(), + None, + PhantomData, + ); Self { view_number: TYPES::Time::genesis(), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 4069cb2d98..c3b2b93e1e 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -19,15 +19,16 @@ use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; +use crate::message::UpgradeLock; use crate::{ data::serialize_signature2, simple_vote::{ - DaData, QuorumData, TimeoutData, UpgradeProposalData, ViewSyncCommitData, - ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, + DaData, QuorumData, TimeoutData, UpgradeProposalData, VersionedVoteData, + ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, vote::{Certificate, HasViewNumber}, @@ -75,7 +76,7 @@ pub struct SimpleCertificate, + vote_commitment: Commitment, /// Which view this QC relates to pub view_number: TYPES::Time, /// assembled signature for certificate aggregation @@ -84,6 +85,27 @@ pub struct SimpleCertificate, } +impl> + SimpleCertificate +{ + /// Creates a new instance of `SimpleCertificate` + pub fn new( + data: VOTEABLE, + vote_commitment: Commitment, + view_number: TYPES::Time, + signatures: Option<::QcType>, + pd: PhantomData<(TYPES, THRESHOLD)>, + ) -> Self { + Self { + data, + vote_commitment, + view_number, + signatures, + _pd: pd, + } + } +} + impl> Committable for SimpleCertificate { @@ -107,21 +129,27 @@ impl> type Voteable = VOTEABLE; type Threshold = THRESHOLD; - fn create_signed_certificate( - vote_commitment: Commitment, + fn create_signed_certificate( + vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, view: TYPES::Time, ) -> Self { + let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); + SimpleCertificate { data, - vote_commitment, + vote_commitment: Commitment::from_raw(vote_commitment_bytes), view_number: view, signatures: Some(sig), _pd: PhantomData, } } - fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool { + async fn is_valid_cert, V: Versions>( + &self, + membership: &MEMBERSHIP, + upgrade_lock: &UpgradeLock, + ) -> bool { if self.view_number == TYPES::Time::genesis() { return true; } @@ -129,9 +157,12 @@ impl> membership.committee_qc_stake_table(), U256::from(Self::threshold(membership)), ); + let Ok(commit) = self.date_commitment(upgrade_lock).await else { + return false; + }; ::check( &real_qc_pp, - self.vote_commitment.as_ref(), + commit.as_ref(), self.signatures.as_ref().unwrap(), ) } @@ -141,8 +172,15 @@ impl> fn date(&self) -> &Self::Voteable { &self.data } - fn date_commitment(&self) -> Commitment { - self.vote_commitment + async fn date_commitment( + &self, + upgrade_lock: &UpgradeLock, + ) -> Result>> { + Ok( + VersionedVoteData::new(self.data.clone(), self.view_number, upgrade_lock) + .await? + .commit(), + ) } } @@ -206,13 +244,14 @@ impl UpgradeCertificate { /// Validate an upgrade certificate. /// # Errors /// Returns an error when the upgrade certificate is invalid. - pub fn validate( + pub async fn validate( upgrade_certificate: &Option, quorum_membership: &TYPES::Membership, + upgrade_lock: &UpgradeLock, ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { ensure!( - cert.is_valid_cert(quorum_membership), + cert.is_valid_cert(quorum_membership, upgrade_lock).await, "Invalid upgrade certificate." ); Ok(()) diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index c91aac557c..fd1500c04c 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -6,15 +6,20 @@ //! Implementations of the simple vote types. -use std::{fmt::Debug, hash::Hash}; +use std::{fmt::Debug, hash::Hash, marker::PhantomData}; +use anyhow::Result; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use vbs::version::Version; +use vbs::version::{StaticVersionType, Version}; use crate::{ data::Leaf, - traits::{node_implementation::NodeType, signature_key::SignatureKey}, + message::UpgradeLock, + traits::{ + node_implementation::{NodeType, Versions}, + signature_key::SignatureKey, + }, vid::VidCommitment, vote::{HasViewNumber, Vote}, }; @@ -151,19 +156,81 @@ impl SimpleVote { /// Creates and signs a simple vote /// # Errors /// If we are unable to sign the data - pub fn create_signed_vote( + pub async fn create_signed_vote( data: DATA, view: TYPES::Time, pub_key: &TYPES::SignatureKey, private_key: &::PrivateKey, - ) -> Result::SignError> { - match TYPES::SignatureKey::sign(private_key, data.commit().as_ref()) { - Ok(signature) => Ok(Self { - signature: (pub_key.clone(), signature), - data, - view_number: view, - }), - Err(e) => Err(e), + upgrade_lock: &UpgradeLock, + ) -> Result { + let commit = VersionedVoteData::new(data.clone(), view, upgrade_lock) + .await? + .commit(); + + let signature = ( + pub_key.clone(), + TYPES::SignatureKey::sign(private_key, commit.as_ref())?, + ); + + Ok(Self { + signature, + data, + view_number: view, + }) + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// A wrapper for vote data that carries a view number and an `upgrade_lock`, allowing switching the commitment calculation dynamically depending on the version +pub struct VersionedVoteData { + /// underlying vote data + data: DATA, + + /// view number + view: TYPES::Time, + + /// version applied to the view number + version: Version, + + /// phantom data + _pd: PhantomData, +} + +impl VersionedVoteData { + /// Create a new `VersionedVoteData` struct + /// + /// # Errors + /// + /// Returns an error if `upgrade_lock.version(view)` is unable to return a version we support + pub async fn new( + data: DATA, + view: TYPES::Time, + upgrade_lock: &UpgradeLock, + ) -> Result { + let version = upgrade_lock.version(view).await?; + + Ok(Self { + data, + view, + version, + _pd: PhantomData, + }) + } +} + +impl Committable + for VersionedVoteData +{ + fn commit(&self) -> Commitment { + if self.version < V::Marketplace::VERSION { + let bytes: [u8; 32] = self.data.commit().into(); + + Commitment::::from_raw(bytes) + } else { + committable::RawCommitmentBuilder::new("Vote") + .var_size_bytes(self.data.commit().as_ref()) + .u64(*self.view) + .finalize() } } } diff --git a/types/src/vote.rs b/types/src/vote.rs index 14b868d624..83b0898c24 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -11,18 +11,20 @@ use std::{ marker::PhantomData, }; +use anyhow::Result; use bitvec::{bitvec, vec::BitVec}; -use committable::Commitment; +use committable::{Commitment, Committable}; use either::Either; use ethereum_types::U256; use tracing::error; use crate::{ + message::UpgradeLock, simple_certificate::Threshold, - simple_vote::Voteable, + simple_vote::{VersionedVoteData, Voteable}, traits::{ election::Membership, - node_implementation::NodeType, + node_implementation::{NodeType, Versions}, signature_key::{SignatureKey, StakeTableEntryType}, }, }; @@ -62,22 +64,29 @@ pub trait Certificate: HasViewNumber { type Threshold: Threshold; /// Build a certificate from the data commitment and the quorum of signers - fn create_signed_certificate( - vote_commitment: Commitment, + fn create_signed_certificate( + vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, view: TYPES::Time, ) -> Self; /// Checks if the cert is valid - fn is_valid_cert>(&self, membership: &MEMBERSHIP) -> bool; + fn is_valid_cert, V: Versions>( + &self, + membership: &MEMBERSHIP, + upgrade_lock: &UpgradeLock, + ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>(membership: &MEMBERSHIP) -> u64; /// Get the commitment which was voted on fn date(&self) -> &Self::Voteable; /// Get the vote commitment which the votes commit to - fn date_commitment(&self) -> Commitment; + fn date_commitment( + &self, + upgrade_lock: &UpgradeLock, + ) -> impl std::future::Future>>>; } /// Mapping of vote commitment to signatures and bitvec type SignersMap = HashMap< @@ -87,34 +96,63 @@ type SignersMap = HashMap< Vec<::PureAssembledSignatureType>, ), >; + +#[allow(clippy::type_complexity)] /// Accumulates votes until a certificate is formed. This implementation works for all simple vote and certificate pairs pub struct VoteAccumulator< TYPES: NodeType, VOTE: Vote, CERT: Certificate, + V: Versions, > { /// Map of all signatures accumulated so far pub vote_outcomes: VoteMap2< - Commitment, + Commitment>::Commitment, V>>, TYPES::SignatureKey, ::PureAssembledSignatureType, >, /// A bitvec to indicate which node is active and send out a valid signature for certificate aggregation, this automatically do uniqueness check /// And a list of valid signatures for certificate aggregation - pub signers: SignersMap, TYPES::SignatureKey>, + pub signers: SignersMap< + Commitment>::Commitment, V>>, + TYPES::SignatureKey, + >, /// Phantom data to specify the types this accumulator is for pub phantom: PhantomData<(TYPES, VOTE, CERT)>, + /// version information + pub upgrade_lock: UpgradeLock, } -impl, CERT: Certificate> - VoteAccumulator +impl< + TYPES: NodeType, + VOTE: Vote, + CERT: Certificate, + V: Versions, + > VoteAccumulator { /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. - pub fn accumulate(&mut self, vote: &VOTE, membership: &TYPES::Membership) -> Either<(), CERT> { + pub async fn accumulate( + &mut self, + vote: &VOTE, + membership: &TYPES::Membership, + ) -> Either<(), CERT> { let key = vote.signing_key(); - let vote_commitment = vote.date_commitment(); + let vote_commitment = match VersionedVoteData::new( + vote.date().clone(), + vote.view_number(), + &self.upgrade_lock, + ) + .await + { + Ok(data) => data.commit(), + Err(e) => { + tracing::warn!("Failed to generate versioned vote data: {e}"); + return Either::Left(()); + } + }; + if !key.validate(&vote.signature(), vote_commitment.as_ref()) { error!("Invalid vote! Vote Data {:?}", vote.date()); return Either::Left(()); @@ -156,7 +194,7 @@ impl, CERT: Certificate= CERT::threshold(membership).into() { // Assemble QC @@ -172,8 +210,8 @@ impl, CERT: Certificate( + vote_commitment, vote.date().clone(), real_qc_sig, vote.view_number(), From bc856628f96fe77ba46d57e8cebeb0d830f80840 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 23 Aug 2024 10:11:11 -0400 Subject: [PATCH 1190/1393] update address on new peer (#3607) --- libp2p-networking/src/network/node.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 3ef40fbab8..7015825c34 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -764,6 +764,12 @@ impl NetworkNode { .dht .add_address(&my_id, address.clone()); } + SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { + self.swarm + .behaviour_mut() + .dht + .add_address(&peer_id, address.clone()); + } _ => { debug!("Unhandled swarm event {:?}", event); } From 7950fe46f0490ffb2e6aac25f7401181f022158b Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 23 Aug 2024 13:26:59 -0400 Subject: [PATCH 1191/1393] [Libp2p] - Sign Proposal Request Messages (#3604) * implement message signing * fix comments * fix compiler error on dep tasks, fix test * fix name * continuously wait for the proposal --- task-impls/src/consensus/handlers.rs | 4 + task-impls/src/consensus/mod.rs | 1 + task-impls/src/events.rs | 45 +++++++---- task-impls/src/helpers.rs | 81 ++++++++++++------- task-impls/src/network.rs | 19 ++--- task-impls/src/quorum_proposal/handlers.rs | 7 +- .../src/quorum_proposal_recv/handlers.rs | 1 + task-impls/src/quorum_vote/mod.rs | 1 + task-impls/src/request.rs | 22 +++-- .../tests_1/quorum_proposal_recv_task.rs | 21 ++++- types/src/message.rs | 8 +- types/src/request_response.rs | 24 +++++- 12 files changed, 164 insertions(+), 70 deletions(-) diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 9716ca6998..1dd8d0d47c 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -182,6 +182,7 @@ pub async fn publish_proposal_from_commitment_and_metadata, public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, consensus: OuterConsensus, storage: Arc>, quorum_membership: Arc, @@ -766,6 +769,7 @@ pub async fn update_state_and_vote_if_able< Arc::clone(&quorum_membership), OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), public_key.clone(), + private_key.clone(), upgrade_lock, ) .await diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 445c81fc1f..a2af8fd41e 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -254,6 +254,7 @@ impl, V: Versions> ConsensusTaskSt view, proposal, pub_key, + priv_key.clone(), consensus, storage, quorum_mem, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index bf535af95e..ea2834eb9a 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -6,15 +6,18 @@ use std::fmt::Display; +use crate::view_sync::ViewSyncPhase; use async_broadcast::Sender; use either::Either; use hotshot_task::task::TaskEvent; +use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ data::{ DaProposal, Leaf, PackedBundle, QuorumProposal, UpgradeProposal, VidDisperse, VidDisperseShare, }, message::Proposal, + request_response::ProposalRequestPayload, simple_certificate::{ DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -30,8 +33,6 @@ use hotshot_types::{ }; use vec1::Vec1; -use crate::view_sync::ViewSyncPhase; - impl TaskEvent for HotShotEvent { fn shutdown_event() -> Self { HotShotEvent::Shutdown @@ -101,18 +102,20 @@ pub enum HotShotEvent { /// 3. The justify QC is valid /// 4. The proposal passes either liveness or safety check. QuorumProposalValidated(QuorumProposal, Leaf), - /// A quorum proposal is missing for a view that we need. Also includes the sender key. - QuorumProposalRequestSend(TYPES::Time, TYPES::SignatureKey), - /// A quorum proposal was requested by a node for a view. Also includes the sender key. - QuorumProposalRequestRecv(TYPES::Time, TYPES::SignatureKey), - /// A quorum proposal was missing for a view. As the leader, we send a reply to the recipient with their key. - QuorumProposalResponseSend( - TYPES::Time, - TYPES::SignatureKey, - Proposal>, + /// A quorum proposal is missing for a view that we need. + QuorumProposalRequestSend( + ProposalRequestPayload, + ::PureAssembledSignatureType, + ), + /// A quorum proposal was requested by a node for a view. + QuorumProposalRequestRecv( + ProposalRequestPayload, + ::PureAssembledSignatureType, ), - /// A quorum proposal was requested by a node for a view. Also includes the sender key. - QuorumProposalResponseRecv(TYPES::Time, Proposal>), + /// A quorum proposal was missing for a view. As the leader, we send a reply to the recipient with their key. + QuorumProposalResponseSend(TYPES::SignatureKey, Proposal>), + /// A quorum proposal was requested by a node for a view. + QuorumProposalResponseRecv(Proposal>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal @@ -442,11 +445,19 @@ impl Display for HotShotEvent { HotShotEvent::QuorumProposalRequestRecv(view_number, _) => { write!(f, "QuorumProposalRequestRecv(view_number={view_number:?})") } - HotShotEvent::QuorumProposalResponseSend(view_number, _, _) => { - write!(f, "QuorumProposalResponseSend(view_number={view_number:?})") + HotShotEvent::QuorumProposalResponseSend(_, proposal) => { + write!( + f, + "QuorumProposalResponseSend(view_number={:?})", + proposal.data.view_number + ) } - HotShotEvent::QuorumProposalResponseRecv(view_number, _) => { - write!(f, "QuorumProposalResponseRecv(view_number={view_number:?})") + HotShotEvent::QuorumProposalResponseRecv(proposal) => { + write!( + f, + "QuorumProposalResponseRecv(view_number={:?})", + proposal.data.view_number + ) } HotShotEvent::ValidatedStateUpdated(view_number, _) => { write!(f, "ValidatedStateUpdated(view_number={view_number:?})") diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index f8af2e37df..2eb80a234f 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -20,7 +20,6 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; -use hotshot_types::message::UpgradeLock; use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, @@ -32,66 +31,91 @@ use hotshot_types::{ block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, BlockPayload, ValidatedState, }, utils::{Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; +use hotshot_types::{message::UpgradeLock, request_response::ProposalRequestPayload}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; /// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] +#[allow(clippy::too_many_arguments)] pub(crate) async fn fetch_proposal( view_number: TYPES::Time, event_sender: Sender>>, event_receiver: Receiver>>, quorum_membership: Arc, consensus: OuterConsensus, - sender_key: TYPES::SignatureKey, + sender_public_key: TYPES::SignatureKey, + sender_private_key: ::PrivateKey, upgrade_lock: &UpgradeLock, ) -> Result> { + // We need to be able to sign this request before submitting it to the network. Compute the + // payload first. + let signed_proposal_request = ProposalRequestPayload { + view_number, + key: sender_public_key, + }; + + // Finally, compute the signature for the payload. + let signature = TYPES::SignatureKey::sign( + &sender_private_key, + signed_proposal_request.commit().as_ref(), + )?; + // First, broadcast that we need a proposal to the current leader broadcast_event( - HotShotEvent::QuorumProposalRequestSend(view_number, sender_key).into(), + HotShotEvent::QuorumProposalRequestSend(signed_proposal_request, signature).into(), &event_sender, ) .await; + let mem = Arc::clone(&quorum_membership); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. async_timeout(REQUEST_TIMEOUT, async move { - // First, capture the output from the event dependency - let event = EventDependency::new( - event_receiver.clone(), - Box::new(move |event| { - let event = event.as_ref(); - if let HotShotEvent::QuorumProposalResponseRecv( - qpr_view_number, - _quorum_proposal, - ) = event + // We want to iterate until the proposal is not None, or until we reach the timeout. + let mut proposal = None; + while proposal.is_none() { + // First, capture the output from the event dependency + let event = EventDependency::new( + event_receiver.clone(), + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::QuorumProposalResponseRecv( + quorum_proposal, + ) = event + { + quorum_proposal.data.view_number() == view_number + } else { + false + } + }), + ) + .completed() + .await; + + // Then, if it's `Some`, make sure that the data is correct + if let Some(hs_event) = event.as_ref() { + if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = + hs_event.as_ref() { - *qpr_view_number == view_number - } else { - false - } - }), - ) - .completed() - .await; + // Make sure that the quorum_proposal is valid + if quorum_proposal.validate_signature(&mem).is_ok() { + proposal = Some(quorum_proposal.clone()); + } - // Then, if it's `Some`, make sure that the data is correct - if let Some(hs_event) = event.as_ref() { - if let HotShotEvent::QuorumProposalResponseRecv(_view_number, quorum_proposal) = - hs_event.as_ref() - { - return Some(quorum_proposal.clone()); + } } } - None + proposal }) .await else { @@ -323,12 +347,14 @@ pub async fn decide_from_proposal( /// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. #[instrument(skip_all)] +#[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::Time, event_sender: &Sender>>, event_receiver: &Receiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, consensus: OuterConsensus, upgrade_lock: &UpgradeLock, ) -> Result<(Leaf, Arc<::ValidatedState>)> { @@ -350,6 +376,7 @@ pub(crate) async fn parent_leaf_and_state( quorum_membership, consensus.clone(), public_key.clone(), + private_key.clone(), upgrade_lock, ) .await diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 52b94e2cf5..e658cca42b 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -115,14 +115,11 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::Proposal(proposal) => { HotShotEvent::QuorumProposalRecv(proposal, sender) } - GeneralConsensusMessage::ProposalRequested(view, sender) => { - HotShotEvent::QuorumProposalRequestRecv(view, sender) + GeneralConsensusMessage::ProposalRequested(req, sig) => { + HotShotEvent::QuorumProposalRequestRecv(req, sig) } GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { - HotShotEvent::QuorumProposalResponseRecv( - proposal.data.view_number, - proposal, - ) + HotShotEvent::QuorumProposalResponseRecv(proposal) } GeneralConsensusMessage::Vote(vote) => { HotShotEvent::QuorumVoteRecv(vote.clone()) @@ -313,14 +310,14 @@ impl< TransmitType::Direct(membership.leader(vote.view_number() + 1)), ) } - HotShotEvent::QuorumProposalRequestSend(view_number, sender_key) => ( - sender_key.clone(), + HotShotEvent::QuorumProposalRequestSend(req, signature) => ( + req.key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ProposalRequested(view_number, sender_key), + GeneralConsensusMessage::ProposalRequested(req.clone(), signature), )), - TransmitType::Direct(membership.leader(view_number)), + TransmitType::Direct(membership.leader(req.view_number)), ), - HotShotEvent::QuorumProposalResponseSend(_view_number, sender_key, proposal) => ( + HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => ( sender_key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::LeaderProposalAvailable(proposal), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index a3f9ec5e1b..f1fc665343 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -124,6 +124,7 @@ impl ProposalDependencyHandle { &self.receiver, Arc::clone(&self.quorum_membership), self.public_key.clone(), + self.private_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &self.upgrade_lock, ) @@ -262,7 +263,8 @@ impl HandleDepOutput for ProposalDependencyHandle< let membership = Arc::clone(&self.quorum_membership); let event_sender = self.sender.clone(); let event_receiver = self.receiver.clone(); - let sender_key = self.public_key.clone(); + let sender_public_key = self.public_key.clone(); + let sender_private_key = self.private_key.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let upgrade_lock = self.upgrade_lock.clone(); async_spawn(async move { @@ -272,7 +274,8 @@ impl HandleDepOutput for ProposalDependencyHandle< event_receiver, membership, consensus, - sender_key, + sender_public_key, + sender_private_key, &upgrade_lock, ) .await diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index dcc4eb405c..c5ae9cdca9 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -193,6 +193,7 @@ pub(crate) async fn handle_quorum_proposal_recv< // This is because the key that we receive is for the prior leader, so the payload would be routed // incorrectly. task_state.public_key.clone(), + task_state.private_key.clone(), &task_state.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 18545172ec..6510c930a3 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -116,6 +116,7 @@ impl + 'static, V: Versions> Arc::clone(&self.quorum_membership), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.public_key.clone(), + self.private_key.clone(), &self.upgrade_lock, ) .await diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index d587a927a7..c917c47256 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -13,12 +13,13 @@ use std::{ time::Duration, }; -use anyhow::Result; +use anyhow::{ensure, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; +use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, @@ -108,14 +109,23 @@ impl> TaskState for NetworkRequest } Ok(()) } - HotShotEvent::QuorumProposalRequestRecv(view_number, sender_key) => { - if let Some(quorum_proposal) = - self.state.read().await.last_proposals().get(view_number) + HotShotEvent::QuorumProposalRequestRecv(req, signature) => { + // Make sure that this request came from who we think it did + ensure!( + req.key.validate(signature, req.commit().as_ref()), + "Invalid signature key on proposal request." + ); + + if let Some(quorum_proposal) = self + .state + .read() + .await + .last_proposals() + .get(&req.view_number) { broadcast_event( HotShotEvent::QuorumProposalResponseSend( - *view_number, - sender_key.clone(), + req.key.clone(), quorum_proposal.clone(), ) .into(), diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 007d16185d..e6fad450ac 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -8,6 +8,7 @@ #![allow(unused_imports)] #![cfg(feature = "dependency-tasks")] +use committable::Committable; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ @@ -25,6 +26,10 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::request_response::ProposalRequestPayload; +use hotshot_types::traits::consensus_api::ConsensusApi; +use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ data::ViewNumber, traits::{node_implementation::ConsensusTime, ValidatedState}, @@ -198,6 +203,17 @@ async fn test_quorum_proposal_recv_task_liveness_check() { leaders[2] )]]; + // make the request payload + let req = ProposalRequestPayload { + view_number: ViewNumber::new(2), + key: handle.public_key(), + }; + + // make the signed commitment + let signature = + ::SignatureKey::sign(handle.private_key(), req.commit().as_ref()) + .unwrap(); + let expectations = vec![Expectations::from_outputs(all_predicates![ exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), exact(ViewChange(ViewNumber::new(3))), @@ -210,10 +226,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { ), ), )), - exact(QuorumProposalRequestSend( - ViewNumber::new(2), - handle.public_key() - )), + exact(QuorumProposalRequestSend(req, signature)), exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), ])]; diff --git a/types/src/message.rs b/types/src/message.rs index e328e4d970..eed73dcdb9 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -22,6 +22,7 @@ use vbs::{ BinarySerializer, Serializer, }; +use crate::request_response::ProposalRequestPayload; use crate::{ data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, simple_certificate::{ @@ -163,7 +164,10 @@ pub enum GeneralConsensusMessage { Proposal(Proposal>), /// A peer node needs a proposal from the leader. - ProposalRequested(TYPES::Time, TYPES::SignatureKey), + ProposalRequested( + ProposalRequestPayload, + ::PureAssembledSignatureType, + ), /// The leader has responded with a valid proposal. LeaderProposalAvailable(Proposal>), @@ -241,7 +245,7 @@ impl SequencingMessage { // this should match replica upon receipt p.data.view_number() } - GeneralConsensusMessage::ProposalRequested(view_number, _) => *view_number, + GeneralConsensusMessage::ProposalRequested(req, _) => req.view_number, GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { proposal.data.view_number() } diff --git a/types/src/request_response.rs b/types/src/request_response.rs index 3397445935..cb679c1446 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -8,11 +8,13 @@ //! of the shared types for all of the network backends. use async_lock::Mutex; +use committable::{Committable, RawCommitmentBuilder}; use futures::channel::{mpsc::Receiver, oneshot}; use libp2p::request_response::ResponseChannel; use serde::{Deserialize, Serialize}; -use crate::traits::network::NetworkMsg; +use crate::traits::signature_key::SignatureKey; +use crate::traits::{network::NetworkMsg, node_implementation::NodeType}; /// Request for Consenus data #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -40,3 +42,23 @@ pub type RequestReceiver = Receiver<(Vec, NetworkMsgResponseChannel> /// Locked Option of a receiver for moving the value out of the option. This /// type takes any `Response` type depending on the underlying network impl. pub type TakeReceiver = Mutex, ResponseChannel)>>>; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +/// A signed request for a proposal. +pub struct ProposalRequestPayload { + /// The view number that we're requesting a proposal for. + pub view_number: TYPES::Time, + + /// Our public key. The ensures that the receipient can reply to + /// us directly. + pub key: TYPES::SignatureKey, +} + +impl Committable for ProposalRequestPayload { + fn commit(&self) -> committable::Commitment { + RawCommitmentBuilder::new("signed proposal request commitment") + .u64_field("view number", *self.view_number) + .var_size_bytes(&self.key.to_bytes()) + .finalize() + } +} From 270f1529e138c6bcff99ed386878f714b61c12e1 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 26 Aug 2024 15:45:14 -0400 Subject: [PATCH 1192/1393] [WEEKLY RELEASE] HotShot - rc-0.5.72 (#3601) --- hotshot/src/tasks/mod.rs | 2 +- task-impls/src/events.rs | 9 ++++++--- task-impls/src/helpers.rs | 10 +++++----- testing/tests/tests_1/network_task.rs | 6 +++--- .../tests_1/quorum_proposal_recv_task.rs | 12 ++++++----- testing/tests/tests_1/test_with_failures_2.rs | 6 +++--- types/src/message.rs | 20 +++++++++---------- types/src/request_response.rs | 5 +++-- types/src/simple_certificate.rs | 2 +- 9 files changed, 39 insertions(+), 33 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 47e99c15db..359687d505 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,7 +8,6 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use hotshot_task::task::{NetworkHandle, Task}; use std::{collections::HashSet, sync::Arc, time::Duration}; use async_broadcast::broadcast; @@ -19,6 +18,7 @@ use futures::{ future::{BoxFuture, FutureExt}, stream, StreamExt, }; +use hotshot_task::task::{NetworkHandle, Task}; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index ea2834eb9a..9d2fe76d13 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -6,11 +6,9 @@ use std::fmt::Display; -use crate::view_sync::ViewSyncPhase; use async_broadcast::Sender; use either::Either; use hotshot_task::task::TaskEvent; -use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ data::{ DaProposal, Leaf, PackedBundle, QuorumProposal, UpgradeProposal, VidDisperse, @@ -26,13 +24,18 @@ use hotshot_types::{ DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, - traits::{block_contents::BuilderFee, node_implementation::NodeType, BlockPayload}, + traits::{ + block_contents::BuilderFee, node_implementation::NodeType, signature_key::SignatureKey, + BlockPayload, + }, utils::{BuilderCommitment, View}, vid::VidCommitment, vote::HasViewNumber, }; use vec1::Vec1; +use crate::view_sync::ViewSyncPhase; + impl TaskEvent for HotShotEvent { fn shutdown_event() -> Self { HotShotEvent::Shutdown diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 2eb80a234f..0eb0160790 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -10,7 +10,6 @@ use std::{ sync::Arc, }; -use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; @@ -20,28 +19,29 @@ use async_std::task::JoinHandle; use chrono::Utc; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; -use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, - message::Proposal, + message::{Proposal, UpgradeLock}, + request_response::ProposalRequestPayload, simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, utils::{Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use hotshot_types::{message::UpgradeLock, request_response::ProposalRequestPayload}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; +use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; + /// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 1631d0be19..ad61da040e 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -4,13 +4,14 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{sync::Arc, time::Duration}; + use async_broadcast::Sender; use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; -use hotshot_task::task::TaskState; -use hotshot_task::task::{ConsensusTaskRegistry, Task}; +use hotshot_task::task::{ConsensusTaskRegistry, Task, TaskState}; use hotshot_task_impls::{ events::HotShotEvent, network::{self, NetworkEventTaskState}, @@ -27,7 +28,6 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; -use std::{sync::Arc, time::Duration}; // Test that the event task sends a message, and the message task receives it // and emits the proper event diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index e6fad450ac..f7f905b54a 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -26,13 +26,15 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::request_response::ProposalRequestPayload; -use hotshot_types::traits::consensus_api::ConsensusApi; -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::traits::signature_key::SignatureKey; use hotshot_types::{ data::ViewNumber, - traits::{node_implementation::ConsensusTime, ValidatedState}, + request_response::ProposalRequestPayload, + traits::{ + consensus_api::ConsensusApi, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, + ValidatedState, + }, }; #[cfg(test)] diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index eedb2e55c2..7323e1c3fe 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -8,10 +8,11 @@ #![allow(unused_imports)] use std::{ collections::{HashMap, HashSet}, + rc::Rc, time::Duration, }; -use hotshot::tasks::DishonestDa; +use hotshot::tasks::{DishonestDa, DishonestLeader}; use hotshot_example_types::{ node_types::{ Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, @@ -24,11 +25,10 @@ use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::TestDescription, + test_builder::{Behaviour, TestDescription}, view_sync_task::ViewSyncTaskDescription, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -use {hotshot::tasks::DishonestLeader, hotshot_testing::test_builder::Behaviour, std::rc::Rc}; // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, diff --git a/types/src/message.rs b/types/src/message.rs index eed73dcdb9..18a047d17c 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -22,9 +22,9 @@ use vbs::{ BinarySerializer, Serializer, }; -use crate::request_response::ProposalRequestPayload; use crate::{ data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, + request_response::ProposalRequestPayload, simple_certificate::{ DaCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -163,15 +163,6 @@ pub enum GeneralConsensusMessage { /// Message with a quorum proposal. Proposal(Proposal>), - /// A peer node needs a proposal from the leader. - ProposalRequested( - ProposalRequestPayload, - ::PureAssembledSignatureType, - ), - - /// The leader has responded with a valid proposal. - LeaderProposalAvailable(Proposal>), - /// Message with a quorum vote. Vote(QuorumVote), @@ -201,6 +192,15 @@ pub enum GeneralConsensusMessage { /// Message with an upgrade vote UpgradeVote(UpgradeVote), + + /// A peer node needs a proposal from the leader. + ProposalRequested( + ProposalRequestPayload, + ::PureAssembledSignatureType, + ), + + /// The leader has responded with a valid proposal. + LeaderProposalAvailable(Proposal>), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] diff --git a/types/src/request_response.rs b/types/src/request_response.rs index cb679c1446..8b6aab7785 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -13,8 +13,9 @@ use futures::channel::{mpsc::Receiver, oneshot}; use libp2p::request_response::ResponseChannel; use serde::{Deserialize, Serialize}; -use crate::traits::signature_key::SignatureKey; -use crate::traits::{network::NetworkMsg, node_implementation::NodeType}; +use crate::traits::{ + network::NetworkMsg, node_implementation::NodeType, signature_key::SignatureKey, +}; /// Request for Consenus data #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index c3b2b93e1e..3208d69203 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -19,9 +19,9 @@ use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; -use crate::message::UpgradeLock; use crate::{ data::serialize_signature2, + message::UpgradeLock, simple_vote::{ DaData, QuorumData, TimeoutData, UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, From b0959a9bd51a8855d07f9ba5e2b56ff1163345f5 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 29 Aug 2024 12:03:39 -0400 Subject: [PATCH 1193/1393] Allow Proposal requesting from any DA node (#3619) * add support for DA broadcast * working test * tmp * add support for fused message type, verify failure condition and fix it * rename --- task-impls/src/consensus/handlers.rs | 8 +- task-impls/src/consensus/mod.rs | 2 +- task-impls/src/helpers.rs | 114 ++++++++++-------- task-impls/src/network.rs | 23 +++- task-impls/src/quorum_proposal/handlers.rs | 4 - .../src/quorum_proposal_recv/handlers.rs | 3 +- task-impls/src/request.rs | 1 + testing/tests/tests_2/catchup.rs | 74 ++++++++++++ types/src/traits/network.rs | 2 + 9 files changed, 168 insertions(+), 63 deletions(-) diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 1dd8d0d47c..0668b87360 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -137,11 +137,6 @@ pub async fn create_and_send_proposal( proposed_leaf.view_number(), ); - consensus - .write() - .await - .update_last_proposed_view(message.clone())?; - async_sleep(Duration::from_millis(round_start_delay)).await; broadcast_event( @@ -523,7 +518,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .entry(proposal.data.view_number()) .or_default() .push(async_spawn( - validate_proposal_safety_and_liveness( + validate_proposal_safety_and_liveness::( proposal.clone(), parent_leaf, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), @@ -534,6 +529,7 @@ pub(crate) async fn handle_quorum_proposal_recv< task_state.output_event_stream.clone(), task_state.id, task_state.upgrade_lock.clone(), + Arc::clone(&task_state.storage), ) .map(AnyhowTracing::err_as_debug), )); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index a2af8fd41e..6c3b6fac21 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -420,7 +420,7 @@ impl, V: Versions> ConsensusTaskSt } if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { - tracing::error!("{e:?}"); + tracing::trace!("{e:?}"); } debug!( "Attempting to publish proposal after forming a QC for view {}", diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 0eb0160790..9fb4da777c 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -29,8 +29,9 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, + storage::Storage, BlockPayload, ValidatedState, }, utils::{Terminator, View, ViewInner}, @@ -438,7 +439,11 @@ pub(crate) async fn parent_leaf_and_state( #[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] #[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] -pub async fn validate_proposal_safety_and_liveness( +pub async fn validate_proposal_safety_and_liveness< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( proposal: Proposal>, parent_leaf: Leaf, consensus: OuterConsensus, @@ -449,6 +454,7 @@ pub async fn validate_proposal_safety_and_liveness event_sender: Sender>, id: u64, upgrade_lock: UpgradeLock, + storage: Arc>, ) -> Result<()> { let view_number = proposal.data.view_number(); @@ -469,24 +475,33 @@ pub async fn validate_proposal_safety_and_liveness }, }; - if let Err(e) = consensus - .write() - .await - .update_validated_state_map(view_number, view.clone()) { - tracing::trace!("{e:?}"); - } - consensus - .write() - .await - .update_saved_leaves(proposed_leaf.clone()); + let mut consensus_write = consensus.write().await; + if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { + tracing::trace!("{e:?}"); + } + consensus_write.update_saved_leaves(proposed_leaf.clone()); - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; + // Update our internal storage of the proposal. The proposal is valid, so + // we swallow this error and just log if it occurs. + if let Err(e) = consensus_write.update_last_proposed_view(proposal.clone()) { + tracing::debug!("Internal proposal update failed; error = {e:#}"); + }; + + // Update our persistent storage of the proposal. We also itentionally swallow + // this error as it should not affect consensus and would, instead, imply an + // issue on the sequencer side. + if let Err(e) = storage.write().await.append_proposal(&proposal).await { + tracing::debug!("Persisting the proposal update failed; error = {e:#}"); + }; + + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + } UpgradeCertificate::validate( &proposal.data.upgrade_certificate, @@ -505,37 +520,39 @@ pub async fn validate_proposal_safety_and_liveness // passes. // Liveness check. - let read_consensus = consensus.read().await; - let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); - - // Safety check. - // Check if proposal extends from the locked leaf. - let outcome = read_consensus.visit_leaf_ancestors( - justify_qc.view_number(), - Terminator::Inclusive(read_consensus.locked_view()), - false, - |leaf, _, _| { - // if leaf view no == locked view no then we're done, report success by - // returning true - leaf.view_number() != read_consensus.locked_view() - }, - ); - let safety_check = outcome.is_ok(); - - ensure!(safety_check || liveness_check, { - if let Err(e) = outcome { - broadcast_event( - Event { - view_number, - event: EventType::Error { error: Arc::new(e) }, - }, - &event_sender, - ) - .await; - } + { + let read_consensus = consensus.read().await; + let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); + + // Safety check. + // Check if proposal extends from the locked leaf. + let outcome = read_consensus.visit_leaf_ancestors( + justify_qc.view_number(), + Terminator::Inclusive(read_consensus.locked_view()), + false, + |leaf, _, _| { + // if leaf view no == locked view no then we're done, report success by + // returning true + leaf.view_number() != read_consensus.locked_view() + }, + ); + let safety_check = outcome.is_ok(); + + ensure!(safety_check || liveness_check, { + if let Err(e) = outcome { + broadcast_event( + Event { + view_number, + event: EventType::Error { error: Arc::new(e) }, + }, + &event_sender, + ) + .await; + } - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) - }); + format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + }); + } // We accept the proposal, notify the application layer @@ -550,6 +567,7 @@ pub async fn validate_proposal_safety_and_liveness &event_sender, ) .await; + // Notify other tasks broadcast_event( Arc::new(HotShotEvent::QuorumProposalValidated( diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index e658cca42b..319c23b12a 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -39,8 +39,6 @@ pub fn quorum_filter(event: &Arc>) -> bool !matches!( event.as_ref(), HotShotEvent::QuorumProposalSend(_, _) - | HotShotEvent::QuorumProposalRequestSend(..) - | HotShotEvent::QuorumProposalResponseSend(..) | HotShotEvent::QuorumVoteSend(_) | HotShotEvent::DacSend(_, _) | HotShotEvent::TimeoutVoteSend(_) @@ -63,6 +61,8 @@ pub fn da_filter(event: &Arc>) -> bool { !matches!( event.as_ref(), HotShotEvent::DaProposalSend(_, _) + | HotShotEvent::QuorumProposalRequestSend(..) + | HotShotEvent::QuorumProposalResponseSend(..) | HotShotEvent::DaVoteSend(_) | HotShotEvent::ViewChange(_) ) @@ -315,7 +315,7 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ProposalRequested(req.clone(), signature), )), - TransmitType::Direct(membership.leader(req.view_number)), + TransmitType::DaCommitteeAndLeaderBroadcast(membership.leader(req.view_number)), ), HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => ( sender_key.clone(), @@ -464,6 +464,7 @@ impl< { return; } + if let MessageKind::Consensus(SequencingMessage::General( GeneralConsensusMessage::Proposal(prop), )) = &message.kind @@ -493,6 +494,22 @@ impl< net.da_broadcast_message(serialized_message, committee, broadcast_delay) .await } + TransmitType::DaCommitteeAndLeaderBroadcast(recipient) => { + // Short-circuit exit from this call if we get an error during the direct leader broadcast. + // NOTE: An improvement to this is to check if the leader is in the DA committee but it's + // just a single extra message to the leader, so it's not an optimization that we need now. + if let Err(e) = net + .direct_message(serialized_message.clone(), recipient) + .await + { + error!("Failed to send message from network task: {e:?}"); + return; + } + + // Otherwise, send the next message. + net.da_broadcast_message(serialized_message, committee, broadcast_delay) + .await + } }; match transmit_result { diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index f1fc665343..5a73bec2d1 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -229,10 +229,6 @@ impl ProposalDependencyHandle { proposed_leaf.view_number(), ); - self.consensus - .write() - .await - .update_last_proposed_view(message.clone())?; async_sleep(Duration::from_millis(self.round_start_delay)).await; broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index c5ae9cdca9..1658910986 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -246,7 +246,7 @@ pub(crate) async fn handle_quorum_proposal_recv< }; // Validate the proposal - validate_proposal_safety_and_liveness( + validate_proposal_safety_and_liveness::( proposal.clone(), parent_leaf, OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), @@ -257,6 +257,7 @@ pub(crate) async fn handle_quorum_proposal_recv< task_state.output_event_stream.clone(), task_state.id, task_state.upgrade_lock.clone(), + Arc::clone(&task_state.storage), ) .await?; diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index c917c47256..faaa217ba9 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -88,6 +88,7 @@ type Signature = impl> TaskState for NetworkRequestState { type Event = HotShotEvent; + #[instrument(skip_all, target = "NetworkRequestState", fields(id = self.id))] async fn handle_event( &mut self, event: Arc, diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 9b1fcb4376..e0b79f7302 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -421,3 +421,77 @@ async fn test_all_restart_cdn() { .run_test::() .await; } + +/// This test case ensures that proposals persist off of a restart. We demonstrate this by +/// artificially removing node 0 (the only DA committee member) from the candidate pool, +/// meaning that the entire DA also does not have the proposal, but we're still able to +/// move on because the *leader* does have the proposal. +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_all_restart_one_da() { + use std::time::Duration; + + use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; + use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + test_builder::{TestDescription, TimingData}, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let timing_data = TimingData { + next_view_timeout: 2000, + ..Default::default() + }; + let mut metadata: TestDescription = + TestDescription::default(); + + let node_0_down = vec![ChangeNode { + idx: 0, + updown: UpDown::Restart, + }]; + let mut catchup_nodes = vec![]; + for i in 1..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: UpDown::Restart, + }) + } + + metadata.timing_data = timing_data; + metadata.start_nodes = 20; + metadata.num_nodes_with_stake = 20; + + // Explicitly make the DA tiny to exaggerate a missing proposal. + metadata.da_staked_committee_size = 1; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(12, node_0_down), (13, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + metadata + .gen_launcher(0) + .launch() + .run_test::() + .await; +} diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 33fc55df30..d8d89e7c20 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -84,6 +84,8 @@ pub enum TransmitType { Broadcast, /// broadcast to DA committee DaCommitteeBroadcast, + /// broadcast to the leader and the DA + DaCommitteeAndLeaderBroadcast(TYPES::SignatureKey), } /// Error type for networking From 17c4351ceae2c975bd1933fdd3020f090b7e09c8 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 29 Aug 2024 15:07:57 -0400 Subject: [PATCH 1194/1393] [AUDIT][MEDIUM][O-1] Orchestrator slots may be trivially exhausted (#3621) * first pass * lints * try to pull stake table key (#3622) * misc cleanup * allow disable verification * finish all validation and bug fixes * remove old log * vec->hashset * misc feedback fixes * remove check --------- Co-authored-by: rob-maron <132852777+rob-maron@users.noreply.github.com> --- examples/infra/mod.rs | 7 +++- orchestrator/run-config.toml | 24 ++++++++---- orchestrator/src/client.rs | 5 ++- orchestrator/src/config.rs | 15 +++++++ orchestrator/src/lib.rs | 65 +++++++++++++++++++++++++++---- types/src/stake_table.rs | 7 +++- types/src/traits/signature_key.rs | 6 ++- 7 files changed, 107 insertions(+), 22 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 8fe91fa855..f1d8d83474 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -886,6 +886,11 @@ pub async fn main_entry_point< derive_libp2p_peer_id::(&my_own_validator_config.private_key) .expect("failed to derive Libp2p keypair"); + // We need this to be able to register our node + let peer_config = + PeerConfig::::to_bytes(&my_own_validator_config.public_config()) + .clone(); + // conditionally save/load config from file or orchestrator // This is a function that will return correct complete config from orchestrator. // It takes in a valid args.network_config_file when loading from file, or valid validator_config when loading from orchestrator, the invalid one will be ignored. @@ -956,7 +961,7 @@ pub async fn main_entry_point< if let NetworkConfigSource::Orchestrator = source { info!("Waiting for the start command from orchestrator"); orchestrator_client - .wait_for_all_nodes_ready(run_config.clone().node_index) + .wait_for_all_nodes_ready(peer_config) .await; } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 7fe8bb7e2a..e5eecaa9c5 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -39,22 +39,30 @@ seed = [ ] start_delay_seconds = 0 cdn_marshal_address = "127.0.0.1:9000" +public_keys = [ + "BLS_VER_KEY~p-JKk1VvO1RoMrDrqyjz0P1VGwtOaEjF5jLjpOZbJi5O747fvYEOg0OvCl_CLe4shh7vsqeG9uMF9RssM12sLSuaiVJkCClxEI5mRLV4qff1UjZAZJIBgeL1_hRhRUkpqC0Trm1qtvXtZ8FwOCIzYXv8c300Au824k7FxjjcWLBL", + "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", + "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR", + "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL", + "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7", + "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4", + "BLS_VER_KEY~kEUEUJFBtCXl68fM_2roQw856wQlu1ZoDmPn8uu4bQgeZwyb5oz5_kMl-oAJ_OtbYV1serjWE--eXB_qYIpQLZka42-cML6WjCQjNl1hGSejtoBDkExNeUNcweFQBbEsaDiIy3-sgHTrfYpFd1icKeAVihLRn5_RtSU_RUu1TQqR", + "BLS_VER_KEY~PAAQNgOYfj3GiVX7LxSlkXfOCDSnNKZDqPVYQ_jBMxKzOCn0PXbqQ62kKPenWOmCxiCE7X158s-VenBna6MjHJgf61eBAO-3-OyTP5NWVx49RTgHhQf2iMTKk2iqK2gjnjZimBU135YU4lQFtrG-ZgRezwqkC5vy8V-q46fschIG", + "BLS_VER_KEY~96hAcdFZxQT8CEHcyV8j2ILJRsXagquENPkc9AwLSx3u6AE_uMupIKGbNJRiM99oFneK2vI5g1u61HidWeuTLRPM2537xAXeaO8e-wJYx4FaPKw_xTcLPrIm0OZT7SsLAMwFuqfMbDdKM71-RyrLwhff5517xXBKEk5Tg9iT9Qrr", + "BLS_VER_KEY~-pVi7j6TEBeG7ABata4uWWDRM2SrY8wWotWsGnTpIhnOVYJI_lNWyig6VJUuFmBsMS8rLMU7nDxDm8SbObxyA-SLFcr_jCkZqsbx8GcVQrnBAfjNRWuPZP0xcTDMu2IkQqtc3L0OpzbMEgGRGE8Wj09pNqouzl-xhPoYjTmD06Bw", + "BLS_VER_KEY~IUPSdnsNUHgNx_74ZhBPrICcDZ9Bp_DAt-6kFz8vSwJES2Vy1Ws8NJ1mxb9XGE1u13sw0FRe8kn5Ib3p2stbEtR_1Qgbuif6aoLrGaSUzy0MvwrO58u9kHZk3rXIuSAN7n4ok3-KKk2CmnBfx7fchFoqT56FXCd1EJ7XRrYj8wTh", +] +enable_registration_verification = true [config] num_nodes_with_stake = 10 num_nodes_without_stake = 0 -start_threshold = [ - 8, - 10, -] +start_threshold = [8, 10] staked_da_nodes = 10 non_staked_da_nodes = 0 fixed_leader_for_gpuvid = 1 next_view_timeout = 30000 -timeout_ratio = [ - 11, - 10, -] +timeout_ratio = [11, 10] round_start_delay = 1 start_delay = 1 num_bootstrap = 5 diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 0843844da6..b4a2e6f898 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -442,12 +442,13 @@ impl OrchestratorClient { /// # Panics /// Panics if unable to post. #[instrument(skip(self), name = "orchestrator ready signal")] - pub async fn wait_for_all_nodes_ready(&self, node_index: u64) -> bool { + pub async fn wait_for_all_nodes_ready(&self, peer_config: Vec) -> bool { let send_ready_f = |client: Client| { + let pk = peer_config.clone(); async move { let result: Result<_, ClientError> = client .post("api/ready") - .body_json(&node_index) + .body_binary(&pk) .unwrap() .send() .await diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 3e8c1747d8..96e26e6b08 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -5,6 +5,7 @@ // along with the HotShot repository. If not, see . use std::{ + collections::HashSet, env, fs, net::SocketAddr, num::NonZeroUsize, @@ -208,6 +209,10 @@ pub struct NetworkConfig { pub builder: BuilderType, /// random builder config pub random_builder: Option, + /// The list of public keys that are allowed to connect to the orchestrator + pub public_keys: HashSet, + /// Whether or not to disable registration verification. + pub enable_registration_verification: bool, } /// the source of the network config @@ -439,6 +444,8 @@ impl Default for NetworkConfig { commit_sha: String::new(), builder: BuilderType::default(), random_builder: None, + public_keys: HashSet::new(), + enable_registration_verification: true, } } } @@ -491,6 +498,12 @@ pub struct NetworkConfigFile { /// random builder configuration #[serde(default)] pub random_builder: Option, + /// The list of public keys that are allowed to connect to the orchestrator + #[serde(default)] + pub public_keys: HashSet, + /// Whether or not to disable registration verification. + #[serde(default)] + pub enable_registration_verification: bool, } impl From> for NetworkConfig { @@ -536,6 +549,8 @@ impl From> for NetworkConfig { commit_sha: String::new(), builder: val.builder, random_builder: val.random_builder, + public_keys: val.public_keys, + enable_registration_verification: val.enable_registration_verification, } } } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 72c48a4117..311d36c2cc 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -23,7 +23,10 @@ use client::{BenchResults, BenchResultsDownloadConfig}; use config::BuilderType; use csv::Writer; use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; -use hotshot_types::{traits::signature_key::SignatureKey, PeerConfig}; +use hotshot_types::{ + traits::signature_key::{SignatureKey, StakeTableEntryType}, + PeerConfig, +}; use libp2p::{ identity::{ ed25519::{Keypair as EdKeypair, SecretKey}, @@ -225,10 +228,10 @@ pub trait OrchestratorApi { /// # Errors /// if unable to serve fn post_run_results(&mut self, metrics: BenchResults) -> Result<(), ServerError>; - /// post endpoint for whether or not all nodes are ready + /// A node POSTs its public key to let the orchestrator know that it is ready /// # Errors /// if unable to serve - fn post_ready(&mut self) -> Result<(), ServerError>; + fn post_ready(&mut self, peer_config: &PeerConfig) -> Result<(), ServerError>; /// post endpoint for manually starting the orchestrator /// # Errors /// if unable to serve @@ -328,7 +331,22 @@ where let node_index = self.pub_posted.len() as u64; + // Deserialize the public key let staked_pubkey = PeerConfig::::from_bytes(pubkey).unwrap(); + + // Check if the node is allowed to connect + if self.config.enable_registration_verification + && !self + .config + .public_keys + .contains(&staked_pubkey.stake_table_entry.public_key()) + { + return Err(ServerError { + status: tide_disco::StatusCode::FORBIDDEN, + message: "You are unauthorized to register with the orchestrator".to_string(), + }); + } + self.config .config .known_nodes_with_stake @@ -371,7 +389,7 @@ where } } - println!("Posted public key for node_index {node_index}"); + tracing::error!("Posted public key for node_index {node_index}"); // node_index starts at 0, so once it matches `num_nodes_with_stake` // we will have registered one node too many. hence, we want `node_index + 1`. @@ -416,11 +434,24 @@ where } // Assumes nodes do not post 'ready' twice - // TODO ED Add a map to verify which nodes have posted they're ready - fn post_ready(&mut self) -> Result<(), ServerError> { + fn post_ready(&mut self, peer_config: &PeerConfig) -> Result<(), ServerError> { + // If we have not disabled registration verification. + // Is this node allowed to connect? + if !self + .config + .config + .known_nodes_with_stake + .contains(peer_config) + { + return Err(ServerError { + status: tide_disco::StatusCode::FORBIDDEN, + message: "You are unauthorized to register with the orchestrator".to_string(), + }); + } + self.nodes_connected += 1; - println!("Nodes connected: {}", self.nodes_connected); + tracing::error!("Nodes connected: {}", self.nodes_connected); // i.e. nodes_connected >= num_nodes_with_stake * (start_threshold.0 / start_threshold.1) if self.nodes_connected * self.config.config.start_threshold.1 @@ -636,7 +667,21 @@ where })? .post( "post_ready", - |_req, state: &mut ::State| async move { state.post_ready() }.boxed(), + |req, state: &mut ::State| { + async move { + let mut body_bytes = req.body_bytes(); + body_bytes.drain(..12); + // Decode the payload-supplied pubkey + let Some(pubkey) = PeerConfig::::from_bytes(&body_bytes) else { + return Err(ServerError { + status: tide_disco::StatusCode::BAD_REQUEST, + message: "Malformed body".to_string(), + }); + }; + state.post_ready(&pubkey) + } + .boxed() + }, )? .post( "post_manual_start", @@ -726,6 +771,10 @@ where network_config.config.known_nodes_with_stake = vec![]; network_config.config.known_da_nodes = vec![]; + if network_config.enable_registration_verification { + tracing::error!("REGISTRATION VERIFICATION IS TURNED OFF"); + } + let web_api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs index 9d41931f0c..eddfd1caef 100644 --- a/types/src/stake_table.rs +++ b/types/src/stake_table.rs @@ -21,11 +21,16 @@ pub struct StakeTableEntry { pub stake_amount: U256, } -impl StakeTableEntryType for StakeTableEntry { +impl StakeTableEntryType for StakeTableEntry { /// Get the stake amount fn stake(&self) -> U256 { self.stake_amount } + + /// Get the public key + fn public_key(&self) -> K { + self.stake_key.clone() + } } impl StakeTableEntry { diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index d764b08163..ebee7c6b75 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -25,9 +25,11 @@ use super::EncodeBytes; use crate::{utils::BuilderCommitment, vid::VidSchemeType}; /// Type representing stake table entries in a `StakeTable` -pub trait StakeTableEntryType { +pub trait StakeTableEntryType { /// Get the stake value fn stake(&self) -> U256; + /// Get the public key + fn public_key(&self) -> K; } /// Trait for abstracting public key signatures @@ -60,7 +62,7 @@ pub trait SignatureKey: + for<'a> Deserialize<'a> + Hash; /// The type of the entry that contain both public key and stake value - type StakeTableEntry: StakeTableEntryType + type StakeTableEntry: StakeTableEntryType + Send + Sync + Sized From 670beecc077df2d5b14e975b721905693465d869 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 29 Aug 2024 17:41:25 -0400 Subject: [PATCH 1195/1393] Clean up Libp2p (#3618) * clean up Libp2p * remove bootstrap from tests * small updates * Not sure how this was working before * pick port * lint * Update crates/libp2p-networking/src/network/node.rs Co-authored-by: Artemii Gerasimovich * Update crates/libp2p-networking/src/network/node.rs Co-authored-by: Artemii Gerasimovich * review changes * move all configuration up to `from_config` * `bootstrap_addrs` -> `node_addrs` * fix examples * remove `libp2p` config from config file --------- Co-authored-by: Artemii Gerasimovich --- examples/Cargo.toml | 2 +- examples/infra/mod.rs | 2 + .../src/traits/networking/libp2p_network.rs | 101 ++++++------------ libp2p-networking/Cargo.toml | 1 + libp2p-networking/src/network/mod.rs | 44 +------- libp2p-networking/src/network/node.rs | 89 +++++---------- libp2p-networking/src/network/node/config.rs | 67 +++++++----- libp2p-networking/src/network/node/handle.rs | 2 +- libp2p-networking/tests/common/mod.rs | 91 +++------------- libp2p-networking/tests/counter.rs | 18 +--- orchestrator/run-config.toml | 12 --- orchestrator/src/config.rs | 74 +------------ orchestrator/staging-config.toml | 13 --- 13 files changed, 126 insertions(+), 390 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 6e0a36da7f..b004023a27 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -105,7 +105,7 @@ snafu = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } derive_more = { workspace = true } -portpicker = "0.1" +portpicker.workspace = true lru.workspace = true hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index f1d8d83474..ca6044b646 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -64,6 +64,7 @@ use hotshot_types::{ }, HotShotConfig, PeerConfig, ValidatorConfig, }; +use libp2p_networking::network::GossipConfig; use rand::{rngs::StdRng, SeedableRng}; use surf_disco::Url; use tracing::{debug, error, info, warn}; @@ -735,6 +736,7 @@ where // Create the Libp2p network let libp2p_network = Libp2pNetwork::from_config::( config.clone(), + GossipConfig::default(), bind_address, &public_key, &private_key, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index b334c27fe4..bbd6608a30 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -62,18 +62,19 @@ use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, PeerId, }; +pub use libp2p_networking::network::GossipConfig; use libp2p_networking::{ network::{ behaviours::dht::record::{Namespace, RecordKey, RecordValue}, spawn_network_node, transport::construct_auth_message, - MeshParams, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeReceiver, - NetworkNodeType, DEFAULT_REPLICATION_FACTOR, + DEFAULT_REPLICATION_FACTOR, }, reexport::{Multiaddr, ResponseChannel}, }; + use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; use tracing::{debug, error, info, instrument, trace, warn}; @@ -268,53 +269,20 @@ impl TestableNetworkingImplementation ) .expect("Failed to sign DHT lookup record"); - // we want the majority of peers to have this lying around. - let replication_factor = NonZeroUsize::new(2 * expected_node_count / 3).unwrap(); - let config = if node_id < num_bootstrap as u64 { - NetworkNodeConfigBuilder::default() - // NOTICE the implicit assumption that bootstrap is less - // than half the network. This seems reasonable. - .mesh_params(Some(MeshParams { - mesh_n_high: expected_node_count, - mesh_n_low: 5, - mesh_outbound_min: 3, - // the worst case of 7/2+3 > 5 - mesh_n: (expected_node_count / 2 + 3), - })) - .server_mode(true) - .identity(libp2p_keypair) - .replication_factor(replication_factor) - .node_type(NetworkNodeType::Bootstrap) - .bound_addr(Some(addr)) - .to_connect_addrs(HashSet::default()) - // setting to sane defaults - .ttl(None) - .republication_interval(None) - .build() - .unwrap() - } else { - NetworkNodeConfigBuilder::default() - // NOTE I'm hardcoding these because this is probably the MAX - // parameters. If there aren't this many nodes, gossip keeps looking - // for more. That is fine. - .mesh_params(Some(MeshParams { - mesh_n_high: 15, - mesh_n_low: 5, - mesh_outbound_min: 4, - mesh_n: 8, - })) - .server_mode(true) - .identity(libp2p_keypair) - .replication_factor(replication_factor) - .node_type(NetworkNodeType::Regular) - .bound_addr(Some(addr)) - .to_connect_addrs(HashSet::default()) - // setting to sane defaults - .ttl(None) - .republication_interval(None) - .build() - .unwrap() - }; + // We want at least 2/3 of the nodes to have any given record in the DHT + let replication_factor = + NonZeroUsize::new((2 * expected_node_count).div_ceil(3)).unwrap(); + + // Build the network node configuration + let config = NetworkNodeConfigBuilder::default() + .keypair(libp2p_keypair) + .replication_factor(replication_factor) + .bind_address(Some(addr)) + .to_connect_addrs(HashSet::default()) + .republication_interval(None) + .build() + .expect("Failed to build network node config"); + let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); let node_ids_ref = Arc::clone(&node_ids); let da = da_keys.clone(); @@ -400,6 +368,7 @@ impl Libp2pNetwork { /// If we are unable to calculate the replication factor pub async fn from_config( mut config: NetworkConfig, + gossip_config: GossipConfig, bind_address: SocketAddr, pub_key: &K, priv_key: &K::PrivateKey, @@ -423,9 +392,12 @@ impl Libp2pNetwork { ) .parse()?; - // Build our libp2p configuration from our global, network configuration + // Build our libp2p configuration let mut config_builder = NetworkNodeConfigBuilder::default(); + // Set the gossip configuration + config_builder.gossip_config(gossip_config.clone()); + // Extrapolate the stake table from the known nodes let stake_table: HashSet = config .config @@ -464,22 +436,15 @@ impl Libp2pNetwork { .with_context(|| "Failed to sign DHT lookup record")?; config_builder - .server_mode(libp2p_config.server_mode) - .identity(keypair) + .keypair(keypair) .replication_factor(replication_factor) - .bound_addr(Some(bind_address.clone())) - .mesh_params(Some(MeshParams { - mesh_n_high: libp2p_config.mesh_n_high, - mesh_n_low: libp2p_config.mesh_n_low, - mesh_outbound_min: libp2p_config.mesh_outbound_min, - mesh_n: libp2p_config.mesh_n, - })); + .bind_address(Some(bind_address.clone())); // Choose `mesh_n` random nodes to connect to for bootstrap let bootstrap_nodes = libp2p_config .bootstrap_nodes .into_iter() - .choose_multiple(&mut StdRng::from_entropy(), libp2p_config.mesh_n); + .choose_multiple(&mut StdRng::from_entropy(), gossip_config.mesh_n); config_builder.to_connect_addrs(HashSet::from_iter(bootstrap_nodes.clone())); // Build the node's configuration @@ -555,17 +520,11 @@ impl Libp2pNetwork { let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) .await .map_err(Into::::into)?; - // Make bootstrap mappings known - if matches!( - network_handle.config().node_type, - NetworkNodeType::Bootstrap - ) { - let addr = network_handle.listen_addr(); - let pid = network_handle.peer_id(); - let mut bs_cp = bootstrap_addrs.write().await; - bs_cp.push((pid, addr)); - drop(bs_cp); - } + + // Add our own address to the bootstrap addresses + let addr = network_handle.listen_addr(); + let pid = network_handle.peer_id(); + bootstrap_addrs.write().await.push((pid, addr)); let mut pubkey_pid_map = BiHashMap::new(); pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 01c46f91d4..ca5fac67d1 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -38,6 +38,7 @@ tracing = { workspace = true } void = "1" lazy_static = { workspace = true } pin-project = "1" +portpicker.workspace = true [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index f9dce21c42..620ee93bf9 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -15,7 +15,7 @@ mod node; /// Alternative Libp2p transport implementations pub mod transport; -use std::{collections::HashSet, fmt::Debug, str::FromStr}; +use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::{self, Sender}; use hotshot_types::{ @@ -41,7 +41,6 @@ use libp2p_identity::PeerId; use quic::async_std::Transport as QuicTransport; #[cfg(async_executor_impl = "tokio")] use quic::tokio::Transport as QuicTransport; -use serde::{Deserialize, Serialize}; use tracing::instrument; use transport::StakeTableAuthentication; @@ -49,49 +48,14 @@ pub use self::{ def::NetworkDef, error::NetworkError, node::{ - network_node_handle_error, spawn_network_node, MeshParams, NetworkNode, NetworkNodeConfig, - NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, - NetworkNodeHandleError, NetworkNodeReceiver, DEFAULT_REPLICATION_FACTOR, + network_node_handle_error, spawn_network_node, GossipConfig, NetworkNode, + NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, + NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, DEFAULT_REPLICATION_FACTOR, }, }; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} -/// this is mostly to estimate how many network connections -/// a node should allow -#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] -pub enum NetworkNodeType { - /// bootstrap node accepts all connections - Bootstrap, - /// regular node has a limit to the - /// number of connections to accept - Regular, - /// conductor node is never pruned - Conductor, -} - -impl FromStr for NetworkNodeType { - type Err = String; - - fn from_str(input: &str) -> Result { - match input { - "Conductor" => Ok(NetworkNodeType::Conductor), - "Regular" => Ok(NetworkNodeType::Regular), - "Bootstrap" => Ok(NetworkNodeType::Bootstrap), - _ => Err( - "Couldn't parse node type. Must be one of Conductor, Bootstrap, Regular" - .to_string(), - ), - } - } -} - -impl Default for NetworkNodeType { - fn default() -> Self { - Self::Bootstrap - } -} - /// Actions to send from the client to the swarm #[derive(Debug)] pub enum ClientRequest { diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 7015825c34..c90d1f1fc3 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -55,7 +55,7 @@ use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ config::{ - MeshParams, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, + GossipConfig, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, DEFAULT_REPLICATION_FACTOR, }, handle::{ @@ -70,7 +70,6 @@ use super::{ }, error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, - NetworkNodeType, }; use crate::network::behaviours::{ dht::{DHTBehaviour, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, @@ -91,8 +90,8 @@ pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; /// Network definition #[derive(custom_debug::Debug)] pub struct NetworkNode { - /// pub/private key from with peer_id is derived - identity: Keypair, + /// The keypair for the node + keypair: Keypair, /// peer id of network node peer_id: PeerId, /// the swarm of networkbehaviours @@ -177,18 +176,17 @@ impl NetworkNode { #[instrument] pub async fn new(config: NetworkNodeConfig) -> Result { // Generate a random `KeyPair` if one is not specified - let identity = if let Some(ref kp) = config.identity { - kp.clone() - } else { - Keypair::generate_ed25519() - }; + let keypair = config + .keypair + .clone() + .unwrap_or_else(Keypair::generate_ed25519); // Get the `PeerId` from the `KeyPair` - let peer_id = PeerId::from(identity.public()); + let peer_id = PeerId::from(keypair.public()); - // Generate the transport from the identity, stake table, and auth message + // Generate the transport from the keypair, stake table, and auth message let transport: BoxedTransport = gen_transport::( - identity.clone(), + keypair.clone(), config.stake_table.clone(), config.auth_message.clone(), ) @@ -196,55 +194,24 @@ impl NetworkNode { // Generate the swarm let mut swarm: Swarm> = { - // Use the hash of the message's contents as the ID - // Use blake3 for much paranoia at very high speeds + // Use the `Blake3` hash of the message's contents as the ID let message_id_fn = |message: &GossipsubMessage| { let hash = blake3::hash(&message.data); MessageId::from(hash.as_bytes().to_vec()) }; - let params = if let Some(ref params) = config.mesh_params { - params.clone() - } else { - // NOTE this should most likely be a builder pattern - // at some point in the future. - match config.node_type { - NetworkNodeType::Bootstrap => MeshParams { - mesh_n_high: 1000, // make this super high in case we end up scaling to 1k - // nodes - mesh_n_low: 10, - mesh_outbound_min: 5, - mesh_n: 15, - }, - NetworkNodeType::Regular => MeshParams { - mesh_n_high: 15, - mesh_n_low: 8, - mesh_outbound_min: 4, - mesh_n: 12, - }, - NetworkNodeType::Conductor => MeshParams { - mesh_n_high: 21, - mesh_n_low: 8, - mesh_outbound_min: 4, - mesh_n: 12, - }, - } - }; - - // Create a custom gossipsub + // Derive a `Gossipsub` config from our gossip config let gossipsub_config = GossipsubConfigBuilder::default() - .heartbeat_interval(Duration::from_secs(1)) - // Force all messages to have valid signatures - .validation_mode(ValidationMode::Strict) - .history_gossip(10) - .mesh_n_high(params.mesh_n_high) - .mesh_n_low(params.mesh_n_low) - .mesh_outbound_min(params.mesh_outbound_min) - .mesh_n(params.mesh_n) - .history_length(10) - .max_transmit_size(MAX_GOSSIP_MSG_SIZE) - // Use the (blake3) hash of a message as its ID - .message_id_fn(message_id_fn) + .message_id_fn(message_id_fn) // Use the (blake3) hash of a message as its ID + .validation_mode(ValidationMode::Strict) // Force all messages to have valid signatures + .heartbeat_interval(config.gossip_config.heartbeat_interval) // Time between gossip heartbeats + .history_gossip(config.gossip_config.history_gossip) // Number of heartbeats to gossip about + .history_length(config.gossip_config.history_length) // Number of heartbeats to remember the full message for + .mesh_n(config.gossip_config.mesh_n) // Target number of mesh peers + .mesh_n_high(config.gossip_config.mesh_n_high) // Upper limit of mesh peers + .mesh_n_low(config.gossip_config.mesh_n_low) // Lower limit of mesh peers + .mesh_outbound_min(config.gossip_config.mesh_outbound_min) // Minimum number of outbound peers in mesh + .max_transmit_size(config.gossip_config.max_transmit_size) // Maximum size of a message .build() .map_err(|s| { GossipsubConfigSnafu { @@ -259,7 +226,7 @@ impl NetworkNode { // // if messages are signed at the the consensus level AND the network // level (noise), this feels redundant. - MessageAuthenticity::Signed(identity.clone()), + MessageAuthenticity::Signed(keypair.clone()), gossipsub_config, ) .map_err(|s| GossipsubBuildSnafu { message: s }.build())?; @@ -269,7 +236,7 @@ impl NetworkNode { // E.g. this will answer the question: how are other nodes // seeing the peer from behind a NAT let identify_cfg = - IdentifyConfig::new("HotShot/identify/1.0".to_string(), identity.public()); + IdentifyConfig::new("HotShot/identify/1.0".to_string(), keypair.public()); let identify = IdentifyBehaviour::new(identify_cfg); // - Build DHT needed for peer discovery @@ -298,9 +265,7 @@ impl NetworkNode { ValidatedStore::new(MemoryStore::new(peer_id)), kconfig, ); - if config.server_mode { - kadem.set_mode(Some(Mode::Server)); - } + kadem.set_mode(Some(Mode::Server)); let rrconfig = RequestResponseConfig::default(); @@ -338,7 +303,7 @@ impl NetworkNode { ); // build swarm - let swarm = SwarmBuilder::with_existing_identity(identity.clone()); + let swarm = SwarmBuilder::with_existing_identity(keypair.clone()); #[cfg(async_executor_impl = "async-std")] let swarm = swarm.with_async_std(); #[cfg(async_executor_impl = "tokio")] @@ -358,7 +323,7 @@ impl NetworkNode { } Ok(Self { - identity, + keypair, peer_id, swarm, config: config.clone(), diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index a7ea06e86e..838bee0f97 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -10,7 +10,7 @@ use hotshot_types::traits::signature_key::SignatureKey; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; -use crate::network::NetworkNodeType; +use super::MAX_GOSSIP_MSG_SIZE; /// The default Kademlia replication factor pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(10); @@ -18,23 +18,20 @@ pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(1 /// describe the configuration of the network #[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] pub struct NetworkNodeConfig { - #[builder(default)] - /// The type of node (bootstrap etc) - pub node_type: NetworkNodeType, - /// optional identity + /// The keypair for the node #[builder(setter(into, strip_option), default)] #[debug(skip)] - pub identity: Option, - /// address to bind to + pub keypair: Option, + /// The address to bind to #[builder(default)] - pub bound_addr: Option, + pub bind_address: Option, /// Replication factor for entries in the DHT #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, #[builder(default)] - /// parameters for gossipsub mesh network - pub mesh_params: Option, + /// Configuration for `GossipSub` + pub gossip_config: GossipConfig, /// list of addresses to connect to at initialization pub to_connect_addrs: HashSet<(PeerId, Multiaddr)>, @@ -44,9 +41,6 @@ pub struct NetworkNodeConfig { /// expiratiry for records in DHT #[builder(default)] pub ttl: Option, - /// whether to start in libp2p::kad::Mode::Server mode - #[builder(default = "false")] - pub server_mode: bool, /// The stake table. Used for authenticating other nodes. If not supplied /// we will not check other nodes against the stake table @@ -59,28 +53,47 @@ pub struct NetworkNodeConfig { pub auth_message: Option>, } -/// NOTE: `mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high` -/// NOTE: `mesh_outbound_min <= self.config.mesh_n / 2` -/// parameters fed into gossipsub controlling the structure of the mesh +/// Configuration for Libp2p's Gossipsub #[derive(Clone, Debug)] -pub struct MeshParams { - /// mesh_n_high from gossipsub +pub struct GossipConfig { + /// The heartbeat interval + pub heartbeat_interval: Duration, + + /// The number of past heartbeats to gossip about + pub history_gossip: usize, + /// The number of past heartbeats to remember the full messages for + pub history_length: usize, + + /// The target number of peers in the mesh + pub mesh_n: usize, + /// The maximum number of peers in the mesh pub mesh_n_high: usize, - /// mesh_n_low from gossipsub + /// The minimum number of peers in the mesh pub mesh_n_low: usize, - /// mesh_outbound_min from gossipsub + /// The minimum number of mesh peers that must be outbound pub mesh_outbound_min: usize, - /// mesh_n from gossipsub - pub mesh_n: usize, + + /// The maximum gossip message size + pub max_transmit_size: usize, } -impl Default for MeshParams { +impl Default for GossipConfig { fn default() -> Self { Self { - mesh_n_high: 15, - mesh_n_low: 8, - mesh_outbound_min: 4, - mesh_n: 12, + heartbeat_interval: Duration::from_secs(1), // Default of Libp2p + + // The following are slightly modified defaults of Libp2p + history_gossip: 6, // The number of past heartbeats to gossip about + history_length: 8, // The number of past heartbeats to remember the full messages for + + // The mesh parameters are borrowed from Ethereum: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#the-gossip-domain-gossipsub + mesh_n: 8, // The target number of peers in the mesh + mesh_n_high: 12, // The maximum number of peers in the mesh + mesh_n_low: 6, // The minimum number of peers in the mesh + mesh_outbound_min: 2, // The minimum number of mesh peers that must be outbound + + max_transmit_size: MAX_GOSSIP_MSG_SIZE, // The maximum gossip message size } } } diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 6aa1804b6e..5f4311d3f9 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -91,7 +91,7 @@ pub async fn spawn_network_node( .context(NetworkSnafu)?; // randomly assigned port let listen_addr = config - .bound_addr + .bind_address .clone() .unwrap_or_else(|| gen_multiaddr(0)); let peer_id = network.peer_id(); diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 28631fd4cc..222ee41795 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -21,15 +21,14 @@ use async_compatibility_layer::{ }; use futures::{future::join_all, Future, FutureExt}; use hotshot_types::traits::signature_key::SignatureKey; -use libp2p::{identity::Keypair, Multiaddr}; +use libp2p::Multiaddr; use libp2p_identity::PeerId; use libp2p_networking::network::{ network_node_handle_error::NodeConfigSnafu, spawn_network_node, NetworkEvent, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, - NetworkNodeType, }; use snafu::{ResultExt, Snafu}; -use tracing::{info, instrument, warn}; +use tracing::{instrument, warn}; #[derive(Clone, Debug)] pub(crate) struct HandleWithState { @@ -109,7 +108,6 @@ pub async fn test_bed< run_test: F, client_handler: G, num_nodes: usize, - num_of_bootstrap: usize, timeout: Duration, ) where FutF: Future, @@ -123,9 +121,7 @@ pub async fn test_bed< let mut kill_switches = Vec::new(); // NOTE we want this to panic if we can't spin up the swarms. // that amounts to a failed test. - let handles_and_receivers = spin_up_swarms::(num_nodes, timeout, num_of_bootstrap) - .await - .unwrap(); + let handles_and_receivers = spin_up_swarms::(num_nodes, timeout).await.unwrap(); let (handles, receivers): (Vec<_>, Vec<_>) = handles_and_receivers.into_iter().unzip(); let mut handler_futures = Vec::new(); @@ -190,77 +186,33 @@ pub async fn print_connections(handles: &[Arc( num_of_nodes: usize, timeout_len: Duration, - num_bootstrap: usize, ) -> Result, NetworkNodeReceiver)>, TestError> { let mut handles = Vec::new(); - let mut bootstrap_addrs = Vec::<(PeerId, Multiaddr)>::new(); + let mut node_addrs = Vec::<(PeerId, Multiaddr)>::new(); let mut connecting_futs = Vec::new(); // should never panic unless num_nodes is 0 let replication_factor = NonZeroUsize::new(num_of_nodes - 1).unwrap(); - for i in 0..num_bootstrap { - let mut config = NetworkNodeConfigBuilder::default(); - let identity = Keypair::generate_ed25519(); - // let start_port = 5000; - // NOTE use this if testing locally and want human readable ports - // as opposed to random ports. These are harder to track - // especially since the "listener"/inbound connection sees a different - // port - // let addr = Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}/quic-v1", start_port + i)).unwrap(); + for i in 0..num_of_nodes { + // Get an unused port + let port = portpicker::pick_unused_port().expect("Failed to get an unused port"); - let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/0/quic-v1").unwrap(); - config - .identity(identity) + // Use the port to create a Multiaddr + let addr = + Multiaddr::from_str(format!("/ip4/127.0.0.1/udp/{port}/quic-v1").as_str()).unwrap(); + + let config = NetworkNodeConfigBuilder::default() .replication_factor(replication_factor) - .node_type(NetworkNodeType::Bootstrap) + .bind_address(Some(addr.clone())) .to_connect_addrs(HashSet::default()) - .bound_addr(Some(addr)) - .ttl(None) - .republication_interval(None) - .server_mode(true); - let config = config .build() .context(NodeConfigSnafu) .context(HandleSnafu)?; + let (rx, node) = spawn_network_node(config.clone(), i).await.unwrap(); - let node = Arc::new(node); - let addr = node.listen_addr(); - info!("listen addr for {} is {:?}", i, addr); - bootstrap_addrs.push((node.peer_id(), addr)); - connecting_futs.push({ - let node = Arc::clone(&node); - async move { - node.begin_bootstrap().await?; - node.lookup_pid(PeerId::random()).await - } - .boxed_local() - }); - let node_with_state = HandleWithState { - handle: Arc::clone(&node), - state: Arc::default(), - }; - handles.push((node_with_state, rx)); - } - for j in 0..(num_of_nodes - num_bootstrap) { - let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/0/quic-v1").unwrap(); - // NOTE use this if testing locally and want human readable ports - // let addr = Multiaddr::from_str(&format!( - // "/ip4/127.0.0.1/udp/{}/quic-v1", - // start_port + num_bootstrap + j - // )).unwrap(); - let regular_node_config = NetworkNodeConfigBuilder::default() - .node_type(NetworkNodeType::Regular) - .replication_factor(replication_factor) - .bound_addr(Some(addr.clone())) - .to_connect_addrs(HashSet::default()) - .server_mode(true) - .build() - .context(NodeConfigSnafu) - .context(HandleSnafu)?; - let (rx, node) = spawn_network_node(regular_node_config.clone(), j + num_bootstrap) - .await - .unwrap(); + // Add ourselves to the list of node addresses to connect to + node_addrs.push((node.peer_id(), addr)); let node = Arc::new(node); connecting_futs.push({ @@ -277,18 +229,9 @@ pub async fn spin_up_swarms>() - ); for (handle, _) in &handles[0..num_of_nodes] { - let to_share = bootstrap_addrs.clone(); + let to_share = node_addrs.clone(); handle .handle .add_known_peers(to_share) diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 036991bea9..8b8d58a76b 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -35,11 +35,9 @@ pub type CounterState = u32; const NUM_ROUNDS: usize = 100; const TOTAL_NUM_PEERS_COVERAGE: usize = 10; -const NUM_OF_BOOTSTRAP_COVERAGE: usize = 5; const TIMEOUT_COVERAGE: Duration = Duration::from_secs(120); const TOTAL_NUM_PEERS_STRESS: usize = 100; -const NUM_OF_BOOTSTRAP_STRESS: usize = 25; const TIMEOUT_STRESS: Duration = Duration::from_secs(60); const DHT_KV_PADDING: usize = 1024; @@ -399,7 +397,7 @@ async fn run_dht_rounds( // Sign the value let value = RecordValue::new_signed(&key, value, &private_key).expect("signing failed"); - // put the key + // Put the key msg_handle .handle .put_record(key.clone(), value.clone()) @@ -529,7 +527,6 @@ async fn test_coverage_request_response_one_round() { run_request_response_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; @@ -544,7 +541,6 @@ async fn test_coverage_request_response_many_rounds() { run_request_response_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; @@ -559,7 +555,6 @@ async fn test_coverage_intersperse_many_rounds() { run_intersperse_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; @@ -574,7 +569,6 @@ async fn test_coverage_gossip_many_rounds() { run_gossip_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; @@ -589,7 +583,6 @@ async fn test_coverage_gossip_one_round() { run_gossip_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; @@ -605,7 +598,6 @@ async fn test_stress_request_response_one_round() { run_request_response_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -621,7 +613,6 @@ async fn test_stress_request_response_many_rounds() { run_request_response_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -637,7 +628,6 @@ async fn test_stress_intersperse_many_rounds() { run_intersperse_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -653,7 +643,6 @@ async fn test_stress_gossip_many_rounds() { run_gossip_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -669,7 +658,6 @@ async fn test_stress_gossip_one_round() { run_gossip_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -685,7 +673,6 @@ async fn test_stress_dht_one_round() { run_dht_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -701,7 +688,6 @@ async fn test_stress_dht_many_rounds() { run_dht_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, - NUM_OF_BOOTSTRAP_STRESS, TIMEOUT_STRESS, )) .await; @@ -716,7 +702,6 @@ async fn test_coverage_dht_one_round() { run_dht_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; @@ -731,7 +716,6 @@ async fn test_coverage_dht_many_rounds() { run_dht_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, - NUM_OF_BOOTSTRAP_COVERAGE, TIMEOUT_COVERAGE, )) .await; diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index e5eecaa9c5..e0672d0164 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -67,18 +67,6 @@ round_start_delay = 1 start_delay = 1 num_bootstrap = 5 -[libp2p_config] -bootstrap_mesh_n_high = 4 -bootstrap_mesh_n_low = 4 -bootstrap_mesh_outbound_min = 2 -bootstrap_mesh_n = 4 -mesh_n_high = 4 -mesh_n_low = 4 -mesh_outbound_min = 2 -mesh_n = 4 -online_time = 10 -server_mode = true - [random_builder] txn_in_block = 100 blocks_per_second = 1 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 96e26e6b08..514eda8e96 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -33,61 +33,8 @@ use crate::client::OrchestratorClient; /// Configuration describing a libp2p node #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct Libp2pConfig { - /// bootstrap nodes (multiaddress, serialized public key) + /// The bootstrap nodes to connect to (multiaddress, serialized public key) pub bootstrap_nodes: Vec<(PeerId, Multiaddr)>, - /// global index of node (for testing purposes a uid) - pub node_index: u64, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_n_high: usize, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_n_low: usize, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_outbound_min: usize, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_n: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_n_high: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_n_low: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_outbound_min: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_n: usize, - /// timeout before starting the next view - pub next_view_timeout: u64, - /// The maximum amount of time a leader can wait to get a block from a builder - pub builder_timeout: Duration, - /// time node has been running - pub online_time: u64, - /// number of transactions per view - pub num_txn_per_round: usize, - /// whether to start in libp2p::kad::Mode::Server mode - pub server_mode: bool, -} - -/// configuration serialized into a file -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct Libp2pConfigFile { - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_n_high: usize, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_n_low: usize, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_outbound_min: usize, - /// corresponds to libp2p DHT parameter of the same name for bootstrap nodes - pub bootstrap_mesh_n: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_n_high: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_n_low: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_outbound_min: usize, - /// corresponds to libp2p DHT parameter of the same name - pub mesh_n: usize, - /// time node has been running - pub online_time: u64, - /// whether to start in libp2p::kad::Mode::Server mode - pub server_mode: bool, } /// configuration for a web server @@ -480,9 +427,6 @@ pub struct NetworkConfigFile { /// delay before beginning consensus #[serde_inline_default(ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS)] pub start_delay_seconds: u64, - /// the libp2p config - #[serde(default)] - pub libp2p_config: Option, /// the hotshot config file #[serde(default)] pub config: HotShotConfigFile, @@ -524,22 +468,8 @@ impl From> for NetworkConfig { .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), seed: val.seed, transaction_size: val.transaction_size, - libp2p_config: val.libp2p_config.map(|libp2p_config| Libp2pConfig { + libp2p_config: Some(Libp2pConfig { bootstrap_nodes: Vec::new(), - node_index: 0, - bootstrap_mesh_n_high: libp2p_config.bootstrap_mesh_n_high, - bootstrap_mesh_n_low: libp2p_config.bootstrap_mesh_n_low, - bootstrap_mesh_outbound_min: libp2p_config.bootstrap_mesh_outbound_min, - bootstrap_mesh_n: libp2p_config.bootstrap_mesh_n, - mesh_n_high: libp2p_config.mesh_n_high, - mesh_n_low: libp2p_config.mesh_n_low, - mesh_outbound_min: libp2p_config.mesh_outbound_min, - mesh_n: libp2p_config.mesh_n, - next_view_timeout: val.config.next_view_timeout, - builder_timeout: val.config.builder_timeout, - online_time: libp2p_config.online_time, - num_txn_per_round: val.transactions_per_round, - server_mode: libp2p_config.server_mode, }), config: val.config.into(), key_type_name: std::any::type_name::().to_string(), diff --git a/orchestrator/staging-config.toml b/orchestrator/staging-config.toml index a6973ae69b..7290ced3b0 100644 --- a/orchestrator/staging-config.toml +++ b/orchestrator/staging-config.toml @@ -41,19 +41,6 @@ transaction_size = 100 start_delay_seconds = 10 builder = "Simple" -[libp2p_config] -bootstrap_mesh_n_high = 4 -bootstrap_mesh_n_low = 4 -bootstrap_mesh_outbound_min = 2 -bootstrap_mesh_n = 4 -mesh_n_high = 4 -mesh_n_low = 4 -mesh_outbound_min = 2 -mesh_n = 4 -online_time = 10 -num_txn_per_round = 0 -server_mode = false - [config] start_threshold = [ 8, 10 ] num_nodes_with_stake = 10 From 776a54266abd07a881c2bb289202619bdd4584ac Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 3 Sep 2024 10:09:41 -0400 Subject: [PATCH 1196/1393] Fix Registration Verification Log (#3634) * fix log * make entry true again --- orchestrator/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 311d36c2cc..b311ef3059 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -771,7 +771,7 @@ where network_config.config.known_nodes_with_stake = vec![]; network_config.config.known_da_nodes = vec![]; - if network_config.enable_registration_verification { + if !network_config.enable_registration_verification { tracing::error!("REGISTRATION VERIFICATION IS TURNED OFF"); } From 725c33839ab57c5b7afa9ec29b49acfe8a5af185 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 10:43:54 -0400 Subject: [PATCH 1197/1393] Bump the all group across 1 directory with 5 updates (#3632) Bumps the all group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [async-trait](https://github.com/dtolnay/async-trait) | `0.1.81` | `0.1.82` | | [tokio](https://github.com/tokio-rs/tokio) | `1.39.3` | `1.40.0` | | [delegate](https://github.com/kobzol/rust-delegate) | `0.12.0` | `0.13.0` | | [derive_builder](https://github.com/colin-kiegel/rust-derive-builder) | `0.20.0` | `0.20.1` | | [syn](https://github.com/dtolnay/syn) | `2.0.76` | `2.0.77` | Updates `async-trait` from 0.1.81 to 0.1.82 - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.81...0.1.82) Updates `tokio` from 1.39.3 to 1.40.0 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.39.3...tokio-1.40.0) Updates `delegate` from 0.12.0 to 0.13.0 - [Changelog](https://github.com/Kobzol/rust-delegate/blob/main/CHANGELOG.md) - [Commits](https://github.com/kobzol/rust-delegate/compare/v0.12.0...v0.13.0) Updates `derive_builder` from 0.20.0 to 0.20.1 - [Release notes](https://github.com/colin-kiegel/rust-derive-builder/releases) - [Commits](https://github.com/colin-kiegel/rust-derive-builder/compare/v0.20.0...v0.20.1) Updates `syn` from 2.0.76 to 2.0.77 - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.76...2.0.77) --- updated-dependencies: - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all - dependency-name: delegate dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all - dependency-name: derive_builder dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ca5fac67d1..288ba0f627 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -19,7 +19,7 @@ async-trait = { workspace = true } blake3 = { workspace = true } bincode = { workspace = true } custom_debug = { workspace = true } -delegate = "0.12" +delegate = "0.13" derive_builder = "0.20" either = { workspace = true } futures = { workspace = true } From 3cd689729a42a1f4af28c42dc6440fa515dff89d Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Tue, 3 Sep 2024 12:31:49 -0400 Subject: [PATCH 1198/1393] Revert "[LOGGING] - Log Errors or Panic When Critical Tasks exit (#3577)" (#3629) This reverts commit 30bcd4d801a8e8066f0df031c4c31c9df14b354b. --- hotshot/src/lib.rs | 2 - hotshot/src/tasks/mod.rs | 76 +++---- hotshot/src/tasks/task_state.rs | 16 +- hotshot/src/types/handle.rs | 20 -- task-impls/src/consensus/mod.rs | 4 - task-impls/src/consensus2/mod.rs | 4 - task-impls/src/da.rs | 4 - task-impls/src/events.rs | 10 - task-impls/src/harness.rs | 7 +- task-impls/src/health_check.rs | 121 ----------- task-impls/src/lib.rs | 3 - task-impls/src/network.rs | 10 - task-impls/src/quorum_proposal/mod.rs | 4 - task-impls/src/quorum_proposal_recv/mod.rs | 4 - task-impls/src/quorum_vote/mod.rs | 4 - task-impls/src/request.rs | 4 - task-impls/src/response.rs | 44 ++-- task-impls/src/rewind.rs | 4 - task-impls/src/transactions.rs | 4 - task-impls/src/upgrade.rs | 4 - task-impls/src/vid.rs | 4 - task-impls/src/view_sync.rs | 8 - task/src/task.rs | 193 +++--------------- testing/tests/tests_1/network_task.rs | 12 +- testing/tests/tests_1/test_with_failures_2.rs | 1 + 25 files changed, 78 insertions(+), 489 deletions(-) delete mode 100644 task-impls/src/health_check.rs diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 461638444a..090a9687de 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -17,7 +17,6 @@ use hotshot_types::{ traits::{network::BroadcastDelay, node_implementation::Versions}, }; use rand::Rng; -use tasks::add_health_check_task; use url::Url; /// Contains traits consumed by [`SystemContext`] @@ -635,7 +634,6 @@ impl, V: Versions> SystemContext(&mut handle).await; add_consensus_tasks::(&mut handle).await; - add_health_check_task::(&mut handle).await; handle } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 359687d505..3d5259081a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -18,15 +18,14 @@ use futures::{ future::{BoxFuture, FutureExt}, stream, StreamExt, }; -use hotshot_task::task::{NetworkHandle, Task}; +use hotshot_task::task::Task; #[cfg(feature = "rewind")] use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, - health_check::HealthCheckTaskState, - helpers::broadcast_event, - network::{self, NetworkEventTaskState, NetworkMessageTaskState}, + network, + network::{NetworkEventTaskState, NetworkMessageTaskState}, request::NetworkRequestState, response::{run_response_task, NetworkResponseState}, transactions::TransactionTaskState, @@ -69,7 +68,14 @@ pub async fn add_request_network_task< >( handle: &mut SystemContextHandle, ) { - handle.add_task(NetworkRequestState::::create_from(handle).await); + let state = NetworkRequestState::::create_from(handle).await; + + let task = Task::new( + state, + handle.internal_event_stream.0.clone(), + handle.internal_event_stream.1.activate_cloned(), + ); + handle.consensus_registry.run_task(task); } /// Add a task which responds to requests on the network. @@ -85,12 +91,9 @@ pub fn add_response_task, V: Versi handle.private_key().clone(), handle.hotshot.id, ); - let task_name = state.get_task_name(); handle.network_registry.register(run_response_task::( state, - handle.internal_event_stream.0.clone(), handle.internal_event_stream.1.activate_cloned(), - handle.generate_task_id(task_name), )); } @@ -114,10 +117,9 @@ pub fn add_network_message_task< let network = Arc::clone(channel); let mut state = network_state.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let stream = handle.internal_event_stream.0.clone(); - let task_id = handle.generate_task_id(network_state.get_task_name()); - let handle_task_id = task_id.clone(); let task_handle = async_spawn(async move { + futures::pin_mut!(shutdown_signal); + let recv_stream = stream::unfold((), |()| async { let msgs = match network.recv_msgs().await { Ok(msgs) => { @@ -142,10 +144,9 @@ pub fn add_network_message_task< Some((msgs, ())) }); - let heartbeat_interval = - Task::>::get_periodic_interval_in_secs(); let fused_recv_stream = recv_stream.boxed().fuse(); - futures::pin_mut!(fused_recv_stream, heartbeat_interval, shutdown_signal); + futures::pin_mut!(fused_recv_stream); + loop { futures::select! { () = shutdown_signal => { @@ -167,16 +168,10 @@ pub fn add_network_message_task< return; } } - _ = Task::>::handle_periodic_delay(&mut heartbeat_interval) => { - broadcast_event(Arc::new(HotShotEvent::HeartBeat(handle_task_id.clone())), &stream).await; - } } } }); - handle.network_registry.register(NetworkHandle { - handle: task_handle, - task_id, - }); + handle.network_registry.register(task_handle); } /// Add the network task to handle events and send messages. @@ -199,7 +194,12 @@ pub fn add_network_event_task< storage: Arc::clone(&handle.storage()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; - handle.add_task(network_state); + let task = Task::new( + network_state, + handle.internal_event_stream.0.clone(), + handle.internal_event_stream.1.activate_cloned(), + ); + handle.consensus_registry.run_task(task); } /// Adds consensus-related tasks to a `SystemContextHandle`. @@ -331,7 +331,6 @@ where add_consensus_tasks::(&mut handle).await; self.add_network_tasks(&mut handle).await; - add_health_check_task(&mut handle).await; handle } @@ -339,7 +338,6 @@ where /// Add byzantine network tasks with the trait #[allow(clippy::too_many_lines)] async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { - let task_id = self.get_task_name(); let state_in = Arc::new(RwLock::new(self)); let state_out = Arc::clone(&state_in); // channels between the task spawned in this function and the network tasks. @@ -378,6 +376,8 @@ where // and broadcast the transformed events to the replacement event stream we just created. let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let send_handle = async_spawn(async move { + futures::pin_mut!(shutdown_signal); + let recv_stream = stream::unfold(original_receiver, |mut recv| async move { match recv.recv().await { Ok(event) => Some((Ok(event), recv)), @@ -388,7 +388,7 @@ where .boxed(); let fused_recv_stream = recv_stream.fuse(); - futures::pin_mut!(fused_recv_stream, shutdown_signal); + futures::pin_mut!(fused_recv_stream); loop { futures::select! { @@ -424,6 +424,8 @@ where // and broadcast the transformed events to the original internal event stream let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); let recv_handle = async_spawn(async move { + futures::pin_mut!(shutdown_signal); + let network_recv_stream = stream::unfold(receiver_from_network, |mut recv| async move { match recv.recv().await { @@ -434,7 +436,7 @@ where }); let fused_network_recv_stream = network_recv_stream.boxed().fuse(); - futures::pin_mut!(fused_network_recv_stream, shutdown_signal); + futures::pin_mut!(fused_network_recv_stream); loop { futures::select! { @@ -465,19 +467,8 @@ where } }); - handle.network_registry.register(NetworkHandle { - handle: send_handle, - task_id: handle.generate_task_id(task_id), - }); - handle.network_registry.register(NetworkHandle { - handle: recv_handle, - task_id: handle.generate_task_id(task_id), - }); - } - - /// Gets the name of the current task - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() + handle.network_registry.register(send_handle); + handle.network_registry.register(recv_handle); } } @@ -707,10 +698,3 @@ pub async fn add_network_tasks, V: network::vid_filter, ); } - -/// Add the health check task -pub async fn add_health_check_task, V: Versions>( - handle: &mut SystemContextHandle, -) { - handle.add_task(HealthCheckTaskState::::create_from(handle).await); -} diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 7754822716..a6bf6cd58d 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -13,7 +13,7 @@ use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, - da::DaTaskState, health_check::HealthCheckTaskState, quorum_proposal::QuorumProposalTaskState, + da::DaTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, view_sync::ViewSyncTaskState, @@ -385,17 +385,3 @@ impl, V: Versions> CreateTaskState } } } - -#[async_trait] -impl, V: Versions> CreateTaskState - for HealthCheckTaskState -{ - async fn create_from(handle: &SystemContextHandle) -> Self { - let heartbeat_timeout_duration_in_secs = 30; - HealthCheckTaskState::new( - handle.hotshot.id, - handle.get_task_ids(), - heartbeat_timeout_duration_in_secs, - ) - } -} diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 8ba28c6c5e..0b285b593f 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,7 +22,6 @@ use hotshot_types::{ error::HotShotError, traits::{election::Membership, network::ConnectedNetwork, node_implementation::NodeType}, }; -use rand::Rng; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; @@ -69,34 +68,15 @@ impl + 'static, V: Versions> { /// Adds a hotshot consensus-related task to the `SystemContextHandle`. pub fn add_task> + 'static>(&mut self, task_state: S) { - let task_name = task_state.get_task_name(); let task = Task::new( task_state, self.internal_event_stream.0.clone(), self.internal_event_stream.1.activate_cloned(), - self.generate_task_id(task_name), ); self.consensus_registry.run_task(task); } - #[must_use] - /// generate a task id for a task - pub fn generate_task_id(&self, task_name: &str) -> String { - let random = rand::thread_rng().gen_range(0..=9999); - let tasks_spawned = - self.consensus_registry.task_handles.len() + self.network_registry.handles.len(); - format!("{task_name}_{tasks_spawned}_{random}") - } - - #[must_use] - /// Get a list of all the running tasks ids - pub fn get_task_ids(&self) -> Vec { - let mut task_ids = self.consensus_registry.get_task_ids(); - task_ids.extend(self.network_registry.get_task_ids()); - task_ids - } - /// obtains a stream to expose to the user pub fn event_stream(&self) -> impl Stream> { self.output_event_stream.1.activate_cloned() diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 6c3b6fac21..d328ff3683 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -785,8 +785,4 @@ impl, V: Versions> TaskState } } } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 4b503f46fc..034ac8ab92 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -173,8 +173,4 @@ impl, V: Versions> TaskState /// Joins all subtasks. async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 111dc0d43a..0686461603 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -389,8 +389,4 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 9d2fe76d13..564bd80d19 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -40,10 +40,6 @@ impl TaskEvent for HotShotEvent { fn shutdown_event() -> Self { HotShotEvent::Shutdown } - - fn heartbeat_event(task_id: String) -> Self { - HotShotEvent::HeartBeat(task_id) - } } /// Wrapper type for the event to notify tasks that a proposal for a view is missing @@ -226,9 +222,6 @@ pub enum HotShotEvent { /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid QuorumProposalPreliminarilyValidated(Proposal>), - - /// Periodic heart beat event for health checking - HeartBeat(String), } impl Display for HotShotEvent { @@ -484,9 +477,6 @@ impl Display for HotShotEvent { proposal.data.view_number() ) } - HotShotEvent::HeartBeat(task_id) => { - write!(f, "HeartBeat(task_id={task_id:?}") - } } } } diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 5f116dee31..16e8a273b8 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -50,12 +50,7 @@ pub async fn run_harness> + Send allow_extra_output, }; - let task = Task::new( - state, - to_test.clone(), - from_test.clone(), - "task_0".to_string(), - ); + let task = Task::new(state, to_test.clone(), from_test.clone()); let handle = task.run(); let test_future = async move { diff --git a/task-impls/src/health_check.rs b/task-impls/src/health_check.rs deleted file mode 100644 index ee00dbbe7f..0000000000 --- a/task-impls/src/health_check.rs +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use std::{ - collections::{hash_map::Entry, HashMap}, - marker::PhantomData, - sync::Arc, - time::Instant, -}; - -use anyhow::Result; -use async_broadcast::{Receiver, Sender}; -use async_lock::Mutex; -use async_trait::async_trait; -use hotshot_task::task::TaskState; -use hotshot_types::traits::node_implementation::NodeType; - -use crate::events::{HotShotEvent, HotShotTaskCompleted}; - -/// Health event task, recieve heart beats from other tasks -pub struct HealthCheckTaskState { - /// Node id - pub node_id: u64, - /// Map of the task id to timestamp of last heartbeat - pub task_ids_heartbeat_timestamp: Mutex>, - /// Specify the time we start logging when no heartbeat received - pub heartbeat_timeout_duration_in_secs: u64, - /// phantom - pub _phantom: PhantomData, -} - -impl HealthCheckTaskState { - /// Create a new instance of task state with task ids pre populated - #[must_use] - pub fn new( - node_id: u64, - task_ids: Vec, - heartbeat_timeout_duration_in_secs: u64, - ) -> Self { - let time = Instant::now(); - let mut task_ids_heartbeat_timestamp: HashMap = HashMap::new(); - for task_id in task_ids { - task_ids_heartbeat_timestamp.insert(task_id, time); - } - - HealthCheckTaskState { - node_id, - task_ids_heartbeat_timestamp: Mutex::new(task_ids_heartbeat_timestamp), - heartbeat_timeout_duration_in_secs, - _phantom: std::marker::PhantomData, - } - } - /// Handles only HeartBeats and updates the timestamp for a task - pub async fn handle( - &mut self, - event: &Arc>, - ) -> Option { - match event.as_ref() { - HotShotEvent::HeartBeat(task_id) => { - let mut task_ids_heartbeat_timestamp = - self.task_ids_heartbeat_timestamp.lock().await; - match task_ids_heartbeat_timestamp.entry(task_id.clone()) { - Entry::Occupied(mut heartbeat_timestamp) => { - *heartbeat_timestamp.get_mut() = Instant::now(); - } - Entry::Vacant(_) => { - // On startup of this task we populate the map with all task ids - } - } - } - HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted); - } - _ => {} - } - None - } -} - -#[async_trait] -impl TaskState for HealthCheckTaskState { - type Event = HotShotEvent; - - async fn handle_event( - &mut self, - event: Arc, - _sender: &Sender>, - _receiver: &Receiver>, - ) -> Result<()> { - self.handle(&event).await; - - Ok(()) - } - - async fn cancel_subtasks(&mut self) {} - - async fn periodic_task(&self, _sender: &Sender>, _task_id: String) { - let current_time = Instant::now(); - - let task_ids_heartbeat = self.task_ids_heartbeat_timestamp.lock().await; - for (task_id, heartbeat_timestamp) in task_ids_heartbeat.iter() { - if current_time.duration_since(*heartbeat_timestamp).as_secs() - > self.heartbeat_timeout_duration_in_secs - { - tracing::error!( - "Node Id {} has not received a heartbeat for task id {} for {} seconds", - self.node_id, - task_id, - self.heartbeat_timeout_duration_in_secs - ); - } - } - } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } -} diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index b97685d9e8..ed3dc5a0ee 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -64,6 +64,3 @@ pub mod quorum_proposal_recv; /// Task for storing and replaying all received tasks by a node pub mod rewind; - -/// Task for listening to HeartBeat events and logging any task that doesnt broadcast after sometime -pub mod health_check; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 319c23b12a..4273e50d5d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -206,12 +206,6 @@ impl NetworkMessageTaskState { .await; } } - - /// Gets the name of the current task - #[must_use] - pub fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } /// network event task state @@ -262,10 +256,6 @@ impl< } async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } impl< diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index fb146eb058..9c24941e71 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -534,8 +534,4 @@ impl, V: Versions> TaskState handle.abort(); } } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 24436cc450..dc0fb5124c 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -176,8 +176,4 @@ impl, V: Versions> TaskState } } } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 6510c930a3..bd4666a699 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -682,8 +682,4 @@ impl, V: Versions> TaskState handle.abort(); } } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index faaa217ba9..abe40ef702 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -157,10 +157,6 @@ impl> TaskState for NetworkRequest } } } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } impl> NetworkRequestState { diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 7d0329b140..e5d3563089 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -6,13 +6,12 @@ use std::{sync::Arc, time::Duration}; -use async_broadcast::{Receiver, Sender}; +use async_broadcast::Receiver; use async_compatibility_layer::art::{async_sleep, async_spawn}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use futures::{FutureExt, StreamExt}; -use hotshot_task::{ - dependency::{Dependency, EventDependency}, - task::{NetworkHandle, Task}, -}; +use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, data::VidDisperseShare, @@ -29,9 +28,11 @@ use hotshot_types::{ }, }; use sha2::{Digest, Sha256}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use tracing::instrument; -use crate::{events::HotShotEvent, health_check::HealthCheckTaskState, helpers::broadcast_event}; +use crate::events::HotShotEvent; /// Time to wait for txns before sending `ResponseMessage::NotFound` const TXNS_TIMEOUT: Duration = Duration::from_millis(100); @@ -75,16 +76,8 @@ impl NetworkResponseState { /// Run the request response loop until a `HotShotEvent::Shutdown` is received. /// Or the stream is closed. - async fn run_loop( - mut self, - shutdown: EventDependency>>, - sender: Sender>>, - task_name: String, - ) { + async fn run_loop(mut self, shutdown: EventDependency>>) { let mut shutdown = Box::pin(shutdown.completed().fuse()); - let heartbeat_interval = - Task::>::get_periodic_interval_in_secs(); - futures::pin_mut!(heartbeat_interval); loop { futures::select! { req = self.receiver.next() => { @@ -93,9 +86,6 @@ impl NetworkResponseState { None => return, } }, - _ = Task::>::handle_periodic_delay(&mut heartbeat_interval) => { - broadcast_event(Arc::new(HotShotEvent::HeartBeat(task_name.clone())), &sender).await; - }, _ = shutdown => { return; } @@ -241,11 +231,6 @@ impl NetworkResponseState { None => ResponseMessage::NotFound, } } - - /// Get the task name - pub fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } /// Check the signature @@ -264,14 +249,11 @@ fn valid_signature( /// on the `event_stream` arg. pub fn run_response_task( task_state: NetworkResponseState, - sender: Sender>>, - receiver: Receiver>>, - task_id: String, -) -> NetworkHandle { - let shutdown = EventDependency::new( - receiver, + event_stream: Receiver>>, +) -> JoinHandle<()> { + let dep = EventDependency::new( + event_stream, Box::new(|e| matches!(e.as_ref(), HotShotEvent::Shutdown)), ); - let handle = async_spawn(task_state.run_loop(shutdown, sender, task_id.clone())); - NetworkHandle { handle, task_id } + async_spawn(task_state.run_loop(dep)) } diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index 33907f71c8..669b410b52 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -70,8 +70,4 @@ impl TaskState for RewindTaskState { } } } - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index e6238d74a7..fc10b549ee 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -792,8 +792,4 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 9b39344cb8..9af3221b70 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -358,8 +358,4 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 54e40ef177..3243f356ae 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -171,8 +171,4 @@ impl> TaskState for VidTaskState &'static str { - std::any::type_name::>() - } } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index d6568c48b7..134766283b 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -129,10 +129,6 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } /// State of a view sync replica task @@ -184,10 +180,6 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) {} - - fn get_task_name(&self) -> &'static str { - std::any::type_name::>() - } } impl, V: Versions> ViewSyncTaskState { diff --git a/task/src/task.rs b/task/src/task.rs index 4ec0986c91..af195d1e27 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::Result; use async_broadcast::{Receiver, Sender}; @@ -16,9 +16,6 @@ use futures::future::join_all; #[cfg(async_executor_impl = "tokio")] use futures::future::try_join_all; #[cfg(async_executor_impl = "tokio")] -use futures::FutureExt; -use futures::StreamExt; -#[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; /// Trait for events that long-running tasks handle @@ -28,14 +25,11 @@ pub trait TaskEvent: PartialEq { /// Note that this is necessarily uniform across all tasks. /// Exiting the task loop is handled by the task spawner, rather than the task individually. fn shutdown_event() -> Self; - - /// The heartbeat event - fn heartbeat_event(task_id: String) -> Self; } #[async_trait] /// Type for mutable task state that can be used as the state for a `Task` -pub trait TaskState: Send + Sync { +pub trait TaskState: Send { /// Type of event sent and received by the task type Event: TaskEvent + Clone + Send + Sync; @@ -49,27 +43,6 @@ pub trait TaskState: Send + Sync { _sender: &Sender>, _receiver: &Receiver>, ) -> Result<()>; - - /// Runs a specified job in the main task every `Task::PERIODIC_INTERVAL_IN_SECS` - async fn periodic_task(&self, sender: &Sender>, task_id: String) { - match sender - .broadcast_direct(Arc::new(Self::Event::heartbeat_event(task_id))) - .await - { - Ok(None) => (), - Ok(Some(_overflowed)) => { - tracing::error!( - "Event sender queue overflow, Oldest event removed form queue: Heartbeat Event" - ); - } - Err(async_broadcast::SendError(_e)) => { - tracing::warn!("Event: Heartbeat\n Sending failed, event stream probably shutdown"); - } - } - } - - /// Gets the name of the current task - fn get_task_name(&self) -> &'static str; } /// A basic task which loops waiting for events to come from `event_receiver` @@ -85,26 +58,15 @@ pub struct Task { sender: Sender>, /// Receives events that are broadcast from any task, including itself receiver: Receiver>, - /// The generated task id - task_id: String, } impl Task { - /// Constant for how often we run our periodic tasks, such as broadcasting a hearbeat - const PERIODIC_INTERVAL_IN_SECS: u64 = 10; - /// Create a new task - pub fn new( - state: S, - sender: Sender>, - receiver: Receiver>, - task_id: String, - ) -> Self { + pub fn new(state: S, sender: Sender>, receiver: Receiver>) -> Self { Task { state, sender, receiver, - task_id, } } @@ -113,100 +75,38 @@ impl Task { Box::new(self.state) as Box> } - #[cfg(async_executor_impl = "async-std")] - /// Periodic delay - pub fn get_periodic_interval_in_secs() -> futures::stream::Fuse { - async_std::stream::interval(Duration::from_secs(Self::PERIODIC_INTERVAL_IN_SECS)).fuse() - } - - #[cfg(async_executor_impl = "async-std")] - /// Handle periodic delay interval - pub fn handle_periodic_delay( - periodic_interval: &mut futures::stream::Fuse, - ) -> futures::stream::Next<'_, futures::stream::Fuse> { - periodic_interval.next() - } - - #[cfg(async_executor_impl = "tokio")] - #[must_use] - /// Periodic delay - pub fn get_periodic_interval_in_secs() -> tokio::time::Interval { - tokio::time::interval(Duration::from_secs(Self::PERIODIC_INTERVAL_IN_SECS)) - } - - #[cfg(async_executor_impl = "tokio")] - /// Handle periodic delay interval - pub fn handle_periodic_delay( - periodic_interval: &mut tokio::time::Interval, - ) -> futures::future::Fuse + '_> { - periodic_interval.tick().fuse() - } - /// Spawn the task loop, consuming self. Will continue until /// the task reaches some shutdown condition - pub fn run(mut self) -> HotShotTaskHandle { - let task_id = self.task_id.clone(); - let handle = spawn(async move { - let recv_stream = - futures::stream::unfold(self.receiver.clone(), |mut recv| async move { - match recv.recv_direct().await { - Ok(event) => Some((Ok(event), recv)), - Err(e) => Some((Err(e), recv)), - } - }) - .boxed(); - - let fused_recv_stream = recv_stream.fuse(); - let periodic_interval = Self::get_periodic_interval_in_secs(); - futures::pin_mut!(periodic_interval, fused_recv_stream); + pub fn run(mut self) -> JoinHandle>> { + spawn(async move { loop { - futures::select! { - input = fused_recv_stream.next() => { - match input { - Some(Ok(input)) => { - if *input == S::Event::shutdown_event() { - self.state.cancel_subtasks().await; + match self.receiver.recv_direct().await { + Ok(input) => { + if *input == S::Event::shutdown_event() { + self.state.cancel_subtasks().await; - break self.boxed_state(); - } - let _ = S::handle_event( - &mut self.state, - input, - &self.sender, - &self.receiver, - ) + break self.boxed_state(); + } + + let _ = + S::handle_event(&mut self.state, input, &self.sender, &self.receiver) .await .inspect_err(|e| tracing::info!("{e}")); - } - Some(Err(e)) => { - tracing::error!("Failed to receive from event stream Error: {}", e); - } - None => {} - } } - _ = Self::handle_periodic_delay(&mut periodic_interval) => { - self.state.periodic_task(&self.sender, self.task_id.clone()).await; - }, + Err(e) => { + tracing::error!("Failed to receive from event stream Error: {}", e); + } } } - }); - HotShotTaskHandle { handle, task_id } + }) } } -/// Wrapper around handle and task id so we can map -pub struct HotShotTaskHandle { - /// Handle for the task - pub handle: JoinHandle>>, - /// Generated task id - pub task_id: String, -} - #[derive(Default)] /// A collection of tasks which can handle shutdown pub struct ConsensusTaskRegistry { /// Tasks this registry controls - pub task_handles: Vec>, + task_handles: Vec>>>, } impl ConsensusTaskRegistry { @@ -217,21 +117,10 @@ impl ConsensusTaskRegistry { task_handles: vec![], } } - /// Add a task to the registry - pub fn register(&mut self, handle: HotShotTaskHandle) { + pub fn register(&mut self, handle: JoinHandle>>) { self.task_handles.push(handle); } - - #[must_use] - /// Get all task ids from registry - pub fn get_task_ids(&self) -> Vec { - self.task_handles - .iter() - .map(|wrapped_handle| wrapped_handle.task_id.clone()) - .collect() - } - /// Try to cancel/abort the task this registry has /// /// # Panics @@ -240,11 +129,11 @@ impl ConsensusTaskRegistry { pub async fn shutdown(&mut self) { let handles = &mut self.task_handles; - while let Some(wrapped_handle) = handles.pop() { + while let Some(handle) = handles.pop() { #[cfg(async_executor_impl = "async-std")] - let mut task_state = wrapped_handle.handle.await; + let mut task_state = handle.await; #[cfg(async_executor_impl = "tokio")] - let mut task_state = wrapped_handle.handle.await.unwrap(); + let mut task_state = handle.await.unwrap(); task_state.cancel_subtasks().await; } @@ -261,33 +150,20 @@ impl ConsensusTaskRegistry { /// # Panics /// Panics if one of the tasks panicked pub async fn join_all(self) -> Vec>> { - let handles: Vec>>> = self - .task_handles - .into_iter() - .map(|wrapped| wrapped.handle) - .collect(); #[cfg(async_executor_impl = "async-std")] - let states = join_all(handles).await; + let states = join_all(self.task_handles).await; #[cfg(async_executor_impl = "tokio")] - let states = try_join_all(handles).await.unwrap(); + let states = try_join_all(self.task_handles).await.unwrap(); states } } -/// Wrapper around join handle and task id for network tasks -pub struct NetworkHandle { - /// Task handle - pub handle: JoinHandle<()>, - /// Generated task id - pub task_id: String, -} - #[derive(Default)] /// A collection of tasks which can handle shutdown pub struct NetworkTaskRegistry { /// Tasks this registry controls - pub handles: Vec, + pub handles: Vec>, } impl NetworkTaskRegistry { @@ -297,15 +173,6 @@ impl NetworkTaskRegistry { NetworkTaskRegistry { handles: vec![] } } - #[must_use] - /// Get all task ids from registry - pub fn get_task_ids(&self) -> Vec { - self.handles - .iter() - .map(|wrapped_handle| wrapped_handle.task_id.clone()) - .collect() - } - #[allow(clippy::unused_async)] /// Shuts down all tasks managed by this instance. /// @@ -317,18 +184,16 @@ impl NetworkTaskRegistry { /// tasks being joined return an error. pub async fn shutdown(&mut self) { let handles = std::mem::take(&mut self.handles); - let task_handles: Vec> = - handles.into_iter().map(|wrapped| wrapped.handle).collect(); #[cfg(async_executor_impl = "async-std")] - join_all(task_handles).await; + join_all(handles).await; #[cfg(async_executor_impl = "tokio")] - try_join_all(task_handles) + try_join_all(handles) .await .expect("Failed to join all tasks during shutdown"); } /// Add a task to the registry - pub fn register(&mut self, handle: NetworkHandle) { + pub fn register(&mut self, handle: JoinHandle<()>) { self.handles.push(handle); } } diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index ad61da040e..a3b3245533 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -11,7 +11,7 @@ use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; -use hotshot_task::task::{ConsensusTaskRegistry, Task, TaskState}; +use hotshot_task::task::{ConsensusTaskRegistry, Task}; use hotshot_task_impls::{ events::HotShotEvent, network::{self, NetworkEventTaskState}, @@ -74,8 +74,7 @@ async fn test_network_task() { let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); - let task_name = network_state.get_task_name(); - let task = Task::new(network_state, tx.clone(), rx, task_name.to_string()); + let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); let mut generator = TestViewGenerator::generate(membership.clone(), membership); @@ -151,12 +150,7 @@ async fn test_network_storage_fail() { let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); - let task = Task::new( - network_state, - tx.clone(), - rx, - "NetworkEventTaskState_0".to_string(), - ); + let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); let mut generator = TestViewGenerator::generate(membership.clone(), membership); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 7323e1c3fe..e5dba1fe1d 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -71,6 +71,7 @@ cross_tests!( } ); +#[cfg(async_executor_impl = "async-std")] cross_tests!( TestName: dishonest_leader, Impls: [MemoryImpl], From 013971814a6bffcf6f8dcad9190afe0e5393c4e9 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 3 Sep 2024 20:46:06 +0200 Subject: [PATCH 1199/1393] Lr/vote collector (#3615) * Add a test for dishonest voting - not finished * Finish up the test for dishonest voting * Create a helper function to remove code duplication * Use new helper function in all applicable places * Update crates/task-impls/src/network.rs Co-authored-by: lukeiannucci * Simplify if else and adjust clippy --------- Co-authored-by: lukeiannucci --- hotshot/src/tasks/mod.rs | 232 +++++++- hotshot/src/tasks/task_state.rs | 12 +- task-impls/src/consensus/mod.rs | 105 +--- task-impls/src/consensus2/handlers.rs | 68 +-- task-impls/src/consensus2/mod.rs | 17 +- task-impls/src/da.rs | 52 +- task-impls/src/network.rs | 540 +++++++++++------- task-impls/src/upgrade.rs | 50 +- task-impls/src/vote_collection.rs | 66 ++- testing/src/test_builder.rs | 21 +- testing/tests/tests_1/test_with_failures_2.rs | 56 +- types/src/message.rs | 2 +- 12 files changed, 775 insertions(+), 446 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3d5259081a..32ef57748f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,7 +8,12 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::{collections::HashSet, sync::Arc, time::Duration}; +use crate::{ + tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, +}; +use anyhow::Context; use async_broadcast::broadcast; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -24,8 +29,11 @@ use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, - network, - network::{NetworkEventTaskState, NetworkMessageTaskState}, + network::{ + self, + test::{ModifierClosure, NetworkEventTaskStateModifier}, + NetworkEventTaskState, NetworkMessageTaskState, + }, request::NetworkRequestState, response::{run_response_task, NetworkResponseState}, transactions::TransactionTaskState, @@ -33,24 +41,22 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; +use hotshot_types::message::UpgradeLock; use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::QuorumProposal, message::{Messages, Proposal}, request_response::RequestReceiver, + simple_vote::QuorumVote, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; +use std::fmt::{Debug, Formatter}; +use std::{collections::HashSet, sync::Arc, time::Duration}; use vbs::version::StaticVersionType; -use crate::{ - tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, -}; - /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { @@ -283,7 +289,13 @@ where async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec>; /// modify outgoing messages from the network - async fn send_handler(&mut self, event: &HotShotEvent) -> Vec>; + async fn send_handler( + &mut self, + event: &HotShotEvent, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, + ) -> Vec>; #[allow(clippy::too_many_arguments)] /// Creates a `SystemContextHandle` with the given even transformer @@ -338,8 +350,6 @@ where /// Add byzantine network tasks with the trait #[allow(clippy::too_many_lines)] async fn add_network_tasks(&'static mut self, handle: &mut SystemContextHandle) { - let state_in = Arc::new(RwLock::new(self)); - let state_out = Arc::clone(&state_in); // channels between the task spawned in this function and the network tasks. // with this, we can control exactly what events the network tasks see. @@ -365,16 +375,22 @@ where ); // spawn the network tasks with our newly-created channel - add_network_tasks::(handle).await; + add_network_message_and_request_receiver_tasks(handle).await; + self.add_network_event_tasks(handle); std::mem::swap( &mut internal_event_stream, &mut handle.internal_event_stream, ); + let state_in = Arc::new(RwLock::new(self)); + let state_out = Arc::clone(&state_in); // spawn a task to listen on the (original) internal event stream, // and broadcast the transformed events to the replacement event stream we just created. let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); + let public_key = handle.public_key(); + let private_key = handle.private_key().clone(); + let upgrade_lock = handle.hotshot.upgrade_lock.clone(); let send_handle = async_spawn(async move { futures::pin_mut!(shutdown_signal); @@ -401,7 +417,12 @@ where match event { Some(Ok(msg)) => { let mut state = state_out.write().await; - let mut results = state.send_handler(&msg).await; + let mut results = state.send_handler( + &msg, + &public_key, + &private_key, + &upgrade_lock, + ).await; results.reverse(); while let Some(event) = results.pop() { let _ = sender_to_network.broadcast(event.into()).await; @@ -470,6 +491,57 @@ where handle.network_registry.register(send_handle); handle.network_registry.register(recv_handle); } + + /// Adds the `NetworkEventTaskState` tasks possibly modifying them as well. + fn add_network_event_tasks(&self, handle: &mut SystemContextHandle) { + let network = Arc::clone(&handle.network); + let quorum_membership = handle.memberships.quorum_membership.clone(); + let da_membership = handle.memberships.da_membership.clone(); + let vid_membership = handle.memberships.vid_membership.clone(); + let view_sync_membership = handle.memberships.view_sync_membership.clone(); + + self.add_network_event_task( + handle, + Arc::clone(&network), + quorum_membership.clone(), + network::quorum_filter, + ); + self.add_network_event_task( + handle, + Arc::clone(&network), + quorum_membership, + network::upgrade_filter, + ); + self.add_network_event_task( + handle, + Arc::clone(&network), + da_membership, + network::da_filter, + ); + self.add_network_event_task( + handle, + Arc::clone(&network), + view_sync_membership, + network::view_sync_filter, + ); + self.add_network_event_task( + handle, + Arc::clone(&network), + vid_membership, + network::vid_filter, + ); + } + + /// Adds a `NetworkEventTaskState` task. Can be reimplemented to modify its behaviour. + fn add_network_event_task( + &self, + handle: &mut SystemContextHandle, + channel: Arc<>::Network>, + membership: TYPES::Membership, + filter: fn(&Arc>) -> bool, + ) { + add_network_event_task(handle, channel, membership, filter); + } } #[derive(Debug)] @@ -489,7 +561,13 @@ impl, V: Versions> EventTransforme vec![event.clone()] } - async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { match event { HotShotEvent::QuorumProposalSend(proposal, signature) => { let mut result = Vec::new(); @@ -524,7 +602,13 @@ impl, V: Versions> EventTransforme vec![event.clone()] } - async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { match event { HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { vec![event.clone(), event.clone()] @@ -595,7 +679,13 @@ impl + std::fmt::Debug, V: Version vec![event.clone()] } - async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { match event { HotShotEvent::QuorumProposalSend(proposal, sender) => { self.total_proposals_from_node += 1; @@ -629,7 +719,13 @@ impl + std::fmt::Debug, V: Version vec![event.clone()] } - async fn send_handler(&mut self, event: &HotShotEvent) -> Vec> { + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { if let HotShotEvent::DacSend(cert, sender) = event { self.total_da_certs_sent_from_node += 1; if self @@ -649,15 +745,96 @@ impl + std::fmt::Debug, V: Version } } +/// An `EventHandlerState` that modifies view number on the vote of `QuorumVoteSend` event to that of a future view and correctly signs the vote +pub struct DishonestVoting { + /// Number added to the original vote's view number + pub view_increment: u64, + /// A function passed to `NetworkEventTaskStateModifier` to modify `NetworkEventTaskState` behaviour. + pub modifier: Arc>, +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestVoting +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, + ) -> Vec> { + if let HotShotEvent::QuorumVoteSend(vote) = event { + let new_view = vote.view_number + self.view_increment; + let spoofed_vote = QuorumVote::::create_signed_vote( + vote.data.clone(), + new_view, + public_key, + private_key, + upgrade_lock, + ) + .await + .context("Failed to sign vote") + .unwrap(); + tracing::debug!("Sending Quorum Vote for view: {new_view:?}"); + return vec![HotShotEvent::QuorumVoteSend(spoofed_vote)]; + } + vec![event.clone()] + } + + fn add_network_event_task( + &self, + handle: &mut SystemContextHandle, + channel: Arc<>::Network>, + membership: TYPES::Membership, + filter: fn(&Arc>) -> bool, + ) { + let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { + channel, + view: TYPES::Time::genesis(), + membership, + filter, + storage: Arc::clone(&handle.storage()), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), + }; + let modified_network_state = NetworkEventTaskStateModifier { + network_event_task_state: network_state, + modifier: Arc::clone(&self.modifier), + }; + handle.add_task(modified_network_state); + } +} + +impl Debug for DishonestVoting { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DishonestVoting") + .field("view_increment", &self.view_increment) + .finish_non_exhaustive() + } +} + /// adds tasks for sending/receiving messages to/from the network. pub async fn add_network_tasks, V: Versions>( handle: &mut SystemContextHandle, +) { + add_network_message_and_request_receiver_tasks(handle).await; + + add_network_event_tasks(handle); +} + +/// Adds the `NetworkMessageTaskState` tasks and the request / receiver tasks. +pub async fn add_network_message_and_request_receiver_tasks< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + handle: &mut SystemContextHandle, ) { let network = Arc::clone(&handle.network); - let quorum_membership = handle.memberships.quorum_membership.clone(); - let da_membership = handle.memberships.da_membership.clone(); - let vid_membership = handle.memberships.vid_membership.clone(); - let view_sync_membership = handle.memberships.view_sync_membership.clone(); add_network_message_task(handle, &network); add_network_message_task(handle, &network); @@ -666,6 +843,17 @@ pub async fn add_network_tasks, V: add_request_network_task(handle).await; add_response_task(handle, request_receiver); } +} + +/// Adds the `NetworkEventTaskState` tasks. +pub fn add_network_event_tasks, V: Versions>( + handle: &mut SystemContextHandle, +) { + let network = Arc::clone(&handle.network); + let quorum_membership = handle.memberships.quorum_membership.clone(); + let da_membership = handle.memberships.da_membership.clone(); + let vid_membership = handle.memberships.vid_membership.clone(); + let view_sync_membership = handle.memberships.view_sync_membership.clone(); add_network_event_task( handle, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a6bf6cd58d..8ce9d19236 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -72,7 +72,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), network: Arc::clone(&handle.hotshot.network), - vote_collector: None.into(), + vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -140,7 +140,7 @@ impl, V: Versions> CreateTaskState network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.cur_view().await, - vote_collector: None.into(), + vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -235,8 +235,8 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), payload_commitment_and_metadata: None, - vote_collector: None.into(), - timeout_vote_collector: None.into(), + vote_collectors: BTreeMap::default(), + timeout_vote_collectors: BTreeMap::default(), timeout_task, spawned_tasks: BTreeMap::new(), formed_upgrade_certificate: None, @@ -358,8 +358,8 @@ impl, V: Versions> CreateTaskState timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), committee_membership: handle.hotshot.memberships.da_membership.clone().into(), - vote_collector: None.into(), - timeout_vote_collector: None.into(), + vote_collectors: BTreeMap::default(), + timeout_vote_collectors: BTreeMap::default(), storage: Arc::clone(&handle.storage), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index d328ff3683..2e8507cd93 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -41,20 +41,14 @@ use crate::{ handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, update_state_and_vote_if_able, VoteInfo, }, - events::{HotShotEvent, HotShotTaskCompleted}, + events::HotShotEvent, helpers::{broadcast_event, cancel_task, update_view, DONT_SEND_VIEW_CHANGE_EVENT}, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, + vote_collection::{handle_vote, VoteCollectorsMap}, }; /// Helper functions to handle proposal-related functionality. pub(crate) mod handlers; -/// Alias for Optional type for Vote Collectors -type VoteCollectorOption = - Option>; - /// The state for the consensus task. Contains all of the information for the implementation /// of consensus pub struct ConsensusTaskState, V: Versions> { @@ -91,13 +85,12 @@ pub struct ConsensusTaskState, V: /// Membership for DA committee Votes/certs pub da_membership: Arc, - /// Current Vote collection task, with it's view. - pub vote_collector: - RwLock, QuorumCertificate, V>>, + /// A map of `QuorumVote` collector tasks. + pub vote_collectors: VoteCollectorsMap, QuorumCertificate, V>, - /// Current timeout vote collection task with its view - pub timeout_vote_collector: - RwLock, TimeoutCertificate, V>>, + /// A map of `TimeoutVote` collector tasks. + pub timeout_vote_collectors: + VoteCollectorsMap, TimeoutCertificate, V>, /// timeout task handle pub timeout_task: JoinHandle<()>, @@ -327,36 +320,18 @@ impl, V: Versions> ConsensusTaskSt ); return; } - let mut collector = self.vote_collector.write().await; - - if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { - debug!("Starting vote handle for view {:?}", vote.view_number()); - let info = AccumulatorInfo { - public_key: self.public_key.clone(), - membership: Arc::clone(&self.quorum_membership), - view: vote.view_number(), - id: self.id, - }; - *collector = create_vote_accumulator( - &info, - event, - &event_sender, - self.upgrade_lock.clone(), - ) - .await; - } else { - let result = collector - .as_mut() - .unwrap() - .handle_vote_event(Arc::clone(&event), &event_sender) - .await; - - if result == Some(HotShotTaskCompleted) { - *collector = None; - // The protocol has finished - return; - } - } + + handle_vote( + &mut self.vote_collectors, + vote, + self.public_key.clone(), + &self.quorum_membership, + self.id, + &event, + &event_sender, + &self.upgrade_lock, + ) + .await; } HotShotEvent::TimeoutVoteRecv(ref vote) => { if self.timeout_membership.leader(vote.view_number() + 1) != self.public_key { @@ -367,36 +342,18 @@ impl, V: Versions> ConsensusTaskSt ); return; } - let mut collector = self.timeout_vote_collector.write().await; - - if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { - debug!("Starting vote handle for view {:?}", vote.view_number()); - let info = AccumulatorInfo { - public_key: self.public_key.clone(), - membership: Arc::clone(&self.quorum_membership), - view: vote.view_number(), - id: self.id, - }; - *collector = create_vote_accumulator( - &info, - event, - &event_sender, - self.upgrade_lock.clone(), - ) - .await; - } else { - let result = collector - .as_mut() - .unwrap() - .handle_vote_event(Arc::clone(&event), &event_sender) - .await; - - if result == Some(HotShotTaskCompleted) { - *collector = None; - // The protocol has finished - return; - } - } + + handle_vote( + &mut self.timeout_vote_collectors, + vote, + self.public_key.clone(), + &self.quorum_membership, + self.id, + &event, + &event_sender, + &self.upgrade_lock, + ) + .await; } HotShotEvent::QcFormed(cert) => match cert { either::Right(qc) => { diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs index 1c615636e0..c766334471 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus2/handlers.rs @@ -24,9 +24,9 @@ use tracing::{debug, error, instrument}; use super::Consensus2TaskState; use crate::{ consensus2::Versions, - events::{HotShotEvent, HotShotTaskCompleted}, + events::HotShotEvent, helpers::{broadcast_event, cancel_task}, - vote_collection::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent}, + vote_collection::handle_vote, }; /// Handle a `QuorumVoteRecv` event. @@ -49,28 +49,18 @@ pub(crate) async fn handle_quorum_vote_recv< ) ); - let mut collector = task_state.vote_collector.write().await; - - if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { - let info = AccumulatorInfo { - public_key: task_state.public_key.clone(), - membership: Arc::clone(&task_state.quorum_membership), - view: vote.view_number(), - id: task_state.id, - }; - *collector = - create_vote_accumulator(&info, event, sender, task_state.upgrade_lock.clone()).await; - } else { - let result = collector - .as_mut() - .unwrap() - .handle_vote_event(Arc::clone(&event), sender) - .await; + handle_vote( + &mut task_state.vote_collectors, + vote, + task_state.public_key.clone(), + &task_state.quorum_membership, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + ) + .await; - if result == Some(HotShotTaskCompleted) { - *collector = None; - } - } Ok(()) } @@ -94,28 +84,18 @@ pub(crate) async fn handle_timeout_vote_recv< ) ); - let mut collector = task_state.timeout_vote_collector.write().await; - - if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { - let info = AccumulatorInfo { - public_key: task_state.public_key.clone(), - membership: Arc::clone(&task_state.quorum_membership), - view: vote.view_number(), - id: task_state.id, - }; - *collector = - create_vote_accumulator(&info, event, sender, task_state.upgrade_lock.clone()).await; - } else { - let result = collector - .as_mut() - .unwrap() - .handle_vote_event(Arc::clone(&event), sender) - .await; + handle_vote( + &mut task_state.timeout_vote_collectors, + vote, + task_state.public_key.clone(), + &task_state.quorum_membership, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + ) + .await; - if result == Some(HotShotTaskCompleted) { - *collector = None; - } - } Ok(()) } diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 034ac8ab92..7b139842f9 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -31,11 +31,7 @@ use tracing::instrument; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, }; -use crate::{events::HotShotEvent, vote_collection::VoteCollectionTaskState}; - -/// Alias for Optional type for Vote Collectors -type VoteCollectorOption = - Option>; +use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; /// Event handlers for use in the `handle` method. mod handlers; @@ -63,13 +59,12 @@ pub struct Consensus2TaskState, V: /// Membership for DA committee Votes/certs pub committee_membership: Arc, - /// Current Vote collection task, with it's view. - pub vote_collector: - RwLock, QuorumCertificate, V>>, + /// A map of `QuorumVote` collector tasks. + pub vote_collectors: VoteCollectorsMap, QuorumCertificate, V>, - /// Current timeout vote collection task with its view - pub timeout_vote_collector: - RwLock, TimeoutCertificate, V>>, + /// A map of `TimeoutVote` collector tasks. + pub timeout_vote_collectors: + VoteCollectorsMap, TimeoutCertificate, V>, /// This node's storage ref pub storage: Arc>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0686461603..ab50b8f2cd 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -40,15 +40,9 @@ use tracing::{debug, error, instrument, warn}; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, + vote_collection::{handle_vote, VoteCollectorsMap}, }; -/// Alias for Optional type for Vote Collectors -type VoteCollectorOption = - Option>; - /// Tracks state of a DA task pub struct DaTaskState, V: Versions> { /// Output events to application @@ -71,8 +65,8 @@ pub struct DaTaskState, V: Version /// The underlying network pub network: Arc, - /// The current vote collection task, if there is one. - pub vote_collector: RwLock, DaCertificate, V>>, + /// A map of `DaVote` collector tasks. + pub vote_collectors: VoteCollectorsMap, DaCertificate, V>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -267,36 +261,18 @@ impl, V: Versions> DaTaskState collector.as_ref().unwrap().view { - debug!("Starting vote handle for view {:?}", vote.view_number()); - let info = AccumulatorInfo { - public_key: self.public_key.clone(), - membership: Arc::clone(&self.da_membership), - view: vote.view_number(), - id: self.id, - }; - *collector = create_vote_accumulator( - &info, - event, - &event_stream, - self.upgrade_lock.clone(), - ) - .await; - } else { - let result = collector - .as_mut() - .unwrap() - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; - if result == Some(HotShotTaskCompleted) { - *collector = None; - // The protocol has finished - return None; - } - } + handle_vote( + &mut self.vote_collectors, + vote, + self.public_key.clone(), + &self.da_membership, + self.id, + &event, + &event_stream, + &self.upgrade_lock, + ) + .await; } HotShotEvent::ViewChange(view) => { let view = *view; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 4273e50d5d..82cd9b2e49 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -268,7 +268,6 @@ impl< /// Handle the given event. /// /// Returns the completion status. - #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 #[instrument(skip_all, fields(view = *self.view), name = "Network Task", level = "error")] pub async fn handle( &mut self, @@ -276,156 +275,261 @@ impl< membership: &TYPES::Membership, ) { let mut maybe_action = None; - let (sender, message_kind, transmit): (_, _, TransmitType) = - match event.as_ref().clone() { - HotShotEvent::QuorumProposalSend(proposal, sender) => { - maybe_action = Some(HotShotAction::Propose); - ( - sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::Proposal(proposal), - )), - TransmitType::Broadcast, - ) - } + if let Some((sender, message_kind, transmit)) = + self.parse_event(event, &mut maybe_action, membership).await + { + self.spawn_transmit_task(message_kind, membership, maybe_action, transmit, sender); + }; + } - // ED Each network task is subscribed to all these message types. Need filters per network task - HotShotEvent::QuorumVoteSend(vote) => { - maybe_action = Some(HotShotAction::Vote); - ( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::Vote(vote.clone()), - )), - TransmitType::Direct(membership.leader(vote.view_number() + 1)), - ) - } - HotShotEvent::QuorumProposalRequestSend(req, signature) => ( - req.key.clone(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ProposalRequested(req.clone(), signature), - )), - TransmitType::DaCommitteeAndLeaderBroadcast(membership.leader(req.view_number)), - ), - HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => ( - sender_key.clone(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::LeaderProposalAvailable(proposal), - )), - TransmitType::Direct(sender_key), - ), - HotShotEvent::VidDisperseSend(proposal, sender) => { - self.handle_vid_disperse_proposal(proposal, &sender).await; - return; - } - HotShotEvent::DaProposalSend(proposal, sender) => { - maybe_action = Some(HotShotAction::DaPropose); - ( - sender, - MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaProposal(proposal), - )), - TransmitType::DaCommitteeBroadcast, - ) - } - HotShotEvent::DaVoteSend(vote) => { - maybe_action = Some(HotShotAction::DaVote); - ( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaVote(vote.clone()), - )), - TransmitType::Direct(membership.leader(vote.view_number())), - ) + /// handle `VidDisperseSend` + async fn handle_vid_disperse_proposal( + &self, + vid_proposal: Proposal>, + sender: &::SignatureKey, + ) -> Option { + let view = vid_proposal.data.view_number; + let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); + let mut messages = HashMap::new(); + + for proposal in vid_share_proposals { + let recipient = proposal.data.recipient_key.clone(); + let message = Message { + sender: sender.clone(), + kind: MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::VidDisperseMsg(proposal), + )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + }; + let serialized_message = match self.upgrade_lock.serialize(&message).await { + Ok(serialized) => serialized, + Err(e) => { + error!("Failed to serialize message: {}", e); + continue; } - // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee - HotShotEvent::DacSend(certificate, sender) => { - maybe_action = Some(HotShotAction::DaCert); - ( - sender, - MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaCertificate(certificate), - )), - TransmitType::Broadcast, - ) + }; + + messages.insert(recipient, serialized_message); + } + + let net = Arc::clone(&self.channel); + let storage = Arc::clone(&self.storage); + async_spawn(async move { + if NetworkEventTaskState::::maybe_record_action( + Some(HotShotAction::VidDisperse), + storage, + view, + ) + .await + .is_err() + { + return; + } + match net.vid_broadcast_message(messages).await { + Ok(()) => {} + Err(e) => error!("Failed to send message from network task: {:?}", e), + } + }); + + None + } + + /// Record `HotShotAction` if available + async fn maybe_record_action( + maybe_action: Option, + storage: Arc>, + view: ::Time, + ) -> Result<(), ()> { + if let Some(action) = maybe_action { + match storage + .write() + .await + .record_action(view, action.clone()) + .await + { + Ok(()) => Ok(()), + Err(e) => { + warn!("Not Sending {:?} because of storage error: {:?}", action, e); + Err(()) } - HotShotEvent::ViewSyncPreCommitVoteSend(vote) => ( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), - )), - TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), - ), - HotShotEvent::ViewSyncCommitVoteSend(vote) => ( - vote.signing_key(), + } + } else { + Ok(()) + } + } + + /// Parses a `HotShotEvent` and returns a tuple of: (sender's public key, `MessageKind`, `TransmitType`) + /// which will be used to create a message and transmit on the wire. + /// Returns `None` if the parsing result should not be sent on the wire. + /// Handles the `VidDisperseSend` event separately using a helper method. + #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 + async fn parse_event( + &mut self, + event: Arc>, + maybe_action: &mut Option, + membership: &TYPES::Membership, + ) -> Option<( + ::SignatureKey, + MessageKind, + TransmitType, + )> { + match event.as_ref().clone() { + HotShotEvent::QuorumProposalSend(proposal, sender) => { + *maybe_action = Some(HotShotAction::Propose); + Some(( + sender, MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), + GeneralConsensusMessage::Proposal(proposal), )), - TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), - ), - HotShotEvent::ViewSyncFinalizeVoteSend(vote) => ( + TransmitType::Broadcast, + )) + } + + // ED Each network task is subscribed to all these message types. Need filters per network task + HotShotEvent::QuorumVoteSend(vote) => { + *maybe_action = Some(HotShotAction::Vote); + Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), + GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), - ), - HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => ( + TransmitType::Direct(membership.leader(vote.view_number() + 1)), + )) + } + HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( + req.key.clone(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ProposalRequested(req.clone(), signature), + )), + TransmitType::DaCommitteeAndLeaderBroadcast(membership.leader(req.view_number)), + )), + HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( + sender_key.clone(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::LeaderProposalAvailable(proposal), + )), + TransmitType::Direct(sender_key), + )), + HotShotEvent::VidDisperseSend(proposal, sender) => { + self.handle_vid_disperse_proposal(proposal, &sender).await; + None + } + HotShotEvent::DaProposalSend(proposal, sender) => { + *maybe_action = Some(HotShotAction::DaPropose); + Some(( sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaProposal(proposal), )), - TransmitType::Broadcast, - ), - HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), + TransmitType::DaCommitteeBroadcast, + )) + } + HotShotEvent::DaVoteSend(vote) => { + *maybe_action = Some(HotShotAction::DaVote); + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Broadcast, - ), - HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => ( + TransmitType::Direct(membership.leader(vote.view_number())), + )) + } + // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee + HotShotEvent::DacSend(certificate, sender) => { + *maybe_action = Some(HotShotAction::DaCert); + Some(( sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaCertificate(certificate), )), TransmitType::Broadcast, - ), - HotShotEvent::TimeoutVoteSend(vote) => ( + )) + } + HotShotEvent::ViewSyncPreCommitVoteSend(vote) => Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), + )), + TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), + )), + HotShotEvent::ViewSyncCommitVoteSend(vote) => Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), + )), + TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), + )), + HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), + )), + TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), + )), + HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), + )), + TransmitType::Broadcast, + )), + HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => Some(( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), + )), + TransmitType::Broadcast, + )), + HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => Some(( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), + )), + TransmitType::Broadcast, + )), + HotShotEvent::TimeoutVoteSend(vote) => Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::TimeoutVote(vote.clone()), + )), + TransmitType::Direct(membership.leader(vote.view_number() + 1)), + )), + HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( + sender, + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::UpgradeProposal(proposal), + )), + TransmitType::Broadcast, + )), + HotShotEvent::UpgradeVoteSend(vote) => { + error!("Sending upgrade vote!"); + Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::TimeoutVote(vote.clone()), - )), - TransmitType::Direct(membership.leader(vote.view_number() + 1)), - ), - HotShotEvent::UpgradeProposalSend(proposal, sender) => ( - sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::UpgradeProposal(proposal), + GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Broadcast, - ), - HotShotEvent::UpgradeVoteSend(vote) => { - error!("Sending upgrade vote!"); - ( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::UpgradeVote(vote.clone()), - )), - TransmitType::Direct(membership.leader(vote.view_number())), - ) - } - HotShotEvent::ViewChange(view) => { - self.view = view; - self.channel - .update_view::(self.view.u64(), membership) - .await; - return; - } - _ => { - return; - } - }; + TransmitType::Direct(membership.leader(vote.view_number())), + )) + } + HotShotEvent::ViewChange(view) => { + self.view = view; + self.channel + .update_view::(self.view.u64(), membership) + .await; + None + } + _ => None, + } + } + + /// Creates a network message and spawns a task that transmits it on the wire. + fn spawn_transmit_task( + &self, + message_kind: MessageKind, + membership: &TYPES::Membership, + maybe_action: Option, + transmit: TransmitType, + sender: TYPES::SignatureKey, + ) { let broadcast_delay = match &message_kind { MessageKind::Consensus( SequencingMessage::General(GeneralConsensusMessage::Vote(_)) @@ -454,7 +558,6 @@ impl< { return; } - if let MessageKind::Consensus(SequencingMessage::General( GeneralConsensusMessage::Proposal(prop), )) = &message.kind @@ -508,79 +611,116 @@ impl< } }); } +} - /// handle `VidDisperseSend` - async fn handle_vid_disperse_proposal( - &self, - vid_proposal: Proposal>, - sender: &::SignatureKey, - ) -> Option { - let view = vid_proposal.data.view_number; - let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); - let mut messages = HashMap::new(); - - for proposal in vid_share_proposals { - let recipient = proposal.data.recipient_key.clone(); - let message = Message { - sender: sender.clone(), - kind: MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::VidDisperseMsg(proposal), - )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 - }; - let serialized_message = match self.upgrade_lock.serialize(&message).await { - Ok(serialized) => serialized, - Err(e) => { - error!("Failed to serialize message: {}", e); - continue; - } - }; - - messages.insert(recipient, serialized_message); - } +/// A module with test helpers +pub mod test { + use super::{ + Arc, ConnectedNetwork, HotShotEvent, MessageKind, NetworkEventTaskState, NodeType, + Receiver, Result, Sender, Storage, TaskState, TransmitType, Versions, + }; + use async_trait::async_trait; + use std::ops::{Deref, DerefMut}; + + /// A dynamic type alias for a function that takes the result of `NetworkEventTaskState::parse_event` + /// and changes it before transmitting on the network. + pub type ModifierClosure = dyn Fn( + &mut ::SignatureKey, + &mut MessageKind, + &mut TransmitType, + &::Membership, + ) + Send + + Sync; + + /// A helper wrapper around `NetworkEventTaskState` that can modify its behaviour for tests + pub struct NetworkEventTaskStateModifier< + TYPES: NodeType, + V: Versions, + COMMCHANNEL: ConnectedNetwork, + S: Storage, + > { + /// The real `NetworkEventTaskState` + pub network_event_task_state: NetworkEventTaskState, + /// A function that takes the result of `NetworkEventTaskState::parse_event` and + /// changes it before transmitting on the network. + pub modifier: Arc>, + } - let net = Arc::clone(&self.channel); - let storage = Arc::clone(&self.storage); - async_spawn(async move { - if NetworkEventTaskState::::maybe_record_action( - Some(HotShotAction::VidDisperse), - storage, - view, - ) - .await - .is_err() + impl< + TYPES: NodeType, + V: Versions, + COMMCHANNEL: ConnectedNetwork, + S: Storage + 'static, + > NetworkEventTaskStateModifier + { + /// Handles the received event modifying it before sending on the network. + pub async fn handle( + &mut self, + event: Arc>, + membership: &TYPES::Membership, + ) { + let mut maybe_action = None; + if let Some((mut sender, mut message_kind, mut transmit)) = + self.parse_event(event, &mut maybe_action, membership).await { - return; + // Modify the values acquired by parsing the event. + (self.modifier)(&mut sender, &mut message_kind, &mut transmit, membership); + self.spawn_transmit_task(message_kind, membership, maybe_action, transmit, sender); } - match net.vid_broadcast_message(messages).await { - Ok(()) => {} - Err(e) => error!("Failed to send message from network task: {:?}", e), - } - }); - - None + } } - /// Record `HotShotAction` if available - async fn maybe_record_action( - maybe_action: Option, - storage: Arc>, - view: ::Time, - ) -> Result<(), ()> { - if let Some(action) = maybe_action { - match storage - .write() - .await - .record_action(view, action.clone()) - .await - { - Ok(()) => Ok(()), - Err(e) => { - warn!("Not Sending {:?} because of storage error: {:?}", action, e); - Err(()) - } + #[async_trait] + impl< + TYPES: NodeType, + V: Versions, + COMMCHANNEL: ConnectedNetwork, + S: Storage + 'static, + > TaskState for NetworkEventTaskStateModifier + { + type Event = HotShotEvent; + + async fn handle_event( + &mut self, + event: Arc, + _sender: &Sender>, + _receiver: &Receiver>, + ) -> Result<()> { + let membership = self.network_event_task_state.membership.clone(); + + if !(self.network_event_task_state.filter)(&event) { + self.handle(event, &membership).await; } - } else { + Ok(()) } + + async fn cancel_subtasks(&mut self) {} + } + + impl< + TYPES: NodeType, + V: Versions, + COMMCHANNEL: ConnectedNetwork, + S: Storage, + > Deref for NetworkEventTaskStateModifier + { + type Target = NetworkEventTaskState; + + fn deref(&self) -> &Self::Target { + &self.network_event_task_state + } + } + + impl< + TYPES: NodeType, + V: Versions, + COMMCHANNEL: ConnectedNetwork, + S: Storage, + > DerefMut for NetworkEventTaskStateModifier + { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.network_event_task_state + } } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 9af3221b70..ef8f952004 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -8,7 +8,6 @@ use std::{marker::PhantomData, sync::Arc, time::SystemTime}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; use hotshot_task::task::TaskState; @@ -35,15 +34,9 @@ use vbs::version::StaticVersionType; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, - vote_collection::{ - create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState, - }, + vote_collection::{handle_vote, VoteCollectorsMap}, }; -/// Alias for Optional type for Vote Collectors -type VoteCollectorOption = - Option>; - /// Tracks state of a DA task pub struct UpgradeTaskState, V: Versions> { /// Output events to application @@ -57,9 +50,8 @@ pub struct UpgradeTaskState, V: Ve /// The underlying network pub network: Arc, - /// The current vote collection task, if there is one. - pub vote_collector: - RwLock, UpgradeCertificate, V>>, + /// A map of `UpgradeVote` collector tasks + pub vote_collectors: VoteCollectorsMap, UpgradeCertificate, V>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -237,31 +229,17 @@ impl, V: Versions> UpgradeTaskStat } } - let mut collector = self.vote_collector.write().await; - - if collector.is_none() || vote.view_number() > collector.as_ref().unwrap().view { - debug!("Starting vote handle for view {:?}", vote.view_number()); - let info = AccumulatorInfo { - public_key: self.public_key.clone(), - membership: Arc::clone(&self.quorum_membership), - view: vote.view_number(), - id: self.id, - }; - *collector = - create_vote_accumulator(&info, event, &tx, self.upgrade_lock.clone()).await; - } else { - let result = collector - .as_mut() - .unwrap() - .handle_vote_event(Arc::clone(&event), &tx) - .await; - - if result == Some(HotShotTaskCompleted) { - *collector = None; - // The protocol has finished - return None; - } - } + handle_vote( + &mut self.vote_collectors, + vote, + self.public_key.clone(), + &self.quorum_membership, + self.id, + &event, + &tx, + &self.upgrade_lock, + ) + .await; } HotShotEvent::ViewChange(new_view) => { if self.cur_view >= *new_view { diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 04c36bfd9d..a7ec9cd7e9 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -4,7 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; +use std::{ + collections::{btree_map::Entry, BTreeMap, HashMap}, + fmt::Debug, + marker::PhantomData, + sync::Arc, +}; use async_broadcast::Sender; use async_trait::async_trait; @@ -32,6 +37,10 @@ use crate::{ helpers::broadcast_event, }; +/// Alias for a map of Vote Collectors +pub type VoteCollectorsMap = + BTreeMap<::Time, VoteCollectionTaskState>; + /// Task state for collecting votes of one type and emitting a certificate pub struct VoteCollectionTaskState< TYPES: NodeType, @@ -196,6 +205,61 @@ where Some(state) } +/// A helper function that handles a vote regardless whether it's the first vote in the view or not. +#[allow(clippy::too_many_arguments)] +pub async fn handle_vote< + TYPES: NodeType, + VOTE: Vote + AggregatableVote + Send + Sync + 'static, + CERT: Certificate + Debug + Send + Sync + 'static, + V: Versions, +>( + collectors: &mut VoteCollectorsMap, + vote: &VOTE, + public_key: TYPES::SignatureKey, + membership: &Arc, + id: u64, + event: &Arc>, + event_stream: &Sender>>, + upgrade_lock: &UpgradeLock, +) where + VoteCollectionTaskState: HandleVoteEvent, +{ + match collectors.entry(vote.view_number()) { + Entry::Vacant(entry) => { + debug!("Starting vote handle for view {:?}", vote.view_number()); + let info = AccumulatorInfo { + public_key, + membership: Arc::clone(membership), + view: vote.view_number(), + id, + }; + if let Some(collector) = create_vote_accumulator( + &info, + Arc::clone(event), + event_stream, + upgrade_lock.clone(), + ) + .await + { + entry.insert(collector); + }; + } + Entry::Occupied(mut entry) => { + let result = entry + .get_mut() + .handle_vote_event(Arc::clone(event), event_stream) + .await; + + if result == Some(HotShotTaskCompleted) { + // garbage collect vote collectors for old views (including the one just finished) + entry.remove(); + *collectors = collectors.split_off(&vote.view_number()); + // The protocol has finished + } + } + } +} + /// Alias for Quorum vote accumulator type QuorumVoteState = VoteCollectionTaskState, QuorumCertificate, V>; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 2cb521b08b..a11ceb94b9 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -6,6 +6,16 @@ use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; +use super::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + txn_task::TxnTaskDescription, +}; +use crate::{ + spinning_task::SpinningTaskDescription, + test_launcher::{Network, ResourceGenerators, TestLauncher}, + view_sync_task::ViewSyncTaskDescription, +}; use hotshot::{ tasks::EventTransformerState, traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, @@ -23,17 +33,6 @@ use hotshot_types::{ }; use tide_disco::Url; use vec1::Vec1; - -use super::{ - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - txn_task::TxnTaskDescription, -}; -use crate::{ - spinning_task::SpinningTaskDescription, - test_launcher::{Network, ResourceGenerators, TestLauncher}, - view_sync_task::ViewSyncTaskDescription, -}; /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index e5dba1fe1d..fb618a6c80 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -9,10 +9,11 @@ use std::{ collections::{HashMap, HashSet}, rc::Rc, + sync::Arc, time::Duration, }; -use hotshot::tasks::{DishonestDa, DishonestLeader}; +use hotshot::tasks::{DishonestDa, DishonestLeader, DishonestVoting}; use hotshot_example_types::{ node_types::{ Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, @@ -28,7 +29,17 @@ use hotshot_testing::{ test_builder::{Behaviour, TestDescription}, view_sync_task::ViewSyncTaskDescription, }; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use hotshot_types::message::{GeneralConsensusMessage, MessageKind, SequencingMessage}; +use hotshot_types::{ + data::ViewNumber, + traits::{ + election::Membership, + network::TransmitType, + node_implementation::{ConsensusTime, NodeType}, + }, + vote::HasViewNumber, +}; + // Test that a good leader can succeed in the view directly after view sync cross_tests!( TestName: test_with_failures_2, @@ -193,3 +204,44 @@ cross_tests!( metadata } ); + +cross_tests!( + TestName: dishonest_voting, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + let nodes_count: usize = 10; + let behaviour = Rc::new(move |node_id| { + let dishonest_voting = DishonestVoting { + view_increment: nodes_count as u64, + modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { + if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64)); + } else { + {} + } + }) + }; + match node_id { + 5 => Behaviour::Byzantine(Box::new(dishonest_voting)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.num_nodes_with_stake = nodes_count; + metadata + }, +); diff --git a/types/src/message.rs b/types/src/message.rs index 18a047d17c..05c25fab12 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -372,7 +372,7 @@ where } } -#[derive(Clone)] +#[derive(Clone, Debug)] /// A lock for an upgrade certificate decided by HotShot, which doubles as `PhantomData` for an instance of the `Versions` trait. pub struct UpgradeLock { /// a shared lock to an upgrade certificate decided by consensus From 50f26e08fbd192ad5fd17bb2f62ec6eb4d9a4b3f Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Tue, 3 Sep 2024 16:13:18 -0400 Subject: [PATCH 1200/1393] [Byzantine testing] - View Lagging node (#3609) * refactor byzantine code, start view lag * test cleanup * cleanup * remove node id * cleanup * run clippy * run formatter * address comments * cleanup * restore cargo * speed up tests * fix check to go into lookback logic * remove view as we see * add guarding * remove heartbeat event * rebase and fix merge conflicts --------- Co-authored-by: Luke Iannucci --- hotshot/src/tasks/mod.rs | 82 +---- task-impls/src/events.rs | 92 ++++- testing/src/byzantine/byzantine_behaviour.rs | 342 ++++++++++++++++++ testing/src/byzantine/mod.rs | 2 + testing/src/lib.rs | 3 + testing/tests/tests_1/test_success.rs | 67 +--- testing/tests/tests_1/test_with_failures_2.rs | 135 +------ testing/tests/tests_2/byzantine_tests.rs | 249 +++++++++++++ 8 files changed, 695 insertions(+), 277 deletions(-) create mode 100644 testing/src/byzantine/byzantine_behaviour.rs create mode 100644 testing/src/byzantine/mod.rs create mode 100644 testing/tests/tests_2/byzantine_tests.rs diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 32ef57748f..466136d887 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -13,7 +13,6 @@ use crate::{ ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, }; -use anyhow::Context; use async_broadcast::broadcast; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -29,11 +28,7 @@ use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, - network::{ - self, - test::{ModifierClosure, NetworkEventTaskStateModifier}, - NetworkEventTaskState, NetworkMessageTaskState, - }, + network::{self, NetworkEventTaskState, NetworkMessageTaskState}, request::NetworkRequestState, response::{run_response_task, NetworkResponseState}, transactions::TransactionTaskState, @@ -47,13 +42,12 @@ use hotshot_types::{ data::QuorumProposal, message::{Messages, Proposal}, request_response::RequestReceiver, - simple_vote::QuorumVote, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; use std::{collections::HashSet, sync::Arc, time::Duration}; use vbs::version::StaticVersionType; @@ -745,78 +739,6 @@ impl + std::fmt::Debug, V: Version } } -/// An `EventHandlerState` that modifies view number on the vote of `QuorumVoteSend` event to that of a future view and correctly signs the vote -pub struct DishonestVoting { - /// Number added to the original vote's view number - pub view_increment: u64, - /// A function passed to `NetworkEventTaskStateModifier` to modify `NetworkEventTaskState` behaviour. - pub modifier: Arc>, -} - -#[async_trait] -impl + std::fmt::Debug, V: Versions> - EventTransformerState for DishonestVoting -{ - async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { - vec![event.clone()] - } - - async fn send_handler( - &mut self, - event: &HotShotEvent, - public_key: &TYPES::SignatureKey, - private_key: &::PrivateKey, - upgrade_lock: &UpgradeLock, - ) -> Vec> { - if let HotShotEvent::QuorumVoteSend(vote) = event { - let new_view = vote.view_number + self.view_increment; - let spoofed_vote = QuorumVote::::create_signed_vote( - vote.data.clone(), - new_view, - public_key, - private_key, - upgrade_lock, - ) - .await - .context("Failed to sign vote") - .unwrap(); - tracing::debug!("Sending Quorum Vote for view: {new_view:?}"); - return vec![HotShotEvent::QuorumVoteSend(spoofed_vote)]; - } - vec![event.clone()] - } - - fn add_network_event_task( - &self, - handle: &mut SystemContextHandle, - channel: Arc<>::Network>, - membership: TYPES::Membership, - filter: fn(&Arc>) -> bool, - ) { - let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { - channel, - view: TYPES::Time::genesis(), - membership, - filter, - storage: Arc::clone(&handle.storage()), - upgrade_lock: handle.hotshot.upgrade_lock.clone(), - }; - let modified_network_state = NetworkEventTaskStateModifier { - network_event_task_state: network_state, - modifier: Arc::clone(&self.modifier), - }; - handle.add_task(modified_network_state); - } -} - -impl Debug for DishonestVoting { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("DishonestVoting") - .field("view_increment", &self.view_increment) - .finish_non_exhaustive() - } -} - /// adds tasks for sending/receiving messages to/from the network. pub async fn add_network_tasks, V: Versions>( handle: &mut SystemContextHandle, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 564bd80d19..f355b92e24 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -224,6 +224,86 @@ pub enum HotShotEvent { QuorumProposalPreliminarilyValidated(Proposal>), } +impl HotShotEvent { + #[allow(clippy::too_many_lines)] + /// Return the view number for a hotshot event if present + pub fn view_number(&self) -> Option { + match self { + HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), + HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { + Some(v.view_number()) + } + HotShotEvent::QuorumProposalRecv(proposal, _) + | HotShotEvent::QuorumProposalSend(proposal, _) => Some(proposal.data.view_number()), + HotShotEvent::QuorumVoteSend(vote) => Some(vote.view_number()), + HotShotEvent::QuorumProposalValidated(proposal, _) => Some(proposal.view_number()), + HotShotEvent::DaProposalRecv(proposal, _) + | HotShotEvent::DaProposalValidated(proposal, _) + | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), + HotShotEvent::DaVoteRecv(vote) | HotShotEvent::DaVoteSend(vote) => { + Some(vote.view_number()) + } + HotShotEvent::QcFormed(cert) => match cert { + either::Left(qc) => Some(qc.view_number()), + either::Right(tc) => Some(tc.view_number()), + }, + HotShotEvent::ViewSyncCommitVoteSend(vote) + | HotShotEvent::ViewSyncCommitVoteRecv(vote) => Some(vote.view_number()), + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) + | HotShotEvent::ViewSyncPreCommitVoteSend(vote) => Some(vote.view_number()), + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) + | HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(vote.view_number()), + HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) + | HotShotEvent::ViewSyncPreCommitCertificate2Send(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncCommitCertificate2Recv(cert) + | HotShotEvent::ViewSyncCommitCertificate2Send(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(cert, _) => Some(cert.view_number()), + HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { + Some(*view_number) + } + HotShotEvent::BlockRecv(packed_bundle) => Some(packed_bundle.view_number), + HotShotEvent::Shutdown + | HotShotEvent::TransactionSend(_, _) + | HotShotEvent::LeafDecided(_) + | HotShotEvent::TransactionsRecv(_) => None, + HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), + HotShotEvent::VidShareRecv(proposal) | HotShotEvent::VidShareValidated(proposal) => { + Some(proposal.data.view_number()) + } + HotShotEvent::UpgradeProposalRecv(proposal, _) + | HotShotEvent::UpgradeProposalSend(proposal, _) => Some(proposal.data.view_number()), + HotShotEvent::UpgradeVoteRecv(vote) | HotShotEvent::UpgradeVoteSend(vote) => { + Some(vote.view_number()) + } + HotShotEvent::QuorumProposalRequestSend(req, _) + | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), + HotShotEvent::QuorumProposalResponseSend(_, proposal) + | HotShotEvent::QuorumProposalResponseRecv(proposal) + | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { + Some(proposal.data.view_number()) + } + HotShotEvent::QuorumVoteDependenciesValidated(view_number) + | HotShotEvent::ViewChange(view_number) + | HotShotEvent::ViewSyncTimeout(view_number, _, _) + | HotShotEvent::ViewSyncTrigger(view_number) + | HotShotEvent::Timeout(view_number) + | HotShotEvent::BlockReady(_, view_number) + | HotShotEvent::LockedViewUpdated(view_number) + | HotShotEvent::LastDecidedViewUpdated(view_number) + | HotShotEvent::ValidatedStateUpdated(view_number, _) => Some(*view_number), + HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { + Some(cert.view_number()) + } + HotShotEvent::UpdateHighQc(cert) | HotShotEvent::HighQcUpdated(cert) => { + Some(cert.view_number()) + } + HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), + HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), + } + } +} + impl Display for HotShotEvent { #[allow(clippy::too_many_lines)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -306,32 +386,32 @@ impl Display for HotShotEvent { } HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => write!( f, - "ViewSyncPreCommitVoteRecv(view_nuber={:?})", + "ViewSyncPreCommitVoteRecv(view_number={:?})", vote.view_number() ), HotShotEvent::ViewSyncCommitVoteRecv(vote) => write!( f, - "ViewSyncCommitVoteRecv(view_nuber={:?})", + "ViewSyncCommitVoteRecv(view_number={:?})", vote.view_number() ), HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => write!( f, - "ViewSyncFinalizeVoteRecv(view_nuber={:?})", + "ViewSyncFinalizeVoteRecv(view_number={:?})", vote.view_number() ), HotShotEvent::ViewSyncPreCommitVoteSend(vote) => write!( f, - "ViewSyncPreCommitVoteSend(view_nuber={:?})", + "ViewSyncPreCommitVoteSend(view_number={:?})", vote.view_number() ), HotShotEvent::ViewSyncCommitVoteSend(vote) => write!( f, - "ViewSyncCommitVoteSend(view_nuber={:?})", + "ViewSyncCommitVoteSend(view_number={:?})", vote.view_number() ), HotShotEvent::ViewSyncFinalizeVoteSend(vote) => write!( f, - "ViewSyncFinalizeVoteSend(view_nuber={:?})", + "ViewSyncFinalizeVoteSend(view_number={:?})", vote.view_number() ), HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) => { diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs new file mode 100644 index 0000000000..aecc3ace33 --- /dev/null +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -0,0 +1,342 @@ +use anyhow::Context; +use async_trait::async_trait; +use hotshot::tasks::EventTransformerState; +use hotshot::types::{SignatureKey, SystemContextHandle}; +use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::network::test::{ModifierClosure, NetworkEventTaskStateModifier}; +use hotshot_task_impls::network::NetworkEventTaskState; +use hotshot_types::message::UpgradeLock; +use hotshot_types::simple_vote::QuorumVote; +use hotshot_types::traits::node_implementation::ConsensusTime; +use hotshot_types::{ + data::QuorumProposal, + message::Proposal, + traits::node_implementation::{NodeImplementation, NodeType, Versions}, +}; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; + +#[derive(Debug)] +/// An `EventTransformerState` that multiplies `QuorumProposalSend` events, incrementing the view number of the proposal +pub struct BadProposalViewDos { + /// The number of times to duplicate a `QuorumProposalSend` event + pub multiplier: u64, + /// The view number increment each time it's duplicatedjust + pub increment: u64, +} + +#[async_trait] +impl, V: Versions> EventTransformerState + for BadProposalViewDos +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { + match event { + HotShotEvent::QuorumProposalSend(proposal, signature) => { + let mut result = Vec::new(); + + for n in 0..self.multiplier { + let mut modified_proposal = proposal.clone(); + + modified_proposal.data.view_number += n * self.increment; + + result.push(HotShotEvent::QuorumProposalSend( + modified_proposal, + signature.clone(), + )); + } + + result + } + _ => vec![event.clone()], + } + } +} + +#[derive(Debug)] +/// An `EventHandlerState` that doubles the `QuorumVoteSend` and `QuorumProposalSend` events +pub struct DoubleProposeVote; + +#[async_trait] +impl, V: Versions> EventTransformerState + for DoubleProposeVote +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { + match event { + HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { + vec![event.clone(), event.clone()] + } + _ => vec![event.clone()], + } + } +} + +#[derive(Debug)] +/// An `EventHandlerState` that modifies justify_qc on `QuorumProposalSend` to that of a previous view to mock dishonest leader +pub struct DishonestLeader { + /// Store events from previous views + pub validated_proposals: Vec>, + /// How many times current node has been elected leader and sent proposal + pub total_proposals_from_node: u64, + /// Which proposals to be dishonest at + pub dishonest_at_proposal_numbers: HashSet, + /// How far back to look for a QC + pub view_look_back: usize, +} + +/// Add method that will handle `QuorumProposalSend` events +/// If we have previous proposals stored and the total_proposals_from_node matches a value specified in dishonest_at_proposal_numbers +/// Then send out the event with the modified proposal that has an older QC +impl DishonestLeader { + /// When a leader is sending a proposal this method will mock a dishonest leader + /// We accomplish this by looking back a number of specified views and using that cached proposals QC + fn handle_proposal_send_event( + &self, + event: &HotShotEvent, + proposal: &Proposal>, + sender: &TYPES::SignatureKey, + ) -> HotShotEvent { + let length = self.validated_proposals.len(); + if !self + .dishonest_at_proposal_numbers + .contains(&self.total_proposals_from_node) + || length == 0 + { + return event.clone(); + } + + // Grab proposal from specified view look back + let proposal_from_look_back = if length - 1 < self.view_look_back { + // If look back is too far just take the first proposal + self.validated_proposals[0].clone() + } else { + let index = (self.validated_proposals.len() - 1) - self.view_look_back; + self.validated_proposals[index].clone() + }; + + // Create a dishonest proposal by using the old proposals qc + let mut dishonest_proposal = proposal.clone(); + dishonest_proposal.data.justify_qc = proposal_from_look_back.justify_qc; + + HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone()) + } +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestLeader +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { + match event { + HotShotEvent::QuorumProposalSend(proposal, sender) => { + self.total_proposals_from_node += 1; + return vec![self.handle_proposal_send_event(event, proposal, sender)]; + } + HotShotEvent::QuorumProposalValidated(proposal, _) => { + self.validated_proposals.push(proposal.clone()); + } + _ => {} + } + vec![event.clone()] + } +} + +#[derive(Debug)] +/// An `EventHandlerState` that modifies view number on the certificate of `DacSend` event to that of a future view +pub struct DishonestDa { + /// How many times current node has been elected leader and sent Da Cert + pub total_da_certs_sent_from_node: u64, + /// Which proposals to be dishonest at + pub dishonest_at_da_cert_sent_numbers: HashSet, + /// When leader how many times we will send DacSend and increment view number + pub total_views_add_to_cert: u64, +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestDa +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { + if let HotShotEvent::DacSend(cert, sender) = event { + self.total_da_certs_sent_from_node += 1; + if self + .dishonest_at_da_cert_sent_numbers + .contains(&self.total_da_certs_sent_from_node) + { + let mut result = vec![HotShotEvent::DacSend(cert.clone(), sender.clone())]; + for i in 1..=self.total_views_add_to_cert { + let mut bad_cert = cert.clone(); + bad_cert.view_number = cert.view_number + i; + result.push(HotShotEvent::DacSend(bad_cert, sender.clone())); + } + return result; + } + } + vec![event.clone()] + } +} + +/// View delay configuration +#[derive(Debug)] +pub struct ViewDelay { + /// How many views the node will be delayed + pub number_of_views_to_delay: u64, + /// A map that is from view number to vector of events + pub events_for_view: HashMap>>, + /// Specify which view number to stop delaying + pub stop_view_delay_at_view_number: u64, +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for ViewDelay +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + let correct_event = vec![event.clone()]; + if let Some(view_number) = event.view_number() { + if *view_number >= self.stop_view_delay_at_view_number { + return correct_event; + } + + // add current view or push event to the map if view number has been added + let events_for_current_view = self.events_for_view.entry(view_number).or_default(); + events_for_current_view.push(event.clone()); + + // ensure we are actually able to lookback enough views + let view_diff = (*view_number).saturating_sub(self.number_of_views_to_delay); + if view_diff > 0 { + return match self + .events_for_view + .remove(&::Time::new(view_diff)) + { + Some(lookback_events) => lookback_events.clone(), + // we have already return all received events for this view + None => vec![], + }; + } + } + + correct_event + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + _public_key: &TYPES::SignatureKey, + _private_key: &::PrivateKey, + _upgrade_lock: &UpgradeLock, + ) -> Vec> { + vec![event.clone()] + } +} + +/// An `EventHandlerState` that modifies view number on the vote of `QuorumVoteSend` event to that of a future view and correctly signs the vote +pub struct DishonestVoting { + /// Number added to the original vote's view number + pub view_increment: u64, + /// A function passed to `NetworkEventTaskStateModifier` to modify `NetworkEventTaskState` behaviour. + pub modifier: Arc>, +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestVoting +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, + ) -> Vec> { + if let HotShotEvent::QuorumVoteSend(vote) = event { + let new_view = vote.view_number + self.view_increment; + let spoofed_vote = QuorumVote::::create_signed_vote( + vote.data.clone(), + new_view, + public_key, + private_key, + upgrade_lock, + ) + .await + .context("Failed to sign vote") + .unwrap(); + tracing::debug!("Sending Quorum Vote for view: {new_view:?}"); + return vec![HotShotEvent::QuorumVoteSend(spoofed_vote)]; + } + vec![event.clone()] + } + + fn add_network_event_task( + &self, + handle: &mut SystemContextHandle, + channel: Arc<>::Network>, + membership: TYPES::Membership, + filter: fn(&Arc>) -> bool, + ) { + let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { + channel, + view: TYPES::Time::genesis(), + membership, + filter, + storage: Arc::clone(&handle.storage()), + upgrade_lock: handle.hotshot.upgrade_lock.clone(), + }; + let modified_network_state = NetworkEventTaskStateModifier { + network_event_task_state: network_state, + modifier: Arc::clone(&self.modifier), + }; + handle.add_task(modified_network_state); + } +} + +impl std::fmt::Debug for DishonestVoting { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DishonestVoting") + .field("view_increment", &self.view_increment) + .finish_non_exhaustive() + } +} diff --git a/testing/src/byzantine/mod.rs b/testing/src/byzantine/mod.rs new file mode 100644 index 0000000000..0673cd72b8 --- /dev/null +++ b/testing/src/byzantine/mod.rs @@ -0,0 +1,2 @@ +/// Byzantine defintions and implementations of different behaviours +pub mod byzantine_behaviour; diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 1ad73a1ea2..43861fcbf2 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -56,3 +56,6 @@ pub mod script; /// view generator for tests pub mod view_generator; + +/// byzantine framework for tests +pub mod byzantine; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 5a2b141cdd..c229f68122 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -4,22 +4,24 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{rc::Rc, time::Duration}; +use std::time::Duration; -use hotshot::tasks::{BadProposalViewDos, DoubleProposeVote}; +#[cfg(feature = "dependency-tasks")] +use hotshot_example_types::testable_delay::{ + DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, +}; use hotshot_example_types::{ node_types::{ Libp2pImpl, MarketplaceUpgradeTestVersions, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions, }, state_types::TestTypes, - testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::{Behaviour, TestDescription}, + test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; @@ -62,6 +64,7 @@ cross_tests!( }, ); +#[cfg(feature = "dependency-tasks")] cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -94,6 +97,7 @@ cross_tests!( }, ); +#[cfg(feature = "dependency-tasks")] cross_tests!( TestName: test_success_with_async_delay_2, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -134,61 +138,6 @@ cross_tests!( }, ); -cross_tests!( - TestName: double_propose_vote, - Impls: [MemoryImpl], - Types: [TestTypes], - Versions: [TestVersions], - Ignore: false, - Metadata: { - let behaviour = Rc::new(|node_id| { match node_id { - 1 => Behaviour::Byzantine(Box::new(DoubleProposeVote)), - _ => Behaviour::Standard, - } }); - - TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - } - }, -); - -// Test where node 4 sends out the correct quorum proposal and additionally spams the network with an extra 99 malformed proposals -cross_tests!( - TestName: multiple_bad_proposals, - Impls: [MemoryImpl], - Types: [TestTypes], - Versions: [TestVersions], - Ignore: false, - Metadata: { - let behaviour = Rc::new(|node_id| { match node_id { - 4 => Behaviour::Byzantine(Box::new(BadProposalViewDos { multiplier: 100, increment: 1 })), - _ => Behaviour::Standard, - } }); - - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; - - metadata.overall_safety_properties.num_failed_views = 0; - - metadata - }, -); - cross_tests!( TestName: test_with_double_leader_no_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index fb618a6c80..a4153b407b 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -6,27 +6,17 @@ // TODO: Remove this after integration #![allow(unused_imports)] -use std::{ - collections::{HashMap, HashSet}, - rc::Rc, - sync::Arc, - time::Duration, -}; +use std::collections::HashMap; -use hotshot::tasks::{DishonestDa, DishonestLeader, DishonestVoting}; use hotshot_example_types::{ - node_types::{ - Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, - TestVersions, - }, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, - test_builder::{Behaviour, TestDescription}, + test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; use hotshot_types::message::{GeneralConsensusMessage, MessageKind, SequencingMessage}; @@ -82,84 +72,6 @@ cross_tests!( } ); -#[cfg(async_executor_impl = "async-std")] -cross_tests!( - TestName: dishonest_leader, - Impls: [MemoryImpl], - Types: [TestTypes], - Versions: [TestVersions], - Ignore: false, - Metadata: { - let behaviour = Rc::new(|node_id| { - let dishonest_leader = DishonestLeader:: { - dishonest_at_proposal_numbers: HashSet::from([2, 3]), - validated_proposals: Vec::new(), - total_proposals_from_node: 0, - view_look_back: 1, - _phantom: std::marker::PhantomData - }; - match node_id { - 2 => Behaviour::Byzantine(Box::new(dishonest_leader)), - _ => Behaviour::Standard, - } - }); - - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; - - metadata.overall_safety_properties.num_failed_views = 2; - metadata.num_nodes_with_stake = 5; - metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ - (ViewNumber::new(7), false), - (ViewNumber::new(12), false) - ]); - metadata - }, -); - -cross_tests!( - TestName: dishonest_da, - Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Versions: [MarketplaceTestVersions], - Ignore: false, - Metadata: { - let behaviour = Rc::new(|node_id| { - let dishonest_da = DishonestDa { - dishonest_at_da_cert_sent_numbers: HashSet::from([2]), - total_da_certs_sent_from_node: 0, - total_views_add_to_cert: 4 - }; - match node_id { - 2 => Behaviour::Byzantine(Box::new(dishonest_da)), - _ => Behaviour::Standard, - } - }); - - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; - - metadata.num_nodes_with_stake = 10; - metadata - }, -); - cross_tests!( TestName: test_with_double_leader_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -204,44 +116,3 @@ cross_tests!( metadata } ); - -cross_tests!( - TestName: dishonest_voting, - Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Versions: [MarketplaceTestVersions], - Ignore: false, - Metadata: { - let nodes_count: usize = 10; - let behaviour = Rc::new(move |node_id| { - let dishonest_voting = DishonestVoting { - view_increment: nodes_count as u64, - modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { - if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64)); - } else { - {} - } - }) - }; - match node_id { - 5 => Behaviour::Byzantine(Box::new(dishonest_voting)), - _ => Behaviour::Standard, - } - }); - - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; - - metadata.num_nodes_with_stake = nodes_count; - metadata - }, -); diff --git a/testing/tests/tests_2/byzantine_tests.rs b/testing/tests/tests_2/byzantine_tests.rs new file mode 100644 index 0000000000..6120e9bd92 --- /dev/null +++ b/testing/tests/tests_2/byzantine_tests.rs @@ -0,0 +1,249 @@ +use std::{ + collections::{HashMap, HashSet}, + time::Duration, +}; + +use hotshot_example_types::{ + node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl, TestVersions}, + state_types::TestTypes, +}; +use hotshot_macros::cross_tests; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + byzantine::byzantine_behaviour::{ + BadProposalViewDos, DishonestDa, DishonestLeader, DishonestVoting, DoubleProposeVote, + ViewDelay, + }, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + test_builder::{Behaviour, TestDescription}, +}; +use hotshot_types::{ + data::ViewNumber, + message::{GeneralConsensusMessage, MessageKind, SequencingMessage}, + traits::{ + election::Membership, + network::TransmitType, + node_implementation::{ConsensusTime, NodeType}, + }, + vote::HasViewNumber, +}; +use std::rc::Rc; +use std::sync::Arc; +cross_tests!( + TestName: double_propose_vote, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [TestVersions], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { match node_id { + 1 => Behaviour::Byzantine(Box::new(DoubleProposeVote)), + _ => Behaviour::Standard, + } }); + + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + } + }, +); + +// Test where node 4 sends out the correct quorum proposal and additionally spams the network with an extra 99 malformed proposals +cross_tests!( + TestName: multiple_bad_proposals, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [TestVersions], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { match node_id { + 4 => Behaviour::Byzantine(Box::new(BadProposalViewDos { multiplier: 100, increment: 1 })), + _ => Behaviour::Standard, + } }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + + metadata + }, +); + +cross_tests!( + TestName: dishonest_leader, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [TestVersions], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { + let dishonest_leader = DishonestLeader { + dishonest_at_proposal_numbers: HashSet::from([2, 3]), + validated_proposals: Vec::new(), + total_proposals_from_node: 0, + view_look_back: 1 + }; + match node_id { + 2 => Behaviour::Byzantine(Box::new(dishonest_leader)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 2; + metadata.num_nodes_with_stake = 5; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(7), false), + (ViewNumber::new(12), false) + ]); + metadata + }, +); + +cross_tests!( + TestName: dishonest_da, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + let behaviour = Rc::new(|node_id| { + let dishonest_da = DishonestDa { + dishonest_at_da_cert_sent_numbers: HashSet::from([2]), + total_da_certs_sent_from_node: 0, + total_views_add_to_cert: 4 + }; + match node_id { + 2 => Behaviour::Byzantine(Box::new(dishonest_da)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.num_nodes_with_stake = 10; + metadata + }, +); + +cross_tests!( + TestName: view_delay, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + + let behaviour = Rc::new(|node_id| { + let view_delay = ViewDelay { + number_of_views_to_delay: node_id/3, + events_for_view: HashMap::new(), + stop_view_delay_at_view_number: 25, + }; + match node_id { + 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + let num_nodes_with_stake = 15; + metadata.num_nodes_with_stake = num_nodes_with_stake; + metadata.da_staked_committee_size = num_nodes_with_stake; + metadata.overall_safety_properties.num_failed_views = 20; + metadata.overall_safety_properties.num_successful_views = 20; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(6), false), + (ViewNumber::new(10), false), + (ViewNumber::new(14), false), + (ViewNumber::new(21), false), + (ViewNumber::new(25), false), + ]); + metadata + }, +); + +cross_tests!( + TestName: dishonest_voting, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + let nodes_count: usize = 10; + let behaviour = Rc::new(move |node_id| { + let dishonest_voting = DishonestVoting { + view_increment: nodes_count as u64, + modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { + if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64)); + } else { + {} + } + }) + }; + match node_id { + 5 => Behaviour::Byzantine(Box::new(dishonest_voting)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.num_nodes_with_stake = nodes_count; + metadata + }, +); From 4f48662553f41b08560a3306bde3cbe7a7ca7d51 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:45:52 -0400 Subject: [PATCH 1201/1393] reexport `GossipConfig` (#3643) --- hotshot/src/traits.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 8d26f9e7f0..b0aa3a67e3 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -19,8 +19,8 @@ pub mod implementations { pub use super::networking::{ combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{ - derive_libp2p_keypair, derive_libp2p_peer_id, Libp2pMetricsValue, Libp2pNetwork, - PeerInfoVec, + derive_libp2p_keypair, derive_libp2p_peer_id, GossipConfig, Libp2pMetricsValue, + Libp2pNetwork, PeerInfoVec, }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ From 99e2bc0c31190eebcf69d14490f8b51640f3280d Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Wed, 4 Sep 2024 11:09:14 -0400 Subject: [PATCH 1202/1393] remove unecessary code (#3644) --- hotshot/src/tasks/mod.rs | 206 +-------------------------------------- 1 file changed, 2 insertions(+), 204 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 466136d887..fe9a75dbfd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -39,8 +39,7 @@ use hotshot_task_impls::{ use hotshot_types::message::UpgradeLock; use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, - data::QuorumProposal, - message::{Messages, Proposal}, + message::Messages, request_response::RequestReceiver, traits::{ network::ConnectedNetwork, @@ -48,7 +47,7 @@ use hotshot_types::{ }, }; use std::fmt::Debug; -use std::{collections::HashSet, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use vbs::version::StaticVersionType; /// event for global event stream @@ -538,207 +537,6 @@ where } } -#[derive(Debug)] -/// An `EventTransformerState` that multiplies `QuorumProposalSend` events, incrementing the view number of the proposal -pub struct BadProposalViewDos { - /// The number of times to duplicate a `QuorumProposalSend` event - pub multiplier: u64, - /// The view number increment each time it's duplicated - pub increment: u64, -} - -#[async_trait] -impl, V: Versions> EventTransformerState - for BadProposalViewDos -{ - async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { - vec![event.clone()] - } - - async fn send_handler( - &mut self, - event: &HotShotEvent, - _public_key: &TYPES::SignatureKey, - _private_key: &::PrivateKey, - _upgrade_lock: &UpgradeLock, - ) -> Vec> { - match event { - HotShotEvent::QuorumProposalSend(proposal, signature) => { - let mut result = Vec::new(); - - for n in 0..self.multiplier { - let mut modified_proposal = proposal.clone(); - - modified_proposal.data.view_number += n * self.increment; - - result.push(HotShotEvent::QuorumProposalSend( - modified_proposal, - signature.clone(), - )); - } - - result - } - _ => vec![event.clone()], - } - } -} - -#[derive(Debug)] -/// An `EventHandlerState` that doubles the `QuorumVoteSend` and `QuorumProposalSend` events -pub struct DoubleProposeVote; - -#[async_trait] -impl, V: Versions> EventTransformerState - for DoubleProposeVote -{ - async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { - vec![event.clone()] - } - - async fn send_handler( - &mut self, - event: &HotShotEvent, - _public_key: &TYPES::SignatureKey, - _private_key: &::PrivateKey, - _upgrade_lock: &UpgradeLock, - ) -> Vec> { - match event { - HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { - vec![event.clone(), event.clone()] - } - _ => vec![event.clone()], - } - } -} - -#[derive(Debug)] -/// An `EventHandlerState` that modifies justify_qc on `QuorumProposalSend` to that of a previous view to mock dishonest leader -pub struct DishonestLeader> { - /// Store events from previous views - pub validated_proposals: Vec>, - /// How many times current node has been elected leader and sent proposal - pub total_proposals_from_node: u64, - /// Which proposals to be dishonest at - pub dishonest_at_proposal_numbers: HashSet, - /// How far back to look for a QC - pub view_look_back: usize, - /// Phantom - pub _phantom: std::marker::PhantomData, -} - -/// Add method that will handle `QuorumProposalSend` events -/// If we have previous proposals stored and the total_proposals_from_node matches a value specified in dishonest_at_proposal_numbers -/// Then send out the event with the modified proposal that has an older QC -impl> DishonestLeader { - /// When a leader is sending a proposal this method will mock a dishonest leader - /// We accomplish this by looking back a number of specified views and using that cached proposals QC - fn handle_proposal_send_event( - &self, - event: &HotShotEvent, - proposal: &Proposal>, - sender: &TYPES::SignatureKey, - ) -> HotShotEvent { - let length = self.validated_proposals.len(); - if !self - .dishonest_at_proposal_numbers - .contains(&self.total_proposals_from_node) - || length == 0 - { - return event.clone(); - } - - // Grab proposal from specified view look back - let proposal_from_look_back = if length - 1 < self.view_look_back { - // If look back is too far just take the first proposal - self.validated_proposals[0].clone() - } else { - let index = (self.validated_proposals.len() - 1) - self.view_look_back; - self.validated_proposals[index].clone() - }; - - // Create a dishonest proposal by using the old proposals qc - let mut dishonest_proposal = proposal.clone(); - dishonest_proposal.data.justify_qc = proposal_from_look_back.justify_qc; - - HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone()) - } -} - -#[async_trait] -impl + std::fmt::Debug, V: Versions> - EventTransformerState for DishonestLeader -{ - async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { - vec![event.clone()] - } - - async fn send_handler( - &mut self, - event: &HotShotEvent, - _public_key: &TYPES::SignatureKey, - _private_key: &::PrivateKey, - _upgrade_lock: &UpgradeLock, - ) -> Vec> { - match event { - HotShotEvent::QuorumProposalSend(proposal, sender) => { - self.total_proposals_from_node += 1; - return vec![self.handle_proposal_send_event(event, proposal, sender)]; - } - HotShotEvent::QuorumProposalValidated(proposal, _) => { - self.validated_proposals.push(proposal.clone()); - } - _ => {} - } - vec![event.clone()] - } -} - -#[derive(Debug)] -/// An `EventHandlerState` that modifies view number on the certificate of `DacSend` event to that of a future view -pub struct DishonestDa { - /// How many times current node has been elected leader and sent Da Cert - pub total_da_certs_sent_from_node: u64, - /// Which proposals to be dishonest at - pub dishonest_at_da_cert_sent_numbers: HashSet, - /// When leader how many times we will send DacSend and increment view number - pub total_views_add_to_cert: u64, -} - -#[async_trait] -impl + std::fmt::Debug, V: Versions> - EventTransformerState for DishonestDa -{ - async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { - vec![event.clone()] - } - - async fn send_handler( - &mut self, - event: &HotShotEvent, - _public_key: &TYPES::SignatureKey, - _private_key: &::PrivateKey, - _upgrade_lock: &UpgradeLock, - ) -> Vec> { - if let HotShotEvent::DacSend(cert, sender) = event { - self.total_da_certs_sent_from_node += 1; - if self - .dishonest_at_da_cert_sent_numbers - .contains(&self.total_da_certs_sent_from_node) - { - let mut result = vec![HotShotEvent::DacSend(cert.clone(), sender.clone())]; - for i in 1..=self.total_views_add_to_cert { - let mut bad_cert = cert.clone(); - bad_cert.view_number = cert.view_number + i; - result.push(HotShotEvent::DacSend(bad_cert, sender.clone())); - } - return result; - } - } - vec![event.clone()] - } -} - /// adds tasks for sending/receiving messages to/from the network. pub async fn add_network_tasks, V: Versions>( handle: &mut SystemContextHandle, From f65281b8379ee851b6ca15e9872c1463e6b7d51f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 4 Sep 2024 08:32:30 -0700 Subject: [PATCH 1203/1393] [WEEKLY RELEASE] HotShot - 0.5.73 (#3624) * bump version to 0.5.73 * update cargo.lock * turn off enable_registration_verification * restore enable_registration_verification to true * latest main * clippy * clippy --- hotshot/src/tasks/mod.rs | 2 +- macros/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index fe9a75dbfd..007962e7d3 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -381,7 +381,7 @@ where // spawn a task to listen on the (original) internal event stream, // and broadcast the transformed events to the replacement event stream we just created. let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let public_key = handle.public_key(); + let public_key = handle.public_key().clone(); let private_key = handle.private_key().clone(); let upgrade_lock = handle.hotshot.upgrade_lock.clone(); let send_handle = async_spawn(async move { diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 4a9f921fd2..9ab4e4aea1 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -275,7 +275,7 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { /// - `Types: []` - a list types that implement `NodeImplementation` over the types in `Impls` /// - `TestName: example_test` - the name of the test /// - `Ignore`: whether or not this set of tests are ignored -/// Example usage: see tests in this module +/// Example usage: see tests in this module #[proc_macro] pub fn cross_tests(input: TokenStream) -> TokenStream { let test_spec = parse_macro_input!(input as CrossTestData); From 023d477eda536a4b4ac3848173bca1976be1235f Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:21:13 -0400 Subject: [PATCH 1204/1393] Update simple builder for marketplace (#3620) --- examples/infra/mod.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 1 - task-impls/src/builder.rs | 34 +++--- testing/src/block_builder/mod.rs | 84 ++++++++++++-- testing/src/block_builder/random.rs | 6 +- testing/src/block_builder/simple.rs | 106 ++++++++++++++++-- types/src/constants.rs | 6 + types/src/traits/signature_key.rs | 35 +++++- 8 files changed, 235 insertions(+), 39 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index ca6044b646..77c3ba3fb6 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -408,7 +408,7 @@ pub trait RunDa< let marketplace_config = MarketplaceConfig { auction_results_provider: TestAuctionResultsProvider::::default().into(), // TODO: we need to pass a valid fallback builder url here somehow - fallback_builder_url: url::Url::parse("http://localhost").unwrap(), + fallback_builder_url: config.config.builder_urls.first().clone(), }; SystemContext::init( diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index bbd6608a30..dbc4d36e5b 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -74,7 +74,6 @@ use libp2p_networking::{ }, reexport::{Multiaddr, ResponseChannel}, }; - use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; use tracing::{debug, error, info, instrument, trace, warn}; diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 01c29f7e2c..b2c07aae75 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -12,6 +12,7 @@ use hotshot_builder_api::v0_1::{ builder::{BuildError, Error as BuilderApiError}, }; use hotshot_types::{ + constants::LEGACY_BUILDER_MODULE, traits::{node_implementation::NodeType, signature_key::SignatureKey}, vid::VidCommitment, }; @@ -64,8 +65,8 @@ impl From for BuilderClientError { /// Client for builder API pub struct BuilderClient { - /// Underlying surf_disco::Client - inner: Client, + /// Underlying surf_disco::Client for the legacy builder api + client: Client, /// Marker for [`NodeType`] used here _marker: std::marker::PhantomData, } @@ -77,8 +78,10 @@ impl BuilderClient { /// /// If the URL is malformed. pub fn new(base_url: impl Into) -> Self { + let url = base_url.into(); + Self { - inner: Client::builder(base_url.into().join("block_info").unwrap()) + client: Client::builder(url.clone()) .set_timeout(Some(Duration::from_secs(2))) .build(), _marker: std::marker::PhantomData, @@ -93,7 +96,7 @@ impl BuilderClient { let mut backoff = Duration::from_millis(50); while Instant::now() < timeout { if matches!( - self.inner.healthcheck::().await, + self.client.healthcheck::().await, Ok(HealthStatus::Available) ) { return true; @@ -117,9 +120,9 @@ impl BuilderClient { signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result>, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); - self.inner + self.client .get(&format!( - "availableblocks/{parent}/{view_number}/{sender}/{encoded_signature}" + "{LEGACY_BUILDER_MODULE}/availableblocks/{parent}/{view_number}/{sender}/{encoded_signature}" )) .send() .await @@ -129,10 +132,10 @@ impl BuilderClient { /// Version 0.1 pub mod v0_1 { - use hotshot_builder_api::v0_1::block_info::{AvailableBlockData, AvailableBlockHeaderInput}; pub use hotshot_builder_api::v0_1::Version; use hotshot_types::{ + constants::LEGACY_BUILDER_MODULE, traits::{node_implementation::NodeType, signature_key::SignatureKey}, utils::BuilderCommitment, }; @@ -157,9 +160,9 @@ pub mod v0_1 { signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); - self.inner + self.client .get(&format!( - "claimheaderinput/{block_hash}/{view_number}/{sender}/{encoded_signature}" + "{LEGACY_BUILDER_MODULE}/claimheaderinput/{block_hash}/{view_number}/{sender}/{encoded_signature}" )) .send() .await @@ -179,9 +182,9 @@ pub mod v0_1 { signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Result, BuilderClientError> { let encoded_signature: TaggedBase64 = signature.clone().into(); - self.inner + self.client .get(&format!( - "claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" + "{LEGACY_BUILDER_MODULE}/claimblock/{block_hash}/{view_number}/{sender}/{encoded_signature}" )) .send() .await @@ -204,7 +207,8 @@ pub mod v0_2 { pub mod v0_3 { pub use hotshot_builder_api::v0_3::Version; use hotshot_types::{ - bundle::Bundle, traits::node_implementation::NodeType, vid::VidCommitment, + bundle::Bundle, constants::MARKETPLACE_BUILDER_MODULE, + traits::node_implementation::NodeType, vid::VidCommitment, }; use vbs::version::StaticVersion; @@ -225,8 +229,10 @@ pub mod v0_3 { parent_hash: VidCommitment, view_number: u64, ) -> Result, BuilderClientError> { - self.inner - .get(&format!("bundle/{parent_view}/{parent_hash}/{view_number}")) + self.client + .get(&format!( + "{MARKETPLACE_BUILDER_MODULE}/bundle/{parent_view}/{parent_hash}/{view_number}" + )) .send() .await .map_err(Into::into) diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index b6cbffd91b..b047c7710e 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -11,15 +11,21 @@ use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use futures::Stream; use hotshot::{traits::BlockPayload, types::Event}; -use hotshot_builder_api::v0_1::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::{Error, Options}, - data_source::BuilderDataSource, +use hotshot_builder_api::{ + v0_1, + v0_1::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{Error, Options}, + }, + v0_3, }; -use hotshot_types::traits::{ - block_contents::{precompute_vid_commitment, EncodeBytes}, - node_implementation::NodeType, - signature_key::BuilderSignatureKey, +use hotshot_types::{ + constants::{LEGACY_BUILDER_MODULE, MARKETPLACE_BUILDER_MODULE}, + traits::{ + block_contents::{precompute_vid_commitment, EncodeBytes}, + node_implementation::NodeType, + signature_key::BuilderSignatureKey, + }, }; use tide_disco::{method::ReadState, App, Url}; use vbs::version::StaticVersionType; @@ -62,7 +68,7 @@ struct BlockEntry { header_input: Option>, } -/// Construct a tide disco app that mocks the builder API. +/// Construct a tide disco app that mocks the builder API 0.1 + 0.3. /// /// # Panics /// If constructing and launching the builder fails for any reason @@ -74,7 +80,63 @@ pub fn run_builder_source( TYPES: NodeType, ::InstanceState: Default, Source: Clone + Send + Sync + tide_disco::method::ReadState + 'static, - ::State: Sync + Send + BuilderDataSource, + ::State: Sync + + Send + + v0_1::data_source::BuilderDataSource + + v0_3::data_source::BuilderDataSource, +{ + async_spawn(async move { + let start_builder = |url: Url, source: Source| -> _ { + let builder_api_0_1 = hotshot_builder_api::v0_1::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let builder_api_0_3 = hotshot_builder_api::v0_3::builder::define_api::( + &Options::default(), + ) + .expect("Failed to construct the builder API"); + let mut app: App = App::with_state(source); + app.register_module(LEGACY_BUILDER_MODULE, builder_api_0_1) + .expect("Failed to register the builder API 0.1") + .register_module(MARKETPLACE_BUILDER_MODULE, builder_api_0_3) + .expect("Failed to register the builder API 0.3"); + async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) + }; + + let mut handle = Some(start_builder(url.clone(), source.clone())); + + while let Ok(event) = change_receiver.recv().await { + match event { + BuilderChange::Up if handle.is_none() => { + handle = Some(start_builder(url.clone(), source.clone())); + } + BuilderChange::Down => { + if let Some(handle) = handle.take() { + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + } + } + _ => {} + } + } + }); +} + +/// Construct a tide disco app that mocks the builder API 0.1. +/// +/// # Panics +/// If constructing and launching the builder fails for any reason +pub fn run_builder_source_0_1( + url: Url, + mut change_receiver: Receiver, + source: Source, +) where + TYPES: NodeType, + ::InstanceState: Default, + Source: Clone + Send + Sync + tide_disco::method::ReadState + 'static, + ::State: Sync + Send + v0_1::data_source::BuilderDataSource, { async_spawn(async move { let start_builder = |url: Url, source: Source| -> _ { @@ -83,7 +145,7 @@ pub fn run_builder_source( ) .expect("Failed to construct the builder API"); let mut app: App = App::with_state(source); - app.register_module("block_info", builder_api) + app.register_module(LEGACY_BUILDER_MODULE, builder_api) .expect("Failed to register the builder API"); async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) }; diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index c3270d5bfb..b264fb7dfb 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -37,7 +37,9 @@ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, Url}; -use super::{build_block, run_builder_source, BlockEntry, BuilderTask, TestBuilderImplementation}; +use super::{ + build_block, run_builder_source_0_1, BlockEntry, BuilderTask, TestBuilderImplementation, +}; use crate::test_builder::BuilderChange; pub struct RandomBuilderImplementation; @@ -90,7 +92,7 @@ where let (change_sender, change_receiver) = broadcast(128); let (task, source) = Self::create(num_storage_nodes, config, changes, change_sender).await; - run_builder_source(url, change_receiver, source); + run_builder_source_0_1(url, change_receiver, source); Box::new(task) } } diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index cd9a320b9b..10ea1132dd 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -24,14 +24,20 @@ use hotshot::{ traits::BlockPayload, types::{Event, EventType, SignatureKey}, }; -use hotshot_builder_api::v0_1::{ - block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::{BuildError, Error, Options}, - data_source::BuilderDataSource, +use hotshot_builder_api::{ + v0_1, + v0_1::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{BuildError, Error, Options}, + }, + v0_3, }; use hotshot_types::{ + bundle::Bundle, + constants::{LEGACY_BUILDER_MODULE, MARKETPLACE_BUILDER_MODULE}, traits::{ - block_contents::BlockHeader, node_implementation::NodeType, + block_contents::{BlockHeader, BuilderFee}, + node_implementation::NodeType, signature_key::BuilderSignatureKey, }, utils::BuilderCommitment, @@ -126,7 +132,80 @@ impl ReadState for SimpleBuilderSource { } #[async_trait] -impl BuilderDataSource for SimpleBuilderSource +impl v0_3::data_source::BuilderDataSource for SimpleBuilderSource +where + ::InstanceState: Default, +{ + /// To get the list of available blocks + async fn bundle( + &self, + _parent_view: u64, + _parent_hash: &VidCommitment, + _view_number: u64, + ) -> Result, BuildError> { + let transactions = self + .transactions + .read(|txns| { + Box::pin(async { + txns.values() + .filter(|txn| { + // We want transactions that are either unclaimed, or claimed long ago + // and thus probably not included, or they would've been decided on + // already and removed from the queue + txn.claimed + .map(|claim_time| claim_time.elapsed() > Duration::from_secs(30)) + .unwrap_or(true) + }) + .cloned() + .map(|txn| txn.transaction) + .collect::>() + }) + }) + .await; + + let fee_amount = 1; + let sequencing_fee: BuilderFee = BuilderFee { + fee_amount, + fee_account: self.pub_key.clone(), + fee_signature: TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace( + &self.priv_key.clone(), + fee_amount, + ) + .expect("Failed to sign fee!"), + }; + + let signature = + TYPES::BuilderSignatureKey::sign_bundle::(&self.priv_key, &transactions) + .unwrap(); + + { + // claim transactions + let mut transactions_lock = self.transactions.write().await; + let transaction_hashes = transactions.iter().map(|txn| txn.commit()); + let time = Instant::now(); + + for hash in transaction_hashes { + if let Some(txn) = transactions_lock.get_mut(&hash) { + txn.claimed = Some(time); + } + } + } + + Ok(Bundle { + transactions, + signature, + sequencing_fee, + }) + } + + /// To get the builder's address + async fn builder_address(&self) -> Result { + Ok(self.pub_key.clone()) + } +} + +#[async_trait] +impl v0_1::data_source::BuilderDataSource for SimpleBuilderSource where ::InstanceState: Default, { @@ -243,14 +322,23 @@ impl SimpleBuilderSource { where ::InstanceState: Default, { - let builder_api = hotshot_builder_api::v0_1::builder::define_api::< + let builder_api_0_1 = hotshot_builder_api::v0_1::builder::define_api::< SimpleBuilderSource, TYPES, >(&Options::default()) .expect("Failed to construct the builder API"); + + let builder_api_0_3 = hotshot_builder_api::v0_3::builder::define_api::< + SimpleBuilderSource, + TYPES, + >(&Options::default()) + .expect("Failed to construct the builder API"); + let mut app: App, Error> = App::with_state(self); - app.register_module::("block_info", builder_api) - .expect("Failed to register the builder API"); + app.register_module::(LEGACY_BUILDER_MODULE, builder_api_0_1) + .expect("Failed to register builder API 0.1") + .register_module::(MARKETPLACE_BUILDER_MODULE, builder_api_0_3) + .expect("Failed to register builder API 0.3"); async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())); } diff --git a/types/src/constants.rs b/types/src/constants.rs index 51db4cd41e..ddcddebd80 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -59,3 +59,9 @@ pub const UPGRADE_FINISH_OFFSET: u64 = UPGRADE_BEGIN_OFFSET + 5; /// the `+2` is just an artifact from the jellyfish's Plonk proof system. #[allow(clippy::cast_possible_truncation)] pub const SRS_DEGREE: usize = 2u64.pow(20) as usize + 2; + +/// The `tide` module name for the legacy builder +pub const LEGACY_BUILDER_MODULE: &str = "block_info"; + +/// The `tide` module name for the marketplace builder +pub const MARKETPLACE_BUILDER_MODULE: &str = "bundle_info"; diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index ebee7c6b75..e9133e9286 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -16,13 +16,17 @@ use std::{ use ark_serialize::SerializationError; use bitvec::prelude::*; +use committable::Committable; use ethereum_types::U256; use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tagged_base64::TaggedBase64; use super::EncodeBytes; -use crate::{utils::BuilderCommitment, vid::VidSchemeType}; +use crate::{ + bundle::Bundle, traits::node_implementation::NodeType, utils::BuilderCommitment, + vid::VidSchemeType, +}; /// Type representing stake table entries in a `StakeTable` pub trait StakeTableEntryType { @@ -227,6 +231,20 @@ pub trait BuilderSignatureKey: self.validate_builder_signature(signature, &fee_amount.to_be_bytes()) } + /// validate the bundle's signature using the builder's public key + fn validate_bundle_signature>( + &self, + bundle: Bundle, + ) -> bool where { + let commitments = bundle + .transactions + .iter() + .flat_map(|txn| <[u8; 32]>::from(txn.commit())) + .collect::>(); + + self.validate_builder_signature(&bundle.signature, &commitments) + } + /// validate signature over block information with the builder's public key fn validate_block_info_signature( &self, @@ -275,6 +293,21 @@ pub trait BuilderSignatureKey: Self::sign_builder_message(private_key, &fee_amount.to_be_bytes()) } + /// sign transactions (marketplace version) + /// # Errors + /// If unable to sign the data with the key + fn sign_bundle( + private_key: &Self::BuilderPrivateKey, + transactions: &[TYPES::Transaction], + ) -> Result { + let commitments = transactions + .iter() + .flat_map(|txn| <[u8; 32]>::from(txn.commit())) + .collect::>(); + + Self::sign_builder_message(private_key, &commitments) + } + /// sign information about offered block /// # Errors /// If unable to sign the data with the key From 45bff51c53150b0e639208f43a03b5ae27055c61 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 5 Sep 2024 14:00:10 -0400 Subject: [PATCH 1205/1393] [CATCHUP] Init Consensus with 2 views to prevent Double Vote/Propose (#3648) * Add second view param to init, spin nodes in tests up from view 0 * add last actioned views to consensus * check view before doing action * fmt and lint * log error when trying to double propose/vote * fix for DA votes * still update the da vote view * restart with actioned view from storage * lint * Allow access to consensus from byz tests, reset actions in byz behavior * lint --- example-types/src/storage_types.rs | 19 ++++- hotshot/src/lib.rs | 11 ++- hotshot/src/tasks/mod.rs | 6 +- task-impls/src/helpers.rs | 2 +- task-impls/src/network.rs | 19 +++-- testing/src/byzantine/byzantine_behaviour.rs | 11 +++ testing/src/spinning_task.rs | 5 +- testing/tests/tests_1/network_task.rs | 17 +++-- types/src/consensus.rs | 77 +++++++++++++++++++- types/src/event.rs | 2 +- 10 files changed, 147 insertions(+), 22 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index e6be6ab756..460152c16e 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -15,9 +15,13 @@ use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, + event::HotShotAction, message::Proposal, simple_certificate::QuorumCertificate, - traits::{node_implementation::NodeType, storage::Storage}, + traits::{ + node_implementation::{ConsensusTime, NodeType}, + storage::Storage, + }, utils::View, vote::HasViewNumber, }; @@ -35,6 +39,7 @@ pub struct TestStorageState { das: HashMap>>, proposals: BTreeMap>>, high_qc: Option>, + action: TYPES::Time, } impl Default for TestStorageState { @@ -44,6 +49,7 @@ impl Default for TestStorageState { das: HashMap::new(), proposals: BTreeMap::new(), high_qc: None, + action: TYPES::Time::genesis(), } } } @@ -85,6 +91,9 @@ impl TestStorage { pub async fn high_qc_cloned(&self) -> Option> { self.inner.read().await.high_qc.clone() } + pub async fn last_actioned_view(&self) -> TYPES::Time { + self.inner.read().await.action + } } #[async_trait] @@ -131,12 +140,16 @@ impl Storage for TestStorage { async fn record_action( &self, - _view: ::Time, - _action: hotshot_types::event::HotShotAction, + view: ::Time, + action: hotshot_types::event::HotShotAction, ) -> Result<()> { if self.should_return_err { bail!("Failed to append Action to storage"); } + let mut inner = self.inner.write().await; + if view > inner.action && matches!(action, HotShotAction::Vote | HotShotAction::Propose) { + inner.action = view; + } Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 090a9687de..e67e0ebd8f 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -306,9 +306,8 @@ impl, V: Versions> SystemContext { /// If it's given, we'll use it to construct the `SystemContext`. state_delta: Option>::Delta>>, - /// Starting view number that we are confident won't lead to a double vote after restart. + /// Starting view number that should be equivelant to the view the node shut down with last. start_view: TYPES::Time, + /// The view we last performed an action in. An action is Proposing or voting for + /// Either the quorum or DA. + actioned_view: TYPES::Time, /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. @@ -981,6 +983,7 @@ impl HotShotInitializer { validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::Time::new(0), + actioned_view: TYPES::Time::new(0), saved_proposals: BTreeMap::new(), high_qc, undecided_leafs: Vec::new(), @@ -1002,6 +1005,7 @@ impl HotShotInitializer { instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::Time, + actioned_view: TYPES::Time, saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, undecided_leafs: Vec>, @@ -1013,6 +1017,7 @@ impl HotShotInitializer { validated_state, state_delta: None, start_view, + actioned_view, saved_proposals, high_qc, undecided_leafs, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 007962e7d3..bf28daba3a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -36,7 +36,7 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; -use hotshot_types::message::UpgradeLock; +use hotshot_types::{consensus::Consensus, message::UpgradeLock}; use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, message::Messages, @@ -191,6 +191,7 @@ pub fn add_network_event_task< membership, filter, storage: Arc::clone(&handle.storage()), + consensus: Arc::clone(&handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; let task = Task::new( @@ -288,6 +289,7 @@ where public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, + consensus: Arc>>, ) -> Vec>; #[allow(clippy::too_many_arguments)] @@ -384,6 +386,7 @@ where let public_key = handle.public_key().clone(); let private_key = handle.private_key().clone(); let upgrade_lock = handle.hotshot.upgrade_lock.clone(); + let consensus = Arc::clone(&handle.hotshot.consensus()); let send_handle = async_spawn(async move { futures::pin_mut!(shutdown_signal); @@ -415,6 +418,7 @@ where &public_key, &private_key, &upgrade_lock, + Arc::clone(&consensus) ).await; results.reverse(); while let Some(event) = results.pop() { diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 9fb4da777c..441948887d 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -484,7 +484,7 @@ pub async fn validate_proposal_safety_and_liveness< // Update our internal storage of the proposal. The proposal is valid, so // we swallow this error and just log if it occurs. - if let Err(e) = consensus_write.update_last_proposed_view(proposal.clone()) { + if let Err(e) = consensus_write.update_proposed_view(proposal.clone()) { tracing::debug!("Internal proposal update failed; error = {e:#}"); }; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 82cd9b2e49..de6f3ba534 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -13,6 +13,7 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ + consensus::Consensus, data::{VidDisperse, VidDisperseShare}, event::{Event, EventType, HotShotAction}, message::{ @@ -226,6 +227,8 @@ pub struct NetworkEventTaskState< pub filter: fn(&Arc>) -> bool, /// Storage to store actionable events pub storage: Arc>, + /// Shared consensus state + pub consensus: Arc>>, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, } @@ -313,10 +316,12 @@ impl< let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); + let state = Arc::clone(&self.consensus); async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), storage, + state, view, ) .await @@ -337,15 +342,15 @@ impl< async fn maybe_record_action( maybe_action: Option, storage: Arc>, + state: Arc>>, view: ::Time, ) -> Result<(), ()> { if let Some(action) = maybe_action { - match storage - .write() - .await - .record_action(view, action.clone()) - .await - { + if !state.write().await.update_action(action, view) { + warn!("Already actioned {:?} in view {:?}", action, view); + return Err(()); + } + match storage.write().await.record_action(view, action).await { Ok(()) => Ok(()), Err(e) => { warn!("Not Sending {:?} because of storage error: {:?}", action, e); @@ -546,11 +551,13 @@ impl< let committee_topic = membership.committee_topic(); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); + let state = Arc::clone(&self.consensus); let upgrade_lock = self.upgrade_lock.clone(); async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), + state, view, ) .await diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index aecc3ace33..6bca9a2fc0 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -1,10 +1,12 @@ use anyhow::Context; +use async_lock::RwLock; use async_trait::async_trait; use hotshot::tasks::EventTransformerState; use hotshot::types::{SignatureKey, SystemContextHandle}; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::network::test::{ModifierClosure, NetworkEventTaskStateModifier}; use hotshot_task_impls::network::NetworkEventTaskState; +use hotshot_types::consensus::Consensus; use hotshot_types::message::UpgradeLock; use hotshot_types::simple_vote::QuorumVote; use hotshot_types::traits::node_implementation::ConsensusTime; @@ -39,12 +41,15 @@ impl, V: Versions> EventTransforme _public_key: &TYPES::SignatureKey, _private_key: &::PrivateKey, _upgrade_lock: &UpgradeLock, + consensus: Arc>>, ) -> Vec> { match event { HotShotEvent::QuorumProposalSend(proposal, signature) => { let mut result = Vec::new(); for n in 0..self.multiplier { + // reset last actioned view so we actually propose multiple times + consensus.write().await.reset_actions(); let mut modified_proposal = proposal.clone(); modified_proposal.data.view_number += n * self.increment; @@ -80,6 +85,7 @@ impl, V: Versions> EventTransforme _public_key: &TYPES::SignatureKey, _private_key: &::PrivateKey, _upgrade_lock: &UpgradeLock, + _consensus: Arc>>, ) -> Vec> { match event { HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { @@ -155,6 +161,7 @@ impl + std::fmt::Debug, V: Version _public_key: &TYPES::SignatureKey, _private_key: &::PrivateKey, _upgrade_lock: &UpgradeLock, + _consensus: Arc>>, ) -> Vec> { match event { HotShotEvent::QuorumProposalSend(proposal, sender) => { @@ -195,6 +202,7 @@ impl + std::fmt::Debug, V: Version _public_key: &TYPES::SignatureKey, _private_key: &::PrivateKey, _upgrade_lock: &UpgradeLock, + _consensus: Arc>>, ) -> Vec> { if let HotShotEvent::DacSend(cert, sender) = event { self.total_da_certs_sent_from_node += 1; @@ -264,6 +272,7 @@ impl + std::fmt::Debug, V: Version _public_key: &TYPES::SignatureKey, _private_key: &::PrivateKey, _upgrade_lock: &UpgradeLock, + _consensus: Arc>>, ) -> Vec> { vec![event.clone()] } @@ -291,6 +300,7 @@ impl + std::fmt::Debug, V: Version public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, + _consensus: Arc>>, ) -> Vec> { if let HotShotEvent::QuorumVoteSend(vote) = event { let new_view = vote.view_number + self.view_increment; @@ -323,6 +333,7 @@ impl + std::fmt::Debug, V: Version membership, filter, storage: Arc::clone(&handle.storage()), + consensus: Arc::clone(&handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), }; let modified_network_state = NetworkEventTaskStateModifier { diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 5d94297f60..4f6743fee7 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -20,6 +20,7 @@ use hotshot_example_types::{ storage_types::TestStorage, testable_delay::DelayConfig, }; +use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ data::Leaf, event::Event, @@ -134,7 +135,8 @@ where self.last_decided_leaf.clone(), TestInstanceState::new(self.async_delay_config.clone()), None, - view_number, + TYPES::Time::genesis(), + TYPES::Time::genesis(), BTreeMap::new(), self.high_qc.clone(), Vec::new(), @@ -212,6 +214,7 @@ where TestInstanceState::new(self.async_delay_config.clone()), None, view_number, + read_storage.last_actioned_view().await, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( QuorumCertificate::genesis( diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index a3b3245533..0b97676722 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -17,8 +17,8 @@ use hotshot_task_impls::{ network::{self, NetworkEventTaskState}, }; use hotshot_testing::{ - test_builder::TestDescription, test_task::add_network_message_test_task, - view_generator::TestViewGenerator, + helpers::build_system_handle, test_builder::TestDescription, + test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; use hotshot_types::{ data::ViewNumber, @@ -28,7 +28,6 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; - // Test that the event task sends a message, and the message task receives it // and emits the proper event #[cfg(test)] @@ -46,12 +45,15 @@ async fn test_network_task() { TestDescription::default_multiple_rounds(); let upgrade_lock = UpgradeLock::::new(); let node_id = 1; - + let handle = build_system_handle::(node_id) + .await + .0; let launcher = builder.gen_launcher(node_id); let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); + let consensus = handle.hotshot.consensus(); let config = launcher.resource_generator.config.clone(); let public_key = config.my_own_validator_config.public_key; let known_nodes_with_stake = config.known_nodes_with_stake.clone(); @@ -70,6 +72,7 @@ async fn test_network_task() { filter: network::quorum_filter, upgrade_lock: upgrade_lock.clone(), storage, + consensus, }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); @@ -120,11 +123,14 @@ async fn test_network_storage_fail() { let builder: TestDescription = TestDescription::default_multiple_rounds(); let node_id = 1; - + let handle = build_system_handle::(node_id) + .await + .0; let launcher = builder.gen_launcher(node_id); let network = (launcher.resource_generator.channel_generator)(node_id).await; + let consensus = handle.hotshot.consensus(); let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); storage.write().await.should_return_err = true; let config = launcher.resource_generator.config.clone(); @@ -146,6 +152,7 @@ async fn test_network_storage_fail() { filter: network::quorum_filter, upgrade_lock: upgrade_lock.clone(), storage, + consensus, }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 48ae1145ef..0c85127f8b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -23,6 +23,7 @@ pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, + event::HotShotAction, message::Proposal, simple_certificate::{DaCertificate, QuorumCertificate}, traits::{ @@ -230,6 +231,41 @@ impl<'a, TYPES: NodeType> Drop for ConsensusUpgradableReadLockGuard<'a, TYPES> { } } +/// A bundle of views that we have most recently performed some action +#[derive(Debug, Clone, Copy)] +struct HotShotActionViews { + /// View we last proposed in to the Quorum + proposed: T, + /// View we last voted in for a QuorumProposal + voted: T, + /// View we last proposed to the DA committee + da_proposed: T, + /// View we lasted voted for DA proposal + da_vote: T, +} + +impl Default for HotShotActionViews { + fn default() -> Self { + let genesis = T::genesis(); + Self { + proposed: genesis, + voted: genesis, + da_proposed: genesis, + da_vote: genesis, + } + } +} +impl HotShotActionViews { + /// Create HotShotActionViews from a view number + fn from_view(view: T) -> Self { + Self { + proposed: view, + voted: view, + da_proposed: view, + da_vote: view, + } + } +} /// A reference to the consensus algorithm /// /// This will contain the state of all rounds. @@ -263,6 +299,11 @@ pub struct Consensus { /// - includes the MOST RECENT decided leaf saved_leaves: CommitmentMap>, + /// Bundle of views which we performed the most recent action + /// visibible to the network. Actions are votes and proposals + /// for DA and Quorum + last_actions: HotShotActionViews, + /// Saved payloads. /// /// Encoded transactions for every view if we got a payload for that view. @@ -350,6 +391,7 @@ impl Consensus { cur_view: TYPES::Time, locked_view: TYPES::Time, last_decided_view: TYPES::Time, + last_actioned_view: TYPES::Time, last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, saved_payloads: BTreeMap>, @@ -363,6 +405,7 @@ impl Consensus { cur_view, last_decided_view, last_proposals, + last_actions: HotShotActionViews::from_view(last_actioned_view), locked_view, saved_leaves, saved_payloads, @@ -433,11 +476,43 @@ impl Consensus { Ok(()) } + /// Update the last actioned view internally for votes and proposals + /// + /// Returns true if the action is for a newer view than the last action of that type + pub fn update_action(&mut self, action: HotShotAction, view: TYPES::Time) -> bool { + let old_view = match action { + HotShotAction::Vote => &mut self.last_actions.voted, + HotShotAction::Propose => &mut self.last_actions.proposed, + HotShotAction::DaPropose => &mut self.last_actions.da_proposed, + HotShotAction::DaVote => { + if view > self.last_actions.da_vote { + self.last_actions.da_vote = view; + } + // TODO Add logic to prevent double voting. For now the simple check if + // the last voted view is less than the view we are trying to vote doesn't work + // becuase the leader of view n + 1 may propose to the DA (and we would vote) + // before the leader of view n. + return true; + } + _ => return true, + }; + if view > *old_view { + *old_view = view; + return true; + } + false + } + + /// reset last actions to genesis so we can resend events in tests + pub fn reset_actions(&mut self) { + self.last_actions = HotShotActionViews::default(); + } + /// Update the last proposal. /// /// # Errors /// Can return an error when the new view_number is not higher than the existing proposed view number. - pub fn update_last_proposed_view( + pub fn update_proposed_view( &mut self, proposal: Proposal>, ) -> Result<()> { diff --git a/types/src/event.rs b/types/src/event.rs index 88c429d5f0..8f027b7370 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -173,7 +173,7 @@ pub enum EventType { /// A message destined for external listeners was received ExternalMessageReceived(Vec), } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy)] /// A list of actions that we track for nodes pub enum HotShotAction { /// A quorum vote was sent From e1ccd56146bfd01e3300e34aa5e74be2959c1d04 Mon Sep 17 00:00:00 2001 From: Philippe Camacho Date: Fri, 6 Sep 2024 12:41:37 -0400 Subject: [PATCH 1206/1393] Light client contract related changes (#3655) * Add struct StakeTableState. * Remove fields from the GenericLightClientState * Remove no longer used fields from the generalPublicInput that have been removed from the light client state --------- Co-authored-by: Alysia Huggins Co-authored-by: Alex Xiong --- hotshot/src/tasks/mod.rs | 18 ++-- task-impls/src/network.rs | 6 +- testing/src/byzantine/byzantine_behaviour.rs | 33 ++++--- testing/src/spinning_task.rs | 3 +- testing/src/test_builder.rs | 21 ++--- types/src/light_client.rs | 93 +++++++++++++------- 6 files changed, 105 insertions(+), 69 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bf28daba3a..5f7964c24b 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,11 +8,7 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use crate::{ - tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, -}; +use std::{fmt::Debug, sync::Arc, time::Duration}; use async_broadcast::broadcast; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -36,20 +32,24 @@ use hotshot_task_impls::{ vid::VidTaskState, view_sync::ViewSyncTaskState, }; -use hotshot_types::{consensus::Consensus, message::UpgradeLock}; use hotshot_types::{ + consensus::Consensus, constants::EVENT_CHANNEL_SIZE, - message::Messages, + message::{Messages, UpgradeLock}, request_response::RequestReceiver, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; -use std::fmt::Debug; -use std::{sync::Arc, time::Duration}; use vbs::version::StaticVersionType; +use crate::{ + tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, +}; + /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index de6f3ba534..1ea7a3cc1d 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -622,12 +622,14 @@ impl< /// A module with test helpers pub mod test { + use std::ops::{Deref, DerefMut}; + + use async_trait::async_trait; + use super::{ Arc, ConnectedNetwork, HotShotEvent, MessageKind, NetworkEventTaskState, NodeType, Receiver, Result, Sender, Storage, TaskState, TransmitType, Versions, }; - use async_trait::async_trait; - use std::ops::{Deref, DerefMut}; /// A dynamic type alias for a function that takes the result of `NetworkEventTaskState::parse_event` /// and changes it before transmitting on the network. diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 6bca9a2fc0..27ac3a0161 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -1,22 +1,29 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + use anyhow::Context; use async_lock::RwLock; use async_trait::async_trait; -use hotshot::tasks::EventTransformerState; -use hotshot::types::{SignatureKey, SystemContextHandle}; -use hotshot_task_impls::events::HotShotEvent; -use hotshot_task_impls::network::test::{ModifierClosure, NetworkEventTaskStateModifier}; -use hotshot_task_impls::network::NetworkEventTaskState; -use hotshot_types::consensus::Consensus; -use hotshot_types::message::UpgradeLock; -use hotshot_types::simple_vote::QuorumVote; -use hotshot_types::traits::node_implementation::ConsensusTime; +use hotshot::{ + tasks::EventTransformerState, + types::{SignatureKey, SystemContextHandle}, +}; +use hotshot_task_impls::{ + events::HotShotEvent, + network::{ + test::{ModifierClosure, NetworkEventTaskStateModifier}, + NetworkEventTaskState, + }, +}; use hotshot_types::{ + consensus::Consensus, data::QuorumProposal, - message::Proposal, - traits::node_implementation::{NodeImplementation, NodeType, Versions}, + message::{Proposal, UpgradeLock}, + simple_vote::QuorumVote, + traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; #[derive(Debug)] /// An `EventTransformerState` that multiplies `QuorumProposalSend` events, incrementing the view number of the proposal diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 4f6743fee7..8d5d8420ae 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -20,14 +20,13 @@ use hotshot_example_types::{ storage_types::TestStorage, testable_delay::DelayConfig, }; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ data::Leaf, event::Event, simple_certificate::QuorumCertificate, traits::{ network::ConnectedNetwork, - node_implementation::{NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }, vote::HasViewNumber, ValidatorConfig, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index a11ceb94b9..2cb521b08b 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -6,16 +6,6 @@ use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; -use super::{ - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - txn_task::TxnTaskDescription, -}; -use crate::{ - spinning_task::SpinningTaskDescription, - test_launcher::{Network, ResourceGenerators, TestLauncher}, - view_sync_task::ViewSyncTaskDescription, -}; use hotshot::{ tasks::EventTransformerState, traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, @@ -33,6 +23,17 @@ use hotshot_types::{ }; use tide_disco::Url; use vec1::Vec1; + +use super::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + txn_task::TxnTaskDescription, +}; +use crate::{ + spinning_task::SpinningTaskDescription, + test_launcher::{Network, ResourceGenerators, TestLauncher}, + view_sync_task::ViewSyncTaskDescription, +}; /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { diff --git a/types/src/light_client.rs b/types/src/light_client.rs index debb265495..1fac6614e4 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -22,6 +22,8 @@ use tagged_base64::tagged; pub type CircuitField = ark_ed_on_bn254::Fq; /// Concrete type for light client state pub type LightClientState = GenericLightClientState; +/// Concrete type for stake table state +pub type StakeTableState = GenericStakeTableState; /// Signature scheme pub type StateSignatureScheme = jf_signature::schnorr::SchnorrSignatureScheme; @@ -80,39 +82,54 @@ pub struct GenericLightClientState { pub block_height: usize, /// Root of the block commitment tree pub block_comm_root: F, - /// Commitment for fee ledger - pub fee_ledger_comm: F, - /// Commitment for the stake table - pub stake_table_comm: (F, F, F), } -impl From> for [F; 7] { +impl From> for [F; 3] { fn from(state: GenericLightClientState) -> Self { [ F::from(state.view_number as u64), F::from(state.block_height as u64), state.block_comm_root, - state.fee_ledger_comm, - state.stake_table_comm.0, - state.stake_table_comm.1, - state.stake_table_comm.2, ] } } -impl From<&GenericLightClientState> for [F; 7] { + +impl From<&GenericLightClientState> for [F; 3] { fn from(state: &GenericLightClientState) -> Self { [ F::from(state.view_number as u64), F::from(state.block_height as u64), state.block_comm_root, - state.fee_ledger_comm, - state.stake_table_comm.0, - state.stake_table_comm.1, - state.stake_table_comm.2, ] } } +/// Stake table state +#[tagged("STAKE_TABLE_STATE")] +#[derive( + Clone, + Debug, + CanonicalSerialize, + CanonicalDeserialize, + Default, + Eq, + PartialEq, + PartialOrd, + Ord, + Hash, + Copy, +)] +pub struct GenericStakeTableState { + /// Commitments to the table column for BLS public keys + pub bls_key_comm: F, + /// Commitments to the table column for Schnorr public keys + pub schnorr_key_comm: F, + /// Commitments to the table column for Stake amounts + pub amount_comm: F, + /// threshold + pub threshold: F, +} + impl std::ops::Deref for StateKeyPair { type Target = schnorr::KeyPair; @@ -161,6 +178,22 @@ impl From> for StateKeyPair { #[derive(Clone, Debug)] pub struct GenericPublicInput(Vec); +impl GenericPublicInput { + /// Construct a public input from light client state and static stake table state + pub fn new(lc_state: GenericLightClientState, st_state: GenericStakeTableState) -> Self { + let lc_state_f: [F; 3] = lc_state.into(); + Self(vec![ + lc_state_f[0], + lc_state_f[1], + lc_state_f[2], + st_state.bls_key_comm, + st_state.schnorr_key_comm, + st_state.amount_comm, + st_state.threshold, + ]) + } +} + impl AsRef<[F]> for GenericPublicInput { fn as_ref(&self) -> &[F] { &self.0 @@ -174,57 +207,51 @@ impl From> for GenericPublicInput { } impl GenericPublicInput { - /// Return the threshold - #[must_use] - pub fn threshold(&self) -> F { - self.0[0] - } - /// Return the view number of the light client state #[must_use] pub fn view_number(&self) -> F { - self.0[1] + self.0[0] } /// Return the block height of the light client state #[must_use] pub fn block_height(&self) -> F { - self.0[2] + self.0[1] } /// Return the block commitment root of the light client state #[must_use] pub fn block_comm_root(&self) -> F { - self.0[3] - } - - /// Return the fee ledger commitment of the light client state - #[must_use] - pub fn fee_ledger_comm(&self) -> F { - self.0[4] + self.0[2] } /// Return the stake table commitment of the light client state #[must_use] pub fn stake_table_comm(&self) -> (F, F, F) { - (self.0[5], self.0[6], self.0[7]) + (self.0[3], self.0[4], self.0[5]) } /// Return the qc key commitment of the light client state #[must_use] pub fn qc_key_comm(&self) -> F { - self.0[5] + self.0[3] } /// Return the state key commitment of the light client state #[must_use] pub fn state_key_comm(&self) -> F { - self.0[6] + self.0[4] } /// Return the stake amount commitment of the light client state #[must_use] pub fn stake_amount_comm(&self) -> F { - self.0[7] + self.0[5] + } + + /// Return the threshold + #[must_use] + pub fn threshold(&self) -> F { + self.0[6] } } From e7a37a814712e063dc2a97e34b694411da7ebaa4 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 10 Sep 2024 07:16:42 -0400 Subject: [PATCH 1207/1393] Include block header in leaf commitment (#3647) --- examples/infra/mod.rs | 2 +- hotshot/src/lib.rs | 30 +++++--- hotshot/src/tasks/mod.rs | 3 +- task-impls/src/consensus/handlers.rs | 37 +++++---- task-impls/src/helpers.rs | 22 ++++-- task-impls/src/quorum_proposal/handlers.rs | 11 +-- .../src/quorum_proposal_recv/handlers.rs | 6 +- task-impls/src/quorum_vote/mod.rs | 11 +-- testing/src/consistency_task.rs | 77 +++++++++++++++---- testing/src/helpers.rs | 16 ++-- testing/src/overall_safety_task.rs | 2 +- testing/src/spinning_task.rs | 4 +- testing/src/test_builder.rs | 5 +- testing/src/test_runner.rs | 9 ++- testing/src/test_task.rs | 4 +- testing/src/view_generator.rs | 21 +++-- testing/src/view_sync_task.rs | 2 +- .../tests_1/quorum_proposal_recv_task.rs | 18 +++-- testing/tests/tests_1/quorum_proposal_task.rs | 48 +++++++----- testing/tests/tests_1/quorum_vote_task.rs | 12 ++- testing/tests/tests_1/test_with_failures_2.rs | 2 +- .../tests_1/upgrade_task_with_proposal.rs | 22 ++++-- .../tests/tests_1/upgrade_task_with_vote.rs | 6 +- .../tests/tests_1/vote_dependency_handle.rs | 6 +- testing/tests/tests_2/byzantine_tests.rs | 4 +- types/src/consensus.rs | 15 ++-- types/src/data.rs | 52 +++++++++++-- types/src/message.rs | 30 +++++++- types/src/simple_vote.rs | 18 +++++ 29 files changed, 350 insertions(+), 145 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 77c3ba3fb6..20b0ed8a56 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -368,7 +368,7 @@ pub trait RunDa< /// Note: sequencing leaf does not have state, so does not return state async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { let initializer = - hotshot::HotShotInitializer::::from_genesis(TestInstanceState::default()) + hotshot::HotShotInitializer::::from_genesis::(TestInstanceState::default()) .await .expect("Couldn't generate genesis block"); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e67e0ebd8f..3a1a2cd086 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -37,7 +37,6 @@ use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; -use committable::Committable; use futures::join; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; @@ -194,7 +193,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, @@ -223,6 +222,7 @@ impl, V: Versions> SystemContext`] with the given configuration options. @@ -233,7 +233,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, @@ -279,7 +279,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext(&validated_state, self.instance_state.as_ref()) .await, ); @@ -596,7 +599,8 @@ impl, V: Versions> SystemContext HotShotInitializer { /// initialize from genesis /// # Errors /// If we are unable to apply the genesis block to the default state - pub async fn from_genesis( + pub async fn from_genesis( instance_state: TYPES::InstanceState, ) -> Result> { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); - let high_qc = QuorumCertificate::genesis(&validated_state, &instance_state).await; + let high_qc = QuorumCertificate::genesis::(&validated_state, &instance_state).await; Ok(Self { inner: Leaf::genesis(&validated_state, &instance_state).await, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5f7964c24b..9bab74694a 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -318,7 +318,8 @@ where metrics, storage, marketplace_config, - ); + ) + .await; let consensus_registry = ConsensusTaskRegistry::new(); let network_registry = NetworkTaskRegistry::new(); diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 0668b87360..36f6831e5a 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -14,7 +14,6 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; -use committable::Committable; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus, View}, @@ -37,7 +36,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use vbs::version::{StaticVersionType, Version}; +use vbs::version::StaticVersionType; use super::ConsensusTaskState; use crate::{ @@ -67,7 +66,7 @@ pub async fn create_and_send_proposal( proposal_cert: Option>, round_start_delay: u64, instance_state: Arc, - version: Version, + upgrade_lock: UpgradeLock, id: u64, ) -> Result<()> { let consensus_read = consensus.read().await; @@ -81,6 +80,11 @@ pub async fn create_and_send_proposal( .context("Failed to get vid share")?; drop(consensus_read); + let version = upgrade_lock + .version(view) + .await + .context("Failed to get version number")?; + let block_header = if version < V::Marketplace::VERSION { TYPES::BlockHeader::new_legacy( state.as_ref(), @@ -122,9 +126,12 @@ pub async fn create_and_send_proposal( let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - ensure!(proposed_leaf.parent_commitment() == parent_leaf.commit()); + ensure!(proposed_leaf.parent_commitment() == parent_leaf.commit(&upgrade_lock).await); - let signature = TYPES::SignatureKey::sign(&private_key, proposed_leaf.commit().as_ref())?; + let signature = TYPES::SignatureKey::sign( + &private_key, + proposed_leaf.commit(&upgrade_lock).await.as_ref(), + )?; let message = Proposal { data: proposal, @@ -221,8 +228,6 @@ pub async fn publish_proposal_from_commitment_and_metadata( public_key, @@ -237,7 +242,7 @@ pub async fn publish_proposal_from_commitment_and_metadata::create_signed_vote( QuorumData { - leaf_commit: proposed_leaf.commit(), + leaf_commit: proposed_leaf.commit(upgrade_lock).await, }, view, &public_key, @@ -861,7 +868,7 @@ pub async fn update_state_and_vote_if_able< cur_view, View { view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), + leaf: proposed_leaf.commit(upgrade_lock).await, state: Arc::clone(&state), delta: Some(Arc::clone(&delta)), }, @@ -869,7 +876,9 @@ pub async fn update_state_and_vote_if_able< ) { tracing::trace!("{e:?}"); } - consensus.update_saved_leaves(proposed_leaf.clone()); + consensus + .update_saved_leaves(proposed_leaf.clone(), upgrade_lock) + .await; let new_leaves = consensus.saved_leaves().clone(); let new_state = consensus.validated_state_map().clone(); drop(consensus); diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 441948887d..67f78ac670 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -108,7 +108,7 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem).is_ok() { + if quorum_proposal.validate_signature(&mem, upgrade_lock).await.is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -140,7 +140,7 @@ pub(crate) async fn fetch_proposal( let view = View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(), + leaf: leaf.commit(upgrade_lock).await, state, delta: None, }, @@ -149,7 +149,9 @@ pub(crate) async fn fetch_proposal( tracing::trace!("{e:?}"); } - consensus_write.update_saved_leaves(leaf.clone()); + consensus_write + .update_saved_leaves(leaf.clone(), upgrade_lock) + .await; broadcast_event( HotShotEvent::ValidatedStateUpdated(view_number, view).into(), &event_sender, @@ -410,7 +412,7 @@ pub(crate) async fn parent_leaf_and_state( let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); let parent_leaf = leaf.clone(); - let original_parent_hash = parent_leaf.commit(); + let original_parent_hash = parent_leaf.commit(upgrade_lock).await; let mut next_parent_hash = original_parent_hash; // Walk back until we find a decide @@ -460,7 +462,7 @@ pub async fn validate_proposal_safety_and_liveness< let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(), + proposed_leaf.parent_commitment() == parent_leaf.commit(&upgrade_lock).await, "Proposed leaf does not extend the parent leaf." ); @@ -469,7 +471,7 @@ pub async fn validate_proposal_safety_and_liveness< ); let view = View { view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), + leaf: proposed_leaf.commit(&upgrade_lock).await, state, delta: None, // May be updated to `Some` in the vote task. }, @@ -480,7 +482,9 @@ pub async fn validate_proposal_safety_and_liveness< if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { tracing::trace!("{e:?}"); } - consensus_write.update_saved_leaves(proposed_leaf.clone()); + consensus_write + .update_saved_leaves(proposed_leaf.clone(), &upgrade_lock) + .await; // Update our internal storage of the proposal. The proposal is valid, so // we swallow this error and just log if it occurs. @@ -602,7 +606,9 @@ pub async fn validate_proposal_view_and_certs( ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - proposal.validate_signature(quorum_membership)?; + proposal + .validate_signature(quorum_membership, upgrade_lock) + .await?; // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.view_number() != view - 1 { diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 5a73bec2d1..867b6298a9 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -13,7 +13,6 @@ use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -use committable::Committable; use hotshot_task::{ dependency::{Dependency, EventDependency}, dependency_task::HandleDepOutput, @@ -211,13 +210,15 @@ impl ProposalDependencyHandle { let proposed_leaf = Leaf::from_quorum_proposal(&proposal); ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(), + proposed_leaf.parent_commitment() == parent_leaf.commit(&self.upgrade_lock).await, "Proposed leaf parent does not equal high qc" ); - let signature = - TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) - .context("Failed to compute proposed_leaf.commit()")?; + let signature = TYPES::SignatureKey::sign( + &self.private_key, + proposed_leaf.commit(&self.upgrade_lock).await.as_ref(), + ) + .context("Failed to compute proposed_leaf.commit()")?; let message = Proposal { data: proposal, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 1658910986..7145c3bde8 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -55,7 +55,7 @@ async fn validate_proposal_liveness + 'static, V: Versions> let view = View { view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(), + leaf: proposed_leaf.commit(&self.upgrade_lock).await, state: Arc::clone(&state), delta: Some(Arc::clone(&delta)), }, @@ -167,7 +166,9 @@ impl + 'static, V: Versions> { tracing::trace!("{e:?}"); } - consensus_writer.update_saved_leaves(proposed_leaf.clone()); + consensus_writer + .update_saved_leaves(proposed_leaf.clone(), &self.upgrade_lock) + .await; // Kick back our updated structures for downstream usage. let new_leaves = consensus_writer.saved_leaves().clone(); @@ -209,7 +210,7 @@ impl + 'static, V: Versions> // Create and send the vote. let vote = QuorumVote::::create_signed_vote( QuorumData { - leaf_commit: leaf.commit(), + leaf_commit: leaf.commit(&self.upgrade_lock).await, }, self.view_number, &self.public_key, @@ -285,7 +286,7 @@ impl + 'static, V: Versions> Handl } else { payload_commitment = Some(proposal_payload_comm); } - let parent_commitment = parent_leaf.commit(); + let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; let proposed_leaf = Leaf::from_quorum_proposal(proposal); if proposed_leaf.parent_commitment() != parent_commitment { warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 2d05c7c4df..9f9c2ad1e8 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -5,15 +5,15 @@ // along with the HotShot repository. If not, see . #![allow(clippy::unwrap_or_default)] -use std::collections::BTreeMap; +use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::{bail, ensure, Context, Result}; use async_trait::async_trait; -use committable::Committable; use hotshot_types::{ data::Leaf, event::{Event, EventType}, - traits::node_implementation::NodeType, + message::UpgradeLock, + traits::node_implementation::{ConsensusTime, NodeType, Versions}, }; use crate::{ @@ -55,7 +55,36 @@ fn sanitize_node_map( } /// For a NodeMapSanitized, we validate that each leaf extends the preceding leaf. -fn validate_node_map(node_map: &NodeMapSanitized) -> Result<()> { +async fn validate_node_map( + node_map: &NodeMapSanitized, +) -> Result<()> { + // We first scan 3-chains to find an upgrade certificate that has reached a decide. + let leaf_triples = node_map + .values() + .zip(node_map.values().skip(1)) + .zip(node_map.values().skip(2)) + .map(|((a, b), c)| (a, b, c)); + + let mut decided_upgrade_certificate = None; + let mut view_decided = TYPES::Time::new(0); + + for (grandparent, _parent, child) in leaf_triples { + if let Some(cert) = grandparent.upgrade_certificate() { + if cert.data.decide_by <= child.view_number() { + decided_upgrade_certificate = Some(cert); + view_decided = child.view_number(); + + break; + } + } + } + + // To mimic consensus to use e.g. the `extends_upgrade` method, + // we cannot immediately put the upgrade certificate in the lock. + // + // Instead, we initialize an empty lock and add the certificate in the appropriate view. + let upgrade_lock = UpgradeLock::::new(); + let leaf_pairs = node_map.values().zip(node_map.values().skip(1)); // Check that the child leaf follows the parent, possibly with a gap. @@ -63,13 +92,28 @@ fn validate_node_map(node_map: &NodeMapSanitized) -> Res ensure!( child.justify_qc().view_number >= parent.view_number(), "The node has provided leaf:\n\n{child:?}\n\nbut its quorum certificate points to a view before the most recent leaf:\n\n{parent:?}" - ); + ); + + child + .extends_upgrade(parent, &upgrade_lock.decided_upgrade_certificate) + .await + .context("Leaf {child} does not extend its parent {parent}")?; + // We want to make sure the commitment matches, + // but allow for the possibility that we may have skipped views in between. if child.justify_qc().view_number == parent.view_number() - && child.justify_qc().data.leaf_commit != parent.commit() + && child.justify_qc().data.leaf_commit != parent.commit(&upgrade_lock).await { bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); } + + if child.view_number() == view_decided { + upgrade_lock + .decided_upgrade_certificate + .write() + .await + .clone_from(&decided_upgrade_certificate); + } } Ok(()) @@ -105,12 +149,13 @@ pub type ViewMap = BTreeMap<::Time, BTreeMap( +async fn invert_network_map( network_map: &NetworkMapSanitized, ) -> Result> { let mut inverted_map = BTreeMap::new(); for (node_id, node_map) in network_map.iter() { - validate_node_map(node_map) + validate_node_map::(node_map) + .await .context(format!("Node {node_id} has an invalid leaf history"))?; // validate each node's leaf map @@ -153,20 +198,22 @@ fn sanitize_view_map( } /// Data availability task state -pub struct ConsistencyTask { +pub struct ConsistencyTask { /// A map from node ids to (leaves keyed on view number) pub consensus_leaves: NetworkMap, /// safety task requirements pub safety_properties: OverallSafetyPropertiesDescription, /// whether we should have seen an upgrade certificate or not pub ensure_upgrade: bool, + /// phantom marker + pub _pd: PhantomData, } -impl ConsistencyTask { - pub fn validate(&self) -> Result<()> { +impl ConsistencyTask { + pub async fn validate(&self) -> Result<()> { let sanitized_network_map = sanitize_network_map(&self.consensus_leaves)?; - let inverted_map = invert_network_map(&sanitized_network_map)?; + let inverted_map = invert_network_map::(&sanitized_network_map).await?; let sanitized_view_map = sanitize_view_map(&inverted_map)?; @@ -184,7 +231,7 @@ impl ConsistencyTask { } #[async_trait] -impl TestTaskState for ConsistencyTask { +impl TestTaskState for ConsistencyTask { type Event = Event; /// Handles an event from one of multiple receivers. @@ -206,8 +253,8 @@ impl TestTaskState for ConsistencyTask { Ok(()) } - fn check(&self) -> TestResult { - if let Err(e) = self.validate() { + async fn check(&self) -> TestResult { + if let Err(e) = self.validate().await { return TestResult::Fail(Box::new(e)); } diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 8ffe9fafe7..54d6039f33 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -73,7 +73,7 @@ pub async fn build_system_handle< let marketplace_config = (launcher.resource_generator.marketplace_config)(node_id); let config = launcher.resource_generator.config.clone(); - let initializer = HotShotInitializer::::from_genesis(TestInstanceState::new( + let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( launcher.metadata.async_delay_config, )) .await @@ -380,7 +380,7 @@ pub async fn build_vote, V: Versio let leaf: Leaf<_> = Leaf::from_quorum_proposal(&proposal); let vote = QuorumVote::::create_signed_vote( QuorumData { - leaf_commit: leaf.commit(), + leaf_commit: leaf.commit(&handle.hotshot.upgrade_lock).await, }, view, &handle.public_key(), @@ -410,18 +410,22 @@ where } /// This function will create a fake [`View`] from a provided [`Leaf`]. -pub fn build_fake_view_with_leaf(leaf: Leaf) -> View { - build_fake_view_with_leaf_and_state(leaf, TestValidatedState::default()) +pub async fn build_fake_view_with_leaf( + leaf: Leaf, + upgrade_lock: &UpgradeLock, +) -> View { + build_fake_view_with_leaf_and_state(leaf, TestValidatedState::default(), upgrade_lock).await } /// This function will create a fake [`View`] from a provided [`Leaf`] and `state`. -pub fn build_fake_view_with_leaf_and_state( +pub async fn build_fake_view_with_leaf_and_state( leaf: Leaf, state: TestValidatedState, + upgrade_lock: &UpgradeLock, ) -> View { View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(), + leaf: leaf.commit(upgrade_lock).await, state: state.into(), delta: None, }, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 97ea7fcdf1..a4680d5000 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -234,7 +234,7 @@ impl, V: Versions> TestTas Ok(()) } - fn check(&self) -> TestResult { + async fn check(&self) -> TestResult { if let Some(e) = &self.error { return TestResult::Fail(e.clone()); } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 8d5d8420ae..ec9bda7660 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -216,7 +216,7 @@ where read_storage.last_actioned_view().await, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( - QuorumCertificate::genesis( + QuorumCertificate::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -302,7 +302,7 @@ where Ok(()) } - fn check(&self) -> TestResult { + async fn check(&self) -> TestResult { TestResult::Pass } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 2cb521b08b..098d12512d 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -119,7 +119,7 @@ pub async fn create_test_handle< storage: I::Storage, marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { - let initializer = HotShotInitializer::::from_genesis(TestInstanceState::new( + let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( metadata.async_delay_config, )) .await @@ -185,7 +185,8 @@ pub async fn create_test_handle< ConsensusMetricsValue::default(), storage, marketplace_config, - ); + ) + .await; hotshot.run_tasks().await } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 6167d23698..e616e270c8 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -184,7 +184,7 @@ where &TestInstanceState::default(), ) .await, - high_qc: QuorumCertificate::genesis( + high_qc: QuorumCertificate::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -209,9 +209,10 @@ where consensus_leaves: BTreeMap::new(), safety_properties: self.launcher.metadata.overall_safety_properties, ensure_upgrade: self.launcher.metadata.upgrade_view.is_some(), + _pd: PhantomData, }; - let consistency_task = TestTask::>::new( + let consistency_task = TestTask::>::new( consistency_task_state, event_rxs.clone(), test_receiver.clone(), @@ -481,7 +482,7 @@ where }, ); } else { - let initializer = HotShotInitializer::::from_genesis( + let initializer = HotShotInitializer::::from_genesis::( TestInstanceState::new(self.launcher.metadata.async_delay_config.clone()), ) .await @@ -615,6 +616,7 @@ where storage, marketplace_config, ) + .await } /// add a specific node with a config @@ -654,6 +656,7 @@ where internal_channel, external_channel, ) + .await } } diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 0dcc56c068..3559c4cee9 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -44,7 +44,7 @@ pub trait TestTaskState: Send { async fn handle_event(&mut self, (event, id): (Self::Event, usize)) -> Result<()>; /// Check the result of the test. - fn check(&self) -> TestResult; + async fn check(&self) -> TestResult; } /// A basic task which loops waiting for events to come from `event_receiver` @@ -88,7 +88,7 @@ impl TestTask { spawn(async move { loop { if let Ok(TestEvent::Shutdown) = self.test_receiver.try_recv() { - break self.state.check(); + break self.state.check().await; } let mut messages = Vec::new(); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 7c6c8af2ee..21d5eb7c66 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -12,7 +12,6 @@ use std::{ task::{Context, Poll}, }; -use committable::Committable; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ @@ -128,7 +127,7 @@ impl TestView { let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), view_number: genesis_view, - justify_qc: QuorumCertificate::genesis( + justify_qc: QuorumCertificate::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -160,8 +159,11 @@ impl TestView { transactions: transactions.clone(), }); - let signature = ::sign(&private_key, leaf.commit().as_ref()) - .expect("Failed to sign leaf commitment!"); + let signature = ::sign( + &private_key, + leaf.commit(&upgrade_lock).await.as_ref(), + ) + .expect("Failed to sign leaf commitment!"); let quorum_proposal = Proposal { data: quorum_proposal_inner, @@ -208,7 +210,7 @@ impl TestView { let transactions = &self.transactions; let quorum_data = QuorumData { - leaf_commit: old.leaf.commit(), + leaf_commit: old.leaf.commit(&self.upgrade_lock).await, }; let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); @@ -359,8 +361,11 @@ impl TestView { transactions: transactions.clone(), }); - let signature = ::sign(&private_key, leaf.commit().as_ref()) - .expect("Failed to sign leaf commitment."); + let signature = ::sign( + &private_key, + leaf.commit(&self.upgrade_lock).await.as_ref(), + ) + .expect("Failed to sign leaf commitment."); let quorum_proposal = Proposal { data: proposal, @@ -422,7 +427,7 @@ impl TestView { ) -> QuorumVote { QuorumVote::::create_signed_vote( QuorumData { - leaf_commit: self.leaf.commit(), + leaf_commit: self.leaf.commit(&handle.hotshot.upgrade_lock).await, }, self.view_number, &handle.public_key(), diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index f788c29af5..b107fc68f9 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -63,7 +63,7 @@ impl> TestTaskState Ok(()) } - fn check(&self) -> TestResult { + async fn check(&self) -> TestResult { match self.description.clone() { ViewSyncTaskDescription::Threshold(min, max) => { let num_hits = self.hit_view_sync.len(); diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index f7f905b54a..940e30f62e 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -79,11 +79,15 @@ async fn test_quorum_proposal_recv_task() { // These are both updated when we vote. Since we don't have access // to that, we'll just put them in here. consensus_writer - .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + .update_saved_leaves( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + &handle.hotshot.upgrade_lock, + ) + .await; consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number, - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); } @@ -104,7 +108,9 @@ async fn test_quorum_proposal_recv_task() { >::from_header( &proposals[1].data.block_header, ), - ), + &handle.hotshot.upgrade_lock, + ) + .await, )), exact(QuorumProposalValidated( proposals[1].data.clone(), @@ -177,7 +183,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { consensus_writer .update_validated_state_map( inserted_view_number, - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); @@ -226,7 +232,9 @@ async fn test_quorum_proposal_recv_task_liveness_check() { >::from_header( &proposals[2].data.block_header, ), - ), + &handle.hotshot.upgrade_lock + ) + .await, )), exact(QuorumProposalRequestSend(req, signature)), exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index b984c60889..324d3241b2 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -80,7 +80,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer - .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + .update_saved_leaves( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + &handle.hotshot.upgrade_lock, + ) + .await; } // We must send the genesis cert here to initialize hotshot successfully. @@ -110,7 +114,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { ), ValidatedStateUpdated( proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, ), ], ]; @@ -170,12 +174,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer - .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + .update_saved_leaves( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + &handle.hotshot.upgrade_lock, + ) + .await; consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); } @@ -212,7 +220,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone()), + build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -229,7 +237,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -246,7 +254,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -263,7 +271,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone()), + build_fake_view_with_leaf(leaves[2].clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -280,7 +288,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone()), + build_fake_view_with_leaf(leaves[3].clone(), &handle.hotshot.upgrade_lock).await, ), ], ]; @@ -390,7 +398,7 @@ async fn test_quorum_proposal_task_qc_timeout() { VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, ), ]]; @@ -480,7 +488,7 @@ async fn test_quorum_proposal_task_view_sync() { VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, ), ]]; @@ -532,11 +540,15 @@ async fn test_quorum_proposal_task_liveness_check() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer - .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + .update_saved_leaves( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + &handle.hotshot.upgrade_lock, + ) + .await; consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); } @@ -572,7 +584,7 @@ async fn test_quorum_proposal_task_liveness_check() { VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone()), + build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -589,7 +601,7 @@ async fn test_quorum_proposal_task_liveness_check() { VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -606,7 +618,7 @@ async fn test_quorum_proposal_task_liveness_check() { VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -623,7 +635,7 @@ async fn test_quorum_proposal_task_liveness_check() { VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), ValidatedStateUpdated( proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone()), + build_fake_view_with_leaf(leaves[2].clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -640,7 +652,7 @@ async fn test_quorum_proposal_task_liveness_check() { VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), ValidatedStateUpdated( proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone()), + build_fake_view_with_leaf(leaves[3].clone(), &handle.hotshot.upgrade_lock).await, ), ], ]; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 265910168d..e11ac16e5a 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -62,10 +62,12 @@ async fn test_quorum_vote_task_success() { consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); - consensus_writer.update_saved_leaves(view.leaf.clone()); + consensus_writer + .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) + .await; } drop(consensus_writer); @@ -135,10 +137,12 @@ async fn test_quorum_vote_task_miss_dependency() { consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); - consensus_writer.update_saved_leaves(view.leaf.clone()); + consensus_writer + .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) + .await; } drop(consensus_writer); diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index a4153b407b..c757b5d1b7 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -19,9 +19,9 @@ use hotshot_testing::{ test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; -use hotshot_types::message::{GeneralConsensusMessage, MessageKind, SequencingMessage}; use hotshot_types::{ data::ViewNumber, + message::{GeneralConsensusMessage, MessageKind, SequencingMessage}, traits::{ election::Membership, network::TransmitType, diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index fcb25aaf23..a465e94b37 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -102,11 +102,15 @@ async fn test_upgrade_task_with_proposal() { leaders.push(view.leader_public_key); views.push(view.clone()); consensus_writer - .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + .update_saved_leaves( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + &handle.hotshot.upgrade_lock, + ) + .await; consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); } @@ -122,11 +126,15 @@ async fn test_upgrade_task_with_proposal() { leaves.push(view.leaf.clone()); views.push(view.clone()); consensus_writer - .update_saved_leaves(Leaf::from_quorum_proposal(&view.quorum_proposal.data)); + .update_saved_leaves( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + &handle.hotshot.upgrade_lock, + ) + .await; consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); } @@ -175,7 +183,7 @@ async fn test_upgrade_task_with_proposal() { VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), ValidatedStateUpdated( genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone()), + build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, ), ], random![ @@ -192,7 +200,7 @@ async fn test_upgrade_task_with_proposal() { VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), ValidatedStateUpdated( proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone()), + build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, ), ], InputOrder::Random(upgrade_vote_recvs), @@ -210,7 +218,7 @@ async fn test_upgrade_task_with_proposal() { VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), ValidatedStateUpdated( proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone()), + build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, ), ], ]; diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 37e3134bf1..71b2719c27 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -89,10 +89,12 @@ async fn test_upgrade_task_with_vote() { consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); - consensus_writer.update_saved_leaves(view.leaf.clone()); + consensus_writer + .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) + .await; } drop(consensus_writer); diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 13d5e86297..62d1d8040b 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -58,10 +58,12 @@ async fn test_vote_dependency_handle() { consensus_writer .update_validated_state_map( view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone()), + build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, ) .unwrap(); - consensus_writer.update_saved_leaves(view.leaf.clone()); + consensus_writer + .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) + .await; } drop(consensus_writer); diff --git a/testing/tests/tests_2/byzantine_tests.rs b/testing/tests/tests_2/byzantine_tests.rs index 6120e9bd92..902999c55c 100644 --- a/testing/tests/tests_2/byzantine_tests.rs +++ b/testing/tests/tests_2/byzantine_tests.rs @@ -1,5 +1,7 @@ use std::{ collections::{HashMap, HashSet}, + rc::Rc, + sync::Arc, time::Duration, }; @@ -27,8 +29,6 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use std::rc::Rc; -use std::sync::Arc; cross_tests!( TestName: double_propose_vote, Impls: [MemoryImpl], diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 0c85127f8b..3fc23951e8 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -15,7 +15,7 @@ use std::{ use anyhow::{bail, ensure, Result}; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; -use committable::{Commitment, Committable}; +use committable::Commitment; use tracing::{debug, error, instrument, trace}; use vec1::Vec1; @@ -24,12 +24,12 @@ use crate::{ data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, event::HotShotAction, - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::{DaCertificate, QuorumCertificate}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, @@ -585,8 +585,13 @@ impl Consensus { } /// Update the saved leaves with a new leaf. - pub fn update_saved_leaves(&mut self, leaf: Leaf) { - self.saved_leaves.insert(leaf.commit(), leaf); + pub async fn update_saved_leaves( + &mut self, + leaf: Leaf, + upgrade_lock: &UpgradeLock, + ) { + self.saved_leaves + .insert(leaf.commit(upgrade_lock).await, leaf); } /// Update the saved payloads with a new encoded transaction. diff --git a/types/src/data.rs b/types/src/data.rs index 168ca0669b..03d30ca817 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -32,21 +32,22 @@ use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; +use vbs::version::StaticVersionType; use vec1::Vec1; use crate::{ - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::{ QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, }, - simple_vote::{QuorumData, UpgradeProposalData}, + simple_vote::{QuorumData, UpgradeProposalData, VersionedVoteData}, traits::{ block_contents::{ vid_commitment, BlockHeader, BuilderFee, EncodeBytes, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, }, election::Membership, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, states::TestableState, BlockPayload, @@ -444,6 +445,29 @@ pub struct Leaf { block_payload: Option, } +impl Leaf { + /// Calculate the leaf commitment, + /// which is gated on the version to include the block header. + pub async fn commit( + &self, + upgrade_lock: &UpgradeLock, + ) -> Commitment { + let version = upgrade_lock.version_infallible(self.view_number).await; + + if version < V::Marketplace::VERSION { + ::commit(self) + } else { + RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .field("parent leaf commitment", self.parent_commitment) + .field("block header", self.block_header.commit()) + .field("justify qc", self.justify_qc.commit()) + .optional("upgrade certificate", &self.upgrade_certificate) + .finalize() + } + } +} + impl PartialEq for Leaf { fn eq(&self, other: &Self) -> bool { self.view_number == other.view_number @@ -477,20 +501,32 @@ impl Display for Leaf { impl QuorumCertificate { #[must_use] /// Creat the Genesis certificate - pub async fn genesis( + pub async fn genesis( validated_state: &TYPES::ValidatedState, instance_state: &TYPES::InstanceState, ) -> Self { + // since this is genesis, we should never have a decided upgrade certificate. + let upgrade_lock = UpgradeLock::::new(); + + let genesis_view = ::genesis(); + let data = QuorumData { leaf_commit: Leaf::genesis(validated_state, instance_state) .await - .commit(), + .commit(&upgrade_lock) + .await, }; - let commit = data.commit(); + + let versioned_data = + VersionedVoteData::<_, _, V>::new_infallible(data.clone(), genesis_view, &upgrade_lock) + .await; + + let bytes: [u8; 32] = versioned_data.commit().into(); + Self::new( data, - commit, - ::genesis(), + Commitment::from_raw(bytes), + genesis_view, None, PhantomData, ) diff --git a/types/src/message.rs b/types/src/message.rs index 05c25fab12..303e1d7073 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -14,7 +14,6 @@ use std::{fmt, fmt::Debug, marker::PhantomData, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; use async_lock::RwLock; use cdn_proto::util::mnemonic; -use committable::Committable; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::{ @@ -358,13 +357,20 @@ where /// Checks that the signature of the quorum proposal is valid. /// # Errors /// Returns an error when the proposal signature is invalid. - pub fn validate_signature(&self, quorum_membership: &TYPES::Membership) -> Result<()> { + pub async fn validate_signature( + &self, + quorum_membership: &TYPES::Membership, + upgrade_lock: &UpgradeLock, + ) -> Result<()> { let view_number = self.data.view_number(); let view_leader_key = quorum_membership.leader(view_number); let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( - view_leader_key.validate(&self.signature, proposed_leaf.commit().as_ref()), + view_leader_key.validate( + &self.signature, + proposed_leaf.commit(upgrade_lock).await.as_ref() + ), "Proposal signature is invalid." ); @@ -417,6 +423,24 @@ impl UpgradeLock { Ok(version) } + /// Calculate the version applied in a view, based on the provided upgrade lock. + /// + /// This function does not fail, since it does not check that the version is supported. + pub async fn version_infallible(&self, view: TYPES::Time) -> Version { + let upgrade_certificate = self.decided_upgrade_certificate.read().await; + + match *upgrade_certificate { + Some(ref cert) => { + if view >= cert.data.new_version_first_view { + cert.data.new_version + } else { + cert.data.old_version + } + } + None => V::Base::VERSION, + } + } + /// Serialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. /// /// # Errors diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index fd1500c04c..a86452b9a9 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -216,6 +216,24 @@ impl VersionedVoteData, + ) -> Self { + let version = upgrade_lock.version_infallible(view).await; + + Self { + data, + view, + version, + _pd: PhantomData, + } + } } impl Committable From c8a4e4b5fd92844733729ce65b3c6979579da43e Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 10 Sep 2024 15:16:52 -0400 Subject: [PATCH 1208/1393] [TECH DEBT] Refactor memberships, remove non-staked nodes (#3653) * refactor memberships * fmt * improve API * Make sure each node has above-zero stake * remove non-staked node support * PR comments * fix `random/fixed` leader election to use `eligible_leaders` --- example-types/src/node_types.rs | 5 +- examples/infra/mod.rs | 20 +- hotshot/src/lib.rs | 8 +- hotshot/src/tasks/mod.rs | 16 +- hotshot/src/tasks/task_state.rs | 9 +- .../src/traits/election/static_committee.rs | 271 ++++++++---------- .../static_committee_leader_two_views.rs | 263 +++++++---------- orchestrator/run-config.toml | 2 - orchestrator/src/config.rs | 8 - orchestrator/staging-config.toml | 2 - task-impls/src/consensus/mod.rs | 2 +- task-impls/src/network.rs | 2 +- task-impls/src/quorum_vote/mod.rs | 2 +- task-impls/src/request.rs | 2 +- testing/src/helpers.rs | 34 +-- testing/src/test_builder.rs | 16 -- testing/src/test_runner.rs | 30 +- testing/tests/tests_1/network_task.rs | 19 +- testing/tests/tests_3/memory_network.rs | 2 +- types/src/data.rs | 2 +- types/src/lib.rs | 4 - types/src/simple_certificate.rs | 2 +- types/src/traits/election.rs | 65 ++--- types/src/vote.rs | 2 +- 24 files changed, 307 insertions(+), 481 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index a3ff669ab3..c5e5c6a3a9 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -52,7 +52,7 @@ impl NodeType for TestTypes { type Transaction = TestTransaction; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; - type Membership = GeneralStaticCommittee; + type Membership = GeneralStaticCommittee; type BuilderSignatureKey = BuilderKey; } @@ -81,8 +81,7 @@ impl NodeType for TestConsecutiveLeaderTypes { type Transaction = TestTransaction; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; - type Membership = - StaticCommitteeLeaderForTwoViews; + type Membership = StaticCommitteeLeaderForTwoViews; type BuilderSignatureKey = BuilderKey; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 20b0ed8a56..0f37e713aa 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -377,32 +377,34 @@ pub trait RunDa< // Get KeyPair for certificate Aggregation let pk = config.config.my_own_validator_config.public_key.clone(); let sk = config.config.my_own_validator_config.private_key.clone(); - let known_nodes_with_stake = config.config.known_nodes_with_stake.clone(); let network = self.network(); + let all_nodes = config.config.known_nodes_with_stake.clone(); + let da_nodes = config.config.known_da_nodes.clone(); + // Create the quorum membership from all nodes - let quorum_membership = ::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake.clone(), + let quorum_membership = ::Membership::new( + all_nodes.clone(), + all_nodes.clone(), Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.config.fixed_leader_for_gpuvid, ); // Create the quorum membership from all nodes, specifying the committee // as the known da nodes - let da_membership = ::Membership::create_election( - known_nodes_with_stake.clone(), - config.config.known_da_nodes.clone(), + let da_membership = ::Membership::new( + all_nodes.clone(), + da_nodes, Topic::Da, + #[cfg(feature = "fixed-leader-election")] config.config.fixed_leader_for_gpuvid, ); let memberships = Memberships { quorum_membership: quorum_membership.clone(), da_membership, - vid_membership: quorum_membership.clone(), - view_sync_membership: quorum_membership, }; let marketplace_config = MarketplaceConfig { diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 3a1a2cd086..2fd7cde872 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -90,14 +90,10 @@ pub struct MarketplaceConfig> { /// Bundle of all the memberships a consensus instance uses #[derive(Clone)] pub struct Memberships { - /// Quorum Membership + /// The entire quorum pub quorum_membership: TYPES::Membership, - /// DA + /// The DA nodes pub da_membership: TYPES::Membership, - /// VID - pub vid_membership: TYPES::Membership, - /// View Sync - pub view_sync_membership: TYPES::Membership, } /// Holds the state needed to participate in `HotShot` consensus diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 9bab74694a..5db94d0e14 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -495,8 +495,6 @@ where let network = Arc::clone(&handle.network); let quorum_membership = handle.memberships.quorum_membership.clone(); let da_membership = handle.memberships.da_membership.clone(); - let vid_membership = handle.memberships.vid_membership.clone(); - let view_sync_membership = handle.memberships.view_sync_membership.clone(); self.add_network_event_task( handle, @@ -507,7 +505,7 @@ where self.add_network_event_task( handle, Arc::clone(&network), - quorum_membership, + quorum_membership.clone(), network::upgrade_filter, ); self.add_network_event_task( @@ -519,13 +517,13 @@ where self.add_network_event_task( handle, Arc::clone(&network), - view_sync_membership, + quorum_membership.clone(), network::view_sync_filter, ); self.add_network_event_task( handle, Arc::clone(&network), - vid_membership, + quorum_membership, network::vid_filter, ); } @@ -577,8 +575,6 @@ pub fn add_network_event_tasks, V: let network = Arc::clone(&handle.network); let quorum_membership = handle.memberships.quorum_membership.clone(); let da_membership = handle.memberships.da_membership.clone(); - let vid_membership = handle.memberships.vid_membership.clone(); - let view_sync_membership = handle.memberships.view_sync_membership.clone(); add_network_event_task( handle, @@ -589,7 +585,7 @@ pub fn add_network_event_tasks, V: add_network_event_task( handle, Arc::clone(&network), - quorum_membership, + quorum_membership.clone(), network::upgrade_filter, ); add_network_event_task( @@ -601,13 +597,13 @@ pub fn add_network_event_tasks, V: add_network_event_task( handle, Arc::clone(&network), - view_sync_membership, + quorum_membership.clone(), network::view_sync_filter, ); add_network_event_task( handle, Arc::clone(&network), - vid_membership, + quorum_membership, network::vid_filter, ); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 8ce9d19236..ab7f68fdc4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -120,7 +120,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, vote_collector: None, network: Arc::clone(&handle.hotshot.network), - membership: handle.hotshot.memberships.vid_membership.clone().into(), + membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -161,12 +161,7 @@ impl, V: Versions> CreateTaskState current_view: cur_view, next_view: cur_view, network: Arc::clone(&handle.hotshot.network), - membership: handle - .hotshot - .memberships - .view_sync_membership - .clone() - .into(), + membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), num_timeouts_tracked: 0, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 842c2378c6..01b56be103 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -4,74 +4,126 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{marker::PhantomData, num::NonZeroU64}; +use std::collections::BTreeMap; +use std::num::NonZeroU64; use ethereum_types::U256; -// use ark_bls12_381::Parameters as Param381; -use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ - signature_key::BLSPubKey, traits::{ - election::Membership, network::Topic, node_implementation::NodeType, - signature_key::SignatureKey, + election::Membership, + network::Topic, + node_implementation::NodeType, + signature_key::{SignatureKey, StakeTableEntryType}, }, PeerConfig, }; #[cfg(feature = "randomized-leader-election")] use rand::{rngs::StdRng, Rng}; -use tracing::debug; - -/// Dummy implementation of [`Membership`] #[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub struct GeneralStaticCommittee { - /// All the nodes participating and their stake - all_nodes_with_stake: Vec, - /// The nodes on the static committee and their stake - committee_nodes_with_stake: Vec, - /// builder nodes - committee_nodes_without_stake: Vec, - /// the number of fixed leader for gpuvid + +/// The static committee election +pub struct GeneralStaticCommittee { + /// The nodes eligible for leadership. + /// NOTE: This is currently a hack because the DA leader needs to be the quorum + /// leader but without voting rights. + eligible_leaders: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake + stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake, indexed by public key + indexed_stake_table: + BTreeMap::StakeTableEntry>, + + // /// The members of the committee + // committee_members: BTreeSet, + #[cfg(feature = "fixed-leader-election")] + /// The number of fixed leaders for gpuvid fixed_leader_for_gpuvid: usize, - /// Node type phantom - _type_phantom: PhantomData, /// The network topic of the committee committee_topic: Topic, } /// static committee using a vrf kp -pub type StaticCommittee = GeneralStaticCommittee; - -impl GeneralStaticCommittee { - /// Creates a new dummy elector - #[must_use] - pub fn new( - _nodes: &[PUBKEY], - nodes_with_stake: Vec, - nodes_without_stake: Vec, - fixed_leader_for_gpuvid: usize, +pub type StaticCommittee = GeneralStaticCommittee; + +impl Membership for GeneralStaticCommittee { + /// Create a new election + fn new( + eligible_leaders: Vec::SignatureKey>>, + committee_members: Vec::SignatureKey>>, committee_topic: Topic, + #[cfg(feature = "fixed-leader-election")] fixed_leader_for_gpuvid: usize, ) -> Self { + // For each eligible leader, get the stake table entry + let eligible_leaders: Vec<::StakeTableEntry> = + eligible_leaders + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each member, get the stake table entry + let members: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // Index the stake table by public key + let indexed_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + Self { - all_nodes_with_stake: nodes_with_stake.clone(), - committee_nodes_with_stake: nodes_with_stake, - committee_nodes_without_stake: nodes_without_stake, - fixed_leader_for_gpuvid, - _type_phantom: PhantomData, + eligible_leaders, + stake_table: members, + indexed_stake_table, committee_topic, + #[cfg(feature = "fixed-leader-election")] + fixed_leader_for_gpuvid, } } -} -impl Membership - for GeneralStaticCommittee -where - TYPES: NodeType, -{ - /// Clone the public key and corresponding stake table for current elected committee - fn committee_qc_stake_table(&self) -> Vec { - self.committee_nodes_with_stake.clone() + /// Get the stake table for the current view + fn stake_table( + &self, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + self.stake_table.clone() + } + + /// Get all members of the committee for the current view + fn committee_members( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + + /// Get the stake table entry for a public key + fn stake( + &self, + pub_key: &::SignatureKey, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + self.indexed_stake_table.get(pub_key).cloned() + } + + /// Check if a node has stake in the committee + fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + self.indexed_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) } /// Get the network topic for the committee @@ -84,152 +136,53 @@ where feature = "fixed-leader-election" )))] /// Index the vector of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> PUBKEY { - let index = usize::try_from(*view_number % self.all_nodes_with_stake.len() as u64).unwrap(); - let res = self.all_nodes_with_stake[index].clone(); + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + let index = usize::try_from(*view_number % self.eligible_leaders.len() as u64).unwrap(); + let res = self.eligible_leaders[index].clone(); TYPES::SignatureKey::public_key(&res) } #[cfg(feature = "fixed-leader-election")] /// Only get leader in fixed set /// Index the fixed vector (first fixed_leader_for_gpuvid element) of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> PUBKEY { + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { if self.fixed_leader_for_gpuvid <= 0 - || self.fixed_leader_for_gpuvid > self.all_nodes_with_stake.len() + || self.fixed_leader_for_gpuvid > self.eligible_leaders.len() { panic!("fixed_leader_for_gpuvid is not set correctly."); } let index = usize::try_from(*view_number % self.fixed_leader_for_gpuvid as u64).unwrap(); - let res = self.all_nodes_with_stake[index].clone(); + let res = self.eligible_leaders[index].clone(); TYPES::SignatureKey::public_key(&res) } #[cfg(feature = "randomized-leader-election")] /// Index the vector of public keys with a random number generated using the current view number as a seed - fn leader(&self, view_number: TYPES::Time) -> PUBKEY { + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: usize = rng.gen(); - let index = randomized_view_number % self.nodes_with_stake.len(); - let res = self.all_nodes_with_stake[index].clone(); + let index = randomized_view_number % self.eligible_leaders.len(); + let res = self.eligible_leaders[index].clone(); TYPES::SignatureKey::public_key(&res) } - fn has_stake(&self, pub_key: &PUBKEY) -> bool { - let entry = pub_key.stake_table_entry(1u64); - self.committee_nodes_with_stake.contains(&entry) - } - - fn stake( - &self, - pub_key: &::SignatureKey, - ) -> Option<::StakeTableEntry> { - let entry = pub_key.stake_table_entry(1u64); - if self.committee_nodes_with_stake.contains(&entry) { - Some(entry) - } else { - None - } - } - - fn create_election( - mut all_nodes: Vec>, - committee_members: Vec>, - committee_topic: Topic, - fixed_leader_for_gpuvid: usize, - ) -> Self { - let mut committee_nodes_with_stake = Vec::new(); - let mut committee_nodes_without_stake = Vec::new(); - - // Iterate over committee members - for entry in committee_members - .iter() - .map(|entry| entry.stake_table_entry.clone()) - { - if entry.stake() > U256::from(0) { - // Positive stake - committee_nodes_with_stake.push(entry); - } else { - // Zero stake - committee_nodes_without_stake.push(PUBKEY::public_key(&entry)); - } - } - - // Retain all nodes with stake - all_nodes.retain(|entry| entry.stake_table_entry.stake() > U256::from(0)); - - debug!( - "Election Membership Size: {}", - committee_nodes_with_stake.len() - ); - - Self { - all_nodes_with_stake: all_nodes - .into_iter() - .map(|entry| entry.stake_table_entry) - .collect(), - committee_nodes_with_stake, - committee_nodes_without_stake, - fixed_leader_for_gpuvid, - _type_phantom: PhantomData, - committee_topic, - } - } - + /// Get the total number of nodes in the committee fn total_nodes(&self) -> usize { - self.committee_nodes_with_stake.len() + self.stake_table.len() } + /// Get the voting success threshold for the committee fn success_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 2) / 3) + 1).unwrap() + NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } + /// Get the voting failure threshold for the committee fn failure_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64) / 3) + 1).unwrap() + NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } + /// Get the voting upgrade threshold for the committee fn upgrade_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 9) / 10) + 1).unwrap() - } - - fn staked_committee( - &self, - _view_number: ::Time, - ) -> std::collections::BTreeSet<::SignatureKey> { - self.committee_nodes_with_stake - .iter() - .map(|node| ::SignatureKey::public_key(node)) - .collect() - } - - fn non_staked_committee( - &self, - _view_number: ::Time, - ) -> std::collections::BTreeSet<::SignatureKey> { - self.committee_nodes_without_stake.iter().cloned().collect() - } - - fn whole_committee( - &self, - view_number: ::Time, - ) -> std::collections::BTreeSet<::SignatureKey> { - let mut committee = self.staked_committee(view_number); - committee.extend(self.non_staked_committee(view_number)); - committee - } -} - -impl GeneralStaticCommittee -where - TYPES: NodeType, -{ - #[allow(clippy::must_use_candidate)] - /// get the non-staked builder nodes - pub fn non_staked_nodes_count(&self) -> usize { - self.committee_nodes_without_stake.len() - } - #[allow(clippy::must_use_candidate)] - /// get all the non-staked nodes - pub fn non_staked_nodes(&self) -> Vec { - self.committee_nodes_without_stake.clone() + NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index d114a5d904..2f88c5ba4d 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -1,202 +1,159 @@ -use std::{marker::PhantomData, num::NonZeroU64}; +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::collections::BTreeMap; +use std::num::NonZeroU64; use ethereum_types::U256; -// use ark_bls12_381::Parameters as Param381; -use hotshot_types::traits::signature_key::StakeTableEntryType; use hotshot_types::{ - signature_key::BLSPubKey, traits::{ - election::Membership, network::Topic, node_implementation::NodeType, - signature_key::SignatureKey, + election::Membership, + network::Topic, + node_implementation::NodeType, + signature_key::{SignatureKey, StakeTableEntryType}, }, PeerConfig, }; -use tracing::debug; - -/// Dummy implementation of [`Membership`] #[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub struct StaticCommitteeLeaderForTwoViews { - /// All the nodes participating and their stake - all_nodes_with_stake: Vec, - /// The nodes on the static committee and their stake - committee_nodes_with_stake: Vec, - /// builder nodes - committee_nodes_without_stake: Vec, - /// the number of fixed leader for gpuvid + +/// The static committee election +pub struct StaticCommitteeLeaderForTwoViews { + /// The nodes eligible for leadership. + /// NOTE: This is currently a hack because the DA leader needs to be the quorum + /// leader but without voting rights. + eligible_leaders: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake + stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake, indexed by public key + indexed_stake_table: + BTreeMap::StakeTableEntry>, + + // /// The members of the committee + // committee_members: BTreeSet, + #[cfg(feature = "fixed-leader-election")] + /// The number of fixed leaders for gpuvid fixed_leader_for_gpuvid: usize, - /// Node type phantom - _type_phantom: PhantomData, /// The network topic of the committee committee_topic: Topic, } /// static committee using a vrf kp -pub type StaticCommittee = StaticCommitteeLeaderForTwoViews; - -impl StaticCommitteeLeaderForTwoViews { - /// Creates a new dummy elector - #[must_use] - pub fn new( - _nodes: &[PUBKEY], - nodes_with_stake: Vec, - nodes_without_stake: Vec, - fixed_leader_for_gpuvid: usize, +pub type StaticCommittee = StaticCommitteeLeaderForTwoViews; + +impl Membership for StaticCommitteeLeaderForTwoViews { + /// Create a new election + fn new( + eligible_leaders: Vec::SignatureKey>>, + committee_members: Vec::SignatureKey>>, committee_topic: Topic, + #[cfg(feature = "fixed-leader-election")] fixed_leader_for_gpuvid: usize, ) -> Self { + // For each eligible leader, get the stake table entry + let eligible_leaders: Vec<::StakeTableEntry> = + eligible_leaders + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each member, get the stake table entry + let members: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // Index the stake table by public key + let indexed_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + Self { - all_nodes_with_stake: nodes_with_stake.clone(), - committee_nodes_with_stake: nodes_with_stake, - committee_nodes_without_stake: nodes_without_stake, - fixed_leader_for_gpuvid, - _type_phantom: PhantomData, + eligible_leaders, + stake_table: members, + indexed_stake_table, committee_topic, + #[cfg(feature = "fixed-leader-election")] + fixed_leader_for_gpuvid, } } -} - -impl Membership - for StaticCommitteeLeaderForTwoViews -where - TYPES: NodeType, -{ - /// Clone the public key and corresponding stake table for current elected committee - fn committee_qc_stake_table(&self) -> Vec { - self.committee_nodes_with_stake.clone() - } - - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic { - self.committee_topic.clone() - } - /// Index the vector of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> PUBKEY { - // two connsecutive views will have same index starting with even number. - // eg 0->1, 2->3 ... 10->11 etc - let index = - usize::try_from((*view_number / 2) % self.all_nodes_with_stake.len() as u64).unwrap(); - let res = self.all_nodes_with_stake[index].clone(); - TYPES::SignatureKey::public_key(&res) + /// Get the stake table for the current view + fn stake_table( + &self, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + self.stake_table.clone() } - fn has_stake(&self, pub_key: &PUBKEY) -> bool { - let entry = pub_key.stake_table_entry(1u64); - self.committee_nodes_with_stake.contains(&entry) + /// Get all members of the committee for the current view + fn committee_members( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() } + /// Get the stake table entry for a public key fn stake( &self, pub_key: &::SignatureKey, ) -> Option<::StakeTableEntry> { - let entry = pub_key.stake_table_entry(1u64); - if self.committee_nodes_with_stake.contains(&entry) { - Some(entry) - } else { - None - } + // Only return the stake if it is above zero + self.indexed_stake_table.get(pub_key).cloned() } - fn create_election( - mut all_nodes: Vec>, - committee_members: Vec>, - committee_topic: Topic, - fixed_leader_for_gpuvid: usize, - ) -> Self { - let mut committee_nodes_with_stake = Vec::new(); - let mut committee_nodes_without_stake = Vec::new(); - - // Iterate over committee members - for entry in committee_members - .iter() - .map(|entry| entry.stake_table_entry.clone()) - { - if entry.stake() > U256::from(0) { - // Positive stake - committee_nodes_with_stake.push(entry); - } else { - // Zero stake - committee_nodes_without_stake.push(PUBKEY::public_key(&entry)); - } - } - - // Retain all nodes with stake - all_nodes.retain(|entry| entry.stake_table_entry.stake() > U256::from(0)); + /// Check if a node has stake in the committee + fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + self.indexed_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } - debug!( - "Election Membership Size: {}", - committee_nodes_with_stake.len() - ); + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic { + self.committee_topic.clone() + } - Self { - all_nodes_with_stake: all_nodes - .into_iter() - .map(|entry| entry.stake_table_entry) - .collect(), - committee_nodes_with_stake, - committee_nodes_without_stake, - fixed_leader_for_gpuvid, - _type_phantom: PhantomData, - committee_topic, - } + /// Index the vector of public keys with the current view number + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + let index = + usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); + let res = self.eligible_leaders[index].clone(); + TYPES::SignatureKey::public_key(&res) } + /// Get the total number of nodes in the committee fn total_nodes(&self) -> usize { - self.committee_nodes_with_stake.len() + self.stake_table.len() } + /// Get the voting success threshold for the committee fn success_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 2) / 3) + 1).unwrap() + NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } + /// Get the voting failure threshold for the committee fn failure_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64) / 3) + 1).unwrap() + NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } + /// Get the voting upgrade threshold for the committee fn upgrade_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.committee_nodes_with_stake.len() as u64 * 9) / 10) + 1).unwrap() - } - - fn staked_committee( - &self, - _view_number: ::Time, - ) -> std::collections::BTreeSet<::SignatureKey> { - self.committee_nodes_with_stake - .iter() - .map(|node| ::SignatureKey::public_key(node)) - .collect() - } - - fn non_staked_committee( - &self, - _view_number: ::Time, - ) -> std::collections::BTreeSet<::SignatureKey> { - self.committee_nodes_without_stake.iter().cloned().collect() - } - - fn whole_committee( - &self, - view_number: ::Time, - ) -> std::collections::BTreeSet<::SignatureKey> { - let mut committee = self.staked_committee(view_number); - committee.extend(self.non_staked_committee(view_number)); - committee - } -} - -impl StaticCommitteeLeaderForTwoViews -where - TYPES: NodeType, -{ - #[allow(clippy::must_use_candidate)] - /// get the non-staked builder nodes - pub fn non_staked_nodes_count(&self) -> usize { - self.committee_nodes_without_stake.len() - } - #[allow(clippy::must_use_candidate)] - /// get all the non-staked nodes - pub fn non_staked_nodes(&self) -> Vec { - self.committee_nodes_without_stake.clone() + NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } } diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index e0672d0164..ae5cfe59ad 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -56,10 +56,8 @@ enable_registration_verification = true [config] num_nodes_with_stake = 10 -num_nodes_without_stake = 0 start_threshold = [8, 10] staked_da_nodes = 10 -non_staked_da_nodes = 0 fixed_leader_for_gpuvid = 1 next_view_timeout = 30000 timeout_ratio = [11, 10] diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 514eda8e96..0464c1bd5f 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -499,8 +499,6 @@ pub struct HotShotConfigFile { pub start_threshold: (u64, u64), /// Total number of staked nodes in the network pub num_nodes_with_stake: NonZeroUsize, - /// Total number of non-staked nodes in the network - pub num_nodes_without_stake: usize, #[serde(skip)] /// My own public key, secret key, stake value pub my_own_validator_config: ValidatorConfig, @@ -515,8 +513,6 @@ pub struct HotShotConfigFile { pub known_nodes_without_stake: Vec, /// Number of staking DA nodes pub staked_da_nodes: usize, - /// Number of non-staking DA nodes - pub non_staked_da_nodes: usize, /// Number of fixed leaders for GPU VID pub fixed_leader_for_gpuvid: usize, /// Base duration for next-view timeout, in milliseconds @@ -641,13 +637,11 @@ impl From> for HotShotConfig { execution_type: ExecutionType::Continuous, start_threshold: val.start_threshold, num_nodes_with_stake: val.num_nodes_with_stake, - num_nodes_without_stake: val.num_nodes_without_stake, known_da_nodes: val.known_da_nodes, known_nodes_with_stake: val.known_nodes_with_stake, known_nodes_without_stake: val.known_nodes_without_stake, my_own_validator_config: val.my_own_validator_config, da_staked_committee_size: val.staked_da_nodes, - da_non_staked_committee_size: val.non_staked_da_nodes, fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, next_view_timeout: val.next_view_timeout, view_sync_timeout: val.view_sync_timeout, @@ -720,13 +714,11 @@ impl Default for HotShotConfigFile { Self { num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), start_threshold: (1, 1), - num_nodes_without_stake: 0, my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, known_nodes_without_stake: vec![], staked_da_nodes, known_da_nodes, - non_staked_da_nodes: 0, fixed_leader_for_gpuvid: 1, next_view_timeout: 10000, view_sync_timeout: Duration::from_millis(1000), diff --git a/orchestrator/staging-config.toml b/orchestrator/staging-config.toml index 7290ced3b0..417a19f10b 100644 --- a/orchestrator/staging-config.toml +++ b/orchestrator/staging-config.toml @@ -44,9 +44,7 @@ builder = "Simple" [config] start_threshold = [ 8, 10 ] num_nodes_with_stake = 10 -num_nodes_without_stake = 0 staked_da_nodes = 10 -non_staked_da_nodes = 0 fixed_leader_for_gpuvid = 1 next_view_timeout = 15_000 timeout_ratio = [ 11, 10 ] diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 2e8507cd93..c4b7291bc3 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -159,7 +159,7 @@ impl, V: Versions> ConsensusTaskSt .validate(&disperse.signature, payload_commitment.as_ref()) { let mut validated = false; - for da_member in self.da_membership.staked_committee(view) { + for da_member in self.da_membership.committee_members(view) { if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { validated = true; break; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 1ea7a3cc1d..7102179585 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -547,7 +547,7 @@ impl< kind: message_kind, }; let view = message.kind.view_number(); - let committee = membership.whole_committee(view); + let committee = membership.committee_members(view); let committee_topic = membership.committee_topic(); let net = Arc::clone(&self.channel); let storage = Arc::clone(&self.storage); diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 67a28f1da4..bd7e04d993 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -603,7 +603,7 @@ impl, V: Versions> QuorumVoteTaskS .validate(&disperse.signature, payload_commitment.as_ref()) { let mut validated = false; - for da_member in self.da_membership.staked_committee(view) { + for da_member in self.da_membership.committee_members(view) { if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { validated = true; break; diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index abe40ef702..d7d2c0b8ff 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -213,7 +213,7 @@ impl> NetworkRequestState = self .da_membership - .whole_committee(view) + .committee_members(view) .into_iter() .collect(); // Randomize the recipients so all replicas don't overload the same 1 recipients diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 54d6039f33..a5a992784c 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -79,35 +79,25 @@ pub async fn build_system_handle< .await .unwrap(); - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let private_key = config.my_own_validator_config.private_key.clone(); let public_key = config.my_own_validator_config.public_key.clone(); - let _known_nodes_without_stake = config.known_nodes_without_stake.clone(); + let all_nodes = config.known_nodes_with_stake.clone(); + let da_nodes = config.known_da_nodes.clone(); let memberships = Memberships { - quorum_membership: TYPES::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake.clone(), + quorum_membership: TYPES::Membership::new( + all_nodes.clone(), + all_nodes.clone(), Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.fixed_leader_for_gpuvid, ), - da_membership: TYPES::Membership::create_election( - known_nodes_with_stake.clone(), - config.known_da_nodes.clone(), + da_membership: TYPES::Membership::new( + all_nodes, + da_nodes, Topic::Da, - config.fixed_leader_for_gpuvid, - ), - vid_membership: TYPES::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake.clone(), - Topic::Global, - config.fixed_leader_for_gpuvid, - ), - view_sync_membership: TYPES::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake, - Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.fixed_leader_for_gpuvid, ), }; @@ -207,7 +197,7 @@ pub async fn build_assembled_sig< view: TYPES::Time, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let stake_table = membership.committee_qc_stake_table(); + let stake_table = membership.stake_table(); let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), @@ -264,7 +254,7 @@ pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, view_number: TYPES::Time, ) -> VidSchemeType { - let num_storage_nodes = membership.staked_committee(view_number).len(); + let num_storage_nodes = membership.committee_members(view_number).len(); vid_scheme(num_storage_nodes) } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 098d12512d..3ff40a68ff 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -60,8 +60,6 @@ pub struct TimingData { pub struct TestDescription, V: Versions> { /// Total number of staked nodes in the test pub num_nodes_with_stake: usize, - /// Total number of non-staked nodes in the test - pub num_nodes_without_stake: usize, /// nodes available at start pub start_nodes: usize, /// Whether to skip initializing nodes that will start late, which will catch up later with @@ -71,8 +69,6 @@ pub struct TestDescription, V: Ver pub num_bootstrap_nodes: usize, /// Size of the staked DA committee for the test pub da_staked_committee_size: usize, - /// Size of the non-staked DA committee for the test - pub da_non_staked_committee_size: usize, /// overall safety property description pub overall_safety_properties: OverallSafetyPropertiesDescription, /// spinning properties @@ -239,12 +235,10 @@ impl, V: Versions> TestDescription #[allow(clippy::redundant_field_names)] pub fn default_stress() -> Self { let num_nodes_with_stake = 100; - let num_nodes_without_stake = 0; Self { num_bootstrap_nodes: num_nodes_with_stake, num_nodes_with_stake, - num_nodes_without_stake, start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription:: { num_successful_views: 50, @@ -272,13 +266,11 @@ impl, V: Versions> TestDescription #[allow(clippy::redundant_field_names)] pub fn default_multiple_rounds() -> Self { let num_nodes_with_stake = 10; - let num_nodes_without_stake = 0; TestDescription:: { // TODO: remove once we have fixed the DHT timeout issue // https://github.com/EspressoSystems/HotShot/issues/2088 num_bootstrap_nodes: num_nodes_with_stake, num_nodes_with_stake, - num_nodes_without_stake, start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription:: { num_successful_views: 20, @@ -304,10 +296,8 @@ impl, V: Versions> TestDescription #[allow(clippy::redundant_field_names)] pub fn default_more_nodes() -> Self { let num_nodes_with_stake = 20; - let num_nodes_without_stake = 0; Self { num_nodes_with_stake, - num_nodes_without_stake, start_nodes: num_nodes_with_stake, num_bootstrap_nodes: num_nodes_with_stake, // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the @@ -342,16 +332,13 @@ impl, V: Versions> Default #[allow(clippy::redundant_field_names)] fn default() -> Self { let num_nodes_with_stake = 6; - let num_nodes_without_stake = 0; Self { timing_data: TimingData::default(), num_nodes_with_stake, - num_nodes_without_stake, start_nodes: num_nodes_with_stake, skip_late: false, num_bootstrap_nodes: num_nodes_with_stake, da_staked_committee_size: num_nodes_with_stake, - da_non_staked_committee_size: num_nodes_without_stake, spinning_properties: SpinningTaskDescription { node_changes: vec![], }, @@ -404,7 +391,6 @@ where num_bootstrap_nodes, timing_data, da_staked_committee_size, - da_non_staked_committee_size, unreliable_network, .. } = self.clone(); @@ -446,13 +432,11 @@ where num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), // Currently making this zero for simplicity known_da_nodes, - num_nodes_without_stake: 0, num_bootstrap: num_bootstrap_nodes, known_nodes_with_stake, known_nodes_without_stake: vec![], my_own_validator_config, da_staked_committee_size, - da_non_staked_committee_size, fixed_leader_for_gpuvid: 1, next_view_timeout: 500, view_sync_timeout: Duration::from_millis(250), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index e616e270c8..0f6faffae3 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -394,7 +394,6 @@ where ) -> Vec { let mut results = vec![]; let config = self.launcher.resource_generator.config.clone(); - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); let (mut builder_tasks, builder_urls) = self.init_builders::().await; self.add_servers(builder_urls.clone()).await; @@ -412,29 +411,22 @@ where self.next_node_id += 1; tracing::debug!("launch node {}", i); + let all_nodes = config.known_nodes_with_stake.clone(); + let da_nodes = config.known_da_nodes.clone(); + let memberships = Memberships { - quorum_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake.clone(), + quorum_membership: ::Membership::new( + all_nodes.clone(), + all_nodes.clone(), Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.fixed_leader_for_gpuvid, ), - da_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - config.known_da_nodes.clone(), + da_membership: ::Membership::new( + all_nodes, + da_nodes, Topic::Da, - config.fixed_leader_for_gpuvid, - ), - vid_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake.clone(), - Topic::Global, - config.fixed_leader_for_gpuvid, - ), - view_sync_membership: ::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake.clone(), - Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.fixed_leader_for_gpuvid, ), }; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 0b97676722..322640b054 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -56,12 +56,14 @@ async fn test_network_task() { let consensus = handle.hotshot.consensus(); let config = launcher.resource_generator.config.clone(); let public_key = config.my_own_validator_config.public_key; - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); - let membership = ::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake, + let all_nodes = config.known_nodes_with_stake.clone(); + + let membership = ::Membership::new( + all_nodes.clone(), + all_nodes, Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = @@ -135,13 +137,14 @@ async fn test_network_storage_fail() { storage.write().await.should_return_err = true; let config = launcher.resource_generator.config.clone(); let public_key = config.my_own_validator_config.public_key; - let known_nodes_with_stake = config.known_nodes_with_stake.clone(); + let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = UpgradeLock::::new(); - let membership = ::Membership::create_election( - known_nodes_with_stake.clone(), - known_nodes_with_stake, + let membership = ::Membership::new( + all_nodes.clone(), + all_nodes, Topic::Global, + #[cfg(feature = "fixed-leader-election")] config.fixed_leader_for_gpuvid, ); let network_state: NetworkEventTaskState, _> = diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 03a9abe2d1..0ecbbccc50 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -60,7 +60,7 @@ impl NodeType for Test { type Transaction = TestTransaction; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; - type Membership = GeneralStaticCommittee; + type Membership = GeneralStaticCommittee; type BuilderSignatureKey = BuilderKey; } diff --git a/types/src/data.rs b/types/src/data.rs index 03d30ca817..88071936f7 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -177,7 +177,7 @@ impl VidDisperse { membership: &TYPES::Membership, ) -> Self { let shares = membership - .staked_committee(view_number) + .committee_members(view_number) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); diff --git a/types/src/lib.rs b/types/src/lib.rs index 6206497700..1a9ff34b6c 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -176,8 +176,6 @@ pub struct HotShotConfig { /// Total number of nodes in the network // Earlier it was total_nodes pub num_nodes_with_stake: NonZeroUsize, - /// Number of nodes without stake - pub num_nodes_without_stake: usize, /// List of known node's public keys and stake value for certificate aggregation, serving as public parameter pub known_nodes_with_stake: Vec>, /// All public keys known to be DA nodes @@ -188,8 +186,6 @@ pub struct HotShotConfig { pub my_own_validator_config: ValidatorConfig, /// List of DA committee (staking)nodes for static DA committee pub da_staked_committee_size: usize, - /// List of DA committee nodes (non-staking)nodes for static DA committee - pub da_non_staked_committee_size: usize, /// Number of fixed leaders for GPU VID, normally it will be 0, it's only used when running GPU VID pub fixed_leader_for_gpuvid: usize, /// Base duration for next-view timeout, in milliseconds diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 3208d69203..5d87393ea7 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -154,7 +154,7 @@ impl> return true; } let real_qc_pp = ::public_parameter( - membership.committee_qc_stake_table(), + membership.stake_table(), U256::from(Self::threshold(membership)), ); let Ok(commit) = self.date_commitment(upgrade_lock).await else { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index f6caa02da9..d21f1a82bf 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -5,63 +5,29 @@ // along with the HotShot repository. If not, see . //! The election trait, used to decide which node is the leader and determine if a vote is valid. - -// Needed to avoid the non-binding `let` warning. -#![allow(clippy::let_underscore_untyped)] - -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; - -use snafu::Snafu; - use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; - -/// Error for election problems -#[derive(Snafu, Debug)] -pub enum ElectionError { - /// stub error to be filled in - StubError, - /// Math error doing something - /// NOTE: it would be better to make Election polymorphic over - /// the election error and then have specific math errors - MathError, -} +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; /// A protocol for determining membership in and participating in a committee. pub trait Membership: Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static { - /// create an election - /// TODO may want to move this to a testableelection trait - fn create_election( - all_nodes: Vec>, + /// Create a committee + fn new( + // Note: eligible_leaders is currently a hack because the DA leader == the quorum leader + // but they should not have voting power. + eligible_leaders: Vec>, committee_members: Vec>, committee_topic: Topic, - fixed_leader_for_gpuvid: usize, + #[cfg(feature = "fixed-leader-election")] fixed_leader_for_gpuvid: usize, ) -> Self; - /// Clone the public key and corresponding stake table for current elected committee - fn committee_qc_stake_table( - &self, - ) -> Vec<::StakeTableEntry>; - - /// The leader of the committee for view `view_number`. - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; - - /// The staked members of the committee for view `view_number`. - fn staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; - - /// The non-staked members of the committee for view `view_number`. - fn non_staked_committee(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get all participants in the committee (including their stake) + fn stake_table(&self) -> Vec<::StakeTableEntry>; - /// Get whole (staked + non-staked) committee for view `view_number`. - fn whole_committee(&self, view_number: TYPES::Time) -> BTreeSet; - - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic; - - /// Check if a key has stake - fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; + /// Get all participants in the committee for a specific view + fn committee_members(&self, view_number: TYPES::Time) -> BTreeSet; /// Get the stake table entry for a public key, returns `None` if the /// key is not in the table @@ -70,6 +36,15 @@ pub trait Membership: pub_key: &TYPES::SignatureKey, ) -> Option<::StakeTableEntry>; + /// See if a node has stake in the committee + fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; + + /// The leader of the committee for view `view_number`. + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; + + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic; + /// Returns the number of total nodes in the committee fn total_nodes(&self) -> usize; diff --git a/types/src/vote.rs b/types/src/vote.rs index 83b0898c24..0123ba0085 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -161,7 +161,7 @@ impl< let Some(stake_table_entry) = membership.stake(&key) else { return Either::Left(()); }; - let stake_table = membership.committee_qc_stake_table(); + let stake_table = membership.stake_table(); let Some(vote_node_id) = stake_table .iter() .position(|x| *x == stake_table_entry.clone()) From 7013c8b3bf0a01096b1d56d6217d6570330749c2 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Tue, 10 Sep 2024 15:45:12 -0400 Subject: [PATCH 1209/1393] [Byzantine Testing] - Byzantine coordination attack (#3657) * coordination attack * cleanup * cleanup and dont send timeout for Byzantine replica * refactor how we send votes, check a shared hashset * cleanup --- testing/src/byzantine/byzantine_behaviour.rs | 77 +++++++++++++++++++- testing/tests/tests_2/byzantine_tests.rs | 54 +++++++++++++- 2 files changed, 126 insertions(+), 5 deletions(-) diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 27ac3a0161..c4a345b0a8 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -114,6 +114,8 @@ pub struct DishonestLeader { pub dishonest_at_proposal_numbers: HashSet, /// How far back to look for a QC pub view_look_back: usize, + /// Shared state of all view numbers we send bad proposal at + pub dishonest_proposal_view_numbers: Arc>>, } /// Add method that will handle `QuorumProposalSend` events @@ -122,7 +124,7 @@ pub struct DishonestLeader { impl DishonestLeader { /// When a leader is sending a proposal this method will mock a dishonest leader /// We accomplish this by looking back a number of specified views and using that cached proposals QC - fn handle_proposal_send_event( + async fn handle_proposal_send_event( &self, event: &HotShotEvent, proposal: &Proposal>, @@ -150,6 +152,10 @@ impl DishonestLeader { let mut dishonest_proposal = proposal.clone(); dishonest_proposal.data.justify_qc = proposal_from_look_back.justify_qc; + // Save the view we sent the dishonest proposal on (used for coordination attacks with other byzantine replicas) + let mut dishonest_proposal_sent = self.dishonest_proposal_view_numbers.write().await; + dishonest_proposal_sent.insert(proposal.data.view_number); + HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone()) } } @@ -173,7 +179,10 @@ impl + std::fmt::Debug, V: Version match event { HotShotEvent::QuorumProposalSend(proposal, sender) => { self.total_proposals_from_node += 1; - return vec![self.handle_proposal_send_event(event, proposal, sender)]; + return vec![ + self.handle_proposal_send_event(event, proposal, sender) + .await, + ]; } HotShotEvent::QuorumProposalValidated(proposal, _) => { self.validated_proposals.push(proposal.clone()); @@ -358,3 +367,67 @@ impl std::fmt::Debug for DishonestVoting { .finish_non_exhaustive() } } + +#[derive(Debug)] +/// An `EventHandlerState` that will send a vote for a bad proposal +pub struct DishonestVoter { + /// Collect all votes the node sends + pub votes_sent: Vec>, + /// Shared state with views numbers that leaders were dishonest at + pub dishonest_proposal_view_numbers: Arc>>, +} + +#[async_trait] +impl + std::fmt::Debug, V: Versions> + EventTransformerState for DishonestVoter +{ + async fn recv_handler(&mut self, event: &HotShotEvent) -> Vec> { + vec![event.clone()] + } + + async fn send_handler( + &mut self, + event: &HotShotEvent, + public_key: &TYPES::SignatureKey, + private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, + _consensus: Arc>>, + ) -> Vec> { + match event { + HotShotEvent::QuorumProposalRecv(proposal, _sender) => { + // Check if view is a dishonest proposal, if true send a vote + let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await; + if dishonest_proposals.contains(&proposal.data.view_number) { + // Create a vote using data from most recent vote and the current event number + // We wont update internal consensus state for this Byzantine replica but we are at least + // Going to send a vote to the next honest leader + let vote = QuorumVote::::create_signed_vote( + self.votes_sent.last().unwrap().data.clone(), + event.view_number().unwrap(), + public_key, + private_key, + upgrade_lock, + ) + .await + .context("Failed to sign vote") + .unwrap(); + return vec![HotShotEvent::QuorumVoteSend(vote)]; + } + } + HotShotEvent::TimeoutVoteSend(vote) => { + // Check if this view was a dishonest proposal view, if true dont send timeout + let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await; + if dishonest_proposals.contains(&vote.view_number) { + // We craft the vote upon `QuorumProposalRecv` and send out a vote. + // So, dont send the timeout to the next leader from this byzantine replica + return vec![]; + } + } + HotShotEvent::QuorumVoteSend(vote) => { + self.votes_sent.push(vote.clone()); + } + _ => {} + } + vec![event.clone()] + } +} diff --git a/testing/tests/tests_2/byzantine_tests.rs b/testing/tests/tests_2/byzantine_tests.rs index 902999c55c..8eb6364c75 100644 --- a/testing/tests/tests_2/byzantine_tests.rs +++ b/testing/tests/tests_2/byzantine_tests.rs @@ -5,6 +5,7 @@ use std::{ time::Duration, }; +use async_lock::RwLock; use hotshot_example_types::{ node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl, TestVersions}, state_types::TestTypes, @@ -13,8 +14,8 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, byzantine::byzantine_behaviour::{ - BadProposalViewDos, DishonestDa, DishonestLeader, DishonestVoting, DoubleProposeVote, - ViewDelay, + BadProposalViewDos, DishonestDa, DishonestLeader, DishonestVoter, DishonestVoting, + DoubleProposeVote, ViewDelay, }, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, test_builder::{Behaviour, TestDescription}, @@ -96,7 +97,8 @@ cross_tests!( dishonest_at_proposal_numbers: HashSet::from([2, 3]), validated_proposals: Vec::new(), total_proposals_from_node: 0, - view_look_back: 1 + view_look_back: 1, + dishonest_proposal_view_numbers: Arc::new(RwLock::new(HashSet::new())), }; match node_id { 2 => Behaviour::Byzantine(Box::new(dishonest_leader)), @@ -247,3 +249,49 @@ cross_tests!( metadata }, ); + +cross_tests!( + TestName: coordination_attack, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + let dishonest_proposal_view_numbers = Arc::new(RwLock::new(HashSet::new())); + let behaviour = Rc::new(move |node_id| { + match node_id { + 4 => Behaviour::Byzantine(Box::new(DishonestLeader { + // On second proposal send a dishonest qc + dishonest_at_proposal_numbers: HashSet::from([2]), + validated_proposals: Vec::new(), + total_proposals_from_node: 0, + view_look_back: 1, + dishonest_proposal_view_numbers: Arc::clone(&dishonest_proposal_view_numbers), + })), + 5 | 6 => Behaviour::Byzantine(Box::new(DishonestVoter { + votes_sent: Vec::new(), + dishonest_proposal_view_numbers: Arc::clone(&dishonest_proposal_view_numbers), + })), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 1; + metadata.num_nodes_with_stake = 10; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(14), false), + ]); + metadata + }, +); From feefe21f7b9a895c5fc16234361a1d64f2af6dde Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 11 Sep 2024 13:34:14 -0400 Subject: [PATCH 1210/1393] Bf/restart tune (#3668) * actually restart all nodes in restart tests * saturating sub * lint --- testing/src/overall_safety_task.rs | 9 ++++++--- testing/tests/tests_2/catchup.rs | 12 ++++-------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index a4680d5000..5aac9086d3 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -249,9 +249,12 @@ impl, V: Versions> TestTas expected_views_to_fail, }: OverallSafetyPropertiesDescription = self.properties.clone(); - let num_incomplete_views = self.ctx.round_results.len() - - self.ctx.successful_views.len() - - self.ctx.failed_views.len(); + let num_incomplete_views = self + .ctx + .round_results + .len() + .saturating_sub(self.ctx.successful_views.len()) + .saturating_sub(self.ctx.failed_views.len()); if self.ctx.successful_views.len() < num_successful_views { return TestResult::Fail(Box::new(OverallSafetyTaskErr::::NotEnoughDecides { diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index e0b79f7302..16d7e530ea 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -322,7 +322,7 @@ async fn test_all_restart() { let mut metadata: TestDescription = TestDescription::default(); let mut catchup_nodes = vec![]; - for i in 1..20 { + for i in 0..20 { catchup_nodes.push(ChangeNode { idx: i, updown: UpDown::Restart, @@ -384,7 +384,7 @@ async fn test_all_restart_cdn() { let mut metadata: TestDescription = TestDescription::default(); let mut catchup_nodes = vec![]; - for i in 1..20 { + for i in 0..20 { catchup_nodes.push(ChangeNode { idx: i, updown: UpDown::Restart, @@ -450,12 +450,8 @@ async fn test_all_restart_one_da() { let mut metadata: TestDescription = TestDescription::default(); - let node_0_down = vec![ChangeNode { - idx: 0, - updown: UpDown::Restart, - }]; let mut catchup_nodes = vec![]; - for i in 1..20 { + for i in 0..20 { catchup_nodes.push(ChangeNode { idx: i, updown: UpDown::Restart, @@ -471,7 +467,7 @@ async fn test_all_restart_one_da() { metadata.spinning_properties = SpinningTaskDescription { // Restart all the nodes in view 13 - node_changes: vec![(12, node_0_down), (13, catchup_nodes)], + node_changes: vec![(13, catchup_nodes)], }; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); From fb578d7749fec5d1450398acd6ba09d7003efbc8 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Thu, 12 Sep 2024 12:29:33 -0400 Subject: [PATCH 1211/1393] [TESTING] - Fix Startup / Restart Test Failures (#3669) * add logic to remove from failed views if node decides on view after timeout * test ci for new failure * subtract properly * cleanup * spin up node a few views earlier * increase round start delay --- testing/src/overall_safety_task.rs | 76 ++++++++++++++++++++++++++---- testing/tests/tests_2/catchup.rs | 3 ++ 2 files changed, 71 insertions(+), 8 deletions(-) diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 5aac9086d3..2a7066b07c 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -82,6 +82,12 @@ pub enum OverallSafetyTaskErr { expected_failed_views: HashSet, actual_failed_views: HashSet, }, + /// This is a case where we have too many failed + succesful views over round results + /// This should never be the case and requires debugging if we see this get thrown + NotEnoughRoundResults { + results_count: usize, + views_count: usize, + }, } /// Data availability task state @@ -165,8 +171,19 @@ impl, V: Versions> TestTas let paired_up = (leaf_chain.to_vec(), (*qc).clone()); match self.ctx.round_results.entry(view_number) { Entry::Occupied(mut o) => { - o.get_mut() - .insert_into_result(id, paired_up, maybe_block_size) + let entry = o.get_mut(); + let leaf = entry.insert_into_result(id, paired_up, maybe_block_size); + + // Here we noticed is a node may start up and time out waiting for a proposal + // So we add the timeout to failed_views, but eventually the proposal is received we decide on the view + // If we do indeed have a view timeout for the node at this point we want to remove it + entry.cleanup_previous_timeouts_on_view( + &mut self.ctx.failed_views, + &view_number, + &(id as u64), + ); + + leaf } Entry::Vacant(v) => { let mut round_result = RoundResult::default(); @@ -249,12 +266,21 @@ impl, V: Versions> TestTas expected_views_to_fail, }: OverallSafetyPropertiesDescription = self.properties.clone(); - let num_incomplete_views = self - .ctx - .round_results - .len() - .saturating_sub(self.ctx.successful_views.len()) - .saturating_sub(self.ctx.failed_views.len()); + let views_count = self.ctx.failed_views.len() + self.ctx.successful_views.len(); + let results_count = self.ctx.round_results.len(); + + // This can cause tests to crash if we do the subtracting to get `num_incomplete_views` below + // So lets fail return an error instead + // Use this check instead of saturating_sub as that could hide a real problem + if views_count > results_count { + return TestResult::Fail(Box::new( + OverallSafetyTaskErr::::NotEnoughRoundResults { + results_count, + views_count, + }, + )); + } + let num_incomplete_views = results_count - views_count; if self.ctx.successful_views.len() < num_successful_views { return TestResult::Fail(Box::new(OverallSafetyTaskErr::::NotEnoughDecides { @@ -540,6 +566,40 @@ impl RoundResult { } leaves } + + fn cleanup_previous_timeouts_on_view( + &mut self, + failed_views: &mut HashSet, + view_number: &TYPES::Time, + id: &u64, + ) { + // check if this node had a previous timeout + match self.failed_nodes.get(id) { + Some(error) => match error.as_ref() { + HotShotError::ViewTimeoutError { + view_number, + state: _, + } => { + tracing::debug!( + "Node {} originally timeout for view: {:?}. It has now been decided on.", + id, + view_number + ); + self.failed_nodes.remove(id); + } + _ => return, + }, + None => return, + } + + // check if no more failed nodes + if self.failed_nodes.is_empty() && failed_views.remove(view_number) { + tracing::debug!( + "Removed view {:?} from failed views, all nodes have agreed upon view.", + view_number + ); + } + } } /// cross node safety properties diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 16d7e530ea..8253ae8084 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -23,6 +23,9 @@ async fn test_catchup() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, + // increase the round delay for this test + // TODO: remove this delay increase for test - https://github.com/EspressoSystems/HotShot/issues/3673 + round_start_delay: 200, ..Default::default() }; let mut metadata: TestDescription = From 7e5d89bad30cfd82fcb953623e4d0bc2dc6bbf4e Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 12 Sep 2024 16:33:23 -0400 Subject: [PATCH 1212/1393] [TECH DEBT] Improve `ConnectedNetwork` - `recv_message` (#3671) * improve `ConnectedNetwork` * remove accidental `run_config` change * fix `memory network` test --- hotshot/Cargo.toml | 1 + hotshot/src/tasks/mod.rs | 70 +++---- .../src/traits/networking/combined_network.rs | 48 ++--- .../src/traits/networking/libp2p_network.rs | 6 +- .../src/traits/networking/memory_network.rs | 8 +- .../src/traits/networking/push_cdn_network.rs | 4 +- task-impls/src/network.rs | 193 +++++++++--------- testing/src/test_task.rs | 54 ++--- testing/tests/tests_3/memory_network.rs | 54 +++-- types/src/traits/network.rs | 2 +- 10 files changed, 210 insertions(+), 230 deletions(-) diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 696a9cc7c4..9dd8e2a35a 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -61,6 +61,7 @@ blake3.workspace = true sha2 = { workspace = true } url = { workspace = true } num_enum = "0.7" +parking_lot = "0.12" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5db94d0e14..106300e271 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,10 +8,10 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{fmt::Debug, sync::Arc}; use async_broadcast::broadcast; -use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use futures::{ @@ -35,7 +35,7 @@ use hotshot_task_impls::{ use hotshot_types::{ consensus::Consensus, constants::EVENT_CHANNEL_SIZE, - message::{Messages, UpgradeLock}, + message::{Message, UpgradeLock}, request_response::RequestReceiver, traits::{ network::ConnectedNetwork, @@ -119,53 +119,37 @@ pub fn add_network_message_task< let task_handle = async_spawn(async move { futures::pin_mut!(shutdown_signal); - let recv_stream = stream::unfold((), |()| async { - let msgs = match network.recv_msgs().await { - Ok(msgs) => { - let mut deserialized_messages = Vec::new(); - for msg in msgs { - let deserialized_message = match upgrade_lock.deserialize(&msg).await { - Ok(deserialized) => deserialized, - Err(e) => { - tracing::error!("Failed to deserialize message: {}", e); - continue; - } - }; - deserialized_messages.push(deserialized_message); - } - Messages(deserialized_messages) - } - Err(err) => { - tracing::error!("failed to receive messages: {err}"); - Messages(vec![]) - } - }; - Some((msgs, ())) - }); - - let fused_recv_stream = recv_stream.boxed().fuse(); - futures::pin_mut!(fused_recv_stream); - loop { + // Wait for one of the following to resolve: futures::select! { + // Wait for a shutdown signal () = shutdown_signal => { tracing::error!("Shutting down network message task"); return; } - msgs_option = fused_recv_stream.next() => { - if let Some(msgs) = msgs_option { - if msgs.0.is_empty() { - // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 - async_sleep(Duration::from_millis(100)).await; - } else { - state.handle_messages(msgs.0).await; + + // Wait for a message from the network + message = network.recv_message().fuse() => { + // Make sure the message did not fail + let message = match message { + Ok(message) => message, + Err(e) => { + tracing::error!("Failed to receive message: {:?}", e); + continue; } - } else { - // Stream has ended, which shouldn't happen in this case. - // You might want to handle this situation, perhaps by breaking the loop or logging an error. - tracing::error!("Network message stream unexpectedly ended"); - return; - } + }; + + // Deserialize the message + let deserialized_message: Message = match upgrade_lock.deserialize(&message).await { + Ok(message) => message, + Err(e) => { + tracing::error!("Failed to deserialize message: {:?}", e); + continue; + } + }; + + // Handle the message + state.handle_message(deserialized_message).await; } } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 8403964d60..4bed1471b6 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -18,6 +18,8 @@ use std::{ time::Duration, }; +use parking_lot::RwLock as PlRwLock; + use async_broadcast::{broadcast, InactiveReceiver, Sender}; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, @@ -68,7 +70,7 @@ pub struct CombinedNetworks { networks: Arc>, /// Last n seen messages to prevent processing duplicates - message_cache: Arc>>, + message_cache: Arc>>, /// How many times primary failed to deliver primary_fail_counter: Arc, @@ -106,7 +108,7 @@ impl CombinedNetworks { Self { networks, - message_cache: Arc::new(RwLock::new(LruCache::new( + message_cache: Arc::new(PlRwLock::new(LruCache::new( NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), ))), primary_fail_counter: Arc::new(AtomicU64::new(0)), @@ -304,7 +306,7 @@ impl TestableNetworkingImplementation for CombinedNetwor ); // We want to use the same message cache between the two networks - let message_cache = Arc::new(RwLock::new(LruCache::new( + let message_cache = Arc::new(PlRwLock::new(LruCache::new( NonZeroUsize::new(COMBINED_NETWORK_CACHE_SIZE).unwrap(), ))); @@ -466,35 +468,29 @@ impl ConnectedNetwork for CombinedNetworks /// /// # Errors /// Does not error - async fn recv_msgs(&self) -> Result>, NetworkError> { - // recv on both networks because nodes may be accessible only on either. discard duplicates - // TODO: improve this algorithm: https://github.com/EspressoSystems/HotShot/issues/2089 - let mut primary_fut = self.primary().recv_msgs().fuse(); - let mut secondary_fut = self.secondary().recv_msgs().fuse(); - - let msgs = select! { - p = primary_fut => p?, - s = secondary_fut => s?, - }; + async fn recv_message(&self) -> Result, NetworkError> { + loop { + // Receive from both networks + let mut primary_fut = self.primary().recv_message().fuse(); + let mut secondary_fut = self.secondary().recv_message().fuse(); + + // Wait for one to return a message + let message = select! { + p = primary_fut => p?, + s = secondary_fut => s?, + }; - let mut filtered_msgs = Vec::with_capacity(msgs.len()); - - // For each message, - for msg in msgs { // Calculate hash of the message - let message_hash = calculate_hash_of(&msg); + let message_hash = calculate_hash_of(&message); - // Add the hash to the cache - if !self.message_cache.read().await.contains(&message_hash) { - // If the message is not in the cache, process it - filtered_msgs.push(msg.clone()); + // Check if the hash is in the cache + if !self.message_cache.read().contains(&message_hash) { + // Add the hash to the cache + self.message_cache.write().put(message_hash, ()); - // Add it to the cache - self.message_cache.write().await.put(message_hash, ()); + break Ok(message); } } - - Ok(filtered_msgs) } fn queue_node_lookup( diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index dbc4d36e5b..e75e601235 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1055,12 +1055,12 @@ impl ConnectedNetwork for Libp2pNetwork { /// /// # Errors /// If there is a network-related failure. - #[instrument(name = "Libp2pNetwork::recv_msgs", skip_all)] - async fn recv_msgs(&self) -> Result>, NetworkError> { + #[instrument(name = "Libp2pNetwork::recv_message", skip_all)] + async fn recv_message(&self) -> Result, NetworkError> { let result = self .inner .receiver - .drain_at_least_one() + .recv() .await .map_err(|_x| NetworkError::ShutDown)?; diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 392dc9c005..ee3a55684f 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -371,19 +371,19 @@ impl ConnectedNetwork for MemoryNetwork { /// /// # Errors /// If the other side of the channel is closed - #[instrument(name = "MemoryNetwork::recv_msgs", skip_all)] - async fn recv_msgs(&self) -> Result>, NetworkError> { + #[instrument(name = "MemoryNetwork::recv_messages", skip_all)] + async fn recv_message(&self) -> Result, NetworkError> { let ret = self .inner .output .lock() .await - .drain_at_least_one() + .recv() .await .map_err(|_x| NetworkError::ShutDown)?; self.inner .in_flight_message_count - .fetch_sub(ret.len(), Ordering::Relaxed); + .fetch_sub(1, Ordering::Relaxed); Ok(ret) } } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 3c544a6e61..f675b01f37 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -546,7 +546,7 @@ impl ConnectedNetwork for PushCdnNetwork { /// /// # Errors /// - If we fail to receive messages. Will trigger a retry automatically. - async fn recv_msgs(&self) -> Result>, NetworkError> { + async fn recv_message(&self) -> Result, NetworkError> { // Receive a message let message = self.client.receive_message().await; @@ -577,7 +577,7 @@ impl ConnectedNetwork for PushCdnNetwork { return Ok(vec![]); }; - Ok(vec![message]) + Ok(message) } /// Do nothing here, as we don't need to look up nodes. diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 7102179585..2042a9fc04 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -102,109 +102,106 @@ pub struct NetworkMessageTaskState { impl NetworkMessageTaskState { #[instrument(skip_all, name = "Network message task", level = "trace")] - /// Handle the message. - pub async fn handle_messages(&mut self, messages: Vec>) { - // We will send only one event for a vector of transactions. - let mut transactions = Vec::new(); - for message in messages { - tracing::trace!("Received message from network:\n\n{message:?}"); - let sender = message.sender; - match message.kind { - MessageKind::Consensus(consensus_message) => { - let event = match consensus_message { - SequencingMessage::General(general_message) => match general_message { - GeneralConsensusMessage::Proposal(proposal) => { - HotShotEvent::QuorumProposalRecv(proposal, sender) - } - GeneralConsensusMessage::ProposalRequested(req, sig) => { - HotShotEvent::QuorumProposalRequestRecv(req, sig) - } - GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { - HotShotEvent::QuorumProposalResponseRecv(proposal) - } - GeneralConsensusMessage::Vote(vote) => { - HotShotEvent::QuorumVoteRecv(vote.clone()) - } - GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { - HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) - } - GeneralConsensusMessage::ViewSyncPreCommitCertificate( - view_sync_message, - ) => HotShotEvent::ViewSyncPreCommitCertificate2Recv(view_sync_message), - - GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { - HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) - } - GeneralConsensusMessage::ViewSyncCommitCertificate( - view_sync_message, - ) => HotShotEvent::ViewSyncCommitCertificate2Recv(view_sync_message), - - GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { - HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) - } - GeneralConsensusMessage::ViewSyncFinalizeCertificate( - view_sync_message, - ) => HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_message), - - GeneralConsensusMessage::TimeoutVote(message) => { - HotShotEvent::TimeoutVoteRecv(message) - } - GeneralConsensusMessage::UpgradeProposal(message) => { - HotShotEvent::UpgradeProposalRecv(message, sender) - } - GeneralConsensusMessage::UpgradeVote(message) => { - error!("Received upgrade vote!"); - HotShotEvent::UpgradeVoteRecv(message) - } - }, - SequencingMessage::Da(da_message) => match da_message { - DaConsensusMessage::DaProposal(proposal) => { - HotShotEvent::DaProposalRecv(proposal, sender) - } - DaConsensusMessage::DaVote(vote) => { - HotShotEvent::DaVoteRecv(vote.clone()) - } - DaConsensusMessage::DaCertificate(cert) => { - HotShotEvent::DaCertificateRecv(cert) - } - DaConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VidShareRecv(proposal) - } - }, - }; - // TODO (Keyao benchmarking) Update these event variants (similar to the - // `TransactionsRecv` event) so we can send one event for a vector of messages. - // - broadcast_event(Arc::new(event), &self.internal_event_stream).await; - } - MessageKind::Data(message) => match message { - DataMessage::SubmitTransaction(transaction, _) => { - transactions.push(transaction); - } - DataMessage::DataResponse(_) | DataMessage::RequestData(_) => { - warn!("Request and Response messages should not be received in the NetworkMessage task"); - } - }, + /// Handles a (deserialized) message from the network + pub async fn handle_message(&mut self, message: Message) { + tracing::trace!("Received message from network:\n\n{message:?}"); + + // Match the message kind and send the appropriate event to the internal event stream + let sender = message.sender; + match message.kind { + // Handle consensus messages + MessageKind::Consensus(consensus_message) => { + let event = match consensus_message { + SequencingMessage::General(general_message) => match general_message { + GeneralConsensusMessage::Proposal(proposal) => { + HotShotEvent::QuorumProposalRecv(proposal, sender) + } + GeneralConsensusMessage::ProposalRequested(req, sig) => { + HotShotEvent::QuorumProposalRequestRecv(req, sig) + } + GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { + HotShotEvent::QuorumProposalResponseRecv(proposal) + } + GeneralConsensusMessage::Vote(vote) => { + HotShotEvent::QuorumVoteRecv(vote.clone()) + } + GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { + HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) + } + GeneralConsensusMessage::ViewSyncPreCommitCertificate( + view_sync_message, + ) => HotShotEvent::ViewSyncPreCommitCertificate2Recv(view_sync_message), + + GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { + HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) + } + GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { + HotShotEvent::ViewSyncCommitCertificate2Recv(view_sync_message) + } + + GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { + HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) + } + GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { + HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_message) + } + + GeneralConsensusMessage::TimeoutVote(message) => { + HotShotEvent::TimeoutVoteRecv(message) + } + GeneralConsensusMessage::UpgradeProposal(message) => { + HotShotEvent::UpgradeProposalRecv(message, sender) + } + GeneralConsensusMessage::UpgradeVote(message) => { + error!("Received upgrade vote!"); + HotShotEvent::UpgradeVoteRecv(message) + } + }, + SequencingMessage::Da(da_message) => match da_message { + DaConsensusMessage::DaProposal(proposal) => { + HotShotEvent::DaProposalRecv(proposal, sender) + } + DaConsensusMessage::DaVote(vote) => HotShotEvent::DaVoteRecv(vote.clone()), + DaConsensusMessage::DaCertificate(cert) => { + HotShotEvent::DaCertificateRecv(cert) + } + DaConsensusMessage::VidDisperseMsg(proposal) => { + HotShotEvent::VidShareRecv(proposal) + } + }, + }; + // TODO (Keyao benchmarking) Update these event variants (similar to the + // `TransactionsRecv` event) so we can send one event for a vector of messages. + // + broadcast_event(Arc::new(event), &self.internal_event_stream).await; + } - MessageKind::External(data) => { - // Send the external message to the external event stream so it can be processed + // Handle data messages + MessageKind::Data(message) => match message { + DataMessage::SubmitTransaction(transaction, _) => { broadcast_event( - Event { - view_number: TYPES::Time::new(1), - event: EventType::ExternalMessageReceived(data), - }, - &self.external_event_stream, + Arc::new(HotShotEvent::TransactionsRecv(vec![transaction])), + &self.internal_event_stream, ) .await; } - }; - } - if !transactions.is_empty() { - broadcast_event( - Arc::new(HotShotEvent::TransactionsRecv(transactions)), - &self.internal_event_stream, - ) - .await; + DataMessage::DataResponse(_) | DataMessage::RequestData(_) => { + warn!("Request and Response messages should not be received in the NetworkMessage task"); + } + }, + + // Handle external messages + MessageKind::External(data) => { + // Send the external message to the external event stream so it can be processed + broadcast_event( + Event { + view_number: TYPES::Time::new(1), + event: EventType::ExternalMessageReceived(data), + }, + &self.external_event_stream, + ) + .await; + } } } } diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 3559c4cee9..536153b77c 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -8,15 +8,15 @@ use std::{sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; +use async_compatibility_layer::art::{async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; use futures::future::select_all; -use hotshot::types::Event; +use hotshot::types::{Event, Message}; use hotshot_task_impls::{events::HotShotEvent, network::NetworkMessageTaskState}; use hotshot_types::{ - message::{Messages, UpgradeLock}, + message::UpgradeLock, traits::{ network::ConnectedNetwork, node_implementation::{NodeType, Versions}, @@ -131,37 +131,27 @@ pub async fn add_network_message_test_task< async_spawn(async move { loop { - let msgs = match network.recv_msgs().await { - Ok(msgs) => { - let mut deserialized_messages = Vec::new(); - - for msg in msgs { - let deserialized_message = match upgrade_lock.deserialize(&msg).await { - Ok(deserialized) => deserialized, - Err(e) => { - tracing::error!("Failed to deserialize message: {}", e); - return; - } - }; - - deserialized_messages.push(deserialized_message); - } - - Messages(deserialized_messages) - } - Err(err) => { - error!("failed to receive messages: {err}"); - - // return zero messages so we sleep and try again - Messages(vec![]) + // Get the next message from the network + let message = match network.recv_message().await { + Ok(message) => message, + Err(e) => { + error!("Failed to receive message: {:?}", e); + continue; } }; - if msgs.0.is_empty() { - // TODO: Stop sleeping here: https://github.com/EspressoSystems/HotShot/issues/2558 - async_sleep(Duration::from_millis(100)).await; - } else { - state.handle_messages(msgs.0).await; - } + + // Deserialize the message + let deserialized_message: Message = + match upgrade_lock.deserialize(&message).await { + Ok(message) => message, + Err(e) => { + tracing::error!("Failed to deserialize message: {:?}", e); + continue; + } + }; + + // Handle the message + state.handle_message(deserialized_message).await; } }) } diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 0ecbbccc50..21db37e42d 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -5,9 +5,9 @@ // along with the HotShot repository. If not, see . #![allow(clippy::panic)] -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; -use async_compatibility_layer::logging::setup_logging; +use async_compatibility_layer::{art::async_timeout, logging::setup_logging}; use hotshot::{ traits::{ election::static_committee::GeneralStaticCommittee, @@ -171,13 +171,16 @@ async fn memory_network_direct_queue() { .direct_message(serialized_message.clone(), pub_key_2) .await .expect("Failed to message node"); - let mut recv_messages = network2 - .recv_msgs() + let recv_message = network2 + .recv_message() .await .expect("Failed to receive message"); - let recv_message = recv_messages.pop().unwrap(); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!(recv_messages.is_empty()); + assert!( + async_timeout(Duration::from_secs(1), network2.recv_message()) + .await + .is_err() + ); fake_message_eq(sent_message, deserialized_message); } @@ -191,13 +194,16 @@ async fn memory_network_direct_queue() { .direct_message(serialized_message.clone(), pub_key_1) .await .expect("Failed to message node"); - let mut recv_messages = network1 - .recv_msgs() + let recv_message = network1 + .recv_message() .await .expect("Failed to receive message"); - let recv_message = recv_messages.pop().unwrap(); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!(recv_messages.is_empty()); + assert!( + async_timeout(Duration::from_secs(1), network1.recv_message()) + .await + .is_err() + ); fake_message_eq(sent_message, deserialized_message); } } @@ -226,13 +232,16 @@ async fn memory_network_broadcast_queue() { .broadcast_message(serialized_message.clone(), Topic::Da, BroadcastDelay::None) .await .expect("Failed to message node"); - let mut recv_messages = network2 - .recv_msgs() + let recv_message = network2 + .recv_message() .await .expect("Failed to receive message"); - let recv_message = recv_messages.pop().unwrap(); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!(recv_messages.is_empty()); + assert!( + async_timeout(Duration::from_secs(1), network2.recv_message()) + .await + .is_err() + ); fake_message_eq(sent_message, deserialized_message); } @@ -250,13 +259,16 @@ async fn memory_network_broadcast_queue() { ) .await .expect("Failed to message node"); - let mut recv_messages = network1 - .recv_msgs() + let recv_message = network1 + .recv_message() .await .expect("Failed to receive message"); - let recv_message = recv_messages.pop().unwrap(); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!(recv_messages.is_empty()); + assert!( + async_timeout(Duration::from_secs(1), network1.recv_message()) + .await + .is_err() + ); fake_message_eq(sent_message, deserialized_message); } } @@ -325,18 +337,18 @@ async fn memory_network_test_in_flight_message_count() { while TestableNetworkingImplementation::::in_flight_message_count(&network1).unwrap() > 0 { - network1.recv_msgs().await.unwrap(); + network1.recv_message().await.unwrap(); } while TestableNetworkingImplementation::::in_flight_message_count(&network2).unwrap() > messages.len() { - network2.recv_msgs().await.unwrap(); + network2.recv_message().await.unwrap(); } while TestableNetworkingImplementation::::in_flight_message_count(&network2).unwrap() > 0 { - network2.recv_msgs().await.unwrap(); + network2.recv_message().await.unwrap(); } assert_eq!( diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index d8d89e7c20..c67644eecd 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -304,7 +304,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st /// /// # Errors /// If there is a network-related failure. - async fn recv_msgs(&self) -> Result>, NetworkError>; + async fn recv_message(&self) -> Result, NetworkError>; /// Ask request the network for some data. Returns the request ID for that data, /// The ID returned can be used for cancelling the request From f2aca30d4f39b73046dec24dcbc0a0da0b146184 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 13 Sep 2024 16:47:37 -0400 Subject: [PATCH 1213/1393] lower upgrade threshold (#3676) --- hotshot/src/traits/election/static_committee.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 01b56be103..555050b89a 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -4,8 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::collections::BTreeMap; -use std::num::NonZeroU64; +use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; use ethereum_types::U256; use hotshot_types::{ @@ -183,6 +182,10 @@ impl Membership for GeneralStaticCommittee { /// Get the voting upgrade threshold for the committee fn upgrade_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() + NonZeroU64::new(max( + (self.stake_table.len() as u64 * 9) / 10, + ((self.stake_table.len() as u64 * 2) / 3) + 1, + )) + .unwrap() } } From aef35179a222017322dcdfad86f95dbee5f6668b Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 17 Sep 2024 12:00:39 -0400 Subject: [PATCH 1214/1393] More marketplace testing (#3662) --- example-types/src/block_types.rs | 42 +++++- .../static_committee_leader_two_views.rs | 3 +- .../src/traits/networking/combined_network.rs | 3 +- testing/src/consistency_task.rs | 27 +++- testing/src/spinning_task.rs | 7 +- testing/src/test_builder.rs | 82 +++++++++-- testing/src/test_runner.rs | 42 ++++-- testing/src/view_generator.rs | 23 ++- testing/tests/tests_1/block_builder.rs | 4 +- testing/tests/tests_1/consensus_task.rs | 8 +- testing/tests/tests_1/da_task.rs | 14 +- testing/tests/tests_1/quorum_proposal_task.rs | 36 +++-- testing/tests/tests_1/test_success.rs | 25 +--- testing/tests/tests_1/transaction_task.rs | 4 +- .../tests_1/upgrade_task_with_consensus.rs | 16 +-- .../tests_1/upgrade_task_with_proposal.rs | 8 +- testing/tests/tests_1/vid_task.rs | 14 +- testing/tests/tests_3/test_marketplace.rs | 135 ++++++++++++++++++ .../tests_3/test_with_builder_failures.rs | 2 +- types/src/traits/election.rs | 3 +- 20 files changed, 394 insertions(+), 104 deletions(-) create mode 100644 testing/tests/tests_3/test_marketplace.rs diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 1547e46b9b..742c77fb88 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -22,6 +22,7 @@ use hotshot_types::{ utils::BuilderCommitment, vid::{VidCommitment, VidCommon}, }; +use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; use snafu::Snafu; @@ -157,7 +158,9 @@ impl TestableBlock for TestBlockPayload { } #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct TestMetadata; +pub struct TestMetadata { + pub num_transactions: u64, +} impl EncodeBytes for TestMetadata { fn encode(&self) -> Arc<[u8]> { @@ -185,11 +188,14 @@ impl BlockPayload for TestBlockPayload { _instance_state: &Self::Instance, ) -> Result<(Self, Self::Metadata), Self::Error> { let txns_vec: Vec = transactions.into_iter().collect(); + let metadata = TestMetadata { + num_transactions: txns_vec.len() as u64, + }; Ok(( Self { transactions: txns_vec, }, - TestMetadata, + metadata, )) } @@ -215,7 +221,12 @@ impl BlockPayload for TestBlockPayload { } fn empty() -> (Self, Self::Metadata) { - (Self::genesis(), TestMetadata) + ( + Self::genesis(), + TestMetadata { + num_transactions: 0, + }, + ) } fn builder_commitment(&self, _metadata: &Self::Metadata) -> BuilderCommitment { @@ -243,15 +254,20 @@ pub struct TestBlockHeader { pub payload_commitment: VidCommitment, /// Fast commitment for builder verification pub builder_commitment: BuilderCommitment, + /// block metdata + pub metadata: TestMetadata, /// Timestamp when this header was created. pub timestamp: u64, + /// random + pub random: u64, } impl TestBlockHeader { - fn new>( + pub fn new>( parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, + metadata: TestMetadata, ) -> Self { let parent = parent_leaf.block_header(); @@ -261,11 +277,15 @@ impl TestBlockHeader { timestamp = parent.timestamp; } + let random = thread_rng().gen_range(0..=u64::MAX); + Self { block_number: parent.block_number + 1, payload_commitment, builder_commitment, + metadata, timestamp, + random, } } } @@ -287,7 +307,7 @@ impl< parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, - _metadata: >::Metadata, + metadata: >::Metadata, _builder_fee: BuilderFee, _vid_common: VidCommon, _version: Version, @@ -297,6 +317,7 @@ impl< parent_leaf, payload_commitment, builder_commitment, + metadata, )) } @@ -306,7 +327,7 @@ impl< parent_leaf: &Leaf, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, - _metadata: >::Metadata, + metadata: >::Metadata, _builder_fee: Vec>, _vid_common: VidCommon, _auction_results: Option, @@ -317,6 +338,7 @@ impl< parent_leaf, payload_commitment, builder_commitment, + metadata, )) } @@ -326,11 +348,17 @@ impl< builder_commitment: BuilderCommitment, _metadata: >::Metadata, ) -> Self { + let metadata = TestMetadata { + num_transactions: 0, + }; + Self { block_number: 0, payload_commitment, builder_commitment, + metadata, timestamp: 0, + random: 0, } } @@ -343,7 +371,7 @@ impl< } fn metadata(&self) -> &>::Metadata { - &TestMetadata + &self.metadata } fn builder_commitment(&self) -> BuilderCommitment { diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 2f88c5ba4d..9aed92f531 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -4,8 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::collections::BTreeMap; -use std::num::NonZeroU64; +use std::{collections::BTreeMap, num::NonZeroU64}; use ethereum_types::U256; use hotshot_types::{ diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 4bed1471b6..2a4bd88f8f 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -18,8 +18,6 @@ use std::{ time::Duration, }; -use parking_lot::RwLock as PlRwLock; - use async_broadcast::{broadcast, InactiveReceiver, Sender}; use async_compatibility_layer::{ art::{async_sleep, async_spawn}, @@ -47,6 +45,7 @@ use hotshot_types::{ BoxSyncFuture, }; use lru::LruCache; +use parking_lot::RwLock as PlRwLock; use tracing::{debug, warn}; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 9f9c2ad1e8..3cb04f65c1 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -9,6 +9,7 @@ use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::{bail, ensure, Context, Result}; use async_trait::async_trait; +use hotshot_example_types::block_types::TestBlockHeader; use hotshot_types::{ data::Leaf, event::{Event, EventType}, @@ -18,6 +19,7 @@ use hotshot_types::{ use crate::{ overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::TransactionValidator, test_task::{TestResult, TestTaskState}, }; @@ -207,9 +209,11 @@ pub struct ConsistencyTask { pub ensure_upgrade: bool, /// phantom marker pub _pd: PhantomData, + /// function used to validate the number of transactions committed in each block + pub validate_transactions: TransactionValidator, } -impl ConsistencyTask { +impl, V: Versions> ConsistencyTask { pub async fn validate(&self) -> Result<()> { let sanitized_network_map = sanitize_network_map(&self.consensus_leaves)?; @@ -222,8 +226,21 @@ impl ConsistencyTask { acc || leaf.upgrade_certificate().is_some() }); - ensure!(expected_upgrade == actual_upgrade, - "Mismatch between expected and actual upgrade. Expected upgrade: {expected_upgrade}. Actual upgrade: {actual_upgrade}" + let mut transactions = Vec::new(); + + transactions = sanitized_view_map + .iter() + .fold(transactions, |mut acc, (view, leaf)| { + acc.push((**view, leaf.block_header().metadata.num_transactions)); + + acc + }); + + (self.validate_transactions)(&transactions)?; + + ensure!( + expected_upgrade == actual_upgrade, + "Mismatch between expected and actual upgrade. Expected upgrade: {expected_upgrade}. Actual upgrade: {actual_upgrade}" ); Ok(()) @@ -231,7 +248,9 @@ impl ConsistencyTask { } #[async_trait] -impl TestTaskState for ConsistencyTask { +impl, V: Versions> TestTaskState + for ConsistencyTask +{ type Event = Event; /// Handles an event from one of multiple receivers. diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index ec9bda7660..8a46976cac 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -16,6 +16,7 @@ use futures::future::join_all; use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, + block_types::TestBlockHeader, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, testable_delay::DelayConfig, @@ -65,7 +66,11 @@ pub struct SpinningTask, V #[async_trait] impl< - TYPES: NodeType, + TYPES: NodeType< + InstanceState = TestInstanceState, + ValidatedState = TestValidatedState, + BlockHeader = TestBlockHeader, + >, I: TestableNodeImplementation, N: ConnectedNetwork, V: Versions, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 3ff40a68ff..df05b8414f 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -6,6 +6,7 @@ use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; +use anyhow::{ensure, Result}; use hotshot::{ tasks::EventTransformerState, traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, @@ -34,6 +35,9 @@ use crate::{ test_launcher::{Network, ResourceGenerators, TestLauncher}, view_sync_task::ViewSyncTaskDescription, }; + +pub type TransactionValidator = Arc) -> Result<()> + Send + Sync>; + /// data describing how a round should be timed. #[derive(Clone, Debug, Copy)] pub struct TimingData { @@ -85,6 +89,8 @@ pub struct TestDescription, V: Ver pub view_sync_properties: ViewSyncTaskDescription, /// description of builders to run pub builders: Vec1, + /// description of fallback builder to run + pub fallback_builder: BuilderDescription, /// description of the solver to run pub solver: FakeSolverApiDescription, /// nodes with byzantine behaviour @@ -93,6 +99,68 @@ pub struct TestDescription, V: Ver pub async_delay_config: DelayConfig, /// view in which to propose an upgrade pub upgrade_view: Option, + /// whether to initialize the solver on startup + pub start_solver: bool, + /// boxed closure used to validate the resulting transactions + pub validate_transactions: TransactionValidator, +} + +pub fn nonempty_block_threshold(threshold: (u64, u64)) -> TransactionValidator { + Arc::new(move |transactions| { + if matches!(threshold, (0, _)) { + return Ok(()); + } + + let blocks: Vec<_> = transactions.iter().filter(|(view, _)| *view != 0).collect(); + + let num_blocks = blocks.len() as u64; + let mut num_nonempty_blocks = 0; + + ensure!(num_blocks > 0, "Failed to commit any non-genesis blocks"); + + for (_, num_transactions) in blocks { + if *num_transactions > 0 { + num_nonempty_blocks += 1; + } + } + + ensure!( + // i.e. num_nonempty_blocks / num_blocks >= threshold.0 / threshold.1 + num_nonempty_blocks * threshold.1 >= threshold.0 * num_blocks, + "Failed to meet nonempty block threshold of {}/{}; got {num_nonempty_blocks} nonempty blocks out of a total of {num_blocks}", threshold.0, threshold.1 + ); + + Ok(()) + }) +} + +pub fn nonempty_block_limit(limit: (u64, u64)) -> TransactionValidator { + Arc::new(move |transactions| { + if matches!(limit, (_, 0)) { + return Ok(()); + } + + let blocks: Vec<_> = transactions.iter().filter(|(view, _)| *view != 0).collect(); + + let num_blocks = blocks.len() as u64; + let mut num_nonempty_blocks = 0; + + ensure!(num_blocks > 0, "Failed to commit any non-genesis blocks"); + + for (_, num_transactions) in blocks { + if *num_transactions > 0 { + num_nonempty_blocks += 1; + } + } + + ensure!( + // i.e. num_nonempty_blocks / num_blocks <= limit.0 / limit.1 + num_nonempty_blocks * limit.1 <= limit.0 * num_blocks, + "Exceeded nonempty block limit of {}/{}; got {num_nonempty_blocks} nonempty blocks out of a total of {num_blocks}", limit.0, limit.1 + ); + + Ok(()) + }) } #[derive(Debug)] @@ -202,7 +270,7 @@ pub enum BuilderChange { } /// Metadata describing builder behaviour during a test -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct BuilderDescription { /// view number -> change to builder status pub changes: HashMap, @@ -353,14 +421,8 @@ impl, V: Versions> Default ), unreliable_network: None, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), - builders: vec1::vec1![ - BuilderDescription { - changes: HashMap::new() - }, - BuilderDescription { - changes: HashMap::new() - } - ], + builders: vec1::vec1![BuilderDescription::default(), BuilderDescription::default(),], + fallback_builder: BuilderDescription::default(), solver: FakeSolverApiDescription { // Default to a 10% error rate. error_pct: 0.1, @@ -368,6 +430,8 @@ impl, V: Versions> Default behaviour: Rc::new(|_| Behaviour::Standard), async_delay_config: DelayConfig::default(), upgrade_view: None, + start_solver: true, + validate_transactions: Arc::new(|_| Ok(())), } } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 0f6faffae3..66e6da19a4 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -24,6 +24,7 @@ use hotshot::{ }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, + block_types::TestBlockHeader, state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; @@ -68,7 +69,11 @@ pub trait TaskErr: std::error::Error + Sync + Send + 'static {} impl TaskErr for T {} impl< - TYPES: NodeType, + TYPES: NodeType< + InstanceState = TestInstanceState, + ValidatedState = TestValidatedState, + BlockHeader = TestBlockHeader, + >, I: TestableNodeImplementation, V: Versions, N: ConnectedNetwork, @@ -209,6 +214,7 @@ where consensus_leaves: BTreeMap::new(), safety_properties: self.launcher.metadata.overall_safety_properties, ensure_upgrade: self.launcher.metadata.upgrade_view.is_some(), + validate_transactions: self.launcher.metadata.validate_transactions, _pd: PhantomData, }; @@ -335,14 +341,14 @@ where pub async fn init_builders>( &self, - ) -> (Vec>>, Vec) { + ) -> (Vec>>, Vec, Url) { let config = self.launcher.resource_generator.config.clone(); let mut builder_tasks = Vec::new(); let mut builder_urls = Vec::new(); for metadata in &self.launcher.metadata.builders { let builder_port = portpicker::pick_unused_port().expect("No free ports"); let builder_url = - Url::parse(&format!("http://localhost:{builder_port}")).expect("Valid URL"); + Url::parse(&format!("http://localhost:{builder_port}")).expect("Invalid URL"); let builder_task = B::start( config.num_nodes_with_stake.into(), builder_url.clone(), @@ -354,11 +360,25 @@ where builder_urls.push(builder_url); } - (builder_tasks, builder_urls) + let fallback_builder_port = portpicker::pick_unused_port().expect("No free ports"); + let fallback_builder_url = + Url::parse(&format!("http://localhost:{fallback_builder_port}")).expect("Invalid URL"); + + let fallback_builder_task = B::start( + config.num_nodes_with_stake.into(), + fallback_builder_url.clone(), + B::Config::default(), + self.launcher.metadata.fallback_builder.changes.clone(), + ) + .await; + + builder_tasks.push(fallback_builder_task); + + (builder_tasks, builder_urls, fallback_builder_url) } - /// Add servers. - pub async fn add_servers(&mut self, builder_urls: Vec) { + /// Add auction solver. + pub async fn add_solver(&mut self, builder_urls: Vec) { let solver_error_pct = self.launcher.metadata.solver.error_pct; let solver_port = portpicker::pick_unused_port().expect("No available ports"); @@ -395,8 +415,12 @@ where let mut results = vec![]; let config = self.launcher.resource_generator.config.clone(); - let (mut builder_tasks, builder_urls) = self.init_builders::().await; - self.add_servers(builder_urls.clone()).await; + let (mut builder_tasks, builder_urls, fallback_builder_url) = + self.init_builders::().await; + + if self.launcher.metadata.start_solver { + self.add_solver(builder_urls.clone()).await; + } // Collect uninitialized nodes because we need to wait for all networks to be ready before starting the tasks let mut uninitialized_nodes = Vec::new(); @@ -448,7 +472,7 @@ where marketplace_config.auction_results_provider = new_auction_results_provider.into(); } - marketplace_config.fallback_builder_url = builder_urls.first().unwrap().clone(); + marketplace_config.fallback_builder_url = fallback_builder_url.clone(); let network_clone = network.clone(); let networks_ready_future = async move { diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 21d5eb7c66..315ac2d5e8 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -15,7 +15,7 @@ use std::{ use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; @@ -39,6 +39,7 @@ use hotshot_types::{ BlockPayload, }, }; +use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; use crate::helpers::{ @@ -117,12 +118,16 @@ impl TestView { ) .await; - let block_header = TestBlockHeader { - block_number: 1, - timestamp: 1, + let block_header = TestBlockHeader::new( + &Leaf::::genesis( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, payload_commitment, builder_commitment, - }; + metadata, + ); let quorum_proposal_inner = QuorumProposal:: { block_header: block_header.clone(), @@ -144,7 +149,7 @@ impl TestView { let da_proposal_inner = DaProposal:: { encoded_transactions: encoded_transactions.clone(), - metadata: TestMetadata, + metadata, view_number: genesis_view, }; @@ -341,11 +346,15 @@ impl TestView { view_sync_certificate.map(ViewChangeEvidence::ViewSync) }; + let random = thread_rng().gen_range(0..=u64::MAX); + let block_header = TestBlockHeader { block_number: *next_view, timestamp: *next_view, payload_commitment, builder_commitment, + metadata, + random, }; let proposal = QuorumProposal:: { @@ -381,7 +390,7 @@ impl TestView { let da_proposal_inner = DaProposal:: { encoded_transactions: encoded_transactions.clone(), - metadata: TestMetadata, + metadata, view_number: next_view, }; diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index b7099b62d4..db878c078a 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -102,7 +102,9 @@ async fn test_random_block_builder() { &TestBlockPayload { transactions: vec![TestTransaction::new(vec![0; 1])], }, - &TestMetadata, + &TestMetadata { + num_transactions: 1, + }, ); let result = client diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index 0c2506af47..a2efcb5050 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -106,7 +106,9 @@ async fn test_consensus_task() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - TestMetadata, + TestMetadata { + num_transactions: 0, + }, ViewNumber::new(2), vec1![null_block::builder_fee::( quorum_membership.total_nodes(), @@ -308,7 +310,9 @@ async fn test_view_sync_finalize_propose() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - TestMetadata, + TestMetadata { + num_transactions: 0, + }, ViewNumber::new(4), vec1![null_block::builder_fee::( 4, diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 9f8fb0b744..b39f59ff32 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -90,8 +90,10 @@ async fn test_da_task() { ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv(PackedBundle::new( - encoded_transactions, - TestMetadata, + encoded_transactions.clone(), + TestMetadata { + num_transactions: transactions.len() as u64 + }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(), @@ -167,7 +169,7 @@ async fn test_da_task_storage_failure() { vids.push(view.vid_proposal.clone()); } - generator.add_transactions(transactions); + generator.add_transactions(transactions.clone()); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.da_proposal.clone()); @@ -185,8 +187,10 @@ async fn test_da_task_storage_failure() { ViewChange(ViewNumber::new(1)), ViewChange(ViewNumber::new(2)), BlockRecv(PackedBundle::new( - encoded_transactions, - TestMetadata, + encoded_transactions.clone(), + TestMetadata { + num_transactions: transactions.len() as u64 + }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(), diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 324d3241b2..8e5bb64259 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -107,7 +107,9 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - TestMetadata, + TestMetadata { + num_transactions: 0 + }, ViewNumber::new(1), vec1![builder_fee.clone()], None, @@ -212,7 +214,9 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), - TestMetadata, + TestMetadata { + num_transactions: 0 + }, ViewNumber::new(1), vec1![builder_fee.clone()], None, @@ -229,7 +233,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), - TestMetadata, + proposals[0].data.block_header.metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -246,7 +250,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), - TestMetadata, + proposals[1].data.block_header.metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, @@ -263,7 +267,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), builder_commitment.clone(), - TestMetadata, + proposals[2].data.block_header.metadata, ViewNumber::new(4), vec1![builder_fee.clone()], None, @@ -280,7 +284,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), builder_commitment, - TestMetadata, + proposals[3].data.block_header.metadata, ViewNumber::new(5), vec1![builder_fee.clone()], None, @@ -386,7 +390,9 @@ async fn test_quorum_proposal_task_qc_timeout() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - TestMetadata, + TestMetadata { + num_transactions: 0 + }, ViewNumber::new(3), vec1![null_block::builder_fee::( quorum_membership.total_nodes(), @@ -476,7 +482,9 @@ async fn test_quorum_proposal_task_view_sync() { SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - TestMetadata, + TestMetadata { + num_transactions: 0 + }, ViewNumber::new(2), vec1![null_block::builder_fee::( quorum_membership.total_nodes(), @@ -576,7 +584,9 @@ async fn test_quorum_proposal_task_liveness_check() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), - TestMetadata, + TestMetadata { + num_transactions: 0 + }, ViewNumber::new(1), vec1![builder_fee.clone()], None, @@ -593,7 +603,7 @@ async fn test_quorum_proposal_task_liveness_check() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), - TestMetadata, + proposals[0].data.block_header.metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -610,7 +620,7 @@ async fn test_quorum_proposal_task_liveness_check() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), - TestMetadata, + proposals[1].data.block_header.metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, @@ -627,7 +637,7 @@ async fn test_quorum_proposal_task_liveness_check() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), builder_commitment.clone(), - TestMetadata, + proposals[2].data.block_header.metadata, ViewNumber::new(4), vec1![builder_fee.clone()], None, @@ -644,7 +654,7 @@ async fn test_quorum_proposal_task_liveness_check() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), builder_commitment, - TestMetadata, + proposals[3].data.block_header.metadata, ViewNumber::new(5), vec1![builder_fee.clone()], None, diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index c229f68122..266f97ab5b 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -11,10 +11,7 @@ use hotshot_example_types::testable_delay::{ DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, }; use hotshot_example_types::{ - node_types::{ - Libp2pImpl, MarketplaceUpgradeTestVersions, MemoryImpl, PushCdnImpl, - TestConsecutiveLeaderTypes, TestVersions, - }, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -44,26 +41,6 @@ cross_tests!( }, ); -cross_tests!( - TestName: test_success_marketplace, - Impls: [MemoryImpl], - Types: [TestTypes], - Versions: [MarketplaceUpgradeTestVersions], - Ignore: false, - Metadata: { - TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - upgrade_view: Some(5), - ..TestDescription::default() - } - }, -); - #[cfg(feature = "dependency-tasks")] cross_tests!( TestName: test_success_with_async_delay, diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 24764f6a3d..d73cd48ea7 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -44,7 +44,9 @@ async fn test_transaction_task_leader_two_views_in_a_row() { // current view let mut exp_packed_bundle = PackedBundle::new( vec![].into(), - TestMetadata, + TestMetadata { + num_transactions: 0, + }, current_view, vec1::vec1![ null_block::builder_fee::( diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index bae16c1267..fe1d9279a9 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -14,7 +14,7 @@ use std::time::Duration; use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ - block_types::{TestMetadata, TestTransaction}, + block_types::TestTransaction, node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::TestInstanceState, }; @@ -291,7 +291,7 @@ async fn test_upgrade_task_propose() { SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[2].data.block_header.metadata, ViewNumber::new(3), vec1![null_block::builder_fee::( quorum_membership.total_nodes(), @@ -489,7 +489,7 @@ async fn test_upgrade_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, proposals[1].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[1].data.block_header.metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -501,7 +501,7 @@ async fn test_upgrade_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[2].data.block_header.metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, @@ -514,7 +514,7 @@ async fn test_upgrade_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, proposals[3].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[3].data.block_header.metadata, ViewNumber::new(4), vec1![builder_fee.clone()], None, @@ -527,7 +527,7 @@ async fn test_upgrade_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, proposals[4].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[4].data.block_header.metadata, ViewNumber::new(5), vec1![builder_fee.clone()], None, @@ -540,7 +540,7 @@ async fn test_upgrade_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, proposals[5].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[5].data.block_header.metadata, ViewNumber::new(6), vec1![builder_fee.clone()], None, @@ -554,7 +554,7 @@ async fn test_upgrade_task_blank_blocks() { SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, proposals[6].data.block_header.builder_commitment.clone(), - TestMetadata, + proposals[6].data.block_header.metadata, ViewNumber::new(7), vec1![builder_fee], None, diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index a465e94b37..b429fddaf4 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -175,7 +175,9 @@ async fn test_upgrade_task_with_proposal() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), builder_commitment.clone(), - TestMetadata, + TestMetadata { + num_transactions: 0 + }, ViewNumber::new(1), vec1![builder_fee.clone()], None, @@ -192,7 +194,7 @@ async fn test_upgrade_task_with_proposal() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), builder_commitment.clone(), - TestMetadata, + proposals[0].data.block_header.metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -210,7 +212,7 @@ async fn test_upgrade_task_with_proposal() { SendPayloadCommitmentAndMetadata( build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), builder_commitment.clone(), - TestMetadata, + proposals[1].data.block_header.metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index cf42272d58..17922d42cb 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -74,7 +74,9 @@ async fn test_vid_task() { .expect("Failed to sign block payload!"); let proposal: DaProposal = DaProposal { encoded_transactions: encoded_transactions.clone(), - metadata: TestMetadata, + metadata: TestMetadata { + num_transactions: encoded_transactions.len() as u64, + }, view_number: ViewNumber::new(2), }; let message = Proposal { @@ -96,8 +98,10 @@ async fn test_vid_task() { serial![ ViewChange(ViewNumber::new(2)), BlockRecv(PackedBundle::new( - encoded_transactions, - TestMetadata, + encoded_transactions.clone(), + TestMetadata { + num_transactions: transactions.len() as u64 + }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(), @@ -116,7 +120,9 @@ async fn test_vid_task() { exact(SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, - TestMetadata, + TestMetadata { + num_transactions: transactions.len() as u64, + }, ViewNumber::new(2), vec1![null_block::builder_fee::( quorum_membership.total_nodes(), diff --git a/testing/tests/tests_3/test_marketplace.rs b/testing/tests/tests_3/test_marketplace.rs new file mode 100644 index 0000000000..2d97535427 --- /dev/null +++ b/testing/tests/tests_3/test_marketplace.rs @@ -0,0 +1,135 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{collections::HashMap, time::Duration}; + +use hotshot_example_types::{ + node_types::{MarketplaceTestVersions, MarketplaceUpgradeTestVersions, MemoryImpl}, + state_types::TestTypes, +}; +use hotshot_macros::cross_tests; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + test_builder::{ + nonempty_block_limit, nonempty_block_threshold, BuilderChange, BuilderDescription, + TestDescription, + }, +}; +use vec1::vec1; + +// Test marketplace with the auction solver and fallback builder down +// Requires no nonempty blocks be committed +cross_tests!( + TestName: test_marketplace_solver_down, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + fallback_builder: + BuilderDescription { + changes: HashMap::from([(0, BuilderChange::Down)]) + }, + validate_transactions: nonempty_block_limit((0,100)), + start_solver: false, + ..TestDescription::default() + } + }, +); + +// Test marketplace upgrade with no builder changes +// Upgrade is proposed in view 5 and completes in view 25. +// Requires 80% nonempty blocks +cross_tests!( + TestName: test_marketplace_upgrade, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [MarketplaceUpgradeTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + upgrade_view: Some(5), + validate_transactions: nonempty_block_threshold((40,50)), + ..TestDescription::default() + } + }, +); + +// Test marketplace with both regular builders down but solver + fallback builder up +// Requires 90% nonempty blocks +cross_tests!( + TestName: test_marketplace_builders_down, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + builders: vec1![ + BuilderDescription { + changes: HashMap::from([(0, BuilderChange::Down)]) + }, + BuilderDescription { + changes: HashMap::from([(0, BuilderChange::Down)]) + } + ], + validate_transactions: nonempty_block_threshold((90,100)), + ..TestDescription::default() + } + }, +); + +// Test marketplace with the fallback and one regular builder down +// Requires at least 80% of blocks to be nonempty +cross_tests!( + TestName: test_marketplace_fallback_builder_down, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + builders: vec1![ + BuilderDescription { + changes: HashMap::from([(0, BuilderChange::Down)]) + }, + BuilderDescription::default(), + ], + fallback_builder: + BuilderDescription { + changes: HashMap::from([(0, BuilderChange::Down)]) + }, + validate_transactions: nonempty_block_threshold((80,100)), + ..TestDescription::default() + } + }, +); diff --git a/testing/tests/tests_3/test_with_builder_failures.rs b/testing/tests/tests_3/test_with_builder_failures.rs index 1b2d3ac13c..947ece0404 100644 --- a/testing/tests/tests_3/test_with_builder_failures.rs +++ b/testing/tests/tests_3/test_with_builder_failures.rs @@ -68,7 +68,7 @@ cross_tests!( changes: first_builder, }, BuilderDescription { - changes: second_builder, + changes: second_builder, }, ]; metadata diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d21f1a82bf..2db04c283e 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -5,9 +5,10 @@ // along with the HotShot repository. If not, see . //! The election trait, used to decide which node is the leader and determine if a vote is valid. +use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; + use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; /// A protocol for determining membership in and participating in a committee. pub trait Membership: From 3eb1b4d9d1521972ea4a7ab91d4e5b4acfd465f4 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 19 Sep 2024 14:32:13 -0400 Subject: [PATCH 1215/1393] [CATCHUP] Repro Restart Bug + Fix (#3686) * create restart context, and allow restarting after number of views * repros the issue * Fix the issue by saving the timeout vote as an action * cleanup/lint * revert some logging remove unused file * logging * Rename UpDown enum --- task-impls/src/consensus/handlers.rs | 3 +- task-impls/src/consensus/view_change.rs | 143 ------------------ task-impls/src/network.rs | 17 ++- testing/src/spinning_task.rs | 84 +++++++--- testing/src/test_runner.rs | 9 +- testing/tests/tests_1/libp2p.rs | 4 +- testing/tests/tests_1/test_with_failures_2.rs | 8 +- testing/tests/tests_2/catchup.rs | 109 ++++++++++--- .../tests/tests_2/test_with_failures_one.rs | 4 +- .../tests_3/test_with_failures_half_f.rs | 8 +- testing/tests/tests_4/test_with_failures_f.rs | 14 +- testing/tests/tests_5/combined_network.rs | 14 +- testing/tests/tests_5/timeout.rs | 8 +- 13 files changed, 203 insertions(+), 222 deletions(-) delete mode 100644 task-impls/src/consensus/view_change.rs diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 36f6831e5a..9ef7f69702 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -140,8 +140,9 @@ pub async fn create_and_send_proposal( }; debug!( - "Sending proposal for view {:?}", + "Sending proposal for view {:?} ID: {}", proposed_leaf.view_number(), + id, ); async_sleep(Duration::from_millis(round_start_delay)).await; diff --git a/task-impls/src/consensus/view_change.rs b/task-impls/src/consensus/view_change.rs deleted file mode 100644 index 82928afcfc..0000000000 --- a/task-impls/src/consensus/view_change.rs +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use core::time::Duration; -use std::sync::Arc; - -use anyhow::{ensure, Result}; -use async_broadcast::Sender; -use async_compatibility_layer::art::{async_sleep, async_spawn}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use chrono::Utc; -use hotshot_types::{ - consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, - event::{Event, EventType}, - traits::node_implementation::{ConsensusTime, NodeType}, -}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error, instrument}; - -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; - -/// Constant which tells [`update_view`] to send a view change event when called. -pub(crate) const SEND_VIEW_CHANGE_EVENT: bool = true; - -/// Constant which tells [`update_view`] to not send a view change event when called. -pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; - -/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the -/// `timeout_task` which are updated during the operation of the function. -/// -/// # Errors -/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. -/// TODO: Remove args when we merge dependency tasks. -#[allow(clippy::too_many_arguments)] -#[instrument(skip_all)] -pub(crate) async fn update_view( - new_view: TYPES::Time, - event_stream: &Sender>>, - timeout: u64, - consensus: OuterConsensus, - cur_view: &mut TYPES::Time, - cur_view_time: &mut i64, - timeout_task: &mut JoinHandle<()>, - output_event_stream: &Sender>, - send_view_change_event: bool, - is_old_view_leader: bool, -) -> Result<()> { - ensure!( - new_view > *cur_view, - "New view is not greater than our current view" - ); - - let old_view = *cur_view; - - debug!("Updating view from {} to {}", *old_view, *new_view); - - if *old_view / 100 != *new_view / 100 { - // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): - // switch to info! when INFO logs become less cluttered - error!("Progress: entered view {:>6}", *new_view); - } - - *cur_view = new_view; - - // The next view is just the current view + 1 - let next_view = *cur_view + 1; - - if send_view_change_event { - futures::join! { - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), - broadcast_event( - Event { - view_number: old_view, - event: EventType::ViewFinished { - view_number: old_view, - }, - }, - output_event_stream, - ) - }; - } - - // Spawn a timeout task if we did actually update view - let new_timeout_task = async_spawn({ - let stream = event_stream.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = next_view; - let timeout = Duration::from_millis(timeout); - async move { - async_sleep(timeout).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), - &stream, - ) - .await; - } - }); - - // cancel the old timeout task - cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await; - - let consensus = consensus.upgradable_read().await; - consensus - .metrics - .current_view - .set(usize::try_from(cur_view.u64()).unwrap()); - let new_view_time = Utc::now().timestamp(); - if is_old_view_leader { - #[allow(clippy::cast_precision_loss)] - consensus - .metrics - .view_duration_as_leader - .add_point((new_view_time - *cur_view_time) as f64); - } - *cur_view_time = new_view_time; - - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(cur_view.u64()).unwrap() - > usize::try_from(consensus.last_decided_view().u64()).unwrap() - { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(cur_view.u64()).unwrap() - - usize::try_from(consensus.last_decided_view().u64()).unwrap(), - ); - } - let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; - if let Err(e) = consensus.update_view(new_view) { - tracing::trace!("{e:?}"); - } - tracing::trace!("View updated successfully"); - - Ok(()) -} diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 2042a9fc04..40597b75b2 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -488,13 +488,16 @@ impl< )), TransmitType::Broadcast, )), - HotShotEvent::TimeoutVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::TimeoutVote(vote.clone()), - )), - TransmitType::Direct(membership.leader(vote.view_number() + 1)), - )), + HotShotEvent::TimeoutVoteSend(vote) => { + *maybe_action = Some(HotShotAction::Vote); + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::TimeoutVote(vote.clone()), + )), + TransmitType::Direct(membership.leader(vote.view_number() + 1)), + )) + } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 8a46976cac..ef4709af68 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -10,10 +10,13 @@ use std::{ }; use anyhow::Result; +use async_broadcast::broadcast; use async_lock::RwLock; use async_trait::async_trait; use futures::future::join_all; -use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer}; +use hotshot::{ + traits::TestableNodeImplementation, types::EventType, HotShotInitializer, SystemContext, +}; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, block_types::TestBlockHeader, @@ -22,6 +25,7 @@ use hotshot_example_types::{ testable_delay::DelayConfig, }; use hotshot_types::{ + constants::EVENT_CHANNEL_SIZE, data::Leaf, event::Event, simple_certificate::QuorumCertificate, @@ -47,7 +51,12 @@ pub type StateAndBlock = (Vec, Vec); pub struct SpinningTaskErr {} /// Spinning task state -pub struct SpinningTask, V: Versions> { +pub struct SpinningTask< + TYPES: NodeType, + N: ConnectedNetwork, + I: TestableNodeImplementation, + V: Versions, +> { /// handle to the nodes pub(crate) handles: Arc>>>, /// late start nodes @@ -62,6 +71,8 @@ pub struct SpinningTask, V pub(crate) high_qc: QuorumCertificate, /// Add specified delay to async calls pub(crate) async_delay_config: DelayConfig, + /// Context stored for nodes to be restarted with + pub(crate) restart_contexts: HashMap>, } #[async_trait] @@ -74,7 +85,7 @@ impl< I: TestableNodeImplementation, N: ConnectedNetwork, V: Versions, - > TestTaskState for SpinningTask + > TestTaskState for SpinningTask where I: TestableNodeImplementation, I: NodeImplementation< @@ -117,7 +128,7 @@ where if let Some(operations) = self.changes.remove(&view_number) { for ChangeNode { idx, updown } in operations { match updown { - UpDown::Up => { + NodeAction::Up => { let node_id = idx.try_into().unwrap(); if let Some(node) = self.late_start.remove(&node_id) { tracing::error!("Node {} spinning up late", idx); @@ -187,13 +198,13 @@ where self.handles.write().await.push(node); } } - UpDown::Down => { + NodeAction::Down => { if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); node.handle.shut_down().await; } } - UpDown::Restart => { + NodeAction::RestartDown(delay_views) => { let node_id = idx.try_into().unwrap(); if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); @@ -217,7 +228,7 @@ where self.last_decided_leaf.clone(), TestInstanceState::new(self.async_delay_config.clone()), None, - view_number, + read_storage.last_actioned_view().await, read_storage.last_actioned_view().await, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( @@ -238,6 +249,7 @@ where // For tests, make the node DA based on its index node_id < config.da_staked_committee_size as u64, ); + let internal_chan = broadcast(EVENT_CHANNEL_SIZE); let context = TestRunner::::add_node_with_config_and_channels( node_id, @@ -248,27 +260,44 @@ where validator_config, (*read_storage).clone(), marketplace_config.clone(), - ( - node.handle.internal_channel_sender(), - node.handle.internal_event_stream_receiver_known_impl(), - ), + internal_chan, ( node.handle.external_channel_sender(), - node.handle.event_stream_known_impl(), + node.handle.event_stream_known_impl().new_receiver(), ), ) .await; - new_nodes.push((context, idx)); - new_networks.push(network.clone()); + if delay_views == 0 { + new_nodes.push((context, idx)); + new_networks.push(network.clone()); + } else { + let up_view = view_number + delay_views; + let change = ChangeNode { + idx, + updown: NodeAction::RestartUp, + }; + self.changes.entry(up_view).or_default().push(change); + let new_ctx = RestartContext { + context, + network: network.clone(), + }; + self.restart_contexts.insert(idx, new_ctx); + } } } - UpDown::NetworkUp => { + NodeAction::RestartUp => { + if let Some(ctx) = self.restart_contexts.remove(&idx) { + new_nodes.push((ctx.context, idx)); + new_networks.push(ctx.network.clone()); + } + } + NodeAction::NetworkUp => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); handle.network.resume(); } } - UpDown::NetworkDown => { + NodeAction::NetworkDown => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks pausing", idx); handle.network.pause(); @@ -286,6 +315,7 @@ where join_all(ready_futs).await; while let Some((node, id)) = new_nodes.pop() { + tracing::error!("Starting node {} back up", id); let handle = node.run_tasks().await; // Create the node and add it to the state, so we can shut them @@ -312,9 +342,20 @@ where } } +#[derive(Clone)] +pub(crate) struct RestartContext< + TYPES: NodeType, + N: ConnectedNetwork, + I: TestableNodeImplementation, + V: Versions, +> { + context: Arc>, + network: Arc, +} + /// Spin the node up or down #[derive(Clone, Debug)] -pub enum UpDown { +pub enum NodeAction { /// spin the node up Up, /// spin the node down @@ -323,8 +364,11 @@ pub enum UpDown { NetworkUp, /// spin the node's network down NetworkDown, - /// restart the node - Restart, + /// Take a node down to be restarted after a number of views + RestartDown(u64), + /// Start a node up again after it's been shutdown for restart. This + /// should only be created following a `ResartDown` + RestartUp, } /// denotes a change in node state @@ -333,7 +377,7 @@ pub struct ChangeNode { /// the index of the node pub idx: usize, /// spin the node or node's network up or down - pub updown: UpDown, + pub updown: NodeAction, } /// description of the spinning task diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 66e6da19a4..6a24730d31 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -57,7 +57,7 @@ use super::{ use crate::{ block_builder::{BuilderTask, TestBuilderImplementation}, completion_task::CompletionTaskDescription, - spinning_task::{ChangeNode, SpinningTask, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTask}, test_builder::create_test_handle, test_launcher::{Network, TestLauncher}, test_task::{TestResult, TestTask}, @@ -105,10 +105,10 @@ where let mut restart_nodes: HashSet = HashSet::new(); for (_, changes) in &spinning_changes { for change in changes { - if matches!(change.updown, UpDown::Up) { + if matches!(change.updown, NodeAction::Up) { late_start_nodes.insert(change.idx.try_into().unwrap()); } - if matches!(change.updown, UpDown::Restart) { + if matches!(change.updown, NodeAction::RestartDown(_)) { restart_nodes.insert(change.idx.try_into().unwrap()); } } @@ -195,8 +195,9 @@ where ) .await, async_delay_config: self.launcher.metadata.async_delay_config, + restart_contexts: HashMap::new(), }; - let spinning_task = TestTask::>::new( + let spinning_task = TestTask::>::new( spinning_task_state, event_rxs.clone(), test_receiver.clone(), diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index 32f0878999..f19e3b0798 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -11,7 +11,7 @@ use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; use tracing::instrument; @@ -73,7 +73,7 @@ async fn libp2p_network_failures_2() { let dead_nodes = vec![ChangeNode { idx: 11, - updown: UpDown::Down, + updown: NodeAction::Down, }]; metadata.spinning_properties = SpinningTaskDescription { diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index c757b5d1b7..8573af51f5 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -15,7 +15,7 @@ use hotshot_example_types::{ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; @@ -51,11 +51,11 @@ cross_tests!( let dead_nodes = vec![ ChangeNode { idx: 10, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 11, - updown: UpDown::Down, + updown: NodeAction::Down, }, ]; @@ -87,7 +87,7 @@ cross_tests!( let dead_nodes = vec![ ChangeNode { idx: 3, - updown: UpDown::Down, + updown: NodeAction::Down, }, ]; diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 8253ae8084..0b8fcbfdb9 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -15,7 +15,7 @@ async fn test_catchup() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -32,7 +32,7 @@ async fn test_catchup() { TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, - updown: UpDown::Up, + updown: NodeAction::Up, }]; metadata.timing_data = timing_data; @@ -78,7 +78,7 @@ async fn test_catchup_cdn() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -92,7 +92,7 @@ async fn test_catchup_cdn() { TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, - updown: UpDown::Up, + updown: NodeAction::Up, }]; metadata.timing_data = timing_data; metadata.start_nodes = 19; @@ -133,7 +133,7 @@ async fn test_catchup_one_node() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); @@ -146,7 +146,7 @@ async fn test_catchup_one_node() { TestDescription::default(); let catchup_nodes = vec![ChangeNode { idx: 18, - updown: UpDown::Up, + updown: NodeAction::Up, }]; metadata.timing_data = timing_data; metadata.start_nodes = 19; @@ -189,7 +189,7 @@ async fn test_catchup_in_view_sync() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); @@ -203,11 +203,11 @@ async fn test_catchup_in_view_sync() { let catchup_nodes = vec![ ChangeNode { idx: 18, - updown: UpDown::Up, + updown: NodeAction::Up, }, ChangeNode { idx: 19, - updown: UpDown::Up, + updown: NodeAction::Up, }, ]; @@ -252,7 +252,7 @@ async fn test_catchup_reload() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -266,7 +266,7 @@ async fn test_catchup_reload() { TestDescription::default(); let catchup_node = vec![ChangeNode { idx: 19, - updown: UpDown::Up, + updown: NodeAction::Up, }]; metadata.timing_data = timing_data; @@ -312,7 +312,7 @@ async fn test_all_restart() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -328,7 +328,7 @@ async fn test_all_restart() { for i in 0..20 { catchup_nodes.push(ChangeNode { idx: i, - updown: UpDown::Restart, + updown: NodeAction::RestartDown(0), }) } @@ -374,7 +374,7 @@ async fn test_all_restart_cdn() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -390,7 +390,7 @@ async fn test_all_restart_cdn() { for i in 0..20 { catchup_nodes.push(ChangeNode { idx: i, - updown: UpDown::Restart, + updown: NodeAction::RestartDown(0), }) } @@ -440,7 +440,7 @@ async fn test_all_restart_one_da() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -457,7 +457,7 @@ async fn test_all_restart_one_da() { for i in 0..20 { catchup_nodes.push(ChangeNode { idx: i, - updown: UpDown::Restart, + updown: NodeAction::RestartDown(0), }) } @@ -494,3 +494,78 @@ async fn test_all_restart_one_da() { .run_test::() .await; } + +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_staggered_restart() { + use std::time::Duration; + + use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; + use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, + test_builder::TestDescription, + }; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + let mut metadata: TestDescription = + TestDescription::default(); + + let mut down_da_nodes = vec![]; + for i in 1..4 { + down_da_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(20), + }); + } + + let mut down_regular_nodes = vec![]; + for i in 4..10 { + down_regular_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }); + } + // restart the last da so it gets the new libp2p routing table + down_regular_nodes.push(ChangeNode { + idx: 0, + updown: NodeAction::RestartDown(0), + }); + + metadata.start_nodes = 10; + metadata.num_nodes_with_stake = 10; + + // Explicitly make the DA small to simulate real network. + metadata.da_staked_committee_size = 4; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(13, down_da_nodes), (33, down_regular_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 50); + + // Give the test some extra time because we are purposely timing out views + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 30, + ..Default::default() + }; + + metadata + .gen_launcher(0) + .launch() + .run_test::() + .await; +} diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index 5408b57eb5..c540c9cbfc 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -11,7 +11,7 @@ use hotshot_example_types::{ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, }; @@ -32,7 +32,7 @@ cross_tests!( // let dead_nodes = vec![ChangeNode { idx: 19, - updown: UpDown::Down, + updown: NodeAction::Down, }]; metadata.spinning_properties = SpinningTaskDescription { diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index e4d7f58d68..8b1eb531a2 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -11,7 +11,7 @@ use hotshot_example_types::{ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, }; // Test f/2 nodes leaving the network. @@ -32,15 +32,15 @@ cross_tests!( let dead_nodes = vec![ ChangeNode { idx: 17, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 18, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 19, - updown: UpDown::Down, + updown: NodeAction::Down, }, ]; diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index c36b043294..931d7eaf5e 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -11,7 +11,7 @@ use hotshot_example_types::{ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, }; // Test f nodes leaving the network. @@ -35,27 +35,27 @@ cross_tests!( let dead_nodes = vec![ ChangeNode { idx: 14, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 15, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 16, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 17, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 18, - updown: UpDown::Down, + updown: NodeAction::Down, }, ChangeNode { idx: 19, - updown: UpDown::Down, + updown: NodeAction::Down, }, ]; diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index bc35b9ad3c..e4d0fb4625 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -11,7 +11,7 @@ use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; use rand::Rng; @@ -86,7 +86,7 @@ async fn test_combined_network_cdn_crash() { for node in 0..metadata.num_nodes_with_stake { all_nodes.push(ChangeNode { idx: node, - updown: UpDown::NetworkDown, + updown: NodeAction::NetworkDown, }); } @@ -136,11 +136,11 @@ async fn test_combined_network_reup() { for node in 0..metadata.num_nodes_with_stake { all_down.push(ChangeNode { idx: node, - updown: UpDown::NetworkDown, + updown: NodeAction::NetworkDown, }); all_up.push(ChangeNode { idx: node, - updown: UpDown::NetworkUp, + updown: NodeAction::NetworkUp, }); } @@ -188,7 +188,7 @@ async fn test_combined_network_half_dc() { for node in 0..metadata.num_nodes_with_stake / 2 { half.push(ChangeNode { idx: node, - updown: UpDown::NetworkDown, + updown: NodeAction::NetworkDown, }); } @@ -212,9 +212,9 @@ fn generate_random_node_changes( for _ in 0..total_nodes * 2 { let updown = if rng.gen::() { - UpDown::NetworkUp + NodeAction::NetworkUp } else { - UpDown::NetworkDown + NodeAction::NetworkDown }; let node_change = ChangeNode { diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 2a9cd4e73a..2269d5000d 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -17,7 +17,7 @@ async fn test_timeout() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; async_compatibility_layer::logging::setup_logging(); @@ -34,7 +34,7 @@ async fn test_timeout() { }; let dead_nodes = vec![ChangeNode { idx: 0, - updown: UpDown::Down, + updown: NodeAction::Down, }]; metadata.timing_data = timing_data; @@ -77,7 +77,7 @@ async fn test_timeout_libp2p() { block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, SpinningTaskDescription, UpDown}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; @@ -98,7 +98,7 @@ async fn test_timeout_libp2p() { }; let dead_nodes = vec![ChangeNode { idx: 9, - updown: UpDown::Down, + updown: NodeAction::Down, }]; metadata.timing_data = timing_data; From 69c30e46e6494556eedc2f52f1ef1c4632eda933 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 20 Sep 2024 11:50:05 -0400 Subject: [PATCH 1216/1393] Use pre-defined stake-table in orchestrator (#3685) --- examples/infra/mod.rs | 4 +- orchestrator/run-config.toml | 66 +++------ orchestrator/src/config.rs | 34 +++-- orchestrator/src/lib.rs | 259 ++++++++++++++++++++++++----------- 4 files changed, 221 insertions(+), 142 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 0f37e713aa..26caa3dd1b 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -880,8 +880,8 @@ pub async fn main_entry_point< let my_own_validator_config = NetworkConfig::::generate_init_validator_config( &orchestrator_client, - // This is false for now, we only use it to generate the keypair - false, + // we assign nodes to the DA committee by default + true, ) .await; diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index ae5cfe59ad..909a6043b0 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -3,56 +3,30 @@ indexed_da = true transactions_per_round = 10 transaction_size = 1000 node_index = 0 -seed = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -] +seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] start_delay_seconds = 0 cdn_marshal_address = "127.0.0.1:9000" public_keys = [ - "BLS_VER_KEY~p-JKk1VvO1RoMrDrqyjz0P1VGwtOaEjF5jLjpOZbJi5O747fvYEOg0OvCl_CLe4shh7vsqeG9uMF9RssM12sLSuaiVJkCClxEI5mRLV4qff1UjZAZJIBgeL1_hRhRUkpqC0Trm1qtvXtZ8FwOCIzYXv8c300Au824k7FxjjcWLBL", - "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR", - "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL", - "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7", - "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4", - "BLS_VER_KEY~kEUEUJFBtCXl68fM_2roQw856wQlu1ZoDmPn8uu4bQgeZwyb5oz5_kMl-oAJ_OtbYV1serjWE--eXB_qYIpQLZka42-cML6WjCQjNl1hGSejtoBDkExNeUNcweFQBbEsaDiIy3-sgHTrfYpFd1icKeAVihLRn5_RtSU_RUu1TQqR", - "BLS_VER_KEY~PAAQNgOYfj3GiVX7LxSlkXfOCDSnNKZDqPVYQ_jBMxKzOCn0PXbqQ62kKPenWOmCxiCE7X158s-VenBna6MjHJgf61eBAO-3-OyTP5NWVx49RTgHhQf2iMTKk2iqK2gjnjZimBU135YU4lQFtrG-ZgRezwqkC5vy8V-q46fschIG", - "BLS_VER_KEY~96hAcdFZxQT8CEHcyV8j2ILJRsXagquENPkc9AwLSx3u6AE_uMupIKGbNJRiM99oFneK2vI5g1u61HidWeuTLRPM2537xAXeaO8e-wJYx4FaPKw_xTcLPrIm0OZT7SsLAMwFuqfMbDdKM71-RyrLwhff5517xXBKEk5Tg9iT9Qrr", - "BLS_VER_KEY~-pVi7j6TEBeG7ABata4uWWDRM2SrY8wWotWsGnTpIhnOVYJI_lNWyig6VJUuFmBsMS8rLMU7nDxDm8SbObxyA-SLFcr_jCkZqsbx8GcVQrnBAfjNRWuPZP0xcTDMu2IkQqtc3L0OpzbMEgGRGE8Wj09pNqouzl-xhPoYjTmD06Bw", - "BLS_VER_KEY~IUPSdnsNUHgNx_74ZhBPrICcDZ9Bp_DAt-6kFz8vSwJES2Vy1Ws8NJ1mxb9XGE1u13sw0FRe8kn5Ib3p2stbEtR_1Qgbuif6aoLrGaSUzy0MvwrO58u9kHZk3rXIuSAN7n4ok3-KKk2CmnBfx7fchFoqT56FXCd1EJ7XRrYj8wTh", + { stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", state_ver_key = "SCHNORR_VER_KEY~lJqDaVZyM0hWP2Br52IX5FeE-dCAIC-dPX7bL5-qUx-vjbunwe-ENOeZxj6FuOyvDCFzoGeP7yZ0fM995qF-CRE", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4", state_ver_key = "SCHNORR_VER_KEY~tyuplKrHzvjODsjPKMHVFYfoMcgklQsMye-2aSCktBcbW_CIzLOq3wZXRIPBbw3FiV6_QoUXYAlpZ5up0zG_ANY", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR", state_ver_key = "SCHNORR_VER_KEY~qQAC373HPv4s0mTTpdmSaynfUXC4SfPCuGD2fbeigSpexFB2ycCeXV9UAjuR86CC9udPhopgMsFLyD29VO2iJSg", stake = 1, da = true}, + + { stake_table_key = "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL", state_ver_key = "SCHNORR_VER_KEY~le6RHdTasbBsTcbMqArt0XWFwfIJTY7RbUwaCvdxswL8LpXpO3eb86iyYUr63dtv4GGa5fIJaRH97nCd1lV9H8g", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7", state_ver_key = "SCHNORR_VER_KEY~LfL6fFJQ8UZWR1Jro6LHtKm_y5-VQZBapO0XhcB8ABAmsVght9B8k7NntrgniffAMD8_OJ6Zjg8XUklhbb42CIw", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~kEUEUJFBtCXl68fM_2roQw856wQlu1ZoDmPn8uu4bQgeZwyb5oz5_kMl-oAJ_OtbYV1serjWE--eXB_qYIpQLZka42-cML6WjCQjNl1hGSejtoBDkExNeUNcweFQBbEsaDiIy3-sgHTrfYpFd1icKeAVihLRn5_RtSU_RUu1TQqR", state_ver_key = "SCHNORR_VER_KEY~qKOggsQMNIIvmiIPM3smiGk40kYGXCVupsmgIrf3RgMmua683F3vUwzWcx0s7mxdzLXJwPAB06LD96cxip7JLJM", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~96hAcdFZxQT8CEHcyV8j2ILJRsXagquENPkc9AwLSx3u6AE_uMupIKGbNJRiM99oFneK2vI5g1u61HidWeuTLRPM2537xAXeaO8e-wJYx4FaPKw_xTcLPrIm0OZT7SsLAMwFuqfMbDdKM71-RyrLwhff5517xXBKEk5Tg9iT9Qrr", state_ver_key = "SCHNORR_VER_KEY~y0nltwKyKSpwO3ki9Czu5asjAt5g1Ya3XmAywcerOSUg__FuZOcYq6tKxMsnsjE7ylpWLZv8R5W4-6WkP0DWI94", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~-pVi7j6TEBeG7ABata4uWWDRM2SrY8wWotWsGnTpIhnOVYJI_lNWyig6VJUuFmBsMS8rLMU7nDxDm8SbObxyA-SLFcr_jCkZqsbx8GcVQrnBAfjNRWuPZP0xcTDMu2IkQqtc3L0OpzbMEgGRGE8Wj09pNqouzl-xhPoYjTmD06Bw", state_ver_key = "SCHNORR_VER_KEY~6rPZ_plXxp8Uoh-E8VPb37csDRLND66zAorA3crYOhf9ARJapk8151RRVXWHe5Q2uF_RmQQmOCAov6tIpJ4yHz0", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~IUPSdnsNUHgNx_74ZhBPrICcDZ9Bp_DAt-6kFz8vSwJES2Vy1Ws8NJ1mxb9XGE1u13sw0FRe8kn5Ib3p2stbEtR_1Qgbuif6aoLrGaSUzy0MvwrO58u9kHZk3rXIuSAN7n4ok3-KKk2CmnBfx7fchFoqT56FXCd1EJ7XRrYj8wTh", state_ver_key = "SCHNORR_VER_KEY~qLqeTM1ZT1ecLEpzHwmlr-GeMUOest-kAm5nKOnKnB-W_TRj4IL77lnmamYvUdXR_ddQp24wQh2IlOIdp5jKEgw", stake = 1, da = true }, + + { stake_table_key = "BLS_VER_KEY~PAAQNgOYfj3GiVX7LxSlkXfOCDSnNKZDqPVYQ_jBMxKzOCn0PXbqQ62kKPenWOmCxiCE7X158s-VenBna6MjHJgf61eBAO-3-OyTP5NWVx49RTgHhQf2iMTKk2iqK2gjnjZimBU135YU4lQFtrG-ZgRezwqkC5vy8V-q46fschIG", state_ver_key = "SCHNORR_VER_KEY~APKGX39-mOmApq6jMdIEiuuyddJ_k8xFeIwU1Zs2zShH1rI--eZR180Us5vqNWmK3zAidScvVcW4bAsOMHB3LPg", stake = 1, da = true } ] -enable_registration_verification = true [config] num_nodes_with_stake = 10 diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 0464c1bd5f..b7b69a48df 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -5,7 +5,6 @@ // along with the HotShot repository. If not, see . use std::{ - collections::HashSet, env, fs, net::SocketAddr, num::NonZeroUsize, @@ -17,8 +16,8 @@ use std::{ use clap::ValueEnum; use hotshot_types::{ - constants::REQUEST_DATA_DELAY, traits::signature_key::SignatureKey, ExecutionType, - HotShotConfig, PeerConfig, ValidatorConfig, + constants::REQUEST_DATA_DELAY, light_client::StateVerKey, traits::signature_key::SignatureKey, + ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; use libp2p::{Multiaddr, PeerId}; use serde_inline_default::serde_inline_default; @@ -86,6 +85,20 @@ pub enum BuilderType { Random, } +/// Node PeerConfig keys +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +#[serde(bound(deserialize = ""))] +pub struct PeerConfigKeys { + /// The peer's public key + pub stake_table_key: KEY, + /// the peer's state public key + pub state_ver_key: StateVerKey, + /// the peer's stake + pub stake: u64, + /// whether the node is a DA node + pub da: bool, +} + /// Options controlling how the random builder generates blocks #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct RandomBuilderConfig { @@ -157,9 +170,7 @@ pub struct NetworkConfig { /// random builder config pub random_builder: Option, /// The list of public keys that are allowed to connect to the orchestrator - pub public_keys: HashSet, - /// Whether or not to disable registration verification. - pub enable_registration_verification: bool, + pub public_keys: Vec>, } /// the source of the network config @@ -391,8 +402,7 @@ impl Default for NetworkConfig { commit_sha: String::new(), builder: BuilderType::default(), random_builder: None, - public_keys: HashSet::new(), - enable_registration_verification: true, + public_keys: vec![], } } } @@ -443,11 +453,10 @@ pub struct NetworkConfigFile { #[serde(default)] pub random_builder: Option, /// The list of public keys that are allowed to connect to the orchestrator + /// + /// If nonempty, this list becomes the stake table and is used to determine DA membership (ignoring the node's request). #[serde(default)] - pub public_keys: HashSet, - /// Whether or not to disable registration verification. - #[serde(default)] - pub enable_registration_verification: bool, + pub public_keys: Vec>, } impl From> for NetworkConfig { @@ -480,7 +489,6 @@ impl From> for NetworkConfig { builder: val.builder, random_builder: val.random_builder, public_keys: val.public_keys, - enable_registration_verification: val.enable_registration_verification, } } } diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index b311ef3059..f6c83c9309 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -101,21 +101,35 @@ struct OrchestratorState { accepting_new_keys: bool, /// Builder address pool builders: Vec, + /// whether we are using a fixed stake table, disabling public key registration + fixed_stake_table: bool, } impl OrchestratorState { /// create a new [`OrchestratorState`] pub fn new(network_config: NetworkConfig) -> Self { + let mut peer_pub_ready = false; + let mut fixed_stake_table = false; + + if network_config.config.known_nodes_with_stake.is_empty() { + println!("No nodes were loaded from the config file. Nodes will be allowed to register dynamically."); + } else { + println!("Initializing orchestrator with fixed stake table."); + peer_pub_ready = true; + fixed_stake_table = true; + } + let builders = if matches!(network_config.builder, BuilderType::External) { network_config.config.builder_urls.clone().into() } else { vec![] }; + OrchestratorState { latest_index: 0, tmp_latest_index: 0, config: network_config, - peer_pub_ready: false, + peer_pub_ready, pub_posted: HashMap::new(), nodes_connected: 0, start: false, @@ -124,6 +138,7 @@ impl OrchestratorState { manual_start_allowed: true, accepting_new_keys: true, builders, + fixed_stake_table, } } @@ -246,70 +261,13 @@ pub trait OrchestratorApi { fn get_builders(&self) -> Result, ServerError>; } -impl OrchestratorApi for OrchestratorState +impl OrchestratorState where KEY: serde::Serialize + Clone + SignatureKey + 'static, { - /// Post an identity to the orchestrator. Takes in optional - /// arguments so others can identify us on the Libp2p network. - /// # Errors - /// If we were unable to serve the request - fn post_identity( - &mut self, - libp2p_address: Option, - libp2p_public_key: Option, - ) -> Result { - let node_index = self.latest_index; - self.latest_index += 1; - - if usize::from(node_index) >= self.config.config.num_nodes_with_stake.get() { - return Err(ServerError { - status: tide_disco::StatusCode::BAD_REQUEST, - message: "Network has reached capacity".to_string(), - }); - } - - // If the orchestrator is set up for libp2p and we have supplied the proper - // Libp2p data, add our node to the list of bootstrap nodes. - if self.config.libp2p_config.clone().is_some() { - if let (Some(libp2p_public_key), Some(libp2p_address)) = - (libp2p_public_key, libp2p_address) - { - // Push to our bootstrap nodes - self.config - .libp2p_config - .as_mut() - .unwrap() - .bootstrap_nodes - .push((libp2p_public_key, libp2p_address)); - } - } - Ok(node_index) - } - - // Assumes nodes will set their own index that they received from the - // 'identity' endpoint - fn post_getconfig(&mut self, _node_index: u16) -> Result, ServerError> { - Ok(self.config.clone()) - } - - // Assumes one node do not get twice - fn get_tmp_node_index(&mut self) -> Result { - let tmp_node_index = self.tmp_latest_index; - self.tmp_latest_index += 1; - - if usize::from(tmp_node_index) >= self.config.config.num_nodes_with_stake.get() { - return Err(ServerError { - status: tide_disco::StatusCode::BAD_REQUEST, - message: "Node index getter for key pair generation has reached capacity" - .to_string(), - }); - } - Ok(tmp_node_index) - } - - #[allow(clippy::cast_possible_truncation)] - fn register_public_key( + /// register a node with an unknown public key. + /// this method should be used when we don't have a fixed stake table + fn register_unknown( &mut self, pubkey: &mut Vec, da_requested: bool, @@ -334,19 +292,6 @@ where // Deserialize the public key let staked_pubkey = PeerConfig::::from_bytes(pubkey).unwrap(); - // Check if the node is allowed to connect - if self.config.enable_registration_verification - && !self - .config - .public_keys - .contains(&staked_pubkey.stake_table_entry.public_key()) - { - return Err(ServerError { - status: tide_disco::StatusCode::FORBIDDEN, - message: "You are unauthorized to register with the orchestrator".to_string(), - }); - } - self.config .config .known_nodes_with_stake @@ -400,6 +345,146 @@ where Ok((node_index, added_to_da)) } + /// register a node on the fixed stake table, which was loaded at startup + fn register_from_list( + &mut self, + pubkey: &mut Vec, + da_requested: bool, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result<(u64, bool), ServerError> { + // if we've already registered this node before, we just retrieve its info from `pub_posted` + if let Some((node_index, is_da)) = self.pub_posted.get(pubkey) { + return Ok((*node_index, *is_da)); + } + + // Deserialize the public key + let staked_pubkey = PeerConfig::::from_bytes(pubkey).unwrap(); + + // Check if the node is allowed to connect, returning its index and config entry if so. + let Some((node_index, node_config)) = + self.config.public_keys.iter().enumerate().find(|keys| { + keys.1.stake_table_key == staked_pubkey.stake_table_entry.public_key() + }) + else { + return Err(ServerError { + status: tide_disco::StatusCode::FORBIDDEN, + message: "You are unauthorized to register with the orchestrator".to_string(), + }); + }; + + // Check that our recorded DA status for the node matches what the node actually requested + if node_config.da != da_requested { + return Err(ServerError { + status: tide_disco::StatusCode::BAD_REQUEST, + message: format!("Mismatch in DA status in registration for node {}. DA requested: {}, expected: {}", node_index, da_requested, node_config.da), + }); + } + + let added_to_da = node_config.da; + + self.pub_posted + .insert(pubkey.clone(), (node_index as u64, added_to_da)); + + // If the orchestrator is set up for libp2p and we have supplied the proper + // Libp2p data, add our node to the list of bootstrap nodes. + if self.config.libp2p_config.clone().is_some() { + if let (Some(libp2p_public_key), Some(libp2p_address)) = + (libp2p_public_key, libp2p_address) + { + // Push to our bootstrap nodes + self.config + .libp2p_config + .as_mut() + .unwrap() + .bootstrap_nodes + .push((libp2p_public_key, libp2p_address)); + } + } + + tracing::error!("Node {node_index} has registered."); + + Ok((node_index as u64, added_to_da)) + } +} + +impl OrchestratorApi for OrchestratorState +where + KEY: serde::Serialize + Clone + SignatureKey + 'static, +{ + /// Post an identity to the orchestrator. Takes in optional + /// arguments so others can identify us on the Libp2p network. + /// # Errors + /// If we were unable to serve the request + fn post_identity( + &mut self, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result { + let node_index = self.latest_index; + self.latest_index += 1; + + if usize::from(node_index) >= self.config.config.num_nodes_with_stake.get() { + return Err(ServerError { + status: tide_disco::StatusCode::BAD_REQUEST, + message: "Network has reached capacity".to_string(), + }); + } + + // If the orchestrator is set up for libp2p and we have supplied the proper + // Libp2p data, add our node to the list of bootstrap nodes. + if self.config.libp2p_config.clone().is_some() { + if let (Some(libp2p_public_key), Some(libp2p_address)) = + (libp2p_public_key, libp2p_address) + { + // Push to our bootstrap nodes + self.config + .libp2p_config + .as_mut() + .unwrap() + .bootstrap_nodes + .push((libp2p_public_key, libp2p_address)); + } + } + Ok(node_index) + } + + // Assumes nodes will set their own index that they received from the + // 'identity' endpoint + fn post_getconfig(&mut self, _node_index: u16) -> Result, ServerError> { + Ok(self.config.clone()) + } + + // Assumes one node do not get twice + fn get_tmp_node_index(&mut self) -> Result { + let tmp_node_index = self.tmp_latest_index; + self.tmp_latest_index += 1; + + if usize::from(tmp_node_index) >= self.config.config.num_nodes_with_stake.get() { + return Err(ServerError { + status: tide_disco::StatusCode::BAD_REQUEST, + message: "Node index getter for key pair generation has reached capacity" + .to_string(), + }); + } + Ok(tmp_node_index) + } + + #[allow(clippy::cast_possible_truncation)] + fn register_public_key( + &mut self, + pubkey: &mut Vec, + da_requested: bool, + libp2p_address: Option, + libp2p_public_key: Option, + ) -> Result<(u64, bool), ServerError> { + if self.fixed_stake_table { + self.register_from_list(pubkey, da_requested, libp2p_address, libp2p_public_key) + } else { + self.register_unknown(pubkey, da_requested, libp2p_address, libp2p_public_key) + } + } + fn peer_pub_ready(&self) -> Result { if !self.peer_pub_ready { return Err(ServerError { @@ -768,12 +853,24 @@ where network_config.manual_start_password = env_password.ok(); } - network_config.config.known_nodes_with_stake = vec![]; - network_config.config.known_da_nodes = vec![]; - - if !network_config.enable_registration_verification { - tracing::error!("REGISTRATION VERIFICATION IS TURNED OFF"); - } + network_config.config.known_nodes_with_stake = network_config + .public_keys + .iter() + .map(|keys| PeerConfig { + stake_table_entry: keys.stake_table_key.stake_table_entry(keys.stake), + state_ver_key: keys.state_ver_key.clone(), + }) + .collect(); + + network_config.config.known_da_nodes = network_config + .public_keys + .iter() + .filter(|keys| keys.da) + .map(|keys| PeerConfig { + stake_table_entry: keys.stake_table_key.stake_table_entry(keys.stake), + state_ver_key: keys.state_ver_key.clone(), + }) + .collect(); let web_api = define_api().map_err(|_e| io::Error::new(ErrorKind::Other, "Failed to define api")); From 967dfd5802b6e2cabb4303acfe7cb33ea576e546 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 23 Sep 2024 10:40:00 -0400 Subject: [PATCH 1217/1393] Include details in error message for header construction (#3690) --- task-impls/src/quorum_proposal/handlers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 867b6298a9..e53a618320 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -362,7 +362,7 @@ impl HandleDepOutput for ProposalDependencyHandle< ) .await { - error!("Failed to publish proposal; error = {e}"); + error!("Failed to publish proposal; error = {e:#}"); } } } From 8a89c5a4a18d3d37266023af94fab2ab384553b5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 23 Sep 2024 11:04:03 -0400 Subject: [PATCH 1218/1393] Lower threshold for marketplace test (#3692) --- testing/tests/tests_3/test_marketplace.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/tests/tests_3/test_marketplace.rs b/testing/tests/tests_3/test_marketplace.rs index 2d97535427..eb0738ffc7 100644 --- a/testing/tests/tests_3/test_marketplace.rs +++ b/testing/tests/tests_3/test_marketplace.rs @@ -73,7 +73,7 @@ cross_tests!( ); // Test marketplace with both regular builders down but solver + fallback builder up -// Requires 90% nonempty blocks +// Requires 80% nonempty blocks cross_tests!( TestName: test_marketplace_builders_down, Impls: [MemoryImpl], @@ -96,7 +96,7 @@ cross_tests!( changes: HashMap::from([(0, BuilderChange::Down)]) } ], - validate_transactions: nonempty_block_threshold((90,100)), + validate_transactions: nonempty_block_threshold((40,50)), ..TestDescription::default() } }, @@ -128,7 +128,7 @@ cross_tests!( BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) }, - validate_transactions: nonempty_block_threshold((80,100)), + validate_transactions: nonempty_block_threshold((40,50)), ..TestDescription::default() } }, From 191171942c12b691ba46da8b3c080cca8f94f564 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 23 Sep 2024 12:25:47 -0400 Subject: [PATCH 1219/1393] Fix double counting nodes in `post_ready` (#3694) --- orchestrator/src/lib.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index f6c83c9309..ba9d31e463 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -12,7 +12,7 @@ pub mod client; pub mod config; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, fs::OpenOptions, io::{self, ErrorKind}, time::Duration, @@ -90,7 +90,7 @@ struct OrchestratorState { /// Will be set to true once all nodes post they are ready to start start: bool, /// The total nodes that have posted they are ready to start - nodes_connected: u64, + nodes_connected: HashSet>, /// The results of the benchmarks bench_results: BenchResults, /// The number of nodes that have posted their results @@ -131,7 +131,7 @@ impl OrchestratorState { config: network_config, peer_pub_ready, pub_posted: HashMap::new(), - nodes_connected: 0, + nodes_connected: HashSet::new(), start: false, bench_results: BenchResults::default(), nodes_post_results: 0, @@ -534,12 +534,16 @@ where }); } - self.nodes_connected += 1; - - tracing::error!("Nodes connected: {}", self.nodes_connected); + // `HashSet::insert()` returns whether the node was newly inserted (true) or not + if self.nodes_connected.insert(peer_config.clone()) { + tracing::error!( + "Node {peer_config} connected. Total nodes connected: {}", + self.nodes_connected.len() + ); + } // i.e. nodes_connected >= num_nodes_with_stake * (start_threshold.0 / start_threshold.1) - if self.nodes_connected * self.config.config.start_threshold.1 + if self.nodes_connected.len() as u64 * self.config.config.start_threshold.1 >= (self.config.config.num_nodes_with_stake.get() as u64) * self.config.config.start_threshold.0 { From d2819edd1475ac88d68b25af75537f4f65d5385f Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Mon, 23 Sep 2024 12:47:33 -0600 Subject: [PATCH 1220/1393] remove generic-array (#3696) --- types/Cargo.toml | 4 ---- types/src/qc.rs | 2 +- types/src/signature_key.rs | 2 +- types/src/traits/qc.rs | 2 +- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/types/Cargo.toml b/types/Cargo.toml index cb0f1faede..652c035e89 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -29,10 +29,6 @@ futures = { workspace = true } cdn-proto = { workspace = true } reqwest = { workspace = true } -generic-array = { workspace = true } - -# TODO generic-array should not be a direct dependency -# https://github.com/EspressoSystems/HotShot/issues/1850 lazy_static = { workspace = true } memoize = { workspace = true } rand = { workspace = true } diff --git a/types/src/qc.rs b/types/src/qc.rs index 18a9739347..2f556159a2 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -16,8 +16,8 @@ use ark_std::{ vec::Vec, }; use bitvec::prelude::*; +use digest::generic_array::GenericArray; use ethereum_types::U256; -use generic_array::GenericArray; use jf_signature::{AggregateableSignatureSchemes, SignatureError}; use serde::{Deserialize, Serialize}; use typenum::U32; diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 562e77cb06..2741912f14 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -8,8 +8,8 @@ use ark_serialize::SerializationError; use bitvec::{slice::BitSlice, vec::BitVec}; +use digest::generic_array::GenericArray; use ethereum_types::U256; -use generic_array::GenericArray; use jf_signature::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, SignatureError, SignatureScheme, diff --git a/types/src/traits/qc.rs b/types/src/traits/qc.rs index 439b5bcc89..1f907ed59c 100644 --- a/types/src/traits/qc.rs +++ b/types/src/traits/qc.rs @@ -12,7 +12,7 @@ use ark_std::{ vec::Vec, }; use bitvec::prelude::*; -use generic_array::{ArrayLength, GenericArray}; +use digest::generic_array::{ArrayLength, GenericArray}; use jf_signature::{AggregateableSignatureSchemes, SignatureError}; use serde::{Deserialize, Serialize}; From 0481638cfe081deb7578f6d93aec9a3b1b034f94 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 24 Sep 2024 08:22:54 -0600 Subject: [PATCH 1221/1393] add configurable network config (#3698) --- hotshot/src/traits/networking/libp2p_network.rs | 2 +- libp2p-networking/src/network/node/config.rs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index e75e601235..39f5965751 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -553,7 +553,7 @@ impl Libp2pNetwork { is_ready: Arc::new(AtomicBool::new(false)), // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples // https://github.com/EspressoSystems/HotShot/issues/2088 - dht_timeout: Duration::from_secs(120), + dht_timeout: config.dht_timeout.unwrap_or(Duration::from_secs(120)), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, subscribed_topics, diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 838bee0f97..e7eb748342 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -51,6 +51,10 @@ pub struct NetworkNodeConfig { /// If not supplied we will not send an authentication message during the handshake #[builder(default)] pub auth_message: Option>, + + #[builder(default)] + /// The timeout for DHT lookups. + pub dht_timeout: Option, } /// Configuration for Libp2p's Gossipsub From 7b9408413967a72a8fc05cfaa0a43ce2929b9339 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 24 Sep 2024 11:16:27 -0400 Subject: [PATCH 1222/1393] Fix commitment in all versions (#3699) --- example-types/src/node_types.rs | 55 +-------------------------------- types/src/data.rs | 27 +++------------- types/src/simple_vote.rs | 16 +++------- 3 files changed, 11 insertions(+), 87 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index c5e5c6a3a9..ec29f2a3c6 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -183,7 +183,7 @@ mod tests { }; use serde::{Deserialize, Serialize}; - use crate::node_types::{MarketplaceTestVersions, NodeType, TestTypes, TestVersions}; + use crate::node_types::{MarketplaceTestVersions, NodeType, TestTypes}; #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Hash, Eq)] /// Dummy data used for test struct TestData { @@ -198,24 +198,6 @@ mod tests { } } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_versioned_commitment() { - let view = ::Time::new(0); - let upgrade_lock = UpgradeLock::new(); - - let data = TestData { data: 10 }; - let data_commitment: [u8; 32] = data.commit().into(); - - let versioned_data = - VersionedVoteData::::new(data, view, &upgrade_lock) - .await - .unwrap(); - let versioned_data_commitment: [u8; 32] = versioned_data.commit().into(); - - assert_eq!(versioned_data_commitment, data_commitment); - } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Test that the view number affects the commitment post-marketplace @@ -252,39 +234,4 @@ mod tests { "left: {versioned_data_commitment_0:?}, right: {versioned_data_commitment_1:?}" ); } - - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - /// Test that the view number does not affect the commitment pre-marketplace - async fn test_versioned_commitment_excludes_view() { - let upgrade_lock = UpgradeLock::new(); - - let data = TestData { data: 10 }; - - let view_0 = ::Time::new(0); - let view_1 = ::Time::new(1); - - let versioned_data_0 = VersionedVoteData::::new( - data, - view_0, - &upgrade_lock, - ) - .await - .unwrap(); - let versioned_data_1 = VersionedVoteData::::new( - data, - view_1, - &upgrade_lock, - ) - .await - .unwrap(); - - let versioned_data_commitment_0: [u8; 32] = versioned_data_0.commit().into(); - let versioned_data_commitment_1: [u8; 32] = versioned_data_1.commit().into(); - - assert!( - versioned_data_commitment_0 == versioned_data_commitment_1, - "left: {versioned_data_commitment_0:?}, right: {versioned_data_commitment_1:?}" - ); - } } diff --git a/types/src/data.rs b/types/src/data.rs index 88071936f7..ca8f746008 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -32,7 +32,6 @@ use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; -use vbs::version::StaticVersionType; use vec1::Vec1; use crate::{ @@ -446,25 +445,14 @@ pub struct Leaf { } impl Leaf { + #[allow(clippy::unused_async)] /// Calculate the leaf commitment, /// which is gated on the version to include the block header. pub async fn commit( &self, - upgrade_lock: &UpgradeLock, + _upgrade_lock: &UpgradeLock, ) -> Commitment { - let version = upgrade_lock.version_infallible(self.view_number).await; - - if version < V::Marketplace::VERSION { - ::commit(self) - } else { - RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", *self.view_number) - .field("parent leaf commitment", self.parent_commitment) - .field("block header", self.block_header.commit()) - .field("justify qc", self.justify_qc.commit()) - .optional("upgrade certificate", &self.upgrade_certificate) - .finalize() - } + ::commit(self) } } @@ -791,15 +779,10 @@ pub fn serialize_signature2( impl Committable for Leaf { fn commit(&self) -> committable::Commitment { - // Skip the transaction commitments, so that the repliacs can reconstruct the leaf. RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) - .u64_field("block number", self.height()) - .field("parent Leaf commitment", self.parent_commitment) - .var_size_field( - "block payload commitment", - self.payload_commitment().as_ref(), - ) + .field("parent leaf commitment", self.parent_commitment) + .field("block header", self.block_header.commit()) .field("justify qc", self.justify_qc.commit()) .optional("upgrade certificate", &self.upgrade_certificate) .finalize() diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index a86452b9a9..70ff7f6a1c 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -11,7 +11,7 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData}; use anyhow::Result; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use vbs::version::{StaticVersionType, Version}; +use vbs::version::Version; use crate::{ data::Leaf, @@ -240,16 +240,10 @@ impl Committable for VersionedVoteData { fn commit(&self) -> Commitment { - if self.version < V::Marketplace::VERSION { - let bytes: [u8; 32] = self.data.commit().into(); - - Commitment::::from_raw(bytes) - } else { - committable::RawCommitmentBuilder::new("Vote") - .var_size_bytes(self.data.commit().as_ref()) - .u64(*self.view) - .finalize() - } + committable::RawCommitmentBuilder::new("Vote") + .var_size_bytes(self.data.commit().as_ref()) + .u64(*self.view) + .finalize() } } From c3f99fbd2da82cf2b87743bd918ed7fd62a9a9c8 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:16:15 -0400 Subject: [PATCH 1223/1393] load orchestrator config from file (#3702) --- orchestrator/src/config.rs | 12 ++++++++++++ orchestrator/src/lib.rs | 21 ++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index b7b69a48df..6316f1c9cb 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -407,6 +407,18 @@ impl Default for NetworkConfig { } } +/// a network config stored in a file +#[serde_inline_default] +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] +#[serde(bound(deserialize = ""))] +pub struct PublicKeysFile { + /// The list of public keys that are allowed to connect to the orchestrator + /// + /// If nonempty, this list becomes the stake table and is used to determine DA membership (ignoring the node's request). + #[serde(default)] + pub public_keys: Vec>, +} + /// a network config stored in a file #[serde_inline_default] #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index ba9d31e463..397f797734 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -13,6 +13,7 @@ pub mod config; use std::{ collections::{HashMap, HashSet}, + fs, fs::OpenOptions, io::{self, ErrorKind}, time::Duration, @@ -46,7 +47,7 @@ use vbs::{ BinarySerializer, }; -use crate::config::NetworkConfig; +use crate::config::{NetworkConfig, PublicKeysFile}; /// Orchestrator is not, strictly speaking, bound to the network; it can have its own versioning. /// Orchestrator Version (major) @@ -857,6 +858,24 @@ where network_config.manual_start_password = env_password.ok(); } + // Try to overwrite the network_config public keys + // from the file the env var points to, or panic. + { + let env_public_keys = std::env::var("ORCHESTRATOR_PUBLIC_KEYS"); + + if let Ok(filepath) = env_public_keys { + #[allow(clippy::panic)] + let config_file_as_string: String = fs::read_to_string(filepath.clone()) + .unwrap_or_else(|_| panic!("Could not read config file located at {filepath}")); + + let file: PublicKeysFile = + toml::from_str::>(&config_file_as_string) + .expect("Unable to convert config file to TOML"); + + network_config.public_keys = file.public_keys; + } + } + network_config.config.known_nodes_with_stake = network_config .public_keys .iter() From efd7c48c0ace16aa701a887881f9f064d10b1b40 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 24 Sep 2024 20:39:12 -0600 Subject: [PATCH 1224/1393] [CX_HARDENING] Evaluate and Remove Issue Links in HotShot (#3701) --- hotshot/src/lib.rs | 2 - .../src/traits/networking/libp2p_network.rs | 1 - libp2p-networking/README.md | 2 +- libp2p-networking/src/network/node.rs | 4 - task-impls/src/consensus/handlers.rs | 90 ++++++------------- task-impls/src/consensus/mod.rs | 5 +- task-impls/src/helpers.rs | 6 +- task-impls/src/network.rs | 7 +- task-impls/src/view_sync.rs | 3 - testing/src/helpers.rs | 8 +- testing/src/test_builder.rs | 4 - testing/tests/tests_1/test_with_failures_2.rs | 2 - .../tests/tests_2/test_with_failures_one.rs | 2 - .../tests_3/test_with_failures_half_f.rs | 2 - testing/tests/tests_4/test_with_failures_f.rs | 2 - testing/tests/tests_5/timeout.rs | 6 -- types/src/data.rs | 42 --------- types/src/error.rs | 2 - types/src/message.rs | 1 - types/src/simple_certificate.rs | 21 ----- types/src/vid.rs | 10 ++- 21 files changed, 46 insertions(+), 176 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 2fd7cde872..4c5e40a0b5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -436,8 +436,6 @@ impl, V: Versions> SystemContext) { debug!(?event, "send_external_event"); broadcast_event(event, &self.external_event_stream.0).await; diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 39f5965751..fbb5f32c67 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -552,7 +552,6 @@ impl Libp2pNetwork { bootstrap_addrs, is_ready: Arc::new(AtomicBool::new(false)), // This is optimal for 10-30 nodes. TODO: parameterize this for both tests and examples - // https://github.com/EspressoSystems/HotShot/issues/2088 dht_timeout: config.dht_timeout.unwrap_or(Duration::from_secs(120)), is_bootstrapped: Arc::new(AtomicBool::new(false)), metrics, diff --git a/libp2p-networking/README.md b/libp2p-networking/README.md index b6f7b86bff..62a190398b 100644 --- a/libp2p-networking/README.md +++ b/libp2p-networking/README.md @@ -45,7 +45,7 @@ spawns off five integration tests. - One that intersperses both broadcast and increments. - Two that publishes entries to the DHT and checks that other nodes can access these entries. -This can fail on MacOS (and linux) due to ["too many open files."](https://github.com/EspressoSystems/hotshot-networking-demo/issues/18) The fix is: +This can fail on MacOS (and linux) due to "too many open files." The fix is: ```bash ulimit -n 4096 diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index c90d1f1fc3..47e6e6d5e3 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -222,10 +222,6 @@ impl NetworkNode { // - Build a gossipsub network behavior let gossipsub: Gossipsub = Gossipsub::new( - // TODO do we even need this? - // - // if messages are signed at the the consensus level AND the network - // level (noise), this feels redundant. MessageAuthenticity::Signed(keypair.clone()), gossipsub_config, ) diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 9ef7f69702..a2c4e6170c 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -218,14 +218,16 @@ pub async fn publish_proposal_from_commitment_and_metadata July, 2024 and this is still here, something has gone horribly wrong. - let cnm = commitment_and_metadata - .clone() - .context("Cannot propose because we don't have the VID payload commitment and metadata")?; + ensure!( + commitment_and_metadata.is_some(), + "Cannot propose because we don't have the VID payload commitment and metadata" + ); + + // This is a safe unwrap due to the prior ensure call. + let commitment_and_metadata = commitment_and_metadata.unwrap(); ensure!( - cnm.block_view == view, + commitment_and_metadata.block_view == view, "Cannot propose because our VID payload commitment and metadata is for an older view." ); @@ -236,7 +238,7 @@ pub async fn publish_proposal_from_commitment_and_metadata( - view: TYPES::Time, - sender: Sender>>, - receiver: Receiver>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - delay: u64, - formed_upgrade_certificate: Option>, - upgrade_lock: UpgradeLock, - commitment_and_metadata: Option>, - proposal_cert: Option>, - instance_state: Arc, - id: u64, -) -> Result> { - publish_proposal_from_commitment_and_metadata( - view, - sender, - receiver, - quorum_membership, - public_key, - private_key, - consensus, - delay, - formed_upgrade_certificate, - upgrade_lock, - commitment_and_metadata, - proposal_cert, - instance_state, - id, - ) - .await -} - /// Handle the received quorum proposal. /// /// Returns the proposal that should be used to set the `cur_proposal` for other tasks. @@ -490,23 +453,24 @@ pub(crate) async fn handle_quorum_proposal_recv< "Attempting to publish proposal after voting for liveness; now in view: {}", *new_view ); - let create_and_send_proposal_handle = publish_proposal_if_able( - qc.view_number + 1, - event_sender, - event_receiver, - Arc::clone(&task_state.quorum_membership), - task_state.public_key.clone(), - task_state.private_key.clone(), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - task_state.round_start_delay, - task_state.formed_upgrade_certificate.clone(), - task_state.upgrade_lock.clone(), - task_state.payload_commitment_and_metadata.clone(), - task_state.proposal_cert.clone(), - Arc::clone(&task_state.instance_state), - task_state.id, - ) - .await?; + let create_and_send_proposal_handle = + publish_proposal_from_commitment_and_metadata( + qc.view_number + 1, + event_sender, + event_receiver, + Arc::clone(&task_state.quorum_membership), + task_state.public_key.clone(), + task_state.private_key.clone(), + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + task_state.round_start_delay, + task_state.formed_upgrade_certificate.clone(), + task_state.upgrade_lock.clone(), + task_state.payload_commitment_and_metadata.clone(), + task_state.proposal_cert.clone(), + Arc::clone(&task_state.instance_state), + task_state.id, + ) + .await?; task_state .spawned_tasks diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index c4b7291bc3..8770de2af4 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -14,6 +14,7 @@ use async_lock::RwLock; use async_std::task::JoinHandle; use async_trait::async_trait; use futures::future::join_all; +use handlers::publish_proposal_from_commitment_and_metadata; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, @@ -38,7 +39,7 @@ use tracing::{debug, error, info, instrument, warn}; use crate::{ consensus::handlers::{ - handle_quorum_proposal_recv, handle_quorum_proposal_validated, publish_proposal_if_able, + handle_quorum_proposal_recv, handle_quorum_proposal_validated, update_state_and_vote_if_able, VoteInfo, }, events::HotShotEvent, @@ -191,7 +192,7 @@ impl, V: Versions> ConsensusTaskSt event_sender: Sender>>, event_receiver: Receiver>>, ) -> Result<()> { - let create_and_send_proposal_handle = publish_proposal_if_able( + let create_and_send_proposal_handle = publish_proposal_from_commitment_and_metadata( view, event_sender, event_receiver, diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 67f78ac670..1c266029c9 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -39,7 +39,7 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, info, instrument, warn}; use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; @@ -699,9 +699,7 @@ pub(crate) async fn update_view( debug!("Updating view from {} to {}", *old_view, *new_view); if *old_view / 100 != *new_view / 100 { - // TODO (https://github.com/EspressoSystems/HotShot/issues/2296): - // switch to info! when INFO logs become less cluttered - error!("Progress: entered view {:>6}", *new_view); + info!("Progress: entered view {:>6}", *new_view); } *cur_view = new_view; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 40597b75b2..6894f0efb0 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -170,9 +170,6 @@ impl NetworkMessageTaskState { } }, }; - // TODO (Keyao benchmarking) Update these event variants (similar to the - // `TransactionsRecv` event) so we can send one event for a vector of messages. - // broadcast_event(Arc::new(event), &self.internal_event_stream).await; } @@ -298,7 +295,7 @@ impl< sender: sender.clone(), kind: MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::VidDisperseMsg(proposal), - )), // TODO not a DaConsensusMessage https://github.com/EspressoSystems/HotShot/issues/1696 + )), }; let serialized_message = match self.upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, @@ -363,7 +360,7 @@ impl< /// which will be used to create a message and transmit on the wire. /// Returns `None` if the parsing result should not be sent on the wire. /// Handles the `VidDisperseSend` event separately using a helper method. - #[allow(clippy::too_many_lines)] // TODO https://github.com/EspressoSystems/HotShot/issues/1704 + #[allow(clippy::too_many_lines)] async fn parse_event( &mut self, event: Arc>, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 134766283b..0b8452a2f1 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -300,7 +300,6 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a relay task already running, so start one if self.membership.leader(vote_view + relay) != self.public_key { - // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; } @@ -339,7 +338,6 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a relay task already running, so start one if self.membership.leader(vote_view + relay) != self.public_key { - // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; } @@ -378,7 +376,6 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a relay task already running, so start one if self.membership.leader(vote_view + relay) != self.public_key { - // TODO ED This will occur because everyone is pulling down votes for now. Will be fixed in `https://github.com/EspressoSystems/HotShot/issues/1471` debug!("View sync vote sent to wrong leader"); return; } diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index a5a992784c..5c41e58380 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -38,7 +38,7 @@ use hotshot_types::{ node_implementation::{NodeType, Versions}, }, utils::{View, ViewInner}, - vid::{vid_scheme, VidCommitment, VidSchemeType}, + vid::{vid_scheme, VidCommitment, VidProposal, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, }; use jf_vid::VidScheme; @@ -291,16 +291,12 @@ pub fn build_payload_commitment( } /// TODO: -#[allow(clippy::type_complexity)] pub fn build_vid_proposal( quorum_membership: &::Membership, view_number: TYPES::Time, transactions: Vec, private_key: &::PrivateKey, -) -> ( - Proposal>, - Vec>>, -) { +) -> VidProposal { let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); let encoded_transactions = TestTransaction::encode(&transactions); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index df05b8414f..ea14c428be 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -335,8 +335,6 @@ impl, V: Versions> TestDescription pub fn default_multiple_rounds() -> Self { let num_nodes_with_stake = 10; TestDescription:: { - // TODO: remove once we have fixed the DHT timeout issue - // https://github.com/EspressoSystems/HotShot/issues/2088 num_bootstrap_nodes: num_nodes_with_stake, num_nodes_with_stake, start_nodes: num_nodes_with_stake, @@ -371,8 +369,6 @@ impl, V: Versions> TestDescription // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. - // TODO: Update message broadcasting to avoid hanging - // da_staked_committee_size: 14, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 8573af51f5..c922b7fd6e 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -46,8 +46,6 @@ cross_tests!( // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. - // TODO: Update message broadcasting to avoid hanging - // let dead_nodes = vec![ ChangeNode { idx: 10, diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_2/test_with_failures_one.rs index c540c9cbfc..4fc96e5564 100644 --- a/testing/tests/tests_2/test_with_failures_one.rs +++ b/testing/tests/tests_2/test_with_failures_one.rs @@ -28,8 +28,6 @@ cross_tests!( // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. - // TODO: Update message broadcasting to avoid hanging - // let dead_nodes = vec![ChangeNode { idx: 19, updown: NodeAction::Down, diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 8b1eb531a2..797aa77cab 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -27,8 +27,6 @@ cross_tests!( // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. - // TODO: Update message broadcasting to avoid hanging - // let dead_nodes = vec![ ChangeNode { idx: 17, diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 931d7eaf5e..6ea256fc2d 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -30,8 +30,6 @@ cross_tests!( // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. - // TODO: Update message broadcasting to avoid hanging - // let dead_nodes = vec![ ChangeNode { idx: 14, diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 2269d5000d..791e9551bd 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -7,8 +7,6 @@ #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] -// TODO Add memory network tests after this issue is finished: -// https://github.com/EspressoSystems/HotShot/issues/1790 async fn test_timeout() { use std::time::Duration; @@ -56,8 +54,6 @@ async fn test_timeout() { }, ); - // TODO ED Test with memory network once issue is resolved - // https://github.com/EspressoSystems/HotShot/issues/1790 metadata .gen_launcher(0) .launch() @@ -120,8 +116,6 @@ async fn test_timeout_libp2p() { }, ); - // TODO ED Test with memory network once issue is resolved - // https://github.com/EspressoSystems/HotShot/issues/1790 metadata .gen_launcher(0) .launch() diff --git a/types/src/data.rs b/types/src/data.rs index ca8f746008..a51b512ae4 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -638,48 +638,6 @@ impl Leaf { self.block_header().payload_commitment() } - // TODO: Replace this function with `extends_upgrade` after the following issue is done: - // https://github.com/EspressoSystems/HotShot/issues/3357. - /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf - /// - /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. - /// - /// # Errors - /// Returns an error if the certificates are not identical, or that when we no longer see a - /// cert, it's for the right reason. - pub fn temp_extends_upgrade( - &self, - parent: &Self, - decided_upgrade_certificate: &Option>, - ) -> Result<()> { - match (self.upgrade_certificate(), parent.upgrade_certificate()) { - // Easiest cases are: - // - no upgrade certificate on either: this is the most common case, and is always fine. - // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} - // If we no longer see a cert, we have to make sure that we either: - // - no longer care because we have passed new_version_first_view, or - // - no longer care because we have passed `decide_by` without deciding the certificate. - (None, Some(parent_cert)) => { - ensure!(self.view_number() > parent_cert.data.new_version_first_view - || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate.is_none()), - "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." - ); - } - // If we both have a certificate, they should be identical. - // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. - // I think this is a fairly lax restriction. - (Some(cert), Some(parent_cert)) => { - ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } - } - - // This check should be added once we sort out the genesis leaf/justify_qc issue. - // ensure!(self.parent_commitment() == parent_leaf.commit(), "The commitment of the parent leaf does not match the specified parent commitment."); - - Ok(()) - } - /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf /// /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. diff --git a/types/src/error.rs b/types/src/error.rs index 5127f1d68c..a73b0e1a88 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -82,8 +82,6 @@ pub enum HotShotError { threshold: NonZeroU64, }, /// Miscellaneous error - /// TODO fix this with - /// #181 Misc { /// source of error context: String, diff --git a/types/src/message.rs b/types/src/message.rs index 303e1d7073..70a73cec5f 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -218,7 +218,6 @@ pub enum DaConsensusMessage { /// Initiate VID dispersal. /// /// Like [`DaProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. - /// TODO this variant should not be a [`DaConsensusMessage`] because VidDisperseMsg(Proposal>), } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 5d87393ea7..f948e3a20b 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -198,27 +198,6 @@ impl Display for QuorumCertificate { } impl UpgradeCertificate { - // TODO: Replace this function with `is_relevant` after the following issue is done: - // https://github.com/EspressoSystems/HotShot/issues/3357. - /// Determines whether or not a certificate is relevant (i.e. we still have time to reach a - /// decide) - /// - /// # Errors - /// Returns an error when the certificate is no longer relevant - pub fn temp_is_relevant( - &self, - view_number: TYPES::Time, - decided_upgrade_certificate: Option, - ) -> Result<()> { - ensure!( - self.data.decide_by >= view_number - || decided_upgrade_certificate.is_some_and(|cert| cert == *self), - "Upgrade certificate is no longer relevant." - ); - - Ok(()) - } - /// Determines whether or not a certificate is relevant (i.e. we still have time to reach a /// decide) /// diff --git a/types/src/vid.rs b/types/src/vid.rs index 1d8afe2091..07f5b2cb1d 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -34,7 +34,10 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use sha2::Sha256; -use crate::constants::SRS_DEGREE; +use crate::{ + constants::SRS_DEGREE, data::VidDisperse as HotShotVidDisperse, data::VidDisperseShare, + message::Proposal, +}; /// VID scheme constructor. /// @@ -104,6 +107,11 @@ pub type VidCommon = ::Common; pub type VidShare = ::Share; /// VID PrecomputeData type pub type VidPrecomputeData = ::PrecomputeData; +/// VID proposal type +pub type VidProposal = ( + Proposal>, + Vec>>, +); #[cfg(not(feature = "gpu-vid"))] /// Internal Jellyfish VID scheme From 251b8df17cdfb1ba308cab63bc17a75ceabf825d Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:11:48 -0400 Subject: [PATCH 1225/1393] Store upgrade certificate on decide and load on restart (#3679) --- example-types/src/storage_types.rs | 15 ++++++++++++++- hotshot/src/lib.rs | 10 ++++++++-- hotshot/src/tasks/mod.rs | 17 +++++++++++++++++ task-impls/src/consensus/handlers.rs | 7 +++++++ task-impls/src/quorum_vote/handlers.rs | 12 +++++++++++- task-impls/src/upgrade.rs | 2 -- testing/src/spinning_task.rs | 2 ++ types/src/constants.rs | 4 ++++ types/src/message.rs | 17 +++++++++++++---- types/src/traits/storage.rs | 7 ++++++- 10 files changed, 82 insertions(+), 11 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 460152c16e..5c6277f35c 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -17,7 +17,7 @@ use hotshot_types::{ data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, event::HotShotAction, message::Proposal, - simple_certificate::QuorumCertificate, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ node_implementation::{ConsensusTime, NodeType}, storage::Storage, @@ -60,6 +60,7 @@ pub struct TestStorage { /// `should_return_err` is a testing utility to validate negative cases. pub should_return_err: bool, pub delay_config: DelayConfig, + pub decided_upgrade_certificate: Arc>>>, } impl Default for TestStorage { @@ -68,6 +69,7 @@ impl Default for TestStorage { inner: Arc::new(RwLock::new(TestStorageState::default())), should_return_err: false, delay_config: DelayConfig::default(), + decided_upgrade_certificate: Arc::new(RwLock::new(None)), } } } @@ -91,6 +93,9 @@ impl TestStorage { pub async fn high_qc_cloned(&self) -> Option> { self.inner.read().await.high_qc.clone() } + pub async fn decided_upgrade_certificate(&self) -> Option> { + self.decided_upgrade_certificate.read().await.clone() + } pub async fn last_actioned_view(&self) -> TYPES::Time { self.inner.read().await.action } @@ -183,4 +188,12 @@ impl Storage for TestStorage { Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } + async fn update_decided_upgrade_certificate( + &self, + decided_upgrade_certificate: Option>, + ) -> Result<()> { + *self.decided_upgrade_certificate.write().await = decided_upgrade_certificate; + + Ok(()) + } } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 4c5e40a0b5..e5fa8a48bd 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -49,7 +49,7 @@ use hotshot_types::{ data::{Leaf, QuorumProposal}, event::{EventType, LeafInfo}, message::{DataMessage, Message, MessageKind, Proposal}, - simple_certificate::QuorumCertificate, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -255,7 +255,8 @@ impl, V: Versions> SystemContext::new(); + let upgrade_lock = + UpgradeLock::::from_certificate(&initializer.decided_upgrade_certificate); // Allow overflow on the channel, otherwise sending to it may block. external_rx.set_overflow(true); @@ -959,6 +960,8 @@ pub struct HotShotInitializer { /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. high_qc: QuorumCertificate, + /// Previously decided upgrade certificate; this is necessary if an upgrade has happened and we are not restarting with the new version + decided_upgrade_certificate: Option>, /// Undecided leafs that were seen, but not yet decided on. These allow a restarting node /// to vote and propose right away if they didn't miss anything while down. undecided_leafs: Vec>, @@ -986,6 +989,7 @@ impl HotShotInitializer { actioned_view: TYPES::Time::new(0), saved_proposals: BTreeMap::new(), high_qc, + decided_upgrade_certificate: None, undecided_leafs: Vec::new(), undecided_state: BTreeMap::new(), instance_state, @@ -1008,6 +1012,7 @@ impl HotShotInitializer { actioned_view: TYPES::Time, saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, + decided_upgrade_certificate: Option>, undecided_leafs: Vec>, undecided_state: BTreeMap>, ) -> Self { @@ -1020,6 +1025,7 @@ impl HotShotInitializer { actioned_view, saved_proposals, high_qc, + decided_upgrade_certificate, undecided_leafs, undecided_state, } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 106300e271..bdd59eeb86 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -195,6 +195,23 @@ pub async fn add_consensus_tasks, handle.add_task(DaTaskState::::create_from(handle).await); handle.add_task(TransactionTaskState::::create_from(handle).await); + { + let mut upgrade_certificate_lock = handle + .hotshot + .upgrade_lock + .decided_upgrade_certificate + .write() + .await; + + // clear the loaded certificate if it's now outdated + if upgrade_certificate_lock + .as_ref() + .is_some_and(|cert| V::Base::VERSION >= cert.data.new_version) + { + *upgrade_certificate_lock = None; + } + } + // only spawn the upgrade task if we are actually configured to perform an upgrade. if V::Base::VERSION < V::Upgrade::VERSION { handle.add_task(UpgradeTaskState::::create_from(handle).await); diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index a2c4e6170c..193ebbd257 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -540,6 +540,13 @@ pub async fn handle_quorum_proposal_validated< .await; *decided_certificate_lock = Some(cert.clone()); drop(decided_certificate_lock); + + let _ = task_state + .storage + .write() + .await + .update_decided_upgrade_certificate(Some(cert.clone())) + .await; } let mut consensus = task_state.consensus.write().await; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index a19a7ce5be..d414573e19 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -13,7 +13,10 @@ use hotshot_types::{ consensus::OuterConsensus, data::QuorumProposal, event::{Event, EventType}, - traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + traits::{ + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + storage::Storage, + }, vote::HasViewNumber, }; use tracing::{debug, instrument}; @@ -60,6 +63,13 @@ pub(crate) async fn handle_quorum_proposal_validated< .await; *decided_certificate_lock = Some(cert.clone()); drop(decided_certificate_lock); + + let _ = task_state + .storage + .write() + .await + .update_decided_upgrade_certificate(Some(cert.clone())) + .await; } let mut consensus_writer = task_state.consensus.write().await; diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index ef8f952004..d050ef4e54 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -269,9 +269,7 @@ impl, V: Versions> UpgradeTaskStat old_version: V::Base::VERSION, new_version: V::Upgrade::VERSION, new_version_hash: V::UPGRADE_HASH.to_vec(), - // We schedule the upgrade to begin 15 views in the future old_version_last_view: TYPES::Time::new(view + UPGRADE_BEGIN_OFFSET), - // and end 20 views in the future new_version_first_view: TYPES::Time::new(view + UPGRADE_FINISH_OFFSET), decide_by: TYPES::Time::new(view + UPGRADE_DECIDE_BY_OFFSET), }; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index ef4709af68..bd4d6850c4 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -154,6 +154,7 @@ where TYPES::Time::genesis(), BTreeMap::new(), self.high_qc.clone(), + None, Vec::new(), BTreeMap::new(), ); @@ -238,6 +239,7 @@ where ) .await, ), + read_storage.decided_upgrade_certificate().await, Vec::new(), BTreeMap::new(), ); diff --git a/types/src/constants.rs b/types/src/constants.rs index ddcddebd80..d559e93655 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -44,8 +44,12 @@ pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 100_000; /// The offset for how far in the future we will send out a `QuorumProposal` with an `UpgradeCertificate` we form. This is also how far in advance of sending a `QuorumProposal` we begin collecting votes on an `UpgradeProposal`. pub const UPGRADE_PROPOSE_OFFSET: u64 = 5; +#[cfg(test)] /// The offset for how far in the future the upgrade certificate we attach should be decided on (or else discarded). pub const UPGRADE_DECIDE_BY_OFFSET: u64 = UPGRADE_PROPOSE_OFFSET + 5; +#[cfg(not(test))] +/// The offset for how far in the future the upgrade certificate we attach should be decided on (or else discarded). +pub const UPGRADE_DECIDE_BY_OFFSET: u64 = UPGRADE_PROPOSE_OFFSET + 100; /// The offset for how far in the future the upgrade actually begins. pub const UPGRADE_BEGIN_OFFSET: u64 = UPGRADE_DECIDE_BY_OFFSET + 5; diff --git a/types/src/message.rs b/types/src/message.rs index 70a73cec5f..407845b001 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -397,6 +397,15 @@ impl UpgradeLock { } } + #[allow(clippy::new_without_default)] + /// Create a new `UpgradeLock` from an optional upgrade certificate + pub fn from_certificate(certificate: &Option>) -> Self { + Self { + decided_upgrade_certificate: Arc::new(RwLock::new(certificate.clone())), + _pd: PhantomData::, + } + } + /// Calculate the version applied in a view, based on the provided upgrade lock. /// /// # Errors @@ -457,8 +466,8 @@ impl UpgradeLock { // Associated constants cannot be used in pattern matches, so we do this trick instead. v if v == V::Base::VERSION => Serializer::::serialize(&message), v if v == V::Upgrade::VERSION => Serializer::::serialize(&message), - _ => { - bail!("Attempted to serialize with an incompatible version. This should be impossible."); + v => { + bail!("Attempted to serialize with version {}, which is incompatible. This should be impossible.", v); } }; @@ -481,8 +490,8 @@ impl UpgradeLock { let deserialized_message: M = match actual_version { v if v == V::Base::VERSION => Serializer::::deserialize(message), v if v == V::Upgrade::VERSION => Serializer::::deserialize(message), - _ => { - bail!("Cannot deserialize message!"); + v => { + bail!("Cannot deserialize message with stated version {}", v); } } .context("Failed to deserialize message!")?; diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 234135cb43..c11ba771b3 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -20,7 +20,7 @@ use crate::{ data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, event::HotShotAction, message::Proposal, - simple_certificate::QuorumCertificate, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, }; /// Abstraction for storing a variety of consensus payload datum. @@ -46,4 +46,9 @@ pub trait Storage: Send + Sync + Clone { leafs: CommitmentMap>, state: BTreeMap>, ) -> Result<()>; + /// Upgrade the current decided upgrade certificate in storage. + async fn update_decided_upgrade_certificate( + &self, + decided_upgrade_certificate: Option>, + ) -> Result<()>; } From ee9d93ce193ebaa1c736db4fef6c98f9e6de1497 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 27 Sep 2024 09:46:37 -0400 Subject: [PATCH 1226/1393] Fix Hanging CI (#3697) * shutdown completion task last * fix shutdown order * fmt * log everything at info when test fails * clear failed, logging * fix build * different log level * no capture again * typo * move logging + do startups in parallel * fmt * change initial timeout * remove nocapture * nocapture again * fix * only log nodes started when there are nodes starting * log exit * log when timeout starts * log id and view * only shutdown from 1 place * fix build, remove handles from completetion task * leave one up in cdn test * more logs, less threads, maybe fix? * actual fix * lint fmt * allow more than 1 thread, tweaks * remove nocapture * move byzantine tests to ci-3 * rebalance tests more * one more test to 4 * only spawn first timeout when starting consensus * cleanup * fix justfile lint tokio * fix justfil * sleep longer, nocapture to debug * info * fix another hot loop maybe * don't spawn r/r tasks for cdn that do nothing * lint no sleep * lower log level in libp2p * lower builder test threshold * remove nocapture for the last time, hopefully * remove cleanup_previous_timeouts_on_view --- hotshot/src/lib.rs | 20 +++++++- hotshot/src/tasks/task_state.rs | 12 ++--- .../src/traits/networking/push_cdn_network.rs | 5 +- hotshot/src/types/handle.rs | 31 +----------- .../src/network/behaviours/dht/bootstrap.rs | 12 +++-- .../src/network/behaviours/dht/mod.rs | 3 +- task-impls/src/consensus/mod.rs | 26 ++++++++++ task-impls/src/consensus2/mod.rs | 36 ++++++++++++-- task-impls/src/network.rs | 1 + task-impls/src/quorum_proposal/mod.rs | 3 -- task-impls/src/request.rs | 31 +----------- testing/src/byzantine/byzantine_behaviour.rs | 1 + testing/src/completion_task.rs | 19 ++----- testing/src/overall_safety_task.rs | 49 ++----------------- testing/src/spinning_task.rs | 35 ++++++++----- testing/src/test_runner.rs | 28 ++++++----- testing/src/test_task.rs | 21 +++++--- testing/src/txn_task.rs | 2 - testing/tests/tests_2/catchup.rs | 3 ++ .../{tests_2 => tests_3}/byzantine_tests.rs | 0 .../{tests_3 => tests_4}/test_marketplace.rs | 2 +- .../test_with_builder_failures.rs | 0 22 files changed, 159 insertions(+), 181 deletions(-) rename testing/tests/{tests_2 => tests_3}/byzantine_tests.rs (100%) rename testing/tests/{tests_3 => tests_4}/test_marketplace.rs (98%) rename testing/tests/{tests_3 => tests_4}/test_with_builder_failures.rs (100%) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e5fa8a48bd..a397656621 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -34,7 +34,7 @@ use std::{ }; use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::join; @@ -370,6 +370,24 @@ impl, V: Versions> SystemContext, V: Versions> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); - let timeout_task = handle.spawn_initial_timeout_task(); Self { consensus: OuterConsensus::new(consensus), @@ -232,7 +232,7 @@ impl, V: Versions> CreateTaskState payload_commitment_and_metadata: None, vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), - timeout_task, + timeout_task: async_spawn(async {}), spawned_tasks: BTreeMap::new(), formed_upgrade_certificate: None, proposal_cert: None, @@ -282,7 +282,6 @@ impl, V: Versions> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); - let timeout_task = handle.spawn_initial_timeout_task(); Self { latest_proposed_view: handle.cur_view().await, @@ -297,7 +296,6 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), timeout: handle.hotshot.config.next_view_timeout, - timeout_task, round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, formed_upgrade_certificate: None, @@ -312,7 +310,6 @@ impl, V: Versions> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); - let timeout_task = handle.spawn_initial_timeout_task(); Self { public_key: handle.public_key().clone(), @@ -323,7 +320,7 @@ impl, V: Versions> CreateTaskState network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - timeout_task, + timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, round_start_delay: handle.hotshot.config.round_start_delay, output_event_stream: handle.hotshot.external_event_stream.0.clone(), @@ -343,7 +340,6 @@ impl, V: Versions> CreateTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); - let timeout_task = handle.spawn_initial_timeout_task(); Self { public_key: handle.public_key().clone(), @@ -359,7 +355,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - timeout_task, + timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, consensus: OuterConsensus::new(consensus), last_decided_view: handle.cur_view().await, diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index f675b01f37..229b7e9eb4 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -381,7 +381,7 @@ impl TestableNetworkingImplementation // Error if we stopped unexpectedly if let Err(err) = marshal.start().await { - error!("broker stopped: {err}"); + error!("marshal stopped: {err}"); } }); @@ -447,8 +447,7 @@ impl ConnectedNetwork for PushCdnNetwork { async fn spawn_request_receiver_task( &self, ) -> Option, NetworkMsgResponseChannel>)>> { - let (mut _tx, rx) = mpsc::channel(1); - Some(rx) + None } /// Pause sending and receiving on the PushCDN network. diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 0b285b593f..0879a349fe 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -6,24 +6,19 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use async_broadcast::{InactiveReceiver, Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use futures::Stream; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry, Task, TaskState}; -use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; +use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::Consensus, data::Leaf, error::HotShotError, traits::{election::Membership, network::ConnectedNetwork, node_implementation::NodeType}, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; use tracing::instrument; use crate::{traits::NodeImplementation, types::Event, Memberships, SystemContext, Versions}; @@ -236,26 +231,4 @@ impl + 'static, V: Versions> pub fn storage(&self) -> Arc> { Arc::clone(&self.storage) } - - /// A helper function to spawn the initial timeout task from a given `SystemContextHandle`. - #[must_use] - pub fn spawn_initial_timeout_task(&self) -> JoinHandle<()> { - // Clone the event stream that we send the timeout event to - let event_stream = self.internal_event_stream.0.clone(); - let next_view_timeout = self.hotshot.config.next_view_timeout; - let start_view = self.hotshot.start_view; - - // Spawn a task that will sleep for the next view timeout and then send a timeout event - // if not cancelled - async_spawn({ - async move { - async_sleep(Duration::from_millis(next_view_timeout)).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(start_view + 1)), - &event_stream, - ) - .await; - } - }) - } } diff --git a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs index 7fa8674558..075a70ffa7 100644 --- a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -45,7 +45,6 @@ impl DHTBootstrapTask { /// Task's loop async fn run_loop(mut self) { loop { - tracing::debug!("looping bootstrap"); if self.in_progress { match self.rx.next().await { Some(InputEvent::BootstrapFinished) => { @@ -53,10 +52,13 @@ impl DHTBootstrapTask { self.in_progress = false; } Some(InputEvent::ShutdownBootstrap) => { - tracing::debug!("ShutdownBootstrap received, shutting down"); + tracing::info!("ShutdownBootstrap received, shutting down"); break; } - Some(_) => {} + Some(InputEvent::StartBootstrap) => { + tracing::warn!("Trying to start bootstrap that's already in progress"); + continue; + } None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; @@ -74,7 +76,9 @@ impl DHTBootstrapTask { tracing::debug!("ShutdownBootstrap received, shutting down"); break; } - Some(_) => {} + Some(InputEvent::BootstrapFinished) => { + tracing::debug!("not in progress got bootstrap finished"); + } None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index c65969c870..1204190d25 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -30,7 +30,7 @@ use libp2p::kad::{ }; use libp2p_identity::PeerId; use store::ValidatedStore; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, warn}; /// Additional DHT record functionality pub mod record; @@ -464,7 +464,6 @@ impl DHTBehaviour { .. } => { if num_remaining == 0 { - info!("Finished bootstrapping"); self.finish_bootstrap(); } else { debug!("Bootstrap in progress, {} nodes remaining", num_remaining); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 8770de2af4..7f7ea60a72 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -13,6 +13,7 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; +use committable::Committable; use futures::future::join_all; use handlers::publish_proposal_from_commitment_and_metadata; use hotshot_task::task::TaskState; @@ -311,6 +312,31 @@ impl, V: Versions> ConsensusTaskSt warn!("Failed to handle QuorumProposalValidated event {e:#}"); } } + HotShotEvent::QuorumProposalRequestRecv(req, signature) => { + // Make sure that this request came from who we think it did + if !req.key.validate(signature, req.commit().as_ref()) { + warn!("Invalid signature key on proposal request."); + return; + } + + if let Some(quorum_proposal) = self + .consensus + .read() + .await + .last_proposals() + .get(&req.view_number) + { + broadcast_event( + HotShotEvent::QuorumProposalResponseSend( + req.key.clone(), + quorum_proposal.clone(), + ) + .into(), + &event_sender, + ) + .await; + } + } HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quorum vote: {:?}", vote.view_number()); if self.quorum_membership.leader(vote.view_number() + 1) != self.public_key { diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 7b139842f9..3e7511fe8f 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -6,12 +6,18 @@ use std::sync::Arc; +use self::handlers::{ + handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, +}; +use crate::helpers::broadcast_event; +use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; +use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, @@ -28,11 +34,6 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::instrument; -use self::handlers::{ - handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, -}; -use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; - /// Event handlers for use in the `handle` method. mod handlers; @@ -112,6 +113,31 @@ impl, V: Versions> Consensus2TaskS tracing::debug!("Failed to handle QuorumVoteRecv event; error = {e}"); } } + HotShotEvent::QuorumProposalRequestRecv(req, signature) => { + // Make sure that this request came from who we think it did + if !req.key.validate(signature, req.commit().as_ref()) { + tracing::warn!("Invalid signature key on proposal request."); + return; + } + + if let Some(quorum_proposal) = self + .consensus + .read() + .await + .last_proposals() + .get(&req.view_number) + { + broadcast_event( + HotShotEvent::QuorumProposalResponseSend( + req.key.clone(), + quorum_proposal.clone(), + ) + .into(), + &sender, + ) + .await; + } + } HotShotEvent::TimeoutVoteRecv(ref vote) => { if let Err(e) = handle_timeout_vote_recv(vote, Arc::clone(&event), &sender, self).await diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 6894f0efb0..a17ba8229a 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -519,6 +519,7 @@ impl< .await; None } + _ => None, } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 9c24941e71..0d845f2070 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -78,9 +78,6 @@ pub struct QuorumProposalTaskState /// Round start delay from config, in milliseconds. pub round_start_delay: u64, - /// timeout task handle - pub timeout_task: JoinHandle<()>, - /// This node's storage ref pub storage: Arc>, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index d7d2c0b8ff..1129ab2c61 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -13,13 +13,12 @@ use std::{ time::Duration, }; -use anyhow::{ensure, Result}; +use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; -use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, @@ -110,33 +109,6 @@ impl> TaskState for NetworkRequest } Ok(()) } - HotShotEvent::QuorumProposalRequestRecv(req, signature) => { - // Make sure that this request came from who we think it did - ensure!( - req.key.validate(signature, req.commit().as_ref()), - "Invalid signature key on proposal request." - ); - - if let Some(quorum_proposal) = self - .state - .read() - .await - .last_proposals() - .get(&req.view_number) - { - broadcast_event( - HotShotEvent::QuorumProposalResponseSend( - req.key.clone(), - quorum_proposal.clone(), - ) - .into(), - sender, - ) - .await; - } - - Ok(()) - } _ => Ok(()), } } @@ -323,6 +295,7 @@ impl> DelayedRequester { } Err(e) => { error!("Failed to deserialize response: {e}"); + async_sleep(REQUEST_TIMEOUT).await; } } } diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index c4a345b0a8..58eb6ed58d 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -66,6 +66,7 @@ impl, V: Versions> EventTransforme signature.clone(), )); } + consensus.write().await.reset_actions(); result } diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index b5e61fe1ca..f6f68f3db6 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -4,21 +4,18 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_spawn, async_timeout}; -use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use hotshot::traits::TestableNodeImplementation; use hotshot_task_impls::helpers::broadcast_event; -use hotshot_types::traits::node_implementation::{NodeType, Versions}; use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use crate::{test_runner::Node, test_task::TestEvent}; +use crate::test_task::TestEvent; /// the idea here is to run as long as we want @@ -27,19 +24,15 @@ use crate::{test_runner::Node, test_task::TestEvent}; pub struct CompletionTaskErr {} /// Completion task state -pub struct CompletionTask, V: Versions> { +pub struct CompletionTask { pub tx: Sender, pub rx: Receiver, - /// handles to the nodes in the test - pub(crate) handles: Arc>>>, /// Duration of the task. pub duration: Duration, } -impl, V: Versions> - CompletionTask -{ +impl CompletionTask { pub fn run(mut self) -> JoinHandle<()> { async_spawn(async move { if async_timeout(self.duration, self.wait_for_shutdown()) @@ -48,10 +41,6 @@ impl, V: Versions> { broadcast_event(TestEvent::Shutdown, &self.tx).await; } - - for node in &mut self.handles.write().await.iter_mut() { - node.handle.shut_down().await; - } }) } async fn wait_for_shutdown(&mut self) { diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 2a7066b07c..f259215730 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -172,18 +172,7 @@ impl, V: Versions> TestTas match self.ctx.round_results.entry(view_number) { Entry::Occupied(mut o) => { let entry = o.get_mut(); - let leaf = entry.insert_into_result(id, paired_up, maybe_block_size); - - // Here we noticed is a node may start up and time out waiting for a proposal - // So we add the timeout to failed_views, but eventually the proposal is received we decide on the view - // If we do indeed have a view timeout for the node at this point we want to remove it - entry.cleanup_previous_timeouts_on_view( - &mut self.ctx.failed_views, - &view_number, - &(id as u64), - ); - - leaf + entry.insert_into_result(id, paired_up, maybe_block_size) } Entry::Vacant(v) => { let mut round_result = RoundResult::default(); @@ -222,6 +211,8 @@ impl, V: Versions> TestTas match view.status.clone() { ViewStatus::Ok => { self.ctx.successful_views.insert(view_number); + // if a view succeeds remove it from the failed views + self.ctx.failed_views.remove(&view_number); if self.ctx.successful_views.len() >= num_successful_views { let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; } @@ -566,40 +557,6 @@ impl RoundResult { } leaves } - - fn cleanup_previous_timeouts_on_view( - &mut self, - failed_views: &mut HashSet, - view_number: &TYPES::Time, - id: &u64, - ) { - // check if this node had a previous timeout - match self.failed_nodes.get(id) { - Some(error) => match error.as_ref() { - HotShotError::ViewTimeoutError { - view_number, - state: _, - } => { - tracing::debug!( - "Node {} originally timeout for view: {:?}. It has now been decided on.", - id, - view_number - ); - self.failed_nodes.remove(id); - } - _ => return, - }, - None => return, - } - - // check if no more failed nodes - if self.failed_nodes.is_empty() && failed_views.remove(view_number) { - tracing::debug!( - "Removed view {:?} from failed views, all nodes have agreed upon view.", - view_number - ); - } - } } /// cross node safety properties diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index bd4d6850c4..f134376209 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -316,22 +316,33 @@ where } join_all(ready_futs).await; + let mut start_futs = vec![]; + while let Some((node, id)) = new_nodes.pop() { - tracing::error!("Starting node {} back up", id); - let handle = node.run_tasks().await; + let handles = self.handles.clone(); + let fut = async move { + tracing::info!("Starting node {} back up", id); + let handle = node.run_tasks().await; - // Create the node and add it to the state, so we can shut them - // down properly later to avoid the overflow error in the overall - // safety task. - let node = Node { - node_id: id.try_into().unwrap(), - network: node.network.clone(), - handle, - }; - node.handle.hotshot.start_consensus().await; + // Create the node and add it to the state, so we can shut them + // down properly later to avoid the overflow error in the overall + // safety task. + let node = Node { + node_id: id.try_into().unwrap(), + network: node.network.clone(), + handle, + }; + node.handle.hotshot.start_consensus().await; - self.handles.write().await[id] = node; + handles.write().await[id] = node; + }; + start_futs.push(fut); } + if !start_futs.is_empty() { + join_all(start_futs).await; + tracing::info!("Nodes all started"); + } + // update our latest view self.latest_view = Some(view_number); } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 6a24730d31..75c92e4ecd 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -165,7 +165,6 @@ where let completion_task = CompletionTask { tx: test_sender.clone(), rx: test_receiver.clone(), - handles: Arc::clone(&handles), duration: time_based.duration, }; @@ -274,7 +273,6 @@ where #[cfg(async_executor_impl = "async-std")] { let results = join_all(task_futs).await; - tracing::error!("test tasks joined"); for result in results { match result { TestResult::Pass => { @@ -286,14 +284,17 @@ where if let Some(handle) = txn_handle { handle.cancel().await; } - completion_handle.cancel().await; + // Shutdown all of the servers at the end + // Aborting here doesn't cause any problems because we don't maintain any state + if let Some(solver_server) = solver_server { + solver_server.1.cancel().await; + } } #[cfg(async_executor_impl = "tokio")] { let results = join_all(task_futs).await; - tracing::error!("test tasks joined"); for result in results { match result { Ok(res) => match res { @@ -311,7 +312,11 @@ where if let Some(handle) = txn_handle { handle.abort(); } - completion_handle.abort(); + // Shutdown all of the servers at the end + // Aborting here doesn't cause any problems because we don't maintain any state + if let Some(solver_server) = solver_server { + solver_server.1.abort(); + } } let mut nodes = handles.write().await; @@ -319,15 +324,12 @@ where for node in &mut *nodes { node.handle.shut_down().await; } + tracing::info!("Nodes shtudown"); - // Shutdown all of the servers at the end - // Aborting here doesn't cause any problems because we don't maintain any state - if let Some(solver_server) = solver_server { - #[cfg(async_executor_impl = "async-std")] - solver_server.1.cancel().await; - #[cfg(async_executor_impl = "tokio")] - solver_server.1.abort(); - } + #[cfg(async_executor_impl = "async-std")] + completion_handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + completion_handle.abort(); assert!( error_list.is_empty(), diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 536153b77c..4fae50c233 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -8,7 +8,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_spawn, async_timeout}; +use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; @@ -97,13 +97,18 @@ impl TestTask { messages.push(receiver.recv()); } - if let Ok((Ok(input), id, _)) = - async_timeout(Duration::from_millis(50), select_all(messages)).await - { - let _ = S::handle_event(&mut self.state, (input, id)) - .await - .inspect_err(|e| tracing::error!("{e}")); - } + match async_timeout(Duration::from_millis(2500), select_all(messages)).await { + Ok((Ok(input), id, _)) => { + let _ = S::handle_event(&mut self.state, (input, id)) + .await + .inspect_err(|e| tracing::error!("{e}")); + } + Ok((Err(e), _id, _)) => { + error!("Error from one channel in test task {:?}", e); + async_sleep(Duration::from_millis(4000)).await; + } + _ => {} + }; } }) } diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index b2238db02a..20e46c2baa 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -62,8 +62,6 @@ impl, V: Versions> TxnTask match handles.get(idx) { None => { tracing::error!("couldn't get node in txn task"); - // should do error - unimplemented!() } Some(node) => { // use rand::seq::IteratorRandom; diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 0b8fcbfdb9..06a0099847 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -320,6 +320,7 @@ async fn test_all_restart() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, + round_start_delay: 500, ..Default::default() }; let mut metadata: TestDescription = @@ -382,6 +383,7 @@ async fn test_all_restart_cdn() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, + round_start_delay: 500, ..Default::default() }; let mut metadata: TestDescription = @@ -448,6 +450,7 @@ async fn test_all_restart_one_da() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, + round_start_delay: 500, ..Default::default() }; let mut metadata: TestDescription = diff --git a/testing/tests/tests_2/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs similarity index 100% rename from testing/tests/tests_2/byzantine_tests.rs rename to testing/tests/tests_3/byzantine_tests.rs diff --git a/testing/tests/tests_3/test_marketplace.rs b/testing/tests/tests_4/test_marketplace.rs similarity index 98% rename from testing/tests/tests_3/test_marketplace.rs rename to testing/tests/tests_4/test_marketplace.rs index eb0738ffc7..b8a7d8c84c 100644 --- a/testing/tests/tests_3/test_marketplace.rs +++ b/testing/tests/tests_4/test_marketplace.rs @@ -96,7 +96,7 @@ cross_tests!( changes: HashMap::from([(0, BuilderChange::Down)]) } ], - validate_transactions: nonempty_block_threshold((40,50)), + validate_transactions: nonempty_block_threshold((35,50)), ..TestDescription::default() } }, diff --git a/testing/tests/tests_3/test_with_builder_failures.rs b/testing/tests/tests_4/test_with_builder_failures.rs similarity index 100% rename from testing/tests/tests_3/test_with_builder_failures.rs rename to testing/tests/tests_4/test_with_builder_failures.rs From b8f47d5c6700c201bd0df0767d88f9f1667255c1 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Fri, 27 Sep 2024 10:29:18 -0400 Subject: [PATCH 1227/1393] [Libp2p] - Refactor VID Delayed Requester To Use Message Passing (#3688) * start * refactor and cleanup * refactor * fix which public key we use for lookup in response * fix some tests * increase timeout * lint * cleanup * refactor * cleanup * verify vid response * remove loop * fix tests * fix name * remove from general message * clippy * cleanup * clippy * refactor and cleanup * leave signatures the same * linter * fix tokio * address comments * clippy * always spawn request response tasks --- hotshot/src/tasks/mod.rs | 10 +- hotshot/src/tasks/task_state.rs | 1 - task-impls/src/consensus/mod.rs | 40 +- task-impls/src/da.rs | 5 +- task-impls/src/events.rs | 67 ++- task-impls/src/network.rs | 52 ++- task-impls/src/quorum_vote/mod.rs | 35 +- task-impls/src/request.rs | 389 +++++++++--------- task-impls/src/response.rs | 179 +++----- testing/tests/tests_1/consensus_task.rs | 28 +- testing/tests/tests_1/quorum_vote_task.rs | 12 +- .../tests_1/upgrade_task_with_consensus.rs | 30 +- .../tests/tests_1/upgrade_task_with_vote.rs | 8 +- 13 files changed, 459 insertions(+), 397 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bdd59eeb86..e875dd2c48 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -36,7 +36,6 @@ use hotshot_types::{ consensus::Consensus, constants::EVENT_CHANNEL_SIZE, message::{Message, UpgradeLock}, - request_response::RequestReceiver, traits::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -80,11 +79,9 @@ pub async fn add_request_network_task< /// Add a task which responds to requests on the network. pub fn add_response_task, V: Versions>( handle: &mut SystemContextHandle, - request_receiver: RequestReceiver, ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - request_receiver, handle.hotshot.memberships.quorum_membership.clone().into(), handle.public_key().clone(), handle.private_key().clone(), @@ -93,6 +90,7 @@ pub fn add_response_task, V: Versi handle.network_registry.register(run_response_task::( state, handle.internal_event_stream.1.activate_cloned(), + handle.internal_event_stream.0.clone(), )); } @@ -563,10 +561,8 @@ pub async fn add_network_message_and_request_receiver_tasks< add_network_message_task(handle, &network); add_network_message_task(handle, &network); - if let Some(request_receiver) = network.spawn_request_receiver_task().await { - add_request_network_task(handle).await; - add_response_task(handle, request_receiver); - } + add_request_network_task(handle).await; + add_response_task(handle); } /// Adds the `NetworkEventTaskState` tasks. diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 46f9202e83..c1a10f4646 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -52,7 +52,6 @@ impl, V: Versions> CreateTaskState view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, da_membership: handle.hotshot.memberships.da_membership.clone(), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 7f7ea60a72..313e3ed331 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -144,32 +144,30 @@ impl, V: Versions> ConsensusTaskSt } /// Validate the VID disperse is correctly signed and has the correct share. - fn validate_disperse(&self, disperse: &Proposal>) -> bool { + fn validate_disperse( + &self, + sender: &TYPES::SignatureKey, + disperse: &Proposal>, + ) -> bool { let view = disperse.data.view_number(); let payload_commitment = disperse.data.payload_commitment; + // Check sender of VID disperse share is signed by DA committee member + let validate_sender = sender.validate(&disperse.signature, payload_commitment.as_ref()) + && self.da_membership.committee_members(view).contains(sender); + // Check whether the data satisfies one of the following. // * From the right leader for this view. // * Calculated and signed by the current node. - // * Signed by one of the staked DA committee members. - if !self - .quorum_membership - .leader(view) + let validated = self + .public_key .validate(&disperse.signature, payload_commitment.as_ref()) - && !self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) - { - let mut validated = false; - for da_member in self.da_membership.committee_members(view) { - if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { - validated = true; - break; - } - } - if !validated { - return false; - } + || self + .quorum_membership + .leader(view) + .validate(&disperse.signature, payload_commitment.as_ref()); + if !validate_sender && !validated { + return false; } // Validate the VID share. @@ -450,7 +448,7 @@ impl, V: Versions> ConsensusTaskSt self.spawn_vote_task(view, event_sender, event_receiver) .await; } - HotShotEvent::VidShareRecv(disperse) => { + HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); debug!( @@ -469,7 +467,7 @@ impl, V: Versions> ConsensusTaskSt debug!("VID disperse data is not more than one view older."); - if !self.validate_disperse(disperse) { + if !self.validate_disperse(sender, disperse) { warn!("Failed to validated the VID dispersal/share sig."); return; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index ab50b8f2cd..19452752d6 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -245,7 +245,10 @@ impl, V: Versions> DaTaskState { /// Vid disperse share has been received from the network; handled by the consensus task /// /// Like [`HotShotEvent::DaProposalRecv`]. - VidShareRecv(Proposal>), + VidShareRecv( + TYPES::SignatureKey, + Proposal>, + ), /// VID share data is validated. VidShareValidated(Proposal>), /// Upgrade proposal has been received from the network @@ -222,6 +225,36 @@ pub enum HotShotEvent { /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid QuorumProposalPreliminarilyValidated(Proposal>), + + /// Send a VID request to the network; emitted to on of the members of DA committee. + /// Includes the data request, node's public key and signature as well as public key of DA committee who we want to send to. + VidRequestSend( + DataRequest, + // Sender + TYPES::SignatureKey, + // Recipient + TYPES::SignatureKey, + ), + + /// Receive a VID request from the network; Received by a node in the DA committee. + /// Includes the data request and nodes public key. + VidRequestRecv(DataRequest, TYPES::SignatureKey), + + /// Send a VID response to the network; emitted to the sending node. + /// Includes nodes public key, recipient public key, and vid disperse + VidResponseSend( + /// Sender key + TYPES::SignatureKey, + /// Recipient key + TYPES::SignatureKey, + Proposal>, + ), + + /// Receive a VID response from the network; received by the node that triggered the VID request. + VidResponseRecv( + TYPES::SignatureKey, + Proposal>, + ), } impl HotShotEvent { @@ -268,7 +301,7 @@ impl HotShotEvent { | HotShotEvent::LeafDecided(_) | HotShotEvent::TransactionsRecv(_) => None, HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), - HotShotEvent::VidShareRecv(proposal) | HotShotEvent::VidShareValidated(proposal) => { + HotShotEvent::VidShareRecv(_, proposal) | HotShotEvent::VidShareValidated(proposal) => { Some(proposal.data.view_number()) } HotShotEvent::UpgradeProposalRecv(proposal, _) @@ -300,6 +333,10 @@ impl HotShotEvent { } HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), + HotShotEvent::VidRequestSend(request, _, _) + | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), + HotShotEvent::VidResponseSend(_, _, proposal) + | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number), } } } @@ -484,7 +521,7 @@ impl Display for HotShotEvent { "VidDisperseSend(view_number={:?})", proposal.data.view_number() ), - HotShotEvent::VidShareRecv(proposal) => write!( + HotShotEvent::VidShareRecv(_, proposal) => write!( f, "VIDShareRecv(view_number={:?})", proposal.data.view_number() @@ -557,6 +594,26 @@ impl Display for HotShotEvent { proposal.data.view_number() ) } + HotShotEvent::VidRequestSend(request, _, _) => { + write!(f, "VidRequestSend(view_number={:?}", request.view) + } + HotShotEvent::VidRequestRecv(request, _) => { + write!(f, "VidRequestRecv(view_number={:?}", request.view) + } + HotShotEvent::VidResponseSend(_, _, proposal) => { + write!( + f, + "VidResponseSend(view_number={:?}", + proposal.data.view_number + ) + } + HotShotEvent::VidResponseRecv(_, proposal) => { + write!( + f, + "VidResponseRecv(view_number={:?}", + proposal.data.view_number + ) + } } } } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a17ba8229a..718d752f27 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -22,7 +22,10 @@ use hotshot_types::{ }, traits::{ election::Membership, - network::{BroadcastDelay, ConnectedNetwork, TransmitType, ViewMessage}, + network::{ + BroadcastDelay, ConnectedNetwork, RequestKind, ResponseMessage, TransmitType, + ViewMessage, + }, node_implementation::{ConsensusTime, NodeType, Versions}, storage::Storage, }, @@ -64,6 +67,8 @@ pub fn da_filter(event: &Arc>) -> bool { HotShotEvent::DaProposalSend(_, _) | HotShotEvent::QuorumProposalRequestSend(..) | HotShotEvent::QuorumProposalResponseSend(..) + | HotShotEvent::VidResponseSend(..) + | HotShotEvent::VidRequestSend(..) | HotShotEvent::DaVoteSend(_) | HotShotEvent::ViewChange(_) ) @@ -166,7 +171,7 @@ impl NetworkMessageTaskState { HotShotEvent::DaCertificateRecv(cert) } DaConsensusMessage::VidDisperseMsg(proposal) => { - HotShotEvent::VidShareRecv(proposal) + HotShotEvent::VidShareRecv(sender, proposal) } }, }; @@ -182,8 +187,31 @@ impl NetworkMessageTaskState { ) .await; } - DataMessage::DataResponse(_) | DataMessage::RequestData(_) => { - warn!("Request and Response messages should not be received in the NetworkMessage task"); + DataMessage::DataResponse(response) => { + if let ResponseMessage::Found(message) = response { + match message { + SequencingMessage::Da(da_message) => { + if let DaConsensusMessage::VidDisperseMsg(proposal) = da_message { + broadcast_event( + Arc::new(HotShotEvent::VidResponseRecv(sender, proposal)), + &self.internal_event_stream, + ) + .await; + } + } + SequencingMessage::General(_) => {} + } + } + } + DataMessage::RequestData(data) => { + let req_data = data.clone(); + if let RequestKind::Vid(_view_number, _key) = req_data.request { + broadcast_event( + Arc::new(HotShotEvent::VidRequestRecv(data, sender)), + &self.internal_event_stream, + ) + .await; + } } }, @@ -519,7 +547,21 @@ impl< .await; None } - + HotShotEvent::VidRequestSend(req, sender, to) => Some(( + sender, + MessageKind::Data(DataMessage::RequestData(req)), + TransmitType::Direct(to), + )), + HotShotEvent::VidResponseSend(sender, to, proposal) => { + let da_message = DaConsensusMessage::VidDisperseMsg(proposal); + let sequencing_msg = SequencingMessage::Da(da_message); + let response_message = ResponseMessage::Found(sequencing_msg); + Some(( + sender, + MessageKind::Data(DataMessage::DataResponse(response_message)), + TransmitType::Direct(to), + )) + } _ => None, } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index bd7e04d993..e7dd9c1699 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -581,7 +581,7 @@ impl, V: Versions> QuorumVoteTaskS .await; self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } - HotShotEvent::VidShareRecv(disperse) => { + HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); trace!("Received VID share for view {}", *view); if view <= self.latest_voted_view { @@ -590,29 +590,26 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = disperse.data.payload_commitment; + // Check sender of VID disperse share is signed by DA committee member + let validate_sender = sender + .validate(&disperse.signature, payload_commitment.as_ref()) + && self.da_membership.committee_members(view).contains(sender); + // Check whether the data satisfies one of the following. // * From the right leader for this view. // * Calculated and signed by the current node. - // * Signed by one of the staked DA committee members. - if !self - .quorum_membership - .leader(view) + let validated = self + .public_key .validate(&disperse.signature, payload_commitment.as_ref()) - && !self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) - { - let mut validated = false; - for da_member in self.da_membership.committee_members(view) { - if da_member.validate(&disperse.signature, payload_commitment.as_ref()) { - validated = true; - break; - } - } - if !validated { - return; - } + || self + .quorum_membership + .leader(view) + .validate(&disperse.signature, payload_commitment.as_ref()); + if !validate_sender && !validated { + warn!("Failed to validated the VID dispersal/share sig."); + return; } + // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results #[allow(clippy::no_effect)] diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 1129ab2c61..22fa5ab4ec 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -19,23 +19,25 @@ use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; -use hotshot_task::task::TaskState; +use hotshot_task::{ + dependency::{Dependency, EventDependency}, + task::TaskState, +}; use hotshot_types::{ consensus::OuterConsensus, - message::{DaConsensusMessage, DataMessage, Message, MessageKind, SequencingMessage}, traits::{ election::Membership, - network::{ConnectedNetwork, DataRequest, RequestKind, ResponseMessage}, + network::{ConnectedNetwork, DataRequest, RequestKind}, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, vote::HasViewNumber, }; -use rand::{prelude::SliceRandom, thread_rng}; +use rand::{seq::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; use crate::{events::HotShotEvent, helpers::broadcast_event}; @@ -59,8 +61,6 @@ pub struct NetworkRequestState> { pub delay: Duration, /// DA Membership pub da_membership: TYPES::Membership, - /// Quorum Membership - pub quorum_membership: TYPES::Membership, /// This nodes public key pub public_key: TYPES::SignatureKey, /// This nodes private/signing key, used to sign requests. @@ -92,13 +92,22 @@ impl> TaskState for NetworkRequest &mut self, event: Arc, sender: &Sender>, - _receiver: &Receiver>, + receiver: &Receiver>, ) -> Result<()> { match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.view_number(); - if prop_view >= self.view { - self.spawn_requests(prop_view, sender.clone()).await; + + // If we already have the VID shares for the next view, do nothing. + if prop_view >= self.view + && !self + .state + .read() + .await + .vid_shares() + .contains_key(&prop_view) + { + self.spawn_requests(prop_view, sender, receiver); } Ok(()) } @@ -114,7 +123,7 @@ impl> TaskState for NetworkRequest } async fn cancel_subtasks(&mut self) { - self.set_shutdown_flag(); + self.shutdown_flag.store(true, Ordering::Relaxed); while !self.spawned_tasks.is_empty() { let Some((_, handles)) = self.spawned_tasks.pop_first() else { @@ -132,58 +141,46 @@ impl> TaskState for NetworkRequest } impl> NetworkRequestState { - /// Spawns tasks for a given view to retrieve any data needed. - async fn spawn_requests( + /// Creates and signs the payload, then will create a request task + fn spawn_requests( &mut self, view: TYPES::Time, - sender: Sender>>, + sender: &Sender>>, + receiver: &Receiver>>, ) { - let requests = self.build_requests(view).await; - if requests.is_empty() { - return; - } - requests - .into_iter() - .for_each(|r| self.run_delay(r, sender.clone(), view)); - } + let request = RequestKind::Vid(view, self.public_key.clone()); - /// Creates the request structures for all types that are needed. - #[instrument(skip_all, target = "NetworkRequestState", fields(id = self.id, view = *view))] - async fn build_requests(&self, view: TYPES::Time) -> Vec> { - let mut reqs = Vec::new(); - if !self.state.read().await.vid_shares().contains_key(&view) { - reqs.push(RequestKind::Vid(view, self.public_key.clone())); + // First sign the request for the VID shares. + if let Some(signature) = self.serialize_and_sign(&request) { + self.create_vid_request_task( + request, + signature, + sender.clone(), + receiver.clone(), + view, + ); } - // TODO request other things - reqs } - /// Sign the serialized version of the request - fn serialize_and_sign( - &self, - request: &RequestKind, - ) -> Option<::PureAssembledSignatureType> { - let Ok(data) = bincode::serialize(&request) else { - tracing::error!("Failed to serialize request!"); - return None; - }; - let Ok(signature) = TYPES::SignatureKey::sign(&self.private_key, &Sha256::digest(data)) - else { - error!("Failed to sign Data Request"); - return None; - }; - Some(signature) - } - /// run a delayed request task for a request. The first response - /// received will be sent over `sender` - #[instrument(skip_all, fields(id = self.id, view = *self.view), name = "NetworkRequestState run_delay", level = "error")] - fn run_delay( + /// Creates a task that will request a VID share from a DA member and wait for the `HotShotEvent::VidResponseRecv`event + /// If we get the VID disperse share, broadcast `HotShotEvent::VidShareRecv` and terminate task + fn create_vid_request_task( &mut self, request: RequestKind, + signature: Signature, sender: Sender>>, + receiver: Receiver>>, view: TYPES::Time, ) { - let mut recipients: Vec<_> = self + let state = OuterConsensus::new(Arc::clone(&self.state.inner_consensus)); + let network = Arc::clone(&self.network); + let shutdown_flag = Arc::clone(&self.shutdown_flag); + let delay = self.delay; + let da_committee_for_view = self.da_membership.committee_members(view); + let public_key = self.public_key.clone(); + + // Get committee members for view + let mut recipients: Vec = self .da_membership .committee_members(view) .into_iter() @@ -191,141 +188,158 @@ impl> NetworkRequestState { - network: Arc::clone(&self.network), - state: OuterConsensus::new(Arc::clone(&self.state.inner_consensus)), - public_key: self.public_key.clone(), - sender, - delay: self.delay, - recipients, - shutdown_flag: Arc::clone(&self.shutdown_flag), - id: self.id, - }; - let Some(signature) = self.serialize_and_sign(&request) else { - return; + + // prepare request + let data_request = DataRequest:: { + request, + view, + signature, }; - debug!("Requesting data: {:?}", request); - let handle = async_spawn(requester.run(request, signature)); + let handle = async_spawn(async move { + // Do the delay only if primary is up and then start sending + if !network.is_primary_down() { + async_sleep(delay).await; + } + let mut recipients_it = recipients.iter().cycle(); + // First check if we got the data before continuing + while !Self::cancel_vid_request_task( + &state, + &sender, + &public_key, + &view, + &shutdown_flag, + ) + .await + { + // Cycle da members we send the request to each time + if let Some(recipient) = recipients_it.next() { + // If we got the data after we make the request then we are done + if Self::handle_vid_request_task( + &sender, + &receiver, + &data_request, + recipient, + &da_committee_for_view, + &public_key, + view, + ) + .await + { + return; + } + } else { + tracing::warn!( + "Sent VID request to all available DA members and got no reponse for view: {:?}", + view + ); + return; + } + } + }); self.spawned_tasks.entry(view).or_default().push(handle); } - /// Signals delayed requesters to finish - pub fn set_shutdown_flag(&self) { - self.shutdown_flag.store(true, Ordering::Relaxed); - } -} - -/// A short lived task that waits a delay and starts trying peers until it completes -/// a request. If at any point the requested info is seen in the data stores or -/// the view has moved beyond the view we are requesting, the task will completed. -struct DelayedRequester> { - /// The underlying network to send requests on - pub network: Arc, - /// Shared state to check if the data go populated - state: OuterConsensus, - /// our public key - public_key: TYPES::SignatureKey, - /// Channel to send the event when we receive a response - sender: Sender>>, - /// Duration to delay sending the first request - delay: Duration, - /// The peers we will request in a random order - recipients: Vec, - /// A flag indicating that `HotShotEvent::Shutdown` has been received - shutdown_flag: Arc, - /// The node's id - id: u64, -} + /// Handles main logic for the Request / Response of a vid share + /// Make the request to get VID share to a DA member and wait for the response. + /// Returns true if response received, otherwise false + async fn handle_vid_request_task( + sender: &Sender>>, + receiver: &Receiver>>, + data_request: &DataRequest, + recipient: &TYPES::SignatureKey, + da_committee_for_view: &BTreeSet<::SignatureKey>, + public_key: &::SignatureKey, + view: TYPES::Time, + ) -> bool { + // First send request to a random DA member for the view + broadcast_event( + HotShotEvent::VidRequestSend( + data_request.clone(), + public_key.clone(), + recipient.clone(), + ) + .into(), + sender, + ) + .await; -/// Wrapper for the info in a VID request -struct VidRequest(TYPES::Time, TYPES::SignatureKey); + // Wait for a response + let result = async_timeout( + REQUEST_TIMEOUT, + Self::handle_event_dependency(receiver, da_committee_for_view.clone(), view), + ) + .await; -impl> DelayedRequester { - /// Wait the delay, then try to complete the request. Iterates over peers - /// until the request is completed, or the data is no longer needed. - async fn run(self, request: RequestKind, signature: Signature) { - match request { - RequestKind::Vid(view, key) => { - // Do the delay only if primary is up and then start sending - if !self.network.is_primary_down() { - async_sleep(self.delay).await; - } - self.do_vid(VidRequest(view, key), signature).await; + // Check if we got a result, if not we timed out + if let Ok(Some(event)) = result { + if let HotShotEvent::VidResponseRecv(sender_pub_key, proposal) = event.as_ref() { + broadcast_event( + Arc::new(HotShotEvent::VidShareRecv( + sender_pub_key.clone(), + proposal.clone(), + )), + sender, + ) + .await; + return true; } - RequestKind::Proposal(..) | RequestKind::DaProposal(..) => {} } + false } - /// Handle sending a VID Share request, runs the loop until the data exists - async fn do_vid(&self, req: VidRequest, signature: Signature) { - let message = make_vid(&req, signature); - let mut recipients_it = self.recipients.iter().cycle(); - - let serialized_msg = match bincode::serialize(&message) { - Ok(serialized_msg) => serialized_msg, - Err(e) => { - tracing::error!( - "Failed to serialize outgoing message: this should never happen. Error: {e}" - ); - return; - } - }; - - while !self.cancel_vid(&req).await { - match async_timeout( - REQUEST_TIMEOUT, - self.network - .request_data::(serialized_msg.clone(), recipients_it.next().unwrap()), - ) - .await - { - Ok(Ok(response)) => { - match bincode::deserialize(&response) { - Ok(ResponseMessage::Found(data)) => { - self.handle_response_message(data).await; - // keep trying, but expect the map to be populated, or view to increase - async_sleep(REQUEST_TIMEOUT).await; - } - Ok(ResponseMessage::NotFound) => { - info!("Peer Responded they did not have the data"); - } - Ok(ResponseMessage::Denied) => { - error!("Request for data was denied by the receiver"); - } - Err(e) => { - error!("Failed to deserialize response: {e}"); - async_sleep(REQUEST_TIMEOUT).await; - } - } - } - Ok(Err(e)) => { - warn!("Error Sending request. Error: {:?}", e); - async_sleep(REQUEST_TIMEOUT).await; - } - Err(_) => { - warn!("Request to other node timed out"); + /// Create event dependency and wait for `VidResponseRecv` after we send out the request + /// Returns an optional with `VidResponseRecv` if received, otherwise None + async fn handle_event_dependency( + receiver: &Receiver>>, + da_members_for_view: BTreeSet<::SignatureKey>, + view: TYPES::Time, + ) -> Option>> { + EventDependency::new( + receiver.clone(), + Box::new(move |event: &Arc>| { + let event = event.as_ref(); + if let HotShotEvent::VidResponseRecv(sender_key, proposal) = event { + proposal.data.view_number() == view + && da_members_for_view.contains(sender_key) + && sender_key.validate( + &proposal.signature, + proposal.data.payload_commitment.as_ref(), + ) + } else { + false } - } - } + }), + ) + .completed() + .await } - /// Returns true if we got the data we wanted, or the view has moved on. - #[instrument(skip_all, target = "DelayedRequester", fields(id = self.id, view = *req.0))] - async fn cancel_vid(&self, req: &VidRequest) -> bool { - let view = req.0; - let state = self.state.read().await; - let cancel = self.shutdown_flag.load(Ordering::Relaxed) - || state.vid_shares().contains_key(&view) - || state.cur_view() > view; + + /// Returns true if we got the data we wanted, a shutdown even was received, or the view has moved on. + async fn cancel_vid_request_task( + state: &OuterConsensus, + sender: &Sender>>, + public_key: &::SignatureKey, + view: &TYPES::Time, + shutdown_flag: &Arc, + ) -> bool { + let state = state.read().await; + + let cancel = shutdown_flag.load(Ordering::Relaxed) + || state.vid_shares().contains_key(view) + || state.cur_view() > *view; if cancel { if let Some(Some(vid_share)) = state .vid_shares() - .get(&view) - .map(|shares| shares.get(&self.public_key).cloned()) + .get(view) + .map(|shares| shares.get(public_key).cloned()) { broadcast_event( - Arc::new(HotShotEvent::VidShareRecv(vid_share.clone())), - &self.sender, + Arc::new(HotShotEvent::VidShareRecv( + public_key.clone(), + vid_share.clone(), + )), + sender, ) .await; } @@ -338,32 +352,17 @@ impl> DelayedRequester { cancel } - /// Transform a response into a `HotShotEvent` - async fn handle_response_message(&self, message: SequencingMessage) { - let event = match message { - SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(prop)) => { - tracing::info!("vid req complete, got vid {:?}", prop); - HotShotEvent::VidShareRecv(prop) - } - _ => return, + /// Sign the serialized version of the request + fn serialize_and_sign(&self, request: &RequestKind) -> Option> { + let Ok(data) = bincode::serialize(&request) else { + tracing::error!("Failed to serialize request!"); + return None; }; - broadcast_event(Arc::new(event), &self.sender).await; - } -} - -/// Make a VID Request Message to send -fn make_vid( - req: &VidRequest, - signature: Signature, -) -> Message { - let kind = RequestKind::Vid(req.0, req.1.clone()); - let data_request = DataRequest { - view: req.0, - request: kind, - signature, - }; - Message { - sender: req.1.clone(), - kind: MessageKind::Data(DataMessage::RequestData(data_request)), + let Ok(signature) = TYPES::SignatureKey::sign(&self.private_key, &Sha256::digest(data)) + else { + tracing::error!("Failed to sign Data Request"); + return None; + }; + Some(signature) } } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index e5d3563089..f06ec3ac58 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -6,24 +6,17 @@ use std::{sync::Arc, time::Duration}; -use async_broadcast::Receiver; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use futures::{FutureExt, StreamExt}; -use hotshot_task::dependency::{Dependency, EventDependency}; +use committable::Committable; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, data::VidDisperseShare, - message::{ - DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, - SequencingMessage, - }, - request_response::{NetworkMsgResponseChannel, RequestReceiver}, + message::Proposal, traits::{ - election::Membership, - network::{DataRequest, RequestKind, ResponseMessage}, - node_implementation::NodeType, + election::Membership, network::DataRequest, node_implementation::NodeType, signature_key::SignatureKey, }, }; @@ -32,7 +25,7 @@ use sha2::{Digest, Sha256}; use tokio::task::JoinHandle; use tracing::instrument; -use crate::events::HotShotEvent; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Time to wait for txns before sending `ResponseMessage::NotFound` const TXNS_TIMEOUT: Duration = Duration::from_millis(100); @@ -42,8 +35,6 @@ const TXNS_TIMEOUT: Duration = Duration::from_millis(100); pub struct NetworkResponseState { /// Locked consensus state consensus: LockedConsensusState, - /// Receiver for requests - receiver: RequestReceiver, /// Quorum membership for checking if requesters have state quorum: Arc, /// This replicas public key @@ -58,7 +49,6 @@ impl NetworkResponseState { /// Create the network request state with the info it needs pub fn new( consensus: LockedConsensusState, - receiver: RequestReceiver, quorum: Arc, pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -66,7 +56,6 @@ impl NetworkResponseState { ) -> Self { Self { consensus, - receiver, quorum, pub_key, private_key, @@ -74,67 +63,71 @@ impl NetworkResponseState { } } - /// Run the request response loop until a `HotShotEvent::Shutdown` is received. - /// Or the stream is closed. - async fn run_loop(mut self, shutdown: EventDependency>>) { - let mut shutdown = Box::pin(shutdown.completed().fuse()); + /// Process request events or loop until a `HotShotEvent::Shutdown` is received. + async fn run_response_loop( + self, + mut receiver: Receiver>>, + event_sender: Sender>>, + ) { loop { - futures::select! { - req = self.receiver.next() => { - match req { - Some((msg, chan)) => self.handle_message(msg, chan).await, - None => return, - } - }, - _ = shutdown => { - return; - } - } - } - } - - /// Handle an incoming message. First validates the sender, then handles the contained request. - /// Sends the response via `chan` - async fn handle_message(&self, raw_req: Vec, chan: NetworkMsgResponseChannel>) { - let req: Message = match bincode::deserialize(&raw_req) { - Ok(deserialized) => deserialized, - Err(e) => { - tracing::error!("Failed to deserialize message! Error: {e}"); - return; - } - }; - let sender = req.sender.clone(); - - match req.kind { - MessageKind::Data(DataMessage::RequestData(request)) => { - if !self.valid_sender(&sender) || !valid_signature::(&request, &sender) { - let serialized_msg = match bincode::serialize( - &self.make_msg(ResponseMessage::Denied), - ) { - Ok(serialized) => serialized, - Err(e) => { - tracing::error!("Failed to serialize outgoing message: this should never happen. Error: {e}"); + match receiver.recv_direct().await { + Ok(event) => { + // break loop when false, this means shutdown received + match event.as_ref() { + HotShotEvent::VidRequestRecv(request, sender) => { + // Verify request is valid + if !self.valid_sender(sender) + || !valid_signature::(request, sender) + { + continue; + } + if let Some(proposal) = + self.get_or_calc_vid_share(request.view, sender).await + { + broadcast_event( + HotShotEvent::VidResponseSend( + self.pub_key.clone(), + sender.clone(), + proposal, + ) + .into(), + &event_sender, + ) + .await; + } + } + HotShotEvent::QuorumProposalRequestRecv(req, signature) => { + if let Some(quorum_proposal) = self + .consensus + .read() + .await + .last_proposals() + .get(&req.view_number) + { + // Make sure that this request came from who we think it did + if req.key.validate(signature, req.commit().as_ref()) { + broadcast_event( + HotShotEvent::QuorumProposalResponseSend( + req.key.clone(), + quorum_proposal.clone(), + ) + .into(), + &event_sender, + ) + .await; + } + } + } + HotShotEvent::Shutdown => { return; } - }; - let _ = chan.sender.send(serialized_msg); - return; - } - - let response = self.handle_request(request).await; - let serialized_response = match bincode::serialize(&response) { - Ok(serialized) => serialized, - Err(e) => { - tracing::error!("Failed to serialize outgoing message: this should never happen. Error: {e}"); - return; + _ => {} } - }; - let _ = chan.sender.send(serialized_response); + } + Err(e) => { + tracing::error!("Failed to receive event. {:?}", e); + } } - msg => tracing::error!( - "Received message that wasn't a DataRequest in the request task. Message: {:?}", - msg - ), } } @@ -192,45 +185,10 @@ impl NetworkResponseState { .cloned() } - /// Handle the request contained in the message. Returns the response we should send - /// First parses the kind and passes to the appropriate handler for the specific type - /// of the request. - async fn handle_request(&self, req: DataRequest) -> Message { - match req.request { - RequestKind::Vid(view, pub_key) => { - let Some(share) = self.get_or_calc_vid_share(view, &pub_key).await else { - return self.make_msg(ResponseMessage::NotFound); - }; - let seq_msg = SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(share)); - self.make_msg(ResponseMessage::Found(seq_msg)) - } - // TODO impl for DA Proposal: https://github.com/EspressoSystems/HotShot/issues/2651 - RequestKind::DaProposal(_view) => self.make_msg(ResponseMessage::NotFound), - RequestKind::Proposal(view) => self.make_msg(self.respond_with_proposal(view).await), - } - } - - /// Helper to turn a `ResponseMessage` into a `Message` by filling - /// in the surrounding fields and creating the `MessageKind` - fn make_msg(&self, msg: ResponseMessage) -> Message { - Message { - sender: self.pub_key.clone(), - kind: MessageKind::Data(DataMessage::DataResponse(msg)), - } - } /// Makes sure the sender is allowed to send a request. fn valid_sender(&self, sender: &TYPES::SignatureKey) -> bool { self.quorum.has_stake(sender) } - /// Lookup the proposal for the view and respond if it's found/not found - async fn respond_with_proposal(&self, view: TYPES::Time) -> ResponseMessage { - match self.consensus.read().await.last_proposals().get(&view) { - Some(prop) => ResponseMessage::Found(SequencingMessage::General( - GeneralConsensusMessage::Proposal(prop.clone()), - )), - None => ResponseMessage::NotFound, - } - } } /// Check the signature @@ -250,10 +208,7 @@ fn valid_signature( pub fn run_response_task( task_state: NetworkResponseState, event_stream: Receiver>>, + sender: Sender>>, ) -> JoinHandle<()> { - let dep = EventDependency::new( - event_stream, - Box::new(|e| matches!(e.as_ref(), HotShotEvent::Shutdown)), - ); - async_spawn(task_state.run_loop(dep)) + async_spawn(task_state.run_response_loop(event_stream, sender)) } diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index a2efcb5050..ad27388433 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -97,10 +97,10 @@ async fn test_consensus_task() { random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), ], serial![ - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[1].0, handle.public_key())), QuorumProposalRecv(proposals[1].clone(), leaders[1]), QcFormed(either::Left(cert)), SendPayloadCommitmentAndMetadata( @@ -181,7 +181,7 @@ async fn test_consensus_vote() { let inputs = vec![random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), QuorumVoteRecv(votes[0].clone()), ]]; @@ -295,13 +295,19 @@ async fn test_view_sync_finalize_propose() { .unwrap(); let inputs = vec![ - serial![VidShareRecv(vid_share(&vids[0].0, handle.public_key()))], + serial![VidShareRecv( + leaders[0], + vid_share(&vids[0].0, handle.public_key()) + )], random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), ], serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - serial![VidShareRecv(vid_share(&vids[1].0, handle.public_key()))], + serial![VidShareRecv( + leaders[0], + vid_share(&vids[1].0, handle.public_key()) + )], random![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), TimeoutVoteRecv(timeout_vote_view_2), @@ -412,7 +418,10 @@ async fn test_view_sync_finalize_vote() { }; let inputs = vec![ - serial![VidShareRecv(vid_share(&vids[0].0, handle.public_key()))], + serial![VidShareRecv( + leaders[0], + vid_share(&vids[0].0, handle.public_key()) + )], random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), @@ -525,7 +534,10 @@ async fn test_view_sync_finalize_vote_fail_view_number() { QuorumProposalRecv(good_proposal, leaders[0]), DaCertificateRecv(dacs[0].clone()), ], - serial![VidShareRecv(vid_share(&vids[0].0, handle.public_key()))], + serial![VidShareRecv( + leaders[0], + vid_share(&vids[0].0, handle.public_key()) + )], serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], random![ ViewSyncFinalizeCertificate2Recv(cert), @@ -591,7 +603,7 @@ async fn test_vid_disperse_storage_failure() { let inputs = vec![random![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), DaCertificateRecv(dacs[0].clone()), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), ]]; let expectations = vec![Expectations::from_outputs(all_predicates![ diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index e11ac16e5a..72fb7d97e6 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -52,9 +52,11 @@ async fn test_quorum_vote_task_success() { let mut leaves = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); + let mut leaders = Vec::new(); let consensus = handle.hotshot.consensus().clone(); let mut consensus_writer = consensus.write().await; for view in (&mut generator).take(2).collect::>().await { + leaders.push(view.leader_public_key); proposals.push(view.quorum_proposal.clone()); leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); @@ -76,7 +78,7 @@ async fn test_quorum_vote_task_success() { let inputs = vec![random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vids[1].0[0].clone()), + VidShareRecv(leaders[1], vids[1].0[0].clone()), ]]; let expectations = vec![Expectations::from_outputs(all_predicates![ @@ -150,7 +152,7 @@ async fn test_quorum_vote_task_miss_dependency() { let inputs = vec![ random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), ], random![ QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), @@ -158,7 +160,7 @@ async fn test_quorum_vote_task_miss_dependency() { ], random![ DaCertificateRecv(dacs[3].clone()), - VidShareRecv(vid_share(&vids[3].0, handle.public_key())), + VidShareRecv(leaders[3], vid_share(&vids[3].0, handle.public_key())), ], ]; @@ -211,7 +213,9 @@ async fn test_quorum_vote_task_incorrect_dependency() { let mut leaves = Vec::new(); let mut dacs = Vec::new(); let mut vids = Vec::new(); + let mut leaders = Vec::new(); for view in (&mut generator).take(2).collect::>().await { + leaders.push(view.leader_public_key); proposals.push(view.quorum_proposal.clone()); leaves.push(view.leaf.clone()); dacs.push(view.da_certificate.clone()); @@ -222,7 +226,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { let inputs = vec![random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vids[0].0[0].clone()), + VidShareRecv(leaders[0], vids[0].0[0].clone()), ]]; let expectations = vec![Expectations::from_outputs(all_predicates![ diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs index fe1d9279a9..e226726246 100644 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -98,28 +98,28 @@ async fn test_upgrade_task_vote() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), ], vec![ QuorumProposalRecv(proposals[2].clone(), leaders[2]), DaCertificateRecv(dacs[2].clone()), - VidShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(leaders[2], vid_share(&vids[2].0, handle.public_key())), ], vec![ QuorumProposalRecv(proposals[3].clone(), leaders[3]), DaCertificateRecv(dacs[3].clone()), - VidShareRecv(vid_share(&vids[3].0, handle.public_key())), + VidShareRecv(leaders[3], vid_share(&vids[3].0, handle.public_key())), ], vec![ QuorumProposalRecv(proposals[4].clone(), leaders[4]), DaCertificateRecv(dacs[4].clone()), - VidShareRecv(vid_share(&vids[4].0, handle.public_key())), + VidShareRecv(leaders[4], vid_share(&vids[4].0, handle.public_key())), ], vec![QuorumProposalRecv(proposals[5].clone(), leaders[5])], ]; @@ -277,17 +277,17 @@ async fn test_upgrade_task_propose() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], upgrade_vote_recvs, vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), ], vec![ - VidShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(leaders[2], vid_share(&vids[2].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), @@ -479,12 +479,12 @@ async fn test_upgrade_task_blank_blocks() { let inputs = vec![ vec![ QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidShareRecv(vid_share(&vids[0].0, handle.public_key())), + VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), DaCertificateRecv(dacs[0].clone()), ], vec![ QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidShareRecv(vid_share(&vids[1].0, handle.public_key())), + VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), DaCertificateRecv(dacs[1].clone()), SendPayloadCommitmentAndMetadata( vids[1].0[0].data.payload_commitment, @@ -497,7 +497,7 @@ async fn test_upgrade_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[2].clone()), - VidShareRecv(vid_share(&vids[2].0, handle.public_key())), + VidShareRecv(leaders[2], vid_share(&vids[2].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[2].0[0].data.payload_commitment, proposals[2].data.block_header.builder_commitment.clone(), @@ -510,7 +510,7 @@ async fn test_upgrade_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[3].clone()), - VidShareRecv(vid_share(&vids[3].0, handle.public_key())), + VidShareRecv(leaders[3], vid_share(&vids[3].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[3].0[0].data.payload_commitment, proposals[3].data.block_header.builder_commitment.clone(), @@ -523,7 +523,7 @@ async fn test_upgrade_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[4].clone()), - VidShareRecv(vid_share(&vids[4].0, handle.public_key())), + VidShareRecv(leaders[4], vid_share(&vids[4].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[4].0[0].data.payload_commitment, proposals[4].data.block_header.builder_commitment.clone(), @@ -536,7 +536,7 @@ async fn test_upgrade_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[5].clone()), - VidShareRecv(vid_share(&vids[5].0, handle.public_key())), + VidShareRecv(leaders[5], vid_share(&vids[5].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[5].0[0].data.payload_commitment, proposals[5].data.block_header.builder_commitment.clone(), @@ -550,7 +550,7 @@ async fn test_upgrade_task_blank_blocks() { ], vec![ DaCertificateRecv(dacs[6].clone()), - VidShareRecv(vid_share(&vids[6].0, handle.public_key())), + VidShareRecv(leaders[6], vid_share(&vids[6].0, handle.public_key())), SendPayloadCommitmentAndMetadata( vids[6].0[0].data.payload_commitment, proposals[6].data.block_header.builder_commitment.clone(), diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 71b2719c27..49805d53d5 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -113,22 +113,22 @@ async fn test_upgrade_task_with_vote() { random![ QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), - VidShareRecv(vids[1].0[0].clone()), + VidShareRecv(leaders[1], vids[1].0[0].clone()), ], random![ QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), DaCertificateRecv(dacs[2].clone()), - VidShareRecv(vids[2].0[0].clone()), + VidShareRecv(leaders[2], vids[2].0[0].clone()), ], random![ QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), DaCertificateRecv(dacs[3].clone()), - VidShareRecv(vids[3].0[0].clone()), + VidShareRecv(leaders[3], vids[3].0[0].clone()), ], random![ QuorumProposalValidated(proposals[4].data.clone(), leaves[3].clone()), DaCertificateRecv(dacs[4].clone()), - VidShareRecv(vids[4].0[0].clone()), + VidShareRecv(leaders[4], vids[4].0[0].clone()), ], random![QuorumProposalValidated( proposals[5].data.clone(), From 8756d0136c5e11c3a98509900cedf4aef6ca6052 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 27 Sep 2024 13:46:18 -0400 Subject: [PATCH 1228/1393] fix orchestrator manual start (#3707) --- orchestrator/src/lib.rs | 2 +- task-impls/src/consensus2/mod.rs | 10 +++++----- types/src/vid.rs | 3 ++- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 397f797734..7a3dc5c218 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -504,7 +504,6 @@ where }); } - self.manual_start_allowed = false; Ok(self.config.clone()) } @@ -595,6 +594,7 @@ where self.accepting_new_keys = false; self.manual_start_allowed = false; self.peer_pub_ready = true; + self.start = true; Ok(()) } diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index 3e7511fe8f..dca4019d26 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -6,11 +6,6 @@ use std::sync::Arc; -use self::handlers::{ - handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, -}; -use crate::helpers::broadcast_event; -use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; @@ -34,6 +29,11 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::instrument; +use self::handlers::{ + handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, +}; +use crate::{events::HotShotEvent, helpers::broadcast_event, vote_collection::VoteCollectorsMap}; + /// Event handlers for use in the `handle` method. mod handlers; diff --git a/types/src/vid.rs b/types/src/vid.rs index 07f5b2cb1d..5c2fd2cead 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -35,7 +35,8 @@ use serde::{Deserialize, Serialize}; use sha2::Sha256; use crate::{ - constants::SRS_DEGREE, data::VidDisperse as HotShotVidDisperse, data::VidDisperseShare, + constants::SRS_DEGREE, + data::{VidDisperse as HotShotVidDisperse, VidDisperseShare}, message::Proposal, }; From 558671547b196b87a845c68717579af6c9a82840 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 27 Sep 2024 16:04:32 -0400 Subject: [PATCH 1229/1393] break task loops when channel closes (#3709) --- hotshot/src/tasks/mod.rs | 5 ++++- task/src/task.rs | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index e875dd2c48..0e7be118f8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,7 +10,7 @@ pub mod task_state; use std::{fmt::Debug, sync::Arc}; -use async_broadcast::broadcast; +use async_broadcast::{broadcast, RecvError}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; @@ -261,6 +261,9 @@ pub fn create_shutdown_event_monitor { + return; + } Err(e) => { tracing::error!("Shutdown event monitor channel recv error: {}", e); } diff --git a/task/src/task.rs b/task/src/task.rs index af195d1e27..c623c5e43a 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use anyhow::Result; -use async_broadcast::{Receiver, Sender}; +use async_broadcast::{Receiver, RecvError, Sender}; #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; @@ -93,6 +93,9 @@ impl Task { .await .inspect_err(|e| tracing::info!("{e}")); } + Err(RecvError::Closed) => { + break self.boxed_state(); + } Err(e) => { tracing::error!("Failed to receive from event stream Error: {}", e); } From 6d428e4e8b86af30e6aa0e4e45303d582a7b3ed0 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Fri, 27 Sep 2024 20:24:41 -0400 Subject: [PATCH 1230/1393] [CLEANUP] - Remove `QuorumProposalRequestRecv` from Consensus Tasks (#3711) * cleanup * clippy --- task-impls/src/consensus/mod.rs | 26 -------------------------- task-impls/src/consensus2/mod.rs | 28 +--------------------------- task-impls/src/request.rs | 6 ++++++ task-impls/src/response.rs | 25 ++++++++++++++----------- 4 files changed, 21 insertions(+), 64 deletions(-) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 313e3ed331..b8fad36a08 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -13,7 +13,6 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; -use committable::Committable; use futures::future::join_all; use handlers::publish_proposal_from_commitment_and_metadata; use hotshot_task::task::TaskState; @@ -310,31 +309,6 @@ impl, V: Versions> ConsensusTaskSt warn!("Failed to handle QuorumProposalValidated event {e:#}"); } } - HotShotEvent::QuorumProposalRequestRecv(req, signature) => { - // Make sure that this request came from who we think it did - if !req.key.validate(signature, req.commit().as_ref()) { - warn!("Invalid signature key on proposal request."); - return; - } - - if let Some(quorum_proposal) = self - .consensus - .read() - .await - .last_proposals() - .get(&req.view_number) - { - broadcast_event( - HotShotEvent::QuorumProposalResponseSend( - req.key.clone(), - quorum_proposal.clone(), - ) - .into(), - &event_sender, - ) - .await; - } - } HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quorum vote: {:?}", vote.view_number()); if self.quorum_membership.leader(vote.view_number() + 1) != self.public_key { diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus2/mod.rs index dca4019d26..7b139842f9 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus2/mod.rs @@ -12,7 +12,6 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use async_trait::async_trait; -use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, @@ -32,7 +31,7 @@ use tracing::instrument; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, }; -use crate::{events::HotShotEvent, helpers::broadcast_event, vote_collection::VoteCollectorsMap}; +use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; /// Event handlers for use in the `handle` method. mod handlers; @@ -113,31 +112,6 @@ impl, V: Versions> Consensus2TaskS tracing::debug!("Failed to handle QuorumVoteRecv event; error = {e}"); } } - HotShotEvent::QuorumProposalRequestRecv(req, signature) => { - // Make sure that this request came from who we think it did - if !req.key.validate(signature, req.commit().as_ref()) { - tracing::warn!("Invalid signature key on proposal request."); - return; - } - - if let Some(quorum_proposal) = self - .consensus - .read() - .await - .last_proposals() - .get(&req.view_number) - { - broadcast_event( - HotShotEvent::QuorumProposalResponseSend( - req.key.clone(), - quorum_proposal.clone(), - ) - .into(), - &sender, - ) - .await; - } - } HotShotEvent::TimeoutVoteRecv(ref vote) => { if let Err(e) = handle_timeout_vote_recv(vote, Arc::clone(&event), &sender, self).await diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 22fa5ab4ec..4ea5966fec 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -214,6 +214,11 @@ impl> NetworkRequestState> NetworkRequestState NetworkResponseState { } } HotShotEvent::QuorumProposalRequestRecv(req, signature) => { + // Make sure that this request came from who we think it did + if !req.key.validate(signature, req.commit().as_ref()) { + tracing::warn!("Invalid signature key on proposal request."); + return; + } + if let Some(quorum_proposal) = self .consensus .read() @@ -104,18 +110,15 @@ impl NetworkResponseState { .last_proposals() .get(&req.view_number) { - // Make sure that this request came from who we think it did - if req.key.validate(signature, req.commit().as_ref()) { - broadcast_event( - HotShotEvent::QuorumProposalResponseSend( - req.key.clone(), - quorum_proposal.clone(), - ) - .into(), - &event_sender, + broadcast_event( + HotShotEvent::QuorumProposalResponseSend( + req.key.clone(), + quorum_proposal.clone(), ) - .await; - } + .into(), + &event_sender, + ) + .await; } } HotShotEvent::Shutdown => { From 4a3451ab38c1399319107967a65eca878e8fa3b6 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 1 Oct 2024 09:44:20 -0400 Subject: [PATCH 1231/1393] [Libp2p] Use native Libp2p multiaddresses (#3714) * native Libp2p multiaddresses * `String` to reference * format * make orchestrator client only take the URL * add tests * lints * `address` -> `advertise_address` * lint * format * namespace tests --- examples/combined/all.rs | 2 +- examples/combined/validator.rs | 8 +- examples/infra/mod.rs | 36 +++-- examples/libp2p/all.rs | 2 +- examples/libp2p/validator.rs | 8 +- examples/push-cdn/whitelist-adapter.rs | 14 +- hotshot/src/traits.rs | 4 +- .../src/traits/networking/libp2p_network.rs | 147 ++++++++++++++++-- orchestrator/src/client.rs | 38 +---- orchestrator/src/config.rs | 11 +- 10 files changed, 185 insertions(+), 85 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 4ca1e46f3f..d6edbf3253 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -151,7 +151,7 @@ async fn main() { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, - advertise_address: Some(advertise_address), + advertise_address: Some(advertise_address.to_string()), builder_address: Some(builder_address), network_config_file: None, }, diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index a73c80f70d..5007181adc 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -5,7 +5,6 @@ // along with the HotShot repository. If not, see . //! A validator using both the web server and libp2p -use std::{net::SocketAddr, str::FromStr}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; @@ -34,12 +33,7 @@ async fn main() { // If we did not set the advertise address, use our local IP and port 8000 let local_ip = local_ip().expect("failed to get local IP"); - args.advertise_address = Some( - args.advertise_address.unwrap_or( - SocketAddr::from_str(&format!("{local_ip}:8000")) - .expect("failed to convert local IP to socket address"), - ), - ); + args.advertise_address = Some(args.advertise_address.unwrap_or(format!("{local_ip}:8000"))); debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 26caa3dd1b..480b00445f 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -9,7 +9,7 @@ use std::{ collections::HashMap, fmt::Debug, fs, - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}, num::NonZeroUsize, sync::Arc, time::{Duration, Instant}, @@ -27,8 +27,9 @@ use futures::StreamExt; use hotshot::{ traits::{ implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, CombinedNetworks, Libp2pMetricsValue, - Libp2pNetwork, PushCdnNetwork, WrappedSignatureKey, + derive_libp2p_multiaddr, derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, + CombinedNetworks, Libp2pMetricsValue, Libp2pNetwork, PushCdnNetwork, + WrappedSignatureKey, }, BlockPayload, NodeImplementation, }, @@ -359,7 +360,7 @@ pub trait RunDa< /// Initializes networking, returns self async fn initialize_networking( config: NetworkConfig, - libp2p_advertise_address: Option, + libp2p_advertise_address: Option, ) -> Self; /// Initializes the genesis state and HotShot instance; does not start HotShot consensus @@ -633,7 +634,7 @@ where { async fn initialize_networking( config: NetworkConfig, - _libp2p_advertise_address: Option, + _libp2p_advertise_address: Option, ) -> PushCdnDaRun { // Get our own key let key = config.config.my_own_validator_config.clone(); @@ -711,7 +712,7 @@ where { async fn initialize_networking( config: NetworkConfig, - libp2p_advertise_address: Option, + libp2p_advertise_address: Option, ) -> Libp2pDaRun { // Extrapolate keys for ease of use let keys = config.clone().config.my_own_validator_config; @@ -721,11 +722,16 @@ where // In an example, we can calculate the libp2p bind address as a function // of the advertise address. let bind_address = if let Some(libp2p_advertise_address) = libp2p_advertise_address { + let libp2p_advertise_address: SocketAddrV4 = libp2p_advertise_address + .parse() + .expect("failed to parse advertise address"); + // If we have supplied one, use it SocketAddr::new( IpAddr::V4(Ipv4Addr::UNSPECIFIED), libp2p_advertise_address.port(), ) + .to_string() } else { // If not, index a base port with our node index SocketAddr::new( @@ -733,8 +739,13 @@ where 8000 + (u16::try_from(config.node_index) .expect("failed to create advertise address")), ) + .to_string() }; + // Derive the bind address + let bind_address = + derive_libp2p_multiaddr(&bind_address).expect("failed to derive bind address"); + // Create the Libp2p network let libp2p_network = Libp2pNetwork::from_config::( config.clone(), @@ -799,7 +810,7 @@ where { async fn initialize_networking( config: NetworkConfig, - libp2p_advertise_address: Option, + libp2p_advertise_address: Option, ) -> CombinedDaRun { // Initialize our Libp2p network let libp2p_network: Libp2pDaRun = @@ -808,7 +819,7 @@ where Libp2pNetwork, Libp2pImpl, V, - >>::initialize_networking(config.clone(), libp2p_advertise_address) + >>::initialize_networking(config.clone(), libp2p_advertise_address.clone()) .await; // Initialize our CDN network @@ -874,7 +885,7 @@ pub async fn main_entry_point< info!("Starting validator"); - let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.clone()); + let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.url.clone()); // We assume one node will not call this twice to generate two validator_config-s with same identity. let my_own_validator_config = @@ -895,6 +906,11 @@ pub async fn main_entry_point< PeerConfig::::to_bytes(&my_own_validator_config.public_config()) .clone(); + // Derive the advertise multiaddress from the supplied string + let advertise_multiaddress = args.advertise_address.clone().map(|advertise_address| { + derive_libp2p_multiaddr(&advertise_address).expect("failed to derive Libp2p multiaddr") + }); + // conditionally save/load config from file or orchestrator // This is a function that will return correct complete config from orchestrator. // It takes in a valid args.network_config_file when loading from file, or valid validator_config when loading from orchestrator, the invalid one will be ignored. @@ -904,7 +920,7 @@ pub async fn main_entry_point< let (mut run_config, source) = NetworkConfig::::get_complete_config( &orchestrator_client, my_own_validator_config, - args.advertise_address, + advertise_multiaddress, Some(libp2p_public_key), ) .await diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 8f0e0a75a6..7cf2101d9f 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -54,7 +54,7 @@ async fn main() { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, - advertise_address: Some(advertise_address), + advertise_address: Some(advertise_address.to_string()), builder_address: Some(builder_address), network_config_file: None, }, diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 41a02d6eb1..1e2bb8d096 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -5,7 +5,6 @@ // along with the HotShot repository. If not, see . //! A validator using libp2p -use std::{net::SocketAddr, str::FromStr}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; @@ -33,12 +32,7 @@ async fn main() { // If we did not set the advertise address, use our local IP and port 8000 let local_ip = local_ip().expect("failed to get local IP"); - args.advertise_address = Some( - args.advertise_address.unwrap_or( - SocketAddr::from_str(&format!("{local_ip}:8000")) - .expect("failed to convert local IP to socket address"), - ), - ); + args.advertise_address = Some(args.advertise_address.unwrap_or(format!("{local_ip}:8000"))); debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index 9a3d585b18..d68f618fb5 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -14,10 +14,7 @@ use anyhow::{Context, Result}; use cdn_broker::reexports::discovery::{DiscoveryClient, Embedded, Redis}; use clap::Parser; use hotshot_example_types::node_types::TestTypes; -use hotshot_orchestrator::{ - client::{OrchestratorClient, ValidatorArgs}, - config::NetworkConfig, -}; +use hotshot_orchestrator::{client::OrchestratorClient, config::NetworkConfig}; use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use surf_disco::Url; @@ -50,12 +47,9 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); // Create a new `OrchestratorClient` from the supplied URL - let orchestrator_client = OrchestratorClient::new(ValidatorArgs { - url: Url::from_str(&args.orchestrator_url).with_context(|| "Invalid URL")?, - advertise_address: None, - builder_address: None, - network_config_file: None, - }); + let orchestrator_client = OrchestratorClient::new( + Url::from_str(&args.orchestrator_url).with_context(|| "Invalid URL")?, + ); // Attempt to get the config from the orchestrator. // Loops internally until the config is received. diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index b0aa3a67e3..8667943189 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -19,8 +19,8 @@ pub mod implementations { pub use super::networking::{ combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{ - derive_libp2p_keypair, derive_libp2p_peer_id, GossipConfig, Libp2pMetricsValue, - Libp2pNetwork, PeerInfoVec, + derive_libp2p_keypair, derive_libp2p_multiaddr, derive_libp2p_peer_id, GossipConfig, + Libp2pMetricsValue, Libp2pNetwork, PeerInfoVec, }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index fbb5f32c67..9e3df13052 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -13,7 +13,7 @@ use std::{ cmp::min, collections::{BTreeSet, HashSet}, fmt::Debug, - net::SocketAddr, + net::{IpAddr, ToSocketAddrs}, num::NonZeroUsize, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -356,6 +356,55 @@ pub fn derive_libp2p_peer_id( Ok(PeerId::from_public_key(&keypair.public())) } +/// Parse a Libp2p Multiaddr from a string. The input string should be in the format +/// `hostname:port` or `ip:port`. This function derives a `Multiaddr` from the input string. +/// +/// This borrows from Rust's implementation of `to_socket_addrs` but will only warn if the domain +/// does not yet resolve. +/// +/// # Errors +/// - If the input string is not in the correct format +pub fn derive_libp2p_multiaddr(addr: &String) -> anyhow::Result { + // Split the address into the host and port parts + let (host, port) = match addr.rfind(':') { + Some(idx) => (&addr[..idx], &addr[idx + 1..]), + None => return Err(anyhow!("Invalid address format, no port supplied")), + }; + + // Try parsing the host as an IP address + let ip = host.parse::(); + + // Conditionally build the multiaddr string + let multiaddr_string = match ip { + Ok(IpAddr::V4(ip)) => format!("/ip4/{ip}/udp/{port}/quic-v1"), + Ok(IpAddr::V6(ip)) => format!("/ip6/{ip}/udp/{port}/quic-v1"), + Err(_) => { + // Try resolving the host. If it fails, continue but warn the user + let lookup_result = addr.to_socket_addrs(); + + // See if the lookup failed + let failed = lookup_result + .map(|result| result.collect::>().is_empty()) + .unwrap_or(true); + + // If it did, warn the user + if failed { + warn!( + "Failed to resolve domain name {}, assuming it has not yet been provisioned", + host + ); + } + + format!("/dns/{host}/udp/{port}/quic-v1") + } + }; + + // Convert the multiaddr string to a `Multiaddr` + multiaddr_string.parse().with_context(|| { + format!("Failed to convert Multiaddr string to Multiaddr: {multiaddr_string}",) + }) +} + impl Libp2pNetwork { /// Create and return a Libp2p network from a network config file /// and various other configuration-specific values. @@ -368,7 +417,7 @@ impl Libp2pNetwork { pub async fn from_config( mut config: NetworkConfig, gossip_config: GossipConfig, - bind_address: SocketAddr, + bind_address: Multiaddr, pub_key: &K, priv_key: &K::PrivateKey, metrics: Libp2pMetricsValue, @@ -382,15 +431,6 @@ impl Libp2pNetwork { // Derive our Libp2p keypair from our supplied private key let keypair = derive_libp2p_keypair::(priv_key)?; - // Convert our bind address to a `Multiaddr` - let bind_address: Multiaddr = format!( - "/{}/{}/udp/{}/quic-v1", - if bind_address.is_ipv4() { "ip4" } else { "ip6" }, - bind_address.ip(), - bind_address.port() - ) - .parse()?; - // Build our libp2p configuration let mut config_builder = NetworkNodeConfigBuilder::default(); @@ -1101,3 +1141,88 @@ impl ConnectedNetwork for Libp2pNetwork { .map_err(|err| tracing::warn!("failed to process node lookup request: {err}")); } } + +#[cfg(test)] +mod test { + mod derive_multiaddr { + use super::super::*; + use std::net::Ipv6Addr; + + /// Test derivation of a valid IPv4 address -> Multiaddr + #[test] + fn test_v4_valid() { + // Derive a multiaddr from a valid IPv4 address + let addr = "1.1.1.1:8080".to_string(); + let multiaddr = + derive_libp2p_multiaddr(&addr).expect("Failed to derive valid multiaddr, {}"); + + // Make sure it's the correct (quic) multiaddr + assert_eq!(multiaddr.to_string(), "/ip4/1.1.1.1/udp/8080/quic-v1"); + } + + /// Test derivation of a valid IPv6 address -> Multiaddr + #[test] + fn test_v6_valid() { + // Derive a multiaddr from a valid IPv6 address + let ipv6_addr = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8); + let addr = format!("{ipv6_addr}:8080"); + let multiaddr = + derive_libp2p_multiaddr(&addr).expect("Failed to derive valid multiaddr, {}"); + + // Make sure it's the correct (quic) multiaddr + assert_eq!( + multiaddr.to_string(), + format!("/ip6/{ipv6_addr}/udp/8080/quic-v1") + ); + } + + /// Test that an invalid address fails to derive to a Multiaddr + #[test] + fn test_no_port() { + // Derive a multiaddr from an invalid port + let addr = "1.1.1.1".to_string(); + let multiaddr = derive_libp2p_multiaddr(&addr); + + // Make sure it fails + assert!(multiaddr.is_err()); + } + + /// Test that an existing domain name resolves to a Multiaddr + #[test] + fn test_fqdn_exists() { + // Derive a multiaddr from a valid FQDN + let addr = "example.com:8080".to_string(); + let multiaddr = + derive_libp2p_multiaddr(&addr).expect("Failed to derive valid multiaddr, {}"); + + // Make sure it's the correct (quic) multiaddr + assert_eq!(multiaddr.to_string(), "/dns/example.com/udp/8080/quic-v1"); + } + + /// Test that a non-existent domain name still resolves to a Multiaddr + #[test] + fn test_fqdn_does_not_exist() { + // Derive a multiaddr from an invalid FQDN + let addr = "libp2p.example.com:8080".to_string(); + let multiaddr = + derive_libp2p_multiaddr(&addr).expect("Failed to derive valid multiaddr, {}"); + + // Make sure it still worked + assert_eq!( + multiaddr.to_string(), + "/dns/libp2p.example.com/udp/8080/quic-v1" + ); + } + + /// Test that a domain name without a port fails to derive to a Multiaddr + #[test] + fn test_fqdn_no_port() { + // Derive a multiaddr from an invalid port + let addr = "example.com".to_string(); + let multiaddr = derive_libp2p_multiaddr(&addr); + + // Make sure it fails + assert!(multiaddr.is_err()); + } + } +} diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index b4a2e6f898..bb6eeac585 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -129,7 +129,7 @@ pub struct ValidatorArgs { /// The address the orchestrator runs on pub url: Url, /// The optional advertise address to use for Libp2p - pub advertise_address: Option, + pub advertise_address: Option, /// Optional address to run builder on. Address must be accessible by other nodes pub builder_address: Option, /// An optional network config file to save to/load from @@ -146,7 +146,7 @@ pub struct MultiValidatorArgs { /// The address the orchestrator runs on pub url: Url, /// The optional advertise address to use for Libp2p - pub advertise_address: Option, + pub advertise_address: Option, /// An optional network config file to save to/load from /// Allows for rejoining the network on a complete state loss #[arg(short, long)] @@ -193,8 +193,8 @@ impl ValidatorArgs { impl OrchestratorClient { /// Creates the client that will connect to the orchestrator #[must_use] - pub fn new(args: ValidatorArgs) -> Self { - let client = surf_disco::Client::::new(args.url); + pub fn new(url: Url) -> Self { + let client = surf_disco::Client::::new(url); // TODO ED: Add healthcheck wait here OrchestratorClient { client } } @@ -212,23 +212,12 @@ impl OrchestratorClient { #[allow(clippy::type_complexity)] pub async fn get_config_without_peer( &self, - libp2p_address: Option, + libp2p_advertise_address: Option, libp2p_public_key: Option, ) -> anyhow::Result> { - // Get the (possible) Libp2p advertise address from our args - let libp2p_address = libp2p_address.map(|f| { - Multiaddr::try_from(format!( - "/{}/{}/udp/{}/quic-v1", - if f.is_ipv4() { "ip4" } else { "ip6" }, - f.ip(), - f.port() - )) - .expect("failed to create multiaddress") - }); - // Serialize our (possible) libp2p-specific data let request_body = vbs::Serializer::::serialize(&( - libp2p_address, + libp2p_advertise_address, libp2p_public_key, ))?; @@ -370,27 +359,16 @@ impl OrchestratorClient { pub async fn post_and_wait_all_public_keys( &self, mut validator_config: ValidatorConfig, - libp2p_address: Option, + libp2p_advertise_address: Option, libp2p_public_key: Option, ) -> NetworkConfig { - // Get the (possible) Libp2p advertise address from our args - let libp2p_address: Option = libp2p_address.map(|f| { - Multiaddr::try_from(format!( - "/{}/{}/udp/{}/quic-v1", - if f.is_ipv4() { "ip4" } else { "ip6" }, - f.ip(), - f.port() - )) - .expect("failed to create multiaddress") - }); - let pubkey: Vec = PeerConfig::::to_bytes(&validator_config.public_config()).clone(); let da_requested: bool = validator_config.is_da; // Serialize our (possible) libp2p-specific data let request_body = vbs::Serializer::::serialize(&( pubkey, - libp2p_address, + libp2p_advertise_address, libp2p_public_key, )) .expect("failed to serialize request"); diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs index 6316f1c9cb..81bd03cf9d 100644 --- a/orchestrator/src/config.rs +++ b/orchestrator/src/config.rs @@ -6,7 +6,6 @@ use std::{ env, fs, - net::SocketAddr, num::NonZeroUsize, ops::Range, path::{Path, PathBuf}, @@ -210,7 +209,7 @@ impl NetworkConfig { pub async fn from_file_or_orchestrator( client: &OrchestratorClient, file: Option, - libp2p_address: Option, + libp2p_advertise_address: Option, libp2p_public_key: Option, ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { if let Some(file) = file { @@ -223,7 +222,7 @@ impl NetworkConfig { error!("{e}, falling back to orchestrator"); let config = client - .get_config_without_peer(libp2p_address, libp2p_public_key) + .get_config_without_peer(libp2p_advertise_address, libp2p_public_key) .await?; // save to file if we fell back @@ -240,7 +239,7 @@ impl NetworkConfig { // otherwise just get from orchestrator Ok(( client - .get_config_without_peer(libp2p_address, libp2p_public_key) + .get_config_without_peer(libp2p_advertise_address, libp2p_public_key) .await?, NetworkConfigSource::Orchestrator, )) @@ -266,14 +265,14 @@ impl NetworkConfig { pub async fn get_complete_config( client: &OrchestratorClient, my_own_validator_config: ValidatorConfig, - libp2p_address: Option, + libp2p_advertise_address: Option, libp2p_public_key: Option, ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { // get the configuration from the orchestrator let run_config: NetworkConfig = client .post_and_wait_all_public_keys::( my_own_validator_config, - libp2p_address, + libp2p_advertise_address, libp2p_public_key, ) .await; From d96544979f9e400ff02948a267fbbdc5a315e26a Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 1 Oct 2024 13:45:08 -0400 Subject: [PATCH 1232/1393] Randomize leader election (#3693) --- example-types/src/node_types.rs | 33 +- examples/Cargo.toml | 1 - examples/infra/mod.rs | 33 +- hotshot/Cargo.toml | 2 - hotshot/src/traits/election.rs | 2 + .../traits/election/randomized_committee.rs | 168 +++++++ .../src/traits/election/static_committee.rs | 60 +-- .../static_committee_leader_two_views.rs | 23 +- .../src/traits/networking/libp2p_network.rs | 3 +- orchestrator/Cargo.toml | 4 - orchestrator/src/client.rs | 7 +- orchestrator/src/lib.rs | 23 +- testing/src/helpers.rs | 10 +- testing/src/test_runner.rs | 10 +- testing/tests/tests_1/network_task.rs | 18 +- testing/tests/tests_1/test_success.rs | 10 +- testing/tests/tests_2/catchup.rs | 458 +++++++----------- testing/tests/tests_3/memory_network.rs | 4 +- types/src/traits/election.rs | 4 +- 19 files changed, 461 insertions(+), 412 deletions(-) create mode 100644 hotshot/src/traits/election/randomized_committee.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index ec29f2a3c6..a8f06976c3 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -6,7 +6,7 @@ use hotshot::traits::{ election::{ - static_committee::{GeneralStaticCommittee, StaticCommittee}, + randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, static_committee_leader_two_views::StaticCommitteeLeaderForTwoViews, }, implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, @@ -52,7 +52,36 @@ impl NodeType for TestTypes { type Transaction = TestTransaction; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; - type Membership = GeneralStaticCommittee; + type Membership = StaticCommittee; + type BuilderSignatureKey = BuilderKey; +} + +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +/// filler struct to implement node type and allow us +/// to select our traits +pub struct TestTypesRandomizedLeader; +impl NodeType for TestTypesRandomizedLeader { + type AuctionResult = TestAuctionResult; + type Time = ViewNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = RandomizedCommittee; type BuilderSignatureKey = BuilderKey; } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b004023a27..80c0423510 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -18,7 +18,6 @@ bin-orchestrator = ["clap"] docs = [] doc-images = [] hotshot-testing = ["hotshot/hotshot-testing"] -randomized-leader-election = [] fixed-leader-election = [] # Common diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 480b00445f..056eb92782 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -381,7 +381,16 @@ pub trait RunDa< let network = self.network(); - let all_nodes = config.config.known_nodes_with_stake.clone(); + let all_nodes = if cfg!(feature = "fixed-leader-election") { + let mut vec = config.config.known_nodes_with_stake.clone(); + + vec.truncate(config.config.fixed_leader_for_gpuvid); + + vec + } else { + config.config.known_nodes_with_stake.clone() + }; + let da_nodes = config.config.known_da_nodes.clone(); // Create the quorum membership from all nodes @@ -389,19 +398,12 @@ pub trait RunDa< all_nodes.clone(), all_nodes.clone(), Topic::Global, - #[cfg(feature = "fixed-leader-election")] - config.config.fixed_leader_for_gpuvid, ); // Create the quorum membership from all nodes, specifying the committee // as the known da nodes - let da_membership = ::Membership::new( - all_nodes.clone(), - da_nodes, - Topic::Da, - #[cfg(feature = "fixed-leader-election")] - config.config.fixed_leader_for_gpuvid, - ); + let da_membership = + ::Membership::new(all_nodes.clone(), da_nodes, Topic::Da); let memberships = Memberships { quorum_membership: quorum_membership.clone(), @@ -557,6 +559,12 @@ pub trait RunDa< } let consensus_lock = context.hotshot.consensus(); let consensus = consensus_lock.read().await; + let num_eligible_leaders = context + .hotshot + .memberships + .quorum_membership + .committee_leaders(TYPES::Time::genesis()) + .len(); let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); // `failed_num_views` could include uncommitted views let failed_num_views = total_num_views - num_successful_commits; @@ -574,6 +582,7 @@ pub trait RunDa< / total_time_elapsed_sec; let avg_latency_in_sec = total_latency / num_latency; println!("[{node_index}]: throughput: {throughput_bytes_per_sec} bytes/sec, avg_latency: {avg_latency_in_sec} sec."); + BenchResults { partial_results: "Unset".to_string(), avg_latency_in_sec, @@ -586,6 +595,10 @@ pub trait RunDa< total_time_elapsed_in_sec: total_time_elapsed.as_secs(), total_num_views, failed_num_views, + committee_type: format!( + "{} with {num_eligible_leaders} eligible leaders", + std::any::type_name::() + ), } } else { // all values with zero diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 9dd8e2a35a..b96a366530 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -21,8 +21,6 @@ bin-orchestrator = ["clap"] docs = [] doc-images = [] hotshot-testing = [] -randomized-leader-election = [] -fixed-leader-election = [] [dependencies] anyhow = { workspace = true } diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election.rs index d00e7806b0..4f9212705f 100644 --- a/hotshot/src/traits/election.rs +++ b/hotshot/src/traits/election.rs @@ -6,6 +6,8 @@ //! elections used for consensus +/// leader completely randomized every view +pub mod randomized_committee; /// static (round robin) committee election pub mod static_committee; /// static (round robin leader for 2 consecutive views) committee election diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs new file mode 100644 index 0000000000..b761eb1ee2 --- /dev/null +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -0,0 +1,168 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; + +use ethereum_types::U256; +use hotshot_types::{ + traits::{ + election::Membership, + network::Topic, + node_implementation::NodeType, + signature_key::{SignatureKey, StakeTableEntryType}, + }, + PeerConfig, +}; +use rand::{rngs::StdRng, Rng}; + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] + +/// The static committee election + +pub struct RandomizedCommittee { + /// The nodes eligible for leadership. + /// NOTE: This is currently a hack because the DA leader needs to be the quorum + /// leader but without voting rights. + eligible_leaders: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake + stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake, indexed by public key + indexed_stake_table: + BTreeMap::StakeTableEntry>, + + /// The network topic of the committee + committee_topic: Topic, +} + +impl Membership for RandomizedCommittee { + /// Create a new election + fn new( + eligible_leaders: Vec::SignatureKey>>, + committee_members: Vec::SignatureKey>>, + committee_topic: Topic, + ) -> Self { + // For each eligible leader, get the stake table entry + let eligible_leaders: Vec<::StakeTableEntry> = + eligible_leaders + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each member, get the stake table entry + let members: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // Index the stake table by public key + let indexed_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + Self { + eligible_leaders, + stake_table: members, + indexed_stake_table, + committee_topic, + } + } + + /// Get the stake table for the current view + fn stake_table( + &self, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + self.stake_table.clone() + } + + /// Get all members of the committee for the current view + fn committee_members( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + + /// Get all eligible leaders of the committee for the current view + fn committee_leaders( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.eligible_leaders + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + + /// Get the stake table entry for a public key + fn stake( + &self, + pub_key: &::SignatureKey, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + self.indexed_stake_table.get(pub_key).cloned() + } + + /// Check if a node has stake in the committee + fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + self.indexed_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } + + /// Get the network topic for the committee + fn committee_topic(&self) -> Topic { + self.committee_topic.clone() + } + + /// Index the vector of public keys with the current view number + fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + + let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); + #[allow(clippy::cast_possible_truncation)] + let index = randomized_view_number as usize % self.eligible_leaders.len(); + + let res = self.eligible_leaders[index].clone(); + + TYPES::SignatureKey::public_key(&res) + } + + /// Get the total number of nodes in the committee + fn total_nodes(&self) -> usize { + self.stake_table.len() + } + + /// Get the voting success threshold for the committee + fn success_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() + } + + /// Get the voting failure threshold for the committee + fn failure_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() + } + + /// Get the voting upgrade threshold for the committee + fn upgrade_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(max( + (self.stake_table.len() as u64 * 9) / 10, + ((self.stake_table.len() as u64 * 2) / 3) + 1, + )) + .unwrap() + } +} diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 555050b89a..69cc5ce9c2 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -16,13 +16,11 @@ use hotshot_types::{ }, PeerConfig, }; -#[cfg(feature = "randomized-leader-election")] -use rand::{rngs::StdRng, Rng}; #[derive(Clone, Debug, Eq, PartialEq, Hash)] /// The static committee election -pub struct GeneralStaticCommittee { +pub struct StaticCommittee { /// The nodes eligible for leadership. /// NOTE: This is currently a hack because the DA leader needs to be the quorum /// leader but without voting rights. @@ -35,26 +33,16 @@ pub struct GeneralStaticCommittee { indexed_stake_table: BTreeMap::StakeTableEntry>, - // /// The members of the committee - // committee_members: BTreeSet, - #[cfg(feature = "fixed-leader-election")] - /// The number of fixed leaders for gpuvid - fixed_leader_for_gpuvid: usize, - /// The network topic of the committee committee_topic: Topic, } -/// static committee using a vrf kp -pub type StaticCommittee = GeneralStaticCommittee; - -impl Membership for GeneralStaticCommittee { +impl Membership for StaticCommittee { /// Create a new election fn new( eligible_leaders: Vec::SignatureKey>>, committee_members: Vec::SignatureKey>>, committee_topic: Topic, - #[cfg(feature = "fixed-leader-election")] fixed_leader_for_gpuvid: usize, ) -> Self { // For each eligible leader, get the stake table entry let eligible_leaders: Vec<::StakeTableEntry> = @@ -86,8 +74,6 @@ impl Membership for GeneralStaticCommittee { stake_table: members, indexed_stake_table, committee_topic, - #[cfg(feature = "fixed-leader-election")] - fixed_leader_for_gpuvid, } } @@ -109,6 +95,17 @@ impl Membership for GeneralStaticCommittee { .collect() } + /// Get all eligible leaders of the committee for the current view + fn committee_leaders( + &self, + _view_number: ::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.eligible_leaders + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + /// Get the stake table entry for a public key fn stake( &self, @@ -130,37 +127,10 @@ impl Membership for GeneralStaticCommittee { self.committee_topic.clone() } - #[cfg(not(any( - feature = "randomized-leader-election", - feature = "fixed-leader-election" - )))] /// Index the vector of public keys with the current view number fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { - let index = usize::try_from(*view_number % self.eligible_leaders.len() as u64).unwrap(); - let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) - } - - #[cfg(feature = "fixed-leader-election")] - /// Only get leader in fixed set - /// Index the fixed vector (first fixed_leader_for_gpuvid element) of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { - if self.fixed_leader_for_gpuvid <= 0 - || self.fixed_leader_for_gpuvid > self.eligible_leaders.len() - { - panic!("fixed_leader_for_gpuvid is not set correctly."); - } - let index = usize::try_from(*view_number % self.fixed_leader_for_gpuvid as u64).unwrap(); - let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) - } - - #[cfg(feature = "randomized-leader-election")] - /// Index the vector of public keys with a random number generated using the current view number as a seed - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { - let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); - let randomized_view_number: usize = rng.gen(); - let index = randomized_view_number % self.eligible_leaders.len(); + #[allow(clippy::cast_possible_truncation)] + let index = *view_number as usize % self.eligible_leaders.len(); let res = self.eligible_leaders[index].clone(); TYPES::SignatureKey::public_key(&res) } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 9aed92f531..9ce83c14a0 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -33,26 +33,16 @@ pub struct StaticCommitteeLeaderForTwoViews { indexed_stake_table: BTreeMap::StakeTableEntry>, - // /// The members of the committee - // committee_members: BTreeSet, - #[cfg(feature = "fixed-leader-election")] - /// The number of fixed leaders for gpuvid - fixed_leader_for_gpuvid: usize, - /// The network topic of the committee committee_topic: Topic, } -/// static committee using a vrf kp -pub type StaticCommittee = StaticCommitteeLeaderForTwoViews; - impl Membership for StaticCommitteeLeaderForTwoViews { /// Create a new election fn new( eligible_leaders: Vec::SignatureKey>>, committee_members: Vec::SignatureKey>>, committee_topic: Topic, - #[cfg(feature = "fixed-leader-election")] fixed_leader_for_gpuvid: usize, ) -> Self { // For each eligible leader, get the stake table entry let eligible_leaders: Vec<::StakeTableEntry> = @@ -84,8 +74,6 @@ impl Membership for StaticCommitteeLeaderForTwoViews Membership for StaticCommitteeLeaderForTwoViews::Time, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.eligible_leaders + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + /// Get the stake table entry for a public key fn stake( &self, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 9e3df13052..1a7220a6e0 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1145,9 +1145,10 @@ impl ConnectedNetwork for Libp2pNetwork { #[cfg(test)] mod test { mod derive_multiaddr { - use super::super::*; use std::net::Ipv6Addr; + use super::super::*; + /// Test derivation of a valid IPv4 address -> Multiaddr #[test] fn test_v4_valid() { diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 412a847940..9526203411 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -3,10 +3,6 @@ name = "hotshot-orchestrator" version = { workspace = true } edition = { workspace = true } -[features] -randomized-leader-election = [] -fixed-leader-election = [] - [dependencies] async-compatibility-layer = { workspace = true } async-lock = { workspace = true } diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index bb6eeac585..da7a050ae8 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -48,6 +48,8 @@ pub struct BenchResults { pub total_num_views: usize, /// The number of failed views during benchmarking pub failed_num_views: usize, + /// The membership committee type used + pub committee_type: String, } impl BenchResults { @@ -55,6 +57,7 @@ impl BenchResults { pub fn printout(&self) { println!("====================="); println!("{0} Benchmark results:", self.partial_results); + println!("Committee type: {}", self.committee_type); println!( "Average latency: {} seconds, Minimum latency: {} seconds, Maximum latency: {} seconds", self.avg_latency_in_sec, self.minimum_latency_in_sec, self.maximum_latency_in_sec @@ -90,8 +93,6 @@ pub struct BenchResultsDownloadConfig { pub transaction_size: u64, /// The number of rounds pub rounds: usize, - /// The type of leader election: static, fixed, random - pub leader_election_type: String, // Results starting here /// Whether the results are partially collected @@ -115,6 +116,8 @@ pub struct BenchResultsDownloadConfig { pub total_num_views: usize, /// The number of failed views during benchmarking pub failed_num_views: usize, + /// The membership committee type used + pub committee_type: String, } // VALIDATOR diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 7a3dc5c218..2090eaf273 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -143,27 +143,6 @@ impl OrchestratorState { } } - /// get election type in use - #[must_use] - pub fn election_type() -> String { - // leader is chosen in index order - #[cfg(not(any( - feature = "randomized-leader-election", - feature = "fixed-leader-election" - )))] - let election_type = "static-leader-selection".to_string(); - - // leader is from a fixed set - #[cfg(feature = "fixed-leader-election")] - let election_type = "fixed-leader-election".to_string(); - - // leader is randomly chosen - #[cfg(feature = "randomized-leader-election")] - let election_type = "randomized-leader-election".to_string(); - - election_type - } - /// Output the results to a csv file according to orchestrator state pub fn output_to_csv(&self) { let output_csv = BenchResultsDownloadConfig { @@ -174,7 +153,6 @@ impl OrchestratorState { transactions_per_round: self.config.transactions_per_round, transaction_size: self.bench_results.transaction_size_in_bytes, rounds: self.config.rounds, - leader_election_type: OrchestratorState::::election_type(), partial_results: self.bench_results.partial_results.clone(), avg_latency_in_sec: self.bench_results.avg_latency_in_sec, minimum_latency_in_sec: self.bench_results.minimum_latency_in_sec, @@ -184,6 +162,7 @@ impl OrchestratorState { total_time_elapsed_in_sec: self.bench_results.total_time_elapsed_in_sec, total_num_views: self.bench_results.total_num_views, failed_num_views: self.bench_results.failed_num_views, + committee_type: self.bench_results.committee_type.clone(), }; // Open the CSV file in append mode let results_csv_file = OpenOptions::new() diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 5c41e58380..b370f8d279 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -90,16 +90,8 @@ pub async fn build_system_handle< all_nodes.clone(), all_nodes.clone(), Topic::Global, - #[cfg(feature = "fixed-leader-election")] - config.fixed_leader_for_gpuvid, - ), - da_membership: TYPES::Membership::new( - all_nodes, - da_nodes, - Topic::Da, - #[cfg(feature = "fixed-leader-election")] - config.fixed_leader_for_gpuvid, ), + da_membership: TYPES::Membership::new(all_nodes, da_nodes, Topic::Da), }; SystemContext::init( diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 75c92e4ecd..8ef534860f 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -446,16 +446,8 @@ where all_nodes.clone(), all_nodes.clone(), Topic::Global, - #[cfg(feature = "fixed-leader-election")] - config.fixed_leader_for_gpuvid, - ), - da_membership: ::Membership::new( - all_nodes, - da_nodes, - Topic::Da, - #[cfg(feature = "fixed-leader-election")] - config.fixed_leader_for_gpuvid, ), + da_membership: ::Membership::new(all_nodes, da_nodes, Topic::Da), }; config.builder_urls = builder_urls .clone() diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 322640b054..cb623926d1 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -59,13 +59,8 @@ async fn test_network_task() { let all_nodes = config.known_nodes_with_stake.clone(); - let membership = ::Membership::new( - all_nodes.clone(), - all_nodes, - Topic::Global, - #[cfg(feature = "fixed-leader-election")] - config.fixed_leader_for_gpuvid, - ); + let membership = + ::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { channel: network.clone(), @@ -140,13 +135,8 @@ async fn test_network_storage_fail() { let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = UpgradeLock::::new(); - let membership = ::Membership::new( - all_nodes.clone(), - all_nodes, - Topic::Global, - #[cfg(feature = "fixed-leader-election")] - config.fixed_leader_for_gpuvid, - ); + let membership = + ::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { channel: network.clone(), diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 266f97ab5b..c4ba56416c 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -6,14 +6,14 @@ use std::time::Duration; +use hotshot_example_types::node_types::{ + Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, + TestTypesRandomizedLeader, TestVersions, +}; #[cfg(feature = "dependency-tasks")] use hotshot_example_types::testable_delay::{ DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, }; -use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, - state_types::TestTypes, -}; use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, @@ -25,7 +25,7 @@ use hotshot_testing::{ cross_tests!( TestName: test_success, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], + Types: [TestTypes, TestTypesRandomizedLeader], Versions: [TestVersions], Ignore: false, Metadata: { diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 06a0099847..4e4cb38f99 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -4,6 +4,24 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::time::Duration; + +use hotshot_example_types::node_types::{ + CombinedImpl, PushCdnImpl, TestTypes, TestTypesRandomizedLeader, TestVersions, +}; +#[cfg(feature = "dependency-tasks")] +use hotshot_example_types::testable_delay::{ + DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, +}; +use hotshot_macros::cross_tests; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, + test_builder::{TestDescription, TimingData}, +}; + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -301,274 +319,172 @@ async fn test_catchup_reload() { .await; } -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_all_restart() { - use std::time::Duration; - - use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; - use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, - test_builder::{TestDescription, TimingData}, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let timing_data = TimingData { - next_view_timeout: 2000, - round_start_delay: 500, - ..Default::default() - }; - let mut metadata: TestDescription = - TestDescription::default(); - let mut catchup_nodes = vec![]; - for i in 0..20 { - catchup_nodes.push(ChangeNode { - idx: i, - updown: NodeAction::RestartDown(0), - }) - } - - metadata.timing_data = timing_data; - metadata.start_nodes = 20; - metadata.num_nodes_with_stake = 20; - - metadata.spinning_properties = SpinningTaskDescription { - // Restart all the nodes in view 13 - node_changes: vec![(13, catchup_nodes)], - }; - metadata.view_sync_properties = - hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); - - metadata.completion_task_description = - CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ); - metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep committing rounds after the catchup, but not the full 50. - num_successful_views: 22, - num_failed_views: 15, - ..Default::default() - }; - - metadata - .gen_launcher(0) - .launch() - .run_test::() - .await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_all_restart_cdn() { - use std::time::Duration; - - use hotshot_example_types::node_types::{PushCdnImpl, TestTypes, TestVersions}; - use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, - test_builder::{TestDescription, TimingData}, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let timing_data = TimingData { - next_view_timeout: 2000, - round_start_delay: 500, - ..Default::default() - }; - let mut metadata: TestDescription = - TestDescription::default(); - let mut catchup_nodes = vec![]; - for i in 0..20 { - catchup_nodes.push(ChangeNode { - idx: i, - updown: NodeAction::RestartDown(0), - }) - } - - metadata.timing_data = timing_data; - metadata.start_nodes = 20; - metadata.num_nodes_with_stake = 20; - - metadata.spinning_properties = SpinningTaskDescription { - // Restart all the nodes in view 13 - node_changes: vec![(13, catchup_nodes)], - }; - metadata.view_sync_properties = - hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); - - metadata.completion_task_description = - CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ); - metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep committing rounds after the catchup, but not the full 50. - num_successful_views: 22, - num_failed_views: 15, - ..Default::default() - }; - - metadata - .gen_launcher(0) - .launch() - .run_test::() - .await; -} - -/// This test case ensures that proposals persist off of a restart. We demonstrate this by -/// artificially removing node 0 (the only DA committee member) from the candidate pool, -/// meaning that the entire DA also does not have the proposal, but we're still able to -/// move on because the *leader* does have the proposal. -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_all_restart_one_da() { - use std::time::Duration; - - use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; - use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, - test_builder::{TestDescription, TimingData}, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let timing_data = TimingData { - next_view_timeout: 2000, - round_start_delay: 500, - ..Default::default() - }; - let mut metadata: TestDescription = - TestDescription::default(); - - let mut catchup_nodes = vec![]; - for i in 0..20 { - catchup_nodes.push(ChangeNode { - idx: i, - updown: NodeAction::RestartDown(0), - }) - } - - metadata.timing_data = timing_data; - metadata.start_nodes = 20; - metadata.num_nodes_with_stake = 20; - - // Explicitly make the DA tiny to exaggerate a missing proposal. - metadata.da_staked_committee_size = 1; - - metadata.spinning_properties = SpinningTaskDescription { - // Restart all the nodes in view 13 - node_changes: vec![(13, catchup_nodes)], - }; - metadata.view_sync_properties = - hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); - - metadata.completion_task_description = - CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ); - metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep committing rounds after the catchup, but not the full 50. - num_successful_views: 22, - num_failed_views: 15, - ..Default::default() - }; - - metadata - .gen_launcher(0) - .launch() - .run_test::() - .await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_staggered_restart() { - use std::time::Duration; - - use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions}; - use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, - spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, - test_builder::TestDescription, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - let mut metadata: TestDescription = - TestDescription::default(); - - let mut down_da_nodes = vec![]; - for i in 1..4 { - down_da_nodes.push(ChangeNode { - idx: i, - updown: NodeAction::RestartDown(20), - }); - } - - let mut down_regular_nodes = vec![]; - for i in 4..10 { - down_regular_nodes.push(ChangeNode { - idx: i, - updown: NodeAction::RestartDown(0), - }); - } - // restart the last da so it gets the new libp2p routing table - down_regular_nodes.push(ChangeNode { - idx: 0, - updown: NodeAction::RestartDown(0), - }); - - metadata.start_nodes = 10; - metadata.num_nodes_with_stake = 10; - - // Explicitly make the DA small to simulate real network. - metadata.da_staked_committee_size = 4; - - metadata.spinning_properties = SpinningTaskDescription { - // Restart all the nodes in view 13 - node_changes: vec![(13, down_da_nodes), (33, down_regular_nodes)], - }; - metadata.view_sync_properties = - hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 50); - - // Give the test some extra time because we are purposely timing out views - metadata.completion_task_description = - CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(240), - }, - ); - metadata.overall_safety_properties = OverallSafetyPropertiesDescription { - // Make sure we keep committing rounds after the catchup, but not the full 50. - num_successful_views: 22, - num_failed_views: 30, - ..Default::default() - }; - - metadata - .gen_launcher(0) - .launch() - .run_test::() - .await; -} +cross_tests!( + TestName: test_all_restart, + Impls: [CombinedImpl, PushCdnImpl], + Types: [TestTypes, TestTypesRandomizedLeader], + Versions: [TestVersions], + Ignore: false, + Metadata: { + let timing_data = TimingData { + next_view_timeout: 2000, + round_start_delay: 500, + ..Default::default() + }; + let mut metadata = TestDescription::default(); + let mut catchup_nodes = vec![]; + + for i in 0..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }) + } + + metadata.timing_data = timing_data; + metadata.start_nodes = 20; + metadata.num_nodes_with_stake = 20; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(13, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + metadata + }, +); + +// This test case ensures that proposals persist off of a restart. We demonstrate this by +// artificially removing node 0 (the only DA committee member) from the candidate pool, +// meaning that the entire DA also does not have the proposal, but we're still able to +// move on because the *leader* does have the proposal. +cross_tests!( + TestName: test_all_restart_one_da, + Impls: [CombinedImpl], + Types: [TestTypes], + Versions: [TestVersions], + Ignore: false, + Metadata: { + let timing_data = TimingData { + next_view_timeout: 2000, + round_start_delay: 500, + ..Default::default() + }; + let mut metadata: TestDescription = + TestDescription::default(); + + let mut catchup_nodes = vec![]; + for i in 0..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }) + } + + metadata.timing_data = timing_data; + metadata.start_nodes = 20; + metadata.num_nodes_with_stake = 20; + + // Explicitly make the DA tiny to exaggerate a missing proposal. + metadata.da_staked_committee_size = 1; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(13, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + + metadata + }, +); + +cross_tests!( + TestName: test_staggered_restart, + Impls: [CombinedImpl], + Types: [TestTypes], + Versions: [TestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default(); + + let mut down_da_nodes = vec![]; + for i in 1..4 { + down_da_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(20), + }); + } + + let mut down_regular_nodes = vec![]; + for i in 4..10 { + down_regular_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }); + } + // restart the last da so it gets the new libp2p routing table + down_regular_nodes.push(ChangeNode { + idx: 0, + updown: NodeAction::RestartDown(0), + }); + + metadata.start_nodes = 10; + metadata.num_nodes_with_stake = 10; + + // Explicitly make the DA small to simulate real network. + metadata.da_staked_committee_size = 4; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 13 + node_changes: vec![(13, down_da_nodes), (33, down_regular_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 50); + + // Give the test some extra time because we are purposely timing out views + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 30, + ..Default::default() + }; + + metadata + }, +); diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 21db37e42d..38b2da02e1 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -10,7 +10,7 @@ use std::{sync::Arc, time::Duration}; use async_compatibility_layer::{art::async_timeout, logging::setup_logging}; use hotshot::{ traits::{ - election::static_committee::GeneralStaticCommittee, + election::static_committee::StaticCommittee, implementations::{MasterMap, MemoryNetwork}, NodeImplementation, }, @@ -60,7 +60,7 @@ impl NodeType for Test { type Transaction = TestTransaction; type ValidatedState = TestValidatedState; type InstanceState = TestInstanceState; - type Membership = GeneralStaticCommittee; + type Membership = StaticCommittee; type BuilderSignatureKey = BuilderKey; } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 2db04c283e..b03bf8a843 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -21,7 +21,6 @@ pub trait Membership: eligible_leaders: Vec>, committee_members: Vec>, committee_topic: Topic, - #[cfg(feature = "fixed-leader-election")] fixed_leader_for_gpuvid: usize, ) -> Self; /// Get all participants in the committee (including their stake) @@ -30,6 +29,9 @@ pub trait Membership: /// Get all participants in the committee for a specific view fn committee_members(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get all leaders in the committee for a specific view + fn committee_leaders(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get the stake table entry for a public key, returns `None` if the /// key is not in the table fn stake( From 41c083b3a9ecfbd001940d735fe644539a87e8f8 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 2 Oct 2024 11:34:41 -0400 Subject: [PATCH 1233/1393] Rebalance CI tests (#3718) --- testing/tests/tests_3/byzantine_tests.rs | 50 +-------------- testing/tests/tests_4/byzantine_tests.rs | 61 +++++++++++++++++++ .../tests/{tests_2 => tests_5}/push_cdn.rs | 0 .../test_with_failures.rs} | 0 4 files changed, 63 insertions(+), 48 deletions(-) create mode 100644 testing/tests/tests_4/byzantine_tests.rs rename testing/tests/{tests_2 => tests_5}/push_cdn.rs (100%) rename testing/tests/{tests_2/test_with_failures_one.rs => tests_5/test_with_failures.rs} (100%) diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index 8eb6364c75..4687d3ff66 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -15,7 +15,7 @@ use hotshot_testing::{ block_builder::SimpleBuilderImplementation, byzantine::byzantine_behaviour::{ BadProposalViewDos, DishonestDa, DishonestLeader, DishonestVoter, DishonestVoting, - DoubleProposeVote, ViewDelay, + DoubleProposeVote, }, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, test_builder::{Behaviour, TestDescription}, @@ -30,6 +30,7 @@ use hotshot_types::{ }, vote::HasViewNumber, }; + cross_tests!( TestName: double_propose_vote, Impls: [MemoryImpl], @@ -162,53 +163,6 @@ cross_tests!( }, ); -cross_tests!( - TestName: view_delay, - Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Versions: [MarketplaceTestVersions], - Ignore: false, - Metadata: { - - let behaviour = Rc::new(|node_id| { - let view_delay = ViewDelay { - number_of_views_to_delay: node_id/3, - events_for_view: HashMap::new(), - stop_view_delay_at_view_number: 25, - }; - match node_id { - 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), - _ => Behaviour::Standard, - } - }); - - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; - - let num_nodes_with_stake = 15; - metadata.num_nodes_with_stake = num_nodes_with_stake; - metadata.da_staked_committee_size = num_nodes_with_stake; - metadata.overall_safety_properties.num_failed_views = 20; - metadata.overall_safety_properties.num_successful_views = 20; - metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ - (ViewNumber::new(6), false), - (ViewNumber::new(10), false), - (ViewNumber::new(14), false), - (ViewNumber::new(21), false), - (ViewNumber::new(25), false), - ]); - metadata - }, -); - cross_tests!( TestName: dishonest_voting, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_4/byzantine_tests.rs b/testing/tests/tests_4/byzantine_tests.rs new file mode 100644 index 0000000000..8b460a5665 --- /dev/null +++ b/testing/tests/tests_4/byzantine_tests.rs @@ -0,0 +1,61 @@ +use std::{collections::HashMap, rc::Rc, time::Duration}; + +use hotshot_example_types::{ + node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl}, + state_types::TestTypes, +}; +use hotshot_macros::cross_tests; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + byzantine::byzantine_behaviour::ViewDelay, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + test_builder::{Behaviour, TestDescription}, +}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; + +cross_tests!( + TestName: view_delay, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [MarketplaceTestVersions], + Ignore: false, + Metadata: { + + let behaviour = Rc::new(|node_id| { + let view_delay = ViewDelay { + number_of_views_to_delay: node_id/3, + events_for_view: HashMap::new(), + stop_view_delay_at_view_number: 25, + }; + match node_id { + 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), + _ => Behaviour::Standard, + } + }); + + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + behaviour, + ..TestDescription::default() + }; + + let num_nodes_with_stake = 15; + metadata.num_nodes_with_stake = num_nodes_with_stake; + metadata.da_staked_committee_size = num_nodes_with_stake; + metadata.overall_safety_properties.num_failed_views = 20; + metadata.overall_safety_properties.num_successful_views = 20; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(6), false), + (ViewNumber::new(10), false), + (ViewNumber::new(14), false), + (ViewNumber::new(21), false), + (ViewNumber::new(25), false), + ]); + metadata + }, +); diff --git a/testing/tests/tests_2/push_cdn.rs b/testing/tests/tests_5/push_cdn.rs similarity index 100% rename from testing/tests/tests_2/push_cdn.rs rename to testing/tests/tests_5/push_cdn.rs diff --git a/testing/tests/tests_2/test_with_failures_one.rs b/testing/tests/tests_5/test_with_failures.rs similarity index 100% rename from testing/tests/tests_2/test_with_failures_one.rs rename to testing/tests/tests_5/test_with_failures.rs From 498d8665955871e1414faf2a88b14da0158b21e1 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 2 Oct 2024 16:05:57 -0400 Subject: [PATCH 1234/1393] [CLEANUP] Remove Duplicate Network Task Spawns (#3719) * remove extra network spawn * remove duplicated network event tasks and filters --- hotshot/src/tasks/mod.rs | 74 ++----- task-impls/src/network.rs | 195 +++++++------------ testing/src/byzantine/byzantine_behaviour.rs | 12 +- testing/tests/tests_1/network_task.rs | 17 +- 4 files changed, 90 insertions(+), 208 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 0e7be118f8..e9981b8b5f 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -24,7 +24,7 @@ use hotshot_task_impls::rewind::RewindTaskState; use hotshot_task_impls::{ da::DaTaskState, events::HotShotEvent, - network::{self, NetworkEventTaskState, NetworkMessageTaskState}, + network::{NetworkEventTaskState, NetworkMessageTaskState}, request::NetworkRequestState, response::{run_response_task, NetworkResponseState}, transactions::TransactionTaskState, @@ -163,15 +163,15 @@ pub fn add_network_event_task< NET: ConnectedNetwork, >( handle: &mut SystemContextHandle, - channel: Arc, - membership: TYPES::Membership, - filter: fn(&Arc>) -> bool, + network: Arc, + quorum_membership: TYPES::Membership, + da_membership: TYPES::Membership, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { - channel, + network, view: TYPES::Time::genesis(), - membership, - filter, + quorum_membership, + da_membership, storage: Arc::clone(&handle.storage()), consensus: Arc::clone(&handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), @@ -502,31 +502,7 @@ where handle, Arc::clone(&network), quorum_membership.clone(), - network::quorum_filter, - ); - self.add_network_event_task( - handle, - Arc::clone(&network), - quorum_membership.clone(), - network::upgrade_filter, - ); - self.add_network_event_task( - handle, - Arc::clone(&network), da_membership, - network::da_filter, - ); - self.add_network_event_task( - handle, - Arc::clone(&network), - quorum_membership.clone(), - network::view_sync_filter, - ); - self.add_network_event_task( - handle, - Arc::clone(&network), - quorum_membership, - network::vid_filter, ); } @@ -535,10 +511,10 @@ where &self, handle: &mut SystemContextHandle, channel: Arc<>::Network>, - membership: TYPES::Membership, - filter: fn(&Arc>) -> bool, + quorum_membership: TYPES::Membership, + da_membership: TYPES::Membership, ) { - add_network_event_task(handle, channel, membership, filter); + add_network_event_task(handle, channel, quorum_membership, da_membership); } } @@ -561,7 +537,6 @@ pub async fn add_network_message_and_request_receiver_tasks< ) { let network = Arc::clone(&handle.network); - add_network_message_task(handle, &network); add_network_message_task(handle, &network); add_request_network_task(handle).await; @@ -572,38 +547,13 @@ pub async fn add_network_message_and_request_receiver_tasks< pub fn add_network_event_tasks, V: Versions>( handle: &mut SystemContextHandle, ) { - let network = Arc::clone(&handle.network); let quorum_membership = handle.memberships.quorum_membership.clone(); let da_membership = handle.memberships.da_membership.clone(); add_network_event_task( handle, - Arc::clone(&network), - quorum_membership.clone(), - network::quorum_filter, - ); - add_network_event_task( - handle, - Arc::clone(&network), - quorum_membership.clone(), - network::upgrade_filter, - ); - add_network_event_task( - handle, - Arc::clone(&network), - da_membership, - network::da_filter, - ); - add_network_event_task( - handle, - Arc::clone(&network), - quorum_membership.clone(), - network::view_sync_filter, - ); - add_network_event_task( - handle, - Arc::clone(&network), + Arc::clone(&handle.network), quorum_membership, - network::vid_filter, + da_membership, ); } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 718d752f27..dcd7e85945 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -38,63 +38,6 @@ use crate::{ helpers::broadcast_event, }; -/// quorum filter -pub fn quorum_filter(event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::QuorumProposalSend(_, _) - | HotShotEvent::QuorumVoteSend(_) - | HotShotEvent::DacSend(_, _) - | HotShotEvent::TimeoutVoteSend(_) - | HotShotEvent::ViewChange(_) - ) -} - -/// upgrade filter -pub fn upgrade_filter(event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::UpgradeProposalSend(_, _) - | HotShotEvent::UpgradeVoteSend(_) - | HotShotEvent::ViewChange(_) - ) -} - -/// DA filter -pub fn da_filter(event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::DaProposalSend(_, _) - | HotShotEvent::QuorumProposalRequestSend(..) - | HotShotEvent::QuorumProposalResponseSend(..) - | HotShotEvent::VidResponseSend(..) - | HotShotEvent::VidRequestSend(..) - | HotShotEvent::DaVoteSend(_) - | HotShotEvent::ViewChange(_) - ) -} - -/// vid filter -pub fn vid_filter(event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::VidDisperseSend(_, _) | HotShotEvent::ViewChange(_) - ) -} - -/// view sync filter -pub fn view_sync_filter(event: &Arc>) -> bool { - !matches!( - event.as_ref(), - HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) - | HotShotEvent::ViewSyncPreCommitVoteSend(_) - | HotShotEvent::ViewSyncCommitVoteSend(_) - | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::ViewChange(_) - ) -} /// the network message task state #[derive(Clone)] pub struct NetworkMessageTaskState { @@ -235,18 +178,17 @@ impl NetworkMessageTaskState { pub struct NetworkEventTaskState< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage, > { - /// comm channel - pub channel: Arc, + /// comm network + pub network: Arc, /// view number pub view: TYPES::Time, - /// membership for the channel - pub membership: TYPES::Membership, - // TODO ED Need to add exchange so we can get the recipient key and our own key? - /// Filter which returns false for the events that this specific network task cares about - pub filter: fn(&Arc>) -> bool, + /// quorum for the network + pub quorum_membership: TYPES::Membership, + /// da for the network + pub da_membership: TYPES::Membership, /// Storage to store actionable events pub storage: Arc>, /// Shared consensus state @@ -259,9 +201,9 @@ pub struct NetworkEventTaskState< impl< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage + 'static, - > TaskState for NetworkEventTaskState + > TaskState for NetworkEventTaskState { type Event = HotShotEvent; @@ -271,11 +213,7 @@ impl< _sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - let membership = self.membership.clone(); - - if !(self.filter)(&event) { - self.handle(event, &membership).await; - } + self.handle(event).await; Ok(()) } @@ -286,24 +224,20 @@ impl< impl< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage + 'static, - > NetworkEventTaskState + > NetworkEventTaskState { /// Handle the given event. /// /// Returns the completion status. #[instrument(skip_all, fields(view = *self.view), name = "Network Task", level = "error")] - pub async fn handle( - &mut self, - event: Arc>, - membership: &TYPES::Membership, - ) { + pub async fn handle(&mut self, event: Arc>) { let mut maybe_action = None; if let Some((sender, message_kind, transmit)) = - self.parse_event(event, &mut maybe_action, membership).await + self.parse_event(event, &mut maybe_action).await { - self.spawn_transmit_task(message_kind, membership, maybe_action, transmit, sender); + self.spawn_transmit_task(message_kind, maybe_action, transmit, sender); }; } @@ -336,11 +270,11 @@ impl< messages.insert(recipient, serialized_message); } - let net = Arc::clone(&self.channel); + let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let state = Arc::clone(&self.consensus); async_spawn(async move { - if NetworkEventTaskState::::maybe_record_action( + if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), storage, state, @@ -393,7 +327,6 @@ impl< &mut self, event: Arc>, maybe_action: &mut Option, - membership: &TYPES::Membership, ) -> Option<( ::SignatureKey, MessageKind, @@ -419,7 +352,7 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number() + 1)), + TransmitType::Direct(self.quorum_membership.leader(vote.view_number() + 1)), )) } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( @@ -427,7 +360,9 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ProposalRequested(req.clone(), signature), )), - TransmitType::DaCommitteeAndLeaderBroadcast(membership.leader(req.view_number)), + TransmitType::DaCommitteeAndLeaderBroadcast( + self.quorum_membership.leader(req.view_number), + ), )), HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( sender_key.clone(), @@ -457,10 +392,9 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number())), + TransmitType::Direct(self.quorum_membership.leader(vote.view_number())), )) } - // ED NOTE: This needs to be broadcasted to all nodes, not just ones on the DA committee HotShotEvent::DacSend(certificate, sender) => { *maybe_action = Some(HotShotAction::DaCert); Some(( @@ -476,21 +410,30 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + vote.date().relay), + ), )), HotShotEvent::ViewSyncCommitVoteSend(vote) => Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + vote.date().relay), + ), )), HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number() + vote.date().relay)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + vote.date().relay), + ), )), HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( sender, @@ -520,7 +463,7 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number() + 1)), + TransmitType::Direct(self.quorum_membership.leader(vote.view_number() + 1)), )) } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( @@ -537,13 +480,13 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Direct(membership.leader(vote.view_number())), + TransmitType::Direct(self.quorum_membership.leader(vote.view_number())), )) } HotShotEvent::ViewChange(view) => { self.view = view; - self.channel - .update_view::(self.view.u64(), membership) + self.network + .update_view::(self.view.u64(), &self.quorum_membership) .await; None } @@ -570,7 +513,6 @@ impl< fn spawn_transmit_task( &self, message_kind: MessageKind, - membership: &TYPES::Membership, maybe_action: Option, transmit: TransmitType, sender: TYPES::SignatureKey, @@ -587,14 +529,14 @@ impl< kind: message_kind, }; let view = message.kind.view_number(); - let committee = membership.committee_members(view); - let committee_topic = membership.committee_topic(); - let net = Arc::clone(&self.channel); + let committee_topic = self.quorum_membership.committee_topic(); + let da_committee = self.da_membership.committee_members(view); + let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let state = Arc::clone(&self.consensus); let upgrade_lock = self.upgrade_lock.clone(); async_spawn(async move { - if NetworkEventTaskState::::maybe_record_action( + if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), state, @@ -631,23 +573,19 @@ impl< .await } TransmitType::DaCommitteeBroadcast => { - net.da_broadcast_message(serialized_message, committee, broadcast_delay) + net.da_broadcast_message(serialized_message, da_committee, broadcast_delay) .await } TransmitType::DaCommitteeAndLeaderBroadcast(recipient) => { - // Short-circuit exit from this call if we get an error during the direct leader broadcast. - // NOTE: An improvement to this is to check if the leader is in the DA committee but it's - // just a single extra message to the leader, so it's not an optimization that we need now. if let Err(e) = net .direct_message(serialized_message.clone(), recipient) .await { error!("Failed to send message from network task: {e:?}"); - return; } // Otherwise, send the next message. - net.da_broadcast_message(serialized_message, committee, broadcast_delay) + net.da_broadcast_message(serialized_message, da_committee, broadcast_delay) .await } }; @@ -685,11 +623,11 @@ pub mod test { pub struct NetworkEventTaskStateModifier< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage, > { /// The real `NetworkEventTaskState` - pub network_event_task_state: NetworkEventTaskState, + pub network_event_task_state: NetworkEventTaskState, /// A function that takes the result of `NetworkEventTaskState::parse_event` and /// changes it before transmitting on the network. pub modifier: Arc>, @@ -698,23 +636,24 @@ pub mod test { impl< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage + 'static, - > NetworkEventTaskStateModifier + > NetworkEventTaskStateModifier { /// Handles the received event modifying it before sending on the network. - pub async fn handle( - &mut self, - event: Arc>, - membership: &TYPES::Membership, - ) { + pub async fn handle(&mut self, event: Arc>) { let mut maybe_action = None; if let Some((mut sender, mut message_kind, mut transmit)) = - self.parse_event(event, &mut maybe_action, membership).await + self.parse_event(event, &mut maybe_action).await { // Modify the values acquired by parsing the event. - (self.modifier)(&mut sender, &mut message_kind, &mut transmit, membership); - self.spawn_transmit_task(message_kind, membership, maybe_action, transmit, sender); + (self.modifier)( + &mut sender, + &mut message_kind, + &mut transmit, + &self.quorum_membership, + ); + self.spawn_transmit_task(message_kind, maybe_action, transmit, sender); } } } @@ -723,9 +662,9 @@ pub mod test { impl< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage + 'static, - > TaskState for NetworkEventTaskStateModifier + > TaskState for NetworkEventTaskStateModifier { type Event = HotShotEvent; @@ -735,11 +674,7 @@ pub mod test { _sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - let membership = self.network_event_task_state.membership.clone(); - - if !(self.network_event_task_state.filter)(&event) { - self.handle(event, &membership).await; - } + self.handle(event).await; Ok(()) } @@ -750,11 +685,11 @@ pub mod test { impl< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage, - > Deref for NetworkEventTaskStateModifier + > Deref for NetworkEventTaskStateModifier { - type Target = NetworkEventTaskState; + type Target = NetworkEventTaskState; fn deref(&self) -> &Self::Target { &self.network_event_task_state @@ -764,9 +699,9 @@ pub mod test { impl< TYPES: NodeType, V: Versions, - COMMCHANNEL: ConnectedNetwork, + NET: ConnectedNetwork, S: Storage, - > DerefMut for NetworkEventTaskStateModifier + > DerefMut for NetworkEventTaskStateModifier { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.network_event_task_state diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 58eb6ed58d..beab8b5ae6 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -340,15 +340,15 @@ impl + std::fmt::Debug, V: Version fn add_network_event_task( &self, handle: &mut SystemContextHandle, - channel: Arc<>::Network>, - membership: TYPES::Membership, - filter: fn(&Arc>) -> bool, + network: Arc<>::Network>, + quorum_membership: TYPES::Membership, + da_membership: TYPES::Membership, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { - channel, + network, view: TYPES::Time::genesis(), - membership, - filter, + quorum_membership, + da_membership, storage: Arc::clone(&handle.storage()), consensus: Arc::clone(&handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index cb623926d1..03a855a05f 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -12,10 +12,7 @@ use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task::task::{ConsensusTaskRegistry, Task}; -use hotshot_task_impls::{ - events::HotShotEvent, - network::{self, NetworkEventTaskState}, -}; +use hotshot_task_impls::{events::HotShotEvent, network::NetworkEventTaskState}; use hotshot_testing::{ helpers::build_system_handle, test_builder::TestDescription, test_task::add_network_message_test_task, view_generator::TestViewGenerator, @@ -63,10 +60,10 @@ async fn test_network_task() { ::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { - channel: network.clone(), + network: network.clone(), view: ViewNumber::new(0), - membership: membership.clone(), - filter: network::quorum_filter, + quorum_membership: membership.clone(), + da_membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -139,10 +136,10 @@ async fn test_network_storage_fail() { ::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { - channel: network.clone(), + network: network.clone(), view: ViewNumber::new(0), - membership: membership.clone(), - filter: network::quorum_filter, + quorum_membership: membership.clone(), + da_membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, From 8bf3db99049561c8627abf3f8051a90b8f8ff644 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 3 Oct 2024 15:00:51 -0400 Subject: [PATCH 1235/1393] gate vote commitment again (#3732) --- .../src/network/behaviours/dht/store.rs | 12 ++++++++++-- types/src/simple_vote.rs | 16 +++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/store.rs b/libp2p-networking/src/network/behaviours/dht/store.rs index 6969ced1ff..cf5c22d61e 100644 --- a/libp2p-networking/src/network/behaviours/dht/store.rs +++ b/libp2p-networking/src/network/behaviours/dht/store.rs @@ -37,8 +37,16 @@ impl RecordStore for ValidatedStore where K: 'static, { - type ProvidedIter<'a> = R::ProvidedIter<'a> where R: 'a, K: 'a; - type RecordsIter<'a> = R::RecordsIter<'a> where R: 'a, K: 'a; + type ProvidedIter<'a> + = R::ProvidedIter<'a> + where + R: 'a, + K: 'a; + type RecordsIter<'a> + = R::RecordsIter<'a> + where + R: 'a, + K: 'a; // Delegate all `RecordStore` methods except `put` to the inner store delegate! { diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 70ff7f6a1c..a86452b9a9 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -11,7 +11,7 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData}; use anyhow::Result; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use vbs::version::Version; +use vbs::version::{StaticVersionType, Version}; use crate::{ data::Leaf, @@ -240,10 +240,16 @@ impl Committable for VersionedVoteData { fn commit(&self) -> Commitment { - committable::RawCommitmentBuilder::new("Vote") - .var_size_bytes(self.data.commit().as_ref()) - .u64(*self.view) - .finalize() + if self.version < V::Marketplace::VERSION { + let bytes: [u8; 32] = self.data.commit().into(); + + Commitment::::from_raw(bytes) + } else { + committable::RawCommitmentBuilder::new("Vote") + .var_size_bytes(self.data.commit().as_ref()) + .u64(*self.view) + .finalize() + } } } From c3bc9335381a37bcb90108f0d0a47265f3eaa776 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:52:45 -0600 Subject: [PATCH 1236/1393] [TECH DEBT] Refactor errors (#3724) * refactor errors * format * doc * doc * unpin patch versions for random deps * add back invalid block header error * self review --- builder-api/Cargo.toml | 2 +- builder-api/src/v0_1/builder.rs | 77 +++-- builder-api/src/v0_3/builder.rs | 11 +- example-types/Cargo.toml | 2 +- example-types/src/block_types.rs | 13 +- examples/Cargo.toml | 2 +- hotshot/Cargo.toml | 2 +- hotshot/src/lib.rs | 8 +- .../src/traits/networking/libp2p_network.rs | 90 +++--- .../src/traits/networking/memory_network.rs | 19 +- .../src/traits/networking/push_cdn_network.rs | 25 +- libp2p-networking/Cargo.toml | 2 +- .../src/network/behaviours/dht/mod.rs | 6 +- libp2p-networking/src/network/error.rs | 112 ------- libp2p-networking/src/network/mod.rs | 13 +- libp2p-networking/src/network/node.rs | 36 +-- libp2p-networking/src/network/node/handle.rs | 277 ++++++------------ libp2p-networking/tests/common/mod.rs | 69 ++--- libp2p-networking/tests/counter.rs | 22 +- task-impls/Cargo.toml | 2 +- task-impls/src/builder.rs | 57 ++-- testing/Cargo.toml | 2 +- testing/src/completion_task.rs | 7 - testing/src/overall_safety_task.rs | 68 ++--- testing/src/spinning_task.rs | 5 - testing/src/txn_task.rs | 5 - testing/src/view_sync_task.rs | 12 +- testing/tests/tests_1/block_builder.rs | 2 +- types/Cargo.toml | 2 +- types/src/consensus.rs | 25 +- types/src/data.rs | 21 +- types/src/error.rs | 94 ++---- types/src/event.rs | 2 +- types/src/traits/network.rs | 164 +++++------ types/src/traits/node_implementation.rs | 4 +- 35 files changed, 450 insertions(+), 810 deletions(-) delete mode 100644 libp2p-networking/src/network/error.rs diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 9ed37b292f..45fc2e89be 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -12,7 +12,7 @@ derive_more = { workspace = true } futures = { workspace = true } hotshot-types = { path = "../types" } serde = { workspace = true } -snafu = { workspace = true } +thiserror = { workspace = true } tagged-base64 = { workspace = true } tide-disco = { workspace = true } toml = { workspace = true } diff --git a/builder-api/src/v0_1/builder.rs b/builder-api/src/v0_1/builder.rs index 6d91698fcc..0e1066a8ba 100644 --- a/builder-api/src/v0_1/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -12,8 +12,8 @@ use derive_more::From; use futures::FutureExt; use hotshot_types::{traits::node_implementation::NodeType, utils::BuilderCommitment}; use serde::{Deserialize, Serialize}; -use snafu::{ResultExt, Snafu}; use tagged_base64::TaggedBase64; +use thiserror::Error; use tide_disco::{api::ApiError, method::ReadState, Api, RequestError, RequestParams, StatusCode}; use vbs::version::StaticVersionType; @@ -40,55 +40,38 @@ pub struct Options { pub extensions: Vec, } -#[derive(Clone, Debug, Snafu, Deserialize, Serialize)] -#[snafu(visibility(pub))] +#[derive(Clone, Debug, Error, Deserialize, Serialize)] pub enum BuildError { - /// The requested resource does not exist or is not known to this builder service. + #[error("The requested resource does not exist or is not known to this builder service")] NotFound, - /// The requested resource exists but is not currently available. + #[error("The requested resource exists but is not currently available")] Missing, - /// There was an error while trying to fetch the requested resource. - #[snafu(display("Failed to fetch requested resource: {message}"))] - Error { message: String }, + #[error("Error trying to fetch the requested resource: {0}")] + Error(String), } -#[derive(Clone, Debug, From, Snafu, Deserialize, Serialize)] -#[snafu(visibility(pub))] +#[derive(Clone, Debug, Error, Deserialize, Serialize)] pub enum Error { - Request { - source: RequestError, - }, - #[snafu(display("error building block from {resource}: {source}"))] - #[from(ignore)] + #[error("Error processing request: {0}")] + Request(#[from] RequestError), + #[error("Error building block from {resource}: {source}")] BlockAvailable { source: BuildError, resource: String, }, - #[snafu(display("error claiming block {resource}: {source}"))] - #[from(ignore)] + #[error("Error claiming block {resource}: {source}")] BlockClaim { source: BuildError, resource: String, }, - #[snafu(display("error unpacking transaction: {source}"))] - #[from(ignore)] - TxnUnpack { - source: RequestError, - }, - #[snafu(display("error submitting transaction: {source}"))] - #[from(ignore)] - TxnSubmit { - source: BuildError, - }, - #[snafu(display("error getting builder address: {source}"))] - #[from(ignore)] - BuilderAddress { - source: BuildError, - }, - Custom { - message: String, - status: StatusCode, - }, + #[error("Error unpacking transactions: {0}")] + TxnUnpack(RequestError), + #[error("Error submitting transaction: {0}")] + TxnSubmit(BuildError), + #[error("Error getting builder address: {0}")] + BuilderAddress(#[from] BuildError), + #[error("Custom error {status}: {message}")] + Custom { message: String, status: StatusCode }, } impl tide_disco::error::Error for Error { @@ -152,7 +135,8 @@ where state .available_blocks(&hash, view_number, sender, &signature) .await - .context(BlockAvailableSnafu { + .map_err(|source| Error::BlockAvailable { + source, resource: hash.to_string(), }) } @@ -167,7 +151,8 @@ where state .claim_block(&block_hash, view_number, sender, &signature) .await - .context(BlockClaimSnafu { + .map_err(|source| Error::BlockClaim { + source, resource: block_hash.to_string(), }) } @@ -182,14 +167,15 @@ where state .claim_block_header_input(&block_hash, view_number, sender, &signature) .await - .context(BlockClaimSnafu { + .map_err(|source| Error::BlockClaim { + source, resource: block_hash.to_string(), }) } .boxed() })? .get("builder_address", |_req, state| { - async move { state.builder_address().await.context(BuilderAddressSnafu) }.boxed() + async move { state.builder_address().await.map_err(|e| e.into()) }.boxed() })?; Ok(api) } @@ -210,9 +196,12 @@ where async move { let tx = req .body_auto::<::Transaction, Ver>(Ver::instance()) - .context(TxnUnpackSnafu)?; + .map_err(Error::TxnUnpack)?; let hash = tx.commit(); - state.submit_txns(vec![tx]).await.context(TxnSubmitSnafu)?; + state + .submit_txns(vec![tx]) + .await + .map_err(Error::TxnSubmit)?; Ok(hash) } .boxed() @@ -221,9 +210,9 @@ where async move { let txns = req .body_auto::::Transaction>, Ver>(Ver::instance()) - .context(TxnUnpackSnafu)?; + .map_err(Error::TxnUnpack)?; let hashes = txns.iter().map(|tx| tx.commit()).collect::>(); - state.submit_txns(txns).await.context(TxnSubmitSnafu)?; + state.submit_txns(txns).await.map_err(Error::TxnSubmit)?; Ok(hashes) } .boxed() diff --git a/builder-api/src/v0_3/builder.rs b/builder-api/src/v0_3/builder.rs index 6c17dd0cee..c98e2f4f8c 100644 --- a/builder-api/src/v0_3/builder.rs +++ b/builder-api/src/v0_3/builder.rs @@ -1,15 +1,11 @@ use futures::FutureExt; use hotshot_types::traits::node_implementation::NodeType; -use snafu::ResultExt; use tide_disco::{api::ApiError, method::ReadState, Api}; use super::{data_source::BuilderDataSource, Version}; use crate::api::load_api; /// No changes to these types -pub use crate::v0_1::builder::{ - submit_api, BlockAvailableSnafu, BlockClaimSnafu, BuildError, BuilderAddressSnafu, Error, - Options, -}; +pub use crate::v0_1::builder::{submit_api, BuildError, Error, Options}; pub fn define_api( options: &Options, @@ -32,7 +28,8 @@ where state .bundle(parent_view, &parent_hash, view_number) .await - .with_context(|_| BlockClaimSnafu { + .map_err(|source| Error::BlockClaim { + source, resource: format!( "Block for parent {parent_hash}@{parent_view} and view {view_number}" ), @@ -41,7 +38,7 @@ where .boxed() })? .get("builder_address", |_req, state| { - async move { state.builder_address().await.context(BuilderAddressSnafu) }.boxed() + async move { state.builder_address().await.map_err(Error::BuilderAddress) }.boxed() })?; Ok(api) } diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index db0cf6c275..1c38092bc1 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -24,7 +24,7 @@ hotshot = { path = "../hotshot" } hotshot-types = { path = "../types" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } rand = { workspace = true } -snafu = { workspace = true } +thiserror = { workspace = true } tracing = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 742c77fb88..dc5d5f2045 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -25,7 +25,7 @@ use hotshot_types::{ use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use sha3::{Digest, Keccak256}; -use snafu::Snafu; +use thiserror::Error; use time::OffsetDateTime; use vbs::version::Version; @@ -41,14 +41,17 @@ use crate::{ #[serde(try_from = "Vec")] pub struct TestTransaction(Vec); -#[derive(Debug, Snafu)] -pub struct TransactionTooLong; +#[derive(Debug, Error)] +pub enum TransactionError { + #[error("Transaction too long")] + TransactionTooLong, +} impl TryFrom> for TestTransaction { - type Error = TransactionTooLong; + type Error = TransactionError; fn try_from(value: Vec) -> Result { - Self::try_new(value).ok_or(TransactionTooLong) + Self::try_new(value).ok_or(TransactionError::TransactionTooLong) } } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 80c0423510..313c6ac746 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,7 +100,7 @@ libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["rc"] } -snafu = { workspace = true } +thiserror = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } derive_more = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index b96a366530..1bbf122a8a 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -48,7 +48,7 @@ lru.workspace = true portpicker = "0.1" rand = { workspace = true } serde = { workspace = true, features = ["rc"] } -snafu = { workspace = true } +thiserror = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index a397656621..fa85394aeb 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -483,11 +483,9 @@ impl, V: Versions> SystemContext Libp2pNetwork { ) -> Result, NetworkError> { let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) .await - .map_err(Into::::into)?; + .map_err(|e| NetworkError::ConfigError(format!("failed to spawn network node: {e}")))?; // Add our own address to the bootstrap addresses let addr = network_handle.listen_addr(); @@ -694,7 +694,7 @@ impl Libp2pNetwork { // Wait for the network to connect to the required number of peers if let Err(e) = handle.wait_to_connect(4, id).await { error!("Failed to connect to peers: {:?}", e); - return Err::<(), NetworkError>(e.into()); + return Err::<(), NetworkError>(e); } info!("Connected to required number of peers"); @@ -716,23 +716,26 @@ impl Libp2pNetwork { ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { - sender - .send(msg) - .await - .map_err(|_| NetworkError::ChannelSend)?; + sender.send(msg).await.map_err(|err| { + NetworkError::ChannelSendError(format!("failed to send gossip message: {err}")) + })?; } DirectRequest(msg, _pid, chan) => { - sender - .send(msg) - .await - .map_err(|_| NetworkError::ChannelSend)?; + sender.send(msg).await.map_err(|err| { + NetworkError::ChannelSendError(format!( + "failed to send direct request message: {err}" + )) + })?; if self .inner .handle .direct_response( chan, - &bincode::serialize(&Empty { byte: 0u8 }) - .map_err(|e| NetworkError::Libp2p { source: e.into() })?, + &bincode::serialize(&Empty { byte: 0u8 }).map_err(|e| { + NetworkError::FailedToSerialize(format!( + "failed to serialize acknowledgement: {e}" + )) + })?, ) .await .is_err() @@ -746,7 +749,11 @@ impl Libp2pNetwork { } NetworkEvent::ResponseRequested(Request(msg), chan) => { let res = request_tx.try_send((msg, chan)); - res.map_err(|_| NetworkError::ChannelSend)?; + res.map_err(|err| { + NetworkError::ChannelSendError(format!( + "failed to respond to a peer's data request: {err}" + )) + })?; } NetworkEvent::ConnectedPeersUpdate(_) => {} } @@ -822,7 +829,7 @@ impl ConnectedNetwork for Libp2pNetwork { // If we're not ready, return an error if !self.is_ready() { self.inner.metrics.num_failed_messages.add(1); - return Err(NetworkError::NotReady); + return Err(NetworkError::NotReadyYet); }; let pid = match self @@ -834,25 +841,24 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(pid) => pid, Err(err) => { self.inner.metrics.num_failed_messages.add(1); - debug!( - "Failed to message {:?} because could not find recipient peer id for pk {:?}", - request, recipient - ); - return Err(NetworkError::Libp2p { - source: Box::new(err), - }); + return Err(NetworkError::LookupError(format!( + "failed to look up node: {err}" + ))); } }; let result = match self.inner.handle.request_data(&request, pid).await { Ok(response) => match response { Some(msg) => { if msg.0.len() < 8 { - return Err(NetworkError::FailedToDeserialize { - source: anyhow!("insufficient bytes"), - }); + return Err(NetworkError::FailedToDeserialize( + "message was too small".to_string(), + )); } - let res: Message = bincode::deserialize(&msg.0) - .map_err(|e| NetworkError::FailedToDeserialize { source: e.into() })?; + let res: Message = bincode::deserialize(&msg.0).map_err(|err| { + NetworkError::FailedToDeserialize(format!( + "failed to serialize request data: {err}" + )) + })?; match res.kind { MessageKind::Data(DataResponse(data)) => data, @@ -863,13 +869,13 @@ impl ConnectedNetwork for Libp2pNetwork { }, Err(e) => { self.inner.metrics.num_failed_messages.add(1); - return Err(e.into()); + return Err(e); } }; - Ok(bincode::serialize(&result).map_err(|e| { + Ok(bincode::serialize(&result).map_err(|err| { self.inner.metrics.num_failed_messages.add(1); - NetworkError::Libp2p { source: e.into() } + NetworkError::FailedToSerialize(format!("failed to serialize request response: {err}")) })?) } @@ -940,7 +946,7 @@ impl ConnectedNetwork for Libp2pNetwork { // If we're not ready, return an error if !self.is_ready() { self.inner.metrics.num_failed_messages.add(1); - return Err(NetworkError::NotReady); + return Err(NetworkError::NotReadyYet); }; // If we are subscribed to the topic, @@ -981,7 +987,7 @@ impl ConnectedNetwork for Libp2pNetwork { if let Err(e) = self.inner.handle.gossip(topic, &message).await { self.inner.metrics.num_failed_messages.add(1); - return Err(e.into()); + return Err(e); } Ok(()) @@ -997,7 +1003,7 @@ impl ConnectedNetwork for Libp2pNetwork { // If we're not ready, return an error if !self.is_ready() { self.inner.metrics.num_failed_messages.add(1); - return Err(NetworkError::NotReady); + return Err(NetworkError::NotReadyYet); }; let future_results = recipients @@ -1008,7 +1014,7 @@ impl ConnectedNetwork for Libp2pNetwork { let errors: Vec<_> = results .into_iter() .filter_map(|r| match r { - Err(NetworkError::Libp2p { source }) => Some(source), + Err(err) => Some(err), _ => None, }) .collect(); @@ -1016,7 +1022,7 @@ impl ConnectedNetwork for Libp2pNetwork { if errors.is_empty() { Ok(()) } else { - Err(NetworkError::Libp2pMulti { sources: errors }) + Err(NetworkError::Multiple(errors)) } } @@ -1025,7 +1031,7 @@ impl ConnectedNetwork for Libp2pNetwork { // If we're not ready, return an error if !self.is_ready() { self.inner.metrics.num_failed_messages.add(1); - return Err(NetworkError::NotReady); + return Err(NetworkError::NotReadyYet); }; // short circuit if we're dming ourselves @@ -1047,13 +1053,9 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(pid) => pid, Err(err) => { self.inner.metrics.num_failed_messages.add(1); - debug!( - "Failed to message {:?} because could not find recipient peer id for pk {:?}", - message, recipient - ); - return Err(NetworkError::Libp2p { - source: Box::new(err), - }); + return Err(NetworkError::LookupError(format!( + "failed to look up node for direct message: {err}" + ))); } }; @@ -1085,7 +1087,7 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(()) => Ok(()), Err(e) => { self.inner.metrics.num_failed_messages.add(1); - Err(e.into()) + Err(e) } } } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index ee3a55684f..e3993073c6 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -316,7 +316,9 @@ impl ConnectedNetwork for MemoryNetwork { .iter() .find(|v| v.value().iter().all(|(k, _)| recipients.contains(k))) .map(|v| v.key().clone()) - .ok_or(NetworkError::NotFound)?; + .ok_or(NetworkError::MessageSendError( + "no topic found for recipients".to_string(), + ))?; self.broadcast_message(message, topic, broadcast_delay) .await @@ -352,18 +354,15 @@ impl ConnectedNetwork for MemoryNetwork { trace!(?recipient, "Delivered message to remote"); Ok(()) } - Err(e) => { - warn!(?e, ?recipient, "Error delivering direct message"); - Err(NetworkError::CouldNotDeliver) - } + Err(e) => Err(NetworkError::MessageSendError(format!( + "error sending direct message to node: {e}", + ))), } } } else { - warn!( - "{:#?} {:#?} {:#?}", - recipient, self.inner.master_map.map, "Node does not exist in map" - ); - Err(NetworkError::NoSuchNode) + Err(NetworkError::MessageSendError( + "node does not exist".to_string(), + )) } } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 229b7e9eb4..76d689b70e 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -44,7 +44,7 @@ use hotshot_types::{ request_response::NetworkMsgResponseChannel, traits::{ metrics::{Counter, Metrics, NoMetrics}, - network::{BroadcastDelay, ConnectedNetwork, PushCdnNetworkError, Topic as HotShotTopic}, + network::{BroadcastDelay, ConnectedNetwork, Topic as HotShotTopic}, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -247,14 +247,14 @@ impl PushCdnNetwork { } // Send the message - // TODO: check if we need to print this error - if self + if let Err(err) = self .client .send_broadcast_message(vec![topic as u8], message) .await - .is_err() { - return Err(NetworkError::CouldNotDeliver); + return Err(NetworkError::MessageReceiveError(format!( + "failed to send broadcast message: {err}" + ))); }; Ok(()) @@ -526,15 +526,15 @@ impl ConnectedNetwork for PushCdnNetwork { } // Send the message - // TODO: check if we need to print this error - if self + if let Err(e) = self .client .send_direct_message(&WrappedSignatureKey(recipient), message) .await - .is_err() { self.metrics.num_failed_messages.add(1); - return Err(NetworkError::CouldNotDeliver); + return Err(NetworkError::MessageSendError(format!( + "failed to send direct message: {e}" + ))); }; Ok(()) @@ -559,10 +559,9 @@ impl ConnectedNetwork for PushCdnNetwork { let message = match message { Ok(message) => message, Err(error) => { - error!("failed to receive message: {error}"); - return Err(NetworkError::PushCdnNetwork { - source: PushCdnNetworkError::FailedToReceive, - }); + return Err(NetworkError::MessageReceiveError(format!( + "failed to receive message: {error}" + ))); } }; diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 288ba0f627..ecf4aa20fb 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -30,7 +30,7 @@ rand = { workspace = true } serde = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } -snafu = { workspace = true } +thiserror = { workspace = true } tide = { version = "0.16", optional = true, default-features = false, features = [ "h1-server", ] } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 1204190d25..a11e0ffbf4 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -166,16 +166,12 @@ impl DHTBehaviour { } /// Publish a key/value to the kv store. /// Once replicated upon all nodes, the caller is notified over - /// `chan`. If there is an error, a [`crate::network::error::DHTError`] is - /// sent instead. + /// `chan` pub fn put_record(&mut self, id: QueryId, query: KadPutQuery) { self.in_progress_put_record_queries.insert(id, query); } /// Retrieve a value for a key from the DHT. - /// Value (serialized) is sent over `chan`, and if a value is not found, - /// a [`crate::network::error::DHTError`] is sent instead. - /// NOTE: noop if `retry_count` is 0 pub fn get_record( &mut self, key: Vec, diff --git a/libp2p-networking/src/network/error.rs b/libp2p-networking/src/network/error.rs deleted file mode 100644 index 05d7857b7f..0000000000 --- a/libp2p-networking/src/network/error.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -//! Contains the [`NetworkError`] snafu types - -use std::fmt::{Debug, Display}; - -use futures::channel::oneshot::Canceled; -use libp2p::{ - gossipsub::PublishError, - kad::{GetRecordError, PutRecordError}, - swarm::DialError, - TransportError, -}; -use snafu::Snafu; - -/// wrapper type for errors generated by the `Network` -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum NetworkError { - /// Error initiating dial of peer - DialError { - /// The underlying source of the error - source: DialError, - }, - /// Error during dialing or listening - Transport { - /// The underlying source of the error - source: TransportError, - }, - /// Error establishing backend connection - TransportLaunch { - /// The underlying source of the error - source: std::io::Error, - }, - /// Error building the gossipsub configuration - #[snafu(display("Error building the gossipsub configuration: {message}"))] - GossipsubConfig { - /// The underlying source of the error - message: String, - }, - /// Error building the gossipsub instance - #[snafu(display("Error building the gossipsub implementation {message}"))] - GossipsubBuild { - /// The underlying source of the error - message: String, - }, - /// Error if one of the channels to or from the swarm is closed - StreamClosed, - /// Error publishing a gossipsub message - PublishError { - /// The underlying source of the error - source: PublishError, - }, - /// Error when there are no known peers to bootstrap off - NoKnownPeers, -} - -/// Error enum for querying store -/// because for some reason, [`libp2p::kad::GetRecordError`] -/// does not derive `Error` -#[derive(Debug, Clone, Snafu)] -#[snafu(visibility(pub))] -pub enum DHTError { - /// Get Record Error - #[snafu(display("DHT GET internal error: {source}"))] - GetRecord { - /// source of error - source: GetRecordWrapperError, - }, - /// Get Record Error - #[snafu(display("DHT PUT internal error: {source}"))] - PutRecord { - /// source of error - source: PutRecordError, - }, - /// nodes disagreed on the value - #[snafu(display("Nodes disagreed on value"))] - Disagreement, - /// could not find 2 or more nodes that had the value - #[snafu(display("Could not find key in DHT"))] - NotFound, - /// request was ignored serverside - CancelledRequest { - /// source of error - source: Canceled, - }, -} - -/// Wrapper Error enum for [`libp2p::kad::GetRecordError`]. -/// [`libp2p::kad::GetRecordError`] does not derive [`std::error::Error`] -/// so in order to feed this into [`DHTError`] and snafu derive, -/// we need a wrapper type -#[derive(Debug, Clone)] -pub enum GetRecordWrapperError { - /// wrapper - GetRecordError { - /// source of error - source: GetRecordError, - }, -} - -impl Display for GetRecordWrapperError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:?}") - } -} - -impl std::error::Error for GetRecordWrapperError {} diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 620ee93bf9..ea9b5adda6 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -8,8 +8,6 @@ pub mod behaviours; /// defines the swarm and network definition (internal) mod def; -/// libp2p network errors -pub mod error; /// functionality of a libp2p network node mod node; /// Alternative Libp2p transport implementations @@ -20,7 +18,7 @@ use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::{self, Sender}; use hotshot_types::{ request_response::{Request, Response}, - traits::signature_key::SignatureKey, + traits::{network::NetworkError, signature_key::SignatureKey}, }; #[cfg(async_executor_impl = "async-std")] use libp2p::dns::async_std::Transport as DnsTransport; @@ -46,11 +44,10 @@ use transport::StakeTableAuthentication; pub use self::{ def::NetworkDef, - error::NetworkError, node::{ - network_node_handle_error, spawn_network_node, GossipConfig, NetworkNode, - NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, - NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, DEFAULT_REPLICATION_FACTOR, + spawn_network_node, GossipConfig, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilder, + NetworkNodeConfigBuilderError, NetworkNodeHandle, NetworkNodeReceiver, + DEFAULT_REPLICATION_FACTOR, }, }; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] @@ -215,7 +212,7 @@ pub async fn gen_transport( DnsTransport::system(transport) } } - .map_err(|e| NetworkError::TransportLaunch { source: e })?; + .map_err(|e| NetworkError::ConfigError(format!("failed to build DNS transport: {e}")))?; Ok(transport .map(|(peer_id, connection), _| (peer_id, StreamMuxerBox::new(connection))) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 47e6e6d5e3..d8488e5a09 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -50,7 +50,6 @@ use libp2p::{ }; use libp2p_identity::PeerId; use rand::{prelude::SliceRandom, thread_rng}; -use snafu::ResultExt; use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ @@ -58,18 +57,15 @@ pub use self::{ GossipConfig, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, DEFAULT_REPLICATION_FACTOR, }, - handle::{ - network_node_handle_error, spawn_network_node, NetworkNodeHandle, NetworkNodeHandleError, - NetworkNodeReceiver, - }, + handle::{spawn_network_node, NetworkNodeHandle, NetworkNodeReceiver}, }; use super::{ behaviours::dht::{ bootstrap::{self, DHTBootstrapTask, InputEvent}, store::ValidatedStore, }, - error::{GossipsubBuildSnafu, GossipsubConfigSnafu, NetworkError, TransportSnafu}, - gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkEvent, NetworkEventInternal, + gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkError, NetworkEvent, + NetworkEventInternal, }; use crate::network::behaviours::{ dht::{DHTBehaviour, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, @@ -135,7 +131,9 @@ impl NetworkNode { &mut self, listen_addr: Multiaddr, ) -> Result { - self.listener_id = Some(self.swarm.listen_on(listen_addr).context(TransportSnafu)?); + self.listener_id = Some(self.swarm.listen_on(listen_addr).map_err(|err| { + NetworkError::ListenError(format!("failed to listen for Libp2p: {err}")) + })?); let addr = loop { if let Some(SwarmEvent::NewListenAddr { address, .. }) = self.swarm.next().await { break address; @@ -213,11 +211,8 @@ impl NetworkNode { .mesh_outbound_min(config.gossip_config.mesh_outbound_min) // Minimum number of outbound peers in mesh .max_transmit_size(config.gossip_config.max_transmit_size) // Maximum size of a message .build() - .map_err(|s| { - GossipsubConfigSnafu { - message: s.to_string(), - } - .build() + .map_err(|err| { + NetworkError::ConfigError(format!("error building gossipsub config: {err:?}")) })?; // - Build a gossipsub network behavior @@ -225,7 +220,9 @@ impl NetworkNode { MessageAuthenticity::Signed(keypair.clone()), gossipsub_config, ) - .map_err(|s| GossipsubBuildSnafu { message: s }.build())?; + .map_err(|err| { + NetworkError::ConfigError(format!("error building gossipsub behaviour: {err:?}")) + })?; // Build a identify network behavior needed for own // node connection information @@ -338,10 +335,7 @@ impl NetworkNode { }) } - /// Publish a key/value to the kv store. - /// Once replicated upon all nodes, the caller is notified over - /// `chan`. If there is an error, a [`super::error::DHTError`] is - /// sent instead. + /// Publish a key/value to the record store. /// /// # Panics /// If the default replication factor is `None` @@ -557,7 +551,7 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .await - .map_err(|_e| NetworkError::StreamClosed)?; + .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } SwarmEvent::ConnectionClosed { connection_id: _, @@ -582,7 +576,7 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .await - .map_err(|_e| NetworkError::StreamClosed)?; + .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } SwarmEvent::Dialing { peer_id, @@ -694,7 +688,7 @@ impl NetworkNode { send_to_client .send(event) .await - .map_err(|_e| NetworkError::StreamClosed)?; + .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } } SwarmEvent::OutgoingConnectionError { diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 5f4311d3f9..5e19c4c2fb 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -7,24 +7,21 @@ use std::{collections::HashSet, fmt::Debug, marker::PhantomData, time::Duration}; use async_compatibility_layer::{ - art::{async_sleep, async_timeout, future::to}, - channel::{Receiver, SendError, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, + art::{async_sleep, async_timeout}, + channel::{Receiver, UnboundedReceiver, UnboundedSender}, }; use futures::channel::oneshot; use hotshot_types::{ request_response::{Request, Response}, - traits::{network::NetworkError as HotshotNetworkError, signature_key::SignatureKey}, + traits::{network::NetworkError, signature_key::SignatureKey}, }; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; -use snafu::{ResultExt, Snafu}; use tracing::{debug, info, instrument}; use crate::network::{ behaviours::dht::record::{Namespace, RecordKey, RecordValue}, - error::{CancelledRequestSnafu, DHTError}, - gen_multiaddr, ClientRequest, NetworkError, NetworkEvent, NetworkNode, NetworkNodeConfig, - NetworkNodeConfigBuilderError, + gen_multiaddr, ClientRequest, NetworkEvent, NetworkNode, NetworkNodeConfig, }; /// A handle containing: @@ -65,8 +62,11 @@ impl NetworkNodeReceiver { /// recv a network event /// # Errors /// Errors if the receiver channel is closed - pub async fn recv(&self) -> Result { - self.receiver.recv().await.context(ReceiverEndedSnafu) + pub async fn recv(&self) -> Result { + self.receiver + .recv() + .await + .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) } /// Add a kill switch to the receiver pub fn set_kill_switch(&mut self, kill_switch: Receiver<()>) { @@ -85,25 +85,24 @@ impl NetworkNodeReceiver { pub async fn spawn_network_node( config: NetworkNodeConfig, id: usize, -) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkNodeHandleError> { +) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkError> { let mut network = NetworkNode::new(config.clone()) .await - .context(NetworkSnafu)?; + .map_err(|e| NetworkError::ConfigError(format!("failed to create network node: {e}")))?; // randomly assigned port let listen_addr = config .bind_address .clone() .unwrap_or_else(|| gen_multiaddr(0)); let peer_id = network.peer_id(); - let listen_addr = network - .start_listen(listen_addr) - .await - .context(NetworkSnafu)?; + let listen_addr = network.start_listen(listen_addr).await.map_err(|e| { + NetworkError::ListenError(format!("failed to start listening on Libp2p: {e}")) + })?; // pin here to force the future onto the heap since it can be large // in the case of flume - let (send_chan, recv_chan) = Box::pin(network.spawn_listeners()) - .await - .context(NetworkSnafu)?; + let (send_chan, recv_chan) = Box::pin(network.spawn_listeners()).await.map_err(|err| { + NetworkError::ListenError(format!("failed to spawn listeners for Libp2p: {err}")) + })?; let receiver = NetworkNodeReceiver { receiver: recv_chan, recv_kill: None, @@ -125,7 +124,7 @@ impl NetworkNodeHandle { /// This is done by sending a message to /// the swarm itself to spin down #[instrument] - pub async fn shutdown(&self) -> Result<(), NetworkNodeHandleError> { + pub async fn shutdown(&self) -> Result<(), NetworkError> { self.send_request(ClientRequest::Shutdown).await?; Ok(()) } @@ -133,7 +132,7 @@ impl NetworkNodeHandle { /// # Errors /// If unable to send via `send_network`. This should only happen /// if the network is shut down. - pub async fn begin_bootstrap(&self) -> Result<(), NetworkNodeHandleError> { + pub async fn begin_bootstrap(&self) -> Result<(), NetworkError> { let req = ClientRequest::BeginBootstrap; self.send_request(req).await } @@ -148,11 +147,12 @@ impl NetworkNodeHandle { /// NOTE: only for debugging purposes currently /// # Errors /// if the client has stopped listening for a response - pub async fn print_routing_table(&self) -> Result<(), NetworkNodeHandleError> { + pub async fn print_routing_table(&self) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetRoutingTable(s); self.send_request(req).await?; - r.await.map_err(|_| NetworkNodeHandleError::RecvError) + r.await + .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) } /// Wait until at least `num_peers` have connected /// @@ -162,7 +162,7 @@ impl NetworkNodeHandle { &self, num_required_peers: usize, node_id: usize, - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { // Wait for the required number of peers to connect loop { // Get the number of currently connected peers @@ -194,7 +194,7 @@ impl NetworkNodeHandle { &self, request: &[u8], peer: PeerId, - ) -> Result, NetworkNodeHandleError> { + ) -> Result, NetworkError> { let (tx, rx) = oneshot::channel(); let req = ClientRequest::DataRequest { request: Request(request.to_vec()), @@ -204,7 +204,8 @@ impl NetworkNodeHandle { self.send_request(req).await?; - rx.await.map_err(|_| NetworkNodeHandleError::RecvError) + rx.await + .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) } /// Send a response to a request with the response channel @@ -214,7 +215,7 @@ impl NetworkNodeHandle { &self, response: Vec, chan: ResponseChannel, - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { let req = ClientRequest::DataResponse { response: Response(response), chan, @@ -226,11 +227,12 @@ impl NetworkNodeHandle { /// NOTE: this should always be called before any `request_response` is initiated /// # Errors /// if the client has stopped listening for a response - pub async fn lookup_pid(&self, peer_id: PeerId) -> Result<(), NetworkNodeHandleError> { + pub async fn lookup_pid(&self, peer_id: PeerId) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::LookupPeer(peer_id, s); self.send_request(req).await?; - r.await.map_err(|_| NetworkNodeHandleError::RecvError) + r.await + .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } /// Looks up a node's `PeerId` by its staking key. Is authenticated through @@ -242,31 +244,30 @@ impl NetworkNodeHandle { &self, key: &[u8], dht_timeout: Duration, - ) -> Result { + ) -> Result { // Create the record key let key = RecordKey::new(Namespace::Lookup, key.to_vec()); // Get the record from the DHT let pid = self.get_record_timeout(key, dht_timeout).await?; - PeerId::from_bytes(&pid).map_err(|_| NetworkNodeHandleError::FailedToDeserialize) + PeerId::from_bytes(&pid).map_err(|err| NetworkError::FailedToDeserialize(err.to_string())) } /// Insert a record into the kademlia DHT /// # Errors - /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize the key or value pub async fn put_record( &self, key: RecordKey, value: RecordValue, - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { // Serialize the key let key = key.to_bytes(); // Serialize the record let value = bincode::serialize(&value) - .map_err(|e| NetworkNodeHandleError::SerializationError { source: e.into() })?; + .map_err(|e| NetworkError::FailedToSerialize(e.to_string()))?; let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::PutDHT { @@ -277,20 +278,19 @@ impl NetworkNodeHandle { self.send_request(req).await?; - r.await.context(CancelledRequestSnafu).context(DHTSnafu) + r.await.map_err(|_| NetworkError::RequestCancelled) } /// Receive a record from the kademlia DHT if it exists. /// Must be replicated on at least 2 nodes /// # Errors - /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key - /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize the key + /// - Will return [`NetworkError::FailedToDeserialize`] when unable to deserialize the returned value pub async fn get_record( &self, key: RecordKey, retry_count: u8, - ) -> Result, NetworkNodeHandleError> { + ) -> Result, NetworkError> { // Serialize the key let serialized_key = key.to_bytes(); @@ -303,105 +303,94 @@ impl NetworkNodeHandle { self.send_request(req).await?; // Map the error - let result = match r.await.context(CancelledRequestSnafu) { - Ok(result) => Ok(result), - Err(e) => Err(e).context(DHTSnafu), - }?; + let result = r.await.map_err(|_| NetworkError::RequestCancelled)?; // Deserialize the record's value let record: RecordValue = bincode::deserialize(&result) - .map_err(|e| NetworkNodeHandleError::DeserializationError { source: e.into() })?; + .map_err(|e| NetworkError::FailedToDeserialize(e.to_string()))?; Ok(record.value().to_vec()) } /// Get a record from the kademlia DHT with a timeout /// # Errors - /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT - /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key - /// - Will return [`NetworkNodeHandleError::DeserializationError`] when unable to deserialize the returned value + /// - Will return [`NetworkError::Timeout`] when times out + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize the key or value + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed pub async fn get_record_timeout( &self, key: RecordKey, timeout: Duration, - ) -> Result, NetworkNodeHandleError> { - let result = async_timeout(timeout, self.get_record(key, 3)).await; - match result { - Err(e) => Err(e).context(TimeoutSnafu), - Ok(r) => r, - } + ) -> Result, NetworkError> { + async_timeout(timeout, self.get_record(key, 3)) + .await + .map_err(|err| NetworkError::Timeout(err.to_string()))? } /// Insert a record into the kademlia DHT with a timeout /// # Errors - /// - Will return [`NetworkNodeHandleError::DHTError`] when encountering an error putting to DHT - /// - Will return [`NetworkNodeHandleError::TimeoutError`] when times out - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize the key or value - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::Timeout`] when times out + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize the key or value + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed pub async fn put_record_timeout( &self, key: RecordKey, value: RecordValue, timeout: Duration, - ) -> Result<(), NetworkNodeHandleError> { - let result = async_timeout(timeout, self.put_record(key, value)).await; - match result { - Err(e) => Err(e).context(TimeoutSnafu), - Ok(r) => r, - } + ) -> Result<(), NetworkError> { + async_timeout(timeout, self.put_record(key, value)) + .await + .map_err(|err| NetworkError::Timeout(err.to_string()))? } /// Subscribe to a topic /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - pub async fn subscribe(&self, topic: String) -> Result<(), NetworkNodeHandleError> { + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + pub async fn subscribe(&self, topic: String) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::Subscribe(topic, Some(s)); self.send_request(req).await?; - r.await.map_err(|_| NetworkNodeHandleError::RecvError) + r.await + .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } /// Unsubscribe from a topic /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - pub async fn unsubscribe(&self, topic: String) -> Result<(), NetworkNodeHandleError> { + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + pub async fn unsubscribe(&self, topic: String) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::Unsubscribe(topic, Some(s)); self.send_request(req).await?; - r.await.map_err(|_| NetworkNodeHandleError::RecvError) + r.await + .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } /// Ignore `peers` when pruning /// e.g. maintain their connection /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - pub async fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkNodeHandleError> { + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + pub async fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkError> { let req = ClientRequest::IgnorePeers(peers); self.send_request(req).await } /// Make a direct request to `peer_id` containing `msg` /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn direct_request( - &self, - pid: PeerId, - msg: &[u8], - ) -> Result<(), NetworkNodeHandleError> { + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` + pub async fn direct_request(&self, pid: PeerId, msg: &[u8]) -> Result<(), NetworkError> { self.direct_request_no_serialize(pid, msg.to_vec()).await } /// Make a direct request to `peer_id` containing `msg` without serializing /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` pub async fn direct_request_no_serialize( &self, pid: PeerId, contents: Vec, - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { let req = ClientRequest::DirectRequest { pid, contents, @@ -412,13 +401,13 @@ impl NetworkNodeHandle { /// Reply with `msg` to a request over `chan` /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` pub async fn direct_response( &self, chan: ResponseChannel>, msg: &[u8], - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { let req = ClientRequest::DirectResponse(chan, msg.to_vec()); self.send_request(req).await } @@ -430,39 +419,39 @@ impl NetworkNodeHandle { /// # Panics /// If channel errors out /// shouldn't happen. - pub async fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkNodeHandleError> { + pub async fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkError> { let req = ClientRequest::Prune(pid); self.send_request(req).await } /// Gossip a message to peers /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` - pub async fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkNodeHandleError> { + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` + pub async fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkError> { self.gossip_no_serialize(topic, msg.to_vec()).await } /// Gossip a message to peers without serializing /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - /// - Will return [`NetworkNodeHandleError::SerializationError`] when unable to serialize `msg` + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` pub async fn gossip_no_serialize( &self, topic: String, msg: Vec, - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { let req = ClientRequest::GossipMsg(topic, msg); self.send_request(req).await } /// Tell libp2p about known network nodes /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed pub async fn add_known_peers( &self, known_peers: Vec<(PeerId, Multiaddr)>, - ) -> Result<(), NetworkNodeHandleError> { + ) -> Result<(), NetworkError> { debug!("Adding {} known peers", known_peers.len()); let req = ClientRequest::AddKnownPeers(known_peers); self.send_request(req).await @@ -471,13 +460,12 @@ impl NetworkNodeHandle { /// Send a client request to the network /// /// # Errors - /// - Will return [`NetworkNodeHandleError::SendError`] when underlying `NetworkNode` has been killed - async fn send_request(&self, req: ClientRequest) -> Result<(), NetworkNodeHandleError> { + /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed + async fn send_request(&self, req: ClientRequest) -> Result<(), NetworkError> { self.send_network .send(req) .await - .map_err(|_| NetworkNodeHandleError::SendError)?; - Ok(()) + .map_err(|err| NetworkError::ChannelSendError(err.to_string())) } /// Returns number of peers this node is connected to @@ -487,7 +475,7 @@ impl NetworkNodeHandle { /// # Panics /// If channel errors out /// shouldn't happen. - pub async fn num_connected(&self) -> Result { + pub async fn num_connected(&self) -> Result { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetConnectedPeerNum(s); self.send_request(req).await?; @@ -501,7 +489,7 @@ impl NetworkNodeHandle { /// # Panics /// If channel errors out /// shouldn't happen. - pub async fn connected_pids(&self) -> Result, NetworkNodeHandleError> { + pub async fn connected_pids(&self) -> Result, NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetConnectedPeers(s); self.send_request(req).await?; @@ -526,92 +514,3 @@ impl NetworkNodeHandle { &self.network_config } } - -/// Error wrapper type for interacting with swarm handle -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] -pub enum NetworkNodeHandleError { - /// Error generating network - NetworkError { - /// source of error - source: NetworkError, - }, - /// Failure to serialize a message - SerializationError { - /// source of error - source: anyhow::Error, - }, - /// Failure to deserialize a message - DeserializationError { - /// source of error - source: anyhow::Error, - }, - /// Error sending request to network - SendError, - /// Error receiving message from network - RecvError, - /// Error building Node config - NodeConfigError { - /// source of error - source: NetworkNodeConfigBuilderError, - }, - /// Error waiting for connections - TimeoutError { - /// source of error - source: to::TimeoutError, - }, - /// Could not connect to the network in time - ConnectTimeout, - /// Error in the kademlia DHT - DHTError { - /// source of error - source: DHTError, - }, - /// The inner [`NetworkNode`] has already been killed - CantKillTwice { - /// dummy source - source: SendError<()>, - }, - /// The network node has been killed - Killed, - /// The receiver was unable to receive a new message - ReceiverEnded { - /// source of error - source: UnboundedRecvError, - }, - /// no known topic matches the hashset of keys - NoSuchTopic, - - /// Deserialization error - FailedToDeserialize, - - /// Signature verification error - FailedToVerify, -} - -impl From for HotshotNetworkError { - fn from(error: NetworkNodeHandleError) -> Self { - match error { - NetworkNodeHandleError::SerializationError { source } => { - HotshotNetworkError::FailedToSerialize { source } - } - NetworkNodeHandleError::DeserializationError { source } => { - HotshotNetworkError::FailedToDeserialize { source } - } - NetworkNodeHandleError::TimeoutError { source } => { - HotshotNetworkError::Timeout { source } - } - NetworkNodeHandleError::Killed => HotshotNetworkError::ShutDown, - source => HotshotNetworkError::Libp2p { - source: Box::new(source), - }, - } - } -} - -/// Re-exports of the snafu errors that [`NetworkNodeHandleError`] can throw -pub mod network_node_handle_error { - pub use super::{ - NetworkSnafu, NodeConfigSnafu, RecvSnafu, SendSnafu, SerializationSnafu, TimeoutSnafu, - }; -} diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index 222ee41795..c4620baf9e 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -16,18 +16,18 @@ use std::{ use async_compatibility_layer::{ art::{async_sleep, async_spawn}, async_primitives::subscribable_mutex::SubscribableMutex, - channel::{bounded, RecvError}, + channel::bounded, logging::{setup_backtrace, setup_logging}, }; use futures::{future::join_all, Future, FutureExt}; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; use libp2p::Multiaddr; use libp2p_identity::PeerId; use libp2p_networking::network::{ - network_node_handle_error::NodeConfigSnafu, spawn_network_node, NetworkEvent, - NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeHandleError, NetworkNodeReceiver, + spawn_network_node, NetworkEvent, NetworkNodeConfigBuilder, NetworkNodeHandle, + NetworkNodeReceiver, }; -use snafu::{ResultExt, Snafu}; +use thiserror::Error; use tracing::{instrument, warn}; #[derive(Clone, Debug)] @@ -48,7 +48,7 @@ pub fn spawn_handler( ) -> impl Future where F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, - RET: Future> + Send + 'static, + RET: Future> + Send + 'static, S: Debug + Default + Send + Clone + 'static, { async_spawn(async move { @@ -111,7 +111,7 @@ pub async fn test_bed< timeout: Duration, ) where FutF: Future, - FutG: Future> + 'static + Send + Sync, + FutG: Future> + 'static + Send + Sync, F: FnOnce(Vec>, Duration) -> FutF, G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, { @@ -206,8 +206,7 @@ pub async fn spin_up_swarms { - #[snafu(display("Channel error {source:?}"))] - Recv { - source: RecvError, - }, - #[snafu(display( - "Timeout while running direct message round. Timed out when {requester} dmed {requestee}" - ))] - DirectTimeout { - requester: usize, - requestee: usize, - }, - #[snafu(display("Timeout while running gossip round. Timed out on {failing:?}."))] - GossipTimeout { - failing: Vec, - }, - #[snafu(display( + #[error("Error with network node handle: {0}")] + HandleError(String), + + #[error("Configuration error: {0}")] + ConfigError(String), + + #[error("The following nodes timed out: {0:?} while {1}")] + Timeout(Vec, String), + + #[error( "Inconsistent state while running test. Expected {expected:?}, got {actual:?} on node {id}" - ))] - State { - id: usize, - expected: S, - actual: S, - }, - #[snafu(display("Handler error while running test. {source:?}"))] - Handle { - source: NetworkNodeHandleError, - }, - #[snafu(display("Failed to spin up nodes. Hit timeout instead. {failing_nodes:?}"))] - SpinupTimeout { - failing_nodes: Vec, - }, - DHTTimeout, + )] + InconsistentState { id: usize, expected: S, actual: S }, } diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 8b8d58a76b..f6e8144ac8 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -13,15 +13,17 @@ use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; -use common::{test_bed, HandleSnafu, HandleWithState, TestError}; -use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; +use common::{test_bed, HandleWithState, TestError}; +use hotshot_types::{ + signature_key::BLSPubKey, + traits::{network::NetworkError, signature_key::SignatureKey}, +}; use libp2p_networking::network::{ behaviours::dht::record::{Namespace, RecordKey, RecordValue}, - NetworkEvent, NetworkNodeHandleError, + NetworkEvent, }; use rand::{rngs::StdRng, seq::IteratorRandom, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; -use snafu::ResultExt; #[cfg(async_executor_impl = "tokio")] use tokio_stream::StreamExt; use tracing::{debug, error, info, instrument, warn}; @@ -75,7 +77,7 @@ fn random_handle( pub async fn counter_handle_network_event( event: NetworkEvent, handle: HandleWithState, -) -> Result<(), NetworkNodeHandleError> { +) -> Result<(), NetworkError> { use CounterMessage::*; use NetworkEvent::*; match event { @@ -194,7 +196,7 @@ async fn run_request_response_increment<'a, K: SignatureKey + 'static>( requester_handle.handle .direct_request(requestee_pid, &bincode::serialize(&CounterMessage::AskForCounter).unwrap()) .await - .context(HandleSnafu)?; + .map_err(|e| TestError::HandleError(format!("failed to send direct request: {e}")))?; match stream.next().await.unwrap() { Ok(()) => {} Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); @@ -206,7 +208,7 @@ async fn run_request_response_increment<'a, K: SignatureKey + 'static>( if s1 == new_state { Ok(()) } else { - Err(TestError::State { + Err(TestError::InconsistentState { id: requester_handle.handle.id(), expected: new_state, actual: s1, @@ -264,7 +266,7 @@ async fn run_gossip_round( .handle .gossip("global".to_string(), &bincode::serialize(&msg).unwrap()) .await - .context(HandleSnafu)?; + .map_err(|e| TestError::HandleError(format!("failed to gossip: {e}")))?; for _ in 0..len - 1 { // wait for all events to finish @@ -287,7 +289,7 @@ async fn run_gossip_round( .map(|h| h.handle) .collect::>(); print_connections(nodes.as_slice()).await; - return Err(TestError::GossipTimeout { failing }); + return Err(TestError::Timeout(failing, "gossiping".to_string())); } Ok(()) @@ -406,7 +408,7 @@ async fn run_dht_rounds( // get the key from the other nodes for handle in handles { - let result: Result, NetworkNodeHandleError> = + let result: Result, NetworkError> = handle.handle.get_record_timeout(key.clone(), timeout).await; match result { Err(e) => { diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index b22adee2fb..71fb1013f5 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -33,7 +33,7 @@ jf-vid = { workspace = true } rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } -snafu = { workspace = true } +thiserror = { workspace = true } surf-disco = { workspace = true } tagged-base64 = { workspace = true } time = { workspace = true } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index b2c07aae75..fba215217b 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -17,47 +17,42 @@ use hotshot_types::{ vid::VidCommitment, }; use serde::{Deserialize, Serialize}; -use snafu::Snafu; use surf_disco::{client::HealthStatus, Client, Url}; use tagged_base64::TaggedBase64; +use thiserror::Error; use vbs::version::StaticVersionType; -#[derive(Debug, Snafu, Serialize, Deserialize)] -/// Represents errors than builder client may return +#[derive(Debug, Error, Serialize, Deserialize)] +/// Represents errors that can occur while interacting with the builder pub enum BuilderClientError { - // NOTE: folds BuilderError::NotFound & builderError::Missing - // into one. Maybe we'll want to handle that separately in - // the future - /// Block not found - #[snafu(display("Requested block not found"))] - NotFound, - /// Generic error while accessing the API, - /// i.e. when API isn't available or compatible - #[snafu(display("Builder API error: {message}"))] - Api { - /// Underlying error - message: String, - }, + /// The requested block was not found + #[error("Requested block not found")] + BlockNotFound, + + /// The requested block was missing + #[error("Requested block was missing")] + BlockMissing, + + /// Generic error while accessing the API + #[error("Builder API error: {0}")] + Api(String), } impl From for BuilderClientError { fn from(value: BuilderApiError) -> Self { match value { - BuilderApiError::Request { source } | BuilderApiError::TxnUnpack { source } => { - Self::Api { - message: source.to_string(), - } + BuilderApiError::Request(source) | BuilderApiError::TxnUnpack(source) => { + Self::Api(source.to_string()) } - BuilderApiError::TxnSubmit { source } | BuilderApiError::BuilderAddress { source } => { - Self::Api { - message: source.to_string(), - } + BuilderApiError::TxnSubmit(source) | BuilderApiError::BuilderAddress(source) => { + Self::Api(source.to_string()) } - BuilderApiError::Custom { message, .. } => Self::Api { message }, + BuilderApiError::Custom { message, .. } => Self::Api(message), BuilderApiError::BlockAvailable { source, .. } | BuilderApiError::BlockClaim { source, .. } => match source { - BuildError::NotFound | BuildError::Missing => Self::NotFound, - BuildError::Error { message } => Self::Api { message }, + BuildError::NotFound => Self::BlockNotFound, + BuildError::Missing => Self::BlockMissing, + BuildError::Error(message) => Self::Api(message), }, } } @@ -110,7 +105,7 @@ impl BuilderClient { /// Query builder for available blocks /// /// # Errors - /// - [`BuilderClientError::NotFound`] if blocks aren't available for this parent + /// - [`BuilderClientError::BlockNotFound`] if blocks aren't available for this parent /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly pub async fn available_blocks( &self, @@ -150,7 +145,7 @@ pub mod v0_1 { /// Claim block header input /// /// # Errors - /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::BlockNotFound`] if block isn't available /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly pub async fn claim_block_header_input( &self, @@ -172,7 +167,7 @@ pub mod v0_1 { /// Claim block /// /// # Errors - /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::BlockNotFound`] if block isn't available /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly pub async fn claim_block( &self, @@ -221,7 +216,7 @@ pub mod v0_3 { /// Claim block /// /// # Errors - /// - [`BuilderClientError::NotFound`] if block isn't available + /// - [`BuilderClientError::BlockNotFound`] if block isn't available /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly pub async fn bundle( &self, diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 02ed054d5d..0bc5566167 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -42,7 +42,7 @@ rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } sha3 = "^0.10" -snafu = { workspace = true } +thiserror = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index f6f68f3db6..711794d76b 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -11,18 +11,11 @@ use async_compatibility_layer::art::{async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use hotshot_task_impls::helpers::broadcast_event; -use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use crate::test_task::TestEvent; -/// the idea here is to run as long as we want - -/// Completion Task error -#[derive(Snafu, Debug)] -pub struct CompletionTaskErr {} - /// Completion task state pub struct CompletionTask { pub tx: Sender, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index f259215730..3cbe364517 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -22,7 +22,7 @@ use hotshot_types::{ traits::node_implementation::{ConsensusTime, NodeType, Versions}, vid::VidCommitment, }; -use snafu::Snafu; +use thiserror::Error; use tracing::error; use crate::{ @@ -46,48 +46,38 @@ pub enum ViewStatus { } /// possible errors -#[derive(Snafu, Debug, Clone)] +#[derive(Error, Debug, Clone)] pub enum OverallSafetyTaskErr { - /// inconsistent txn nums - InconsistentTxnsNum { - /// number of transactions -> number of nodes reporting that number - map: HashMap, - }, - /// too many failed views - TooManyFailures { - /// vec of failed views - failed_views: HashSet, - }, - /// not enough decides - NotEnoughDecides { - /// expected number of decides - expected: usize, - /// actual number of decides - got: usize, - }, - /// mismatched leaves for a view + #[error("Mismatched leaf")] MismatchedLeaf, - /// mismatched states for a view - InconsistentStates, - /// mismatched blocks for a view + + #[error("Inconsistent blocks")] InconsistentBlocks, - /// not enough failures. this likely means there is an issue in the test - NotEnoughFailures { - expected: usize, - failed_views: HashSet, - }, - /// mismatched expected failed view vs actual failed view + #[error("Inconsistent number of transactions: {map:?}")] + InconsistentTxnsNum { map: HashMap }, + + #[error("Not enough decides: got: {got}, expected: {expected}")] + NotEnoughDecides { got: usize, expected: usize }, + + #[error("Too many view failures: {0:?}")] + TooManyFailures(HashSet), + + #[error("Inconsistent failed views: expected: {expected_failed_views:?}, actual: {actual_failed_views:?}")] InconsistentFailedViews { - expected_failed_views: HashSet, + expected_failed_views: Vec, actual_failed_views: HashSet, }, - /// This is a case where we have too many failed + succesful views over round results - /// This should never be the case and requires debugging if we see this get thrown + #[error( + "Not enough round results: results_count: {results_count}, views_count: {views_count}" + )] NotEnoughRoundResults { results_count: usize, views_count: usize, }, + + #[error("View timed out")] + ViewTimeout, } /// Data availability task state @@ -113,9 +103,9 @@ impl, V: Versions> self.ctx.failed_views.insert(view_number); if self.ctx.failed_views.len() > num_failed_views { let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - self.error = Some(Box::new(OverallSafetyTaskErr::::TooManyFailures { - failed_views: self.ctx.failed_views.clone(), - })); + self.error = Some(Box::new(OverallSafetyTaskErr::::TooManyFailures( + self.ctx.failed_views.clone(), + ))); } else if !expected_views_to_fail.is_empty() { match expected_views_to_fail.entry(view_number) { Entry::Occupied(mut view_seen) => { @@ -183,7 +173,7 @@ impl, V: Versions> TestTas } } EventType::ReplicaViewTimeout { view_number } => { - let error = Arc::new(HotShotError::::ViewTimeoutError { + let error = Arc::new(HotShotError::::ViewTimedOut { view_number, state: RoundTimedoutState::TestCollectRoundEventsTimedOut, }); @@ -281,9 +271,9 @@ impl, V: Versions> TestTas } if self.ctx.failed_views.len() + num_incomplete_views > num_failed_rounds_total { - return TestResult::Fail(Box::new(OverallSafetyTaskErr::::TooManyFailures { - failed_views: self.ctx.failed_views.clone(), - })); + return TestResult::Fail(Box::new(OverallSafetyTaskErr::::TooManyFailures( + self.ctx.failed_views.clone(), + ))); } if !expected_views_to_fail diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index f134376209..9c0efc2a43 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -36,7 +36,6 @@ use hotshot_types::{ vote::HasViewNumber, ValidatorConfig, }; -use snafu::Snafu; use crate::{ test_runner::{LateNodeContext, LateNodeContextParameters, LateStartNode, Node, TestRunner}, @@ -46,10 +45,6 @@ use crate::{ /// convience type for state and block pub type StateAndBlock = (Vec, Vec); -/// error for the spinning task -#[derive(Snafu, Debug)] -pub struct SpinningTaskErr {} - /// Spinning task state pub struct SpinningTask< TYPES: NodeType, diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 20e46c2baa..8f363847d0 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -14,7 +14,6 @@ use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; use hotshot_types::traits::node_implementation::{NodeType, Versions}; use rand::thread_rng; -use snafu::Snafu; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; @@ -23,10 +22,6 @@ use crate::{test_runner::Node, test_task::TestEvent}; // the obvious idea here is to pass in a "stream" that completes every `n` seconds // the stream construction can definitely be fancier but that's the baseline idea -/// Data Availability task error -#[derive(Snafu, Debug)] -pub struct TxnTaskErr {} - /// state of task that decides when things are completed pub struct TxnTask, V: Versions> { // TODO should this be in a rwlock? Or maybe a similar abstraction to the registry is in order diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index b107fc68f9..733164d341 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -10,15 +10,15 @@ use anyhow::Result; use async_trait::async_trait; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; -use snafu::Snafu; +use thiserror::Error; use crate::test_task::{TestResult, TestTaskState}; /// `ViewSync` Task error -#[derive(Snafu, Debug, Clone)] -pub struct ViewSyncTaskErr { - /// set of node ids that hit view sync - hit_view_sync: HashSet, +#[derive(Error, Debug, Clone)] +pub enum ViewSyncTaskError { + #[error("{} nodes hit view sync", hit_view_sync.len())] + HitViewSync { hit_view_sync: HashSet }, } /// `ViewSync` task state @@ -70,7 +70,7 @@ impl> TestTaskState if min <= num_hits && num_hits <= max { TestResult::Pass } else { - TestResult::Fail(Box::new(ViewSyncTaskErr { + TestResult::Fail(Box::new(ViewSyncTaskError::HitViewSync { hit_view_sync: self.hit_view_sync.clone(), })) } diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index db878c078a..9be33d78a6 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -115,5 +115,5 @@ async fn test_random_block_builder() { &signature, ) .await; - assert!(matches!(result, Err(BuilderClientError::NotFound))); + assert!(matches!(result, Err(BuilderClientError::BlockNotFound))); } diff --git a/types/Cargo.toml b/types/Cargo.toml index 652c035e89..e34b82bf8b 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -33,7 +33,7 @@ lazy_static = { workspace = true } memoize = { workspace = true } rand = { workspace = true } sha2 = { workspace = true } -snafu = { workspace = true } +thiserror = { workspace = true } time = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 3fc23951e8..26d7d8ca3f 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -660,16 +660,15 @@ impl Consensus { ) -> bool, { let mut next_leaf = if let Some(view) = self.validated_state_map.get(&start_from) { - view.leaf_commitment() - .ok_or_else(|| HotShotError::InvalidState { - context: format!( - "Visited failed view {start_from:?} leaf. Expected successful leaf" - ), - })? + view.leaf_commitment().ok_or_else(|| { + HotShotError::InvalidState(format!( + "Visited failed view {start_from:?} leaf. Expected successful leaf" + )) + })? } else { - return Err(HotShotError::InvalidState { - context: format!("View {start_from:?} leaf does not exist in state map "), - }); + return Err(HotShotError::InvalidState(format!( + "View {start_from:?} leaf does not exist in state map " + ))); }; while let Some(leaf) = self.saved_leaves.get(&next_leaf) { @@ -696,12 +695,12 @@ impl Consensus { } } } else { - return Err(HotShotError::InvalidState { - context: format!("View {view:?} state does not exist in state map "), - }); + return Err(HotShotError::InvalidState(format!( + "View {view:?} state does not exist in state map" + ))); } } - Err(HotShotError::LeafNotFound {}) + Err(HotShotError::MissingLeaf(next_leaf)) } /// Garbage collects based on state change right now, this removes from both the diff --git a/types/src/data.rs b/types/src/data.rs index a51b512ae4..d05760bf7a 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -28,7 +28,7 @@ use derivative::Derivative; use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; -use snafu::Snafu; +use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; @@ -88,6 +88,12 @@ impl ConsensusTime for ViewNumber { } } +impl Display for ViewNumber { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + impl Committable for ViewNumber { fn commit(&self) -> Commitment { let builder = RawCommitmentBuilder::new("View Number Commitment"); @@ -393,13 +399,14 @@ impl HasViewNumber for UpgradeProposal { } /// The error type for block and its transactions. -#[derive(Snafu, Debug, Serialize, Deserialize)] +#[derive(Error, Debug, Serialize, Deserialize)] pub enum BlockError { - /// Invalid block header. - InvalidBlockHeader, - /// Invalid transaction length. - InvalidTransactionLength, - /// Inconsistent payload commitment. + /// The block header is invalid + #[error("Invalid block header: {0}")] + InvalidBlockHeader(String), + + /// The payload commitment does not match the block header's payload commitment + #[error("Inconsistent payload commitment")] InconsistentPayloadCommitment, } diff --git a/types/src/error.rs b/types/src/error.rs index a73b0e1a88..f76c46906f 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -9,88 +9,44 @@ //! This module provides [`HotShotError`], which is an enum representing possible faults that can //! occur while interacting with this crate. -//use crate::traits::network::TimeoutErr; -use std::num::NonZeroU64; - -#[cfg(async_executor_impl = "async-std")] -use async_std::future::TimeoutError; +use committable::Commitment; use serde::{Deserialize, Serialize}; -use snafu::Snafu; -#[cfg(async_executor_impl = "tokio")] -use tokio::time::error::Elapsed as TimeoutError; +use thiserror::Error; -use crate::traits::{block_contents::BlockPayload, node_implementation::NodeType}; +use crate::{data::Leaf, traits::node_implementation::NodeType}; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} /// Error type for `HotShot` -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] +#[derive(Debug, Error)] #[non_exhaustive] pub enum HotShotError { - /// Failed to Message the leader in the given stage - #[snafu(display("Failed to message leader with error: {source}"))] - FailedToMessageLeader { - /// The underlying network fault - source: crate::traits::network::NetworkError, - }, - /// Failed to broadcast a message on the network - #[snafu(display("Failed to broadcast a message"))] - FailedToBroadcast { - /// The underlying network fault - source: crate::traits::network::NetworkError, - }, - /// Failure in the block. - #[snafu(display("Failed to build or verify a block: {source}"))] - BlockError { - /// The underlying block error. - source: >::Error, - }, - /// Failure in networking layer - #[snafu(display("Failure in networking layer: {source}"))] - NetworkFault { - /// Underlying network fault - source: crate::traits::network::NetworkError, - }, - /// Item was not present in storage - LeafNotFound {/* TODO we should create a way to to_string */}, - /// Error accessing storage - /// Invalid state machine state - #[snafu(display("Invalid state machine state: {}", context))] - InvalidState { - /// Context - context: String, - }, - /// HotShot timed out waiting for msgs - TimeoutError { - /// source of error - source: TimeoutError, - }, - /// HotShot timed out during round - ViewTimeoutError { - /// view number + /// The consensus state machine is in an invalid state + #[error("Invalid state: {0}")] + InvalidState(String), + + /// Leaf was not present in storage + #[error("Missing leaf with commitment: {0}")] + MissingLeaf(Commitment>), + + /// Failed to serialize data + #[error("Failed to serialize: {0}")] + FailedToSerialize(String), + + /// Failed to deserialize data + #[error("Failed to deserialize: {0}")] + FailedToDeserialize(String), + + /// The view timed out + #[error("View {view_number} timed out: {state:?}")] + ViewTimedOut { + /// The view number that timed out view_number: TYPES::Time, /// The state that the round was in when it timed out state: RoundTimedoutState, }, - /// Not enough valid signatures for a quorum - #[snafu(display("Insufficient number of valid signatures: the threshold is {}, but only {} signatures were valid", threshold, num_valid_signatures))] - InsufficientValidSignatures { - /// Number of valid signatures - num_valid_signatures: usize, - /// Threshold of signatures needed for a quorum - threshold: NonZeroU64, - }, - /// Miscellaneous error - Misc { - /// source of error - context: String, - }, - /// Failed to serialize message - FailedToSerialize, - /// Internal value used to drive the state machine - Continue, } + /// Contains information about what the state of the hotshot-consensus was when a round timed out #[derive(Debug, Clone, Serialize, Deserialize)] #[non_exhaustive] diff --git a/types/src/event.rs b/types/src/event.rs index 8f027b7370..84f052d651 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -89,7 +89,7 @@ pub mod error_adaptor { deserializer: D, ) -> Result>, D::Error> { let str = String::deserialize(deserializer)?; - Ok(Arc::new(HotShotError::Misc { context: str })) + Ok(Arc::new(HotShotError::FailedToDeserialize(str))) } } /// The type and contents of a status event emitted by a `HotShot` instance diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index c67644eecd..c742eb8c21 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -9,16 +9,13 @@ //! Contains types and traits used by `HotShot` to abstract over network access use async_compatibility_layer::art::async_sleep; -#[cfg(async_executor_impl = "async-std")] -use async_std::future::TimeoutError; use derivative::Derivative; use dyn_clone::DynClone; use futures::{ channel::mpsc::{self}, Future, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::time::error::Elapsed as TimeoutError; +use thiserror::Error; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} @@ -39,7 +36,6 @@ use rand::{ prelude::Distribution, }; use serde::{Deserialize, Serialize}; -use snafu::Snafu; use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ @@ -49,31 +45,9 @@ use crate::{ BoxSyncFuture, }; -/// for any errors we decide to add to memory network -#[derive(Debug, Snafu, Serialize, Deserialize)] -#[snafu(visibility(pub))] -pub enum MemoryNetworkError { - /// stub - Stub, -} - -/// Centralized server specific errors -#[derive(Debug, Snafu, Serialize, Deserialize)] -#[snafu(visibility(pub))] -pub enum CentralizedServerNetworkError { - /// The centralized server could not find a specific message. - NoMessagesInQueue, -} - /// Centralized server specific errors -#[derive(Debug, Snafu, Serialize, Deserialize)] -#[snafu(visibility(pub))] -pub enum PushCdnNetworkError { - /// Failed to receive a message from the server - FailedToReceive, - /// Failed to send a message to the server - FailedToSend, -} +#[derive(Debug, Error, Serialize, Deserialize)] +pub enum PushCdnNetworkError {} /// the type of transmission #[derive(Debug, Clone, Serialize, Deserialize)] @@ -88,74 +62,68 @@ pub enum TransmitType { DaCommitteeAndLeaderBroadcast(TYPES::SignatureKey), } -/// Error type for networking -#[derive(Debug, Snafu)] -#[snafu(visibility(pub))] +/// Errors that can occur in the network +#[derive(Debug, Error)] pub enum NetworkError { - /// Libp2p specific errors - Libp2p { - /// source of error - source: Box, - }, - /// collection of libp2p specific errors - Libp2pMulti { - /// sources of errors - sources: Vec>, - }, - /// memory network specific errors - MemoryNetwork { - /// source of error - source: MemoryNetworkError, - }, - /// Push CDN network-specific errors - PushCdnNetwork { - /// source of error - source: PushCdnNetworkError, - }, - /// Centralized server specific errors - CentralizedServer { - /// source of error - source: CentralizedServerNetworkError, - }, - /// unimplemented functionality - UnimplementedFeature, - /// Could not deliver a message to a specified recipient - CouldNotDeliver, - /// Attempted to deliver a message to an unknown node - NoSuchNode, - /// No bootstrap nodes were specified on network creation - NoBootstrapNodesSpecified, - /// Failed to serialize a network message - FailedToSerialize { - /// Originating bincode error - source: anyhow::Error, - }, - /// Failed to deserealize a network message - FailedToDeserialize { - /// originating bincode error - source: anyhow::Error, - }, - /// A timeout occurred - Timeout { - /// Source of error - source: TimeoutError, - }, - /// Error sending output to consumer of NetworkingImplementation - /// TODO this should have more information - ChannelSend, - /// The underlying connection has been shut down + /// Multiple errors. Allows us to roll up multiple errors into one. + #[error("Multiple errors: {0:?}")] + Multiple(Vec), + + /// A configuration error + #[error("Configuration error: {0}")] + ConfigError(String), + + /// An error occurred while sending a message + #[error("Failed to send message: {0}")] + MessageSendError(String), + + /// An error occurred while receiving a message + #[error("Failed to receive message: {0}")] + MessageReceiveError(String), + + /// The feature is unimplemented + #[error("Unimplemented")] + Unimplemented, + + /// An error occurred while attempting to listen + #[error("Listen error: {0}")] + ListenError(String), + + /// Failed to send over a channel + #[error("Channel send error: {0}")] + ChannelSendError(String), + + /// Failed to receive over a channel + #[error("Channel receive error: {0}")] + ChannelReceiveError(String), + + /// The network has been shut down and can no longer be used + #[error("Network has been shut down")] ShutDown, - /// unable to cancel a request, the request has already been cancelled - UnableToCancel, - /// The requested data was not found - NotFound, - /// Multiple errors - MultipleErrors { - /// vec of errors - errors: Vec>, - }, - /// The network is not ready yet - NotReady, + + /// Failed to serialize + #[error("Failed to serialize: {0}")] + FailedToSerialize(String), + + /// Failed to deserialize + #[error("Failed to deserialize: {0}")] + FailedToDeserialize(String), + + /// Timed out performing an operation + #[error("Timeout: {0}")] + Timeout(String), + + /// The network request had been cancelled before it could be fulfilled + #[error("The request was cancelled before it could be fulfilled")] + RequestCancelled, + + /// The network was not ready yet + #[error("The network was not ready yet")] + NotReadyYet, + + /// Failed to look up a node on the network + #[error("Node lookup failed: {0}")] + LookupError(String), } /// common traits we would like our network messages to implement @@ -284,7 +252,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st let errors: Vec<_> = results .into_iter() .filter_map(|r| match r { - Err(error) => Some(Box::new(error)), + Err(error) => Some(error), _ => None, }) .collect(); @@ -292,7 +260,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st if errors.is_empty() { Ok(()) } else { - Err(NetworkError::MultipleErrors { errors }) + Err(NetworkError::Multiple(errors)) } } @@ -313,7 +281,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st _request: Vec, _recipient: &K, ) -> Result, NetworkError> { - Err(NetworkError::UnimplementedFeature) + Err(NetworkError::Unimplemented) } /// Spawn a request task in the given network layer. If it supports diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 1bb9bcb3fc..060df7adf3 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -10,7 +10,7 @@ //! describing the overall behavior of a node, as a composition of implementations of the node trait. use std::{ - fmt::Debug, + fmt::{Debug, Display}, hash::Hash, ops::{self, Deref, Sub}, sync::Arc, @@ -210,7 +210,7 @@ pub trait NodeType: /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. - type Time: ConsensusTime; + type Time: ConsensusTime + Display; /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: Debug From aefb8b0d5fec92a62445c57eef8cde7fd83ceda1 Mon Sep 17 00:00:00 2001 From: lukeiannucci Date: Wed, 9 Oct 2024 09:45:53 -0600 Subject: [PATCH 1237/1393] [CATCHUP] Generate Network on the fly for restart nodes (#3723) * generate network for restarted nodes in spinning task * cleanup --- task-impls/src/network.rs | 4 +--- testing/src/spinning_task.rs | 29 +++++++++++++++++++++-------- testing/src/test_runner.rs | 10 +++++----- testing/tests/tests_2/catchup.rs | 4 ---- types/src/traits/network.rs | 3 ++- 5 files changed, 29 insertions(+), 21 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index dcd7e85945..91831ccb1b 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -360,9 +360,7 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ProposalRequested(req.clone(), signature), )), - TransmitType::DaCommitteeAndLeaderBroadcast( - self.quorum_membership.leader(req.view_number), - ), + TransmitType::Broadcast, )), HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( sender_key.clone(), diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 9c0efc2a43..eafc52fbb2 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -30,7 +30,7 @@ use hotshot_types::{ event::Event, simple_certificate::QuorumCertificate, traits::{ - network::ConnectedNetwork, + network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }, vote::HasViewNumber, @@ -38,6 +38,7 @@ use hotshot_types::{ }; use crate::{ + test_launcher::Network, test_runner::{LateNodeContext, LateNodeContextParameters, LateStartNode, Node, TestRunner}, test_task::{TestResult, TestTaskState}, }; @@ -68,6 +69,8 @@ pub struct SpinningTask< pub(crate) async_delay_config: DelayConfig, /// Context stored for nodes to be restarted with pub(crate) restart_contexts: HashMap>, + /// Generate network channel for restart nodes + pub(crate) channel_generator: AsyncGenerator>, } #[async_trait] @@ -127,6 +130,13 @@ where let node_id = idx.try_into().unwrap(); if let Some(node) = self.late_start.remove(&node_id) { tracing::error!("Node {} spinning up late", idx); + let network = if let Some(network) = node.network { + network + } else { + let generated_network = (self.channel_generator)(node_id).await; + generated_network.wait_for_ready().await; + generated_network + }; let node_id = idx.try_into().unwrap(); let context = match node.context { LateNodeContext::InitializedContext(context) => context, @@ -162,9 +172,10 @@ where // For tests, make the node DA based on its index node_id < config.da_staked_committee_size as u64, ); + TestRunner::add_node_with_config( node_id, - node.network.clone(), + network.clone(), memberships, initializer, config, @@ -186,7 +197,7 @@ where // safety task. let node = Node { node_id, - network: node.network, + network, handle, }; node.handle.hotshot.start_consensus().await; @@ -205,13 +216,15 @@ where if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); node.handle.shut_down().await; + // For restarted nodes generate the network on correct view + let generated_network = (self.channel_generator)(node_id).await; let Some(LateStartNode { - network, + network: _, context: LateNodeContext::Restart, }) = self.late_start.get(&node_id) else { - panic!("Restated Nodes must have an unitialized context"); + panic!("Restarted Nodes must have an unitialized context"); }; let storage = node.handle.storage().clone(); @@ -250,7 +263,7 @@ where let context = TestRunner::::add_node_with_config_and_channels( node_id, - network.clone(), + generated_network.clone(), (*memberships).clone(), initializer, config, @@ -266,7 +279,7 @@ where .await; if delay_views == 0 { new_nodes.push((context, idx)); - new_networks.push(network.clone()); + new_networks.push(generated_network.clone()); } else { let up_view = view_number + delay_views; let change = ChangeNode { @@ -276,7 +289,7 @@ where self.changes.entry(up_view).or_default().push(change); let new_ctx = RestartContext { context, - network: network.clone(), + network: generated_network.clone(), }; self.restart_contexts.insert(idx, new_ctx); } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 8ef534860f..16a649060e 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -195,6 +195,7 @@ where .await, async_delay_config: self.launcher.metadata.async_delay_config, restart_contexts: HashMap::new(), + channel_generator: self.launcher.resource_generator.channel_generator, }; let spinning_task = TestTask::>::new( spinning_task_state, @@ -481,7 +482,7 @@ where self.late_start.insert( node_id, LateStartNode { - network, + network: None, context: LateNodeContext::UninitializedContext( LateNodeContextParameters { storage, @@ -520,7 +521,7 @@ where self.late_start.insert( node_id, LateStartNode { - network, + network: Some(network), context: LateNodeContext::InitializedContext(hotshot), }, ); @@ -546,8 +547,7 @@ where self.late_start.insert( *node_id, LateStartNode { - network: (self.launcher.resource_generator.channel_generator)(*node_id) - .await, + network: None, context: LateNodeContext::Restart, }, ); @@ -714,7 +714,7 @@ pub enum LateNodeContext, /// A yet-to-be-started node that participates in tests pub struct LateStartNode, V: Versions> { /// The underlying network belonging to the node - pub network: Network, + pub network: Option>, /// Either the context to which we will use to launch HotShot for initialized node when it's /// time, or the parameters that will be used to initialize the node and launch HotShot. pub context: LateNodeContext, diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 4e4cb38f99..ff0aeb08b6 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -9,10 +9,6 @@ use std::time::Duration; use hotshot_example_types::node_types::{ CombinedImpl, PushCdnImpl, TestTypes, TestTypesRandomizedLeader, TestVersions, }; -#[cfg(feature = "dependency-tasks")] -use hotshot_example_types::testable_delay::{ - DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, -}; use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index c742eb8c21..2c27a417e6 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -323,7 +323,8 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st } /// A channel generator for types that need asynchronous execution -pub type AsyncGenerator = Pin Pin>>>>; +pub type AsyncGenerator = + Pin Pin + Send>> + Send + Sync>>; /// Describes additional functionality needed by the test network implementation pub trait TestableNetworkingImplementation From e0fe9d48f7a8140cdc8f671de4ed2f16a91c1b25 Mon Sep 17 00:00:00 2001 From: Toralf Wittner Date: Wed, 9 Oct 2024 17:14:15 -0600 Subject: [PATCH 1238/1393] Remove some `ConnectedNetwork` methods. (#3740) I.e. `ConnectedNetwork::{request_data, spawn_request_receiver_task}`. Closes #3713. --- .../src/traits/networking/combined_network.rs | 19 +-- .../src/traits/networking/libp2p_network.rs | 123 ++---------------- .../src/traits/networking/push_cdn_network.rs | 16 --- types/src/request_response.rs | 22 +--- types/src/traits/network.rs | 28 +--- 5 files changed, 12 insertions(+), 196 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 2a4bd88f8f..1e350ba5ef 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -25,7 +25,7 @@ use async_compatibility_layer::{ }; use async_lock::RwLock; use async_trait::async_trait; -use futures::{channel::mpsc, join, select, FutureExt}; +use futures::{join, select, FutureExt}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -37,7 +37,6 @@ use hotshot_types::{ COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }, data::ViewNumber, - request_response::NetworkMsgResponseChannel, traits::{ network::{BroadcastDelay, ConnectedNetwork, Topic}, node_implementation::NodeType, @@ -335,22 +334,6 @@ impl TestableNetworkingImplementation for CombinedNetwor #[async_trait] impl ConnectedNetwork for CombinedNetworks { - async fn request_data( - &self, - request: Vec, - recipient: &TYPES::SignatureKey, - ) -> Result, NetworkError> { - self.secondary() - .request_data::(request, recipient) - .await - } - - async fn spawn_request_receiver_task( - &self, - ) -> Option, NetworkMsgResponseChannel>)>> { - self.secondary().spawn_request_receiver_task().await - } - fn pause(&self) { self.networks.0.pause(); } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 8dd97b6aeb..99d8f7c3fc 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -30,13 +30,12 @@ use async_compatibility_layer::{ TrySendError, UnboundedReceiver, UnboundedSender, }, }; -use async_lock::{Mutex, RwLock}; +use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; use futures::{ - channel::mpsc::{self, channel, Sender}, future::{join_all, Either}, - FutureExt, StreamExt, + FutureExt, }; use hotshot_orchestrator::config::NetworkConfig; #[cfg(feature = "hotshot-testing")] @@ -47,12 +46,11 @@ use hotshot_types::{ boxed_sync, constants::LOOK_AHEAD, data::ViewNumber, - message::{DataMessage::DataResponse, Message, MessageKind}, - request_response::{NetworkMsgResponseChannel, Request, Response, TakeReceiver}, + request_response::Request, traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, - network::{ConnectedNetwork, NetworkError, ResponseMessage, Topic}, + network::{ConnectedNetwork, NetworkError, Topic}, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -72,7 +70,7 @@ use libp2p_networking::{ NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeHandle, NetworkNodeReceiver, DEFAULT_REPLICATION_FACTOR, }, - reexport::{Multiaddr, ResponseChannel}, + reexport::Multiaddr, }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; @@ -153,10 +151,6 @@ struct Libp2pNetworkInner { handle: Arc>, /// Message Receiver receiver: UnboundedReceiver>, - /// Receiver for Requests for Data, includes the request and the response chan - /// Lock should only be used once to take the channel and move it into the request - /// handler task - requests_rx: TakeReceiver, /// Sender for broadcast messages sender: UnboundedSender>, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) @@ -577,7 +571,6 @@ impl Libp2pNetwork { // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs let (sender, receiver) = unbounded(); - let (requests_tx, requests_rx) = channel(100); let (node_lookup_send, node_lookup_recv) = bounded(10); let (kill_tx, kill_rx) = bounded(1); rx.set_kill_switch(kill_rx); @@ -586,7 +579,6 @@ impl Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: Arc::new(network_handle), receiver, - requests_rx: Mutex::new(Some(requests_rx)), sender: sender.clone(), pk, bootstrap_addrs, @@ -611,7 +603,7 @@ impl Libp2pNetwork { // Set the network as not ready result.inner.metrics.is_ready.set(0); - result.handle_event_generator(sender, requests_tx, rx); + result.handle_event_generator(sender, rx); result.spawn_node_lookup(node_lookup_recv); result.spawn_connect(id, lookup_record_value); @@ -712,7 +704,6 @@ impl Libp2pNetwork { &self, msg: NetworkEvent, sender: &UnboundedSender>, - mut request_tx: Sender<(Vec, ResponseChannel)>, ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { @@ -747,13 +738,8 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { error!("handle_recvd_events received `NetworkEvent::IsBootstrapped`, which should be impossible."); } - NetworkEvent::ResponseRequested(Request(msg), chan) => { - let res = request_tx.try_send((msg, chan)); - res.map_err(|err| { - NetworkError::ChannelSendError(format!( - "failed to respond to a peer's data request: {err}" - )) - })?; + NetworkEvent::ResponseRequested(..) => { + error!("received unexpected `NetworkEvent::ResponseRequested`"); } NetworkEvent::ConnectedPeersUpdate(_) => {} } @@ -765,7 +751,6 @@ impl Libp2pNetwork { fn handle_event_generator( &self, sender: UnboundedSender>, - request_tx: Sender<(Vec, ResponseChannel)>, mut network_rx: NetworkNodeReceiver, ) { let handle = self.clone(); @@ -792,9 +777,7 @@ impl Libp2pNetwork { | DirectRequest(_, _, _) | DirectResponse(_, _) | NetworkEvent::ResponseRequested(Request(_), _) => { - let _ = handle - .handle_recvd_events(message, &sender, request_tx.clone()) - .await; + let _ = handle.handle_recvd_events(message, &sender).await; } NetworkEvent::ConnectedPeersUpdate(num_peers) => { handle.inner.metrics.num_connected_peers.set(*num_peers); @@ -821,94 +804,6 @@ impl Libp2pNetwork { #[async_trait] impl ConnectedNetwork for Libp2pNetwork { - async fn request_data( - &self, - request: Vec, - recipient: &K, - ) -> Result, NetworkError> { - // If we're not ready, return an error - if !self.is_ready() { - self.inner.metrics.num_failed_messages.add(1); - return Err(NetworkError::NotReadyYet); - }; - - let pid = match self - .inner - .handle - .lookup_node(&recipient.to_bytes(), self.inner.dht_timeout) - .await - { - Ok(pid) => pid, - Err(err) => { - self.inner.metrics.num_failed_messages.add(1); - return Err(NetworkError::LookupError(format!( - "failed to look up node: {err}" - ))); - } - }; - let result = match self.inner.handle.request_data(&request, pid).await { - Ok(response) => match response { - Some(msg) => { - if msg.0.len() < 8 { - return Err(NetworkError::FailedToDeserialize( - "message was too small".to_string(), - )); - } - let res: Message = bincode::deserialize(&msg.0).map_err(|err| { - NetworkError::FailedToDeserialize(format!( - "failed to serialize request data: {err}" - )) - })?; - - match res.kind { - MessageKind::Data(DataResponse(data)) => data, - _ => ResponseMessage::NotFound, - } - } - None => ResponseMessage::NotFound, - }, - Err(e) => { - self.inner.metrics.num_failed_messages.add(1); - return Err(e); - } - }; - - Ok(bincode::serialize(&result).map_err(|err| { - self.inner.metrics.num_failed_messages.add(1); - NetworkError::FailedToSerialize(format!("failed to serialize request response: {err}")) - })?) - } - - async fn spawn_request_receiver_task( - &self, - ) -> Option, NetworkMsgResponseChannel>)>> { - let mut internal_rx = self.inner.requests_rx.lock().await.take()?; - let handle = Arc::clone(&self.inner.handle); - let (mut tx, rx) = mpsc::channel(100); - async_spawn(async move { - while let Some((request, chan)) = internal_rx.next().await { - let (response_tx, response_rx) = futures::channel::oneshot::channel(); - if tx - .try_send(( - request, - NetworkMsgResponseChannel { - sender: response_tx, - }, - )) - .is_err() - { - continue; - } - let Ok(response) = response_rx.await else { - continue; - }; - - let _ = handle.respond_data(response, chan).await; - } - }); - - Some(rx) - } #[instrument(name = "Libp2pNetwork::ready_blocking", skip_all)] async fn wait_for_ready(&self) { self.wait_for_ready().await; diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 76d689b70e..94b3f8d30c 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -33,7 +33,6 @@ use cdn_client::{ }; #[cfg(feature = "hotshot-testing")] use cdn_marshal::{Config as MarshalConfig, Marshal}; -use futures::channel::mpsc; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -41,7 +40,6 @@ use hotshot_types::traits::network::{ use hotshot_types::{ boxed_sync, data::ViewNumber, - request_response::NetworkMsgResponseChannel, traits::{ metrics::{Counter, Metrics, NoMetrics}, network::{BroadcastDelay, ConnectedNetwork, Topic as HotShotTopic}, @@ -436,20 +434,6 @@ impl TestableNetworkingImplementation #[async_trait] impl ConnectedNetwork for PushCdnNetwork { - async fn request_data( - &self, - _request: Vec, - _recipient: &K, - ) -> Result, NetworkError> { - Ok(vec![]) - } - - async fn spawn_request_receiver_task( - &self, - ) -> Option, NetworkMsgResponseChannel>)>> { - None - } - /// Pause sending and receiving on the PushCDN network. fn pause(&self) { #[cfg(feature = "hotshot-testing")] diff --git a/types/src/request_response.rs b/types/src/request_response.rs index 8b6aab7785..156008fed6 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -7,15 +7,10 @@ //! Types for the request/response implementations. This module incorporates all //! of the shared types for all of the network backends. -use async_lock::Mutex; use committable::{Committable, RawCommitmentBuilder}; -use futures::channel::{mpsc::Receiver, oneshot}; -use libp2p::request_response::ResponseChannel; use serde::{Deserialize, Serialize}; -use crate::traits::{ - network::NetworkMsg, node_implementation::NodeType, signature_key::SignatureKey, -}; +use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; /// Request for Consenus data #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -29,21 +24,6 @@ pub struct Response( pub Vec, ); -/// Wraps a oneshot channel for responding to requests. This is a -/// specialized version of the libp2p request-response `ResponseChannel` -/// which accepts any generic response. -pub struct NetworkMsgResponseChannel { - /// underlying sender for this channel - pub sender: oneshot::Sender, -} - -/// Type alias for the channel that we receive requests from the network on. -pub type RequestReceiver = Receiver<(Vec, NetworkMsgResponseChannel>)>; - -/// Locked Option of a receiver for moving the value out of the option. This -/// type takes any `Response` type depending on the underlying network impl. -pub type TakeReceiver = Mutex, ResponseChannel)>>>; - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] /// A signed request for a proposal. pub struct ProposalRequestPayload { diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 2c27a417e6..40a55423cc 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -11,10 +11,7 @@ use async_compatibility_layer::art::async_sleep; use derivative::Derivative; use dyn_clone::DynClone; -use futures::{ - channel::mpsc::{self}, - Future, -}; +use futures::Future; use thiserror::Error; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] @@ -41,7 +38,6 @@ use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ data::ViewNumber, message::{MessagePurpose, SequencingMessage}, - request_response::NetworkMsgResponseChannel, BoxSyncFuture, }; @@ -274,28 +270,6 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st /// If there is a network-related failure. async fn recv_message(&self) -> Result, NetworkError>; - /// Ask request the network for some data. Returns the request ID for that data, - /// The ID returned can be used for cancelling the request - async fn request_data( - &self, - _request: Vec, - _recipient: &K, - ) -> Result, NetworkError> { - Err(NetworkError::Unimplemented) - } - - /// Spawn a request task in the given network layer. If it supports - /// Request and responses it will return the receiving end of a channel. - /// Requests the network receives will be sent over this channel along - /// with a return channel to send the response back to. - /// - /// Returns `None`` if network does not support handling requests - async fn spawn_request_receiver_task( - &self, - ) -> Option, NetworkMsgResponseChannel>)>> { - None - } - /// queues lookup of a node /// /// # Errors From c14ce9ffd07d8008d5dd45726ce895ce2191c3da Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 10 Oct 2024 10:02:24 -0600 Subject: [PATCH 1239/1393] Remove Dependency on Orchestrator from HotShot Types Module (#3745) * temp * fix build * bump online default, move constats, remove dead code * fix linting for default method * panic on failure to read file --- examples/infra/mod.rs | 14 +- examples/push-cdn/whitelist-adapter.rs | 7 +- hotshot/Cargo.toml | 1 - .../src/traits/networking/libp2p_network.rs | 2 +- orchestrator/Cargo.toml | 1 - orchestrator/src/client.rs | 36 +- orchestrator/src/config.rs | 754 ------------------ orchestrator/src/lib.rs | 6 +- testing/src/block_builder/random.rs | 2 +- testing/tests/tests_1/block_builder.rs | 14 +- testing/tests/tests_1/gen_key_pair.rs | 3 +- types/Cargo.toml | 7 +- types/src/constants.rs | 9 + types/src/hotshot_config_file.rs | 153 ++++ types/src/lib.rs | 10 + types/src/network.rs | 396 +++++++++ types/src/upgrade_config.rs | 44 + types/src/validator_config.rs | 53 ++ 18 files changed, 729 insertions(+), 783 deletions(-) delete mode 100644 orchestrator/src/config.rs create mode 100644 types/src/hotshot_config_file.rs create mode 100644 types/src/network.rs create mode 100644 types/src/upgrade_config.rs create mode 100644 types/src/validator_config.rs diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 056eb92782..4875dfd149 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -45,8 +45,7 @@ use hotshot_example_types::{ }; use hotshot_orchestrator::{ self, - client::{BenchResults, OrchestratorClient, ValidatorArgs}, - config::{BuilderType, NetworkConfig, NetworkConfigFile, NetworkConfigSource}, + client::{get_complete_config, BenchResults, OrchestratorClient, ValidatorArgs}, }; use hotshot_testing::block_builder::{ BuilderTask, RandomBuilderImplementation, SimpleBuilderImplementation, @@ -56,6 +55,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, TestableLeaf}, event::{Event, EventType}, + network::{BuilderType, NetworkConfig, NetworkConfigFile, NetworkConfigSource}, traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, @@ -903,11 +903,12 @@ pub async fn main_entry_point< // We assume one node will not call this twice to generate two validator_config-s with same identity. let my_own_validator_config = NetworkConfig::::generate_init_validator_config( - &orchestrator_client, + orchestrator_client + .get_node_index_for_init_validator_config() + .await, // we assign nodes to the DA committee by default true, - ) - .await; + ); // Derives our Libp2p private key from our private key, and then returns the public key of that key let libp2p_public_key = @@ -930,7 +931,8 @@ pub async fn main_entry_point< // It returns the complete config which also includes peer's public key and public config. // This function will be taken solely by sequencer right after OrchestratorClient::new, // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. - let (mut run_config, source) = NetworkConfig::::get_complete_config( + + let (mut run_config, source) = get_complete_config( &orchestrator_client, my_own_validator_config, advertise_multiaddress, diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index d68f618fb5..f787f271e4 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -14,8 +14,11 @@ use anyhow::{Context, Result}; use cdn_broker::reexports::discovery::{DiscoveryClient, Embedded, Redis}; use clap::Parser; use hotshot_example_types::node_types::TestTypes; -use hotshot_orchestrator::{client::OrchestratorClient, config::NetworkConfig}; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_types::{ + network::NetworkConfig, + traits::{node_implementation::NodeType, signature_key::SignatureKey}, +}; use surf_disco::Url; #[derive(Parser, Debug)] diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 1bbf122a8a..3b00f7e0a9 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -54,7 +54,6 @@ time = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } jf-signature.workspace = true -hotshot-orchestrator = { path = "../orchestrator" } blake3.workspace = true sha2 = { workspace = true } url = { workspace = true } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 99d8f7c3fc..382a695e1e 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -37,7 +37,6 @@ use futures::{ future::{join_all, Either}, FutureExt, }; -use hotshot_orchestrator::config::NetworkConfig; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -46,6 +45,7 @@ use hotshot_types::{ boxed_sync, constants::LOOK_AHEAD, data::ViewNumber, + network::NetworkConfig, request_response::Request, traits::{ election::Membership, diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 9526203411..59bce1ae84 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -18,7 +18,6 @@ serde = { workspace = true } serde_json = { workspace = true } toml = { workspace = true } thiserror = "1" -serde-inline-default = "0.1" csv = "1" vbs = { workspace = true } vec1 = { workspace = true } diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index da7a050ae8..afe75d9399 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -6,17 +6,22 @@ use std::{net::SocketAddr, time::Duration}; +use crate::OrchestratorVersion; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; -use hotshot_types::{traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig}; +use hotshot_types::{ + network::{NetworkConfig, NetworkConfigSource}, + traits::signature_key::SignatureKey, + PeerConfig, ValidatorConfig, +}; use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; +use tracing::info; use tracing::instrument; use vbs::BinarySerializer; -use crate::{config::NetworkConfig, OrchestratorVersion}; /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client @@ -156,6 +161,33 @@ pub struct MultiValidatorArgs { pub network_config_file: Option, } +/// Asynchronously retrieves a `NetworkConfig` from an orchestrator. +/// The retrieved one includes correct `node_index` and peer's public config. +/// +/// # Errors +/// If we are unable to get the configuration from the orchestrator +pub async fn get_complete_config( + client: &OrchestratorClient, + my_own_validator_config: ValidatorConfig, + libp2p_advertise_address: Option, + libp2p_public_key: Option, +) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { + // get the configuration from the orchestrator + let run_config: NetworkConfig = client + .post_and_wait_all_public_keys::( + my_own_validator_config, + libp2p_advertise_address, + libp2p_public_key, + ) + .await; + + info!( + "Retrieved config; our node index is {}. DA committee member: {}", + run_config.node_index, run_config.config.my_own_validator_config.is_da + ); + Ok((run_config, NetworkConfigSource::Orchestrator)) +} + impl ValidatorArgs { /// Constructs `ValidatorArgs` from `MultiValidatorArgs` and a node index. /// diff --git a/orchestrator/src/config.rs b/orchestrator/src/config.rs deleted file mode 100644 index 81bd03cf9d..0000000000 --- a/orchestrator/src/config.rs +++ /dev/null @@ -1,754 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use std::{ - env, fs, - num::NonZeroUsize, - ops::Range, - path::{Path, PathBuf}, - time::Duration, - vec, -}; - -use clap::ValueEnum; -use hotshot_types::{ - constants::REQUEST_DATA_DELAY, light_client::StateVerKey, traits::signature_key::SignatureKey, - ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, -}; -use libp2p::{Multiaddr, PeerId}; -use serde_inline_default::serde_inline_default; -use surf_disco::Url; -use thiserror::Error; -use toml; -use tracing::{error, info}; -use vec1::Vec1; - -use crate::client::OrchestratorClient; - -/// Configuration describing a libp2p node -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct Libp2pConfig { - /// The bootstrap nodes to connect to (multiaddress, serialized public key) - pub bootstrap_nodes: Vec<(PeerId, Multiaddr)>, -} - -/// configuration for a web server -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct WebServerConfig { - /// the url to run on - pub url: Url, - /// the time to wait between polls - pub wait_between_polls: Duration, -} - -/// configuration for combined network -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct CombinedNetworkConfig { - /// delay duration before sending a message through the secondary network - pub delay_duration: Duration, -} - -/// a network configuration error -#[derive(Error, Debug)] -pub enum NetworkConfigError { - /// Failed to read NetworkConfig from file - #[error("Failed to read NetworkConfig from file")] - ReadFromFileError(std::io::Error), - /// Failed to deserialize loaded NetworkConfig - #[error("Failed to deserialize loaded NetworkConfig")] - DeserializeError(serde_json::Error), - /// Failed to write NetworkConfig to file - #[error("Failed to write NetworkConfig to file")] - WriteToFileError(std::io::Error), - /// Failed to serialize NetworkConfig - #[error("Failed to serialize NetworkConfig")] - SerializeError(serde_json::Error), - /// Failed to recursively create path to NetworkConfig - #[error("Failed to recursively create path to NetworkConfig")] - FailedToCreatePath(std::io::Error), -} - -#[derive(Clone, Copy, Debug, serde::Serialize, serde::Deserialize, Default, ValueEnum)] -/// configuration for builder type to use -pub enum BuilderType { - /// Use external builder, [config.builder_url] must be - /// set to correct builder address - External, - #[default] - /// Simple integrated builder will be started and used by each hotshot node - Simple, - /// Random integrated builder will be started and used by each hotshot node - Random, -} - -/// Node PeerConfig keys -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -#[serde(bound(deserialize = ""))] -pub struct PeerConfigKeys { - /// The peer's public key - pub stake_table_key: KEY, - /// the peer's state public key - pub state_ver_key: StateVerKey, - /// the peer's stake - pub stake: u64, - /// whether the node is a DA node - pub da: bool, -} - -/// Options controlling how the random builder generates blocks -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -pub struct RandomBuilderConfig { - /// How many transactions to include in a block - pub txn_in_block: u64, - /// How many blocks to generate per second - pub blocks_per_second: u32, - /// Range of how big a transaction can be (in bytes) - pub txn_size: Range, -} - -impl Default for RandomBuilderConfig { - fn default() -> Self { - Self { - txn_in_block: 100, - blocks_per_second: 1, - txn_size: 20..100, - } - } -} - -/// a network configuration -#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] -#[serde(bound(deserialize = ""))] -pub struct NetworkConfig { - /// number of views to run - pub rounds: usize, - /// whether DA membership is determined by index. - /// if true, the first k nodes to register form the DA committee - /// if false, DA membership is requested by the nodes - pub indexed_da: bool, - /// number of transactions per view - pub transactions_per_round: usize, - /// password to have the orchestrator start the network, - /// regardless of the number of nodes connected. - pub manual_start_password: Option, - /// number of bootstrap nodes - pub num_bootrap: usize, - /// timeout before starting the next view - pub next_view_timeout: u64, - /// timeout before starting next view sync round - pub view_sync_timeout: Duration, - /// The maximum amount of time a leader can wait to get a block from a builder - pub builder_timeout: Duration, - /// time to wait until we request data associated with a proposal - pub data_request_delay: Duration, - /// global index of node (for testing purposes a uid) - pub node_index: u64, - /// unique seed (for randomness? TODO) - pub seed: [u8; 32], - /// size of transactions - pub transaction_size: usize, - /// delay before beginning consensus - pub start_delay_seconds: u64, - /// name of the key type (for debugging) - pub key_type_name: String, - /// the libp2p config - pub libp2p_config: Option, - /// the hotshot config - pub config: HotShotConfig, - /// The address for the Push CDN's "marshal", A.K.A. load balancer - pub cdn_marshal_address: Option, - /// combined network config - pub combined_network_config: Option, - /// the commit this run is based on - pub commit_sha: String, - /// builder to use - pub builder: BuilderType, - /// random builder config - pub random_builder: Option, - /// The list of public keys that are allowed to connect to the orchestrator - pub public_keys: Vec>, -} - -/// the source of the network config -pub enum NetworkConfigSource { - /// we source the network configuration from the orchestrator - Orchestrator, - /// we source the network configuration from a config file on disk - File, -} - -impl NetworkConfig { - /// Asynchronously retrieves a `NetworkConfig` either from a file or from an orchestrator. - /// - /// This function takes an `OrchestratorClient`, an optional file path, and Libp2p-specific parameters. - /// - /// If a file path is provided, the function will first attempt to load the `NetworkConfig` from the file. - /// If the file does not exist or cannot be read, the function will fall back to retrieving the `NetworkConfig` from the orchestrator. - /// In this case, if the path to the file does not exist, it will be created. - /// The retrieved `NetworkConfig` is then saved back to the file for future use. - /// - /// If no file path is provided, the function will directly retrieve the `NetworkConfig` from the orchestrator. - /// - /// # Errors - /// If we were unable to load the configuration. - /// - /// # Arguments - /// - /// * `client` - An `OrchestratorClient` used to retrieve the `NetworkConfig` from the orchestrator. - /// * `identity` - A string representing the identity for which to retrieve the `NetworkConfig`. - /// * `file` - An optional string representing the path to the file from which to load the `NetworkConfig`. - /// * `libp2p_address` - An optional address specifying where other Libp2p nodes can reach us - /// * `libp2p_public_key` - The public key in which other Libp2p nodes can reach us with - /// - /// # Returns - /// - /// This function returns a tuple containing a `NetworkConfig` and a `NetworkConfigSource`. The `NetworkConfigSource` indicates whether the `NetworkConfig` was loaded from a file or retrieved from the orchestrator. - pub async fn from_file_or_orchestrator( - client: &OrchestratorClient, - file: Option, - libp2p_advertise_address: Option, - libp2p_public_key: Option, - ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { - if let Some(file) = file { - info!("Retrieving config from the file"); - // if we pass in file, try there first - match Self::from_file(file.clone()) { - Ok(config) => Ok((config, NetworkConfigSource::File)), - Err(e) => { - // fallback to orchestrator - error!("{e}, falling back to orchestrator"); - - let config = client - .get_config_without_peer(libp2p_advertise_address, libp2p_public_key) - .await?; - - // save to file if we fell back - if let Err(e) = config.to_file(file) { - error!("{e}"); - }; - - Ok((config, NetworkConfigSource::File)) - } - } - } else { - info!("Retrieving config from the orchestrator"); - - // otherwise just get from orchestrator - Ok(( - client - .get_config_without_peer(libp2p_advertise_address, libp2p_public_key) - .await?, - NetworkConfigSource::Orchestrator, - )) - } - } - - /// Get a temporary node index for generating a validator config - pub async fn generate_init_validator_config( - client: &OrchestratorClient, - is_da: bool, - ) -> ValidatorConfig { - // This cur_node_index is only used for key pair generation, it's not bound with the node, - // lather the node with the generated key pair will get a new node_index from orchestrator. - let cur_node_index = client.get_node_index_for_init_validator_config().await; - ValidatorConfig::generated_from_seed_indexed([0u8; 32], cur_node_index.into(), 1, is_da) - } - - /// Asynchronously retrieves a `NetworkConfig` from an orchestrator. - /// The retrieved one includes correct `node_index` and peer's public config. - /// - /// # Errors - /// If we are unable to get the configuration from the orchestrator - pub async fn get_complete_config( - client: &OrchestratorClient, - my_own_validator_config: ValidatorConfig, - libp2p_advertise_address: Option, - libp2p_public_key: Option, - ) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { - // get the configuration from the orchestrator - let run_config: NetworkConfig = client - .post_and_wait_all_public_keys::( - my_own_validator_config, - libp2p_advertise_address, - libp2p_public_key, - ) - .await; - - info!( - "Retrieved config; our node index is {}. DA committee member: {}", - run_config.node_index, run_config.config.my_own_validator_config.is_da - ); - Ok((run_config, NetworkConfigSource::Orchestrator)) - } - - /// Loads a `NetworkConfig` from a file. - /// - /// This function takes a file path as a string, reads the file, and then deserializes the contents into a `NetworkConfig`. - /// - /// # Arguments - /// - /// * `file` - A string representing the path to the file from which to load the `NetworkConfig`. - /// - /// # Returns - /// - /// This function returns a `Result` that contains a `NetworkConfig` if the file was successfully read and deserialized, or a `NetworkConfigError` if an error occurred. - /// - /// # Errors - /// - /// This function will return an error if the file cannot be read or if the contents cannot be deserialized into a `NetworkConfig`. - /// - /// # Examples - /// - /// ```ignore - /// # use hotshot_orchestrator::config::NetworkConfig; - /// # use hotshot_types::signature_key::BLSPubKey; - /// // # use hotshot::traits::election::static_committee::StaticElectionConfig; - /// let file = "/path/to/my/config".to_string(); - /// // NOTE: broken due to staticelectionconfig not being importable - /// // cannot import staticelectionconfig from hotshot without creating circular dependency - /// // making this work probably involves the `types` crate implementing a dummy - /// // electionconfigtype just ot make this example work - /// let config = NetworkConfig::::from_file(file).unwrap(); - /// ``` - pub fn from_file(file: String) -> Result { - // read from file - let data = match fs::read(file) { - Ok(data) => data, - Err(e) => { - return Err(NetworkConfigError::ReadFromFileError(e)); - } - }; - - // deserialize - match serde_json::from_slice(&data) { - Ok(data) => Ok(data), - Err(e) => Err(NetworkConfigError::DeserializeError(e)), - } - } - - /// Serializes the `NetworkConfig` and writes it to a file. - /// - /// This function takes a file path as a string, serializes the `NetworkConfig` into JSON format using `serde_json` and then writes the serialized data to the file. - /// - /// # Arguments - /// - /// * `file` - A string representing the path to the file where the `NetworkConfig` should be saved. - /// - /// # Returns - /// - /// This function returns a `Result` that contains `()` if the `NetworkConfig` was successfully serialized and written to the file, or a `NetworkConfigError` if an error occurred. - /// - /// # Errors - /// - /// This function will return an error if the `NetworkConfig` cannot be serialized or if the file cannot be written. - /// - /// # Examples - /// - /// ```ignore - /// # use hotshot_orchestrator::config::NetworkConfig; - /// let file = "/path/to/my/config".to_string(); - /// let config = NetworkConfig::from_file(file); - /// config.to_file(file).unwrap(); - /// ``` - pub fn to_file(&self, file: String) -> Result<(), NetworkConfigError> { - // ensure the directory containing the config file exists - if let Some(dir) = Path::new(&file).parent() { - if let Err(e) = fs::create_dir_all(dir) { - return Err(NetworkConfigError::FailedToCreatePath(e)); - } - } - - // serialize - let serialized = match serde_json::to_string_pretty(self) { - Ok(data) => data, - Err(e) => { - return Err(NetworkConfigError::SerializeError(e)); - } - }; - - // write to file - match fs::write(file, serialized) { - Ok(()) => Ok(()), - Err(e) => Err(NetworkConfigError::WriteToFileError(e)), - } - } -} - -impl Default for NetworkConfig { - fn default() -> Self { - Self { - rounds: ORCHESTRATOR_DEFAULT_NUM_ROUNDS, - indexed_da: true, - transactions_per_round: ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, - node_index: 0, - seed: [0u8; 32], - transaction_size: ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, - manual_start_password: None, - libp2p_config: None, - config: HotShotConfigFile::default().into(), - start_delay_seconds: 60, - key_type_name: std::any::type_name::().to_string(), - cdn_marshal_address: None, - combined_network_config: None, - next_view_timeout: 10, - view_sync_timeout: Duration::from_secs(2), - num_bootrap: 5, - builder_timeout: Duration::from_secs(10), - data_request_delay: Duration::from_millis(2500), - commit_sha: String::new(), - builder: BuilderType::default(), - random_builder: None, - public_keys: vec![], - } - } -} - -/// a network config stored in a file -#[serde_inline_default] -#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] -#[serde(bound(deserialize = ""))] -pub struct PublicKeysFile { - /// The list of public keys that are allowed to connect to the orchestrator - /// - /// If nonempty, this list becomes the stake table and is used to determine DA membership (ignoring the node's request). - #[serde(default)] - pub public_keys: Vec>, -} - -/// a network config stored in a file -#[serde_inline_default] -#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] -#[serde(bound(deserialize = ""))] -pub struct NetworkConfigFile { - /// number of views to run - #[serde_inline_default(ORCHESTRATOR_DEFAULT_NUM_ROUNDS)] - pub rounds: usize, - /// number of views to run - #[serde(default)] - pub indexed_da: bool, - /// number of transactions per view - #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND)] - pub transactions_per_round: usize, - /// password to have the orchestrator start the network, - /// regardless of the number of nodes connected. - #[serde(default)] - pub manual_start_password: Option, - /// global index of node (for testing purposes a uid) - #[serde(default)] - pub node_index: u64, - /// unique seed (for randomness? TODO) - #[serde(default)] - pub seed: [u8; 32], - /// size of transactions - #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE)] - pub transaction_size: usize, - /// delay before beginning consensus - #[serde_inline_default(ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS)] - pub start_delay_seconds: u64, - /// the hotshot config file - #[serde(default)] - pub config: HotShotConfigFile, - /// The address of the Push CDN's "marshal", A.K.A. load balancer - #[serde(default)] - pub cdn_marshal_address: Option, - /// combined network config - #[serde(default)] - pub combined_network_config: Option, - /// builder to use - #[serde(default)] - pub builder: BuilderType, - /// random builder configuration - #[serde(default)] - pub random_builder: Option, - /// The list of public keys that are allowed to connect to the orchestrator - /// - /// If nonempty, this list becomes the stake table and is used to determine DA membership (ignoring the node's request). - #[serde(default)] - pub public_keys: Vec>, -} - -impl From> for NetworkConfig { - fn from(val: NetworkConfigFile) -> Self { - NetworkConfig { - rounds: val.rounds, - indexed_da: val.indexed_da, - transactions_per_round: val.transactions_per_round, - node_index: 0, - num_bootrap: val.config.num_bootstrap, - manual_start_password: val.manual_start_password, - next_view_timeout: val.config.next_view_timeout, - view_sync_timeout: val.config.view_sync_timeout, - builder_timeout: val.config.builder_timeout, - data_request_delay: val - .config - .data_request_delay - .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), - seed: val.seed, - transaction_size: val.transaction_size, - libp2p_config: Some(Libp2pConfig { - bootstrap_nodes: Vec::new(), - }), - config: val.config.into(), - key_type_name: std::any::type_name::().to_string(), - start_delay_seconds: val.start_delay_seconds, - cdn_marshal_address: val.cdn_marshal_address, - combined_network_config: val.combined_network_config, - commit_sha: String::new(), - builder: val.builder, - random_builder: val.random_builder, - public_keys: val.public_keys, - } - } -} - -/// Default builder URL, used as placeholder -fn default_builder_urls() -> Vec1 { - vec1::vec1![Url::parse("http://0.0.0.0:3311").unwrap()] -} - -/// Holds configuration for a `HotShot` -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(bound(deserialize = ""))] -pub struct HotShotConfigFile { - /// The proportion of nodes required before the orchestrator issues the ready signal, - /// expressed as (numerator, denominator) - pub start_threshold: (u64, u64), - /// Total number of staked nodes in the network - pub num_nodes_with_stake: NonZeroUsize, - #[serde(skip)] - /// My own public key, secret key, stake value - pub my_own_validator_config: ValidatorConfig, - #[serde(skip)] - /// The known nodes' public key and stake value - pub known_nodes_with_stake: Vec>, - #[serde(skip)] - /// The known DA nodes' public key and stake values - pub known_da_nodes: Vec>, - #[serde(skip)] - /// The known non-staking nodes' - pub known_nodes_without_stake: Vec, - /// Number of staking DA nodes - pub staked_da_nodes: usize, - /// Number of fixed leaders for GPU VID - pub fixed_leader_for_gpuvid: usize, - /// Base duration for next-view timeout, in milliseconds - pub next_view_timeout: u64, - /// Duration for view sync round timeout - pub view_sync_timeout: Duration, - /// The exponential backoff ration for the next-view timeout - pub timeout_ratio: (u64, u64), - /// The delay a leader inserts before starting pre-commit, in milliseconds - pub round_start_delay: u64, - /// Delay after init before starting consensus, in milliseconds - pub start_delay: u64, - /// Number of network bootstrap nodes - pub num_bootstrap: usize, - /// The maximum amount of time a leader can wait to get a block from a builder - pub builder_timeout: Duration, - /// Time to wait until we request data associated with a proposal - pub data_request_delay: Option, - /// Builder API base URL - #[serde(default = "default_builder_urls")] - pub builder_urls: Vec1, - /// Upgrade config - pub upgrade: UpgradeConfig, -} - -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(bound(deserialize = ""))] -/// Holds configuration for the upgrade task. -pub struct UpgradeConfig { - /// View to start proposing an upgrade - pub start_proposing_view: u64, - /// View to stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_view <= start_proposing_view. - pub stop_proposing_view: u64, - /// View to start voting on an upgrade - pub start_voting_view: u64, - /// View to stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_view <= start_voting_view. - pub stop_voting_view: u64, - /// Unix time in seconds at which we start proposing an upgrade - pub start_proposing_time: u64, - /// Unix time in seconds at which we stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_time <= start_proposing_time. - pub stop_proposing_time: u64, - /// Unix time in seconds at which we start voting on an upgrade - pub start_voting_time: u64, - /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. - pub stop_voting_time: u64, -} - -// Explicitly implementing `Default` for clarity. -#[allow(clippy::derivable_impls)] -impl Default for UpgradeConfig { - fn default() -> Self { - UpgradeConfig { - start_proposing_view: u64::MAX, - stop_proposing_view: 0, - start_voting_view: u64::MAX, - stop_voting_view: 0, - start_proposing_time: u64::MAX, - stop_proposing_time: 0, - start_voting_time: u64::MAX, - stop_voting_time: 0, - } - } -} - -/// Holds configuration for a validator node -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Default)] -#[serde(bound(deserialize = ""))] -pub struct ValidatorConfigFile { - /// The validator's seed - pub seed: [u8; 32], - /// The validator's index, which can be treated as another input to the seed - pub node_id: u64, - // The validator's stake, commented for now - // pub stake_value: u64, - /// Whether or not we are DA - pub is_da: bool, -} - -impl ValidatorConfigFile { - /// read the validator config from a file - /// # Panics - /// Panics if unable to get the current working directory - pub fn from_file(dir_str: &str) -> Self { - let current_working_dir = match env::current_dir() { - Ok(dir) => dir, - Err(e) => { - error!("get_current_working_dir error: {:?}", e); - PathBuf::from("") - } - }; - let filename = - current_working_dir.into_os_string().into_string().unwrap() + "/../../" + dir_str; - match fs::read_to_string(filename.clone()) { - // If successful return the files text as `contents`. - Ok(contents) => { - let data: ValidatorConfigFile = match toml::from_str(&contents) { - // If successful, return data as `Data` struct. - // `d` is a local variable. - Ok(d) => d, - // Handle the `error` case. - Err(e) => { - // Write `msg` to `stderr`. - error!("Unable to load data from `{}`: {}", filename, e); - ValidatorConfigFile::default() - } - }; - data - } - // Handle the `error` case. - Err(e) => { - // Write `msg` to `stderr`. - error!("Could not read file `{}`: {}", filename, e); - ValidatorConfigFile::default() - } - } - } -} - -impl From> for HotShotConfig { - fn from(val: HotShotConfigFile) -> Self { - HotShotConfig { - execution_type: ExecutionType::Continuous, - start_threshold: val.start_threshold, - num_nodes_with_stake: val.num_nodes_with_stake, - known_da_nodes: val.known_da_nodes, - known_nodes_with_stake: val.known_nodes_with_stake, - known_nodes_without_stake: val.known_nodes_without_stake, - my_own_validator_config: val.my_own_validator_config, - da_staked_committee_size: val.staked_da_nodes, - fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, - next_view_timeout: val.next_view_timeout, - view_sync_timeout: val.view_sync_timeout, - timeout_ratio: val.timeout_ratio, - round_start_delay: val.round_start_delay, - start_delay: val.start_delay, - num_bootstrap: val.num_bootstrap, - builder_timeout: val.builder_timeout, - data_request_delay: val - .data_request_delay - .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), - builder_urls: val.builder_urls, - start_proposing_view: val.upgrade.start_proposing_view, - stop_proposing_view: val.upgrade.stop_proposing_view, - start_voting_view: val.upgrade.start_voting_view, - stop_voting_view: val.upgrade.stop_voting_view, - start_proposing_time: val.upgrade.start_proposing_time, - stop_proposing_time: val.upgrade.stop_proposing_time, - start_voting_time: val.upgrade.start_voting_time, - stop_voting_time: val.upgrade.stop_voting_time, - } - } -} -/// default number of rounds to run -pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS: usize = 100; -/// default number of transactions per round -pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND: usize = 10; -/// default size of transactions -pub const ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE: usize = 100; -/// default delay before beginning consensus -pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS: u64 = 60; - -impl From for ValidatorConfig { - fn from(val: ValidatorConfigFile) -> Self { - // here stake_value is set to 1, since we don't input stake_value from ValidatorConfigFile for now - ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1, val.is_da) - } -} -impl From for HotShotConfig { - fn from(value: ValidatorConfigFile) -> Self { - let mut config: HotShotConfig = HotShotConfigFile::default().into(); - config.my_own_validator_config = value.into(); - config - } -} - -impl Default for HotShotConfigFile { - fn default() -> Self { - // The default number of nodes is 5 - let staked_da_nodes: usize = 5; - - // Aggregate the DA nodes - let mut known_da_nodes = Vec::new(); - - let gen_known_nodes_with_stake = (0..10) - .map(|node_id| { - let mut cur_validator_config: ValidatorConfig = - ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, false); - - // Add to DA nodes based on index - if node_id < staked_da_nodes as u64 { - known_da_nodes.push(cur_validator_config.public_config()); - cur_validator_config.is_da = true; - } - - cur_validator_config.public_config() - }) - .collect(); - - Self { - num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), - start_threshold: (1, 1), - my_own_validator_config: ValidatorConfig::default(), - known_nodes_with_stake: gen_known_nodes_with_stake, - known_nodes_without_stake: vec![], - staked_da_nodes, - known_da_nodes, - fixed_leader_for_gpuvid: 1, - next_view_timeout: 10000, - view_sync_timeout: Duration::from_millis(1000), - timeout_ratio: (11, 10), - round_start_delay: 1, - start_delay: 1, - num_bootstrap: 5, - builder_timeout: Duration::from_secs(10), - data_request_delay: Some(Duration::from_millis(REQUEST_DATA_DELAY)), - builder_urls: default_builder_urls(), - upgrade: UpgradeConfig::default(), - } - } -} diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 2090eaf273..f5a019084a 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -8,8 +8,6 @@ /// The orchestrator's clients pub mod client; -/// Configuration for the orchestrator -pub mod config; use std::{ collections::{HashMap, HashSet}, @@ -21,10 +19,10 @@ use std::{ use async_lock::RwLock; use client::{BenchResults, BenchResultsDownloadConfig}; -use config::BuilderType; use csv::Writer; use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; use hotshot_types::{ + network::BuilderType, traits::signature_key::{SignatureKey, StakeTableEntryType}, PeerConfig, }; @@ -47,7 +45,7 @@ use vbs::{ BinarySerializer, }; -use crate::config::{NetworkConfig, PublicKeysFile}; +use hotshot_types::network::{NetworkConfig, PublicKeysFile}; /// Orchestrator is not, strictly speaking, bound to the network; it can have its own versioning. /// Orchestrator Version (major) diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index b264fb7dfb..cea9e40328 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -27,8 +27,8 @@ use hotshot_builder_api::v0_1::{ data_source::BuilderDataSource, }; use hotshot_example_types::block_types::TestTransaction; -use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_types::{ + network::RandomBuilderConfig, traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}, utils::BuilderCommitment, vid::VidCommitment, diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 9be33d78a6..f63560078f 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -15,16 +15,18 @@ use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, node_types::{TestTypes, TestVersions}, }; -use hotshot_orchestrator::config::RandomBuilderConfig; use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; use hotshot_testing::block_builder::{ BuilderTask, RandomBuilderImplementation, TestBuilderImplementation, }; -use hotshot_types::traits::{ - block_contents::vid_commitment, - node_implementation::{NodeType, Versions}, - signature_key::SignatureKey, - BlockPayload, +use hotshot_types::{ + network::RandomBuilderConfig, + traits::{ + block_contents::vid_commitment, + node_implementation::{NodeType, Versions}, + signature_key::SignatureKey, + BlockPayload, + }, }; use tide_disco::Url; diff --git a/testing/tests/tests_1/gen_key_pair.rs b/testing/tests/tests_1/gen_key_pair.rs index 3af843264d..301542a0a1 100644 --- a/testing/tests/tests_1/gen_key_pair.rs +++ b/testing/tests/tests_1/gen_key_pair.rs @@ -12,8 +12,7 @@ mod tests { use std::{env, fs::File, io::prelude::*}; use hotshot::types::{BLSPubKey, SignatureKey}; - use hotshot_orchestrator::config::ValidatorConfigFile; - use hotshot_types::ValidatorConfig; + use hotshot_types::{validator_config::ValidatorConfigFile, ValidatorConfig}; #[test] fn gen_key_pair_gen_from_config_file() { let config_file = ValidatorConfigFile::from_file("config/ValidatorConfigFile.toml"); diff --git a/types/Cargo.toml b/types/Cargo.toml index e34b82bf8b..ca2541d709 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -28,7 +28,7 @@ ethereum-types = { workspace = true } futures = { workspace = true } cdn-proto = { workspace = true } reqwest = { workspace = true } - +serde-inline-default = "0.2" lazy_static = { workspace = true } memoize = { workspace = true } rand = { workspace = true } @@ -52,9 +52,10 @@ dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = { workspace = true } vec1 = { workspace = true } libp2p = { workspace = true } - -[dev-dependencies] serde_json = { workspace = true } +surf-disco = { workspace = true } +toml = { workspace = true } +clap = { workspace = true } [features] gpu-vid = ["jf-vid/gpu-vid"] diff --git a/types/src/constants.rs b/types/src/constants.rs index d559e93655..7851ff5219 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -69,3 +69,12 @@ pub const LEGACY_BUILDER_MODULE: &str = "block_info"; /// The `tide` module name for the marketplace builder pub const MARKETPLACE_BUILDER_MODULE: &str = "bundle_info"; + +/// default number of rounds to run +pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS: usize = 100; +/// default number of transactions per round +pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND: usize = 10; +/// default size of transactions +pub const ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE: usize = 100; +/// default delay before beginning consensus +pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS: u64 = 60; diff --git a/types/src/hotshot_config_file.rs b/types/src/hotshot_config_file.rs new file mode 100644 index 0000000000..9eb6be0104 --- /dev/null +++ b/types/src/hotshot_config_file.rs @@ -0,0 +1,153 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{num::NonZeroUsize, time::Duration, vec}; + +use crate::{ + constants::REQUEST_DATA_DELAY, traits::signature_key::SignatureKey, ExecutionType, + HotShotConfig, PeerConfig, ValidatorConfig, +}; +use surf_disco::Url; +use vec1::Vec1; + +use crate::upgrade_config::UpgradeConfig; + +/// Default builder URL, used as placeholder +fn default_builder_urls() -> Vec1 { + vec1::vec1![Url::parse("http://0.0.0.0:3311").unwrap()] +} + +/// Holds configuration for a `HotShot` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(bound(deserialize = ""))] +pub struct HotShotConfigFile { + /// The proportion of nodes required before the orchestrator issues the ready signal, + /// expressed as (numerator, denominator) + pub start_threshold: (u64, u64), + /// Total number of staked nodes in the network + pub num_nodes_with_stake: NonZeroUsize, + #[serde(skip)] + /// My own public key, secret key, stake value + pub my_own_validator_config: ValidatorConfig, + #[serde(skip)] + /// The known nodes' public key and stake value + pub known_nodes_with_stake: Vec>, + #[serde(skip)] + /// The known DA nodes' public key and stake values + pub known_da_nodes: Vec>, + #[serde(skip)] + /// The known non-staking nodes' + pub known_nodes_without_stake: Vec, + /// Number of staking DA nodes + pub staked_da_nodes: usize, + /// Number of fixed leaders for GPU VID + pub fixed_leader_for_gpuvid: usize, + /// Base duration for next-view timeout, in milliseconds + pub next_view_timeout: u64, + /// Duration for view sync round timeout + pub view_sync_timeout: Duration, + /// The exponential backoff ration for the next-view timeout + pub timeout_ratio: (u64, u64), + /// The delay a leader inserts before starting pre-commit, in milliseconds + pub round_start_delay: u64, + /// Delay after init before starting consensus, in milliseconds + pub start_delay: u64, + /// Number of network bootstrap nodes + pub num_bootstrap: usize, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, + /// Time to wait until we request data associated with a proposal + pub data_request_delay: Option, + /// Builder API base URL + #[serde(default = "default_builder_urls")] + pub builder_urls: Vec1, + /// Upgrade config + pub upgrade: UpgradeConfig, +} + +impl From> for HotShotConfig { + fn from(val: HotShotConfigFile) -> Self { + HotShotConfig { + execution_type: ExecutionType::Continuous, + start_threshold: val.start_threshold, + num_nodes_with_stake: val.num_nodes_with_stake, + known_da_nodes: val.known_da_nodes, + known_nodes_with_stake: val.known_nodes_with_stake, + known_nodes_without_stake: val.known_nodes_without_stake, + my_own_validator_config: val.my_own_validator_config, + da_staked_committee_size: val.staked_da_nodes, + fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, + next_view_timeout: val.next_view_timeout, + view_sync_timeout: val.view_sync_timeout, + timeout_ratio: val.timeout_ratio, + round_start_delay: val.round_start_delay, + start_delay: val.start_delay, + num_bootstrap: val.num_bootstrap, + builder_timeout: val.builder_timeout, + data_request_delay: val + .data_request_delay + .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), + builder_urls: val.builder_urls, + start_proposing_view: val.upgrade.start_proposing_view, + stop_proposing_view: val.upgrade.stop_proposing_view, + start_voting_view: val.upgrade.start_voting_view, + stop_voting_view: val.upgrade.stop_voting_view, + start_proposing_time: val.upgrade.start_proposing_time, + stop_proposing_time: val.upgrade.stop_proposing_time, + start_voting_time: val.upgrade.start_voting_time, + stop_voting_time: val.upgrade.stop_voting_time, + } + } +} + +impl HotShotConfigFile { + /// Creates a new `HotShotConfigFile` with 5 nodes and 10 DA nodes. + /// + /// # Panics + /// + /// Cannot panic, but will if `NonZeroUsize` is somehow an error. + #[must_use] + pub fn hotshot_config_5_nodes_10_da() -> Self { + let staked_da_nodes: usize = 5; + + let mut known_da_nodes = Vec::new(); + + let gen_known_nodes_with_stake = (0..10) + .map(|node_id| { + let mut cur_validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, false); + + if node_id < staked_da_nodes as u64 { + known_da_nodes.push(cur_validator_config.public_config()); + cur_validator_config.is_da = true; + } + + cur_validator_config.public_config() + }) + .collect(); + + Self { + num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), + start_threshold: (1, 1), + my_own_validator_config: ValidatorConfig::default(), + known_nodes_with_stake: gen_known_nodes_with_stake, + known_nodes_without_stake: vec![], + staked_da_nodes, + known_da_nodes, + fixed_leader_for_gpuvid: 1, + next_view_timeout: 10000, + view_sync_timeout: Duration::from_millis(1000), + timeout_ratio: (11, 10), + round_start_delay: 1, + start_delay: 1, + num_bootstrap: 5, + builder_timeout: Duration::from_secs(10), + data_request_delay: Some(Duration::from_millis(REQUEST_DATA_DELAY)), + builder_urls: default_builder_urls(), + upgrade: UpgradeConfig::default(), + } + } +} diff --git a/types/src/lib.rs b/types/src/lib.rs index 1a9ff34b6c..806667bf9b 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -23,8 +23,13 @@ pub mod constants; pub mod data; pub mod error; pub mod event; +/// Holds the configuration file specification for a HotShot node. +pub mod hotshot_config_file; pub mod light_client; pub mod message; + +/// Holds the network configuration specification for HotShot nodes. +pub mod network; pub mod qc; pub mod request_response; pub mod signature_key; @@ -32,7 +37,12 @@ pub mod simple_certificate; pub mod simple_vote; pub mod stake_table; pub mod traits; + +/// Holds the upgrade configuration specification for HotShot nodes. +pub mod upgrade_config; pub mod utils; +/// Holds the validator configuration specification for HotShot nodes. +pub mod validator_config; pub mod vid; pub mod vote; diff --git a/types/src/network.rs b/types/src/network.rs new file mode 100644 index 0000000000..5c33a2c6e7 --- /dev/null +++ b/types/src/network.rs @@ -0,0 +1,396 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{fs, ops::Range, path::Path, time::Duration, vec}; + +use crate::hotshot_config_file::HotShotConfigFile; +use crate::{ + constants::{ + ORCHESTRATOR_DEFAULT_NUM_ROUNDS, ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS, + ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, + REQUEST_DATA_DELAY, + }, + light_client::StateVerKey, + traits::signature_key::SignatureKey, + HotShotConfig, ValidatorConfig, +}; +use clap::ValueEnum; +use libp2p::{Multiaddr, PeerId}; +use serde_inline_default::serde_inline_default; +use thiserror::Error; +use tracing::error; + +/// Configuration describing a libp2p node +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct Libp2pConfig { + /// The bootstrap nodes to connect to (multiaddress, serialized public key) + pub bootstrap_nodes: Vec<(PeerId, Multiaddr)>, +} + +/// configuration for combined network +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct CombinedNetworkConfig { + /// delay duration before sending a message through the secondary network + pub delay_duration: Duration, +} + +/// a network configuration error +#[derive(Error, Debug)] +pub enum NetworkConfigError { + /// Failed to read NetworkConfig from file + #[error("Failed to read NetworkConfig from file")] + ReadFromFileError(std::io::Error), + /// Failed to deserialize loaded NetworkConfig + #[error("Failed to deserialize loaded NetworkConfig")] + DeserializeError(serde_json::Error), + /// Failed to write NetworkConfig to file + #[error("Failed to write NetworkConfig to file")] + WriteToFileError(std::io::Error), + /// Failed to serialize NetworkConfig + #[error("Failed to serialize NetworkConfig")] + SerializeError(serde_json::Error), + /// Failed to recursively create path to NetworkConfig + #[error("Failed to recursively create path to NetworkConfig")] + FailedToCreatePath(std::io::Error), +} + +#[derive(Clone, Copy, Debug, serde::Serialize, serde::Deserialize, Default, ValueEnum)] +/// configuration for builder type to use +pub enum BuilderType { + /// Use external builder, [config.builder_url] must be + /// set to correct builder address + External, + #[default] + /// Simple integrated builder will be started and used by each hotshot node + Simple, + /// Random integrated builder will be started and used by each hotshot node + Random, +} + +/// Node PeerConfig keys +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +#[serde(bound(deserialize = ""))] +pub struct PeerConfigKeys { + /// The peer's public key + pub stake_table_key: KEY, + /// the peer's state public key + pub state_ver_key: StateVerKey, + /// the peer's stake + pub stake: u64, + /// whether the node is a DA node + pub da: bool, +} + +/// Options controlling how the random builder generates blocks +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct RandomBuilderConfig { + /// How many transactions to include in a block + pub txn_in_block: u64, + /// How many blocks to generate per second + pub blocks_per_second: u32, + /// Range of how big a transaction can be (in bytes) + pub txn_size: Range, +} + +impl Default for RandomBuilderConfig { + fn default() -> Self { + Self { + txn_in_block: 100, + blocks_per_second: 1, + txn_size: 20..100, + } + } +} + +/// a network configuration +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +#[serde(bound(deserialize = ""))] +pub struct NetworkConfig { + /// number of views to run + pub rounds: usize, + /// whether DA membership is determined by index. + /// if true, the first k nodes to register form the DA committee + /// if false, DA membership is requested by the nodes + pub indexed_da: bool, + /// number of transactions per view + pub transactions_per_round: usize, + /// password to have the orchestrator start the network, + /// regardless of the number of nodes connected. + pub manual_start_password: Option, + /// number of bootstrap nodes + pub num_bootrap: usize, + /// timeout before starting the next view + pub next_view_timeout: u64, + /// timeout before starting next view sync round + pub view_sync_timeout: Duration, + /// The maximum amount of time a leader can wait to get a block from a builder + pub builder_timeout: Duration, + /// time to wait until we request data associated with a proposal + pub data_request_delay: Duration, + /// global index of node (for testing purposes a uid) + pub node_index: u64, + /// unique seed (for randomness? TODO) + pub seed: [u8; 32], + /// size of transactions + pub transaction_size: usize, + /// delay before beginning consensus + pub start_delay_seconds: u64, + /// name of the key type (for debugging) + pub key_type_name: String, + /// the libp2p config + pub libp2p_config: Option, + /// the hotshot config + pub config: HotShotConfig, + /// The address for the Push CDN's "marshal", A.K.A. load balancer + pub cdn_marshal_address: Option, + /// combined network config + pub combined_network_config: Option, + /// the commit this run is based on + pub commit_sha: String, + /// builder to use + pub builder: BuilderType, + /// random builder config + pub random_builder: Option, + /// The list of public keys that are allowed to connect to the orchestrator + pub public_keys: Vec>, +} + +/// the source of the network config +pub enum NetworkConfigSource { + /// we source the network configuration from the orchestrator + Orchestrator, + /// we source the network configuration from a config file on disk + File, +} + +impl NetworkConfig { + /// Get a temporary node index for generating a validator config + #[must_use] + pub fn generate_init_validator_config(cur_node_index: u16, is_da: bool) -> ValidatorConfig { + // This cur_node_index is only used for key pair generation, it's not bound with the node, + // later the node with the generated key pair will get a new node_index from orchestrator. + ValidatorConfig::generated_from_seed_indexed([0u8; 32], cur_node_index.into(), 1, is_da) + } + + /// Loads a `NetworkConfig` from a file. + /// + /// This function takes a file path as a string, reads the file, and then deserializes the contents into a `NetworkConfig`. + /// + /// # Arguments + /// + /// * `file` - A string representing the path to the file from which to load the `NetworkConfig`. + /// + /// # Returns + /// + /// This function returns a `Result` that contains a `NetworkConfig` if the file was successfully read and deserialized, or a `NetworkConfigError` if an error occurred. + /// + /// # Errors + /// + /// This function will return an error if the file cannot be read or if the contents cannot be deserialized into a `NetworkConfig`. + /// + /// # Examples + /// + /// ```ignore + /// # use hotshot_orchestrator::config::NetworkConfig; + /// # use hotshot_types::signature_key::BLSPubKey; + /// // # use hotshot::traits::election::static_committee::StaticElectionConfig; + /// let file = "/path/to/my/config".to_string(); + /// // NOTE: broken due to staticelectionconfig not being importable + /// // cannot import staticelectionconfig from hotshot without creating circular dependency + /// // making this work probably involves the `types` crate implementing a dummy + /// // electionconfigtype just ot make this example work + /// let config = NetworkConfig::::from_file(file).unwrap(); + /// ``` + pub fn from_file(file: String) -> Result { + // read from file + let data = match fs::read(file) { + Ok(data) => data, + Err(e) => { + return Err(NetworkConfigError::ReadFromFileError(e)); + } + }; + + // deserialize + match serde_json::from_slice(&data) { + Ok(data) => Ok(data), + Err(e) => Err(NetworkConfigError::DeserializeError(e)), + } + } + + /// Serializes the `NetworkConfig` and writes it to a file. + /// + /// This function takes a file path as a string, serializes the `NetworkConfig` into JSON format using `serde_json` and then writes the serialized data to the file. + /// + /// # Arguments + /// + /// * `file` - A string representing the path to the file where the `NetworkConfig` should be saved. + /// + /// # Returns + /// + /// This function returns a `Result` that contains `()` if the `NetworkConfig` was successfully serialized and written to the file, or a `NetworkConfigError` if an error occurred. + /// + /// # Errors + /// + /// This function will return an error if the `NetworkConfig` cannot be serialized or if the file cannot be written. + /// + /// # Examples + /// + /// ```ignore + /// # use hotshot_orchestrator::config::NetworkConfig; + /// let file = "/path/to/my/config".to_string(); + /// let config = NetworkConfig::from_file(file); + /// config.to_file(file).unwrap(); + /// ``` + pub fn to_file(&self, file: String) -> Result<(), NetworkConfigError> { + // ensure the directory containing the config file exists + if let Some(dir) = Path::new(&file).parent() { + if let Err(e) = fs::create_dir_all(dir) { + return Err(NetworkConfigError::FailedToCreatePath(e)); + } + } + + // serialize + let serialized = match serde_json::to_string_pretty(self) { + Ok(data) => data, + Err(e) => { + return Err(NetworkConfigError::SerializeError(e)); + } + }; + + // write to file + match fs::write(file, serialized) { + Ok(()) => Ok(()), + Err(e) => Err(NetworkConfigError::WriteToFileError(e)), + } + } +} + +impl Default for NetworkConfig { + fn default() -> Self { + Self { + rounds: ORCHESTRATOR_DEFAULT_NUM_ROUNDS, + indexed_da: true, + transactions_per_round: ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, + node_index: 0, + seed: [0u8; 32], + transaction_size: ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, + manual_start_password: None, + libp2p_config: None, + config: HotShotConfigFile::hotshot_config_5_nodes_10_da().into(), + start_delay_seconds: 60, + key_type_name: std::any::type_name::().to_string(), + cdn_marshal_address: None, + combined_network_config: None, + next_view_timeout: 10, + view_sync_timeout: Duration::from_secs(2), + num_bootrap: 5, + builder_timeout: Duration::from_secs(10), + data_request_delay: Duration::from_millis(2500), + commit_sha: String::new(), + builder: BuilderType::default(), + random_builder: None, + public_keys: vec![], + } + } +} + +/// a network config stored in a file +#[serde_inline_default] +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] +#[serde(bound(deserialize = ""))] +pub struct PublicKeysFile { + /// The list of public keys that are allowed to connect to the orchestrator + /// + /// If nonempty, this list becomes the stake table and is used to determine DA membership (ignoring the node's request). + #[serde(default)] + pub public_keys: Vec>, +} + +/// a network config stored in a file +#[serde_inline_default] +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] +#[serde(bound(deserialize = ""))] +pub struct NetworkConfigFile { + /// number of views to run + #[serde_inline_default(ORCHESTRATOR_DEFAULT_NUM_ROUNDS)] + pub rounds: usize, + /// number of views to run + #[serde(default)] + pub indexed_da: bool, + /// number of transactions per view + #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND)] + pub transactions_per_round: usize, + /// password to have the orchestrator start the network, + /// regardless of the number of nodes connected. + #[serde(default)] + pub manual_start_password: Option, + /// global index of node (for testing purposes a uid) + #[serde(default)] + pub node_index: u64, + /// unique seed (for randomness? TODO) + #[serde(default)] + pub seed: [u8; 32], + /// size of transactions + #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE)] + pub transaction_size: usize, + /// delay before beginning consensus + #[serde_inline_default(ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS)] + pub start_delay_seconds: u64, + /// the hotshot config file + #[serde(default = "HotShotConfigFile::hotshot_config_5_nodes_10_da")] + pub config: HotShotConfigFile, + /// The address of the Push CDN's "marshal", A.K.A. load balancer + #[serde(default)] + pub cdn_marshal_address: Option, + /// combined network config + #[serde(default)] + pub combined_network_config: Option, + /// builder to use + #[serde(default)] + pub builder: BuilderType, + /// random builder configuration + #[serde(default)] + pub random_builder: Option, + /// The list of public keys that are allowed to connect to the orchestrator + /// + /// If nonempty, this list becomes the stake table and is used to determine DA membership (ignoring the node's request). + #[serde(default)] + pub public_keys: Vec>, +} + +impl From> for NetworkConfig { + fn from(val: NetworkConfigFile) -> Self { + NetworkConfig { + rounds: val.rounds, + indexed_da: val.indexed_da, + transactions_per_round: val.transactions_per_round, + node_index: 0, + num_bootrap: val.config.num_bootstrap, + manual_start_password: val.manual_start_password, + next_view_timeout: val.config.next_view_timeout, + view_sync_timeout: val.config.view_sync_timeout, + builder_timeout: val.config.builder_timeout, + data_request_delay: val + .config + .data_request_delay + .unwrap_or(Duration::from_millis(REQUEST_DATA_DELAY)), + seed: val.seed, + transaction_size: val.transaction_size, + libp2p_config: Some(Libp2pConfig { + bootstrap_nodes: Vec::new(), + }), + config: val.config.into(), + key_type_name: std::any::type_name::().to_string(), + start_delay_seconds: val.start_delay_seconds, + cdn_marshal_address: val.cdn_marshal_address, + combined_network_config: val.combined_network_config, + commit_sha: String::new(), + builder: val.builder, + random_builder: val.random_builder, + public_keys: val.public_keys, + } + } +} diff --git a/types/src/upgrade_config.rs b/types/src/upgrade_config.rs new file mode 100644 index 0000000000..5c3b5a3fba --- /dev/null +++ b/types/src/upgrade_config.rs @@ -0,0 +1,44 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(bound(deserialize = ""))] +/// Holds configuration for the upgrade task. +pub struct UpgradeConfig { + /// View to start proposing an upgrade + pub start_proposing_view: u64, + /// View to stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_view <= start_proposing_view. + pub stop_proposing_view: u64, + /// View to start voting on an upgrade + pub start_voting_view: u64, + /// View to stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_view <= start_voting_view. + pub stop_voting_view: u64, + /// Unix time in seconds at which we start proposing an upgrade + pub start_proposing_time: u64, + /// Unix time in seconds at which we stop proposing an upgrade. To prevent proposing an upgrade, set stop_proposing_time <= start_proposing_time. + pub stop_proposing_time: u64, + /// Unix time in seconds at which we start voting on an upgrade + pub start_voting_time: u64, + /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. + pub stop_voting_time: u64, +} + +// Explicitly implementing `Default` for clarity. +#[allow(clippy::derivable_impls)] +impl Default for UpgradeConfig { + fn default() -> Self { + UpgradeConfig { + start_proposing_view: u64::MAX, + stop_proposing_view: 0, + start_voting_view: u64::MAX, + stop_voting_view: 0, + start_proposing_time: u64::MAX, + stop_proposing_time: 0, + start_voting_time: u64::MAX, + stop_voting_time: 0, + } + } +} diff --git a/types/src/validator_config.rs b/types/src/validator_config.rs new file mode 100644 index 0000000000..7ede69b09b --- /dev/null +++ b/types/src/validator_config.rs @@ -0,0 +1,53 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{env, fs, path::PathBuf}; + +use crate::{traits::signature_key::SignatureKey, ValidatorConfig}; +use toml; +use tracing::error; + +/// Holds configuration for a validator node +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Default)] +#[serde(bound(deserialize = ""))] +pub struct ValidatorConfigFile { + /// The validator's seed + pub seed: [u8; 32], + /// The validator's index, which can be treated as another input to the seed + pub node_id: u64, + // The validator's stake, commented for now + // pub stake_value: u64, + /// Whether or not we are DA + pub is_da: bool, +} + +impl ValidatorConfigFile { + /// read the validator config from a file + /// # Panics + /// Panics if unable to get the current working directory + pub fn from_file(dir_str: &str) -> Self { + let current_working_dir = match env::current_dir() { + Ok(dir) => dir, + Err(e) => { + error!("get_current_working_dir error: {:?}", e); + PathBuf::from("") + } + }; + let filename = + current_working_dir.into_os_string().into_string().unwrap() + "/../../" + dir_str; + let contents = fs::read_to_string(filename.clone()).expect("Could not read file"); + let data: ValidatorConfigFile = + toml::from_str(&contents).expect("Unable to load data from file"); + data + } +} + +impl From for ValidatorConfig { + fn from(val: ValidatorConfigFile) -> Self { + // here stake_value is set to 1, since we don't input stake_value from ValidatorConfigFile for now + ValidatorConfig::generated_from_seed_indexed(val.seed, val.node_id, 1, val.is_da) + } +} From fe78089dd7aa9573d67ccb76317995af4ac1128c Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Thu, 10 Oct 2024 13:52:34 -0600 Subject: [PATCH 1240/1393] Add a condition (#3749) --- types/src/consensus.rs | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 26d7d8ca3f..759127fef5 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -563,15 +563,22 @@ impl Consensus { pub fn update_validated_state_map( &mut self, view_number: TYPES::Time, - view: View, + new_view: View, ) -> Result<()> { if let Some(existing_view) = self.validated_state_map().get(&view_number) { - if let ViewInner::Leaf { .. } = existing_view.view_inner { - match view.view_inner { - ViewInner::Leaf { ref delta, .. } => { + if let ViewInner::Leaf { + delta: ref existing_delta, + .. + } = existing_view.view_inner + { + match new_view.view_inner { + ViewInner::Leaf { + delta: ref new_delta, + .. + } => { ensure!( - delta.is_some(), - "Skipping the state update to not override a `Leaf` view with `None` state delta." + new_delta.is_some() || existing_delta.is_none(), + "Skipping the state update to not override a `Leaf` view with `Some` state delta." ); } _ => { @@ -580,7 +587,7 @@ impl Consensus { } } } - self.validated_state_map.insert(view_number, view); + self.validated_state_map.insert(view_number, new_view); Ok(()) } From 90185e683e9fd0dc4a0853d377bdc4817bb3bfc0 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Thu, 10 Oct 2024 14:10:56 -0600 Subject: [PATCH 1241/1393] [Easy] Remove TYPES from Libp2pNetwork::from_config (#3750) --- examples/infra/mod.rs | 2 +- hotshot/src/traits/networking/libp2p_network.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 4875dfd149..96072cce89 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -760,7 +760,7 @@ where derive_libp2p_multiaddr(&bind_address).expect("failed to derive bind address"); // Create the Libp2p network - let libp2p_network = Libp2pNetwork::from_config::( + let libp2p_network = Libp2pNetwork::from_config( config.clone(), GossipConfig::default(), bind_address, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 382a695e1e..41b25def7f 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -408,7 +408,7 @@ impl Libp2pNetwork { /// /// # Panics /// If we are unable to calculate the replication factor - pub async fn from_config( + pub async fn from_config( mut config: NetworkConfig, gossip_config: GossipConfig, bind_address: Multiaddr, From 4d2373ec8843ef057d292849611ba34d86c10f17 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 10 Oct 2024 15:17:58 -0600 Subject: [PATCH 1242/1393] Expose Requesting Proposal (#3748) * expose requesting proposal * lint * logging return error from dep * fix comment * fix * fix test/when we store --- hotshot/src/types/handle.rs | 85 +++++++++++++++++++++++-- task-impls/src/consensus/handlers.rs | 5 +- task-impls/src/helpers.rs | 12 ++-- testing/tests/tests_1/consensus_task.rs | 1 - 4 files changed, 87 insertions(+), 16 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 0879a349fe..8893fc5058 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -8,16 +8,26 @@ use std::sync::Arc; +use anyhow::{anyhow, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; +use committable::{Commitment, Committable}; use futures::Stream; -use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry, Task, TaskState}; -use hotshot_task_impls::events::HotShotEvent; +use hotshot_task::{ + dependency::{Dependency, EventDependency}, + task::{ConsensusTaskRegistry, NetworkTaskRegistry, Task, TaskState}, +}; +use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ consensus::Consensus, - data::Leaf, + data::{Leaf, QuorumProposal}, error::HotShotError, - traits::{election::Membership, network::ConnectedNetwork, node_implementation::NodeType}, + request_response::ProposalRequestPayload, + traits::{ + consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, + node_implementation::NodeType, signature_key::SignatureKey, + }, + vote::HasViewNumber, }; use tracing::instrument; @@ -77,6 +87,73 @@ impl + 'static, V: Versions> self.output_event_stream.1.activate_cloned() } + /// Request a proposal from the all other nodes. Will block until some node + /// returns a valid proposal with the requested commitment. If nobody has the + /// proposal this will block forever + /// + /// # Errors + /// Errors if signing the request for proposal fails + pub async fn request_proposal( + &self, + view: TYPES::Time, + leaf_commitment: Commitment>, + ) -> Result> { + // We need to be able to sign this request before submitting it to the network. Compute the + // payload first. + let signed_proposal_request = ProposalRequestPayload { + view_number: view, + key: self.public_key(), + }; + + // Finally, compute the signature for the payload. + let signature = TYPES::SignatureKey::sign( + self.private_key(), + signed_proposal_request.commit().as_ref(), + )?; + + // First, broadcast that we need a proposal + broadcast_event( + HotShotEvent::QuorumProposalRequestSend(signed_proposal_request, signature).into(), + &self.internal_event_stream.0, + ) + .await; + + let mem = &self.memberships.quorum_membership; + let upgrade_lock = &self.hotshot.upgrade_lock; + loop { + let hs_event = EventDependency::new( + self.internal_event_stream.1.activate_cloned(), + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = event { + quorum_proposal.data.view_number() == view + } else { + false + } + }), + ) + .completed() + .await + .ok_or(anyhow!("Event dependency failed to get event"))?; + + // Then, if it's `Some`, make sure that the data is correct + + if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { + // Make sure that the quorum_proposal is valid + if let Err(err) = quorum_proposal.validate_signature(mem, upgrade_lock).await { + tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); + continue; + } + let proposed_leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); + let commit = proposed_leaf.commit(upgrade_lock).await; + if commit == leaf_commitment { + return Ok(quorum_proposal.data.clone()); + } + tracing::warn!("Proposal receied from request has different commitment than expected.\nExpected = {:?}\nReceived{:?}", leaf_commitment, commit); + } + } + } + /// HACK so we can know the types when running tests... /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 193ebbd257..23e3459d27 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -4,9 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use core::time::Duration; -use std::{marker::PhantomData, sync::Arc}; - use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -14,6 +11,7 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; use chrono::Utc; +use core::time::Duration; use futures::FutureExt; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus, View}, @@ -33,6 +31,7 @@ use hotshot_types::{ utils::ViewInner, vote::{Certificate, HasViewNumber}, }; +use std::{marker::PhantomData, sync::Arc}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 1c266029c9..fa0156c424 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -492,13 +492,6 @@ pub async fn validate_proposal_safety_and_liveness< tracing::debug!("Internal proposal update failed; error = {e:#}"); }; - // Update our persistent storage of the proposal. We also itentionally swallow - // this error as it should not affect consensus and would, instead, imply an - // issue on the sequencer side. - if let Err(e) = storage.write().await.append_proposal(&proposal).await { - tracing::debug!("Persisting the proposal update failed; error = {e:#}"); - }; - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. broadcast_event( Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), @@ -558,8 +551,11 @@ pub async fn validate_proposal_safety_and_liveness< }); } - // We accept the proposal, notify the application layer + // Update our persistent storage of the proposal. If we cannot store the proposal reutrn + // and error so we don't vote + storage.write().await.append_proposal(&proposal).await?; + // We accept the proposal, notify the application layer broadcast_event( Event { view_number, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs index ad27388433..d5e39eab51 100644 --- a/testing/tests/tests_1/consensus_task.rs +++ b/testing/tests/tests_1/consensus_task.rs @@ -609,7 +609,6 @@ async fn test_vid_disperse_storage_failure() { let expectations = vec![Expectations::from_outputs(all_predicates![ validated_state_updated(), exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), ])]; let consensus_state = From 29455628e404f3135a54f9deb9ee70fbf146bc4e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 10 Oct 2024 17:09:53 -0600 Subject: [PATCH 1243/1393] Change Return type of `request_proposal` (#3752) * return future from request prop * Change return type * fmt --- hotshot/src/types/handle.rs | 88 ++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 8893fc5058..7cb3524452 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,6 +22,7 @@ use hotshot_types::{ consensus::Consensus, data::{Leaf, QuorumProposal}, error::HotShotError, + message::Proposal, request_response::ProposalRequestPayload, traits::{ consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, @@ -93,11 +94,11 @@ impl + 'static, V: Versions> /// /// # Errors /// Errors if signing the request for proposal fails - pub async fn request_proposal( + pub fn request_proposal( &self, view: TYPES::Time, leaf_commitment: Commitment>, - ) -> Result> { + ) -> Result>>>> { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. let signed_proposal_request = ProposalRequestPayload { @@ -111,47 +112,54 @@ impl + 'static, V: Versions> signed_proposal_request.commit().as_ref(), )?; - // First, broadcast that we need a proposal - broadcast_event( - HotShotEvent::QuorumProposalRequestSend(signed_proposal_request, signature).into(), - &self.internal_event_stream.0, - ) - .await; - - let mem = &self.memberships.quorum_membership; - let upgrade_lock = &self.hotshot.upgrade_lock; - loop { - let hs_event = EventDependency::new( - self.internal_event_stream.1.activate_cloned(), - Box::new(move |event| { - let event = event.as_ref(); - if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = event { - quorum_proposal.data.view_number() == view - } else { - false - } - }), + let mem = self.memberships.quorum_membership.clone(); + let upgrade_lock = self.hotshot.upgrade_lock.clone(); + let receiver = self.internal_event_stream.1.activate_cloned(); + let sender = self.internal_event_stream.0.clone(); + Ok(async move { + // First, broadcast that we need a proposal + broadcast_event( + HotShotEvent::QuorumProposalRequestSend(signed_proposal_request, signature).into(), + &sender, ) - .completed() - .await - .ok_or(anyhow!("Event dependency failed to get event"))?; - - // Then, if it's `Some`, make sure that the data is correct - - if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { - // Make sure that the quorum_proposal is valid - if let Err(err) = quorum_proposal.validate_signature(mem, upgrade_lock).await { - tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); - continue; - } - let proposed_leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); - let commit = proposed_leaf.commit(upgrade_lock).await; - if commit == leaf_commitment { - return Ok(quorum_proposal.data.clone()); + .await; + loop { + let hs_event = EventDependency::new( + receiver.clone(), + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = event { + quorum_proposal.data.view_number() == view + } else { + false + } + }), + ) + .completed() + .await + .ok_or(anyhow!("Event dependency failed to get event"))?; + + // Then, if it's `Some`, make sure that the data is correct + + if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() + { + // Make sure that the quorum_proposal is valid + if let Err(err) = quorum_proposal + .validate_signature(&mem, &upgrade_lock) + .await + { + tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); + continue; + } + let proposed_leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); + let commit = proposed_leaf.commit(&upgrade_lock).await; + if commit == leaf_commitment { + return Ok(quorum_proposal.clone()); + } + tracing::warn!("Proposal receied from request has different commitment than expected.\nExpected = {:?}\nReceived{:?}", leaf_commitment, commit); } - tracing::warn!("Proposal receied from request has different commitment than expected.\nExpected = {:?}\nReceived{:?}", leaf_commitment, commit); } - } + }) } /// HACK so we can know the types when running tests... From 5ff69efce32ffb9b7b6a03e837da5d9d7737955e Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 11 Oct 2024 09:33:42 -0600 Subject: [PATCH 1244/1393] fix key type in prp (#3757) --- hotshot/src/types/handle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 7cb3524452..8f5a79824a 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -103,7 +103,7 @@ impl + 'static, V: Versions> // payload first. let signed_proposal_request = ProposalRequestPayload { view_number: view, - key: self.public_key(), + key: self.public_key().clone(), }; // Finally, compute the signature for the payload. From 02c28a1cf9498b05ae859e1bf2ee7e2364dd72ed Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 11 Oct 2024 11:55:57 -0500 Subject: [PATCH 1245/1393] Adjust some log levels (#3737) * adjust some log levels * format * PR comments --- hotshot/src/traits/networking/combined_network.rs | 4 ++-- libp2p-networking/src/network/behaviours/dht/mod.rs | 6 +++--- task-impls/src/da.rs | 4 ++-- task-impls/src/network.rs | 6 +++--- task-impls/src/transactions.rs | 10 +++++----- task-impls/src/vid.rs | 4 ++-- task-impls/src/view_sync.rs | 10 +++++----- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 1e350ba5ef..297970966f 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -45,7 +45,7 @@ use hotshot_types::{ }; use lru::LruCache; use parking_lot::RwLock as PlRwLock; -use tracing::{debug, warn}; +use tracing::{debug, info, warn}; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; use crate::traits::implementations::Libp2pNetwork; @@ -149,7 +149,7 @@ impl CombinedNetworks { { // If the primary failed more than `COMBINED_NETWORK_MIN_PRIMARY_FAILURES` times, // we don't want to delay this message, and from now on we consider the primary as down - warn!( + info!( "View progression is slower than normally, stop delaying messages on the secondary" ); self.primary_down.store(true, Ordering::Relaxed); diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index a11e0ffbf4..27bf8a1b71 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -277,7 +277,7 @@ impl DHTBehaviour { } }, Err(err) => { - error!("GOT ERROR IN KAD QUERY {:?}", err); + warn!("Error in Kademlia query: {:?}", err); 0 } }, @@ -342,7 +342,7 @@ impl DHTBehaviour { // Initiate new query that hits more replicas if retry_count > 0 { let new_retry_count = retry_count - 1; - error!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes. {:?} retries left", progress, new_retry_count); + warn!("Get DHT: Internal disagreement for get dht request {:?}! requerying with more nodes. {:?} retries left", progress, new_retry_count); let new_factor = NonZeroUsize::max( NonZeroUsize::new(num_replicas.get() + 1).unwrap_or(num_replicas), *MAX_DHT_QUERY_SIZE, @@ -357,7 +357,7 @@ impl DHTBehaviour { records: HashMap::default(), }); } - error!("Get DHT: Internal disagreement for get dht request {:?}! Giving up because out of retries. ", progress); + warn!("Get DHT: Internal disagreement for get dht request {:?}! Giving up because out of retries. ", progress); } } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 19452752d6..0189e20d41 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -35,7 +35,7 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -284,7 +284,7 @@ impl, V: Versions> DaTaskState 1 { - warn!("View changed by more than 1 going to view {:?}", view); + info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 91831ccb1b..b841d6a15c 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -287,7 +287,7 @@ impl< } match net.vid_broadcast_message(messages).await { Ok(()) => {} - Err(e) => error!("Failed to send message from network task: {:?}", e), + Err(e) => warn!("Failed to send message from network task: {:?}", e), } }); @@ -579,7 +579,7 @@ impl< .direct_message(serialized_message.clone(), recipient) .await { - error!("Failed to send message from network task: {e:?}"); + warn!("Failed to send message: {e:?}"); } // Otherwise, send the next message. @@ -590,7 +590,7 @@ impl< match transmit_result { Ok(()) => {} - Err(e) => error!("Failed to send message from network task: {:?}", e), + Err(e) => warn!("Failed to send message task: {:?}", e), } }); } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index fc10b549ee..bd1aa73307 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -32,7 +32,7 @@ use hotshot_types::{ utils::ViewInner, vid::{VidCommitment, VidPrecomputeData}, }; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument, warn}; use url::Url; use vbs::version::{StaticVersionType, Version}; use vec1::Vec1; @@ -188,7 +188,7 @@ impl, V: Versions> TransactionTask .await; } else { // If we couldn't get a block, send an empty block - warn!( + info!( "Failed to get a block for view {:?}, proposing empty block", block_view ); @@ -445,7 +445,7 @@ impl, V: Versions> TransactionTask let mut make_block = false; if *view - *self.cur_view > 1 { - error!("View changed by more than 1 going to view {:?}", view); + info!("View changed by more than 1 going to view {:?}", view); make_block = self.membership.leader(view) == self.public_key; } self.cur_view = view; @@ -574,7 +574,7 @@ impl, V: Versions> TransactionTask // We failed to get a block Ok(Err(err)) => { - tracing::warn!("Couldn't get a block: {err:#}"); + tracing::info!("Couldn't get a block: {err:#}"); // pause a bit async_sleep(RETRY_DELAY).await; continue; @@ -582,7 +582,7 @@ impl, V: Versions> TransactionTask // We timed out while getting available blocks Err(err) => { - error!(%err, "Timeout while getting available blocks"); + info!(%err, "Timeout while getting available blocks"); return None; } } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3243f356ae..105aad46dc 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -20,7 +20,7 @@ use hotshot_types::{ BlockPayload, }, }; -use tracing::{debug, error, instrument, warn}; +use tracing::{debug, error, info, instrument}; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -139,7 +139,7 @@ impl> VidTaskState { } if *view - *self.cur_view > 1 { - warn!("View changed by more than 1 going to view {:?}", view); + info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 0b8452a2f1..630907c0ac 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -440,7 +440,7 @@ impl, V: Versions> ViewSyncTaskSta self.num_timeouts_tracked += 1; let leader = self.membership.leader(view_number); - error!( + warn!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), view_number = *view_number, @@ -557,7 +557,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); + warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( @@ -649,7 +649,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - info!( + warn!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", relay ); @@ -745,7 +745,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - info!("Vote sending timed out in ViewSyncTrigger"); + warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), @@ -811,7 +811,7 @@ impl, V: Versions> let last_cert = last_seen_certificate.clone(); async move { async_sleep(timeout).await; - info!( + warn!( "Vote sending timed out in ViewSyncTimeout relay = {}", relay ); From 59f8f1c8e4112fa1b043e4e3376b2cf8b796dd5f Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 11 Oct 2024 20:05:24 +0200 Subject: [PATCH 1246/1393] Allow for different builders in cross-tests (#3756) --- macros/src/lib.rs | 66 +++++++++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 22 deletions(-) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 9ab4e4aea1..e874468cda 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -19,6 +19,9 @@ use syn::{ struct CrossTestData { /// imlementations impls: ExprArray, + /// builder impl + #[builder(default = "syn::parse_str(\"[SimpleBuilderImplementation]\").unwrap()")] + builder_impls: ExprArray, /// versions versions: ExprArray, /// types @@ -51,6 +54,8 @@ struct TestData { ty: ExprPath, /// impl imply: ExprPath, + /// builder implementation + builder_impl: ExprPath, /// impl version: ExprPath, /// name of test @@ -110,6 +115,7 @@ impl TestData { test_name, metadata, ignore, + builder_impl, } = self; let slow_attribute = if ignore.value() { @@ -130,7 +136,7 @@ impl TestData { async fn #test_name() { async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); - TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata), 0).launch().run_test::().await; + hotshot_testing::test_builder::TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata), 0).launch().run_test::<#builder_impl>().await; } } } @@ -143,6 +149,7 @@ mod keywords { syn::custom_keyword!(TestName); syn::custom_keyword!(Types); syn::custom_keyword!(Impls); + syn::custom_keyword!(BuilderImpls); syn::custom_keyword!(Versions); } @@ -163,6 +170,11 @@ impl Parse for CrossTestData { input.parse::()?; let impls = input.parse::()?; description.impls(impls); + } else if input.peek(keywords::BuilderImpls) { + let _ = input.parse::()?; + input.parse::()?; + let impls = input.parse::()?; + description.builder_impls(impls); } else if input.peek(keywords::Versions) { let _ = input.parse::()?; input.parse::()?; @@ -185,7 +197,7 @@ impl Parse for CrossTestData { description.ignore(ignore); } else { panic!( - "Unexpected token. Expected one of: Metadata, Ignore, Impls, Versions, Types, Testname" + "Unexpected token. Expected one of: Metadata, Ignore, Impls, BuilderImpls, Versions, Types, Testname" ); } if input.peek(Token![,]) { @@ -223,30 +235,40 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { p }); + let builder_impls = test_spec.builder_impls.elems.iter().map(|t| { + let Expr::Path(p) = t else { + panic!("Expected Path for BuilderImpl! Got {t:?}"); + }; + p + }); + let mut result = quote! {}; for ty in types.clone() { let mut type_mod = quote! {}; for imp in impls.clone() { - for version in versions.clone() { - let test_data = TestDataBuilder::create_empty() - .test_name(test_spec.test_name.clone()) - .metadata(test_spec.metadata.clone()) - .ignore(test_spec.ignore.clone()) - .version(version.clone()) - .imply(imp.clone()) - .ty(ty.clone()) - .build() - .unwrap(); - let test = test_data.generate_test(); - - let impl_str = format_ident!("{}", imp.to_lower_snake_str()); - let impl_result = quote! { - pub mod #impl_str { - use super::*; - #test - } - }; - type_mod.extend(impl_result); + for builder_impl in builder_impls.clone() { + for version in versions.clone() { + let test_data = TestDataBuilder::create_empty() + .test_name(test_spec.test_name.clone()) + .metadata(test_spec.metadata.clone()) + .ignore(test_spec.ignore.clone()) + .version(version.clone()) + .imply(imp.clone()) + .builder_impl(builder_impl.clone()) + .ty(ty.clone()) + .build() + .unwrap(); + let test = test_data.generate_test(); + + let impl_str = format_ident!("{}", imp.to_lower_snake_str()); + let impl_result = quote! { + pub mod #impl_str { + use super::*; + #test + } + }; + type_mod.extend(impl_result); + } } } let ty_str = format_ident!("{}", ty.to_lower_snake_str()); From b1c5e383d0f6193908b94e367c532054b4cd9441 Mon Sep 17 00:00:00 2001 From: Toralf Wittner Date: Fri, 11 Oct 2024 13:21:57 -0600 Subject: [PATCH 1247/1393] Remove data request-response code. (#3747) Follow-up to #3740 which removes the remaining bits of data request-response code. --- .../src/traits/networking/libp2p_network.rs | 9 +-- .../src/network/behaviours/mod.rs | 3 - .../network/behaviours/request_response.rs | 66 ------------------- libp2p-networking/src/network/def.rs | 17 +---- libp2p-networking/src/network/mod.rs | 27 +------- libp2p-networking/src/network/node.rs | 38 +---------- libp2p-networking/src/network/node/handle.rs | 45 +------------ libp2p-networking/tests/counter.rs | 4 +- types/src/request_response.rs | 12 ---- 9 files changed, 7 insertions(+), 214 deletions(-) delete mode 100644 libp2p-networking/src/network/behaviours/request_response.rs diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 41b25def7f..936f905318 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -46,7 +46,6 @@ use hotshot_types::{ constants::LOOK_AHEAD, data::ViewNumber, network::NetworkConfig, - request_response::Request, traits::{ election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, @@ -738,9 +737,6 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { error!("handle_recvd_events received `NetworkEvent::IsBootstrapped`, which should be impossible."); } - NetworkEvent::ResponseRequested(..) => { - error!("received unexpected `NetworkEvent::ResponseRequested`"); - } NetworkEvent::ConnectedPeersUpdate(_) => {} } Ok::<(), NetworkError>(()) @@ -773,10 +769,7 @@ impl Libp2pNetwork { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } - GossipMsg(_) - | DirectRequest(_, _, _) - | DirectResponse(_, _) - | NetworkEvent::ResponseRequested(Request(_), _) => { + GossipMsg(_) | DirectRequest(_, _, _) | DirectResponse(_, _) => { let _ = handle.handle_recvd_events(message, &sender).await; } NetworkEvent::ConnectedPeersUpdate(num_peers) => { diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/libp2p-networking/src/network/behaviours/mod.rs index aa40fd5f25..5b6131c139 100644 --- a/libp2p-networking/src/network/behaviours/mod.rs +++ b/libp2p-networking/src/network/behaviours/mod.rs @@ -12,6 +12,3 @@ pub mod exponential_backoff; /// Wrapper around Kademlia pub mod dht; - -/// Request Response Handling for data requests -pub mod request_response; diff --git a/libp2p-networking/src/network/behaviours/request_response.rs b/libp2p-networking/src/network/behaviours/request_response.rs deleted file mode 100644 index 82dd4dab05..0000000000 --- a/libp2p-networking/src/network/behaviours/request_response.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use std::collections::HashMap; - -use futures::channel::oneshot::Sender; -use hotshot_types::request_response::{Request, Response}; -use libp2p::request_response::{Message, OutboundRequestId}; - -use crate::network::NetworkEvent; - -#[derive(Default, Debug)] -/// Handler for request response messages -pub(crate) struct RequestResponseState { - /// Map requests to the their response channels - request_map: HashMap>>, -} - -impl RequestResponseState { - /// Handles messages from the `request_response` behaviour by sending them to the application - pub fn handle_request_response( - &mut self, - event: libp2p::request_response::Event, - ) -> Option { - match event { - libp2p::request_response::Event::Message { peer: _, message } => match message { - Message::Request { - request_id: _, - request, - channel, - } => Some(NetworkEvent::ResponseRequested(request, channel)), - Message::Response { - request_id, - response, - } => { - let chan = self.request_map.remove(&request_id)?; - if chan.send(Some(response)).is_err() { - tracing::warn!("Failed to send response to client, channel closed."); - } - None - } - }, - libp2p::request_response::Event::OutboundFailure { - peer: _, - request_id, - error, - } => { - tracing::warn!("Error Sending Request {:?}", error); - let chan = self.request_map.remove(&request_id)?; - if chan.send(None).is_err() { - tracing::warn!("Failed to send response to client, channel closed."); - } - None - } - libp2p::request_response::Event::InboundFailure { .. } - | libp2p::request_response::Event::ResponseSent { .. } => None, - } - } - /// Add a requests return channel to the map of pending requests - pub fn add_request(&mut self, id: OutboundRequestId, chan: Sender>) { - self.request_map.insert(id, chan); - } -} diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 9c1871c43f..39d6bdd1b2 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -4,10 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use hotshot_types::{ - request_response::{Request, Response}, - traits::signature_key::SignatureKey, -}; +use hotshot_types::traits::signature_key::SignatureKey; use libp2p::{ autonat, gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, @@ -50,10 +47,6 @@ pub struct NetworkDef { #[debug(skip)] pub direct_message: libp2p::request_response::cbor::Behaviour, Vec>, - /// Behaviour for requesting and receiving data - #[debug(skip)] - pub request_response: libp2p::request_response::cbor::Behaviour, - /// Auto NAT behaviour to determine if we are publically reachable and /// by which address #[debug(skip)] @@ -68,7 +61,6 @@ impl NetworkDef { dht: libp2p::kad::Behaviour>, identify: IdentifyBehaviour, direct_message: cbor::Behaviour, Vec>, - request_response: cbor::Behaviour, autonat: autonat::Behaviour, ) -> NetworkDef { Self { @@ -76,7 +68,6 @@ impl NetworkDef { dht, identify, direct_message, - request_response, autonat, } } @@ -155,12 +146,6 @@ impl From, Vec>> for NetworkEventInt } } -impl From> for NetworkEventInternal { - fn from(event: libp2p::request_response::Event) -> Self { - Self::RequestResponseEvent(event) - } -} - impl From for NetworkEventInternal { fn from(event: libp2p::autonat::Event) -> Self { Self::AutonatEvent(event) diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index ea9b5adda6..693ef1e292 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -15,11 +15,8 @@ pub mod transport; use std::{collections::HashSet, fmt::Debug}; -use futures::channel::oneshot::{self, Sender}; -use hotshot_types::{ - request_response::{Request, Response}, - traits::{network::NetworkError, signature_key::SignatureKey}, -}; +use futures::channel::oneshot::Sender; +use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; #[cfg(async_executor_impl = "async-std")] use libp2p::dns::async_std::Transport as DnsTransport; #[cfg(async_executor_impl = "tokio")] @@ -77,22 +74,6 @@ pub enum ClientRequest { }, /// client request to send a direct reply to a message DirectResponse(ResponseChannel>, Vec), - /// request for data from another peer - DataRequest { - /// request sent on wire - request: Request, - /// Peer to try sending the request to - peer: PeerId, - /// Send back request ID to client - chan: oneshot::Sender>, - }, - /// Respond with some data to another peer - DataResponse { - /// Data - response: Response, - /// Send back channel - chan: ResponseChannel, - }, /// prune a peer Prune(PeerId), /// add vec of known peers or addresses @@ -139,8 +120,6 @@ pub enum NetworkEvent { DirectRequest(Vec, PeerId, ResponseChannel>), /// Recv-ed a direct response from a node (that hopefully was initiated by this node) DirectResponse(Vec, PeerId), - /// A peer is asking us for data - ResponseRequested(Request, ResponseChannel), /// Report that kademlia has successfully bootstrapped into the network IsBootstrapped, /// The number of connected peers has possibly changed @@ -160,8 +139,6 @@ pub enum NetworkEventInternal { GossipEvent(Box), /// a direct message event DMEvent(libp2p::request_response::Event, Vec>), - /// a request response event - RequestResponseEvent(libp2p::request_response::Event), /// a autonat event AutonatEvent(libp2p::autonat::Event), } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index d8488e5a09..9dde578f58 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -25,9 +25,7 @@ use async_compatibility_layer::{ }; use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; use hotshot_types::{ - constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, - request_response::{Request, Response}, - traits::signature_key::SignatureKey, + constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::signature_key::SignatureKey, }; use libp2p::{ autonat, @@ -71,7 +69,6 @@ use crate::network::behaviours::{ dht::{DHTBehaviour, DHTProgress, KadPutQuery, NUM_REPLICATED_TO_TRUST}, direct_message::{DMBehaviour, DMRequest}, exponential_backoff::ExponentialBackoff, - request_response::RequestResponseState, }; /// Maximum size of a message @@ -97,8 +94,6 @@ pub struct NetworkNode { config: NetworkNodeConfig, /// the listener id we are listening on, if it exists listener_id: Option, - /// Handler for requests and response behavior events. - request_response_state: RequestResponseState, /// Handler for direct messages direct_message_state: DMBehaviour, /// Handler for DHT Events @@ -271,15 +266,6 @@ impl NetworkNode { .into_iter(), rrconfig.clone(), ); - let request_response: libp2p::request_response::cbor::Behaviour = - RequestResponse::new( - [( - StreamProtocol::new("/HotShot/request_response/1.0"), - ProtocolSupport::Full, - )] - .into_iter(), - rrconfig.clone(), - ); let autonat_config = autonat::Config { only_global_ips: false, @@ -291,7 +277,6 @@ impl NetworkNode { kadem, identify, direct_message, - request_response, autonat::Behaviour::new(peer_id, autonat_config), ); @@ -321,7 +306,6 @@ impl NetworkNode { swarm, config: config.clone(), listener_id: None, - request_response_state: RequestResponseState::default(), direct_message_state: DMBehaviour::default(), dht_handler: DHTBehaviour::new( peer_id, @@ -480,23 +464,6 @@ impl NetworkNode { ClientRequest::DirectResponse(chan, msg) => { behaviour.add_direct_response(chan, msg); } - ClientRequest::DataRequest { - request, - peer, - chan, - } => { - let id = behaviour.request_response.send_request(&peer, request); - self.request_response_state.add_request(id, chan); - } - ClientRequest::DataResponse { response, chan } => { - if behaviour - .request_response - .send_response(chan, response) - .is_err() - { - debug!("Data response dropped because client is no longer connected"); - } - } ClientRequest::AddKnownPeers(peers) => { self.add_known_peers(&peers); } @@ -655,9 +622,6 @@ impl NetworkNode { NetworkEventInternal::DMEvent(e) => self .direct_message_state .handle_dm_event(e, self.resend_tx.clone()), - NetworkEventInternal::RequestResponseEvent(e) => { - self.request_response_state.handle_request_response(e) - } NetworkEventInternal::AutonatEvent(e) => { match e { autonat::Event::InboundProbe(_) => {} diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 5e19c4c2fb..3ec68665f2 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -10,11 +10,7 @@ use async_compatibility_layer::{ art::{async_sleep, async_timeout}, channel::{Receiver, UnboundedReceiver, UnboundedSender}, }; -use futures::channel::oneshot; -use hotshot_types::{ - request_response::{Request, Response}, - traits::{network::NetworkError, signature_key::SignatureKey}, -}; +use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use tracing::{debug, info, instrument}; @@ -184,45 +180,6 @@ impl NetworkNodeHandle { Ok(()) } - /// Request another peer for some data we want. Returns the id of the request - /// - /// # Errors - /// - /// Will return a networking error if the channel closes before the result - /// can be sent back - pub async fn request_data( - &self, - request: &[u8], - peer: PeerId, - ) -> Result, NetworkError> { - let (tx, rx) = oneshot::channel(); - let req = ClientRequest::DataRequest { - request: Request(request.to_vec()), - peer, - chan: tx, - }; - - self.send_request(req).await?; - - rx.await - .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) - } - - /// Send a response to a request with the response channel - /// # Errors - /// Will error if the client request channel is closed, or serialization fails. - pub async fn respond_data( - &self, - response: Vec, - chan: ResponseChannel, - ) -> Result<(), NetworkError> { - let req = ClientRequest::DataResponse { - response: Response(response), - chan, - }; - self.send_request(req).await - } - /// Look up a peer's addresses in kademlia /// NOTE: this should always be called before any `request_response` is initiated /// # Errors diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index f6e8144ac8..6e1214c547 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -81,9 +81,7 @@ pub async fn counter_handle_network_event( use CounterMessage::*; use NetworkEvent::*; match event { - IsBootstrapped - | NetworkEvent::ResponseRequested(..) - | NetworkEvent::ConnectedPeersUpdate(..) => {} + IsBootstrapped | NetworkEvent::ConnectedPeersUpdate(..) => {} GossipMsg(m) | DirectResponse(m, _) => { if let Ok(msg) = bincode::deserialize::(&m) { match msg { diff --git a/types/src/request_response.rs b/types/src/request_response.rs index 156008fed6..d27cc27313 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -12,18 +12,6 @@ use serde::{Deserialize, Serialize}; use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; -/// Request for Consenus data -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Request(#[serde(with = "serde_bytes")] pub Vec); - -/// Response for some VID data that we already collected -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Response( - /// Data - #[serde(with = "serde_bytes")] - pub Vec, -); - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] /// A signed request for a proposal. pub struct ProposalRequestPayload { From a9cb0ca6201024b762d0b9a7eaaaa4a2d276ee3c Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Fri, 11 Oct 2024 14:04:10 -0600 Subject: [PATCH 1248/1393] remove consensus, extraneous parameter for update_view, repoint deps (#3755) --- hotshot/Cargo.toml | 1 - hotshot/src/lib.rs | 2 - hotshot/src/tasks/mod.rs | 7 - hotshot/src/tasks/task_state.rs | 46 +- orchestrator/src/client.rs | 6 +- orchestrator/src/lib.rs | 4 +- task-impls/Cargo.toml | 1 - task-impls/src/consensus/handlers.rs | 891 ------------------ task-impls/src/consensus/mod.rs | 744 --------------- task-impls/src/helpers.rs | 29 +- task-impls/src/lib.rs | 3 - .../src/quorum_proposal_recv/handlers.rs | 4 +- task-impls/src/quorum_proposal_recv/mod.rs | 1 - testing/Cargo.toml | 1 - testing/src/predicates/event.rs | 53 ++ testing/src/predicates/mod.rs | 4 - .../src/predicates/upgrade_with_consensus.rs | 61 -- testing/src/view_generator.rs | 2 +- testing/tests/tests_1/consensus_task.rs | 698 -------------- .../tests_1/quorum_proposal_recv_task.rs | 3 - testing/tests/tests_1/quorum_proposal_task.rs | 2 - testing/tests/tests_1/quorum_vote_task.rs | 1 - testing/tests/tests_1/test_success.rs | 3 - .../tests_1/upgrade_task_with_consensus.rs | 678 ------------- .../tests_1/upgrade_task_with_proposal.rs | 2 - .../tests/tests_1/upgrade_task_with_vote.rs | 3 - .../tests/tests_1/vote_dependency_handle.rs | 2 - types/src/hotshot_config_file.rs | 9 +- types/src/network.rs | 13 +- types/src/validator_config.rs | 3 +- 30 files changed, 87 insertions(+), 3190 deletions(-) delete mode 100644 task-impls/src/consensus/handlers.rs delete mode 100644 task-impls/src/consensus/mod.rs delete mode 100644 testing/src/predicates/upgrade_with_consensus.rs delete mode 100644 testing/tests/tests_1/consensus_task.rs delete mode 100644 testing/tests/tests_1/upgrade_task_with_consensus.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 3b00f7e0a9..874e94939d 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -11,7 +11,6 @@ rust-version = { workspace = true } default = ["docs", "doc-images"] example-upgrade = ["hotshot-task-impls/example-upgrade"] gpu-vid = ["hotshot-task-impls/gpu-vid"] -dependency-tasks = ["hotshot-task-impls/dependency-tasks"] rewind = ["hotshot-task-impls/rewind"] # Features required for binaries diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index fa85394aeb..eb63cb630b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -350,7 +350,6 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, } { - #![cfg(not(feature = "dependency-tasks"))] - use hotshot_task_impls::consensus::ConsensusTaskState; - - handle.add_task(ConsensusTaskState::::create_from(handle).await); - } - { - #![cfg(feature = "dependency-tasks")] use hotshot_task_impls::{ consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index c1a10f4646..1946912cd9 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -13,11 +13,11 @@ use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ - builder::BuilderClient, consensus::ConsensusTaskState, consensus2::Consensus2TaskState, - da::DaTaskState, quorum_proposal::QuorumProposalTaskState, - quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, - request::NetworkRequestState, rewind::RewindTaskState, transactions::TransactionTaskState, - upgrade::UpgradeTaskState, vid::VidTaskState, view_sync::ViewSyncTaskState, + builder::BuilderClient, consensus2::Consensus2TaskState, da::DaTaskState, + quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, + quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, + transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, + view_sync::ViewSyncTaskState, }; use hotshot_types::{ consensus::OuterConsensus, @@ -214,42 +214,6 @@ impl, V: Versions> CreateTaskState } } -#[async_trait] -impl, V: Versions> CreateTaskState - for ConsensusTaskState -{ - async fn create_from(handle: &SystemContextHandle) -> Self { - let consensus = handle.hotshot.consensus(); - - Self { - consensus: OuterConsensus::new(consensus), - instance_state: handle.hotshot.instance_state(), - timeout: handle.hotshot.config.next_view_timeout, - round_start_delay: handle.hotshot.config.round_start_delay, - cur_view: handle.cur_view().await, - cur_view_time: Utc::now().timestamp(), - payload_commitment_and_metadata: None, - vote_collectors: BTreeMap::default(), - timeout_vote_collectors: BTreeMap::default(), - timeout_task: async_spawn(async {}), - spawned_tasks: BTreeMap::new(), - formed_upgrade_certificate: None, - proposal_cert: None, - output_event_stream: handle.hotshot.external_event_stream.0.clone(), - current_proposal: None, - id: handle.hotshot.id, - public_key: handle.public_key().clone(), - private_key: handle.private_key().clone(), - network: Arc::clone(&handle.hotshot.network), - timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - da_membership: handle.hotshot.memberships.da_membership.clone().into(), - storage: Arc::clone(&handle.storage), - upgrade_lock: handle.hotshot.upgrade_lock.clone(), - } - } -} - #[async_trait] impl, V: Versions> CreateTaskState for QuorumVoteTaskState diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index afe75d9399..bac62ecdd7 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -6,7 +6,6 @@ use std::{net::SocketAddr, time::Duration}; -use crate::OrchestratorVersion; use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; @@ -18,10 +17,11 @@ use hotshot_types::{ use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; -use tracing::info; -use tracing::instrument; +use tracing::{info, instrument}; use vbs::BinarySerializer; +use crate::OrchestratorVersion; + /// Holds the client connection to the orchestrator pub struct OrchestratorClient { /// the client diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index f5a019084a..4126961222 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -22,7 +22,7 @@ use client::{BenchResults, BenchResultsDownloadConfig}; use csv::Writer; use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; use hotshot_types::{ - network::BuilderType, + network::{BuilderType, NetworkConfig, PublicKeysFile}, traits::signature_key::{SignatureKey, StakeTableEntryType}, PeerConfig, }; @@ -45,8 +45,6 @@ use vbs::{ BinarySerializer, }; -use hotshot_types::network::{NetworkConfig, PublicKeysFile}; - /// Orchestrator is not, strictly speaking, bound to the network; it can have its own versioning. /// Orchestrator Version (major) pub const ORCHESTRATOR_MAJOR_VERSION: u16 = 0; diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 71fb1013f5..2492438ea4 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -8,7 +8,6 @@ version = { workspace = true } [features] example-upgrade = [] gpu-vid = ["hotshot-types/gpu-vid"] -dependency-tasks = [] rewind = [] test-srs = ["jf-vid/test-srs"] diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs deleted file mode 100644 index 23e3459d27..0000000000 --- a/task-impls/src/consensus/handlers.rs +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use anyhow::{bail, ensure, Context, Result}; -use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use chrono::Utc; -use core::time::Duration; -use futures::FutureExt; -use hotshot_types::{ - consensus::{CommitmentAndMetadata, OuterConsensus, View}, - data::{null_block, Leaf, QuorumProposal, ViewChangeEvidence}, - event::{Event, EventType}, - message::{GeneralConsensusMessage, Proposal}, - simple_certificate::UpgradeCertificate, - simple_vote::QuorumData, - traits::{ - block_contents::BlockHeader, - election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - signature_key::SignatureKey, - states::ValidatedState, - storage::Storage, - }, - utils::ViewInner, - vote::{Certificate, HasViewNumber}, -}; -use std::{marker::PhantomData, sync::Arc}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; -use vbs::version::StaticVersionType; - -use super::ConsensusTaskState; -use crate::{ - consensus::{UpgradeLock, Versions}, - events::HotShotEvent, - helpers::{ - broadcast_event, decide_from_proposal, fetch_proposal, parent_leaf_and_state, update_view, - validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, AnyhowTracing, - SEND_VIEW_CHANGE_EVENT, - }, -}; - -/// Create the header for a proposal, build the proposal, and broadcast -/// the proposal send evnet. -#[allow(clippy::too_many_arguments)] -#[instrument(skip_all, fields(id = id, view = *view))] -pub async fn create_and_send_proposal( - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - event_stream: Sender>>, - view: TYPES::Time, - commitment_and_metadata: CommitmentAndMetadata, - parent_leaf: Leaf, - state: Arc, - upgrade_cert: Option>, - proposal_cert: Option>, - round_start_delay: u64, - instance_state: Arc, - upgrade_lock: UpgradeLock, - id: u64, -) -> Result<()> { - let consensus_read = consensus.read().await; - let vid_share = consensus_read - .vid_shares() - .get(&view) - .map(|shares| shares.get(&public_key).cloned()) - .context(format!( - "Cannot propopse without our VID share, view {view:?}" - ))? - .context("Failed to get vid share")?; - drop(consensus_read); - - let version = upgrade_lock - .version(view) - .await - .context("Failed to get version number")?; - - let block_header = if version < V::Marketplace::VERSION { - TYPES::BlockHeader::new_legacy( - state.as_ref(), - instance_state.as_ref(), - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fees.first().clone(), - vid_share.data.common, - version, - ) - .await - .context("Failed to construct legacy block header")? - } else { - TYPES::BlockHeader::new_marketplace( - state.as_ref(), - instance_state.as_ref(), - &parent_leaf, - commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, - commitment_and_metadata.fees.to_vec(), - vid_share.data.common, - commitment_and_metadata.auction_result, - version, - ) - .await - .context("Failed to construct marketplace block header")? - }; - - let proposal = QuorumProposal { - block_header, - view_number: view, - justify_qc: consensus.read().await.high_qc().clone(), - proposal_certificate: proposal_cert, - upgrade_certificate: upgrade_cert, - }; - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - - ensure!(proposed_leaf.parent_commitment() == parent_leaf.commit(&upgrade_lock).await); - - let signature = TYPES::SignatureKey::sign( - &private_key, - proposed_leaf.commit(&upgrade_lock).await.as_ref(), - )?; - - let message = Proposal { - data: proposal, - signature, - _pd: PhantomData, - }; - - debug!( - "Sending proposal for view {:?} ID: {}", - proposed_leaf.view_number(), - id, - ); - - async_sleep(Duration::from_millis(round_start_delay)).await; - - broadcast_event( - Arc::new(HotShotEvent::QuorumProposalSend( - message.clone(), - public_key, - )), - &event_stream, - ) - .await; - - Ok(()) -} - -/// Send a proposal for the view `view` from the latest high_qc given an upgrade cert. This is the -/// standard case proposal scenario. -#[allow(clippy::too_many_arguments)] -#[instrument(skip_all)] -pub async fn publish_proposal_from_commitment_and_metadata( - view: TYPES::Time, - sender: Sender>>, - receiver: Receiver>>, - quorum_membership: Arc, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - delay: u64, - formed_upgrade_certificate: Option>, - upgrade_lock: UpgradeLock, - commitment_and_metadata: Option>, - proposal_cert: Option>, - instance_state: Arc, - id: u64, -) -> Result> { - let (parent_leaf, state) = parent_leaf_and_state( - view, - &sender, - &receiver, - quorum_membership, - public_key.clone(), - private_key.clone(), - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - &upgrade_lock, - ) - .await?; - - // In order of priority, we should try to attach: - // - the parent certificate if it exists, or - // - our own certificate that we formed. - // In either case, we need to ensure that the certificate is still relevant. - // - // Note: once we reach a point of potentially propose with our formed upgrade certificate, we will ALWAYS drop it. If we cannot immediately use it for whatever reason, we choose to discard it. - // It is possible that multiple nodes form separate upgrade certificates for the some upgrade if we are not careful about voting. But this shouldn't bother us: the first leader to propose is the one whose certificate will be used. And if that fails to reach a decide for whatever reason, we may lose our own certificate, but something will likely have gone wrong there anyway. - let mut proposal_upgrade_certificate = parent_leaf - .upgrade_certificate() - .or(formed_upgrade_certificate); - - if let Some(cert) = proposal_upgrade_certificate.clone() { - if cert - .is_relevant(view, Arc::clone(&upgrade_lock.decided_upgrade_certificate)) - .await - .is_err() - { - proposal_upgrade_certificate = None; - } - } - - // We only want to proposal to be attached if any of them are valid. - let proposal_certificate = proposal_cert - .as_ref() - .filter(|cert| cert.is_valid_for_view(&view)) - .cloned(); - - ensure!( - commitment_and_metadata.is_some(), - "Cannot propose because we don't have the VID payload commitment and metadata" - ); - - // This is a safe unwrap due to the prior ensure call. - let commitment_and_metadata = commitment_and_metadata.unwrap(); - - ensure!( - commitment_and_metadata.block_view == view, - "Cannot propose because our VID payload commitment and metadata is for an older view." - ); - - let create_and_send_proposal_handle = async_spawn(async move { - match create_and_send_proposal::( - public_key, - private_key, - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - sender, - view, - commitment_and_metadata, - parent_leaf.clone(), - state, - proposal_upgrade_certificate, - proposal_certificate, - delay, - instance_state, - upgrade_lock, - id, - ) - .await - { - Ok(()) => {} - Err(e) => { - tracing::error!("Failed to send proposal: {}", e); - } - }; - }); - - Ok(create_and_send_proposal_handle) -} - -/// Handle the received quorum proposal. -/// -/// Returns the proposal that should be used to set the `cur_proposal` for other tasks. -#[allow(clippy::too_many_lines)] -#[instrument(skip_all)] -pub(crate) async fn handle_quorum_proposal_recv< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( - proposal: &Proposal>, - sender: &TYPES::SignatureKey, - event_sender: Sender>>, - event_receiver: Receiver>>, - task_state: &mut ConsensusTaskState, -) -> Result>> { - let sender = sender.clone(); - debug!( - "Received Quorum Proposal for view {}", - *proposal.data.view_number - ); - - let cur_view = task_state.cur_view; - - validate_proposal_view_and_certs( - proposal, - task_state.cur_view, - &task_state.quorum_membership, - &task_state.timeout_membership, - &task_state.upgrade_lock, - ) - .await - .context("Failed to validate proposal view and attached certs")?; - - let view = proposal.data.view_number(); - let justify_qc = proposal.data.justify_qc.clone(); - - if !justify_qc - .is_valid_cert( - task_state.quorum_membership.as_ref(), - &task_state.upgrade_lock, - ) - .await - { - let consensus = task_state.consensus.read().await; - consensus.metrics.invalid_qc.update(1); - bail!("Invalid justify_qc in proposal for view {}", *view); - } - - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::( - view, - &event_sender, - task_state.timeout, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &mut task_state.cur_view, - &mut task_state.cur_view_time, - &mut task_state.timeout_task, - &task_state.output_event_stream, - SEND_VIEW_CHANGE_EVENT, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, - ) - .await - { - debug!("Failed to update view; error = {e:#}"); - } - - let mut parent_leaf = task_state - .consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned(); - - parent_leaf = match parent_leaf { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - event_sender.clone(), - event_receiver.clone(), - Arc::clone(&task_state.quorum_membership), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - task_state.public_key.clone(), - task_state.private_key.clone(), - &task_state.upgrade_lock, - ) - .await - .ok(), - }; - let consensus_read = task_state.consensus.read().await; - - // Get the parent leaf and state. - let parent = match parent_leaf { - Some(leaf) => { - if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { - Some((leaf, Arc::clone(&state))) - } else { - bail!("Parent state not found! Consensus internally inconsistent"); - } - } - None => None, - }; - - if justify_qc.view_number() > consensus_read.high_qc().view_number { - if let Err(e) = task_state - .storage - .write() - .await - .update_high_qc(justify_qc.clone()) - .await - { - bail!("Failed to store High QC not voting. Error: {:?}", e); - } - } - - drop(consensus_read); - let mut consensus_write = task_state.consensus.write().await; - - if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { - tracing::trace!("{e:?}"); - } - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some((parent_leaf, _parent_state)) = parent else { - warn!( - "Proposal's parent missing from storage with commitment: {:?}", - justify_qc.date().leaf_commit - ); - let leaf = Leaf::from_quorum_proposal(&proposal.data); - - let state = Arc::new( - >::from_header( - &proposal.data.block_header, - ), - ); - - if let Err(e) = consensus_write.update_validated_state_map( - view, - View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(&task_state.upgrade_lock).await, - state, - delta: None, - }, - }, - ) { - tracing::trace!("{e:?}"); - } - - consensus_write - .update_saved_leaves(leaf.clone(), &task_state.upgrade_lock) - .await; - let new_leaves = consensus_write.saved_leaves().clone(); - let new_state = consensus_write.validated_state_map().clone(); - drop(consensus_write); - - if let Err(e) = task_state - .storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - { - warn!("Couldn't store undecided state. Error: {:?}", e); - } - - // If we are missing the parent from storage, the safety check will fail. But we can - // still vote if the liveness check succeeds. - let consensus_read = task_state.consensus.read().await; - let liveness_check = justify_qc.view_number() > consensus_read.locked_view(); - - let high_qc = consensus_read.high_qc().clone(); - let locked_view = consensus_read.locked_view(); - - drop(consensus_read); - - let mut current_proposal = None; - if liveness_check { - current_proposal = Some(proposal.data.clone()); - let new_view = proposal.data.view_number + 1; - - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) - == task_state.public_key - && high_qc.view_number == current_proposal.clone().unwrap().view_number; - - let qc = high_qc.clone(); - if should_propose { - debug!( - "Attempting to publish proposal after voting for liveness; now in view: {}", - *new_view - ); - let create_and_send_proposal_handle = - publish_proposal_from_commitment_and_metadata( - qc.view_number + 1, - event_sender, - event_receiver, - Arc::clone(&task_state.quorum_membership), - task_state.public_key.clone(), - task_state.private_key.clone(), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - task_state.round_start_delay, - task_state.formed_upgrade_certificate.clone(), - task_state.upgrade_lock.clone(), - task_state.payload_commitment_and_metadata.clone(), - task_state.proposal_cert.clone(), - Arc::clone(&task_state.instance_state), - task_state.id, - ) - .await?; - - task_state - .spawned_tasks - .entry(view) - .or_default() - .push(create_and_send_proposal_handle); - } - } else { - warn!(?high_qc, ?proposal.data, ?locked_view, "Failed liveneess check; cannot find parent either."); - } - - return Ok(current_proposal); - }; - - task_state - .spawned_tasks - .entry(proposal.data.view_number()) - .or_default() - .push(async_spawn( - validate_proposal_safety_and_liveness::( - proposal.clone(), - parent_leaf, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), - Arc::clone(&task_state.quorum_membership), - event_sender.clone(), - sender, - task_state.output_event_stream.clone(), - task_state.id, - task_state.upgrade_lock.clone(), - Arc::clone(&task_state.storage), - ) - .map(AnyhowTracing::err_as_debug), - )); - Ok(None) -} - -/// Handle `QuorumProposalValidated` event content and submit a proposal if possible. -#[allow(clippy::too_many_lines)] -#[instrument(skip_all)] -pub async fn handle_quorum_proposal_validated< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( - proposal: &QuorumProposal, - event_sender: Sender>>, - event_receiver: Receiver>>, - task_state: &mut ConsensusTaskState, -) -> Result<()> { - let view = proposal.view_number(); - task_state.current_proposal = Some(proposal.clone()); - - let res = decide_from_proposal( - proposal, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), - &task_state.public_key, - ) - .await; - - if let Some(cert) = res.decided_upgrade_cert { - let mut decided_certificate_lock = task_state - .upgrade_lock - .decided_upgrade_certificate - .write() - .await; - *decided_certificate_lock = Some(cert.clone()); - drop(decided_certificate_lock); - - let _ = task_state - .storage - .write() - .await - .update_decided_upgrade_certificate(Some(cert.clone())) - .await; - } - - let mut consensus = task_state.consensus.write().await; - if let Some(new_locked_view) = res.new_locked_view_number { - if let Err(e) = consensus.update_locked_view(new_locked_view) { - tracing::trace!("{e:?}"); - } - } - - drop(consensus); - - let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; - // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key - && task_state.consensus.read().await.high_qc().view_number - == task_state.current_proposal.clone().unwrap().view_number; - - if let Some(new_decided_view) = res.new_decided_view_number { - task_state.cancel_tasks(new_decided_view).await; - } - task_state.current_proposal = Some(proposal.clone()); - task_state - .spawn_vote_task(view, event_sender.clone(), event_receiver.clone()) - .await; - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - if let Err(e) = task_state - .publish_proposal(new_view, event_sender.clone(), event_receiver.clone()) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - - #[allow(clippy::cast_precision_loss)] - if let Some(new_anchor_view) = res.new_decided_view_number { - let block_size = res.included_txns.map(|set| set.len().try_into().unwrap()); - let decide_sent = broadcast_event( - Event { - view_number: new_anchor_view, - event: EventType::Decide { - leaf_chain: Arc::new(res.leaf_views), - qc: Arc::new(res.new_decide_qc.unwrap()), - block_size, - }, - }, - &task_state.output_event_stream, - ); - let mut consensus = task_state.consensus.write().await; - - let old_anchor_view = consensus.last_decided_view(); - consensus.collect_garbage(old_anchor_view, new_anchor_view); - if let Err(e) = consensus.update_last_decided_view(new_anchor_view) { - tracing::trace!("{e:?}"); - } - consensus - .metrics - .last_decided_time - .set(Utc::now().timestamp().try_into().unwrap()); - consensus.metrics.invalid_qc.set(0); - consensus - .metrics - .last_decided_view - .set(usize::try_from(consensus.last_decided_view().u64()).unwrap()); - let cur_number_of_views_per_decide_event = - *task_state.cur_view - consensus.last_decided_view().u64(); - consensus - .metrics - .number_of_views_per_decide_event - .add_point(cur_number_of_views_per_decide_event as f64); - - debug!( - "Sending Decide for view {:?}", - consensus.last_decided_view() - ); - drop(consensus); - debug!("Decided txns len {:?}", block_size); - decide_sent.await; - broadcast_event( - Arc::new(HotShotEvent::LeafDecided(res.leaves_decided)), - &event_sender, - ) - .await; - debug!("decide send succeeded"); - } - - Ok(()) -} - -/// Private key, latest decided upgrade certificate, committee membership, and event stream, for -/// sending the vote. -pub(crate) struct VoteInfo { - /// The private key of the voting node. - pub private_key: <::SignatureKey as SignatureKey>::PrivateKey, - - /// The locked upgrade of the voting node. - pub upgrade_lock: UpgradeLock, - - /// The DA Membership handle - pub da_membership: Arc<::Membership>, - - /// The event sending stream. - pub event_sender: Sender>>, - - /// The event receiver stream. - pub event_receiver: Receiver>>, -} - -#[allow(clippy::too_many_arguments)] -#[allow(clippy::too_many_lines)] -#[allow(unused_variables)] -/// Check if we are able to vote, like whether the proposal is valid, -/// whether we have DAC and VID share, and if so, vote. -#[instrument(skip_all, fields(id = id, view = *cur_view))] -pub async fn update_state_and_vote_if_able< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( - cur_view: TYPES::Time, - proposal: QuorumProposal, - public_key: TYPES::SignatureKey, - private_key: ::PrivateKey, - consensus: OuterConsensus, - storage: Arc>, - quorum_membership: Arc, - instance_state: Arc, - vote_info: VoteInfo, - id: u64, - upgrade_lock: &UpgradeLock, -) -> bool { - use hotshot_types::simple_vote::QuorumVote; - - if !quorum_membership.has_stake(&public_key) { - debug!("We were not chosen for quorum committee on {:?}", cur_view); - return false; - } - - let read_consnesus = consensus.read().await; - // Only vote if you has seen the VID share for this view - let Some(vid_shares) = read_consnesus.vid_shares().get(&proposal.view_number) else { - debug!( - "We have not seen the VID share for this view {:?} yet, so we cannot vote.", - proposal.view_number - ); - return false; - }; - let Some(vid_share) = vid_shares.get(&public_key).cloned() else { - debug!("we have not seen our VID share yet"); - return false; - }; - - if let Some(upgrade_cert) = &vote_info - .upgrade_lock - .decided_upgrade_certificate - .read() - .await - .clone() - { - if upgrade_cert.upgrading_in(cur_view) - && Some(proposal.block_header.payload_commitment()) - != null_block::commitment(quorum_membership.total_nodes()) - { - info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); - return false; - } - } - - // Only vote if you have the DA cert - // ED Need to update the view number this is stored under? - let Some(cert) = read_consnesus.saved_da_certs().get(&cur_view).cloned() else { - return false; - }; - drop(read_consnesus); - - let view = cert.view_number; - // TODO: do some of this logic without the vote token check, only do that when voting. - let justify_qc = proposal.justify_qc.clone(); - let mut parent = consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.date().leaf_commit) - .cloned(); - parent = match parent { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - vote_info.event_sender.clone(), - vote_info.event_receiver.clone(), - Arc::clone(&quorum_membership), - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - public_key.clone(), - private_key.clone(), - upgrade_lock, - ) - .await - .ok(), - }; - - let read_consnesus = consensus.read().await; - - // Justify qc's leaf commitment is not the same as the parent's leaf commitment, but it should be (in this case) - let Some(parent) = parent else { - error!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.date().leaf_commit, - proposal.view_number, - ); - return false; - }; - let (Some(parent_state), _) = read_consnesus.state_and_delta(parent.view_number()) else { - warn!("Parent state not found! Consensus internally inconsistent"); - return false; - }; - drop(read_consnesus); - - let version = match vote_info.upgrade_lock.version(view).await { - Ok(version) => version, - Err(e) => { - error!("Failed to calculate the version: {e:?}"); - return false; - } - }; - let Ok((validated_state, state_delta)) = parent_state - .validate_and_apply_header( - instance_state.as_ref(), - &parent, - &proposal.block_header.clone(), - vid_share.data.common.clone(), - version, - ) - .await - else { - warn!("Block header doesn't extend the proposal!"); - return false; - }; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - let parent_commitment = parent.commit(upgrade_lock).await; - - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); - if proposed_leaf.parent_commitment() != parent_commitment { - return false; - } - - // Validate the DAC. - let message = if cert - .is_valid_cert(vote_info.da_membership.as_ref(), upgrade_lock) - .await - { - // Validate the block payload commitment for non-genesis DAC. - if cert.date().payload_commit != proposal.block_header.payload_commitment() { - warn!( - "Block payload commitment does not equal da cert payload commitment. View = {}", - *view - ); - return false; - } - if let Ok(vote) = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: proposed_leaf.commit(upgrade_lock).await, - }, - view, - &public_key, - &vote_info.private_key, - &vote_info.upgrade_lock, - ) - .await - { - GeneralConsensusMessage::::Vote(vote) - } else { - error!("Unable to sign quorum vote!"); - return false; - } - } else { - error!( - "Invalid DAC in proposal! Skipping proposal. {:?} cur view is: {:?}", - cert, cur_view - ); - return false; - }; - - let mut consensus = consensus.write().await; - if let Err(e) = consensus.update_validated_state_map( - cur_view, - View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(upgrade_lock).await, - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }, - ) { - tracing::trace!("{e:?}"); - } - consensus - .update_saved_leaves(proposed_leaf.clone(), upgrade_lock) - .await; - let new_leaves = consensus.saved_leaves().clone(); - let new_state = consensus.validated_state_map().clone(); - drop(consensus); - - if let Err(e) = storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - { - error!("Couldn't store undecided state. Error: {:?}", e); - } - - if let GeneralConsensusMessage::Vote(vote) = message { - debug!( - "Sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); - // Add to the storage that we have received the VID disperse for a specific view - if let Err(e) = storage.write().await.append_vid(&vid_share).await { - warn!( - "Failed to store VID Disperse Proposal with error {:?}, aborting vote", - e - ); - return false; - } - broadcast_event( - Arc::new(HotShotEvent::QuorumVoteSend(vote)), - &vote_info.event_sender, - ) - .await; - return true; - } - debug!( - "Received VID share, but couldn't find DAC cert for view {:?}", - *proposal.view_number(), - ); - false -} diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs deleted file mode 100644 index b8fad36a08..0000000000 --- a/task-impls/src/consensus/mod.rs +++ /dev/null @@ -1,744 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use std::{collections::BTreeMap, sync::Arc}; - -use anyhow::Result; -use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; -use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use async_trait::async_trait; -use futures::future::join_all; -use handlers::publish_proposal_from_commitment_and_metadata; -use hotshot_task::task::TaskState; -use hotshot_types::{ - consensus::{CommitmentAndMetadata, OuterConsensus}, - data::{QuorumProposal, VidDisperseShare, ViewChangeEvidence}, - event::{Event, EventType}, - message::{Proposal, UpgradeLock}, - simple_certificate::{QuorumCertificate, TimeoutCertificate, UpgradeCertificate}, - simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, - traits::{ - election::Membership, - node_implementation::{NodeImplementation, NodeType, Versions}, - signature_key::SignatureKey, - storage::Storage, - }, - vid::vid_scheme, - vote::{Certificate, HasViewNumber}, -}; -use jf_vid::VidScheme; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; - -use crate::{ - consensus::handlers::{ - handle_quorum_proposal_recv, handle_quorum_proposal_validated, - update_state_and_vote_if_able, VoteInfo, - }, - events::HotShotEvent, - helpers::{broadcast_event, cancel_task, update_view, DONT_SEND_VIEW_CHANGE_EVENT}, - vote_collection::{handle_vote, VoteCollectorsMap}, -}; - -/// Helper functions to handle proposal-related functionality. -pub(crate) mod handlers; - -/// The state for the consensus task. Contains all of the information for the implementation -/// of consensus -pub struct ConsensusTaskState, V: Versions> { - /// Our public key - pub public_key: TYPES::SignatureKey, - /// Our Private Key - pub private_key: ::PrivateKey, - /// Reference to consensus. The replica will require a write lock on this. - pub consensus: OuterConsensus, - /// Immutable instance state - pub instance_state: Arc, - /// View timeout from config. - pub timeout: u64, - /// Round start delay from config, in milliseconds. - pub round_start_delay: u64, - /// View number this view is executing in. - pub cur_view: TYPES::Time, - - /// Timestamp this view starts at. - pub cur_view_time: i64, - - /// The commitment to the current block payload and its metadata submitted to DA. - pub payload_commitment_and_metadata: Option>, - - /// The underlying network - pub network: Arc, - - /// Membership for Timeout votes/certs - pub timeout_membership: Arc, - - /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, - - /// Membership for DA committee Votes/certs - pub da_membership: Arc, - - /// A map of `QuorumVote` collector tasks. - pub vote_collectors: VoteCollectorsMap, QuorumCertificate, V>, - - /// A map of `TimeoutVote` collector tasks. - pub timeout_vote_collectors: - VoteCollectorsMap, TimeoutCertificate, V>, - - /// timeout task handle - pub timeout_task: JoinHandle<()>, - - /// Spawned tasks related to a specific view, so we can cancel them when - /// they are stale - pub spawned_tasks: BTreeMap>>, - - /// The most recent upgrade certificate this node formed. - /// Note: this is ONLY for certificates that have been formed internally, - /// so that we can propose with them. - /// - /// Certificates received from other nodes will get reattached regardless of this fields, - /// since they will be present in the leaf we propose off of. - pub formed_upgrade_certificate: Option>, - - /// last View Sync Certificate or Timeout Certificate this node formed. - pub proposal_cert: Option>, - - /// Output events to application - pub output_event_stream: async_broadcast::Sender>, - - /// The most recent proposal we have, will correspond to the current view if Some() - /// Will be none if the view advanced through timeout/view_sync - pub current_proposal: Option>, - - // ED Should replace this with config information since we need it anyway - /// The node's id - pub id: u64, - - /// This node's storage ref - pub storage: Arc>, - - /// Lock for a decided upgrade - pub upgrade_lock: UpgradeLock, -} - -impl, V: Versions> ConsensusTaskState { - /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::Time) { - let keep = self.spawned_tasks.split_off(&view); - let mut cancel = Vec::new(); - while let Some((_, tasks)) = self.spawned_tasks.pop_first() { - let mut to_cancel = tasks.into_iter().map(cancel_task).collect(); - cancel.append(&mut to_cancel); - } - self.spawned_tasks = keep; - join_all(cancel).await; - } - - /// Validate the VID disperse is correctly signed and has the correct share. - fn validate_disperse( - &self, - sender: &TYPES::SignatureKey, - disperse: &Proposal>, - ) -> bool { - let view = disperse.data.view_number(); - let payload_commitment = disperse.data.payload_commitment; - - // Check sender of VID disperse share is signed by DA committee member - let validate_sender = sender.validate(&disperse.signature, payload_commitment.as_ref()) - && self.da_membership.committee_members(view).contains(sender); - - // Check whether the data satisfies one of the following. - // * From the right leader for this view. - // * Calculated and signed by the current node. - let validated = self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) - || self - .quorum_membership - .leader(view) - .validate(&disperse.signature, payload_commitment.as_ref()); - if !validate_sender && !validated { - return false; - } - - // Validate the VID share. - // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner - // and outer results - matches!( - vid_scheme(self.quorum_membership.total_nodes()).verify_share( - &disperse.data.share, - &disperse.data.common, - &payload_commitment, - ), - Ok(Ok(())) - ) - } - - /// Publishes a proposal - #[instrument(skip_all, target = "ConsensusTaskState", fields(id = self.id, view = *self.cur_view))] - async fn publish_proposal( - &mut self, - view: TYPES::Time, - event_sender: Sender>>, - event_receiver: Receiver>>, - ) -> Result<()> { - let create_and_send_proposal_handle = publish_proposal_from_commitment_and_metadata( - view, - event_sender, - event_receiver, - Arc::clone(&self.quorum_membership), - self.public_key.clone(), - self.private_key.clone(), - OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), - self.round_start_delay, - self.formed_upgrade_certificate.clone(), - self.upgrade_lock.clone(), - self.payload_commitment_and_metadata.clone(), - self.proposal_cert.clone(), - Arc::clone(&self.instance_state), - self.id, - ) - .await?; - - self.spawned_tasks - .entry(view) - .or_default() - .push(create_and_send_proposal_handle); - - Ok(()) - } - - /// Spawn a vote task for the given view. Will try to vote - /// and emit a `QuorumVoteSend` event we should vote on the current proposal - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), target = "ConsensusTaskState")] - async fn spawn_vote_task( - &mut self, - view: TYPES::Time, - event_sender: Sender>>, - event_receiver: Receiver>>, - ) { - let Some(proposal) = self.current_proposal.clone() else { - return; - }; - if proposal.view_number() != view { - return; - } - let upgrade = self.upgrade_lock.clone(); - let pub_key = self.public_key.clone(); - let priv_key = self.private_key.clone(); - let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); - let storage = Arc::clone(&self.storage); - let quorum_mem = Arc::clone(&self.quorum_membership); - let da_mem = Arc::clone(&self.da_membership); - let instance_state = Arc::clone(&self.instance_state); - let id = self.id; - let handle = async_spawn(async move { - let upgrade_lock = upgrade.clone(); - update_state_and_vote_if_able::( - view, - proposal, - pub_key, - priv_key.clone(), - consensus, - storage, - quorum_mem, - instance_state, - VoteInfo { - private_key: priv_key, - upgrade_lock: upgrade, - da_membership: da_mem, - event_sender, - event_receiver, - }, - id, - &upgrade_lock, - ) - .await; - }); - self.spawned_tasks.entry(view).or_default().push(handle); - } - - /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] - pub async fn handle( - &mut self, - event: Arc>, - event_sender: Sender>>, - event_receiver: Receiver>>, - ) { - match event.as_ref() { - HotShotEvent::QuorumProposalRecv(proposal, sender) => { - debug!("proposal recv view: {:?}", proposal.data.view_number()); - match handle_quorum_proposal_recv( - proposal, - sender, - event_sender.clone(), - event_receiver.clone(), - self, - ) - .await - { - Ok(Some(current_proposal)) => { - let view = current_proposal.view_number(); - self.current_proposal = Some(current_proposal); - self.spawn_vote_task(view, event_sender, event_receiver) - .await; - } - Ok(None) => {} - Err(e) => debug!("Failed to propose {e:#}"), - } - } - HotShotEvent::QuorumProposalValidated(proposal, _) => { - debug!("proposal validated view: {:?}", proposal.view_number()); - if let Err(e) = handle_quorum_proposal_validated( - proposal, - event_sender.clone(), - event_receiver.clone(), - self, - ) - .await - { - warn!("Failed to handle QuorumProposalValidated event {e:#}"); - } - } - HotShotEvent::QuorumVoteRecv(ref vote) => { - debug!("Received quorum vote: {:?}", vote.view_number()); - if self.quorum_membership.leader(vote.view_number() + 1) != self.public_key { - error!( - "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.view_number() + 1, - self.quorum_membership.leader(vote.view_number() + 2) == self.public_key - ); - return; - } - - handle_vote( - &mut self.vote_collectors, - vote, - self.public_key.clone(), - &self.quorum_membership, - self.id, - &event, - &event_sender, - &self.upgrade_lock, - ) - .await; - } - HotShotEvent::TimeoutVoteRecv(ref vote) => { - if self.timeout_membership.leader(vote.view_number() + 1) != self.public_key { - error!( - "We are not the leader for view {} are we the leader for view + 1? {}", - *vote.view_number() + 1, - self.timeout_membership.leader(vote.view_number() + 2) == self.public_key - ); - return; - } - - handle_vote( - &mut self.timeout_vote_collectors, - vote, - self.public_key.clone(), - &self.quorum_membership, - self.id, - &event, - &event_sender, - &self.upgrade_lock, - ) - .await; - } - HotShotEvent::QcFormed(cert) => match cert { - either::Right(qc) => { - self.proposal_cert = Some(ViewChangeEvidence::Timeout(qc.clone())); - - debug!( - "Attempting to publish proposal after forming a TC for view {}", - *qc.view_number - ); - - if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_sender, event_receiver) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - either::Left(qc) => { - if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { - error!("Failed to store High QC of QC we formed. Error: {:?}", e); - } - - if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { - tracing::trace!("{e:?}"); - } - debug!( - "Attempting to publish proposal after forming a QC for view {}", - *qc.view_number - ); - - if let Err(e) = self - .publish_proposal(qc.view_number + 1, event_sender, event_receiver) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - }, - #[cfg(not(feature = "dependency-tasks"))] - HotShotEvent::UpgradeCertificateFormed(cert) => { - debug!( - "Upgrade certificate received for view {}!", - *cert.view_number - ); - - // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. - if cert.data.decide_by >= self.cur_view + 3 { - debug!("Updating current formed_upgrade_certificate"); - - self.formed_upgrade_certificate = Some(cert.clone()); - } - } - HotShotEvent::DaCertificateRecv(cert) => { - debug!("DAC Received for view {}!", *cert.view_number); - let view = cert.view_number; - - self.consensus - .write() - .await - .update_saved_da_certs(view, cert.clone()); - let Some(proposal) = self.current_proposal.clone() else { - return; - }; - if proposal.view_number() != view { - return; - } - self.spawn_vote_task(view, event_sender, event_receiver) - .await; - } - HotShotEvent::VidShareRecv(sender, disperse) => { - let view = disperse.data.view_number(); - - debug!( - "VID disperse received for view: {:?} in consensus task", - view - ); - - // Allow VID disperse date that is one view older, in case we have updated the - // view. - // Adding `+ 1` on the LHS rather than `- 1` on the RHS, to avoid the overflow - // error due to subtracting the genesis view number. - if view + 1 < self.cur_view { - info!("Throwing away VID disperse data that is more than one view older"); - return; - } - - debug!("VID disperse data is not more than one view older."); - - if !self.validate_disperse(sender, disperse) { - warn!("Failed to validated the VID dispersal/share sig."); - return; - } - - self.consensus - .write() - .await - .update_vid_shares(view, disperse.clone()); - if disperse.data.recipient_key != self.public_key { - return; - } - let Some(proposal) = self.current_proposal.clone() else { - return; - }; - if proposal.view_number() != view { - return; - } - self.spawn_vote_task(view, event_sender.clone(), event_receiver.clone()) - .await; - } - HotShotEvent::ViewChange(new_view) => { - let new_view = *new_view; - tracing::trace!("View Change event for view {} in consensus task", *new_view); - - let old_view_number = self.cur_view; - - // If we have a decided upgrade certificate, the protocol version may also have - // been upgraded. - if let Some(cert) = self - .upgrade_lock - .decided_upgrade_certificate - .read() - .await - .clone() - { - if new_view == cert.data.new_version_first_view { - error!( - "Version upgraded based on a decided upgrade cert: {:?}", - cert - ); - } - } - - if let Some(commitment_and_metadata) = &self.payload_commitment_and_metadata { - if commitment_and_metadata.block_view < old_view_number { - self.payload_commitment_and_metadata = None; - } - } - - // update the view in state to the one in the message - // Publish a view change event to the application - // Returns if the view does not need updating. - if let Err(e) = update_view::( - new_view, - &event_sender, - self.timeout, - OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), - &mut self.cur_view, - &mut self.cur_view_time, - &mut self.timeout_task, - &self.output_event_stream, - DONT_SEND_VIEW_CHANGE_EVENT, - self.quorum_membership.leader(old_view_number) == self.public_key, - ) - .await - { - tracing::trace!("Failed to update view; error = {e}"); - return; - } - } - HotShotEvent::Timeout(view) => { - let view = *view; - // NOTE: We may optionally have the timeout task listen for view change events - if self.cur_view >= view { - return; - } - if !self.timeout_membership.has_stake(&self.public_key) { - debug!( - "We were not chosen for consensus committee on {:?}", - self.cur_view - ); - return; - } - - let Ok(vote) = TimeoutVote::create_signed_vote( - TimeoutData { view }, - view, - &self.public_key, - &self.private_key, - &self.upgrade_lock, - ) - .await - else { - error!("Failed to sign TimeoutData!"); - return; - }; - - broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), &event_sender).await; - broadcast_event( - Event { - view_number: view, - event: EventType::ViewTimeout { view_number: view }, - }, - &self.output_event_stream, - ) - .await; - debug!( - "We did not receive evidence for view {} in time, sending timeout vote for that view!", - *view - ); - - broadcast_event( - Event { - view_number: view, - event: EventType::ReplicaViewTimeout { view_number: view }, - }, - &self.output_event_stream, - ) - .await; - let consensus = self.consensus.read().await; - consensus.metrics.number_of_timeouts.add(1); - if self.quorum_membership.leader(view) == self.public_key { - consensus.metrics.number_of_timeouts_as_leader.add(1); - } - } - HotShotEvent::SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - metadata, - view, - fees, - auction_result, - ) => { - let view = *view; - debug!( - "got commit and meta {:?}, view {:?}", - payload_commitment, view - ); - self.payload_commitment_and_metadata = Some(CommitmentAndMetadata { - commitment: *payload_commitment, - builder_commitment: builder_commitment.clone(), - metadata: metadata.clone(), - fees: fees.clone(), - block_view: view, - auction_result: auction_result.clone(), - }); - if self.quorum_membership.leader(view) == self.public_key - && self.consensus.read().await.high_qc().view_number() + 1 == view - { - if let Err(e) = self - .publish_proposal(view, event_sender.clone(), event_receiver.clone()) - .await - { - error!("Failed to propose; error = {e:?}"); - }; - } - - if let Some(cert) = &self.proposal_cert { - if !cert.is_valid_for_view(&view) { - self.proposal_cert = None; - info!("Failed to propose off SendPayloadCommitmentAndMetadata because we had view change evidence, but it was not current."); - return; - } - match cert { - ViewChangeEvidence::Timeout(tc) => { - if self.quorum_membership.leader(tc.view_number() + 1) - == self.public_key - { - if let Err(e) = self - .publish_proposal(view, event_sender, event_receiver) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - } - ViewChangeEvidence::ViewSync(vsc) => { - if self.quorum_membership.leader(vsc.view_number()) == self.public_key { - if let Err(e) = self - .publish_proposal(view, event_sender, event_receiver) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - } - } - } - } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - if !certificate - .is_valid_cert(self.quorum_membership.as_ref(), &self.upgrade_lock) - .await - { - error!( - "View Sync Finalize certificate {:?} was invalid", - certificate.date() - ); - return; - } - - let view = certificate.view_number; - - if self.quorum_membership.leader(view) == self.public_key { - self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); - - debug!( - "Attempting to publish proposal after forming a View Sync Finalized Cert for view {}", - *certificate.view_number - ); - - if let Err(e) = self - .publish_proposal(view, event_sender, event_receiver) - .await - { - debug!("Failed to propose; error = {e:?}"); - }; - } - } - HotShotEvent::QuorumVoteSend(vote) => { - let Some(proposal) = self.current_proposal.clone() else { - return; - }; - let new_view = proposal.view_number() + 1; - // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here - // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_membership.leader(new_view) == self.public_key - && self.consensus.read().await.high_qc().view_number == proposal.view_number(); - - if should_propose { - debug!( - "Attempting to publish proposal after voting; now in view: {}", - *new_view - ); - if let Err(e) = self - .publish_proposal(new_view, event_sender.clone(), event_receiver.clone()) - .await - { - debug!("failed to propose e = {:?}", e); - } - } - if proposal.view_number() <= vote.view_number() { - self.current_proposal = None; - } - } - HotShotEvent::QuorumProposalSend(proposal, _) => { - if self - .payload_commitment_and_metadata - .as_ref() - .is_some_and(|p| p.block_view <= proposal.data.view_number()) - { - self.payload_commitment_and_metadata = None; - } - if let Some(cert) = &self.proposal_cert { - let view = match cert { - ViewChangeEvidence::Timeout(tc) => tc.view_number() + 1, - ViewChangeEvidence::ViewSync(vsc) => vsc.view_number(), - }; - if view < proposal.data.view_number() { - self.proposal_cert = None; - } - } - } - _ => {} - } - } -} - -#[async_trait] -impl, V: Versions> TaskState - for ConsensusTaskState -{ - type Event = HotShotEvent; - - async fn handle_event( - &mut self, - event: Arc, - sender: &Sender>, - receiver: &Receiver>, - ) -> Result<()> { - self.handle(event, sender.clone(), receiver.clone()).await; - - Ok(()) - } - - async fn cancel_subtasks(&mut self) { - while !self.spawned_tasks.is_empty() { - let Some((_, handles)) = self.spawned_tasks.pop_first() else { - break; - }; - - for handle in handles { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] - handle.abort(); - } - } - } -} diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index fa0156c424..6390c360aa 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -660,12 +660,6 @@ pub async fn validate_proposal_view_and_certs( Ok(()) } -/// Constant which tells [`update_view`] to send a view change event when called. -pub(crate) const SEND_VIEW_CHANGE_EVENT: bool = true; - -/// Constant which tells `update_view` to not send a view change event when called. -pub const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; - /// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the /// `timeout_task` which are updated during the operation of the function. /// @@ -682,7 +676,6 @@ pub(crate) async fn update_view( cur_view_time: &mut i64, timeout_task: &mut JoinHandle<()>, output_event_stream: &Sender>, - send_view_change_event: bool, is_old_view_leader: bool, ) -> Result<()> { ensure!( @@ -703,20 +696,18 @@ pub(crate) async fn update_view( // The next view is just the current view + 1 let next_view = *cur_view + 1; - if send_view_change_event { - futures::join! { - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), - broadcast_event( - Event { + futures::join! { + broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), + broadcast_event( + Event { + view_number: old_view, + event: EventType::ViewFinished { view_number: old_view, - event: EventType::ViewFinished { - view_number: old_view, - }, }, - output_event_stream, - ) - }; - } + }, + output_event_stream, + ) + }; // Spawn a timeout task if we did actually update view let new_timeout_task = async_spawn({ diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index ed3dc5a0ee..754d2a972e 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -7,9 +7,6 @@ //! The consensus layer for hotshot. This currently implements sequencing //! consensus in an event driven way -/// the task which implements the main parts of consensus -pub mod consensus; - /// The task which implements the core state logic of consensus. pub mod consensus2; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 7145c3bde8..37b7ba5d12 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -33,7 +33,7 @@ use crate::{ events::HotShotEvent, helpers::{ broadcast_event, fetch_proposal, update_view, validate_proposal_safety_and_liveness, - validate_proposal_view_and_certs, SEND_VIEW_CHANGE_EVENT, + validate_proposal_view_and_certs, }, quorum_proposal_recv::{UpgradeLock, Versions}, }; @@ -103,7 +103,6 @@ async fn validate_proposal_liveness, V: Versions> event_sender: Sender>>, event_receiver: Receiver>>, ) { - #[cfg(feature = "dependency-tasks")] if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { match handle_quorum_proposal_recv( proposal, diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 0bc5566167..82473f3d34 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -10,7 +10,6 @@ default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] -dependency-tasks = ["hotshot/dependency-tasks"] rewind = ["hotshot/rewind"] test-srs = ["jf-vid/test-srs"] diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 22be12227d..d8484b3bbc 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -272,3 +272,56 @@ where }); Box::new(EventPredicate { check, info }) } + +pub fn vid_share_validated() -> Box> +where + TYPES: NodeType, +{ + let info = "VidShareValidated".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), VidShareValidated(..))); + Box::new(EventPredicate { check, info }) +} + +pub fn da_certificate_validated() -> Box> +where + TYPES: NodeType, +{ + let info = "DaCertificateValidated".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(e.as_ref(), DaCertificateValidated(..)) + }); + Box::new(EventPredicate { check, info }) +} + +pub fn quorum_proposal_preliminarily_validated() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumProposalPreliminarilyValidated".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(e.as_ref(), QuorumProposalPreliminarilyValidated(..)) + }); + Box::new(EventPredicate { check, info }) +} + +pub fn high_qc_updated() -> Box> +where + TYPES: NodeType, +{ + let info = "HighQcUpdated".to_string(); + let check: EventCallback = + Arc::new(move |e: Arc>| matches!(e.as_ref(), HighQcUpdated(..))); + Box::new(EventPredicate { check, info }) +} + +pub fn quorum_vote_dependencies_validated() -> Box> +where + TYPES: NodeType, +{ + let info = "QuorumVoteDependenciesValidated".to_string(); + let check: EventCallback = Arc::new(move |e: Arc>| { + matches!(e.as_ref(), QuorumVoteDependenciesValidated(..)) + }); + Box::new(EventPredicate { check, info }) +} diff --git a/testing/src/predicates/mod.rs b/testing/src/predicates/mod.rs index c4c05e7f11..0c3d344ece 100644 --- a/testing/src/predicates/mod.rs +++ b/testing/src/predicates/mod.rs @@ -5,11 +5,7 @@ // along with the HotShot repository. If not, see . pub mod event; -#[cfg(not(feature = "dependency-tasks"))] -pub mod upgrade_with_consensus; -#[cfg(feature = "dependency-tasks")] pub mod upgrade_with_proposal; -#[cfg(feature = "dependency-tasks")] pub mod upgrade_with_vote; use async_trait::async_trait; diff --git a/testing/src/predicates/upgrade_with_consensus.rs b/testing/src/predicates/upgrade_with_consensus.rs deleted file mode 100644 index 7a63edd583..0000000000 --- a/testing/src/predicates/upgrade_with_consensus.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -#![cfg(not(feature = "dependency-tasks"))] - -use std::sync::Arc; - -use async_trait::async_trait; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; -use hotshot_task_impls::consensus::ConsensusTaskState; -use hotshot_types::simple_certificate::UpgradeCertificate; - -use crate::predicates::{Predicate, PredicateResult}; - -type ConsensusTaskTestState = ConsensusTaskState; - -type UpgradeCertCallback = - Arc>>) -> bool + Send + Sync>; - -pub struct UpgradeCertPredicate { - check: UpgradeCertCallback, - info: String, -} - -impl std::fmt::Debug for UpgradeCertPredicate { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.info) - } -} - -#[async_trait] -impl Predicate for UpgradeCertPredicate { - async fn evaluate(&self, input: &ConsensusTaskTestState) -> PredicateResult { - let upgrade_cert = input - .upgrade_lock - .decided_upgrade_certificate - .read() - .await - .clone(); - PredicateResult::from((self.check)(upgrade_cert.into())) - } - - async fn info(&self) -> String { - self.info.clone() - } -} - -pub fn no_decided_upgrade_certificate() -> Box { - let info = "expected decided_upgrade_certificate to be None".to_string(); - let check: UpgradeCertCallback = Arc::new(move |s| s.is_none()); - Box::new(UpgradeCertPredicate { info, check }) -} - -pub fn decided_upgrade_certificate() -> Box { - let info = "expected decided_upgrade_certificate to be Some(_)".to_string(); - let check: UpgradeCertCallback = Arc::new(move |s| s.is_some()); - Box::new(UpgradeCertPredicate { info, check }) -} diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 315ac2d5e8..e574859669 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -63,7 +63,7 @@ pub struct TestView { pub da_certificate: DaCertificate, pub transactions: Vec, upgrade_data: Option>, - formed_upgrade_certificate: Option>, + pub formed_upgrade_certificate: Option>, view_sync_finalize_data: Option>, timeout_cert_data: Option>, upgrade_lock: UpgradeLock, diff --git a/testing/tests/tests_1/consensus_task.rs b/testing/tests/tests_1/consensus_task.rs deleted file mode 100644 index d5e39eab51..0000000000 --- a/testing/tests/tests_1/consensus_task.rs +++ /dev/null @@ -1,698 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -#![cfg(not(feature = "dependency-tasks"))] -// TODO: Remove after integration of dependency-tasks -#![allow(unused_imports)] - -use std::{sync::Arc, time::Duration}; - -use futures::StreamExt; -use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::{ - block_types::TestMetadata, - node_types::{MemoryImpl, TestTypes, TestVersions}, - state_types::TestInstanceState, -}; -use hotshot_macros::{run_test, test_scripts}; -use hotshot_task_impls::{consensus::ConsensusTaskState, events::HotShotEvent::*}; -use hotshot_testing::{ - all_predicates, - helpers::{ - build_fake_view_with_leaf, build_system_handle, key_pair_for_id, - permute_input_with_index_order, vid_scheme_from_view_number, vid_share, - }, - predicates::event::{ - all_predicates, exact, quorum_proposal_send, quorum_proposal_validated, quorum_vote_send, - timeout_vote_send, validated_state_updated, - }, - random, - script::{Expectations, InputOrder, TaskScript}, - serial, - view_generator::TestViewGenerator, -}; -use hotshot_types::{ - data::{null_block, ViewChangeEvidence, ViewNumber}, - simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, - traits::{ - election::Membership, - node_implementation::{ConsensusTime, Versions}, - }, - utils::BuilderCommitment, - vote::HasViewNumber, -}; -use jf_vid::VidScheme; -use sha2::Digest; -use vec1::vec1; - -const TIMEOUT: Duration = Duration::from_millis(35); - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_task() { - use vbs::version::StaticVersionType; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(2) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - // Make some empty encoded transactions, we just care about having a commitment handy for the - // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); - let encoded_transactions = Vec::new(); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut leaves = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(2).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - leaves.push(view.leaf.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - let cert = proposals[1].data.justify_qc.clone(); - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - - let inputs = vec![ - random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), - ], - serial![ - VidShareRecv(leaders[0], vid_share(&vids[1].0, handle.public_key())), - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - QcFormed(either::Left(cert)), - SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - TestMetadata { - num_transactions: 0, - }, - ViewNumber::new(2), - vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), - ::Base::VERSION, - ) - .unwrap()], - None, - ), - ], - ]; - - let expectations = vec![ - Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ]), - Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(2))), - quorum_proposal_validated(), - quorum_proposal_send(), - ]), - ]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_consensus_vote() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(2) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut leaves = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(2).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - leaves.push(view.leaf.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - // Send a proposal, vote on said proposal, update view based on proposal QC, receive vote as next leader - let inputs = vec![random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), - QuorumVoteRecv(votes[0].clone()), - ]]; - - let expectations = vec![Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ])]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_view_sync_finalize_propose() { - use hotshot_example_types::{block_types::TestMetadata, state_types::TestValidatedState}; - use hotshot_types::data::null_block; - use vbs::version::StaticVersionType; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(4) - .await - .0; - let (priv_key, pub_key) = key_pair_for_id::(4); - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - // Make some empty encoded transactions, we just care about having a commitment handy for the - // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(4)); - let encoded_transactions = Vec::new(); - let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; - - let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { - relay: 4, - round: ViewNumber::new(4), - }; - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut vids = Vec::new(); - let mut dacs = Vec::new(); - - generator.next().await; - let view = generator.current_view.clone().unwrap(); - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - vids.push(view.vid_proposal.clone()); - dacs.push(view.da_certificate.clone()); - - // Skip two views - generator.advance_view_number_by(2); - - // Initiate a view sync finalize - generator.add_view_sync_finalize(view_sync_finalize_data); - - // Build the next proposal from view 1 - generator.next_from_anscestor_view(view.clone()).await; - let view = generator.current_view.unwrap(); - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - vids.push(view.vid_proposal); - - // Handle the view sync finalize cert, get the requisite data, propose. - let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { - ViewChangeEvidence::ViewSync(vsc) => vsc, - _ => panic!("Found a TC when there should have been a view sync cert"), - }; - - let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let timeout_vote_view_2 = TimeoutVote::create_signed_vote( - TimeoutData { - view: ViewNumber::new(2), - }, - ViewNumber::new(2), - &pub_key, - &priv_key, - &handle.hotshot.upgrade_lock, - ) - .await - .unwrap(); - - let timeout_vote_view_3 = TimeoutVote::create_signed_vote( - TimeoutData { - view: ViewNumber::new(3), - }, - ViewNumber::new(3), - &pub_key, - &priv_key, - &handle.hotshot.upgrade_lock, - ) - .await - .unwrap(); - - let inputs = vec![ - serial![VidShareRecv( - leaders[0], - vid_share(&vids[0].0, handle.public_key()) - )], - random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - ], - serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - serial![VidShareRecv( - leaders[0], - vid_share(&vids[1].0, handle.public_key()) - )], - random![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - TimeoutVoteRecv(timeout_vote_view_2), - TimeoutVoteRecv(timeout_vote_view_3), - ViewSyncFinalizeCertificate2Recv(cert), - SendPayloadCommitmentAndMetadata( - payload_commitment, - builder_commitment, - TestMetadata { - num_transactions: 0, - }, - ViewNumber::new(4), - vec1![null_block::builder_fee::( - 4, - ::Base::VERSION - ) - .unwrap()], - None, - ), - ], - ]; - - let expectations = vec![ - Expectations::from_outputs(vec![]), - Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ]), - Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), - Expectations::from_outputs(vec![]), - Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(4))), - quorum_proposal_validated(), - quorum_proposal_send(), - ]), - ]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -/// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task -/// will indeed vote if the cert is valid and matches the correct view number. -async fn test_view_sync_finalize_vote() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(5) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { - relay: 4, - round: ViewNumber::new(5), - }; - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut vids = Vec::new(); - let mut dacs = Vec::new(); - for view in (&mut generator).take(3).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - vids.push(view.vid_proposal.clone()); - dacs.push(view.da_certificate.clone()); - } - - // Each call to `take` moves us to the next generated view. We advance to view - // 3 and then add the finalize cert for checking there. - generator.add_view_sync_finalize(view_sync_finalize_data); - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - vids.push(view.vid_proposal.clone()); - dacs.push(view.da_certificate.clone()); - } - - // When we're on the latest view. We want to set the quorum - // certificate to be the previous highest QC (before the timeouts). This will be distinct from - // the view sync cert, which is saying "hey, I'm _actually_ at view 4, but my highest QC is - // only for view 1." This forces the QC to be for view 1, and we can move on under this - // assumption. - - // Try to view sync at view 4. - let cert = match proposals[3].data.proposal_certificate.clone().unwrap() { - ViewChangeEvidence::ViewSync(vsc) => vsc, - _ => panic!("Found a TC when there should have been a view sync cert"), - }; - - let inputs = vec![ - serial![VidShareRecv( - leaders[0], - vid_share(&vids[0].0, handle.public_key()) - )], - random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - ], - serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - ViewSyncFinalizeCertificate2Recv(cert), - ], - ]; - - let expectations = vec![ - Expectations::from_outputs(vec![]), - Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())) - ]), - Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), - Expectations::from_outputs(all_predicates![ - validated_state_updated(), - quorum_proposal_validated(), - quorum_vote_send() - ]), - ]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -/// Makes sure that, when a valid ViewSyncFinalize certificate is available, the consensus task -/// will NOT vote when the certificate matches a different view number. -async fn test_view_sync_finalize_vote_fail_view_number() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(5) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let view_sync_finalize_data: ViewSyncFinalizeData = ViewSyncFinalizeData { - relay: 4, - round: ViewNumber::new(10), - }; - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut vids = Vec::new(); - let mut dacs = Vec::new(); - for view in (&mut generator).take(3).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - vids.push(view.vid_proposal.clone()); - dacs.push(view.da_certificate.clone()); - } - - // Each call to `take` moves us to the next generated view. We advance to view - // 3 and then add the finalize cert for checking there. - generator.add_view_sync_finalize(view_sync_finalize_data); - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - vids.push(view.vid_proposal.clone()); - dacs.push(view.da_certificate.clone()); - } - - // When we're on the latest view. We want to set the quorum - // certificate to be the previous highest QC (before the timeouts). This will be distinct from - // the view sync cert, which is saying "hey, I'm _actually_ at view 4, but my highest QC is - // only for view 1." This forces the QC to be for view 1, and we can move on under this - // assumption. - - let mut cert = match proposals[3].data.proposal_certificate.clone().unwrap() { - ViewChangeEvidence::ViewSync(vsc) => vsc, - _ => panic!("Found a TC when there should have been a view sync cert"), - }; - - // Force this to fail by making the cert happen for a view we've never seen. This will - // intentionally skip the proposal for this node so we can get the proposal and fail to vote. - cert.view_number = ViewNumber::new(10); - - // Get a good proposal first. - let good_proposal = proposals[0].clone(); - - // Now We introduce an error by setting a different view number as well, this makes the task check - // for a view sync or timeout cert. This value could be anything as long as it is not the - // previous view number. - proposals[0].data.justify_qc.view_number = proposals[3].data.justify_qc.view_number; - - let inputs = vec![ - random![ - QuorumProposalRecv(good_proposal, leaders[0]), - DaCertificateRecv(dacs[0].clone()), - ], - serial![VidShareRecv( - leaders[0], - vid_share(&vids[0].0, handle.public_key()) - )], - serial![Timeout(ViewNumber::new(2)), Timeout(ViewNumber::new(3))], - random![ - ViewSyncFinalizeCertificate2Recv(cert), - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - ], - ]; - - let expectations = vec![ - Expectations::from_outputs(all_predicates![ - quorum_proposal_validated(), - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - ]), - Expectations::from_outputs(vec![exact(QuorumVoteSend(votes[0].clone()))]), - Expectations::from_outputs(vec![timeout_vote_send(), timeout_vote_send()]), - // We get no output here due to the invalid view number. - Expectations::from_outputs(vec![]), - ]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} - -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -async fn test_vid_disperse_storage_failure() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(2) - .await - .0; - - // Set the error flag here for the system handle. This causes it to emit an error on append. - handle.storage().write().await.should_return_err = true; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - let inputs = vec![random![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - DaCertificateRecv(dacs[0].clone()), - VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), - ]]; - - let expectations = vec![Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - ])]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} - -/// Tests that VID shares that return validation with an Ok(Err) result -/// are correctly rejected -#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[cfg(feature = "test-srs")] -async fn test_invalid_vid_disperse() { - use hotshot_testing::{ - helpers::{build_payload_commitment, build_vid_proposal}, - test_builder::TestDescription, - }; - use hotshot_types::traits::{ - consensus_api::ConsensusApi, network::Topic, node_implementation::NodeType, - }; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(0) - .await - .0; - - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let mut generator = - TestViewGenerator::generate(quorum_membership.clone(), da_membership.clone()); - - let mut proposals = Vec::new(); - let mut leaders = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - leaders.push(view.leader_public_key); - votes.push(view.create_quorum_vote(&handle)); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - } - - let vid_scheme = - vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); - - let corrupt_share = vid_scheme.corrupt_share_index(vids[0].0[0].data.share.clone()); - - // Corrupt one of the shares - let mut share = vid_share(&vids[0].0, handle.public_key()); - share.data.share = corrupt_share; - - let inputs = vec![random![ - VidShareRecv(share), - DaCertificateRecv(dacs[0].clone()), - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - ]]; - - // If verify_share does not correctly handle this case, a `QuorumVote` - // will be emitted and cause a test failure - let expectations = vec![Expectations::from_outputs(all_predicates![ - validated_state_updated(), - exact(ViewChange(ViewNumber::new(1))), - quorum_proposal_validated(), - ])]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: TIMEOUT, - state: consensus_state, - expectations, - }; - - run_test![inputs, consensus_script].await; -} diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 940e30f62e..55ef973f9d 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -6,7 +6,6 @@ // TODO: Remove after integration #![allow(unused_imports)] -#![cfg(feature = "dependency-tasks")] use committable::Committable; use futures::StreamExt; @@ -38,7 +37,6 @@ use hotshot_types::{ }; #[cfg(test)] -#[cfg(feature = "dependency-tasks")] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_recv_task() { @@ -131,7 +129,6 @@ async fn test_quorum_proposal_recv_task() { } #[cfg(test)] -#[cfg(feature = "dependency-tasks")] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_quorum_proposal_recv_task_liveness_check() { diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 8e5bb64259..455f304114 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -4,8 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(feature = "dependency-tasks")] - use std::time::Duration; use futures::StreamExt; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 72fb7d97e6..e981ed9bda 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -5,7 +5,6 @@ // along with the HotShot repository. If not, see . #![allow(clippy::panic)] -#![cfg(feature = "dependency-tasks")] use std::time::Duration; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index c4ba56416c..a2ee5f1572 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -10,7 +10,6 @@ use hotshot_example_types::node_types::{ Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, TestTypesRandomizedLeader, TestVersions, }; -#[cfg(feature = "dependency-tasks")] use hotshot_example_types::testable_delay::{ DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, }; @@ -41,7 +40,6 @@ cross_tests!( }, ); -#[cfg(feature = "dependency-tasks")] cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -74,7 +72,6 @@ cross_tests!( }, ); -#[cfg(feature = "dependency-tasks")] cross_tests!( TestName: test_success_with_async_delay_2, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_1/upgrade_task_with_consensus.rs b/testing/tests/tests_1/upgrade_task_with_consensus.rs deleted file mode 100644 index e226726246..0000000000 --- a/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ /dev/null @@ -1,678 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -#![cfg(not(feature = "dependency-tasks"))] -// TODO: Remove after integration of dependency-tasks -#![cfg(not(feature = "dependency-tasks"))] -#![allow(unused_imports)] - -use std::time::Duration; - -use futures::StreamExt; -use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; -use hotshot_example_types::{ - block_types::TestTransaction, - node_types::{MemoryImpl, TestTypes, TestVersions}, - state_types::TestInstanceState, -}; -use hotshot_macros::test_scripts; -use hotshot_task_impls::{ - consensus::ConsensusTaskState, events::HotShotEvent::*, upgrade::UpgradeTaskState, -}; -use hotshot_testing::{ - helpers::{build_fake_view_with_leaf, vid_share}, - predicates::{event::*, upgrade_with_consensus::*}, - script::{Expectations, TaskScript}, - view_generator::TestViewGenerator, -}; -use hotshot_types::{ - data::{null_block, ViewNumber}, - simple_vote::UpgradeProposalData, - traits::{ - election::Membership, - node_implementation::{ConsensusTime, Versions}, - }, - vote::HasViewNumber, -}; -use vbs::version::{StaticVersionType, Version}; -use vec1::vec1; - -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -/// Tests that we correctly update our internal consensus state when reaching a decided upgrade certificate. -async fn test_upgrade_task_vote() { - use hotshot_testing::helpers::build_system_handle; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(1) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let old_version = Version { major: 0, minor: 1 }; - let new_version = Version { major: 0, minor: 2 }; - - let upgrade_data: UpgradeProposalData = UpgradeProposalData { - old_version, - new_version, - decide_by: ViewNumber::new(6), - new_version_hash: [0u8; 12].to_vec(), - old_version_last_view: ViewNumber::new(6), - new_version_first_view: ViewNumber::new(7), - }; - - let mut proposals = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - let mut leaders = Vec::new(); - let mut leaves = Vec::new(); - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - for view in (&mut generator).take(2).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - leaves.push(view.leaf.clone()); - } - - generator.add_upgrade(upgrade_data); - - for view in generator.take(4).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - leaves.push(view.leaf.clone()); - } - let inputs = vec![ - vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), - DaCertificateRecv(dacs[0].clone()), - ], - vec![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), - DaCertificateRecv(dacs[1].clone()), - ], - vec![ - QuorumProposalRecv(proposals[2].clone(), leaders[2]), - DaCertificateRecv(dacs[2].clone()), - VidShareRecv(leaders[2], vid_share(&vids[2].0, handle.public_key())), - ], - vec![ - QuorumProposalRecv(proposals[3].clone(), leaders[3]), - DaCertificateRecv(dacs[3].clone()), - VidShareRecv(leaders[3], vid_share(&vids[3].0, handle.public_key())), - ], - vec![ - QuorumProposalRecv(proposals[4].clone(), leaders[4]), - DaCertificateRecv(dacs[4].clone()), - VidShareRecv(leaders[4], vid_share(&vids[4].0, handle.public_key())), - ], - vec![QuorumProposalRecv(proposals[5].clone(), leaders[5])], - ]; - - let expectations = vec![ - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(1))), - validated_state_updated(), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[0].clone())), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(2))), - validated_state_updated(), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[1].clone())), - ], - task_state_asserts: vec![no_decided_upgrade_certificate()], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(3))), - validated_state_updated(), - quorum_proposal_validated(), - exact(QuorumVoteSend(votes[2].clone())), - ], - task_state_asserts: vec![no_decided_upgrade_certificate()], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(4))), - validated_state_updated(), - quorum_proposal_validated(), - leaf_decided(), - exact(QuorumVoteSend(votes[3].clone())), - ], - task_state_asserts: vec![no_decided_upgrade_certificate()], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(5))), - validated_state_updated(), - quorum_proposal_validated(), - leaf_decided(), - exact(QuorumVoteSend(votes[4].clone())), - ], - task_state_asserts: vec![no_decided_upgrade_certificate()], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(6))), - validated_state_updated(), - quorum_proposal_validated(), - leaf_decided(), - ], - task_state_asserts: vec![decided_upgrade_certificate()], - }, - ]; - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let mut consensus_script = TaskScript { - timeout: Duration::from_millis(65), - state: consensus_state, - expectations, - }; - - test_scripts![inputs, consensus_script].await; -} - -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -/// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. -async fn test_upgrade_task_propose() { - use std::sync::Arc; - - use hotshot_testing::helpers::build_system_handle; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(3) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let other_handles = futures::future::join_all((0..=9).map(build_system_handle)).await; - - let old_version = Version { major: 0, minor: 1 }; - let new_version = Version { major: 0, minor: 2 }; - - let upgrade_data: UpgradeProposalData = UpgradeProposalData { - old_version, - new_version, - decide_by: ViewNumber::new(4), - new_version_hash: [0u8; 12].to_vec(), - old_version_last_view: ViewNumber::new(5), - new_version_first_view: ViewNumber::new(7), - }; - - let mut proposals = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - let mut leaders = Vec::new(); - let mut views = Vec::new(); - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - generator.add_upgrade(upgrade_data.clone()); - - for view in generator.take(4).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - let mut upgrade_votes = Vec::new(); - - for handle in other_handles { - upgrade_votes.push( - views[2] - .create_upgrade_vote(upgrade_data.clone(), &handle.0) - .await, - ); - } - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let upgrade_state = - UpgradeTaskState::::create_from(&handle).await; - - let upgrade_vote_recvs: Vec<_> = upgrade_votes.into_iter().map(UpgradeVoteRecv).collect(); - - let inputs = vec![ - vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), - DaCertificateRecv(dacs[0].clone()), - ], - upgrade_vote_recvs, - vec![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - DaCertificateRecv(dacs[1].clone()), - VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), - ], - vec![ - VidShareRecv(leaders[2], vid_share(&vids[2].0, handle.public_key())), - SendPayloadCommitmentAndMetadata( - vids[2].0[0].data.payload_commitment, - proposals[2].data.block_header.builder_commitment.clone(), - proposals[2].data.block_header.metadata, - ViewNumber::new(3), - vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), - ::Base::VERSION - ) - .unwrap()], - None, - ), - QcFormed(either::Either::Left(proposals[2].data.justify_qc.clone())), - ], - ]; - - let mut consensus_script = TaskScript { - timeout: Duration::from_millis(35), - state: consensus_state, - expectations: vec![ - Expectations { - output_asserts: vec![ - exact::(ViewChange(ViewNumber::new(1))), - validated_state_updated(), - quorum_proposal_validated::(), - quorum_vote_send::(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact::(ViewChange(ViewNumber::new(2))), - validated_state_updated(), - quorum_proposal_validated::(), - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![quorum_proposal_send_with_upgrade_certificate::()], - task_state_asserts: vec![], - }, - ], - }; - - let mut upgrade_script = TaskScript { - timeout: Duration::from_millis(35), - state: upgrade_state, - expectations: vec![ - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![upgrade_certificate_formed::()], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - ], - }; - - test_scripts![inputs, consensus_script, upgrade_script].await; -} - -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -/// Test that we correctly handle blank blocks between versions. -/// Specifically, this test schedules an upgrade between views 4 and 8, -/// and ensures that: -/// - we correctly vote affirmatively on a QuorumProposal with a null block payload in view 5 -/// - we correctly propose with a null block payload in view 6, even if we have indications to do otherwise (via SendPayloadCommitmentAndMetadata, VID etc). -/// - we correctly reject a QuorumProposal with a non-null block payload in view 7. -async fn test_upgrade_task_blank_blocks() { - use hotshot_testing::helpers::build_system_handle; - - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - - let handle = build_system_handle::(6) - .await - .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - - let old_version = Version { major: 0, minor: 1 }; - let new_version = Version { major: 0, minor: 2 }; - - let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), - ::Base::VERSION, - ) - .unwrap(); - - let upgrade_data: UpgradeProposalData = UpgradeProposalData { - old_version, - new_version, - decide_by: ViewNumber::new(7), - new_version_hash: [0u8; 12].to_vec(), - old_version_last_view: ViewNumber::new(6), - new_version_first_view: ViewNumber::new(8), - }; - - let mut proposals = Vec::new(); - let mut votes = Vec::new(); - let mut dacs = Vec::new(); - let mut vids = Vec::new(); - let mut leaders = Vec::new(); - let mut views = Vec::new(); - - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); - - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - generator.add_upgrade(upgrade_data.clone()); - - for view in (&mut generator).take(3).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - // We are now in the upgrade period, and set the transactions to null for the QuorumProposalRecv in view 5. - // Our node should vote affirmatively on this. - generator.add_transactions(vec![]); - - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - // The transactions task generates an empty transaction set in this view, - // because we are proposing between versions. - generator.add_transactions(vec![]); - - for view in (&mut generator).take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - // For view 7, we set the transactions to something not null. The node should fail to vote on this. - generator.add_transactions(vec![TestTransaction::new(vec![0])]); - - for view in generator.take(1).collect::>().await { - proposals.push(view.quorum_proposal.clone()); - votes.push(view.create_quorum_vote(&handle).await); - dacs.push(view.da_certificate.clone()); - vids.push(view.vid_proposal.clone()); - leaders.push(view.leader_public_key); - views.push(view.clone()); - } - - let consensus_state = - ConsensusTaskState::::create_from(&handle).await; - let upgrade_state = - UpgradeTaskState::::create_from(&handle).await; - - let inputs = vec![ - vec![ - QuorumProposalRecv(proposals[0].clone(), leaders[0]), - VidShareRecv(leaders[0], vid_share(&vids[0].0, handle.public_key())), - DaCertificateRecv(dacs[0].clone()), - ], - vec![ - QuorumProposalRecv(proposals[1].clone(), leaders[1]), - VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), - DaCertificateRecv(dacs[1].clone()), - SendPayloadCommitmentAndMetadata( - vids[1].0[0].data.payload_commitment, - proposals[1].data.block_header.builder_commitment.clone(), - proposals[1].data.block_header.metadata, - ViewNumber::new(2), - vec1![builder_fee.clone()], - None, - ), - ], - vec![ - DaCertificateRecv(dacs[2].clone()), - VidShareRecv(leaders[2], vid_share(&vids[2].0, handle.public_key())), - SendPayloadCommitmentAndMetadata( - vids[2].0[0].data.payload_commitment, - proposals[2].data.block_header.builder_commitment.clone(), - proposals[2].data.block_header.metadata, - ViewNumber::new(3), - vec1![builder_fee.clone()], - None, - ), - QuorumProposalRecv(proposals[2].clone(), leaders[2]), - ], - vec![ - DaCertificateRecv(dacs[3].clone()), - VidShareRecv(leaders[3], vid_share(&vids[3].0, handle.public_key())), - SendPayloadCommitmentAndMetadata( - vids[3].0[0].data.payload_commitment, - proposals[3].data.block_header.builder_commitment.clone(), - proposals[3].data.block_header.metadata, - ViewNumber::new(4), - vec1![builder_fee.clone()], - None, - ), - QuorumProposalRecv(proposals[3].clone(), leaders[3]), - ], - vec![ - DaCertificateRecv(dacs[4].clone()), - VidShareRecv(leaders[4], vid_share(&vids[4].0, handle.public_key())), - SendPayloadCommitmentAndMetadata( - vids[4].0[0].data.payload_commitment, - proposals[4].data.block_header.builder_commitment.clone(), - proposals[4].data.block_header.metadata, - ViewNumber::new(5), - vec1![builder_fee.clone()], - None, - ), - QuorumProposalRecv(proposals[4].clone(), leaders[4]), - ], - vec![ - DaCertificateRecv(dacs[5].clone()), - VidShareRecv(leaders[5], vid_share(&vids[5].0, handle.public_key())), - SendPayloadCommitmentAndMetadata( - vids[5].0[0].data.payload_commitment, - proposals[5].data.block_header.builder_commitment.clone(), - proposals[5].data.block_header.metadata, - ViewNumber::new(6), - vec1![builder_fee.clone()], - None, - ), - QuorumProposalRecv(proposals[5].clone(), leaders[5]), - QcFormed(either::Either::Left(proposals[5].data.justify_qc.clone())), - ], - vec![ - DaCertificateRecv(dacs[6].clone()), - VidShareRecv(leaders[6], vid_share(&vids[6].0, handle.public_key())), - SendPayloadCommitmentAndMetadata( - vids[6].0[0].data.payload_commitment, - proposals[6].data.block_header.builder_commitment.clone(), - proposals[6].data.block_header.metadata, - ViewNumber::new(7), - vec1![builder_fee], - None, - ), - QuorumProposalRecv(proposals[6].clone(), leaders[6]), - ], - ]; - - let mut consensus_script = TaskScript { - timeout: Duration::from_millis(35), - state: consensus_state, - expectations: vec![ - Expectations { - output_asserts: vec![ - exact::(ViewChange(ViewNumber::new(1))), - validated_state_updated(), - quorum_proposal_validated(), - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(2))), - validated_state_updated(), - quorum_proposal_validated(), - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(3))), - validated_state_updated(), - quorum_proposal_validated(), - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(4))), - validated_state_updated(), - quorum_proposal_validated(), - leaf_decided(), - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(5))), - validated_state_updated(), - quorum_proposal_validated(), - leaf_decided(), - // This is between versions, but we are receiving a null block and hence should vote affirmatively on it. - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(6))), - validated_state_updated(), - quorum_proposal_validated(), - quorum_proposal_send_with_null_block(quorum_membership.total_nodes()), - leaf_decided(), - quorum_vote_send(), - ], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![ - exact(ViewChange(ViewNumber::new(7))), - validated_state_updated(), - quorum_proposal_validated(), - leaf_decided(), - // We do NOT expect a quorum_vote_send() because we have set the block to be non-null in this view. - ], - task_state_asserts: vec![], - }, - ], - }; - - let mut upgrade_script = TaskScript { - timeout: Duration::from_millis(35), - state: upgrade_state, - expectations: vec![ - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - Expectations { - output_asserts: vec![], - task_state_asserts: vec![], - }, - ], - }; - - test_scripts![inputs, consensus_script, upgrade_script].await; -} diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index b429fddaf4..74ef7800e1 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -4,8 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(feature = "dependency-tasks")] -// TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] use std::time::Duration; diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 49805d53d5..d7bdb28dad 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -4,8 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![cfg(feature = "dependency-tasks")] -// TODO: Remove after integration of dependency-tasks #![allow(unused_imports)] use std::time::Duration; @@ -40,7 +38,6 @@ use vbs::version::Version; const TIMEOUT: Duration = Duration::from_millis(65); -#[cfg(feature = "dependency-tasks")] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] /// Tests that we correctly update our internal quorum vote state when reaching a decided upgrade diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 62d1d8040b..48fcffe5e7 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -1,5 +1,3 @@ -#![cfg(feature = "dependency-tasks")] - use std::time::Duration; use async_broadcast::broadcast; diff --git a/types/src/hotshot_config_file.rs b/types/src/hotshot_config_file.rs index 9eb6be0104..37457b8d28 100644 --- a/types/src/hotshot_config_file.rs +++ b/types/src/hotshot_config_file.rs @@ -6,14 +6,13 @@ use std::{num::NonZeroUsize, time::Duration, vec}; -use crate::{ - constants::REQUEST_DATA_DELAY, traits::signature_key::SignatureKey, ExecutionType, - HotShotConfig, PeerConfig, ValidatorConfig, -}; use surf_disco::Url; use vec1::Vec1; -use crate::upgrade_config::UpgradeConfig; +use crate::{ + constants::REQUEST_DATA_DELAY, traits::signature_key::SignatureKey, + upgrade_config::UpgradeConfig, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, +}; /// Default builder URL, used as placeholder fn default_builder_urls() -> Vec1 { diff --git a/types/src/network.rs b/types/src/network.rs index 5c33a2c6e7..31312bc13f 100644 --- a/types/src/network.rs +++ b/types/src/network.rs @@ -6,22 +6,23 @@ use std::{fs, ops::Range, path::Path, time::Duration, vec}; -use crate::hotshot_config_file::HotShotConfigFile; +use clap::ValueEnum; +use libp2p::{Multiaddr, PeerId}; +use serde_inline_default::serde_inline_default; +use thiserror::Error; +use tracing::error; + use crate::{ constants::{ ORCHESTRATOR_DEFAULT_NUM_ROUNDS, ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS, ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, REQUEST_DATA_DELAY, }, + hotshot_config_file::HotShotConfigFile, light_client::StateVerKey, traits::signature_key::SignatureKey, HotShotConfig, ValidatorConfig, }; -use clap::ValueEnum; -use libp2p::{Multiaddr, PeerId}; -use serde_inline_default::serde_inline_default; -use thiserror::Error; -use tracing::error; /// Configuration describing a libp2p node #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] diff --git a/types/src/validator_config.rs b/types/src/validator_config.rs index 7ede69b09b..2622c0d23a 100644 --- a/types/src/validator_config.rs +++ b/types/src/validator_config.rs @@ -6,10 +6,11 @@ use std::{env, fs, path::PathBuf}; -use crate::{traits::signature_key::SignatureKey, ValidatorConfig}; use toml; use tracing::error; +use crate::{traits::signature_key::SignatureKey, ValidatorConfig}; + /// Holds configuration for a validator node #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Default)] #[serde(bound(deserialize = ""))] From 859120e02fe03d5c027d90c93341617eec5597bc Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Tue, 15 Oct 2024 11:57:28 -0600 Subject: [PATCH 1249/1393] push task_state as QuorumProposalRecv task state into helper functions, rename consensus2 (#3758) --- hotshot/src/lib.rs | 2 - hotshot/src/tasks/mod.rs | 4 +- hotshot/src/tasks/task_state.rs | 4 +- .../src/{consensus2 => consensus}/handlers.rs | 12 +- .../src/{consensus2 => consensus}/mod.rs | 8 +- task-impls/src/helpers.rs | 121 +++++++++--------- task-impls/src/lib.rs | 2 +- .../src/quorum_proposal_recv/handlers.rs | 50 +------- testing/src/view_generator.rs | 2 +- testing/tests/tests_1/test_success.rs | 12 +- .../tests_1/upgrade_task_with_proposal.rs | 2 +- .../tests/tests_1/upgrade_task_with_vote.rs | 2 +- 12 files changed, 94 insertions(+), 127 deletions(-) rename task-impls/src/{consensus2 => consensus}/handlers.rs (96%) rename task-impls/src/{consensus2 => consensus}/mod.rs (95%) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index eb63cb630b..577318befb 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -350,8 +350,6 @@ impl, V: Versions> SystemContext, { use hotshot_task_impls::{ - consensus2::Consensus2TaskState, quorum_proposal::QuorumProposalTaskState, + consensus::ConsensusTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, }; handle.add_task(QuorumProposalTaskState::::create_from(handle).await); handle.add_task(QuorumVoteTaskState::::create_from(handle).await); handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); - handle.add_task(Consensus2TaskState::::create_from(handle).await); + handle.add_task(ConsensusTaskState::::create_from(handle).await); } #[cfg(feature = "rewind")] diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 1946912cd9..50d08221c4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -13,7 +13,7 @@ use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ - builder::BuilderClient, consensus2::Consensus2TaskState, da::DaTaskState, + builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, @@ -299,7 +299,7 @@ impl, V: Versions> CreateTaskState #[async_trait] impl, V: Versions> CreateTaskState - for Consensus2TaskState + for ConsensusTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus/handlers.rs similarity index 96% rename from task-impls/src/consensus2/handlers.rs rename to task-impls/src/consensus/handlers.rs index c766334471..cc0fa65a41 100644 --- a/task-impls/src/consensus2/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -21,9 +21,9 @@ use hotshot_types::{ }; use tracing::{debug, error, instrument}; -use super::Consensus2TaskState; +use super::ConsensusTaskState; use crate::{ - consensus2::Versions, + consensus::Versions, events::HotShotEvent, helpers::{broadcast_event, cancel_task}, vote_collection::handle_vote, @@ -38,7 +38,7 @@ pub(crate) async fn handle_quorum_vote_recv< vote: &QuorumVote, event: Arc>, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut ConsensusTaskState, ) -> Result<()> { // Are we the leader for this view? ensure!( @@ -73,7 +73,7 @@ pub(crate) async fn handle_timeout_vote_recv< vote: &TimeoutVote, event: Arc>, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut ConsensusTaskState, ) -> Result<()> { // Are we the leader for this view? ensure!( @@ -108,7 +108,7 @@ pub(crate) async fn handle_view_change< >( new_view_number: TYPES::Time, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut ConsensusTaskState, ) -> Result<()> { ensure!( new_view_number > task_state.cur_view, @@ -205,7 +205,7 @@ pub(crate) async fn handle_view_change< pub(crate) async fn handle_timeout, V: Versions>( view_number: TYPES::Time, sender: &Sender>>, - task_state: &mut Consensus2TaskState, + task_state: &mut ConsensusTaskState, ) -> Result<()> { ensure!( task_state.cur_view < view_number, diff --git a/task-impls/src/consensus2/mod.rs b/task-impls/src/consensus/mod.rs similarity index 95% rename from task-impls/src/consensus2/mod.rs rename to task-impls/src/consensus/mod.rs index 7b139842f9..82610978a6 100644 --- a/task-impls/src/consensus2/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -37,7 +37,7 @@ use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; mod handlers; /// Task state for the Consensus task. -pub struct Consensus2TaskState, V: Versions> { +pub struct ConsensusTaskState, V: Versions> { /// Our public key pub public_key: TYPES::SignatureKey, @@ -96,9 +96,9 @@ pub struct Consensus2TaskState, V: /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, } -impl, V: Versions> Consensus2TaskState { +impl, V: Versions> ConsensusTaskState { /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error", target = "Consensus2TaskState")] + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -151,7 +151,7 @@ impl, V: Versions> Consensus2TaskS #[async_trait] impl, V: Versions> TaskState - for Consensus2TaskState + for ConsensusTaskState { type Event = HotShotEvent; diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 6390c360aa..e582c93e97 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -41,7 +41,10 @@ use hotshot_types::{ use tokio::task::JoinHandle; use tracing::{debug, info, instrument, warn}; -use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; +use crate::{ + events::HotShotEvent, quorum_proposal_recv::QuorumProposalRecvTaskState, + request::REQUEST_TIMEOUT, +}; /// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] @@ -436,11 +439,8 @@ pub(crate) async fn parent_leaf_and_state( /// /// # Errors /// If any validation or state update fails. -/// TODO - This should just take the QuorumProposalRecv task state after -/// we merge the dependency tasks. -#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_lines)] -#[instrument(skip_all, fields(id = id, view = *proposal.data.view_number()))] +#[instrument(skip_all, fields(id = task_state.id, view = *proposal.data.view_number()))] pub async fn validate_proposal_safety_and_liveness< TYPES: NodeType, I: NodeImplementation, @@ -448,21 +448,15 @@ pub async fn validate_proposal_safety_and_liveness< >( proposal: Proposal>, parent_leaf: Leaf, - consensus: OuterConsensus, - decided_upgrade_certificate: Arc>>>, - quorum_membership: Arc, + task_state: &mut QuorumProposalRecvTaskState, event_stream: Sender>>, sender: TYPES::SignatureKey, - event_sender: Sender>, - id: u64, - upgrade_lock: UpgradeLock, - storage: Arc>, ) -> Result<()> { let view_number = proposal.data.view_number(); let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(&upgrade_lock).await, + proposed_leaf.parent_commitment() == parent_leaf.commit(&task_state.upgrade_lock).await, "Proposed leaf does not extend the parent leaf." ); @@ -471,19 +465,19 @@ pub async fn validate_proposal_safety_and_liveness< ); let view = View { view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(&upgrade_lock).await, + leaf: proposed_leaf.commit(&task_state.upgrade_lock).await, state, delta: None, // May be updated to `Some` in the vote task. }, }; { - let mut consensus_write = consensus.write().await; + let mut consensus_write = task_state.consensus.write().await; if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { tracing::trace!("{e:?}"); } consensus_write - .update_saved_leaves(proposed_leaf.clone(), &upgrade_lock) + .update_saved_leaves(proposed_leaf.clone(), &task_state.upgrade_lock) .await; // Update our internal storage of the proposal. The proposal is valid, so @@ -502,14 +496,17 @@ pub async fn validate_proposal_safety_and_liveness< UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &quorum_membership, - &upgrade_lock, + &task_state.quorum_membership, + &task_state.upgrade_lock, ) .await?; // Validate that the upgrade certificate is re-attached, if we saw one on the parent proposed_leaf - .extends_upgrade(&parent_leaf, &decided_upgrade_certificate) + .extends_upgrade( + &parent_leaf, + &task_state.upgrade_lock.decided_upgrade_certificate, + ) .await?; let justify_qc = proposal.data.justify_qc.clone(); @@ -518,7 +515,7 @@ pub async fn validate_proposal_safety_and_liveness< // Liveness check. { - let read_consensus = consensus.read().await; + let read_consensus = task_state.consensus.read().await; let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); // Safety check. @@ -542,7 +539,7 @@ pub async fn validate_proposal_safety_and_liveness< view_number, event: EventType::Error { error: Arc::new(e) }, }, - &event_sender, + &task_state.output_event_stream, ) .await; } @@ -553,7 +550,12 @@ pub async fn validate_proposal_safety_and_liveness< // Update our persistent storage of the proposal. If we cannot store the proposal reutrn // and error so we don't vote - storage.write().await.append_proposal(&proposal).await?; + task_state + .storage + .write() + .await + .append_proposal(&proposal) + .await?; // We accept the proposal, notify the application layer broadcast_event( @@ -564,7 +566,7 @@ pub async fn validate_proposal_safety_and_liveness< sender, }, }, - &event_sender, + &task_state.output_event_stream, ) .await; @@ -587,23 +589,24 @@ pub async fn validate_proposal_safety_and_liveness< /// /// # Errors /// If any validation or view number check fails. -pub async fn validate_proposal_view_and_certs( +pub async fn validate_proposal_view_and_certs< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( proposal: &Proposal>, - cur_view: TYPES::Time, - quorum_membership: &Arc, - timeout_membership: &Arc, - upgrade_lock: &UpgradeLock, + task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { let view = proposal.data.view_number(); ensure!( - view >= cur_view, + view >= task_state.cur_view, "Proposal is from an older view {:?}", proposal.data.clone() ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment proposal - .validate_signature(quorum_membership, upgrade_lock) + .validate_signature(&task_state.quorum_membership, &task_state.upgrade_lock) .await?; // Verify a timeout certificate OR a view sync certificate exists and is valid. @@ -623,7 +626,10 @@ pub async fn validate_proposal_view_and_certs( ); ensure!( timeout_cert - .is_valid_cert(timeout_membership.as_ref(), upgrade_lock) + .is_valid_cert( + task_state.timeout_membership.as_ref(), + &task_state.upgrade_lock + ) .await, "Timeout certificate for view {} was invalid", *view @@ -640,7 +646,10 @@ pub async fn validate_proposal_view_and_certs( // View sync certs must also be valid. ensure!( view_sync_cert - .is_valid_cert(quorum_membership.as_ref(), upgrade_lock) + .is_valid_cert( + task_state.quorum_membership.as_ref(), + &task_state.upgrade_lock + ) .await, "Invalid view sync finalize cert provided" ); @@ -652,8 +661,8 @@ pub async fn validate_proposal_view_and_certs( // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - quorum_membership, - upgrade_lock, + &task_state.quorum_membership, + &task_state.upgrade_lock, ) .await?; @@ -665,25 +674,19 @@ pub async fn validate_proposal_view_and_certs( /// /// # Errors /// Returns an [`anyhow::Error`] when the new view is not greater than the current view. -/// TODO: Remove args when we merge dependency tasks. -#[allow(clippy::too_many_arguments)] -pub(crate) async fn update_view( +pub(crate) async fn update_view, V: Versions>( new_view: TYPES::Time, event_stream: &Sender>>, - timeout: u64, - consensus: OuterConsensus, - cur_view: &mut TYPES::Time, - cur_view_time: &mut i64, - timeout_task: &mut JoinHandle<()>, - output_event_stream: &Sender>, - is_old_view_leader: bool, + task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { ensure!( - new_view > *cur_view, + new_view > task_state.cur_view, "New view is not greater than our current view" ); - let old_view = *cur_view; + let is_old_view_leader = + task_state.quorum_membership.leader(task_state.cur_view) == task_state.public_key; + let old_view = task_state.cur_view; debug!("Updating view from {} to {}", *old_view, *new_view); @@ -691,10 +694,10 @@ pub(crate) async fn update_view( info!("Progress: entered view {:>6}", *new_view); } - *cur_view = new_view; + task_state.cur_view = new_view; // The next view is just the current view + 1 - let next_view = *cur_view + 1; + let next_view = task_state.cur_view + 1; futures::join! { broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), @@ -705,7 +708,7 @@ pub(crate) async fn update_view( view_number: old_view, }, }, - output_event_stream, + &task_state.output_event_stream, ) }; @@ -715,7 +718,7 @@ pub(crate) async fn update_view( // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = next_view; - let timeout = Duration::from_millis(timeout); + let timeout = Duration::from_millis(task_state.timeout); async move { async_sleep(timeout).await; broadcast_event( @@ -727,30 +730,34 @@ pub(crate) async fn update_view( }); // cancel the old timeout task - cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await; + cancel_task(std::mem::replace( + &mut task_state.timeout_task, + new_timeout_task, + )) + .await; - let consensus = consensus.upgradable_read().await; + let consensus = task_state.consensus.upgradable_read().await; consensus .metrics .current_view - .set(usize::try_from(cur_view.u64()).unwrap()); + .set(usize::try_from(task_state.cur_view.u64()).unwrap()); let new_view_time = Utc::now().timestamp(); if is_old_view_leader { #[allow(clippy::cast_precision_loss)] consensus .metrics .view_duration_as_leader - .add_point((new_view_time - *cur_view_time) as f64); + .add_point((new_view_time - task_state.cur_view_time) as f64); } - *cur_view_time = new_view_time; + task_state.cur_view_time = new_view_time; // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(cur_view.u64()).unwrap() + if usize::try_from(task_state.cur_view.u64()).unwrap() > usize::try_from(consensus.last_decided_view().u64()).unwrap() { consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(cur_view.u64()).unwrap() + usize::try_from(task_state.cur_view.u64()).unwrap() - usize::try_from(consensus.last_decided_view().u64()).unwrap(), ); } diff --git a/task-impls/src/lib.rs b/task-impls/src/lib.rs index 754d2a972e..1f55bd9146 100644 --- a/task-impls/src/lib.rs +++ b/task-impls/src/lib.rs @@ -8,7 +8,7 @@ //! consensus in an event driven way /// The task which implements the core state logic of consensus. -pub mod consensus2; +pub mod consensus; /// The task which handles the logic for the quorum vote. pub mod quorum_vote; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 37b7ba5d12..c268c14b0e 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -93,20 +93,7 @@ async fn validate_proposal_liveness( - view_number, - event_sender, - task_state.timeout, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &mut task_state.cur_view, - &mut task_state.cur_view_time, - &mut task_state.timeout_task, - &task_state.output_event_stream, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, - ) - .await - { + if let Err(e) = update_view::(view_number, event_sender, task_state).await { debug!("Liveness Branch - Failed to update view; error = {e:#}"); } @@ -138,17 +125,10 @@ pub(crate) async fn handle_quorum_proposal_recv< task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { let quorum_proposal_sender_key = quorum_proposal_sender_key.clone(); - let cur_view = task_state.cur_view; - validate_proposal_view_and_certs( - proposal, - task_state.cur_view, - &task_state.quorum_membership, - &task_state.timeout_membership, - &task_state.upgrade_lock, - ) - .await - .context("Failed to validate proposal view or attached certs")?; + validate_proposal_view_and_certs(proposal, task_state) + .await + .context("Failed to validate proposal view or attached certs")?; let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); @@ -250,32 +230,14 @@ pub(crate) async fn handle_quorum_proposal_recv< validate_proposal_safety_and_liveness::( proposal.clone(), parent_leaf, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), - Arc::clone(&task_state.quorum_membership), + task_state, event_sender.clone(), quorum_proposal_sender_key, - task_state.output_event_stream.clone(), - task_state.id, - task_state.upgrade_lock.clone(), - Arc::clone(&task_state.storage), ) .await?; // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::( - view_number, - event_sender, - task_state.timeout, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - &mut task_state.cur_view, - &mut task_state.cur_view_time, - &mut task_state.timeout_task, - &task_state.output_event_stream, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, - ) - .await - { + if let Err(e) = update_view::(view_number, event_sender, task_state).await { debug!("Full Branch - Failed to update view; error = {e:#}"); } diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index e574859669..315ac2d5e8 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -63,7 +63,7 @@ pub struct TestView { pub da_certificate: DaCertificate, pub transactions: Vec, upgrade_data: Option>, - pub formed_upgrade_certificate: Option>, + formed_upgrade_certificate: Option>, view_sync_finalize_data: Option>, timeout_cert_data: Option>, upgrade_lock: UpgradeLock, diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index a2ee5f1572..853f823278 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -6,12 +6,12 @@ use std::time::Duration; -use hotshot_example_types::node_types::{ - Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, - TestTypesRandomizedLeader, TestVersions, -}; -use hotshot_example_types::testable_delay::{ - DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay, +use hotshot_example_types::{ + node_types::{ + Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, + TestTypesRandomizedLeader, TestVersions, + }, + testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; use hotshot_macros::cross_tests; use hotshot_testing::{ diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 74ef7800e1..5bf95997c0 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -17,7 +17,7 @@ use hotshot_example_types::{ }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ - consensus2::Consensus2TaskState, events::HotShotEvent::*, + consensus::ConsensusTaskState, events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState, upgrade::UpgradeTaskState, }; use hotshot_testing::{ diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index d7bdb28dad..88112683a5 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -17,7 +17,7 @@ use hotshot_example_types::{ }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ - consensus2::Consensus2TaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, + consensus::ConsensusTaskState, events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState, upgrade::UpgradeTaskState, }; use hotshot_testing::{ From a4635a41228ef056693a705a78fd1d110603a2cb Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Tue, 15 Oct 2024 19:12:46 -0600 Subject: [PATCH 1250/1393] rename (#3763) --- task-impls/src/helpers.rs | 6 +++--- task-impls/src/quorum_proposal/mod.rs | 2 +- task-impls/src/quorum_vote/mod.rs | 6 +++--- task-impls/src/view_sync.rs | 22 +++++++++++----------- types/src/data.rs | 4 ++-- types/src/simple_certificate.rs | 6 +++--- types/src/simple_vote.rs | 2 +- types/src/vote.rs | 6 +++--- 8 files changed, 27 insertions(+), 27 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index e582c93e97..5df8200f16 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -399,12 +399,12 @@ pub(crate) async fn parent_leaf_and_state( format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") )?; - if leaf_commitment != consensus_reader.high_qc().date().leaf_commit { + if leaf_commitment != consensus_reader.high_qc().data().leaf_commit { // NOTE: This happens on the genesis block debug!( "They don't equal: {:?} {:?}", leaf_commitment, - consensus_reader.high_qc().date().leaf_commit + consensus_reader.high_qc().data().leaf_commit ); } @@ -620,7 +620,7 @@ pub async fn validate_proposal_view_and_certs< match received_proposal_cert { ViewChangeEvidence::Timeout(timeout_cert) => { ensure!( - timeout_cert.date().view == view - 1, + timeout_cert.data().view == view - 1, "Timeout certificate for view {} was not for the immediately preceding view", *view ); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 0d845f2070..2c12b83b28 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -426,7 +426,7 @@ impl, V: Versions> { warn!( "View Sync Finalize certificate {:?} was invalid", - certificate.date() + certificate.data() ); return; } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index e7dd9c1699..ed0d61d4e9 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -104,7 +104,7 @@ impl + 'static, V: Versions> .read() .await .saved_leaves() - .get(&justify_qc.date().leaf_commit) + .get(&justify_qc.data().leaf_commit) .cloned(); maybe_parent = match maybe_parent { Some(p) => Some(p), @@ -123,7 +123,7 @@ impl + 'static, V: Versions> }; let parent = maybe_parent.context(format!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.date().leaf_commit, + justify_qc.data().leaf_commit, proposed_leaf.view_number(), ))?; let consensus_reader = self.consensus.read().await; @@ -295,7 +295,7 @@ impl + 'static, V: Versions> Handl leaf = Some(proposed_leaf); } HotShotEvent::DaCertificateValidated(cert) => { - let cert_payload_comm = cert.date().payload_commit; + let cert_payload_comm = cert.data().payload_commit; if let Some(comm) = payload_commitment { if cert_payload_comm != comm { error!("DAC has inconsistent payload commitment with quorum proposal or VID."); diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 630907c0ac..cc06ed170d 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -505,7 +505,7 @@ impl, V: Versions> .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) .await { - error!("Not valid view sync cert! {:?}", certificate.date()); + error!("Not valid view sync cert! {:?}", certificate.data()); return None; } @@ -516,13 +516,13 @@ impl, V: Versions> return Some(HotShotTaskCompleted); } - if certificate.date().relay > self.relay { - self.relay = certificate.date().relay; + if certificate.data().relay > self.relay { + self.relay = certificate.data().relay; } let Ok(vote) = ViewSyncCommitVote::::create_signed_vote( ViewSyncCommitData { - relay: certificate.date().relay, + relay: certificate.data().relay, round: self.next_view, }, self.next_view, @@ -587,7 +587,7 @@ impl, V: Versions> .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) .await { - error!("Not valid view sync cert! {:?}", certificate.date()); + error!("Not valid view sync cert! {:?}", certificate.data()); return None; } @@ -598,13 +598,13 @@ impl, V: Versions> return Some(HotShotTaskCompleted); } - if certificate.date().relay > self.relay { - self.relay = certificate.date().relay; + if certificate.data().relay > self.relay { + self.relay = certificate.data().relay; } let Ok(vote) = ViewSyncFinalizeVote::::create_signed_vote( ViewSyncFinalizeData { - relay: certificate.date().relay, + relay: certificate.data().relay, round: self.next_view, }, self.next_view, @@ -679,7 +679,7 @@ impl, V: Versions> .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) .await { - error!("Not valid view sync cert! {:?}", certificate.date()); + error!("Not valid view sync cert! {:?}", certificate.data()); return None; } @@ -690,8 +690,8 @@ impl, V: Versions> return Some(HotShotTaskCompleted); } - if certificate.date().relay > self.relay { - self.relay = certificate.date().relay; + if certificate.data().relay > self.relay { + self.relay = certificate.data().relay; } if let Some(timeout_task) = self.timeout_task.take() { diff --git a/types/src/data.rs b/types/src/data.rs index d05760bf7a..e70af8c294 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -241,7 +241,7 @@ impl ViewChangeEvidence { /// Check that the given ViewChangeEvidence is relevant to the current view. pub fn is_valid_for_view(&self, view: &TYPES::Time) -> bool { match self { - ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.date().view == *view - 1, + ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.data().view == *view - 1, ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, } } @@ -769,7 +769,7 @@ impl Leaf { Leaf { view_number: *view_number, justify_qc: justify_qc.clone(), - parent_commitment: justify_qc.date().leaf_commit, + parent_commitment: justify_qc.data().leaf_commit, block_header: block_header.clone(), upgrade_certificate: upgrade_certificate.clone(), block_payload: None, diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index f948e3a20b..6e9e7bbbfb 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -157,7 +157,7 @@ impl> membership.stake_table(), U256::from(Self::threshold(membership)), ); - let Ok(commit) = self.date_commitment(upgrade_lock).await else { + let Ok(commit) = self.data_commitment(upgrade_lock).await else { return false; }; ::check( @@ -169,10 +169,10 @@ impl> fn threshold>(membership: &MEMBERSHIP) -> u64 { THRESHOLD::threshold(membership) } - fn date(&self) -> &Self::Voteable { + fn data(&self) -> &Self::Voteable { &self.data } - async fn date_commitment( + async fn data_commitment( &self, upgrade_lock: &UpgradeLock, ) -> Result>> { diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index a86452b9a9..45e131f756 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -147,7 +147,7 @@ impl Vote for SimpleVote Commitment { + fn data_commitment(&self) -> Commitment { self.data.commit() } } diff --git a/types/src/vote.rs b/types/src/vote.rs index 0123ba0085..1376aa0d03 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -39,7 +39,7 @@ pub trait Vote: HasViewNumber { /// Gets the data which was voted on by this vote fn date(&self) -> &Self::Commitment; /// Gets the Data commitment of the vote - fn date_commitment(&self) -> Commitment; + fn data_commitment(&self) -> Commitment; /// Gets the public signature key of the votes creator/sender fn signing_key(&self) -> TYPES::SignatureKey; @@ -81,9 +81,9 @@ pub trait Certificate: HasViewNumber { // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>(membership: &MEMBERSHIP) -> u64; /// Get the commitment which was voted on - fn date(&self) -> &Self::Voteable; + fn data(&self) -> &Self::Voteable; /// Get the vote commitment which the votes commit to - fn date_commitment( + fn data_commitment( &self, upgrade_lock: &UpgradeLock, ) -> impl std::future::Future>>>; From daf0ae71b08b576c1d2ebdde90bfb139a5cf4a94 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 16 Oct 2024 17:19:35 +0200 Subject: [PATCH 1251/1393] Allow injecting custom tasks into testing harness (#3764) --- testing/src/test_builder.rs | 17 +++++++++++++++- testing/src/test_launcher.rs | 3 +++ testing/src/test_runner.rs | 25 +++++++++++++++-------- testing/src/test_task.rs | 39 +++++++++++++++++++++++++++++++++++- 4 files changed, 74 insertions(+), 10 deletions(-) diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index ea14c428be..2a7414475f 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -33,6 +33,7 @@ use super::{ use crate::{ spinning_task::SpinningTaskDescription, test_launcher::{Network, ResourceGenerators, TestLauncher}, + test_task::TestTaskStateSeed, view_sync_task::ViewSyncTaskDescription, }; @@ -444,8 +445,21 @@ where /// a [`TestLauncher`] that can be used to launch the test. /// # Panics /// if some of the the configuration values are zero - #[must_use] pub fn gen_launcher(self, node_id: u64) -> TestLauncher { + self.gen_launcher_with_tasks(node_id, vec![]) + } + + /// turn a description of a test (e.g. a [`TestDescription`]) into + /// a [`TestLauncher`] that can be used to launch the test, with + /// additional testing tasks to run in test harness + /// # Panics + /// if some of the the configuration values are zero + #[must_use] + pub fn gen_launcher_with_tasks( + self, + node_id: u64, + additional_test_tasks: Vec>>, + ) -> TestLauncher { let TestDescription { num_nodes_with_stake, num_bootstrap_nodes, @@ -561,6 +575,7 @@ where }), }, metadata: self, + additional_test_tasks, } .modify_default_config(mod_config) } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 7f197fd1ea..a6c5730e1c 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -20,6 +20,7 @@ use hotshot_types::{ }; use super::{test_builder::TestDescription, test_runner::TestRunner}; +use crate::test_task::TestTaskStateSeed; /// A type alias to help readability pub type Network = Arc<>::Network>; @@ -45,6 +46,8 @@ pub struct TestLauncher, V pub resource_generator: ResourceGenerators, /// metadata used for tasks pub metadata: TestDescription, + /// any additional test tasks to run + pub additional_test_tasks: Vec>>, } impl, V: Versions> TestLauncher { diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 16a649060e..221f43ced6 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -133,7 +133,7 @@ where } let TestRunner { - ref launcher, + launcher, nodes, solver_server, late_start, @@ -193,9 +193,9 @@ where &TestInstanceState::default(), ) .await, - async_delay_config: self.launcher.metadata.async_delay_config, + async_delay_config: launcher.metadata.async_delay_config, restart_contexts: HashMap::new(), - channel_generator: self.launcher.resource_generator.channel_generator, + channel_generator: launcher.resource_generator.channel_generator, }; let spinning_task = TestTask::>::new( spinning_task_state, @@ -206,16 +206,16 @@ where let overall_safety_task_state = OverallSafetyTask { handles: Arc::clone(&handles), ctx: RoundCtx::default(), - properties: self.launcher.metadata.overall_safety_properties.clone(), + properties: launcher.metadata.overall_safety_properties.clone(), error: None, test_sender, }; let consistency_task_state = ConsistencyTask { consensus_leaves: BTreeMap::new(), - safety_properties: self.launcher.metadata.overall_safety_properties, - ensure_upgrade: self.launcher.metadata.upgrade_view.is_some(), - validate_transactions: self.launcher.metadata.validate_transactions, + safety_properties: launcher.metadata.overall_safety_properties, + ensure_upgrade: launcher.metadata.upgrade_view.is_some(), + validate_transactions: launcher.metadata.validate_transactions, _pd: PhantomData, }; @@ -234,7 +234,7 @@ where // add view sync task let view_sync_task_state = ViewSyncTask { hit_view_sync: HashSet::new(), - description: self.launcher.metadata.view_sync_properties, + description: launcher.metadata.view_sync_properties, _pd: PhantomData, }; @@ -260,6 +260,15 @@ where drop(nodes); + for seed in launcher.additional_test_tasks { + let task = TestTask::new( + seed.into_state(Arc::clone(&handles)).await, + event_rxs.clone(), + test_receiver.clone(), + ); + task_futs.push(task.run()); + } + task_futs.push(overall_safety_task.run()); task_futs.push(consistency_task.run()); task_futs.push(view_sync_task.run()); diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 4fae50c233..9df6a23d0d 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -9,11 +9,15 @@ use std::{sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; +use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; use futures::future::select_all; -use hotshot::types::{Event, Message}; +use hotshot::{ + traits::TestableNodeImplementation, + types::{Event, Message}, +}; use hotshot_task_impls::{events::HotShotEvent, network::NetworkMessageTaskState}; use hotshot_types::{ message::UpgradeLock, @@ -26,6 +30,8 @@ use hotshot_types::{ use tokio::task::{spawn, JoinHandle}; use tracing::error; +use crate::test_runner::Node; + /// enum describing how the tasks completed pub enum TestResult { /// the test task passed @@ -47,6 +53,37 @@ pub trait TestTaskState: Send { async fn check(&self) -> TestResult; } +/// Type alias for type-erased [`TestTaskState`] to be used for +/// dynamic dispatch +pub type AnyTestTaskState = + Box> + Send + Sync>; + +#[async_trait] +impl TestTaskState for AnyTestTaskState { + type Event = Event; + + async fn handle_event(&mut self, event: (Self::Event, usize)) -> Result<()> { + (**self).handle_event(event).await + } + + async fn check(&self) -> TestResult { + (**self).check().await + } +} + +#[async_trait] +pub trait TestTaskStateSeed: Send +where + TYPES: NodeType, + I: TestableNodeImplementation, + V: Versions, +{ + async fn into_state( + self: Box, + handles: Arc>>>, + ) -> AnyTestTaskState; +} + /// A basic task which loops waiting for events to come from `event_receiver` /// and then handles them using it's state /// It sends events to other `Task`s through `event_sender` From 335a4508d8afe8bd4c65d5cb39a9318cf33cc48e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 17 Oct 2024 14:47:42 -0600 Subject: [PATCH 1252/1393] [Networking] Add a hook for application messages (#3759) * add a hook for application messages:wq * revert logging change to memory network * fix tokio * missed one * remove unused import * Change recipients from `BTreeSet` to `Vec` --- hotshot/src/tasks/mod.rs | 1 + .../src/traits/networking/combined_network.rs | 4 +- .../src/traits/networking/libp2p_network.rs | 2 +- .../src/traits/networking/memory_network.rs | 3 +- .../src/traits/networking/push_cdn_network.rs | 4 +- hotshot/src/types/handle.rs | 46 +++++++- task-impls/src/network.rs | 28 +++-- testing/src/helpers.rs | 36 +++++- testing/src/test_task.rs | 2 + testing/tests/tests_1/network_task.rs | 105 ++++++++++++++++++ types/src/event.rs | 8 +- types/src/message.rs | 16 ++- types/src/traits/network.rs | 17 +-- 13 files changed, 226 insertions(+), 46 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3bee58b19f..8433ed6121 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -107,6 +107,7 @@ pub fn add_network_message_task< let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { internal_event_stream: handle.internal_event_stream.0.clone(), external_event_stream: handle.output_event_stream.0.clone(), + public_key: handle.public_key().clone(), }; let upgrade_lock = handle.hotshot.upgrade_lock.clone(); diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 297970966f..2bb7eba49c 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -7,7 +7,7 @@ //! Networking Implementation that has a primary and a fallback network. If the primary //! Errors we will use the backup to send or receive use std::{ - collections::{hash_map::DefaultHasher, BTreeMap, BTreeSet, HashMap}, + collections::{hash_map::DefaultHasher, BTreeMap, HashMap}, future::Future, hash::{Hash, Hasher}, num::NonZeroUsize, @@ -391,7 +391,7 @@ impl ConnectedNetwork for CombinedNetworks async fn da_broadcast_message( &self, message: Vec, - recipients: BTreeSet, + recipients: Vec, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { let primary = self.primary().clone(); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 936f905318..32551fba72 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -885,7 +885,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn da_broadcast_message( &self, message: Vec, - recipients: BTreeSet, + recipients: Vec, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // If we're not ready, return an error diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index e3993073c6..54fcdd9d1b 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -11,7 +11,6 @@ use core::time::Duration; use std::{ - collections::BTreeSet, fmt::Debug, sync::{ atomic::{AtomicUsize, Ordering}, @@ -305,7 +304,7 @@ impl ConnectedNetwork for MemoryNetwork { async fn da_broadcast_message( &self, message: Vec, - recipients: BTreeSet, + recipients: Vec, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // Iterate over all topics, compare to recipients, and get the `Topic` diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 94b3f8d30c..dc38f979cf 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -6,7 +6,7 @@ #[cfg(feature = "hotshot-testing")] use std::sync::atomic::{AtomicBool, Ordering}; -use std::{collections::BTreeSet, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, time::Duration}; @@ -487,7 +487,7 @@ impl ConnectedNetwork for PushCdnNetwork { async fn da_broadcast_message( &self, message: Vec, - _recipients: BTreeSet, + _recipients: Vec, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { self.broadcast_message(message, Topic::Da) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 8f5a79824a..5945e31ceb 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,11 +22,14 @@ use hotshot_types::{ consensus::Consensus, data::{Leaf, QuorumProposal}, error::HotShotError, - message::Proposal, + message::{Message, MessageKind, Proposal, RecipientList}, request_response::ProposalRequestPayload, traits::{ - consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, - node_implementation::NodeType, signature_key::SignatureKey, + consensus_api::ConsensusApi, + election::Membership, + network::{BroadcastDelay, ConnectedNetwork, Topic}, + node_implementation::NodeType, + signature_key::SignatureKey, }, vote::HasViewNumber, }; @@ -88,6 +91,43 @@ impl + 'static, V: Versions> self.output_event_stream.1.activate_cloned() } + /// Message other participents with a serialized message from the application + /// Receivers of this message will get an `Event::ExternalMessageReceived` via + /// the event stream. + /// + /// # Errors + /// Errors if serializing the request fails, or the request fails to be sent + pub async fn send_external_message( + &self, + msg: Vec, + recipients: RecipientList, + ) -> Result<()> { + let message = Message { + sender: self.public_key().clone(), + kind: MessageKind::External(msg), + }; + let serialized_message = self.hotshot.upgrade_lock.serialize(&message).await?; + + match recipients { + RecipientList::Broadcast => { + self.network + .broadcast_message(serialized_message, Topic::Global, BroadcastDelay::None) + .await?; + } + RecipientList::Direct(recipient) => { + self.network + .direct_message(serialized_message, recipient) + .await?; + } + RecipientList::Many(recipients) => { + self.network + .da_broadcast_message(serialized_message, recipients, BroadcastDelay::None) + .await?; + } + } + Ok(()) + } + /// Request a proposal from the all other nodes. Will block until some node /// returns a valid proposal with the requested commitment. If nobody has the /// proposal this will block forever diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index b841d6a15c..ca5f6c7aeb 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -46,6 +46,9 @@ pub struct NetworkMessageTaskState { /// Sender to send external events this task generates to the event stream pub external_event_stream: Sender>, + + /// This nodes public key + pub public_key: TYPES::SignatureKey, } impl NetworkMessageTaskState { @@ -160,11 +163,14 @@ impl NetworkMessageTaskState { // Handle external messages MessageKind::External(data) => { + if sender == self.public_key { + return; + } // Send the external message to the external event stream so it can be processed broadcast_event( Event { view_number: TYPES::Time::new(1), - event: EventType::ExternalMessageReceived(data), + event: EventType::ExternalMessageReceived { sender, data }, }, &self.external_event_stream, ) @@ -571,20 +577,12 @@ impl< .await } TransmitType::DaCommitteeBroadcast => { - net.da_broadcast_message(serialized_message, da_committee, broadcast_delay) - .await - } - TransmitType::DaCommitteeAndLeaderBroadcast(recipient) => { - if let Err(e) = net - .direct_message(serialized_message.clone(), recipient) - .await - { - warn!("Failed to send message: {e:?}"); - } - - // Otherwise, send the next message. - net.da_broadcast_message(serialized_message, da_committee, broadcast_delay) - .await + net.da_broadcast_message( + serialized_message, + da_committee.iter().cloned().collect(), + broadcast_delay, + ) + .await } }; diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index b370f8d279..54eab9d4fa 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -40,12 +40,12 @@ use hotshot_types::{ utils::{View, ViewInner}, vid::{vid_scheme, VidCommitment, VidProposal, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, + ValidatorConfig, }; use jf_vid::VidScheme; use serde::Serialize; -use crate::test_builder::TestDescription; - +use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; /// create the [`SystemContextHandle`] from a node id /// # Panics /// if cannot create a [`HotShotInitializer`] @@ -67,18 +67,46 @@ pub async fn build_system_handle< let builder: TestDescription = TestDescription::default_multiple_rounds(); let launcher = builder.gen_launcher(node_id); + build_system_handle_from_launcher(node_id, &launcher).await +} +/// create the [`SystemContextHandle`] from a node id and `TestLauncher` +/// # Panics +/// if cannot create a [`HotShotInitializer`] +pub async fn build_system_handle_from_launcher< + TYPES: NodeType, + I: NodeImplementation< + TYPES, + Storage = TestStorage, + AuctionResultsProvider = TestAuctionResultsProvider, + > + TestableNodeImplementation, + V: Versions, +>( + node_id: u64, + launcher: &TestLauncher, +) -> ( + SystemContextHandle, + Sender>>, + Receiver>>, +) { let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); let marketplace_config = (launcher.resource_generator.marketplace_config)(node_id); - let config = launcher.resource_generator.config.clone(); + let mut config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( - launcher.metadata.async_delay_config, + launcher.metadata.async_delay_config.clone(), )) .await .unwrap(); + // See whether or not we should be DA + let is_da = node_id < config.da_staked_committee_size as u64; + + // We assign node's public key and stake value rather than read from config file since it's a test + let validator_config = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); + config.my_own_validator_config = validator_config; let private_key = config.my_own_validator_config.private_key.clone(); let public_key = config.my_own_validator_config.public_key.clone(); diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 9df6a23d0d..8d685914d3 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -161,11 +161,13 @@ pub async fn add_network_message_test_task< external_event_stream: Sender>, upgrade_lock: UpgradeLock, channel: Arc, + public_key: TYPES::SignatureKey, ) -> JoinHandle<()> { let net = Arc::clone(&channel); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { internal_event_stream: internal_event_stream.clone(), external_event_stream: external_event_stream.clone(), + public_key, }; let network = Arc::clone(&net); diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 03a855a05f..272d132ae3 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -84,6 +84,7 @@ async fn test_network_task() { out_tx_external.clone(), upgrade_lock, network.clone(), + public_key, ) .await; @@ -104,6 +105,109 @@ async fn test_network_task() { )); } +#[cfg(test)] +#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] +#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +async fn test_network_external_mnessages() { + use hotshot::types::EventType; + use hotshot_testing::helpers::build_system_handle_from_launcher; + use hotshot_types::message::RecipientList; + + async_compatibility_layer::logging::setup_logging(); + async_compatibility_layer::logging::setup_backtrace(); + + let builder: TestDescription = + TestDescription::default_multiple_rounds(); + + let launcher = builder.gen_launcher(0); + + let mut handles = vec![]; + let mut event_streams = vec![]; + for i in 0..launcher.metadata.num_nodes_with_stake { + let handle = build_system_handle_from_launcher::( + i.try_into().unwrap(), + &launcher, + ) + .await + .0; + event_streams.push(handle.event_stream_known_impl()); + handles.push(handle); + } + + // Send a message from 1 -> 2 + handles[1] + .send_external_message(vec![1, 2], RecipientList::Direct(handles[2].public_key())) + .await + .unwrap(); + let event = async_compatibility_layer::art::async_timeout( + Duration::from_millis(100), + event_streams[2].recv(), + ) + .await + .unwrap() + .unwrap() + .event; + + // check that 2 received the message + assert!(matches!( + event, + EventType::ExternalMessageReceived { + sender, + data, + } if sender == handles[1].public_key() && data == vec![1, 2] + )); + + // Send a message from 2 -> 1 + handles[2] + .send_external_message(vec![2, 1], RecipientList::Direct(handles[1].public_key())) + .await + .unwrap(); + let event = async_compatibility_layer::art::async_timeout( + Duration::from_millis(100), + event_streams[1].recv(), + ) + .await + .unwrap() + .unwrap() + .event; + + // check that 1 received the message + assert!(matches!( + event, + EventType::ExternalMessageReceived { + sender, + data, + } if sender == handles[2].public_key() && data == vec![2,1] + )); + + // Check broadcast works + handles[0] + .send_external_message(vec![0, 0, 0], RecipientList::Broadcast) + .await + .unwrap(); + // All other nodes get the broadcast + for stream in event_streams.iter_mut().skip(1) { + let event = async_compatibility_layer::art::async_timeout( + Duration::from_millis(100), + stream.recv(), + ) + .await + .unwrap() + .unwrap() + .event; + assert!(matches!( + event, + EventType::ExternalMessageReceived { + sender, + data, + } if sender == handles[0].public_key() && data == vec![0,0,0] + )); + } + // No event on 0 even after short sleep + async_compatibility_layer::art::async_sleep(Duration::from_millis(2)).await; + assert!(event_streams[0].is_empty()); +} + #[cfg(test)] #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] @@ -161,6 +265,7 @@ async fn test_network_storage_fail() { out_tx_external.clone(), upgrade_lock, network.clone(), + public_key, ) .await; diff --git a/types/src/event.rs b/types/src/event.rs index 84f052d651..e71d0c8196 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -92,6 +92,7 @@ pub mod error_adaptor { Ok(Arc::new(HotShotError::FailedToDeserialize(str))) } } + /// The type and contents of a status event emitted by a `HotShot` instance /// /// This enum does not include metadata shared among all variants, such as the stage and view @@ -171,7 +172,12 @@ pub enum EventType { }, /// A message destined for external listeners was received - ExternalMessageReceived(Vec), + ExternalMessageReceived { + /// Public Key of the message sender + sender: TYPES::SignatureKey, + /// Serialized data of the message + data: Vec, + }, } #[derive(Debug, Serialize, Deserialize, Clone, Copy)] /// A list of actions that we track for nodes diff --git a/types/src/message.rs b/types/src/message.rs index 407845b001..ab72986652 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -9,7 +9,11 @@ //! This module contains types used to represent the various types of messages that //! `HotShot` nodes can send among themselves. -use std::{fmt, fmt::Debug, marker::PhantomData, sync::Arc}; +use std::{ + fmt::{self, Debug}, + marker::PhantomData, + sync::Arc, +}; use anyhow::{bail, ensure, Context, Result}; use async_lock::RwLock; @@ -117,6 +121,16 @@ pub enum MessageKind { External(Vec), } +/// List of keys to send a message to, or broadcast to all known keys +pub enum RecipientList { + /// Broadcast to all + Broadcast, + /// Send a message directly to a key + Direct(K), + /// Send a message directly to many keys + Many(Vec), +} + impl MessageKind { // Can't implement `From` directly due to potential conflict with // `From`. diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 40a55423cc..97851e6963 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -17,7 +17,7 @@ use thiserror::Error; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ - collections::{BTreeSet, HashMap}, + collections::HashMap, fmt::{Debug, Display}, hash::Hash, pin::Pin, @@ -54,8 +54,6 @@ pub enum TransmitType { Broadcast, /// broadcast to DA committee DaCommitteeBroadcast, - /// broadcast to the leader and the DA - DaCommitteeAndLeaderBroadcast(TYPES::SignatureKey), } /// Errors that can occur in the network @@ -122,17 +120,6 @@ pub enum NetworkError { LookupError(String), } -/// common traits we would like our network messages to implement -pub trait NetworkMsg: - Serialize + for<'a> Deserialize<'a> + Clone + Sync + Send + Debug + 'static -{ -} - -impl NetworkMsg for T where - T: Serialize + for<'a> Deserialize<'a> + Clone + Sync + Send + Debug + 'static -{ -} - /// Trait that bundles what we need from a request ID pub trait Id: Eq + PartialEq + Hash {} @@ -230,7 +217,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st async fn da_broadcast_message( &self, message: Vec, - recipients: BTreeSet, + recipients: Vec, broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError>; From b930cc1e5f2064402dd7a91a2ce21eb5935c1959 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 17 Oct 2024 23:45:00 +0200 Subject: [PATCH 1253/1393] Fix thread::sleep used in async context (#3768) --- fakeapi/Cargo.toml | 1 + fakeapi/src/fake_solver.rs | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/fakeapi/Cargo.toml b/fakeapi/Cargo.toml index 764a3077f0..315af7f949 100644 --- a/fakeapi/Cargo.toml +++ b/fakeapi/Cargo.toml @@ -12,6 +12,7 @@ repository.workspace = true toml = { workspace = true } tide-disco = { workspace = true } anyhow = { workspace = true } +async-compatibility-layer = { workspace = true } hotshot-types = { path = "../types" } vbs = { workspace = true } serde = { workspace = true } diff --git a/fakeapi/src/fake_solver.rs b/fakeapi/src/fake_solver.rs index 0a09b7648b..3c4beabd03 100644 --- a/fakeapi/src/fake_solver.rs +++ b/fakeapi/src/fake_solver.rs @@ -1,9 +1,10 @@ use std::{ io::{self, ErrorKind}, - thread, time, + time, }; use anyhow::Result; +use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; use futures::FutureExt; use hotshot_example_types::auction_results_provider_types::TestAuctionResult; @@ -83,7 +84,7 @@ impl FakeSolverState { /// /// # Errors /// Returns an error if the `should_fault` method is `Some`. - fn dump_builders(&self) -> Result { + async fn dump_builders(&self) -> Result { if let Some(fault) = self.should_fault() { match fault { FakeSolverFaultType::InternalServerFault => { @@ -94,7 +95,7 @@ impl FakeSolverState { } FakeSolverFaultType::TimeoutFault => { // Sleep for the preconfigured 1 second timeout interval - thread::sleep(SOLVER_MAX_TIMEOUT_S); + async_sleep(SOLVER_MAX_TIMEOUT_S).await; } } } @@ -130,7 +131,7 @@ impl FakeSolverApi for FakeSolverState { &self, _view_number: u64, ) -> Result { - self.dump_builders() + self.dump_builders().await } /// Get the auction results with a valid signature. @@ -139,7 +140,7 @@ impl FakeSolverApi for FakeSolverState { _view_number: u64, _signature: &::PureAssembledSignatureType, ) -> Result { - self.dump_builders() + self.dump_builders().await } } From 46c3c7aecc7ef8f8d22665b2516416814deaf11b Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Fri, 18 Oct 2024 20:07:01 +0200 Subject: [PATCH 1254/1393] Add epoch parameter in `Membership` trait's methods (#3751) * Add epoch parameter in `Membership` trait's methods * Fixes for dependency-tasks tests * Fix possible deadlock * Add epoch_height to config file * Rename `ViewTime` and `EpochTime` to `View` and `Epoch` --- .../src/auction_results_provider_types.rs | 2 +- example-types/src/node_types.rs | 27 +-- example-types/src/storage_types.rs | 18 +- examples/infra/mod.rs | 4 +- hotshot/src/lib.rs | 36 ++-- hotshot/src/tasks/mod.rs | 3 +- hotshot/src/tasks/task_state.rs | 10 +- .../traits/election/randomized_committee.rs | 22 +- .../src/traits/election/static_committee.rs | 22 +- .../static_committee_leader_two_views.rs | 22 +- .../src/traits/networking/combined_network.rs | 7 +- .../src/traits/networking/libp2p_network.rs | 7 +- hotshot/src/types/handle.rs | 23 ++- orchestrator/run-config.toml | 1 + task-impls/src/consensus/handlers.rs | 32 ++- task-impls/src/consensus/mod.rs | 7 +- task-impls/src/da.rs | 25 ++- task-impls/src/events.rs | 26 +-- task-impls/src/helpers.rs | 51 +++-- task-impls/src/network.rs | 42 ++-- task-impls/src/quorum_proposal/handlers.rs | 4 +- task-impls/src/quorum_proposal/mod.rs | 35 +++- .../src/quorum_proposal_recv/handlers.rs | 1 + task-impls/src/quorum_proposal_recv/mod.rs | 9 +- task-impls/src/quorum_vote/mod.rs | 56 ++++-- task-impls/src/request.rs | 24 ++- task-impls/src/response.rs | 13 +- task-impls/src/transactions.rs | 56 +++--- task-impls/src/upgrade.rs | 31 +-- task-impls/src/vid.rs | 7 +- task-impls/src/view_sync.rs | 88 +++++--- task-impls/src/vote_collection.rs | 53 +++-- testing/src/byzantine/byzantine_behaviour.rs | 11 +- testing/src/consistency_task.rs | 10 +- testing/src/helpers.rs | 51 +++-- testing/src/overall_safety_task.rs | 20 +- testing/src/spinning_task.rs | 8 +- testing/src/test_builder.rs | 1 + testing/src/test_runner.rs | 4 +- testing/src/view_generator.rs | 34 +++- testing/tests/tests_1/da_task.rs | 17 +- testing/tests/tests_1/network_task.rs | 4 + testing/tests/tests_1/quorum_proposal_task.rs | 92 +++++++-- testing/tests/tests_1/transaction_task.rs | 6 +- .../tests_1/upgrade_task_with_proposal.rs | 21 +- testing/tests/tests_1/vid_task.rs | 19 +- testing/tests/tests_1/view_sync_task.rs | 4 +- .../tests/tests_1/vote_dependency_handle.rs | 2 + testing/tests/tests_3/byzantine_tests.rs | 4 +- testing/tests/tests_3/memory_network.rs | 4 +- types/src/consensus.rs | 104 ++++++---- types/src/data.rs | 189 +++++++++++------- types/src/error.rs | 2 +- types/src/event.rs | 8 +- types/src/hotshot_config_file.rs | 4 + types/src/lib.rs | 2 + types/src/message.rs | 19 +- types/src/request_response.rs | 2 +- types/src/simple_certificate.rs | 21 +- types/src/simple_vote.rs | 28 +-- types/src/traits/auction_results_provider.rs | 2 +- types/src/traits/election.rs | 38 ++-- types/src/traits/network.rs | 18 +- types/src/traits/node_implementation.rs | 6 +- types/src/traits/storage.rs | 4 +- types/src/utils.rs | 2 +- types/src/vote.rs | 17 +- 67 files changed, 1004 insertions(+), 538 deletions(-) diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index af2f8b7026..01978c1359 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -48,7 +48,7 @@ pub struct TestAuctionResultsProvider { impl AuctionResultsProvider for TestAuctionResultsProvider { /// Mock fetching the auction results, with optional error injection to simulate failure cases /// in the solver. - async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result { + async fn fetch_auction_result(&self, view_number: TYPES::View) -> Result { if let Some(url) = &self.broadcast_url { let resp = reqwest::get(url.join(&format!("/v0/api/auction_results/{}", *view_number))?) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index a8f06976c3..1ab2446c12 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -4,6 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use crate::{ + auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, +}; use hotshot::traits::{ election::{ randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, @@ -12,6 +18,7 @@ use hotshot::traits::{ implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, signature_key::{BLSPubKey, BuilderKey}, @@ -20,13 +27,6 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; -use crate::{ - auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::{TestInstanceState, TestValidatedState}, - storage_types::TestStorage, -}; - #[derive( Copy, Clone, @@ -45,7 +45,8 @@ use crate::{ pub struct TestTypes; impl NodeType for TestTypes { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -74,7 +75,8 @@ impl NodeType for TestTypes { pub struct TestTypesRandomizedLeader; impl NodeType for TestTypesRandomizedLeader { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -103,7 +105,8 @@ impl NodeType for TestTypesRandomizedLeader { pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -235,8 +238,8 @@ mod tests { let data = TestData { data: 10 }; - let view_0 = ::Time::new(0); - let view_1 = ::Time::new(1); + let view_0 = ::View::new(0); + let view_1 = ::View::new(1); let versioned_data_0 = VersionedVoteData::::new( diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 5c6277f35c..2a093b1f02 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -29,17 +29,17 @@ use hotshot_types::{ use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; type VidShares = HashMap< - ::Time, + ::View, HashMap<::SignatureKey, Proposal>>, >; #[derive(Clone, Debug)] pub struct TestStorageState { vids: VidShares, - das: HashMap>>, - proposals: BTreeMap>>, + das: HashMap>>, + proposals: BTreeMap>>, high_qc: Option>, - action: TYPES::Time, + action: TYPES::View, } impl Default for TestStorageState { @@ -49,7 +49,7 @@ impl Default for TestStorageState { das: HashMap::new(), proposals: BTreeMap::new(), high_qc: None, - action: TYPES::Time::genesis(), + action: TYPES::View::genesis(), } } } @@ -87,7 +87,7 @@ impl TestableDelay for TestStorage { impl TestStorage { pub async fn proposals_cloned( &self, - ) -> BTreeMap>> { + ) -> BTreeMap>> { self.inner.read().await.proposals.clone() } pub async fn high_qc_cloned(&self) -> Option> { @@ -96,7 +96,7 @@ impl TestStorage { pub async fn decided_upgrade_certificate(&self) -> Option> { self.decided_upgrade_certificate.read().await.clone() } - pub async fn last_actioned_view(&self) -> TYPES::Time { + pub async fn last_actioned_view(&self) -> TYPES::View { self.inner.read().await.action } } @@ -145,7 +145,7 @@ impl Storage for TestStorage { async fn record_action( &self, - view: ::Time, + view: ::View, action: hotshot_types::event::HotShotAction, ) -> Result<()> { if self.should_return_err { @@ -180,7 +180,7 @@ impl Storage for TestStorage { async fn update_undecided_state( &self, _leafs: CommitmentMap>, - _state: BTreeMap>, + _state: BTreeMap>, ) -> Result<()> { if self.should_return_err { bail!("Failed to update high qc to storage"); diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 96072cce89..0c7e8db673 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -463,7 +463,7 @@ pub trait RunDa< let start = Instant::now(); let mut event_stream = context.event_stream(); - let mut anchor_view: TYPES::Time = ::genesis(); + let mut anchor_view: TYPES::View = ::genesis(); let mut num_successful_commits = 0; context.hotshot.start_consensus().await; @@ -563,7 +563,7 @@ pub trait RunDa< .hotshot .memberships .quorum_membership - .committee_leaders(TYPES::Time::genesis()) + .committee_leaders(TYPES::View::genesis(), TYPES::Epoch::genesis()) .len(); let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); // `failed_num_views` could include uncommitted views diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 577318befb..bc2fbf8128 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -123,7 +123,7 @@ pub struct SystemContext, V: Versi instance_state: Arc, /// The view to enter when first starting consensus - start_view: TYPES::Time, + start_view: TYPES::View, /// Access to the output event stream. output_event_stream: (Sender>, InactiveReceiver>), @@ -302,9 +302,17 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext Option> { + pub async fn state(&self, view: TYPES::View) -> Option> { self.consensus.read().await.state(view).cloned() } @@ -964,10 +972,10 @@ pub struct HotShotInitializer { state_delta: Option>::Delta>>, /// Starting view number that should be equivelant to the view the node shut down with last. - start_view: TYPES::Time, + start_view: TYPES::View, /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. - actioned_view: TYPES::Time, + actioned_view: TYPES::View, /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. @@ -978,9 +986,9 @@ pub struct HotShotInitializer { /// to vote and propose right away if they didn't miss anything while down. undecided_leafs: Vec>, /// Not yet decided state - undecided_state: BTreeMap>, + undecided_state: BTreeMap>, /// Proposals we have sent out to provide to others for catchup - saved_proposals: BTreeMap>>, + saved_proposals: BTreeMap>>, } impl HotShotInitializer { @@ -997,8 +1005,8 @@ impl HotShotInitializer { inner: Leaf::genesis(&validated_state, &instance_state).await, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), - start_view: TYPES::Time::new(0), - actioned_view: TYPES::Time::new(0), + start_view: TYPES::View::new(0), + actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, decided_upgrade_certificate: None, @@ -1020,13 +1028,13 @@ impl HotShotInitializer { anchor_leaf: Leaf, instance_state: TYPES::InstanceState, validated_state: Option>, - start_view: TYPES::Time, - actioned_view: TYPES::Time, - saved_proposals: BTreeMap>>, + start_view: TYPES::View, + actioned_view: TYPES::View, + saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, decided_upgrade_certificate: Option>, undecided_leafs: Vec>, - undecided_state: BTreeMap>, + undecided_state: BTreeMap>, ) -> Self { Self { inner: anchor_leaf, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 8433ed6121..0f89203f45 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -170,7 +170,8 @@ pub fn add_network_event_task< ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, - view: TYPES::Time::genesis(), + view: TYPES::View::genesis(), + epoch: TYPES::Epoch::genesis(), quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 50d08221c4..d23799cc59 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -70,6 +70,7 @@ impl, V: Versions> CreateTaskState return Self { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), network: Arc::clone(&handle.hotshot.network), vote_collectors: BTreeMap::default(), @@ -91,6 +92,7 @@ impl, V: Versions> CreateTaskState return Self { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), network: Arc::clone(&handle.hotshot.network), vote_collector: None.into(), @@ -118,6 +120,7 @@ impl, V: Versions> CreateTaskState Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, vote_collector: None, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -140,6 +143,7 @@ impl, V: Versions> CreateTaskState network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -160,6 +164,7 @@ impl, V: Versions> CreateTaskState Self { current_view: cur_view, next_view: cur_view, + current_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -171,7 +176,7 @@ impl, V: Versions> CreateTaskState finalize_relay_map: HashMap::default().into(), view_sync_timeout: handle.hotshot.config.view_sync_timeout, id: handle.hotshot.id, - last_garbage_collected_view: TYPES::Time::new(0), + last_garbage_collected_view: TYPES::View::new(0), upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } @@ -187,6 +192,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -280,6 +286,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), + cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -317,6 +324,7 @@ impl, V: Versions> CreateTaskState storage: Arc::clone(&handle.storage), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), + cur_epoch: handle.cur_epoch().await, output_event_stream: handle.hotshot.external_event_stream.0.clone(), timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index b761eb1ee2..4fed098e9c 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -82,6 +82,7 @@ impl Membership for RandomizedCommittee { /// Get the stake table for the current view fn stake_table( &self, + _epoch: ::Epoch, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -89,7 +90,8 @@ impl Membership for RandomizedCommittee { /// Get all members of the committee for the current view fn committee_members( &self, - _view_number: ::Time, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -100,7 +102,8 @@ impl Membership for RandomizedCommittee { /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, - _view_number: ::Time, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -112,13 +115,18 @@ impl Membership for RandomizedCommittee { fn stake( &self, pub_key: &::SignatureKey, + _epoch: ::Epoch, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() } /// Check if a node has stake in the committee - fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + fn has_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> bool { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) @@ -130,7 +138,11 @@ impl Membership for RandomizedCommittee { } /// Index the vector of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + fn leader( + &self, + view_number: TYPES::View, + _epoch: ::Epoch, + ) -> TYPES::SignatureKey { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); @@ -143,7 +155,7 @@ impl Membership for RandomizedCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self) -> usize { + fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 69cc5ce9c2..2ef52a66e2 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -80,6 +80,7 @@ impl Membership for StaticCommittee { /// Get the stake table for the current view fn stake_table( &self, + _epoch: ::Epoch, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -87,7 +88,8 @@ impl Membership for StaticCommittee { /// Get all members of the committee for the current view fn committee_members( &self, - _view_number: ::Time, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -98,7 +100,8 @@ impl Membership for StaticCommittee { /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, - _view_number: ::Time, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -110,13 +113,18 @@ impl Membership for StaticCommittee { fn stake( &self, pub_key: &::SignatureKey, + _epoch: ::Epoch, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() } /// Check if a node has stake in the committee - fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + fn has_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> bool { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) @@ -128,7 +136,11 @@ impl Membership for StaticCommittee { } /// Index the vector of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + fn leader( + &self, + view_number: TYPES::View, + _epoch: ::Epoch, + ) -> TYPES::SignatureKey { #[allow(clippy::cast_possible_truncation)] let index = *view_number as usize % self.eligible_leaders.len(); let res = self.eligible_leaders[index].clone(); @@ -136,7 +148,7 @@ impl Membership for StaticCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self) -> usize { + fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 9ce83c14a0..db41aad2ab 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -80,6 +80,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -87,7 +88,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Time, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -98,7 +100,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Time, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -110,13 +113,18 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, + _epoch: ::Epoch, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() } /// Check if a node has stake in the committee - fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + fn has_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> bool { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) @@ -128,7 +136,11 @@ impl Membership for StaticCommitteeLeaderForTwoViews TYPES::SignatureKey { + fn leader( + &self, + view_number: TYPES::View, + _epoch: ::Epoch, + ) -> TYPES::SignatureKey { let index = usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); let res = self.eligible_leaders[index].clone(); @@ -136,7 +148,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews usize { + fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 2bb7eba49c..002f5d543c 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -484,7 +484,7 @@ impl ConnectedNetwork for CombinedNetworks self.secondary().queue_node_lookup(view_number, pk) } - async fn update_view<'a, T>(&'a self, view: u64, membership: &T::Membership) + async fn update_view<'a, T>(&'a self, view: u64, epoch: u64, membership: &T::Membership) where T: NodeType + 'a, { @@ -505,7 +505,10 @@ impl ConnectedNetwork for CombinedNetworks } }); // Run `update_view` logic for the libp2p network - self.networks.1.update_view::(view, membership).await; + self.networks + .1 + .update_view::(view, epoch, membership) + .await; } fn is_primary_down(&self) -> bool { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 32551fba72..d25fc91762 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1019,12 +1019,13 @@ impl ConnectedNetwork for Libp2pNetwork { /// So the logic with libp2p is to prefetch upcomming leaders libp2p address to /// save time when we later need to direct message the leader our vote. Hence the /// use of the future view and leader to queue the lookups. - async fn update_view<'a, TYPES>(&'a self, view: u64, membership: &TYPES::Membership) + async fn update_view<'a, TYPES>(&'a self, view: u64, epoch: u64, membership: &TYPES::Membership) where TYPES: NodeType + 'a, { - let future_view = ::Time::new(view) + LOOK_AHEAD; - let future_leader = membership.leader(future_view); + let future_view = ::View::new(view) + LOOK_AHEAD; + let epoch = ::Epoch::new(epoch); + let future_leader = membership.leader(future_view, epoch); let _ = self .queue_node_lookup(ViewNumber::new(*future_view), future_leader) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 5945e31ceb..cf6c8ffe02 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -136,7 +136,8 @@ impl + 'static, V: Versions> /// Errors if signing the request for proposal fails pub fn request_proposal( &self, - view: TYPES::Time, + view: TYPES::View, + epoch: TYPES::Epoch, leaf_commitment: Commitment>, ) -> Result>>>> { // We need to be able to sign this request before submitting it to the network. Compute the @@ -185,7 +186,7 @@ impl + 'static, V: Versions> { // Make sure that the quorum_proposal is valid if let Err(err) = quorum_proposal - .validate_signature(&mem, &upgrade_lock) + .validate_signature(&mem, epoch, &upgrade_lock) .await { tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); @@ -242,7 +243,7 @@ impl + 'static, V: Versions> /// return [`None`] if the requested view has already been decided (but see /// [`decided_state`](Self::decided_state)) or if there is no path for the requested /// view to ever be decided. - pub async fn state(&self, view: TYPES::Time) -> Option> { + pub async fn state(&self, view: TYPES::View) -> Option> { self.hotshot.state(view).await } @@ -315,11 +316,15 @@ impl + 'static, V: Versions> /// Wrapper for `HotShotConsensusApi`'s `leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + pub async fn leader( + &self, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, + ) -> TYPES::SignatureKey { self.hotshot .memberships .quorum_membership - .leader(view_number) + .leader(view_number, epoch_number) } // Below is for testing only: @@ -346,10 +351,16 @@ impl + 'static, V: Versions> /// Wrapper to get the view number this node is on. #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] - pub async fn cur_view(&self) -> TYPES::Time { + pub async fn cur_view(&self) -> TYPES::View { self.hotshot.consensus.read().await.cur_view() } + /// Wrapper to get the epoch number this node is on. + #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] + pub async fn cur_epoch(&self) -> TYPES::Epoch { + self.hotshot.consensus.read().await.cur_epoch() + } + /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data #[must_use] diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 909a6043b0..d1fecc10c9 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -38,6 +38,7 @@ timeout_ratio = [11, 10] round_start_delay = 1 start_delay = 1 num_bootstrap = 5 +epoch_height = 0 [random_builder] txn_in_block = 100 diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index cc0fa65a41..fec58d8409 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -42,7 +42,10 @@ pub(crate) async fn handle_quorum_vote_recv< ) -> Result<()> { // Are we the leader for this view? ensure!( - task_state.quorum_membership.leader(vote.view_number() + 1) == task_state.public_key, + task_state + .quorum_membership + .leader(vote.view_number() + 1, task_state.cur_epoch) + == task_state.public_key, format!( "We are not the leader for view {:?}", vote.view_number() + 1 @@ -54,6 +57,7 @@ pub(crate) async fn handle_quorum_vote_recv< vote, task_state.public_key.clone(), &task_state.quorum_membership, + task_state.cur_epoch, task_state.id, &event, sender, @@ -77,7 +81,10 @@ pub(crate) async fn handle_timeout_vote_recv< ) -> Result<()> { // Are we the leader for this view? ensure!( - task_state.timeout_membership.leader(vote.view_number() + 1) == task_state.public_key, + task_state + .timeout_membership + .leader(vote.view_number() + 1, task_state.cur_epoch) + == task_state.public_key, format!( "We are not the leader for view {:?}", vote.view_number() + 1 @@ -89,6 +96,7 @@ pub(crate) async fn handle_timeout_vote_recv< vote, task_state.public_key.clone(), &task_state.quorum_membership, + task_state.cur_epoch, task_state.id, &event, sender, @@ -106,7 +114,7 @@ pub(crate) async fn handle_view_change< I: NodeImplementation, V: Versions, >( - new_view_number: TYPES::Time, + new_view_number: TYPES::View, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { @@ -147,7 +155,7 @@ pub(crate) async fn handle_view_change< async move { async_sleep(Duration::from_millis(timeout)).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), &stream, ) .await; @@ -167,7 +175,11 @@ pub(crate) async fn handle_view_change< .current_view .set(usize::try_from(task_state.cur_view.u64()).unwrap()); let cur_view_time = Utc::now().timestamp(); - if task_state.quorum_membership.leader(old_view_number) == task_state.public_key { + if task_state + .quorum_membership + .leader(old_view_number, task_state.cur_epoch) + == task_state.public_key + { #[allow(clippy::cast_precision_loss)] consensus .metrics @@ -203,7 +215,7 @@ pub(crate) async fn handle_view_change< /// Handle a `Timeout` event. #[instrument(skip_all)] pub(crate) async fn handle_timeout, V: Versions>( - view_number: TYPES::Time, + view_number: TYPES::View, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { @@ -215,7 +227,7 @@ pub(crate) async fn handle_timeout ensure!( task_state .timeout_membership - .has_stake(&task_state.public_key), + .has_stake(&task_state.public_key, task_state.cur_epoch), format!("We were not chosen for the consensus committee for view {view_number:?}") ); @@ -260,7 +272,11 @@ pub(crate) async fn handle_timeout .metrics .number_of_timeouts .add(1); - if task_state.quorum_membership.leader(view_number) == task_state.public_key { + if task_state + .quorum_membership + .leader(view_number, task_state.cur_epoch) + == task_state.public_key + { task_state .consensus .read() diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 82610978a6..fb1ec86fca 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -70,11 +70,14 @@ pub struct ConsensusTaskState, V: pub storage: Arc>, /// The view number that this node is currently executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::View, /// Timestamp this view starts at. pub cur_view_time: i64, + /// The epoch number that this node is currently executing in. + pub cur_epoch: TYPES::Epoch, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -88,7 +91,7 @@ pub struct ConsensusTaskState, V: pub consensus: OuterConsensus, /// The last decided view - pub last_decided_view: TYPES::Time, + pub last_decided_view: TYPES::View, /// The node's id pub id: u64, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0189e20d41..2e7c1357ff 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -49,7 +49,10 @@ pub struct DaTaskState, V: Version pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::View, + + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::Epoch, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -108,7 +111,7 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState TaskEvent for HotShotEvent { #[derive(Debug, Clone)] pub struct ProposalMissing { /// View of missing proposal - pub view: TYPES::Time, + pub view: TYPES::View, /// Channel to send the response back to pub response_chan: Sender>>>, } @@ -93,7 +93,7 @@ pub enum HotShotEvent { /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), /// All dependencies for the quorum vote are validated. - QuorumVoteDependenciesValidated(TYPES::Time), + QuorumVoteDependenciesValidated(TYPES::View), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. The proposal is not for an old view @@ -124,9 +124,9 @@ pub enum HotShotEvent { /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks - ViewChange(TYPES::Time), + ViewChange(TYPES::View), /// Timeout for the view sync protocol; emitted by a replica in the view sync task - ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), + ViewSyncTimeout(TYPES::View, u64, ViewSyncPhase), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote), @@ -157,9 +157,9 @@ pub enum HotShotEvent { ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only - ViewSyncTrigger(TYPES::Time), + ViewSyncTrigger(TYPES::View), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only - Timeout(TYPES::Time), + Timeout(TYPES::View), /// Receive transactions from the network TransactionsRecv(Vec), /// Send transactions to the network @@ -169,14 +169,14 @@ pub enum HotShotEvent { VidCommitment, BuilderCommitment, >::Metadata, - TYPES::Time, + TYPES::View, Vec1>, Option, ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), /// Event when the transactions task has a block formed - BlockReady(VidDisperse, TYPES::Time), + BlockReady(VidDisperse, TYPES::View), /// Event when consensus decided on a leaf LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader @@ -205,13 +205,13 @@ pub enum HotShotEvent { /* Consensus State Update Events */ /// A undecided view has been created and added to the validated state storage. - ValidatedStateUpdated(TYPES::Time, View), + ValidatedStateUpdated(TYPES::View, View), /// A new locked view has been created (2-chain) - LockedViewUpdated(TYPES::Time), + LockedViewUpdated(TYPES::View), /// A new anchor view has been successfully reached by this node (3-chain). - LastDecidedViewUpdated(TYPES::Time), + LastDecidedViewUpdated(TYPES::View), /// A new high_qc has been reached by this node. UpdateHighQc(QuorumCertificate), @@ -260,7 +260,7 @@ pub enum HotShotEvent { impl HotShotEvent { #[allow(clippy::too_many_lines)] /// Return the view number for a hotshot event if present - pub fn view_number(&self) -> Option { + pub fn view_number(&self) -> Option { match self { HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { @@ -512,7 +512,7 @@ impl Display for HotShotEvent { write!(f, "BlockReady(view_number={view_number:?})") } HotShotEvent::LeafDecided(leaves) => { - let view_numbers: Vec<::Time> = + let view_numbers: Vec<::View> = leaves.iter().map(Leaf::view_number).collect(); write!(f, "LeafDecided({view_numbers:?})") } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 5df8200f16..cd4bf5b3cd 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -50,7 +50,7 @@ use crate::{ #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn fetch_proposal( - view_number: TYPES::Time, + view_number: TYPES::View, event_sender: Sender>>, event_receiver: Receiver>>, quorum_membership: Arc, @@ -80,6 +80,7 @@ pub(crate) async fn fetch_proposal( .await; let mem = Arc::clone(&quorum_membership); + let current_epoch = consensus.read().await.cur_epoch(); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. @@ -111,7 +112,7 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem, upgrade_lock).await.is_ok() { + if quorum_proposal.validate_signature(&mem, current_epoch, upgrade_lock).await.is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -130,7 +131,7 @@ pub(crate) async fn fetch_proposal( let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc - .is_valid_cert(quorum_membership.as_ref(), upgrade_lock) + .is_valid_cert(quorum_membership.as_ref(), current_epoch, upgrade_lock) .await { bail!("Invalid justify_qc in proposal for view {}", *view_number); @@ -167,10 +168,10 @@ pub(crate) async fn fetch_proposal( #[derive(Debug)] pub struct LeafChainTraversalOutcome { /// The new locked view obtained from a 2 chain starting from the proposal's parent. - pub new_locked_view_number: Option, + pub new_locked_view_number: Option, /// The new decided view obtained from a 3 chain starting from the proposal's parent. - pub new_decided_view_number: Option, + pub new_decided_view_number: Option, /// The qc for the decided chain. pub new_decide_qc: Option>, @@ -355,7 +356,7 @@ pub async fn decide_from_proposal( #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( - next_proposal_view_number: TYPES::Time, + next_proposal_view_number: TYPES::View, event_sender: &Sender>>, event_receiver: &Receiver>>, quorum_membership: Arc, @@ -364,8 +365,9 @@ pub(crate) async fn parent_leaf_and_state( consensus: OuterConsensus, upgrade_lock: &UpgradeLock, ) -> Result<(Leaf, Arc<::ValidatedState>)> { + let current_epoch = consensus.read().await.cur_epoch(); ensure!( - quorum_membership.leader(next_proposal_view_number) == public_key, + quorum_membership.leader(next_proposal_view_number, current_epoch) == public_key, "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", ); let parent_view_number = consensus.read().await.high_qc().view_number(); @@ -485,18 +487,20 @@ pub async fn validate_proposal_safety_and_liveness< if let Err(e) = consensus_write.update_proposed_view(proposal.clone()) { tracing::debug!("Internal proposal update failed; error = {e:#}"); }; - - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; } + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. + broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + + let current_epoch = task_state.cur_epoch; UpgradeCertificate::validate( &proposal.data.upgrade_certificate, &task_state.quorum_membership, + current_epoch, &task_state.upgrade_lock, ) .await?; @@ -606,7 +610,11 @@ pub async fn validate_proposal_view_and_certs< // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment proposal - .validate_signature(&task_state.quorum_membership, &task_state.upgrade_lock) + .validate_signature( + &task_state.quorum_membership, + task_state.cur_epoch, + &task_state.upgrade_lock, + ) .await?; // Verify a timeout certificate OR a view sync certificate exists and is valid. @@ -628,6 +636,7 @@ pub async fn validate_proposal_view_and_certs< timeout_cert .is_valid_cert( task_state.timeout_membership.as_ref(), + task_state.cur_epoch, &task_state.upgrade_lock ) .await, @@ -648,6 +657,7 @@ pub async fn validate_proposal_view_and_certs< view_sync_cert .is_valid_cert( task_state.quorum_membership.as_ref(), + task_state.cur_epoch, &task_state.upgrade_lock ) .await, @@ -662,6 +672,7 @@ pub async fn validate_proposal_view_and_certs< UpgradeCertificate::validate( &proposal.data.upgrade_certificate, &task_state.quorum_membership, + task_state.cur_epoch, &task_state.upgrade_lock, ) .await?; @@ -675,7 +686,7 @@ pub async fn validate_proposal_view_and_certs< /// # Errors /// Returns an [`anyhow::Error`] when the new view is not greater than the current view. pub(crate) async fn update_view, V: Versions>( - new_view: TYPES::Time, + new_view: TYPES::View, event_stream: &Sender>>, task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { @@ -684,8 +695,10 @@ pub(crate) async fn update_view, V "New view is not greater than our current view" ); - let is_old_view_leader = - task_state.quorum_membership.leader(task_state.cur_view) == task_state.public_key; + let is_old_view_leader = task_state + .quorum_membership + .leader(task_state.cur_view, task_state.cur_epoch) + == task_state.public_key; let old_view = task_state.cur_view; debug!("Updating view from {} to {}", *old_view, *new_view); @@ -722,7 +735,7 @@ pub(crate) async fn update_view, V async move { async_sleep(timeout).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), &stream, ) .await; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ca5f6c7aeb..5b897c7b4f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -169,7 +169,7 @@ impl NetworkMessageTaskState { // Send the external message to the external event stream so it can be processed broadcast_event( Event { - view_number: TYPES::Time::new(1), + view_number: TYPES::View::new(1), event: EventType::ExternalMessageReceived { sender, data }, }, &self.external_event_stream, @@ -190,7 +190,9 @@ pub struct NetworkEventTaskState< /// comm network pub network: Arc, /// view number - pub view: TYPES::Time, + pub view: TYPES::View, + /// epoch number + pub epoch: TYPES::Epoch, /// quorum for the network pub quorum_membership: TYPES::Membership, /// da for the network @@ -305,7 +307,7 @@ impl< maybe_action: Option, storage: Arc>, state: Arc>>, - view: ::Time, + view: ::View, ) -> Result<(), ()> { if let Some(action) = maybe_action { if !state.write().await.update_action(action, view) { @@ -358,7 +360,10 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number() + 1)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + 1, self.epoch), + ), )) } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( @@ -396,7 +401,10 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number())), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number(), self.epoch), + ), )) } HotShotEvent::DacSend(certificate, sender) => { @@ -416,7 +424,7 @@ impl< )), TransmitType::Direct( self.quorum_membership - .leader(vote.view_number() + vote.date().relay), + .leader(vote.view_number() + vote.date().relay, self.epoch), ), )), HotShotEvent::ViewSyncCommitVoteSend(vote) => Some(( @@ -426,7 +434,7 @@ impl< )), TransmitType::Direct( self.quorum_membership - .leader(vote.view_number() + vote.date().relay), + .leader(vote.view_number() + vote.date().relay, self.epoch), ), )), HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(( @@ -436,7 +444,7 @@ impl< )), TransmitType::Direct( self.quorum_membership - .leader(vote.view_number() + vote.date().relay), + .leader(vote.view_number() + vote.date().relay, self.epoch), ), )), HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( @@ -467,7 +475,10 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number() + 1)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + 1, self.epoch), + ), )) } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( @@ -484,13 +495,20 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number())), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number(), self.epoch), + ), )) } HotShotEvent::ViewChange(view) => { self.view = view; self.network - .update_view::(self.view.u64(), &self.quorum_membership) + .update_view::( + self.view.u64(), + self.epoch.u64(), + &self.quorum_membership, + ) .await; None } @@ -534,7 +552,7 @@ impl< }; let view = message.kind.view_number(); let committee_topic = self.quorum_membership.committee_topic(); - let da_committee = self.da_membership.committee_members(view); + let da_committee = self.da_membership.committee_members(view, self.epoch); let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let state = Arc::clone(&self.consensus); diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index e53a618320..bcdc568e18 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -60,10 +60,10 @@ pub(crate) enum ProposalDependency { /// Handler for the proposal dependency pub struct ProposalDependencyHandle { /// Latest view number that has been proposed for (proxy for cur_view). - pub latest_proposed_view: TYPES::Time, + pub latest_proposed_view: TYPES::View, /// The view number to propose for. - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// The event sender. pub sender: Sender>>, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 2c12b83b28..7390427f09 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -46,10 +46,10 @@ mod handlers; /// The state for the quorum proposal task. pub struct QuorumProposalTaskState, V: Versions> { /// Latest view number that has been proposed for. - pub latest_proposed_view: TYPES::Time, + pub latest_proposed_view: TYPES::View, /// Table for the in-progress proposal dependency tasks. - pub proposal_dependencies: HashMap>, + pub proposal_dependencies: HashMap>, /// The underlying network pub network: Arc, @@ -107,7 +107,7 @@ impl, V: Versions> fn create_event_dependency( &self, dependency_type: ProposalDependency, - view_number: TYPES::Time, + view_number: TYPES::View, event_receiver: Receiver>>, ) -> EventDependency>> { EventDependency::new( @@ -181,7 +181,7 @@ impl, V: Versions> /// Creates the requisite dependencies for the Quorum Proposal task. It also handles any event forwarding. fn create_and_complete_dependencies( &self, - view_number: TYPES::Time, + view_number: TYPES::View, event_receiver: &Receiver>>, event: Arc>, ) -> AndDependency>>>> { @@ -283,13 +283,14 @@ impl, V: Versions> #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Create dependency task", level = "error")] fn create_dependency_task_if_new( &mut self, - view_number: TYPES::Time, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, ) { // Don't even bother making the task if we are not entitled to propose anyway. - if self.quorum_membership.leader(view_number) != self.public_key { + if self.quorum_membership.leader(view_number, epoch_number) != self.public_key { tracing::trace!("We are not the leader of the next view"); return; } @@ -333,7 +334,7 @@ impl, V: Versions> /// Update the latest proposed view number. #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Update latest proposed view", level = "error")] - async fn update_latest_proposed_view(&mut self, new_view: TYPES::Time) -> bool { + async fn update_latest_proposed_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_proposed_view < *new_view { debug!( "Updating latest proposed view from {} to {}", @@ -342,7 +343,7 @@ impl, V: Versions> // Cancel the old dependency tasks. for view in (*self.latest_proposed_view + 1)..=(*new_view) { - if let Some(dependency) = self.proposal_dependencies.remove(&TYPES::Time::new(view)) + if let Some(dependency) = self.proposal_dependencies.remove(&TYPES::View::new(view)) { cancel_task(dependency).await; } @@ -380,9 +381,11 @@ impl, V: Versions> HotShotEvent::QcFormed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -411,17 +414,24 @@ impl, V: Versions> _auction_result, ) => { let view_number = *view_number; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), ); } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + let epoch_number = self.consensus.read().await.cur_epoch(); if !certificate - .is_valid_cert(self.quorum_membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.quorum_membership.as_ref(), + epoch_number, + &self.upgrade_lock, + ) .await { warn!( @@ -435,6 +445,7 @@ impl, V: Versions> self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, event, @@ -447,9 +458,11 @@ impl, V: Versions> if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); } + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number + 1, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -464,9 +477,11 @@ impl, V: Versions> } HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -490,8 +505,10 @@ impl, V: Versions> } HotShotEvent::HighQcUpdated(qc) => { let view_number = qc.view_number() + 1; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index c268c14b0e..2193bf3dfd 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -136,6 +136,7 @@ pub(crate) async fn handle_quorum_proposal_recv< if !justify_qc .is_valid_cert( task_state.quorum_membership.as_ref(), + task_state.cur_epoch, &task_state.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index f51a8d5e00..281e472fd0 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -55,11 +55,14 @@ pub struct QuorumProposalRecvTaskState, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::View, /// Timestamp this view starts at. pub cur_view_time: i64, + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::Epoch, + /// The underlying network pub network: Arc, @@ -89,7 +92,7 @@ pub struct QuorumProposalRecvTaskState>>, + pub spawned_tasks: BTreeMap>>, /// Immutable instance state pub instance_state: Arc, @@ -105,7 +108,7 @@ impl, V: Versions> QuorumProposalRecvTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::Time) { + pub async fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.spawned_tasks.split_off(&view); let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index ed0d61d4e9..909fcf2db2 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -75,7 +75,9 @@ pub struct VoteDependencyHandle, V /// Reference to the storage. pub storage: Arc>, /// View number to vote on. - pub view_number: TYPES::Time, + pub view_number: TYPES::View, + /// Epoch number to vote on. + pub epoch_number: TYPES::Epoch, /// Event sender. pub sender: Sender>>, /// Event receiver. @@ -200,7 +202,8 @@ impl + 'static, V: Versions> vid_share: Proposal>, ) -> Result<()> { ensure!( - self.quorum_membership.has_stake(&self.public_key), + self.quorum_membership + .has_stake(&self.public_key, self.epoch_number), format!( "We were not chosen for quorum committee on {:?}", self.view_number @@ -373,10 +376,10 @@ pub struct QuorumVoteTaskState, V: pub instance_state: Arc, /// Latest view number that has been voted for. - pub latest_voted_view: TYPES::Time, + pub latest_voted_view: TYPES::View, /// Table for the in-progress dependency tasks. - pub vote_dependencies: HashMap>, + pub vote_dependencies: HashMap>, /// The underlying network pub network: Arc, @@ -406,7 +409,7 @@ impl, V: Versions> QuorumVoteTaskS fn create_event_dependency( &self, dependency_type: VoteDependency, - view_number: TYPES::Time, + view_number: TYPES::View, event_receiver: Receiver>>, ) -> EventDependency>> { EventDependency::new( @@ -450,7 +453,8 @@ impl, V: Versions> QuorumVoteTaskS #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote crete dependency task if new", level = "error")] fn create_dependency_task_if_new( &mut self, - view_number: TYPES::Time, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, event_receiver: Receiver>>, event_sender: &Sender>>, event: Option>>, @@ -493,6 +497,7 @@ impl, V: Versions> QuorumVoteTaskS quorum_membership: Arc::clone(&self.quorum_membership), storage: Arc::clone(&self.storage), view_number, + epoch_number, sender: event_sender.clone(), receiver: event_receiver.clone(), upgrade_lock: self.upgrade_lock.clone(), @@ -505,7 +510,7 @@ impl, V: Versions> QuorumVoteTaskS /// Update the latest voted view number. #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote update latest voted view", level = "error")] - async fn update_latest_voted_view(&mut self, new_view: TYPES::Time) -> bool { + async fn update_latest_voted_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_voted_view < *new_view { debug!( "Updating next vote view from {} to {} in the quorum vote task", @@ -514,7 +519,7 @@ impl, V: Versions> QuorumVoteTaskS // Cancel the old dependency tasks. for view in *self.latest_voted_view..(*new_view) { - if let Some(dependency) = self.vote_dependencies.remove(&TYPES::Time::new(view)) { + if let Some(dependency) = self.vote_dependencies.remove(&TYPES::View::new(view)) { cancel_task(dependency).await; debug!("Vote dependency removed for view {:?}", view); } @@ -535,6 +540,7 @@ impl, V: Versions> QuorumVoteTaskS event_receiver: Receiver>>, event_sender: Sender>>, ) { + let current_epoch = self.consensus.read().await.cur_epoch(); match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { trace!("Received Proposal for view {}", *proposal.view_number()); @@ -548,6 +554,7 @@ impl, V: Versions> QuorumVoteTaskS self.create_dependency_task_if_new( proposal.view_number, + current_epoch, event_receiver, &event_sender, Some(Arc::clone(&event)), @@ -560,9 +567,14 @@ impl, V: Versions> QuorumVoteTaskS return; } + let current_epoch = self.consensus.read().await.cur_epoch(); // Validate the DAC. if !cert - .is_valid_cert(self.da_membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.da_membership.as_ref(), + current_epoch, + &self.upgrade_lock, + ) .await { return; @@ -579,7 +591,13 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); + self.create_dependency_task_if_new( + view, + current_epoch, + event_receiver, + &event_sender, + None, + ); } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); @@ -590,10 +608,14 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = disperse.data.payload_commitment; + let current_epoch = self.consensus.read().await.cur_epoch(); // Check sender of VID disperse share is signed by DA committee member let validate_sender = sender .validate(&disperse.signature, payload_commitment.as_ref()) - && self.da_membership.committee_members(view).contains(sender); + && self + .da_membership + .committee_members(view, current_epoch) + .contains(sender); // Check whether the data satisfies one of the following. // * From the right leader for this view. @@ -603,7 +625,7 @@ impl, V: Versions> QuorumVoteTaskS .validate(&disperse.signature, payload_commitment.as_ref()) || self .quorum_membership - .leader(view) + .leader(view, current_epoch) .validate(&disperse.signature, payload_commitment.as_ref()); if !validate_sender && !validated { warn!("Failed to validated the VID dispersal/share sig."); @@ -613,7 +635,7 @@ impl, V: Versions> QuorumVoteTaskS // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results #[allow(clippy::no_effect)] - match vid_scheme(self.quorum_membership.total_nodes()).verify_share( + match vid_scheme(self.quorum_membership.total_nodes(current_epoch)).verify_share( &disperse.data.share, &disperse.data.common, &payload_commitment, @@ -641,7 +663,13 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); + self.create_dependency_task_if_new( + view, + current_epoch, + event_receiver, + &event_sender, + None, + ); } HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { debug!("All vote dependencies verified for view {:?}", view_number); diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 4ea5966fec..8cd336e7b1 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -56,7 +56,7 @@ pub struct NetworkRequestState> { /// before sending a request pub state: OuterConsensus, /// Last seen view, we won't request for proposals before older than this view - pub view: TYPES::Time, + pub view: TYPES::View, /// Delay before requesting peers pub delay: Duration, /// DA Membership @@ -70,7 +70,7 @@ pub struct NetworkRequestState> { /// A flag indicating that `HotShotEvent::Shutdown` has been received pub shutdown_flag: Arc, /// A flag indicating that `HotShotEvent::Shutdown` has been received - pub spawned_tasks: BTreeMap>>, + pub spawned_tasks: BTreeMap>>, } impl> Drop for NetworkRequestState { @@ -97,6 +97,7 @@ impl> TaskState for NetworkRequest match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.view_number(); + let current_epoch = self.state.read().await.cur_epoch(); // If we already have the VID shares for the next view, do nothing. if prop_view >= self.view @@ -107,7 +108,7 @@ impl> TaskState for NetworkRequest .vid_shares() .contains_key(&prop_view) { - self.spawn_requests(prop_view, sender, receiver); + self.spawn_requests(prop_view, current_epoch, sender, receiver); } Ok(()) } @@ -144,7 +145,8 @@ impl> NetworkRequestState>>, receiver: &Receiver>>, ) { @@ -158,6 +160,7 @@ impl> NetworkRequestState> NetworkRequestState, sender: Sender>>, receiver: Receiver>>, - view: TYPES::Time, + view: TYPES::View, + epoch: TYPES::Epoch, ) { let state = OuterConsensus::new(Arc::clone(&self.state.inner_consensus)); let network = Arc::clone(&self.network); let shutdown_flag = Arc::clone(&self.shutdown_flag); let delay = self.delay; - let da_committee_for_view = self.da_membership.committee_members(view); + let da_committee_for_view = self.da_membership.committee_members(view, epoch); let public_key = self.public_key.clone(); // Get committee members for view let mut recipients: Vec = self .da_membership - .committee_members(view) + .committee_members(view, epoch) .into_iter() .collect(); // Randomize the recipients so all replicas don't overload the same 1 recipients @@ -256,7 +260,7 @@ impl> NetworkRequestState::SignatureKey>, public_key: &::SignatureKey, - view: TYPES::Time, + view: TYPES::View, ) -> bool { // First send request to a random DA member for the view broadcast_event( @@ -299,7 +303,7 @@ impl> NetworkRequestState>>, da_members_for_view: BTreeSet<::SignatureKey>, - view: TYPES::Time, + view: TYPES::View, ) -> Option>> { EventDependency::new( receiver.clone(), @@ -326,7 +330,7 @@ impl> NetworkRequestState, sender: &Sender>>, public_key: &::SignatureKey, - view: &TYPES::Time, + view: &TYPES::View, shutdown_flag: &Arc, ) -> bool { let state = state.read().await; diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index e8e8483b08..475a843896 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -76,7 +76,7 @@ impl NetworkResponseState { match event.as_ref() { HotShotEvent::VidRequestRecv(request, sender) => { // Verify request is valid - if !self.valid_sender(sender) + if !self.valid_sender(sender, self.consensus.read().await.cur_epoch()) || !valid_signature::(request, sender) { continue; @@ -140,7 +140,7 @@ impl NetworkResponseState { #[instrument(skip_all, target = "NetworkResponseState", fields(id = self.id))] async fn get_or_calc_vid_share( &self, - view: TYPES::Time, + view: TYPES::View, key: &TYPES::SignatureKey, ) -> Option>> { let contained = self @@ -151,11 +151,13 @@ impl NetworkResponseState { .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { + let current_epoch = self.consensus.read().await.cur_epoch(); if Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.quorum), &self.private_key, + current_epoch, ) .await .is_none() @@ -167,6 +169,7 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, + current_epoch, ) .await?; } @@ -188,9 +191,9 @@ impl NetworkResponseState { .cloned() } - /// Makes sure the sender is allowed to send a request. - fn valid_sender(&self, sender: &TYPES::SignatureKey) -> bool { - self.quorum.has_stake(sender) + /// Makes sure the sender is allowed to send a request in the given epoch. + fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool { + self.quorum.has_stake(sender, epoch) } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index bd1aa73307..354d9d1fd6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -82,7 +82,10 @@ pub struct TransactionTaskState, V pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::View, + + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::Epoch, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -117,7 +120,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change( &mut self, event_stream: &Sender>>, - block_view: TYPES::Time, + block_view: TYPES::View, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -141,7 +144,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change_legacy( &mut self, event_stream: &Sender>>, - block_view: TYPES::Time, + block_view: TYPES::View, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -201,10 +204,11 @@ impl, V: Versions> TransactionTask .number_of_empty_blocks_proposed .add(1); - let membership_total_nodes = self.membership.total_nodes(); - let Some(null_fee) = - null_block::builder_fee::(self.membership.total_nodes(), version) - else { + let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); + let Some(null_fee) = null_block::builder_fee::( + self.membership.total_nodes(self.cur_epoch), + version, + ) else { error!("Failed to get null fee"); return None; }; @@ -239,7 +243,7 @@ impl, V: Versions> TransactionTask /// Returns an error if the solver cannot be contacted, or if none of the builders respond. async fn produce_block_marketplace( &mut self, - block_view: TYPES::Time, + block_view: TYPES::View, task_start_time: Instant, ) -> Result> { ensure!( @@ -336,13 +340,14 @@ impl, V: Versions> TransactionTask /// Produce a null block pub fn null_block( &self, - block_view: TYPES::Time, + block_view: TYPES::View, version: Version, ) -> Option> { - let membership_total_nodes = self.membership.total_nodes(); - let Some(null_fee) = - null_block::builder_fee::(self.membership.total_nodes(), version) - else { + let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); + let Some(null_fee) = null_block::builder_fee::( + self.membership.total_nodes(self.cur_epoch), + version, + ) else { error!("Failed to calculate null block fee."); return None; }; @@ -367,7 +372,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change_marketplace( &mut self, event_stream: &Sender>>, - block_view: TYPES::Time, + block_view: TYPES::View, ) -> Option { let task_start_time = Instant::now(); @@ -446,12 +451,13 @@ impl, V: Versions> TransactionTask let mut make_block = false; if *view - *self.cur_view > 1 { info!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.leader(view) == self.public_key; + make_block = self.membership.leader(view, self.cur_epoch) == self.public_key; } self.cur_view = view; let next_view = self.cur_view + 1; - let next_leader = self.membership.leader(next_view) == self.public_key; + let next_leader = + self.membership.leader(next_view, self.cur_epoch) == self.public_key; if !make_block && !next_leader { debug!("Not next leader for view {:?}", self.cur_view); return None; @@ -478,9 +484,9 @@ impl, V: Versions> TransactionTask #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment_retry( &self, - block_view: TYPES::Time, + block_view: TYPES::View, task_start_time: Instant, - ) -> Result<(TYPES::Time, VidCommitment)> { + ) -> Result<(TYPES::View, VidCommitment)> { loop { match self.last_vid_commitment(block_view).await { Ok((view, comm)) => break Ok((view, comm)), @@ -499,10 +505,10 @@ impl, V: Versions> TransactionTask #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment( &self, - block_view: TYPES::Time, - ) -> Result<(TYPES::Time, VidCommitment)> { + block_view: TYPES::View, + ) -> Result<(TYPES::View, VidCommitment)> { let consensus = self.consensus.read().await; - let mut target_view = TYPES::Time::new(block_view.saturating_sub(1)); + let mut target_view = TYPES::View::new(block_view.saturating_sub(1)); loop { let view_data = consensus @@ -525,7 +531,7 @@ impl, V: Versions> TransactionTask ViewInner::Failed => { // For failed views, backtrack target_view = - TYPES::Time::new(target_view.checked_sub(1).context("Reached genesis")?); + TYPES::View::new(target_view.checked_sub(1).context("Reached genesis")?); continue; } } @@ -533,7 +539,7 @@ impl, V: Versions> TransactionTask } #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view), name = "wait_for_block", level = "error")] - async fn wait_for_block(&self, block_view: TYPES::Time) -> Option> { + async fn wait_for_block(&self, block_view: TYPES::View) -> Option> { let task_start_time = Instant::now(); // Find commitment to the block we want to build upon @@ -597,7 +603,7 @@ impl, V: Versions> TransactionTask async fn get_available_blocks( &self, parent_comm: VidCommitment, - view_number: TYPES::Time, + view_number: TYPES::View, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Vec<(AvailableBlockInfo, usize)> { let tasks = self @@ -666,7 +672,7 @@ impl, V: Versions> TransactionTask async fn block_from_builder( &self, parent_comm: VidCommitment, - view_number: TYPES::Time, + view_number: TYPES::View, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> anyhow::Result> { let mut available_blocks = self diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index d050ef4e54..4992b06887 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -43,7 +43,10 @@ pub struct UpgradeTaskState, V: Ve pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::View, + + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::Epoch, /// Membership for Quorum Certs/votes pub quorum_membership: Arc, @@ -166,7 +169,7 @@ impl, V: Versions> UpgradeTaskStat // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { + if self.cur_view != TYPES::View::genesis() && view < self.cur_view - 1 { warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, self.cur_view @@ -175,7 +178,7 @@ impl, V: Versions> UpgradeTaskStat } // We then validate that the proposal was issued by the leader for the view. - let view_leader_key = self.quorum_membership.leader(view); + let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch); if &view_leader_key != sender { error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); return None; @@ -219,11 +222,12 @@ impl, V: Versions> UpgradeTaskStat // Check if we are the leader. { let view = vote.view_number(); - if self.quorum_membership.leader(view) != self.public_key { + if self.quorum_membership.leader(view, self.cur_epoch) != self.public_key { error!( "We are not the leader for view {} are we leader for next view? {}", *view, - self.quorum_membership.leader(view + 1) == self.public_key + self.quorum_membership.leader(view + 1, self.cur_epoch) + == self.public_key ); return None; } @@ -234,6 +238,7 @@ impl, V: Versions> UpgradeTaskStat vote, self.public_key.clone(), &self.quorum_membership, + self.cur_epoch, self.id, &event, &tx, @@ -260,23 +265,23 @@ impl, V: Versions> UpgradeTaskStat && time >= self.start_proposing_time && time < self.stop_proposing_time && !self.upgraded().await - && self - .quorum_membership - .leader(TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET)) - == self.public_key + && self.quorum_membership.leader( + TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), + self.cur_epoch, + ) == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: V::Base::VERSION, new_version: V::Upgrade::VERSION, new_version_hash: V::UPGRADE_HASH.to_vec(), - old_version_last_view: TYPES::Time::new(view + UPGRADE_BEGIN_OFFSET), - new_version_first_view: TYPES::Time::new(view + UPGRADE_FINISH_OFFSET), - decide_by: TYPES::Time::new(view + UPGRADE_DECIDE_BY_OFFSET), + old_version_last_view: TYPES::View::new(view + UPGRADE_BEGIN_OFFSET), + new_version_first_view: TYPES::View::new(view + UPGRADE_FINISH_OFFSET), + decide_by: TYPES::View::new(view + UPGRADE_DECIDE_BY_OFFSET), }; let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), - view_number: TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET), + view_number: TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), }; let signature = TYPES::SignatureKey::sign( diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 105aad46dc..106203bd07 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -30,7 +30,9 @@ use crate::{ /// Tracks state of a VID task pub struct VidTaskState> { /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::View, + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::Epoch, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, /// The underlying network @@ -42,7 +44,7 @@ pub struct VidTaskState> { /// Our Private Key pub private_key: ::PrivateKey, /// The view and ID of the current vote collection task, if there is one. - pub vote_collector: Option<(TYPES::Time, usize, usize)>, + pub vote_collector: Option<(TYPES::View, usize, usize)>, /// This state's ID pub id: u64, } @@ -73,6 +75,7 @@ impl> VidTaskState { Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, + self.cur_epoch, vid_precompute.clone(), ) .await; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index cc06ed170d..fb00054f88 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -62,16 +62,18 @@ pub enum ViewSyncPhase { /// Type alias for a map from View Number to Relay to Vote Task type RelayMap = HashMap< - ::Time, + ::View, BTreeMap>, >; /// Main view sync task state pub struct ViewSyncTaskState, V: Versions> { /// View HotShot is currently in - pub current_view: TYPES::Time, + pub current_view: TYPES::View, /// View HotShot wishes to be in - pub next_view: TYPES::Time, + pub next_view: TYPES::View, + /// Epoch HotShot is currently in + pub current_epoch: TYPES::Epoch, /// The underlying network pub network: Arc, /// Membership for the quorum @@ -87,7 +89,7 @@ pub struct ViewSyncTaskState, V: V pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: RwLock>>, + pub replica_task_map: RwLock>>, /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: RwLock< @@ -105,7 +107,7 @@ pub struct ViewSyncTaskState, V: V pub view_sync_timeout: Duration, /// Last view we garbage collected old tasks - pub last_garbage_collected_view: TYPES::Time, + pub last_garbage_collected_view: TYPES::View, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, @@ -136,9 +138,11 @@ pub struct ViewSyncReplicaTaskState, V: Versions> ViewSyncTaskSta pub async fn send_to_or_create_replica( &mut self, event: Arc>, - view: TYPES::Time, + view: TYPES::View, sender: &Sender>>, ) { // This certificate is old, we can throw it away @@ -221,6 +225,7 @@ impl, V: Versions> ViewSyncTaskSta let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { current_view: view, next_view: view, + current_epoch: self.current_epoch, relay: 0, finalized: false, sent_view_change_event: false, @@ -299,7 +304,11 @@ impl, V: Versions> ViewSyncTaskSta } // We do not have a relay task already running, so start one - if self.membership.leader(vote_view + relay) != self.public_key { + if self + .membership + .leader(vote_view + relay, self.current_epoch) + != self.public_key + { debug!("View sync vote sent to wrong leader"); return; } @@ -308,6 +317,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, + epoch: self.current_epoch, id: self.id, }; let vote_collector = @@ -337,7 +347,11 @@ impl, V: Versions> ViewSyncTaskSta } // We do not have a relay task already running, so start one - if self.membership.leader(vote_view + relay) != self.public_key { + if self + .membership + .leader(vote_view + relay, self.current_epoch) + != self.public_key + { debug!("View sync vote sent to wrong leader"); return; } @@ -346,6 +360,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, + epoch: self.current_epoch, id: self.id, }; let vote_collector = @@ -375,7 +390,11 @@ impl, V: Versions> ViewSyncTaskSta } // We do not have a relay task already running, so start one - if self.membership.leader(vote_view + relay) != self.public_key { + if self + .membership + .leader(vote_view + relay, self.current_epoch) + != self.public_key + { debug!("View sync vote sent to wrong leader"); return; } @@ -384,6 +403,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, + epoch: self.current_epoch, id: self.id, }; let vote_collector = @@ -395,7 +415,7 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::ViewChange(new_view) => { - let new_view = TYPES::Time::new(*new_view); + let new_view = TYPES::View::new(*new_view); if self.current_view < new_view { debug!( "Change from view {} to view {} in view sync task", @@ -414,19 +434,19 @@ impl, V: Versions> ViewSyncTaskSta self.replica_task_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::View::new(i)); self.pre_commit_relay_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::View::new(i)); self.commit_relay_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::View::new(i)); self.finalize_relay_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::View::new(i)); } self.last_garbage_collected_view = self.current_view - 1; @@ -434,12 +454,12 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number <= TYPES::Time::new(*self.current_view) { + if view_number <= TYPES::View::new(*self.current_view) { return; } self.num_timeouts_tracked += 1; - let leader = self.membership.leader(view_number); + let leader = self.membership.leader(view_number, self.current_epoch); warn!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), @@ -465,7 +485,7 @@ impl, V: Versions> ViewSyncTaskSta // If this is the first timeout we've seen advance to the next view self.current_view = view_number; broadcast_event( - Arc::new(HotShotEvent::ViewChange(TYPES::Time::new( + Arc::new(HotShotEvent::ViewChange(TYPES::View::new( *self.current_view, ))), &event_stream, @@ -502,7 +522,11 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.membership.as_ref(), + self.current_epoch, + &self.upgrade_lock, + ) .await { error!("Not valid view sync cert! {:?}", certificate.data()); @@ -561,7 +585,7 @@ impl, V: Versions> broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::View::new(*next_view), relay, phase, )), @@ -584,7 +608,11 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.membership.as_ref(), + self.current_epoch, + &self.upgrade_lock, + ) .await { error!("Not valid view sync cert! {:?}", certificate.data()); @@ -655,7 +683,7 @@ impl, V: Versions> ); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::View::new(*next_view), relay, phase, )), @@ -676,7 +704,11 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.membership.as_ref(), + self.current_epoch, + &self.upgrade_lock, + ) .await { error!("Not valid view sync cert! {:?}", certificate.data()); @@ -708,7 +740,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; - if self.next_view != TYPES::Time::new(*view_number) { + if self.next_view != TYPES::View::new(*view_number) { error!("Unexpected view number to triger view sync"); return None; } @@ -748,7 +780,7 @@ impl, V: Versions> warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::View::new(*next_view), relay, ViewSyncPhase::None, )), @@ -764,7 +796,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { let round = *round; // Shouldn't ever receive a timeout for a relay higher than ours - if TYPES::Time::new(*round) == self.next_view && *relay == self.relay { + if TYPES::View::new(*round) == self.next_view && *relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } @@ -817,7 +849,7 @@ impl, V: Versions> ); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::View::new(*next_view), relay, last_cert, )), diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index a7ec9cd7e9..533a96b719 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -39,7 +39,7 @@ use crate::{ /// Alias for a map of Vote Collectors pub type VoteCollectorsMap = - BTreeMap<::Time, VoteCollectionTaskState>; + BTreeMap<::View, VoteCollectionTaskState>; /// Task state for collecting votes of one type and emitting a certificate pub struct VoteCollectionTaskState< @@ -58,7 +58,10 @@ pub struct VoteCollectionTaskState< pub accumulator: Option>, /// The view which we are collecting votes for - pub view: TYPES::Time, + pub view: TYPES::View, + + /// The epoch which we are collecting votes for + pub epoch: TYPES::Epoch, /// Node id pub id: u64, @@ -71,8 +74,8 @@ pub trait AggregatableVote< CERT: Certificate, > { - /// return the leader for this votes - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey; + /// return the leader for this votes in the given epoch + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -93,7 +96,7 @@ impl< vote: &VOTE, event_stream: &Sender>>, ) -> Option { - if vote.leader(&self.membership) != self.public_key { + if vote.leader(&self.membership, self.epoch) != self.public_key { error!("Received vote for a view in which we were not the leader."); return None; } @@ -108,7 +111,10 @@ impl< } let accumulator = self.accumulator.as_mut()?; - match accumulator.accumulate(vote, &self.membership).await { + match accumulator + .accumulate(vote, &self.membership, self.epoch) + .await + { Either::Left(()) => None, Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); @@ -151,7 +157,9 @@ pub struct AccumulatorInfo { /// Membership we are accumulation votes for pub membership: Arc, /// View of the votes we are collecting - pub view: TYPES::Time, + pub view: TYPES::View, + /// Epoch of the votes we are collecting + pub epoch: TYPES::Epoch, /// This nodes id pub id: u64, } @@ -192,6 +200,7 @@ where public_key: info.public_key.clone(), accumulator: Some(new_accumulator), view: info.view, + epoch: info.epoch, id: info.id, }; @@ -217,6 +226,7 @@ pub async fn handle_vote< vote: &VOTE, public_key: TYPES::SignatureKey, membership: &Arc, + epoch: TYPES::Epoch, id: u64, event: &Arc>, event_stream: &Sender>>, @@ -231,6 +241,7 @@ pub async fn handle_vote< public_key, membership: Arc::clone(membership), view: vote.view_number(), + epoch, id, }; if let Some(collector) = create_vote_accumulator( @@ -292,8 +303,8 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number() + 1) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( certificate: QuorumCertificate, @@ -306,8 +317,8 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number()) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.view_number(), epoch) } fn make_cert_event( certificate: UpgradeCertificate, @@ -320,8 +331,8 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate> for DaVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number()) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.view_number(), epoch) } fn make_cert_event( certificate: DaCertificate, @@ -334,8 +345,8 @@ impl AggregatableVote, DaCertificate AggregatableVote, TimeoutCertificate> for TimeoutVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number() + 1) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( certificate: TimeoutCertificate, @@ -349,8 +360,8 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.date().round + self.date().relay) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( certificate: ViewSyncCommitCertificate2, @@ -364,8 +375,8 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.date().round + self.date().relay) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( certificate: ViewSyncPreCommitCertificate2, @@ -379,8 +390,8 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.date().round + self.date().relay) + fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( certificate: ViewSyncFinalizeCertificate2, diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index beab8b5ae6..825108e48b 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -116,7 +116,7 @@ pub struct DishonestLeader { /// How far back to look for a QC pub view_look_back: usize, /// Shared state of all view numbers we send bad proposal at - pub dishonest_proposal_view_numbers: Arc>>, + pub dishonest_proposal_view_numbers: Arc>>, } /// Add method that will handle `QuorumProposalSend` events @@ -246,7 +246,7 @@ pub struct ViewDelay { /// How many views the node will be delayed pub number_of_views_to_delay: u64, /// A map that is from view number to vector of events - pub events_for_view: HashMap>>, + pub events_for_view: HashMap>>, /// Specify which view number to stop delaying pub stop_view_delay_at_view_number: u64, } @@ -271,7 +271,7 @@ impl + std::fmt::Debug, V: Version if view_diff > 0 { return match self .events_for_view - .remove(&::Time::new(view_diff)) + .remove(&::View::new(view_diff)) { Some(lookback_events) => lookback_events.clone(), // we have already return all received events for this view @@ -346,7 +346,8 @@ impl + std::fmt::Debug, V: Version ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, - view: TYPES::Time::genesis(), + view: TYPES::View::genesis(), + epoch: TYPES::Epoch::genesis(), quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), @@ -375,7 +376,7 @@ pub struct DishonestVoter { /// Collect all votes the node sends pub votes_sent: Vec>, /// Shared state with views numbers that leaders were dishonest at - pub dishonest_proposal_view_numbers: Arc>>, + pub dishonest_proposal_view_numbers: Arc>>, } #[async_trait] diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 3cb04f65c1..0d78c4d12c 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -24,10 +24,10 @@ use crate::{ }; /// Map from views to leaves for a single node, allowing multiple leaves for each view (because the node may a priori send us multiple leaves for a given view). -pub type NodeMap = BTreeMap<::Time, Vec>>; +pub type NodeMap = BTreeMap<::View, Vec>>; /// A sanitized map from views to leaves for a single node, with only a single leaf per view. -pub type NodeMapSanitized = BTreeMap<::Time, Leaf>; +pub type NodeMapSanitized = BTreeMap<::View, Leaf>; /// Validate that the `NodeMap` only has a single leaf per view. fn sanitize_node_map( @@ -68,7 +68,7 @@ async fn validate_node_map( .map(|((a, b), c)| (a, b, c)); let mut decided_upgrade_certificate = None; - let mut view_decided = TYPES::Time::new(0); + let mut view_decided = TYPES::View::new(0); for (grandparent, _parent, child) in leaf_triples { if let Some(cert) = grandparent.upgrade_certificate() { @@ -144,7 +144,7 @@ fn sanitize_network_map( Ok(result) } -pub type ViewMap = BTreeMap<::Time, BTreeMap>>; +pub type ViewMap = BTreeMap<::View, BTreeMap>>; // Invert the network map by interchanging the roles of the node_id and view number. // @@ -171,7 +171,7 @@ async fn invert_network_map( } /// A view map, sanitized to have exactly one leaf per view. -pub type ViewMapSanitized = BTreeMap<::Time, Leaf>; +pub type ViewMapSanitized = BTreeMap<::View, Leaf>; fn sanitize_view_map( view_map: &ViewMap, diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 54eab9d4fa..842ef3e538 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -150,7 +150,8 @@ pub async fn build_cert< >( data: DATAType, membership: &TYPES::Membership, - view: TYPES::Time, + view: TYPES::View, + epoch: TYPES::Epoch, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -159,6 +160,7 @@ pub async fn build_cert< &data, membership, view, + epoch, upgrade_lock, ) .await; @@ -214,10 +216,11 @@ pub async fn build_assembled_sig< >( data: &DATAType, membership: &TYPES::Membership, - view: TYPES::Time, + view: TYPES::View, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let stake_table = membership.stake_table(); + let stake_table = membership.stake_table(epoch); let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), @@ -272,18 +275,23 @@ pub fn key_pair_for_id( #[must_use] pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, - view_number: TYPES::Time, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, ) -> VidSchemeType { - let num_storage_nodes = membership.committee_members(view_number).len(); + let num_storage_nodes = membership + .committee_members(view_number, epoch_number) + .len(); vid_scheme(num_storage_nodes) } pub fn vid_payload_commitment( quorum_membership: &::Membership, - view_number: TYPES::Time, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, transactions: Vec, ) -> VidCommitment { - let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = + vid_scheme_from_view_number::(quorum_membership, view_number, epoch_number); let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -293,19 +301,24 @@ pub fn vid_payload_commitment( pub fn da_payload_commitment( quorum_membership: &::Membership, transactions: Vec, + epoch_number: TYPES::Epoch, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); - vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) + vid_commitment( + &encoded_transactions, + quorum_membership.total_nodes(epoch_number), + ) } pub fn build_payload_commitment( membership: &::Membership, - view: TYPES::Time, + view: TYPES::View, + epoch: TYPES::Epoch, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(membership, view); + let mut vid = vid_scheme_from_view_number::(membership, view, epoch); let encoded_transactions = Vec::new(); vid.commit_only(&encoded_transactions).unwrap() } @@ -313,17 +326,20 @@ pub fn build_payload_commitment( /// TODO: pub fn build_vid_proposal( quorum_membership: &::Membership, - view_number: TYPES::Time, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, transactions: Vec, private_key: &::PrivateKey, ) -> VidProposal { - let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = + vid_scheme_from_view_number::(quorum_membership, view_number, epoch_number); let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = VidDisperse::from_membership( view_number, vid.disperse(&encoded_transactions).unwrap(), quorum_membership, + epoch_number, ); let signature = @@ -348,10 +364,12 @@ pub fn build_vid_proposal( ) } +#[allow(clippy::too_many_arguments)] pub async fn build_da_certificate( quorum_membership: &::Membership, da_membership: &::Membership, - view_number: TYPES::Time, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, transactions: Vec, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, @@ -359,8 +377,10 @@ pub async fn build_da_certificate( ) -> DaCertificate { let encoded_transactions = TestTransaction::encode(&transactions); - let da_payload_commitment = - vid_commitment(&encoded_transactions, quorum_membership.total_nodes()); + let da_payload_commitment = vid_commitment( + &encoded_transactions, + quorum_membership.total_nodes(epoch_number), + ); let da_data = DaData { payload_commit: da_payload_commitment, @@ -370,6 +390,7 @@ pub async fn build_da_certificate( da_data, da_membership, view_number, + epoch_number, public_key, private_key, upgrade_lock, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 3cbe364517..979c2c2a04 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -61,12 +61,12 @@ pub enum OverallSafetyTaskErr { NotEnoughDecides { got: usize, expected: usize }, #[error("Too many view failures: {0:?}")] - TooManyFailures(HashSet), + TooManyFailures(HashSet), #[error("Inconsistent failed views: expected: {expected_failed_views:?}, actual: {actual_failed_views:?}")] InconsistentFailedViews { - expected_failed_views: Vec, - actual_failed_views: HashSet, + expected_failed_views: Vec, + actual_failed_views: HashSet, }, #[error( "Not enough round results: results_count: {results_count}, views_count: {views_count}" @@ -97,7 +97,7 @@ pub struct OverallSafetyTask, V: Versions> OverallSafetyTask { - async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::Time) { + async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::View) { let expected_views_to_fail = &mut self.properties.expected_views_to_fail; self.ctx.failed_views.insert(view_number); @@ -155,7 +155,7 @@ impl, V: Versions> TestTas block_size: maybe_block_size, } => { // Skip the genesis leaf. - if leaf_chain.last().unwrap().leaf.view_number() == TYPES::Time::genesis() { + if leaf_chain.last().unwrap().leaf.view_number() == TYPES::View::genesis() { return Ok(()); } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); @@ -364,18 +364,18 @@ impl Default for RoundCtx { pub struct RoundCtx { /// results from previous rounds /// view number -> round result - pub round_results: HashMap>, + pub round_results: HashMap>, /// during the run view refactor - pub failed_views: HashSet, + pub failed_views: HashSet, /// successful views - pub successful_views: HashSet, + pub successful_views: HashSet, } impl RoundCtx { /// inserts an error into the context pub fn insert_error_to_context( &mut self, - view_number: TYPES::Time, + view_number: TYPES::View, idx: usize, error: Arc>, ) { @@ -569,7 +569,7 @@ pub struct OverallSafetyPropertiesDescription { /// required to mark view as successful pub threshold_calculator: Arc usize + Send + Sync>, /// pass in the views that we expect to fail - pub expected_views_to_fail: HashMap, + pub expected_views_to_fail: HashMap, } impl std::fmt::Debug for OverallSafetyPropertiesDescription { diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index eafc52fbb2..5593a4336e 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -58,9 +58,9 @@ pub struct SpinningTask< /// late start nodes pub(crate) late_start: HashMap>, /// time based changes - pub(crate) changes: BTreeMap>, + pub(crate) changes: BTreeMap>, /// most recent view seen by spinning task - pub(crate) latest_view: Option, + pub(crate) latest_view: Option, /// Last decided leaf that can be used as the anchor leaf to initialize the node. pub(crate) last_decided_leaf: Leaf, /// Highest qc seen in the test for restarting nodes @@ -155,8 +155,8 @@ where self.last_decided_leaf.clone(), TestInstanceState::new(self.async_delay_config.clone()), None, - TYPES::Time::genesis(), - TYPES::Time::genesis(), + TYPES::View::genesis(), + TYPES::View::genesis(), BTreeMap::new(), self.high_qc.clone(), None, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 2a7414475f..64d16ede22 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -529,6 +529,7 @@ where stop_proposing_time: 0, start_voting_time: u64::MAX, stop_voting_time: 0, + epoch_height: 0, }; let TimingData { next_view_timeout, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 221f43ced6..cb2c8119ed 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -170,10 +170,10 @@ where // add spinning task // map spinning to view - let mut changes: BTreeMap> = BTreeMap::new(); + let mut changes: BTreeMap> = BTreeMap::new(); for (view, mut change) in spinning_changes { changes - .entry(TYPES::Time::new(view)) + .entry(TYPES::View::new(view)) .or_insert_with(Vec::new) .append(&mut change); } diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 315ac2d5e8..dcaedd5dc1 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -12,6 +12,9 @@ use std::{ task::{Context, Poll}, }; +use crate::helpers::{ + build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, +}; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ @@ -19,6 +22,7 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{ DaProposal, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewChangeEvidence, @@ -42,16 +46,13 @@ use hotshot_types::{ use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; -use crate::helpers::{ - build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, -}; - #[derive(Clone)] pub struct TestView { pub da_proposal: Proposal>, pub quorum_proposal: Proposal>, pub leaf: Leaf, pub view_number: ViewNumber, + pub epoch_number: EpochNumber, pub quorum_membership: ::Membership, pub da_membership: ::Membership, pub vid_disperse: Proposal>, @@ -75,6 +76,7 @@ impl TestView { da_membership: &::Membership, ) -> Self { let genesis_view = ViewNumber::new(1); + let genesis_epoch = EpochNumber::new(1); let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -97,12 +99,16 @@ impl TestView { let leader_public_key = public_key; - let payload_commitment = - da_payload_commitment::(quorum_membership, transactions.clone()); + let payload_commitment = da_payload_commitment::( + quorum_membership, + transactions.clone(), + genesis_epoch, + ); let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, genesis_view, + genesis_epoch, transactions.clone(), &private_key, ); @@ -111,6 +117,7 @@ impl TestView { quorum_membership, da_membership, genesis_view, + genesis_epoch, transactions.clone(), &public_key, &private_key, @@ -180,6 +187,7 @@ impl TestView { quorum_proposal, leaf, view_number: genesis_view, + epoch_number: genesis_epoch, quorum_membership: quorum_membership.clone(), da_membership: da_membership.clone(), vid_disperse, @@ -237,12 +245,16 @@ impl TestView { &metadata, ); - let payload_commitment = - da_payload_commitment::(quorum_membership, transactions.clone()); + let payload_commitment = da_payload_commitment::( + quorum_membership, + transactions.clone(), + self.epoch_number, + ); let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, next_view, + self.epoch_number, transactions.clone(), &private_key, ); @@ -251,6 +263,7 @@ impl TestView { quorum_membership, da_membership, next_view, + self.epoch_number, transactions.clone(), &public_key, &private_key, @@ -268,6 +281,7 @@ impl TestView { quorum_data, quorum_membership, old_view, + self.epoch_number, &old_public_key, &old_private_key, &self.upgrade_lock, @@ -285,6 +299,7 @@ impl TestView { data.clone(), quorum_membership, next_view, + self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -307,6 +322,7 @@ impl TestView { data.clone(), quorum_membership, next_view, + self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -329,6 +345,7 @@ impl TestView { data.clone(), quorum_membership, next_view, + self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -406,6 +423,7 @@ impl TestView { quorum_proposal, leaf, view_number: next_view, + epoch_number: self.epoch_number, quorum_membership: quorum_membership.clone(), da_membership: self.da_membership.clone(), vid_disperse, diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index b39f59ff32..1f78de5467 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -21,6 +21,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData, @@ -50,7 +51,11 @@ async fn test_da_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle.hotshot.memberships.quorum_membership.total_nodes(), + handle + .hotshot + .memberships + .quorum_membership + .total_nodes(EpochNumber::new(0)), ); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -96,7 +101,7 @@ async fn test_da_task() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], @@ -147,7 +152,11 @@ async fn test_da_task_storage_failure() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle.hotshot.memberships.quorum_membership.total_nodes(), + handle + .hotshot + .memberships + .quorum_membership + .total_nodes(EpochNumber::new(0)), ); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -193,7 +202,7 @@ async fn test_da_task_storage_failure() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 272d132ae3..6a583c058f 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -17,6 +17,7 @@ use hotshot_testing::{ helpers::build_system_handle, test_builder::TestDescription, test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, message::UpgradeLock, @@ -25,6 +26,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; + // Test that the event task sends a message, and the message task receives it // and emits the proper event #[cfg(test)] @@ -62,6 +64,7 @@ async fn test_network_task() { NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), + epoch: EpochNumber::new(0), quorum_membership: membership.clone(), da_membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), @@ -242,6 +245,7 @@ async fn test_network_storage_fail() { NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), + epoch: EpochNumber::new(0), quorum_membership: membership.clone(), da_membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 455f304114..5be8b11ba1 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -24,6 +24,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, @@ -56,8 +57,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = - build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment::( + &quorum_membership, + ViewNumber::new(node_id), + EpochNumber::new(1), + ); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -89,7 +93,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -201,7 +205,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -210,7 +214,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(1), + EpochNumber::new(1) + ), builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -229,7 +237,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -246,7 +258,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(3), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), @@ -263,7 +279,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(4), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[2].data.block_header.metadata, ViewNumber::new(4), @@ -280,7 +300,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(5), + EpochNumber::new(1) + ), builder_commitment, proposals[3].data.block_header.metadata, ViewNumber::new(5), @@ -347,8 +371,11 @@ async fn test_quorum_proposal_task_qc_timeout() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = - build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment::( + &quorum_membership, + ViewNumber::new(node_id), + EpochNumber::new(1), + ); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -393,7 +420,7 @@ async fn test_quorum_proposal_task_qc_timeout() { }, ViewNumber::new(3), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION ) .unwrap()], @@ -437,8 +464,11 @@ async fn test_quorum_proposal_task_view_sync() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = - build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment::( + &quorum_membership, + ViewNumber::new(node_id), + EpochNumber::new(1), + ); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -485,7 +515,7 @@ async fn test_quorum_proposal_task_view_sync() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION ) .unwrap()], @@ -562,7 +592,7 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -580,7 +610,11 @@ async fn test_quorum_proposal_task_liveness_check() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(1), + EpochNumber::new(1) + ), builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -599,7 +633,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -616,7 +654,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(3), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), @@ -633,7 +675,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(4), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[2].data.block_header.metadata, ViewNumber::new(4), @@ -650,7 +696,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(5), + EpochNumber::new(1) + ), builder_commitment, proposals[3].data.block_header.metadata, ViewNumber::new(5), diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index d73cd48ea7..b8c6a194b9 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -7,6 +7,7 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState, }; use hotshot_testing::helpers::build_system_handle; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, traits::{ @@ -39,7 +40,8 @@ async fn test_transaction_task_leader_two_views_in_a_row() { input.push(HotShotEvent::Shutdown); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let (_, precompute_data) = precompute_vid_commitment(&[], quorum_membership.total_nodes()); + let (_, precompute_data) = + precompute_vid_commitment(&[], quorum_membership.total_nodes(EpochNumber::new(0))); // current view let mut exp_packed_bundle = PackedBundle::new( @@ -50,7 +52,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { current_view, vec1::vec1![ null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap() diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 5bf95997c0..f0493fb5e6 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -29,6 +29,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, @@ -145,7 +146,7 @@ async fn test_upgrade_task_with_proposal() { let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -171,7 +172,11 @@ async fn test_upgrade_task_with_proposal() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(1), + EpochNumber::new(1) + ), builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -190,7 +195,11 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -208,7 +217,11 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(3), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 17922d42cb..714a12a3b5 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -20,6 +20,7 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, serial, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, traits::{ @@ -50,7 +51,11 @@ async fn test_vid_task() { // quorum membership for VID share distribution let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); + let mut vid = vid_scheme_from_view_number::( + &quorum_membership, + ViewNumber::new(0), + EpochNumber::new(0), + ); let transactions = vec![TestTransaction::new(vec![0])]; let (payload, metadata) = >::from_transactions( @@ -85,8 +90,12 @@ async fn test_vid_task() { _pd: PhantomData, }; - let vid_disperse = - VidDisperse::from_membership(message.data.view_number, vid_disperse, &quorum_membership); + let vid_disperse = VidDisperse::from_membership( + message.data.view_number, + vid_disperse, + &quorum_membership, + EpochNumber::new(0), + ); let vid_proposal = Proposal { data: vid_disperse.clone(), @@ -104,7 +113,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], @@ -125,7 +134,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index c0c4913981..5b2a9bf2d3 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -29,11 +29,11 @@ async fn test_view_sync_task() { let vote_data = ViewSyncPreCommitData { relay: 0, - round: ::Time::new(4), + round: ::View::new(4), }; let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, - ::Time::new(4), + ::View::new(4), hotshot_types::traits::consensus_api::ConsensusApi::public_key(&handle), hotshot_types::traits::consensus_api::ConsensusApi::private_key(&handle), &handle.hotshot.upgrade_lock, diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 48fcffe5e7..6fe2979420 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -11,6 +11,7 @@ use hotshot_testing::{ predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ consensus::OuterConsensus, data::ViewNumber, @@ -96,6 +97,7 @@ async fn test_vote_dependency_handle() { quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), storage: Arc::clone(&handle.storage()), view_number, + epoch_number: EpochNumber::new(1), sender: event_sender.clone(), receiver: event_receiver.clone(), upgrade_lock: handle.hotshot.upgrade_lock.clone(), diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index 4687d3ff66..e5ac199aaf 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -21,7 +21,7 @@ use hotshot_testing::{ test_builder::{Behaviour, TestDescription}, }; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, message::{GeneralConsensusMessage, MessageKind, SequencingMessage}, traits::{ election::Membership, @@ -176,7 +176,7 @@ cross_tests!( view_increment: nodes_count as u64, modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64)); + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0))); } else { {} } diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 38b2da02e1..3050cd7d32 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -23,6 +23,7 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, message::{DataMessage, Message, MessageKind, UpgradeLock}, @@ -53,7 +54,8 @@ pub struct Test; impl NodeType for Test { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 759127fef5..a351a09b96 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -43,7 +43,7 @@ pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` pub type VidShares = BTreeMap< - ::Time, + ::View, HashMap<::SignatureKey, Proposal>>, >; @@ -272,27 +272,30 @@ impl HotShotActionViews { #[derive(custom_debug::Debug, Clone)] pub struct Consensus { /// The validated states that are currently loaded in memory. - validated_state_map: BTreeMap>, + validated_state_map: BTreeMap>, /// All the VID shares we've received for current and future views. vid_shares: VidShares, /// All the DA certs we've received for current and future views. /// view -> DA cert - saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. - cur_view: TYPES::Time, + cur_view: TYPES::View, + + /// Epoch number that is currently on. + cur_epoch: TYPES::Epoch, /// Last proposals we sent out, None if we haven't proposed yet. /// Prevents duplicate proposals, and can be served to those trying to catchup - last_proposals: BTreeMap>>, + last_proposals: BTreeMap>>, /// last view had a successful decide event - last_decided_view: TYPES::Time, + last_decided_view: TYPES::View, /// The `locked_qc` view number - locked_view: TYPES::Time, + locked_view: TYPES::View, /// Map of leaf hash -> leaf /// - contains undecided leaves @@ -302,12 +305,12 @@ pub struct Consensus { /// Bundle of views which we performed the most recent action /// visibible to the network. Actions are votes and proposals /// for DA and Quorum - last_actions: HotShotActionViews, + last_actions: HotShotActionViews, /// Saved payloads. /// /// Encoded transactions for every view if we got a payload for that view. - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, /// the highqc per spec high_qc: QuorumCertificate, @@ -387,14 +390,15 @@ impl Consensus { /// Constructor. #[allow(clippy::too_many_arguments)] pub fn new( - validated_state_map: BTreeMap>, - cur_view: TYPES::Time, - locked_view: TYPES::Time, - last_decided_view: TYPES::Time, - last_actioned_view: TYPES::Time, - last_proposals: BTreeMap>>, + validated_state_map: BTreeMap>, + cur_view: TYPES::View, + cur_epoch: TYPES::Epoch, + locked_view: TYPES::View, + last_decided_view: TYPES::View, + last_actioned_view: TYPES::View, + last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, high_qc: QuorumCertificate, metrics: Arc, ) -> Self { @@ -403,6 +407,7 @@ impl Consensus { vid_shares: BTreeMap::new(), saved_da_certs: HashMap::new(), cur_view, + cur_epoch, last_decided_view, last_proposals, last_actions: HotShotActionViews::from_view(last_actioned_view), @@ -415,17 +420,22 @@ impl Consensus { } /// Get the current view. - pub fn cur_view(&self) -> TYPES::Time { + pub fn cur_view(&self) -> TYPES::View { self.cur_view } + /// Get the current epoch. + pub fn cur_epoch(&self) -> TYPES::Epoch { + self.cur_epoch + } + /// Get the last decided view. - pub fn last_decided_view(&self) -> TYPES::Time { + pub fn last_decided_view(&self) -> TYPES::View { self.last_decided_view } /// Get the locked view. - pub fn locked_view(&self) -> TYPES::Time { + pub fn locked_view(&self) -> TYPES::View { self.locked_view } @@ -435,7 +445,7 @@ impl Consensus { } /// Get the validated state map. - pub fn validated_state_map(&self) -> &BTreeMap> { + pub fn validated_state_map(&self) -> &BTreeMap> { &self.validated_state_map } @@ -445,7 +455,7 @@ impl Consensus { } /// Get the saved payloads. - pub fn saved_payloads(&self) -> &BTreeMap> { + pub fn saved_payloads(&self) -> &BTreeMap> { &self.saved_payloads } @@ -455,19 +465,19 @@ impl Consensus { } /// Get the saved DA certs. - pub fn saved_da_certs(&self) -> &HashMap> { + pub fn saved_da_certs(&self) -> &HashMap> { &self.saved_da_certs } /// Get the map of our recent proposals - pub fn last_proposals(&self) -> &BTreeMap>> { + pub fn last_proposals(&self) -> &BTreeMap>> { &self.last_proposals } /// Update the current view. /// # Errors /// Can return an error when the new view_number is not higher than the existing view number. - pub fn update_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.cur_view, "New view isn't newer than the current view." @@ -476,10 +486,22 @@ impl Consensus { Ok(()) } + /// Update the current epoch. + /// # Errors + /// Can return an error when the new epoch_number is not higher than the existing epoch number. + pub fn update_epoch(&mut self, epoch_number: TYPES::Epoch) -> Result<()> { + ensure!( + epoch_number > self.cur_epoch, + "New epoch isn't newer than the current epoch." + ); + self.cur_epoch = epoch_number; + Ok(()) + } + /// Update the last actioned view internally for votes and proposals /// /// Returns true if the action is for a newer view than the last action of that type - pub fn update_action(&mut self, action: HotShotAction, view: TYPES::Time) -> bool { + pub fn update_action(&mut self, action: HotShotAction, view: TYPES::View) -> bool { let old_view = match action { HotShotAction::Vote => &mut self.last_actions.voted, HotShotAction::Propose => &mut self.last_actions.proposed, @@ -521,7 +543,7 @@ impl Consensus { > self .last_proposals .last_key_value() - .map_or(TYPES::Time::genesis(), |(k, _)| { *k }), + .map_or(TYPES::View::genesis(), |(k, _)| { *k }), "New view isn't newer than the previously proposed view." ); self.last_proposals @@ -533,7 +555,7 @@ impl Consensus { /// /// # Errors /// Can return an error when the new view_number is not higher than the existing decided view number. - pub fn update_last_decided_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_last_decided_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.last_decided_view, "New view isn't newer than the previously decided view." @@ -546,7 +568,7 @@ impl Consensus { /// /// # Errors /// Can return an error when the new view_number is not higher than the existing locked view number. - pub fn update_locked_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_locked_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.locked_view, "New view isn't newer than the previously locked view." @@ -562,7 +584,7 @@ impl Consensus { /// with the same view number. pub fn update_validated_state_map( &mut self, - view_number: TYPES::Time, + view_number: TYPES::View, new_view: View, ) -> Result<()> { if let Some(existing_view) = self.validated_state_map().get(&view_number) { @@ -607,7 +629,7 @@ impl Consensus { /// Can return an error when there's an existing payload corresponding to the same view number. pub fn update_saved_payloads( &mut self, - view_number: TYPES::Time, + view_number: TYPES::View, encoded_transaction: Arc<[u8]>, ) -> Result<()> { ensure!( @@ -635,7 +657,7 @@ impl Consensus { /// Add a new entry to the vid_shares map. pub fn update_vid_shares( &mut self, - view_number: TYPES::Time, + view_number: TYPES::View, disperse: Proposal>, ) { self.vid_shares @@ -645,7 +667,7 @@ impl Consensus { } /// Add a new entry to the da_certs map. - pub fn update_saved_da_certs(&mut self, view_number: TYPES::Time, cert: DaCertificate) { + pub fn update_saved_da_certs(&mut self, view_number: TYPES::View, cert: DaCertificate) { self.saved_da_certs.insert(view_number, cert); } @@ -654,8 +676,8 @@ impl Consensus { /// If the leaf or its ancestors are not found in storage pub fn visit_leaf_ancestors( &self, - start_from: TYPES::Time, - terminator: Terminator, + start_from: TYPES::View, + terminator: Terminator, ok_when_finished: bool, mut f: F, ) -> Result<(), HotShotError> @@ -714,7 +736,7 @@ impl Consensus { /// `saved_payloads` and `validated_state_map` fields of `Consensus`. /// # Panics /// On inconsistent stored entries - pub fn collect_garbage(&mut self, old_anchor_view: TYPES::Time, new_anchor_view: TYPES::Time) { + pub fn collect_garbage(&mut self, old_anchor_view: TYPES::View, new_anchor_view: TYPES::View) { // state check let anchor_entry = self .validated_state_map @@ -758,7 +780,7 @@ impl Consensus { /// Gets the validated state with the given view number, if in the state map. #[must_use] - pub fn state(&self, view_number: TYPES::Time) -> Option<&Arc> { + pub fn state(&self, view_number: TYPES::View) -> Option<&Arc> { match self.validated_state_map.get(&view_number) { Some(view) => view.state(), None => None, @@ -767,7 +789,7 @@ impl Consensus { /// Gets the validated state and state delta with the given view number, if in the state map. #[must_use] - pub fn state_and_delta(&self, view_number: TYPES::Time) -> StateAndDelta { + pub fn state_and_delta(&self, view_number: TYPES::View) -> StateAndDelta { match self.validated_state_map.get(&view_number) { Some(view) => view.state_and_delta(), None => (None, None), @@ -795,14 +817,16 @@ impl Consensus { #[instrument(skip_all, target = "Consensus", fields(view = *view))] pub async fn calculate_and_update_vid( consensus: OuterConsensus, - view: ::Time, + view: ::View, membership: Arc, private_key: &::PrivateKey, + epoch: TYPES::Epoch, ) -> Option<()> { let consensus = consensus.upgradable_read().await; let txns = consensus.saved_payloads().get(&view)?; let vid = - VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, None).await; + VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, epoch, None) + .await; let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; for share in shares { @@ -827,7 +851,7 @@ pub struct CommitmentAndMetadata { /// Builder fee data pub fees: Vec1>, /// View number this block is for - pub block_view: TYPES::Time, + pub block_view: TYPES::View, /// auction result that the block was produced from, if any pub auction_result: Option, } diff --git a/types/src/data.rs b/types/src/data.rs index e70af8c294..3518f2e8bd 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -9,14 +9,6 @@ //! This module provides types for representing consensus internal state, such as leaves, //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. -use std::{ - collections::BTreeMap, - fmt::{Debug, Display}, - hash::Hash, - marker::PhantomData, - sync::Arc, -}; - use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use async_lock::RwLock; @@ -28,6 +20,13 @@ use derivative::Derivative; use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; +use std::{ + collections::BTreeMap, + fmt::{Debug, Display}, + hash::Hash, + marker::PhantomData, + sync::Arc, +}; use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; @@ -56,6 +55,62 @@ use crate::{ vote::{Certificate, HasViewNumber}, }; +/// Implements `ConsensusTime`, `Display`, `Add`, `AddAssign`, `Deref` and `Sub` +/// for the given thing wrapper type around u64. +macro_rules! impl_u64_wrapper { + ($t:ty) => { + impl ConsensusTime for $t { + /// Create a genesis number (0) + fn genesis() -> Self { + Self(0) + } + /// Create a new number with the given value. + fn new(n: u64) -> Self { + Self(n) + } + /// Return the u64 format + fn u64(&self) -> u64 { + self.0 + } + } + + impl Display for $t { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } + } + + impl std::ops::Add for $t { + type Output = $t; + + fn add(self, rhs: u64) -> Self::Output { + Self(self.0 + rhs) + } + } + + impl std::ops::AddAssign for $t { + fn add_assign(&mut self, rhs: u64) { + self.0 += rhs; + } + } + + impl std::ops::Deref for $t { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl std::ops::Sub for $t { + type Output = $t; + fn sub(self, rhs: u64) -> Self::Output { + Self(self.0 - rhs) + } + } + }; +} + /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. #[derive( Copy, @@ -73,27 +128,6 @@ use crate::{ )] pub struct ViewNumber(u64); -impl ConsensusTime for ViewNumber { - /// Create a genesis view number (0) - fn genesis() -> Self { - Self(0) - } - /// Create a new `ViewNumber` with the given value. - fn new(n: u64) -> Self { - Self(n) - } - /// Returen the u64 format - fn u64(&self) -> u64 { - self.0 - } -} - -impl Display for ViewNumber { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - impl Committable for ViewNumber { fn commit(&self) -> Commitment { let builder = RawCommitmentBuilder::new("View Number Commitment"); @@ -101,34 +135,33 @@ impl Committable for ViewNumber { } } -impl std::ops::Add for ViewNumber { - type Output = ViewNumber; - - fn add(self, rhs: u64) -> Self::Output { - Self(self.0 + rhs) - } -} - -impl std::ops::AddAssign for ViewNumber { - fn add_assign(&mut self, rhs: u64) { - self.0 += rhs; - } -} +impl_u64_wrapper!(ViewNumber); -impl std::ops::Deref for ViewNumber { - type Target = u64; +/// Type-safe wrapper around `u64` so we know the thing we're talking about is a epoch number. +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + CanonicalSerialize, + CanonicalDeserialize, +)] +pub struct EpochNumber(u64); - fn deref(&self) -> &Self::Target { - &self.0 +impl Committable for EpochNumber { + fn commit(&self) -> Commitment { + let builder = RawCommitmentBuilder::new("Epoch Number Commitment"); + builder.u64(self.0).finalize() } } -impl std::ops::Sub for ViewNumber { - type Output = ViewNumber; - fn sub(self, rhs: u64) -> Self::Output { - Self(self.0 - rhs) - } -} +impl_u64_wrapper!(EpochNumber); /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] @@ -139,7 +172,7 @@ pub struct DaProposal { /// Metadata of the block to be applied. pub metadata: >::Metadata, /// View this proposal applies to - pub view_number: TYPES::Time, + pub view_number: TYPES::View, } /// A proposal to upgrade the network @@ -152,7 +185,7 @@ where /// The information about which version we are upgrading to. pub upgrade_proposal: UpgradeProposalData, /// View this proposal applies to - pub view_number: TYPES::Time, + pub view_number: TYPES::View, } /// VID dispersal data @@ -163,7 +196,7 @@ where #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct VidDisperse { /// The view number for which this VID data is intended - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -173,16 +206,17 @@ pub struct VidDisperse { } impl VidDisperse { - /// Create VID dispersal from a specified membership + /// Create VID dispersal from a specified membership for a given epoch. /// Uses the specified function to calculate share dispersal /// Allows for more complex stake table functionality pub fn from_membership( - view_number: TYPES::Time, + view_number: TYPES::View, mut vid_disperse: JfVidDisperse, membership: &TYPES::Membership, + epoch: TYPES::Epoch, ) -> Self { let shares = membership - .committee_members(view_number) + .committee_members(view_number, epoch) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); @@ -195,7 +229,7 @@ impl VidDisperse { } } - /// Calculate the vid disperse information from the payload given a view and membership, + /// Calculate the vid disperse information from the payload given a view, epoch and membership, /// optionally using precompute data from builder /// /// # Panics @@ -204,10 +238,11 @@ impl VidDisperse { pub async fn calculate_vid_disperse( txns: Arc<[u8]>, membership: &Arc, - view: TYPES::Time, + view: TYPES::View, + epoch: TYPES::Epoch, precompute_data: Option, ) -> Self { - let num_nodes = membership.total_nodes(); + let num_nodes = membership.total_nodes(epoch); let vid_disperse = spawn_blocking(move || { precompute_data @@ -222,7 +257,7 @@ impl VidDisperse { // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); - Self::from_membership(view, vid_disperse, membership.as_ref()) + Self::from_membership(view, vid_disperse, membership.as_ref(), epoch) } } @@ -239,7 +274,7 @@ pub enum ViewChangeEvidence { impl ViewChangeEvidence { /// Check that the given ViewChangeEvidence is relevant to the current view. - pub fn is_valid_for_view(&self, view: &TYPES::Time) -> bool { + pub fn is_valid_for_view(&self, view: &TYPES::View) -> bool { match self { ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.data().view == *view - 1, ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, @@ -251,7 +286,7 @@ impl ViewChangeEvidence { /// VID share and associated metadata for a single node pub struct VidDisperseShare { /// The view number for which this VID data is intended - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -353,7 +388,7 @@ pub struct QuorumProposal { pub block_header: TYPES::BlockHeader, /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// Per spec, justification pub justify_qc: QuorumCertificate, @@ -369,31 +404,31 @@ pub struct QuorumProposal { } impl HasViewNumber for DaProposal { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for VidDisperse { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for VidDisperseShare { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for QuorumProposal { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for UpgradeProposal { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.view_number } } @@ -430,7 +465,7 @@ pub trait TestableLeaf { #[serde(bound(deserialize = ""))] pub struct Leaf { /// CurView from leader when proposing leaf - view_number: TYPES::Time, + view_number: TYPES::View, /// Per spec, justification justify_qc: QuorumCertificate, @@ -503,7 +538,7 @@ impl QuorumCertificate { // since this is genesis, we should never have a decided upgrade certificate. let upgrade_lock = UpgradeLock::::new(); - let genesis_view = ::genesis(); + let genesis_view = ::genesis(); let data = QuorumData { leaf_commit: Leaf::genesis(validated_state, instance_state) @@ -563,13 +598,13 @@ impl Leaf { let justify_qc = QuorumCertificate::new( null_quorum_data.clone(), null_quorum_data.commit(), - ::genesis(), + ::genesis(), None, PhantomData, ); Self { - view_number: TYPES::Time::genesis(), + view_number: TYPES::View::genesis(), justify_qc, parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, @@ -579,7 +614,7 @@ impl Leaf { } /// Time when this leaf was created. - pub fn view_number(&self) -> TYPES::Time { + pub fn view_number(&self) -> TYPES::View { self.view_number } /// Height of this leaf in the chain. @@ -866,7 +901,7 @@ pub struct PackedBundle { pub metadata: >::Metadata, /// The view number that this block is associated with. - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// The sequencing fee for submitting bundles. pub sequencing_fees: Vec1>, @@ -883,7 +918,7 @@ impl PackedBundle { pub fn new( encoded_transactions: Arc<[u8]>, metadata: >::Metadata, - view_number: TYPES::Time, + view_number: TYPES::View, sequencing_fees: Vec1>, vid_precompute: Option, auction_result: Option, diff --git a/types/src/error.rs b/types/src/error.rs index f76c46906f..80c1baae8d 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -41,7 +41,7 @@ pub enum HotShotError { #[error("View {view_number} timed out: {state:?}")] ViewTimedOut { /// The view number that timed out - view_number: TYPES::Time, + view_number: TYPES::View, /// The state that the round was in when it timed out state: RoundTimedoutState, }, diff --git a/types/src/event.rs b/types/src/event.rs index e71d0c8196..ca833c9f5d 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -25,7 +25,7 @@ use crate::{ #[serde(bound(deserialize = "TYPES: NodeType"))] pub struct Event { /// The view number that this event originates from - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// The underlying event pub event: EventType, } @@ -128,17 +128,17 @@ pub enum EventType { /// A replica task was canceled by a timeout interrupt ReplicaViewTimeout { /// The view that timed out - view_number: TYPES::Time, + view_number: TYPES::View, }, /// The view has finished. If values were decided on, a `Decide` event will also be emitted. ViewFinished { /// The view number that has just finished - view_number: TYPES::Time, + view_number: TYPES::View, }, /// The view timed out ViewTimeout { /// The view that timed out - view_number: TYPES::Time, + view_number: TYPES::View, }, /// New transactions were received from the network /// or submitted to the network by us diff --git a/types/src/hotshot_config_file.rs b/types/src/hotshot_config_file.rs index 37457b8d28..08b81af3e4 100644 --- a/types/src/hotshot_config_file.rs +++ b/types/src/hotshot_config_file.rs @@ -65,6 +65,8 @@ pub struct HotShotConfigFile { pub builder_urls: Vec1, /// Upgrade config pub upgrade: UpgradeConfig, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl From> for HotShotConfig { @@ -98,6 +100,7 @@ impl From> for HotShotConfig { stop_proposing_time: val.upgrade.stop_proposing_time, start_voting_time: val.upgrade.start_voting_time, stop_voting_time: val.upgrade.stop_voting_time, + epoch_height: val.epoch_height, } } } @@ -147,6 +150,7 @@ impl HotShotConfigFile { data_request_delay: Some(Duration::from_millis(REQUEST_DATA_DELAY)), builder_urls: default_builder_urls(), upgrade: UpgradeConfig::default(), + epoch_height: 0, } } } diff --git a/types/src/lib.rs b/types/src/lib.rs index 806667bf9b..897fef5c73 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -232,6 +232,8 @@ pub struct HotShotConfig { pub start_voting_time: u64, /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. pub stop_voting_time: u64, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl HotShotConfig { diff --git a/types/src/message.rs b/types/src/message.rs index ab72986652..42ae087b9a 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -67,7 +67,7 @@ impl fmt::Debug for Message { impl HasViewNumber for Message { /// get the view number out of a message - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.kind.view_number() } } @@ -147,16 +147,16 @@ impl From> for MessageKind { } impl ViewMessage for MessageKind { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { match &self { MessageKind::Consensus(message) => message.view_number(), MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, MessageKind::Data(DataMessage::RequestData(msg)) => msg.view, MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { ResponseMessage::Found(m) => m.view_number(), - ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::Time::new(1), + ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::View::new(1), }, - MessageKind::External(_) => TYPES::Time::new(1), + MessageKind::External(_) => TYPES::View::new(1), } } @@ -248,7 +248,7 @@ pub enum SequencingMessage { impl SequencingMessage { /// Get the view number this message relates to - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { match &self { SequencingMessage::General(general_message) => { match general_message { @@ -342,7 +342,7 @@ pub enum DataMessage { /// Contains a transaction to be submitted /// TODO rethink this when we start to send these messages /// we only need the view number for broadcast - SubmitTransaction(TYPES::Transaction, TYPES::Time), + SubmitTransaction(TYPES::Transaction, TYPES::View), /// A request for data RequestData(DataRequest), /// A response to a data request @@ -373,10 +373,11 @@ where pub async fn validate_signature( &self, quorum_membership: &TYPES::Membership, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); - let view_leader_key = quorum_membership.leader(view_number); + let view_leader_key = quorum_membership.leader(view_number, epoch); let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( @@ -424,7 +425,7 @@ impl UpgradeLock { /// /// # Errors /// Returns an error if we do not support the version required by the decided upgrade certificate. - pub async fn version(&self, view: TYPES::Time) -> Result { + pub async fn version(&self, view: TYPES::View) -> Result { let upgrade_certificate = self.decided_upgrade_certificate.read().await; let version = match *upgrade_certificate { @@ -448,7 +449,7 @@ impl UpgradeLock { /// Calculate the version applied in a view, based on the provided upgrade lock. /// /// This function does not fail, since it does not check that the version is supported. - pub async fn version_infallible(&self, view: TYPES::Time) -> Version { + pub async fn version_infallible(&self, view: TYPES::View) -> Version { let upgrade_certificate = self.decided_upgrade_certificate.read().await; match *upgrade_certificate { diff --git a/types/src/request_response.rs b/types/src/request_response.rs index d27cc27313..6829d19743 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -16,7 +16,7 @@ use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; /// A signed request for a proposal. pub struct ProposalRequestPayload { /// The view number that we're requesting a proposal for. - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// Our public key. The ensures that the receipient can reply to /// us directly. diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 6e9e7bbbfb..bbdc88eb05 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -78,7 +78,7 @@ pub struct SimpleCertificate, /// Which view this QC relates to - pub view_number: TYPES::Time, + pub view_number: TYPES::View, /// assembled signature for certificate aggregation pub signatures: Option<::QcType>, /// phantom data for `THRESHOLD` and `TYPES` @@ -92,7 +92,7 @@ impl> pub fn new( data: VOTEABLE, vote_commitment: Commitment, - view_number: TYPES::Time, + view_number: TYPES::View, signatures: Option<::QcType>, pd: PhantomData<(TYPES, THRESHOLD)>, ) -> Self { @@ -133,7 +133,7 @@ impl> vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, - view: TYPES::Time, + view: TYPES::View, ) -> Self { let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); @@ -148,13 +148,14 @@ impl> async fn is_valid_cert, V: Versions>( &self, membership: &MEMBERSHIP, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> bool { - if self.view_number == TYPES::Time::genesis() { + if self.view_number == TYPES::View::genesis() { return true; } let real_qc_pp = ::public_parameter( - membership.stake_table(), + membership.stake_table(epoch), U256::from(Self::threshold(membership)), ); let Ok(commit) = self.data_commitment(upgrade_lock).await else { @@ -187,7 +188,7 @@ impl> impl> HasViewNumber for SimpleCertificate { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::View { self.view_number } } @@ -205,7 +206,7 @@ impl UpgradeCertificate { /// Returns an error when the certificate is no longer relevant pub async fn is_relevant( &self, - view_number: TYPES::Time, + view_number: TYPES::View, decided_upgrade_certificate: Arc>>, ) -> Result<()> { let decided_upgrade_certificate_read = decided_upgrade_certificate.read().await; @@ -226,11 +227,13 @@ impl UpgradeCertificate { pub async fn validate( upgrade_certificate: &Option, quorum_membership: &TYPES::Membership, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { ensure!( - cert.is_valid_cert(quorum_membership, upgrade_lock).await, + cert.is_valid_cert(quorum_membership, epoch, upgrade_lock) + .await, "Invalid upgrade certificate." ); Ok(()) @@ -241,7 +244,7 @@ impl UpgradeCertificate { /// Given an upgrade certificate and a view, tests whether the view is in the period /// where we are upgrading, which requires that we propose with null blocks. - pub fn upgrading_in(&self, view: TYPES::Time) -> bool { + pub fn upgrading_in(&self, view: TYPES::View) -> bool { view > self.data.old_version_last_view && view < self.data.new_version_first_view } } diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 45e131f756..1c1fcba31b 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -41,7 +41,7 @@ pub struct DaData { /// Data used for a timeout vote. pub struct TimeoutData { /// View the timeout is for - pub view: TYPES::Time, + pub view: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a VID vote. @@ -55,7 +55,7 @@ pub struct ViewSyncPreCommitData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::Time, + pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Commit vote. @@ -63,7 +63,7 @@ pub struct ViewSyncCommitData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::Time, + pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Finalize vote. @@ -71,7 +71,7 @@ pub struct ViewSyncFinalizeData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::Time, + pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Upgrade vote. @@ -82,13 +82,13 @@ pub struct UpgradeProposalData { pub new_version: Version, /// The last view in which we are allowed to reach a decide on this upgrade. /// If it is not decided by that view, we discard it. - pub decide_by: TYPES::Time, + pub decide_by: TYPES::View, /// A unique identifier for the specific protocol being voted on. pub new_version_hash: Vec, /// The last block for which the old version will be in effect. - pub old_version_last_view: TYPES::Time, + pub old_version_last_view: TYPES::View, /// The first block for which the new version will be in effect. - pub new_version_first_view: TYPES::Time, + pub new_version_first_view: TYPES::View, } /// Marker trait for data or commitments that can be voted on. @@ -123,11 +123,11 @@ pub struct SimpleVote { /// The leaf commitment being voted on. pub data: DATA, /// The view this vote was cast for - pub view_number: TYPES::Time, + pub view_number: TYPES::View, } impl HasViewNumber for SimpleVote { - fn view_number(&self) -> ::Time { + fn view_number(&self) -> ::View { self.view_number } } @@ -158,7 +158,7 @@ impl SimpleVote { /// If we are unable to sign the data pub async fn create_signed_vote( data: DATA, - view: TYPES::Time, + view: TYPES::View, pub_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -187,7 +187,7 @@ pub struct VersionedVoteData { data: DATA, /// view number - view: TYPES::Time, + view: TYPES::View, /// version applied to the view number version: Version, @@ -204,7 +204,7 @@ impl VersionedVoteData, ) -> Result { let version = upgrade_lock.version(view).await?; @@ -222,7 +222,7 @@ impl VersionedVoteData, ) -> Self { let version = upgrade_lock.version_infallible(view).await; @@ -303,7 +303,7 @@ impl Committable for UpgradeProposalData { /// This implements commit for all the types which contain a view and relay public key. fn view_and_relay_commit( - view: TYPES::Time, + view: TYPES::View, relay: u64, tag: &str, ) -> Commitment { diff --git a/types/src/traits/auction_results_provider.rs b/types/src/traits/auction_results_provider.rs index 283ada9d68..7fcd8498e4 100644 --- a/types/src/traits/auction_results_provider.rs +++ b/types/src/traits/auction_results_provider.rs @@ -20,5 +20,5 @@ use super::node_implementation::NodeType; pub trait AuctionResultsProvider: Send + Sync + Clone { /// Fetches the auction result for a view. Does not cache the result, /// subsequent calls will invoke additional wasted calls. - async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; + async fn fetch_auction_result(&self, view_number: TYPES::View) -> Result; } diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index b03bf8a843..43d0ebf12f 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -23,33 +23,45 @@ pub trait Membership: committee_topic: Topic, ) -> Self; - /// Get all participants in the committee (including their stake) - fn stake_table(&self) -> Vec<::StakeTableEntry>; + /// Get all participants in the committee (including their stake) for a specific epoch + fn stake_table( + &self, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry>; - /// Get all participants in the committee for a specific view - fn committee_members(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get all participants in the committee for a specific view for a specific epoch + fn committee_members( + &self, + view_number: TYPES::View, + epoch: TYPES::Epoch, + ) -> BTreeSet; - /// Get all leaders in the committee for a specific view - fn committee_leaders(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get all leaders in the committee for a specific view for a specific epoch + fn committee_leaders( + &self, + view_number: TYPES::View, + epoch: TYPES::Epoch, + ) -> BTreeSet; /// Get the stake table entry for a public key, returns `None` if the - /// key is not in the table + /// key is not in the table for a specific epoch fn stake( &self, pub_key: &TYPES::SignatureKey, + epoch: TYPES::Epoch, ) -> Option<::StakeTableEntry>; - /// See if a node has stake in the committee - fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; + /// See if a node has stake in the committee in a specific epoch + fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; - /// The leader of the committee for view `view_number`. - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; + /// The leader of the committee for view `view_number` in an epoch `epoch`. + fn leader(&self, view_number: TYPES::View, epoch: TYPES::Epoch) -> TYPES::SignatureKey; /// Get the network topic for the committee fn committee_topic(&self) -> Topic; - /// Returns the number of total nodes in the committee - fn total_nodes(&self) -> usize; + /// Returns the number of total nodes in the committee in an epoch `epoch` + fn total_nodes(&self, epoch: TYPES::Epoch) -> usize; /// Returns the threshold for a specific `Membership` implementation fn success_threshold(&self) -> NonZeroU64; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 97851e6963..27fe0ec3c9 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -126,7 +126,7 @@ pub trait Id: Eq + PartialEq + Hash {} /// a message pub trait ViewMessage { /// get the view out of the message - fn view_number(&self) -> TYPES::Time; + fn view_number(&self) -> TYPES::View; // TODO move out of this trait. /// get the purpose of the message fn purpose(&self) -> MessagePurpose; @@ -139,7 +139,7 @@ pub struct DataRequest { /// Request pub request: RequestKind, /// View this message is for - pub view: TYPES::Time, + pub view: TYPES::View, /// signature of the Sha256 hash of the data so outsiders can't use know /// public keys with stake. pub signature: ::PureAssembledSignatureType, @@ -149,11 +149,11 @@ pub struct DataRequest { #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] pub enum RequestKind { /// Request VID data by our key and the VID commitment - Vid(TYPES::Time, TYPES::SignatureKey), + Vid(TYPES::View, TYPES::SignatureKey), /// Request a DA proposal for a certain view - DaProposal(TYPES::Time), + DaProposal(TYPES::View), /// Request for quorum proposal for a view - Proposal(TYPES::Time), + Proposal(TYPES::View), } /// A response for a request. `SequencingMessage` is the same as other network messages @@ -271,8 +271,12 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st /// Update view can be used for any reason, but mostly it's for canceling tasks, /// and looking up the address of the leader of a future view. - async fn update_view<'a, TYPES>(&'a self, _view: u64, _membership: &TYPES::Membership) - where + async fn update_view<'a, TYPES>( + &'a self, + _view: u64, + _epoch: u64, + _membership: &TYPES::Membership, + ) where TYPES: NodeType + 'a, { } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 060df7adf3..c84031218c 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -210,7 +210,9 @@ pub trait NodeType: /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. - type Time: ConsensusTime + Display; + type View: ConsensusTime + Display; + /// Same as above but for epoch. + type Epoch: ConsensusTime + Display; /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: Debug @@ -244,7 +246,7 @@ pub trait NodeType: type InstanceState: InstanceState; /// The validated state type that this hotshot setup is using. - type ValidatedState: ValidatedState; + type ValidatedState: ValidatedState; /// Membership used for this implementation type Membership: Membership; diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index c11ba771b3..d400ce455b 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync + Clone { proposal: &Proposal>, ) -> Result<()>; /// Record a HotShotAction taken. - async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; + async fn record_action(&self, view: TYPES::View, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, @@ -44,7 +44,7 @@ pub trait Storage: Send + Sync + Clone { async fn update_undecided_state( &self, leafs: CommitmentMap>, - state: BTreeMap>, + state: BTreeMap>, ) -> Result<()>; /// Upgrade the current decided upgrade certificate in storage. async fn update_decided_upgrade_certificate( diff --git a/types/src/utils.rs b/types/src/utils.rs index 8795ab028a..e3d19a8286 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -150,7 +150,7 @@ pub struct View { #[derive(Debug, Clone)] pub struct RoundFinishedEvent { /// The round that finished - pub view_number: TYPES::Time, + pub view_number: TYPES::View, } /// Whether or not to stop inclusively or exclusively when walking diff --git a/types/src/vote.rs b/types/src/vote.rs index 1376aa0d03..882512eae9 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -48,7 +48,7 @@ pub trait Vote: HasViewNumber { /// Any type that is associated with a view pub trait HasViewNumber { /// Returns the view number the type refers to. - fn view_number(&self) -> TYPES::Time; + fn view_number(&self) -> TYPES::View; } /** @@ -68,13 +68,14 @@ pub trait Certificate: HasViewNumber { vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, - view: TYPES::Time, + view: TYPES::View, ) -> Self; - /// Checks if the cert is valid + /// Checks if the cert is valid in the given epoch fn is_valid_cert, V: Versions>( &self, membership: &MEMBERSHIP, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate @@ -130,12 +131,14 @@ impl< V: Versions, > VoteAccumulator { - /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we + /// Add a vote to the total accumulated votes for the given epoch. + /// Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. pub async fn accumulate( &mut self, vote: &VOTE, membership: &TYPES::Membership, + epoch: TYPES::Epoch, ) -> Either<(), CERT> { let key = vote.signing_key(); @@ -158,10 +161,10 @@ impl< return Either::Left(()); } - let Some(stake_table_entry) = membership.stake(&key) else { + let Some(stake_table_entry) = membership.stake(&key, epoch) else { return Either::Left(()); }; - let stake_table = membership.stake_table(); + let stake_table = membership.stake_table(epoch); let Some(vote_node_id) = stake_table .iter() .position(|x| *x == stake_table_entry.clone()) @@ -184,7 +187,7 @@ impl< let (signers, sig_list) = self .signers .entry(vote_commitment) - .or_insert((bitvec![0; membership.total_nodes()], Vec::new())); + .or_insert((bitvec![0; membership.total_nodes(epoch)], Vec::new())); if signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); return Either::Left(()); From b469dfed937a62619ab734215607908cd7432005 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 21 Oct 2024 10:46:10 -0400 Subject: [PATCH 1255/1393] memoize vid_scheme (#3772) --- types/src/vid.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/types/src/vid.rs b/types/src/vid.rs index 5c2fd2cead..7d3bfb21a7 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -14,6 +14,7 @@ //! This crate and all downstream crates should talk to the VID scheme only //! via the traits exposed here. +#![allow(missing_docs)] use std::{fmt::Debug, ops::Range}; use ark_bn254::Bn254; @@ -59,6 +60,7 @@ use crate::{ /// # Panics /// When the construction fails for the underlying VID scheme. #[must_use] +#[memoize::memoize(SharedCache, Capacity: 10)] pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { // recovery_threshold is currently num_storage_nodes rounded down to a power of two // TODO recovery_threshold should be a function of the desired erasure code rate @@ -85,6 +87,7 @@ pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { /// Similar to [`vid_scheme()`], but with `KZG_SRS_TEST` for testing purpose only. #[cfg(feature = "test-srs")] +#[memoize::memoize(SharedCache, Capacity: 10)] pub fn vid_scheme_for_test(num_storage_nodes: usize) -> VidSchemeType { let recovery_threshold = 1 << num_storage_nodes.ilog2(); #[allow(clippy::panic)] @@ -123,6 +126,7 @@ type Advz = advz::AdvzGPU<'static, E, H>; /// Newtype wrapper for a VID scheme type that impls /// [`VidScheme`], [`PayloadProver`], [`Precomputable`]. +#[derive(Clone)] pub struct VidSchemeType(Advz); /// Newtype wrapper for a large payload range proof. From 24732444bfdef4685ac1ee8ed7d94437deac73d7 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 21 Oct 2024 16:33:55 -0400 Subject: [PATCH 1256/1393] Libp2p and overflow fixes (#3781) * libp2p & overflow fix * metrics for queue len, and make inactive receiver * cancel vote deps on timeout * Bf/fix patch (#3780) * add the queue len task * spawn task, clear on view change * build errors --------- Co-authored-by: Brendon Fish --- hotshot/src/lib.rs | 8 ++- hotshot/src/tasks/mod.rs | 28 ++++++++++- hotshot/src/tasks/task_state.rs | 2 +- libp2p-networking/src/network/node.rs | 14 ++++++ libp2p-networking/src/network/node/config.rs | 50 +++++++++++++++++++ task-impls/src/quorum_vote/mod.rs | 33 +++++++++--- .../tests/tests_1/vote_dependency_handle.rs | 2 +- types/src/consensus.rs | 4 ++ 8 files changed, 127 insertions(+), 14 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index bc2fbf8128..8c91554e4d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -252,15 +252,19 @@ impl, V: Versions> SystemContext::from_certificate(&initializer.decided_upgrade_certificate); - // Allow overflow on the channel, otherwise sending to it may block. + // Allow overflow on the external channel, otherwise sending to it may block. external_rx.set_overflow(true); + // Allow overflow on the internal channel as well. We don't want to block consensus if we + // have a slow receiver + internal_rx.set_overflow(true); + // Get the validated state from the initializer or construct an incomplete one from the // block header. let validated_state = match initializer.validated_state { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 0f89203f45..33341f9013 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,9 +8,10 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; use async_broadcast::{broadcast, RecvError}; +use async_compatibility_layer::art::async_sleep; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; @@ -94,6 +95,29 @@ pub fn add_response_task, V: Versi )); } +/// Add a task which updates our queue lenght metric at a set interval +pub fn add_queue_len_task, V: Versions>( + handle: &mut SystemContextHandle, +) { + let consensus = handle.hotshot.consensus(); + let rx = handle.internal_event_stream.1.clone(); + let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); + let task_handle = async_spawn(async move { + futures::pin_mut!(shutdown_signal); + loop { + futures::select! { + () = shutdown_signal => { + return; + }, + () = async_sleep(Duration::from_millis(500)).fuse() => { + consensus.read().await.metrics.internal_event_queue_len.set(rx.len()); + } + } + } + }); + handle.network_registry.register(task_handle); +} + /// Add the network task to handle messages and publish events. pub fn add_network_message_task< TYPES: NodeType, @@ -228,7 +252,7 @@ pub async fn add_consensus_tasks, handle.add_task(QuorumProposalRecvTaskState::::create_from(handle).await); handle.add_task(ConsensusTaskState::::create_from(handle).await); } - + add_queue_len_task(handle); #[cfg(feature = "rewind")] handle.add_task(RewindTaskState::::create_from(&handle).await); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index d23799cc59..625b0ff7cd 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -233,7 +233,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), latest_voted_view: handle.cur_view().await, - vote_dependencies: HashMap::new(), + vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), da_membership: handle.hotshot.memberships.da_membership.clone().into(), diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 9dde578f58..08e896d785 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -205,6 +205,20 @@ impl NetworkNode { .mesh_n_low(config.gossip_config.mesh_n_low) // Lower limit of mesh peers .mesh_outbound_min(config.gossip_config.mesh_outbound_min) // Minimum number of outbound peers in mesh .max_transmit_size(config.gossip_config.max_transmit_size) // Maximum size of a message + .max_ihave_length(config.gossip_config.max_ihave_length) // Maximum number of messages to include in an IHAVE message + .max_ihave_messages(config.gossip_config.max_ihave_messages) // Maximum number of IHAVE messages to accept from a peer within a heartbeat + .published_message_ids_cache_time( + config.gossip_config.published_message_ids_cache_time, + ) // Cache duration for published message IDs + .iwant_followup_time(config.gossip_config.iwant_followup_time) // Time to wait for a message requested through IWANT following an IHAVE advertisement + .max_messages_per_rpc(config.gossip_config.max_messages_per_rpc) // The maximum number of messages we will process in a given RPC + .gossip_retransimission(config.gossip_config.gossip_retransmission) // Controls how many times we will allow a peer to request the same message id through IWANT gossip before we start ignoring them. + .flood_publish(config.gossip_config.flood_publish) // If enabled newly created messages will always be sent to all peers that are subscribed to the topic and have a good enough score. + .duplicate_cache_time(config.gossip_config.duplicate_cache_time) // The time period that messages are stored in the cache + .fanout_ttl(config.gossip_config.fanout_ttl) // Time to live for fanout peers + .heartbeat_initial_delay(config.gossip_config.heartbeat_initial_delay) // Initial delay in each heartbeat + .gossip_factor(config.gossip_config.gossip_factor) // Affects how many peers we will emit gossip to at each heartbeat + .gossip_lazy(config.gossip_config.gossip_lazy) // Minimum number of peers to emit gossip to during a heartbeat .build() .map_err(|err| { NetworkError::ConfigError(format!("error building gossipsub config: {err:?}")) diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index e7eb748342..c51fc281ea 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -59,6 +59,7 @@ pub struct NetworkNodeConfig { /// Configuration for Libp2p's Gossipsub #[derive(Clone, Debug)] +#[allow(missing_docs)] pub struct GossipConfig { /// The heartbeat interval pub heartbeat_interval: Duration, @@ -79,6 +80,42 @@ pub struct GossipConfig { /// The maximum gossip message size pub max_transmit_size: usize, + + /// The maximum number of messages in an IHAVE message + pub max_ihave_length: usize, + + /// Maximum number of IHAVE messages to accept from a peer within a heartbeat + pub max_ihave_messages: usize, + + /// Cache duration for published message IDs + pub published_message_ids_cache_time: Duration, + + /// Time to wait for a message requested through IWANT following an IHAVE advertisement + pub iwant_followup_time: Duration, + + /// The maximum number of messages we will process in a given RPC + pub max_messages_per_rpc: Option, + + /// Controls how many times we will allow a peer to request the same message id through IWANT gossip before we start ignoring them. + pub gossip_retransmission: u32, + + /// If enabled newly created messages will always be sent to all peers that are subscribed to the topic and have a good enough score. + pub flood_publish: bool, + + /// The time period that messages are stored in the cache + pub duplicate_cache_time: Duration, + + /// Time to live for fanout peers + pub fanout_ttl: Duration, + + /// Initial delay in each heartbeat + pub heartbeat_initial_delay: Duration, + + /// Affects how many peers we will emit gossip to at each heartbeat + pub gossip_factor: f64, + + /// Minimum number of peers to emit gossip to during a heartbeat + pub gossip_lazy: usize, } impl Default for GossipConfig { @@ -97,6 +134,19 @@ impl Default for GossipConfig { mesh_n_low: 6, // The minimum number of peers in the mesh mesh_outbound_min: 2, // The minimum number of mesh peers that must be outbound + max_ihave_length: 5000, + max_ihave_messages: 10, + published_message_ids_cache_time: Duration::from_secs(60 * 20), // 20 minutes + iwant_followup_time: Duration::from_secs(3), + max_messages_per_rpc: None, + gossip_retransmission: 3, + flood_publish: true, + duplicate_cache_time: Duration::from_secs(60), + fanout_ttl: Duration::from_secs(60), + heartbeat_initial_delay: Duration::from_secs(5), + gossip_factor: 0.25, + gossip_lazy: 6, + max_transmit_size: MAX_GOSSIP_MSG_SIZE, // The maximum gossip message size } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 909fcf2db2..7d0fa4833b 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -4,10 +4,10 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; -use async_broadcast::{Receiver, Sender}; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -81,7 +81,7 @@ pub struct VoteDependencyHandle, V /// Event sender. pub sender: Sender>>, /// Event receiver. - pub receiver: Receiver>>, + pub receiver: InactiveReceiver>>, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, /// The node's id @@ -113,7 +113,7 @@ impl + 'static, V: Versions> None => fetch_proposal( justify_qc.view_number(), self.sender.clone(), - self.receiver.clone(), + self.receiver.activate_cloned(), Arc::clone(&self.quorum_membership), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.public_key.clone(), @@ -259,7 +259,7 @@ impl + 'static, V: Versions> Handl { // Block on receiving the event from the event stream. EventDependency::new( - self.receiver.clone(), + self.receiver.activate_cloned(), Box::new(move |event| { let event = event.as_ref(); if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { @@ -379,7 +379,7 @@ pub struct QuorumVoteTaskState, V: pub latest_voted_view: TYPES::View, /// Table for the in-progress dependency tasks. - pub vote_dependencies: HashMap>, + pub vote_dependencies: BTreeMap>, /// The underlying network pub network: Arc, @@ -499,7 +499,7 @@ impl, V: Versions> QuorumVoteTaskS view_number, epoch_number, sender: event_sender.clone(), - receiver: event_receiver.clone(), + receiver: event_receiver.clone().deactivate(), upgrade_lock: self.upgrade_lock.clone(), id: self.id, }, @@ -678,6 +678,23 @@ impl, V: Versions> QuorumVoteTaskS return; } } + HotShotEvent::Timeout(view) => { + // cancel old tasks + let current_tasks = self.vote_dependencies.split_off(view); + while let Some((_, task)) = self.vote_dependencies.pop_last() { + cancel_task(task).await; + } + self.vote_dependencies = current_tasks; + } + HotShotEvent::ViewChange(mut view) => { + view = TYPES::View::new(view.saturating_sub(1)); + // cancel old tasks + let current_tasks = self.vote_dependencies.split_off(&view); + while let Some((_, task)) = self.vote_dependencies.pop_last() { + cancel_task(task).await; + } + self.vote_dependencies = current_tasks; + } _ => {} } } @@ -701,7 +718,7 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) { - for handle in self.vote_dependencies.drain().map(|(_view, handle)| handle) { + while let Some((_, handle)) = self.vote_dependencies.pop_last() { #[cfg(async_executor_impl = "async-std")] handle.cancel().await; #[cfg(async_executor_impl = "tokio")] diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 6fe2979420..1a8519ce2c 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -99,7 +99,7 @@ async fn test_vote_dependency_handle() { view_number, epoch_number: EpochNumber::new(1), sender: event_sender.clone(), - receiver: event_receiver.clone(), + receiver: event_receiver.clone().deactivate(), upgrade_lock: handle.hotshot.upgrade_lock.clone(), id: handle.hotshot.id, }; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a351a09b96..b84ab99b19 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -348,6 +348,8 @@ pub struct ConsensusMetricsValue { pub number_of_timeouts_as_leader: Box, /// The number of empty blocks that have been proposed pub number_of_empty_blocks_proposed: Box, + /// Number of events in the hotshot event queue + pub internal_event_queue_len: Box, } impl ConsensusMetricsValue { @@ -376,6 +378,8 @@ impl ConsensusMetricsValue { .create_counter(String::from("number_of_timeouts_as_leader"), None), number_of_empty_blocks_proposed: metrics .create_counter(String::from("number_of_empty_blocks_proposed"), None), + internal_event_queue_len: metrics + .create_gauge(String::from("internal_event_queue_len"), None), } } } From 0dbbff739a82067b35a4f646a7c9419e55cf1f65 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 22 Oct 2024 20:13:54 +0200 Subject: [PATCH 1257/1393] Remove unused code in `parent_leaf_and_state` (#3785) --- task-impls/src/helpers.rs | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index cd4bf5b3cd..268b43b2d3 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -415,24 +415,7 @@ pub(crate) async fn parent_leaf_and_state( .get(&leaf_commitment) .context("Failed to find high QC of parent")?; - let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); - let parent_leaf = leaf.clone(); - let original_parent_hash = parent_leaf.commit(upgrade_lock).await; - let mut next_parent_hash = original_parent_hash; - - // Walk back until we find a decide - if !reached_decided { - debug!("We have not reached decide"); - while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { - if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { - break; - } - next_parent_hash = next_parent_leaf.parent_commitment(); - } - // TODO do some sort of sanity check on the view number that it matches decided - } - - Ok((parent_leaf, Arc::clone(state))) + Ok((leaf.clone(), Arc::clone(state))) } /// Validate the state and safety and liveness of a proposal then emit From 96496dc10075ee8e74c879e56e8364ecd4412eea Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 22 Oct 2024 16:38:16 -0400 Subject: [PATCH 1258/1393] Revert "gate vote commitment again (#3732)" (#3786) --- .../src/network/behaviours/dht/store.rs | 12 ++---------- types/src/simple_vote.rs | 16 +++++----------- 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/store.rs b/libp2p-networking/src/network/behaviours/dht/store.rs index cf5c22d61e..6969ced1ff 100644 --- a/libp2p-networking/src/network/behaviours/dht/store.rs +++ b/libp2p-networking/src/network/behaviours/dht/store.rs @@ -37,16 +37,8 @@ impl RecordStore for ValidatedStore where K: 'static, { - type ProvidedIter<'a> - = R::ProvidedIter<'a> - where - R: 'a, - K: 'a; - type RecordsIter<'a> - = R::RecordsIter<'a> - where - R: 'a, - K: 'a; + type ProvidedIter<'a> = R::ProvidedIter<'a> where R: 'a, K: 'a; + type RecordsIter<'a> = R::RecordsIter<'a> where R: 'a, K: 'a; // Delegate all `RecordStore` methods except `put` to the inner store delegate! { diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 1c1fcba31b..07c3139eef 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -11,7 +11,7 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData}; use anyhow::Result; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use vbs::version::{StaticVersionType, Version}; +use vbs::version::Version; use crate::{ data::Leaf, @@ -240,16 +240,10 @@ impl Committable for VersionedVoteData { fn commit(&self) -> Commitment { - if self.version < V::Marketplace::VERSION { - let bytes: [u8; 32] = self.data.commit().into(); - - Commitment::::from_raw(bytes) - } else { - committable::RawCommitmentBuilder::new("Vote") - .var_size_bytes(self.data.commit().as_ref()) - .u64(*self.view) - .finalize() - } + committable::RawCommitmentBuilder::new("Vote") + .var_size_bytes(self.data.commit().as_ref()) + .u64(*self.view) + .finalize() } } From c71daae8afa3da644abf042b67ca7e8dab49ae61 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 23 Oct 2024 12:40:09 -0400 Subject: [PATCH 1259/1393] Make `Membership::leader()` return a `Result<_>` (#3738) --- example-types/src/node_types.rs | 16 +- hotshot/Cargo.toml | 1 + hotshot/src/tasks/mod.rs | 3 +- .../traits/election/randomized_committee.rs | 5 +- .../src/traits/election/static_committee.rs | 5 +- .../static_committee_leader_two_views.rs | 6 +- .../src/traits/networking/libp2p_network.rs | 10 +- hotshot/src/types/handle.rs | 8 +- .../src/network/behaviours/dht/store.rs | 12 +- task-impls/Cargo.toml | 1 + task-impls/src/consensus/handlers.rs | 31 +- task-impls/src/consensus/mod.rs | 10 +- task-impls/src/consensus2/handlers.rs | 280 +++++++++++++++++ task-impls/src/da.rs | 170 +++++----- task-impls/src/helpers.rs | 64 ++-- task-impls/src/network.rs | 188 +++++++---- task-impls/src/quorum_proposal/handlers.rs | 21 +- task-impls/src/quorum_proposal/mod.rs | 126 ++++---- .../src/quorum_proposal_recv/handlers.rs | 14 +- task-impls/src/quorum_proposal_recv/mod.rs | 2 +- task-impls/src/quorum_vote/handlers.rs | 8 +- task-impls/src/quorum_vote/mod.rs | 155 ++++----- task-impls/src/request.rs | 2 +- task-impls/src/rewind.rs | 2 +- task-impls/src/transactions.rs | 88 +++--- task-impls/src/upgrade.rs | 127 ++++---- task-impls/src/vid.rs | 2 +- task-impls/src/view_sync.rs | 182 ++++++----- task-impls/src/vote_collection.rs | 180 +++++++---- task/Cargo.toml | 1 + task/src/task.rs | 2 +- testing/src/view_generator.rs | 12 +- testing/tests/tests_1/da_task.rs | 3 +- testing/tests/tests_1/network_task.rs | 3 +- testing/tests/tests_1/quorum_proposal_task.rs | 3 +- testing/tests/tests_1/transaction_task.rs | 3 +- .../tests_1/upgrade_task_with_proposal.rs | 3 +- testing/tests/tests_1/vid_task.rs | 3 +- .../tests/tests_1/vote_dependency_handle.rs | 3 +- testing/tests/tests_3/byzantine_tests.rs | 2 +- testing/tests/tests_3/memory_network.rs | 3 +- types/Cargo.toml | 1 + types/src/consensus.rs | 70 ++--- types/src/data.rs | 50 +-- types/src/message.rs | 14 +- types/src/simple_certificate.rs | 2 +- types/src/simple_vote.rs | 6 +- types/src/traits/election.rs | 9 +- types/src/vote.rs | 2 +- utils/Cargo.toml | 11 + utils/src/anytrace.rs | 191 ++++++++++++ utils/src/anytrace/macros.rs | 293 ++++++++++++++++++ utils/src/lib.rs | 4 + 53 files changed, 1670 insertions(+), 743 deletions(-) create mode 100644 task-impls/src/consensus2/handlers.rs create mode 100644 utils/Cargo.toml create mode 100644 utils/src/anytrace.rs create mode 100644 utils/src/anytrace/macros.rs create mode 100644 utils/src/lib.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 1ab2446c12..8884d3e7ce 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -4,12 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use crate::{ - auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::{TestInstanceState, TestValidatedState}, - storage_types::TestStorage, -}; use hotshot::traits::{ election::{ randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, @@ -18,15 +12,21 @@ use hotshot::traits::{ implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, signature_key::{BLSPubKey, BuilderKey}, traits::node_implementation::{NodeType, Versions}, }; use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; +use crate::{ + auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, +}; + #[derive( Copy, Clone, diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 874e94939d..c3e345f43a 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -58,6 +58,7 @@ sha2 = { workspace = true } url = { workspace = true } num_enum = "0.7" parking_lot = "0.12" +utils = { path = "../utils" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 33341f9013..cca95d3c76 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -11,8 +11,7 @@ pub mod task_state; use std::{fmt::Debug, sync::Arc, time::Duration}; use async_broadcast::{broadcast, RecvError}; -use async_compatibility_layer::art::async_sleep; -use async_compatibility_layer::art::async_spawn; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::{ diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index 4fed098e9c..d664e2a6e8 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -17,6 +17,7 @@ use hotshot_types::{ PeerConfig, }; use rand::{rngs::StdRng, Rng}; +use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -142,7 +143,7 @@ impl Membership for RandomizedCommittee { &self, view_number: TYPES::View, _epoch: ::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); @@ -151,7 +152,7 @@ impl Membership for RandomizedCommittee { let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) + Ok(TYPES::SignatureKey::public_key(&res)) } /// Get the total number of nodes in the committee diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 2ef52a66e2..acacc51cb6 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -16,6 +16,7 @@ use hotshot_types::{ }, PeerConfig, }; +use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -140,11 +141,11 @@ impl Membership for StaticCommittee { &self, view_number: TYPES::View, _epoch: ::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { #[allow(clippy::cast_possible_truncation)] let index = *view_number as usize % self.eligible_leaders.len(); let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) + Ok(TYPES::SignatureKey::public_key(&res)) } /// Get the total number of nodes in the committee diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index db41aad2ab..bb9574e37e 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -16,6 +16,7 @@ use hotshot_types::{ }, PeerConfig, }; +use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -140,11 +141,12 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { let index = usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) + + Ok(TYPES::SignatureKey::public_key(&res)) } /// Get the total number of nodes in the committee diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index d25fc91762..34e5992f13 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -1025,7 +1025,15 @@ impl ConnectedNetwork for Libp2pNetwork { { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = ::Epoch::new(epoch); - let future_leader = membership.leader(future_view, epoch); + let future_leader = match membership.leader(future_view, epoch) { + Ok(l) => l, + Err(e) => { + return tracing::info!( + "Failed to calculate leader for view {:?}: {e}", + future_view + ); + } + }; let _ = self .queue_node_lookup(ViewNumber::new(*future_view), future_leader) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index cf6c8ffe02..735c219420 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -8,7 +8,7 @@ use std::sync::Arc; -use anyhow::{anyhow, Ok, Result}; +use anyhow::{anyhow, Context, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; @@ -315,16 +315,20 @@ impl + 'static, V: Versions> } /// Wrapper for `HotShotConsensusApi`'s `leader` function + /// + /// # Errors + /// Returns an error if the leader cannot be calculated #[allow(clippy::unused_async)] // async for API compatibility reasons pub async fn leader( &self, view_number: TYPES::View, epoch_number: TYPES::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { self.hotshot .memberships .quorum_membership .leader(view_number, epoch_number) + .context("Failed to lookup leader") } // Below is for testing only: diff --git a/libp2p-networking/src/network/behaviours/dht/store.rs b/libp2p-networking/src/network/behaviours/dht/store.rs index 6969ced1ff..cf5c22d61e 100644 --- a/libp2p-networking/src/network/behaviours/dht/store.rs +++ b/libp2p-networking/src/network/behaviours/dht/store.rs @@ -37,8 +37,16 @@ impl RecordStore for ValidatedStore where K: 'static, { - type ProvidedIter<'a> = R::ProvidedIter<'a> where R: 'a, K: 'a; - type RecordsIter<'a> = R::RecordsIter<'a> where R: 'a, K: 'a; + type ProvidedIter<'a> + = R::ProvidedIter<'a> + where + R: 'a, + K: 'a; + type RecordsIter<'a> + = R::RecordsIter<'a> + where + R: 'a, + K: 'a; // Delegate all `RecordStore` methods except `put` to the inner store delegate! { diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 2492438ea4..c9a67c0c6a 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -38,6 +38,7 @@ tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } url = { workspace = true } +utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index fec58d8409..b6f2b843d3 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -6,7 +6,6 @@ use std::{sync::Arc, time::Duration}; -use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; @@ -19,7 +18,8 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, error, instrument}; +use tracing::instrument; +use utils::anytrace::*; use super::ConsensusTaskState; use crate::{ @@ -44,9 +44,9 @@ pub(crate) async fn handle_quorum_vote_recv< ensure!( task_state .quorum_membership - .leader(vote.view_number() + 1, task_state.cur_epoch) + .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, - format!( + info!( "We are not the leader for view {:?}", vote.view_number() + 1 ) @@ -63,7 +63,7 @@ pub(crate) async fn handle_quorum_vote_recv< sender, &task_state.upgrade_lock, ) - .await; + .await?; Ok(()) } @@ -83,9 +83,9 @@ pub(crate) async fn handle_timeout_vote_recv< ensure!( task_state .timeout_membership - .leader(vote.view_number() + 1, task_state.cur_epoch) + .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, - format!( + info!( "We are not the leader for view {:?}", vote.view_number() + 1 ) @@ -102,7 +102,7 @@ pub(crate) async fn handle_timeout_vote_recv< sender, &task_state.upgrade_lock, ) - .await; + .await?; Ok(()) } @@ -124,7 +124,7 @@ pub(crate) async fn handle_view_change< ); let old_view_number = task_state.cur_view; - debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + tracing::debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); // Move this node to the next view task_state.cur_view = new_view_number; @@ -138,7 +138,7 @@ pub(crate) async fn handle_view_change< .clone(); if let Some(cert) = decided_upgrade_certificate_read { if new_view_number == cert.data.new_version_first_view { - error!( + tracing::error!( "Version upgraded based on a decided upgrade cert: {:?}", cert ); @@ -177,7 +177,7 @@ pub(crate) async fn handle_view_change< let cur_view_time = Utc::now().timestamp(); if task_state .quorum_membership - .leader(old_view_number, task_state.cur_epoch) + .leader(old_view_number, task_state.cur_epoch)? == task_state.public_key { #[allow(clippy::cast_precision_loss)] @@ -228,7 +228,7 @@ pub(crate) async fn handle_timeout task_state .timeout_membership .has_stake(&task_state.public_key, task_state.cur_epoch), - format!("We were not chosen for the consensus committee for view {view_number:?}") + debug!("We were not chosen for the consensus committee for view {view_number:?}") ); let vote = TimeoutVote::create_signed_vote( @@ -239,7 +239,8 @@ pub(crate) async fn handle_timeout &task_state.upgrade_lock, ) .await - .context("Failed to sign TimeoutData")?; + .wrap() + .context(error!("Failed to sign TimeoutData"))?; broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; broadcast_event( @@ -251,7 +252,7 @@ pub(crate) async fn handle_timeout ) .await; - debug!( + tracing::debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view_number ); @@ -274,7 +275,7 @@ pub(crate) async fn handle_timeout .add(1); if task_state .quorum_membership - .leader(view_number, task_state.cur_epoch) + .leader(view_number, task_state.cur_epoch)? == task_state.public_key { task_state diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index fb1ec86fca..edff8f6078 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -27,6 +26,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; +use utils::anytrace::Result; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, @@ -106,7 +106,7 @@ impl, V: Versions> ConsensusTaskSt &mut self, event: Arc>, sender: Sender>>, - ) { + ) -> Result<()> { match event.as_ref() { HotShotEvent::QuorumVoteRecv(ref vote) => { if let Err(e) = @@ -149,6 +149,8 @@ impl, V: Versions> ConsensusTaskSt } _ => {} } + + Ok(()) } } @@ -164,9 +166,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } /// Joins all subtasks. diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs new file mode 100644 index 0000000000..ec87f1b159 --- /dev/null +++ b/task-impls/src/consensus2/handlers.rs @@ -0,0 +1,280 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{sync::Arc, time::Duration}; + +use utils::anytrace::{ensure, Context, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use chrono::Utc; +use hotshot_types::{ + event::{Event, EventType}, + simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, + vote::HasViewNumber, +}; +use tracing::{debug, error, instrument}; + +use super::Consensus2TaskState; +use crate::{ + consensus2::Versions, + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + vote_collection::handle_vote, +}; + +/// Handle a `QuorumVoteRecv` event. +pub(crate) async fn handle_quorum_vote_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + vote: &QuorumVote, + event: Arc>, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + // Are we the leader for this view? + ensure!( + task_state + .quorum_membership + .leader(vote.view_number() + 1)? + == task_state.public_key, + format!( + "We are not the leader for view {:?}", + vote.view_number() + 1 + ) + ); + + handle_vote( + &mut task_state.vote_collectors, + vote, + task_state.public_key.clone(), + &task_state.quorum_membership, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + ) + .await?; + + Ok(()) +} + +/// Handle a `TimeoutVoteRecv` event. +pub(crate) async fn handle_timeout_vote_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + vote: &TimeoutVote, + event: Arc>, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + // Are we the leader for this view? + ensure!( + task_state + .timeout_membership + .leader(vote.view_number() + 1)? + == task_state.public_key, + format!( + "We are not the leader for view {:?}", + vote.view_number() + 1 + ) + ); + + handle_vote( + &mut task_state.timeout_vote_collectors, + vote, + task_state.public_key.clone(), + &task_state.quorum_membership, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + ) + .await?; + + Ok(()) +} + +/// Handle a `ViewChange` event. +#[instrument(skip_all)] +pub(crate) async fn handle_view_change< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + new_view_number: TYPES::Time, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + ensure!( + new_view_number > task_state.cur_view, + "New view is not larger than the current view" + ); + + let old_view_number = task_state.cur_view; + debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + + // Move this node to the next view + task_state.cur_view = new_view_number; + + // If we have a decided upgrade certificate, the protocol version may also have been upgraded. + let decided_upgrade_certificate_read = task_state + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone(); + if let Some(cert) = decided_upgrade_certificate_read { + if new_view_number == cert.data.new_version_first_view { + error!( + "Version upgraded based on a decided upgrade cert: {:?}", + cert + ); + } + } + + // Spawn a timeout task if we did actually update view + let timeout = task_state.timeout; + let new_timeout_task = async_spawn({ + let stream = sender.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = new_view_number + 1; + async move { + async_sleep(Duration::from_millis(timeout)).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + }); + + // Cancel the old timeout task + cancel_task(std::mem::replace( + &mut task_state.timeout_task, + new_timeout_task, + )) + .await; + + let consensus = task_state.consensus.read().await; + consensus + .metrics + .current_view + .set(usize::try_from(task_state.cur_view.u64()).unwrap()); + let cur_view_time = Utc::now().timestamp(); + if task_state.quorum_membership.leader(old_view_number)? == task_state.public_key { + #[allow(clippy::cast_precision_loss)] + consensus + .metrics + .view_duration_as_leader + .add_point((cur_view_time - task_state.cur_view_time) as f64); + } + task_state.cur_view_time = cur_view_time; + + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(task_state.cur_view.u64()).unwrap() + > usize::try_from(task_state.last_decided_view.u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(task_state.cur_view.u64()).unwrap() + - usize::try_from(task_state.last_decided_view.u64()).unwrap(), + ); + } + + broadcast_event( + Event { + view_number: old_view_number, + event: EventType::ViewFinished { + view_number: old_view_number, + }, + }, + &task_state.output_event_stream, + ) + .await; + Ok(()) +} + +/// Handle a `Timeout` event. +#[instrument(skip_all)] +pub(crate) async fn handle_timeout, V: Versions>( + view_number: TYPES::Time, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + ensure!( + task_state.cur_view < view_number, + "Timeout event is for an old view" + ); + + ensure!( + task_state + .timeout_membership + .has_stake(&task_state.public_key), + format!("We were not chosen for the consensus committee for view {view_number:?}") + ); + + let vote = TimeoutVote::create_signed_vote( + TimeoutData:: { view: view_number }, + view_number, + &task_state.public_key, + &task_state.private_key, + &task_state.upgrade_lock, + ) + .await + .context("Failed to sign TimeoutData")?; + + broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; + broadcast_event( + Event { + view_number, + event: EventType::ViewTimeout { view_number }, + }, + &task_state.output_event_stream, + ) + .await; + + debug!( + "We did not receive evidence for view {} in time, sending timeout vote for that view!", + *view_number + ); + + broadcast_event( + Event { + view_number, + event: EventType::ReplicaViewTimeout { view_number }, + }, + &task_state.output_event_stream, + ) + .await; + + task_state + .consensus + .read() + .await + .metrics + .number_of_timeouts + .add(1); + if task_state.quorum_membership.leader(view_number)? == task_state.public_key { + task_state + .consensus + .read() + .await + .metrics + .number_of_timeouts_as_leader + .add(1); + } + + Ok(()) +} diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 2e7c1357ff..5ccb50e091 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -6,7 +6,6 @@ use std::{marker::PhantomData, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -35,10 +34,11 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, + events::HotShotEvent, helpers::broadcast_event, vote_collection::{handle_vote, VoteCollectorsMap}, }; @@ -94,11 +94,11 @@ impl, V: Versions> DaTaskState>, event_stream: Sender>>, - ) -> Option { + ) -> Result<()> { match event.as_ref() { HotShotEvent::DaProposalRecv(proposal, sender) => { let sender = sender.clone(); - debug!( + tracing::debug!( "DA proposal received for view: {:?}", proposal.data.view_number() ); @@ -111,35 +111,40 @@ impl, V: Versions> DaTaskState= self.cur_view - 1, + "Throwing away DA proposal that is more than one view older" + ); - if self - .consensus - .read() - .await - .saved_payloads() - .contains_key(&view) - { - warn!("Received DA proposal for view {:?} but we already have a payload for that view. Throwing it away", view); - return None; - } + ensure!( + !self + .consensus + .read() + .await + .saved_payloads() + .contains_key(&view), + info!( + "Received DA proposal for view {:?} but we already have a payload for that view. Throwing it away", + view + ) + ); let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); - // ED Is this the right leader? - let view_leader_key = self.da_membership.leader(view, self.cur_epoch); - if view_leader_key != sender { - error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); - return None; - } + let view_leader_key = self.da_membership.leader(view, self.cur_epoch)?; + ensure!( + view_leader_key == sender, + warn!( + "DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", + *view, + proposal.data.clone() + ) + ); - if !view_leader_key.validate(&proposal.signature, &encoded_transactions_hash) { - error!("Could not verify proposal."); - return None; - } + ensure!( + view_leader_key.validate(&proposal.signature, &encoded_transactions_hash), + warn!("Could not verify proposal.") + ); broadcast_event( Arc::new(HotShotEvent::DaProposalValidated(proposal.clone(), sender)), @@ -149,10 +154,15 @@ impl, V: Versions> DaTaskState { let curr_view = self.consensus.read().await.cur_view(); - if curr_view > proposal.data.view_number() + 1 { - tracing::debug!("Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", curr_view, proposal.data.view_number()); - return None; - } + ensure!( + curr_view <= proposal.data.view_number() + 1, + debug!( + "Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", + curr_view, + proposal.data.view_number() + ) + ); + // Proposal is fresh and valid, notify the application layer broadcast_event( Event { @@ -166,23 +176,23 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState { - debug!("DA vote recv, Main Task {:?}", vote.view_number()); + tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader and the vote is from the sender. let view = vote.view_number(); - if self.da_membership.leader(view, self.cur_epoch) != self.public_key { - error!("We are not the DA committee leader for view {} are we leader for next view? {}", *view, self.da_membership.leader(view + 1, self.cur_epoch) == self.public_key); - return None; - } + + ensure!( + self.da_membership.leader(view, self.cur_epoch)? == self.public_key, + debug!( + "We are not the DA committee leader for view {} are we leader for next view? {}", + *view, + self.da_membership.leader(view + 1, self.cur_epoch)? == self.public_key + ) + ); handle_vote( &mut self.vote_collectors, @@ -284,26 +295,29 @@ impl, V: Versions> DaTaskState { let view = *view; - if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { - return None; - } + + ensure!( + *self.cur_view < *view, + info!("Received a view change to an older view.") + ); if *view - *self.cur_view > 1 { - info!("View changed by more than 1 going to view {:?}", view); + tracing::info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; // If we are not the next leader (DA leader for this view) immediately exit - if self.da_membership.leader(self.cur_view + 1, self.cur_epoch) != self.public_key { - return None; - } - debug!("Polling for DA votes for view {}", *self.cur_view + 1); + ensure!( + self.da_membership + .leader(self.cur_view + 1, self.cur_epoch)? + == self.public_key + ); - return None; + tracing::debug!("Polling for DA votes for view {}", *self.cur_view + 1); } HotShotEvent::BlockRecv(packed_bundle) => { let PackedBundle:: { @@ -318,12 +332,9 @@ impl, V: Versions> DaTaskState = DaProposal { encoded_transactions: Arc::clone(encoded_transactions), @@ -347,14 +358,9 @@ impl, V: Versions> DaTaskState { - error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted); - } _ => {} } - None + Ok(()) } } @@ -371,9 +377,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } async fn cancel_subtasks(&mut self) {} diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 268b43b2d3..a3cd151971 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -10,7 +10,6 @@ use std::{ sync::Arc, }; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; @@ -39,7 +38,8 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, info, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use crate::{ events::HotShotEvent, quorum_proposal_recv::QuorumProposalRecvTaskState, @@ -70,7 +70,9 @@ pub(crate) async fn fetch_proposal( let signature = TYPES::SignatureKey::sign( &sender_private_key, signed_proposal_request.commit().as_ref(), - )?; + ) + .wrap() + .context(error!("Failed to sign proposal. This should never happen."))?; // First, broadcast that we need a proposal to the current leader broadcast_event( @@ -298,9 +300,11 @@ pub async fn decide_from_proposal( if let Some(cert) = leaf.upgrade_certificate() { if leaf.upgrade_certificate() != *existing_upgrade_cert_reader { if cert.data.decide_by < view_number { - warn!("Failed to decide an upgrade certificate in time. Ignoring."); + tracing::warn!( + "Failed to decide an upgrade certificate in time. Ignoring." + ); } else { - info!("Reached decide on upgrade certificate: {:?}", cert); + tracing::info!("Reached decide on upgrade certificate: {:?}", cert); res.decided_upgrade_cert = Some(cert.clone()); } } @@ -346,13 +350,13 @@ pub async fn decide_from_proposal( true }, ) { - debug!("Leaf ascension failed; error={e}"); + tracing::debug!("Leaf ascension failed; error={e}"); } res } -/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. +/// Gets the parent leaf and state from the parent of a proposal, returning an [`utils::anytrace::Error`] if not. #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( @@ -367,8 +371,11 @@ pub(crate) async fn parent_leaf_and_state( ) -> Result<(Leaf, Arc<::ValidatedState>)> { let current_epoch = consensus.read().await.cur_epoch(); ensure!( - quorum_membership.leader(next_proposal_view_number, current_epoch) == public_key, - "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", + quorum_membership.leader(next_proposal_view_number, current_epoch)? == public_key, + info!( + "Somehow we formed a QC but are not the leader for the next view {:?}", + next_proposal_view_number + ) ); let parent_view_number = consensus.read().await.high_qc().view_number(); if !consensus @@ -388,22 +395,21 @@ pub(crate) async fn parent_leaf_and_state( upgrade_lock, ) .await - .context("Failed to fetch proposal")?; + .context(info!("Failed to fetch proposal"))?; } let consensus_reader = consensus.read().await; let parent_view_number = consensus_reader.high_qc().view_number(); let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( - format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) + debug!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) )?; - // Leaf hash in view inner does not match high qc hash - Why? let (leaf_commitment, state) = parent_view.leaf_and_state().context( - format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") + info!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") )?; if leaf_commitment != consensus_reader.high_qc().data().leaf_commit { // NOTE: This happens on the genesis block - debug!( + tracing::debug!( "They don't equal: {:?} {:?}", leaf_commitment, consensus_reader.high_qc().data().leaf_commit @@ -413,7 +419,7 @@ pub(crate) async fn parent_leaf_and_state( let leaf = consensus_reader .saved_leaves() .get(&leaf_commitment) - .context("Failed to find high QC of parent")?; + .context(info!("Failed to find high QC of parent"))?; Ok((leaf.clone(), Arc::clone(state))) } @@ -531,7 +537,7 @@ pub async fn validate_proposal_safety_and_liveness< .await; } - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) }); } @@ -542,7 +548,9 @@ pub async fn validate_proposal_safety_and_liveness< .write() .await .append_proposal(&proposal) - .await?; + .await + .wrap() + .context(error!("Failed to append proposal in storage!"))?; // We accept the proposal, notify the application layer broadcast_event( @@ -603,7 +611,7 @@ pub async fn validate_proposal_view_and_certs< // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.view_number() != view - 1 { let received_proposal_cert = - proposal.data.proposal_certificate.clone().context(format!( + proposal.data.proposal_certificate.clone().context(debug!( "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", *view ))?; @@ -667,7 +675,7 @@ pub async fn validate_proposal_view_and_certs< /// `timeout_task` which are updated during the operation of the function. /// /// # Errors -/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. +/// Returns an [`utils::anytrace::Error`] when the new view is not greater than the current view. pub(crate) async fn update_view, V: Versions>( new_view: TYPES::View, event_stream: &Sender>>, @@ -680,14 +688,14 @@ pub(crate) async fn update_view, V let is_old_view_leader = task_state .quorum_membership - .leader(task_state.cur_view, task_state.cur_epoch) + .leader(task_state.cur_view, task_state.cur_epoch)? == task_state.public_key; let old_view = task_state.cur_view; - debug!("Updating view from {} to {}", *old_view, *new_view); + tracing::debug!("Updating view from {} to {}", *old_view, *new_view); if *old_view / 100 != *new_view / 100 { - info!("Progress: entered view {:>6}", *new_view); + tracing::info!("Progress: entered view {:>6}", *new_view); } task_state.cur_view = new_view; @@ -792,15 +800,3 @@ pub async fn broadcast_event(event: E, sender: &Send } } } - -/// Utilities to print anyhow logs. -pub trait AnyhowTracing { - /// Print logs as debug - fn err_as_debug(self); -} - -impl AnyhowTracing for anyhow::Result { - fn err_as_debug(self) { - let _ = self.inspect_err(|e| tracing::debug!("{}", format!("{:?}", e))); - } -} diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 5b897c7b4f..d8ef530b54 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -6,7 +6,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -31,7 +30,8 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use tracing::{error, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -104,7 +104,7 @@ impl NetworkMessageTaskState { HotShotEvent::UpgradeProposalRecv(message, sender) } GeneralConsensusMessage::UpgradeVote(message) => { - error!("Received upgrade vote!"); + tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } }, @@ -270,7 +270,7 @@ impl< let serialized_message = match self.upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, Err(e) => { - error!("Failed to serialize message: {}", e); + tracing::error!("Failed to serialize message: {}", e); continue; } }; @@ -295,7 +295,7 @@ impl< } match net.vid_broadcast_message(messages).await { Ok(()) => {} - Err(e) => warn!("Failed to send message from network task: {:?}", e), + Err(e) => tracing::warn!("Failed to send message from network task: {:?}", e), } }); @@ -308,16 +308,16 @@ impl< storage: Arc>, state: Arc>>, view: ::View, - ) -> Result<(), ()> { + ) -> std::result::Result<(), ()> { if let Some(action) = maybe_action { if !state.write().await.update_action(action, view) { - warn!("Already actioned {:?} in view {:?}", action, view); + tracing::warn!("Already actioned {:?} in view {:?}", action, view); return Err(()); } match storage.write().await.record_action(view, action).await { Ok(()) => Ok(()), Err(e) => { - warn!("Not Sending {:?} because of storage error: {:?}", action, e); + tracing::warn!("Not Sending {:?} because of storage error: {:?}", action, e); Err(()) } } @@ -355,15 +355,25 @@ impl< // ED Each network task is subscribed to all these message types. Need filters per network task HotShotEvent::QuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); + let view_number = vote.view_number() + 1; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + 1, self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( @@ -396,15 +406,25 @@ impl< } HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); + let view_number = vote.view_number(); + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number(), self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::DacSend(certificate, sender) => { @@ -417,36 +437,72 @@ impl< TransmitType::Broadcast, )) } - HotShotEvent::ViewSyncPreCommitVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), - )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + vote.date().relay, self.epoch), - ), - )), - HotShotEvent::ViewSyncCommitVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), - )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + vote.date().relay, self.epoch), - ), - )), - HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), - )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + vote.date().relay, self.epoch), - ), - )), + HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { + let view_number = vote.view_number() + vote.date().relay; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), + )), + TransmitType::Direct(leader), + )) + } + HotShotEvent::ViewSyncCommitVoteSend(vote) => { + let view_number = vote.view_number() + vote.date().relay; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), + )), + TransmitType::Direct(leader), + )) + } + HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { + let view_number = vote.view_number() + vote.date().relay; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), + )), + TransmitType::Direct(leader), + )) + } HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( @@ -470,15 +526,24 @@ impl< )), HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); + let view_number = vote.view_number() + 1; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + 1, self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( @@ -489,16 +554,25 @@ impl< TransmitType::Broadcast, )), HotShotEvent::UpgradeVoteSend(vote) => { - error!("Sending upgrade vote!"); + tracing::error!("Sending upgrade vote!"); + let view_number = vote.view_number(); + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number(), self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::ViewChange(view) => { @@ -581,7 +655,7 @@ impl< let serialized_message = match upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, Err(e) => { - error!("Failed to serialize message: {}", e); + tracing::error!("Failed to serialize message: {}", e); return; } }; @@ -606,7 +680,7 @@ impl< match transmit_result { Ok(()) => {} - Err(e) => warn!("Failed to send message task: {:?}", e), + Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index bcdc568e18..6ee2c2579e 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -9,7 +9,6 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; -use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -26,7 +25,8 @@ use hotshot_types::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, }; -use tracing::{debug, error, instrument}; +use tracing::instrument; +use utils::anytrace::*; use vbs::version::StaticVersionType; use crate::{ @@ -182,7 +182,8 @@ impl ProposalDependencyHandle { version, ) .await - .context("Failed to construct legacy block header")? + .wrap() + .context(warn!("Failed to construct legacy block header"))? } else { TYPES::BlockHeader::new_marketplace( state.as_ref(), @@ -197,7 +198,8 @@ impl ProposalDependencyHandle { version, ) .await - .context("Failed to construct marketplace block header")? + .wrap() + .context(warn!("Failed to construct marketplace block header"))? }; let proposal = QuorumProposal { @@ -218,14 +220,15 @@ impl ProposalDependencyHandle { &self.private_key, proposed_leaf.commit(&self.upgrade_lock).await.as_ref(), ) - .context("Failed to compute proposed_leaf.commit()")?; + .wrap() + .context(error!("Failed to compute proposed_leaf.commit()"))?; let message = Proposal { data: proposal, signature, _pd: PhantomData, }; - debug!( + tracing::debug!( "Sending proposal for view {:?}", proposed_leaf.view_number(), ); @@ -335,14 +338,14 @@ impl HandleDepOutput for ProposalDependencyHandle< } if commit_and_metadata.is_none() { - error!( + tracing::error!( "Somehow completed the proposal dependency task without a commitment and metadata" ); return; } if vid_share.is_none() { - error!("Somehow completed the proposal dependency task without a VID share"); + tracing::error!("Somehow completed the proposal dependency task without a VID share"); return; } @@ -362,7 +365,7 @@ impl HandleDepOutput for ProposalDependencyHandle< ) .await { - error!("Failed to publish proposal; error = {e:#}"); + tracing::error!("Failed to publish proposal; error = {e:#}"); } } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 7390427f09..022b608698 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -6,7 +6,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -33,7 +32,8 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; use crate::{ @@ -171,7 +171,9 @@ impl, V: Versions> }; let valid = event_view == view_number; if valid { - debug!("Dependency {dependency_type:?} is complete for view {event_view:?}!",); + tracing::debug!( + "Dependency {dependency_type:?} is complete for view {event_view:?}!", + ); } valid }), @@ -288,24 +290,27 @@ impl, V: Versions> event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, - ) { + ) -> Result<()> { // Don't even bother making the task if we are not entitled to propose anyway. - if self.quorum_membership.leader(view_number, epoch_number) != self.public_key { - tracing::trace!("We are not the leader of the next view"); - return; - } + ensure!( + self.quorum_membership.leader(view_number, epoch_number)? == self.public_key, + debug!("We are not the leader of the next view") + ); // Don't try to propose twice for the same view. - if view_number <= self.latest_proposed_view { - tracing::trace!("We have already proposed for this view"); - return; - } + ensure!( + view_number > self.latest_proposed_view, + "We have already proposed for this view" + ); - debug!("Attempting to make dependency task for view {view_number:?} and event {event:?}"); - if self.proposal_dependencies.contains_key(&view_number) { - debug!("Task already exists"); - return; - } + tracing::debug!( + "Attempting to make dependency task for view {view_number:?} and event {event:?}" + ); + + ensure!( + !self.proposal_dependencies.contains_key(&view_number), + "Task already exists" + ); let dependency_chain = self.create_and_complete_dependencies(view_number, &event_receiver, event); @@ -330,15 +335,18 @@ impl, V: Versions> ); self.proposal_dependencies .insert(view_number, dependency_task.run()); + + Ok(()) } /// Update the latest proposed view number. #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Update latest proposed view", level = "error")] async fn update_latest_proposed_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_proposed_view < *new_view { - debug!( + tracing::debug!( "Updating latest proposed view from {} to {}", - *self.latest_proposed_view, *new_view + *self.latest_proposed_view, + *new_view ); // Cancel the old dependency tasks. @@ -363,17 +371,17 @@ impl, V: Versions> event: Arc>, event_receiver: Receiver>>, event_sender: Sender>>, - ) { + ) -> Result<()> { match event.as_ref() { HotShotEvent::UpgradeCertificateFormed(cert) => { - debug!( + tracing::debug!( "Upgrade certificate received for view {}!", *cert.view_number ); // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. if cert.data.decide_by >= self.latest_proposed_view + 3 { - debug!("Updating current formed_upgrade_certificate"); + tracing::debug!("Updating current formed_upgrade_certificate"); self.formed_upgrade_certificate = Some(cert.clone()); } @@ -389,7 +397,7 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } either::Left(qc) => { // Only update if the qc is from a newer view @@ -422,24 +430,24 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { let epoch_number = self.consensus.read().await.cur_epoch(); - if !certificate - .is_valid_cert( - self.quorum_membership.as_ref(), - epoch_number, - &self.upgrade_lock, - ) - .await - { + + ensure!( + certificate + .is_valid_cert( + self.quorum_membership.as_ref(), + epoch_number, + &self.upgrade_lock + ) + .await, warn!( "View Sync Finalize certificate {:?} was invalid", certificate.data() - ); - return; - } + ) + ); let view_number = certificate.view_number; @@ -449,7 +457,7 @@ impl, V: Versions> event_receiver, event_sender, event, - ); + )?; } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); @@ -466,14 +474,15 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } HotShotEvent::QuorumProposalSend(proposal, _) => { let view = proposal.data.view_number(); - if !self.update_latest_proposed_view(view).await { - tracing::trace!("Failed to update latest proposed view"); - return; - } + + ensure!( + self.update_latest_proposed_view(view).await, + "Failed to update latest proposed view" + ); } HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); @@ -485,17 +494,27 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } HotShotEvent::UpdateHighQc(qc) => { - // First, update the high QC. - if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { - tracing::trace!("Failed to update high qc; error = {e}"); - } - - if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { - warn!("Failed to store High QC of QC we formed; error = {:?}", e); - } + // First update the high QC internally + self.consensus + .write() + .await + .update_high_qc(qc.clone()) + .wrap() + .context(error!( + "Failed to update high QC in internal consensus state!" + ))?; + + // Then update the high QC in storage + self.storage + .write() + .await + .update_high_qc(qc.clone()) + .await + .wrap() + .context(error!("Failed to update high QC in storage!"))?; broadcast_event( HotShotEvent::HighQcUpdated(qc.clone()).into(), @@ -512,10 +531,11 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } _ => {} } + Ok(()) } } @@ -531,9 +551,7 @@ impl, V: Versions> TaskState sender: &Sender>, receiver: &Receiver>, ) -> Result<()> { - self.handle(event, receiver.clone(), sender.clone()).await; - - Ok(()) + self.handle(event, receiver.clone(), sender.clone()).await } async fn cancel_subtasks(&mut self) { diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 2193bf3dfd..3d1957c84a 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -8,7 +8,6 @@ use std::sync::Arc; -use anyhow::{bail, Context, Result}; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; @@ -26,7 +25,8 @@ use hotshot_types::{ utils::{View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use tracing::{debug, error, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use super::QuorumProposalRecvTaskState; use crate::{ @@ -78,7 +78,7 @@ async fn validate_proposal_liveness(view_number, event_sender, task_state).await { - debug!("Liveness Branch - Failed to update view; error = {e:#}"); + tracing::debug!("Liveness Branch - Failed to update view; error = {e:#}"); } if !liveness_check { @@ -128,7 +128,7 @@ pub(crate) async fn handle_quorum_proposal_recv< validate_proposal_view_and_certs(proposal, task_state) .await - .context("Failed to validate proposal view or attached certs")?; + .context(warn!("Failed to validate proposal view or attached certs"))?; let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); @@ -220,7 +220,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .await; let Some((parent_leaf, _parent_state)) = parent else { - warn!( + tracing::warn!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.data.leaf_commit ); @@ -239,7 +239,7 @@ pub(crate) async fn handle_quorum_proposal_recv< // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here if let Err(e) = update_view::(view_number, event_sender, task_state).await { - debug!("Full Branch - Failed to update view; error = {e:#}"); + tracing::debug!("Full Branch - Failed to update view; error = {e:#}"); } Ok(()) diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 281e472fd0..7c3ab2ed24 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -8,7 +8,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use anyhow::{bail, Result}; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -31,6 +30,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; +use utils::anytrace::{bail, Result}; use vbs::version::Version; use self::handlers::handle_quorum_proposal_recv; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index d414573e19..656737524f 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use anyhow::Result; use async_broadcast::Sender; use chrono::Utc; use hotshot_types::{ @@ -19,7 +18,8 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, instrument}; +use tracing::instrument; +use utils::anytrace::*; use super::QuorumVoteTaskState; use crate::{ @@ -115,7 +115,7 @@ pub(crate) async fn handle_quorum_proposal_validated< .number_of_views_per_decide_event .add_point(cur_number_of_views_per_decide_event as f64); - debug!( + tracing::debug!( "Sending Decide for view {:?}", consensus_writer.last_decided_view() ); @@ -139,7 +139,7 @@ pub(crate) async fn handle_quorum_proposal_validated< .await; broadcast_event(Arc::new(HotShotEvent::LeafDecided(leaves_decided)), sender).await; - debug!("Successfully sent decide event"); + tracing::debug!("Successfully sent decide event"); } Ok(()) diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 7d0fa4833b..9285e627ab 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -6,7 +6,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -38,7 +37,8 @@ use hotshot_types::{ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, trace, warn}; +use tracing::instrument; +use utils::anytrace::*; use crate::{ events::HotShotEvent, @@ -123,7 +123,7 @@ impl + 'static, V: Versions> .await .ok(), }; - let parent = maybe_parent.context(format!( + let parent = maybe_parent.context(info!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", justify_qc.data().leaf_commit, proposed_leaf.view_number(), @@ -131,7 +131,7 @@ impl + 'static, V: Versions> let consensus_reader = self.consensus.read().await; let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { - bail!("Parent state not found! Consensus internally inconsistent") + bail!("Parent state not found! Consensus internally inconsistent"); }; drop(consensus_reader); @@ -147,7 +147,8 @@ impl + 'static, V: Versions> version, ) .await - .context("Block header doesn't extend the proposal!")?; + .wrap() + .context(warn!("Block header doesn't extend the proposal!"))?; let state = Arc::new(validated_state); let delta = Arc::new(state_delta); @@ -189,7 +190,9 @@ impl + 'static, V: Versions> .write() .await .update_undecided_state(new_leaves, new_state) - .await?; + .await + .wrap() + .context(error!("Failed to update undecided state"))?; Ok(()) } @@ -204,10 +207,10 @@ impl + 'static, V: Versions> ensure!( self.quorum_membership .has_stake(&self.public_key, self.epoch_number), - format!( + info!( "We were not chosen for quorum committee on {:?}", self.view_number - ), + ) ); // Create and send the vote. @@ -221,8 +224,9 @@ impl + 'static, V: Versions> &self.upgrade_lock, ) .await - .context("Failed to sign vote")?; - debug!( + .wrap() + .context(error!("Failed to sign vote. This should never happen."))?; + tracing::debug!( "sending vote to next quorum leader {:?}", vote.view_number() + 1 ); @@ -232,7 +236,8 @@ impl + 'static, V: Versions> .await .append_vid(&vid_share) .await - .context("Failed to store VID share")?; + .wrap() + .context(error!("Failed to store VID share"))?; broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; Ok(()) @@ -283,7 +288,7 @@ impl + 'static, V: Versions> Handl let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { - error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); + tracing::error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); return; } } else { @@ -292,7 +297,7 @@ impl + 'static, V: Versions> Handl let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; let proposed_leaf = Leaf::from_quorum_proposal(proposal); if proposed_leaf.parent_commitment() != parent_commitment { - warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); + tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } leaf = Some(proposed_leaf); @@ -301,7 +306,7 @@ impl + 'static, V: Versions> Handl let cert_payload_comm = cert.data().payload_commit; if let Some(comm) = payload_commitment { if cert_payload_comm != comm { - error!("DAC has inconsistent payload commitment with quorum proposal or VID."); + tracing::error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } } else { @@ -313,7 +318,7 @@ impl + 'static, V: Versions> Handl vid_share = Some(share.clone()); if let Some(comm) = payload_commitment { if vid_payload_commitment != comm { - error!("VID has inconsistent payload commitment with quorum proposal or DAC."); + tracing::error!("VID has inconsistent payload commitment with quorum proposal or DAC."); return; } } else { @@ -332,7 +337,7 @@ impl + 'static, V: Versions> Handl .await; let Some(vid_share) = vid_share else { - error!( + tracing::error!( "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", self.view_number ); @@ -340,7 +345,7 @@ impl + 'static, V: Versions> Handl }; let Some(leaf) = leaf else { - error!( + tracing::error!( "We don't have the leaf for this view {:?}, but we should, because the vote dependencies have completed.", self.view_number ); @@ -349,12 +354,12 @@ impl + 'static, V: Versions> Handl // Update internal state if let Err(e) = self.update_shared_state(&leaf, &vid_share).await { - error!("Failed to update shared consensus state; error = {e:#}"); + tracing::error!("Failed to update shared consensus state; error = {e:#}"); return; } if let Err(e) = self.submit_vote(leaf, vid_share).await { - debug!("Failed to vote; error = {e:#}"); + tracing::debug!("Failed to vote; error = {e:#}"); } } } @@ -440,7 +445,7 @@ impl, V: Versions> QuorumVoteTaskS } }; if event_view == view_number { - trace!("Vote dependency {:?} completed", dependency_type); + tracing::trace!("Vote dependency {:?} completed", dependency_type); return true; } false @@ -512,16 +517,17 @@ impl, V: Versions> QuorumVoteTaskS #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote update latest voted view", level = "error")] async fn update_latest_voted_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_voted_view < *new_view { - debug!( + tracing::debug!( "Updating next vote view from {} to {} in the quorum vote task", - *self.latest_voted_view, *new_view + *self.latest_voted_view, + *new_view ); // Cancel the old dependency tasks. for view in *self.latest_voted_view..(*new_view) { if let Some(dependency) = self.vote_dependencies.remove(&TYPES::View::new(view)) { cancel_task(dependency).await; - debug!("Vote dependency removed for view {:?}", view); + tracing::debug!("Vote dependency removed for view {:?}", view); } } @@ -539,17 +545,20 @@ impl, V: Versions> QuorumVoteTaskS event: Arc>, event_receiver: Receiver>>, event_sender: Sender>>, - ) { + ) -> Result<()> { let current_epoch = self.consensus.read().await.cur_epoch(); + match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { - trace!("Received Proposal for view {}", *proposal.view_number()); + tracing::trace!("Received Proposal for view {}", *proposal.view_number()); // Handle the event before creating the dependency task. if let Err(e) = handle_quorum_proposal_validated(proposal, &event_sender, self).await { - debug!("Failed to handle QuorumProposalValidated event; error = {e:#}"); + tracing::debug!( + "Failed to handle QuorumProposalValidated event; error = {e:#}" + ); } self.create_dependency_task_if_new( @@ -562,23 +571,25 @@ impl, V: Versions> QuorumVoteTaskS } HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; - trace!("Received DAC for view {}", *view); - if view <= self.latest_voted_view { - return; - } + + tracing::trace!("Received DAC for view {}", *view); + // Do nothing if the DAC is old + ensure!( + view > self.latest_voted_view, + "Received DAC for an older view." + ); let current_epoch = self.consensus.read().await.cur_epoch(); // Validate the DAC. - if !cert - .is_valid_cert( + ensure!( + cert.is_valid_cert( self.da_membership.as_ref(), current_epoch, - &self.upgrade_lock, + &self.upgrade_lock ) - .await - { - return; - } + .await, + warn!("Invalid DAC") + ); // Add to the storage. self.consensus @@ -601,51 +612,43 @@ impl, V: Versions> QuorumVoteTaskS } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); - trace!("Received VID share for view {}", *view); - if view <= self.latest_voted_view { - return; - } + // Do nothing if the VID share is old + tracing::trace!("Received VID share for view {}", *view); + ensure!( + view > self.latest_voted_view, + "Received VID share for an older view." + ); // Validate the VID share. let payload_commitment = disperse.data.payload_commitment; let current_epoch = self.consensus.read().await.cur_epoch(); - // Check sender of VID disperse share is signed by DA committee member - let validate_sender = sender - .validate(&disperse.signature, payload_commitment.as_ref()) - && self - .da_membership + + // Check that the signature is valid + ensure!( + sender.validate(&disperse.signature, payload_commitment.as_ref()), + "VID share signature is invalid" + ); + + // ensure that the VID share was sent by a DA member OR the view leader + ensure!( + self.da_membership .committee_members(view, current_epoch) - .contains(sender); - - // Check whether the data satisfies one of the following. - // * From the right leader for this view. - // * Calculated and signed by the current node. - let validated = self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) - || self - .quorum_membership - .leader(view, current_epoch) - .validate(&disperse.signature, payload_commitment.as_ref()); - if !validate_sender && !validated { - warn!("Failed to validated the VID dispersal/share sig."); - return; - } + .contains(sender) + || *sender == self.quorum_membership.leader(view, current_epoch)?, + "VID share was not sent by a DA member or the view leader." + ); // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results - #[allow(clippy::no_effect)] match vid_scheme(self.quorum_membership.total_nodes(current_epoch)).verify_share( &disperse.data.share, &disperse.data.common, &payload_commitment, ) { Ok(Err(())) | Err(_) => { - return; - } - Ok(Ok(())) => { - (); + bail!("Failed to verify VID share"); } + Ok(Ok(())) => {} } self.consensus @@ -653,10 +656,10 @@ impl, V: Versions> QuorumVoteTaskS .await .update_vid_shares(view, disperse.clone()); - if disperse.data.recipient_key != self.public_key { - debug!("Got a Valid VID share but it's not for our key"); - return; - } + ensure!( + disperse.data.recipient_key == self.public_key, + "Got a Valid VID share but it's not for our key" + ); broadcast_event( Arc::new(HotShotEvent::VidShareValidated(disperse.clone())), @@ -672,10 +675,9 @@ impl, V: Versions> QuorumVoteTaskS ); } HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { - debug!("All vote dependencies verified for view {:?}", view_number); + tracing::debug!("All vote dependencies verified for view {:?}", view_number); if !self.update_latest_voted_view(*view_number).await { - debug!("view not updated"); - return; + tracing::debug!("view not updated"); } } HotShotEvent::Timeout(view) => { @@ -697,6 +699,7 @@ impl, V: Versions> QuorumVoteTaskS } _ => {} } + Ok(()) } } @@ -712,9 +715,7 @@ impl, V: Versions> TaskState sender: &Sender>, receiver: &Receiver>, ) -> Result<()> { - self.handle(event, receiver.clone(), sender.clone()).await; - - Ok(()) + self.handle(event, receiver.clone(), sender.clone()).await } async fn cancel_subtasks(&mut self) { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 8cd336e7b1..c7bc3c8957 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -13,7 +13,6 @@ use std::{ time::Duration, }; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] @@ -38,6 +37,7 @@ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; +use utils::anytrace::Result; use crate::{events::HotShotEvent, helpers::broadcast_event}; diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index 669b410b52..9ae424b62b 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -6,11 +6,11 @@ use std::{fs::OpenOptions, io::Write, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::traits::node_implementation::NodeType; +use utils::anytrace::Result; use crate::events::HotShotEvent; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 354d9d1fd6..72bcd8f78b 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -9,7 +9,6 @@ use std::{ time::{Duration, Instant}, }; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_timeout}; use async_trait::async_trait; @@ -32,8 +31,9 @@ use hotshot_types::{ utils::ViewInner, vid::{VidCommitment, VidPrecomputeData}, }; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; use url::Url; +use utils::anytrace::*; use vbs::version::{StaticVersionType, Version}; use vec1::Vec1; @@ -149,7 +149,7 @@ impl, V: Versions> TransactionTask let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(err) => { - error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; } }; @@ -191,7 +191,7 @@ impl, V: Versions> TransactionTask .await; } else { // If we couldn't get a block, send an empty block - info!( + tracing::info!( "Failed to get a block for view {:?}, proposing empty block", block_view ); @@ -209,7 +209,7 @@ impl, V: Versions> TransactionTask self.membership.total_nodes(self.cur_epoch), version, ) else { - error!("Failed to get null fee"); + tracing::error!("Failed to get null fee"); return None; }; @@ -254,13 +254,14 @@ impl, V: Versions> TransactionTask .await .as_ref() .is_some_and(|cert| cert.upgrading_in(block_view)), - "Not requesting block because we are upgrading", + info!("Not requesting block because we are upgrading") ); let (parent_view, parent_hash) = self .last_vid_commitment_retry(block_view, task_start_time) .await - .context("Failed to find parent hash in time")?; + .wrap() + .context(warn!("Failed to find parent hash in time"))?; let start = Instant::now(); @@ -270,10 +271,11 @@ impl, V: Versions> TransactionTask .fetch_auction_result(block_view), ) .await - .context("Timeout while getting auction result")?; + .wrap() + .context(warn!("Timeout while getting auction result"))?; let auction_result = maybe_auction_result - .map_err(|e| warn!("Failed to get auction results: {e:#}")) + .map_err(|e| tracing::warn!("Failed to get auction results: {e:#}")) .unwrap_or_default(); // We continue here, as we still have fallback builder URL let mut futures = Vec::new(); @@ -319,13 +321,16 @@ impl, V: Versions> TransactionTask let validated_state = self.consensus.read().await.decided_state(); let sequencing_fees = Vec1::try_from_vec(sequencing_fees) - .context("Failed to receive a bundle from any builder.")?; + .wrap() + .context(warn!("Failed to receive a bundle from any builder."))?; let (block_payload, metadata) = TYPES::BlockPayload::from_transactions( transactions, &validated_state, &Arc::clone(&self.instance_state), ) - .await?; + .await + .wrap() + .context(error!("Failed to construct block payload"))?; Ok(PackedBundle::new( block_payload.encode(), @@ -348,7 +353,7 @@ impl, V: Versions> TransactionTask self.membership.total_nodes(self.cur_epoch), version, ) else { - error!("Failed to calculate null block fee."); + tracing::error!("Failed to calculate null block fee."); return None; }; @@ -379,7 +384,7 @@ impl, V: Versions> TransactionTask let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(err) => { - error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; } }; @@ -425,7 +430,7 @@ impl, V: Versions> TransactionTask &mut self, event: Arc>, event_stream: Sender>>, - ) -> Option { + ) -> Result<()> { match event.as_ref() { HotShotEvent::TransactionsRecv(transactions) => { broadcast_event( @@ -438,30 +443,36 @@ impl, V: Versions> TransactionTask &self.output_event_stream, ) .await; - - return None; } HotShotEvent::ViewChange(view) => { let view = *view; - debug!("view change in transactions to view {:?}", view); - if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { - return None; - } + + tracing::debug!("view change in transactions to view {:?}", view); + ensure!( + *view > *self.cur_view || *self.cur_view == 0, + debug!( + "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view + ) + ); let mut make_block = false; if *view - *self.cur_view > 1 { - info!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.leader(view, self.cur_epoch) == self.public_key; + tracing::info!("View changed by more than 1 going to view {:?}", view); + make_block = self.membership.leader(view, self.cur_epoch)? == self.public_key; } self.cur_view = view; let next_view = self.cur_view + 1; let next_leader = - self.membership.leader(next_view, self.cur_epoch) == self.public_key; - if !make_block && !next_leader { - debug!("Not next leader for view {:?}", self.cur_view); - return None; - } + self.membership.leader(next_view, self.cur_epoch)? == self.public_key; + + ensure!( + make_block || next_leader, + debug!( + "Not making the block because we are not leader for view {:?}", + self.cur_view + ) + ); if make_block { self.handle_view_change(&event_stream, self.cur_view).await; @@ -471,12 +482,9 @@ impl, V: Versions> TransactionTask self.handle_view_change(&event_stream, next_view).await; } } - HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted); - } _ => {} } - None + Ok(()) } /// Get VID commitment for the last successful view before `block_view`. @@ -514,7 +522,9 @@ impl, V: Versions> TransactionTask let view_data = consensus .validated_state_map() .get(&target_view) - .context("Missing record for view {?target_view} in validated state")?; + .context(info!( + "Missing record for view {?target_view} in validated state" + ))?; match view_data.view_inner { ViewInner::Da { payload_commitment } => { @@ -525,13 +535,13 @@ impl, V: Versions> TransactionTask .. } => { let leaf = consensus.saved_leaves().get(&leaf_commitment).context - ("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves")?; + (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); } ViewInner::Failed => { // For failed views, backtrack target_view = - TYPES::View::new(target_view.checked_sub(1).context("Reached genesis")?); + TYPES::View::new(target_view.checked_sub(1).context(warn!("Reached genesis. Something is wrong -- have we not decided any blocks since genesis?"))?); continue; } } @@ -560,7 +570,7 @@ impl, V: Versions> TransactionTask ) { Ok(sig) => sig, Err(err) => { - error!(%err, "Failed to sign block hash"); + tracing::error!(%err, "Failed to sign block hash"); return None; } }; @@ -588,7 +598,7 @@ impl, V: Versions> TransactionTask // We timed out while getting available blocks Err(err) => { - info!(%err, "Timeout while getting available blocks"); + tracing::info!(%err, "Timeout while getting available blocks"); return None; } } @@ -674,7 +684,7 @@ impl, V: Versions> TransactionTask parent_comm: VidCommitment, view_number: TYPES::View, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> anyhow::Result> { + ) -> Result> { let mut available_blocks = self .get_available_blocks(parent_comm, view_number, parent_comm_sig) .await; @@ -792,9 +802,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } async fn cancel_subtasks(&mut self) {} diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 4992b06887..fb4f4de7f4 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -6,7 +6,6 @@ use std::{marker::PhantomData, sync::Arc, time::SystemTime}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use committable::Committable; @@ -28,11 +27,12 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use vbs::version::StaticVersionType; use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, + events::HotShotEvent, helpers::broadcast_event, vote_collection::{handle_vote, VoteCollectorsMap}, }; @@ -109,45 +109,47 @@ impl, V: Versions> UpgradeTaskStat &mut self, event: Arc>, tx: Sender>>, - ) -> Option { + ) -> Result<()> { match event.as_ref() { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { - info!("Received upgrade proposal: {:?}", proposal); + tracing::info!("Received upgrade proposal: {:?}", proposal); let view = *proposal.data.view_number(); // Skip voting if the version has already been upgraded. - if self.upgraded().await { - info!( - "Already upgraded to {:?}, skip voting.", - V::Upgrade::VERSION - ); - return None; - } + ensure!( + !self.upgraded().await, + info!("Already upgraded to {:?}; not voting.", V::Upgrade::VERSION) + ); let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) - .ok()? + .wrap() + .context(error!( + "Failed to calculate duration. This should never happen." + ))? .as_secs(); - if time < self.start_voting_time || time >= self.stop_voting_time { - return None; - } + ensure!( + time >= self.start_voting_time && time < self.stop_voting_time, + "Refusing to vote because we are no longer in the configured vote time window." + ); - if view < self.start_voting_view || view >= self.stop_voting_view { - return None; - } + ensure!( + view >= self.start_voting_view && view < self.stop_voting_view, + "Refusing to vote because we are no longer in the configured vote view window." + ); // If the proposal does not match our upgrade target, we immediately exit. - if proposal.data.upgrade_proposal.new_version_hash != V::UPGRADE_HASH - || proposal.data.upgrade_proposal.old_version != V::Base::VERSION - || proposal.data.upgrade_proposal.new_version != V::Upgrade::VERSION - { - return None; - } + ensure!( + proposal.data.upgrade_proposal.new_version_hash == V::UPGRADE_HASH + && proposal.data.upgrade_proposal.old_version == V::Base::VERSION + && proposal.data.upgrade_proposal.new_version == V::Upgrade::VERSION, + "Proposal does not match our upgrade target" + ); // If we have an upgrade target, we validate that the proposal is relevant for the current view. - info!( + tracing::info!( "Upgrade proposal received for view: {:?}", proposal.data.view_number() ); @@ -169,20 +171,23 @@ impl, V: Versions> UpgradeTaskStat // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::View::genesis() && view < self.cur_view - 1 { - warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", + ensure!( + self.cur_view != TYPES::View::genesis() && *view >= self.cur_view.saturating_sub(1), + warn!( + "Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, self.cur_view - ); - return None; - } + ) + ); // We then validate that the proposal was issued by the leader for the view. - let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch); - if &view_leader_key != sender { - error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); - return None; - } + let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch)?; + ensure!( + view_leader_key == *sender, + info!( + "Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone() + ) + ); // At this point, we've checked that: // * the proposal was expected, @@ -201,36 +206,33 @@ impl, V: Versions> UpgradeTaskStat .await; // If everything is fine up to here, we generate and send a vote on the proposal. - let Ok(vote) = UpgradeVote::create_signed_vote( + let vote = UpgradeVote::create_signed_vote( proposal.data.upgrade_proposal.clone(), view, &self.public_key, &self.private_key, &self.upgrade_lock, ) - .await - else { - error!("Failed to sign UpgradeVote!"); - return None; - }; - debug!("Sending upgrade vote {:?}", vote.view_number()); + .await?; + + tracing::debug!("Sending upgrade vote {:?}", vote.view_number()); broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; } HotShotEvent::UpgradeVoteRecv(ref vote) => { - debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); + tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader. { let view = vote.view_number(); - if self.quorum_membership.leader(view, self.cur_epoch) != self.public_key { - error!( + ensure!( + self.quorum_membership.leader(view, self.cur_epoch)? == self.public_key, + debug!( "We are not the leader for view {} are we leader for next view? {}", *view, - self.quorum_membership.leader(view + 1, self.cur_epoch) + self.quorum_membership.leader(view + 1, self.cur_epoch)? == self.public_key - ); - return None; - } + ) + ); } handle_vote( @@ -244,19 +246,20 @@ impl, V: Versions> UpgradeTaskStat &tx, &self.upgrade_lock, ) - .await; + .await?; } HotShotEvent::ViewChange(new_view) => { - if self.cur_view >= *new_view { - return None; - } + ensure!(self.cur_view < *new_view || *self.cur_view == 0); self.cur_view = *new_view; let view: u64 = *self.cur_view; let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) - .ok()? + .wrap() + .context(error!( + "Failed to calculate duration. This should never happen." + ))? .as_secs(); // We try to form a certificate 5 views before we're leader. @@ -268,7 +271,7 @@ impl, V: Versions> UpgradeTaskStat && self.quorum_membership.leader( TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), self.cur_epoch, - ) == self.public_key + )? == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: V::Base::VERSION, @@ -290,7 +293,7 @@ impl, V: Versions> UpgradeTaskStat ) .expect("Failed to sign upgrade proposal commitment!"); - warn!("Sending upgrade proposal:\n\n {:?}", upgrade_proposal); + tracing::warn!("Sending upgrade proposal:\n\n {:?}", upgrade_proposal); let message = Proposal { data: upgrade_proposal, @@ -307,16 +310,10 @@ impl, V: Versions> UpgradeTaskStat ) .await; } - - return None; - } - HotShotEvent::Shutdown => { - error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted); } _ => {} } - None + Ok(()) } } @@ -333,7 +330,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; + self.handle(event, sender.clone()).await?; Ok(()) } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 106203bd07..30d31dff7d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -6,7 +6,6 @@ use std::{marker::PhantomData, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; @@ -21,6 +20,7 @@ use hotshot_types::{ }, }; use tracing::{debug, error, info, instrument}; +use utils::anytrace::Result; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index fb00054f88..46e4b11c12 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -4,7 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#![allow(clippy::module_name_repetitions)] use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, @@ -12,7 +11,6 @@ use std::{ time::Duration, }; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -38,7 +36,8 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; +use utils::anytrace::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -125,9 +124,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } async fn cancel_subtasks(&mut self) {} @@ -199,7 +196,7 @@ impl, V: Versions> ViewSyncTaskSta // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it if self.current_view > view { - debug!("Already in a higher view than the view sync message"); + tracing::debug!("Already in a higher view than the view sync message"); return; } @@ -207,7 +204,7 @@ impl, V: Versions> ViewSyncTaskSta if let Some(replica_task) = task_map.get_mut(&view) { // Forward event then return - debug!("Forwarding message"); + tracing::debug!("Forwarding message"); let result = replica_task .handle(Arc::clone(&event), sender.clone()) .await; @@ -258,28 +255,28 @@ impl, V: Versions> ViewSyncTaskSta &mut self, event: Arc>, event_stream: Sender>>, - ) { + ) -> Result<()> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { - debug!("Received view sync cert for phase {:?}", certificate); + tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { - debug!("Received view sync cert for phase {:?}", certificate); + tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - debug!("Received view sync cert for phase {:?}", certificate); + tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncTimeout(view, _, _) => { - debug!("view sync timeout in main task {:?}", view); + tracing::debug!("view sync timeout in main task {:?}", view); let view = *view; self.send_to_or_create_replica(event, view, &event_stream) .await; @@ -291,27 +288,27 @@ impl, V: Versions> ViewSyncTaskSta let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { - debug!("Forwarding message"); - let result = relay_task - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; + tracing::debug!("Forwarding message"); - if result == Some(HotShotTaskCompleted) { - // The protocol has finished + // Handle the vote and check if the accumulator has returned successfully + if relay_task + .handle_vote_event(Arc::clone(&event), &event_stream) + .await? + .is_some() + { map.remove(&vote_view); } - return; + + return Ok(()); } // We do not have a relay task already running, so start one - if self - .membership - .leader(vote_view + relay, self.current_epoch) - != self.public_key - { - debug!("View sync vote sent to wrong leader"); - return; - } + ensure!( + self.membership + .leader(vote_view + relay, self.current_epoch)? + == self.public_key, + "View sync vote sent to wrong leader" + ); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -322,10 +319,9 @@ impl, V: Versions> ViewSyncTaskSta }; let vote_collector = create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await; - if let Some(vote_task) = vote_collector { - relay_map.insert(relay, vote_task); - } + .await?; + + relay_map.insert(relay, vote_collector); } HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { @@ -334,27 +330,27 @@ impl, V: Versions> ViewSyncTaskSta let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { - debug!("Forwarding message"); - let result = relay_task - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; + tracing::debug!("Forwarding message"); - if result == Some(HotShotTaskCompleted) { - // The protocol has finished + // Handle the vote and check if the accumulator has returned successfully + if relay_task + .handle_vote_event(Arc::clone(&event), &event_stream) + .await? + .is_some() + { map.remove(&vote_view); } - return; + + return Ok(()); } // We do not have a relay task already running, so start one - if self - .membership - .leader(vote_view + relay, self.current_epoch) - != self.public_key - { - debug!("View sync vote sent to wrong leader"); - return; - } + ensure!( + self.membership + .leader(vote_view + relay, self.current_epoch)? + == self.public_key, + debug!("View sync vote sent to wrong leader") + ); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -363,12 +359,11 @@ impl, V: Versions> ViewSyncTaskSta epoch: self.current_epoch, id: self.id, }; + let vote_collector = create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await; - if let Some(vote_task) = vote_collector { - relay_map.insert(relay, vote_task); - } + .await?; + relay_map.insert(relay, vote_collector); } HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { @@ -377,27 +372,27 @@ impl, V: Versions> ViewSyncTaskSta let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { - debug!("Forwarding message"); - let result = relay_task - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; + tracing::debug!("Forwarding message"); - if result == Some(HotShotTaskCompleted) { - // The protocol has finished + // Handle the vote and check if the accumulator has returned successfully + if relay_task + .handle_vote_event(Arc::clone(&event), &event_stream) + .await? + .is_some() + { map.remove(&vote_view); } - return; + + return Ok(()); } // We do not have a relay task already running, so start one - if self - .membership - .leader(vote_view + relay, self.current_epoch) - != self.public_key - { - debug!("View sync vote sent to wrong leader"); - return; - } + ensure!( + self.membership + .leader(vote_view + relay, self.current_epoch)? + == self.public_key, + debug!("View sync vote sent to wrong leader") + ); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -409,7 +404,7 @@ impl, V: Versions> ViewSyncTaskSta let vote_collector = create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) .await; - if let Some(vote_task) = vote_collector { + if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } } @@ -417,9 +412,10 @@ impl, V: Versions> ViewSyncTaskSta &HotShotEvent::ViewChange(new_view) => { let new_view = TYPES::View::new(*new_view); if self.current_view < new_view { - debug!( + tracing::debug!( "Change from view {} to view {} in view sync task", - *self.current_view, *new_view + *self.current_view, + *new_view ); self.current_view = new_view; @@ -454,13 +450,14 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number <= TYPES::View::new(*self.current_view) { - return; - } + ensure!( + view_number > self.current_view, + debug!("Discarding old timeout vote.") + ); self.num_timeouts_tracked += 1; - let leader = self.membership.leader(view_number, self.current_epoch); - warn!( + let leader = self.membership.leader(view_number, self.current_epoch)?; + tracing::warn!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), view_number = *view_number, @@ -469,11 +466,11 @@ impl, V: Versions> ViewSyncTaskSta ); if self.num_timeouts_tracked >= 3 { - error!("Too many consecutive timeouts! This shouldn't happen"); + tracing::error!("Too many consecutive timeouts! This shouldn't happen"); } if self.num_timeouts_tracked >= 2 { - error!("Starting view sync protocol for view {}", *view_number + 1); + tracing::error!("Starting view sync protocol for view {}", *view_number + 1); self.send_to_or_create_replica( Arc::new(HotShotEvent::ViewSyncTrigger(view_number + 1)), @@ -496,6 +493,7 @@ impl, V: Versions> ViewSyncTaskSta _ => {} } + Ok(()) } } @@ -515,7 +513,7 @@ impl, V: Versions> // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { - warn!("We're already in a higher round"); + tracing::warn!("We're already in a higher round"); return None; } @@ -529,7 +527,7 @@ impl, V: Versions> ) .await { - error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!("Not valid view sync cert! {:?}", certificate.data()); return None; } @@ -556,7 +554,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign ViewSyncCommitData!"); + tracing::error!("Failed to sign ViewSyncCommitData!"); return None; }; let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); @@ -581,7 +579,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); + tracing::warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( @@ -601,7 +599,7 @@ impl, V: Versions> // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { - warn!("We're already in a higher round"); + tracing::warn!("We're already in a higher round"); return None; } @@ -615,7 +613,7 @@ impl, V: Versions> ) .await { - error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!("Not valid view sync cert! {:?}", certificate.data()); return None; } @@ -642,7 +640,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign view sync finalized vote!"); + tracing::error!("Failed to sign view sync finalized vote!"); return None; }; let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); @@ -655,7 +653,7 @@ impl, V: Versions> .await; } - info!( + tracing::info!( "View sync protocol has received view sync evidence to update the view to {}", *self.next_view ); @@ -677,7 +675,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - warn!( + tracing::warn!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", relay ); @@ -697,7 +695,7 @@ impl, V: Versions> HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { - warn!("We're already in a higher round"); + tracing::warn!("We're already in a higher round"); return None; } @@ -711,7 +709,7 @@ impl, V: Versions> ) .await { - error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!("Not valid view sync cert! {:?}", certificate.data()); return None; } @@ -741,7 +739,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; if self.next_view != TYPES::View::new(*view_number) { - error!("Unexpected view number to triger view sync"); + tracing::error!("Unexpected view number to triger view sync"); return None; } @@ -757,7 +755,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign pre commit vote!"); + tracing::error!("Failed to sign pre commit vote!"); return None; }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); @@ -777,7 +775,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - warn!("Vote sending timed out in ViewSyncTrigger"); + tracing::warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::View::new(*next_view), @@ -815,7 +813,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign ViewSyncPreCommitData!"); + tracing::error!("Failed to sign ViewSyncPreCommitData!"); return None; }; let message = @@ -843,7 +841,7 @@ impl, V: Versions> let last_cert = last_seen_certificate.clone(); async move { async_sleep(timeout).await; - warn!( + tracing::warn!( "Vote sending timed out in ViewSyncTimeout relay = {}", relay ); diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 533a96b719..4c685ca978 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -30,12 +30,9 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; -use tracing::{debug, error}; +use utils::anytrace::*; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Alias for a map of Vote Collectors pub type VoteCollectorsMap = @@ -74,8 +71,15 @@ pub trait AggregatableVote< CERT: Certificate, > { - /// return the leader for this votes in the given epoch - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey; + /// return the leader for this votes + /// + /// # Errors + /// if the leader cannot be calculated + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -84,48 +88,54 @@ pub trait AggregatableVote< impl< TYPES: NodeType, VOTE: Vote + AggregatableVote, - CERT: Certificate + Debug, + CERT: Certificate + Clone + Debug, V: Versions, > VoteCollectionTaskState { /// Take one vote and accumulate it. Returns either the cert or the updated state /// after the vote is accumulated + /// + /// # Errors + /// If are unable to accumulate the vote #[allow(clippy::question_mark)] pub async fn accumulate_vote( &mut self, vote: &VOTE, event_stream: &Sender>>, - ) -> Option { - if vote.leader(&self.membership, self.epoch) != self.public_key { - error!("Received vote for a view in which we were not the leader."); - return None; - } - - if vote.view_number() != self.view { + ) -> Result> { + ensure!( + vote.leader(&self.membership, self.epoch)? == self.public_key, + info!("Received vote for a view in which we were not the leader.") + ); + ensure!( + vote.view_number() == self.view, error!( - "Vote view does not match! vote view is {} current view is {}", + "Vote view does not match! vote view is {} current view is {}. This vote should not have been passed to this accumulator.", *vote.view_number(), *self.view - ); - return None; - } + ) + ); + + let accumulator = self.accumulator.as_mut().context(warn!( + "No accumulator to handle vote with. This shouldn't happen." + ))?; - let accumulator = self.accumulator.as_mut()?; match accumulator .accumulate(vote, &self.membership, self.epoch) .await { - Either::Left(()) => None, + Either::Left(()) => Ok(None), Either::Right(cert) => { - debug!("Certificate Formed! {:?}", cert); + tracing::debug!("Certificate Formed! {:?}", cert); broadcast_event( - Arc::new(VOTE::make_cert_event(cert, &self.public_key)), + Arc::new(VOTE::make_cert_event(cert.clone(), &self.public_key)), event_stream, ) .await; self.accumulator = None; - Some(HotShotTaskCompleted) + + Ok(Some(cert)) } } } @@ -140,11 +150,14 @@ where CERT: Certificate + Debug, { /// Handle a vote event + /// + /// # Errors + /// Returns an error if we fail to handle the vote async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Option; + ) -> Result>; /// Event filter to use for this event fn filter(event: Arc>) -> bool; @@ -165,6 +178,10 @@ pub struct AccumulatorInfo { } /// Generic function for spawning a vote task. Returns the event stream id of the spawned task if created +/// +/// # Errors +/// If we faile to create the accumulator +/// /// # Panics /// Calls unwrap but should never panic. pub async fn create_vote_accumulator( @@ -172,7 +189,7 @@ pub async fn create_vote_accumulator( event: Arc>, sender: &Sender>>, upgrade_lock: UpgradeLock, -) -> Option> +) -> Result> where TYPES: NodeType, VOTE: Vote @@ -204,17 +221,15 @@ where id: info.id, }; - let result = state.handle_vote_event(Arc::clone(&event), sender).await; + state.handle_vote_event(Arc::clone(&event), sender).await?; - if result == Some(HotShotTaskCompleted) { - // The protocol has finished - return None; - } - - Some(state) + Ok(state) } /// A helper function that handles a vote regardless whether it's the first vote in the view or not. +/// +/// # Errors +/// If we fail to handle the vote #[allow(clippy::too_many_arguments)] pub async fn handle_vote< TYPES: NodeType, @@ -231,12 +246,13 @@ pub async fn handle_vote< event: &Arc>, event_stream: &Sender>>, upgrade_lock: &UpgradeLock, -) where +) -> Result<()> +where VoteCollectionTaskState: HandleVoteEvent, { match collectors.entry(vote.view_number()) { Entry::Vacant(entry) => { - debug!("Starting vote handle for view {:?}", vote.view_number()); + tracing::debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key, membership: Arc::clone(membership), @@ -244,29 +260,31 @@ pub async fn handle_vote< epoch, id, }; - if let Some(collector) = create_vote_accumulator( + let collector = create_vote_accumulator( &info, Arc::clone(event), event_stream, upgrade_lock.clone(), ) - .await - { - entry.insert(collector); - }; + .await?; + + entry.insert(collector); + + Ok(()) } Entry::Occupied(mut entry) => { - let result = entry + // handle the vote, and garbage collect if the vote collector is finished + if entry .get_mut() .handle_vote_event(Arc::clone(event), event_stream) - .await; - - if result == Some(HotShotTaskCompleted) { - // garbage collect vote collectors for old views (including the one just finished) + .await? + .is_some() + { entry.remove(); *collectors = collectors.split_off(&vote.view_number()); - // The protocol has finished } + + Ok(()) } } } @@ -303,7 +321,11 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( @@ -317,7 +339,11 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number(), epoch) } fn make_cert_event( @@ -331,7 +357,11 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate> for DaVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number(), epoch) } fn make_cert_event( @@ -345,7 +375,11 @@ impl AggregatableVote, DaCertificate AggregatableVote, TimeoutCertificate> for TimeoutVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( @@ -360,7 +394,11 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( @@ -375,7 +413,11 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( @@ -390,7 +432,11 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( @@ -411,10 +457,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -432,10 +478,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -451,10 +497,10 @@ impl HandleVoteEvent, DaCerti &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -471,10 +517,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -491,12 +537,12 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await } - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -513,10 +559,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -533,12 +579,12 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await } - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { diff --git a/task/Cargo.toml b/task/Cargo.toml index 3983158a31..47261bfbd5 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -14,6 +14,7 @@ tracing = { workspace = true } async-compatibility-layer = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } +utils = { path = "../utils" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true, features = [ diff --git a/task/src/task.rs b/task/src/task.rs index c623c5e43a..fc60db3064 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use anyhow::Result; use async_broadcast::{Receiver, RecvError, Sender}; #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; @@ -17,6 +16,7 @@ use futures::future::join_all; use futures::future::try_join_all; #[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; +use utils::anytrace::Result; /// Trait for events that long-running tasks handle pub trait TaskEvent: PartialEq { diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index dcaedd5dc1..87a688b74b 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -12,9 +12,6 @@ use std::{ task::{Context, Poll}, }; -use crate::helpers::{ - build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, -}; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ @@ -22,11 +19,10 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{ - DaProposal, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewChangeEvidence, - ViewNumber, + DaProposal, EpochNumber, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, + ViewChangeEvidence, ViewNumber, }, message::{Proposal, UpgradeLock}, simple_certificate::{ @@ -46,6 +42,10 @@ use hotshot_types::{ use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; +use crate::helpers::{ + build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, +}; + #[derive(Clone)] pub struct TestView { pub da_proposal: Proposal>, diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 1f78de5467..ad417a0431 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -21,9 +21,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, PackedBundle, ViewNumber}, + data::{null_block, EpochNumber, PackedBundle, ViewNumber}, simple_vote::DaData, traits::{ block_contents::precompute_vid_commitment, diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 6a583c058f..08e4d7f88e 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -17,9 +17,8 @@ use hotshot_testing::{ helpers::build_system_handle, test_builder::TestDescription, test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, message::UpgradeLock, traits::{ election::Membership, diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 5be8b11ba1..ae5fa54fdc 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -24,9 +24,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, + data::{null_block, EpochNumber, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, traits::{ election::Membership, diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index b8c6a194b9..04cd0d528e 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -7,9 +7,8 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState, }; use hotshot_testing::helpers::build_system_handle; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, PackedBundle, ViewNumber}, + data::{null_block, EpochNumber, PackedBundle, ViewNumber}, traits::{ block_contents::precompute_vid_commitment, election::Membership, diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index f0493fb5e6..b82804f4f8 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -29,9 +29,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, Leaf, ViewNumber}, + data::{null_block, EpochNumber, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, traits::{ election::Membership, diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 714a12a3b5..c2ca9aec09 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -20,9 +20,8 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, serial, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, + data::{null_block, DaProposal, EpochNumber, PackedBundle, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 1a8519ce2c..a65d360428 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -11,10 +11,9 @@ use hotshot_testing::{ predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ consensus::OuterConsensus, - data::ViewNumber, + data::{EpochNumber, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, vote::HasViewNumber, }; diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index e5ac199aaf..00ea1ff4d7 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -176,7 +176,7 @@ cross_tests!( view_increment: nodes_count as u64, modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0))); + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0)).unwrap()); } else { {} } diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 3050cd7d32..3e56630313 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -23,9 +23,8 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, message::{DataMessage, Message, MessageKind, UpgradeLock}, signature_key::{BLSPubKey, BuilderKey}, traits::{ diff --git a/types/Cargo.toml b/types/Cargo.toml index ca2541d709..e9ad6368b5 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -50,6 +50,7 @@ vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = { workspace = true } +utils = { path = "../utils" } vec1 = { workspace = true } libp2p = { workspace = true } serde_json = { workspace = true } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index b84ab99b19..3ab3444b94 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -13,10 +13,10 @@ use std::{ sync::Arc, }; -use anyhow::{bail, ensure, Result}; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use committable::Commitment; -use tracing::{debug, error, instrument, trace}; +use tracing::instrument; +use utils::anytrace::*; use vec1::Vec1; pub use crate::utils::{View, ViewInner}; @@ -68,31 +68,31 @@ impl OuterConsensus { /// Locks inner consensus for reading and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn read(&self) -> ConsensusReadLockGuard<'_, TYPES> { - trace!("Trying to acquire read lock on consensus"); + tracing::trace!("Trying to acquire read lock on consensus"); let ret = self.inner_consensus.read().await; - trace!("Acquired read lock on consensus"); + tracing::trace!("Acquired read lock on consensus"); ConsensusReadLockGuard::new(ret) } /// Locks inner consensus for writing and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn write(&self) -> ConsensusWriteLockGuard<'_, TYPES> { - trace!("Trying to acquire write lock on consensus"); + tracing::trace!("Trying to acquire write lock on consensus"); let ret = self.inner_consensus.write().await; - trace!("Acquired write lock on consensus"); + tracing::trace!("Acquired write lock on consensus"); ConsensusWriteLockGuard::new(ret) } /// Tries to acquire write lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub fn try_write(&self) -> Option> { - trace!("Trying to acquire write lock on consensus"); + tracing::trace!("Trying to acquire write lock on consensus"); let ret = self.inner_consensus.try_write(); if let Some(guard) = ret { - trace!("Acquired write lock on consensus"); + tracing::trace!("Acquired write lock on consensus"); Some(ConsensusWriteLockGuard::new(guard)) } else { - trace!("Failed to acquire write lock"); + tracing::trace!("Failed to acquire write lock"); None } } @@ -100,22 +100,22 @@ impl OuterConsensus { /// Acquires upgradable read lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn upgradable_read(&self) -> ConsensusUpgradableReadLockGuard<'_, TYPES> { - trace!("Trying to acquire upgradable read lock on consensus"); + tracing::trace!("Trying to acquire upgradable read lock on consensus"); let ret = self.inner_consensus.upgradable_read().await; - trace!("Acquired upgradable read lock on consensus"); + tracing::trace!("Acquired upgradable read lock on consensus"); ConsensusUpgradableReadLockGuard::new(ret) } /// Tries to acquire read lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub fn try_read(&self) -> Option> { - trace!("Trying to acquire read lock on consensus"); + tracing::trace!("Trying to acquire read lock on consensus"); let ret = self.inner_consensus.try_read(); if let Some(guard) = ret { - trace!("Acquired read lock on consensus"); + tracing::trace!("Acquired read lock on consensus"); Some(ConsensusReadLockGuard::new(guard)) } else { - trace!("Failed to acquire read lock"); + tracing::trace!("Failed to acquire read lock"); None } } @@ -145,7 +145,7 @@ impl<'a, TYPES: NodeType> Deref for ConsensusReadLockGuard<'a, TYPES> { impl<'a, TYPES: NodeType> Drop for ConsensusReadLockGuard<'a, TYPES> { #[instrument(skip_all, target = "ConsensusReadLockGuard")] fn drop(&mut self) { - trace!("Read lock on consensus dropped"); + tracing::trace!("Read lock on consensus dropped"); } } @@ -179,7 +179,7 @@ impl<'a, TYPES: NodeType> DerefMut for ConsensusWriteLockGuard<'a, TYPES> { impl<'a, TYPES: NodeType> Drop for ConsensusWriteLockGuard<'a, TYPES> { #[instrument(skip_all, target = "ConsensusWriteLockGuard")] fn drop(&mut self) { - debug!("Write lock on consensus dropped"); + tracing::debug!("Write lock on consensus dropped"); } } @@ -206,9 +206,9 @@ impl<'a, TYPES: NodeType> ConsensusUpgradableReadLockGuard<'a, TYPES> { pub async fn upgrade(mut guard: Self) -> ConsensusWriteLockGuard<'a, TYPES> { let inner_guard = unsafe { ManuallyDrop::take(&mut guard.lock_guard) }; guard.taken = true; - debug!("Trying to upgrade upgradable read lock on consensus"); + tracing::debug!("Trying to upgrade upgradable read lock on consensus"); let ret = RwLockUpgradableReadGuard::upgrade(inner_guard).await; - debug!("Upgraded upgradable read lock on consensus"); + tracing::debug!("Upgraded upgradable read lock on consensus"); ConsensusWriteLockGuard::new(ret) } } @@ -226,7 +226,7 @@ impl<'a, TYPES: NodeType> Drop for ConsensusUpgradableReadLockGuard<'a, TYPES> { fn drop(&mut self) { if !self.taken { unsafe { ManuallyDrop::drop(&mut self.lock_guard) } - debug!("Upgradable read lock on consensus dropped"); + tracing::debug!("Upgradable read lock on consensus dropped"); } } } @@ -597,19 +597,17 @@ impl Consensus { .. } = existing_view.view_inner { - match new_view.view_inner { - ViewInner::Leaf { - delta: ref new_delta, - .. - } => { - ensure!( - new_delta.is_some() || existing_delta.is_none(), - "Skipping the state update to not override a `Leaf` view with `Some` state delta." - ); - } - _ => { - bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); - } + if let ViewInner::Leaf { + delta: ref new_delta, + .. + } = new_view.view_inner + { + ensure!( + new_delta.is_some() || existing_delta.is_none(), + "Skipping the state update to not override a `Leaf` view with `Some` state delta." + ); + } else { + bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); } } } @@ -649,10 +647,10 @@ impl Consensus { /// Can return an error when the provided high_qc is not newer than the existing entry. pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { ensure!( - high_qc.view_number > self.high_qc.view_number, + high_qc.view_number > self.high_qc.view_number || high_qc == self.high_qc, "High QC with an equal or higher view exists." ); - debug!("Updating high QC"); + tracing::debug!("Updating high QC"); self.high_qc = high_qc; Ok(()) @@ -684,7 +682,7 @@ impl Consensus { terminator: Terminator, ok_when_finished: bool, mut f: F, - ) -> Result<(), HotShotError> + ) -> std::result::Result<(), HotShotError> where F: FnMut( &Leaf, @@ -748,7 +746,7 @@ impl Consensus { .next() .expect("INCONSISTENT STATE: anchor leaf not in state map!"); if *anchor_entry.0 != old_anchor_view { - error!( + tracing::error!( "Something about GC has failed. Older leaf exists than the previous anchor leaf." ); } diff --git a/types/src/data.rs b/types/src/data.rs index 3518f2e8bd..3ea8566184 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -9,8 +9,14 @@ //! This module provides types for representing consensus internal state, such as leaves, //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. -use anyhow::{ensure, Result}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use std::{ + collections::BTreeMap, + fmt::{Debug, Display}, + hash::Hash, + marker::PhantomData, + sync::Arc, +}; + use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; @@ -20,17 +26,11 @@ use derivative::Derivative; use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt::{Debug, Display}, - hash::Hash, - marker::PhantomData, - sync::Arc, -}; use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; +use utils::anytrace::*; use vec1::Vec1; use crate::{ @@ -112,20 +112,7 @@ macro_rules! impl_u64_wrapper { } /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - CanonicalSerialize, - CanonicalDeserialize, -)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct ViewNumber(u64); impl Committable for ViewNumber { @@ -138,20 +125,7 @@ impl Committable for ViewNumber { impl_u64_wrapper!(ViewNumber); /// Type-safe wrapper around `u64` so we know the thing we're talking about is a epoch number. -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - CanonicalSerialize, - CanonicalDeserialize, -)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct EpochNumber(u64); impl Committable for EpochNumber { @@ -654,7 +628,7 @@ impl Leaf { &mut self, block_payload: TYPES::BlockPayload, num_storage_nodes: usize, - ) -> Result<(), BlockError> { + ) -> std::result::Result<(), BlockError> { let encoded_txns = block_payload.encode(); let commitment = vid_commitment(&encoded_txns, num_storage_nodes); if commitment != self.block_header.payload_commitment() { diff --git a/types/src/message.rs b/types/src/message.rs index 42ae087b9a..217b5d7578 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -15,11 +15,11 @@ use std::{ sync::Arc, }; -use anyhow::{bail, ensure, Context, Result}; use async_lock::RwLock; use cdn_proto::util::mnemonic; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use utils::anytrace::*; use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, @@ -377,7 +377,7 @@ where upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); - let view_leader_key = quorum_membership.leader(view_number, epoch); + let view_leader_key = quorum_membership.leader(view_number, epoch)?; let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( @@ -486,7 +486,9 @@ impl UpgradeLock { } }; - serialized_message.context("Failed to serialize message!") + serialized_message + .wrap() + .context(info!("Failed to serialize message!")) } /// Deserialize a message with a version number, using `message.view_number()` to determine the message's version. This function will fail on improperly versioned messages. @@ -499,7 +501,8 @@ impl UpgradeLock { message: &[u8], ) -> Result { let actual_version = Version::deserialize(message) - .context("Failed to read message version!")? + .wrap() + .context(info!("Failed to read message version!"))? .0; let deserialized_message: M = match actual_version { @@ -509,7 +512,8 @@ impl UpgradeLock { bail!("Cannot deserialize message with stated version {}", v); } } - .context("Failed to deserialize message!")?; + .wrap() + .context(info!("Failed to deserialize message!"))?; let view = deserialized_message.view_number(); diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index bbdc88eb05..f7f58fd4e1 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -13,11 +13,11 @@ use std::{ sync::Arc, }; -use anyhow::{ensure, Result}; use async_lock::RwLock; use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; +use utils::anytrace::*; use crate::{ data::serialize_signature2, diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 07c3139eef..f5111084b8 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -8,9 +8,9 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData}; -use anyhow::Result; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use utils::anytrace::*; use vbs::version::Version; use crate::{ @@ -169,7 +169,9 @@ impl SimpleVote { let signature = ( pub_key.clone(), - TYPES::SignatureKey::sign(private_key, commit.as_ref())?, + TYPES::SignatureKey::sign(private_key, commit.as_ref()) + .wrap() + .context(error!("Failed to sign vote"))?, ); Ok(Self { diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 43d0ebf12f..d22f226875 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,6 +7,8 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; +use utils::anytrace::Result; + use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; @@ -54,8 +56,11 @@ pub trait Membership: /// See if a node has stake in the committee in a specific epoch fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; - /// The leader of the committee for view `view_number` in an epoch `epoch`. - fn leader(&self, view_number: TYPES::View, epoch: TYPES::Epoch) -> TYPES::SignatureKey; + /// The leader of the committee for view `view_number` in `epoch`. + /// + /// # Errors + /// Returns an error if the leader cannot be calculated + fn leader(&self, view: TYPES::View, epoch: TYPES::Epoch) -> Result; /// Get the network topic for the committee fn committee_topic(&self) -> Topic; diff --git a/types/src/vote.rs b/types/src/vote.rs index 882512eae9..1275e172e0 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -11,12 +11,12 @@ use std::{ marker::PhantomData, }; -use anyhow::Result; use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use either::Either; use ethereum_types::U256; use tracing::error; +use utils::anytrace::Result; use crate::{ message::UpgradeLock, diff --git a/utils/Cargo.toml b/utils/Cargo.toml new file mode 100644 index 0000000000..fba1648dd7 --- /dev/null +++ b/utils/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "utils" +version = { workspace = true } +edition = { workspace = true } +description = "Utils" + +[dependencies] +tracing = { workspace = true } + +[lints] +workspace = true diff --git a/utils/src/anytrace.rs b/utils/src/anytrace.rs new file mode 100644 index 0000000000..62628207d1 --- /dev/null +++ b/utils/src/anytrace.rs @@ -0,0 +1,191 @@ +use std::{cmp::max, fmt::Display}; + +/// Macros +mod macros; +#[allow(unused_imports)] +pub use macros::*; + +/// Default log level for the crate +pub const DEFAULT_LOG_LEVEL: Level = Level::Info; + +/// Trait for logging errors +pub trait Log { + /// Log an error via `tracing` utilities, printing it. + fn log(&self); +} + +impl Log for Error { + fn log(&self) { + let mut error_level = self.level; + if error_level == Level::Unspecified { + error_level = DEFAULT_LOG_LEVEL; + } + + match error_level { + Level::Trace => { + tracing::trace!("{}", self.message); + } + Level::Debug => { + tracing::debug!("{}", self.message); + } + Level::Info => { + tracing::info!("{}", self.message); + } + Level::Warn => { + tracing::warn!("{}", self.message); + } + Level::Error => { + tracing::error!("{}", self.message); + } + // impossible + Level::Unspecified => {} + } + } +} + +impl Log for Result { + fn log(&self) { + let error = match self { + Ok(_) => { + return; + } + Err(e) => e, + }; + + let mut error_level = error.level; + if error_level == Level::Unspecified { + error_level = DEFAULT_LOG_LEVEL; + } + + match error_level { + Level::Trace => { + tracing::trace!("{}", error.message); + } + Level::Debug => { + tracing::debug!("{}", error.message); + } + Level::Info => { + tracing::info!("{}", error.message); + } + Level::Warn => { + tracing::warn!("{}", error.message); + } + Level::Error => { + tracing::error!("{}", error.message); + } + // impossible + Level::Unspecified => {} + } + } +} + +#[derive(Debug, Clone)] +#[must_use] +/// main error type +pub struct Error { + /// level + pub level: Level, + /// message + pub message: String, +} + +impl std::error::Error for Error {} + +/// Trait for a `std::result::Result` that can be wrapped into a `Result` +pub trait Wrap { + /// Wrap the value into a `Result` + /// + /// # Errors + /// Propagates errors from `self` + fn wrap(self) -> Result; +} + +impl Wrap for std::result::Result +where + E: Display, +{ + fn wrap(self) -> Result { + match self { + Ok(t) => Ok(t), + Err(e) => Err(Error { + level: Level::Unspecified, + message: format!("{e}"), + }), + } + } +} + +impl Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.message) + } +} + +/// Alias for the main `Result` type used by the crate. +pub type Result = std::result::Result; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] +/// Possible log levels +pub enum Level { + /// Unspecified log level + Unspecified, + /// TRACE + Trace, + /// DEBUG + Debug, + /// INFO + Info, + /// WARN + Warn, + /// ERROR + Error, +} + +/// Prepend an error to its cause +fn concatenate(error: &String, cause: &String) -> String { + format!("{error}\ncaused by: {cause}") +} + +/// Trait for converting error types to a `Result`. +pub trait Context { + /// Attach context to the given error. + /// + /// # Errors + /// Propagates errors from `self` + fn context(self, error: Error) -> Result; +} + +impl Context for Result { + fn context(self, error: Error) -> Result { + match self { + Ok(t) => Ok(t), + Err(cause) => Err(Error { + level: max(error.level, cause.level), + message: concatenate(&error.message, &format!("{cause}")), + }), + } + } +} + +impl Context for Option { + fn context(self, error: Error) -> Result { + match self { + Some(t) => Ok(t), + None => Err(error), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn ordering() { + assert!(Level::Trace < Level::Debug); + assert!(Level::Debug < Level::Info); + assert!(Level::Info < Level::Warn); + assert!(Level::Warn < Level::Error); + assert!(max(Level::Trace, Level::Error) == Level::Error); + } +} diff --git a/utils/src/anytrace/macros.rs b/utils/src/anytrace/macros.rs new file mode 100644 index 0000000000..71036d21fb --- /dev/null +++ b/utils/src/anytrace/macros.rs @@ -0,0 +1,293 @@ +#[macro_export] +/// Print the file and line number of the location this macro is invoked +macro_rules! line_info { + () => { + format!("{}:{}", file!(), line!()) + }; +} +pub use line_info; + +#[macro_export] +/// Create an error at the trace level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! trace { + ($error:expr) => { + Error { + level: Level::Trace, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Trace, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Trace, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use trace; + +#[macro_export] +/// Create an error at the debug level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! debug { + ($error:expr) => { + Error { + level: Level::Debug, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Debug, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Debug, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use debug; + +#[macro_export] +/// Create an error at the info level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! info { + ($error:expr) => { + Error { + level: Level::Info, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Info, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Info, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use info; + +#[macro_export] +/// Create an error at the warn level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! warn { + ($error:expr) => { + Error { + level: Level::Warn, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Warn, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Warn, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use crate::warn; + +#[macro_export] +/// Create an error at the error level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! error { + ($error:expr) => { + Error { + level: Level::Error, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Error, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Error, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use error; + +#[macro_export] +/// Log a `anytrace::Error` at the corresponding level. +macro_rules! log { + ($result:expr) => { + if let Err(ref error) = $result { + let mut error_level = error.level; + if error_level == Level::Unspecified { + error_level = DEFAULT_LOG_LEVEL; + } + + match error_level { + Level::Trace => { + tracing::trace!("{}", error.message); + } + Level::Debug => { + tracing::debug!("{}", error.message); + } + Level::Info => { + tracing::info!("{}", error.message); + } + Level::Warn => { + tracing::warn!("{}", error.message); + } + Level::Error => { + tracing::error!("{}", error.message); + } + // impossible + Level::Unspecified => {} + } + } + }; +} +pub use log; + +#[macro_export] +/// Check that the given condition holds, otherwise return an error. +/// +/// The argument can be either: +/// - a condition, in which case a generic error is logged at the `Unspecified` level. +/// - a condition and a string literal, in which case the provided literal is logged at the `Unspecified` level. +/// - a condition and a format expression, in which case the message is formatted and logged at the `Unspecified` level. +/// - a condition and an `Error`, in which case the given error is logged unchanged. +macro_rules! ensure { + ($condition:expr) => { + if !$condition { + let result = Err(Error { + level: Level::Unspecified, + message: format!("{}: condition '{}' failed.", line_info!(), stringify!($condition)) + }); + + log!(result); + + return result; + } + }; + ($condition:expr, $message:literal) => { + if !$condition { + let result = Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), $message) + }); + + log!(result); + + return result; + } + }; + ($condition:expr, $fmt:expr, $($arg:tt)*) => { + if !$condition { + let result = Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + }); + + log!(result); + + return result; + } + }; + ($condition:expr, $error:expr) => { + if !$condition { + let result = Err($error); + + log!(result); + + return result; + } + }; +} +pub use ensure; + +#[macro_export] +/// Return an error. +/// +/// The argument can be either: +/// - nothing, in which case a generic message is logged at the `Unspecified` level. +/// - a string literal, in which case the provided literal is logged at the `Unspecified` level. +/// - a format expression, in which case the message is formatted and logged at the `Unspecified` level. +/// - an `Error`, in which case the given error is logged unchanged. +macro_rules! bail { + () => { + let result = Err(Error { + level: Level::Unspecified, + message: format!("{}: bailed.", line_info!()), + }); + + log!(result); + + return result; + }; + ($message:literal) => { + let result = Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), $message) + }); + + log!(result); + + return result; + }; + ($fmt:expr, $($arg:tt)*) => { + let result = Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + }); + + log!(result); + + return result; + }; + ($error:expr) => { + let result = Err($error); + + log!(result); + + return result; + }; +} +pub use bail; diff --git a/utils/src/lib.rs b/utils/src/lib.rs new file mode 100644 index 0000000000..ba89a2efbb --- /dev/null +++ b/utils/src/lib.rs @@ -0,0 +1,4 @@ +//! General (not HotShot-specific) utilities + +/// Error utilities, intended to function as a replacement to `anyhow`. +pub mod anytrace; From d494d97fdfca4315d891eaaa5a1494ee5d688e23 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:50:41 -0400 Subject: [PATCH 1260/1393] minor da fix (#3792) --- task-impls/src/da.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 5ccb50e091..e811f7ac7e 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -24,7 +24,7 @@ use hotshot_types::{ block_contents::vid_commitment, election::Membership, network::ConnectedNetwork, - node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, }, @@ -107,12 +107,10 @@ impl, V: Versions> DaTaskState= self.cur_view - 1, + self.cur_view <= view + 1, "Throwing away DA proposal that is more than one view older" ); From b21a49dedc480cadff7a87d4001724fa3a7bbbd3 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:27:19 -0400 Subject: [PATCH 1261/1393] [Combined Net] Remove optimistic VID (#3797) * remove optimistic vid calculation for combined network * clippy * reduce contention on consensus lock while calculating vid * fmt --- task-impls/src/da.rs | 40 +--------------------------------------- types/src/consensus.rs | 9 +++------ 2 files changed, 4 insertions(+), 45 deletions(-) diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index e811f7ac7e..633bcaff14 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -7,14 +7,13 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::{Consensus, OuterConsensus, View}, + consensus::{OuterConsensus, View}, data::{DaProposal, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, @@ -23,7 +22,6 @@ use hotshot_types::{ traits::{ block_contents::vid_commitment, election::Membership, - network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, @@ -231,42 +229,6 @@ impl, V: Versions> DaTaskState { tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 3ab3444b94..c40091f857 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -824,13 +824,10 @@ impl Consensus { private_key: &::PrivateKey, epoch: TYPES::Epoch, ) -> Option<()> { - let consensus = consensus.upgradable_read().await; - let txns = consensus.saved_payloads().get(&view)?; - let vid = - VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, epoch, None) - .await; + let txns = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); + let vid = VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, None).await; let shares = VidDisperseShare::from_vid_disperse(vid); - let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; + let mut consensus = consensus.write().await; for share in shares { if let Some(prop) = share.to_proposal(private_key) { consensus.update_vid_shares(view, prop); From 16f19a0ace278fd107a01e1a64a77ad70d9176e1 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:52:28 -0400 Subject: [PATCH 1262/1393] unfill (#3799) --- types/src/data.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/types/src/data.rs b/types/src/data.rs index 3ea8566184..db891c8a0b 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -638,6 +638,11 @@ impl Leaf { Ok(()) } + /// Take the block payload from the leaf and return it if it is present + pub fn unfill_block_payload(&mut self) -> Option { + self.block_payload.take() + } + /// Fill this leaf with the block payload, without checking /// header and payload consistency pub fn fill_block_payload_unchecked(&mut self, block_payload: TYPES::BlockPayload) { From c5f59800d642fa787e4eedca94cebe1e86255ecf Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:31:30 -0400 Subject: [PATCH 1263/1393] Add Error type to Membership trait (#3801) --- .../traits/election/randomized_committee.rs | 4 +++- .../src/traits/election/static_committee.rs | 4 +++- .../static_committee_leader_two_views.rs | 4 +++- types/src/traits/election.rs | 24 ++++++++++++++++++- 4 files changed, 32 insertions(+), 4 deletions(-) diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index d664e2a6e8..401a3102e2 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -41,6 +41,8 @@ pub struct RandomizedCommittee { } impl Membership for RandomizedCommittee { + type Error = utils::anytrace::Error; + /// Create a new election fn new( eligible_leaders: Vec::SignatureKey>>, @@ -139,7 +141,7 @@ impl Membership for RandomizedCommittee { } /// Index the vector of public keys with the current view number - fn leader( + fn lookup_leader( &self, view_number: TYPES::View, _epoch: ::Epoch, diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index acacc51cb6..258a2cfeda 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -39,6 +39,8 @@ pub struct StaticCommittee { } impl Membership for StaticCommittee { + type Error = utils::anytrace::Error; + /// Create a new election fn new( eligible_leaders: Vec::SignatureKey>>, @@ -137,7 +139,7 @@ impl Membership for StaticCommittee { } /// Index the vector of public keys with the current view number - fn leader( + fn lookup_leader( &self, view_number: TYPES::View, _epoch: ::Epoch, diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index bb9574e37e..8658744e71 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -39,6 +39,8 @@ pub struct StaticCommitteeLeaderForTwoViews { } impl Membership for StaticCommitteeLeaderForTwoViews { + type Error = utils::anytrace::Error; + /// Create a new election fn new( eligible_leaders: Vec::SignatureKey>>, @@ -137,7 +139,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index d22f226875..580f06c5a8 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -16,6 +16,9 @@ use crate::{traits::signature_key::SignatureKey, PeerConfig}; pub trait Membership: Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static { + /// The error type returned by methods like `lookup_leader`. + type Error: std::fmt::Display; + /// Create a committee fn new( // Note: eligible_leaders is currently a hack because the DA leader == the quorum leader @@ -56,11 +59,30 @@ pub trait Membership: /// See if a node has stake in the committee in a specific epoch fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; + /// The leader of the committee for view `view_number` in `epoch`. + /// + /// Note: this function uses a HotShot-internal error type. + /// You should implement `lookup_leader`, rather than implementing this function directly. + /// + /// # Errors + /// Returns an error if the leader cannot be calculated. + fn leader(&self, view: TYPES::View, epoch: TYPES::Epoch) -> Result { + use utils::anytrace::*; + + self.lookup_leader(view, epoch).wrap().context(info!( + "Failed to get leader for view {view} in epoch {epoch}" + )) + } + /// The leader of the committee for view `view_number` in `epoch`. /// /// # Errors /// Returns an error if the leader cannot be calculated - fn leader(&self, view: TYPES::View, epoch: TYPES::Epoch) -> Result; + fn lookup_leader( + &self, + view: TYPES::View, + epoch: TYPES::Epoch, + ) -> std::result::Result; /// Get the network topic for the committee fn committee_topic(&self) -> Topic; From a704e69b7edc5b47f651d3015a1f738f45aa5388 Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Fri, 25 Oct 2024 15:53:02 -0400 Subject: [PATCH 1264/1393] Replace VidScheme assoc type aliases with newtype wrappers (#3794) * crates/types/src/vid.rs convert VidCommitment from type alias to newtype wrapper; this newtype is not AsRef or Copy; all other changes in this commit convert as_ref() to bincode::serialize and copy to clone() * convert VidCommon from type alias to newtype wrapper * convert VidShare from type alias to newtype wrapper * convert VidPrecomputeData from type alias to newtype wrapper * clippy pacification * revert bincode::serialize -> as_ref for VID commitment serialization * doc tweaks * VidCommitment impl AsRef<[u8; 32]> instead of AsRef<[u8]> --- example-types/src/block_types.rs | 6 +- task-impls/src/da.rs | 2 +- task-impls/src/quorum_proposal/handlers.rs | 2 +- task-impls/src/quorum_vote/mod.rs | 20 ++-- task-impls/src/transactions.rs | 43 ++++---- task-impls/src/vid.rs | 2 +- testing/tests/tests_1/da_task.rs | 36 +++++-- testing/tests/tests_1/vid_task.rs | 2 +- types/Cargo.toml | 1 + types/src/data.rs | 4 +- types/src/utils.rs | 4 +- types/src/vid.rs | 115 +++++++++++++++------ 12 files changed, 154 insertions(+), 83 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index dc5d5f2045..05ff656ecf 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -370,7 +370,7 @@ impl< } fn payload_commitment(&self) -> VidCommitment { - self.payload_commitment + self.payload_commitment.clone() } fn metadata(&self) -> &>::Metadata { @@ -395,9 +395,7 @@ impl Committable for TestBlockHeader { ) .constant_str("payload commitment") .fixed_size_bytes( - >::payload_commitment(self) - .as_ref() - .as_ref(), + >::payload_commitment(self).as_ref(), ) .finalize() } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 633bcaff14..3210398798 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -200,7 +200,7 @@ impl, V: Versions> DaTaskState HandleDepOutput for ProposalDependencyHandle< auction_result, ) => { commit_and_metadata = Some(CommitmentAndMetadata { - commitment: *payload_commitment, + commitment: payload_commitment.clone(), builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), fees: fees.clone(), diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 9285e627ab..2281dc2660 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -286,8 +286,8 @@ impl + 'static, V: Versions> Handl #[allow(unused_assignments)] HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { let proposal_payload_comm = proposal.block_header.payload_commitment(); - if let Some(comm) = payload_commitment { - if proposal_payload_comm != comm { + if let Some(ref comm) = payload_commitment { + if proposal_payload_comm != *comm { tracing::error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); return; } @@ -303,26 +303,26 @@ impl + 'static, V: Versions> Handl leaf = Some(proposed_leaf); } HotShotEvent::DaCertificateValidated(cert) => { - let cert_payload_comm = cert.data().payload_commit; - if let Some(comm) = payload_commitment { + let cert_payload_comm = &cert.data().payload_commit; + if let Some(ref comm) = payload_commitment { if cert_payload_comm != comm { tracing::error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } } else { - payload_commitment = Some(cert_payload_comm); + payload_commitment = Some(cert_payload_comm.clone()); } } HotShotEvent::VidShareValidated(share) => { - let vid_payload_commitment = share.data.payload_commitment; + let vid_payload_commitment = &share.data.payload_commitment; vid_share = Some(share.clone()); - if let Some(comm) = payload_commitment { + if let Some(ref comm) = payload_commitment { if vid_payload_commitment != comm { tracing::error!("VID has inconsistent payload commitment with quorum proposal or DAC."); return; } } else { - payload_commitment = Some(vid_payload_commitment); + payload_commitment = Some(vid_payload_commitment.clone()); } } _ => {} @@ -620,7 +620,7 @@ impl, V: Versions> QuorumVoteTaskS ); // Validate the VID share. - let payload_commitment = disperse.data.payload_commitment; + let payload_commitment = &disperse.data.payload_commitment; let current_epoch = self.consensus.read().await.cur_epoch(); // Check that the signature is valid @@ -643,7 +643,7 @@ impl, V: Versions> QuorumVoteTaskS match vid_scheme(self.quorum_membership.total_nodes(current_epoch)).verify_share( &disperse.data.share, &disperse.data.common, - &payload_commitment, + payload_commitment, ) { Ok(Err(())) | Err(_) => { bail!("Failed to verify VID share"); diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 72bcd8f78b..4f91a7b93d 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -288,7 +288,9 @@ impl, V: Versions> TransactionTask self.builder_timeout.saturating_sub(start.elapsed()), async { let client = BuilderClientMarketplace::new(url); - client.bundle(*parent_view, parent_hash, *block_view).await + client + .bundle(*parent_view, parent_hash.clone(), *block_view) + .await }, )); } @@ -526,15 +528,15 @@ impl, V: Versions> TransactionTask "Missing record for view {?target_view} in validated state" ))?; - match view_data.view_inner { + match &view_data.view_inner { ViewInner::Da { payload_commitment } => { - return Ok((target_view, payload_commitment)) + return Ok((target_view, payload_commitment.clone())) } ViewInner::Leaf { leaf: leaf_commitment, .. } => { - let leaf = consensus.saved_leaves().get(&leaf_commitment).context + let leaf = consensus.saved_leaves().get(leaf_commitment).context (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); } @@ -579,7 +581,7 @@ impl, V: Versions> TransactionTask match async_timeout( self.builder_timeout .saturating_sub(task_start_time.elapsed()), - self.block_from_builder(parent_comm, parent_view, &parent_comm_sig), + self.block_from_builder(parent_comm.clone(), parent_view, &parent_comm_sig), ) .await { @@ -620,20 +622,23 @@ impl, V: Versions> TransactionTask .builder_clients .iter() .enumerate() - .map(|(builder_idx, client)| async move { - client - .available_blocks( - parent_comm, - view_number.u64(), - self.public_key.clone(), - parent_comm_sig, - ) - .await - .map(move |blocks| { - blocks - .into_iter() - .map(move |block_info| (block_info, builder_idx)) - }) + .map(|(builder_idx, client)| { + let parent_comm = parent_comm.clone(); + async move { + client + .available_blocks( + parent_comm, + view_number.u64(), + self.public_key.clone(), + parent_comm_sig, + ) + .await + .map(move |blocks| { + blocks + .into_iter() + .map(move |block_info| (block_info, builder_idx)) + }) + } }) .collect::>(); let mut results = Vec::with_capacity(self.builder_clients.len()); diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 30d31dff7d..925e028ffd 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -79,7 +79,7 @@ impl> VidTaskState { vid_precompute.clone(), ) .await; - let payload_commitment = vid_disperse.payload_commitment; + let payload_commitment = vid_disperse.payload_commitment.clone(); let shares = VidDisperseShare::from_vid_disperse(vid_disperse.clone()); let mut consensus = self.consensus.write().await; for share in shares { diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ad417a0431..28a35bbe0b 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -69,8 +69,13 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData { + payload_commit: payload_commit.clone(), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -82,8 +87,13 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData { + payload_commit: payload_commit.clone(), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -170,8 +180,13 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData { + payload_commit: payload_commit.clone(), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -183,8 +198,13 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData { + payload_commit: payload_commit.clone(), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index c2ca9aec09..b5ef8335f7 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -69,7 +69,7 @@ async fn test_vid_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let (_, vid_precompute) = vid.commit_only_precompute(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit; + let payload_commitment = vid_disperse.commit.clone(); let signature = ::SignatureKey::sign( handle.private_key(), diff --git a/types/Cargo.toml b/types/Cargo.toml index e9ad6368b5..25d47d7d9e 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -38,6 +38,7 @@ time = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } derivative = "2" +derive_more = { workspace = true, features = ["display"] } # TODO promote display feature to workspace? jf-vid = { workspace = true } jf-pcs = { workspace = true } jf-signature = { workspace = true, features = ["schnorr"] } diff --git a/types/src/data.rs b/types/src/data.rs index db891c8a0b..5528a1835c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -282,7 +282,7 @@ impl VidDisperseShare { recipient_key, view_number: vid_disperse.view_number, common: vid_disperse.common.clone(), - payload_commitment: vid_disperse.payload_commitment, + payload_commitment: vid_disperse.payload_commitment.clone(), }) .collect() } @@ -345,7 +345,7 @@ impl VidDisperseShare { recipient_key, view_number: vid_disperse_proposal.data.view_number, common: vid_disperse_proposal.data.common.clone(), - payload_commitment: vid_disperse_proposal.data.payload_commitment, + payload_commitment: vid_disperse_proposal.data.payload_commitment.clone(), }, signature: vid_disperse_proposal.signature.clone(), _pd: vid_disperse_proposal._pd, diff --git a/types/src/utils.rs b/types/src/utils.rs index e3d19a8286..699d1a3830 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -58,7 +58,7 @@ impl Clone for ViewInner { fn clone(&self) -> Self { match self { Self::Da { payload_commitment } => Self::Da { - payload_commitment: *payload_commitment, + payload_commitment: payload_commitment.clone(), }, Self::Leaf { leaf, state, delta } => Self::Leaf { leaf: *leaf, @@ -123,7 +123,7 @@ impl ViewInner { #[must_use] pub fn payload_commitment(&self) -> Option { if let Self::Da { payload_commitment } = self { - Some(*payload_commitment) + Some(payload_commitment.clone()) } else { None } diff --git a/types/src/vid.rs b/types/src/vid.rs index 7d3bfb21a7..8e936f7416 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -4,20 +4,20 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -//! This module provides: -//! - an opaque constructor [`vid_scheme`] that returns a new instance of a -//! VID scheme. -//! - type aliases [`VidCommitment`], [`VidCommon`], [`VidShare`] -//! for [`VidScheme`] assoc types. +//! This module provides an opaque constructor [`vid_scheme`] that returns a new +//! instance of a VID scheme. //! -//! Purpose: the specific choice of VID scheme is an implementation detail. -//! This crate and all downstream crates should talk to the VID scheme only -//! via the traits exposed here. +//! Purpose: the specific choice of VID scheme is an implementation detail. We +//! want all communication with the VID scheme to occur only via the API exposed +//! in the [`VidScheme`] trait, but limitations of Rust make it difficult to +//! achieve this level of abstraction. Hence, there's a lot of boilerplate code +//! in this module. #![allow(missing_docs)] use std::{fmt::Debug, ops::Range}; use ark_bn254::Bn254; +use derive_more::Display; use jf_pcs::{ prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, PolynomialCommitmentScheme, @@ -34,6 +34,7 @@ use jf_vid::{ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use sha2::Sha256; +use tagged_base64::TaggedBase64; use crate::{ constants::SRS_DEGREE, @@ -103,14 +104,6 @@ pub fn vid_scheme_for_test(num_storage_nodes: usize) -> VidSchemeType { ) } -/// VID commitment type -pub type VidCommitment = ::Commit; -/// VID common type -pub type VidCommon = ::Common; -/// VID share type -pub type VidShare = ::Share; -/// VID PrecomputeData type -pub type VidPrecomputeData = ::PrecomputeData; /// VID proposal type pub type VidProposal = ( Proposal>, @@ -129,6 +122,22 @@ type Advz = advz::AdvzGPU<'static, E, H>; #[derive(Clone)] pub struct VidSchemeType(Advz); +/// Newtype wrapper for assoc type [`VidScheme::Commit`]. +#[derive(Clone, Debug, Deserialize, Display, Eq, PartialEq, Hash, Serialize)] +pub struct VidCommitment(::Commit); + +/// Newtype wrapper for assoc type [`VidScheme::Common`]. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] +pub struct VidCommon(::Common); + +/// Newtype wrapper for assoc type [`VidScheme::Share`]. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] +pub struct VidShare(::Share); + +/// Newtype wrapper for assoc type [`Precomputable::PrecomputeData`]. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] +pub struct VidPrecomputeData(::PrecomputeData); + /// Newtype wrapper for a large payload range proof. /// /// Useful for namespace proofs. @@ -198,15 +207,15 @@ type H = Sha256; // type alias impl trait (TAIT): // [rfcs/text/2515-type_alias_impl_trait.md at master · rust-lang/rfcs](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) impl VidScheme for VidSchemeType { - type Commit = ::Commit; - type Share = ::Share; - type Common = ::Common; + type Commit = VidCommitment; + type Share = VidShare; + type Common = VidCommon; fn commit_only(&mut self, payload: B) -> VidResult where B: AsRef<[u8]>, { - self.0.commit_only(payload) + self.0.commit_only(payload).map(VidCommitment) } fn disperse(&mut self, payload: B) -> VidResult> @@ -222,27 +231,32 @@ impl VidScheme for VidSchemeType { common: &Self::Common, commit: &Self::Commit, ) -> VidResult> { - self.0.verify_share(share, common, commit) + self.0.verify_share(&share.0, &common.0, &commit.0) } fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { - self.0.recover_payload(shares, common) + // TODO costly Vec copy. Is the compiler smart enough to optimize away + // this Vec, or shall we use unsafe code to cast `shares`? + // It's only `recover_payload` so who cares? + let shares: Vec<_> = shares.iter().map(|s| s.0.clone()).collect(); + + self.0.recover_payload(&shares, &common.0) } fn is_consistent(commit: &Self::Commit, common: &Self::Common) -> VidResult<()> { - ::is_consistent(commit, common) + ::is_consistent(&commit.0, &common.0) } fn get_payload_byte_len(common: &Self::Common) -> u32 { - ::get_payload_byte_len(common) + ::get_payload_byte_len(&common.0) } fn get_num_storage_nodes(common: &Self::Common) -> u32 { - ::get_num_storage_nodes(common) + ::get_num_storage_nodes(&common.0) } fn get_multiplicity(common: &Self::Common) -> u32 { - ::get_multiplicity(common) + ::get_multiplicity(&common.0) } /// Helper function for testing only @@ -252,6 +266,33 @@ impl VidScheme for VidSchemeType { } } +impl From for TaggedBase64 { + fn from(value: VidCommitment) -> Self { + TaggedBase64::from(value.0) + } +} + +impl<'a> TryFrom<&'a TaggedBase64> for VidCommitment { + type Error = <::Commit as TryFrom<&'a TaggedBase64>>::Error; + + fn try_from(value: &'a TaggedBase64) -> Result { + ::Commit::try_from(value).map(Self) + } +} + +// TODO add AsRef trait bound upstream. +// +// We impl `AsRef<[u8; _]>` instead of `AsRef<[u8]>` to maintain backward +// compatibility for downstream code that re-hashes the `VidCommitment`. +// Specifically, if we support only `AsRef<[u8]>` then code that uses +// `Committable` must switch `fixed_size_bytes` to `var_size_bytes`, which +// prepends length data into the hash, thus changing the hash. +impl AsRef<[u8; 32]> for VidCommitment { + fn as_ref(&self) -> &[u8; 32] { + self.0.as_ref().as_ref() + } +} + impl PayloadProver for VidSchemeType { fn payload_proof(&self, payload: B, range: Range) -> VidResult where @@ -291,7 +332,7 @@ impl PayloadProver for VidSchemeType { } impl Precomputable for VidSchemeType { - type PrecomputeData = ::PrecomputeData; + type PrecomputeData = VidPrecomputeData; fn commit_only_precompute( &self, @@ -300,7 +341,9 @@ impl Precomputable for VidSchemeType { where B: AsRef<[u8]>, { - self.0.commit_only_precompute(payload) + self.0 + .commit_only_precompute(payload) + .map(|r| (VidCommitment(r.0), VidPrecomputeData(r.1))) } fn disperse_precompute( @@ -312,7 +355,7 @@ impl Precomputable for VidSchemeType { B: AsRef<[u8]>, { self.0 - .disperse_precompute(payload, data) + .disperse_precompute(payload, &data.0) .map(vid_disperse_conversion) } } @@ -325,10 +368,14 @@ impl Precomputable for VidSchemeType { /// and similarly for `Statement`. /// Thus, we accomplish type conversion via functions. fn vid_disperse_conversion(vid_disperse: VidDisperse) -> VidDisperse { + // TODO costly Vec copy. Is the compiler smart enough to optimize away + // this Vec, or shall we use unsafe code to cast `shares`? + let shares = vid_disperse.shares.into_iter().map(VidShare).collect(); + VidDisperse { - shares: vid_disperse.shares, - common: vid_disperse.common, - commit: vid_disperse.commit, + shares, + common: VidCommon(vid_disperse.common), + commit: VidCommitment(vid_disperse.commit), } } @@ -337,7 +384,7 @@ fn stmt_conversion(stmt: Statement<'_, VidSchemeType>) -> Statement<'_, Advz> { Statement { payload_subslice: stmt.payload_subslice, range: stmt.range, - commit: stmt.commit, - common: stmt.common, + commit: &stmt.commit.0, + common: &stmt.common.0, } } From 73acb47cb49f921d01cc2162ad6a1c62d6c9e8ec Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Sun, 27 Oct 2024 10:43:49 -0600 Subject: [PATCH 1265/1393] Canonicalize some variable names, minor cleanup (#3805) --- hotshot/src/tasks/task_state.rs | 6 +- task-impls/src/consensus/handlers.rs | 17 ++-- task-impls/src/da.rs | 14 +-- task-impls/src/helpers.rs | 94 ++++++++++-------- task-impls/src/network.rs | 40 ++++---- .../src/quorum_proposal_recv/handlers.rs | 32 +++--- task-impls/src/quorum_vote/mod.rs | 27 +++-- task-impls/src/request.rs | 24 ++--- task-impls/src/response.rs | 6 +- task-impls/src/transactions.rs | 15 ++- task-impls/src/vid.rs | 14 ++- task-impls/src/view_sync.rs | 99 ++++++++++--------- types/src/consensus.rs | 4 +- 13 files changed, 211 insertions(+), 181 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 625b0ff7cd..18ef2c91d2 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -48,7 +48,7 @@ impl, V: Versions> CreateTaskState async fn create_from(handle: &SystemContextHandle) -> Self { Self { network: Arc::clone(&handle.hotshot.network), - state: OuterConsensus::new(handle.hotshot.consensus()), + consensus: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, da_membership: handle.hotshot.memberships.da_membership.clone(), @@ -162,9 +162,9 @@ impl, V: Versions> CreateTaskState let cur_view = handle.cur_view().await; Self { - current_view: cur_view, + cur_view, next_view: cur_view, - current_epoch: handle.cur_epoch().await, + cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index b6f2b843d3..c1edf60bc7 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -169,8 +169,8 @@ pub(crate) async fn handle_view_change< )) .await; - let consensus = task_state.consensus.read().await; - consensus + let consensus_reader = task_state.consensus.read().await; + consensus_reader .metrics .current_view .set(usize::try_from(task_state.cur_view.u64()).unwrap()); @@ -181,7 +181,7 @@ pub(crate) async fn handle_view_change< == task_state.public_key { #[allow(clippy::cast_precision_loss)] - consensus + consensus_reader .metrics .view_duration_as_leader .add_point((cur_view_time - task_state.cur_view_time) as f64); @@ -193,10 +193,13 @@ pub(crate) async fn handle_view_change< if usize::try_from(task_state.cur_view.u64()).unwrap() > usize::try_from(task_state.last_decided_view.u64()).unwrap() { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(task_state.last_decided_view.u64()).unwrap(), - ); + consensus_reader + .metrics + .number_of_views_since_last_decide + .set( + usize::try_from(task_state.cur_view.u64()).unwrap() + - usize::try_from(task_state.last_decided_view.u64()).unwrap(), + ); } broadcast_event( diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 3210398798..c9fab1c7e3 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -149,12 +149,12 @@ impl, V: Versions> DaTaskState { - let curr_view = self.consensus.read().await.cur_view(); + let cur_view = self.consensus.read().await.cur_view(); ensure!( - curr_view <= proposal.data.view_number() + 1, + cur_view <= proposal.data.view_number() + 1, debug!( "Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", - curr_view, + cur_view, proposal.data.view_number() ) ); @@ -212,18 +212,20 @@ impl, V: Versions> DaTaskState( .await; let mem = Arc::clone(&quorum_membership); - let current_epoch = consensus.read().await.cur_epoch(); + let cur_epoch = consensus.read().await.cur_epoch(); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. @@ -114,7 +114,7 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem, current_epoch, upgrade_lock).await.is_ok() { + if quorum_proposal.validate_signature(&mem, cur_epoch, upgrade_lock).await.is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -133,12 +133,12 @@ pub(crate) async fn fetch_proposal( let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc - .is_valid_cert(quorum_membership.as_ref(), current_epoch, upgrade_lock) + .is_valid_cert(quorum_membership.as_ref(), cur_epoch, upgrade_lock) .await { bail!("Invalid justify_qc in proposal for view {}", *view_number); } - let mut consensus_write = consensus.write().await; + let mut consensus_writer = consensus.write().await; let leaf = Leaf::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header(&proposal.data.block_header), @@ -151,13 +151,14 @@ pub(crate) async fn fetch_proposal( delta: None, }, }; - if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { + if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { tracing::trace!("{e:?}"); } - consensus_write + consensus_writer .update_saved_leaves(leaf.clone(), upgrade_lock) .await; + broadcast_event( HotShotEvent::ValidatedStateUpdated(view_number, view).into(), &event_sender, @@ -369,21 +370,24 @@ pub(crate) async fn parent_leaf_and_state( consensus: OuterConsensus, upgrade_lock: &UpgradeLock, ) -> Result<(Leaf, Arc<::ValidatedState>)> { - let current_epoch = consensus.read().await.cur_epoch(); + let consensus_reader = consensus.read().await; + let cur_epoch = consensus_reader.cur_epoch(); ensure!( - quorum_membership.leader(next_proposal_view_number, current_epoch)? == public_key, + quorum_membership.leader(next_proposal_view_number, cur_epoch)? == public_key, info!( "Somehow we formed a QC but are not the leader for the next view {:?}", next_proposal_view_number ) ); - let parent_view_number = consensus.read().await.high_qc().view_number(); - if !consensus + let parent_view_number = consensus_reader.high_qc().view_number(); + let vsm_contains_parent_view = consensus .read() .await .validated_state_map() - .contains_key(&parent_view_number) - { + .contains_key(&parent_view_number); + drop(consensus_reader); + + if !vsm_contains_parent_view { let _ = fetch_proposal( parent_view_number, event_sender.clone(), @@ -397,6 +401,7 @@ pub(crate) async fn parent_leaf_and_state( .await .context(info!("Failed to fetch proposal"))?; } + let consensus_reader = consensus.read().await; let parent_view_number = consensus_reader.high_qc().view_number(); let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( @@ -463,17 +468,17 @@ pub async fn validate_proposal_safety_and_liveness< }; { - let mut consensus_write = task_state.consensus.write().await; - if let Err(e) = consensus_write.update_validated_state_map(view_number, view.clone()) { + let mut consensus_writer = task_state.consensus.write().await; + if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { tracing::trace!("{e:?}"); } - consensus_write + consensus_writer .update_saved_leaves(proposed_leaf.clone(), &task_state.upgrade_lock) .await; // Update our internal storage of the proposal. The proposal is valid, so // we swallow this error and just log if it occurs. - if let Err(e) = consensus_write.update_proposed_view(proposal.clone()) { + if let Err(e) = consensus_writer.update_proposed_view(proposal.clone()) { tracing::debug!("Internal proposal update failed; error = {e:#}"); }; } @@ -485,11 +490,11 @@ pub async fn validate_proposal_safety_and_liveness< ) .await; - let current_epoch = task_state.cur_epoch; + let cur_epoch = task_state.cur_epoch; UpgradeCertificate::validate( &proposal.data.upgrade_certificate, &task_state.quorum_membership, - current_epoch, + cur_epoch, &task_state.upgrade_lock, ) .await?; @@ -508,19 +513,19 @@ pub async fn validate_proposal_safety_and_liveness< // Liveness check. { - let read_consensus = task_state.consensus.read().await; - let liveness_check = justify_qc.view_number() > read_consensus.locked_view(); + let consensus_reader = task_state.consensus.read().await; + let liveness_check = justify_qc.view_number() > consensus_reader.locked_view(); // Safety check. // Check if proposal extends from the locked leaf. - let outcome = read_consensus.visit_leaf_ancestors( + let outcome = consensus_reader.visit_leaf_ancestors( justify_qc.view_number(), - Terminator::Inclusive(read_consensus.locked_view()), + Terminator::Inclusive(consensus_reader.locked_view()), false, |leaf, _, _| { // if leaf view no == locked view no then we're done, report success by // returning true - leaf.view_number() != read_consensus.locked_view() + leaf.view_number() != consensus_reader.locked_view() }, ); let safety_check = outcome.is_ok(); @@ -537,7 +542,7 @@ pub async fn validate_proposal_safety_and_liveness< .await; } - error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", consensus_reader.high_qc(), proposal.data.clone(), consensus_reader.locked_view()) }); } @@ -592,9 +597,9 @@ pub async fn validate_proposal_view_and_certs< proposal: &Proposal>, task_state: &mut QuorumProposalRecvTaskState, ) -> Result<()> { - let view = proposal.data.view_number(); + let view_number = proposal.data.view_number(); ensure!( - view >= task_state.cur_view, + view_number >= task_state.cur_view, "Proposal is from an older view {:?}", proposal.data.clone() ); @@ -609,19 +614,19 @@ pub async fn validate_proposal_view_and_certs< .await?; // Verify a timeout certificate OR a view sync certificate exists and is valid. - if proposal.data.justify_qc.view_number() != view - 1 { + if proposal.data.justify_qc.view_number() != view_number - 1 { let received_proposal_cert = proposal.data.proposal_certificate.clone().context(debug!( "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", - *view + *view_number ))?; match received_proposal_cert { ViewChangeEvidence::Timeout(timeout_cert) => { ensure!( - timeout_cert.data().view == view - 1, + timeout_cert.data().view == view_number - 1, "Timeout certificate for view {} was not for the immediately preceding view", - *view + *view_number ); ensure!( timeout_cert @@ -632,15 +637,15 @@ pub async fn validate_proposal_view_and_certs< ) .await, "Timeout certificate for view {} was invalid", - *view + *view_number ); } ViewChangeEvidence::ViewSync(view_sync_cert) => { ensure!( - view_sync_cert.view_number == view, + view_sync_cert.view_number == view_number, "View sync cert view number {:?} does not match proposal view number {:?}", view_sync_cert.view_number, - view + view_number ); // View sync certs must also be valid. @@ -740,15 +745,15 @@ pub(crate) async fn update_view, V )) .await; - let consensus = task_state.consensus.upgradable_read().await; - consensus + let consensus_reader = task_state.consensus.upgradable_read().await; + consensus_reader .metrics .current_view .set(usize::try_from(task_state.cur_view.u64()).unwrap()); let new_view_time = Utc::now().timestamp(); if is_old_view_leader { #[allow(clippy::cast_precision_loss)] - consensus + consensus_reader .metrics .view_duration_as_leader .add_point((new_view_time - task_state.cur_view_time) as f64); @@ -758,15 +763,18 @@ pub(crate) async fn update_view, V // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. if usize::try_from(task_state.cur_view.u64()).unwrap() - > usize::try_from(consensus.last_decided_view().u64()).unwrap() + > usize::try_from(consensus_reader.last_decided_view().u64()).unwrap() { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(consensus.last_decided_view().u64()).unwrap(), - ); + consensus_reader + .metrics + .number_of_views_since_last_decide + .set( + usize::try_from(task_state.cur_view.u64()).unwrap() + - usize::try_from(consensus_reader.last_decided_view().u64()).unwrap(), + ); } - let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; - if let Err(e) = consensus.update_view(new_view) { + let mut consensus_writer = ConsensusUpgradableReadLockGuard::upgrade(consensus_reader).await; + if let Err(e) = consensus_writer.update_view(new_view) { tracing::trace!("{e:?}"); } tracing::trace!("View updated successfully"); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index d8ef530b54..05ba43a133 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -280,12 +280,12 @@ impl< let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); - let state = Arc::clone(&self.consensus); + let consensus = Arc::clone(&self.consensus); async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), storage, - state, + consensus, view, ) .await @@ -306,11 +306,11 @@ impl< async fn maybe_record_action( maybe_action: Option, storage: Arc>, - state: Arc>>, + consensus: Arc>>, view: ::View, ) -> std::result::Result<(), ()> { if let Some(action) = maybe_action { - if !state.write().await.update_action(action, view) { + if !consensus.write().await.update_action(action, view) { tracing::warn!("Already actioned {:?} in view {:?}", action, view); return Err(()); } @@ -624,19 +624,21 @@ impl< sender, kind: message_kind, }; - let view = message.kind.view_number(); + let view_number = message.kind.view_number(); let committee_topic = self.quorum_membership.committee_topic(); - let da_committee = self.da_membership.committee_members(view, self.epoch); - let net = Arc::clone(&self.network); + let da_committee = self + .da_membership + .committee_members(view_number, self.epoch); + let network = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); - let state = Arc::clone(&self.consensus); + let consensus = Arc::clone(&self.consensus); let upgrade_lock = self.upgrade_lock.clone(); async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), - state, - view, + consensus, + view_number, ) .await .is_err() @@ -662,19 +664,21 @@ impl< let transmit_result = match transmit { TransmitType::Direct(recipient) => { - net.direct_message(serialized_message, recipient).await + network.direct_message(serialized_message, recipient).await } TransmitType::Broadcast => { - net.broadcast_message(serialized_message, committee_topic, broadcast_delay) + network + .broadcast_message(serialized_message, committee_topic, broadcast_delay) .await } TransmitType::DaCommitteeBroadcast => { - net.da_broadcast_message( - serialized_message, - da_committee.iter().cloned().collect(), - broadcast_delay, - ) - .await + network + .da_broadcast_message( + serialized_message, + da_committee.iter().cloned().collect(), + broadcast_delay, + ) + .await } }; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 3d1957c84a..a1800a8178 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -46,7 +46,7 @@ async fn validate_proposal_liveness, ) -> Result<()> { let view_number = proposal.data.view_number(); - let mut consensus_write = task_state.consensus.write().await; + let mut consensus_writer = task_state.consensus.write().await; let leaf = Leaf::from_quorum_proposal(&proposal.data); @@ -61,10 +61,10 @@ async fn validate_proposal_liveness consensus_write.locked_view(); + proposal.data.justify_qc.clone().view_number() > consensus_writer.locked_view(); - drop(consensus_write); + drop(consensus_writer); // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. broadcast_event( @@ -141,8 +141,8 @@ pub(crate) async fn handle_quorum_proposal_recv< ) .await { - let consensus = task_state.consensus.read().await; - consensus.metrics.invalid_qc.update(1); + let consensus_reader = task_state.consensus.read().await; + consensus_reader.metrics.invalid_qc.update(1); bail!("Invalid justify_qc in proposal for view {}", *view_number); } @@ -181,11 +181,11 @@ pub(crate) async fn handle_quorum_proposal_recv< .await .ok(), }; - let consensus_read = task_state.consensus.read().await; + let consensus_reader = task_state.consensus.read().await; let parent = match parent_leaf { Some(leaf) => { - if let (Some(state), _) = consensus_read.state_and_delta(leaf.view_number()) { + if let (Some(state), _) = consensus_reader.state_and_delta(leaf.view_number()) { Some((leaf, Arc::clone(&state))) } else { bail!("Parent state not found! Consensus internally inconsistent"); @@ -194,7 +194,7 @@ pub(crate) async fn handle_quorum_proposal_recv< None => None, }; - if justify_qc.view_number() > consensus_read.high_qc().view_number { + if justify_qc.view_number() > consensus_reader.high_qc().view_number { if let Err(e) = task_state .storage .write() @@ -205,13 +205,13 @@ pub(crate) async fn handle_quorum_proposal_recv< bail!("Failed to store High QC, not voting; error = {:?}", e); } } - drop(consensus_read); + drop(consensus_reader); - let mut consensus_write = task_state.consensus.write().await; - if let Err(e) = consensus_write.update_high_qc(justify_qc.clone()) { + let mut consensus_writer = task_state.consensus.write().await; + if let Err(e) = consensus_writer.update_high_qc(justify_qc.clone()) { tracing::trace!("{e:?}"); } - drop(consensus_write); + drop(consensus_writer); broadcast_event( HotShotEvent::HighQcUpdated(justify_qc.clone()).into(), diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 2281dc2660..a25b6d2213 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -546,10 +546,9 @@ impl, V: Versions> QuorumVoteTaskS event_receiver: Receiver>>, event_sender: Sender>>, ) -> Result<()> { - let current_epoch = self.consensus.read().await.cur_epoch(); - match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { + let cur_epoch = self.consensus.read().await.cur_epoch(); tracing::trace!("Received Proposal for view {}", *proposal.view_number()); // Handle the event before creating the dependency task. @@ -563,7 +562,7 @@ impl, V: Versions> QuorumVoteTaskS self.create_dependency_task_if_new( proposal.view_number, - current_epoch, + cur_epoch, event_receiver, &event_sender, Some(Arc::clone(&event)), @@ -579,15 +578,11 @@ impl, V: Versions> QuorumVoteTaskS "Received DAC for an older view." ); - let current_epoch = self.consensus.read().await.cur_epoch(); + let cur_epoch = self.consensus.read().await.cur_epoch(); // Validate the DAC. ensure!( - cert.is_valid_cert( - self.da_membership.as_ref(), - current_epoch, - &self.upgrade_lock - ) - .await, + cert.is_valid_cert(self.da_membership.as_ref(), cur_epoch, &self.upgrade_lock) + .await, warn!("Invalid DAC") ); @@ -604,7 +599,7 @@ impl, V: Versions> QuorumVoteTaskS .await; self.create_dependency_task_if_new( view, - current_epoch, + cur_epoch, event_receiver, &event_sender, None, @@ -621,7 +616,7 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = &disperse.data.payload_commitment; - let current_epoch = self.consensus.read().await.cur_epoch(); + let cur_epoch = self.consensus.read().await.cur_epoch(); // Check that the signature is valid ensure!( @@ -632,15 +627,15 @@ impl, V: Versions> QuorumVoteTaskS // ensure that the VID share was sent by a DA member OR the view leader ensure!( self.da_membership - .committee_members(view, current_epoch) + .committee_members(view, cur_epoch) .contains(sender) - || *sender == self.quorum_membership.leader(view, current_epoch)?, + || *sender == self.quorum_membership.leader(view, cur_epoch)?, "VID share was not sent by a DA member or the view leader." ); // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results - match vid_scheme(self.quorum_membership.total_nodes(current_epoch)).verify_share( + match vid_scheme(self.quorum_membership.total_nodes(cur_epoch)).verify_share( &disperse.data.share, &disperse.data.common, payload_commitment, @@ -668,7 +663,7 @@ impl, V: Versions> QuorumVoteTaskS .await; self.create_dependency_task_if_new( view, - current_epoch, + cur_epoch, event_receiver, &event_sender, None, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index c7bc3c8957..1344c31ee6 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -54,7 +54,7 @@ pub struct NetworkRequestState> { pub network: Arc, /// Consensus shared state so we can check if we've gotten the information /// before sending a request - pub state: OuterConsensus, + pub consensus: OuterConsensus, /// Last seen view, we won't request for proposals before older than this view pub view: TYPES::View, /// Delay before requesting peers @@ -97,18 +97,18 @@ impl> TaskState for NetworkRequest match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.view_number(); - let current_epoch = self.state.read().await.cur_epoch(); + let cur_epoch = self.consensus.read().await.cur_epoch(); // If we already have the VID shares for the next view, do nothing. if prop_view >= self.view && !self - .state + .consensus .read() .await .vid_shares() .contains_key(&prop_view) { - self.spawn_requests(prop_view, current_epoch, sender, receiver); + self.spawn_requests(prop_view, cur_epoch, sender, receiver); } Ok(()) } @@ -176,7 +176,7 @@ impl> NetworkRequestState> NetworkRequestState> NetworkRequestState, + consensus: &OuterConsensus, sender: &Sender>>, public_key: &::SignatureKey, view: &TYPES::View, shutdown_flag: &Arc, ) -> bool { - let state = state.read().await; + let consensus_reader = consensus.read().await; let cancel = shutdown_flag.load(Ordering::Relaxed) - || state.vid_shares().contains_key(view) - || state.cur_view() > *view; + || consensus_reader.vid_shares().contains_key(view) + || consensus_reader.cur_view() > *view; if cancel { - if let Some(Some(vid_share)) = state + if let Some(Some(vid_share)) = consensus_reader .vid_shares() .get(view) .map(|shares| shares.get(public_key).cloned()) @@ -356,7 +356,7 @@ impl> NetworkRequestState NetworkResponseState { .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { - let current_epoch = self.consensus.read().await.cur_epoch(); + let cur_epoch = self.consensus.read().await.cur_epoch(); if Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.quorum), &self.private_key, - current_epoch, + cur_epoch, ) .await .is_none() @@ -169,7 +169,7 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, - current_epoch, + cur_epoch, ) .await?; } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 4f91a7b93d..0e82920930 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -65,10 +65,13 @@ const RETRY_DELAY: Duration = Duration::from_millis(100); pub struct BuilderResponse { /// Fee information pub fee: BuilderFee, + /// Block payload pub block_payload: TYPES::BlockPayload, + /// Block metadata pub metadata: >::Metadata, + /// Optional precomputed commitment pub precompute_data: Option, } @@ -101,16 +104,22 @@ pub struct TransactionTaskState, V /// This Nodes Public Key pub public_key: TYPES::SignatureKey, + /// Our Private Key pub private_key: ::PrivateKey, + /// InstanceState pub instance_state: Arc, + /// This state's ID pub id: u64, + /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// auction results provider pub auction_results_provider: Arc, + /// fallback builder url pub fallback_builder_url: Url, } @@ -517,11 +526,11 @@ impl, V: Versions> TransactionTask &self, block_view: TYPES::View, ) -> Result<(TYPES::View, VidCommitment)> { - let consensus = self.consensus.read().await; + let consensus_reader = self.consensus.read().await; let mut target_view = TYPES::View::new(block_view.saturating_sub(1)); loop { - let view_data = consensus + let view_data = consensus_reader .validated_state_map() .get(&target_view) .context(info!( @@ -536,7 +545,7 @@ impl, V: Versions> TransactionTask leaf: leaf_commitment, .. } => { - let leaf = consensus.saved_leaves().get(leaf_commitment).context + let leaf = consensus_reader.saved_leaves().get(leaf_commitment).context (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 925e028ffd..500170cc9c 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -31,20 +31,28 @@ use crate::{ pub struct VidTaskState> { /// View number this view is executing in. pub cur_view: TYPES::View, + /// Epoch number this node is executing in. pub cur_epoch: TYPES::Epoch, + /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, + /// The underlying network pub network: Arc, + /// Membership for the quorum pub membership: Arc, + /// This Nodes Public Key pub public_key: TYPES::SignatureKey, + /// Our Private Key pub private_key: ::PrivateKey, + /// The view and ID of the current vote collection task, if there is one. pub vote_collector: Option<(TYPES::View, usize, usize)>, + /// This state's ID pub id: u64, } @@ -81,13 +89,13 @@ impl> VidTaskState { .await; let payload_commitment = vid_disperse.payload_commitment.clone(); let shares = VidDisperseShare::from_vid_disperse(vid_disperse.clone()); - let mut consensus = self.consensus.write().await; + let mut consensus_writer = self.consensus.write().await; for share in shares { if let Some(disperse) = share.to_proposal(&self.private_key) { - consensus.update_vid_shares(*view_number, disperse); + consensus_writer.update_vid_shares(*view_number, disperse); } } - drop(consensus); + drop(consensus_writer); // send the commitment and metadata to consensus for block building broadcast_event( diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 46e4b11c12..28dfe9d935 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -68,19 +68,26 @@ type RelayMap = HashMap< /// Main view sync task state pub struct ViewSyncTaskState, V: Versions> { /// View HotShot is currently in - pub current_view: TYPES::View, + pub cur_view: TYPES::View, + /// View HotShot wishes to be in pub next_view: TYPES::View, + /// Epoch HotShot is currently in - pub current_epoch: TYPES::Epoch, + pub cur_epoch: TYPES::Epoch, + /// The underlying network pub network: Arc, + /// Membership for the quorum pub membership: Arc, + /// This Nodes Public Key pub public_key: TYPES::SignatureKey, + /// Our Private Key pub private_key: ::PrivateKey, + /// Our node id; for logging pub id: u64, @@ -94,9 +101,11 @@ pub struct ViewSyncTaskState, V: V pub pre_commit_relay_map: RwLock< RelayMap, ViewSyncPreCommitCertificate2, V>, >, + /// Map of commit vote accumulates for the relay pub commit_relay_map: RwLock, ViewSyncCommitCertificate2, V>>, + /// Map of finalize vote accumulates for the relay pub finalize_relay_map: RwLock< RelayMap, ViewSyncFinalizeCertificate2, V>, @@ -134,31 +143,43 @@ impl, V: Versions> TaskState pub struct ViewSyncReplicaTaskState, V: Versions> { /// Timeout for view sync rounds pub view_sync_timeout: Duration, + /// Current round HotShot is in - pub current_view: TYPES::View, + pub cur_view: TYPES::View, + /// Round HotShot wishes to be in pub next_view: TYPES::View, + /// Current epoch HotShot is in - pub current_epoch: TYPES::Epoch, + pub cur_epoch: TYPES::Epoch, + /// The relay index we are currently on pub relay: u64, + /// Whether we have seen a finalized certificate pub finalized: bool, + /// Whether we have already sent a view change event for `next_view` pub sent_view_change_event: bool, + /// Timeout task handle, when it expires we try the next relay pub timeout_task: Option>, + /// Our node id; for logging pub id: u64, /// The underlying network pub network: Arc, + /// Membership for the quorum pub membership: Arc, + /// This Nodes Public Key pub public_key: TYPES::SignatureKey, + /// Our Private Key pub private_key: ::PrivateKey, + /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, } @@ -184,7 +205,7 @@ impl, V: Versions> TaskState } impl, V: Versions> ViewSyncTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task pub async fn send_to_or_create_replica( @@ -195,7 +216,7 @@ impl, V: Versions> ViewSyncTaskSta ) { // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it - if self.current_view > view { + if self.cur_view > view { tracing::debug!("Already in a higher view than the view sync message"); return; } @@ -220,9 +241,9 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a replica task already running, so start one let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { - current_view: view, + cur_view: view, next_view: view, - current_epoch: self.current_epoch, + cur_epoch: self.cur_epoch, relay: 0, finalized: false, sent_view_change_event: false, @@ -248,7 +269,7 @@ impl, V: Versions> ViewSyncTaskSta task_map.insert(view, replica_state); } - #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task pub async fn handle( @@ -304,9 +325,7 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a relay task already running, so start one ensure!( - self.membership - .leader(vote_view + relay, self.current_epoch)? - == self.public_key, + self.membership.leader(vote_view + relay, self.cur_epoch)? == self.public_key, "View sync vote sent to wrong leader" ); @@ -314,7 +333,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, - epoch: self.current_epoch, + epoch: self.cur_epoch, id: self.id, }; let vote_collector = @@ -346,9 +365,7 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a relay task already running, so start one ensure!( - self.membership - .leader(vote_view + relay, self.current_epoch)? - == self.public_key, + self.membership.leader(vote_view + relay, self.cur_epoch)? == self.public_key, debug!("View sync vote sent to wrong leader") ); @@ -356,7 +373,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, - epoch: self.current_epoch, + epoch: self.cur_epoch, id: self.id, }; @@ -388,9 +405,7 @@ impl, V: Versions> ViewSyncTaskSta // We do not have a relay task already running, so start one ensure!( - self.membership - .leader(vote_view + relay, self.current_epoch)? - == self.public_key, + self.membership.leader(vote_view + relay, self.cur_epoch)? == self.public_key, debug!("View sync vote sent to wrong leader") ); @@ -398,7 +413,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, - epoch: self.current_epoch, + epoch: self.cur_epoch, id: self.id, }; let vote_collector = @@ -411,22 +426,22 @@ impl, V: Versions> ViewSyncTaskSta &HotShotEvent::ViewChange(new_view) => { let new_view = TYPES::View::new(*new_view); - if self.current_view < new_view { + if self.cur_view < new_view { tracing::debug!( "Change from view {} to view {} in view sync task", - *self.current_view, + *self.cur_view, *new_view ); - self.current_view = new_view; - self.next_view = self.current_view; + self.cur_view = new_view; + self.next_view = self.cur_view; self.num_timeouts_tracked = 0; // Garbage collect old tasks // We could put this into a separate async task, but that would require making several fields on ViewSyncTaskState thread-safe and harm readability. In the common case this will have zero tasks to clean up. // cancel poll for votes // run GC - for i in *self.last_garbage_collected_view..*self.current_view { + for i in *self.last_garbage_collected_view..*self.cur_view { self.replica_task_map .write() .await @@ -445,18 +460,18 @@ impl, V: Versions> ViewSyncTaskSta .remove_entry(&TYPES::View::new(i)); } - self.last_garbage_collected_view = self.current_view - 1; + self.last_garbage_collected_view = self.cur_view - 1; } } &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it ensure!( - view_number > self.current_view, + view_number > self.cur_view, debug!("Discarding old timeout vote.") ); self.num_timeouts_tracked += 1; - let leader = self.membership.leader(view_number, self.current_epoch)?; + let leader = self.membership.leader(view_number, self.cur_epoch)?; tracing::warn!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), @@ -480,11 +495,9 @@ impl, V: Versions> ViewSyncTaskSta .await; } else { // If this is the first timeout we've seen advance to the next view - self.current_view = view_number; + self.cur_view = view_number; broadcast_event( - Arc::new(HotShotEvent::ViewChange(TYPES::View::new( - *self.current_view, - ))), + Arc::new(HotShotEvent::ViewChange(TYPES::View::new(*self.cur_view))), &event_stream, ) .await; @@ -500,7 +513,7 @@ impl, V: Versions> ViewSyncTaskSta impl, V: Versions> ViewSyncReplicaTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, @@ -520,11 +533,7 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert( - self.membership.as_ref(), - self.current_epoch, - &self.upgrade_lock, - ) + .is_valid_cert(self.membership.as_ref(), self.cur_epoch, &self.upgrade_lock) .await { tracing::error!("Not valid view sync cert! {:?}", certificate.data()); @@ -606,11 +615,7 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert( - self.membership.as_ref(), - self.current_epoch, - &self.upgrade_lock, - ) + .is_valid_cert(self.membership.as_ref(), self.cur_epoch, &self.upgrade_lock) .await { tracing::error!("Not valid view sync cert! {:?}", certificate.data()); @@ -702,11 +707,7 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert( - self.membership.as_ref(), - self.current_epoch, - &self.upgrade_lock, - ) + .is_valid_cert(self.membership.as_ref(), self.cur_epoch, &self.upgrade_lock) .await { tracing::error!("Not valid view sync cert! {:?}", certificate.data()); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index c40091f857..ec872af603 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -827,10 +827,10 @@ impl Consensus { let txns = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); let vid = VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, None).await; let shares = VidDisperseShare::from_vid_disperse(vid); - let mut consensus = consensus.write().await; + let mut consensus_writer = consensus.write().await; for share in shares { if let Some(prop) = share.to_proposal(private_key) { - consensus.update_vid_shares(view, prop); + consensus_writer.update_vid_shares(view, prop); } } Some(()) From 98a042ba30a17885fabc66feb85e2b39c239e467 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 28 Oct 2024 14:48:56 -0400 Subject: [PATCH 1266/1393] store view sync vote as action (#3798) * store view sync vote as action * don't block view sync votes --- task-impls/src/network.rs | 9 ++++++++- types/src/event.rs | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 05ba43a133..f9cbcff865 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -309,11 +309,16 @@ impl< consensus: Arc>>, view: ::View, ) -> std::result::Result<(), ()> { - if let Some(action) = maybe_action { + if let Some(mut action) = maybe_action { if !consensus.write().await.update_action(action, view) { tracing::warn!("Already actioned {:?} in view {:?}", action, view); return Err(()); } + // If the action was view sync record it as a vote, but we don't + // want to limit to 1 View sycn vote above so change the action here. + if matches!(action, HotShotAction::ViewSyncVote) { + action = HotShotAction::Vote; + } match storage.write().await.record_action(view, action).await { Ok(()) => Ok(()), Err(e) => { @@ -460,6 +465,7 @@ impl< )) } HotShotEvent::ViewSyncCommitVoteSend(vote) => { + *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; let leader = match self.quorum_membership.leader(view_number, self.epoch) { Ok(l) => l, @@ -482,6 +488,7 @@ impl< )) } HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { + *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; let leader = match self.quorum_membership.leader(view_number, self.epoch) { Ok(l) => l, diff --git a/types/src/event.rs b/types/src/event.rs index ca833c9f5d..f9d48779e8 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -184,6 +184,8 @@ pub enum EventType { pub enum HotShotAction { /// A quorum vote was sent Vote, + /// View Sync Vote + ViewSyncVote, /// A quorum proposal was sent Propose, /// DA proposal was sent From 373d621bb1f55149e99da26ae63b2058a00fc41a Mon Sep 17 00:00:00 2001 From: Gus Gutoski Date: Mon, 28 Oct 2024 15:40:15 -0400 Subject: [PATCH 1267/1393] Revert crates/types/src/vid.rs to 01ea0ab9 (#3813) * revert crates/types/src/vid.rs to 01ea0ab9 * clippy pacification * more clippy * just fmt_check --- example-types/src/block_types.rs | 6 +- task-impls/src/da.rs | 2 +- task-impls/src/quorum_proposal/handlers.rs | 2 +- task-impls/src/quorum_vote/mod.rs | 4 +- task-impls/src/transactions.rs | 39 +++---- task-impls/src/vid.rs | 2 +- testing/tests/tests_1/da_task.rs | 36 ++----- testing/tests/tests_1/vid_task.rs | 2 +- types/Cargo.toml | 1 - types/src/data.rs | 4 +- types/src/utils.rs | 4 +- types/src/vid.rs | 115 ++++++--------------- 12 files changed, 73 insertions(+), 144 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 05ff656ecf..dc5d5f2045 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -370,7 +370,7 @@ impl< } fn payload_commitment(&self) -> VidCommitment { - self.payload_commitment.clone() + self.payload_commitment } fn metadata(&self) -> &>::Metadata { @@ -395,7 +395,9 @@ impl Committable for TestBlockHeader { ) .constant_str("payload commitment") .fixed_size_bytes( - >::payload_commitment(self).as_ref(), + >::payload_commitment(self) + .as_ref() + .as_ref(), ) .finalize() } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index c9fab1c7e3..bada6f4ddf 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -200,7 +200,7 @@ impl, V: Versions> DaTaskState HandleDepOutput for ProposalDependencyHandle< auction_result, ) => { commit_and_metadata = Some(CommitmentAndMetadata { - commitment: payload_commitment.clone(), + commitment: *payload_commitment, builder_commitment: builder_commitment.clone(), metadata: metadata.clone(), fees: fees.clone(), diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index a25b6d2213..f78bf15b59 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -310,7 +310,7 @@ impl + 'static, V: Versions> Handl return; } } else { - payload_commitment = Some(cert_payload_comm.clone()); + payload_commitment = Some(*cert_payload_comm); } } HotShotEvent::VidShareValidated(share) => { @@ -322,7 +322,7 @@ impl + 'static, V: Versions> Handl return; } } else { - payload_commitment = Some(vid_payload_commitment.clone()); + payload_commitment = Some(*vid_payload_commitment); } } _ => {} diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 0e82920930..69ea07f1a5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -297,9 +297,7 @@ impl, V: Versions> TransactionTask self.builder_timeout.saturating_sub(start.elapsed()), async { let client = BuilderClientMarketplace::new(url); - client - .bundle(*parent_view, parent_hash.clone(), *block_view) - .await + client.bundle(*parent_view, parent_hash, *block_view).await }, )); } @@ -539,7 +537,7 @@ impl, V: Versions> TransactionTask match &view_data.view_inner { ViewInner::Da { payload_commitment } => { - return Ok((target_view, payload_commitment.clone())) + return Ok((target_view, *payload_commitment)) } ViewInner::Leaf { leaf: leaf_commitment, @@ -590,7 +588,7 @@ impl, V: Versions> TransactionTask match async_timeout( self.builder_timeout .saturating_sub(task_start_time.elapsed()), - self.block_from_builder(parent_comm.clone(), parent_view, &parent_comm_sig), + self.block_from_builder(parent_comm, parent_view, &parent_comm_sig), ) .await { @@ -631,23 +629,20 @@ impl, V: Versions> TransactionTask .builder_clients .iter() .enumerate() - .map(|(builder_idx, client)| { - let parent_comm = parent_comm.clone(); - async move { - client - .available_blocks( - parent_comm, - view_number.u64(), - self.public_key.clone(), - parent_comm_sig, - ) - .await - .map(move |blocks| { - blocks - .into_iter() - .map(move |block_info| (block_info, builder_idx)) - }) - } + .map(|(builder_idx, client)| async move { + client + .available_blocks( + parent_comm, + view_number.u64(), + self.public_key.clone(), + parent_comm_sig, + ) + .await + .map(move |blocks| { + blocks + .into_iter() + .map(move |block_info| (block_info, builder_idx)) + }) }) .collect::>(); let mut results = Vec::with_capacity(self.builder_clients.len()); diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 500170cc9c..68afabf6d0 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -87,7 +87,7 @@ impl> VidTaskState { vid_precompute.clone(), ) .await; - let payload_commitment = vid_disperse.payload_commitment.clone(); + let payload_commitment = vid_disperse.payload_commitment; let shares = VidDisperseShare::from_vid_disperse(vid_disperse.clone()); let mut consensus_writer = self.consensus.write().await; for share in shares { diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 28a35bbe0b..ad417a0431 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -69,13 +69,8 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote( - DaData { - payload_commit: payload_commit.clone(), - }, - &handle, - ) - .await, + view.create_da_vote(DaData { payload_commit }, &handle) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -87,13 +82,8 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote( - DaData { - payload_commit: payload_commit.clone(), - }, - &handle, - ) - .await, + view.create_da_vote(DaData { payload_commit }, &handle) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -180,13 +170,8 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote( - DaData { - payload_commit: payload_commit.clone(), - }, - &handle, - ) - .await, + view.create_da_vote(DaData { payload_commit }, &handle) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -198,13 +183,8 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote( - DaData { - payload_commit: payload_commit.clone(), - }, - &handle, - ) - .await, + view.create_da_vote(DaData { payload_commit }, &handle) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index b5ef8335f7..c2ca9aec09 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -69,7 +69,7 @@ async fn test_vid_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let (_, vid_precompute) = vid.commit_only_precompute(&encoded_transactions).unwrap(); - let payload_commitment = vid_disperse.commit.clone(); + let payload_commitment = vid_disperse.commit; let signature = ::SignatureKey::sign( handle.private_key(), diff --git a/types/Cargo.toml b/types/Cargo.toml index 25d47d7d9e..e9ad6368b5 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -38,7 +38,6 @@ time = { workspace = true } tracing = { workspace = true } typenum = { workspace = true } derivative = "2" -derive_more = { workspace = true, features = ["display"] } # TODO promote display feature to workspace? jf-vid = { workspace = true } jf-pcs = { workspace = true } jf-signature = { workspace = true, features = ["schnorr"] } diff --git a/types/src/data.rs b/types/src/data.rs index 5528a1835c..db891c8a0b 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -282,7 +282,7 @@ impl VidDisperseShare { recipient_key, view_number: vid_disperse.view_number, common: vid_disperse.common.clone(), - payload_commitment: vid_disperse.payload_commitment.clone(), + payload_commitment: vid_disperse.payload_commitment, }) .collect() } @@ -345,7 +345,7 @@ impl VidDisperseShare { recipient_key, view_number: vid_disperse_proposal.data.view_number, common: vid_disperse_proposal.data.common.clone(), - payload_commitment: vid_disperse_proposal.data.payload_commitment.clone(), + payload_commitment: vid_disperse_proposal.data.payload_commitment, }, signature: vid_disperse_proposal.signature.clone(), _pd: vid_disperse_proposal._pd, diff --git a/types/src/utils.rs b/types/src/utils.rs index 699d1a3830..e3d19a8286 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -58,7 +58,7 @@ impl Clone for ViewInner { fn clone(&self) -> Self { match self { Self::Da { payload_commitment } => Self::Da { - payload_commitment: payload_commitment.clone(), + payload_commitment: *payload_commitment, }, Self::Leaf { leaf, state, delta } => Self::Leaf { leaf: *leaf, @@ -123,7 +123,7 @@ impl ViewInner { #[must_use] pub fn payload_commitment(&self) -> Option { if let Self::Da { payload_commitment } = self { - Some(payload_commitment.clone()) + Some(*payload_commitment) } else { None } diff --git a/types/src/vid.rs b/types/src/vid.rs index 8e936f7416..7d3bfb21a7 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -4,20 +4,20 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -//! This module provides an opaque constructor [`vid_scheme`] that returns a new -//! instance of a VID scheme. +//! This module provides: +//! - an opaque constructor [`vid_scheme`] that returns a new instance of a +//! VID scheme. +//! - type aliases [`VidCommitment`], [`VidCommon`], [`VidShare`] +//! for [`VidScheme`] assoc types. //! -//! Purpose: the specific choice of VID scheme is an implementation detail. We -//! want all communication with the VID scheme to occur only via the API exposed -//! in the [`VidScheme`] trait, but limitations of Rust make it difficult to -//! achieve this level of abstraction. Hence, there's a lot of boilerplate code -//! in this module. +//! Purpose: the specific choice of VID scheme is an implementation detail. +//! This crate and all downstream crates should talk to the VID scheme only +//! via the traits exposed here. #![allow(missing_docs)] use std::{fmt::Debug, ops::Range}; use ark_bn254::Bn254; -use derive_more::Display; use jf_pcs::{ prelude::{UnivariateKzgPCS, UnivariateUniversalParams}, PolynomialCommitmentScheme, @@ -34,7 +34,6 @@ use jf_vid::{ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use sha2::Sha256; -use tagged_base64::TaggedBase64; use crate::{ constants::SRS_DEGREE, @@ -104,6 +103,14 @@ pub fn vid_scheme_for_test(num_storage_nodes: usize) -> VidSchemeType { ) } +/// VID commitment type +pub type VidCommitment = ::Commit; +/// VID common type +pub type VidCommon = ::Common; +/// VID share type +pub type VidShare = ::Share; +/// VID PrecomputeData type +pub type VidPrecomputeData = ::PrecomputeData; /// VID proposal type pub type VidProposal = ( Proposal>, @@ -122,22 +129,6 @@ type Advz = advz::AdvzGPU<'static, E, H>; #[derive(Clone)] pub struct VidSchemeType(Advz); -/// Newtype wrapper for assoc type [`VidScheme::Commit`]. -#[derive(Clone, Debug, Deserialize, Display, Eq, PartialEq, Hash, Serialize)] -pub struct VidCommitment(::Commit); - -/// Newtype wrapper for assoc type [`VidScheme::Common`]. -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] -pub struct VidCommon(::Common); - -/// Newtype wrapper for assoc type [`VidScheme::Share`]. -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] -pub struct VidShare(::Share); - -/// Newtype wrapper for assoc type [`Precomputable::PrecomputeData`]. -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] -pub struct VidPrecomputeData(::PrecomputeData); - /// Newtype wrapper for a large payload range proof. /// /// Useful for namespace proofs. @@ -207,15 +198,15 @@ type H = Sha256; // type alias impl trait (TAIT): // [rfcs/text/2515-type_alias_impl_trait.md at master · rust-lang/rfcs](https://github.com/rust-lang/rfcs/blob/master/text/2515-type_alias_impl_trait.md) impl VidScheme for VidSchemeType { - type Commit = VidCommitment; - type Share = VidShare; - type Common = VidCommon; + type Commit = ::Commit; + type Share = ::Share; + type Common = ::Common; fn commit_only(&mut self, payload: B) -> VidResult where B: AsRef<[u8]>, { - self.0.commit_only(payload).map(VidCommitment) + self.0.commit_only(payload) } fn disperse(&mut self, payload: B) -> VidResult> @@ -231,32 +222,27 @@ impl VidScheme for VidSchemeType { common: &Self::Common, commit: &Self::Commit, ) -> VidResult> { - self.0.verify_share(&share.0, &common.0, &commit.0) + self.0.verify_share(share, common, commit) } fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { - // TODO costly Vec copy. Is the compiler smart enough to optimize away - // this Vec, or shall we use unsafe code to cast `shares`? - // It's only `recover_payload` so who cares? - let shares: Vec<_> = shares.iter().map(|s| s.0.clone()).collect(); - - self.0.recover_payload(&shares, &common.0) + self.0.recover_payload(shares, common) } fn is_consistent(commit: &Self::Commit, common: &Self::Common) -> VidResult<()> { - ::is_consistent(&commit.0, &common.0) + ::is_consistent(commit, common) } fn get_payload_byte_len(common: &Self::Common) -> u32 { - ::get_payload_byte_len(&common.0) + ::get_payload_byte_len(common) } fn get_num_storage_nodes(common: &Self::Common) -> u32 { - ::get_num_storage_nodes(&common.0) + ::get_num_storage_nodes(common) } fn get_multiplicity(common: &Self::Common) -> u32 { - ::get_multiplicity(&common.0) + ::get_multiplicity(common) } /// Helper function for testing only @@ -266,33 +252,6 @@ impl VidScheme for VidSchemeType { } } -impl From for TaggedBase64 { - fn from(value: VidCommitment) -> Self { - TaggedBase64::from(value.0) - } -} - -impl<'a> TryFrom<&'a TaggedBase64> for VidCommitment { - type Error = <::Commit as TryFrom<&'a TaggedBase64>>::Error; - - fn try_from(value: &'a TaggedBase64) -> Result { - ::Commit::try_from(value).map(Self) - } -} - -// TODO add AsRef trait bound upstream. -// -// We impl `AsRef<[u8; _]>` instead of `AsRef<[u8]>` to maintain backward -// compatibility for downstream code that re-hashes the `VidCommitment`. -// Specifically, if we support only `AsRef<[u8]>` then code that uses -// `Committable` must switch `fixed_size_bytes` to `var_size_bytes`, which -// prepends length data into the hash, thus changing the hash. -impl AsRef<[u8; 32]> for VidCommitment { - fn as_ref(&self) -> &[u8; 32] { - self.0.as_ref().as_ref() - } -} - impl PayloadProver for VidSchemeType { fn payload_proof(&self, payload: B, range: Range) -> VidResult where @@ -332,7 +291,7 @@ impl PayloadProver for VidSchemeType { } impl Precomputable for VidSchemeType { - type PrecomputeData = VidPrecomputeData; + type PrecomputeData = ::PrecomputeData; fn commit_only_precompute( &self, @@ -341,9 +300,7 @@ impl Precomputable for VidSchemeType { where B: AsRef<[u8]>, { - self.0 - .commit_only_precompute(payload) - .map(|r| (VidCommitment(r.0), VidPrecomputeData(r.1))) + self.0.commit_only_precompute(payload) } fn disperse_precompute( @@ -355,7 +312,7 @@ impl Precomputable for VidSchemeType { B: AsRef<[u8]>, { self.0 - .disperse_precompute(payload, &data.0) + .disperse_precompute(payload, data) .map(vid_disperse_conversion) } } @@ -368,14 +325,10 @@ impl Precomputable for VidSchemeType { /// and similarly for `Statement`. /// Thus, we accomplish type conversion via functions. fn vid_disperse_conversion(vid_disperse: VidDisperse) -> VidDisperse { - // TODO costly Vec copy. Is the compiler smart enough to optimize away - // this Vec, or shall we use unsafe code to cast `shares`? - let shares = vid_disperse.shares.into_iter().map(VidShare).collect(); - VidDisperse { - shares, - common: VidCommon(vid_disperse.common), - commit: VidCommitment(vid_disperse.commit), + shares: vid_disperse.shares, + common: vid_disperse.common, + commit: vid_disperse.commit, } } @@ -384,7 +337,7 @@ fn stmt_conversion(stmt: Statement<'_, VidSchemeType>) -> Statement<'_, Advz> { Statement { payload_subslice: stmt.payload_subslice, range: stmt.range, - commit: &stmt.commit.0, - common: &stmt.common.0, + commit: stmt.commit, + common: stmt.common, } } From 6dd722252813be2128360c86a1a943dade2ab4fb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 28 Oct 2024 16:00:01 -0400 Subject: [PATCH 1268/1393] proposal dep takes inactive receiver and cancel tasks (#3784) --- hotshot/src/tasks/task_state.rs | 2 +- task-impls/src/helpers.rs | 6 ++--- task-impls/src/quorum_proposal/handlers.rs | 11 ++++----- task-impls/src/quorum_proposal/mod.rs | 26 +++++++++++++++------- task-impls/src/quorum_proposal_recv/mod.rs | 1 - 5 files changed, 28 insertions(+), 18 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 18ef2c91d2..d24cd2e646 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -254,7 +254,7 @@ impl, V: Versions> CreateTaskState Self { latest_proposed_view: handle.cur_view().await, - proposal_dependencies: HashMap::new(), + proposal_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: OuterConsensus::new(consensus), diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 07b2a3604c..745340ca1f 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -10,7 +10,7 @@ use std::{ sync::Arc, }; -use async_broadcast::{Receiver, SendError, Sender}; +use async_broadcast::{InactiveReceiver, Receiver, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -363,7 +363,7 @@ pub async fn decide_from_proposal( pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::View, event_sender: &Sender>>, - event_receiver: &Receiver>>, + event_receiver: &InactiveReceiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -391,7 +391,7 @@ pub(crate) async fn parent_leaf_and_state( let _ = fetch_proposal( parent_view_number, event_sender.clone(), - event_receiver.clone(), + event_receiver.activate_cloned(), quorum_membership, consensus.clone(), public_key.clone(), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 6ee2c2579e..4994a228d6 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -9,7 +9,7 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; -use async_broadcast::{Receiver, Sender}; +use async_broadcast::{InactiveReceiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use hotshot_task::{ @@ -69,7 +69,7 @@ pub struct ProposalDependencyHandle { pub sender: Sender>>, /// The event receiver. - pub receiver: Receiver>>, + pub receiver: InactiveReceiver>>, /// Immutable instance state pub instance_state: Arc, @@ -252,6 +252,7 @@ impl HandleDepOutput for ProposalDependencyHandle< #[allow(clippy::no_effect_underscore_binding, clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { let high_qc_view_number = self.consensus.read().await.high_qc().view_number; + let event_receiver = self.receiver.activate_cloned(); if !self .consensus .read() @@ -262,16 +263,16 @@ impl HandleDepOutput for ProposalDependencyHandle< // The proposal for the high qc view is missing, try to get it asynchronously let membership = Arc::clone(&self.quorum_membership); let event_sender = self.sender.clone(); - let event_receiver = self.receiver.clone(); let sender_public_key = self.public_key.clone(); let sender_private_key = self.private_key.clone(); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let upgrade_lock = self.upgrade_lock.clone(); + let rx = event_receiver.clone(); async_spawn(async move { fetch_proposal( high_qc_view_number, event_sender, - event_receiver, + rx, membership, consensus, sender_public_key, @@ -282,7 +283,7 @@ impl HandleDepOutput for ProposalDependencyHandle< }); // Block on receiving the event from the event stream. EventDependency::new( - self.receiver.clone(), + event_receiver, Box::new(move |event| { let event = event.as_ref(); if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 022b608698..65d905b47b 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; @@ -12,6 +12,7 @@ use async_lock::RwLock; use async_std::task::JoinHandle; use async_trait::async_trait; use either::Either; +use futures::future::join_all; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::DependencyTask, @@ -49,7 +50,7 @@ pub struct QuorumProposalTaskState pub latest_proposed_view: TYPES::View, /// Table for the in-progress proposal dependency tasks. - pub proposal_dependencies: HashMap>, + pub proposal_dependencies: BTreeMap>, /// The underlying network pub network: Arc, @@ -321,7 +322,7 @@ impl, V: Versions> latest_proposed_view: self.latest_proposed_view, view_number, sender: event_sender, - receiver: event_receiver, + receiver: event_receiver.deactivate(), quorum_membership: Arc::clone(&self.quorum_membership), public_key: self.public_key.clone(), private_key: self.private_key.clone(), @@ -533,10 +534,23 @@ impl, V: Versions> Arc::clone(&event), )?; } + HotShotEvent::ViewChange(view) | HotShotEvent::Timeout(view) => { + self.cancel_tasks(*view).await; + } _ => {} } Ok(()) } + /// Cancel all tasks the consensus tasks has spawned before the given view + pub async fn cancel_tasks(&mut self, view: TYPES::View) { + let keep = self.proposal_dependencies.split_off(&view); + let mut cancel = Vec::new(); + while let Some((_, task)) = self.proposal_dependencies.pop_first() { + cancel.push(cancel_task(task)); + } + self.proposal_dependencies = keep; + join_all(cancel).await; + } } #[async_trait] @@ -555,11 +569,7 @@ impl, V: Versions> TaskState } async fn cancel_subtasks(&mut self) { - for handle in self - .proposal_dependencies - .drain() - .map(|(_view, handle)| handle) - { + while let Some((_, handle)) = self.proposal_dependencies.pop_first() { #[cfg(async_executor_impl = "async-std")] handle.cancel().await; #[cfg(async_executor_impl = "tokio")] diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 7c3ab2ed24..c1f10a038e 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -169,7 +169,6 @@ impl, V: Versions> TaskState let Some((_, handles)) = self.spawned_tasks.pop_first() else { break; }; - for handle in handles { #[cfg(async_executor_impl = "async-std")] handle.cancel().await; From c911d72d4c738c82656a4d8024209b2829d07ffa Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Mon, 28 Oct 2024 13:15:09 -0700 Subject: [PATCH 1269/1393] Add a trait for calculation of exact encoded transaction (#3800) * add trait * adding trait BuilderTransaction * a typo * remove duplicate trait * propagate overhead as a parameter * remove additional_overhead * move trait to builder-api * merge traits * fmt * add a param * remove param * add Copy to VidCommitment --- example-types/Cargo.toml | 1 + example-types/src/block_types.rs | 7 ++++++- types/src/traits/block_contents.rs | 5 +++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 1c38092bc1..65dd60d288 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -23,6 +23,7 @@ futures = { workspace = true } hotshot = { path = "../hotshot" } hotshot-types = { path = "../types" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } +hotshot-builder-api = { path = "../builder-api" } rand = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index dc5d5f2045..7241ac49b1 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -122,7 +122,12 @@ impl Committable for TestTransaction { } } -impl Transaction for TestTransaction {} +impl Transaction for TestTransaction { + fn minimum_block_size(&self) -> u64 { + // the estimation on transaction size is the length of the transaction + self.0.len() as u64 + } +} /// A [`BlockPayload`] that contains a list of `TestTransaction`. #[derive(PartialEq, Eq, Hash, Serialize, Deserialize, Clone, Debug)] diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index c8ba1c3883..de9968488c 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -41,6 +41,11 @@ pub trait EncodeBytes { pub trait Transaction: Clone + Serialize + DeserializeOwned + Debug + PartialEq + Eq + Sync + Send + Committable + Hash { + /// The function to estimate the transaction size + /// It takes in the transaction itself and a boolean indicating if the transaction adds a new namespace + /// Since each new namespace adds overhead + /// just ignore this parameter by default and use it when needed + fn minimum_block_size(&self) -> u64; } /// Abstraction over the full contents of a block From 36cdf0de6edaf06a080207195669687ab425cbe5 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 28 Oct 2024 17:15:20 -0400 Subject: [PATCH 1270/1393] Remove slow operations from critical path (#3788) * move storage to end of proposal processing * don't fetch proposal so until we really need it * whoops wrong fetch * fix test * spawn fetch proposal * revert test * fixes after merge * return no proposal if the dep errors * rename to reader --- task-impls/src/events.rs | 18 ++++---- task-impls/src/helpers.rs | 17 ++----- .../src/quorum_proposal_recv/handlers.rs | 46 +++++++++++++++---- task-impls/src/quorum_vote/mod.rs | 21 ++++++--- task-impls/src/request.rs | 2 +- testing/src/byzantine/byzantine_behaviour.rs | 2 +- .../tests_1/quorum_proposal_recv_task.rs | 2 +- testing/tests/tests_1/quorum_vote_task.rs | 8 ++-- .../tests/tests_1/upgrade_task_with_vote.rs | 10 ++-- .../tests/tests_1/vote_dependency_handle.rs | 2 +- 10 files changed, 78 insertions(+), 50 deletions(-) diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 8fc85a6031..4aea9b5ec8 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -100,7 +100,7 @@ pub enum HotShotEvent { /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid /// 4. The proposal passes either liveness or safety check. - QuorumProposalValidated(QuorumProposal, Leaf), + QuorumProposalValidated(Proposal>, Leaf), /// A quorum proposal is missing for a view that we need. QuorumProposalRequestSend( ProposalRequestPayload, @@ -267,9 +267,14 @@ impl HotShotEvent { Some(v.view_number()) } HotShotEvent::QuorumProposalRecv(proposal, _) - | HotShotEvent::QuorumProposalSend(proposal, _) => Some(proposal.data.view_number()), + | HotShotEvent::QuorumProposalSend(proposal, _) + | HotShotEvent::QuorumProposalValidated(proposal, _) + | HotShotEvent::QuorumProposalResponseSend(_, proposal) + | HotShotEvent::QuorumProposalResponseRecv(proposal) + | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { + Some(proposal.data.view_number()) + } HotShotEvent::QuorumVoteSend(vote) => Some(vote.view_number()), - HotShotEvent::QuorumProposalValidated(proposal, _) => Some(proposal.view_number()), HotShotEvent::DaProposalRecv(proposal, _) | HotShotEvent::DaProposalValidated(proposal, _) | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), @@ -311,11 +316,6 @@ impl HotShotEvent { } HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), - HotShotEvent::QuorumProposalResponseSend(_, proposal) - | HotShotEvent::QuorumProposalResponseRecv(proposal) - | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { - Some(proposal.data.view_number()) - } HotShotEvent::QuorumVoteDependenciesValidated(view_number) | HotShotEvent::ViewChange(view_number) | HotShotEvent::ViewSyncTimeout(view_number, _, _) @@ -398,7 +398,7 @@ impl Display for HotShotEvent { HotShotEvent::QuorumProposalValidated(proposal, _) => write!( f, "QuorumProposalValidated(view_number={:?})", - proposal.view_number() + proposal.data.view_number() ), HotShotEvent::DaProposalSend(proposal, _) => write!( f, diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 745340ca1f..6156d24131 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -30,7 +30,6 @@ use hotshot_types::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, - storage::Storage, BlockPayload, ValidatedState, }, utils::{Terminator, View, ViewInner}, @@ -119,6 +118,9 @@ pub(crate) async fn fetch_proposal( } } + } else { + // If the dep returns early return none + return None; } } @@ -546,17 +548,6 @@ pub async fn validate_proposal_safety_and_liveness< }); } - // Update our persistent storage of the proposal. If we cannot store the proposal reutrn - // and error so we don't vote - task_state - .storage - .write() - .await - .append_proposal(&proposal) - .await - .wrap() - .context(error!("Failed to append proposal in storage!"))?; - // We accept the proposal, notify the application layer broadcast_event( Event { @@ -573,7 +564,7 @@ pub async fn validate_proposal_safety_and_liveness< // Notify other tasks broadcast_event( Arc::new(HotShotEvent::QuorumProposalValidated( - proposal.data.clone(), + proposal.clone(), parent_leaf, )), &event_stream, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index a1800a8178..488854e95d 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -9,6 +9,7 @@ use std::sync::Arc; use async_broadcast::{broadcast, Receiver, Sender}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; use hotshot_types::{ @@ -19,6 +20,7 @@ use hotshot_types::{ traits::{ election::Membership, node_implementation::{NodeImplementation, NodeType}, + signature_key::SignatureKey, storage::Storage, ValidatedState, }, @@ -104,6 +106,35 @@ async fn validate_proposal_liveness( + view: TYPES::View, + event_sender: Sender>>, + event_receiver: Receiver>>, + membership: Arc, + consensus: OuterConsensus, + sender_public_key: TYPES::SignatureKey, + sender_private_key: ::PrivateKey, + upgrade_lock: UpgradeLock, +) { + async_spawn(async move { + let lock = upgrade_lock; + + let _ = fetch_proposal( + view, + event_sender, + event_receiver, + membership, + consensus, + sender_public_key, + sender_private_key, + &lock, + ) + .await; + }); +} + /// Handles the `QuorumProposalRecv` event by first validating the cert itself for the view, and then /// updating the states, which runs when the proposal cannot be found in the internal state map. /// @@ -155,7 +186,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .await; // Get the parent leaf and state. - let mut parent_leaf = task_state + let parent_leaf = task_state .consensus .read() .await @@ -163,9 +194,8 @@ pub(crate) async fn handle_quorum_proposal_recv< .get(&justify_qc.data.leaf_commit) .cloned(); - parent_leaf = match parent_leaf { - Some(p) => Some(p), - None => fetch_proposal( + if parent_leaf.is_none() { + spawn_fetch_proposal( justify_qc.view_number(), event_sender.clone(), event_receiver.clone(), @@ -176,11 +206,9 @@ pub(crate) async fn handle_quorum_proposal_recv< // incorrectly. task_state.public_key.clone(), task_state.private_key.clone(), - &task_state.upgrade_lock, - ) - .await - .ok(), - }; + task_state.upgrade_lock.clone(), + ); + } let consensus_reader = task_state.consensus.read().await; let parent = match parent_leaf { diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index f78bf15b59..3f78573d50 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -285,7 +285,7 @@ impl + 'static, V: Versions> Handl match event.as_ref() { #[allow(unused_assignments)] HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { - let proposal_payload_comm = proposal.block_header.payload_commitment(); + let proposal_payload_comm = proposal.data.block_header.payload_commitment(); if let Some(ref comm) = payload_commitment { if proposal_payload_comm != *comm { tracing::error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); @@ -295,11 +295,17 @@ impl + 'static, V: Versions> Handl payload_commitment = Some(proposal_payload_comm); } let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; - let proposed_leaf = Leaf::from_quorum_proposal(proposal); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); if proposed_leaf.parent_commitment() != parent_commitment { tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } + // Update our persistent storage of the proposal. If we cannot store the proposal reutrn + // and error so we don't vote + if let Err(e) = self.storage.write().await.append_proposal(proposal).await { + tracing::error!("failed to store proposal, not voting. error = {e:#}"); + return; + } leaf = Some(proposed_leaf); } HotShotEvent::DaCertificateValidated(cert) => { @@ -424,7 +430,7 @@ impl, V: Versions> QuorumVoteTaskS let event_view = match dependency_type { VoteDependency::QuorumProposal => { if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { - proposal.view_number + proposal.data.view_number } else { return false; } @@ -549,11 +555,14 @@ impl, V: Versions> QuorumVoteTaskS match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { let cur_epoch = self.consensus.read().await.cur_epoch(); - tracing::trace!("Received Proposal for view {}", *proposal.view_number()); + tracing::trace!( + "Received Proposal for view {}", + *proposal.data.view_number() + ); // Handle the event before creating the dependency task. if let Err(e) = - handle_quorum_proposal_validated(proposal, &event_sender, self).await + handle_quorum_proposal_validated(&proposal.data, &event_sender, self).await { tracing::debug!( "Failed to handle QuorumProposalValidated event; error = {e:#}" @@ -561,7 +570,7 @@ impl, V: Versions> QuorumVoteTaskS } self.create_dependency_task_if_new( - proposal.view_number, + proposal.data.view_number, cur_epoch, event_receiver, &event_sender, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 1344c31ee6..6964e38cd7 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -96,7 +96,7 @@ impl> TaskState for NetworkRequest ) -> Result<()> { match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { - let prop_view = proposal.view_number(); + let prop_view = proposal.data.view_number(); let cur_epoch = self.consensus.read().await.cur_epoch(); // If we already have the VID shares for the next view, do nothing. diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 825108e48b..e92e33cdd4 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -186,7 +186,7 @@ impl + std::fmt::Debug, V: Version ]; } HotShotEvent::QuorumProposalValidated(proposal, _) => { - self.validated_proposals.push(proposal.clone()); + self.validated_proposals.push(proposal.data.clone()); } _ => {} } diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 55ef973f9d..d5a83a1953 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -111,7 +111,7 @@ async fn test_quorum_proposal_recv_task() { .await, )), exact(QuorumProposalValidated( - proposals[1].data.clone(), + proposals[1].clone(), leaves[0].clone(), )), exact(ViewChange(ViewNumber::new(2))), diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index e981ed9bda..3030a1aea2 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -75,7 +75,7 @@ async fn test_quorum_vote_task_success() { // Send the quorum proposal, DAC, VID share data, and validated state, in which case a dummy // vote can be formed and the view number will be updated. let inputs = vec![random![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + QuorumProposalValidated(proposals[1].clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), VidShareRecv(leaders[1], vids[1].0[0].clone()), ]]; @@ -150,11 +150,11 @@ async fn test_quorum_vote_task_miss_dependency() { // Send two of quorum proposal, DAC, VID share data, in which case there's no vote. let inputs = vec![ random![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + QuorumProposalValidated(proposals[1].clone(), leaves[0].clone()), VidShareRecv(leaders[1], vid_share(&vids[1].0, handle.public_key())), ], random![ - QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + QuorumProposalValidated(proposals[2].clone(), leaves[1].clone()), DaCertificateRecv(dacs[2].clone()), ], random![ @@ -223,7 +223,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { // Send the correct quorum proposal and DAC, and incorrect VID share data. let inputs = vec![random![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + QuorumProposalValidated(proposals[1].clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), VidShareRecv(leaders[0], vids[0].0[0].clone()), ]]; diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 88112683a5..212ca114bb 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -108,27 +108,27 @@ async fn test_upgrade_task_with_vote() { let inputs = vec![ random![ - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + QuorumProposalValidated(proposals[1].clone(), leaves[0].clone()), DaCertificateRecv(dacs[1].clone()), VidShareRecv(leaders[1], vids[1].0[0].clone()), ], random![ - QuorumProposalValidated(proposals[2].data.clone(), leaves[1].clone()), + QuorumProposalValidated(proposals[2].clone(), leaves[1].clone()), DaCertificateRecv(dacs[2].clone()), VidShareRecv(leaders[2], vids[2].0[0].clone()), ], random![ - QuorumProposalValidated(proposals[3].data.clone(), leaves[2].clone()), + QuorumProposalValidated(proposals[3].clone(), leaves[2].clone()), DaCertificateRecv(dacs[3].clone()), VidShareRecv(leaders[3], vids[3].0[0].clone()), ], random![ - QuorumProposalValidated(proposals[4].data.clone(), leaves[3].clone()), + QuorumProposalValidated(proposals[4].clone(), leaves[3].clone()), DaCertificateRecv(dacs[4].clone()), VidShareRecv(leaders[4], vids[4].0[0].clone()), ], random![QuorumProposalValidated( - proposals[5].data.clone(), + proposals[5].clone(), leaves[5].clone() ),], ]; diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index a65d360428..085e37862a 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -69,7 +69,7 @@ async fn test_vote_dependency_handle() { // the dependency handles do not (yet) work with the existing test suite. let all_inputs = vec![ DaCertificateValidated(dacs[1].clone()), - QuorumProposalValidated(proposals[1].data.clone(), leaves[0].clone()), + QuorumProposalValidated(proposals[1].clone(), leaves[0].clone()), VidShareValidated(vids[1].0[0].clone()), ] .into_iter() From 6a3faf2c75830b7b9adf2a64b92a31d691933722 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:03:49 -0600 Subject: [PATCH 1271/1393] Remove consensus2 again, as consensus2 has been renamed to just consensus (#3817) --- task-impls/src/consensus2/handlers.rs | 280 -------------------------- 1 file changed, 280 deletions(-) delete mode 100644 task-impls/src/consensus2/handlers.rs diff --git a/task-impls/src/consensus2/handlers.rs b/task-impls/src/consensus2/handlers.rs deleted file mode 100644 index ec87f1b159..0000000000 --- a/task-impls/src/consensus2/handlers.rs +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use std::{sync::Arc, time::Duration}; - -use utils::anytrace::{ensure, Context, Result}; -use async_broadcast::Sender; -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use chrono::Utc; -use hotshot_types::{ - event::{Event, EventType}, - simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, - traits::{ - election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, - }, - vote::HasViewNumber, -}; -use tracing::{debug, error, instrument}; - -use super::Consensus2TaskState; -use crate::{ - consensus2::Versions, - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, - vote_collection::handle_vote, -}; - -/// Handle a `QuorumVoteRecv` event. -pub(crate) async fn handle_quorum_vote_recv< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( - vote: &QuorumVote, - event: Arc>, - sender: &Sender>>, - task_state: &mut Consensus2TaskState, -) -> Result<()> { - // Are we the leader for this view? - ensure!( - task_state - .quorum_membership - .leader(vote.view_number() + 1)? - == task_state.public_key, - format!( - "We are not the leader for view {:?}", - vote.view_number() + 1 - ) - ); - - handle_vote( - &mut task_state.vote_collectors, - vote, - task_state.public_key.clone(), - &task_state.quorum_membership, - task_state.id, - &event, - sender, - &task_state.upgrade_lock, - ) - .await?; - - Ok(()) -} - -/// Handle a `TimeoutVoteRecv` event. -pub(crate) async fn handle_timeout_vote_recv< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( - vote: &TimeoutVote, - event: Arc>, - sender: &Sender>>, - task_state: &mut Consensus2TaskState, -) -> Result<()> { - // Are we the leader for this view? - ensure!( - task_state - .timeout_membership - .leader(vote.view_number() + 1)? - == task_state.public_key, - format!( - "We are not the leader for view {:?}", - vote.view_number() + 1 - ) - ); - - handle_vote( - &mut task_state.timeout_vote_collectors, - vote, - task_state.public_key.clone(), - &task_state.quorum_membership, - task_state.id, - &event, - sender, - &task_state.upgrade_lock, - ) - .await?; - - Ok(()) -} - -/// Handle a `ViewChange` event. -#[instrument(skip_all)] -pub(crate) async fn handle_view_change< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( - new_view_number: TYPES::Time, - sender: &Sender>>, - task_state: &mut Consensus2TaskState, -) -> Result<()> { - ensure!( - new_view_number > task_state.cur_view, - "New view is not larger than the current view" - ); - - let old_view_number = task_state.cur_view; - debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); - - // Move this node to the next view - task_state.cur_view = new_view_number; - - // If we have a decided upgrade certificate, the protocol version may also have been upgraded. - let decided_upgrade_certificate_read = task_state - .upgrade_lock - .decided_upgrade_certificate - .read() - .await - .clone(); - if let Some(cert) = decided_upgrade_certificate_read { - if new_view_number == cert.data.new_version_first_view { - error!( - "Version upgraded based on a decided upgrade cert: {:?}", - cert - ); - } - } - - // Spawn a timeout task if we did actually update view - let timeout = task_state.timeout; - let new_timeout_task = async_spawn({ - let stream = sender.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = new_view_number + 1; - async move { - async_sleep(Duration::from_millis(timeout)).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), - &stream, - ) - .await; - } - }); - - // Cancel the old timeout task - cancel_task(std::mem::replace( - &mut task_state.timeout_task, - new_timeout_task, - )) - .await; - - let consensus = task_state.consensus.read().await; - consensus - .metrics - .current_view - .set(usize::try_from(task_state.cur_view.u64()).unwrap()); - let cur_view_time = Utc::now().timestamp(); - if task_state.quorum_membership.leader(old_view_number)? == task_state.public_key { - #[allow(clippy::cast_precision_loss)] - consensus - .metrics - .view_duration_as_leader - .add_point((cur_view_time - task_state.cur_view_time) as f64); - } - task_state.cur_view_time = cur_view_time; - - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(task_state.cur_view.u64()).unwrap() - > usize::try_from(task_state.last_decided_view.u64()).unwrap() - { - consensus.metrics.number_of_views_since_last_decide.set( - usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(task_state.last_decided_view.u64()).unwrap(), - ); - } - - broadcast_event( - Event { - view_number: old_view_number, - event: EventType::ViewFinished { - view_number: old_view_number, - }, - }, - &task_state.output_event_stream, - ) - .await; - Ok(()) -} - -/// Handle a `Timeout` event. -#[instrument(skip_all)] -pub(crate) async fn handle_timeout, V: Versions>( - view_number: TYPES::Time, - sender: &Sender>>, - task_state: &mut Consensus2TaskState, -) -> Result<()> { - ensure!( - task_state.cur_view < view_number, - "Timeout event is for an old view" - ); - - ensure!( - task_state - .timeout_membership - .has_stake(&task_state.public_key), - format!("We were not chosen for the consensus committee for view {view_number:?}") - ); - - let vote = TimeoutVote::create_signed_vote( - TimeoutData:: { view: view_number }, - view_number, - &task_state.public_key, - &task_state.private_key, - &task_state.upgrade_lock, - ) - .await - .context("Failed to sign TimeoutData")?; - - broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; - broadcast_event( - Event { - view_number, - event: EventType::ViewTimeout { view_number }, - }, - &task_state.output_event_stream, - ) - .await; - - debug!( - "We did not receive evidence for view {} in time, sending timeout vote for that view!", - *view_number - ); - - broadcast_event( - Event { - view_number, - event: EventType::ReplicaViewTimeout { view_number }, - }, - &task_state.output_event_stream, - ) - .await; - - task_state - .consensus - .read() - .await - .metrics - .number_of_timeouts - .add(1); - if task_state.quorum_membership.leader(view_number)? == task_state.public_key { - task_state - .consensus - .read() - .await - .metrics - .number_of_timeouts_as_leader - .add(1); - } - - Ok(()) -} From 0d21954779eb05459450b6254d7f65fa18fabbf7 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:26:33 -0600 Subject: [PATCH 1272/1393] remove unneeded bounds from `Memberships` trait (#3816) Co-authored-by: tbro --- types/src/traits/election.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 580f06c5a8..aa313fc64a 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . //! The election trait, used to decide which node is the leader and determine if a vote is valid. -use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; +use std::{collections::BTreeSet, fmt::Debug, num::NonZeroU64}; use utils::anytrace::Result; @@ -13,9 +13,7 @@ use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// A protocol for determining membership in and participating in a committee. -pub trait Membership: - Clone + Debug + Eq + PartialEq + Send + Sync + Hash + 'static -{ +pub trait Membership: Clone + Debug + Send + Sync { /// The error type returned by methods like `lookup_leader`. type Error: std::fmt::Display; From 3590cf17990b8fe703bfd8b4373563a952773f3d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 29 Oct 2024 14:45:44 -0400 Subject: [PATCH 1273/1393] Remove unused config stuff (#3771) * Remove unused config stuff * remove is_da param in a bunch of places * don't reset lock in bad proposal task * lint * allow one timeout * allow bad proposal leader to fail everytime * fix after merge * correct num fails * final fix * more * reset actions * lint * log at debug * Fix test 3 --- examples/infra/mod.rs | 15 ++------ hotshot/src/lib.rs | 4 +-- hotshot/src/tasks/task_state.rs | 2 -- .../src/traits/networking/combined_network.rs | 3 -- .../src/traits/networking/libp2p_network.rs | 35 +------------------ .../src/traits/networking/memory_network.rs | 1 - .../src/traits/networking/push_cdn_network.rs | 1 - orchestrator/run-config.toml | 4 --- orchestrator/staging-config.toml | 4 --- task-impls/src/quorum_proposal/handlers.rs | 8 ++--- task-impls/src/quorum_proposal/mod.rs | 4 --- task-impls/src/quorum_proposal_recv/mod.rs | 3 -- task/src/task.rs | 2 +- testing/src/byzantine/byzantine_behaviour.rs | 6 ++-- testing/src/test_builder.rs | 28 +-------------- testing/tests/tests_2/catchup.rs | 5 --- testing/tests/tests_3/byzantine_tests.rs | 4 +-- testing/tests/tests_5/combined_network.rs | 10 ------ testing/tests/tests_5/push_cdn.rs | 3 -- testing/tests/tests_5/timeout.rs | 2 -- testing/tests/tests_5/unreliable_network.rs | 2 -- types/src/constants.rs | 2 -- types/src/hotshot_config_file.rs | 22 ++---------- types/src/lib.rs | 22 ------------ types/src/network.rs | 12 ++----- types/src/traits/network.rs | 1 - types/src/traits/node_implementation.rs | 1 - 27 files changed, 18 insertions(+), 188 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 0c7e8db673..929232e45d 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -12,13 +12,10 @@ use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}, num::NonZeroUsize, sync::Arc, - time::{Duration, Instant}, + time::Instant, }; -use async_compatibility_layer::{ - art::async_sleep, - logging::{setup_backtrace, setup_logging}, -}; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_trait::async_trait; use cdn_broker::reexports::crypto::signature::KeyPair; use chrono::Utc; @@ -443,10 +440,7 @@ pub trait RunDa< transaction_size_in_bytes: u64, ) -> BenchResults { let NetworkConfig { - rounds, - node_index, - start_delay_seconds, - .. + rounds, node_index, .. } = self.config(); let mut total_transactions_committed = 0; @@ -456,9 +450,6 @@ pub trait RunDa< let mut total_latency = 0; let mut num_latency = 0; - info!("Sleeping for {start_delay_seconds} seconds before starting hotshot!"); - async_sleep(Duration::from_secs(start_delay_seconds)).await; - info!("Starting HotShot example!"); let start = Instant::now(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 8c91554e4d..08b1ef1548 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -506,9 +506,9 @@ impl, V: Versions> SystemContext, V: Versions> CreateTaskState private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), timeout: handle.hotshot.config.next_view_timeout, - round_start_delay: handle.hotshot.config.round_start_delay, id: handle.hotshot.id, formed_upgrade_certificate: None, upgrade_lock: handle.hotshot.upgrade_lock.clone(), @@ -292,7 +291,6 @@ impl, V: Versions> CreateTaskState timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, - round_start_delay: handle.hotshot.config.round_start_delay, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), proposal_cert: None, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 002f5d543c..584e4e888e 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -261,7 +261,6 @@ impl TestableNetworkingImplementation for CombinedNetwor num_bootstrap: usize, network_id: usize, da_committee_size: usize, - is_da: bool, reliability_config: Option>, secondary_network_delay: Duration, ) -> AsyncGenerator> { @@ -271,7 +270,6 @@ impl TestableNetworkingImplementation for CombinedNetwor num_bootstrap, network_id, da_committee_size, - is_da, None, Duration::default(), ), @@ -280,7 +278,6 @@ impl TestableNetworkingImplementation for CombinedNetwor num_bootstrap, network_id, da_committee_size, - is_da, reliability_config, Duration::default(), ) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 34e5992f13..de0a76156a 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -174,8 +174,6 @@ struct Libp2pNetworkInner { #[cfg(feature = "hotshot-testing")] /// reliability_config reliability_config: Option>, - /// if we're a member of the DA committee or not - is_da: bool, /// Killswitch sender kill_switch: channel::Sender<()>, } @@ -207,7 +205,6 @@ impl TestableNetworkingImplementation num_bootstrap: usize, _network_id: usize, da_committee_size: usize, - _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, ) -> AsyncGenerator> { @@ -217,16 +214,6 @@ impl TestableNetworkingImplementation ); let bootstrap_addrs: PeerInfoVec = Arc::default(); let node_ids: Arc>> = Arc::default(); - // We assign known_nodes' public key and stake value rather than read from config file since it's a test - let mut da_keys = BTreeSet::new(); - - for i in 0u64..(expected_node_count as u64) { - let privkey = TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], i).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); - if i < da_committee_size as u64 { - da_keys.insert(pubkey.clone()); - } - } // NOTE uncomment this for easier debugging // let start_port = 5000; @@ -277,7 +264,6 @@ impl TestableNetworkingImplementation let bootstrap_addrs_ref = Arc::clone(&bootstrap_addrs); let node_ids_ref = Arc::clone(&node_ids); - let da = da_keys.clone(); let reliability_config_dup = reliability_config.clone(); Box::pin(async move { @@ -299,7 +285,6 @@ impl TestableNetworkingImplementation usize::try_from(node_id).unwrap(), #[cfg(feature = "hotshot-testing")] reliability_config_dup, - da.contains(&pubkey), ) .await { @@ -484,12 +469,6 @@ impl Libp2pNetwork { // Calculate all keys so we can keep track of direct message recipients let mut all_keys = BTreeSet::new(); - let mut da_keys = BTreeSet::new(); - - // Make a node DA if it is under the staked committee size - for node in config.config.known_da_nodes { - da_keys.insert(K::public_key(&node.stake_table_entry)); - } // Insert all known nodes into the set of all keys for node in config.config.known_nodes_with_stake { @@ -505,7 +484,6 @@ impl Libp2pNetwork { usize::try_from(config.node_index)?, #[cfg(feature = "hotshot-testing")] None, - da_keys.contains(pub_key), ) .await?) } @@ -547,7 +525,6 @@ impl Libp2pNetwork { bootstrap_addrs: BootstrapAddrs, id: usize, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, - is_da: bool, ) -> Result, NetworkError> { let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) .await @@ -562,10 +539,7 @@ impl Libp2pNetwork { pubkey_pid_map.insert(pk.clone(), network_handle.peer_id()); // Subscribe to the relevant topics - let mut subscribed_topics = HashSet::from_iter(vec![QC_TOPIC.to_string()]); - if is_da { - subscribed_topics.insert("DA".to_string()); - } + let subscribed_topics = HashSet::from_iter(vec![QC_TOPIC.to_string()]); // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs @@ -594,7 +568,6 @@ impl Libp2pNetwork { latest_seen_view: Arc::new(AtomicU64::new(0)), #[cfg(feature = "hotshot-testing")] reliability_config, - is_da, kill_switch: kill_tx, }), }; @@ -644,7 +617,6 @@ impl Libp2pNetwork { let handle = Arc::clone(&self.inner.handle); let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); let inner = Arc::clone(&self.inner); - let is_da = self.inner.is_da; async_spawn({ let is_ready = Arc::clone(&self.inner.is_ready); @@ -664,11 +636,6 @@ impl Libp2pNetwork { // Subscribe to the QC topic handle.subscribe(QC_TOPIC.to_string()).await.unwrap(); - // Only subscribe to DA events if we are DA - if is_da { - handle.subscribe("DA".to_string()).await.unwrap(); - } - // Map our staking key to our Libp2p Peer ID so we can properly // route direct messages while handle diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 54fcdd9d1b..e86c773e71 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -190,7 +190,6 @@ impl TestableNetworkingImplementation _num_bootstrap: usize, _network_id: usize, da_committee_size: usize, - _is_da: bool, reliability_config: Option>, _secondary_network_delay: Duration, ) -> AsyncGenerator> { diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index dc38f979cf..c0c2f5d957 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -271,7 +271,6 @@ impl TestableNetworkingImplementation _num_bootstrap: usize, _network_id: usize, da_committee_size: usize, - _is_da: bool, _reliability_config: Option>, _secondary_network_delay: Duration, ) -> AsyncGenerator> { diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index d1fecc10c9..04ab883a2e 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -4,7 +4,6 @@ transactions_per_round = 10 transaction_size = 1000 node_index = 0 seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] -start_delay_seconds = 0 cdn_marshal_address = "127.0.0.1:9000" public_keys = [ { stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", state_ver_key = "SCHNORR_VER_KEY~lJqDaVZyM0hWP2Br52IX5FeE-dCAIC-dPX7bL5-qUx-vjbunwe-ENOeZxj6FuOyvDCFzoGeP7yZ0fM995qF-CRE", stake = 1, da = true }, @@ -34,9 +33,6 @@ start_threshold = [8, 10] staked_da_nodes = 10 fixed_leader_for_gpuvid = 1 next_view_timeout = 30000 -timeout_ratio = [11, 10] -round_start_delay = 1 -start_delay = 1 num_bootstrap = 5 epoch_height = 0 diff --git a/orchestrator/staging-config.toml b/orchestrator/staging-config.toml index 417a19f10b..61c5adb696 100644 --- a/orchestrator/staging-config.toml +++ b/orchestrator/staging-config.toml @@ -38,7 +38,6 @@ seed = [ 0 ] transaction_size = 100 -start_delay_seconds = 10 builder = "Simple" [config] @@ -47,9 +46,6 @@ num_nodes_with_stake = 10 staked_da_nodes = 10 fixed_leader_for_gpuvid = 1 next_view_timeout = 15_000 -timeout_ratio = [ 11, 10 ] -round_start_delay = 1 -start_delay = 10_000 num_bootstrap = 5 builder_urls = [ "https://builder.staging.testnet.espresso.network/" ] diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 4994a228d6..ffad0f5d1e 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -7,10 +7,10 @@ //! This module holds the dependency task for the QuorumProposalTask. It is spawned whenever an event that could //! initiate a proposal occurs. -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{InactiveReceiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use hotshot_task::{ dependency::{Dependency, EventDependency}, @@ -83,9 +83,6 @@ pub struct ProposalDependencyHandle { /// Our Private Key pub private_key: ::PrivateKey, - /// Round start delay from config, in milliseconds. - pub round_start_delay: u64, - /// Shared consensus task state pub consensus: OuterConsensus, @@ -233,7 +230,6 @@ impl ProposalDependencyHandle { proposed_leaf.view_number(), ); - async_sleep(Duration::from_millis(self.round_start_delay)).await; broadcast_event( Arc::new(HotShotEvent::QuorumProposalSend( message.clone(), diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 65d905b47b..5d33e16664 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -76,9 +76,6 @@ pub struct QuorumProposalTaskState /// View timeout from config. pub timeout: u64, - /// Round start delay from config, in milliseconds. - pub round_start_delay: u64, - /// This node's storage ref pub storage: Arc>, @@ -326,7 +323,6 @@ impl, V: Versions> quorum_membership: Arc::clone(&self.quorum_membership), public_key: self.public_key.clone(), private_key: self.private_key.clone(), - round_start_delay: self.round_start_delay, instance_state: Arc::clone(&self.instance_state), consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), formed_upgrade_certificate: self.formed_upgrade_certificate.clone(), diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index c1f10a038e..030dc1295c 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -78,9 +78,6 @@ pub struct QuorumProposalRecvTaskState>, diff --git a/task/src/task.rs b/task/src/task.rs index fc60db3064..6d0e0ca461 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -91,7 +91,7 @@ impl Task { let _ = S::handle_event(&mut self.state, input, &self.sender, &self.receiver) .await - .inspect_err(|e| tracing::info!("{e}")); + .inspect_err(|e| tracing::debug!("{e}")); } Err(RecvError::Closed) => { break self.boxed_state(); diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index e92e33cdd4..30233dd269 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -54,9 +54,7 @@ impl, V: Versions> EventTransforme HotShotEvent::QuorumProposalSend(proposal, signature) => { let mut result = Vec::new(); - for n in 0..self.multiplier { - // reset last actioned view so we actually propose multiple times - consensus.write().await.reset_actions(); + for n in 1..self.multiplier { let mut modified_proposal = proposal.clone(); modified_proposal.data.view_number += n * self.increment; @@ -66,8 +64,8 @@ impl, V: Versions> EventTransforme signature.clone(), )); } - consensus.write().await.reset_actions(); + consensus.write().await.reset_actions(); result } _ => vec![event.clone()], diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 64d16ede22..15cbb293ed 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -20,7 +20,7 @@ use hotshot_example_types::{ use hotshot_types::{ consensus::ConsensusMetricsValue, traits::node_implementation::{NodeType, Versions}, - ExecutionType, HotShotConfig, ValidatorConfig, + HotShotConfig, ValidatorConfig, }; use tide_disco::Url; use vec1::Vec1; @@ -44,12 +44,6 @@ pub type TransactionValidator = Arc) -> Result<()> + Sen pub struct TimingData { /// Base duration for next-view timeout, in milliseconds pub next_view_timeout: u64, - /// The exponential backoff ration for the next-view timeout - pub timeout_ratio: (u64, u64), - /// The delay a leader inserts before starting pre-commit, in milliseconds - pub round_start_delay: u64, - /// Delay after init before starting consensus, in milliseconds - pub start_delay: u64, /// The maximum amount of time a leader can wait to get a block from a builder pub builder_timeout: Duration, /// time to wait until we request data associated with a proposal @@ -287,9 +281,6 @@ impl Default for TimingData { fn default() -> Self { Self { next_view_timeout: 4000, - timeout_ratio: (11, 10), - round_start_delay: 100, - start_delay: 100, builder_timeout: Duration::from_millis(500), data_request_delay: Duration::from_millis(200), secondary_network_delay: Duration::from_millis(1000), @@ -320,9 +311,6 @@ impl, V: Versions> TestDescription }, timing_data: TimingData { next_view_timeout: 2000, - timeout_ratio: (1, 1), - start_delay: 20000, - round_start_delay: 25, ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), @@ -349,8 +337,6 @@ impl, V: Versions> TestDescription expected_views_to_fail: HashMap::new(), }, timing_data: TimingData { - start_delay: 120_000, - round_start_delay: 25, ..TimingData::default() }, view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), @@ -500,23 +486,17 @@ where ); // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); let config = HotShotConfig { - // TODO this doesn't exist anymore - execution_type: ExecutionType::Incremental, start_threshold: (1, 1), num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), // Currently making this zero for simplicity known_da_nodes, num_bootstrap: num_bootstrap_nodes, known_nodes_with_stake, - known_nodes_without_stake: vec![], my_own_validator_config, da_staked_committee_size, fixed_leader_for_gpuvid: 1, next_view_timeout: 500, view_sync_timeout: Duration::from_millis(250), - timeout_ratio: (11, 10), - round_start_delay: 25, - start_delay: 1, builder_timeout: Duration::from_millis(1000), data_request_delay: Duration::from_millis(200), // Placeholder until we spin up the builder @@ -533,9 +513,6 @@ where }; let TimingData { next_view_timeout, - timeout_ratio, - round_start_delay, - start_delay, builder_timeout, data_request_delay, secondary_network_delay, @@ -545,9 +522,6 @@ where // TODO this should really be using the timing config struct |a: &mut HotShotConfig| { a.next_view_timeout = next_view_timeout; - a.timeout_ratio = timeout_ratio; - a.round_start_delay = round_start_delay; - a.start_delay = start_delay; a.builder_timeout = builder_timeout; a.data_request_delay = data_request_delay; a.view_sync_timeout = view_sync_timeout; diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index ff0aeb08b6..3d4702a3ec 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -37,9 +37,6 @@ async fn test_catchup() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, - // increase the round delay for this test - // TODO: remove this delay increase for test - https://github.com/EspressoSystems/HotShot/issues/3673 - round_start_delay: 200, ..Default::default() }; let mut metadata: TestDescription = @@ -324,7 +321,6 @@ cross_tests!( Metadata: { let timing_data = TimingData { next_view_timeout: 2000, - round_start_delay: 500, ..Default::default() }; let mut metadata = TestDescription::default(); @@ -378,7 +374,6 @@ cross_tests!( Metadata: { let timing_data = TimingData { next_view_timeout: 2000, - round_start_delay: 500, ..Default::default() }; let mut metadata: TestDescription = diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index 00ea1ff4d7..0ab5f68d19 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -77,11 +77,11 @@ cross_tests!( }, ), behaviour, + num_nodes_with_stake: 12, ..TestDescription::default() }; - metadata.overall_safety_properties.num_failed_views = 0; - + metadata.overall_safety_properties.num_failed_views = 15; metadata }, ); diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index e4d0fb4625..81222cd088 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -29,9 +29,7 @@ async fn test_combined_network() { async_compatibility_layer::logging::setup_backtrace(); let metadata: TestDescription = TestDescription { timing_data: TimingData { - round_start_delay: 25, next_view_timeout: 10_000, - start_delay: 120_000, ..Default::default() }, @@ -64,7 +62,6 @@ async fn test_combined_network_cdn_crash() { async_compatibility_layer::logging::setup_backtrace(); let mut metadata: TestDescription = TestDescription { timing_data: TimingData { - round_start_delay: 25, next_view_timeout: 10_000, ..Default::default() }, @@ -111,9 +108,7 @@ async fn test_combined_network_reup() { async_compatibility_layer::logging::setup_backtrace(); let mut metadata: TestDescription = TestDescription { timing_data: TimingData { - round_start_delay: 25, next_view_timeout: 10_000, - start_delay: 120_000, ..Default::default() }, @@ -164,9 +159,7 @@ async fn test_combined_network_half_dc() { async_compatibility_layer::logging::setup_backtrace(); let mut metadata: TestDescription = TestDescription { timing_data: TimingData { - round_start_delay: 25, next_view_timeout: 10_000, - start_delay: 120_000, ..Default::default() }, @@ -244,10 +237,7 @@ async fn test_stress_combined_network_fuzzy() { start_nodes: 20, timing_data: TimingData { - round_start_delay: 25, next_view_timeout: 10_000, - start_delay: 120_000, - ..Default::default() }, diff --git a/testing/tests/tests_5/push_cdn.rs b/testing/tests/tests_5/push_cdn.rs index 5b2dc0d5b1..97e3dc0cb2 100644 --- a/testing/tests/tests_5/push_cdn.rs +++ b/testing/tests/tests_5/push_cdn.rs @@ -25,10 +25,7 @@ async fn push_cdn_network() { async_compatibility_layer::logging::setup_backtrace(); let metadata: TestDescription = TestDescription { timing_data: TimingData { - round_start_delay: 25, next_view_timeout: 10_000, - start_delay: 120_000, - ..Default::default() }, overall_safety_properties: OverallSafetyPropertiesDescription { diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 791e9551bd..9809ea7eb5 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -81,8 +81,6 @@ async fn test_timeout_libp2p() { async_compatibility_layer::logging::setup_backtrace(); let timing_data = TimingData { next_view_timeout: 2000, - start_delay: 2000, - round_start_delay: 1000, ..Default::default() }; diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 1525ca55e9..0e4f611a42 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -101,7 +101,6 @@ async fn libp2p_network_async() { }, ), timing_data: TimingData { - timeout_ratio: (1, 1), next_view_timeout: 25000, ..TestDescription::::default_multiple_rounds() .timing_data @@ -150,7 +149,6 @@ async fn test_memory_network_async() { }, ), timing_data: TimingData { - timeout_ratio: (1, 1), next_view_timeout: 1000, ..TestDescription::::default_multiple_rounds() .timing_data diff --git a/types/src/constants.rs b/types/src/constants.rs index 7851ff5219..b4c8a39008 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -76,5 +76,3 @@ pub const ORCHESTRATOR_DEFAULT_NUM_ROUNDS: usize = 100; pub const ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND: usize = 10; /// default size of transactions pub const ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE: usize = 100; -/// default delay before beginning consensus -pub const ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS: u64 = 60; diff --git a/types/src/hotshot_config_file.rs b/types/src/hotshot_config_file.rs index 08b81af3e4..93f37ca022 100644 --- a/types/src/hotshot_config_file.rs +++ b/types/src/hotshot_config_file.rs @@ -4,14 +4,14 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{num::NonZeroUsize, time::Duration, vec}; +use std::{num::NonZeroUsize, time::Duration}; use surf_disco::Url; use vec1::Vec1; use crate::{ constants::REQUEST_DATA_DELAY, traits::signature_key::SignatureKey, - upgrade_config::UpgradeConfig, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, + upgrade_config::UpgradeConfig, HotShotConfig, PeerConfig, ValidatorConfig, }; /// Default builder URL, used as placeholder @@ -37,9 +37,6 @@ pub struct HotShotConfigFile { #[serde(skip)] /// The known DA nodes' public key and stake values pub known_da_nodes: Vec>, - #[serde(skip)] - /// The known non-staking nodes' - pub known_nodes_without_stake: Vec, /// Number of staking DA nodes pub staked_da_nodes: usize, /// Number of fixed leaders for GPU VID @@ -48,12 +45,6 @@ pub struct HotShotConfigFile { pub next_view_timeout: u64, /// Duration for view sync round timeout pub view_sync_timeout: Duration, - /// The exponential backoff ration for the next-view timeout - pub timeout_ratio: (u64, u64), - /// The delay a leader inserts before starting pre-commit, in milliseconds - pub round_start_delay: u64, - /// Delay after init before starting consensus, in milliseconds - pub start_delay: u64, /// Number of network bootstrap nodes pub num_bootstrap: usize, /// The maximum amount of time a leader can wait to get a block from a builder @@ -72,20 +63,15 @@ pub struct HotShotConfigFile { impl From> for HotShotConfig { fn from(val: HotShotConfigFile) -> Self { HotShotConfig { - execution_type: ExecutionType::Continuous, start_threshold: val.start_threshold, num_nodes_with_stake: val.num_nodes_with_stake, known_da_nodes: val.known_da_nodes, known_nodes_with_stake: val.known_nodes_with_stake, - known_nodes_without_stake: val.known_nodes_without_stake, my_own_validator_config: val.my_own_validator_config, da_staked_committee_size: val.staked_da_nodes, fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, next_view_timeout: val.next_view_timeout, view_sync_timeout: val.view_sync_timeout, - timeout_ratio: val.timeout_ratio, - round_start_delay: val.round_start_delay, - start_delay: val.start_delay, num_bootstrap: val.num_bootstrap, builder_timeout: val.builder_timeout, data_request_delay: val @@ -136,15 +122,11 @@ impl HotShotConfigFile { start_threshold: (1, 1), my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, - known_nodes_without_stake: vec![], staked_da_nodes, known_da_nodes, fixed_leader_for_gpuvid: 1, next_view_timeout: 10000, view_sync_timeout: Duration::from_millis(1000), - timeout_ratio: (11, 10), - round_start_delay: 1, - start_delay: 1, num_bootstrap: 5, builder_timeout: Duration::from_secs(10), data_request_delay: Some(Duration::from_millis(REQUEST_DATA_DELAY)), diff --git a/types/src/lib.rs b/types/src/lib.rs index 897fef5c73..44c81e4e68 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -63,18 +63,6 @@ where { assert_future::(Box::pin(fut)) } -/// the type of consensus to run. Either: -/// wait for a signal to start a view, -/// or constantly run -/// you almost always want continuous -/// incremental is just for testing -#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub enum ExecutionType { - /// constantly increment view as soon as view finishes - Continuous, - /// wait for a signal - Incremental, -} #[derive(serde::Serialize, serde::Deserialize, Clone, Derivative, Display)] #[serde(bound(deserialize = ""))] @@ -178,8 +166,6 @@ impl Default for PeerConfig { #[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] pub struct HotShotConfig { - /// Whether to run one view or continuous views - pub execution_type: ExecutionType, /// The proportion of nodes required before the orchestrator issues the ready signal, /// expressed as (numerator, denominator) pub start_threshold: (u64, u64), @@ -190,8 +176,6 @@ pub struct HotShotConfig { pub known_nodes_with_stake: Vec>, /// All public keys known to be DA nodes pub known_da_nodes: Vec>, - /// List of known non-staking nodes' public keys - pub known_nodes_without_stake: Vec, /// My own validator config, including my public key, private key, stake value, serving as private parameter pub my_own_validator_config: ValidatorConfig, /// List of DA committee (staking)nodes for static DA committee @@ -202,12 +186,6 @@ pub struct HotShotConfig { pub next_view_timeout: u64, /// Duration of view sync round timeouts pub view_sync_timeout: Duration, - /// The exponential backoff ration for the next-view timeout - pub timeout_ratio: (u64, u64), - /// The delay a leader inserts before starting pre-commit, in milliseconds - pub round_start_delay: u64, - /// Delay after init before starting consensus, in milliseconds - pub start_delay: u64, /// Number of network bootstrap nodes pub num_bootstrap: usize, /// The maximum amount of time a leader can wait to get a block from a builder diff --git a/types/src/network.rs b/types/src/network.rs index 31312bc13f..0927d59b93 100644 --- a/types/src/network.rs +++ b/types/src/network.rs @@ -14,9 +14,8 @@ use tracing::error; use crate::{ constants::{ - ORCHESTRATOR_DEFAULT_NUM_ROUNDS, ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS, - ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, - REQUEST_DATA_DELAY, + ORCHESTRATOR_DEFAULT_NUM_ROUNDS, ORCHESTRATOR_DEFAULT_TRANSACTIONS_PER_ROUND, + ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE, REQUEST_DATA_DELAY, }, hotshot_config_file::HotShotConfigFile, light_client::StateVerKey, @@ -137,8 +136,6 @@ pub struct NetworkConfig { pub seed: [u8; 32], /// size of transactions pub transaction_size: usize, - /// delay before beginning consensus - pub start_delay_seconds: u64, /// name of the key type (for debugging) pub key_type_name: String, /// the libp2p config @@ -281,7 +278,6 @@ impl Default for NetworkConfig { manual_start_password: None, libp2p_config: None, config: HotShotConfigFile::hotshot_config_5_nodes_10_da().into(), - start_delay_seconds: 60, key_type_name: std::any::type_name::().to_string(), cdn_marshal_address: None, combined_network_config: None, @@ -337,9 +333,6 @@ pub struct NetworkConfigFile { /// size of transactions #[serde_inline_default(ORCHESTRATOR_DEFAULT_TRANSACTION_SIZE)] pub transaction_size: usize, - /// delay before beginning consensus - #[serde_inline_default(ORCHESTRATOR_DEFAULT_START_DELAY_SECONDS)] - pub start_delay_seconds: u64, /// the hotshot config file #[serde(default = "HotShotConfigFile::hotshot_config_5_nodes_10_da")] pub config: HotShotConfigFile, @@ -385,7 +378,6 @@ impl From> for NetworkConfig { }), config: val.config.into(), key_type_name: std::any::type_name::().to_string(), - start_delay_seconds: val.start_delay_seconds, cdn_marshal_address: val.cdn_marshal_address, combined_network_config: val.combined_network_config, commit_sha: String::new(), diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 27fe0ec3c9..07c1edd807 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -303,7 +303,6 @@ where num_bootstrap: usize, network_id: usize, da_committee_size: usize, - is_da: bool, reliability_config: Option>, secondary_network_delay: Duration, ) -> AsyncGenerator>; diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index c84031218c..43035b01b7 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -153,7 +153,6 @@ where num_bootstrap, 0, da_committee_size, - false, reliability_config.clone(), secondary_network_delay, ) From 976004813e3957c813a169711807188189d927f2 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 30 Oct 2024 20:02:34 +0300 Subject: [PATCH 1274/1393] Add ViewNumber to marketplace builder fee signature (#3793) --- testing/src/block_builder/simple.rs | 3 ++- types/src/data.rs | 9 ++++++--- types/src/traits/signature_key.rs | 21 ++++++++++++++++++--- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 10ea1132dd..e1207a4fc2 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -141,7 +141,7 @@ where &self, _parent_view: u64, _parent_hash: &VidCommitment, - _view_number: u64, + view_number: u64, ) -> Result, BuildError> { let transactions = self .transactions @@ -170,6 +170,7 @@ where fee_signature: TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace( &self.priv_key.clone(), fee_amount, + view_number, ) .expect("Failed to sign fee!"), }; diff --git a/types/src/data.rs b/types/src/data.rs index db891c8a0b..71b682aa19 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -801,7 +801,7 @@ pub mod null_block { use crate::{ traits::{ block_contents::BuilderFee, - node_implementation::{NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::BuilderSignatureKey, BlockPayload, }, @@ -840,8 +840,11 @@ pub mod null_block { ); if version >= V::Marketplace::VERSION { - match TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace(&priv_key, FEE_AMOUNT) - { + match TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace( + &priv_key, + FEE_AMOUNT, + *TYPES::View::genesis(), + ) { Ok(sig) => Some(BuilderFee { fee_amount: FEE_AMOUNT, fee_account: pub_key, diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index e9133e9286..52503c0cd5 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -227,8 +227,12 @@ pub trait BuilderSignatureKey: &self, signature: &Self::BuilderSignature, fee_amount: u64, + view_number: u64, ) -> bool { - self.validate_builder_signature(signature, &fee_amount.to_be_bytes()) + self.validate_builder_signature( + signature, + &aggregate_fee_data_marketplace(fee_amount, view_number), + ) } /// validate the bundle's signature using the builder's public key @@ -285,12 +289,15 @@ pub trait BuilderSignatureKey: /// sign fee offer for proposed payload (marketplace version) /// # Errors /// If unable to sign the data with the key - // TODO: this should include view number fn sign_sequencing_fee_marketplace( private_key: &Self::BuilderPrivateKey, fee_amount: u64, + view_number: u64, ) -> Result { - Self::sign_builder_message(private_key, &fee_amount.to_be_bytes()) + Self::sign_builder_message( + private_key, + &aggregate_fee_data_marketplace(fee_amount, view_number), + ) } /// sign transactions (marketplace version) @@ -340,6 +347,14 @@ fn aggregate_fee_data( fee_info } +/// Aggregate all inputs used for signature over fee data +fn aggregate_fee_data_marketplace(fee_amount: u64, view_number: u64) -> Vec { + let mut fee_info = Vec::new(); + fee_info.extend_from_slice(fee_amount.to_be_bytes().as_ref()); + fee_info.extend_from_slice(view_number.to_be_bytes().as_ref()); + fee_info +} + /// Aggregate all inputs used for signature over block info fn aggregate_block_info_data( block_size: u64, From caf2816a721e89a9fcbcc1cf9344939fc24ae54d Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 30 Oct 2024 20:01:42 +0100 Subject: [PATCH 1275/1393] Fix possible deadlock (#3824) --- task-impls/src/helpers.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 6156d24131..28d59b2038 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -382,9 +382,7 @@ pub(crate) async fn parent_leaf_and_state( ) ); let parent_view_number = consensus_reader.high_qc().view_number(); - let vsm_contains_parent_view = consensus - .read() - .await + let vsm_contains_parent_view = consensus_reader .validated_state_map() .contains_key(&parent_view_number); drop(consensus_reader); From f3cf83f54fe93ff7ca61fe0ad5d1989b942b1ac8 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Wed, 30 Oct 2024 13:44:15 -0600 Subject: [PATCH 1276/1393] Reflow get_or_calc_vid_share (#3823) --- task-impls/src/response.rs | 61 +++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 34 deletions(-) diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index ab1687fc95..3480e51f37 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -143,52 +143,45 @@ impl NetworkResponseState { view: TYPES::View, key: &TYPES::SignatureKey, ) -> Option>> { - let contained = self - .consensus - .read() - .await - .vid_shares() - .get(&view) - .is_some_and(|m| m.contains_key(key)); - if !contained { - let cur_epoch = self.consensus.read().await.cur_epoch(); - if Consensus::calculate_and_update_vid( + let consensus_reader = self.consensus.read().await; + if let Some(view) = consensus_reader.vid_shares().get(&view) { + if let Some(share) = view.get(key) { + return Some(share.clone()); + } + } + + let cur_epoch = consensus_reader.cur_epoch(); + drop(consensus_reader); + + if Consensus::calculate_and_update_vid( + OuterConsensus::new(Arc::clone(&self.consensus)), + view, + Arc::clone(&self.quorum), + &self.private_key, + cur_epoch, + ) + .await + .is_none() + { + // Sleep in hope we receive txns in the meantime + async_sleep(TXNS_TIMEOUT).await; + Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.quorum), &self.private_key, cur_epoch, ) - .await - .is_none() - { - // Sleep in hope we receive txns in the meantime - async_sleep(TXNS_TIMEOUT).await; - Consensus::calculate_and_update_vid( - OuterConsensus::new(Arc::clone(&self.consensus)), - view, - Arc::clone(&self.quorum), - &self.private_key, - cur_epoch, - ) - .await?; - } - return self - .consensus - .read() - .await - .vid_shares() - .get(&view)? - .get(key) - .cloned(); + .await?; } - self.consensus + return self + .consensus .read() .await .vid_shares() .get(&view)? .get(key) - .cloned() + .cloned(); } /// Makes sure the sender is allowed to send a request in the given epoch. From 49597b786c6bad15dca5a401d8a0744682dbb9cf Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:16:57 -0400 Subject: [PATCH 1277/1393] Libp2p & VID fixes (#3829) * re-introduce optimistic VID * cbor fork * add leader to request pool --- examples/infra/mod.rs | 3 +- hotshot/src/traits.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 4 +- libp2p-networking/Cargo.toml | 1 + libp2p-networking/src/network/cbor.rs | 145 ++++++++++++++++++ libp2p-networking/src/network/def.rs | 8 +- libp2p-networking/src/network/mod.rs | 5 +- libp2p-networking/src/network/node.rs | 18 ++- libp2p-networking/src/network/node/config.rs | 22 +++ task-impls/src/da.rs | 40 ++++- task-impls/src/request.rs | 9 +- 11 files changed, 241 insertions(+), 16 deletions(-) create mode 100644 libp2p-networking/src/network/cbor.rs diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 929232e45d..b79769df6e 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -62,7 +62,7 @@ use hotshot_types::{ }, HotShotConfig, PeerConfig, ValidatorConfig, }; -use libp2p_networking::network::GossipConfig; +use libp2p_networking::network::{GossipConfig, RequestResponseConfig}; use rand::{rngs::StdRng, SeedableRng}; use surf_disco::Url; use tracing::{debug, error, info, warn}; @@ -754,6 +754,7 @@ where let libp2p_network = Libp2pNetwork::from_config( config.clone(), GossipConfig::default(), + RequestResponseConfig::default(), bind_address, &public_key, &private_key, diff --git a/hotshot/src/traits.rs b/hotshot/src/traits.rs index 8667943189..1ff090fc06 100644 --- a/hotshot/src/traits.rs +++ b/hotshot/src/traits.rs @@ -20,7 +20,7 @@ pub mod implementations { combined_network::{CombinedNetworks, UnderlyingCombinedNetworks}, libp2p_network::{ derive_libp2p_keypair, derive_libp2p_multiaddr, derive_libp2p_peer_id, GossipConfig, - Libp2pMetricsValue, Libp2pNetwork, PeerInfoVec, + Libp2pMetricsValue, Libp2pNetwork, PeerInfoVec, RequestResponseConfig, }, memory_network::{MasterMap, MemoryNetwork}, push_cdn_network::{ diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index de0a76156a..6567450243 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -59,7 +59,7 @@ use libp2p_identity::{ ed25519::{self, SecretKey}, Keypair, PeerId, }; -pub use libp2p_networking::network::GossipConfig; +pub use libp2p_networking::network::{GossipConfig, RequestResponseConfig}; use libp2p_networking::{ network::{ behaviours::dht::record::{Namespace, RecordKey, RecordValue}, @@ -395,6 +395,7 @@ impl Libp2pNetwork { pub async fn from_config( mut config: NetworkConfig, gossip_config: GossipConfig, + request_response_config: RequestResponseConfig, bind_address: Multiaddr, pub_key: &K, priv_key: &K::PrivateKey, @@ -414,6 +415,7 @@ impl Libp2pNetwork { // Set the gossip configuration config_builder.gossip_config(gossip_config.clone()); + config_builder.request_response_config(request_response_config); // Extrapolate the stake table from the known nodes let stake_table: HashSet = config diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ecf4aa20fb..727ee9146a 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -39,6 +39,7 @@ void = "1" lazy_static = { workspace = true } pin-project = "1" portpicker.workspace = true +cbor4ii = "0.3" [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } diff --git a/libp2p-networking/src/network/cbor.rs b/libp2p-networking/src/network/cbor.rs new file mode 100644 index 0000000000..a289b998b5 --- /dev/null +++ b/libp2p-networking/src/network/cbor.rs @@ -0,0 +1,145 @@ +use async_trait::async_trait; +use cbor4ii::core::error::DecodeError; +use futures::prelude::*; +use libp2p::{ + request_response::{self, Codec}, + StreamProtocol, +}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + +/// `Behaviour` type alias for the `Cbor` codec +pub type Behaviour = request_response::Behaviour>; + +/// Forked `cbor` codec with altered request/response sizes +pub struct Cbor { + /// Phantom data + phantom: PhantomData<(Req, Resp)>, + /// Maximum request size in bytes + request_size_maximum: u64, + /// Maximum response size in bytes + response_size_maximum: u64, +} + +impl Default for Cbor { + fn default() -> Self { + Cbor { + phantom: PhantomData, + request_size_maximum: 20 * 1024 * 1024, + response_size_maximum: 20 * 1024 * 1024, + } + } +} + +impl Cbor { + /// Create a new `Cbor` codec with the given request and response sizes + #[must_use] + pub fn new(request_size_maximum: u64, response_size_maximum: u64) -> Self { + Cbor { + phantom: PhantomData, + request_size_maximum, + response_size_maximum, + } + } +} + +impl Clone for Cbor { + fn clone(&self) -> Self { + Self::default() + } +} + +#[async_trait] +impl Codec for Cbor +where + Req: Send + Serialize + DeserializeOwned, + Resp: Send + Serialize + DeserializeOwned, +{ + type Protocol = StreamProtocol; + type Request = Req; + type Response = Resp; + + async fn read_request(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut vec = Vec::new(); + + io.take(self.request_size_maximum) + .read_to_end(&mut vec) + .await?; + + cbor4ii::serde::from_slice(vec.as_slice()).map_err(decode_into_io_error) + } + + async fn read_response(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut vec = Vec::new(); + + io.take(self.response_size_maximum) + .read_to_end(&mut vec) + .await?; + + cbor4ii::serde::from_slice(vec.as_slice()).map_err(decode_into_io_error) + } + + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + let data: Vec = + cbor4ii::serde::to_vec(Vec::new(), &req).map_err(encode_into_io_error)?; + + io.write_all(data.as_ref()).await?; + + Ok(()) + } + + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + resp: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + let data: Vec = + cbor4ii::serde::to_vec(Vec::new(), &resp).map_err(encode_into_io_error)?; + + io.write_all(data.as_ref()).await?; + + Ok(()) + } +} + +/// Convert a `cbor4ii::serde::DecodeError` into an `io::Error` +fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Error { + match err { + cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { + io::Error::new(io::ErrorKind::Other, e) + } + cbor4ii::serde::DecodeError::Core(e @ DecodeError::Unsupported { .. }) => { + io::Error::new(io::ErrorKind::Unsupported, e) + } + cbor4ii::serde::DecodeError::Core(e @ DecodeError::Eof { .. }) => { + io::Error::new(io::ErrorKind::UnexpectedEof, e) + } + cbor4ii::serde::DecodeError::Core(e) => io::Error::new(io::ErrorKind::InvalidData, e), + cbor4ii::serde::DecodeError::Custom(e) => { + io::Error::new(io::ErrorKind::Other, e.to_string()) + } + } +} + +/// Convert a `cbor4ii::serde::EncodeError` into an `io::Error` +fn encode_into_io_error(err: cbor4ii::serde::EncodeError) -> io::Error { + io::Error::new(io::ErrorKind::Other, err) +} diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 39d6bdd1b2..ca0045dcb0 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -10,14 +10,14 @@ use libp2p::{ gossipsub::{Behaviour as GossipBehaviour, Event as GossipEvent, IdentTopic}, identify::{Behaviour as IdentifyBehaviour, Event as IdentifyEvent}, kad::store::MemoryStore, - request_response::{cbor, OutboundRequestId, ResponseChannel}, + request_response::{OutboundRequestId, ResponseChannel}, Multiaddr, }; use libp2p_identity::PeerId; use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; -use super::{behaviours::dht::store::ValidatedStore, NetworkEventInternal}; +use super::{behaviours::dht::store::ValidatedStore, cbor, NetworkEventInternal}; /// Overarching network behaviour performing: /// - network topology discovoery @@ -45,7 +45,7 @@ pub struct NetworkDef { /// purpose: directly messaging peer #[debug(skip)] - pub direct_message: libp2p::request_response::cbor::Behaviour, Vec>, + pub direct_message: cbor::Behaviour, Vec>, /// Auto NAT behaviour to determine if we are publically reachable and /// by which address @@ -60,7 +60,7 @@ impl NetworkDef { gossipsub: GossipBehaviour, dht: libp2p::kad::Behaviour>, identify: IdentifyBehaviour, - direct_message: cbor::Behaviour, Vec>, + direct_message: super::cbor::Behaviour, Vec>, autonat: autonat::Behaviour, ) -> NetworkDef { Self { diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 693ef1e292..dea939556a 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -13,6 +13,9 @@ mod node; /// Alternative Libp2p transport implementations pub mod transport; +/// Forked `cbor` codec with altered request/response sizes +pub mod cbor; + use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::Sender; @@ -44,7 +47,7 @@ pub use self::{ node::{ spawn_network_node, GossipConfig, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle, NetworkNodeReceiver, - DEFAULT_REPLICATION_FACTOR, + RequestResponseConfig, DEFAULT_REPLICATION_FACTOR, }, }; #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 08e896d785..c3ec3f6316 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -41,7 +41,7 @@ use libp2p::{ identity::Keypair, kad::{store::MemoryStore, Behaviour, Config, Mode, Record}, request_response::{ - Behaviour as RequestResponse, Config as RequestResponseConfig, ProtocolSupport, + Behaviour as RequestResponse, Config as Libp2pRequestResponseConfig, ProtocolSupport, }, swarm::SwarmEvent, Multiaddr, StreamProtocol, Swarm, SwarmBuilder, @@ -53,7 +53,7 @@ use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ config::{ GossipConfig, NetworkNodeConfig, NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, - DEFAULT_REPLICATION_FACTOR, + RequestResponseConfig, DEFAULT_REPLICATION_FACTOR, }, handle::{spawn_network_node, NetworkNodeHandle, NetworkNodeReceiver}, }; @@ -62,6 +62,7 @@ use super::{ bootstrap::{self, DHTBootstrapTask, InputEvent}, store::ValidatedStore, }, + cbor::Cbor, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkError, NetworkEvent, NetworkEventInternal, }; @@ -269,10 +270,17 @@ impl NetworkNode { ); kadem.set_mode(Some(Mode::Server)); - let rrconfig = RequestResponseConfig::default(); + let rrconfig = Libp2pRequestResponseConfig::default(); - let direct_message: libp2p::request_response::cbor::Behaviour, Vec> = - RequestResponse::new( + // Create a new `cbor` codec with the given request and response sizes + let cbor = Cbor::new( + config.request_response_config.request_size_maximum, + config.request_response_config.response_size_maximum, + ); + + let direct_message: super::cbor::Behaviour, Vec> = + RequestResponse::with_codec( + cbor, [( StreamProtocol::new("/HotShot/direct_message/1.0"), ProtocolSupport::Full, diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index c51fc281ea..db9d5c13ad 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -33,6 +33,10 @@ pub struct NetworkNodeConfig { /// Configuration for `GossipSub` pub gossip_config: GossipConfig, + #[builder(default)] + /// Configuration for `RequestResponse` + pub request_response_config: RequestResponseConfig, + /// list of addresses to connect to at initialization pub to_connect_addrs: HashSet<(PeerId, Multiaddr)>, /// republication interval in DHT, must be much less than `ttl` @@ -151,3 +155,21 @@ impl Default for GossipConfig { } } } + +/// Configuration for Libp2p's request-response +#[derive(Clone, Debug)] +pub struct RequestResponseConfig { + /// The maximum request size in bytes + pub request_size_maximum: u64, + /// The maximum response size in bytes + pub response_size_maximum: u64, +} + +impl Default for RequestResponseConfig { + fn default() -> Self { + Self { + request_size_maximum: 20 * 1024 * 1024, + response_size_maximum: 20 * 1024 * 1024, + } + } +} diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index bada6f4ddf..557895e95d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -7,13 +7,14 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::{OuterConsensus, View}, + consensus::{Consensus, OuterConsensus, View}, data::{DaProposal, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, @@ -22,6 +23,7 @@ use hotshot_types::{ traits::{ block_contents::vid_commitment, election::Membership, + network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, @@ -231,6 +233,42 @@ impl, V: Versions> DaTaskState { tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 6964e38cd7..abacf32ffd 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -180,9 +180,14 @@ impl> NetworkRequestState = self .da_membership @@ -205,7 +210,7 @@ impl> NetworkRequestState Date: Mon, 4 Nov 2024 09:48:09 -0500 Subject: [PATCH 1278/1393] [Libp2p] Proof of Stake changes (#3811) * libp2p PoS * Fix the merge conflict --- example-types/src/node_types.rs | 2 +- examples/infra/mod.rs | 32 +++--- examples/libp2p/types.rs | 4 +- .../src/traits/networking/combined_network.rs | 10 +- .../src/traits/networking/libp2p_network.rs | 84 +++++++-------- libp2p-networking/Cargo.toml | 3 + libp2p-networking/src/network/mod.rs | 9 +- libp2p-networking/src/network/node.rs | 23 ++-- libp2p-networking/src/network/node/config.rs | 6 +- libp2p-networking/src/network/node/handle.rs | 30 +++--- libp2p-networking/src/network/transport.rs | 70 ++++++++---- libp2p-networking/tests/common/mod.rs | 37 +++---- libp2p-networking/tests/counter.rs | 100 +++++++++--------- 13 files changed, 214 insertions(+), 196 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 8884d3e7ce..785c66b3af 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -159,7 +159,7 @@ impl NodeImplementation for CombinedImpl { } impl NodeImplementation for Libp2pImpl { - type Network = Libp2pNetwork; + type Network = Libp2pNetwork; type Storage = TestStorage; type AuctionResultsProvider = TestAuctionResultsProvider; } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index b79769df6e..8d8bf4d3b7 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -380,9 +380,7 @@ pub trait RunDa< let all_nodes = if cfg!(feature = "fixed-leader-election") { let mut vec = config.config.known_nodes_with_stake.clone(); - vec.truncate(config.config.fixed_leader_for_gpuvid); - vec } else { config.config.known_nodes_with_stake.clone() @@ -689,7 +687,7 @@ pub struct Libp2pDaRun { /// The underlying network configuration config: NetworkConfig, /// The underlying network - network: Libp2pNetwork, + network: Libp2pNetwork, } #[async_trait] @@ -702,12 +700,12 @@ impl< >, NODE: NodeImplementation< TYPES, - Network = Libp2pNetwork, + Network = Libp2pNetwork, Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, V: Versions, - > RunDa, NODE, V> for Libp2pDaRun + > RunDa, NODE, V> for Libp2pDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -746,6 +744,10 @@ where .to_string() }; + // Create the qurorum membership from the list of known nodes + let all_nodes = config.config.known_nodes_with_stake.clone(); + let quorum_membership = TYPES::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); + // Derive the bind address let bind_address = derive_libp2p_multiaddr(&bind_address).expect("failed to derive bind address"); @@ -753,6 +755,7 @@ where // Create the Libp2p network let libp2p_network = Libp2pNetwork::from_config( config.clone(), + quorum_membership, GossipConfig::default(), RequestResponseConfig::default(), bind_address, @@ -772,7 +775,7 @@ where } } - fn network(&self) -> Libp2pNetwork { + fn network(&self) -> Libp2pNetwork { self.network.clone() } @@ -818,14 +821,15 @@ where libp2p_advertise_address: Option, ) -> CombinedDaRun { // Initialize our Libp2p network - let libp2p_network: Libp2pDaRun = - as RunDa< - TYPES, - Libp2pNetwork, - Libp2pImpl, - V, - >>::initialize_networking(config.clone(), libp2p_advertise_address.clone()) - .await; + let libp2p_network: Libp2pDaRun = as RunDa< + TYPES, + Libp2pNetwork, + Libp2pImpl, + V, + >>::initialize_networking( + config.clone(), libp2p_advertise_address.clone() + ) + .await; // Initialize our CDN network let cdn_network: PushCdnDaRun = diff --git a/examples/libp2p/types.rs b/examples/libp2p/types.rs index afcfa236a1..ed8fbcda6f 100644 --- a/examples/libp2p/types.rs +++ b/examples/libp2p/types.rs @@ -11,7 +11,7 @@ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, storage_types::TestStorage, }; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use crate::infra::Libp2pDaRun; @@ -21,7 +21,7 @@ use crate::infra::Libp2pDaRun; pub struct NodeImpl {} /// Convenience type alias -pub type Network = Libp2pNetwork<::SignatureKey>; +pub type Network = Libp2pNetwork; impl NodeImplementation for NodeImpl { type Network = Network; diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 584e4e888e..0f8a4cdcd5 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -95,7 +95,7 @@ impl CombinedNetworks { #[must_use] pub fn new( primary_network: PushCdnNetwork, - secondary_network: Libp2pNetwork, + secondary_network: Libp2pNetwork, delay_duration: Option, ) -> Self { // Create networks from the ones passed in @@ -127,7 +127,7 @@ impl CombinedNetworks { /// Get a ref to the backup network #[must_use] - pub fn secondary(&self) -> &Libp2pNetwork { + pub fn secondary(&self) -> &Libp2pNetwork { &self.networks.1 } @@ -251,7 +251,7 @@ impl CombinedNetworks { #[derive(Clone)] pub struct UnderlyingCombinedNetworks( pub PushCdnNetwork, - pub Libp2pNetwork, + pub Libp2pNetwork, ); #[cfg(feature = "hotshot-testing")] @@ -273,7 +273,7 @@ impl TestableNetworkingImplementation for CombinedNetwor None, Duration::default(), ), - as TestableNetworkingImplementation>::generator( + as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, network_id, @@ -297,7 +297,7 @@ impl TestableNetworkingImplementation for CombinedNetwor // Combine the two let underlying_combined = UnderlyingCombinedNetworks( cdn.clone(), - Arc::>::unwrap_or_clone(p2p), + Arc::>::unwrap_or_clone(p2p), ); // We want to use the same message cache between the two networks diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 6567450243..f5cbac9f50 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -132,7 +132,7 @@ pub struct Empty { byte: u8, } -impl Debug for Libp2pNetwork { +impl Debug for Libp2pNetwork { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Libp2p").field("inner", &"inner").finish() } @@ -143,17 +143,17 @@ pub type PeerInfoVec = Arc>>; /// The underlying state of the libp2p network #[derive(Debug)] -struct Libp2pNetworkInner { +struct Libp2pNetworkInner { /// this node's public key - pk: K, + pk: T::SignatureKey, /// handle to control the network - handle: Arc>, + handle: Arc>, /// Message Receiver receiver: UnboundedReceiver>, /// Sender for broadcast messages sender: UnboundedSender>, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) - node_lookup_send: BoundedSender>, + node_lookup_send: BoundedSender>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -181,15 +181,13 @@ struct Libp2pNetworkInner { /// Networking implementation that uses libp2p /// generic over `M` which is the message type #[derive(Clone)] -pub struct Libp2pNetwork { +pub struct Libp2pNetwork { /// holds the state of the libp2p network - inner: Arc>, + inner: Arc>, } #[cfg(feature = "hotshot-testing")] -impl TestableNetworkingImplementation - for Libp2pNetwork -{ +impl TestableNetworkingImplementation for Libp2pNetwork { /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` /// with the purpose of generating libp2p networks. /// Generates `num_bootstrap` bootstrap nodes. The remainder of nodes are normal @@ -232,12 +230,11 @@ impl TestableNetworkingImplementation Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{port}/quic-v1")).unwrap(); // We assign node's public key and stake value rather than read from config file since it's a test - let privkey = - TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); + let privkey = T::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let pubkey = T::SignatureKey::from_private(&privkey); // Derive the Libp2p keypair from the private key - let libp2p_keypair = derive_libp2p_keypair::(&privkey) + let libp2p_keypair = derive_libp2p_keypair::(&privkey) .expect("Failed to derive libp2p keypair"); // Sign the lookup record @@ -383,7 +380,7 @@ pub fn derive_libp2p_multiaddr(addr: &String) -> anyhow::Result { }) } -impl Libp2pNetwork { +impl Libp2pNetwork { /// Create and return a Libp2p network from a network config file /// and various other configuration-specific values. /// @@ -392,13 +389,15 @@ impl Libp2pNetwork { /// /// # Panics /// If we are unable to calculate the replication factor + #[allow(clippy::too_many_arguments)] pub async fn from_config( - mut config: NetworkConfig, + mut config: NetworkConfig, + quorum_membership: T::Membership, gossip_config: GossipConfig, request_response_config: RequestResponseConfig, bind_address: Multiaddr, - pub_key: &K, - priv_key: &K::PrivateKey, + pub_key: &T::SignatureKey, + priv_key: &::PrivateKey, metrics: Libp2pMetricsValue, ) -> anyhow::Result { // Try to take our Libp2p config from our broader network config @@ -408,7 +407,7 @@ impl Libp2pNetwork { .ok_or(anyhow!("Libp2p config not supplied"))?; // Derive our Libp2p keypair from our supplied private key - let keypair = derive_libp2p_keypair::(priv_key)?; + let keypair = derive_libp2p_keypair::(priv_key)?; // Build our libp2p configuration let mut config_builder = NetworkNodeConfigBuilder::default(); @@ -417,21 +416,14 @@ impl Libp2pNetwork { config_builder.gossip_config(gossip_config.clone()); config_builder.request_response_config(request_response_config); - // Extrapolate the stake table from the known nodes - let stake_table: HashSet = config - .config - .known_nodes_with_stake - .iter() - .map(|node| K::public_key(&node.stake_table_entry)) - .collect(); - + // Construct the auth message let auth_message = construct_auth_message(pub_key, &keypair.public().to_peer_id(), priv_key) .with_context(|| "Failed to construct auth message")?; // Set the auth message and stake table config_builder - .stake_table(Some(stake_table)) + .stake_table(Some(quorum_membership)) .auth_message(Some(auth_message)); // The replication factor is the minimum of [the default and 2/3 the number of nodes] @@ -474,7 +466,7 @@ impl Libp2pNetwork { // Insert all known nodes into the set of all keys for node in config.config.known_nodes_with_stake { - all_keys.insert(K::public_key(&node.stake_table_entry)); + all_keys.insert(T::SignatureKey::public_key(&node.stake_table_entry)); } Ok(Libp2pNetwork::new( @@ -521,14 +513,14 @@ impl Libp2pNetwork { #[allow(clippy::too_many_arguments)] pub async fn new( metrics: Libp2pMetricsValue, - config: NetworkNodeConfig, - pk: K, - lookup_record_value: RecordValue, + config: NetworkNodeConfig, + pk: T::SignatureKey, + lookup_record_value: RecordValue, bootstrap_addrs: BootstrapAddrs, id: usize, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, - ) -> Result, NetworkError> { - let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) + ) -> Result, NetworkError> { + let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) .await .map_err(|e| NetworkError::ConfigError(format!("failed to spawn network node: {e}")))?; @@ -586,7 +578,10 @@ impl Libp2pNetwork { /// Spawns task for looking up nodes pre-emptively #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - fn spawn_node_lookup(&self, mut node_lookup_recv: BoundedReceiver>) { + fn spawn_node_lookup( + &self, + mut node_lookup_recv: BoundedReceiver>, + ) { let handle = Arc::clone(&self.inner.handle); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = Arc::clone(&self.inner.latest_seen_view); @@ -613,7 +608,7 @@ impl Libp2pNetwork { } /// Initiates connection to the outside world - fn spawn_connect(&mut self, id: usize, lookup_record_value: RecordValue) { + fn spawn_connect(&mut self, id: usize, lookup_record_value: RecordValue) { let pk = self.inner.pk.clone(); let bootstrap_ref = Arc::clone(&self.inner.bootstrap_addrs); let handle = Arc::clone(&self.inner.handle); @@ -765,7 +760,7 @@ impl Libp2pNetwork { } #[async_trait] -impl ConnectedNetwork for Libp2pNetwork { +impl ConnectedNetwork for Libp2pNetwork { #[instrument(name = "Libp2pNetwork::ready_blocking", skip_all)] async fn wait_for_ready(&self) { self.wait_for_ready().await; @@ -854,7 +849,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn da_broadcast_message( &self, message: Vec, - recipients: Vec, + recipients: Vec, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // If we're not ready, return an error @@ -884,7 +879,11 @@ impl ConnectedNetwork for Libp2pNetwork { } #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] - async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { + async fn direct_message( + &self, + message: Vec, + recipient: T::SignatureKey, + ) -> Result<(), NetworkError> { // If we're not ready, return an error if !self.is_ready() { self.inner.metrics.num_failed_messages.add(1); @@ -966,11 +965,12 @@ impl ConnectedNetwork for Libp2pNetwork { } #[instrument(name = "Libp2pNetwork::queue_node_lookup", skip_all)] + #[allow(clippy::type_complexity)] fn queue_node_lookup( &self, view_number: ViewNumber, - pk: K, - ) -> Result<(), TrySendError>> { + pk: T::SignatureKey, + ) -> Result<(), TrySendError>> { self.inner .node_lookup_send .try_send(Some((view_number, pk))) @@ -990,7 +990,7 @@ impl ConnectedNetwork for Libp2pNetwork { /// use of the future view and leader to queue the lookups. async fn update_view<'a, TYPES>(&'a self, view: u64, epoch: u64, membership: &TYPES::Membership) where - TYPES: NodeType + 'a, + TYPES: NodeType + 'a, { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = ::Epoch::new(epoch); diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 727ee9146a..16ada4dcfa 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -11,6 +11,9 @@ authors = { workspace = true } default = ["webui"] webui = [] +[dev-dependencies] +hotshot-example-types = { path = "../example-types" } + [dependencies] anyhow = { workspace = true } async-compatibility-layer = { workspace = true } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index dea939556a..a23c74fbd3 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -19,7 +19,7 @@ pub mod cbor; use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::Sender; -use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; +use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; #[cfg(async_executor_impl = "async-std")] use libp2p::dns::async_std::Transport as DnsTransport; #[cfg(async_executor_impl = "tokio")] @@ -165,9 +165,9 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; /// # Errors /// If we could not create a DNS transport #[instrument(skip(identity))] -pub async fn gen_transport( +pub async fn gen_transport( identity: Keypair, - stake_table: Option>, + stake_table: Option, auth_message: Option>, ) -> Result { // Create the initial `Quic` transport @@ -178,7 +178,8 @@ pub async fn gen_transport( }; // Require authentication against the stake table - let transport = StakeTableAuthentication::new(transport, stake_table, auth_message); + let transport: StakeTableAuthentication<_, T, _> = + StakeTableAuthentication::new(transport, stake_table, auth_message); // Support DNS resolution let transport = { diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index c3ec3f6316..8e3b7888f0 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -14,7 +14,6 @@ mod handle; use std::{ collections::{HashMap, HashSet}, iter, - marker::PhantomData, num::{NonZeroU32, NonZeroUsize}, time::Duration, }; @@ -25,7 +24,7 @@ use async_compatibility_layer::{ }; use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; use hotshot_types::{ - constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::signature_key::SignatureKey, + constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::node_implementation::NodeType, }; use libp2p::{ autonat, @@ -83,32 +82,29 @@ pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; /// Network definition #[derive(custom_debug::Debug)] -pub struct NetworkNode { +pub struct NetworkNode { /// The keypair for the node keypair: Keypair, /// peer id of network node peer_id: PeerId, /// the swarm of networkbehaviours #[debug(skip)] - swarm: Swarm>, + swarm: Swarm>, /// the configuration parameters of the netework - config: NetworkNodeConfig, + config: NetworkNodeConfig, /// the listener id we are listening on, if it exists listener_id: Option, /// Handler for direct messages direct_message_state: DMBehaviour, /// Handler for DHT Events - dht_handler: DHTBehaviour, + dht_handler: DHTBehaviour, /// Channel to resend requests, set to Some when we call `spawn_listeners` resend_tx: Option>, /// Send to the bootstrap task to tell it to start a bootstrap bootstrap_tx: Option>, - - /// Phantom data to hold the key type - pd: PhantomData, } -impl NetworkNode { +impl NetworkNode { /// Returns number of peers this node is connected to pub fn num_connected(&self) -> usize { self.swarm.connected_peers().count() @@ -168,7 +164,7 @@ impl NetworkNode { /// * Generates a connection to the "broadcast" topic /// * Creates a swarm to manage peers and events #[instrument] - pub async fn new(config: NetworkNodeConfig) -> Result { + pub async fn new(config: NetworkNodeConfig) -> Result { // Generate a random `KeyPair` if one is not specified let keypair = config .keypair @@ -179,7 +175,7 @@ impl NetworkNode { let peer_id = PeerId::from(keypair.public()); // Generate the transport from the keypair, stake table, and auth message - let transport: BoxedTransport = gen_transport::( + let transport: BoxedTransport = gen_transport::( keypair.clone(), config.stake_table.clone(), config.auth_message.clone(), @@ -187,7 +183,7 @@ impl NetworkNode { .await?; // Generate the swarm - let mut swarm: Swarm> = { + let mut swarm: Swarm> = { // Use the `Blake3` hash of the message's contents as the ID let message_id_fn = |message: &GossipsubMessage| { let hash = blake3::hash(&message.data); @@ -337,7 +333,6 @@ impl NetworkNode { ), resend_tx: None, bootstrap_tx: None, - pd: PhantomData, }) } diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index db9d5c13ad..2dfaaa3639 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -6,7 +6,7 @@ use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::node_implementation::NodeType; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; @@ -17,7 +17,7 @@ pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(1 /// describe the configuration of the network #[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] -pub struct NetworkNodeConfig { +pub struct NetworkNodeConfig { /// The keypair for the node #[builder(setter(into, strip_option), default)] #[debug(skip)] @@ -49,7 +49,7 @@ pub struct NetworkNodeConfig { /// The stake table. Used for authenticating other nodes. If not supplied /// we will not check other nodes against the stake table #[builder(default)] - pub stake_table: Option>, + pub stake_table: Option, /// The signed authentication message sent to the remote peer /// If not supplied we will not send an authentication message during the handshake diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index 3ec68665f2..f9317e7aa1 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -4,13 +4,13 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashSet, fmt::Debug, marker::PhantomData, time::Duration}; +use std::{collections::HashSet, fmt::Debug, time::Duration}; use async_compatibility_layer::{ art::{async_sleep, async_timeout}, channel::{Receiver, UnboundedReceiver, UnboundedSender}, }; -use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; +use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; use tracing::{debug, info, instrument}; @@ -24,9 +24,9 @@ use crate::network::{ /// - A reference to the state /// - Controls for the swarm #[derive(Debug, Clone)] -pub struct NetworkNodeHandle { +pub struct NetworkNodeHandle { /// network configuration - network_config: NetworkNodeConfig, + network_config: NetworkNodeConfig, /// send an action to the networkbehaviour send_network: UnboundedSender, @@ -39,9 +39,6 @@ pub struct NetworkNodeHandle { /// human readable id id: usize, - - /// Phantom data to hold the key type - pd: PhantomData, } /// internal network node receiver @@ -78,10 +75,10 @@ impl NetworkNodeReceiver { /// Spawn a network node task task and return the handle and the receiver for it /// # Errors /// Errors if spawning the task fails -pub async fn spawn_network_node( - config: NetworkNodeConfig, +pub async fn spawn_network_node( + config: NetworkNodeConfig, id: usize, -) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkError> { +) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkError> { let mut network = NetworkNode::new(config.clone()) .await .map_err(|e| NetworkError::ConfigError(format!("failed to create network node: {e}")))?; @@ -104,18 +101,17 @@ pub async fn spawn_network_node( recv_kill: None, }; - let handle = NetworkNodeHandle:: { + let handle = NetworkNodeHandle:: { network_config: config, send_network: send_chan, listen_addr, peer_id, id, - pd: PhantomData, }; Ok((receiver, handle)) } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Cleanly shuts down a swarm node /// This is done by sending a message to /// the swarm itself to spin down @@ -217,7 +213,7 @@ impl NetworkNodeHandle { pub async fn put_record( &self, key: RecordKey, - value: RecordValue, + value: RecordValue, ) -> Result<(), NetworkError> { // Serialize the key let key = key.to_bytes(); @@ -263,7 +259,7 @@ impl NetworkNodeHandle { let result = r.await.map_err(|_| NetworkError::RequestCancelled)?; // Deserialize the record's value - let record: RecordValue = bincode::deserialize(&result) + let record: RecordValue = bincode::deserialize(&result) .map_err(|e| NetworkError::FailedToDeserialize(e.to_string()))?; Ok(record.value().to_vec()) @@ -292,7 +288,7 @@ impl NetworkNodeHandle { pub async fn put_record_timeout( &self, key: RecordKey, - value: RecordValue, + value: RecordValue, timeout: Duration, ) -> Result<(), NetworkError> { async_timeout(timeout, self.put_record(key, value)) @@ -467,7 +463,7 @@ impl NetworkNodeHandle { /// Return a reference to the network config #[must_use] - pub fn config(&self) -> &NetworkNodeConfig { + pub fn config(&self) -> &NetworkNodeConfig { &self.network_config } } diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index ea58db2001..c959c11edd 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -1,7 +1,5 @@ use std::{ - collections::HashSet, future::Future, - hash::BuildHasher, io::{Error as IoError, ErrorKind as IoErrorKind}, pin::Pin, sync::Arc, @@ -11,7 +9,10 @@ use std::{ use anyhow::{ensure, Context, Result as AnyhowResult}; use async_compatibility_layer::art::async_timeout; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::node_implementation::ConsensusTime; +use hotshot_types::traits::{ + election::Membership, node_implementation::NodeType, signature_key::SignatureKey, +}; use libp2p::{ core::{muxing::StreamMuxerExt, transport::TransportEvent, StreamMuxer}, identity::PeerId, @@ -34,14 +35,13 @@ const AUTH_HANDSHAKE_TIMEOUT: std::time::Duration = std::time::Duration::from_se /// by performing a handshake that checks if the remote peer is present in the /// stake table. #[pin_project] -pub struct StakeTableAuthentication -{ +pub struct StakeTableAuthentication { #[pin] /// The underlying transport we are wrapping pub inner: T, /// The stake table we check against to authenticate connections - pub stake_table: Arc>>, + pub stake_table: Arc>, /// A pre-signed message that we send to the remote peer for authentication pub auth_message: Arc>>, @@ -54,10 +54,14 @@ pub struct StakeTableAuthentication = Pin::Output, ::Error>> + Send>>; -impl StakeTableAuthentication { +impl StakeTableAuthentication { /// Create a new `StakeTableAuthentication` transport that wraps the given transport /// and authenticates connections against the stake table. - pub fn new(inner: T, stake_table: Option>, auth_message: Option>) -> Self { + pub fn new( + inner: T, + stake_table: Option, + auth_message: Option>, + ) -> Self { Self { inner, stake_table: Arc::from(stake_table), @@ -98,9 +102,9 @@ impl StakeTableAuthentica /// - The message is invalid /// - The peer is not in the stake table /// - The signature is invalid - pub async fn verify_peer_authentication( + pub async fn verify_peer_authentication( stream: &mut R, - stake_table: Arc>>, + stake_table: Arc>, required_peer_id: &PeerId, ) -> AnyhowResult<()> { // If we have a stake table, check if the remote peer is in it @@ -109,7 +113,7 @@ impl StakeTableAuthentica let message = read_length_delimited(stream, MAX_AUTH_MESSAGE_SIZE).await?; // Deserialize the authentication message - let auth_message: AuthMessage = bincode::deserialize(&message) + let auth_message: AuthMessage = bincode::deserialize(&message) .with_context(|| "Failed to deserialize auth message")?; // Verify the signature on the public keys @@ -127,7 +131,7 @@ impl StakeTableAuthentica } // Check if the public key is in the stake table - if !stake_table.contains(&public_key) { + if !stake_table.has_stake(&public_key, Types::Epoch::new(0)) { return Err(anyhow::anyhow!("Peer not in stake table")); } } @@ -142,7 +146,7 @@ impl StakeTableAuthentica fn gen_handshake> + Send + 'static>( original_future: F, outgoing: bool, - stake_table: Arc>>, + stake_table: Arc>, auth_message: Arc>>, ) -> UpgradeFuture where @@ -286,8 +290,8 @@ pub fn construct_auth_message( bincode::serialize(&auth_message).with_context(|| "Failed to serialize auth message") } -impl Transport - for StakeTableAuthentication +impl Transport + for StakeTableAuthentication where T::Dial: Future> + Send + 'static, T::ListenerUpgrade: Send + 'static, @@ -513,16 +517,22 @@ pub async fn write_length_delimited( #[cfg(test)] mod test { - use std::{collections::HashSet, sync::Arc}; + use std::sync::Arc; - use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + use hotshot_types::{ + light_client::StateVerKey, + signature_key::BLSPubKey, + traits::{network::Topic, signature_key::SignatureKey}, + PeerConfig, + }; use libp2p::{core::transport::dummy::DummyTransport, quic::Connection}; use rand::Rng; use super::*; + use hotshot_example_types::node_types::TestTypes; /// A mock type to help with readability - type MockStakeTableAuth = StakeTableAuthentication; + type MockStakeTableAuth = StakeTableAuthentication; // Helper macro for generating a new identity and authentication message macro_rules! new_identity { @@ -629,8 +639,15 @@ mod test { let mut stream = cursor_from!(auth_message); // Create a stake table with the key - let mut stake_table = std::collections::HashSet::new(); - stake_table.insert(keypair.0); + let peer_config = PeerConfig { + stake_table_entry: keypair.0.stake_table_entry(1), + state_ver_key: StateVerKey::default(), + }; + let stake_table = ::Membership::new( + vec![peer_config.clone()], + vec![peer_config], + Topic::Global, + ); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -656,7 +673,7 @@ mod test { let mut stream = cursor_from!(auth_message); // Create an empty stake table - let stake_table: HashSet = std::collections::HashSet::new(); + let stake_table = ::Membership::new(vec![], vec![], Topic::Global); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -689,8 +706,15 @@ mod test { let mut stream = cursor_from!(auth_message); // Create a stake table with the key - let mut stake_table: HashSet = std::collections::HashSet::new(); - stake_table.insert(keypair.0); + let peer_config = PeerConfig { + stake_table_entry: keypair.0.stake_table_entry(1), + state_ver_key: StateVerKey::default(), + }; + let stake_table = ::Membership::new( + vec![peer_config.clone()], + vec![peer_config], + Topic::Global, + ); // Check against the malicious peer ID let result = MockStakeTableAuth::verify_peer_authentication( diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs index c4620baf9e..9e61786f5a 100644 --- a/libp2p-networking/tests/common/mod.rs +++ b/libp2p-networking/tests/common/mod.rs @@ -20,7 +20,7 @@ use async_compatibility_layer::{ logging::{setup_backtrace, setup_logging}, }; use futures::{future::join_all, Future, FutureExt}; -use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; +use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; use libp2p::Multiaddr; use libp2p_identity::PeerId; use libp2p_networking::network::{ @@ -31,8 +31,8 @@ use thiserror::Error; use tracing::{instrument, warn}; #[derive(Clone, Debug)] -pub(crate) struct HandleWithState { - pub(crate) handle: Arc>, +pub(crate) struct HandleWithState { + pub(crate) handle: Arc>, pub(crate) state: Arc>, } @@ -41,13 +41,13 @@ pub(crate) struct HandleWithState( - handle_and_state: HandleWithState, +pub fn spawn_handler( + handle_and_state: HandleWithState, mut receiver: NetworkNodeReceiver, cb: F, ) -> impl Future where - F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, + F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, RET: Future> + Send + 'static, S: Debug + Default + Send + Clone + 'static, { @@ -97,14 +97,7 @@ where /// - Initialize network nodes /// - Kill network nodes /// - A test assertion fails -pub async fn test_bed< - S: 'static + Send + Default + Debug + Clone, - F, - FutF, - G, - FutG, - K: SignatureKey + 'static, ->( +pub async fn test_bed( run_test: F, client_handler: G, num_nodes: usize, @@ -112,8 +105,8 @@ pub async fn test_bed< ) where FutF: Future, FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>, Duration) -> FutF, - G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, + F: FnOnce(Vec>, Duration) -> FutF, + G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, { setup_logging(); setup_backtrace(); @@ -121,7 +114,7 @@ pub async fn test_bed< let mut kill_switches = Vec::new(); // NOTE we want this to panic if we can't spin up the swarms. // that amounts to a failed test. - let handles_and_receivers = spin_up_swarms::(num_nodes, timeout).await.unwrap(); + let handles_and_receivers = spin_up_swarms::(num_nodes, timeout).await.unwrap(); let (handles, receivers): (Vec<_>, Vec<_>) = handles_and_receivers.into_iter().unzip(); let mut handler_futures = Vec::new(); @@ -149,9 +142,7 @@ pub async fn test_bed< } } -fn gen_peerid_map( - handles: &[Arc>], -) -> HashMap { +fn gen_peerid_map(handles: &[Arc>]) -> HashMap { let mut r_val = HashMap::new(); for handle in handles { r_val.insert(handle.peer_id(), handle.id()); @@ -161,7 +152,7 @@ fn gen_peerid_map( /// print the connections for each handle in `handles` /// useful for debugging -pub async fn print_connections(handles: &[Arc>]) { +pub async fn print_connections(handles: &[Arc>]) { let m = gen_peerid_map(handles); warn!("PRINTING CONNECTION STATES"); for handle in handles { @@ -183,10 +174,10 @@ pub async fn print_connections(handles: &[Arc( +pub async fn spin_up_swarms( num_of_nodes: usize, timeout_len: Duration, -) -> Result, NetworkNodeReceiver)>, TestError> { +) -> Result, NetworkNodeReceiver)>, TestError> { let mut handles = Vec::new(); let mut node_addrs = Vec::<(PeerId, Multiaddr)>::new(); let mut connecting_futs = Vec::new(); diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs index 6e1214c547..f207ec4414 100644 --- a/libp2p-networking/tests/counter.rs +++ b/libp2p-networking/tests/counter.rs @@ -14,9 +14,9 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::prelude::StreamExt; use common::{test_bed, HandleWithState, TestError}; -use hotshot_types::{ - signature_key::BLSPubKey, - traits::{network::NetworkError, signature_key::SignatureKey}, +use hotshot_example_types::node_types::TestTypes; +use hotshot_types::traits::{ + network::NetworkError, node_implementation::NodeType, signature_key::SignatureKey, }; use libp2p_networking::network::{ behaviours::dht::record::{Namespace, RecordKey, RecordValue}, @@ -63,10 +63,10 @@ pub enum CounterMessage { /// chooses one /// # Panics /// panics if handles is of length 0 -fn random_handle( - handles: &[HandleWithState], +fn random_handle( + handles: &[HandleWithState], rng: &mut dyn rand::RngCore, -) -> HandleWithState { +) -> HandleWithState { handles.iter().choose(rng).unwrap().clone() } @@ -74,9 +74,9 @@ fn random_handle( /// - updates state based on events received /// - replies to direct messages #[instrument] -pub async fn counter_handle_network_event( +pub async fn counter_handle_network_event( event: NetworkEvent, - handle: HandleWithState, + handle: HandleWithState, ) -> Result<(), NetworkError> { use CounterMessage::*; use NetworkEvent::*; @@ -166,9 +166,9 @@ pub async fn counter_handle_network_event( /// # Panics /// on error #[allow(clippy::similar_names)] -async fn run_request_response_increment<'a, K: SignatureKey + 'static>( - requester_handle: HandleWithState, - requestee_handle: HandleWithState, +async fn run_request_response_increment<'a, T: NodeType>( + requester_handle: HandleWithState, + requestee_handle: HandleWithState, timeout: Duration, ) -> Result<(), TestError> { async move { @@ -218,8 +218,8 @@ async fn run_request_response_increment<'a, K: SignatureKey + 'static>( /// broadcasts `msg` from a randomly chosen handle /// then asserts that all nodes match `new_state` -async fn run_gossip_round( - handles: &[HandleWithState], +async fn run_gossip_round( + handles: &[HandleWithState], msg: CounterMessage, new_state: CounterState, timeout_duration: Duration, @@ -293,8 +293,8 @@ async fn run_gossip_round( Ok(()) } -async fn run_intersperse_many_rounds( - handles: Vec>, +async fn run_intersperse_many_rounds( + handles: Vec>, timeout: Duration, ) { for i in 0..u32::try_from(NUM_ROUNDS).unwrap() { @@ -309,22 +309,22 @@ async fn run_intersperse_many_rounds( } } -async fn run_dht_many_rounds( - handles: Vec>, +async fn run_dht_many_rounds( + handles: Vec>, timeout: Duration, ) { run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; } -async fn run_dht_one_round( - handles: Vec>, +async fn run_dht_one_round( + handles: Vec>, timeout: Duration, ) { run_dht_rounds(&handles, timeout, 0, 1).await; } -async fn run_request_response_many_rounds( - handles: Vec>, +async fn run_request_response_many_rounds( + handles: Vec>, timeout: Duration, ) { for _i in 0..NUM_ROUNDS { @@ -338,8 +338,8 @@ async fn run_request_response_many_rounds( /// runs one round of request response /// # Panics /// on error -async fn run_request_response_one_round( - handles: Vec>, +async fn run_request_response_one_round( + handles: Vec>, timeout: Duration, ) { run_request_response_increment_all(&handles, timeout).await; @@ -351,8 +351,8 @@ async fn run_request_response_one_round( /// runs multiple rounds of gossip /// # Panics /// on error -async fn run_gossip_many_rounds( - handles: Vec>, +async fn run_gossip_many_rounds( + handles: Vec>, timeout: Duration, ) { run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await; @@ -361,8 +361,8 @@ async fn run_gossip_many_rounds( /// runs one round of gossip /// # Panics /// on error -async fn run_gossip_one_round( - handles: Vec>, +async fn run_gossip_one_round( + handles: Vec>, timeout: Duration, ) { run_gossip_rounds(&handles, 1, 0, timeout).await; @@ -371,8 +371,8 @@ async fn run_gossip_one_round( /// runs many rounds of dht /// # Panics /// on error -async fn run_dht_rounds( - handles: &[HandleWithState], +async fn run_dht_rounds( + handles: &[HandleWithState], timeout: Duration, _starting_val: usize, num_rounds: usize, @@ -384,7 +384,11 @@ async fn run_dht_rounds( // Create a random keypair let mut rng = StdRng::from_entropy(); - let (public_key, private_key) = K::generated_from_seed_indexed([1; 32], rng.gen::()); + let (public_key, private_key) = + ::generated_from_seed_indexed( + [1; 32], + rng.gen::(), + ); // Create a random value to sign let value = (0..DHT_KV_PADDING) @@ -422,8 +426,8 @@ async fn run_dht_rounds( } /// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast -async fn run_gossip_rounds( - handles: &[HandleWithState], +async fn run_gossip_rounds( + handles: &[HandleWithState], num_rounds: usize, starting_state: CounterState, timeout: Duration, @@ -448,8 +452,8 @@ async fn run_gossip_rounds( /// then has all other peers request its state /// and update their state to the recv'ed state #[allow(clippy::similar_names)] -async fn run_request_response_increment_all( - handles: &[HandleWithState], +async fn run_request_response_increment_all( + handles: &[HandleWithState], timeout: Duration, ) { let mut rng = rand::thread_rng(); @@ -524,7 +528,7 @@ async fn run_request_response_increment_all( #[instrument] async fn test_coverage_request_response_one_round() { Box::pin(test_bed( - run_request_response_one_round::, + run_request_response_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, @@ -538,7 +542,7 @@ async fn test_coverage_request_response_one_round() { #[instrument] async fn test_coverage_request_response_many_rounds() { Box::pin(test_bed( - run_request_response_many_rounds::, + run_request_response_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, @@ -552,7 +556,7 @@ async fn test_coverage_request_response_many_rounds() { #[instrument] async fn test_coverage_intersperse_many_rounds() { Box::pin(test_bed( - run_intersperse_many_rounds::, + run_intersperse_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, @@ -566,7 +570,7 @@ async fn test_coverage_intersperse_many_rounds() { #[instrument] async fn test_coverage_gossip_many_rounds() { Box::pin(test_bed( - run_gossip_many_rounds::, + run_gossip_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, @@ -580,7 +584,7 @@ async fn test_coverage_gossip_many_rounds() { #[instrument] async fn test_coverage_gossip_one_round() { Box::pin(test_bed( - run_gossip_one_round::, + run_gossip_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, @@ -595,7 +599,7 @@ async fn test_coverage_gossip_one_round() { #[ignore] async fn test_stress_request_response_one_round() { Box::pin(test_bed( - run_request_response_one_round::, + run_request_response_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -610,7 +614,7 @@ async fn test_stress_request_response_one_round() { #[ignore] async fn test_stress_request_response_many_rounds() { Box::pin(test_bed( - run_request_response_many_rounds::, + run_request_response_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -625,7 +629,7 @@ async fn test_stress_request_response_many_rounds() { #[ignore] async fn test_stress_intersperse_many_rounds() { Box::pin(test_bed( - run_intersperse_many_rounds::, + run_intersperse_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -640,7 +644,7 @@ async fn test_stress_intersperse_many_rounds() { #[ignore] async fn test_stress_gossip_many_rounds() { Box::pin(test_bed( - run_gossip_many_rounds::, + run_gossip_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -655,7 +659,7 @@ async fn test_stress_gossip_many_rounds() { #[ignore] async fn test_stress_gossip_one_round() { Box::pin(test_bed( - run_gossip_one_round::, + run_gossip_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -670,7 +674,7 @@ async fn test_stress_gossip_one_round() { #[ignore] async fn test_stress_dht_one_round() { Box::pin(test_bed( - run_dht_one_round::, + run_dht_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -685,7 +689,7 @@ async fn test_stress_dht_one_round() { #[ignore] async fn test_stress_dht_many_rounds() { Box::pin(test_bed( - run_dht_many_rounds::, + run_dht_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_STRESS, TIMEOUT_STRESS, @@ -699,7 +703,7 @@ async fn test_stress_dht_many_rounds() { #[instrument] async fn test_coverage_dht_one_round() { Box::pin(test_bed( - run_dht_one_round::, + run_dht_one_round::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, @@ -713,7 +717,7 @@ async fn test_coverage_dht_one_round() { #[instrument] async fn test_coverage_dht_many_rounds() { Box::pin(test_bed( - run_dht_many_rounds::, + run_dht_many_rounds::, counter_handle_network_event, TOTAL_NUM_PEERS_COVERAGE, TIMEOUT_COVERAGE, From 3b27f902c5b2034a21056e6bf5bd37821d4e66d6 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 4 Nov 2024 10:02:08 -0800 Subject: [PATCH 1279/1393] [PoS] - Add a legacy builder API endpoint that includes `num_nodes` (#3834) * Add num_nodes api * Adjust type and comments * Replace with usize * Fix version * Fix route * Restore Cargo.lock * Fix fmt --- builder-api/api/v0_1/builder.toml | 13 +++++++++++++ builder-api/src/v0_1/builder.rs | 23 +++++++++++++++++++++++ builder-api/src/v0_1/data_source.rs | 13 ++++++++++++- libp2p-networking/src/network/cbor.rs | 3 ++- task-impls/src/builder.rs | 24 ++++++++++++++++++++++++ testing/src/block_builder/random.rs | 12 ++++++++++++ testing/src/block_builder/simple.rs | 12 ++++++++++++ 7 files changed, 98 insertions(+), 2 deletions(-) diff --git a/builder-api/api/v0_1/builder.toml b/builder-api/api/v0_1/builder.toml index f16911a948..7e7ad9d853 100644 --- a/builder-api/api/v0_1/builder.toml +++ b/builder-api/api/v0_1/builder.toml @@ -59,6 +59,19 @@ Get the specified block candidate. Returns application-specific encoded transactions type """ +[route.claim_block_with_num_nodes] +PATH = ["claimblockwithnumnodes/:block_hash/:view_number/:sender/:signature/:num_nodes"] +":block_hash" = "TaggedBase64" +":view_number" = "Integer" +":sender" = "TaggedBase64" +":signature" = "TaggedBase64" +":num_nodes" = "Integer" +DOC = """ +Get the specified block candidate and provide the number of nodes. + +Returns application-specific encoded transactions type +""" + [route.claim_header_input] PATH = ["claimheaderinput/:block_hash/:view_number/:sender/:signature"] ":block_hash" = "TaggedBase64" diff --git a/builder-api/src/v0_1/builder.rs b/builder-api/src/v0_1/builder.rs index 0e1066a8ba..ee70f6b34b 100644 --- a/builder-api/src/v0_1/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -158,6 +158,29 @@ where } .boxed() })? + .get("claim_block_with_num_nodes", |req, state| { + async move { + let block_hash: BuilderCommitment = req.blob_param("block_hash")?; + let view_number = req.integer_param("view_number")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; + let num_nodes = req.integer_param("num_nodes")?; + state + .claim_block_with_num_nodes( + &block_hash, + view_number, + sender, + &signature, + num_nodes, + ) + .await + .map_err(|source| Error::BlockClaim { + source, + resource: block_hash.to_string(), + }) + } + .boxed() + })? .get("claim_header_input", |req, state| { async move { let block_hash: BuilderCommitment = req.blob_param("block_hash")?; diff --git a/builder-api/src/v0_1/data_source.rs b/builder-api/src/v0_1/data_source.rs index c36b457623..1c7ee643bf 100644 --- a/builder-api/src/v0_1/data_source.rs +++ b/builder-api/src/v0_1/data_source.rs @@ -28,7 +28,7 @@ pub trait BuilderDataSource { signature: &::PureAssembledSignatureType, ) -> Result>, BuildError>; - /// to claim a block from the list of provided available blocks + /// To claim a block from the list of provided available blocks async fn claim_block( &self, block_hash: &BuilderCommitment, @@ -37,6 +37,17 @@ pub trait BuilderDataSource { signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; + /// To claim a block from the list of provided available blocks and provide the number of nodes + /// information to the builder for VID computation. + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuildError>; + /// To claim a block header input async fn claim_block_header_input( &self, diff --git a/libp2p-networking/src/network/cbor.rs b/libp2p-networking/src/network/cbor.rs index a289b998b5..4a5685624b 100644 --- a/libp2p-networking/src/network/cbor.rs +++ b/libp2p-networking/src/network/cbor.rs @@ -1,3 +1,5 @@ +use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + use async_trait::async_trait; use cbor4ii::core::error::DecodeError; use futures::prelude::*; @@ -6,7 +8,6 @@ use libp2p::{ StreamProtocol, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; /// `Behaviour` type alias for the `Cbor` codec pub type Behaviour = request_response::Behaviour>; diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index fba215217b..1762c91680 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -185,6 +185,30 @@ pub mod v0_1 { .await .map_err(Into::into) } + + /// Claim block and provide the number of nodes information to the builder for VID + /// computation. + /// + /// # Errors + /// - [`BuilderClientError::BlockNotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block_with_num_nodes( + &self, + block_hash: BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.client + .get(&format!( + "{LEGACY_BUILDER_MODULE}/claimblockwithnumnodes/{block_hash}/{view_number}/{sender}/{encoded_signature}/{num_nodes}" + )) + .send() + .await + .map_err(Into::into) + } } } diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index cea9e40328..0d6d767ff4 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -304,6 +304,18 @@ impl BuilderDataSource for RandomBuilderSource { Ok(payload) } + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + _num_nodes: usize, + ) -> Result, BuildError> { + self.claim_block(block_hash, view_number, sender, signature) + .await + } + async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index e1207a4fc2..63a28d854c 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -297,6 +297,18 @@ where Ok(payload) } + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + _num_nodes: usize, + ) -> Result, BuildError> { + self.claim_block(block_hash, view_number, sender, signature) + .await + } + async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, From f6092937559635bc6468df6ab52cb491f9214689 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 4 Nov 2024 20:59:20 +0100 Subject: [PATCH 1280/1393] Lr/epoch end (#3806) * Initial commit * Propose the same block at the end of the epoch * No VID and DAC required for the additional last block proposals * Guard against division by 0 * Traverse the leaves to check an eQC * Remove unneeded async in function definition * Remove trace * Make sure proposal is for the same block if justify QC references the last block * Gate epoch proposal logic * Gate epochs voting logic * Create helper method to check if QC is part of 3-chain * Fixes after review * Get rid of nasty nested if-elses * Fix fmt * Update VID that we reuse at the end of the epoch * Fix fmt * Do not create VID and DAC dependencies when voting for the last block * Simplify how we get a header --- example-types/src/node_types.rs | 22 ++ hotshot/src/lib.rs | 5 + hotshot/src/tasks/mod.rs | 2 + hotshot/src/tasks/task_state.rs | 1 + hotshot/src/types/handle.rs | 3 + task-impls/src/quorum_proposal/handlers.rs | 37 +- task-impls/src/quorum_proposal/mod.rs | 3 + task-impls/src/quorum_vote/handlers.rs | 184 +++++++++- task-impls/src/quorum_vote/mod.rs | 384 +++++++++++---------- task-impls/src/transactions.rs | 16 + testing/src/test_builder.rs | 6 +- testing/tests/tests_1/test_success.rs | 20 ++ types/src/consensus.rs | 104 ++++++ types/src/traits/node_implementation.rs | 3 + 14 files changed, 591 insertions(+), 199 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 785c66b3af..85f9232309 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -176,6 +176,8 @@ impl Versions for TestVersions { ]; type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; } #[derive(Clone, Debug, Copy)] @@ -190,6 +192,8 @@ impl Versions for MarketplaceUpgradeTestVersions { ]; type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; } #[derive(Clone, Debug, Copy)] @@ -204,6 +208,24 @@ impl Versions for MarketplaceTestVersions { ]; type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; +} + +#[derive(Clone, Debug, Copy)] +pub struct EpochsTestVersions {} + +impl Versions for EpochsTestVersions { + type Base = StaticVersion<0, 4>; + type Upgrade = StaticVersion<0, 4>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + + type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; } #[cfg(test)] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 08b1ef1548..c598b020c5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -325,6 +325,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext(&mut handle).await; @@ -765,6 +767,7 @@ where SystemContextHandle, SystemContextHandle, ) { + let epoch_height = config.epoch_height; let left_system_context = SystemContext::new( public_key.clone(), private_key.clone(), @@ -832,6 +835,7 @@ where storage: Arc::clone(&left_system_context.storage), network: Arc::clone(&left_system_context.network), memberships: Arc::clone(&left_system_context.memberships), + epoch_height, }; let mut right_handle = SystemContextHandle { @@ -843,6 +847,7 @@ where storage: Arc::clone(&right_system_context.storage), network: Arc::clone(&right_system_context.network), memberships: Arc::clone(&right_system_context.memberships), + epoch_height, }; // add consensus tasks to each handle, using their individual internal event streams diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index cca95d3c76..3c61f915f3 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -327,6 +327,7 @@ where storage: I::Storage, marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { + let epoch_height = config.epoch_height; let hotshot = SystemContext::new( public_key, private_key, @@ -355,6 +356,7 @@ where storage: Arc::clone(&hotshot.storage), network: Arc::clone(&hotshot.network), memberships: Arc::clone(&hotshot.memberships), + epoch_height, }; add_consensus_tasks::(&mut handle).await; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index ead7b745f6..a2de31fc2c 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -268,6 +268,7 @@ impl, V: Versions> CreateTaskState id: handle.hotshot.id, formed_upgrade_certificate: None, upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 735c219420..4759763484 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -70,6 +70,9 @@ pub struct SystemContextHandle, V: /// Memberships used by consensus pub memberships: Arc>, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl + 'static, V: Versions> diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index ffad0f5d1e..9209d410d9 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -9,6 +9,12 @@ use std::{marker::PhantomData, sync::Arc}; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, + quorum_proposal::{UpgradeLock, Versions}, +}; +use anyhow::{ensure, Context, Result}; use async_broadcast::{InactiveReceiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -29,12 +35,6 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, - quorum_proposal::{UpgradeLock, Versions}, -}; - /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] pub(crate) enum ProposalDependency { @@ -105,7 +105,7 @@ impl ProposalDependencyHandle { /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. - #[instrument(skip_all, target = "ProposalDependencyHandle", fields(id = self.id, view_number = *self.view_number, latest_proposed_view = *self.latest_proposed_view))] + #[instrument(skip_all, fields(id = self.id, view_number = *self.view_number, latest_proposed_view = *self.latest_proposed_view))] async fn publish_proposal( &self, commitment_and_metadata: CommitmentAndMetadata, @@ -166,14 +166,29 @@ impl ProposalDependencyHandle { let version = self.upgrade_lock.version(self.view_number).await?; - let block_header = if version < V::Marketplace::VERSION { + let high_qc = self.consensus.read().await.high_qc().clone(); + + let builder_commitment = commitment_and_metadata.builder_commitment.clone(); + let metadata = commitment_and_metadata.metadata.clone(); + + let block_header = if version >= V::Epochs::VERSION + && self.consensus.read().await.is_high_qc_forming_eqc() + { + tracing::info!("Reached end of epoch. Proposing the same block again to form an eQC."); + let block_header = parent_leaf.block_header().clone(); + tracing::debug!( + "Proposing block no. {} to form the eQC.", + block_header.block_number() + ); + block_header + } else if version < V::Marketplace::VERSION { TYPES::BlockHeader::new_legacy( state.as_ref(), self.instance_state.as_ref(), &parent_leaf, commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, + builder_commitment, + metadata, commitment_and_metadata.fees.first().clone(), vid_share.data.common.clone(), version, @@ -202,7 +217,7 @@ impl ProposalDependencyHandle { let proposal = QuorumProposal { block_header, view_number: self.view_number, - justify_qc: self.consensus.read().await.high_qc().clone(), + justify_qc: high_qc, upgrade_certificate, proposal_certificate, }; diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 5d33e16664..ebce0305ca 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -95,6 +95,9 @@ pub struct QuorumProposalTaskState /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 656737524f..7d3c2f5a41 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -6,8 +6,15 @@ use std::sync::Arc; -use async_broadcast::Sender; +use async_broadcast::{InactiveReceiver, Sender}; +use async_lock::RwLock; use chrono::Utc; +use hotshot_types::data::{Leaf, VidDisperseShare}; +use hotshot_types::message::{Proposal, UpgradeLock}; +use hotshot_types::simple_vote::{QuorumData, QuorumVote}; +use hotshot_types::traits::ValidatedState; +use hotshot_types::traits::{election::Membership, signature_key::SignatureKey}; +use hotshot_types::utils::{View, ViewInner}; use hotshot_types::{ consensus::OuterConsensus, data::QuorumProposal, @@ -22,6 +29,7 @@ use tracing::instrument; use utils::anytrace::*; use super::QuorumVoteTaskState; +use crate::helpers::fetch_proposal; use crate::{ events::HotShotEvent, helpers::{broadcast_event, decide_from_proposal, LeafChainTraversalOutcome}, @@ -29,7 +37,7 @@ use crate::{ }; /// Handles the `QuorumProposalValidated` event. -#[instrument(skip_all)] +#[instrument(skip_all, fields(id = task_state.id, view = *proposal.view_number))] pub(crate) async fn handle_quorum_proposal_validated< TYPES: NodeType, I: NodeImplementation, @@ -144,3 +152,175 @@ pub(crate) async fn handle_quorum_proposal_validated< Ok(()) } + +/// Updates the shared consensus state with the new voting data. +#[instrument(skip_all, target = "VoteDependencyHandle", fields(view = *view_number))] +#[allow(clippy::too_many_arguments)] +pub(crate) async fn update_shared_state< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + consensus: OuterConsensus, + sender: Sender>>, + receiver: InactiveReceiver>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + upgrade_lock: UpgradeLock, + view_number: TYPES::View, + instance_state: Arc, + storage: Arc>, + proposed_leaf: &Leaf, + vid_share: &Proposal>, +) -> Result<()> { + let justify_qc = &proposed_leaf.justify_qc(); + + // Justify qc's leaf commitment should be the same as the parent's leaf commitment. + let mut maybe_parent = consensus + .read() + .await + .saved_leaves() + .get(&justify_qc.data.leaf_commit) + .cloned(); + maybe_parent = match maybe_parent { + Some(p) => Some(p), + None => fetch_proposal( + justify_qc.view_number(), + sender.clone(), + receiver.activate_cloned(), + Arc::clone(&quorum_membership), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + public_key.clone(), + private_key.clone(), + &upgrade_lock, + ) + .await + .ok(), + }; + let parent = maybe_parent.context(info!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.data.leaf_commit, + proposed_leaf.view_number(), + ))?; + let consensus_reader = consensus.read().await; + + let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { + bail!("Parent state not found! Consensus internally inconsistent"); + }; + + drop(consensus_reader); + + let version = upgrade_lock.version(view_number).await?; + + let (validated_state, state_delta) = parent_state + .validate_and_apply_header( + &instance_state, + &parent, + &proposed_leaf.block_header().clone(), + vid_share.data.common.clone(), + version, + ) + .await + .wrap() + .context(warn!("Block header doesn't extend the proposal!"))?; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + + // Now that we've rounded everyone up, we need to update the shared state and broadcast our events. + // We will defer broadcast until all states are updated to avoid holding onto the lock during a network call. + let mut consensus_writer = consensus.write().await; + + let view = View { + view_inner: ViewInner::Leaf { + leaf: proposed_leaf.commit(&upgrade_lock).await, + state: Arc::clone(&state), + delta: Some(Arc::clone(&delta)), + }, + }; + if let Err(e) = + consensus_writer.update_validated_state_map(proposed_leaf.view_number(), view.clone()) + { + tracing::trace!("{e:?}"); + } + consensus_writer + .update_saved_leaves(proposed_leaf.clone(), &upgrade_lock) + .await; + + // Kick back our updated structures for downstream usage. + let new_leaves = consensus_writer.saved_leaves().clone(); + let new_state = consensus_writer.validated_state_map().clone(); + drop(consensus_writer); + + // Broadcast now that the lock is dropped. + broadcast_event( + HotShotEvent::ValidatedStateUpdated(proposed_leaf.view_number(), view).into(), + &sender, + ) + .await; + + // Send the new state up to the sequencer. + storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await + .wrap() + .context(error!("Failed to update undecided state"))?; + + Ok(()) +} + +/// Submits the `QuorumVoteSend` event if all the dependencies are met. +#[instrument(skip_all, fields(name = "Submit quorum vote", level = "error"))] +#[allow(clippy::too_many_arguments)] +pub(crate) async fn submit_vote, V: Versions>( + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + upgrade_lock: UpgradeLock, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, + storage: Arc>, + leaf: Leaf, + vid_share: Proposal>, +) -> Result<()> { + ensure!( + quorum_membership.has_stake(&public_key, epoch_number), + info!( + "We were not chosen for quorum committee on {:?}", + view_number + ) + ); + + // Create and send the vote. + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(&upgrade_lock).await, + }, + view_number, + &public_key, + &private_key, + &upgrade_lock, + ) + .await + .wrap() + .context(error!("Failed to sign vote. This should never happen."))?; + tracing::debug!( + "sending vote to next quorum leader {:?}", + vote.view_number() + 1 + ); + // Add to the storage. + storage + .write() + .await + .append_vid(&vid_share) + .await + .wrap() + .context(error!("Failed to store VID share"))?; + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &sender).await; + + Ok(()) +} diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 3f78573d50..8ac79e94cc 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -6,6 +6,12 @@ use std::{collections::BTreeMap, sync::Arc}; +use crate::quorum_vote::handlers::{submit_vote, update_shared_state}; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + quorum_vote::handlers::handle_quorum_proposal_validated, +}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -16,21 +22,19 @@ use hotshot_task::{ dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; +use hotshot_types::data::QuorumProposal; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, VidDisperseShare, ViewNumber}, + data::{Leaf, ViewNumber}, event::Event, message::{Proposal, UpgradeLock}, - simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, - ValidatedState, }, - utils::{View, ViewInner}, vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; @@ -39,12 +43,7 @@ use jf_vid::VidScheme; use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; - -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task, fetch_proposal}, - quorum_vote::handlers::handle_quorum_proposal_validated, -}; +use vbs::version::StaticVersionType; /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -88,168 +87,13 @@ pub struct VoteDependencyHandle, V pub id: u64, } -impl + 'static, V: Versions> - VoteDependencyHandle -{ - /// Updates the shared consensus state with the new voting data. - #[instrument(skip_all, target = "VoteDependencyHandle", fields(id = self.id, view = *self.view_number))] - async fn update_shared_state( - &self, - proposed_leaf: &Leaf, - vid_share: &Proposal>, - ) -> Result<()> { - let justify_qc = &proposed_leaf.justify_qc(); - - // Justify qc's leaf commitment should be the same as the parent's leaf commitment. - let mut maybe_parent = self - .consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.data().leaf_commit) - .cloned(); - maybe_parent = match maybe_parent { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - self.sender.clone(), - self.receiver.activate_cloned(), - Arc::clone(&self.quorum_membership), - OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), - self.public_key.clone(), - self.private_key.clone(), - &self.upgrade_lock, - ) - .await - .ok(), - }; - let parent = maybe_parent.context(info!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.data().leaf_commit, - proposed_leaf.view_number(), - ))?; - let consensus_reader = self.consensus.read().await; - - let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { - bail!("Parent state not found! Consensus internally inconsistent"); - }; - - drop(consensus_reader); - - let version = self.upgrade_lock.version(self.view_number).await?; - - let (validated_state, state_delta) = parent_state - .validate_and_apply_header( - &self.instance_state, - &parent, - &proposed_leaf.block_header().clone(), - vid_share.data.common.clone(), - version, - ) - .await - .wrap() - .context(warn!("Block header doesn't extend the proposal!"))?; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - - // Now that we've rounded everyone up, we need to update the shared state and broadcast our events. - // We will defer broadcast until all states are updated to avoid holding onto the lock during a network call. - let mut consensus_writer = self.consensus.write().await; - - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(&self.upgrade_lock).await, - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }; - if let Err(e) = - consensus_writer.update_validated_state_map(proposed_leaf.view_number(), view.clone()) - { - tracing::trace!("{e:?}"); - } - consensus_writer - .update_saved_leaves(proposed_leaf.clone(), &self.upgrade_lock) - .await; - - // Kick back our updated structures for downstream usage. - let new_leaves = consensus_writer.saved_leaves().clone(); - let new_state = consensus_writer.validated_state_map().clone(); - drop(consensus_writer); - - // Broadcast now that the lock is dropped. - broadcast_event( - HotShotEvent::ValidatedStateUpdated(proposed_leaf.view_number(), view).into(), - &self.sender, - ) - .await; - - // Send the new state up to the sequencer. - self.storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - .wrap() - .context(error!("Failed to update undecided state"))?; - - Ok(()) - } - - /// Submits the `QuorumVoteSend` event if all the dependencies are met. - #[instrument(skip_all, fields(id = self.id, name = "Submit quorum vote", level = "error"))] - async fn submit_vote( - &self, - leaf: Leaf, - vid_share: Proposal>, - ) -> Result<()> { - ensure!( - self.quorum_membership - .has_stake(&self.public_key, self.epoch_number), - info!( - "We were not chosen for quorum committee on {:?}", - self.view_number - ) - ); - - // Create and send the vote. - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(&self.upgrade_lock).await, - }, - self.view_number, - &self.public_key, - &self.private_key, - &self.upgrade_lock, - ) - .await - .wrap() - .context(error!("Failed to sign vote. This should never happen."))?; - tracing::debug!( - "sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); - // Add to the storage. - self.storage - .write() - .await - .append_vid(&vid_share) - .await - .wrap() - .context(error!("Failed to store VID share"))?; - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; - - Ok(()) - } -} - impl + 'static, V: Versions> HandleDepOutput for VoteDependencyHandle { type Output = Vec>>; #[allow(clippy::too_many_lines)] + #[instrument(skip_all, fields(id = self.id, view = *self.view_number))] async fn handle_dep_result(self, res: Self::Output) { let high_qc_view_number = self.consensus.read().await.high_qc().view_number; @@ -285,8 +129,27 @@ impl + 'static, V: Versions> Handl match event.as_ref() { #[allow(unused_assignments)] HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { + let version = match self.upgrade_lock.version(self.view_number).await { + Ok(version) => version, + Err(e) => { + tracing::error!("{e:#}"); + return; + } + }; let proposal_payload_comm = proposal.data.block_header.payload_commitment(); - if let Some(ref comm) = payload_commitment { + let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + + if version >= V::Epochs::VERSION + && self + .consensus + .read() + .await + .is_qc_forming_eqc(&proposal.data.justify_qc) + { + tracing::debug!("Do not vote here. Voting for this case is handled in QuorumVoteTaskState"); + return; + } else if let Some(ref comm) = payload_commitment { if proposal_payload_comm != *comm { tracing::error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); return; @@ -294,13 +157,12 @@ impl + 'static, V: Versions> Handl } else { payload_commitment = Some(proposal_payload_comm); } - let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + if proposed_leaf.parent_commitment() != parent_commitment { tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } - // Update our persistent storage of the proposal. If we cannot store the proposal reutrn + // Update our persistent storage of the proposal. If we cannot store the proposal return // and error so we don't vote if let Err(e) = self.storage.write().await.append_proposal(proposal).await { tracing::error!("failed to store proposal, not voting. error = {e:#}"); @@ -359,12 +221,40 @@ impl + 'static, V: Versions> Handl }; // Update internal state - if let Err(e) = self.update_shared_state(&leaf, &vid_share).await { + if let Err(e) = update_shared_state::( + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + self.sender.clone(), + self.receiver.clone(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + self.view_number, + Arc::clone(&self.instance_state), + Arc::clone(&self.storage), + &leaf, + &vid_share, + ) + .await + { tracing::error!("Failed to update shared consensus state; error = {e:#}"); return; } - if let Err(e) = self.submit_vote(leaf, vid_share).await { + if let Err(e) = submit_vote::( + self.sender.clone(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + self.view_number, + self.epoch_number, + Arc::clone(&self.storage), + leaf, + vid_share, + ) + .await + { tracing::debug!("Failed to vote; error = {e:#}"); } } @@ -470,11 +360,6 @@ impl, V: Versions> QuorumVoteTaskS event_sender: &Sender>>, event: Option>>, ) { - if view_number <= self.latest_voted_view { - tracing::trace!("We have already voted for this view"); - return; - } - if self.vote_dependencies.contains_key(&view_number) { return; } @@ -496,6 +381,7 @@ impl, V: Versions> QuorumVoteTaskS } let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; + let dependency_chain = AndDependency::from_deps(deps); let dependency_task = DependencyTask::new( @@ -553,8 +439,7 @@ impl, V: Versions> QuorumVoteTaskS event_sender: Sender>>, ) -> Result<()> { match event.as_ref() { - HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { - let cur_epoch = self.consensus.read().await.cur_epoch(); + HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { tracing::trace!( "Received Proposal for view {}", *proposal.data.view_number() @@ -569,13 +454,40 @@ impl, V: Versions> QuorumVoteTaskS ); } - self.create_dependency_task_if_new( - proposal.data.view_number, - cur_epoch, - event_receiver, - &event_sender, - Some(Arc::clone(&event)), + ensure!( + proposal.data.view_number() > self.latest_voted_view, + "We have already voted for this view" ); + + let version = self + .upgrade_lock + .version(proposal.data.view_number()) + .await?; + + let consensus_reader = self.consensus.read().await; + let cur_epoch = consensus_reader.cur_epoch(); + let is_qc_forming_eqc = + consensus_reader.is_qc_forming_eqc(&proposal.data.justify_qc); + drop(consensus_reader); + + if version >= V::Epochs::VERSION && is_qc_forming_eqc { + self.handle_eqc_voting( + proposal, + parent_leaf, + event_sender, + event_receiver, + cur_epoch, + ) + .await; + } else { + self.create_dependency_task_if_new( + proposal.data.view_number, + cur_epoch, + event_receiver, + &event_sender, + Some(Arc::clone(&event)), + ); + } } HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; @@ -705,6 +617,108 @@ impl, V: Versions> QuorumVoteTaskS } Ok(()) } + + /// Handles voting for the last block in the epoch to form the Extended QC. + async fn handle_eqc_voting( + &self, + proposal: &Proposal>, + parent_leaf: &Leaf, + event_sender: Sender>>, + event_receiver: Receiver>>, + epoch_number: TYPES::Epoch, + ) { + tracing::info!("Reached end of epoch. Justify QC is for the last block in the epoch."); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; + if proposed_leaf.height() != parent_leaf.height() + || proposed_leaf.payload_commitment() != parent_leaf.payload_commitment() + { + tracing::error!("Justify QC is for the last block but it's not extended and a new block is proposed. Not voting!"); + return; + } + + tracing::info!( + "Reached end of epoch. Proposed leaf has the same height and payload as its parent." + ); + + let mut consensus_writer = self.consensus.write().await; + let Some(vid_shares) = consensus_writer + .vid_shares() + .get(&parent_leaf.view_number()) + else { + tracing::warn!( + "Proposed leaf is the same as its parent but we don't have our VID for it" + ); + return; + }; + let Some(vid) = vid_shares.get(&self.public_key) else { + tracing::warn!( + "Proposed leaf is the same as its parent but we don't have our VID for it" + ); + return; + }; + let mut updated_vid = vid.clone(); + updated_vid.data.view_number = proposal.data.view_number; + consensus_writer.update_vid_shares(updated_vid.data.view_number, updated_vid.clone()); + drop(consensus_writer); + + if proposed_leaf.parent_commitment() != parent_commitment { + tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); + return; + } + // Update our persistent storage of the proposal. If we cannot store the proposal return + // and error so we don't vote + if let Err(e) = self.storage.write().await.append_proposal(proposal).await { + tracing::error!("failed to store proposal, not voting. error = {e:#}"); + return; + } + + broadcast_event( + Arc::new(HotShotEvent::QuorumVoteDependenciesValidated( + proposal.data.view_number(), + )), + &event_sender, + ) + .await; + + // Update internal state + if let Err(e) = update_shared_state::( + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + event_sender.clone(), + event_receiver.clone().deactivate(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + proposal.data.view_number(), + Arc::clone(&self.instance_state), + Arc::clone(&self.storage), + &proposed_leaf, + &updated_vid, + ) + .await + { + tracing::error!("Failed to update shared consensus state; error = {e:#}"); + return; + } + + if let Err(e) = submit_vote::( + event_sender.clone(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + proposal.data.view_number(), + epoch_number, + Arc::clone(&self.storage), + proposed_leaf, + updated_vid, + ) + .await + { + tracing::debug!("Failed to vote; error = {e:#}"); + } + } } #[async_trait] diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 69ea07f1a5..62290e7449 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -433,6 +433,22 @@ impl, V: Versions> TransactionTask None } + /// epochs view change handler + #[instrument(skip_all, fields(id = self.id, view_number = *self.cur_view))] + pub async fn handle_view_change_epochs( + &mut self, + event_stream: &Sender>>, + block_view: TYPES::View, + ) -> Option { + if self.consensus.read().await.is_high_qc_forming_eqc() { + tracing::info!("Reached end of epoch. Not getting a new block until we form an eQC."); + None + } else { + self.handle_view_change_marketplace(event_stream, block_view) + .await + } + } + /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle( diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 15cbb293ed..f86acd9d31 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -98,6 +98,8 @@ pub struct TestDescription, V: Ver pub start_solver: bool, /// boxed closure used to validate the resulting transactions pub validate_transactions: TransactionValidator, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } pub fn nonempty_block_threshold(threshold: (u64, u64)) -> TransactionValidator { @@ -415,6 +417,7 @@ impl, V: Versions> Default upgrade_view: None, start_solver: true, validate_transactions: Arc::new(|_| Ok(())), + epoch_height: 0, } } } @@ -452,6 +455,7 @@ where timing_data, da_staked_committee_size, unreliable_network, + epoch_height, .. } = self.clone(); @@ -509,7 +513,7 @@ where stop_proposing_time: 0, start_voting_time: u64::MAX, stop_voting_time: 0, - epoch_height: 0, + epoch_height, }; let TimingData { next_view_timeout, diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 853f823278..d3f23669f0 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -6,6 +6,7 @@ use std::time::Duration; +use hotshot_example_types::node_types::EpochsTestVersions; use hotshot_example_types::{ node_types::{ Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, @@ -132,3 +133,22 @@ cross_tests!( metadata } ); + +cross_tests!( + TestName: test_epoch_end, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + TestDescription { + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ), + epoch_height: 10, + ..TestDescription::default() + } + }, +); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index ec872af603..8c82fc9e25 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -19,7 +19,10 @@ use tracing::instrument; use utils::anytrace::*; use vec1::Vec1; +use crate::traits::block_contents::BlockHeader; +use crate::utils::Terminator::Inclusive; pub use crate::utils::{View, ViewInner}; +use crate::vote::Certificate; use crate::{ data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, @@ -317,6 +320,9 @@ pub struct Consensus { /// A reference to the metrics trait pub metrics: Arc, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces @@ -405,6 +411,7 @@ impl Consensus { saved_payloads: BTreeMap>, high_qc: QuorumCertificate, metrics: Arc, + epoch_height: u64, ) -> Self { Consensus { validated_state_map, @@ -420,6 +427,7 @@ impl Consensus { saved_payloads, high_qc, metrics, + epoch_height, } } @@ -835,6 +843,102 @@ impl Consensus { } Some(()) } + + /// Returns true if the current high qc is for the last block in the epoch + pub fn is_high_qc_for_last_block(&self) -> bool { + let high_qc = self.high_qc(); + self.is_qc_for_last_block(high_qc) + } + + /// Returns true if the given qc is for the last block in the epoch + pub fn is_qc_for_last_block(&self, cert: &QuorumCertificate) -> bool { + let Some(leaf) = self.saved_leaves.get(&cert.data().leaf_commit) else { + return false; + }; + let block_height = leaf.height(); + if block_height == 0 || self.epoch_height == 0 { + false + } else { + block_height % self.epoch_height == 0 + } + } + + /// Returns true if the current high qc is an extended Quorum Certificate + /// The Extended Quorum Certificate (eQC) is the third Quorum Certificate formed in three + /// consecutive views for the last block in the epoch. + pub fn is_high_qc_extended(&self) -> bool { + let high_qc = self.high_qc(); + let ret = self.is_qc_extended(high_qc); + if ret { + tracing::debug!("We have formed an eQC!"); + }; + ret + } + + /// Returns true if the given qc is an extended Quorum Certificate + /// The Extended Quorum Certificate (eQC) is the third Quorum Certificate formed in three + /// consecutive views for the last block in the epoch. + pub fn is_qc_extended(&self, cert: &QuorumCertificate) -> bool { + if !self.is_qc_for_last_block(cert) { + tracing::debug!("High QC is not for the last block in the epoch."); + return false; + } + + let qc_view = cert.view_number(); + let high_qc_block_number = + if let Some(leaf) = self.saved_leaves.get(&cert.data().leaf_commit) { + leaf.block_header().block_number() + } else { + return false; + }; + + let mut last_visited_view_number = qc_view; + let mut is_qc_extended = true; + if let Err(e) = + self.visit_leaf_ancestors(qc_view, Inclusive(qc_view - 2), true, |leaf, _, _| { + tracing::trace!( + "last_visited_view_number = {}, leaf.view_number = {}", + *last_visited_view_number, + *leaf.view_number() + ); + + if leaf.view_number() == qc_view { + return true; + } + + if last_visited_view_number - 1 != leaf.view_number() { + tracing::trace!("The chain is broken. Non consecutive views."); + is_qc_extended = false; + return false; + } + if high_qc_block_number != leaf.height() { + tracing::trace!("The chain is broken. Block numbers do not match."); + is_qc_extended = false; + return false; + } + last_visited_view_number = leaf.view_number(); + true + }) + { + is_qc_extended = false; + tracing::trace!("The chain is broken. Leaf ascension failed."); + tracing::debug!("Leaf ascension failed; error={e}"); + } + tracing::trace!("Is the given QC an eQC? {}", is_qc_extended); + is_qc_extended + } + + /// Return true if the given Quorum Certificate takes part in forming an eQC, i.e. + /// it is one of the 3-chain certificates but not the eQC itself + pub fn is_qc_forming_eqc(&self, cert: &QuorumCertificate) -> bool { + self.is_qc_for_last_block(cert) && !self.is_qc_extended(cert) + } + + /// Return true if the high QC takes part in forming an eQC, i.e. + /// it is one of the 3-chain certificates but not the eQC itself + pub fn is_high_qc_forming_eqc(&self) -> bool { + self.is_high_qc_for_last_block() && !self.is_high_qc_extended() + } } /// Alias for the block payload commitment and the associated metadata. The primary data diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 43035b01b7..d05652b783 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -267,4 +267,7 @@ pub trait Versions: Clone + Copy + Debug + Send + Sync + 'static { /// The version at which to switch over to marketplace logic type Marketplace: StaticVersionType; + + /// The version at which to switch over to epochs logic + type Epochs: StaticVersionType; } From 8c39d4757a4339a50f8edda55634214d959d7c6f Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:25:26 -0500 Subject: [PATCH 1281/1393] downgrade some logs to debug (#3843) --- libp2p-networking/src/network/transport.rs | 7 ++++--- task-impls/src/quorum_proposal/handlers.rs | 11 ++++++----- task-impls/src/quorum_vote/handlers.rs | 17 ++++++++-------- task-impls/src/quorum_vote/mod.rs | 15 +++++++------- testing/tests/tests_1/test_success.rs | 5 ++--- types/src/consensus.rs | 23 ++++++++++------------ 6 files changed, 37 insertions(+), 41 deletions(-) diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index c959c11edd..969fed495d 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -9,9 +9,10 @@ use std::{ use anyhow::{ensure, Context, Result as AnyhowResult}; use async_compatibility_layer::art::async_timeout; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::traits::{ - election::Membership, node_implementation::NodeType, signature_key::SignatureKey, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, }; use libp2p::{ core::{muxing::StreamMuxerExt, transport::TransportEvent, StreamMuxer}, @@ -519,6 +520,7 @@ pub async fn write_length_delimited( mod test { use std::sync::Arc; + use hotshot_example_types::node_types::TestTypes; use hotshot_types::{ light_client::StateVerKey, signature_key::BLSPubKey, @@ -529,7 +531,6 @@ mod test { use rand::Rng; use super::*; - use hotshot_example_types::node_types::TestTypes; /// A mock type to help with readability type MockStakeTableAuth = StakeTableAuthentication; diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 9209d410d9..cf6e60e40e 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -9,11 +9,6 @@ use std::{marker::PhantomData, sync::Arc}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, - quorum_proposal::{UpgradeLock, Versions}, -}; use anyhow::{ensure, Context, Result}; use async_broadcast::{InactiveReceiver, Sender}; use async_compatibility_layer::art::async_spawn; @@ -35,6 +30,12 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, + quorum_proposal::{UpgradeLock, Versions}, +}; + /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] pub(crate) enum ProposalDependency { diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 7d3c2f5a41..578a094879 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -9,30 +9,29 @@ use std::sync::Arc; use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; -use hotshot_types::data::{Leaf, VidDisperseShare}; -use hotshot_types::message::{Proposal, UpgradeLock}; -use hotshot_types::simple_vote::{QuorumData, QuorumVote}; -use hotshot_types::traits::ValidatedState; -use hotshot_types::traits::{election::Membership, signature_key::SignatureKey}; -use hotshot_types::utils::{View, ViewInner}; use hotshot_types::{ consensus::OuterConsensus, - data::QuorumProposal, + data::{Leaf, QuorumProposal, VidDisperseShare}, event::{Event, EventType}, + message::{Proposal, UpgradeLock}, + simple_vote::{QuorumData, QuorumVote}, traits::{ + election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, storage::Storage, + ValidatedState, }, + utils::{View, ViewInner}, vote::HasViewNumber, }; use tracing::instrument; use utils::anytrace::*; use super::QuorumVoteTaskState; -use crate::helpers::fetch_proposal; use crate::{ events::HotShotEvent, - helpers::{broadcast_event, decide_from_proposal, LeafChainTraversalOutcome}, + helpers::{broadcast_event, decide_from_proposal, fetch_proposal, LeafChainTraversalOutcome}, quorum_vote::Versions, }; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 8ac79e94cc..5622ad9479 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -6,12 +6,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use crate::quorum_vote::handlers::{submit_vote, update_shared_state}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, - quorum_vote::handlers::handle_quorum_proposal_validated, -}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -22,10 +16,9 @@ use hotshot_task::{ dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; -use hotshot_types::data::QuorumProposal; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, ViewNumber}, + data::{Leaf, QuorumProposal, ViewNumber}, event::Event, message::{Proposal, UpgradeLock}, traits::{ @@ -45,6 +38,12 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, +}; + /// Event handlers for `QuorumProposalValidated`. mod handlers; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index d3f23669f0..588c718a83 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -6,11 +6,10 @@ use std::time::Duration; -use hotshot_example_types::node_types::EpochsTestVersions; use hotshot_example_types::{ node_types::{ - Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, - TestTypesRandomizedLeader, TestVersions, + EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, + TestTypes, TestTypesRandomizedLeader, TestVersions, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 8c82fc9e25..28d488db1c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -19,10 +19,7 @@ use tracing::instrument; use utils::anytrace::*; use vec1::Vec1; -use crate::traits::block_contents::BlockHeader; -use crate::utils::Terminator::Inclusive; pub use crate::utils::{View, ViewInner}; -use crate::vote::Certificate; use crate::{ data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, @@ -30,15 +27,15 @@ use crate::{ message::{Proposal, UpgradeLock}, simple_certificate::{DaCertificate, QuorumCertificate}, traits::{ - block_contents::BuilderFee, + block_contents::{BlockHeader, BuilderFee}, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{BuilderCommitment, StateAndDelta, Terminator}, + utils::{BuilderCommitment, StateAndDelta, Terminator, Terminator::Inclusive}, vid::VidCommitment, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; /// A type alias for `HashMap, T>` @@ -492,7 +489,7 @@ impl Consensus { pub fn update_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.cur_view, - "New view isn't newer than the current view." + debug!("New view isn't newer than the current view.") ); self.cur_view = view_number; Ok(()) @@ -504,7 +501,7 @@ impl Consensus { pub fn update_epoch(&mut self, epoch_number: TYPES::Epoch) -> Result<()> { ensure!( epoch_number > self.cur_epoch, - "New epoch isn't newer than the current epoch." + debug!("New epoch isn't newer than the current epoch.") ); self.cur_epoch = epoch_number; Ok(()) @@ -556,7 +553,7 @@ impl Consensus { .last_proposals .last_key_value() .map_or(TYPES::View::genesis(), |(k, _)| { *k }), - "New view isn't newer than the previously proposed view." + debug!("New view isn't newer than the previously proposed view.") ); self.last_proposals .insert(proposal.data.view_number(), proposal); @@ -570,7 +567,7 @@ impl Consensus { pub fn update_last_decided_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.last_decided_view, - "New view isn't newer than the previously decided view." + debug!("New view isn't newer than the previously decided view.") ); self.last_decided_view = view_number; Ok(()) @@ -583,7 +580,7 @@ impl Consensus { pub fn update_locked_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.locked_view, - "New view isn't newer than the previously locked view." + debug!("New view isn't newer than the previously locked view.") ); self.locked_view = view_number; Ok(()) @@ -612,7 +609,7 @@ impl Consensus { { ensure!( new_delta.is_some() || existing_delta.is_none(), - "Skipping the state update to not override a `Leaf` view with `Some` state delta." + debug!("Skipping the state update to not override a `Leaf` view with `Some` state delta.") ); } else { bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); @@ -656,7 +653,7 @@ impl Consensus { pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { ensure!( high_qc.view_number > self.high_qc.view_number || high_qc == self.high_qc, - "High QC with an equal or higher view exists." + debug!("High QC with an equal or higher view exists.") ); tracing::debug!("Updating high QC"); self.high_qc = high_qc; From 748f0d7f8bccca29e6ba8f65a03dc8443e2b5a91 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:00:46 -0800 Subject: [PATCH 1282/1393] 3215 dont use high qc for handle_dep_result (#3807) * 3215 dont use high qc for handle_dep_result * use high_qc view_number from events in quorum_proposal::handle_dep_result --- task-impls/src/helpers.rs | 10 ++-- task-impls/src/quorum_proposal/handlers.rs | 66 +++++----------------- task-impls/src/quorum_vote/handlers.rs | 64 ++++++++++++++------- task-impls/src/quorum_vote/mod.rs | 36 +++--------- task-impls/src/upgrade.rs | 3 +- 5 files changed, 72 insertions(+), 107 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 28d59b2038..26bb596a18 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -57,7 +57,7 @@ pub(crate) async fn fetch_proposal( sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, upgrade_lock: &UpgradeLock, -) -> Result> { +) -> Result<(Leaf, View)> { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. let signed_proposal_request = ProposalRequestPayload { @@ -162,11 +162,11 @@ pub(crate) async fn fetch_proposal( .await; broadcast_event( - HotShotEvent::ValidatedStateUpdated(view_number, view).into(), + HotShotEvent::ValidatedStateUpdated(view_number, view.clone()).into(), &event_sender, ) .await; - Ok(leaf) + Ok((leaf, view)) } /// Helper type to give names and to the output values of the leaf chain traversal operation. @@ -371,6 +371,7 @@ pub(crate) async fn parent_leaf_and_state( private_key: ::PrivateKey, consensus: OuterConsensus, upgrade_lock: &UpgradeLock, + parent_view_number: TYPES::View, ) -> Result<(Leaf, Arc<::ValidatedState>)> { let consensus_reader = consensus.read().await; let cur_epoch = consensus_reader.cur_epoch(); @@ -381,7 +382,6 @@ pub(crate) async fn parent_leaf_and_state( next_proposal_view_number ) ); - let parent_view_number = consensus_reader.high_qc().view_number(); let vsm_contains_parent_view = consensus_reader .validated_state_map() .contains_key(&parent_view_number); @@ -403,7 +403,7 @@ pub(crate) async fn parent_leaf_and_state( } let consensus_reader = consensus.read().await; - let parent_view_number = consensus_reader.high_qc().view_number(); + //let parent_view_number = consensus_reader.high_qc().view_number(); let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( debug!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) )?; diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index cf6e60e40e..1add742a7a 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -11,12 +11,8 @@ use std::{marker::PhantomData, sync::Arc}; use anyhow::{ensure, Context, Result}; use async_broadcast::{InactiveReceiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use hotshot_task::{ - dependency::{Dependency, EventDependency}, - dependency_task::HandleDepOutput, -}; +use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, @@ -25,6 +21,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, + vote::HasViewNumber, }; use tracing::instrument; use utils::anytrace::*; @@ -32,7 +29,7 @@ use vbs::version::StaticVersionType; use crate::{ events::HotShotEvent, - helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, + helpers::{broadcast_event, parent_leaf_and_state}, quorum_proposal::{UpgradeLock, Versions}, }; @@ -114,6 +111,7 @@ impl ProposalDependencyHandle { view_change_evidence: Option>, formed_upgrade_certificate: Option>, decided_upgrade_certificate: Arc>>>, + parent_view_number: TYPES::View, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, @@ -124,6 +122,7 @@ impl ProposalDependencyHandle { self.private_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &self.upgrade_lock, + parent_view_number, ) .await?; @@ -258,61 +257,17 @@ impl ProposalDependencyHandle { Ok(()) } } + impl HandleDepOutput for ProposalDependencyHandle { type Output = Vec>>>>; #[allow(clippy::no_effect_underscore_binding, clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { - let high_qc_view_number = self.consensus.read().await.high_qc().view_number; - let event_receiver = self.receiver.activate_cloned(); - if !self - .consensus - .read() - .await - .validated_state_map() - .contains_key(&high_qc_view_number) - { - // The proposal for the high qc view is missing, try to get it asynchronously - let membership = Arc::clone(&self.quorum_membership); - let event_sender = self.sender.clone(); - let sender_public_key = self.public_key.clone(); - let sender_private_key = self.private_key.clone(); - let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); - let upgrade_lock = self.upgrade_lock.clone(); - let rx = event_receiver.clone(); - async_spawn(async move { - fetch_proposal( - high_qc_view_number, - event_sender, - rx, - membership, - consensus, - sender_public_key, - sender_private_key, - &upgrade_lock, - ) - .await - }); - // Block on receiving the event from the event stream. - EventDependency::new( - event_receiver, - Box::new(move |event| { - let event = event.as_ref(); - if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { - *view_number == high_qc_view_number - } else { - false - } - }), - ) - .completed() - .await; - } - let mut commit_and_metadata: Option> = None; let mut timeout_certificate = None; let mut view_sync_finalize_cert = None; let mut vid_share = None; + let mut parent_view_number = None; for event in res.iter().flatten().flatten() { match event.as_ref() { HotShotEvent::SendPayloadCommitmentAndMetadata( @@ -336,8 +291,9 @@ impl HandleDepOutput for ProposalDependencyHandle< either::Right(timeout) => { timeout_certificate = Some(timeout.clone()); } - either::Left(_) => { + either::Left(qc) => { // Handled by the UpdateHighQc event. + parent_view_number = Some(qc.view_number()); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { @@ -350,6 +306,9 @@ impl HandleDepOutput for ProposalDependencyHandle< } } + let parent_view_number = + parent_view_number.unwrap_or(self.consensus.read().await.high_qc().view_number()); + if commit_and_metadata.is_none() { tracing::error!( "Somehow completed the proposal dependency task without a commitment and metadata" @@ -375,6 +334,7 @@ impl HandleDepOutput for ProposalDependencyHandle< proposal_cert, self.formed_upgrade_certificate.clone(), Arc::clone(&self.upgrade_lock.decided_upgrade_certificate), + parent_view_number, ) .await { diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 578a094879..bec3bc6201 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -172,43 +172,69 @@ pub(crate) async fn update_shared_state< storage: Arc>, proposed_leaf: &Leaf, vid_share: &Proposal>, + parent_view_number: Option, ) -> Result<()> { let justify_qc = &proposed_leaf.justify_qc(); + let consensus_reader = consensus.read().await; + // Try to find the validated vview within the validasted state map. This will be present + // if we have the saved leaf, but if not we'll get it when we fetch_proposal. + let mut maybe_validated_view = parent_view_number.and_then(|view_number| { + consensus_reader + .validated_state_map() + .get(&view_number) + .cloned() + }); + // Justify qc's leaf commitment should be the same as the parent's leaf commitment. - let mut maybe_parent = consensus - .read() - .await + let mut maybe_parent = consensus_reader .saved_leaves() .get(&justify_qc.data.leaf_commit) .cloned(); + + drop(consensus_reader); + maybe_parent = match maybe_parent { Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - sender.clone(), - receiver.activate_cloned(), - Arc::clone(&quorum_membership), - OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), - public_key.clone(), - private_key.clone(), - &upgrade_lock, - ) - .await - .ok(), + None => { + match fetch_proposal( + justify_qc.view_number(), + sender.clone(), + receiver.activate_cloned(), + Arc::clone(&quorum_membership), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + public_key.clone(), + private_key.clone(), + &upgrade_lock, + ) + .await + .ok() + { + Some((leaf, view)) => { + maybe_validated_view = Some(view); + Some(leaf) + } + None => None, + } + } }; + let parent = maybe_parent.context(info!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", justify_qc.data.leaf_commit, proposed_leaf.view_number(), ))?; - let consensus_reader = consensus.read().await; - let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { - bail!("Parent state not found! Consensus internally inconsistent"); + let Some(validated_view) = maybe_validated_view else { + bail!( + "Failed to fetch view for parent, parent view {:?}", + parent_view_number + ); }; - drop(consensus_reader); + let (Some(parent_state), _) = validated_view.state_and_delta() else { + bail!("Parent state not found! Consensus internally inconsistent"); + }; let version = upgrade_lock.version(view_number).await?; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 5622ad9479..70a4c22aa0 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -12,13 +12,13 @@ use async_lock::RwLock; use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::{ - dependency::{AndDependency, Dependency, EventDependency}, + dependency::{AndDependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, QuorumProposal, ViewNumber}, + data::{Leaf, QuorumProposal}, event::Event, message::{Proposal, UpgradeLock}, traits::{ @@ -94,36 +94,10 @@ impl + 'static, V: Versions> Handl #[allow(clippy::too_many_lines)] #[instrument(skip_all, fields(id = self.id, view = *self.view_number))] async fn handle_dep_result(self, res: Self::Output) { - let high_qc_view_number = self.consensus.read().await.high_qc().view_number; - - // The validated state of a non-genesis high QC should exist in the state map. - if *high_qc_view_number != *ViewNumber::genesis() - && !self - .consensus - .read() - .await - .validated_state_map() - .contains_key(&high_qc_view_number) - { - // Block on receiving the event from the event stream. - EventDependency::new( - self.receiver.activate_cloned(), - Box::new(move |event| { - let event = event.as_ref(); - if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { - *view_number == high_qc_view_number - } else { - false - } - }), - ) - .completed() - .await; - } - let mut payload_commitment = None; let mut leaf = None; let mut vid_share = None; + let mut parent_view_number = None; for event in res { match event.as_ref() { #[allow(unused_assignments)] @@ -168,6 +142,7 @@ impl + 'static, V: Versions> Handl return; } leaf = Some(proposed_leaf); + parent_view_number = Some(parent_leaf.view_number()); } HotShotEvent::DaCertificateValidated(cert) => { let cert_payload_comm = &cert.data().payload_commit; @@ -195,6 +170,7 @@ impl + 'static, V: Versions> Handl _ => {} } } + broadcast_event( Arc::new(HotShotEvent::QuorumVoteDependenciesValidated( self.view_number, @@ -233,6 +209,7 @@ impl + 'static, V: Versions> Handl Arc::clone(&self.storage), &leaf, &vid_share, + parent_view_number, ) .await { @@ -694,6 +671,7 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&self.storage), &proposed_leaf, &updated_vid, + Some(parent_leaf.view_number()), ) .await { diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index fb4f4de7f4..56107613cd 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -37,7 +37,7 @@ use crate::{ vote_collection::{handle_vote, VoteCollectorsMap}, }; -/// Tracks state of a DA task +/// Tracks state of an upgrade task pub struct UpgradeTaskState, V: Versions> { /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -50,6 +50,7 @@ pub struct UpgradeTaskState, V: Ve /// Membership for Quorum Certs/votes pub quorum_membership: Arc, + /// The underlying network pub network: Arc, From 219639e18ad25b180b1fa87547f9cffea8d04a71 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Tue, 5 Nov 2024 20:14:11 +0500 Subject: [PATCH 1283/1393] update dependencies (#3848) * compile without lockfile * lint * change level to allow * Fix flake.nix The hash needs to be updated when updating the rust toolchain version * Fix lint --------- Co-authored-by: sveitser --- example-types/Cargo.toml | 5 +++++ hotshot/src/lib.rs | 4 ++-- hotshot/src/traits/networking/push_cdn_network.rs | 6 ++---- hotshot/src/types/handle.rs | 1 + task-impls/src/harness.rs | 4 ++-- task-impls/src/helpers.rs | 6 +++--- testing/Cargo.toml | 5 +++++ testing/src/lib.rs | 2 +- types/Cargo.toml | 2 +- types/src/data.rs | 4 ++-- types/src/traits/states.rs | 3 ++- types/src/vid.rs | 1 + 12 files changed, 27 insertions(+), 16 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 65dd60d288..024c48a064 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -42,3 +42,8 @@ reqwest = { workspace = true } tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "allow", check-cfg = [ + 'cfg(async_executor_impl, values("tokio", "async-std"))', +] } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index c598b020c5..eddf722a0b 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -1029,9 +1029,9 @@ impl HotShotInitializer { /// /// # Arguments /// * `start_view` - The minimum view number that we are confident won't lead to a double vote - /// after restart. + /// after restart. /// * `validated_state` - Optional validated state that if given, will be used to construct the - /// `SystemContext`. + /// `SystemContext`. #[allow(clippy::too_many_arguments)] pub fn from_reload( anchor_leaf: Leaf, diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index c0c2f5d957..2244be517a 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -472,9 +472,8 @@ impl ConnectedNetwork for PushCdnNetwork { ) -> Result<(), NetworkError> { self.broadcast_message(message, topic.into()) .await - .map_err(|e| { + .inspect_err(|_e| { self.metrics.num_failed_messages.add(1); - e }) } @@ -491,9 +490,8 @@ impl ConnectedNetwork for PushCdnNetwork { ) -> Result<(), NetworkError> { self.broadcast_message(message, Topic::Da) .await - .map_err(|e| { + .inspect_err(|_e| { self.metrics.num_failed_messages.add(1); - e }) } diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 4759763484..2ab7874b8f 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -225,6 +225,7 @@ impl + 'static, V: Versions> /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper + /// /// NOTE: this is only used for sanity checks in our tests #[must_use] pub fn internal_event_stream_receiver_known_impl(&self) -> Receiver>> { diff --git a/task-impls/src/harness.rs b/task-impls/src/harness.rs index 16e8a273b8..63d864392b 100644 --- a/task-impls/src/harness.rs +++ b/task-impls/src/harness.rs @@ -27,7 +27,7 @@ pub struct TestHarnessState { /// # Arguments /// * `event_stream` - if given, will be used to register the task builder. /// * `allow_extra_output` - whether to allow an extra output after we've seen all expected -/// outputs. Should be `false` in most cases. +/// outputs. Should be `false` in most cases. /// /// # Panics /// Panics if any state the test expects is not set. Panicking causes a test failure @@ -82,7 +82,7 @@ pub async fn run_harness> + Send /// /// # Arguments /// * `allow_extra_output` - whether to allow an extra output after we've seen all expected -/// outputs. Should be `false` in most cases. +/// outputs. Should be `false` in most cases. /// /// # Panics /// Will panic to fail the test when it receives and unexpected event diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 26bb596a18..336b692407 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -218,9 +218,9 @@ impl Default for LeafChainTraversalOutcome { /// [HotStuff](https://arxiv.org/pdf/1803.05069) section 5: /// /// > When a node b* carries a QC that refers to a direct parent, i.e., b*.justify.node = b*.parent, -/// we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, -/// if in addition to forming a One-Chain, b''.justify.node = b''.parent. -/// It forms a Three-Chain, if b'' forms a Two-Chain. +/// > we say that it forms a One-Chain. Denote by b'' = b*.justify.node. Node b* forms a Two-Chain, +/// > if in addition to forming a One-Chain, b''.justify.node = b''.parent. +/// > It forms a Three-Chain, if b'' forms a Two-Chain. /// /// We follow this exact logic to determine if we are able to reach a commit and a decide. A commit /// is reached when we have a two chain, and a decide is reached when we have a three chain. diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 82473f3d34..79a11a7502 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -57,3 +57,8 @@ tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "allow", check-cfg = [ + 'cfg(async_executor_impl, values("tokio", "async-std"))', +] } \ No newline at end of file diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 43861fcbf2..20d513dc2a 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr( // hotshot_example option is set manually in justfile when running examples - not(any(test, debug_assertions, hotshot_example)), + not(any(test, debug_assertions)), deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" )] diff --git a/types/Cargo.toml b/types/Cargo.toml index e9ad6368b5..9575be6b33 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -48,7 +48,7 @@ serde_bytes = { workspace = true } tagged-base64 = { workspace = true } vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } -dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } +dyn-clone = "1.0.17" url = { workspace = true } utils = { path = "../utils" } vec1 = { workspace = true } diff --git a/types/src/data.rs b/types/src/data.rs index 71b682aa19..38092e0969 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -373,7 +373,7 @@ pub struct QuorumProposal { /// Possible timeout or view sync certificate. /// - A timeout certificate is only present if the justify_qc is not for the preceding view /// - A view sync certificate is only present if the justify_qc and timeout_cert are not - /// present. + /// present. pub proposal_certificate: Option>, } @@ -817,7 +817,7 @@ pub mod null_block { #[memoize(SharedCache, Capacity: 10)] #[must_use] pub fn commitment(num_storage_nodes: usize) -> Option { - let vid_result = vid_scheme(num_storage_nodes).commit_only(&Vec::new()); + let vid_result = vid_scheme(num_storage_nodes).commit_only(Vec::new()); match vid_result { Ok(r) => Some(r), diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index ce93502361..9c951bb8d0 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -41,7 +41,8 @@ pub trait StateDelta: /// * The type of block that modifies this type of state ([`BlockPayload`](`ValidatedStates:: /// BlockPayload`)) /// * The ability to validate that a block header is actually a valid extension of this state and -/// produce a new state, with the modifications from the block applied +/// produce a new state, with the modifications from the block applied +/// /// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) pub trait ValidatedState: Serialize + DeserializeOwned + Debug + Default + PartialEq + Eq + Send + Sync + Clone diff --git a/types/src/vid.rs b/types/src/vid.rs index 7d3bfb21a7..b36eaf2020 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -322,6 +322,7 @@ impl Precomputable for VidSchemeType { /// Foreign type rules prevent us from doing: /// - `impl From> for VidDisperse` /// - `impl VidDisperse {...}` +/// /// and similarly for `Statement`. /// Thus, we accomplish type conversion via functions. fn vid_disperse_conversion(vid_disperse: VidDisperse) -> VidDisperse { From c90ef540e47243c6dcf49df38a1da164002fc76a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 5 Nov 2024 14:45:15 -0500 Subject: [PATCH 1284/1393] View Change Fix (#3795) * move storage to end of proposal processing * don't fetch proposal so until we really need it * whoops wrong fetch * fix test * spawn fetch proposal * revert test * Kill old network tasks * check view everywhere * prepare to spawn validation in task * fmt and lint * fixes after merge * fix off by ones * fmt * remove dead da code, off by one fix * remove some checks * move view change event to same spot * actually spawn a new timeout task? * view numbers are hard * log emit view change earlier * fix test with failures * give network tasks one more view * cancel timeout task at the end, let vote live longer * Try not cancelling newtork tasks * one extra view for Request tasks * view - 1 * fire and forget * less cancel * Sapwn vid task off by 1 fix * ignore view delay test b/c it doesn't make sense * don't update view until proposal validated * don't double spawn timeout * proper view numbers * logging * some fixes * test fixes * fix * view change is the view of current proposal * revert logging * fix tests * fix txn task view num * pr comments * Send view change on epoch voting --- hotshot/src/tasks/mod.rs | 2 + hotshot/src/tasks/task_state.rs | 6 - .../src/traits/networking/push_cdn_network.rs | 11 ++ task-impls/src/consensus/handlers.rs | 17 +- task-impls/src/consensus/mod.rs | 12 +- task-impls/src/da.rs | 9 - task-impls/src/helpers.rs | 177 ++++-------------- task-impls/src/network.rs | 56 ++++-- task-impls/src/quorum_proposal/mod.rs | 4 - .../src/quorum_proposal_recv/handlers.rs | 68 +++---- task-impls/src/quorum_proposal_recv/mod.rs | 104 ++++++---- task-impls/src/quorum_vote/mod.rs | 14 +- task-impls/src/transactions.rs | 40 +--- task-impls/src/view_sync.rs | 5 +- testing/src/byzantine/byzantine_behaviour.rs | 3 +- testing/src/overall_safety_task.rs | 1 + testing/src/txn_task.rs | 1 - testing/tests/tests_1/network_task.rs | 6 + testing/tests/tests_1/quorum_vote_task.rs | 1 + testing/tests/tests_1/transaction_task.rs | 1 + .../tests/tests_1/upgrade_task_with_vote.rs | 4 + testing/tests/tests_1/view_sync_task.rs | 2 +- .../tests/tests_1/vote_dependency_handle.rs | 1 + testing/tests/tests_4/byzantine_tests.rs | 112 +++++------ testing/tests/tests_4/test_with_failures_f.rs | 2 +- 25 files changed, 310 insertions(+), 349 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 3c61f915f3..645b0b06d8 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,6 +8,7 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; +use std::collections::BTreeMap; use std::{fmt::Debug, sync::Arc, time::Duration}; use async_broadcast::{broadcast, RecvError}; @@ -200,6 +201,7 @@ pub fn add_network_event_task< storage: Arc::clone(&handle.storage()), consensus: Arc::clone(&handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), + transmit_tasks: BTreeMap::new(), }; let task = Task::new( network_state, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index a2de31fc2c..1a9065692d 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -285,18 +285,12 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, - cur_view_time: Utc::now().timestamp(), cur_epoch: handle.cur_epoch().await, - network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), - proposal_cert: None, spawned_tasks: BTreeMap::new(), - instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, upgrade_lock: handle.hotshot.upgrade_lock.clone(), } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 2244be517a..a0a4799c4a 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -470,6 +470,11 @@ impl ConnectedNetwork for PushCdnNetwork { topic: HotShotTopic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } self.broadcast_message(message, topic.into()) .await .inspect_err(|_e| { @@ -488,6 +493,11 @@ impl ConnectedNetwork for PushCdnNetwork { _recipients: Vec, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } self.broadcast_message(message, Topic::Da) .await .inspect_err(|_e| { @@ -533,6 +543,7 @@ impl ConnectedNetwork for PushCdnNetwork { // If we're paused, receive but don't process messages #[cfg(feature = "hotshot-testing")] if self.is_paused.load(Ordering::Relaxed) { + async_sleep(Duration::from_millis(100)).await; return Ok(vec![]); } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index c1edf60bc7..02e65ba161 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -126,9 +126,18 @@ pub(crate) async fn handle_view_change< let old_view_number = task_state.cur_view; tracing::debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + if *old_view_number / 100 != *new_view_number / 100 { + tracing::info!("Progress: entered view {:>6}", *new_view_number); + } // Move this node to the next view task_state.cur_view = new_view_number; + task_state + .consensus + .write() + .await + .update_view(new_view_number)?; + // If we have a decided upgrade certificate, the protocol version may also have been upgraded. let decided_upgrade_certificate_read = task_state .upgrade_lock @@ -149,9 +158,7 @@ pub(crate) async fn handle_view_change< let timeout = task_state.timeout; let new_timeout_task = async_spawn({ let stream = sender.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = new_view_number + 1; + let view_number = new_view_number; async move { async_sleep(Duration::from_millis(timeout)).await; broadcast_event( @@ -223,7 +230,7 @@ pub(crate) async fn handle_timeout task_state: &mut ConsensusTaskState, ) -> Result<()> { ensure!( - task_state.cur_view < view_number, + task_state.cur_view <= view_number, "Timeout event is for an old view" ); @@ -255,7 +262,7 @@ pub(crate) async fn handle_timeout ) .await; - tracing::debug!( + tracing::error!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view_number ); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index edff8f6078..fad629079b 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -31,7 +32,7 @@ use utils::anytrace::Result; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, }; -use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; +use crate::{events::HotShotEvent, helpers::cancel_task, vote_collection::VoteCollectorsMap}; /// Event handlers for use in the `handle` method. mod handlers; @@ -170,5 +171,12 @@ impl, V: Versions> TaskState } /// Joins all subtasks. - async fn cancel_subtasks(&mut self) {} + async fn cancel_subtasks(&mut self) { + // Cancel the old timeout task + cancel_task(std::mem::replace( + &mut self.timeout_task, + async_spawn(async {}), + )) + .await; + } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 557895e95d..49c6cc1890 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -309,15 +309,6 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 336b692407..26aca6543f 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -4,22 +4,20 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use core::time::Duration; use std::{ collections::{HashMap, HashSet}, sync::Arc, }; use async_broadcast::{InactiveReceiver, Receiver, SendError, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; +use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use chrono::Utc; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ - consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, + consensus::OuterConsensus, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, @@ -28,7 +26,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, @@ -40,10 +38,7 @@ use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; -use crate::{ - events::HotShotEvent, quorum_proposal_recv::QuorumProposalRecvTaskState, - request::REQUEST_TIMEOUT, -}; +use crate::{events::HotShotEvent, quorum_proposal_recv::ValidationInfo, request::REQUEST_TIMEOUT}; /// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] @@ -123,7 +118,6 @@ pub(crate) async fn fetch_proposal( return None; } } - proposal }) .await @@ -436,7 +430,7 @@ pub(crate) async fn parent_leaf_and_state( /// # Errors /// If any validation or state update fails. #[allow(clippy::too_many_lines)] -#[instrument(skip_all, fields(id = task_state.id, view = *proposal.data.view_number()))] +#[instrument(skip_all, fields(id = validation_info.id, view = *proposal.data.view_number()))] pub async fn validate_proposal_safety_and_liveness< TYPES: NodeType, I: NodeImplementation, @@ -444,7 +438,7 @@ pub async fn validate_proposal_safety_and_liveness< >( proposal: Proposal>, parent_leaf: Leaf, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: &ValidationInfo, event_stream: Sender>>, sender: TYPES::SignatureKey, ) -> Result<()> { @@ -452,7 +446,8 @@ pub async fn validate_proposal_safety_and_liveness< let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(&task_state.upgrade_lock).await, + proposed_leaf.parent_commitment() + == parent_leaf.commit(&validation_info.upgrade_lock).await, "Proposed leaf does not extend the parent leaf." ); @@ -461,19 +456,19 @@ pub async fn validate_proposal_safety_and_liveness< ); let view = View { view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(&task_state.upgrade_lock).await, + leaf: proposed_leaf.commit(&validation_info.upgrade_lock).await, state, delta: None, // May be updated to `Some` in the vote task. }, }; { - let mut consensus_writer = task_state.consensus.write().await; + let mut consensus_writer = validation_info.consensus.write().await; if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { tracing::trace!("{e:?}"); } consensus_writer - .update_saved_leaves(proposed_leaf.clone(), &task_state.upgrade_lock) + .update_saved_leaves(proposed_leaf.clone(), &validation_info.upgrade_lock) .await; // Update our internal storage of the proposal. The proposal is valid, so @@ -490,12 +485,12 @@ pub async fn validate_proposal_safety_and_liveness< ) .await; - let cur_epoch = task_state.cur_epoch; + let cur_epoch = validation_info.cur_epoch; UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &task_state.quorum_membership, + &validation_info.quorum_membership, cur_epoch, - &task_state.upgrade_lock, + &validation_info.upgrade_lock, ) .await?; @@ -503,7 +498,7 @@ pub async fn validate_proposal_safety_and_liveness< proposed_leaf .extends_upgrade( &parent_leaf, - &task_state.upgrade_lock.decided_upgrade_certificate, + &validation_info.upgrade_lock.decided_upgrade_certificate, ) .await?; @@ -513,7 +508,7 @@ pub async fn validate_proposal_safety_and_liveness< // Liveness check. { - let consensus_reader = task_state.consensus.read().await; + let consensus_reader = validation_info.consensus.read().await; let liveness_check = justify_qc.view_number() > consensus_reader.locked_view(); // Safety check. @@ -537,7 +532,7 @@ pub async fn validate_proposal_safety_and_liveness< view_number, event: EventType::Error { error: Arc::new(e) }, }, - &task_state.output_event_stream, + &validation_info.output_event_stream, ) .await; } @@ -555,7 +550,7 @@ pub async fn validate_proposal_safety_and_liveness< sender, }, }, - &task_state.output_event_stream, + &validation_info.output_event_stream, ) .await; @@ -578,17 +573,17 @@ pub async fn validate_proposal_safety_and_liveness< /// /// # Errors /// If any validation or view number check fails. -pub async fn validate_proposal_view_and_certs< +pub(crate) async fn validate_proposal_view_and_certs< TYPES: NodeType, I: NodeImplementation, V: Versions, >( proposal: &Proposal>, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: &ValidationInfo, ) -> Result<()> { let view_number = proposal.data.view_number(); ensure!( - view_number >= task_state.cur_view, + view_number >= validation_info.consensus.read().await.cur_view(), "Proposal is from an older view {:?}", proposal.data.clone() ); @@ -596,9 +591,9 @@ pub async fn validate_proposal_view_and_certs< // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment proposal .validate_signature( - &task_state.quorum_membership, - task_state.cur_epoch, - &task_state.upgrade_lock, + &validation_info.quorum_membership, + validation_info.cur_epoch, + &validation_info.upgrade_lock, ) .await?; @@ -620,9 +615,9 @@ pub async fn validate_proposal_view_and_certs< ensure!( timeout_cert .is_valid_cert( - task_state.timeout_membership.as_ref(), - task_state.cur_epoch, - &task_state.upgrade_lock + validation_info.quorum_membership.as_ref(), + validation_info.cur_epoch, + &validation_info.upgrade_lock ) .await, "Timeout certificate for view {} was invalid", @@ -641,9 +636,9 @@ pub async fn validate_proposal_view_and_certs< ensure!( view_sync_cert .is_valid_cert( - task_state.quorum_membership.as_ref(), - task_state.cur_epoch, - &task_state.upgrade_lock + validation_info.quorum_membership.as_ref(), + validation_info.cur_epoch, + &validation_info.upgrade_lock ) .await, "Invalid view sync finalize cert provided" @@ -656,121 +651,15 @@ pub async fn validate_proposal_view_and_certs< // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &task_state.quorum_membership, - task_state.cur_epoch, - &task_state.upgrade_lock, + &validation_info.quorum_membership, + validation_info.cur_epoch, + &validation_info.upgrade_lock, ) .await?; Ok(()) } -/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the -/// `timeout_task` which are updated during the operation of the function. -/// -/// # Errors -/// Returns an [`utils::anytrace::Error`] when the new view is not greater than the current view. -pub(crate) async fn update_view, V: Versions>( - new_view: TYPES::View, - event_stream: &Sender>>, - task_state: &mut QuorumProposalRecvTaskState, -) -> Result<()> { - ensure!( - new_view > task_state.cur_view, - "New view is not greater than our current view" - ); - - let is_old_view_leader = task_state - .quorum_membership - .leader(task_state.cur_view, task_state.cur_epoch)? - == task_state.public_key; - let old_view = task_state.cur_view; - - tracing::debug!("Updating view from {} to {}", *old_view, *new_view); - - if *old_view / 100 != *new_view / 100 { - tracing::info!("Progress: entered view {:>6}", *new_view); - } - - task_state.cur_view = new_view; - - // The next view is just the current view + 1 - let next_view = task_state.cur_view + 1; - - futures::join! { - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), - broadcast_event( - Event { - view_number: old_view, - event: EventType::ViewFinished { - view_number: old_view, - }, - }, - &task_state.output_event_stream, - ) - }; - - // Spawn a timeout task if we did actually update view - let new_timeout_task = async_spawn({ - let stream = event_stream.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = next_view; - let timeout = Duration::from_millis(task_state.timeout); - async move { - async_sleep(timeout).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), - &stream, - ) - .await; - } - }); - - // cancel the old timeout task - cancel_task(std::mem::replace( - &mut task_state.timeout_task, - new_timeout_task, - )) - .await; - - let consensus_reader = task_state.consensus.upgradable_read().await; - consensus_reader - .metrics - .current_view - .set(usize::try_from(task_state.cur_view.u64()).unwrap()); - let new_view_time = Utc::now().timestamp(); - if is_old_view_leader { - #[allow(clippy::cast_precision_loss)] - consensus_reader - .metrics - .view_duration_as_leader - .add_point((new_view_time - task_state.cur_view_time) as f64); - } - task_state.cur_view_time = new_view_time; - - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(task_state.cur_view.u64()).unwrap() - > usize::try_from(consensus_reader.last_decided_view().u64()).unwrap() - { - consensus_reader - .metrics - .number_of_views_since_last_decide - .set( - usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(consensus_reader.last_decided_view().u64()).unwrap(), - ); - } - let mut consensus_writer = ConsensusUpgradableReadLockGuard::upgrade(consensus_reader).await; - if let Err(e) = consensus_writer.update_view(new_view) { - tracing::trace!("{e:?}"); - } - tracing::trace!("View updated successfully"); - - Ok(()) -} - /// Cancel a task pub async fn cancel_task(task: JoinHandle) { #[cfg(async_executor_impl = "async-std")] diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index f9cbcff865..7fffadfb11 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -4,12 +4,22 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, +}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use async_trait::async_trait; +use futures::future::join_all; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, @@ -30,14 +40,11 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; - /// the network message task state #[derive(Clone)] pub struct NetworkMessageTaskState { @@ -203,6 +210,8 @@ pub struct NetworkEventTaskState< pub consensus: Arc>>, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// map view number to transmit tasks + pub transmit_tasks: BTreeMap>>, } #[async_trait] @@ -331,6 +340,18 @@ impl< } } + /// Cancel all tasks for previous views + pub fn cancel_tasks(&mut self, view: TYPES::View) { + let keep = self.transmit_tasks.split_off(&view); + let mut cancel = Vec::new(); + while let Some((_, tasks)) = self.transmit_tasks.pop_first() { + let mut to_cancel = tasks.into_iter().map(cancel_task).collect(); + cancel.append(&mut to_cancel); + } + self.transmit_tasks = keep; + async_spawn(async move { join_all(cancel).await }); + } + /// Parses a `HotShotEvent` and returns a tuple of: (sender's public key, `MessageKind`, `TransmitType`) /// which will be used to create a message and transmit on the wire. /// Returns `None` if the parsing result should not be sent on the wire. @@ -584,13 +605,14 @@ impl< } HotShotEvent::ViewChange(view) => { self.view = view; - self.network - .update_view::( - self.view.u64(), - self.epoch.u64(), - &self.quorum_membership, - ) - .await; + self.cancel_tasks(view); + let net = Arc::clone(&self.network); + let epoch = self.epoch.u64(); + let mem = self.quorum_membership.clone(); + async_spawn(async move { + net.update_view::(view.saturating_sub(1), epoch, &mem) + .await; + }); None } HotShotEvent::VidRequestSend(req, sender, to) => Some(( @@ -614,7 +636,7 @@ impl< /// Creates a network message and spawns a task that transmits it on the wire. fn spawn_transmit_task( - &self, + &mut self, message_kind: MessageKind, maybe_action: Option, transmit: TransmitType, @@ -640,7 +662,7 @@ impl< let storage = Arc::clone(&self.storage); let consensus = Arc::clone(&self.consensus); let upgrade_lock = self.upgrade_lock.clone(); - async_spawn(async move { + let handle = async_spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), @@ -694,6 +716,10 @@ impl< Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); + self.transmit_tasks + .entry(view_number) + .or_default() + .push(handle); } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index ebce0305ca..1ac07e6abc 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -378,7 +378,6 @@ impl, V: Versions> "Upgrade certificate received for view {}!", *cert.view_number ); - // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. if cert.data.decide_by >= self.latest_proposed_view + 3 { tracing::debug!("Updating current formed_upgrade_certificate"); @@ -390,7 +389,6 @@ impl, V: Versions> either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; let epoch_number = self.consensus.read().await.cur_epoch(); - self.create_dependency_task_if_new( view_number, epoch_number, @@ -461,7 +459,6 @@ impl, V: Versions> } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); - // All nodes get the latest proposed view as a proxy of `cur_view` of old. if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); @@ -487,7 +484,6 @@ impl, V: Versions> HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); let epoch_number = self.consensus.read().await.cur_epoch(); - self.create_dependency_task_if_new( view_number, epoch_number, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 488854e95d..d1fbac8a30 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -30,11 +30,11 @@ use hotshot_types::{ use tracing::instrument; use utils::anytrace::*; -use super::QuorumProposalRecvTaskState; +use super::{QuorumProposalRecvTaskState, ValidationInfo}; use crate::{ events::HotShotEvent, helpers::{ - broadcast_event, fetch_proposal, update_view, validate_proposal_safety_and_liveness, + broadcast_event, fetch_proposal, validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, }, quorum_proposal_recv::{UpgradeLock, Versions}, @@ -45,10 +45,10 @@ use crate::{ async fn validate_proposal_liveness, V: Versions>( proposal: &Proposal>, event_sender: &Sender>>, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: &ValidationInfo, ) -> Result<()> { let view_number = proposal.data.view_number(); - let mut consensus_writer = task_state.consensus.write().await; + let mut consensus_writer = validation_info.consensus.write().await; let leaf = Leaf::from_quorum_proposal(&proposal.data); @@ -57,7 +57,7 @@ async fn validate_proposal_liveness(view_number, event_sender, task_state).await { - tracing::debug!("Liveness Branch - Failed to update view; error = {e:#}"); - } - if !liveness_check { bail!("Quorum Proposal failed the liveness check"); } @@ -153,11 +149,11 @@ pub(crate) async fn handle_quorum_proposal_recv< quorum_proposal_sender_key: &TYPES::SignatureKey, event_sender: &Sender>>, event_receiver: &Receiver>>, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: ValidationInfo, ) -> Result<()> { let quorum_proposal_sender_key = quorum_proposal_sender_key.clone(); - validate_proposal_view_and_certs(proposal, task_state) + validate_proposal_view_and_certs(proposal, &validation_info) .await .context(warn!("Failed to validate proposal view or attached certs"))?; @@ -166,13 +162,13 @@ pub(crate) async fn handle_quorum_proposal_recv< if !justify_qc .is_valid_cert( - task_state.quorum_membership.as_ref(), - task_state.cur_epoch, - &task_state.upgrade_lock, + validation_info.quorum_membership.as_ref(), + validation_info.cur_epoch, + &validation_info.upgrade_lock, ) .await { - let consensus_reader = task_state.consensus.read().await; + let consensus_reader = validation_info.consensus.read().await; consensus_reader.metrics.invalid_qc.update(1); bail!("Invalid justify_qc in proposal for view {}", *view_number); } @@ -186,7 +182,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .await; // Get the parent leaf and state. - let parent_leaf = task_state + let parent_leaf = validation_info .consensus .read() .await @@ -199,17 +195,17 @@ pub(crate) async fn handle_quorum_proposal_recv< justify_qc.view_number(), event_sender.clone(), event_receiver.clone(), - Arc::clone(&task_state.quorum_membership), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + Arc::clone(&validation_info.quorum_membership), + OuterConsensus::new(Arc::clone(&validation_info.consensus.inner_consensus)), // Note that we explicitly use the node key here instead of the provided key in the signature. // This is because the key that we receive is for the prior leader, so the payload would be routed // incorrectly. - task_state.public_key.clone(), - task_state.private_key.clone(), - task_state.upgrade_lock.clone(), + validation_info.public_key.clone(), + validation_info.private_key.clone(), + validation_info.upgrade_lock.clone(), ); } - let consensus_reader = task_state.consensus.read().await; + let consensus_reader = validation_info.consensus.read().await; let parent = match parent_leaf { Some(leaf) => { @@ -223,7 +219,7 @@ pub(crate) async fn handle_quorum_proposal_recv< }; if justify_qc.view_number() > consensus_reader.high_qc().view_number { - if let Err(e) = task_state + if let Err(e) = validation_info .storage .write() .await @@ -235,7 +231,7 @@ pub(crate) async fn handle_quorum_proposal_recv< } drop(consensus_reader); - let mut consensus_writer = task_state.consensus.write().await; + let mut consensus_writer = validation_info.consensus.write().await; if let Err(e) = consensus_writer.update_high_qc(justify_qc.clone()) { tracing::trace!("{e:?}"); } @@ -252,23 +248,29 @@ pub(crate) async fn handle_quorum_proposal_recv< "Proposal's parent missing from storage with commitment: {:?}", justify_qc.data.leaf_commit ); - return validate_proposal_liveness(proposal, event_sender, task_state).await; + validate_proposal_liveness(proposal, event_sender, &validation_info).await?; + broadcast_event( + Arc::new(HotShotEvent::ViewChange(view_number)), + event_sender, + ) + .await; + return Ok(()); }; // Validate the proposal validate_proposal_safety_and_liveness::( proposal.clone(), parent_leaf, - task_state, + &validation_info, event_sender.clone(), quorum_proposal_sender_key, ) .await?; - - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::(view_number, event_sender, task_state).await { - tracing::debug!("Full Branch - Failed to update view; error = {e:#}"); - } + broadcast_event( + Arc::new(HotShotEvent::ViewChange(view_number)), + event_sender, + ) + .await; Ok(()) } diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 030dc1295c..1cff840bf2 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -9,6 +9,7 @@ use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::{broadcast, Receiver, Sender}; +use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; @@ -38,7 +39,7 @@ use crate::{ events::{HotShotEvent, ProposalMissing}, helpers::{broadcast_event, cancel_task, parent_leaf_and_state}, }; - +use hotshot_types::traits::node_implementation::ConsensusTime; /// Event handlers for this task. mod handlers; @@ -57,24 +58,12 @@ pub struct QuorumProposalRecvTaskState, - /// Membership for Quorum Certs/votes pub quorum_membership: Arc, - /// Membership for Timeout votes/certs - pub timeout_membership: Arc, - - /// timeout task handle - pub timeout_task: JoinHandle<()>, - /// View timeout from config. pub timeout: u64, @@ -84,16 +73,10 @@ pub struct QuorumProposalRecvTaskState>, - /// last View Sync Certificate or Timeout Certificate this node formed. - pub proposal_cert: Option>, - /// Spawned tasks related to a specific view, so we can cancel them when /// they are stale pub spawned_tasks: BTreeMap>>, - /// Immutable instance state - pub instance_state: Arc, - /// The node's id pub id: u64, @@ -101,11 +84,34 @@ pub struct QuorumProposalRecvTaskState, } +/// all the info we need to validate a proposal. This makes it easy to spawn an effemeral task to +/// do all the proposal validation without blocking the long running one +pub(crate) struct ValidationInfo, V: Versions> { + /// The node's id + pub id: u64, + /// Our public key + pub(crate) public_key: TYPES::SignatureKey, + /// Our Private Key + pub(crate) private_key: ::PrivateKey, + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::Epoch, + /// Reference to consensus. The replica will require a write lock on this. + pub(crate) consensus: OuterConsensus, + /// Membership for Quorum Certs/votes + pub quorum_membership: Arc, + /// Output events to application + pub output_event_stream: async_broadcast::Sender>, + /// This node's storage ref + pub(crate) storage: Arc>, + /// Lock for a decided upgrade + pub(crate) upgrade_lock: UpgradeLock, +} + impl, V: Versions> QuorumProposalRecvTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::View) { + pub fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.spawned_tasks.split_off(&view); let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { @@ -113,7 +119,7 @@ impl, V: Versions> cancel.append(&mut to_cancel); } self.spawned_tasks = keep; - join_all(cancel).await; + async_spawn(async move { join_all(cancel).await }); } /// Handles all consensus events relating to propose and vote-enabling events. @@ -125,21 +131,51 @@ impl, V: Versions> event_sender: Sender>>, event_receiver: Receiver>>, ) { - if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { - match handle_quorum_proposal_recv( - proposal, - sender, - &event_sender, - &event_receiver, - self, - ) - .await - { - Ok(()) => { - self.cancel_tasks(proposal.data.view_number()).await; + match event.as_ref() { + HotShotEvent::QuorumProposalRecv(proposal, sender) => { + if self.consensus.read().await.cur_view() > proposal.data.view_number() + || self.cur_view > proposal.data.view_number() + { + tracing::error!("Throwing away old proposal"); + return; + } + let validation_info = ValidationInfo:: { + id: self.id, + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), + cur_epoch: self.cur_epoch, + consensus: self.consensus.clone(), + quorum_membership: Arc::clone(&self.quorum_membership), + output_event_stream: self.output_event_stream.clone(), + storage: Arc::clone(&self.storage), + upgrade_lock: self.upgrade_lock.clone(), + }; + match handle_quorum_proposal_recv( + proposal, + sender, + &event_sender, + &event_receiver, + validation_info, + ) + .await + { + Ok(()) => {} + Err(e) => debug!(?e, "Failed to validate the proposal"), + } + } + HotShotEvent::ViewChange(view) => { + if self.cur_view >= *view { + return; } - Err(e) => debug!(?e, "Failed to validate the proposal"), + self.cur_view = *view; + // cancel task for any view 2 views prior or more. The view here is the oldest + // view we want to KEEP tasks for. We keep the view prior to this because + // we might still be processing the proposal from view V which caused us + // to enter view V + 1. + let oldest_view_to_keep = TYPES::View::new(view.saturating_sub(1)); + self.cancel_tasks(oldest_view_to_keep); } + _ => {} } } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 70a4c22aa0..a3a58e3874 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -178,7 +178,11 @@ impl + 'static, V: Versions> Handl &self.sender, ) .await; - + broadcast_event( + Arc::new(HotShotEvent::ViewChange(self.view_number + 1)), + &self.sender, + ) + .await; let Some(vid_share) = vid_share else { tracing::error!( "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", @@ -573,8 +577,9 @@ impl, V: Versions> QuorumVoteTaskS } } HotShotEvent::Timeout(view) => { + let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks - let current_tasks = self.vote_dependencies.split_off(view); + let current_tasks = self.vote_dependencies.split_off(&view); while let Some((_, task)) = self.vote_dependencies.pop_last() { cancel_task(task).await; } @@ -656,6 +661,11 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, ) .await; + broadcast_event( + Arc::new(HotShotEvent::ViewChange(proposal.data.view_number() + 1)), + &event_sender, + ) + .await; // Update internal state if let Err(e) = update_shared_state::( diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 62290e7449..cac4b9ad06 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -470,41 +470,17 @@ impl, V: Versions> TransactionTask .await; } HotShotEvent::ViewChange(view) => { - let view = *view; - - tracing::debug!("view change in transactions to view {:?}", view); - ensure!( - *view > *self.cur_view || *self.cur_view == 0, - debug!( - "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view - ) - ); - - let mut make_block = false; - if *view - *self.cur_view > 1 { - tracing::info!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.leader(view, self.cur_epoch)? == self.public_key; - } - self.cur_view = view; - - let next_view = self.cur_view + 1; - let next_leader = - self.membership.leader(next_view, self.cur_epoch)? == self.public_key; - + let view = TYPES::View::new(std::cmp::max(1, **view)); ensure!( - make_block || next_leader, + *view > *self.cur_view, debug!( - "Not making the block because we are not leader for view {:?}", - self.cur_view + "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view ) ); - - if make_block { - self.handle_view_change(&event_stream, self.cur_view).await; - } - - if next_leader { - self.handle_view_change(&event_stream, next_view).await; + self.cur_view = view; + if self.membership.leader(view, self.cur_epoch)? == self.public_key { + self.handle_view_change(&event_stream, view).await; + return Ok(()); } } _ => {} @@ -748,7 +724,7 @@ impl, V: Versions> TransactionTask ) { Ok(request_signature) => request_signature, Err(err) => { - tracing::warn!(%err, "Failed to sign block hash"); + tracing::error!(%err, "Failed to sign block hash"); continue; } }; diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 28dfe9d935..2172d00d41 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -439,7 +439,6 @@ impl, V: Versions> ViewSyncTaskSta // Garbage collect old tasks // We could put this into a separate async task, but that would require making several fields on ViewSyncTaskState thread-safe and harm readability. In the common case this will have zero tasks to clean up. - // cancel poll for votes // run GC for i in *self.last_garbage_collected_view..*self.cur_view { self.replica_task_map @@ -466,7 +465,7 @@ impl, V: Versions> ViewSyncTaskSta &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it ensure!( - view_number > self.cur_view, + view_number >= self.cur_view, debug!("Discarding old timeout vote.") ); @@ -495,7 +494,7 @@ impl, V: Versions> ViewSyncTaskSta .await; } else { // If this is the first timeout we've seen advance to the next view - self.cur_view = view_number; + self.cur_view = view_number + 1; broadcast_event( Arc::new(HotShotEvent::ViewChange(TYPES::View::new(*self.cur_view))), &event_stream, diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 30233dd269..e21580a3a5 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; @@ -351,6 +351,7 @@ impl + std::fmt::Debug, V: Version storage: Arc::clone(&handle.storage()), consensus: Arc::clone(&handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), + transmit_tasks: BTreeMap::new(), }; let modified_network_state = NetworkEventTaskStateModifier { network_event_task_state: network_state, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 979c2c2a04..ba7136c90f 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -498,6 +498,7 @@ impl RoundResult { } if let Some((n_txn, _)) = self.num_txns_map.iter().last() { if *n_txn < transaction_threshold { + tracing::error!("not enough transactions for view {:?}", key.view_number()); self.status = ViewStatus::Failed; return; } diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 8f363847d0..03fa0be103 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -38,7 +38,6 @@ pub struct TxnTask, V: Ver impl, V: Versions> TxnTask { pub fn run(mut self) -> JoinHandle<()> { async_spawn(async move { - async_sleep(Duration::from_millis(100)).await; loop { async_sleep(self.duration).await; if let Ok(TestEvent::Shutdown) = self.shutdown_chan.try_recv() { diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 08e4d7f88e..2771d6993a 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -33,6 +33,8 @@ use hotshot_types::{ #[cfg_attr(async_executor_impl = "async-std", async_std::test)] #[allow(clippy::too_many_lines)] async fn test_network_task() { + use std::collections::BTreeMap; + use futures::StreamExt; use hotshot_types::traits::network::Topic; @@ -69,6 +71,7 @@ async fn test_network_task() { upgrade_lock: upgrade_lock.clone(), storage, consensus, + transmit_tasks: BTreeMap::new(), }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); @@ -214,6 +217,8 @@ async fn test_network_external_mnessages() { #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_network_storage_fail() { + use std::collections::BTreeMap; + use futures::StreamExt; use hotshot_types::traits::network::Topic; @@ -250,6 +255,7 @@ async fn test_network_storage_fail() { upgrade_lock: upgrade_lock.clone(), storage, consensus, + transmit_tasks: BTreeMap::new(), }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 3030a1aea2..b167910684 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -84,6 +84,7 @@ async fn test_quorum_vote_task_success() { exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + exact(ViewChange(ViewNumber::new(3))), validated_state_updated(), quorum_vote_send(), ])]; diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 04cd0d528e..b9834b98c8 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -36,6 +36,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { let current_view = ViewNumber::new(4); input.push(HotShotEvent::ViewChange(current_view)); + input.push(HotShotEvent::ViewChange(current_view + 1)); input.push(HotShotEvent::Shutdown); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 212ca114bb..658a10c386 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -138,6 +138,7 @@ async fn test_upgrade_task_with_vote() { exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + exact(ViewChange(ViewNumber::new(3))), validated_state_updated(), quorum_vote_send(), ]), @@ -147,6 +148,7 @@ async fn test_upgrade_task_with_vote() { exact(DaCertificateValidated(dacs[2].clone())), exact(VidShareValidated(vids[2].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(3))), + exact(ViewChange(ViewNumber::new(4))), validated_state_updated(), quorum_vote_send(), ], @@ -160,6 +162,7 @@ async fn test_upgrade_task_with_vote() { exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(4))), + exact(ViewChange(ViewNumber::new(5))), validated_state_updated(), quorum_vote_send(), ], @@ -173,6 +176,7 @@ async fn test_upgrade_task_with_vote() { exact(DaCertificateValidated(dacs[4].clone())), exact(VidShareValidated(vids[4].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(5))), + exact(ViewChange(ViewNumber::new(6))), validated_state_updated(), quorum_vote_send(), ], diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 5b2a9bf2d3..145885a657 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -51,7 +51,7 @@ async fn test_view_sync_task() { input.push(HotShotEvent::Shutdown); - output.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + output.push(HotShotEvent::ViewChange(ViewNumber::new(3))); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); let view_sync_state = diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 085e37862a..742ba10f53 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -80,6 +80,7 @@ async fn test_vote_dependency_handle() { // The outputs are static here, but we re-make them since we use `into_iter` below let outputs = vec![ exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), + exact(ViewChange(ViewNumber::new(3))), validated_state_updated(), quorum_vote_send(), ]; diff --git a/testing/tests/tests_4/byzantine_tests.rs b/testing/tests/tests_4/byzantine_tests.rs index 8b460a5665..7d89d6c870 100644 --- a/testing/tests/tests_4/byzantine_tests.rs +++ b/testing/tests/tests_4/byzantine_tests.rs @@ -1,61 +1,61 @@ -use std::{collections::HashMap, rc::Rc, time::Duration}; +// use std::{collections::HashMap, rc::Rc, time::Duration}; -use hotshot_example_types::{ - node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl}, - state_types::TestTypes, -}; -use hotshot_macros::cross_tests; -use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - byzantine::byzantine_behaviour::ViewDelay, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::{Behaviour, TestDescription}, -}; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +// use hotshot_example_types::{ +// node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl}, +// state_types::TestTypes, +// }; +// use hotshot_macros::cross_tests; +// use hotshot_testing::{ +// block_builder::SimpleBuilderImplementation, +// byzantine::byzantine_behaviour::ViewDelay, +// completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, +// test_builder::{Behaviour, TestDescription}, +// }; +// use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -cross_tests!( - TestName: view_delay, - Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Versions: [MarketplaceTestVersions], - Ignore: false, - Metadata: { +// cross_tests!( +// TestName: view_delay, +// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], +// Types: [TestTypes], +// Versions: [MarketplaceTestVersions], +// Ignore: false, +// Metadata: { - let behaviour = Rc::new(|node_id| { - let view_delay = ViewDelay { - number_of_views_to_delay: node_id/3, - events_for_view: HashMap::new(), - stop_view_delay_at_view_number: 25, - }; - match node_id { - 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), - _ => Behaviour::Standard, - } - }); +// let behaviour = Rc::new(|node_id| { +// let view_delay = ViewDelay { +// number_of_views_to_delay: node_id/3, +// events_for_view: HashMap::new(), +// stop_view_delay_at_view_number: 25, +// }; +// match node_id { +// 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), +// _ => Behaviour::Standard, +// } +// }); - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; +// let mut metadata = TestDescription { +// // allow more time to pass in CI +// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( +// TimeBasedCompletionTaskDescription { +// duration: Duration::from_secs(60), +// }, +// ), +// behaviour, +// ..TestDescription::default() +// }; - let num_nodes_with_stake = 15; - metadata.num_nodes_with_stake = num_nodes_with_stake; - metadata.da_staked_committee_size = num_nodes_with_stake; - metadata.overall_safety_properties.num_failed_views = 20; - metadata.overall_safety_properties.num_successful_views = 20; - metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ - (ViewNumber::new(6), false), - (ViewNumber::new(10), false), - (ViewNumber::new(14), false), - (ViewNumber::new(21), false), - (ViewNumber::new(25), false), - ]); - metadata - }, -); +// let num_nodes_with_stake = 15; +// metadata.num_nodes_with_stake = num_nodes_with_stake; +// metadata.da_staked_committee_size = num_nodes_with_stake; +// metadata.overall_safety_properties.num_failed_views = 20; +// metadata.overall_safety_properties.num_successful_views = 20; +// metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ +// (ViewNumber::new(6), false), +// (ViewNumber::new(10), false), +// (ViewNumber::new(14), false), +// (ViewNumber::new(21), false), +// (ViewNumber::new(25), false), +// ]); +// metadata +// }, +// ); diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 6ea256fc2d..4fd033d0af 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -23,7 +23,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 5; + metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; metadata.num_bootstrap_nodes = 14; From 69ae779d5763253437a954736d50e65153071304 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 5 Nov 2024 17:52:52 -0500 Subject: [PATCH 1285/1393] Add VidCommitment as parameter to append_da (#3846) * add vid commitment to append_da * fix --- example-types/Cargo.toml | 1 + example-types/src/storage_types.rs | 11 ++++++++--- task-impls/src/da.rs | 16 +++++++--------- types/src/traits/storage.rs | 8 +++++++- 4 files changed, 23 insertions(+), 13 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 024c48a064..e4ceb2e8c4 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -34,6 +34,7 @@ async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } hotshot-task = { path = "../task" } +jf-vid = { workspace = true } vbs = { workspace = true } url = { workspace = true } reqwest = { workspace = true } diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 2a093b1f02..77b6125c52 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -9,6 +9,7 @@ use std::{ sync::Arc, }; +use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; @@ -23,10 +24,10 @@ use hotshot_types::{ storage::Storage, }, utils::View, + vid::VidSchemeType, vote::HasViewNumber, }; - -use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; +use jf_vid::VidScheme; type VidShares = HashMap< ::View, @@ -117,7 +118,11 @@ impl Storage for TestStorage { Ok(()) } - async fn append_da(&self, proposal: &Proposal>) -> Result<()> { + async fn append_da( + &self, + proposal: &Proposal>, + _vid_commit: ::Commit, + ) -> Result<()> { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 49c6cc1890..d96efcfb3d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -183,21 +183,19 @@ impl, V: Versions> DaTaskState: Send + Sync + Clone { /// Add a proposal to the stored VID proposals. async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; /// Add a proposal to the stored DA proposals. - async fn append_da(&self, proposal: &Proposal>) -> Result<()>; + async fn append_da( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> Result<()>; /// Add a proposal we sent to the store async fn append_proposal( &self, From 9b7cc9ec41a21915b39d6592fa6da675577e66a8 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Wed, 6 Nov 2024 20:35:38 +0200 Subject: [PATCH 1286/1393] Correction of Grammatical Errors (#3853) * Update QuorumProposalRecv.md * Update README.md * Update CHANGELOG.md --------- Co-authored-by: Phil <184445976+pls148@users.noreply.github.com> --- libp2p-networking/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libp2p-networking/README.md b/libp2p-networking/README.md index 62a190398b..51afee2c44 100644 --- a/libp2p-networking/README.md +++ b/libp2p-networking/README.md @@ -55,11 +55,11 @@ ulimit -n 4096 In these tests, there are three types of nodes. `Regular` nodes that limit the number of incoming connections, `Bootstrap` nodes that allow all connections, and `Conductor` nodes that all nodes (bootstrap and regular) connect to and periodically ping with their state. This "conductor" node instructs nodes in the swarm to increment their state either via broadcast or direct messages in the same fashion as the single machine tests. -In the direct message case, the conductor will increment the state of a randomly chosen node, `i`. Then the conductor will direct message all other nodes to request node `i`'s counter and increment their counter to the value in `i`'s node. In the broadcast case, the conductor will increment the state of a randomly chose node, `i`, and tell `i` to broadcast this incremented state. +In the direct message case, the conductor will increment the state of a randomly chosen node, `i`. Then the conductor will direct message all other nodes to request node `i`'s counter and increment their counter to the value in `i`'s node. In the broadcast case, the conductor will increment the state of a randomly chosen node, `i`, and tell `i` to broadcast this incremented state. In both cases, the test terminates as successful when the conductor receives the incremented state from all other nodes. Then, the conductor sends a special "kill" message to all known nodes and waits for them to disconnect. -Metadata about the toplogy is currently read from an `identity_mapping.json` file that manually labels the type of node (bootstrap, regular, conductor). The conductor uses this to figure out information about all nodes in the network. The regular nodes use this to learn about their ip address and the addresses necessary to bootstrap onto the network. The boostrap nodes only use this to learn about their ip addresses. +Metadata about the topology is currently read from an `identity_mapping.json` file that manually labels the type of node (bootstrap, regular, conductor). The conductor uses this to figure out information about all nodes in the network. The regular nodes use this to learn about their ip address and the addresses necessary to bootstrap onto the network. The bootstrap nodes only use this to learn about their ip addresses. ### Running counter multi-machine tests @@ -75,7 +75,7 @@ nix develop -c cargo run --features webui,async-std-executor --release --example ``` ### Network Emulation -One may introduce simulated network latency via the network emulationn queueing discipline. This is implemented in two ways: on what is assumed to be a AWS EC2 instance, and in a docker container. Example usage on AWS EC2 instance: +One may introduce simulated network latency via the network emulation queueing discipline. This is implemented in two ways: on what is assumed to be a AWS EC2 instance, and in a docker container. Example usage on AWS EC2 instance: ```bash # run each line in a separate AWS instance From d97ceddc8b04808ec49975e43f4917eff6d1222f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 6 Nov 2024 13:38:11 -0500 Subject: [PATCH 1287/1393] Ignore duplicate txns via LRU cache (#3821) * Ignore duplicate txns via LRU cache * cache just in combined --- hotshot/src/tasks/mod.rs | 1 + hotshot/src/traits/networking/combined_network.rs | 7 ++----- task-impls/Cargo.toml | 1 + types/src/constants.rs | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 645b0b06d8..5d1476673d 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -119,6 +119,7 @@ pub fn add_queue_len_task, V: Vers } /// Add the network task to handle messages and publish events. +#[allow(clippy::missing_panics_doc)] pub fn add_network_message_task< TYPES: NodeType, I: NodeImplementation, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 0f8a4cdcd5..9678c54244 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -462,11 +462,8 @@ impl ConnectedNetwork for CombinedNetworks // Calculate hash of the message let message_hash = calculate_hash_of(&message); - // Check if the hash is in the cache - if !self.message_cache.read().contains(&message_hash) { - // Add the hash to the cache - self.message_cache.write().put(message_hash, ()); - + // Check if the hash is in the cache and update the cache + if self.message_cache.write().put(message_hash, ()).is_none() { break Ok(message); } } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index c9a67c0c6a..e187a96a26 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -29,6 +29,7 @@ hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } jf-signature = { workspace = true } jf-vid = { workspace = true } +lru.workspace = true rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } diff --git a/types/src/constants.rs b/types/src/constants.rs index b4c8a39008..6a1969ae31 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -21,7 +21,7 @@ pub const LOOK_AHEAD: u64 = 5; pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; /// the number of messages to cache in the combined network -pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; +pub const COMBINED_NETWORK_CACHE_SIZE: usize = 200_000; /// the number of messages to attempt to send over the primary network before switching to prefer the secondary network pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; From 1935fcccd47b0401ba45cd60c6995d53a27247c5 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Thu, 7 Nov 2024 08:41:36 -0500 Subject: [PATCH 1288/1393] chore(deps): bump `jf-signature` crate to v0.2.0 (#3862) * bump jf-signature * fix example * fix dep --- examples/infra/mod.rs | 131 +++++++++++------- .../src/traits/networking/libp2p_network.rs | 4 +- orchestrator/src/client.rs | 17 ++- testing/src/helpers.rs | 9 +- testing/src/test_builder.rs | 4 +- testing/src/test_launcher.rs | 4 +- testing/tests/tests_1/network_task.rs | 6 +- types/Cargo.toml | 2 +- types/src/hotshot_config_file.rs | 5 - types/src/lib.rs | 8 +- types/src/light_client.rs | 2 +- types/src/signature_key.rs | 16 ++- types/src/traits/signature_key.rs | 40 +++--- 13 files changed, 147 insertions(+), 101 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 8d8bf4d3b7..7fbae5db22 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -258,9 +258,6 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig( config_file: &str, @@ -273,11 +270,6 @@ pub fn load_config_from_file( let mut config: NetworkConfig = config_toml.into(); - // my_own_validator_config would be best to load from file, - // but its type is too complex to load so we'll generate it from seed now. - // Also this function is only used for orchestrator initialization now, so this value doesn't matter - config.config.my_own_validator_config = - ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1, true); // initialize it with size for better assignment of peers' config config.config.known_nodes_with_stake = vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize]; @@ -357,6 +349,7 @@ pub trait RunDa< /// Initializes networking, returns self async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, libp2p_advertise_address: Option, ) -> Self; @@ -371,10 +364,11 @@ pub trait RunDa< .expect("Couldn't generate genesis block"); let config = self.config(); + let validator_config = self.validator_config(); // Get KeyPair for certificate Aggregation - let pk = config.config.my_own_validator_config.public_key.clone(); - let sk = config.config.my_own_validator_config.private_key.clone(); + let pk = validator_config.public_key.clone(); + let sk = validator_config.private_key.clone(); let network = self.network(); @@ -600,6 +594,9 @@ pub trait RunDa< /// Returns the config for this run fn config(&self) -> NetworkConfig; + + /// Returns the validator config with private signature keys for this run. + fn validator_config(&self) -> ValidatorConfig; } // Push CDN @@ -608,6 +605,8 @@ pub trait RunDa< pub struct PushCdnDaRun { /// The underlying configuration config: NetworkConfig, + /// The private validator config + validator_config: ValidatorConfig, /// The underlying network network: PushCdnNetwork, } @@ -636,20 +635,18 @@ where { async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, _libp2p_advertise_address: Option, ) -> PushCdnDaRun { - // Get our own key - let key = config.config.my_own_validator_config.clone(); - // Convert to the Push-CDN-compatible type let keypair = KeyPair { - public_key: WrappedSignatureKey(key.public_key), - private_key: key.private_key, + public_key: WrappedSignatureKey(validator_config.public_key.clone()), + private_key: validator_config.private_key.clone(), }; // See if we should be DA, subscribe to the DA topic if so let mut topics = vec![CdnTopic::Global]; - if config.config.my_own_validator_config.is_da { + if validator_config.is_da { topics.push(CdnTopic::Da); } @@ -668,7 +665,11 @@ where // Wait for the network to be ready network.wait_for_ready().await; - PushCdnDaRun { config, network } + PushCdnDaRun { + config, + validator_config, + network, + } } fn network(&self) -> PushCdnNetwork { @@ -678,6 +679,10 @@ where fn config(&self) -> NetworkConfig { self.config.clone() } + + fn validator_config(&self) -> ValidatorConfig { + self.validator_config.clone() + } } // Libp2p @@ -686,6 +691,8 @@ where pub struct Libp2pDaRun { /// The underlying network configuration config: NetworkConfig, + /// The private validator config + validator_config: ValidatorConfig, /// The underlying network network: Libp2pNetwork, } @@ -714,12 +721,12 @@ where { async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, libp2p_advertise_address: Option, ) -> Libp2pDaRun { // Extrapolate keys for ease of use - let keys = config.clone().config.my_own_validator_config; - let public_key = keys.public_key; - let private_key = keys.private_key; + let public_key = &validator_config.public_key; + let private_key = &validator_config.private_key; // In an example, we can calculate the libp2p bind address as a function // of the advertise address. @@ -759,8 +766,8 @@ where GossipConfig::default(), RequestResponseConfig::default(), bind_address, - &public_key, - &private_key, + public_key, + private_key, Libp2pMetricsValue::default(), ) .await @@ -771,6 +778,7 @@ where Libp2pDaRun { config, + validator_config, network: libp2p_network, } } @@ -782,6 +790,10 @@ where fn config(&self) -> NetworkConfig { self.config.clone() } + + fn validator_config(&self) -> ValidatorConfig { + self.validator_config.clone() + } } // Combined network @@ -790,6 +802,8 @@ where pub struct CombinedDaRun { /// The underlying network configuration config: NetworkConfig, + /// The private validator config + validator_config: ValidatorConfig, /// The underlying network network: CombinedNetworks, } @@ -818,6 +832,7 @@ where { async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, libp2p_advertise_address: Option, ) -> CombinedDaRun { // Initialize our Libp2p network @@ -827,19 +842,24 @@ where Libp2pImpl, V, >>::initialize_networking( - config.clone(), libp2p_advertise_address.clone() + config.clone(), + validator_config.clone(), + libp2p_advertise_address.clone(), ) .await; // Initialize our CDN network - let cdn_network: PushCdnDaRun = - as RunDa< - TYPES, - PushCdnNetwork, - PushCdnImpl, - V, - >>::initialize_networking(config.clone(), libp2p_advertise_address) - .await; + let cdn_network: PushCdnDaRun = as RunDa< + TYPES, + PushCdnNetwork, + PushCdnImpl, + V, + >>::initialize_networking( + config.clone(), + validator_config.clone(), + libp2p_advertise_address, + ) + .await; // Create our combined network config let delay_duration = config @@ -852,7 +872,11 @@ where CombinedNetworks::new(cdn_network.network, libp2p_network.network, delay_duration); // Return the run configuration - CombinedDaRun { config, network } + CombinedDaRun { + config, + validator_config, + network, + } } fn network(&self) -> CombinedNetworks { @@ -862,6 +886,10 @@ where fn config(&self) -> NetworkConfig { self.config.clone() } + + fn validator_config(&self) -> ValidatorConfig { + self.validator_config.clone() + } } /// Main entry point for validators @@ -897,24 +925,22 @@ pub async fn main_entry_point< let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.url.clone()); // We assume one node will not call this twice to generate two validator_config-s with same identity. - let my_own_validator_config = - NetworkConfig::::generate_init_validator_config( - orchestrator_client - .get_node_index_for_init_validator_config() - .await, - // we assign nodes to the DA committee by default - true, - ); + let validator_config = NetworkConfig::::generate_init_validator_config( + orchestrator_client + .get_node_index_for_init_validator_config() + .await, + // we assign nodes to the DA committee by default + true, + ); // Derives our Libp2p private key from our private key, and then returns the public key of that key let libp2p_public_key = - derive_libp2p_peer_id::(&my_own_validator_config.private_key) + derive_libp2p_peer_id::(&validator_config.private_key) .expect("failed to derive Libp2p keypair"); // We need this to be able to register our node let peer_config = - PeerConfig::::to_bytes(&my_own_validator_config.public_config()) - .clone(); + PeerConfig::::to_bytes(&validator_config.public_config()).clone(); // Derive the advertise multiaddress from the supplied string let advertise_multiaddress = args.advertise_address.clone().map(|advertise_address| { @@ -928,16 +954,22 @@ pub async fn main_entry_point< // This function will be taken solely by sequencer right after OrchestratorClient::new, // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. - let (mut run_config, source) = get_complete_config( + let (mut run_config, validator_config, source) = get_complete_config( &orchestrator_client, - my_own_validator_config, + validator_config, advertise_multiaddress, Some(libp2p_public_key), ) .await .expect("failed to get config"); - let builder_task = initialize_builder(&mut run_config, &args, &orchestrator_client).await; + let builder_task = initialize_builder( + &mut run_config, + &validator_config, + &args, + &orchestrator_client, + ) + .await; run_config.config.builder_urls = orchestrator_client .get_builder_addresses() @@ -957,7 +989,9 @@ pub async fn main_entry_point< ); info!("Initializing networking"); - let run = RUNDA::initialize_networking(run_config.clone(), args.advertise_address).await; + let run = + RUNDA::initialize_networking(run_config.clone(), validator_config, args.advertise_address) + .await; let hotshot = run.initialize_state_and_hotshot().await; if let Some(task) = builder_task { @@ -1018,6 +1052,7 @@ async fn initialize_builder< >, >( run_config: &mut NetworkConfig<::SignatureKey>, + validator_config: &ValidatorConfig<::SignatureKey>, args: &ValidatorArgs, orchestrator_client: &OrchestratorClient, ) -> Option>> @@ -1026,7 +1061,7 @@ where ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { - if !run_config.config.my_own_validator_config.is_da { + if !validator_config.is_da { return None; } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index f5cbac9f50..a8bc4ff6e4 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -51,7 +51,7 @@ use hotshot_types::{ metrics::{Counter, Gauge, Metrics, NoMetrics}, network::{ConnectedNetwork, NetworkError, Topic}, node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, + signature_key::{PrivateSignatureKey, SignatureKey}, }, BoxSyncFuture, }; @@ -310,7 +310,7 @@ pub fn derive_libp2p_keypair( private_key: &K::PrivateKey, ) -> anyhow::Result { // Derive a secondary key from our primary private key - let derived_key = blake3::derive_key("libp2p key", &(bincode::serialize(&private_key)?)); + let derived_key = blake3::derive_key("libp2p key", &private_key.to_bytes()); let derived_key = SecretKey::try_from_bytes(derived_key)?; // Create an `ed25519` keypair from the derived key diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index bac62ecdd7..19799f8e7b 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -168,14 +168,14 @@ pub struct MultiValidatorArgs { /// If we are unable to get the configuration from the orchestrator pub async fn get_complete_config( client: &OrchestratorClient, - my_own_validator_config: ValidatorConfig, + mut validator_config: ValidatorConfig, libp2p_advertise_address: Option, libp2p_public_key: Option, -) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { +) -> anyhow::Result<(NetworkConfig, ValidatorConfig, NetworkConfigSource)> { // get the configuration from the orchestrator let run_config: NetworkConfig = client .post_and_wait_all_public_keys::( - my_own_validator_config, + &mut validator_config, libp2p_advertise_address, libp2p_public_key, ) @@ -183,9 +183,13 @@ pub async fn get_complete_config( info!( "Retrieved config; our node index is {}. DA committee member: {}", - run_config.node_index, run_config.config.my_own_validator_config.is_da + run_config.node_index, validator_config.is_da ); - Ok((run_config, NetworkConfigSource::Orchestrator)) + Ok(( + run_config, + validator_config, + NetworkConfigSource::Orchestrator, + )) } impl ValidatorArgs { @@ -393,7 +397,7 @@ impl OrchestratorClient { #[instrument(skip(self), name = "orchestrator public keys")] pub async fn post_and_wait_all_public_keys( &self, - mut validator_config: ValidatorConfig, + validator_config: &mut ValidatorConfig, libp2p_advertise_address: Option, libp2p_public_key: Option, ) -> NetworkConfig { @@ -445,7 +449,6 @@ impl OrchestratorClient { let mut network_config = self.get_config_after_collection().await; network_config.node_index = node_index; - network_config.config.my_own_validator_config = validator_config; network_config } diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 842ef3e538..40760372c6 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -92,7 +92,7 @@ pub async fn build_system_handle_from_launcher< let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); let marketplace_config = (launcher.resource_generator.marketplace_config)(node_id); - let mut config = launcher.resource_generator.config.clone(); + let config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( launcher.metadata.async_delay_config.clone(), @@ -104,11 +104,10 @@ pub async fn build_system_handle_from_launcher< let is_da = node_id < config.da_staked_committee_size as u64; // We assign node's public key and stake value rather than read from config file since it's a test - let validator_config = + let validator_config: ValidatorConfig = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); - config.my_own_validator_config = validator_config; - let private_key = config.my_own_validator_config.private_key.clone(); - let public_key = config.my_own_validator_config.public_key.clone(); + let private_key = validator_config.private_key.clone(); + let public_key = validator_config.public_key.clone(); let all_nodes = config.known_nodes_with_stake.clone(); let da_nodes = config.known_da_nodes.clone(); diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index f86acd9d31..4773e597e0 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -481,7 +481,7 @@ where }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. - let my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( + let validator_config = ValidatorConfig::::generated_from_seed_indexed( [0u8; 32], node_id, 1, @@ -496,7 +496,6 @@ where known_da_nodes, num_bootstrap: num_bootstrap_nodes, known_nodes_with_stake, - my_own_validator_config, da_staked_committee_size, fixed_leader_for_gpuvid: 1, next_view_timeout: 500, @@ -548,6 +547,7 @@ where storage }), config, + validator_config, marketplace_config: Box::new(|_| MarketplaceConfig:: { auction_results_provider: TestAuctionResultsProvider::::default().into(), fallback_builder_url: Url::parse("http://localhost:9999").unwrap(), diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index a6c5730e1c..8edffa33a8 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -16,7 +16,7 @@ use hotshot_types::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{NodeType, Versions}, }, - HotShotConfig, + HotShotConfig, ValidatorConfig, }; use super::{test_builder::TestDescription, test_runner::TestRunner}; @@ -36,6 +36,8 @@ pub struct ResourceGenerators>, /// configuration used to generate each hotshot node pub config: HotShotConfig, + /// config that contains the signature keys + pub validator_config: ValidatorConfig, /// generate a new marketplace config for each node pub marketplace_config: Generator>, } diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 2771d6993a..26367ab7b4 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -55,7 +55,8 @@ async fn test_network_task() { let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); let consensus = handle.hotshot.consensus(); let config = launcher.resource_generator.config.clone(); - let public_key = config.my_own_validator_config.public_key; + let validator_config = launcher.resource_generator.validator_config.clone(); + let public_key = validator_config.public_key; let all_nodes = config.known_nodes_with_stake.clone(); @@ -239,7 +240,8 @@ async fn test_network_storage_fail() { let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); storage.write().await.should_return_err = true; let config = launcher.resource_generator.config.clone(); - let public_key = config.my_own_validator_config.public_key; + let validator_config = launcher.resource_generator.validator_config.clone(); + let public_key = validator_config.public_key; let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = UpgradeLock::::new(); diff --git a/types/Cargo.toml b/types/Cargo.toml index 9575be6b33..f2f161c33c 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -40,7 +40,7 @@ typenum = { workspace = true } derivative = "2" jf-vid = { workspace = true } jf-pcs = { workspace = true } -jf-signature = { workspace = true, features = ["schnorr"] } +jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } diff --git a/types/src/hotshot_config_file.rs b/types/src/hotshot_config_file.rs index 93f37ca022..9a285ae6fc 100644 --- a/types/src/hotshot_config_file.rs +++ b/types/src/hotshot_config_file.rs @@ -29,9 +29,6 @@ pub struct HotShotConfigFile { /// Total number of staked nodes in the network pub num_nodes_with_stake: NonZeroUsize, #[serde(skip)] - /// My own public key, secret key, stake value - pub my_own_validator_config: ValidatorConfig, - #[serde(skip)] /// The known nodes' public key and stake value pub known_nodes_with_stake: Vec>, #[serde(skip)] @@ -67,7 +64,6 @@ impl From> for HotShotConfig { num_nodes_with_stake: val.num_nodes_with_stake, known_da_nodes: val.known_da_nodes, known_nodes_with_stake: val.known_nodes_with_stake, - my_own_validator_config: val.my_own_validator_config, da_staked_committee_size: val.staked_da_nodes, fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, next_view_timeout: val.next_view_timeout, @@ -120,7 +116,6 @@ impl HotShotConfigFile { Self { num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), start_threshold: (1, 1), - my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, staked_da_nodes, known_da_nodes, diff --git a/types/src/lib.rs b/types/src/lib.rs index 44c81e4e68..f158fcf3ae 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -8,7 +8,6 @@ use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use bincode::Options; -use derivative::Derivative; use displaydoc::Display; use light_client::StateVerKey; use tracing::error; @@ -64,15 +63,12 @@ where assert_future::(Box::pin(fut)) } -#[derive(serde::Serialize, serde::Deserialize, Clone, Derivative, Display)] -#[serde(bound(deserialize = ""))] -#[derivative(Debug(bound = ""))] +#[derive(Clone, Debug, Display)] /// config for validator, including public key, private key, stake value pub struct ValidatorConfig { /// The validator's public key and stake value pub public_key: KEY, /// The validator's private key, should be in the mempool, not public - #[derivative(Debug = "ignore")] pub private_key: KEY::PrivateKey, /// The validator's stake pub stake_value: u64, @@ -176,8 +172,6 @@ pub struct HotShotConfig { pub known_nodes_with_stake: Vec>, /// All public keys known to be DA nodes pub known_da_nodes: Vec>, - /// My own validator config, including my public key, private key, stake value, serving as private parameter - pub my_own_validator_config: ValidatorConfig, /// List of DA committee (staking)nodes for static DA committee pub da_staked_committee_size: usize, /// Number of fixed leaders for GPU VID, normally it will be 0, it's only used when running GPU VID diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 1fac6614e4..07644df0ef 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -36,7 +36,7 @@ pub type StateSignKey = schnorr::SignKey; /// Concrete for circuit's public input pub type PublicInput = GenericPublicInput; /// Key pairs for signing/verifying a light client state -#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Default, Clone)] pub struct StateKeyPair(pub schnorr::KeyPair); /// Request body to send to the state relay server diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 2741912f14..43110ed8b2 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -23,7 +23,7 @@ use crate::{ stake_table::StakeTableEntry, traits::{ qc::QuorumCertificateScheme, - signature_key::{BuilderSignatureKey, SignatureKey}, + signature_key::{BuilderSignatureKey, PrivateSignatureKey, SignatureKey}, }, }; @@ -34,6 +34,20 @@ pub type BLSPubKey = VerKey; /// Public parameters for BLS signature scheme pub type BLSPublicParam = (); +impl PrivateSignatureKey for BLSPrivKey { + fn to_bytes(&self) -> Vec { + self.to_bytes() + } + + fn from_bytes(bytes: &[u8]) -> anyhow::Result { + Ok(Self::from_bytes(bytes)) + } + + fn to_tagged_base64(&self) -> Result { + self.to_tagged_base64() + } +} + impl SignatureKey for BLSPubKey { type PrivateKey = BLSPrivKey; type StakeTableEntry = StakeTableEntry; diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 52503c0cd5..93edb4e1ba 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -20,7 +20,7 @@ use committable::Committable; use ethereum_types::U256; use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use tagged_base64::TaggedBase64; +use tagged_base64::{TaggedBase64, Tb64Error}; use super::EncodeBytes; use crate::{ @@ -36,6 +36,24 @@ pub trait StakeTableEntryType { fn public_key(&self) -> K; } +/// Trait for abstracting private signature key +pub trait PrivateSignatureKey: + Send + Sync + Sized + Clone + Debug + Eq + Hash + for<'a> TryFrom<&'a TaggedBase64> +{ + /// Serialize the private key into bytes + fn to_bytes(&self) -> Vec; + + /// Deserialize the private key from bytes + /// # Errors + /// If deserialization fails. + fn from_bytes(bytes: &[u8]) -> anyhow::Result; + + /// Serialize the private key into TaggedBase64 blob. + /// # Errors + /// If serialization fails. + fn to_tagged_base64(&self) -> Result; +} + /// Trait for abstracting public key signatures /// Self is the public key type pub trait SignatureKey: @@ -56,15 +74,7 @@ pub trait SignatureKey: + Into { /// The private key type for this signature algorithm - type PrivateKey: Send - + Sync - + Sized - + Clone - + Debug - + Eq - + Serialize - + for<'a> Deserialize<'a> - + Hash; + type PrivateKey: PrivateSignatureKey; /// The type of the entry that contain both public key and stake value type StakeTableEntry: StakeTableEntryType + Send @@ -179,15 +189,7 @@ pub trait BuilderSignatureKey: + Display { /// The type of the keys builder would use to sign its messages - type BuilderPrivateKey: Send - + Sync - + Sized - + Clone - + Debug - + Eq - + Serialize - + for<'a> Deserialize<'a> - + Hash; + type BuilderPrivateKey: PrivateSignatureKey; /// The type of the signature builder would use to sign its messages type BuilderSignature: Send From e8a2ceb57cde2c103e55f2607fd13c61607a201a Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:25:36 -0500 Subject: [PATCH 1289/1393] update logging (#3844) --- example-types/src/storage_types.rs | 3 ++- hotshot/src/tasks/mod.rs | 3 +-- task-impls/src/network.rs | 9 +++++---- task-impls/src/quorum_proposal_recv/mod.rs | 3 +-- utils/src/anytrace.rs | 2 +- utils/src/anytrace/macros.rs | 4 +++- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 77b6125c52..e93a9b3e72 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -9,7 +9,6 @@ use std::{ sync::Arc, }; -use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; use anyhow::{bail, Result}; use async_lock::RwLock; use async_trait::async_trait; @@ -29,6 +28,8 @@ use hotshot_types::{ }; use jf_vid::VidScheme; +use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; + type VidShares = HashMap< ::View, HashMap<::SignatureKey, Proposal>>, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 5d1476673d..2b9b1c3f16 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,8 +8,7 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::collections::BTreeMap; -use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, fmt::Debug, sync::Arc, time::Duration}; use async_broadcast::{broadcast, RecvError}; use async_compatibility_layer::art::{async_sleep, async_spawn}; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 7fffadfb11..c515d94e5b 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -9,10 +9,6 @@ use std::{ sync::Arc, }; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, -}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -45,6 +41,11 @@ use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::{broadcast_event, cancel_task}, +}; + /// the network message task state #[derive(Clone)] pub struct NetworkMessageTaskState { diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 1cff840bf2..06d9088bed 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -23,7 +23,7 @@ use hotshot_types::{ message::UpgradeLock, simple_certificate::UpgradeCertificate, traits::{ - node_implementation::{NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, vote::HasViewNumber, @@ -39,7 +39,6 @@ use crate::{ events::{HotShotEvent, ProposalMissing}, helpers::{broadcast_event, cancel_task, parent_leaf_and_state}, }; -use hotshot_types::traits::node_implementation::ConsensusTime; /// Event handlers for this task. mod handlers; diff --git a/utils/src/anytrace.rs b/utils/src/anytrace.rs index 62628207d1..05d6c1d9ea 100644 --- a/utils/src/anytrace.rs +++ b/utils/src/anytrace.rs @@ -6,7 +6,7 @@ mod macros; pub use macros::*; /// Default log level for the crate -pub const DEFAULT_LOG_LEVEL: Level = Level::Info; +pub const DEFAULT_LOG_LEVEL: Level = Level::Debug; /// Trait for logging errors pub trait Log { diff --git a/utils/src/anytrace/macros.rs b/utils/src/anytrace/macros.rs index 71036d21fb..29c5178b07 100644 --- a/utils/src/anytrace/macros.rs +++ b/utils/src/anytrace/macros.rs @@ -1,8 +1,10 @@ #[macro_export] /// Print the file and line number of the location this macro is invoked +/// +/// Note: temporarily prints only a null string to reduce verbosity of logging macro_rules! line_info { () => { - format!("{}:{}", file!(), line!()) + format!("") }; } pub use line_info; From e0f414c5daeb84df75d570545ce4fc462acb3346 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 7 Nov 2024 20:50:24 -0500 Subject: [PATCH 1290/1393] [Tech debt] Remove `async-std` (#3845) * tests and CI * remove `async-std` * `fmt` * fix doc build * remove counter tests * remove counter tests * build w/o lockfile * `lock` -> `std` * Revert "`lock` -> `std`" This reverts commit 21ebf054f3dd862a70d6b776f4016647cfd481c4. * lock * `async_broadcast` * overflow * Revert "`async_broadcast`" This reverts commit f03bb57fcc5fcdefe708c79fee75d3ae85ae04f1. * `try_send` --- example-types/Cargo.toml | 10 - example-types/src/node_types.rs | 3 +- example-types/src/testable_delay.rs | 6 +- examples/Cargo.toml | 16 +- examples/combined/all.rs | 21 +- examples/combined/multi-validator.rs | 16 +- examples/combined/orchestrator.rs | 10 +- examples/combined/validator.rs | 9 +- examples/infra/mod.rs | 5 +- examples/libp2p/all.rs | 17 +- examples/libp2p/multi-validator.rs | 16 +- examples/libp2p/validator.rs | 10 +- examples/orchestrator.rs | 10 +- examples/push-cdn/README.md | 30 +- examples/push-cdn/all.rs | 19 +- examples/push-cdn/broker.rs | 4 +- examples/push-cdn/marshal.rs | 3 +- examples/push-cdn/multi-validator.rs | 16 +- examples/push-cdn/validator.rs | 10 +- examples/push-cdn/whitelist-adapter.rs | 3 +- fakeapi/Cargo.toml | 2 +- fakeapi/src/fake_solver.rs | 3 +- hotshot/Cargo.toml | 16 +- hotshot/src/helpers.rs | 35 + hotshot/src/lib.rs | 15 +- hotshot/src/tasks/mod.rs | 12 +- hotshot/src/tasks/task_state.rs | 4 +- .../src/traits/networking/combined_network.rs | 11 +- .../src/traits/networking/libp2p_network.rs | 122 ++- .../src/traits/networking/memory_network.rs | 24 +- .../src/traits/networking/push_cdn_network.rs | 12 +- libp2p-networking/Cargo.toml | 6 +- libp2p-networking/README.md | 111 --- .../src/network/behaviours/dht/bootstrap.rs | 16 +- .../src/network/behaviours/dht/mod.rs | 18 +- .../src/network/behaviours/direct_message.rs | 21 +- libp2p-networking/src/network/mod.rs | 14 - libp2p-networking/src/network/node.rs | 34 +- libp2p-networking/src/network/node/handle.rs | 85 +- libp2p-networking/src/network/transport.rs | 16 +- libp2p-networking/test.py | 121 --- libp2p-networking/tests/common/mod.rs | 271 ------- libp2p-networking/tests/counter.rs | 726 ------------------ macros/src/lib.rs | 16 +- orchestrator/Cargo.toml | 5 - orchestrator/README.md | 2 +- orchestrator/src/client.rs | 6 +- task-impls/Cargo.toml | 5 - task-impls/src/builder.rs | 4 +- task-impls/src/consensus/handlers.rs | 6 +- task-impls/src/consensus/mod.rs | 6 +- task-impls/src/da.rs | 9 +- task-impls/src/harness.rs | 12 +- task-impls/src/helpers.rs | 11 +- task-impls/src/network.rs | 13 +- task-impls/src/quorum_proposal/mod.rs | 6 - .../src/quorum_proposal_recv/handlers.rs | 4 +- task-impls/src/quorum_proposal_recv/mod.rs | 9 +- task-impls/src/quorum_vote/mod.rs | 6 - task-impls/src/request.rs | 19 +- task-impls/src/response.rs | 10 +- task-impls/src/transactions.rs | 14 +- task-impls/src/view_sync.rs | 21 +- task/Cargo.toml | 5 - task/src/dependency.rs | 16 +- task/src/dependency_task.rs | 12 +- task/src/task.rs | 19 +- testing/Cargo.toml | 11 +- testing/src/block_builder/mod.rs | 16 +- testing/src/block_builder/random.rs | 13 +- testing/src/block_builder/simple.rs | 6 +- testing/src/completion_task.rs | 9 +- testing/src/test_runner.rs | 64 +- testing/src/test_task.rs | 16 +- testing/src/txn_task.rs | 9 +- testing/tests/tests_1/block_builder.rs | 10 +- testing/tests/tests_1/da_task.rs | 12 +- testing/tests/tests_1/libp2p.rs | 24 +- testing/tests/tests_1/network_task.rs | 65 +- .../tests_1/quorum_proposal_recv_task.rs | 12 +- testing/tests/tests_1/quorum_proposal_task.rs | 36 +- testing/tests/tests_1/quorum_vote_task.rs | 18 +- testing/tests/tests_1/transaction_task.rs | 6 +- .../tests_1/upgrade_task_with_proposal.rs | 9 +- .../tests/tests_1/upgrade_task_with_vote.rs | 6 +- testing/tests/tests_1/vid_task.rs | 6 +- testing/tests/tests_1/view_sync_task.rs | 6 +- .../tests/tests_1/vote_dependency_handle.rs | 12 +- testing/tests/tests_2/catchup.rs | 35 +- testing/tests/tests_3/memory_network.rs | 61 +- testing/tests/tests_5/broken_3_chain.rs | 8 +- testing/tests/tests_5/combined_network.rs | 39 +- testing/tests/tests_5/fake_solver.rs | 53 +- testing/tests/tests_5/push_cdn.rs | 10 +- testing/tests/tests_5/timeout.rs | 14 +- testing/tests/tests_5/unreliable_network.rs | 56 +- types/Cargo.toml | 7 +- types/src/data.rs | 5 - types/src/error.rs | 2 - types/src/traits/network.rs | 7 +- 100 files changed, 627 insertions(+), 2201 deletions(-) create mode 100644 hotshot/src/helpers.rs delete mode 100644 libp2p-networking/README.md delete mode 100755 libp2p-networking/test.py delete mode 100644 libp2p-networking/tests/common/mod.rs delete mode 100644 libp2p-networking/tests/counter.rs diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index e4ceb2e8c4..0cdba8fc73 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -13,7 +13,6 @@ gpu-vid = ["hotshot-task-impls/gpu-vid"] [dependencies] async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-trait = { workspace = true } anyhow = { workspace = true } sha3 = "^0.10" @@ -38,13 +37,4 @@ jf-vid = { workspace = true } vbs = { workspace = true } url = { workspace = true } reqwest = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints.rust] -unexpected_cfgs = { level = "allow", check-cfg = [ - 'cfg(async_executor_impl, values("tokio", "async-std"))', -] } diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 85f9232309..80b634515c 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -252,8 +252,7 @@ mod tests { } } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] /// Test that the view number affects the commitment post-marketplace async fn test_versioned_commitment_includes_view() { let upgrade_lock = UpgradeLock::new(); diff --git a/example-types/src/testable_delay.rs b/example-types/src/testable_delay.rs index 98bb6c7d6b..07f460eaf3 100644 --- a/example-types/src/testable_delay.rs +++ b/example-types/src/testable_delay.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, time::Duration}; -use async_compatibility_layer::art::async_sleep; use async_trait::async_trait; use rand::Rng; +use tokio::time::sleep; #[derive(Eq, Hash, PartialEq, Debug, Clone)] /// What type of delay we want to apply to @@ -87,13 +87,13 @@ pub trait TestableDelay { match settings.delay_option { DelayOptions::None => {} DelayOptions::Fixed => { - async_sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; + sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; } DelayOptions::Random => { let sleep_in_millis = rand::thread_rng().gen_range( settings.min_time_in_milliseconds..=settings.max_time_in_milliseconds, ); - async_sleep(Duration::from_millis(sleep_in_millis)).await; + sleep(Duration::from_millis(sleep_in_millis)).await; } } } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 313c6ac746..dc6434bb75 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -82,7 +82,6 @@ path = "push-cdn/whitelist-adapter.rs" [dependencies] async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" @@ -117,25 +116,12 @@ vec1 = { workspace = true } url = { workspace = true } tracing = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } + cdn-client = { workspace = true } cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } -cdn-client = { workspace = true, features = ["runtime-async-std"] } -cdn-broker = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } -cdn-marshal = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } - [dev-dependencies] clap.workspace = true toml = { workspace = true } diff --git a/examples/combined/all.rs b/examples/combined/all.rs index d6edbf3253..1ccace8b48 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -10,12 +10,9 @@ pub mod types; use std::path::Path; -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use cdn_broker::Broker; use cdn_marshal::Marshal; +use hotshot::helpers::initialize_logging; use hotshot::{ traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}, types::SignatureKey, @@ -25,6 +22,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; use rand::{rngs::StdRng, RngCore, SeedableRng}; +use tokio::spawn; use tracing::{error, instrument}; use crate::{ @@ -36,12 +34,11 @@ use crate::{ #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); let (config, orchestrator_url) = read_orchestrator_init_config::(); @@ -92,7 +89,7 @@ async fn main() { }; // Create and spawn the broker - async_spawn(async move { + spawn(async move { let broker: Broker::SignatureKey>> = Broker::new(config).await.expect("broker failed to start"); @@ -120,7 +117,7 @@ async fn main() { }; // Spawn the marshal - async_spawn(async move { + spawn(async move { let marshal: Marshal::SignatureKey>> = Marshal::new(marshal_config) .await @@ -133,7 +130,7 @@ async fn main() { }); // orchestrator - async_spawn(run_orchestrator::(OrchestratorArgs { + spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); @@ -147,7 +144,7 @@ async fn main() { let orchestrator_url = orchestrator_url.clone(); let builder_address = gen_local_address::(i); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, diff --git a/examples/combined/multi-validator.rs b/examples/combined/multi-validator.rs index 71ab7afd91..b721cb5c4f 100644 --- a/examples/combined/multi-validator.rs +++ b/examples/combined/multi-validator.rs @@ -5,13 +5,11 @@ // along with the HotShot repository. If not, see . //! A multi-validator using both the web server libp2p -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tokio::spawn; use tracing::instrument; use crate::types::{Network, NodeImpl, ThisRun}; @@ -23,19 +21,19 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = MultiValidatorArgs::parse(); tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) diff --git a/examples/combined/orchestrator.rs b/examples/combined/orchestrator.rs index 17b6f2dec2..c3d399f489 100644 --- a/examples/combined/orchestrator.rs +++ b/examples/combined/orchestrator.rs @@ -8,7 +8,7 @@ /// types used for this example pub mod types; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot::helpers::initialize_logging; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; @@ -17,12 +17,12 @@ use crate::infra::{read_orchestrator_init_config, run_orchestrator, Orchestrator #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); run_orchestrator::(OrchestratorArgs:: { url: orchestrator_url.clone(), diff --git a/examples/combined/validator.rs b/examples/combined/validator.rs index 5007181adc..fd6ff83957 100644 --- a/examples/combined/validator.rs +++ b/examples/combined/validator.rs @@ -6,8 +6,8 @@ //! A validator using both the web server and libp2p -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; @@ -22,12 +22,11 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); let mut args = ValidatorArgs::parse(); diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 7fbae5db22..9e031c02fa 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -15,7 +15,6 @@ use std::{ time::Instant, }; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_trait::async_trait; use cdn_broker::reexports::crypto::signature::KeyPair; use chrono::Utc; @@ -917,8 +916,8 @@ pub async fn main_entry_point< ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { - setup_logging(); - setup_backtrace(); + // Initialize logging + hotshot::helpers::initialize_logging(); info!("Starting validator"); diff --git a/examples/libp2p/all.rs b/examples/libp2p/all.rs index 7cf2101d9f..4fd99cd0e8 100644 --- a/examples/libp2p/all.rs +++ b/examples/libp2p/all.rs @@ -8,13 +8,11 @@ /// types used for this example pub mod types; -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; +use tokio::spawn; use tracing::instrument; use crate::{ @@ -26,18 +24,17 @@ use crate::{ #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); // use configfile args let (config, orchestrator_url) = read_orchestrator_init_config::(); // orchestrator - async_spawn(run_orchestrator::(OrchestratorArgs { + spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); @@ -50,7 +47,7 @@ async fn main() { let advertise_address = gen_local_address::(i); let builder_address = gen_local_address::(i); let orchestrator_url = orchestrator_url.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, diff --git a/examples/libp2p/multi-validator.rs b/examples/libp2p/multi-validator.rs index 42621d6946..0767245c3b 100644 --- a/examples/libp2p/multi-validator.rs +++ b/examples/libp2p/multi-validator.rs @@ -5,13 +5,11 @@ // along with the HotShot repository. If not, see . //! A multi-validator using libp2p -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tokio::spawn; use tracing::instrument; use crate::types::{Network, NodeImpl, ThisRun}; @@ -23,19 +21,19 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = MultiValidatorArgs::parse(); tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) diff --git a/examples/libp2p/validator.rs b/examples/libp2p/validator.rs index 1e2bb8d096..c85e52688e 100644 --- a/examples/libp2p/validator.rs +++ b/examples/libp2p/validator.rs @@ -6,8 +6,8 @@ //! A validator using libp2p -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; @@ -22,12 +22,12 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let mut args = ValidatorArgs::parse(); // If we did not set the advertise address, use our local IP and port 8000 diff --git a/examples/orchestrator.rs b/examples/orchestrator.rs index f78c0b35b1..3bb419b980 100644 --- a/examples/orchestrator.rs +++ b/examples/orchestrator.rs @@ -6,7 +6,7 @@ //! A orchestrator -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot::helpers::initialize_logging; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; @@ -16,12 +16,12 @@ use crate::infra::{read_orchestrator_init_config, run_orchestrator, Orchestrator #[path = "./infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); run_orchestrator::(OrchestratorArgs:: { url: orchestrator_url.clone(), diff --git a/examples/push-cdn/README.md b/examples/push-cdn/README.md index c49f7dedb2..c460beb89a 100644 --- a/examples/push-cdn/README.md +++ b/examples/push-cdn/README.md @@ -28,27 +28,27 @@ Examples: **Run Locally** -`just async_std example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml` +`just example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml` OR ``` docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb -just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 -just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -just async_std example orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 -just async_std example multi-validator-push-cdn -- 10 http://127.0.0.1:4444 +just example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just example orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 +just example multi-validator-push-cdn -- 10 http://127.0.0.1:4444 ``` **Run with GPU-VID** ``` docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb -just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 -just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -just async_std example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 -just async_std example_gpuvid_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 +just example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 +just example_gpuvid_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 sleep 1m -just async_std example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 +just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. @@ -57,12 +57,12 @@ Where ones using `example_gpuvid_leader` could be the leader and should be runni If you don't have a gpu but want to test out fixed leader, you can run: ``` docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb -just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 -just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -just async_std example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 -just async_std example_fixed_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 +just example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 +just example_fixed_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 sleep 1m -just async_std example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 +just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` Remember, you have to run leaders first, then other validators, so that leaders will have lower index. \ No newline at end of file diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 3d3de5d42e..726f41b22e 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -10,9 +10,9 @@ pub mod types; use std::path::Path; -use async_compatibility_layer::art::async_spawn; use cdn_broker::{reexports::crypto::signature::KeyPair, Broker}; use cdn_marshal::Marshal; +use hotshot::helpers::initialize_logging; use hotshot::{ traits::implementations::{TestingDef, WrappedSignatureKey}, types::SignatureKey, @@ -22,6 +22,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT}; use rand::{rngs::StdRng, RngCore, SeedableRng}; +use tokio::spawn; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, @@ -34,18 +35,16 @@ pub mod infra; use tracing::error; -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] async fn main() { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); // use configfile args let (config, orchestrator_url) = read_orchestrator_init_config::(); // Start the orhcestrator - async_spawn(run_orchestrator::(OrchestratorArgs { + spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); @@ -98,7 +97,7 @@ async fn main() { }; // Create and spawn the broker - async_spawn(async move { + spawn(async move { let broker: Broker::SignatureKey>> = Broker::new(config).await.expect("broker failed to start"); @@ -124,7 +123,7 @@ async fn main() { }; // Spawn the marshal - async_spawn(async move { + spawn(async move { let marshal: Marshal::SignatureKey>> = Marshal::new(marshal_config) .await @@ -141,7 +140,7 @@ async fn main() { for i in 0..(config.config.num_nodes_with_stake.get()) { let orchestrator_url = orchestrator_url.clone(); let builder_address = gen_local_address::(i); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index 7eabbec50f..a9995805ca 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -68,8 +68,8 @@ struct Args { #[arg(long, default_value_t = 1_073_741_824)] global_memory_pool_size: usize, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] + +#[tokio::main] async fn main() -> Result<()> { // Parse command line arguments let args = Args::parse(); diff --git a/examples/push-cdn/marshal.rs b/examples/push-cdn/marshal.rs index 39d2267bd8..569cb0dc33 100644 --- a/examples/push-cdn/marshal.rs +++ b/examples/push-cdn/marshal.rs @@ -52,8 +52,7 @@ struct Args { global_memory_pool_size: usize, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] async fn main() -> Result<()> { // Parse command-line arguments let args = Args::parse(); diff --git a/examples/push-cdn/multi-validator.rs b/examples/push-cdn/multi-validator.rs index b8070f8b1c..54718468b3 100644 --- a/examples/push-cdn/multi-validator.rs +++ b/examples/push-cdn/multi-validator.rs @@ -5,13 +5,11 @@ // along with the HotShot repository. If not, see . //! A multi validator -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tokio::spawn; use tracing::instrument; use crate::types::{Network, NodeImpl, ThisRun}; @@ -23,19 +21,19 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = MultiValidatorArgs::parse(); tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) diff --git a/examples/push-cdn/validator.rs b/examples/push-cdn/validator.rs index 70d53cdc33..7b546dfabe 100644 --- a/examples/push-cdn/validator.rs +++ b/examples/push-cdn/validator.rs @@ -5,8 +5,8 @@ // along with the HotShot repository. If not, see . //! A validator -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use tracing::{debug, instrument}; @@ -20,12 +20,12 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = ValidatorArgs::parse(); debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; diff --git a/examples/push-cdn/whitelist-adapter.rs b/examples/push-cdn/whitelist-adapter.rs index f787f271e4..e855a41aba 100644 --- a/examples/push-cdn/whitelist-adapter.rs +++ b/examples/push-cdn/whitelist-adapter.rs @@ -40,8 +40,7 @@ struct Args { local_discovery: bool, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] async fn main() -> Result<()> { // Parse the command line arguments let args = Args::parse(); diff --git a/fakeapi/Cargo.toml b/fakeapi/Cargo.toml index 315af7f949..b2bd022c45 100644 --- a/fakeapi/Cargo.toml +++ b/fakeapi/Cargo.toml @@ -11,8 +11,8 @@ repository.workspace = true [dependencies] toml = { workspace = true } tide-disco = { workspace = true } +tokio = { workspace = true } anyhow = { workspace = true } -async-compatibility-layer = { workspace = true } hotshot-types = { path = "../types" } vbs = { workspace = true } serde = { workspace = true } diff --git a/fakeapi/src/fake_solver.rs b/fakeapi/src/fake_solver.rs index 3c4beabd03..b52418cc9b 100644 --- a/fakeapi/src/fake_solver.rs +++ b/fakeapi/src/fake_solver.rs @@ -4,7 +4,6 @@ use std::{ }; use anyhow::Result; -use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; use futures::FutureExt; use hotshot_example_types::auction_results_provider_types::TestAuctionResult; @@ -95,7 +94,7 @@ impl FakeSolverState { } FakeSolverFaultType::TimeoutFault => { // Sleep for the preconfigured 1 second timeout interval - async_sleep(SOLVER_MAX_TIMEOUT_S).await; + tokio::time::sleep(SOLVER_MAX_TIMEOUT_S).await; } } } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index c3e345f43a..0a02ef4131 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -24,7 +24,6 @@ hotshot-testing = [] [dependencies] anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" @@ -51,6 +50,7 @@ thiserror = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } +tracing-subscriber = { workspace = true } vbs = { workspace = true } jf-signature.workspace = true blake3.workspace = true @@ -60,25 +60,11 @@ num_enum = "0.7" parking_lot = "0.12" utils = { path = "../utils" } -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } cdn-client = { workspace = true } cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } -cdn-client = { workspace = true, features = ["runtime-async-std"] } -cdn-broker = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } -cdn-marshal = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } - - [dev-dependencies] blake3 = { workspace = true } clap.workspace = true diff --git a/hotshot/src/helpers.rs b/hotshot/src/helpers.rs new file mode 100644 index 0000000000..d2a9ae33f5 --- /dev/null +++ b/hotshot/src/helpers.rs @@ -0,0 +1,35 @@ +use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter}; + +/// Initializes logging +pub fn initialize_logging() { + // Parse the `RUST_LOG_SPAN_EVENTS` environment variable + let span_event_filter = match std::env::var("RUST_LOG_SPAN_EVENTS") { + Ok(val) => val + .split(',') + .map(|s| match s.trim() { + "new" => FmtSpan::NEW, + "enter" => FmtSpan::ENTER, + "exit" => FmtSpan::EXIT, + "close" => FmtSpan::CLOSE, + "active" => FmtSpan::ACTIVE, + "full" => FmtSpan::FULL, + _ => FmtSpan::NONE, + }) + .fold(FmtSpan::NONE, |acc, x| acc | x), + Err(_) => FmtSpan::NONE, + }; + + // Conditionally initialize in `json` mode + if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_span_events(span_event_filter) + .json() + .init(); + } else { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_span_events(span_event_filter) + .init(); + }; +} diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index eddf722a0b..e6b189d01c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -26,6 +26,9 @@ pub mod types; pub mod tasks; +/// Contains helper functions for the crate +pub mod helpers; + use std::{ collections::{BTreeMap, HashMap}, num::NonZeroUsize, @@ -34,12 +37,12 @@ use std::{ }; use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::join; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; +use tokio::{spawn, time::sleep}; // Internal /// Reexport error type pub use hotshot_types::error::HotShotError; @@ -388,9 +391,9 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext> = broadcast(EVENT_CHANNEL_SIZE); - let _recv_loop_handle = async_spawn(async move { + let _recv_loop_handle = spawn(async move { loop { let msg = match select(left_receiver.recv(), right_receiver.recv()).await { Either::Left(msg) => Either::Left(msg.0.unwrap().as_ref().clone()), @@ -723,7 +726,7 @@ where } }); - let _send_loop_handle = async_spawn(async move { + let _send_loop_handle = spawn(async move { loop { if let Ok(msg) = receiver_from_network.recv().await { let mut state = send_state.write().await; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 2b9b1c3f16..fcc63c2b20 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -11,7 +11,6 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, sync::Arc, time::Duration}; use async_broadcast::{broadcast, RecvError}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::{ @@ -41,6 +40,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; +use tokio::{spawn, time::sleep}; use vbs::version::StaticVersionType; use crate::{ @@ -101,14 +101,14 @@ pub fn add_queue_len_task, V: Vers let consensus = handle.hotshot.consensus(); let rx = handle.internal_event_stream.1.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let task_handle = async_spawn(async move { + let task_handle = spawn(async move { futures::pin_mut!(shutdown_signal); loop { futures::select! { () = shutdown_signal => { return; }, - () = async_sleep(Duration::from_millis(500)).fuse() => { + () = sleep(Duration::from_millis(500)).fuse() => { consensus.read().await.metrics.internal_event_queue_len.set(rx.len()); } } @@ -139,7 +139,7 @@ pub fn add_network_message_task< let network = Arc::clone(channel); let mut state = network_state.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let task_handle = async_spawn(async move { + let task_handle = spawn(async move { futures::pin_mut!(shutdown_signal); loop { @@ -412,7 +412,7 @@ where let private_key = handle.private_key().clone(); let upgrade_lock = handle.hotshot.upgrade_lock.clone(); let consensus = Arc::clone(&handle.hotshot.consensus()); - let send_handle = async_spawn(async move { + let send_handle = spawn(async move { futures::pin_mut!(shutdown_signal); let recv_stream = stream::unfold(original_receiver, |mut recv| async move { @@ -466,7 +466,7 @@ where // spawn a task to listen on the newly created event stream, // and broadcast the transformed events to the original internal event stream let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let recv_handle = async_spawn(async move { + let recv_handle = spawn(async move { futures::pin_mut!(shutdown_signal); let network_recv_stream = diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 1a9065692d..97ff8e277e 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -9,7 +9,6 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; -use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ @@ -26,6 +25,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; +use tokio::spawn; use crate::{types::SystemContextHandle, Versions}; @@ -319,7 +319,7 @@ impl, V: Versions> CreateTaskState cur_view_time: Utc::now().timestamp(), cur_epoch: handle.cur_epoch().await, output_event_stream: handle.hotshot.external_event_stream.0.clone(), - timeout_task: async_spawn(async {}), + timeout_task: spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, consensus: OuterConsensus::new(consensus), last_decided_view: handle.cur_view().await, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 9678c54244..6c891fa6f7 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -19,10 +19,6 @@ use std::{ }; use async_broadcast::{broadcast, InactiveReceiver, Sender}; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::TrySendError, -}; use async_lock::RwLock; use async_trait::async_trait; use futures::{join, select, FutureExt}; @@ -45,6 +41,7 @@ use hotshot_types::{ }; use lru::LruCache; use parking_lot::RwLock as PlRwLock; +use tokio::{spawn, sync::mpsc::error::TrySendError, time::sleep}; use tracing::{debug, info, warn}; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; @@ -183,8 +180,8 @@ impl CombinedNetworks { .1 .activate_cloned(); // Spawn a task that sleeps for `duration` and then sends the message if it wasn't cancelled - async_spawn(async move { - async_sleep(duration).await; + spawn(async move { + sleep(duration).await; if receiver.try_recv().is_ok() { // The task has been cancelled because the view progressed, it means the primary is working fine debug!( @@ -483,7 +480,7 @@ impl ConnectedNetwork for CombinedNetworks T: NodeType + 'a, { let delayed_tasks_channels = Arc::clone(&self.delayed_tasks_channels); - async_spawn(async move { + spawn(async move { let mut map_lock = delayed_tasks_channels.write().await; while let Some((first_view, _)) = map_lock.first_key_value() { // Broadcast a cancelling signal to all the tasks related to each view older than the new one diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index a8bc4ff6e4..0713429e66 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -23,20 +23,10 @@ use std::{ }; use anyhow::{anyhow, Context}; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::{ - self, bounded, unbounded, Receiver as BoundedReceiver, Sender as BoundedSender, - TrySendError, UnboundedReceiver, UnboundedSender, - }, -}; use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; -use futures::{ - future::{join_all, Either}, - FutureExt, -}; +use futures::future::join_all; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -73,6 +63,14 @@ use libp2p_networking::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; +use tokio::{ + select, spawn, + sync::{ + mpsc::{channel, error::TrySendError, Receiver, Sender}, + Mutex, + }, + time::sleep, +}; use tracing::{error, info, instrument, trace, warn}; use crate::BroadcastDelay; @@ -149,11 +147,11 @@ struct Libp2pNetworkInner { /// handle to control the network handle: Arc>, /// Message Receiver - receiver: UnboundedReceiver>, + receiver: Mutex>>, /// Sender for broadcast messages - sender: UnboundedSender>, + sender: Sender>, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) - node_lookup_send: BoundedSender>, + node_lookup_send: Sender>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -175,7 +173,7 @@ struct Libp2pNetworkInner { /// reliability_config reliability_config: Option>, /// Killswitch sender - kill_switch: channel::Sender<()>, + kill_switch: Sender<()>, } /// Networking implementation that uses libp2p @@ -494,7 +492,7 @@ impl Libp2pNetwork { if self.is_ready() { break; } - async_sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; } } @@ -537,15 +535,15 @@ impl Libp2pNetwork { // unbounded channels may not be the best choice (spammed?) // if bounded figure out a way to log dropped msgs - let (sender, receiver) = unbounded(); - let (node_lookup_send, node_lookup_recv) = bounded(10); - let (kill_tx, kill_rx) = bounded(1); + let (sender, receiver) = channel(1000); + let (node_lookup_send, node_lookup_recv) = channel(10); + let (kill_tx, kill_rx) = channel(1); rx.set_kill_switch(kill_rx); let mut result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: Arc::new(network_handle), - receiver, + receiver: Mutex::new(receiver), sender: sender.clone(), pk, bootstrap_addrs, @@ -580,16 +578,16 @@ impl Libp2pNetwork { #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] fn spawn_node_lookup( &self, - mut node_lookup_recv: BoundedReceiver>, + mut node_lookup_recv: Receiver>, ) { let handle = Arc::clone(&self.inner.handle); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = Arc::clone(&self.inner.latest_seen_view); // deals with handling lookup queue. should be infallible - async_spawn(async move { + spawn(async move { // cancels on shutdown - while let Ok(Some((view_number, pk))) = node_lookup_recv.recv().await { + while let Some(Some((view_number, pk))) = node_lookup_recv.recv().await { /// defines lookahead threshold based on the constant #[allow(clippy::cast_possible_truncation)] const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; @@ -615,19 +613,19 @@ impl Libp2pNetwork { let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); let inner = Arc::clone(&self.inner); - async_spawn({ + spawn({ let is_ready = Arc::clone(&self.inner.is_ready); async move { let bs_addrs = bootstrap_ref.read().await.clone(); // Add known peers to the network - handle.add_known_peers(bs_addrs).await.unwrap(); + handle.add_known_peers(bs_addrs).unwrap(); // Begin the bootstrap process - handle.begin_bootstrap().await?; + handle.begin_bootstrap()?; while !is_bootstrapped.load(Ordering::Relaxed) { - async_sleep(Duration::from_secs(1)).await; - handle.begin_bootstrap().await?; + sleep(Duration::from_secs(1)).await; + handle.begin_bootstrap()?; } // Subscribe to the QC topic @@ -643,7 +641,7 @@ impl Libp2pNetwork { .await .is_err() { - async_sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; } // Wait for the network to connect to the required number of peers @@ -663,19 +661,19 @@ impl Libp2pNetwork { } /// Handle events - async fn handle_recvd_events( + fn handle_recvd_events( &self, msg: NetworkEvent, - sender: &UnboundedSender>, + sender: &Sender>, ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { - sender.send(msg).await.map_err(|err| { + sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!("failed to send gossip message: {err}")) })?; } DirectRequest(msg, _pid, chan) => { - sender.send(msg).await.map_err(|err| { + sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!( "failed to send direct request message: {err}" )) @@ -691,7 +689,6 @@ impl Libp2pNetwork { )) })?, ) - .await .is_err() { error!("failed to ack!"); @@ -708,48 +705,39 @@ impl Libp2pNetwork { /// task to propagate messages to handlers /// terminates on shut down of network - fn handle_event_generator( - &self, - sender: UnboundedSender>, - mut network_rx: NetworkNodeReceiver, - ) { + fn handle_event_generator(&self, sender: Sender>, mut network_rx: NetworkNodeReceiver) { let handle = self.clone(); let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); - async_spawn(async move { + spawn(async move { let Some(mut kill_switch) = network_rx.take_kill_switch() else { tracing::error!( "`spawn_handle` was called on a network handle that was already closed" ); return; }; - let mut kill_switch = kill_switch.recv().boxed(); - let mut next_msg = network_rx.recv().boxed(); loop { - let msg_or_killed = futures::future::select(next_msg, kill_switch).await; - match msg_or_killed { - Either::Left((Ok(message), other_stream)) => { - match &message { + select! { + msg = network_rx.recv() => { + let Ok(message) = msg else { + warn!("Network receiver shut down!"); + return; + }; + + match message { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } GossipMsg(_) | DirectRequest(_, _, _) | DirectResponse(_, _) => { - let _ = handle.handle_recvd_events(message, &sender).await; + let _ = handle.handle_recvd_events(message, &sender); } NetworkEvent::ConnectedPeersUpdate(num_peers) => { - handle.inner.metrics.num_connected_peers.set(*num_peers); + handle.inner.metrics.num_connected_peers.set(num_peers); } } - // re-set the `kill_switch` for the next loop - kill_switch = other_stream; - // re-set `receiver.recv()` for the next loop - next_msg = network_rx.recv().boxed(); - } - Either::Left((Err(_), _)) => { - warn!("Network receiver shut down!"); - return; } - Either::Right(_) => { + + _kill_switch = kill_switch.recv() => { warn!("Event Handler shutdown"); return; } @@ -805,7 +793,7 @@ impl ConnectedNetwork for Libp2pNetwork { let topic = topic.to_string(); if self.inner.subscribed_topics.contains(&topic) { // Short-circuit-send the message to ourselves - self.inner.sender.send(message.clone()).await.map_err(|_| { + self.inner.sender.try_send(message.clone()).map_err(|_| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown })?; @@ -825,19 +813,19 @@ impl ConnectedNetwork for Libp2pNetwork { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { - if let Err(e) = handle_2.gossip_no_serialize(topic_2, msg).await { + if let Err(e) = handle_2.gossip_no_serialize(topic_2, msg) { metrics_2.num_failed_messages.add(1); warn!("Failed to broadcast to libp2p: {:?}", e); } }) }), ); - async_spawn(fut); + spawn(fut); return Ok(()); } } - if let Err(e) = self.inner.handle.gossip(topic, &message).await { + if let Err(e) = self.inner.handle.gossip(topic, &message) { self.inner.metrics.num_failed_messages.add(1); return Err(e); } @@ -893,7 +881,7 @@ impl ConnectedNetwork for Libp2pNetwork { // short circuit if we're dming ourselves if recipient == self.inner.pk { // panic if we already shut down? - self.inner.sender.send(message).await.map_err(|_x| { + self.inner.sender.try_send(message).map_err(|_x| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown })?; @@ -927,19 +915,19 @@ impl ConnectedNetwork for Libp2pNetwork { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { - if let Err(e) = handle_2.direct_request_no_serialize(pid, msg).await { + if let Err(e) = handle_2.direct_request_no_serialize(pid, msg) { metrics_2.num_failed_messages.add(1); warn!("Failed to broadcast to libp2p: {:?}", e); } }) }), ); - async_spawn(fut); + spawn(fut); return Ok(()); } } - match self.inner.handle.direct_request(pid, &message).await { + match self.inner.handle.direct_request(pid, &message) { Ok(()) => Ok(()), Err(e) => { self.inner.metrics.num_failed_messages.add(1); @@ -957,9 +945,11 @@ impl ConnectedNetwork for Libp2pNetwork { let result = self .inner .receiver + .lock() + .await .recv() .await - .map_err(|_x| NetworkError::ShutDown)?; + .ok_or(NetworkError::ShutDown)?; Ok(result) } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index e86c773e71..16eaa8ccbe 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -18,14 +18,9 @@ use std::{ }, }; -use async_compatibility_layer::{ - art::async_spawn, - channel::{bounded, BoundedStream, Receiver, SendError, Sender}, -}; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use dashmap::DashMap; -use futures::StreamExt; use hotshot_types::{ boxed_sync, traits::{ @@ -39,6 +34,10 @@ use hotshot_types::{ BoxSyncFuture, }; use rand::Rng; +use tokio::{ + spawn, + sync::mpsc::{channel, error::SendError, Receiver, Sender}, +}; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; use super::{NetworkError, NetworkReliability}; @@ -119,17 +118,16 @@ impl MemoryNetwork { reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); - let (input, task_recv) = bounded(128); - let (task_send, output) = bounded(128); + let (input, mut task_recv) = channel(128); + let (task_send, output) = channel(128); let in_flight_message_count = AtomicUsize::new(0); trace!("Channels open, spawning background task"); - async_spawn( + spawn( async move { debug!("Starting background task"); - let mut task_stream: BoundedStream> = task_recv.into_stream(); trace!("Entering processing loop"); - while let Some(vec) = task_stream.next().await { + while let Some(vec) = task_recv.recv().await { trace!(?vec, "Incoming message"); // Attempt to decode message let ts = task_send.clone(); @@ -282,7 +280,7 @@ impl ConnectedNetwork for MemoryNetwork { }) }), ); - async_spawn(fut); + spawn(fut); } } else { let res = node.input(message.clone()).await; @@ -342,7 +340,7 @@ impl ConnectedNetwork for MemoryNetwork { }) }), ); - async_spawn(fut); + spawn(fut); } Ok(()) } else { @@ -377,7 +375,7 @@ impl ConnectedNetwork for MemoryNetwork { .await .recv() .await - .map_err(|_x| NetworkError::ShutDown)?; + .ok_or(NetworkError::ShutDown)?; self.inner .in_flight_message_count .fetch_sub(1, Ordering::Relaxed); diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index a0a4799c4a..a4b9f33499 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -10,9 +10,6 @@ use std::{marker::PhantomData, sync::Arc}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, time::Duration}; -use async_compatibility_layer::channel::TrySendError; -#[cfg(feature = "hotshot-testing")] -use async_compatibility_layer::{art::async_sleep, art::async_spawn}; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ @@ -52,6 +49,7 @@ use hotshot_types::{ use num_enum::{IntoPrimitive, TryFromPrimitive}; #[cfg(feature = "hotshot-testing")] use rand::{rngs::StdRng, RngCore, SeedableRng}; +use tokio::{spawn, sync::mpsc::error::TrySendError, time::sleep}; use tracing::error; use super::NetworkError; @@ -338,14 +336,14 @@ impl TestableNetworkingImplementation }; // Create and spawn the broker - async_spawn(async move { + spawn(async move { let broker: Broker> = Broker::new(config).await.expect("broker failed to start"); // If we are the first broker by identifier, we need to sleep a bit // for discovery to happen first if other_broker_identifier > broker_identifier { - async_sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(2)).await; } // Error if we stopped unexpectedly @@ -371,7 +369,7 @@ impl TestableNetworkingImplementation }; // Spawn the marshal - async_spawn(async move { + spawn(async move { let marshal: Marshal> = Marshal::new(marshal_config) .await .expect("failed to spawn marshal"); @@ -543,7 +541,7 @@ impl ConnectedNetwork for PushCdnNetwork { // If we're paused, receive but don't process messages #[cfg(feature = "hotshot-testing")] if self.is_paused.load(Ordering::Relaxed) { - async_sleep(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; return Ok(vec![]); } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 16ada4dcfa..4488b35b58 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -16,7 +16,6 @@ hotshot-example-types = { path = "../example-types" } [dependencies] anyhow = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } @@ -43,14 +42,11 @@ lazy_static = { workspace = true } pin-project = "1" portpicker.workspace = true cbor4ii = "0.3" +tracing-subscriber = { workspace = true } -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } tokio-stream = "0.1" -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -libp2p = { workspace = true, features = ["async-std"] } -async-std = { workspace = true } [lints] workspace = true diff --git a/libp2p-networking/README.md b/libp2p-networking/README.md deleted file mode 100644 index 51afee2c44..0000000000 --- a/libp2p-networking/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# USAGE - -Networking library intended for use with HotShot. Builds upon abstractions from libp2p-rs. - -## CLI Demo - -To get very verbose logging: - -```bash -RUST_LOG_OUTPUT=OUTFILE RUST_LOG="trace" cargo run --features=async-std-executor --release -``` - -The idea here is to spin up several nodes in a p2p network. These nodes can share messages with each other. - -``` -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- -p 1111" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/1111/quic-v1 -p 2222" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/2222/quic-v1 -p 3333" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/3333/quic-v1 -p 4444" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/4444/quic-v1 -p 5555" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/5555/quic-v1 -p 6666" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/6666/quic-v1 -p 7777" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/7777/quic-v1 -p 8888" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/8888/quic-v1 -p 9999" -``` - -At this point the idea is that each node will continue to attempt to connect to nodes -until it hits at least 5 peers. - -Use `Tab` to switch between messages and prompt. Press `Enter` to broadcast a message to all connected nodes. -Press `Right Arrow` to direct-send a message to a randomly selected peer. -Press `q` to quit the program from the messages view. - -## Counter Single Machine Tests - -Each node has its own counter. The idea behind these tests is to support "broadcast" messages and "direct" messages to increment each nodes counter. - -`cargo test --features=async-std-executor --release stress` - -spawns off five integration tests. - -- Two that uses gossipsub to broadcast a counter increment from one node to all other nodes -- Two where one node increments its counter, then direct messages all nodes to increment their counters -- One that intersperses both broadcast and increments. -- One that intersperses both broadcast and increments. -- Two that publishes entries to the DHT and checks that other nodes can access these entries. - -This can fail on MacOS (and linux) due to "too many open files." The fix is: - -```bash -ulimit -n 4096 -``` - -## Counter Multi-machine tests - -In these tests, there are three types of nodes. `Regular` nodes that limit the number of incoming connections, `Bootstrap` nodes that allow all connections, and `Conductor` nodes that all nodes (bootstrap and regular) connect to and periodically ping with their state. This "conductor" node instructs nodes in the swarm to increment their state either via broadcast or direct messages in the same fashion as the single machine tests. - -In the direct message case, the conductor will increment the state of a randomly chosen node, `i`. Then the conductor will direct message all other nodes to request node `i`'s counter and increment their counter to the value in `i`'s node. In the broadcast case, the conductor will increment the state of a randomly chosen node, `i`, and tell `i` to broadcast this incremented state. - -In both cases, the test terminates as successful when the conductor receives the incremented state from all other nodes. Then, the conductor sends a special "kill" message to all known nodes and waits for them to disconnect. - -Metadata about the topology is currently read from an `identity_mapping.json` file that manually labels the type of node (bootstrap, regular, conductor). The conductor uses this to figure out information about all nodes in the network. The regular nodes use this to learn about their ip address and the addresses necessary to bootstrap onto the network. The bootstrap nodes only use this to learn about their ip addresses. - -### Running counter multi-machine tests - -A sample invocation locally: - -```bash -# run each line in a separate terminal -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8000 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8001 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8002 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8003 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8004 -``` - -### Network Emulation -One may introduce simulated network latency via the network emulation queueing discipline. This is implemented in two ways: on what is assumed to be a AWS EC2 instance, and in a docker container. Example usage on AWS EC2 instance: - -```bash -# run each line in a separate AWS instance -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -``` - -And on docker: - -```bash -# run each line in a separate Docker container instance -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -``` - -On an AWS instance, a separate network namespace is created and connected to `ens5` via a network bridge, and a netem qdisc is introduced to the veth interface in the namespace. Within a docker container, a netem qdisc is added on interface `eth0`. - -### Network Emulation Dockerfile - -Usage: - -``` -docker build . -t libp2p-networking -# expose ports -docker run -P 8000:8000 -P 9000:9000 libp2p-networking -``` - diff --git a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs index 075a70ffa7..566db12d4c 100644 --- a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ b/libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -6,8 +6,8 @@ use std::time::Duration; -use async_compatibility_layer::{art, channel::UnboundedSender}; use futures::{channel::mpsc, StreamExt}; +use tokio::{spawn, sync::mpsc::UnboundedSender, time::timeout}; use crate::network::ClientRequest; @@ -33,7 +33,7 @@ pub struct DHTBootstrapTask { impl DHTBootstrapTask { /// Run bootstrap task pub fn run(rx: mpsc::Receiver, tx: UnboundedSender) { - art::async_spawn(async move { + spawn(async move { let state = Self { rx, network_tx: tx, @@ -64,13 +64,12 @@ impl DHTBootstrapTask { break; } } - } else if let Ok(maybe_event) = - art::async_timeout(Duration::from_secs(120), self.rx.next()).await + } else if let Ok(maybe_event) = timeout(Duration::from_secs(120), self.rx.next()).await { match maybe_event { Some(InputEvent::StartBootstrap) => { tracing::debug!("Start bootstrap in bootstrap task"); - self.bootstrap().await; + self.bootstrap(); } Some(InputEvent::ShutdownBootstrap) => { tracing::debug!("ShutdownBootstrap received, shutting down"); @@ -86,13 +85,14 @@ impl DHTBootstrapTask { } } else { tracing::debug!("Start bootstrap in bootstrap task after timeout"); - self.bootstrap().await; + self.bootstrap(); } } } + /// Start bootstrap - async fn bootstrap(&mut self) { + fn bootstrap(&mut self) { self.in_progress = true; - let _ = self.network_tx.send(ClientRequest::BeginBootstrap).await; + let _ = self.network_tx.send(ClientRequest::BeginBootstrap); } } diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 27bf8a1b71..f6923dbcb7 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -13,7 +13,6 @@ use std::{ time::Duration, }; -use async_compatibility_layer::{art, channel::UnboundedSender}; /// a local caching layer for the DHT key value pairs use futures::{ channel::{mpsc, oneshot::Sender}, @@ -30,6 +29,7 @@ use libp2p::kad::{ }; use libp2p_identity::PeerId; use store::ValidatedStore; +use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; /// Additional DHT record functionality @@ -222,9 +222,9 @@ impl DHTBehaviour { retry_count: query.retry_count, }; let backoff = query.backoff.next_timeout(false); - art::async_spawn(async move { - art::async_sleep(backoff).await; - let _ = tx.send(req).await; + spawn(async move { + sleep(backoff).await; + let _ = tx.send(req); }); } @@ -238,9 +238,9 @@ impl DHTBehaviour { value: query.value, notify: query.notify, }; - art::async_spawn(async move { - art::async_sleep(query.backoff.next_timeout(false)).await; - let _ = tx.send(req).await; + spawn(async move { + sleep(query.backoff.next_timeout(false)).await; + let _ = tx.send(req); }); } @@ -397,9 +397,7 @@ impl DHTBehaviour { /// Send that the bootsrap suceeded fn finish_bootstrap(&mut self) { if let Some(mut tx) = self.bootstrap_tx.clone() { - art::async_spawn( - async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await }, - ); + spawn(async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await }); } } #[allow(clippy::too_many_lines)] diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/libp2p-networking/src/network/behaviours/direct_message.rs index 61f64b8b91..72d378a587 100644 --- a/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/libp2p-networking/src/network/behaviours/direct_message.rs @@ -6,12 +6,9 @@ use std::collections::HashMap; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::UnboundedSender, -}; use libp2p::request_response::{Event, Message, OutboundRequestId, ResponseChannel}; use libp2p_identity::PeerId; +use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; use super::exponential_backoff::ExponentialBackoff; @@ -75,15 +72,13 @@ impl DMBehaviour { } req.retry_count -= 1; if let Some(retry_tx) = retry_tx { - async_spawn(async move { - async_sleep(req.backoff.next_timeout(false)).await; - let _ = retry_tx - .send(ClientRequest::DirectRequest { - pid: peer, - contents: req.data, - retry_count: req.retry_count, - }) - .await; + spawn(async move { + sleep(req.backoff.next_timeout(false)).await; + let _ = retry_tx.send(ClientRequest::DirectRequest { + pid: peer, + contents: req.data, + retry_count: req.retry_count, + }); }); } } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index a23c74fbd3..465e8ee25e 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -20,9 +20,6 @@ use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::Sender; use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; -#[cfg(async_executor_impl = "async-std")] -use libp2p::dns::async_std::Transport as DnsTransport; -#[cfg(async_executor_impl = "tokio")] use libp2p::dns::tokio::Transport as DnsTransport; use libp2p::{ build_multiaddr, @@ -35,9 +32,6 @@ use libp2p::{ Multiaddr, Transport, }; use libp2p_identity::PeerId; -#[cfg(async_executor_impl = "async-std")] -use quic::async_std::Transport as QuicTransport; -#[cfg(async_executor_impl = "tokio")] use quic::tokio::Transport as QuicTransport; use tracing::instrument; use transport::StakeTableAuthentication; @@ -50,8 +44,6 @@ pub use self::{ RequestResponseConfig, DEFAULT_REPLICATION_FACTOR, }, }; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} /// Actions to send from the client to the swarm #[derive(Debug)] @@ -183,12 +175,6 @@ pub async fn gen_transport( // Support DNS resolution let transport = { - #[cfg(async_executor_impl = "async-std")] - { - DnsTransport::system(transport).await - } - - #[cfg(async_executor_impl = "tokio")] { DnsTransport::system(transport) } diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 8e3b7888f0..7bf55ccc86 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -18,11 +18,7 @@ use std::{ time::Duration, }; -use async_compatibility_layer::{ - art::async_spawn, - channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, -}; -use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; +use futures::{channel::mpsc, SinkExt, StreamExt}; use hotshot_types::{ constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::node_implementation::NodeType, }; @@ -47,6 +43,10 @@ use libp2p::{ }; use libp2p_identity::PeerId; use rand::{prelude::SliceRandom, thread_rng}; +use tokio::{ + select, spawn, + sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, +}; use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ @@ -300,9 +300,6 @@ impl NetworkNode { // build swarm let swarm = SwarmBuilder::with_existing_identity(keypair.clone()); - #[cfg(async_executor_impl = "async-std")] - let swarm = swarm.with_async_std(); - #[cfg(async_executor_impl = "tokio")] let swarm = swarm.with_tokio(); swarm @@ -377,11 +374,11 @@ impl NetworkNode { #[instrument(skip(self))] async fn handle_client_requests( &mut self, - msg: Result, + msg: Option, ) -> Result { let behaviour = self.swarm.behaviour_mut(); match msg { - Ok(msg) => { + Some(msg) => { match msg { ClientRequest::BeginBootstrap => { debug!("Beginning Libp2p bootstrap"); @@ -491,8 +488,8 @@ impl NetworkNode { } } } - Err(e) => { - error!("Error receiving msg in main behaviour loop: {:?}", e); + None => { + error!("Error receiving msg in main behaviour loop: channel closed"); } } Ok(false) @@ -534,7 +531,6 @@ impl NetworkNode { // Send the number of connected peers to the client send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } SwarmEvent::ConnectionClosed { @@ -559,7 +555,6 @@ impl NetworkNode { // Send the number of connected peers to the client send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } SwarmEvent::Dialing { @@ -668,7 +663,6 @@ impl NetworkNode { // forward messages directly to Client send_to_client .send(event) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } } @@ -725,16 +719,15 @@ impl NetworkNode { ), NetworkError, > { - let (s_input, s_output) = unbounded::(); - let (r_input, r_output) = unbounded::(); + let (s_input, mut s_output) = unbounded_channel::(); + let (r_input, r_output) = unbounded_channel::(); let (mut bootstrap_tx, bootstrap_rx) = mpsc::channel(100); self.resend_tx = Some(s_input.clone()); self.dht_handler.set_bootstrap_sender(bootstrap_tx.clone()); DHTBootstrapTask::run(bootstrap_rx, s_input.clone()); - async_spawn( + spawn( async move { - let mut fuse = s_output.recv().boxed().fuse(); loop { select! { event = self.swarm.next() => { @@ -744,14 +737,13 @@ impl NetworkNode { self.handle_swarm_events(event, &r_input).await?; } }, - msg = fuse => { + msg = s_output.recv() => { debug!("peerid {:?}\t\thandling msg {:?}", self.peer_id, msg); let shutdown = self.handle_client_requests(msg).await?; if shutdown { let _ = bootstrap_tx.send(InputEvent::ShutdownBootstrap).await; break } - fuse = s_output.recv().boxed().fuse(); } } } diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index f9317e7aa1..b7a6832286 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -6,13 +6,13 @@ use std::{collections::HashSet, fmt::Debug, time::Duration}; -use async_compatibility_layer::{ - art::{async_sleep, async_timeout}, - channel::{Receiver, UnboundedReceiver, UnboundedSender}, -}; use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; +use tokio::{ + sync::mpsc::{Receiver, UnboundedReceiver, UnboundedSender}, + time::{sleep, timeout}, +}; use tracing::{debug, info, instrument}; use crate::network::{ @@ -55,11 +55,13 @@ impl NetworkNodeReceiver { /// recv a network event /// # Errors /// Errors if the receiver channel is closed - pub async fn recv(&self) -> Result { + pub async fn recv(&mut self) -> Result { self.receiver .recv() .await - .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) + .ok_or(NetworkError::ChannelReceiveError( + "Receiver channel closed".to_string(), + )) } /// Add a kill switch to the receiver pub fn set_kill_switch(&mut self, kill_switch: Receiver<()>) { @@ -117,16 +119,16 @@ impl NetworkNodeHandle { /// the swarm itself to spin down #[instrument] pub async fn shutdown(&self) -> Result<(), NetworkError> { - self.send_request(ClientRequest::Shutdown).await?; + self.send_request(ClientRequest::Shutdown)?; Ok(()) } /// Notify the network to begin the bootstrap process /// # Errors /// If unable to send via `send_network`. This should only happen /// if the network is shut down. - pub async fn begin_bootstrap(&self) -> Result<(), NetworkError> { + pub fn begin_bootstrap(&self) -> Result<(), NetworkError> { let req = ClientRequest::BeginBootstrap; - self.send_request(req).await + self.send_request(req) } /// Get a reference to the network node handle's listen addr. @@ -142,7 +144,7 @@ impl NetworkNodeHandle { pub async fn print_routing_table(&self) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetRoutingTable(s); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) } @@ -170,7 +172,7 @@ impl NetworkNodeHandle { ); // Sleep for a second before checking again - async_sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; } Ok(()) @@ -183,7 +185,7 @@ impl NetworkNodeHandle { pub async fn lookup_pid(&self, peer_id: PeerId) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::LookupPeer(peer_id, s); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } @@ -229,7 +231,7 @@ impl NetworkNodeHandle { notify: s, }; - self.send_request(req).await?; + self.send_request(req)?; r.await.map_err(|_| NetworkError::RequestCancelled) } @@ -253,7 +255,7 @@ impl NetworkNodeHandle { notify: s, retry_count, }; - self.send_request(req).await?; + self.send_request(req)?; // Map the error let result = r.await.map_err(|_| NetworkError::RequestCancelled)?; @@ -273,9 +275,9 @@ impl NetworkNodeHandle { pub async fn get_record_timeout( &self, key: RecordKey, - timeout: Duration, + timeout_duration: Duration, ) -> Result, NetworkError> { - async_timeout(timeout, self.get_record(key, 3)) + timeout(timeout_duration, self.get_record(key, 3)) .await .map_err(|err| NetworkError::Timeout(err.to_string()))? } @@ -289,9 +291,9 @@ impl NetworkNodeHandle { &self, key: RecordKey, value: RecordValue, - timeout: Duration, + timeout_duration: Duration, ) -> Result<(), NetworkError> { - async_timeout(timeout, self.put_record(key, value)) + timeout(timeout_duration, self.put_record(key, value)) .await .map_err(|err| NetworkError::Timeout(err.to_string()))? } @@ -302,7 +304,7 @@ impl NetworkNodeHandle { pub async fn subscribe(&self, topic: String) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::Subscribe(topic, Some(s)); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } @@ -313,7 +315,7 @@ impl NetworkNodeHandle { pub async fn unsubscribe(&self, topic: String) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::Unsubscribe(topic, Some(s)); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } @@ -322,24 +324,24 @@ impl NetworkNodeHandle { /// e.g. maintain their connection /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed - pub async fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkError> { + pub fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkError> { let req = ClientRequest::IgnorePeers(peers); - self.send_request(req).await + self.send_request(req) } /// Make a direct request to `peer_id` containing `msg` /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn direct_request(&self, pid: PeerId, msg: &[u8]) -> Result<(), NetworkError> { - self.direct_request_no_serialize(pid, msg.to_vec()).await + pub fn direct_request(&self, pid: PeerId, msg: &[u8]) -> Result<(), NetworkError> { + self.direct_request_no_serialize(pid, msg.to_vec()) } /// Make a direct request to `peer_id` containing `msg` without serializing /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn direct_request_no_serialize( + pub fn direct_request_no_serialize( &self, pid: PeerId, contents: Vec, @@ -349,20 +351,20 @@ impl NetworkNodeHandle { contents, retry_count: 1, }; - self.send_request(req).await + self.send_request(req) } /// Reply with `msg` to a request over `chan` /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn direct_response( + pub fn direct_response( &self, chan: ResponseChannel>, msg: &[u8], ) -> Result<(), NetworkError> { let req = ClientRequest::DirectResponse(chan, msg.to_vec()); - self.send_request(req).await + self.send_request(req) } /// Forcefully disconnect from a peer @@ -372,52 +374,47 @@ impl NetworkNodeHandle { /// # Panics /// If channel errors out /// shouldn't happen. - pub async fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkError> { + pub fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkError> { let req = ClientRequest::Prune(pid); - self.send_request(req).await + self.send_request(req) } /// Gossip a message to peers /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkError> { - self.gossip_no_serialize(topic, msg.to_vec()).await + pub fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkError> { + self.gossip_no_serialize(topic, msg.to_vec()) } /// Gossip a message to peers without serializing /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn gossip_no_serialize( - &self, - topic: String, - msg: Vec, - ) -> Result<(), NetworkError> { + pub fn gossip_no_serialize(&self, topic: String, msg: Vec) -> Result<(), NetworkError> { let req = ClientRequest::GossipMsg(topic, msg); - self.send_request(req).await + self.send_request(req) } /// Tell libp2p about known network nodes /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed - pub async fn add_known_peers( + pub fn add_known_peers( &self, known_peers: Vec<(PeerId, Multiaddr)>, ) -> Result<(), NetworkError> { debug!("Adding {} known peers", known_peers.len()); let req = ClientRequest::AddKnownPeers(known_peers); - self.send_request(req).await + self.send_request(req) } /// Send a client request to the network /// /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed - async fn send_request(&self, req: ClientRequest) -> Result<(), NetworkError> { + fn send_request(&self, req: ClientRequest) -> Result<(), NetworkError> { self.send_network .send(req) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string())) } @@ -431,7 +428,7 @@ impl NetworkNodeHandle { pub async fn num_connected(&self) -> Result { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetConnectedPeerNum(s); - self.send_request(req).await?; + self.send_request(req)?; Ok(r.await.unwrap()) } @@ -445,7 +442,7 @@ impl NetworkNodeHandle { pub async fn connected_pids(&self) -> Result, NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetConnectedPeers(s); - self.send_request(req).await?; + self.send_request(req)?; Ok(r.await.unwrap()) } diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index 969fed495d..7e5987e306 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -7,7 +7,6 @@ use std::{ }; use anyhow::{ensure, Context, Result as AnyhowResult}; -use async_compatibility_layer::art::async_timeout; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use hotshot_types::traits::{ election::Membership, @@ -21,6 +20,7 @@ use libp2p::{ }; use pin_project::pin_project; use serde::{Deserialize, Serialize}; +use tokio::time::timeout; use tracing::warn; /// The maximum size of an authentication message. This is used to prevent @@ -162,7 +162,7 @@ impl StakeTableAuthentica let mut stream = original_future.await?; // Time out the authentication block - async_timeout(AUTH_HANDSHAKE_TIMEOUT, async { + timeout(AUTH_HANDSHAKE_TIMEOUT, async { // Open a substream for the handshake. // The handshake order depends on whether the connection is incoming or outgoing. let mut substream = if outgoing { @@ -630,8 +630,7 @@ mod test { assert!(public_key.is_err()); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn valid_authentication() { // Create a new identity let (keypair, peer_id, auth_message) = new_identity!(); @@ -664,8 +663,7 @@ mod test { ); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn key_not_in_stake_table() { // Create a new identity let (_, peer_id, auth_message) = new_identity!(); @@ -694,8 +692,7 @@ mod test { ); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn peer_id_mismatch() { // Create a new identity and authentication message let (keypair, _, auth_message) = new_identity!(); @@ -735,8 +732,7 @@ mod test { ); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn read_and_write_length_delimited() { // Create a message let message = b"Hello, world!"; diff --git a/libp2p-networking/test.py b/libp2p-networking/test.py deleted file mode 100755 index 66368b297e..0000000000 --- a/libp2p-networking/test.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 - -from enum import Enum -from functools import reduce -from typing import Final -from subprocess import run, Popen -from time import sleep -from os import environ - -class NodeType(Enum): - CONDUCTOR = "Conductor" - REGULAR = "Regular" - BOOTSTRAP = "Bootstrap" - -def gen_invocation( - node_type: NodeType, - num_nodes: int, - to_connect_addrs: list[str], - conductor_addr: str, - num_rounds: int, - bound_addr: str, - ) -> tuple[list[str], str]: - aggr_list = lambda x, y: f'{x},{y}' - to_connect_list : Final[str] = reduce(aggr_list, to_connect_addrs); - out_file_name : Final[str] = f'out_{node_type}_{bound_addr[-4:]}'; - fmt_cmd = [ - f'cargo run --no-default-features --features=async-std-executor --example=counter --profile=release-lto -- ' \ - f' --bound_addr={bound_addr} '\ - f' --node_type={node_type.value} '\ - f' --num_nodes={num_nodes} '\ - f' --num_gossip={num_rounds} '\ - f' --to_connect_addrs={to_connect_list} '\ - f' --conductor_addr={conductor_addr} ']; - return (fmt_cmd, out_file_name) - -# construct a map: - -if __name__ == "__main__": - # cleanup - - run("rm -f out_*".split()) - - - # params - START_PORT : Final[int] = 9100; - NUM_REGULAR_NODES : Final[int] = 100; - NUM_NODES_PER_BOOTSTRAP : Final[int] = 10; - NUM_BOOTSTRAP : Final[int] = (int) (NUM_REGULAR_NODES / NUM_NODES_PER_BOOTSTRAP); - TOTAL_NUM_NODES: Final[int] = NUM_BOOTSTRAP + NUM_REGULAR_NODES + 1; - NUM_ROUNDS = 100; - - bootstrap_addrs : Final[list[str]] = list(map(lambda x: f'127.0.0.1:{x + START_PORT}', range(0, NUM_BOOTSTRAP))); - normal_nodes_addrs : Final[list[str]] = list(map(lambda x: f'127.0.0.1:{x + START_PORT + NUM_BOOTSTRAP}', range(0, NUM_REGULAR_NODES))); - conductor_addr : str = f'127.0.0.1:{START_PORT + NUM_BOOTSTRAP + NUM_REGULAR_NODES + 1}'; - - regular_cmds : list[tuple[list[str], str]] = []; - bootstrap_cmds : list[tuple[list[str], str]] = []; - print("doing conductor") - conductor_cmd : Final[tuple[list[str], str]] = \ - gen_invocation( - node_type=NodeType.CONDUCTOR, - num_nodes=TOTAL_NUM_NODES, - to_connect_addrs=bootstrap_addrs + normal_nodes_addrs,# + normal_nodes_addrs + [conductor_addr], - conductor_addr=conductor_addr, - num_rounds=NUM_ROUNDS, - bound_addr=conductor_addr - ); - print("dfone concuctor") - - for i in range(0, len(bootstrap_addrs)): - bootstrap_addr = bootstrap_addrs[i]; - regulars_list = normal_nodes_addrs[i * NUM_NODES_PER_BOOTSTRAP: (i + 1) * NUM_NODES_PER_BOOTSTRAP]; - - bootstrap_cmd = gen_invocation( - node_type=NodeType.BOOTSTRAP, - num_nodes=TOTAL_NUM_NODES, - to_connect_addrs=bootstrap_addrs, - conductor_addr=conductor_addr, - num_rounds=NUM_ROUNDS, - bound_addr=bootstrap_addr, - ); - bootstrap_cmds.append(bootstrap_cmd); - - for regular_addr in regulars_list: - regular_cmd = gen_invocation( - node_type=NodeType.REGULAR, - num_nodes=TOTAL_NUM_NODES, - # NOTE may need to remove regular_addr from regulars_list - to_connect_addrs= [bootstrap_addr], - num_rounds=NUM_ROUNDS, - bound_addr=regular_addr, - conductor_addr=conductor_addr - ); - regular_cmds.append(regular_cmd); - - print(regular_cmds) - - TIME_TO_SPIN_UP_BOOTSTRAP : Final[int] = 0; - TIME_TO_SPIN_UP_REGULAR : Final[int] = 0; - env = environ.copy(); - env["RUST_BACKTRACE"] = "full" - - print("spinning up bootstrap") - for (node_cmd, file_name) in bootstrap_cmds: - print("running bootstrap", file_name) - file = open(file_name, 'w') - Popen(node_cmd[0].split(), start_new_session=True, stdout=file, stderr=file, env=env); - - sleep(TIME_TO_SPIN_UP_BOOTSTRAP); - - print("spinning up regulars") - for (node_cmd, file_name) in regular_cmds: - file = open(file_name, 'w') - Popen(node_cmd[0].split(), start_new_session=True, stdout=file, stderr=file, env=env); - - sleep(TIME_TO_SPIN_UP_REGULAR); - - file = open(conductor_cmd[1], 'w') - print("spinning up conductor") - Popen(conductor_cmd[0][0].split(), start_new_session=True, stdout=file, stderr=file, env=env); - diff --git a/libp2p-networking/tests/common/mod.rs b/libp2p-networking/tests/common/mod.rs deleted file mode 100644 index 9e61786f5a..0000000000 --- a/libp2p-networking/tests/common/mod.rs +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, - num::NonZeroUsize, - str::FromStr, - sync::Arc, - time::Duration, -}; - -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - async_primitives::subscribable_mutex::SubscribableMutex, - channel::bounded, - logging::{setup_backtrace, setup_logging}, -}; -use futures::{future::join_all, Future, FutureExt}; -use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; -use libp2p::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_networking::network::{ - spawn_network_node, NetworkEvent, NetworkNodeConfigBuilder, NetworkNodeHandle, - NetworkNodeReceiver, -}; -use thiserror::Error; -use tracing::{instrument, warn}; - -#[derive(Clone, Debug)] -pub(crate) struct HandleWithState { - pub(crate) handle: Arc>, - pub(crate) state: Arc>, -} - -/// Spawn a handler `F` that will be notified every time a new [`NetworkEvent`] arrives. -/// -/// # Panics -/// -/// Will panic if a handler is already spawned -pub fn spawn_handler( - handle_and_state: HandleWithState, - mut receiver: NetworkNodeReceiver, - cb: F, -) -> impl Future -where - F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, - RET: Future> + Send + 'static, - S: Debug + Default + Send + Clone + 'static, -{ - async_spawn(async move { - let Some(mut kill_switch) = receiver.take_kill_switch() else { - tracing::error!( - "`spawn_handle` was called on a network handle that was already closed" - ); - return; - }; - let mut next_msg = receiver.recv().boxed(); - let mut kill_switch = kill_switch.recv().boxed(); - loop { - match futures::future::select(next_msg, kill_switch).await { - futures::future::Either::Left((incoming_message, other_stream)) => { - let incoming_message = match incoming_message { - Ok(msg) => msg, - Err(e) => { - tracing::warn!(?e, "NetworkNodeHandle::spawn_handle was unable to receive more messages"); - return; - } - }; - if let Err(e) = cb(incoming_message, handle_and_state.clone()).await { - tracing::error!(?e, "NetworkNodeHandle::spawn_handle returned an error"); - return; - } - - // re-set the `kill_switch` for the next loop - kill_switch = other_stream; - // re-set `receiver.recv()` for the next loop - next_msg = receiver.recv().boxed(); - } - futures::future::Either::Right(_) => { - return; - } - } - } - }) -} - -/// General function to spin up testing infra -/// perform tests by calling `run_test` -/// then cleans up tests -/// # Panics -/// Panics if unable to: -/// - Initialize logging -/// - Initialize network nodes -/// - Kill network nodes -/// - A test assertion fails -pub async fn test_bed( - run_test: F, - client_handler: G, - num_nodes: usize, - timeout: Duration, -) where - FutF: Future, - FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>, Duration) -> FutF, - G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, -{ - setup_logging(); - setup_backtrace(); - - let mut kill_switches = Vec::new(); - // NOTE we want this to panic if we can't spin up the swarms. - // that amounts to a failed test. - let handles_and_receivers = spin_up_swarms::(num_nodes, timeout).await.unwrap(); - - let (handles, receivers): (Vec<_>, Vec<_>) = handles_and_receivers.into_iter().unzip(); - let mut handler_futures = Vec::new(); - for (i, mut rx) in receivers.into_iter().enumerate() { - let (kill_tx, kill_rx) = bounded(1); - let handle = &handles[i]; - kill_switches.push(kill_tx); - rx.set_kill_switch(kill_rx); - let handler_fut = spawn_handler(handle.clone(), rx, client_handler.clone()); - handler_futures.push(handler_fut); - } - - run_test(handles.clone(), timeout).await; - - // cleanup - for handle in handles { - handle.handle.shutdown().await.unwrap(); - } - for switch in kill_switches { - let _ = switch.send(()).await; - } - - for fut in handler_futures { - fut.await; - } -} - -fn gen_peerid_map(handles: &[Arc>]) -> HashMap { - let mut r_val = HashMap::new(); - for handle in handles { - r_val.insert(handle.peer_id(), handle.id()); - } - r_val -} - -/// print the connections for each handle in `handles` -/// useful for debugging -pub async fn print_connections(handles: &[Arc>]) { - let m = gen_peerid_map(handles); - warn!("PRINTING CONNECTION STATES"); - for handle in handles { - warn!( - "peer {}, connected to {:?}", - handle.id(), - handle - .connected_pids() - .await - .unwrap() - .iter() - .map(|pid| m.get(pid).unwrap()) - .collect::>() - ); - } -} - -/// Spins up `num_of_nodes` nodes, connects them to each other -/// and waits for connections to propagate to all nodes. -#[allow(clippy::type_complexity)] -#[instrument] -pub async fn spin_up_swarms( - num_of_nodes: usize, - timeout_len: Duration, -) -> Result, NetworkNodeReceiver)>, TestError> { - let mut handles = Vec::new(); - let mut node_addrs = Vec::<(PeerId, Multiaddr)>::new(); - let mut connecting_futs = Vec::new(); - // should never panic unless num_nodes is 0 - let replication_factor = NonZeroUsize::new(num_of_nodes - 1).unwrap(); - - for i in 0..num_of_nodes { - // Get an unused port - let port = portpicker::pick_unused_port().expect("Failed to get an unused port"); - - // Use the port to create a Multiaddr - let addr = - Multiaddr::from_str(format!("/ip4/127.0.0.1/udp/{port}/quic-v1").as_str()).unwrap(); - - let config = NetworkNodeConfigBuilder::default() - .replication_factor(replication_factor) - .bind_address(Some(addr.clone())) - .to_connect_addrs(HashSet::default()) - .build() - .map_err(|e| TestError::ConfigError(format!("failed to build network node: {e}")))?; - - let (rx, node) = spawn_network_node(config.clone(), i).await.unwrap(); - - // Add ourselves to the list of node addresses to connect to - node_addrs.push((node.peer_id(), addr)); - - let node = Arc::new(node); - connecting_futs.push({ - let node = Arc::clone(&node); - async move { - node.begin_bootstrap().await?; - node.lookup_pid(PeerId::random()).await - } - .boxed_local() - }); - let node_with_state = HandleWithState { - handle: Arc::clone(&node), - state: Arc::default(), - }; - handles.push((node_with_state, rx)); - } - - for (handle, _) in &handles[0..num_of_nodes] { - let to_share = node_addrs.clone(); - handle - .handle - .add_known_peers(to_share) - .await - .map_err(|e| TestError::HandleError(format!("failed to add known peers: {e}")))?; - } - - let res = join_all(connecting_futs.into_iter()).await; - let mut failing_nodes = Vec::new(); - for (idx, a_node) in res.iter().enumerate() { - if a_node.is_err() { - failing_nodes.push(idx); - } - } - if !failing_nodes.is_empty() { - return Err(TestError::Timeout(failing_nodes, "spinning up".to_string())); - } - - for (handle, _) in &handles { - handle - .handle - .subscribe("global".to_string()) - .await - .map_err(|e| TestError::HandleError(format!("failed to subscribe: {e}")))?; - } - - async_sleep(Duration::from_secs(5)).await; - - Ok(handles) -} - -#[derive(Debug, Error)] -pub enum TestError { - #[error("Error with network node handle: {0}")] - HandleError(String), - - #[error("Configuration error: {0}")] - ConfigError(String), - - #[error("The following nodes timed out: {0:?} while {1}")] - Timeout(Vec, String), - - #[error( - "Inconsistent state while running test. Expected {expected:?}, got {actual:?} on node {id}" - )] - InconsistentState { id: usize, expected: S, actual: S }, -} diff --git a/libp2p-networking/tests/counter.rs b/libp2p-networking/tests/counter.rs deleted file mode 100644 index f207ec4414..0000000000 --- a/libp2p-networking/tests/counter.rs +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . - -#![allow(clippy::panic)] - -mod common; -use std::{fmt::Debug, sync::Arc, time::Duration}; - -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::prelude::StreamExt; -use common::{test_bed, HandleWithState, TestError}; -use hotshot_example_types::node_types::TestTypes; -use hotshot_types::traits::{ - network::NetworkError, node_implementation::NodeType, signature_key::SignatureKey, -}; -use libp2p_networking::network::{ - behaviours::dht::record::{Namespace, RecordKey, RecordValue}, - NetworkEvent, -}; -use rand::{rngs::StdRng, seq::IteratorRandom, Rng, SeedableRng}; -use serde::{Deserialize, Serialize}; -#[cfg(async_executor_impl = "tokio")] -use tokio_stream::StreamExt; -use tracing::{debug, error, info, instrument, warn}; - -use crate::common::print_connections; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - -pub type CounterState = u32; - -const NUM_ROUNDS: usize = 100; - -const TOTAL_NUM_PEERS_COVERAGE: usize = 10; -const TIMEOUT_COVERAGE: Duration = Duration::from_secs(120); - -const TOTAL_NUM_PEERS_STRESS: usize = 100; -const TIMEOUT_STRESS: Duration = Duration::from_secs(60); - -const DHT_KV_PADDING: usize = 1024; - -/// Message types. We can either -/// - increment the Counter -/// - request a counter value -/// - reply with a counter value -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub enum CounterMessage { - IncrementCounter { - from: CounterState, - to: CounterState, - }, - AskForCounter, - MyCounterIs(CounterState), - Noop, -} - -/// Given a slice of handles assumed to be larger than 0, -/// chooses one -/// # Panics -/// panics if handles is of length 0 -fn random_handle( - handles: &[HandleWithState], - rng: &mut dyn rand::RngCore, -) -> HandleWithState { - handles.iter().choose(rng).unwrap().clone() -} - -/// event handler for events from the swarm -/// - updates state based on events received -/// - replies to direct messages -#[instrument] -pub async fn counter_handle_network_event( - event: NetworkEvent, - handle: HandleWithState, -) -> Result<(), NetworkError> { - use CounterMessage::*; - use NetworkEvent::*; - match event { - IsBootstrapped | NetworkEvent::ConnectedPeersUpdate(..) => {} - GossipMsg(m) | DirectResponse(m, _) => { - if let Ok(msg) = bincode::deserialize::(&m) { - match msg { - // direct message only - MyCounterIs(c) => { - handle.state.modify(|s| *s = c).await; - } - // gossip message only - IncrementCounter { from, to, .. } => { - handle - .state - .modify(|s| { - if *s == from { - *s = to; - } - }) - .await; - } - // only as a response - AskForCounter | Noop => {} - } - } else { - error!("FAILED TO DESERIALIZE MSG {:?}", m); - } - } - DirectRequest(m, _, chan) => { - if let Ok(msg) = bincode::deserialize::(&m) { - match msg { - // direct message request - IncrementCounter { from, to, .. } => { - handle - .state - .modify(|s| { - if *s == from { - *s = to; - } - }) - .await; - handle - .handle - .direct_response( - chan, - &bincode::serialize(&CounterMessage::Noop).unwrap(), - ) - .await?; - } - // direct message response - AskForCounter => { - let response = MyCounterIs(handle.state.copied().await); - handle - .handle - .direct_response(chan, &bincode::serialize(&response).unwrap()) - .await?; - } - MyCounterIs(_) => { - handle - .handle - .direct_response( - chan, - &bincode::serialize(&CounterMessage::Noop).unwrap(), - ) - .await?; - } - Noop => { - handle - .handle - .direct_response( - chan, - &bincode::serialize(&CounterMessage::Noop).unwrap(), - ) - .await?; - } - } - } - } - }; - Ok(()) -} - -/// `requester_handle` asks for `requestee_handle`'s state, -/// and then `requester_handle` updates its state to equal `requestee_handle`. -/// # Panics -/// on error -#[allow(clippy::similar_names)] -async fn run_request_response_increment<'a, T: NodeType>( - requester_handle: HandleWithState, - requestee_handle: HandleWithState, - timeout: Duration, -) -> Result<(), TestError> { - async move { - let new_state = requestee_handle.state.copied().await; - - // set up state change listener - #[cfg(async_executor_impl = "async-std")] - let mut stream = requester_handle.state.wait_timeout_until_with_trigger(timeout, move |state| *state == new_state); - #[cfg(async_executor_impl = "tokio")] - let mut stream = Box::pin( - requester_handle.state.wait_timeout_until_with_trigger(timeout, move |state| *state == new_state), - ); - #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] - compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - - let requestee_pid = requestee_handle.handle.peer_id(); - - match stream.next().await.unwrap() { - Ok(()) => {} - Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); - std::process::exit(-1)}, - } - requester_handle.handle - .direct_request(requestee_pid, &bincode::serialize(&CounterMessage::AskForCounter).unwrap()) - .await - .map_err(|e| TestError::HandleError(format!("failed to send direct request: {e}")))?; - match stream.next().await.unwrap() { - Ok(()) => {} - Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); - std::process::exit(-1)}, } - - let s1 = requester_handle.state.copied().await; - - // sanity check - if s1 == new_state { - Ok(()) - } else { - Err(TestError::InconsistentState { - id: requester_handle.handle.id(), - expected: new_state, - actual: s1, - }) - } - } - .await -} - -/// broadcasts `msg` from a randomly chosen handle -/// then asserts that all nodes match `new_state` -async fn run_gossip_round( - handles: &[HandleWithState], - msg: CounterMessage, - new_state: CounterState, - timeout_duration: Duration, -) -> Result<(), TestError> { - let mut rng = rand::thread_rng(); - let msg_handle = random_handle(handles, &mut rng); - msg_handle.state.modify(|s| *s = new_state).await; - - let mut futs = Vec::new(); - - let len = handles.len(); - for handle in handles { - // already modified, so skip msg_handle - if handle.handle.peer_id() != msg_handle.handle.peer_id() { - let stream = handle - .state - .wait_timeout_until_with_trigger(timeout_duration, |state| *state == new_state); - futs.push(Box::pin(stream)); - } - } - - #[cfg(async_executor_impl = "async-std")] - let mut merged_streams = futures::stream::select_all(futs); - #[cfg(async_executor_impl = "tokio")] - let mut merged_streams = Box::pin(futures::stream::select_all(futs)); - #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] - compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - - // make sure all are ready/listening - for i in 0..len - 1 { - // unwrap is okay because stream must have 2 * (len - 1) elements - match merged_streams.next().await.unwrap() { - Ok(()) => {} - Err(e) => { - error!("timed out waiting for handle {i:?} to subscribe to state events: {e}"); - std::process::exit(-1) - } - } - } - - msg_handle - .handle - .gossip("global".to_string(), &bincode::serialize(&msg).unwrap()) - .await - .map_err(|e| TestError::HandleError(format!("failed to gossip: {e}")))?; - - for _ in 0..len - 1 { - // wait for all events to finish - // then check for failures - let _ = merged_streams.next().await; - } - - let mut failing = Vec::new(); - for handle in handles { - let handle_state = handle.state.copied().await; - if handle_state != new_state { - failing.push(handle.handle.id()); - println!("state: {handle_state:?}, expected: {new_state:?}"); - } - } - if !failing.is_empty() { - let nodes = handles - .iter() - .cloned() - .map(|h| h.handle) - .collect::>(); - print_connections(nodes.as_slice()).await; - return Err(TestError::Timeout(failing, "gossiping".to_string())); - } - - Ok(()) -} - -async fn run_intersperse_many_rounds( - handles: Vec>, - timeout: Duration, -) { - for i in 0..u32::try_from(NUM_ROUNDS).unwrap() { - if i % 2 == 0 { - run_request_response_increment_all(&handles, timeout).await; - } else { - run_gossip_rounds(&handles, 1, i, timeout).await; - } - } - for h in handles { - assert_eq!(h.state.copied().await, u32::try_from(NUM_ROUNDS).unwrap()); - } -} - -async fn run_dht_many_rounds( - handles: Vec>, - timeout: Duration, -) { - run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; -} - -async fn run_dht_one_round( - handles: Vec>, - timeout: Duration, -) { - run_dht_rounds(&handles, timeout, 0, 1).await; -} - -async fn run_request_response_many_rounds( - handles: Vec>, - timeout: Duration, -) { - for _i in 0..NUM_ROUNDS { - run_request_response_increment_all(&handles, timeout).await; - } - for h in handles { - assert_eq!(h.state.copied().await, u32::try_from(NUM_ROUNDS).unwrap()); - } -} - -/// runs one round of request response -/// # Panics -/// on error -async fn run_request_response_one_round( - handles: Vec>, - timeout: Duration, -) { - run_request_response_increment_all(&handles, timeout).await; - for h in handles { - assert_eq!(h.state.copied().await, 1); - } -} - -/// runs multiple rounds of gossip -/// # Panics -/// on error -async fn run_gossip_many_rounds( - handles: Vec>, - timeout: Duration, -) { - run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await; -} - -/// runs one round of gossip -/// # Panics -/// on error -async fn run_gossip_one_round( - handles: Vec>, - timeout: Duration, -) { - run_gossip_rounds(&handles, 1, 0, timeout).await; -} - -/// runs many rounds of dht -/// # Panics -/// on error -async fn run_dht_rounds( - handles: &[HandleWithState], - timeout: Duration, - _starting_val: usize, - num_rounds: usize, -) { - let mut rng = rand::thread_rng(); - for i in 0..num_rounds { - debug!("begin round {}", i); - let msg_handle = random_handle(handles, &mut rng); - - // Create a random keypair - let mut rng = StdRng::from_entropy(); - let (public_key, private_key) = - ::generated_from_seed_indexed( - [1; 32], - rng.gen::(), - ); - - // Create a random value to sign - let value = (0..DHT_KV_PADDING) - .map(|_| rng.gen::()) - .collect::>(); - - // Create the record key - let key = RecordKey::new(Namespace::Lookup, public_key.to_bytes().clone()); - - // Sign the value - let value = RecordValue::new_signed(&key, value, &private_key).expect("signing failed"); - - // Put the key - msg_handle - .handle - .put_record(key.clone(), value.clone()) - .await - .unwrap(); - - // get the key from the other nodes - for handle in handles { - let result: Result, NetworkError> = - handle.handle.get_record_timeout(key.clone(), timeout).await; - match result { - Err(e) => { - error!("DHT error {e:?} during GET"); - std::process::exit(-1); - } - Ok(v) => { - assert_eq!(v, value.value()); - } - } - } - } -} - -/// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast -async fn run_gossip_rounds( - handles: &[HandleWithState], - num_rounds: usize, - starting_state: CounterState, - timeout: Duration, -) { - let mut old_state = starting_state; - for i in 0..num_rounds { - info!("running gossip round {}", i); - let new_state = old_state + 1; - let msg = CounterMessage::IncrementCounter { - from: old_state, - to: new_state, - }; - run_gossip_round(handles, msg, new_state, timeout) - .await - .unwrap(); - old_state = new_state; - } -} - -/// chooses a random handle from `handles` -/// increments its state by 1, -/// then has all other peers request its state -/// and update their state to the recv'ed state -#[allow(clippy::similar_names)] -async fn run_request_response_increment_all( - handles: &[HandleWithState], - timeout: Duration, -) { - let mut rng = rand::thread_rng(); - let requestee_handle = random_handle(handles, &mut rng); - requestee_handle.state.modify(|s| *s += 1).await; - info!("RR REQUESTEE IS {:?}", requestee_handle.handle.peer_id()); - let mut futs = Vec::new(); - for handle in handles { - if handle - .handle - .lookup_pid(requestee_handle.handle.peer_id()) - .await - .is_err() - { - error!("ERROR LOOKING UP REQUESTEE ADDRS"); - } - // NOTE uncomment if debugging - // let _ = h.print_routing_table().await; - // skip `requestee_handle` - if handle.handle.peer_id() != requestee_handle.handle.peer_id() { - let requester_handle = handle.clone(); - futs.push(run_request_response_increment( - requester_handle, - requestee_handle.clone(), - timeout, - )); - } - } - - // NOTE this was originally join_all - // but this is simpler. - let results = Arc::new(RwLock::new(vec![])); - - let len = futs.len(); - - for _ in 0..futs.len() { - let fut = futs.pop().unwrap(); - let results = Arc::clone(&results); - async_spawn(async move { - let res = fut.await; - results.write().await.push(res); - }); - } - loop { - let l = results.read().await.iter().len(); - if l >= len { - break; - } - info!("NUMBER OF RESULTS for increment all is: {}", l); - async_sleep(Duration::from_secs(1)).await; - } - - if results.read().await.iter().any(Result::is_err) { - let nodes = handles - .iter() - .cloned() - .map(|h| h.handle) - .collect::>(); - print_connections(nodes.as_slice()).await; - let mut states = vec![]; - for handle in handles { - states.push(handle.state.copied().await); - } - error!("states: {states:?}"); - std::process::exit(-1); - } -} - -/// simple case of direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_request_response_one_round() { - Box::pin(test_bed( - run_request_response_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// stress test of direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_request_response_many_rounds() { - Box::pin(test_bed( - run_request_response_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// stress test of broadcast + direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_intersperse_many_rounds() { - Box::pin(test_bed( - run_intersperse_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// stress teset that we can broadcast a message out and get counter increments -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_gossip_many_rounds() { - Box::pin(test_bed( - run_gossip_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// simple case of broadcast message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_gossip_one_round() { - Box::pin(test_bed( - run_gossip_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// simple case of direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_request_response_one_round() { - Box::pin(test_bed( - run_request_response_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// stress test of direct messsage -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_request_response_many_rounds() { - Box::pin(test_bed( - run_request_response_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// stress test of broadcast + direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_intersperse_many_rounds() { - Box::pin(test_bed( - run_intersperse_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// stress teset that we can broadcast a message out and get counter increments -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_gossip_many_rounds() { - Box::pin(test_bed( - run_gossip_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// simple case of broadcast message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_gossip_one_round() { - Box::pin(test_bed( - run_gossip_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// simple case of one dht publish event -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_dht_one_round() { - Box::pin(test_bed( - run_dht_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// many dht publishing events -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_dht_many_rounds() { - Box::pin(test_bed( - run_dht_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// simple case of one dht publish event -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_dht_one_round() { - Box::pin(test_bed( - run_dht_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// many dht publishing events -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_dht_many_rounds() { - Box::pin(test_bed( - run_dht_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} diff --git a/macros/src/lib.rs b/macros/src/lib.rs index e874468cda..e1920bf4fa 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -127,15 +127,11 @@ impl TestData { quote! { #[cfg(test)] #slow_attribute - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread") - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] async fn #test_name() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + hotshot_testing::test_builder::TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata), 0).launch().run_test::<#builder_impl>().await; } } @@ -364,7 +360,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { use hotshot_testing::{predicates::{Predicate, PredicateResult}}; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; - use async_compatibility_layer::art::async_timeout; + use tokio::time::timeout; use hotshot_task::task::{Task, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::sync::Arc; @@ -403,7 +399,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { let mut result = PredicateResult::Incomplete; - while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { + while let Ok(Ok(received_output)) = timeout(#scripts.timeout, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; @@ -446,7 +442,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { while from_test.try_recv().is_ok() {} let mut result = PredicateResult::Incomplete; - while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { + while let Ok(Ok(received_output)) = timeout(#scripts.timeout, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 59bce1ae84..6dae01b771 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -4,7 +4,6 @@ version = { workspace = true } edition = { workspace = true } [dependencies] -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } clap.workspace = true futures = { workspace = true } @@ -24,11 +23,7 @@ vec1 = { workspace = true } multiaddr = "0.18" anyhow.workspace = true bincode.workspace = true - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } [lints] workspace = true diff --git a/orchestrator/README.md b/orchestrator/README.md index 09df305c34..1bf5ade9c2 100644 --- a/orchestrator/README.md +++ b/orchestrator/README.md @@ -2,4 +2,4 @@ This crate implements an orchestrator that coordinates starting the network with a particular configuration. It is useful for testing and benchmarking. Like the web server, the orchestrator is built using [Tide Disco](https://github.com/EspressoSystems/tide-disco). -To run the orchestrator: `just async_std example orchestrator http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` \ No newline at end of file +To run the orchestrator: `just example orchestrator http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` \ No newline at end of file diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 19799f8e7b..53b097428a 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -6,7 +6,6 @@ use std::{net::SocketAddr, time::Duration}; -use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; use hotshot_types::{ @@ -17,6 +16,7 @@ use hotshot_types::{ use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; +use tokio::time::sleep; use tracing::{info, instrument}; use vbs::BinarySerializer; @@ -427,7 +427,7 @@ impl OrchestratorClient { break (index, is_da); } - async_sleep(Duration::from_millis(250)).await; + sleep(Duration::from_millis(250)).await; }; validator_config.is_da = is_da; @@ -513,7 +513,7 @@ impl OrchestratorClient { Ok(x) => break x, Err(err) => { tracing::info!("{err}"); - async_sleep(Duration::from_millis(250)).await; + sleep(Duration::from_millis(250)).await; } } } diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index e187a96a26..12e8b368a3 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -14,7 +14,6 @@ test-srs = ["jf-vid/test-srs"] [dependencies] anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } @@ -42,11 +41,7 @@ url = { workspace = true } utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } [lints] workspace = true diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 1762c91680..fddcd7d845 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -6,7 +6,6 @@ use std::time::{Duration, Instant}; -use async_compatibility_layer::art::async_sleep; use hotshot_builder_api::v0_1::{ block_info::AvailableBlockInfo, builder::{BuildError, Error as BuilderApiError}, @@ -20,6 +19,7 @@ use serde::{Deserialize, Serialize}; use surf_disco::{client::HealthStatus, Client, Url}; use tagged_base64::TaggedBase64; use thiserror::Error; +use tokio::time::sleep; use vbs::version::StaticVersionType; #[derive(Debug, Error, Serialize, Deserialize)] @@ -96,7 +96,7 @@ impl BuilderClient { ) { return true; } - async_sleep(backoff).await; + sleep(backoff).await; backoff *= 2; } false diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 02e65ba161..41db9a2e13 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -7,7 +7,6 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::Sender; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, @@ -18,6 +17,7 @@ use hotshot_types::{ }, vote::HasViewNumber, }; +use tokio::{spawn, time::sleep}; use tracing::instrument; use utils::anytrace::*; @@ -156,11 +156,11 @@ pub(crate) async fn handle_view_change< // Spawn a timeout task if we did actually update view let timeout = task_state.timeout; - let new_timeout_task = async_spawn({ + let new_timeout_task = spawn({ let stream = sender.clone(); let view_number = new_view_number; async move { - async_sleep(Duration::from_millis(timeout)).await; + sleep(Duration::from_millis(timeout)).await; broadcast_event( Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), &stream, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index fad629079b..2de584199a 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -7,10 +7,7 @@ use std::sync::Arc; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -24,7 +21,6 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::Result; @@ -175,7 +171,7 @@ impl, V: Versions> TaskState // Cancel the old timeout task cancel_task(std::mem::replace( &mut self.timeout_task, - async_spawn(async {}), + tokio::spawn(async {}), )) .await; } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index d96efcfb3d..0301240b6b 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -7,10 +7,7 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::spawn_blocking; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -32,8 +29,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use sha2::{Digest, Sha256}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::spawn_blocking; +use tokio::{spawn, task::spawn_blocking}; use tracing::instrument; use utils::anytrace::*; @@ -187,7 +183,6 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState> + Send to_task.broadcast_direct(Arc::new(event)).await.unwrap(); } - if async_timeout(Duration::from_secs(2), test_future) - .await - .is_err() - { - panic!("Test timeout out before all all expected outputs received"); - } + assert!( + timeout(Duration::from_secs(2), test_future).await.is_ok(), + "Test timeout out before all all expected outputs received" + ); } /// Handles an event for the Test Harness Task. If the event is expected, remove it from diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 26aca6543f..229fcfa0a1 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -10,10 +10,7 @@ use std::{ }; use async_broadcast::{InactiveReceiver, Receiver, SendError, Sender}; -use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ @@ -33,8 +30,7 @@ use hotshot_types::{ utils::{Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::{task::JoinHandle, time::timeout}; use tracing::instrument; use utils::anytrace::*; @@ -80,7 +76,7 @@ pub(crate) async fn fetch_proposal( // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. - async_timeout(REQUEST_TIMEOUT, async move { + timeout(REQUEST_TIMEOUT, async move { // We want to iterate until the proposal is not None, or until we reach the timeout. let mut proposal = None; while proposal.is_none() { @@ -662,9 +658,6 @@ pub(crate) async fn validate_proposal_view_and_certs< /// Cancel a task pub async fn cancel_task(task: JoinHandle) { - #[cfg(async_executor_impl = "async-std")] - task.cancel().await; - #[cfg(async_executor_impl = "tokio")] task.abort(); } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index c515d94e5b..49ae29e964 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -10,10 +10,7 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use futures::future::join_all; use hotshot_task::task::TaskState; @@ -36,7 +33,7 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -#[cfg(async_executor_impl = "tokio")] +use tokio::spawn; use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; @@ -291,7 +288,7 @@ impl< let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let consensus = Arc::clone(&self.consensus); - async_spawn(async move { + spawn(async move { if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), storage, @@ -350,7 +347,7 @@ impl< cancel.append(&mut to_cancel); } self.transmit_tasks = keep; - async_spawn(async move { join_all(cancel).await }); + spawn(async move { join_all(cancel).await }); } /// Parses a `HotShotEvent` and returns a tuple of: (sender's public key, `MessageKind`, `TransmitType`) @@ -610,7 +607,7 @@ impl< let net = Arc::clone(&self.network); let epoch = self.epoch.u64(); let mem = self.quorum_membership.clone(); - async_spawn(async move { + spawn(async move { net.update_view::(view.saturating_sub(1), epoch, &mem) .await; }); @@ -663,7 +660,7 @@ impl< let storage = Arc::clone(&self.storage); let consensus = Arc::clone(&self.consensus); let upgrade_lock = self.upgrade_lock.clone(); - let handle = async_spawn(async move { + let handle = spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 1ac07e6abc..1cc857f335 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -8,8 +8,6 @@ use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use either::Either; use futures::future::join_all; @@ -31,7 +29,6 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; @@ -565,9 +562,6 @@ impl, V: Versions> TaskState async fn cancel_subtasks(&mut self) { while let Some((_, handle)) = self.proposal_dependencies.pop_first() { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index d1fbac8a30..d4abe5dc33 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -9,7 +9,6 @@ use std::sync::Arc; use async_broadcast::{broadcast, Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; use hotshot_types::{ @@ -27,6 +26,7 @@ use hotshot_types::{ utils::{View, ViewInner}, vote::{Certificate, HasViewNumber}, }; +use tokio::spawn; use tracing::instrument; use utils::anytrace::*; @@ -114,7 +114,7 @@ fn spawn_fetch_proposal( sender_private_key: ::PrivateKey, upgrade_lock: UpgradeLock, ) { - async_spawn(async move { + spawn(async move { let lock = upgrade_lock; let _ = fetch_proposal( diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 06d9088bed..f228edbc87 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -9,10 +9,7 @@ use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::{broadcast, Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use futures::future::join_all; use hotshot_task::task::{Task, TaskState}; @@ -28,7 +25,6 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use utils::anytrace::{bail, Result}; @@ -118,7 +114,7 @@ impl, V: Versions> cancel.append(&mut to_cancel); } self.spawned_tasks = keep; - async_spawn(async move { join_all(cancel).await }); + tokio::spawn(async move { join_all(cancel).await }); } /// Handles all consensus events relating to propose and vote-enabling events. @@ -202,9 +198,6 @@ impl, V: Versions> TaskState break; }; for handle in handles { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index a3a58e3874..563fa6fa41 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -8,8 +8,6 @@ use std::{collections::BTreeMap, sync::Arc}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::{ dependency::{AndDependency, EventDependency}, @@ -32,7 +30,6 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use jf_vid::VidScheme; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; @@ -725,9 +722,6 @@ impl, V: Versions> TaskState async fn cancel_subtasks(&mut self) { while let Some((_, handle)) = self.vote_dependencies.pop_last() { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index abacf32ffd..6674ad3e2b 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -14,9 +14,6 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::{ dependency::{Dependency, EventDependency}, @@ -34,8 +31,11 @@ use hotshot_types::{ }; use rand::{seq::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::{ + spawn, + task::JoinHandle, + time::{sleep, timeout}, +}; use tracing::instrument; use utils::anytrace::Result; @@ -132,9 +132,6 @@ impl> TaskState for NetworkRequest }; for handle in handles { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } @@ -204,10 +201,10 @@ impl> NetworkRequestState = spawn(async move { // Do the delay only if primary is up and then start sending if !network.is_primary_down() { - async_sleep(delay).await; + sleep(delay).await; } let mut recipients_it = recipients.iter(); @@ -280,7 +277,7 @@ impl> NetworkRequestState NetworkResponseState { .is_none() { // Sleep in hope we receive txns in the meantime - async_sleep(TXNS_TIMEOUT).await; + sleep(TXNS_TIMEOUT).await; Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, @@ -209,5 +205,5 @@ pub fn run_response_task( event_stream: Receiver>>, sender: Sender>>, ) -> JoinHandle<()> { - async_spawn(task_state.run_response_loop(event_stream, sender)) + spawn(task_state.run_response_loop(event_stream, sender)) } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index cac4b9ad06..2a9d7f1ab6 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -10,7 +10,6 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_timeout}; use async_trait::async_trait; use futures::{future::join_all, stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; @@ -31,6 +30,7 @@ use hotshot_types::{ utils::ViewInner, vid::{VidCommitment, VidPrecomputeData}, }; +use tokio::time::{sleep, timeout}; use tracing::instrument; use url::Url; use utils::anytrace::*; @@ -274,7 +274,7 @@ impl, V: Versions> TransactionTask let start = Instant::now(); - let maybe_auction_result = async_timeout( + let maybe_auction_result = timeout( self.builder_timeout, self.auction_results_provider .fetch_auction_result(block_view), @@ -293,7 +293,7 @@ impl, V: Versions> TransactionTask builder_urls.push(self.fallback_builder_url.clone()); for url in builder_urls { - futures.push(async_timeout( + futures.push(timeout( self.builder_timeout.saturating_sub(start.elapsed()), async { let client = BuilderClientMarketplace::new(url); @@ -502,7 +502,7 @@ impl, V: Versions> TransactionTask Err(e) if task_start_time.elapsed() >= self.builder_timeout => break Err(e), _ => { // We still have time, will re-try in a bit - async_sleep(RETRY_DELAY).await; + sleep(RETRY_DELAY).await; continue; } } @@ -577,7 +577,7 @@ impl, V: Versions> TransactionTask }; while task_start_time.elapsed() < self.builder_timeout { - match async_timeout( + match timeout( self.builder_timeout .saturating_sub(task_start_time.elapsed()), self.block_from_builder(parent_comm, parent_view, &parent_comm_sig), @@ -593,7 +593,7 @@ impl, V: Versions> TransactionTask Ok(Err(err)) => { tracing::info!("Couldn't get a block: {err:#}"); // pause a bit - async_sleep(RETRY_DELAY).await; + sleep(RETRY_DELAY).await; continue; } @@ -648,7 +648,7 @@ impl, V: Versions> TransactionTask break; } } - let timeout = async_sleep(std::cmp::max( + let timeout = sleep(std::cmp::max( query_start .elapsed() .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 2172d00d41..4b810196da 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -12,10 +12,7 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -34,8 +31,8 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber, Vote}, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tokio::{spawn, time::sleep}; use tracing::instrument; use utils::anytrace::*; @@ -579,14 +576,14 @@ impl, V: Versions> cancel_task(timeout_task).await; } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let phase = last_seen_certificate; let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); broadcast_event( @@ -671,14 +668,14 @@ impl, V: Versions> if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let phase = last_seen_certificate; let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", relay @@ -768,13 +765,13 @@ impl, V: Versions> .await; } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( @@ -833,14 +830,14 @@ impl, V: Versions> } } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; let last_cert = last_seen_certificate.clone(); async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!( "Vote sending timed out in ViewSyncTimeout relay = {}", relay diff --git a/task/Cargo.toml b/task/Cargo.toml index 47261bfbd5..89461a58cb 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -11,20 +11,15 @@ edition = { workspace = true } futures = { workspace = true } async-broadcast = { workspace = true } tracing = { workspace = true } -async-compatibility-layer = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } utils = { path = "../utils" } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true, features = [ "time", "rt-multi-thread", "macros", "sync", ] } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true, features = ["attributes"] } [lints] workspace = true diff --git a/task/src/dependency.rs b/task/src/dependency.rs index c4eee030a8..7b3d7dfa0b 100644 --- a/task/src/dependency.rs +++ b/task/src/dependency.rs @@ -189,8 +189,7 @@ mod tests { } } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn it_works() { let (tx, rx) = broadcast(10); @@ -205,8 +204,8 @@ mod tests { let result = and.completed().await; assert_eq!(result, Some(vec![5; 5])); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + + #[tokio::test(flavor = "multi_thread")] async fn or_dep() { let (tx, rx) = broadcast(10); @@ -220,8 +219,7 @@ mod tests { assert_eq!(result, Some(5)); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn and_or_dep() { let (tx, rx) = broadcast(10); @@ -238,8 +236,7 @@ mod tests { assert_eq!(result, Some(vec![6, 5])); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn or_and_dep() { let (tx, rx) = broadcast(10); @@ -256,8 +253,7 @@ mod tests { assert_eq!(result, Some(vec![4, 5])); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn many_and_dep() { let (tx, rx) = broadcast(10); diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs index 2ebe4fc032..fb196151eb 100644 --- a/task/src/dependency_task.rs +++ b/task/src/dependency_task.rs @@ -4,10 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -#[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn, JoinHandle}; use futures::Future; -#[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; use crate::dependency::Dependency; @@ -57,10 +54,7 @@ mod test { use std::time::Duration; use async_broadcast::{broadcast, Receiver, Sender}; - #[cfg(async_executor_impl = "async-std")] - use async_std::task::sleep; use futures::{stream::FuturesOrdered, StreamExt}; - #[cfg(async_executor_impl = "tokio")] use tokio::time::sleep; use super::*; @@ -89,8 +83,7 @@ mod test { EventDependency::new(rx, Box::new(move |v| *v == val)) } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] // allow unused for tokio because it's a test #[allow(unused_must_use)] async fn it_works() { @@ -105,8 +98,7 @@ mod test { join_handle.await; } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn many_works() { let (tx, rx) = broadcast(20); let (res_tx, mut res_rx) = broadcast(20); diff --git a/task/src/task.rs b/task/src/task.rs index 6d0e0ca461..70367b0cef 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -7,14 +7,8 @@ use std::sync::Arc; use async_broadcast::{Receiver, RecvError, Sender}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; -#[cfg(async_executor_impl = "async-std")] -use futures::future::join_all; -#[cfg(async_executor_impl = "tokio")] use futures::future::try_join_all; -#[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; use utils::anytrace::Result; @@ -133,9 +127,6 @@ impl ConsensusTaskRegistry { let handles = &mut self.task_handles; while let Some(handle) = handles.pop() { - #[cfg(async_executor_impl = "async-std")] - let mut task_state = handle.await; - #[cfg(async_executor_impl = "tokio")] let mut task_state = handle.await.unwrap(); task_state.cancel_subtasks().await; @@ -153,12 +144,7 @@ impl ConsensusTaskRegistry { /// # Panics /// Panics if one of the tasks panicked pub async fn join_all(self) -> Vec>> { - #[cfg(async_executor_impl = "async-std")] - let states = join_all(self.task_handles).await; - #[cfg(async_executor_impl = "tokio")] - let states = try_join_all(self.task_handles).await.unwrap(); - - states + try_join_all(self.task_handles).await.unwrap() } } @@ -187,9 +173,6 @@ impl NetworkTaskRegistry { /// tasks being joined return an error. pub async fn shutdown(&mut self) { let handles = std::mem::take(&mut self.handles); - #[cfg(async_executor_impl = "async-std")] - join_all(handles).await; - #[cfg(async_executor_impl = "tokio")] try_join_all(handles) .await .expect("Failed to join all tasks during shutdown"); diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 79a11a7502..8f8e59500f 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -12,12 +12,12 @@ slow-tests = [] gpu-vid = ["hotshot-types/gpu-vid"] rewind = ["hotshot/rewind"] test-srs = ["jf-vid/test-srs"] +broken_3_chain_fixed = [] [dependencies] automod = "1.0.14" anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bitvec = { workspace = true } @@ -51,14 +51,5 @@ vec1 = { workspace = true } reqwest = { workspace = true } url = { workspace = true } itertools = "0.13.0" - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints.rust] -unexpected_cfgs = { level = "allow", check-cfg = [ - 'cfg(async_executor_impl, values("tokio", "async-std"))', -] } \ No newline at end of file diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index b047c7710e..5dcde74d37 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -7,7 +7,6 @@ use std::collections::HashMap; use async_broadcast::Receiver; -use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use futures::Stream; use hotshot::{traits::BlockPayload, types::Event}; @@ -28,6 +27,7 @@ use hotshot_types::{ }, }; use tide_disco::{method::ReadState, App, Url}; +use tokio::spawn; use vbs::version::StaticVersionType; use crate::test_builder::BuilderChange; @@ -85,7 +85,7 @@ pub fn run_builder_source( + v0_1::data_source::BuilderDataSource + v0_3::data_source::BuilderDataSource, { - async_spawn(async move { + spawn(async move { let start_builder = |url: Url, source: Source| -> _ { let builder_api_0_1 = hotshot_builder_api::v0_1::builder::define_api::( &Options::default(), @@ -100,7 +100,7 @@ pub fn run_builder_source( .expect("Failed to register the builder API 0.1") .register_module(MARKETPLACE_BUILDER_MODULE, builder_api_0_3) .expect("Failed to register the builder API 0.3"); - async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) + spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) }; let mut handle = Some(start_builder(url.clone(), source.clone())); @@ -112,10 +112,7 @@ pub fn run_builder_source( } BuilderChange::Down => { if let Some(handle) = handle.take() { - #[cfg(async_executor_impl = "tokio")] handle.abort(); - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; } } _ => {} @@ -138,7 +135,7 @@ pub fn run_builder_source_0_1( Source: Clone + Send + Sync + tide_disco::method::ReadState + 'static, ::State: Sync + Send + v0_1::data_source::BuilderDataSource, { - async_spawn(async move { + spawn(async move { let start_builder = |url: Url, source: Source| -> _ { let builder_api = hotshot_builder_api::v0_1::builder::define_api::( &Options::default(), @@ -147,7 +144,7 @@ pub fn run_builder_source_0_1( let mut app: App = App::with_state(source); app.register_module(LEGACY_BUILDER_MODULE, builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) + spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) }; let mut handle = Some(start_builder(url.clone(), source.clone())); @@ -159,10 +156,7 @@ pub fn run_builder_source_0_1( } BuilderChange::Down => { if let Some(handle) = handle.take() { - #[cfg(async_executor_impl = "tokio")] handle.abort(); - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; } } _ => {} diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index 0d6d767ff4..da25cf2b9c 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -16,7 +16,6 @@ use std::{ }; use async_broadcast::{broadcast, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::{future::BoxFuture, Stream, StreamExt}; @@ -36,6 +35,7 @@ use hotshot_types::{ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, Url}; +use tokio::{spawn, time::sleep}; use super::{ build_block, run_builder_source_0_1, BlockEntry, BuilderTask, TestBuilderImplementation, @@ -156,7 +156,7 @@ impl> RandomBuilderTask { time_per_block.as_millis(), ); } - async_sleep(time_per_block.saturating_sub(start.elapsed())).await; + sleep(time_per_block.saturating_sub(start.elapsed())).await; } } } @@ -169,7 +169,7 @@ where mut self: Box, mut stream: Box> + std::marker::Unpin + Send + 'static>, ) { - let mut task = Some(async_spawn(Self::build_blocks( + let mut task = Some(spawn(Self::build_blocks( self.config.clone(), self.num_storage_nodes, self.pub_key.clone(), @@ -177,7 +177,7 @@ where self.blocks.clone(), ))); - async_spawn(async move { + spawn(async move { loop { match stream.next().await { None => { @@ -189,7 +189,7 @@ where match change { BuilderChange::Up => { if task.is_none() { - task = Some(async_spawn(Self::build_blocks( + task = Some(spawn(Self::build_blocks( self.config.clone(), self.num_storage_nodes, self.pub_key.clone(), @@ -200,10 +200,7 @@ where } BuilderChange::Down => { if let Some(handle) = task.take() { - #[cfg(async_executor_impl = "tokio")] handle.abort(); - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; } } BuilderChange::FailClaims(_) => {} diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 63a28d854c..371ced1d47 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -15,7 +15,6 @@ use std::{ }; use async_broadcast::{broadcast, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use committable::{Commitment, Committable}; @@ -45,6 +44,7 @@ use hotshot_types::{ }; use lru::LruCache; use tide_disco::{method::ReadState, App, Url}; +use tokio::spawn; use vbs::version::StaticVersionType; use super::{build_block, run_builder_source, BlockEntry, BuilderTask, TestBuilderImplementation}; @@ -353,7 +353,7 @@ impl SimpleBuilderSource { .register_module::(MARKETPLACE_BUILDER_MODULE, builder_api_0_3) .expect("Failed to register builder API 0.3"); - async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())); + spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())); } } @@ -379,7 +379,7 @@ impl BuilderTask for SimpleBuilderTask { mut self: Box, mut stream: Box> + std::marker::Unpin + Send + 'static>, ) { - async_spawn(async move { + spawn(async move { let mut should_build_blocks = true; loop { match stream.next().await { diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 711794d76b..96de8616e9 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -7,12 +7,9 @@ use std::time::Duration; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_spawn, async_timeout}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use hotshot_task_impls::helpers::broadcast_event; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tokio::{spawn, time::timeout}; use crate::test_task::TestEvent; @@ -27,8 +24,8 @@ pub struct CompletionTask { impl CompletionTask { pub fn run(mut self) -> JoinHandle<()> { - async_spawn(async move { - if async_timeout(self.duration, self.wait_for_shutdown()) + spawn(async move { + if timeout(self.duration, self.wait_for_shutdown()) .await .is_err() { diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index cb2c8119ed..1b55a2b841 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -12,10 +12,7 @@ use std::{ }; use async_broadcast::{broadcast, Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use futures::future::join_all; use hotshot::{ traits::TestableNodeImplementation, @@ -43,7 +40,7 @@ use hotshot_types::{ HotShotConfig, ValidatorConfig, }; use tide_disco::Url; -#[cfg(async_executor_impl = "tokio")] +use tokio::spawn; use tokio::task::JoinHandle; #[allow(deprecated)] use tracing::info; @@ -280,53 +277,29 @@ where let mut error_list = vec![]; - #[cfg(async_executor_impl = "async-std")] - { - let results = join_all(task_futs).await; - for result in results { - match result { + let results = join_all(task_futs).await; + + for result in results { + match result { + Ok(res) => match res { TestResult::Pass => { info!("Task shut down successfully"); } TestResult::Fail(e) => error_list.push(e), + }, + Err(e) => { + tracing::error!("Error Joining the test task {:?}", e); } } - if let Some(handle) = txn_handle { - handle.cancel().await; - } - // Shutdown all of the servers at the end - // Aborting here doesn't cause any problems because we don't maintain any state - if let Some(solver_server) = solver_server { - solver_server.1.cancel().await; - } } - #[cfg(async_executor_impl = "tokio")] - { - let results = join_all(task_futs).await; - - for result in results { - match result { - Ok(res) => match res { - TestResult::Pass => { - info!("Task shut down successfully"); - } - TestResult::Fail(e) => error_list.push(e), - }, - Err(e) => { - tracing::error!("Error Joining the test task {:?}", e); - } - } - } - - if let Some(handle) = txn_handle { - handle.abort(); - } - // Shutdown all of the servers at the end - // Aborting here doesn't cause any problems because we don't maintain any state - if let Some(solver_server) = solver_server { - solver_server.1.abort(); - } + if let Some(handle) = txn_handle { + handle.abort(); + } + // Shutdown all of the servers at the end + // Aborting here doesn't cause any problems because we don't maintain any state + if let Some(solver_server) = solver_server { + solver_server.1.abort(); } let mut nodes = handles.write().await; @@ -336,9 +309,6 @@ where } tracing::info!("Nodes shtudown"); - #[cfg(async_executor_impl = "async-std")] - completion_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] completion_handle.abort(); assert!( @@ -406,7 +376,7 @@ where // Then, fire it up as a background thread. self.solver_server = Some(( solver_url.clone(), - async_spawn(async move { + spawn(async move { solver_state .run::(solver_url) .await diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 8d685914d3..036b0d6b5d 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -8,10 +8,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; use futures::future::select_all; use hotshot::{ @@ -26,8 +23,11 @@ use hotshot_types::{ node_implementation::{NodeType, Versions}, }, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::{spawn, JoinHandle}; +use tokio::task::JoinHandle; +use tokio::{ + spawn, + time::{sleep, timeout}, +}; use tracing::error; use crate::test_runner::Node; @@ -134,7 +134,7 @@ impl TestTask { messages.push(receiver.recv()); } - match async_timeout(Duration::from_millis(2500), select_all(messages)).await { + match timeout(Duration::from_millis(2500), select_all(messages)).await { Ok((Ok(input), id, _)) => { let _ = S::handle_event(&mut self.state, (input, id)) .await @@ -142,7 +142,7 @@ impl TestTask { } Ok((Err(e), _id, _)) => { error!("Error from one channel in test task {:?}", e); - async_sleep(Duration::from_millis(4000)).await; + sleep(Duration::from_millis(4000)).await; } _ => {} }; @@ -173,7 +173,7 @@ pub async fn add_network_message_test_task< let network = Arc::clone(&net); let mut state = network_state.clone(); - async_spawn(async move { + spawn(async move { loop { // Get the next message from the network let message = match network.recv_message().await { diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 03fa0be103..5d62d4232d 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -7,15 +7,12 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::Receiver; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; use hotshot_types::traits::node_implementation::{NodeType, Versions}; use rand::thread_rng; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; +use tokio::{spawn, time::sleep}; use crate::{test_runner::Node, test_task::TestEvent}; @@ -37,9 +34,9 @@ pub struct TxnTask, V: Ver impl, V: Versions> TxnTask { pub fn run(mut self) -> JoinHandle<()> { - async_spawn(async move { + spawn(async move { loop { - async_sleep(self.duration).await; + sleep(self.duration).await; if let Ok(TestEvent::Shutdown) = self.shutdown_chan.try_recv() { break; } diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index f63560078f..5b0a6cf5c2 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -9,7 +9,6 @@ use std::{ time::{Duration, Instant}, }; -use async_compatibility_layer::art::async_sleep; use hotshot_builder_api::v0_1::block_info::AvailableBlockData; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, @@ -29,13 +28,10 @@ use hotshot_types::{ }, }; use tide_disco::Url; +use tokio::time::sleep; #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_random_block_builder() { let port = portpicker::pick_unused_port().expect("No free ports"); @@ -81,7 +77,7 @@ async fn test_random_block_builder() { }; // Wait for at least one block to be built - async_sleep(Duration::from_millis(20)).await; + sleep(Duration::from_millis(20)).await; if builder_started.elapsed() > Duration::from_secs(2) { panic!("Builder failed to provide blocks in two seconds"); diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ad417a0431..ca7839e575 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -32,11 +32,9 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_da_task() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -130,11 +128,9 @@ async fn test_da_task() { run_test![inputs, da_script].await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_da_task_storage_failure() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index f19e3b0798..c9f7ae33db 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -17,12 +17,12 @@ use hotshot_testing::{ use tracing::instrument; /// libp2p network test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -48,12 +48,12 @@ async fn libp2p_network() { } /// libp2p network test with failures -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network_failures_2() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -95,13 +95,13 @@ async fn libp2p_network_failures_2() { } /// stress test for libp2p -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] #[ignore] async fn test_stress_libp2p_network() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription::default_stress(); metadata diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 26367ab7b4..540baaf447 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -7,7 +7,6 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::Sender; -use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; @@ -25,12 +24,12 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; +use tokio::time::timeout; // Test that the event task sends a message, and the message task receives it // and emits the proper event #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[allow(clippy::too_many_lines)] async fn test_network_task() { use std::collections::BTreeMap; @@ -38,8 +37,7 @@ async fn test_network_task() { use futures::StreamExt; use hotshot_types::traits::network::Topic; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let builder: TestDescription = TestDescription::default_multiple_rounds(); @@ -101,7 +99,7 @@ async fn test_network_task() { .await .unwrap(); let res: Arc> = - async_timeout(Duration::from_millis(100), out_rx_internal.recv_direct()) + timeout(Duration::from_millis(100), out_rx_internal.recv_direct()) .await .expect("timed out waiting for response") .expect("channel closed"); @@ -112,15 +110,13 @@ async fn test_network_task() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_network_external_mnessages() { use hotshot::types::EventType; use hotshot_testing::helpers::build_system_handle_from_launcher; use hotshot_types::message::RecipientList; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let builder: TestDescription = TestDescription::default_multiple_rounds(); @@ -145,14 +141,11 @@ async fn test_network_external_mnessages() { .send_external_message(vec![1, 2], RecipientList::Direct(handles[2].public_key())) .await .unwrap(); - let event = async_compatibility_layer::art::async_timeout( - Duration::from_millis(100), - event_streams[2].recv(), - ) - .await - .unwrap() - .unwrap() - .event; + let event = tokio::time::timeout(Duration::from_millis(100), event_streams[2].recv()) + .await + .unwrap() + .unwrap() + .event; // check that 2 received the message assert!(matches!( @@ -168,14 +161,11 @@ async fn test_network_external_mnessages() { .send_external_message(vec![2, 1], RecipientList::Direct(handles[1].public_key())) .await .unwrap(); - let event = async_compatibility_layer::art::async_timeout( - Duration::from_millis(100), - event_streams[1].recv(), - ) - .await - .unwrap() - .unwrap() - .event; + let event = tokio::time::timeout(Duration::from_millis(100), event_streams[1].recv()) + .await + .unwrap() + .unwrap() + .event; // check that 1 received the message assert!(matches!( @@ -193,14 +183,11 @@ async fn test_network_external_mnessages() { .unwrap(); // All other nodes get the broadcast for stream in event_streams.iter_mut().skip(1) { - let event = async_compatibility_layer::art::async_timeout( - Duration::from_millis(100), - stream.recv(), - ) - .await - .unwrap() - .unwrap() - .event; + let event = tokio::time::timeout(Duration::from_millis(100), stream.recv()) + .await + .unwrap() + .unwrap() + .event; assert!(matches!( event, EventType::ExternalMessageReceived { @@ -210,21 +197,19 @@ async fn test_network_external_mnessages() { )); } // No event on 0 even after short sleep - async_compatibility_layer::art::async_sleep(Duration::from_millis(2)).await; + tokio::time::sleep(Duration::from_millis(2)).await; assert!(event_streams[0].is_empty()); } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_network_storage_fail() { use std::collections::BTreeMap; use futures::StreamExt; use hotshot_types::traits::network::Topic; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let builder: TestDescription = TestDescription::default_multiple_rounds(); @@ -286,6 +271,6 @@ async fn test_network_storage_fail() { ))) .await .unwrap(); - let res = async_timeout(Duration::from_millis(100), out_rx_internal.recv_direct()).await; + let res = timeout(Duration::from_millis(100), out_rx_internal.recv_direct()).await; assert!(res.is_err()); } diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index d5a83a1953..e2255d605b 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -37,8 +37,7 @@ use hotshot_types::{ }; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_recv_task() { use std::time::Duration; @@ -48,8 +47,7 @@ async fn test_quorum_proposal_recv_task() { }; use hotshot_types::data::Leaf; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -129,8 +127,7 @@ async fn test_quorum_proposal_recv_task() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_recv_task_liveness_check() { use std::time::Duration; @@ -143,8 +140,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { }; use hotshot_types::{data::Leaf, vote::HasViewNumber}; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(4) .await diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index ae5fa54fdc..d0fb03dee7 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -40,14 +40,12 @@ use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_quorum_proposal_view_1() { use hotshot_testing::script::{Expectations, TaskScript}; use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 1; let handle = build_system_handle::(node_id) @@ -143,13 +141,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 3; let handle = build_system_handle::(node_id) @@ -355,13 +351,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_qc_timeout() { use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 3; let handle = build_system_handle::(node_id) @@ -446,15 +440,13 @@ async fn test_quorum_proposal_task_qc_timeout() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_view_sync() { use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 2; let handle = build_system_handle::(node_id) @@ -541,13 +533,11 @@ async fn test_quorum_proposal_task_view_sync() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_liveness_check() { use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 3; let handle = build_system_handle::(node_id) @@ -750,11 +740,9 @@ async fn test_quorum_proposal_task_liveness_check() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_with_incomplete_events() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b167910684..2049a7d91f 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -26,8 +26,7 @@ use hotshot_types::{ const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ @@ -36,8 +35,7 @@ async fn test_quorum_vote_task_success() { view_generator::TestViewGenerator, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -101,16 +99,14 @@ async fn test_quorum_vote_task_success() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -190,16 +186,14 @@ async fn test_quorum_vote_task_miss_dependency() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_vote_task_incorrect_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index b9834b98c8..d002257a7f 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -18,11 +18,9 @@ use hotshot_types::{ use vbs::version::StaticVersionType; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_transaction_task_leader_two_views_in_a_row() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // Build the API for node 2. let node_id = 2; diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index b82804f4f8..26f9e9b462 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -46,19 +46,14 @@ use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] /// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. async fn test_upgrade_task_with_proposal() { use std::sync::Arc; use hotshot_testing::helpers::build_system_handle; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(3) .await diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 658a10c386..af09061c56 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -38,15 +38,13 @@ use vbs::version::Version; const TIMEOUT: Duration = Duration::from_millis(65); -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] /// Tests that we correctly update our internal quorum vote state when reaching a decided upgrade /// certificate. async fn test_upgrade_task_with_vote() { use hotshot_testing::helpers::build_system_handle; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index c2ca9aec09..2ebff4dec7 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -33,13 +33,11 @@ use jf_vid::{precomputable::Precomputable, VidScheme}; use vbs::version::StaticVersionType; use vec1::vec1; -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_vid_task() { use hotshot_types::message::Proposal; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // Build the API for node 2. let handle = build_system_handle::(2) diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 145885a657..74a63cee82 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -16,11 +16,9 @@ use hotshot_types::{ }; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_view_sync_task() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // Build the API for node 5. let handle = build_system_handle::(5) diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 742ba10f53..b8b08658b5 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -1,7 +1,6 @@ use std::time::Duration; use async_broadcast::broadcast; -use async_compatibility_layer::art::async_timeout; use futures::StreamExt; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_task::dependency_task::HandleDepOutput; @@ -18,17 +17,16 @@ use hotshot_types::{ vote::HasViewNumber, }; use itertools::Itertools; +use tokio::time::timeout; const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_vote_dependency_handle() { use std::sync::Arc; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // We use a node ID of 2 here abitrarily. We just need it to build the system handle. let node_id = 2; @@ -111,9 +109,7 @@ async fn test_vote_dependency_handle() { // We need to avoid re-processing the inputs during our output evaluation. This part here is not // strictly necessary, but it makes writing the outputs easier. let mut output_events = vec![]; - while let Ok(Ok(received_output)) = - async_timeout(TIMEOUT, event_receiver.recv_direct()).await - { + while let Ok(Ok(received_output)) = timeout(TIMEOUT, event_receiver.recv_direct()).await { output_events.push(received_output); } diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index 3d4702a3ec..bf48a2558c 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -19,8 +19,7 @@ use hotshot_testing::{ }; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup() { use std::time::Duration; @@ -33,8 +32,8 @@ async fn test_catchup() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -79,8 +78,7 @@ async fn test_catchup() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_cdn() { use std::time::Duration; @@ -93,8 +91,8 @@ async fn test_catchup_cdn() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -134,8 +132,7 @@ async fn test_catchup_cdn() { /// Test that one node catches up and has successful views after coming back #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_one_node() { use std::time::Duration; @@ -147,8 +144,8 @@ async fn test_catchup_one_node() { spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -190,8 +187,7 @@ async fn test_catchup_one_node() { /// Same as `test_catchup` except we start the nodes after their leadership so they join during view sync #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_in_view_sync() { use std::time::Duration; @@ -203,8 +199,8 @@ async fn test_catchup_in_view_sync() { spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -253,8 +249,7 @@ async fn test_catchup_in_view_sync() { // Almost the same as `test_catchup`, but with catchup nodes reloaded from anchor leaf rather than // initialized from genesis. #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_reload() { use std::time::Duration; @@ -267,8 +262,8 @@ async fn test_catchup_reload() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 3e56630313..875e5b40e9 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -7,7 +7,6 @@ #![allow(clippy::panic)] use std::{sync::Arc, time::Duration}; -use async_compatibility_layer::{art::async_timeout, logging::setup_logging}; use hotshot::{ traits::{ election::static_committee::StaticCommittee, @@ -34,6 +33,7 @@ use hotshot_types::{ }; use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::{Deserialize, Serialize}; +use tokio::time::timeout; use tracing::{instrument, trace}; #[derive( @@ -120,22 +120,22 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key = pubkey(); } // // Spawning a two MemoryNetworks and connecting them should produce no errors -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn memory_network_spawn_double() { - setup_logging(); + hotshot::helpers::initialize_logging(); let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key_1 = pubkey(); @@ -143,11 +143,11 @@ async fn memory_network_spawn_double() { } // Check to make sure direct queue works -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn memory_network_direct_queue() { - setup_logging(); + hotshot::helpers::initialize_logging(); // Create some dummy messages // Make and connect the networking instances @@ -177,11 +177,9 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network2.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network2.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } @@ -200,18 +198,16 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network1.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network1.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } } // Check to make sure direct queue works -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn memory_network_broadcast_queue() { // Make and connect the networking instances @@ -238,11 +234,9 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network2.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network2.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } @@ -265,21 +259,18 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network1.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network1.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] #[allow(deprecated)] async fn memory_network_test_in_flight_message_count() { - setup_logging(); + hotshot::helpers::initialize_logging(); let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); diff --git a/testing/tests/tests_5/broken_3_chain.rs b/testing/tests/tests_5/broken_3_chain.rs index 9d020cb144..e785e0ae42 100644 --- a/testing/tests/tests_5/broken_3_chain.rs +++ b/testing/tests/tests_5/broken_3_chain.rs @@ -12,12 +12,12 @@ use hotshot_testing::{ use tracing::instrument; /// Broken 3-chain test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn broken_3_chain() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 81222cd088..67fb0e4030 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -19,14 +19,13 @@ use tracing::instrument; /// A run with both the CDN and libp2p functioning properly #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network() { use hotshot_testing::block_builder::SimpleBuilderImplementation; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -54,12 +53,12 @@ async fn test_combined_network() { } // A run where the CDN crashes part-way through -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network_cdn_crash() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -100,12 +99,12 @@ async fn test_combined_network_cdn_crash() { // A run where the CDN crashes partway through // and then comes back up -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network_reup() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -151,12 +150,12 @@ async fn test_combined_network_reup() { } // A run where half of the nodes disconnect from the CDN -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network_half_dc() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -224,13 +223,13 @@ fn generate_random_node_changes( } // A fuzz test, where random network events take place on all nodes -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] #[ignore] async fn test_stress_combined_network_fuzzy() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { num_bootstrap_nodes: 10, num_nodes_with_stake: 20, diff --git a/testing/tests/tests_5/fake_solver.rs b/testing/tests/tests_5/fake_solver.rs index 27f1c91bb8..cfdd0a3b3b 100644 --- a/testing/tests/tests_5/fake_solver.rs +++ b/testing/tests/tests_5/fake_solver.rs @@ -1,4 +1,3 @@ -use async_compatibility_layer::art::async_spawn; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResult, node_types::TestTypes, }; @@ -9,12 +8,12 @@ use tracing::instrument; use url::Url; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_non_permissioned_no_error() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new( None, /* 0% error rate */ @@ -33,7 +32,7 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -49,9 +48,6 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { .await .unwrap(); - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); assert_eq!(resp.urls[0], Url::parse("http://localhost:1111/").unwrap()); @@ -60,12 +56,12 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_non_permissioned_with_errors() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); @@ -78,7 +74,7 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -132,9 +128,6 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { } } - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); // Assert over the payloads with a 50% error rate. @@ -147,12 +140,12 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_permissioned_no_error() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new( None, /* 0% error rate */ @@ -174,7 +167,7 @@ async fn test_fake_solver_fetch_permissioned_no_error() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -198,9 +191,6 @@ async fn test_fake_solver_fetch_permissioned_no_error() { .await .unwrap(); - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); assert_eq!(resp.urls[0], Url::parse("http://localhost:1111/").unwrap()); @@ -209,12 +199,12 @@ async fn test_fake_solver_fetch_permissioned_no_error() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_permissioned_with_errors() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); @@ -230,7 +220,7 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -288,9 +278,6 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { } } - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); // Assert over the payloads with a 50% error rate. diff --git a/testing/tests/tests_5/push_cdn.rs b/testing/tests/tests_5/push_cdn.rs index 97e3dc0cb2..b90a038d99 100644 --- a/testing/tests/tests_5/push_cdn.rs +++ b/testing/tests/tests_5/push_cdn.rs @@ -6,7 +6,6 @@ use std::time::Duration; -use async_compatibility_layer::logging::shutdown_logging; use hotshot_example_types::node_types::{PushCdnImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, @@ -17,12 +16,12 @@ use hotshot_testing::{ use tracing::instrument; /// Push CDN network test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn push_cdn_network() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -45,5 +44,4 @@ async fn push_cdn_network() { .launch() .run_test::() .await; - shutdown_logging(); } diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 9809ea7eb5..4466a4e2f6 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -5,8 +5,7 @@ // along with the HotShot repository. If not, see . #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_timeout() { use std::time::Duration; @@ -18,8 +17,8 @@ async fn test_timeout() { spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -62,8 +61,7 @@ async fn test_timeout() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_timeout_libp2p() { use std::time::Duration; @@ -77,8 +75,8 @@ async fn test_timeout_libp2p() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 0e4f611a42..8bc6e2d885 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -18,12 +18,11 @@ use hotshot_types::traits::network::{ }; use tracing::instrument; -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network_sync() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -49,8 +48,7 @@ async fn libp2p_network_sync() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_sync() { use std::time::Duration; @@ -60,8 +58,8 @@ async fn test_memory_network_sync() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( @@ -82,13 +80,12 @@ async fn test_memory_network_sync() { .await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] #[instrument] async fn libp2p_network_async() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -122,9 +119,8 @@ async fn libp2p_network_async() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[ignore] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_async() { use std::time::Duration; @@ -134,8 +130,8 @@ async fn test_memory_network_async() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -169,8 +165,7 @@ async fn test_memory_network_async() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_partially_sync() { use std::time::Duration; @@ -180,8 +175,8 @@ async fn test_memory_network_partially_sync() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, @@ -220,12 +215,11 @@ async fn test_memory_network_partially_sync() { .await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network_partially_sync() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, @@ -261,9 +255,8 @@ async fn libp2p_network_partially_sync() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[ignore] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_chaos() { use std::time::Duration; @@ -273,8 +266,8 @@ async fn test_memory_network_chaos() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( @@ -299,13 +292,12 @@ async fn test_memory_network_chaos() { .await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] #[instrument] async fn libp2p_network_chaos() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, diff --git a/types/Cargo.toml b/types/Cargo.toml index f2f161c33c..0184255278 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -13,7 +13,6 @@ ark-ff = { workspace = true } ark-serialize = { workspace = true } ark-srs = { version = "0.3.1" } ark-std = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } @@ -57,16 +56,12 @@ serde_json = { workspace = true } surf-disco = { workspace = true } toml = { workspace = true } clap = { workspace = true } +tokio = { workspace = true } [features] gpu-vid = ["jf-vid/gpu-vid"] test-srs = ["jf-vid/test-srs"] -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } [lints] workspace = true diff --git a/types/src/data.rs b/types/src/data.rs index 38092e0969..11cd1ac358 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -18,8 +18,6 @@ use std::{ }; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::spawn_blocking; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; use derivative::Derivative; @@ -27,7 +25,6 @@ use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidSche use rand::Rng; use serde::{Deserialize, Serialize}; use thiserror::Error; -#[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; use utils::anytrace::*; @@ -226,8 +223,6 @@ impl VidDisperse { ) .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) }).await; - #[cfg(async_executor_impl = "tokio")] - // Tokio's JoinHandle's `Output` is `Result`, while in async-std it's just `T` // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); diff --git a/types/src/error.rs b/types/src/error.rs index 80c1baae8d..f6c25e376f 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -14,8 +14,6 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::{data::Leaf, traits::node_implementation::NodeType}; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} /// Error type for `HotShot` #[derive(Debug, Error)] diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 07c1edd807..7d03f17594 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -8,14 +8,12 @@ //! //! Contains types and traits used by `HotShot` to abstract over network access -use async_compatibility_layer::art::async_sleep; use derivative::Derivative; use dyn_clone::DynClone; use futures::Future; use thiserror::Error; +use tokio::{sync::mpsc::error::TrySendError, time::sleep}; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ collections::HashMap, fmt::{Debug, Display}, @@ -25,7 +23,6 @@ use std::{ time::Duration, }; -use async_compatibility_layer::channel::TrySendError; use async_trait::async_trait; use futures::future::join_all; use rand::{ @@ -376,7 +373,7 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'sta } let closure = async move { if sample_keep { - async_sleep(delay).await; + sleep(delay).await; for msg in msgs { send_fn(msg).await; } From 076e5ed71215fb0c26fe36c6280a0845178051ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:28:03 -0500 Subject: [PATCH 1291/1393] Bump thiserror from 1.0.68 to 2.0.0 in the all group (#3868) Bumps the all group with 1 update: [thiserror](https://github.com/dtolnay/thiserror). Updates `thiserror` from 1.0.68 to 2.0.0 - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.68...2.0.0) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-major dependency-group: all ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- orchestrator/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 6dae01b771..557bb0ed5b 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -16,7 +16,7 @@ tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } toml = { workspace = true } -thiserror = "1" +thiserror = "2" csv = "1" vbs = { workspace = true } vec1 = { workspace = true } From 6ad33115b4dfb00e1064ad236ad5ab8fed47a1b5 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 8 Nov 2024 12:33:30 -0800 Subject: [PATCH 1292/1393] Add builder transaction status API (#3860) * add tx_status_api endpoint * fmt * add PartialEq for TxnStat * upd hotshot * type * rename to status * new route name * rename to txn_status * fmt * remove api from v0_3 * change statuscode * rename error type --- builder-api/api/v0_1/submit.toml | 10 ++++++++++ builder-api/src/v0_1/builder.rs | 23 +++++++++++++++++++++++ builder-api/src/v0_1/data_source.rs | 7 ++++++- task-impls/src/builder.rs | 1 + testing/src/block_builder/simple.rs | 2 +- 5 files changed, 41 insertions(+), 2 deletions(-) diff --git a/builder-api/api/v0_1/submit.toml b/builder-api/api/v0_1/submit.toml index 929ec45854..71894f77be 100644 --- a/builder-api/api/v0_1/submit.toml +++ b/builder-api/api/v0_1/submit.toml @@ -44,3 +44,13 @@ Submit a list of transactions to builder's private mempool." Returns the corresponding list of transaction hashes """ + +[route.get_status] +PATH = ["status/:transaction_hash"] +METHOD = "GET" +":transaction_hash" = "TaggedBase64" +DOC = """ +Get the transaction's status. + +Returns "pending", "sequenced" or "rejected" with error. +""" \ No newline at end of file diff --git a/builder-api/src/v0_1/builder.rs b/builder-api/src/v0_1/builder.rs index ee70f6b34b..235e7c8606 100644 --- a/builder-api/src/v0_1/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -50,6 +50,15 @@ pub enum BuildError { Error(String), } +/// Enum to keep track on status of a transaction +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +pub enum TransactionStatus { + Pending, + Sequenced { leaf: u64 }, + Rejected { reason: String }, // Rejection reason is in the String format + Unknown, +} + #[derive(Clone, Debug, Error, Deserialize, Serialize)] pub enum Error { #[error("Error processing request: {0}")] @@ -70,6 +79,8 @@ pub enum Error { TxnSubmit(BuildError), #[error("Error getting builder address: {0}")] BuilderAddress(#[from] BuildError), + #[error("Error getting transaction status: {0}")] + TxnStat(BuildError), #[error("Custom error {status}: {message}")] Custom { message: String, status: StatusCode }, } @@ -95,6 +106,7 @@ impl tide_disco::error::Error for Error { Error::TxnSubmit { .. } => StatusCode::INTERNAL_SERVER_ERROR, Error::Custom { .. } => StatusCode::INTERNAL_SERVER_ERROR, Error::BuilderAddress { .. } => StatusCode::INTERNAL_SERVER_ERROR, + Error::TxnStat { .. } => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -239,6 +251,17 @@ where Ok(hashes) } .boxed() + })? + .at("get_status", |req: RequestParams, state| { + async move { + let tx = req + .body_auto::<::Transaction, Ver>(Ver::instance()) + .map_err(Error::TxnUnpack)?; + let hash = tx.commit(); + state.txn_status(hash).await.map_err(Error::TxnStat)?; + Ok(hash) + } + .boxed() })?; Ok(api) } diff --git a/builder-api/src/v0_1/data_source.rs b/builder-api/src/v0_1/data_source.rs index 1c7ee643bf..62f3703762 100644 --- a/builder-api/src/v0_1/data_source.rs +++ b/builder-api/src/v0_1/data_source.rs @@ -14,7 +14,7 @@ use hotshot_types::{ use super::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::BuildError, + builder::{BuildError, TransactionStatus}, }; #[async_trait] @@ -70,4 +70,9 @@ where &self, txns: Vec<::Transaction>, ) -> Result::Transaction>>, BuildError>; + + async fn txn_status( + &self, + txn_hash: Commitment<::Transaction>, + ) -> Result; } diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index fddcd7d845..330f5200d6 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -54,6 +54,7 @@ impl From for BuilderClientError { BuildError::Missing => Self::BlockMissing, BuildError::Error(message) => Self::Api(message), }, + BuilderApiError::TxnStat(source) => Self::Api(source.to_string()), } } } diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 371ced1d47..28aabf26b2 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -24,8 +24,8 @@ use hotshot::{ types::{Event, EventType, SignatureKey}, }; use hotshot_builder_api::{ - v0_1, v0_1::{ + self, block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Error, Options}, }, From c831067155ed2f562a56efb5073e8108286ae42d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 8 Nov 2024 17:29:02 -0500 Subject: [PATCH 1293/1393] Remove unused ValidateStateUpdate Event, Consolidate Shared state insertions (#3866) * Remove unused ValidateStateUpdate Event, consolidate map insertion * fix comments --- hotshot/src/lib.rs | 20 +--- task-impls/src/da.rs | 11 +-- task-impls/src/events.rs | 11 +-- task-impls/src/helpers.rs | 46 +++------ .../src/quorum_proposal_recv/handlers.rs | 26 +---- task-impls/src/quorum_vote/handlers.rs | 31 ++---- testing/src/predicates/event.rs | 11 --- .../tests_1/quorum_proposal_recv_task.rs | 50 ++-------- testing/tests/tests_1/quorum_proposal_task.rs | 98 +++---------------- testing/tests/tests_1/quorum_vote_task.rs | 36 +++---- .../tests_1/upgrade_task_with_proposal.rs | 38 ++----- .../tests/tests_1/upgrade_task_with_vote.rs | 20 ++-- .../tests/tests_1/vote_dependency_handle.rs | 18 ++-- types/src/consensus.rs | 45 ++++++++- 14 files changed, 146 insertions(+), 315 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e6b189d01c..e41120b0b1 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -360,7 +360,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> DaTaskState { UpgradeCertificateFormed(UpgradeCertificate), /* Consensus State Update Events */ - /// A undecided view has been created and added to the validated state storage. - ValidatedStateUpdated(TYPES::View, View), - /// A new locked view has been created (2-chain) LockedViewUpdated(TYPES::View), @@ -323,8 +320,7 @@ impl HotShotEvent { | HotShotEvent::Timeout(view_number) | HotShotEvent::BlockReady(_, view_number) | HotShotEvent::LockedViewUpdated(view_number) - | HotShotEvent::LastDecidedViewUpdated(view_number) - | HotShotEvent::ValidatedStateUpdated(view_number, _) => Some(*view_number), + | HotShotEvent::LastDecidedViewUpdated(view_number) => Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) } @@ -572,9 +568,6 @@ impl Display for HotShotEvent { proposal.data.view_number ) } - HotShotEvent::ValidatedStateUpdated(view_number, _) => { - write!(f, "ValidatedStateUpdated(view_number={view_number:?})") - } HotShotEvent::LockedViewUpdated(view_number) => { write!(f, "LockedViewUpdated(view_number={view_number:?})") } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 229fcfa0a1..8fbfd6251d 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -136,6 +136,12 @@ pub(crate) async fn fetch_proposal( >::from_header(&proposal.data.block_header), ); + if let Err(e) = consensus_writer + .update_leaf(leaf.clone(), Arc::clone(&state), None, upgrade_lock) + .await + { + tracing::trace!("{e:?}"); + } let view = View { view_inner: ViewInner::Leaf { leaf: leaf.commit(upgrade_lock).await, @@ -143,19 +149,6 @@ pub(crate) async fn fetch_proposal( delta: None, }, }; - if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { - tracing::trace!("{e:?}"); - } - - consensus_writer - .update_saved_leaves(leaf.clone(), upgrade_lock) - .await; - - broadcast_event( - HotShotEvent::ValidatedStateUpdated(view_number, view.clone()).into(), - &event_sender, - ) - .await; Ok((leaf, view)) } @@ -450,22 +443,20 @@ pub async fn validate_proposal_safety_and_liveness< let state = Arc::new( >::from_header(&proposal.data.block_header), ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(&validation_info.upgrade_lock).await, - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; { let mut consensus_writer = validation_info.consensus.write().await; - if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { + if let Err(e) = consensus_writer + .update_leaf( + proposed_leaf.clone(), + state, + None, + &validation_info.upgrade_lock, + ) + .await + { tracing::trace!("{e:?}"); } - consensus_writer - .update_saved_leaves(proposed_leaf.clone(), &validation_info.upgrade_lock) - .await; // Update our internal storage of the proposal. The proposal is valid, so // we swallow this error and just log if it occurs. @@ -474,13 +465,6 @@ pub async fn validate_proposal_safety_and_liveness< }; } - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; - let cur_epoch = validation_info.cur_epoch; UpgradeCertificate::validate( &proposal.data.upgrade_certificate, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index d4abe5dc33..88b04d149f 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -44,10 +44,8 @@ use crate::{ #[instrument(skip_all)] async fn validate_proposal_liveness, V: Versions>( proposal: &Proposal>, - event_sender: &Sender>>, validation_info: &ValidationInfo, ) -> Result<()> { - let view_number = proposal.data.view_number(); let mut consensus_writer = validation_info.consensus.write().await; let leaf = Leaf::from_quorum_proposal(&proposal.data); @@ -55,20 +53,13 @@ async fn validate_proposal_liveness>::from_header(&proposal.data.block_header), ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(&validation_info.upgrade_lock).await, - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; - if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { + if let Err(e) = consensus_writer + .update_leaf(leaf.clone(), state, None, &validation_info.upgrade_lock) + .await + { tracing::trace!("{e:?}"); } - consensus_writer - .update_saved_leaves(leaf.clone(), &validation_info.upgrade_lock) - .await; if let Err(e) = validation_info .storage @@ -88,13 +79,6 @@ async fn validate_proposal_liveness() -> Box> -where - TYPES: NodeType, -{ - let info = "ValidatedStateUpdated".to_string(); - let check: EventCallback = Arc::new(move |e: Arc>| { - matches!(e.as_ref(), ValidatedStateUpdated(..)) - }); - Box::new(EventPredicate { check, info }) -} - pub fn vid_share_validated() -> Box> where TYPES: NodeType, diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index e2255d605b..6b82fa8e7f 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -25,6 +25,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::Leaf; use hotshot_types::{ data::ViewNumber, request_response::ProposalRequestPayload, @@ -36,6 +37,8 @@ use hotshot_types::{ }, }; +use std::sync::Arc; + #[cfg(test)] #[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_recv_task() { @@ -45,7 +48,6 @@ async fn test_quorum_proposal_recv_task() { helpers::build_fake_view_with_leaf, script::{Expectations, TaskScript}, }; - use hotshot_types::data::Leaf; hotshot::helpers::initialize_logging(); @@ -72,19 +74,16 @@ async fn test_quorum_proposal_recv_task() { vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); - // These are both updated when we vote. Since we don't have access + // This is updated when we vote. Since we don't have access // to that, we'll just put them in here. consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number, - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } drop(consensus_writer); @@ -97,17 +96,6 @@ async fn test_quorum_proposal_recv_task() { let expectations = vec![Expectations::from_outputs(vec![ exact(QuorumProposalPreliminarilyValidated(proposals[1].clone())), exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - exact(ValidatedStateUpdated( - ViewNumber::new(2), - build_fake_view_with_leaf_and_state( - leaves[1].clone(), - >::from_header( - &proposals[1].data.block_header, - ), - &handle.hotshot.upgrade_lock, - ) - .await, - )), exact(QuorumProposalValidated( proposals[1].clone(), leaves[0].clone(), @@ -169,17 +157,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { // there's no reason not to. let inserted_view_number = view.quorum_proposal.data.view_number(); - // These are both updated when we'd have voted previously. However, since - // we don't have access to that, we'll just put them in here. We - // specifically ignore writing the saved leaves so that way - // the parent lookup fails and we trigger a view liveness check. - consensus_writer - .update_validated_state_map( - inserted_view_number, - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) - .unwrap(); - // The index here is important. Since we're proposing for view 4, we need the // value from entry 2 to align the public key from the shares map. consensus_writer.update_vid_shares(inserted_view_number, view.vid_proposal.0[2].clone()); @@ -218,17 +195,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), exact(ViewChange(ViewNumber::new(3))), - exact(ValidatedStateUpdated( - ViewNumber::new(3), - build_fake_view_with_leaf_and_state( - leaves[2].clone(), - >::from_header( - &proposals[2].data.block_header, - ), - &handle.hotshot.upgrade_lock - ) - .await, - )), exact(QuorumProposalRequestSend(req, signature)), exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), ])]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index d0fb03dee7..7b147269ab 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -7,7 +7,7 @@ use std::time::Duration; use futures::StreamExt; -use hotshot::{tasks::task_state::CreateTaskState, traits::ValidatedState}; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes, TestVersions}, @@ -17,7 +17,7 @@ use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ all_predicates, - helpers::{build_fake_view_with_leaf, build_payload_commitment, build_system_handle}, + helpers::{build_payload_commitment, build_system_handle}, predicates::event::{all_predicates, exact, quorum_proposal_send}, random, script::{Expectations, InputOrder, TaskScript}, @@ -32,9 +32,9 @@ use hotshot_types::{ node_implementation::{ConsensusTime, Versions}, }, utils::BuilderCommitment, - vote::HasViewNumber, }; use sha2::Digest; +use std::sync::Arc; use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); @@ -79,11 +79,14 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; + .await + .unwrap(); } // We must send the genesis cert here to initialize hotshot successfully. @@ -113,10 +116,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { vec1![builder_fee.clone()], None, ), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; @@ -173,28 +172,19 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. - - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; drop(consensus_writer); @@ -223,10 +213,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), - ValidatedStateUpdated( - genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), @@ -244,10 +230,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), @@ -265,10 +247,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), @@ -286,10 +264,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), @@ -307,10 +281,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; @@ -420,10 +390,6 @@ async fn test_quorum_proposal_task_qc_timeout() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ]]; let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; @@ -513,10 +479,6 @@ async fn test_quorum_proposal_task_view_sync() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ]]; let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; @@ -565,16 +527,13 @@ async fn test_quorum_proposal_task_liveness_check() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } drop(consensus_writer); @@ -588,12 +547,7 @@ async fn test_quorum_proposal_task_liveness_check() { // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. - - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let inputs = vec![ random![ @@ -613,10 +567,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), - ValidatedStateUpdated( - genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), @@ -634,10 +584,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), @@ -655,10 +601,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), @@ -676,10 +618,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), @@ -697,10 +635,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 2049a7d91f..eb3efcbc69 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -11,17 +11,18 @@ use std::time::Duration; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; +use hotshot_example_types::state_types::TestValidatedState; use hotshot_macros::{run_test, test_scripts}; use hotshot_testing::{ all_predicates, - helpers::{build_fake_view_with_leaf, vid_share}, + helpers::vid_share, predicates::event::all_predicates, random, script::{Expectations, InputOrder, TaskScript}, }; -use hotshot_types::{ - data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, -}; +use hotshot_types::data::Leaf; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use std::sync::Arc; const TIMEOUT: Duration = Duration::from_millis(35); @@ -31,7 +32,7 @@ async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, - predicates::event::{exact, quorum_vote_send, validated_state_updated}, + predicates::event::{exact, quorum_vote_send}, view_generator::TestViewGenerator, }; @@ -59,14 +60,14 @@ async fn test_quorum_vote_task_success() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -83,7 +84,6 @@ async fn test_quorum_vote_task_success() { exact(VidShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), exact(ViewChange(ViewNumber::new(3))), - validated_state_updated(), quorum_vote_send(), ])]; @@ -133,14 +133,14 @@ async fn test_quorum_vote_task_miss_dependency() { leaves.push(view.leaf.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 26f9e9b462..828b823ab0 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -95,16 +95,13 @@ async fn test_upgrade_task_with_proposal() { leaders.push(view.leader_public_key); views.push(view.clone()); consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } @@ -119,25 +116,18 @@ async fn test_upgrade_task_with_proposal() { leaves.push(view.leaf.clone()); views.push(view.clone()); consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } drop(consensus_writer); - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), @@ -180,10 +170,6 @@ async fn test_upgrade_task_with_proposal() { None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), - ValidatedStateUpdated( - genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), @@ -201,10 +187,6 @@ async fn test_upgrade_task_with_proposal() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], InputOrder::Random(upgrade_vote_recvs), random![ @@ -223,10 +205,6 @@ async fn test_upgrade_task_with_proposal() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index af09061c56..8e0b10ff71 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -10,6 +10,7 @@ use std::time::Duration; use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; +use hotshot_example_types::state_types::TestValidatedState; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes, TestVersions}, @@ -28,14 +29,15 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, view_generator::TestViewGenerator, }; +use hotshot_types::data::Leaf; use hotshot_types::{ data::{null_block, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, vote::HasViewNumber, }; +use std::sync::Arc; use vbs::version::Version; - const TIMEOUT: Duration = Duration::from_millis(65); #[tokio::test(flavor = "multi_thread")] @@ -82,14 +84,14 @@ async fn test_upgrade_task_with_vote() { leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -137,7 +139,6 @@ async fn test_upgrade_task_with_vote() { exact(VidShareValidated(vids[1].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), exact(ViewChange(ViewNumber::new(3))), - validated_state_updated(), quorum_vote_send(), ]), Expectations::from_outputs_and_task_states( @@ -147,7 +148,6 @@ async fn test_upgrade_task_with_vote() { exact(VidShareValidated(vids[2].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(3))), exact(ViewChange(ViewNumber::new(4))), - validated_state_updated(), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], @@ -161,7 +161,6 @@ async fn test_upgrade_task_with_vote() { exact(VidShareValidated(vids[3].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(4))), exact(ViewChange(ViewNumber::new(5))), - validated_state_updated(), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], @@ -175,7 +174,6 @@ async fn test_upgrade_task_with_vote() { exact(VidShareValidated(vids[4].0[0].clone())), exact(QuorumVoteDependenciesValidated(ViewNumber::new(5))), exact(ViewChange(ViewNumber::new(6))), - validated_state_updated(), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index b8b08658b5..d58ca1b9c5 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -3,18 +3,19 @@ use std::time::Duration; use async_broadcast::broadcast; use futures::StreamExt; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; +use hotshot_example_types::state_types::TestValidatedState; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::VoteDependencyHandle}; use hotshot_testing::{ - helpers::{build_fake_view_with_leaf, build_system_handle}, + helpers::build_system_handle, predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; +use hotshot_types::data::Leaf; use hotshot_types::{ consensus::OuterConsensus, data::{EpochNumber, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, - vote::HasViewNumber, }; use itertools::Itertools; use tokio::time::timeout; @@ -52,14 +53,14 @@ async fn test_vote_dependency_handle() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -79,7 +80,6 @@ async fn test_vote_dependency_handle() { let outputs = vec![ exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), exact(ViewChange(ViewNumber::new(3))), - validated_state_updated(), quorum_vote_send(), ]; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 28d488db1c..72964d3705 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -591,7 +591,48 @@ impl Consensus { /// # Errors /// Can return an error when the new view contains less information than the exisiting view /// with the same view number. - pub fn update_validated_state_map( + pub fn update_da_view( + &mut self, + view_number: TYPES::View, + payload_commitment: VidCommitment, + ) -> Result<()> { + let view = View { + view_inner: ViewInner::Da { payload_commitment }, + }; + self.update_validated_state_map(view_number, view) + } + + /// Update the validated state map with a new view_number/view combo. + /// + /// # Errors + /// Can return an error when the new view contains less information than the exisiting view + /// with the same view number. + pub async fn update_leaf( + &mut self, + leaf: Leaf, + state: Arc, + delta: Option>::Delta>>, + upgrade_lock: &UpgradeLock, + ) -> Result<()> { + let view_number = leaf.view_number(); + let view = View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(upgrade_lock).await, + state, + delta, + }, + }; + self.update_validated_state_map(view_number, view)?; + self.update_saved_leaves(leaf, upgrade_lock).await; + Ok(()) + } + + /// Update the validated state map with a new view_number/view combo. + /// + /// # Errors + /// Can return an error when the new view contains less information than the exisiting view + /// with the same view number. + fn update_validated_state_map( &mut self, view_number: TYPES::View, new_view: View, @@ -621,7 +662,7 @@ impl Consensus { } /// Update the saved leaves with a new leaf. - pub async fn update_saved_leaves( + async fn update_saved_leaves( &mut self, leaf: Leaf, upgrade_lock: &UpgradeLock, From 40cec2d2da87eae5b55174f56bd3cb3fb024278d Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 11 Nov 2024 14:13:55 -0500 Subject: [PATCH 1294/1393] prevent subscribing twice (#3875) --- hotshot/src/helpers.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot/src/helpers.rs b/hotshot/src/helpers.rs index d2a9ae33f5..e685e26cdd 100644 --- a/hotshot/src/helpers.rs +++ b/hotshot/src/helpers.rs @@ -21,15 +21,15 @@ pub fn initialize_logging() { // Conditionally initialize in `json` mode if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { - tracing_subscriber::fmt() + let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) .with_span_events(span_event_filter) .json() - .init(); + .try_init(); } else { - tracing_subscriber::fmt() + let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) .with_span_events(span_event_filter) - .init(); + .try_init(); }; } From 66aabcdef87ccd5ffec06ec23fb3e921fd4ebe27 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 11 Nov 2024 12:53:06 -0800 Subject: [PATCH 1295/1393] [Builder Stake Table] - Use the new `num_nodes` endpoint in simple builder (#3876) * Update the use of endpoints * fmt --- examples/combined/all.rs | 2 +- examples/push-cdn/all.rs | 2 +- hotshot/src/lib.rs | 2 +- libp2p-networking/src/network/mod.rs | 2 +- task-impls/src/network.rs | 3 +-- task-impls/src/transactions.rs | 22 ++++++++++++--- task-impls/src/view_sync.rs | 3 +-- testing/src/block_builder/mod.rs | 7 ++--- testing/src/block_builder/random.rs | 27 +++++++++++-------- testing/src/block_builder/simple.rs | 15 ++++++----- testing/src/completion_task.rs | 3 +-- testing/src/test_runner.rs | 3 +-- testing/src/test_task.rs | 2 +- testing/src/txn_task.rs | 3 +-- .../tests_1/quorum_proposal_recv_task.rs | 7 +++-- testing/tests/tests_1/quorum_proposal_task.rs | 3 +-- testing/tests/tests_1/quorum_vote_task.rs | 15 ++++++----- .../tests/tests_1/upgrade_task_with_vote.rs | 9 +++---- .../tests/tests_1/vote_dependency_handle.rs | 9 ++++--- types/src/traits/network.rs | 12 ++++----- 20 files changed, 83 insertions(+), 68 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index 1ccace8b48..ebfd828b0d 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -12,8 +12,8 @@ use std::path::Path; use cdn_broker::Broker; use cdn_marshal::Marshal; -use hotshot::helpers::initialize_logging; use hotshot::{ + helpers::initialize_logging, traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey}, types::SignatureKey, }; diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 726f41b22e..2767bbdaa3 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -12,8 +12,8 @@ use std::path::Path; use cdn_broker::{reexports::crypto::signature::KeyPair, Broker}; use cdn_marshal::Marshal; -use hotshot::helpers::initialize_logging; use hotshot::{ + helpers::initialize_logging, traits::implementations::{TestingDef, WrappedSignatureKey}, types::SignatureKey, }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e41120b0b1..b507a0a129 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -42,7 +42,6 @@ use async_trait::async_trait; use futures::join; use hotshot_task::task::{ConsensusTaskRegistry, NetworkTaskRegistry}; use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; -use tokio::{spawn, time::sleep}; // Internal /// Reexport error type pub use hotshot_types::error::HotShotError; @@ -68,6 +67,7 @@ use hotshot_types::{ // External /// Reexport rand crate pub use rand; +use tokio::{spawn, time::sleep}; use tracing::{debug, instrument, trace}; use crate::{ diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 465e8ee25e..21a2811bb8 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -20,10 +20,10 @@ use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::Sender; use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; -use libp2p::dns::tokio::Transport as DnsTransport; use libp2p::{ build_multiaddr, core::{muxing::StreamMuxerBox, transport::Boxed}, + dns::tokio::Transport as DnsTransport, gossipsub::Event as GossipEvent, identify::Event as IdentifyEvent, identity::Keypair, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 49ae29e964..4ffab75b68 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -33,8 +33,7 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use tokio::spawn; -use tokio::task::JoinHandle; +use tokio::{spawn, task::JoinHandle}; use tracing::instrument; use utils::anytrace::*; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 2a9d7f1ab6..7d2090f9d5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -706,6 +706,13 @@ impl, V: Versions> TransactionTask bail!("No available blocks"); } + let version = match self.upgrade_lock.version(view_number).await { + Ok(v) => v, + Err(err) => { + bail!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + } + }; + for (block_info, builder_idx) in available_blocks { // Verify signature over chosen block. if !block_info.sender.validate_block_info_signature( @@ -732,9 +739,18 @@ impl, V: Versions> TransactionTask let response = { let client = &self.builder_clients[builder_idx]; - let (block, header_input) = futures::join! { - client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), - client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + // If epochs are supported, provide the latest `num_nodes` information to the + // builder for VID computation. + let (block, header_input) = if version >= V::Epochs::VERSION { + futures::join! { + client.claim_block_with_num_nodes(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature, self.membership.total_nodes(self.cur_epoch)) , + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + } + } else { + futures::join! { + client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + } }; let block_data = match block { diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 4b810196da..895c321983 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -31,8 +31,7 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber, Vote}, }; -use tokio::task::JoinHandle; -use tokio::{spawn, time::sleep}; +use tokio::{spawn, task::JoinHandle, time::sleep}; use tracing::instrument; use utils::anytrace::*; diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index 5dcde74d37..a488b96dab 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -4,9 +4,10 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use async_broadcast::Receiver; +use async_lock::RwLock; use async_trait::async_trait; use futures::Stream; use hotshot::{traits::BlockPayload, types::Event}; @@ -168,7 +169,7 @@ pub fn run_builder_source_0_1( /// Helper function to construct all builder data structures from a list of transactions async fn build_block( transactions: Vec, - num_storage_nodes: usize, + num_storage_nodes: Arc>, pub_key: TYPES::BuilderSignatureKey, priv_key: ::BuilderPrivateKey, ) -> BlockEntry @@ -186,7 +187,7 @@ where let commitment = block_payload.builder_commitment(&metadata); let (vid_commitment, precompute_data) = - precompute_vid_commitment(&block_payload.encode(), num_storage_nodes); + precompute_vid_commitment(&block_payload.encode(), *num_storage_nodes.read_arc().await); // Get block size from the encoded payload let block_size = block_payload.encode().len() as u64; diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index da25cf2b9c..5f14578a5e 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -46,7 +46,7 @@ pub struct RandomBuilderImplementation; impl RandomBuilderImplementation { pub async fn create>( - num_storage_nodes: usize, + num_nodes: usize, config: RandomBuilderConfig, changes: HashMap, change_sender: Sender, @@ -57,15 +57,17 @@ impl RandomBuilderImplementation { let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); let blocks = Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))); + let num_nodes = Arc::new(RwLock::new(num_nodes)); let source = RandomBuilderSource { blocks: Arc::clone(&blocks), pub_key: pub_key.clone(), + num_nodes: num_nodes.clone(), should_fail_claims: Arc::new(AtomicBool::new(false)), }; let task = RandomBuilderTask { blocks, config, - num_storage_nodes, + num_nodes: num_nodes.clone(), changes, change_sender, pub_key, @@ -84,21 +86,21 @@ where type Config = RandomBuilderConfig; async fn start( - num_storage_nodes: usize, + num_nodes: usize, url: Url, config: RandomBuilderConfig, changes: HashMap, ) -> Box> { let (change_sender, change_receiver) = broadcast(128); - let (task, source) = Self::create(num_storage_nodes, config, changes, change_sender).await; + let (task, source) = Self::create(num_nodes, config, changes, change_sender).await; run_builder_source_0_1(url, change_receiver, source); Box::new(task) } } pub struct RandomBuilderTask> { - num_storage_nodes: usize, + num_nodes: Arc>, config: RandomBuilderConfig, changes: HashMap, change_sender: Sender, @@ -110,7 +112,7 @@ pub struct RandomBuilderTask> { impl> RandomBuilderTask { async fn build_blocks( options: RandomBuilderConfig, - num_storage_nodes: usize, + num_nodes: Arc>, pub_key: ::BuilderSignatureKey, priv_key: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, blocks: Arc>>>, @@ -136,7 +138,7 @@ impl> RandomBuilderTask { let block = build_block( transactions, - num_storage_nodes, + num_nodes.clone(), pub_key.clone(), priv_key.clone(), ) @@ -171,7 +173,7 @@ where ) { let mut task = Some(spawn(Self::build_blocks( self.config.clone(), - self.num_storage_nodes, + self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), self.blocks.clone(), @@ -191,7 +193,7 @@ where if task.is_none() { task = Some(spawn(Self::build_blocks( self.config.clone(), - self.num_storage_nodes, + self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), self.blocks.clone(), @@ -229,6 +231,7 @@ pub struct RandomBuilderSource { >, >, pub_key: TYPES::BuilderSignatureKey, + num_nodes: Arc>, should_fail_claims: Arc, } @@ -240,10 +243,11 @@ where /// Create new [`RandomBuilderSource`] #[must_use] #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 - pub fn new(pub_key: TYPES::BuilderSignatureKey) -> Self { + pub fn new(pub_key: TYPES::BuilderSignatureKey, num_nodes: Arc>) -> Self { Self { blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), pub_key, + num_nodes, should_fail_claims: Arc::new(AtomicBool::new(false)), } } @@ -307,8 +311,9 @@ impl BuilderDataSource for RandomBuilderSource { view_number: u64, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, - _num_nodes: usize, + num_nodes: usize, ) -> Result, BuildError> { + *self.num_nodes.write().await = num_nodes; self.claim_block(block_hash, view_number, sender, signature) .await } diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 28aabf26b2..e490d8c598 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -54,7 +54,7 @@ pub struct SimpleBuilderImplementation; impl SimpleBuilderImplementation { pub async fn create( - num_storage_nodes: usize, + num_nodes: usize, changes: HashMap, change_sender: Sender, ) -> (SimpleBuilderSource, SimpleBuilderTask) { @@ -70,7 +70,7 @@ impl SimpleBuilderImplementation { priv_key, transactions: transactions.clone(), blocks: blocks.clone(), - num_storage_nodes, + num_nodes: Arc::new(RwLock::new(num_nodes)), should_fail_claims: Arc::clone(&should_fail_claims), }; @@ -95,13 +95,13 @@ where type Config = (); async fn start( - num_storage_nodes: usize, + num_nodes: usize, url: Url, _config: Self::Config, changes: HashMap, ) -> Box> { let (change_sender, change_receiver) = broadcast(128); - let (source, task) = Self::create(num_storage_nodes, changes, change_sender).await; + let (source, task) = Self::create(num_nodes, changes, change_sender).await; run_builder_source(url, change_receiver, source); Box::new(task) @@ -112,7 +112,7 @@ where pub struct SimpleBuilderSource { pub_key: TYPES::BuilderSignatureKey, priv_key: ::BuilderPrivateKey, - num_storage_nodes: usize, + num_nodes: Arc>, #[allow(clippy::type_complexity)] transactions: Arc, SubmittedTransaction>>>, blocks: Arc>>>, @@ -248,7 +248,7 @@ where let block_entry = build_block( transactions, - self.num_storage_nodes, + self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), ) @@ -303,8 +303,9 @@ where view_number: u64, sender: TYPES::SignatureKey, signature: &::PureAssembledSignatureType, - _num_nodes: usize, + num_nodes: usize, ) -> Result, BuildError> { + *self.num_nodes.write().await = num_nodes; self.claim_block(block_hash, view_number, sender, signature) .await } diff --git a/testing/src/completion_task.rs b/testing/src/completion_task.rs index 96de8616e9..5806f8d1c8 100644 --- a/testing/src/completion_task.rs +++ b/testing/src/completion_task.rs @@ -8,8 +8,7 @@ use std::time::Duration; use async_broadcast::{Receiver, Sender}; use hotshot_task_impls::helpers::broadcast_event; -use tokio::task::JoinHandle; -use tokio::{spawn, time::timeout}; +use tokio::{spawn, task::JoinHandle, time::timeout}; use crate::test_task::TestEvent; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 1b55a2b841..2a08fda5e4 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -40,8 +40,7 @@ use hotshot_types::{ HotShotConfig, ValidatorConfig, }; use tide_disco::Url; -use tokio::spawn; -use tokio::task::JoinHandle; +use tokio::{spawn, task::JoinHandle}; #[allow(deprecated)] use tracing::info; diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 036b0d6b5d..2791646cba 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -23,9 +23,9 @@ use hotshot_types::{ node_implementation::{NodeType, Versions}, }, }; -use tokio::task::JoinHandle; use tokio::{ spawn, + task::JoinHandle, time::{sleep, timeout}, }; use tracing::error; diff --git a/testing/src/txn_task.rs b/testing/src/txn_task.rs index 5d62d4232d..41b5ec3b14 100644 --- a/testing/src/txn_task.rs +++ b/testing/src/txn_task.rs @@ -11,8 +11,7 @@ use async_lock::RwLock; use hotshot::traits::TestableNodeImplementation; use hotshot_types::traits::node_implementation::{NodeType, Versions}; use rand::thread_rng; -use tokio::task::JoinHandle; -use tokio::{spawn, time::sleep}; +use tokio::{spawn, task::JoinHandle, time::sleep}; use crate::{test_runner::Node, test_task::TestEvent}; diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 6b82fa8e7f..cb216376d7 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -7,6 +7,8 @@ // TODO: Remove after integration #![allow(unused_imports)] +use std::sync::Arc; + use committable::Committable; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; @@ -25,9 +27,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::Leaf; use hotshot_types::{ - data::ViewNumber, + data::{Leaf, ViewNumber}, request_response::ProposalRequestPayload, traits::{ consensus_api::ConsensusApi, @@ -37,8 +38,6 @@ use hotshot_types::{ }, }; -use std::sync::Arc; - #[cfg(test)] #[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_recv_task() { diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 7b147269ab..26fe07ab61 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; @@ -34,7 +34,6 @@ use hotshot_types::{ utils::BuilderCommitment, }; use sha2::Digest; -use std::sync::Arc; use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index eb3efcbc69..9735d07b74 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -6,12 +6,14 @@ #![allow(clippy::panic)] -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; -use hotshot_example_types::state_types::TestValidatedState; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes, TestVersions}, + state_types::TestValidatedState, +}; use hotshot_macros::{run_test, test_scripts}; use hotshot_testing::{ all_predicates, @@ -20,9 +22,10 @@ use hotshot_testing::{ random, script::{Expectations, InputOrder, TaskScript}, }; -use hotshot_types::data::Leaf; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -use std::sync::Arc; +use hotshot_types::{ + data::{Leaf, ViewNumber}, + traits::node_implementation::ConsensusTime, +}; const TIMEOUT: Duration = Duration::from_millis(35); diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 8e0b10ff71..eaaa70fc94 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -6,15 +6,14 @@ #![allow(unused_imports)] -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; -use hotshot_example_types::state_types::TestValidatedState; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes, TestVersions}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ @@ -29,14 +28,12 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, view_generator::TestViewGenerator, }; -use hotshot_types::data::Leaf; use hotshot_types::{ - data::{null_block, ViewNumber}, + data::{null_block, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, vote::HasViewNumber, }; -use std::sync::Arc; use vbs::version::Version; const TIMEOUT: Duration = Duration::from_millis(65); diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index d58ca1b9c5..35fd7200af 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -2,8 +2,10 @@ use std::time::Duration; use async_broadcast::broadcast; use futures::StreamExt; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; -use hotshot_example_types::state_types::TestValidatedState; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes, TestVersions}, + state_types::TestValidatedState, +}; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::VoteDependencyHandle}; use hotshot_testing::{ @@ -11,10 +13,9 @@ use hotshot_testing::{ predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; -use hotshot_types::data::Leaf; use hotshot_types::{ consensus::OuterConsensus, - data::{EpochNumber, ViewNumber}, + data::{EpochNumber, Leaf, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, }; use itertools::Itertools; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 7d03f17594..5a01560832 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -8,12 +8,6 @@ //! //! Contains types and traits used by `HotShot` to abstract over network access -use derivative::Derivative; -use dyn_clone::DynClone; -use futures::Future; -use thiserror::Error; -use tokio::{sync::mpsc::error::TrySendError, time::sleep}; - use std::{ collections::HashMap, fmt::{Debug, Display}, @@ -24,12 +18,16 @@ use std::{ }; use async_trait::async_trait; -use futures::future::join_all; +use derivative::Derivative; +use dyn_clone::DynClone; +use futures::{future::join_all, Future}; use rand::{ distributions::{Bernoulli, Uniform}, prelude::Distribution, }; use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tokio::{sync::mpsc::error::TrySendError, time::sleep}; use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ From 4d1ddfef5042d6acf1de8a148ff7114898b6c906 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:20:40 -0500 Subject: [PATCH 1296/1393] synchronously drop tasks (#3883) --- task-impls/src/consensus/handlers.rs | 10 ++-------- task-impls/src/consensus/mod.rs | 10 +++------- task-impls/src/da.rs | 2 +- task-impls/src/helpers.rs | 7 +------ task-impls/src/network.rs | 16 ++++++++-------- task-impls/src/quorum_proposal/mod.rs | 19 +++++++------------ task-impls/src/quorum_proposal_recv/mod.rs | 11 +++++------ task-impls/src/quorum_vote/mod.rs | 10 +++++----- task-impls/src/request.rs | 4 ++-- task-impls/src/rewind.rs | 2 +- task-impls/src/transactions.rs | 2 +- task-impls/src/upgrade.rs | 2 +- task-impls/src/vid.rs | 2 +- task-impls/src/view_sync.rs | 14 +++++++------- task/src/task.rs | 6 +++--- 15 files changed, 48 insertions(+), 69 deletions(-) diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 41db9a2e13..50a6e4ba2a 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -23,9 +23,7 @@ use utils::anytrace::*; use super::ConsensusTaskState; use crate::{ - consensus::Versions, - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + consensus::Versions, events::HotShotEvent, helpers::broadcast_event, vote_collection::handle_vote, }; @@ -170,11 +168,7 @@ pub(crate) async fn handle_view_change< }); // Cancel the old timeout task - cancel_task(std::mem::replace( - &mut task_state.timeout_task, - new_timeout_task, - )) - .await; + std::mem::replace(&mut task_state.timeout_task, new_timeout_task).abort(); let consensus_reader = task_state.consensus.read().await; consensus_reader diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 2de584199a..5490ecad8f 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -28,7 +28,7 @@ use utils::anytrace::Result; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, }; -use crate::{events::HotShotEvent, helpers::cancel_task, vote_collection::VoteCollectorsMap}; +use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; /// Event handlers for use in the `handle` method. mod handlers; @@ -167,12 +167,8 @@ impl, V: Versions> TaskState } /// Joins all subtasks. - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { // Cancel the old timeout task - cancel_task(std::mem::replace( - &mut self.timeout_task, - tokio::spawn(async {}), - )) - .await; + std::mem::replace(&mut self.timeout_task, tokio::spawn(async {})).abort(); } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 50469b9809..33e36c04f0 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -359,5 +359,5 @@ impl, V: Versions> TaskState self.handle(event, sender.clone()).await } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 8fbfd6251d..d66bf44abf 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -30,7 +30,7 @@ use hotshot_types::{ utils::{Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use tokio::{task::JoinHandle, time::timeout}; +use tokio::time::timeout; use tracing::instrument; use utils::anytrace::*; @@ -640,11 +640,6 @@ pub(crate) async fn validate_proposal_view_and_certs< Ok(()) } -/// Cancel a task -pub async fn cancel_task(task: JoinHandle) { - task.abort(); -} - /// Helper function to send events and log errors pub async fn broadcast_event(event: E, sender: &Sender) { match sender.broadcast_direct(event).await { diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 4ffab75b68..22ce352a35 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -12,7 +12,6 @@ use std::{ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; -use futures::future::join_all; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::Consensus, @@ -39,7 +38,7 @@ use utils::anytrace::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, - helpers::{broadcast_event, cancel_task}, + helpers::broadcast_event, }; /// the network message task state @@ -232,7 +231,7 @@ impl< Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } impl< @@ -340,13 +339,14 @@ impl< /// Cancel all tasks for previous views pub fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.transmit_tasks.split_off(&view); - let mut cancel = Vec::new(); + while let Some((_, tasks)) = self.transmit_tasks.pop_first() { - let mut to_cancel = tasks.into_iter().map(cancel_task).collect(); - cancel.append(&mut to_cancel); + for task in tasks { + task.abort(); + } } + self.transmit_tasks = keep; - spawn(async move { join_all(cancel).await }); } /// Parses a `HotShotEvent` and returns a tuple of: (sender's public key, `MessageKind`, `TransmitType`) @@ -801,7 +801,7 @@ pub mod test { Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } impl< diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 1cc857f335..66f149abcc 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -10,7 +10,6 @@ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use either::Either; -use futures::future::join_all; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::DependencyTask, @@ -34,10 +33,7 @@ use tracing::instrument; use utils::anytrace::*; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; mod handlers; @@ -350,7 +346,7 @@ impl, V: Versions> for view in (*self.latest_proposed_view + 1)..=(*new_view) { if let Some(dependency) = self.proposal_dependencies.remove(&TYPES::View::new(view)) { - cancel_task(dependency).await; + dependency.abort(); } } @@ -527,21 +523,20 @@ impl, V: Versions> )?; } HotShotEvent::ViewChange(view) | HotShotEvent::Timeout(view) => { - self.cancel_tasks(*view).await; + self.cancel_tasks(*view); } _ => {} } Ok(()) } + /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::View) { + pub fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.proposal_dependencies.split_off(&view); - let mut cancel = Vec::new(); while let Some((_, task)) = self.proposal_dependencies.pop_first() { - cancel.push(cancel_task(task)); + task.abort(); } self.proposal_dependencies = keep; - join_all(cancel).await; } } @@ -560,7 +555,7 @@ impl, V: Versions> TaskState self.handle(event, receiver.clone(), sender.clone()).await } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { while let Some((_, handle)) = self.proposal_dependencies.pop_first() { handle.abort(); } diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index f228edbc87..fc729596f0 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -33,7 +33,7 @@ use vbs::version::Version; use self::handlers::handle_quorum_proposal_recv; use crate::{ events::{HotShotEvent, ProposalMissing}, - helpers::{broadcast_event, cancel_task, parent_leaf_and_state}, + helpers::{broadcast_event, parent_leaf_and_state}, }; /// Event handlers for this task. mod handlers; @@ -108,13 +108,12 @@ impl, V: Versions> /// Cancel all tasks the consensus tasks has spawned before the given view pub fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.spawned_tasks.split_off(&view); - let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { - let mut to_cancel = tasks.into_iter().map(cancel_task).collect(); - cancel.append(&mut to_cancel); + for task in tasks { + task.abort(); + } } self.spawned_tasks = keep; - tokio::spawn(async move { join_all(cancel).await }); } /// Handles all consensus events relating to propose and vote-enabling events. @@ -192,7 +191,7 @@ impl, V: Versions> TaskState Ok(()) } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { while !self.spawned_tasks.is_empty() { let Some((_, handles)) = self.spawned_tasks.pop_first() else { break; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 563fa6fa41..90a8ea44a5 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -37,7 +37,7 @@ use vbs::version::StaticVersionType; use crate::{ events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + helpers::broadcast_event, quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, }; @@ -395,7 +395,7 @@ impl, V: Versions> QuorumVoteTaskS // Cancel the old dependency tasks. for view in *self.latest_voted_view..(*new_view) { if let Some(dependency) = self.vote_dependencies.remove(&TYPES::View::new(view)) { - cancel_task(dependency).await; + dependency.abort(); tracing::debug!("Vote dependency removed for view {:?}", view); } } @@ -578,7 +578,7 @@ impl, V: Versions> QuorumVoteTaskS // cancel old tasks let current_tasks = self.vote_dependencies.split_off(&view); while let Some((_, task)) = self.vote_dependencies.pop_last() { - cancel_task(task).await; + task.abort(); } self.vote_dependencies = current_tasks; } @@ -587,7 +587,7 @@ impl, V: Versions> QuorumVoteTaskS // cancel old tasks let current_tasks = self.vote_dependencies.split_off(&view); while let Some((_, task)) = self.vote_dependencies.pop_last() { - cancel_task(task).await; + task.abort(); } self.vote_dependencies = current_tasks; } @@ -720,7 +720,7 @@ impl, V: Versions> TaskState self.handle(event, receiver.clone(), sender.clone()).await } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { while let Some((_, handle)) = self.vote_dependencies.pop_last() { handle.abort(); } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 6674ad3e2b..23646c447e 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -75,7 +75,7 @@ pub struct NetworkRequestState> { impl> Drop for NetworkRequestState { fn drop(&mut self) { - futures::executor::block_on(async move { self.cancel_subtasks().await }); + self.cancel_subtasks(); } } @@ -123,7 +123,7 @@ impl> TaskState for NetworkRequest } } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { self.shutdown_flag.store(true, Ordering::Relaxed); while !self.spawned_tasks.is_empty() { diff --git a/task-impls/src/rewind.rs b/task-impls/src/rewind.rs index 9ae424b62b..4f62359aeb 100644 --- a/task-impls/src/rewind.rs +++ b/task-impls/src/rewind.rs @@ -45,7 +45,7 @@ impl TaskState for RewindTaskState { Ok(()) } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { tracing::info!("Node ID {} Recording {} events", self.id, self.events.len()); let filename = format!("rewind_{}.log", self.id); let mut file = match OpenOptions::new() diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7d2090f9d5..8e84aa28b5 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -822,5 +822,5 @@ impl, V: Versions> TaskState self.handle(event, sender.clone()).await } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 56107613cd..1eb60cf98f 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -336,5 +336,5 @@ impl, V: Versions> TaskState Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 68afabf6d0..3f4e6cda37 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -181,5 +181,5 @@ impl> TaskState for VidTaskState, V: Versions> TaskState self.handle(event, sender.clone()).await } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } /// State of a view sync replica task @@ -197,7 +197,7 @@ impl, V: Versions> TaskState Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } impl, V: Versions> ViewSyncTaskState { @@ -572,7 +572,7 @@ impl, V: Versions> } if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } self.timeout_task = Some(spawn({ @@ -665,7 +665,7 @@ impl, V: Versions> .await; if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } self.timeout_task = Some(spawn({ let stream = event_stream.clone(); @@ -721,7 +721,7 @@ impl, V: Versions> } if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } broadcast_event( @@ -792,7 +792,7 @@ impl, V: Versions> // Shouldn't ever receive a timeout for a relay higher than ours if TYPES::View::new(*round) == self.next_view && *relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } self.relay += 1; match last_seen_certificate { diff --git a/task/src/task.rs b/task/src/task.rs index 70367b0cef..2b4784d00c 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -28,7 +28,7 @@ pub trait TaskState: Send { type Event: TaskEvent + Clone + Send + Sync; /// Joins all subtasks. - async fn cancel_subtasks(&mut self); + fn cancel_subtasks(&mut self); /// Handles an event, providing direct access to the specific channel we received the event on. async fn handle_event( @@ -77,7 +77,7 @@ impl Task { match self.receiver.recv_direct().await { Ok(input) => { if *input == S::Event::shutdown_event() { - self.state.cancel_subtasks().await; + self.state.cancel_subtasks(); break self.boxed_state(); } @@ -129,7 +129,7 @@ impl ConsensusTaskRegistry { while let Some(handle) = handles.pop() { let mut task_state = handle.await.unwrap(); - task_state.cancel_subtasks().await; + task_state.cancel_subtasks(); } } /// Take a task, run it, and register it From 0e1df56e37ecd80c42498af190adc704ee70d68d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 12 Nov 2024 16:29:31 -0500 Subject: [PATCH 1297/1393] [TECH_DEBT] Cleanup Unused or Duplicate Events (#3870) * Ignore duplicate txns via LRU cache * Remove unused ValidateStateUpdate Event, consolidate map insertion * Remove unused events * HighQC fixes and lint * fix empty outputs * fmt --- hotshot/src/tasks/mod.rs | 3 +- hotshot/src/tasks/task_state.rs | 1 - task-impls/src/consensus/handlers.rs | 4 +- task-impls/src/consensus/mod.rs | 20 +---- task-impls/src/events.rs | 58 +------------- task-impls/src/helpers.rs | 5 -- task-impls/src/network.rs | 9 +++ task-impls/src/quorum_proposal/handlers.rs | 1 - task-impls/src/quorum_proposal/mod.rs | 79 +++++++------------ .../src/quorum_proposal_recv/handlers.rs | 6 -- task-impls/src/quorum_vote/handlers.rs | 16 ---- task-impls/src/quorum_vote/mod.rs | 27 +------ task-impls/src/vid.rs | 9 --- testing/src/predicates/event.rs | 32 -------- testing/src/test_task.rs | 3 +- .../tests_1/quorum_proposal_recv_task.rs | 2 - testing/tests/tests_1/quorum_proposal_task.rs | 60 +++----------- testing/tests/tests_1/quorum_vote_task.rs | 8 +- .../tests_1/upgrade_task_with_proposal.rs | 12 +-- .../tests/tests_1/upgrade_task_with_vote.rs | 20 +---- testing/tests/tests_1/vid_task.rs | 1 - .../tests/tests_1/vote_dependency_handle.rs | 6 +- 22 files changed, 72 insertions(+), 310 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index fcc63c2b20..037e72f227 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -8,7 +8,7 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::{collections::BTreeMap, fmt::Debug, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; @@ -132,6 +132,7 @@ pub fn add_network_message_task< internal_event_stream: handle.internal_event_stream.0.clone(), external_event_stream: handle.output_event_stream.0.clone(), public_key: handle.public_key().clone(), + transactions_cache: lru::LruCache::new(NonZeroUsize::new(100_000).unwrap()), }; let upgrade_lock = handle.hotshot.upgrade_lock.clone(); diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 97ff8e277e..de7e0b8739 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -322,7 +322,6 @@ impl, V: Versions> CreateTaskState timeout_task: spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, consensus: OuterConsensus::new(consensus), - last_decided_view: handle.cur_view().await, id: handle.hotshot.id, upgrade_lock: handle.hotshot.upgrade_lock.clone(), } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 50a6e4ba2a..7a6d509913 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -192,14 +192,14 @@ pub(crate) async fn handle_view_change< // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. if usize::try_from(task_state.cur_view.u64()).unwrap() - > usize::try_from(task_state.last_decided_view.u64()).unwrap() + > usize::try_from(consensus_reader.last_decided_view().u64()).unwrap() { consensus_reader .metrics .number_of_views_since_last_decide .set( usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(task_state.last_decided_view.u64()).unwrap(), + - usize::try_from(consensus_reader.last_decided_view().u64()).unwrap(), ); } diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 5490ecad8f..e4d75057ac 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -87,9 +87,6 @@ pub struct ConsensusTaskState, V: /// A reference to the metrics trait. pub consensus: OuterConsensus, - /// The last decided view - pub last_decided_view: TYPES::View, - /// The node's id pub id: u64, @@ -98,7 +95,7 @@ pub struct ConsensusTaskState, V: } impl, V: Versions> ConsensusTaskState { /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -129,21 +126,6 @@ impl, V: Versions> ConsensusTaskSt tracing::debug!("Failed to handle Timeout event; error = {e}"); } } - HotShotEvent::LastDecidedViewUpdated(view_number) => { - if *view_number < self.last_decided_view { - tracing::debug!("New decided view is not newer than ours"); - } else { - self.last_decided_view = *view_number; - if let Err(e) = self - .consensus - .write() - .await - .update_last_decided_view(*view_number) - { - tracing::trace!("{e:?}"); - } - } - } _ => {} } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index b78a688b61..652347f14e 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -92,8 +92,6 @@ pub enum HotShotEvent { QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), - /// All dependencies for the quorum vote are validated. - QuorumVoteDependenciesValidated(TYPES::View), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. The proposal is not for an old view @@ -175,10 +173,7 @@ pub enum HotShotEvent { ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), - /// Event when the transactions task has a block formed - BlockReady(VidDisperse, TYPES::View), - /// Event when consensus decided on a leaf - LeafDecided(Vec>), + /// Send VID shares to VID storage nodes; emitted by the DA leader /// /// Like [`HotShotEvent::DaProposalSend`]. @@ -203,19 +198,6 @@ pub enum HotShotEvent { /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), - /* Consensus State Update Events */ - /// A new locked view has been created (2-chain) - LockedViewUpdated(TYPES::View), - - /// A new anchor view has been successfully reached by this node (3-chain). - LastDecidedViewUpdated(TYPES::View), - - /// A new high_qc has been reached by this node. - UpdateHighQc(QuorumCertificate), - - /// A new high_qc has been updated in `Consensus`. - HighQcUpdated(QuorumCertificate), - /// A quorum proposal has been preliminarily validated. /// The preliminary checks include: /// 1. The proposal is not for an old view @@ -300,7 +282,6 @@ impl HotShotEvent { HotShotEvent::BlockRecv(packed_bundle) => Some(packed_bundle.view_number), HotShotEvent::Shutdown | HotShotEvent::TransactionSend(_, _) - | HotShotEvent::LeafDecided(_) | HotShotEvent::TransactionsRecv(_) => None, HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::VidShareRecv(_, proposal) | HotShotEvent::VidShareValidated(proposal) => { @@ -313,20 +294,13 @@ impl HotShotEvent { } HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), - HotShotEvent::QuorumVoteDependenciesValidated(view_number) - | HotShotEvent::ViewChange(view_number) + HotShotEvent::ViewChange(view_number) | HotShotEvent::ViewSyncTimeout(view_number, _, _) | HotShotEvent::ViewSyncTrigger(view_number) - | HotShotEvent::Timeout(view_number) - | HotShotEvent::BlockReady(_, view_number) - | HotShotEvent::LockedViewUpdated(view_number) - | HotShotEvent::LastDecidedViewUpdated(view_number) => Some(*view_number), + | HotShotEvent::Timeout(view_number) => Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) } - HotShotEvent::UpdateHighQc(cert) | HotShotEvent::HighQcUpdated(cert) => { - Some(cert.view_number()) - } HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), HotShotEvent::VidRequestSend(request, _, _) @@ -385,12 +359,6 @@ impl Display for HotShotEvent { HotShotEvent::QuorumVoteSend(vote) => { write!(f, "QuorumVoteSend(view_number={:?})", vote.view_number()) } - HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { - write!( - f, - "QuorumVoteDependenciesValidated(view_number={view_number:?})" - ) - } HotShotEvent::QuorumProposalValidated(proposal, _) => write!( f, "QuorumProposalValidated(view_number={:?})", @@ -504,14 +472,6 @@ impl Display for HotShotEvent { HotShotEvent::BlockRecv(packed_bundle) => { write!(f, "BlockRecv(view_number={:?})", packed_bundle.view_number) } - HotShotEvent::BlockReady(_, view_number) => { - write!(f, "BlockReady(view_number={view_number:?})") - } - HotShotEvent::LeafDecided(leaves) => { - let view_numbers: Vec<::View> = - leaves.iter().map(Leaf::view_number).collect(); - write!(f, "LeafDecided({view_numbers:?})") - } HotShotEvent::VidDisperseSend(proposal, _) => write!( f, "VidDisperseSend(view_number={:?})", @@ -568,18 +528,6 @@ impl Display for HotShotEvent { proposal.data.view_number ) } - HotShotEvent::LockedViewUpdated(view_number) => { - write!(f, "LockedViewUpdated(view_number={view_number:?})") - } - HotShotEvent::LastDecidedViewUpdated(view_number) => { - write!(f, "LastDecidedViewUpdated(view_number={view_number:?})") - } - HotShotEvent::UpdateHighQc(cert) => { - write!(f, "UpdateHighQc(view_number={:?})", cert.view_number()) - } - HotShotEvent::HighQcUpdated(cert) => { - write!(f, "HighQcUpdated(view_number={:?})", cert.view_number()) - } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { write!( f, diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index d66bf44abf..5c91ca9adf 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -167,9 +167,6 @@ pub struct LeafChainTraversalOutcome { /// The decided leaves with corresponding validated state and VID info. pub leaf_views: Vec>, - /// The decided leaves. - pub leaves_decided: Vec>, - /// The transactions in the block payload for each leaf. pub included_txns: Option::Transaction>>>, @@ -188,7 +185,6 @@ impl Default for LeafChainTraversalOutcome { new_decided_view_number: None, new_decide_qc: None, leaf_views: Vec::new(), - leaves_decided: Vec::new(), included_txns: None, decided_upgrade_cert: None, } @@ -323,7 +319,6 @@ pub async fn decide_from_proposal( delta.clone(), vid_share, )); - res.leaves_decided.push(leaf.clone()); if let Some(ref payload) = leaf.block_payload() { res.included_txns = Some( payload diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 22ce352a35..b36ecdbb23 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -6,6 +6,7 @@ use std::{ collections::{BTreeMap, HashMap}, + hash::{DefaultHasher, Hash, Hasher}, sync::Arc, }; @@ -52,6 +53,9 @@ pub struct NetworkMessageTaskState { /// This nodes public key pub public_key: TYPES::SignatureKey, + + /// Transaction Cache to ignore previously seen transatctions + pub transactions_cache: lru::LruCache, } impl NetworkMessageTaskState { @@ -130,6 +134,11 @@ impl NetworkMessageTaskState { // Handle data messages MessageKind::Data(message) => match message { DataMessage::SubmitTransaction(transaction, _) => { + let mut hasher = DefaultHasher::new(); + transaction.hash(&mut hasher); + if self.transactions_cache.put(hasher.finish(), ()).is_some() { + return; + } broadcast_event( Arc::new(HotShotEvent::TransactionsRecv(vec![transaction])), &self.internal_event_stream, diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 1add742a7a..f3b0f74bdc 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -292,7 +292,6 @@ impl HandleDepOutput for ProposalDependencyHandle< timeout_certificate = Some(timeout.clone()); } either::Left(qc) => { - // Handled by the UpdateHighQc event. parent_view_number = Some(qc.view_number()); } }, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 66f149abcc..1c63b2086e 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -33,7 +33,7 @@ use tracing::instrument; use utils::anytrace::*; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; -use crate::{events::HotShotEvent, helpers::broadcast_event}; +use crate::events::HotShotEvent; mod handlers; @@ -110,7 +110,7 @@ impl, V: Versions> let event = event.as_ref(); let event_view = match dependency_type { ProposalDependency::Qc => { - if let HotShotEvent::HighQcUpdated(qc) = event { + if let HotShotEvent::QcFormed(either::Left(qc)) = event { qc.view_number() + 1 } else { return false; @@ -229,7 +229,7 @@ impl, V: Versions> timeout_dependency.mark_as_completed(event); } Either::Left(_) => { - // qc_dependency.mark_as_completed(event); + qc_dependency.mark_as_completed(event); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { @@ -238,9 +238,6 @@ impl, V: Versions> HotShotEvent::VidDisperseSend(_, _) => { vid_share_dependency.mark_as_completed(event); } - HotShotEvent::HighQcUpdated(_) => { - qc_dependency.mark_as_completed(event); - } _ => {} }; @@ -392,16 +389,37 @@ impl, V: Versions> } either::Left(qc) => { // Only update if the qc is from a newer view - let consensus_reader = self.consensus.read().await; - if qc.view_number <= consensus_reader.high_qc().view_number { + if qc.view_number <= self.consensus.read().await.high_qc().view_number { tracing::trace!( "Received a QC for a view that was not > than our current high QC" ); } - - // We need to gate on this data actually existing in the consensus shared state. - // So we broadcast here and handle *before* we make the task. - broadcast_event(HotShotEvent::UpdateHighQc(qc).into(), &event_sender).await; + self.consensus + .write() + .await + .update_high_qc(qc.clone()) + .wrap() + .context(error!( + "Failed to update high QC in internal consensus state!" + ))?; + + // Then update the high QC in storage + self.storage + .write() + .await + .update_high_qc(qc.clone()) + .await + .wrap() + .context(error!("Failed to update high QC in storage!"))?; + let view_number = qc.view_number() + 1; + let epoch_number = self.consensus.read().await.cur_epoch(); + self.create_dependency_task_if_new( + view_number, + epoch_number, + event_receiver, + event_sender, + Arc::clone(&event), + )?; } }, HotShotEvent::SendPayloadCommitmentAndMetadata( @@ -485,43 +503,6 @@ impl, V: Versions> Arc::clone(&event), )?; } - HotShotEvent::UpdateHighQc(qc) => { - // First update the high QC internally - self.consensus - .write() - .await - .update_high_qc(qc.clone()) - .wrap() - .context(error!( - "Failed to update high QC in internal consensus state!" - ))?; - - // Then update the high QC in storage - self.storage - .write() - .await - .update_high_qc(qc.clone()) - .await - .wrap() - .context(error!("Failed to update high QC in storage!"))?; - - broadcast_event( - HotShotEvent::HighQcUpdated(qc.clone()).into(), - &event_sender, - ) - .await; - } - HotShotEvent::HighQcUpdated(qc) => { - let view_number = qc.view_number() + 1; - let epoch_number = self.consensus.read().await.cur_epoch(); - self.create_dependency_task_if_new( - view_number, - epoch_number, - event_receiver, - event_sender, - Arc::clone(&event), - )?; - } HotShotEvent::ViewChange(view) | HotShotEvent::Timeout(view) => { self.cancel_tasks(*view); } diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 88b04d149f..1a9479e1ad 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -221,12 +221,6 @@ pub(crate) async fn handle_quorum_proposal_recv< } drop(consensus_writer); - broadcast_event( - HotShotEvent::HighQcUpdated(justify_qc.clone()).into(), - event_sender, - ) - .await; - let Some((parent_leaf, _parent_state)) = parent else { tracing::warn!( "Proposal's parent missing from storage with commitment: {:?}", diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 4225113440..58e7a0849c 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -42,7 +42,6 @@ pub(crate) async fn handle_quorum_proposal_validated< V: Versions, >( proposal: &QuorumProposal, - sender: &Sender>>, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { let LeafChainTraversalOutcome { @@ -50,7 +49,6 @@ pub(crate) async fn handle_quorum_proposal_validated< new_decided_view_number, new_decide_qc, leaf_views, - leaves_decided, included_txns, decided_upgrade_cert, } = decide_from_proposal( @@ -80,13 +78,6 @@ pub(crate) async fn handle_quorum_proposal_validated< let mut consensus_writer = task_state.consensus.write().await; if let Some(locked_view_number) = new_locked_view_number { - // Broadcast the locked view update. - broadcast_event( - HotShotEvent::LockedViewUpdated(locked_view_number).into(), - sender, - ) - .await; - consensus_writer.update_locked_view(locked_view_number)?; } @@ -99,11 +90,6 @@ pub(crate) async fn handle_quorum_proposal_validated< // Set the new decided view. consensus_writer.update_last_decided_view(decided_view_number)?; - broadcast_event( - HotShotEvent::LastDecidedViewUpdated(decided_view_number).into(), - sender, - ) - .await; consensus_writer .metrics @@ -143,8 +129,6 @@ pub(crate) async fn handle_quorum_proposal_validated< &task_state.output_event_stream, ) .await; - - broadcast_event(Arc::new(HotShotEvent::LeafDecided(leaves_decided)), sender).await; tracing::debug!("Successfully sent decide event"); } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 90a8ea44a5..48d4c5997d 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -168,13 +168,6 @@ impl + 'static, V: Versions> Handl } } - broadcast_event( - Arc::new(HotShotEvent::QuorumVoteDependenciesValidated( - self.view_number, - )), - &self.sender, - ) - .await; broadcast_event( Arc::new(HotShotEvent::ViewChange(self.view_number + 1)), &self.sender, @@ -423,9 +416,7 @@ impl, V: Versions> QuorumVoteTaskS ); // Handle the event before creating the dependency task. - if let Err(e) = - handle_quorum_proposal_validated(&proposal.data, &event_sender, self).await - { + if let Err(e) = handle_quorum_proposal_validated(&proposal.data, self).await { tracing::debug!( "Failed to handle QuorumProposalValidated event; error = {e:#}" ); @@ -567,12 +558,6 @@ impl, V: Versions> QuorumVoteTaskS None, ); } - HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { - tracing::debug!("All vote dependencies verified for view {:?}", view_number); - if !self.update_latest_voted_view(*view_number).await { - tracing::debug!("view not updated"); - } - } HotShotEvent::Timeout(view) => { let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks @@ -584,6 +569,9 @@ impl, V: Versions> QuorumVoteTaskS } HotShotEvent::ViewChange(mut view) => { view = TYPES::View::new(view.saturating_sub(1)); + if !self.update_latest_voted_view(view).await { + tracing::debug!("view not updated"); + } // cancel old tasks let current_tasks = self.vote_dependencies.split_off(&view); while let Some((_, task)) = self.vote_dependencies.pop_last() { @@ -651,13 +639,6 @@ impl, V: Versions> QuorumVoteTaskS return; } - broadcast_event( - Arc::new(HotShotEvent::QuorumVoteDependenciesValidated( - proposal.data.view_number(), - )), - &event_sender, - ) - .await; broadcast_event( Arc::new(HotShotEvent::ViewChange(proposal.data.view_number() + 1)), &event_sender, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3f4e6cda37..ec59451f42 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -111,15 +111,6 @@ impl> VidTaskState { ) .await; - // send the block to the VID dispersal function - broadcast_event( - Arc::new(HotShotEvent::BlockReady(vid_disperse, *view_number)), - &event_stream, - ) - .await; - } - - HotShotEvent::BlockReady(vid_disperse, view_number) => { let view_number = *view_number; let Ok(signature) = TYPES::SignatureKey::sign( &self.private_key, diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 3963f95e78..6af7275aed 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -136,17 +136,6 @@ where Box::new(EventPredicate { check, info }) } -pub fn leaf_decided() -> Box> -where - TYPES: NodeType, -{ - let info = "LeafDecided".to_string(); - let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), LeafDecided(_))); - - Box::new(EventPredicate { check, info }) -} - pub fn quorum_vote_send() -> Box> where TYPES: NodeType, @@ -293,24 +282,3 @@ where }); Box::new(EventPredicate { check, info }) } - -pub fn high_qc_updated() -> Box> -where - TYPES: NodeType, -{ - let info = "HighQcUpdated".to_string(); - let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), HighQcUpdated(..))); - Box::new(EventPredicate { check, info }) -} - -pub fn quorum_vote_dependencies_validated() -> Box> -where - TYPES: NodeType, -{ - let info = "QuorumVoteDependenciesValidated".to_string(); - let check: EventCallback = Arc::new(move |e: Arc>| { - matches!(e.as_ref(), QuorumVoteDependenciesValidated(..)) - }); - Box::new(EventPredicate { check, info }) -} diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index 2791646cba..f828a19737 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{sync::Arc, time::Duration}; +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; @@ -168,6 +168,7 @@ pub async fn add_network_message_test_task< internal_event_stream: internal_event_stream.clone(), external_event_stream: external_event_stream.clone(), public_key, + transactions_cache: lru::LruCache::new(NonZeroUsize::new(100_000).unwrap()), }; let network = Arc::clone(&net); diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index cb216376d7..bb96a52b91 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -94,7 +94,6 @@ async fn test_quorum_proposal_recv_task() { let expectations = vec![Expectations::from_outputs(vec![ exact(QuorumProposalPreliminarilyValidated(proposals[1].clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), exact(QuorumProposalValidated( proposals[1].clone(), leaves[0].clone(), @@ -195,7 +194,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), exact(ViewChange(ViewNumber::new(3))), exact(QuorumProposalRequestSend(req, signature)), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), ])]; let state = diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 26fe07ab61..65f6800bb7 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -18,7 +18,7 @@ use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposa use hotshot_testing::{ all_predicates, helpers::{build_payload_commitment, build_system_handle}, - predicates::event::{all_predicates, exact, quorum_proposal_send}, + predicates::event::{all_predicates, quorum_proposal_send}, random, script::{Expectations, InputOrder, TaskScript}, serial, @@ -120,11 +120,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let expectations = vec![ Expectations::from_outputs(vec![]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - quorum_proposal_send(), - ]), + Expectations::from_outputs(all_predicates![quorum_proposal_send(),]), ]; let quorum_proposal_task_state = @@ -284,27 +280,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ]; let expectations = vec![ - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), - quorum_proposal_send(), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[3].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[3].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[4].data.justify_qc.clone())), - ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![quorum_proposal_send(),]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), ]; let quorum_proposal_task_state = @@ -638,27 +618,11 @@ async fn test_quorum_proposal_task_liveness_check() { ]; let expectations = vec![ - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), - quorum_proposal_send(), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[3].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[3].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[4].data.justify_qc.clone())), - ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![quorum_proposal_send(),]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), ]; let quorum_proposal_task_state = diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 9735d07b74..4bc81b829a 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -85,7 +85,6 @@ async fn test_quorum_vote_task_success() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), exact(ViewChange(ViewNumber::new(3))), quorum_vote_send(), ])]; @@ -167,10 +166,9 @@ async fn test_quorum_vote_task_miss_dependency() { Expectations::from_outputs(all_predicates![exact(VidShareValidated( vids[1].0[0].clone() ))]), - Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(1))), - exact(DaCertificateValidated(dacs[2].clone())) - ]), + Expectations::from_outputs(all_predicates![exact(DaCertificateValidated( + dacs[2].clone() + ))]), Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 828b823ab0..888fb80f2a 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -212,18 +212,10 @@ async fn test_upgrade_task_with_proposal() { timeout: TIMEOUT, state: proposal_state, expectations: vec![ - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), quorum_proposal_send_with_upgrade_certificate::() ]), ], diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index eaaa70fc94..bc0a17617b 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -134,16 +134,13 @@ async fn test_upgrade_task_with_vote() { Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), exact(ViewChange(ViewNumber::new(3))), quorum_vote_send(), ]), Expectations::from_outputs_and_task_states( all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(1))), exact(DaCertificateValidated(dacs[2].clone())), exact(VidShareValidated(vids[2].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(3))), exact(ViewChange(ViewNumber::new(4))), quorum_vote_send(), ], @@ -151,12 +148,8 @@ async fn test_upgrade_task_with_vote() { ), Expectations::from_outputs_and_task_states( all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(2))), - exact(LastDecidedViewUpdated(ViewNumber::new(1))), - leaf_decided(), exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(4))), exact(ViewChange(ViewNumber::new(5))), quorum_vote_send(), ], @@ -164,25 +157,14 @@ async fn test_upgrade_task_with_vote() { ), Expectations::from_outputs_and_task_states( all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(3))), - exact(LastDecidedViewUpdated(ViewNumber::new(2))), - leaf_decided(), exact(DaCertificateValidated(dacs[4].clone())), exact(VidShareValidated(vids[4].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(5))), exact(ViewChange(ViewNumber::new(6))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], ), - Expectations::from_outputs_and_task_states( - all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(4))), - exact(LastDecidedViewUpdated(ViewNumber::new(3))), - leaf_decided(), - ], - vec![decided_upgrade_certificate()], - ), + Expectations::from_outputs_and_task_states(vec![], vec![decided_upgrade_certificate()]), ]; let vote_state = diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 2ebff4dec7..d52777844c 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -137,7 +137,6 @@ async fn test_vid_task() { .unwrap()], None, )), - exact(BlockReady(vid_disperse, ViewNumber::new(2))), exact(VidDisperseSend(vid_proposal.clone(), pub_key)), ]), ]; diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 35fd7200af..7839b0d1c0 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -78,11 +78,7 @@ async fn test_vote_dependency_handle() { // For each permutation... for inputs in all_inputs.into_iter() { // The outputs are static here, but we re-make them since we use `into_iter` below - let outputs = vec![ - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), - exact(ViewChange(ViewNumber::new(3))), - quorum_vote_send(), - ]; + let outputs = vec![exact(ViewChange(ViewNumber::new(3))), quorum_vote_send()]; let (event_sender, mut event_receiver) = broadcast(1024); let view_number = ViewNumber::new(node_id); From eee310b5a7128423b64080206e4081b05e8a8b70 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:00:00 -0500 Subject: [PATCH 1298/1393] fix null block issue (#3881) (#3882) --- example-types/src/block_types.rs | 1 + example-types/src/state_types.rs | 1 + task-impls/src/quorum_proposal/handlers.rs | 1 + task-impls/src/quorum_vote/handlers.rs | 1 + task-impls/src/transactions.rs | 2 ++ testing/tests/tests_1/da_task.rs | 6 ++++-- testing/tests/tests_1/quorum_proposal_task.rs | 9 +++++++-- testing/tests/tests_1/transaction_task.rs | 3 ++- testing/tests/tests_1/upgrade_task_with_proposal.rs | 1 + testing/tests/tests_1/vid_task.rs | 6 ++++-- types/src/data.rs | 5 +++-- types/src/traits/block_contents.rs | 1 + types/src/traits/states.rs | 1 + 13 files changed, 29 insertions(+), 9 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index 7241ac49b1..add9d76269 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -337,6 +337,7 @@ impl< builder_commitment: BuilderCommitment, metadata: >::Metadata, _builder_fee: Vec>, + _view_number: u64, _vid_common: VidCommon, _auction_results: Option, _version: Version, diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 50c40fee73..c5fde414bf 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -107,6 +107,7 @@ impl ValidatedState for TestValidatedState { _proposed_header: &TYPES::BlockHeader, _vid_common: VidCommon, _version: Version, + _view_number: u64, ) -> Result<(Self, Self::Delta), Self::Error> { Self::run_delay_settings_from_config(&instance.delay_config).await; Ok(( diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index f3b0f74bdc..8c375b4b54 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -205,6 +205,7 @@ impl ProposalDependencyHandle { commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fees.to_vec(), + *self.view_number, vid_share.data.common.clone(), commitment_and_metadata.auction_result, version, diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 58e7a0849c..2f78773f88 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -228,6 +228,7 @@ pub(crate) async fn update_shared_state< &proposed_leaf.block_header().clone(), vid_share.data.common.clone(), version, + *view_number, ) .await .wrap() diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 8e84aa28b5..5b286bb095 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -217,6 +217,7 @@ impl, V: Versions> TransactionTask let Some(null_fee) = null_block::builder_fee::( self.membership.total_nodes(self.cur_epoch), version, + *block_view, ) else { tracing::error!("Failed to get null fee"); return None; @@ -361,6 +362,7 @@ impl, V: Versions> TransactionTask let Some(null_fee) = null_block::builder_fee::( self.membership.total_nodes(self.cur_epoch), version, + *block_view, ) else { tracing::error!("Failed to calculate null block fee."); return None; diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index ca7839e575..b89b141108 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -99,7 +99,8 @@ async fn test_da_task() { ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], Some(precompute), @@ -198,7 +199,8 @@ async fn test_da_task_storage_failure() { ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], Some(precompute), diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 65f6800bb7..d8ae3e2b2b 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -94,6 +94,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); drop(consensus_writer); @@ -187,6 +188,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); @@ -363,7 +365,8 @@ async fn test_quorum_proposal_task_qc_timeout() { ViewNumber::new(3), vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(3), ) .unwrap()], None, @@ -452,7 +455,8 @@ async fn test_quorum_proposal_task_view_sync() { ViewNumber::new(2), vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], None, @@ -521,6 +525,7 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index d002257a7f..7205628c87 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -51,7 +51,8 @@ async fn test_transaction_task_leader_two_views_in_a_row() { vec1::vec1![ null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(4), ) .unwrap() ], diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 888fb80f2a..ac4b338f37 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -132,6 +132,7 @@ async fn test_upgrade_task_with_proposal() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index d52777844c..d8328b98cf 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -111,7 +111,8 @@ async fn test_vid_task() { ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], Some(vid_precompute), @@ -132,7 +133,8 @@ async fn test_vid_task() { ViewNumber::new(2), vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], None, diff --git a/types/src/data.rs b/types/src/data.rs index 11cd1ac358..fc5506ff25 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -796,7 +796,7 @@ pub mod null_block { use crate::{ traits::{ block_contents::BuilderFee, - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{NodeType, Versions}, signature_key::BuilderSignatureKey, BlockPayload, }, @@ -825,6 +825,7 @@ pub mod null_block { pub fn builder_fee( num_storage_nodes: usize, version: vbs::version::Version, + view_number: u64, ) -> Option> { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; @@ -838,7 +839,7 @@ pub mod null_block { match TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace( &priv_key, FEE_AMOUNT, - *TYPES::View::genesis(), + view_number, ) { Ok(sig) => Some(BuilderFee { fee_amount: FEE_AMOUNT, diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index de9968488c..5901b7242d 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -220,6 +220,7 @@ pub trait BlockHeader: builder_commitment: BuilderCommitment, metadata: >::Metadata, builder_fee: Vec>, + view_number: u64, vid_common: VidCommon, auction_results: Option, version: Version, diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 9c951bb8d0..956fd22d02 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -73,6 +73,7 @@ pub trait ValidatedState: proposed_header: &TYPES::BlockHeader, vid_common: VidCommon, version: Version, + view_number: u64, ) -> impl Future> + Send; /// Construct the state with the given block header. From 89842bf327edefd56209d3fb9f0144c28ec1644e Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 13 Nov 2024 19:13:43 +0100 Subject: [PATCH 1299/1393] Lr/increment epoch (#3828) * Initial commit * Propose the same block at the end of the epoch * No VID and DAC required for the additional last block proposals * Guard against division by 0 * Traverse the leaves to check an eQC * Remove unneeded async in function definition * Remove trace * Make sure proposal is for the same block if justify QC references the last block * Initial commit * Gate epoch proposal logic * Gate epochs voting logic * Create helper method to check if QC is part of 3-chain * Update epoch on `ViewChange` event * Adjust tests * Update view and epoch when QC is formed * Fixes after review * Get rid of nasty nested if-elses * Fix fmt * Update VID that we reuse at the end of the epoch * Fix fmt * Do not create VID and DAC dependencies when voting for the last block * Simplify how we get a header * Correctly update epoch when voting and forming QC * Send ViewChange event when voting after shared state's been updated * Adjust tests * Remove an obsolete DA ViewChange code * Fetch proposal if missing when changing view after forming QC * Fix deadlock * Update epoch only when receiving or forming an eQC * update logging (#3844) * [Tech debt] Remove `async-std` (#3845) * tests and CI * remove `async-std` * `fmt` * fix doc build * remove counter tests * remove counter tests * build w/o lockfile * `lock` -> `std` * Revert "`lock` -> `std`" This reverts commit 21ebf054f3dd862a70d6b776f4016647cfd481c4. * lock * `async_broadcast` * overflow * Revert "`async_broadcast`" This reverts commit f03bb57fcc5fcdefe708c79fee75d3ae85ae04f1. * `try_send` * Fix fmt * Simplify code * Move a helper function to types crate * Rename storage method * Remove unused code --------- Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> Co-authored-by: rob-maron <132852777+rob-maron@users.noreply.github.com> --- example-types/src/storage_types.rs | 5 + hotshot/src/lib.rs | 15 ++- hotshot/src/tasks/task_state.rs | 3 + task-impls/src/consensus/handlers.rs | 7 +- task-impls/src/consensus/mod.rs | 11 +- task-impls/src/da.rs | 9 +- task-impls/src/events.rs | 11 +- task-impls/src/network.rs | 5 +- task-impls/src/quorum_proposal/mod.rs | 2 +- .../src/quorum_proposal_recv/handlers.rs | 30 ++++- task-impls/src/quorum_proposal_recv/mod.rs | 31 +++-- task-impls/src/quorum_vote/mod.rs | 119 +++++++++--------- task-impls/src/request.rs | 2 +- task-impls/src/transactions.rs | 7 +- task-impls/src/upgrade.rs | 7 +- task-impls/src/vid.rs | 7 +- task-impls/src/view_sync.rs | 17 ++- testing/src/predicates/event.rs | 2 +- testing/src/spinning_task.rs | 2 + testing/tests/tests_1/da_task.rs | 8 +- .../tests_1/quorum_proposal_recv_task.rs | 5 +- testing/tests/tests_1/quorum_vote_task.rs | 4 +- testing/tests/tests_1/transaction_task.rs | 7 +- .../tests/tests_1/upgrade_task_with_vote.rs | 10 +- testing/tests/tests_1/vid_task.rs | 4 +- testing/tests/tests_1/view_sync_task.rs | 6 +- .../tests/tests_1/vote_dependency_handle.rs | 7 +- types/src/consensus.rs | 117 ++++++++--------- types/src/utils.rs | 14 ++- 29 files changed, 290 insertions(+), 184 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index e93a9b3e72..c4be058fe4 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -42,6 +42,7 @@ pub struct TestStorageState { proposals: BTreeMap>>, high_qc: Option>, action: TYPES::View, + epoch: TYPES::Epoch, } impl Default for TestStorageState { @@ -52,6 +53,7 @@ impl Default for TestStorageState { proposals: BTreeMap::new(), high_qc: None, action: TYPES::View::genesis(), + epoch: TYPES::Epoch::genesis(), } } } @@ -101,6 +103,9 @@ impl TestStorage { pub async fn last_actioned_view(&self) -> TYPES::View { self.inner.read().await.action } + pub async fn last_actioned_epoch(&self) -> TYPES::Epoch { + self.inner.read().await.epoch + } } #[async_trait] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b507a0a129..05ccea0a0c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -128,6 +128,9 @@ pub struct SystemContext, V: Versi /// The view to enter when first starting consensus start_view: TYPES::View, + /// The epoch to enter when first starting consensus + start_epoch: TYPES::Epoch, + /// Access to the output event stream. output_event_stream: (Sender>, InactiveReceiver>), @@ -171,6 +174,7 @@ impl, V: Versions> Clone consensus: self.consensus.clone(), instance_state: Arc::clone(&self.instance_state), start_view: self.start_view, + start_epoch: self.start_epoch, output_event_stream: self.output_event_stream.clone(), external_event_stream: self.external_event_stream.clone(), anchored_leaf: self.anchored_leaf.clone(), @@ -345,6 +349,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext { /// Starting view number that should be equivelant to the view the node shut down with last. start_view: TYPES::View, + /// Starting epoch number that should be equivelant to the epoch the node shut down with last. + start_epoch: TYPES::Epoch, /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. actioned_view: TYPES::View, @@ -1000,6 +1010,7 @@ impl HotShotInitializer { validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::View::new(0), + start_epoch: TYPES::Epoch::new(0), actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, @@ -1023,6 +1034,7 @@ impl HotShotInitializer { instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::View, + start_epoch: TYPES::Epoch, actioned_view: TYPES::View, saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, @@ -1036,6 +1048,7 @@ impl HotShotInitializer { validated_state, state_delta: None, start_view, + start_epoch, actioned_view, saved_proposals, high_qc, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index de7e0b8739..09bc3b0eb4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -241,6 +241,7 @@ impl, V: Versions> CreateTaskState id: handle.hotshot.id, storage: Arc::clone(&handle.storage), upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } @@ -293,6 +294,7 @@ impl, V: Versions> CreateTaskState spawned_tasks: BTreeMap::new(), id: handle.hotshot.id, upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } @@ -324,6 +326,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), id: handle.hotshot.id, upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 7a6d509913..a148ec28a0 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -113,9 +113,15 @@ pub(crate) async fn handle_view_change< V: Versions, >( new_view_number: TYPES::View, + epoch_number: TYPES::Epoch, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { + if epoch_number > task_state.cur_epoch { + task_state.cur_epoch = epoch_number; + tracing::info!("Progress: entered epoch {:>6}", *epoch_number); + } + ensure!( new_view_number > task_state.cur_view, "New view is not larger than the current view" @@ -129,7 +135,6 @@ pub(crate) async fn handle_view_change< } // Move this node to the next view task_state.cur_view = new_view_number; - task_state .consensus .write() diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index e4d75057ac..15d6bc6ec8 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -92,10 +92,13 @@ pub struct ConsensusTaskState, V: /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> ConsensusTaskState { /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, cur_epoch = *self.cur_epoch), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -116,8 +119,10 @@ impl, V: Versions> ConsensusTaskSt tracing::debug!("Failed to handle TimeoutVoteRecv event; error = {e}"); } } - HotShotEvent::ViewChange(new_view_number) => { - if let Err(e) = handle_view_change(*new_view_number, &sender, self).await { + HotShotEvent::ViewChange(new_view_number, epoch_number) => { + if let Err(e) = + handle_view_change(*new_view_number, *epoch_number, &sender, self).await + { tracing::trace!("Failed to handle ViewChange event; error = {e}"); } } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 33e36c04f0..d0182460f2 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -84,7 +84,7 @@ pub struct DaTaskState, V: Version impl, V: Versions> DaTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error", target = "DaTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "DA Main Task", level = "error", target = "DaTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -285,9 +285,12 @@ impl, V: Versions> DaTaskState { - let view = *view; + HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } + let view = *view; ensure!( *self.cur_view < *view, info!("Received a view change to an older view.") diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 652347f14e..1a45c23ab5 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -122,7 +122,7 @@ pub enum HotShotEvent { /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks - ViewChange(TYPES::View), + ViewChange(TYPES::View, TYPES::Epoch), /// Timeout for the view sync protocol; emitted by a replica in the view sync task ViewSyncTimeout(TYPES::View, u64, ViewSyncPhase), @@ -294,7 +294,7 @@ impl HotShotEvent { } HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), - HotShotEvent::ViewChange(view_number) + HotShotEvent::ViewChange(view_number, _) | HotShotEvent::ViewSyncTimeout(view_number, _, _) | HotShotEvent::ViewSyncTrigger(view_number) | HotShotEvent::Timeout(view_number) => Some(*view_number), @@ -379,8 +379,11 @@ impl Display for HotShotEvent { HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) } - HotShotEvent::ViewChange(view_number) => { - write!(f, "ViewChange(view_number={view_number:?})") + HotShotEvent::ViewChange(view_number, epoch_number) => { + write!( + f, + "ViewChange(view_number={view_number:?}, epoch_number={epoch_number:?})" + ) } HotShotEvent::ViewSyncTimeout(view_number, _, _) => { write!(f, "ViewSyncTimeout(view_number={view_number:?})") diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index b36ecdbb23..42c8a19748 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -609,8 +609,11 @@ impl< TransmitType::Direct(leader), )) } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, epoch) => { self.view = view; + if epoch > self.epoch { + self.epoch = epoch; + } self.cancel_tasks(view); let net = Arc::clone(&self.network); let epoch = self.epoch.u64(); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 1c63b2086e..02f6ee23e8 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -503,7 +503,7 @@ impl, V: Versions> Arc::clone(&event), )?; } - HotShotEvent::ViewChange(view) | HotShotEvent::Timeout(view) => { + HotShotEvent::ViewChange(view, _) | HotShotEvent::Timeout(view) => { self.cancel_tasks(*view); } _ => {} diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 1a9479e1ad..24ea89a3c2 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -11,6 +11,7 @@ use std::sync::Arc; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; +use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf, QuorumProposal}, @@ -18,12 +19,12 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ election::Membership, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, ValidatedState, }, - utils::{View, ViewInner}, + utils::{epoch_from_block_number, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; use tokio::spawn; @@ -227,8 +228,18 @@ pub(crate) async fn handle_quorum_proposal_recv< justify_qc.data.leaf_commit ); validate_proposal_liveness(proposal, &validation_info).await?; + let block_number = proposal.data.block_header.block_number(); + let epoch = TYPES::Epoch::new(epoch_from_block_number( + block_number, + validation_info.epoch_height, + )); + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + view_number, + *epoch + ); broadcast_event( - Arc::new(HotShotEvent::ViewChange(view_number)), + Arc::new(HotShotEvent::ViewChange(view_number, epoch)), event_sender, ) .await; @@ -244,8 +255,19 @@ pub(crate) async fn handle_quorum_proposal_recv< quorum_proposal_sender_key, ) .await?; + + let epoch_number = TYPES::Epoch::new(epoch_from_block_number( + proposal.data.block_header.block_number(), + validation_info.epoch_height, + )); + + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + view_number, + *epoch_number + ); broadcast_event( - Arc::new(HotShotEvent::ViewChange(view_number)), + Arc::new(HotShotEvent::ViewChange(view_number, epoch_number)), event_sender, ) .await; diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index fc729596f0..8f9d9e3f05 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -8,14 +8,20 @@ use std::{collections::BTreeMap, sync::Arc}; +use self::handlers::handle_quorum_proposal_recv; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, +}; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; -use futures::future::join_all; +use either::Either; +use futures::future::{err, join_all}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::{Leaf, ViewChangeEvidence}, + data::{EpochNumber, Leaf, ViewChangeEvidence}, event::Event, message::UpgradeLock, simple_certificate::UpgradeCertificate, @@ -23,18 +29,12 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use utils::anytrace::{bail, Result}; use vbs::version::Version; - -use self::handlers::handle_quorum_proposal_recv; -use crate::{ - events::{HotShotEvent, ProposalMissing}, - helpers::{broadcast_event, parent_leaf_and_state}, -}; /// Event handlers for this task. mod handlers; @@ -77,6 +77,9 @@ pub struct QuorumProposalRecvTaskState, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } /// all the info we need to validate a proposal. This makes it easy to spawn an effemeral task to @@ -100,6 +103,8 @@ pub(crate) struct ValidationInfo, pub(crate) storage: Arc>, /// Lock for a decided upgrade pub(crate) upgrade_lock: UpgradeLock, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> @@ -117,7 +122,7 @@ impl, V: Versions> } /// Handles all consensus events relating to propose and vote-enabling events. - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Consensus replica task", level = "error")] #[allow(unused_variables)] pub async fn handle( &mut self, @@ -143,6 +148,7 @@ impl, V: Versions> output_event_stream: self.output_event_stream.clone(), storage: Arc::clone(&self.storage), upgrade_lock: self.upgrade_lock.clone(), + epoch_height: self.epoch_height, }; match handle_quorum_proposal_recv( proposal, @@ -157,7 +163,10 @@ impl, V: Versions> Err(e) => debug!(?e, "Failed to validate the proposal"), } } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } if self.cur_view >= *view { return; } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 48d4c5997d..053ef851f9 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -6,6 +6,11 @@ use std::{collections::BTreeMap, sync::Arc}; +use crate::{ + events::HotShotEvent, + helpers::broadcast_event, + quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, +}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; @@ -26,6 +31,7 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, + utils::epoch_from_block_number, vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; @@ -35,12 +41,6 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; -use crate::{ - events::HotShotEvent, - helpers::broadcast_event, - quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, -}; - /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -71,8 +71,6 @@ pub struct VoteDependencyHandle, V pub storage: Arc>, /// View number to vote on. pub view_number: TYPES::View, - /// Epoch number to vote on. - pub epoch_number: TYPES::Epoch, /// Event sender. pub sender: Sender>>, /// Event receiver. @@ -81,6 +79,8 @@ pub struct VoteDependencyHandle, V pub upgrade_lock: UpgradeLock, /// The node's id pub id: u64, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl + 'static, V: Versions> HandleDepOutput @@ -115,7 +115,7 @@ impl + 'static, V: Versions> Handl .consensus .read() .await - .is_qc_forming_eqc(&proposal.data.justify_qc) + .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit) { tracing::debug!("Do not vote here. Voting for this case is handled in QuorumVoteTaskState"); return; @@ -168,11 +168,6 @@ impl + 'static, V: Versions> Handl } } - broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.view_number + 1)), - &self.sender, - ) - .await; let Some(vid_share) = vid_share else { tracing::error!( "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", @@ -211,6 +206,22 @@ impl + 'static, V: Versions> Handl return; } + let current_epoch = + TYPES::Epoch::new(epoch_from_block_number(leaf.height(), self.epoch_height)); + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + self.view_number + 1, + *current_epoch + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange( + self.view_number + 1, + current_epoch, + )), + &self.sender, + ) + .await; + if let Err(e) = submit_vote::( self.sender.clone(), Arc::clone(&self.quorum_membership), @@ -218,7 +229,7 @@ impl + 'static, V: Versions> Handl self.private_key.clone(), self.upgrade_lock.clone(), self.view_number, - self.epoch_number, + current_epoch, Arc::clone(&self.storage), leaf, vid_share, @@ -272,6 +283,9 @@ pub struct QuorumVoteTaskState, V: /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> QuorumVoteTaskState { @@ -325,7 +339,6 @@ impl, V: Versions> QuorumVoteTaskS fn create_dependency_task_if_new( &mut self, view_number: TYPES::View, - epoch_number: TYPES::Epoch, event_receiver: Receiver>>, event_sender: &Sender>>, event: Option>>, @@ -364,11 +377,11 @@ impl, V: Versions> QuorumVoteTaskS quorum_membership: Arc::clone(&self.quorum_membership), storage: Arc::clone(&self.storage), view_number, - epoch_number, sender: event_sender.clone(), receiver: event_receiver.clone().deactivate(), upgrade_lock: self.upgrade_lock.clone(), id: self.id, + epoch_height: self.epoch_height, }, ); self.vote_dependencies @@ -432,25 +445,18 @@ impl, V: Versions> QuorumVoteTaskS .version(proposal.data.view_number()) .await?; - let consensus_reader = self.consensus.read().await; - let cur_epoch = consensus_reader.cur_epoch(); - let is_qc_forming_eqc = - consensus_reader.is_qc_forming_eqc(&proposal.data.justify_qc); - drop(consensus_reader); - - if version >= V::Epochs::VERSION && is_qc_forming_eqc { - self.handle_eqc_voting( - proposal, - parent_leaf, - event_sender, - event_receiver, - cur_epoch, - ) - .await; + let is_justify_qc_forming_eqc = self + .consensus + .read() + .await + .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit); + + if version >= V::Epochs::VERSION && is_justify_qc_forming_eqc { + self.handle_eqc_voting(proposal, parent_leaf, event_sender, event_receiver) + .await; } else { self.create_dependency_task_if_new( proposal.data.view_number, - cur_epoch, event_receiver, &event_sender, Some(Arc::clone(&event)), @@ -486,13 +492,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new( - view, - cur_epoch, - event_receiver, - &event_sender, - None, - ); + self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); @@ -550,13 +550,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new( - view, - cur_epoch, - event_receiver, - &event_sender, - None, - ); + self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } HotShotEvent::Timeout(view) => { let view = TYPES::View::new(view.saturating_sub(1)); @@ -567,7 +561,7 @@ impl, V: Versions> QuorumVoteTaskS } self.vote_dependencies = current_tasks; } - HotShotEvent::ViewChange(mut view) => { + HotShotEvent::ViewChange(mut view, _) => { view = TYPES::View::new(view.saturating_sub(1)); if !self.update_latest_voted_view(view).await { tracing::debug!("view not updated"); @@ -585,13 +579,13 @@ impl, V: Versions> QuorumVoteTaskS } /// Handles voting for the last block in the epoch to form the Extended QC. + #[allow(clippy::too_many_lines)] async fn handle_eqc_voting( &self, proposal: &Proposal>, parent_leaf: &Leaf, event_sender: Sender>>, event_receiver: Receiver>>, - epoch_number: TYPES::Epoch, ) { tracing::info!("Reached end of epoch. Justify QC is for the last block in the epoch."); let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); @@ -639,12 +633,6 @@ impl, V: Versions> QuorumVoteTaskS return; } - broadcast_event( - Arc::new(HotShotEvent::ViewChange(proposal.data.view_number() + 1)), - &event_sender, - ) - .await; - // Update internal state if let Err(e) = update_shared_state::( OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), @@ -667,6 +655,25 @@ impl, V: Versions> QuorumVoteTaskS return; } + let current_block_number = proposed_leaf.height(); + let current_epoch = TYPES::Epoch::new(epoch_from_block_number( + current_block_number, + self.epoch_height, + )); + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + proposal.data.view_number() + 1, + *current_epoch + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange( + proposal.data.view_number() + 1, + current_epoch, + )), + &event_sender, + ) + .await; + if let Err(e) = submit_vote::( event_sender.clone(), Arc::clone(&self.quorum_membership), @@ -674,7 +681,7 @@ impl, V: Versions> QuorumVoteTaskS self.private_key.clone(), self.upgrade_lock.clone(), proposal.data.view_number(), - epoch_number, + current_epoch, Arc::clone(&self.storage), proposed_leaf, updated_vid, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 23646c447e..1a565a85de 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -112,7 +112,7 @@ impl> TaskState for NetworkRequest } Ok(()) } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, _) => { let view = *view; if view > self.view { self.view = view; diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 5b286bb095..43cdef8bfc 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -452,7 +452,7 @@ impl, V: Versions> TransactionTask } /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -471,7 +471,10 @@ impl, V: Versions> TransactionTask ) .await; } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } let view = TYPES::View::new(std::cmp::max(1, **view)); ensure!( *view > *self.cur_view, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 1eb60cf98f..71cd0a7ef0 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -105,7 +105,7 @@ impl, V: Versions> UpgradeTaskStat } /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Upgrade Task", level = "error")] pub async fn handle( &mut self, event: Arc>, @@ -249,7 +249,10 @@ impl, V: Versions> UpgradeTaskStat ) .await?; } - HotShotEvent::ViewChange(new_view) => { + HotShotEvent::ViewChange(new_view, epoch_number) => { + if *epoch_number > self.cur_epoch { + self.cur_epoch = *epoch_number; + } ensure!(self.cur_view < *new_view || *self.cur_view == 0); self.cur_view = *new_view; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index ec59451f42..3795577bbb 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -59,7 +59,7 @@ pub struct VidTaskState> { impl> VidTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error", target = "VidTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "VID Main Task", level = "error", target = "VidTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -134,7 +134,7 @@ impl> VidTaskState { .await; } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, epoch) => { let view = *view; if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; @@ -144,6 +144,9 @@ impl> VidTaskState { info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } return None; } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 26720d28eb..4b661a660a 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -265,7 +265,7 @@ impl, V: Versions> ViewSyncTaskSta task_map.insert(view, replica_state); } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task pub async fn handle( @@ -420,7 +420,10 @@ impl, V: Versions> ViewSyncTaskSta } } - &HotShotEvent::ViewChange(new_view) => { + &HotShotEvent::ViewChange(new_view, epoch) => { + if epoch > self.cur_epoch { + self.cur_epoch = epoch; + } let new_view = TYPES::View::new(*new_view); if self.cur_view < new_view { tracing::debug!( @@ -492,7 +495,7 @@ impl, V: Versions> ViewSyncTaskSta // If this is the first timeout we've seen advance to the next view self.cur_view = view_number + 1; broadcast_event( - Arc::new(HotShotEvent::ViewChange(TYPES::View::new(*self.cur_view))), + Arc::new(HotShotEvent::ViewChange(self.cur_view, self.cur_epoch)), &event_stream, ) .await; @@ -508,7 +511,7 @@ impl, V: Versions> ViewSyncTaskSta impl, V: Versions> ViewSyncReplicaTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Replica Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, @@ -658,8 +661,9 @@ impl, V: Versions> *self.next_view ); + // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view)), + Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), &event_stream, ) .await; @@ -724,8 +728,9 @@ impl, V: Versions> timeout_task.abort(); } + // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view)), + Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), &event_stream, ) .await; diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index 6af7275aed..b188338839 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -153,7 +153,7 @@ where { let info = "ViewChange".to_string(); let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), ViewChange(_))); + Arc::new(move |e: Arc>| matches!(e.as_ref(), ViewChange(_, _))); Box::new(EventPredicate { check, info }) } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 5593a4336e..e2a299a171 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -156,6 +156,7 @@ where TestInstanceState::new(self.async_delay_config.clone()), None, TYPES::View::genesis(), + TYPES::Epoch::genesis(), TYPES::View::genesis(), BTreeMap::new(), self.high_qc.clone(), @@ -238,6 +239,7 @@ where TestInstanceState::new(self.async_delay_config.clone()), None, read_storage.last_actioned_view().await, + read_storage.last_actioned_epoch().await, read_storage.last_actioned_view().await, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index b89b141108..96a7de771d 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -89,8 +89,8 @@ async fn test_da_task() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1)), - ViewChange(ViewNumber::new(2)), + ViewChange(ViewNumber::new(1), EpochNumber::new(1)), + ViewChange(ViewNumber::new(2), EpochNumber::new(1)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { @@ -189,8 +189,8 @@ async fn test_da_task_storage_failure() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1)), - ViewChange(ViewNumber::new(2)), + ViewChange(ViewNumber::new(1), EpochNumber::new(1)), + ViewChange(ViewNumber::new(2), EpochNumber::new(1)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index bb96a52b91..dc42dfc723 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -27,6 +27,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{Leaf, ViewNumber}, request_response::ProposalRequestPayload, @@ -98,7 +99,7 @@ async fn test_quorum_proposal_recv_task() { proposals[1].clone(), leaves[0].clone(), )), - exact(ViewChange(ViewNumber::new(2))), + exact(ViewChange(ViewNumber::new(2), EpochNumber::new(0))), ])]; let state = @@ -192,7 +193,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), - exact(ViewChange(ViewNumber::new(3))), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), exact(QuorumProposalRequestSend(req, signature)), ])]; diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 4bc81b829a..b4350abe3c 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -23,7 +23,7 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, }; use hotshot_types::{ - data::{Leaf, ViewNumber}, + data::{EpochNumber, Leaf, ViewNumber}, traits::node_implementation::ConsensusTime, }; @@ -85,7 +85,7 @@ async fn test_quorum_vote_task_success() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(ViewChange(ViewNumber::new(3))), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), quorum_vote_send(), ])]; diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 7205628c87..0a4877fcc1 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -33,8 +33,11 @@ async fn test_transaction_task_leader_two_views_in_a_row() { let mut output = Vec::new(); let current_view = ViewNumber::new(4); - input.push(HotShotEvent::ViewChange(current_view)); - input.push(HotShotEvent::ViewChange(current_view + 1)); + input.push(HotShotEvent::ViewChange(current_view, EpochNumber::new(1))); + input.push(HotShotEvent::ViewChange( + current_view + 1, + EpochNumber::new(1), + )); input.push(HotShotEvent::Shutdown); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index bc0a17617b..72790bec4d 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -29,7 +29,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, Leaf, ViewNumber}, + data::{null_block, EpochNumber, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, vote::HasViewNumber, @@ -134,14 +134,14 @@ async fn test_upgrade_task_with_vote() { Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(ViewChange(ViewNumber::new(3))), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), quorum_vote_send(), ]), Expectations::from_outputs_and_task_states( all_predicates![ exact(DaCertificateValidated(dacs[2].clone())), exact(VidShareValidated(vids[2].0[0].clone())), - exact(ViewChange(ViewNumber::new(4))), + exact(ViewChange(ViewNumber::new(4), EpochNumber::new(0))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], @@ -150,7 +150,7 @@ async fn test_upgrade_task_with_vote() { all_predicates![ exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), - exact(ViewChange(ViewNumber::new(5))), + exact(ViewChange(ViewNumber::new(5), EpochNumber::new(0))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], @@ -159,7 +159,7 @@ async fn test_upgrade_task_with_vote() { all_predicates![ exact(DaCertificateValidated(dacs[4].clone())), exact(VidShareValidated(vids[4].0[0].clone())), - exact(ViewChange(ViewNumber::new(6))), + exact(ViewChange(ViewNumber::new(6), EpochNumber::new(0))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index d8328b98cf..8d8daf4c5e 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -100,9 +100,9 @@ async fn test_vid_task() { _pd: PhantomData, }; let inputs = vec![ - serial![ViewChange(ViewNumber::new(1))], + serial![ViewChange(ViewNumber::new(1), EpochNumber::new(1))], serial![ - ViewChange(ViewNumber::new(2)), + ViewChange(ViewNumber::new(2), EpochNumber::new(1)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 74a63cee82..85827dbbdb 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -10,6 +10,7 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, }; use hotshot_testing::helpers::build_system_handle; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, simple_vote::ViewSyncPreCommitData, traits::node_implementation::ConsensusTime, @@ -49,7 +50,10 @@ async fn test_view_sync_task() { input.push(HotShotEvent::Shutdown); - output.push(HotShotEvent::ViewChange(ViewNumber::new(3))); + output.push(HotShotEvent::ViewChange( + ViewNumber::new(3), + EpochNumber::new(0), + )); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); let view_sync_state = diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 7839b0d1c0..9a58e4046c 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -78,7 +78,10 @@ async fn test_vote_dependency_handle() { // For each permutation... for inputs in all_inputs.into_iter() { // The outputs are static here, but we re-make them since we use `into_iter` below - let outputs = vec![exact(ViewChange(ViewNumber::new(3))), quorum_vote_send()]; + let outputs = vec![ + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), + quorum_vote_send(), + ]; let (event_sender, mut event_receiver) = broadcast(1024); let view_number = ViewNumber::new(node_id); @@ -92,11 +95,11 @@ async fn test_vote_dependency_handle() { quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), storage: Arc::clone(&handle.storage()), view_number, - epoch_number: EpochNumber::new(1), sender: event_sender.clone(), receiver: event_receiver.clone().deactivate(), upgrade_lock: handle.hotshot.upgrade_lock.clone(), id: handle.hotshot.id, + epoch_height: handle.hotshot.config.epoch_height, }; vote_dependency_handle_state diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 72964d3705..77161111ae 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -27,15 +27,15 @@ use crate::{ message::{Proposal, UpgradeLock}, simple_certificate::{DaCertificate, QuorumCertificate}, traits::{ - block_contents::{BlockHeader, BuilderFee}, + block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{BuilderCommitment, StateAndDelta, Terminator, Terminator::Inclusive}, + utils::{BuilderCommitment, LeafCommitment, StateAndDelta, Terminator}, vid::VidCommitment, - vote::{Certificate, HasViewNumber}, + vote::HasViewNumber, }; /// A type alias for `HashMap, T>` @@ -503,6 +503,7 @@ impl Consensus { epoch_number > self.cur_epoch, debug!("New epoch isn't newer than the current epoch.") ); + tracing::trace!("Updating epoch from {} to {}", self.cur_epoch, epoch_number); self.cur_epoch = epoch_number; Ok(()) } @@ -882,100 +883,88 @@ impl Consensus { Some(()) } - /// Returns true if the current high qc is for the last block in the epoch - pub fn is_high_qc_for_last_block(&self) -> bool { - let high_qc = self.high_qc(); - self.is_qc_for_last_block(high_qc) - } - - /// Returns true if the given qc is for the last block in the epoch - pub fn is_qc_for_last_block(&self, cert: &QuorumCertificate) -> bool { - let Some(leaf) = self.saved_leaves.get(&cert.data().leaf_commit) else { - return false; - }; - let block_height = leaf.height(); - if block_height == 0 || self.epoch_height == 0 { - false - } else { - block_height % self.epoch_height == 0 + /// Return true if the high QC takes part in forming an eQC, i.e. + /// it is one of the 3-chain certificates but not the eQC itself + pub fn is_high_qc_forming_eqc(&self) -> bool { + let high_qc_leaf_commit = self.high_qc().data.leaf_commit; + let is_high_qc_extended = self.is_leaf_extended(high_qc_leaf_commit); + if is_high_qc_extended { + tracing::debug!("We have formed an eQC!"); } + self.is_leaf_for_last_block(high_qc_leaf_commit) && !is_high_qc_extended } - /// Returns true if the current high qc is an extended Quorum Certificate - /// The Extended Quorum Certificate (eQC) is the third Quorum Certificate formed in three - /// consecutive views for the last block in the epoch. - pub fn is_high_qc_extended(&self) -> bool { - let high_qc = self.high_qc(); - let ret = self.is_qc_extended(high_qc); - if ret { - tracing::debug!("We have formed an eQC!"); - }; - ret + /// Return true if the given leaf takes part in forming an eQC, i.e. + /// it is one of the 3-chain leaves but not the eQC leaf itself + pub fn is_leaf_forming_eqc(&self, leaf_commit: LeafCommitment) -> bool { + self.is_leaf_for_last_block(leaf_commit) && !self.is_leaf_extended(leaf_commit) } - /// Returns true if the given qc is an extended Quorum Certificate + /// Returns true if the given leaf can form an extended Quorum Certificate /// The Extended Quorum Certificate (eQC) is the third Quorum Certificate formed in three /// consecutive views for the last block in the epoch. - pub fn is_qc_extended(&self, cert: &QuorumCertificate) -> bool { - if !self.is_qc_for_last_block(cert) { - tracing::debug!("High QC is not for the last block in the epoch."); + pub fn is_leaf_extended(&self, leaf_commit: LeafCommitment) -> bool { + if !self.is_leaf_for_last_block(leaf_commit) { + tracing::debug!("The given leaf is not for the last block in the epoch."); return false; } - let qc_view = cert.view_number(); - let high_qc_block_number = - if let Some(leaf) = self.saved_leaves.get(&cert.data().leaf_commit) { - leaf.block_header().block_number() - } else { - return false; - }; - - let mut last_visited_view_number = qc_view; - let mut is_qc_extended = true; - if let Err(e) = - self.visit_leaf_ancestors(qc_view, Inclusive(qc_view - 2), true, |leaf, _, _| { + let Some(leaf) = self.saved_leaves.get(&leaf_commit) else { + return false; + }; + let leaf_view = leaf.view_number(); + let leaf_block_number = leaf.height(); + + let mut last_visited_view_number = leaf_view; + let mut is_leaf_extended = true; + if let Err(e) = self.visit_leaf_ancestors( + leaf_view, + Terminator::Inclusive(leaf_view - 2), + true, + |leaf, _, _| { tracing::trace!( "last_visited_view_number = {}, leaf.view_number = {}", *last_visited_view_number, *leaf.view_number() ); - if leaf.view_number() == qc_view { + if leaf.view_number() == leaf_view { return true; } if last_visited_view_number - 1 != leaf.view_number() { tracing::trace!("The chain is broken. Non consecutive views."); - is_qc_extended = false; + is_leaf_extended = false; return false; } - if high_qc_block_number != leaf.height() { + if leaf_block_number != leaf.height() { tracing::trace!("The chain is broken. Block numbers do not match."); - is_qc_extended = false; + is_leaf_extended = false; return false; } last_visited_view_number = leaf.view_number(); true - }) - { - is_qc_extended = false; + }, + ) { + is_leaf_extended = false; tracing::trace!("The chain is broken. Leaf ascension failed."); tracing::debug!("Leaf ascension failed; error={e}"); } - tracing::trace!("Is the given QC an eQC? {}", is_qc_extended); - is_qc_extended + tracing::trace!("Can the given leaf form an eQC? {}", is_leaf_extended); + is_leaf_extended } - /// Return true if the given Quorum Certificate takes part in forming an eQC, i.e. - /// it is one of the 3-chain certificates but not the eQC itself - pub fn is_qc_forming_eqc(&self, cert: &QuorumCertificate) -> bool { - self.is_qc_for_last_block(cert) && !self.is_qc_extended(cert) - } - - /// Return true if the high QC takes part in forming an eQC, i.e. - /// it is one of the 3-chain certificates but not the eQC itself - pub fn is_high_qc_forming_eqc(&self) -> bool { - self.is_high_qc_for_last_block() && !self.is_high_qc_extended() + /// Returns true if a given leaf is for the last block in the epoch + pub fn is_leaf_for_last_block(&self, leaf_commit: LeafCommitment) -> bool { + let Some(leaf) = self.saved_leaves.get(&leaf_commit) else { + return false; + }; + let block_height = leaf.height(); + if block_height == 0 || self.epoch_height == 0 { + false + } else { + block_height % self.epoch_height == 0 + } } } diff --git a/types/src/utils.rs b/types/src/utils.rs index e3d19a8286..c3f8780575 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -70,7 +70,7 @@ impl Clone for ViewInner { } } /// The hash of a leaf. -type LeafCommitment = Commitment>; +pub type LeafCommitment = Commitment>; /// Optional validated state and state delta. pub type StateAndDelta = ( @@ -210,3 +210,15 @@ pub fn bincode_opts() -> WithOtherTrailing< .with_fixint_encoding() .reject_trailing_bytes() } + +/// Returns an epoch number given a block number and an epoch height +#[must_use] +pub fn epoch_from_block_number(block_number: u64, epoch_height: u64) -> u64 { + if epoch_height == 0 { + 0 + } else if block_number % epoch_height == 0 { + block_number / epoch_height + } else { + block_number / epoch_height + 1 + } +} From d9aa94d1ef801abd2077fc2a454bc7094ca72fa2 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 13 Nov 2024 20:50:51 +0100 Subject: [PATCH 1300/1393] Lr/eqc voting (#3851) * Initial commit * Propose the same block at the end of the epoch * No VID and DAC required for the additional last block proposals * Guard against division by 0 * Traverse the leaves to check an eQC * Remove unneeded async in function definition * Remove trace * Make sure proposal is for the same block if justify QC references the last block * Initial commit * Gate epoch proposal logic * Gate epochs voting logic * Create helper method to check if QC is part of 3-chain * Update epoch on `ViewChange` event * Adjust tests * Update view and epoch when QC is formed * Fixes after review * Get rid of nasty nested if-elses * Fix fmt * Update VID that we reuse at the end of the epoch * Fix fmt * Do not create VID and DAC dependencies when voting for the last block * Simplify how we get a header * NetworkEventTaskState uses OuterConsensus * Refactor some of the eQC related methods in `Consensus` * Add back a debugging trace * Last eQC vote is broadcast * Do not check if we are the leader when receiving last votes for eQC * Add more traces * Correctly update epoch when voting and forming QC * Send ViewChange event when voting after shared state's been updated * Adjust tests * Remove an obsolete DA ViewChange code * Fetch proposal if missing when changing view after forming QC * Fix deadlock * Update epoch only when receiving or forming an eQC * update logging (#3844) * [Tech debt] Remove `async-std` (#3845) * tests and CI * remove `async-std` * `fmt` * fix doc build * remove counter tests * remove counter tests * build w/o lockfile * `lock` -> `std` * Revert "`lock` -> `std`" This reverts commit 21ebf054f3dd862a70d6b776f4016647cfd481c4. * lock * `async_broadcast` * overflow * Revert "`async_broadcast`" This reverts commit f03bb57fcc5fcdefe708c79fee75d3ae85ae04f1. * `try_send` * Fix fmt * Simplify code * Move a helper function to types crate * Add epoch safety check * Rename storage method * Clean up the code * Compare to genesis view --------- Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> Co-authored-by: rob-maron <132852777+rob-maron@users.noreply.github.com> --- hotshot/src/tasks/mod.rs | 15 +++++---- task-impls/src/consensus/handlers.rs | 19 +++++++---- task-impls/src/da.rs | 1 + task-impls/src/events.rs | 13 +++++++- task-impls/src/helpers.rs | 23 +++++++++++++- task-impls/src/network.rs | 20 +++++++++--- task-impls/src/quorum_vote/handlers.rs | 12 ++++++- task-impls/src/quorum_vote/mod.rs | 7 +++++ task-impls/src/upgrade.rs | 1 + task-impls/src/view_sync.rs | 33 ++++++++++++++------ task-impls/src/vote_collection.rs | 17 +++++++--- testing/src/byzantine/byzantine_behaviour.rs | 4 +-- testing/tests/tests_1/network_task.rs | 5 +-- types/src/consensus.rs | 21 +++++++++++-- 14 files changed, 149 insertions(+), 42 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 037e72f227..6ddd00b252 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,6 +10,11 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; +use crate::{ + tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, +}; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; use async_trait::async_trait; @@ -32,7 +37,7 @@ use hotshot_task_impls::{ view_sync::ViewSyncTaskState, }; use hotshot_types::{ - consensus::Consensus, + consensus::{Consensus, OuterConsensus}, constants::EVENT_CHANNEL_SIZE, message::{Message, UpgradeLock}, traits::{ @@ -43,12 +48,6 @@ use hotshot_types::{ use tokio::{spawn, time::sleep}; use vbs::version::StaticVersionType; -use crate::{ - tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, -}; - /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { @@ -200,7 +199,7 @@ pub fn add_network_event_task< quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), - consensus: Arc::clone(&handle.consensus()), + consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), transmit_tasks: BTreeMap::new(), }; diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index a148ec28a0..cfc399bd56 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -38,14 +38,19 @@ pub(crate) async fn handle_quorum_vote_recv< sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { - // Are we the leader for this view? + let is_vote_leaf_extended = task_state + .consensus + .read() + .await + .is_leaf_extended(vote.data.leaf_commit); + let we_are_leader = task_state + .quorum_membership + .leader(vote.view_number() + 1, task_state.cur_epoch)? + == task_state.public_key; ensure!( - task_state - .quorum_membership - .leader(vote.view_number() + 1, task_state.cur_epoch)? - == task_state.public_key, + is_vote_leaf_extended || we_are_leader, info!( - "We are not the leader for view {:?}", + "We are not the leader for view {:?} and this is not the last vote for eQC", vote.view_number() + 1 ) ); @@ -60,6 +65,7 @@ pub(crate) async fn handle_quorum_vote_recv< &event, sender, &task_state.upgrade_lock, + !is_vote_leaf_extended, ) .await?; @@ -99,6 +105,7 @@ pub(crate) async fn handle_timeout_vote_recv< &event, sender, &task_state.upgrade_lock, + true, ) .await?; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index d0182460f2..edaa5123e5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -282,6 +282,7 @@ impl, V: Versions> DaTaskState { QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), + /// Broadcast a quorum vote to form an eQC; emitted by a replica in the consensus task after seeing a valid quorum proposal + ExtendedQuorumVoteSend(QuorumVote), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. The proposal is not for an old view @@ -253,7 +255,9 @@ impl HotShotEvent { | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { Some(proposal.data.view_number()) } - HotShotEvent::QuorumVoteSend(vote) => Some(vote.view_number()), + HotShotEvent::QuorumVoteSend(vote) | HotShotEvent::ExtendedQuorumVoteSend(vote) => { + Some(vote.view_number()) + } HotShotEvent::DaProposalRecv(proposal, _) | HotShotEvent::DaProposalValidated(proposal, _) | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), @@ -324,6 +328,13 @@ impl Display for HotShotEvent { HotShotEvent::QuorumVoteRecv(v) => { write!(f, "QuorumVoteRecv(view_number={:?})", v.view_number()) } + HotShotEvent::ExtendedQuorumVoteSend(v) => { + write!( + f, + "ExtendedQuorumVoteSend(view_number={:?})", + v.view_number() + ) + } HotShotEvent::TimeoutVoteRecv(v) => { write!(f, "TimeoutVoteRecv(view_number={:?})", v.view_number()) } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 5c91ca9adf..cce95b6be3 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -13,6 +13,7 @@ use async_broadcast::{InactiveReceiver, Receiver, SendError, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; +use hotshot_types::utils::epoch_from_block_number; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf, QuorumProposal, ViewChangeEvidence}, @@ -481,9 +482,29 @@ pub async fn validate_proposal_safety_and_liveness< // Create a positive vote if either liveness or safety check // passes. - // Liveness check. { let consensus_reader = validation_info.consensus.read().await; + // Epoch safety check: + // The proposal is safe if + // 1. the proposed block and the justify QC block belong to the same epoch or + // 2. the justify QC is the eQC for the previous block + let proposal_epoch = + epoch_from_block_number(proposed_leaf.height(), validation_info.epoch_height); + let justify_qc_epoch = + epoch_from_block_number(parent_leaf.height(), validation_info.epoch_height); + ensure!( + proposal_epoch == justify_qc_epoch + || consensus_reader.check_eqc(&proposed_leaf, &parent_leaf), + { + error!( + "Failed epoch safety check \n Proposed leaf is {:?} \n justify QC leaf is {:?}", + proposed_leaf.clone(), + parent_leaf.clone(), + ) + } + ); + + // Liveness check. let liveness_check = justify_qc.view_number() > consensus_reader.locked_view(); // Safety check. diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 42c8a19748..d623962424 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -15,7 +15,7 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, data::{VidDisperse, VidDisperseShare}, event::{Event, EventType, HotShotAction}, message::{ @@ -212,7 +212,7 @@ pub struct NetworkEventTaskState< /// Storage to store actionable events pub storage: Arc>, /// Shared consensus state - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, /// map view number to transmit tasks @@ -294,7 +294,7 @@ impl< let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); - let consensus = Arc::clone(&self.consensus); + let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); spawn(async move { if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), @@ -320,7 +320,7 @@ impl< async fn maybe_record_action( maybe_action: Option, storage: Arc>, - consensus: Arc>>, + consensus: OuterConsensus, view: ::View, ) -> std::result::Result<(), ()> { if let Some(mut action) = maybe_action { @@ -408,6 +408,16 @@ impl< TransmitType::Direct(leader), )) } + HotShotEvent::ExtendedQuorumVoteSend(vote) => { + *maybe_action = Some(HotShotAction::Vote); + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote(vote.clone()), + )), + TransmitType::Broadcast, + )) + } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( @@ -669,7 +679,7 @@ impl< .committee_members(view_number, self.epoch); let network = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); - let consensus = Arc::clone(&self.consensus); + let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let upgrade_lock = self.upgrade_lock.clone(); let handle = spawn(async move { if NetworkEventTaskState::::maybe_record_action( diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 2f78773f88..d4b9edc6c7 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -283,6 +283,7 @@ pub(crate) async fn submit_vote, V storage: Arc>, leaf: Leaf, vid_share: Proposal>, + extended_vote: bool, ) -> Result<()> { ensure!( quorum_membership.has_stake(&public_key, epoch_number), @@ -317,7 +318,16 @@ pub(crate) async fn submit_vote, V .await .wrap() .context(error!("Failed to store VID share"))?; - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &sender).await; + + if extended_vote { + broadcast_event( + Arc::new(HotShotEvent::ExtendedQuorumVoteSend(vote)), + &sender, + ) + .await; + } else { + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &sender).await; + } Ok(()) } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 053ef851f9..6ac5fe37dd 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -222,6 +222,11 @@ impl + 'static, V: Versions> Handl ) .await; + let is_vote_leaf_extended = self + .consensus + .read() + .await + .is_leaf_extended(leaf.commit(&self.upgrade_lock).await); if let Err(e) = submit_vote::( self.sender.clone(), Arc::clone(&self.quorum_membership), @@ -233,6 +238,7 @@ impl + 'static, V: Versions> Handl Arc::clone(&self.storage), leaf, vid_share, + is_vote_leaf_extended, ) .await { @@ -685,6 +691,7 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&self.storage), proposed_leaf, updated_vid, + false, ) .await { diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 71cd0a7ef0..c5e9735701 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -246,6 +246,7 @@ impl, V: Versions> UpgradeTaskStat &event, &tx, &self.upgrade_lock, + true, ) .await?; } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 4b661a660a..9e8b852789 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -332,9 +332,14 @@ impl, V: Versions> ViewSyncTaskSta epoch: self.cur_epoch, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await?; + let vote_collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + true, + ) + .await?; relay_map.insert(relay, vote_collector); } @@ -373,9 +378,14 @@ impl, V: Versions> ViewSyncTaskSta id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await?; + let vote_collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + true, + ) + .await?; relay_map.insert(relay, vote_collector); } @@ -412,9 +422,14 @@ impl, V: Versions> ViewSyncTaskSta epoch: self.cur_epoch, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await; + let vote_collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + true, + ) + .await; if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 4c685ca978..c9266ae808 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -62,6 +62,9 @@ pub struct VoteCollectionTaskState< /// Node id pub id: u64, + + /// Whether we should check if we are the leader when handling a vote + pub check_if_leader: bool, } /// Describes the functions a vote must implement for it to be aggregatable by the generic vote collection task @@ -103,10 +106,12 @@ impl< vote: &VOTE, event_stream: &Sender>>, ) -> Result> { - ensure!( - vote.leader(&self.membership, self.epoch)? == self.public_key, - info!("Received vote for a view in which we were not the leader.") - ); + if self.check_if_leader { + ensure!( + vote.leader(&self.membership, self.epoch)? == self.public_key, + info!("Received vote for a view in which we were not the leader.") + ); + } ensure!( vote.view_number() == self.view, error!( @@ -189,6 +194,7 @@ pub async fn create_vote_accumulator( event: Arc>, sender: &Sender>>, upgrade_lock: UpgradeLock, + check_if_leader: bool, ) -> Result> where TYPES: NodeType, @@ -219,6 +225,7 @@ where view: info.view, epoch: info.epoch, id: info.id, + check_if_leader, }; state.handle_vote_event(Arc::clone(&event), sender).await?; @@ -246,6 +253,7 @@ pub async fn handle_vote< event: &Arc>, event_stream: &Sender>>, upgrade_lock: &UpgradeLock, + check_if_leader: bool, ) -> Result<()> where VoteCollectionTaskState: HandleVoteEvent, @@ -265,6 +273,7 @@ where Arc::clone(event), event_stream, upgrade_lock.clone(), + check_if_leader, ) .await?; diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index e21580a3a5..1ab38f1f29 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -18,7 +18,7 @@ use hotshot_task_impls::{ }, }; use hotshot_types::{ - consensus::Consensus, + consensus::{Consensus, OuterConsensus}, data::QuorumProposal, message::{Proposal, UpgradeLock}, simple_vote::QuorumVote, @@ -349,7 +349,7 @@ impl + std::fmt::Debug, V: Version quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), - consensus: Arc::clone(&handle.consensus()), + consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), transmit_tasks: BTreeMap::new(), }; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 540baaf447..9a539f89e4 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -17,6 +17,7 @@ use hotshot_testing::{ test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; use hotshot_types::{ + consensus::OuterConsensus, data::{EpochNumber, ViewNumber}, message::UpgradeLock, traits::{ @@ -51,7 +52,7 @@ async fn test_network_task() { let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); - let consensus = handle.hotshot.consensus(); + let consensus = OuterConsensus::new(handle.hotshot.consensus()); let config = launcher.resource_generator.config.clone(); let validator_config = launcher.resource_generator.validator_config.clone(); let public_key = validator_config.public_key; @@ -221,7 +222,7 @@ async fn test_network_storage_fail() { let network = (launcher.resource_generator.channel_generator)(node_id).await; - let consensus = handle.hotshot.consensus(); + let consensus = OuterConsensus::new(handle.hotshot.consensus()); let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); storage.write().await.should_return_err = true; let config = launcher.resource_generator.config.clone(); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 77161111ae..eb8fe4ade2 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -14,7 +14,7 @@ use std::{ }; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; -use committable::Commitment; +use committable::{Commitment, Committable}; use tracing::instrument; use utils::anytrace::*; use vec1::Vec1; @@ -33,7 +33,9 @@ use crate::{ signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{BuilderCommitment, LeafCommitment, StateAndDelta, Terminator}, + utils::{ + epoch_from_block_number, BuilderCommitment, LeafCommitment, StateAndDelta, Terminator, + }, vid::VidCommitment, vote::HasViewNumber, }; @@ -905,11 +907,12 @@ impl Consensus { /// consecutive views for the last block in the epoch. pub fn is_leaf_extended(&self, leaf_commit: LeafCommitment) -> bool { if !self.is_leaf_for_last_block(leaf_commit) { - tracing::debug!("The given leaf is not for the last block in the epoch."); + tracing::trace!("The given leaf is not for the last block in the epoch."); return false; } let Some(leaf) = self.saved_leaves.get(&leaf_commit) else { + tracing::trace!("We don't have a leaf corresponding to the leaf commit"); return false; }; let leaf_view = leaf.view_number(); @@ -957,6 +960,7 @@ impl Consensus { /// Returns true if a given leaf is for the last block in the epoch pub fn is_leaf_for_last_block(&self, leaf_commit: LeafCommitment) -> bool { let Some(leaf) = self.saved_leaves.get(&leaf_commit) else { + tracing::trace!("We don't have a leaf corresponding to the leaf commit"); return false; }; let block_height = leaf.height(); @@ -966,6 +970,17 @@ impl Consensus { block_height % self.epoch_height == 0 } } + + /// Returns true if the `parent_leaf` formed an eQC for the previous epoch to the `proposed_leaf` + pub fn check_eqc(&self, proposed_leaf: &Leaf, parent_leaf: &Leaf) -> bool { + if parent_leaf.view_number() == TYPES::View::genesis() { + return true; + } + let new_epoch = epoch_from_block_number(proposed_leaf.height(), self.epoch_height); + let old_epoch = epoch_from_block_number(parent_leaf.height(), self.epoch_height); + let parent_leaf_commit = as Committable>::commit(parent_leaf); + new_epoch - 1 == old_epoch && self.is_leaf_extended(parent_leaf_commit) + } } /// Alias for the block payload commitment and the associated metadata. The primary data From d959800ce8f60f32bf34ec39520d20f599c9032a Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 14 Nov 2024 15:35:05 +0300 Subject: [PATCH 1301/1393] Fix AcceptsTxnSubmits trait (#3884) --- builder-api/src/v0_1/builder.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/builder-api/src/v0_1/builder.rs b/builder-api/src/v0_1/builder.rs index 235e7c8606..399093908c 100644 --- a/builder-api/src/v0_1/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -219,7 +219,8 @@ pub fn submit_api( options: &Options, ) -> Result, ApiError> where - State: 'static + Send + Sync + AcceptsTxnSubmits, + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + AcceptsTxnSubmits, { let mut api = load_api::( options.api_path.as_ref(), @@ -234,7 +235,7 @@ where .map_err(Error::TxnUnpack)?; let hash = tx.commit(); state - .submit_txns(vec![tx]) + .read(|state| state.submit_txns(vec![tx])) .await .map_err(Error::TxnSubmit)?; Ok(hash) @@ -247,12 +248,15 @@ where .body_auto::::Transaction>, Ver>(Ver::instance()) .map_err(Error::TxnUnpack)?; let hashes = txns.iter().map(|tx| tx.commit()).collect::>(); - state.submit_txns(txns).await.map_err(Error::TxnSubmit)?; + state + .read(|state| state.submit_txns(txns)) + .await + .map_err(Error::TxnSubmit)?; Ok(hashes) } .boxed() })? - .at("get_status", |req: RequestParams, state| { + .get("get_status", |req: RequestParams, state| { async move { let tx = req .body_auto::<::Transaction, Ver>(Ver::instance()) From 4d90570b369f09765585e98d94380b21aa58fe88 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:38:32 -0500 Subject: [PATCH 1302/1393] [CDN] Breaking changes (#3770) * breaking CDN changes * the builder was running on the same port as the marshal --- examples/combined/all.rs | 5 +- examples/push-cdn/all.rs | 15 ++++-- examples/push-cdn/broker.rs | 7 ++- .../src/traits/networking/push_cdn_network.rs | 47 +++++++++++++------ orchestrator/run-config.toml | 2 +- 5 files changed, 55 insertions(+), 21 deletions(-) diff --git a/examples/combined/all.rs b/examples/combined/all.rs index ebfd828b0d..89864af1d1 100644 --- a/examples/combined/all.rs +++ b/examples/combined/all.rs @@ -10,7 +10,7 @@ pub mod types; use std::path::Path; -use cdn_broker::Broker; +use cdn_broker::{reexports::def::hook::NoMessageHook, Broker}; use cdn_marshal::Marshal; use hotshot::{ helpers::initialize_logging, @@ -82,6 +82,9 @@ async fn main() { private_key: broker_private_key.clone(), }, + user_message_hook: NoMessageHook, + broker_message_hook: NoMessageHook, + metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 2767bbdaa3..2e04d2d72b 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -10,7 +10,11 @@ pub mod types; use std::path::Path; -use cdn_broker::{reexports::crypto::signature::KeyPair, Broker}; +use cdn_broker::{ + reexports::{crypto::signature::KeyPair, def::hook::NoMessageHook}, + Broker, +}; + use cdn_marshal::Marshal; use hotshot::{ helpers::initialize_logging, @@ -90,6 +94,9 @@ async fn main() { private_key: broker_private_key.clone(), }, + user_message_hook: NoMessageHook, + broker_message_hook: NoMessageHook, + metrics_bind_endpoint: None, ca_cert_path: None, ca_key_path: None, @@ -109,10 +116,12 @@ async fn main() { } // Get the port to use for the marshal - let marshal_port = 9000; + let marshal_endpoint = config + .cdn_marshal_address + .clone() + .expect("CDN marshal address must be specified"); // Configure the marshal - let marshal_endpoint = format!("127.0.0.1:{marshal_port}"); let marshal_config = cdn_marshal::Config { bind_endpoint: marshal_endpoint.clone(), discovery_endpoint, diff --git a/examples/push-cdn/broker.rs b/examples/push-cdn/broker.rs index a9995805ca..8e03999c33 100644 --- a/examples/push-cdn/broker.rs +++ b/examples/push-cdn/broker.rs @@ -7,7 +7,7 @@ //! The following is the main `Broker` binary, which just instantiates and runs //! a `Broker` object. use anyhow::Result; -use cdn_broker::{Broker, Config}; +use cdn_broker::{reexports::def::hook::NoMessageHook, Broker, Config}; use clap::Parser; use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey}; use hotshot_example_types::node_types::TestTypes; @@ -103,6 +103,9 @@ async fn main() -> Result<()> { private_key, }, + user_message_hook: NoMessageHook, + broker_message_hook: NoMessageHook, + public_bind_endpoint: args.public_bind_endpoint, public_advertise_endpoint: args.public_advertise_endpoint, private_bind_endpoint: args.private_bind_endpoint, @@ -111,7 +114,7 @@ async fn main() -> Result<()> { }; // Create new `Broker` - // Uses TCP from broker connections and Quic for user connections. + // Uses TCP from broker connections and TCP+TLS for user connections. let broker = Broker::new(broker_config).await?; // Start the main loop, consuming it diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index a4b9f33499..d3b948ce6c 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -13,8 +13,8 @@ use std::{path::Path, time::Duration}; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ - connection::protocols::Tcp, - def::{ConnectionDef, RunDef, Topic as TopicTrait}, + connection::protocols::{Tcp, TcpTls}, + def::{hook::NoMessageHook, ConnectionDef, RunDef, Topic as TopicTrait}, discovery::{Embedded, Redis}, }; #[cfg(feature = "hotshot-testing")] @@ -22,7 +22,6 @@ use cdn_broker::{Broker, Config as BrokerConfig}; pub use cdn_client::reexports::crypto::signature::KeyPair; use cdn_client::{ reexports::{ - connection::protocols::Quic, crypto::signature::{Serializable, SignatureScheme}, message::{Broadcast, Direct, Message as PushCdnMessage}, }, @@ -90,22 +89,35 @@ impl SignatureScheme for WrappedSignatureKey { type PublicKey = Self; /// Sign a message of arbitrary data and return the serialized signature - fn sign(private_key: &Self::PrivateKey, message: &[u8]) -> anyhow::Result> { - let signature = T::sign(private_key, message)?; - // TODO: replace with rigorously defined serialization scheme... - // why did we not make `PureAssembledSignatureType` be `CanonicalSerialize + CanonicalDeserialize`? + fn sign( + private_key: &Self::PrivateKey, + namespace: &str, + message: &[u8], + ) -> anyhow::Result> { + // Combine the namespace and message into a single byte array + let message = [namespace.as_bytes(), message].concat(); + + let signature = T::sign(private_key, &message)?; Ok(bincode_opts().serialize(&signature)?) } /// Verify a message of arbitrary data and return the result - fn verify(public_key: &Self::PublicKey, message: &[u8], signature: &[u8]) -> bool { - // TODO: replace with rigorously defined signing scheme + fn verify( + public_key: &Self::PublicKey, + namespace: &str, + message: &[u8], + signature: &[u8], + ) -> bool { + // Deserialize the signature let signature: T::PureAssembledSignatureType = match bincode_opts().deserialize(signature) { Ok(key) => key, Err(_) => return false, }; - public_key.0.validate(&signature, message) + // Combine the namespace and message into a single byte array + let message = [namespace.as_bytes(), message].concat(); + + public_key.0.validate(&signature, &message) } } @@ -132,11 +144,12 @@ impl RunDef for ProductionDef { } /// The user definition for the Push CDN. -/// Uses the Quic protocol and untrusted middleware. +/// Uses the TCP+TLS protocol and untrusted middleware. pub struct UserDef(PhantomData); impl ConnectionDef for UserDef { type Scheme = WrappedSignatureKey; - type Protocol = Quic; + type Protocol = TcpTls; + type MessageHook = NoMessageHook; } /// The broker definition for the Push CDN. @@ -145,16 +158,18 @@ pub struct BrokerDef(PhantomData); impl ConnectionDef for BrokerDef { type Scheme = WrappedSignatureKey; type Protocol = Tcp; + type MessageHook = NoMessageHook; } -/// The client definition for the Push CDN. Uses the Quic +/// The client definition for the Push CDN. Uses the TCP+TLS /// protocol and no middleware. Differs from the user /// definition in that is on the client-side. #[derive(Clone)] pub struct ClientDef(PhantomData); impl ConnectionDef for ClientDef { type Scheme = WrappedSignatureKey; - type Protocol = Quic; + type Protocol = TcpTls; + type MessageHook = NoMessageHook; } /// The testing run definition for the Push CDN. @@ -329,6 +344,10 @@ impl TestableNetworkingImplementation private_key: broker_private_key.clone(), }, discovery_endpoint: discovery_endpoint.clone(), + + user_message_hook: NoMessageHook, + broker_message_hook: NoMessageHook, + ca_cert_path: None, ca_key_path: None, // 1GB diff --git a/orchestrator/run-config.toml b/orchestrator/run-config.toml index 04ab883a2e..1e5a8700d7 100644 --- a/orchestrator/run-config.toml +++ b/orchestrator/run-config.toml @@ -4,7 +4,7 @@ transactions_per_round = 10 transaction_size = 1000 node_index = 0 seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] -cdn_marshal_address = "127.0.0.1:9000" +cdn_marshal_address = "127.0.0.1:8999" public_keys = [ { stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", state_ver_key = "SCHNORR_VER_KEY~lJqDaVZyM0hWP2Br52IX5FeE-dCAIC-dPX7bL5-qUx-vjbunwe-ENOeZxj6FuOyvDCFzoGeP7yZ0fM995qF-CRE", stake = 1, da = true }, From c35c21b87305efdf499cd3197c46dab675740824 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:45:21 -0500 Subject: [PATCH 1303/1393] remove some unused deps (#3888) --- example-types/Cargo.toml | 8 -------- examples/Cargo.toml | 21 +-------------------- fakeapi/Cargo.toml | 2 -- hotshot/Cargo.toml | 11 ----------- libp2p-networking/Cargo.toml | 13 ------------- orchestrator/Cargo.toml | 5 ----- task-impls/Cargo.toml | 2 -- task/Cargo.toml | 2 -- testing/Cargo.toml | 3 --- types/Cargo.toml | 3 --- 10 files changed, 1 insertion(+), 69 deletions(-) diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 0cdba8fc73..026ac7e23b 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -12,27 +12,19 @@ slow-tests = [] gpu-vid = ["hotshot-task-impls/gpu-vid"] [dependencies] -async-broadcast = { workspace = true } async-trait = { workspace = true } anyhow = { workspace = true } sha3 = "^0.10" committable = { workspace = true } -either = { workspace = true } -futures = { workspace = true } hotshot = { path = "../hotshot" } hotshot-types = { path = "../types" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } -hotshot-builder-api = { path = "../builder-api" } rand = { workspace = true } thiserror = { workspace = true } -tracing = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } time = { workspace = true } async-lock = { workspace = true } -bitvec = { workspace = true } -ethereum-types = { workspace = true } -hotshot-task = { path = "../task" } jf-vid = { workspace = true } vbs = { workspace = true } url = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index dc6434bb75..fb281cd7eb 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -11,9 +11,6 @@ rust-version = { workspace = true } default = ["docs", "doc-images", "hotshot-testing"] gpu-vid = ["hotshot-task-impls/gpu-vid"] -# Features required for binaries -bin-orchestrator = ["clap"] - # Build the extended documentation docs = [] doc-images = [] @@ -81,51 +78,35 @@ name = "whitelist-push-cdn" path = "push-cdn/whitelist-adapter.rs" [dependencies] -async-broadcast = { workspace = true } -async-lock = { workspace = true } async-trait = { workspace = true } -bimap = "0.6" clap = { workspace = true, optional = true } -committable = { workspace = true } -custom_debug = { workspace = true } -dashmap = "6" -either = { workspace = true } futures = { workspace = true } hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types" } hotshot-testing = { path = "../testing" } -hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } -libp2p-identity = { workspace = true } +hotshot-task-impls = { path = "../task-impls" } libp2p-networking = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["rc"] } -thiserror = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -derive_more = { workspace = true } portpicker.workspace = true -lru.workspace = true -hotshot-task = { path = "../task" } hotshot = { path = "../hotshot" } hotshot-example-types = { path = "../example-types" } chrono = { workspace = true } -vbs = { workspace = true } sha2.workspace = true local-ip-address = "0.6" -vec1 = { workspace = true } url = { workspace = true } tracing = { workspace = true } tokio = { workspace = true } -cdn-client = { workspace = true } cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } [dev-dependencies] clap.workspace = true toml = { workspace = true } -blake3 = { workspace = true } anyhow.workspace = true tracing-subscriber = "0.3" diff --git a/fakeapi/Cargo.toml b/fakeapi/Cargo.toml index b2bd022c45..93c87c2fdf 100644 --- a/fakeapi/Cargo.toml +++ b/fakeapi/Cargo.toml @@ -15,13 +15,11 @@ tokio = { workspace = true } anyhow = { workspace = true } hotshot-types = { path = "../types" } vbs = { workspace = true } -serde = { workspace = true } rand = { workspace = true } hotshot-example-types = { path = "../example-types" } async-trait = { workspace = true } futures = { workspace = true } async-lock = { workspace = true } -tracing = { workspace = true } [lints] workspace = true diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 0a02ef4131..f433e4d78b 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -13,9 +13,6 @@ example-upgrade = ["hotshot-task-impls/example-upgrade"] gpu-vid = ["hotshot-task-impls/gpu-vid"] rewind = ["hotshot-task-impls/rewind"] -# Features required for binaries -bin-orchestrator = ["clap"] - # Build the extended documentation docs = [] doc-images = [] @@ -29,11 +26,9 @@ async-trait = { workspace = true } bimap = "0.6" bincode = { workspace = true } chrono = { workspace = true } -clap = { workspace = true, optional = true } committable = { workspace = true } custom_debug = { workspace = true } dashmap = "6" -derive_more = { workspace = true } either = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } @@ -46,15 +41,11 @@ lru.workspace = true portpicker = "0.1" rand = { workspace = true } serde = { workspace = true, features = ["rc"] } -thiserror = { workspace = true } -surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } vbs = { workspace = true } -jf-signature.workspace = true blake3.workspace = true -sha2 = { workspace = true } url = { workspace = true } num_enum = "0.7" parking_lot = "0.12" @@ -67,8 +58,6 @@ cdn-marshal = { workspace = true } [dev-dependencies] blake3 = { workspace = true } -clap.workspace = true -toml = { workspace = true } [lints] workspace = true diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 4488b35b58..3f7c3dbd40 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -16,37 +16,24 @@ hotshot-example-types = { path = "../example-types" } [dependencies] anyhow = { workspace = true } -async-lock = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } bincode = { workspace = true } custom_debug = { workspace = true } delegate = "0.13" derive_builder = "0.20" -either = { workspace = true } futures = { workspace = true } hotshot-types = { path = "../types" } libp2p-swarm-derive = { workspace = true } libp2p-identity = { workspace = true } rand = { workspace = true } serde = { workspace = true } -serde_bytes = { workspace = true } -serde_json = { workspace = true } -thiserror = { workspace = true } -tide = { version = "0.16", optional = true, default-features = false, features = [ - "h1-server", -] } tracing = { workspace = true } -void = "1" lazy_static = { workspace = true } pin-project = "1" -portpicker.workspace = true cbor4ii = "0.3" -tracing-subscriber = { workspace = true } - libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } -tokio-stream = "0.1" [lints] workspace = true diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 557bb0ed5b..3d929fd5ca 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -14,15 +14,10 @@ tide-disco = { workspace = true } surf-disco = { workspace = true } tracing = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } toml = { workspace = true } -thiserror = "2" csv = "1" vbs = { workspace = true } -vec1 = { workspace = true } -multiaddr = "0.18" anyhow.workspace = true -bincode.workspace = true tokio = { workspace = true } [lints] diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 12e8b368a3..0cb7e9e04c 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -17,7 +17,6 @@ async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } -bitvec = { workspace = true } cdn-proto = { workspace = true } chrono = { workspace = true } committable = { workspace = true } @@ -26,7 +25,6 @@ futures = { workspace = true } hotshot-task = { path = "../task" } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } -jf-signature = { workspace = true } jf-vid = { workspace = true } lru.workspace = true rand = { workspace = true } diff --git a/task/Cargo.toml b/task/Cargo.toml index 89461a58cb..bbd1f7e0cc 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -7,11 +7,9 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] - futures = { workspace = true } async-broadcast = { workspace = true } tracing = { workspace = true } -anyhow = { workspace = true } async-trait = { workspace = true } utils = { path = "../utils" } tokio = { workspace = true, features = [ diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 8f8e59500f..5f2b168864 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -28,19 +28,16 @@ futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } hotshot-example-types = { path = "../example-types" } hotshot-macros = { path = "../macros" } -hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-fakeapi = { path = "../fakeapi" } hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } -jf-signature = { workspace = true } jf-vid = { workspace = true } portpicker = { workspace = true } rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } -sha3 = "^0.10" thiserror = { workspace = true } tide-disco = { workspace = true } tracing = { workspace = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index 0184255278..4be463a1cb 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -22,11 +22,9 @@ committable = { workspace = true } custom_debug = { workspace = true } digest = { workspace = true, features = ["rand_core"] } either = { workspace = true } -espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } cdn-proto = { workspace = true } -reqwest = { workspace = true } serde-inline-default = "0.2" lazy_static = { workspace = true } memoize = { workspace = true } @@ -43,7 +41,6 @@ jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } -serde_bytes = { workspace = true } tagged-base64 = { workspace = true } vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } From 5a4c6125da581b67c0576dbf7777310eaaa87c43 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 14 Nov 2024 16:53:05 -0500 Subject: [PATCH 1304/1393] fix qc (#3889) * fix qc * lint --- task-impls/src/quorum_proposal/handlers.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 8c375b4b54..4aa5394dbe 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -17,7 +17,7 @@ use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, - simple_certificate::UpgradeCertificate, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, @@ -111,7 +111,7 @@ impl ProposalDependencyHandle { view_change_evidence: Option>, formed_upgrade_certificate: Option>, decided_upgrade_certificate: Arc>>>, - parent_view_number: TYPES::View, + parent_qc: QuorumCertificate, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, @@ -122,7 +122,7 @@ impl ProposalDependencyHandle { self.private_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &self.upgrade_lock, - parent_view_number, + parent_qc.view_number(), ) .await?; @@ -166,8 +166,6 @@ impl ProposalDependencyHandle { let version = self.upgrade_lock.version(self.view_number).await?; - let high_qc = self.consensus.read().await.high_qc().clone(); - let builder_commitment = commitment_and_metadata.builder_commitment.clone(); let metadata = commitment_and_metadata.metadata.clone(); @@ -218,7 +216,7 @@ impl ProposalDependencyHandle { let proposal = QuorumProposal { block_header, view_number: self.view_number, - justify_qc: high_qc, + justify_qc: parent_qc, upgrade_certificate, proposal_certificate, }; @@ -268,7 +266,7 @@ impl HandleDepOutput for ProposalDependencyHandle< let mut timeout_certificate = None; let mut view_sync_finalize_cert = None; let mut vid_share = None; - let mut parent_view_number = None; + let mut parent_qc = None; for event in res.iter().flatten().flatten() { match event.as_ref() { HotShotEvent::SendPayloadCommitmentAndMetadata( @@ -293,7 +291,7 @@ impl HandleDepOutput for ProposalDependencyHandle< timeout_certificate = Some(timeout.clone()); } either::Left(qc) => { - parent_view_number = Some(qc.view_number()); + parent_qc = Some(qc.clone()); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { @@ -306,8 +304,7 @@ impl HandleDepOutput for ProposalDependencyHandle< } } - let parent_view_number = - parent_view_number.unwrap_or(self.consensus.read().await.high_qc().view_number()); + let parent_qc = parent_qc.unwrap_or(self.consensus.read().await.high_qc().clone()); if commit_and_metadata.is_none() { tracing::error!( @@ -334,7 +331,7 @@ impl HandleDepOutput for ProposalDependencyHandle< proposal_cert, self.formed_upgrade_certificate.clone(), Arc::clone(&self.upgrade_lock.decided_upgrade_certificate), - parent_view_number, + parent_qc, ) .await { From 15ccc0c4ec831572f863ae26fc43b22e99b11749 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Fri, 15 Nov 2024 10:55:24 -0500 Subject: [PATCH 1305/1393] Hotstuff 2 (#3877) * removing some unused stuff and adding HighQC message + event * Update high qc events, add helper to send * Send high Qc on view change * wait for highest qc in the proposal task * fn stub * working on fn * new decide rule and locked qc rule for hs2 * Merge main * add failures test * actually use new decide rule * add test verifying the new decide rule is one shorter * reduce epoch chain len by 1 * fix waiting and validate QC * use proper timeout * more accurate high qc waiting logic * gate by versios before waiting for QC * Fix GC for epochs * fix version gating --- hotshot/src/tasks/task_state.rs | 1 + task-impls/src/consensus/handlers.rs | 34 +++++ task-impls/src/events.rs | 15 +++ task-impls/src/helpers.rs | 89 ++++++++++++- task-impls/src/network.rs | 5 +- task-impls/src/quorum_proposal/handlers.rs | 119 +++++++++++++++--- task-impls/src/quorum_proposal/mod.rs | 26 +++- .../src/quorum_proposal_recv/handlers.rs | 12 +- task-impls/src/quorum_proposal_recv/mod.rs | 2 +- task-impls/src/quorum_vote/handlers.rs | 35 ++++-- testing/tests/tests_1/test_success.rs | 36 ++++++ .../tests_3/test_with_failures_half_f.rs | 39 +++++- types/src/consensus.rs | 59 +++++++-- types/src/message.rs | 56 ++------- types/src/traits/network.rs | 9 +- 15 files changed, 436 insertions(+), 101 deletions(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 09bc3b0eb4..536e6239a6 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -270,6 +270,7 @@ impl, V: Versions> CreateTaskState formed_upgrade_certificate: None, upgrade_lock: handle.hotshot.upgrade_lock.clone(), epoch_height: handle.hotshot.config.epoch_height, + highest_qc: handle.hotshot.consensus.read().await.high_qc().clone(), } } } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index cfc399bd56..287e14e03e 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -26,6 +26,7 @@ use crate::{ consensus::Versions, events::HotShotEvent, helpers::broadcast_event, vote_collection::handle_vote, }; +use vbs::version::StaticVersionType; /// Handle a `QuorumVoteRecv` event. pub(crate) async fn handle_quorum_vote_recv< @@ -112,6 +113,30 @@ pub(crate) async fn handle_timeout_vote_recv< Ok(()) } +/// Send an event to the next leader containing the highest QC we have +/// This is a necessary part of HotStuff 2 but not the original HotStuff +/// +/// #Errors +/// Returns and error if we can't get the version or the version doesn't +/// yet support HS 2 +pub async fn send_high_qc>( + new_view_number: TYPES::View, + sender: &Sender>>, + task_state: &mut ConsensusTaskState, +) -> Result<()> { + let version = task_state.upgrade_lock.version(new_view_number).await?; + ensure!( + version >= V::Epochs::VERSION, + debug!("HotStuff 2 updgrade not yet in effect") + ); + let high_qc = task_state.consensus.read().await.high_qc().clone(); + let leader = task_state + .quorum_membership + .leader(new_view_number, TYPES::Epoch::new(0))?; + broadcast_event(Arc::new(HotShotEvent::HighQcSend(high_qc, leader)), sender).await; + Ok(()) +} + /// Handle a `ViewChange` event. #[instrument(skip_all)] pub(crate) async fn handle_view_change< @@ -140,6 +165,15 @@ pub(crate) async fn handle_view_change< if *old_view_number / 100 != *new_view_number / 100 { tracing::info!("Progress: entered view {:>6}", *new_view_number); } + + // Send our high qc to the next leader immediately upon finishing a view. + // Part of HotStuff 2 + let _ = send_high_qc(new_view_number, sender, task_state) + .await + .inspect_err(|e| { + tracing::debug!("High QC sending failed with error: {:?}", e); + }); + // Move this node to the next view task_state.cur_view = new_view_number; task_state diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index bca4b4a12b..6744208fa5 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -236,6 +236,12 @@ pub enum HotShotEvent { TYPES::SignatureKey, Proposal>, ), + + /// A replica send us a High QC + HighQcRecv(QuorumCertificate, TYPES::SignatureKey), + + /// Send our HighQC to the next leader, should go to the same leader as our vote + HighQcSend(QuorumCertificate, TYPES::SignatureKey), } impl HotShotEvent { @@ -311,6 +317,9 @@ impl HotShotEvent { | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), HotShotEvent::VidResponseSend(_, _, proposal) | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number), + HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, _) => { + Some(qc.view_number()) + } } } } @@ -569,6 +578,12 @@ impl Display for HotShotEvent { proposal.data.view_number ) } + HotShotEvent::HighQcRecv(qc, _) => { + write!(f, "HighQcRecv(view_number={:?}", qc.view_number()) + } + HotShotEvent::HighQcSend(qc, _) => { + write!(f, "HighQcSend(view_number={:?}", qc.view_number()) + } } } } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index cce95b6be3..bcabd40e9e 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -9,7 +9,7 @@ use std::{ sync::Arc, }; -use async_broadcast::{InactiveReceiver, Receiver, SendError, Sender}; +use async_broadcast::{Receiver, SendError, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; @@ -192,6 +192,89 @@ impl Default for LeafChainTraversalOutcome { } } +/// calculate the new decided leaf chain based on the rules of hostuff 2 +/// +/// # Panics +/// Can't actually panic +pub async fn decide_from_proposal_2( + proposal: &QuorumProposal, + consensus: OuterConsensus, + existing_upgrade_cert: Arc>>>, + public_key: &TYPES::SignatureKey, +) -> LeafChainTraversalOutcome { + let mut res = LeafChainTraversalOutcome::default(); + let consensus_reader = consensus.read().await; + let proposed_leaf = Leaf::from_quorum_proposal(proposal); + res.new_locked_view_number = Some(proposed_leaf.justify_qc().view_number()); + + // If we don't have the proposals parent return early + let Some(parent_info) = consensus_reader.parent_leaf_info(&proposed_leaf, public_key) else { + return res; + }; + // Get the parents parent and check if it's consecutive in view to the parent, if so we can decided + // the grandparents view. If not we're done. + let Some(grand_parent_info) = consensus_reader.parent_leaf_info(&parent_info.leaf, public_key) + else { + return res; + }; + if grand_parent_info.leaf.view_number() + 1 != parent_info.leaf.view_number() { + return res; + } + res.new_decide_qc = Some(parent_info.leaf.justify_qc().clone()); + let decided_view_number = grand_parent_info.leaf.view_number(); + res.new_decided_view_number = Some(decided_view_number); + // We've reached decide, now get the leaf chain all the way back to the last decided view, not including it. + let old_anchor_view = consensus_reader.last_decided_view(); + let mut current_leaf_info = Some(grand_parent_info); + let existing_upgrade_cert_reader = existing_upgrade_cert.read().await; + let mut txns = HashSet::new(); + while current_leaf_info + .as_ref() + .is_some_and(|info| info.leaf.view_number() > old_anchor_view) + { + // unwrap is safe, we just checked that he option is some + let info = &mut current_leaf_info.unwrap(); + // Check if there's a new upgrade certificate available. + if let Some(cert) = info.leaf.upgrade_certificate() { + if info.leaf.upgrade_certificate() != *existing_upgrade_cert_reader { + if cert.data.decide_by < decided_view_number { + tracing::warn!("Failed to decide an upgrade certificate in time. Ignoring."); + } else { + tracing::info!("Reached decide on upgrade certificate: {:?}", cert); + res.decided_upgrade_cert = Some(cert.clone()); + } + } + } + + res.leaf_views.push(info.clone()); + // If the block payload is available for this leaf, include it in + // the leaf chain that we send to the client. + if let Some(encoded_txns) = consensus_reader + .saved_payloads() + .get(&info.leaf.view_number()) + { + let payload = + BlockPayload::from_bytes(encoded_txns, info.leaf.block_header().metadata()); + + info.leaf.fill_block_payload_unchecked(payload); + } + + if let Some(ref payload) = info.leaf.block_payload() { + for txn in payload.transaction_commitments(info.leaf.block_header().metadata()) { + txns.insert(txn); + } + } + + current_leaf_info = consensus_reader.parent_leaf_info(&info.leaf, public_key); + } + + if !txns.is_empty() { + res.included_txns = Some(txns); + } + + res +} + /// Ascends the leaf chain by traversing through the parent commitments of the proposal. We begin /// by obtaining the parent view, and if we are in a chain (i.e. the next view from the parent is /// one view newer), then we begin attempting to form the chain. This is a direct impl from @@ -344,7 +427,7 @@ pub async fn decide_from_proposal( pub(crate) async fn parent_leaf_and_state( next_proposal_view_number: TYPES::View, event_sender: &Sender>>, - event_receiver: &InactiveReceiver>>, + event_receiver: &Receiver>>, quorum_membership: Arc, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -370,7 +453,7 @@ pub(crate) async fn parent_leaf_and_state( let _ = fetch_proposal( parent_view_number, event_sender.clone(), - event_receiver.activate_cloned(), + event_receiver.clone(), quorum_membership, consensus.clone(), public_key.clone(), diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index d623962424..a80c34759b 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -77,7 +77,7 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) } - GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { + GeneralConsensusMessage::ProposalResponse(proposal) => { HotShotEvent::QuorumProposalResponseRecv(proposal) } GeneralConsensusMessage::Vote(vote) => { @@ -114,6 +114,7 @@ impl NetworkMessageTaskState { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } + GeneralConsensusMessage::HighQC(qc) => HotShotEvent::HighQcRecv(qc, sender), }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -428,7 +429,7 @@ impl< HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( sender_key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::LeaderProposalAvailable(proposal), + GeneralConsensusMessage::ProposalResponse(proposal), )), TransmitType::Direct(sender_key), )), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 4aa5394dbe..cfc46f4201 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -7,10 +7,19 @@ //! This module holds the dependency task for the QuorumProposalTask. It is spawned whenever an event that could //! initiate a proposal occurs. -use std::{marker::PhantomData, sync::Arc}; +use std::{ + marker::PhantomData, + sync::Arc, + time::{Duration, Instant}, +}; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, parent_leaf_and_state}, + quorum_proposal::{UpgradeLock, Versions}, +}; use anyhow::{ensure, Context, Result}; -use async_broadcast::{InactiveReceiver, Sender}; +use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ @@ -19,20 +28,16 @@ use hotshot_types::{ message::Proposal, simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ - block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, + block_contents::BlockHeader, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, }, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, parent_leaf_and_state}, - quorum_proposal::{UpgradeLock, Versions}, -}; - /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] pub(crate) enum ProposalDependency { @@ -67,7 +72,7 @@ pub struct ProposalDependencyHandle { pub sender: Sender>>, /// The event receiver. - pub receiver: InactiveReceiver>>, + pub receiver: Receiver>>, /// Immutable instance state pub instance_state: Arc, @@ -84,6 +89,8 @@ pub struct ProposalDependencyHandle { /// Shared consensus task state pub consensus: OuterConsensus, + /// View timeout from config. + pub timeout: u64, /// The most recent upgrade certificate this node formed. /// Note: this is ONLY for certificates that have been formed internally, /// so that we can propose with them. @@ -97,9 +104,79 @@ pub struct ProposalDependencyHandle { /// The node's id pub id: u64, + + /// The time this view started + pub view_start_time: Instant, + + /// The higest_qc we've seen at the start of this task + pub highest_qc: QuorumCertificate, } impl ProposalDependencyHandle { + /// Return the next HighQC we get from the event stream + async fn wait_for_qc_event( + &self, + rx: &mut Receiver>>, + ) -> Option> { + while let Ok(event) = rx.recv_direct().await { + if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() { + if qc + .is_valid_cert( + self.quorum_membership.as_ref(), + TYPES::Epoch::new(0), + &self.upgrade_lock, + ) + .await + { + return Some(qc.clone()); + } + } + } + None + } + /// Waits for the ocnfigured timeout for nodes to send HighQC messages to us. We'll + /// then propose with the higest QC from among these proposals. + async fn wait_for_highest_qc(&mut self) { + tracing::error!("waiting for QC"); + // If we haven't upgraded to Hotstuff 2 just return the high qc right away + if self + .upgrade_lock + .version(self.view_number) + .await + .is_ok_and(|version| version < V::Epochs::VERSION) + { + return; + } + let wait_duration = Duration::from_millis(self.timeout / 2); + + // TODO configure timeout + while self.view_start_time.elapsed() < wait_duration { + let Some(time_spent) = Instant::now().checked_duration_since(self.view_start_time) + else { + // Shouldn't be possible, now must be after the start + return; + }; + let Some(time_left) = wait_duration.checked_sub(time_spent) else { + // No time left + return; + }; + let Ok(maybe_qc) = tokio::time::timeout( + time_left, + self.wait_for_qc_event(&mut self.receiver.clone()), + ) + .await + else { + // we timeout out, don't wait any longer + return; + }; + let Some(qc) = maybe_qc else { + continue; + }; + if qc.view_number() > self.highest_qc.view_number() { + self.highest_qc = qc; + } + } + } /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. @@ -170,7 +247,7 @@ impl ProposalDependencyHandle { let metadata = commitment_and_metadata.metadata.clone(); let block_header = if version >= V::Epochs::VERSION - && self.consensus.read().await.is_high_qc_forming_eqc() + && self.consensus.read().await.is_qc_forming_eqc(&parent_qc) { tracing::info!("Reached end of epoch. Proposing the same block again to form an eQC."); let block_header = parent_leaf.block_header().clone(); @@ -261,7 +338,7 @@ impl HandleDepOutput for ProposalDependencyHandle< type Output = Vec>>>>; #[allow(clippy::no_effect_underscore_binding, clippy::too_many_lines)] - async fn handle_dep_result(self, res: Self::Output) { + async fn handle_dep_result(mut self, res: Self::Output) { let mut commit_and_metadata: Option> = None; let mut timeout_certificate = None; let mut view_sync_finalize_cert = None; @@ -304,7 +381,21 @@ impl HandleDepOutput for ProposalDependencyHandle< } } - let parent_qc = parent_qc.unwrap_or(self.consensus.read().await.high_qc().clone()); + let Ok(version) = self.upgrade_lock.version(self.view_number).await else { + tracing::error!( + "Failed to get version for view {:?}, not proposing", + self.view_number + ); + return; + }; + let parent_qc = if let Some(qc) = parent_qc { + qc + } else if version < V::Epochs::VERSION { + self.consensus.read().await.high_qc().clone() + } else { + self.wait_for_highest_qc().await; + self.highest_qc.clone() + }; if commit_and_metadata.is_none() { tracing::error!( diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 02f6ee23e8..a6b641e602 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; @@ -19,7 +19,7 @@ use hotshot_types::{ consensus::OuterConsensus, event::Event, message::UpgradeLock, - simple_certificate::UpgradeCertificate, + simple_certificate::{QuorumCertificate, UpgradeCertificate}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, @@ -91,6 +91,9 @@ pub struct QuorumProposalTaskState /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, + + /// The higest_qc we've seen at the start of this task + pub highest_qc: QuorumCertificate, } impl, V: Versions> @@ -312,15 +315,18 @@ impl, V: Versions> latest_proposed_view: self.latest_proposed_view, view_number, sender: event_sender, - receiver: event_receiver.deactivate(), + receiver: event_receiver, quorum_membership: Arc::clone(&self.quorum_membership), public_key: self.public_key.clone(), private_key: self.private_key.clone(), instance_state: Arc::clone(&self.instance_state), consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + timeout: self.timeout, formed_upgrade_certificate: self.formed_upgrade_certificate.clone(), upgrade_lock: self.upgrade_lock.clone(), id: self.id, + view_start_time: Instant::now(), + highest_qc: self.highest_qc.clone(), }, ); self.proposal_dependencies @@ -506,6 +512,20 @@ impl, V: Versions> HotShotEvent::ViewChange(view, _) | HotShotEvent::Timeout(view) => { self.cancel_tasks(*view); } + HotShotEvent::HighQcSend(qc, _sender) => { + ensure!(qc.view_number() > self.highest_qc.view_number()); + let epoch_number = self.consensus.read().await.cur_epoch(); + ensure!( + qc.is_valid_cert( + self.quorum_membership.as_ref(), + epoch_number, + &self.upgrade_lock + ) + .await, + warn!("Qurom certificate {:?} was invalid", qc.data()) + ); + self.highest_qc = qc.clone(); + } _ => {} } Ok(()) diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 24ea89a3c2..d7ce8aefc0 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -40,7 +40,7 @@ use crate::{ }, quorum_proposal_recv::{UpgradeLock, Versions}, }; - +use vbs::version::StaticVersionType; /// Update states in the event that the parent state is not found for a given `proposal`. #[instrument(skip_all)] async fn validate_proposal_liveness, V: Versions>( @@ -77,6 +77,16 @@ async fn validate_proposal_liveness consensus_writer.locked_view(); + // if we are using HS2 we update our locked view for any QC from a leader greater than our current lock + if liveness_check + && validation_info + .upgrade_lock + .version(leaf.view_number()) + .await + .is_ok_and(|v| v >= V::Epochs::VERSION) + { + consensus_writer.update_locked_view(proposal.data.justify_qc.clone().view_number())?; + } drop(consensus_writer); diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 8f9d9e3f05..332ecaf241 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -11,7 +11,7 @@ use std::{collections::BTreeMap, sync::Arc}; use self::handlers::handle_quorum_proposal_recv; use crate::{ events::{HotShotEvent, ProposalMissing}, - helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, + helpers::{broadcast_event, fetch_proposal}, }; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index d4b9edc6c7..198d075de5 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -26,11 +26,15 @@ use hotshot_types::{ }; use tracing::instrument; use utils::anytrace::*; +use vbs::version::StaticVersionType; use super::QuorumVoteTaskState; use crate::{ events::HotShotEvent, - helpers::{broadcast_event, decide_from_proposal, fetch_proposal, LeafChainTraversalOutcome}, + helpers::{ + broadcast_event, decide_from_proposal, decide_from_proposal_2, fetch_proposal, + LeafChainTraversalOutcome, + }, quorum_vote::Versions, }; @@ -44,6 +48,11 @@ pub(crate) async fn handle_quorum_proposal_validated< proposal: &QuorumProposal, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { + let version = task_state + .upgrade_lock + .version(proposal.view_number()) + .await?; + let LeafChainTraversalOutcome { new_locked_view_number, new_decided_view_number, @@ -51,13 +60,23 @@ pub(crate) async fn handle_quorum_proposal_validated< leaf_views, included_txns, decided_upgrade_cert, - } = decide_from_proposal( - proposal, - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), - Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), - &task_state.public_key, - ) - .await; + } = if version >= V::Epochs::VERSION { + decide_from_proposal_2( + proposal, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), + &task_state.public_key, + ) + .await + } else { + decide_from_proposal( + proposal, + OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), + &task_state.public_key, + ) + .await + }; if let Some(cert) = decided_upgrade_cert.clone() { let mut decided_certificate_lock = task_state diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 588c718a83..e81060aedb 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -17,6 +17,7 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; @@ -151,3 +152,38 @@ cross_tests!( } }, ); + +// Test to make sure we can decide in just 3 views +// This test fails with the old decide rule +cross_tests!( + TestName: test_shorter_decide, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ), + ..TestDescription::default() + }; + // after the first 3 leaders the next leader is down. It's a hack to make sure we decide in + // 3 views or else we get a timeout + let dead_nodes = vec![ + ChangeNode { + idx: 4, + updown: NodeAction::Down, + }, + + ]; + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(1, dead_nodes)] + }; + metadata.overall_safety_properties.num_successful_views = 1; + metadata.overall_safety_properties.num_failed_views = 0; + metadata + }, +); diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 797aa77cab..a8a2dbb14b 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, + node_types::{EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -52,3 +52,40 @@ cross_tests!( metadata } ); +cross_tests!( + TestName: test_with_failures_half_f_epochs, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_bootstrap_nodes = 17; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 18, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 19, + updown: NodeAction::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + metadata.overall_safety_properties.num_failed_views = 3; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 22; + metadata + } +); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index eb8fe4ade2..499430c0ca 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -23,7 +23,7 @@ pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, error::HotShotError, - event::HotShotAction, + event::{HotShotAction, LeafInfo}, message::{Proposal, UpgradeLock}, simple_certificate::{DaCertificate, QuorumCertificate}, traits::{ @@ -37,7 +37,7 @@ use crate::{ epoch_from_block_number, BuilderCommitment, LeafCommitment, StateAndDelta, Terminator, }, vid::VidCommitment, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; /// A type alias for `HashMap, T>` @@ -497,6 +497,36 @@ impl Consensus { Ok(()) } + /// Get the parent Leaf Info from a given leaf and our public key. + /// Returns None if we don't have the data in out state + pub fn parent_leaf_info( + &self, + leaf: &Leaf, + public_key: &TYPES::SignatureKey, + ) -> Option> { + let parent_view_number = leaf.justify_qc().view_number(); + let parent_leaf = self + .saved_leaves + .get(&leaf.justify_qc().data().leaf_commit)?; + let parent_state_and_delta = self.state_and_delta(parent_view_number); + let (Some(state), delta) = parent_state_and_delta else { + return None; + }; + let parent_vid = self + .vid_shares() + .get(&parent_view_number)? + .get(public_key) + .cloned() + .map(|prop| prop.data); + + Some(LeafInfo { + leaf: parent_leaf.clone(), + state, + delta, + vid_share: parent_vid, + }) + } + /// Update the current epoch. /// # Errors /// Can return an error when the new epoch_number is not higher than the existing epoch number. @@ -788,13 +818,14 @@ impl Consensus { /// # Panics /// On inconsistent stored entries pub fn collect_garbage(&mut self, old_anchor_view: TYPES::View, new_anchor_view: TYPES::View) { + let gc_view = TYPES::View::new(new_anchor_view.saturating_sub(1)); // state check let anchor_entry = self .validated_state_map .iter() .next() .expect("INCONSISTENT STATE: anchor leaf not in state map!"); - if *anchor_entry.0 != old_anchor_view { + if **anchor_entry.0 != old_anchor_view.saturating_sub(1) { tracing::error!( "Something about GC has failed. Older leaf exists than the previous anchor leaf." ); @@ -803,15 +834,15 @@ impl Consensus { self.saved_da_certs .retain(|view_number, _| *view_number >= old_anchor_view); self.validated_state_map - .range(old_anchor_view..new_anchor_view) + .range(old_anchor_view..gc_view) .filter_map(|(_view_number, view)| view.leaf_commitment()) .for_each(|leaf| { self.saved_leaves.remove(&leaf); }); - self.validated_state_map = self.validated_state_map.split_off(&new_anchor_view); - self.saved_payloads = self.saved_payloads.split_off(&new_anchor_view); - self.vid_shares = self.vid_shares.split_off(&new_anchor_view); - self.last_proposals = self.last_proposals.split_off(&new_anchor_view); + self.validated_state_map = self.validated_state_map.split_off(&gc_view); + self.saved_payloads = self.saved_payloads.split_off(&gc_view); + self.vid_shares = self.vid_shares.split_off(&gc_view); + self.last_proposals = self.last_proposals.split_off(&gc_view); } /// Gets the last decided leaf. @@ -885,10 +916,10 @@ impl Consensus { Some(()) } - /// Return true if the high QC takes part in forming an eQC, i.e. + /// Return true if the QC takes part in forming an eQC, i.e. /// it is one of the 3-chain certificates but not the eQC itself - pub fn is_high_qc_forming_eqc(&self) -> bool { - let high_qc_leaf_commit = self.high_qc().data.leaf_commit; + pub fn is_qc_forming_eqc(&self, qc: &QuorumCertificate) -> bool { + let high_qc_leaf_commit = qc.data.leaf_commit; let is_high_qc_extended = self.is_leaf_extended(high_qc_leaf_commit); if is_high_qc_extended { tracing::debug!("We have formed an eQC!"); @@ -896,6 +927,11 @@ impl Consensus { self.is_leaf_for_last_block(high_qc_leaf_commit) && !is_high_qc_extended } + /// Returns true if our high qc is forming an eQC + pub fn is_high_qc_forming_eqc(&self) -> bool { + self.is_qc_forming_eqc(self.high_qc()) + } + /// Return true if the given leaf takes part in forming an eQC, i.e. /// it is one of the 3-chain leaves but not the eQC leaf itself pub fn is_leaf_forming_eqc(&self, leaf_commit: LeafCommitment) -> bool { @@ -950,7 +986,6 @@ impl Consensus { }, ) { is_leaf_extended = false; - tracing::trace!("The chain is broken. Leaf ascension failed."); tracing::debug!("Leaf ascension failed; error={e}"); } tracing::trace!("Can the given leaf form an eQC? {}", is_leaf_extended); diff --git a/types/src/message.rs b/types/src/message.rs index 217b5d7578..bf31005c9a 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -29,7 +29,7 @@ use crate::{ data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, + DaCertificate, QuorumCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ @@ -159,14 +159,6 @@ impl ViewMessage for MessageKind { MessageKind::External(_) => TYPES::View::new(1), } } - - fn purpose(&self) -> MessagePurpose { - match &self { - MessageKind::Consensus(message) => message.purpose(), - MessageKind::Data(_) => MessagePurpose::Data, - MessageKind::External(_) => MessagePurpose::External, - } - } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] @@ -212,8 +204,11 @@ pub enum GeneralConsensusMessage { ::PureAssembledSignatureType, ), - /// The leader has responded with a valid proposal. - LeaderProposalAvailable(Proposal>), + /// A replica has responded with a valid proposal. + ProposalResponse(Proposal>), + + /// Message for the next leader containing our highest QC + HighQC(QuorumCertificate), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -258,7 +253,7 @@ impl SequencingMessage { p.data.view_number() } GeneralConsensusMessage::ProposalRequested(req, _) => req.view_number, - GeneralConsensusMessage::LeaderProposalAvailable(proposal) => { + GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() } GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), @@ -279,6 +274,7 @@ impl SequencingMessage { } GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), + GeneralConsensusMessage::HighQC(qc) => qc.view_number(), } } SequencingMessage::Da(da_message) => { @@ -295,42 +291,6 @@ impl SequencingMessage { } } } - - // TODO: Disable panic after the `ViewSync` case is implemented. - /// Get the message purpos - #[allow(clippy::panic)] - fn purpose(&self) -> MessagePurpose { - match &self { - SequencingMessage::General(general_message) => match general_message { - GeneralConsensusMessage::Proposal(_) => MessagePurpose::Proposal, - GeneralConsensusMessage::ProposalRequested(_, _) - | GeneralConsensusMessage::LeaderProposalAvailable(_) => { - MessagePurpose::LatestProposal - } - GeneralConsensusMessage::Vote(_) | GeneralConsensusMessage::TimeoutVote(_) => { - MessagePurpose::Vote - } - GeneralConsensusMessage::ViewSyncPreCommitVote(_) - | GeneralConsensusMessage::ViewSyncCommitVote(_) - | GeneralConsensusMessage::ViewSyncFinalizeVote(_) => MessagePurpose::ViewSyncVote, - - GeneralConsensusMessage::ViewSyncPreCommitCertificate(_) - | GeneralConsensusMessage::ViewSyncCommitCertificate(_) - | GeneralConsensusMessage::ViewSyncFinalizeCertificate(_) => { - MessagePurpose::ViewSyncCertificate - } - - GeneralConsensusMessage::UpgradeProposal(_) => MessagePurpose::UpgradeProposal, - GeneralConsensusMessage::UpgradeVote(_) => MessagePurpose::UpgradeVote, - }, - SequencingMessage::Da(da_message) => match da_message { - DaConsensusMessage::DaProposal(_) => MessagePurpose::Proposal, - DaConsensusMessage::DaVote(_) => MessagePurpose::Vote, - DaConsensusMessage::DaCertificate(_) => MessagePurpose::DaCertificate, - DaConsensusMessage::VidDisperseMsg(_) => MessagePurpose::VidDisperse, - }, - } - } } #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 5a01560832..750ff4d4ec 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -30,11 +30,7 @@ use thiserror::Error; use tokio::{sync::mpsc::error::TrySendError, time::sleep}; use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{ - data::ViewNumber, - message::{MessagePurpose, SequencingMessage}, - BoxSyncFuture, -}; +use crate::{data::ViewNumber, message::SequencingMessage, BoxSyncFuture}; /// Centralized server specific errors #[derive(Debug, Error, Serialize, Deserialize)] @@ -122,9 +118,6 @@ pub trait Id: Eq + PartialEq + Hash {} pub trait ViewMessage { /// get the view out of the message fn view_number(&self) -> TYPES::View; - // TODO move out of this trait. - /// get the purpose of the message - fn purpose(&self) -> MessagePurpose; } /// A request for some data that the consensus layer is asking for. From 1df96da3b74cf2952ae990a11343e3f587e7c0a3 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:06:34 -0500 Subject: [PATCH 1306/1393] update the CDN (#3897) --- hotshot/src/traits/networking/push_cdn_network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index d3b948ce6c..553b2545ef 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -464,7 +464,7 @@ impl ConnectedNetwork for PushCdnNetwork { /// Wait for the client to initialize the connection async fn wait_for_ready(&self) { - self.client.ensure_initialized().await; + let _ = self.client.ensure_initialized().await; } /// TODO: shut down the networks. Unneeded for testing. @@ -473,7 +473,7 @@ impl ConnectedNetwork for PushCdnNetwork { 'a: 'b, Self: 'b, { - boxed_sync(async move {}) + boxed_sync(async move { self.client.close().await }) } /// Broadcast a message to all members of the quorum. From fa17d06c88783620baf22a5c48633604d64140bd Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Tue, 19 Nov 2024 19:11:00 +0300 Subject: [PATCH 1307/1393] Remove "testing implementations" warning (#3899) --- testing/src/lib.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/testing/src/lib.rs b/testing/src/lib.rs index 20d513dc2a..28f973a5b3 100644 --- a/testing/src/lib.rs +++ b/testing/src/lib.rs @@ -6,12 +6,6 @@ //! Testing infrastructure for `HotShot` -#![cfg_attr( - // hotshot_example option is set manually in justfile when running examples - not(any(test, debug_assertions)), - deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" -)] - /// Helpers for initializing system context handle and building tasks. pub mod helpers; From 249d27a3ce2fc1ff42fdbc4dfed0947cbd07d55f Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:24:46 -0500 Subject: [PATCH 1308/1393] Add new leaf type (#3761) --- example-types/src/block_types.rs | 8 +- example-types/src/state_types.rs | 4 +- example-types/src/storage_types.rs | 79 +++- examples/push-cdn/all.rs | 1 - hotshot/src/lib.rs | 77 ++-- hotshot/src/tasks/mod.rs | 11 +- hotshot/src/types/handle.rs | 21 +- task-impls/src/consensus/handlers.rs | 6 +- task-impls/src/consensus/mod.rs | 6 +- task-impls/src/events.rs | 49 ++- task-impls/src/helpers.rs | 64 ++-- task-impls/src/network.rs | 22 +- task-impls/src/quorum_proposal/handlers.rs | 51 +-- task-impls/src/quorum_proposal/mod.rs | 16 +- .../src/quorum_proposal_recv/handlers.rs | 21 +- task-impls/src/quorum_proposal_recv/mod.rs | 11 +- task-impls/src/quorum_vote/handlers.rs | 33 +- task-impls/src/quorum_vote/mod.rs | 36 +- task-impls/src/vote_collection.rs | 35 +- testing/src/byzantine/byzantine_behaviour.rs | 14 +- testing/src/consistency_task.rs | 13 +- testing/src/helpers.rs | 10 +- testing/src/overall_safety_task.rs | 18 +- testing/src/spinning_task.rs | 11 +- testing/src/test_runner.rs | 7 +- testing/src/view_generator.rs | 69 ++-- testing/tests/tests_1/message.rs | 74 ++++ .../tests_1/quorum_proposal_recv_task.rs | 9 +- testing/tests/tests_1/quorum_proposal_task.rs | 42 +-- testing/tests/tests_1/quorum_vote_task.rs | 10 +- .../tests_1/upgrade_task_with_proposal.rs | 16 +- .../tests/tests_1/upgrade_task_with_vote.rs | 6 +- testing/tests/tests_1/view_sync_task.rs | 4 +- .../tests/tests_1/vote_dependency_handle.rs | 6 +- types/Cargo.toml | 3 +- types/src/consensus.rs | 64 ++-- types/src/data.rs | 341 +++++++++++++++++- types/src/error.rs | 8 +- types/src/event.rs | 13 +- types/src/message.rs | 50 ++- types/src/simple_certificate.rs | 48 ++- types/src/simple_vote.rs | 58 ++- types/src/traits/block_contents.rs | 6 +- types/src/traits/node_implementation.rs | 8 +- types/src/traits/states.rs | 4 +- types/src/traits/storage.rs | 26 +- types/src/utils.rs | 4 +- 47 files changed, 1071 insertions(+), 422 deletions(-) diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index add9d76269..b7e763546d 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -13,7 +13,7 @@ use std::{ use async_trait::async_trait; use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{ - data::{BlockError, Leaf}, + data::{BlockError, Leaf2}, traits::{ block_contents::{BlockHeader, BuilderFee, EncodeBytes, TestableBlock, Transaction}, node_implementation::NodeType, @@ -272,7 +272,7 @@ pub struct TestBlockHeader { impl TestBlockHeader { pub fn new>( - parent_leaf: &Leaf, + parent_leaf: &Leaf2, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, metadata: TestMetadata, @@ -312,7 +312,7 @@ impl< async fn new_legacy( _parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, - parent_leaf: &Leaf, + parent_leaf: &Leaf2, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, metadata: >::Metadata, @@ -332,7 +332,7 @@ impl< async fn new_marketplace( _parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, - parent_leaf: &Leaf, + parent_leaf: &Leaf2, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, metadata: >::Metadata, diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index c5fde414bf..9b52e0c662 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -10,7 +10,7 @@ use std::fmt::Debug; use async_trait::async_trait; use committable::{Commitment, Committable}; use hotshot_types::{ - data::{fake_commitment, BlockError, Leaf, ViewNumber}, + data::{fake_commitment, BlockError, Leaf2, ViewNumber}, traits::{ block_contents::BlockHeader, node_implementation::NodeType, @@ -103,7 +103,7 @@ impl ValidatedState for TestValidatedState { async fn validate_and_apply_header( &self, instance: &Self::Instance, - _parent_leaf: &Leaf, + _parent_leaf: &Leaf2, _proposed_header: &TYPES::BlockHeader, _vid_common: VidCommon, _version: Version, diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index c4be058fe4..be94cec84d 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -14,10 +14,10 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, + data::{DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare}, event::HotShotAction, message::Proposal, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ node_implementation::{ConsensusTime, NodeType}, storage::Storage, @@ -34,13 +34,14 @@ type VidShares = HashMap< ::View, HashMap<::SignatureKey, Proposal>>, >; - #[derive(Clone, Debug)] pub struct TestStorageState { vids: VidShares, das: HashMap>>, proposals: BTreeMap>>, + proposals2: BTreeMap>>, high_qc: Option>, + high_qc2: Option>, action: TYPES::View, epoch: TYPES::Epoch, } @@ -51,7 +52,9 @@ impl Default for TestStorageState { vids: HashMap::new(), das: HashMap::new(), proposals: BTreeMap::new(), + proposals2: BTreeMap::new(), high_qc: None, + high_qc2: None, action: TYPES::View::genesis(), epoch: TYPES::Epoch::genesis(), } @@ -91,11 +94,11 @@ impl TestableDelay for TestStorage { impl TestStorage { pub async fn proposals_cloned( &self, - ) -> BTreeMap>> { - self.inner.read().await.proposals.clone() + ) -> BTreeMap>> { + self.inner.read().await.proposals2.clone() } - pub async fn high_qc_cloned(&self) -> Option> { - self.inner.read().await.high_qc.clone() + pub async fn high_qc_cloned(&self) -> Option> { + self.inner.read().await.high_qc2.clone() } pub async fn decided_upgrade_certificate(&self) -> Option> { self.decided_upgrade_certificate.read().await.clone() @@ -153,6 +156,20 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + inner + .proposals2 + .insert(proposal.data.view_number, proposal.clone()); + Ok(()) + } async fn record_action( &self, @@ -188,6 +205,25 @@ impl Storage for TestStorage { } Ok(()) } + + async fn update_high_qc2( + &self, + new_high_qc: hotshot_types::simple_certificate::QuorumCertificate2, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to update high qc to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + if let Some(ref current_high_qc) = inner.high_qc2 { + if new_high_qc.view_number() > current_high_qc.view_number() { + inner.high_qc2 = Some(new_high_qc); + } + } else { + inner.high_qc2 = Some(new_high_qc); + } + Ok(()) + } async fn update_undecided_state( &self, _leafs: CommitmentMap>, @@ -199,6 +235,17 @@ impl Storage for TestStorage { Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } + async fn update_undecided_state2( + &self, + _leafs: CommitmentMap>, + _state: BTreeMap>, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to update high qc to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + Ok(()) + } async fn update_decided_upgrade_certificate( &self, decided_upgrade_certificate: Option>, @@ -207,4 +254,22 @@ impl Storage for TestStorage { Ok(()) } + + async fn migrate_consensus( + &self, + _convert_leaf: fn(Leaf) -> Leaf2, + convert_proposal: fn( + Proposal>, + ) -> Proposal>, + ) -> Result<()> { + let mut storage_writer = self.inner.write().await; + + for (view, proposal) in storage_writer.proposals.clone().iter() { + storage_writer + .proposals2 + .insert(*view, convert_proposal(proposal.clone())); + } + + Ok(()) + } } diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 2e04d2d72b..12599c36a4 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -14,7 +14,6 @@ use cdn_broker::{ reexports::{crypto::signature::KeyPair, def::hook::NoMessageHook}, Broker, }; - use cdn_marshal::Marshal; use hotshot::{ helpers::initialize_logging, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 05ccea0a0c..9f6d8591e5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -11,6 +11,7 @@ #[cfg(feature = "docs")] pub mod documentation; +use committable::Committable; use futures::future::{select, Either}; use hotshot_types::{ message::UpgradeLock, @@ -48,10 +49,10 @@ pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, OuterConsensus, View, ViewInner}, constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, - data::{Leaf, QuorumProposal}, + data::{Leaf, Leaf2, QuorumProposal, QuorumProposal2}, event::{EventType, LeafInfo}, - message::{DataMessage, Message, MessageKind, Proposal}, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + message::{convert_proposal, DataMessage, Message, MessageKind, Proposal}, + simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -59,6 +60,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, states::ValidatedState, + storage::Storage, EncodeBytes, }, HotShotConfig, @@ -138,7 +140,7 @@ pub struct SystemContext, V: Versi pub(crate) external_event_stream: (Sender>, InactiveReceiver>), /// Anchored leaf provided by the initializer. - anchored_leaf: Leaf, + anchored_leaf: Leaf2, /// access to the internal event stream, in case we need to, say, shut something down #[allow(clippy::type_complexity)] @@ -195,6 +197,10 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, ) -> Arc { + #[allow(clippy::panic)] + match storage + .migrate_consensus( + Into::>::into, + convert_proposal::, QuorumProposal2>, + ) + .await + { + Ok(()) => {} + Err(e) => { + panic!("Failed to migrate consensus storage: {e}"); + } + } + let interal_chan = broadcast(EVENT_CHANNEL_SIZE); let external_chan = broadcast(EXTERNAL_EVENT_CHANNEL_SIZE); @@ -225,7 +245,6 @@ impl, V: Versions> SystemContext`] with the given configuration options. @@ -236,7 +255,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, @@ -287,7 +306,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext(&validated_state, self.instance_state.as_ref()) - .await, + .await + .to_qc2(), ); broadcast_event( @@ -532,7 +549,7 @@ impl, V: Versions> SystemContext Leaf { + pub async fn decided_leaf(&self) -> Leaf2 { self.consensus.read().await.decided_leaf() } @@ -543,7 +560,7 @@ impl, V: Versions> SystemContext Option> { + pub fn try_decided_leaf(&self) -> Option> { self.consensus.try_read().map(|guard| guard.decided_leaf()) } @@ -957,7 +974,7 @@ impl, V: Versions> ConsensusApi { /// the leaf specified initialization - inner: Leaf, + inner: Leaf2, /// Instance-level state. instance_state: TYPES::InstanceState, @@ -983,16 +1000,16 @@ pub struct HotShotInitializer { /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. - high_qc: QuorumCertificate, + high_qc: QuorumCertificate2, /// Previously decided upgrade certificate; this is necessary if an upgrade has happened and we are not restarting with the new version decided_upgrade_certificate: Option>, /// Undecided leafs that were seen, but not yet decided on. These allow a restarting node /// to vote and propose right away if they didn't miss anything while down. - undecided_leafs: Vec>, + undecided_leafs: Vec>, /// Not yet decided state undecided_state: BTreeMap>, /// Proposals we have sent out to provide to others for catchup - saved_proposals: BTreeMap>>, + saved_proposals: BTreeMap>>, } impl HotShotInitializer { @@ -1003,10 +1020,14 @@ impl HotShotInitializer { instance_state: TYPES::InstanceState, ) -> Result> { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); - let high_qc = QuorumCertificate::genesis::(&validated_state, &instance_state).await; + let high_qc = QuorumCertificate::genesis::(&validated_state, &instance_state) + .await + .to_qc2(); Ok(Self { - inner: Leaf::genesis(&validated_state, &instance_state).await, + inner: Leaf::genesis(&validated_state, &instance_state) + .await + .into(), validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::View::new(0), @@ -1030,16 +1051,16 @@ impl HotShotInitializer { /// `SystemContext`. #[allow(clippy::too_many_arguments)] pub fn from_reload( - anchor_leaf: Leaf, + anchor_leaf: Leaf2, instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::View, start_epoch: TYPES::Epoch, actioned_view: TYPES::View, - saved_proposals: BTreeMap>>, - high_qc: QuorumCertificate, + saved_proposals: BTreeMap>>, + high_qc: QuorumCertificate2, decided_upgrade_certificate: Option>, - undecided_leafs: Vec>, + undecided_leafs: Vec>, undecided_state: BTreeMap>, ) -> Self { Self { diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 6ddd00b252..bb9f7ed630 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,11 +10,6 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; -use crate::{ - tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, -}; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; use async_trait::async_trait; @@ -48,6 +43,12 @@ use hotshot_types::{ use tokio::{spawn, time::sleep}; use vbs::version::StaticVersionType; +use crate::{ + tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, +}; + /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 2ab7874b8f..19fcb97f1d 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ consensus::Consensus, - data::{Leaf, QuorumProposal}, + data::{Leaf2, QuorumProposal2}, error::HotShotError, message::{Message, MessageKind, Proposal, RecipientList}, request_response::ProposalRequestPayload, @@ -141,8 +141,9 @@ impl + 'static, V: Versions> &self, view: TYPES::View, epoch: TYPES::Epoch, - leaf_commitment: Commitment>, - ) -> Result>>>> { + leaf_commitment: Commitment>, + ) -> Result>>>> + { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. let signed_proposal_request = ProposalRequestPayload { @@ -157,7 +158,6 @@ impl + 'static, V: Versions> )?; let mem = self.memberships.quorum_membership.clone(); - let upgrade_lock = self.hotshot.upgrade_lock.clone(); let receiver = self.internal_event_stream.1.activate_cloned(); let sender = self.internal_event_stream.0.clone(); Ok(async move { @@ -188,15 +188,12 @@ impl + 'static, V: Versions> if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if let Err(err) = quorum_proposal - .validate_signature(&mem, epoch, &upgrade_lock) - .await - { + if let Err(err) = quorum_proposal.validate_signature(&mem, epoch) { tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); continue; } - let proposed_leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); - let commit = proposed_leaf.commit(&upgrade_lock).await; + let proposed_leaf = Leaf2::from_quorum_proposal(&quorum_proposal.data); + let commit = proposed_leaf.commit(); if commit == leaf_commitment { return Ok(quorum_proposal.clone()); } @@ -255,7 +252,7 @@ impl + 'static, V: Versions> /// /// # Panics /// If the internal consensus is in an inconsistent state. - pub async fn decided_leaf(&self) -> Leaf { + pub async fn decided_leaf(&self) -> Leaf2 { self.hotshot.decided_leaf().await } @@ -265,7 +262,7 @@ impl + 'static, V: Versions> /// # Panics /// Panics if internal consensus is in an inconsistent state. #[must_use] - pub fn try_decided_leaf(&self) -> Option> { + pub fn try_decided_leaf(&self) -> Option> { self.hotshot.try_decided_leaf() } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 287e14e03e..1dd319fed9 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -10,7 +10,7 @@ use async_broadcast::Sender; use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, - simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, + simple_vote::{QuorumVote2, TimeoutData, TimeoutVote}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -20,13 +20,13 @@ use hotshot_types::{ use tokio::{spawn, time::sleep}; use tracing::instrument; use utils::anytrace::*; +use vbs::version::StaticVersionType; use super::ConsensusTaskState; use crate::{ consensus::Versions, events::HotShotEvent, helpers::broadcast_event, vote_collection::handle_vote, }; -use vbs::version::StaticVersionType; /// Handle a `QuorumVoteRecv` event. pub(crate) async fn handle_quorum_vote_recv< @@ -34,7 +34,7 @@ pub(crate) async fn handle_quorum_vote_recv< I: NodeImplementation, V: Versions, >( - vote: &QuorumVote, + vote: &QuorumVote2, event: Arc>, sender: &Sender>>, task_state: &mut ConsensusTaskState, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 15d6bc6ec8..6def6ebd82 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -14,8 +14,8 @@ use hotshot_types::{ consensus::OuterConsensus, event::Event, message::UpgradeLock, - simple_certificate::{QuorumCertificate, TimeoutCertificate}, - simple_vote::{QuorumVote, TimeoutVote}, + simple_certificate::{QuorumCertificate2, TimeoutCertificate}, + simple_vote::{QuorumVote2, TimeoutVote}, traits::{ node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, @@ -57,7 +57,7 @@ pub struct ConsensusTaskState, V: pub committee_membership: Arc, /// A map of `QuorumVote` collector tasks. - pub vote_collectors: VoteCollectorsMap, QuorumCertificate, V>, + pub vote_collectors: VoteCollectorsMap, QuorumCertificate2, V>, /// A map of `TimeoutVote` collector tasks. pub timeout_vote_collectors: diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 6744208fa5..5153028e99 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -11,17 +11,18 @@ use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{ - DaProposal, Leaf, PackedBundle, QuorumProposal, UpgradeProposal, VidDisperse, + DaProposal, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, VidDisperseShare, }, message::Proposal, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DaVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ @@ -49,7 +50,7 @@ pub struct ProposalMissing { /// View of missing proposal pub view: TYPES::View, /// Channel to send the response back to - pub response_chan: Sender>>>, + pub response_chan: Sender>>>, } impl PartialEq for ProposalMissing { @@ -71,9 +72,9 @@ pub enum HotShotEvent { /// Shutdown the task Shutdown, /// A quorum proposal has been received from the network; handled by the consensus task - QuorumProposalRecv(Proposal>, TYPES::SignatureKey), + QuorumProposalRecv(Proposal>, TYPES::SignatureKey), /// A quorum vote has been received from the network; handled by the consensus task - QuorumVoteRecv(QuorumVote), + QuorumVoteRecv(QuorumVote2), /// A timeout vote received from the network; handled by consensus task TimeoutVoteRecv(TimeoutVote), /// Send a timeout vote to the network; emitted by consensus task replicas @@ -89,18 +90,18 @@ pub enum HotShotEvent { /// A DAC is validated. DaCertificateValidated(DaCertificate), /// Send a quorum proposal to the network; emitted by the leader in the consensus task - QuorumProposalSend(Proposal>, TYPES::SignatureKey), + QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal - QuorumVoteSend(QuorumVote), + QuorumVoteSend(QuorumVote2), /// Broadcast a quorum vote to form an eQC; emitted by a replica in the consensus task after seeing a valid quorum proposal - ExtendedQuorumVoteSend(QuorumVote), + ExtendedQuorumVoteSend(QuorumVote2), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. The proposal is not for an old view /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid /// 4. The proposal passes either liveness or safety check. - QuorumProposalValidated(Proposal>, Leaf), + QuorumProposalValidated(Proposal>, Leaf2), /// A quorum proposal is missing for a view that we need. QuorumProposalRequestSend( ProposalRequestPayload, @@ -112,15 +113,17 @@ pub enum HotShotEvent { ::PureAssembledSignatureType, ), /// A quorum proposal was missing for a view. As the leader, we send a reply to the recipient with their key. - QuorumProposalResponseSend(TYPES::SignatureKey, Proposal>), + QuorumProposalResponseSend(TYPES::SignatureKey, Proposal>), /// A quorum proposal was requested by a node for a view. - QuorumProposalResponseRecv(Proposal>), + QuorumProposalResponseRecv(Proposal>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal DaVoteSend(DaVote), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QcFormed(Either, TimeoutCertificate>), + /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only + Qc2Formed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks @@ -175,7 +178,6 @@ pub enum HotShotEvent { ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), - /// Send VID shares to VID storage nodes; emitted by the DA leader /// /// Like [`HotShotEvent::DaProposalSend`]. @@ -199,13 +201,12 @@ pub enum HotShotEvent { UpgradeVoteSend(UpgradeVote), /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), - /// A quorum proposal has been preliminarily validated. /// The preliminary checks include: /// 1. The proposal is not for an old view /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid - QuorumProposalPreliminarilyValidated(Proposal>), + QuorumProposalPreliminarilyValidated(Proposal>), /// Send a VID request to the network; emitted to on of the members of DA committee. /// Includes the data request, node's public key and signature as well as public key of DA committee who we want to send to. @@ -238,10 +239,10 @@ pub enum HotShotEvent { ), /// A replica send us a High QC - HighQcRecv(QuorumCertificate, TYPES::SignatureKey), + HighQcRecv(QuorumCertificate2, TYPES::SignatureKey), - /// Send our HighQC to the next leader, should go to the same leader as our vote - HighQcSend(QuorumCertificate, TYPES::SignatureKey), + /// Send our HighQc to the next leader, should go to the same leader as our vote + HighQcSend(QuorumCertificate2, TYPES::SignatureKey), } impl HotShotEvent { @@ -256,8 +257,8 @@ impl HotShotEvent { HotShotEvent::QuorumProposalRecv(proposal, _) | HotShotEvent::QuorumProposalSend(proposal, _) | HotShotEvent::QuorumProposalValidated(proposal, _) - | HotShotEvent::QuorumProposalResponseSend(_, proposal) | HotShotEvent::QuorumProposalResponseRecv(proposal) + | HotShotEvent::QuorumProposalResponseSend(_, proposal) | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { Some(proposal.data.view_number()) } @@ -274,6 +275,10 @@ impl HotShotEvent { either::Left(qc) => Some(qc.view_number()), either::Right(tc) => Some(tc.view_number()), }, + HotShotEvent::Qc2Formed(cert) => match cert { + either::Left(qc) => Some(qc.view_number()), + either::Right(tc) => Some(tc.view_number()), + }, HotShotEvent::ViewSyncCommitVoteSend(vote) | HotShotEvent::ViewSyncCommitVoteRecv(vote) => Some(vote.view_number()), HotShotEvent::ViewSyncPreCommitVoteRecv(vote) @@ -396,6 +401,10 @@ impl Display for HotShotEvent { either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), }, + HotShotEvent::Qc2Formed(cert) => match cert { + either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), + either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), + }, HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index bcabd40e9e..19753c329b 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -13,14 +13,13 @@ use async_broadcast::{Receiver, SendError, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; -use hotshot_types::utils::epoch_from_block_number; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, QuorumProposal, ViewChangeEvidence}, + data::{Leaf2, QuorumProposal2, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, request_response::ProposalRequestPayload, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ block_contents::BlockHeader, election::Membership, @@ -28,7 +27,7 @@ use hotshot_types::{ signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{Terminator, View, ViewInner}, + utils::{epoch_from_block_number, Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; use tokio::time::timeout; @@ -49,7 +48,7 @@ pub(crate) async fn fetch_proposal( sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, upgrade_lock: &UpgradeLock, -) -> Result<(Leaf, View)> { +) -> Result<(Leaf2, View)> { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. let signed_proposal_request = ProposalRequestPayload { @@ -105,7 +104,7 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem, cur_epoch, upgrade_lock).await.is_ok() { + if quorum_proposal.validate_signature(&mem, cur_epoch).is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -132,20 +131,17 @@ pub(crate) async fn fetch_proposal( bail!("Invalid justify_qc in proposal for view {}", *view_number); } let mut consensus_writer = consensus.write().await; - let leaf = Leaf::from_quorum_proposal(&proposal.data); + let leaf = Leaf2::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header(&proposal.data.block_header), ); - if let Err(e) = consensus_writer - .update_leaf(leaf.clone(), Arc::clone(&state), None, upgrade_lock) - .await - { + if let Err(e) = consensus_writer.update_leaf(leaf.clone(), Arc::clone(&state), None) { tracing::trace!("{e:?}"); } let view = View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(upgrade_lock).await, + leaf: leaf.commit(), state, delta: None, }, @@ -163,7 +159,7 @@ pub struct LeafChainTraversalOutcome { pub new_decided_view_number: Option, /// The qc for the decided chain. - pub new_decide_qc: Option>, + pub new_decide_qc: Option>, /// The decided leaves with corresponding validated state and VID info. pub leaf_views: Vec>, @@ -197,14 +193,14 @@ impl Default for LeafChainTraversalOutcome { /// # Panics /// Can't actually panic pub async fn decide_from_proposal_2( - proposal: &QuorumProposal, + proposal: &QuorumProposal2, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, ) -> LeafChainTraversalOutcome { let mut res = LeafChainTraversalOutcome::default(); let consensus_reader = consensus.read().await; - let proposed_leaf = Leaf::from_quorum_proposal(proposal); + let proposed_leaf = Leaf2::from_quorum_proposal(proposal); res.new_locked_view_number = Some(proposed_leaf.justify_qc().view_number()); // If we don't have the proposals parent return early @@ -303,7 +299,7 @@ pub async fn decide_from_proposal_2( /// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and /// the anchor view will be set to view 6, with the locked view as view 7. pub async fn decide_from_proposal( - proposal: &QuorumProposal, + proposal: &QuorumProposal2, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, @@ -434,7 +430,7 @@ pub(crate) async fn parent_leaf_and_state( consensus: OuterConsensus, upgrade_lock: &UpgradeLock, parent_view_number: TYPES::View, -) -> Result<(Leaf, Arc<::ValidatedState>)> { +) -> Result<(Leaf2, Arc<::ValidatedState>)> { let consensus_reader = consensus.read().await; let cur_epoch = consensus_reader.cur_epoch(); ensure!( @@ -504,18 +500,17 @@ pub async fn validate_proposal_safety_and_liveness< I: NodeImplementation, V: Versions, >( - proposal: Proposal>, - parent_leaf: Leaf, + proposal: Proposal>, + parent_leaf: Leaf2, validation_info: &ValidationInfo, event_stream: Sender>>, sender: TYPES::SignatureKey, ) -> Result<()> { let view_number = proposal.data.view_number(); - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + let proposed_leaf = Leaf2::from_quorum_proposal(&proposal.data); ensure!( - proposed_leaf.parent_commitment() - == parent_leaf.commit(&validation_info.upgrade_lock).await, + proposed_leaf.parent_commitment() == parent_leaf.commit(), "Proposed leaf does not extend the parent leaf." ); @@ -525,15 +520,7 @@ pub async fn validate_proposal_safety_and_liveness< { let mut consensus_writer = validation_info.consensus.write().await; - if let Err(e) = consensus_writer - .update_leaf( - proposed_leaf.clone(), - state, - None, - &validation_info.upgrade_lock, - ) - .await - { + if let Err(e) = consensus_writer.update_leaf(proposed_leaf.clone(), state, None) { tracing::trace!("{e:?}"); } @@ -657,7 +644,7 @@ pub(crate) async fn validate_proposal_view_and_certs< I: NodeImplementation, V: Versions, >( - proposal: &Proposal>, + proposal: &Proposal>, validation_info: &ValidationInfo, ) -> Result<()> { let view_number = proposal.data.view_number(); @@ -668,18 +655,15 @@ pub(crate) async fn validate_proposal_view_and_certs< ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - proposal - .validate_signature( - &validation_info.quorum_membership, - validation_info.cur_epoch, - &validation_info.upgrade_lock, - ) - .await?; + proposal.validate_signature( + &validation_info.quorum_membership, + validation_info.cur_epoch, + )?; // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.view_number() != view_number - 1 { let received_proposal_cert = - proposal.data.proposal_certificate.clone().context(debug!( + proposal.data.view_change_evidence.clone().context(debug!( "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", *view_number ))?; diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a80c34759b..a4c972b95f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -19,8 +19,8 @@ use hotshot_types::{ data::{VidDisperse, VidDisperseShare}, event::{Event, EventType, HotShotAction}, message::{ - DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, - SequencingMessage, UpgradeLock, + convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, + MessageKind, Proposal, SequencingMessage, UpgradeLock, }, traits::{ election::Membership, @@ -72,16 +72,16 @@ impl NetworkMessageTaskState { let event = match consensus_message { SequencingMessage::General(general_message) => match general_message { GeneralConsensusMessage::Proposal(proposal) => { - HotShotEvent::QuorumProposalRecv(proposal, sender) + HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) } GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) } GeneralConsensusMessage::ProposalResponse(proposal) => { - HotShotEvent::QuorumProposalResponseRecv(proposal) + HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) } GeneralConsensusMessage::Vote(vote) => { - HotShotEvent::QuorumVoteRecv(vote.clone()) + HotShotEvent::QuorumVoteRecv(vote.to_vote2()) } GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) @@ -114,7 +114,9 @@ impl NetworkMessageTaskState { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } - GeneralConsensusMessage::HighQC(qc) => HotShotEvent::HighQcRecv(qc, sender), + GeneralConsensusMessage::HighQc(qc) => { + HotShotEvent::HighQcRecv(qc.to_qc2(), sender) + } }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -379,7 +381,7 @@ impl< Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::Proposal(proposal), + GeneralConsensusMessage::Proposal(convert_proposal(proposal)), )), TransmitType::Broadcast, )) @@ -404,7 +406,7 @@ impl< Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::Vote(vote.clone()), + GeneralConsensusMessage::Vote(vote.clone().to_vote()), )), TransmitType::Direct(leader), )) @@ -414,7 +416,7 @@ impl< Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::Vote(vote.clone()), + GeneralConsensusMessage::Vote(vote.clone().to_vote()), )), TransmitType::Broadcast, )) @@ -429,7 +431,7 @@ impl< HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( sender_key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ProposalResponse(proposal), + GeneralConsensusMessage::ProposalResponse(convert_proposal(proposal)), )), TransmitType::Direct(sender_key), )), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index cfc46f4201..94f2b12207 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -13,20 +13,16 @@ use std::{ time::{Duration, Instant}, }; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, parent_leaf_and_state}, - quorum_proposal::{UpgradeLock, Versions}, -}; use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; +use committable::Committable; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, - data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, + data::{Leaf2, QuorumProposal, VidDisperse, ViewChangeEvidence}, message::Proposal, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ block_contents::BlockHeader, node_implementation::{ConsensusTime, NodeType}, @@ -38,19 +34,25 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; +use crate::{ + events::HotShotEvent, + helpers::{broadcast_event, parent_leaf_and_state}, + quorum_proposal::{UpgradeLock, Versions}, +}; + /// Proposal dependency types. These types represent events that precipitate a proposal. #[derive(PartialEq, Debug)] pub(crate) enum ProposalDependency { /// For the `SendPayloadCommitmentAndMetadata` event. PayloadAndMetadata, - /// For the `QcFormed` event. + /// For the `Qc2Formed` event. Qc, /// For the `ViewSyncFinalizeCertificate2Recv` event. ViewSyncCert, - /// For the `QcFormed` event timeout branch. + /// For the `Qc2Formed` event timeout branch. TimeoutCert, /// For the `QuroumProposalRecv` event. @@ -109,15 +111,15 @@ pub struct ProposalDependencyHandle { pub view_start_time: Instant, /// The higest_qc we've seen at the start of this task - pub highest_qc: QuorumCertificate, + pub highest_qc: QuorumCertificate2, } impl ProposalDependencyHandle { - /// Return the next HighQC we get from the event stream + /// Return the next HighQc we get from the event stream async fn wait_for_qc_event( &self, rx: &mut Receiver>>, - ) -> Option> { + ) -> Option> { while let Ok(event) = rx.recv_direct().await { if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() { if qc @@ -134,7 +136,7 @@ impl ProposalDependencyHandle { } None } - /// Waits for the ocnfigured timeout for nodes to send HighQC messages to us. We'll + /// Waits for the ocnfigured timeout for nodes to send HighQc messages to us. We'll /// then propose with the higest QC from among these proposals. async fn wait_for_highest_qc(&mut self) { tracing::error!("waiting for QC"); @@ -188,7 +190,7 @@ impl ProposalDependencyHandle { view_change_evidence: Option>, formed_upgrade_certificate: Option>, decided_upgrade_certificate: Arc>>>, - parent_qc: QuorumCertificate, + parent_qc: QuorumCertificate2, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, @@ -293,23 +295,22 @@ impl ProposalDependencyHandle { let proposal = QuorumProposal { block_header, view_number: self.view_number, - justify_qc: parent_qc, + justify_qc: parent_qc.to_qc(), upgrade_certificate, proposal_certificate, - }; + } + .into(); - let proposed_leaf = Leaf::from_quorum_proposal(&proposal); + let proposed_leaf = Leaf2::from_quorum_proposal(&proposal); ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(&self.upgrade_lock).await, + proposed_leaf.parent_commitment() == parent_leaf.commit(), "Proposed leaf parent does not equal high qc" ); - let signature = TYPES::SignatureKey::sign( - &self.private_key, - proposed_leaf.commit(&self.upgrade_lock).await.as_ref(), - ) - .wrap() - .context(error!("Failed to compute proposed_leaf.commit()"))?; + let signature = + TYPES::SignatureKey::sign(&self.private_key, proposed_leaf.commit().as_ref()) + .wrap() + .context(error!("Failed to compute proposed_leaf.commit()"))?; let message = Proposal { data: proposal, @@ -363,7 +364,7 @@ impl HandleDepOutput for ProposalDependencyHandle< auction_result: auction_result.clone(), }); } - HotShotEvent::QcFormed(cert) => match cert { + HotShotEvent::Qc2Formed(cert) => match cert { either::Right(timeout) => { timeout_certificate = Some(timeout.clone()); } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index a6b641e602..9951f63e6d 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -19,7 +19,7 @@ use hotshot_types::{ consensus::OuterConsensus, event::Event, message::UpgradeLock, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, @@ -93,7 +93,7 @@ pub struct QuorumProposalTaskState pub epoch_height: u64, /// The higest_qc we've seen at the start of this task - pub highest_qc: QuorumCertificate, + pub highest_qc: QuorumCertificate2, } impl, V: Versions> @@ -113,14 +113,14 @@ impl, V: Versions> let event = event.as_ref(); let event_view = match dependency_type { ProposalDependency::Qc => { - if let HotShotEvent::QcFormed(either::Left(qc)) = event { + if let HotShotEvent::Qc2Formed(either::Left(qc)) = event { qc.view_number() + 1 } else { return false; } } ProposalDependency::TimeoutCert => { - if let HotShotEvent::QcFormed(either::Right(timeout)) = event { + if let HotShotEvent::Qc2Formed(either::Right(timeout)) = event { timeout.view_number() + 1 } else { return false; @@ -227,7 +227,7 @@ impl, V: Versions> HotShotEvent::QuorumProposalPreliminarilyValidated(..) => { proposal_dependency.mark_as_completed(event); } - HotShotEvent::QcFormed(quorum_certificate) => match quorum_certificate { + HotShotEvent::Qc2Formed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); } @@ -251,7 +251,7 @@ impl, V: Versions> // 2. A view sync cert was received. AndDependency::from_deps(vec![view_sync_dependency]), ]; - // 3. A `QcFormed`` event (and `QuorumProposalRecv` event) + // 3. A `Qc2Formed`` event (and `QuorumProposalRecv` event) if *view_number > 1 { secondary_deps.push(AndDependency::from_deps(vec![ qc_dependency, @@ -381,7 +381,7 @@ impl, V: Versions> self.formed_upgrade_certificate = Some(cert.clone()); } } - HotShotEvent::QcFormed(cert) => match cert.clone() { + HotShotEvent::Qc2Formed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; let epoch_number = self.consensus.read().await.cur_epoch(); @@ -413,7 +413,7 @@ impl, V: Versions> self.storage .write() .await - .update_high_qc(qc.clone()) + .update_high_qc2(qc.clone()) .await .wrap() .context(error!("Failed to update high QC in storage!"))?; diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index d7ce8aefc0..f5fe251367 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -11,13 +11,13 @@ use std::sync::Arc; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; -use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, QuorumProposal}, + data::{Leaf2, QuorumProposal, QuorumProposal2}, message::Proposal, simple_certificate::QuorumCertificate, traits::{ + block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, @@ -30,6 +30,7 @@ use hotshot_types::{ use tokio::spawn; use tracing::instrument; use utils::anytrace::*; +use vbs::version::StaticVersionType; use super::{QuorumProposalRecvTaskState, ValidationInfo}; use crate::{ @@ -40,25 +41,21 @@ use crate::{ }, quorum_proposal_recv::{UpgradeLock, Versions}, }; -use vbs::version::StaticVersionType; /// Update states in the event that the parent state is not found for a given `proposal`. #[instrument(skip_all)] async fn validate_proposal_liveness, V: Versions>( - proposal: &Proposal>, + proposal: &Proposal>, validation_info: &ValidationInfo, ) -> Result<()> { let mut consensus_writer = validation_info.consensus.write().await; - let leaf = Leaf::from_quorum_proposal(&proposal.data); + let leaf = Leaf2::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header(&proposal.data.block_header), ); - if let Err(e) = consensus_writer - .update_leaf(leaf.clone(), state, None, &validation_info.upgrade_lock) - .await - { + if let Err(e) = consensus_writer.update_leaf(leaf.clone(), state, None) { tracing::trace!("{e:?}"); } @@ -66,7 +63,7 @@ async fn validate_proposal_liveness, V: Versions, >( - proposal: &Proposal>, + proposal: &Proposal>, quorum_proposal_sender_key: &TYPES::SignatureKey, event_sender: &Sender>>, event_receiver: &Receiver>>, @@ -218,7 +215,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .storage .write() .await - .update_high_qc(justify_qc.clone()) + .update_high_qc2(justify_qc.clone()) .await { bail!("Failed to store High QC, not voting; error = {:?}", e); diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 332ecaf241..db62d33e5d 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -8,11 +8,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use self::handlers::handle_quorum_proposal_recv; -use crate::{ - events::{HotShotEvent, ProposalMissing}, - helpers::{broadcast_event, fetch_proposal}, -}; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; @@ -35,6 +30,12 @@ use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use utils::anytrace::{bail, Result}; use vbs::version::Version; + +use self::handlers::handle_quorum_proposal_recv; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, +}; /// Event handlers for this task. mod handlers; diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 198d075de5..d585259385 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -9,12 +9,13 @@ use std::sync::Arc; use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; +use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, QuorumProposal, VidDisperseShare}, + data::{Leaf2, QuorumProposal2, VidDisperseShare}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, - simple_vote::{QuorumData, QuorumVote}, + simple_vote::{QuorumData2, QuorumVote2}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -45,7 +46,7 @@ pub(crate) async fn handle_quorum_proposal_validated< I: NodeImplementation, V: Versions, >( - proposal: &QuorumProposal, + proposal: &QuorumProposal2, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { let version = task_state @@ -172,7 +173,7 @@ pub(crate) async fn update_shared_state< view_number: TYPES::View, instance_state: Arc, storage: Arc>, - proposed_leaf: &Leaf, + proposed_leaf: &Leaf2, vid_share: &Proposal>, parent_view_number: Option, ) -> Result<()> { @@ -259,15 +260,11 @@ pub(crate) async fn update_shared_state< // Now that we've rounded everyone up, we need to update the shared state let mut consensus_writer = consensus.write().await; - if let Err(e) = consensus_writer - .update_leaf( - proposed_leaf.clone(), - Arc::clone(&state), - Some(Arc::clone(&delta)), - &upgrade_lock, - ) - .await - { + if let Err(e) = consensus_writer.update_leaf( + proposed_leaf.clone(), + Arc::clone(&state), + Some(Arc::clone(&delta)), + ) { tracing::trace!("{e:?}"); } @@ -280,7 +277,7 @@ pub(crate) async fn update_shared_state< storage .write() .await - .update_undecided_state(new_leaves, new_state) + .update_undecided_state2(new_leaves, new_state) .await .wrap() .context(error!("Failed to update undecided state"))?; @@ -300,7 +297,7 @@ pub(crate) async fn submit_vote, V view_number: TYPES::View, epoch_number: TYPES::Epoch, storage: Arc>, - leaf: Leaf, + leaf: Leaf2, vid_share: Proposal>, extended_vote: bool, ) -> Result<()> { @@ -313,9 +310,9 @@ pub(crate) async fn submit_vote, V ); // Create and send the vote. - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(&upgrade_lock).await, + let vote = QuorumVote2::::create_signed_vote( + QuorumData2 { + leaf_commit: leaf.commit(), }, view_number, &public_key, diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 6ac5fe37dd..393488402f 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -6,14 +6,10 @@ use std::{collections::BTreeMap, sync::Arc}; -use crate::{ - events::HotShotEvent, - helpers::broadcast_event, - quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, -}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; +use committable::Committable; use hotshot_task::{ dependency::{AndDependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, @@ -21,7 +17,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, QuorumProposal}, + data::{Leaf2, QuorumProposal2}, event::Event, message::{Proposal, UpgradeLock}, traits::{ @@ -41,6 +37,12 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; +use crate::{ + events::HotShotEvent, + helpers::broadcast_event, + quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, +}; + /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -107,8 +109,8 @@ impl + 'static, V: Versions> Handl } }; let proposal_payload_comm = proposal.data.block_header.payload_commitment(); - let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + let parent_commitment = parent_leaf.commit(); + let proposed_leaf = Leaf2::from_quorum_proposal(&proposal.data); if version >= V::Epochs::VERSION && self @@ -134,7 +136,7 @@ impl + 'static, V: Versions> Handl } // Update our persistent storage of the proposal. If we cannot store the proposal return // and error so we don't vote - if let Err(e) = self.storage.write().await.append_proposal(proposal).await { + if let Err(e) = self.storage.write().await.append_proposal2(proposal).await { tracing::error!("failed to store proposal, not voting. error = {e:#}"); return; } @@ -222,11 +224,7 @@ impl + 'static, V: Versions> Handl ) .await; - let is_vote_leaf_extended = self - .consensus - .read() - .await - .is_leaf_extended(leaf.commit(&self.upgrade_lock).await); + let is_vote_leaf_extended = self.consensus.read().await.is_leaf_extended(leaf.commit()); if let Err(e) = submit_vote::( self.sender.clone(), Arc::clone(&self.quorum_membership), @@ -588,14 +586,14 @@ impl, V: Versions> QuorumVoteTaskS #[allow(clippy::too_many_lines)] async fn handle_eqc_voting( &self, - proposal: &Proposal>, - parent_leaf: &Leaf, + proposal: &Proposal>, + parent_leaf: &Leaf2, event_sender: Sender>>, event_receiver: Receiver>>, ) { tracing::info!("Reached end of epoch. Justify QC is for the last block in the epoch."); - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); - let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; + let proposed_leaf = Leaf2::from_quorum_proposal(&proposal.data); + let parent_commitment = parent_leaf.commit(); if proposed_leaf.height() != parent_leaf.height() || proposed_leaf.payload_commitment() != parent_leaf.payload_commitment() { @@ -634,7 +632,7 @@ impl, V: Versions> QuorumVoteTaskS } // Update our persistent storage of the proposal. If we cannot store the proposal return // and error so we don't vote - if let Err(e) = self.storage.write().await.append_proposal(proposal).await { + if let Err(e) = self.storage.write().await.append_proposal2(proposal).await { tracing::error!("failed to store proposal, not voting. error = {e:#}"); return; } diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index c9266ae808..6023f5cdfb 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -17,12 +17,13 @@ use either::Either::{self, Left, Right}; use hotshot_types::{ message::UpgradeLock, simple_certificate::{ - DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, + DaVote, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, + ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ election::Membership, @@ -300,7 +301,7 @@ where /// Alias for Quorum vote accumulator type QuorumVoteState = - VoteCollectionTaskState, QuorumCertificate, V>; + VoteCollectionTaskState, QuorumCertificate2, V>; /// Alias for DA vote accumulator type DaVoteState = VoteCollectionTaskState, DaCertificate, V>; /// Alias for Timeout vote accumulator @@ -345,6 +346,24 @@ impl AggregatableVote, QuorumCertifica } } +impl AggregatableVote, QuorumCertificate2> + for QuorumVote2 +{ + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { + membership.leader(self.view_number() + 1, epoch) + } + fn make_cert_event( + certificate: QuorumCertificate2, + _key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::Qc2Formed(Left(certificate)) + } +} + impl AggregatableVote, UpgradeCertificate> for UpgradeVote { @@ -395,7 +414,7 @@ impl AggregatableVote, TimeoutCertifi certificate: TimeoutCertificate, _key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::QcFormed(Right(certificate)) + HotShotEvent::Qc2Formed(Right(certificate)) } } @@ -459,14 +478,14 @@ impl // Handlers for all vote accumulators #[async_trait] impl - HandleVoteEvent, QuorumCertificate> + HandleVoteEvent, QuorumCertificate2> for QuorumVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 1ab38f1f29..0ffc97df37 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -19,9 +19,9 @@ use hotshot_task_impls::{ }; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::QuorumProposal, + data::QuorumProposal2, message::{Proposal, UpgradeLock}, - simple_vote::QuorumVote, + simple_vote::QuorumVote2, traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }; @@ -106,7 +106,7 @@ impl, V: Versions> EventTransforme /// An `EventHandlerState` that modifies justify_qc on `QuorumProposalSend` to that of a previous view to mock dishonest leader pub struct DishonestLeader { /// Store events from previous views - pub validated_proposals: Vec>, + pub validated_proposals: Vec>, /// How many times current node has been elected leader and sent proposal pub total_proposals_from_node: u64, /// Which proposals to be dishonest at @@ -126,7 +126,7 @@ impl DishonestLeader { async fn handle_proposal_send_event( &self, event: &HotShotEvent, - proposal: &Proposal>, + proposal: &Proposal>, sender: &TYPES::SignatureKey, ) -> HotShotEvent { let length = self.validated_proposals.len(); @@ -319,7 +319,7 @@ impl + std::fmt::Debug, V: Version ) -> Vec> { if let HotShotEvent::QuorumVoteSend(vote) = event { let new_view = vote.view_number + self.view_increment; - let spoofed_vote = QuorumVote::::create_signed_vote( + let spoofed_vote = QuorumVote2::::create_signed_vote( vote.data.clone(), new_view, public_key, @@ -373,7 +373,7 @@ impl std::fmt::Debug for DishonestVoting { /// An `EventHandlerState` that will send a vote for a bad proposal pub struct DishonestVoter { /// Collect all votes the node sends - pub votes_sent: Vec>, + pub votes_sent: Vec>, /// Shared state with views numbers that leaders were dishonest at pub dishonest_proposal_view_numbers: Arc>>, } @@ -402,7 +402,7 @@ impl + std::fmt::Debug, V: Version // Create a vote using data from most recent vote and the current event number // We wont update internal consensus state for this Byzantine replica but we are at least // Going to send a vote to the next honest leader - let vote = QuorumVote::::create_signed_vote( + let vote = QuorumVote2::::create_signed_vote( self.votes_sent.last().unwrap().data.clone(), event.view_number().unwrap(), public_key, diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 0d78c4d12c..9d51d50a78 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -9,9 +9,10 @@ use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::{bail, ensure, Context, Result}; use async_trait::async_trait; +use committable::Committable; use hotshot_example_types::block_types::TestBlockHeader; use hotshot_types::{ - data::Leaf, + data::Leaf2, event::{Event, EventType}, message::UpgradeLock, traits::node_implementation::{ConsensusTime, NodeType, Versions}, @@ -24,10 +25,10 @@ use crate::{ }; /// Map from views to leaves for a single node, allowing multiple leaves for each view (because the node may a priori send us multiple leaves for a given view). -pub type NodeMap = BTreeMap<::View, Vec>>; +pub type NodeMap = BTreeMap<::View, Vec>>; /// A sanitized map from views to leaves for a single node, with only a single leaf per view. -pub type NodeMapSanitized = BTreeMap<::View, Leaf>; +pub type NodeMapSanitized = BTreeMap<::View, Leaf2>; /// Validate that the `NodeMap` only has a single leaf per view. fn sanitize_node_map( @@ -104,7 +105,7 @@ async fn validate_node_map( // We want to make sure the commitment matches, // but allow for the possibility that we may have skipped views in between. if child.justify_qc().view_number == parent.view_number() - && child.justify_qc().data.leaf_commit != parent.commit(&upgrade_lock).await + && child.justify_qc().data.leaf_commit != parent.commit() { bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); } @@ -144,7 +145,7 @@ fn sanitize_network_map( Ok(result) } -pub type ViewMap = BTreeMap<::View, BTreeMap>>; +pub type ViewMap = BTreeMap<::View, BTreeMap>>; // Invert the network map by interchanging the roles of the node_id and view number. // @@ -171,7 +172,7 @@ async fn invert_network_map( } /// A view map, sanitized to have exactly one leaf per view. -pub type ViewMapSanitized = BTreeMap<::View, Leaf>; +pub type ViewMapSanitized = BTreeMap<::View, Leaf2>; fn sanitize_view_map( view_map: &ViewMap, diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 40760372c6..49f28e2504 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -26,7 +26,7 @@ use hotshot_example_types::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, + data::{Leaf, Leaf2, QuorumProposal, VidDisperse, VidDisperseShare}, message::{GeneralConsensusMessage, Proposal, UpgradeLock}, simple_certificate::DaCertificate, simple_vote::{DaData, DaVote, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, @@ -437,7 +437,7 @@ where /// This function will create a fake [`View`] from a provided [`Leaf`]. pub async fn build_fake_view_with_leaf( - leaf: Leaf, + leaf: Leaf2, upgrade_lock: &UpgradeLock, ) -> View { build_fake_view_with_leaf_and_state(leaf, TestValidatedState::default(), upgrade_lock).await @@ -445,13 +445,13 @@ pub async fn build_fake_view_with_leaf( /// This function will create a fake [`View`] from a provided [`Leaf`] and `state`. pub async fn build_fake_view_with_leaf_and_state( - leaf: Leaf, + leaf: Leaf2, state: TestValidatedState, - upgrade_lock: &UpgradeLock, + _upgrade_lock: &UpgradeLock, ) -> View { View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(upgrade_lock).await, + leaf: leaf.commit(), state: state.into(), delta: None, }, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index ba7136c90f..c82315d18d 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -15,10 +15,10 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot::{traits::TestableNodeImplementation, HotShotError}; use hotshot_types::{ - data::Leaf, + data::Leaf2, error::RoundTimedoutState, event::{Event, EventType, LeafChain}, - simple_certificate::QuorumCertificate, + simple_certificate::QuorumCertificate2, traits::node_implementation::{ConsensusTime, NodeType, Versions}, vid::VidCommitment, }; @@ -311,7 +311,7 @@ pub struct RoundResult { /// Nodes that committed this round /// id -> (leaf, qc) - success_nodes: HashMap, QuorumCertificate)>, + success_nodes: HashMap, QuorumCertificate2)>, /// Nodes that failed to commit this round pub failed_nodes: HashMap>>, @@ -322,7 +322,7 @@ pub struct RoundResult { /// NOTE: technically a map is not needed /// left one anyway for ease of viewing /// leaf -> # entries decided on that leaf - pub leaf_map: HashMap, usize>, + pub leaf_map: HashMap, usize>, /// block -> # entries decided on that block pub block_map: HashMap, @@ -403,9 +403,9 @@ impl RoundResult { pub fn insert_into_result( &mut self, idx: usize, - result: (LeafChain, QuorumCertificate), + result: (LeafChain, QuorumCertificate2), maybe_block_size: Option, - ) -> Option> { + ) -> Option> { self.success_nodes.insert(idx as u64, result.clone()); let maybe_leaf = result.0.first(); @@ -458,7 +458,7 @@ impl RoundResult { &mut self, threshold: usize, total_num_nodes: usize, - key: &Leaf, + key: &Leaf2, check_leaf: bool, check_block: bool, transaction_threshold: u64, @@ -530,8 +530,8 @@ impl RoundResult { /// generate leaves #[must_use] - pub fn gen_leaves(&self) -> HashMap, usize> { - let mut leaves = HashMap::, usize>::new(); + pub fn gen_leaves(&self) -> HashMap, usize> { + let mut leaves = HashMap::, usize>::new(); for (leaf_vec, _) in self.success_nodes.values() { let most_recent_leaf = leaf_vec.iter().last(); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index e2a299a171..e98726ff51 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -26,9 +26,9 @@ use hotshot_example_types::{ }; use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, - data::Leaf, + data::Leaf2, event::Event, - simple_certificate::QuorumCertificate, + simple_certificate::{QuorumCertificate, QuorumCertificate2}, traits::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, @@ -62,9 +62,9 @@ pub struct SpinningTask< /// most recent view seen by spinning task pub(crate) latest_view: Option, /// Last decided leaf that can be used as the anchor leaf to initialize the node. - pub(crate) last_decided_leaf: Leaf, + pub(crate) last_decided_leaf: Leaf2, /// Highest qc seen in the test for restarting nodes - pub(crate) high_qc: QuorumCertificate, + pub(crate) high_qc: QuorumCertificate2, /// Add specified delay to async calls pub(crate) async_delay_config: DelayConfig, /// Context stored for nodes to be restarted with @@ -247,7 +247,8 @@ where &TestValidatedState::default(), &TestInstanceState::default(), ) - .await, + .await + .to_qc2(), ), read_storage.decided_upgrade_certificate().await, Vec::new(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 2a08fda5e4..3460795db0 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -183,12 +183,14 @@ where &TestValidatedState::default(), &TestInstanceState::default(), ) - .await, + .await + .into(), high_qc: QuorumCertificate::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await, + .await + .to_qc2(), async_delay_config: launcher.metadata.async_delay_config, restart_contexts: HashMap::new(), channel_generator: launcher.resource_generator.channel_generator, @@ -645,7 +647,6 @@ where internal_channel, external_channel, ) - .await } } diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 87a688b74b..53a0d74b7e 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -12,6 +12,7 @@ use std::{ task::{Context, Poll}, }; +use committable::Committable; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ @@ -21,16 +22,16 @@ use hotshot_example_types::{ }; use hotshot_types::{ data::{ - DaProposal, EpochNumber, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, + DaProposal, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, ViewChangeEvidence, ViewNumber, }, message::{Proposal, UpgradeLock}, simple_certificate::{ - DaCertificate, QuorumCertificate, TimeoutCertificate, UpgradeCertificate, - ViewSyncFinalizeCertificate2, + DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncFinalizeCertificate2, }, simple_vote::{ - DaData, DaVote, QuorumData, QuorumVote, TimeoutData, TimeoutVote, UpgradeProposalData, + DaData, DaVote, QuorumData2, QuorumVote2, TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, }, traits::{ @@ -49,8 +50,8 @@ use crate::helpers::{ #[derive(Clone)] pub struct TestView { pub da_proposal: Proposal>, - pub quorum_proposal: Proposal>, - pub leaf: Leaf, + pub quorum_proposal: Proposal>, + pub leaf: Leaf2, pub view_number: ViewNumber, pub epoch_number: EpochNumber, pub quorum_membership: ::Membership, @@ -130,22 +131,26 @@ impl TestView { &TestValidatedState::default(), &TestInstanceState::default(), ) - .await, + .await + .into(), payload_commitment, builder_commitment, metadata, ); - let quorum_proposal_inner = QuorumProposal:: { + let quorum_proposal_inner = QuorumProposal2:: { block_header: block_header.clone(), view_number: genesis_view, justify_qc: QuorumCertificate::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await, + .await + .to_qc2(), upgrade_certificate: None, - proposal_certificate: None, + view_change_evidence: None, + drb_result: [0; 32], + drb_seed: [0; 96], }; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); @@ -166,16 +171,13 @@ impl TestView { _pd: PhantomData, }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal_inner); + let mut leaf = Leaf2::from_quorum_proposal(&quorum_proposal_inner); leaf.fill_block_payload_unchecked(TestBlockPayload { transactions: transactions.clone(), }); - let signature = ::sign( - &private_key, - leaf.commit(&upgrade_lock).await.as_ref(), - ) - .expect("Failed to sign leaf commitment!"); + let signature = ::sign(&private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment!"); let quorum_proposal = Proposal { data: quorum_proposal_inner, @@ -222,8 +224,8 @@ impl TestView { let transactions = &self.transactions; - let quorum_data = QuorumData { - leaf_commit: old.leaf.commit(&self.upgrade_lock).await, + let quorum_data = QuorumData2 { + leaf_commit: old.leaf.commit(), }; let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); @@ -274,9 +276,9 @@ impl TestView { let quorum_certificate = build_cert::< TestTypes, TestVersions, - QuorumData, - QuorumVote, - QuorumCertificate, + QuorumData2, + QuorumVote2, + QuorumCertificate2, >( quorum_data, quorum_membership, @@ -357,7 +359,7 @@ impl TestView { None }; - let proposal_certificate = if let Some(tc) = timeout_certificate { + let view_change_evidence = if let Some(tc) = timeout_certificate { Some(ViewChangeEvidence::Timeout(tc)) } else { view_sync_certificate.map(ViewChangeEvidence::ViewSync) @@ -374,24 +376,23 @@ impl TestView { random, }; - let proposal = QuorumProposal:: { + let proposal = QuorumProposal2:: { block_header: block_header.clone(), view_number: next_view, justify_qc: quorum_certificate.clone(), upgrade_certificate: upgrade_certificate.clone(), - proposal_certificate, + view_change_evidence, + drb_result: [0; 32], + drb_seed: [0; 96], }; - let mut leaf = Leaf::from_quorum_proposal(&proposal); + let mut leaf = Leaf2::from_quorum_proposal(&proposal); leaf.fill_block_payload_unchecked(TestBlockPayload { transactions: transactions.clone(), }); - let signature = ::sign( - &private_key, - leaf.commit(&self.upgrade_lock).await.as_ref(), - ) - .expect("Failed to sign leaf commitment."); + let signature = ::sign(&private_key, leaf.commit().as_ref()) + .expect("Failed to sign leaf commitment."); let quorum_proposal = Proposal { data: proposal, @@ -451,10 +452,10 @@ impl TestView { pub async fn create_quorum_vote( &self, handle: &SystemContextHandle, - ) -> QuorumVote { - QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: self.leaf.commit(&handle.hotshot.upgrade_lock).await, + ) -> QuorumVote2 { + QuorumVote2::::create_signed_vote( + QuorumData2 { + leaf_commit: self.leaf.commit(), }, self.view_number, &handle.public_key(), diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 92c8cb1da7..5e0ff49eaa 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -57,3 +57,77 @@ fn version_number_at_start_of_serialization() { assert_eq!(version.major, version_read.major); assert_eq!(version.minor, version_read.minor); } + +#[cfg(test)] +#[tokio::test(flavor = "multi_thread")] +async fn test_certificate2_validity() { + use futures::StreamExt; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; + use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; + use hotshot_types::{ + data::{EpochNumber, Leaf, Leaf2}, + traits::node_implementation::ConsensusTime, + vote::Certificate, + }; + + hotshot::helpers::initialize_logging(); + + let node_id = 1; + let handle = build_system_handle::(node_id) + .await + .0; + let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + + let mut proposals = Vec::new(); + let mut leaders = Vec::new(); + let mut leaves = Vec::new(); + let mut vids = Vec::new(); + let mut vid_dispersals = Vec::new(); + + for view in (&mut generator).take(4).collect::>().await { + proposals.push(view.quorum_proposal.clone()); + leaders.push(view.leader_public_key); + leaves.push(view.leaf.clone()); + vids.push(view.vid_proposal.clone()); + vid_dispersals.push(view.vid_disperse.clone()); + } + + let proposal = proposals[3].clone(); + let parent_proposal = proposals[2].clone(); + + // ensure that we don't break certificate validation + let qc2 = proposal.data.justify_qc.clone(); + let qc = qc2.clone().to_qc(); + + assert!( + qc.is_valid_cert( + &quorum_membership, + EpochNumber::new(0), + &handle.hotshot.upgrade_lock + ) + .await + ); + + assert!( + qc2.is_valid_cert( + &quorum_membership, + EpochNumber::new(0), + &handle.hotshot.upgrade_lock + ) + .await + ); + + // ensure that we don't break the leaf commitment chain + let leaf2 = Leaf2::from_quorum_proposal(&proposal.data); + let parent_leaf2 = Leaf2::from_quorum_proposal(&parent_proposal.data); + + let leaf = Leaf::from_quorum_proposal(&proposal.data.into()); + let parent_leaf = Leaf::from_quorum_proposal(&parent_proposal.data.into()); + + assert!(leaf.parent_commitment() == parent_leaf.commit(&handle.hotshot.upgrade_lock).await); + + assert!(leaf2.parent_commitment() == parent_leaf2.commit()); +} diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index dc42dfc723..7a1636d9c6 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -27,9 +27,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{Leaf, ViewNumber}, + data::{EpochNumber, Leaf2, ViewNumber}, request_response::ProposalRequestPayload, traits::{ consensus_api::ConsensusApi, @@ -78,12 +77,10 @@ async fn test_quorum_proposal_recv_task() { // to that, we'll just put them in here. consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); @@ -125,7 +122,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { helpers::{build_fake_view_with_leaf, build_fake_view_with_leaf_and_state}, script::{Expectations, TaskScript}, }; - use hotshot_types::{data::Leaf, vote::HasViewNumber}; + use hotshot_types::{data::Leaf2, vote::HasViewNumber}; hotshot::helpers::initialize_logging(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index d8ae3e2b2b..f522d28f7c 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -25,7 +25,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, EpochNumber, Leaf, ViewChangeEvidence, ViewNumber}, + data::{null_block, EpochNumber, Leaf2, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, traits::{ election::Membership, @@ -79,12 +79,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { // to make sure they show up during tests. consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } @@ -105,7 +103,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { handle.public_key() )], random![ - QcFormed(either::Left(genesis_cert.clone())), + Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -169,12 +167,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // to make sure they show up during tests. consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } @@ -194,7 +190,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let inputs = vec![ random![ - QcFormed(either::Left(genesis_cert.clone())), + Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -213,7 +209,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), - QcFormed(either::Left(proposals[1].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -230,7 +226,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), - QcFormed(either::Left(proposals[2].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -247,7 +243,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), - QcFormed(either::Left(proposals[3].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -264,7 +260,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), - QcFormed(either::Left(proposals[4].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -349,13 +345,13 @@ async fn test_quorum_proposal_task_qc_timeout() { } // Get the proposal cert out for the view sync input - let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { + let cert = match proposals[1].data.view_change_evidence.clone().unwrap() { ViewChangeEvidence::Timeout(tc) => tc, _ => panic!("Found a View Sync Cert when there should have been a Timeout cert"), }; let inputs = vec![random![ - QcFormed(either::Right(cert.clone())), + Qc2Formed(either::Right(cert.clone())), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, @@ -439,7 +435,7 @@ async fn test_quorum_proposal_task_view_sync() { } // Get the proposal cert out for the view sync input - let cert = match proposals[1].data.proposal_certificate.clone().unwrap() { + let cert = match proposals[1].data.view_change_evidence.clone().unwrap() { ViewChangeEvidence::ViewSync(vsc) => vsc, _ => panic!("Found a TC when there should have been a view sync cert"), }; @@ -511,12 +507,10 @@ async fn test_quorum_proposal_task_liveness_check() { // to make sure they show up during tests. consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); @@ -535,7 +529,7 @@ async fn test_quorum_proposal_task_liveness_check() { let inputs = vec![ random![ - QcFormed(either::Left(genesis_cert.clone())), + Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -554,7 +548,7 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), - QcFormed(either::Left(proposals[1].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -571,7 +565,7 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), - QcFormed(either::Left(proposals[2].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -588,7 +582,7 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), - QcFormed(either::Left(proposals[3].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -605,7 +599,7 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), - QcFormed(either::Left(proposals[4].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b4350abe3c..f5c5940b20 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -23,7 +23,7 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, }; use hotshot_types::{ - data::{EpochNumber, Leaf, ViewNumber}, + data::{EpochNumber, Leaf2, ViewNumber}, traits::node_implementation::ConsensusTime, }; @@ -64,12 +64,10 @@ async fn test_quorum_vote_task_success() { vids.push(view.vid_proposal.clone()); consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); @@ -136,12 +134,10 @@ async fn test_quorum_vote_task_miss_dependency() { consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index ac4b338f37..0cef44ab81 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -30,7 +30,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, EpochNumber, Leaf, ViewNumber}, + data::{null_block, EpochNumber, Leaf2, ViewNumber}, simple_vote::UpgradeProposalData, traits::{ election::Membership, @@ -96,12 +96,10 @@ async fn test_upgrade_task_with_proposal() { views.push(view.clone()); consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } @@ -117,12 +115,10 @@ async fn test_upgrade_task_with_proposal() { views.push(view.clone()); consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); @@ -155,7 +151,7 @@ async fn test_upgrade_task_with_proposal() { let inputs = vec![ random![ - QcFormed(either::Left(genesis_cert.clone())), + Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -174,7 +170,7 @@ async fn test_upgrade_task_with_proposal() { ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), - QcFormed(either::Left(proposals[1].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, @@ -192,7 +188,7 @@ async fn test_upgrade_task_with_proposal() { InputOrder::Random(upgrade_vote_recvs), random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), - QcFormed(either::Left(proposals[2].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &quorum_membership, diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 72790bec4d..cc942451f1 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -29,7 +29,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, EpochNumber, Leaf, ViewNumber}, + data::{null_block, EpochNumber, Leaf2, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, vote::HasViewNumber, @@ -82,12 +82,10 @@ async fn test_upgrade_task_with_vote() { leaves.push(view.leaf.clone()); consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 85827dbbdb..ec313ec2a7 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -10,9 +10,9 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, }; use hotshot_testing::helpers::build_system_handle; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, simple_vote::ViewSyncPreCommitData, + data::{EpochNumber, ViewNumber}, + simple_vote::ViewSyncPreCommitData, traits::node_implementation::ConsensusTime, }; diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 9a58e4046c..f22d658d34 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -15,7 +15,7 @@ use hotshot_testing::{ }; use hotshot_types::{ consensus::OuterConsensus, - data::{EpochNumber, Leaf, ViewNumber}, + data::{EpochNumber, Leaf2, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, }; use itertools::Itertools; @@ -55,12 +55,10 @@ async fn test_vote_dependency_handle() { vids.push(view.vid_proposal.clone()); consensus_writer .update_leaf( - Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Leaf2::from_quorum_proposal(&view.quorum_proposal.data), Arc::new(TestValidatedState::default()), None, - &handle.hotshot.upgrade_lock, ) - .await .unwrap(); } drop(consensus_writer); diff --git a/types/Cargo.toml b/types/Cargo.toml index 4be463a1cb..c15d4865a3 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -25,7 +25,6 @@ either = { workspace = true } ethereum-types = { workspace = true } futures = { workspace = true } cdn-proto = { workspace = true } -serde-inline-default = "0.2" lazy_static = { workspace = true } memoize = { workspace = true } rand = { workspace = true } @@ -41,6 +40,8 @@ jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } +serde-inline-default = { workspace = true } +serde_bytes = { workspace = true } tagged-base64 = { workspace = true } vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 499430c0ca..a27220a7ed 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -21,15 +21,15 @@ use vec1::Vec1; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf, QuorumProposal, VidDisperse, VidDisperseShare}, + data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare}, error::HotShotError, event::{HotShotAction, LeafInfo}, - message::{Proposal, UpgradeLock}, - simple_certificate::{DaCertificate, QuorumCertificate}, + message::Proposal, + simple_certificate::{DaCertificate, QuorumCertificate2}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, @@ -291,7 +291,7 @@ pub struct Consensus { /// Last proposals we sent out, None if we haven't proposed yet. /// Prevents duplicate proposals, and can be served to those trying to catchup - last_proposals: BTreeMap>>, + last_proposals: BTreeMap>>, /// last view had a successful decide event last_decided_view: TYPES::View, @@ -302,7 +302,7 @@ pub struct Consensus { /// Map of leaf hash -> leaf /// - contains undecided leaves /// - includes the MOST RECENT decided leaf - saved_leaves: CommitmentMap>, + saved_leaves: CommitmentMap>, /// Bundle of views which we performed the most recent action /// visibible to the network. Actions are votes and proposals @@ -315,7 +315,7 @@ pub struct Consensus { saved_payloads: BTreeMap>, /// the highqc per spec - high_qc: QuorumCertificate, + high_qc: QuorumCertificate2, /// A reference to the metrics trait pub metrics: Arc, @@ -405,10 +405,10 @@ impl Consensus { locked_view: TYPES::View, last_decided_view: TYPES::View, last_actioned_view: TYPES::View, - last_proposals: BTreeMap>>, - saved_leaves: CommitmentMap>, + last_proposals: BTreeMap>>, + saved_leaves: CommitmentMap>, saved_payloads: BTreeMap>, - high_qc: QuorumCertificate, + high_qc: QuorumCertificate2, metrics: Arc, epoch_height: u64, ) -> Self { @@ -451,7 +451,7 @@ impl Consensus { } /// Get the high QC. - pub fn high_qc(&self) -> &QuorumCertificate { + pub fn high_qc(&self) -> &QuorumCertificate2 { &self.high_qc } @@ -461,7 +461,7 @@ impl Consensus { } /// Get the saved leaves. - pub fn saved_leaves(&self) -> &CommitmentMap> { + pub fn saved_leaves(&self) -> &CommitmentMap> { &self.saved_leaves } @@ -481,7 +481,9 @@ impl Consensus { } /// Get the map of our recent proposals - pub fn last_proposals(&self) -> &BTreeMap>> { + pub fn last_proposals( + &self, + ) -> &BTreeMap>> { &self.last_proposals } @@ -501,7 +503,7 @@ impl Consensus { /// Returns None if we don't have the data in out state pub fn parent_leaf_info( &self, - leaf: &Leaf, + leaf: &Leaf2, public_key: &TYPES::SignatureKey, ) -> Option> { let parent_view_number = leaf.justify_qc().view_number(); @@ -578,7 +580,7 @@ impl Consensus { /// Can return an error when the new view_number is not higher than the existing proposed view number. pub fn update_proposed_view( &mut self, - proposal: Proposal>, + proposal: Proposal>, ) -> Result<()> { ensure!( proposal.data.view_number() @@ -640,23 +642,22 @@ impl Consensus { /// # Errors /// Can return an error when the new view contains less information than the exisiting view /// with the same view number. - pub async fn update_leaf( + pub fn update_leaf( &mut self, - leaf: Leaf, + leaf: Leaf2, state: Arc, delta: Option>::Delta>>, - upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = leaf.view_number(); let view = View { view_inner: ViewInner::Leaf { - leaf: leaf.commit(upgrade_lock).await, + leaf: leaf.commit(), state, delta, }, }; self.update_validated_state_map(view_number, view)?; - self.update_saved_leaves(leaf, upgrade_lock).await; + self.update_saved_leaves(leaf); Ok(()) } @@ -695,13 +696,8 @@ impl Consensus { } /// Update the saved leaves with a new leaf. - async fn update_saved_leaves( - &mut self, - leaf: Leaf, - upgrade_lock: &UpgradeLock, - ) { - self.saved_leaves - .insert(leaf.commit(upgrade_lock).await, leaf); + fn update_saved_leaves(&mut self, leaf: Leaf2) { + self.saved_leaves.insert(leaf.commit(), leaf); } /// Update the saved payloads with a new encoded transaction. @@ -724,7 +720,7 @@ impl Consensus { /// Update the high QC if given a newer one. /// # Errors /// Can return an error when the provided high_qc is not newer than the existing entry. - pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { + pub fn update_high_qc(&mut self, high_qc: QuorumCertificate2) -> Result<()> { ensure!( high_qc.view_number > self.high_qc.view_number || high_qc == self.high_qc, debug!("High QC with an equal or higher view exists.") @@ -764,7 +760,7 @@ impl Consensus { ) -> std::result::Result<(), HotShotError> where F: FnMut( - &Leaf, + &Leaf2, Arc<::ValidatedState>, Option::ValidatedState as ValidatedState>::Delta>>, ) -> bool, @@ -851,7 +847,7 @@ impl Consensus { /// if the last decided view's leaf does not exist in the state map or saved leaves, which /// should never happen. #[must_use] - pub fn decided_leaf(&self) -> Leaf { + pub fn decided_leaf(&self) -> Leaf2 { let decided_view_num = self.last_decided_view; let view = self.validated_state_map.get(&decided_view_num).unwrap(); let leaf = view @@ -918,7 +914,7 @@ impl Consensus { /// Return true if the QC takes part in forming an eQC, i.e. /// it is one of the 3-chain certificates but not the eQC itself - pub fn is_qc_forming_eqc(&self, qc: &QuorumCertificate) -> bool { + pub fn is_qc_forming_eqc(&self, qc: &QuorumCertificate2) -> bool { let high_qc_leaf_commit = qc.data.leaf_commit; let is_high_qc_extended = self.is_leaf_extended(high_qc_leaf_commit); if is_high_qc_extended { @@ -1007,14 +1003,14 @@ impl Consensus { } /// Returns true if the `parent_leaf` formed an eQC for the previous epoch to the `proposed_leaf` - pub fn check_eqc(&self, proposed_leaf: &Leaf, parent_leaf: &Leaf) -> bool { + pub fn check_eqc(&self, proposed_leaf: &Leaf2, parent_leaf: &Leaf2) -> bool { if parent_leaf.view_number() == TYPES::View::genesis() { return true; } let new_epoch = epoch_from_block_number(proposed_leaf.height(), self.epoch_height); let old_epoch = epoch_from_block_number(parent_leaf.height(), self.epoch_height); - let parent_leaf_commit = as Committable>::commit(parent_leaf); - new_epoch - 1 == old_epoch && self.is_leaf_extended(parent_leaf_commit) + + new_epoch - 1 == old_epoch && self.is_leaf_extended(parent_leaf.commit()) } } diff --git a/types/src/data.rs b/types/src/data.rs index fc5506ff25..b97c84a62c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -33,7 +33,8 @@ use vec1::Vec1; use crate::{ message::{Proposal, UpgradeLock}, simple_certificate::{ - QuorumCertificate, TimeoutCertificate, UpgradeCertificate, ViewSyncFinalizeCertificate2, + QuorumCertificate, QuorumCertificate2, TimeoutCertificate, UpgradeCertificate, + ViewSyncFinalizeCertificate2, }, simple_vote::{QuorumData, UpgradeProposalData, VersionedVoteData}, traits::{ @@ -372,6 +373,78 @@ pub struct QuorumProposal { pub proposal_certificate: Option>, } +/// Proposal to append a block. +#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QuorumProposal2 { + /// The block header to append + pub block_header: TYPES::BlockHeader, + + /// view number for the proposal + pub view_number: TYPES::View, + + /// certificate that the proposal is chaining from + pub justify_qc: QuorumCertificate2, + + /// Possible upgrade certificate, which the leader may optionally attach. + pub upgrade_certificate: Option>, + + /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. + pub view_change_evidence: Option>, + + /// the DRB seed currently being calculated + #[serde(with = "serde_bytes")] + pub drb_seed: [u8; 96], + + /// the result of the DRB calculation + #[serde(with = "serde_bytes")] + pub drb_result: [u8; 32], +} + +impl From> for QuorumProposal2 { + fn from(quorum_proposal: QuorumProposal) -> Self { + Self { + block_header: quorum_proposal.block_header, + view_number: quorum_proposal.view_number, + justify_qc: quorum_proposal.justify_qc.to_qc2(), + upgrade_certificate: quorum_proposal.upgrade_certificate, + view_change_evidence: quorum_proposal.proposal_certificate, + drb_seed: [0; 96], + drb_result: [0; 32], + } + } +} + +impl From> for QuorumProposal { + fn from(quorum_proposal: QuorumProposal2) -> Self { + Self { + block_header: quorum_proposal.block_header, + view_number: quorum_proposal.view_number, + justify_qc: quorum_proposal.justify_qc.to_qc(), + upgrade_certificate: quorum_proposal.upgrade_certificate, + proposal_certificate: quorum_proposal.view_change_evidence, + } + } +} + +impl From> for Leaf2 { + fn from(leaf: Leaf) -> Self { + let bytes: [u8; 32] = leaf.parent_commitment.into(); + + Self { + view_number: leaf.view_number, + justify_qc: leaf.justify_qc.to_qc2(), + parent_commitment: Commitment::from_raw(bytes), + block_header: leaf.block_header, + upgrade_certificate: leaf.upgrade_certificate, + block_payload: leaf.block_payload, + view_change_evidence: None, + drb_seed: [0; 96], + drb_result: [0; 32], + } + } +} + impl HasViewNumber for DaProposal { fn view_number(&self) -> TYPES::View { self.view_number @@ -396,6 +469,12 @@ impl HasViewNumber for QuorumProposal { } } +impl HasViewNumber for QuorumProposal2 { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + impl HasViewNumber for UpgradeProposal { fn view_number(&self) -> TYPES::View { self.view_number @@ -455,6 +534,183 @@ pub struct Leaf { block_payload: Option, } +/// This is the consensus-internal analogous concept to a block, and it contains the block proper, +/// as well as the hash of its parent `Leaf`. +#[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] +#[serde(bound(deserialize = ""))] +pub struct Leaf2 { + /// CurView from leader when proposing leaf + view_number: TYPES::View, + + /// Per spec, justification + justify_qc: QuorumCertificate2, + + /// The hash of the parent `Leaf` + /// So we can ask if it extends + parent_commitment: Commitment, + + /// Block header. + block_header: TYPES::BlockHeader, + + /// Optional upgrade certificate, if one was attached to the quorum proposal for this view. + upgrade_certificate: Option>, + + /// Optional block payload. + /// + /// It may be empty for nodes not in the DA committee. + block_payload: Option, + + /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. + pub view_change_evidence: Option>, + + /// the DRB seed currently being calculated + #[serde(with = "serde_bytes")] + pub drb_seed: [u8; 96], + + /// the result of the DRB calculation + #[serde(with = "serde_bytes")] + pub drb_result: [u8; 32], +} + +impl Leaf2 { + /// Time when this leaf was created. + pub fn view_number(&self) -> TYPES::View { + self.view_number + } + /// Height of this leaf in the chain. + /// + /// Equivalently, this is the number of leaves before this one in the chain. + pub fn height(&self) -> u64 { + self.block_header.block_number() + } + /// The QC linking this leaf to its parent in the chain. + pub fn justify_qc(&self) -> QuorumCertificate2 { + self.justify_qc.clone() + } + /// The QC linking this leaf to its parent in the chain. + pub fn upgrade_certificate(&self) -> Option> { + self.upgrade_certificate.clone() + } + /// Commitment to this leaf's parent. + pub fn parent_commitment(&self) -> Commitment { + self.parent_commitment + } + /// The block header contained in this leaf. + pub fn block_header(&self) -> &::BlockHeader { + &self.block_header + } + + /// Get a mutable reference to the block header contained in this leaf. + pub fn block_header_mut(&mut self) -> &mut ::BlockHeader { + &mut self.block_header + } + /// Fill this leaf with the block payload. + /// + /// # Errors + /// + /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()` + /// or if the transactions are of invalid length + pub fn fill_block_payload( + &mut self, + block_payload: TYPES::BlockPayload, + num_storage_nodes: usize, + ) -> std::result::Result<(), BlockError> { + let encoded_txns = block_payload.encode(); + let commitment = vid_commitment(&encoded_txns, num_storage_nodes); + if commitment != self.block_header.payload_commitment() { + return Err(BlockError::InconsistentPayloadCommitment); + } + self.block_payload = Some(block_payload); + Ok(()) + } + + /// Take the block payload from the leaf and return it if it is present + pub fn unfill_block_payload(&mut self) -> Option { + self.block_payload.take() + } + + /// Fill this leaf with the block payload, without checking + /// header and payload consistency + pub fn fill_block_payload_unchecked(&mut self, block_payload: TYPES::BlockPayload) { + self.block_payload = Some(block_payload); + } + + /// Optional block payload. + pub fn block_payload(&self) -> Option { + self.block_payload.clone() + } + + /// A commitment to the block payload contained in this leaf. + pub fn payload_commitment(&self) -> VidCommitment { + self.block_header().payload_commitment() + } + + /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf + /// + /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. + /// + /// # Errors + /// Returns an error if the certificates are not identical, or that when we no longer see a + /// cert, it's for the right reason. + pub async fn extends_upgrade( + &self, + parent: &Self, + decided_upgrade_certificate: &Arc>>>, + ) -> Result<()> { + match (self.upgrade_certificate(), parent.upgrade_certificate()) { + // Easiest cases are: + // - no upgrade certificate on either: this is the most common case, and is always fine. + // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. + (None | Some(_), None) => {} + // If we no longer see a cert, we have to make sure that we either: + // - no longer care because we have passed new_version_first_view, or + // - no longer care because we have passed `decide_by` without deciding the certificate. + (None, Some(parent_cert)) => { + let decided_upgrade_certificate_read = decided_upgrade_certificate.read().await; + ensure!(self.view_number() > parent_cert.data.new_version_first_view + || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), + "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." + ); + } + // If we both have a certificate, they should be identical. + // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. + // I think this is a fairly lax restriction. + (Some(cert), Some(parent_cert)) => { + ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); + } + } + + // This check should be added once we sort out the genesis leaf/justify_qc issue. + // ensure!(self.parent_commitment() == parent_leaf.commit(), "The commitment of the parent leaf does not match the specified parent commitment."); + + Ok(()) + } +} + +impl Committable for Leaf2 { + fn commit(&self) -> committable::Commitment { + if self.drb_seed == [0; 96] && self.drb_result == [0; 32] { + RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .field("parent leaf commitment", self.parent_commitment) + .field("block header", self.block_header.commit()) + .field("justify qc", self.justify_qc.commit()) + .optional("upgrade certificate", &self.upgrade_certificate) + .finalize() + } else { + RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .field("parent leaf commitment", self.parent_commitment) + .field("block header", self.block_header.commit()) + .field("justify qc", self.justify_qc.commit()) + .optional("upgrade certificate", &self.upgrade_certificate) + .fixed_size_bytes(&self.drb_seed) + .fixed_size_bytes(&self.drb_result) + .finalize() + } + } +} + impl Leaf { #[allow(clippy::unused_async)] /// Calculate the leaf commitment, @@ -476,6 +732,31 @@ impl PartialEq for Leaf { } } +impl PartialEq for Leaf2 { + fn eq(&self, other: &Self) -> bool { + let Leaf2 { + view_number, + justify_qc, + parent_commitment, + block_header, + upgrade_certificate, + block_payload: _, + view_change_evidence, + drb_seed, + drb_result, + } = self; + + *view_number == other.view_number + && *justify_qc == other.justify_qc + && *parent_commitment == other.parent_commitment + && *block_header == other.block_header + && *upgrade_certificate == other.upgrade_certificate + && *view_change_evidence == other.view_change_evidence + && *drb_seed == other.drb_seed + && *drb_result == other.drb_result + } +} + impl Hash for Leaf { fn hash(&self, state: &mut H) { self.view_number.hash(state); @@ -485,6 +766,16 @@ impl Hash for Leaf { } } +impl Hash for Leaf2 { + fn hash(&self, state: &mut H) { + self.commit().hash(state); + self.view_number.hash(state); + self.justify_qc.hash(state); + self.parent_commitment.hash(state); + self.block_header.hash(state); + } +} + impl Display for Leaf { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( @@ -712,6 +1003,22 @@ where TYPES::ValidatedState::create_random_transaction(None, rng, padding) } } +impl TestableLeaf for Leaf2 +where + TYPES::ValidatedState: TestableState, + TYPES::BlockPayload: TestableBlock, +{ + type NodeType = TYPES; + + fn create_random_transaction( + &self, + rng: &mut dyn rand::RngCore, + padding: u64, + ) -> <::BlockPayload as BlockPayload>::Transaction + { + TYPES::ValidatedState::create_random_transaction(None, rng, padding) + } +} /// Fake the thing a genesis block points to. Needed to avoid infinite recursion #[must_use] pub fn fake_commitment() -> Commitment { @@ -763,6 +1070,35 @@ impl Committable for Leaf { } } +impl Leaf2 { + /// Constructs a leaf from a given quorum proposal. + pub fn from_quorum_proposal(quorum_proposal: &QuorumProposal2) -> Self { + // WARNING: Do NOT change this to a wildcard match, or reference the fields directly in the construction of the leaf. + // The point of this match is that we will get a compile-time error if we add a field without updating this. + let QuorumProposal2 { + view_number, + justify_qc, + block_header, + upgrade_certificate, + view_change_evidence, + drb_seed, + drb_result, + } = quorum_proposal; + + Self { + view_number: *view_number, + justify_qc: justify_qc.clone(), + parent_commitment: justify_qc.data().leaf_commit, + block_header: block_header.clone(), + upgrade_certificate: upgrade_certificate.clone(), + block_payload: None, + view_change_evidence: view_change_evidence.clone(), + drb_seed: *drb_seed, + drb_result: *drb_result, + } + } +} + impl Leaf { /// Constructs a leaf from a given quorum proposal. pub fn from_quorum_proposal(quorum_proposal: &QuorumProposal) -> Self { @@ -775,7 +1111,8 @@ impl Leaf { upgrade_certificate, proposal_certificate: _, } = quorum_proposal; - Leaf { + + Self { view_number: *view_number, justify_qc: justify_qc.clone(), parent_commitment: justify_qc.data().leaf_commit, diff --git a/types/src/error.rs b/types/src/error.rs index f6c25e376f..5357bb8bbd 100644 --- a/types/src/error.rs +++ b/types/src/error.rs @@ -13,7 +13,7 @@ use committable::Commitment; use serde::{Deserialize, Serialize}; use thiserror::Error; -use crate::{data::Leaf, traits::node_implementation::NodeType}; +use crate::{data::Leaf2, traits::node_implementation::NodeType}; /// Error type for `HotShot` #[derive(Debug, Error)] @@ -25,7 +25,7 @@ pub enum HotShotError { /// Leaf was not present in storage #[error("Missing leaf with commitment: {0}")] - MissingLeaf(Commitment>), + MissingLeaf(Commitment>), /// Failed to serialize data #[error("Failed to serialize: {0}")] @@ -49,8 +49,8 @@ pub enum HotShotError { #[derive(Debug, Clone, Serialize, Deserialize)] #[non_exhaustive] pub enum RoundTimedoutState { - /// Leader is in a Prepare phase and is waiting for a HighQC - LeaderWaitingForHighQC, + /// Leader is in a Prepare phase and is waiting for a HighQc + LeaderWaitingForHighQc, /// Leader is in a Prepare phase and timed out before the round min time is reached LeaderMinRoundTimeNotReached, /// Leader is waiting for prepare votes diff --git a/types/src/event.rs b/types/src/event.rs index f9d48779e8..02d7f07ac2 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -11,10 +11,10 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, + data::{DaProposal, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare}, error::HotShotError, message::Proposal, - simple_certificate::QuorumCertificate, + simple_certificate::QuorumCertificate2, traits::{node_implementation::NodeType, ValidatedState}, }; /// A status event emitted by a `HotShot` instance @@ -35,7 +35,7 @@ pub struct Event { #[serde(bound(deserialize = "TYPES: NodeType"))] pub struct LeafInfo { /// Decided leaf. - pub leaf: Leaf, + pub leaf: Leaf2, /// Validated state. pub state: Arc<::ValidatedState>, /// Optional application-specific state delta. @@ -47,7 +47,7 @@ pub struct LeafInfo { impl LeafInfo { /// Constructor. pub fn new( - leaf: Leaf, + leaf: Leaf2, state: Arc<::ValidatedState>, delta: Option::ValidatedState as ValidatedState>::Delta>>, vid_share: Option>, @@ -100,6 +100,7 @@ pub mod error_adaptor { #[non_exhaustive] #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound(deserialize = "TYPES: NodeType"))] +#[allow(clippy::large_enum_variant)] pub enum EventType { /// A view encountered an error and was interrupted Error { @@ -121,7 +122,7 @@ pub enum EventType { /// /// Note that the QC for each additional leaf in the chain can be obtained from the leaf /// before it using - qc: Arc>, + qc: Arc>, /// Optional information of the number of transactions in the block, for logging purposes. block_size: Option, }, @@ -158,7 +159,7 @@ pub enum EventType { /// or submitted to the network by us QuorumProposal { /// Contents of the proposal - proposal: Proposal>, + proposal: Proposal>, /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, diff --git a/types/src/message.rs b/types/src/message.rs index bf31005c9a..f57b41ec0d 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -17,6 +17,7 @@ use std::{ use async_lock::RwLock; use cdn_proto::util::mnemonic; +use committable::Committable; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use utils::anytrace::*; @@ -26,7 +27,9 @@ use vbs::{ }; use crate::{ - data::{DaProposal, Leaf, QuorumProposal, UpgradeProposal, VidDisperseShare}, + data::{ + DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, VidDisperseShare, + }, request_response::ProposalRequestPayload, simple_certificate::{ DaCertificate, QuorumCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, @@ -208,7 +211,7 @@ pub enum GeneralConsensusMessage { ProposalResponse(Proposal>), /// Message for the next leader containing our highest QC - HighQC(QuorumCertificate), + HighQc(QuorumCertificate), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -274,7 +277,7 @@ impl SequencingMessage { } GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), - GeneralConsensusMessage::HighQC(qc) => qc.view_number(), + GeneralConsensusMessage::HighQc(qc) => qc.view_number(), } } SequencingMessage::Da(da_message) => { @@ -323,6 +326,22 @@ pub struct Proposal + Deserializ pub _pd: PhantomData, } +/// Convert a `Proposal` by converting the underlying proposal type +pub fn convert_proposal( + proposal: Proposal, +) -> Proposal +where + TYPES: NodeType, + PROPOSAL: HasViewNumber + DeserializeOwned, + PROPOSAL2: HasViewNumber + DeserializeOwned + From, +{ + Proposal { + data: proposal.data.into(), + signature: proposal.signature, + _pd: proposal._pd, + } +} + impl Proposal> where TYPES: NodeType, @@ -352,6 +371,31 @@ where } } +impl Proposal> +where + TYPES: NodeType, +{ + /// Checks that the signature of the quorum proposal is valid. + /// # Errors + /// Returns an error when the proposal signature is invalid. + pub fn validate_signature( + &self, + quorum_membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result<()> { + let view_number = self.data.view_number(); + let view_leader_key = quorum_membership.leader(view_number, epoch)?; + let proposed_leaf = Leaf2::from_quorum_proposal(&self.data); + + ensure!( + view_leader_key.validate(&self.signature, proposed_leaf.commit().as_ref()), + "Proposal signature is invalid." + ); + + Ok(()) + } +} + #[derive(Clone, Debug)] /// A lock for an upgrade certificate decided by HotShot, which doubles as `PhantomData` for an instance of the `Versions` trait. pub struct UpgradeLock { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index f7f58fd4e1..9c75b294a3 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -23,7 +23,7 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, QuorumData, TimeoutData, UpgradeProposalData, VersionedVoteData, + DaData, QuorumData, QuorumData2, TimeoutData, UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, traits::{ @@ -249,8 +249,52 @@ impl UpgradeCertificate { } } -/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` of `QuorumVotes` +impl QuorumCertificate { + /// Convert a `QuorumCertificate` into a `QuorumCertificate2` + pub fn to_qc2(self) -> QuorumCertificate2 { + let bytes: [u8; 32] = self.data.leaf_commit.into(); + let data = QuorumData2 { + leaf_commit: Commitment::from_raw(bytes), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl QuorumCertificate2 { + /// Convert a `QuorumCertificate2` into a `QuorumCertificate` + pub fn to_qc(self) -> QuorumCertificate { + let bytes: [u8; 32] = self.data.leaf_commit.into(); + let data = QuorumData { + leaf_commit: Commitment::from_raw(bytes), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +/// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` over `QuorumData` pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `QuorumCertificate2`, which is a `SimpleCertificate` over `QuorumData2` +pub type QuorumCertificate2 = SimpleCertificate, SuccessThreshold>; /// Type alias for a DA certificate over `DaData` pub type DaCertificate = SimpleCertificate; /// Type alias for a Timeout certificate over a view number diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index f5111084b8..013653a959 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -14,7 +14,7 @@ use utils::anytrace::*; use vbs::version::Version; use crate::{ - data::Leaf, + data::{Leaf, Leaf2}, message::UpgradeLock, traits::{ node_implementation::{NodeType, Versions}, @@ -32,6 +32,13 @@ pub struct QuorumData { pub leaf_commit: Commitment>, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a yes vote. +#[serde(bound(deserialize = ""))] +pub struct QuorumData2 { + /// Commitment to the leaf + pub leaf_commit: Commitment>, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. pub struct DaData { /// Commitment to a block payload @@ -257,6 +264,14 @@ impl Committable for QuorumData { } } +impl Committable for QuorumData2 { + fn commit(&self) -> Commitment { + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(self.leaf_commit.as_ref()) + .finalize() + } +} + impl Committable for TimeoutData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("Timeout data") @@ -331,9 +346,50 @@ impl QuorumVote { + /// Convert a `QuorumVote` to a `QuorumVote2` + pub fn to_vote2(self) -> QuorumVote2 { + let bytes: [u8; 32] = self.data.leaf_commit.into(); + + let signature = self.signature; + let data = QuorumData2 { + leaf_commit: Commitment::from_raw(bytes), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl QuorumVote2 { + /// Convert a `QuorumVote2` to a `QuorumVote` + pub fn to_vote(self) -> QuorumVote { + let bytes: [u8; 32] = self.data.leaf_commit.into(); + + let signature = self.signature; + let data = QuorumData { + leaf_commit: Commitment::from_raw(bytes), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file /// Quorum vote Alias pub type QuorumVote = SimpleVote>; +// Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file +/// Quorum vote Alias +pub type QuorumVote2 = SimpleVote>; /// DA vote type alias pub type DaVote = SimpleVote; /// Timeout Vote type alias diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 5901b7242d..febf32798f 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -25,7 +25,7 @@ use vbs::version::Version; use super::signature_key::BuilderSignatureKey; use crate::{ - data::Leaf, + data::Leaf2, traits::{node_implementation::NodeType, states::InstanceState, ValidatedState}, utils::BuilderCommitment, vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType}, @@ -199,7 +199,7 @@ pub trait BlockHeader: fn new_legacy( parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, - parent_leaf: &Leaf, + parent_leaf: &Leaf2, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, metadata: >::Metadata, @@ -215,7 +215,7 @@ pub trait BlockHeader: fn new_marketplace( parent_state: &TYPES::ValidatedState, instance_state: &>::Instance, - parent_leaf: &Leaf, + parent_leaf: &Leaf2, payload_commitment: VidCommitment, builder_commitment: BuilderCommitment, metadata: >::Metadata, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index d05652b783..32093b8714 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -35,7 +35,7 @@ use super::{ ValidatedState, }; use crate::{ - data::{Leaf, TestableLeaf}, + data::{Leaf2, TestableLeaf}, traits::{ election::Membership, signature_key::SignatureKey, states::InstanceState, BlockPayload, }, @@ -87,7 +87,7 @@ pub trait TestableNodeImplementation: NodeImplementation /// otherwise panics /// `padding` is the bytes of padding to add to the transaction fn leaf_create_random_transaction( - leaf: &Leaf, + leaf: &Leaf2, rng: &mut dyn rand::RngCore, padding: u64, ) -> >::Transaction; @@ -126,11 +126,11 @@ where } fn leaf_create_random_transaction( - leaf: &Leaf, + leaf: &Leaf2, rng: &mut dyn rand::RngCore, padding: u64, ) -> >::Transaction { - Leaf::create_random_transaction(leaf, rng, padding) + Leaf2::create_random_transaction(leaf, rng, padding) } fn block_genesis() -> TYPES::BlockPayload { diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 956fd22d02..00a5a6aab1 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -17,7 +17,7 @@ use vbs::version::Version; use super::block_contents::TestableBlock; use crate::{ - data::Leaf, + data::Leaf2, traits::{ node_implementation::{ConsensusTime, NodeType}, BlockPayload, @@ -69,7 +69,7 @@ pub trait ValidatedState: fn validate_and_apply_header( &self, instance: &Self::Instance, - parent_leaf: &Leaf, + parent_leaf: &Leaf2, proposed_header: &TYPES::BlockHeader, vid_common: VidCommon, version: Version, diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 81ba2f66f8..4ce9fdeac4 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -18,10 +18,10 @@ use jf_vid::VidScheme; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::{DaProposal, Leaf, QuorumProposal, VidDisperseShare}, + data::{DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare}, event::HotShotAction, message::Proposal, - simple_certificate::{QuorumCertificate, UpgradeCertificate}, + simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, vid::VidSchemeType, }; @@ -41,10 +41,17 @@ pub trait Storage: Send + Sync + Clone { &self, proposal: &Proposal>, ) -> Result<()>; + /// Add a proposal we sent to the store + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> Result<()>; /// Record a HotShotAction taken. async fn record_action(&self, view: TYPES::View, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; + /// Update the current high QC in storage. + async fn update_high_qc2(&self, high_qc: QuorumCertificate2) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, /// and the undecided state. async fn update_undecided_state( @@ -52,9 +59,24 @@ pub trait Storage: Send + Sync + Clone { leafs: CommitmentMap>, state: BTreeMap>, ) -> Result<()>; + /// Update the currently undecided state of consensus. This includes the undecided leaf chain, + /// and the undecided state. + async fn update_undecided_state2( + &self, + leafs: CommitmentMap>, + state: BTreeMap>, + ) -> Result<()>; /// Upgrade the current decided upgrade certificate in storage. async fn update_decided_upgrade_certificate( &self, decided_upgrade_certificate: Option>, ) -> Result<()>; + /// Migrate leaves from `Leaf` to `Leaf2`, and proposals from `QuorumProposal` to `QuorumProposal2` + async fn migrate_consensus( + &self, + convert_leaf: fn(Leaf) -> Leaf2, + convert_proposal: fn( + Proposal>, + ) -> Proposal>, + ) -> Result<()>; } diff --git a/types/src/utils.rs b/types/src/utils.rs index c3f8780575..aacbafa652 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -24,7 +24,7 @@ use tagged_base64::tagged; use typenum::Unsigned; use crate::{ - data::Leaf, + data::Leaf2, traits::{node_implementation::NodeType, ValidatedState}, vid::VidCommitment, }; @@ -70,7 +70,7 @@ impl Clone for ViewInner { } } /// The hash of a leaf. -pub type LeafCommitment = Commitment>; +pub type LeafCommitment = Commitment>; /// Optional validated state and state delta. pub type StateAndDelta = ( From 7fa55cc03fe6703b87e886e23c47be4cc346edc5 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 19 Nov 2024 13:56:19 -0800 Subject: [PATCH 1309/1393] [DRB] - Add DRB functions (#3890) * Add drb functions * Fix fmt and import * Resolve comments * Fmt * Fix hash * Update drb compute function * Move comment --- hotshot/Cargo.toml | 1 + hotshot/src/traits/election.rs | 2 + hotshot/src/traits/election/dynamic.rs | 76 ++++++++++++++++++++++++++ 3 files changed, 79 insertions(+) create mode 100644 hotshot/src/traits/election/dynamic.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index f433e4d78b..0efbb71aa2 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -41,6 +41,7 @@ lru.workspace = true portpicker = "0.1" rand = { workspace = true } serde = { workspace = true, features = ["rc"] } +sha2 = { workspace = true } time = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election.rs index 4f9212705f..427ed12629 100644 --- a/hotshot/src/traits/election.rs +++ b/hotshot/src/traits/election.rs @@ -6,6 +6,8 @@ //! elections used for consensus +/// Dynamic leader election with epochs. +pub mod dynamic; /// leader completely randomized every view pub mod randomized_committee; /// static (round robin) committee election diff --git a/hotshot/src/traits/election/dynamic.rs b/hotshot/src/traits/election/dynamic.rs new file mode 100644 index 0000000000..eba6f100ec --- /dev/null +++ b/hotshot/src/traits/election/dynamic.rs @@ -0,0 +1,76 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::hash::{DefaultHasher, Hash, Hasher}; + +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use sha2::{Digest, Sha256}; + +// TODO: Add the following consts once we bench the hash time. +// +// /// Highest number of hashes that a hardware can complete in a second. +// const `HASHES_PER_SECOND` +// /// Time a DRB calculation will take, in terms of number of views. +// const `DRB_CALCULATION_NUM_VIEW`: u64 = 300; + +// TODO: Replace this with an accurate number calculated by `fn difficulty_level()` once we bench +// the hash time. +// +/// Arbitrary number of times the hash function will be repeatedly called. +const DIFFICULTY_LEVEL: u64 = 10; + +// TODO: Use `HASHES_PER_SECOND` * `VIEW_TIMEOUT` * `DRB_CALCULATION_NUM_VIEW` to calculate this +// once we bench the hash time. +// +/// Difficulty level of the DRB calculation. +/// +/// Represents the number of times the hash function will be repeatedly called. +#[must_use] +pub fn difficulty_level() -> u64 { + unimplemented!("Use an arbitrary `DIFFICULTY_LEVEL` for now before we bench the hash time."); +} + +/// Compute the DRB result for the leader rotation. +/// +/// This is to be started two epochs in advance and spawned in a non-blocking thread. +/// +/// # Arguments +/// * `drb_seed_input` - Serialized QC signature. +#[must_use] +pub fn compute_drb_result(drb_seed_input: [u8; 32]) -> [u8; 32] { + let mut hash = drb_seed_input.to_vec(); + for _iter in 0..DIFFICULTY_LEVEL { + // TODO: This may be optimized to avoid memcopies after we bench the hash time. + // + hash = Sha256::digest(hash).to_vec(); + } + + // Convert the hash to the DRB result. + let mut drb_result = [0u8; 32]; + drb_result.copy_from_slice(&hash); + drb_result +} + +/// Use the DRB result to get the leader. +/// +/// The DRB result is the output of a spawned `compute_drb_result` call. +#[must_use] +pub fn leader( + view_number: usize, + stake_table: &[::StakeTableEntry], + drb_result: [u8; 32], +) -> TYPES::SignatureKey { + let mut hasher = DefaultHasher::new(); + drb_result.hash(&mut hasher); + view_number.hash(&mut hasher); + #[allow(clippy::cast_possible_truncation)] + // TODO: Use the total stake rather than `len()` and update the indexing after switching to + // a weighted stake table. + // + let index = (hasher.finish() as usize) % stake_table.len(); + let entry = stake_table[index].clone(); + TYPES::SignatureKey::public_key(&entry) +} From e6044144ce9316effc20bcc8cca624617c6c247a Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 20 Nov 2024 11:09:36 -0500 Subject: [PATCH 1310/1393] [TECH_DEBT] Remove unused Parameters in `TaskState`s (#3892) --- hotshot/src/tasks/mod.rs | 4 +-- hotshot/src/tasks/task_state.rs | 14 ++------- task-impls/src/consensus/handlers.rs | 4 +-- task-impls/src/consensus/mod.rs | 10 ------ task-impls/src/quorum_proposal/mod.rs | 10 ------ task-impls/src/transactions.rs | 3 -- task-impls/src/upgrade.rs | 13 +++----- task-impls/src/vid.rs | 3 -- task-impls/src/view_sync.rs | 31 ++++++------------- .../tests_1/upgrade_task_with_proposal.rs | 3 +- testing/tests/tests_1/view_sync_task.rs | 3 +- types/src/consensus.rs | 2 +- 12 files changed, 22 insertions(+), 78 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index bb9f7ed630..afe72497eb 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -216,7 +216,7 @@ pub fn add_network_event_task< pub async fn add_consensus_tasks, V: Versions>( handle: &mut SystemContextHandle, ) { - handle.add_task(ViewSyncTaskState::::create_from(handle).await); + handle.add_task(ViewSyncTaskState::::create_from(handle).await); handle.add_task(VidTaskState::::create_from(handle).await); handle.add_task(DaTaskState::::create_from(handle).await); handle.add_task(TransactionTaskState::::create_from(handle).await); @@ -240,7 +240,7 @@ pub async fn add_consensus_tasks, // only spawn the upgrade task if we are actually configured to perform an upgrade. if V::Base::VERSION < V::Upgrade::VERSION { - handle.add_task(UpgradeTaskState::::create_from(handle).await); + handle.add_task(UpgradeTaskState::::create_from(handle).await); } { diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 536e6239a6..7d3d085281 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -63,7 +63,7 @@ impl, V: Versions> CreateTaskState #[async_trait] impl, V: Versions> CreateTaskState - for UpgradeTaskState + for UpgradeTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { #[cfg(not(feature = "example-upgrade"))] @@ -72,7 +72,6 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - network: Arc::clone(&handle.hotshot.network), vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -121,7 +120,6 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - vote_collector: None, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -156,7 +154,7 @@ impl, V: Versions> CreateTaskState #[async_trait] impl, V: Versions> CreateTaskState - for ViewSyncTaskState + for ViewSyncTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { let cur_view = handle.cur_view().await; @@ -165,7 +163,6 @@ impl, V: Versions> CreateTaskState cur_view, next_view: cur_view, cur_epoch: handle.cur_epoch().await, - network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -193,7 +190,6 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -256,11 +252,8 @@ impl, V: Versions> CreateTaskState Self { latest_proposed_view: handle.cur_view().await, proposal_dependencies: BTreeMap::new(), - network: Arc::clone(&handle.hotshot.network), - output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), - timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -312,12 +305,9 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), network: Arc::clone(&handle.hotshot.network), - timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - committee_membership: handle.hotshot.memberships.da_membership.clone().into(), vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), - storage: Arc::clone(&handle.storage), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), cur_epoch: handle.cur_epoch().await, diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 1dd319fed9..2de541262b 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -87,7 +87,7 @@ pub(crate) async fn handle_timeout_vote_recv< // Are we the leader for this view? ensure!( task_state - .timeout_membership + .quorum_membership .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, info!( @@ -276,7 +276,7 @@ pub(crate) async fn handle_timeout ensure!( task_state - .timeout_membership + .quorum_membership .has_stake(&task_state.public_key, task_state.cur_epoch), debug!("We were not chosen for the consensus committee for view {view_number:?}") ); diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 6def6ebd82..0ed04d30fa 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -7,7 +7,6 @@ use std::sync::Arc; use async_broadcast::{Receiver, Sender}; -use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -47,15 +46,9 @@ pub struct ConsensusTaskState, V: /// The underlying network pub network: Arc, - /// Membership for Timeout votes/certs - pub timeout_membership: Arc, - /// Membership for Quorum Certs/votes pub quorum_membership: Arc, - /// Membership for DA committee Votes/certs - pub committee_membership: Arc, - /// A map of `QuorumVote` collector tasks. pub vote_collectors: VoteCollectorsMap, QuorumCertificate2, V>, @@ -63,9 +56,6 @@ pub struct ConsensusTaskState, V: pub timeout_vote_collectors: VoteCollectorsMap, TimeoutCertificate, V>, - /// This node's storage ref - pub storage: Arc>, - /// The view number that this node is currently executing in. pub cur_view: TYPES::View, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 9951f63e6d..cf06c620c6 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -17,7 +17,6 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::OuterConsensus, - event::Event, message::UpgradeLock, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ @@ -45,18 +44,9 @@ pub struct QuorumProposalTaskState /// Table for the in-progress proposal dependency tasks. pub proposal_dependencies: BTreeMap>, - /// The underlying network - pub network: Arc, - - /// Output events to application - pub output_event_stream: async_broadcast::Sender>, - /// Immutable instance state pub instance_state: Arc, - /// Membership for Timeout votes/certs - pub timeout_membership: Arc, - /// Membership for Quorum Certs/votes pub quorum_membership: Arc, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 43cdef8bfc..7018969e3f 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -93,9 +93,6 @@ pub struct TransactionTaskState, V /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, - /// The underlying network - pub network: Arc, - /// Membership for the quorum pub membership: Arc, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index c5e9735701..a8dec2e3f3 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -22,7 +22,7 @@ use hotshot_types::{ simple_vote::{UpgradeProposalData, UpgradeVote}, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, vote::HasViewNumber, @@ -38,7 +38,7 @@ use crate::{ }; /// Tracks state of an upgrade task -pub struct UpgradeTaskState, V: Versions> { +pub struct UpgradeTaskState { /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -51,9 +51,6 @@ pub struct UpgradeTaskState, V: Ve /// Membership for Quorum Certs/votes pub quorum_membership: Arc, - /// The underlying network - pub network: Arc, - /// A map of `UpgradeVote` collector tasks pub vote_collectors: VoteCollectorsMap, UpgradeCertificate, V>, @@ -94,7 +91,7 @@ pub struct UpgradeTaskState, V: Ve pub upgrade_lock: UpgradeLock, } -impl, V: Versions> UpgradeTaskState { +impl UpgradeTaskState { /// Check if we have decided on an upgrade certificate async fn upgraded(&self) -> bool { self.upgrade_lock @@ -324,9 +321,7 @@ impl, V: Versions> UpgradeTaskStat #[async_trait] /// task state implementation for the upgrade task -impl, V: Versions> TaskState - for UpgradeTaskState -{ +impl TaskState for UpgradeTaskState { type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3795577bbb..f19f6e7528 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -50,9 +50,6 @@ pub struct VidTaskState> { /// Our Private Key pub private_key: ::PrivateKey, - /// The view and ID of the current vote collection task, if there is one. - pub vote_collector: Option<(TYPES::View, usize, usize)>, - /// This state's ID pub id: u64, } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 9e8b852789..c8f00d3c15 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -26,7 +26,7 @@ use hotshot_types::{ }, traits::{ election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, vote::{Certificate, HasViewNumber, Vote}, @@ -62,7 +62,7 @@ type RelayMap = HashMap< >; /// Main view sync task state -pub struct ViewSyncTaskState, V: Versions> { +pub struct ViewSyncTaskState { /// View HotShot is currently in pub cur_view: TYPES::View, @@ -72,9 +72,6 @@ pub struct ViewSyncTaskState, V: V /// Epoch HotShot is currently in pub cur_epoch: TYPES::Epoch, - /// The underlying network - pub network: Arc, - /// Membership for the quorum pub membership: Arc, @@ -91,7 +88,7 @@ pub struct ViewSyncTaskState, V: V pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: RwLock>>, + pub replica_task_map: RwLock>>, /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: RwLock< @@ -118,9 +115,7 @@ pub struct ViewSyncTaskState, V: V } #[async_trait] -impl, V: Versions> TaskState - for ViewSyncTaskState -{ +impl TaskState for ViewSyncTaskState { type Event = HotShotEvent; async fn handle_event( @@ -136,7 +131,7 @@ impl, V: Versions> TaskState } /// State of a view sync replica task -pub struct ViewSyncReplicaTaskState, V: Versions> { +pub struct ViewSyncReplicaTaskState { /// Timeout for view sync rounds pub view_sync_timeout: Duration, @@ -164,9 +159,6 @@ pub struct ViewSyncReplicaTaskState, - /// Membership for the quorum pub membership: Arc, @@ -181,9 +173,7 @@ pub struct ViewSyncReplicaTaskState, V: Versions> TaskState - for ViewSyncReplicaTaskState -{ +impl TaskState for ViewSyncReplicaTaskState { type Event = HotShotEvent; async fn handle_event( @@ -200,7 +190,7 @@ impl, V: Versions> TaskState fn cancel_subtasks(&mut self) {} } -impl, V: Versions> ViewSyncTaskState { +impl ViewSyncTaskState { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task @@ -236,7 +226,7 @@ impl, V: Versions> ViewSyncTaskSta } // We do not have a replica task already running, so start one - let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { + let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { cur_view: view, next_view: view, cur_epoch: self.cur_epoch, @@ -245,7 +235,6 @@ impl, V: Versions> ViewSyncTaskSta sent_view_change_event: false, timeout_task: None, membership: Arc::clone(&self.membership), - network: Arc::clone(&self.network), public_key: self.public_key.clone(), private_key: self.private_key.clone(), view_sync_timeout: self.view_sync_timeout, @@ -523,9 +512,7 @@ impl, V: Versions> ViewSyncTaskSta } } -impl, V: Versions> - ViewSyncReplicaTaskState -{ +impl ViewSyncReplicaTaskState { #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 0cef44ab81..0c706efd02 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -144,8 +144,7 @@ async fn test_upgrade_task_with_proposal() { let proposal_state = QuorumProposalTaskState::::create_from(&handle).await; - let upgrade_state = - UpgradeTaskState::::create_from(&handle).await; + let upgrade_state = UpgradeTaskState::::create_from(&handle).await; let upgrade_vote_recvs: Vec<_> = upgrade_votes.into_iter().map(UpgradeVoteRecv).collect(); diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index ec313ec2a7..0c5eb9a719 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -56,7 +56,6 @@ async fn test_view_sync_task() { )); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); - let view_sync_state = - ViewSyncTaskState::::create_from(&handle).await; + let view_sync_state = ViewSyncTaskState::::create_from(&handle).await; run_harness(input, output, view_sync_state, false).await; } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a27220a7ed..648f045619 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -954,7 +954,7 @@ impl Consensus { let mut is_leaf_extended = true; if let Err(e) = self.visit_leaf_ancestors( leaf_view, - Terminator::Inclusive(leaf_view - 2), + Terminator::Inclusive(leaf_view - 1), true, |leaf, _, _| { tracing::trace!( From 508cf334fd1b9b4f5717ba68c7108e7a8bfda527 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 20 Nov 2024 18:18:06 +0100 Subject: [PATCH 1311/1393] Fix extended voting (#3900) --- task-impls/src/quorum_vote/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 393488402f..5f0b700aaa 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -224,7 +224,6 @@ impl + 'static, V: Versions> Handl ) .await; - let is_vote_leaf_extended = self.consensus.read().await.is_leaf_extended(leaf.commit()); if let Err(e) = submit_vote::( self.sender.clone(), Arc::clone(&self.quorum_membership), @@ -236,7 +235,7 @@ impl + 'static, V: Versions> Handl Arc::clone(&self.storage), leaf, vid_share, - is_vote_leaf_extended, + false, ) .await { @@ -678,6 +677,11 @@ impl, V: Versions> QuorumVoteTaskS ) .await; + let is_vote_leaf_extended = self + .consensus + .read() + .await + .is_leaf_extended(proposed_leaf.commit()); if let Err(e) = submit_vote::( event_sender.clone(), Arc::clone(&self.quorum_membership), @@ -689,7 +693,7 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&self.storage), proposed_leaf, updated_vid, - false, + is_vote_leaf_extended, ) .await { From 1730604876973d369535a582979af37e04be5cc3 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 21 Nov 2024 15:25:49 +0100 Subject: [PATCH 1312/1393] Fix accidentally pulled bug in eQC rule (#3904) --- types/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 648f045619..a27220a7ed 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -954,7 +954,7 @@ impl Consensus { let mut is_leaf_extended = true; if let Err(e) = self.visit_leaf_ancestors( leaf_view, - Terminator::Inclusive(leaf_view - 1), + Terminator::Inclusive(leaf_view - 2), true, |leaf, _, _| { tracing::trace!( From 014313bc14b3871d4be1db25411040abce4b0fe6 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 21 Nov 2024 19:12:44 +0300 Subject: [PATCH 1313/1393] Prune dependencies (#3787) --- builder-api/Cargo.toml | 7 ++- example-types/Cargo.toml | 18 ++++---- examples/Cargo.toml | 31 ++++++------- fakeapi/Cargo.toml | 30 ++++++------ hotshot-stake-table/Cargo.toml | 4 +- hotshot-stake-table/src/mt_based.rs | 4 +- hotshot-stake-table/src/mt_based/internal.rs | 4 +- hotshot-stake-table/src/utils.rs | 4 +- hotshot-stake-table/src/vec_based.rs | 4 +- hotshot/Cargo.toml | 24 +++++----- .../traits/election/randomized_committee.rs | 2 +- .../src/traits/election/static_committee.rs | 2 +- .../static_committee_leader_two_views.rs | 2 +- .../src/traits/networking/memory_network.rs | 7 +-- libp2p-networking/Cargo.toml | 17 ++++--- libp2p-networking/src/network/def.rs | 2 +- libp2p-networking/src/network/node.rs | 13 +----- libp2p-networking/src/network/node/config.rs | 2 +- macros/Cargo.toml | 4 +- orchestrator/Cargo.toml | 19 ++++---- orchestrator/src/client.rs | 3 +- orchestrator/src/lib.rs | 10 ++-- task-impls/Cargo.toml | 9 ++-- task-impls/src/view_sync.rs | 2 +- task/Cargo.toml | 7 ++- testing/Cargo.toml | 21 ++++----- testing/src/helpers.rs | 2 +- types/Cargo.toml | 46 +++++++++---------- types/src/consensus.rs | 2 +- types/src/data.rs | 15 +++--- types/src/hotshot_config_file.rs | 2 +- types/src/lib.rs | 2 +- types/src/light_client.rs | 2 +- types/src/message.rs | 7 ++- types/src/network.rs | 3 +- types/src/qc.rs | 2 +- types/src/signature_key.rs | 2 +- types/src/simple_certificate.rs | 2 +- types/src/stake_table.rs | 2 +- types/src/traits/network.rs | 7 ++- types/src/traits/signature_key.rs | 2 +- types/src/utils.rs | 14 +++++- types/src/vote.rs | 2 +- 43 files changed, 177 insertions(+), 189 deletions(-) diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index 45fc2e89be..a28d766154 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -2,19 +2,18 @@ name = "hotshot-builder-api" version = "0.1.7" edition = "2021" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] async-trait = { workspace = true } clap = { workspace = true } -derive_more = { workspace = true } +committable = { workspace = true } +derive_more = { workspace = true, features = ["from"] } futures = { workspace = true } hotshot-types = { path = "../types" } serde = { workspace = true } -thiserror = { workspace = true } tagged-base64 = { workspace = true } +thiserror = { workspace = true } tide-disco = { workspace = true } toml = { workspace = true } -committable = { workspace = true } vbs = { workspace = true } diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index 026ac7e23b..d0791ea285 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -12,21 +12,21 @@ slow-tests = [] gpu-vid = ["hotshot-task-impls/gpu-vid"] [dependencies] -async-trait = { workspace = true } anyhow = { workspace = true } -sha3 = "^0.10" +async-lock = { workspace = true } +async-trait = { workspace = true } committable = { workspace = true } hotshot = { path = "../hotshot" } -hotshot-types = { path = "../types" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } +hotshot-types = { path = "../types" } +jf-vid = { workspace = true } rand = { workspace = true } -thiserror = { workspace = true } +reqwest = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } +sha3 = "^0.10" +thiserror = { workspace = true } time = { workspace = true } -async-lock = { workspace = true } -jf-vid = { workspace = true } -vbs = { workspace = true } -url = { workspace = true } -reqwest = { workspace = true } tokio = { workspace = true } +url = { workspace = true } +vbs = { workspace = true } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fb281cd7eb..09e13b8e7d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -9,7 +9,7 @@ rust-version = { workspace = true } [features] default = ["docs", "doc-images", "hotshot-testing"] -gpu-vid = ["hotshot-task-impls/gpu-vid"] +gpu-vid = ["hotshot-example-types/gpu-vid"] # Build the extended documentation docs = [] @@ -79,35 +79,34 @@ path = "push-cdn/whitelist-adapter.rs" [dependencies] async-trait = { workspace = true } + +cdn-broker = { workspace = true, features = ["global-permits"] } +cdn-marshal = { workspace = true } +chrono = { workspace = true } clap = { workspace = true, optional = true } futures = { workspace = true } +hotshot = { path = "../hotshot" } +hotshot-example-types = { path = "../example-types" } hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false } -hotshot-types = { path = "../types" } hotshot-testing = { path = "../testing" } -hotshot-task-impls = { path = "../task-impls" } +hotshot-types = { path = "../types" } libp2p-networking = { workspace = true } +local-ip-address = "0.6" +portpicker = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["rc"] } +sha2 = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } -portpicker.workspace = true -hotshot = { path = "../hotshot" } -hotshot-example-types = { path = "../example-types" } -chrono = { workspace = true } -sha2.workspace = true -local-ip-address = "0.6" -url = { workspace = true } - -tracing = { workspace = true } tokio = { workspace = true } -cdn-broker = { workspace = true, features = ["global-permits"] } -cdn-marshal = { workspace = true } +tracing = { workspace = true } +url = { workspace = true } [dev-dependencies] -clap.workspace = true +anyhow = { workspace = true } +clap = { workspace = true } toml = { workspace = true } -anyhow.workspace = true tracing-subscriber = "0.3" [lints] diff --git a/fakeapi/Cargo.toml b/fakeapi/Cargo.toml index 93c87c2fdf..d47908f721 100644 --- a/fakeapi/Cargo.toml +++ b/fakeapi/Cargo.toml @@ -1,25 +1,25 @@ [package] name = "hotshot-fakeapi" -version.workspace = true -authors.workspace = true -edition.workspace = true -rust-version.workspace = true -homepage.workspace = true -documentation.workspace = true -repository.workspace = true +documentation = { workspace = true } +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } +homepage = { workspace = true } +repository = { workspace = true } [dependencies] -toml = { workspace = true } -tide-disco = { workspace = true } -tokio = { workspace = true } anyhow = { workspace = true } -hotshot-types = { path = "../types" } -vbs = { workspace = true } -rand = { workspace = true } -hotshot-example-types = { path = "../example-types" } +async-lock = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } -async-lock = { workspace = true } +hotshot-example-types = { path = "../example-types" } +hotshot-types = { path = "../types" } +rand = { workspace = true } +tide-disco = { workspace = true } +tokio = { workspace = true } +toml = { workspace = true } +vbs = { workspace = true } [lints] workspace = true diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index ef3d9bca62..6db5a340f0 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -13,12 +13,12 @@ ark-ff = "0.4" ark-serialize = { workspace = true } ark-std = { workspace = true } digest = { workspace = true } -ethereum-types = { workspace = true } hotshot-types = { path = "../types" } jf-crhf = { workspace = true } -jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-rescue = { workspace = true } +jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-utils = { workspace = true } +primitive-types = { workspace = true } serde = { workspace = true, features = ["rc"] } tagged-base64 = { workspace = true } diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index 0929b78bee..a6804662be 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -11,8 +11,8 @@ mod internal; use ark_std::{collections::HashMap, rand::SeedableRng, sync::Arc}; use digest::crypto_common::rand_core::CryptoRngCore; -use ethereum_types::{U256, U512}; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; +use primitive_types::{U256, U512}; use serde::{Deserialize, Serialize}; use self::internal::{to_merkle_path, Key, MerkleCommitment, MerkleProof, PersistentMerkleNode}; @@ -230,8 +230,8 @@ impl StakeTable { #[cfg(test)] mod tests { use ark_std::{rand::SeedableRng, vec::Vec}; - use ethereum_types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; + use primitive_types::U256; use super::StakeTable; diff --git a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index ee98af4670..b08301ee30 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -8,10 +8,10 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{hash::Hash, sync::Arc, vec, vec::Vec}; -use ethereum_types::U256; use hotshot_types::traits::stake_table::StakeTableError; use jf_crhf::CRHF; use jf_utils::canonical; +use primitive_types::U256; use serde::{Deserialize, Serialize}; use tagged_base64::tagged; @@ -631,8 +631,8 @@ mod tests { vec, vec::Vec, }; - use ethereum_types::U256; use jf_utils::test_rng; + use primitive_types::U256; use super::{super::config, to_merkle_path, PersistentMerkleNode}; diff --git a/hotshot-stake-table/src/utils.rs b/hotshot-stake-table/src/utils.rs index e3ef251fd0..b295cdeb28 100644 --- a/hotshot-stake-table/src/utils.rs +++ b/hotshot-stake-table/src/utils.rs @@ -7,7 +7,7 @@ //! Utilities to help building a stake table. use ark_ff::{Field, PrimeField}; -use ethereum_types::U256; +use primitive_types::U256; /// A trait that converts into a field element. pub trait ToFields { @@ -21,6 +21,6 @@ pub trait ToFields { /// convert a U256 to a field element. pub(crate) fn u256_to_field(v: &U256) -> F { let mut bytes = vec![0u8; 32]; - v.to_little_endian(&mut bytes); + v.write_as_little_endian(&mut bytes); F::from_le_bytes_mod_order(&bytes) } diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 66b9f23d05..6267dd21ca 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -8,10 +8,10 @@ use ark_std::{collections::HashMap, hash::Hash, rand::SeedableRng}; use digest::crypto_common::rand_core::CryptoRngCore; -use ethereum_types::{U256, U512}; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; use jf_crhf::CRHF; use jf_rescue::{crhf::VariableLengthRescueCRHF, RescueParameter}; +use primitive_types::{U256, U512}; use serde::{Deserialize, Serialize}; use crate::{ @@ -387,12 +387,12 @@ where #[cfg(test)] mod tests { use ark_std::{rand::SeedableRng, vec::Vec}; - use ethereum_types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme}; use jf_signature::{ bls_over_bn254::BLSOverBN254CurveSignatureScheme, schnorr::SchnorrSignatureScheme, SignatureScheme, }; + use primitive_types::U256; use super::{ config::{FieldType as F, QCVerKey, StateVerKey}, diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 0efbb71aa2..a74bc74746 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -25,37 +25,37 @@ async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" bincode = { workspace = true } +blake3 = { workspace = true } +cdn-broker = { workspace = true, features = ["global-permits"] } +cdn-client = { workspace = true } +cdn-marshal = { workspace = true } chrono = { workspace = true } committable = { workspace = true } -custom_debug = { workspace = true } dashmap = "6" +derive_more = { workspace = true } either = { workspace = true } -ethereum-types = { workspace = true } futures = { workspace = true } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-types = { path = "../types" } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } -lru.workspace = true +lru = { workspace = true } +num_enum = "0.7" +parking_lot = "0.12" portpicker = "0.1" +primitive-types = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["rc"] } sha2 = { workspace = true } time = { workspace = true } + +tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } -vbs = { workspace = true } -blake3.workspace = true url = { workspace = true } -num_enum = "0.7" -parking_lot = "0.12" utils = { path = "../utils" } - -tokio = { workspace = true } -cdn-client = { workspace = true } -cdn-broker = { workspace = true, features = ["global-permits"] } -cdn-marshal = { workspace = true } +vbs = { workspace = true } [dev-dependencies] blake3 = { workspace = true } diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index 401a3102e2..89a36f9030 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -6,7 +6,6 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; -use ethereum_types::U256; use hotshot_types::{ traits::{ election::Membership, @@ -16,6 +15,7 @@ use hotshot_types::{ }, PeerConfig, }; +use primitive_types::U256; use rand::{rngs::StdRng, Rng}; use utils::anytrace::Result; diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 258a2cfeda..200ed530dd 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -6,7 +6,6 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; -use ethereum_types::U256; use hotshot_types::{ traits::{ election::Membership, @@ -16,6 +15,7 @@ use hotshot_types::{ }, PeerConfig, }; +use primitive_types::U256; use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 8658744e71..922a815e68 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -6,7 +6,6 @@ use std::{collections::BTreeMap, num::NonZeroU64}; -use ethereum_types::U256; use hotshot_types::{ traits::{ election::Membership, @@ -16,6 +15,7 @@ use hotshot_types::{ }, PeerConfig, }; +use primitive_types::U256; use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 16eaa8ccbe..d48f6f5f79 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -33,7 +33,6 @@ use hotshot_types::{ }, BoxSyncFuture, }; -use rand::Rng; use tokio::{ spawn, sync::mpsc::{channel, error::SendError, Receiver, Sender}, @@ -46,7 +45,7 @@ use super::{NetworkError, NetworkReliability}; /// /// This type is responsible for keeping track of the channels to each [`MemoryNetwork`], and is /// used to group the [`MemoryNetwork`] instances. -#[derive(custom_debug::Debug)] +#[derive(derive_more::Debug)] pub struct MasterMap { /// The list of `MemoryNetwork`s #[debug(skip)] @@ -54,9 +53,6 @@ pub struct MasterMap { /// The list of `MemoryNetwork`s aggregated by topic subscribed_map: DashMap)>>, - - /// The id of this `MemoryNetwork` cluster - id: u64, } impl MasterMap { @@ -66,7 +62,6 @@ impl MasterMap { Arc::new(MasterMap { map: DashMap::new(), subscribed_map: DashMap::new(), - id: rand::thread_rng().gen(), }) } } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 3f7c3dbd40..edabb02097 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -6,7 +6,6 @@ edition = { workspace = true } authors = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [features] default = ["webui"] webui = [] @@ -17,23 +16,23 @@ hotshot-example-types = { path = "../example-types" } [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } -blake3 = { workspace = true } bincode = { workspace = true } -custom_debug = { workspace = true } +blake3 = { workspace = true } +cbor4ii = "0.3" delegate = "0.13" derive_builder = "0.20" +derive_more = { workspace = true } futures = { workspace = true } hotshot-types = { path = "../types" } -libp2p-swarm-derive = { workspace = true } +lazy_static = { workspace = true } +libp2p = { workspace = true, features = ["tokio"] } libp2p-identity = { workspace = true } +libp2p-swarm-derive = { workspace = true } +pin-project = "1" rand = { workspace = true } serde = { workspace = true } -tracing = { workspace = true } -lazy_static = { workspace = true } -pin-project = "1" -cbor4ii = "0.3" -libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } +tracing = { workspace = true } [lints] workspace = true diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index ca0045dcb0..7fcf2da216 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -24,7 +24,7 @@ use super::{behaviours::dht::store::ValidatedStore, cbor, NetworkEventInternal}; /// - direct messaging /// - p2p broadcast /// - connection management -#[derive(NetworkBehaviour, custom_debug::Debug)] +#[derive(NetworkBehaviour, derive_more::Debug)] #[behaviour(to_swarm = "NetworkEventInternal")] pub struct NetworkDef { /// purpose: broadcasting messages to many peers diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 7bf55ccc86..cc5aed5a70 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -58,7 +58,7 @@ pub use self::{ }; use super::{ behaviours::dht::{ - bootstrap::{self, DHTBootstrapTask, InputEvent}, + bootstrap::{DHTBootstrapTask, InputEvent}, store::ValidatedStore, }, cbor::Cbor, @@ -81,17 +81,13 @@ pub const ESTABLISHED_LIMIT: NonZeroU32 = pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; /// Network definition -#[derive(custom_debug::Debug)] +#[derive(derive_more::Debug)] pub struct NetworkNode { - /// The keypair for the node - keypair: Keypair, /// peer id of network node peer_id: PeerId, /// the swarm of networkbehaviours #[debug(skip)] swarm: Swarm>, - /// the configuration parameters of the netework - config: NetworkNodeConfig, /// the listener id we are listening on, if it exists listener_id: Option, /// Handler for direct messages @@ -100,8 +96,6 @@ pub struct NetworkNode { dht_handler: DHTBehaviour, /// Channel to resend requests, set to Some when we call `spawn_listeners` resend_tx: Option>, - /// Send to the bootstrap task to tell it to start a bootstrap - bootstrap_tx: Option>, } impl NetworkNode { @@ -316,10 +310,8 @@ impl NetworkNode { } Ok(Self { - keypair, peer_id, swarm, - config: config.clone(), listener_id: None, direct_message_state: DMBehaviour::default(), dht_handler: DHTBehaviour::new( @@ -329,7 +321,6 @@ impl NetworkNode { .unwrap_or(NonZeroUsize::new(4).unwrap()), ), resend_tx: None, - bootstrap_tx: None, }) } diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 2dfaaa3639..d459ae0217 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -16,7 +16,7 @@ use super::MAX_GOSSIP_MSG_SIZE; pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(10); /// describe the configuration of the network -#[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] +#[derive(Clone, Default, derive_builder::Builder, derive_more::Debug)] pub struct NetworkNodeConfig { /// The keypair for the node #[builder(setter(into, strip_option), default)] diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 84a5909c87..4fd4ee4c11 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -5,11 +5,11 @@ edition = { workspace = true } description = "Macros for hotshot tests" [dependencies] +derive_builder = "0.20" +proc-macro2 = "1" # proc macro stuff quote = "1" syn = { version = "2", features = ["full", "extra-traits"] } -proc-macro2 = "1" -derive_builder = "0.20" [lib] proc-macro = true diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 3d929fd5ca..a80dcba85a 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -4,21 +4,22 @@ version = { workspace = true } edition = { workspace = true } [dependencies] +anyhow = { workspace = true } async-lock = { workspace = true } -clap.workspace = true -futures = { workspace = true } -libp2p = { workspace = true } blake3 = { workspace = true } +clap = { workspace = true } +csv = "1" +futures = { workspace = true } hotshot-types = { path = "../types" } -tide-disco = { workspace = true } -surf-disco = { workspace = true } -tracing = { workspace = true } +libp2p-identity = { workspace = true } +multiaddr = { workspace = true } serde = { workspace = true } +surf-disco = { workspace = true } +tide-disco = { workspace = true } +tokio = { workspace = true } toml = { workspace = true } -csv = "1" +tracing = { workspace = true } vbs = { workspace = true } -anyhow.workspace = true -tokio = { workspace = true } [lints] workspace = true diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 53b097428a..800aed49cd 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -13,7 +13,8 @@ use hotshot_types::{ traits::signature_key::SignatureKey, PeerConfig, ValidatorConfig, }; -use libp2p::{Multiaddr, PeerId}; +use libp2p_identity::PeerId; +use multiaddr::Multiaddr; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; use tokio::time::sleep; diff --git a/orchestrator/src/lib.rs b/orchestrator/src/lib.rs index 4126961222..bafdccbe2c 100644 --- a/orchestrator/src/lib.rs +++ b/orchestrator/src/lib.rs @@ -26,13 +26,11 @@ use hotshot_types::{ traits::signature_key::{SignatureKey, StakeTableEntryType}, PeerConfig, }; -use libp2p::{ - identity::{ - ed25519::{Keypair as EdKeypair, SecretKey}, - Keypair, - }, - Multiaddr, PeerId, +use libp2p_identity::{ + ed25519::{Keypair as EdKeypair, SecretKey}, + Keypair, PeerId, }; +use multiaddr::Multiaddr; use surf_disco::Url; use tide_disco::{ api::ApiError, diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 0cb7e9e04c..6deb4f96ec 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -17,29 +17,28 @@ async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } -cdn-proto = { workspace = true } chrono = { workspace = true } committable = { workspace = true } either = { workspace = true } futures = { workspace = true } +hotshot-builder-api = { path = "../builder-api" } hotshot-task = { path = "../task" } hotshot-types = { path = "../types" } -hotshot-builder-api = { path = "../builder-api" } jf-vid = { workspace = true } -lru.workspace = true +lru = { workspace = true } rand = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } -thiserror = { workspace = true } surf-disco = { workspace = true } tagged-base64 = { workspace = true } +thiserror = { workspace = true } time = { workspace = true } +tokio = { workspace = true } tracing = { workspace = true } url = { workspace = true } utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } -tokio = { workspace = true } [lints] workspace = true diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index c8f00d3c15..ce609032db 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -476,7 +476,7 @@ impl ViewSyncTaskState { let leader = self.membership.leader(view_number, self.cur_epoch)?; tracing::warn!( %leader, - leader_mnemonic = cdn_proto::util::mnemonic(&leader), + leader_mnemonic = hotshot_types::utils::mnemonic(&leader), view_number = *view_number, num_timeouts_tracked = self.num_timeouts_tracked, "view timed out", diff --git a/task/Cargo.toml b/task/Cargo.toml index bbd1f7e0cc..4b6fed92e6 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -3,21 +3,20 @@ authors = { workspace = true } name = "hotshot-task" version = { workspace = true } edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -futures = { workspace = true } async-broadcast = { workspace = true } -tracing = { workspace = true } async-trait = { workspace = true } -utils = { path = "../utils" } +futures = { workspace = true } tokio = { workspace = true, features = [ "time", "rt-multi-thread", "macros", "sync", ] } +tracing = { workspace = true } +utils = { path = "../utils" } [lints] workspace = true diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 5f2b168864..36870d8fc2 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -15,38 +15,37 @@ test-srs = ["jf-vid/test-srs"] broken_3_chain_fixed = [] [dependencies] -automod = "1.0.14" anyhow = { workspace = true } async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } +automod = "1.0.14" bitvec = { workspace = true } committable = { workspace = true } either = { workspace = true } -ethereum-types = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot", features = ["hotshot-testing"] } +hotshot-builder-api = { path = "../builder-api" } hotshot-example-types = { path = "../example-types" } +hotshot-fakeapi = { path = "../fakeapi" } hotshot-macros = { path = "../macros" } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } -hotshot-fakeapi = { path = "../fakeapi" } hotshot-types = { path = "../types" } -hotshot-builder-api = { path = "../builder-api" } +itertools = "0.13.0" jf-vid = { workspace = true } +lru = { workspace = true } portpicker = { workspace = true } +primitive-types = { workspace = true } rand = { workspace = true } +reqwest = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } +tagged-base64 = { workspace = true } thiserror = { workspace = true } tide-disco = { workspace = true } +tokio = { workspace = true } tracing = { workspace = true } +url = { workspace = true } vbs = { workspace = true } -lru.workspace = true -tagged-base64.workspace = true vec1 = { workspace = true } -reqwest = { workspace = true } -url = { workspace = true } -itertools = "0.13.0" -tokio = { workspace = true } - diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 49f28e2504..d27c5fe165 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -10,7 +10,6 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; use bitvec::bitvec; use committable::Committable; -use ethereum_types::U256; use hotshot::{ traits::{NodeImplementation, TestableNodeImplementation}, types::{SignatureKey, SystemContextHandle}, @@ -43,6 +42,7 @@ use hotshot_types::{ ValidatorConfig, }; use jf_vid::VidScheme; +use primitive_types::U256; use serde::Serialize; use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; diff --git a/types/Cargo.toml b/types/Cargo.toml index c15d4865a3..e99de2e53d 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -18,48 +18,46 @@ async-trait = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } blake3 = { workspace = true } +clap = { workspace = true } committable = { workspace = true } -custom_debug = { workspace = true } +derive_more = { workspace = true, features = ["debug"] } digest = { workspace = true, features = ["rand_core"] } +displaydoc = { version = "0.2.5", default-features = false } +dyn-clone = "1.0.17" either = { workspace = true } -ethereum-types = { workspace = true } -futures = { workspace = true } -cdn-proto = { workspace = true } -lazy_static = { workspace = true } -memoize = { workspace = true } -rand = { workspace = true } -sha2 = { workspace = true } -thiserror = { workspace = true } -time = { workspace = true } -tracing = { workspace = true } -typenum = { workspace = true } -derivative = "2" -jf-vid = { workspace = true } +futures = { workspace = true, features = ["alloc"] } jf-pcs = { workspace = true } jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-utils = { workspace = true } +jf-vid = { workspace = true } +lazy_static = { workspace = true } +libp2p-identity = { workspace = true } +memoize = { workspace = true } +mnemonic = "1" +multiaddr = { workspace = true } +primitive-types = { workspace = true } +rand = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } serde-inline-default = { workspace = true } serde_bytes = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } tagged-base64 = { workspace = true } -vbs = { workspace = true } -displaydoc = { version = "0.2.5", default-features = false } -dyn-clone = "1.0.17" +thiserror = { workspace = true } +time = { workspace = true } +tokio = { workspace = true } +toml = { workspace = true } +tracing = { workspace = true } +typenum = { workspace = true } url = { workspace = true } utils = { path = "../utils" } +vbs = { workspace = true } vec1 = { workspace = true } -libp2p = { workspace = true } -serde_json = { workspace = true } -surf-disco = { workspace = true } -toml = { workspace = true } -clap = { workspace = true } -tokio = { workspace = true } [features] gpu-vid = ["jf-vid/gpu-vid"] test-srs = ["jf-vid/test-srs"] - [lints] workspace = true diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a27220a7ed..2fe7d72292 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -271,7 +271,7 @@ impl HotShotActionViews { /// A reference to the consensus algorithm /// /// This will contain the state of all rounds. -#[derive(custom_debug::Debug, Clone)] +#[derive(derive_more::Debug, Clone)] pub struct Consensus { /// The validated states that are currently loaded in memory. validated_state_map: BTreeMap>, diff --git a/types/src/data.rs b/types/src/data.rs index b97c84a62c..16cd7f5b3f 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -20,7 +20,6 @@ use std::{ use async_lock::RwLock; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; -use derivative::Derivative; use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; @@ -136,7 +135,7 @@ impl Committable for EpochNumber { impl_u64_wrapper!(EpochNumber); /// A proposal to start providing data availability for a block. -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound = "TYPES: NodeType")] pub struct DaProposal { /// Encoded transactions in the block to be applied. @@ -148,7 +147,7 @@ pub struct DaProposal { } /// A proposal to upgrade the network -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound = "TYPES: NodeType")] pub struct UpgradeProposal where @@ -233,7 +232,7 @@ impl VidDisperse { /// Helper type to encapsulate the various ways that proposal certificates can be captured and /// stored. -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] pub enum ViewChangeEvidence { /// Holds a timeout certificate. @@ -351,7 +350,7 @@ impl VidDisperseShare { } /// Proposal to append a block. -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] pub struct QuorumProposal { /// The block header to append @@ -374,7 +373,7 @@ pub struct QuorumProposal { } /// Proposal to append a block. -#[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] pub struct QuorumProposal2 { /// The block header to append @@ -509,7 +508,7 @@ pub trait TestableLeaf { /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. /// NOTE: `State` is constrained to implementing `BlockContents`, is `TypeMap::BlockPayload` -#[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] +#[derive(Serialize, Deserialize, Clone, Debug, Eq)] #[serde(bound(deserialize = ""))] pub struct Leaf { /// CurView from leader when proposing leaf @@ -536,7 +535,7 @@ pub struct Leaf { /// This is the consensus-internal analogous concept to a block, and it contains the block proper, /// as well as the hash of its parent `Leaf`. -#[derive(Serialize, Deserialize, Clone, Debug, Derivative, Eq)] +#[derive(Serialize, Deserialize, Clone, Debug, Eq)] #[serde(bound(deserialize = ""))] pub struct Leaf2 { /// CurView from leader when proposing leaf diff --git a/types/src/hotshot_config_file.rs b/types/src/hotshot_config_file.rs index 9a285ae6fc..53633d5d7a 100644 --- a/types/src/hotshot_config_file.rs +++ b/types/src/hotshot_config_file.rs @@ -6,7 +6,7 @@ use std::{num::NonZeroUsize, time::Duration}; -use surf_disco::Url; +use url::Url; use vec1::Vec1; use crate::{ diff --git a/types/src/lib.rs b/types/src/lib.rs index f158fcf3ae..5523b56b28 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -159,7 +159,7 @@ impl Default for PeerConfig { } /// Holds configuration for a `HotShot` -#[derive(Clone, custom_debug::Debug, serde::Serialize, serde::Deserialize)] +#[derive(Clone, derive_more::Debug, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] pub struct HotShotConfig { /// The proportion of nodes required before the orchestrator issues the ready signal, diff --git a/types/src/light_client.rs b/types/src/light_client.rs index 07644df0ef..7bafaee784 100644 --- a/types/src/light_client.rs +++ b/types/src/light_client.rs @@ -11,8 +11,8 @@ use std::collections::HashMap; use ark_ed_on_bn254::EdwardsConfig as Config; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ethereum_types::U256; use jf_signature::schnorr; +use primitive_types::U256; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use serde::{Deserialize, Serialize}; diff --git a/types/src/message.rs b/types/src/message.rs index f57b41ec0d..00ed7d9ac5 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -16,9 +16,7 @@ use std::{ }; use async_lock::RwLock; -use cdn_proto::util::mnemonic; use committable::Committable; -use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use utils::anytrace::*; use vbs::{ @@ -45,11 +43,12 @@ use crate::{ node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, + utils::mnemonic, vote::HasViewNumber, }; /// Incoming message -#[derive(Serialize, Deserialize, Clone, Derivative, PartialEq, Eq, Hash)] +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] pub struct Message { /// The sender of this message @@ -296,7 +295,7 @@ impl SequencingMessage { } } -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] #[allow(clippy::large_enum_variant)] /// TODO: Put `DataResponse` content in a `Box` to make enum smaller diff --git a/types/src/network.rs b/types/src/network.rs index 0927d59b93..bb9e818d5a 100644 --- a/types/src/network.rs +++ b/types/src/network.rs @@ -7,7 +7,8 @@ use std::{fs, ops::Range, path::Path, time::Duration, vec}; use clap::ValueEnum; -use libp2p::{Multiaddr, PeerId}; +use libp2p_identity::PeerId; +use multiaddr::Multiaddr; use serde_inline_default::serde_inline_default; use thiserror::Error; use tracing::error; diff --git a/types/src/qc.rs b/types/src/qc.rs index 2f556159a2..cf86646104 100644 --- a/types/src/qc.rs +++ b/types/src/qc.rs @@ -17,8 +17,8 @@ use ark_std::{ }; use bitvec::prelude::*; use digest::generic_array::GenericArray; -use ethereum_types::U256; use jf_signature::{AggregateableSignatureSchemes, SignatureError}; +use primitive_types::U256; use serde::{Deserialize, Serialize}; use typenum::U32; diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 43110ed8b2..2ad86804ef 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -9,11 +9,11 @@ use ark_serialize::SerializationError; use bitvec::{slice::BitSlice, vec::BitVec}; use digest::generic_array::GenericArray; -use ethereum_types::U256; use jf_signature::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, KeyPair, SignKey, VerKey}, SignatureError, SignatureScheme, }; +use primitive_types::U256; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use tracing::instrument; diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 9c75b294a3..17fd4aa1c2 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -15,7 +15,7 @@ use std::{ use async_lock::RwLock; use committable::{Commitment, Committable}; -use ethereum_types::U256; +use primitive_types::U256; use serde::{Deserialize, Serialize}; use utils::anytrace::*; diff --git a/types/src/stake_table.rs b/types/src/stake_table.rs index eddfd1caef..106894c4ac 100644 --- a/types/src/stake_table.rs +++ b/types/src/stake_table.rs @@ -6,7 +6,7 @@ //! Types and structs related to the stake table -use ethereum_types::U256; +use primitive_types::U256; use serde::{Deserialize, Serialize}; use crate::traits::signature_key::{SignatureKey, StakeTableEntryType}; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 750ff4d4ec..5d6670407f 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -18,7 +18,6 @@ use std::{ }; use async_trait::async_trait; -use derivative::Derivative; use dyn_clone::DynClone; use futures::{future::join_all, Future}; use rand::{ @@ -121,7 +120,7 @@ pub trait ViewMessage { } /// A request for some data that the consensus layer is asking for. -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] pub struct DataRequest { /// Request @@ -134,7 +133,7 @@ pub struct DataRequest { } /// Underlying data request -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] pub enum RequestKind { /// Request VID data by our key and the VID commitment Vid(TYPES::View, TYPES::SignatureKey), @@ -146,7 +145,7 @@ pub enum RequestKind { /// A response for a request. `SequencingMessage` is the same as other network messages /// The kind of message `M` is is determined by what we requested -#[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] #[allow(clippy::large_enum_variant)] /// TODO: Put `Found` content in a `Box` to make enum smaller diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index 93edb4e1ba..aaa205a549 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -17,8 +17,8 @@ use std::{ use ark_serialize::SerializationError; use bitvec::prelude::*; use committable::Committable; -use ethereum_types::U256; use jf_vid::VidScheme; +use primitive_types::U256; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tagged_base64::{TaggedBase64, Tb64Error}; diff --git a/types/src/utils.rs b/types/src/utils.rs index aacbafa652..c4a4dfb927 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -6,7 +6,11 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use std::{ops::Deref, sync::Arc}; +use std::{ + hash::{Hash, Hasher}, + ops::Deref, + sync::Arc, +}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ @@ -222,3 +226,11 @@ pub fn epoch_from_block_number(block_number: u64, epoch_height: u64) -> u64 { block_number / epoch_height + 1 } } + +/// A function for generating a cute little user mnemonic from a hash +#[must_use] +pub fn mnemonic(bytes: H) -> String { + let mut state = std::collections::hash_map::DefaultHasher::new(); + bytes.hash(&mut state); + mnemonic::to_string(state.finish().to_le_bytes()) +} diff --git a/types/src/vote.rs b/types/src/vote.rs index 1275e172e0..6291599004 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -14,7 +14,7 @@ use std::{ use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use either::Either; -use ethereum_types::U256; +use primitive_types::U256; use tracing::error; use utils::anytrace::Result; From 8c30e15dc40c0c136d9746ef141f8ecbea5509ad Mon Sep 17 00:00:00 2001 From: Noisy <125606576+donatik27@users.noreply.github.com> Date: Thu, 21 Nov 2024 19:58:58 +0100 Subject: [PATCH 1314/1393] Fix documentation typos and improve clarity (#3894) * Update mod.rs * Update def.rs * Update mod.rs --- hotshot/src/tasks/mod.rs | 2 +- libp2p-networking/src/network/def.rs | 4 ++-- task-impls/src/quorum_vote/mod.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index afe72497eb..b19ba62106 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -94,7 +94,7 @@ pub fn add_response_task, V: Versi )); } -/// Add a task which updates our queue lenght metric at a set interval +/// Add a task which updates our queue length metric at a set interval pub fn add_queue_len_task, V: Versions>( handle: &mut SystemContextHandle, ) { diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 7fcf2da216..a1f119a528 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -20,7 +20,7 @@ use tracing::{debug, error}; use super::{behaviours::dht::store::ValidatedStore, cbor, NetworkEventInternal}; /// Overarching network behaviour performing: -/// - network topology discovoery +/// - network topology discovery /// - direct messaging /// - p2p broadcast /// - connection management @@ -78,7 +78,7 @@ impl NetworkDef { /// Add an address pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) { // NOTE to get this address to play nice with the other - // behaviours using the DHT for ouring + // behaviours using the DHT for routing // we only need to add this address to the DHT since it // is always enabled. If it were not always enabled, // we would need to manually add the address to diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 5f0b700aaa..20e851fa14 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -49,7 +49,7 @@ mod handlers; /// Vote dependency types. #[derive(Debug, PartialEq)] enum VoteDependency { - /// For the `QuroumProposalValidated` event after validating `QuorumProposalRecv`. + /// For the `QuorumProposalValidated` event after validating `QuorumProposalRecv`. QuorumProposal, /// For the `DaCertificateRecv` event. Dac, From 5d464c244aa2338420561d2af3c5b793e205ac01 Mon Sep 17 00:00:00 2001 From: Mathis Date: Mon, 25 Nov 2024 11:17:33 +0100 Subject: [PATCH 1315/1393] CI: add typos job (#3916) * CI: add typos job - Add typos to flake .nix. - Add typos config file: ignore files with false hits. There are mostly fixes to comments but a few small changes to rust code as well. Theres should be no functional changes or changes to public interfaces. * rustls: 0.23.16 -> 0.23.18, fix cargo audit --- .../src/auction_results_provider_types.rs | 2 +- example-types/src/block_types.rs | 2 +- example-types/src/storage_types.rs | 4 ++-- examples/infra/mod.rs | 2 +- hotshot/src/lib.rs | 22 +++++++++---------- .../src/traits/networking/libp2p_network.rs | 4 ++-- hotshot/src/types/handle.rs | 4 ++-- .../src/network/behaviours/dht/mod.rs | 2 +- libp2p-networking/src/network/def.rs | 2 +- libp2p-networking/src/network/node.rs | 2 +- orchestrator/src/client.rs | 2 +- task-impls/src/consensus/handlers.rs | 2 +- task-impls/src/network.rs | 2 +- task-impls/src/quorum_proposal/handlers.rs | 6 ++--- task-impls/src/quorum_proposal/mod.rs | 2 +- task-impls/src/request.rs | 2 +- task-impls/src/view_sync.rs | 2 +- task-impls/src/vote_collection.rs | 2 +- task/src/dependency_task.rs | 2 +- testing/src/block_builder/random.rs | 2 +- testing/src/byzantine/mod.rs | 2 +- testing/src/spinning_task.rs | 6 ++--- testing/src/test_runner.rs | 4 ++-- testing/src/view_generator.rs | 2 +- .../tests/tests_1/vote_dependency_handle.rs | 2 +- types/src/consensus.rs | 8 +++---- types/src/data.rs | 4 ++-- types/src/event.rs | 2 +- types/src/network.rs | 2 +- types/src/request_response.rs | 2 +- types/src/traits/metrics.rs | 4 ++-- types/src/traits/node_implementation.rs | 2 +- types/src/traits/storage.rs | 4 ++-- 33 files changed, 57 insertions(+), 57 deletions(-) diff --git a/example-types/src/auction_results_provider_types.rs b/example-types/src/auction_results_provider_types.rs index 01978c1359..423f3b879c 100644 --- a/example-types/src/auction_results_provider_types.rs +++ b/example-types/src/auction_results_provider_types.rs @@ -29,7 +29,7 @@ impl HasUrls for TestAuctionResult { /// The test auction results type is used to mimic the results from the Solver. #[derive(Clone, Debug, Default)] pub struct TestAuctionResultsProvider { - /// We intentionally allow for the results to be pre-cooked for the unit test to gurantee a + /// We intentionally allow for the results to be pre-cooked for the unit test to guarantee a /// particular outcome is met. pub solver_results: TYPES::AuctionResult, diff --git a/example-types/src/block_types.rs b/example-types/src/block_types.rs index b7e763546d..3d42344bc2 100644 --- a/example-types/src/block_types.rs +++ b/example-types/src/block_types.rs @@ -262,7 +262,7 @@ pub struct TestBlockHeader { pub payload_commitment: VidCommitment, /// Fast commitment for builder verification pub builder_commitment: BuilderCommitment, - /// block metdata + /// block metadata pub metadata: TestMetadata, /// Timestamp when this header was created. pub timestamp: u64, diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index be94cec84d..acedb42007 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -226,7 +226,7 @@ impl Storage for TestStorage { } async fn update_undecided_state( &self, - _leafs: CommitmentMap>, + _leaves: CommitmentMap>, _state: BTreeMap>, ) -> Result<()> { if self.should_return_err { @@ -237,7 +237,7 @@ impl Storage for TestStorage { } async fn update_undecided_state2( &self, - _leafs: CommitmentMap>, + _leaves: CommitmentMap>, _state: BTreeMap>, ) -> Result<()> { if self.should_return_err { diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 9e031c02fa..3624d44ed5 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -556,7 +556,7 @@ pub trait RunDa< let total_time_elapsed = start.elapsed(); // in seconds println!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}"); if total_transactions_committed != 0 { - // prevent devision by 0 + // prevent division by 0 let total_time_elapsed_sec = std::cmp::max(total_time_elapsed.as_secs(), 1u64); // extra 8 bytes for timestamp let throughput_bytes_per_sec = total_transactions_committed diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 9f6d8591e5..3441cd93c8 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -228,7 +228,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext { /// If it's given, we'll use it to construct the `SystemContext`. state_delta: Option>::Delta>>, - /// Starting view number that should be equivelant to the view the node shut down with last. + /// Starting view number that should be equivalent to the view the node shut down with last. start_view: TYPES::View, - /// Starting epoch number that should be equivelant to the epoch the node shut down with last. + /// Starting epoch number that should be equivalent to the epoch the node shut down with last. start_epoch: TYPES::Epoch, /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. @@ -1003,9 +1003,9 @@ pub struct HotShotInitializer { high_qc: QuorumCertificate2, /// Previously decided upgrade certificate; this is necessary if an upgrade has happened and we are not restarting with the new version decided_upgrade_certificate: Option>, - /// Undecided leafs that were seen, but not yet decided on. These allow a restarting node + /// Undecided leaves that were seen, but not yet decided on. These allow a restarting node /// to vote and propose right away if they didn't miss anything while down. - undecided_leafs: Vec>, + undecided_leaves: Vec>, /// Not yet decided state undecided_state: BTreeMap>, /// Proposals we have sent out to provide to others for catchup @@ -1036,7 +1036,7 @@ impl HotShotInitializer { saved_proposals: BTreeMap::new(), high_qc, decided_upgrade_certificate: None, - undecided_leafs: Vec::new(), + undecided_leaves: Vec::new(), undecided_state: BTreeMap::new(), instance_state, }) @@ -1060,7 +1060,7 @@ impl HotShotInitializer { saved_proposals: BTreeMap>>, high_qc: QuorumCertificate2, decided_upgrade_certificate: Option>, - undecided_leafs: Vec>, + undecided_leaves: Vec>, undecided_state: BTreeMap>, ) -> Self { Self { @@ -1074,7 +1074,7 @@ impl HotShotInitializer { saved_proposals, high_qc, decided_upgrade_certificate, - undecided_leafs, + undecided_leaves, undecided_state, } } diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 0713429e66..0f1fbfc858 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -968,14 +968,14 @@ impl ConnectedNetwork for Libp2pNetwork { /// The libp2p view update is a special operation intrinsic to its internal behavior. /// - /// Libp2p needs to do a lookup because a libp2p address is not releated to + /// Libp2p needs to do a lookup because a libp2p address is not related to /// hotshot keys. So in libp2p we store a mapping of HotShot key to libp2p address /// in a distributed hash table. /// /// This means to directly message someone on libp2p we need to lookup in the hash /// table what their libp2p address is, using their HotShot public key as the key. /// - /// So the logic with libp2p is to prefetch upcomming leaders libp2p address to + /// So the logic with libp2p is to prefetch upcoming leaders libp2p address to /// save time when we later need to direct message the leader our vote. Hence the /// use of the future view and leader to queue the lookups. async fn update_view<'a, TYPES>(&'a self, view: u64, epoch: u64, membership: &TYPES::Membership) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 19fcb97f1d..4719b0cde2 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -94,7 +94,7 @@ impl + 'static, V: Versions> self.output_event_stream.1.activate_cloned() } - /// Message other participents with a serialized message from the application + /// Message other participants with a serialized message from the application /// Receivers of this message will get an `Event::ExternalMessageReceived` via /// the event stream. /// @@ -197,7 +197,7 @@ impl + 'static, V: Versions> if commit == leaf_commitment { return Ok(quorum_proposal.clone()); } - tracing::warn!("Proposal receied from request has different commitment than expected.\nExpected = {:?}\nReceived{:?}", leaf_commitment, commit); + tracing::warn!("Proposal received from request has different commitment than expected.\nExpected = {:?}\nReceived{:?}", leaf_commitment, commit); } } }) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index f6923dbcb7..3e1bc95673 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -394,7 +394,7 @@ impl DHTBehaviour { } } - /// Send that the bootsrap suceeded + /// Send that the bootstrap succeeded fn finish_bootstrap(&mut self) { if let Some(mut tx) = self.bootstrap_tx.clone() { spawn(async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await }); diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index a1f119a528..3f5189e3ac 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -47,7 +47,7 @@ pub struct NetworkDef { #[debug(skip)] pub direct_message: cbor::Behaviour, Vec>, - /// Auto NAT behaviour to determine if we are publically reachable and + /// Auto NAT behaviour to determine if we are publicly reachable and /// by which address #[debug(skip)] pub autonat: libp2p::autonat::Behaviour, diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index cc5aed5a70..05bacb310d 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -355,7 +355,7 @@ impl NetworkNode { } /// event handler for client events - /// currectly supported actions include + /// currently supported actions include /// - shutting down the swarm /// - gossipping a message to known peers on the `global` topic /// - returning the id of the current peer diff --git a/orchestrator/src/client.rs b/orchestrator/src/client.rs index 800aed49cd..de167ff505 100644 --- a/orchestrator/src/client.rs +++ b/orchestrator/src/client.rs @@ -103,7 +103,7 @@ pub struct BenchResultsDownloadConfig { // Results starting here /// Whether the results are partially collected /// "One" when the results are collected for one node - /// "Half" when the results are collecte for half running nodes if not all nodes terminate successfully + /// "Half" when the results are collective for half running nodes if not all nodes terminate successfully /// "Full" if the results are successfully collected from all nodes pub partial_results: String, /// The average latency of the transactions diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 2de541262b..8c60392cf5 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -127,7 +127,7 @@ pub async fn send_high_qc= V::Epochs::VERSION, - debug!("HotStuff 2 updgrade not yet in effect") + debug!("HotStuff 2 upgrade not yet in effect") ); let high_qc = task_state.consensus.read().await.high_qc().clone(); let leader = task_state diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a4c972b95f..bf23787c05 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -332,7 +332,7 @@ impl< return Err(()); } // If the action was view sync record it as a vote, but we don't - // want to limit to 1 View sycn vote above so change the action here. + // want to limit to 1 View sync vote above so change the action here. if matches!(action, HotShotAction::ViewSyncVote) { action = HotShotAction::Vote; } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 94f2b12207..f8d0d93d75 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -55,7 +55,7 @@ pub(crate) enum ProposalDependency { /// For the `Qc2Formed` event timeout branch. TimeoutCert, - /// For the `QuroumProposalRecv` event. + /// For the `QuorumProposalRecv` event. Proposal, /// For the `VidShareValidated` event. @@ -110,7 +110,7 @@ pub struct ProposalDependencyHandle { /// The time this view started pub view_start_time: Instant, - /// The higest_qc we've seen at the start of this task + /// The highest_qc we've seen at the start of this task pub highest_qc: QuorumCertificate2, } @@ -137,7 +137,7 @@ impl ProposalDependencyHandle { None } /// Waits for the ocnfigured timeout for nodes to send HighQc messages to us. We'll - /// then propose with the higest QC from among these proposals. + /// then propose with the highest QC from among these proposals. async fn wait_for_highest_qc(&mut self) { tracing::error!("waiting for QC"); // If we haven't upgraded to Hotstuff 2 just return the high qc right away diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index cf06c620c6..f7e50806d0 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -82,7 +82,7 @@ pub struct QuorumProposalTaskState /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, - /// The higest_qc we've seen at the start of this task + /// The highest_qc we've seen at the start of this task pub highest_qc: QuorumCertificate2, } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 1a565a85de..6e9384f3ea 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -242,7 +242,7 @@ impl> NetworkRequestState ViewSyncReplicaTaskState { HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; if self.next_view != TYPES::View::new(*view_number) { - tracing::error!("Unexpected view number to triger view sync"); + tracing::error!("Unexpected view number to trigger view sync"); return None; } diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 6023f5cdfb..b58f74dd7e 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -186,7 +186,7 @@ pub struct AccumulatorInfo { /// Generic function for spawning a vote task. Returns the event stream id of the spawned task if created /// /// # Errors -/// If we faile to create the accumulator +/// If we failed to create the accumulator /// /// # Panics /// Calls unwrap but should never panic. diff --git a/task/src/dependency_task.rs b/task/src/dependency_task.rs index fb196151eb..b5bb23734f 100644 --- a/task/src/dependency_task.rs +++ b/task/src/dependency_task.rs @@ -20,7 +20,7 @@ pub trait HandleDepOutput: Send + Sized + Sync + 'static { /// A task that runs until it's dependency completes and it handles the result pub struct DependencyTask + Send, H: HandleDepOutput + Send> { - /// Dependency this taks waits for + /// Dependency this tasks waits for pub(crate) dep: D, /// Handles the results returned from `self.dep.completed().await` pub(crate) handle: H, diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index 5f14578a5e..b5c1fb93f7 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -242,7 +242,7 @@ where { /// Create new [`RandomBuilderSource`] #[must_use] - #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 + #[allow(clippy::missing_panics_doc)] // only panics if 256 == 0 pub fn new(pub_key: TYPES::BuilderSignatureKey, num_nodes: Arc>) -> Self { Self { blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), diff --git a/testing/src/byzantine/mod.rs b/testing/src/byzantine/mod.rs index 0673cd72b8..874eb6b750 100644 --- a/testing/src/byzantine/mod.rs +++ b/testing/src/byzantine/mod.rs @@ -1,2 +1,2 @@ -/// Byzantine defintions and implementations of different behaviours +/// Byzantine definitions and implementations of different behaviours pub mod byzantine_behaviour; diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index e98726ff51..4382f12ce0 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -43,7 +43,7 @@ use crate::{ test_task::{TestResult, TestTaskState}, }; -/// convience type for state and block +/// convenience type for state and block pub type StateAndBlock = (Vec, Vec); /// Spinning task state @@ -225,7 +225,7 @@ where context: LateNodeContext::Restart, }) = self.late_start.get(&node_id) else { - panic!("Restarted Nodes must have an unitialized context"); + panic!("Restarted Nodes must have an uninitialized context"); }; let storage = node.handle.storage().clone(); @@ -391,7 +391,7 @@ pub enum NodeAction { /// Take a node down to be restarted after a number of views RestartDown(u64), /// Start a node up again after it's been shutdown for restart. This - /// should only be created following a `ResartDown` + /// should only be created following a `RestartDown` RestartUp, } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 3460795db0..95f494808f 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -660,7 +660,7 @@ pub struct Node, V: Versio pub handle: SystemContextHandle, } -/// This type combines all of the paramters needed to build the context for a node that started +/// This type combines all of the parameters needed to build the context for a node that started /// late during a unit test or integration test. pub struct LateNodeContextParameters> { /// The storage trait for Sequencer persistence. @@ -669,7 +669,7 @@ pub struct LateNodeContextParameters, - /// The config associted with this node. + /// The config associated with this node. pub config: HotShotConfig, /// The marketplace config for this node. diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 53a0d74b7e..2da7829fc9 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -577,7 +577,7 @@ impl TestViewGenerator { } } - pub async fn next_from_anscestor_view(&mut self, ancestor: TestView) { + pub async fn next_from_ancestor_view(&mut self, ancestor: TestView) { if let Some(ref view) = self.current_view { self.current_view = Some(view.next_view_from_ancestor(ancestor).await) } else { diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index f22d658d34..938dbbffa8 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -30,7 +30,7 @@ async fn test_vote_dependency_handle() { hotshot::helpers::initialize_logging(); - // We use a node ID of 2 here abitrarily. We just need it to build the system handle. + // We use a node ID of 2 here arbitrarily. We just need it to build the system handle. let node_id = 2; // Construct the system handle for the node ID to build all of the state objects. let handle = build_system_handle::(node_id) diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 2fe7d72292..81250b7b5b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -556,7 +556,7 @@ impl Consensus { } // TODO Add logic to prevent double voting. For now the simple check if // the last voted view is less than the view we are trying to vote doesn't work - // becuase the leader of view n + 1 may propose to the DA (and we would vote) + // because the leader of view n + 1 may propose to the DA (and we would vote) // before the leader of view n. return true; } @@ -624,7 +624,7 @@ impl Consensus { /// Update the validated state map with a new view_number/view combo. /// /// # Errors - /// Can return an error when the new view contains less information than the exisiting view + /// Can return an error when the new view contains less information than the existing view /// with the same view number. pub fn update_da_view( &mut self, @@ -640,7 +640,7 @@ impl Consensus { /// Update the validated state map with a new view_number/view combo. /// /// # Errors - /// Can return an error when the new view contains less information than the exisiting view + /// Can return an error when the new view contains less information than the existing view /// with the same view number. pub fn update_leaf( &mut self, @@ -664,7 +664,7 @@ impl Consensus { /// Update the validated state map with a new view_number/view combo. /// /// # Errors - /// Can return an error when the new view contains less information than the exisiting view + /// Can return an error when the new view contains less information than the existing view /// with the same view number. fn update_validated_state_map( &mut self, diff --git a/types/src/data.rs b/types/src/data.rs index 16cd7f5b3f..39a608f2f0 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -646,7 +646,7 @@ impl Leaf2 { /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf /// - /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. + /// This may not be a complete function. Please double-check that it performs the checks you expect before substituting validation logic with it. /// /// # Errors /// Returns an error if the certificates are not identical, or that when we no longer see a @@ -946,7 +946,7 @@ impl Leaf { /// Validate that a leaf has the right upgrade certificate to be the immediate child of another leaf /// - /// This may not be a complete function. Please double-check that it performs the checks you expect before subtituting validation logic with it. + /// This may not be a complete function. Please double-check that it performs the checks you expect before substituting validation logic with it. /// /// # Errors /// Returns an error if the certificates are not identical, or that when we no longer see a diff --git a/types/src/event.rs b/types/src/event.rs index 02d7f07ac2..c4ae586866 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -110,7 +110,7 @@ pub enum EventType { }, /// A new decision event was issued Decide { - /// The chain of Leafs that were committed by this decision + /// The chain of Leaves that were committed by this decision /// /// This list is sorted in reverse view number order, with the newest (highest view number) /// block first in the list. diff --git a/types/src/network.rs b/types/src/network.rs index bb9e818d5a..f2d35b6984 100644 --- a/types/src/network.rs +++ b/types/src/network.rs @@ -200,7 +200,7 @@ impl NetworkConfig { /// // NOTE: broken due to staticelectionconfig not being importable /// // cannot import staticelectionconfig from hotshot without creating circular dependency /// // making this work probably involves the `types` crate implementing a dummy - /// // electionconfigtype just ot make this example work + /// // electionconfigtype just to make this example work /// let config = NetworkConfig::::from_file(file).unwrap(); /// ``` pub fn from_file(file: String) -> Result { diff --git a/types/src/request_response.rs b/types/src/request_response.rs index 6829d19743..89e78c9c1e 100644 --- a/types/src/request_response.rs +++ b/types/src/request_response.rs @@ -18,7 +18,7 @@ pub struct ProposalRequestPayload { /// The view number that we're requesting a proposal for. pub view_number: TYPES::View, - /// Our public key. The ensures that the receipient can reply to + /// Our public key. The ensures that the recipient can reply to /// us directly. pub key: TYPES::SignatureKey, } diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index e81d4d423f..38d9a8d6ea 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -58,7 +58,7 @@ pub trait Metrics: Send + Sync + DynClone + Debug { /// A family of related metrics, partitioned by their label values. /// /// All metrics in a family have the same name. They are distinguished by a vector of strings -/// called labels. Each label has a name and a value, and each distinct vector of lable values +/// called labels. Each label has a name and a value, and each distinct vector of label values /// within a family acts like a distinct metric. /// /// The family object is used to instantiate individual metrics within the family via the @@ -217,7 +217,7 @@ pub trait Gauge: Send + Sync + Debug + DynClone { /// Set the gauge value fn set(&self, amount: usize); - /// Update the guage value + /// Update the gauge value fn update(&self, delts: i64); } diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 32093b8714..77a4971e24 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -45,7 +45,7 @@ use crate::{ /// essentially ensures that the results returned by the [`AuctionResultsProvider`] trait includes a /// list of urls for the builders that HotShot must request from. pub trait HasUrls { - /// Returns the builer url associated with the datatype + /// Returns the builder url associated with the datatype fn urls(&self) -> Vec; } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 4ce9fdeac4..7782ef0101 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -56,14 +56,14 @@ pub trait Storage: Send + Sync + Clone { /// and the undecided state. async fn update_undecided_state( &self, - leafs: CommitmentMap>, + leaves: CommitmentMap>, state: BTreeMap>, ) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, /// and the undecided state. async fn update_undecided_state2( &self, - leafs: CommitmentMap>, + leaves: CommitmentMap>, state: BTreeMap>, ) -> Result<()>; /// Upgrade the current decided upgrade certificate in storage. From aa4f9598e25f160547aa9bf13b378631187f8a98 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:50:10 -0300 Subject: [PATCH 1316/1393] Move marketplace version in build API to 0_99 (#3920) Co-authored-by: tbro --- builder-api/src/lib.rs | 2 +- builder-api/src/{v0_3 => v0_99}/builder.rs | 0 builder-api/src/{v0_3 => v0_99}/data_source.rs | 0 builder-api/src/{v0_3 => v0_99}/mod.rs | 2 +- task-impls/src/builder.rs | 6 +++--- task-impls/src/transactions.rs | 2 +- testing/src/block_builder/mod.rs | 6 +++--- testing/src/block_builder/simple.rs | 6 +++--- 8 files changed, 12 insertions(+), 12 deletions(-) rename builder-api/src/{v0_3 => v0_99}/builder.rs (100%) rename builder-api/src/{v0_3 => v0_99}/data_source.rs (100%) rename builder-api/src/{v0_3 => v0_99}/mod.rs (64%) diff --git a/builder-api/src/lib.rs b/builder-api/src/lib.rs index d273528191..d1714c2705 100644 --- a/builder-api/src/lib.rs +++ b/builder-api/src/lib.rs @@ -10,4 +10,4 @@ pub mod v0_2 { pub use super::v0_1::*; pub type Version = vbs::version::StaticVersion<0, 2>; } -pub mod v0_3; +pub mod v0_99; diff --git a/builder-api/src/v0_3/builder.rs b/builder-api/src/v0_99/builder.rs similarity index 100% rename from builder-api/src/v0_3/builder.rs rename to builder-api/src/v0_99/builder.rs diff --git a/builder-api/src/v0_3/data_source.rs b/builder-api/src/v0_99/data_source.rs similarity index 100% rename from builder-api/src/v0_3/data_source.rs rename to builder-api/src/v0_99/data_source.rs diff --git a/builder-api/src/v0_3/mod.rs b/builder-api/src/v0_99/mod.rs similarity index 64% rename from builder-api/src/v0_3/mod.rs rename to builder-api/src/v0_99/mod.rs index b691543cb9..29691d0c26 100644 --- a/builder-api/src/v0_3/mod.rs +++ b/builder-api/src/v0_99/mod.rs @@ -3,4 +3,4 @@ pub mod data_source; /// No changes to this module pub use super::v0_1::query_data; -pub type Version = vbs::version::StaticVersion<0, 3>; +pub type Version = vbs::version::StaticVersion<0, 99>; diff --git a/task-impls/src/builder.rs b/task-impls/src/builder.rs index 330f5200d6..0372209c3f 100644 --- a/task-impls/src/builder.rs +++ b/task-impls/src/builder.rs @@ -224,8 +224,8 @@ pub mod v0_2 { } /// Version 0.3: marketplace. Bundles. -pub mod v0_3 { - pub use hotshot_builder_api::v0_3::Version; +pub mod v0_99 { + pub use hotshot_builder_api::v0_99::Version; use hotshot_types::{ bundle::Bundle, constants::MARKETPLACE_BUILDER_MODULE, traits::node_implementation::NodeType, vid::VidCommitment, @@ -235,7 +235,7 @@ pub mod v0_3 { pub use super::BuilderClientError; /// Client for builder API - pub type BuilderClient = super::BuilderClient>; + pub type BuilderClient = super::BuilderClient>; impl BuilderClient { /// Claim block diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 7018969e3f..f6da7ffec0 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -39,7 +39,7 @@ use vec1::Vec1; use crate::{ builder::{ - v0_1::BuilderClient as BuilderClientBase, v0_3::BuilderClient as BuilderClientMarketplace, + v0_1::BuilderClient as BuilderClientBase, v0_99::BuilderClient as BuilderClientMarketplace, }, events::{HotShotEvent, HotShotTaskCompleted}, helpers::broadcast_event, diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index a488b96dab..45b8f07461 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -17,7 +17,7 @@ use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{Error, Options}, }, - v0_3, + v0_99, }; use hotshot_types::{ constants::{LEGACY_BUILDER_MODULE, MARKETPLACE_BUILDER_MODULE}, @@ -84,7 +84,7 @@ pub fn run_builder_source( ::State: Sync + Send + v0_1::data_source::BuilderDataSource - + v0_3::data_source::BuilderDataSource, + + v0_99::data_source::BuilderDataSource, { spawn(async move { let start_builder = |url: Url, source: Source| -> _ { @@ -92,7 +92,7 @@ pub fn run_builder_source( &Options::default(), ) .expect("Failed to construct the builder API"); - let builder_api_0_3 = hotshot_builder_api::v0_3::builder::define_api::( + let builder_api_0_3 = hotshot_builder_api::v0_99::builder::define_api::( &Options::default(), ) .expect("Failed to construct the builder API"); diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index e490d8c598..8376633afc 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -29,7 +29,7 @@ use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Error, Options}, }, - v0_3, + v0_99, }; use hotshot_types::{ bundle::Bundle, @@ -132,7 +132,7 @@ impl ReadState for SimpleBuilderSource { } #[async_trait] -impl v0_3::data_source::BuilderDataSource for SimpleBuilderSource +impl v0_99::data_source::BuilderDataSource for SimpleBuilderSource where ::InstanceState: Default, { @@ -342,7 +342,7 @@ impl SimpleBuilderSource { >(&Options::default()) .expect("Failed to construct the builder API"); - let builder_api_0_3 = hotshot_builder_api::v0_3::builder::define_api::< + let builder_api_0_3 = hotshot_builder_api::v0_99::builder::define_api::< SimpleBuilderSource, TYPES, >(&Options::default()) From 322ee1fa0be18bbf3640ceff2e4d69fb89d9b783 Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:54:17 -0300 Subject: [PATCH 1317/1393] Flatten memberships into a single type (#3867) Delete Memberships and replace functionality. Add some methods to `Membership` trait to deal w/ collapsing into one type both kinds of memberships (stake and DA). * avoid passing membership into `is_valid_cert (see #3918) * for DA, avoid proxying threshold through `Threshold` trait * remove `Topic` param from `Membership::new * Split cert impls by marker (#3891) * add membership methods to Cert trait * remove non-existent test from justfile --------- Co-authored-by: tbro --- examples/infra/mod.rs | 23 +-- hotshot/src/lib.rs | 22 +-- hotshot/src/tasks/mod.rs | 34 ++--- hotshot/src/tasks/task_state.rs | 24 ++- .../traits/election/randomized_committee.rs | 90 ++++++++++-- .../src/traits/election/static_committee.rs | 86 +++++++++-- .../static_committee_leader_two_views.rs | 85 +++++++++-- hotshot/src/types/handle.rs | 7 +- libp2p-networking/src/network/transport.rs | 20 +-- task-impls/src/consensus/handlers.rs | 16 +- task-impls/src/consensus/mod.rs | 2 +- task-impls/src/da.rs | 25 ++-- task-impls/src/helpers.rs | 19 ++- task-impls/src/network.rs | 32 ++-- task-impls/src/quorum_proposal/handlers.rs | 7 +- task-impls/src/quorum_proposal/mod.rs | 8 +- .../src/quorum_proposal_recv/handlers.rs | 6 +- task-impls/src/quorum_vote/mod.rs | 29 ++-- task-impls/src/request.rs | 12 +- task-impls/src/view_sync.rs | 18 ++- task-impls/src/vote_collection.rs | 16 +- testing/src/byzantine/byzantine_behaviour.rs | 6 +- testing/src/helpers.rs | 35 ++--- testing/src/test_builder.rs | 4 +- testing/src/test_runner.rs | 25 ++-- testing/src/view_generator.rs | 65 +++------ testing/tests/tests_1/da_task.rs | 27 ++-- testing/tests/tests_1/message.rs | 14 +- testing/tests/tests_1/network_task.rs | 18 +-- .../tests_1/quorum_proposal_recv_task.rs | 11 +- testing/tests/tests_1/quorum_proposal_task.rs | 70 +++++---- testing/tests/tests_1/quorum_vote_task.rs | 18 +-- testing/tests/tests_1/transaction_task.rs | 9 +- .../tests_1/upgrade_task_with_proposal.rs | 14 +- .../tests/tests_1/upgrade_task_with_vote.rs | 6 +- testing/tests/tests_1/vid_task.rs | 11 +- .../tests/tests_1/vote_dependency_handle.rs | 7 +- types/src/simple_certificate.rs | 137 ++++++++++++++++-- types/src/simple_vote.rs | 26 ++-- types/src/traits/election.rs | 43 +++++- types/src/vote.rs | 41 ++++-- 41 files changed, 723 insertions(+), 445 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 3624d44ed5..2462215dd9 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -30,7 +30,7 @@ use hotshot::{ BlockPayload, NodeImplementation, }, types::SystemContextHandle, - MarketplaceConfig, Memberships, SystemContext, + MarketplaceConfig, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -55,7 +55,7 @@ use hotshot_types::{ traits::{ block_contents::{BlockHeader, TestableBlock}, election::Membership, - network::{ConnectedNetwork, Topic}, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeType, Versions}, states::TestableState, }, @@ -381,22 +381,9 @@ pub trait RunDa< let da_nodes = config.config.known_da_nodes.clone(); - // Create the quorum membership from all nodes - let quorum_membership = ::Membership::new( - all_nodes.clone(), - all_nodes.clone(), - Topic::Global, - ); - // Create the quorum membership from all nodes, specifying the committee // as the known da nodes - let da_membership = - ::Membership::new(all_nodes.clone(), da_nodes, Topic::Da); - - let memberships = Memberships { - quorum_membership: quorum_membership.clone(), - da_membership, - }; + let memberships = ::Membership::new(all_nodes, da_nodes); let marketplace_config = MarketplaceConfig { auction_results_provider: TestAuctionResultsProvider::::default().into(), @@ -544,7 +531,6 @@ pub trait RunDa< let num_eligible_leaders = context .hotshot .memberships - .quorum_membership .committee_leaders(TYPES::View::genesis(), TYPES::Epoch::genesis()) .len(); let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); @@ -752,7 +738,8 @@ where // Create the qurorum membership from the list of known nodes let all_nodes = config.config.known_nodes_with_stake.clone(); - let quorum_membership = TYPES::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); + let da_nodes = config.config.known_da_nodes.clone(); + let quorum_membership = TYPES::Membership::new(all_nodes, da_nodes); // Derive the bind address let bind_address = diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 3441cd93c8..83d110e380 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -92,15 +92,6 @@ pub struct MarketplaceConfig> { pub fallback_builder_url: Url, } -/// Bundle of all the memberships a consensus instance uses -#[derive(Clone)] -pub struct Memberships { - /// The entire quorum - pub quorum_membership: TYPES::Membership, - /// The DA nodes - pub da_membership: TYPES::Membership, -} - /// Holds the state needed to participate in `HotShot` consensus pub struct SystemContext, V: Versions> { /// The public key of this node @@ -116,7 +107,7 @@ pub struct SystemContext, V: Versi pub network: Arc, /// Memberships used by consensus - pub memberships: Arc>, + pub memberships: Arc, /// the metrics that the implementor is using. metrics: Arc, @@ -207,7 +198,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Memberships, + memberships: TYPES::Membership, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -260,7 +251,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Memberships, + memberships: TYPES::Membership, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -507,7 +498,6 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext::PrivateKey, node_id: u64, config: HotShotConfig, - memberships: Memberships, + memberships: TYPES::Membership, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -767,7 +757,7 @@ where private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Memberships, + memberships: TYPES::Membership, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index b19ba62106..7e94aea326 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -46,7 +46,7 @@ use vbs::version::StaticVersionType; use crate::{ tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, + MarketplaceConfig, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, }; /// event for global event stream @@ -82,7 +82,7 @@ pub fn add_response_task, V: Versi ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - handle.hotshot.memberships.quorum_membership.clone().into(), + (*handle.hotshot.memberships).clone().into(), handle.public_key().clone(), handle.private_key().clone(), handle.hotshot.id, @@ -190,15 +190,13 @@ pub fn add_network_event_task< >( handle: &mut SystemContextHandle, network: Arc, - quorum_membership: TYPES::Membership, - da_membership: TYPES::Membership, + membership: TYPES::Membership, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, view: TYPES::View::genesis(), epoch: TYPES::Epoch::genesis(), - quorum_membership, - da_membership, + membership, storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), @@ -323,7 +321,7 @@ where private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Memberships, + memberships: TYPES::Membership, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -518,15 +516,8 @@ where /// Adds the `NetworkEventTaskState` tasks possibly modifying them as well. fn add_network_event_tasks(&self, handle: &mut SystemContextHandle) { let network = Arc::clone(&handle.network); - let quorum_membership = handle.memberships.quorum_membership.clone(); - let da_membership = handle.memberships.da_membership.clone(); - - self.add_network_event_task( - handle, - Arc::clone(&network), - quorum_membership.clone(), - da_membership, - ); + + self.add_network_event_task(handle, Arc::clone(&network), (*handle.memberships).clone()); } /// Adds a `NetworkEventTaskState` task. Can be reimplemented to modify its behaviour. @@ -534,10 +525,9 @@ where &self, handle: &mut SystemContextHandle, channel: Arc<>::Network>, - quorum_membership: TYPES::Membership, - da_membership: TYPES::Membership, + membership: TYPES::Membership, ) { - add_network_event_task(handle, channel, quorum_membership, da_membership); + add_network_event_task(handle, channel, membership); } } @@ -570,13 +560,9 @@ pub async fn add_network_message_and_request_receiver_tasks< pub fn add_network_event_tasks, V: Versions>( handle: &mut SystemContextHandle, ) { - let quorum_membership = handle.memberships.quorum_membership.clone(); - let da_membership = handle.memberships.da_membership.clone(); - add_network_event_task( handle, Arc::clone(&handle.network), - quorum_membership, - da_membership, + (*handle.memberships).clone(), ); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 7d3d085281..3c4e81585b 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -51,7 +51,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, - da_membership: handle.hotshot.memberships.da_membership.clone(), + membership: (*handle.hotshot.memberships).clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -71,7 +71,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: (*handle.hotshot.memberships).clone().into(), vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -92,7 +92,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), network: Arc::clone(&handle.hotshot.network), vote_collector: None.into(), public_key: handle.public_key().clone(), @@ -121,7 +121,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), - membership: handle.hotshot.memberships.quorum_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -137,9 +137,8 @@ impl, V: Versions> CreateTaskState Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - da_membership: handle.hotshot.memberships.da_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), network: Arc::clone(&handle.hotshot.network), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, vote_collectors: BTreeMap::default(), @@ -163,7 +162,7 @@ impl, V: Versions> CreateTaskState cur_view, next_view: cur_view, cur_epoch: handle.cur_epoch().await, - membership: handle.hotshot.memberships.quorum_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), num_timeouts_tracked: 0, @@ -190,7 +189,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: handle.hotshot.memberships.quorum_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), @@ -231,8 +230,7 @@ impl, V: Versions> CreateTaskState latest_voted_view: handle.cur_view().await, vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - da_membership: handle.hotshot.memberships.da_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), @@ -254,7 +252,7 @@ impl, V: Versions> CreateTaskState proposal_dependencies: BTreeMap::new(), consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: (*handle.hotshot.memberships).clone().into(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), @@ -281,7 +279,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: (*handle.hotshot.memberships).clone().into(), timeout: handle.hotshot.config.next_view_timeout, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), @@ -305,7 +303,7 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), network: Arc::clone(&handle.hotshot.network), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + membership: (*handle.hotshot.memberships).clone().into(), vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), cur_view: handle.cur_view().await, diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index 89a36f9030..2b721a66e0 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -9,7 +9,6 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; use hotshot_types::{ traits::{ election::Membership, - network::Topic, node_implementation::NodeType, signature_key::{SignatureKey, StakeTableEntryType}, }, @@ -32,12 +31,16 @@ pub struct RandomizedCommittee { /// The nodes on the committee and their stake stake_table: Vec<::StakeTableEntry>, + /// The nodes on the committee and their stake + da_stake_table: Vec<::StakeTableEntry>, + /// The nodes on the committee and their stake, indexed by public key indexed_stake_table: BTreeMap::StakeTableEntry>, - /// The network topic of the committee - committee_topic: Topic, + /// The nodes on the committee and their stake, indexed by public key + indexed_da_stake_table: + BTreeMap::StakeTableEntry>, } impl Membership for RandomizedCommittee { @@ -45,13 +48,12 @@ impl Membership for RandomizedCommittee { /// Create a new election fn new( - eligible_leaders: Vec::SignatureKey>>, committee_members: Vec::SignatureKey>>, - committee_topic: Topic, + da_members: Vec::SignatureKey>>, ) -> Self { // For each eligible leader, get the stake table entry let eligible_leaders: Vec<::StakeTableEntry> = - eligible_leaders + committee_members .iter() .map(|member| member.stake_table_entry.clone()) .filter(|entry| entry.stake() > U256::zero()) @@ -65,6 +67,13 @@ impl Membership for RandomizedCommittee { .filter(|entry| entry.stake() > U256::zero()) .collect(); + // For each member, get the stake table entry + let da_members: Vec<::StakeTableEntry> = da_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + // Index the stake table by public key let indexed_stake_table: BTreeMap< TYPES::SignatureKey, @@ -74,11 +83,21 @@ impl Membership for RandomizedCommittee { .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) .collect(); + // Index the stake table by public key + let indexed_da_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = da_members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + Self { eligible_leaders, stake_table: members, + da_stake_table: da_members, indexed_stake_table, - committee_topic, + indexed_da_stake_table, } } @@ -90,6 +109,14 @@ impl Membership for RandomizedCommittee { self.stake_table.clone() } + /// Get the stake table for the current view + fn da_stake_table( + &self, + _epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + self.da_stake_table.clone() + } + /// Get all members of the committee for the current view fn committee_members( &self, @@ -102,6 +129,18 @@ impl Membership for RandomizedCommittee { .collect() } + /// Get all members of the committee for the current view + fn da_committee_members( + &self, + _view_number: ::View, + _epoch: ::Epoch, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.da_stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, @@ -124,6 +163,16 @@ impl Membership for RandomizedCommittee { self.indexed_stake_table.get(pub_key).cloned() } + /// Get the stake table entry for a public key + fn da_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + self.indexed_da_stake_table.get(pub_key).cloned() + } + /// Check if a node has stake in the committee fn has_stake( &self, @@ -135,11 +184,22 @@ impl Membership for RandomizedCommittee { .is_some_and(|x| x.stake() > U256::zero()) } - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic { - self.committee_topic.clone() + /// Check if a node has stake in the committee + fn has_da_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> bool { + self.indexed_da_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) } + // /// Get the network topic for the committee + // fn committee_topic(&self) -> Topic { + // self.committee_topic.clone() + // } + /// Index the vector of public keys with the current view number fn lookup_leader( &self, @@ -161,12 +221,20 @@ impl Membership for RandomizedCommittee { fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } - + /// Get the total number of nodes in the committee + fn da_total_nodes(&self, _epoch: ::Epoch) -> usize { + self.da_stake_table.len() + } /// Get the voting success threshold for the committee fn success_threshold(&self) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } + /// Get the voting success threshold for the committee + fn da_success_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() + } + /// Get the voting failure threshold for the committee fn failure_threshold(&self) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 200ed530dd..fa904c66cf 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -9,7 +9,6 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; use hotshot_types::{ traits::{ election::Membership, - network::Topic, node_implementation::NodeType, signature_key::{SignatureKey, StakeTableEntryType}, }, @@ -19,7 +18,6 @@ use primitive_types::U256; use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] - /// The static committee election pub struct StaticCommittee { /// The nodes eligible for leadership. @@ -30,12 +28,16 @@ pub struct StaticCommittee { /// The nodes on the committee and their stake stake_table: Vec<::StakeTableEntry>, + /// The nodes on the committee and their stake + da_stake_table: Vec<::StakeTableEntry>, + /// The nodes on the committee and their stake, indexed by public key indexed_stake_table: BTreeMap::StakeTableEntry>, - /// The network topic of the committee - committee_topic: Topic, + /// The nodes on the committee and their stake, indexed by public key + indexed_da_stake_table: + BTreeMap::StakeTableEntry>, } impl Membership for StaticCommittee { @@ -43,13 +45,12 @@ impl Membership for StaticCommittee { /// Create a new election fn new( - eligible_leaders: Vec::SignatureKey>>, committee_members: Vec::SignatureKey>>, - committee_topic: Topic, + da_members: Vec::SignatureKey>>, ) -> Self { // For each eligible leader, get the stake table entry let eligible_leaders: Vec<::StakeTableEntry> = - eligible_leaders + committee_members .iter() .map(|member| member.stake_table_entry.clone()) .filter(|entry| entry.stake() > U256::zero()) @@ -63,6 +64,13 @@ impl Membership for StaticCommittee { .filter(|entry| entry.stake() > U256::zero()) .collect(); + // For each member, get the stake table entry + let da_members: Vec<::StakeTableEntry> = da_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + // Index the stake table by public key let indexed_stake_table: BTreeMap< TYPES::SignatureKey, @@ -72,11 +80,21 @@ impl Membership for StaticCommittee { .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) .collect(); + // Index the stake table by public key + let indexed_da_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = da_members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + Self { eligible_leaders, stake_table: members, + da_stake_table: da_members, indexed_stake_table, - committee_topic, + indexed_da_stake_table, } } @@ -88,6 +106,14 @@ impl Membership for StaticCommittee { self.stake_table.clone() } + /// Get the stake table for the current view + fn da_stake_table( + &self, + _epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + self.da_stake_table.clone() + } + /// Get all members of the committee for the current view fn committee_members( &self, @@ -100,6 +126,18 @@ impl Membership for StaticCommittee { .collect() } + /// Get all members of the committee for the current view + fn da_committee_members( + &self, + _view_number: ::View, + _epoch: ::Epoch, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.da_stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, @@ -122,6 +160,16 @@ impl Membership for StaticCommittee { self.indexed_stake_table.get(pub_key).cloned() } + /// Get the DA stake table entry for a public key + fn da_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + self.indexed_da_stake_table.get(pub_key).cloned() + } + /// Check if a node has stake in the committee fn has_stake( &self, @@ -133,9 +181,15 @@ impl Membership for StaticCommittee { .is_some_and(|x| x.stake() > U256::zero()) } - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic { - self.committee_topic.clone() + /// Check if a node has stake in the committee + fn has_da_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> bool { + self.indexed_da_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) } /// Index the vector of public keys with the current view number @@ -155,11 +209,21 @@ impl Membership for StaticCommittee { self.stake_table.len() } + /// Get the total number of DA nodes in the committee + fn da_total_nodes(&self, _epoch: ::Epoch) -> usize { + self.da_stake_table.len() + } + /// Get the voting success threshold for the committee fn success_threshold(&self) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } + /// Get the voting success threshold for the committee + fn da_success_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() + } + /// Get the voting failure threshold for the committee fn failure_threshold(&self) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 922a815e68..41ed1d046e 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -9,7 +9,6 @@ use std::{collections::BTreeMap, num::NonZeroU64}; use hotshot_types::{ traits::{ election::Membership, - network::Topic, node_implementation::NodeType, signature_key::{SignatureKey, StakeTableEntryType}, }, @@ -30,12 +29,16 @@ pub struct StaticCommitteeLeaderForTwoViews { /// The nodes on the committee and their stake stake_table: Vec<::StakeTableEntry>, + /// The nodes on the committee and their stake + da_stake_table: Vec<::StakeTableEntry>, + /// The nodes on the committee and their stake, indexed by public key indexed_stake_table: BTreeMap::StakeTableEntry>, - /// The network topic of the committee - committee_topic: Topic, + /// The nodes on the committee and their stake, indexed by public key + indexed_da_stake_table: + BTreeMap::StakeTableEntry>, } impl Membership for StaticCommitteeLeaderForTwoViews { @@ -43,13 +46,12 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey>>, committee_members: Vec::SignatureKey>>, - committee_topic: Topic, + da_members: Vec::SignatureKey>>, ) -> Self { // For each eligible leader, get the stake table entry let eligible_leaders: Vec<::StakeTableEntry> = - eligible_leaders + committee_members .iter() .map(|member| member.stake_table_entry.clone()) .filter(|entry| entry.stake() > U256::zero()) @@ -63,6 +65,13 @@ impl Membership for StaticCommitteeLeaderForTwoViews U256::zero()) .collect(); + // For each member, get the stake table entry + let da_members: Vec<::StakeTableEntry> = da_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + // Index the stake table by public key let indexed_stake_table: BTreeMap< TYPES::SignatureKey, @@ -72,11 +81,21 @@ impl Membership for StaticCommitteeLeaderForTwoViews::StakeTableEntry, + > = da_members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + Self { eligible_leaders, stake_table: members, + da_stake_table: da_members, indexed_stake_table, - committee_topic, + indexed_da_stake_table, } } @@ -88,6 +107,14 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + self.da_stake_table.clone() + } + /// Get all members of the committee for the current view fn committee_members( &self, @@ -100,6 +127,18 @@ impl Membership for StaticCommitteeLeaderForTwoViews::View, + _epoch: ::Epoch, + ) -> std::collections::BTreeSet<::SignatureKey> { + self.da_stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, @@ -122,6 +161,16 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, + _epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + self.indexed_da_stake_table.get(pub_key).cloned() + } + /// Check if a node has stake in the committee fn has_stake( &self, @@ -133,9 +182,15 @@ impl Membership for StaticCommitteeLeaderForTwoViews U256::zero()) } - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic { - self.committee_topic.clone() + /// Check if a node has stake in the committee + fn has_da_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::Epoch, + ) -> bool { + self.indexed_da_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) } /// Index the vector of public keys with the current view number @@ -156,11 +211,21 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch) -> usize { + self.da_stake_table.len() + } + /// Get the voting success threshold for the committee fn success_threshold(&self) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } + /// Get the voting success threshold for the committee + fn da_success_threshold(&self) -> NonZeroU64 { + NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() + } + /// Get the voting failure threshold for the committee fn failure_threshold(&self) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 4719b0cde2..7b1fd5a424 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -35,7 +35,7 @@ use hotshot_types::{ }; use tracing::instrument; -use crate::{traits::NodeImplementation, types::Event, Memberships, SystemContext, Versions}; +use crate::{traits::NodeImplementation, types::Event, SystemContext, Versions}; /// Event streaming handle for a [`SystemContext`] instance running in the background /// @@ -69,7 +69,7 @@ pub struct SystemContextHandle, V: pub network: Arc, /// Memberships used by consensus - pub memberships: Arc>, + pub memberships: Arc, /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, @@ -157,7 +157,7 @@ impl + 'static, V: Versions> signed_proposal_request.commit().as_ref(), )?; - let mem = self.memberships.quorum_membership.clone(); + let mem = (*self.memberships).clone(); let receiver = self.internal_event_stream.1.activate_cloned(); let sender = self.internal_event_stream.0.clone(); Ok(async move { @@ -327,7 +327,6 @@ impl + 'static, V: Versions> ) -> Result { self.hotshot .memberships - .quorum_membership .leader(view_number, epoch_number) .context("Failed to lookup leader") } diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index 7e5987e306..0f3bdf80a9 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -522,9 +522,7 @@ mod test { use hotshot_example_types::node_types::TestTypes; use hotshot_types::{ - light_client::StateVerKey, - signature_key::BLSPubKey, - traits::{network::Topic, signature_key::SignatureKey}, + light_client::StateVerKey, signature_key::BLSPubKey, traits::signature_key::SignatureKey, PeerConfig, }; use libp2p::{core::transport::dummy::DummyTransport, quic::Connection}; @@ -643,11 +641,8 @@ mod test { stake_table_entry: keypair.0.stake_table_entry(1), state_ver_key: StateVerKey::default(), }; - let stake_table = ::Membership::new( - vec![peer_config.clone()], - vec![peer_config], - Topic::Global, - ); + let stake_table = + ::Membership::new(vec![peer_config.clone()], vec![peer_config]); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -672,7 +667,7 @@ mod test { let mut stream = cursor_from!(auth_message); // Create an empty stake table - let stake_table = ::Membership::new(vec![], vec![], Topic::Global); + let stake_table = ::Membership::new(vec![], vec![]); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -708,11 +703,8 @@ mod test { stake_table_entry: keypair.0.stake_table_entry(1), state_ver_key: StateVerKey::default(), }; - let stake_table = ::Membership::new( - vec![peer_config.clone()], - vec![peer_config], - Topic::Global, - ); + let stake_table = + ::Membership::new(vec![peer_config.clone()], vec![peer_config]); // Check against the malicious peer ID let result = MockStakeTableAuth::verify_peer_authentication( diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 8c60392cf5..684c3b6e34 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -45,7 +45,7 @@ pub(crate) async fn handle_quorum_vote_recv< .await .is_leaf_extended(vote.data.leaf_commit); let we_are_leader = task_state - .quorum_membership + .membership .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key; ensure!( @@ -60,7 +60,7 @@ pub(crate) async fn handle_quorum_vote_recv< &mut task_state.vote_collectors, vote, task_state.public_key.clone(), - &task_state.quorum_membership, + &task_state.membership, task_state.cur_epoch, task_state.id, &event, @@ -87,7 +87,7 @@ pub(crate) async fn handle_timeout_vote_recv< // Are we the leader for this view? ensure!( task_state - .quorum_membership + .membership .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, info!( @@ -100,7 +100,7 @@ pub(crate) async fn handle_timeout_vote_recv< &mut task_state.timeout_vote_collectors, vote, task_state.public_key.clone(), - &task_state.quorum_membership, + &task_state.membership, task_state.cur_epoch, task_state.id, &event, @@ -131,7 +131,7 @@ pub async fn send_high_qc ensure!( task_state - .quorum_membership + .membership .has_stake(&task_state.public_key, task_state.cur_epoch), debug!("We were not chosen for the consensus committee for view {view_number:?}") ); @@ -324,7 +324,7 @@ pub(crate) async fn handle_timeout .number_of_timeouts .add(1); if task_state - .quorum_membership + .membership .leader(view_number, task_state.cur_epoch)? == task_state.public_key { diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 0ed04d30fa..c865ed82e1 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -47,7 +47,7 @@ pub struct ConsensusTaskState, V: pub network: Arc, /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, + pub membership: Arc, /// A map of `QuorumVote` collector tasks. pub vote_collectors: VoteCollectorsMap, QuorumCertificate2, V>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index edaa5123e5..68df39136f 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -52,13 +52,10 @@ pub struct DaTaskState, V: Version /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, - /// Membership for the DA committee - pub da_membership: Arc, - - /// Membership for the quorum committee - /// We need this only for calculating the proper VID scheme + /// Membership for the DA committee and quorum committee. + /// We need the latter only for calculating the proper VID scheme /// from the number of nodes in the quorum. - pub quorum_membership: Arc, + pub membership: Arc, /// The underlying network pub network: Arc, @@ -124,7 +121,7 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState( let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc - .is_valid_cert(quorum_membership.as_ref(), cur_epoch, upgrade_lock) + .is_valid_cert( + quorum_membership.stake_table(cur_epoch), + quorum_membership.success_threshold(), + upgrade_lock, + ) .await { bail!("Invalid justify_qc in proposal for view {}", *view_number); @@ -675,11 +679,14 @@ pub(crate) async fn validate_proposal_view_and_certs< "Timeout certificate for view {} was not for the immediately preceding view", *view_number ); + ensure!( timeout_cert .is_valid_cert( - validation_info.quorum_membership.as_ref(), - validation_info.cur_epoch, + validation_info + .quorum_membership + .stake_table(validation_info.cur_epoch), + validation_info.quorum_membership.success_threshold(), &validation_info.upgrade_lock ) .await, @@ -699,8 +706,10 @@ pub(crate) async fn validate_proposal_view_and_certs< ensure!( view_sync_cert .is_valid_cert( - validation_info.quorum_membership.as_ref(), - validation_info.cur_epoch, + validation_info + .quorum_membership + .stake_table(validation_info.cur_epoch), + validation_info.quorum_membership.success_threshold(), &validation_info.upgrade_lock ) .await, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index bf23787c05..7cc8dceca0 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -25,7 +25,7 @@ use hotshot_types::{ traits::{ election::Membership, network::{ - BroadcastDelay, ConnectedNetwork, RequestKind, ResponseMessage, TransmitType, + BroadcastDelay, ConnectedNetwork, RequestKind, ResponseMessage, Topic, TransmitType, ViewMessage, }, node_implementation::{ConsensusTime, NodeType, Versions}, @@ -208,10 +208,8 @@ pub struct NetworkEventTaskState< pub view: TYPES::View, /// epoch number pub epoch: TYPES::Epoch, - /// quorum for the network - pub quorum_membership: TYPES::Membership, - /// da for the network - pub da_membership: TYPES::Membership, + /// network memberships + pub membership: TYPES::Membership, /// Storage to store actionable events pub storage: Arc>, /// Shared consensus state @@ -391,7 +389,7 @@ impl< HotShotEvent::QuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -452,7 +450,7 @@ impl< HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -484,7 +482,7 @@ impl< } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -507,7 +505,7 @@ impl< HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -530,7 +528,7 @@ impl< HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -574,7 +572,7 @@ impl< HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -603,7 +601,7 @@ impl< HotShotEvent::UpgradeVoteSend(vote) => { tracing::error!("Sending upgrade vote!"); let view_number = vote.view_number(); - let leader = match self.quorum_membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -630,7 +628,7 @@ impl< self.cancel_tasks(view); let net = Arc::clone(&self.network); let epoch = self.epoch.u64(); - let mem = self.quorum_membership.clone(); + let mem = self.membership.clone(); spawn(async move { net.update_view::(view.saturating_sub(1), epoch, &mem) .await; @@ -676,10 +674,10 @@ impl< kind: message_kind, }; let view_number = message.kind.view_number(); - let committee_topic = self.quorum_membership.committee_topic(); + let committee_topic = Topic::Global; let da_committee = self - .da_membership - .committee_members(view_number, self.epoch); + .membership + .da_committee_members(view_number, self.epoch); let network = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); @@ -798,7 +796,7 @@ pub mod test { &mut sender, &mut message_kind, &mut transmit, - &self.quorum_membership, + &self.membership, ); self.spawn_transmit_task(message_kind, maybe_action, transmit, sender); } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index f8d0d93d75..55f283bcd8 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -25,6 +25,7 @@ use hotshot_types::{ simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ block_contents::BlockHeader, + election::Membership, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, @@ -124,8 +125,10 @@ impl ProposalDependencyHandle { if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() { if qc .is_valid_cert( - self.quorum_membership.as_ref(), - TYPES::Epoch::new(0), + // TODO take epoch from `qc` + // https://github.com/EspressoSystems/HotShot/issues/3917 + self.quorum_membership.stake_table(TYPES::Epoch::new(0)), + self.quorum_membership.success_threshold(), &self.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index f7e50806d0..d226677eb1 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -443,8 +443,8 @@ impl, V: Versions> ensure!( certificate .is_valid_cert( - self.quorum_membership.as_ref(), - epoch_number, + self.quorum_membership.stake_table(epoch_number), + self.quorum_membership.success_threshold(), &self.upgrade_lock ) .await, @@ -507,8 +507,8 @@ impl, V: Versions> let epoch_number = self.consensus.read().await.cur_epoch(); ensure!( qc.is_valid_cert( - self.quorum_membership.as_ref(), - epoch_number, + self.quorum_membership.stake_table(epoch_number), + self.quorum_membership.success_threshold(), &self.upgrade_lock ) .await, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index f5fe251367..73f6addef0 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -154,8 +154,10 @@ pub(crate) async fn handle_quorum_proposal_recv< if !justify_qc .is_valid_cert( - validation_info.quorum_membership.as_ref(), - validation_info.cur_epoch, + validation_info + .quorum_membership + .stake_table(validation_info.cur_epoch), + validation_info.quorum_membership.success_threshold(), &validation_info.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 20e851fa14..d1079197f8 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -269,11 +269,8 @@ pub struct QuorumVoteTaskState, V: /// The underlying network pub network: Arc, - /// Membership for Quorum certs/votes. - pub quorum_membership: Arc, - - /// Membership for DA committee certs/votes. - pub da_membership: Arc, + /// Membership for Quorum certs/votes and DA committee certs/votes. + pub membership: Arc, /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -377,7 +374,7 @@ impl, V: Versions> QuorumVoteTaskS private_key: self.private_key.clone(), consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), instance_state: Arc::clone(&self.instance_state), - quorum_membership: Arc::clone(&self.quorum_membership), + quorum_membership: Arc::clone(&self.membership), storage: Arc::clone(&self.storage), view_number, sender: event_sender.clone(), @@ -479,8 +476,12 @@ impl, V: Versions> QuorumVoteTaskS let cur_epoch = self.consensus.read().await.cur_epoch(); // Validate the DAC. ensure!( - cert.is_valid_cert(self.da_membership.as_ref(), cur_epoch, &self.upgrade_lock) - .await, + cert.is_valid_cert( + self.membership.da_stake_table(cur_epoch), + self.membership.da_success_threshold(), + &self.upgrade_lock + ) + .await, warn!("Invalid DAC") ); @@ -518,16 +519,16 @@ impl, V: Versions> QuorumVoteTaskS // ensure that the VID share was sent by a DA member OR the view leader ensure!( - self.da_membership - .committee_members(view, cur_epoch) + self.membership + .da_committee_members(view, cur_epoch) .contains(sender) - || *sender == self.quorum_membership.leader(view, cur_epoch)?, + || *sender == self.membership.leader(view, cur_epoch)?, "VID share was not sent by a DA member or the view leader." ); // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results - match vid_scheme(self.quorum_membership.total_nodes(cur_epoch)).verify_share( + match vid_scheme(self.membership.total_nodes(cur_epoch)).verify_share( &disperse.data.share, &disperse.data.common, payload_commitment, @@ -641,7 +642,7 @@ impl, V: Versions> QuorumVoteTaskS OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), event_sender.clone(), event_receiver.clone().deactivate(), - Arc::clone(&self.quorum_membership), + Arc::clone(&self.membership), self.public_key.clone(), self.private_key.clone(), self.upgrade_lock.clone(), @@ -684,7 +685,7 @@ impl, V: Versions> QuorumVoteTaskS .is_leaf_extended(proposed_leaf.commit()); if let Err(e) = submit_vote::( event_sender.clone(), - Arc::clone(&self.quorum_membership), + Arc::clone(&self.membership), self.public_key.clone(), self.private_key.clone(), self.upgrade_lock.clone(), diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 6e9384f3ea..a41b04f5d5 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -59,8 +59,8 @@ pub struct NetworkRequestState> { pub view: TYPES::View, /// Delay before requesting peers pub delay: Duration, - /// DA Membership - pub da_membership: TYPES::Membership, + /// Membership (Here containing only DA) + pub membership: TYPES::Membership, /// This nodes public key pub public_key: TYPES::SignatureKey, /// This nodes private/signing key, used to sign requests. @@ -180,15 +180,15 @@ impl> NetworkRequestState = self - .da_membership - .committee_members(view, epoch) + .membership + .da_committee_members(view, epoch) .into_iter() .collect(); // Randomize the recipients so all replicas don't overload the same 1 recipients diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index dbb190be9f..efb87f9cd5 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -533,7 +533,11 @@ impl ViewSyncReplicaTaskState { // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), self.cur_epoch, &self.upgrade_lock) + .is_valid_cert( + self.membership.stake_table(self.cur_epoch), + self.membership.failure_threshold(), + &self.upgrade_lock, + ) .await { tracing::error!("Not valid view sync cert! {:?}", certificate.data()); @@ -615,7 +619,11 @@ impl ViewSyncReplicaTaskState { // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), self.cur_epoch, &self.upgrade_lock) + .is_valid_cert( + self.membership.stake_table(self.cur_epoch), + self.membership.success_threshold(), + &self.upgrade_lock, + ) .await { tracing::error!("Not valid view sync cert! {:?}", certificate.data()); @@ -708,7 +716,11 @@ impl ViewSyncReplicaTaskState { // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), self.cur_epoch, &self.upgrade_lock) + .is_valid_cert( + self.membership.stake_table(self.cur_epoch), + self.membership.success_threshold(), + &self.upgrade_lock, + ) .await { tracing::error!("Not valid view sync cert! {:?}", certificate.data()); diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index b58f74dd7e..e0fc96040b 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -43,7 +43,7 @@ pub type VoteCollectorsMap = pub struct VoteCollectionTaskState< TYPES: NodeType, VOTE: Vote, - CERT: Certificate + Debug, + CERT: Certificate + Debug, V: Versions, > { /// Public key for this node. @@ -72,7 +72,7 @@ pub struct VoteCollectionTaskState< pub trait AggregatableVote< TYPES: NodeType, VOTE: Vote, - CERT: Certificate, + CERT: Certificate, > { /// return the leader for this votes @@ -92,7 +92,7 @@ pub trait AggregatableVote< impl< TYPES: NodeType, VOTE: Vote + AggregatableVote, - CERT: Certificate + Clone + Debug, + CERT: Certificate + Clone + Debug, V: Versions, > VoteCollectionTaskState { @@ -153,7 +153,7 @@ pub trait HandleVoteEvent where TYPES: NodeType, VOTE: Vote + AggregatableVote, - CERT: Certificate + Debug, + CERT: Certificate + Debug, { /// Handle a vote event /// @@ -204,7 +204,7 @@ where + std::marker::Send + std::marker::Sync + 'static, - CERT: Certificate + CERT: Certificate + Debug + std::marker::Send + std::marker::Sync @@ -242,7 +242,11 @@ where pub async fn handle_vote< TYPES: NodeType, VOTE: Vote + AggregatableVote + Send + Sync + 'static, - CERT: Certificate + Debug + Send + Sync + 'static, + CERT: Certificate + + Debug + + Send + + Sync + + 'static, V: Versions, >( collectors: &mut VoteCollectorsMap, diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 0ffc97df37..449629d5ca 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -339,15 +339,13 @@ impl + std::fmt::Debug, V: Version &self, handle: &mut SystemContextHandle, network: Arc<>::Network>, - quorum_membership: TYPES::Membership, - da_membership: TYPES::Membership, + membership: TYPES::Membership, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, view: TYPES::View::genesis(), epoch: TYPES::Epoch::genesis(), - quorum_membership, - da_membership, + membership, storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index d27c5fe165..6759d17e69 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -13,7 +13,7 @@ use committable::Committable; use hotshot::{ traits::{NodeImplementation, TestableNodeImplementation}, types::{SignatureKey, SystemContextHandle}, - HotShotInitializer, Memberships, SystemContext, + HotShotInitializer, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -33,7 +33,6 @@ use hotshot_types::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - network::Topic, node_implementation::{NodeType, Versions}, }, utils::{View, ViewInner}, @@ -109,17 +108,10 @@ pub async fn build_system_handle_from_launcher< let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); - let all_nodes = config.known_nodes_with_stake.clone(); - let da_nodes = config.known_da_nodes.clone(); - - let memberships = Memberships { - quorum_membership: TYPES::Membership::new( - all_nodes.clone(), - all_nodes.clone(), - Topic::Global, - ), - da_membership: TYPES::Membership::new(all_nodes, da_nodes, Topic::Da), - }; + let memberships = TYPES::Membership::new( + config.known_nodes_with_stake.clone(), + config.known_da_nodes.clone(), + ); SystemContext::init( public_key, @@ -145,7 +137,7 @@ pub async fn build_cert< V: Versions, DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, VOTE: Vote, - CERT: Certificate, + CERT: Certificate, >( data: DATAType, membership: &TYPES::Membership, @@ -210,7 +202,7 @@ pub async fn build_assembled_sig< TYPES: NodeType, V: Versions, VOTE: Vote, - CERT: Certificate, + CERT: Certificate, DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, >( data: &DATAType, @@ -219,7 +211,7 @@ pub async fn build_assembled_sig< epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let stake_table = membership.stake_table(epoch); + let stake_table = CERT::stake_table(membership, epoch); let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), @@ -365,8 +357,7 @@ pub fn build_vid_proposal( #[allow(clippy::too_many_arguments)] pub async fn build_da_certificate( - quorum_membership: &::Membership, - da_membership: &::Membership, + membership: &::Membership, view_number: TYPES::View, epoch_number: TYPES::Epoch, transactions: Vec, @@ -376,10 +367,8 @@ pub async fn build_da_certificate( ) -> DaCertificate { let encoded_transactions = TestTransaction::encode(&transactions); - let da_payload_commitment = vid_commitment( - &encoded_transactions, - quorum_membership.total_nodes(epoch_number), - ); + let da_payload_commitment = + vid_commitment(&encoded_transactions, membership.total_nodes(epoch_number)); let da_data = DaData { payload_commit: da_payload_commitment, @@ -387,7 +376,7 @@ pub async fn build_da_certificate( build_cert::, DaCertificate>( da_data, - da_membership, + membership, view_number, epoch_number, public_key, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 4773e597e0..5bd6f38d79 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -11,7 +11,7 @@ use hotshot::{ tasks::EventTransformerState, traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, types::SystemContextHandle, - HotShotInitializer, MarketplaceConfig, Memberships, SystemContext, TwinsHandlerState, + HotShotInitializer, MarketplaceConfig, SystemContext, TwinsHandlerState, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, @@ -175,7 +175,7 @@ pub async fn create_test_handle< metadata: TestDescription, node_id: u64, network: Network, - memberships: Memberships, + memberships: TYPES::Membership, config: HotShotConfig, storage: I::Storage, marketplace_config: MarketplaceConfig, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 95f494808f..414e6e0f4b 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -17,7 +17,7 @@ use futures::future::join_all; use hotshot::{ traits::TestableNodeImplementation, types::{Event, SystemContextHandle}, - HotShotInitializer, MarketplaceConfig, Memberships, SystemContext, + HotShotInitializer, MarketplaceConfig, SystemContext, }; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, @@ -34,7 +34,7 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ election::Membership, - network::{ConnectedNetwork, Topic}, + network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }, HotShotConfig, ValidatorConfig, @@ -419,17 +419,10 @@ where self.next_node_id += 1; tracing::debug!("launch node {}", i); - let all_nodes = config.known_nodes_with_stake.clone(); - let da_nodes = config.known_da_nodes.clone(); - - let memberships = Memberships { - quorum_membership: ::Membership::new( - all_nodes.clone(), - all_nodes.clone(), - Topic::Global, - ), - da_membership: ::Membership::new(all_nodes, da_nodes, Topic::Da), - }; + let memberships = ::Membership::new( + config.known_nodes_with_stake.clone(), + config.known_da_nodes.clone(), + ); config.builder_urls = builder_urls .clone() .try_into() @@ -584,7 +577,7 @@ where pub async fn add_node_with_config( node_id: u64, network: Network, - memberships: Memberships, + memberships: TYPES::Membership, initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, @@ -617,7 +610,7 @@ where pub async fn add_node_with_config_and_channels( node_id: u64, network: Network, - memberships: Memberships, + memberships: TYPES::Membership, initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, @@ -667,7 +660,7 @@ pub struct LateNodeContextParameters, + pub memberships: TYPES::Membership, /// The config associated with this node. pub config: HotShotConfig, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 2da7829fc9..8ff60cf949 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -54,8 +54,7 @@ pub struct TestView { pub leaf: Leaf2, pub view_number: ViewNumber, pub epoch_number: EpochNumber, - pub quorum_membership: ::Membership, - pub da_membership: ::Membership, + pub membership: ::Membership, pub vid_disperse: Proposal>, pub vid_proposal: ( Vec>>, @@ -72,10 +71,7 @@ pub struct TestView { } impl TestView { - pub async fn genesis( - quorum_membership: &::Membership, - da_membership: &::Membership, - ) -> Self { + pub async fn genesis(membership: &::Membership) -> Self { let genesis_view = ViewNumber::new(1); let genesis_epoch = EpochNumber::new(1); let upgrade_lock = UpgradeLock::new(); @@ -100,14 +96,11 @@ impl TestView { let leader_public_key = public_key; - let payload_commitment = da_payload_commitment::( - quorum_membership, - transactions.clone(), - genesis_epoch, - ); + let payload_commitment = + da_payload_commitment::(membership, transactions.clone(), genesis_epoch); let (vid_disperse, vid_proposal) = build_vid_proposal( - quorum_membership, + membership, genesis_view, genesis_epoch, transactions.clone(), @@ -115,8 +108,7 @@ impl TestView { ); let da_certificate = build_da_certificate( - quorum_membership, - da_membership, + membership, genesis_view, genesis_epoch, transactions.clone(), @@ -190,8 +182,7 @@ impl TestView { leaf, view_number: genesis_view, epoch_number: genesis_epoch, - quorum_membership: quorum_membership.clone(), - da_membership: da_membership.clone(), + membership: membership.clone(), vid_disperse, vid_proposal: (vid_proposal, public_key), da_certificate, @@ -219,8 +210,7 @@ impl TestView { // test view here. let next_view = max(old_view, self.view_number) + 1; - let quorum_membership = &self.quorum_membership; - let da_membership = &self.da_membership; + let membership = &self.membership; let transactions = &self.transactions; @@ -247,14 +237,11 @@ impl TestView { &metadata, ); - let payload_commitment = da_payload_commitment::( - quorum_membership, - transactions.clone(), - self.epoch_number, - ); + let payload_commitment = + da_payload_commitment::(membership, transactions.clone(), self.epoch_number); let (vid_disperse, vid_proposal) = build_vid_proposal( - quorum_membership, + membership, next_view, self.epoch_number, transactions.clone(), @@ -262,8 +249,7 @@ impl TestView { ); let da_certificate = build_da_certificate::( - quorum_membership, - da_membership, + membership, next_view, self.epoch_number, transactions.clone(), @@ -281,7 +267,7 @@ impl TestView { QuorumCertificate2, >( quorum_data, - quorum_membership, + membership, old_view, self.epoch_number, &old_public_key, @@ -299,7 +285,7 @@ impl TestView { UpgradeCertificate, >( data.clone(), - quorum_membership, + membership, next_view, self.epoch_number, &public_key, @@ -322,7 +308,7 @@ impl TestView { ViewSyncFinalizeCertificate2, >( data.clone(), - quorum_membership, + membership, next_view, self.epoch_number, &public_key, @@ -345,7 +331,7 @@ impl TestView { TimeoutCertificate, >( data.clone(), - quorum_membership, + membership, next_view, self.epoch_number, &public_key, @@ -425,8 +411,7 @@ impl TestView { leaf, view_number: next_view, epoch_number: self.epoch_number, - quorum_membership: quorum_membership.clone(), - da_membership: self.da_membership.clone(), + membership: self.membership.clone(), vid_disperse, vid_proposal: (vid_proposal, public_key), da_certificate, @@ -501,19 +486,14 @@ impl TestView { pub struct TestViewGenerator { pub current_view: Option, - pub quorum_membership: ::Membership, - pub da_membership: ::Membership, + pub membership: ::Membership, } impl TestViewGenerator { - pub fn generate( - quorum_membership: ::Membership, - da_membership: ::Membership, - ) -> Self { + pub fn generate(membership: ::Membership) -> Self { TestViewGenerator { current_view: None, - quorum_membership, - da_membership, + membership, } } @@ -590,14 +570,13 @@ impl Stream for TestViewGenerator { type Item = TestView; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let qm = &self.quorum_membership.clone(); - let da = &self.da_membership.clone(); + let mem = &self.membership.clone(); let curr_view = &self.current_view.clone(); let mut fut = if let Some(ref view) = curr_view { async move { TestView::next_view(view).await }.boxed() } else { - async move { TestView::genesis(qm, da).await }.boxed() + async move { TestView::genesis(mem).await }.boxed() }; match fut.as_mut().poll(cx) { diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 96a7de771d..6f4c2a38df 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -39,8 +39,8 @@ async fn test_da_task() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let membership = (*handle.hotshot.memberships).clone(); // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -48,14 +48,10 @@ async fn test_da_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle - .hotshot - .memberships - .quorum_membership - .total_nodes(EpochNumber::new(0)), + handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), ); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -98,7 +94,7 @@ async fn test_da_task() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(0)), + membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) @@ -139,8 +135,7 @@ async fn test_da_task_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -148,14 +143,10 @@ async fn test_da_task_storage_failure() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle - .hotshot - .memberships - .quorum_membership - .total_nodes(EpochNumber::new(0)), + handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), ); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -198,7 +189,7 @@ async fn test_da_task_storage_failure() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(0)), + membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 5e0ff49eaa..c8869bc3a5 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -66,6 +66,7 @@ async fn test_certificate2_validity() { use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; use hotshot_types::{ data::{EpochNumber, Leaf, Leaf2}, + traits::election::Membership, traits::node_implementation::ConsensusTime, vote::Certificate, }; @@ -76,10 +77,9 @@ async fn test_certificate2_validity() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -104,8 +104,8 @@ async fn test_certificate2_validity() { assert!( qc.is_valid_cert( - &quorum_membership, - EpochNumber::new(0), + membership.stake_table(EpochNumber::new(0)), + membership.success_threshold(), &handle.hotshot.upgrade_lock ) .await @@ -113,8 +113,8 @@ async fn test_certificate2_validity() { assert!( qc2.is_valid_cert( - &quorum_membership, - EpochNumber::new(0), + membership.stake_table(EpochNumber::new(0)), + membership.success_threshold(), &handle.hotshot.upgrade_lock ) .await diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 9a539f89e4..a4e1c29a03 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -36,7 +36,6 @@ async fn test_network_task() { use std::collections::BTreeMap; use futures::StreamExt; - use hotshot_types::traits::network::Topic; hotshot::helpers::initialize_logging(); @@ -59,15 +58,13 @@ async fn test_network_task() { let all_nodes = config.known_nodes_with_stake.clone(); - let membership = - ::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); + let membership = ::Membership::new(all_nodes.clone(), all_nodes); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: EpochNumber::new(0), - quorum_membership: membership.clone(), - da_membership: membership.clone(), + membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -79,7 +76,7 @@ async fn test_network_task() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::generate(membership.clone(), membership); + let mut generator = TestViewGenerator::generate(membership); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal) = async_broadcast::broadcast(10); @@ -208,7 +205,6 @@ async fn test_network_storage_fail() { use std::collections::BTreeMap; use futures::StreamExt; - use hotshot_types::traits::network::Topic; hotshot::helpers::initialize_logging(); @@ -231,15 +227,13 @@ async fn test_network_storage_fail() { let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = UpgradeLock::::new(); - let membership = - ::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); + let membership = ::Membership::new(all_nodes.clone(), all_nodes); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: EpochNumber::new(0), - quorum_membership: membership.clone(), - da_membership: membership.clone(), + membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -251,7 +245,7 @@ async fn test_network_storage_fail() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::generate(membership.clone(), membership); + let mut generator = TestViewGenerator::generate(membership); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal): (Sender>>, _) = diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 7a1636d9c6..97742d0deb 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -32,6 +32,7 @@ use hotshot_types::{ request_response::ProposalRequestPayload, traits::{ consensus_api::ConsensusApi, + election::Membership, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, ValidatedState, @@ -53,12 +54,11 @@ async fn test_quorum_proposal_recv_task() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -129,12 +129,11 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let handle = build_system_handle::(4) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index f522d28f7c..0124e10b5a 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -50,16 +50,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let membership = (*handle.hotshot.memberships).clone(); let payload_commitment = build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(node_id), EpochNumber::new(1), ); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -90,7 +90,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(1)), + membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -144,10 +144,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -182,7 +182,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(1)), + membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -193,7 +193,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(1), EpochNumber::new(1) ), @@ -212,7 +212,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(2), EpochNumber::new(1) ), @@ -229,7 +229,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(3), EpochNumber::new(1) ), @@ -246,7 +246,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(4), EpochNumber::new(1) ), @@ -263,7 +263,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(5), EpochNumber::new(1) ), @@ -308,17 +308,16 @@ async fn test_quorum_proposal_task_qc_timeout() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); let payload_commitment = build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(node_id), EpochNumber::new(1), ); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -360,7 +359,7 @@ async fn test_quorum_proposal_task_qc_timeout() { }, ViewNumber::new(3), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(1)), + membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(3), ) @@ -396,17 +395,17 @@ async fn test_quorum_proposal_task_view_sync() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + + let membership = (*handle.hotshot.memberships).clone(); let payload_commitment = build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(node_id), EpochNumber::new(1), ); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -450,7 +449,7 @@ async fn test_quorum_proposal_task_view_sync() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(1)), + membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(2), ) @@ -484,10 +483,10 @@ async fn test_quorum_proposal_task_liveness_check() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -517,7 +516,7 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(1)), + membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -532,7 +531,7 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(1), EpochNumber::new(1) ), @@ -551,7 +550,7 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(2), EpochNumber::new(1) ), @@ -568,7 +567,7 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(3), EpochNumber::new(1) ), @@ -585,7 +584,7 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(4), EpochNumber::new(1) ), @@ -602,7 +601,7 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(5), EpochNumber::new(1) ), @@ -643,10 +642,9 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index f5c5940b20..b5d079f56c 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -44,10 +44,10 @@ async fn test_quorum_vote_task_success() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaves = Vec::new(); @@ -111,10 +111,10 @@ async fn test_quorum_vote_task_miss_dependency() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + + let mut generator = TestViewGenerator::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -195,10 +195,10 @@ async fn test_quorum_vote_task_incorrect_dependency() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + + let mut generator = TestViewGenerator::generate(membership); let mut proposals = Vec::new(); let mut leaves = Vec::new(); diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 0a4877fcc1..43773e63da 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -39,10 +39,11 @@ async fn test_transaction_task_leader_two_views_in_a_row() { EpochNumber::new(1), )); input.push(HotShotEvent::Shutdown); - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let (_, precompute_data) = - precompute_vid_commitment(&[], quorum_membership.total_nodes(EpochNumber::new(0))); + let (_, precompute_data) = precompute_vid_commitment( + &[], + handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), + ); // current view let mut exp_packed_bundle = PackedBundle::new( @@ -53,7 +54,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { current_view, vec1::vec1![ null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(0)), + handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(4), ) diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 0c706efd02..44833d7727 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -58,8 +58,6 @@ async fn test_upgrade_task_with_proposal() { let handle = build_system_handle::(3) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); let other_handles = futures::future::join_all((0..=9).map(build_system_handle)).await; @@ -85,7 +83,9 @@ async fn test_upgrade_task_with_proposal() { let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + + let mut generator = TestViewGenerator::generate(membership.clone()); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -126,7 +126,7 @@ async fn test_upgrade_task_with_proposal() { let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(1)), + membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -153,7 +153,7 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(1), EpochNumber::new(1) ), @@ -172,7 +172,7 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(2), EpochNumber::new(1) ), @@ -190,7 +190,7 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &quorum_membership, + &membership, ViewNumber::new(3), EpochNumber::new(1) ), diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index cc942451f1..5390f56e03 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -48,8 +48,6 @@ async fn test_upgrade_task_with_vote() { let handle = build_system_handle::(2) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); let old_version = Version { major: 0, minor: 1 }; let new_version = Version { major: 0, minor: 2 }; @@ -72,7 +70,9 @@ async fn test_upgrade_task_with_vote() { let consensus = handle.hotshot.consensus().clone(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let membership = (*handle.hotshot.memberships).clone(); + let mut generator = TestViewGenerator::generate(membership); + for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); votes.push(view.create_quorum_vote(&handle).await); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 8d8daf4c5e..81f9ac2999 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -45,11 +45,10 @@ async fn test_vid_task() { .0; let pub_key = handle.public_key(); - // quorum membership for VID share distribution - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); let mut vid = vid_scheme_from_view_number::( - &quorum_membership, + &membership, ViewNumber::new(0), EpochNumber::new(0), ); @@ -90,7 +89,7 @@ async fn test_vid_task() { let vid_disperse = VidDisperse::from_membership( message.data.view_number, vid_disperse, - &quorum_membership, + &membership, EpochNumber::new(0), ); @@ -110,7 +109,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(0)), + membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) @@ -132,7 +131,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(EpochNumber::new(0)), + membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 938dbbffa8..9de2531361 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -36,10 +36,9 @@ async fn test_vote_dependency_handle() { let handle = build_system_handle::(node_id) .await .0; - let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let da_membership = handle.hotshot.memberships.da_membership.clone(); + let membership = (*handle.hotshot.memberships).clone(); - let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); + let mut generator = TestViewGenerator::generate(membership.clone()); // Generate our state for the test let mut proposals = Vec::new(); @@ -90,7 +89,7 @@ async fn test_vote_dependency_handle() { private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus.clone()), instance_state: handle.hotshot.instance_state(), - quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), + quorum_membership: (*handle.hotshot.memberships).clone().into(), storage: Arc::clone(&handle.storage()), view_number, sender: event_sender.clone(), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 17fd4aa1c2..271d5b0729 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -10,6 +10,7 @@ use std::{ fmt::{self, Debug, Display, Formatter}, hash::Hash, marker::PhantomData, + num::NonZeroU64, sync::Arc, }; @@ -23,8 +24,9 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, QuorumData, QuorumData2, TimeoutData, UpgradeProposalData, VersionedVoteData, - ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, + DaData, QuorumData, QuorumData2, QuorumMaker, TimeoutData, UpgradeProposalData, + VersionedVoteData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, + Voteable, }, traits::{ election::Membership, @@ -123,14 +125,14 @@ impl> Certificate - for SimpleCertificate +impl> Certificate + for SimpleCertificate { - type Voteable = VOTEABLE; + type Voteable = DaData; type Threshold = THRESHOLD; fn create_signed_certificate( - vote_commitment: Commitment>, + vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, view: TYPES::View, @@ -145,18 +147,103 @@ impl> _pd: PhantomData, } } - async fn is_valid_cert, V: Versions>( + async fn is_valid_cert( &self, + stake_table: Vec<::StakeTableEntry>, + threshold: NonZeroU64, + upgrade_lock: &UpgradeLock, + ) -> bool { + if self.view_number == TYPES::View::genesis() { + return true; + } + let real_qc_pp = ::public_parameter( + stake_table, + U256::from(u64::from(threshold)), + ); + let Ok(commit) = self.data_commitment(upgrade_lock).await else { + return false; + }; + ::check( + &real_qc_pp, + commit.as_ref(), + self.signatures.as_ref().unwrap(), + ) + } + /// Proxy's to `Membership.stake` + fn stake_table_entry>( membership: &MEMBERSHIP, + pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch, + ) -> Option<::StakeTableEntry> { + membership.da_stake(pub_key, epoch) + } + + /// Proxy's to `Membership.da_stake_table` + fn stake_table>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry> { + membership.da_stake_table(epoch) + } + /// Proxy's to `Membership.da_total_nodes` + fn total_nodes>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> usize { + membership.da_total_nodes(epoch) + } + fn threshold>(membership: &MEMBERSHIP) -> u64 { + membership.da_success_threshold().into() + } + fn data(&self) -> &Self::Voteable { + &self.data + } + async fn data_commitment( + &self, + upgrade_lock: &UpgradeLock, + ) -> Result>> { + Ok( + VersionedVoteData::new(self.data.clone(), self.view_number, upgrade_lock) + .await? + .commit(), + ) + } +} + +impl> + Certificate for SimpleCertificate +{ + type Voteable = VOTEABLE; + type Threshold = THRESHOLD; + + fn create_signed_certificate( + vote_commitment: Commitment>, + data: Self::Voteable, + sig: ::QcType, + view: TYPES::View, + ) -> Self { + let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); + + SimpleCertificate { + data, + vote_commitment: Commitment::from_raw(vote_commitment_bytes), + view_number: view, + signatures: Some(sig), + _pd: PhantomData, + } + } + async fn is_valid_cert( + &self, + stake_table: Vec<::StakeTableEntry>, + threshold: NonZeroU64, upgrade_lock: &UpgradeLock, ) -> bool { if self.view_number == TYPES::View::genesis() { return true; } let real_qc_pp = ::public_parameter( - membership.stake_table(epoch), - U256::from(Self::threshold(membership)), + stake_table, + U256::from(u64::from(threshold)), ); let Ok(commit) = self.data_commitment(upgrade_lock).await else { return false; @@ -170,6 +257,30 @@ impl> fn threshold>(membership: &MEMBERSHIP) -> u64 { THRESHOLD::threshold(membership) } + + fn stake_table_entry>( + membership: &MEMBERSHIP, + pub_key: &TYPES::SignatureKey, + epoch: TYPES::Epoch, + ) -> Option<::StakeTableEntry> { + membership.stake(pub_key, epoch) + } + + fn stake_table>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry> { + membership.stake_table(epoch) + } + + /// Proxy's to `Membership.total_nodes` + fn total_nodes>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> usize { + membership.total_nodes(epoch) + } + fn data(&self) -> &Self::Voteable { &self.data } @@ -232,8 +343,12 @@ impl UpgradeCertificate { ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { ensure!( - cert.is_valid_cert(quorum_membership, epoch, upgrade_lock) - .await, + cert.is_valid_cert( + quorum_membership.stake_table(epoch), + quorum_membership.upgrade_threshold(), + upgrade_lock + ) + .await, "Invalid upgrade certificate." ); Ok(()) diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 013653a959..35997d3725 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -24,6 +24,9 @@ use crate::{ vote::{HasViewNumber, Vote}, }; +/// Marker that data should use the quorum cert type +pub(crate) trait QuorumMaker {} + #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. #[serde(bound(deserialize = ""))] @@ -50,12 +53,7 @@ pub struct TimeoutData { /// View the timeout is for pub view: TYPES::View, } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -/// Data used for a VID vote. -pub struct VidData { - /// Commitment to the block payload the VID vote is on. - pub payload_commit: VidCommitment, -} + #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Pre Commit vote. pub struct ViewSyncPreCommitData { @@ -119,6 +117,14 @@ mod sealed { impl Sealed for C {} } +impl QuorumMaker for QuorumData {} +impl QuorumMaker for QuorumData2 {} +impl QuorumMaker for TimeoutData {} +impl QuorumMaker for ViewSyncPreCommitData {} +impl QuorumMaker for ViewSyncCommitData {} +impl QuorumMaker for ViewSyncFinalizeData {} +impl QuorumMaker for UpgradeProposalData {} + /// A simple yes vote over some votable type. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] pub struct SimpleVote { @@ -288,14 +294,6 @@ impl Committable for DaData { } } -impl Committable for VidData { - fn commit(&self) -> Commitment { - committable::RawCommitmentBuilder::new("VID data") - .var_size_bytes(self.payload_commit.as_ref()) - .finalize() - } -} - impl Committable for UpgradeProposalData { fn commit(&self) -> Commitment { let builder = committable::RawCommitmentBuilder::new("Upgrade data"); diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index aa313fc64a..04aa76ccb4 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -9,7 +9,7 @@ use std::{collections::BTreeSet, fmt::Debug, num::NonZeroU64}; use utils::anytrace::Result; -use super::{network::Topic, node_implementation::NodeType}; +use super::node_implementation::NodeType; use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// A protocol for determining membership in and participating in a committee. @@ -21,9 +21,8 @@ pub trait Membership: Clone + Debug + Send + Sync { fn new( // Note: eligible_leaders is currently a hack because the DA leader == the quorum leader // but they should not have voting power. - eligible_leaders: Vec>, - committee_members: Vec>, - committee_topic: Topic, + stake_committee_members: Vec>, + da_committee_members: Vec>, ) -> Self; /// Get all participants in the committee (including their stake) for a specific epoch @@ -32,6 +31,12 @@ pub trait Membership: Clone + Debug + Send + Sync { epoch: TYPES::Epoch, ) -> Vec<::StakeTableEntry>; + /// Get all participants in the committee (including their stake) for a specific epoch + fn da_stake_table( + &self, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry>; + /// Get all participants in the committee for a specific view for a specific epoch fn committee_members( &self, @@ -39,6 +44,13 @@ pub trait Membership: Clone + Debug + Send + Sync { epoch: TYPES::Epoch, ) -> BTreeSet; + /// Get all participants in the committee for a specific view for a specific epoch + fn da_committee_members( + &self, + view_number: TYPES::View, + epoch: TYPES::Epoch, + ) -> BTreeSet; + /// Get all leaders in the committee for a specific view for a specific epoch fn committee_leaders( &self, @@ -54,9 +66,20 @@ pub trait Membership: Clone + Debug + Send + Sync { epoch: TYPES::Epoch, ) -> Option<::StakeTableEntry>; + /// Get the DA stake table entry for a public key, returns `None` if the + /// key is not in the table for a specific epoch + fn da_stake( + &self, + pub_key: &TYPES::SignatureKey, + epoch: TYPES::Epoch, + ) -> Option<::StakeTableEntry>; + /// See if a node has stake in the committee in a specific epoch fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; + /// See if a node has stake in the committee in a specific epoch + fn has_da_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; + /// The leader of the committee for view `view_number` in `epoch`. /// /// Note: this function uses a HotShot-internal error type. @@ -74,6 +97,9 @@ pub trait Membership: Clone + Debug + Send + Sync { /// The leader of the committee for view `view_number` in `epoch`. /// + /// Note: There is no such thing as a DA leader, so any consumer + /// requiring a leader should call this. + /// /// # Errors /// Returns an error if the leader cannot be calculated fn lookup_leader( @@ -82,15 +108,18 @@ pub trait Membership: Clone + Debug + Send + Sync { epoch: TYPES::Epoch, ) -> std::result::Result; - /// Get the network topic for the committee - fn committee_topic(&self) -> Topic; - /// Returns the number of total nodes in the committee in an epoch `epoch` fn total_nodes(&self, epoch: TYPES::Epoch) -> usize; + /// Returns the number of total DA nodes in the committee in an epoch `epoch` + fn da_total_nodes(&self, epoch: TYPES::Epoch) -> usize; + /// Returns the threshold for a specific `Membership` implementation fn success_threshold(&self) -> NonZeroU64; + /// Returns the DA threshold for a specific `Membership` implementation + fn da_success_threshold(&self) -> NonZeroU64; + /// Returns the threshold for a specific `Membership` implementation fn failure_threshold(&self) -> NonZeroU64; diff --git a/types/src/vote.rs b/types/src/vote.rs index 6291599004..bdff9d4bb5 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -9,6 +9,7 @@ use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, + num::NonZeroU64, }; use bitvec::{bitvec, vec::BitVec}; @@ -56,7 +57,7 @@ The certificate formed from the collection of signatures a committee. The committee is defined by the `Membership` associated type. The votes all must be over the `Commitment` associated type. */ -pub trait Certificate: HasViewNumber { +pub trait Certificate: HasViewNumber { /// The data commitment this certificate certifies. type Voteable: Voteable; @@ -72,17 +73,38 @@ pub trait Certificate: HasViewNumber { ) -> Self; /// Checks if the cert is valid in the given epoch - fn is_valid_cert, V: Versions>( + fn is_valid_cert( &self, - membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + stake_table: Vec<::StakeTableEntry>, + threshold: NonZeroU64, upgrade_lock: &UpgradeLock, ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>(membership: &MEMBERSHIP) -> u64; + + /// Get Stake Table from Membership implementation. + fn stake_table>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry>; + + /// Get Total Nodes from Membership implementation. + fn total_nodes>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> usize; + + /// Get `StakeTableEntry` from Membership implementation. + fn stake_table_entry>( + membership: &MEMBERSHIP, + pub_key: &TYPES::SignatureKey, + epoch: TYPES::Epoch, + ) -> Option<::StakeTableEntry>; + /// Get the commitment which was voted on fn data(&self) -> &Self::Voteable; + /// Get the vote commitment which the votes commit to fn data_commitment( &self, @@ -103,7 +125,7 @@ type SignersMap = HashMap< pub struct VoteAccumulator< TYPES: NodeType, VOTE: Vote, - CERT: Certificate, + CERT: Certificate, V: Versions, > { /// Map of all signatures accumulated so far @@ -127,7 +149,7 @@ pub struct VoteAccumulator< impl< TYPES: NodeType, VOTE: Vote, - CERT: Certificate, + CERT: Certificate, V: Versions, > VoteAccumulator { @@ -161,10 +183,10 @@ impl< return Either::Left(()); } - let Some(stake_table_entry) = membership.stake(&key, epoch) else { + let Some(stake_table_entry) = CERT::stake_table_entry(membership, &key, epoch) else { return Either::Left(()); }; - let stake_table = membership.stake_table(epoch); + let stake_table = CERT::stake_table(membership, epoch); let Some(vote_node_id) = stake_table .iter() .position(|x| *x == stake_table_entry.clone()) @@ -187,7 +209,7 @@ impl< let (signers, sig_list) = self .signers .entry(vote_commitment) - .or_insert((bitvec![0; membership.total_nodes(epoch)], Vec::new())); + .or_insert((bitvec![0; CERT::total_nodes(membership, epoch)], Vec::new())); if signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); return Either::Left(()); @@ -195,7 +217,6 @@ impl< signers.set(vote_node_id, true); sig_list.push(original_signature); - // TODO: Get the stake from the stake table entry. *total_stake_casted += stake_table_entry.stake(); total_vote_map.insert(key, (vote.signature(), vote_commitment)); From 3230f0779bdedb984adf56e9da23d4e9c17abecf Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 2 Dec 2024 11:19:02 +0100 Subject: [PATCH 1318/1393] Handle HighQcSend (#3919) * Handle HighQcSend * Use the new quorum cert struct --- task-impls/src/consensus/handlers.rs | 10 +++++++++- task-impls/src/events.rs | 10 +++++++--- task-impls/src/network.rs | 11 ++++++++--- task-impls/src/quorum_proposal/mod.rs | 2 +- types/src/message.rs | 4 ++-- 5 files changed, 27 insertions(+), 10 deletions(-) diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 684c3b6e34..6dab4f938c 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -133,7 +133,15 @@ pub async fn send_high_qc { HighQcRecv(QuorumCertificate2, TYPES::SignatureKey), /// Send our HighQc to the next leader, should go to the same leader as our vote - HighQcSend(QuorumCertificate2, TYPES::SignatureKey), + HighQcSend( + QuorumCertificate2, + TYPES::SignatureKey, + TYPES::SignatureKey, + ), } impl HotShotEvent { @@ -322,7 +326,7 @@ impl HotShotEvent { | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), HotShotEvent::VidResponseSend(_, _, proposal) | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number), - HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, _) => { + HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, ..) => { Some(qc.view_number()) } } @@ -590,7 +594,7 @@ impl Display for HotShotEvent { HotShotEvent::HighQcRecv(qc, _) => { write!(f, "HighQcRecv(view_number={:?}", qc.view_number()) } - HotShotEvent::HighQcSend(qc, _) => { + HotShotEvent::HighQcSend(qc, ..) => { write!(f, "HighQcSend(view_number={:?}", qc.view_number()) } } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 7cc8dceca0..ad9dcd4168 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -114,9 +114,7 @@ impl NetworkMessageTaskState { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } - GeneralConsensusMessage::HighQc(qc) => { - HotShotEvent::HighQcRecv(qc.to_qc2(), sender) - } + GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -650,6 +648,13 @@ impl< TransmitType::Direct(to), )) } + HotShotEvent::HighQcSend(quorum_cert, leader, sender) => Some(( + sender, + MessageKind::Consensus(SequencingMessage::General( + GeneralConsensusMessage::HighQc(quorum_cert), + )), + TransmitType::Direct(leader), + )), _ => None, } } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index d226677eb1..034db12812 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -502,7 +502,7 @@ impl, V: Versions> HotShotEvent::ViewChange(view, _) | HotShotEvent::Timeout(view) => { self.cancel_tasks(*view); } - HotShotEvent::HighQcSend(qc, _sender) => { + HotShotEvent::HighQcSend(qc, ..) => { ensure!(qc.view_number() > self.highest_qc.view_number()); let epoch_number = self.consensus.read().await.cur_epoch(); ensure!( diff --git a/types/src/message.rs b/types/src/message.rs index 00ed7d9ac5..eb4861d22e 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -30,7 +30,7 @@ use crate::{ }, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, QuorumCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, + DaCertificate, QuorumCertificate2, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ @@ -210,7 +210,7 @@ pub enum GeneralConsensusMessage { ProposalResponse(Proposal>), /// Message for the next leader containing our highest QC - HighQc(QuorumCertificate), + HighQc(QuorumCertificate2), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] From 2797842b764eb1e8a67b49a410452352037b50b0 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Mon, 2 Dec 2024 09:17:52 -0800 Subject: [PATCH 1319/1393] [DRB] - Start the DRB calculation two epochs in advance (#3911) * Implement DRB computation logic * Fix epoch check, replace initial values * Use decided qc for computation --- hotshot/src/tasks/task_state.rs | 1 + hotshot/src/traits/election.rs | 2 - task-impls/src/quorum_vote/handlers.rs | 69 ++++++++++++++++++- task-impls/src/quorum_vote/mod.rs | 4 ++ testing/src/view_generator.rs | 9 +-- testing/tests/tests_1/message.rs | 3 +- types/src/data.rs | 35 ++++++---- .../election/dynamic.rs => types/src/drb.rs | 18 ++++- types/src/lib.rs | 2 + 9 files changed, 116 insertions(+), 27 deletions(-) rename hotshot/src/traits/election/dynamic.rs => types/src/drb.rs (83%) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 3c4e81585b..1df18f9b5e 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -231,6 +231,7 @@ impl, V: Versions> CreateTaskState vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), membership: (*handle.hotshot.memberships).clone().into(), + drb_computations: BTreeMap::new(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election.rs index 427ed12629..4f9212705f 100644 --- a/hotshot/src/traits/election.rs +++ b/hotshot/src/traits/election.rs @@ -6,8 +6,6 @@ //! elections used for consensus -/// Dynamic leader election with epochs. -pub mod dynamic; /// leader completely randomized every view pub mod randomized_committee; /// static (round robin) committee election diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index d585259385..56804f1d52 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -13,18 +13,22 @@ use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal2, VidDisperseShare}, + drb::compute_drb_result, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, traits::{ + block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, ValidatedState, }, + utils::epoch_from_block_number, vote::HasViewNumber, }; +use tokio::spawn; use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; @@ -135,13 +139,13 @@ pub(crate) async fn handle_quorum_proposal_validated< // We don't need to hold this while we broadcast drop(consensus_writer); - // First, send an update to everyone saying that we've reached a decide + // Send an update to everyone saying that we've reached a decide broadcast_event( Event { view_number: decided_view_number, event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - // This is never *not* none if we've reached a new decide, so this is safe to unwrap. + leaf_chain: Arc::new(leaf_views.clone()), + // This is never none if we've reached a new decide, so this is safe to unwrap. qc: Arc::new(new_decide_qc.unwrap()), block_size: included_txns.map(|txns| txns.len().try_into().unwrap()), }, @@ -150,6 +154,65 @@ pub(crate) async fn handle_quorum_proposal_validated< ) .await; tracing::debug!("Successfully sent decide event"); + + // Start the DRB computation two epochs in advance, if the decided block is the last but + // third block in the current epoch and we are in the quorum committee of the next epoch. + // + // Special cases: + // * Epoch 0: No DRB computation since we'll transition to epoch 1 immediately. + // * Epoch 1 and 2: Use `[0u8; 32]` as the DRB result since when we first start the + // computation in epoch 1, the result is for epoch 3. + // + // We don't need to handle the special cases explicitly here, because the first proposal + // with which we'll start the DRB computation is for epoch 3. + if version >= V::Epochs::VERSION { + // This is never none if we've reached a new decide, so this is safe to unwrap. + let decided_block_number = leaf_views + .last() + .unwrap() + .leaf + .block_header() + .block_number(); + + // Skip if this is not the expected block. + if task_state.epoch_height != 0 + && (decided_block_number + 3) % task_state.epoch_height == 0 + { + // Cancel old DRB computation tasks. + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + decided_block_number, + task_state.epoch_height, + )); + let current_tasks = task_state.drb_computations.split_off(¤t_epoch_number); + while let Some((_, task)) = task_state.drb_computations.pop_last() { + task.abort(); + } + task_state.drb_computations = current_tasks; + + // Skip if we are not in the committee of the next epoch. + if task_state + .membership + .has_stake(&task_state.public_key, current_epoch_number + 1) + { + let new_epoch_number = current_epoch_number + 2; + let Ok(drb_seed_input_vec) = + bincode::serialize(&proposal.justify_qc.signatures) + else { + bail!("Failed to serialize the QC signature."); + }; + let Ok(drb_seed_input) = drb_seed_input_vec.try_into() else { + bail!( + "Failed to convert the serialized QC signature into a DRB seed input." + ); + }; + let new_drb_task = + spawn(async move { compute_drb_result::(drb_seed_input) }); + task_state + .drb_computations + .insert(new_epoch_number, new_drb_task); + } + } + } } Ok(()) diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index d1079197f8..0567329788 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -18,6 +18,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal2}, + drb::DrbResult, event::Event, message::{Proposal, UpgradeLock}, traits::{ @@ -272,6 +273,9 @@ pub struct QuorumVoteTaskState, V: /// Membership for Quorum certs/votes and DA committee certs/votes. pub membership: Arc, + /// Table for the in-progress DRB computation tasks. + pub drb_computations: BTreeMap>, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 8ff60cf949..a6651ad55d 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -25,6 +25,7 @@ use hotshot_types::{ DaProposal, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, ViewChangeEvidence, ViewNumber, }, + drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, @@ -141,8 +142,8 @@ impl TestView { .to_qc2(), upgrade_certificate: None, view_change_evidence: None, - drb_result: [0; 32], - drb_seed: [0; 96], + drb_result: INITIAL_DRB_RESULT, + drb_seed: INITIAL_DRB_SEED_INPUT, }; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); @@ -368,8 +369,8 @@ impl TestView { justify_qc: quorum_certificate.clone(), upgrade_certificate: upgrade_certificate.clone(), view_change_evidence, - drb_result: [0; 32], - drb_seed: [0; 96], + drb_result: INITIAL_DRB_RESULT, + drb_seed: INITIAL_DRB_SEED_INPUT, }; let mut leaf = Leaf2::from_quorum_proposal(&proposal); diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index c8869bc3a5..3778aa4737 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -66,8 +66,7 @@ async fn test_certificate2_validity() { use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; use hotshot_types::{ data::{EpochNumber, Leaf, Leaf2}, - traits::election::Membership, - traits::node_implementation::ConsensusTime, + traits::{election::Membership, node_implementation::ConsensusTime}, vote::Certificate, }; diff --git a/types/src/data.rs b/types/src/data.rs index 39a608f2f0..422f2887cb 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -30,6 +30,7 @@ use utils::anytrace::*; use vec1::Vec1; use crate::{ + drb::{DrbResult, DrbSeedInput, INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ QuorumCertificate, QuorumCertificate2, TimeoutCertificate, UpgradeCertificate, @@ -391,13 +392,17 @@ pub struct QuorumProposal2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, - /// the DRB seed currently being calculated + /// The DRB seed for the next epoch. + /// + /// The DRB computation using this seed was started in the previous epoch. #[serde(with = "serde_bytes")] - pub drb_seed: [u8; 96], + pub drb_seed: DrbSeedInput, - /// the result of the DRB calculation + /// The DRB result for the current epoch. + /// + /// The DRB computation with this result was started two epochs ago. #[serde(with = "serde_bytes")] - pub drb_result: [u8; 32], + pub drb_result: DrbResult, } impl From> for QuorumProposal2 { @@ -408,8 +413,8 @@ impl From> for QuorumProposal2 { justify_qc: quorum_proposal.justify_qc.to_qc2(), upgrade_certificate: quorum_proposal.upgrade_certificate, view_change_evidence: quorum_proposal.proposal_certificate, - drb_seed: [0; 96], - drb_result: [0; 32], + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, } } } @@ -438,8 +443,8 @@ impl From> for Leaf2 { upgrade_certificate: leaf.upgrade_certificate, block_payload: leaf.block_payload, view_change_evidence: None, - drb_seed: [0; 96], - drb_result: [0; 32], + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, } } } @@ -562,13 +567,17 @@ pub struct Leaf2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, - /// the DRB seed currently being calculated + /// The DRB seed for the next epoch. + /// + /// The DRB computation using this seed was started in the previous epoch. #[serde(with = "serde_bytes")] - pub drb_seed: [u8; 96], + pub drb_seed: DrbSeedInput, - /// the result of the DRB calculation + /// The DRB result for the current epoch. + /// + /// The DRB computation with this result was started two epochs ago. #[serde(with = "serde_bytes")] - pub drb_result: [u8; 32], + pub drb_result: DrbResult, } impl Leaf2 { @@ -688,7 +697,7 @@ impl Leaf2 { impl Committable for Leaf2 { fn commit(&self) -> committable::Commitment { - if self.drb_seed == [0; 96] && self.drb_result == [0; 32] { + if self.drb_seed == [0; 32] && self.drb_result == [0; 32] { RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) .field("parent leaf commitment", self.parent_commitment) diff --git a/hotshot/src/traits/election/dynamic.rs b/types/src/drb.rs similarity index 83% rename from hotshot/src/traits/election/dynamic.rs rename to types/src/drb.rs index eba6f100ec..1790e9b30f 100644 --- a/hotshot/src/traits/election/dynamic.rs +++ b/types/src/drb.rs @@ -6,9 +6,10 @@ use std::hash::{DefaultHasher, Hash, Hasher}; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use sha2::{Digest, Sha256}; +use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; + // TODO: Add the following consts once we bench the hash time. // // /// Highest number of hashes that a hardware can complete in a second. @@ -22,6 +23,17 @@ use sha2::{Digest, Sha256}; /// Arbitrary number of times the hash function will be repeatedly called. const DIFFICULTY_LEVEL: u64 = 10; +/// DRB seed input for epoch 1 and 2. +pub const INITIAL_DRB_SEED_INPUT: [u8; 32] = [0; 32]; +/// DRB result for epoch 1 and 2. +pub const INITIAL_DRB_RESULT: [u8; 32] = [0; 32]; + +/// Alias for DRB seed input for `compute_drb_result`, serialized from the QC signature. +pub type DrbSeedInput = [u8; 32]; + +/// Alias for DRB result from `compute_drb_result`. +pub type DrbResult = [u8; 32]; + // TODO: Use `HASHES_PER_SECOND` * `VIEW_TIMEOUT` * `DRB_CALCULATION_NUM_VIEW` to calculate this // once we bench the hash time. // @@ -40,7 +52,7 @@ pub fn difficulty_level() -> u64 { /// # Arguments /// * `drb_seed_input` - Serialized QC signature. #[must_use] -pub fn compute_drb_result(drb_seed_input: [u8; 32]) -> [u8; 32] { +pub fn compute_drb_result(drb_seed_input: DrbSeedInput) -> DrbResult { let mut hash = drb_seed_input.to_vec(); for _iter in 0..DIFFICULTY_LEVEL { // TODO: This may be optimized to avoid memcopies after we bench the hash time. @@ -61,7 +73,7 @@ pub fn compute_drb_result(drb_seed_input: [u8; 32]) -> [u8; 32] pub fn leader( view_number: usize, stake_table: &[::StakeTableEntry], - drb_result: [u8; 32], + drb_result: DrbResult, ) -> TYPES::SignatureKey { let mut hasher = DefaultHasher::new(); drb_result.hash(&mut hasher); diff --git a/types/src/lib.rs b/types/src/lib.rs index 5523b56b28..93076491e1 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -20,6 +20,8 @@ pub mod bundle; pub mod consensus; pub mod constants; pub mod data; +/// Holds the types and functions for DRB computation. +pub mod drb; pub mod error; pub mod event; /// Holds the configuration file specification for a HotShot node. From 8265ec7e1f8d696abb5116ab43c183d265139fcc Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:55:09 -0500 Subject: [PATCH 1320/1393] add metric for last voted view (#3935) --- hotshot/src/tasks/task_state.rs | 4 ++++ task-impls/src/quorum_vote/mod.rs | 17 ++++++++++++++++- testing/tests/tests_1/vote_dependency_handle.rs | 1 + types/src/consensus.rs | 3 +++ 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 1df18f9b5e..ba24fc4bd3 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -222,6 +222,9 @@ impl, V: Versions> CreateTaskState async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); + // Clone the consensus metrics + let consensus_metrics = Arc::clone(&consensus.read().await.metrics); + Self { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -237,6 +240,7 @@ impl, V: Versions> CreateTaskState storage: Arc::clone(&handle.storage), upgrade_lock: handle.hotshot.upgrade_lock.clone(), epoch_height: handle.hotshot.config.epoch_height, + consensus_metrics, } } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 0567329788..a89e27a84c 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -16,7 +16,7 @@ use hotshot_task::{ task::TaskState, }; use hotshot_types::{ - consensus::OuterConsensus, + consensus::{ConsensusMetricsValue, OuterConsensus}, data::{Leaf2, QuorumProposal2}, drb::DrbResult, event::Event, @@ -80,6 +80,8 @@ pub struct VoteDependencyHandle, V pub receiver: InactiveReceiver>>, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// The consensus metrics + pub consensus_metrics: Arc, /// The node's id pub id: u64, /// Number of blocks in an epoch, zero means there are no epochs @@ -282,6 +284,9 @@ pub struct QuorumVoteTaskState, V: /// The node's id pub id: u64, + /// The consensus metrics + pub consensus_metrics: Arc, + /// Reference to the storage. pub storage: Arc>, @@ -386,6 +391,7 @@ impl, V: Versions> QuorumVoteTaskS upgrade_lock: self.upgrade_lock.clone(), id: self.id, epoch_height: self.epoch_height, + consensus_metrics: Arc::clone(&self.consensus_metrics), }, ); self.vote_dependencies @@ -410,6 +416,15 @@ impl, V: Versions> QuorumVoteTaskS } } + // Update the metric for the last voted view + if let Ok(last_voted_view_usize) = usize::try_from(*new_view) { + self.consensus_metrics + .last_voted_view + .set(last_voted_view_usize); + } else { + tracing::warn!("Failed to convert last voted view to a usize: {}", new_view); + } + self.latest_voted_view = new_view; return true; diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 9de2531361..1b12e0b0f0 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -88,6 +88,7 @@ async fn test_vote_dependency_handle() { public_key: handle.public_key(), private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus.clone()), + consensus_metrics: Arc::clone(&consensus.read().await.metrics), instance_state: handle.hotshot.instance_state(), quorum_membership: (*handle.hotshot.memberships).clone().into(), storage: Arc::clone(&handle.storage()), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 81250b7b5b..2d2e814e9f 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -331,6 +331,8 @@ pub struct ConsensusMetricsValue { pub last_synced_block_height: Box, /// The number of last decided view pub last_decided_view: Box, + /// The number of the last voted view + pub last_voted_view: Box, /// Number of timestamp for the last decided time pub last_decided_time: Box, /// The current view @@ -365,6 +367,7 @@ impl ConsensusMetricsValue { last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), + last_voted_view: metrics.create_gauge(String::from("last_voted_view"), None), last_decided_time: metrics.create_gauge(String::from("last_decided_time"), None), current_view: metrics.create_gauge(String::from("current_view"), None), number_of_views_since_last_decide: metrics From 4a692c7141198deea733cf505a572c72a79747f8 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:30:00 -0800 Subject: [PATCH 1321/1393] Add randomized Membership impls for testing (#3863) * replace calls to Memberships Delete Memberships and replace functionality. Add some methods to `Membership` trait to deal w/ collapsing into one type both kinds of memberships (stake and DA). * avoid passing membership into `is_valid_cert * for DA, avoid proxying threshold through `Threshold` trait * remove `Topic` param from `Membership::new * Split cert impls by marker (#3891) * add membership methods to Cert trait * remove non-existent tests from justfile * cleanup * conflict resolution * revert some unnecessary name changes We can keep the old name where we only have one membership type to keep the diff smaller. * Leaders are Leaders Always use quorum for leader selection * Add randomized committees for testing * add randomized overlap committee generator * Add epoch to threshold functions to allow dynamically sized committees * rework randomized_committee_members to work with flattened Membership * fix lingering da_leader things --------- Co-authored-by: tbro --- example-types/src/node_types.rs | 47 +- hotshot/src/traits/election/helpers.rs | 442 ++++++++++++++++++ .../traits/{election.rs => election/mod.rs} | 8 + .../traits/election/randomized_committee.rs | 8 +- .../election/randomized_committee_members.rs | 353 ++++++++++++++ .../src/traits/election/static_committee.rs | 15 +- .../static_committee_leader_two_views.rs | 8 +- .../src/traits/networking/memory_network.rs | 55 ++- hotshot/src/types/handle.rs | 1 - macros/src/lib.rs | 113 ++++- task-impls/src/consensus/handlers.rs | 5 +- task-impls/src/da.rs | 1 - task-impls/src/helpers.rs | 10 +- task-impls/src/quorum_proposal/handlers.rs | 3 +- task-impls/src/quorum_proposal/mod.rs | 6 +- .../src/quorum_proposal_recv/handlers.rs | 4 +- task-impls/src/quorum_vote/mod.rs | 2 +- task-impls/src/view_sync.rs | 6 +- testing/src/helpers.rs | 6 +- testing/src/test_runner.rs | 2 +- testing/tests/tests_1/block_builder.rs | 10 +- testing/tests/tests_1/message.rs | 4 +- testing/tests/tests_1/test_success.rs | 19 + types/src/simple_certificate.rs | 46 +- types/src/simple_vote.rs | 16 +- types/src/traits/election.rs | 8 +- types/src/traits/metrics.rs | 3 +- types/src/vote.rs | 9 +- 28 files changed, 1110 insertions(+), 100 deletions(-) create mode 100644 hotshot/src/traits/election/helpers.rs rename hotshot/src/traits/{election.rs => election/mod.rs} (79%) create mode 100644 hotshot/src/traits/election/randomized_committee_members.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 80b634515c..01160e1cb9 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -4,9 +4,16 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::marker::PhantomData; + +pub use hotshot::traits::election::helpers::{ + RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, +}; use hotshot::traits::{ election::{ - randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, + helpers::QuorumFilterConfig, randomized_committee::RandomizedCommittee, + randomized_committee_members::RandomizedCommitteeMembers, + static_committee::StaticCommittee, static_committee_leader_two_views::StaticCommitteeLeaderForTwoViews, }, implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, @@ -87,6 +94,40 @@ impl NodeType for TestTypesRandomizedLeader { type BuilderSignatureKey = BuilderKey; } +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +/// filler struct to implement node type and allow us +/// to select our traits +pub struct TestTypesRandomizedCommitteeMembers { + _pd: PhantomData, +} + +impl NodeType for TestTypesRandomizedCommitteeMembers { + type AuctionResult = TestAuctionResult; + type View = ViewNumber; + type Epoch = EpochNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = + RandomizedCommitteeMembers, CONFIG>; + type BuilderSignatureKey = BuilderKey; +} + #[derive( Copy, Clone, @@ -133,7 +174,7 @@ pub struct Libp2pImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct WebImpl; -/// Combined Network implementation (libp2p + web sever) +/// Combined Network implementation (libp2p + web server) #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct CombinedImpl; @@ -223,7 +264,7 @@ impl Versions for EpochsTestVersions { 0, 0, ]; - type Marketplace = StaticVersion<0, 3>; + type Marketplace = StaticVersion<0, 99>; type Epochs = StaticVersion<0, 4>; } diff --git a/hotshot/src/traits/election/helpers.rs b/hotshot/src/traits/election/helpers.rs new file mode 100644 index 0000000000..2a2c7fe172 --- /dev/null +++ b/hotshot/src/traits/election/helpers.rs @@ -0,0 +1,442 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{collections::BTreeSet, hash::Hash}; + +use rand::{rngs::StdRng, Rng, SeedableRng}; + +/// Helper which allows producing random numbers within a range and preventing duplicates +/// If consumed as a regular iterator, will return a randomly ordered permutation of all +/// values from 0..max +struct NonRepeatValueIterator { + /// Random number generator to use + rng: StdRng, + + /// Values which have already been emitted, to avoid duplicates + values: BTreeSet, + + /// Maximum value, open-ended. Numbers returned will be 0..max + max: u64, +} + +impl NonRepeatValueIterator { + /// Create a new NonRepeatValueIterator + pub fn new(rng: StdRng, max: u64) -> Self { + Self { + rng, + values: BTreeSet::new(), + max, + } + } +} + +impl Iterator for NonRepeatValueIterator { + type Item = u64; + + fn next(&mut self) -> Option { + if self.values.len() as u64 >= self.max { + return None; + } + + loop { + let v = self.rng.gen_range(0..self.max); + if !self.values.contains(&v) { + self.values.insert(v); + return Some(v); + } + } + } +} + +/// Create a single u64 seed by merging two u64s. Done this way to allow easy seeding of the number generator +/// from both a stable SOUND as well as a moving value ROUND (typically, epoch). Shift left by 8 to avoid +/// scenarios where someone manually stepping seeds would pass over the same space of random numbers across +/// sequential rounds. Doesn't have to be 8, but has to be large enough that it is unlikely that a given +/// test run will collide; using 8 means that 256 rounds (epochs) would have to happen inside of a test before +/// the test starts repeating values from SEED+1. +fn make_seed(seed: u64, round: u64) -> u64 { + seed.wrapping_add(round.wrapping_shl(8)) +} + +/// Create a pair of PRNGs for the given SEED and ROUND. Prev_rng is the PRNG for the previous ROUND, used to +/// deterministically replay random numbers generated for the previous ROUND. +fn make_rngs(seed: u64, round: u64) -> (StdRng, StdRng) { + let prev_rng = SeedableRng::seed_from_u64(make_seed(seed, round.wrapping_sub(1))); + let this_rng = SeedableRng::seed_from_u64(make_seed(seed, round)); + + (prev_rng, this_rng) +} + +/// Iterator which returns odd/even values for a given COUNT of nodes. For OVERLAP=0, this will return +/// [0, 2, 4, 6, ...] for an even round, and [1, 3, 5, 7, ...] for an odd round. Setting OVERLAP>0 will +/// randomly introduce OVERLAP elements from the previous round, so an even round with OVERLAP=2 will contain +/// something like [1, 7, 2, 4, 0, ...]. Note that the total number of nodes will always be COUNT/2, so +/// for OVERLAP>0 a random number of nodes which would have been in the round for OVERLAP=0 will be dropped. +/// Ordering of nodes is random. Outputs is deterministic when prev_rng and this_rng are provided by make_rngs +/// using the same values for SEED and ROUND. +pub struct StableQuorumIterator { + /// PRNG from the previous round + prev_rng: NonRepeatValueIterator, + + /// PRNG for the current round + this_rng: NonRepeatValueIterator, + + /// Current ROUND + round: u64, + + /// Count of nodes in the source quorum being filtered against + count: u64, + + /// OVERLAP of nodes to be carried over from the previous round + overlap: u64, + + /// The next call to next() will emit the value with this index. Starts at 0 and is incremented for each + /// call to next() + index: u64, +} + +/// Determines how many possible values can be made for the given odd/even +/// E.g. if count is 5, then possible values would be [0, 1, 2, 3, 4] +/// if odd = true, slots = 2 (1 or 3), else slots = 3 (0, 2, 4) +fn calc_num_slots(count: u64, odd: bool) -> u64 { + (count / 2) + if odd { 0 } else { count % 2 } +} + +impl StableQuorumIterator { + #[must_use] + /// Create a new StableQuorumIterator + /// + /// # Panics + /// + /// panics if overlap is greater than half of count + pub fn new(seed: u64, round: u64, count: u64, overlap: u64) -> Self { + assert!( + count / 2 > overlap, + "Overlap cannot be greater than the entire set size" + ); + + let (prev_rng, this_rng) = make_rngs(seed, round); + + Self { + prev_rng: NonRepeatValueIterator::new(prev_rng, calc_num_slots(count, round % 2 == 0)), + this_rng: NonRepeatValueIterator::new(this_rng, calc_num_slots(count, round % 2 == 1)), + round, + count, + overlap, + index: 0, + } + } +} + +impl Iterator for StableQuorumIterator { + type Item = u64; + + fn next(&mut self) -> Option { + if self.index >= (self.count / 2) { + // Always return exactly half of the possible values. If we have OVERLAP>0 then + // we need to return (COUNT/2)-OVERLAP of the current set, even if there are additional + // even (or odd) numbers that we can return. + None + } else if self.index < self.overlap { + // Generate enough values for the previous round. If the current round is odd, then + // we want to pick even values that were selected from the previous round to create OVERLAP + // even values. + let v = self.prev_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + (1 - self.round % 2)) + } else { + // Generate new values. If our current round is odd, we'll be creating (COUNT/2)-OVERLAP + // odd values here. + let v = self.this_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + self.round % 2) + } + } +} + +#[must_use] +/// Helper function to convert the arguments to a StableQuorumIterator into an ordered set of values. +/// +/// # Panics +/// +/// panics if the arguments are invalid for StableQuorumIterator::new +pub fn stable_quorum_filter(seed: u64, round: u64, count: usize, overlap: u64) -> BTreeSet { + StableQuorumIterator::new(seed, round, count as u64, overlap) + // We should never have more than u32_max members in a test + .map(|x| usize::try_from(x).unwrap()) + .collect() +} + +/// Constructs a quorum with a random number of members and overlaps. Functions similar to StableQuorumIterator, +/// except that the number of MEMBERS and OVERLAP are also (deterministically) random, to allow additional variance +/// in testing. +pub struct RandomOverlapQuorumIterator { + /// PRNG from the previous round + prev_rng: NonRepeatValueIterator, + + /// PRNG for the current round + this_rng: NonRepeatValueIterator, + + /// Current ROUND + round: u64, + + /// Number of members to emit for the current round + members: u64, + + /// OVERLAP of nodes to be carried over from the previous round + overlap: u64, + + /// The next call to next() will emit the value with this index. Starts at 0 and is incremented for each + /// call to next() + index: u64, +} + +impl RandomOverlapQuorumIterator { + #[must_use] + /// Create a new RandomOverlapQuorumIterator + /// + /// # Panics + /// + /// panics if overlap and members can produce invalid results or if ranges are invalid + pub fn new( + seed: u64, + round: u64, + count: u64, + members_min: u64, + members_max: u64, + overlap_min: u64, + overlap_max: u64, + ) -> Self { + assert!( + members_min <= members_max, + "Members_min cannot be greater than members_max" + ); + assert!( + overlap_min <= overlap_max, + "Overlap_min cannot be greater than overlap_max" + ); + assert!( + overlap_max < members_min, + "Overlap_max must be less than members_min" + ); + assert!( + count / 2 > overlap_max, + "Overlap cannot be greater than the entire set size" + ); + + let (mut prev_rng, mut this_rng) = make_rngs(seed, round); + + // Consume two values from prev_rng to advance it to the same state it was at the beginning of the previous round + let _prev_members = prev_rng.gen_range(members_min..=members_max); + let _prev_overlap = prev_rng.gen_range(overlap_min..=overlap_max); + let this_members = this_rng.gen_range(members_min..=members_max); + let this_overlap = this_rng.gen_range(overlap_min..=overlap_max); + + Self { + prev_rng: NonRepeatValueIterator::new(prev_rng, calc_num_slots(count, round % 2 == 0)), + this_rng: NonRepeatValueIterator::new(this_rng, calc_num_slots(count, round % 2 == 1)), + round, + members: this_members, + overlap: this_overlap, + index: 0, + } + } +} + +impl Iterator for RandomOverlapQuorumIterator { + type Item = u64; + + fn next(&mut self) -> Option { + if self.index >= self.members { + None + } else if self.index < self.overlap { + // Generate enough values for the previous round + let v = self.prev_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + (1 - self.round % 2)) + } else { + // Generate new values + let v = self.this_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + self.round % 2) + } + } +} + +#[must_use] +/// Helper function to convert the arguments to a StableQuorumIterator into an ordered set of values. +/// +/// # Panics +/// +/// panics if the arguments are invalid for RandomOverlapQuorumIterator::new +pub fn random_overlap_quorum_filter( + seed: u64, + round: u64, + count: usize, + members_min: u64, + members_max: u64, + overlap_min: u64, + overlap_max: u64, +) -> BTreeSet { + RandomOverlapQuorumIterator::new( + seed, + round, + count as u64, + members_min, + members_max, + overlap_min, + overlap_max, + ) + // We should never have more than u32_max members in a test + .map(|x| usize::try_from(x).unwrap()) + .collect() +} + +/// Trait wrapping a config for quorum filters. This allows selection between either the StableQuorumIterator or the +/// RandomOverlapQuorumIterator functionality from above +pub trait QuorumFilterConfig: + Copy + + Clone + + std::fmt::Debug + + Default + + Send + + Sync + + Ord + + PartialOrd + + Eq + + PartialEq + + Hash + + 'static +{ + /// Called to run the filter and return a set of indices + fn execute(epoch: u64, count: usize) -> BTreeSet; +} + +#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash, Ord, PartialOrd)] +/// Provides parameters to use the StableQuorumIterator +pub struct StableQuorumFilterConfig {} + +impl QuorumFilterConfig + for StableQuorumFilterConfig +{ + fn execute(epoch: u64, count: usize) -> BTreeSet { + stable_quorum_filter(SEED, epoch, count, OVERLAP) + } +} + +#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash, Ord, PartialOrd)] +/// Provides parameters to use the RandomOverlapQuorumIterator +pub struct RandomOverlapQuorumFilterConfig< + const SEED: u64, + const MEMBERS_MIN: u64, + const MEMBERS_MAX: u64, + const OVERLAP_MIN: u64, + const OVERLAP_MAX: u64, +> {} + +impl< + const SEED: u64, + const MEMBERS_MIN: u64, + const MEMBERS_MAX: u64, + const OVERLAP_MIN: u64, + const OVERLAP_MAX: u64, + > QuorumFilterConfig + for RandomOverlapQuorumFilterConfig +{ + fn execute(epoch: u64, count: usize) -> BTreeSet { + random_overlap_quorum_filter( + SEED, + epoch, + count, + MEMBERS_MIN, + MEMBERS_MAX, + OVERLAP_MIN, + OVERLAP_MAX, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_stable() { + for _ in 0..100 { + let seed = rand::random::(); + let prev_set: Vec = StableQuorumIterator::new(seed, 1, 10, 2).collect(); + let this_set: Vec = StableQuorumIterator::new(seed, 2, 10, 2).collect(); + + // The first two elements from prev_set are from its previous round. But its 2nd and 3rd elements + // are new, and should be carried over to become the first two elements from this_set. + assert_eq!( + prev_set[2..4], + this_set[0..2], + "prev_set={prev_set:?}, this_set={this_set:?}" + ); + } + } + + #[test] + fn test_random_overlap() { + for _ in 0..100 { + let seed = rand::random::(); + let prev_set: Vec = + RandomOverlapQuorumIterator::new(seed, 1, 20, 5, 10, 2, 3).collect(); + let this_set: Vec = + RandomOverlapQuorumIterator::new(seed, 2, 20, 5, 10, 2, 3).collect(); + + // Similar to the overlap before, but there are 4 possible cases: the previous set might have had + // either 2 or 3 overlaps, meaning we should start with index 2 or 3, and the overlap size might + // be either 2 or 3. We'll just check for 2 overlaps, meaning we have two possible overlap cases + // to verify. + let matched = (prev_set[2..4] == this_set[0..2]) || (prev_set[3..5] == this_set[0..2]); + assert!(matched, "prev_set={prev_set:?}, this_set={this_set:?}"); + } + } + + #[test] + fn test_odd_even() { + for _ in 0..100 { + let seed = rand::random::(); + + let odd_set: Vec = StableQuorumIterator::new(seed, 1, 10, 2).collect(); + let even_set: Vec = StableQuorumIterator::new(seed, 2, 10, 2).collect(); + + assert!( + odd_set[2] % 2 == 1, + "odd set non-overlap value should be odd (stable)" + ); + assert!( + even_set[2] % 2 == 0, + "even set non-overlap value should be even (stable)" + ); + + let odd_set: Vec = + RandomOverlapQuorumIterator::new(seed, 1, 20, 5, 10, 2, 3).collect(); + let even_set: Vec = + RandomOverlapQuorumIterator::new(seed, 2, 20, 5, 10, 2, 3).collect(); + + assert!( + odd_set[3] % 2 == 1, + "odd set non-overlap value should be odd (random overlap)" + ); + assert!( + even_set[3] % 2 == 0, + "even set non-overlap value should be even (random overlap)" + ); + } + } + + #[test] + fn calc_num_slots_test() { + assert_eq!(calc_num_slots(5, true), 2); + assert_eq!(calc_num_slots(5, false), 3); + + assert_eq!(calc_num_slots(6, true), 3); + assert_eq!(calc_num_slots(6, false), 3); + } +} diff --git a/hotshot/src/traits/election.rs b/hotshot/src/traits/election/mod.rs similarity index 79% rename from hotshot/src/traits/election.rs rename to hotshot/src/traits/election/mod.rs index 4f9212705f..914b9bbb33 100644 --- a/hotshot/src/traits/election.rs +++ b/hotshot/src/traits/election/mod.rs @@ -8,7 +8,15 @@ /// leader completely randomized every view pub mod randomized_committee; + +/// quorum randomized every view, with configurable overlap +pub mod randomized_committee_members; + /// static (round robin) committee election pub mod static_committee; + /// static (round robin leader for 2 consecutive views) committee election pub mod static_committee_leader_two_views; + +/// general helpers +pub mod helpers; diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index 2b721a66e0..4046123553 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -226,22 +226,22 @@ impl Membership for RandomizedCommittee { self.da_stake_table.len() } /// Get the voting success threshold for the committee - fn success_threshold(&self) -> NonZeroU64 { + fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(max( (self.stake_table.len() as u64 * 9) / 10, ((self.stake_table.len() as u64 * 2) / 3) + 1, diff --git a/hotshot/src/traits/election/randomized_committee_members.rs b/hotshot/src/traits/election/randomized_committee_members.rs new file mode 100644 index 0000000000..5c85ad9c07 --- /dev/null +++ b/hotshot/src/traits/election/randomized_committee_members.rs @@ -0,0 +1,353 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{ + cmp::max, + collections::{BTreeMap, BTreeSet}, + marker::PhantomData, + num::NonZeroU64, +}; + +use hotshot_types::{ + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + signature_key::{SignatureKey, StakeTableEntryType}, + }, + PeerConfig, +}; +use primitive_types::U256; +use rand::{rngs::StdRng, Rng}; +use utils::anytrace::Result; + +use crate::traits::election::helpers::QuorumFilterConfig; + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +/// The static committee election +pub struct RandomizedCommitteeMembers { + /// The nodes eligible for leadership. + /// NOTE: This is currently a hack because the DA leader needs to be the quorum + /// leader but without voting rights. + eligible_leaders: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake + stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the da committee and their stake + da_stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake, indexed by public key + indexed_stake_table: + BTreeMap::StakeTableEntry>, + + /// The nodes on the da committee and their stake, indexed by public key + indexed_da_stake_table: + BTreeMap::StakeTableEntry>, + + /// Phantom + _pd: PhantomData, +} + +impl RandomizedCommitteeMembers { + /// Creates a set of indices into the stake_table which reference the nodes selected for this epoch's committee + fn make_quorum_filter(&self, epoch: ::Epoch) -> BTreeSet { + CONFIG::execute(epoch.u64(), self.stake_table.len()) + } + + /// Creates a set of indices into the da_stake_table which reference the nodes selected for this epoch's da committee + fn make_da_quorum_filter(&self, epoch: ::Epoch) -> BTreeSet { + CONFIG::execute(epoch.u64(), self.da_stake_table.len()) + } +} + +impl Membership + for RandomizedCommitteeMembers +{ + type Error = utils::anytrace::Error; + + /// Create a new election + fn new( + committee_members: Vec::SignatureKey>>, + da_members: Vec::SignatureKey>>, + ) -> Self { + // For each eligible leader, get the stake table entry + let eligible_leaders: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each member, get the stake table entry + let members: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each da member, get the stake table entry + let da_members: Vec<::StakeTableEntry> = da_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // Index the stake table by public key + let indexed_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + // Index the stake table by public key + let indexed_da_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = da_members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + Self { + eligible_leaders, + stake_table: members, + da_stake_table: da_members, + indexed_stake_table, + indexed_da_stake_table, + _pd: PhantomData, + } + } + + /// Get the stake table for the current view + fn stake_table( + &self, + epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + let filter = self.make_quorum_filter(epoch); + //self.stake_table.clone()s + self.stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect() + } + + /// Get the da stake table for the current view + fn da_stake_table( + &self, + epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + let filter = self.make_da_quorum_filter(epoch); + //self.stake_table.clone()s + self.da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect() + } + + /// Get all members of the committee for the current view + fn committee_members( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> BTreeSet<::SignatureKey> { + let filter = self.make_quorum_filter(epoch); + self.stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect() + } + + /// Get all members of the committee for the current view + fn da_committee_members( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> BTreeSet<::SignatureKey> { + let filter = self.make_da_quorum_filter(epoch); + self.da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect() + } + + /// Get all eligible leaders of the committee for the current view + fn committee_leaders( + &self, + view_number: ::View, + epoch: ::Epoch, + ) -> BTreeSet<::SignatureKey> { + self.committee_members(view_number, epoch) + } + + /// Get the stake table entry for a public key + fn stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + let filter = self.make_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + // Only return the stake if it is above zero + self.indexed_stake_table.get(pub_key).cloned() + } else { + // Skip members which aren't included based on the quorum filter + None + } + } + + /// Get the da stake table entry for a public key + fn da_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + let filter = self.make_da_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + // Only return the stake if it is above zero + self.indexed_da_stake_table.get(pub_key).cloned() + } else { + // Skip members which aren't included based on the quorum filter + None + } + } + + /// Check if a node has stake in the committee + fn has_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> bool { + let filter = self.make_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + self.indexed_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + // Skip members which aren't included based on the quorum filter + false + } + } + + /// Check if a node has stake in the committee + fn has_da_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> bool { + let filter = self.make_da_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + self.indexed_da_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + // Skip members which aren't included based on the quorum filter + false + } + } + + /// Index the vector of public keys with the current view number + fn lookup_leader( + &self, + view_number: TYPES::View, + epoch: ::Epoch, + ) -> Result { + let filter = self.make_quorum_filter(epoch); + let leader_vec: Vec<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect(); + + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + + let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); + #[allow(clippy::cast_possible_truncation)] + let index = randomized_view_number as usize % leader_vec.len(); + + let res = leader_vec[index].clone(); + + Ok(TYPES::SignatureKey::public_key(&res)) + } + + /// Get the total number of nodes in the committee + fn total_nodes(&self, epoch: ::Epoch) -> usize { + self.make_quorum_filter(epoch).len() + } + + /// Get the total number of nodes in the committee + fn da_total_nodes(&self, epoch: ::Epoch) -> usize { + self.make_da_quorum_filter(epoch).len() + } + + /// Get the voting success threshold for the committee + fn success_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.total_nodes(epoch); + NonZeroU64::new(((len as u64 * 2) / 3) + 1).unwrap() + } + + /// Get the voting success threshold for the committee + fn da_success_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.da_total_nodes(epoch); + NonZeroU64::new(((len as u64 * 2) / 3) + 1).unwrap() + } + + /// Get the voting failure threshold for the committee + fn failure_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.total_nodes(epoch); + NonZeroU64::new(((len as u64) / 3) + 1).unwrap() + } + + /// Get the voting upgrade threshold for the committee + fn upgrade_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.total_nodes(epoch); + NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() + } +} diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index fa904c66cf..d2b62f80b7 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -215,26 +215,23 @@ impl Membership for StaticCommittee { } /// Get the voting success threshold for the committee - fn success_threshold(&self) -> NonZeroU64 { + fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(max( - (self.stake_table.len() as u64 * 9) / 10, - ((self.stake_table.len() as u64 * 2) / 3) + 1, - )) - .unwrap() + fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + let len = self.stake_table.len(); + NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 41ed1d046e..8833d06872 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -217,22 +217,22 @@ impl Membership for StaticCommitteeLeaderForTwoViews NonZeroU64 { + fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } } diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index d48f6f5f79..5925a85eff 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -88,7 +88,7 @@ struct MemoryNetworkInner { /// This provides an in memory simulation of a networking implementation, allowing nodes running on /// the same machine to mock networking while testing other functionality. /// -/// Under the hood, this simply maintains mpmc channels to every other `MemoryNetwork` insane of the +/// Under the hood, this simply maintains mpmc channels to every other `MemoryNetwork` instance of the /// same group. #[derive(Clone)] pub struct MemoryNetwork { @@ -297,22 +297,53 @@ impl ConnectedNetwork for MemoryNetwork { &self, message: Vec, recipients: Vec, - broadcast_delay: BroadcastDelay, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - // Iterate over all topics, compare to recipients, and get the `Topic` - let topic = self + trace!(?message, "Broadcasting message to DA"); + for node in self .inner .master_map .subscribed_map + .entry(Topic::Da) + .or_default() .iter() - .find(|v| v.value().iter().all(|(k, _)| recipients.contains(k))) - .map(|v| v.key().clone()) - .ok_or(NetworkError::MessageSendError( - "no topic found for recipients".to_string(), - ))?; - - self.broadcast_message(message, topic, broadcast_delay) - .await + { + if !recipients.contains(&node.0) { + tracing::error!("Skipping node because not in recipient list: {:?}", &node.0); + continue; + } + // TODO delay/drop etc here + let (key, node) = node; + trace!(?key, "Sending message to node"); + if let Some(ref config) = &self.inner.reliability_config { + { + let node2 = node.clone(); + let fut = config.chaos_send_msg( + message.clone(), + Arc::new(move |msg: Vec| { + let node3 = (node2).clone(); + boxed_sync(async move { + let _res = node3.input(msg).await; + // NOTE we're dropping metrics here but this is only for testing + // purposes. I think that should be okay + }) + }), + ); + spawn(fut); + } + } else { + let res = node.input(message.clone()).await; + match res { + Ok(()) => { + trace!(?key, "Delivered message to remote"); + } + Err(e) => { + warn!(?e, ?key, "Error sending broadcast message to node"); + } + } + } + } + Ok(()) } #[instrument(name = "MemoryNetwork::direct_message")] diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 7b1fd5a424..9ea46b34d7 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -184,7 +184,6 @@ impl + 'static, V: Versions> .ok_or(anyhow!("Event dependency failed to get event"))?; // Then, if it's `Some`, make sure that the data is correct - if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { // Make sure that the quorum_proposal is valid diff --git a/macros/src/lib.rs b/macros/src/lib.rs index e1920bf4fa..80f49e1d6a 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -11,25 +11,41 @@ use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ parse::{Parse, ParseStream, Result}, - parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, + parse_macro_input, + punctuated::Punctuated, + Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, PathArguments, Token, TypePath, }; +/// Bracketed types, e.g. [A, B, C] +/// These types can have generic parameters, whereas ExprArray items must be Expr. +#[derive(derive_builder::Builder, Debug, Clone)] +struct TypePathBracketedArray { + /// elems + pub elems: Punctuated, +} + /// description of a crosstest #[derive(derive_builder::Builder, Debug, Clone)] struct CrossTestData { /// imlementations impls: ExprArray, + /// builder impl #[builder(default = "syn::parse_str(\"[SimpleBuilderImplementation]\").unwrap()")] builder_impls: ExprArray, + /// versions versions: ExprArray, + /// types - types: ExprArray, + types: TypePathBracketedArray, + /// name of the test test_name: Ident, + /// test description/spec metadata: Expr, + /// whether or not to ignore ignore: LitBool, } @@ -51,17 +67,23 @@ impl CrossTestDataBuilder { #[derive(derive_builder::Builder, Debug, Clone)] struct TestData { /// type - ty: ExprPath, + ty: TypePath, + /// impl imply: ExprPath, + /// builder implementation builder_impl: ExprPath, + /// impl version: ExprPath, + /// name of test test_name: Ident, + /// test description metadata: Expr, + /// whether or not to ignore the test ignore: LitBool, } @@ -86,6 +108,58 @@ impl ToLowerSnakeStr for ExprPath { } } +impl ToLowerSnakeStr for syn::GenericArgument { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn to_lower_snake_str(&self) -> String { + match self { + syn::GenericArgument::Lifetime(l) => l.ident.to_string().to_lowercase(), + syn::GenericArgument::Type(t) => match t { + syn::Type::Path(p) => p.to_lower_snake_str(), + _ => { + panic!("Unexpected type for GenericArgument::Type: {t:?}"); + } + }, + syn::GenericArgument::Const(c) => match c { + syn::Expr::Lit(l) => match &l.lit { + syn::Lit::Str(v) => format!("{}_", v.value().to_lowercase()), + syn::Lit::Int(v) => format!("{}_", v.base10_digits()), + _ => { + panic!("Unexpected type for GenericArgument::Const::Lit: {l:?}"); + } + }, + _ => { + panic!("Unexpected type for GenericArgument::Const: {c:?}"); + } + }, + _ => { + panic!("Unexpected type for GenericArgument: {self:?}"); + } + } + } +} + +impl ToLowerSnakeStr for TypePath { + fn to_lower_snake_str(&self) -> String { + self.path + .segments + .iter() + .fold(String::new(), |mut acc, s| { + acc.push_str(&s.ident.to_string().to_lowercase()); + if let PathArguments::AngleBracketed(a) = &s.arguments { + acc.push('_'); + for arg in &a.args { + acc.push_str(&arg.to_lower_snake_str()); + } + } + + acc.push('_'); + acc + }) + .to_lowercase() + } +} + impl ToLowerSnakeStr for ExprTuple { /// allow panic because this is a compiler error #[allow(clippy::panic)] @@ -149,6 +223,28 @@ mod keywords { syn::custom_keyword!(Versions); } +impl Parse for TypePathBracketedArray { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn parse(input: ParseStream<'_>) -> Result { + let content; + syn::bracketed!(content in input); + let mut elems = Punctuated::new(); + + while !content.is_empty() { + let first: TypePath = content.parse()?; + elems.push_value(first); + if content.is_empty() { + break; + } + let punct = content.parse()?; + elems.push_punct(punct); + } + + Ok(Self { elems }) + } +} + impl Parse for CrossTestData { /// allow panic because this is a compiler error #[allow(clippy::panic)] @@ -159,7 +255,7 @@ impl Parse for CrossTestData { if input.peek(keywords::Types) { let _ = input.parse::()?; input.parse::()?; - let types = input.parse::()?; + let types = input.parse::()?; //ExprArray>()?; description.types(types); } else if input.peek(keywords::Impls) { let _ = input.parse::()?; @@ -216,13 +312,8 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { }; p }); - // - let types = test_spec.types.elems.iter().map(|t| { - let Expr::Path(p) = t else { - panic!("Expected Path for Type! Got {t:?}"); - }; - p - }); + + let types = test_spec.types.elems.iter(); let versions = test_spec.versions.elems.iter().map(|t| { let Expr::Path(p) = t else { diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 6dab4f938c..27fc0a7b43 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -286,7 +286,10 @@ pub(crate) async fn handle_timeout task_state .membership .has_stake(&task_state.public_key, task_state.cur_epoch), - debug!("We were not chosen for the consensus committee for view {view_number:?}") + debug!( + "We were not chosen for the consensus committee for view {:?}", + view_number + ) ); let vote = TimeoutVote::create_signed_vote( diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 68df39136f..2c503d2b13 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -120,7 +120,6 @@ impl, V: Versions> DaTaskState( if !justify_qc .is_valid_cert( quorum_membership.stake_table(cur_epoch), - quorum_membership.success_threshold(), + quorum_membership.success_threshold(cur_epoch), upgrade_lock, ) .await @@ -686,7 +686,9 @@ pub(crate) async fn validate_proposal_view_and_certs< validation_info .quorum_membership .stake_table(validation_info.cur_epoch), - validation_info.quorum_membership.success_threshold(), + validation_info + .quorum_membership + .success_threshold(validation_info.cur_epoch), &validation_info.upgrade_lock ) .await, @@ -709,7 +711,9 @@ pub(crate) async fn validate_proposal_view_and_certs< validation_info .quorum_membership .stake_table(validation_info.cur_epoch), - validation_info.quorum_membership.success_threshold(), + validation_info + .quorum_membership + .success_threshold(validation_info.cur_epoch), &validation_info.upgrade_lock ) .await, diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 55f283bcd8..b1cc8e36fa 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -128,7 +128,8 @@ impl ProposalDependencyHandle { // TODO take epoch from `qc` // https://github.com/EspressoSystems/HotShot/issues/3917 self.quorum_membership.stake_table(TYPES::Epoch::new(0)), - self.quorum_membership.success_threshold(), + self.quorum_membership + .success_threshold(TYPES::Epoch::new(0)), &self.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 034db12812..06150dff97 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -444,7 +444,7 @@ impl, V: Versions> certificate .is_valid_cert( self.quorum_membership.stake_table(epoch_number), - self.quorum_membership.success_threshold(), + self.quorum_membership.success_threshold(epoch_number), &self.upgrade_lock ) .await, @@ -508,11 +508,11 @@ impl, V: Versions> ensure!( qc.is_valid_cert( self.quorum_membership.stake_table(epoch_number), - self.quorum_membership.success_threshold(), + self.quorum_membership.success_threshold(epoch_number), &self.upgrade_lock ) .await, - warn!("Qurom certificate {:?} was invalid", qc.data()) + warn!("Quorum certificate {:?} was invalid", qc.data()) ); self.highest_qc = qc.clone(); } diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 73f6addef0..3d5c010058 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -157,7 +157,9 @@ pub(crate) async fn handle_quorum_proposal_recv< validation_info .quorum_membership .stake_table(validation_info.cur_epoch), - validation_info.quorum_membership.success_threshold(), + validation_info + .quorum_membership + .success_threshold(validation_info.cur_epoch), &validation_info.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index a89e27a84c..4162969397 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -497,7 +497,7 @@ impl, V: Versions> QuorumVoteTaskS ensure!( cert.is_valid_cert( self.membership.da_stake_table(cur_epoch), - self.membership.da_success_threshold(), + self.membership.da_success_threshold(cur_epoch), &self.upgrade_lock ) .await, diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index efb87f9cd5..6bb83dfc53 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -535,7 +535,7 @@ impl ViewSyncReplicaTaskState { if !certificate .is_valid_cert( self.membership.stake_table(self.cur_epoch), - self.membership.failure_threshold(), + self.membership.failure_threshold(self.cur_epoch), &self.upgrade_lock, ) .await @@ -621,7 +621,7 @@ impl ViewSyncReplicaTaskState { if !certificate .is_valid_cert( self.membership.stake_table(self.cur_epoch), - self.membership.success_threshold(), + self.membership.success_threshold(self.cur_epoch), &self.upgrade_lock, ) .await @@ -718,7 +718,7 @@ impl ViewSyncReplicaTaskState { if !certificate .is_valid_cert( self.membership.stake_table(self.cur_epoch), - self.membership.success_threshold(), + self.membership.success_threshold(self.cur_epoch), &self.upgrade_lock, ) .await diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 6759d17e69..cc468a1859 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -140,7 +140,7 @@ pub async fn build_cert< CERT: Certificate, >( data: DATAType, - membership: &TYPES::Membership, + da_membership: &TYPES::Membership, view: TYPES::View, epoch: TYPES::Epoch, public_key: &TYPES::SignatureKey, @@ -149,7 +149,7 @@ pub async fn build_cert< ) -> CERT { let real_qc_sig = build_assembled_sig::( &data, - membership, + da_membership, view, epoch, upgrade_lock, @@ -215,7 +215,7 @@ pub async fn build_assembled_sig< let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), - U256::from(CERT::threshold(membership)), + U256::from(CERT::threshold(membership, epoch)), ); let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 414e6e0f4b..ffee9b39e5 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -308,7 +308,7 @@ where for node in &mut *nodes { node.handle.shut_down().await; } - tracing::info!("Nodes shtudown"); + tracing::info!("Nodes shutdown"); completion_handle.abort(); diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index 5b0a6cf5c2..fc29b1c01d 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -12,7 +12,7 @@ use std::{ use hotshot_builder_api::v0_1::block_info::AvailableBlockData; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{TestTypes, TestVersions}, + node_types::TestTypes, }; use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; use hotshot_testing::block_builder::{ @@ -21,14 +21,13 @@ use hotshot_testing::block_builder::{ use hotshot_types::{ network::RandomBuilderConfig, traits::{ - block_contents::vid_commitment, - node_implementation::{NodeType, Versions}, - signature_key::SignatureKey, + block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, BlockPayload, }, }; use tide_disco::Url; use tokio::time::sleep; +use vbs::version::StaticVersion; #[cfg(test)] #[tokio::test(flavor = "multi_thread")] @@ -50,8 +49,7 @@ async fn test_random_block_builder() { let builder_started = Instant::now(); - let client: BuilderClient::Base> = - BuilderClient::new(api_url); + let client: BuilderClient> = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); let (pub_key, private_key) = diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 3778aa4737..9536cf0f22 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -104,7 +104,7 @@ async fn test_certificate2_validity() { assert!( qc.is_valid_cert( membership.stake_table(EpochNumber::new(0)), - membership.success_threshold(), + membership.success_threshold(EpochNumber::new(0)), &handle.hotshot.upgrade_lock ) .await @@ -113,7 +113,7 @@ async fn test_certificate2_validity() { assert!( qc2.is_valid_cert( membership.stake_table(EpochNumber::new(0)), - membership.success_threshold(), + membership.success_threshold(EpochNumber::new(0)), &handle.hotshot.upgrade_lock ) .await diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index e81060aedb..982b7018f6 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -41,6 +41,25 @@ cross_tests!( }, ); +// cross_tests!( +// TestName: test_epoch_success, +// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], +// Types: [TestTypes, TestTypesRandomizedLeader, TestTypesRandomizedCommitteeMembers>, TestTypesRandomizedCommitteeMembers>], +// Versions: [EpochsTestVersions], +// Ignore: false, +// Metadata: { +// TestDescription { +// // allow more time to pass in CI +// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( +// TimeBasedCompletionTaskDescription { +// duration: Duration::from_secs(60), +// }, +// ), +// ..TestDescription::default() +// } +// }, +// ); + cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 271d5b0729..f2cc8cd689 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -24,7 +24,7 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, QuorumData, QuorumData2, QuorumMaker, TimeoutData, UpgradeProposalData, + DaData, QuorumData, QuorumData2, QuorumMarker, TimeoutData, UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, Voteable, }, @@ -39,7 +39,10 @@ use crate::{ /// Trait which allows use to inject different threshold calculations into a Certificate type pub trait Threshold { /// Calculate a threshold based on the membership - fn threshold>(membership: &MEMBERSHIP) -> u64; + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64; } /// Defines a threshold which is 2f + 1 (Amount needed for Quorum) @@ -47,8 +50,11 @@ pub trait Threshold { pub struct SuccessThreshold {} impl Threshold for SuccessThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.success_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.success_threshold(epoch).into() } } @@ -57,8 +63,11 @@ impl Threshold for SuccessThreshold { pub struct OneHonestThreshold {} impl Threshold for OneHonestThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.failure_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.failure_threshold(epoch).into() } } @@ -67,8 +76,11 @@ impl Threshold for OneHonestThreshold { pub struct UpgradeThreshold {} impl Threshold for UpgradeThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.upgrade_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.upgrade_threshold(epoch).into() } } @@ -192,8 +204,11 @@ impl> Certificate ) -> usize { membership.da_total_nodes(epoch) } - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.da_success_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.da_success_threshold(epoch).into() } fn data(&self) -> &Self::Voteable { &self.data @@ -210,7 +225,7 @@ impl> Certificate } } -impl> +impl> Certificate for SimpleCertificate { type Voteable = VOTEABLE; @@ -254,8 +269,11 @@ impl>(membership: &MEMBERSHIP) -> u64 { - THRESHOLD::threshold(membership) + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + THRESHOLD::threshold(membership, epoch) } fn stake_table_entry>( @@ -345,7 +363,7 @@ impl UpgradeCertificate { ensure!( cert.is_valid_cert( quorum_membership.stake_table(epoch), - quorum_membership.upgrade_threshold(), + quorum_membership.upgrade_threshold(epoch), upgrade_lock ) .await, diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 35997d3725..138f730fb9 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -25,7 +25,7 @@ use crate::{ }; /// Marker that data should use the quorum cert type -pub(crate) trait QuorumMaker {} +pub(crate) trait QuorumMarker {} #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. @@ -117,13 +117,13 @@ mod sealed { impl Sealed for C {} } -impl QuorumMaker for QuorumData {} -impl QuorumMaker for QuorumData2 {} -impl QuorumMaker for TimeoutData {} -impl QuorumMaker for ViewSyncPreCommitData {} -impl QuorumMaker for ViewSyncCommitData {} -impl QuorumMaker for ViewSyncFinalizeData {} -impl QuorumMaker for UpgradeProposalData {} +impl QuorumMarker for QuorumData {} +impl QuorumMarker for QuorumData2 {} +impl QuorumMarker for TimeoutData {} +impl QuorumMarker for ViewSyncPreCommitData {} +impl QuorumMarker for ViewSyncCommitData {} +impl QuorumMarker for ViewSyncFinalizeData {} +impl QuorumMarker for UpgradeProposalData {} /// A simple yes vote over some votable type. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 04aa76ccb4..5b72ea4f84 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -115,14 +115,14 @@ pub trait Membership: Clone + Debug + Send + Sync { fn da_total_nodes(&self, epoch: TYPES::Epoch) -> usize; /// Returns the threshold for a specific `Membership` implementation - fn success_threshold(&self) -> NonZeroU64; + fn success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; /// Returns the DA threshold for a specific `Membership` implementation - fn da_success_threshold(&self) -> NonZeroU64; + fn da_success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; /// Returns the threshold for a specific `Membership` implementation - fn failure_threshold(&self) -> NonZeroU64; + fn failure_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; /// Returns the threshold required to upgrade the network protocol - fn upgrade_threshold(&self) -> NonZeroU64; + fn upgrade_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; } diff --git a/types/src/traits/metrics.rs b/types/src/traits/metrics.rs index 38d9a8d6ea..cd29c75da5 100644 --- a/types/src/traits/metrics.rs +++ b/types/src/traits/metrics.rs @@ -212,13 +212,14 @@ pub trait Counter: Send + Sync + Debug + DynClone { /// Add a value to the counter fn add(&self, amount: usize); } + /// A gauge that stores the latest value. pub trait Gauge: Send + Sync + Debug + DynClone { /// Set the gauge value fn set(&self, amount: usize); /// Update the gauge value - fn update(&self, delts: i64); + fn update(&self, delta: i64); } /// A histogram which will record a series of points. diff --git a/types/src/vote.rs b/types/src/vote.rs index bdff9d4bb5..13112afa12 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -81,7 +81,10 @@ pub trait Certificate: HasViewNumber { ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold>(membership: &MEMBERSHIP) -> u64; + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64; /// Get Stake Table from Membership implementation. fn stake_table>( @@ -220,12 +223,12 @@ impl< *total_stake_casted += stake_table_entry.stake(); total_vote_map.insert(key, (vote.signature(), vote_commitment)); - if *total_stake_casted >= CERT::threshold(membership).into() { + if *total_stake_casted >= CERT::threshold(membership, epoch).into() { // Assemble QC let real_qc_pp: <::SignatureKey as SignatureKey>::QcParams = ::public_parameter( stake_table, - U256::from(CERT::threshold(membership)), + U256::from(CERT::threshold(membership, epoch)), ); let real_qc_sig = ::assemble( From 101623509cf2259f6215bbcd9a913cd39e624cda Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:37:46 -0500 Subject: [PATCH 1322/1393] [Libp2p] File-backed Kademlia store (#3934) * libp2p disk backed record store * serialize entire record * minor improvements --- libp2p-networking/Cargo.toml | 1 + .../src/network/behaviours/dht/mod.rs | 10 +- .../behaviours/dht/store/file_backed.rs | 426 ++++++++++++++++++ .../src/network/behaviours/dht/store/mod.rs | 2 + .../dht/{store.rs => store/validated.rs} | 3 +- libp2p-networking/src/network/def.rs | 13 +- libp2p-networking/src/network/node.rs | 15 +- libp2p-networking/src/network/node/config.rs | 4 + 8 files changed, 460 insertions(+), 14 deletions(-) create mode 100644 libp2p-networking/src/network/behaviours/dht/store/file_backed.rs create mode 100644 libp2p-networking/src/network/behaviours/dht/store/mod.rs rename libp2p-networking/src/network/behaviours/dht/{store.rs => store/validated.rs} (98%) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index edabb02097..98f1449508 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -33,6 +33,7 @@ rand = { workspace = true } serde = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +tracing-subscriber = { workspace = true } [lints] workspace = true diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index 3e1bc95673..f785d1fa10 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -28,7 +28,7 @@ use libp2p::kad::{ store::RecordStore, Behaviour as KademliaBehaviour, BootstrapError, Event as KademliaEvent, }; use libp2p_identity::PeerId; -use store::ValidatedStore; +use store::{file_backed::FileBackedStore, validated::ValidatedStore}; use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; @@ -143,7 +143,7 @@ impl DHTBehaviour { /// print out the routing table to stderr pub fn print_routing_table( &mut self, - kadem: &mut KademliaBehaviour>, + kadem: &mut KademliaBehaviour>>, ) { let mut err = format!("KBUCKETS: PID: {:?}, ", self.peer_id); let v = kadem.kbuckets().collect::>(); @@ -179,7 +179,7 @@ impl DHTBehaviour { factor: NonZeroUsize, backoff: ExponentialBackoff, retry_count: u8, - kad: &mut KademliaBehaviour>, + kad: &mut KademliaBehaviour>>, ) { // noop if retry_count == 0 { @@ -247,7 +247,7 @@ impl DHTBehaviour { /// update state based on recv-ed get query fn handle_get_query( &mut self, - store: &mut ValidatedStore, + store: &mut FileBackedStore>, record_results: GetRecordResult, id: QueryId, mut last: bool, @@ -405,7 +405,7 @@ impl DHTBehaviour { pub fn dht_handle_event( &mut self, event: KademliaEvent, - store: &mut ValidatedStore, + store: &mut FileBackedStore>, ) -> Option { match event { KademliaEvent::OutboundQueryProgressed { diff --git a/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs b/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs new file mode 100644 index 0000000000..ba442d5a9b --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs @@ -0,0 +1,426 @@ +//! This file contains the `FileBackedStore` struct, which is a wrapper around a `RecordStore` +//! that occasionally saves the DHT to a file on disk. + +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use anyhow::Context; +use delegate::delegate; +use libp2p::kad::store::{RecordStore, Result}; +use serde::{Deserialize, Serialize}; +use tracing::{debug, warn}; + +/// A `RecordStore` wrapper that occasionally saves the DHT to a file on disk. +pub struct FileBackedStore { + /// The underlying store + underlying_store: R, + + /// The path to the file + path: String, + + /// The maximum number of records that can be added to the store before the store is saved to a file + max_record_delta: u64, + + /// The running delta between the records in the file and the records in the underlying store + record_delta: u64, +} + +/// A serializable version of a Libp2p `Record` +#[derive(Serialize, Deserialize)] +pub struct SerializableRecord { + /// The key of the record + pub key: libp2p::kad::RecordKey, + /// The value of the record + pub value: Vec, + /// The (original) publisher of the record. + pub publisher: Option, + /// The record expiration time in seconds since the Unix epoch + /// + /// This is an approximation of the expiration time because we can't + /// serialize an `Instant` directly. + pub expires_unix_secs: Option, +} + +/// Approximate an `Instant` to the number of seconds since the Unix epoch +fn instant_to_unix_seconds(instant: Instant) -> anyhow::Result { + // Get the current instant and system time + let now_instant = Instant::now(); + let now_system = SystemTime::now(); + + // Get the duration of time between the instant and now + if instant > now_instant { + Ok(now_system + .checked_add(instant - now_instant) + .with_context(|| "Overflow when approximating expiration time")? + .duration_since(UNIX_EPOCH) + .with_context(|| "Failed to get duration since Unix epoch")? + .as_secs()) + } else { + Ok(now_system + .checked_sub(now_instant - instant) + .with_context(|| "Underflow when approximating expiration time")? + .duration_since(UNIX_EPOCH) + .with_context(|| "Failed to get duration since Unix epoch")? + .as_secs()) + } +} + +/// Convert a unix-second timestamp to an `Instant` +fn unix_seconds_to_instant(unix_secs: u64) -> anyhow::Result { + // Get the current instant and unix time + let now_instant = Instant::now(); + let unix_secs_now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .with_context(|| "Failed to get duration since Unix epoch")? + .as_secs(); + + if unix_secs > unix_secs_now { + // If the instant is in the future, add the duration to the current time + now_instant + .checked_add(Duration::from_secs(unix_secs - unix_secs_now)) + .with_context(|| "Overflow when calculating future instant") + } else { + // If the instant is in the past, subtract the duration from the current time + now_instant + .checked_sub(Duration::from_secs(unix_secs_now - unix_secs)) + .with_context(|| "Underflow when calculating past instant") + } +} + +/// Allow conversion from a `libp2p::kad::Record` to a `SerializableRecord` +impl TryFrom for SerializableRecord { + type Error = anyhow::Error; + + fn try_from(record: libp2p::kad::Record) -> anyhow::Result { + Ok(SerializableRecord { + key: record.key, + value: record.value, + publisher: record.publisher, + expires_unix_secs: record.expires.map(instant_to_unix_seconds).transpose()?, + }) + } +} + +/// Allow conversion from a `SerializableRecord` to a `libp2p::kad::Record` +impl TryFrom for libp2p::kad::Record { + type Error = anyhow::Error; + + fn try_from(record: SerializableRecord) -> anyhow::Result { + Ok(libp2p::kad::Record { + key: record.key, + value: record.value, + publisher: record.publisher, + expires: record + .expires_unix_secs + .map(unix_seconds_to_instant) + .transpose()?, + }) + } +} + +impl FileBackedStore { + /// Create a new `FileBackedStore` with the given underlying store and path. + /// + /// `max_record_delta` is the maximum number of records that can be added to the store before + /// the store is saved to a file. + pub fn new(underlying_store: R, path: String, max_record_delta: u64) -> Self { + // Create the new store + let mut store = FileBackedStore { + underlying_store, + path: path.clone(), + max_record_delta, + record_delta: 0, + }; + + // Try to restore the DHT from a file. If it fails, warn and start with an empty store + if let Err(err) = store.restore_from_file(path) { + warn!( + "Failed to restore DHT from file: {:?}. Starting with empty store", + err + ); + } + + // Return the new store + store + } + + /// Attempt to save the DHT to the file at the given path + /// + /// # Errors + /// - If we fail to serialize the DHT + /// - If we fail to write the serialized DHT to the file + pub fn save_to_file(&mut self) -> anyhow::Result<()> { + debug!("Saving DHT to file"); + + // Get all records and convert them to their serializable counterparts + let serializable_records: Vec<_> = self + .underlying_store + .records() + .filter_map(|record| { + SerializableRecord::try_from(record.into_owned()) + .map_err(|err| { + warn!("Failed to convert record to serializable record: {:?}", err); + }) + .ok() + }) + .collect(); + + // Serialize the records + let contents = bincode::serialize(&serializable_records) + .with_context(|| "Failed to serialize records")?; + + // Write the contents to the file + std::fs::write(self.path.clone(), contents) + .with_context(|| "Failed to write DHT to file")?; + + debug!("Saved DHT to file"); + + Ok(()) + } + + /// Attempt to restore the DHT to the underlying store from the file at the given path + /// + /// # Errors + /// - If we fail to read the file + /// - If we fail to deserialize the file + pub fn restore_from_file(&mut self, path: String) -> anyhow::Result<()> { + debug!("Restoring DHT from file"); + + // Read the contents of the file as a `HashMap` of `Key` to `Vec` + let contents = std::fs::read(path).with_context(|| "Failed to read DHT file")?; + + // Convert the contents to a `HashMap` of `RecordKey` to `Vec` + let serializable_records: Vec = + bincode::deserialize(&contents).with_context(|| "Failed to parse DHT file")?; + + // Put all records into the new store + for serializable_record in serializable_records { + // Convert the serializable record back to a `libp2p::kad::Record` + match libp2p::kad::Record::try_from(serializable_record) { + Ok(record) => { + // Put the record into the new store + if let Err(err) = self.underlying_store.put(record) { + warn!("Failed to restore record from file: {:?}", err); + } + } + Err(err) => { + warn!("Failed to parse record from file: {:?}", err); + } + }; + } + + debug!("Restored DHT from file"); + + Ok(()) + } +} + +/// Implement the `RecordStore` trait for `FileBackedStore` +impl RecordStore for FileBackedStore { + type ProvidedIter<'a> + = R::ProvidedIter<'a> + where + R: 'a; + type RecordsIter<'a> + = R::RecordsIter<'a> + where + R: 'a; + + // Delegate all `RecordStore` methods except `put` to the inner store + delegate! { + to self.underlying_store { + fn add_provider(&mut self, record: libp2p::kad::ProviderRecord) -> libp2p::kad::store::Result<()>; + fn get(&self, k: &libp2p::kad::RecordKey) -> Option>; + fn provided(&self) -> Self::ProvidedIter<'_>; + fn providers(&self, key: &libp2p::kad::RecordKey) -> Vec; + fn records(&self) -> Self::RecordsIter<'_>; + fn remove_provider(&mut self, k: &libp2p::kad::RecordKey, p: &libp2p::PeerId); + } + } + + /// Overwrite the `put` method to potentially save the record to a file + fn put(&mut self, record: libp2p::kad::Record) -> Result<()> { + // Try to write to the underlying store + let result = self.underlying_store.put(record); + + // If the record was successfully written, update the record delta + if result.is_ok() { + self.record_delta += 1; + + // If the record delta is greater than the maximum record delta, try to save the file + if self.record_delta > self.max_record_delta { + if let Err(e) = self.save_to_file() { + warn!("Failed to save DHT to file: {:?}", e); + } + } + } + + result + } + + /// Overwrite the `remove` method to potentially remove the record from a file + fn remove(&mut self, k: &libp2p::kad::RecordKey) { + // Remove the record from the underlying store + self.underlying_store.remove(k); + + // Update the record delta + self.record_delta += 1; + + // If the record delta is greater than 10, try to save the file + if self.record_delta > 10 { + if let Err(e) = self.save_to_file() { + warn!("Failed to save DHT to file: {:?}", e); + } + } + } +} + +#[cfg(test)] +mod tests { + use libp2p::{ + kad::{store::MemoryStore, RecordKey}, + PeerId, + }; + use tracing_subscriber::EnvFilter; + + use super::*; + + #[test] + fn test_save_and_restore() { + // Try initializing tracing + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // Create a test store + let mut store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // The key is a random 16-byte array + let key = RecordKey::new(&rand::random::<[u8; 16]>().to_vec()); + + // The value is a random 16-byte array + let random_value = rand::random::<[u8; 16]>(); + + // Put a record into the store + store + .put(libp2p::kad::Record::new(key.clone(), random_value.to_vec())) + .expect("Failed to put record into store"); + + // Save the store to a file + store.save_to_file().expect("Failed to save store to file"); + + // Create a new store from the file + let new_store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // Check that the new store has the record + let restored_record = new_store + .get(&key) + .expect("Failed to get record from store"); + + // Check that the restored record has the same value as the original record + assert_eq!(restored_record.value, random_value.to_vec()); + } + + #[test] + fn test_record_delta() { + // Try initializing tracing + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // Create a test store + let mut store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + let mut keys = Vec::new(); + let mut values = Vec::new(); + + // Put 10 records into the store + for _ in 0..10 { + // Create a random key and value + let key = RecordKey::new(&rand::random::<[u8; 16]>().to_vec()); + let value = rand::random::<[u8; 16]>(); + + keys.push(key.clone()); + values.push(value); + + store + .put(libp2p::kad::Record::new(key, value.to_vec())) + .expect("Failed to put record into store"); + } + + // Create a new store from the allegedly unsaved file + let new_store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // Check that the new store has none of the records + for key in &keys { + assert!(new_store.get(key).is_none()); + } + + // Store one more record into the new store + store + .put(libp2p::kad::Record::new( + keys[0].clone(), + values[0].to_vec(), + )) + .expect("Failed to put record into store"); + + // Create a new store from the allegedly saved file + let new_store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // Check that the new store has all of the records + for (i, key) in keys.iter().enumerate() { + let restored_record = new_store.get(key).expect("Failed to get record from store"); + assert_eq!(restored_record.value, values[i]); + } + + // Check that the record delta is 0 + assert_eq!(new_store.record_delta, 0); + } + + #[test] + fn test_approximate_instant() { + // Create an expiry time in the future + let expiry_future = Instant::now() + Duration::from_secs(10); + + // Approximate the expiry time + let approximate_expiry = + unix_seconds_to_instant(instant_to_unix_seconds(expiry_future).unwrap()) + .unwrap() + .duration_since(Instant::now()); + + // Make sure it's close to 10 seconds in the future + assert!(approximate_expiry >= Duration::from_secs(9)); + assert!(approximate_expiry <= Duration::from_secs(11)); + + // Create an expiry time in the past + let expiry_past = Instant::now().checked_sub(Duration::from_secs(10)).unwrap(); + + // Approximate the expiry time + let approximate_expiry = + unix_seconds_to_instant(instant_to_unix_seconds(expiry_past).unwrap()).unwrap(); + let time_difference = approximate_expiry.elapsed(); + + // Make sure it's close to 10 seconds in the past + assert!(time_difference >= Duration::from_secs(9)); + assert!(time_difference <= Duration::from_secs(11)); + } +} diff --git a/libp2p-networking/src/network/behaviours/dht/store/mod.rs b/libp2p-networking/src/network/behaviours/dht/store/mod.rs new file mode 100644 index 0000000000..d9f42b8b1c --- /dev/null +++ b/libp2p-networking/src/network/behaviours/dht/store/mod.rs @@ -0,0 +1,2 @@ +pub mod file_backed; +pub mod validated; diff --git a/libp2p-networking/src/network/behaviours/dht/store.rs b/libp2p-networking/src/network/behaviours/dht/store/validated.rs similarity index 98% rename from libp2p-networking/src/network/behaviours/dht/store.rs rename to libp2p-networking/src/network/behaviours/dht/store/validated.rs index cf5c22d61e..4a65f33b21 100644 --- a/libp2p-networking/src/network/behaviours/dht/store.rs +++ b/libp2p-networking/src/network/behaviours/dht/store/validated.rs @@ -10,8 +10,7 @@ use hotshot_types::traits::signature_key::SignatureKey; use libp2p::kad::store::{Error, RecordStore, Result}; use tracing::warn; -use super::record::RecordValue; -use crate::network::behaviours::dht::record::RecordKey; +use crate::network::behaviours::dht::record::{RecordKey, RecordValue}; /// A `RecordStore` wrapper that validates records before storing them. pub struct ValidatedStore { diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index 3f5189e3ac..a52fdae36c 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -17,7 +17,10 @@ use libp2p_identity::PeerId; use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; -use super::{behaviours::dht::store::ValidatedStore, cbor, NetworkEventInternal}; +use super::{ + behaviours::dht::store::{file_backed::FileBackedStore, validated::ValidatedStore}, + cbor, NetworkEventInternal, +}; /// Overarching network behaviour performing: /// - network topology discovery @@ -34,10 +37,10 @@ pub struct NetworkDef { #[debug(skip)] gossipsub: GossipBehaviour, - /// purpose: peer routing - /// purpose: storing pub key <-> peer id bijection + /// The DHT store. We use a `FileBackedStore` to occasionally save the DHT to + /// a file on disk and a `ValidatedStore` to validate the records stored. #[debug(skip)] - pub dht: libp2p::kad::Behaviour>, + pub dht: libp2p::kad::Behaviour>>, /// purpose: identifying the addresses from an outside POV #[debug(skip)] @@ -58,7 +61,7 @@ impl NetworkDef { #[must_use] pub fn new( gossipsub: GossipBehaviour, - dht: libp2p::kad::Behaviour>, + dht: libp2p::kad::Behaviour>>, identify: IdentifyBehaviour, direct_message: super::cbor::Behaviour, Vec>, autonat: autonat::Behaviour, diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 05bacb310d..1ea25a36d0 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -59,7 +59,7 @@ pub use self::{ use super::{ behaviours::dht::{ bootstrap::{DHTBootstrapTask, InputEvent}, - store::ValidatedStore, + store::{file_backed::FileBackedStore, validated::ValidatedStore}, }, cbor::Cbor, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkError, NetworkEvent, @@ -253,9 +253,20 @@ impl NetworkNode { panic!("Replication factor not set"); } + // Extract the DHT file path from the config, defaulting to `libp2p_dht.json` + let dht_file_path = config + .dht_file_path + .clone() + .unwrap_or_else(|| "libp2p_dht.bin".into()); + + // Create the DHT behaviour let mut kadem = Behaviour::with_config( peer_id, - ValidatedStore::new(MemoryStore::new(peer_id)), + FileBackedStore::new( + ValidatedStore::new(MemoryStore::new(peer_id)), + dht_file_path, + 10, + ), kconfig, ); kadem.set_mode(Some(Mode::Server)); diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index d459ae0217..1f5422e321 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -51,6 +51,10 @@ pub struct NetworkNodeConfig { #[builder(default)] pub stake_table: Option, + /// The path to the file to save the DHT to + #[builder(default)] + pub dht_file_path: Option, + /// The signed authentication message sent to the remote peer /// If not supplied we will not send an authentication message during the handshake #[builder(default)] From c4009237c9e649b0332b08e2e360d74e87d4875a Mon Sep 17 00:00:00 2001 From: Dmytrol <46675332+Dimitrolito@users.noreply.github.com> Date: Thu, 5 Dec 2024 01:23:05 +0200 Subject: [PATCH 1323/1393] Fix Typos in Documentation (#3944) * typos README.md * typos README.md --- examples/push-cdn/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/push-cdn/README.md b/examples/push-cdn/README.md index c460beb89a..a20e46f4ce 100644 --- a/examples/push-cdn/README.md +++ b/examples/push-cdn/README.md @@ -51,7 +51,7 @@ sleep 1m just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` -Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. +Where ones using `example_gpuvid_leader` could be the leader and should be running on an nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. If you don't have a gpu but want to test out fixed leader, you can run: @@ -65,4 +65,4 @@ sleep 1m just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` -Remember, you have to run leaders first, then other validators, so that leaders will have lower index. \ No newline at end of file +Remember, you have to run leaders first, then other validators, so that leaders will have lower index. From 78fcfca5e0c8117d106cecee55ffe9d5d981e0e5 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Wed, 4 Dec 2024 15:44:37 -0800 Subject: [PATCH 1324/1393] #3933 DrbComputations helper for quorum_vote (#3941) * 3933 DrbComputations helper for quorum_vote * 3933 modify logic of drb computation to start at epoch transition * 3933 check for if we should store the seed based on when we're expecting to be in the committee * 3933 always start the task for drb * 3933 fix clippy lints * 3933 avoid unwrapping leaf_views.last() * 3933 additional fixes * 3933 correct nits, use proposal block number to calculate epoch number * 3933 break drb_calculation_start and drb_calculation_seed into separate functions * 3933 fix comment --- hotshot/src/tasks/task_state.rs | 17 +- .../src/quorum_vote/drb_computations.rs | 126 ++++++++++++++ task-impls/src/quorum_vote/handlers.rs | 159 +++++++++++------- task-impls/src/quorum_vote/mod.rs | 8 +- 4 files changed, 244 insertions(+), 66 deletions(-) create mode 100644 task-impls/src/quorum_vote/drb_computations.rs diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index ba24fc4bd3..f97905b6a4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -12,10 +12,17 @@ use std::{ use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ - builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, - quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, - quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, - transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, + builder::BuilderClient, + consensus::ConsensusTaskState, + da::DaTaskState, + quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, + quorum_vote::{drb_computations::DrbComputations, QuorumVoteTaskState}, + request::NetworkRequestState, + rewind::RewindTaskState, + transactions::TransactionTaskState, + upgrade::UpgradeTaskState, + vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::{ @@ -234,7 +241,7 @@ impl, V: Versions> CreateTaskState vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), membership: (*handle.hotshot.memberships).clone().into(), - drb_computations: BTreeMap::new(), + drb_computations: DrbComputations::new(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), diff --git a/task-impls/src/quorum_vote/drb_computations.rs b/task-impls/src/quorum_vote/drb_computations.rs new file mode 100644 index 0000000000..fc5483f8dd --- /dev/null +++ b/task-impls/src/quorum_vote/drb_computations.rs @@ -0,0 +1,126 @@ +use std::collections::{btree_map, BTreeMap}; + +use hotshot_types::{ + drb::{compute_drb_result, DrbResult, DrbSeedInput}, + traits::node_implementation::{ConsensusTime, NodeType}, +}; +use tokio::{spawn, task::JoinHandle}; + +/// Number of previous results and seeds to keep +pub const KEEP_PREVIOUS_RESULT_COUNT: u64 = 8; + +/// Helper struct to track state of DRB computations +pub struct DrbComputations { + /// Stored results from computations + results: BTreeMap, + + /// Currently live computation + task: Option<(TYPES::Epoch, JoinHandle)>, + + /// Stored inputs to computations + seeds: BTreeMap, +} + +impl DrbComputations { + #[must_use] + /// Create a new DrbComputations + pub fn new() -> Self { + Self { + results: BTreeMap::new(), + task: None, + seeds: BTreeMap::new(), + } + } + + /// If a task is currently live AND has finished, join it and save the result. + /// If the epoch for the calculation was the same as the provided epoch, return true + /// If a task is currently live and NOT finished, abort it UNLESS the task epoch is the same as + /// cur_epoch, in which case keep letting it run and return true. + /// Return false if a task should be spawned for the given epoch. + async fn join_or_abort_old_task(&mut self, epoch: TYPES::Epoch) -> bool { + if let Some((task_epoch, join_handle)) = &mut self.task { + if join_handle.is_finished() { + match join_handle.await { + Ok(result) => { + self.results.insert(*task_epoch, result); + let result = *task_epoch == epoch; + self.task = None; + result + } + Err(e) => { + tracing::error!("error joining DRB computation task: {e:?}"); + false + } + } + } else if *task_epoch == epoch { + true + } else { + join_handle.abort(); + self.task = None; + false + } + } else { + false + } + } + + /// Stores a seed for a particular epoch for later use by start_task_if_not_running, called from handle_quorum_proposal_validated_drb_calculation_start + pub fn store_seed(&mut self, epoch: TYPES::Epoch, drb_seed_input: DrbSeedInput) { + self.seeds.insert(epoch, drb_seed_input); + } + + /// Starts a new task. Cancels a current task if that task is not for the provided epoch. Allows a task to continue + /// running if it was already started for the given epoch. Avoids running the task if we already have a result for + /// the epoch. + pub async fn start_task_if_not_running(&mut self, epoch: TYPES::Epoch) { + // If join_or_abort_task returns true, then we either just completed a task for this epoch, or we currently + // have a running task for the epoch. + if self.join_or_abort_old_task(epoch).await { + return; + } + + // In case we somehow ended up processing this epoch already, don't start it again + if self.results.contains_key(&epoch) { + return; + } + + if let btree_map::Entry::Occupied(entry) = self.seeds.entry(epoch) { + let drb_seed_input = *entry.get(); + let new_drb_task = spawn(async move { compute_drb_result::(drb_seed_input) }); + self.task = Some((epoch, new_drb_task)); + entry.remove(); + } + } + + /// Retrieves the result for a given epoch + pub fn get_result(&self, epoch: TYPES::Epoch) -> Option { + self.results.get(&epoch).copied() + } + + /// Retrieves the seed for a given epoch + pub fn get_seed(&self, epoch: TYPES::Epoch) -> Option { + self.seeds.get(&epoch).copied() + } + + /// Garbage collects internal data structures + pub fn garbage_collect(&mut self, epoch: TYPES::Epoch) { + if epoch.u64() < KEEP_PREVIOUS_RESULT_COUNT { + return; + } + + let retain_epoch = epoch - KEEP_PREVIOUS_RESULT_COUNT; + // N.B. x.split_off(y) returns the part of the map where key >= y + + // Remove result entries older than EPOCH + self.results = self.results.split_off(&retain_epoch); + + // Remove result entries older than EPOCH+1 + self.seeds = self.seeds.split_off(&(retain_epoch + 1)); + } +} + +impl Default for DrbComputations { + fn default() -> Self { + Self::new() + } +} diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 56804f1d52..24629116ec 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -13,8 +13,7 @@ use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal2, VidDisperseShare}, - drb::compute_drb_result, - event::{Event, EventType}, + event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, traits::{ @@ -28,7 +27,6 @@ use hotshot_types::{ utils::epoch_from_block_number, vote::HasViewNumber, }; -use tokio::spawn; use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; @@ -43,6 +41,96 @@ use crate::{ quorum_vote::Versions, }; +/// Handles starting the DRB calculation. Uses the seed previously stored in +/// handle_quorum_proposal_validated_drb_calculation_seed +async fn handle_quorum_proposal_validated_drb_calculation_start< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + proposal: &QuorumProposal2, + task_state: &mut QuorumVoteTaskState, +) { + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + proposal.block_header.block_number(), + task_state.epoch_height, + )); + + // Start the new task if we're in the committee for this epoch + if task_state + .membership + .has_stake(&task_state.public_key, current_epoch_number) + { + task_state + .drb_computations + .start_task_if_not_running(current_epoch_number + 1) + .await; + } +} + +/// Handles storing the seed for an upcoming DRB calculation. +/// +/// We store the DRB computation seed 2 epochs in advance, if the decided block is the last but +/// third block in the current epoch and we are in the quorum committee of the next epoch. +/// +/// Special cases: +/// * Epoch 0: No DRB computation since we'll transition to epoch 1 immediately. +/// * Epoch 1 and 2: Use `[0u8; 32]` as the DRB result since when we first start the +/// computation in epoch 1, the result is for epoch 3. +/// +/// We don't need to handle the special cases explicitly here, because the first proposal +/// with which we'll start the DRB computation is for epoch 3. +fn handle_quorum_proposal_validated_drb_calculation_seed< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + proposal: &QuorumProposal2, + task_state: &mut QuorumVoteTaskState, + leaf_views: &[LeafInfo], +) -> Result<()> { + // This is never none if we've reached a new decide, so this is safe to unwrap. + let decided_block_number = leaf_views + .last() + .unwrap() + .leaf + .block_header() + .block_number(); + + // Skip if this is not the expected block. + if task_state.epoch_height != 0 && (decided_block_number + 3) % task_state.epoch_height == 0 { + // Cancel old DRB computation tasks. + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + decided_block_number, + task_state.epoch_height, + )); + + task_state + .drb_computations + .garbage_collect(current_epoch_number); + + // Skip if we are not in the committee of the next epoch. + if task_state + .membership + .has_stake(&task_state.public_key, current_epoch_number + 1) + { + let new_epoch_number = current_epoch_number + 2; + let Ok(drb_seed_input_vec) = bincode::serialize(&proposal.justify_qc.signatures) else { + bail!("Failed to serialize the QC signature."); + }; + let Ok(drb_seed_input) = drb_seed_input_vec.try_into() else { + bail!("Failed to convert the serialized QC signature into a DRB seed input."); + }; + + // Store the drb seed input for the next calculation + task_state + .drb_computations + .store_seed(new_epoch_number, drb_seed_input); + } + } + Ok(()) +} + /// Handles the `QuorumProposalValidated` event. #[instrument(skip_all, fields(id = task_state.id, view = *proposal.view_number))] pub(crate) async fn handle_quorum_proposal_validated< @@ -58,6 +146,10 @@ pub(crate) async fn handle_quorum_proposal_validated< .version(proposal.view_number()) .await?; + if version >= V::Epochs::VERSION { + handle_quorum_proposal_validated_drb_calculation_start(proposal, task_state).await; + } + let LeafChainTraversalOutcome { new_locked_view_number, new_decided_view_number, @@ -155,63 +247,12 @@ pub(crate) async fn handle_quorum_proposal_validated< .await; tracing::debug!("Successfully sent decide event"); - // Start the DRB computation two epochs in advance, if the decided block is the last but - // third block in the current epoch and we are in the quorum committee of the next epoch. - // - // Special cases: - // * Epoch 0: No DRB computation since we'll transition to epoch 1 immediately. - // * Epoch 1 and 2: Use `[0u8; 32]` as the DRB result since when we first start the - // computation in epoch 1, the result is for epoch 3. - // - // We don't need to handle the special cases explicitly here, because the first proposal - // with which we'll start the DRB computation is for epoch 3. if version >= V::Epochs::VERSION { - // This is never none if we've reached a new decide, so this is safe to unwrap. - let decided_block_number = leaf_views - .last() - .unwrap() - .leaf - .block_header() - .block_number(); - - // Skip if this is not the expected block. - if task_state.epoch_height != 0 - && (decided_block_number + 3) % task_state.epoch_height == 0 - { - // Cancel old DRB computation tasks. - let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( - decided_block_number, - task_state.epoch_height, - )); - let current_tasks = task_state.drb_computations.split_off(¤t_epoch_number); - while let Some((_, task)) = task_state.drb_computations.pop_last() { - task.abort(); - } - task_state.drb_computations = current_tasks; - - // Skip if we are not in the committee of the next epoch. - if task_state - .membership - .has_stake(&task_state.public_key, current_epoch_number + 1) - { - let new_epoch_number = current_epoch_number + 2; - let Ok(drb_seed_input_vec) = - bincode::serialize(&proposal.justify_qc.signatures) - else { - bail!("Failed to serialize the QC signature."); - }; - let Ok(drb_seed_input) = drb_seed_input_vec.try_into() else { - bail!( - "Failed to convert the serialized QC signature into a DRB seed input." - ); - }; - let new_drb_task = - spawn(async move { compute_drb_result::(drb_seed_input) }); - task_state - .drb_computations - .insert(new_epoch_number, new_drb_task); - } - } + handle_quorum_proposal_validated_drb_calculation_seed( + proposal, + task_state, + &leaf_views, + )?; } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 4162969397..cf8181d771 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -10,6 +10,7 @@ use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; +use drb_computations::DrbComputations; use hotshot_task::{ dependency::{AndDependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, @@ -18,7 +19,6 @@ use hotshot_task::{ use hotshot_types::{ consensus::{ConsensusMetricsValue, OuterConsensus}, data::{Leaf2, QuorumProposal2}, - drb::DrbResult, event::Event, message::{Proposal, UpgradeLock}, traits::{ @@ -44,6 +44,9 @@ use crate::{ quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, }; +/// Helper for DRB Computations +pub mod drb_computations; + /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -276,7 +279,8 @@ pub struct QuorumVoteTaskState, V: pub membership: Arc, /// Table for the in-progress DRB computation tasks. - pub drb_computations: BTreeMap>, + //pub drb_computations: BTreeMap>, + pub drb_computations: DrbComputations, /// Output events to application pub output_event_stream: async_broadcast::Sender>, From d747f8dbc0a8a66fc395f540b97a36a6ad7ec52b Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 5 Dec 2024 15:48:52 +0100 Subject: [PATCH 1325/1393] [WEEKLY RELEASE] Downgrade primitive-types (#3930) --- hotshot-stake-table/src/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-stake-table/src/utils.rs b/hotshot-stake-table/src/utils.rs index b295cdeb28..07a3d261b3 100644 --- a/hotshot-stake-table/src/utils.rs +++ b/hotshot-stake-table/src/utils.rs @@ -21,6 +21,6 @@ pub trait ToFields { /// convert a U256 to a field element. pub(crate) fn u256_to_field(v: &U256) -> F { let mut bytes = vec![0u8; 32]; - v.write_as_little_endian(&mut bytes); + v.to_little_endian(&mut bytes); F::from_le_bytes_mod_order(&bytes) } From 1242e303dbd764da79f13842df18021d361f8712 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 6 Dec 2024 01:42:34 -0800 Subject: [PATCH 1326/1393] fix the return value of txn stat api (#3943) --- builder-api/src/v0_1/builder.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/builder-api/src/v0_1/builder.rs b/builder-api/src/v0_1/builder.rs index 399093908c..068d9f6e0a 100644 --- a/builder-api/src/v0_1/builder.rs +++ b/builder-api/src/v0_1/builder.rs @@ -262,8 +262,7 @@ where .body_auto::<::Transaction, Ver>(Ver::instance()) .map_err(Error::TxnUnpack)?; let hash = tx.commit(); - state.txn_status(hash).await.map_err(Error::TxnStat)?; - Ok(hash) + state.txn_status(hash).await.map_err(Error::TxnStat) } .boxed() })?; From c497ad6dd1f413edfec06c805961a3f00076b22a Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 6 Dec 2024 11:24:06 -0500 Subject: [PATCH 1327/1393] Add remaining *2 types (#3932) --- example-types/src/storage_types.rs | 21 +- task-impls/src/da.rs | 40 ++- task-impls/src/events.rs | 86 ++--- task-impls/src/network.rs | 156 ++++++--- task-impls/src/quorum_proposal/handlers.rs | 18 +- task-impls/src/quorum_proposal/mod.rs | 7 +- .../src/quorum_proposal_recv/handlers.rs | 4 +- task-impls/src/quorum_vote/handlers.rs | 8 +- task-impls/src/quorum_vote/mod.rs | 4 +- task-impls/src/transactions.rs | 22 +- task-impls/src/view_sync.rs | 23 +- task-impls/src/vote_collection.rs | 57 +-- testing/src/helpers.rs | 11 +- testing/src/view_generator.rs | 34 +- testing/src/view_sync_task.rs | 12 +- testing/tests/tests_1/da_task.rs | 52 ++- testing/tests/tests_1/quorum_proposal_task.rs | 2 +- testing/tests/tests_1/transaction_task.rs | 1 + testing/tests/tests_1/vid_task.rs | 1 + types/src/consensus.rs | 8 +- types/src/data.rs | 62 +++- types/src/event.rs | 5 +- types/src/message.rs | 55 ++- types/src/simple_certificate.rs | 327 +++++++++++++++++- types/src/simple_vote.rs | 256 +++++++++++++- types/src/traits/storage.rs | 10 +- 26 files changed, 1037 insertions(+), 245 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index acedb42007..1a666c737e 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -14,7 +14,9 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, + }, event::HotShotAction, message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, @@ -38,6 +40,7 @@ type VidShares = HashMap< pub struct TestStorageState { vids: VidShares, das: HashMap>>, + da2s: HashMap>>, proposals: BTreeMap>>, proposals2: BTreeMap>>, high_qc: Option>, @@ -51,6 +54,7 @@ impl Default for TestStorageState { Self { vids: HashMap::new(), das: HashMap::new(), + da2s: HashMap::new(), proposals: BTreeMap::new(), proposals2: BTreeMap::new(), high_qc: None, @@ -142,6 +146,21 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_da2( + &self, + proposal: &Proposal>, + _vid_commit: ::Commit, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + inner + .da2s + .insert(proposal.data.view_number, proposal.clone()); + Ok(()) + } async fn append_proposal( &self, proposal: &Proposal>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 2c503d2b13..923b15e010 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -12,11 +12,11 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::{DaProposal, PackedBundle}, + data::{DaProposal2, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, - simple_certificate::DaCertificate, - simple_vote::{DaData, DaVote}, + simple_certificate::DaCertificate2, + simple_vote::{DaData2, DaVote2}, traits::{ block_contents::vid_commitment, election::Membership, @@ -61,7 +61,7 @@ pub struct DaTaskState, V: Version pub network: Arc, /// A map of `DaVote` collector tasks. - pub vote_collectors: VoteCollectorsMap, DaCertificate, V>, + pub vote_collectors: VoteCollectorsMap, DaCertificate2, V>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -143,8 +143,11 @@ impl, V: Versions> DaTaskState { let cur_view = self.consensus.read().await.cur_view(); + let view_number = proposal.data.view_number(); + let epoch_number = proposal.data.epoch_number; + ensure!( - cur_view <= proposal.data.view_number() + 1, + cur_view <= view_number + 1, debug!( "Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", cur_view, @@ -155,7 +158,7 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState = DaProposal { + let data: DaProposal2 = DaProposal2 { encoded_transactions: Arc::clone(encoded_transactions), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number, + epoch_number: *epoch_number, }; let message = Proposal { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 721a8b9cc0..c75ef7c752 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -11,18 +11,18 @@ use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{ - DaProposal, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, + DaProposal2, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, VidDisperseShare, }, message::Proposal, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, + ViewSyncPreCommitCertificate, }, simple_vote::{ - DaVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DaVote2, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ @@ -80,15 +80,15 @@ pub enum HotShotEvent { /// Send a timeout vote to the network; emitted by consensus task replicas TimeoutVoteSend(TimeoutVote), /// A DA proposal has been received from the network; handled by the DA task - DaProposalRecv(Proposal>, TYPES::SignatureKey), + DaProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA proposal has been validated; handled by the DA task and VID task - DaProposalValidated(Proposal>, TYPES::SignatureKey), + DaProposalValidated(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task - DaVoteRecv(DaVote), + DaVoteRecv(DaVote2), /// A Data Availability Certificate (DAC) has been received by the network; handled by the consensus task - DaCertificateRecv(DaCertificate), + DaCertificateRecv(DaCertificate2), /// A DAC is validated. - DaCertificateValidated(DaCertificate), + DaCertificateValidated(DaCertificate2), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal @@ -117,15 +117,15 @@ pub enum HotShotEvent { /// A quorum proposal was requested by a node for a view. QuorumProposalResponseRecv(Proposal>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task - DaProposalSend(Proposal>, TYPES::SignatureKey), + DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal - DaVoteSend(DaVote), + DaVoteSend(DaVote2), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QcFormed(Either, TimeoutCertificate>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only Qc2Formed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task - DacSend(DaCertificate, TYPES::SignatureKey), + DacSend(DaCertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks ViewChange(TYPES::View, TYPES::Epoch), /// Timeout for the view sync protocol; emitted by a replica in the view sync task @@ -145,19 +145,19 @@ pub enum HotShotEvent { /// Send a `ViewSyncFinalizeVote` from the network; emitted by a replica in the view sync task ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote), - /// Receive a `ViewSyncPreCommitCertificate2` from the network; received by a replica in the view sync task - ViewSyncPreCommitCertificate2Recv(ViewSyncPreCommitCertificate2), - /// Receive a `ViewSyncCommitCertificate2` from the network; received by a replica in the view sync task - ViewSyncCommitCertificate2Recv(ViewSyncCommitCertificate2), - /// Receive a `ViewSyncFinalizeCertificate2` from the network; received by a replica in the view sync task - ViewSyncFinalizeCertificate2Recv(ViewSyncFinalizeCertificate2), + /// Receive a `ViewSyncPreCommitCertificate` from the network; received by a replica in the view sync task + ViewSyncPreCommitCertificateRecv(ViewSyncPreCommitCertificate), + /// Receive a `ViewSyncCommitCertificate` from the network; received by a replica in the view sync task + ViewSyncCommitCertificateRecv(ViewSyncCommitCertificate), + /// Receive a `ViewSyncFinalizeCertificate` from the network; received by a replica in the view sync task + ViewSyncFinalizeCertificateRecv(ViewSyncFinalizeCertificate), - /// Send a `ViewSyncPreCommitCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncPreCommitCertificate2Send(ViewSyncPreCommitCertificate2, TYPES::SignatureKey), - /// Send a `ViewSyncCommitCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncCommitCertificate2Send(ViewSyncCommitCertificate2, TYPES::SignatureKey), - /// Send a `ViewSyncFinalizeCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), + /// Send a `ViewSyncPreCommitCertificate` from the network; emitted by a relay in the view sync task + ViewSyncPreCommitCertificateSend(ViewSyncPreCommitCertificate, TYPES::SignatureKey), + /// Send a `ViewSyncCommitCertificate` from the network; emitted by a relay in the view sync task + ViewSyncCommitCertificateSend(ViewSyncCommitCertificate, TYPES::SignatureKey), + /// Send a `ViewSyncFinalizeCertificate` from the network; emitted by a relay in the view sync task + ViewSyncFinalizeCertificateSend(ViewSyncFinalizeCertificate, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::View), @@ -289,12 +289,12 @@ impl HotShotEvent { | HotShotEvent::ViewSyncPreCommitVoteSend(vote) => Some(vote.view_number()), HotShotEvent::ViewSyncFinalizeVoteRecv(vote) | HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(vote.view_number()), - HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) - | HotShotEvent::ViewSyncPreCommitCertificate2Send(cert, _) => Some(cert.view_number()), - HotShotEvent::ViewSyncCommitCertificate2Recv(cert) - | HotShotEvent::ViewSyncCommitCertificate2Send(cert, _) => Some(cert.view_number()), - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncPreCommitCertificateRecv(cert) + | HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncCommitCertificateRecv(cert) + | HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) + | HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => Some(cert.view_number()), HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { Some(*view_number) } @@ -451,45 +451,45 @@ impl Display for HotShotEvent { "ViewSyncFinalizeVoteSend(view_number={:?})", vote.view_number() ), - HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) => { + HotShotEvent::ViewSyncPreCommitCertificateRecv(cert) => { write!( f, - "ViewSyncPreCommitCertificate2Recv(view_number={:?})", + "ViewSyncPreCommitCertificateRecv(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncCommitCertificate2Recv(cert) => { + HotShotEvent::ViewSyncCommitCertificateRecv(cert) => { write!( f, - "ViewSyncCommitCertificate2Recv(view_number={:?})", + "ViewSyncCommitCertificateRecv(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { write!( f, - "ViewSyncFinalizeCertificate2Recv(view_number={:?})", + "ViewSyncFinalizeCertificateRecv(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncPreCommitCertificate2Send(cert, _) => { + HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => { write!( f, - "ViewSyncPreCommitCertificate2Send(view_number={:?})", + "ViewSyncPreCommitCertificateSend(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncCommitCertificate2Send(cert, _) => { + HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => { write!( f, - "ViewSyncCommitCertificate2Send(view_number={:?})", + "ViewSyncCommitCertificateSend(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncFinalizeCertificate2Send(cert, _) => { + HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => { write!( f, - "ViewSyncFinalizeCertificate2Send(view_number={:?})", + "ViewSyncFinalizeCertificateSend(view_number={:?})", cert.view_number() ) } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ad9dcd4168..b885e2f7e5 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -36,6 +36,7 @@ use hotshot_types::{ use tokio::{spawn, task::JoinHandle}; use tracing::instrument; use utils::anytrace::*; +use vbs::version::StaticVersionType; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -88,22 +89,20 @@ impl NetworkMessageTaskState { } GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, - ) => HotShotEvent::ViewSyncPreCommitCertificate2Recv(view_sync_message), + ) => HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message), GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { - HotShotEvent::ViewSyncCommitCertificate2Recv(view_sync_message) + HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) } - GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { - HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_message) + HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) } - GeneralConsensusMessage::TimeoutVote(message) => { HotShotEvent::TimeoutVoteRecv(message) } @@ -115,18 +114,34 @@ impl NetworkMessageTaskState { HotShotEvent::UpgradeVoteRecv(message) } GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), + GeneralConsensusMessage::Proposal2(proposal) => { + HotShotEvent::QuorumProposalRecv(proposal, sender) + } + GeneralConsensusMessage::Vote2(vote) => HotShotEvent::QuorumVoteRecv(vote), + GeneralConsensusMessage::Proposal2Response(proposal) => { + HotShotEvent::QuorumProposalResponseRecv(proposal) + } }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { - HotShotEvent::DaProposalRecv(proposal, sender) + HotShotEvent::DaProposalRecv(convert_proposal(proposal), sender) + } + DaConsensusMessage::DaVote(vote) => { + HotShotEvent::DaVoteRecv(vote.clone().to_vote2()) } - DaConsensusMessage::DaVote(vote) => HotShotEvent::DaVoteRecv(vote.clone()), DaConsensusMessage::DaCertificate(cert) => { - HotShotEvent::DaCertificateRecv(cert) + HotShotEvent::DaCertificateRecv(cert.to_dac2()) } DaConsensusMessage::VidDisperseMsg(proposal) => { HotShotEvent::VidShareRecv(sender, proposal) } + DaConsensusMessage::DaProposal2(proposal) => { + HotShotEvent::DaProposalRecv(proposal, sender) + } + DaConsensusMessage::DaVote2(vote) => HotShotEvent::DaVoteRecv(vote.clone()), + DaConsensusMessage::DaCertificate2(cert) => { + HotShotEvent::DaCertificateRecv(cert) + } }, }; broadcast_event(Arc::new(event), &self.internal_event_stream).await; @@ -374,13 +389,23 @@ impl< match event.as_ref().clone() { HotShotEvent::QuorumProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::Propose); - Some(( - sender, + + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Proposal2(proposal), + )) + } else { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Proposal(convert_proposal(proposal)), - )), - TransmitType::Broadcast, - )) + )) + }; + + Some((sender, message, TransmitType::Broadcast)) } // ED Each network task is subscribed to all these message types. Need filters per network task @@ -399,23 +424,41 @@ impl< } }; - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote2(vote.clone()), + )) + } else { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone().to_vote()), - )), - TransmitType::Direct(leader), - )) + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::ExtendedQuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote2(vote.clone()), + )) + } else { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone().to_vote()), - )), - TransmitType::Broadcast, - )) + )) + }; + + Some((vote.signing_key(), message, TransmitType::Broadcast)) } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), @@ -437,13 +480,23 @@ impl< } HotShotEvent::DaProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::DaPropose); - Some(( - sender, + + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaProposal(proposal), - )), - TransmitType::DaCommitteeBroadcast, - )) + DaConsensusMessage::DaProposal2(proposal), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaProposal(convert_proposal(proposal)), + )) + }; + + Some((sender, message, TransmitType::DaCommitteeBroadcast)) } HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); @@ -460,23 +513,38 @@ impl< } }; - Some(( - vote.signing_key(), + let message = if self.upgrade_lock.version_infallible(view_number).await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaVote(vote.clone()), - )), - TransmitType::Direct(leader), - )) + DaConsensusMessage::DaVote2(vote.clone()), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaVote(vote.clone().to_vote()), + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::DacSend(certificate, sender) => { *maybe_action = Some(HotShotAction::DaCert); - Some(( - sender, + let message = if self + .upgrade_lock + .version_infallible(certificate.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaCertificate(certificate), - )), - TransmitType::Broadcast, - )) + DaConsensusMessage::DaCertificate2(certificate), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaCertificate(certificate.to_dac()), + )) + }; + + Some((sender, message, TransmitType::Broadcast)) } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; @@ -546,21 +614,21 @@ impl< TransmitType::Direct(leader), )) } - HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( + HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), )), TransmitType::Broadcast, )), - HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => Some(( + HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), )), TransmitType::Broadcast, )), - HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => Some(( + HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index b1cc8e36fa..e8b124b1e2 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -20,7 +20,8 @@ use committable::Committable; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, - data::{Leaf2, QuorumProposal, VidDisperse, ViewChangeEvidence}, + data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence}, + drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ @@ -50,7 +51,7 @@ pub(crate) enum ProposalDependency { /// For the `Qc2Formed` event. Qc, - /// For the `ViewSyncFinalizeCertificate2Recv` event. + /// For the `ViewSyncFinalizeCertificateRecv` event. ViewSyncCert, /// For the `Qc2Formed` event timeout branch. @@ -296,14 +297,15 @@ impl ProposalDependencyHandle { .context(warn!("Failed to construct marketplace block header"))? }; - let proposal = QuorumProposal { + let proposal = QuorumProposal2 { block_header, view_number: self.view_number, - justify_qc: parent_qc.to_qc(), + justify_qc: parent_qc, upgrade_certificate, - proposal_certificate, - } - .into(); + view_change_evidence: proposal_certificate, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }; let proposed_leaf = Leaf2::from_quorum_proposal(&proposal); ensure!( @@ -376,7 +378,7 @@ impl HandleDepOutput for ProposalDependencyHandle< parent_qc = Some(qc.clone()); } }, - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { view_sync_finalize_cert = Some(cert.clone()); } HotShotEvent::VidDisperseSend(share, _) => { diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 06150dff97..49875c6480 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -117,8 +117,7 @@ impl, V: Versions> } } ProposalDependency::ViewSyncCert => { - if let HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_cert) = - event + if let HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_cert) = event { view_sync_cert.view_number() } else { @@ -225,7 +224,7 @@ impl, V: Versions> qc_dependency.mark_as_completed(event); } }, - HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(_) => { view_sync_dependency.mark_as_completed(event); } HotShotEvent::VidDisperseSend(_, _) => { @@ -437,7 +436,7 @@ impl, V: Versions> Arc::clone(&event), )?; } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = self.consensus.read().await.cur_epoch(); ensure!( diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 3d5c010058..528fc04562 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -156,10 +156,10 @@ pub(crate) async fn handle_quorum_proposal_recv< .is_valid_cert( validation_info .quorum_membership - .stake_table(validation_info.cur_epoch), + .stake_table(justify_qc.data.epoch), validation_info .quorum_membership - .success_threshold(validation_info.cur_epoch), + .success_threshold(justify_qc.data.epoch), &validation_info.upgrade_lock, ) .await diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 24629116ec..c434352ba1 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -399,12 +399,17 @@ pub(crate) async fn submit_vote, V private_key: ::PrivateKey, upgrade_lock: UpgradeLock, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_height: u64, storage: Arc>, leaf: Leaf2, vid_share: Proposal>, extended_vote: bool, ) -> Result<()> { + let epoch_number = TYPES::Epoch::new(epoch_from_block_number( + leaf.block_header().block_number(), + epoch_height, + )); + ensure!( quorum_membership.has_stake(&public_key, epoch_number), info!( @@ -417,6 +422,7 @@ pub(crate) async fn submit_vote, V let vote = QuorumVote2::::create_signed_vote( QuorumData2 { leaf_commit: leaf.commit(), + epoch: epoch_number, }, view_number, &public_key, diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index cf8181d771..5f2b1df891 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -237,7 +237,7 @@ impl + 'static, V: Versions> Handl self.private_key.clone(), self.upgrade_lock.clone(), self.view_number, - current_epoch, + self.epoch_height, Arc::clone(&self.storage), leaf, vid_share, @@ -713,7 +713,7 @@ impl, V: Versions> QuorumVoteTaskS self.private_key.clone(), self.upgrade_lock.clone(), proposal.data.view_number(), - current_epoch, + self.epoch_height, Arc::clone(&self.storage), proposed_leaf, updated_vid, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index f6da7ffec0..ffe9290d73 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -127,6 +127,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -137,10 +138,10 @@ impl, V: Versions> TransactionTask }; if version < V::Marketplace::VERSION { - self.handle_view_change_legacy(event_stream, block_view) + self.handle_view_change_legacy(event_stream, block_view, block_epoch) .await } else { - self.handle_view_change_marketplace(event_stream, block_view) + self.handle_view_change_marketplace(event_stream, block_view, block_epoch) .await } } @@ -151,6 +152,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -188,6 +190,7 @@ impl, V: Versions> TransactionTask block_payload.encode(), metadata, block_view, + block_epoch, vec1::vec1![fee], precompute_data, None, @@ -231,6 +234,7 @@ impl, V: Versions> TransactionTask vec![].into(), metadata, block_view, + block_epoch, vec1::vec1![null_fee], Some(precompute_data), None, @@ -251,6 +255,7 @@ impl, V: Versions> TransactionTask async fn produce_block_marketplace( &mut self, block_view: TYPES::View, + block_epoch: TYPES::Epoch, task_start_time: Instant, ) -> Result> { ensure!( @@ -343,6 +348,7 @@ impl, V: Versions> TransactionTask block_payload.encode(), metadata, block_view, + block_epoch, sequencing_fees, None, Some(auction_result), @@ -353,6 +359,7 @@ impl, V: Versions> TransactionTask pub fn null_block( &self, block_view: TYPES::View, + block_epoch: TYPES::Epoch, version: Version, ) -> Option> { let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); @@ -374,6 +381,7 @@ impl, V: Versions> TransactionTask vec![].into(), metadata, block_view, + block_epoch, vec1::vec1![null_fee], Some(precompute_data), Some(TYPES::AuctionResult::default()), @@ -386,6 +394,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { let task_start_time = Instant::now(); @@ -398,7 +407,7 @@ impl, V: Versions> TransactionTask }; let packed_bundle = match self - .produce_block_marketplace(block_view, task_start_time) + .produce_block_marketplace(block_view, block_epoch, task_start_time) .await { Ok(b) => b, @@ -409,7 +418,7 @@ impl, V: Versions> TransactionTask e ); - let null_block = self.null_block(block_view, version)?; + let null_block = self.null_block(block_view, block_epoch, version)?; // Increment the metric for number of empty blocks proposed self.consensus @@ -438,12 +447,13 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { if self.consensus.read().await.is_high_qc_forming_eqc() { tracing::info!("Reached end of epoch. Not getting a new block until we form an eQC."); None } else { - self.handle_view_change_marketplace(event_stream, block_view) + self.handle_view_change_marketplace(event_stream, block_view, block_epoch) .await } } @@ -481,7 +491,7 @@ impl, V: Versions> TransactionTask ); self.cur_view = view; if self.membership.leader(view, self.cur_epoch)? == self.public_key { - self.handle_view_change(&event_stream, view).await; + self.handle_view_change(&event_stream, view, *epoch).await; return Ok(()); } } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 6bb83dfc53..3574fe0ee6 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -18,7 +18,7 @@ use hotshot_task::task::TaskState; use hotshot_types::{ message::{GeneralConsensusMessage, UpgradeLock}, simple_certificate::{ - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, }, simple_vote::{ ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, @@ -92,17 +92,16 @@ pub struct ViewSyncTaskState { /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: RwLock< - RelayMap, ViewSyncPreCommitCertificate2, V>, + RelayMap, ViewSyncPreCommitCertificate, V>, >, /// Map of commit vote accumulates for the relay pub commit_relay_map: - RwLock, ViewSyncCommitCertificate2, V>>, + RwLock, ViewSyncCommitCertificate, V>>, /// Map of finalize vote accumulates for the relay - pub finalize_relay_map: RwLock< - RelayMap, ViewSyncFinalizeCertificate2, V>, - >, + pub finalize_relay_map: + RwLock, ViewSyncFinalizeCertificate, V>>, /// Timeout duration for view sync rounds pub view_sync_timeout: Duration, @@ -263,19 +262,19 @@ impl ViewSyncTaskState { event_stream: Sender>>, ) -> Result<()> { match event.as_ref() { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncPreCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } - HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) @@ -521,7 +520,7 @@ impl ViewSyncReplicaTaskState { event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncPreCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::PreCommit; // Ignore certificate if it is for an older round @@ -607,7 +606,7 @@ impl ViewSyncReplicaTaskState { })); } - HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::Commit; // Ignore certificate if it is for an older round @@ -706,7 +705,7 @@ impl ViewSyncReplicaTaskState { })); } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { tracing::warn!("We're already in a higher round"); diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index e0fc96040b..d7ad469746 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -17,12 +17,12 @@ use either::Either::{self, Left, Right}; use hotshot_types::{ message::UpgradeLock, simple_certificate::{ - DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, + ViewSyncPreCommitCertificate, }, simple_vote::{ - DaVote, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, + DaVote2, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ @@ -307,7 +307,8 @@ where type QuorumVoteState = VoteCollectionTaskState, QuorumCertificate2, V>; /// Alias for DA vote accumulator -type DaVoteState = VoteCollectionTaskState, DaCertificate, V>; +type DaVoteState = + VoteCollectionTaskState, DaCertificate2, V>; /// Alias for Timeout vote accumulator type TimeoutVoteState = VoteCollectionTaskState, TimeoutCertificate, V>; @@ -318,17 +319,17 @@ type UpgradeVoteState = type ViewSyncPreCommitState = VoteCollectionTaskState< TYPES, ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, + ViewSyncPreCommitCertificate, V, >; /// Alias for View Sync Commit vote accumulator type ViewSyncCommitVoteState = - VoteCollectionTaskState, ViewSyncCommitCertificate2, V>; + VoteCollectionTaskState, ViewSyncCommitCertificate, V>; /// Alias for View Sync Finalize vote accumulator type ViewSyncFinalizeVoteState = VoteCollectionTaskState< TYPES, ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, + ViewSyncFinalizeCertificate, V, >; @@ -386,8 +387,8 @@ impl AggregatableVote, UpgradeCertifi } } -impl AggregatableVote, DaCertificate> - for DaVote +impl AggregatableVote, DaCertificate2> + for DaVote2 { fn leader( &self, @@ -397,7 +398,7 @@ impl AggregatableVote, DaCertificate, + certificate: DaCertificate2, key: &TYPES::SignatureKey, ) -> HotShotEvent { HotShotEvent::DacSend(certificate, key.clone()) @@ -423,7 +424,7 @@ impl AggregatableVote, TimeoutCertifi } impl - AggregatableVote, ViewSyncCommitCertificate2> + AggregatableVote, ViewSyncCommitCertificate> for ViewSyncCommitVote { fn leader( @@ -434,15 +435,15 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncCommitCertificate2, + certificate: ViewSyncCommitCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::ViewSyncCommitCertificate2Send(certificate, key.clone()) + HotShotEvent::ViewSyncCommitCertificateSend(certificate, key.clone()) } } impl - AggregatableVote, ViewSyncPreCommitCertificate2> + AggregatableVote, ViewSyncPreCommitCertificate> for ViewSyncPreCommitVote { fn leader( @@ -453,15 +454,15 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncPreCommitCertificate2, + certificate: ViewSyncPreCommitCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, key.clone()) + HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, key.clone()) } } impl - AggregatableVote, ViewSyncFinalizeCertificate2> + AggregatableVote, ViewSyncFinalizeCertificate> for ViewSyncFinalizeVote { fn leader( @@ -472,10 +473,10 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncFinalizeCertificate2, + certificate: ViewSyncFinalizeCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, key.clone()) + HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, key.clone()) } } @@ -522,14 +523,14 @@ impl } #[async_trait] -impl HandleVoteEvent, DaCertificate> +impl HandleVoteEvent, DaCertificate2> for DaVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), @@ -562,14 +563,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncPreCommitCertificate2> + HandleVoteEvent, ViewSyncPreCommitCertificate> for ViewSyncPreCommitState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await @@ -584,14 +585,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncCommitCertificate2> + HandleVoteEvent, ViewSyncCommitCertificate> for ViewSyncCommitVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), @@ -604,14 +605,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncFinalizeCertificate2> + HandleVoteEvent, ViewSyncFinalizeCertificate> for ViewSyncFinalizeVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index cc468a1859..9097f38d02 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -27,8 +27,8 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, Leaf2, QuorumProposal, VidDisperse, VidDisperseShare}, message::{GeneralConsensusMessage, Proposal, UpgradeLock}, - simple_certificate::DaCertificate, - simple_vote::{DaData, DaVote, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, + simple_certificate::DaCertificate2, + simple_vote::{DaData2, DaVote2, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, @@ -364,17 +364,18 @@ pub async fn build_da_certificate( public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, -) -> DaCertificate { +) -> DaCertificate2 { let encoded_transactions = TestTransaction::encode(&transactions); let da_payload_commitment = vid_commitment(&encoded_transactions, membership.total_nodes(epoch_number)); - let da_data = DaData { + let da_data = DaData2 { payload_commit: da_payload_commitment, + epoch: epoch_number, }; - build_cert::, DaCertificate>( + build_cert::, DaVote2, DaCertificate2>( da_data, membership, view_number, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index a6651ad55d..e31f98364f 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -22,17 +22,17 @@ use hotshot_example_types::{ }; use hotshot_types::{ data::{ - DaProposal, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, + DaProposal2, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, ViewChangeEvidence, ViewNumber, }, drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ - DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncFinalizeCertificate2, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncFinalizeCertificate, }, simple_vote::{ - DaData, DaVote, QuorumData2, QuorumVote2, TimeoutData, TimeoutVote, UpgradeProposalData, + DaData2, DaVote2, QuorumData2, QuorumVote2, TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, }, traits::{ @@ -40,6 +40,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, BlockPayload, }, + utils::epoch_from_block_number, }; use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; @@ -50,7 +51,7 @@ use crate::helpers::{ #[derive(Clone)] pub struct TestView { - pub da_proposal: Proposal>, + pub da_proposal: Proposal>, pub quorum_proposal: Proposal>, pub leaf: Leaf2, pub view_number: ViewNumber, @@ -62,7 +63,7 @@ pub struct TestView { ::SignatureKey, ), pub leader_public_key: ::SignatureKey, - pub da_certificate: DaCertificate, + pub da_certificate: DaCertificate2, pub transactions: Vec, upgrade_data: Option>, formed_upgrade_certificate: Option>, @@ -74,7 +75,7 @@ pub struct TestView { impl TestView { pub async fn genesis(membership: &::Membership) -> Self { let genesis_view = ViewNumber::new(1); - let genesis_epoch = EpochNumber::new(1); + let genesis_epoch = EpochNumber::new(0); let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -152,10 +153,11 @@ impl TestView { ::SignatureKey::sign(&private_key, &encoded_transactions_hash) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: encoded_transactions.clone(), metadata, view_number: genesis_view, + epoch_number: genesis_epoch, }; let da_proposal = Proposal { @@ -217,6 +219,7 @@ impl TestView { let quorum_data = QuorumData2 { leaf_commit: old.leaf.commit(), + epoch: EpochNumber::new(0), }; let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); @@ -306,7 +309,7 @@ impl TestView { TestVersions, ViewSyncFinalizeData, ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, + ViewSyncFinalizeCertificate, >( data.clone(), membership, @@ -393,10 +396,11 @@ impl TestView { ::SignatureKey::sign(&private_key, &encoded_transactions_hash) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: encoded_transactions.clone(), metadata, view_number: next_view, + epoch_number: self.epoch_number, }; let da_proposal = Proposal { @@ -442,6 +446,10 @@ impl TestView { QuorumVote2::::create_signed_vote( QuorumData2 { leaf_commit: self.leaf.commit(), + epoch: EpochNumber::new(epoch_from_block_number( + self.leaf.height(), + handle.hotshot.config.epoch_height, + )), }, self.view_number, &handle.public_key(), @@ -470,10 +478,10 @@ impl TestView { pub async fn create_da_vote( &self, - data: DaData, + data: DaData2, handle: &SystemContextHandle, - ) -> DaVote { - DaVote::create_signed_vote( + ) -> DaVote2 { + DaVote2::create_signed_vote( data, self.view_number, &handle.public_key(), diff --git a/testing/src/view_sync_task.rs b/testing/src/view_sync_task.rs index 733164d341..914c8279cd 100644 --- a/testing/src/view_sync_task.rs +++ b/testing/src/view_sync_task.rs @@ -48,12 +48,12 @@ impl> TestTaskState | HotShotEvent::ViewSyncPreCommitVoteSend(_) | HotShotEvent::ViewSyncCommitVoteSend(_) | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncPreCommitCertificateRecv(_) + | HotShotEvent::ViewSyncCommitCertificateRecv(_) + | HotShotEvent::ViewSyncFinalizeCertificateRecv(_) + | HotShotEvent::ViewSyncPreCommitCertificateSend(_, _) + | HotShotEvent::ViewSyncCommitCertificateSend(_, _) + | HotShotEvent::ViewSyncFinalizeCertificateSend(_, _) | HotShotEvent::ViewSyncTrigger(_) => { self.hit_view_sync.insert(id); } diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 6f4c2a38df..e672ff24e3 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -23,7 +23,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{null_block, EpochNumber, PackedBundle, ViewNumber}, - simple_vote::DaData, + simple_vote::DaData2, traits::{ block_contents::precompute_vid_commitment, election::Membership, @@ -63,8 +63,14 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -76,8 +82,14 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -85,14 +97,15 @@ async fn test_da_task() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1), EpochNumber::new(1)), - ViewChange(ViewNumber::new(2), EpochNumber::new(1)), + ViewChange(ViewNumber::new(1), EpochNumber::new(0)), + ViewChange(ViewNumber::new(2), EpochNumber::new(0)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), + EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, @@ -158,8 +171,14 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -171,8 +190,14 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -180,14 +205,15 @@ async fn test_da_task_storage_failure() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1), EpochNumber::new(1)), - ViewChange(ViewNumber::new(2), EpochNumber::new(1)), + ViewChange(ViewNumber::new(1), EpochNumber::new(0)), + ViewChange(ViewNumber::new(2), EpochNumber::new(0)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), + EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 0124e10b5a..25e70fc6c2 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -440,7 +440,7 @@ async fn test_quorum_proposal_task_view_sync() { }; let inputs = vec![random![ - ViewSyncFinalizeCertificate2Recv(cert.clone()), + ViewSyncFinalizeCertificateRecv(cert.clone()), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 43773e63da..e4ed70be64 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -52,6 +52,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { num_transactions: 0, }, current_view, + EpochNumber::new(1), vec1::vec1![ null_block::builder_fee::( handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 81f9ac2999..3bf19bbc38 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -108,6 +108,7 @@ async fn test_vid_task() { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), + EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 2d2e814e9f..0c2e8d936f 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -25,7 +25,7 @@ use crate::{ error::HotShotError, event::{HotShotAction, LeafInfo}, message::Proposal, - simple_certificate::{DaCertificate, QuorumCertificate2}, + simple_certificate::{DaCertificate2, QuorumCertificate2}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, @@ -281,7 +281,7 @@ pub struct Consensus { /// All the DA certs we've received for current and future views. /// view -> DA cert - saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. cur_view: TYPES::View, @@ -479,7 +479,7 @@ impl Consensus { } /// Get the saved DA certs. - pub fn saved_da_certs(&self) -> &HashMap> { + pub fn saved_da_certs(&self) -> &HashMap> { &self.saved_da_certs } @@ -747,7 +747,7 @@ impl Consensus { } /// Add a new entry to the da_certs map. - pub fn update_saved_da_certs(&mut self, view_number: TYPES::View, cert: DaCertificate) { + pub fn update_saved_da_certs(&mut self, view_number: TYPES::View, cert: DaCertificate2) { self.saved_da_certs.insert(view_number, cert); } diff --git a/types/src/data.rs b/types/src/data.rs index 422f2887cb..b6ec66f16a 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -34,7 +34,7 @@ use crate::{ message::{Proposal, UpgradeLock}, simple_certificate::{ QuorumCertificate, QuorumCertificate2, TimeoutCertificate, UpgradeCertificate, - ViewSyncFinalizeCertificate2, + ViewSyncFinalizeCertificate, }, simple_vote::{QuorumData, UpgradeProposalData, VersionedVoteData}, traits::{ @@ -147,6 +147,41 @@ pub struct DaProposal { pub view_number: TYPES::View, } +/// A proposal to start providing data availability for a block. +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound = "TYPES: NodeType")] +pub struct DaProposal2 { + /// Encoded transactions in the block to be applied. + pub encoded_transactions: Arc<[u8]>, + /// Metadata of the block to be applied. + pub metadata: >::Metadata, + /// View this proposal applies to + pub view_number: TYPES::View, + /// Epoch this proposal applies to + pub epoch_number: TYPES::Epoch, +} + +impl From> for DaProposal2 { + fn from(da_proposal: DaProposal) -> Self { + Self { + encoded_transactions: da_proposal.encoded_transactions, + metadata: da_proposal.metadata, + view_number: da_proposal.view_number, + epoch_number: TYPES::Epoch::new(0), + } + } +} + +impl From> for DaProposal { + fn from(da_proposal2: DaProposal2) -> Self { + Self { + encoded_transactions: da_proposal2.encoded_transactions, + metadata: da_proposal2.metadata, + view_number: da_proposal2.view_number, + } + } +} + /// A proposal to upgrade the network #[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound = "TYPES: NodeType")] @@ -239,7 +274,7 @@ pub enum ViewChangeEvidence { /// Holds a timeout certificate. Timeout(TimeoutCertificate), /// Holds a view sync finalized certificate. - ViewSync(ViewSyncFinalizeCertificate2), + ViewSync(ViewSyncFinalizeCertificate), } impl ViewChangeEvidence { @@ -420,13 +455,13 @@ impl From> for QuorumProposal2 { } impl From> for QuorumProposal { - fn from(quorum_proposal: QuorumProposal2) -> Self { + fn from(quorum_proposal2: QuorumProposal2) -> Self { Self { - block_header: quorum_proposal.block_header, - view_number: quorum_proposal.view_number, - justify_qc: quorum_proposal.justify_qc.to_qc(), - upgrade_certificate: quorum_proposal.upgrade_certificate, - proposal_certificate: quorum_proposal.view_change_evidence, + block_header: quorum_proposal2.block_header, + view_number: quorum_proposal2.view_number, + justify_qc: quorum_proposal2.justify_qc.to_qc(), + upgrade_certificate: quorum_proposal2.upgrade_certificate, + proposal_certificate: quorum_proposal2.view_change_evidence, } } } @@ -455,6 +490,12 @@ impl HasViewNumber for DaProposal { } } +impl HasViewNumber for DaProposal2 { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + impl HasViewNumber for VidDisperse { fn view_number(&self) -> TYPES::View { self.view_number @@ -1226,6 +1267,9 @@ pub struct PackedBundle { /// The view number that this block is associated with. pub view_number: TYPES::View, + /// The view number that this block is associated with. + pub epoch_number: TYPES::Epoch, + /// The sequencing fee for submitting bundles. pub sequencing_fees: Vec1>, @@ -1242,6 +1286,7 @@ impl PackedBundle { encoded_transactions: Arc<[u8]>, metadata: >::Metadata, view_number: TYPES::View, + epoch_number: TYPES::Epoch, sequencing_fees: Vec1>, vid_precompute: Option, auction_result: Option, @@ -1250,6 +1295,7 @@ impl PackedBundle { encoded_transactions, metadata, view_number, + epoch_number, sequencing_fees, vid_precompute, auction_result, diff --git a/types/src/event.rs b/types/src/event.rs index c4ae586866..0a79913d88 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -11,12 +11,13 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DaProposal, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare}, + data::{DaProposal2, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate2, traits::{node_implementation::NodeType, ValidatedState}, }; + /// A status event emitted by a `HotShot` instance /// /// This includes some metadata, such as the stage and view number that the event was generated in, @@ -151,7 +152,7 @@ pub enum EventType { /// or submitted to the network by us DaProposal { /// Contents of the proposal - proposal: Proposal>, + proposal: Proposal>, /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, diff --git a/types/src/message.rs b/types/src/message.rs index eb4861d22e..062ae9c96f 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -26,16 +26,17 @@ use vbs::{ use crate::{ data::{ - DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, VidDisperseShare, + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, + VidDisperseShare, }, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, QuorumCertificate2, UpgradeCertificate, ViewSyncCommitCertificate2, - ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DaCertificate, DaCertificate2, QuorumCertificate2, UpgradeCertificate, + ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, }, simple_vote::{ - DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, + DaVote, DaVote2, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, + ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ election::Membership, @@ -183,13 +184,13 @@ pub enum GeneralConsensusMessage { ViewSyncFinalizeVote(ViewSyncFinalizeVote), /// Message with a view sync pre-commit certificate - ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), + ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate), /// Message with a view sync commit certificate - ViewSyncCommitCertificate(ViewSyncCommitCertificate2), + ViewSyncCommitCertificate(ViewSyncCommitCertificate), /// Message with a view sync finalize certificate - ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), + ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate), /// Message with a Timeout vote TimeoutVote(TimeoutVote), @@ -200,6 +201,15 @@ pub enum GeneralConsensusMessage { /// Message with an upgrade vote UpgradeVote(UpgradeVote), + /// Message for the next leader containing our highest QC + HighQc(QuorumCertificate2), + + /// Message with a quorum proposal. + Proposal2(Proposal>), + + /// Message with a quorum vote. + Vote2(QuorumVote2), + /// A peer node needs a proposal from the leader. ProposalRequested( ProposalRequestPayload, @@ -209,8 +219,8 @@ pub enum GeneralConsensusMessage { /// A replica has responded with a valid proposal. ProposalResponse(Proposal>), - /// Message for the next leader containing our highest QC - HighQc(QuorumCertificate2), + /// A replica has responded with a valid proposal. + Proposal2Response(Proposal>), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -230,6 +240,15 @@ pub enum DaConsensusMessage { /// /// Like [`DaProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. VidDisperseMsg(Proposal>), + + /// Proposal for data availability committee + DaProposal2(Proposal>), + + /// vote for data availability committee + DaVote2(DaVote2), + + /// Certificate data is available + DaCertificate2(DaCertificate2), } /// Messages for sequencing consensus. @@ -258,6 +277,9 @@ impl SequencingMessage { GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() } + GeneralConsensusMessage::Proposal2Response(proposal) => { + proposal.data.view_number() + } GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { @@ -277,6 +299,12 @@ impl SequencingMessage { GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), GeneralConsensusMessage::HighQc(qc) => qc.view_number(), + GeneralConsensusMessage::Proposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.view_number() + } + GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), } } SequencingMessage::Da(da_message) => { @@ -289,6 +317,13 @@ impl SequencingMessage { DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), + DaConsensusMessage::DaProposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.view_number() + } + DaConsensusMessage::DaVote2(vote_message) => vote_message.view_number(), + DaConsensusMessage::DaCertificate2(cert) => cert.view_number, } } } diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index f2cc8cd689..f88e549ede 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -24,8 +24,9 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, QuorumData, QuorumData2, QuorumMarker, TimeoutData, UpgradeProposalData, - VersionedVoteData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, + DaData, DaData2, QuorumData, QuorumData2, QuorumMarker, TimeoutData, TimeoutData2, + UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncCommitData2, + ViewSyncFinalizeData, ViewSyncFinalizeData2, ViewSyncPreCommitData, ViewSyncPreCommitData2, Voteable, }, traits::{ @@ -225,6 +226,94 @@ impl> Certificate } } +impl> Certificate> + for SimpleCertificate, THRESHOLD> +{ + type Voteable = DaData2; + type Threshold = THRESHOLD; + + fn create_signed_certificate( + vote_commitment: Commitment, V>>, + data: Self::Voteable, + sig: ::QcType, + view: TYPES::View, + ) -> Self { + let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); + + SimpleCertificate { + data, + vote_commitment: Commitment::from_raw(vote_commitment_bytes), + view_number: view, + signatures: Some(sig), + _pd: PhantomData, + } + } + async fn is_valid_cert( + &self, + stake_table: Vec<::StakeTableEntry>, + threshold: NonZeroU64, + upgrade_lock: &UpgradeLock, + ) -> bool { + if self.view_number == TYPES::View::genesis() { + return true; + } + let real_qc_pp = ::public_parameter( + stake_table, + U256::from(u64::from(threshold)), + ); + let Ok(commit) = self.data_commitment(upgrade_lock).await else { + return false; + }; + ::check( + &real_qc_pp, + commit.as_ref(), + self.signatures.as_ref().unwrap(), + ) + } + /// Proxy's to `Membership.stake` + fn stake_table_entry>( + membership: &MEMBERSHIP, + pub_key: &TYPES::SignatureKey, + epoch: TYPES::Epoch, + ) -> Option<::StakeTableEntry> { + membership.da_stake(pub_key, epoch) + } + + /// Proxy's to `Membership.da_stake_table` + fn stake_table>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry> { + membership.da_stake_table(epoch) + } + /// Proxy's to `Membership.da_total_nodes` + fn total_nodes>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> usize { + membership.da_total_nodes(epoch) + } + fn threshold>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> u64 { + membership.da_success_threshold(epoch).into() + } + fn data(&self) -> &Self::Voteable { + &self.data + } + async fn data_commitment( + &self, + upgrade_lock: &UpgradeLock, + ) -> Result, V>>> { + Ok( + VersionedVoteData::new(self.data.clone(), self.view_number, upgrade_lock) + .await? + .commit(), + ) + } +} + impl> Certificate for SimpleCertificate { @@ -388,6 +477,7 @@ impl QuorumCertificate { let bytes: [u8; 32] = self.data.leaf_commit.into(); let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), + epoch: TYPES::Epoch::new(0), }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -424,23 +514,248 @@ impl QuorumCertificate2 { } } +impl DaCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_dac2(self) -> DaCertificate2 { + let data = DaData2 { + payload_commit: self.data.payload_commit, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl DaCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_dac(self) -> DaCertificate { + let data = DaData { + payload_commit: self.data.payload_commit, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncPreCommitCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> ViewSyncPreCommitCertificate2 { + let data = ViewSyncPreCommitData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncPreCommitCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> ViewSyncPreCommitCertificate { + let data = ViewSyncPreCommitData { + relay: self.data.relay, + round: self.data.round, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncCommitCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> ViewSyncCommitCertificate2 { + let data = ViewSyncCommitData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncCommitCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> ViewSyncCommitCertificate { + let data = ViewSyncCommitData { + relay: self.data.relay, + round: self.data.round, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncFinalizeCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> ViewSyncFinalizeCertificate2 { + let data = ViewSyncFinalizeData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncFinalizeCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> ViewSyncFinalizeCertificate { + let data = ViewSyncFinalizeData { + relay: self.data.relay, + round: self.data.round, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl TimeoutCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> TimeoutCertificate2 { + let data = TimeoutData2 { + view: self.data.view, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl TimeoutCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> TimeoutCertificate { + let data = TimeoutData { + view: self.data.view, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` over `QuorumData` pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; /// Type alias for a `QuorumCertificate2`, which is a `SimpleCertificate` over `QuorumData2` pub type QuorumCertificate2 = SimpleCertificate, SuccessThreshold>; -/// Type alias for a DA certificate over `DaData` +/// Type alias for a `DaCertificate`, which is a `SimpleCertificate` over `DaData` pub type DaCertificate = SimpleCertificate; +/// Type alias for a `DaCertificate2`, which is a `SimpleCertificate` over `DaData2` +pub type DaCertificate2 = SimpleCertificate, SuccessThreshold>; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `TimeoutCertificate2`, which is a `SimpleCertificate` over `TimeoutData2` +pub type TimeoutCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `ViewSyncPreCommit` certificate over a view number -pub type ViewSyncPreCommitCertificate2 = +pub type ViewSyncPreCommitCertificate = SimpleCertificate, OneHonestThreshold>; +/// Type alias for a `ViewSyncPreCommitCertificate2`, which is a `SimpleCertificate` over `ViewSyncPreCommitData2` +pub type ViewSyncPreCommitCertificate2 = + SimpleCertificate, OneHonestThreshold>; /// Type alias for a `ViewSyncCommit` certificate over a view number -pub type ViewSyncCommitCertificate2 = +pub type ViewSyncCommitCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `ViewSyncCommitCertificate2`, which is a `SimpleCertificate` over `ViewSyncCommitData2` +pub type ViewSyncCommitCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `ViewSyncFinalize` certificate over a view number -pub type ViewSyncFinalizeCertificate2 = +pub type ViewSyncFinalizeCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `ViewSyncFinalizeCertificate2`, which is a `SimpleCertificate` over `ViewSyncFinalizeData2` +pub type ViewSyncFinalizeCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` pub type UpgradeCertificate = SimpleCertificate, UpgradeThreshold>; diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 138f730fb9..ef69520785 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -17,7 +17,7 @@ use crate::{ data::{Leaf, Leaf2}, message::UpgradeLock, traits::{ - node_implementation::{NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, vid::VidCommitment, @@ -40,6 +40,8 @@ pub struct QuorumData { pub struct QuorumData2 { /// Commitment to the leaf pub leaf_commit: Commitment>, + /// Epoch number + pub epoch: TYPES::Epoch, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. @@ -48,12 +50,27 @@ pub struct DaData { pub payload_commit: VidCommitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a DA vote. +pub struct DaData2 { + /// Commitment to a block payload + pub payload_commit: VidCommitment, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a timeout vote. pub struct TimeoutData { /// View the timeout is for pub view: TYPES::View, } - +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a timeout vote. +pub struct TimeoutData2 { + /// View the timeout is for + pub view: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Pre Commit vote. pub struct ViewSyncPreCommitData { @@ -63,6 +80,16 @@ pub struct ViewSyncPreCommitData { pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Pre Commit vote. +pub struct ViewSyncPreCommitData2 { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Commit vote. pub struct ViewSyncCommitData { /// The relay this vote is intended for @@ -71,6 +98,16 @@ pub struct ViewSyncCommitData { pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Commit vote. +pub struct ViewSyncCommitData2 { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Finalize vote. pub struct ViewSyncFinalizeData { /// The relay this vote is intended for @@ -79,6 +116,16 @@ pub struct ViewSyncFinalizeData { pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Finalize vote. +pub struct ViewSyncFinalizeData2 { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Upgrade vote. pub struct UpgradeProposalData { /// The old version that we are upgrading from. @@ -96,6 +143,18 @@ pub struct UpgradeProposalData { pub new_version_first_view: TYPES::View, } +/// Data used for an upgrade once epochs are implemented +pub struct UpgradeData2 { + /// The old version that we are upgrading from + pub old_version: Version, + /// The new version that we are upgrading to + pub new_version: Version, + /// A unique identifier for the specific protocol being voted on + pub hash: Vec, + /// The first epoch in which the upgrade will be in effect + pub epoch: TYPES::Epoch, +} + /// Marker trait for data or commitments that can be voted on. /// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait /// Sealing this trait prevents creating new vote types outside this file. @@ -120,9 +179,13 @@ mod sealed { impl QuorumMarker for QuorumData {} impl QuorumMarker for QuorumData2 {} impl QuorumMarker for TimeoutData {} +impl QuorumMarker for TimeoutData2 {} impl QuorumMarker for ViewSyncPreCommitData {} impl QuorumMarker for ViewSyncCommitData {} impl QuorumMarker for ViewSyncFinalizeData {} +impl QuorumMarker for ViewSyncPreCommitData2 {} +impl QuorumMarker for ViewSyncCommitData2 {} +impl QuorumMarker for ViewSyncFinalizeData2 {} impl QuorumMarker for UpgradeProposalData {} /// A simple yes vote over some votable type. @@ -272,9 +335,18 @@ impl Committable for QuorumData { impl Committable for QuorumData2 { fn commit(&self) -> Commitment { - committable::RawCommitmentBuilder::new("Quorum data") - .var_size_bytes(self.leaf_commit.as_ref()) - .finalize() + let QuorumData2 { leaf_commit, epoch } = self; + + if **epoch == 0 { + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()) + .finalize() + } else { + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()) + .u64(**epoch) + .finalize() + } } } @@ -286,6 +358,23 @@ impl Committable for TimeoutData { } } +impl Committable for TimeoutData2 { + fn commit(&self) -> Commitment { + let TimeoutData2 { view, epoch } = self; + + if **epoch == 0 { + committable::RawCommitmentBuilder::new("Timeout data") + .u64(**view) + .finalize() + } else { + committable::RawCommitmentBuilder::new("Timeout data") + .u64(**view) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for DaData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("DA data") @@ -294,6 +383,25 @@ impl Committable for DaData { } } +impl Committable for DaData2 { + fn commit(&self) -> Commitment { + let DaData2 { + payload_commit, + epoch, + } = self; + if **epoch == 0 { + committable::RawCommitmentBuilder::new("DA data") + .var_size_bytes(payload_commit.as_ref()) + .finalize() + } else { + committable::RawCommitmentBuilder::new("DA data") + .var_size_bytes(payload_commit.as_ref()) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for UpgradeProposalData { fn commit(&self) -> Commitment { let builder = committable::RawCommitmentBuilder::new("Upgrade data"); @@ -310,6 +418,26 @@ impl Committable for UpgradeProposalData { } } +impl Committable for UpgradeData2 { + fn commit(&self) -> Commitment { + let UpgradeData2 { + old_version, + new_version, + hash, + epoch, + } = self; + + committable::RawCommitmentBuilder::new("Upgrade data") + .u16(old_version.minor) + .u16(old_version.major) + .u16(new_version.minor) + .u16(new_version.major) + .var_size_bytes(hash.as_slice()) + .u64(**epoch) + .finalize() + } +} + /// This implements commit for all the types which contain a view and relay public key. fn view_and_relay_commit( view: TYPES::View, @@ -326,17 +454,78 @@ impl Committable for ViewSyncPreCommitData { } } +impl Committable for ViewSyncPreCommitData2 { + fn commit(&self) -> Commitment { + let ViewSyncPreCommitData2 { + relay, + round, + epoch, + } = self; + + if **epoch == 0 { + view_and_relay_commit::(*round, *relay, "View Sync Precommit") + } else { + committable::RawCommitmentBuilder::new("View Sync Precommit") + .u64(*relay) + .u64(**round) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for ViewSyncFinalizeData { fn commit(&self) -> Commitment { view_and_relay_commit::(self.round, self.relay, "View Sync Finalize") } } + +impl Committable for ViewSyncFinalizeData2 { + fn commit(&self) -> Commitment { + let ViewSyncFinalizeData2 { + relay, + round, + epoch, + } = self; + + if **epoch == 0 { + view_and_relay_commit::(*round, *relay, "View Sync Finalize") + } else { + committable::RawCommitmentBuilder::new("View Sync Finalize") + .u64(*relay) + .u64(**round) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for ViewSyncCommitData { fn commit(&self) -> Commitment { view_and_relay_commit::(self.round, self.relay, "View Sync Commit") } } +impl Committable for ViewSyncCommitData2 { + fn commit(&self) -> Commitment { + let ViewSyncCommitData2 { + relay, + round, + epoch, + } = self; + + if **epoch == 0 { + view_and_relay_commit::(*round, *relay, "View Sync Commit") + } else { + committable::RawCommitmentBuilder::new("View Sync Commit") + .u64(*relay) + .u64(**round) + .u64(**epoch) + .finalize() + } + } +} + // impl votable for all the data types in this file sealed marker should ensure nothing is accidently // implemented for structs that aren't "voteable" impl Voteable @@ -352,6 +541,7 @@ impl QuorumVote { let signature = self.signature; let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), + epoch: TYPES::Epoch::new(0), }; let view_number = self.view_number; @@ -382,21 +572,73 @@ impl QuorumVote2 { } } +impl DaVote { + /// Convert a `QuorumVote` to a `QuorumVote2` + pub fn to_vote2(self) -> DaVote2 { + let signature = self.signature; + let data = DaData2 { + payload_commit: self.data.payload_commit, + epoch: TYPES::Epoch::new(0), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl DaVote2 { + /// Convert a `QuorumVote2` to a `QuorumVote` + pub fn to_vote(self) -> DaVote { + let signature = self.signature; + let data = DaData { + payload_commit: self.data.payload_commit, + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file + /// Quorum vote Alias pub type QuorumVote = SimpleVote>; // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file /// Quorum vote Alias pub type QuorumVote2 = SimpleVote>; + /// DA vote type alias pub type DaVote = SimpleVote; +/// DA vote 2 type alias +pub type DaVote2 = SimpleVote>; + /// Timeout Vote type alias pub type TimeoutVote = SimpleVote>; -/// View Sync Commit Vote type alias -pub type ViewSyncCommitVote = SimpleVote>; +/// Timeout Vote 2 type alias +pub type TimeoutVote2 = SimpleVote>; + /// View Sync Pre Commit Vote type alias pub type ViewSyncPreCommitVote = SimpleVote>; +/// View Sync Pre Commit Vote 2 type alias +pub type ViewSyncPreCommitVote2 = SimpleVote>; /// View Sync Finalize Vote type alias pub type ViewSyncFinalizeVote = SimpleVote>; +/// View Sync Finalize Vote 2 type alias +pub type ViewSyncFinalizeVote2 = SimpleVote>; +/// View Sync Commit Vote type alias +pub type ViewSyncCommitVote = SimpleVote>; +/// View Sync Commit Vote 2 type alias +pub type ViewSyncCommitVote2 = SimpleVote>; + /// Upgrade proposal vote pub type UpgradeVote = SimpleVote>; +/// Upgrade proposal 2 vote +pub type UpgradeVote2 = SimpleVote>; diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 7782ef0101..a0e226cdbd 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -18,7 +18,9 @@ use jf_vid::VidScheme; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::{DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, + }, event::HotShotAction, message::Proposal, simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, @@ -36,6 +38,12 @@ pub trait Storage: Send + Sync + Clone { proposal: &Proposal>, vid_commit: ::Commit, ) -> Result<()>; + /// Add a proposal to the stored DA proposals. + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> Result<()>; /// Add a proposal we sent to the store async fn append_proposal( &self, From 351dfd9912731efb02ad63095dd4cc44007cf214 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Mon, 9 Dec 2024 19:04:00 +0200 Subject: [PATCH 1328/1393] fix: typos in documentation files (#3949) * fix: typos * fix `includs` typo in logic for collecting peer config * Fix spelling mistake in `transactions` in block return logic * fix typo: accidently -> accidentally in sealed marker logic * fix spelling mistake in event message --- macros/src/lib.rs | 2 +- orchestrator/api.toml | 2 +- testing/src/block_builder/simple.rs | 2 +- types/src/simple_vote.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 80f49e1d6a..c409cb6b58 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -27,7 +27,7 @@ struct TypePathBracketedArray { /// description of a crosstest #[derive(derive_builder::Builder, Debug, Clone)] struct CrossTestData { - /// imlementations + /// implementations impls: ExprArray, /// builder impl diff --git a/orchestrator/api.toml b/orchestrator/api.toml index 25fcb2b7c2..7a18fbb193 100644 --- a/orchestrator/api.toml +++ b/orchestrator/api.toml @@ -44,7 +44,7 @@ Supply whether or not we are DA. [route.peer_pubconfig_ready] PATH = ["peer_pub_ready"] DOC = """ -Get whether the node can collect the final config which includs all peer's public config/info like public keys, returns a boolean. +Get whether the node can collect the final config which includes all peer's public config/info like public keys, returns a boolean. """ # POST the updated config with all peers' public keys / configs diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index 8376633afc..e0af714f9a 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -238,7 +238,7 @@ where .await; if transactions.is_empty() { - // We don't want to return an empty block if we have no trasnactions, as we would end up + // We don't want to return an empty block if we have no transactions, as we would end up // driving consensus to produce empty blocks extremely quickly when mempool is empty. // Instead, we return no blocks, so that view leader will keep asking for blocks until // either we have something non-trivial to propose, or leader runs out of time to propose, diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index ef69520785..98af9fe67d 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -526,7 +526,7 @@ impl Committable for ViewSyncCommitData2 { } } -// impl votable for all the data types in this file sealed marker should ensure nothing is accidently +// impl votable for all the data types in this file sealed marker should ensure nothing is accidentally // implemented for structs that aren't "voteable" impl Voteable for V From 53d2379031b042a326b452604d8045162519952b Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 11 Dec 2024 03:19:23 -0500 Subject: [PATCH 1329/1393] update libp2p, fix audit (#3956) --- libp2p-networking/src/network/node.rs | 3 +- libp2p-networking/src/network/transport.rs | 37 ++++------------------ 2 files changed, 9 insertions(+), 31 deletions(-) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 1ea25a36d0..279d035949 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -233,7 +233,7 @@ impl NetworkNode { let identify = IdentifyBehaviour::new(identify_cfg); // - Build DHT needed for peer discovery - let mut kconfig = Config::default(); + let mut kconfig = Config::new(StreamProtocol::new("/ipfs/kad/1.0.0")); // 8 hours by default let record_republication_interval = config .republication_interval @@ -603,6 +603,7 @@ impl NetworkNode { agent_version: _, observed_addr: _, }, + connection_id: _, } = *e { let behaviour = self.swarm.behaviour_mut(); diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index 0f3bdf80a9..aac8a66b60 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -14,7 +14,11 @@ use hotshot_types::traits::{ signature_key::SignatureKey, }; use libp2p::{ - core::{muxing::StreamMuxerExt, transport::TransportEvent, StreamMuxer}, + core::{ + muxing::StreamMuxerExt, + transport::{DialOpts, TransportEvent}, + StreamMuxer, + }, identity::PeerId, Transport, }; @@ -314,9 +318,10 @@ where fn dial( &mut self, addr: libp2p::Multiaddr, + opts: DialOpts, ) -> Result> { // Perform the inner dial - let res = self.inner.dial(addr); + let res = self.inner.dial(addr, opts); // Clone the necessary fields let auth_message = Arc::clone(&self.auth_message); @@ -329,27 +334,6 @@ where } } - /// Dial a remote peer as a listener. This function is changed to perform an authentication - /// handshake on top. The flow should be the reverse of the `dial` function and the - /// same as the `poll` function. - fn dial_as_listener( - &mut self, - addr: libp2p::Multiaddr, - ) -> Result> { - // Perform the inner dial - let res = self.inner.dial(addr); - - // Clone the necessary fields - let auth_message = Arc::clone(&self.auth_message); - let stake_table = Arc::clone(&self.stake_table); - - // If the dial was successful, perform the authentication handshake on top - match res { - Ok(dial) => Ok(Self::gen_handshake(dial, false, stake_table, auth_message)), - Err(err) => Err(err), - } - } - /// This function is where we perform the authentication handshake for _incoming_ connections. /// The flow in this case is the reverse of the `dial` function: we first verify the remote peer's /// authentication, and then authenticate with them. @@ -420,13 +404,6 @@ where fn remove_listener(&mut self, id: libp2p::core::transport::ListenerId) -> bool { self.inner.remove_listener(id) } - fn address_translation( - &self, - listen: &libp2p::Multiaddr, - observed: &libp2p::Multiaddr, - ) -> Option { - self.inner.address_translation(listen, observed) - } fn listen_on( &mut self, id: libp2p::core::transport::ListenerId, From 7ac4343a1dfe54d166926a883061553d890ccc87 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:44:55 -0800 Subject: [PATCH 1330/1393] 3917 pull epoch from qc in quorum_proposal::wait_for_qc_event (#3955) --- task-impls/src/quorum_proposal/handlers.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index e8b124b1e2..d68d08b538 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -25,9 +25,7 @@ use hotshot_types::{ message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ - block_contents::BlockHeader, - election::Membership, - node_implementation::{ConsensusTime, NodeType}, + block_contents::BlockHeader, election::Membership, node_implementation::NodeType, signature_key::SignatureKey, }, vote::{Certificate, HasViewNumber}, @@ -128,9 +126,8 @@ impl ProposalDependencyHandle { .is_valid_cert( // TODO take epoch from `qc` // https://github.com/EspressoSystems/HotShot/issues/3917 - self.quorum_membership.stake_table(TYPES::Epoch::new(0)), - self.quorum_membership - .success_threshold(TYPES::Epoch::new(0)), + self.quorum_membership.stake_table(qc.data.epoch), + self.quorum_membership.success_threshold(qc.data.epoch), &self.upgrade_lock, ) .await From 601f665e3394485ffcdadd81b4cfef0cf62c0e44 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 12 Dec 2024 17:29:21 +0100 Subject: [PATCH 1331/1393] Lr/variable stake table (#3893) * Initial commit * WIP: adding epoch to proposal and vote data, not compiling yet * Make it compile * Adjust tests * Add a test type for two stake tables for even and odd epochs * Debugging * Fix extended voting * Try "in epoch transition" approach * Continue debugging * Use correct epoch with Membership * Adjust tests and lints * Adapt to variable stake table after merge * Fix accidentally pulled bug in eQC rule * Commit includes epoch for vote and proposal data types * Prune dependencies (#3787) * add new message types and gate outgoing messages * Use the proper message for the proposal response * Modify commit for `Leaf2` and `QuorumData2` * Adjust tests * Clean up debug traces * Add TODO * Adjust marketplace ver number * fix types * fix viewsync, timeout and vid * fix upgrade and tests * add VidDisperseMsg2 response * remove extraneous check from quorum_proposal_recv * simplify leaf commitment * remove epoch from commitments * fix merge * revert transaction view change logic * impl HasEpoch for VidDisperse, VidDisperseShare2 and DaData2 --------- Co-authored-by: Artemii Gerasimovich Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- example-types/src/node_types.rs | 90 +++- example-types/src/storage_types.rs | 25 ++ hotshot/src/lib.rs | 33 +- hotshot/src/tasks/task_state.rs | 1 + hotshot/src/traits/election/mod.rs | 2 + .../traits/election/two_static_committees.rs | 411 ++++++++++++++++++ hotshot/src/types/handle.rs | 4 +- task-impls/src/consensus/handlers.rs | 34 +- task-impls/src/consensus/mod.rs | 52 ++- task-impls/src/da.rs | 22 +- task-impls/src/events.rs | 56 +-- task-impls/src/helpers.rs | 64 +-- task-impls/src/network.rs | 292 +++++++++---- task-impls/src/quorum_proposal/handlers.rs | 22 +- task-impls/src/quorum_proposal/mod.rs | 61 ++- .../src/quorum_proposal_recv/handlers.rs | 26 +- task-impls/src/quorum_proposal_recv/mod.rs | 3 - task-impls/src/quorum_vote/handlers.rs | 19 +- task-impls/src/quorum_vote/mod.rs | 26 +- task-impls/src/request.rs | 11 +- task-impls/src/response.rs | 7 +- task-impls/src/transactions.rs | 20 +- task-impls/src/vid.rs | 21 +- task-impls/src/view_sync.rs | 95 ++-- task-impls/src/vote_collection.rs | 68 +-- testing/src/helpers.rs | 41 +- testing/src/spinning_task.rs | 7 +- testing/src/test_runner.rs | 14 +- testing/src/view_generator.rs | 55 ++- testing/tests/tests_1/da_task.rs | 8 +- testing/tests/tests_1/message.rs | 8 +- testing/tests/tests_1/quorum_proposal_task.rs | 8 +- testing/tests/tests_1/test_success.rs | 20 +- testing/tests/tests_1/vid_task.rs | 4 +- testing/tests/tests_1/view_sync_task.rs | 17 +- testing/tests/tests_3/memory_network.rs | 2 + types/src/consensus.rs | 38 +- types/src/data.rs | 317 ++++++++++++-- types/src/event.rs | 6 +- types/src/message.rs | 95 +++- types/src/simple_certificate.rs | 23 +- types/src/simple_vote.rs | 315 ++++++++++---- types/src/traits/node_implementation.rs | 2 + types/src/traits/storage.rs | 4 + types/src/utils.rs | 40 +- types/src/vid.rs | 4 +- types/src/vote.rs | 4 +- 47 files changed, 1912 insertions(+), 585 deletions(-) create mode 100644 hotshot/src/traits/election/two_static_committees.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 01160e1cb9..c7899aa3de 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -15,6 +15,7 @@ use hotshot::traits::{ randomized_committee_members::RandomizedCommitteeMembers, static_committee::StaticCommittee, static_committee_leader_two_views::StaticCommitteeLeaderForTwoViews, + two_static_committees::TwoStaticCommittees, }, implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, @@ -51,6 +52,8 @@ use crate::{ /// to select our traits pub struct TestTypes; impl NodeType for TestTypes { + const EPOCH_HEIGHT: u64 = 10; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -81,6 +84,8 @@ impl NodeType for TestTypes { /// to select our traits pub struct TestTypesRandomizedLeader; impl NodeType for TestTypesRandomizedLeader { + const EPOCH_HEIGHT: u64 = 10; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -114,6 +119,8 @@ pub struct TestTypesRandomizedCommitteeMembers { } impl NodeType for TestTypesRandomizedCommitteeMembers { + const EPOCH_HEIGHT: u64 = 10; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -145,6 +152,8 @@ impl NodeType for TestTypesRandomizedCommitteeMember /// to select our traits pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { + const EPOCH_HEIGHT: u64 = 10; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -158,6 +167,38 @@ impl NodeType for TestConsecutiveLeaderTypes { type BuilderSignatureKey = BuilderKey; } +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +/// filler struct to implement node type and allow us +/// to select our traits +pub struct TestTwoStakeTablesTypes; +impl NodeType for TestTwoStakeTablesTypes { + const EPOCH_HEIGHT: u64 = 10; + + type AuctionResult = TestAuctionResult; + type View = ViewNumber; + type Epoch = EpochNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = TwoStaticCommittees; + type BuilderSignatureKey = BuilderKey; +} + /// The Push CDN implementation #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct PushCdnImpl; @@ -264,7 +305,7 @@ impl Versions for EpochsTestVersions { 0, 0, ]; - type Marketplace = StaticVersion<0, 99>; + type Marketplace = StaticVersion<0, 3>; type Epochs = StaticVersion<0, 4>; } @@ -273,7 +314,10 @@ impl Versions for EpochsTestVersions { mod tests { use committable::{Commitment, Committable}; use hotshot_types::{ - message::UpgradeLock, simple_vote::VersionedVoteData, + data::EpochNumber, + impl_has_epoch, + message::UpgradeLock, + simple_vote::{HasEpoch, VersionedVoteData}, traits::node_implementation::ConsensusTime, }; use serde::{Deserialize, Serialize}; @@ -281,11 +325,12 @@ mod tests { use crate::node_types::{MarketplaceTestVersions, NodeType, TestTypes}; #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Hash, Eq)] /// Dummy data used for test - struct TestData { + struct TestData { data: u64, + epoch: TYPES::Epoch, } - impl Committable for TestData { + impl Committable for TestData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("Test data") .u64(self.data) @@ -293,32 +338,35 @@ mod tests { } } + impl_has_epoch!(TestData); + #[tokio::test(flavor = "multi_thread")] /// Test that the view number affects the commitment post-marketplace async fn test_versioned_commitment_includes_view() { let upgrade_lock = UpgradeLock::new(); - let data = TestData { data: 10 }; + let data = TestData { + data: 10, + epoch: EpochNumber::new(0), + }; let view_0 = ::View::new(0); let view_1 = ::View::new(1); - let versioned_data_0 = - VersionedVoteData::::new( - data, - view_0, - &upgrade_lock, - ) - .await - .unwrap(); - let versioned_data_1 = - VersionedVoteData::::new( - data, - view_1, - &upgrade_lock, - ) - .await - .unwrap(); + let versioned_data_0 = VersionedVoteData::< + TestTypes, + TestData, + MarketplaceTestVersions, + >::new(data, view_0, &upgrade_lock) + .await + .unwrap(); + let versioned_data_1 = VersionedVoteData::< + TestTypes, + TestData, + MarketplaceTestVersions, + >::new(data, view_1, &upgrade_lock) + .await + .unwrap(); let versioned_data_commitment_0: [u8; 32] = versioned_data_0.commit().into(); let versioned_data_commitment_1: [u8; 32] = versioned_data_1.commit().into(); diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 1a666c737e..4f72a930fb 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -16,6 +16,7 @@ use hotshot_types::{ consensus::CommitmentMap, data::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, }, event::HotShotAction, message::Proposal, @@ -36,9 +37,15 @@ type VidShares = HashMap< ::View, HashMap<::SignatureKey, Proposal>>, >; +type VidShares2 = HashMap< + ::View, + HashMap<::SignatureKey, Proposal>>, +>; + #[derive(Clone, Debug)] pub struct TestStorageState { vids: VidShares, + vid2: VidShares2, das: HashMap>>, da2s: HashMap>>, proposals: BTreeMap>>, @@ -53,6 +60,7 @@ impl Default for TestStorageState { fn default() -> Self { Self { vids: HashMap::new(), + vid2: HashMap::new(), das: HashMap::new(), da2s: HashMap::new(), proposals: BTreeMap::new(), @@ -131,6 +139,23 @@ impl Storage for TestStorage { Ok(()) } + async fn append_vid2( + &self, + proposal: &Proposal>, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + inner + .vid2 + .entry(proposal.data.view_number) + .or_default() + .insert(proposal.data.recipient_key.clone(), proposal.clone()); + Ok(()) + } + async fn append_da( &self, proposal: &Proposal>, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 83d110e380..16fa47f31d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -49,10 +49,10 @@ pub use hotshot_types::error::HotShotError; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, OuterConsensus, View, ViewInner}, constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, - data::{Leaf, Leaf2, QuorumProposal, QuorumProposal2}, + data::{Leaf2, QuorumProposal, QuorumProposal2}, event::{EventType, LeafInfo}, message::{convert_proposal, DataMessage, Message, MessageKind, Proposal}, - simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, + simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -63,15 +63,16 @@ use hotshot_types::{ storage::Storage, EncodeBytes, }, + utils::epoch_from_block_number, HotShotConfig, }; -// -- Rexports -// External /// Reexport rand crate pub use rand; use tokio::{spawn, time::sleep}; use tracing::{debug, instrument, trace}; +// -- Rexports +// External use crate::{ tasks::{add_consensus_tasks, add_network_tasks}, traits::NodeImplementation, @@ -291,6 +292,10 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext(&validated_state, self.instance_state.as_ref()) - .await - .to_qc2(), + QuorumCertificate2::genesis::( + &validated_state, + self.instance_state.as_ref(), + ) + .await, ); broadcast_event( @@ -1010,14 +1019,10 @@ impl HotShotInitializer { instance_state: TYPES::InstanceState, ) -> Result> { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); - let high_qc = QuorumCertificate::genesis::(&validated_state, &instance_state) - .await - .to_qc2(); + let high_qc = QuorumCertificate2::genesis::(&validated_state, &instance_state).await; Ok(Self { - inner: Leaf::genesis(&validated_state, &instance_state) - .await - .into(), + inner: Leaf2::genesis(&validated_state, &instance_state).await, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::View::new(0), diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index f97905b6a4..2c8d8607a4 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -261,6 +261,7 @@ impl, V: Versions> CreateTaskState Self { latest_proposed_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, proposal_dependencies: BTreeMap::new(), consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), diff --git a/hotshot/src/traits/election/mod.rs b/hotshot/src/traits/election/mod.rs index 914b9bbb33..5cc908a6d6 100644 --- a/hotshot/src/traits/election/mod.rs +++ b/hotshot/src/traits/election/mod.rs @@ -17,6 +17,8 @@ pub mod static_committee; /// static (round robin leader for 2 consecutive views) committee election pub mod static_committee_leader_two_views; +/// two static (round robin) committees for even and odd epochs +pub mod two_static_committees; /// general helpers pub mod helpers; diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs new file mode 100644 index 0000000000..25af69905c --- /dev/null +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -0,0 +1,411 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; + +use hotshot_types::{ + traits::{ + election::Membership, + node_implementation::NodeType, + signature_key::{SignatureKey, StakeTableEntryType}, + }, + PeerConfig, +}; +use primitive_types::U256; +use utils::anytrace::Result; + +/// Tuple type for eligible leaders +type EligibleLeaders = ( + Vec<<::SignatureKey as SignatureKey>::StakeTableEntry>, + Vec<<::SignatureKey as SignatureKey>::StakeTableEntry>, +); + +/// Tuple type for stake tables +type StakeTables = ( + Vec<<::SignatureKey as SignatureKey>::StakeTableEntry>, + Vec<<::SignatureKey as SignatureKey>::StakeTableEntry>, +); + +/// Tuple type for indexed stake tables +type IndexedStakeTables = ( + BTreeMap< + ::SignatureKey, + <::SignatureKey as SignatureKey>::StakeTableEntry, + >, + BTreeMap< + ::SignatureKey, + <::SignatureKey as SignatureKey>::StakeTableEntry, + >, +); + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +/// The static committee election +pub struct TwoStaticCommittees { + /// The nodes eligible for leadership. + /// NOTE: This is currently a hack because the DA leader needs to be the quorum + /// leader but without voting rights. + eligible_leaders: EligibleLeaders, + + /// The nodes on the committee and their stake + stake_table: StakeTables, + + /// The nodes on the committee and their stake + da_stake_table: StakeTables, + + /// The nodes on the committee and their stake, indexed by public key + indexed_stake_table: IndexedStakeTables, + + /// The nodes on the committee and their stake, indexed by public key + indexed_da_stake_table: IndexedStakeTables, +} + +impl Membership for TwoStaticCommittees { + type Error = utils::anytrace::Error; + + /// Create a new election + fn new( + committee_members: Vec::SignatureKey>>, + da_members: Vec::SignatureKey>>, + ) -> Self { + // For each eligible leader, get the stake table entry + let eligible_leaders: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + let eligible_leaders1 = eligible_leaders + .iter() + .enumerate() + .filter(|(idx, _)| idx % 2 == 0) + .map(|(_, leader)| leader.clone()) + .collect(); + let eligible_leaders2 = eligible_leaders + .iter() + .enumerate() + .filter(|(idx, _)| idx % 2 == 1) + .map(|(_, leader)| leader.clone()) + .collect(); + + // For each member, get the stake table entry + let members: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + let members1: Vec<::StakeTableEntry> = members + .iter() + .enumerate() + .filter(|(idx, _)| idx % 2 == 0) + .map(|(_, leader)| leader.clone()) + .collect(); + let members2: Vec<::StakeTableEntry> = members + .iter() + .enumerate() + .filter(|(idx, _)| idx % 2 == 1) + .map(|(_, leader)| leader.clone()) + .collect(); + + // For each member, get the stake table entry + let da_members: Vec<::StakeTableEntry> = da_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + let da_members1: Vec<::StakeTableEntry> = da_members + .iter() + .enumerate() + .filter(|(idx, _)| idx % 2 == 0) + .map(|(_, leader)| leader.clone()) + .collect(); + let da_members2: Vec<::StakeTableEntry> = da_members + .iter() + .enumerate() + .filter(|(idx, _)| idx % 2 == 1) + .map(|(_, leader)| leader.clone()) + .collect(); + + // Index the stake table by public key + let indexed_stake_table1: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members1 + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + let indexed_stake_table2: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members2 + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + // Index the stake table by public key + let indexed_da_stake_table1: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = da_members1 + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + let indexed_da_stake_table2: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = da_members2 + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + Self { + eligible_leaders: (eligible_leaders1, eligible_leaders2), + stake_table: (members1, members2), + da_stake_table: (da_members1, da_members2), + indexed_stake_table: (indexed_stake_table1, indexed_stake_table2), + indexed_da_stake_table: (indexed_da_stake_table1, indexed_da_stake_table2), + } + } + + /// Get the stake table for the current view + fn stake_table( + &self, + epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + if *epoch != 0 && *epoch % 2 == 0 { + self.stake_table.0.clone() + } else { + self.stake_table.1.clone() + } + } + + /// Get the stake table for the current view + fn da_stake_table( + &self, + epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + if *epoch != 0 && *epoch % 2 == 0 { + self.da_stake_table.0.clone() + } else { + self.da_stake_table.1.clone() + } + } + + /// Get all members of the committee for the current view + fn committee_members( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> std::collections::BTreeSet<::SignatureKey> { + if *epoch != 0 && *epoch % 2 == 0 { + self.stake_table + .0 + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } else { + self.stake_table + .1 + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + } + + /// Get all members of the committee for the current view + fn da_committee_members( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> std::collections::BTreeSet<::SignatureKey> { + if *epoch != 0 && *epoch % 2 == 0 { + self.da_stake_table + .0 + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } else { + self.da_stake_table + .1 + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + } + + /// Get all eligible leaders of the committee for the current view + fn committee_leaders( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> std::collections::BTreeSet<::SignatureKey> { + if *epoch != 0 && *epoch % 2 == 0 { + self.eligible_leaders + .0 + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } else { + self.eligible_leaders + .1 + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } + } + + /// Get the stake table entry for a public key + fn stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + if *epoch != 0 && *epoch % 2 == 0 { + self.indexed_stake_table.0.get(pub_key).cloned() + } else { + self.indexed_stake_table.1.get(pub_key).cloned() + } + } + + /// Get the DA stake table entry for a public key + fn da_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + // Only return the stake if it is above zero + if *epoch != 0 && *epoch % 2 == 0 { + self.indexed_da_stake_table.0.get(pub_key).cloned() + } else { + self.indexed_da_stake_table.1.get(pub_key).cloned() + } + } + + /// Check if a node has stake in the committee + fn has_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> bool { + if *epoch != 0 && *epoch % 2 == 0 { + self.indexed_stake_table + .0 + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + self.indexed_stake_table + .1 + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } + } + + /// Check if a node has stake in the committee + fn has_da_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> bool { + if *epoch != 0 && *epoch % 2 == 0 { + self.indexed_da_stake_table + .0 + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + self.indexed_da_stake_table + .1 + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } + } + + /// Index the vector of public keys with the current view number + fn lookup_leader( + &self, + view_number: TYPES::View, + epoch: ::Epoch, + ) -> Result { + if *epoch != 0 && *epoch % 2 == 0 { + #[allow(clippy::cast_possible_truncation)] + let index = *view_number as usize % self.eligible_leaders.0.len(); + let res = self.eligible_leaders.0[index].clone(); + Ok(TYPES::SignatureKey::public_key(&res)) + } else { + #[allow(clippy::cast_possible_truncation)] + let index = *view_number as usize % self.eligible_leaders.1.len(); + let res = self.eligible_leaders.1[index].clone(); + Ok(TYPES::SignatureKey::public_key(&res)) + } + } + + /// Get the total number of nodes in the committee + fn total_nodes(&self, epoch: ::Epoch) -> usize { + if *epoch != 0 && *epoch % 2 == 0 { + self.stake_table.0.len() + } else { + self.stake_table.1.len() + } + } + + /// Get the total number of DA nodes in the committee + fn da_total_nodes(&self, epoch: ::Epoch) -> usize { + if *epoch != 0 && *epoch % 2 == 0 { + self.da_stake_table.0.len() + } else { + self.da_stake_table.1.len() + } + } + + /// Get the voting success threshold for the committee + fn success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + if *epoch != 0 && *epoch % 2 == 0 { + NonZeroU64::new(((self.stake_table.0.len() as u64 * 2) / 3) + 1).unwrap() + } else { + NonZeroU64::new(((self.stake_table.1.len() as u64 * 2) / 3) + 1).unwrap() + } + } + + /// Get the voting success threshold for the committee + fn da_success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + if *epoch != 0 && *epoch % 2 == 0 { + NonZeroU64::new(((self.da_stake_table.0.len() as u64 * 2) / 3) + 1).unwrap() + } else { + NonZeroU64::new(((self.da_stake_table.1.len() as u64 * 2) / 3) + 1).unwrap() + } + } + + /// Get the voting failure threshold for the committee + fn failure_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + if *epoch != 0 && *epoch % 2 == 0 { + NonZeroU64::new(((self.stake_table.0.len() as u64) / 3) + 1).unwrap() + } else { + NonZeroU64::new(((self.stake_table.1.len() as u64) / 3) + 1).unwrap() + } + } + + /// Get the voting upgrade threshold for the committee + fn upgrade_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + if *epoch != 0 && *epoch % 2 == 0 { + NonZeroU64::new(max( + (self.stake_table.0.len() as u64 * 9) / 10, + ((self.stake_table.0.len() as u64 * 2) / 3) + 1, + )) + .unwrap() + } else { + NonZeroU64::new(max( + (self.stake_table.1.len() as u64 * 9) / 10, + ((self.stake_table.1.len() as u64 * 2) / 3) + 1, + )) + .unwrap() + } + } +} diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 9ea46b34d7..e3024e876b 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -140,7 +140,6 @@ impl + 'static, V: Versions> pub fn request_proposal( &self, view: TYPES::View, - epoch: TYPES::Epoch, leaf_commitment: Commitment>, ) -> Result>>>> { @@ -160,6 +159,7 @@ impl + 'static, V: Versions> let mem = (*self.memberships).clone(); let receiver = self.internal_event_stream.1.activate_cloned(); let sender = self.internal_event_stream.0.clone(); + let epoch_height = self.epoch_height; Ok(async move { // First, broadcast that we need a proposal broadcast_event( @@ -187,7 +187,7 @@ impl + 'static, V: Versions> if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if let Err(err) = quorum_proposal.validate_signature(&mem, epoch) { + if let Err(err) = quorum_proposal.validate_signature(&mem, epoch_height) { tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); continue; } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 27fc0a7b43..fbd1960ce4 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -10,7 +10,7 @@ use async_broadcast::Sender; use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, - simple_vote::{QuorumVote2, TimeoutData, TimeoutVote}, + simple_vote::{QuorumVote2, TimeoutData2, TimeoutVote2}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, @@ -39,19 +39,19 @@ pub(crate) async fn handle_quorum_vote_recv< sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { - let is_vote_leaf_extended = task_state + let in_transition = task_state .consensus .read() .await - .is_leaf_extended(vote.data.leaf_commit); + .is_high_qc_for_last_block(); let we_are_leader = task_state .membership .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key; ensure!( - is_vote_leaf_extended || we_are_leader, + in_transition || we_are_leader, info!( - "We are not the leader for view {:?} and this is not the last vote for eQC", + "We are not the leader for view {:?} and we are not in the epoch transition", vote.view_number() + 1 ) ); @@ -66,7 +66,7 @@ pub(crate) async fn handle_quorum_vote_recv< &event, sender, &task_state.upgrade_lock, - !is_vote_leaf_extended, + !in_transition, ) .await?; @@ -79,7 +79,7 @@ pub(crate) async fn handle_timeout_vote_recv< I: NodeImplementation, V: Versions, >( - vote: &TimeoutVote, + vote: &TimeoutVote2, event: Arc>, sender: &Sender>>, task_state: &mut ConsensusTaskState, @@ -159,6 +159,11 @@ pub(crate) async fn handle_view_change< ) -> Result<()> { if epoch_number > task_state.cur_epoch { task_state.cur_epoch = epoch_number; + let _ = task_state + .consensus + .write() + .await + .update_epoch(epoch_number); tracing::info!("Progress: entered epoch {:>6}", *epoch_number); } @@ -214,7 +219,10 @@ pub(crate) async fn handle_view_change< async move { sleep(Duration::from_millis(timeout)).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), + Arc::new(HotShotEvent::Timeout( + TYPES::View::new(*view_number), + epoch_number, + )), &stream, ) .await; @@ -274,6 +282,7 @@ pub(crate) async fn handle_view_change< #[instrument(skip_all)] pub(crate) async fn handle_timeout, V: Versions>( view_number: TYPES::View, + epoch: TYPES::Epoch, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { @@ -285,15 +294,18 @@ pub(crate) async fn handle_timeout ensure!( task_state .membership - .has_stake(&task_state.public_key, task_state.cur_epoch), + .has_stake(&task_state.public_key, epoch), debug!( "We were not chosen for the consensus committee for view {:?}", view_number ) ); - let vote = TimeoutVote::create_signed_vote( - TimeoutData:: { view: view_number }, + let vote = TimeoutVote2::create_signed_vote( + TimeoutData2:: { + view: view_number, + epoch, + }, view_number, &task_state.public_key, &task_state.private_key, diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index c865ed82e1..7c50cd8df6 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -8,26 +8,29 @@ use std::sync::Arc; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; +use either::Either; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, event::Event, message::UpgradeLock, - simple_certificate::{QuorumCertificate2, TimeoutCertificate}, - simple_vote::{QuorumVote2, TimeoutVote}, + simple_certificate::{QuorumCertificate2, TimeoutCertificate2}, + simple_vote::{QuorumVote2, TimeoutVote2}, traits::{ - node_implementation::{NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, + utils::epoch_from_block_number, + vote::HasViewNumber, }; use tokio::task::JoinHandle; use tracing::instrument; -use utils::anytrace::Result; +use utils::anytrace::*; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, }; -use crate::{events::HotShotEvent, vote_collection::VoteCollectorsMap}; +use crate::{events::HotShotEvent, helpers::broadcast_event, vote_collection::VoteCollectorsMap}; /// Event handlers for use in the `handle` method. mod handlers; @@ -54,7 +57,7 @@ pub struct ConsensusTaskState, V: /// A map of `TimeoutVote` collector tasks. pub timeout_vote_collectors: - VoteCollectorsMap, TimeoutCertificate, V>, + VoteCollectorsMap, TimeoutCertificate2, V>, /// The view number that this node is currently executing in. pub cur_view: TYPES::View, @@ -116,11 +119,44 @@ impl, V: Versions> ConsensusTaskSt tracing::trace!("Failed to handle ViewChange event; error = {e}"); } } - HotShotEvent::Timeout(view_number) => { - if let Err(e) = handle_timeout(*view_number, &sender, self).await { + HotShotEvent::Timeout(view_number, epoch) => { + if let Err(e) = handle_timeout(*view_number, *epoch, &sender, self).await { tracing::debug!("Failed to handle Timeout event; error = {e}"); } } + HotShotEvent::Qc2Formed(Either::Left(quorum_cert)) => { + if !self + .consensus + .read() + .await + .is_leaf_extended(quorum_cert.data.leaf_commit) + { + tracing::debug!("We formed QC but not eQC. Do nothing"); + return Ok(()); + } + let cert_view = quorum_cert.view_number(); + let cert_block_number = self + .consensus + .read() + .await + .saved_leaves() + .get(&quorum_cert.data.leaf_commit) + .context(error!( + "Could not find the leaf for the eQC. It shouldn't happen." + ))? + .height(); + let cert_epoch = TYPES::Epoch::new(epoch_from_block_number( + cert_block_number, + self.epoch_height, + )); + // Transition to the new epoch by sending ViewChange + tracing::info!("Entering new epoch: {:?}", cert_epoch + 1); + broadcast_event( + Arc::new(HotShotEvent::ViewChange(cert_view + 1, cert_epoch + 1)), + &sender, + ) + .await; + } _ => {} } diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 923b15e010..fb2775d378 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -144,7 +144,7 @@ impl, V: Versions> DaTaskState { let cur_view = self.consensus.read().await.cur_view(); let view_number = proposal.data.view_number(); - let epoch_number = proposal.data.epoch_number; + let epoch_number = proposal.data.epoch; ensure!( cur_view <= view_number + 1, @@ -209,7 +209,9 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState = DaProposal2 { encoded_transactions: Arc::clone(encoded_transactions), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? view_number, - epoch_number: *epoch_number, + epoch: *epoch_number, }; let message = Proposal { diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index c75ef7c752..06d3ff5db6 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -12,18 +12,18 @@ use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{ DaProposal2, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, - VidDisperseShare, + VidDisperseShare2, }, message::Proposal, request_response::ProposalRequestPayload, simple_certificate::{ DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, - ViewSyncPreCommitCertificate, + TimeoutCertificate2, UpgradeCertificate, ViewSyncCommitCertificate2, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote2, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, + DaVote2, QuorumVote2, TimeoutVote2, UpgradeVote, ViewSyncCommitVote2, + ViewSyncFinalizeVote2, ViewSyncPreCommitVote2, }, traits::{ block_contents::BuilderFee, network::DataRequest, node_implementation::NodeType, @@ -76,9 +76,9 @@ pub enum HotShotEvent { /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote2), /// A timeout vote received from the network; handled by consensus task - TimeoutVoteRecv(TimeoutVote), + TimeoutVoteRecv(TimeoutVote2), /// Send a timeout vote to the network; emitted by consensus task replicas - TimeoutVoteSend(TimeoutVote), + TimeoutVoteSend(TimeoutVote2), /// A DA proposal has been received from the network; handled by the DA task DaProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA proposal has been validated; handled by the DA task and VID task @@ -123,7 +123,7 @@ pub enum HotShotEvent { /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QcFormed(Either, TimeoutCertificate>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only - Qc2Formed(Either, TimeoutCertificate>), + Qc2Formed(Either, TimeoutCertificate2>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks @@ -132,37 +132,37 @@ pub enum HotShotEvent { ViewSyncTimeout(TYPES::View, u64, ViewSyncPhase), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task - ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote), + ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote2), /// Receive a `ViewSyncCommitVote` from the network; received by a relay in the view sync task - ViewSyncCommitVoteRecv(ViewSyncCommitVote), + ViewSyncCommitVoteRecv(ViewSyncCommitVote2), /// Receive a `ViewSyncFinalizeVote` from the network; received by a relay in the view sync task - ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote), + ViewSyncFinalizeVoteRecv(ViewSyncFinalizeVote2), /// Send a `ViewSyncPreCommitVote` from the network; emitted by a replica in the view sync task - ViewSyncPreCommitVoteSend(ViewSyncPreCommitVote), + ViewSyncPreCommitVoteSend(ViewSyncPreCommitVote2), /// Send a `ViewSyncCommitVote` from the network; emitted by a replica in the view sync task - ViewSyncCommitVoteSend(ViewSyncCommitVote), + ViewSyncCommitVoteSend(ViewSyncCommitVote2), /// Send a `ViewSyncFinalizeVote` from the network; emitted by a replica in the view sync task - ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote), + ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote2), /// Receive a `ViewSyncPreCommitCertificate` from the network; received by a replica in the view sync task - ViewSyncPreCommitCertificateRecv(ViewSyncPreCommitCertificate), + ViewSyncPreCommitCertificateRecv(ViewSyncPreCommitCertificate2), /// Receive a `ViewSyncCommitCertificate` from the network; received by a replica in the view sync task - ViewSyncCommitCertificateRecv(ViewSyncCommitCertificate), + ViewSyncCommitCertificateRecv(ViewSyncCommitCertificate2), /// Receive a `ViewSyncFinalizeCertificate` from the network; received by a replica in the view sync task - ViewSyncFinalizeCertificateRecv(ViewSyncFinalizeCertificate), + ViewSyncFinalizeCertificateRecv(ViewSyncFinalizeCertificate2), /// Send a `ViewSyncPreCommitCertificate` from the network; emitted by a relay in the view sync task - ViewSyncPreCommitCertificateSend(ViewSyncPreCommitCertificate, TYPES::SignatureKey), + ViewSyncPreCommitCertificateSend(ViewSyncPreCommitCertificate2, TYPES::SignatureKey), /// Send a `ViewSyncCommitCertificate` from the network; emitted by a relay in the view sync task - ViewSyncCommitCertificateSend(ViewSyncCommitCertificate, TYPES::SignatureKey), + ViewSyncCommitCertificateSend(ViewSyncCommitCertificate2, TYPES::SignatureKey), /// Send a `ViewSyncFinalizeCertificate` from the network; emitted by a relay in the view sync task - ViewSyncFinalizeCertificateSend(ViewSyncFinalizeCertificate, TYPES::SignatureKey), + ViewSyncFinalizeCertificateSend(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::View), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only - Timeout(TYPES::View), + Timeout(TYPES::View, TYPES::Epoch), /// Receive transactions from the network TransactionsRecv(Vec), /// Send transactions to the network @@ -187,10 +187,10 @@ pub enum HotShotEvent { /// Like [`HotShotEvent::DaProposalRecv`]. VidShareRecv( TYPES::SignatureKey, - Proposal>, + Proposal>, ), /// VID share data is validated. - VidShareValidated(Proposal>), + VidShareValidated(Proposal>), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network @@ -229,13 +229,13 @@ pub enum HotShotEvent { TYPES::SignatureKey, /// Recipient key TYPES::SignatureKey, - Proposal>, + Proposal>, ), /// Receive a VID response from the network; received by the node that triggered the VID request. VidResponseRecv( TYPES::SignatureKey, - Proposal>, + Proposal>, ), /// A replica send us a High QC @@ -316,7 +316,7 @@ impl HotShotEvent { HotShotEvent::ViewChange(view_number, _) | HotShotEvent::ViewSyncTimeout(view_number, _, _) | HotShotEvent::ViewSyncTrigger(view_number) - | HotShotEvent::Timeout(view_number) => Some(*view_number), + | HotShotEvent::Timeout(view_number, ..) => Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) } @@ -496,7 +496,9 @@ impl Display for HotShotEvent { HotShotEvent::ViewSyncTrigger(view_number) => { write!(f, "ViewSyncTrigger(view_number={view_number:?})") } - HotShotEvent::Timeout(view_number) => write!(f, "Timeout(view_number={view_number:?})"), + HotShotEvent::Timeout(view_number, epoch) => { + write!(f, "Timeout(view_number={view_number:?}, epoch={epoch:?})") + } HotShotEvent::TransactionsRecv(_) => write!(f, "TransactionsRecv"), HotShotEvent::TransactionSend(_, _) => write!(f, "TransactionSend"), HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 242bf3db0d..6765c76022 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -20,10 +20,11 @@ use hotshot_types::{ message::{Proposal, UpgradeLock}, request_response::ProposalRequestPayload, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, + simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, @@ -48,6 +49,7 @@ pub(crate) async fn fetch_proposal( sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, upgrade_lock: &UpgradeLock, + epoch_height: u64, ) -> Result<(Leaf2, View)> { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. @@ -72,7 +74,6 @@ pub(crate) async fn fetch_proposal( .await; let mem = Arc::clone(&quorum_membership); - let cur_epoch = consensus.read().await.cur_epoch(); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. @@ -104,7 +105,7 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem, cur_epoch).is_ok() { + if quorum_proposal.validate_signature(&mem, epoch_height).is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -124,10 +125,11 @@ pub(crate) async fn fetch_proposal( let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); + let justify_qc_epoch = justify_qc.data.epoch(); if !justify_qc .is_valid_cert( - quorum_membership.stake_table(cur_epoch), - quorum_membership.success_threshold(cur_epoch), + quorum_membership.stake_table(justify_qc_epoch), + quorum_membership.success_threshold(justify_qc_epoch), upgrade_lock, ) .await @@ -148,6 +150,7 @@ pub(crate) async fn fetch_proposal( leaf: leaf.commit(), state, delta: None, + epoch: leaf.epoch(), }, }; Ok((leaf, view)) @@ -425,7 +428,6 @@ pub async fn decide_from_proposal( #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( - next_proposal_view_number: TYPES::View, event_sender: &Sender>>, event_receiver: &Receiver>>, quorum_membership: Arc, @@ -434,16 +436,9 @@ pub(crate) async fn parent_leaf_and_state( consensus: OuterConsensus, upgrade_lock: &UpgradeLock, parent_view_number: TYPES::View, + epoch_height: u64, ) -> Result<(Leaf2, Arc<::ValidatedState>)> { let consensus_reader = consensus.read().await; - let cur_epoch = consensus_reader.cur_epoch(); - ensure!( - quorum_membership.leader(next_proposal_view_number, cur_epoch)? == public_key, - info!( - "Somehow we formed a QC but are not the leader for the next view {:?}", - next_proposal_view_number - ) - ); let vsm_contains_parent_view = consensus_reader .validated_state_map() .contains_key(&parent_view_number); @@ -459,6 +454,7 @@ pub(crate) async fn parent_leaf_and_state( public_key.clone(), private_key.clone(), upgrade_lock, + epoch_height, ) .await .context(info!("Failed to fetch proposal"))?; @@ -517,6 +513,8 @@ pub async fn validate_proposal_safety_and_liveness< proposed_leaf.parent_commitment() == parent_leaf.commit(), "Proposed leaf does not extend the parent leaf." ); + let proposal_epoch = + epoch_from_block_number(proposed_leaf.height(), validation_info.epoch_height); let state = Arc::new( >::from_header(&proposal.data.block_header), @@ -535,11 +533,10 @@ pub async fn validate_proposal_safety_and_liveness< }; } - let cur_epoch = validation_info.cur_epoch; UpgradeCertificate::validate( &proposal.data.upgrade_certificate, &validation_info.quorum_membership, - cur_epoch, + TYPES::Epoch::new(proposal_epoch), &validation_info.upgrade_lock, ) .await?; @@ -562,8 +559,6 @@ pub async fn validate_proposal_safety_and_liveness< // The proposal is safe if // 1. the proposed block and the justify QC block belong to the same epoch or // 2. the justify QC is the eQC for the previous block - let proposal_epoch = - epoch_from_block_number(proposed_leaf.height(), validation_info.epoch_height); let justify_qc_epoch = epoch_from_block_number(parent_leaf.height(), validation_info.epoch_height); ensure!( @@ -661,7 +656,7 @@ pub(crate) async fn validate_proposal_view_and_certs< // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment proposal.validate_signature( &validation_info.quorum_membership, - validation_info.cur_epoch, + validation_info.epoch_height, )?; // Verify a timeout certificate OR a view sync certificate exists and is valid. @@ -679,16 +674,16 @@ pub(crate) async fn validate_proposal_view_and_certs< "Timeout certificate for view {} was not for the immediately preceding view", *view_number ); - + let timeout_cert_epoch = timeout_cert.data().epoch(); ensure!( timeout_cert .is_valid_cert( validation_info .quorum_membership - .stake_table(validation_info.cur_epoch), + .stake_table(timeout_cert_epoch), validation_info .quorum_membership - .success_threshold(validation_info.cur_epoch), + .success_threshold(timeout_cert_epoch), &validation_info.upgrade_lock ) .await, @@ -704,16 +699,17 @@ pub(crate) async fn validate_proposal_view_and_certs< view_number ); + let view_sync_cert_epoch = view_sync_cert.data().epoch(); // View sync certs must also be valid. ensure!( view_sync_cert .is_valid_cert( validation_info .quorum_membership - .stake_table(validation_info.cur_epoch), + .stake_table(view_sync_cert_epoch), validation_info .quorum_membership - .success_threshold(validation_info.cur_epoch), + .success_threshold(view_sync_cert_epoch), &validation_info.upgrade_lock ) .await, @@ -725,13 +721,19 @@ pub(crate) async fn validate_proposal_view_and_certs< // Validate the upgrade certificate -- this is just a signature validation. // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. - UpgradeCertificate::validate( - &proposal.data.upgrade_certificate, - &validation_info.quorum_membership, - validation_info.cur_epoch, - &validation_info.upgrade_lock, - ) - .await?; + { + let epoch = TYPES::Epoch::new(epoch_from_block_number( + proposal.data.block_header.block_number(), + TYPES::EPOCH_HEIGHT, + )); + UpgradeCertificate::validate( + &proposal.data.upgrade_certificate, + &validation_info.quorum_membership, + epoch, + &validation_info.upgrade_lock, + ) + .await?; + } Ok(()) } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index b885e2f7e5..875e8cd72c 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -75,35 +75,66 @@ impl NetworkMessageTaskState { GeneralConsensusMessage::Proposal(proposal) => { HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) } + GeneralConsensusMessage::Proposal2(proposal) => { + HotShotEvent::QuorumProposalRecv(proposal, sender) + } GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) } GeneralConsensusMessage::ProposalResponse(proposal) => { HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) } + GeneralConsensusMessage::ProposalResponse2(proposal) => { + HotShotEvent::QuorumProposalResponseRecv(proposal) + } GeneralConsensusMessage::Vote(vote) => { HotShotEvent::QuorumVoteRecv(vote.to_vote2()) } + GeneralConsensusMessage::Vote2(vote) => HotShotEvent::QuorumVoteRecv(vote), GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { + HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message.to_vote2()) + } + GeneralConsensusMessage::ViewSyncPreCommitVote2(view_sync_message) => { HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, + ) => HotShotEvent::ViewSyncPreCommitCertificateRecv( + view_sync_message.to_vsc2(), + ), + GeneralConsensusMessage::ViewSyncPreCommitCertificate2( + view_sync_message, ) => HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message), - GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { + HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message.to_vote2()) + } + GeneralConsensusMessage::ViewSyncCommitVote2(view_sync_message) => { HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { + HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message.to_vsc2()) + } + GeneralConsensusMessage::ViewSyncCommitCertificate2(view_sync_message) => { HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { + HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message.to_vote2()) + } + GeneralConsensusMessage::ViewSyncFinalizeVote2(view_sync_message) => { HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { - HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) + HotShotEvent::ViewSyncFinalizeCertificateRecv( + view_sync_message.to_vsc2(), + ) } + GeneralConsensusMessage::ViewSyncFinalizeCertificate2( + view_sync_message, + ) => HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message), GeneralConsensusMessage::TimeoutVote(message) => { + HotShotEvent::TimeoutVoteRecv(message.to_vote2()) + } + GeneralConsensusMessage::TimeoutVote2(message) => { HotShotEvent::TimeoutVoteRecv(message) } GeneralConsensusMessage::UpgradeProposal(message) => { @@ -114,13 +145,6 @@ impl NetworkMessageTaskState { HotShotEvent::UpgradeVoteRecv(message) } GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), - GeneralConsensusMessage::Proposal2(proposal) => { - HotShotEvent::QuorumProposalRecv(proposal, sender) - } - GeneralConsensusMessage::Vote2(vote) => HotShotEvent::QuorumVoteRecv(vote), - GeneralConsensusMessage::Proposal2Response(proposal) => { - HotShotEvent::QuorumProposalResponseRecv(proposal) - } }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -133,6 +157,9 @@ impl NetworkMessageTaskState { HotShotEvent::DaCertificateRecv(cert.to_dac2()) } DaConsensusMessage::VidDisperseMsg(proposal) => { + HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) + } + DaConsensusMessage::VidDisperseMsg2(proposal) => { HotShotEvent::VidShareRecv(sender, proposal) } DaConsensusMessage::DaProposal2(proposal) => { @@ -164,16 +191,26 @@ impl NetworkMessageTaskState { DataMessage::DataResponse(response) => { if let ResponseMessage::Found(message) = response { match message { - SequencingMessage::Da(da_message) => { - if let DaConsensusMessage::VidDisperseMsg(proposal) = da_message { - broadcast_event( - Arc::new(HotShotEvent::VidResponseRecv(sender, proposal)), - &self.internal_event_stream, - ) - .await; - } + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg(proposal)) => { + broadcast_event( + Arc::new(HotShotEvent::VidResponseRecv( + sender, + convert_proposal(proposal), + )), + &self.internal_event_stream, + ) + .await; } - SequencingMessage::General(_) => {} + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2( + proposal, + )) => { + broadcast_event( + Arc::new(HotShotEvent::VidResponseRecv(sender, proposal)), + &self.internal_event_stream, + ) + .await; + } + _ => {} } } } @@ -467,13 +504,28 @@ impl< )), TransmitType::Broadcast, )), - HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( - sender_key.clone(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ProposalResponse(convert_proposal(proposal)), - )), - TransmitType::Direct(sender_key), - )), + HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => { + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ProposalResponse2(proposal), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ProposalResponse(convert_proposal(proposal)), + )) + }; + + Some(( + sender_key.clone(), + message, + TransmitType::Direct(sender_key), + )) + } HotShotEvent::VidDisperseSend(proposal, sender) => { self.handle_vid_disperse_proposal(proposal, &sender).await; None @@ -501,7 +553,8 @@ impl< HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); - let leader = match self.membership.leader(view_number, self.epoch) { + let epoch = vote.data.epoch; + let leader = match self.membership.leader(view_number, epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -559,14 +612,22 @@ impl< return None; } }; - - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), - )), - TransmitType::Direct(leader), - )) + GeneralConsensusMessage::ViewSyncPreCommitVote2(vote.clone()), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone().to_vote()), + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); @@ -582,14 +643,22 @@ impl< return None; } }; - - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), - )), - TransmitType::Direct(leader), - )) + GeneralConsensusMessage::ViewSyncCommitVote2(vote.clone()), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitVote(vote.clone().to_vote()), + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); @@ -605,36 +674,71 @@ impl< return None; } }; + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeVote2(vote.clone()), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone().to_vote()), + )) + }; - Some(( - vote.signing_key(), + Some((vote.signing_key(), message, TransmitType::Direct(leader))) + } + HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => { + let view_number = certificate.view_number(); + let message = if self.upgrade_lock.version_infallible(view_number).await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), - )), - TransmitType::Direct(leader), - )) + GeneralConsensusMessage::ViewSyncPreCommitCertificate2(certificate), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate.to_vsc()), + )) + }; + + Some((sender, message, TransmitType::Broadcast)) + } + HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => { + let view_number = certificate.view_number(); + let message = if self.upgrade_lock.version_infallible(view_number).await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitCertificate2(certificate), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitCertificate(certificate.to_vsc()), + )) + }; + + Some((sender, message, TransmitType::Broadcast)) + } + HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => { + let view_number = certificate.view_number(); + let message = if self.upgrade_lock.version_infallible(view_number).await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeCertificate2(certificate), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate.to_vsc()), + )) + }; + + Some((sender, message, TransmitType::Broadcast)) } - HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => Some(( - sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), - )), - TransmitType::Broadcast, - )), - HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => Some(( - sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), - )), - TransmitType::Broadcast, - )), - HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => Some(( - sender, - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), - )), - TransmitType::Broadcast, - )), HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; @@ -649,13 +753,22 @@ impl< return None; } }; - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::TimeoutVote(vote.clone()), - )), - TransmitType::Direct(leader), - )) + GeneralConsensusMessage::TimeoutVote2(vote.clone()), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::TimeoutVote(vote.clone().to_vote()), + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( sender, @@ -707,14 +820,23 @@ impl< TransmitType::Direct(to), )), HotShotEvent::VidResponseSend(sender, to, proposal) => { - let da_message = DaConsensusMessage::VidDisperseMsg(proposal); - let sequencing_msg = SequencingMessage::Da(da_message); - let response_message = ResponseMessage::Found(sequencing_msg); - Some(( - sender, - MessageKind::Data(DataMessage::DataResponse(response_message)), - TransmitType::Direct(to), - )) + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2(proposal)), + ))) + } else { + MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg( + convert_proposal(proposal), + )), + ))) + }; + Some((sender, message, TransmitType::Direct(to))) } HotShotEvent::HighQcSend(quorum_cert, leader, sender) => Some(( sender, @@ -771,7 +893,13 @@ impl< GeneralConsensusMessage::Proposal(prop), )) = &message.kind { - if storage.write().await.append_proposal(prop).await.is_err() { + if storage + .write() + .await + .append_proposal2(&convert_proposal(prop.clone())) + .await + .is_err() + { return; } } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index d68d08b538..7c29ffe426 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -25,9 +25,12 @@ use hotshot_types::{ message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ - block_contents::BlockHeader, election::Membership, node_implementation::NodeType, + block_contents::BlockHeader, + election::Membership, + node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, + utils::epoch_from_block_number, vote::{Certificate, HasViewNumber}, }; use tracing::instrument; @@ -112,6 +115,9 @@ pub struct ProposalDependencyHandle { /// The highest_qc we've seen at the start of this task pub highest_qc: QuorumCertificate2, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl ProposalDependencyHandle { @@ -195,7 +201,6 @@ impl ProposalDependencyHandle { parent_qc: QuorumCertificate2, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( - self.view_number, &self.sender, &self.receiver, Arc::clone(&self.quorum_membership), @@ -204,6 +209,7 @@ impl ProposalDependencyHandle { OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &self.upgrade_lock, parent_qc.view_number(), + self.epoch_height, ) .await?; @@ -294,6 +300,18 @@ impl ProposalDependencyHandle { .context(warn!("Failed to construct marketplace block header"))? }; + let epoch = TYPES::Epoch::new(epoch_from_block_number( + block_header.block_number(), + self.epoch_height, + )); + // Make sure we are the leader for the view and epoch. + // We might have ended up here because we were in the epoch transition. + if self.quorum_membership.leader(self.view_number, epoch)? != self.public_key { + tracing::debug!( + "We are not the leader in the epoch for which we are about to propose. Do not send the quorum proposal." + ); + return Ok(()); + } let proposal = QuorumProposal2 { block_header, view_number: self.view_number, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 49875c6480..ac1a9d7837 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -25,6 +25,7 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, + utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber}, }; use tokio::task::JoinHandle; @@ -41,6 +42,9 @@ pub struct QuorumProposalTaskState /// Latest view number that has been proposed for. pub latest_proposed_view: TYPES::View, + /// Current epoch + pub cur_epoch: TYPES::Epoch, + /// Table for the in-progress proposal dependency tasks. pub proposal_dependencies: BTreeMap>, @@ -97,6 +101,7 @@ impl, V: Versions> view_number: TYPES::View, event_receiver: Receiver>>, ) -> EventDependency>> { + let id = self.id; EventDependency::new( event_receiver, Box::new(move |event| { @@ -158,7 +163,7 @@ impl, V: Versions> let valid = event_view == view_number; if valid { tracing::debug!( - "Dependency {dependency_type:?} is complete for view {event_view:?}!", + "Dependency {dependency_type:?} is complete for view {event_view:?}, my id is {id:?}!", ); } valid @@ -273,10 +278,22 @@ impl, V: Versions> event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, + epoch_transition_indicator: EpochTransitionIndicator, ) -> Result<()> { + let leader_in_current_epoch = + self.quorum_membership.leader(view_number, epoch_number)? == self.public_key; + // If we are in the epoch transition and we are the leader in the next epoch, + // we might want to start collecting dependencies for our next epoch proposal. + let leader_in_next_epoch = matches!( + epoch_transition_indicator, + EpochTransitionIndicator::InTransition + ) && self + .quorum_membership + .leader(view_number, epoch_number + 1)? + == self.public_key; // Don't even bother making the task if we are not entitled to propose anyway. ensure!( - self.quorum_membership.leader(view_number, epoch_number)? == self.public_key, + leader_in_current_epoch || leader_in_next_epoch, debug!("We are not the leader of the next view") ); @@ -316,6 +333,7 @@ impl, V: Versions> id: self.id, view_start_time: Instant::now(), highest_qc: self.highest_qc.clone(), + epoch_height: self.epoch_height, }, ); self.proposal_dependencies @@ -350,13 +368,20 @@ impl, V: Versions> } /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "handle method", level = "error", target = "QuorumProposalTaskState")] + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view, epoch = *self.cur_epoch), name = "handle method", level = "error", target = "QuorumProposalTaskState")] pub async fn handle( &mut self, event: Arc>, event_receiver: Receiver>>, event_sender: Sender>>, ) -> Result<()> { + let epoch_number = self.cur_epoch; + let epoch_transition_indicator = if self.consensus.read().await.is_high_qc_for_last_block() + { + EpochTransitionIndicator::InTransition + } else { + EpochTransitionIndicator::NotInTransition + }; match event.as_ref() { HotShotEvent::UpgradeCertificateFormed(cert) => { tracing::debug!( @@ -373,13 +398,13 @@ impl, V: Versions> HotShotEvent::Qc2Formed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; - let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, epoch_number, event_receiver, event_sender, Arc::clone(&event), + epoch_transition_indicator, )?; } either::Left(qc) => { @@ -407,13 +432,13 @@ impl, V: Versions> .wrap() .context(error!("Failed to update high QC in storage!"))?; let view_number = qc.view_number() + 1; - let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, epoch_number, event_receiver, event_sender, Arc::clone(&event), + epoch_transition_indicator, )?; } }, @@ -426,7 +451,6 @@ impl, V: Versions> _auction_result, ) => { let view_number = *view_number; - let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, @@ -434,9 +458,15 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), + EpochTransitionIndicator::NotInTransition, )?; } HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { + // MERGE TODO + // + // HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + // let cert_epoch_number = certificate.data.epoch; + // let epoch_number = self.consensus.read().await.cur_epoch(); ensure!( @@ -461,6 +491,7 @@ impl, V: Versions> event_receiver, event_sender, event, + EpochTransitionIndicator::NotInTransition, )?; } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { @@ -469,7 +500,6 @@ impl, V: Versions> if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); } - let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number + 1, @@ -477,6 +507,7 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), + epoch_transition_indicator, )?; } HotShotEvent::QuorumProposalSend(proposal, _) => { @@ -489,25 +520,31 @@ impl, V: Versions> } HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); - let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, epoch_number, event_receiver, event_sender, Arc::clone(&event), + EpochTransitionIndicator::NotInTransition, )?; } - HotShotEvent::ViewChange(view, _) | HotShotEvent::Timeout(view) => { + HotShotEvent::ViewChange(view, epoch) => { + if epoch > &self.cur_epoch { + self.cur_epoch = *epoch; + } + self.cancel_tasks(*view); + } + HotShotEvent::Timeout(view, ..) => { self.cancel_tasks(*view); } HotShotEvent::HighQcSend(qc, ..) => { ensure!(qc.view_number() > self.highest_qc.view_number()); - let epoch_number = self.consensus.read().await.cur_epoch(); + let cert_epoch_number = qc.data.epoch; ensure!( qc.is_valid_cert( - self.quorum_membership.stake_table(epoch_number), - self.quorum_membership.success_threshold(epoch_number), + self.quorum_membership.stake_table(cert_epoch_number), + self.quorum_membership.success_threshold(cert_epoch_number), &self.upgrade_lock ) .await, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 528fc04562..2f5df35a65 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -105,6 +105,7 @@ fn spawn_fetch_proposal( sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, upgrade_lock: UpgradeLock, + epoch_height: u64, ) { spawn(async move { let lock = upgrade_lock; @@ -118,6 +119,7 @@ fn spawn_fetch_proposal( sender_public_key, sender_private_key, &lock, + epoch_height, ) .await; }); @@ -151,6 +153,11 @@ pub(crate) async fn handle_quorum_proposal_recv< let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); + let proposal_block_number = proposal.data.block_header.block_number(); + let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( + proposal_block_number, + validation_info.epoch_height, + )); if !justify_qc .is_valid_cert( @@ -199,6 +206,7 @@ pub(crate) async fn handle_quorum_proposal_recv< validation_info.public_key.clone(), validation_info.private_key.clone(), validation_info.upgrade_lock.clone(), + validation_info.epoch_height, ); } let consensus_reader = validation_info.consensus.read().await; @@ -239,18 +247,13 @@ pub(crate) async fn handle_quorum_proposal_recv< justify_qc.data.leaf_commit ); validate_proposal_liveness(proposal, &validation_info).await?; - let block_number = proposal.data.block_header.block_number(); - let epoch = TYPES::Epoch::new(epoch_from_block_number( - block_number, - validation_info.epoch_height, - )); tracing::trace!( "Sending ViewChange for view {} and epoch {}", view_number, - *epoch + *proposal_epoch ); broadcast_event( - Arc::new(HotShotEvent::ViewChange(view_number, epoch)), + Arc::new(HotShotEvent::ViewChange(view_number, proposal_epoch)), event_sender, ) .await; @@ -267,18 +270,13 @@ pub(crate) async fn handle_quorum_proposal_recv< ) .await?; - let epoch_number = TYPES::Epoch::new(epoch_from_block_number( - proposal.data.block_header.block_number(), - validation_info.epoch_height, - )); - tracing::trace!( "Sending ViewChange for view {} and epoch {}", view_number, - *epoch_number + *proposal_epoch ); broadcast_event( - Arc::new(HotShotEvent::ViewChange(view_number, epoch_number)), + Arc::new(HotShotEvent::ViewChange(view_number, proposal_epoch)), event_sender, ) .await; diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index db62d33e5d..a30e965d69 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -92,8 +92,6 @@ pub(crate) struct ValidationInfo, pub(crate) public_key: TYPES::SignatureKey, /// Our Private Key pub(crate) private_key: ::PrivateKey, - /// Epoch number this node is executing in. - pub cur_epoch: TYPES::Epoch, /// Reference to consensus. The replica will require a write lock on this. pub(crate) consensus: OuterConsensus, /// Membership for Quorum Certs/votes @@ -143,7 +141,6 @@ impl, V: Versions> id: self.id, public_key: self.public_key.clone(), private_key: self.private_key.clone(), - cur_epoch: self.cur_epoch, consensus: self.consensus.clone(), quorum_membership: Arc::clone(&self.quorum_membership), output_event_stream: self.output_event_stream.clone(), diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index c434352ba1..c98f65a63e 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -12,7 +12,7 @@ use chrono::Utc; use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf2, QuorumProposal2, VidDisperseShare}, + data::{Leaf2, QuorumProposal2, VidDisperseShare2}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, @@ -278,8 +278,9 @@ pub(crate) async fn update_shared_state< instance_state: Arc, storage: Arc>, proposed_leaf: &Leaf2, - vid_share: &Proposal>, + vid_share: &Proposal>, parent_view_number: Option, + epoch_height: u64, ) -> Result<()> { let justify_qc = &proposed_leaf.justify_qc(); @@ -313,6 +314,7 @@ pub(crate) async fn update_shared_state< public_key.clone(), private_key.clone(), &upgrade_lock, + epoch_height, ) .await .ok() @@ -402,7 +404,7 @@ pub(crate) async fn submit_vote, V epoch_height: u64, storage: Arc>, leaf: Leaf2, - vid_share: Proposal>, + vid_share: Proposal>, extended_vote: bool, ) -> Result<()> { let epoch_number = TYPES::Epoch::new(epoch_from_block_number( @@ -432,26 +434,27 @@ pub(crate) async fn submit_vote, V .await .wrap() .context(error!("Failed to sign vote. This should never happen."))?; - tracing::debug!( - "sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); // Add to the storage. storage .write() .await - .append_vid(&vid_share) + .append_vid2(&vid_share) .await .wrap() .context(error!("Failed to store VID share"))?; if extended_vote { + tracing::debug!("sending extended vote to everybody",); broadcast_event( Arc::new(HotShotEvent::ExtendedQuorumVoteSend(vote)), &sender, ) .await; } else { + tracing::debug!( + "sending vote to next quorum leader {:?}", + vote.view_number() + 1 + ); broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &sender).await; } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 5f2b1df891..04b8a677dc 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -207,6 +207,7 @@ impl + 'static, V: Versions> Handl &leaf, &vid_share, parent_view_number, + self.epoch_height, ) .await { @@ -310,6 +311,7 @@ impl, V: Versions> QuorumVoteTaskS view_number: TYPES::View, event_receiver: Receiver>>, ) -> EventDependency>> { + let id = self.id; EventDependency::new( event_receiver.clone(), Box::new(move |event| { @@ -338,7 +340,12 @@ impl, V: Versions> QuorumVoteTaskS } }; if event_view == view_number { - tracing::trace!("Vote dependency {:?} completed", dependency_type); + tracing::trace!( + "Vote dependency {:?} completed for view {:?}, my id is {:?}", + dependency_type, + view_number, + id, + ); return true; } false @@ -496,12 +503,12 @@ impl, V: Versions> QuorumVoteTaskS "Received DAC for an older view." ); - let cur_epoch = self.consensus.read().await.cur_epoch(); + let cert_epoch = cert.data.epoch; // Validate the DAC. ensure!( cert.is_valid_cert( - self.membership.da_stake_table(cur_epoch), - self.membership.da_success_threshold(cur_epoch), + self.membership.da_stake_table(cert_epoch), + self.membership.da_success_threshold(cert_epoch), &self.upgrade_lock ) .await, @@ -532,7 +539,7 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = &disperse.data.payload_commitment; - let cur_epoch = self.consensus.read().await.cur_epoch(); + let disperse_epoch = disperse.data.epoch; // Check that the signature is valid ensure!( @@ -543,15 +550,15 @@ impl, V: Versions> QuorumVoteTaskS // ensure that the VID share was sent by a DA member OR the view leader ensure!( self.membership - .da_committee_members(view, cur_epoch) + .da_committee_members(view, disperse_epoch) .contains(sender) - || *sender == self.membership.leader(view, cur_epoch)?, + || *sender == self.membership.leader(view, disperse_epoch)?, "VID share was not sent by a DA member or the view leader." ); // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results - match vid_scheme(self.membership.total_nodes(cur_epoch)).verify_share( + match vid_scheme(self.membership.total_nodes(disperse_epoch)).verify_share( &disperse.data.share, &disperse.data.common, payload_commitment, @@ -579,7 +586,7 @@ impl, V: Versions> QuorumVoteTaskS .await; self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } - HotShotEvent::Timeout(view) => { + HotShotEvent::Timeout(view, ..) => { let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks let current_tasks = self.vote_dependencies.split_off(&view); @@ -675,6 +682,7 @@ impl, V: Versions> QuorumVoteTaskS &proposed_leaf, &updated_vid, Some(parent_leaf.view_number()), + self.epoch_height, ) .await { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index a41b04f5d5..56e16c1a37 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -22,11 +22,13 @@ use hotshot_task::{ use hotshot_types::{ consensus::OuterConsensus, traits::{ + block_contents::BlockHeader, election::Membership, network::{ConnectedNetwork, DataRequest, RequestKind}, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, }, + utils::epoch_from_block_number, vote::HasViewNumber, }; use rand::{seq::SliceRandom, thread_rng}; @@ -97,7 +99,10 @@ impl> TaskState for NetworkRequest match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.data.view_number(); - let cur_epoch = self.consensus.read().await.cur_epoch(); + let prop_epoch = TYPES::Epoch::new(epoch_from_block_number( + proposal.data.block_header.block_number(), + TYPES::EPOCH_HEIGHT, + )); // If we already have the VID shares for the next view, do nothing. if prop_view >= self.view @@ -108,7 +113,7 @@ impl> TaskState for NetworkRequest .vid_shares() .contains_key(&prop_view) { - self.spawn_requests(prop_view, cur_epoch, sender, receiver); + self.spawn_requests(prop_view, prop_epoch, sender, receiver); } Ok(()) } diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 1ac18d8dab..27983cdffa 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -10,7 +10,7 @@ use async_broadcast::{Receiver, Sender}; use committable::Committable; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, - data::VidDisperseShare, + data::VidDisperseShare2, message::Proposal, traits::{ election::Membership, network::DataRequest, node_implementation::NodeType, @@ -138,7 +138,7 @@ impl NetworkResponseState { &self, view: TYPES::View, key: &TYPES::SignatureKey, - ) -> Option>> { + ) -> Option>> { let consensus_reader = self.consensus.read().await; if let Some(view) = consensus_reader.vid_shares().get(&view) { if let Some(share) = view.get(key) { @@ -146,7 +146,6 @@ impl NetworkResponseState { } } - let cur_epoch = consensus_reader.cur_epoch(); drop(consensus_reader); if Consensus::calculate_and_update_vid( @@ -154,7 +153,6 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, - cur_epoch, ) .await .is_none() @@ -166,7 +164,6 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, - cur_epoch, ) .await?; } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index ffe9290d73..ce01590734 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -479,18 +479,20 @@ impl, V: Versions> TransactionTask .await; } HotShotEvent::ViewChange(view, epoch) => { - if *epoch > self.cur_epoch { - self.cur_epoch = *epoch; - } let view = TYPES::View::new(std::cmp::max(1, **view)); + ensure!( - *view > *self.cur_view, + *view > *self.cur_view || *epoch > self.cur_epoch, debug!( - "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view + "Received a view change to an older view and epoch: tried to change view to {:?}\ + and epoch {:?} though we are at view {:?} and epoch {:?}", + view, epoch, self.cur_view, self.cur_epoch ) ); self.cur_view = view; - if self.membership.leader(view, self.cur_epoch)? == self.public_key { + self.cur_epoch = *epoch; + + if self.membership.leader(view, *epoch)? == self.public_key { self.handle_view_change(&event_stream, view, *epoch).await; return Ok(()); } @@ -540,9 +542,9 @@ impl, V: Versions> TransactionTask ))?; match &view_data.view_inner { - ViewInner::Da { payload_commitment } => { - return Ok((target_view, *payload_commitment)) - } + ViewInner::Da { + payload_commitment, .. + } => return Ok((target_view, *payload_commitment)), ViewInner::Leaf { leaf: leaf_commitment, .. diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index f19f6e7528..07dd5d1b72 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -11,9 +11,10 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::{PackedBundle, VidDisperse, VidDisperseShare}, + data::{PackedBundle, VidDisperse, VidDisperseShare2}, message::Proposal, traits::{ + election::Membership, node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, BlockPayload, @@ -76,16 +77,23 @@ impl> VidTaskState { let payload = ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); + let epoch = self.cur_epoch; + if self.membership.leader(*view_number, epoch).ok()? != self.public_key { + tracing::debug!( + "We are not the leader in the current epoch. Do not send the VID dispersal." + ); + return None; + } let vid_disperse = VidDisperse::calculate_vid_disperse( Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, - self.cur_epoch, + epoch, vid_precompute.clone(), ) .await; let payload_commitment = vid_disperse.payload_commitment; - let shares = VidDisperseShare::from_vid_disperse(vid_disperse.clone()); + let shares = VidDisperseShare2::from_vid_disperse(vid_disperse.clone()); let mut consensus_writer = self.consensus.write().await; for share in shares { if let Some(disperse) = share.to_proposal(&self.private_key) { @@ -132,6 +140,10 @@ impl> VidTaskState { } HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } + let view = *view; if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; @@ -141,9 +153,6 @@ impl> VidTaskState { info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; - if *epoch > self.cur_epoch { - self.cur_epoch = *epoch; - } return None; } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 3574fe0ee6..98b9fd1876 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -16,13 +16,13 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - message::{GeneralConsensusMessage, UpgradeLock}, + message::UpgradeLock, simple_certificate::{ - ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, + ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, - ViewSyncPreCommitData, ViewSyncPreCommitVote, + ViewSyncCommitData2, ViewSyncCommitVote2, ViewSyncFinalizeData2, ViewSyncFinalizeVote2, + ViewSyncPreCommitData2, ViewSyncPreCommitVote2, }, traits::{ election::Membership, @@ -92,16 +92,17 @@ pub struct ViewSyncTaskState { /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: RwLock< - RelayMap, ViewSyncPreCommitCertificate, V>, + RelayMap, ViewSyncPreCommitCertificate2, V>, >, /// Map of commit vote accumulates for the relay pub commit_relay_map: - RwLock, ViewSyncCommitCertificate, V>>, + RwLock, ViewSyncCommitCertificate2, V>>, /// Map of finalize vote accumulates for the relay - pub finalize_relay_map: - RwLock, ViewSyncFinalizeCertificate, V>>, + pub finalize_relay_map: RwLock< + RelayMap, ViewSyncFinalizeCertificate2, V>, + >, /// Timeout duration for view sync rounds pub view_sync_timeout: Duration, @@ -464,7 +465,7 @@ impl ViewSyncTaskState { self.last_garbage_collected_view = self.cur_view - 1; } } - &HotShotEvent::Timeout(view_number) => { + &HotShotEvent::Timeout(view_number, ..) => { // This is an old timeout and we can ignore it ensure!( view_number >= self.cur_view, @@ -554,10 +555,11 @@ impl ViewSyncReplicaTaskState { self.relay = certificate.data().relay; } - let Ok(vote) = ViewSyncCommitVote::::create_signed_vote( - ViewSyncCommitData { + let Ok(vote) = ViewSyncCommitVote2::::create_signed_vote( + ViewSyncCommitData2 { relay: certificate.data().relay, round: self.next_view, + epoch: certificate.data().epoch, }, self.next_view, &self.public_key, @@ -569,15 +571,12 @@ impl ViewSyncReplicaTaskState { tracing::error!("Failed to sign ViewSyncCommitData!"); return None; }; - let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); - if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - broadcast_event( - Arc::new(HotShotEvent::ViewSyncCommitVoteSend(vote)), - &event_stream, - ) - .await; - } + broadcast_event( + Arc::new(HotShotEvent::ViewSyncCommitVoteSend(vote)), + &event_stream, + ) + .await; if let Some(timeout_task) = self.timeout_task.take() { timeout_task.abort(); @@ -640,10 +639,11 @@ impl ViewSyncReplicaTaskState { self.relay = certificate.data().relay; } - let Ok(vote) = ViewSyncFinalizeVote::::create_signed_vote( - ViewSyncFinalizeData { + let Ok(vote) = ViewSyncFinalizeVote2::::create_signed_vote( + ViewSyncFinalizeData2 { relay: certificate.data().relay, round: self.next_view, + epoch: certificate.data().epoch, }, self.next_view, &self.public_key, @@ -655,15 +655,12 @@ impl ViewSyncReplicaTaskState { tracing::error!("Failed to sign view sync finalized vote!"); return None; }; - let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); - if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { - broadcast_event( - Arc::new(HotShotEvent::ViewSyncFinalizeVoteSend(vote)), - &event_stream, - ) - .await; - } + broadcast_event( + Arc::new(HotShotEvent::ViewSyncFinalizeVoteSend(vote)), + &event_stream, + ) + .await; tracing::info!( "View sync protocol has received view sync evidence to update the view to {}", @@ -757,10 +754,12 @@ impl ViewSyncReplicaTaskState { return None; } - let Ok(vote) = ViewSyncPreCommitVote::::create_signed_vote( - ViewSyncPreCommitData { + let epoch = self.cur_epoch; + let Ok(vote) = ViewSyncPreCommitVote2::::create_signed_vote( + ViewSyncPreCommitData2 { relay: 0, round: view_number, + epoch, }, view_number, &self.public_key, @@ -772,15 +771,12 @@ impl ViewSyncReplicaTaskState { tracing::error!("Failed to sign pre commit vote!"); return None; }; - let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); - if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - broadcast_event( - Arc::new(HotShotEvent::ViewSyncPreCommitVoteSend(vote)), - &event_stream, - ) - .await; - } + broadcast_event( + Arc::new(HotShotEvent::ViewSyncPreCommitVoteSend(vote)), + &event_stream, + ) + .await; self.timeout_task = Some(spawn({ let stream = event_stream.clone(); @@ -815,10 +811,11 @@ impl ViewSyncReplicaTaskState { self.relay += 1; match last_seen_certificate { ViewSyncPhase::None | ViewSyncPhase::PreCommit | ViewSyncPhase::Commit => { - let Ok(vote) = ViewSyncPreCommitVote::::create_signed_vote( - ViewSyncPreCommitData { + let Ok(vote) = ViewSyncPreCommitVote2::::create_signed_vote( + ViewSyncPreCommitData2 { relay: self.relay, round: self.next_view, + epoch: self.cur_epoch, }, self.next_view, &self.public_key, @@ -830,16 +827,12 @@ impl ViewSyncReplicaTaskState { tracing::error!("Failed to sign ViewSyncPreCommitData!"); return None; }; - let message = - GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); - - if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - broadcast_event( - Arc::new(HotShotEvent::ViewSyncPreCommitVoteSend(vote)), - &event_stream, - ) - .await; - } + + broadcast_event( + Arc::new(HotShotEvent::ViewSyncPreCommitVoteSend(vote)), + &event_stream, + ) + .await; } ViewSyncPhase::Finalize => { // This should never occur diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index d7ad469746..182a40a8c3 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -17,13 +17,13 @@ use either::Either::{self, Left, Right}; use hotshot_types::{ message::UpgradeLock, simple_certificate::{ - DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, - ViewSyncPreCommitCertificate, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate2, + UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, + ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote2, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, - ViewSyncFinalizeVote, ViewSyncPreCommitVote, + DaVote2, QuorumVote, QuorumVote2, TimeoutVote2, UpgradeVote, ViewSyncCommitVote2, + ViewSyncFinalizeVote2, ViewSyncPreCommitVote2, }, traits::{ election::Membership, @@ -311,25 +311,29 @@ type DaVoteState = VoteCollectionTaskState, DaCertificate2, V>; /// Alias for Timeout vote accumulator type TimeoutVoteState = - VoteCollectionTaskState, TimeoutCertificate, V>; + VoteCollectionTaskState, TimeoutCertificate2, V>; /// Alias for upgrade vote accumulator type UpgradeVoteState = VoteCollectionTaskState, UpgradeCertificate, V>; /// Alias for View Sync Pre Commit vote accumulator type ViewSyncPreCommitState = VoteCollectionTaskState< TYPES, - ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate, + ViewSyncPreCommitVote2, + ViewSyncPreCommitCertificate2, V, >; /// Alias for View Sync Commit vote accumulator -type ViewSyncCommitVoteState = - VoteCollectionTaskState, ViewSyncCommitCertificate, V>; +type ViewSyncCommitVoteState = VoteCollectionTaskState< + TYPES, + ViewSyncCommitVote2, + ViewSyncCommitCertificate2, + V, +>; /// Alias for View Sync Finalize vote accumulator type ViewSyncFinalizeVoteState = VoteCollectionTaskState< TYPES, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate, + ViewSyncFinalizeVote2, + ViewSyncFinalizeCertificate2, V, >; @@ -405,8 +409,8 @@ impl AggregatableVote, DaCertificate2 AggregatableVote, TimeoutCertificate> - for TimeoutVote +impl AggregatableVote, TimeoutCertificate2> + for TimeoutVote2 { fn leader( &self, @@ -416,7 +420,7 @@ impl AggregatableVote, TimeoutCertifi membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( - certificate: TimeoutCertificate, + certificate: TimeoutCertificate2, _key: &TYPES::SignatureKey, ) -> HotShotEvent { HotShotEvent::Qc2Formed(Right(certificate)) @@ -424,8 +428,8 @@ impl AggregatableVote, TimeoutCertifi } impl - AggregatableVote, ViewSyncCommitCertificate> - for ViewSyncCommitVote + AggregatableVote, ViewSyncCommitCertificate2> + for ViewSyncCommitVote2 { fn leader( &self, @@ -435,7 +439,7 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncCommitCertificate, + certificate: ViewSyncCommitCertificate2, key: &TYPES::SignatureKey, ) -> HotShotEvent { HotShotEvent::ViewSyncCommitCertificateSend(certificate, key.clone()) @@ -443,8 +447,8 @@ impl } impl - AggregatableVote, ViewSyncPreCommitCertificate> - for ViewSyncPreCommitVote + AggregatableVote, ViewSyncPreCommitCertificate2> + for ViewSyncPreCommitVote2 { fn leader( &self, @@ -454,7 +458,7 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncPreCommitCertificate, + certificate: ViewSyncPreCommitCertificate2, key: &TYPES::SignatureKey, ) -> HotShotEvent { HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, key.clone()) @@ -462,8 +466,8 @@ impl } impl - AggregatableVote, ViewSyncFinalizeCertificate> - for ViewSyncFinalizeVote + AggregatableVote, ViewSyncFinalizeCertificate2> + for ViewSyncFinalizeVote2 { fn leader( &self, @@ -473,7 +477,7 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncFinalizeCertificate, + certificate: ViewSyncFinalizeCertificate2, key: &TYPES::SignatureKey, ) -> HotShotEvent { HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, key.clone()) @@ -543,14 +547,14 @@ impl HandleVoteEvent, DaCert #[async_trait] impl - HandleVoteEvent, TimeoutCertificate> + HandleVoteEvent, TimeoutCertificate2> for TimeoutVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), @@ -563,14 +567,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncPreCommitCertificate> + HandleVoteEvent, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await @@ -585,14 +589,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncCommitCertificate> + HandleVoteEvent, ViewSyncCommitCertificate2> for ViewSyncCommitVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), @@ -605,14 +609,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncFinalizeCertificate> + HandleVoteEvent, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 9097f38d02..2fa1fd6eec 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -25,17 +25,17 @@ use hotshot_example_types::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf, Leaf2, QuorumProposal, VidDisperse, VidDisperseShare}, + data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2}, message::{GeneralConsensusMessage, Proposal, UpgradeLock}, simple_certificate::DaCertificate2, - simple_vote::{DaData2, DaVote2, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, + simple_vote::{DaData2, DaVote2, QuorumData2, QuorumVote2, SimpleVote, VersionedVoteData}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, election::Membership, - node_implementation::{NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, }, - utils::{View, ViewInner}, + utils::{epoch_from_block_number, View, ViewInner}, vid::{vid_scheme, VidCommitment, VidProposal, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, ValidatorConfig, @@ -45,6 +45,7 @@ use primitive_types::U256; use serde::Serialize; use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; + /// create the [`SystemContextHandle`] from a node id /// # Panics /// if cannot create a [`HotShotInitializer`] @@ -182,9 +183,9 @@ pub async fn build_cert< } pub fn vid_share( - shares: &[Proposal>], + shares: &[Proposal>], pub_key: TYPES::SignatureKey, -) -> Proposal> { +) -> Proposal> { shares .iter() .filter(|s| s.data.recipient_key == pub_key) @@ -344,7 +345,7 @@ pub fn build_vid_proposal( ( vid_disperse_proposal, - VidDisperseShare::from_vid_disperse(vid_disperse) + VidDisperseShare2::from_vid_disperse(vid_disperse) .into_iter() .map(|vid_disperse| { vid_disperse @@ -389,14 +390,15 @@ pub async fn build_da_certificate( pub async fn build_vote, V: Versions>( handle: &SystemContextHandle, - proposal: QuorumProposal, + proposal: QuorumProposal2, ) -> GeneralConsensusMessage { let view = proposal.view_number; - let leaf: Leaf<_> = Leaf::from_quorum_proposal(&proposal); - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(&handle.hotshot.upgrade_lock).await, + let leaf: Leaf2<_> = Leaf2::from_quorum_proposal(&proposal); + let vote = QuorumVote2::::create_signed_vote( + QuorumData2 { + leaf_commit: leaf.commit(), + epoch: leaf.epoch(), }, view, &handle.public_key(), @@ -405,7 +407,7 @@ pub async fn build_vote, V: Versio ) .await .expect("Failed to create quorum vote"); - GeneralConsensusMessage::::Vote(vote) + GeneralConsensusMessage::::Vote(vote.to_vote()) } /// This function permutes the provided input vector `inputs`, given some order provided within the @@ -429,8 +431,15 @@ where pub async fn build_fake_view_with_leaf( leaf: Leaf2, upgrade_lock: &UpgradeLock, + epoch_height: u64, ) -> View { - build_fake_view_with_leaf_and_state(leaf, TestValidatedState::default(), upgrade_lock).await + build_fake_view_with_leaf_and_state( + leaf, + TestValidatedState::default(), + upgrade_lock, + epoch_height, + ) + .await } /// This function will create a fake [`View`] from a provided [`Leaf`] and `state`. @@ -438,12 +447,16 @@ pub async fn build_fake_view_with_leaf_and_state( leaf: Leaf2, state: TestValidatedState, _upgrade_lock: &UpgradeLock, + epoch_height: u64, ) -> View { + let epoch = + ::Epoch::new(epoch_from_block_number(leaf.height(), epoch_height)); View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), state: state.into(), delta: None, + epoch, }, } } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 4382f12ce0..3847a20a62 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -28,7 +28,7 @@ use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::Leaf2, event::Event, - simple_certificate::{QuorumCertificate, QuorumCertificate2}, + simple_certificate::QuorumCertificate2, traits::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, @@ -243,12 +243,11 @@ where read_storage.last_actioned_view().await, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( - QuorumCertificate::genesis::( + QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, ), read_storage.decided_upgrade_certificate().await, Vec::new(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index ffee9b39e5..f13cba62ca 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -30,8 +30,8 @@ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, - data::Leaf, - simple_certificate::QuorumCertificate, + data::Leaf2, + simple_certificate::QuorumCertificate2, traits::{ election::Membership, network::ConnectedNetwork, @@ -179,18 +179,16 @@ where late_start, latest_view: None, changes, - last_decided_leaf: Leaf::genesis( + last_decided_leaf: Leaf2::genesis( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .into(), - high_qc: QuorumCertificate::genesis::( + .await, + high_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, async_delay_config: launcher.metadata.async_delay_config, restart_contexts: HashMap::new(), channel_generator: launcher.resource_generator.channel_generator, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index e31f98364f..d5c1cb7ca5 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -22,25 +22,24 @@ use hotshot_example_types::{ }; use hotshot_types::{ data::{ - DaProposal2, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, + DaProposal2, EpochNumber, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2, ViewChangeEvidence, ViewNumber, }, drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ - DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncFinalizeCertificate, + DaCertificate2, QuorumCertificate2, TimeoutCertificate2, UpgradeCertificate, + ViewSyncFinalizeCertificate2, }, simple_vote::{ - DaData2, DaVote2, QuorumData2, QuorumVote2, TimeoutData, TimeoutVote, UpgradeProposalData, - UpgradeVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, + DaData2, DaVote2, QuorumData2, QuorumVote2, TimeoutData2, TimeoutVote2, + UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData2, ViewSyncFinalizeVote2, }, traits::{ consensus_api::ConsensusApi, node_implementation::{ConsensusTime, NodeType}, BlockPayload, }, - utils::epoch_from_block_number, }; use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; @@ -59,7 +58,7 @@ pub struct TestView { pub membership: ::Membership, pub vid_disperse: Proposal>, pub vid_proposal: ( - Vec>>, + Vec>>, ::SignatureKey, ), pub leader_public_key: ::SignatureKey, @@ -67,8 +66,8 @@ pub struct TestView { pub transactions: Vec, upgrade_data: Option>, formed_upgrade_certificate: Option>, - view_sync_finalize_data: Option>, - timeout_cert_data: Option>, + view_sync_finalize_data: Option>, + timeout_cert_data: Option>, upgrade_lock: UpgradeLock, } @@ -121,12 +120,11 @@ impl TestView { .await; let block_header = TestBlockHeader::new( - &Leaf::::genesis( + &Leaf2::::genesis( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .into(), + .await, payload_commitment, builder_commitment, metadata, @@ -135,12 +133,11 @@ impl TestView { let quorum_proposal_inner = QuorumProposal2:: { block_header: block_header.clone(), view_number: genesis_view, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, drb_result: INITIAL_DRB_RESULT, @@ -157,7 +154,7 @@ impl TestView { encoded_transactions: encoded_transactions.clone(), metadata, view_number: genesis_view, - epoch_number: genesis_epoch, + epoch: genesis_epoch, }; let da_proposal = Proposal { @@ -208,6 +205,7 @@ impl TestView { pub async fn next_view_from_ancestor(&self, ancestor: TestView) -> Self { let old = ancestor; let old_view = old.view_number; + let old_epoch = old.epoch_number; // This ensures that we're always moving forward in time since someone could pass in any // test view here. @@ -219,7 +217,7 @@ impl TestView { let quorum_data = QuorumData2 { leaf_commit: old.leaf.commit(), - epoch: EpochNumber::new(0), + epoch: old_epoch, }; let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); @@ -307,9 +305,9 @@ impl TestView { let cert = build_cert::< TestTypes, TestVersions, - ViewSyncFinalizeData, - ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate, + ViewSyncFinalizeData2, + ViewSyncFinalizeVote2, + ViewSyncFinalizeCertificate2, >( data.clone(), membership, @@ -330,9 +328,9 @@ impl TestView { let cert = build_cert::< TestTypes, TestVersions, - TimeoutData, - TimeoutVote, - TimeoutCertificate, + TimeoutData2, + TimeoutVote2, + TimeoutCertificate2, >( data.clone(), membership, @@ -400,7 +398,7 @@ impl TestView { encoded_transactions: encoded_transactions.clone(), metadata, view_number: next_view, - epoch_number: self.epoch_number, + epoch: old_epoch, }; let da_proposal = Proposal { @@ -446,10 +444,7 @@ impl TestView { QuorumVote2::::create_signed_vote( QuorumData2 { leaf_commit: self.leaf.commit(), - epoch: EpochNumber::new(epoch_from_block_number( - self.leaf.height(), - handle.hotshot.config.epoch_height, - )), + epoch: self.epoch_number, }, self.view_number, &handle.public_key(), @@ -530,7 +525,7 @@ impl TestViewGenerator { pub fn add_view_sync_finalize( &mut self, - view_sync_finalize_data: ViewSyncFinalizeData, + view_sync_finalize_data: ViewSyncFinalizeData2, ) { if let Some(ref view) = self.current_view { self.current_view = Some(TestView { @@ -542,7 +537,7 @@ impl TestViewGenerator { } } - pub fn add_timeout(&mut self, timeout_data: TimeoutData) { + pub fn add_timeout(&mut self, timeout_data: TimeoutData2) { if let Some(ref view) = self.current_view { self.current_view = Some(TestView { timeout_cert_data: Some(timeout_data), diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index e672ff24e3..c7af796395 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -66,7 +66,7 @@ async fn test_da_task() { view.create_da_vote( DaData2 { payload_commit, - epoch: EpochNumber::new(0), + epoch: view.da_proposal.data.epoch, }, &handle, ) @@ -85,7 +85,7 @@ async fn test_da_task() { view.create_da_vote( DaData2 { payload_commit, - epoch: EpochNumber::new(0), + epoch: view.da_proposal.data.epoch, }, &handle, ) @@ -174,7 +174,7 @@ async fn test_da_task_storage_failure() { view.create_da_vote( DaData2 { payload_commit, - epoch: EpochNumber::new(0), + epoch: view.da_proposal.data.epoch, }, &handle, ) @@ -193,7 +193,7 @@ async fn test_da_task_storage_failure() { view.create_da_vote( DaData2 { payload_commit, - epoch: EpochNumber::new(0), + epoch: view.da_proposal.data.epoch, }, &handle, ) diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 9536cf0f22..e19cce630b 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -13,7 +13,7 @@ use hotshot_types::{ message::{GeneralConsensusMessage, Message, MessageKind, SequencingMessage}, signature_key::BLSPubKey, simple_certificate::SimpleCertificate, - simple_vote::ViewSyncCommitData, + simple_vote::ViewSyncCommitData2, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey}, }; use vbs::{ @@ -27,6 +27,7 @@ use vbs::{ fn version_number_at_start_of_serialization() { let sender = BLSPubKey::generated_from_seed_indexed([0u8; 32], 0).0; let view_number = ConsensusTime::new(17); + let epoch = ConsensusTime::new(0); // The version we set for the message const MAJOR: u16 = 37; const MINOR: u16 = 17; @@ -37,16 +38,17 @@ fn version_number_at_start_of_serialization() { type TestVersion = StaticVersion; // The specific data we attach to our message shouldn't affect the serialization, // we're using ViewSyncCommitData for simplicity. - let data: ViewSyncCommitData = ViewSyncCommitData { + let data: ViewSyncCommitData2 = ViewSyncCommitData2 { relay: 37, round: view_number, + epoch, }; let simple_certificate = SimpleCertificate::new(data.clone(), data.commit(), view_number, None, PhantomData); let message = Message { sender, kind: MessageKind::Consensus(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitCertificate(simple_certificate), + GeneralConsensusMessage::ViewSyncCommitCertificate2(simple_certificate), )), }; let serialized_message: Vec = Serializer::::serialize(&message).unwrap(); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 25e70fc6c2..0935aef8f8 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -26,7 +26,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{null_block, EpochNumber, Leaf2, ViewChangeEvidence, ViewNumber}, - simple_vote::{TimeoutData, ViewSyncFinalizeData}, + simple_vote::{TimeoutData2, ViewSyncFinalizeData2}, traits::{ election::Membership, node_implementation::{ConsensusTime, Versions}, @@ -331,8 +331,9 @@ async fn test_quorum_proposal_task_qc_timeout() { vid_dispersals.push(view.vid_disperse.clone()); leaves.push(view.leaf.clone()); } - let timeout_data = TimeoutData { + let timeout_data = TimeoutData2 { view: ViewNumber::new(1), + epoch: EpochNumber::new(0), }; generator.add_timeout(timeout_data); for view in (&mut generator).take(2).collect::>().await { @@ -420,9 +421,10 @@ async fn test_quorum_proposal_task_view_sync() { leaves.push(view.leaf.clone()); } - let view_sync_finalize_data = ViewSyncFinalizeData { + let view_sync_finalize_data = ViewSyncFinalizeData2 { relay: 2, round: ViewNumber::new(node_id), + epoch: EpochNumber::new(0), }; generator.add_view_sync_finalize(view_sync_finalize_data); for view in (&mut generator).take(2).collect::>().await { diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 982b7018f6..8e92935567 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -4,12 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use hotshot_example_types::{ node_types::{ EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, - TestTypes, TestTypesRandomizedLeader, TestVersions, + TestTwoStakeTablesTypes, TestTypes, TestTypesRandomizedLeader, TestVersions, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -17,6 +17,7 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, @@ -155,8 +156,8 @@ cross_tests!( cross_tests!( TestName: test_epoch_end, - Impls: [MemoryImpl], - Types: [TestTypes], + Impls: [PushCdnImpl], + Types: [TestTwoStakeTablesTypes], Versions: [EpochsTestVersions], Ignore: false, Metadata: { @@ -167,6 +168,17 @@ cross_tests!( }, ), epoch_height: 10, + num_nodes_with_stake: 10, + start_nodes: 10, + num_bootstrap_nodes: 10, + da_staked_committee_size: 10, + overall_safety_properties: OverallSafetyPropertiesDescription { + // Explicitly show that we use normal threshold, i.e. 2 nodes_len / 3 + 1 + // but we divide by two because only half of the nodes are active in each epoch + threshold_calculator: Arc::new(|_, nodes_len| 2 * nodes_len / 2 / 3 + 1), + ..OverallSafetyPropertiesDescription::default() + }, + ..TestDescription::default() } }, diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 3bf19bbc38..3cd850183c 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -99,9 +99,9 @@ async fn test_vid_task() { _pd: PhantomData, }; let inputs = vec![ - serial![ViewChange(ViewNumber::new(1), EpochNumber::new(1))], + serial![ViewChange(ViewNumber::new(1), EpochNumber::new(0))], serial![ - ViewChange(ViewNumber::new(2), EpochNumber::new(1)), + ViewChange(ViewNumber::new(2), EpochNumber::new(0)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 0c5eb9a719..255ca4cfa0 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -12,7 +12,7 @@ use hotshot_task_impls::{ use hotshot_testing::helpers::build_system_handle; use hotshot_types::{ data::{EpochNumber, ViewNumber}, - simple_vote::ViewSyncPreCommitData, + simple_vote::ViewSyncPreCommitData2, traits::node_implementation::ConsensusTime, }; @@ -26,11 +26,12 @@ async fn test_view_sync_task() { .await .0; - let vote_data = ViewSyncPreCommitData { + let vote_data = ViewSyncPreCommitData2 { relay: 0, round: ::View::new(4), + epoch: EpochNumber::new(0), }; - let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( + let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote2::::create_signed_vote( vote_data, ::View::new(4), hotshot_types::traits::consensus_api::ConsensusApi::public_key(&handle), @@ -45,8 +46,14 @@ async fn test_view_sync_task() { let mut input = Vec::new(); let mut output = Vec::new(); - input.push(HotShotEvent::Timeout(ViewNumber::new(2))); - input.push(HotShotEvent::Timeout(ViewNumber::new(3))); + input.push(HotShotEvent::Timeout( + ViewNumber::new(2), + EpochNumber::new(0), + )); + input.push(HotShotEvent::Timeout( + ViewNumber::new(3), + EpochNumber::new(0), + )); input.push(HotShotEvent::Shutdown); diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index 875e5b40e9..b99961af3e 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -52,6 +52,8 @@ use tracing::{instrument, trace}; pub struct Test; impl NodeType for Test { + const EPOCH_HEIGHT: u64 = 10; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 0c2e8d936f..5a94692be4 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -21,7 +21,7 @@ use vec1::Vec1; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare}, + data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2}, error::HotShotError, event::{HotShotAction, LeafInfo}, message::Proposal, @@ -46,7 +46,7 @@ pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` pub type VidShares = BTreeMap< ::View, - HashMap<::SignatureKey, Proposal>>, + HashMap<::SignatureKey, Proposal>>, >; /// Type alias for consensus state wrapped in a lock. @@ -632,10 +632,14 @@ impl Consensus { pub fn update_da_view( &mut self, view_number: TYPES::View, + epoch: TYPES::Epoch, payload_commitment: VidCommitment, ) -> Result<()> { let view = View { - view_inner: ViewInner::Da { payload_commitment }, + view_inner: ViewInner::Da { + payload_commitment, + epoch, + }, }; self.update_validated_state_map(view_number, view) } @@ -652,11 +656,13 @@ impl Consensus { delta: Option>::Delta>>, ) -> Result<()> { let view_number = leaf.view_number(); + let epoch = TYPES::Epoch::new(epoch_from_block_number(leaf.height(), self.epoch_height)); let view = View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), state, delta, + epoch, }, }; self.update_validated_state_map(view_number, view)?; @@ -738,7 +744,7 @@ impl Consensus { pub fn update_vid_shares( &mut self, view_number: TYPES::View, - disperse: Proposal>, + disperse: Proposal>, ) { self.vid_shares .entry(view_number) @@ -901,11 +907,17 @@ impl Consensus { view: ::View, membership: Arc, private_key: &::PrivateKey, - epoch: TYPES::Epoch, ) -> Option<()> { let txns = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); + let epoch = consensus + .read() + .await + .validated_state_map() + .get(&view)? + .view_inner + .epoch()?; let vid = VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, None).await; - let shares = VidDisperseShare::from_vid_disperse(vid); + let shares = VidDisperseShare2::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; for share in shares { if let Some(prop) = share.to_proposal(private_key) { @@ -1005,6 +1017,20 @@ impl Consensus { } } + /// Returns true if our high QC is for the last block in the epoch + pub fn is_high_qc_for_last_block(&self) -> bool { + let Some(leaf) = self.saved_leaves.get(&self.high_qc().data.leaf_commit) else { + tracing::trace!("We don't have a leaf corresponding to the high QC"); + return false; + }; + let block_height = leaf.height(); + if block_height == 0 || self.epoch_height == 0 { + false + } else { + block_height % self.epoch_height == 0 + } + } + /// Returns true if the `parent_leaf` formed an eQC for the previous epoch to the `proposed_leaf` pub fn check_eqc(&self, proposed_leaf: &Leaf2, parent_leaf: &Leaf2) -> bool { if parent_leaf.view_number() == TYPES::View::genesis() { diff --git a/types/src/data.rs b/types/src/data.rs index b6ec66f16a..73acf9d6f0 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -31,12 +31,13 @@ use vec1::Vec1; use crate::{ drb::{DrbResult, DrbSeedInput, INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, + impl_has_epoch, message::{Proposal, UpgradeLock}, simple_certificate::{ - QuorumCertificate, QuorumCertificate2, TimeoutCertificate, UpgradeCertificate, - ViewSyncFinalizeCertificate, + QuorumCertificate, QuorumCertificate2, TimeoutCertificate2, UpgradeCertificate, + ViewSyncFinalizeCertificate2, }, - simple_vote::{QuorumData, UpgradeProposalData, VersionedVoteData}, + simple_vote::{HasEpoch, QuorumData, QuorumData2, UpgradeProposalData, VersionedVoteData}, traits::{ block_contents::{ vid_commitment, BlockHeader, BuilderFee, EncodeBytes, TestableBlock, @@ -48,7 +49,7 @@ use crate::{ states::TestableState, BlockPayload, }, - utils::bincode_opts, + utils::{bincode_opts, epoch_from_block_number}, vid::{vid_scheme, VidCommitment, VidCommon, VidPrecomputeData, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; @@ -158,7 +159,7 @@ pub struct DaProposal2 { /// View this proposal applies to pub view_number: TYPES::View, /// Epoch this proposal applies to - pub epoch_number: TYPES::Epoch, + pub epoch: TYPES::Epoch, } impl From> for DaProposal2 { @@ -167,7 +168,7 @@ impl From> for DaProposal2 { encoded_transactions: da_proposal.encoded_transactions, metadata: da_proposal.metadata, view_number: da_proposal.view_number, - epoch_number: TYPES::Epoch::new(0), + epoch: TYPES::Epoch::new(0), } } } @@ -204,6 +205,8 @@ where pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::View, + /// The epoch number for which this VID data is intended + pub epoch: TYPES::Epoch, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -230,6 +233,7 @@ impl VidDisperse { Self { view_number, + epoch, shares, common: vid_disperse.common, payload_commitment: vid_disperse.commit, @@ -272,9 +276,9 @@ impl VidDisperse { #[serde(bound(deserialize = ""))] pub enum ViewChangeEvidence { /// Holds a timeout certificate. - Timeout(TimeoutCertificate), + Timeout(TimeoutCertificate2), /// Holds a view sync finalized certificate. - ViewSync(ViewSyncFinalizeCertificate), + ViewSync(ViewSyncFinalizeCertificate2), } impl ViewChangeEvidence { @@ -308,12 +312,157 @@ impl VidDisperseShare { vid_disperse .shares .into_iter() - .map(|(recipient_key, share)| VidDisperseShare { + .map(|(recipient_key, share)| Self { + share, + recipient_key, + view_number: vid_disperse.view_number, + common: vid_disperse.common.clone(), + payload_commitment: vid_disperse.payload_commitment, + }) + .collect() + } + + /// Consume `self` and return a `Proposal` + pub fn to_proposal( + self, + private_key: &::PrivateKey, + ) -> Option> { + let Ok(signature) = + TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) + else { + error!("VID: failed to sign dispersal share payload"); + return None; + }; + Some(Proposal { + signature, + _pd: PhantomData, + data: self, + }) + } + + /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s + pub fn to_vid_disperse<'a, I>(mut it: I) -> Option> + where + I: Iterator, + { + let first_vid_disperse_share = it.next()?.clone(); + let mut share_map = BTreeMap::new(); + share_map.insert( + first_vid_disperse_share.recipient_key, + first_vid_disperse_share.share, + ); + let mut vid_disperse = VidDisperse { + view_number: first_vid_disperse_share.view_number, + epoch: TYPES::Epoch::new(0), + payload_commitment: first_vid_disperse_share.payload_commitment, + common: first_vid_disperse_share.common, + shares: share_map, + }; + let _ = it.map(|vid_disperse_share| { + vid_disperse.shares.insert( + vid_disperse_share.recipient_key.clone(), + vid_disperse_share.share.clone(), + ) + }); + Some(vid_disperse) + } + + /// Split a VID share proposal into a proposal for each recipient. + pub fn to_vid_share_proposals( + vid_disperse_proposal: Proposal>, + ) -> Vec> { + vid_disperse_proposal + .data + .shares + .into_iter() + .map(|(recipient_key, share)| Proposal { + data: Self { + share, + recipient_key, + view_number: vid_disperse_proposal.data.view_number, + common: vid_disperse_proposal.data.common.clone(), + payload_commitment: vid_disperse_proposal.data.payload_commitment, + }, + signature: vid_disperse_proposal.signature.clone(), + _pd: vid_disperse_proposal._pd, + }) + .collect() + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +/// VID share and associated metadata for a single node +pub struct VidDisperseShare2 { + /// The view number for which this VID data is intended + pub view_number: TYPES::View, + /// The epoch number for which this VID data is intended + pub epoch: TYPES::Epoch, + /// Block payload commitment + pub payload_commitment: VidCommitment, + /// A storage node's key and its corresponding VID share + pub share: VidShare, + /// VID common data sent to all storage nodes + pub common: VidCommon, + /// a public key of the share recipient + pub recipient_key: TYPES::SignatureKey, +} + +impl From> for VidDisperseShare { + fn from(vid_disperse2: VidDisperseShare2) -> Self { + let VidDisperseShare2 { + view_number, + epoch: _, + payload_commitment, + share, + common, + recipient_key, + } = vid_disperse2; + + Self { + view_number, + payload_commitment, + share, + common, + recipient_key, + } + } +} + +impl From> for VidDisperseShare2 { + fn from(vid_disperse: VidDisperseShare) -> Self { + let VidDisperseShare { + view_number, + payload_commitment, + share, + common, + recipient_key, + } = vid_disperse; + + Self { + view_number, + epoch: TYPES::Epoch::new(0), + payload_commitment, + share, + common, + recipient_key, + } + } +} + +impl VidDisperseShare2 { + /// Create a vector of `VidDisperseShare` from `VidDisperse` + pub fn from_vid_disperse(vid_disperse: VidDisperse) -> Vec { + let epoch = vid_disperse.epoch; + vid_disperse + .shares + .into_iter() + .map(|(recipient_key, share)| Self { share, recipient_key, view_number: vid_disperse.view_number, common: vid_disperse.common.clone(), payload_commitment: vid_disperse.payload_commitment, + epoch, }) .collect() } @@ -339,9 +488,10 @@ impl VidDisperseShare { /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s pub fn to_vid_disperse<'a, I>(mut it: I) -> Option> where - I: Iterator>, + I: Iterator, { let first_vid_disperse_share = it.next()?.clone(); + let epoch = first_vid_disperse_share.epoch; let mut share_map = BTreeMap::new(); share_map.insert( first_vid_disperse_share.recipient_key, @@ -349,6 +499,7 @@ impl VidDisperseShare { ); let mut vid_disperse = VidDisperse { view_number: first_vid_disperse_share.view_number, + epoch, payload_commitment: first_vid_disperse_share.payload_commitment, common: first_vid_disperse_share.common, shares: share_map, @@ -365,18 +516,20 @@ impl VidDisperseShare { /// Split a VID share proposal into a proposal for each recipient. pub fn to_vid_share_proposals( vid_disperse_proposal: Proposal>, - ) -> Vec>> { + ) -> Vec> { + let epoch = vid_disperse_proposal.data.epoch; vid_disperse_proposal .data .shares .into_iter() .map(|(recipient_key, share)| Proposal { - data: VidDisperseShare { + data: Self { share, recipient_key, view_number: vid_disperse_proposal.data.view_number, common: vid_disperse_proposal.data.common.clone(), payload_commitment: vid_disperse_proposal.data.payload_commitment, + epoch, }, signature: vid_disperse_proposal.signature.clone(), _pd: vid_disperse_proposal._pd, @@ -472,6 +625,7 @@ impl From> for Leaf2 { Self { view_number: leaf.view_number, + epoch: TYPES::Epoch::genesis(), justify_qc: leaf.justify_qc.to_qc2(), parent_commitment: Commitment::from_raw(bytes), block_header: leaf.block_header, @@ -508,6 +662,12 @@ impl HasViewNumber for VidDisperseShare { } } +impl HasViewNumber for VidDisperseShare2 { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + impl HasViewNumber for QuorumProposal { fn view_number(&self) -> TYPES::View { self.view_number @@ -526,6 +686,12 @@ impl HasViewNumber for UpgradeProposal { } } +impl_has_epoch!( + DaProposal2, + VidDisperse, + VidDisperseShare2 +); + /// The error type for block and its transactions. #[derive(Error, Debug, Serialize, Deserialize)] pub enum BlockError { @@ -587,6 +753,9 @@ pub struct Leaf2 { /// CurView from leader when proposing leaf view_number: TYPES::View, + /// An epoch to which the data belongs to. Relevant for validating against the correct stake table + epoch: TYPES::Epoch, + /// Per spec, justification justify_qc: QuorumCertificate2, @@ -622,10 +791,67 @@ pub struct Leaf2 { } impl Leaf2 { + /// Create a new leaf from its components. + /// + /// # Panics + /// + /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be + /// interpreted as bytes). + #[must_use] + pub async fn genesis( + validated_state: &TYPES::ValidatedState, + instance_state: &TYPES::InstanceState, + ) -> Self { + let (payload, metadata) = + TYPES::BlockPayload::from_transactions([], validated_state, instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + + let block_header = TYPES::BlockHeader::genesis( + instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData2 { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + epoch: TYPES::Epoch::genesis(), + }; + + let justify_qc = QuorumCertificate2::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + ::genesis(), + None, + PhantomData, + ); + + Self { + view_number: TYPES::View::genesis(), + justify_qc, + parent_commitment: null_quorum_data.leaf_commit, + upgrade_certificate: None, + block_header: block_header.clone(), + block_payload: Some(payload), + epoch: TYPES::Epoch::genesis(), + view_change_evidence: None, + drb_seed: [0; 32], + drb_result: [0; 32], + } + } /// Time when this leaf was created. pub fn view_number(&self) -> TYPES::View { self.view_number } + /// Epoch in which this leaf was created. + pub fn epoch(&self) -> TYPES::Epoch { + self.epoch + } /// Height of this leaf in the chain. /// /// Equivalently, this is the number of leaves before this one in the chain. @@ -738,25 +964,13 @@ impl Leaf2 { impl Committable for Leaf2 { fn commit(&self) -> committable::Commitment { - if self.drb_seed == [0; 32] && self.drb_result == [0; 32] { - RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", *self.view_number) - .field("parent leaf commitment", self.parent_commitment) - .field("block header", self.block_header.commit()) - .field("justify qc", self.justify_qc.commit()) - .optional("upgrade certificate", &self.upgrade_certificate) - .finalize() - } else { - RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", *self.view_number) - .field("parent leaf commitment", self.parent_commitment) - .field("block header", self.block_header.commit()) - .field("justify qc", self.justify_qc.commit()) - .optional("upgrade certificate", &self.upgrade_certificate) - .fixed_size_bytes(&self.drb_seed) - .fixed_size_bytes(&self.drb_result) - .finalize() - } + RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", *self.view_number) + .field("parent leaf commitment", self.parent_commitment) + .field("block header", self.block_header.commit()) + .field("justify qc", self.justify_qc.commit()) + .optional("upgrade certificate", &self.upgrade_certificate) + .finalize() } } @@ -785,6 +999,7 @@ impl PartialEq for Leaf2 { fn eq(&self, other: &Self) -> bool { let Leaf2 { view_number, + epoch, justify_qc, parent_commitment, block_header, @@ -796,6 +1011,7 @@ impl PartialEq for Leaf2 { } = self; *view_number == other.view_number + && *epoch == other.epoch && *justify_qc == other.justify_qc && *parent_commitment == other.parent_commitment && *block_header == other.block_header @@ -872,6 +1088,41 @@ impl QuorumCertificate { } } +impl QuorumCertificate2 { + #[must_use] + /// Creat the Genesis certificate + pub async fn genesis( + validated_state: &TYPES::ValidatedState, + instance_state: &TYPES::InstanceState, + ) -> Self { + // since this is genesis, we should never have a decided upgrade certificate. + let upgrade_lock = UpgradeLock::::new(); + + let genesis_view = ::genesis(); + + let data = QuorumData2 { + leaf_commit: Leaf2::genesis(validated_state, instance_state) + .await + .commit(), + epoch: TYPES::Epoch::genesis(), + }; + + let versioned_data = + VersionedVoteData::<_, _, V>::new_infallible(data.clone(), genesis_view, &upgrade_lock) + .await; + + let bytes: [u8; 32] = versioned_data.commit().into(); + + Self::new( + data, + Commitment::from_raw(bytes), + genesis_view, + None, + PhantomData, + ) + } +} + impl Leaf { /// Create a new leaf from its components. /// @@ -1136,6 +1387,10 @@ impl Leaf2 { Self { view_number: *view_number, + epoch: TYPES::Epoch::new(epoch_from_block_number( + quorum_proposal.block_header.block_number(), + TYPES::EPOCH_HEIGHT, + )), justify_qc: justify_qc.clone(), parent_commitment: justify_qc.data().leaf_commit, block_header: block_header.clone(), diff --git a/types/src/event.rs b/types/src/event.rs index 0a79913d88..ae66da6d6d 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DaProposal2, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare}, + data::{DaProposal2, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare2}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate2, @@ -42,7 +42,7 @@ pub struct LeafInfo { /// Optional application-specific state delta. pub delta: Option::ValidatedState as ValidatedState>::Delta>>, /// Optional VID share data. - pub vid_share: Option>, + pub vid_share: Option>, } impl LeafInfo { @@ -51,7 +51,7 @@ impl LeafInfo { leaf: Leaf2, state: Arc<::ValidatedState>, delta: Option::ValidatedState as ValidatedState>::Delta>>, - vid_share: Option>, + vid_share: Option>, ) -> Self { Self { leaf, diff --git a/types/src/message.rs b/types/src/message.rs index 062ae9c96f..0f36bae2de 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -27,24 +27,27 @@ use vbs::{ use crate::{ data::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, - VidDisperseShare, + VidDisperseShare, VidDisperseShare2, }, request_response::ProposalRequestPayload, simple_certificate::{ DaCertificate, DaCertificate2, QuorumCertificate2, UpgradeCertificate, - ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, + ViewSyncCommitCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote, DaVote2, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, - ViewSyncFinalizeVote, ViewSyncPreCommitVote, + DaVote, DaVote2, QuorumVote, QuorumVote2, TimeoutVote, TimeoutVote2, UpgradeVote, + ViewSyncCommitVote, ViewSyncCommitVote2, ViewSyncFinalizeVote, ViewSyncFinalizeVote2, + ViewSyncPreCommitVote, ViewSyncPreCommitVote2, }, traits::{ + block_contents::BlockHeader, election::Membership, network::{DataRequest, ResponseMessage, ViewMessage}, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, - utils::mnemonic, + utils::{epoch_from_block_number, mnemonic}, vote::HasViewNumber, }; @@ -201,9 +204,6 @@ pub enum GeneralConsensusMessage { /// Message with an upgrade vote UpgradeVote(UpgradeVote), - /// Message for the next leader containing our highest QC - HighQc(QuorumCertificate2), - /// Message with a quorum proposal. Proposal2(Proposal>), @@ -220,7 +220,31 @@ pub enum GeneralConsensusMessage { ProposalResponse(Proposal>), /// A replica has responded with a valid proposal. - Proposal2Response(Proposal>), + ProposalResponse2(Proposal>), + + /// Message for the next leader containing our highest QC + HighQc(QuorumCertificate2), + + /// Message with a view sync pre-commit vote + ViewSyncPreCommitVote2(ViewSyncPreCommitVote2), + + /// Message with a view sync commit vote + ViewSyncCommitVote2(ViewSyncCommitVote2), + + /// Message with a view sync finalize vote + ViewSyncFinalizeVote2(ViewSyncFinalizeVote2), + + /// Message with a view sync pre-commit certificate + ViewSyncPreCommitCertificate2(ViewSyncPreCommitCertificate2), + + /// Message with a view sync commit certificate + ViewSyncCommitCertificate2(ViewSyncCommitCertificate2), + + /// Message with a view sync finalize certificate + ViewSyncFinalizeCertificate2(ViewSyncFinalizeCertificate2), + + /// Message with a Timeout vote + TimeoutVote2(TimeoutVote2), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -249,6 +273,11 @@ pub enum DaConsensusMessage { /// Certificate data is available DaCertificate2(DaCertificate2), + + /// Initiate VID dispersal. + /// + /// Like [`DaProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. + VidDisperseMsg2(Proposal>), } /// Messages for sequencing consensus. @@ -273,14 +302,20 @@ impl SequencingMessage { // this should match replica upon receipt p.data.view_number() } + GeneralConsensusMessage::Proposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.view_number() + } GeneralConsensusMessage::ProposalRequested(req, _) => req.view_number, GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() } - GeneralConsensusMessage::Proposal2Response(proposal) => { + GeneralConsensusMessage::ProposalResponse2(proposal) => { proposal.data.view_number() } GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), + GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { message.view_number() @@ -296,15 +331,26 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.view_number() } + GeneralConsensusMessage::TimeoutVote2(message) => message.view_number(), + GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => { + message.view_number() + } + GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.view_number(), + GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => { + message.view_number() + } + GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { + message.view_number() + } + GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => { + message.view_number() + } + GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { + message.view_number() + } GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), GeneralConsensusMessage::HighQc(qc) => qc.view_number(), - GeneralConsensusMessage::Proposal2(p) => { - // view of leader in the leaf when proposal - // this should match replica upon receipt - p.data.view_number() - } - GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), } } SequencingMessage::Da(da_message) => { @@ -317,6 +363,7 @@ impl SequencingMessage { DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), + DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.view_number(), DaConsensusMessage::DaProposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt @@ -386,11 +433,15 @@ where pub async fn validate_signature( &self, quorum_membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch_height: u64, upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); - let view_leader_key = quorum_membership.leader(view_number, epoch)?; + let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( + self.data.block_header.block_number(), + epoch_height, + )); + let view_leader_key = quorum_membership.leader(view_number, proposal_epoch)?; let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( @@ -415,10 +466,14 @@ where pub fn validate_signature( &self, quorum_membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch_height: u64, ) -> Result<()> { let view_number = self.data.view_number(); - let view_leader_key = quorum_membership.leader(view_number, epoch)?; + let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( + self.data.block_header.block_number(), + epoch_height, + )); + let view_leader_key = quorum_membership.leader(view_number, proposal_epoch)?; let proposed_leaf = Leaf2::from_quorum_proposal(&self.data); ensure!( diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index f88e549ede..91d5f00d7a 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -87,7 +87,11 @@ impl Threshold for UpgradeThreshold { /// A certificate which can be created by aggregating many simple votes on the commitment. #[derive(Serialize, Deserialize, Eq, Hash, PartialEq, Debug, Clone)] -pub struct SimpleCertificate> { +pub struct SimpleCertificate< + TYPES: NodeType, + VOTEABLE: Voteable, + THRESHOLD: Threshold, +> { /// The data this certificate is for. I.e the thing that was voted on to create this Certificate pub data: VOTEABLE, /// commitment of all the votes this cert should be signed over @@ -100,7 +104,7 @@ pub struct SimpleCertificate, } -impl> +impl, THRESHOLD: Threshold> SimpleCertificate { /// Creates a new instance of `SimpleCertificate` @@ -121,8 +125,8 @@ impl> } } -impl> Committable - for SimpleCertificate +impl + Committable, THRESHOLD: Threshold> + Committable for SimpleCertificate { fn commit(&self) -> Commitment { let signature_bytes = match self.signatures.as_ref() { @@ -314,8 +318,11 @@ impl> Certificate> - Certificate for SimpleCertificate +impl< + TYPES: NodeType, + VOTEABLE: Voteable + 'static + QuorumMarker, + THRESHOLD: Threshold, + > Certificate for SimpleCertificate { type Voteable = VOTEABLE; type Threshold = THRESHOLD; @@ -403,7 +410,7 @@ impl> +impl + 'static, THRESHOLD: Threshold> HasViewNumber for SimpleCertificate { fn view_number(&self) -> TYPES::View { @@ -477,7 +484,7 @@ impl QuorumCertificate { let bytes: [u8; 32] = self.data.leaf_commit.into(); let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), - epoch: TYPES::Epoch::new(0), + epoch: TYPES::Epoch::genesis(), }; let bytes: [u8; 32] = self.vote_commitment.into(); diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 98af9fe67d..1211756644 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -158,11 +158,19 @@ pub struct UpgradeData2 { /// Marker trait for data or commitments that can be voted on. /// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait /// Sealing this trait prevents creating new vote types outside this file. -pub trait Voteable: +pub trait Voteable: sealed::Sealed + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq { } +/// Marker trait for data or commitments that can be voted on. +/// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait +/// Sealing this trait prevents creating new vote types outside this file. +pub trait Voteable2: + sealed::Sealed + HasEpoch + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq +{ +} + /// Sealed is used to make sure no other files can implement the Voteable trait. /// All simple voteable types should be implemented here. This prevents us from /// creating/using improper types when using the vote types. @@ -190,7 +198,7 @@ impl QuorumMarker for UpgradeProposalData {} /// A simple yes vote over some votable type. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] -pub struct SimpleVote { +pub struct SimpleVote> { /// The signature share associated with this vote pub signature: ( TYPES::SignatureKey, @@ -202,13 +210,15 @@ pub struct SimpleVote { pub view_number: TYPES::View, } -impl HasViewNumber for SimpleVote { +impl + 'static> HasViewNumber + for SimpleVote +{ fn view_number(&self) -> ::View { self.view_number } } -impl Vote for SimpleVote { +impl + 'static> Vote for SimpleVote { type Commitment = DATA; fn signing_key(&self) -> ::SignatureKey { @@ -228,7 +238,7 @@ impl Vote for SimpleVote SimpleVote { +impl + 'static> SimpleVote { /// Creates and signs a simple vote /// # Errors /// If we are unable to sign the data @@ -260,7 +270,7 @@ impl SimpleVote { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// A wrapper for vote data that carries a view number and an `upgrade_lock`, allowing switching the commitment calculation dynamically depending on the version -pub struct VersionedVoteData { +pub struct VersionedVoteData, V: Versions> { /// underlying vote data data: DATA, @@ -274,7 +284,7 @@ pub struct VersionedVoteData { _pd: PhantomData, } -impl VersionedVoteData { +impl, V: Versions> VersionedVoteData { /// Create a new `VersionedVoteData` struct /// /// # Errors @@ -314,7 +324,7 @@ impl VersionedVoteData Committable +impl, V: Versions> Committable for VersionedVoteData { fn commit(&self) -> Commitment { @@ -335,18 +345,14 @@ impl Committable for QuorumData { impl Committable for QuorumData2 { fn commit(&self) -> Commitment { - let QuorumData2 { leaf_commit, epoch } = self; - - if **epoch == 0 { - committable::RawCommitmentBuilder::new("Quorum data") - .var_size_bytes(leaf_commit.as_ref()) - .finalize() - } else { - committable::RawCommitmentBuilder::new("Quorum data") - .var_size_bytes(leaf_commit.as_ref()) - .u64(**epoch) - .finalize() - } + let QuorumData2 { + leaf_commit, + epoch: _, + } = self; + + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()) + .finalize() } } @@ -360,18 +366,11 @@ impl Committable for TimeoutData { impl Committable for TimeoutData2 { fn commit(&self) -> Commitment { - let TimeoutData2 { view, epoch } = self; - - if **epoch == 0 { - committable::RawCommitmentBuilder::new("Timeout data") - .u64(**view) - .finalize() - } else { - committable::RawCommitmentBuilder::new("Timeout data") - .u64(**view) - .u64(**epoch) - .finalize() - } + let TimeoutData2 { view, epoch: _ } = self; + + committable::RawCommitmentBuilder::new("Timeout data") + .u64(**view) + .finalize() } } @@ -387,18 +386,12 @@ impl Committable for DaData2 { fn commit(&self) -> Commitment { let DaData2 { payload_commit, - epoch, + epoch: _, } = self; - if **epoch == 0 { - committable::RawCommitmentBuilder::new("DA data") - .var_size_bytes(payload_commit.as_ref()) - .finalize() - } else { - committable::RawCommitmentBuilder::new("DA data") - .var_size_bytes(payload_commit.as_ref()) - .u64(**epoch) - .finalize() - } + + committable::RawCommitmentBuilder::new("DA data") + .var_size_bytes(payload_commit.as_ref()) + .finalize() } } @@ -459,18 +452,10 @@ impl Committable for ViewSyncPreCommitData2 { let ViewSyncPreCommitData2 { relay, round, - epoch, + epoch: _, } = self; - if **epoch == 0 { - view_and_relay_commit::(*round, *relay, "View Sync Precommit") - } else { - committable::RawCommitmentBuilder::new("View Sync Precommit") - .u64(*relay) - .u64(**round) - .u64(**epoch) - .finalize() - } + view_and_relay_commit::(*round, *relay, "View Sync Precommit") } } @@ -485,18 +470,10 @@ impl Committable for ViewSyncFinalizeData2 { let ViewSyncFinalizeData2 { relay, round, - epoch, + epoch: _, } = self; - if **epoch == 0 { - view_and_relay_commit::(*round, *relay, "View Sync Finalize") - } else { - committable::RawCommitmentBuilder::new("View Sync Finalize") - .u64(*relay) - .u64(**round) - .u64(**epoch) - .finalize() - } + view_and_relay_commit::(*round, *relay, "View Sync Finalize") } } @@ -511,25 +488,65 @@ impl Committable for ViewSyncCommitData2 { let ViewSyncCommitData2 { relay, round, - epoch, + epoch: _, } = self; - if **epoch == 0 { - view_and_relay_commit::(*round, *relay, "View Sync Commit") - } else { - committable::RawCommitmentBuilder::new("View Sync Commit") - .u64(*relay) - .u64(**round) - .u64(**epoch) - .finalize() - } + view_and_relay_commit::(*round, *relay, "View Sync Commit") } } +/// A trait for types belonging for specific epoch +pub trait HasEpoch { + /// Returns `Epoch` + fn epoch(&self) -> TYPES::Epoch; +} + +/// Helper macro for trivial implementation of the `HasEpoch` trait +#[macro_export] +macro_rules! impl_has_epoch { + ($($t:ty),*) => { + $( + impl HasEpoch for $t { + fn epoch(&self) -> TYPES::Epoch { + self.epoch + } + } + )* + }; +} + +impl_has_epoch!( + QuorumData2, + DaData2, + TimeoutData2, + ViewSyncPreCommitData2, + ViewSyncCommitData2, + ViewSyncFinalizeData2 +); + // impl votable for all the data types in this file sealed marker should ensure nothing is accidentally // implemented for structs that aren't "voteable" -impl Voteable - for V +impl< + TYPES: NodeType, + V: sealed::Sealed + Committable + Clone + Serialize + Debug + PartialEq + Hash + Eq, + > Voteable for V +{ +} + +// impl votable for all the data types in this file sealed marker should ensure nothing is accidently +// implemented for structs that aren't "voteable" +impl< + TYPES: NodeType, + V: sealed::Sealed + + HasEpoch + + Committable + + Clone + + Serialize + + Debug + + PartialEq + + Hash + + Eq, + > Voteable2 for V { } @@ -541,7 +558,7 @@ impl QuorumVote { let signature = self.signature; let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), - epoch: TYPES::Epoch::new(0), + epoch: TYPES::Epoch::genesis(), }; let view_number = self.view_number; @@ -607,6 +624,152 @@ impl DaVote2 { } } +impl TimeoutVote { + /// Convert a `TimeoutVote` to a `TimeoutVote2` + pub fn to_vote2(self) -> TimeoutVote2 { + let signature = self.signature; + let data = TimeoutData2 { + view: self.data.view, + epoch: TYPES::Epoch::new(0), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl TimeoutVote2 { + /// Convert a `QuorumVote2` to a `QuorumVote` + pub fn to_vote(self) -> TimeoutVote { + let signature = self.signature; + let data = TimeoutData { + view: self.data.view, + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl ViewSyncPreCommitVote { + /// Convert a `ViewSyncPreCommitVote` to a `ViewSyncPreCommitVote2` + pub fn to_vote2(self) -> ViewSyncPreCommitVote2 { + let signature = self.signature; + let data = ViewSyncPreCommitData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl ViewSyncPreCommitVote2 { + /// Convert a `ViewSyncPreCommitVote2` to a `ViewSyncPreCommitVote` + pub fn to_vote(self) -> ViewSyncPreCommitVote { + let signature = self.signature; + let data = ViewSyncPreCommitData { + relay: self.data.relay, + round: self.data.round, + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl ViewSyncCommitVote { + /// Convert a `ViewSyncCommitVote` to a `ViewSyncCommitVote2` + pub fn to_vote2(self) -> ViewSyncCommitVote2 { + let signature = self.signature; + let data = ViewSyncCommitData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl ViewSyncCommitVote2 { + /// Convert a `ViewSyncCommitVote2` to a `ViewSyncCommitVote` + pub fn to_vote(self) -> ViewSyncCommitVote { + let signature = self.signature; + let data = ViewSyncCommitData { + relay: self.data.relay, + round: self.data.round, + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl ViewSyncFinalizeVote { + /// Convert a `ViewSyncFinalizeVote` to a `ViewSyncFinalizeVote2` + pub fn to_vote2(self) -> ViewSyncFinalizeVote2 { + let signature = self.signature; + let data = ViewSyncFinalizeData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl ViewSyncFinalizeVote2 { + /// Convert a `ViewSyncFinalizeVote2` to a `ViewSyncFinalizeVote` + pub fn to_vote(self) -> ViewSyncFinalizeVote { + let signature = self.signature; + let data = ViewSyncFinalizeData { + relay: self.data.relay, + round: self.data.round, + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file /// Quorum vote Alias diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 77a4971e24..f783c95dec 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -212,6 +212,8 @@ pub trait NodeType: type View: ConsensusTime + Display; /// Same as above but for epoch. type Epoch: ConsensusTime + Display; + /// constant for epoch height + const EPOCH_HEIGHT: u64; /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: Debug diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index a0e226cdbd..7d3aa42241 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -20,6 +20,7 @@ use crate::{ consensus::{CommitmentMap, View}, data::{ DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, }, event::HotShotAction, message::Proposal, @@ -32,6 +33,9 @@ use crate::{ pub trait Storage: Send + Sync + Clone { /// Add a proposal to the stored VID proposals. async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; + /// Add a proposal to the stored VID proposals. + async fn append_vid2(&self, proposal: &Proposal>) + -> Result<()>; /// Add a proposal to the stored DA proposals. async fn append_da( &self, diff --git a/types/src/utils.rs b/types/src/utils.rs index c4a4dfb927..62503f08b5 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -45,6 +45,8 @@ pub enum ViewInner { Da { /// Payload commitment to the available block. payload_commitment: VidCommitment, + /// An epoch to which the data belongs to. Relevant for validating against the correct stake table + epoch: TYPES::Epoch, }, /// Undecided view Leaf { @@ -54,6 +56,8 @@ pub enum ViewInner { state: Arc, /// Optional state delta. delta: Option>::Delta>>, + /// An epoch to which the data belongs to. Relevant for validating against the correct stake table + epoch: TYPES::Epoch, }, /// Leaf has failed Failed, @@ -61,13 +65,23 @@ pub enum ViewInner { impl Clone for ViewInner { fn clone(&self) -> Self { match self { - Self::Da { payload_commitment } => Self::Da { + Self::Da { + payload_commitment, + epoch, + } => Self::Da { payload_commitment: *payload_commitment, + epoch: *epoch, }, - Self::Leaf { leaf, state, delta } => Self::Leaf { + Self::Leaf { + leaf, + state, + delta, + epoch, + } => Self::Leaf { leaf: *leaf, state: Arc::clone(state), delta: delta.clone(), + epoch: *epoch, }, Self::Failed => Self::Failed, } @@ -126,12 +140,23 @@ impl ViewInner { /// return the underlying block paylod commitment if it exists #[must_use] pub fn payload_commitment(&self) -> Option { - if let Self::Da { payload_commitment } = self { + if let Self::Da { + payload_commitment, .. + } = self + { Some(*payload_commitment) } else { None } } + + /// Returns `Epoch` if possible + pub fn epoch(&self) -> Option { + match self { + Self::Da { epoch, .. } | Self::Leaf { epoch, .. } => Some(*epoch), + Self::Failed => None, + } + } } impl Deref for View { @@ -234,3 +259,12 @@ pub fn mnemonic(bytes: H) -> String { bytes.hash(&mut state); mnemonic::to_string(state.finish().to_le_bytes()) } + +/// A helper enum to indicate whether a node is in the epoch transition +/// A node is in epoch transition when its high QC is for the last block in an epoch +pub enum EpochTransitionIndicator { + /// A node is currently in the epoch transition + InTransition, + /// A node is not in the epoch transition + NotInTransition, +} diff --git a/types/src/vid.rs b/types/src/vid.rs index b36eaf2020..a5462a56c1 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -37,7 +37,7 @@ use sha2::Sha256; use crate::{ constants::SRS_DEGREE, - data::{VidDisperse as HotShotVidDisperse, VidDisperseShare}, + data::{VidDisperse as HotShotVidDisperse, VidDisperseShare2}, message::Proposal, }; @@ -114,7 +114,7 @@ pub type VidPrecomputeData = ::PrecomputeData; /// VID proposal type pub type VidProposal = ( Proposal>, - Vec>>, + Vec>>, ); #[cfg(not(feature = "gpu-vid"))] diff --git a/types/src/vote.rs b/types/src/vote.rs index 13112afa12..e70dbe41a3 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -33,7 +33,7 @@ use crate::{ /// A simple vote that has a signer and commitment to the data voted on. pub trait Vote: HasViewNumber { /// Type of data commitment this vote uses. - type Commitment: Voteable; + type Commitment: Voteable; /// Get the signature of the vote sender fn signature(&self) -> ::PureAssembledSignatureType; @@ -59,7 +59,7 @@ The votes all must be over the `Commitment` associated type. */ pub trait Certificate: HasViewNumber { /// The data commitment this certificate certifies. - type Voteable: Voteable; + type Voteable: Voteable; /// Threshold Functions type Threshold: Threshold; From 2e8696570e5cf016ddbb5e416f3753baa8bb57de Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Thu, 12 Dec 2024 13:49:24 -0800 Subject: [PATCH 1332/1393] fixed a couple of typos (#3963) --- examples/infra/mod.rs | 2 +- task-impls/src/network.rs | 2 +- task-impls/src/quorum_vote/handlers.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 2462215dd9..88a2ff900b 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -736,7 +736,7 @@ where .to_string() }; - // Create the qurorum membership from the list of known nodes + // Create the quorum membership from the list of known nodes let all_nodes = config.config.known_nodes_with_stake.clone(); let da_nodes = config.config.known_da_nodes.clone(); let quorum_membership = TYPES::Membership::new(all_nodes, da_nodes); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 875e8cd72c..2d459013b6 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -55,7 +55,7 @@ pub struct NetworkMessageTaskState { /// This nodes public key pub public_key: TYPES::SignatureKey, - /// Transaction Cache to ignore previously seen transatctions + /// Transaction Cache to ignore previously seen transactions pub transactions_cache: lru::LruCache, } diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index c98f65a63e..98f8c5561d 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -285,7 +285,7 @@ pub(crate) async fn update_shared_state< let justify_qc = &proposed_leaf.justify_qc(); let consensus_reader = consensus.read().await; - // Try to find the validated vview within the validasted state map. This will be present + // Try to find the validated view within the validated state map. This will be present // if we have the saved leaf, but if not we'll get it when we fetch_proposal. let mut maybe_validated_view = parent_view_number.and_then(|view_number| { consensus_reader From a3397b42487f621b1dc091a28bf5883d8fe95394 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 16 Dec 2024 13:06:38 +0100 Subject: [PATCH 1333/1393] Fix the transmit task cancelling logic (#3960) * Keep the task for view one less than the current. We might still be transmitting * Use saturating_sub consistently * Fix compiler errors and lints --- task-impls/src/network.rs | 6 +++--- task-impls/src/quorum_proposal/mod.rs | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 2d459013b6..0b9b536a15 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -804,13 +804,13 @@ impl< if epoch > self.epoch { self.epoch = epoch; } - self.cancel_tasks(view); + let keep_view = TYPES::View::new(view.saturating_sub(1)); + self.cancel_tasks(keep_view); let net = Arc::clone(&self.network); let epoch = self.epoch.u64(); let mem = self.membership.clone(); spawn(async move { - net.update_view::(view.saturating_sub(1), epoch, &mem) - .await; + net.update_view::(*keep_view, epoch, &mem).await; }); None } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index ac1a9d7837..9eda9b13b4 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -533,10 +533,12 @@ impl, V: Versions> if epoch > &self.cur_epoch { self.cur_epoch = *epoch; } - self.cancel_tasks(*view); + let keep_view = TYPES::View::new(view.saturating_sub(1)); + self.cancel_tasks(keep_view); } HotShotEvent::Timeout(view, ..) => { - self.cancel_tasks(*view); + let keep_view = TYPES::View::new(view.saturating_sub(1)); + self.cancel_tasks(keep_view); } HotShotEvent::HighQcSend(qc, ..) => { ensure!(qc.view_number() > self.highest_qc.view_number()); From d6d1f3e5d4ae94e2737e10e45ce2c9e32d6fee91 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 18 Dec 2024 17:16:54 +0100 Subject: [PATCH 1334/1393] Lr/double quorum (#3922) * Initial commit * WIP: adding epoch to proposal and vote data, not compiling yet * Make it compile * Adjust tests * Add a test type for two stake tables for even and odd epochs * Debugging * Fix extended voting * Try "in epoch transition" approach * Continue debugging * Use correct epoch with Membership * Adjust tests and lints * Adapt to variable stake table after merge * Fix accidentally pulled bug in eQC rule * Commit includes epoch for vote and proposal data types * Prune dependencies (#3787) * add new message types and gate outgoing messages * Use the proper message for the proposal response * Modify commit for `Leaf2` and `QuorumData2` * Adjust tests * Clean up debug traces * Initial commit for double quorum * Add TODO * Next epoch nodes vote during epoch transition * Form the second QC at the end of an epoch * Allow early payload save but check that's it's the same * Attach next epoch justify qc to proposals * Validate the next epoch justify qc * Test with more network types * Fix fmt in tests * Use real threshold in the tests based on an epoch * Membership thresholds depend on an epoch * Make sure epoch transition proposals include the next epoch QC * Use epoch from vote and add more tests * Adjust marketplace ver number * Epochs without Marketplace and adjust tests * fix merge * Fixes after merge * Fix vid share handling * Submit transactions to the correct epoch * Address review comments * Use one lock to get two values --------- Co-authored-by: Artemii Gerasimovich Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- example-types/src/storage_types.rs | 28 ++++- hotshot/src/lib.rs | 16 ++- hotshot/src/tasks/task_state.rs | 2 + .../src/traits/election/static_committee.rs | 8 +- .../static_committee_leader_two_views.rs | 8 +- task-impls/src/consensus/handlers.rs | 40 +++++-- task-impls/src/consensus/mod.rs | 12 +- task-impls/src/da.rs | 38 +++--- task-impls/src/events.rs | 22 +++- task-impls/src/helpers.rs | 8 +- task-impls/src/network.rs | 46 +++++--- task-impls/src/quorum_proposal/handlers.rs | 85 +++++++++++++- task-impls/src/quorum_proposal/mod.rs | 33 +++++- .../src/quorum_proposal_recv/handlers.rs | 50 ++++++++ task-impls/src/quorum_vote/handlers.rs | 15 ++- task-impls/src/quorum_vote/mod.rs | 3 +- task-impls/src/request.rs | 2 +- task-impls/src/upgrade.rs | 4 +- task-impls/src/vid.rs | 75 +++++++++++- task-impls/src/view_sync.rs | 13 ++- task-impls/src/vote_collection.rs | 110 ++++++++++++++---- testing/src/helpers.rs | 1 + testing/src/overall_safety_task.rs | 37 +++++- testing/src/spinning_task.rs | 6 +- testing/src/test_runner.rs | 12 +- testing/src/view_generator.rs | 2 + testing/tests/tests_1/test_success.rs | 9 +- testing/tests/tests_1/vid_task.rs | 2 + types/src/consensus.rs | 52 ++++++--- types/src/data.rs | 49 ++++++-- types/src/simple_certificate.rs | 23 ++-- types/src/simple_vote.rs | 67 ++++++++++- types/src/traits/storage.rs | 9 +- types/src/utils.rs | 11 ++ types/src/vote.rs | 2 +- 35 files changed, 740 insertions(+), 160 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 4f72a930fb..b5f3610f21 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -20,7 +20,7 @@ use hotshot_types::{ }, event::HotShotAction, message::Proposal, - simple_certificate::{QuorumCertificate2, UpgradeCertificate}, + simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ node_implementation::{ConsensusTime, NodeType}, storage::Storage, @@ -52,6 +52,8 @@ pub struct TestStorageState { proposals2: BTreeMap>>, high_qc: Option>, high_qc2: Option>, + next_epoch_high_qc2: + Option>, action: TYPES::View, epoch: TYPES::Epoch, } @@ -66,6 +68,7 @@ impl Default for TestStorageState { proposals: BTreeMap::new(), proposals2: BTreeMap::new(), high_qc: None, + next_epoch_high_qc2: None, high_qc2: None, action: TYPES::View::genesis(), epoch: TYPES::Epoch::genesis(), @@ -112,6 +115,9 @@ impl TestStorage { pub async fn high_qc_cloned(&self) -> Option> { self.inner.read().await.high_qc2.clone() } + pub async fn next_epoch_high_qc_cloned(&self) -> Option> { + self.inner.read().await.next_epoch_high_qc2.clone() + } pub async fn decided_upgrade_certificate(&self) -> Option> { self.decided_upgrade_certificate.read().await.clone() } @@ -268,6 +274,26 @@ impl Storage for TestStorage { } Ok(()) } + async fn update_next_epoch_high_qc2( + &self, + new_next_epoch_high_qc: hotshot_types::simple_certificate::NextEpochQuorumCertificate2< + TYPES, + >, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to update next epoch high qc to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + if let Some(ref current_next_epoch_high_qc) = inner.next_epoch_high_qc2 { + if new_next_epoch_high_qc.view_number() > current_next_epoch_high_qc.view_number() { + inner.next_epoch_high_qc2 = Some(new_next_epoch_high_qc); + } + } else { + inner.next_epoch_high_qc2 = Some(new_next_epoch_high_qc); + } + Ok(()) + } async fn update_undecided_state( &self, _leaves: CommitmentMap>, diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 16fa47f31d..b76b3cc7c9 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -52,7 +52,7 @@ use hotshot_types::{ data::{Leaf2, QuorumProposal, QuorumProposal2}, event::{EventType, LeafInfo}, message::{convert_proposal, DataMessage, Message, MessageKind, Proposal}, - simple_certificate::{QuorumCertificate2, UpgradeCertificate}, + simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -344,6 +344,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext = @@ -518,7 +523,7 @@ impl, V: Versions> SystemContext { /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. high_qc: QuorumCertificate2, + /// Next epoch highest QC that was seen. This is needed to propose during epoch transition after restart. + next_epoch_high_qc: Option>, /// Previously decided upgrade certificate; this is necessary if an upgrade has happened and we are not restarting with the new version decided_upgrade_certificate: Option>, /// Undecided leaves that were seen, but not yet decided on. These allow a restarting node @@ -1030,6 +1037,7 @@ impl HotShotInitializer { actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, + next_epoch_high_qc: None, decided_upgrade_certificate: None, undecided_leaves: Vec::new(), undecided_state: BTreeMap::new(), @@ -1054,6 +1062,7 @@ impl HotShotInitializer { actioned_view: TYPES::View, saved_proposals: BTreeMap>>, high_qc: QuorumCertificate2, + next_epoch_high_qc: Option>, decided_upgrade_certificate: Option>, undecided_leaves: Vec>, undecided_state: BTreeMap>, @@ -1068,6 +1077,7 @@ impl HotShotInitializer { actioned_view, saved_proposals, high_qc, + next_epoch_high_qc, decided_upgrade_certificate, undecided_leaves, undecided_state, diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 2c8d8607a4..3213ce2ce2 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -132,6 +132,7 @@ impl, V: Versions> CreateTaskState public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, + epoch_height: handle.epoch_height, } } } @@ -318,6 +319,7 @@ impl, V: Versions> CreateTaskState network: Arc::clone(&handle.hotshot.network), membership: (*handle.hotshot.memberships).clone().into(), vote_collectors: BTreeMap::default(), + next_epoch_vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index d2b62f80b7..b6010c174d 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -215,22 +215,22 @@ impl Membership for StaticCommittee { } /// Get the voting success threshold for the committee - fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { let len = self.stake_table.len(); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 8833d06872..d2635cc273 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -217,22 +217,22 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch) -> NonZeroU64 { + fn success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index fbd1960ce4..3a9f44c34f 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -10,12 +10,13 @@ use async_broadcast::Sender; use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, - simple_vote::{QuorumVote2, TimeoutData2, TimeoutVote2}, + simple_vote::{HasEpoch, QuorumVote2, TimeoutData2, TimeoutVote2}, traits::{ election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, - vote::HasViewNumber, + utils::EpochTransitionIndicator, + vote::{HasViewNumber, Vote}, }; use tokio::{spawn, time::sleep}; use tracing::instrument; @@ -46,7 +47,7 @@ pub(crate) async fn handle_quorum_vote_recv< .is_high_qc_for_last_block(); let we_are_leader = task_state .membership - .leader(vote.view_number() + 1, task_state.cur_epoch)? + .leader(vote.view_number() + 1, vote.data.epoch)? == task_state.public_key; ensure!( in_transition || we_are_leader, @@ -56,20 +57,45 @@ pub(crate) async fn handle_quorum_vote_recv< ) ); + let transition_indicator = if in_transition { + EpochTransitionIndicator::InTransition + } else { + EpochTransitionIndicator::NotInTransition + }; handle_vote( &mut task_state.vote_collectors, vote, task_state.public_key.clone(), &task_state.membership, - task_state.cur_epoch, + vote.data.epoch, task_state.id, &event, sender, &task_state.upgrade_lock, - !in_transition, + transition_indicator.clone(), ) .await?; + // If the vote sender belongs to the next epoch, collect it separately to form the second QC + if task_state + .membership + .has_stake(&vote.signing_key(), vote.epoch() + 1) + { + handle_vote( + &mut task_state.next_epoch_vote_collectors, + &vote.clone().into(), + task_state.public_key.clone(), + &task_state.membership, + vote.data.epoch, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + transition_indicator, + ) + .await?; + } + Ok(()) } @@ -101,12 +127,12 @@ pub(crate) async fn handle_timeout_vote_recv< vote, task_state.public_key.clone(), &task_state.membership, - task_state.cur_epoch, + vote.data.epoch, task_state.id, &event, sender, &task_state.upgrade_lock, - true, + EpochTransitionIndicator::NotInTransition, ) .await?; diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index 7c50cd8df6..e6897a2fe5 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -14,8 +14,8 @@ use hotshot_types::{ consensus::OuterConsensus, event::Event, message::UpgradeLock, - simple_certificate::{QuorumCertificate2, TimeoutCertificate2}, - simple_vote::{QuorumVote2, TimeoutVote2}, + simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, TimeoutCertificate2}, + simple_vote::{NextEpochQuorumVote2, QuorumVote2, TimeoutVote2}, traits::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, @@ -55,6 +55,14 @@ pub struct ConsensusTaskState, V: /// A map of `QuorumVote` collector tasks. pub vote_collectors: VoteCollectorsMap, QuorumCertificate2, V>, + /// A map of `QuorumVote` collector tasks. They collect votes from the nodes in the next epoch. + pub next_epoch_vote_collectors: VoteCollectorsMap< + TYPES, + NextEpochQuorumVote2, + NextEpochQuorumCertificate2, + V, + >, + /// A map of `TimeoutVote` collector tasks. pub timeout_vote_collectors: VoteCollectorsMap, TimeoutCertificate2, V>, diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index fb2775d378..b292814e24 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -25,6 +25,7 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, + utils::EpochTransitionIndicator, vote::HasViewNumber, }; use sha2::{Digest, Sha256}; @@ -106,21 +107,15 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState Is it always the case that this is cur_view + 1? view_number, - epoch: *epoch_number, + epoch, }; let message = Proposal { @@ -348,6 +343,15 @@ impl, V: Versions> DaTaskState {} } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 06d3ff5db6..195b2137cd 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -17,8 +17,8 @@ use hotshot_types::{ message::Proposal, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - TimeoutCertificate2, UpgradeCertificate, ViewSyncCommitCertificate2, + DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, + TimeoutCertificate, TimeoutCertificate2, UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ @@ -124,6 +124,8 @@ pub enum HotShotEvent { QcFormed(Either, TimeoutCertificate>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only Qc2Formed(Either, TimeoutCertificate2>), + /// The next leader has collected enough votes from the next epoch nodes to form a QC; emitted by the next leader in the consensus task; an internal event only + NextEpochQc2Formed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks @@ -283,6 +285,10 @@ impl HotShotEvent { either::Left(qc) => Some(qc.view_number()), either::Right(tc) => Some(tc.view_number()), }, + HotShotEvent::NextEpochQc2Formed(cert) => match cert { + either::Left(qc) => Some(qc.view_number()), + either::Right(tc) => Some(tc.view_number()), + }, HotShotEvent::ViewSyncCommitVoteSend(vote) | HotShotEvent::ViewSyncCommitVoteRecv(vote) => Some(vote.view_number()), HotShotEvent::ViewSyncPreCommitVoteRecv(vote) @@ -406,8 +412,16 @@ impl Display for HotShotEvent { either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), }, HotShotEvent::Qc2Formed(cert) => match cert { - either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), - either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), + either::Left(qc) => write!(f, "QcFormed2(view_number={:?})", qc.view_number()), + either::Right(tc) => write!(f, "QcFormed2(view_number={:?})", tc.view_number()), + }, + HotShotEvent::NextEpochQc2Formed(cert) => match cert { + either::Left(qc) => { + write!(f, "NextEpochQc2Formed(view_number={:?})", qc.view_number()) + } + either::Right(tc) => { + write!(f, "NextEpochQc2Formed(view_number={:?})", tc.view_number()) + } }, HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 6765c76022..898ee8e1ee 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -28,7 +28,7 @@ use hotshot_types::{ signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{epoch_from_block_number, Terminator, View, ViewInner}, + utils::{epoch_from_block_number, is_last_block_in_epoch, Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; use tokio::time::timeout; @@ -573,6 +573,12 @@ pub async fn validate_proposal_safety_and_liveness< } ); + // Make sure that the epoch transition proposal includes the next epoch QC + if is_last_block_in_epoch(parent_leaf.height(), validation_info.epoch_height) { + ensure!(proposal.data.next_epoch_justify_qc.is_some(), + "Epoch transition proposal does not include the next epoch justify QC. Do not vote!"); + } + // Liveness check. let liveness_check = justify_qc.view_number() > consensus_reader.locked_view(); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 0b9b536a15..a72344dcc6 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -10,18 +10,24 @@ use std::{ sync::Arc, }; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; +use hotshot_types::data::{VidDisperseShare, VidDisperseShare2}; use hotshot_types::{ consensus::OuterConsensus, - data::{VidDisperse, VidDisperseShare}, + data::VidDisperse, event::{Event, EventType, HotShotAction}, message::{ convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, UpgradeLock, }, + simple_vote::HasEpoch, traits::{ election::Membership, network::{ @@ -38,11 +44,6 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; - /// the network message task state #[derive(Clone)] pub struct NetworkMessageTaskState { @@ -321,16 +322,35 @@ impl< sender: &::SignatureKey, ) -> Option { let view = vid_proposal.data.view_number; - let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); + let vid_share_proposals = VidDisperseShare2::to_vid_share_proposals(vid_proposal); let mut messages = HashMap::new(); for proposal in vid_share_proposals { let recipient = proposal.data.recipient_key.clone(); - let message = Message { - sender: sender.clone(), - kind: MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::VidDisperseMsg(proposal), - )), + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { + Message { + sender: sender.clone(), + kind: MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::VidDisperseMsg2(proposal), + )), + } + } else { + let vid_share_proposal = Proposal { + data: VidDisperseShare::from(proposal.data), + signature: proposal.signature, + _pd: proposal._pd, + }; + Message { + sender: sender.clone(), + kind: MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::VidDisperseMsg(vid_share_proposal), + )), + } }; let serialized_message = match self.upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, @@ -449,7 +469,7 @@ impl< HotShotEvent::QuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; - let leader = match self.membership.leader(view_number, self.epoch) { + let leader = match self.membership.leader(view_number, vote.epoch()) { Ok(l) => l, Err(e) => { tracing::warn!( diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 7c29ffe426..7dbedb9107 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -17,13 +17,17 @@ use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use committable::Committable; -use hotshot_task::dependency_task::HandleDepOutput; +use either::Either; +use hotshot_task::{ + dependency::{Dependency, EventDependency}, + dependency_task::HandleDepOutput, +}; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence}, drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::Proposal, - simple_certificate::{QuorumCertificate2, UpgradeCertificate}, + simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ block_contents::BlockHeader, election::Membership, @@ -130,8 +134,6 @@ impl ProposalDependencyHandle { if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() { if qc .is_valid_cert( - // TODO take epoch from `qc` - // https://github.com/EspressoSystems/HotShot/issues/3917 self.quorum_membership.stake_table(qc.data.epoch), self.quorum_membership.success_threshold(qc.data.epoch), &self.upgrade_lock, @@ -144,7 +146,7 @@ impl ProposalDependencyHandle { } None } - /// Waits for the ocnfigured timeout for nodes to send HighQc messages to us. We'll + /// Waits for the configured timeout for nodes to send HighQc messages to us. We'll /// then propose with the highest QC from among these proposals. async fn wait_for_highest_qc(&mut self) { tracing::error!("waiting for QC"); @@ -187,6 +189,68 @@ impl ProposalDependencyHandle { } } } + /// Gets the next epoch QC corresponding to this epoch QC, times out if it takes too long. + /// We need the QC for the epoch transition proposals. + async fn get_next_epoch_qc( + &self, + high_qc: &QuorumCertificate2, + ) -> Option> { + tracing::debug!("getting the next epoch QC"); + // If we haven't upgraded to Epochs just return None right away + if self.upgrade_lock.version_infallible(self.view_number).await < V::Epochs::VERSION { + return None; + } + if let Some(next_epoch_qc) = self.consensus.read().await.next_epoch_high_qc() { + if next_epoch_qc.data.leaf_commit == high_qc.data.leaf_commit { + // We have it already, no reason to wait + return Some(next_epoch_qc.clone()); + } + }; + + let wait_duration = Duration::from_millis(self.timeout / 2); + + // TODO configure timeout + let Some(time_spent) = Instant::now().checked_duration_since(self.view_start_time) else { + // Shouldn't be possible, now must be after the start + return None; + }; + let Some(time_left) = wait_duration.checked_sub(time_spent) else { + // No time left + return None; + }; + let receiver = self.receiver.clone(); + let Ok(Some(event)) = tokio::time::timeout(time_left, async move { + let this_epoch_high_qc = high_qc.clone(); + EventDependency::new( + receiver, + Box::new(move |event| { + let event = event.as_ref(); + if let HotShotEvent::NextEpochQc2Formed(Either::Left(qc)) = event { + qc.data.leaf_commit == this_epoch_high_qc.data.leaf_commit + } else { + false + } + }), + ) + .completed() + .await + }) + .await + else { + // Check again, there is a chance we missed it + if let Some(next_epoch_qc) = self.consensus.read().await.next_epoch_high_qc() { + if next_epoch_qc.data.leaf_commit == high_qc.data.leaf_commit { + return Some(next_epoch_qc.clone()); + } + }; + return None; + }; + let HotShotEvent::NextEpochQc2Formed(Either::Left(qc)) = event.as_ref() else { + // this shouldn't happen + return None; + }; + Some(qc.clone()) + } /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. @@ -312,10 +376,21 @@ impl ProposalDependencyHandle { ); return Ok(()); } + let next_epoch_qc = if self + .consensus + .read() + .await + .is_leaf_for_last_block(parent_qc.data.leaf_commit) + { + self.get_next_epoch_qc(&parent_qc).await + } else { + None + }; let proposal = QuorumProposal2 { block_header, view_number: self.view_number, justify_qc: parent_qc, + next_epoch_justify_qc: next_epoch_qc, upgrade_certificate, view_change_evidence: proposal_certificate, drb_seed: INITIAL_DRB_SEED_INPUT, diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 9eda9b13b4..77bfef856c 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -462,12 +462,7 @@ impl, V: Versions> )?; } HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { - // MERGE TODO - // - // HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - // let cert_epoch_number = certificate.data.epoch; - // - let epoch_number = self.consensus.read().await.cur_epoch(); + let epoch_number = certificate.data.epoch; ensure!( certificate @@ -554,6 +549,32 @@ impl, V: Versions> ); self.highest_qc = qc.clone(); } + HotShotEvent::NextEpochQc2Formed(Either::Left(next_epoch_qc)) => { + // Only update if the qc is from a newer view + let current_next_epoch_qc = + self.consensus.read().await.next_epoch_high_qc().cloned(); + ensure!(current_next_epoch_qc.is_none() || + next_epoch_qc.view_number > current_next_epoch_qc.unwrap().view_number, + debug!("Received a next epoch QC for a view that was not > than our current next epoch high QC") + ); + self.consensus + .write() + .await + .update_next_epoch_high_qc(next_epoch_qc.clone()) + .wrap() + .context(error!( + "Failed to update next epoch high QC in internal consensus state!" + ))?; + + // Then update the next epoch high QC in storage + self.storage + .write() + .await + .update_next_epoch_high_qc2(next_epoch_qc.clone()) + .await + .wrap() + .context(error!("Failed to update next epoch high QC in storage!"))?; + } _ => {} } Ok(()) diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 2f5df35a65..d6a768c2cd 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -152,7 +152,10 @@ pub(crate) async fn handle_quorum_proposal_recv< .context(warn!("Failed to validate proposal view or attached certs"))?; let view_number = proposal.data.view_number(); + let justify_qc = proposal.data.justify_qc.clone(); + let maybe_next_epoch_justify_qc = proposal.data.next_epoch_justify_qc.clone(); + let proposal_block_number = proposal.data.block_header.block_number(); let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( proposal_block_number, @@ -176,6 +179,34 @@ pub(crate) async fn handle_quorum_proposal_recv< bail!("Invalid justify_qc in proposal for view {}", *view_number); } + if let Some(ref next_epoch_justify_qc) = maybe_next_epoch_justify_qc { + // If the next epoch justify qc exists, make sure it's equal to the justify qc + if justify_qc.view_number() != next_epoch_justify_qc.view_number() + || justify_qc.data.epoch != next_epoch_justify_qc.data.epoch + || justify_qc.data.leaf_commit != next_epoch_justify_qc.data.leaf_commit + { + bail!("Next epoch justify qc exists but it's not equal with justify qc."); + } + // Validate the next epoch justify qc as well + if !next_epoch_justify_qc + .is_valid_cert( + validation_info + .quorum_membership + .stake_table(justify_qc.data.epoch + 1), + validation_info + .quorum_membership + .success_threshold(justify_qc.data.epoch + 1), + &validation_info.upgrade_lock, + ) + .await + { + bail!( + "Invalid next_epoch_justify_qc in proposal for view {}", + *view_number + ); + } + } + broadcast_event( Arc::new(HotShotEvent::QuorumProposalPreliminarilyValidated( proposal.clone(), @@ -232,6 +263,20 @@ pub(crate) async fn handle_quorum_proposal_recv< { bail!("Failed to store High QC, not voting; error = {:?}", e); } + if let Some(ref next_epoch_justify_qc) = maybe_next_epoch_justify_qc { + if let Err(e) = validation_info + .storage + .write() + .await + .update_next_epoch_high_qc2(next_epoch_justify_qc.clone()) + .await + { + bail!( + "Failed to store next epoch High QC, not voting; error = {:?}", + e + ); + } + } } drop(consensus_reader); @@ -239,6 +284,11 @@ pub(crate) async fn handle_quorum_proposal_recv< if let Err(e) = consensus_writer.update_high_qc(justify_qc.clone()) { tracing::trace!("{e:?}"); } + if let Some(ref next_epoch_justify_qc) = maybe_next_epoch_justify_qc { + if let Err(e) = consensus_writer.update_next_epoch_high_qc(next_epoch_justify_qc.clone()) { + tracing::trace!("{e:?}"); + } + } drop(consensus_writer); let Some((parent_leaf, _parent_state)) = parent else { diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 98f8c5561d..577c5325e6 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -24,7 +24,7 @@ use hotshot_types::{ storage::Storage, ValidatedState, }, - utils::epoch_from_block_number, + utils::{epoch_from_block_number, is_last_block_in_epoch}, vote::HasViewNumber, }; use tracing::instrument; @@ -53,7 +53,7 @@ async fn handle_quorum_proposal_validated_drb_calculation_start< ) { let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( proposal.block_header.block_number(), - task_state.epoch_height, + TYPES::EPOCH_HEIGHT, )); // Start the new task if we're in the committee for this epoch @@ -401,7 +401,6 @@ pub(crate) async fn submit_vote, V private_key: ::PrivateKey, upgrade_lock: UpgradeLock, view_number: TYPES::View, - epoch_height: u64, storage: Arc>, leaf: Leaf2, vid_share: Proposal>, @@ -409,11 +408,17 @@ pub(crate) async fn submit_vote, V ) -> Result<()> { let epoch_number = TYPES::Epoch::new(epoch_from_block_number( leaf.block_header().block_number(), - epoch_height, + TYPES::EPOCH_HEIGHT, )); + let committee_member_in_current_epoch = quorum_membership.has_stake(&public_key, epoch_number); + // If the proposed leaf is for the last block in the epoch and the node is part of the quorum committee + // in the next epoch, the node should vote to achieve the double quorum. + let committee_member_in_next_epoch = is_last_block_in_epoch(leaf.height(), TYPES::EPOCH_HEIGHT) + && quorum_membership.has_stake(&public_key, epoch_number + 1); + ensure!( - quorum_membership.has_stake(&public_key, epoch_number), + committee_member_in_current_epoch || committee_member_in_next_epoch, info!( "We were not chosen for quorum committee on {:?}", view_number diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 04b8a677dc..a009e6aee6 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -238,7 +238,6 @@ impl + 'static, V: Versions> Handl self.private_key.clone(), self.upgrade_lock.clone(), self.view_number, - self.epoch_height, Arc::clone(&self.storage), leaf, vid_share, @@ -504,6 +503,7 @@ impl, V: Versions> QuorumVoteTaskS ); let cert_epoch = cert.data.epoch; + // Validate the DAC. ensure!( cert.is_valid_cert( @@ -721,7 +721,6 @@ impl, V: Versions> QuorumVoteTaskS self.private_key.clone(), self.upgrade_lock.clone(), proposal.data.view_number(), - self.epoch_height, Arc::clone(&self.storage), proposed_leaf, updated_vid, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 56e16c1a37..3951102c24 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -332,7 +332,7 @@ impl> NetworkRequestState, sender: &Sender>>, diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index a8dec2e3f3..16f8c7e555 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -25,6 +25,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, + utils::EpochTransitionIndicator, vote::HasViewNumber, }; use tracing::instrument; @@ -243,7 +244,7 @@ impl UpgradeTaskState { &event, &tx, &self.upgrade_lock, - true, + EpochTransitionIndicator::NotInTransition, ) .await?; } @@ -287,6 +288,7 @@ impl UpgradeTaskState { let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), view_number: TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), + epoch: self.cur_epoch, }; let signature = TYPES::SignatureKey::sign( diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 07dd5d1b72..b27038fabf 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -14,11 +14,13 @@ use hotshot_types::{ data::{PackedBundle, VidDisperse, VidDisperseShare2}, message::Proposal, traits::{ + block_contents::BlockHeader, election::Membership, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, BlockPayload, }, + utils::epoch_from_block_number, }; use tracing::{debug, error, info, instrument}; use utils::anytrace::Result; @@ -53,6 +55,9 @@ pub struct VidTaskState> { /// This state's ID pub id: u64, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl> VidTaskState { @@ -89,6 +94,7 @@ impl> VidTaskState { &Arc::clone(&self.membership), *view_number, epoch, + None, vid_precompute.clone(), ) .await; @@ -124,7 +130,10 @@ impl> VidTaskState { error!("VID: failed to sign dispersal payload"); return None; }; - debug!("publishing VID disperse for view {}", *view_number); + debug!( + "publishing VID disperse for view {} and epoch {}", + *view_number, *epoch + ); broadcast_event( Arc::new(HotShotEvent::VidDisperseSend( Proposal { @@ -157,6 +166,68 @@ impl> VidTaskState { return None; } + HotShotEvent::QuorumProposalSend(proposal, _) => { + let proposed_block_number = proposal.data.block_header.block_number(); + if self.epoch_height == 0 || proposed_block_number % self.epoch_height != 0 { + // This is not the last block in the epoch, do nothing. + return None; + } + // We just sent a proposal for the last block in the epoch. We need to calculate + // and send VID for the nodes in the next epoch so that they can vote. + let proposal_view_number = proposal.data.view_number; + let sender_epoch = TYPES::Epoch::new(epoch_from_block_number( + proposed_block_number, + self.epoch_height, + )); + let target_epoch = TYPES::Epoch::new( + epoch_from_block_number(proposed_block_number, self.epoch_height) + 1, + ); + + let consensus_reader = self.consensus.read().await; + let Some(txns) = consensus_reader.saved_payloads().get(&proposal_view_number) + else { + tracing::warn!( + "We need to calculate VID for the nodes in the next epoch \ + but we don't have the transactions" + ); + return None; + }; + let txns = Arc::clone(txns); + drop(consensus_reader); + + let next_epoch_vid_disperse = VidDisperse::calculate_vid_disperse( + txns, + &Arc::clone(&self.membership), + proposal_view_number, + target_epoch, + Some(sender_epoch), + None, + ) + .await; + let Ok(next_epoch_signature) = TYPES::SignatureKey::sign( + &self.private_key, + next_epoch_vid_disperse.payload_commitment.as_ref(), + ) else { + error!("VID: failed to sign dispersal payload for the next epoch"); + return None; + }; + debug!( + "publishing VID disperse for view {} and epoch {}", + *proposal_view_number, *target_epoch + ); + broadcast_event( + Arc::new(HotShotEvent::VidDisperseSend( + Proposal { + signature: next_epoch_signature, + data: next_epoch_vid_disperse.clone(), + _pd: PhantomData, + }, + self.public_key.clone(), + )), + &event_stream, + ) + .await; + } HotShotEvent::Shutdown => { return Some(HotShotTaskCompleted); } diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 98b9fd1876..65e1ad7d6b 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -29,6 +29,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, + utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber, Vote}, }; use tokio::{spawn, task::JoinHandle, time::sleep}; @@ -318,15 +319,15 @@ impl ViewSyncTaskState { public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, - epoch: self.cur_epoch, id: self.id, + epoch: vote.data.epoch, }; let vote_collector = create_vote_accumulator( &info, event, &event_stream, self.upgrade_lock.clone(), - true, + EpochTransitionIndicator::NotInTransition, ) .await?; @@ -363,8 +364,8 @@ impl ViewSyncTaskState { public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, - epoch: self.cur_epoch, id: self.id, + epoch: vote.data.epoch, }; let vote_collector = create_vote_accumulator( @@ -372,7 +373,7 @@ impl ViewSyncTaskState { event, &event_stream, self.upgrade_lock.clone(), - true, + EpochTransitionIndicator::NotInTransition, ) .await?; relay_map.insert(relay, vote_collector); @@ -408,15 +409,15 @@ impl ViewSyncTaskState { public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, - epoch: self.cur_epoch, id: self.id, + epoch: vote.data.epoch, }; let vote_collector = create_vote_accumulator( &info, event, &event_stream, self.upgrade_lock.clone(), - true, + EpochTransitionIndicator::NotInTransition, ) .await; if let Ok(vote_task) = vote_collector { diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 182a40a8c3..cc2ec6c7c9 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -17,18 +17,19 @@ use either::Either::{self, Left, Right}; use hotshot_types::{ message::UpgradeLock, simple_certificate::{ - DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate2, - UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, + DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, + TimeoutCertificate2, UpgradeCertificate, ViewSyncCommitCertificate2, + ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote2, QuorumVote, QuorumVote2, TimeoutVote2, UpgradeVote, ViewSyncCommitVote2, - ViewSyncFinalizeVote2, ViewSyncPreCommitVote2, + DaVote2, NextEpochQuorumVote2, QuorumVote, QuorumVote2, TimeoutVote2, UpgradeVote, + ViewSyncCommitVote2, ViewSyncFinalizeVote2, ViewSyncPreCommitVote2, }, traits::{ election::Membership, node_implementation::{NodeType, Versions}, }, + utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; use utils::anytrace::*; @@ -65,7 +66,7 @@ pub struct VoteCollectionTaskState< pub id: u64, /// Whether we should check if we are the leader when handling a vote - pub check_if_leader: bool, + pub transition_indicator: EpochTransitionIndicator, } /// Describes the functions a vote must implement for it to be aggregatable by the generic vote collection task @@ -105,14 +106,17 @@ impl< pub async fn accumulate_vote( &mut self, vote: &VOTE, + sender_epoch: TYPES::Epoch, event_stream: &Sender>>, ) -> Result> { - if self.check_if_leader { - ensure!( - vote.leader(&self.membership, self.epoch)? == self.public_key, - info!("Received vote for a view in which we were not the leader.") - ); - } + ensure!( + matches!( + self.transition_indicator, + EpochTransitionIndicator::InTransition + ) || vote.leader(&self.membership, self.epoch)? == self.public_key, + info!("Received vote for a view in which we were not the leader.") + ); + ensure!( vote.view_number() == self.view, error!( @@ -127,7 +131,7 @@ impl< ))?; match accumulator - .accumulate(vote, &self.membership, self.epoch) + .accumulate(vote, &self.membership, sender_epoch) .await { Either::Left(()) => Ok(None), @@ -195,7 +199,7 @@ pub async fn create_vote_accumulator( event: Arc>, sender: &Sender>>, upgrade_lock: UpgradeLock, - check_if_leader: bool, + transition_indicator: EpochTransitionIndicator, ) -> Result> where TYPES: NodeType, @@ -226,7 +230,7 @@ where view: info.view, epoch: info.epoch, id: info.id, - check_if_leader, + transition_indicator, }; state.handle_vote_event(Arc::clone(&event), sender).await?; @@ -258,7 +262,7 @@ pub async fn handle_vote< event: &Arc>, event_stream: &Sender>>, upgrade_lock: &UpgradeLock, - check_if_leader: bool, + transition_indicator: EpochTransitionIndicator, ) -> Result<()> where VoteCollectionTaskState: HandleVoteEvent, @@ -278,7 +282,7 @@ where Arc::clone(event), event_stream, upgrade_lock.clone(), - check_if_leader, + transition_indicator, ) .await?; @@ -306,6 +310,13 @@ where /// Alias for Quorum vote accumulator type QuorumVoteState = VoteCollectionTaskState, QuorumCertificate2, V>; +/// Alias for Quorum vote accumulator +type NextEpochQuorumVoteState = VoteCollectionTaskState< + TYPES, + NextEpochQuorumVote2, + NextEpochQuorumCertificate2, + V, +>; /// Alias for DA vote accumulator type DaVoteState = VoteCollectionTaskState, DaCertificate2, V>; @@ -373,6 +384,25 @@ impl AggregatableVote, QuorumCertific } } +impl + AggregatableVote, NextEpochQuorumCertificate2> + for NextEpochQuorumVote2 +{ + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { + membership.leader(self.view_number() + 1, epoch) + } + fn make_cert_event( + certificate: NextEpochQuorumCertificate2, + _key: &TYPES::SignatureKey, + ) -> HotShotEvent { + HotShotEvent::NextEpochQc2Formed(Left(certificate)) + } +} + impl AggregatableVote, UpgradeCertificate> for UpgradeVote { @@ -496,7 +526,33 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, + HotShotEvent::QuorumVoteRecv(vote) => { + self.accumulate_vote(vote, self.epoch, sender).await + } + _ => Ok(None), + } + } + fn filter(event: Arc>) -> bool { + matches!(event.as_ref(), HotShotEvent::QuorumVoteRecv(_)) + } +} + +// Handlers for all vote accumulators +#[async_trait] +impl + HandleVoteEvent, NextEpochQuorumCertificate2> + for NextEpochQuorumVoteState +{ + async fn handle_vote_event( + &mut self, + event: Arc>, + sender: &Sender>>, + ) -> Result>> { + match event.as_ref() { + HotShotEvent::QuorumVoteRecv(vote) => { + self.accumulate_vote(&vote.clone().into(), self.epoch + 1, sender) + .await + } _ => Ok(None), } } @@ -517,7 +573,9 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(vote, sender).await, + HotShotEvent::UpgradeVoteRecv(vote) => { + self.accumulate_vote(vote, self.epoch, sender).await + } _ => Ok(None), } } @@ -536,7 +594,7 @@ impl HandleVoteEvent, DaCert sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, + HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, self.epoch, sender).await, _ => Ok(None), } } @@ -556,7 +614,9 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, + HotShotEvent::TimeoutVoteRecv(vote) => { + self.accumulate_vote(vote, self.epoch, sender).await + } _ => Ok(None), } } @@ -577,7 +637,7 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - self.accumulate_vote(vote, sender).await + self.accumulate_vote(vote, self.epoch, sender).await } _ => Ok(None), } @@ -598,7 +658,9 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, + HotShotEvent::ViewSyncCommitVoteRecv(vote) => { + self.accumulate_vote(vote, self.epoch, sender).await + } _ => Ok(None), } } @@ -619,7 +681,7 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - self.accumulate_vote(vote, sender).await + self.accumulate_vote(vote, self.epoch, sender).await } _ => Ok(None), } diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 2fa1fd6eec..49aca83652 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -332,6 +332,7 @@ pub fn build_vid_proposal( vid.disperse(&encoded_transactions).unwrap(), quorum_membership, epoch_number, + None, ); let signature = diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index c82315d18d..042bc7f9b7 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -19,7 +19,10 @@ use hotshot_types::{ error::RoundTimedoutState, event::{Event, EventType, LeafChain}, simple_certificate::QuorumCertificate2, - traits::node_implementation::{ConsensusTime, NodeType, Versions}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType, Versions}, + }, vid::VidCommitment, }; use thiserror::Error; @@ -138,7 +141,6 @@ impl, V: Versions> TestTas check_block, num_failed_views, num_successful_views, - threshold_calculator, transaction_threshold, .. }: OverallSafetyPropertiesDescription = self.properties.clone(); @@ -183,10 +185,34 @@ impl, V: Versions> TestTas _ => return Ok(()), }; - let len = self.handles.read().await.len(); + if let Some(ref key) = key { + if *key.epoch() > self.ctx.latest_epoch { + self.ctx.latest_epoch = *key.epoch(); + } + } + + let epoch = TYPES::Epoch::new(self.ctx.latest_epoch); + let len = self + .handles + .read() + .await + .first() + .unwrap() + .handle + .memberships + .total_nodes(epoch); // update view count - let threshold = (threshold_calculator)(len, len); + let threshold = self + .handles + .read() + .await + .first() + .unwrap() + .handle + .memberships + .success_threshold(epoch) + .get() as usize; let view = self.ctx.round_results.get_mut(&view_number).unwrap(); if let Some(key) = key { @@ -352,6 +378,7 @@ impl Default for RoundCtx { round_results: HashMap::default(), failed_views: HashSet::default(), successful_views: HashSet::default(), + latest_epoch: 0u64, } } } @@ -369,6 +396,8 @@ pub struct RoundCtx { pub failed_views: HashSet, /// successful views pub successful_views: HashSet, + /// latest epoch, updated when a leaf with a higher epoch is seen + pub latest_epoch: u64, } impl RoundCtx { diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 3847a20a62..e9dd819802 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -28,7 +28,7 @@ use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::Leaf2, event::Event, - simple_certificate::QuorumCertificate2, + simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2}, traits::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, @@ -65,6 +65,8 @@ pub struct SpinningTask< pub(crate) last_decided_leaf: Leaf2, /// Highest qc seen in the test for restarting nodes pub(crate) high_qc: QuorumCertificate2, + /// Next epoch highest qc seen in the test for restarting nodes + pub(crate) next_epoch_high_qc: Option>, /// Add specified delay to async calls pub(crate) async_delay_config: DelayConfig, /// Context stored for nodes to be restarted with @@ -160,6 +162,7 @@ where TYPES::View::genesis(), BTreeMap::new(), self.high_qc.clone(), + self.next_epoch_high_qc.clone(), None, Vec::new(), BTreeMap::new(), @@ -249,6 +252,7 @@ where ) .await, ), + read_storage.next_epoch_high_qc_cloned().await, read_storage.decided_upgrade_certificate().await, Vec::new(), BTreeMap::new(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index f13cba62ca..ec03e3770a 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -189,6 +189,7 @@ where &TestInstanceState::default(), ) .await, + next_epoch_high_qc: None, async_delay_config: launcher.metadata.async_delay_config, restart_contexts: HashMap::new(), channel_generator: launcher.resource_generator.channel_generator, @@ -323,6 +324,7 @@ where pub async fn init_builders>( &self, + num_nodes: usize, ) -> (Vec>>, Vec, Url) { let config = self.launcher.resource_generator.config.clone(); let mut builder_tasks = Vec::new(); @@ -332,7 +334,7 @@ where let builder_url = Url::parse(&format!("http://localhost:{builder_port}")).expect("Invalid URL"); let builder_task = B::start( - config.num_nodes_with_stake.into(), + num_nodes, builder_url.clone(), B::Config::default(), metadata.changes.clone(), @@ -397,8 +399,14 @@ where let mut results = vec![]; let config = self.launcher.resource_generator.config.clone(); + // TODO This is only a workaround. Number of nodes changes from epoch to epoch. Builder should be made epoch-aware. + let temp_memberships = ::Membership::new( + config.known_nodes_with_stake.clone(), + config.known_da_nodes.clone(), + ); + let num_nodes = temp_memberships.total_nodes(TYPES::Epoch::new(0)); let (mut builder_tasks, builder_urls, fallback_builder_url) = - self.init_builders::().await; + self.init_builders::(num_nodes).await; if self.launcher.metadata.start_solver { self.add_solver(builder_urls.clone()).await; diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index d5c1cb7ca5..a7390f98ed 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -138,6 +138,7 @@ impl TestView { &TestInstanceState::default(), ) .await, + next_epoch_justify_qc: None, upgrade_certificate: None, view_change_evidence: None, drb_result: INITIAL_DRB_RESULT, @@ -368,6 +369,7 @@ impl TestView { block_header: block_header.clone(), view_number: next_view, justify_qc: quorum_certificate.clone(), + next_epoch_justify_qc: None, upgrade_certificate: upgrade_certificate.clone(), view_change_evidence, drb_result: INITIAL_DRB_RESULT, diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 8e92935567..bdf55c1f6a 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -8,8 +8,9 @@ use std::{sync::Arc, time::Duration}; use hotshot_example_types::{ node_types::{ - EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, - TestTwoStakeTablesTypes, TestTypes, TestTypesRandomizedLeader, TestVersions, + CombinedImpl, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, + TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, TestTypesRandomizedLeader, + TestVersions, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -156,8 +157,8 @@ cross_tests!( cross_tests!( TestName: test_epoch_end, - Impls: [PushCdnImpl], - Types: [TestTwoStakeTablesTypes], + Impls: [CombinedImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], Versions: [EpochsTestVersions], Ignore: false, Metadata: { diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 3cd850183c..fc33e7f9a4 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -79,6 +79,7 @@ async fn test_vid_task() { num_transactions: encoded_transactions.len() as u64, }, view_number: ViewNumber::new(2), + epoch: EpochNumber::new(0), }; let message = Proposal { data: proposal.clone(), @@ -91,6 +92,7 @@ async fn test_vid_task() { vid_disperse, &membership, EpochNumber::new(0), + None, ); let vid_proposal = Proposal { diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 5a94692be4..edbf78acab 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -25,7 +25,7 @@ use crate::{ error::HotShotError, event::{HotShotAction, LeafInfo}, message::Proposal, - simple_certificate::{DaCertificate2, QuorumCertificate2}, + simple_certificate::{DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate2}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, @@ -34,7 +34,8 @@ use crate::{ BlockPayload, ValidatedState, }, utils::{ - epoch_from_block_number, BuilderCommitment, LeafCommitment, StateAndDelta, Terminator, + epoch_from_block_number, is_last_block_in_epoch, BuilderCommitment, LeafCommitment, + StateAndDelta, Terminator, }, vid::VidCommitment, vote::{Certificate, HasViewNumber}, @@ -317,6 +318,9 @@ pub struct Consensus { /// the highqc per spec high_qc: QuorumCertificate2, + /// The high QC for the next epoch + next_epoch_high_qc: Option>, + /// A reference to the metrics trait pub metrics: Arc, @@ -412,6 +416,7 @@ impl Consensus { saved_leaves: CommitmentMap>, saved_payloads: BTreeMap>, high_qc: QuorumCertificate2, + next_epoch_high_qc: Option>, metrics: Arc, epoch_height: u64, ) -> Self { @@ -428,6 +433,7 @@ impl Consensus { saved_leaves, saved_payloads, high_qc, + next_epoch_high_qc, metrics, epoch_height, } @@ -458,6 +464,11 @@ impl Consensus { &self.high_qc } + /// Get the next epoch high QC. + pub fn next_epoch_high_qc(&self) -> Option<&NextEpochQuorumCertificate2> { + self.next_epoch_high_qc.as_ref() + } + /// Get the validated state map. pub fn validated_state_map(&self) -> &BTreeMap> { &self.validated_state_map @@ -740,6 +751,28 @@ impl Consensus { Ok(()) } + /// Update the next epoch high QC if given a newer one. + /// # Errors + /// Can return an error when the provided high_qc is not newer than the existing entry. + /// # Panics + /// It can't actually panic. If the option is None, we will not call unwrap on it. + pub fn update_next_epoch_high_qc( + &mut self, + high_qc: NextEpochQuorumCertificate2, + ) -> Result<()> { + if let Some(next_epoch_high_qc) = self.next_epoch_high_qc() { + ensure!( + high_qc.view_number > next_epoch_high_qc.view_number + || high_qc == *next_epoch_high_qc, + debug!("Next epoch high QC with an equal or higher view exists.") + ); + } + tracing::debug!("Updating next epoch high QC"); + self.next_epoch_high_qc = Some(high_qc); + + Ok(()) + } + /// Add a new entry to the vid_shares map. pub fn update_vid_shares( &mut self, @@ -916,7 +949,8 @@ impl Consensus { .get(&view)? .view_inner .epoch()?; - let vid = VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, None).await; + let vid = + VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, None, None).await; let shares = VidDisperseShare2::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; for share in shares { @@ -1010,11 +1044,7 @@ impl Consensus { return false; }; let block_height = leaf.height(); - if block_height == 0 || self.epoch_height == 0 { - false - } else { - block_height % self.epoch_height == 0 - } + is_last_block_in_epoch(block_height, self.epoch_height) } /// Returns true if our high QC is for the last block in the epoch @@ -1024,11 +1054,7 @@ impl Consensus { return false; }; let block_height = leaf.height(); - if block_height == 0 || self.epoch_height == 0 { - false - } else { - block_height % self.epoch_height == 0 - } + is_last_block_in_epoch(block_height, self.epoch_height) } /// Returns true if the `parent_leaf` formed an eQC for the previous epoch to the `proposed_leaf` diff --git a/types/src/data.rs b/types/src/data.rs index 73acf9d6f0..f3853e412e 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -34,8 +34,8 @@ use crate::{ impl_has_epoch, message::{Proposal, UpgradeLock}, simple_certificate::{ - QuorumCertificate, QuorumCertificate2, TimeoutCertificate2, UpgradeCertificate, - ViewSyncFinalizeCertificate2, + NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate2, + UpgradeCertificate, ViewSyncFinalizeCertificate2, }, simple_vote::{HasEpoch, QuorumData, QuorumData2, UpgradeProposalData, VersionedVoteData}, traits::{ @@ -146,6 +146,8 @@ pub struct DaProposal { pub metadata: >::Metadata, /// View this proposal applies to pub view_number: TYPES::View, + /// Epoch this proposal applies to + pub epoch: TYPES::Epoch, } /// A proposal to start providing data availability for a block. @@ -179,6 +181,7 @@ impl From> for DaProposal { encoded_transactions: da_proposal2.encoded_transactions, metadata: da_proposal2.metadata, view_number: da_proposal2.view_number, + epoch: TYPES::Epoch::new(0), } } } @@ -194,6 +197,8 @@ where pub upgrade_proposal: UpgradeProposalData, /// View this proposal applies to pub view_number: TYPES::View, + /// Epoch this proposal applies to + pub epoch: TYPES::Epoch, } /// VID dispersal data @@ -205,7 +210,7 @@ where pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::View, - /// The epoch number for which this VID data is intended + /// Epoch this proposal applies to pub epoch: TYPES::Epoch, /// Block payload commitment pub payload_commitment: VidCommitment, @@ -216,32 +221,34 @@ pub struct VidDisperse { } impl VidDisperse { - /// Create VID dispersal from a specified membership for a given epoch. + /// Create VID dispersal from a specified membership for the target epoch. /// Uses the specified function to calculate share dispersal /// Allows for more complex stake table functionality pub fn from_membership( view_number: TYPES::View, mut vid_disperse: JfVidDisperse, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + target_epoch: TYPES::Epoch, + sender_epoch: Option, ) -> Self { let shares = membership - .committee_members(view_number, epoch) + .committee_members(view_number, target_epoch) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); Self { view_number, - epoch, shares, common: vid_disperse.common, payload_commitment: vid_disperse.commit, + epoch: sender_epoch.unwrap_or(target_epoch), } } /// Calculate the vid disperse information from the payload given a view, epoch and membership, - /// optionally using precompute data from builder + /// optionally using precompute data from builder. + /// If the sender epoch is missing, it means it's the same as the target epoch. /// /// # Panics /// Panics if the VID calculation fails, this should not happen. @@ -250,10 +257,11 @@ impl VidDisperse { txns: Arc<[u8]>, membership: &Arc, view: TYPES::View, - epoch: TYPES::Epoch, + target_epoch: TYPES::Epoch, + sender_epoch: Option, precompute_data: Option, ) -> Self { - let num_nodes = membership.total_nodes(epoch); + let num_nodes = membership.total_nodes(target_epoch); let vid_disperse = spawn_blocking(move || { precompute_data @@ -266,7 +274,13 @@ impl VidDisperse { // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); - Self::from_membership(view, vid_disperse, membership.as_ref(), epoch) + Self::from_membership( + view, + vid_disperse, + membership.as_ref(), + target_epoch, + sender_epoch, + ) } } @@ -574,6 +588,9 @@ pub struct QuorumProposal2 { /// certificate that the proposal is chaining from pub justify_qc: QuorumCertificate2, + /// certificate that the proposal is chaining from formed by the next epoch nodes + pub next_epoch_justify_qc: Option>, + /// Possible upgrade certificate, which the leader may optionally attach. pub upgrade_certificate: Option>, @@ -599,6 +616,7 @@ impl From> for QuorumProposal2 { block_header: quorum_proposal.block_header, view_number: quorum_proposal.view_number, justify_qc: quorum_proposal.justify_qc.to_qc2(), + next_epoch_justify_qc: None, upgrade_certificate: quorum_proposal.upgrade_certificate, view_change_evidence: quorum_proposal.proposal_certificate, drb_seed: INITIAL_DRB_SEED_INPUT, @@ -627,6 +645,7 @@ impl From> for Leaf2 { view_number: leaf.view_number, epoch: TYPES::Epoch::genesis(), justify_qc: leaf.justify_qc.to_qc2(), + next_epoch_justify_qc: None, parent_commitment: Commitment::from_raw(bytes), block_header: leaf.block_header, upgrade_certificate: leaf.upgrade_certificate, @@ -759,6 +778,9 @@ pub struct Leaf2 { /// Per spec, justification justify_qc: QuorumCertificate2, + /// certificate that the proposal is chaining from formed by the next epoch nodes + next_epoch_justify_qc: Option>, + /// The hash of the parent `Leaf` /// So we can ask if it extends parent_commitment: Commitment, @@ -834,6 +856,7 @@ impl Leaf2 { Self { view_number: TYPES::View::genesis(), justify_qc, + next_epoch_justify_qc: None, parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, block_header: block_header.clone(), @@ -1001,6 +1024,7 @@ impl PartialEq for Leaf2 { view_number, epoch, justify_qc, + next_epoch_justify_qc, parent_commitment, block_header, upgrade_certificate, @@ -1013,6 +1037,7 @@ impl PartialEq for Leaf2 { *view_number == other.view_number && *epoch == other.epoch && *justify_qc == other.justify_qc + && *next_epoch_justify_qc == other.next_epoch_justify_qc && *parent_commitment == other.parent_commitment && *block_header == other.block_header && *upgrade_certificate == other.upgrade_certificate @@ -1378,6 +1403,7 @@ impl Leaf2 { let QuorumProposal2 { view_number, justify_qc, + next_epoch_justify_qc, block_header, upgrade_certificate, view_change_evidence, @@ -1392,6 +1418,7 @@ impl Leaf2 { TYPES::EPOCH_HEIGHT, )), justify_qc: justify_qc.clone(), + next_epoch_justify_qc: next_epoch_justify_qc.clone(), parent_commitment: justify_qc.data().leaf_commit, block_header: block_header.clone(), upgrade_certificate: upgrade_certificate.clone(), diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 91d5f00d7a..5ea857ed21 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -24,10 +24,10 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, DaData2, QuorumData, QuorumData2, QuorumMarker, TimeoutData, TimeoutData2, - UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncCommitData2, - ViewSyncFinalizeData, ViewSyncFinalizeData2, ViewSyncPreCommitData, ViewSyncPreCommitData2, - Voteable, + DaData, DaData2, NextEpochQuorumData2, QuorumData, QuorumData2, QuorumMarker, TimeoutData, + TimeoutData2, UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, + ViewSyncCommitData2, ViewSyncFinalizeData, ViewSyncFinalizeData2, ViewSyncPreCommitData, + ViewSyncPreCommitData2, Voteable, }, traits::{ election::Membership, @@ -42,7 +42,7 @@ pub trait Threshold { /// Calculate a threshold based on the membership fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64; } @@ -53,7 +53,7 @@ pub struct SuccessThreshold {} impl Threshold for SuccessThreshold { fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64 { membership.success_threshold(epoch).into() } @@ -66,7 +66,7 @@ pub struct OneHonestThreshold {} impl Threshold for OneHonestThreshold { fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64 { membership.failure_threshold(epoch).into() } @@ -79,7 +79,7 @@ pub struct UpgradeThreshold {} impl Threshold for UpgradeThreshold { fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64 { membership.upgrade_threshold(epoch).into() } @@ -211,7 +211,7 @@ impl> Certificate } fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64 { membership.da_success_threshold(epoch).into() } @@ -367,7 +367,7 @@ impl< } fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64 { THRESHOLD::threshold(membership, epoch) } @@ -736,6 +736,9 @@ impl TimeoutCertificate2 { pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; /// Type alias for a `QuorumCertificate2`, which is a `SimpleCertificate` over `QuorumData2` pub type QuorumCertificate2 = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `QuorumCertificate2`, which is a `SimpleCertificate` over `QuorumData2` +pub type NextEpochQuorumCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `DaCertificate`, which is a `SimpleCertificate` over `DaData` pub type DaCertificate = SimpleCertificate; /// Type alias for a `DaCertificate2`, which is a `SimpleCertificate` over `DaData2` diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 1211756644..f75d451914 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -6,7 +6,12 @@ //! Implementations of the simple vote types. -use std::{fmt::Debug, hash::Hash, marker::PhantomData}; +use std::{ + fmt::Debug, + hash::Hash, + marker::PhantomData, + ops::{Deref, DerefMut}, +}; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -40,9 +45,13 @@ pub struct QuorumData { pub struct QuorumData2 { /// Commitment to the leaf pub leaf_commit: Commitment>, - /// Epoch number + /// An epoch to which the data belongs to. Relevant for validating against the correct stake table pub epoch: TYPES::Epoch, } +/// Data used for a yes vote. Used to distinguish votes sent by the next epoch nodes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +#[serde(bound(deserialize = ""))] +pub struct NextEpochQuorumData2(QuorumData2); #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. pub struct DaData { @@ -186,6 +195,7 @@ mod sealed { impl QuorumMarker for QuorumData {} impl QuorumMarker for QuorumData2 {} +impl QuorumMarker for NextEpochQuorumData2 {} impl QuorumMarker for TimeoutData {} impl QuorumMarker for TimeoutData2 {} impl QuorumMarker for ViewSyncPreCommitData {} @@ -356,6 +366,14 @@ impl Committable for QuorumData2 { } } +impl Committable for NextEpochQuorumData2 { + fn commit(&self) -> Commitment { + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(self.leaf_commit.as_ref()) + .finalize() + } +} + impl Committable for TimeoutData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("Timeout data") @@ -517,6 +535,7 @@ macro_rules! impl_has_epoch { impl_has_epoch!( QuorumData2, + NextEpochQuorumData2, DaData2, TimeoutData2, ViewSyncPreCommitData2, @@ -524,6 +543,14 @@ impl_has_epoch!( ViewSyncFinalizeData2 ); +impl + HasEpoch> HasEpoch + for SimpleVote +{ + fn epoch(&self) -> TYPES::Epoch { + self.data.epoch() + } +} + // impl votable for all the data types in this file sealed marker should ensure nothing is accidentally // implemented for structs that aren't "voteable" impl< @@ -575,7 +602,7 @@ impl QuorumVote2 { pub fn to_vote(self) -> QuorumVote { let bytes: [u8; 32] = self.data.leaf_commit.into(); - let signature = self.signature; + let signature = self.signature.clone(); let data = QuorumData { leaf_commit: Commitment::from_raw(bytes), }; @@ -777,7 +804,8 @@ pub type QuorumVote = SimpleVote>; // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file /// Quorum vote Alias pub type QuorumVote2 = SimpleVote>; - +/// Quorum vote Alias. This type is useful to distinguish the next epoch nodes' votes. +pub type NextEpochQuorumVote2 = SimpleVote>; /// DA vote type alias pub type DaVote = SimpleVote; /// DA vote 2 type alias @@ -800,8 +828,37 @@ pub type ViewSyncFinalizeVote2 = SimpleVote = SimpleVote>; /// View Sync Commit Vote 2 type alias pub type ViewSyncCommitVote2 = SimpleVote>; - /// Upgrade proposal vote pub type UpgradeVote = SimpleVote>; /// Upgrade proposal 2 vote pub type UpgradeVote2 = SimpleVote>; + +impl Deref for NextEpochQuorumData2 { + type Target = QuorumData2; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl DerefMut for NextEpochQuorumData2 { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} +impl From> for NextEpochQuorumData2 { + fn from(data: QuorumData2) -> Self { + Self(QuorumData2 { + epoch: data.epoch, + leaf_commit: data.leaf_commit, + }) + } +} + +impl From> for NextEpochQuorumVote2 { + fn from(qvote: QuorumVote2) -> Self { + Self { + data: qvote.data.into(), + view_number: qvote.view_number, + signature: qvote.signature.clone(), + } + } +} diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 7d3aa42241..5a4fdb4586 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -24,7 +24,9 @@ use crate::{ }, event::HotShotAction, message::Proposal, - simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, + simple_certificate::{ + NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, + }, vid::VidSchemeType, }; @@ -64,6 +66,11 @@ pub trait Storage: Send + Sync + Clone { async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc2(&self, high_qc: QuorumCertificate2) -> Result<()>; + /// Update the current high QC in storage. + async fn update_next_epoch_high_qc2( + &self, + next_epoch_high_qc: NextEpochQuorumCertificate2, + ) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, /// and the undecided state. async fn update_undecided_state( diff --git a/types/src/utils.rs b/types/src/utils.rs index 62503f08b5..50877f047a 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -262,9 +262,20 @@ pub fn mnemonic(bytes: H) -> String { /// A helper enum to indicate whether a node is in the epoch transition /// A node is in epoch transition when its high QC is for the last block in an epoch +#[derive(Debug, Clone)] pub enum EpochTransitionIndicator { /// A node is currently in the epoch transition InTransition, /// A node is not in the epoch transition NotInTransition, } + +/// Returns true if the given block number is the last in the epoch based on the given epoch height. +#[must_use] +pub fn is_last_block_in_epoch(block_number: u64, epoch_height: u64) -> bool { + if block_number == 0 || epoch_height == 0 { + false + } else { + block_number % epoch_height == 0 + } +} diff --git a/types/src/vote.rs b/types/src/vote.rs index e70dbe41a3..103c470e6d 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -83,7 +83,7 @@ pub trait Certificate: HasViewNumber { // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>( membership: &MEMBERSHIP, - epoch: ::Epoch, + epoch: TYPES::Epoch, ) -> u64; /// Get Stake Table from Membership implementation. From 755ba453f5273176deba987ad948899ddf83a8bd Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Thu, 19 Dec 2024 08:04:42 -0800 Subject: [PATCH 1335/1393] 3966 Remove clone from Membership and wrap it in Arc> (#3976) --- examples/Cargo.toml | 1 + examples/infra/mod.rs | 66 ++++++++++-------- hotshot/src/lib.rs | 25 ++++--- hotshot/src/tasks/mod.rs | 13 ++-- hotshot/src/tasks/task_state.rs | 22 +++--- .../src/traits/networking/combined_network.rs | 8 ++- .../src/traits/networking/libp2p_network.rs | 15 ++-- hotshot/src/types/handle.rs | 11 ++- libp2p-networking/Cargo.toml | 1 + libp2p-networking/src/network/mod.rs | 5 +- libp2p-networking/src/network/node/config.rs | 30 +++++++- libp2p-networking/src/network/transport.rs | 28 +++++--- task-impls/src/consensus/handlers.rs | 53 +++++++------- task-impls/src/consensus/mod.rs | 4 +- task-impls/src/da.rs | 23 +++++-- task-impls/src/helpers.rs | 62 ++++++++++------- task-impls/src/network.rs | 59 ++++++++++------ task-impls/src/quorum_proposal/handlers.rs | 23 +++++-- task-impls/src/quorum_proposal/mod.rs | 54 ++++++++++----- .../src/quorum_proposal_recv/handlers.rs | 34 +++++---- task-impls/src/quorum_proposal_recv/mod.rs | 14 +++- task-impls/src/quorum_vote/handlers.rs | 21 ++++-- task-impls/src/quorum_vote/mod.rs | 42 ++++++++--- task-impls/src/request.rs | 34 ++++++--- task-impls/src/response.rs | 29 +++++--- task-impls/src/transactions.rs | 33 +++++---- task-impls/src/upgrade.rs | 23 ++++--- task-impls/src/vid.rs | 12 +++- task-impls/src/view_sync.rs | 58 ++++++++++++---- task-impls/src/vote_collection.rs | 13 ++-- testing/src/byzantine/byzantine_behaviour.rs | 2 +- testing/src/helpers.rs | 63 +++++++++-------- testing/src/overall_safety_task.rs | 34 ++++----- testing/src/spinning_task.rs | 4 +- testing/src/test_builder.rs | 3 +- testing/src/test_runner.rs | 30 +++++--- testing/src/view_generator.rs | 25 ++++--- testing/tests/tests_1/da_task.rs | 24 +++++-- testing/tests/tests_1/message.rs | 18 +++-- testing/tests/tests_1/network_task.rs | 14 ++-- .../tests_1/quorum_proposal_recv_task.rs | 6 +- testing/tests/tests_1/quorum_proposal_task.rs | 69 +++++++++++-------- testing/tests/tests_1/quorum_vote_task.rs | 10 +-- testing/tests/tests_1/transaction_task.rs | 14 +++- .../tests_1/upgrade_task_with_proposal.rs | 15 ++-- .../tests/tests_1/upgrade_task_with_vote.rs | 2 +- testing/tests/tests_1/vid_task.rs | 12 ++-- .../tests/tests_1/vote_dependency_handle.rs | 6 +- types/src/consensus.rs | 2 +- types/src/data.rs | 18 ++--- types/src/message.rs | 8 +-- types/src/simple_certificate.rs | 11 ++- types/src/traits/election.rs | 2 +- types/src/traits/network.rs | 3 +- types/src/vote.rs | 20 ++++-- 55 files changed, 785 insertions(+), 446 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 09e13b8e7d..3136acdae4 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -78,6 +78,7 @@ name = "whitelist-push-cdn" path = "push-cdn/whitelist-adapter.rs" [dependencies] +async-lock = { workspace = true } async-trait = { workspace = true } cdn-broker = { workspace = true, features = ["global-permits"] } diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 88a2ff900b..63dd75d203 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -15,6 +15,7 @@ use std::{ time::Instant, }; +use async_lock::RwLock; use async_trait::async_trait; use cdn_broker::reexports::crypto::signature::KeyPair; use chrono::Utc; @@ -350,13 +351,17 @@ pub trait RunDa< config: NetworkConfig, validator_config: ValidatorConfig, libp2p_advertise_address: Option, + membership: &Arc::Membership>>, ) -> Self; /// Initializes the genesis state and HotShot instance; does not start HotShot consensus /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot /// get the anchored view /// Note: sequencing leaf does not have state, so does not return state - async fn initialize_state_and_hotshot(&self) -> SystemContextHandle { + async fn initialize_state_and_hotshot( + &self, + membership: Arc::Membership>>, + ) -> SystemContextHandle { let initializer = hotshot::HotShotInitializer::::from_genesis::(TestInstanceState::default()) .await @@ -371,20 +376,6 @@ pub trait RunDa< let network = self.network(); - let all_nodes = if cfg!(feature = "fixed-leader-election") { - let mut vec = config.config.known_nodes_with_stake.clone(); - vec.truncate(config.config.fixed_leader_for_gpuvid); - vec - } else { - config.config.known_nodes_with_stake.clone() - }; - - let da_nodes = config.config.known_da_nodes.clone(); - - // Create the quorum membership from all nodes, specifying the committee - // as the known da nodes - let memberships = ::Membership::new(all_nodes, da_nodes); - let marketplace_config = MarketplaceConfig { auction_results_provider: TestAuctionResultsProvider::::default().into(), // TODO: we need to pass a valid fallback builder url here somehow @@ -396,7 +387,7 @@ pub trait RunDa< sk, config.node_index, config.config, - memberships, + membership, Arc::from(network), initializer, ConsensusMetricsValue::default(), @@ -526,13 +517,15 @@ pub trait RunDa< } } } - let consensus_lock = context.hotshot.consensus(); - let consensus = consensus_lock.read().await; let num_eligible_leaders = context .hotshot .memberships + .read() + .await .committee_leaders(TYPES::View::genesis(), TYPES::Epoch::genesis()) .len(); + let consensus_lock = context.hotshot.consensus(); + let consensus = consensus_lock.read().await; let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); // `failed_num_views` could include uncommitted views let failed_num_views = total_num_views - num_successful_commits; @@ -622,6 +615,7 @@ where config: NetworkConfig, validator_config: ValidatorConfig, _libp2p_advertise_address: Option, + _membership: &Arc::Membership>>, ) -> PushCdnDaRun { // Convert to the Push-CDN-compatible type let keypair = KeyPair { @@ -708,6 +702,7 @@ where config: NetworkConfig, validator_config: ValidatorConfig, libp2p_advertise_address: Option, + membership: &Arc::Membership>>, ) -> Libp2pDaRun { // Extrapolate keys for ease of use let public_key = &validator_config.public_key; @@ -736,11 +731,6 @@ where .to_string() }; - // Create the quorum membership from the list of known nodes - let all_nodes = config.config.known_nodes_with_stake.clone(); - let da_nodes = config.config.known_da_nodes.clone(); - let quorum_membership = TYPES::Membership::new(all_nodes, da_nodes); - // Derive the bind address let bind_address = derive_libp2p_multiaddr(&bind_address).expect("failed to derive bind address"); @@ -748,7 +738,7 @@ where // Create the Libp2p network let libp2p_network = Libp2pNetwork::from_config( config.clone(), - quorum_membership, + Arc::clone(membership), GossipConfig::default(), RequestResponseConfig::default(), bind_address, @@ -820,6 +810,7 @@ where config: NetworkConfig, validator_config: ValidatorConfig, libp2p_advertise_address: Option, + membership: &Arc::Membership>>, ) -> CombinedDaRun { // Initialize our Libp2p network let libp2p_network: Libp2pDaRun = as RunDa< @@ -831,6 +822,7 @@ where config.clone(), validator_config.clone(), libp2p_advertise_address.clone(), + membership, ) .await; @@ -844,6 +836,7 @@ where config.clone(), validator_config.clone(), libp2p_advertise_address, + membership, ) .await; @@ -878,6 +871,7 @@ where } } +#[allow(clippy::too_many_lines)] /// Main entry point for validators /// # Panics /// if unable to get the local ip address @@ -974,11 +968,27 @@ pub async fn main_entry_point< .join(",") ); + let all_nodes = if cfg!(feature = "fixed-leader-election") { + let mut vec = run_config.config.known_nodes_with_stake.clone(); + vec.truncate(run_config.config.fixed_leader_for_gpuvid); + vec + } else { + run_config.config.known_nodes_with_stake.clone() + }; + let membership = Arc::new(RwLock::new(::Membership::new( + all_nodes, + run_config.config.known_da_nodes.clone(), + ))); + info!("Initializing networking"); - let run = - RUNDA::initialize_networking(run_config.clone(), validator_config, args.advertise_address) - .await; - let hotshot = run.initialize_state_and_hotshot().await; + let run = RUNDA::initialize_networking( + run_config.clone(), + validator_config, + args.advertise_address, + &membership, + ) + .await; + let hotshot = run.initialize_state_and_hotshot(membership).await; if let Some(task) = builder_task { task.start(Box::new(hotshot.event_stream())); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index b76b3cc7c9..8055ae595c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -108,7 +108,7 @@ pub struct SystemContext, V: Versi pub network: Arc, /// Memberships used by consensus - pub memberships: Arc, + pub memberships: Arc>, /// the metrics that the implementor is using. metrics: Arc, @@ -199,7 +199,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: TYPES::Membership, + memberships: Arc>, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -252,7 +252,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: TYPES::Membership, + memberships: Arc>, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -365,7 +365,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext::PrivateKey, node_id: u64, config: HotShotConfig, - memberships: TYPES::Membership, + memberships: Arc>, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -771,7 +780,7 @@ where private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: TYPES::Membership, + memberships: Arc>, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -787,7 +796,7 @@ where private_key.clone(), nonce, config.clone(), - memberships.clone(), + Arc::clone(&memberships), Arc::clone(&network), initializer.clone(), metrics.clone(), diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 7e94aea326..d42f392675 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -82,7 +82,7 @@ pub fn add_response_task, V: Versi ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - (*handle.hotshot.memberships).clone().into(), + Arc::clone(&handle.hotshot.memberships), handle.public_key().clone(), handle.private_key().clone(), handle.hotshot.id, @@ -190,7 +190,7 @@ pub fn add_network_event_task< >( handle: &mut SystemContextHandle, network: Arc, - membership: TYPES::Membership, + membership: Arc>, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, @@ -321,7 +321,7 @@ where private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: TYPES::Membership, + memberships: Arc>, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -516,8 +516,9 @@ where /// Adds the `NetworkEventTaskState` tasks possibly modifying them as well. fn add_network_event_tasks(&self, handle: &mut SystemContextHandle) { let network = Arc::clone(&handle.network); + let memberships = Arc::clone(&handle.memberships); - self.add_network_event_task(handle, Arc::clone(&network), (*handle.memberships).clone()); + self.add_network_event_task(handle, network, memberships); } /// Adds a `NetworkEventTaskState` task. Can be reimplemented to modify its behaviour. @@ -525,7 +526,7 @@ where &self, handle: &mut SystemContextHandle, channel: Arc<>::Network>, - membership: TYPES::Membership, + membership: Arc>, ) { add_network_event_task(handle, channel, membership); } @@ -563,6 +564,6 @@ pub fn add_network_event_tasks, V: add_network_event_task( handle, Arc::clone(&handle.network), - (*handle.memberships).clone(), + Arc::clone(&handle.memberships), ); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 3213ce2ce2..7a610ac6e1 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -58,7 +58,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, - membership: (*handle.hotshot.memberships).clone(), + membership: Arc::clone(&handle.hotshot.memberships), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -78,7 +78,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - quorum_membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -99,7 +99,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), network: Arc::clone(&handle.hotshot.network), vote_collector: None.into(), public_key: handle.public_key().clone(), @@ -128,7 +128,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -145,7 +145,7 @@ impl, V: Versions> CreateTaskState Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), network: Arc::clone(&handle.hotshot.network), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, @@ -170,7 +170,7 @@ impl, V: Versions> CreateTaskState cur_view, next_view: cur_view, cur_epoch: handle.cur_epoch().await, - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), num_timeouts_tracked: 0, @@ -197,7 +197,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), @@ -241,7 +241,7 @@ impl, V: Versions> CreateTaskState latest_voted_view: handle.cur_view().await, vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), drb_computations: DrbComputations::new(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, @@ -266,7 +266,7 @@ impl, V: Versions> CreateTaskState proposal_dependencies: BTreeMap::new(), consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), - quorum_membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), @@ -293,7 +293,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - quorum_membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), timeout: handle.hotshot.config.next_view_timeout, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), @@ -317,7 +317,7 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), network: Arc::clone(&handle.hotshot.network), - membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), vote_collectors: BTreeMap::default(), next_epoch_vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 6c891fa6f7..7021c3e892 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -475,8 +475,12 @@ impl ConnectedNetwork for CombinedNetworks self.secondary().queue_node_lookup(view_number, pk) } - async fn update_view<'a, T>(&'a self, view: u64, epoch: u64, membership: &T::Membership) - where + async fn update_view<'a, T>( + &'a self, + view: u64, + epoch: u64, + membership: Arc>, + ) where T: NodeType + 'a, { let delayed_tasks_channels = Arc::clone(&self.delayed_tasks_channels); diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 0f1fbfc858..39c61e638f 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -390,7 +390,7 @@ impl Libp2pNetwork { #[allow(clippy::too_many_arguments)] pub async fn from_config( mut config: NetworkConfig, - quorum_membership: T::Membership, + membership: Arc>, gossip_config: GossipConfig, request_response_config: RequestResponseConfig, bind_address: Multiaddr, @@ -421,7 +421,7 @@ impl Libp2pNetwork { // Set the auth message and stake table config_builder - .stake_table(Some(quorum_membership)) + .stake_table(Some(membership)) .auth_message(Some(auth_message)); // The replication factor is the minimum of [the default and 2/3 the number of nodes] @@ -978,13 +978,18 @@ impl ConnectedNetwork for Libp2pNetwork { /// So the logic with libp2p is to prefetch upcoming leaders libp2p address to /// save time when we later need to direct message the leader our vote. Hence the /// use of the future view and leader to queue the lookups. - async fn update_view<'a, TYPES>(&'a self, view: u64, epoch: u64, membership: &TYPES::Membership) - where + async fn update_view<'a, TYPES>( + &'a self, + view: u64, + epoch: u64, + membership: Arc>, + ) where TYPES: NodeType + 'a, { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = ::Epoch::new(epoch); - let future_leader = match membership.leader(future_view, epoch) { + + let future_leader = match membership.read().await.leader(future_view, epoch) { Ok(l) => l, Err(e) => { return tracing::info!( diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index e3024e876b..556ff0e1c5 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -69,7 +69,7 @@ pub struct SystemContextHandle, V: pub network: Arc, /// Memberships used by consensus - pub memberships: Arc, + pub memberships: Arc>, /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, @@ -156,7 +156,7 @@ impl + 'static, V: Versions> signed_proposal_request.commit().as_ref(), )?; - let mem = (*self.memberships).clone(); + let mem = Arc::clone(&self.memberships); let receiver = self.internal_event_stream.1.activate_cloned(); let sender = self.internal_event_stream.0.clone(); let epoch_height = self.epoch_height; @@ -187,10 +187,13 @@ impl + 'static, V: Versions> if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if let Err(err) = quorum_proposal.validate_signature(&mem, epoch_height) { + let mem_reader = mem.read().await; + if let Err(err) = quorum_proposal.validate_signature(&mem_reader, epoch_height) + { tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); continue; } + drop(mem_reader); let proposed_leaf = Leaf2::from_quorum_proposal(&quorum_proposal.data); let commit = proposed_leaf.commit(); if commit == leaf_commitment { @@ -326,6 +329,8 @@ impl + 'static, V: Versions> ) -> Result { self.hotshot .memberships + .read() + .await .leader(view_number, epoch_number) .context("Failed to lookup leader") } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 98f1449508..83856af400 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -15,6 +15,7 @@ hotshot-example-types = { path = "../example-types" } [dependencies] anyhow = { workspace = true } +async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index 21a2811bb8..eeb654997e 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -16,8 +16,9 @@ pub mod transport; /// Forked `cbor` codec with altered request/response sizes pub mod cbor; -use std::{collections::HashSet, fmt::Debug}; +use std::{collections::HashSet, fmt::Debug, sync::Arc}; +use async_lock::RwLock; use futures::channel::oneshot::Sender; use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; use libp2p::{ @@ -159,7 +160,7 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; #[instrument(skip(identity))] pub async fn gen_transport( identity: Keypair, - stake_table: Option, + stake_table: Option>>, auth_message: Option>, ) -> Result { // Create the initial `Quic` transport diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 1f5422e321..3958d17f6d 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -4,8 +4,9 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; +use std::{collections::HashSet, num::NonZeroUsize, sync::Arc, time::Duration}; +use async_lock::RwLock; use hotshot_types::traits::node_implementation::NodeType; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; @@ -16,15 +17,17 @@ use super::MAX_GOSSIP_MSG_SIZE; pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(10); /// describe the configuration of the network -#[derive(Clone, Default, derive_builder::Builder, derive_more::Debug)] +#[derive(Default, derive_builder::Builder, derive_more::Debug)] pub struct NetworkNodeConfig { /// The keypair for the node #[builder(setter(into, strip_option), default)] #[debug(skip)] pub keypair: Option, + /// The address to bind to #[builder(default)] pub bind_address: Option, + /// Replication factor for entries in the DHT #[builder(setter(into, strip_option), default = "DEFAULT_REPLICATION_FACTOR")] pub replication_factor: Option, @@ -39,9 +42,11 @@ pub struct NetworkNodeConfig { /// list of addresses to connect to at initialization pub to_connect_addrs: HashSet<(PeerId, Multiaddr)>, + /// republication interval in DHT, must be much less than `ttl` #[builder(default)] pub republication_interval: Option, + /// expiratiry for records in DHT #[builder(default)] pub ttl: Option, @@ -49,7 +54,7 @@ pub struct NetworkNodeConfig { /// The stake table. Used for authenticating other nodes. If not supplied /// we will not check other nodes against the stake table #[builder(default)] - pub stake_table: Option, + pub stake_table: Option>>, /// The path to the file to save the DHT to #[builder(default)] @@ -65,6 +70,25 @@ pub struct NetworkNodeConfig { pub dht_timeout: Option, } +impl Clone for NetworkNodeConfig { + fn clone(&self) -> Self { + Self { + keypair: self.keypair.clone(), + bind_address: self.bind_address.clone(), + replication_factor: self.replication_factor, + gossip_config: self.gossip_config.clone(), + request_response_config: self.request_response_config.clone(), + to_connect_addrs: self.to_connect_addrs.clone(), + republication_interval: self.republication_interval, + ttl: self.ttl, + stake_table: self.stake_table.as_ref().map(Arc::clone), + dht_file_path: self.dht_file_path.clone(), + auth_message: self.auth_message.clone(), + dht_timeout: self.dht_timeout, + } + } +} + /// Configuration for Libp2p's Gossipsub #[derive(Clone, Debug)] #[allow(missing_docs)] diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index aac8a66b60..458b7f032b 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -7,6 +7,7 @@ use std::{ }; use anyhow::{ensure, Context, Result as AnyhowResult}; +use async_lock::RwLock; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use hotshot_types::traits::{ election::Membership, @@ -46,7 +47,7 @@ pub struct StakeTableAuthentication>, + pub stake_table: Arc>>>, /// A pre-signed message that we send to the remote peer for authentication pub auth_message: Arc>>, @@ -64,7 +65,7 @@ impl StakeTableAuthentica /// and authenticates connections against the stake table. pub fn new( inner: T, - stake_table: Option, + stake_table: Option>>, auth_message: Option>, ) -> Self { Self { @@ -109,7 +110,7 @@ impl StakeTableAuthentica /// - The signature is invalid pub async fn verify_peer_authentication( stream: &mut R, - stake_table: Arc>, + stake_table: Arc>>>, required_peer_id: &PeerId, ) -> AnyhowResult<()> { // If we have a stake table, check if the remote peer is in it @@ -136,7 +137,11 @@ impl StakeTableAuthentica } // Check if the public key is in the stake table - if !stake_table.has_stake(&public_key, Types::Epoch::new(0)) { + if !stake_table + .read() + .await + .has_stake(&public_key, Types::Epoch::new(0)) + { return Err(anyhow::anyhow!("Peer not in stake table")); } } @@ -151,7 +156,7 @@ impl StakeTableAuthentica fn gen_handshake> + Send + 'static>( original_future: F, outgoing: bool, - stake_table: Arc>, + stake_table: Arc>>>, auth_message: Arc>>, ) -> UpgradeFuture where @@ -624,7 +629,7 @@ mod test { // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( &mut stream, - Arc::new(Some(stake_table)), + Arc::new(Some(Arc::new(RwLock::new(stake_table)))), &peer_id, ) .await; @@ -644,7 +649,10 @@ mod test { let mut stream = cursor_from!(auth_message); // Create an empty stake table - let stake_table = ::Membership::new(vec![], vec![]); + let stake_table = Arc::new(RwLock::new(::Membership::new( + vec![], + vec![], + ))); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -680,8 +688,10 @@ mod test { stake_table_entry: keypair.0.stake_table_entry(1), state_ver_key: StateVerKey::default(), }; - let stake_table = - ::Membership::new(vec![peer_config.clone()], vec![peer_config]); + let stake_table = Arc::new(RwLock::new(::Membership::new( + vec![peer_config.clone()], + vec![peer_config], + ))); // Check against the malicious peer ID let result = MockStakeTableAuth::verify_peer_authentication( diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 3a9f44c34f..437e066ea0 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -47,6 +47,8 @@ pub(crate) async fn handle_quorum_vote_recv< .is_high_qc_for_last_block(); let we_are_leader = task_state .membership + .read() + .await .leader(vote.view_number() + 1, vote.data.epoch)? == task_state.public_key; ensure!( @@ -77,10 +79,12 @@ pub(crate) async fn handle_quorum_vote_recv< .await?; // If the vote sender belongs to the next epoch, collect it separately to form the second QC - if task_state + let has_stake = task_state .membership - .has_stake(&vote.signing_key(), vote.epoch() + 1) - { + .read() + .await + .has_stake(&vote.signing_key(), vote.epoch() + 1); + if has_stake { handle_vote( &mut task_state.next_epoch_vote_collectors, &vote.clone().into(), @@ -114,6 +118,8 @@ pub(crate) async fn handle_timeout_vote_recv< ensure!( task_state .membership + .read() + .await .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, info!( @@ -158,6 +164,8 @@ pub async fn send_high_qc ensure!( task_state .membership + .read() + .await .has_stake(&task_state.public_key, epoch), debug!( "We were not chosen for the consensus committee for view {:?}", @@ -365,25 +377,16 @@ pub(crate) async fn handle_timeout ) .await; - task_state - .consensus + let leader = task_state + .membership .read() .await - .metrics - .number_of_timeouts - .add(1); - if task_state - .membership - .leader(view_number, task_state.cur_epoch)? - == task_state.public_key - { - task_state - .consensus - .read() - .await - .metrics - .number_of_timeouts_as_leader - .add(1); + .leader(view_number, task_state.cur_epoch); + + let consensus_reader = task_state.consensus.read().await; + consensus_reader.metrics.number_of_timeouts.add(1); + if leader? == task_state.public_key { + consensus_reader.metrics.number_of_timeouts_as_leader.add(1); } Ok(()) diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index e6897a2fe5..fe25a9ec2e 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use async_trait::async_trait; use either::Either; use hotshot_task::task::TaskState; @@ -50,7 +51,7 @@ pub struct ConsensusTaskState, V: pub network: Arc, /// Membership for Quorum Certs/votes - pub membership: Arc, + pub membership: Arc>, /// A map of `QuorumVote` collector tasks. pub vote_collectors: VoteCollectorsMap, QuorumCertificate2, V>, @@ -97,6 +98,7 @@ pub struct ConsensusTaskState, V: /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, } + impl, V: Versions> ConsensusTaskState { /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, cur_epoch = *self.cur_epoch), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index b292814e24..c4c7f0ab46 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -56,7 +56,7 @@ pub struct DaTaskState, V: Version /// Membership for the DA committee and quorum committee. /// We need the latter only for calculating the proper VID scheme /// from the number of nodes in the quorum. - pub membership: Arc, + pub membership: Arc>, /// The underlying network pub network: Arc, @@ -115,7 +115,11 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState( view_number: TYPES::View, event_sender: Sender>>, event_receiver: Receiver>>, - quorum_membership: Arc, + membership: Arc>, consensus: OuterConsensus, sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, @@ -73,7 +73,7 @@ pub(crate) async fn fetch_proposal( ) .await; - let mem = Arc::clone(&quorum_membership); + let mem = Arc::clone(&membership); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. @@ -105,7 +105,8 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem, epoch_height).is_ok() { + let mem_reader = mem.read().await; + if quorum_proposal.validate_signature(&mem_reader, epoch_height).is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -126,10 +127,16 @@ pub(crate) async fn fetch_proposal( let justify_qc = proposal.data.justify_qc.clone(); let justify_qc_epoch = justify_qc.data.epoch(); + + let membership_reader = membership.read().await; + let membership_stake_table = membership_reader.stake_table(justify_qc_epoch); + let membership_success_threshold = membership_reader.success_threshold(justify_qc_epoch); + drop(membership_reader); + if !justify_qc .is_valid_cert( - quorum_membership.stake_table(justify_qc_epoch), - quorum_membership.success_threshold(justify_qc_epoch), + membership_stake_table, + membership_success_threshold, upgrade_lock, ) .await @@ -430,7 +437,7 @@ pub async fn decide_from_proposal( pub(crate) async fn parent_leaf_and_state( event_sender: &Sender>>, event_receiver: &Receiver>>, - quorum_membership: Arc, + membership: Arc>, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, consensus: OuterConsensus, @@ -449,7 +456,7 @@ pub(crate) async fn parent_leaf_and_state( parent_view_number, event_sender.clone(), event_receiver.clone(), - quorum_membership, + membership, consensus.clone(), public_key.clone(), private_key.clone(), @@ -535,7 +542,7 @@ pub async fn validate_proposal_safety_and_liveness< UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &validation_info.quorum_membership, + &validation_info.membership, TYPES::Epoch::new(proposal_epoch), &validation_info.upgrade_lock, ) @@ -660,10 +667,9 @@ pub(crate) async fn validate_proposal_view_and_certs< ); // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment - proposal.validate_signature( - &validation_info.quorum_membership, - validation_info.epoch_height, - )?; + let membership_reader = validation_info.membership.read().await; + proposal.validate_signature(&membership_reader, validation_info.epoch_height)?; + drop(membership_reader); // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.view_number() != view_number - 1 { @@ -681,15 +687,18 @@ pub(crate) async fn validate_proposal_view_and_certs< *view_number ); let timeout_cert_epoch = timeout_cert.data().epoch(); + + let membership_reader = validation_info.membership.read().await; + let membership_stake_table = membership_reader.stake_table(timeout_cert_epoch); + let membership_success_threshold = + membership_reader.success_threshold(timeout_cert_epoch); + drop(membership_reader); + ensure!( timeout_cert .is_valid_cert( - validation_info - .quorum_membership - .stake_table(timeout_cert_epoch), - validation_info - .quorum_membership - .success_threshold(timeout_cert_epoch), + membership_stake_table, + membership_success_threshold, &validation_info.upgrade_lock ) .await, @@ -706,16 +715,19 @@ pub(crate) async fn validate_proposal_view_and_certs< ); let view_sync_cert_epoch = view_sync_cert.data().epoch(); + + let membership_reader = validation_info.membership.read().await; + let membership_stake_table = membership_reader.stake_table(view_sync_cert_epoch); + let membership_success_threshold = + membership_reader.success_threshold(view_sync_cert_epoch); + drop(membership_reader); + // View sync certs must also be valid. ensure!( view_sync_cert .is_valid_cert( - validation_info - .quorum_membership - .stake_table(view_sync_cert_epoch), - validation_info - .quorum_membership - .success_threshold(view_sync_cert_epoch), + membership_stake_table, + membership_success_threshold, &validation_info.upgrade_lock ) .await, @@ -734,7 +746,7 @@ pub(crate) async fn validate_proposal_view_and_certs< )); UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &validation_info.quorum_membership, + &validation_info.membership, epoch, &validation_info.upgrade_lock, ) diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a72344dcc6..42b2235dd3 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -10,18 +10,13 @@ use std::{ sync::Arc, }; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; -use hotshot_types::data::{VidDisperseShare, VidDisperseShare2}; use hotshot_types::{ consensus::OuterConsensus, - data::VidDisperse, + data::{VidDisperse, VidDisperseShare, VidDisperseShare2}, event::{Event, EventType, HotShotAction}, message::{ convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, @@ -44,6 +39,11 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; + /// the network message task state #[derive(Clone)] pub struct NetworkMessageTaskState { @@ -255,18 +255,25 @@ pub struct NetworkEventTaskState< > { /// comm network pub network: Arc, + /// view number pub view: TYPES::View, + /// epoch number pub epoch: TYPES::Epoch, + /// network memberships - pub membership: TYPES::Membership, + pub membership: Arc>, + /// Storage to store actionable events pub storage: Arc>, + /// Shared consensus state pub consensus: OuterConsensus, + /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// map view number to transmit tasks pub transmit_tasks: BTreeMap>>, } @@ -311,7 +318,8 @@ impl< if let Some((sender, message_kind, transmit)) = self.parse_event(event, &mut maybe_action).await { - self.spawn_transmit_task(message_kind, maybe_action, transmit, sender); + self.spawn_transmit_task(message_kind, maybe_action, transmit, sender) + .await; }; } @@ -469,7 +477,12 @@ impl< HotShotEvent::QuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; - let leader = match self.membership.leader(view_number, vote.epoch()) { + let leader = match self + .membership + .read() + .await + .leader(view_number, vote.epoch()) + { Ok(l) => l, Err(e) => { tracing::warn!( @@ -574,7 +587,7 @@ impl< *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); let epoch = vote.data.epoch; - let leader = match self.membership.leader(view_number, epoch) { + let leader = match self.membership.read().await.leader(view_number, epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -621,7 +634,7 @@ impl< } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; - let leader = match self.membership.leader(view_number, self.epoch) { + let leader = match self.membership.read().await.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -652,7 +665,7 @@ impl< HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; - let leader = match self.membership.leader(view_number, self.epoch) { + let leader = match self.membership.read().await.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -683,7 +696,7 @@ impl< HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; - let leader = match self.membership.leader(view_number, self.epoch) { + let leader = match self.membership.read().await.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -762,7 +775,7 @@ impl< HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; - let leader = match self.membership.leader(view_number, self.epoch) { + let leader = match self.membership.read().await.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -800,7 +813,7 @@ impl< HotShotEvent::UpgradeVoteSend(vote) => { tracing::error!("Sending upgrade vote!"); let view_number = vote.view_number(); - let leader = match self.membership.leader(view_number, self.epoch) { + let leader = match self.membership.read().await.leader(view_number, self.epoch) { Ok(l) => l, Err(e) => { tracing::warn!( @@ -828,9 +841,9 @@ impl< self.cancel_tasks(keep_view); let net = Arc::clone(&self.network); let epoch = self.epoch.u64(); - let mem = self.membership.clone(); + let mem = Arc::clone(&self.membership); spawn(async move { - net.update_view::(*keep_view, epoch, &mem).await; + net.update_view::(*keep_view, epoch, mem).await; }); None } @@ -870,7 +883,7 @@ impl< } /// Creates a network message and spawns a task that transmits it on the wire. - fn spawn_transmit_task( + async fn spawn_transmit_task( &mut self, message_kind: MessageKind, maybe_action: Option, @@ -892,6 +905,8 @@ impl< let committee_topic = Topic::Global; let da_committee = self .membership + .read() + .await .da_committee_members(view_number, self.epoch); let network = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); @@ -1013,13 +1028,17 @@ pub mod test { self.parse_event(event, &mut maybe_action).await { // Modify the values acquired by parsing the event. + let membership_reader = self.membership.read().await; (self.modifier)( &mut sender, &mut message_kind, &mut transmit, - &self.membership, + &membership_reader, ); - self.spawn_transmit_task(message_kind, maybe_action, transmit, sender); + drop(membership_reader); + + self.spawn_transmit_task(message_kind, maybe_action, transmit, sender) + .await; } } } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 7dbedb9107..b9d696f01b 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -87,7 +87,7 @@ pub struct ProposalDependencyHandle { pub instance_state: Arc, /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, + pub membership: Arc>, /// Our public key pub public_key: TYPES::SignatureKey, @@ -100,6 +100,7 @@ pub struct ProposalDependencyHandle { /// View timeout from config. pub timeout: u64, + /// The most recent upgrade certificate this node formed. /// Note: this is ONLY for certificates that have been formed internally, /// so that we can propose with them. @@ -132,10 +133,16 @@ impl ProposalDependencyHandle { ) -> Option> { while let Ok(event) = rx.recv_direct().await { if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() { + let membership_reader = self.membership.read().await; + let membership_stake_table = membership_reader.stake_table(qc.data.epoch); + let membership_success_threshold = + membership_reader.success_threshold(qc.data.epoch); + drop(membership_reader); + if qc .is_valid_cert( - self.quorum_membership.stake_table(qc.data.epoch), - self.quorum_membership.success_threshold(qc.data.epoch), + membership_stake_table, + membership_success_threshold, &self.upgrade_lock, ) .await @@ -267,7 +274,7 @@ impl ProposalDependencyHandle { let (parent_leaf, state) = parent_leaf_and_state( &self.sender, &self.receiver, - Arc::clone(&self.quorum_membership), + Arc::clone(&self.membership), self.public_key.clone(), self.private_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), @@ -370,7 +377,13 @@ impl ProposalDependencyHandle { )); // Make sure we are the leader for the view and epoch. // We might have ended up here because we were in the epoch transition. - if self.quorum_membership.leader(self.view_number, epoch)? != self.public_key { + if self + .membership + .read() + .await + .leader(self.view_number, epoch)? + != self.public_key + { tracing::debug!( "We are not the leader in the epoch for which we are about to propose. Do not send the quorum proposal." ); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 77bfef856c..9eb75188a3 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -52,7 +52,7 @@ pub struct QuorumProposalTaskState pub instance_state: Arc, /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, + pub membership: Arc>, /// Our public key pub public_key: TYPES::SignatureKey, @@ -271,7 +271,7 @@ impl, V: Versions> /// without losing the data that it received, as the dependency task would otherwise have no /// ability to receive the event and, thus, would never propose. #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Create dependency task", level = "error")] - fn create_dependency_task_if_new( + async fn create_dependency_task_if_new( &mut self, view_number: TYPES::View, epoch_number: TYPES::Epoch, @@ -280,17 +280,18 @@ impl, V: Versions> event: Arc>, epoch_transition_indicator: EpochTransitionIndicator, ) -> Result<()> { + let membership_reader = self.membership.read().await; let leader_in_current_epoch = - self.quorum_membership.leader(view_number, epoch_number)? == self.public_key; + membership_reader.leader(view_number, epoch_number)? == self.public_key; // If we are in the epoch transition and we are the leader in the next epoch, // we might want to start collecting dependencies for our next epoch proposal. let leader_in_next_epoch = matches!( epoch_transition_indicator, EpochTransitionIndicator::InTransition - ) && self - .quorum_membership - .leader(view_number, epoch_number + 1)? + ) && membership_reader.leader(view_number, epoch_number + 1)? == self.public_key; + drop(membership_reader); + // Don't even bother making the task if we are not entitled to propose anyway. ensure!( leader_in_current_epoch || leader_in_next_epoch, @@ -322,7 +323,7 @@ impl, V: Versions> view_number, sender: event_sender, receiver: event_receiver, - quorum_membership: Arc::clone(&self.quorum_membership), + membership: Arc::clone(&self.membership), public_key: self.public_key.clone(), private_key: self.private_key.clone(), instance_state: Arc::clone(&self.instance_state), @@ -405,7 +406,8 @@ impl, V: Versions> event_sender, Arc::clone(&event), epoch_transition_indicator, - )?; + ) + .await?; } either::Left(qc) => { // Only update if the qc is from a newer view @@ -439,7 +441,8 @@ impl, V: Versions> event_sender, Arc::clone(&event), epoch_transition_indicator, - )?; + ) + .await?; } }, HotShotEvent::SendPayloadCommitmentAndMetadata( @@ -459,16 +462,23 @@ impl, V: Versions> event_sender, Arc::clone(&event), EpochTransitionIndicator::NotInTransition, - )?; + ) + .await?; } HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = certificate.data.epoch; + let membership_reader = self.membership.read().await; + let membership_stake_table = membership_reader.stake_table(epoch_number); + let membership_success_threshold = + membership_reader.success_threshold(epoch_number); + drop(membership_reader); + ensure!( certificate .is_valid_cert( - self.quorum_membership.stake_table(epoch_number), - self.quorum_membership.success_threshold(epoch_number), + membership_stake_table, + membership_success_threshold, &self.upgrade_lock ) .await, @@ -487,7 +497,8 @@ impl, V: Versions> event_sender, event, EpochTransitionIndicator::NotInTransition, - )?; + ) + .await?; } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); @@ -503,7 +514,8 @@ impl, V: Versions> event_sender, Arc::clone(&event), epoch_transition_indicator, - )?; + ) + .await?; } HotShotEvent::QuorumProposalSend(proposal, _) => { let view = proposal.data.view_number(); @@ -522,7 +534,8 @@ impl, V: Versions> event_sender, Arc::clone(&event), EpochTransitionIndicator::NotInTransition, - )?; + ) + .await?; } HotShotEvent::ViewChange(view, epoch) => { if epoch > &self.cur_epoch { @@ -538,10 +551,17 @@ impl, V: Versions> HotShotEvent::HighQcSend(qc, ..) => { ensure!(qc.view_number() > self.highest_qc.view_number()); let cert_epoch_number = qc.data.epoch; + + let membership_reader = self.membership.read().await; + let membership_stake_table = membership_reader.stake_table(cert_epoch_number); + let membership_success_threshold = + membership_reader.success_threshold(cert_epoch_number); + drop(membership_reader); + ensure!( qc.is_valid_cert( - self.quorum_membership.stake_table(cert_epoch_number), - self.quorum_membership.success_threshold(cert_epoch_number), + membership_stake_table, + membership_success_threshold, &self.upgrade_lock ) .await, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index d6a768c2cd..b08064205c 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use async_broadcast::{broadcast, Receiver, Sender}; -use async_lock::RwLockUpgradableReadGuard; +use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, @@ -100,7 +100,7 @@ fn spawn_fetch_proposal( view: TYPES::View, event_sender: Sender>>, event_receiver: Receiver>>, - membership: Arc, + membership: Arc>, consensus: OuterConsensus, sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, @@ -162,14 +162,15 @@ pub(crate) async fn handle_quorum_proposal_recv< validation_info.epoch_height, )); + let membership_reader = validation_info.membership.read().await; + let membership_stake_table = membership_reader.stake_table(justify_qc.data.epoch); + let membership_success_threshold = membership_reader.success_threshold(justify_qc.data.epoch); + drop(membership_reader); + if !justify_qc .is_valid_cert( - validation_info - .quorum_membership - .stake_table(justify_qc.data.epoch), - validation_info - .quorum_membership - .success_threshold(justify_qc.data.epoch), + membership_stake_table, + membership_success_threshold, &validation_info.upgrade_lock, ) .await @@ -187,15 +188,18 @@ pub(crate) async fn handle_quorum_proposal_recv< { bail!("Next epoch justify qc exists but it's not equal with justify qc."); } + + let membership_reader = validation_info.membership.read().await; + let membership_next_stake_table = membership_reader.stake_table(justify_qc.data.epoch + 1); + let membership_next_success_threshold = + membership_reader.success_threshold(justify_qc.data.epoch + 1); + drop(membership_reader); + // Validate the next epoch justify qc as well if !next_epoch_justify_qc .is_valid_cert( - validation_info - .quorum_membership - .stake_table(justify_qc.data.epoch + 1), - validation_info - .quorum_membership - .success_threshold(justify_qc.data.epoch + 1), + membership_next_stake_table, + membership_next_success_threshold, &validation_info.upgrade_lock, ) .await @@ -229,7 +233,7 @@ pub(crate) async fn handle_quorum_proposal_recv< justify_qc.view_number(), event_sender.clone(), event_receiver.clone(), - Arc::clone(&validation_info.quorum_membership), + Arc::clone(&validation_info.membership), OuterConsensus::new(Arc::clone(&validation_info.consensus.inner_consensus)), // Note that we explicitly use the node key here instead of the provided key in the signature. // This is because the key that we receive is for the prior leader, so the payload would be routed diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index a30e965d69..f6ed129a6e 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -58,7 +58,7 @@ pub struct QuorumProposalRecvTaskState, + pub membership: Arc>, /// View timeout from config. pub timeout: u64, @@ -88,20 +88,28 @@ pub struct QuorumProposalRecvTaskState, V: Versions> { /// The node's id pub id: u64, + /// Our public key pub(crate) public_key: TYPES::SignatureKey, + /// Our Private Key pub(crate) private_key: ::PrivateKey, + /// Reference to consensus. The replica will require a write lock on this. pub(crate) consensus: OuterConsensus, + /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, + pub membership: Arc>, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, + /// This node's storage ref pub(crate) storage: Arc>, + /// Lock for a decided upgrade pub(crate) upgrade_lock: UpgradeLock, + /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, } @@ -142,7 +150,7 @@ impl, V: Versions> public_key: self.public_key.clone(), private_key: self.private_key.clone(), consensus: self.consensus.clone(), - quorum_membership: Arc::clone(&self.quorum_membership), + membership: Arc::clone(&self.membership), output_event_stream: self.output_event_stream.clone(), storage: Arc::clone(&self.storage), upgrade_lock: self.upgrade_lock.clone(), diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 577c5325e6..0c9cdbb666 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -59,6 +59,8 @@ async fn handle_quorum_proposal_validated_drb_calculation_start< // Start the new task if we're in the committee for this epoch if task_state .membership + .read() + .await .has_stake(&task_state.public_key, current_epoch_number) { task_state @@ -80,7 +82,7 @@ async fn handle_quorum_proposal_validated_drb_calculation_start< /// /// We don't need to handle the special cases explicitly here, because the first proposal /// with which we'll start the DRB computation is for epoch 3. -fn handle_quorum_proposal_validated_drb_calculation_seed< +async fn handle_quorum_proposal_validated_drb_calculation_seed< TYPES: NodeType, I: NodeImplementation, V: Versions, @@ -112,6 +114,8 @@ fn handle_quorum_proposal_validated_drb_calculation_seed< // Skip if we are not in the committee of the next epoch. if task_state .membership + .read() + .await .has_stake(&task_state.public_key, current_epoch_number + 1) { let new_epoch_number = current_epoch_number + 2; @@ -252,7 +256,8 @@ pub(crate) async fn handle_quorum_proposal_validated< proposal, task_state, &leaf_views, - )?; + ) + .await?; } } @@ -270,7 +275,7 @@ pub(crate) async fn update_shared_state< consensus: OuterConsensus, sender: Sender>>, receiver: InactiveReceiver>>, - quorum_membership: Arc, + membership: Arc>, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, upgrade_lock: UpgradeLock, @@ -309,7 +314,7 @@ pub(crate) async fn update_shared_state< justify_qc.view_number(), sender.clone(), receiver.activate_cloned(), - Arc::clone(&quorum_membership), + Arc::clone(&membership), OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), public_key.clone(), private_key.clone(), @@ -396,7 +401,7 @@ pub(crate) async fn update_shared_state< #[allow(clippy::too_many_arguments)] pub(crate) async fn submit_vote, V: Versions>( sender: Sender>>, - quorum_membership: Arc, + membership: Arc>, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, upgrade_lock: UpgradeLock, @@ -411,11 +416,13 @@ pub(crate) async fn submit_vote, V TYPES::EPOCH_HEIGHT, )); - let committee_member_in_current_epoch = quorum_membership.has_stake(&public_key, epoch_number); + let membership_reader = membership.read().await; + let committee_member_in_current_epoch = membership_reader.has_stake(&public_key, epoch_number); // If the proposed leaf is for the last block in the epoch and the node is part of the quorum committee // in the next epoch, the node should vote to achieve the double quorum. let committee_member_in_next_epoch = is_last_block_in_epoch(leaf.height(), TYPES::EPOCH_HEIGHT) - && quorum_membership.has_stake(&public_key, epoch_number + 1); + && membership_reader.has_stake(&public_key, epoch_number + 1); + drop(membership_reader); ensure!( committee_member_in_current_epoch || committee_member_in_next_epoch, diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index a009e6aee6..5ec3e54b1a 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -65,28 +65,40 @@ enum VoteDependency { pub struct VoteDependencyHandle, V: Versions> { /// Public key. pub public_key: TYPES::SignatureKey, + /// Private Key. pub private_key: ::PrivateKey, + /// Reference to consensus. The replica will require a write lock on this. pub consensus: OuterConsensus, + /// Immutable instance state pub instance_state: Arc, + /// Membership for Quorum certs/votes. - pub quorum_membership: Arc, + pub membership: Arc>, + /// Reference to the storage. pub storage: Arc>, + /// View number to vote on. pub view_number: TYPES::View, + /// Event sender. pub sender: Sender>>, + /// Event receiver. pub receiver: InactiveReceiver>>, + /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// The consensus metrics pub consensus_metrics: Arc, + /// The node's id pub id: u64, + /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, } @@ -197,7 +209,7 @@ impl + 'static, V: Versions> Handl OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), self.sender.clone(), self.receiver.clone(), - Arc::clone(&self.quorum_membership), + Arc::clone(&self.membership), self.public_key.clone(), self.private_key.clone(), self.upgrade_lock.clone(), @@ -233,7 +245,7 @@ impl + 'static, V: Versions> Handl if let Err(e) = submit_vote::( self.sender.clone(), - Arc::clone(&self.quorum_membership), + Arc::clone(&self.membership), self.public_key.clone(), self.private_key.clone(), self.upgrade_lock.clone(), @@ -276,7 +288,7 @@ pub struct QuorumVoteTaskState, V: pub network: Arc, /// Membership for Quorum certs/votes and DA committee certs/votes. - pub membership: Arc, + pub membership: Arc>, /// Table for the in-progress DRB computation tasks. //pub drb_computations: BTreeMap>, @@ -393,7 +405,7 @@ impl, V: Versions> QuorumVoteTaskS private_key: self.private_key.clone(), consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), instance_state: Arc::clone(&self.instance_state), - quorum_membership: Arc::clone(&self.membership), + membership: Arc::clone(&self.membership), storage: Arc::clone(&self.storage), view_number, sender: event_sender.clone(), @@ -504,11 +516,17 @@ impl, V: Versions> QuorumVoteTaskS let cert_epoch = cert.data.epoch; + let membership_reader = self.membership.read().await; + let membership_da_stake_table = membership_reader.da_stake_table(cert_epoch); + let membership_da_success_threshold = + membership_reader.da_success_threshold(cert_epoch); + drop(membership_reader); + // Validate the DAC. ensure!( cert.is_valid_cert( - self.membership.da_stake_table(cert_epoch), - self.membership.da_success_threshold(cert_epoch), + membership_da_stake_table, + membership_da_success_threshold, &self.upgrade_lock ) .await, @@ -547,18 +565,22 @@ impl, V: Versions> QuorumVoteTaskS "VID share signature is invalid" ); + let membership_reader = self.membership.read().await; // ensure that the VID share was sent by a DA member OR the view leader ensure!( - self.membership + membership_reader .da_committee_members(view, disperse_epoch) .contains(sender) - || *sender == self.membership.leader(view, disperse_epoch)?, + || *sender == membership_reader.leader(view, disperse_epoch)?, "VID share was not sent by a DA member or the view leader." ); + let membership_total_nodes = membership_reader.total_nodes(disperse_epoch); + drop(membership_reader); + // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results - match vid_scheme(self.membership.total_nodes(disperse_epoch)).verify_share( + match vid_scheme(membership_total_nodes).verify_share( &disperse.data.share, &disperse.data.common, payload_commitment, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 3951102c24..5e1bdef390 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -14,6 +14,7 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::{ dependency::{Dependency, EventDependency}, @@ -54,23 +55,32 @@ pub struct NetworkRequestState> { /// Network to send requests over /// The underlying network pub network: Arc, + /// Consensus shared state so we can check if we've gotten the information /// before sending a request pub consensus: OuterConsensus, + /// Last seen view, we won't request for proposals before older than this view pub view: TYPES::View, + /// Delay before requesting peers pub delay: Duration, - /// Membership (Here containing only DA) - pub membership: TYPES::Membership, + + /// Membership (Used here only for DA) + pub membership: Arc>, + /// This nodes public key pub public_key: TYPES::SignatureKey, + /// This nodes private/signing key, used to sign requests. pub private_key: ::PrivateKey, + /// The node's id pub id: u64, + /// A flag indicating that `HotShotEvent::Shutdown` has been received pub shutdown_flag: Arc, + /// A flag indicating that `HotShotEvent::Shutdown` has been received pub spawned_tasks: BTreeMap>>, } @@ -113,7 +123,8 @@ impl> TaskState for NetworkRequest .vid_shares() .contains_key(&prop_view) { - self.spawn_requests(prop_view, prop_epoch, sender, receiver); + self.spawn_requests(prop_view, prop_epoch, sender, receiver) + .await; } Ok(()) } @@ -145,7 +156,7 @@ impl> TaskState for NetworkRequest impl> NetworkRequestState { /// Creates and signs the payload, then will create a request task - fn spawn_requests( + async fn spawn_requests( &mut self, view: TYPES::View, epoch: TYPES::Epoch, @@ -163,13 +174,14 @@ impl> NetworkRequestState, signature: Signature, @@ -185,17 +197,19 @@ impl> NetworkRequestState = self - .membership + let mut recipients: Vec = membership_reader .da_committee_members(view, epoch) .into_iter() .collect(); + drop(membership_reader); + // Randomize the recipients so all replicas don't overload the same 1 recipients // and so we don't implicitly rely on the same replica all the time. recipients.shuffle(&mut thread_rng()); diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 27983cdffa..86543a5db9 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -7,6 +7,7 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use committable::Committable; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, @@ -31,12 +32,16 @@ const TXNS_TIMEOUT: Duration = Duration::from_millis(100); pub struct NetworkResponseState { /// Locked consensus state consensus: LockedConsensusState, + /// Quorum membership for checking if requesters have state - quorum: Arc, + membership: Arc>, + /// This replicas public key pub_key: TYPES::SignatureKey, + /// This replicas private key private_key: ::PrivateKey, + /// The node's id id: u64, } @@ -45,14 +50,14 @@ impl NetworkResponseState { /// Create the network request state with the info it needs pub fn new( consensus: LockedConsensusState, - quorum: Arc, + membership: Arc>, pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, id: u64, ) -> Self { Self { consensus, - quorum, + membership, pub_key, private_key, id, @@ -71,8 +76,9 @@ impl NetworkResponseState { // break loop when false, this means shutdown received match event.as_ref() { HotShotEvent::VidRequestRecv(request, sender) => { + let cur_epoch = self.consensus.read().await.cur_epoch(); // Verify request is valid - if !self.valid_sender(sender, self.consensus.read().await.cur_epoch()) + if !self.valid_sender(sender, cur_epoch).await || !valid_signature::(request, sender) { continue; @@ -99,17 +105,18 @@ impl NetworkResponseState { return; } - if let Some(quorum_proposal) = self + let quorum_proposal_result = self .consensus .read() .await .last_proposals() .get(&req.view_number) - { + .cloned(); + if let Some(quorum_proposal) = quorum_proposal_result { broadcast_event( HotShotEvent::QuorumProposalResponseSend( req.key.clone(), - quorum_proposal.clone(), + quorum_proposal, ) .into(), &event_sender, @@ -151,7 +158,7 @@ impl NetworkResponseState { if Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, - Arc::clone(&self.quorum), + Arc::clone(&self.membership), &self.private_key, ) .await @@ -162,7 +169,7 @@ impl NetworkResponseState { Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, - Arc::clone(&self.quorum), + Arc::clone(&self.membership), &self.private_key, ) .await?; @@ -178,8 +185,8 @@ impl NetworkResponseState { } /// Makes sure the sender is allowed to send a request in the given epoch. - fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool { - self.quorum.has_stake(sender, epoch) + async fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool { + self.membership.read().await.has_stake(sender, epoch) } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index ce01590734..9328572fd8 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -10,6 +10,7 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use async_trait::async_trait; use futures::{future::join_all, stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; @@ -94,7 +95,7 @@ pub struct TransactionTaskState, V pub consensus: OuterConsensus, /// Membership for the quorum - pub membership: Arc, + pub membership: Arc>, /// Builder 0.1 API clients pub builder_clients: Vec>, @@ -213,12 +214,10 @@ impl, V: Versions> TransactionTask .number_of_empty_blocks_proposed .add(1); - let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); - let Some(null_fee) = null_block::builder_fee::( - self.membership.total_nodes(self.cur_epoch), - version, - *block_view, - ) else { + let membership_total_nodes = self.membership.read().await.total_nodes(self.cur_epoch); + let Some(null_fee) = + null_block::builder_fee::(membership_total_nodes, version, *block_view) + else { tracing::error!("Failed to get null fee"); return None; }; @@ -356,18 +355,16 @@ impl, V: Versions> TransactionTask } /// Produce a null block - pub fn null_block( + pub async fn null_block( &self, block_view: TYPES::View, block_epoch: TYPES::Epoch, version: Version, ) -> Option> { - let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); - let Some(null_fee) = null_block::builder_fee::( - self.membership.total_nodes(self.cur_epoch), - version, - *block_view, - ) else { + let membership_total_nodes = self.membership.read().await.total_nodes(self.cur_epoch); + let Some(null_fee) = + null_block::builder_fee::(membership_total_nodes, version, *block_view) + else { tracing::error!("Failed to calculate null block fee."); return None; }; @@ -418,7 +415,7 @@ impl, V: Versions> TransactionTask e ); - let null_block = self.null_block(block_view, block_epoch, version)?; + let null_block = self.null_block(block_view, block_epoch, version).await?; // Increment the metric for number of empty blocks proposed self.consensus @@ -492,7 +489,8 @@ impl, V: Versions> TransactionTask self.cur_view = view; self.cur_epoch = *epoch; - if self.membership.leader(view, *epoch)? == self.public_key { + let leader = self.membership.read().await.leader(view, *epoch)?; + if leader == self.public_key { self.handle_view_change(&event_stream, view, *epoch).await; return Ok(()); } @@ -756,8 +754,9 @@ impl, V: Versions> TransactionTask // If epochs are supported, provide the latest `num_nodes` information to the // builder for VID computation. let (block, header_input) = if version >= V::Epochs::VERSION { + let total_nodes = self.membership.read().await.total_nodes(self.cur_epoch); futures::join! { - client.claim_block_with_num_nodes(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature, self.membership.total_nodes(self.cur_epoch)) , + client.claim_block_with_num_nodes(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature, total_nodes), client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) } } else { diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 16f8c7e555..3ff8e073a8 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -7,6 +7,7 @@ use std::{marker::PhantomData, sync::Arc, time::SystemTime}; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; use hotshot_task::task::TaskState; @@ -50,7 +51,7 @@ pub struct UpgradeTaskState { pub cur_epoch: TYPES::Epoch, /// Membership for Quorum Certs/votes - pub quorum_membership: Arc, + pub membership: Arc>, /// A map of `UpgradeVote` collector tasks pub vote_collectors: VoteCollectorsMap, UpgradeCertificate, V>, @@ -180,7 +181,7 @@ impl UpgradeTaskState { ); // We then validate that the proposal was issued by the leader for the view. - let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch)?; + let view_leader_key = self.membership.read().await.leader(view, self.cur_epoch)?; ensure!( view_leader_key == *sender, info!( @@ -223,13 +224,13 @@ impl UpgradeTaskState { // Check if we are the leader. { let view = vote.view_number(); + let membership_reader = self.membership.read().await; ensure!( - self.quorum_membership.leader(view, self.cur_epoch)? == self.public_key, + membership_reader.leader(view, self.cur_epoch)? == self.public_key, debug!( "We are not the leader for view {} are we leader for next view? {}", *view, - self.quorum_membership.leader(view + 1, self.cur_epoch)? - == self.public_key + membership_reader.leader(view + 1, self.cur_epoch)? == self.public_key ) ); } @@ -238,7 +239,7 @@ impl UpgradeTaskState { &mut self.vote_collectors, vote, self.public_key.clone(), - &self.quorum_membership, + &self.membership, self.cur_epoch, self.id, &event, @@ -265,16 +266,18 @@ impl UpgradeTaskState { ))? .as_secs(); + let leader = self.membership.read().await.leader( + TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), + self.cur_epoch, + )?; + // We try to form a certificate 5 views before we're leader. if view >= self.start_proposing_view && view < self.stop_proposing_view && time >= self.start_proposing_time && time < self.stop_proposing_time && !self.upgraded().await - && self.quorum_membership.leader( - TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), - self.cur_epoch, - )? == self.public_key + && leader == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: V::Base::VERSION, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index b27038fabf..1f9b218aaf 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -7,6 +7,7 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -45,7 +46,7 @@ pub struct VidTaskState> { pub network: Arc, /// Membership for the quorum - pub membership: Arc, + pub membership: Arc>, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -83,7 +84,14 @@ impl> VidTaskState { ::BlockPayload::from_bytes(encoded_transactions, metadata); let builder_commitment = payload.builder_commitment(metadata); let epoch = self.cur_epoch; - if self.membership.leader(*view_number, epoch).ok()? != self.public_key { + if self + .membership + .read() + .await + .leader(*view_number, epoch) + .ok()? + != self.public_key + { tracing::debug!( "We are not the leader in the current epoch. Do not send the VID dispersal." ); diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 65e1ad7d6b..4f40516b3e 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -74,7 +74,7 @@ pub struct ViewSyncTaskState { pub cur_epoch: TYPES::Epoch, /// Membership for the quorum - pub membership: Arc, + pub membership: Arc>, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -161,7 +161,7 @@ pub struct ViewSyncReplicaTaskState { pub id: u64, /// Membership for the quorum - pub membership: Arc, + pub membership: Arc>, /// This Nodes Public Key pub public_key: TYPES::SignatureKey, @@ -311,7 +311,11 @@ impl ViewSyncTaskState { // We do not have a relay task already running, so start one ensure!( - self.membership.leader(vote_view + relay, self.cur_epoch)? == self.public_key, + self.membership + .read() + .await + .leader(vote_view + relay, self.cur_epoch)? + == self.public_key, "View sync vote sent to wrong leader" ); @@ -356,7 +360,11 @@ impl ViewSyncTaskState { // We do not have a relay task already running, so start one ensure!( - self.membership.leader(vote_view + relay, self.cur_epoch)? == self.public_key, + self.membership + .read() + .await + .leader(vote_view + relay, self.cur_epoch)? + == self.public_key, debug!("View sync vote sent to wrong leader") ); @@ -401,7 +409,11 @@ impl ViewSyncTaskState { // We do not have a relay task already running, so start one ensure!( - self.membership.leader(vote_view + relay, self.cur_epoch)? == self.public_key, + self.membership + .read() + .await + .leader(vote_view + relay, self.cur_epoch)? + == self.public_key, debug!("View sync vote sent to wrong leader") ); @@ -474,7 +486,11 @@ impl ViewSyncTaskState { ); self.num_timeouts_tracked += 1; - let leader = self.membership.leader(view_number, self.cur_epoch)?; + let leader = self + .membership + .read() + .await + .leader(view_number, self.cur_epoch)?; tracing::warn!( %leader, leader_mnemonic = hotshot_types::utils::mnemonic(&leader), @@ -532,11 +548,17 @@ impl ViewSyncReplicaTaskState { return None; } + let membership_reader = self.membership.read().await; + let membership_stake_table = membership_reader.stake_table(self.cur_epoch); + let membership_failure_threshold = + membership_reader.failure_threshold(self.cur_epoch); + drop(membership_reader); + // If certificate is not valid, return current state if !certificate .is_valid_cert( - self.membership.stake_table(self.cur_epoch), - self.membership.failure_threshold(self.cur_epoch), + membership_stake_table, + membership_failure_threshold, &self.upgrade_lock, ) .await @@ -616,11 +638,17 @@ impl ViewSyncReplicaTaskState { return None; } + let membership_reader = self.membership.read().await; + let membership_stake_table = membership_reader.stake_table(self.cur_epoch); + let membership_success_threshold = + membership_reader.success_threshold(self.cur_epoch); + drop(membership_reader); + // If certificate is not valid, return current state if !certificate .is_valid_cert( - self.membership.stake_table(self.cur_epoch), - self.membership.success_threshold(self.cur_epoch), + membership_stake_table, + membership_success_threshold, &self.upgrade_lock, ) .await @@ -711,11 +739,17 @@ impl ViewSyncReplicaTaskState { return None; } + let membership_reader = self.membership.read().await; + let membership_stake_table = membership_reader.stake_table(self.cur_epoch); + let membership_success_threshold = + membership_reader.success_threshold(self.cur_epoch); + drop(membership_reader); + // If certificate is not valid, return current state if !certificate .is_valid_cert( - self.membership.stake_table(self.cur_epoch), - self.membership.success_threshold(self.cur_epoch), + membership_stake_table, + membership_success_threshold, &self.upgrade_lock, ) .await diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index cc2ec6c7c9..902630794a 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -12,6 +12,7 @@ use std::{ }; use async_broadcast::Sender; +use async_lock::RwLock; use async_trait::async_trait; use either::Either::{self, Left, Right}; use hotshot_types::{ @@ -51,7 +52,7 @@ pub struct VoteCollectionTaskState< pub public_key: TYPES::SignatureKey, /// Membership for voting - pub membership: Arc, + pub membership: Arc>, /// accumulator handles aggregating the votes pub accumulator: Option>, @@ -113,7 +114,7 @@ impl< matches!( self.transition_indicator, EpochTransitionIndicator::InTransition - ) || vote.leader(&self.membership, self.epoch)? == self.public_key, + ) || vote.leader(&*self.membership.read().await, self.epoch)? == self.public_key, info!("Received vote for a view in which we were not the leader.") ); @@ -177,12 +178,16 @@ where pub struct AccumulatorInfo { /// This nodes Pub Key pub public_key: TYPES::SignatureKey, + /// Membership we are accumulation votes for - pub membership: Arc, + pub membership: Arc>, + /// View of the votes we are collecting pub view: TYPES::View, + /// Epoch of the votes we are collecting pub epoch: TYPES::Epoch, + /// This nodes id pub id: u64, } @@ -256,7 +261,7 @@ pub async fn handle_vote< collectors: &mut VoteCollectorsMap, vote: &VOTE, public_key: TYPES::SignatureKey, - membership: &Arc, + membership: &Arc>, epoch: TYPES::Epoch, id: u64, event: &Arc>, diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 449629d5ca..3c0790b595 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -339,7 +339,7 @@ impl + std::fmt::Debug, V: Version &self, handle: &mut SystemContextHandle, network: Arc<>::Network>, - membership: TYPES::Membership, + membership: Arc>, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 49aca83652..9ec7e68b93 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -8,6 +8,7 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; +use async_lock::RwLock; use bitvec::bitvec; use committable::Committable; use hotshot::{ @@ -109,10 +110,10 @@ pub async fn build_system_handle_from_launcher< let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); - let memberships = TYPES::Membership::new( + let memberships = Arc::new(RwLock::new(TYPES::Membership::new( config.known_nodes_with_stake.clone(), config.known_da_nodes.clone(), - ); + ))); SystemContext::init( public_key, @@ -141,7 +142,7 @@ pub async fn build_cert< CERT: Certificate, >( data: DATAType, - da_membership: &TYPES::Membership, + membership: &Arc>, view: TYPES::View, epoch: TYPES::Epoch, public_key: &TYPES::SignatureKey, @@ -150,7 +151,7 @@ pub async fn build_cert< ) -> CERT { let real_qc_sig = build_assembled_sig::( &data, - da_membership, + membership, view, epoch, upgrade_lock, @@ -207,17 +208,20 @@ pub async fn build_assembled_sig< DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, >( data: &DATAType, - membership: &TYPES::Membership, + membership: &Arc>, view: TYPES::View, epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let stake_table = CERT::stake_table(membership, epoch); + let membership_reader = membership.read().await; + let stake_table = CERT::stake_table(&*membership_reader, epoch); let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), - U256::from(CERT::threshold(membership, epoch)), + U256::from(CERT::threshold(&*membership_reader, epoch)), ); + drop(membership_reader); + let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; let mut sig_lists = Vec::new(); @@ -265,33 +269,34 @@ pub fn key_pair_for_id( /// # Panics /// if unable to create a [`VidSchemeType`] #[must_use] -pub fn vid_scheme_from_view_number( - membership: &TYPES::Membership, +pub async fn vid_scheme_from_view_number( + membership: &Arc>, view_number: TYPES::View, epoch_number: TYPES::Epoch, ) -> VidSchemeType { let num_storage_nodes = membership + .read() + .await .committee_members(view_number, epoch_number) .len(); vid_scheme(num_storage_nodes) } -pub fn vid_payload_commitment( - quorum_membership: &::Membership, +pub async fn vid_payload_commitment( + membership: &Arc::Membership>>, view_number: TYPES::View, epoch_number: TYPES::Epoch, transactions: Vec, ) -> VidCommitment { - let mut vid = - vid_scheme_from_view_number::(quorum_membership, view_number, epoch_number); + let mut vid = vid_scheme_from_view_number::(membership, view_number, epoch_number).await; let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); vid_disperse.commit } -pub fn da_payload_commitment( - quorum_membership: &::Membership, +pub async fn da_payload_commitment( + membership: &Arc::Membership>>, transactions: Vec, epoch_number: TYPES::Epoch, ) -> VidCommitment { @@ -299,41 +304,41 @@ pub fn da_payload_commitment( vid_commitment( &encoded_transactions, - quorum_membership.total_nodes(epoch_number), + membership.read().await.total_nodes(epoch_number), ) } -pub fn build_payload_commitment( - membership: &::Membership, +pub async fn build_payload_commitment( + membership: &Arc::Membership>>, view: TYPES::View, epoch: TYPES::Epoch, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(membership, view, epoch); + let mut vid = vid_scheme_from_view_number::(membership, view, epoch).await; let encoded_transactions = Vec::new(); vid.commit_only(&encoded_transactions).unwrap() } /// TODO: -pub fn build_vid_proposal( - quorum_membership: &::Membership, +pub async fn build_vid_proposal( + membership: &Arc::Membership>>, view_number: TYPES::View, epoch_number: TYPES::Epoch, transactions: Vec, private_key: &::PrivateKey, ) -> VidProposal { - let mut vid = - vid_scheme_from_view_number::(quorum_membership, view_number, epoch_number); + let mut vid = vid_scheme_from_view_number::(membership, view_number, epoch_number).await; let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = VidDisperse::from_membership( view_number, vid.disperse(&encoded_transactions).unwrap(), - quorum_membership, + membership, epoch_number, None, - ); + ) + .await; let signature = TYPES::SignatureKey::sign(private_key, vid_disperse.payload_commitment.as_ref()) @@ -359,7 +364,7 @@ pub fn build_vid_proposal( #[allow(clippy::too_many_arguments)] pub async fn build_da_certificate( - membership: &::Membership, + membership: &Arc::Membership>>, view_number: TYPES::View, epoch_number: TYPES::Epoch, transactions: Vec, @@ -369,8 +374,10 @@ pub async fn build_da_certificate( ) -> DaCertificate2 { let encoded_transactions = TestTransaction::encode(&transactions); - let da_payload_commitment = - vid_commitment(&encoded_transactions, membership.total_nodes(epoch_number)); + let da_payload_commitment = vid_commitment( + &encoded_transactions, + membership.read().await.total_nodes(epoch_number), + ); let da_data = DaData2 { payload_commit: da_payload_commitment, diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 042bc7f9b7..edabe71f90 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -192,27 +192,23 @@ impl, V: Versions> TestTas } let epoch = TYPES::Epoch::new(self.ctx.latest_epoch); - let len = self - .handles - .read() - .await - .first() - .unwrap() - .handle - .memberships - .total_nodes(epoch); + let memberships_arc = Arc::clone( + &self + .handles + .read() + .await + .first() + .unwrap() + .handle + .memberships, + ); + let memberships_reader = memberships_arc.read().await; + let len = memberships_reader.total_nodes(epoch); // update view count - let threshold = self - .handles - .read() - .await - .first() - .unwrap() - .handle - .memberships - .success_threshold(epoch) - .get() as usize; + let threshold = memberships_reader.success_threshold(epoch).get() as usize; + drop(memberships_reader); + drop(memberships_arc); let view = self.ctx.round_results.get_mut(&view_number).unwrap(); if let Some(key) = key { diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index e9dd819802..66438d701d 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -232,7 +232,7 @@ where }; let storage = node.handle.storage().clone(); - let memberships = node.handle.memberships.clone(); + let memberships = Arc::clone(&node.handle.memberships); let config = node.handle.hotshot.config.clone(); let marketplace_config = node.handle.hotshot.marketplace_config.clone(); @@ -270,7 +270,7 @@ where TestRunner::::add_node_with_config_and_channels( node_id, generated_network.clone(), - (*memberships).clone(), + memberships, initializer, config, validator_config, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 5bd6f38d79..6a2d981cba 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -7,6 +7,7 @@ use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; use anyhow::{ensure, Result}; +use async_lock::RwLock; use hotshot::{ tasks::EventTransformerState, traits::{NetworkReliability, NodeImplementation, TestableNodeImplementation}, @@ -175,7 +176,7 @@ pub async fn create_test_handle< metadata: TestDescription, node_id: u64, network: Network, - memberships: TYPES::Membership, + memberships: Arc>, config: HotShotConfig, storage: I::Storage, marketplace_config: MarketplaceConfig, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index ec03e3770a..7d0c1cc6bb 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -425,10 +425,11 @@ where self.next_node_id += 1; tracing::debug!("launch node {}", i); - let memberships = ::Membership::new( - config.known_nodes_with_stake.clone(), - config.known_da_nodes.clone(), - ); + //let memberships =Arc::new(RwLock::new(::Membership::new( + //config.known_nodes_with_stake.clone(), + //config.known_da_nodes.clone(), + //))); + config.builder_urls = builder_urls .clone() .try_into() @@ -465,7 +466,10 @@ where context: LateNodeContext::UninitializedContext( LateNodeContextParameters { storage, - memberships, + memberships: ::Membership::new( + config.known_nodes_with_stake.clone(), + config.known_da_nodes.clone(), + ), config, marketplace_config, }, @@ -489,7 +493,10 @@ where let hotshot = Self::add_node_with_config( node_id, network.clone(), - memberships, + ::Membership::new( + config.known_nodes_with_stake.clone(), + config.known_da_nodes.clone(), + ), initializer, config, validator_config, @@ -509,7 +516,10 @@ where uninitialized_nodes.push(( node_id, network, - memberships, + ::Membership::new( + config.known_nodes_with_stake.clone(), + config.known_da_nodes.clone(), + ), config, storage, marketplace_config, @@ -544,7 +554,7 @@ where self.launcher.metadata.clone(), node_id, network.clone(), - memberships, + Arc::new(RwLock::new(memberships)), config.clone(), storage, marketplace_config, @@ -599,7 +609,7 @@ where private_key, node_id, config, - memberships, + Arc::new(RwLock::new(memberships)), network, initializer, ConsensusMetricsValue::default(), @@ -616,7 +626,7 @@ where pub async fn add_node_with_config_and_channels( node_id: u64, network: Network, - memberships: TYPES::Membership, + memberships: Arc>, initializer: HotShotInitializer, config: HotShotConfig, validator_config: ValidatorConfig, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index a7390f98ed..81da7fffb4 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -12,6 +12,7 @@ use std::{ task::{Context, Poll}, }; +use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; @@ -55,7 +56,7 @@ pub struct TestView { pub leaf: Leaf2, pub view_number: ViewNumber, pub epoch_number: EpochNumber, - pub membership: ::Membership, + pub membership: Arc::Membership>>, pub vid_disperse: Proposal>, pub vid_proposal: ( Vec>>, @@ -72,7 +73,7 @@ pub struct TestView { } impl TestView { - pub async fn genesis(membership: &::Membership) -> Self { + pub async fn genesis(membership: &Arc::Membership>>) -> Self { let genesis_view = ViewNumber::new(1); let genesis_epoch = EpochNumber::new(0); let upgrade_lock = UpgradeLock::new(); @@ -98,7 +99,8 @@ impl TestView { let leader_public_key = public_key; let payload_commitment = - da_payload_commitment::(membership, transactions.clone(), genesis_epoch); + da_payload_commitment::(membership, transactions.clone(), genesis_epoch) + .await; let (vid_disperse, vid_proposal) = build_vid_proposal( membership, @@ -106,7 +108,8 @@ impl TestView { genesis_epoch, transactions.clone(), &private_key, - ); + ) + .await; let da_certificate = build_da_certificate( membership, @@ -241,7 +244,8 @@ impl TestView { ); let payload_commitment = - da_payload_commitment::(membership, transactions.clone(), self.epoch_number); + da_payload_commitment::(membership, transactions.clone(), self.epoch_number) + .await; let (vid_disperse, vid_proposal) = build_vid_proposal( membership, @@ -249,7 +253,8 @@ impl TestView { self.epoch_number, transactions.clone(), &private_key, - ); + ) + .await; let da_certificate = build_da_certificate::( membership, @@ -492,11 +497,11 @@ impl TestView { pub struct TestViewGenerator { pub current_view: Option, - pub membership: ::Membership, + pub membership: Arc::Membership>>, } impl TestViewGenerator { - pub fn generate(membership: ::Membership) -> Self { + pub fn generate(membership: Arc::Membership>>) -> Self { TestViewGenerator { current_view: None, membership, @@ -576,13 +581,13 @@ impl Stream for TestViewGenerator { type Item = TestView; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mem = &self.membership.clone(); + let mem = Arc::clone(&self.membership); let curr_view = &self.current_view.clone(); let mut fut = if let Some(ref view) = curr_view { async move { TestView::next_view(view).await }.boxed() } else { - async move { TestView::genesis(mem).await }.boxed() + async move { TestView::genesis(&mem).await }.boxed() }; match fut.as_mut().poll(cx) { diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index c7af796395..4e594bb4a2 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -40,7 +40,7 @@ async fn test_da_task() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -48,7 +48,12 @@ async fn test_da_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), + handle + .hotshot + .memberships + .read() + .await + .total_nodes(EpochNumber::new(0)), ); let mut generator = TestViewGenerator::generate(membership.clone()); @@ -107,7 +112,7 @@ async fn test_da_task() { ViewNumber::new(2), EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) @@ -148,7 +153,7 @@ async fn test_da_task_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -156,10 +161,15 @@ async fn test_da_task_storage_failure() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), + handle + .hotshot + .memberships + .read() + .await + .total_nodes(EpochNumber::new(0)), ); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -215,7 +225,7 @@ async fn test_da_task_storage_failure() { ViewNumber::new(2), EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index e19cce630b..456a1321bd 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -6,6 +6,7 @@ #[cfg(test)] use std::marker::PhantomData; +use std::sync::Arc; use committable::Committable; use hotshot_example_types::node_types::TestTypes; @@ -78,9 +79,9 @@ async fn test_certificate2_validity() { let handle = build_system_handle::(node_id) .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -103,10 +104,15 @@ async fn test_certificate2_validity() { let qc2 = proposal.data.justify_qc.clone(); let qc = qc2.clone().to_qc(); + let membership_reader = membership.read().await; + let membership_stake_table = membership_reader.stake_table(EpochNumber::new(0)); + let membership_success_threshold = membership_reader.success_threshold(EpochNumber::new(0)); + drop(membership_reader); + assert!( qc.is_valid_cert( - membership.stake_table(EpochNumber::new(0)), - membership.success_threshold(EpochNumber::new(0)), + membership_stake_table.clone(), + membership_success_threshold, &handle.hotshot.upgrade_lock ) .await @@ -114,8 +120,8 @@ async fn test_certificate2_validity() { assert!( qc2.is_valid_cert( - membership.stake_table(EpochNumber::new(0)), - membership.success_threshold(EpochNumber::new(0)), + membership_stake_table, + membership_success_threshold, &handle.hotshot.upgrade_lock ) .await diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index a4e1c29a03..a23ae64cd5 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -58,13 +58,16 @@ async fn test_network_task() { let all_nodes = config.known_nodes_with_stake.clone(); - let membership = ::Membership::new(all_nodes.clone(), all_nodes); + let membership = Arc::new(RwLock::new(::Membership::new( + all_nodes.clone(), + all_nodes, + ))); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: EpochNumber::new(0), - membership: membership.clone(), + membership: Arc::clone(&membership), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -227,13 +230,16 @@ async fn test_network_storage_fail() { let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = UpgradeLock::::new(); - let membership = ::Membership::new(all_nodes.clone(), all_nodes); + let membership = Arc::new(RwLock::new(::Membership::new( + all_nodes.clone(), + all_nodes, + ))); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: EpochNumber::new(0), - membership: membership.clone(), + membership: Arc::clone(&membership), upgrade_lock: upgrade_lock.clone(), storage, consensus, diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 97742d0deb..97d39cc9ca 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -54,11 +54,11 @@ async fn test_quorum_proposal_recv_task() { let handle = build_system_handle::(2) .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -129,7 +129,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let handle = build_system_handle::(4) .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 0935aef8f8..caf464ac51 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -51,15 +51,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), EpochNumber::new(1), - ); + ) + .await; - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -90,7 +91,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(1)), + membership.read().await.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -145,7 +146,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let mut generator = TestViewGenerator::generate(membership.clone()); @@ -182,7 +183,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(1)), + membership.read().await.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -196,7 +197,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { &membership, ViewNumber::new(1), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -215,7 +217,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { &membership, ViewNumber::new(2), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -232,7 +235,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { &membership, ViewNumber::new(3), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), @@ -249,7 +253,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { &membership, ViewNumber::new(4), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[2].data.block_header.metadata, ViewNumber::new(4), @@ -266,7 +271,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { &membership, ViewNumber::new(5), EpochNumber::new(1) - ), + ) + .await, builder_commitment, proposals[3].data.block_header.metadata, ViewNumber::new(5), @@ -308,16 +314,17 @@ async fn test_quorum_proposal_task_qc_timeout() { let handle = build_system_handle::(node_id) .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), EpochNumber::new(1), - ); + ) + .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -360,7 +367,7 @@ async fn test_quorum_proposal_task_qc_timeout() { }, ViewNumber::new(3), vec1![null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(1)), + membership.read().await.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(3), ) @@ -397,16 +404,17 @@ async fn test_quorum_proposal_task_view_sync() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), EpochNumber::new(1), - ); + ) + .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -451,7 +459,7 @@ async fn test_quorum_proposal_task_view_sync() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(1)), + membership.read().await.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(2), ) @@ -486,9 +494,9 @@ async fn test_quorum_proposal_task_liveness_check() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -518,7 +526,7 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(1)), + membership.read().await.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -536,7 +544,8 @@ async fn test_quorum_proposal_task_liveness_check() { &membership, ViewNumber::new(1), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -555,7 +564,8 @@ async fn test_quorum_proposal_task_liveness_check() { &membership, ViewNumber::new(2), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -572,7 +582,8 @@ async fn test_quorum_proposal_task_liveness_check() { &membership, ViewNumber::new(3), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), @@ -589,7 +600,8 @@ async fn test_quorum_proposal_task_liveness_check() { &membership, ViewNumber::new(4), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[2].data.block_header.metadata, ViewNumber::new(4), @@ -606,7 +618,8 @@ async fn test_quorum_proposal_task_liveness_check() { &membership, ViewNumber::new(5), EpochNumber::new(1) - ), + ) + .await, builder_commitment, proposals[3].data.block_header.metadata, ViewNumber::new(5), @@ -644,7 +657,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let handle = build_system_handle::(2) .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let mut generator = TestViewGenerator::generate(membership); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index b5d079f56c..4d6c018fbc 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -45,9 +45,9 @@ async fn test_quorum_vote_task_success() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(membership); let mut proposals = Vec::new(); let mut leaves = Vec::new(); @@ -112,9 +112,9 @@ async fn test_quorum_vote_task_miss_dependency() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -196,7 +196,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let mut generator = TestViewGenerator::generate(membership); diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index e4ed70be64..491400d28d 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -42,7 +42,12 @@ async fn test_transaction_task_leader_two_views_in_a_row() { let (_, precompute_data) = precompute_vid_commitment( &[], - handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), + handle + .hotshot + .memberships + .read() + .await + .total_nodes(EpochNumber::new(0)), ); // current view @@ -55,7 +60,12 @@ async fn test_transaction_task_leader_two_views_in_a_row() { EpochNumber::new(1), vec1::vec1![ null_block::builder_fee::( - handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), + handle + .hotshot + .memberships + .read() + .await + .total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(4), ) diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 44833d7727..7dd4324426 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -83,9 +83,9 @@ async fn test_upgrade_task_with_proposal() { let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -126,7 +126,7 @@ async fn test_upgrade_task_with_proposal() { let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(1)), + membership.read().await.total_nodes(EpochNumber::new(1)), ::Base::VERSION, *ViewNumber::new(1), ) @@ -156,7 +156,8 @@ async fn test_upgrade_task_with_proposal() { &membership, ViewNumber::new(1), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -175,7 +176,8 @@ async fn test_upgrade_task_with_proposal() { &membership, ViewNumber::new(2), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -193,7 +195,8 @@ async fn test_upgrade_task_with_proposal() { &membership, ViewNumber::new(3), EpochNumber::new(1) - ), + ) + .await, builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 5390f56e03..7e21efe163 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -70,7 +70,7 @@ async fn test_upgrade_task_with_vote() { let consensus = handle.hotshot.consensus().clone(); let mut consensus_writer = consensus.write().await; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let mut generator = TestViewGenerator::generate(membership); for view in (&mut generator).take(2).collect::>().await { diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index fc33e7f9a4..fc62f23819 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -45,13 +45,14 @@ async fn test_vid_task() { .0; let pub_key = handle.public_key(); - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); let mut vid = vid_scheme_from_view_number::( &membership, ViewNumber::new(0), EpochNumber::new(0), - ); + ) + .await; let transactions = vec![TestTransaction::new(vec![0])]; let (payload, metadata) = >::from_transactions( @@ -93,7 +94,8 @@ async fn test_vid_task() { &membership, EpochNumber::new(0), None, - ); + ) + .await; let vid_proposal = Proposal { data: vid_disperse.clone(), @@ -112,7 +114,7 @@ async fn test_vid_task() { ViewNumber::new(2), EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) @@ -134,7 +136,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - membership.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(EpochNumber::new(0)), ::Base::VERSION, *ViewNumber::new(2), ) diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 1b12e0b0f0..51c91750fb 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -36,9 +36,9 @@ async fn test_vote_dependency_handle() { let handle = build_system_handle::(node_id) .await .0; - let membership = (*handle.hotshot.memberships).clone(); + let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::generate(membership); // Generate our state for the test let mut proposals = Vec::new(); @@ -90,7 +90,7 @@ async fn test_vote_dependency_handle() { consensus: OuterConsensus::new(consensus.clone()), consensus_metrics: Arc::clone(&consensus.read().await.metrics), instance_state: handle.hotshot.instance_state(), - quorum_membership: (*handle.hotshot.memberships).clone().into(), + membership: Arc::clone(&handle.hotshot.memberships), storage: Arc::clone(&handle.storage()), view_number, sender: event_sender.clone(), diff --git a/types/src/consensus.rs b/types/src/consensus.rs index edbf78acab..f87c6f365f 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -938,7 +938,7 @@ impl Consensus { pub async fn calculate_and_update_vid( consensus: OuterConsensus, view: ::View, - membership: Arc, + membership: Arc>, private_key: &::PrivateKey, ) -> Option<()> { let txns = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); diff --git a/types/src/data.rs b/types/src/data.rs index f3853e412e..8cbd6ffdc8 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -224,14 +224,16 @@ impl VidDisperse { /// Create VID dispersal from a specified membership for the target epoch. /// Uses the specified function to calculate share dispersal /// Allows for more complex stake table functionality - pub fn from_membership( + pub async fn from_membership( view_number: TYPES::View, mut vid_disperse: JfVidDisperse, - membership: &TYPES::Membership, + membership: &Arc>, target_epoch: TYPES::Epoch, sender_epoch: Option, ) -> Self { let shares = membership + .read() + .await .committee_members(view_number, target_epoch) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) @@ -255,13 +257,13 @@ impl VidDisperse { #[allow(clippy::panic)] pub async fn calculate_vid_disperse( txns: Arc<[u8]>, - membership: &Arc, + membership: &Arc>, view: TYPES::View, target_epoch: TYPES::Epoch, sender_epoch: Option, precompute_data: Option, ) -> Self { - let num_nodes = membership.total_nodes(target_epoch); + let num_nodes = membership.read().await.total_nodes(target_epoch); let vid_disperse = spawn_blocking(move || { precompute_data @@ -274,13 +276,7 @@ impl VidDisperse { // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); - Self::from_membership( - view, - vid_disperse, - membership.as_ref(), - target_epoch, - sender_epoch, - ) + Self::from_membership(view, vid_disperse, membership, target_epoch, sender_epoch).await } } diff --git a/types/src/message.rs b/types/src/message.rs index 0f36bae2de..877cdd6282 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -432,7 +432,7 @@ where /// Returns an error when the proposal signature is invalid. pub async fn validate_signature( &self, - quorum_membership: &TYPES::Membership, + membership: &TYPES::Membership, epoch_height: u64, upgrade_lock: &UpgradeLock, ) -> Result<()> { @@ -441,7 +441,7 @@ where self.data.block_header.block_number(), epoch_height, )); - let view_leader_key = quorum_membership.leader(view_number, proposal_epoch)?; + let view_leader_key = membership.leader(view_number, proposal_epoch)?; let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( @@ -465,7 +465,7 @@ where /// Returns an error when the proposal signature is invalid. pub fn validate_signature( &self, - quorum_membership: &TYPES::Membership, + membership: &TYPES::Membership, epoch_height: u64, ) -> Result<()> { let view_number = self.data.view_number(); @@ -473,7 +473,7 @@ where self.data.block_header.block_number(), epoch_height, )); - let view_leader_key = quorum_membership.leader(view_number, proposal_epoch)?; + let view_leader_key = membership.leader(view_number, proposal_epoch)?; let proposed_leaf = Leaf2::from_quorum_proposal(&self.data); ensure!( diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 5ea857ed21..0973f8805a 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -451,15 +451,20 @@ impl UpgradeCertificate { /// Returns an error when the upgrade certificate is invalid. pub async fn validate( upgrade_certificate: &Option, - quorum_membership: &TYPES::Membership, + membership: &RwLock, epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { + let membership_reader = membership.read().await; + let membership_stake_table = membership_reader.stake_table(epoch); + let membership_upgrade_threshold = membership_reader.upgrade_threshold(epoch); + drop(membership_reader); + ensure!( cert.is_valid_cert( - quorum_membership.stake_table(epoch), - quorum_membership.upgrade_threshold(epoch), + membership_stake_table, + membership_upgrade_threshold, upgrade_lock ) .await, diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 5b72ea4f84..0509918574 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -13,7 +13,7 @@ use super::node_implementation::NodeType; use crate::{traits::signature_key::SignatureKey, PeerConfig}; /// A protocol for determining membership in and participating in a committee. -pub trait Membership: Clone + Debug + Send + Sync { +pub trait Membership: Debug + Send + Sync { /// The error type returned by methods like `lookup_leader`. type Error: std::fmt::Display; diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 5d6670407f..f85036dfb9 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -17,6 +17,7 @@ use std::{ time::Duration, }; +use async_lock::RwLock; use async_trait::async_trait; use dyn_clone::DynClone; use futures::{future::join_all, Future}; @@ -262,7 +263,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st &'a self, _view: u64, _epoch: u64, - _membership: &TYPES::Membership, + _membership: Arc>, ) where TYPES: NodeType + 'a, { diff --git a/types/src/vote.rs b/types/src/vote.rs index 103c470e6d..20d36e67ee 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -10,8 +10,10 @@ use std::{ collections::{BTreeMap, HashMap}, marker::PhantomData, num::NonZeroU64, + sync::Arc, }; +use async_lock::RwLock; use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use either::Either; @@ -162,7 +164,7 @@ impl< pub async fn accumulate( &mut self, vote: &VOTE, - membership: &TYPES::Membership, + membership: &Arc>, epoch: TYPES::Epoch, ) -> Either<(), CERT> { let key = vote.signing_key(); @@ -186,10 +188,16 @@ impl< return Either::Left(()); } - let Some(stake_table_entry) = CERT::stake_table_entry(membership, &key, epoch) else { + let membership_reader = membership.read().await; + let Some(stake_table_entry) = CERT::stake_table_entry(&*membership_reader, &key, epoch) + else { return Either::Left(()); }; - let stake_table = CERT::stake_table(membership, epoch); + let stake_table = CERT::stake_table(&*membership_reader, epoch); + let total_nodes = CERT::total_nodes(&*membership_reader, epoch); + let threshold = CERT::threshold(&*membership_reader, epoch); + drop(membership_reader); + let Some(vote_node_id) = stake_table .iter() .position(|x| *x == stake_table_entry.clone()) @@ -212,7 +220,7 @@ impl< let (signers, sig_list) = self .signers .entry(vote_commitment) - .or_insert((bitvec![0; CERT::total_nodes(membership, epoch)], Vec::new())); + .or_insert((bitvec![0; total_nodes], Vec::new())); if signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); return Either::Left(()); @@ -223,12 +231,12 @@ impl< *total_stake_casted += stake_table_entry.stake(); total_vote_map.insert(key, (vote.signature(), vote_commitment)); - if *total_stake_casted >= CERT::threshold(membership, epoch).into() { + if *total_stake_casted >= threshold.into() { // Assemble QC let real_qc_pp: <::SignatureKey as SignatureKey>::QcParams = ::public_parameter( stake_table, - U256::from(CERT::threshold(membership, epoch)), + U256::from(threshold), ); let real_qc_sig = ::assemble( From 827dfa9ea1a35cae63677c2ea4674d3df59f84fe Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Thu, 19 Dec 2024 20:37:18 +0100 Subject: [PATCH 1336/1393] Lr/epoch tests (#3952) * Initial commit * WIP: adding epoch to proposal and vote data, not compiling yet * Make it compile * Adjust tests * Add a test type for two stake tables for even and odd epochs * Debugging * Fix extended voting * Try "in epoch transition" approach * Continue debugging * Use correct epoch with Membership * Adjust tests and lints * Adapt to variable stake table after merge * Fix accidentally pulled bug in eQC rule * Commit includes epoch for vote and proposal data types * Prune dependencies (#3787) * add new message types and gate outgoing messages * Use the proper message for the proposal response * Modify commit for `Leaf2` and `QuorumData2` * Adjust tests * Clean up debug traces * Initial commit for double quorum * Add TODO * Next epoch nodes vote during epoch transition * Form the second QC at the end of an epoch * Allow early payload save but check that's it's the same * Attach next epoch justify qc to proposals * Validate the next epoch justify qc * Test with more network types * Fix fmt in tests * Use real threshold in the tests based on an epoch * Membership thresholds depend on an epoch * Make sure epoch transition proposals include the next epoch QC * Use epoch from vote and add more tests * Adjust marketplace ver number * Epochs without Marketplace and adjust tests * Add epoch to test_success * Add debug traces * try * Adjust view change logic in transactions task * Cleanup debug traces * Don't chage view when voting for eQC * Add a lot of traces to find a deadlock * Keep the task for view one less than the current. We might still be transmitting * Clean debug traces * An epoch should not increment without view being incremented as well * Use saturating_sub consistently * Fix compiler error * fix merge * Add a new test * Fixes after merge * Fix vid share handling * Submit transactions to the correct epoch * Address review comments * Fix compiler error * Adjust test * test_with_failures_2_with_epochs test uses only TestTwoStakeTablesTypes * Modify test_epoch_end to use uneven number of nodes, it now fails * VID share required target epoch as well * VID share needs old epoch payload commitment * Use odd number of nodes in tests * Remove debug trace --------- Co-authored-by: Artemii Gerasimovich Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> Co-authored-by: Rob --- hotshot/src/tasks/mod.rs | 6 +- hotshot/src/tasks/task_state.rs | 1 + .../src/traits/networking/push_cdn_network.rs | 28 +++- task-impls/src/network.rs | 1 - task-impls/src/quorum_vote/mod.rs | 77 ++++++---- task-impls/src/request.rs | 6 +- task-impls/src/transactions.rs | 17 ++- task-impls/src/vid.rs | 4 +- testing/src/helpers.rs | 1 + testing/src/test_builder.rs | 19 +-- testing/tests/tests_1/test_success.rs | 134 ++++++++++++++++-- testing/tests/tests_1/test_with_failures_2.rs | 48 ++++++- testing/tests/tests_1/vid_task.rs | 1 + types/src/consensus.rs | 2 +- types/src/data.rs | 76 +++++++--- 15 files changed, 336 insertions(+), 85 deletions(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index d42f392675..d32a671b94 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -82,7 +82,7 @@ pub fn add_response_task, V: Versi ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - Arc::clone(&handle.hotshot.memberships), + Arc::clone(&handle.memberships), handle.public_key().clone(), handle.private_key().clone(), handle.hotshot.id, @@ -156,7 +156,9 @@ pub fn add_network_message_task< message = network.recv_message().fuse() => { // Make sure the message did not fail let message = match message { - Ok(message) => message, + Ok(message) => { + message + } Err(e) => { tracing::error!("Failed to receive message: {:?}", e); continue; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 7a610ac6e1..9b0230c990 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -219,6 +219,7 @@ impl, V: Versions> CreateTaskState .marketplace_config .fallback_builder_url .clone(), + epoch_height: handle.epoch_height, } } } diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 553b2545ef..3601c38f2d 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -6,7 +6,7 @@ #[cfg(feature = "hotshot-testing")] use std::sync::atomic::{AtomicBool, Ordering}; -use std::{marker::PhantomData, sync::Arc}; +use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; #[cfg(feature = "hotshot-testing")] use std::{path::Path, time::Duration}; @@ -46,6 +46,7 @@ use hotshot_types::{ BoxSyncFuture, }; use num_enum::{IntoPrimitive, TryFromPrimitive}; +use parking_lot::Mutex; #[cfg(feature = "hotshot-testing")] use rand::{rngs::StdRng, RngCore, SeedableRng}; use tokio::{spawn, sync::mpsc::error::TrySendError, time::sleep}; @@ -191,6 +192,10 @@ pub struct PushCdnNetwork { client: Client>, /// The CDN-specific metrics metrics: Arc, + /// The internal queue for messages to ourselves + internal_queue: Arc>>>, + /// The public key of this node + public_key: K, /// Whether or not the underlying network is supposed to be paused #[cfg(feature = "hotshot-testing")] is_paused: Arc, @@ -229,7 +234,7 @@ impl PushCdnNetwork { let config = ClientConfig { endpoint: marshal_endpoint, subscribed_topics: topics.into_iter().map(|t| t as u8).collect(), - keypair, + keypair: keypair.clone(), use_local_authority: true, }; @@ -239,6 +244,8 @@ impl PushCdnNetwork { Ok(Self { client, metrics: Arc::from(metrics), + internal_queue: Arc::new(Mutex::new(VecDeque::new())), + public_key: keypair.public_key.0, // Start unpaused #[cfg(feature = "hotshot-testing")] is_paused: Arc::from(AtomicBool::new(false)), @@ -422,7 +429,7 @@ impl TestableNetworkingImplementation let client_config: ClientConfig> = ClientConfig { keypair: KeyPair { - public_key: WrappedSignatureKey(public_key), + public_key: WrappedSignatureKey(public_key.clone()), private_key, }, subscribed_topics: topics, @@ -434,6 +441,8 @@ impl TestableNetworkingImplementation Arc::new(PushCdnNetwork { client: Client::new(client_config), metrics: Arc::new(CdnMetricsValue::default()), + internal_queue: Arc::new(Mutex::new(VecDeque::new())), + public_key, #[cfg(feature = "hotshot-testing")] is_paused: Arc::from(AtomicBool::new(false)), }) @@ -533,6 +542,12 @@ impl ConnectedNetwork for PushCdnNetwork { return Ok(()); } + // If the message is to ourselves, just add it to the internal queue + if recipient == self.public_key { + self.internal_queue.lock().push_back(message); + return Ok(()); + } + // Send the message if let Err(e) = self .client @@ -554,7 +569,12 @@ impl ConnectedNetwork for PushCdnNetwork { /// # Errors /// - If we fail to receive messages. Will trigger a retry automatically. async fn recv_message(&self) -> Result, NetworkError> { - // Receive a message + // If we have a message in the internal queue, return it + if let Some(message) = self.internal_queue.lock().pop_front() { + return Ok(message); + } + + // Receive a message from the network let message = self.client.receive_message().await; // If we're paused, receive but don't process messages diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 42b2235dd3..7d6664f39c 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -404,7 +404,6 @@ impl< ) -> std::result::Result<(), ()> { if let Some(mut action) = maybe_action { if !consensus.write().await.update_action(action, view) { - tracing::warn!("Already actioned {:?} in view {:?}", action, view); return Err(()); } // If the action was view sync record it as a vote, but we don't diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 5ec3e54b1a..2b30bab7a9 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -173,7 +173,13 @@ impl + 'static, V: Versions> Handl } } HotShotEvent::VidShareValidated(share) => { - let vid_payload_commitment = &share.data.payload_commitment; + let vid_payload_commitment = if let Some(ref data_epoch_payload_commitment) = + share.data.data_epoch_payload_commitment + { + data_epoch_payload_commitment + } else { + &share.data.payload_commitment + }; vid_share = Some(share.clone()); if let Some(ref comm) = payload_commitment { if vid_payload_commitment != comm { @@ -372,8 +378,12 @@ impl, V: Versions> QuorumVoteTaskS view_number: TYPES::View, event_receiver: Receiver>>, event_sender: &Sender>>, - event: Option>>, + event: Arc>, ) { + tracing::debug!( + "Attempting to make dependency task for view {view_number:?} and event {event:?}" + ); + if self.vote_dependencies.contains_key(&view_number) { return; } @@ -388,10 +398,8 @@ impl, V: Versions> QuorumVoteTaskS let vid_dependency = self.create_event_dependency(VoteDependency::Vid, view_number, event_receiver.clone()); // If we have an event provided to us - if let Some(event) = event { - if let HotShotEvent::QuorumProposalValidated(..) = event.as_ref() { - quorum_proposal_dependency.mark_as_completed(event); - } + if let HotShotEvent::QuorumProposalValidated(..) = event.as_ref() { + quorum_proposal_dependency.mark_as_completed(event); } let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; @@ -500,7 +508,7 @@ impl, V: Versions> QuorumVoteTaskS proposal.data.view_number, event_receiver, &event_sender, - Some(Arc::clone(&event)), + Arc::clone(&event), ); } } @@ -544,7 +552,12 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); + self.create_dependency_task_if_new( + view, + event_receiver, + &event_sender, + Arc::clone(&event), + ); } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); @@ -557,25 +570,25 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = &disperse.data.payload_commitment; - let disperse_epoch = disperse.data.epoch; - // Check that the signature is valid ensure!( sender.validate(&disperse.signature, payload_commitment.as_ref()), "VID share signature is invalid" ); + let vid_epoch = disperse.data.epoch; + let target_epoch = disperse.data.target_epoch; let membership_reader = self.membership.read().await; // ensure that the VID share was sent by a DA member OR the view leader ensure!( membership_reader - .da_committee_members(view, disperse_epoch) + .da_committee_members(view, vid_epoch) .contains(sender) - || *sender == membership_reader.leader(view, disperse_epoch)?, + || *sender == membership_reader.leader(view, vid_epoch)?, "VID share was not sent by a DA member or the view leader." ); - let membership_total_nodes = membership_reader.total_nodes(disperse_epoch); + let membership_total_nodes = membership_reader.total_nodes(target_epoch); drop(membership_reader); // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner @@ -606,7 +619,12 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); + self.create_dependency_task_if_new( + view, + event_receiver, + &event_sender, + Arc::clone(&event), + ); } HotShotEvent::Timeout(view, ..) => { let view = TYPES::View::new(view.saturating_sub(1)); @@ -717,25 +735,30 @@ impl, V: Versions> QuorumVoteTaskS current_block_number, self.epoch_height, )); - tracing::trace!( - "Sending ViewChange for view {} and epoch {}", - proposal.data.view_number() + 1, - *current_epoch - ); - broadcast_event( - Arc::new(HotShotEvent::ViewChange( - proposal.data.view_number() + 1, - current_epoch, - )), - &event_sender, - ) - .await; let is_vote_leaf_extended = self .consensus .read() .await .is_leaf_extended(proposed_leaf.commit()); + if !is_vote_leaf_extended { + // We're voting for the proposal that will probably form the eQC. We don't want to change + // the view here because we will probably change it when we form the eQC. + // The main reason is to handle view change event only once in the transaction task. + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + proposal.data.view_number() + 1, + *current_epoch + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange( + proposal.data.view_number() + 1, + current_epoch, + )), + &event_sender, + ) + .await; + } if let Err(e) = submit_vote::( event_sender.clone(), Arc::clone(&self.membership), diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 5e1bdef390..c3bb9a2fb8 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -220,6 +220,7 @@ impl> NetworkRequestState = spawn(async move { // Do the delay only if primary is up and then start sending if !network.is_primary_down() { @@ -261,8 +262,9 @@ impl> NetworkRequestState, V /// fallback builder url pub fallback_builder_url: Url, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> TransactionTaskState { @@ -477,9 +480,13 @@ impl, V: Versions> TransactionTask } HotShotEvent::ViewChange(view, epoch) => { let view = TYPES::View::new(std::cmp::max(1, **view)); - + let epoch = if self.epoch_height != 0 { + TYPES::Epoch::new(std::cmp::max(1, **epoch)) + } else { + *epoch + }; ensure!( - *view > *self.cur_view || *epoch > self.cur_epoch, + *view > *self.cur_view && *epoch >= *self.cur_epoch, debug!( "Received a view change to an older view and epoch: tried to change view to {:?}\ and epoch {:?} though we are at view {:?} and epoch {:?}", @@ -487,11 +494,11 @@ impl, V: Versions> TransactionTask ) ); self.cur_view = view; - self.cur_epoch = *epoch; + self.cur_epoch = epoch; - let leader = self.membership.read().await.leader(view, *epoch)?; + let leader = self.membership.read().await.leader(view, epoch)?; if leader == self.public_key { - self.handle_view_change(&event_stream, view, *epoch).await; + self.handle_view_change(&event_stream, view, epoch).await; return Ok(()); } } diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 1f9b218aaf..3754e2a01d 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -102,7 +102,7 @@ impl> VidTaskState { &Arc::clone(&self.membership), *view_number, epoch, - None, + epoch, vid_precompute.clone(), ) .await; @@ -208,7 +208,7 @@ impl> VidTaskState { &Arc::clone(&self.membership), proposal_view_number, target_epoch, - Some(sender_epoch), + sender_epoch, None, ) .await; diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 9ec7e68b93..6d945894f3 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -336,6 +336,7 @@ pub async fn build_vid_proposal( vid.disperse(&encoded_transactions).unwrap(), membership, epoch_number, + epoch_number, None, ) .await; diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 6a2d981cba..a4083f461c 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -4,7 +4,9 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; +use std::{ + any::TypeId, collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration, +}; use anyhow::{ensure, Result}; use async_lock::RwLock; @@ -15,8 +17,8 @@ use hotshot::{ HotShotInitializer, MarketplaceConfig, SystemContext, TwinsHandlerState, }; use hotshot_example_types::{ - auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, - storage_types::TestStorage, testable_delay::DelayConfig, + auction_results_provider_types::TestAuctionResultsProvider, node_types::EpochsTestVersions, + state_types::TestInstanceState, storage_types::TestStorage, testable_delay::DelayConfig, }; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -99,8 +101,6 @@ pub struct TestDescription, V: Ver pub start_solver: bool, /// boxed closure used to validate the resulting transactions pub validate_transactions: TransactionValidator, - /// Number of blocks in an epoch, zero means there are no epochs - pub epoch_height: u64, } pub fn nonempty_block_threshold(threshold: (u64, u64)) -> TransactionValidator { @@ -385,7 +385,7 @@ impl, V: Versions> Default /// by default, just a single round #[allow(clippy::redundant_field_names)] fn default() -> Self { - let num_nodes_with_stake = 6; + let num_nodes_with_stake = 7; Self { timing_data: TimingData::default(), num_nodes_with_stake, @@ -418,7 +418,6 @@ impl, V: Versions> Default upgrade_view: None, start_solver: true, validate_transactions: Arc::new(|_| Ok(())), - epoch_height: 0, } } } @@ -456,7 +455,6 @@ where timing_data, da_staked_committee_size, unreliable_network, - epoch_height, .. } = self.clone(); @@ -490,6 +488,11 @@ where 0 < da_staked_committee_size, ); // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); + let epoch_height = if TypeId::of::() == TypeId::of::() { + 10 + } else { + 0 + }; let config = HotShotConfig { start_threshold: (1, 1), num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index bdf55c1f6a..e7bc509593 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use hotshot_example_types::{ node_types::{ @@ -18,7 +18,6 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, @@ -43,6 +42,25 @@ cross_tests!( }, ); +cross_tests!( + TestName: test_success_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestDescription::default() + } + }, +); + // cross_tests!( // TestName: test_epoch_success, // Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -94,6 +112,38 @@ cross_tests!( }, ); +cross_tests!( + TestName: test_success_with_async_delay_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + metadata.overall_safety_properties.num_successful_views = 0; + let mut config = DelayConfig::default(); + let delay_settings = DelaySettings { + delay_option: DelayOptions::Random, + min_time_in_milliseconds: 10, + max_time_in_milliseconds: 100, + fixed_time_in_milliseconds: 0, + }; + config.add_settings_for_all_types(delay_settings); + metadata.async_delay_config = config; + metadata + }, +); + cross_tests!( TestName: test_success_with_async_delay_2, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -134,6 +184,46 @@ cross_tests!( }, ); +cross_tests!( + TestName: test_success_with_async_delay_2_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + metadata.overall_safety_properties.num_successful_views = 30; + let mut config = DelayConfig::default(); + let mut delay_settings = DelaySettings { + delay_option: DelayOptions::Random, + min_time_in_milliseconds: 10, + max_time_in_milliseconds: 100, + fixed_time_in_milliseconds: 15, + }; + config.add_setting(SupportedTraitTypesForAsyncDelay::Storage, &delay_settings); + + delay_settings.delay_option = DelayOptions::Fixed; + config.add_setting(SupportedTraitTypesForAsyncDelay::BlockHeader, &delay_settings); + + delay_settings.delay_option = DelayOptions::Random; + delay_settings.min_time_in_milliseconds = 5; + delay_settings.max_time_in_milliseconds = 20; + config.add_setting(SupportedTraitTypesForAsyncDelay::ValidatedState, &delay_settings); + metadata.async_delay_config = config; + metadata + }, +); + cross_tests!( TestName: test_with_double_leader_no_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -155,6 +245,27 @@ cross_tests!( } ); +cross_tests!( + TestName: test_with_double_leader_no_failures_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_bootstrap_nodes = 10; + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + + metadata.overall_safety_properties.num_failed_views = 0; + + metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(0, 0); + + metadata + } +); + cross_tests!( TestName: test_epoch_end, Impls: [CombinedImpl, Libp2pImpl, PushCdnImpl], @@ -168,17 +279,10 @@ cross_tests!( duration: Duration::from_millis(100000), }, ), - epoch_height: 10, - num_nodes_with_stake: 10, - start_nodes: 10, - num_bootstrap_nodes: 10, - da_staked_committee_size: 10, - overall_safety_properties: OverallSafetyPropertiesDescription { - // Explicitly show that we use normal threshold, i.e. 2 nodes_len / 3 + 1 - // but we divide by two because only half of the nodes are active in each epoch - threshold_calculator: Arc::new(|_, nodes_len| 2 * nodes_len / 2 / 3 + 1), - ..OverallSafetyPropertiesDescription::default() - }, + num_nodes_with_stake: 11, + start_nodes: 11, + num_bootstrap_nodes: 11, + da_staked_committee_size: 11, ..TestDescription::default() } @@ -189,8 +293,8 @@ cross_tests!( // This test fails with the old decide rule cross_tests!( TestName: test_shorter_decide, - Impls: [MemoryImpl], - Types: [TestTypes], + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], Versions: [EpochsTestVersions], Ignore: false, Metadata: { diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index c922b7fd6e..242b5ea2c0 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -9,7 +9,10 @@ use std::collections::HashMap; use hotshot_example_types::{ - node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestVersions}, + node_types::{ + CombinedImpl, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, + TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestVersions, + }, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -70,6 +73,49 @@ cross_tests!( } ); +cross_tests!( + TestName: test_with_failures_2_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + let dead_nodes = vec![ + ChangeNode { + idx: 10, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 11, + updown: NodeAction::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 20; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(5), false), + (ViewNumber::new(11), false), + (ViewNumber::new(17), false), + (ViewNumber::new(23), false), + (ViewNumber::new(29), false), + (ViewNumber::new(35), false), + ]); + + metadata + } +); + cross_tests!( TestName: test_with_double_leader_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index fc62f23819..adafcbfbf7 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -93,6 +93,7 @@ async fn test_vid_task() { vid_disperse, &membership, EpochNumber::new(0), + EpochNumber::new(0), None, ) .await; diff --git a/types/src/consensus.rs b/types/src/consensus.rs index f87c6f365f..2530417f90 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -950,7 +950,7 @@ impl Consensus { .view_inner .epoch()?; let vid = - VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, None, None).await; + VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, epoch, None).await; let shares = VidDisperseShare2::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; for share in shares { diff --git a/types/src/data.rs b/types/src/data.rs index 8cbd6ffdc8..0d6fcfff7a 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -210,10 +210,14 @@ where pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::View, - /// Epoch this proposal applies to + /// Epoch the data of this proposal belongs to pub epoch: TYPES::Epoch, - /// Block payload commitment + /// Epoch to which the recipients of this VID belong to + pub target_epoch: TYPES::Epoch, + /// VidCommitment calculated based on the number of nodes in `target_epoch`. pub payload_commitment: VidCommitment, + /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. + pub data_epoch_payload_commitment: Option, /// A storage node's key and its corresponding VID share pub shares: BTreeMap, /// VID common data sent to all storage nodes @@ -229,7 +233,8 @@ impl VidDisperse { mut vid_disperse: JfVidDisperse, membership: &Arc>, target_epoch: TYPES::Epoch, - sender_epoch: Option, + data_epoch: TYPES::Epoch, + data_epoch_payload_commitment: Option, ) -> Self { let shares = membership .read() @@ -244,7 +249,9 @@ impl VidDisperse { shares, common: vid_disperse.common, payload_commitment: vid_disperse.commit, - epoch: sender_epoch.unwrap_or(target_epoch), + data_epoch_payload_commitment, + epoch: data_epoch, + target_epoch, } } @@ -260,23 +267,43 @@ impl VidDisperse { membership: &Arc>, view: TYPES::View, target_epoch: TYPES::Epoch, - sender_epoch: Option, + data_epoch: TYPES::Epoch, precompute_data: Option, ) -> Self { let num_nodes = membership.read().await.total_nodes(target_epoch); + let txns_clone = Arc::clone(&txns); let vid_disperse = spawn_blocking(move || { precompute_data .map_or_else( - || vid_scheme(num_nodes).disperse(Arc::clone(&txns)), - |data| vid_scheme(num_nodes).disperse_precompute(Arc::clone(&txns), &data) + || vid_scheme(num_nodes).disperse(&txns_clone), + |data| vid_scheme(num_nodes).disperse_precompute(&txns_clone, &data) ) - .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) + .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns_clone.len())) }).await; + let data_epoch_payload_commitment = if target_epoch == data_epoch { + None + } else { + let data_epoch_num_nodes = membership.read().await.total_nodes(data_epoch); + Some(spawn_blocking(move || { + vid_scheme(data_epoch_num_nodes).commit_only(&txns) + .unwrap_or_else(|err| panic!("VID commit_only failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) + }).await) + }; // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); - - Self::from_membership(view, vid_disperse, membership, target_epoch, sender_epoch).await + let data_epoch_payload_commitment = + data_epoch_payload_commitment.map(|result| result.unwrap()); + + Self::from_membership( + view, + vid_disperse, + membership, + target_epoch, + data_epoch, + data_epoch_payload_commitment, + ) + .await } } @@ -364,7 +391,9 @@ impl VidDisperseShare { let mut vid_disperse = VidDisperse { view_number: first_vid_disperse_share.view_number, epoch: TYPES::Epoch::new(0), + target_epoch: TYPES::Epoch::new(0), payload_commitment: first_vid_disperse_share.payload_commitment, + data_epoch_payload_commitment: None, common: first_vid_disperse_share.common, shares: share_map, }; @@ -405,10 +434,14 @@ impl VidDisperseShare { pub struct VidDisperseShare2 { /// The view number for which this VID data is intended pub view_number: TYPES::View, - /// The epoch number for which this VID data is intended + /// The epoch number for which this VID data belongs to pub epoch: TYPES::Epoch, + /// The epoch number to which the recipient of this VID belongs to + pub target_epoch: TYPES::Epoch, /// Block payload commitment pub payload_commitment: VidCommitment, + /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. + pub data_epoch_payload_commitment: Option, /// A storage node's key and its corresponding VID share pub share: VidShare, /// VID common data sent to all storage nodes @@ -422,7 +455,9 @@ impl From> for VidDisperseShare let VidDisperseShare2 { view_number, epoch: _, + target_epoch: _, payload_commitment, + data_epoch_payload_commitment: _, share, common, recipient_key, @@ -451,7 +486,9 @@ impl From> for VidDisperseShare2 Self { view_number, epoch: TYPES::Epoch::new(0), + target_epoch: TYPES::Epoch::new(0), payload_commitment, + data_epoch_payload_commitment: None, share, common, recipient_key, @@ -462,7 +499,6 @@ impl From> for VidDisperseShare2 impl VidDisperseShare2 { /// Create a vector of `VidDisperseShare` from `VidDisperse` pub fn from_vid_disperse(vid_disperse: VidDisperse) -> Vec { - let epoch = vid_disperse.epoch; vid_disperse .shares .into_iter() @@ -472,7 +508,9 @@ impl VidDisperseShare2 { view_number: vid_disperse.view_number, common: vid_disperse.common.clone(), payload_commitment: vid_disperse.payload_commitment, - epoch, + data_epoch_payload_commitment: vid_disperse.data_epoch_payload_commitment, + epoch: vid_disperse.epoch, + target_epoch: vid_disperse.target_epoch, }) .collect() } @@ -501,7 +539,6 @@ impl VidDisperseShare2 { I: Iterator, { let first_vid_disperse_share = it.next()?.clone(); - let epoch = first_vid_disperse_share.epoch; let mut share_map = BTreeMap::new(); share_map.insert( first_vid_disperse_share.recipient_key, @@ -509,8 +546,10 @@ impl VidDisperseShare2 { ); let mut vid_disperse = VidDisperse { view_number: first_vid_disperse_share.view_number, - epoch, + epoch: first_vid_disperse_share.epoch, + target_epoch: first_vid_disperse_share.target_epoch, payload_commitment: first_vid_disperse_share.payload_commitment, + data_epoch_payload_commitment: first_vid_disperse_share.data_epoch_payload_commitment, common: first_vid_disperse_share.common, shares: share_map, }; @@ -527,7 +566,6 @@ impl VidDisperseShare2 { pub fn to_vid_share_proposals( vid_disperse_proposal: Proposal>, ) -> Vec> { - let epoch = vid_disperse_proposal.data.epoch; vid_disperse_proposal .data .shares @@ -539,7 +577,11 @@ impl VidDisperseShare2 { view_number: vid_disperse_proposal.data.view_number, common: vid_disperse_proposal.data.common.clone(), payload_commitment: vid_disperse_proposal.data.payload_commitment, - epoch, + data_epoch_payload_commitment: vid_disperse_proposal + .data + .data_epoch_payload_commitment, + epoch: vid_disperse_proposal.data.epoch, + target_epoch: vid_disperse_proposal.data.target_epoch, }, signature: vid_disperse_proposal.signature.clone(), _pd: vid_disperse_proposal._pd, From d211194ad4607ba036e753d4ab731d080c90b705 Mon Sep 17 00:00:00 2001 From: King Date: Thu, 2 Jan 2025 22:48:27 +0100 Subject: [PATCH 1337/1393] typo fix request.rs (#3993) --- task-impls/src/request.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index c3bb9a2fb8..b487497408 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -260,7 +260,7 @@ impl> NetworkRequestState Date: Thu, 2 Jan 2025 22:48:43 +0100 Subject: [PATCH 1338/1393] Fix grammar issues in documentation and code comments (#3983) * typos orchestrator.rs * typos all.rs * typos mod.rs * typos mod.rs --------- Co-authored-by: Phil <184445976+pls148@users.noreply.github.com> --- examples/orchestrator.rs | 2 +- examples/push-cdn/all.rs | 2 +- libp2p-networking/src/network/mod.rs | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/orchestrator.rs b/examples/orchestrator.rs index 3bb419b980..42ea1d9014 100644 --- a/examples/orchestrator.rs +++ b/examples/orchestrator.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -//! A orchestrator +//! An orchestrator use hotshot::helpers::initialize_logging; use hotshot_example_types::state_types::TestTypes; diff --git a/examples/push-cdn/all.rs b/examples/push-cdn/all.rs index 12599c36a4..444edc60fe 100644 --- a/examples/push-cdn/all.rs +++ b/examples/push-cdn/all.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -//! A example program using the Push CDN +//! An example program using the Push CDN /// The types we're importing pub mod types; diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index eeb654997e..c4348af023 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -128,14 +128,14 @@ pub enum NetworkEvent { pub enum NetworkEventInternal { /// a DHT event DHTEvent(libp2p::kad::Event), - /// a identify event. Is boxed because this event is much larger than the other ones so we want + /// an identify event. Is boxed because this event is much larger than the other ones so we want /// to store it on the heap. IdentifyEvent(Box), /// a gossip event GossipEvent(Box), /// a direct message event DMEvent(libp2p::request_response::Event, Vec>), - /// a autonat event + /// an autonat event AutonatEvent(libp2p::autonat::Event), } From 2f65d0a5703b836f9fe4f44c2db9a6ea198f0223 Mon Sep 17 00:00:00 2001 From: loselarry <166403105+loselarry@users.noreply.github.com> Date: Fri, 3 Jan 2025 05:49:33 +0800 Subject: [PATCH 1339/1393] chore: remove redundant words in comment (#3987) Signed-off-by: loselarry --- hotshot/src/types/handle.rs | 2 +- testing/src/test_builder.rs | 4 ++-- types/src/traits/network.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 556ff0e1c5..d1d45473c1 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -289,7 +289,7 @@ impl + 'static, V: Versions> self.hotshot.consensus() } - /// Shut down the the inner hotshot and wait until all background threads are closed. + /// Shut down the inner hotshot and wait until all background threads are closed. pub async fn shut_down(&mut self) { // this is required because `SystemContextHandle` holds an inactive receiver and // `broadcast_direct` below can wait indefinitely diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index a4083f461c..c91d45a4d3 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -433,7 +433,7 @@ where /// turn a description of a test (e.g. a [`TestDescription`]) into /// a [`TestLauncher`] that can be used to launch the test. /// # Panics - /// if some of the the configuration values are zero + /// if some of the configuration values are zero pub fn gen_launcher(self, node_id: u64) -> TestLauncher { self.gen_launcher_with_tasks(node_id, vec![]) } @@ -442,7 +442,7 @@ where /// a [`TestLauncher`] that can be used to launch the test, with /// additional testing tasks to run in test harness /// # Panics - /// if some of the the configuration values are zero + /// if some of the configuration values are zero #[must_use] pub fn gen_launcher_with_tasks( self, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index f85036dfb9..2348dc8bc7 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -145,7 +145,7 @@ pub enum RequestKind { } /// A response for a request. `SequencingMessage` is the same as other network messages -/// The kind of message `M` is is determined by what we requested +/// The kind of message `M` is determined by what we requested #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] #[allow(clippy::large_enum_variant)] From 7a7ab87d3c787cd19e052fde1a3e7bf809336061 Mon Sep 17 00:00:00 2001 From: Savely <136869149+savvar9991@users.noreply.github.com> Date: Sat, 4 Jan 2025 03:10:06 +1100 Subject: [PATCH 1340/1393] Fix 404 Error and Improve Debug Logging in README (#3999) * Update README.md * Update README.md --- testing/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/README.md b/testing/README.md index 96016cd427..ee2bfff4a0 100644 --- a/testing/README.md +++ b/testing/README.md @@ -57,7 +57,7 @@ async { let hook = RoundHook(Arc::new(move |_runner, ctx| { async move { - tracing::error!("Context for this view is {:#?})", ctx); + tracing::error!("Context for this view is {:#?}", ctx); Ok(()) } .boxed_local() From 62784c40e7d53f0bc9941fe79f64dc10a0ae4b0b Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Fri, 3 Jan 2025 08:43:31 -0800 Subject: [PATCH 1341/1393] fix debugging config file link (#4000) --- types/src/simple_vote.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index f75d451914..ad651458d1 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -560,7 +560,7 @@ impl< { } -// impl votable for all the data types in this file sealed marker should ensure nothing is accidently +// impl votable for all the data types in this file sealed marker should ensure nothing is accidentally // implemented for structs that aren't "voteable" impl< TYPES: NodeType, From a7201ae9ecfe3cd976ebfd821431aa3023ab4047 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Fri, 3 Jan 2025 11:04:31 -0800 Subject: [PATCH 1342/1393] [DRB] - Verify the DRB result from the computation one epoch in advance (#3948) * Initial commit * WIP: adding epoch to proposal and vote data, not compiling yet * Make it compile * Adjust tests * Add a test type for two stake tables for even and odd epochs * Debugging * Fix extended voting * Try "in epoch transition" approach * Continue debugging * Use correct epoch with Membership * Adjust tests and lints * Adapt to variable stake table after merge * Fix accidentally pulled bug in eQC rule * Commit includes epoch for vote and proposal data types * Prune dependencies (#3787) * add new message types and gate outgoing messages * Use the proper message for the proposal response * Modify commit for `Leaf2` and `QuorumData2` * Adjust tests * Clean up debug traces * Initial commit for double quorum * Add TODO * Next epoch nodes vote during epoch transition * Form the second QC at the end of an epoch * Allow early payload save but check that's it's the same * Attach next epoch justify qc to proposals * Validate the next epoch justify qc * Test with more network types * Fix fmt in tests * Use real threshold in the tests based on an epoch * Membership thresholds depend on an epoch * Make sure epoch transition proposals include the next epoch QC * Use epoch from vote and add more tests * Adjust marketplace ver number * Epochs without Marketplace and adjust tests * Save before sync with main * Complete the function * Refactor and move drb computation types * Add leader handling * Update result storing logic * Fix typo * Remove unused drb fields from leaf and proposal * Fix typo * Fix Leaf2 commit calculation * Rename function --------- Co-authored-by: Lukasz Rzasik Co-authored-by: Artemii Gerasimovich Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- hotshot/src/tasks/task_state.rs | 17 +- task-impls/src/quorum_proposal/handlers.rs | 18 +- .../src/quorum_vote/drb_computations.rs | 126 --------- task-impls/src/quorum_vote/handlers.rs | 267 +++++++++++++++--- task-impls/src/quorum_vote/mod.rs | 11 +- testing/src/view_generator.rs | 7 +- testing/tests/tests_1/test_success.rs | 1 - types/src/consensus.rs | 5 + types/src/data.rs | 43 +-- types/src/drb.rs | 71 ++++- types/src/utils.rs | 11 + 11 files changed, 351 insertions(+), 226 deletions(-) delete mode 100644 task-impls/src/quorum_vote/drb_computations.rs diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 9b0230c990..c854624bc3 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -12,17 +12,10 @@ use std::{ use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ - builder::BuilderClient, - consensus::ConsensusTaskState, - da::DaTaskState, - quorum_proposal::QuorumProposalTaskState, - quorum_proposal_recv::QuorumProposalRecvTaskState, - quorum_vote::{drb_computations::DrbComputations, QuorumVoteTaskState}, - request::NetworkRequestState, - rewind::RewindTaskState, - transactions::TransactionTaskState, - upgrade::UpgradeTaskState, - vid::VidTaskState, + builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, + quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, + quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, + transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::{ @@ -243,7 +236,7 @@ impl, V: Versions> CreateTaskState vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), membership: Arc::clone(&handle.hotshot.memberships), - drb_computations: DrbComputations::new(), + drb_computation: None, output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index b9d696f01b..412e3fb41d 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -25,7 +25,6 @@ use hotshot_task::{ use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence}, - drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ @@ -34,7 +33,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, }, - utils::epoch_from_block_number, + utils::{epoch_from_block_number, is_last_block_in_epoch}, vote::{Certificate, HasViewNumber}, }; use tracing::instrument; @@ -399,6 +398,18 @@ impl ProposalDependencyHandle { } else { None }; + let next_drb_result = + if is_last_block_in_epoch(block_header.block_number(), self.epoch_height) { + self.consensus + .read() + .await + .drb_seeds_and_results + .results + .get(&epoch) + .copied() + } else { + None + }; let proposal = QuorumProposal2 { block_header, view_number: self.view_number, @@ -406,8 +417,7 @@ impl ProposalDependencyHandle { next_epoch_justify_qc: next_epoch_qc, upgrade_certificate, view_change_evidence: proposal_certificate, - drb_seed: INITIAL_DRB_SEED_INPUT, - drb_result: INITIAL_DRB_RESULT, + next_drb_result, }; let proposed_leaf = Leaf2::from_quorum_proposal(&proposal); diff --git a/task-impls/src/quorum_vote/drb_computations.rs b/task-impls/src/quorum_vote/drb_computations.rs deleted file mode 100644 index fc5483f8dd..0000000000 --- a/task-impls/src/quorum_vote/drb_computations.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::collections::{btree_map, BTreeMap}; - -use hotshot_types::{ - drb::{compute_drb_result, DrbResult, DrbSeedInput}, - traits::node_implementation::{ConsensusTime, NodeType}, -}; -use tokio::{spawn, task::JoinHandle}; - -/// Number of previous results and seeds to keep -pub const KEEP_PREVIOUS_RESULT_COUNT: u64 = 8; - -/// Helper struct to track state of DRB computations -pub struct DrbComputations { - /// Stored results from computations - results: BTreeMap, - - /// Currently live computation - task: Option<(TYPES::Epoch, JoinHandle)>, - - /// Stored inputs to computations - seeds: BTreeMap, -} - -impl DrbComputations { - #[must_use] - /// Create a new DrbComputations - pub fn new() -> Self { - Self { - results: BTreeMap::new(), - task: None, - seeds: BTreeMap::new(), - } - } - - /// If a task is currently live AND has finished, join it and save the result. - /// If the epoch for the calculation was the same as the provided epoch, return true - /// If a task is currently live and NOT finished, abort it UNLESS the task epoch is the same as - /// cur_epoch, in which case keep letting it run and return true. - /// Return false if a task should be spawned for the given epoch. - async fn join_or_abort_old_task(&mut self, epoch: TYPES::Epoch) -> bool { - if let Some((task_epoch, join_handle)) = &mut self.task { - if join_handle.is_finished() { - match join_handle.await { - Ok(result) => { - self.results.insert(*task_epoch, result); - let result = *task_epoch == epoch; - self.task = None; - result - } - Err(e) => { - tracing::error!("error joining DRB computation task: {e:?}"); - false - } - } - } else if *task_epoch == epoch { - true - } else { - join_handle.abort(); - self.task = None; - false - } - } else { - false - } - } - - /// Stores a seed for a particular epoch for later use by start_task_if_not_running, called from handle_quorum_proposal_validated_drb_calculation_start - pub fn store_seed(&mut self, epoch: TYPES::Epoch, drb_seed_input: DrbSeedInput) { - self.seeds.insert(epoch, drb_seed_input); - } - - /// Starts a new task. Cancels a current task if that task is not for the provided epoch. Allows a task to continue - /// running if it was already started for the given epoch. Avoids running the task if we already have a result for - /// the epoch. - pub async fn start_task_if_not_running(&mut self, epoch: TYPES::Epoch) { - // If join_or_abort_task returns true, then we either just completed a task for this epoch, or we currently - // have a running task for the epoch. - if self.join_or_abort_old_task(epoch).await { - return; - } - - // In case we somehow ended up processing this epoch already, don't start it again - if self.results.contains_key(&epoch) { - return; - } - - if let btree_map::Entry::Occupied(entry) = self.seeds.entry(epoch) { - let drb_seed_input = *entry.get(); - let new_drb_task = spawn(async move { compute_drb_result::(drb_seed_input) }); - self.task = Some((epoch, new_drb_task)); - entry.remove(); - } - } - - /// Retrieves the result for a given epoch - pub fn get_result(&self, epoch: TYPES::Epoch) -> Option { - self.results.get(&epoch).copied() - } - - /// Retrieves the seed for a given epoch - pub fn get_seed(&self, epoch: TYPES::Epoch) -> Option { - self.seeds.get(&epoch).copied() - } - - /// Garbage collects internal data structures - pub fn garbage_collect(&mut self, epoch: TYPES::Epoch) { - if epoch.u64() < KEEP_PREVIOUS_RESULT_COUNT { - return; - } - - let retain_epoch = epoch - KEEP_PREVIOUS_RESULT_COUNT; - // N.B. x.split_off(y) returns the part of the map where key >= y - - // Remove result entries older than EPOCH - self.results = self.results.split_off(&retain_epoch); - - // Remove result entries older than EPOCH+1 - self.seeds = self.seeds.split_off(&(retain_epoch + 1)); - } -} - -impl Default for DrbComputations { - fn default() -> Self { - Self::new() - } -} diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 0c9cdbb666..f7fc9997cd 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::sync::Arc; +use std::{collections::btree_map::Entry, sync::Arc}; use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; @@ -13,6 +13,7 @@ use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal2, VidDisperseShare2}, + drb::{compute_drb_result, DrbResult}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, @@ -24,9 +25,10 @@ use hotshot_types::{ storage::Storage, ValidatedState, }, - utils::{epoch_from_block_number, is_last_block_in_epoch}, + utils::{epoch_from_block_number, is_epoch_root, is_last_block_in_epoch}, vote::HasViewNumber, }; +use tokio::spawn; use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; @@ -41,13 +43,161 @@ use crate::{ quorum_vote::Versions, }; -/// Handles starting the DRB calculation. Uses the seed previously stored in -/// handle_quorum_proposal_validated_drb_calculation_seed -async fn handle_quorum_proposal_validated_drb_calculation_start< +/// Store the DRB result from the quorum proposal. +/// +/// Returns an error if receiving an inconsistent result. +async fn store_received_drb_result, V: Versions>( + epoch_number: TYPES::Epoch, + drb_result: DrbResult, + task_state: &mut QuorumVoteTaskState, +) -> Result<()> { + let mut exist = false; + if let Some(stored_result) = task_state + .consensus + .read() + .await + .drb_seeds_and_results + .results + .get(&epoch_number) + { + if drb_result == *stored_result { + return Ok(()); + } + exist = true; + } + + // If there exists an inconsistent result, remove it. + if exist { + task_state + .consensus + .write() + .await + .drb_seeds_and_results + .results + .remove(&epoch_number); + bail!("Inconsistent result with the storage."); + } + // Otherwise, store the result. + else { + task_state + .consensus + .write() + .await + .drb_seeds_and_results + .results + .insert(epoch_number, drb_result); + Ok(()) + } +} + +/// Store the DRB result from the computation task to the shared `results` table. +/// +/// Returns the result if it exists. +async fn store_and_get_computed_drb_result< TYPES: NodeType, I: NodeImplementation, V: Versions, >( + epoch_number: TYPES::Epoch, + task_state: &mut QuorumVoteTaskState, +) -> Result { + // Return the result if it's already in the table. + if let Some(computed_result) = task_state + .consensus + .read() + .await + .drb_seeds_and_results + .results + .get(&epoch_number) + { + return Ok(*computed_result); + } + if let Some((task_epoch, computation)) = &mut task_state.drb_computation { + if *task_epoch == epoch_number { + // If the computation is finished, remove the task and store the result. + if computation.is_finished() { + match computation.await { + Ok(computed_result) => { + let mut consensus_writer = task_state.consensus.write().await; + consensus_writer + .drb_seeds_and_results + .results + .insert(epoch_number, computed_result); + task_state.drb_computation = None; + Ok(computed_result) + } + Err(e) => { + bail!("Failed to get the DRB result though the computation is finished: {:?}.", e); + } + } + } else { + bail!("DRB computation isn't finished."); + } + } else { + bail!("DRB computation isn't for the next epoch."); + } + } else { + bail!("DRB computation task doesn't exist."); + } +} + +/// Verify the DRB result from the proposal for the next epoch if this is the last block of the +/// current epoch. +/// +/// Uses the result from `start_drb_task`. +/// +/// Returns an error if we should not vote. +async fn verify_drb_result, V: Versions>( + proposal: &QuorumProposal2, + task_state: &mut QuorumVoteTaskState, +) -> Result<()> { + let current_block_number = proposal.block_header.block_number(); + + // Skip if this is not the expected block. + if task_state.epoch_height != 0 + && is_last_block_in_epoch(current_block_number, task_state.epoch_height) + { + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + proposal.block_header.block_number(), + task_state.epoch_height, + )); + + let Some(proposal_result) = proposal.next_drb_result else { + bail!( + "The proposal for the last block of an epoch should contain the DRB result for the next epoch." + ); + }; + + // Verify and store the result depending on our membership. + if task_state + .membership + .read() + .await + .has_stake(&task_state.public_key, current_epoch_number) + { + let computed_result = + store_and_get_computed_drb_result(current_epoch_number + 1, task_state).await?; + if proposal_result != computed_result { + bail!("Inconsistent DRB result for the next epoch."); + } + return Ok(()); + } else if task_state + .membership + .read() + .await + .has_stake(&task_state.public_key, current_epoch_number + 1) + { + store_received_drb_result(current_epoch_number + 1, proposal_result, task_state) + .await?; + } + } + Ok(()) +} + +/// Start the DRB computation task for the next epoch. +/// +/// Uses the seed previously stored in `store_drb_seed_and_result`. +async fn start_drb_task, V: Versions>( proposal: &QuorumProposal2, task_state: &mut QuorumVoteTaskState, ) { @@ -63,30 +213,74 @@ async fn handle_quorum_proposal_validated_drb_calculation_start< .await .has_stake(&task_state.public_key, current_epoch_number) { - task_state - .drb_computations - .start_task_if_not_running(current_epoch_number + 1) - .await; + let new_epoch_number = current_epoch_number + 1; + + // If a task is currently live AND has finished, join it and save the result. + // If the epoch for the calculation was the same as the provided epoch, return. + // If a task is currently live and NOT finished, abort it UNLESS the task epoch is the + // same as cur_epoch, in which case keep letting it run and return. + // Continue the function if a task should be spawned for the given epoch. + if let Some((task_epoch, join_handle)) = &mut task_state.drb_computation { + if join_handle.is_finished() { + match join_handle.await { + Ok(result) => { + task_state + .consensus + .write() + .await + .drb_seeds_and_results + .results + .insert(*task_epoch, result); + task_state.drb_computation = None; + } + Err(e) => { + tracing::error!("error joining DRB computation task: {e:?}"); + } + } + } else if *task_epoch == new_epoch_number { + return; + } else { + join_handle.abort(); + task_state.drb_computation = None; + } + } + + // In case we somehow ended up processing this epoch already, don't start it again + let mut consensus_writer = task_state.consensus.write().await; + if consensus_writer + .drb_seeds_and_results + .results + .contains_key(&new_epoch_number) + { + return; + } + + if let Entry::Occupied(entry) = consensus_writer + .drb_seeds_and_results + .seeds + .entry(new_epoch_number) + { + let drb_seed_input = *entry.get(); + let new_drb_task = spawn(async move { compute_drb_result::(drb_seed_input) }); + task_state.drb_computation = Some((new_epoch_number, new_drb_task)); + entry.remove(); + } } } -/// Handles storing the seed for an upcoming DRB calculation. +/// Store the DRB seed two epochs in advance and the computed DRB result for next epoch. /// -/// We store the DRB computation seed 2 epochs in advance, if the decided block is the last but -/// third block in the current epoch and we are in the quorum committee of the next epoch. +/// We store the DRB seed and result if the decided block is the third from the last block in the +/// current epoch and for the former, if we are in the quorum committee of the next epoch. /// /// Special cases: /// * Epoch 0: No DRB computation since we'll transition to epoch 1 immediately. -/// * Epoch 1 and 2: Use `[0u8; 32]` as the DRB result since when we first start the -/// computation in epoch 1, the result is for epoch 3. +/// * Epoch 1 and 2: No computed DRB result since when we first start the computation in epoch 1, +/// the result is for epoch 3. /// -/// We don't need to handle the special cases explicitly here, because the first proposal -/// with which we'll start the DRB computation is for epoch 3. -async fn handle_quorum_proposal_validated_drb_calculation_seed< - TYPES: NodeType, - I: NodeImplementation, - V: Versions, ->( +/// We don't need to handle the special cases explicitly here, because the first proposal with +/// which we'll start the DRB computation is for epoch 3. +async fn store_drb_seed_and_result, V: Versions>( proposal: &QuorumProposal2, task_state: &mut QuorumVoteTaskState, leaf_views: &[LeafInfo], @@ -100,16 +294,23 @@ async fn handle_quorum_proposal_validated_drb_calculation_seed< .block_number(); // Skip if this is not the expected block. - if task_state.epoch_height != 0 && (decided_block_number + 3) % task_state.epoch_height == 0 { + if task_state.epoch_height != 0 && is_epoch_root(decided_block_number, task_state.epoch_height) + { // Cancel old DRB computation tasks. let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( decided_block_number, task_state.epoch_height, )); - task_state - .drb_computations + let mut consensus_writer = task_state.consensus.write().await; + consensus_writer + .drb_seeds_and_results .garbage_collect(current_epoch_number); + drop(consensus_writer); + + // Store the DRB result for the next epoch, which will be used by the proposal task to + // include in the proposal in the last block of this epoch. + store_and_get_computed_drb_result(current_epoch_number + 1, task_state).await?; // Skip if we are not in the committee of the next epoch. if task_state @@ -126,9 +327,12 @@ async fn handle_quorum_proposal_validated_drb_calculation_seed< bail!("Failed to convert the serialized QC signature into a DRB seed input."); }; - // Store the drb seed input for the next calculation + // Store the DRB seed input for the epoch after the next one. task_state - .drb_computations + .consensus + .write() + .await + .drb_seeds_and_results .store_seed(new_epoch_number, drb_seed_input); } } @@ -151,7 +355,9 @@ pub(crate) async fn handle_quorum_proposal_validated< .await?; if version >= V::Epochs::VERSION { - handle_quorum_proposal_validated_drb_calculation_start(proposal, task_state).await; + // Don't vote if the DRB result verification fails. + verify_drb_result(proposal, task_state).await?; + start_drb_task(proposal, task_state).await; } let LeafChainTraversalOutcome { @@ -252,12 +458,7 @@ pub(crate) async fn handle_quorum_proposal_validated< tracing::debug!("Successfully sent decide event"); if version >= V::Epochs::VERSION { - handle_quorum_proposal_validated_drb_calculation_seed( - proposal, - task_state, - &leaf_views, - ) - .await?; + store_drb_seed_and_result(proposal, task_state, &leaf_views).await?; } } diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 2b30bab7a9..29ebc612d2 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -10,7 +10,6 @@ use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; -use drb_computations::DrbComputations; use hotshot_task::{ dependency::{AndDependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, @@ -19,6 +18,7 @@ use hotshot_task::{ use hotshot_types::{ consensus::{ConsensusMetricsValue, OuterConsensus}, data::{Leaf2, QuorumProposal2}, + drb::DrbComputation, event::Event, message::{Proposal, UpgradeLock}, traits::{ @@ -44,9 +44,6 @@ use crate::{ quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, }; -/// Helper for DRB Computations -pub mod drb_computations; - /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -296,9 +293,8 @@ pub struct QuorumVoteTaskState, V: /// Membership for Quorum certs/votes and DA committee certs/votes. pub membership: Arc>, - /// Table for the in-progress DRB computation tasks. - //pub drb_computations: BTreeMap>, - pub drb_computations: DrbComputations, + /// In-progress DRB computation task. + pub drb_computation: DrbComputation, /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -570,6 +566,7 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = &disperse.data.payload_commitment; + // Check that the signature is valid ensure!( sender.validate(&disperse.signature, payload_commitment.as_ref()), diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 81da7fffb4..563679297e 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -26,7 +26,6 @@ use hotshot_types::{ DaProposal2, EpochNumber, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2, ViewChangeEvidence, ViewNumber, }, - drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ DaCertificate2, QuorumCertificate2, TimeoutCertificate2, UpgradeCertificate, @@ -144,8 +143,7 @@ impl TestView { next_epoch_justify_qc: None, upgrade_certificate: None, view_change_evidence: None, - drb_result: INITIAL_DRB_RESULT, - drb_seed: INITIAL_DRB_SEED_INPUT, + next_drb_result: None, }; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); @@ -377,8 +375,7 @@ impl TestView { next_epoch_justify_qc: None, upgrade_certificate: upgrade_certificate.clone(), view_change_evidence, - drb_result: INITIAL_DRB_RESULT, - drb_seed: INITIAL_DRB_SEED_INPUT, + next_drb_result: None, }; let mut leaf = Leaf2::from_quorum_proposal(&proposal); diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index e7bc509593..987a07224a 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -283,7 +283,6 @@ cross_tests!( start_nodes: 11, num_bootstrap_nodes: 11, da_staked_committee_size: 11, - ..TestDescription::default() } }, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 2530417f90..5a857b360b 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -22,6 +22,7 @@ use vec1::Vec1; pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2}, + drb::DrbSeedsAndResults, error::HotShotError, event::{HotShotAction, LeafInfo}, message::Proposal, @@ -326,6 +327,9 @@ pub struct Consensus { /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, + + /// Tables for the DRB seeds and results. + pub drb_seeds_and_results: DrbSeedsAndResults, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces @@ -436,6 +440,7 @@ impl Consensus { next_epoch_high_qc, metrics, epoch_height, + drb_seeds_and_results: DrbSeedsAndResults::new(), } } diff --git a/types/src/data.rs b/types/src/data.rs index 0d6fcfff7a..2acafe1268 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -30,7 +30,7 @@ use utils::anytrace::*; use vec1::Vec1; use crate::{ - drb::{DrbResult, DrbSeedInput, INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, + drb::DrbResult, impl_has_epoch, message::{Proposal, UpgradeLock}, simple_certificate::{ @@ -635,17 +635,12 @@ pub struct QuorumProposal2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, - /// The DRB seed for the next epoch. + /// The DRB result for the next epoch. /// - /// The DRB computation using this seed was started in the previous epoch. + /// This is required only for the last block of the epoch. Nodes will verify that it's + /// consistent with the result from their computations. #[serde(with = "serde_bytes")] - pub drb_seed: DrbSeedInput, - - /// The DRB result for the current epoch. - /// - /// The DRB computation with this result was started two epochs ago. - #[serde(with = "serde_bytes")] - pub drb_result: DrbResult, + pub next_drb_result: Option, } impl From> for QuorumProposal2 { @@ -657,8 +652,7 @@ impl From> for QuorumProposal2 { next_epoch_justify_qc: None, upgrade_certificate: quorum_proposal.upgrade_certificate, view_change_evidence: quorum_proposal.proposal_certificate, - drb_seed: INITIAL_DRB_SEED_INPUT, - drb_result: INITIAL_DRB_RESULT, + next_drb_result: None, } } } @@ -689,8 +683,6 @@ impl From> for Leaf2 { upgrade_certificate: leaf.upgrade_certificate, block_payload: leaf.block_payload, view_change_evidence: None, - drb_seed: INITIAL_DRB_SEED_INPUT, - drb_result: INITIAL_DRB_RESULT, } } } @@ -836,18 +828,6 @@ pub struct Leaf2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, - - /// The DRB seed for the next epoch. - /// - /// The DRB computation using this seed was started in the previous epoch. - #[serde(with = "serde_bytes")] - pub drb_seed: DrbSeedInput, - - /// The DRB result for the current epoch. - /// - /// The DRB computation with this result was started two epochs ago. - #[serde(with = "serde_bytes")] - pub drb_result: DrbResult, } impl Leaf2 { @@ -901,8 +881,6 @@ impl Leaf2 { block_payload: Some(payload), epoch: TYPES::Epoch::genesis(), view_change_evidence: None, - drb_seed: [0; 32], - drb_result: [0; 32], } } /// Time when this leaf was created. @@ -1068,8 +1046,6 @@ impl PartialEq for Leaf2 { upgrade_certificate, block_payload: _, view_change_evidence, - drb_seed, - drb_result, } = self; *view_number == other.view_number @@ -1080,8 +1056,6 @@ impl PartialEq for Leaf2 { && *block_header == other.block_header && *upgrade_certificate == other.upgrade_certificate && *view_change_evidence == other.view_change_evidence - && *drb_seed == other.drb_seed - && *drb_result == other.drb_result } } @@ -1445,8 +1419,7 @@ impl Leaf2 { block_header, upgrade_certificate, view_change_evidence, - drb_seed, - drb_result, + next_drb_result: _, } = quorum_proposal; Self { @@ -1462,8 +1435,6 @@ impl Leaf2 { upgrade_certificate: upgrade_certificate.clone(), block_payload: None, view_change_evidence: view_change_evidence.clone(), - drb_seed: *drb_seed, - drb_result: *drb_result, } } } diff --git a/types/src/drb.rs b/types/src/drb.rs index 1790e9b30f..e52fb89605 100644 --- a/types/src/drb.rs +++ b/types/src/drb.rs @@ -4,11 +4,18 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::hash::{DefaultHasher, Hash, Hasher}; +use std::{ + collections::BTreeMap, + hash::{DefaultHasher, Hash, Hasher}, +}; use sha2::{Digest, Sha256}; +use tokio::task::JoinHandle; -use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use crate::traits::{ + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, +}; // TODO: Add the following consts once we bench the hash time. // @@ -34,6 +41,9 @@ pub type DrbSeedInput = [u8; 32]; /// Alias for DRB result from `compute_drb_result`. pub type DrbResult = [u8; 32]; +/// Number of previous results and seeds to keep +pub const KEEP_PREVIOUS_RESULT_COUNT: u64 = 8; + // TODO: Use `HASHES_PER_SECOND` * `VIEW_TIMEOUT` * `DRB_CALCULATION_NUM_VIEW` to calculate this // once we bench the hash time. // @@ -86,3 +96,60 @@ pub fn leader( let entry = stake_table[index].clone(); TYPES::SignatureKey::public_key(&entry) } + +/// Alias for in-progress DRB computation task, if there's any. +pub type DrbComputation = Option<(::Epoch, JoinHandle)>; + +/// Seeds for DRB computation and computed results. +#[derive(Clone, Debug)] +pub struct DrbSeedsAndResults { + /// Stored inputs to computations + pub seeds: BTreeMap, + + /// Stored results from computations + pub results: BTreeMap, +} + +impl DrbSeedsAndResults { + #[must_use] + /// Constructor with initial values for epochs 1 and 2. + pub fn new() -> Self { + Self { + seeds: BTreeMap::from([ + (TYPES::Epoch::new(1), INITIAL_DRB_SEED_INPUT), + (TYPES::Epoch::new(2), INITIAL_DRB_SEED_INPUT), + ]), + results: BTreeMap::from([ + (TYPES::Epoch::new(1), INITIAL_DRB_RESULT), + (TYPES::Epoch::new(2), INITIAL_DRB_RESULT), + ]), + } + } + + /// Stores a seed for a particular epoch for later use by `start_drb_task`. + pub fn store_seed(&mut self, epoch: TYPES::Epoch, drb_seed_input: DrbSeedInput) { + self.seeds.insert(epoch, drb_seed_input); + } + + /// Garbage collects internal data structures + pub fn garbage_collect(&mut self, epoch: TYPES::Epoch) { + if epoch.u64() < KEEP_PREVIOUS_RESULT_COUNT { + return; + } + + let retain_epoch = epoch - KEEP_PREVIOUS_RESULT_COUNT; + // N.B. x.split_off(y) returns the part of the map where key >= y + + // Remove result entries older than EPOCH + self.results = self.results.split_off(&retain_epoch); + + // Remove result entries older than EPOCH+1 + self.seeds = self.seeds.split_off(&(retain_epoch + 1)); + } +} + +impl Default for DrbSeedsAndResults { + fn default() -> Self { + Self::new() + } +} diff --git a/types/src/utils.rs b/types/src/utils.rs index 50877f047a..6496e182f8 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -279,3 +279,14 @@ pub fn is_last_block_in_epoch(block_number: u64, epoch_height: u64) -> bool { block_number % epoch_height == 0 } } + +/// Returns true if the given block number is the third from the last in the epoch based on the +/// given epoch height. +#[must_use] +pub fn is_epoch_root(block_number: u64, epoch_height: u64) -> bool { + if block_number == 0 || epoch_height == 0 { + false + } else { + (block_number + 2) % epoch_height == 0 + } +} From 872966e59ecab0cd9bb515646b214e7537b219f1 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Fri, 3 Jan 2025 13:36:00 -0800 Subject: [PATCH 1343/1393] 3966 add_epoch_root and sync_l1 on Membership (#3984) --- task-impls/src/helpers.rs | 76 +++++++++++++++++++++++++- task-impls/src/quorum_vote/handlers.rs | 4 ++ types/src/traits/election.rs | 21 +++++++ 3 files changed, 99 insertions(+), 2 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 1ae16cf21c..b480841197 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -28,7 +28,9 @@ use hotshot_types::{ signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{epoch_from_block_number, is_last_block_in_epoch, Terminator, View, ViewInner}, + utils::{ + epoch_from_block_number, is_epoch_root, is_last_block_in_epoch, Terminator, View, ViewInner, + }, vote::{Certificate, HasViewNumber}, }; use tokio::time::timeout; @@ -163,6 +165,56 @@ pub(crate) async fn fetch_proposal( Ok((leaf, view)) } +/// Handles calling add_epoch_root and sync_l1 on Membership if necessary. +async fn decide_from_proposal_add_epoch_root( + proposal: &QuorumProposal2, + leaf_views: &[LeafInfo], + epoch_height: u64, + membership: &Arc>, +) { + if leaf_views.is_empty() { + return; + } + + let decided_block_number = leaf_views + .last() + .unwrap() + .leaf + .block_header() + .block_number(); + + // Skip if this is not the expected block. + if epoch_height != 0 && is_epoch_root(decided_block_number, epoch_height) { + let next_epoch_number = + TYPES::Epoch::new(epoch_from_block_number(decided_block_number, epoch_height) + 1); + + let write_callback = { + let membership_reader = membership.read().await; + membership_reader + .add_epoch_root(next_epoch_number, proposal.block_header.clone()) + .await + }; + + if let Some(write_callback) = write_callback { + let mut membership_writer = membership.write().await; + write_callback(&mut *membership_writer); + } else { + // If we didn't get a write callback out of add_epoch_root, then don't bother locking and calling sync_l1 + return; + } + + let write_callback = { + let membership_reader = membership.read().await; + membership_reader.sync_l1().await + }; + + if let Some(write_callback) = write_callback { + let mut membership_writer = membership.write().await; + write_callback(&mut *membership_writer); + } + } +} + /// Helper type to give names and to the output values of the leaf chain traversal operation. #[derive(Debug)] pub struct LeafChainTraversalOutcome { @@ -202,7 +254,7 @@ impl Default for LeafChainTraversalOutcome { } } -/// calculate the new decided leaf chain based on the rules of hostuff 2 +/// calculate the new decided leaf chain based on the rules of HotStuff 2 /// /// # Panics /// Can't actually panic @@ -211,6 +263,8 @@ pub async fn decide_from_proposal_2( consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, + with_epochs: bool, + membership: &Arc>, ) -> LeafChainTraversalOutcome { let mut res = LeafChainTraversalOutcome::default(); let consensus_reader = consensus.read().await; @@ -282,6 +336,14 @@ pub async fn decide_from_proposal_2( res.included_txns = Some(txns); } + if with_epochs && res.new_decided_view_number.is_some() { + let epoch_height = consensus_reader.epoch_height; + drop(consensus_reader); + + decide_from_proposal_add_epoch_root(proposal, &res.leaf_views, epoch_height, membership) + .await; + } + res } @@ -317,6 +379,8 @@ pub async fn decide_from_proposal( consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, + with_epochs: bool, + membership: &Arc>, ) -> LeafChainTraversalOutcome { let consensus_reader = consensus.read().await; let existing_upgrade_cert_reader = existing_upgrade_cert.read().await; @@ -428,6 +492,14 @@ pub async fn decide_from_proposal( tracing::debug!("Leaf ascension failed; error={e}"); } + if with_epochs && res.new_decided_view_number.is_some() { + let epoch_height = consensus_reader.epoch_height; + drop(consensus_reader); + + decide_from_proposal_add_epoch_root(proposal, &res.leaf_views, epoch_height, membership) + .await; + } + res } diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index f7fc9997cd..bf9c5dec5c 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -373,6 +373,8 @@ pub(crate) async fn handle_quorum_proposal_validated< OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, + version >= V::Epochs::VERSION, + &task_state.membership, ) .await } else { @@ -381,6 +383,8 @@ pub(crate) async fn handle_quorum_proposal_validated< OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate), &task_state.public_key, + version >= V::Epochs::VERSION, + &task_state.membership, ) .await }; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index 0509918574..c862062190 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -7,11 +7,13 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. use std::{collections::BTreeSet, fmt::Debug, num::NonZeroU64}; +use async_trait::async_trait; use utils::anytrace::Result; use super::node_implementation::NodeType; use crate::{traits::signature_key::SignatureKey, PeerConfig}; +#[async_trait] /// A protocol for determining membership in and participating in a committee. pub trait Membership: Debug + Send + Sync { /// The error type returned by methods like `lookup_leader`. @@ -125,4 +127,23 @@ pub trait Membership: Debug + Send + Sync { /// Returns the threshold required to upgrade the network protocol fn upgrade_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; + + #[allow(clippy::type_complexity)] + /// Handles notifications that a new epoch root has been created + /// Is called under a read lock to the Membership. Return a callback + /// with Some to have that callback invoked under a write lock. + async fn add_epoch_root( + &self, + _epoch: TYPES::Epoch, + _block_header: TYPES::BlockHeader, + ) -> Option> { + None + } + + #[allow(clippy::type_complexity)] + /// Called after add_epoch_root runs and any callback has been invoked. + /// Causes a read lock to be reacquired for this functionality. + async fn sync_l1(&self) -> Option> { + None + } } From c4fccaf9733bcf2e24519552382898aae00304dd Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 6 Jan 2025 12:01:55 -0500 Subject: [PATCH 1344/1393] Remove `EPOCH_HEIGHT` as an associated constant in `NodeType` (#3997) --- example-types/src/node_types.rs | 10 ------ hotshot/src/tasks/task_state.rs | 1 + task-impls/src/helpers.rs | 4 +-- task-impls/src/quorum_vote/handlers.rs | 7 ++-- task-impls/src/quorum_vote/mod.rs | 2 ++ task-impls/src/request.rs | 5 ++- testing/src/helpers.rs | 34 ++++--------------- testing/src/overall_safety_task.rs | 6 ++-- testing/src/test_builder.rs | 19 ++++------- testing/src/test_runner.rs | 1 + testing/tests/tests_1/libp2p.rs | 2 ++ testing/tests/tests_1/test_success.rs | 11 ++++++ testing/tests/tests_1/test_with_failures_2.rs | 3 ++ testing/tests/tests_2/catchup.rs | 8 +++++ testing/tests/tests_3/byzantine_tests.rs | 6 ++++ testing/tests/tests_3/memory_network.rs | 2 -- .../tests_3/test_with_failures_half_f.rs | 3 ++ testing/tests/tests_4/byzantine_tests.rs | 1 + testing/tests/tests_4/test_marketplace.rs | 4 +++ .../tests_4/test_with_builder_failures.rs | 2 ++ testing/tests/tests_4/test_with_failures_f.rs | 1 + testing/tests/tests_5/combined_network.rs | 5 +++ testing/tests/tests_5/push_cdn.rs | 1 + testing/tests/tests_5/test_with_failures.rs | 1 + testing/tests/tests_5/timeout.rs | 1 + testing/tests/tests_5/unreliable_network.rs | 8 +++++ types/src/data.rs | 18 +++------- types/src/traits/node_implementation.rs | 2 -- 28 files changed, 93 insertions(+), 75 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index c7899aa3de..166976c3a0 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -52,8 +52,6 @@ use crate::{ /// to select our traits pub struct TestTypes; impl NodeType for TestTypes { - const EPOCH_HEIGHT: u64 = 10; - type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -84,8 +82,6 @@ impl NodeType for TestTypes { /// to select our traits pub struct TestTypesRandomizedLeader; impl NodeType for TestTypesRandomizedLeader { - const EPOCH_HEIGHT: u64 = 10; - type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -119,8 +115,6 @@ pub struct TestTypesRandomizedCommitteeMembers { } impl NodeType for TestTypesRandomizedCommitteeMembers { - const EPOCH_HEIGHT: u64 = 10; - type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -152,8 +146,6 @@ impl NodeType for TestTypesRandomizedCommitteeMember /// to select our traits pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { - const EPOCH_HEIGHT: u64 = 10; - type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -184,8 +176,6 @@ impl NodeType for TestConsecutiveLeaderTypes { /// to select our traits pub struct TestTwoStakeTablesTypes; impl NodeType for TestTwoStakeTablesTypes { - const EPOCH_HEIGHT: u64 = 10; - type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index c854624bc3..9023722a66 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -57,6 +57,7 @@ impl, V: Versions> CreateTaskState id: handle.hotshot.id, shutdown_flag: Arc::new(AtomicBool::new(false)), spawned_tasks: BTreeMap::new(), + epoch_height: handle.epoch_height, } } } diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index b480841197..251cb049f0 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -159,7 +159,7 @@ pub(crate) async fn fetch_proposal( leaf: leaf.commit(), state, delta: None, - epoch: leaf.epoch(), + epoch: leaf.epoch(epoch_height), }, }; Ok((leaf, view)) @@ -814,7 +814,7 @@ pub(crate) async fn validate_proposal_view_and_certs< { let epoch = TYPES::Epoch::new(epoch_from_block_number( proposal.data.block_header.block_number(), - TYPES::EPOCH_HEIGHT, + validation_info.epoch_height, )); UpgradeCertificate::validate( &proposal.data.upgrade_certificate, diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index bf9c5dec5c..cbc65bd672 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -203,7 +203,7 @@ async fn start_drb_task, V: Versio ) { let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( proposal.block_header.block_number(), - TYPES::EPOCH_HEIGHT, + task_state.epoch_height, )); // Start the new task if we're in the committee for this epoch @@ -615,17 +615,18 @@ pub(crate) async fn submit_vote, V leaf: Leaf2, vid_share: Proposal>, extended_vote: bool, + epoch_height: u64, ) -> Result<()> { let epoch_number = TYPES::Epoch::new(epoch_from_block_number( leaf.block_header().block_number(), - TYPES::EPOCH_HEIGHT, + epoch_height, )); let membership_reader = membership.read().await; let committee_member_in_current_epoch = membership_reader.has_stake(&public_key, epoch_number); // If the proposed leaf is for the last block in the epoch and the node is part of the quorum committee // in the next epoch, the node should vote to achieve the double quorum. - let committee_member_in_next_epoch = is_last_block_in_epoch(leaf.height(), TYPES::EPOCH_HEIGHT) + let committee_member_in_next_epoch = is_last_block_in_epoch(leaf.height(), epoch_height) && membership_reader.has_stake(&public_key, epoch_number + 1); drop(membership_reader); diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 29ebc612d2..8ee1223cf9 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -257,6 +257,7 @@ impl + 'static, V: Versions> Handl leaf, vid_share, false, + self.epoch_height, ) .await { @@ -767,6 +768,7 @@ impl, V: Versions> QuorumVoteTaskS proposed_leaf, updated_vid, is_vote_leaf_extended, + self.epoch_height, ) .await { diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index b487497408..3e1d75dff2 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -83,6 +83,9 @@ pub struct NetworkRequestState> { /// A flag indicating that `HotShotEvent::Shutdown` has been received pub spawned_tasks: BTreeMap>>, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl> Drop for NetworkRequestState { @@ -111,7 +114,7 @@ impl> TaskState for NetworkRequest let prop_view = proposal.data.view_number(); let prop_epoch = TYPES::Epoch::new(epoch_from_block_number( proposal.data.block_header.block_number(), - TYPES::EPOCH_HEIGHT, + self.epoch_height, )); // If we already have the VID shares for the next view, do nothing. diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index 6d945894f3..d0a5631fc5 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -26,13 +26,12 @@ use hotshot_example_types::{ use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2}, - message::{GeneralConsensusMessage, Proposal, UpgradeLock}, + data::{Leaf2, VidDisperse, VidDisperseShare2}, + message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, - simple_vote::{DaData2, DaVote2, QuorumData2, QuorumVote2, SimpleVote, VersionedVoteData}, + simple_vote::{DaData2, DaVote2, SimpleVote, VersionedVoteData}, traits::{ block_contents::vid_commitment, - consensus_api::ConsensusApi, election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, }, @@ -47,7 +46,7 @@ use serde::Serialize; use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; -/// create the [`SystemContextHandle`] from a node id +/// create the [`SystemContextHandle`] from a node id, with no epochs /// # Panics /// if cannot create a [`HotShotInitializer`] pub async fn build_system_handle< @@ -65,7 +64,8 @@ pub async fn build_system_handle< Sender>>, Receiver>>, ) { - let builder: TestDescription = TestDescription::default_multiple_rounds(); + let mut builder: TestDescription = TestDescription::default_multiple_rounds(); + builder.epoch_height = 0; let launcher = builder.gen_launcher(node_id); build_system_handle_from_launcher(node_id, &launcher).await @@ -397,28 +397,6 @@ pub async fn build_da_certificate( .await } -pub async fn build_vote, V: Versions>( - handle: &SystemContextHandle, - proposal: QuorumProposal2, -) -> GeneralConsensusMessage { - let view = proposal.view_number; - - let leaf: Leaf2<_> = Leaf2::from_quorum_proposal(&proposal); - let vote = QuorumVote2::::create_signed_vote( - QuorumData2 { - leaf_commit: leaf.commit(), - epoch: leaf.epoch(), - }, - view, - &handle.public_key(), - handle.private_key(), - &handle.hotshot.upgrade_lock, - ) - .await - .expect("Failed to create quorum vote"); - GeneralConsensusMessage::::Vote(vote.to_vote()) -} - /// This function permutes the provided input vector `inputs`, given some order provided within the /// `order` vector. /// diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index edabe71f90..600e4a87a8 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -95,6 +95,8 @@ pub struct OverallSafetyTask>>, /// sender to test event channel pub test_sender: Sender, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> @@ -186,8 +188,8 @@ impl, V: Versions> TestTas }; if let Some(ref key) = key { - if *key.epoch() > self.ctx.latest_epoch { - self.ctx.latest_epoch = *key.epoch(); + if *key.epoch(self.epoch_height) > self.ctx.latest_epoch { + self.ctx.latest_epoch = *key.epoch(self.epoch_height); } } diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index c91d45a4d3..266dc39b7e 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -4,9 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{ - any::TypeId, collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration, -}; +use std::{collections::HashMap, num::NonZeroUsize, rc::Rc, sync::Arc, time::Duration}; use anyhow::{ensure, Result}; use async_lock::RwLock; @@ -17,8 +15,8 @@ use hotshot::{ HotShotInitializer, MarketplaceConfig, SystemContext, TwinsHandlerState, }; use hotshot_example_types::{ - auction_results_provider_types::TestAuctionResultsProvider, node_types::EpochsTestVersions, - state_types::TestInstanceState, storage_types::TestStorage, testable_delay::DelayConfig, + auction_results_provider_types::TestAuctionResultsProvider, state_types::TestInstanceState, + storage_types::TestStorage, testable_delay::DelayConfig, }; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -101,6 +99,8 @@ pub struct TestDescription, V: Ver pub start_solver: bool, /// boxed closure used to validate the resulting transactions pub validate_transactions: TransactionValidator, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } pub fn nonempty_block_threshold(threshold: (u64, u64)) -> TransactionValidator { @@ -387,6 +387,7 @@ impl, V: Versions> Default fn default() -> Self { let num_nodes_with_stake = 7; Self { + epoch_height: 10, timing_data: TimingData::default(), num_nodes_with_stake, start_nodes: num_nodes_with_stake, @@ -487,12 +488,6 @@ where // This is the config for node 0 0 < da_staked_committee_size, ); - // let da_committee_nodes = known_nodes[0..da_committee_size].to_vec(); - let epoch_height = if TypeId::of::() == TypeId::of::() { - 10 - } else { - 0 - }; let config = HotShotConfig { start_threshold: (1, 1), num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), @@ -516,7 +511,7 @@ where stop_proposing_time: 0, start_voting_time: u64::MAX, stop_voting_time: 0, - epoch_height, + epoch_height: self.epoch_height, }; let TimingData { next_view_timeout, diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 7d0c1cc6bb..600be48823 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -202,6 +202,7 @@ where // add safety task let overall_safety_task_state = OverallSafetyTask { handles: Arc::clone(&handles), + epoch_height: launcher.metadata.epoch_height, ctx: RoundCtx::default(), properties: launcher.metadata.overall_safety_properties.clone(), error: None, diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index c9f7ae33db..3a267651bc 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -37,6 +37,7 @@ async fn libp2p_network() { next_view_timeout: 4000, ..Default::default() }, + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -68,6 +69,7 @@ async fn libp2p_network_failures_2() { next_view_timeout: 4000, ..Default::default() }, + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 987a07224a..e3b7441fa7 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -37,6 +37,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, ..TestDescription::default() } }, @@ -56,6 +57,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 10, ..TestDescription::default() } }, @@ -75,6 +77,7 @@ cross_tests!( // duration: Duration::from_secs(60), // }, // ), +// epoch_height: 10, // ..TestDescription::default() // } // }, @@ -94,6 +97,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, ..TestDescription::default() }; @@ -126,6 +130,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 10, ..TestDescription::default() }; @@ -158,6 +163,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, ..TestDescription::default() }; @@ -198,6 +204,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 10, ..TestDescription::default() }; @@ -232,6 +239,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.num_bootstrap_nodes = 10; metadata.num_nodes_with_stake = 12; metadata.da_staked_committee_size = 12; @@ -254,6 +262,7 @@ cross_tests!( Metadata: { let mut metadata = TestDescription::default_more_nodes(); metadata.num_bootstrap_nodes = 10; + metadata.epoch_height = 10; metadata.num_nodes_with_stake = 12; metadata.da_staked_committee_size = 12; metadata.start_nodes = 12; @@ -283,6 +292,7 @@ cross_tests!( start_nodes: 11, num_bootstrap_nodes: 11, da_staked_committee_size: 11, + epoch_height: 10, ..TestDescription::default() } }, @@ -303,6 +313,7 @@ cross_tests!( duration: Duration::from_millis(100000), }, ), + epoch_height: 10, ..TestDescription::default() }; // after the first 3 leaders the next leader is down. It's a hack to make sure we decide in diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index 242b5ea2c0..cf9ac8c83f 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -42,6 +42,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.num_bootstrap_nodes = 10; metadata.num_nodes_with_stake = 12; metadata.da_staked_committee_size = 12; @@ -84,6 +85,7 @@ cross_tests!( metadata.num_nodes_with_stake = 12; metadata.da_staked_committee_size = 12; metadata.start_nodes = 12; + metadata.epoch_height = 10; let dead_nodes = vec![ ChangeNode { idx: 10, @@ -124,6 +126,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.num_bootstrap_nodes = 10; metadata.num_nodes_with_stake = 12; metadata.da_staked_committee_size = 12; diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index bf48a2558c..f012c22f2a 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -45,6 +45,7 @@ async fn test_catchup() { updown: NodeAction::Up, }]; + metadata.epoch_height = 0; metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.num_nodes_with_stake = 20; @@ -103,6 +104,7 @@ async fn test_catchup_cdn() { idx: 18, updown: NodeAction::Up, }]; + metadata.epoch_height = 0; metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.num_nodes_with_stake = 20; @@ -156,6 +158,7 @@ async fn test_catchup_one_node() { idx: 18, updown: NodeAction::Up, }]; + metadata.epoch_height = 0; metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.num_nodes_with_stake = 20; @@ -218,6 +221,7 @@ async fn test_catchup_in_view_sync() { }, ]; + metadata.epoch_height = 0; metadata.timing_data = timing_data; metadata.start_nodes = 18; metadata.num_nodes_with_stake = 20; @@ -275,6 +279,7 @@ async fn test_catchup_reload() { updown: NodeAction::Up, }]; + metadata.epoch_height = 0; metadata.timing_data = timing_data; metadata.start_nodes = 19; metadata.skip_late = true; @@ -329,6 +334,7 @@ cross_tests!( } metadata.timing_data = timing_data; + metadata.epoch_height = 0; metadata.start_nodes = 20; metadata.num_nodes_with_stake = 20; @@ -385,6 +391,7 @@ cross_tests!( metadata.timing_data = timing_data; metadata.start_nodes = 20; metadata.num_nodes_with_stake = 20; + metadata.epoch_height = 0; // Explicitly make the DA tiny to exaggerate a missing proposal. metadata.da_staked_committee_size = 1; @@ -446,6 +453,7 @@ cross_tests!( metadata.start_nodes = 10; metadata.num_nodes_with_stake = 10; + metadata.epoch_height = 0; // Explicitly make the DA small to simulate real network. metadata.da_staked_committee_size = 4; diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index 0ab5f68d19..2a08d2c871 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -51,6 +51,7 @@ cross_tests!( }, ), behaviour, + epoch_height: 0, ..TestDescription::default() } }, @@ -78,6 +79,7 @@ cross_tests!( ), behaviour, num_nodes_with_stake: 12, + epoch_height: 0, ..TestDescription::default() }; @@ -115,6 +117,7 @@ cross_tests!( }, ), behaviour, + epoch_height: 0, ..TestDescription::default() }; @@ -155,6 +158,7 @@ cross_tests!( }, ), behaviour, + epoch_height: 0, ..TestDescription::default() }; @@ -196,6 +200,7 @@ cross_tests!( }, ), behaviour, + epoch_height: 0, ..TestDescription::default() }; @@ -237,6 +242,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, behaviour, ..TestDescription::default() }; diff --git a/testing/tests/tests_3/memory_network.rs b/testing/tests/tests_3/memory_network.rs index b99961af3e..875e5b40e9 100644 --- a/testing/tests/tests_3/memory_network.rs +++ b/testing/tests/tests_3/memory_network.rs @@ -52,8 +52,6 @@ use tracing::{instrument, trace}; pub struct Test; impl NodeType for Test { - const EPOCH_HEIGHT: u64 = 10; - type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index a8a2dbb14b..3dd8e3ceb7 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -23,6 +23,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.num_bootstrap_nodes = 17; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the @@ -60,7 +61,9 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.num_bootstrap_nodes = 17; + metadata.epoch_height = 10; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. diff --git a/testing/tests/tests_4/byzantine_tests.rs b/testing/tests/tests_4/byzantine_tests.rs index 7d89d6c870..d9173aebd3 100644 --- a/testing/tests/tests_4/byzantine_tests.rs +++ b/testing/tests/tests_4/byzantine_tests.rs @@ -41,6 +41,7 @@ // }, // ), // behaviour, +// epoch_height: 0, // ..TestDescription::default() // }; diff --git a/testing/tests/tests_4/test_marketplace.rs b/testing/tests/tests_4/test_marketplace.rs index b8a7d8c84c..02bb7fee5e 100644 --- a/testing/tests/tests_4/test_marketplace.rs +++ b/testing/tests/tests_4/test_marketplace.rs @@ -37,6 +37,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, fallback_builder: BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) @@ -65,6 +66,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, upgrade_view: Some(5), validate_transactions: nonempty_block_threshold((40,50)), ..TestDescription::default() @@ -88,6 +90,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, builders: vec1![ BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) @@ -118,6 +121,7 @@ cross_tests!( duration: Duration::from_secs(60), }, ), + epoch_height: 0, builders: vec1![ BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) diff --git a/testing/tests/tests_4/test_with_builder_failures.rs b/testing/tests/tests_4/test_with_builder_failures.rs index 947ece0404..03430c41e4 100644 --- a/testing/tests/tests_4/test_with_builder_failures.rs +++ b/testing/tests/tests_4/test_with_builder_failures.rs @@ -23,12 +23,14 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_multiple_rounds(); + metadata.epoch_height = 0; // Every block should contain at least one transaction - builders are never offline // simultaneously metadata.overall_safety_properties.transaction_threshold = 1; // Generate a lot of transactions so that freshly restarted builders still have // transactions metadata.txn_description = TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(1)); + metadata.epoch_height = 0; // Two builders running as follows: // view 1st 2nd diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 4fd033d0af..04382e8344 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -23,6 +23,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index 67fb0e4030..e47bf9ad2f 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -42,6 +42,7 @@ async fn test_combined_network() { duration: Duration::from_secs(120), }, ), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -75,6 +76,7 @@ async fn test_combined_network_cdn_crash() { duration: Duration::from_secs(220), }, ), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -122,6 +124,7 @@ async fn test_combined_network_reup() { duration: Duration::from_secs(220), }, ), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -173,6 +176,7 @@ async fn test_combined_network_half_dc() { duration: Duration::from_secs(220), }, ), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -245,6 +249,7 @@ async fn test_stress_combined_network_fuzzy() { duration: Duration::from_secs(120), }, ), + epoch_height: 0, ..TestDescription::default_stress() }; diff --git a/testing/tests/tests_5/push_cdn.rs b/testing/tests/tests_5/push_cdn.rs index b90a038d99..bac333ebb5 100644 --- a/testing/tests/tests_5/push_cdn.rs +++ b/testing/tests/tests_5/push_cdn.rs @@ -37,6 +37,7 @@ async fn push_cdn_network() { duration: Duration::from_secs(60), }, ), + epoch_height: 0, ..TestDescription::default() }; metadata diff --git a/testing/tests/tests_5/test_with_failures.rs b/testing/tests/tests_5/test_with_failures.rs index 4fc96e5564..349007d152 100644 --- a/testing/tests/tests_5/test_with_failures.rs +++ b/testing/tests/tests_5/test_with_failures.rs @@ -24,6 +24,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 0; metadata.num_bootstrap_nodes = 19; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 4466a4e2f6..54f6e267d2 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -27,6 +27,7 @@ async fn test_timeout() { let mut metadata: TestDescription = TestDescription { num_nodes_with_stake: 10, start_nodes: 10, + epoch_height: 0, ..Default::default() }; let dead_nodes = vec![ChangeNode { diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 8bc6e2d885..7d1e5545b8 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -37,6 +37,7 @@ async fn libp2p_network_sync() { delay_high_ms: 30, delay_low_ms: 4, })), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -71,6 +72,7 @@ async fn test_memory_network_sync() { delay_high_ms: 30, delay_low_ms: 4, })), + epoch_height: 0, ..TestDescription::default() }; metadata @@ -108,6 +110,7 @@ async fn libp2p_network_async() { delay_low_ms: 4, delay_high_ms: 30, })), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -155,6 +158,7 @@ async fn test_memory_network_async() { delay_low_ms: 4, delay_high_ms: 30, })), + epoch_height: 0, ..TestDescription::default() }; metadata @@ -206,6 +210,7 @@ async fn test_memory_network_partially_sync() { gst: std::time::Duration::from_millis(1000), start: Instant::now(), })), + epoch_height: 0, ..TestDescription::default() }; metadata @@ -244,6 +249,7 @@ async fn libp2p_network_partially_sync() { gst: std::time::Duration::from_millis(1000), start: Instant::now(), })), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; @@ -283,6 +289,7 @@ async fn test_memory_network_chaos() { repeat_low: 1, repeat_high: 5, })), + epoch_height: 0, ..TestDescription::default() }; metadata @@ -316,6 +323,7 @@ async fn libp2p_network_chaos() { repeat_low: 1, repeat_high: 5, })), + epoch_height: 0, ..TestDescription::default_multiple_rounds() }; diff --git a/types/src/data.rs b/types/src/data.rs index 2acafe1268..03738c1407 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -675,7 +675,6 @@ impl From> for Leaf2 { Self { view_number: leaf.view_number, - epoch: TYPES::Epoch::genesis(), justify_qc: leaf.justify_qc.to_qc2(), next_epoch_justify_qc: None, parent_commitment: Commitment::from_raw(bytes), @@ -802,9 +801,6 @@ pub struct Leaf2 { /// CurView from leader when proposing leaf view_number: TYPES::View, - /// An epoch to which the data belongs to. Relevant for validating against the correct stake table - epoch: TYPES::Epoch, - /// Per spec, justification justify_qc: QuorumCertificate2, @@ -879,7 +875,6 @@ impl Leaf2 { upgrade_certificate: None, block_header: block_header.clone(), block_payload: Some(payload), - epoch: TYPES::Epoch::genesis(), view_change_evidence: None, } } @@ -888,8 +883,11 @@ impl Leaf2 { self.view_number } /// Epoch in which this leaf was created. - pub fn epoch(&self) -> TYPES::Epoch { - self.epoch + pub fn epoch(&self, epoch_height: u64) -> TYPES::Epoch { + TYPES::Epoch::new(epoch_from_block_number( + self.block_header.block_number(), + epoch_height, + )) } /// Height of this leaf in the chain. /// @@ -1038,7 +1036,6 @@ impl PartialEq for Leaf2 { fn eq(&self, other: &Self) -> bool { let Leaf2 { view_number, - epoch, justify_qc, next_epoch_justify_qc, parent_commitment, @@ -1049,7 +1046,6 @@ impl PartialEq for Leaf2 { } = self; *view_number == other.view_number - && *epoch == other.epoch && *justify_qc == other.justify_qc && *next_epoch_justify_qc == other.next_epoch_justify_qc && *parent_commitment == other.parent_commitment @@ -1424,10 +1420,6 @@ impl Leaf2 { Self { view_number: *view_number, - epoch: TYPES::Epoch::new(epoch_from_block_number( - quorum_proposal.block_header.block_number(), - TYPES::EPOCH_HEIGHT, - )), justify_qc: justify_qc.clone(), next_epoch_justify_qc: next_epoch_justify_qc.clone(), parent_commitment: justify_qc.data().leaf_commit, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index f783c95dec..77a4971e24 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -212,8 +212,6 @@ pub trait NodeType: type View: ConsensusTime + Display; /// Same as above but for epoch. type Epoch: ConsensusTime + Display; - /// constant for epoch height - const EPOCH_HEIGHT: u64; /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: Debug From 19e3b12e3d9d505485439a704d6e1714a49953f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 12:26:39 -0500 Subject: [PATCH 1345/1393] Bump the all group across 1 directory with 10 updates (#4001) Bumps the all group with 10 updates in the / directory: | Package | From | To | | --- | --- | --- | | [async-broadcast](https://github.com/smol-rs/async-broadcast) | `0.7.1` | `0.7.2` | | [async-trait](https://github.com/dtolnay/async-trait) | `0.1.83` | `0.1.84` | | [serde](https://github.com/serde-rs/serde) | `1.0.216` | `1.0.217` | | [serde_json](https://github.com/serde-rs/json) | `1.0.133` | `1.0.134` | | [thiserror](https://github.com/dtolnay/thiserror) | `2.0.6` | `2.0.9` | | [reqwest](https://github.com/seanmonstar/reqwest) | `0.12.9` | `0.12.12` | | [anyhow](https://github.com/dtolnay/anyhow) | `1.0.94` | `1.0.95` | | [quote](https://github.com/dtolnay/quote) | `1.0.37` | `1.0.38` | | [syn](https://github.com/dtolnay/syn) | `2.0.90` | `2.0.95` | | [itertools](https://github.com/rust-itertools/itertools) | `0.13.0` | `0.14.0` | Updates `async-broadcast` from 0.7.1 to 0.7.2 - [Release notes](https://github.com/smol-rs/async-broadcast/releases) - [Changelog](https://github.com/smol-rs/async-broadcast/blob/master/CHANGELOG.md) - [Commits](https://github.com/smol-rs/async-broadcast/compare/0.7.1...0.7.2) Updates `async-trait` from 0.1.83 to 0.1.84 - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.83...0.1.84) Updates `serde` from 1.0.216 to 1.0.217 - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.216...v1.0.217) Updates `serde_json` from 1.0.133 to 1.0.134 - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.133...v1.0.134) Updates `thiserror` from 2.0.6 to 2.0.9 - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/2.0.6...2.0.9) Updates `reqwest` from 0.12.9 to 0.12.12 - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.12.9...v0.12.12) Updates `anyhow` from 1.0.94 to 1.0.95 - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.94...1.0.95) Updates `quote` from 1.0.37 to 1.0.38 - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.37...1.0.38) Updates `syn` from 2.0.90 to 2.0.95 - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.90...2.0.95) Updates `itertools` from 0.13.0 to 0.14.0 - [Changelog](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-itertools/itertools/compare/v0.13.0...v0.14.0) --- updated-dependencies: - dependency-name: async-broadcast dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: reqwest dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: itertools dependency-type: direct:production update-type: version-update:semver-minor dependency-group: all ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- testing/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/Cargo.toml b/testing/Cargo.toml index 36870d8fc2..b21799ba6b 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -32,7 +32,7 @@ hotshot-macros = { path = "../macros" } hotshot-task = { path = "../task" } hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-types = { path = "../types" } -itertools = "0.13.0" +itertools = "0.14.0" jf-vid = { workspace = true } lru = { workspace = true } portpicker = { workspace = true } From 74088c305b10ca27daa0d4897de1d24107f6a330 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 7 Jan 2025 14:03:45 -0500 Subject: [PATCH 1346/1393] Minor cleanup (#4003) --- task-impls/src/quorum_vote/handlers.rs | 118 ++++++++++++------------- task-impls/src/quorum_vote/mod.rs | 72 ++++++++------- utils/src/anytrace.rs | 32 ++++++- 3 files changed, 121 insertions(+), 101 deletions(-) diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index cbc65bd672..fe11d69745 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -112,32 +112,31 @@ async fn store_and_get_computed_drb_result< { return Ok(*computed_result); } - if let Some((task_epoch, computation)) = &mut task_state.drb_computation { - if *task_epoch == epoch_number { - // If the computation is finished, remove the task and store the result. - if computation.is_finished() { - match computation.await { - Ok(computed_result) => { - let mut consensus_writer = task_state.consensus.write().await; - consensus_writer - .drb_seeds_and_results - .results - .insert(epoch_number, computed_result); - task_state.drb_computation = None; - Ok(computed_result) - } - Err(e) => { - bail!("Failed to get the DRB result though the computation is finished: {:?}.", e); - } - } - } else { - bail!("DRB computation isn't finished."); - } - } else { - bail!("DRB computation isn't for the next epoch."); + + let (task_epoch, computation) = + (&mut task_state.drb_computation).context(warn!("DRB computation task doesn't exist."))?; + + ensure!( + *task_epoch == epoch_number, + info!("DRB computation is not for the next epoch.") + ); + + ensure!( + computation.is_finished(), + info!("DRB computation has not yet finished.") + ); + + match computation.await { + Ok(result) => { + let mut consensus_writer = task_state.consensus.write().await; + consensus_writer + .drb_seeds_and_results + .results + .insert(epoch_number, result); + task_state.drb_computation = None; + Ok(result) } - } else { - bail!("DRB computation task doesn't exist."); + Err(e) => Err(warn!("Error in DRB calculation: {:?}.", e)), } } @@ -151,47 +150,46 @@ async fn verify_drb_result, V: Ver proposal: &QuorumProposal2, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { - let current_block_number = proposal.block_header.block_number(); - // Skip if this is not the expected block. - if task_state.epoch_height != 0 - && is_last_block_in_epoch(current_block_number, task_state.epoch_height) - { - let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + if task_state.epoch_height == 0 + || !is_last_block_in_epoch( proposal.block_header.block_number(), task_state.epoch_height, - )); + ) + { + tracing::debug!("Skipping DRB result verification"); + return Ok(()); + } - let Some(proposal_result) = proposal.next_drb_result else { - bail!( - "The proposal for the last block of an epoch should contain the DRB result for the next epoch." - ); - }; + let epoch = TYPES::Epoch::new(epoch_from_block_number( + proposal.block_header.block_number(), + task_state.epoch_height, + )); - // Verify and store the result depending on our membership. - if task_state - .membership - .read() - .await - .has_stake(&task_state.public_key, current_epoch_number) - { - let computed_result = - store_and_get_computed_drb_result(current_epoch_number + 1, task_state).await?; - if proposal_result != computed_result { - bail!("Inconsistent DRB result for the next epoch."); - } - return Ok(()); - } else if task_state - .membership - .read() - .await - .has_stake(&task_state.public_key, current_epoch_number + 1) - { - store_received_drb_result(current_epoch_number + 1, proposal_result, task_state) - .await?; - } + let proposal_result = proposal + .next_drb_result + .context(info!("Proposal is missing the DRB result."))?; + + let membership_reader = task_state.membership.read().await; + + let has_stake_current_epoch = membership_reader.has_stake(&task_state.public_key, epoch); + let has_stake_next_epoch = membership_reader.has_stake(&task_state.public_key, epoch + 1); + + drop(membership_reader); + + if has_stake_current_epoch { + let computed_result = store_and_get_computed_drb_result(epoch + 1, task_state).await?; + + ensure!(proposal_result == computed_result, warn!("Our calculated DRB result is {:?}, which does not match the proposed DRB result of {:?}", computed_result, proposal_result)); + + Ok(()) + } else if has_stake_next_epoch { + store_received_drb_result(epoch + 1, proposal_result, task_state).await + } else { + Err(error!( + "We are not participating in either the current or next epoch" + )) } - Ok(()) } /// Start the DRB computation task for the next epoch. diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 8ee1223cf9..541c1d5fb2 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -498,7 +498,8 @@ impl, V: Versions> QuorumVoteTaskS .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit); if version >= V::Epochs::VERSION && is_justify_qc_forming_eqc { - self.handle_eqc_voting(proposal, parent_leaf, event_sender, event_receiver) + let _ = self + .handle_eqc_voting(proposal, parent_leaf, event_sender, event_receiver) .await; } else { self.create_dependency_task_if_new( @@ -658,55 +659,56 @@ impl, V: Versions> QuorumVoteTaskS parent_leaf: &Leaf2, event_sender: Sender>>, event_receiver: Receiver>>, - ) { + ) -> Result<()> { tracing::info!("Reached end of epoch. Justify QC is for the last block in the epoch."); let proposed_leaf = Leaf2::from_quorum_proposal(&proposal.data); let parent_commitment = parent_leaf.commit(); - if proposed_leaf.height() != parent_leaf.height() - || proposed_leaf.payload_commitment() != parent_leaf.payload_commitment() - { - tracing::error!("Justify QC is for the last block but it's not extended and a new block is proposed. Not voting!"); - return; - } + + ensure!( + proposed_leaf.height() == parent_leaf.height() && proposed_leaf.payload_commitment() == parent_leaf.payload_commitment(), + error!("Justify QC is for the last block but it's not extended and a new block is proposed. Not voting!") + ); tracing::info!( "Reached end of epoch. Proposed leaf has the same height and payload as its parent." ); let mut consensus_writer = self.consensus.write().await; - let Some(vid_shares) = consensus_writer + + let vid_shares = consensus_writer .vid_shares() .get(&parent_leaf.view_number()) - else { - tracing::warn!( - "Proposed leaf is the same as its parent but we don't have our VID for it" - ); - return; - }; - let Some(vid) = vid_shares.get(&self.public_key) else { - tracing::warn!( + .context(warn!( "Proposed leaf is the same as its parent but we don't have our VID for it" - ); - return; - }; + ))?; + + let vid = vid_shares.get(&self.public_key).context(warn!( + "Proposed leaf is the same as its parent but we don't have our VID for it" + ))?; + let mut updated_vid = vid.clone(); updated_vid.data.view_number = proposal.data.view_number; consensus_writer.update_vid_shares(updated_vid.data.view_number, updated_vid.clone()); + drop(consensus_writer); - if proposed_leaf.parent_commitment() != parent_commitment { - tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); - return; - } + ensure!( + proposed_leaf.parent_commitment() == parent_commitment, + warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote.") + ); + // Update our persistent storage of the proposal. If we cannot store the proposal return // and error so we don't vote - if let Err(e) = self.storage.write().await.append_proposal2(proposal).await { - tracing::error!("failed to store proposal, not voting. error = {e:#}"); - return; - } + self.storage + .write() + .await + .append_proposal2(proposal) + .await + .wrap() + .context(|e| error!("failed to store proposal, not voting. error = {}", e))?; // Update internal state - if let Err(e) = update_shared_state::( + update_shared_state::( OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), event_sender.clone(), event_receiver.clone().deactivate(), @@ -723,10 +725,7 @@ impl, V: Versions> QuorumVoteTaskS self.epoch_height, ) .await - { - tracing::error!("Failed to update shared consensus state; error = {e:#}"); - return; - } + .context(|e| error!("Failed to update shared consensus state, error = {}", e))?; let current_block_number = proposed_leaf.height(); let current_epoch = TYPES::Epoch::new(epoch_from_block_number( @@ -757,7 +756,8 @@ impl, V: Versions> QuorumVoteTaskS ) .await; } - if let Err(e) = submit_vote::( + + submit_vote::( event_sender.clone(), Arc::clone(&self.membership), self.public_key.clone(), @@ -771,9 +771,7 @@ impl, V: Versions> QuorumVoteTaskS self.epoch_height, ) .await - { - tracing::debug!("Failed to vote; error = {e:#}"); - } + .context(|e| debug!("Failed to submit vote; error = {}", e)) } } diff --git a/utils/src/anytrace.rs b/utils/src/anytrace.rs index 05d6c1d9ea..12e129ca01 100644 --- a/utils/src/anytrace.rs +++ b/utils/src/anytrace.rs @@ -147,15 +147,15 @@ fn concatenate(error: &String, cause: &String) -> String { } /// Trait for converting error types to a `Result`. -pub trait Context { +pub trait Context { /// Attach context to the given error. /// /// # Errors /// Propagates errors from `self` - fn context(self, error: Error) -> Result; + fn context(self, error: E) -> Result; } -impl Context for Result { +impl Context for Result { fn context(self, error: Error) -> Result { match self { Ok(t) => Ok(t), @@ -167,7 +167,22 @@ impl Context for Result { } } -impl Context for Option { +impl Context for Result +where + F: Fn(Error) -> Error, +{ + fn context(self, error: F) -> Result { + match self { + Ok(t) => Ok(t), + Err(cause) => Err(Error { + level: max(error(cause.clone()).level, cause.level), + message: concatenate(&error(cause.clone()).message, &format!("{cause}")), + }), + } + } +} + +impl Context for Option { fn context(self, error: Error) -> Result { match self { Some(t) => Ok(t), @@ -176,6 +191,15 @@ impl Context for Option { } } +impl<'a, T> Context<&'a mut T, Error> for &'a mut Option { + fn context(self, error: Error) -> Result<&'a mut T> { + match self { + Some(t) => Ok(t), + None => Err(error), + } + } +} + #[cfg(test)] mod test { use super::*; From 7626b56c137be372cce860be3ea590a156757be9 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 7 Jan 2025 15:51:58 -0500 Subject: [PATCH 1347/1393] Add default implementations for new storage methods (#4006) --- types/src/data.rs | 14 ++++++++++ types/src/traits/storage.rs | 55 +++++++++++++++++++++++++++++-------- 2 files changed, 57 insertions(+), 12 deletions(-) diff --git a/types/src/data.rs b/types/src/data.rs index 03738c1407..e84b2f03c1 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -997,6 +997,20 @@ impl Leaf2 { Ok(()) } + + /// Converts a `Leaf2` to a `Leaf`. This operation is fundamentally unsafe and should not be used. + pub fn to_leaf_unsafe(self) -> Leaf { + let bytes: [u8; 32] = self.parent_commitment.into(); + + Leaf { + view_number: self.view_number, + justify_qc: self.justify_qc.to_qc(), + parent_commitment: Commitment::from_raw(bytes), + block_header: self.block_header, + upgrade_certificate: self.upgrade_certificate, + block_payload: self.block_payload, + } + } } impl Committable for Leaf2 { diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 5a4fdb4586..a2ecfdd83a 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -13,6 +13,7 @@ use std::collections::BTreeMap; use anyhow::Result; use async_trait::async_trait; +use committable::Commitment; use jf_vid::VidScheme; use super::node_implementation::NodeType; @@ -23,7 +24,7 @@ use crate::{ VidDisperseShare2, }, event::HotShotAction, - message::Proposal, + message::{convert_proposal, Proposal}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, @@ -36,8 +37,12 @@ pub trait Storage: Send + Sync + Clone { /// Add a proposal to the stored VID proposals. async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; /// Add a proposal to the stored VID proposals. - async fn append_vid2(&self, proposal: &Proposal>) - -> Result<()>; + async fn append_vid2( + &self, + proposal: &Proposal>, + ) -> Result<()> { + self.append_vid2(&convert_proposal(proposal.clone())).await + } /// Add a proposal to the stored DA proposals. async fn append_da( &self, @@ -49,7 +54,10 @@ pub trait Storage: Send + Sync + Clone { &self, proposal: &Proposal>, vid_commit: ::Commit, - ) -> Result<()>; + ) -> Result<()> { + self.append_da(&convert_proposal(proposal.clone()), vid_commit) + .await + } /// Add a proposal we sent to the store async fn append_proposal( &self, @@ -59,18 +67,25 @@ pub trait Storage: Send + Sync + Clone { async fn append_proposal2( &self, proposal: &Proposal>, - ) -> Result<()>; + ) -> Result<()> { + self.append_proposal(&convert_proposal(proposal.clone())) + .await + } /// Record a HotShotAction taken. async fn record_action(&self, view: TYPES::View, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the current high QC in storage. - async fn update_high_qc2(&self, high_qc: QuorumCertificate2) -> Result<()>; + async fn update_high_qc2(&self, high_qc: QuorumCertificate2) -> Result<()> { + self.update_high_qc(high_qc.to_qc()).await + } /// Update the current high QC in storage. async fn update_next_epoch_high_qc2( &self, - next_epoch_high_qc: NextEpochQuorumCertificate2, - ) -> Result<()>; + _next_epoch_high_qc: NextEpochQuorumCertificate2, + ) -> Result<()> { + Ok(()) + } /// Update the currently undecided state of consensus. This includes the undecided leaf chain, /// and the undecided state. async fn update_undecided_state( @@ -84,7 +99,21 @@ pub trait Storage: Send + Sync + Clone { &self, leaves: CommitmentMap>, state: BTreeMap>, - ) -> Result<()>; + ) -> Result<()> { + self.update_undecided_state( + leaves + .iter() + .map(|(&commitment, leaf)| { + ( + Commitment::from_raw(commitment.into()), + leaf.clone().to_leaf_unsafe(), + ) + }) + .collect(), + state, + ) + .await + } /// Upgrade the current decided upgrade certificate in storage. async fn update_decided_upgrade_certificate( &self, @@ -93,9 +122,11 @@ pub trait Storage: Send + Sync + Clone { /// Migrate leaves from `Leaf` to `Leaf2`, and proposals from `QuorumProposal` to `QuorumProposal2` async fn migrate_consensus( &self, - convert_leaf: fn(Leaf) -> Leaf2, - convert_proposal: fn( + _convert_leaf: fn(Leaf) -> Leaf2, + _convert_proposal: fn( Proposal>, ) -> Proposal>, - ) -> Result<()>; + ) -> Result<()> { + Ok(()) + } } From 8f532f9e9946236c45bef5949e3d0e4a66a0fc2c Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Wed, 8 Jan 2025 22:54:17 +0500 Subject: [PATCH 1348/1393] fix append_vid2 fn (#4011) --- types/src/traits/storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index a2ecfdd83a..f828d99ad1 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -41,7 +41,7 @@ pub trait Storage: Send + Sync + Clone { &self, proposal: &Proposal>, ) -> Result<()> { - self.append_vid2(&convert_proposal(proposal.clone())).await + self.append_vid(&convert_proposal(proposal.clone())).await } /// Add a proposal to the stored DA proposals. async fn append_da( From ab51302c72728dacf879d6398bafffee4c35a82d Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:52:45 -0500 Subject: [PATCH 1349/1393] Revert breaking type changes (#4014) --- task-impls/src/upgrade.rs | 1 - testing/tests/tests_1/vid_task.rs | 1 - types/src/data.rs | 5 ----- 3 files changed, 7 deletions(-) diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 3ff8e073a8..de1fcad3f8 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -291,7 +291,6 @@ impl UpgradeTaskState { let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), view_number: TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), - epoch: self.cur_epoch, }; let signature = TYPES::SignatureKey::sign( diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index adafcbfbf7..9105b2dd02 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -80,7 +80,6 @@ async fn test_vid_task() { num_transactions: encoded_transactions.len() as u64, }, view_number: ViewNumber::new(2), - epoch: EpochNumber::new(0), }; let message = Proposal { data: proposal.clone(), diff --git a/types/src/data.rs b/types/src/data.rs index e84b2f03c1..5daae25d43 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -146,8 +146,6 @@ pub struct DaProposal { pub metadata: >::Metadata, /// View this proposal applies to pub view_number: TYPES::View, - /// Epoch this proposal applies to - pub epoch: TYPES::Epoch, } /// A proposal to start providing data availability for a block. @@ -181,7 +179,6 @@ impl From> for DaProposal { encoded_transactions: da_proposal2.encoded_transactions, metadata: da_proposal2.metadata, view_number: da_proposal2.view_number, - epoch: TYPES::Epoch::new(0), } } } @@ -197,8 +194,6 @@ where pub upgrade_proposal: UpgradeProposalData, /// View this proposal applies to pub view_number: TYPES::View, - /// Epoch this proposal applies to - pub epoch: TYPES::Epoch, } /// VID dispersal data From 2a3f0cc9c95eb44c52929bfb78de7dc0442411a8 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 9 Jan 2025 09:06:01 -0500 Subject: [PATCH 1350/1393] [Libp2p] Generic DHT persistence (#3954) * generic DHT persistence * pr changes --- examples/infra/mod.rs | 5 +- .../src/traits/networking/libp2p_network.rs | 26 +- .../src/network/behaviours/dht/mod.rs | 21 +- .../src/network/behaviours/dht/store/mod.rs | 2 +- .../store/{file_backed.rs => persistent.rs} | 334 ++++++++++++------ libp2p-networking/src/network/def.rs | 25 +- libp2p-networking/src/network/node.rs | 51 +-- libp2p-networking/src/network/node/handle.rs | 12 +- 8 files changed, 321 insertions(+), 155 deletions(-) rename libp2p-networking/src/network/behaviours/dht/store/{file_backed.rs => persistent.rs} (53%) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 63dd75d203..85f161fffd 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -62,7 +62,9 @@ use hotshot_types::{ }, HotShotConfig, PeerConfig, ValidatorConfig, }; -use libp2p_networking::network::{GossipConfig, RequestResponseConfig}; +use libp2p_networking::network::{ + behaviours::dht::store::persistent::DhtNoPersistence, GossipConfig, RequestResponseConfig, +}; use rand::{rngs::StdRng, SeedableRng}; use surf_disco::Url; use tracing::{debug, error, info, warn}; @@ -738,6 +740,7 @@ where // Create the Libp2p network let libp2p_network = Libp2pNetwork::from_config( config.clone(), + DhtNoPersistence, Arc::clone(membership), GossipConfig::default(), RequestResponseConfig::default(), diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 39c61e638f..b832f751b7 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -52,7 +52,10 @@ use libp2p_identity::{ pub use libp2p_networking::network::{GossipConfig, RequestResponseConfig}; use libp2p_networking::{ network::{ - behaviours::dht::record::{Namespace, RecordKey, RecordValue}, + behaviours::dht::{ + record::{Namespace, RecordKey, RecordValue}, + store::persistent::{DhtNoPersistence, DhtPersistentStorage}, + }, spawn_network_node, transport::construct_auth_message, NetworkEvent::{self, DirectRequest, DirectResponse, GossipMsg}, @@ -273,6 +276,7 @@ impl TestableNetworkingImplementation for Libp2pNetwork { Arc::new( match Libp2pNetwork::new( Libp2pMetricsValue::default(), + DhtNoPersistence, config, pubkey.clone(), lookup_record_value, @@ -388,9 +392,10 @@ impl Libp2pNetwork { /// # Panics /// If we are unable to calculate the replication factor #[allow(clippy::too_many_arguments)] - pub async fn from_config( + pub async fn from_config( mut config: NetworkConfig, - membership: Arc>, + dht_persistent_storage: D, + quorum_membership: Arc>, gossip_config: GossipConfig, request_response_config: RequestResponseConfig, bind_address: Multiaddr, @@ -421,7 +426,7 @@ impl Libp2pNetwork { // Set the auth message and stake table config_builder - .stake_table(Some(membership)) + .stake_table(Some(quorum_membership)) .auth_message(Some(auth_message)); // The replication factor is the minimum of [the default and 2/3 the number of nodes] @@ -469,6 +474,7 @@ impl Libp2pNetwork { Ok(Libp2pNetwork::new( metrics, + dht_persistent_storage, node_config, pub_key.clone(), lookup_record_value, @@ -509,8 +515,9 @@ impl Libp2pNetwork { /// /// This will panic if there are less than 5 bootstrap nodes #[allow(clippy::too_many_arguments)] - pub async fn new( + pub async fn new( metrics: Libp2pMetricsValue, + dht_persistent_storage: D, config: NetworkNodeConfig, pk: T::SignatureKey, lookup_record_value: RecordValue, @@ -518,9 +525,12 @@ impl Libp2pNetwork { id: usize, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, ) -> Result, NetworkError> { - let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) - .await - .map_err(|e| NetworkError::ConfigError(format!("failed to spawn network node: {e}")))?; + let (mut rx, network_handle) = + spawn_network_node::(config.clone(), dht_persistent_storage, id) + .await + .map_err(|e| { + NetworkError::ConfigError(format!("failed to spawn network node: {e}")) + })?; // Add our own address to the bootstrap addresses let addr = network_handle.listen_addr(); diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index f785d1fa10..fd10422038 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -28,7 +28,10 @@ use libp2p::kad::{ store::RecordStore, Behaviour as KademliaBehaviour, BootstrapError, Event as KademliaEvent, }; use libp2p_identity::PeerId; -use store::{file_backed::FileBackedStore, validated::ValidatedStore}; +use store::{ + persistent::{DhtPersistentStorage, PersistentStore}, + validated::ValidatedStore, +}; use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; @@ -57,7 +60,7 @@ use crate::network::{ClientRequest, NetworkEvent}; /// - bootstrapping into the network /// - peer discovery #[derive(Debug)] -pub struct DHTBehaviour { +pub struct DHTBehaviour { /// in progress queries for nearby peers pub in_progress_get_closest_peers: HashMap>, /// List of in-progress get requests @@ -77,8 +80,8 @@ pub struct DHTBehaviour { /// Sender to the bootstrap task bootstrap_tx: Option>, - /// Phantom type for the key - phantom: PhantomData, + /// Phantom type for the key and persistent storage + phantom: PhantomData<(K, D)>, } /// State of bootstrapping @@ -106,7 +109,7 @@ pub enum DHTEvent { IsBootstrapped, } -impl DHTBehaviour { +impl DHTBehaviour { /// Give the handler a way to retry requests. pub fn set_retry(&mut self, tx: UnboundedSender) { self.retry_tx = Some(tx); @@ -143,7 +146,7 @@ impl DHTBehaviour { /// print out the routing table to stderr pub fn print_routing_table( &mut self, - kadem: &mut KademliaBehaviour>>, + kadem: &mut KademliaBehaviour, D>>, ) { let mut err = format!("KBUCKETS: PID: {:?}, ", self.peer_id); let v = kadem.kbuckets().collect::>(); @@ -179,7 +182,7 @@ impl DHTBehaviour { factor: NonZeroUsize, backoff: ExponentialBackoff, retry_count: u8, - kad: &mut KademliaBehaviour>>, + kad: &mut KademliaBehaviour, D>>, ) { // noop if retry_count == 0 { @@ -247,7 +250,7 @@ impl DHTBehaviour { /// update state based on recv-ed get query fn handle_get_query( &mut self, - store: &mut FileBackedStore>, + store: &mut PersistentStore, D>, record_results: GetRecordResult, id: QueryId, mut last: bool, @@ -405,7 +408,7 @@ impl DHTBehaviour { pub fn dht_handle_event( &mut self, event: KademliaEvent, - store: &mut FileBackedStore>, + store: &mut PersistentStore, D>, ) -> Option { match event { KademliaEvent::OutboundQueryProgressed { diff --git a/libp2p-networking/src/network/behaviours/dht/store/mod.rs b/libp2p-networking/src/network/behaviours/dht/store/mod.rs index d9f42b8b1c..3e20fe56ff 100644 --- a/libp2p-networking/src/network/behaviours/dht/store/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/store/mod.rs @@ -1,2 +1,2 @@ -pub mod file_backed; +pub mod persistent; pub mod validated; diff --git a/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs b/libp2p-networking/src/network/behaviours/dht/store/persistent.rs similarity index 53% rename from libp2p-networking/src/network/behaviours/dht/store/file_backed.rs rename to libp2p-networking/src/network/behaviours/dht/store/persistent.rs index ba442d5a9b..cd927c2470 100644 --- a/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs +++ b/libp2p-networking/src/network/behaviours/dht/store/persistent.rs @@ -1,27 +1,122 @@ -//! This file contains the `FileBackedStore` struct, which is a wrapper around a `RecordStore` -//! that occasionally saves the DHT to a file on disk. +//! This file contains the `PersistentStore` struct, which is a wrapper around a `RecordStore` +//! that occasionally saves the DHT to a persistent storage. -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use std::{ + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; use anyhow::Context; +use async_trait::async_trait; use delegate::delegate; use libp2p::kad::store::{RecordStore, Result}; use serde::{Deserialize, Serialize}; +use tokio::{sync::Semaphore, time::timeout}; use tracing::{debug, warn}; -/// A `RecordStore` wrapper that occasionally saves the DHT to a file on disk. -pub struct FileBackedStore { - /// The underlying store - underlying_store: R, +/// A trait that we use to save and load the DHT to a file on disk +/// or other storage medium +#[async_trait] +pub trait DhtPersistentStorage: Send + Sync + 'static + Clone { + /// Save the DHT (as a list of serializable records) to the persistent storage + /// + /// # Errors + /// - If we fail to save the DHT to the persistent storage provider + async fn save(&self, _records: Vec) -> anyhow::Result<()>; + + /// Load the DHT (as a list of serializable records) from the persistent storage + /// + /// # Errors + /// - If we fail to load the DHT from the persistent storage provider + async fn load(&self) -> anyhow::Result>; +} + +/// A no-op `PersistentStorage` that does not persist the DHT +#[derive(Clone)] +pub struct DhtNoPersistence; + +#[async_trait] +impl DhtPersistentStorage for DhtNoPersistence { + async fn save(&self, _records: Vec) -> anyhow::Result<()> { + Ok(()) + } + + async fn load(&self) -> anyhow::Result> { + Ok(vec![]) + } +} - /// The path to the file +/// A `PersistentStorage` that persists the DHT to a file on disk. Used mostly for +/// testing. +#[derive(Clone)] +pub struct DhtFilePersistence { + /// The path to the file on disk path: String, +} + +impl DhtFilePersistence { + /// Create a new `DhtFilePersistence` with the given path + #[must_use] + pub fn new(path: String) -> Self { + Self { path } + } +} + +#[async_trait] +impl DhtPersistentStorage for DhtFilePersistence { + /// Save the DHT to the file on disk + /// + /// # Errors + /// - If we fail to serialize the records + /// - If we fail to write the serialized records to the file + async fn save(&self, records: Vec) -> anyhow::Result<()> { + // Bincode-serialize the records + let to_save = + bincode::serialize(&records).with_context(|| "Failed to serialize records")?; + + // Write the serialized records to the file + std::fs::write(&self.path, to_save).with_context(|| "Failed to write records to file")?; + + Ok(()) + } + + /// Load the DHT from the file on disk + /// + /// # Errors + /// - If we fail to read the file + /// - If we fail to deserialize the records + async fn load(&self) -> anyhow::Result> { + // Read the contents of the file + let contents = + std::fs::read(&self.path).with_context(|| "Failed to read records from file")?; + + // Deserialize the contents + let records: Vec = + bincode::deserialize(&contents).with_context(|| "Failed to deserialize records")?; + + Ok(records) + } +} - /// The maximum number of records that can be added to the store before the store is saved to a file +/// A `RecordStore` wrapper that occasionally saves the DHT to a persistent storage. +pub struct PersistentStore { + /// The underlying record store + underlying_record_store: R, + + /// The persistent storage + persistent_storage: D, + + /// The semaphore for limiting the number of concurrent operations (to one) + semaphore: Arc, + + /// The maximum number of records that can be added to the store before the store is saved to the persistent storage max_record_delta: u64, - /// The running delta between the records in the file and the records in the underlying store - record_delta: u64, + /// The running delta between the records in the persistent storage and the records in the underlying store + record_delta: Arc, } /// A serializable version of a Libp2p `Record` @@ -117,24 +212,30 @@ impl TryFrom for libp2p::kad::Record { } } -impl FileBackedStore { - /// Create a new `FileBackedStore` with the given underlying store and path. +impl PersistentStore { + /// Create a new `PersistentStore` with the given underlying store and path. + /// On creation, the DHT is restored from the persistent storage if possible. /// /// `max_record_delta` is the maximum number of records that can be added to the store before - /// the store is saved to a file. - pub fn new(underlying_store: R, path: String, max_record_delta: u64) -> Self { + /// the store is saved to the persistent storage. + pub async fn new( + underlying_record_store: R, + persistent_storage: D, + max_record_delta: u64, + ) -> Self { // Create the new store - let mut store = FileBackedStore { - underlying_store, - path: path.clone(), + let mut store = PersistentStore { + underlying_record_store, + persistent_storage, max_record_delta, - record_delta: 0, + record_delta: Arc::new(AtomicU64::new(0)), + semaphore: Arc::new(Semaphore::new(1)), }; - // Try to restore the DHT from a file. If it fails, warn and start with an empty store - if let Err(err) = store.restore_from_file(path) { + // Try to restore the DHT from the persistent store. If it fails, warn and start with an empty store + if let Err(err) = store.restore_from_persistent_storage().await { warn!( - "Failed to restore DHT from file: {:?}. Starting with empty store", + "Failed to restore DHT from persistent storage: {:?}. Starting with empty store", err ); } @@ -143,17 +244,19 @@ impl FileBackedStore { store } - /// Attempt to save the DHT to the file at the given path + /// Try saving the DHT to persistent storage if a task is not already in progress. /// - /// # Errors - /// - If we fail to serialize the DHT - /// - If we fail to write the serialized DHT to the file - pub fn save_to_file(&mut self) -> anyhow::Result<()> { - debug!("Saving DHT to file"); + /// Returns `true` if the DHT was saved, `false` otherwise. + fn try_save_to_persistent_storage(&mut self) -> bool { + // Try to acquire the semaphore, warning if another save operation is already in progress + let Ok(permit) = Arc::clone(&self.semaphore).try_acquire_owned() else { + warn!("Skipping DHT save to persistent storage - another save operation is already in progress"); + return false; + }; // Get all records and convert them to their serializable counterparts let serializable_records: Vec<_> = self - .underlying_store + .underlying_record_store .records() .filter_map(|record| { SerializableRecord::try_from(record.into_owned()) @@ -164,33 +267,50 @@ impl FileBackedStore { }) .collect(); - // Serialize the records - let contents = bincode::serialize(&serializable_records) - .with_context(|| "Failed to serialize records")?; + // Spawn a task to save the DHT to the persistent storage + let persistent_storage = self.persistent_storage.clone(); + let record_delta = Arc::clone(&self.record_delta); + tokio::spawn(async move { + debug!("Saving DHT to persistent storage"); + + // Save the DHT to the persistent storage + match timeout( + Duration::from_secs(10), + persistent_storage.save(serializable_records), + ) + .await + .map_err(|_| anyhow::anyhow!("save operation timed out")) + { + Ok(Ok(())) => {} + Ok(Err(error)) | Err(error) => { + warn!("Failed to save DHT to persistent storage: {error}"); + } + }; - // Write the contents to the file - std::fs::write(self.path.clone(), contents) - .with_context(|| "Failed to write DHT to file")?; + // Reset the record delta + record_delta.store(0, Ordering::Release); - debug!("Saved DHT to file"); + drop(permit); - Ok(()) + debug!("Saved DHT to persistent storage"); + }); + + true } - /// Attempt to restore the DHT to the underlying store from the file at the given path + /// Attempt to restore the DHT to the underlying store from the persistent storage /// /// # Errors - /// - If we fail to read the file - /// - If we fail to deserialize the file - pub fn restore_from_file(&mut self, path: String) -> anyhow::Result<()> { - debug!("Restoring DHT from file"); - - // Read the contents of the file as a `HashMap` of `Key` to `Vec` - let contents = std::fs::read(path).with_context(|| "Failed to read DHT file")?; + /// - If we fail to load from the persistent storage + pub async fn restore_from_persistent_storage(&mut self) -> anyhow::Result<()> { + debug!("Restoring DHT from persistent storage"); - // Convert the contents to a `HashMap` of `RecordKey` to `Vec` - let serializable_records: Vec = - bincode::deserialize(&contents).with_context(|| "Failed to parse DHT file")?; + // Read the contents of the persistent store + let serializable_records = self + .persistent_storage + .load() + .await + .with_context(|| "Failed to read DHT from persistent storage")?; // Put all records into the new store for serializable_record in serializable_records { @@ -198,36 +318,40 @@ impl FileBackedStore { match libp2p::kad::Record::try_from(serializable_record) { Ok(record) => { // Put the record into the new store - if let Err(err) = self.underlying_store.put(record) { - warn!("Failed to restore record from file: {:?}", err); + if let Err(err) = self.underlying_record_store.put(record) { + warn!( + "Failed to restore record from persistent storage: {:?}", + err + ); } } Err(err) => { - warn!("Failed to parse record from file: {:?}", err); + warn!("Failed to parse record from persistent storage: {:?}", err); } }; } - debug!("Restored DHT from file"); + debug!("Restored DHT from persistent storage"); Ok(()) } } -/// Implement the `RecordStore` trait for `FileBackedStore` -impl RecordStore for FileBackedStore { +/// Implement the `RecordStore` trait for `PersistentStore` +impl RecordStore for PersistentStore { type ProvidedIter<'a> = R::ProvidedIter<'a> where - R: 'a; + R: 'a, + D: 'a; type RecordsIter<'a> = R::RecordsIter<'a> where - R: 'a; - + R: 'a, + D: 'a; // Delegate all `RecordStore` methods except `put` to the inner store delegate! { - to self.underlying_store { + to self.underlying_record_store { fn add_provider(&mut self, record: libp2p::kad::ProviderRecord) -> libp2p::kad::store::Result<()>; fn get(&self, k: &libp2p::kad::RecordKey) -> Option>; fn provided(&self) -> Self::ProvidedIter<'_>; @@ -237,39 +361,38 @@ impl RecordStore for FileBackedStore { } } - /// Overwrite the `put` method to potentially save the record to a file + /// Overwrite the `put` method to potentially sync the DHT to the persistent store fn put(&mut self, record: libp2p::kad::Record) -> Result<()> { // Try to write to the underlying store - let result = self.underlying_store.put(record); + let result = self.underlying_record_store.put(record); - // If the record was successfully written, update the record delta + // If the record was successfully written, if result.is_ok() { - self.record_delta += 1; + // Update the record delta + self.record_delta.fetch_add(1, Ordering::Relaxed); - // If the record delta is greater than the maximum record delta, try to save the file - if self.record_delta > self.max_record_delta { - if let Err(e) = self.save_to_file() { - warn!("Failed to save DHT to file: {:?}", e); - } + // Check if it's above the maximum record delta + if self.record_delta.load(Ordering::Relaxed) > self.max_record_delta { + // Try to save the DHT to persistent storage + self.try_save_to_persistent_storage(); } } result } - /// Overwrite the `remove` method to potentially remove the record from a file + /// Overwrite the `remove` method to potentially sync the DHT to the persistent store fn remove(&mut self, k: &libp2p::kad::RecordKey) { // Remove the record from the underlying store - self.underlying_store.remove(k); + self.underlying_record_store.remove(k); // Update the record delta - self.record_delta += 1; + self.record_delta.fetch_add(1, Ordering::Relaxed); - // If the record delta is greater than 10, try to save the file - if self.record_delta > 10 { - if let Err(e) = self.save_to_file() { - warn!("Failed to save DHT to file: {:?}", e); - } + // Check if it's above the maximum record delta + if self.record_delta.load(Ordering::Relaxed) > self.max_record_delta { + // Try to save the DHT to persistent storage + self.try_save_to_persistent_storage(); } } } @@ -284,19 +407,20 @@ mod tests { use super::*; - #[test] - fn test_save_and_restore() { + #[tokio::test] + async fn test_save_and_restore() { // Try initializing tracing let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) .try_init(); // Create a test store - let mut store = FileBackedStore::new( + let mut store = PersistentStore::new( MemoryStore::new(PeerId::random()), - "/tmp/test.dht".to_string(), + DhtFilePersistence::new("/tmp/test1.dht".to_string()), 10, - ); + ) + .await; // The key is a random 16-byte array let key = RecordKey::new(&rand::random::<[u8; 16]>().to_vec()); @@ -309,15 +433,19 @@ mod tests { .put(libp2p::kad::Record::new(key.clone(), random_value.to_vec())) .expect("Failed to put record into store"); - // Save the store to a file - store.save_to_file().expect("Failed to save store to file"); + // Try to save the store to a persistent storage + assert!(store.try_save_to_persistent_storage()); - // Create a new store from the file - let new_store = FileBackedStore::new( + // Wait a bit for the save to complete + tokio::time::sleep(Duration::from_millis(100)).await; + + // Create a new store from the persistent storage + let new_store = PersistentStore::new( MemoryStore::new(PeerId::random()), - "/tmp/test.dht".to_string(), + DhtFilePersistence::new("/tmp/test1.dht".to_string()), 10, - ); + ) + .await; // Check that the new store has the record let restored_record = new_store @@ -328,19 +456,20 @@ mod tests { assert_eq!(restored_record.value, random_value.to_vec()); } - #[test] - fn test_record_delta() { + #[tokio::test] + async fn test_record_delta() { // Try initializing tracing let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) .try_init(); // Create a test store - let mut store = FileBackedStore::new( + let mut store = PersistentStore::new( MemoryStore::new(PeerId::random()), - "/tmp/test.dht".to_string(), + DhtFilePersistence::new("/tmp/test2.dht".to_string()), 10, - ); + ) + .await; let mut keys = Vec::new(); let mut values = Vec::new(); @@ -359,12 +488,13 @@ mod tests { .expect("Failed to put record into store"); } - // Create a new store from the allegedly unsaved file - let new_store = FileBackedStore::new( + // Create a new store from the allegedly unpersisted DHT + let new_store = PersistentStore::new( MemoryStore::new(PeerId::random()), - "/tmp/test.dht".to_string(), + DhtFilePersistence::new("/tmp/test2.dht".to_string()), 10, - ); + ) + .await; // Check that the new store has none of the records for key in &keys { @@ -379,12 +509,16 @@ mod tests { )) .expect("Failed to put record into store"); - // Create a new store from the allegedly saved file - let new_store = FileBackedStore::new( + // Wait a bit for the save to complete + tokio::time::sleep(Duration::from_millis(100)).await; + + // Create a new store from the allegedly saved DHT + let new_store = PersistentStore::new( MemoryStore::new(PeerId::random()), - "/tmp/test.dht".to_string(), + DhtFilePersistence::new("/tmp/test2.dht".to_string()), 10, - ); + ) + .await; // Check that the new store has all of the records for (i, key) in keys.iter().enumerate() { @@ -393,7 +527,7 @@ mod tests { } // Check that the record delta is 0 - assert_eq!(new_store.record_delta, 0); + assert_eq!(store.record_delta.load(Ordering::Relaxed), 0); } #[test] diff --git a/libp2p-networking/src/network/def.rs b/libp2p-networking/src/network/def.rs index a52fdae36c..46b7ec8fe7 100644 --- a/libp2p-networking/src/network/def.rs +++ b/libp2p-networking/src/network/def.rs @@ -18,7 +18,10 @@ use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; use super::{ - behaviours::dht::store::{file_backed::FileBackedStore, validated::ValidatedStore}, + behaviours::dht::store::{ + persistent::{DhtPersistentStorage, PersistentStore}, + validated::ValidatedStore, + }, cbor, NetworkEventInternal, }; @@ -29,7 +32,7 @@ use super::{ /// - connection management #[derive(NetworkBehaviour, derive_more::Debug)] #[behaviour(to_swarm = "NetworkEventInternal")] -pub struct NetworkDef { +pub struct NetworkDef { /// purpose: broadcasting messages to many peers /// NOTE gossipsub works ONLY for sharing messages right now /// in the future it may be able to do peer discovery and routing @@ -37,10 +40,10 @@ pub struct NetworkDef { #[debug(skip)] gossipsub: GossipBehaviour, - /// The DHT store. We use a `FileBackedStore` to occasionally save the DHT to - /// a file on disk and a `ValidatedStore` to validate the records stored. + /// The DHT store. We use a `PersistentStore` to occasionally save the DHT to + /// some persistent store and a `ValidatedStore` to validate the records stored. #[debug(skip)] - pub dht: libp2p::kad::Behaviour>>, + pub dht: libp2p::kad::Behaviour, D>>, /// purpose: identifying the addresses from an outside POV #[debug(skip)] @@ -56,16 +59,16 @@ pub struct NetworkDef { pub autonat: libp2p::autonat::Behaviour, } -impl NetworkDef { +impl NetworkDef { /// Create a new instance of a `NetworkDef` #[must_use] pub fn new( gossipsub: GossipBehaviour, - dht: libp2p::kad::Behaviour>>, + dht: libp2p::kad::Behaviour, D>>, identify: IdentifyBehaviour, direct_message: super::cbor::Behaviour, Vec>, autonat: autonat::Behaviour, - ) -> NetworkDef { + ) -> NetworkDef { Self { gossipsub, dht, @@ -77,7 +80,7 @@ impl NetworkDef { } /// Address functions -impl NetworkDef { +impl NetworkDef { /// Add an address pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) { // NOTE to get this address to play nice with the other @@ -91,7 +94,7 @@ impl NetworkDef { } /// Gossip functions -impl NetworkDef { +impl NetworkDef { /// Publish a given gossip pub fn publish_gossip(&mut self, topic: IdentTopic, contents: Vec) { if let Err(e) = self.gossipsub.publish(topic, contents) { @@ -114,7 +117,7 @@ impl NetworkDef { } /// Request/response functions -impl NetworkDef { +impl NetworkDef { /// Add a direct request for a given peer pub fn add_direct_request(&mut self, peer_id: PeerId, data: Vec) -> OutboundRequestId { self.direct_message.send_request(&peer_id, data) diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 279d035949..103af0e764 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -59,7 +59,10 @@ pub use self::{ use super::{ behaviours::dht::{ bootstrap::{DHTBootstrapTask, InputEvent}, - store::{file_backed::FileBackedStore, validated::ValidatedStore}, + store::{ + persistent::{DhtPersistentStorage, PersistentStore}, + validated::ValidatedStore, + }, }, cbor::Cbor, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkError, NetworkEvent, @@ -82,23 +85,23 @@ pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; /// Network definition #[derive(derive_more::Debug)] -pub struct NetworkNode { +pub struct NetworkNode { /// peer id of network node peer_id: PeerId, /// the swarm of networkbehaviours #[debug(skip)] - swarm: Swarm>, + swarm: Swarm>, /// the listener id we are listening on, if it exists listener_id: Option, /// Handler for direct messages direct_message_state: DMBehaviour, /// Handler for DHT Events - dht_handler: DHTBehaviour, + dht_handler: DHTBehaviour, /// Channel to resend requests, set to Some when we call `spawn_listeners` resend_tx: Option>, } -impl NetworkNode { +impl NetworkNode { /// Returns number of peers this node is connected to pub fn num_connected(&self) -> usize { self.swarm.connected_peers().count() @@ -157,8 +160,17 @@ impl NetworkNode { /// QUIC v1 (RFC 9000) + DNS /// * Generates a connection to the "broadcast" topic /// * Creates a swarm to manage peers and events - #[instrument] - pub async fn new(config: NetworkNodeConfig) -> Result { + /// + /// # Errors + /// - If we fail to generate the transport or any of the behaviours + /// + /// # Panics + /// If 5 < 0 + #[allow(clippy::too_many_lines)] + pub async fn new( + config: NetworkNodeConfig, + dht_persistent_storage: D, + ) -> Result { // Generate a random `KeyPair` if one is not specified let keypair = config .keypair @@ -177,7 +189,7 @@ impl NetworkNode { .await?; // Generate the swarm - let mut swarm: Swarm> = { + let mut swarm: Swarm> = { // Use the `Blake3` hash of the message's contents as the ID let message_id_fn = |message: &GossipsubMessage| { let hash = blake3::hash(&message.data); @@ -253,20 +265,15 @@ impl NetworkNode { panic!("Replication factor not set"); } - // Extract the DHT file path from the config, defaulting to `libp2p_dht.json` - let dht_file_path = config - .dht_file_path - .clone() - .unwrap_or_else(|| "libp2p_dht.bin".into()); - - // Create the DHT behaviour + // Create the DHT behaviour with the given persistent storage let mut kadem = Behaviour::with_config( peer_id, - FileBackedStore::new( + PersistentStore::new( ValidatedStore::new(MemoryStore::new(peer_id)), - dht_file_path, - 10, - ), + dht_persistent_storage, + 5, + ) + .await, kconfig, ); kadem.set_mode(Some(Mode::Server)); @@ -712,8 +719,10 @@ impl NetworkNode { /// Spawn a task to listen for requests on the returned channel /// as well as any events produced by libp2p - #[instrument] - pub async fn spawn_listeners( + /// + /// # Errors + /// - If we fail to create the channels or the bootstrap channel + pub fn spawn_listeners( mut self, ) -> Result< ( diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index b7a6832286..b9a79bc70c 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -16,7 +16,10 @@ use tokio::{ use tracing::{debug, info, instrument}; use crate::network::{ - behaviours::dht::record::{Namespace, RecordKey, RecordValue}, + behaviours::dht::{ + record::{Namespace, RecordKey, RecordValue}, + store::persistent::DhtPersistentStorage, + }, gen_multiaddr, ClientRequest, NetworkEvent, NetworkNode, NetworkNodeConfig, }; @@ -77,11 +80,12 @@ impl NetworkNodeReceiver { /// Spawn a network node task task and return the handle and the receiver for it /// # Errors /// Errors if spawning the task fails -pub async fn spawn_network_node( +pub async fn spawn_network_node( config: NetworkNodeConfig, + dht_persistent_storage: D, id: usize, ) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkError> { - let mut network = NetworkNode::new(config.clone()) + let mut network = NetworkNode::new(config.clone(), dht_persistent_storage) .await .map_err(|e| NetworkError::ConfigError(format!("failed to create network node: {e}")))?; // randomly assigned port @@ -95,7 +99,7 @@ pub async fn spawn_network_node( })?; // pin here to force the future onto the heap since it can be large // in the case of flume - let (send_chan, recv_chan) = Box::pin(network.spawn_listeners()).await.map_err(|err| { + let (send_chan, recv_chan) = network.spawn_listeners().map_err(|err| { NetworkError::ListenError(format!("failed to spawn listeners for Libp2p: {err}")) })?; let receiver = NetworkNodeReceiver { From 78c90d870d283c7de7113628db441da8354e6c71 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 9 Jan 2025 16:26:51 -0500 Subject: [PATCH 1351/1393] Fix view change evidence (#4017) --- task-impls/src/helpers.rs | 6 +- task-impls/src/quorum_proposal/handlers.rs | 8 +-- task-impls/src/quorum_proposal_recv/mod.rs | 2 +- testing/src/view_generator.rs | 6 +- testing/tests/tests_1/quorum_proposal_task.rs | 6 +- types/src/data.rs | 66 ++++++++++++++++--- types/src/simple_certificate.rs | 4 +- 7 files changed, 74 insertions(+), 24 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 251cb049f0..f259f1ab79 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -15,7 +15,7 @@ use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf2, QuorumProposal2, ViewChangeEvidence}, + data::{Leaf2, QuorumProposal2, ViewChangeEvidence2}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, request_response::ProposalRequestPayload, @@ -752,7 +752,7 @@ pub(crate) async fn validate_proposal_view_and_certs< ))?; match received_proposal_cert { - ViewChangeEvidence::Timeout(timeout_cert) => { + ViewChangeEvidence2::Timeout(timeout_cert) => { ensure!( timeout_cert.data().view == view_number - 1, "Timeout certificate for view {} was not for the immediately preceding view", @@ -778,7 +778,7 @@ pub(crate) async fn validate_proposal_view_and_certs< *view_number ); } - ViewChangeEvidence::ViewSync(view_sync_cert) => { + ViewChangeEvidence2::ViewSync(view_sync_cert) => { ensure!( view_sync_cert.view_number == view_number, "View sync cert view number {:?} does not match proposal view number {:?}", diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 412e3fb41d..c4922af173 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -24,7 +24,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, - data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence}, + data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence2}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ @@ -265,7 +265,7 @@ impl ProposalDependencyHandle { &self, commitment_and_metadata: CommitmentAndMetadata, vid_share: Proposal>, - view_change_evidence: Option>, + view_change_evidence: Option>, formed_upgrade_certificate: Option>, decided_upgrade_certificate: Arc>>>, parent_qc: QuorumCertificate2, @@ -530,9 +530,9 @@ impl HandleDepOutput for ProposalDependencyHandle< } let proposal_cert = if let Some(view_sync_cert) = view_sync_finalize_cert { - Some(ViewChangeEvidence::ViewSync(view_sync_cert)) + Some(ViewChangeEvidence2::ViewSync(view_sync_cert)) } else { - timeout_certificate.map(ViewChangeEvidence::Timeout) + timeout_certificate.map(ViewChangeEvidence2::Timeout) }; if let Err(e) = self diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index f6ed129a6e..14ed4edf7e 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -16,7 +16,7 @@ use futures::future::{err, join_all}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::{EpochNumber, Leaf, ViewChangeEvidence}, + data::{EpochNumber, Leaf, ViewChangeEvidence2}, event::Event, message::UpgradeLock, simple_certificate::UpgradeCertificate, diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 563679297e..266c311429 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -24,7 +24,7 @@ use hotshot_example_types::{ use hotshot_types::{ data::{ DaProposal2, EpochNumber, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2, - ViewChangeEvidence, ViewNumber, + ViewChangeEvidence2, ViewNumber, }, message::{Proposal, UpgradeLock}, simple_certificate::{ @@ -352,9 +352,9 @@ impl TestView { }; let view_change_evidence = if let Some(tc) = timeout_certificate { - Some(ViewChangeEvidence::Timeout(tc)) + Some(ViewChangeEvidence2::Timeout(tc)) } else { - view_sync_certificate.map(ViewChangeEvidence::ViewSync) + view_sync_certificate.map(ViewChangeEvidence2::ViewSync) }; let random = thread_rng().gen_range(0..=u64::MAX); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index caf464ac51..165338945e 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -25,7 +25,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, EpochNumber, Leaf2, ViewChangeEvidence, ViewNumber}, + data::{null_block, EpochNumber, Leaf2, ViewChangeEvidence2, ViewNumber}, simple_vote::{TimeoutData2, ViewSyncFinalizeData2}, traits::{ election::Membership, @@ -353,7 +353,7 @@ async fn test_quorum_proposal_task_qc_timeout() { // Get the proposal cert out for the view sync input let cert = match proposals[1].data.view_change_evidence.clone().unwrap() { - ViewChangeEvidence::Timeout(tc) => tc, + ViewChangeEvidence2::Timeout(tc) => tc, _ => panic!("Found a View Sync Cert when there should have been a Timeout cert"), }; @@ -445,7 +445,7 @@ async fn test_quorum_proposal_task_view_sync() { // Get the proposal cert out for the view sync input let cert = match proposals[1].data.view_change_evidence.clone().unwrap() { - ViewChangeEvidence::ViewSync(vsc) => vsc, + ViewChangeEvidence2::ViewSync(vsc) => vsc, _ => panic!("Found a TC when there should have been a view sync cert"), }; diff --git a/types/src/data.rs b/types/src/data.rs index 5daae25d43..0ee98d45e5 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -34,8 +34,9 @@ use crate::{ impl_has_epoch, message::{Proposal, UpgradeLock}, simple_certificate::{ - NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate2, - UpgradeCertificate, ViewSyncFinalizeCertificate2, + NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + TimeoutCertificate2, UpgradeCertificate, ViewSyncFinalizeCertificate, + ViewSyncFinalizeCertificate2, }, simple_vote::{HasEpoch, QuorumData, QuorumData2, UpgradeProposalData, VersionedVoteData}, traits::{ @@ -308,9 +309,9 @@ impl VidDisperse { #[serde(bound(deserialize = ""))] pub enum ViewChangeEvidence { /// Holds a timeout certificate. - Timeout(TimeoutCertificate2), + Timeout(TimeoutCertificate), /// Holds a view sync finalized certificate. - ViewSync(ViewSyncFinalizeCertificate2), + ViewSync(ViewSyncFinalizeCertificate), } impl ViewChangeEvidence { @@ -321,6 +322,51 @@ impl ViewChangeEvidence { ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, } } + + /// Convert to ViewChangeEvidence2 + pub fn to_evidence2(self) -> ViewChangeEvidence2 { + match self { + ViewChangeEvidence::Timeout(timeout_cert) => { + ViewChangeEvidence2::Timeout(timeout_cert.to_tc2()) + } + ViewChangeEvidence::ViewSync(view_sync_cert) => { + ViewChangeEvidence2::ViewSync(view_sync_cert.to_vsc2()) + } + } + } +} + +/// Helper type to encapsulate the various ways that proposal certificates can be captured and +/// stored. +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub enum ViewChangeEvidence2 { + /// Holds a timeout certificate. + Timeout(TimeoutCertificate2), + /// Holds a view sync finalized certificate. + ViewSync(ViewSyncFinalizeCertificate2), +} + +impl ViewChangeEvidence2 { + /// Check that the given ViewChangeEvidence2 is relevant to the current view. + pub fn is_valid_for_view(&self, view: &TYPES::View) -> bool { + match self { + ViewChangeEvidence2::Timeout(timeout_cert) => timeout_cert.data().view == *view - 1, + ViewChangeEvidence2::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, + } + } + + /// Convert to ViewChangeEvidence + pub fn to_evidence(self) -> ViewChangeEvidence { + match self { + ViewChangeEvidence2::Timeout(timeout_cert) => { + ViewChangeEvidence::Timeout(timeout_cert.to_tc()) + } + ViewChangeEvidence2::ViewSync(view_sync_cert) => { + ViewChangeEvidence::ViewSync(view_sync_cert.to_vsc()) + } + } + } } #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] @@ -628,7 +674,7 @@ pub struct QuorumProposal2 { pub upgrade_certificate: Option>, /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. - pub view_change_evidence: Option>, + pub view_change_evidence: Option>, /// The DRB result for the next epoch. /// @@ -646,7 +692,9 @@ impl From> for QuorumProposal2 { justify_qc: quorum_proposal.justify_qc.to_qc2(), next_epoch_justify_qc: None, upgrade_certificate: quorum_proposal.upgrade_certificate, - view_change_evidence: quorum_proposal.proposal_certificate, + view_change_evidence: quorum_proposal + .proposal_certificate + .map(ViewChangeEvidence::to_evidence2), next_drb_result: None, } } @@ -659,7 +707,9 @@ impl From> for QuorumProposal { view_number: quorum_proposal2.view_number, justify_qc: quorum_proposal2.justify_qc.to_qc(), upgrade_certificate: quorum_proposal2.upgrade_certificate, - proposal_certificate: quorum_proposal2.view_change_evidence, + proposal_certificate: quorum_proposal2 + .view_change_evidence + .map(ViewChangeEvidence2::to_evidence), } } } @@ -818,7 +868,7 @@ pub struct Leaf2 { block_payload: Option, /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. - pub view_change_evidence: Option>, + pub view_change_evidence: Option>, } impl Leaf2 { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 0973f8805a..e7d8f9b437 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -698,7 +698,7 @@ impl ViewSyncFinalizeCertificate2 { impl TimeoutCertificate { /// Convert a `DaCertificate` into a `DaCertificate2` - pub fn to_vsc2(self) -> TimeoutCertificate2 { + pub fn to_tc2(self) -> TimeoutCertificate2 { let data = TimeoutData2 { view: self.data.view, epoch: TYPES::Epoch::new(0), @@ -719,7 +719,7 @@ impl TimeoutCertificate { impl TimeoutCertificate2 { /// Convert a `DaCertificate` into a `DaCertificate2` - pub fn to_vsc(self) -> TimeoutCertificate { + pub fn to_tc(self) -> TimeoutCertificate { let data = TimeoutData { view: self.data.view, }; From cf98f82d93f48f602b028e59d6d7d9fd0638bd47 Mon Sep 17 00:00:00 2001 From: dashangcun <907225865@qq.com> Date: Thu, 9 Jan 2025 22:27:00 +0100 Subject: [PATCH 1352/1393] chore: add missing backticks (#4016) Signed-off-by: dashangcun <907225865@qq.com> --- types/src/traits/states.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/traits/states.rs b/types/src/traits/states.rs index 00a5a6aab1..00f0e54042 100644 --- a/types/src/traits/states.rs +++ b/types/src/traits/states.rs @@ -43,7 +43,7 @@ pub trait StateDelta: /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied /// -/// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) +/// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header`)) pub trait ValidatedState: Serialize + DeserializeOwned + Debug + Default + PartialEq + Eq + Send + Sync + Clone { From 8ea15dfef26c8dc840678cedf93afcf470bc3305 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Thu, 9 Jan 2025 18:06:17 -0500 Subject: [PATCH 1353/1393] use BlockPayload for `calculate_vid_disperse` (#4018) --- hotshot/src/lib.rs | 5 +---- task-impls/src/da.rs | 18 ++++++++++++------ task-impls/src/helpers.rs | 17 +++++------------ task-impls/src/vid.rs | 8 ++++---- types/src/consensus.rs | 23 +++++++++++++++-------- types/src/data.rs | 3 ++- 6 files changed, 39 insertions(+), 35 deletions(-) diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 8055ae595c..ea213a6338 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -61,7 +61,6 @@ use hotshot_types::{ signature_key::SignatureKey, states::ValidatedState, storage::Storage, - EncodeBytes, }, utils::epoch_from_block_number, HotShotConfig, @@ -321,9 +320,7 @@ impl, V: Versions> SystemContext, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState( res.leaf_views.push(info.clone()); // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. - if let Some(encoded_txns) = consensus_reader + if let Some(payload) = consensus_reader .saved_payloads() .get(&info.leaf.view_number()) { - let payload = - BlockPayload::from_bytes(encoded_txns, info.leaf.block_header().metadata()); - - info.leaf.fill_block_payload_unchecked(payload); + info.leaf + .fill_block_payload_unchecked(payload.as_ref().clone()); } if let Some(ref payload) = info.leaf.block_payload() { @@ -451,13 +449,8 @@ pub async fn decide_from_proposal( } // If the block payload is available for this leaf, include it in // the leaf chain that we send to the client. - if let Some(encoded_txns) = - consensus_reader.saved_payloads().get(&leaf.view_number()) - { - let payload = - BlockPayload::from_bytes(encoded_txns, leaf.block_header().metadata()); - - leaf.fill_block_payload_unchecked(payload); + if let Some(payload) = consensus_reader.saved_payloads().get(&leaf.view_number()) { + leaf.fill_block_payload_unchecked(payload.as_ref().clone()); } // Get the VID share at the leaf's view number, corresponding to our key diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3754e2a01d..3d6faad831 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -98,7 +98,7 @@ impl> VidTaskState { return None; } let vid_disperse = VidDisperse::calculate_vid_disperse( - Arc::clone(encoded_transactions), + &payload, &Arc::clone(&self.membership), *view_number, epoch, @@ -192,7 +192,7 @@ impl> VidTaskState { ); let consensus_reader = self.consensus.read().await; - let Some(txns) = consensus_reader.saved_payloads().get(&proposal_view_number) + let Some(payload) = consensus_reader.saved_payloads().get(&proposal_view_number) else { tracing::warn!( "We need to calculate VID for the nodes in the next epoch \ @@ -200,11 +200,11 @@ impl> VidTaskState { ); return None; }; - let txns = Arc::clone(txns); + let payload = Arc::clone(payload); drop(consensus_reader); let next_epoch_vid_disperse = VidDisperse::calculate_vid_disperse( - txns, + payload.as_ref(), &Arc::clone(&self.membership), proposal_view_number, target_epoch, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 5a857b360b..228d37af43 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -314,7 +314,7 @@ pub struct Consensus { /// Saved payloads. /// /// Encoded transactions for every view if we got a payload for that view. - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, /// the highqc per spec high_qc: QuorumCertificate2, @@ -418,7 +418,7 @@ impl Consensus { last_actioned_view: TYPES::View, last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, high_qc: QuorumCertificate2, next_epoch_high_qc: Option>, metrics: Arc, @@ -485,7 +485,7 @@ impl Consensus { } /// Get the saved payloads. - pub fn saved_payloads(&self) -> &BTreeMap> { + pub fn saved_payloads(&self) -> &BTreeMap> { &self.saved_payloads } @@ -732,13 +732,13 @@ impl Consensus { pub fn update_saved_payloads( &mut self, view_number: TYPES::View, - encoded_transaction: Arc<[u8]>, + payload: Arc, ) -> Result<()> { ensure!( !self.saved_payloads.contains_key(&view_number), "Payload with the same view already exists." ); - self.saved_payloads.insert(view_number, encoded_transaction); + self.saved_payloads.insert(view_number, payload); Ok(()) } @@ -946,7 +946,7 @@ impl Consensus { membership: Arc>, private_key: &::PrivateKey, ) -> Option<()> { - let txns = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); + let payload = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); let epoch = consensus .read() .await @@ -954,8 +954,15 @@ impl Consensus { .get(&view)? .view_inner .epoch()?; - let vid = - VidDisperse::calculate_vid_disperse(txns, &membership, view, epoch, epoch, None).await; + let vid = VidDisperse::calculate_vid_disperse( + payload.as_ref(), + &membership, + view, + epoch, + epoch, + None, + ) + .await; let shares = VidDisperseShare2::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; for share in shares { diff --git a/types/src/data.rs b/types/src/data.rs index 0ee98d45e5..9b8457fcd8 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -259,7 +259,7 @@ impl VidDisperse { /// Panics if the VID calculation fails, this should not happen. #[allow(clippy::panic)] pub async fn calculate_vid_disperse( - txns: Arc<[u8]>, + payload: &TYPES::BlockPayload, membership: &Arc>, view: TYPES::View, target_epoch: TYPES::Epoch, @@ -268,6 +268,7 @@ impl VidDisperse { ) -> Self { let num_nodes = membership.read().await.total_nodes(target_epoch); + let txns = payload.encode(); let txns_clone = Arc::clone(&txns); let vid_disperse = spawn_blocking(move || { precompute_data From 24f3a59989ad4981e009c5ed23603335801ad929 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 10 Jan 2025 12:21:04 -0500 Subject: [PATCH 1354/1393] Fix task shutdown panic (#4022) --- task/src/task.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/task/src/task.rs b/task/src/task.rs index 2b4784d00c..ee758cf1cd 100644 --- a/task/src/task.rs +++ b/task/src/task.rs @@ -127,9 +127,9 @@ impl ConsensusTaskRegistry { let handles = &mut self.task_handles; while let Some(handle) = handles.pop() { - let mut task_state = handle.await.unwrap(); - - task_state.cancel_subtasks(); + let _ = handle + .await + .map(|mut task_state| task_state.cancel_subtasks()); } } /// Take a task, run it, and register it From 1f96d812751284110066d0d52b25fda49c71c3c8 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 10 Jan 2025 13:46:02 -0500 Subject: [PATCH 1355/1393] Reorganize tests and add epoch upgrade test (#4012) --- example-types/src/node_types.rs | 18 +- task-impls/src/transactions.rs | 29 +--- task-impls/src/vid.rs | 9 +- testing/tests/tests_1/test_success.rs | 201 +-------------------- testing/tests/tests_6.rs | 9 + testing/tests/tests_6/test_epochs.rs | 240 ++++++++++++++++++++++++++ types/src/consensus.rs | 16 +- types/src/data.rs | 58 +++---- 8 files changed, 316 insertions(+), 264 deletions(-) create mode 100644 testing/tests/tests_6.rs create mode 100644 testing/tests/tests_6/test_epochs.rs diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 166976c3a0..0b1a0989c1 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -295,7 +295,23 @@ impl Versions for EpochsTestVersions { 0, 0, ]; - type Marketplace = StaticVersion<0, 3>; + type Marketplace = StaticVersion<0, 5>; + + type Epochs = StaticVersion<0, 4>; +} + +#[derive(Clone, Debug, Copy)] +pub struct EpochUpgradeTestVersions {} + +impl Versions for EpochUpgradeTestVersions { + type Base = StaticVersion<0, 3>; + type Upgrade = StaticVersion<0, 4>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + + type Marketplace = StaticVersion<0, 5>; type Epochs = StaticVersion<0, 4>; } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index cb911e8522..34dd64fd23 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -725,13 +725,6 @@ impl, V: Versions> TransactionTask bail!("No available blocks"); } - let version = match self.upgrade_lock.version(view_number).await { - Ok(v) => v, - Err(err) => { - bail!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); - } - }; - for (block_info, builder_idx) in available_blocks { // Verify signature over chosen block. if !block_info.sender.validate_block_info_signature( @@ -758,19 +751,9 @@ impl, V: Versions> TransactionTask let response = { let client = &self.builder_clients[builder_idx]; - // If epochs are supported, provide the latest `num_nodes` information to the - // builder for VID computation. - let (block, header_input) = if version >= V::Epochs::VERSION { - let total_nodes = self.membership.read().await.total_nodes(self.cur_epoch); - futures::join! { - client.claim_block_with_num_nodes(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature, total_nodes), - client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) - } - } else { - futures::join! { - client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), - client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) - } + let (block, header_input) = futures::join! { + client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) }; let block_data = match block { @@ -815,7 +798,11 @@ impl, V: Versions> TransactionTask fee, block_payload: block_data.block_payload, metadata: block_data.metadata, - precompute_data: Some(header_input.vid_precompute_data), + // we discard the precompute data, + // because we cannot trust that the builder is able to calculate this correctly. + // + // in particular, the builder needs to know `num_nodes` and there aren't any practical ways to verify the result it sent us. + precompute_data: None, } }; diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 3d6faad831..24603f27e1 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -76,7 +76,6 @@ impl> VidTaskState { metadata, view_number, sequencing_fees, - vid_precompute, auction_result, .. } = packed_bundle; @@ -103,9 +102,9 @@ impl> VidTaskState { *view_number, epoch, epoch, - vid_precompute.clone(), ) - .await; + .await + .ok()?; let payload_commitment = vid_disperse.payload_commitment; let shares = VidDisperseShare2::from_vid_disperse(vid_disperse.clone()); let mut consensus_writer = self.consensus.write().await; @@ -209,9 +208,9 @@ impl> VidTaskState { proposal_view_number, target_epoch, sender_epoch, - None, ) - .await; + .await + .ok()?; let Ok(next_epoch_signature) = TYPES::SignatureKey::sign( &self.private_key, next_epoch_vid_disperse.payload_commitment.as_ref(), diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index e3b7441fa7..55608a41d1 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -8,9 +8,8 @@ use std::time::Duration; use hotshot_example_types::{ node_types::{ - CombinedImpl, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, - TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, TestTypesRandomizedLeader, - TestVersions, + Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, + TestTypesRandomizedLeader, TestVersions, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -18,7 +17,6 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::TestDescription, view_sync_task::ViewSyncTaskDescription, }; @@ -43,46 +41,6 @@ cross_tests!( }, ); -cross_tests!( - TestName: test_success_with_epochs, - Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], - Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - epoch_height: 10, - ..TestDescription::default() - } - }, -); - -// cross_tests!( -// TestName: test_epoch_success, -// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], -// Types: [TestTypes, TestTypesRandomizedLeader, TestTypesRandomizedCommitteeMembers>, TestTypesRandomizedCommitteeMembers>], -// Versions: [EpochsTestVersions], -// Ignore: false, -// Metadata: { -// TestDescription { -// // allow more time to pass in CI -// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( -// TimeBasedCompletionTaskDescription { -// duration: Duration::from_secs(60), -// }, -// ), -// epoch_height: 10, -// ..TestDescription::default() -// } -// }, -// ); - cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -116,39 +74,6 @@ cross_tests!( }, ); -cross_tests!( - TestName: test_success_with_async_delay_with_epochs, - Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], - Types: [TestTypes, TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - epoch_height: 10, - ..TestDescription::default() - }; - - metadata.overall_safety_properties.num_failed_views = 0; - metadata.overall_safety_properties.num_successful_views = 0; - let mut config = DelayConfig::default(); - let delay_settings = DelaySettings { - delay_option: DelayOptions::Random, - min_time_in_milliseconds: 10, - max_time_in_milliseconds: 100, - fixed_time_in_milliseconds: 0, - }; - config.add_settings_for_all_types(delay_settings); - metadata.async_delay_config = config; - metadata - }, -); - cross_tests!( TestName: test_success_with_async_delay_2, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -190,47 +115,6 @@ cross_tests!( }, ); -cross_tests!( - TestName: test_success_with_async_delay_2_with_epochs, - Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], - Types: [TestTypes, TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - epoch_height: 10, - ..TestDescription::default() - }; - - metadata.overall_safety_properties.num_failed_views = 0; - metadata.overall_safety_properties.num_successful_views = 30; - let mut config = DelayConfig::default(); - let mut delay_settings = DelaySettings { - delay_option: DelayOptions::Random, - min_time_in_milliseconds: 10, - max_time_in_milliseconds: 100, - fixed_time_in_milliseconds: 15, - }; - config.add_setting(SupportedTraitTypesForAsyncDelay::Storage, &delay_settings); - - delay_settings.delay_option = DelayOptions::Fixed; - config.add_setting(SupportedTraitTypesForAsyncDelay::BlockHeader, &delay_settings); - - delay_settings.delay_option = DelayOptions::Random; - delay_settings.min_time_in_milliseconds = 5; - delay_settings.max_time_in_milliseconds = 20; - config.add_setting(SupportedTraitTypesForAsyncDelay::ValidatedState, &delay_settings); - metadata.async_delay_config = config; - metadata - }, -); - cross_tests!( TestName: test_with_double_leader_no_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], @@ -252,84 +136,3 @@ cross_tests!( metadata } ); - -cross_tests!( - TestName: test_with_double_leader_no_failures_with_epochs, - Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], - Types: [TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.num_bootstrap_nodes = 10; - metadata.epoch_height = 10; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; - - metadata.overall_safety_properties.num_failed_views = 0; - - metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(0, 0); - - metadata - } -); - -cross_tests!( - TestName: test_epoch_end, - Impls: [CombinedImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes, TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - TestDescription { - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), - }, - ), - num_nodes_with_stake: 11, - start_nodes: 11, - num_bootstrap_nodes: 11, - da_staked_committee_size: 11, - epoch_height: 10, - ..TestDescription::default() - } - }, -); - -// Test to make sure we can decide in just 3 views -// This test fails with the old decide rule -cross_tests!( - TestName: test_shorter_decide, - Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], - Types: [TestTypes, TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - let mut metadata = TestDescription { - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_millis(100000), - }, - ), - epoch_height: 10, - ..TestDescription::default() - }; - // after the first 3 leaders the next leader is down. It's a hack to make sure we decide in - // 3 views or else we get a timeout - let dead_nodes = vec![ - ChangeNode { - idx: 4, - updown: NodeAction::Down, - }, - - ]; - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(1, dead_nodes)] - }; - metadata.overall_safety_properties.num_successful_views = 1; - metadata.overall_safety_properties.num_failed_views = 0; - metadata - }, -); diff --git a/testing/tests/tests_6.rs b/testing/tests/tests_6.rs new file mode 100644 index 0000000000..6cd4ae3b34 --- /dev/null +++ b/testing/tests/tests_6.rs @@ -0,0 +1,9 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +mod tests_6 { + automod::dir!("tests/tests_6"); +} diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs new file mode 100644 index 0000000000..c902483860 --- /dev/null +++ b/testing/tests/tests_6/test_epochs.rs @@ -0,0 +1,240 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::time::Duration; + +use hotshot_example_types::{ + node_types::{ + CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, + PushCdnImpl, TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, + TestTypesRandomizedLeader, + }, + testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, +}; +use hotshot_macros::cross_tests; +use hotshot_testing::{ + block_builder::SimpleBuilderImplementation, + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, + test_builder::TestDescription, + view_sync_task::ViewSyncTaskDescription, +}; + +cross_tests!( + TestName: test_success_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + epoch_height: 10, + ..TestDescription::default() + } + }, +); + +// cross_tests!( +// TestName: test_epoch_success, +// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], +// Types: [TestTypes, TestTypesRandomizedLeader, TestTypesRandomizedCommitteeMembers>, TestTypesRandomizedCommitteeMembers>], +// Versions: [EpochsTestVersions], +// Ignore: false, +// Metadata: { +// TestDescription { +// // allow more time to pass in CI +// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( +// TimeBasedCompletionTaskDescription { +// duration: Duration::from_secs(60), +// }, +// ), +// epoch_height: 10, +// ..TestDescription::default() +// } +// }, +// ); + +cross_tests!( + TestName: test_success_with_async_delay_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + epoch_height: 10, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + metadata.overall_safety_properties.num_successful_views = 0; + let mut config = DelayConfig::default(); + let delay_settings = DelaySettings { + delay_option: DelayOptions::Random, + min_time_in_milliseconds: 10, + max_time_in_milliseconds: 100, + fixed_time_in_milliseconds: 0, + }; + config.add_settings_for_all_types(delay_settings); + metadata.async_delay_config = config; + metadata + }, +); + +cross_tests!( + TestName: test_success_with_async_delay_2_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + epoch_height: 10, + ..TestDescription::default() + }; + + metadata.overall_safety_properties.num_failed_views = 0; + metadata.overall_safety_properties.num_successful_views = 30; + let mut config = DelayConfig::default(); + let mut delay_settings = DelaySettings { + delay_option: DelayOptions::Random, + min_time_in_milliseconds: 10, + max_time_in_milliseconds: 100, + fixed_time_in_milliseconds: 15, + }; + config.add_setting(SupportedTraitTypesForAsyncDelay::Storage, &delay_settings); + + delay_settings.delay_option = DelayOptions::Fixed; + config.add_setting(SupportedTraitTypesForAsyncDelay::BlockHeader, &delay_settings); + + delay_settings.delay_option = DelayOptions::Random; + delay_settings.min_time_in_milliseconds = 5; + delay_settings.max_time_in_milliseconds = 20; + config.add_setting(SupportedTraitTypesForAsyncDelay::ValidatedState, &delay_settings); + metadata.async_delay_config = config; + metadata + }, +); + +cross_tests!( + TestName: test_with_double_leader_no_failures_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_bootstrap_nodes = 10; + metadata.epoch_height = 10; + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + + metadata.overall_safety_properties.num_failed_views = 0; + + metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(0, 0); + + metadata + } +); + +cross_tests!( + TestName: test_epoch_end, + Impls: [CombinedImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + TestDescription { + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ), + num_nodes_with_stake: 11, + start_nodes: 11, + num_bootstrap_nodes: 11, + da_staked_committee_size: 11, + epoch_height: 10, + ..TestDescription::default() + } + }, +); + +// Test to make sure we can decide in just 3 views +// This test fails with the old decide rule +cross_tests!( + TestName: test_shorter_decide, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription { + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ), + epoch_height: 10, + ..TestDescription::default() + }; + // after the first 3 leaders the next leader is down. It's a hack to make sure we decide in + // 3 views or else we get a timeout + let dead_nodes = vec![ + ChangeNode { + idx: 4, + updown: NodeAction::Down, + }, + + ]; + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(1, dead_nodes)] + }; + metadata.overall_safety_properties.num_successful_views = 1; + metadata.overall_safety_properties.num_failed_views = 0; + metadata + }, +); + +cross_tests!( + TestName: test_epoch_upgrade, + Impls: [MemoryImpl], + Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], + Versions: [EpochUpgradeTestVersions], + Ignore: true, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + epoch_height: 50, + upgrade_view: Some(5), + ..TestDescription::default() + } + }, +); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 228d37af43..d9978a2749 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -954,15 +954,12 @@ impl Consensus { .get(&view)? .view_inner .epoch()?; - let vid = VidDisperse::calculate_vid_disperse( - payload.as_ref(), - &membership, - view, - epoch, - epoch, - None, - ) - .await; + + let vid = + VidDisperse::calculate_vid_disperse(payload.as_ref(), &membership, view, epoch, epoch) + .await + .ok()?; + let shares = VidDisperseShare2::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; for share in shares { @@ -970,6 +967,7 @@ impl Consensus { consensus_writer.update_vid_shares(view, prop); } } + Some(()) } diff --git a/types/src/data.rs b/types/src/data.rs index 9b8457fcd8..fc0ece0b15 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -20,12 +20,11 @@ use std::{ use async_lock::RwLock; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; -use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; +use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; use thiserror::Error; use tokio::task::spawn_blocking; -use tracing::error; use utils::anytrace::*; use vec1::Vec1; @@ -255,8 +254,8 @@ impl VidDisperse { /// optionally using precompute data from builder. /// If the sender epoch is missing, it means it's the same as the target epoch. /// - /// # Panics - /// Panics if the VID calculation fails, this should not happen. + /// # Errors + /// Returns an error if the disperse or commitment calculation fails #[allow(clippy::panic)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, @@ -264,43 +263,44 @@ impl VidDisperse { view: TYPES::View, target_epoch: TYPES::Epoch, data_epoch: TYPES::Epoch, - precompute_data: Option, - ) -> Self { + ) -> Result { let num_nodes = membership.read().await.total_nodes(target_epoch); let txns = payload.encode(); let txns_clone = Arc::clone(&txns); - let vid_disperse = spawn_blocking(move || { - precompute_data - .map_or_else( - || vid_scheme(num_nodes).disperse(&txns_clone), - |data| vid_scheme(num_nodes).disperse_precompute(&txns_clone, &data) - ) - .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns_clone.len())) - }).await; - let data_epoch_payload_commitment = if target_epoch == data_epoch { + let num_txns = txns.len(); + + let vid_disperse = spawn_blocking(move || vid_scheme(num_nodes).disperse(&txns_clone)) + .await + .wrap() + .context(error!("Join error"))? + .wrap() + .context(|err| error!("Failed to calculate VID disperse. Error: {}", err))?; + + let payload_commitment = if target_epoch == data_epoch { None } else { - let data_epoch_num_nodes = membership.read().await.total_nodes(data_epoch); - Some(spawn_blocking(move || { - vid_scheme(data_epoch_num_nodes).commit_only(&txns) - .unwrap_or_else(|err| panic!("VID commit_only failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) - }).await) + let num_nodes = membership.read().await.total_nodes(data_epoch); + + Some( + spawn_blocking(move || vid_scheme(num_nodes).commit_only(&txns)) + .await + .wrap() + .context(error!("Join error"))? + .wrap() + .context(|err| error!("Failed to calculate VID commitment with (num_storage_nodes, payload_byte_len) = ({}, {}). Error: {}", num_nodes, num_txns, err))? + ) }; - // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. - let vid_disperse = vid_disperse.unwrap(); - let data_epoch_payload_commitment = - data_epoch_payload_commitment.map(|result| result.unwrap()); - Self::from_membership( + Ok(Self::from_membership( view, vid_disperse, membership, target_epoch, data_epoch, - data_epoch_payload_commitment, + payload_commitment, ) - .await + .await) } } @@ -409,7 +409,7 @@ impl VidDisperseShare { let Ok(signature) = TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) else { - error!("VID: failed to sign dispersal share payload"); + tracing::error!("VID: failed to sign dispersal share payload"); return None; }; Some(Proposal { @@ -565,7 +565,7 @@ impl VidDisperseShare2 { let Ok(signature) = TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) else { - error!("VID: failed to sign dispersal share payload"); + tracing::error!("VID: failed to sign dispersal share payload"); return None; }; Some(Proposal { From e22d8607284c067cec6ef55c76bb97662a598bfe Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Mon, 13 Jan 2025 13:05:06 -0300 Subject: [PATCH 1356/1393] Add unit test for `epoch_from_block_number` (#4026) To better ensure and understand behavior. Co-authored-by: tbro --- types/src/utils.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/types/src/utils.rs b/types/src/utils.rs index 6496e182f8..3183b4a508 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -290,3 +290,30 @@ pub fn is_epoch_root(block_number: u64, epoch_height: u64) -> bool { (block_number + 2) % epoch_height == 0 } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_epoch_from_block_number() { + // block 0 is always epoch 0 + let epoch = epoch_from_block_number(0, 10); + assert_eq!(0, epoch); + + let epoch = epoch_from_block_number(1, 10); + assert_eq!(1, epoch); + + let epoch = epoch_from_block_number(10, 10); + assert_eq!(1, epoch); + + let epoch = epoch_from_block_number(11, 10); + assert_eq!(2, epoch); + + let epoch = epoch_from_block_number(20, 10); + assert_eq!(2, epoch); + + let epoch = epoch_from_block_number(21, 10); + assert_eq!(3, epoch); + } +} From 2076d90f0d485689c91d258a678e8bc18274c618 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:37:35 -0500 Subject: [PATCH 1357/1393] Libp2p: allow duplicate queries (#4027) * libp2p: allow duplicate queries * fmt & lint --- .../src/network/behaviours/dht/mod.rs | 63 ++++++++++++------- libp2p-networking/src/network/mod.rs | 2 +- libp2p-networking/src/network/node/handle.rs | 2 +- 3 files changed, 43 insertions(+), 24 deletions(-) diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/libp2p-networking/src/network/behaviours/dht/mod.rs index fd10422038..7ef41e1192 100644 --- a/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -6,12 +6,7 @@ /// Task for doing bootstraps at a regular interval pub mod bootstrap; -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, - num::NonZeroUsize, - time::Duration, -}; +use std::{collections::HashMap, marker::PhantomData, num::NonZeroUsize, time::Duration}; /// a local caching layer for the DHT key value pairs use futures::{ @@ -65,8 +60,8 @@ pub struct DHTBehaviour { pub in_progress_get_closest_peers: HashMap>, /// List of in-progress get requests in_progress_record_queries: HashMap, - /// The lookup keys for all outstanding DHT queries - outstanding_dht_query_keys: HashSet>, + /// The list of in-progress get requests by key + outstanding_dht_query_keys: HashMap, QueryId>, /// List of in-progress put requests in_progress_put_record_queries: HashMap, /// State of bootstrapping @@ -130,7 +125,7 @@ impl DHTBehaviour { peer_id: pid, in_progress_record_queries: HashMap::default(), in_progress_put_record_queries: HashMap::default(), - outstanding_dht_query_keys: HashSet::default(), + outstanding_dht_query_keys: HashMap::default(), bootstrap_state: Bootstrap { state: State::NotStarted, backoff: ExponentialBackoff::new(2, Duration::from_secs(1)), @@ -178,7 +173,7 @@ impl DHTBehaviour { pub fn get_record( &mut self, key: Vec, - chan: Sender>, + chans: Vec>>, factor: NonZeroUsize, backoff: ExponentialBackoff, retry_count: u8, @@ -191,24 +186,39 @@ impl DHTBehaviour { // Check the cache before making the (expensive) query if let Some(entry) = kad.store_mut().get(&key.clone().into()) { - // The key already exists in the cache - if chan.send(entry.value.clone()).is_err() { - error!("Get DHT: channel closed before get record request result could be sent"); + // The key already exists in the cache, send the value to all channels + for chan in chans { + if chan.send(entry.value.clone()).is_err() { + warn!("Get DHT: channel closed before get record request result could be sent"); + } } } else { // Check if the key is already being queried - if self.outstanding_dht_query_keys.insert(key.clone()) { + if let Some(qid) = self.outstanding_dht_query_keys.get(&key) { + // The key was already being queried. Add the channel to the existing query + // Try to get the query from the query id + let Some(query) = self.in_progress_record_queries.get_mut(qid) else { + warn!("Get DHT: outstanding query not found"); + return; + }; + + // Add the channel to the existing query + query.notify.extend(chans); + } else { // The key was not already being queried and was not in the cache. Start a new query. let qid = kad.get_record(key.clone().into()); let query = KadGetQuery { backoff, progress: DHTProgress::InProgress(qid), - notify: chan, + notify: chans, num_replicas: factor, - key, + key: key.clone(), retry_count: retry_count - 1, records: HashMap::default(), }; + + // Add the key to the outstanding queries and in-progress queries + self.outstanding_dht_query_keys.insert(key, qid); self.in_progress_record_queries.insert(qid, query); } } @@ -308,8 +318,14 @@ impl DHTBehaviour { // Remove the key from the outstanding queries so we are in sync self.outstanding_dht_query_keys.remove(&key); - // if channel has been dropped, cancel request - if notify.is_canceled() { + // `notify` is all channels that are still open + let notify = notify + .into_iter() + .filter(|n| !n.is_canceled()) + .collect::>(); + + // If all are closed, we can exit + if notify.is_empty() { return; } @@ -332,8 +348,11 @@ impl DHTBehaviour { // Only return the record if we can store it (validation passed) if store.put(record).is_ok() { - if notify.send(r).is_err() { - error!("Get DHT: channel closed before get record request result could be sent"); + // Send the record to all channels that are still open + for n in notify { + if n.send(r.clone()).is_err() { + warn!("Get DHT: channel closed before get record request result could be sent"); + } } } else { error!("Failed to store record in local store"); @@ -513,8 +532,8 @@ pub(crate) struct KadGetQuery { pub(crate) backoff: ExponentialBackoff, /// progress through DHT query pub(crate) progress: DHTProgress, - /// notify client of result - pub(crate) notify: Sender>, + /// The channels to notify of the result + pub(crate) notify: Vec>>, /// number of replicas required to replicate over pub(crate) num_replicas: NonZeroUsize, /// the key to look up diff --git a/libp2p-networking/src/network/mod.rs b/libp2p-networking/src/network/mod.rs index c4348af023..8464219eaa 100644 --- a/libp2p-networking/src/network/mod.rs +++ b/libp2p-networking/src/network/mod.rs @@ -92,7 +92,7 @@ pub enum ClientRequest { /// Key to search for key: Vec, /// Channel to notify caller of value (or failure to find value) - notify: Sender>, + notify: Vec>>, /// number of retries to make retry_count: u8, }, diff --git a/libp2p-networking/src/network/node/handle.rs b/libp2p-networking/src/network/node/handle.rs index b9a79bc70c..365825c593 100644 --- a/libp2p-networking/src/network/node/handle.rs +++ b/libp2p-networking/src/network/node/handle.rs @@ -256,7 +256,7 @@ impl NetworkNodeHandle { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetDHT { key: serialized_key.clone(), - notify: s, + notify: vec![s], retry_count, }; self.send_request(req)?; From 0fd7f82fe65ff051e1622a0a95d5ece5027a2a00 Mon Sep 17 00:00:00 2001 From: Keyao Shen Date: Tue, 14 Jan 2025 09:57:47 -0800 Subject: [PATCH 1358/1393] [DRB] - Store DRB seed and epoch root with consistent block info (#4019) * Replace proposal with decided block in two functions * Fix condition for storing received result * Rename function * Fix typo * Update error handling and remove unnecessary membership check * Update logs, remove stake check --- task-impls/src/helpers.rs | 39 +++---- task-impls/src/quorum_vote/handlers.rs | 150 +++++++++---------------- types/src/data.rs | 14 ++- 3 files changed, 86 insertions(+), 117 deletions(-) diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index ed21b3d282..b55a26b74a 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -166,22 +166,12 @@ pub(crate) async fn fetch_proposal( } /// Handles calling add_epoch_root and sync_l1 on Membership if necessary. -async fn decide_from_proposal_add_epoch_root( - proposal: &QuorumProposal2, - leaf_views: &[LeafInfo], +async fn decide_epoch_root( + decided_leaf: &Leaf2, epoch_height: u64, membership: &Arc>, ) { - if leaf_views.is_empty() { - return; - } - - let decided_block_number = leaf_views - .last() - .unwrap() - .leaf - .block_header() - .block_number(); + let decided_block_number = decided_leaf.block_header().block_number(); // Skip if this is not the expected block. if epoch_height != 0 && is_epoch_root(decided_block_number, epoch_height) { @@ -191,7 +181,7 @@ async fn decide_from_proposal_add_epoch_root( let write_callback = { let membership_reader = membership.read().await; membership_reader - .add_epoch_root(next_epoch_number, proposal.block_header.clone()) + .add_epoch_root(next_epoch_number, decided_leaf.block_header().clone()) .await }; @@ -257,7 +247,8 @@ impl Default for LeafChainTraversalOutcome { /// calculate the new decided leaf chain based on the rules of HotStuff 2 /// /// # Panics -/// Can't actually panic +/// If the leaf chain contains no decided leaf while reaching a decided view, which should be +/// impossible. pub async fn decide_from_proposal_2( proposal: &QuorumProposal2, consensus: OuterConsensus, @@ -338,8 +329,11 @@ pub async fn decide_from_proposal_2( let epoch_height = consensus_reader.epoch_height; drop(consensus_reader); - decide_from_proposal_add_epoch_root(proposal, &res.leaf_views, epoch_height, membership) - .await; + if let Some(decided_leaf_info) = res.leaf_views.last() { + decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; + } else { + tracing::info!("No decided leaf while a view has been decided."); + } } res @@ -372,6 +366,10 @@ pub async fn decide_from_proposal_2( /// /// Upon receipt then of a proposal for view 9, assuming it is valid, this entire process will repeat, and /// the anchor view will be set to view 6, with the locked view as view 7. +/// +/// # Panics +/// If the leaf chain contains no decided leaf while reaching a decided view, which should be +/// impossible. pub async fn decide_from_proposal( proposal: &QuorumProposal2, consensus: OuterConsensus, @@ -489,8 +487,11 @@ pub async fn decide_from_proposal( let epoch_height = consensus_reader.epoch_height; drop(consensus_reader); - decide_from_proposal_add_epoch_root(proposal, &res.leaf_views, epoch_height, membership) - .await; + if let Some(decided_leaf_info) = res.leaf_views.last() { + decide_epoch_root(&decided_leaf_info.leaf, epoch_height, membership).await; + } else { + tracing::info!("No decided leaf while a view has been decided."); + } } res diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index fe11d69745..4a2f180669 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -14,7 +14,7 @@ use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal2, VidDisperseShare2}, drb::{compute_drb_result, DrbResult}, - event::{Event, EventType, LeafInfo}, + event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, traits::{ @@ -43,53 +43,6 @@ use crate::{ quorum_vote::Versions, }; -/// Store the DRB result from the quorum proposal. -/// -/// Returns an error if receiving an inconsistent result. -async fn store_received_drb_result, V: Versions>( - epoch_number: TYPES::Epoch, - drb_result: DrbResult, - task_state: &mut QuorumVoteTaskState, -) -> Result<()> { - let mut exist = false; - if let Some(stored_result) = task_state - .consensus - .read() - .await - .drb_seeds_and_results - .results - .get(&epoch_number) - { - if drb_result == *stored_result { - return Ok(()); - } - exist = true; - } - - // If there exists an inconsistent result, remove it. - if exist { - task_state - .consensus - .write() - .await - .drb_seeds_and_results - .results - .remove(&epoch_number); - bail!("Inconsistent result with the storage."); - } - // Otherwise, store the result. - else { - task_state - .consensus - .write() - .await - .drb_seeds_and_results - .results - .insert(epoch_number, drb_result); - Ok(()) - } -} - /// Store the DRB result from the computation task to the shared `results` table. /// /// Returns the result if it exists. @@ -173,7 +126,6 @@ async fn verify_drb_result, V: Ver let membership_reader = task_state.membership.read().await; let has_stake_current_epoch = membership_reader.has_stake(&task_state.public_key, epoch); - let has_stake_next_epoch = membership_reader.has_stake(&task_state.public_key, epoch + 1); drop(membership_reader); @@ -181,15 +133,9 @@ async fn verify_drb_result, V: Ver let computed_result = store_and_get_computed_drb_result(epoch + 1, task_state).await?; ensure!(proposal_result == computed_result, warn!("Our calculated DRB result is {:?}, which does not match the proposed DRB result of {:?}", computed_result, proposal_result)); - - Ok(()) - } else if has_stake_next_epoch { - store_received_drb_result(epoch + 1, proposal_result, task_state).await - } else { - Err(error!( - "We are not participating in either the current or next epoch" - )) } + + Ok(()) } /// Start the DRB computation task for the next epoch. @@ -266,40 +212,41 @@ async fn start_drb_task, V: Versio } } -/// Store the DRB seed two epochs in advance and the computed DRB result for next epoch. +/// Store the DRB seed two epochs in advance and the computed or received DRB result for next +/// epoch. /// -/// We store the DRB seed and result if the decided block is the third from the last block in the -/// current epoch and for the former, if we are in the quorum committee of the next epoch. +/// We store a combination of the following data. +/// * The DRB seed two epochs in advance, if the third from the last block, i.e., the epoch root, +/// is decided and we are in the quorum committee of the next epoch. +/// * The computed result for the next epoch, if the third from the last block is decided. +/// * The received result for the next epoch, if the last block of the epoch is decided and we are +/// in the quorum committee of the committee of the next epoch. /// /// Special cases: /// * Epoch 0: No DRB computation since we'll transition to epoch 1 immediately. /// * Epoch 1 and 2: No computed DRB result since when we first start the computation in epoch 1, -/// the result is for epoch 3. +/// the result is for epoch 3. /// -/// We don't need to handle the special cases explicitly here, because the first proposal with -/// which we'll start the DRB computation is for epoch 3. +/// We don't need to handle the special cases explicitly here, because the first leaf with which +/// we'll start the DRB computation is for epoch 3. async fn store_drb_seed_and_result, V: Versions>( - proposal: &QuorumProposal2, task_state: &mut QuorumVoteTaskState, - leaf_views: &[LeafInfo], + decided_leaf: &Leaf2, ) -> Result<()> { - // This is never none if we've reached a new decide, so this is safe to unwrap. - let decided_block_number = leaf_views - .last() - .unwrap() - .leaf - .block_header() - .block_number(); + if task_state.epoch_height == 0 { + tracing::info!("Epoch height is 0, skipping DRB storage."); + return Ok(()); + } - // Skip if this is not the expected block. - if task_state.epoch_height != 0 && is_epoch_root(decided_block_number, task_state.epoch_height) - { - // Cancel old DRB computation tasks. - let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( - decided_block_number, - task_state.epoch_height, - )); + let decided_block_number = decided_leaf.block_header().block_number(); + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + decided_block_number, + task_state.epoch_height, + )); + // Skip storing the seed and computed result if this is not the epoch root. + if is_epoch_root(decided_block_number, task_state.epoch_height) { + // Cancel old DRB computation tasks. let mut consensus_writer = task_state.consensus.write().await; consensus_writer .drb_seeds_and_results @@ -310,28 +257,35 @@ async fn store_drb_seed_and_result // include in the proposal in the last block of this epoch. store_and_get_computed_drb_result(current_epoch_number + 1, task_state).await?; - // Skip if we are not in the committee of the next epoch. - if task_state - .membership - .read() + // Store the DRB seed input for the epoch after the next one. + let Ok(drb_seed_input_vec) = bincode::serialize(&decided_leaf.justify_qc().signatures) + else { + bail!("Failed to serialize the QC signature."); + }; + let Ok(drb_seed_input) = drb_seed_input_vec.try_into() else { + bail!("Failed to convert the serialized QC signature into a DRB seed input."); + }; + task_state + .consensus + .write() .await - .has_stake(&task_state.public_key, current_epoch_number + 1) - { - let new_epoch_number = current_epoch_number + 2; - let Ok(drb_seed_input_vec) = bincode::serialize(&proposal.justify_qc.signatures) else { - bail!("Failed to serialize the QC signature."); - }; - let Ok(drb_seed_input) = drb_seed_input_vec.try_into() else { - bail!("Failed to convert the serialized QC signature into a DRB seed input."); - }; - - // Store the DRB seed input for the epoch after the next one. + .drb_seeds_and_results + .store_seed(current_epoch_number + 2, drb_seed_input); + } + // Skip storing the received result if this is not the last block. + else if is_last_block_in_epoch(decided_block_number, task_state.epoch_height) { + if let Some(result) = decided_leaf.next_drb_result { + // We don't need to check value existence and consistency because it should be + // impossible to decide on a block with different DRB results. task_state .consensus .write() .await .drb_seeds_and_results - .store_seed(new_epoch_number, drb_seed_input); + .results + .insert(current_epoch_number + 1, result); + } else { + bail!("The last block of the epoch is decided but doesn't contain a DRB result."); } } Ok(()) @@ -460,7 +414,9 @@ pub(crate) async fn handle_quorum_proposal_validated< tracing::debug!("Successfully sent decide event"); if version >= V::Epochs::VERSION { - store_drb_seed_and_result(proposal, task_state, &leaf_views).await?; + // `leaf_views.last()` is never none if we've reached a new decide, so this is safe to + // unwrap. + store_drb_seed_and_result(task_state, &leaf_views.last().unwrap().leaf).await?; } } diff --git a/types/src/data.rs b/types/src/data.rs index fc0ece0b15..7d2d490cdd 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -728,6 +728,7 @@ impl From> for Leaf2 { upgrade_certificate: leaf.upgrade_certificate, block_payload: leaf.block_payload, view_change_evidence: None, + next_drb_result: None, } } } @@ -870,6 +871,13 @@ pub struct Leaf2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, + + /// The DRB result for the next epoch. + /// + /// This is required only for the last block of the epoch. Nodes will verify that it's + /// consistent with the result from their computations. + #[serde(with = "serde_bytes")] + pub next_drb_result: Option, } impl Leaf2 { @@ -922,6 +930,7 @@ impl Leaf2 { block_header: block_header.clone(), block_payload: Some(payload), view_change_evidence: None, + next_drb_result: None, } } /// Time when this leaf was created. @@ -1103,6 +1112,7 @@ impl PartialEq for Leaf2 { upgrade_certificate, block_payload: _, view_change_evidence, + next_drb_result, } = self; *view_number == other.view_number @@ -1112,6 +1122,7 @@ impl PartialEq for Leaf2 { && *block_header == other.block_header && *upgrade_certificate == other.upgrade_certificate && *view_change_evidence == other.view_change_evidence + && *next_drb_result == other.next_drb_result } } @@ -1475,7 +1486,7 @@ impl Leaf2 { block_header, upgrade_certificate, view_change_evidence, - next_drb_result: _, + next_drb_result, } = quorum_proposal; Self { @@ -1487,6 +1498,7 @@ impl Leaf2 { upgrade_certificate: upgrade_certificate.clone(), block_payload: None, view_change_evidence: view_change_evidence.clone(), + next_drb_result: *next_drb_result, } } } From a468f3fc44a6d3674486fa163de69fb17b28b5ce Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 14 Jan 2025 19:41:53 -0500 Subject: [PATCH 1359/1393] Add `epochs_enabled` method on upgrade lock (#4030) --- types/src/message.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/types/src/message.rs b/types/src/message.rs index 877cdd6282..de1fddf37d 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -557,6 +557,11 @@ impl UpgradeLock { } } + /// Return whether epochs are enabled in the given view + pub async fn epochs_enabled(&self, view: TYPES::View) -> bool { + self.version_infallible(view).await >= V::Epochs::VERSION + } + /// Serialize a message with a version number, using `message.view_number()` and an optional decided upgrade certificate to determine the message's version. /// /// # Errors From 8b2de36b2c9aa3263ba3af51a2842af37b86dd96 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Wed, 15 Jan 2025 10:23:33 +0100 Subject: [PATCH 1360/1393] Lr/epochs more tests (#4005) * Add more epoch tests * Do not fail if the new locked view is the same as the stored locked view * Add catchup test for epochs and retrieve stored VIDs after restart * Make sure that the decided view is greater than the old one when GCing * Fix test_with_failures_half_f * Revert "Do not fail if the new locked view is the same as the stored locked view" This reverts commit f4f37ef25ef8490629acbbff10a8da5baa16550f. * Revert "Make sure that the decided view is greater than the old one when GCing" This reverts commit 91af3abbc649b48f8beb44d26c58e09e702ca9e7. * Make sure the new view is greater than the old one before GCing * Return None instead of exiting with Error if no VID * Move epoch tests to a separate file --- example-types/src/state_types.rs | 2 +- example-types/src/storage_types.rs | 11 +- hotshot/src/lib.rs | 9 +- testing/src/spinning_task.rs | 2 + testing/tests/tests_1/test_with_failures_2.rs | 44 ---- .../tests_3/test_with_failures_half_f.rs | 41 +--- testing/tests/tests_6/test_epochs.rs | 227 +++++++++++++++++- types/src/consensus.rs | 12 +- 8 files changed, 251 insertions(+), 97 deletions(-) diff --git a/example-types/src/state_types.rs b/example-types/src/state_types.rs index 9b52e0c662..b30bc2603b 100644 --- a/example-types/src/state_types.rs +++ b/example-types/src/state_types.rs @@ -23,7 +23,7 @@ use rand::Rng; use serde::{Deserialize, Serialize}; use vbs::version::Version; -pub use crate::node_types::TestTypes; +pub use crate::node_types::{TestTwoStakeTablesTypes, TestTypes}; use crate::{ block_types::{TestBlockPayload, TestTransaction}, testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}, diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index b5f3610f21..49d44e9bb0 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -33,11 +33,11 @@ use jf_vid::VidScheme; use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; -type VidShares = HashMap< +type VidShares = BTreeMap< ::View, HashMap<::SignatureKey, Proposal>>, >; -type VidShares2 = HashMap< +type VidShares2 = BTreeMap< ::View, HashMap<::SignatureKey, Proposal>>, >; @@ -61,8 +61,8 @@ pub struct TestStorageState { impl Default for TestStorageState { fn default() -> Self { Self { - vids: HashMap::new(), - vid2: HashMap::new(), + vids: BTreeMap::new(), + vid2: BTreeMap::new(), das: HashMap::new(), da2s: HashMap::new(), proposals: BTreeMap::new(), @@ -127,6 +127,9 @@ impl TestStorage { pub async fn last_actioned_epoch(&self) -> TYPES::Epoch { self.inner.read().await.epoch } + pub async fn vids_cloned(&self) -> VidShares2 { + self.inner.read().await.vid2.clone() + } } #[async_trait] diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index ea213a6338..e6faf52cab 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -47,7 +47,7 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; /// Reexport error type pub use hotshot_types::error::HotShotError; use hotshot_types::{ - consensus::{Consensus, ConsensusMetricsValue, OuterConsensus, View, ViewInner}, + consensus::{Consensus, ConsensusMetricsValue, OuterConsensus, VidShares, View, ViewInner}, constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, data::{Leaf2, QuorumProposal, QuorumProposal2}, event::{EventType, LeafInfo}, @@ -69,7 +69,6 @@ use hotshot_types::{ pub use rand; use tokio::{spawn, time::sleep}; use tracing::{debug, instrument, trace}; - // -- Rexports // External use crate::{ @@ -332,6 +331,7 @@ impl, V: Versions> SystemContext { undecided_state: BTreeMap>, /// Proposals we have sent out to provide to others for catchup saved_proposals: BTreeMap>>, + /// Saved VID shares + saved_vid_shares: Option>, } impl HotShotInitializer { @@ -1048,6 +1050,7 @@ impl HotShotInitializer { undecided_leaves: Vec::new(), undecided_state: BTreeMap::new(), instance_state, + saved_vid_shares: None, }) } @@ -1072,6 +1075,7 @@ impl HotShotInitializer { decided_upgrade_certificate: Option>, undecided_leaves: Vec>, undecided_state: BTreeMap>, + saved_vid_shares: Option>, ) -> Self { Self { inner: anchor_leaf, @@ -1087,6 +1091,7 @@ impl HotShotInitializer { decided_upgrade_certificate, undecided_leaves, undecided_state, + saved_vid_shares, } } } diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 66438d701d..fac9856950 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -166,6 +166,7 @@ where None, Vec::new(), BTreeMap::new(), + None, ); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = @@ -256,6 +257,7 @@ where read_storage.decided_upgrade_certificate().await, Vec::new(), BTreeMap::new(), + Some(read_storage.vids_cloned().await), ); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = ValidatorConfig::generated_from_seed_indexed( diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index cf9ac8c83f..aa4721e696 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -74,50 +74,6 @@ cross_tests!( } ); -cross_tests!( - TestName: test_with_failures_2_with_epochs, - Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], - Types: [TestTwoStakeTablesTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; - metadata.epoch_height = 10; - let dead_nodes = vec![ - ChangeNode { - idx: 10, - updown: NodeAction::Down, - }, - ChangeNode { - idx: 11, - updown: NodeAction::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)] - }; - - // 2 nodes fail triggering view sync, expect no other timeouts - metadata.overall_safety_properties.num_failed_views = 6; - // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 20; - metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ - (ViewNumber::new(5), false), - (ViewNumber::new(11), false), - (ViewNumber::new(17), false), - (ViewNumber::new(23), false), - (ViewNumber::new(29), false), - (ViewNumber::new(35), false), - ]); - - metadata - } -); - cross_tests!( TestName: test_with_double_leader_failures, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index 3dd8e3ceb7..b5fbdfd0a0 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -5,7 +5,7 @@ // along with the HotShot repository. If not, see . use hotshot_example_types::{ - node_types::{EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, + node_types::{Libp2pImpl, MemoryImpl, PushCdnImpl, TestVersions}, state_types::TestTypes, }; use hotshot_macros::cross_tests; @@ -53,42 +53,3 @@ cross_tests!( metadata } ); -cross_tests!( - TestName: test_with_failures_half_f_epochs, - Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Versions: [EpochsTestVersions], - Ignore: false, - Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; - metadata.num_bootstrap_nodes = 17; - metadata.epoch_height = 10; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. - let dead_nodes = vec![ - ChangeNode { - idx: 17, - updown: NodeAction::Down, - }, - ChangeNode { - idx: 18, - updown: NodeAction::Down, - }, - ChangeNode { - idx: 19, - updown: NodeAction::Down, - }, - ]; - - metadata.spinning_properties = SpinningTaskDescription { - node_changes: vec![(5, dead_nodes)] - }; - - metadata.overall_safety_properties.num_failed_views = 3; - // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts - metadata.overall_safety_properties.num_successful_views = 22; - metadata - } -); diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs index c902483860..95d9142a61 100644 --- a/testing/tests/tests_6/test_epochs.rs +++ b/testing/tests/tests_6/test_epochs.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; use hotshot_example_types::{ node_types::{ @@ -18,10 +18,12 @@ use hotshot_macros::cross_tests; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, - test_builder::TestDescription, + test_builder::{TestDescription, TimingData}, view_sync_task::ViewSyncTaskDescription, }; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; cross_tests!( TestName: test_success_with_epochs, @@ -238,3 +240,224 @@ cross_tests!( } }, ); + +cross_tests!( + TestName: test_with_failures_2_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + metadata.epoch_height = 10; + let dead_nodes = vec![ + ChangeNode { + idx: 10, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 11, + updown: NodeAction::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + // 2 nodes fail triggering view sync, expect no other timeouts + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 20; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + (ViewNumber::new(5), false), + (ViewNumber::new(11), false), + (ViewNumber::new(17), false), + (ViewNumber::new(23), false), + (ViewNumber::new(29), false), + (ViewNumber::new(35), false), + ]); + + metadata + } +); + +cross_tests!( + TestName: test_with_double_leader_failures_with_epochs, + Impls: [Libp2pImpl, PushCdnImpl, CombinedImpl], + Types: [TestConsecutiveLeaderTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.num_nodes_with_stake = 12; + metadata.da_staked_committee_size = 12; + metadata.start_nodes = 12; + let dead_nodes = vec![ + ChangeNode { + idx: 5, + updown: NodeAction::Down, + }, + ]; + + // shutdown while node 5 is leader + // we want to trigger `ViewSyncTrigger` during epoch transition + // then ensure we do not fail again as next leader will be leader 2 views also + let view_spin_node_down = 9; + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(view_spin_node_down, dead_nodes)] + }; + + // node 5 is leader twice when we shut down + metadata.overall_safety_properties.num_failed_views = 2; + metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ + // next views after turning node off + (ViewNumber::new(view_spin_node_down + 1), false), + (ViewNumber::new(view_spin_node_down + 2), false) + ]); + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 13; + + // only turning off 1 node, so expected should be num_nodes_with_stake - 1 + let expected_nodes_in_view_sync = metadata.num_nodes_with_stake - 1; + metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(expected_nodes_in_view_sync, expected_nodes_in_view_sync); + + metadata + } +); + +cross_tests!( + TestName: test_with_failures_half_f_epochs, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.epoch_height = 10; + // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the + // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the + // following issue. + let dead_nodes = vec![ + ChangeNode { + idx: 17, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 18, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 19, + updown: NodeAction::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + metadata.overall_safety_properties.num_failed_views = 3; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 19; + metadata + } +); + +cross_tests!( + TestName: test_with_failures_f_epochs, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default_more_nodes(); + metadata.overall_safety_properties.num_failed_views = 6; + // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts + metadata.overall_safety_properties.num_successful_views = 15; + let dead_nodes = vec![ + ChangeNode { + idx: 14, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 15, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 16, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 17, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 18, + updown: NodeAction::Down, + }, + ChangeNode { + idx: 19, + updown: NodeAction::Down, + }, + ]; + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, dead_nodes)] + }; + + metadata + } +); + +cross_tests!( + TestName: test_all_restart_epochs, + Impls: [CombinedImpl, PushCdnImpl], + Types: [TestTypes, TestTypesRandomizedLeader], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let timing_data = TimingData { + next_view_timeout: 2000, + ..Default::default() + }; + let mut metadata = TestDescription::default(); + let mut catchup_nodes = vec![]; + + for i in 0..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }) + } + + metadata.timing_data = timing_data; + metadata.start_nodes = 20; + metadata.num_nodes_with_stake = 20; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 10 + node_changes: vec![(10, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + metadata + }, +); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index d9978a2749..54a093be88 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -411,6 +411,7 @@ impl Consensus { #[allow(clippy::too_many_arguments)] pub fn new( validated_state_map: BTreeMap>, + vid_shares: Option>, cur_view: TYPES::View, cur_epoch: TYPES::Epoch, locked_view: TYPES::View, @@ -426,7 +427,7 @@ impl Consensus { ) -> Self { Consensus { validated_state_map, - vid_shares: BTreeMap::new(), + vid_shares: vid_shares.unwrap_or_default(), saved_da_certs: HashMap::new(), cur_view, cur_epoch, @@ -535,9 +536,8 @@ impl Consensus { }; let parent_vid = self .vid_shares() - .get(&parent_view_number)? - .get(public_key) - .cloned() + .get(&parent_view_number) + .and_then(|inner_map| inner_map.get(public_key).cloned()) .map(|prop| prop.data); Some(LeafInfo { @@ -861,6 +861,10 @@ impl Consensus { /// # Panics /// On inconsistent stored entries pub fn collect_garbage(&mut self, old_anchor_view: TYPES::View, new_anchor_view: TYPES::View) { + // Nothing to collect + if new_anchor_view <= old_anchor_view { + return; + } let gc_view = TYPES::View::new(new_anchor_view.saturating_sub(1)); // state check let anchor_entry = self From 5e92fbfe7467fd8cd350f3ed4e9121fcdab08bc6 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Thu, 16 Jan 2025 12:53:22 -0500 Subject: [PATCH 1361/1393] remove `external` message support (#4034) --- hotshot/src/types/handle.rs | 46 +------------- task-impls/src/network.rs | 18 +----- testing/tests/tests_1/network_task.rs | 92 --------------------------- types/src/event.rs | 8 --- types/src/message.rs | 3 - 5 files changed, 4 insertions(+), 163 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index d1d45473c1..368f7a937c 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,14 +22,11 @@ use hotshot_types::{ consensus::Consensus, data::{Leaf2, QuorumProposal2}, error::HotShotError, - message::{Message, MessageKind, Proposal, RecipientList}, + message::Proposal, request_response::ProposalRequestPayload, traits::{ - consensus_api::ConsensusApi, - election::Membership, - network::{BroadcastDelay, ConnectedNetwork, Topic}, - node_implementation::NodeType, - signature_key::SignatureKey, + consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, + node_implementation::NodeType, signature_key::SignatureKey, }, vote::HasViewNumber, }; @@ -94,43 +91,6 @@ impl + 'static, V: Versions> self.output_event_stream.1.activate_cloned() } - /// Message other participants with a serialized message from the application - /// Receivers of this message will get an `Event::ExternalMessageReceived` via - /// the event stream. - /// - /// # Errors - /// Errors if serializing the request fails, or the request fails to be sent - pub async fn send_external_message( - &self, - msg: Vec, - recipients: RecipientList, - ) -> Result<()> { - let message = Message { - sender: self.public_key().clone(), - kind: MessageKind::External(msg), - }; - let serialized_message = self.hotshot.upgrade_lock.serialize(&message).await?; - - match recipients { - RecipientList::Broadcast => { - self.network - .broadcast_message(serialized_message, Topic::Global, BroadcastDelay::None) - .await?; - } - RecipientList::Direct(recipient) => { - self.network - .direct_message(serialized_message, recipient) - .await?; - } - RecipientList::Many(recipients) => { - self.network - .da_broadcast_message(serialized_message, recipients, BroadcastDelay::None) - .await?; - } - } - Ok(()) - } - /// Request a proposal from the all other nodes. Will block until some node /// returns a valid proposal with the requested commitment. If nobody has the /// proposal this will block forever diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 7d6664f39c..7da90e547b 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -17,7 +17,7 @@ use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, data::{VidDisperse, VidDisperseShare, VidDisperseShare2}, - event::{Event, EventType, HotShotAction}, + event::{Event, HotShotAction}, message::{ convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, UpgradeLock, @@ -226,22 +226,6 @@ impl NetworkMessageTaskState { } } }, - - // Handle external messages - MessageKind::External(data) => { - if sender == self.public_key { - return; - } - // Send the external message to the external event stream so it can be processed - broadcast_event( - Event { - view_number: TYPES::View::new(1), - event: EventType::ExternalMessageReceived { sender, data }, - }, - &self.external_event_stream, - ) - .await; - } } } } diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index a23ae64cd5..94b2ac1dc2 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -110,98 +110,6 @@ async fn test_network_task() { )); } -#[cfg(test)] -#[tokio::test(flavor = "multi_thread")] -async fn test_network_external_mnessages() { - use hotshot::types::EventType; - use hotshot_testing::helpers::build_system_handle_from_launcher; - use hotshot_types::message::RecipientList; - - hotshot::helpers::initialize_logging(); - - let builder: TestDescription = - TestDescription::default_multiple_rounds(); - - let launcher = builder.gen_launcher(0); - - let mut handles = vec![]; - let mut event_streams = vec![]; - for i in 0..launcher.metadata.num_nodes_with_stake { - let handle = build_system_handle_from_launcher::( - i.try_into().unwrap(), - &launcher, - ) - .await - .0; - event_streams.push(handle.event_stream_known_impl()); - handles.push(handle); - } - - // Send a message from 1 -> 2 - handles[1] - .send_external_message(vec![1, 2], RecipientList::Direct(handles[2].public_key())) - .await - .unwrap(); - let event = tokio::time::timeout(Duration::from_millis(100), event_streams[2].recv()) - .await - .unwrap() - .unwrap() - .event; - - // check that 2 received the message - assert!(matches!( - event, - EventType::ExternalMessageReceived { - sender, - data, - } if sender == handles[1].public_key() && data == vec![1, 2] - )); - - // Send a message from 2 -> 1 - handles[2] - .send_external_message(vec![2, 1], RecipientList::Direct(handles[1].public_key())) - .await - .unwrap(); - let event = tokio::time::timeout(Duration::from_millis(100), event_streams[1].recv()) - .await - .unwrap() - .unwrap() - .event; - - // check that 1 received the message - assert!(matches!( - event, - EventType::ExternalMessageReceived { - sender, - data, - } if sender == handles[2].public_key() && data == vec![2,1] - )); - - // Check broadcast works - handles[0] - .send_external_message(vec![0, 0, 0], RecipientList::Broadcast) - .await - .unwrap(); - // All other nodes get the broadcast - for stream in event_streams.iter_mut().skip(1) { - let event = tokio::time::timeout(Duration::from_millis(100), stream.recv()) - .await - .unwrap() - .unwrap() - .event; - assert!(matches!( - event, - EventType::ExternalMessageReceived { - sender, - data, - } if sender == handles[0].public_key() && data == vec![0,0,0] - )); - } - // No event on 0 even after short sleep - tokio::time::sleep(Duration::from_millis(2)).await; - assert!(event_streams[0].is_empty()); -} - #[cfg(test)] #[tokio::test(flavor = "multi_thread")] async fn test_network_storage_fail() { diff --git a/types/src/event.rs b/types/src/event.rs index ae66da6d6d..1b90190693 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -172,14 +172,6 @@ pub enum EventType { /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, - - /// A message destined for external listeners was received - ExternalMessageReceived { - /// Public Key of the message sender - sender: TYPES::SignatureKey, - /// Serialized data of the message - data: Vec, - }, } #[derive(Debug, Serialize, Deserialize, Clone, Copy)] /// A list of actions that we track for nodes diff --git a/types/src/message.rs b/types/src/message.rs index de1fddf37d..ba62574830 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -123,8 +123,6 @@ pub enum MessageKind { Consensus(SequencingMessage), /// Messages relating to sharing data between nodes Data(DataMessage), - /// A (still serialized) message to be passed through to external listeners - External(Vec), } /// List of keys to send a message to, or broadcast to all known keys @@ -162,7 +160,6 @@ impl ViewMessage for MessageKind { ResponseMessage::Found(m) => m.view_number(), ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::View::new(1), }, - MessageKind::External(_) => TYPES::View::new(1), } } } From ec0128815a84813e1abe712214e0cf29ad4c3dd8 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:57:07 -0800 Subject: [PATCH 1362/1393] #3967 Option for everybody (#4007) * 3967 Option for everybody * First check if epochs enabled than divide by epoch_height --------- Co-authored-by: Lukasz Rzasik --- example-types/src/node_types.rs | 5 +- example-types/src/storage_types.rs | 51 ++- examples/infra/mod.rs | 11 +- hotshot/src/lib.rs | 53 +-- hotshot/src/tasks/mod.rs | 15 +- hotshot/src/tasks/task_state.rs | 3 +- .../traits/election/randomized_committee.rs | 34 +- .../election/randomized_committee_members.rs | 288 +++++++++------ .../src/traits/election/static_committee.rs | 34 +- .../static_committee_leader_two_views.rs | 34 +- .../traits/election/two_static_committees.rs | 50 ++- .../src/traits/networking/combined_network.rs | 2 +- .../src/traits/networking/libp2p_network.rs | 4 +- hotshot/src/types/handle.rs | 9 +- libp2p-networking/src/network/transport.rs | 10 +- task-impls/src/consensus/handlers.rs | 58 +-- task-impls/src/consensus/mod.rs | 25 +- task-impls/src/da.rs | 4 +- task-impls/src/events.rs | 37 +- task-impls/src/helpers.rs | 44 +-- task-impls/src/network.rs | 332 +++++++++++++----- task-impls/src/quorum_proposal/handlers.rs | 51 +-- task-impls/src/quorum_proposal/mod.rs | 19 +- .../src/quorum_proposal_recv/handlers.rs | 40 ++- task-impls/src/quorum_proposal_recv/mod.rs | 4 +- task-impls/src/quorum_vote/handlers.rs | 71 ++-- task-impls/src/quorum_vote/mod.rs | 46 +-- task-impls/src/request.rs | 15 +- task-impls/src/response.rs | 6 +- task-impls/src/transactions.rs | 26 +- task-impls/src/upgrade.rs | 4 +- task-impls/src/vid.rs | 42 ++- task-impls/src/view_sync.rs | 8 +- task-impls/src/vote_collection.rs | 35 +- testing/src/byzantine/byzantine_behaviour.rs | 16 +- testing/src/helpers.rs | 22 +- testing/src/overall_safety_task.rs | 21 +- testing/src/predicates/event.rs | 4 +- testing/src/spinning_task.rs | 7 +- testing/src/test_runner.rs | 7 +- testing/src/test_task.rs | 3 +- testing/src/view_generator.rs | 77 ++-- testing/tests/tests_1/da_task.rs | 36 +- testing/tests/tests_1/message.rs | 14 +- testing/tests/tests_1/network_task.rs | 10 +- .../tests_1/quorum_proposal_recv_task.rs | 10 +- testing/tests/tests_1/quorum_proposal_task.rs | 109 +++--- testing/tests/tests_1/quorum_vote_task.rs | 10 +- testing/tests/tests_1/transaction_task.rs | 13 +- .../tests_1/upgrade_task_with_proposal.rs | 23 +- .../tests/tests_1/upgrade_task_with_vote.rs | 10 +- testing/tests/tests_1/vid_task.rs | 26 +- testing/tests/tests_1/view_sync_task.rs | 20 +- .../tests/tests_1/vote_dependency_handle.rs | 6 +- testing/tests/tests_3/byzantine_tests.rs | 4 +- types/src/consensus.rs | 38 +- types/src/data.rs | 167 +++++++-- types/src/event.rs | 4 +- types/src/message.rs | 51 ++- types/src/simple_certificate.rs | 46 +-- types/src/simple_vote.rs | 46 +-- types/src/traits/election.rs | 40 ++- types/src/traits/network.rs | 2 +- types/src/traits/node_implementation.rs | 2 + types/src/traits/storage.rs | 12 +- types/src/utils.rs | 41 ++- types/src/vote.rs | 10 +- 67 files changed, 1478 insertions(+), 899 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 0b1a0989c1..e26fa788af 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -320,7 +320,6 @@ impl Versions for EpochUpgradeTestVersions { mod tests { use committable::{Commitment, Committable}; use hotshot_types::{ - data::EpochNumber, impl_has_epoch, message::UpgradeLock, simple_vote::{HasEpoch, VersionedVoteData}, @@ -333,7 +332,7 @@ mod tests { /// Dummy data used for test struct TestData { data: u64, - epoch: TYPES::Epoch, + epoch: Option, } impl Committable for TestData { @@ -353,7 +352,7 @@ mod tests { let data = TestData { data: 10, - epoch: EpochNumber::new(0), + epoch: None, }; let view_0 = ::View::new(0); diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index 49d44e9bb0..c44d500d12 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -15,8 +15,8 @@ use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, - VidDisperseShare2, + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidDisperseShare, VidDisperseShare2, }, event::HotShotAction, message::Proposal, @@ -50,12 +50,13 @@ pub struct TestStorageState { da2s: HashMap>>, proposals: BTreeMap>>, proposals2: BTreeMap>>, + proposals_wrapper: BTreeMap>>, high_qc: Option>, high_qc2: Option>, next_epoch_high_qc2: Option>, action: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, } impl Default for TestStorageState { @@ -67,11 +68,12 @@ impl Default for TestStorageState { da2s: HashMap::new(), proposals: BTreeMap::new(), proposals2: BTreeMap::new(), + proposals_wrapper: BTreeMap::new(), high_qc: None, next_epoch_high_qc2: None, high_qc2: None, action: TYPES::View::genesis(), - epoch: TYPES::Epoch::genesis(), + epoch: None, } } } @@ -109,22 +111,27 @@ impl TestableDelay for TestStorage { impl TestStorage { pub async fn proposals_cloned( &self, - ) -> BTreeMap>> { - self.inner.read().await.proposals2.clone() + ) -> BTreeMap>> { + self.inner.read().await.proposals_wrapper.clone() } + pub async fn high_qc_cloned(&self) -> Option> { self.inner.read().await.high_qc2.clone() } + pub async fn next_epoch_high_qc_cloned(&self) -> Option> { self.inner.read().await.next_epoch_high_qc2.clone() } + pub async fn decided_upgrade_certificate(&self) -> Option> { self.decided_upgrade_certificate.read().await.clone() } + pub async fn last_actioned_view(&self) -> TYPES::View { self.inner.read().await.action } - pub async fn last_actioned_epoch(&self) -> TYPES::Epoch { + + pub async fn last_actioned_epoch(&self) -> Option { self.inner.read().await.epoch } pub async fn vids_cloned(&self) -> VidShares2 { @@ -171,7 +178,7 @@ impl Storage for TestStorage { _vid_commit: ::Commit, ) -> Result<()> { if self.should_return_err { - bail!("Failed to append VID proposal to storage"); + bail!("Failed to append DA proposal to storage"); } Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; @@ -180,13 +187,14 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_da2( &self, proposal: &Proposal>, _vid_commit: ::Commit, ) -> Result<()> { if self.should_return_err { - bail!("Failed to append VID proposal to storage"); + bail!("Failed to append DA proposal (2) to storage"); } Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; @@ -195,12 +203,13 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_proposal( &self, proposal: &Proposal>, ) -> Result<()> { if self.should_return_err { - bail!("Failed to append VID proposal to storage"); + bail!("Failed to append Quorum proposal (1) to storage"); } Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; @@ -209,12 +218,13 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_proposal2( &self, proposal: &Proposal>, ) -> Result<()> { if self.should_return_err { - bail!("Failed to append VID proposal to storage"); + bail!("Failed to append Quorum proposal (2) to storage"); } Self::run_delay_settings_from_config(&self.delay_config).await; let mut inner = self.inner.write().await; @@ -224,6 +234,21 @@ impl Storage for TestStorage { Ok(()) } + async fn append_proposal_wrapper( + &self, + proposal: &Proposal>, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append Quorum proposal (wrapped) to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + inner + .proposals_wrapper + .insert(proposal.data.view_number(), proposal.clone()); + Ok(()) + } + async fn record_action( &self, view: ::View, @@ -277,6 +302,7 @@ impl Storage for TestStorage { } Ok(()) } + async fn update_next_epoch_high_qc2( &self, new_next_epoch_high_qc: hotshot_types::simple_certificate::NextEpochQuorumCertificate2< @@ -297,6 +323,7 @@ impl Storage for TestStorage { } Ok(()) } + async fn update_undecided_state( &self, _leaves: CommitmentMap>, @@ -308,6 +335,7 @@ impl Storage for TestStorage { Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } + async fn update_undecided_state2( &self, _leaves: CommitmentMap>, @@ -319,6 +347,7 @@ impl Storage for TestStorage { Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) } + async fn update_decided_upgrade_certificate( &self, decided_upgrade_certificate: Option>, diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 85f161fffd..3d31884d81 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -60,6 +60,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType, Versions}, states::TestableState, }, + utils::genesis_epoch_from_version, HotShotConfig, PeerConfig, ValidatorConfig, }; use libp2p_networking::network::{ @@ -524,11 +525,15 @@ pub trait RunDa< .memberships .read() .await - .committee_leaders(TYPES::View::genesis(), TYPES::Epoch::genesis()) + .committee_leaders( + TYPES::View::genesis(), + genesis_epoch_from_version::(), + ) .len(); let consensus_lock = context.hotshot.consensus(); - let consensus = consensus_lock.read().await; - let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); + let consensus_reader = consensus_lock.read().await; + let total_num_views = usize::try_from(consensus_reader.locked_view().u64()).unwrap(); + drop(consensus_reader); // `failed_num_views` could include uncommitted views let failed_num_views = total_num_views - num_successful_commits; // When posting to the orchestrator, note that the total number of views also include un-finalized views. diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e6faf52cab..61f652477c 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -26,6 +26,7 @@ pub mod traits; pub mod types; pub mod tasks; +use hotshot_types::data::QuorumProposalWrapper; /// Contains helper functions for the crate pub mod helpers; @@ -62,13 +63,14 @@ use hotshot_types::{ states::ValidatedState, storage::Storage, }, - utils::epoch_from_block_number, + utils::{genesis_epoch_from_version, option_epoch_from_block_number}, HotShotConfig, }; /// Reexport rand crate pub use rand; use tokio::{spawn, time::sleep}; use tracing::{debug, instrument, trace}; + // -- Rexports // External use crate::{ @@ -121,7 +123,7 @@ pub struct SystemContext, V: Versi start_view: TYPES::View, /// The epoch to enter when first starting consensus - start_epoch: TYPES::Epoch, + start_epoch: Option, /// Access to the output event stream. output_event_stream: (Sender>, InactiveReceiver>), @@ -235,6 +237,7 @@ impl, V: Versions> SystemContext`] with the given configuration options. @@ -245,7 +248,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, @@ -290,10 +293,15 @@ impl, V: Versions> SystemContext( + upgrade_lock + .epochs_enabled(anchored_leaf.view_number()) + .await, anchored_leaf.height(), config.epoch_height, - )); + ); + // Insert the validated state to state map. let mut validated_state_map = BTreeMap::default(); validated_state_map.insert( @@ -322,18 +330,11 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext { /// Starting view number that should be equivalent to the view the node shut down with last. start_view: TYPES::View, + /// Starting epoch number that should be equivalent to the epoch the node shut down with last. - start_epoch: TYPES::Epoch, + start_epoch: Option, + /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. actioned_view: TYPES::View, + /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. high_qc: QuorumCertificate2, + /// Next epoch highest QC that was seen. This is needed to propose during epoch transition after restart. next_epoch_high_qc: Option>, + /// Previously decided upgrade certificate; this is necessary if an upgrade has happened and we are not restarting with the new version decided_upgrade_certificate: Option>, + /// Undecided leaves that were seen, but not yet decided on. These allow a restarting node /// to vote and propose right away if they didn't miss anything while down. undecided_leaves: Vec>, + /// Not yet decided state undecided_state: BTreeMap>, + /// Proposals we have sent out to provide to others for catchup - saved_proposals: BTreeMap>>, + saved_proposals: BTreeMap>>, + /// Saved VID shares saved_vid_shares: Option>, } @@ -1037,11 +1050,11 @@ impl HotShotInitializer { let high_qc = QuorumCertificate2::genesis::(&validated_state, &instance_state).await; Ok(Self { - inner: Leaf2::genesis(&validated_state, &instance_state).await, + inner: Leaf2::genesis::(&validated_state, &instance_state).await, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::View::new(0), - start_epoch: TYPES::Epoch::new(0), + start_epoch: genesis_epoch_from_version::(), actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, @@ -1067,9 +1080,9 @@ impl HotShotInitializer { instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::View, - start_epoch: TYPES::Epoch, + start_epoch: Option, actioned_view: TYPES::View, - saved_proposals: BTreeMap>>, + saved_proposals: BTreeMap>>, high_qc: QuorumCertificate2, next_epoch_high_qc: Option>, decided_upgrade_certificate: Option>, diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index d32a671b94..00ce5efb2c 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -44,8 +44,8 @@ use tokio::{spawn, time::sleep}; use vbs::version::StaticVersionType; use crate::{ - tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + genesis_epoch_from_version, tasks::task_state::CreateTaskState, types::SystemContextHandle, + ConsensusApi, ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, MarketplaceConfig, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, }; @@ -128,15 +128,16 @@ pub fn add_network_message_task< handle: &mut SystemContextHandle, channel: &Arc, ) { - let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { + let upgrade_lock = handle.hotshot.upgrade_lock.clone(); + + let network_state: NetworkMessageTaskState = NetworkMessageTaskState { internal_event_stream: handle.internal_event_stream.0.clone(), external_event_stream: handle.output_event_stream.0.clone(), public_key: handle.public_key().clone(), transactions_cache: lru::LruCache::new(NonZeroUsize::new(100_000).unwrap()), + upgrade_lock: upgrade_lock.clone(), }; - let upgrade_lock = handle.hotshot.upgrade_lock.clone(); - let network = Arc::clone(channel); let mut state = network_state.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); @@ -197,7 +198,7 @@ pub fn add_network_event_task< let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, view: TYPES::View::genesis(), - epoch: TYPES::Epoch::genesis(), + epoch: genesis_epoch_from_version::(), membership, storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), @@ -217,7 +218,7 @@ pub async fn add_consensus_tasks, handle: &mut SystemContextHandle, ) { handle.add_task(ViewSyncTaskState::::create_from(handle).await); - handle.add_task(VidTaskState::::create_from(handle).await); + handle.add_task(VidTaskState::::create_from(handle).await); handle.add_task(DaTaskState::::create_from(handle).await); handle.add_task(TransactionTaskState::::create_from(handle).await); diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index 9023722a66..ef8e811a5f 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -114,7 +114,7 @@ impl, V: Versions> CreateTaskState #[async_trait] impl, V: Versions> CreateTaskState - for VidTaskState + for VidTaskState { async fn create_from(handle: &SystemContextHandle) -> Self { Self { @@ -126,6 +126,7 @@ impl, V: Versions> CreateTaskState public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, + upgrade_lock: handle.hotshot.upgrade_lock.clone(), epoch_height: handle.epoch_height, } } diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index 4046123553..80d9185841 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -104,7 +104,7 @@ impl Membership for RandomizedCommittee { /// Get the stake table for the current view fn stake_table( &self, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -112,7 +112,7 @@ impl Membership for RandomizedCommittee { /// Get the stake table for the current view fn da_stake_table( &self, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.da_stake_table.clone() } @@ -121,7 +121,7 @@ impl Membership for RandomizedCommittee { fn committee_members( &self, _view_number: ::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -133,7 +133,7 @@ impl Membership for RandomizedCommittee { fn da_committee_members( &self, _view_number: ::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.da_stake_table .iter() @@ -145,7 +145,7 @@ impl Membership for RandomizedCommittee { fn committee_leaders( &self, _view_number: ::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -157,7 +157,7 @@ impl Membership for RandomizedCommittee { fn stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() @@ -167,7 +167,7 @@ impl Membership for RandomizedCommittee { fn da_stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_da_stake_table.get(pub_key).cloned() @@ -177,7 +177,7 @@ impl Membership for RandomizedCommittee { fn has_stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> bool { self.indexed_stake_table .get(pub_key) @@ -188,7 +188,7 @@ impl Membership for RandomizedCommittee { fn has_da_stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> bool { self.indexed_da_stake_table .get(pub_key) @@ -203,8 +203,8 @@ impl Membership for RandomizedCommittee { /// Index the vector of public keys with the current view number fn lookup_leader( &self, - view_number: TYPES::View, - _epoch: ::Epoch, + view_number: ::View, + _epoch: Option<::Epoch>, ) -> Result { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); @@ -218,30 +218,30 @@ impl Membership for RandomizedCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self, _epoch: ::Epoch) -> usize { + fn total_nodes(&self, _epoch: Option<::Epoch>) -> usize { self.stake_table.len() } /// Get the total number of nodes in the committee - fn da_total_nodes(&self, _epoch: ::Epoch) -> usize { + fn da_total_nodes(&self, _epoch: Option<::Epoch>) -> usize { self.da_stake_table.len() } /// Get the voting success threshold for the committee - fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn success_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(max( (self.stake_table.len() as u64 * 9) / 10, ((self.stake_table.len() as u64 * 2) / 3) + 1, diff --git a/hotshot/src/traits/election/randomized_committee_members.rs b/hotshot/src/traits/election/randomized_committee_members.rs index 5c85ad9c07..f5a60641e4 100644 --- a/hotshot/src/traits/election/randomized_committee_members.rs +++ b/hotshot/src/traits/election/randomized_committee_members.rs @@ -127,68 +127,90 @@ impl Membership /// Get the stake table for the current view fn stake_table( &self, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { - let filter = self.make_quorum_filter(epoch); - //self.stake_table.clone()s - self.stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| v.clone()) - .collect() + if let Some(epoch) = epoch { + let filter = self.make_quorum_filter(epoch); + //self.stake_table.clone()s + self.stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect() + } else { + self.stake_table.clone() + } } /// Get the da stake table for the current view fn da_stake_table( &self, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { - let filter = self.make_da_quorum_filter(epoch); - //self.stake_table.clone()s - self.da_stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| v.clone()) - .collect() + if let Some(epoch) = epoch { + let filter = self.make_da_quorum_filter(epoch); + //self.stake_table.clone()s + self.da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect() + } else { + self.da_stake_table.clone() + } } /// Get all members of the committee for the current view fn committee_members( &self, _view_number: ::View, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> BTreeSet<::SignatureKey> { - let filter = self.make_quorum_filter(epoch); - self.stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| TYPES::SignatureKey::public_key(v)) - .collect() + if let Some(epoch) = epoch { + let filter = self.make_quorum_filter(epoch); + self.stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect() + } else { + self.stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } } /// Get all members of the committee for the current view fn da_committee_members( &self, _view_number: ::View, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> BTreeSet<::SignatureKey> { - let filter = self.make_da_quorum_filter(epoch); - self.da_stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| TYPES::SignatureKey::public_key(v)) - .collect() + if let Some(epoch) = epoch { + let filter = self.make_da_quorum_filter(epoch); + self.da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect() + } else { + self.da_stake_table + .iter() + .map(TYPES::SignatureKey::public_key) + .collect() + } } /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, view_number: ::View, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> BTreeSet<::SignatureKey> { self.committee_members(view_number, epoch) } @@ -197,23 +219,27 @@ impl Membership fn stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { - let filter = self.make_quorum_filter(epoch); - let actual_members: BTreeSet<_> = self - .stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| TYPES::SignatureKey::public_key(v)) - .collect(); + if let Some(epoch) = epoch { + let filter = self.make_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); - if actual_members.contains(pub_key) { - // Only return the stake if it is above zero - self.indexed_stake_table.get(pub_key).cloned() + if actual_members.contains(pub_key) { + // Only return the stake if it is above zero + self.indexed_stake_table.get(pub_key).cloned() + } else { + // Skip members which aren't included based on the quorum filter + None + } } else { - // Skip members which aren't included based on the quorum filter - None + self.indexed_stake_table.get(pub_key).cloned() } } @@ -221,23 +247,27 @@ impl Membership fn da_stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { - let filter = self.make_da_quorum_filter(epoch); - let actual_members: BTreeSet<_> = self - .da_stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| TYPES::SignatureKey::public_key(v)) - .collect(); + if let Some(epoch) = epoch { + let filter = self.make_da_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); - if actual_members.contains(pub_key) { - // Only return the stake if it is above zero - self.indexed_da_stake_table.get(pub_key).cloned() + if actual_members.contains(pub_key) { + // Only return the stake if it is above zero + self.indexed_da_stake_table.get(pub_key).cloned() + } else { + // Skip members which aren't included based on the quorum filter + None + } } else { - // Skip members which aren't included based on the quorum filter - None + self.indexed_da_stake_table.get(pub_key).cloned() } } @@ -245,24 +275,30 @@ impl Membership fn has_stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> bool { - let filter = self.make_quorum_filter(epoch); - let actual_members: BTreeSet<_> = self - .stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| TYPES::SignatureKey::public_key(v)) - .collect(); + if let Some(epoch) = epoch { + let filter = self.make_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); - if actual_members.contains(pub_key) { + if actual_members.contains(pub_key) { + self.indexed_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + // Skip members which aren't included based on the quorum filter + false + } + } else { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) - } else { - // Skip members which aren't included based on the quorum filter - false } } @@ -270,83 +306,109 @@ impl Membership fn has_da_stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> bool { - let filter = self.make_da_quorum_filter(epoch); - let actual_members: BTreeSet<_> = self - .da_stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| TYPES::SignatureKey::public_key(v)) - .collect(); + if let Some(epoch) = epoch { + let filter = self.make_da_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); - if actual_members.contains(pub_key) { + if actual_members.contains(pub_key) { + self.indexed_da_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + // Skip members which aren't included based on the quorum filter + false + } + } else { self.indexed_da_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) - } else { - // Skip members which aren't included based on the quorum filter - false } } /// Index the vector of public keys with the current view number fn lookup_leader( &self, - view_number: TYPES::View, - epoch: ::Epoch, + view_number: ::View, + epoch: Option<::Epoch>, ) -> Result { - let filter = self.make_quorum_filter(epoch); - let leader_vec: Vec<_> = self - .stake_table - .iter() - .enumerate() - .filter(|(idx, _)| filter.contains(idx)) - .map(|(_, v)| v.clone()) - .collect(); + if let Some(epoch) = epoch { + let filter = self.make_quorum_filter(epoch); + let leader_vec: Vec<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect(); + + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + + let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); + #[allow(clippy::cast_possible_truncation)] + let index = randomized_view_number as usize % leader_vec.len(); - let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + let res = leader_vec[index].clone(); - let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); - #[allow(clippy::cast_possible_truncation)] - let index = randomized_view_number as usize % leader_vec.len(); + Ok(TYPES::SignatureKey::public_key(&res)) + } else { + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + + let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); + #[allow(clippy::cast_possible_truncation)] + let index = randomized_view_number as usize % self.eligible_leaders.len(); - let res = leader_vec[index].clone(); + let res = self.eligible_leaders[index].clone(); - Ok(TYPES::SignatureKey::public_key(&res)) + Ok(TYPES::SignatureKey::public_key(&res)) + } } /// Get the total number of nodes in the committee - fn total_nodes(&self, epoch: ::Epoch) -> usize { - self.make_quorum_filter(epoch).len() + fn total_nodes(&self, epoch: Option<::Epoch>) -> usize { + if let Some(epoch) = epoch { + self.make_quorum_filter(epoch).len() + } else { + self.stake_table.len() + } } /// Get the total number of nodes in the committee - fn da_total_nodes(&self, epoch: ::Epoch) -> usize { - self.make_da_quorum_filter(epoch).len() + fn da_total_nodes(&self, epoch: Option<::Epoch>) -> usize { + if let Some(epoch) = epoch { + self.make_da_quorum_filter(epoch).len() + } else { + self.da_stake_table.len() + } } /// Get the voting success threshold for the committee - fn success_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + fn success_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { let len = self.total_nodes(epoch); NonZeroU64::new(((len as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { let len = self.da_total_nodes(epoch); NonZeroU64::new(((len as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { let len = self.total_nodes(epoch); NonZeroU64::new(((len as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { let len = self.total_nodes(epoch); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index b6010c174d..deae814155 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -101,7 +101,7 @@ impl Membership for StaticCommittee { /// Get the stake table for the current view fn stake_table( &self, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -109,7 +109,7 @@ impl Membership for StaticCommittee { /// Get the stake table for the current view fn da_stake_table( &self, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.da_stake_table.clone() } @@ -118,7 +118,7 @@ impl Membership for StaticCommittee { fn committee_members( &self, _view_number: ::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -130,7 +130,7 @@ impl Membership for StaticCommittee { fn da_committee_members( &self, _view_number: ::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.da_stake_table .iter() @@ -142,7 +142,7 @@ impl Membership for StaticCommittee { fn committee_leaders( &self, _view_number: ::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -154,7 +154,7 @@ impl Membership for StaticCommittee { fn stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() @@ -164,7 +164,7 @@ impl Membership for StaticCommittee { fn da_stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_da_stake_table.get(pub_key).cloned() @@ -174,7 +174,7 @@ impl Membership for StaticCommittee { fn has_stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> bool { self.indexed_stake_table .get(pub_key) @@ -185,7 +185,7 @@ impl Membership for StaticCommittee { fn has_da_stake( &self, pub_key: &::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> bool { self.indexed_da_stake_table .get(pub_key) @@ -195,8 +195,8 @@ impl Membership for StaticCommittee { /// Index the vector of public keys with the current view number fn lookup_leader( &self, - view_number: TYPES::View, - _epoch: ::Epoch, + view_number: ::View, + _epoch: Option<::Epoch>, ) -> Result { #[allow(clippy::cast_possible_truncation)] let index = *view_number as usize % self.eligible_leaders.len(); @@ -205,32 +205,32 @@ impl Membership for StaticCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self, _epoch: ::Epoch) -> usize { + fn total_nodes(&self, _epoch: Option<::Epoch>) -> usize { self.stake_table.len() } /// Get the total number of DA nodes in the committee - fn da_total_nodes(&self, _epoch: ::Epoch) -> usize { + fn da_total_nodes(&self, _epoch: Option<::Epoch>) -> usize { self.da_stake_table.len() } /// Get the voting success threshold for the committee - fn success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn success_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { let len = self.stake_table.len(); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index d2635cc273..60f98684b9 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -102,7 +102,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, + _epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -110,7 +110,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, + _epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.da_stake_table.clone() } @@ -119,7 +119,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -131,7 +131,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.da_stake_table .iter() @@ -143,7 +143,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::View, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -155,7 +155,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() @@ -165,7 +165,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_da_stake_table.get(pub_key).cloned() @@ -175,7 +175,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> bool { self.indexed_stake_table .get(pub_key) @@ -186,7 +186,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, - _epoch: ::Epoch, + _epoch: Option<::Epoch>, ) -> bool { self.indexed_da_stake_table .get(pub_key) @@ -196,8 +196,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, + view_number: ::View, + _epoch: Option<::Epoch>, ) -> Result { let index = usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); @@ -207,32 +207,32 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch) -> usize { + fn total_nodes(&self, _epoch: Option<::Epoch>) -> usize { self.stake_table.len() } /// Get the total number of DA nodes in the committee - fn da_total_nodes(&self, _epoch: ::Epoch) -> usize { + fn da_total_nodes(&self, _epoch: Option<::Epoch>) -> usize { self.da_stake_table.len() } /// Get the voting success threshold for the committee - fn success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn success_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, _epoch: TYPES::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: Option<::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } } diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs index 25af69905c..f6d5642d9f 100644 --- a/hotshot/src/traits/election/two_static_committees.rs +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -178,8 +178,9 @@ impl Membership for TwoStaticCommittees { /// Get the stake table for the current view fn stake_table( &self, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.stake_table.0.clone() } else { @@ -190,8 +191,9 @@ impl Membership for TwoStaticCommittees { /// Get the stake table for the current view fn da_stake_table( &self, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.da_stake_table.0.clone() } else { @@ -203,8 +205,9 @@ impl Membership for TwoStaticCommittees { fn committee_members( &self, _view_number: ::View, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.stake_table .0 @@ -224,8 +227,9 @@ impl Membership for TwoStaticCommittees { fn da_committee_members( &self, _view_number: ::View, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.da_stake_table .0 @@ -245,8 +249,9 @@ impl Membership for TwoStaticCommittees { fn committee_leaders( &self, _view_number: ::View, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> std::collections::BTreeSet<::SignatureKey> { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.eligible_leaders .0 @@ -266,9 +271,10 @@ impl Membership for TwoStaticCommittees { fn stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.indexed_stake_table.0.get(pub_key).cloned() } else { @@ -280,9 +286,10 @@ impl Membership for TwoStaticCommittees { fn da_stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.indexed_da_stake_table.0.get(pub_key).cloned() } else { @@ -294,8 +301,9 @@ impl Membership for TwoStaticCommittees { fn has_stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> bool { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.indexed_stake_table .0 @@ -313,8 +321,9 @@ impl Membership for TwoStaticCommittees { fn has_da_stake( &self, pub_key: &::SignatureKey, - epoch: ::Epoch, + epoch: Option<::Epoch>, ) -> bool { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.indexed_da_stake_table .0 @@ -331,9 +340,10 @@ impl Membership for TwoStaticCommittees { /// Index the vector of public keys with the current view number fn lookup_leader( &self, - view_number: TYPES::View, - epoch: ::Epoch, + view_number: ::View, + epoch: Option<::Epoch>, ) -> Result { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { #[allow(clippy::cast_possible_truncation)] let index = *view_number as usize % self.eligible_leaders.0.len(); @@ -348,7 +358,8 @@ impl Membership for TwoStaticCommittees { } /// Get the total number of nodes in the committee - fn total_nodes(&self, epoch: ::Epoch) -> usize { + fn total_nodes(&self, epoch: Option<::Epoch>) -> usize { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.stake_table.0.len() } else { @@ -357,7 +368,8 @@ impl Membership for TwoStaticCommittees { } /// Get the total number of DA nodes in the committee - fn da_total_nodes(&self, epoch: ::Epoch) -> usize { + fn da_total_nodes(&self, epoch: Option<::Epoch>) -> usize { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { self.da_stake_table.0.len() } else { @@ -366,7 +378,8 @@ impl Membership for TwoStaticCommittees { } /// Get the voting success threshold for the committee - fn success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + fn success_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { NonZeroU64::new(((self.stake_table.0.len() as u64 * 2) / 3) + 1).unwrap() } else { @@ -375,7 +388,8 @@ impl Membership for TwoStaticCommittees { } /// Get the voting success threshold for the committee - fn da_success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + fn da_success_threshold(&self, epoch: Option) -> NonZeroU64 { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { NonZeroU64::new(((self.da_stake_table.0.len() as u64 * 2) / 3) + 1).unwrap() } else { @@ -384,7 +398,8 @@ impl Membership for TwoStaticCommittees { } /// Get the voting failure threshold for the committee - fn failure_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + fn failure_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { NonZeroU64::new(((self.stake_table.0.len() as u64) / 3) + 1).unwrap() } else { @@ -393,7 +408,8 @@ impl Membership for TwoStaticCommittees { } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64 { + fn upgrade_threshold(&self, epoch: Option<::Epoch>) -> NonZeroU64 { + let epoch = epoch.expect("epochs cannot be disabled with TwoStaticCommittees"); if *epoch != 0 && *epoch % 2 == 0 { NonZeroU64::new(max( (self.stake_table.0.len() as u64 * 9) / 10, diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 7021c3e892..298500b473 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -478,7 +478,7 @@ impl ConnectedNetwork for CombinedNetworks async fn update_view<'a, T>( &'a self, view: u64, - epoch: u64, + epoch: Option, membership: Arc>, ) where T: NodeType + 'a, diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index b832f751b7..d288c3e360 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -991,13 +991,13 @@ impl ConnectedNetwork for Libp2pNetwork { async fn update_view<'a, TYPES>( &'a self, view: u64, - epoch: u64, + epoch: Option, membership: Arc>, ) where TYPES: NodeType + 'a, { let future_view = ::View::new(view) + LOOK_AHEAD; - let epoch = ::Epoch::new(epoch); + let epoch = epoch.map(::Epoch::new); let future_leader = match membership.read().await.leader(future_view, epoch) { Ok(l) => l, diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 368f7a937c..15b450f00d 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -20,7 +20,7 @@ use hotshot_task::{ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ consensus::Consensus, - data::{Leaf2, QuorumProposal2}, + data::{Leaf2, QuorumProposalWrapper}, error::HotShotError, message::Proposal, request_response::ProposalRequestPayload, @@ -28,7 +28,6 @@ use hotshot_types::{ consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, node_implementation::NodeType, signature_key::SignatureKey, }, - vote::HasViewNumber, }; use tracing::instrument; @@ -101,7 +100,7 @@ impl + 'static, V: Versions> &self, view: TYPES::View, leaf_commitment: Commitment>, - ) -> Result>>>> + ) -> Result>>>> { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. @@ -285,7 +284,7 @@ impl + 'static, V: Versions> pub async fn leader( &self, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, ) -> Result { self.hotshot .memberships @@ -325,7 +324,7 @@ impl + 'static, V: Versions> /// Wrapper to get the epoch number this node is on. #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] - pub async fn cur_epoch(&self) -> TYPES::Epoch { + pub async fn cur_epoch(&self) -> Option { self.hotshot.consensus.read().await.cur_epoch() } diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index 458b7f032b..ca27124ea4 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -10,9 +10,7 @@ use anyhow::{ensure, Context, Result as AnyhowResult}; use async_lock::RwLock; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use hotshot_types::traits::{ - election::Membership, - node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, + election::Membership, node_implementation::NodeType, signature_key::SignatureKey, }; use libp2p::{ core::{ @@ -137,11 +135,7 @@ impl StakeTableAuthentica } // Check if the public key is in the stake table - if !stake_table - .read() - .await - .has_stake(&public_key, Types::Epoch::new(0)) - { + if !stake_table.read().await.has_stake(&public_key, None) { return Err(anyhow::anyhow!("Peer not in stake table")); } } diff --git a/task-impls/src/consensus/handlers.rs b/task-impls/src/consensus/handlers.rs index 437e066ea0..402c7151bb 100644 --- a/task-impls/src/consensus/handlers.rs +++ b/task-impls/src/consensus/handlers.rs @@ -78,26 +78,28 @@ pub(crate) async fn handle_quorum_vote_recv< ) .await?; - // If the vote sender belongs to the next epoch, collect it separately to form the second QC - let has_stake = task_state - .membership - .read() - .await - .has_stake(&vote.signing_key(), vote.epoch() + 1); - if has_stake { - handle_vote( - &mut task_state.next_epoch_vote_collectors, - &vote.clone().into(), - task_state.public_key.clone(), - &task_state.membership, - vote.data.epoch, - task_state.id, - &event, - sender, - &task_state.upgrade_lock, - transition_indicator, - ) - .await?; + if let Some(vote_epoch) = vote.epoch() { + // If the vote sender belongs to the next epoch, collect it separately to form the second QC + let has_stake = task_state + .membership + .read() + .await + .has_stake(&vote.signing_key(), Some(vote_epoch + 1)); + if has_stake { + handle_vote( + &mut task_state.next_epoch_vote_collectors, + &vote.clone().into(), + task_state.public_key.clone(), + &task_state.membership, + vote.data.epoch, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + transition_indicator, + ) + .await?; + } } Ok(()) @@ -166,7 +168,7 @@ pub async fn send_high_qc( new_view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { if epoch_number > task_state.cur_epoch { task_state.cur_epoch = epoch_number; - let _ = task_state - .consensus - .write() - .await - .update_epoch(epoch_number); - tracing::info!("Progress: entered epoch {:>6}", *epoch_number); + if let Some(new_epoch) = epoch_number { + let _ = task_state.consensus.write().await.update_epoch(new_epoch); + tracing::info!("Progress: entered epoch {:>6}", *new_epoch); + } } ensure!( @@ -318,7 +318,7 @@ pub(crate) async fn handle_view_change< #[instrument(skip_all)] pub(crate) async fn handle_timeout, V: Versions>( view_number: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { diff --git a/task-impls/src/consensus/mod.rs b/task-impls/src/consensus/mod.rs index fe25a9ec2e..d117b5fec0 100644 --- a/task-impls/src/consensus/mod.rs +++ b/task-impls/src/consensus/mod.rs @@ -18,10 +18,10 @@ use hotshot_types::{ simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, TimeoutCertificate2}, simple_vote::{NextEpochQuorumVote2, QuorumVote2, TimeoutVote2}, traits::{ - node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, - utils::epoch_from_block_number, + utils::option_epoch_from_block_number, vote::HasViewNumber, }; use tokio::task::JoinHandle; @@ -75,7 +75,7 @@ pub struct ConsensusTaskState, V: pub cur_view_time: i64, /// The epoch number that this node is currently executing in. - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -101,7 +101,7 @@ pub struct ConsensusTaskState, V: impl, V: Versions> ConsensusTaskState { /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, cur_epoch = *self.cur_epoch), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, cur_epoch = self.cur_epoch.map(|x| *x)), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -135,6 +135,11 @@ impl, V: Versions> ConsensusTaskSt } } HotShotEvent::Qc2Formed(Either::Left(quorum_cert)) => { + let cert_view = quorum_cert.view_number(); + if !self.upgrade_lock.epochs_enabled(cert_view).await { + tracing::debug!("QC2 formed but epochs not enabled. Do nothing"); + return Ok(()); + } if !self .consensus .read() @@ -144,7 +149,6 @@ impl, V: Versions> ConsensusTaskSt tracing::debug!("We formed QC but not eQC. Do nothing"); return Ok(()); } - let cert_view = quorum_cert.view_number(); let cert_block_number = self .consensus .read() @@ -155,14 +159,17 @@ impl, V: Versions> ConsensusTaskSt "Could not find the leaf for the eQC. It shouldn't happen." ))? .height(); - let cert_epoch = TYPES::Epoch::new(epoch_from_block_number( + + let cert_epoch = option_epoch_from_block_number::( + true, cert_block_number, self.epoch_height, - )); + ); // Transition to the new epoch by sending ViewChange - tracing::info!("Entering new epoch: {:?}", cert_epoch + 1); + let next_epoch = cert_epoch.map(|x| x + 1); + tracing::info!("Entering new epoch: {:?}", next_epoch); broadcast_event( - Arc::new(HotShotEvent::ViewChange(cert_view + 1, cert_epoch + 1)), + Arc::new(HotShotEvent::ViewChange(cert_view + 1, next_epoch)), &sender, ) .await; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 0a03f09be4..f26a201f15 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -49,7 +49,7 @@ pub struct DaTaskState, V: Version pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -83,7 +83,7 @@ pub struct DaTaskState, V: Version impl, V: Versions> DaTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "DA Main Task", level = "error", target = "DaTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "DA Main Task", level = "error", target = "DaTaskState")] pub async fn handle( &mut self, event: Arc>, diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 195b2137cd..560a54b0ee 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -11,8 +11,8 @@ use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{ - DaProposal2, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, - VidDisperseShare2, + DaProposal2, Leaf2, PackedBundle, QuorumProposal2, QuorumProposalWrapper, UpgradeProposal, + VidDisperse, VidDisperseShare2, }, message::Proposal, request_response::ProposalRequestPayload, @@ -72,7 +72,10 @@ pub enum HotShotEvent { /// Shutdown the task Shutdown, /// A quorum proposal has been received from the network; handled by the consensus task - QuorumProposalRecv(Proposal>, TYPES::SignatureKey), + QuorumProposalRecv( + Proposal>, + TYPES::SignatureKey, + ), /// A quorum vote has been received from the network; handled by the consensus task QuorumVoteRecv(QuorumVote2), /// A timeout vote received from the network; handled by consensus task @@ -90,7 +93,10 @@ pub enum HotShotEvent { /// A DAC is validated. DaCertificateValidated(DaCertificate2), /// Send a quorum proposal to the network; emitted by the leader in the consensus task - QuorumProposalSend(Proposal>, TYPES::SignatureKey), + QuorumProposalSend( + Proposal>, + TYPES::SignatureKey, + ), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote2), /// Broadcast a quorum vote to form an eQC; emitted by a replica in the consensus task after seeing a valid quorum proposal @@ -101,7 +107,7 @@ pub enum HotShotEvent { /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid /// 4. The proposal passes either liveness or safety check. - QuorumProposalValidated(Proposal>, Leaf2), + QuorumProposalValidated(Proposal>, Leaf2), /// A quorum proposal is missing for a view that we need. QuorumProposalRequestSend( ProposalRequestPayload, @@ -113,9 +119,12 @@ pub enum HotShotEvent { ::PureAssembledSignatureType, ), /// A quorum proposal was missing for a view. As the leader, we send a reply to the recipient with their key. - QuorumProposalResponseSend(TYPES::SignatureKey, Proposal>), + QuorumProposalResponseSend( + TYPES::SignatureKey, + Proposal>, + ), /// A quorum proposal was requested by a node for a view. - QuorumProposalResponseRecv(Proposal>), + QuorumProposalResponseRecv(Proposal>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal @@ -129,7 +138,7 @@ pub enum HotShotEvent { /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks - ViewChange(TYPES::View, TYPES::Epoch), + ViewChange(TYPES::View, Option), /// Timeout for the view sync protocol; emitted by a replica in the view sync task ViewSyncTimeout(TYPES::View, u64, ViewSyncPhase), @@ -164,7 +173,7 @@ pub enum HotShotEvent { /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::View), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only - Timeout(TYPES::View, TYPES::Epoch), + Timeout(TYPES::View, Option), /// Receive transactions from the network TransactionsRecv(Vec), /// Send transactions to the network @@ -208,7 +217,7 @@ pub enum HotShotEvent { /// 1. The proposal is not for an old view /// 2. The proposal has been correctly signed by the leader of the current view /// 3. The justify QC is valid - QuorumProposalPreliminarilyValidated(Proposal>), + QuorumProposalPreliminarilyValidated(Proposal>), /// Send a VID request to the network; emitted to on of the members of DA committee. /// Includes the data request, node's public key and signature as well as public key of DA committee who we want to send to. @@ -570,14 +579,14 @@ impl Display for HotShotEvent { write!( f, "QuorumProposalResponseSend(view_number={:?})", - proposal.data.view_number + proposal.data.view_number() ) } HotShotEvent::QuorumProposalResponseRecv(proposal) => { write!( f, "QuorumProposalResponseRecv(view_number={:?})", - proposal.data.view_number + proposal.data.view_number() ) } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { @@ -597,14 +606,14 @@ impl Display for HotShotEvent { write!( f, "VidResponseSend(view_number={:?}", - proposal.data.view_number + proposal.data.view_number() ) } HotShotEvent::VidResponseRecv(_, proposal) => { write!( f, "VidResponseRecv(view_number={:?}", - proposal.data.view_number + proposal.data.view_number() ) } HotShotEvent::HighQcRecv(qc, _) => { diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index b55a26b74a..a9e58ffb3c 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -15,7 +15,7 @@ use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf2, QuorumProposal2, ViewChangeEvidence2}, + data::{Leaf2, QuorumProposalWrapper, ViewChangeEvidence2}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, request_response::ProposalRequestPayload, @@ -29,7 +29,8 @@ use hotshot_types::{ BlockPayload, ValidatedState, }, utils::{ - epoch_from_block_number, is_epoch_root, is_last_block_in_epoch, Terminator, View, ViewInner, + epoch_from_block_number, is_epoch_root, is_last_block_in_epoch, + option_epoch_from_block_number, Terminator, View, ViewInner, }, vote::{Certificate, HasViewNumber}, }; @@ -126,7 +127,7 @@ pub(crate) async fn fetch_proposal( }; let view_number = proposal.data.view_number(); - let justify_qc = proposal.data.justify_qc.clone(); + let justify_qc = proposal.data.justify_qc().clone(); let justify_qc_epoch = justify_qc.data.epoch(); @@ -148,7 +149,7 @@ pub(crate) async fn fetch_proposal( let mut consensus_writer = consensus.write().await; let leaf = Leaf2::from_quorum_proposal(&proposal.data); let state = Arc::new( - >::from_header(&proposal.data.block_header), + >::from_header(proposal.data.block_header()), ); if let Err(e) = consensus_writer.update_leaf(leaf.clone(), Arc::clone(&state), None) { @@ -250,7 +251,7 @@ impl Default for LeafChainTraversalOutcome { /// If the leaf chain contains no decided leaf while reaching a decided view, which should be /// impossible. pub async fn decide_from_proposal_2( - proposal: &QuorumProposal2, + proposal: &QuorumProposalWrapper, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, @@ -371,7 +372,7 @@ pub async fn decide_from_proposal_2( /// If the leaf chain contains no decided leaf while reaching a decided view, which should be /// impossible. pub async fn decide_from_proposal( - proposal: &QuorumProposal2, + proposal: &QuorumProposalWrapper, consensus: OuterConsensus, existing_upgrade_cert: Arc>>>, public_key: &TYPES::SignatureKey, @@ -381,7 +382,7 @@ pub async fn decide_from_proposal( let consensus_reader = consensus.read().await; let existing_upgrade_cert_reader = existing_upgrade_cert.read().await; let view_number = proposal.view_number(); - let parent_view_number = proposal.justify_qc.view_number(); + let parent_view_number = proposal.justify_qc().view_number(); let old_anchor_view = consensus_reader.last_decided_view(); let mut last_view_number_visited = view_number; @@ -573,7 +574,7 @@ pub async fn validate_proposal_safety_and_liveness< I: NodeImplementation, V: Versions, >( - proposal: Proposal>, + proposal: Proposal>, parent_leaf: Leaf2, validation_info: &ValidationInfo, event_stream: Sender>>, @@ -590,7 +591,7 @@ pub async fn validate_proposal_safety_and_liveness< epoch_from_block_number(proposed_leaf.height(), validation_info.epoch_height); let state = Arc::new( - >::from_header(&proposal.data.block_header), + >::from_header(proposal.data.block_header()), ); { @@ -607,9 +608,11 @@ pub async fn validate_proposal_safety_and_liveness< } UpgradeCertificate::validate( - &proposal.data.upgrade_certificate, + proposal.data.upgrade_certificate(), &validation_info.membership, - TYPES::Epoch::new(proposal_epoch), + proposed_leaf + .with_epoch + .then(|| TYPES::Epoch::new(proposal_epoch)), // #3967 how do we know if proposal_epoch should be Some() or None? &validation_info.upgrade_lock, ) .await?; @@ -622,7 +625,7 @@ pub async fn validate_proposal_safety_and_liveness< ) .await?; - let justify_qc = proposal.data.justify_qc.clone(); + let justify_qc = proposal.data.justify_qc().clone(); // Create a positive vote if either liveness or safety check // passes. @@ -648,7 +651,7 @@ pub async fn validate_proposal_safety_and_liveness< // Make sure that the epoch transition proposal includes the next epoch QC if is_last_block_in_epoch(parent_leaf.height(), validation_info.epoch_height) { - ensure!(proposal.data.next_epoch_justify_qc.is_some(), + ensure!(proposal.data.next_epoch_justify_qc().is_some(), "Epoch transition proposal does not include the next epoch justify QC. Do not vote!"); } @@ -722,7 +725,7 @@ pub(crate) async fn validate_proposal_view_and_certs< I: NodeImplementation, V: Versions, >( - proposal: &Proposal>, + proposal: &Proposal>, validation_info: &ValidationInfo, ) -> Result<()> { let view_number = proposal.data.view_number(); @@ -738,9 +741,9 @@ pub(crate) async fn validate_proposal_view_and_certs< drop(membership_reader); // Verify a timeout certificate OR a view sync certificate exists and is valid. - if proposal.data.justify_qc.view_number() != view_number - 1 { + if proposal.data.justify_qc().view_number() != view_number - 1 { let received_proposal_cert = - proposal.data.view_change_evidence.clone().context(debug!( + proposal.data.view_change_evidence().clone().context(debug!( "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", *view_number ))?; @@ -806,12 +809,13 @@ pub(crate) async fn validate_proposal_view_and_certs< // Validate the upgrade certificate -- this is just a signature validation. // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. { - let epoch = TYPES::Epoch::new(epoch_from_block_number( - proposal.data.block_header.block_number(), + let epoch = option_epoch_from_block_number::( + proposal.data.with_epoch, + proposal.data.block_header().block_number(), validation_info.epoch_height, - )); + ); UpgradeCertificate::validate( - &proposal.data.upgrade_certificate, + proposal.data.upgrade_certificate(), &validation_info.membership, epoch, &validation_info.upgrade_lock, diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 7da90e547b..a5b7b6df01 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -37,7 +37,6 @@ use hotshot_types::{ use tokio::{spawn, task::JoinHandle}; use tracing::instrument; use utils::anytrace::*; -use vbs::version::StaticVersionType; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -46,7 +45,7 @@ use crate::{ /// the network message task state #[derive(Clone)] -pub struct NetworkMessageTaskState { +pub struct NetworkMessageTaskState { /// Sender to send internal events this task generates to other tasks pub internal_event_stream: Sender>>, @@ -58,9 +57,12 @@ pub struct NetworkMessageTaskState { /// Transaction Cache to ignore previously seen transactions pub transactions_cache: lru::LruCache, + + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, } -impl NetworkMessageTaskState { +impl NetworkMessageTaskState { #[instrument(skip_all, name = "Network message task", level = "trace")] /// Handles a (deserialized) message from the network pub async fn handle_message(&mut self, message: Message) { @@ -74,68 +76,228 @@ impl NetworkMessageTaskState { let event = match consensus_message { SequencingMessage::General(general_message) => match general_message { GeneralConsensusMessage::Proposal(proposal) => { + if self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::Proposal for view {} but epochs are enabled for that view", proposal.data.view_number()); + return; + } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) } GeneralConsensusMessage::Proposal2(proposal) => { - HotShotEvent::QuorumProposalRecv(proposal, sender) + if !self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::Proposal2 for view {} but epochs are not enabled for that view", proposal.data.view_number()); + return; + } + HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) } GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) } GeneralConsensusMessage::ProposalResponse(proposal) => { + if self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ProposalResponse for view {} but epochs are enabled for that view", proposal.data.view_number()); + return; + } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) } GeneralConsensusMessage::ProposalResponse2(proposal) => { - HotShotEvent::QuorumProposalResponseRecv(proposal) + if !self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ProposalResponse2 for view {} but epochs are not enabled for that view", proposal.data.view_number()); + return; + } + HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) } GeneralConsensusMessage::Vote(vote) => { + if self.upgrade_lock.epochs_enabled(vote.view_number()).await { + tracing::warn!("received GeneralConsensusMessage::Vote for view {} but epochs are enabled for that view", vote.view_number()); + return; + } HotShotEvent::QuorumVoteRecv(vote.to_vote2()) } - GeneralConsensusMessage::Vote2(vote) => HotShotEvent::QuorumVoteRecv(vote), + GeneralConsensusMessage::Vote2(vote) => { + if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { + tracing::warn!("received GeneralConsensusMessage::Vote2 for view {} but epochs are not enabled for that view", vote.view_number()); + return; + } + HotShotEvent::QuorumVoteRecv(vote) + } GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { + if self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncPreCommitVote for view {} but epochs are enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message.to_vote2()) } GeneralConsensusMessage::ViewSyncPreCommitVote2(view_sync_message) => { + if !self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncPreCommitVote2 for view {} but epochs are not enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, - ) => HotShotEvent::ViewSyncPreCommitCertificateRecv( - view_sync_message.to_vsc2(), - ), + ) => { + if self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncPreCommitCertificate for view {} but epochs are enabled for that view", view_sync_message.view_number()); + return; + } + HotShotEvent::ViewSyncPreCommitCertificateRecv( + view_sync_message.to_vsc2(), + ) + } GeneralConsensusMessage::ViewSyncPreCommitCertificate2( view_sync_message, - ) => HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message), + ) => { + if !self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncPreCommitCertificate2 for view {} but epochs are not enabled for that view", view_sync_message.view_number()); + return; + } + HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message) + } GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { + if self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncCommitVote for view {} but epochs are enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message.to_vote2()) } GeneralConsensusMessage::ViewSyncCommitVote2(view_sync_message) => { + if !self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncCommitVote2 for view {} but epochs are not enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { + if self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncCommitCertificate for view {} but epochs are enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message.to_vsc2()) } GeneralConsensusMessage::ViewSyncCommitCertificate2(view_sync_message) => { + if !self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncCommitCertificate2 for view {} but epochs are not enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { + if self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncFinalizeVote for view {} but epochs are enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message.to_vote2()) } GeneralConsensusMessage::ViewSyncFinalizeVote2(view_sync_message) => { + if !self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncFinalizeVote2 for view {} but epochs are not enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { + if self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncFinalizeCertificate for view {} but epochs are enabled for that view", view_sync_message.view_number()); + return; + } HotShotEvent::ViewSyncFinalizeCertificateRecv( view_sync_message.to_vsc2(), ) } GeneralConsensusMessage::ViewSyncFinalizeCertificate2( view_sync_message, - ) => HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message), + ) => { + if !self + .upgrade_lock + .epochs_enabled(view_sync_message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::ViewSyncFinalizeCertificate2 for view {} but epochs are not enabled for that view", view_sync_message.view_number()); + return; + } + HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) + } GeneralConsensusMessage::TimeoutVote(message) => { + if self + .upgrade_lock + .epochs_enabled(message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::TimeoutVote for view {} but epochs are enabled for that view", message.view_number()); + return; + } HotShotEvent::TimeoutVoteRecv(message.to_vote2()) } GeneralConsensusMessage::TimeoutVote2(message) => { + if !self + .upgrade_lock + .epochs_enabled(message.view_number()) + .await + { + tracing::warn!("received GeneralConsensusMessage::TimeoutVote2 for view {} but epochs are not enabled for that view", message.view_number()); + return; + } HotShotEvent::TimeoutVoteRecv(message) } GeneralConsensusMessage::UpgradeProposal(message) => { @@ -149,27 +311,77 @@ impl NetworkMessageTaskState { }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { + if self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received DaConsensusMessage::DaProposal for view {} but epochs are enabled for that view", proposal.data.view_number()); + return; + } HotShotEvent::DaProposalRecv(convert_proposal(proposal), sender) } + DaConsensusMessage::DaProposal2(proposal) => { + if !self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received DaConsensusMessage::DaProposal2 for view {} but epochs are not enabled for that view", proposal.data.view_number()); + return; + } + HotShotEvent::DaProposalRecv(proposal, sender) + } DaConsensusMessage::DaVote(vote) => { + if self.upgrade_lock.epochs_enabled(vote.view_number()).await { + tracing::warn!("received DaConsensusMessage::DaVote for view {} but epochs are enabled for that view", vote.view_number()); + return; + } HotShotEvent::DaVoteRecv(vote.clone().to_vote2()) } + DaConsensusMessage::DaVote2(vote) => { + if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { + tracing::warn!("received DaConsensusMessage::DaVote2 for view {} but epochs are not enabled for that view", vote.view_number()); + return; + } + HotShotEvent::DaVoteRecv(vote.clone()) + } DaConsensusMessage::DaCertificate(cert) => { + if self.upgrade_lock.epochs_enabled(cert.view_number()).await { + tracing::warn!("received DaConsensusMessage::DaCertificate for view {} but epochs are enabled for that view", cert.view_number()); + return; + } HotShotEvent::DaCertificateRecv(cert.to_dac2()) } + DaConsensusMessage::DaCertificate2(cert) => { + if !self.upgrade_lock.epochs_enabled(cert.view_number()).await { + tracing::warn!("received DaConsensusMessage::DaCertificate2 for view {} but epochs are not enabled for that view", cert.view_number()); + return; + } + HotShotEvent::DaCertificateRecv(cert) + } DaConsensusMessage::VidDisperseMsg(proposal) => { + if self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received DaConsensusMessage::VidDisperseMsg for view {} but epochs are enabled for that view", proposal.data.view_number()); + return; + } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) } DaConsensusMessage::VidDisperseMsg2(proposal) => { + if !self + .upgrade_lock + .epochs_enabled(proposal.data.view_number()) + .await + { + tracing::warn!("received DaConsensusMessage::VidDisperseMsg2 for view {} but epochs are not enabled for that view", proposal.data.view_number()); + return; + } HotShotEvent::VidShareRecv(sender, proposal) } - DaConsensusMessage::DaProposal2(proposal) => { - HotShotEvent::DaProposalRecv(proposal, sender) - } - DaConsensusMessage::DaVote2(vote) => HotShotEvent::DaVoteRecv(vote.clone()), - DaConsensusMessage::DaCertificate2(cert) => { - HotShotEvent::DaCertificateRecv(cert) - } }, }; broadcast_event(Arc::new(event), &self.internal_event_stream).await; @@ -244,7 +456,7 @@ pub struct NetworkEventTaskState< pub view: TYPES::View, /// epoch number - pub epoch: TYPES::Epoch, + pub epoch: Option, /// network memberships pub membership: Arc>, @@ -321,9 +533,8 @@ impl< let recipient = proposal.data.recipient_key.clone(); let message = if self .upgrade_lock - .version_infallible(proposal.data.view_number()) + .epochs_enabled(proposal.data.view_number()) .await - >= V::Epochs::VERSION { Message { sender: sender.clone(), @@ -440,12 +651,11 @@ impl< let message = if self .upgrade_lock - .version_infallible(proposal.data.view_number()) + .epochs_enabled(proposal.data.view_number()) .await - >= V::Epochs::VERSION { MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::Proposal2(proposal), + GeneralConsensusMessage::Proposal2(convert_proposal(proposal)), )) } else { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -477,12 +687,7 @@ impl< } }; - let message = if self - .upgrade_lock - .version_infallible(vote.view_number()) - .await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote2(vote.clone()), )) @@ -496,12 +701,7 @@ impl< } HotShotEvent::ExtendedQuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); - let message = if self - .upgrade_lock - .version_infallible(vote.view_number()) - .await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote2(vote.clone()), )) @@ -523,12 +723,11 @@ impl< HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => { let message = if self .upgrade_lock - .version_infallible(proposal.data.view_number()) + .epochs_enabled(proposal.data.view_number()) .await - >= V::Epochs::VERSION { MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ProposalResponse2(proposal), + GeneralConsensusMessage::ProposalResponse2(convert_proposal(proposal)), )) } else { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -551,9 +750,8 @@ impl< let message = if self .upgrade_lock - .version_infallible(proposal.data.view_number()) + .epochs_enabled(proposal.data.view_number()) .await - >= V::Epochs::VERSION { MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaProposal2(proposal), @@ -582,9 +780,7 @@ impl< } }; - let message = if self.upgrade_lock.version_infallible(view_number).await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(view_number).await { MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote2(vote.clone()), )) @@ -600,9 +796,8 @@ impl< *maybe_action = Some(HotShotAction::DaCert); let message = if self .upgrade_lock - .version_infallible(certificate.view_number()) + .epochs_enabled(certificate.view_number()) .await - >= V::Epochs::VERSION { MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaCertificate2(certificate), @@ -628,12 +823,7 @@ impl< return None; } }; - let message = if self - .upgrade_lock - .version_infallible(vote.view_number()) - .await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncPreCommitVote2(vote.clone()), )) @@ -659,12 +849,7 @@ impl< return None; } }; - let message = if self - .upgrade_lock - .version_infallible(vote.view_number()) - .await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitVote2(vote.clone()), )) @@ -690,12 +875,7 @@ impl< return None; } }; - let message = if self - .upgrade_lock - .version_infallible(vote.view_number()) - .await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncFinalizeVote2(vote.clone()), )) @@ -709,9 +889,7 @@ impl< } HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); - let message = if self.upgrade_lock.version_infallible(view_number).await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(view_number).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncPreCommitCertificate2(certificate), )) @@ -725,9 +903,7 @@ impl< } HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); - let message = if self.upgrade_lock.version_infallible(view_number).await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(view_number).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitCertificate2(certificate), )) @@ -741,9 +917,7 @@ impl< } HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); - let message = if self.upgrade_lock.version_infallible(view_number).await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(view_number).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncFinalizeCertificate2(certificate), )) @@ -769,12 +943,7 @@ impl< return None; } }; - let message = if self - .upgrade_lock - .version_infallible(vote.view_number()) - .await - >= V::Epochs::VERSION - { + let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote2(vote.clone()), )) @@ -823,7 +992,7 @@ impl< let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); let net = Arc::clone(&self.network); - let epoch = self.epoch.u64(); + let epoch = self.epoch.map(|x| x.u64()); let mem = Arc::clone(&self.membership); spawn(async move { net.update_view::(*keep_view, epoch, mem).await; @@ -838,9 +1007,8 @@ impl< HotShotEvent::VidResponseSend(sender, to, proposal) => { let message = if self .upgrade_lock - .version_infallible(proposal.data.view_number()) + .epochs_enabled(proposal.data.view_number()) .await - >= V::Epochs::VERSION { MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2(proposal)), diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index c4922af173..36d24dcf61 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -24,16 +24,14 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, - data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence2}, + data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, ViewChangeEvidence2}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ - block_contents::BlockHeader, - election::Membership, - node_implementation::{ConsensusTime, NodeType}, + block_contents::BlockHeader, election::Membership, node_implementation::NodeType, signature_key::SignatureKey, }, - utils::{epoch_from_block_number, is_last_block_in_epoch}, + utils::{is_last_block_in_epoch, option_epoch_from_block_number}, vote::{Certificate, HasViewNumber}, }; use tracing::instrument; @@ -370,10 +368,12 @@ impl ProposalDependencyHandle { .context(warn!("Failed to construct marketplace block header"))? }; - let epoch = TYPES::Epoch::new(epoch_from_block_number( + let epoch = option_epoch_from_block_number::( + version >= V::Epochs::VERSION, block_header.block_number(), self.epoch_height, - )); + ); + // Make sure we are the leader for the view and epoch. // We might have ended up here because we were in the epoch transition. if self @@ -400,24 +400,31 @@ impl ProposalDependencyHandle { }; let next_drb_result = if is_last_block_in_epoch(block_header.block_number(), self.epoch_height) { - self.consensus - .read() - .await - .drb_seeds_and_results - .results - .get(&epoch) - .copied() + if let Some(epoch_val) = &epoch { + self.consensus + .read() + .await + .drb_seeds_and_results + .results + .get(epoch_val) + .copied() + } else { + None + } } else { None }; - let proposal = QuorumProposal2 { - block_header, - view_number: self.view_number, - justify_qc: parent_qc, - next_epoch_justify_qc: next_epoch_qc, - upgrade_certificate, - view_change_evidence: proposal_certificate, - next_drb_result, + let proposal = QuorumProposalWrapper { + proposal: QuorumProposal2 { + block_header, + view_number: self.view_number, + justify_qc: parent_qc, + next_epoch_justify_qc: next_epoch_qc, + upgrade_certificate, + view_change_evidence: proposal_certificate, + next_drb_result, + }, + with_epoch: version >= V::Epochs::VERSION, }; let proposed_leaf = Leaf2::from_quorum_proposal(&proposal); diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 9eb75188a3..054a69bb98 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -43,7 +43,7 @@ pub struct QuorumProposalTaskState pub latest_proposed_view: TYPES::View, /// Current epoch - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Table for the in-progress proposal dependency tasks. pub proposal_dependencies: BTreeMap>, @@ -274,7 +274,7 @@ impl, V: Versions> async fn create_dependency_task_if_new( &mut self, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, @@ -285,11 +285,14 @@ impl, V: Versions> membership_reader.leader(view_number, epoch_number)? == self.public_key; // If we are in the epoch transition and we are the leader in the next epoch, // we might want to start collecting dependencies for our next epoch proposal. - let leader_in_next_epoch = matches!( - epoch_transition_indicator, - EpochTransitionIndicator::InTransition - ) && membership_reader.leader(view_number, epoch_number + 1)? - == self.public_key; + + let leader_in_next_epoch = epoch_number.is_some() + && matches!( + epoch_transition_indicator, + EpochTransitionIndicator::InTransition + ) + && membership_reader.leader(view_number, epoch_number.map(|x| x + 1))? + == self.public_key; drop(membership_reader); // Don't even bother making the task if we are not entitled to propose anyway. @@ -369,7 +372,7 @@ impl, V: Versions> } /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view, epoch = *self.cur_epoch), name = "handle method", level = "error", target = "QuorumProposalTaskState")] + #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view, epoch = self.cur_epoch.map(|x| *x)), name = "handle method", level = "error", target = "QuorumProposalTaskState")] pub async fn handle( &mut self, event: Arc>, diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index b08064205c..9cdc75a562 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -13,7 +13,7 @@ use async_lock::{RwLock, RwLockUpgradableReadGuard}; use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf2, QuorumProposal, QuorumProposal2}, + data::{Leaf2, QuorumProposal, QuorumProposalWrapper}, message::Proposal, simple_certificate::QuorumCertificate, traits::{ @@ -24,7 +24,7 @@ use hotshot_types::{ storage::Storage, ValidatedState, }, - utils::{epoch_from_block_number, View, ViewInner}, + utils::{option_epoch_from_block_number, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; use tokio::spawn; @@ -44,7 +44,7 @@ use crate::{ /// Update states in the event that the parent state is not found for a given `proposal`. #[instrument(skip_all)] async fn validate_proposal_liveness, V: Versions>( - proposal: &Proposal>, + proposal: &Proposal>, validation_info: &ValidationInfo, ) -> Result<()> { let mut consensus_writer = validation_info.consensus.write().await; @@ -52,7 +52,7 @@ async fn validate_proposal_liveness>::from_header(&proposal.data.block_header), + >::from_header(proposal.data.block_header()), ); if let Err(e) = consensus_writer.update_leaf(leaf.clone(), state, None) { @@ -72,8 +72,8 @@ async fn validate_proposal_liveness consensus_writer.locked_view(); + // #3967 REVIEW NOTE: Why are we cloning justify_qc here just to get the view_number out? + let liveness_check = proposal.data.justify_qc().view_number() > consensus_writer.locked_view(); // if we are using HS2 we update our locked view for any QC from a leader greater than our current lock if liveness_check && validation_info @@ -82,7 +82,7 @@ async fn validate_proposal_liveness= V::Epochs::VERSION) { - consensus_writer.update_locked_view(proposal.data.justify_qc.clone().view_number())?; + consensus_writer.update_locked_view(proposal.data.justify_qc().view_number())?; } drop(consensus_writer); @@ -139,7 +139,7 @@ pub(crate) async fn handle_quorum_proposal_recv< I: NodeImplementation, V: Versions, >( - proposal: &Proposal>, + proposal: &Proposal>, quorum_proposal_sender_key: &TYPES::SignatureKey, event_sender: &Sender>>, event_receiver: &Receiver>>, @@ -153,14 +153,15 @@ pub(crate) async fn handle_quorum_proposal_recv< let view_number = proposal.data.view_number(); - let justify_qc = proposal.data.justify_qc.clone(); - let maybe_next_epoch_justify_qc = proposal.data.next_epoch_justify_qc.clone(); + let justify_qc = proposal.data.justify_qc().clone(); + let maybe_next_epoch_justify_qc = proposal.data.next_epoch_justify_qc().clone(); - let proposal_block_number = proposal.data.block_header.block_number(); - let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( + let proposal_block_number = proposal.data.block_header().block_number(); + let proposal_epoch = option_epoch_from_block_number::( + proposal.data.with_epoch, proposal_block_number, validation_info.epoch_height, - )); + ); let membership_reader = validation_info.membership.read().await; let membership_stake_table = membership_reader.stake_table(justify_qc.data.epoch); @@ -190,9 +191,10 @@ pub(crate) async fn handle_quorum_proposal_recv< } let membership_reader = validation_info.membership.read().await; - let membership_next_stake_table = membership_reader.stake_table(justify_qc.data.epoch + 1); + let membership_next_stake_table = + membership_reader.stake_table(justify_qc.data.epoch.map(|x| x + 1)); let membership_next_success_threshold = - membership_reader.success_threshold(justify_qc.data.epoch + 1); + membership_reader.success_threshold(justify_qc.data.epoch.map(|x| x + 1)); drop(membership_reader); // Validate the next epoch justify qc as well @@ -302,9 +304,9 @@ pub(crate) async fn handle_quorum_proposal_recv< ); validate_proposal_liveness(proposal, &validation_info).await?; tracing::trace!( - "Sending ViewChange for view {} and epoch {}", + "Sending ViewChange for view {} and epoch {:?}", view_number, - *proposal_epoch + proposal_epoch ); broadcast_event( Arc::new(HotShotEvent::ViewChange(view_number, proposal_epoch)), @@ -325,9 +327,9 @@ pub(crate) async fn handle_quorum_proposal_recv< .await?; tracing::trace!( - "Sending ViewChange for view {} and epoch {}", + "Sending ViewChange for view {} and epoch {:?}", view_number, - *proposal_epoch + proposal_epoch ); broadcast_event( Arc::new(HotShotEvent::ViewChange(view_number, proposal_epoch)), diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/task-impls/src/quorum_proposal_recv/mod.rs index 14ed4edf7e..3251fa42a4 100644 --- a/task-impls/src/quorum_proposal_recv/mod.rs +++ b/task-impls/src/quorum_proposal_recv/mod.rs @@ -55,7 +55,7 @@ pub struct QuorumProposalRecvTaskState, /// Membership for Quorum Certs/votes pub membership: Arc>, @@ -129,7 +129,7 @@ impl, V: Versions> } /// Handles all consensus events relating to propose and vote-enabling events. - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Consensus replica task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "Consensus replica task", level = "error")] #[allow(unused_variables)] pub async fn handle( &mut self, diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 4a2f180669..451604f883 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -12,7 +12,7 @@ use chrono::Utc; use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf2, QuorumProposal2, VidDisperseShare2}, + data::{Leaf2, QuorumProposalWrapper, VidDisperseShare2}, drb::{compute_drb_result, DrbResult}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, @@ -25,7 +25,10 @@ use hotshot_types::{ storage::Storage, ValidatedState, }, - utils::{epoch_from_block_number, is_epoch_root, is_last_block_in_epoch}, + utils::{ + epoch_from_block_number, is_epoch_root, is_last_block_in_epoch, + option_epoch_from_block_number, + }, vote::HasViewNumber, }; use tokio::spawn; @@ -100,13 +103,13 @@ async fn store_and_get_computed_drb_result< /// /// Returns an error if we should not vote. async fn verify_drb_result, V: Versions>( - proposal: &QuorumProposal2, + proposal: &QuorumProposalWrapper, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { // Skip if this is not the expected block. if task_state.epoch_height == 0 || !is_last_block_in_epoch( - proposal.block_header.block_number(), + proposal.block_header().block_number(), task_state.epoch_height, ) { @@ -114,39 +117,57 @@ async fn verify_drb_result, V: Ver return Ok(()); } - let epoch = TYPES::Epoch::new(epoch_from_block_number( - proposal.block_header.block_number(), + // #3967 REVIEW NOTE: Check if this is the right way to decide if we're doing epochs + // Alternatively, should we just return Err() if epochs aren't happening here? Or can we assume + // that epochs are definitely happening by virtue of getting here? + let epoch = option_epoch_from_block_number::( + task_state + .upgrade_lock + .epochs_enabled(proposal.view_number()) + .await, + proposal.block_header().block_number(), task_state.epoch_height, - )); + ); let proposal_result = proposal - .next_drb_result + .next_drb_result() .context(info!("Proposal is missing the DRB result."))?; let membership_reader = task_state.membership.read().await; - let has_stake_current_epoch = membership_reader.has_stake(&task_state.public_key, epoch); + if let Some(epoch_val) = epoch { + let has_stake_current_epoch = + membership_reader.has_stake(&task_state.public_key, Some(epoch_val)); - drop(membership_reader); + drop(membership_reader); - if has_stake_current_epoch { - let computed_result = store_and_get_computed_drb_result(epoch + 1, task_state).await?; + if has_stake_current_epoch { + let computed_result = + store_and_get_computed_drb_result(epoch_val + 1, task_state).await?; - ensure!(proposal_result == computed_result, warn!("Our calculated DRB result is {:?}, which does not match the proposed DRB result of {:?}", computed_result, proposal_result)); - } + ensure!(proposal_result == computed_result, warn!("Our calculated DRB result is {:?}, which does not match the proposed DRB result of {:?}", computed_result, proposal_result)); + } - Ok(()) + Ok(()) + } else { + Err(error!("Epochs are not available")) + } } /// Start the DRB computation task for the next epoch. /// /// Uses the seed previously stored in `store_drb_seed_and_result`. async fn start_drb_task, V: Versions>( - proposal: &QuorumProposal2, + proposal: &QuorumProposalWrapper, task_state: &mut QuorumVoteTaskState, ) { + // #3967 REVIEW NOTE: Should we just exit early if we aren't doing epochs? + if !proposal.with_epoch { + return; + } + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( - proposal.block_header.block_number(), + proposal.block_header().block_number(), task_state.epoch_height, )); @@ -155,7 +176,7 @@ async fn start_drb_task, V: Versio .membership .read() .await - .has_stake(&task_state.public_key, current_epoch_number) + .has_stake(&task_state.public_key, Some(current_epoch_number)) { let new_epoch_number = current_epoch_number + 1; @@ -292,13 +313,13 @@ async fn store_drb_seed_and_result } /// Handles the `QuorumProposalValidated` event. -#[instrument(skip_all, fields(id = task_state.id, view = *proposal.view_number))] +#[instrument(skip_all, fields(id = task_state.id, view = *proposal.view_number()))] pub(crate) async fn handle_quorum_proposal_validated< TYPES: NodeType, I: NodeImplementation, V: Versions, >( - proposal: &QuorumProposal2, + proposal: &QuorumProposalWrapper, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { let version = task_state @@ -571,17 +592,19 @@ pub(crate) async fn submit_vote, V extended_vote: bool, epoch_height: u64, ) -> Result<()> { - let epoch_number = TYPES::Epoch::new(epoch_from_block_number( + let epoch_number = option_epoch_from_block_number::( + leaf.with_epoch, leaf.block_header().block_number(), epoch_height, - )); + ); let membership_reader = membership.read().await; let committee_member_in_current_epoch = membership_reader.has_stake(&public_key, epoch_number); // If the proposed leaf is for the last block in the epoch and the node is part of the quorum committee // in the next epoch, the node should vote to achieve the double quorum. - let committee_member_in_next_epoch = is_last_block_in_epoch(leaf.height(), epoch_height) - && membership_reader.has_stake(&public_key, epoch_number + 1); + let committee_member_in_next_epoch = leaf.with_epoch + && is_last_block_in_epoch(leaf.height(), epoch_height) + && membership_reader.has_stake(&public_key, epoch_number.map(|x| x + 1)); drop(membership_reader); ensure!( diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 541c1d5fb2..24c04a16a4 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -17,7 +17,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::{ConsensusMetricsValue, OuterConsensus}, - data::{Leaf2, QuorumProposal2}, + data::{Leaf2, QuorumProposalWrapper}, drb::DrbComputation, event::Event, message::{Proposal, UpgradeLock}, @@ -28,7 +28,7 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, - utils::epoch_from_block_number, + utils::{epoch_from_block_number, option_epoch_from_block_number}, vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; @@ -123,7 +123,7 @@ impl + 'static, V: Versions> Handl return; } }; - let proposal_payload_comm = proposal.data.block_header.payload_commitment(); + let proposal_payload_comm = proposal.data.block_header().payload_commitment(); let parent_commitment = parent_leaf.commit(); let proposed_leaf = Leaf2::from_quorum_proposal(&proposal.data); @@ -132,7 +132,7 @@ impl + 'static, V: Versions> Handl .consensus .read() .await - .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit) + .is_leaf_forming_eqc(proposal.data.justify_qc().data.leaf_commit) { tracing::debug!("Do not vote here. Voting for this case is handled in QuorumVoteTaskState"); return; @@ -151,7 +151,13 @@ impl + 'static, V: Versions> Handl } // Update our persistent storage of the proposal. If we cannot store the proposal return // and error so we don't vote - if let Err(e) = self.storage.write().await.append_proposal2(proposal).await { + if let Err(e) = self + .storage + .write() + .await + .append_proposal_wrapper(proposal) + .await + { tracing::error!("failed to store proposal, not voting. error = {e:#}"); return; } @@ -230,18 +236,18 @@ impl + 'static, V: Versions> Handl return; } - let current_epoch = - TYPES::Epoch::new(epoch_from_block_number(leaf.height(), self.epoch_height)); + let cur_epoch = option_epoch_from_block_number::( + leaf.with_epoch, + leaf.height(), + self.epoch_height, + ); tracing::trace!( - "Sending ViewChange for view {} and epoch {}", + "Sending ViewChange for view {} and epoch {:?}", self.view_number + 1, - *current_epoch + cur_epoch ); broadcast_event( - Arc::new(HotShotEvent::ViewChange( - self.view_number + 1, - current_epoch, - )), + Arc::new(HotShotEvent::ViewChange(self.view_number + 1, cur_epoch)), &self.sender, ) .await; @@ -333,7 +339,7 @@ impl, V: Versions> QuorumVoteTaskS let event_view = match dependency_type { VoteDependency::QuorumProposal => { if let HotShotEvent::QuorumProposalValidated(proposal, _) = event { - proposal.data.view_number + proposal.data.view_number() } else { return false; } @@ -495,7 +501,7 @@ impl, V: Versions> QuorumVoteTaskS .consensus .read() .await - .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit); + .is_leaf_forming_eqc(proposal.data.justify_qc().data.leaf_commit); if version >= V::Epochs::VERSION && is_justify_qc_forming_eqc { let _ = self @@ -503,7 +509,7 @@ impl, V: Versions> QuorumVoteTaskS .await; } else { self.create_dependency_task_if_new( - proposal.data.view_number, + proposal.data.view_number(), event_receiver, &event_sender, Arc::clone(&event), @@ -655,7 +661,7 @@ impl, V: Versions> QuorumVoteTaskS #[allow(clippy::too_many_lines)] async fn handle_eqc_voting( &self, - proposal: &Proposal>, + proposal: &Proposal>, parent_leaf: &Leaf2, event_sender: Sender>>, event_receiver: Receiver>>, @@ -687,7 +693,7 @@ impl, V: Versions> QuorumVoteTaskS ))?; let mut updated_vid = vid.clone(); - updated_vid.data.view_number = proposal.data.view_number; + updated_vid.data.view_number = proposal.data.view_number(); consensus_writer.update_vid_shares(updated_vid.data.view_number, updated_vid.clone()); drop(consensus_writer); @@ -702,7 +708,7 @@ impl, V: Versions> QuorumVoteTaskS self.storage .write() .await - .append_proposal2(proposal) + .append_proposal_wrapper(proposal) .await .wrap() .context(|e| error!("failed to store proposal, not voting. error = {}", e))?; @@ -750,7 +756,7 @@ impl, V: Versions> QuorumVoteTaskS broadcast_event( Arc::new(HotShotEvent::ViewChange( proposal.data.view_number() + 1, - current_epoch, + Some(current_epoch), )), &event_sender, ) diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 3e1d75dff2..2500e28ee1 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -26,10 +26,10 @@ use hotshot_types::{ block_contents::BlockHeader, election::Membership, network::{ConnectedNetwork, DataRequest, RequestKind}, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, - utils::epoch_from_block_number, + utils::option_epoch_from_block_number, vote::HasViewNumber, }; use rand::{seq::SliceRandom, thread_rng}; @@ -112,10 +112,11 @@ impl> TaskState for NetworkRequest match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.data.view_number(); - let prop_epoch = TYPES::Epoch::new(epoch_from_block_number( - proposal.data.block_header.block_number(), + let prop_epoch = option_epoch_from_block_number::( + proposal.data.with_epoch, + proposal.data.block_header().block_number(), self.epoch_height, - )); + ); // If we already have the VID shares for the next view, do nothing. if prop_view >= self.view @@ -162,7 +163,7 @@ impl> NetworkRequestState, sender: &Sender>>, receiver: &Receiver>>, ) { @@ -191,7 +192,7 @@ impl> NetworkRequestState>>, receiver: Receiver>>, view: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, ) { let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let network = Arc::clone(&self.network); diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 86543a5db9..30a413c85f 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -185,7 +185,11 @@ impl NetworkResponseState { } /// Makes sure the sender is allowed to send a request in the given epoch. - async fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool { + async fn valid_sender( + &self, + sender: &TYPES::SignatureKey, + epoch: Option, + ) -> bool { self.membership.read().await.has_stake(sender, epoch) } } diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 34dd64fd23..0d47f6ee3e 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -89,7 +89,7 @@ pub struct TransactionTaskState, V pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -131,7 +131,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, - block_epoch: TYPES::Epoch, + block_epoch: Option, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -156,7 +156,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, - block_epoch: TYPES::Epoch, + block_epoch: Option, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -257,7 +257,7 @@ impl, V: Versions> TransactionTask async fn produce_block_marketplace( &mut self, block_view: TYPES::View, - block_epoch: TYPES::Epoch, + block_epoch: Option, task_start_time: Instant, ) -> Result> { ensure!( @@ -361,7 +361,7 @@ impl, V: Versions> TransactionTask pub async fn null_block( &self, block_view: TYPES::View, - block_epoch: TYPES::Epoch, + block_epoch: Option, version: Version, ) -> Option> { let membership_total_nodes = self.membership.read().await.total_nodes(self.cur_epoch); @@ -394,7 +394,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, - block_epoch: TYPES::Epoch, + block_epoch: Option, ) -> Option { let task_start_time = Instant::now(); @@ -447,7 +447,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, - block_epoch: TYPES::Epoch, + block_epoch: Option, ) -> Option { if self.consensus.read().await.is_high_qc_forming_eqc() { tracing::info!("Reached end of epoch. Not getting a new block until we form an eQC."); @@ -459,7 +459,7 @@ impl, V: Versions> TransactionTask } /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Transaction task", level = "error", target = "TransactionTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -480,13 +480,17 @@ impl, V: Versions> TransactionTask } HotShotEvent::ViewChange(view, epoch) => { let view = TYPES::View::new(std::cmp::max(1, **view)); - let epoch = if self.epoch_height != 0 { - TYPES::Epoch::new(std::cmp::max(1, **epoch)) + let epoch = if self.upgrade_lock.epochs_enabled(view).await { + // #3967 REVIEW NOTE: Double check this logic + Some(TYPES::Epoch::new(std::cmp::max( + 1, + epoch.map(|x| *x).unwrap_or(0), + ))) } else { *epoch }; ensure!( - *view > *self.cur_view && *epoch >= *self.cur_epoch, + *view > *self.cur_view && epoch >= self.cur_epoch, debug!( "Received a view change to an older view and epoch: tried to change view to {:?}\ and epoch {:?} though we are at view {:?} and epoch {:?}", diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index de1fcad3f8..591d04c9f0 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -48,7 +48,7 @@ pub struct UpgradeTaskState { pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Membership for Quorum Certs/votes pub membership: Arc>, @@ -104,7 +104,7 @@ impl UpgradeTaskState { } /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Upgrade Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "Upgrade Task", level = "error")] pub async fn handle( &mut self, event: Arc>, diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 24603f27e1..90430b7a86 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -13,15 +13,15 @@ use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, data::{PackedBundle, VidDisperse, VidDisperseShare2}, - message::Proposal, + message::{Proposal, UpgradeLock}, traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, }, - utils::epoch_from_block_number, + utils::option_epoch_from_block_number, }; use tracing::{debug, error, info, instrument}; use utils::anytrace::Result; @@ -32,12 +32,12 @@ use crate::{ }; /// Tracks state of a VID task -pub struct VidTaskState> { +pub struct VidTaskState, V: Versions> { /// View number this view is executing in. pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -57,13 +57,16 @@ pub struct VidTaskState> { /// This state's ID pub id: u64, + /// Lock for a decided upgrade + pub upgrade_lock: UpgradeLock, + /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, } -impl> VidTaskState { +impl, V: Versions> VidTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "VID Main Task", level = "error", target = "VidTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "VID Main Task", level = "error", target = "VidTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -138,8 +141,8 @@ impl> VidTaskState { return None; }; debug!( - "publishing VID disperse for view {} and epoch {}", - *view_number, *epoch + "publishing VID disperse for view {} and epoch {:?}", + *view_number, epoch ); broadcast_event( Arc::new(HotShotEvent::VidDisperseSend( @@ -174,21 +177,20 @@ impl> VidTaskState { } HotShotEvent::QuorumProposalSend(proposal, _) => { - let proposed_block_number = proposal.data.block_header.block_number(); - if self.epoch_height == 0 || proposed_block_number % self.epoch_height != 0 { + let proposed_block_number = proposal.data.block_header().block_number(); + if !proposal.data.with_epoch || proposed_block_number % self.epoch_height != 0 { // This is not the last block in the epoch, do nothing. return None; } // We just sent a proposal for the last block in the epoch. We need to calculate // and send VID for the nodes in the next epoch so that they can vote. - let proposal_view_number = proposal.data.view_number; - let sender_epoch = TYPES::Epoch::new(epoch_from_block_number( + let proposal_view_number = proposal.data.view_number(); + let sender_epoch = option_epoch_from_block_number::( + true, proposed_block_number, self.epoch_height, - )); - let target_epoch = TYPES::Epoch::new( - epoch_from_block_number(proposed_block_number, self.epoch_height) + 1, ); + let target_epoch = sender_epoch.map(|x| x + 1); let consensus_reader = self.consensus.read().await; let Some(payload) = consensus_reader.saved_payloads().get(&proposal_view_number) @@ -219,8 +221,8 @@ impl> VidTaskState { return None; }; debug!( - "publishing VID disperse for view {} and epoch {}", - *proposal_view_number, *target_epoch + "publishing VID disperse for view {} and epoch {:?}", + *proposal_view_number, target_epoch ); broadcast_event( Arc::new(HotShotEvent::VidDisperseSend( @@ -246,7 +248,9 @@ impl> VidTaskState { #[async_trait] /// task state implementation for VID Task -impl> TaskState for VidTaskState { +impl, V: Versions> TaskState + for VidTaskState +{ type Event = HotShotEvent; async fn handle_event( diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index 4f40516b3e..dadc7c8c70 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -71,7 +71,7 @@ pub struct ViewSyncTaskState { pub next_view: TYPES::View, /// Epoch HotShot is currently in - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// Membership for the quorum pub membership: Arc>, @@ -143,7 +143,7 @@ pub struct ViewSyncReplicaTaskState { pub next_view: TYPES::View, /// Current epoch HotShot is in - pub cur_epoch: TYPES::Epoch, + pub cur_epoch: Option, /// The relay index we are currently on pub relay: u64, @@ -255,7 +255,7 @@ impl ViewSyncTaskState { task_map.insert(view, replica_state); } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task pub async fn handle( @@ -530,7 +530,7 @@ impl ViewSyncTaskState { } impl ViewSyncReplicaTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Replica Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 902630794a..6e9b57ced1 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -61,7 +61,7 @@ pub struct VoteCollectionTaskState< pub view: TYPES::View, /// The epoch which we are collecting votes for - pub epoch: TYPES::Epoch, + pub epoch: Option, /// Node id pub id: u64, @@ -84,7 +84,7 @@ pub trait AggregatableVote< fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result; /// return the Hotshot event for the completion of this CERT @@ -107,7 +107,7 @@ impl< pub async fn accumulate_vote( &mut self, vote: &VOTE, - sender_epoch: TYPES::Epoch, + sender_epoch: Option, event_stream: &Sender>>, ) -> Result> { ensure!( @@ -186,7 +186,7 @@ pub struct AccumulatorInfo { pub view: TYPES::View, /// Epoch of the votes we are collecting - pub epoch: TYPES::Epoch, + pub epoch: Option, /// This nodes id pub id: u64, @@ -262,7 +262,7 @@ pub async fn handle_vote< vote: &VOTE, public_key: TYPES::SignatureKey, membership: &Arc>, - epoch: TYPES::Epoch, + epoch: Option, id: u64, event: &Arc>, event_stream: &Sender>>, @@ -359,7 +359,7 @@ impl AggregatableVote, QuorumCertifica fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.view_number() + 1, epoch) } @@ -377,7 +377,7 @@ impl AggregatableVote, QuorumCertific fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.view_number() + 1, epoch) } @@ -396,7 +396,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.view_number() + 1, epoch) } @@ -414,7 +414,7 @@ impl AggregatableVote, UpgradeCertifi fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.view_number(), epoch) } @@ -432,7 +432,7 @@ impl AggregatableVote, DaCertificate2, ) -> Result { membership.leader(self.view_number(), epoch) } @@ -450,7 +450,7 @@ impl AggregatableVote, TimeoutCertif fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.view_number() + 1, epoch) } @@ -469,7 +469,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } @@ -488,7 +488,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } @@ -507,7 +507,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::Epoch, + epoch: Option, ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } @@ -555,7 +555,12 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::QuorumVoteRecv(vote) => { - self.accumulate_vote(&vote.clone().into(), self.epoch + 1, sender) + // #3967 REVIEW NOTE: Should we error if self.epoch is None? + let next_epoch = self + .epoch + .map(|x| x + 1) + .ok_or_else(|| error!("epoch should not be none in handle_vote_event"))?; + self.accumulate_vote(&vote.clone().into(), Some(next_epoch), sender) .await } _ => Ok(None), diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index 3c0790b595..d8dac49cc9 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -19,7 +19,7 @@ use hotshot_task_impls::{ }; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::QuorumProposal2, + data::QuorumProposalWrapper, message::{Proposal, UpgradeLock}, simple_vote::QuorumVote2, traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, @@ -57,7 +57,7 @@ impl, V: Versions> EventTransforme for n in 1..self.multiplier { let mut modified_proposal = proposal.clone(); - modified_proposal.data.view_number += n * self.increment; + modified_proposal.data.proposal.view_number += n * self.increment; result.push(HotShotEvent::QuorumProposalSend( modified_proposal, @@ -106,7 +106,7 @@ impl, V: Versions> EventTransforme /// An `EventHandlerState` that modifies justify_qc on `QuorumProposalSend` to that of a previous view to mock dishonest leader pub struct DishonestLeader { /// Store events from previous views - pub validated_proposals: Vec>, + pub validated_proposals: Vec>, /// How many times current node has been elected leader and sent proposal pub total_proposals_from_node: u64, /// Which proposals to be dishonest at @@ -126,7 +126,7 @@ impl DishonestLeader { async fn handle_proposal_send_event( &self, event: &HotShotEvent, - proposal: &Proposal>, + proposal: &Proposal>, sender: &TYPES::SignatureKey, ) -> HotShotEvent { let length = self.validated_proposals.len(); @@ -149,11 +149,11 @@ impl DishonestLeader { // Create a dishonest proposal by using the old proposals qc let mut dishonest_proposal = proposal.clone(); - dishonest_proposal.data.justify_qc = proposal_from_look_back.justify_qc; + dishonest_proposal.data.proposal.justify_qc = proposal_from_look_back.proposal.justify_qc; // Save the view we sent the dishonest proposal on (used for coordination attacks with other byzantine replicas) let mut dishonest_proposal_sent = self.dishonest_proposal_view_numbers.write().await; - dishonest_proposal_sent.insert(proposal.data.view_number); + dishonest_proposal_sent.insert(proposal.data.view_number()); HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone()) } @@ -344,7 +344,7 @@ impl + std::fmt::Debug, V: Version let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, view: TYPES::View::genesis(), - epoch: TYPES::Epoch::genesis(), + epoch: None, membership, storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), @@ -396,7 +396,7 @@ impl + std::fmt::Debug, V: Version HotShotEvent::QuorumProposalRecv(proposal, _sender) => { // Check if view is a dishonest proposal, if true send a vote let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await; - if dishonest_proposals.contains(&proposal.data.view_number) { + if dishonest_proposals.contains(&proposal.data.view_number()) { // Create a vote using data from most recent vote and the current event number // We wont update internal consensus state for this Byzantine replica but we are at least // Going to send a vote to the next honest leader diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index d0a5631fc5..bbabd4c100 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -33,9 +33,9 @@ use hotshot_types::{ traits::{ block_contents::vid_commitment, election::Membership, - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{NodeType, Versions}, }, - utils::{epoch_from_block_number, View, ViewInner}, + utils::{option_epoch_from_block_number, View, ViewInner}, vid::{vid_scheme, VidCommitment, VidProposal, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, ValidatorConfig, @@ -144,7 +144,7 @@ pub async fn build_cert< data: DATAType, membership: &Arc>, view: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -210,7 +210,7 @@ pub async fn build_assembled_sig< data: &DATAType, membership: &Arc>, view: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, upgrade_lock: &UpgradeLock, ) -> ::QcType { let membership_reader = membership.read().await; @@ -272,7 +272,7 @@ pub fn key_pair_for_id( pub async fn vid_scheme_from_view_number( membership: &Arc>, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, ) -> VidSchemeType { let num_storage_nodes = membership .read() @@ -285,7 +285,7 @@ pub async fn vid_scheme_from_view_number( pub async fn vid_payload_commitment( membership: &Arc::Membership>>, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, transactions: Vec, ) -> VidCommitment { let mut vid = vid_scheme_from_view_number::(membership, view_number, epoch_number).await; @@ -298,7 +298,7 @@ pub async fn vid_payload_commitment( pub async fn da_payload_commitment( membership: &Arc::Membership>>, transactions: Vec, - epoch_number: TYPES::Epoch, + epoch_number: Option, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); @@ -311,7 +311,7 @@ pub async fn da_payload_commitment( pub async fn build_payload_commitment( membership: &Arc::Membership>>, view: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -324,7 +324,7 @@ pub async fn build_payload_commitment( pub async fn build_vid_proposal( membership: &Arc::Membership>>, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, transactions: Vec, private_key: &::PrivateKey, ) -> VidProposal { @@ -367,7 +367,7 @@ pub async fn build_vid_proposal( pub async fn build_da_certificate( membership: &Arc::Membership>>, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, transactions: Vec, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, @@ -437,7 +437,7 @@ pub async fn build_fake_view_with_leaf_and_state( epoch_height: u64, ) -> View { let epoch = - ::Epoch::new(epoch_from_block_number(leaf.height(), epoch_height)); + option_epoch_from_block_number::(leaf.with_epoch, leaf.height(), epoch_height); View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index 600e4a87a8..dbc360978f 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -188,12 +188,23 @@ impl, V: Versions> TestTas }; if let Some(ref key) = key { - if *key.epoch(self.epoch_height) > self.ctx.latest_epoch { - self.ctx.latest_epoch = *key.epoch(self.epoch_height); + match ( + key.epoch(self.epoch_height).map(|x| *x), + self.ctx.latest_epoch, + ) { + (Some(key_epoch), Some(latest_epoch)) => { + if key_epoch > latest_epoch { + self.ctx.latest_epoch = Some(key_epoch); + } + } + (Some(key_epoch), None) => { + self.ctx.latest_epoch = Some(key_epoch); + } + _ => {} } } - let epoch = TYPES::Epoch::new(self.ctx.latest_epoch); + let epoch = self.ctx.latest_epoch.map(TYPES::Epoch::new); let memberships_arc = Arc::clone( &self .handles @@ -376,7 +387,7 @@ impl Default for RoundCtx { round_results: HashMap::default(), failed_views: HashSet::default(), successful_views: HashSet::default(), - latest_epoch: 0u64, + latest_epoch: None, } } } @@ -395,7 +406,7 @@ pub struct RoundCtx { /// successful views pub successful_views: HashSet, /// latest epoch, updated when a leaf with a higher epoch is seen - pub latest_epoch: u64, + pub latest_epoch: Option, } impl RoundCtx { diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index b188338839..ebd33399fb 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -175,7 +175,7 @@ where let info = "QuorumProposalSend with UpgradeCertificate attached".to_string(); let check: EventCallback = Arc::new(move |e: Arc>| match e.as_ref() { - QuorumProposalSend(proposal, _) => proposal.data.upgrade_certificate.is_some(), + QuorumProposalSend(proposal, _) => proposal.data.upgrade_certificate().is_some(), _ => false, }); Box::new(EventPredicate { info, check }) @@ -212,7 +212,7 @@ where let check: EventCallback = Arc::new(move |e: Arc>| match e.as_ref() { QuorumProposalSend(proposal, _) => { - Some(proposal.data.block_header.payload_commitment()) + Some(proposal.data.block_header().payload_commitment()) == null_block::commitment(num_storage_nodes) } _ => false, diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index fac9856950..581c1e3a89 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -33,6 +33,7 @@ use hotshot_types::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }, + utils::genesis_epoch_from_version, vote::HasViewNumber, ValidatorConfig, }; @@ -115,8 +116,8 @@ where sender: _, } = event { - if proposal.data.justify_qc.view_number() > self.high_qc.view_number() { - self.high_qc = proposal.data.justify_qc.clone(); + if proposal.data.justify_qc().view_number() > self.high_qc.view_number() { + self.high_qc = proposal.data.justify_qc().clone(); } } @@ -158,7 +159,7 @@ where TestInstanceState::new(self.async_delay_config.clone()), None, TYPES::View::genesis(), - TYPES::Epoch::genesis(), + genesis_epoch_from_version::(), // #3967 is this right now after our earlier discussion? or should i be doing (epoch_height > 0).then(TYPES::Epoch::genesis) TYPES::View::genesis(), BTreeMap::new(), self.high_qc.clone(), diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 600be48823..6e8d21014b 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -37,6 +37,7 @@ use hotshot_types::{ network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, }, + utils::genesis_epoch_from_version, HotShotConfig, ValidatorConfig, }; use tide_disco::Url; @@ -179,7 +180,7 @@ where late_start, latest_view: None, changes, - last_decided_leaf: Leaf2::genesis( + last_decided_leaf: Leaf2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -405,7 +406,8 @@ where config.known_nodes_with_stake.clone(), config.known_da_nodes.clone(), ); - let num_nodes = temp_memberships.total_nodes(TYPES::Epoch::new(0)); + // #3967 is it enough to check versions now? Or should we also be checking epoch_height? + let num_nodes = temp_memberships.total_nodes(genesis_epoch_from_version::()); let (mut builder_tasks, builder_urls, fallback_builder_url) = self.init_builders::(num_nodes).await; @@ -657,6 +659,7 @@ where internal_channel, external_channel, ) + .await } } diff --git a/testing/src/test_task.rs b/testing/src/test_task.rs index f828a19737..fd51a5a7e0 100644 --- a/testing/src/test_task.rs +++ b/testing/src/test_task.rs @@ -164,11 +164,12 @@ pub async fn add_network_message_test_task< public_key: TYPES::SignatureKey, ) -> JoinHandle<()> { let net = Arc::clone(&channel); - let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { + let network_state: NetworkMessageTaskState<_, _> = NetworkMessageTaskState { internal_event_stream: internal_event_stream.clone(), external_event_stream: external_event_stream.clone(), public_key, transactions_cache: lru::LruCache::new(NonZeroUsize::new(100_000).unwrap()), + upgrade_lock: upgrade_lock.clone(), }; let network = Arc::clone(&net); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 266c311429..30cf89ac45 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -23,8 +23,8 @@ use hotshot_example_types::{ }; use hotshot_types::{ data::{ - DaProposal2, EpochNumber, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2, - ViewChangeEvidence2, ViewNumber, + DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, + VidDisperseShare2, ViewChangeEvidence2, ViewNumber, }, message::{Proposal, UpgradeLock}, simple_certificate::{ @@ -37,12 +37,13 @@ use hotshot_types::{ }, traits::{ consensus_api::ConsensusApi, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, BlockPayload, }, }; use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; +use vbs::version::StaticVersionType; use crate::helpers::{ build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, @@ -51,10 +52,10 @@ use crate::helpers::{ #[derive(Clone)] pub struct TestView { pub da_proposal: Proposal>, - pub quorum_proposal: Proposal>, + pub quorum_proposal: Proposal>, pub leaf: Leaf2, pub view_number: ViewNumber, - pub epoch_number: EpochNumber, + pub epoch_number: Option, pub membership: Arc::Membership>>, pub vid_disperse: Proposal>, pub vid_proposal: ( @@ -72,9 +73,11 @@ pub struct TestView { } impl TestView { - pub async fn genesis(membership: &Arc::Membership>>) -> Self { + pub async fn genesis( + membership: &Arc::Membership>>, + ) -> Self { let genesis_view = ViewNumber::new(1); - let genesis_epoch = EpochNumber::new(0); + let genesis_epoch = None; let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -122,7 +125,7 @@ impl TestView { .await; let block_header = TestBlockHeader::new( - &Leaf2::::genesis( + &Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -132,18 +135,22 @@ impl TestView { metadata, ); - let quorum_proposal_inner = QuorumProposal2:: { - block_header: block_header.clone(), - view_number: genesis_view, - justify_qc: QuorumCertificate2::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await, - next_epoch_justify_qc: None, - upgrade_certificate: None, - view_change_evidence: None, - next_drb_result: None, + let quorum_proposal_inner = QuorumProposalWrapper:: { + proposal: QuorumProposal2:: { + block_header: block_header.clone(), + view_number: genesis_view, + justify_qc: QuorumCertificate2::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, + next_epoch_justify_qc: None, + upgrade_certificate: None, + view_change_evidence: None, + next_drb_result: None, + }, + // #3967 REVIEW NOTE: Is this right? + with_epoch: V::Base::VERSION >= V::Epochs::VERSION, }; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); @@ -368,14 +375,18 @@ impl TestView { random, }; - let proposal = QuorumProposal2:: { - block_header: block_header.clone(), - view_number: next_view, - justify_qc: quorum_certificate.clone(), - next_epoch_justify_qc: None, - upgrade_certificate: upgrade_certificate.clone(), - view_change_evidence, - next_drb_result: None, + let proposal = QuorumProposalWrapper:: { + proposal: QuorumProposal2:: { + block_header: block_header.clone(), + view_number: next_view, + justify_qc: quorum_certificate.clone(), + next_epoch_justify_qc: None, + upgrade_certificate: upgrade_certificate.clone(), + view_change_evidence, + next_drb_result: None, + }, + // #3967 REVIEW NOTE: Is this right? + with_epoch: self.upgrade_lock.epochs_enabled(next_view).await, }; let mut leaf = Leaf2::from_quorum_proposal(&proposal); @@ -492,16 +503,18 @@ impl TestView { } } -pub struct TestViewGenerator { +pub struct TestViewGenerator { pub current_view: Option, pub membership: Arc::Membership>>, + pub _pd: PhantomData, } -impl TestViewGenerator { +impl TestViewGenerator { pub fn generate(membership: Arc::Membership>>) -> Self { TestViewGenerator { current_view: None, membership, + _pd: PhantomData, } } @@ -574,7 +587,7 @@ impl TestViewGenerator { } } -impl Stream for TestViewGenerator { +impl Stream for TestViewGenerator { type Item = TestView; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -584,7 +597,7 @@ impl Stream for TestViewGenerator { let mut fut = if let Some(ref view) = curr_view { async move { TestView::next_view(view).await }.boxed() } else { - async move { TestView::genesis(&mem).await }.boxed() + async move { TestView::genesis::(&mem).await }.boxed() }; match fut.as_mut().poll(cx) { diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 4e594bb4a2..8ed94ff23d 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -22,7 +22,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, EpochNumber, PackedBundle, ViewNumber}, + data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData2, traits::{ block_contents::precompute_vid_commitment, @@ -48,15 +48,10 @@ async fn test_da_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle - .hotshot - .memberships - .read() - .await - .total_nodes(EpochNumber::new(0)), + handle.hotshot.memberships.read().await.total_nodes(None), ); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -102,17 +97,17 @@ async fn test_da_task() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1), EpochNumber::new(0)), - ViewChange(ViewNumber::new(2), EpochNumber::new(0)), + ViewChange(ViewNumber::new(1), None), + ViewChange(ViewNumber::new(2), None), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), - EpochNumber::new(0), + None, vec1::vec1![null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(None), ::Base::VERSION, *ViewNumber::new(2), ) @@ -161,15 +156,10 @@ async fn test_da_task_storage_failure() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle - .hotshot - .memberships - .read() - .await - .total_nodes(EpochNumber::new(0)), + handle.hotshot.memberships.read().await.total_nodes(None), ); - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -215,17 +205,17 @@ async fn test_da_task_storage_failure() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1), EpochNumber::new(0)), - ViewChange(ViewNumber::new(2), EpochNumber::new(0)), + ViewChange(ViewNumber::new(1), None), + ViewChange(ViewNumber::new(2), None), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), - EpochNumber::new(0), + None, vec1::vec1![null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(None), ::Base::VERSION, *ViewNumber::new(2), ) diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index 456a1321bd..b28d617358 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -28,7 +28,7 @@ use vbs::{ fn version_number_at_start_of_serialization() { let sender = BLSPubKey::generated_from_seed_indexed([0u8; 32], 0).0; let view_number = ConsensusTime::new(17); - let epoch = ConsensusTime::new(0); + let epoch = None; // The version we set for the message const MAJOR: u16 = 37; const MINOR: u16 = 17; @@ -68,8 +68,8 @@ async fn test_certificate2_validity() { use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; use hotshot_types::{ - data::{EpochNumber, Leaf, Leaf2}, - traits::{election::Membership, node_implementation::ConsensusTime}, + data::{Leaf, Leaf2}, + traits::election::Membership, vote::Certificate, }; @@ -81,7 +81,7 @@ async fn test_certificate2_validity() { .0; let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -101,12 +101,12 @@ async fn test_certificate2_validity() { let parent_proposal = proposals[2].clone(); // ensure that we don't break certificate validation - let qc2 = proposal.data.justify_qc.clone(); + let qc2 = proposal.data.justify_qc().clone(); let qc = qc2.clone().to_qc(); let membership_reader = membership.read().await; - let membership_stake_table = membership_reader.stake_table(EpochNumber::new(0)); - let membership_success_threshold = membership_reader.success_threshold(EpochNumber::new(0)); + let membership_stake_table = membership_reader.stake_table(None); + let membership_success_threshold = membership_reader.success_threshold(None); drop(membership_reader); assert!( diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 94b2ac1dc2..7e391c1024 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -18,7 +18,7 @@ use hotshot_testing::{ }; use hotshot_types::{ consensus::OuterConsensus, - data::{EpochNumber, ViewNumber}, + data::ViewNumber, message::UpgradeLock, traits::{ election::Membership, @@ -66,7 +66,7 @@ async fn test_network_task() { NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), - epoch: EpochNumber::new(0), + epoch: None, membership: Arc::clone(&membership), upgrade_lock: upgrade_lock.clone(), storage, @@ -79,7 +79,7 @@ async fn test_network_task() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal) = async_broadcast::broadcast(10); @@ -146,7 +146,7 @@ async fn test_network_storage_fail() { NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), - epoch: EpochNumber::new(0), + epoch: None, membership: Arc::clone(&membership), upgrade_lock: upgrade_lock.clone(), storage, @@ -159,7 +159,7 @@ async fn test_network_storage_fail() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal): (Sender>>, _) = diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/testing/tests/tests_1/quorum_proposal_recv_task.rs index 97d39cc9ca..29d2389f2d 100644 --- a/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -58,7 +58,7 @@ async fn test_quorum_proposal_recv_task() { let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -96,7 +96,7 @@ async fn test_quorum_proposal_recv_task() { proposals[1].clone(), leaves[0].clone(), )), - exact(ViewChange(ViewNumber::new(2), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(2), None)), ])]; let state = @@ -133,7 +133,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); let mut votes = Vec::new(); @@ -166,7 +166,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { // ourselves here instead. This is a bit cheesy, but it'll work as we expect for the // purposes of the test. consensus_writer - .update_high_qc(proposals[3].data.justify_qc.clone()) + .update_high_qc(proposals[3].data.justify_qc().clone()) .unwrap(); drop(consensus_writer); @@ -189,7 +189,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), - exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(3), None)), exact(QuorumProposalRequestSend(req, signature)), ])]; diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 165338945e..95c4010c03 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -56,11 +56,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), - EpochNumber::new(1), + Some(EpochNumber::new(1)), ) .await; - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -88,10 +88,13 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { } // We must send the genesis cert here to initialize hotshot successfully. - let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_cert = proposals[0].data.justify_qc().clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(1)), + membership + .read() + .await + .total_nodes(Some(EpochNumber::new(1))), ::Base::VERSION, *ViewNumber::new(1), ) @@ -148,7 +151,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership.clone()); + let mut generator = TestViewGenerator::::generate(membership.clone()); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -177,13 +180,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. - let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_cert = proposals[0].data.justify_qc().clone(); drop(consensus_writer); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(1)), + membership + .read() + .await + .total_nodes(Some(EpochNumber::new(1))), ::Base::VERSION, *ViewNumber::new(1), ) @@ -196,7 +202,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { build_payload_commitment::( &membership, ViewNumber::new(1), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), @@ -211,16 +217,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), - Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(2), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[0].data.block_header.metadata, + proposals[0].data.block_header().metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -229,16 +235,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), - Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(3), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[1].data.block_header.metadata, + proposals[1].data.block_header().metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, @@ -247,16 +253,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), - Qc2Formed(either::Left(proposals[3].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(4), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[2].data.block_header.metadata, + proposals[2].data.block_header().metadata, ViewNumber::new(4), vec1![builder_fee.clone()], None, @@ -265,16 +271,16 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), - Qc2Formed(either::Left(proposals[4].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(5), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment, - proposals[3].data.block_header.metadata, + proposals[3].data.block_header().metadata, ViewNumber::new(5), vec1![builder_fee.clone()], None, @@ -319,12 +325,12 @@ async fn test_quorum_proposal_task_qc_timeout() { let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), - EpochNumber::new(1), + Some(EpochNumber::new(1)), ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -340,7 +346,7 @@ async fn test_quorum_proposal_task_qc_timeout() { } let timeout_data = TimeoutData2 { view: ViewNumber::new(1), - epoch: EpochNumber::new(0), + epoch: None, }; generator.add_timeout(timeout_data); for view in (&mut generator).take(2).collect::>().await { @@ -352,7 +358,7 @@ async fn test_quorum_proposal_task_qc_timeout() { } // Get the proposal cert out for the view sync input - let cert = match proposals[1].data.view_change_evidence.clone().unwrap() { + let cert = match proposals[1].data.view_change_evidence().clone().unwrap() { ViewChangeEvidence2::Timeout(tc) => tc, _ => panic!("Found a View Sync Cert when there should have been a Timeout cert"), }; @@ -367,7 +373,10 @@ async fn test_quorum_proposal_task_qc_timeout() { }, ViewNumber::new(3), vec1![null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(1)), + membership + .read() + .await + .total_nodes(Some(EpochNumber::new(1))), ::Base::VERSION, *ViewNumber::new(3), ) @@ -409,12 +418,12 @@ async fn test_quorum_proposal_task_view_sync() { let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), - EpochNumber::new(1), + Some(EpochNumber::new(1)), ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -432,7 +441,7 @@ async fn test_quorum_proposal_task_view_sync() { let view_sync_finalize_data = ViewSyncFinalizeData2 { relay: 2, round: ViewNumber::new(node_id), - epoch: EpochNumber::new(0), + epoch: None, }; generator.add_view_sync_finalize(view_sync_finalize_data); for view in (&mut generator).take(2).collect::>().await { @@ -444,7 +453,7 @@ async fn test_quorum_proposal_task_view_sync() { } // Get the proposal cert out for the view sync input - let cert = match proposals[1].data.view_change_evidence.clone().unwrap() { + let cert = match proposals[1].data.view_change_evidence().clone().unwrap() { ViewChangeEvidence2::ViewSync(vsc) => vsc, _ => panic!("Found a TC when there should have been a view sync cert"), }; @@ -459,7 +468,10 @@ async fn test_quorum_proposal_task_view_sync() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(1)), + membership + .read() + .await + .total_nodes(Some(EpochNumber::new(1))), ::Base::VERSION, *ViewNumber::new(2), ) @@ -496,7 +508,7 @@ async fn test_quorum_proposal_task_liveness_check() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -526,7 +538,10 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(1)), + membership + .read() + .await + .total_nodes(Some(EpochNumber::new(1))), ::Base::VERSION, *ViewNumber::new(1), ) @@ -534,7 +549,7 @@ async fn test_quorum_proposal_task_liveness_check() { // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. - let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_cert = proposals[0].data.justify_qc().clone(); let inputs = vec![ random![ @@ -543,7 +558,7 @@ async fn test_quorum_proposal_task_liveness_check() { build_payload_commitment::( &membership, ViewNumber::new(1), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), @@ -558,16 +573,16 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), - Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(2), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[0].data.block_header.metadata, + proposals[0].data.block_header().metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -576,16 +591,16 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), - Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(3), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[1].data.block_header.metadata, + proposals[1].data.block_header().metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, @@ -594,16 +609,16 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), - Qc2Formed(either::Left(proposals[3].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(4), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[2].data.block_header.metadata, + proposals[2].data.block_header().metadata, ViewNumber::new(4), vec1![builder_fee.clone()], None, @@ -612,16 +627,16 @@ async fn test_quorum_proposal_task_liveness_check() { ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), - Qc2Formed(either::Left(proposals[4].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(5), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment, - proposals[3].data.block_header.metadata, + proposals[3].data.block_header().metadata, ViewNumber::new(5), vec1![builder_fee.clone()], None, @@ -659,7 +674,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { .0; let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/testing/tests/tests_1/quorum_vote_task.rs index 4d6c018fbc..4ea37ebe00 100644 --- a/testing/tests/tests_1/quorum_vote_task.rs +++ b/testing/tests/tests_1/quorum_vote_task.rs @@ -23,7 +23,7 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, }; use hotshot_types::{ - data::{EpochNumber, Leaf2, ViewNumber}, + data::{Leaf2, ViewNumber}, traits::node_implementation::ConsensusTime, }; @@ -47,7 +47,7 @@ async fn test_quorum_vote_task_success() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let mut proposals = Vec::new(); let mut leaves = Vec::new(); @@ -83,7 +83,7 @@ async fn test_quorum_vote_task_success() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(3), None)), quorum_vote_send(), ])]; @@ -114,7 +114,7 @@ async fn test_quorum_vote_task_miss_dependency() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -198,7 +198,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); let mut proposals = Vec::new(); let mut leaves = Vec::new(); diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 491400d28d..541cd122ec 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -33,10 +33,13 @@ async fn test_transaction_task_leader_two_views_in_a_row() { let mut output = Vec::new(); let current_view = ViewNumber::new(4); - input.push(HotShotEvent::ViewChange(current_view, EpochNumber::new(1))); + input.push(HotShotEvent::ViewChange( + current_view, + Some(EpochNumber::new(1)), + )); input.push(HotShotEvent::ViewChange( current_view + 1, - EpochNumber::new(1), + Some(EpochNumber::new(1)), )); input.push(HotShotEvent::Shutdown); @@ -47,7 +50,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { .memberships .read() .await - .total_nodes(EpochNumber::new(0)), + .total_nodes(Some(EpochNumber::new(0))), ); // current view @@ -57,7 +60,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { num_transactions: 0, }, current_view, - EpochNumber::new(1), + Some(EpochNumber::new(1)), vec1::vec1![ null_block::builder_fee::( handle @@ -65,7 +68,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { .memberships .read() .await - .total_nodes(EpochNumber::new(0)), + .total_nodes(Some(EpochNumber::new(0))), ::Base::VERSION, *ViewNumber::new(4), ) diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 7dd4324426..6a9a9e1f11 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -85,7 +85,7 @@ async fn test_upgrade_task_with_proposal() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(Arc::clone(&membership)); + let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -123,10 +123,13 @@ async fn test_upgrade_task_with_proposal() { } drop(consensus_writer); - let genesis_cert = proposals[0].data.justify_qc.clone(); + let genesis_cert = proposals[0].data.justify_qc().clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(1)), + membership + .read() + .await + .total_nodes(Some(EpochNumber::new(1))), ::Base::VERSION, *ViewNumber::new(1), ) @@ -155,7 +158,7 @@ async fn test_upgrade_task_with_proposal() { build_payload_commitment::( &membership, ViewNumber::new(1), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), @@ -170,16 +173,16 @@ async fn test_upgrade_task_with_proposal() { ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), - Qc2Formed(either::Left(proposals[1].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(2), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[0].data.block_header.metadata, + proposals[0].data.block_header().metadata, ViewNumber::new(2), vec1![builder_fee.clone()], None, @@ -189,16 +192,16 @@ async fn test_upgrade_task_with_proposal() { InputOrder::Random(upgrade_vote_recvs), random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), - Qc2Formed(either::Left(proposals[2].data.justify_qc.clone())), + Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( &membership, ViewNumber::new(3), - EpochNumber::new(1) + Some(EpochNumber::new(1)) ) .await, builder_commitment.clone(), - proposals[1].data.block_header.metadata, + proposals[1].data.block_header().metadata, ViewNumber::new(3), vec1![builder_fee.clone()], None, diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/testing/tests/tests_1/upgrade_task_with_vote.rs index 7e21efe163..841fd86a86 100644 --- a/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -71,7 +71,7 @@ async fn test_upgrade_task_with_vote() { let mut consensus_writer = consensus.write().await; let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); for view in (&mut generator).take(2).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -132,14 +132,14 @@ async fn test_upgrade_task_with_vote() { Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(3), None)), quorum_vote_send(), ]), Expectations::from_outputs_and_task_states( all_predicates![ exact(DaCertificateValidated(dacs[2].clone())), exact(VidShareValidated(vids[2].0[0].clone())), - exact(ViewChange(ViewNumber::new(4), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(4), None)), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], @@ -148,7 +148,7 @@ async fn test_upgrade_task_with_vote() { all_predicates![ exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), - exact(ViewChange(ViewNumber::new(5), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(5), None)), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], @@ -157,7 +157,7 @@ async fn test_upgrade_task_with_vote() { all_predicates![ exact(DaCertificateValidated(dacs[4].clone())), exact(VidShareValidated(vids[4].0[0].clone())), - exact(ViewChange(ViewNumber::new(6), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(6), None)), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index 9105b2dd02..e9d3cb6841 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -21,7 +21,7 @@ use hotshot_testing::{ serial, }; use hotshot_types::{ - data::{null_block, DaProposal, EpochNumber, PackedBundle, VidDisperse, ViewNumber}, + data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, @@ -47,12 +47,8 @@ async fn test_vid_task() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut vid = vid_scheme_from_view_number::( - &membership, - ViewNumber::new(0), - EpochNumber::new(0), - ) - .await; + let mut vid = + vid_scheme_from_view_number::(&membership, ViewNumber::new(0), None).await; let transactions = vec![TestTransaction::new(vec![0])]; let (payload, metadata) = >::from_transactions( @@ -91,8 +87,8 @@ async fn test_vid_task() { message.data.view_number, vid_disperse, &membership, - EpochNumber::new(0), - EpochNumber::new(0), + None, + None, None, ) .await; @@ -103,18 +99,18 @@ async fn test_vid_task() { _pd: PhantomData, }; let inputs = vec![ - serial![ViewChange(ViewNumber::new(1), EpochNumber::new(0))], + serial![ViewChange(ViewNumber::new(1), None)], serial![ - ViewChange(ViewNumber::new(2), EpochNumber::new(0)), + ViewChange(ViewNumber::new(2), None), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), - EpochNumber::new(0), + None, vec1::vec1![null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(None), ::Base::VERSION, *ViewNumber::new(2), ) @@ -136,7 +132,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - membership.read().await.total_nodes(EpochNumber::new(0)), + membership.read().await.total_nodes(None), ::Base::VERSION, *ViewNumber::new(2), ) @@ -147,7 +143,7 @@ async fn test_vid_task() { ]), ]; - let vid_state = VidTaskState::::create_from(&handle).await; + let vid_state = VidTaskState::::create_from(&handle).await; let mut script = TaskScript { timeout: std::time::Duration::from_millis(35), state: vid_state, diff --git a/testing/tests/tests_1/view_sync_task.rs b/testing/tests/tests_1/view_sync_task.rs index 255ca4cfa0..c4c2d73900 100644 --- a/testing/tests/tests_1/view_sync_task.rs +++ b/testing/tests/tests_1/view_sync_task.rs @@ -11,8 +11,7 @@ use hotshot_task_impls::{ }; use hotshot_testing::helpers::build_system_handle; use hotshot_types::{ - data::{EpochNumber, ViewNumber}, - simple_vote::ViewSyncPreCommitData2, + data::ViewNumber, simple_vote::ViewSyncPreCommitData2, traits::node_implementation::ConsensusTime, }; @@ -29,7 +28,7 @@ async fn test_view_sync_task() { let vote_data = ViewSyncPreCommitData2 { relay: 0, round: ::View::new(4), - epoch: EpochNumber::new(0), + epoch: None, }; let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote2::::create_signed_vote( vote_data, @@ -46,21 +45,12 @@ async fn test_view_sync_task() { let mut input = Vec::new(); let mut output = Vec::new(); - input.push(HotShotEvent::Timeout( - ViewNumber::new(2), - EpochNumber::new(0), - )); - input.push(HotShotEvent::Timeout( - ViewNumber::new(3), - EpochNumber::new(0), - )); + input.push(HotShotEvent::Timeout(ViewNumber::new(2), None)); + input.push(HotShotEvent::Timeout(ViewNumber::new(3), None)); input.push(HotShotEvent::Shutdown); - output.push(HotShotEvent::ViewChange( - ViewNumber::new(3), - EpochNumber::new(0), - )); + output.push(HotShotEvent::ViewChange(ViewNumber::new(3), None)); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); let view_sync_state = ViewSyncTaskState::::create_from(&handle).await; diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/testing/tests/tests_1/vote_dependency_handle.rs index 51c91750fb..a6e36c800f 100644 --- a/testing/tests/tests_1/vote_dependency_handle.rs +++ b/testing/tests/tests_1/vote_dependency_handle.rs @@ -15,7 +15,7 @@ use hotshot_testing::{ }; use hotshot_types::{ consensus::OuterConsensus, - data::{EpochNumber, Leaf2, ViewNumber}, + data::{Leaf2, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, }; use itertools::Itertools; @@ -38,7 +38,7 @@ async fn test_vote_dependency_handle() { .0; let membership = Arc::clone(&handle.hotshot.memberships); - let mut generator = TestViewGenerator::generate(membership); + let mut generator = TestViewGenerator::::generate(membership); // Generate our state for the test let mut proposals = Vec::new(); @@ -76,7 +76,7 @@ async fn test_vote_dependency_handle() { for inputs in all_inputs.into_iter() { // The outputs are static here, but we re-make them since we use `into_iter` below let outputs = vec![ - exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), + exact(ViewChange(ViewNumber::new(3), None)), quorum_vote_send(), ]; diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index 2a08d2c871..0608927532 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -21,7 +21,7 @@ use hotshot_testing::{ test_builder::{Behaviour, TestDescription}, }; use hotshot_types::{ - data::{EpochNumber, ViewNumber}, + data::ViewNumber, message::{GeneralConsensusMessage, MessageKind, SequencingMessage}, traits::{ election::Membership, @@ -180,7 +180,7 @@ cross_tests!( view_increment: nodes_count as u64, modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0)).unwrap()); + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, None).unwrap()); } else { {} } diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 54a093be88..7935fad89e 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -21,7 +21,7 @@ use vec1::Vec1; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare2}, + data::{Leaf2, QuorumProposalWrapper, VidDisperse, VidDisperseShare2}, drb::DrbSeedsAndResults, error::HotShotError, event::{HotShotAction, LeafInfo}, @@ -35,8 +35,8 @@ use crate::{ BlockPayload, ValidatedState, }, utils::{ - epoch_from_block_number, is_last_block_in_epoch, BuilderCommitment, LeafCommitment, - StateAndDelta, Terminator, + epoch_from_block_number, is_last_block_in_epoch, option_epoch_from_block_number, + BuilderCommitment, LeafCommitment, StateAndDelta, Terminator, }, vid::VidCommitment, vote::{Certificate, HasViewNumber}, @@ -289,11 +289,11 @@ pub struct Consensus { cur_view: TYPES::View, /// Epoch number that is currently on. - cur_epoch: TYPES::Epoch, + cur_epoch: Option, /// Last proposals we sent out, None if we haven't proposed yet. /// Prevents duplicate proposals, and can be served to those trying to catchup - last_proposals: BTreeMap>>, + last_proposals: BTreeMap>>, /// last view had a successful decide event last_decided_view: TYPES::View, @@ -413,11 +413,11 @@ impl Consensus { validated_state_map: BTreeMap>, vid_shares: Option>, cur_view: TYPES::View, - cur_epoch: TYPES::Epoch, + cur_epoch: Option, locked_view: TYPES::View, last_decided_view: TYPES::View, last_actioned_view: TYPES::View, - last_proposals: BTreeMap>>, + last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, saved_payloads: BTreeMap>, high_qc: QuorumCertificate2, @@ -451,7 +451,7 @@ impl Consensus { } /// Get the current epoch. - pub fn cur_epoch(&self) -> TYPES::Epoch { + pub fn cur_epoch(&self) -> Option { self.cur_epoch } @@ -503,7 +503,7 @@ impl Consensus { /// Get the map of our recent proposals pub fn last_proposals( &self, - ) -> &BTreeMap>> { + ) -> &BTreeMap>> { &self.last_proposals } @@ -553,11 +553,15 @@ impl Consensus { /// Can return an error when the new epoch_number is not higher than the existing epoch number. pub fn update_epoch(&mut self, epoch_number: TYPES::Epoch) -> Result<()> { ensure!( - epoch_number > self.cur_epoch, + self.cur_epoch.is_none() || Some(epoch_number) > self.cur_epoch, debug!("New epoch isn't newer than the current epoch.") ); - tracing::trace!("Updating epoch from {} to {}", self.cur_epoch, epoch_number); - self.cur_epoch = epoch_number; + tracing::trace!( + "Updating epoch from {:?} to {}", + self.cur_epoch, + epoch_number + ); + self.cur_epoch = Some(epoch_number); Ok(()) } @@ -599,7 +603,7 @@ impl Consensus { /// Can return an error when the new view_number is not higher than the existing proposed view number. pub fn update_proposed_view( &mut self, - proposal: Proposal>, + proposal: Proposal>, ) -> Result<()> { ensure!( proposal.data.view_number() @@ -648,7 +652,7 @@ impl Consensus { pub fn update_da_view( &mut self, view_number: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, payload_commitment: VidCommitment, ) -> Result<()> { let view = View { @@ -672,7 +676,11 @@ impl Consensus { delta: Option>::Delta>>, ) -> Result<()> { let view_number = leaf.view_number(); - let epoch = TYPES::Epoch::new(epoch_from_block_number(leaf.height(), self.epoch_height)); + let epoch = option_epoch_from_block_number::( + leaf.with_epoch, + leaf.height(), + self.epoch_height, + ); let view = View { view_inner: ViewInner::Leaf { leaf: leaf.commit(), diff --git a/types/src/data.rs b/types/src/data.rs index 7d2d490cdd..a1c6340eb4 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -49,7 +49,7 @@ use crate::{ states::TestableState, BlockPayload, }, - utils::{bincode_opts, epoch_from_block_number}, + utils::{bincode_opts, genesis_epoch_from_version, option_epoch_from_block_number}, vid::{vid_scheme, VidCommitment, VidCommon, VidPrecomputeData, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; @@ -159,7 +159,7 @@ pub struct DaProposal2 { /// View this proposal applies to pub view_number: TYPES::View, /// Epoch this proposal applies to - pub epoch: TYPES::Epoch, + pub epoch: Option, } impl From> for DaProposal2 { @@ -168,7 +168,7 @@ impl From> for DaProposal2 { encoded_transactions: da_proposal.encoded_transactions, metadata: da_proposal.metadata, view_number: da_proposal.view_number, - epoch: TYPES::Epoch::new(0), + epoch: None, } } } @@ -206,9 +206,9 @@ pub struct VidDisperse { /// The view number for which this VID data is intended pub view_number: TYPES::View, /// Epoch the data of this proposal belongs to - pub epoch: TYPES::Epoch, + pub epoch: Option, /// Epoch to which the recipients of this VID belong to - pub target_epoch: TYPES::Epoch, + pub target_epoch: Option, /// VidCommitment calculated based on the number of nodes in `target_epoch`. pub payload_commitment: VidCommitment, /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. @@ -227,8 +227,8 @@ impl VidDisperse { view_number: TYPES::View, mut vid_disperse: JfVidDisperse, membership: &Arc>, - target_epoch: TYPES::Epoch, - data_epoch: TYPES::Epoch, + target_epoch: Option, + data_epoch: Option, data_epoch_payload_commitment: Option, ) -> Self { let shares = membership @@ -261,8 +261,8 @@ impl VidDisperse { payload: &TYPES::BlockPayload, membership: &Arc>, view: TYPES::View, - target_epoch: TYPES::Epoch, - data_epoch: TYPES::Epoch, + target_epoch: Option, + data_epoch: Option, ) -> Result { let num_nodes = membership.read().await.total_nodes(target_epoch); @@ -432,8 +432,8 @@ impl VidDisperseShare { ); let mut vid_disperse = VidDisperse { view_number: first_vid_disperse_share.view_number, - epoch: TYPES::Epoch::new(0), - target_epoch: TYPES::Epoch::new(0), + epoch: None, + target_epoch: None, payload_commitment: first_vid_disperse_share.payload_commitment, data_epoch_payload_commitment: None, common: first_vid_disperse_share.common, @@ -477,9 +477,9 @@ pub struct VidDisperseShare2 { /// The view number for which this VID data is intended pub view_number: TYPES::View, /// The epoch number for which this VID data belongs to - pub epoch: TYPES::Epoch, + pub epoch: Option, /// The epoch number to which the recipient of this VID belongs to - pub target_epoch: TYPES::Epoch, + pub target_epoch: Option, /// Block payload commitment pub payload_commitment: VidCommitment, /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. @@ -527,8 +527,8 @@ impl From> for VidDisperseShare2 Self { view_number, - epoch: TYPES::Epoch::new(0), - target_epoch: TYPES::Epoch::new(0), + epoch: None, + target_epoch: None, payload_commitment, data_epoch_payload_commitment: None, share, @@ -685,6 +685,84 @@ pub struct QuorumProposal2 { pub next_drb_result: Option, } +/// Wrapper around a proposal to append a block +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound(deserialize = ""))] +pub struct QuorumProposalWrapper { + /// The wrapped proposal + pub proposal: QuorumProposal2, + + /// Indicates whether or not epochs were enabled for this proposal. This is checked when building a QuorumProposalWrapper + pub with_epoch: bool, +} + +impl QuorumProposalWrapper { + /// Helper function to get the proposal's block_header + pub fn block_header(&self) -> &TYPES::BlockHeader { + &self.proposal.block_header + } + + /// Helper function to get the proposal's view_number + pub fn view_number(&self) -> TYPES::View { + self.proposal.view_number + } + + /// Helper function to get the proposal's justify_qc + pub fn justify_qc(&self) -> &QuorumCertificate2 { + &self.proposal.justify_qc + } + + /// Helper function to get the proposal's next_epoch_justify_qc + pub fn next_epoch_justify_qc(&self) -> &Option> { + &self.proposal.next_epoch_justify_qc + } + + /// Helper function to get the proposal's upgrade_certificate + pub fn upgrade_certificate(&self) -> &Option> { + &self.proposal.upgrade_certificate + } + + /// Helper function to get the proposal's view_change_evidence + pub fn view_change_evidence(&self) -> &Option> { + &self.proposal.view_change_evidence + } + + /// Helper function to get the proposal's next_drb_result + pub fn next_drb_result(&self) -> &Option { + &self.proposal.next_drb_result + } +} + +impl From> for QuorumProposalWrapper { + fn from(quorum_proposal: QuorumProposal) -> Self { + Self { + proposal: quorum_proposal.into(), + with_epoch: false, + } + } +} + +impl From> for QuorumProposalWrapper { + fn from(quorum_proposal2: QuorumProposal2) -> Self { + Self { + proposal: quorum_proposal2, + with_epoch: true, + } + } +} + +impl From> for QuorumProposal { + fn from(quorum_proposal_wrapper: QuorumProposalWrapper) -> Self { + quorum_proposal_wrapper.proposal.into() + } +} + +impl From> for QuorumProposal2 { + fn from(quorum_proposal_wrapper: QuorumProposalWrapper) -> Self { + quorum_proposal_wrapper.proposal + } +} + impl From> for QuorumProposal2 { fn from(quorum_proposal: QuorumProposal) -> Self { Self { @@ -729,6 +807,7 @@ impl From> for Leaf2 { block_payload: leaf.block_payload, view_change_evidence: None, next_drb_result: None, + with_epoch: false, } } } @@ -775,6 +854,12 @@ impl HasViewNumber for QuorumProposal2 { } } +impl HasViewNumber for QuorumProposalWrapper { + fn view_number(&self) -> TYPES::View { + self.proposal.view_number + } +} + impl HasViewNumber for UpgradeProposal { fn view_number(&self) -> TYPES::View { self.view_number @@ -878,6 +963,9 @@ pub struct Leaf2 { /// consistent with the result from their computations. #[serde(with = "serde_bytes")] pub next_drb_result: Option, + + /// Indicates whether or not epochs were enabled. + pub with_epoch: bool, } impl Leaf2 { @@ -888,10 +976,12 @@ impl Leaf2 { /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be /// interpreted as bytes). #[must_use] - pub async fn genesis( + pub async fn genesis( validated_state: &TYPES::ValidatedState, instance_state: &TYPES::InstanceState, ) -> Self { + let epoch = genesis_epoch_from_version::(); + let (payload, metadata) = TYPES::BlockPayload::from_transactions([], validated_state, instance_state) .await @@ -910,7 +1000,7 @@ impl Leaf2 { let null_quorum_data = QuorumData2 { leaf_commit: Commitment::>::default_commitment_no_preimage(), - epoch: TYPES::Epoch::genesis(), + epoch, }; let justify_qc = QuorumCertificate2::new( @@ -931,6 +1021,7 @@ impl Leaf2 { block_payload: Some(payload), view_change_evidence: None, next_drb_result: None, + with_epoch: epoch.is_some(), } } /// Time when this leaf was created. @@ -938,11 +1029,12 @@ impl Leaf2 { self.view_number } /// Epoch in which this leaf was created. - pub fn epoch(&self, epoch_height: u64) -> TYPES::Epoch { - TYPES::Epoch::new(epoch_from_block_number( + pub fn epoch(&self, epoch_height: u64) -> Option { + option_epoch_from_block_number::( + self.with_epoch, self.block_header.block_number(), epoch_height, - )) + ) } /// Height of this leaf in the chain. /// @@ -1113,6 +1205,7 @@ impl PartialEq for Leaf2 { block_payload: _, view_change_evidence, next_drb_result, + with_epoch, } = self; *view_number == other.view_number @@ -1123,6 +1216,7 @@ impl PartialEq for Leaf2 { && *upgrade_certificate == other.upgrade_certificate && *view_change_evidence == other.view_change_evidence && *next_drb_result == other.next_drb_result + && *with_epoch == other.with_epoch } } @@ -1194,7 +1288,7 @@ impl QuorumCertificate { impl QuorumCertificate2 { #[must_use] - /// Creat the Genesis certificate + /// Create the Genesis certificate pub async fn genesis( validated_state: &TYPES::ValidatedState, instance_state: &TYPES::InstanceState, @@ -1205,10 +1299,10 @@ impl QuorumCertificate2 { let genesis_view = ::genesis(); let data = QuorumData2 { - leaf_commit: Leaf2::genesis(validated_state, instance_state) + leaf_commit: Leaf2::genesis::(validated_state, instance_state) .await .commit(), - epoch: TYPES::Epoch::genesis(), + epoch: genesis_epoch_from_version::(), // #3967 make sure this is enough of a gate for epochs }; let versioned_data = @@ -1476,17 +1570,21 @@ impl Committable for Leaf { impl Leaf2 { /// Constructs a leaf from a given quorum proposal. - pub fn from_quorum_proposal(quorum_proposal: &QuorumProposal2) -> Self { + pub fn from_quorum_proposal(quorum_proposal: &QuorumProposalWrapper) -> Self { // WARNING: Do NOT change this to a wildcard match, or reference the fields directly in the construction of the leaf. // The point of this match is that we will get a compile-time error if we add a field without updating this. - let QuorumProposal2 { - view_number, - justify_qc, - next_epoch_justify_qc, - block_header, - upgrade_certificate, - view_change_evidence, - next_drb_result, + let QuorumProposalWrapper { + proposal: + QuorumProposal2 { + view_number, + justify_qc, + next_epoch_justify_qc, + block_header, + upgrade_certificate, + view_change_evidence, + next_drb_result, + }, + with_epoch, } = quorum_proposal; Self { @@ -1499,6 +1597,7 @@ impl Leaf2 { block_payload: None, view_change_evidence: view_change_evidence.clone(), next_drb_result: *next_drb_result, + with_epoch: *with_epoch, } } } @@ -1623,7 +1722,7 @@ pub struct PackedBundle { pub view_number: TYPES::View, /// The view number that this block is associated with. - pub epoch_number: TYPES::Epoch, + pub epoch_number: Option, /// The sequencing fee for submitting bundles. pub sequencing_fees: Vec1>, @@ -1641,7 +1740,7 @@ impl PackedBundle { encoded_transactions: Arc<[u8]>, metadata: >::Metadata, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_number: Option, sequencing_fees: Vec1>, vid_precompute: Option, auction_result: Option, diff --git a/types/src/event.rs b/types/src/event.rs index 1b90190693..ab4d6e86c5 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DaProposal2, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare2}, + data::{DaProposal2, Leaf2, QuorumProposalWrapper, UpgradeProposal, VidDisperseShare2}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate2, @@ -160,7 +160,7 @@ pub enum EventType { /// or submitted to the network by us QuorumProposal { /// Contents of the proposal - proposal: Proposal>, + proposal: Proposal>, /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, diff --git a/types/src/message.rs b/types/src/message.rs index ba62574830..b47bf952d2 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -26,8 +26,8 @@ use vbs::{ use crate::{ data::{ - DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, - VidDisperseShare, VidDisperseShare2, + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, UpgradeProposal, VidDisperseShare, VidDisperseShare2, }, request_response::ProposalRequestPayload, simple_certificate::{ @@ -47,7 +47,7 @@ use crate::{ node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, - utils::{epoch_from_block_number, mnemonic}, + utils::{mnemonic, option_epoch_from_block_number}, vote::HasViewNumber, }; @@ -430,15 +430,11 @@ where pub async fn validate_signature( &self, membership: &TYPES::Membership, - epoch_height: u64, + _epoch_height: u64, upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); - let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( - self.data.block_header.block_number(), - epoch_height, - )); - let view_leader_key = membership.leader(view_number, proposal_epoch)?; + let view_leader_key = membership.leader(view_number, None)?; let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( @@ -453,7 +449,7 @@ where } } -impl Proposal> +/*impl Proposal> where TYPES: NodeType, { @@ -466,10 +462,41 @@ where epoch_height: u64, ) -> Result<()> { let view_number = self.data.view_number(); - let proposal_epoch = TYPES::Epoch::new(epoch_from_block_number( + let proposal_epoch = option_epoch_from_block_number::( + true, self.data.block_header.block_number(), epoch_height, - )); + ); + let view_leader_key = membership.leader(view_number, proposal_epoch)?; + let proposed_leaf = Leaf2::from_quorum_proposal(&self.data); + + ensure!( + view_leader_key.validate(&self.signature, proposed_leaf.commit().as_ref()), + "Proposal signature is invalid." + ); + + Ok(()) + } +}*/ + +impl Proposal> +where + TYPES: NodeType, +{ + /// Checks that the signature of the quorum proposal is valid. + /// # Errors + /// Returns an error when the proposal signature is invalid. + pub fn validate_signature( + &self, + membership: &TYPES::Membership, + epoch_height: u64, + ) -> Result<()> { + let view_number = self.data.proposal.view_number(); + let proposal_epoch = option_epoch_from_block_number::( + self.data.with_epoch, + self.data.block_header().block_number(), + epoch_height, + ); let view_leader_key = membership.leader(view_number, proposal_epoch)?; let proposed_leaf = Leaf2::from_quorum_proposal(&self.data); diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index e7d8f9b437..a6b4a1b142 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -42,7 +42,7 @@ pub trait Threshold { /// Calculate a threshold based on the membership fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64; } @@ -53,7 +53,7 @@ pub struct SuccessThreshold {} impl Threshold for SuccessThreshold { fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64 { membership.success_threshold(epoch).into() } @@ -66,7 +66,7 @@ pub struct OneHonestThreshold {} impl Threshold for OneHonestThreshold { fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64 { membership.failure_threshold(epoch).into() } @@ -79,7 +79,7 @@ pub struct UpgradeThreshold {} impl Threshold for UpgradeThreshold { fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64 { membership.upgrade_threshold(epoch).into() } @@ -190,7 +190,7 @@ impl> Certificate fn stake_table_entry>( membership: &MEMBERSHIP, pub_key: &TYPES::SignatureKey, - epoch: TYPES::Epoch, + epoch: Option, ) -> Option<::StakeTableEntry> { membership.da_stake(pub_key, epoch) } @@ -198,20 +198,20 @@ impl> Certificate /// Proxy's to `Membership.da_stake_table` fn stake_table>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> Vec<::StakeTableEntry> { membership.da_stake_table(epoch) } /// Proxy's to `Membership.da_total_nodes` fn total_nodes>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> usize { membership.da_total_nodes(epoch) } fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64 { membership.da_success_threshold(epoch).into() } @@ -278,7 +278,7 @@ impl> Certificate>( membership: &MEMBERSHIP, pub_key: &TYPES::SignatureKey, - epoch: TYPES::Epoch, + epoch: Option, ) -> Option<::StakeTableEntry> { membership.da_stake(pub_key, epoch) } @@ -286,20 +286,20 @@ impl> Certificate>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> Vec<::StakeTableEntry> { membership.da_stake_table(epoch) } /// Proxy's to `Membership.da_total_nodes` fn total_nodes>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> usize { membership.da_total_nodes(epoch) } fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64 { membership.da_success_threshold(epoch).into() } @@ -367,7 +367,7 @@ impl< } fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64 { THRESHOLD::threshold(membership, epoch) } @@ -375,14 +375,14 @@ impl< fn stake_table_entry>( membership: &MEMBERSHIP, pub_key: &TYPES::SignatureKey, - epoch: TYPES::Epoch, + epoch: Option, ) -> Option<::StakeTableEntry> { membership.stake(pub_key, epoch) } fn stake_table>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> Vec<::StakeTableEntry> { membership.stake_table(epoch) } @@ -390,7 +390,7 @@ impl< /// Proxy's to `Membership.total_nodes` fn total_nodes>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> usize { membership.total_nodes(epoch) } @@ -452,7 +452,7 @@ impl UpgradeCertificate { pub async fn validate( upgrade_certificate: &Option, membership: &RwLock, - epoch: TYPES::Epoch, + epoch: Option, upgrade_lock: &UpgradeLock, ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { @@ -489,7 +489,7 @@ impl QuorumCertificate { let bytes: [u8; 32] = self.data.leaf_commit.into(); let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), - epoch: TYPES::Epoch::genesis(), + epoch: None, }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -531,7 +531,7 @@ impl DaCertificate { pub fn to_dac2(self) -> DaCertificate2 { let data = DaData2 { payload_commit: self.data.payload_commit, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -573,7 +573,7 @@ impl ViewSyncPreCommitCertificate { let data = ViewSyncPreCommitData2 { relay: self.data.relay, round: self.data.round, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -616,7 +616,7 @@ impl ViewSyncCommitCertificate { let data = ViewSyncCommitData2 { relay: self.data.relay, round: self.data.round, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -659,7 +659,7 @@ impl ViewSyncFinalizeCertificate { let data = ViewSyncFinalizeData2 { relay: self.data.relay, round: self.data.round, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -701,7 +701,7 @@ impl TimeoutCertificate { pub fn to_tc2(self) -> TimeoutCertificate2 { let data = TimeoutData2 { view: self.data.view, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let bytes: [u8; 32] = self.vote_commitment.into(); diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index ad651458d1..0ea17c7195 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -22,7 +22,7 @@ use crate::{ data::{Leaf, Leaf2}, message::UpgradeLock, traits::{ - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{NodeType, Versions}, signature_key::SignatureKey, }, vid::VidCommitment, @@ -46,7 +46,7 @@ pub struct QuorumData2 { /// Commitment to the leaf pub leaf_commit: Commitment>, /// An epoch to which the data belongs to. Relevant for validating against the correct stake table - pub epoch: TYPES::Epoch, + pub epoch: Option, } /// Data used for a yes vote. Used to distinguish votes sent by the next epoch nodes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -64,7 +64,7 @@ pub struct DaData2 { /// Commitment to a block payload pub payload_commit: VidCommitment, /// Epoch number - pub epoch: TYPES::Epoch, + pub epoch: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a timeout vote. @@ -78,7 +78,7 @@ pub struct TimeoutData2 { /// View the timeout is for pub view: TYPES::View, /// Epoch number - pub epoch: TYPES::Epoch, + pub epoch: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Pre Commit vote. @@ -96,7 +96,7 @@ pub struct ViewSyncPreCommitData2 { /// The view number we are trying to sync on pub round: TYPES::View, /// Epoch number - pub epoch: TYPES::Epoch, + pub epoch: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Commit vote. @@ -114,7 +114,7 @@ pub struct ViewSyncCommitData2 { /// The view number we are trying to sync on pub round: TYPES::View, /// Epoch number - pub epoch: TYPES::Epoch, + pub epoch: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Finalize vote. @@ -132,7 +132,7 @@ pub struct ViewSyncFinalizeData2 { /// The view number we are trying to sync on pub round: TYPES::View, /// Epoch number - pub epoch: TYPES::Epoch, + pub epoch: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Upgrade vote. @@ -161,7 +161,7 @@ pub struct UpgradeData2 { /// A unique identifier for the specific protocol being voted on pub hash: Vec, /// The first epoch in which the upgrade will be in effect - pub epoch: TYPES::Epoch, + pub epoch: Option, } /// Marker trait for data or commitments that can be voted on. @@ -438,14 +438,18 @@ impl Committable for UpgradeData2 { epoch, } = self; - committable::RawCommitmentBuilder::new("Upgrade data") + let mut cb = committable::RawCommitmentBuilder::new("Upgrade data") .u16(old_version.minor) .u16(old_version.major) .u16(new_version.minor) .u16(new_version.major) - .var_size_bytes(hash.as_slice()) - .u64(**epoch) - .finalize() + .var_size_bytes(hash.as_slice()); + + if let Some(ref epoch) = *epoch { + cb = cb.u64(**epoch); + } + + cb.finalize() } } @@ -516,7 +520,7 @@ impl Committable for ViewSyncCommitData2 { /// A trait for types belonging for specific epoch pub trait HasEpoch { /// Returns `Epoch` - fn epoch(&self) -> TYPES::Epoch; + fn epoch(&self) -> Option; } /// Helper macro for trivial implementation of the `HasEpoch` trait @@ -525,7 +529,7 @@ macro_rules! impl_has_epoch { ($($t:ty),*) => { $( impl HasEpoch for $t { - fn epoch(&self) -> TYPES::Epoch { + fn epoch(&self) -> Option { self.epoch } } @@ -546,7 +550,7 @@ impl_has_epoch!( impl + HasEpoch> HasEpoch for SimpleVote { - fn epoch(&self) -> TYPES::Epoch { + fn epoch(&self) -> Option { self.data.epoch() } } @@ -585,7 +589,7 @@ impl QuorumVote { let signature = self.signature; let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), - epoch: TYPES::Epoch::genesis(), + epoch: None, }; let view_number = self.view_number; @@ -622,7 +626,7 @@ impl DaVote { let signature = self.signature; let data = DaData2 { payload_commit: self.data.payload_commit, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let view_number = self.view_number; @@ -657,7 +661,7 @@ impl TimeoutVote { let signature = self.signature; let data = TimeoutData2 { view: self.data.view, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let view_number = self.view_number; @@ -693,7 +697,7 @@ impl ViewSyncPreCommitVote { let data = ViewSyncPreCommitData2 { relay: self.data.relay, round: self.data.round, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let view_number = self.view_number; @@ -730,7 +734,7 @@ impl ViewSyncCommitVote { let data = ViewSyncCommitData2 { relay: self.data.relay, round: self.data.round, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let view_number = self.view_number; @@ -767,7 +771,7 @@ impl ViewSyncFinalizeVote { let data = ViewSyncFinalizeData2 { relay: self.data.relay, round: self.data.round, - epoch: TYPES::Epoch::new(0), + epoch: None, }; let view_number = self.view_number; diff --git a/types/src/traits/election.rs b/types/src/traits/election.rs index c862062190..4e5347f49d 100644 --- a/types/src/traits/election.rs +++ b/types/src/traits/election.rs @@ -30,34 +30,34 @@ pub trait Membership: Debug + Send + Sync { /// Get all participants in the committee (including their stake) for a specific epoch fn stake_table( &self, - epoch: TYPES::Epoch, + epoch: Option, ) -> Vec<::StakeTableEntry>; /// Get all participants in the committee (including their stake) for a specific epoch fn da_stake_table( &self, - epoch: TYPES::Epoch, + epoch: Option, ) -> Vec<::StakeTableEntry>; /// Get all participants in the committee for a specific view for a specific epoch fn committee_members( &self, view_number: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, ) -> BTreeSet; /// Get all participants in the committee for a specific view for a specific epoch fn da_committee_members( &self, view_number: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, ) -> BTreeSet; /// Get all leaders in the committee for a specific view for a specific epoch fn committee_leaders( &self, view_number: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, ) -> BTreeSet; /// Get the stake table entry for a public key, returns `None` if the @@ -65,7 +65,7 @@ pub trait Membership: Debug + Send + Sync { fn stake( &self, pub_key: &TYPES::SignatureKey, - epoch: TYPES::Epoch, + epoch: Option, ) -> Option<::StakeTableEntry>; /// Get the DA stake table entry for a public key, returns `None` if the @@ -73,14 +73,14 @@ pub trait Membership: Debug + Send + Sync { fn da_stake( &self, pub_key: &TYPES::SignatureKey, - epoch: TYPES::Epoch, + epoch: Option, ) -> Option<::StakeTableEntry>; /// See if a node has stake in the committee in a specific epoch - fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; + fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: Option) -> bool; /// See if a node has stake in the committee in a specific epoch - fn has_da_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; + fn has_da_stake(&self, pub_key: &TYPES::SignatureKey, epoch: Option) -> bool; /// The leader of the committee for view `view_number` in `epoch`. /// @@ -89,7 +89,11 @@ pub trait Membership: Debug + Send + Sync { /// /// # Errors /// Returns an error if the leader cannot be calculated. - fn leader(&self, view: TYPES::View, epoch: TYPES::Epoch) -> Result { + fn leader( + &self, + view: TYPES::View, + epoch: Option, + ) -> Result { use utils::anytrace::*; self.lookup_leader(view, epoch).wrap().context(info!( @@ -107,31 +111,33 @@ pub trait Membership: Debug + Send + Sync { fn lookup_leader( &self, view: TYPES::View, - epoch: TYPES::Epoch, + epoch: Option, ) -> std::result::Result; /// Returns the number of total nodes in the committee in an epoch `epoch` - fn total_nodes(&self, epoch: TYPES::Epoch) -> usize; + fn total_nodes(&self, epoch: Option) -> usize; /// Returns the number of total DA nodes in the committee in an epoch `epoch` - fn da_total_nodes(&self, epoch: TYPES::Epoch) -> usize; + fn da_total_nodes(&self, epoch: Option) -> usize; /// Returns the threshold for a specific `Membership` implementation - fn success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; + fn success_threshold(&self, epoch: Option) -> NonZeroU64; /// Returns the DA threshold for a specific `Membership` implementation - fn da_success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; + fn da_success_threshold(&self, epoch: Option) -> NonZeroU64; /// Returns the threshold for a specific `Membership` implementation - fn failure_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; + fn failure_threshold(&self, epoch: Option) -> NonZeroU64; /// Returns the threshold required to upgrade the network protocol - fn upgrade_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; + fn upgrade_threshold(&self, epoch: Option) -> NonZeroU64; #[allow(clippy::type_complexity)] /// Handles notifications that a new epoch root has been created /// Is called under a read lock to the Membership. Return a callback /// with Some to have that callback invoked under a write lock. + /// + /// #3967 REVIEW NOTE: this is only called if epoch is Some. Is there any reason to do otherwise? async fn add_epoch_root( &self, _epoch: TYPES::Epoch, diff --git a/types/src/traits/network.rs b/types/src/traits/network.rs index 2348dc8bc7..4d85ee82dc 100644 --- a/types/src/traits/network.rs +++ b/types/src/traits/network.rs @@ -262,7 +262,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st async fn update_view<'a, TYPES>( &'a self, _view: u64, - _epoch: u64, + _epoch: Option, _membership: Arc>, ) where TYPES: NodeType + 'a, diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 77a4971e24..5ea5764bb0 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -183,8 +183,10 @@ pub trait ConsensusTime: fn genesis() -> Self { Self::new(0) } + /// Create a new instance of this time unit fn new(val: u64) -> Self; + /// Get the u64 format of time fn u64(&self) -> u64; } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index f828d99ad1..60be1e517e 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -20,8 +20,8 @@ use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, data::{ - DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, - VidDisperseShare2, + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidDisperseShare, VidDisperseShare2, }, event::HotShotAction, message::{convert_proposal, Proposal}, @@ -71,6 +71,14 @@ pub trait Storage: Send + Sync + Clone { self.append_proposal(&convert_proposal(proposal.clone())) .await } + /// Add a proposal we sent to the store + async fn append_proposal_wrapper( + &self, + proposal: &Proposal>, + ) -> Result<()> { + self.append_proposal(&convert_proposal(proposal.clone())) + .await + } /// Record a HotShotAction taken. async fn record_action(&self, view: TYPES::View, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. diff --git a/types/src/utils.rs b/types/src/utils.rs index 3183b4a508..04a472d6a6 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -26,10 +26,14 @@ use serde::{Deserialize, Serialize}; use sha2::Digest; use tagged_base64::tagged; use typenum::Unsigned; +use vbs::version::StaticVersionType; use crate::{ data::Leaf2, - traits::{node_implementation::NodeType, ValidatedState}, + traits::{ + node_implementation::{ConsensusTime, NodeType, Versions}, + ValidatedState, + }, vid::VidCommitment, }; @@ -46,7 +50,7 @@ pub enum ViewInner { /// Payload commitment to the available block. payload_commitment: VidCommitment, /// An epoch to which the data belongs to. Relevant for validating against the correct stake table - epoch: TYPES::Epoch, + epoch: Option, }, /// Undecided view Leaf { @@ -57,7 +61,7 @@ pub enum ViewInner { /// Optional state delta. delta: Option>::Delta>>, /// An epoch to which the data belongs to. Relevant for validating against the correct stake table - epoch: TYPES::Epoch, + epoch: Option, }, /// Leaf has failed Failed, @@ -151,7 +155,8 @@ impl ViewInner { } /// Returns `Epoch` if possible - pub fn epoch(&self) -> Option { + /// #3967 REVIEW NOTE: This type is kinda ugly, should we Result> instead? + pub fn epoch(&self) -> Option> { match self { Self::Da { epoch, .. } | Self::Leaf { epoch, .. } => Some(*epoch), Self::Failed => None, @@ -252,6 +257,34 @@ pub fn epoch_from_block_number(block_number: u64, epoch_height: u64) -> u64 { } } +/// Returns an Option based on a boolean condition of whether or not epochs are enabled, a block number, +/// and the epoch height. If epochs are disabled or the epoch height is zero, returns None. +#[must_use] +pub fn option_epoch_from_block_number( + with_epoch: bool, + block_number: u64, + epoch_height: u64, +) -> Option { + if with_epoch { + if epoch_height == 0 { + None + } else if block_number % epoch_height == 0 { + Some(block_number / epoch_height) + } else { + Some(block_number / epoch_height + 1) + } + .map(TYPES::Epoch::new) + } else { + None + } +} + +/// Returns Some(0) if epochs are enabled by V::Base, otherwise returns None +#[must_use] +pub fn genesis_epoch_from_version() -> Option { + (V::Base::VERSION >= V::Epochs::VERSION).then(|| TYPES::Epoch::new(0)) +} + /// A function for generating a cute little user mnemonic from a hash #[must_use] pub fn mnemonic(bytes: H) -> String { diff --git a/types/src/vote.rs b/types/src/vote.rs index 20d36e67ee..41a7d72e01 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -85,26 +85,26 @@ pub trait Certificate: HasViewNumber { // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> u64; /// Get Stake Table from Membership implementation. fn stake_table>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> Vec<::StakeTableEntry>; /// Get Total Nodes from Membership implementation. fn total_nodes>( membership: &MEMBERSHIP, - epoch: TYPES::Epoch, + epoch: Option, ) -> usize; /// Get `StakeTableEntry` from Membership implementation. fn stake_table_entry>( membership: &MEMBERSHIP, pub_key: &TYPES::SignatureKey, - epoch: TYPES::Epoch, + epoch: Option, ) -> Option<::StakeTableEntry>; /// Get the commitment which was voted on @@ -165,7 +165,7 @@ impl< &mut self, vote: &VOTE, membership: &Arc>, - epoch: TYPES::Epoch, + epoch: Option, ) -> Either<(), CERT> { let key = vote.signing_key(); From 648a40816ce599f0ecb277c5dd153a9aada33e3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 08:40:17 -0500 Subject: [PATCH 1363/1393] Bump the all group with 2 updates (#4041) * Bump the all group with 2 updates Bumps the all group with 2 updates: [serde_json](https://github.com/serde-rs/json) and [cbor4ii](https://github.com/quininer/cbor4ii). Updates `serde_json` from 1.0.135 to 1.0.137 - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.135...v1.0.137) Updates `cbor4ii` from 0.3.3 to 1.0.0 - [Commits](https://github.com/quininer/cbor4ii/commits) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all - dependency-name: cbor4ii dependency-type: direct:production update-type: version-update:semver-major dependency-group: all ... Signed-off-by: dependabot[bot] * fix --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- libp2p-networking/Cargo.toml | 2 +- libp2p-networking/src/network/cbor.rs | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index 83856af400..ea9890c15f 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -19,7 +19,7 @@ async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } -cbor4ii = "0.3" +cbor4ii = { workspace = true } delegate = "0.13" derive_builder = "0.20" derive_more = { workspace = true } diff --git a/libp2p-networking/src/network/cbor.rs b/libp2p-networking/src/network/cbor.rs index 4a5685624b..a8ca6afedf 100644 --- a/libp2p-networking/src/network/cbor.rs +++ b/libp2p-networking/src/network/cbor.rs @@ -125,15 +125,17 @@ where fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Error { match err { cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { - io::Error::new(io::ErrorKind::Other, e) + io::Error::new(io::ErrorKind::Other, e.to_string()) } cbor4ii::serde::DecodeError::Core(e @ DecodeError::Unsupported { .. }) => { - io::Error::new(io::ErrorKind::Unsupported, e) + io::Error::new(io::ErrorKind::Unsupported, e.to_string()) } cbor4ii::serde::DecodeError::Core(e @ DecodeError::Eof { .. }) => { - io::Error::new(io::ErrorKind::UnexpectedEof, e) + io::Error::new(io::ErrorKind::UnexpectedEof, e.to_string()) + } + cbor4ii::serde::DecodeError::Core(e) => { + io::Error::new(io::ErrorKind::InvalidData, e.to_string()) } - cbor4ii::serde::DecodeError::Core(e) => io::Error::new(io::ErrorKind::InvalidData, e), cbor4ii::serde::DecodeError::Custom(e) => { io::Error::new(io::ErrorKind::Other, e.to_string()) } From 211752445738828560fd22fda77c4ff68fd60fbe Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 20 Jan 2025 15:06:21 -0500 Subject: [PATCH 1364/1393] Partial integration test infrastructure refactor (#4035) --- example-types/src/node_types.rs | 12 + macros/src/lib.rs | 2 +- task-impls/src/upgrade.rs | 22 +- testing/src/helpers.rs | 23 +- testing/src/test_builder.rs | 241 ++++++++++-------- testing/src/test_launcher.rs | 29 ++- testing/src/test_runner.rs | 20 +- testing/tests/tests_1/libp2p.rs | 18 +- testing/tests/tests_1/network_task.rs | 20 +- testing/tests/tests_1/test_success.rs | 22 +- testing/tests/tests_1/test_with_failures_2.rs | 21 +- testing/tests/tests_2/catchup.rs | 63 ++--- testing/tests/tests_3/byzantine_tests.rs | 39 ++- .../tests_3/test_with_failures_half_f.rs | 4 +- testing/tests/tests_4/test_marketplace.rs | 36 ++- .../tests_4/test_with_builder_failures.rs | 4 +- testing/tests/tests_4/test_with_failures_f.rs | 4 +- testing/tests/tests_5/combined_network.rs | 37 ++- testing/tests/tests_5/push_cdn.rs | 8 +- testing/tests/tests_5/test_with_failures.rs | 4 +- testing/tests/tests_5/timeout.rs | 18 +- testing/tests/tests_5/unreliable_network.rs | 60 +++-- testing/tests/tests_6/test_epochs.rs | 76 +++--- types/src/constants.rs | 32 +-- types/src/traits/node_implementation.rs | 4 + types/src/upgrade_config.rs | 15 ++ 26 files changed, 458 insertions(+), 376 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index e26fa788af..40b79e0b46 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -21,9 +21,11 @@ use hotshot::traits::{ NodeImplementation, }; use hotshot_types::{ + constants::TEST_UPGRADE_CONSTANTS, data::{EpochNumber, ViewNumber}, signature_key::{BLSPubKey, BuilderKey}, traits::node_implementation::{NodeType, Versions}, + upgrade_config::UpgradeConstants, }; use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; @@ -52,6 +54,8 @@ use crate::{ /// to select our traits pub struct TestTypes; impl NodeType for TestTypes { + const UPGRADE_CONSTANTS: UpgradeConstants = TEST_UPGRADE_CONSTANTS; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -82,6 +86,8 @@ impl NodeType for TestTypes { /// to select our traits pub struct TestTypesRandomizedLeader; impl NodeType for TestTypesRandomizedLeader { + const UPGRADE_CONSTANTS: UpgradeConstants = TEST_UPGRADE_CONSTANTS; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -115,6 +121,8 @@ pub struct TestTypesRandomizedCommitteeMembers { } impl NodeType for TestTypesRandomizedCommitteeMembers { + const UPGRADE_CONSTANTS: UpgradeConstants = TEST_UPGRADE_CONSTANTS; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -146,6 +154,8 @@ impl NodeType for TestTypesRandomizedCommitteeMember /// to select our traits pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { + const UPGRADE_CONSTANTS: UpgradeConstants = TEST_UPGRADE_CONSTANTS; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; @@ -176,6 +186,8 @@ impl NodeType for TestConsecutiveLeaderTypes { /// to select our traits pub struct TestTwoStakeTablesTypes; impl NodeType for TestTwoStakeTablesTypes { + const UPGRADE_CONSTANTS: UpgradeConstants = TEST_UPGRADE_CONSTANTS; + type AuctionResult = TestAuctionResult; type View = ViewNumber; type Epoch = EpochNumber; diff --git a/macros/src/lib.rs b/macros/src/lib.rs index c409cb6b58..3608ef6da1 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -206,7 +206,7 @@ impl TestData { async fn #test_name() { hotshot::helpers::initialize_logging(); - hotshot_testing::test_builder::TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata), 0).launch().run_test::<#builder_impl>().await; + hotshot_testing::test_builder::TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata)).launch().run_test::<#builder_impl>().await; } } } diff --git a/task-impls/src/upgrade.rs b/task-impls/src/upgrade.rs index 591d04c9f0..008ccfde4b 100644 --- a/task-impls/src/upgrade.rs +++ b/task-impls/src/upgrade.rs @@ -12,10 +12,6 @@ use async_trait::async_trait; use committable::Committable; use hotshot_task::task::TaskState; use hotshot_types::{ - constants::{ - UPGRADE_BEGIN_OFFSET, UPGRADE_DECIDE_BY_OFFSET, UPGRADE_FINISH_OFFSET, - UPGRADE_PROPOSE_OFFSET, - }, data::UpgradeProposal, event::{Event, EventType}, message::{Proposal, UpgradeLock}, @@ -267,7 +263,7 @@ impl UpgradeTaskState { .as_secs(); let leader = self.membership.read().await.leader( - TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), + TYPES::View::new(view + TYPES::UPGRADE_CONSTANTS.propose_offset), self.cur_epoch, )?; @@ -283,14 +279,22 @@ impl UpgradeTaskState { old_version: V::Base::VERSION, new_version: V::Upgrade::VERSION, new_version_hash: V::UPGRADE_HASH.to_vec(), - old_version_last_view: TYPES::View::new(view + UPGRADE_BEGIN_OFFSET), - new_version_first_view: TYPES::View::new(view + UPGRADE_FINISH_OFFSET), - decide_by: TYPES::View::new(view + UPGRADE_DECIDE_BY_OFFSET), + old_version_last_view: TYPES::View::new( + view + TYPES::UPGRADE_CONSTANTS.begin_offset, + ), + new_version_first_view: TYPES::View::new( + view + TYPES::UPGRADE_CONSTANTS.finish_offset, + ), + decide_by: TYPES::View::new( + view + TYPES::UPGRADE_CONSTANTS.decide_by_offset, + ), }; let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), - view_number: TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), + view_number: TYPES::View::new( + view + TYPES::UPGRADE_CONSTANTS.propose_offset, + ), }; let signature = TYPES::SignatureKey::sign( diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index bbabd4c100..b6d0f7a1cb 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -64,10 +64,11 @@ pub async fn build_system_handle< Sender>>, Receiver>>, ) { - let mut builder: TestDescription = TestDescription::default_multiple_rounds(); - builder.epoch_height = 0; + let builder: TestDescription = TestDescription::default_multiple_rounds(); - let launcher = builder.gen_launcher(node_id); + let launcher = builder.gen_launcher().map_hotshot_config(|hotshot_config| { + hotshot_config.epoch_height = 0; + }); build_system_handle_from_launcher(node_id, &launcher).await } @@ -90,10 +91,10 @@ pub async fn build_system_handle_from_launcher< Sender>>, Receiver>>, ) { - let network = (launcher.resource_generator.channel_generator)(node_id).await; - let storage = (launcher.resource_generator.storage)(node_id); - let marketplace_config = (launcher.resource_generator.marketplace_config)(node_id); - let config = launcher.resource_generator.config.clone(); + let network = (launcher.resource_generators.channel_generator)(node_id).await; + let storage = (launcher.resource_generators.storage)(node_id); + let marketplace_config = (launcher.resource_generators.marketplace_config)(node_id); + let hotshot_config = (launcher.resource_generators.hotshot_config)(node_id); let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( launcher.metadata.async_delay_config.clone(), @@ -102,7 +103,7 @@ pub async fn build_system_handle_from_launcher< .unwrap(); // See whether or not we should be DA - let is_da = node_id < config.da_staked_committee_size as u64; + let is_da = node_id < hotshot_config.da_staked_committee_size as u64; // We assign node's public key and stake value rather than read from config file since it's a test let validator_config: ValidatorConfig = @@ -111,15 +112,15 @@ pub async fn build_system_handle_from_launcher< let public_key = validator_config.public_key.clone(); let memberships = Arc::new(RwLock::new(TYPES::Membership::new( - config.known_nodes_with_stake.clone(), - config.known_da_nodes.clone(), + hotshot_config.known_nodes_with_stake.clone(), + hotshot_config.known_da_nodes.clone(), ))); SystemContext::init( public_key, private_key, node_id, - config, + hotshot_config, memberships, network, initializer, diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 266dc39b7e..6e35b0204c 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -21,7 +21,7 @@ use hotshot_example_types::{ use hotshot_types::{ consensus::ConsensusMetricsValue, traits::node_implementation::{NodeType, Versions}, - HotShotConfig, ValidatorConfig, + HotShotConfig, PeerConfig, ValidatorConfig, }; use tide_disco::Url; use vec1::Vec1; @@ -55,20 +55,75 @@ pub struct TimingData { pub view_sync_timeout: Duration, } +pub fn default_hotshot_config( + known_nodes_with_stake: Vec>, + known_da_nodes: Vec>, + num_bootstrap_nodes: usize, + epoch_height: u64, +) -> HotShotConfig { + HotShotConfig { + start_threshold: (1, 1), + num_nodes_with_stake: NonZeroUsize::new(known_nodes_with_stake.len()).unwrap(), + known_da_nodes: known_da_nodes.clone(), + num_bootstrap: num_bootstrap_nodes, + known_nodes_with_stake: known_nodes_with_stake.clone(), + da_staked_committee_size: known_da_nodes.len(), + fixed_leader_for_gpuvid: 1, + next_view_timeout: 500, + view_sync_timeout: Duration::from_millis(250), + builder_timeout: Duration::from_millis(1000), + data_request_delay: Duration::from_millis(200), + // Placeholder until we spin up the builder + builder_urls: vec1::vec1![Url::parse("http://localhost:9999").expect("Valid URL")], + start_proposing_view: u64::MAX, + stop_proposing_view: 0, + start_voting_view: u64::MAX, + stop_voting_view: 0, + start_proposing_time: u64::MAX, + stop_proposing_time: 0, + start_voting_time: u64::MAX, + stop_voting_time: 0, + epoch_height, + } +} + +#[allow(clippy::type_complexity)] +pub fn gen_node_lists( + num_staked_nodes: u64, + num_da_nodes: u64, +) -> ( + Vec>, + Vec>, +) { + let mut staked_nodes = Vec::new(); + let mut da_nodes = Vec::new(); + + for n in 0..num_staked_nodes { + let validator_config: ValidatorConfig = + ValidatorConfig::generated_from_seed_indexed([0u8; 32], n, 1, n < num_da_nodes); + + let peer_config = validator_config.public_config(); + staked_nodes.push(peer_config.clone()); + + if n < num_da_nodes { + da_nodes.push(peer_config) + } + } + + (staked_nodes, da_nodes) +} + /// metadata describing a test #[derive(Clone)] pub struct TestDescription, V: Versions> { - /// Total number of staked nodes in the test - pub num_nodes_with_stake: usize, - /// nodes available at start - pub start_nodes: usize, + /// `HotShotConfig` used for setting up the test infrastructure. + /// + /// Note: this is not the same as the `HotShotConfig` passed to test nodes for `SystemContext::init`; + /// those configs are instead provided by the resource generators in the test launcher. + pub test_config: HotShotConfig, /// Whether to skip initializing nodes that will start late, which will catch up later with /// `HotShotInitializer::from_reload` in the spinning task. pub skip_late: bool, - /// number of bootstrap nodes (libp2p usage only) - pub num_bootstrap_nodes: usize, - /// Size of the staked DA committee for the test - pub da_staked_committee_size: usize, /// overall safety property description pub overall_safety_properties: OverallSafetyPropertiesDescription, /// spinning properties @@ -99,8 +154,6 @@ pub struct TestDescription, V: Ver pub start_solver: bool, /// boxed closure used to validate the resulting transactions pub validate_transactions: TransactionValidator, - /// Number of blocks in an epoch, zero means there are no epochs - pub epoch_height: u64, } pub fn nonempty_block_threshold(threshold: (u64, u64)) -> TransactionValidator { @@ -300,9 +353,6 @@ impl, V: Versions> TestDescription let num_nodes_with_stake = 100; Self { - num_bootstrap_nodes: num_nodes_with_stake, - num_nodes_with_stake, - start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription:: { num_successful_views: 50, check_leaf: true, @@ -327,9 +377,6 @@ impl, V: Versions> TestDescription pub fn default_multiple_rounds() -> Self { let num_nodes_with_stake = 10; TestDescription:: { - num_bootstrap_nodes: num_nodes_with_stake, - num_nodes_with_stake, - start_nodes: num_nodes_with_stake, overall_safety_properties: OverallSafetyPropertiesDescription:: { num_successful_views: 20, check_leaf: true, @@ -352,14 +399,21 @@ impl, V: Versions> TestDescription #[allow(clippy::redundant_field_names)] pub fn default_more_nodes() -> Self { let num_nodes_with_stake = 20; + let num_da_nodes = 14; + let epoch_height = 10; + + let (staked_nodes, da_nodes) = gen_node_lists::(num_nodes_with_stake, num_da_nodes); + Self { - num_nodes_with_stake, - start_nodes: num_nodes_with_stake, - num_bootstrap_nodes: num_nodes_with_stake, + test_config: default_hotshot_config::( + staked_nodes, + da_nodes, + num_nodes_with_stake.try_into().unwrap(), + epoch_height, + ), // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. - da_staked_committee_size: 14, completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { // Increase the duration to get the expected number of successful views. @@ -373,10 +427,29 @@ impl, V: Versions> TestDescription next_view_timeout: 5000, ..TimingData::default() }, - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), + view_sync_properties: ViewSyncTaskDescription::Threshold( + 0, + num_nodes_with_stake.try_into().unwrap(), + ), ..Self::default() } } + + pub fn set_num_nodes(self, num_nodes: u64, num_da_nodes: u64) -> Self { + assert!(num_da_nodes <= num_nodes, "Cannot build test with fewer DA than total nodes. You may have mixed up the arguments to the function"); + + let (staked_nodes, da_nodes) = gen_node_lists::(num_nodes, num_da_nodes); + + Self { + test_config: default_hotshot_config::( + staked_nodes, + da_nodes, + self.test_config.num_bootstrap, + self.test_config.epoch_height, + ), + ..self + } + } } impl, V: Versions> Default @@ -386,14 +459,20 @@ impl, V: Versions> Default #[allow(clippy::redundant_field_names)] fn default() -> Self { let num_nodes_with_stake = 7; + let num_da_nodes = num_nodes_with_stake; + let epoch_height = 10; + + let (staked_nodes, da_nodes) = gen_node_lists::(num_nodes_with_stake, num_da_nodes); + Self { - epoch_height: 10, + test_config: default_hotshot_config::( + staked_nodes, + da_nodes, + num_nodes_with_stake.try_into().unwrap(), + epoch_height, + ), timing_data: TimingData::default(), - num_nodes_with_stake, - start_nodes: num_nodes_with_stake, skip_late: false, - num_bootstrap_nodes: num_nodes_with_stake, - da_staked_committee_size: num_nodes_with_stake, spinning_properties: SpinningTaskDescription { node_changes: vec![], }, @@ -407,7 +486,10 @@ impl, V: Versions> Default }, ), unreliable_network: None, - view_sync_properties: ViewSyncTaskDescription::Threshold(0, num_nodes_with_stake), + view_sync_properties: ViewSyncTaskDescription::Threshold( + 0, + num_nodes_with_stake.try_into().unwrap(), + ), builders: vec1::vec1![BuilderDescription::default(), BuilderDescription::default(),], fallback_builder: BuilderDescription::default(), solver: FakeSolverApiDescription { @@ -435,8 +517,8 @@ where /// a [`TestLauncher`] that can be used to launch the test. /// # Panics /// if some of the configuration values are zero - pub fn gen_launcher(self, node_id: u64) -> TestLauncher { - self.gen_launcher_with_tasks(node_id, vec![]) + pub fn gen_launcher(self) -> TestLauncher { + self.gen_launcher_with_tasks(vec![]) } /// turn a description of a test (e.g. a [`TestDescription`]) into @@ -447,72 +529,30 @@ where #[must_use] pub fn gen_launcher_with_tasks( self, - node_id: u64, additional_test_tasks: Vec>>, ) -> TestLauncher { let TestDescription { - num_nodes_with_stake, - num_bootstrap_nodes, timing_data, - da_staked_committee_size, unreliable_network, + test_config, .. } = self.clone(); - let mut known_da_nodes = Vec::new(); - - // We assign known_nodes' public key and stake value here rather than read from config file since it's a test. - let known_nodes_with_stake = (0..num_nodes_with_stake) - .map(|node_id_| { - let cur_validator_config: ValidatorConfig = - ValidatorConfig::generated_from_seed_indexed( - [0u8; 32], - node_id_ as u64, - 1, - node_id_ < da_staked_committee_size, - ); - - // Add the node to the known DA nodes based on the index (for tests) - if node_id_ < da_staked_committee_size { - known_da_nodes.push(cur_validator_config.public_config()); - } - - cur_validator_config.public_config() - }) - .collect(); - // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. - let validator_config = ValidatorConfig::::generated_from_seed_indexed( - [0u8; 32], - node_id, - 1, - // This is the config for node 0 - 0 < da_staked_committee_size, - ); - let config = HotShotConfig { - start_threshold: (1, 1), - num_nodes_with_stake: NonZeroUsize::new(num_nodes_with_stake).unwrap(), - // Currently making this zero for simplicity - known_da_nodes, - num_bootstrap: num_bootstrap_nodes, - known_nodes_with_stake, - da_staked_committee_size, - fixed_leader_for_gpuvid: 1, - next_view_timeout: 500, - view_sync_timeout: Duration::from_millis(250), - builder_timeout: Duration::from_millis(1000), - data_request_delay: Duration::from_millis(200), - // Placeholder until we spin up the builder - builder_urls: vec1::vec1![Url::parse("http://localhost:9999").expect("Valid URL")], - start_proposing_view: u64::MAX, - stop_proposing_view: 0, - start_voting_view: u64::MAX, - stop_voting_view: 0, - start_proposing_time: u64::MAX, - stop_proposing_time: 0, - start_voting_time: u64::MAX, - stop_voting_time: 0, - epoch_height: self.epoch_height, - }; + let num_nodes_with_stake = test_config.num_nodes_with_stake.into(); + let num_bootstrap_nodes = test_config.num_bootstrap; + let da_staked_committee_size = test_config.da_staked_committee_size; + + let validator_config = Rc::new(move |node_id| { + ValidatorConfig::::generated_from_seed_indexed( + [0u8; 32], + node_id, + 1, + // This is the config for node 0 + node_id < test_config.da_staked_committee_size.try_into().unwrap(), + ) + }); + + let hotshot_config = Rc::new(move |_| test_config.clone()); let TimingData { next_view_timeout, builder_timeout, @@ -520,18 +560,17 @@ where secondary_network_delay, view_sync_timeout, } = timing_data; - let mod_config = - // TODO this should really be using the timing config struct - |a: &mut HotShotConfig| { - a.next_view_timeout = next_view_timeout; - a.builder_timeout = builder_timeout; - a.data_request_delay = data_request_delay; - a.view_sync_timeout = view_sync_timeout; - }; + // TODO this should really be using the timing config struct + let mod_hotshot_config = move |hotshot_config: &mut HotShotConfig| { + hotshot_config.next_view_timeout = next_view_timeout; + hotshot_config.builder_timeout = builder_timeout; + hotshot_config.data_request_delay = data_request_delay; + hotshot_config.view_sync_timeout = view_sync_timeout; + }; let metadata = self.clone(); TestLauncher { - resource_generator: ResourceGenerators { + resource_generators: ResourceGenerators { channel_generator: >::gen_networks( num_nodes_with_stake, num_bootstrap_nodes, @@ -539,15 +578,15 @@ where unreliable_network, secondary_network_delay, ), - storage: Box::new(move |_| { + storage: Rc::new(move |_| { let mut storage = TestStorage::::default(); // update storage impl to use settings delay option storage.delay_config = metadata.async_delay_config.clone(); storage }), - config, + hotshot_config, validator_config, - marketplace_config: Box::new(|_| MarketplaceConfig:: { + marketplace_config: Rc::new(|_| MarketplaceConfig:: { auction_results_provider: TestAuctionResultsProvider::::default().into(), fallback_builder_url: Url::parse("http://localhost:9999").unwrap(), }), @@ -555,6 +594,6 @@ where metadata: self, additional_test_tasks, } - .modify_default_config(mod_config) + .map_hotshot_config(mod_hotshot_config) } } diff --git a/testing/src/test_launcher.rs b/testing/src/test_launcher.rs index 8edffa33a8..35c77d64de 100644 --- a/testing/src/test_launcher.rs +++ b/testing/src/test_launcher.rs @@ -4,7 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use std::{collections::HashMap, marker::PhantomData, rc::Rc, sync::Arc}; use hotshot::{ traits::{NodeImplementation, TestableNodeImplementation}, @@ -26,7 +26,7 @@ use crate::test_task::TestTaskStateSeed; pub type Network = Arc<>::Network>; /// Wrapper for a function that takes a `node_id` and returns an instance of `T`. -pub type Generator = Box T + 'static>; +pub type Generator = Rc T>; /// generators for resources used by each node pub struct ResourceGenerators> { @@ -35,9 +35,9 @@ pub struct ResourceGenerators>, /// configuration used to generate each hotshot node - pub config: HotShotConfig, + pub hotshot_config: Generator>, /// config that contains the signature keys - pub validator_config: ValidatorConfig, + pub validator_config: Generator>, /// generate a new marketplace config for each node pub marketplace_config: Generator>, } @@ -45,7 +45,7 @@ pub struct ResourceGenerators, V: Versions> { /// generator for resources - pub resource_generator: ResourceGenerators, + pub resource_generators: ResourceGenerators, /// metadata used for tasks pub metadata: TestDescription, /// any additional test tasks to run @@ -67,11 +67,24 @@ impl, V: Versions> TestLau } /// Modifies the config used when generating nodes with `f` #[must_use] - pub fn modify_default_config( + pub fn map_hotshot_config( mut self, - mut f: impl FnMut(&mut HotShotConfig), + f: impl Fn(&mut HotShotConfig) + 'static, ) -> Self { - f(&mut self.resource_generator.config); + let mut test_config = self.metadata.test_config.clone(); + f(&mut test_config); + + let hotshot_config_generator = self.resource_generators.hotshot_config.clone(); + let hotshot_config: Generator<_> = Rc::new(move |node_id| { + let mut result = (hotshot_config_generator)(node_id); + f(&mut result); + + result + }); + + self.metadata.test_config = test_config; + self.resource_generators.hotshot_config = hotshot_config; + self } } diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 6e8d21014b..95fd60494d 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -112,7 +112,11 @@ where } self.add_nodes::( - self.launcher.metadata.num_nodes_with_stake, + self.launcher + .metadata + .test_config + .num_nodes_with_stake + .into(), &late_start_nodes, &restart_nodes, ) @@ -193,7 +197,7 @@ where next_epoch_high_qc: None, async_delay_config: launcher.metadata.async_delay_config, restart_contexts: HashMap::new(), - channel_generator: launcher.resource_generator.channel_generator, + channel_generator: launcher.resource_generators.channel_generator, }; let spinning_task = TestTask::>::new( spinning_task_state, @@ -203,7 +207,7 @@ where // add safety task let overall_safety_task_state = OverallSafetyTask { handles: Arc::clone(&handles), - epoch_height: launcher.metadata.epoch_height, + epoch_height: launcher.metadata.test_config.epoch_height, ctx: RoundCtx::default(), properties: launcher.metadata.overall_safety_properties.clone(), error: None, @@ -328,7 +332,7 @@ where &self, num_nodes: usize, ) -> (Vec>>, Vec, Url) { - let config = self.launcher.resource_generator.config.clone(); + let config = self.launcher.metadata.test_config.clone(); let mut builder_tasks = Vec::new(); let mut builder_urls = Vec::new(); for metadata in &self.launcher.metadata.builders { @@ -399,7 +403,7 @@ where restart: &HashSet, ) -> Vec { let mut results = vec![]; - let config = self.launcher.resource_generator.config.clone(); + let config = self.launcher.metadata.test_config.clone(); // TODO This is only a workaround. Number of nodes changes from epoch to epoch. Builder should be made epoch-aware. let temp_memberships = ::Membership::new( @@ -438,10 +442,10 @@ where .try_into() .expect("Non-empty by construction"); - let network = (self.launcher.resource_generator.channel_generator)(node_id).await; - let storage = (self.launcher.resource_generator.storage)(node_id); + let network = (self.launcher.resource_generators.channel_generator)(node_id).await; + let storage = (self.launcher.resource_generators.storage)(node_id); let mut marketplace_config = - (self.launcher.resource_generator.marketplace_config)(node_id); + (self.launcher.resource_generators.marketplace_config)(node_id); if let Some(solver_server) = &self.solver_server { let mut new_auction_results_provider = marketplace_config.auction_results_provider.as_ref().clone(); diff --git a/testing/tests/tests_1/libp2p.rs b/testing/tests/tests_1/libp2p.rs index 3a267651bc..e0886b2c59 100644 --- a/testing/tests/tests_1/libp2p.rs +++ b/testing/tests/tests_1/libp2p.rs @@ -23,7 +23,7 @@ use tracing::instrument; async fn libp2p_network() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -37,12 +37,12 @@ async fn libp2p_network() { next_view_timeout: 4000, ..Default::default() }, - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; + metadata.test_config.epoch_height = 0; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -69,9 +69,10 @@ async fn libp2p_network_failures_2() { next_view_timeout: 4000, ..Default::default() }, - epoch_height: 0, ..TestDescription::default_multiple_rounds() - }; + } + .set_num_nodes(12, 12); + metadata.test_config.epoch_height = 0; let dead_nodes = vec![ChangeNode { idx: 11, @@ -81,16 +82,13 @@ async fn libp2p_network_failures_2() { metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(3, dead_nodes)], }; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; // 2 nodes fail triggering view sync, expect no other timeouts metadata.overall_safety_properties.num_failed_views = 1; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 15; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -107,7 +105,7 @@ async fn test_stress_libp2p_network() { let metadata: TestDescription = TestDescription::default_stress(); metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 7e391c1024..63892d0a00 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -46,14 +46,14 @@ async fn test_network_task() { let handle = build_system_handle::(node_id) .await .0; - let launcher = builder.gen_launcher(node_id); + let launcher = builder.gen_launcher(); - let network = (launcher.resource_generator.channel_generator)(node_id).await; + let network = (launcher.resource_generators.channel_generator)(node_id).await; - let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); + let storage = Arc::new(RwLock::new((launcher.resource_generators.storage)(node_id))); let consensus = OuterConsensus::new(handle.hotshot.consensus()); - let config = launcher.resource_generator.config.clone(); - let validator_config = launcher.resource_generator.validator_config.clone(); + let config = (launcher.resource_generators.hotshot_config)(node_id); + let validator_config = (launcher.resource_generators.validator_config)(node_id); let public_key = validator_config.public_key; let all_nodes = config.known_nodes_with_stake.clone(); @@ -125,15 +125,15 @@ async fn test_network_storage_fail() { let handle = build_system_handle::(node_id) .await .0; - let launcher = builder.gen_launcher(node_id); + let launcher = builder.gen_launcher(); - let network = (launcher.resource_generator.channel_generator)(node_id).await; + let network = (launcher.resource_generators.channel_generator)(node_id).await; let consensus = OuterConsensus::new(handle.hotshot.consensus()); - let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); + let storage = Arc::new(RwLock::new((launcher.resource_generators.storage)(node_id))); storage.write().await.should_return_err = true; - let config = launcher.resource_generator.config.clone(); - let validator_config = launcher.resource_generator.validator_config.clone(); + let config = (launcher.resource_generators.hotshot_config)(node_id); + let validator_config = (launcher.resource_generators.validator_config)(node_id); let public_key = validator_config.public_key; let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = UpgradeLock::::new(); diff --git a/testing/tests/tests_1/test_success.rs b/testing/tests/tests_1/test_success.rs index 55608a41d1..c4ca20b9de 100644 --- a/testing/tests/tests_1/test_success.rs +++ b/testing/tests/tests_1/test_success.rs @@ -28,16 +28,19 @@ cross_tests!( Versions: [TestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - epoch_height: 0, ..TestDescription::default() - } + }; + + metadata.test_config.epoch_height = 0; + + metadata }, ); @@ -55,9 +58,9 @@ cross_tests!( duration: Duration::from_secs(60), }, ), - epoch_height: 0, ..TestDescription::default() }; + metadata.test_config.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 0; metadata.overall_safety_properties.num_successful_views = 0; @@ -88,9 +91,9 @@ cross_tests!( duration: Duration::from_secs(60), }, ), - epoch_height: 0, ..TestDescription::default() }; + metadata.test_config.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 0; metadata.overall_safety_properties.num_successful_views = 10; @@ -122,12 +125,9 @@ cross_tests!( Versions: [TestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; - metadata.num_bootstrap_nodes = 10; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; + let mut metadata = TestDescription::default_more_nodes().set_num_nodes(12,12); + metadata.test_config.epoch_height = 0; + metadata.test_config.num_bootstrap = 10; metadata.overall_safety_properties.num_failed_views = 0; diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/testing/tests/tests_1/test_with_failures_2.rs index aa4721e696..f9c7f860b5 100644 --- a/testing/tests/tests_1/test_with_failures_2.rs +++ b/testing/tests/tests_1/test_with_failures_2.rs @@ -41,12 +41,9 @@ cross_tests!( Versions: [TestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; - metadata.num_bootstrap_nodes = 10; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; + let mut metadata = TestDescription::default_more_nodes().set_num_nodes(12,12); + metadata.test_config.epoch_height = 0; + metadata.test_config.num_bootstrap = 10; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -81,12 +78,10 @@ cross_tests!( Versions: [TestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; - metadata.num_bootstrap_nodes = 10; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; + let mut metadata = TestDescription::default_more_nodes().set_num_nodes(12, 12); + metadata.test_config.epoch_height = 0; + metadata.test_config.num_bootstrap = 10; + let dead_nodes = vec![ ChangeNode { idx: 3, @@ -113,7 +108,7 @@ cross_tests!( metadata.overall_safety_properties.num_successful_views = 13; // only turning off 1 node, so expected should be num_nodes_with_stake - 1 - let expected_nodes_in_view_sync = metadata.num_nodes_with_stake - 1; + let expected_nodes_in_view_sync = 11; metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(expected_nodes_in_view_sync, expected_nodes_in_view_sync); metadata diff --git a/testing/tests/tests_2/catchup.rs b/testing/tests/tests_2/catchup.rs index f012c22f2a..51b6459d90 100644 --- a/testing/tests/tests_2/catchup.rs +++ b/testing/tests/tests_2/catchup.rs @@ -39,16 +39,14 @@ async fn test_catchup() { ..Default::default() }; let mut metadata: TestDescription = - TestDescription::default(); + TestDescription::default().set_num_nodes(20, 7); let catchup_node = vec![ChangeNode { idx: 19, updown: NodeAction::Up, }]; - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; metadata.timing_data = timing_data; - metadata.start_nodes = 19; - metadata.num_nodes_with_stake = 20; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); @@ -72,7 +70,7 @@ async fn test_catchup() { }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -99,15 +97,13 @@ async fn test_catchup_cdn() { ..Default::default() }; let mut metadata: TestDescription = - TestDescription::default(); + TestDescription::default().set_num_nodes(20, 7); let catchup_nodes = vec![ChangeNode { idx: 18, updown: NodeAction::Up, }]; - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; metadata.timing_data = timing_data; - metadata.start_nodes = 19; - metadata.num_nodes_with_stake = 20; metadata.spinning_properties = SpinningTaskDescription { // Start the nodes before their leadership. @@ -126,7 +122,7 @@ async fn test_catchup_cdn() { }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -153,15 +149,13 @@ async fn test_catchup_one_node() { ..Default::default() }; let mut metadata: TestDescription = - TestDescription::default(); + TestDescription::default().set_num_nodes(20, 7); let catchup_nodes = vec![ChangeNode { idx: 18, updown: NodeAction::Up, }]; - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; metadata.timing_data = timing_data; - metadata.start_nodes = 19; - metadata.num_nodes_with_stake = 20; metadata.spinning_properties = SpinningTaskDescription { // Start the nodes before their leadership. @@ -182,7 +176,7 @@ async fn test_catchup_one_node() { }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -209,7 +203,7 @@ async fn test_catchup_in_view_sync() { ..Default::default() }; let mut metadata: TestDescription = - TestDescription::default(); + TestDescription::default().set_num_nodes(20, 7); let catchup_nodes = vec![ ChangeNode { idx: 18, @@ -221,10 +215,8 @@ async fn test_catchup_in_view_sync() { }, ]; - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; metadata.timing_data = timing_data; - metadata.start_nodes = 18; - metadata.num_nodes_with_stake = 20; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); @@ -244,7 +236,7 @@ async fn test_catchup_in_view_sync() { }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -273,17 +265,15 @@ async fn test_catchup_reload() { ..Default::default() }; let mut metadata: TestDescription = - TestDescription::default(); + TestDescription::default().set_num_nodes(20, 7); let catchup_node = vec![ChangeNode { idx: 19, updown: NodeAction::Up, }]; - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; metadata.timing_data = timing_data; - metadata.start_nodes = 19; metadata.skip_late = true; - metadata.num_nodes_with_stake = 20; metadata.view_sync_properties = hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); @@ -306,7 +296,7 @@ async fn test_catchup_reload() { }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -323,7 +313,7 @@ cross_tests!( next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata = TestDescription::default().set_num_nodes(20,7); let mut catchup_nodes = vec![]; for i in 0..20 { @@ -334,9 +324,7 @@ cross_tests!( } metadata.timing_data = timing_data; - metadata.epoch_height = 0; - metadata.start_nodes = 20; - metadata.num_nodes_with_stake = 20; + metadata.test_config.epoch_height = 0; metadata.spinning_properties = SpinningTaskDescription { // Restart all the nodes in view 13 @@ -378,7 +366,7 @@ cross_tests!( ..Default::default() }; let mut metadata: TestDescription = - TestDescription::default(); + TestDescription::default().set_num_nodes(20,1); let mut catchup_nodes = vec![]; for i in 0..20 { @@ -389,12 +377,7 @@ cross_tests!( } metadata.timing_data = timing_data; - metadata.start_nodes = 20; - metadata.num_nodes_with_stake = 20; - metadata.epoch_height = 0; - - // Explicitly make the DA tiny to exaggerate a missing proposal. - metadata.da_staked_committee_size = 1; + metadata.test_config.epoch_height = 0; metadata.spinning_properties = SpinningTaskDescription { // Restart all the nodes in view 13 @@ -428,7 +411,7 @@ cross_tests!( Versions: [TestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default(); + let mut metadata = TestDescription::default().set_num_nodes(10,4); let mut down_da_nodes = vec![]; for i in 1..4 { @@ -451,12 +434,8 @@ cross_tests!( updown: NodeAction::RestartDown(0), }); - metadata.start_nodes = 10; - metadata.num_nodes_with_stake = 10; - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; - // Explicitly make the DA small to simulate real network. - metadata.da_staked_committee_size = 4; metadata.spinning_properties = SpinningTaskDescription { // Restart all the nodes in view 13 diff --git a/testing/tests/tests_3/byzantine_tests.rs b/testing/tests/tests_3/byzantine_tests.rs index 0608927532..68f1ad3a5c 100644 --- a/testing/tests/tests_3/byzantine_tests.rs +++ b/testing/tests/tests_3/byzantine_tests.rs @@ -43,7 +43,7 @@ cross_tests!( _ => Behaviour::Standard, } }); - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -51,9 +51,11 @@ cross_tests!( }, ), behaviour, - epoch_height: 0, ..TestDescription::default() - } + }; + metadata.test_config.epoch_height = 0; + + metadata }, ); @@ -78,11 +80,10 @@ cross_tests!( }, ), behaviour, - num_nodes_with_stake: 12, - epoch_height: 0, ..TestDescription::default() - }; + }.set_num_nodes(12,12); + metadata.test_config.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 15; metadata }, @@ -117,12 +118,11 @@ cross_tests!( }, ), behaviour, - epoch_height: 0, ..TestDescription::default() - }; + }.set_num_nodes(5,5); + metadata.test_config.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 2; - metadata.num_nodes_with_stake = 5; metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ (ViewNumber::new(7), false), (ViewNumber::new(12), false) @@ -158,11 +158,10 @@ cross_tests!( }, ), behaviour, - epoch_height: 0, ..TestDescription::default() - }; + }.set_num_nodes(10,10); - metadata.num_nodes_with_stake = 10; + metadata.test_config.epoch_height = 0; metadata }, ); @@ -174,13 +173,13 @@ cross_tests!( Versions: [MarketplaceTestVersions], Ignore: false, Metadata: { - let nodes_count: usize = 10; + let nodes_count = 10; let behaviour = Rc::new(move |node_id| { let dishonest_voting = DishonestVoting { - view_increment: nodes_count as u64, + view_increment: nodes_count, modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, None).unwrap()); + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count, None).unwrap()); } else { {} } @@ -200,11 +199,10 @@ cross_tests!( }, ), behaviour, - epoch_height: 0, ..TestDescription::default() - }; + }.set_num_nodes(nodes_count, nodes_count); - metadata.num_nodes_with_stake = nodes_count; + metadata.test_config.epoch_height = 0; metadata }, ); @@ -242,13 +240,12 @@ cross_tests!( duration: Duration::from_secs(60), }, ), - epoch_height: 0, behaviour, ..TestDescription::default() - }; + }.set_num_nodes(10,10); + metadata.test_config.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 1; - metadata.num_nodes_with_stake = 10; metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ (ViewNumber::new(14), false), ]); diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/testing/tests/tests_3/test_with_failures_half_f.rs index b5fbdfd0a0..71eb77ab4f 100644 --- a/testing/tests/tests_3/test_with_failures_half_f.rs +++ b/testing/tests/tests_3/test_with_failures_half_f.rs @@ -23,8 +23,8 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; - metadata.num_bootstrap_nodes = 17; + metadata.test_config.epoch_height = 0; + metadata.test_config.num_bootstrap = 17; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. diff --git a/testing/tests/tests_4/test_marketplace.rs b/testing/tests/tests_4/test_marketplace.rs index 02bb7fee5e..432db4a6ee 100644 --- a/testing/tests/tests_4/test_marketplace.rs +++ b/testing/tests/tests_4/test_marketplace.rs @@ -30,14 +30,13 @@ cross_tests!( Versions: [MarketplaceTestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - epoch_height: 0, fallback_builder: BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) @@ -45,7 +44,11 @@ cross_tests!( validate_transactions: nonempty_block_limit((0,100)), start_solver: false, ..TestDescription::default() - } + }; + + metadata.test_config.epoch_height = 0; + + metadata }, ); @@ -59,18 +62,21 @@ cross_tests!( Versions: [MarketplaceUpgradeTestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - epoch_height: 0, upgrade_view: Some(5), validate_transactions: nonempty_block_threshold((40,50)), ..TestDescription::default() - } + }; + + metadata.test_config.epoch_height = 0; + + metadata }, ); @@ -83,14 +89,13 @@ cross_tests!( Versions: [MarketplaceTestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - epoch_height: 0, builders: vec1![ BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) @@ -101,7 +106,11 @@ cross_tests!( ], validate_transactions: nonempty_block_threshold((35,50)), ..TestDescription::default() - } + }; + + metadata.test_config.epoch_height = 0; + + metadata }, ); @@ -114,14 +123,13 @@ cross_tests!( Versions: [MarketplaceTestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - epoch_height: 0, builders: vec1![ BuilderDescription { changes: HashMap::from([(0, BuilderChange::Down)]) @@ -134,6 +142,10 @@ cross_tests!( }, validate_transactions: nonempty_block_threshold((40,50)), ..TestDescription::default() - } + }; + + metadata.test_config.epoch_height = 0; + + metadata }, ); diff --git a/testing/tests/tests_4/test_with_builder_failures.rs b/testing/tests/tests_4/test_with_builder_failures.rs index 03430c41e4..6497294c75 100644 --- a/testing/tests/tests_4/test_with_builder_failures.rs +++ b/testing/tests/tests_4/test_with_builder_failures.rs @@ -23,14 +23,14 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_multiple_rounds(); - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; // Every block should contain at least one transaction - builders are never offline // simultaneously metadata.overall_safety_properties.transaction_threshold = 1; // Generate a lot of transactions so that freshly restarted builders still have // transactions metadata.txn_description = TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(1)); - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; // Two builders running as follows: // view 1st 2nd diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/testing/tests/tests_4/test_with_failures_f.rs index 04382e8344..1a2467ad6b 100644 --- a/testing/tests/tests_4/test_with_failures_f.rs +++ b/testing/tests/tests_4/test_with_failures_f.rs @@ -23,11 +23,11 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; + metadata.test_config.epoch_height = 0; metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; - metadata.num_bootstrap_nodes = 14; + metadata.test_config.num_bootstrap = 14; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. diff --git a/testing/tests/tests_5/combined_network.rs b/testing/tests/tests_5/combined_network.rs index e47bf9ad2f..57bff51daa 100644 --- a/testing/tests/tests_5/combined_network.rs +++ b/testing/tests/tests_5/combined_network.rs @@ -26,7 +26,7 @@ async fn test_combined_network() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -42,12 +42,12 @@ async fn test_combined_network() { duration: Duration::from_secs(120), }, ), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; + metadata.test_config.epoch_height = 0; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -76,24 +76,24 @@ async fn test_combined_network_cdn_crash() { duration: Duration::from_secs(220), }, ), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; let mut all_nodes = vec![]; - for node in 0..metadata.num_nodes_with_stake { + for node in 0..metadata.test_config.num_nodes_with_stake.into() { all_nodes.push(ChangeNode { idx: node, updown: NodeAction::NetworkDown, }); } + metadata.test_config.epoch_height = 0; metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, all_nodes)], }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -124,13 +124,12 @@ async fn test_combined_network_reup() { duration: Duration::from_secs(220), }, ), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; let mut all_down = vec![]; let mut all_up = vec![]; - for node in 0..metadata.num_nodes_with_stake { + for node in 0..metadata.test_config.num_nodes_with_stake.into() { all_down.push(ChangeNode { idx: node, updown: NodeAction::NetworkDown, @@ -141,12 +140,13 @@ async fn test_combined_network_reup() { }); } + metadata.test_config.epoch_height = 0; metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(13, all_up), (5, all_down)], }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -176,24 +176,24 @@ async fn test_combined_network_half_dc() { duration: Duration::from_secs(220), }, ), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; let mut half = vec![]; - for node in 0..metadata.num_nodes_with_stake / 2 { + for node in 0..usize::from(metadata.test_config.num_nodes_with_stake) / 2 { half.push(ChangeNode { idx: node, updown: NodeAction::NetworkDown, }); } + metadata.test_config.epoch_height = 0; metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(5, half)], }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -235,10 +235,6 @@ async fn test_stress_combined_network_fuzzy() { hotshot::helpers::initialize_logging(); let mut metadata: TestDescription = TestDescription { - num_bootstrap_nodes: 10, - num_nodes_with_stake: 20, - start_nodes: 20, - timing_data: TimingData { next_view_timeout: 10_000, ..Default::default() @@ -249,19 +245,20 @@ async fn test_stress_combined_network_fuzzy() { duration: Duration::from_secs(120), }, ), - epoch_height: 0, ..TestDescription::default_stress() - }; + } + .set_num_nodes(20, 20); + metadata.test_config.epoch_height = 0; metadata.spinning_properties = SpinningTaskDescription { node_changes: generate_random_node_changes( - metadata.num_nodes_with_stake, + metadata.test_config.num_nodes_with_stake.into(), metadata.overall_safety_properties.num_successful_views * 2, ), }; metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/push_cdn.rs b/testing/tests/tests_5/push_cdn.rs index bac333ebb5..5d569da0c2 100644 --- a/testing/tests/tests_5/push_cdn.rs +++ b/testing/tests/tests_5/push_cdn.rs @@ -22,7 +22,7 @@ use tracing::instrument; async fn push_cdn_network() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, ..Default::default() @@ -37,11 +37,13 @@ async fn push_cdn_network() { duration: Duration::from_secs(60), }, ), - epoch_height: 0, ..TestDescription::default() }; + + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/test_with_failures.rs b/testing/tests/tests_5/test_with_failures.rs index 349007d152..45ff52948b 100644 --- a/testing/tests/tests_5/test_with_failures.rs +++ b/testing/tests/tests_5/test_with_failures.rs @@ -24,8 +24,8 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 0; - metadata.num_bootstrap_nodes = 19; + metadata.test_config.epoch_height = 0; + metadata.test_config.num_bootstrap = 19; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. diff --git a/testing/tests/tests_5/timeout.rs b/testing/tests/tests_5/timeout.rs index 54f6e267d2..19fed7605d 100644 --- a/testing/tests/tests_5/timeout.rs +++ b/testing/tests/tests_5/timeout.rs @@ -25,16 +25,15 @@ async fn test_timeout() { }; let mut metadata: TestDescription = TestDescription { - num_nodes_with_stake: 10, - start_nodes: 10, - epoch_height: 0, ..Default::default() - }; + } + .set_num_nodes(10, 10); let dead_nodes = vec![ChangeNode { idx: 0, updown: NodeAction::Down, }]; + metadata.test_config.epoch_height = 0; metadata.timing_data = timing_data; metadata.overall_safety_properties = OverallSafetyPropertiesDescription { @@ -55,7 +54,7 @@ async fn test_timeout() { ); metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -84,11 +83,10 @@ async fn test_timeout_libp2p() { }; let mut metadata: TestDescription = TestDescription { - num_nodes_with_stake: 10, - start_nodes: 10, - num_bootstrap_nodes: 10, ..Default::default() - }; + } + .set_num_nodes(10, 10); + let dead_nodes = vec![ChangeNode { idx: 9, updown: NodeAction::Down, @@ -114,7 +112,7 @@ async fn test_timeout_libp2p() { ); metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; diff --git a/testing/tests/tests_5/unreliable_network.rs b/testing/tests/tests_5/unreliable_network.rs index 7d1e5545b8..d9a3701166 100644 --- a/testing/tests/tests_5/unreliable_network.rs +++ b/testing/tests/tests_5/unreliable_network.rs @@ -23,7 +23,7 @@ use tracing::instrument; async fn libp2p_network_sync() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -37,12 +37,13 @@ async fn libp2p_network_sync() { delay_high_ms: 30, delay_low_ms: 4, })), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -61,7 +62,7 @@ async fn test_memory_network_sync() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -72,11 +73,13 @@ async fn test_memory_network_sync() { delay_high_ms: 30, delay_low_ms: 4, })), - epoch_height: 0, ..TestDescription::default() }; + + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -88,7 +91,7 @@ async fn test_memory_network_sync() { async fn libp2p_network_async() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 50, @@ -110,12 +113,13 @@ async fn libp2p_network_async() { delay_low_ms: 4, delay_high_ms: 30, })), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -135,7 +139,7 @@ async fn test_memory_network_async() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, num_failed_views: 5000, @@ -158,11 +162,13 @@ async fn test_memory_network_async() { delay_low_ms: 4, delay_high_ms: 30, })), - epoch_height: 0, ..TestDescription::default() }; + + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -181,7 +187,7 @@ async fn test_memory_network_partially_sync() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, ..Default::default() @@ -210,11 +216,13 @@ async fn test_memory_network_partially_sync() { gst: std::time::Duration::from_millis(1000), start: Instant::now(), })), - epoch_height: 0, ..TestDescription::default() }; + + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -225,7 +233,7 @@ async fn test_memory_network_partially_sync() { async fn libp2p_network_partially_sync() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, ..Default::default() @@ -249,12 +257,13 @@ async fn libp2p_network_partially_sync() { gst: std::time::Duration::from_millis(1000), start: Instant::now(), })), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -274,7 +283,7 @@ async fn test_memory_network_chaos() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { @@ -289,11 +298,13 @@ async fn test_memory_network_chaos() { repeat_low: 1, repeat_high: 5, })), - epoch_height: 0, ..TestDescription::default() }; + + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; @@ -305,7 +316,7 @@ async fn test_memory_network_chaos() { async fn libp2p_network_chaos() { hotshot::helpers::initialize_logging(); - let metadata: TestDescription = TestDescription { + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, ..Default::default() @@ -323,12 +334,13 @@ async fn libp2p_network_chaos() { repeat_low: 1, repeat_high: 5, })), - epoch_height: 0, ..TestDescription::default_multiple_rounds() }; + metadata.test_config.epoch_height = 0; + metadata - .gen_launcher(0) + .gen_launcher() .launch() .run_test::() .await; diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs index 95d9142a61..1faaa18d35 100644 --- a/testing/tests/tests_6/test_epochs.rs +++ b/testing/tests/tests_6/test_epochs.rs @@ -32,16 +32,19 @@ cross_tests!( Versions: [EpochsTestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_secs(60), }, ), - epoch_height: 10, ..TestDescription::default() - } + }; + + metadata.test_config.epoch_height = 10; + + metadata }, ); @@ -79,10 +82,10 @@ cross_tests!( duration: Duration::from_secs(60), }, ), - epoch_height: 10, ..TestDescription::default() }; + metadata.test_config.epoch_height = 10; metadata.overall_safety_properties.num_failed_views = 0; metadata.overall_safety_properties.num_successful_views = 0; let mut config = DelayConfig::default(); @@ -112,10 +115,10 @@ cross_tests!( duration: Duration::from_secs(60), }, ), - epoch_height: 10, ..TestDescription::default() }; + metadata.test_config.epoch_height = 10; metadata.overall_safety_properties.num_failed_views = 0; metadata.overall_safety_properties.num_successful_views = 30; let mut config = DelayConfig::default(); @@ -146,12 +149,9 @@ cross_tests!( Versions: [EpochsTestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.num_bootstrap_nodes = 10; - metadata.epoch_height = 10; - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; + let mut metadata = TestDescription::default_more_nodes().set_num_nodes(12,12); + metadata.test_config.num_bootstrap = 10; + metadata.test_config.epoch_height = 10; metadata.overall_safety_properties.num_failed_views = 0; @@ -168,19 +168,18 @@ cross_tests!( Versions: [EpochsTestVersions], Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { duration: Duration::from_millis(100000), }, ), - num_nodes_with_stake: 11, - start_nodes: 11, - num_bootstrap_nodes: 11, - da_staked_committee_size: 11, - epoch_height: 10, ..TestDescription::default() - } + }.set_num_nodes(11,11); + + metadata.test_config.epoch_height = 10; + + metadata }, ); @@ -199,7 +198,6 @@ cross_tests!( duration: Duration::from_millis(100000), }, ), - epoch_height: 10, ..TestDescription::default() }; // after the first 3 leaders the next leader is down. It's a hack to make sure we decide in @@ -211,6 +209,7 @@ cross_tests!( }, ]; + metadata.test_config.epoch_height = 10; metadata.spinning_properties = SpinningTaskDescription { node_changes: vec![(1, dead_nodes)] }; @@ -223,21 +222,28 @@ cross_tests!( cross_tests!( TestName: test_epoch_upgrade, Impls: [MemoryImpl], - Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], + Types: [TestTypes, TestTypesRandomizedLeader], + // TODO: we need some test infrastructure + Membership trait fixes to get this to work with: + // Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], Versions: [EpochUpgradeTestVersions], - Ignore: true, + Ignore: false, Metadata: { - TestDescription { + let mut metadata = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), + duration: Duration::from_secs(120), }, ), - epoch_height: 50, upgrade_view: Some(5), ..TestDescription::default() - } + }; + + // Keep going until the 2nd epoch transition + metadata.overall_safety_properties.num_successful_views = 110; + metadata.test_config.epoch_height = 50; + + metadata }, ); @@ -248,11 +254,8 @@ cross_tests!( Versions: [EpochsTestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; - metadata.epoch_height = 10; + let mut metadata = TestDescription::default_more_nodes().set_num_nodes(12,12); + metadata.test_config.epoch_height = 10; let dead_nodes = vec![ ChangeNode { idx: 10, @@ -292,10 +295,7 @@ cross_tests!( Versions: [EpochsTestVersions], Ignore: false, Metadata: { - let mut metadata = TestDescription::default_more_nodes(); - metadata.num_nodes_with_stake = 12; - metadata.da_staked_committee_size = 12; - metadata.start_nodes = 12; + let mut metadata = TestDescription::default_more_nodes().set_num_nodes(12,12); let dead_nodes = vec![ ChangeNode { idx: 5, @@ -322,7 +322,7 @@ cross_tests!( metadata.overall_safety_properties.num_successful_views = 13; // only turning off 1 node, so expected should be num_nodes_with_stake - 1 - let expected_nodes_in_view_sync = metadata.num_nodes_with_stake - 1; + let expected_nodes_in_view_sync = 11; metadata.view_sync_properties = ViewSyncTaskDescription::Threshold(expected_nodes_in_view_sync, expected_nodes_in_view_sync); metadata @@ -337,7 +337,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.epoch_height = 10; + metadata.test_config.epoch_height = 10; // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the // following issue. @@ -424,7 +424,7 @@ cross_tests!( next_view_timeout: 2000, ..Default::default() }; - let mut metadata = TestDescription::default(); + let mut metadata = TestDescription::default().set_num_nodes(20,20); let mut catchup_nodes = vec![]; for i in 0..20 { @@ -435,8 +435,6 @@ cross_tests!( } metadata.timing_data = timing_data; - metadata.start_nodes = 20; - metadata.num_nodes_with_stake = 20; metadata.spinning_properties = SpinningTaskDescription { // Restart all the nodes in view 10 diff --git a/types/src/constants.rs b/types/src/constants.rs index 6a1969ae31..641bfb36ee 100644 --- a/types/src/constants.rs +++ b/types/src/constants.rs @@ -8,6 +8,8 @@ use std::time::Duration; +use crate::upgrade_config::UpgradeConstants; + /// timeout for fetching auction results from the solver pub const AUCTION_RESULTS_FETCH_TIMEOUT: Duration = Duration::from_millis(500); @@ -41,21 +43,21 @@ pub const EVENT_CHANNEL_SIZE: usize = 100_000; /// Default channel size for HotShot -> application communication pub const EXTERNAL_EVENT_CHANNEL_SIZE: usize = 100_000; -/// The offset for how far in the future we will send out a `QuorumProposal` with an `UpgradeCertificate` we form. This is also how far in advance of sending a `QuorumProposal` we begin collecting votes on an `UpgradeProposal`. -pub const UPGRADE_PROPOSE_OFFSET: u64 = 5; - -#[cfg(test)] -/// The offset for how far in the future the upgrade certificate we attach should be decided on (or else discarded). -pub const UPGRADE_DECIDE_BY_OFFSET: u64 = UPGRADE_PROPOSE_OFFSET + 5; -#[cfg(not(test))] -/// The offset for how far in the future the upgrade certificate we attach should be decided on (or else discarded). -pub const UPGRADE_DECIDE_BY_OFFSET: u64 = UPGRADE_PROPOSE_OFFSET + 100; - -/// The offset for how far in the future the upgrade actually begins. -pub const UPGRADE_BEGIN_OFFSET: u64 = UPGRADE_DECIDE_BY_OFFSET + 5; - -/// The offset for how far in the future the upgrade ends. -pub const UPGRADE_FINISH_OFFSET: u64 = UPGRADE_BEGIN_OFFSET + 5; +/// Default values for the upgrade constants +pub const DEFAULT_UPGRADE_CONSTANTS: UpgradeConstants = UpgradeConstants { + propose_offset: 5, + decide_by_offset: 105, + begin_offset: 110, + finish_offset: 115, +}; + +/// Default values for the upgrade constants to be used in testing +pub const TEST_UPGRADE_CONSTANTS: UpgradeConstants = UpgradeConstants { + propose_offset: 5, + decide_by_offset: 10, + begin_offset: 15, + finish_offset: 20, +}; /// For `STAKE_TABLE_CAPACITY=200`, the light client prover (a.k.a. `hotshot-state-prover`) /// would need to generate proof for a circuit of slightly below 2^20 gates. diff --git a/types/src/traits/node_implementation.rs b/types/src/traits/node_implementation.rs index 5ea5764bb0..47e35437b8 100644 --- a/types/src/traits/node_implementation.rs +++ b/types/src/traits/node_implementation.rs @@ -35,10 +35,12 @@ use super::{ ValidatedState, }; use crate::{ + constants::DEFAULT_UPGRADE_CONSTANTS, data::{Leaf2, TestableLeaf}, traits::{ election::Membership, signature_key::SignatureKey, states::InstanceState, BlockPayload, }, + upgrade_config::UpgradeConstants, }; /// This trait guarantees that a particular type has urls that can be extracted from it. This trait @@ -208,6 +210,8 @@ pub trait NodeType: + Sync + 'static { + /// Constants used to construct upgrade proposals + const UPGRADE_CONSTANTS: UpgradeConstants = DEFAULT_UPGRADE_CONSTANTS; /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. diff --git a/types/src/upgrade_config.rs b/types/src/upgrade_config.rs index 5c3b5a3fba..4edd3c820d 100644 --- a/types/src/upgrade_config.rs +++ b/types/src/upgrade_config.rs @@ -4,6 +4,21 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +/// Constants associated with the upgrade process. +pub struct UpgradeConstants { + /// The offset for how far in the future we will send out a `QuorumProposal` with an `UpgradeCertificate` we form. This is also how far in advance of sending a `QuorumProposal` we begin collecting votes on an `UpgradeProposal`. + pub propose_offset: u64, + + /// The offset for how far in the future the upgrade certificate we attach should be decided on (or else discarded). + pub decide_by_offset: u64, + + /// The offset for how far in the future the upgrade actually begins. + pub begin_offset: u64, + + /// The offset for how far in the future the upgrade ends. + pub finish_offset: u64, +} + #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(bound(deserialize = ""))] /// Holds configuration for the upgrade task. From 8ecc370468b35c8b190766a1fa7833fd8183fe06 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Tue, 21 Jan 2025 12:36:45 -0500 Subject: [PATCH 1365/1393] nuke VID precompute (#4065) --- builder-api/src/v0_1/block_info.rs | 3 +-- task-impls/src/transactions.rs | 21 ++-------------- testing/src/block_builder/mod.rs | 10 ++++---- testing/tests/tests_1/da_task.rs | 11 +++------ testing/tests/tests_1/transaction_task.rs | 12 --------- testing/tests/tests_1/vid_task.rs | 4 +-- types/src/data.rs | 8 +----- types/src/traits/block_contents.rs | 18 +------------- types/src/vid.rs | 30 ----------------------- 9 files changed, 15 insertions(+), 102 deletions(-) diff --git a/builder-api/src/v0_1/block_info.rs b/builder-api/src/v0_1/block_info.rs index 9048283140..0006140900 100644 --- a/builder-api/src/v0_1/block_info.rs +++ b/builder-api/src/v0_1/block_info.rs @@ -9,7 +9,7 @@ use std::marker::PhantomData; use hotshot_types::{ traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey, BlockPayload}, utils::BuilderCommitment, - vid::{VidCommitment, VidPrecomputeData}, + vid::VidCommitment, }; use serde::{Deserialize, Serialize}; @@ -48,7 +48,6 @@ impl AvailableBlockData { #[serde(bound = "")] pub struct AvailableBlockHeaderInput { pub vid_commitment: VidCommitment, - pub vid_precompute_data: VidPrecomputeData, // signature over vid_commitment, BlockPayload::Metadata, and offered_fee pub fee_signature: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderSignature, diff --git a/task-impls/src/transactions.rs b/task-impls/src/transactions.rs index 0d47f6ee3e..e85ad658ff 100644 --- a/task-impls/src/transactions.rs +++ b/task-impls/src/transactions.rs @@ -22,14 +22,14 @@ use hotshot_types::{ message::UpgradeLock, traits::{ auction_results_provider::AuctionResultsProvider, - block_contents::{precompute_vid_commitment, BuilderFee, EncodeBytes}, + block_contents::{BuilderFee, EncodeBytes}, election::Membership, node_implementation::{ConsensusTime, HasUrls, NodeImplementation, NodeType, Versions}, signature_key::{BuilderSignatureKey, SignatureKey}, BlockPayload, }, utils::ViewInner, - vid::{VidCommitment, VidPrecomputeData}, + vid::VidCommitment, }; use tokio::time::{sleep, timeout}; use tracing::instrument; @@ -72,9 +72,6 @@ pub struct BuilderResponse { /// Block metadata pub metadata: >::Metadata, - - /// Optional precomputed commitment - pub precompute_data: Option, } /// Tracks state of a Transaction task @@ -186,7 +183,6 @@ impl, V: Versions> TransactionTask block_payload, metadata, fee, - precompute_data, }) = block { broadcast_event( @@ -196,7 +192,6 @@ impl, V: Versions> TransactionTask block_view, block_epoch, vec1::vec1![fee], - precompute_data, None, ))), event_stream, @@ -228,8 +223,6 @@ impl, V: Versions> TransactionTask // Create an empty block payload and metadata let (_, metadata) = ::BlockPayload::empty(); - let (_, precompute_data) = precompute_vid_commitment(&[], membership_total_nodes); - // Broadcast the empty block broadcast_event( Arc::new(HotShotEvent::BlockRecv(PackedBundle::new( @@ -238,7 +231,6 @@ impl, V: Versions> TransactionTask block_view, block_epoch, vec1::vec1![null_fee], - Some(precompute_data), None, ))), event_stream, @@ -352,7 +344,6 @@ impl, V: Versions> TransactionTask block_view, block_epoch, sequencing_fees, - None, Some(auction_result), )) } @@ -375,15 +366,12 @@ impl, V: Versions> TransactionTask // Create an empty block payload and metadata let (_, metadata) = ::BlockPayload::empty(); - let (_, precompute_data) = precompute_vid_commitment(&[], membership_total_nodes); - Some(PackedBundle::new( vec![].into(), metadata, block_view, block_epoch, vec1::vec1![null_fee], - Some(precompute_data), Some(TYPES::AuctionResult::default()), )) } @@ -802,11 +790,6 @@ impl, V: Versions> TransactionTask fee, block_payload: block_data.block_payload, metadata: block_data.metadata, - // we discard the precompute data, - // because we cannot trust that the builder is able to calculate this correctly. - // - // in particular, the builder needs to know `num_nodes` and there aren't any practical ways to verify the result it sent us. - precompute_data: None, } }; diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index 45b8f07461..891fbbef72 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -22,8 +22,7 @@ use hotshot_builder_api::{ use hotshot_types::{ constants::{LEGACY_BUILDER_MODULE, MARKETPLACE_BUILDER_MODULE}, traits::{ - block_contents::{precompute_vid_commitment, EncodeBytes}, - node_implementation::NodeType, + block_contents::EncodeBytes, node_implementation::NodeType, signature_key::BuilderSignatureKey, }, }; @@ -186,8 +185,10 @@ where let commitment = block_payload.builder_commitment(&metadata); - let (vid_commitment, precompute_data) = - precompute_vid_commitment(&block_payload.encode(), *num_storage_nodes.read_arc().await); + let vid_commitment = hotshot_types::traits::block_contents::vid_commitment( + &block_payload.encode(), + *num_storage_nodes.read_arc().await, + ); // Get block size from the encoded payload let block_size = block_payload.encode().len() as u64; @@ -224,7 +225,6 @@ where }; let header_input = AvailableBlockHeaderInput { vid_commitment, - vid_precompute_data: precompute_data, message_signature: signature_over_vid_commitment.clone(), fee_signature: signature_over_fee_info, sender: pub_key, diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 8ed94ff23d..5422eb2a99 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -25,7 +25,6 @@ use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData2, traits::{ - block_contents::precompute_vid_commitment, election::Membership, node_implementation::{ConsensusTime, Versions}, }, @@ -45,8 +44,8 @@ async fn test_da_task() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction::new(vec![0])]; - let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); - let (payload_commit, precompute) = precompute_vid_commitment( + let encoded_transactions: Arc<[u8]> = Arc::from(TestTransaction::encode(&transactions)); + let payload_commit = hotshot_types::traits::block_contents::vid_commitment( &encoded_transactions, handle.hotshot.memberships.read().await.total_nodes(None), ); @@ -112,7 +111,6 @@ async fn test_da_task() { *ViewNumber::new(2), ) .unwrap()], - Some(precompute), None, )), ], @@ -153,8 +151,8 @@ async fn test_da_task_storage_failure() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction::new(vec![0])]; - let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); - let (payload_commit, precompute) = precompute_vid_commitment( + let encoded_transactions: Arc<[u8]> = Arc::from(TestTransaction::encode(&transactions)); + let payload_commit = hotshot_types::traits::block_contents::vid_commitment( &encoded_transactions, handle.hotshot.memberships.read().await.total_nodes(None), ); @@ -220,7 +218,6 @@ async fn test_da_task_storage_failure() { *ViewNumber::new(2), ) .unwrap()], - Some(precompute), None, ),) ], diff --git a/testing/tests/tests_1/transaction_task.rs b/testing/tests/tests_1/transaction_task.rs index 541cd122ec..a5e2f48278 100644 --- a/testing/tests/tests_1/transaction_task.rs +++ b/testing/tests/tests_1/transaction_task.rs @@ -10,7 +10,6 @@ use hotshot_testing::helpers::build_system_handle; use hotshot_types::{ data::{null_block, EpochNumber, PackedBundle, ViewNumber}, traits::{ - block_contents::precompute_vid_commitment, election::Membership, node_implementation::{ConsensusTime, Versions}, }, @@ -43,16 +42,6 @@ async fn test_transaction_task_leader_two_views_in_a_row() { )); input.push(HotShotEvent::Shutdown); - let (_, precompute_data) = precompute_vid_commitment( - &[], - handle - .hotshot - .memberships - .read() - .await - .total_nodes(Some(EpochNumber::new(0))), - ); - // current view let mut exp_packed_bundle = PackedBundle::new( vec![].into(), @@ -74,7 +63,6 @@ async fn test_transaction_task_leader_two_views_in_a_row() { ) .unwrap() ], - Some(precompute_data.clone()), None, ); output.push(HotShotEvent::BlockRecv(exp_packed_bundle.clone())); diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index e9d3cb6841..c79acbcad4 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -29,7 +29,7 @@ use hotshot_types::{ BlockPayload, }, }; -use jf_vid::{precomputable::Precomputable, VidScheme}; +use jf_vid::VidScheme; use vbs::version::StaticVersionType; use vec1::vec1; @@ -62,7 +62,6 @@ async fn test_vid_task() { >::builder_commitment(&payload, &metadata); let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); - let (_, vid_precompute) = vid.commit_only_precompute(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; let signature = ::SignatureKey::sign( @@ -115,7 +114,6 @@ async fn test_vid_task() { *ViewNumber::new(2), ) .unwrap()], - Some(vid_precompute), None, )), ], diff --git a/types/src/data.rs b/types/src/data.rs index a1c6340eb4..37013b80df 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -50,7 +50,7 @@ use crate::{ BlockPayload, }, utils::{bincode_opts, genesis_epoch_from_version, option_epoch_from_block_number}, - vid::{vid_scheme, VidCommitment, VidCommon, VidPrecomputeData, VidSchemeType, VidShare}, + vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType, VidShare}, vote::{Certificate, HasViewNumber}, }; @@ -251,7 +251,6 @@ impl VidDisperse { } /// Calculate the vid disperse information from the payload given a view, epoch and membership, - /// optionally using precompute data from builder. /// If the sender epoch is missing, it means it's the same as the target epoch. /// /// # Errors @@ -1727,9 +1726,6 @@ pub struct PackedBundle { /// The sequencing fee for submitting bundles. pub sequencing_fees: Vec1>, - /// The Vid precompute for the block. - pub vid_precompute: Option, - /// The auction results for the block, if it was produced as the result of an auction pub auction_result: Option, } @@ -1742,7 +1738,6 @@ impl PackedBundle { view_number: TYPES::View, epoch_number: Option, sequencing_fees: Vec1>, - vid_precompute: Option, auction_result: Option, ) -> Self { Self { @@ -1751,7 +1746,6 @@ impl PackedBundle { view_number, epoch_number, sequencing_fees, - vid_precompute, auction_result, } } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index febf32798f..2c312831b7 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -19,7 +19,7 @@ use std::{ use async_trait::async_trait; use committable::{Commitment, Committable}; -use jf_vid::{precomputable::Precomputable, VidScheme}; +use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vbs::version::Version; @@ -153,22 +153,6 @@ pub fn vid_commitment( vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{encoded_tx_len}) error: {err}")) } -/// Compute the VID payload commitment along with precompute data reducing time in VID Disperse -/// # Panics -/// If the VID computation fails. -#[must_use] -#[allow(clippy::panic)] -pub fn precompute_vid_commitment( - encoded_transactions: &[u8], - num_storage_nodes: usize, -) -> ( - ::Commit, - ::PrecomputeData, -) { - let encoded_tx_len = encoded_transactions.len(); - vid_scheme(num_storage_nodes).commit_only_precompute(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{encoded_tx_len}) error: {err}")) -} - /// The number of storage nodes to use when computing the genesis VID commitment. /// /// The number of storage nodes for the genesis VID commitment is arbitrary, since we don't actually diff --git a/types/src/vid.rs b/types/src/vid.rs index a5462a56c1..4ce85d87b0 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -28,7 +28,6 @@ use jf_vid::{ payload_prover::{LargeRangeProof, SmallRangeProof}, }, payload_prover::{PayloadProver, Statement}, - precomputable::Precomputable, VidDisperse, VidResult, VidScheme, }; use lazy_static::lazy_static; @@ -109,8 +108,6 @@ pub type VidCommitment = ::Commit; pub type VidCommon = ::Common; /// VID share type pub type VidShare = ::Share; -/// VID PrecomputeData type -pub type VidPrecomputeData = ::PrecomputeData; /// VID proposal type pub type VidProposal = ( Proposal>, @@ -290,33 +287,6 @@ impl PayloadProver for VidSchemeType { } } -impl Precomputable for VidSchemeType { - type PrecomputeData = ::PrecomputeData; - - fn commit_only_precompute( - &self, - payload: B, - ) -> VidResult<(Self::Commit, Self::PrecomputeData)> - where - B: AsRef<[u8]>, - { - self.0.commit_only_precompute(payload) - } - - fn disperse_precompute( - &self, - payload: B, - data: &Self::PrecomputeData, - ) -> VidResult> - where - B: AsRef<[u8]>, - { - self.0 - .disperse_precompute(payload, data) - .map(vid_disperse_conversion) - } -} - /// Convert a [`VidDisperse`] to a [`VidDisperse`]. /// /// Foreign type rules prevent us from doing: From 4df744d41e78225f99ac8d534151697a428dc26b Mon Sep 17 00:00:00 2001 From: tbro <48967308+tbro@users.noreply.github.com> Date: Wed, 22 Jan 2025 15:37:54 -0300 Subject: [PATCH 1366/1393] use Option instead of Either in vote.rs (#4072) Co-authored-by: tbro --- task-impls/src/vote_collection.rs | 6 +++--- types/src/vote.rs | 27 ++++++++++----------------- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/task-impls/src/vote_collection.rs b/task-impls/src/vote_collection.rs index 6e9b57ced1..2147c9ca15 100644 --- a/task-impls/src/vote_collection.rs +++ b/task-impls/src/vote_collection.rs @@ -14,7 +14,7 @@ use std::{ use async_broadcast::Sender; use async_lock::RwLock; use async_trait::async_trait; -use either::Either::{self, Left, Right}; +use either::Either::{Left, Right}; use hotshot_types::{ message::UpgradeLock, simple_certificate::{ @@ -135,8 +135,8 @@ impl< .accumulate(vote, &self.membership, sender_epoch) .await { - Either::Left(()) => Ok(None), - Either::Right(cert) => { + None => Ok(None), + Some(cert) => { tracing::debug!("Certificate Formed! {:?}", cert); broadcast_event( diff --git a/types/src/vote.rs b/types/src/vote.rs index 41a7d72e01..7f91dfd01c 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -16,7 +16,6 @@ use std::{ use async_lock::RwLock; use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; -use either::Either; use primitive_types::U256; use tracing::error; use utils::anytrace::Result; @@ -166,7 +165,7 @@ impl< vote: &VOTE, membership: &Arc>, epoch: Option, - ) -> Either<(), CERT> { + ) -> Option { let key = vote.signing_key(); let vote_commitment = match VersionedVoteData::new( @@ -179,31 +178,25 @@ impl< Ok(data) => data.commit(), Err(e) => { tracing::warn!("Failed to generate versioned vote data: {e}"); - return Either::Left(()); + return None; } }; if !key.validate(&vote.signature(), vote_commitment.as_ref()) { error!("Invalid vote! Vote Data {:?}", vote.date()); - return Either::Left(()); + return None; } let membership_reader = membership.read().await; - let Some(stake_table_entry) = CERT::stake_table_entry(&*membership_reader, &key, epoch) - else { - return Either::Left(()); - }; + let stake_table_entry = CERT::stake_table_entry(&*membership_reader, &key, epoch)?; let stake_table = CERT::stake_table(&*membership_reader, epoch); let total_nodes = CERT::total_nodes(&*membership_reader, epoch); let threshold = CERT::threshold(&*membership_reader, epoch); drop(membership_reader); - let Some(vote_node_id) = stake_table + let vote_node_id = stake_table .iter() - .position(|x| *x == stake_table_entry.clone()) - else { - return Either::Left(()); - }; + .position(|x| *x == stake_table_entry.clone())?; let original_signature: ::PureAssembledSignatureType = vote.signature(); @@ -215,7 +208,7 @@ impl< // Check for duplicate vote if total_vote_map.contains_key(&key) { - return Either::Left(()); + return None; } let (signers, sig_list) = self .signers @@ -223,7 +216,7 @@ impl< .or_insert((bitvec![0; total_nodes], Vec::new())); if signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); - return Either::Left(()); + return None; } signers.set(vote_node_id, true); sig_list.push(original_signature); @@ -251,9 +244,9 @@ impl< real_qc_sig, vote.view_number(), ); - return Either::Right(cert); + return Some(cert); } - Either::Left(()) + None } } From 6d3c8453df484f1bc66885b2878e801ab17b5abf Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 22 Jan 2025 17:21:36 -0500 Subject: [PATCH 1367/1393] Add `workspace-hack` crate (#4066) --- builder-api/Cargo.toml | 1 + example-types/Cargo.toml | 2 +- examples/Cargo.toml | 2 +- fakeapi/Cargo.toml | 1 + hotshot-stake-table/Cargo.toml | 1 + hotshot/Cargo.toml | 2 +- libp2p-networking/Cargo.toml | 1 + macros/Cargo.toml | 1 + orchestrator/Cargo.toml | 1 + task-impls/Cargo.toml | 3 +- task/Cargo.toml | 1 + testing/Cargo.toml | 3 +- types/Cargo.toml | 1 + utils/Cargo.toml | 1 + workspace-hack/.gitattributes | 4 + workspace-hack/Cargo.toml | 186 +++++++++++++++++++++++++++++++++ workspace-hack/build.rs | 2 + workspace-hack/src/lib.rs | 1 + 18 files changed, 207 insertions(+), 7 deletions(-) create mode 100644 workspace-hack/.gitattributes create mode 100644 workspace-hack/Cargo.toml create mode 100644 workspace-hack/build.rs create mode 100644 workspace-hack/src/lib.rs diff --git a/builder-api/Cargo.toml b/builder-api/Cargo.toml index a28d766154..f652c2b123 100644 --- a/builder-api/Cargo.toml +++ b/builder-api/Cargo.toml @@ -17,3 +17,4 @@ thiserror = { workspace = true } tide-disco = { workspace = true } toml = { workspace = true } vbs = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/example-types/Cargo.toml b/example-types/Cargo.toml index d0791ea285..ff2eee97d4 100644 --- a/example-types/Cargo.toml +++ b/example-types/Cargo.toml @@ -9,7 +9,6 @@ authors = { workspace = true } default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] -gpu-vid = ["hotshot-task-impls/gpu-vid"] [dependencies] anyhow = { workspace = true } @@ -30,3 +29,4 @@ time = { workspace = true } tokio = { workspace = true } url = { workspace = true } vbs = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 3136acdae4..779d6fc709 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -9,7 +9,6 @@ rust-version = { workspace = true } [features] default = ["docs", "doc-images", "hotshot-testing"] -gpu-vid = ["hotshot-example-types/gpu-vid"] # Build the extended documentation docs = [] @@ -103,6 +102,7 @@ tokio = { workspace = true } tracing = { workspace = true } url = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] anyhow = { workspace = true } diff --git a/fakeapi/Cargo.toml b/fakeapi/Cargo.toml index d47908f721..15cd632bd3 100644 --- a/fakeapi/Cargo.toml +++ b/fakeapi/Cargo.toml @@ -20,6 +20,7 @@ tide-disco = { workspace = true } tokio = { workspace = true } toml = { workspace = true } vbs = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lints] workspace = true diff --git a/hotshot-stake-table/Cargo.toml b/hotshot-stake-table/Cargo.toml index 6db5a340f0..c42ae0e4f8 100644 --- a/hotshot-stake-table/Cargo.toml +++ b/hotshot-stake-table/Cargo.toml @@ -21,6 +21,7 @@ jf-utils = { workspace = true } primitive-types = { workspace = true } serde = { workspace = true, features = ["rc"] } tagged-base64 = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] rand_chacha = { workspace = true } diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index a74bc74746..9290290b77 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -10,7 +10,6 @@ rust-version = { workspace = true } [features] default = ["docs", "doc-images"] example-upgrade = ["hotshot-task-impls/example-upgrade"] -gpu-vid = ["hotshot-task-impls/gpu-vid"] rewind = ["hotshot-task-impls/rewind"] # Build the extended documentation @@ -56,6 +55,7 @@ tracing-subscriber = { workspace = true } url = { workspace = true } utils = { path = "../utils" } vbs = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] blake3 = { workspace = true } diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index ea9890c15f..e62e0f6b27 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -35,6 +35,7 @@ serde = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lints] workspace = true diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 4fd4ee4c11..5d8298c9cc 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -10,6 +10,7 @@ proc-macro2 = "1" # proc macro stuff quote = "1" syn = { version = "2", features = ["full", "extra-traits"] } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lib] proc-macro = true diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index a80dcba85a..9981174669 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -20,6 +20,7 @@ tokio = { workspace = true } toml = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lints] workspace = true diff --git a/task-impls/Cargo.toml b/task-impls/Cargo.toml index 6deb4f96ec..d069aef8bb 100644 --- a/task-impls/Cargo.toml +++ b/task-impls/Cargo.toml @@ -7,9 +7,7 @@ version = { workspace = true } [features] example-upgrade = [] -gpu-vid = ["hotshot-types/gpu-vid"] rewind = [] -test-srs = ["jf-vid/test-srs"] [dependencies] anyhow = { workspace = true } @@ -39,6 +37,7 @@ url = { workspace = true } utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lints] workspace = true diff --git a/task/Cargo.toml b/task/Cargo.toml index 4b6fed92e6..ce4042fafe 100644 --- a/task/Cargo.toml +++ b/task/Cargo.toml @@ -17,6 +17,7 @@ tokio = { workspace = true, features = [ ] } tracing = { workspace = true } utils = { path = "../utils" } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lints] workspace = true diff --git a/testing/Cargo.toml b/testing/Cargo.toml index b21799ba6b..b05bbd9dde 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -9,9 +9,7 @@ authors = { workspace = true } default = [] # NOTE this is used to activate the slow tests we don't wish to run in CI slow-tests = [] -gpu-vid = ["hotshot-types/gpu-vid"] rewind = ["hotshot/rewind"] -test-srs = ["jf-vid/test-srs"] broken_3_chain_fixed = [] [dependencies] @@ -49,3 +47,4 @@ tracing = { workspace = true } url = { workspace = true } vbs = { workspace = true } vec1 = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/types/Cargo.toml b/types/Cargo.toml index e99de2e53d..48e4c30c6b 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -54,6 +54,7 @@ url = { workspace = true } utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [features] gpu-vid = ["jf-vid/gpu-vid"] diff --git a/utils/Cargo.toml b/utils/Cargo.toml index fba1648dd7..16a70a6467 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -6,6 +6,7 @@ description = "Utils" [dependencies] tracing = { workspace = true } +workspace-hack = { version = "0.1", path = "../workspace-hack" } [lints] workspace = true diff --git a/workspace-hack/.gitattributes b/workspace-hack/.gitattributes new file mode 100644 index 0000000000..3e9dba4b64 --- /dev/null +++ b/workspace-hack/.gitattributes @@ -0,0 +1,4 @@ +# Avoid putting conflict markers in the generated Cargo.toml file, since their presence breaks +# Cargo. +# Also do not check out the file as CRLF on Windows, as that's what hakari needs. +Cargo.toml merge=binary -crlf diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml new file mode 100644 index 0000000000..09791f0e7f --- /dev/null +++ b/workspace-hack/Cargo.toml @@ -0,0 +1,186 @@ +# This file is generated by `cargo hakari`. +# To regenerate, run: +# cargo hakari generate + +[package] +name = "workspace-hack" +version = "0.1.0" +edition = "2021" +description = "workspace-hack package, managed by hakari" +# You can choose to publish this crate: see https://docs.rs/cargo-hakari/latest/cargo_hakari/publishing. +publish = false + +# The parts of the file between the BEGIN HAKARI SECTION and END HAKARI SECTION comments +# are managed by hakari. + +### BEGIN HAKARI SECTION +[dependencies] +ark-bls12-377 = { version = "0.4", default-features = false, features = ["curve", "std"] } +ark-bls12-381 = { version = "0.4", default-features = false, features = ["curve", "std"] } +ark-bn254 = { version = "0.4", features = ["std"] } +ark-bw6-761 = { version = "0.4", default-features = false, features = ["std"] } +ark-crypto-primitives = { version = "0.4", default-features = false, features = ["sponge", "std"] } +ark-ff = { version = "0.4", features = ["asm", "parallel"] } +ark-relations = { version = "0.4", default-features = false, features = ["std"] } +ark-serialize = { version = "0.4", features = ["derive", "std"] } +ark-std = { version = "0.4", features = ["parallel"] } +arrayvec = { version = "0.7" } +async-std = { version = "1", features = ["attributes", "tokio1", "unstable"] } +async-tungstenite = { version = "0.13", features = ["async-native-tls", "async-tls"] } +base64-3c51e837cfc5589a = { package = "base64", version = "0.22" } +base64-594e8ee84c453af0 = { package = "base64", version = "0.13" } +byteorder = { version = "1" } +bytes = { version = "1", features = ["serde"] } +concurrent-queue = { version = "2" } +crossbeam-utils = { version = "0.8" } +crunchy = { version = "0.2", features = ["std"] } +crypto-common = { version = "0.1", default-features = false, features = ["rand_core", "std"] } +data-encoding = { version = "2" } +derive_more = { version = "1", features = ["debug", "deref", "from"] } +digest-274715c4dabd11b0 = { package = "digest", version = "0.9", default-features = false, features = ["std"] } +digest-93f6ce9d446188ac = { package = "digest", version = "0.10", features = ["mac", "rand_core", "std"] } +downcast-rs = { version = "1", default-features = false, features = ["std"] } +either = { version = "1", features = ["serde"] } +event-listener = { version = "5" } +event-listener-strategy = { version = "0.5" } +form_urlencoded = { version = "1" } +futures = { version = "0.3", features = ["thread-pool"] } +futures-channel = { version = "0.3", features = ["sink"] } +futures-core = { version = "0.3" } +futures-executor = { version = "0.3", features = ["thread-pool"] } +futures-io = { version = "0.3" } +futures-sink = { version = "0.3" } +futures-task = { version = "0.3", default-features = false, features = ["std"] } +futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +generic-array = { version = "0.14", default-features = false, features = ["more_lengths"] } +getrandom = { version = "0.2", default-features = false, features = ["std"] } +hashbrown-3575ec1268b04181 = { package = "hashbrown", version = "0.15" } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } +idna = { version = "1", default-features = false, features = ["compiled_data", "std"] } +itertools = { version = "0.12" } +jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", default-features = false, features = ["parallel", "std"] } +jf-rescue = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } +jf-signature = { git = "https://github.com/EspressoSystems/jellyfish", tag = "jf-signature-v0.2.0", features = ["bls", "schnorr", "std"] } +jf-utils = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = ["parallel", "std"] } +libp2p-identity = { version = "0.2", default-features = false, features = ["ed25519", "peerid", "rand", "secp256k1", "serde"] } +libsecp256k1-core = { version = "0.3" } +log = { version = "0.4", default-features = false, features = ["kv_unstable_std"] } +memchr = { version = "2" } +multihash = { version = "0.19", features = ["serde-codec"] } +num-bigint = { version = "0.4" } +num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } +num-traits = { version = "0.2", features = ["i128"] } +rand = { version = "0.8", features = ["small_rng"] } +regex = { version = "1" } +regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.8" } +rustls-pki-types = { version = "1", features = ["std"] } +serde = { version = "1", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.9" } +sha3 = { version = "0.10" } +smallvec = { version = "1", default-features = false, features = ["const_generics"] } +sqlx-core = { version = "0.8", features = ["_rt-tokio", "any", "migrate", "offline", "time"] } +sqlx-sqlite = { version = "0.8", default-features = false, features = ["migrate", "offline", "time"] } +standback = { version = "0.2", default-features = false, features = ["std"] } +subtle = { version = "2", default-features = false, features = ["std"] } +time = { version = "0.3", features = ["formatting", "macros", "parsing"] } +tokio = { version = "1", features = ["fs", "io-util", "macros", "net", "parking_lot", "rt-multi-thread", "sync", "time", "tracing"] } +toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } +toml_edit = { version = "0.22", features = ["serde"] } +tracing = { version = "0.1", features = ["log"] } +tracing-core = { version = "0.1" } +tracing-log = { version = "0.2" } +tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } +tungstenite = { version = "0.13", default-features = false, features = ["native-tls"] } +uint = { version = "0.9" } +url = { version = "2", features = ["serde"] } +zerocopy = { version = "0.7", features = ["derive", "simd"] } +zeroize = { version = "1", features = ["std", "zeroize_derive"] } + +[build-dependencies] +ark-bls12-377 = { version = "0.4", default-features = false, features = ["curve", "std"] } +ark-bls12-381 = { version = "0.4", default-features = false, features = ["curve", "std"] } +ark-bn254 = { version = "0.4", features = ["std"] } +ark-bw6-761 = { version = "0.4", default-features = false, features = ["std"] } +ark-crypto-primitives = { version = "0.4", default-features = false, features = ["sponge", "std"] } +ark-ff = { version = "0.4", features = ["asm", "parallel"] } +ark-relations = { version = "0.4", default-features = false, features = ["std"] } +ark-serialize = { version = "0.4", features = ["derive", "std"] } +ark-std = { version = "0.4", features = ["parallel"] } +arrayvec = { version = "0.7" } +async-std = { version = "1", features = ["attributes", "tokio1", "unstable"] } +async-tungstenite = { version = "0.13", features = ["async-native-tls", "async-tls"] } +base64-3c51e837cfc5589a = { package = "base64", version = "0.22" } +base64-594e8ee84c453af0 = { package = "base64", version = "0.13" } +byteorder = { version = "1" } +bytes = { version = "1", features = ["serde"] } +concurrent-queue = { version = "2" } +crossbeam-utils = { version = "0.8" } +crunchy = { version = "0.2", features = ["std"] } +crypto-common = { version = "0.1", default-features = false, features = ["rand_core", "std"] } +data-encoding = { version = "2" } +derive_more = { version = "1", features = ["debug", "deref", "from"] } +derive_more-impl = { version = "1", features = ["debug", "deref", "from"] } +digest-274715c4dabd11b0 = { package = "digest", version = "0.9", default-features = false, features = ["std"] } +digest-93f6ce9d446188ac = { package = "digest", version = "0.10", features = ["mac", "rand_core", "std"] } +displaydoc = { version = "0.2" } +downcast-rs = { version = "1", default-features = false, features = ["std"] } +either = { version = "1", features = ["serde"] } +event-listener = { version = "5" } +event-listener-strategy = { version = "0.5" } +form_urlencoded = { version = "1" } +futures = { version = "0.3", features = ["thread-pool"] } +futures-channel = { version = "0.3", features = ["sink"] } +futures-core = { version = "0.3" } +futures-executor = { version = "0.3", features = ["thread-pool"] } +futures-io = { version = "0.3" } +futures-sink = { version = "0.3" } +futures-task = { version = "0.3", default-features = false, features = ["std"] } +futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +generic-array = { version = "0.14", default-features = false, features = ["more_lengths"] } +getrandom = { version = "0.2", default-features = false, features = ["std"] } +hashbrown-3575ec1268b04181 = { package = "hashbrown", version = "0.15" } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } +idna = { version = "1", default-features = false, features = ["compiled_data", "std"] } +itertools = { version = "0.12" } +jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", default-features = false, features = ["parallel", "std"] } +jf-rescue = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } +jf-signature = { git = "https://github.com/EspressoSystems/jellyfish", tag = "jf-signature-v0.2.0", features = ["bls", "schnorr", "std"] } +jf-utils = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = ["parallel", "std"] } +libp2p-identity = { version = "0.2", default-features = false, features = ["ed25519", "peerid", "rand", "secp256k1", "serde"] } +libsecp256k1-core = { version = "0.3" } +log = { version = "0.4", default-features = false, features = ["kv_unstable_std"] } +memchr = { version = "2" } +multihash = { version = "0.19", features = ["serde-codec"] } +num-bigint = { version = "0.4" } +num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } +num-traits = { version = "0.2", features = ["i128"] } +rand = { version = "0.8", features = ["small_rng"] } +regex = { version = "1" } +regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.8" } +rustls-pki-types = { version = "1", features = ["std"] } +serde = { version = "1", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.9" } +sha3 = { version = "0.10" } +smallvec = { version = "1", default-features = false, features = ["const_generics"] } +sqlx-core = { version = "0.8", features = ["_rt-tokio", "any", "migrate", "offline", "time"] } +sqlx-sqlite = { version = "0.8", default-features = false, features = ["migrate", "offline", "time"] } +standback = { version = "0.2", default-features = false, features = ["std"] } +subtle = { version = "2", default-features = false, features = ["std"] } +syn = { version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +time = { version = "0.3", features = ["formatting", "macros", "parsing"] } +tokio = { version = "1", features = ["fs", "io-util", "macros", "net", "parking_lot", "rt-multi-thread", "sync", "time", "tracing"] } +toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } +toml_edit = { version = "0.22", features = ["serde"] } +tracing = { version = "0.1", features = ["log"] } +tracing-core = { version = "0.1" } +tracing-log = { version = "0.2" } +tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } +tungstenite = { version = "0.13", default-features = false, features = ["native-tls"] } +uint = { version = "0.9" } +url = { version = "2", features = ["serde"] } +zerocopy = { version = "0.7", features = ["derive", "simd"] } +zeroize = { version = "1", features = ["std", "zeroize_derive"] } + +### END HAKARI SECTION diff --git a/workspace-hack/build.rs b/workspace-hack/build.rs new file mode 100644 index 0000000000..92518ef04c --- /dev/null +++ b/workspace-hack/build.rs @@ -0,0 +1,2 @@ +// A build script is required for cargo to consider build dependencies. +fn main() {} diff --git a/workspace-hack/src/lib.rs b/workspace-hack/src/lib.rs new file mode 100644 index 0000000000..22489f632b --- /dev/null +++ b/workspace-hack/src/lib.rs @@ -0,0 +1 @@ +// This is a stub lib.rs. From 3e506ba9d64920214c04b3d6fb1b644e61637015 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Fri, 24 Jan 2025 15:23:55 -0500 Subject: [PATCH 1368/1393] Add back external messages (#4080) * Revert "remove `external` message support (#4034)" This reverts commit 46e1c0acd7202ec1bd659db2974db71c35021b81. * fix test --- hotshot/src/types/handle.rs | 46 +++++++++++++- task-impls/src/network.rs | 18 +++++- testing/tests/tests_1/network_task.rs | 92 +++++++++++++++++++++++++++ types/src/event.rs | 8 +++ types/src/message.rs | 3 + 5 files changed, 163 insertions(+), 4 deletions(-) diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 15b450f00d..f8c82dfad6 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -22,11 +22,14 @@ use hotshot_types::{ consensus::Consensus, data::{Leaf2, QuorumProposalWrapper}, error::HotShotError, - message::Proposal, + message::{Message, MessageKind, Proposal, RecipientList}, request_response::ProposalRequestPayload, traits::{ - consensus_api::ConsensusApi, election::Membership, network::ConnectedNetwork, - node_implementation::NodeType, signature_key::SignatureKey, + consensus_api::ConsensusApi, + election::Membership, + network::{BroadcastDelay, ConnectedNetwork, Topic}, + node_implementation::NodeType, + signature_key::SignatureKey, }, }; use tracing::instrument; @@ -90,6 +93,43 @@ impl + 'static, V: Versions> self.output_event_stream.1.activate_cloned() } + /// Message other participants with a serialized message from the application + /// Receivers of this message will get an `Event::ExternalMessageReceived` via + /// the event stream. + /// + /// # Errors + /// Errors if serializing the request fails, or the request fails to be sent + pub async fn send_external_message( + &self, + msg: Vec, + recipients: RecipientList, + ) -> Result<()> { + let message = Message { + sender: self.public_key().clone(), + kind: MessageKind::External(msg), + }; + let serialized_message = self.hotshot.upgrade_lock.serialize(&message).await?; + + match recipients { + RecipientList::Broadcast => { + self.network + .broadcast_message(serialized_message, Topic::Global, BroadcastDelay::None) + .await?; + } + RecipientList::Direct(recipient) => { + self.network + .direct_message(serialized_message, recipient) + .await?; + } + RecipientList::Many(recipients) => { + self.network + .da_broadcast_message(serialized_message, recipients, BroadcastDelay::None) + .await?; + } + } + Ok(()) + } + /// Request a proposal from the all other nodes. Will block until some node /// returns a valid proposal with the requested commitment. If nobody has the /// proposal this will block forever diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index a5b7b6df01..ff86ebf20f 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -17,7 +17,7 @@ use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, data::{VidDisperse, VidDisperseShare, VidDisperseShare2}, - event::{Event, HotShotAction}, + event::{Event, EventType, HotShotAction}, message::{ convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, UpgradeLock, @@ -438,6 +438,22 @@ impl NetworkMessageTaskState { } } }, + + // Handle external messages + MessageKind::External(data) => { + if sender == self.public_key { + return; + } + // Send the external message to the external event stream so it can be processed + broadcast_event( + Event { + view_number: TYPES::View::new(1), + event: EventType::ExternalMessageReceived { sender, data }, + }, + &self.external_event_stream, + ) + .await; + } } } } diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 63892d0a00..2391fedc91 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -110,6 +110,98 @@ async fn test_network_task() { )); } +#[cfg(test)] +#[tokio::test(flavor = "multi_thread")] +async fn test_network_external_mnessages() { + use hotshot::types::EventType; + use hotshot_testing::helpers::build_system_handle_from_launcher; + use hotshot_types::message::RecipientList; + + hotshot::helpers::initialize_logging(); + + let builder: TestDescription = + TestDescription::default_multiple_rounds(); + + let launcher = builder.gen_launcher(); + + let mut handles = vec![]; + let mut event_streams = vec![]; + for i in 0..launcher.metadata.test_config.num_nodes_with_stake.into() { + let handle = build_system_handle_from_launcher::( + i.try_into().unwrap(), + &launcher, + ) + .await + .0; + event_streams.push(handle.event_stream_known_impl()); + handles.push(handle); + } + + // Send a message from 1 -> 2 + handles[1] + .send_external_message(vec![1, 2], RecipientList::Direct(handles[2].public_key())) + .await + .unwrap(); + let event = tokio::time::timeout(Duration::from_millis(100), event_streams[2].recv()) + .await + .unwrap() + .unwrap() + .event; + + // check that 2 received the message + assert!(matches!( + event, + EventType::ExternalMessageReceived { + sender, + data, + } if sender == handles[1].public_key() && data == vec![1, 2] + )); + + // Send a message from 2 -> 1 + handles[2] + .send_external_message(vec![2, 1], RecipientList::Direct(handles[1].public_key())) + .await + .unwrap(); + let event = tokio::time::timeout(Duration::from_millis(100), event_streams[1].recv()) + .await + .unwrap() + .unwrap() + .event; + + // check that 1 received the message + assert!(matches!( + event, + EventType::ExternalMessageReceived { + sender, + data, + } if sender == handles[2].public_key() && data == vec![2,1] + )); + + // Check broadcast works + handles[0] + .send_external_message(vec![0, 0, 0], RecipientList::Broadcast) + .await + .unwrap(); + // All other nodes get the broadcast + for stream in event_streams.iter_mut().skip(1) { + let event = tokio::time::timeout(Duration::from_millis(100), stream.recv()) + .await + .unwrap() + .unwrap() + .event; + assert!(matches!( + event, + EventType::ExternalMessageReceived { + sender, + data, + } if sender == handles[0].public_key() && data == vec![0,0,0] + )); + } + // No event on 0 even after short sleep + tokio::time::sleep(Duration::from_millis(2)).await; + assert!(event_streams[0].is_empty()); +} + #[cfg(test)] #[tokio::test(flavor = "multi_thread")] async fn test_network_storage_fail() { diff --git a/types/src/event.rs b/types/src/event.rs index ab4d6e86c5..7c57668188 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -172,6 +172,14 @@ pub enum EventType { /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, + + /// A message destined for external listeners was received + ExternalMessageReceived { + /// Public Key of the message sender + sender: TYPES::SignatureKey, + /// Serialized data of the message + data: Vec, + }, } #[derive(Debug, Serialize, Deserialize, Clone, Copy)] /// A list of actions that we track for nodes diff --git a/types/src/message.rs b/types/src/message.rs index b47bf952d2..de9aee29ad 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -123,6 +123,8 @@ pub enum MessageKind { Consensus(SequencingMessage), /// Messages relating to sharing data between nodes Data(DataMessage), + /// A (still serialized) message to be passed through to external listeners + External(Vec), } /// List of keys to send a message to, or broadcast to all known keys @@ -160,6 +162,7 @@ impl ViewMessage for MessageKind { ResponseMessage::Found(m) => m.view_number(), ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::View::new(1), }, + MessageKind::External(_) => TYPES::View::new(1), } } } From 5256d185eacc7584538645e7c56aeeb0451f08b6 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 24 Jan 2025 17:06:05 -0500 Subject: [PATCH 1369/1393] Add `root_block_in_epoch` (#4067) --- types/src/utils.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/types/src/utils.rs b/types/src/utils.rs index 04a472d6a6..f8fe4fdf5b 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -257,6 +257,19 @@ pub fn epoch_from_block_number(block_number: u64, epoch_height: u64) -> u64 { } } +/// Returns the block number of the epoch root in the given epoch +/// +/// WARNING: This is NOT the root block for the given epoch. +/// To find that root block number for epoch e, call `root_block_in_epoch(e-2,_)`. +#[must_use] +pub fn root_block_in_epoch(epoch: u64, epoch_height: u64) -> u64 { + if epoch_height == 0 || epoch < 1 { + 0 + } else { + epoch_height * epoch - 2 + } +} + /// Returns an Option based on a boolean condition of whether or not epochs are enabled, a block number, /// and the epoch height. If epochs are disabled or the epoch height is zero, returns None. #[must_use] @@ -349,4 +362,21 @@ mod test { let epoch = epoch_from_block_number(21, 10); assert_eq!(3, epoch); } + + #[test] + fn test_root_block_in_epoch() { + // block 0 is always epoch 0 + let epoch = 3; + let epoch_height = 10; + let epoch_root_block_number = root_block_in_epoch(3, epoch_height); + + assert!(is_epoch_root(28, epoch_height)); + + assert_eq!(epoch_root_block_number, 28); + + assert_eq!( + epoch, + epoch_from_block_number(epoch_root_block_number, epoch_height) + ); + } } From fabab6d10a6ca050447ad228708cd0edc398cafb Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 27 Jan 2025 13:06:31 -0500 Subject: [PATCH 1370/1393] Exclude `sqlx` from `workspace-hack` (#4087) --- workspace-hack/Cargo.toml | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 09791f0e7f..8f52f3fffc 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -29,7 +29,6 @@ async-std = { version = "1", features = ["attributes", "tokio1", "unstable"] } async-tungstenite = { version = "0.13", features = ["async-native-tls", "async-tls"] } base64-3c51e837cfc5589a = { package = "base64", version = "0.22" } base64-594e8ee84c453af0 = { package = "base64", version = "0.13" } -byteorder = { version = "1" } bytes = { version = "1", features = ["serde"] } concurrent-queue = { version = "2" } crossbeam-utils = { version = "0.8" } @@ -40,14 +39,14 @@ derive_more = { version = "1", features = ["debug", "deref", "from"] } digest-274715c4dabd11b0 = { package = "digest", version = "0.9", default-features = false, features = ["std"] } digest-93f6ce9d446188ac = { package = "digest", version = "0.10", features = ["mac", "rand_core", "std"] } downcast-rs = { version = "1", default-features = false, features = ["std"] } -either = { version = "1", features = ["serde"] } +either = { version = "1" } event-listener = { version = "5" } event-listener-strategy = { version = "0.5" } form_urlencoded = { version = "1" } futures = { version = "0.3", features = ["thread-pool"] } futures-channel = { version = "0.3", features = ["sink"] } futures-core = { version = "0.3" } -futures-executor = { version = "0.3", features = ["thread-pool"] } +futures-executor = { version = "0.3", default-features = false, features = ["thread-pool"] } futures-io = { version = "0.3" } futures-sink = { version = "0.3" } futures-task = { version = "0.3", default-features = false, features = ["std"] } @@ -56,7 +55,7 @@ generic-array = { version = "0.14", default-features = false, features = ["more_ getrandom = { version = "0.2", default-features = false, features = ["std"] } hashbrown-3575ec1268b04181 = { package = "hashbrown", version = "0.15" } hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } -idna = { version = "1", default-features = false, features = ["compiled_data", "std"] } +hex = { version = "0.4" } itertools = { version = "0.12" } jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", default-features = false, features = ["parallel", "std"] } jf-rescue = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } @@ -70,17 +69,15 @@ multihash = { version = "0.19", features = ["serde-codec"] } num-bigint = { version = "0.4" } num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128"] } +percent-encoding = { version = "2" } rand = { version = "0.8", features = ["small_rng"] } regex = { version = "1" } regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8" } rustls-pki-types = { version = "1", features = ["std"] } -serde = { version = "1", features = ["alloc", "derive", "rc"] } sha2 = { version = "0.9" } sha3 = { version = "0.10" } smallvec = { version = "1", default-features = false, features = ["const_generics"] } -sqlx-core = { version = "0.8", features = ["_rt-tokio", "any", "migrate", "offline", "time"] } -sqlx-sqlite = { version = "0.8", default-features = false, features = ["migrate", "offline", "time"] } standback = { version = "0.2", default-features = false, features = ["std"] } subtle = { version = "2", default-features = false, features = ["std"] } time = { version = "0.3", features = ["formatting", "macros", "parsing"] } @@ -94,7 +91,6 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } tungstenite = { version = "0.13", default-features = false, features = ["native-tls"] } uint = { version = "0.9" } url = { version = "2", features = ["serde"] } -zerocopy = { version = "0.7", features = ["derive", "simd"] } zeroize = { version = "1", features = ["std", "zeroize_derive"] } [build-dependencies] @@ -112,7 +108,6 @@ async-std = { version = "1", features = ["attributes", "tokio1", "unstable"] } async-tungstenite = { version = "0.13", features = ["async-native-tls", "async-tls"] } base64-3c51e837cfc5589a = { package = "base64", version = "0.22" } base64-594e8ee84c453af0 = { package = "base64", version = "0.13" } -byteorder = { version = "1" } bytes = { version = "1", features = ["serde"] } concurrent-queue = { version = "2" } crossbeam-utils = { version = "0.8" } @@ -125,14 +120,14 @@ digest-274715c4dabd11b0 = { package = "digest", version = "0.9", default-feature digest-93f6ce9d446188ac = { package = "digest", version = "0.10", features = ["mac", "rand_core", "std"] } displaydoc = { version = "0.2" } downcast-rs = { version = "1", default-features = false, features = ["std"] } -either = { version = "1", features = ["serde"] } +either = { version = "1" } event-listener = { version = "5" } event-listener-strategy = { version = "0.5" } form_urlencoded = { version = "1" } futures = { version = "0.3", features = ["thread-pool"] } futures-channel = { version = "0.3", features = ["sink"] } futures-core = { version = "0.3" } -futures-executor = { version = "0.3", features = ["thread-pool"] } +futures-executor = { version = "0.3", default-features = false, features = ["thread-pool"] } futures-io = { version = "0.3" } futures-sink = { version = "0.3" } futures-task = { version = "0.3", default-features = false, features = ["std"] } @@ -141,7 +136,7 @@ generic-array = { version = "0.14", default-features = false, features = ["more_ getrandom = { version = "0.2", default-features = false, features = ["std"] } hashbrown-3575ec1268b04181 = { package = "hashbrown", version = "0.15" } hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } -idna = { version = "1", default-features = false, features = ["compiled_data", "std"] } +hex = { version = "0.4" } itertools = { version = "0.12" } jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", default-features = false, features = ["parallel", "std"] } jf-rescue = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } @@ -155,17 +150,15 @@ multihash = { version = "0.19", features = ["serde-codec"] } num-bigint = { version = "0.4" } num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128"] } +percent-encoding = { version = "2" } rand = { version = "0.8", features = ["small_rng"] } regex = { version = "1" } regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8" } rustls-pki-types = { version = "1", features = ["std"] } -serde = { version = "1", features = ["alloc", "derive", "rc"] } sha2 = { version = "0.9" } sha3 = { version = "0.10" } smallvec = { version = "1", default-features = false, features = ["const_generics"] } -sqlx-core = { version = "0.8", features = ["_rt-tokio", "any", "migrate", "offline", "time"] } -sqlx-sqlite = { version = "0.8", default-features = false, features = ["migrate", "offline", "time"] } standback = { version = "0.2", default-features = false, features = ["std"] } subtle = { version = "2", default-features = false, features = ["std"] } syn = { version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } @@ -180,7 +173,6 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } tungstenite = { version = "0.13", default-features = false, features = ["native-tls"] } uint = { version = "0.9" } url = { version = "2", features = ["serde"] } -zerocopy = { version = "0.7", features = ["derive", "simd"] } zeroize = { version = "1", features = ["std", "zeroize_derive"] } ### END HAKARI SECTION From 442ac8b38a98591b86083d174c4d1f0cbb4b5524 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 27 Jan 2025 13:40:46 -0500 Subject: [PATCH 1371/1393] Require strict leaf chain in consistency task (#4077) --- examples/infra/mod.rs | 10 +- hotshot/src/lib.rs | 182 +++++++++++++++++++------------- testing/src/consistency_task.rs | 4 +- testing/src/helpers.rs | 7 +- testing/src/spinning_task.rs | 64 ++++++----- testing/src/test_builder.rs | 7 +- testing/src/test_runner.rs | 2 + 7 files changed, 162 insertions(+), 114 deletions(-) diff --git a/examples/infra/mod.rs b/examples/infra/mod.rs index 3d31884d81..ecd48fb406 100755 --- a/examples/infra/mod.rs +++ b/examples/infra/mod.rs @@ -365,10 +365,12 @@ pub trait RunDa< &self, membership: Arc::Membership>>, ) -> SystemContextHandle { - let initializer = - hotshot::HotShotInitializer::::from_genesis::(TestInstanceState::default()) - .await - .expect("Couldn't generate genesis block"); + let initializer = hotshot::HotShotInitializer::::from_genesis::( + TestInstanceState::default(), + self.config().config.epoch_height, + ) + .await + .expect("Couldn't generate genesis block"); let config = self.config(); let validator_config = self.validator_config(); diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 61f652477c..27030c672d 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -268,7 +268,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext state, - None => Arc::new(TYPES::ValidatedState::from_header( - anchored_leaf.block_header(), - )), - }; + let validated_state = initializer.anchor_state; // #3967 REVIEW NOTE: Should this actually be Some()? How do we know? let epoch = option_epoch_from_block_number::( @@ -310,7 +305,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> ConsensusApi { - /// the leaf specified initialization - inner: Leaf2, - /// Instance-level state. - instance_state: TYPES::InstanceState, + pub instance_state: TYPES::InstanceState, - /// Optional validated state. - /// - /// If it's given, we'll use it to construct the `SystemContext`. Otherwise, we'll construct - /// the state from the block header. - validated_state: Option>, + /// Epoch height + pub epoch_height: u64, - /// Optional state delta. - /// - /// If it's given, we'll use it to construct the `SystemContext`. - state_delta: Option>::Delta>>, + /// the anchor leaf for the hotshot initializer + pub anchor_leaf: Leaf2, + + /// ValidatedState for the anchor leaf + pub anchor_state: Arc, + + /// ValidatedState::Delta for the anchor leaf, optional. + pub anchor_state_delta: Option>::Delta>>, /// Starting view number that should be equivalent to the view the node shut down with last. - start_view: TYPES::View, + pub start_view: TYPES::View, - /// Starting epoch number that should be equivalent to the epoch the node shut down with last. - start_epoch: Option, + /// The view we last performed an action in. An action is proposing or voting for + /// either the quorum or DA. + pub last_actioned_view: TYPES::View, - /// The view we last performed an action in. An action is Proposing or voting for - /// Either the quorum or DA. - actioned_view: TYPES::View, + /// Starting epoch number that should be equivalent to the epoch the node shut down with last. + pub start_epoch: Option, /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. - high_qc: QuorumCertificate2, + pub high_qc: QuorumCertificate2, /// Next epoch highest QC that was seen. This is needed to propose during epoch transition after restart. - next_epoch_high_qc: Option>, + pub next_epoch_high_qc: Option>, + + /// Proposals we have sent out to provide to others for catchup + pub saved_proposals: BTreeMap>>, /// Previously decided upgrade certificate; this is necessary if an upgrade has happened and we are not restarting with the new version - decided_upgrade_certificate: Option>, + pub decided_upgrade_certificate: Option>, /// Undecided leaves that were seen, but not yet decided on. These allow a restarting node /// to vote and propose right away if they didn't miss anything while down. - undecided_leaves: Vec>, + pub undecided_leaves: BTreeMap>, /// Not yet decided state - undecided_state: BTreeMap>, - - /// Proposals we have sent out to provide to others for catchup - saved_proposals: BTreeMap>>, + pub undecided_state: BTreeMap>, /// Saved VID shares - saved_vid_shares: Option>, + pub saved_vid_shares: VidShares, } impl HotShotInitializer { @@ -1045,66 +1038,111 @@ impl HotShotInitializer { /// If we are unable to apply the genesis block to the default state pub async fn from_genesis( instance_state: TYPES::InstanceState, + epoch_height: u64, ) -> Result> { let (validated_state, state_delta) = TYPES::ValidatedState::genesis(&instance_state); let high_qc = QuorumCertificate2::genesis::(&validated_state, &instance_state).await; Ok(Self { - inner: Leaf2::genesis::(&validated_state, &instance_state).await, - validated_state: Some(Arc::new(validated_state)), - state_delta: Some(Arc::new(state_delta)), + anchor_leaf: Leaf2::genesis::(&validated_state, &instance_state).await, + anchor_state: Arc::new(validated_state), + anchor_state_delta: Some(Arc::new(state_delta)), start_view: TYPES::View::new(0), start_epoch: genesis_epoch_from_version::(), - actioned_view: TYPES::View::new(0), + last_actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, next_epoch_high_qc: None, decided_upgrade_certificate: None, - undecided_leaves: Vec::new(), + undecided_leaves: BTreeMap::new(), undecided_state: BTreeMap::new(), instance_state, - saved_vid_shares: None, + saved_vid_shares: BTreeMap::new(), + epoch_height, }) } - /// Reload previous state based on most recent leaf and the instance-level state. + /// Use saved proposals to update undecided leaves and state + #[must_use] + pub fn update_undecided(self) -> Self { + let mut undecided_leaves = self.undecided_leaves.clone(); + let mut undecided_state = self.undecided_state.clone(); + + for proposal in self.saved_proposals.values() { + // skip proposals unless they're newer than the anchor leaf + if proposal.data.view_number() <= self.anchor_leaf.view_number() { + continue; + } + + undecided_leaves.insert( + proposal.data.view_number(), + Leaf2::from_quorum_proposal(&proposal.data), + ); + } + + for leaf in undecided_leaves.values() { + let view_inner = ViewInner::Leaf { + leaf: leaf.commit(), + state: Arc::new(TYPES::ValidatedState::from_header(leaf.block_header())), + delta: None, + epoch: leaf.epoch(self.epoch_height), + }; + let view = View { view_inner }; + + undecided_state.insert(leaf.view_number(), view); + } + + Self { + undecided_leaves, + undecided_state, + ..self + } + } + + /// Create a `HotShotInitializer` from the given information. /// - /// # Arguments - /// * `start_view` - The minimum view number that we are confident won't lead to a double vote - /// after restart. - /// * `validated_state` - Optional validated state that if given, will be used to construct the - /// `SystemContext`. + /// This function uses the anchor leaf to set the initial validated state, + /// and populates `undecided_leaves` and `undecided_state` using `saved_proposals`. + /// + /// If you are able to or would prefer to set these yourself, + /// you should use the `HotShotInitializer` constructor directly. #[allow(clippy::too_many_arguments)] - pub fn from_reload( - anchor_leaf: Leaf2, + pub fn load( instance_state: TYPES::InstanceState, - validated_state: Option>, - start_view: TYPES::View, - start_epoch: Option, - actioned_view: TYPES::View, + epoch_height: u64, + anchor_leaf: Leaf2, + (start_view, start_epoch): (TYPES::View, Option), + (high_qc, next_epoch_high_qc): ( + QuorumCertificate2, + Option>, + ), saved_proposals: BTreeMap>>, - high_qc: QuorumCertificate2, - next_epoch_high_qc: Option>, + saved_vid_shares: VidShares, decided_upgrade_certificate: Option>, - undecided_leaves: Vec>, - undecided_state: BTreeMap>, - saved_vid_shares: Option>, ) -> Self { - Self { - inner: anchor_leaf, + let anchor_state = Arc::new(TYPES::ValidatedState::from_header( + anchor_leaf.block_header(), + )); + let anchor_state_delta = None; + + let initializer = Self { instance_state, - validated_state, - state_delta: None, + epoch_height, + anchor_leaf, + anchor_state, + anchor_state_delta, + high_qc, start_view, start_epoch, - actioned_view, + last_actioned_view: start_view, saved_proposals, - high_qc, + saved_vid_shares, next_epoch_high_qc, decided_upgrade_certificate, - undecided_leaves, - undecided_state, - saved_vid_shares, - } + undecided_leaves: BTreeMap::new(), + undecided_state: BTreeMap::new(), + }; + + initializer.update_undecided() } } diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index 9d51d50a78..d17db57d70 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -104,9 +104,7 @@ async fn validate_node_map( // We want to make sure the commitment matches, // but allow for the possibility that we may have skipped views in between. - if child.justify_qc().view_number == parent.view_number() - && child.justify_qc().data.leaf_commit != parent.commit() - { + if child.justify_qc().data.leaf_commit != parent.commit() { bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); } diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index b6d0f7a1cb..202afa0f47 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -96,9 +96,10 @@ pub async fn build_system_handle_from_launcher< let marketplace_config = (launcher.resource_generators.marketplace_config)(node_id); let hotshot_config = (launcher.resource_generators.hotshot_config)(node_id); - let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( - launcher.metadata.async_delay_config.clone(), - )) + let initializer = HotShotInitializer::::from_genesis::( + TestInstanceState::new(launcher.metadata.async_delay_config.clone()), + launcher.metadata.test_config.epoch_height, + ) .await .unwrap(); diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 581c1e3a89..4cd23079d3 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -54,6 +54,8 @@ pub struct SpinningTask< I: TestableNodeImplementation, V: Versions, > { + /// epoch height + pub epoch_height: u64, /// handle to the nodes pub(crate) handles: Arc>>>, /// late start nodes @@ -154,18 +156,16 @@ where marketplace_config, } = late_context_params; - let initializer = HotShotInitializer::::from_reload( - self.last_decided_leaf.clone(), + let initializer = HotShotInitializer::::load( TestInstanceState::new(self.async_delay_config.clone()), - None, - TYPES::View::genesis(), - genesis_epoch_from_version::(), // #3967 is this right now after our earlier discussion? or should i be doing (epoch_height > 0).then(TYPES::Epoch::genesis) - TYPES::View::genesis(), + self.epoch_height, + self.last_decided_leaf.clone(), + ( + TYPES::View::genesis(), + genesis_epoch_from_version::(), + ), + (self.high_qc.clone(), self.next_epoch_high_qc.clone()), BTreeMap::new(), - self.high_qc.clone(), - self.next_epoch_high_qc.clone(), - None, - Vec::new(), BTreeMap::new(), None, ); @@ -238,27 +238,33 @@ where let config = node.handle.hotshot.config.clone(); let marketplace_config = node.handle.hotshot.marketplace_config.clone(); + let read_storage = storage.read().await; - let initializer = HotShotInitializer::::from_reload( - self.last_decided_leaf.clone(), + let next_epoch_high_qc = + read_storage.next_epoch_high_qc_cloned().await; + let start_view = read_storage.last_actioned_view().await; + let start_epoch = read_storage.last_actioned_epoch().await; + let high_qc = read_storage.high_qc_cloned().await.unwrap_or( + QuorumCertificate2::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, + ); + let saved_proposals = read_storage.proposals_cloned().await; + let saved_vid_shares = read_storage.vids_cloned().await; + let decided_upgrade_certificate = + read_storage.decided_upgrade_certificate().await; + + let initializer = HotShotInitializer::::load( TestInstanceState::new(self.async_delay_config.clone()), - None, - read_storage.last_actioned_view().await, - read_storage.last_actioned_epoch().await, - read_storage.last_actioned_view().await, - read_storage.proposals_cloned().await, - read_storage.high_qc_cloned().await.unwrap_or( - QuorumCertificate2::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await, - ), - read_storage.next_epoch_high_qc_cloned().await, - read_storage.decided_upgrade_certificate().await, - Vec::new(), - BTreeMap::new(), - Some(read_storage.vids_cloned().await), + self.epoch_height, + self.last_decided_leaf.clone(), + (start_view, start_epoch), + (high_qc, next_epoch_high_qc), + saved_proposals, + saved_vid_shares, + decided_upgrade_certificate, ); // We assign node's public key and stake value rather than read from config file since it's a test let validator_config = ValidatorConfig::generated_from_seed_indexed( diff --git a/testing/src/test_builder.rs b/testing/src/test_builder.rs index 6e35b0204c..b2182e4d34 100644 --- a/testing/src/test_builder.rs +++ b/testing/src/test_builder.rs @@ -234,9 +234,10 @@ pub async fn create_test_handle< storage: I::Storage, marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { - let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( - metadata.async_delay_config, - )) + let initializer = HotShotInitializer::::from_genesis::( + TestInstanceState::new(metadata.async_delay_config), + metadata.test_config.epoch_height, + ) .await .unwrap(); diff --git a/testing/src/test_runner.rs b/testing/src/test_runner.rs index 95fd60494d..618ffa49e9 100644 --- a/testing/src/test_runner.rs +++ b/testing/src/test_runner.rs @@ -180,6 +180,7 @@ where } let spinning_task_state = SpinningTask { + epoch_height: launcher.metadata.test_config.epoch_height, handles: Arc::clone(&handles), late_start, latest_view: None, @@ -486,6 +487,7 @@ where } else { let initializer = HotShotInitializer::::from_genesis::( TestInstanceState::new(self.launcher.metadata.async_delay_config.clone()), + config.epoch_height, ) .await .unwrap(); From 1378a169f74607443ac4a4be8975d9c447127642 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 27 Jan 2025 17:35:40 -0500 Subject: [PATCH 1372/1393] Serialization fixes (#4089) --- types/src/message.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/types/src/message.rs b/types/src/message.rs index de9aee29ad..6f6020cbd2 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -204,12 +204,6 @@ pub enum GeneralConsensusMessage { /// Message with an upgrade vote UpgradeVote(UpgradeVote), - /// Message with a quorum proposal. - Proposal2(Proposal>), - - /// Message with a quorum vote. - Vote2(QuorumVote2), - /// A peer node needs a proposal from the leader. ProposalRequested( ProposalRequestPayload, @@ -219,6 +213,12 @@ pub enum GeneralConsensusMessage { /// A replica has responded with a valid proposal. ProposalResponse(Proposal>), + /// Message with a quorum proposal. + Proposal2(Proposal>), + + /// Message with a quorum vote. + Vote2(QuorumVote2), + /// A replica has responded with a valid proposal. ProposalResponse2(Proposal>), From 64873d498bcf82b9e6a41ea679447f9adbc064cb Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Tue, 28 Jan 2025 19:12:59 -0500 Subject: [PATCH 1373/1393] [VID Upgrade] version control of `VidDisperse` and `VidDisperseShare` (#4078) * wip * wrapping Disperse and DisperseShares * minor fixes * fix `calculate_vid_disperse` * fix `VidDisperseMsg` handling * revert changes to DaConsensusMessage * delete a unnecessary comment --- example-types/src/storage_types.rs | 7 +- task-impls/src/events.rs | 12 +- task-impls/src/network.rs | 99 +++- task-impls/src/quorum_proposal/handlers.rs | 4 +- task-impls/src/quorum_proposal/mod.rs | 8 +- task-impls/src/quorum_vote/handlers.rs | 12 +- task-impls/src/quorum_vote/mod.rs | 53 +- task-impls/src/request.rs | 6 +- task-impls/src/response.rs | 4 +- task-impls/src/vid.rs | 21 +- testing/src/helpers.rs | 12 +- testing/src/spinning_task.rs | 12 +- testing/src/view_generator.rs | 4 +- types/src/consensus.rs | 10 +- types/src/data.rs | 615 +++++++++------------ types/src/data/vid_disperse.rs | 429 ++++++++++++++ types/src/event.rs | 6 +- types/src/message.rs | 7 +- types/src/traits/storage.rs | 6 +- types/src/vid.rs | 4 +- 20 files changed, 858 insertions(+), 473 deletions(-) create mode 100644 types/src/data/vid_disperse.rs diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index c44d500d12..ec39a66009 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -15,8 +15,9 @@ use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, data::{ + vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, - QuorumProposalWrapper, VidDisperseShare, VidDisperseShare2, + QuorumProposalWrapper, }, event::HotShotAction, message::Proposal, @@ -35,7 +36,7 @@ use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, Testa type VidShares = BTreeMap< ::View, - HashMap<::SignatureKey, Proposal>>, + HashMap<::SignatureKey, Proposal>>, >; type VidShares2 = BTreeMap< ::View, @@ -141,7 +142,7 @@ impl TestStorage { #[async_trait] impl Storage for TestStorage { - async fn append_vid(&self, proposal: &Proposal>) -> Result<()> { + async fn append_vid(&self, proposal: &Proposal>) -> Result<()> { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } diff --git a/task-impls/src/events.rs b/task-impls/src/events.rs index 560a54b0ee..daa18e6665 100644 --- a/task-impls/src/events.rs +++ b/task-impls/src/events.rs @@ -12,7 +12,7 @@ use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{ DaProposal2, Leaf2, PackedBundle, QuorumProposal2, QuorumProposalWrapper, UpgradeProposal, - VidDisperse, VidDisperseShare2, + VidDisperse, VidDisperseShare, }, message::Proposal, request_response::ProposalRequestPayload, @@ -198,10 +198,10 @@ pub enum HotShotEvent { /// Like [`HotShotEvent::DaProposalRecv`]. VidShareRecv( TYPES::SignatureKey, - Proposal>, + Proposal>, ), /// VID share data is validated. - VidShareValidated(Proposal>), + VidShareValidated(Proposal>), /// Upgrade proposal has been received from the network UpgradeProposalRecv(Proposal>, TYPES::SignatureKey), /// Upgrade proposal has been sent to the network @@ -240,13 +240,13 @@ pub enum HotShotEvent { TYPES::SignatureKey, /// Recipient key TYPES::SignatureKey, - Proposal>, + Proposal>, ), /// Receive a VID response from the network; received by the node that triggered the VID request. VidResponseRecv( TYPES::SignatureKey, - Proposal>, + Proposal>, ), /// A replica send us a High QC @@ -340,7 +340,7 @@ impl HotShotEvent { HotShotEvent::VidRequestSend(request, _, _) | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), HotShotEvent::VidResponseSend(_, _, proposal) - | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number), + | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number()), HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, ..) => { Some(qc.view_number()) } diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index ff86ebf20f..3216a60551 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -16,7 +16,7 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::{VidDisperse, VidDisperseShare, VidDisperseShare2}, + data::{VidDisperse, VidDisperseShare}, event::{Event, EventType, HotShotAction}, message::{ convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message, @@ -380,7 +380,7 @@ impl NetworkMessageTaskState { tracing::warn!("received DaConsensusMessage::VidDisperseMsg2 for view {} but epochs are not enabled for that view", proposal.data.view_number()); return; } - HotShotEvent::VidShareRecv(sender, proposal) + HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) } }, }; @@ -418,7 +418,10 @@ impl NetworkMessageTaskState { proposal, )) => { broadcast_event( - Arc::new(HotShotEvent::VidResponseRecv(sender, proposal)), + Arc::new(HotShotEvent::VidResponseRecv( + sender, + convert_proposal(proposal), + )), &self.internal_event_stream, ) .await; @@ -541,28 +544,49 @@ impl< vid_proposal: Proposal>, sender: &::SignatureKey, ) -> Option { - let view = vid_proposal.data.view_number; - let vid_share_proposals = VidDisperseShare2::to_vid_share_proposals(vid_proposal); + let view = vid_proposal.data.view_number(); + let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); let mut messages = HashMap::new(); for proposal in vid_share_proposals { - let recipient = proposal.data.recipient_key.clone(); + let recipient = proposal.data.recipient_key().clone(); let message = if self .upgrade_lock .epochs_enabled(proposal.data.view_number()) .await { + let vid_share_proposal = if let VidDisperseShare::V1(data) = proposal.data { + Proposal { + data, + signature: proposal.signature, + _pd: proposal._pd, + } + } else { + tracing::warn!( + "Epochs are enabled for view {} but didn't receive VidDisperseShare2", + proposal.data.view_number() + ); + return None; + }; Message { sender: sender.clone(), kind: MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::VidDisperseMsg2(proposal), + DaConsensusMessage::VidDisperseMsg2(vid_share_proposal), )), } } else { - let vid_share_proposal = Proposal { - data: VidDisperseShare::from(proposal.data), - signature: proposal.signature, - _pd: proposal._pd, + let vid_share_proposal = if let VidDisperseShare::V0(data) = proposal.data { + Proposal { + data, + signature: proposal.signature, + _pd: proposal._pd, + } + } else { + tracing::warn!( + "Epochs are not enabled for view {} but didn't receive ADVZDisperseShare", + proposal.data.view_number() + ); + return None; }; Message { sender: sender.clone(), @@ -1021,20 +1045,49 @@ impl< TransmitType::Direct(to), )), HotShotEvent::VidResponseSend(sender, to, proposal) => { - let message = if self + let epochs_enabled = self .upgrade_lock .epochs_enabled(proposal.data.view_number()) - .await - { - MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( - SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2(proposal)), - ))) - } else { - MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( - SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg( - convert_proposal(proposal), - )), - ))) + .await; + let message = match proposal.data { + VidDisperseShare::V0(data) => { + if epochs_enabled { + tracing::warn!( + "Epochs are enabled for view {} but didn't receive VidDisperseShare2", + data.view_number() + ); + return None; + } + let vid_share_proposal = Proposal { + data, + signature: proposal.signature, + _pd: proposal._pd, + }; + MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg( + vid_share_proposal, + )), + ))) + } + VidDisperseShare::V1(data) => { + if !epochs_enabled { + tracing::warn!( + "Epochs are enabled for view {} but didn't receive ADVZDisperseShare", + data.view_number() + ); + return None; + } + let vid_share_proposal = Proposal { + data, + signature: proposal.signature, + _pd: proposal._pd, + }; + MessageKind::Data(DataMessage::DataResponse(ResponseMessage::Found( + SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2( + vid_share_proposal, + )), + ))) + } }; Some((sender, message, TransmitType::Direct(to))) } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 36d24dcf61..02d7c66d0b 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -343,7 +343,7 @@ impl ProposalDependencyHandle { builder_commitment, metadata, commitment_and_metadata.fees.first().clone(), - vid_share.data.common.clone(), + vid_share.data.vid_common_ref().clone(), version, ) .await @@ -359,7 +359,7 @@ impl ProposalDependencyHandle { commitment_and_metadata.metadata, commitment_and_metadata.fees.to_vec(), *self.view_number, - vid_share.data.common.clone(), + vid_share.data.vid_common_ref().clone(), commitment_and_metadata.auction_result, version, ) diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index 054a69bb98..d8cad2869f 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -153,8 +153,8 @@ impl, V: Versions> } } ProposalDependency::VidShare => { - if let HotShotEvent::VidDisperseSend(vid_share, _) = event { - vid_share.data.view_number() + if let HotShotEvent::VidDisperseSend(vid_disperse, _) = event { + vid_disperse.data.view_number() } else { return false; } @@ -528,8 +528,8 @@ impl, V: Versions> "Failed to update latest proposed view" ); } - HotShotEvent::VidDisperseSend(vid_share, _) => { - let view_number = vid_share.data.view_number(); + HotShotEvent::VidDisperseSend(vid_disperse, _) => { + let view_number = vid_disperse.data.view_number(); self.create_dependency_task_if_new( view_number, epoch_number, diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 451604f883..1579611adc 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -12,10 +12,10 @@ use chrono::Utc; use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf2, QuorumProposalWrapper, VidDisperseShare2}, + data::{Leaf2, QuorumProposalWrapper, VidDisperseShare}, drb::{compute_drb_result, DrbResult}, event::{Event, EventType}, - message::{Proposal, UpgradeLock}, + message::{convert_proposal, Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, traits::{ block_contents::BlockHeader, @@ -463,7 +463,7 @@ pub(crate) async fn update_shared_state< instance_state: Arc, storage: Arc>, proposed_leaf: &Leaf2, - vid_share: &Proposal>, + vid_share: &Proposal>, parent_view_number: Option, epoch_height: u64, ) -> Result<()> { @@ -537,7 +537,7 @@ pub(crate) async fn update_shared_state< &instance_state, &parent, &proposed_leaf.block_header().clone(), - vid_share.data.common.clone(), + vid_share.data.vid_common_ref().clone(), version, *view_number, ) @@ -588,7 +588,7 @@ pub(crate) async fn submit_vote, V view_number: TYPES::View, storage: Arc>, leaf: Leaf2, - vid_share: Proposal>, + vid_share: Proposal>, extended_vote: bool, epoch_height: u64, ) -> Result<()> { @@ -633,7 +633,7 @@ pub(crate) async fn submit_vote, V storage .write() .await - .append_vid2(&vid_share) + .append_vid2(&convert_proposal(vid_share)) .await .wrap() .context(error!("Failed to store VID share"))?; diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 24c04a16a4..6953ea1f60 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -21,6 +21,7 @@ use hotshot_types::{ drb::DrbComputation, event::Event, message::{Proposal, UpgradeLock}, + simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, election::Membership, @@ -29,10 +30,8 @@ use hotshot_types::{ storage::Storage, }, utils::{epoch_from_block_number, option_epoch_from_block_number}, - vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; -use jf_vid::VidScheme; use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; @@ -176,13 +175,10 @@ impl + 'static, V: Versions> Handl } } HotShotEvent::VidShareValidated(share) => { - let vid_payload_commitment = if let Some(ref data_epoch_payload_commitment) = - share.data.data_epoch_payload_commitment - { - data_epoch_payload_commitment - } else { - &share.data.payload_commitment - }; + let vid_payload_commitment = &share + .data + .data_epoch_payload_commitment() + .unwrap_or(share.data.payload_commitment()); vid_share = Some(share.clone()); if let Some(ref comm) = payload_commitment { if vid_payload_commitment != comm { @@ -353,7 +349,7 @@ impl, V: Versions> QuorumVoteTaskS } VoteDependency::Vid => { if let HotShotEvent::VidShareValidated(disperse) = event { - disperse.data.view_number + disperse.data.view_number() } else { return false; } @@ -563,8 +559,8 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&event), ); } - HotShotEvent::VidShareRecv(sender, disperse) => { - let view = disperse.data.view_number(); + HotShotEvent::VidShareRecv(sender, share) => { + let view = share.data.view_number(); // Do nothing if the VID share is old tracing::trace!("Received VID share for view {}", *view); ensure!( @@ -573,16 +569,16 @@ impl, V: Versions> QuorumVoteTaskS ); // Validate the VID share. - let payload_commitment = &disperse.data.payload_commitment; + let payload_commitment = share.data.payload_commitment_ref(); // Check that the signature is valid ensure!( - sender.validate(&disperse.signature, payload_commitment.as_ref()), + sender.validate(&share.signature, payload_commitment.as_ref()), "VID share signature is invalid" ); - let vid_epoch = disperse.data.epoch; - let target_epoch = disperse.data.target_epoch; + let vid_epoch = share.data.epoch(); + let target_epoch = share.data.target_epoch(); let membership_reader = self.membership.read().await; // ensure that the VID share was sent by a DA member OR the view leader ensure!( @@ -596,31 +592,22 @@ impl, V: Versions> QuorumVoteTaskS let membership_total_nodes = membership_reader.total_nodes(target_epoch); drop(membership_reader); - // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner - // and outer results - match vid_scheme(membership_total_nodes).verify_share( - &disperse.data.share, - &disperse.data.common, - payload_commitment, - ) { - Ok(Err(())) | Err(_) => { - bail!("Failed to verify VID share"); - } - Ok(Ok(())) => {} + if let Err(()) = share.data.verify_share(membership_total_nodes) { + bail!("Failed to verify VID share"); } self.consensus .write() .await - .update_vid_shares(view, disperse.clone()); + .update_vid_shares(view, share.clone()); ensure!( - disperse.data.recipient_key == self.public_key, + *share.data.recipient_key() == self.public_key, "Got a Valid VID share but it's not for our key" ); broadcast_event( - Arc::new(HotShotEvent::VidShareValidated(disperse.clone())), + Arc::new(HotShotEvent::VidShareValidated(share.clone())), &event_sender.clone(), ) .await; @@ -693,8 +680,10 @@ impl, V: Versions> QuorumVoteTaskS ))?; let mut updated_vid = vid.clone(); - updated_vid.data.view_number = proposal.data.view_number(); - consensus_writer.update_vid_shares(updated_vid.data.view_number, updated_vid.clone()); + updated_vid + .data + .set_view_number(proposal.data.view_number()); + consensus_writer.update_vid_shares(updated_vid.data.view_number(), updated_vid.clone()); drop(consensus_writer); diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index 2500e28ee1..eb33e9f9cc 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -339,10 +339,8 @@ impl> NetworkRequestState NetworkResponseState { &self, view: TYPES::View, key: &TYPES::SignatureKey, - ) -> Option>> { + ) -> Option>> { let consensus_reader = self.consensus.read().await; if let Some(view) = consensus_reader.vid_shares().get(&view) { if let Some(share) = view.get(key) { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 90430b7a86..41af76bae1 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -12,7 +12,7 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::OuterConsensus, - data::{PackedBundle, VidDisperse, VidDisperseShare2}, + data::{PackedBundle, VidDisperse, VidDisperseShare}, message::{Proposal, UpgradeLock}, traits::{ block_contents::BlockHeader, @@ -108,12 +108,12 @@ impl, V: Versions> VidTaskState, V: Versions> VidTaskState, V: Versions> VidTaskState, V: Versions> VidTaskState( - shares: &[Proposal>], + shares: &[Proposal>], pub_key: TYPES::SignatureKey, -) -> Proposal> { +) -> Proposal> { shares .iter() - .filter(|s| s.data.recipient_key == pub_key) + .filter(|s| *s.data.recipient_key() == pub_key) .cloned() .collect::>() .first() @@ -344,7 +344,7 @@ pub async fn build_vid_proposal( .await; let signature = - TYPES::SignatureKey::sign(private_key, vid_disperse.payload_commitment.as_ref()) + TYPES::SignatureKey::sign(private_key, vid_disperse.payload_commitment().as_ref()) .expect("Failed to sign VID commitment"); let vid_disperse_proposal = Proposal { data: vid_disperse.clone(), @@ -354,7 +354,7 @@ pub async fn build_vid_proposal( ( vid_disperse_proposal, - VidDisperseShare2::from_vid_disperse(vid_disperse) + VidDisperseShare::from_vid_disperse(vid_disperse) .into_iter() .map(|vid_disperse| { vid_disperse diff --git a/testing/src/spinning_task.rs b/testing/src/spinning_task.rs index 4cd23079d3..17f783a008 100644 --- a/testing/src/spinning_task.rs +++ b/testing/src/spinning_task.rs @@ -28,6 +28,7 @@ use hotshot_types::{ constants::EVENT_CHANNEL_SIZE, data::Leaf2, event::Event, + message::convert_proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2}, traits::{ network::{AsyncGenerator, ConnectedNetwork}, @@ -252,7 +253,14 @@ where .await, ); let saved_proposals = read_storage.proposals_cloned().await; - let saved_vid_shares = read_storage.vids_cloned().await; + let mut vid_shares = BTreeMap::new(); + for (view, hash_map) in read_storage.vids_cloned().await { + let mut converted_hash_map = HashMap::new(); + for (key, proposal) in hash_map { + converted_hash_map.insert(key, convert_proposal(proposal)); + } + vid_shares.insert(view, converted_hash_map); + } let decided_upgrade_certificate = read_storage.decided_upgrade_certificate().await; @@ -263,7 +271,7 @@ where (start_view, start_epoch), (high_qc, next_epoch_high_qc), saved_proposals, - saved_vid_shares, + vid_shares, decided_upgrade_certificate, ); // We assign node's public key and stake value rather than read from config file since it's a test diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 30cf89ac45..c5d77232db 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -24,7 +24,7 @@ use hotshot_example_types::{ use hotshot_types::{ data::{ DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, - VidDisperseShare2, ViewChangeEvidence2, ViewNumber, + VidDisperseShare, ViewChangeEvidence2, ViewNumber, }, message::{Proposal, UpgradeLock}, simple_certificate::{ @@ -59,7 +59,7 @@ pub struct TestView { pub membership: Arc::Membership>>, pub vid_disperse: Proposal>, pub vid_proposal: ( - Vec>>, + Vec>>, ::SignatureKey, ), pub leader_public_key: ::SignatureKey, diff --git a/types/src/consensus.rs b/types/src/consensus.rs index 7935fad89e..a2d92c6578 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -21,7 +21,7 @@ use vec1::Vec1; pub use crate::utils::{View, ViewInner}; use crate::{ - data::{Leaf2, QuorumProposalWrapper, VidDisperse, VidDisperseShare2}, + data::{Leaf2, QuorumProposalWrapper, VidDisperse, VidDisperseShare}, drb::DrbSeedsAndResults, error::HotShotError, event::{HotShotAction, LeafInfo}, @@ -48,7 +48,7 @@ pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` pub type VidShares = BTreeMap< ::View, - HashMap<::SignatureKey, Proposal>>, + HashMap<::SignatureKey, Proposal>>, >; /// Type alias for consensus state wrapped in a lock. @@ -790,12 +790,12 @@ impl Consensus { pub fn update_vid_shares( &mut self, view_number: TYPES::View, - disperse: Proposal>, + disperse: Proposal>, ) { self.vid_shares .entry(view_number) .or_default() - .insert(disperse.data.recipient_key.clone(), disperse); + .insert(disperse.data.recipient_key().clone(), disperse); } /// Add a new entry to the da_certs map. @@ -972,7 +972,7 @@ impl Consensus { .await .ok()?; - let shares = VidDisperseShare2::from_vid_disperse(vid); + let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; for share in shares { if let Some(prop) = share.to_proposal(private_key) { diff --git a/types/src/data.rs b/types/src/data.rs index 37013b80df..1bbf6df66c 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -10,7 +10,6 @@ //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. use std::{ - collections::BTreeMap, fmt::{Debug, Display}, hash::Hash, marker::PhantomData, @@ -20,18 +19,18 @@ use std::{ use async_lock::RwLock; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; -use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; +use jf_vid::VidDisperse as JfVidDisperse; use rand::Rng; use serde::{Deserialize, Serialize}; use thiserror::Error; -use tokio::task::spawn_blocking; use utils::anytrace::*; use vec1::Vec1; +use vid_disperse::{ADVZDisperse, ADVZDisperseShare, VidDisperseShare2}; use crate::{ drb::DrbResult, impl_has_epoch, - message::{Proposal, UpgradeLock}, + message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, TimeoutCertificate2, UpgradeCertificate, ViewSyncFinalizeCertificate, @@ -43,14 +42,13 @@ use crate::{ vid_commitment, BlockHeader, BuilderFee, EncodeBytes, TestableBlock, GENESIS_VID_NUM_STORAGE_NODES, }, - election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, states::TestableState, BlockPayload, }, utils::{bincode_opts, genesis_epoch_from_version, option_epoch_from_block_number}, - vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType, VidShare}, + vid::{VidCommitment, VidCommon, VidSchemeType}, vote::{Certificate, HasViewNumber}, }; @@ -196,27 +194,42 @@ where pub view_number: TYPES::View, } +pub mod vid_disperse; + /// VID dispersal data /// /// Like [`DaProposal`]. /// /// TODO move to vid.rs? #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct VidDisperse { - /// The view number for which this VID data is intended - pub view_number: TYPES::View, - /// Epoch the data of this proposal belongs to - pub epoch: Option, - /// Epoch to which the recipients of this VID belong to - pub target_epoch: Option, - /// VidCommitment calculated based on the number of nodes in `target_epoch`. - pub payload_commitment: VidCommitment, - /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. - pub data_epoch_payload_commitment: Option, - /// A storage node's key and its corresponding VID share - pub shares: BTreeMap, - /// VID common data sent to all storage nodes - pub common: VidCommon, +#[serde(bound = "TYPES: NodeType")] +pub enum VidDisperse { + /// Disperse type for first VID version + V0(vid_disperse::ADVZDisperse), + /// Place holder for VID upgrade + V1(vid_disperse::ADVZDisperse), +} + +impl From> for VidDisperse { + fn from(disperse: vid_disperse::ADVZDisperse) -> Self { + Self::V0(disperse) + } +} + +impl HasViewNumber for VidDisperse { + fn view_number(&self) -> TYPES::View { + match self { + Self::V0(disperse) | Self::V1(disperse) => disperse.view_number(), + } + } +} + +impl HasEpoch for VidDisperse { + fn epoch(&self) -> Option { + match self { + Self::V0(disperse) | Self::V1(disperse) => disperse.epoch(), + } + } } impl VidDisperse { @@ -225,29 +238,23 @@ impl VidDisperse { /// Allows for more complex stake table functionality pub async fn from_membership( view_number: TYPES::View, - mut vid_disperse: JfVidDisperse, + vid_disperse: JfVidDisperse, membership: &Arc>, target_epoch: Option, data_epoch: Option, data_epoch_payload_commitment: Option, ) -> Self { - let shares = membership - .read() - .await - .committee_members(view_number, target_epoch) - .iter() - .map(|node| (node.clone(), vid_disperse.shares.remove(0))) - .collect(); - - Self { - view_number, - shares, - common: vid_disperse.common, - payload_commitment: vid_disperse.commit, - data_epoch_payload_commitment, - epoch: data_epoch, - target_epoch, - } + Self::V0( + ADVZDisperse::from_membership( + view_number, + vid_disperse, + membership, + target_epoch, + data_epoch, + data_epoch_payload_commitment, + ) + .await, + ) } /// Calculate the vid disperse information from the payload given a view, epoch and membership, @@ -263,43 +270,225 @@ impl VidDisperse { target_epoch: Option, data_epoch: Option, ) -> Result { - let num_nodes = membership.read().await.total_nodes(target_epoch); + ADVZDisperse::calculate_vid_disperse(payload, membership, view, target_epoch, data_epoch) + .await + .map(|result| match data_epoch { + None => Self::V0(result), + Some(_) => Self::V1(result), + }) + } - let txns = payload.encode(); - let txns_clone = Arc::clone(&txns); - let num_txns = txns.len(); + /// Return a reference to the internal VidCommon field. + /// TODO(Chengyu): rewrite this after VID upgrade + pub fn vid_common_ref(&self) -> &VidCommon { + match self { + Self::V0(disperse) | Self::V1(disperse) => &disperse.common, + } + } - let vid_disperse = spawn_blocking(move || vid_scheme(num_nodes).disperse(&txns_clone)) - .await - .wrap() - .context(error!("Join error"))? - .wrap() - .context(|err| error!("Failed to calculate VID disperse. Error: {}", err))?; + /// Return the internal payload commitment + /// TODO(Chengyu): rewrite this after VID upgrade + pub fn payload_commitment(&self) -> VidCommitment { + match self { + Self::V0(disperse) | Self::V1(disperse) => disperse.payload_commitment, + } + } - let payload_commitment = if target_epoch == data_epoch { - None - } else { - let num_nodes = membership.read().await.total_nodes(data_epoch); + /// Unwrap self + /// TODO(Chengyu): remove this after VID upgrade + pub fn as_advz(self) -> ADVZDisperse { + match self { + Self::V0(disperse) | Self::V1(disperse) => disperse, + } + } - Some( - spawn_blocking(move || vid_scheme(num_nodes).commit_only(&txns)) - .await - .wrap() - .context(error!("Join error"))? - .wrap() - .context(|err| error!("Failed to calculate VID commitment with (num_storage_nodes, payload_byte_len) = ({}, {}). Error: {}", num_nodes, num_txns, err))? - ) + /// Set the view number + pub fn set_view_number(&mut self, view_number: ::View) { + match self { + Self::V0(share) | Self::V1(share) => share.view_number = view_number, + } + } +} + +/// VID share and associated metadata for a single node +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound = "TYPES: NodeType")] +pub enum VidDisperseShare { + /// VID disperse share type for first version VID + V0(vid_disperse::ADVZDisperseShare), + /// VID disperse share type after epoch upgrade and VID upgrade + V1(vid_disperse::VidDisperseShare2), +} + +impl VidDisperseShare { + /// Create a vector of `VidDisperseShare` from `VidDisperse` + pub fn from_vid_disperse(vid_disperse: VidDisperse) -> Vec { + match vid_disperse { + VidDisperse::V0(vid_disperse) => { + ADVZDisperseShare::::from_advz_disperse(vid_disperse) + .into_iter() + .map(|share| Self::V0(share)) + .collect() + } + VidDisperse::V1(vid_disperse) => { + VidDisperseShare2::::from_vid_disperse(vid_disperse) + .into_iter() + .map(|share| Self::V1(share)) + .collect() + } + } + } + + /// Consume `self` and return a `Proposal` + pub fn to_proposal( + self, + private_key: &::PrivateKey, + ) -> Option> { + let payload_commitment_ref: &[u8] = match &self { + Self::V0(share) => share.payload_commitment.as_ref(), + Self::V1(share) => share.payload_commitment.as_ref(), }; + let Ok(signature) = TYPES::SignatureKey::sign(private_key, payload_commitment_ref) else { + tracing::error!("VID: failed to sign dispersal share payload"); + return None; + }; + Some(Proposal { + signature, + _pd: PhantomData, + data: self, + }) + } - Ok(Self::from_membership( - view, - vid_disperse, - membership, - target_epoch, - data_epoch, - payload_commitment, - ) - .await) + /// Split a VID share proposal into a proposal for each recipient. + pub fn to_vid_share_proposals( + vid_disperse_proposal: Proposal>, + ) -> Vec> { + match vid_disperse_proposal.data { + VidDisperse::V0(disperse) => ADVZDisperseShare::to_vid_share_proposals( + disperse, + &vid_disperse_proposal.signature, + ) + .into_iter() + .map(|proposal| convert_proposal(proposal)) + .collect(), + VidDisperse::V1(disperse) => VidDisperseShare2::to_vid_share_proposals( + disperse, + &vid_disperse_proposal.signature, + ) + .into_iter() + .map(|proposal| convert_proposal(proposal)) + .collect(), + } + } + + /// Return the internal `recipient_key` + pub fn recipient_key(&self) -> &TYPES::SignatureKey { + match self { + Self::V0(share) => &share.recipient_key, + Self::V1(share) => &share.recipient_key, + } + } + + /// Return a reference to the internal payload VID commitment + pub fn payload_commitment_ref(&self) -> &[u8] { + match self { + Self::V0(share) => share.payload_commitment.as_ref(), + Self::V1(share) => share.payload_commitment.as_ref(), + } + } + + /// Return the internal payload VID commitment + /// TODO(Chengyu): restructure this, since payload commitment will have different types given different version. + pub fn payload_commitment(&self) -> VidCommitment { + match self { + Self::V0(share) => share.payload_commitment, + Self::V1(share) => share.payload_commitment, + } + } + /// Return the internal data epoch payload VID commitment + /// TODO(Chengyu): restructure this, since payload commitment will have different types given different version. + pub fn data_epoch_payload_commitment(&self) -> Option { + match self { + Self::V0(_) => None, + Self::V1(share) => share.data_epoch_payload_commitment, + } + } + + /// Return a reference to the internal VidCommon field. + /// TODO(Chengyu): remove this after VID upgrade + pub fn vid_common_ref(&self) -> &VidCommon { + match self { + Self::V0(share) => &share.common, + Self::V1(share) => &share.common, + } + } + + /// Return the target epoch + /// TODO(Chengyu): remove this? + pub fn target_epoch(&self) -> Option<::Epoch> { + match self { + Self::V0(_) => None, + Self::V1(share) => share.target_epoch, + } + } + + /// Internally verify the share given necessary information + /// + /// # Errors + #[allow(clippy::result_unit_err)] + pub fn verify_share(&self, total_nodes: usize) -> std::result::Result<(), ()> { + match self { + Self::V0(share) => share.verify_share(total_nodes), + Self::V1(share) => share.verify_share(total_nodes), + } + } + + /// Set the view number + pub fn set_view_number(&mut self, view_number: ::View) { + match self { + Self::V0(share) => share.view_number = view_number, + Self::V1(share) => share.view_number = view_number, + } + } +} + +impl HasViewNumber for VidDisperseShare { + fn view_number(&self) -> TYPES::View { + match self { + Self::V0(disperse) => disperse.view_number(), + Self::V1(disperse) => disperse.view_number(), + } + } +} + +impl HasEpoch for VidDisperseShare { + fn epoch(&self) -> Option { + match self { + Self::V0(_) => None, + Self::V1(share) => share.epoch(), + } + } +} + +impl From> for VidDisperseShare { + fn from(share: vid_disperse::ADVZDisperseShare) -> Self { + Self::V0(share) + } +} + +impl From> for VidDisperseShare { + fn from(share: vid_disperse::VidDisperseShare2) -> Self { + Self::V1(share) + } +} + +// TODO(Chengyu): this conversion may not be done after vid upgrade. Sync with storage `append_vid2` change later. +impl From> for vid_disperse::VidDisperseShare2 { + fn from(share: VidDisperseShare) -> vid_disperse::VidDisperseShare2 { + match share { + VidDisperseShare::V0(share) => share.into(), + VidDisperseShare::V1(share) => share, + } } } @@ -369,268 +558,6 @@ impl ViewChangeEvidence2 { } } -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -/// VID share and associated metadata for a single node -pub struct VidDisperseShare { - /// The view number for which this VID data is intended - pub view_number: TYPES::View, - /// Block payload commitment - pub payload_commitment: VidCommitment, - /// A storage node's key and its corresponding VID share - pub share: VidShare, - /// VID common data sent to all storage nodes - pub common: VidCommon, - /// a public key of the share recipient - pub recipient_key: TYPES::SignatureKey, -} - -impl VidDisperseShare { - /// Create a vector of `VidDisperseShare` from `VidDisperse` - pub fn from_vid_disperse(vid_disperse: VidDisperse) -> Vec { - vid_disperse - .shares - .into_iter() - .map(|(recipient_key, share)| Self { - share, - recipient_key, - view_number: vid_disperse.view_number, - common: vid_disperse.common.clone(), - payload_commitment: vid_disperse.payload_commitment, - }) - .collect() - } - - /// Consume `self` and return a `Proposal` - pub fn to_proposal( - self, - private_key: &::PrivateKey, - ) -> Option> { - let Ok(signature) = - TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) - else { - tracing::error!("VID: failed to sign dispersal share payload"); - return None; - }; - Some(Proposal { - signature, - _pd: PhantomData, - data: self, - }) - } - - /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s - pub fn to_vid_disperse<'a, I>(mut it: I) -> Option> - where - I: Iterator, - { - let first_vid_disperse_share = it.next()?.clone(); - let mut share_map = BTreeMap::new(); - share_map.insert( - first_vid_disperse_share.recipient_key, - first_vid_disperse_share.share, - ); - let mut vid_disperse = VidDisperse { - view_number: first_vid_disperse_share.view_number, - epoch: None, - target_epoch: None, - payload_commitment: first_vid_disperse_share.payload_commitment, - data_epoch_payload_commitment: None, - common: first_vid_disperse_share.common, - shares: share_map, - }; - let _ = it.map(|vid_disperse_share| { - vid_disperse.shares.insert( - vid_disperse_share.recipient_key.clone(), - vid_disperse_share.share.clone(), - ) - }); - Some(vid_disperse) - } - - /// Split a VID share proposal into a proposal for each recipient. - pub fn to_vid_share_proposals( - vid_disperse_proposal: Proposal>, - ) -> Vec> { - vid_disperse_proposal - .data - .shares - .into_iter() - .map(|(recipient_key, share)| Proposal { - data: Self { - share, - recipient_key, - view_number: vid_disperse_proposal.data.view_number, - common: vid_disperse_proposal.data.common.clone(), - payload_commitment: vid_disperse_proposal.data.payload_commitment, - }, - signature: vid_disperse_proposal.signature.clone(), - _pd: vid_disperse_proposal._pd, - }) - .collect() - } -} - -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -/// VID share and associated metadata for a single node -pub struct VidDisperseShare2 { - /// The view number for which this VID data is intended - pub view_number: TYPES::View, - /// The epoch number for which this VID data belongs to - pub epoch: Option, - /// The epoch number to which the recipient of this VID belongs to - pub target_epoch: Option, - /// Block payload commitment - pub payload_commitment: VidCommitment, - /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. - pub data_epoch_payload_commitment: Option, - /// A storage node's key and its corresponding VID share - pub share: VidShare, - /// VID common data sent to all storage nodes - pub common: VidCommon, - /// a public key of the share recipient - pub recipient_key: TYPES::SignatureKey, -} - -impl From> for VidDisperseShare { - fn from(vid_disperse2: VidDisperseShare2) -> Self { - let VidDisperseShare2 { - view_number, - epoch: _, - target_epoch: _, - payload_commitment, - data_epoch_payload_commitment: _, - share, - common, - recipient_key, - } = vid_disperse2; - - Self { - view_number, - payload_commitment, - share, - common, - recipient_key, - } - } -} - -impl From> for VidDisperseShare2 { - fn from(vid_disperse: VidDisperseShare) -> Self { - let VidDisperseShare { - view_number, - payload_commitment, - share, - common, - recipient_key, - } = vid_disperse; - - Self { - view_number, - epoch: None, - target_epoch: None, - payload_commitment, - data_epoch_payload_commitment: None, - share, - common, - recipient_key, - } - } -} - -impl VidDisperseShare2 { - /// Create a vector of `VidDisperseShare` from `VidDisperse` - pub fn from_vid_disperse(vid_disperse: VidDisperse) -> Vec { - vid_disperse - .shares - .into_iter() - .map(|(recipient_key, share)| Self { - share, - recipient_key, - view_number: vid_disperse.view_number, - common: vid_disperse.common.clone(), - payload_commitment: vid_disperse.payload_commitment, - data_epoch_payload_commitment: vid_disperse.data_epoch_payload_commitment, - epoch: vid_disperse.epoch, - target_epoch: vid_disperse.target_epoch, - }) - .collect() - } - - /// Consume `self` and return a `Proposal` - pub fn to_proposal( - self, - private_key: &::PrivateKey, - ) -> Option> { - let Ok(signature) = - TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) - else { - tracing::error!("VID: failed to sign dispersal share payload"); - return None; - }; - Some(Proposal { - signature, - _pd: PhantomData, - data: self, - }) - } - - /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s - pub fn to_vid_disperse<'a, I>(mut it: I) -> Option> - where - I: Iterator, - { - let first_vid_disperse_share = it.next()?.clone(); - let mut share_map = BTreeMap::new(); - share_map.insert( - first_vid_disperse_share.recipient_key, - first_vid_disperse_share.share, - ); - let mut vid_disperse = VidDisperse { - view_number: first_vid_disperse_share.view_number, - epoch: first_vid_disperse_share.epoch, - target_epoch: first_vid_disperse_share.target_epoch, - payload_commitment: first_vid_disperse_share.payload_commitment, - data_epoch_payload_commitment: first_vid_disperse_share.data_epoch_payload_commitment, - common: first_vid_disperse_share.common, - shares: share_map, - }; - let _ = it.map(|vid_disperse_share| { - vid_disperse.shares.insert( - vid_disperse_share.recipient_key.clone(), - vid_disperse_share.share.clone(), - ) - }); - Some(vid_disperse) - } - - /// Split a VID share proposal into a proposal for each recipient. - pub fn to_vid_share_proposals( - vid_disperse_proposal: Proposal>, - ) -> Vec> { - vid_disperse_proposal - .data - .shares - .into_iter() - .map(|(recipient_key, share)| Proposal { - data: Self { - share, - recipient_key, - view_number: vid_disperse_proposal.data.view_number, - common: vid_disperse_proposal.data.common.clone(), - payload_commitment: vid_disperse_proposal.data.payload_commitment, - data_epoch_payload_commitment: vid_disperse_proposal - .data - .data_epoch_payload_commitment, - epoch: vid_disperse_proposal.data.epoch, - target_epoch: vid_disperse_proposal.data.target_epoch, - }, - signature: vid_disperse_proposal.signature.clone(), - _pd: vid_disperse_proposal._pd, - }) - .collect() - } -} - /// Proposal to append a block. #[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound(deserialize = ""))] @@ -823,24 +750,6 @@ impl HasViewNumber for DaProposal2 { } } -impl HasViewNumber for VidDisperse { - fn view_number(&self) -> TYPES::View { - self.view_number - } -} - -impl HasViewNumber for VidDisperseShare { - fn view_number(&self) -> TYPES::View { - self.view_number - } -} - -impl HasViewNumber for VidDisperseShare2 { - fn view_number(&self) -> TYPES::View { - self.view_number - } -} - impl HasViewNumber for QuorumProposal { fn view_number(&self) -> TYPES::View { self.view_number @@ -865,11 +774,7 @@ impl HasViewNumber for UpgradeProposal { } } -impl_has_epoch!( - DaProposal2, - VidDisperse, - VidDisperseShare2 -); +impl_has_epoch!(DaProposal2); /// The error type for block and its transactions. #[derive(Error, Debug, Serialize, Deserialize)] diff --git a/types/src/data/vid_disperse.rs b/types/src/data/vid_disperse.rs new file mode 100644 index 0000000000..d416dd8632 --- /dev/null +++ b/types/src/data/vid_disperse.rs @@ -0,0 +1,429 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +//! This module provides types for VID disperse related data structures. + +use crate::{ + impl_has_epoch, + message::Proposal, + simple_vote::HasEpoch, + traits::{ + block_contents::EncodeBytes, election::Membership, node_implementation::NodeType, + signature_key::SignatureKey, + }, + vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType, VidShare}, + vote::HasViewNumber, +}; +use async_lock::RwLock; +use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; +use tokio::task::spawn_blocking; +use utils::anytrace::*; + +impl_has_epoch!(ADVZDisperse, VidDisperseShare2); + +/// ADVZ dispersal data +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct ADVZDisperse { + /// The view number for which this VID data is intended + pub view_number: TYPES::View, + /// Epoch the data of this proposal belongs to + pub epoch: Option, + /// Epoch to which the recipients of this VID belong to + pub target_epoch: Option, + /// VidCommitment calculated based on the number of nodes in `target_epoch`. + pub payload_commitment: VidCommitment, + /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. + pub data_epoch_payload_commitment: Option, + /// A storage node's key and its corresponding VID share + pub shares: BTreeMap, + /// VID common data sent to all storage nodes + pub common: VidCommon, +} + +impl HasViewNumber for ADVZDisperse { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + +impl ADVZDisperse { + /// Create VID dispersal from a specified membership for the target epoch. + /// Uses the specified function to calculate share dispersal + /// Allows for more complex stake table functionality + pub async fn from_membership( + view_number: TYPES::View, + mut vid_disperse: JfVidDisperse, + membership: &Arc>, + target_epoch: Option, + data_epoch: Option, + data_epoch_payload_commitment: Option, + ) -> Self { + let shares = membership + .read() + .await + .committee_members(view_number, target_epoch) + .iter() + .map(|node| (node.clone(), vid_disperse.shares.remove(0))) + .collect(); + + Self { + view_number, + shares, + common: vid_disperse.common, + payload_commitment: vid_disperse.commit, + data_epoch_payload_commitment, + epoch: data_epoch, + target_epoch, + } + } + + /// Calculate the vid disperse information from the payload given a view, epoch and membership, + /// If the sender epoch is missing, it means it's the same as the target epoch. + /// + /// # Errors + /// Returns an error if the disperse or commitment calculation fails + #[allow(clippy::panic)] + pub async fn calculate_vid_disperse( + payload: &TYPES::BlockPayload, + membership: &Arc>, + view: TYPES::View, + target_epoch: Option, + data_epoch: Option, + ) -> Result { + let num_nodes = membership.read().await.total_nodes(target_epoch); + + let txns = payload.encode(); + let txns_clone = Arc::clone(&txns); + let num_txns = txns.len(); + + let vid_disperse = spawn_blocking(move || vid_scheme(num_nodes).disperse(&txns_clone)) + .await + .wrap() + .context(error!("Join error"))? + .wrap() + .context(|err| error!("Failed to calculate VID disperse. Error: {}", err))?; + + let payload_commitment = if target_epoch == data_epoch { + None + } else { + let num_nodes = membership.read().await.total_nodes(data_epoch); + + Some( + spawn_blocking(move || vid_scheme(num_nodes).commit_only(&txns)) + .await + .wrap() + .context(error!("Join error"))? + .wrap() + .context(|err| error!("Failed to calculate VID commitment with (num_storage_nodes, payload_byte_len) = ({}, {}). Error: {}", num_nodes, num_txns, err))? + ) + }; + + Ok(Self::from_membership( + view, + vid_disperse, + membership, + target_epoch, + data_epoch, + payload_commitment, + ) + .await) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +/// ADVZ share and associated metadata for a single node +pub struct ADVZDisperseShare { + /// The view number for which this VID data is intended + pub view_number: TYPES::View, + /// Block payload commitment + pub payload_commitment: VidCommitment, + /// A storage node's key and its corresponding VID share + pub share: VidShare, + /// VID common data sent to all storage nodes + pub common: VidCommon, + /// a public key of the share recipient + pub recipient_key: TYPES::SignatureKey, +} + +impl HasViewNumber for ADVZDisperseShare { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + +impl ADVZDisperseShare { + /// Create a vector of `VidDisperseShare` from `VidDisperse` + pub fn from_advz_disperse(vid_disperse: ADVZDisperse) -> Vec { + vid_disperse + .shares + .into_iter() + .map(|(recipient_key, share)| Self { + share, + recipient_key, + view_number: vid_disperse.view_number, + common: vid_disperse.common.clone(), + payload_commitment: vid_disperse.payload_commitment, + }) + .collect() + } + + /// Consume `self` and return a `Proposal` + pub fn to_proposal( + self, + private_key: &::PrivateKey, + ) -> Option> { + let Ok(signature) = + TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) + else { + tracing::error!("VID: failed to sign dispersal share payload"); + return None; + }; + Some(Proposal { + signature, + _pd: PhantomData, + data: self, + }) + } + + /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s + pub fn to_advz_disperse<'a, I>(mut it: I) -> Option> + where + I: Iterator, + { + let first_vid_disperse_share = it.next()?.clone(); + let mut share_map = BTreeMap::new(); + share_map.insert( + first_vid_disperse_share.recipient_key, + first_vid_disperse_share.share, + ); + let mut vid_disperse = ADVZDisperse { + view_number: first_vid_disperse_share.view_number, + epoch: None, + target_epoch: None, + payload_commitment: first_vid_disperse_share.payload_commitment, + data_epoch_payload_commitment: None, + common: first_vid_disperse_share.common, + shares: share_map, + }; + let _ = it.map(|vid_disperse_share| { + vid_disperse.shares.insert( + vid_disperse_share.recipient_key.clone(), + vid_disperse_share.share.clone(), + ) + }); + Some(vid_disperse) + } + + /// Split a VID share proposal into a proposal for each recipient. + pub fn to_vid_share_proposals( + vid_disperse: ADVZDisperse, + signature: &::PureAssembledSignatureType, + ) -> Vec> { + vid_disperse + .shares + .into_iter() + .map(|(recipient_key, share)| Proposal { + data: Self { + share, + recipient_key, + view_number: vid_disperse.view_number, + common: vid_disperse.common.clone(), + payload_commitment: vid_disperse.payload_commitment, + }, + signature: signature.clone(), + _pd: PhantomData, + }) + .collect() + } + + /// Internally verify the share given necessary information + /// + /// # Errors + /// Verification fail + #[allow(clippy::result_unit_err)] + pub fn verify_share(&self, total_nodes: usize) -> std::result::Result<(), ()> { + vid_scheme(total_nodes) + .verify_share(&self.share, &self.common, &self.payload_commitment) + .unwrap_or(Err(())) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +/// VID share and associated metadata for a single node +pub struct VidDisperseShare2 { + /// The view number for which this VID data is intended + pub view_number: TYPES::View, + /// The epoch number for which this VID data belongs to + pub epoch: Option, + /// The epoch number to which the recipient of this VID belongs to + pub target_epoch: Option, + /// Block payload commitment + pub payload_commitment: VidCommitment, + /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition. + pub data_epoch_payload_commitment: Option, + /// A storage node's key and its corresponding VID share + pub share: VidShare, + /// VID common data sent to all storage nodes + pub common: VidCommon, + /// a public key of the share recipient + pub recipient_key: TYPES::SignatureKey, +} + +impl HasViewNumber for VidDisperseShare2 { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + +impl VidDisperseShare2 { + /// Create a vector of `VidDisperseShare` from `VidDisperse` + pub fn from_vid_disperse(vid_disperse: ADVZDisperse) -> Vec { + vid_disperse + .shares + .into_iter() + .map(|(recipient_key, share)| Self { + share, + recipient_key, + view_number: vid_disperse.view_number, + common: vid_disperse.common.clone(), + payload_commitment: vid_disperse.payload_commitment, + data_epoch_payload_commitment: vid_disperse.data_epoch_payload_commitment, + epoch: vid_disperse.epoch, + target_epoch: vid_disperse.target_epoch, + }) + .collect() + } + + /// Consume `self` and return a `Proposal` + pub fn to_proposal( + self, + private_key: &::PrivateKey, + ) -> Option> { + let Ok(signature) = + TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref()) + else { + tracing::error!("VID: failed to sign dispersal share payload"); + return None; + }; + Some(Proposal { + signature, + _pd: PhantomData, + data: self, + }) + } + + /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s + pub fn to_vid_disperse<'a, I>(mut it: I) -> Option> + where + I: Iterator, + { + let first_vid_disperse_share = it.next()?.clone(); + let mut share_map = BTreeMap::new(); + share_map.insert( + first_vid_disperse_share.recipient_key, + first_vid_disperse_share.share, + ); + let mut vid_disperse = ADVZDisperse { + view_number: first_vid_disperse_share.view_number, + epoch: first_vid_disperse_share.epoch, + target_epoch: first_vid_disperse_share.target_epoch, + payload_commitment: first_vid_disperse_share.payload_commitment, + data_epoch_payload_commitment: first_vid_disperse_share.data_epoch_payload_commitment, + common: first_vid_disperse_share.common, + shares: share_map, + }; + let _ = it.map(|vid_disperse_share| { + vid_disperse.shares.insert( + vid_disperse_share.recipient_key.clone(), + vid_disperse_share.share.clone(), + ) + }); + Some(vid_disperse) + } + + /// Split a VID share proposal into a proposal for each recipient. + pub fn to_vid_share_proposals( + vid_disperse: ADVZDisperse, + signature: &::PureAssembledSignatureType, + ) -> Vec> { + vid_disperse + .shares + .into_iter() + .map(|(recipient_key, share)| Proposal { + data: Self { + share, + recipient_key, + view_number: vid_disperse.view_number, + common: vid_disperse.common.clone(), + payload_commitment: vid_disperse.payload_commitment, + data_epoch_payload_commitment: vid_disperse.data_epoch_payload_commitment, + epoch: vid_disperse.epoch, + target_epoch: vid_disperse.target_epoch, + }, + signature: signature.clone(), + _pd: PhantomData, + }) + .collect() + } + + /// Internally verify the share given necessary information + /// + /// # Errors + #[allow(clippy::result_unit_err)] + pub fn verify_share(&self, total_nodes: usize) -> std::result::Result<(), ()> { + vid_scheme(total_nodes) + .verify_share(&self.share, &self.common, &self.payload_commitment) + .unwrap_or(Err(())) + } +} + +impl From> for ADVZDisperseShare { + fn from(vid_disperse2: VidDisperseShare2) -> Self { + let VidDisperseShare2 { + view_number, + epoch: _, + target_epoch: _, + payload_commitment, + data_epoch_payload_commitment: _, + share, + common, + recipient_key, + } = vid_disperse2; + + Self { + view_number, + payload_commitment, + share, + common, + recipient_key, + } + } +} + +impl From> for VidDisperseShare2 { + fn from(vid_disperse: ADVZDisperseShare) -> Self { + let ADVZDisperseShare { + view_number, + payload_commitment, + share, + common, + recipient_key, + } = vid_disperse; + + Self { + view_number, + epoch: None, + target_epoch: None, + payload_commitment, + data_epoch_payload_commitment: None, + share, + common, + recipient_key, + } + } +} diff --git a/types/src/event.rs b/types/src/event.rs index 7c57668188..7d73a109d1 100644 --- a/types/src/event.rs +++ b/types/src/event.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DaProposal2, Leaf2, QuorumProposalWrapper, UpgradeProposal, VidDisperseShare2}, + data::{DaProposal2, Leaf2, QuorumProposalWrapper, UpgradeProposal, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate2, @@ -42,7 +42,7 @@ pub struct LeafInfo { /// Optional application-specific state delta. pub delta: Option::ValidatedState as ValidatedState>::Delta>>, /// Optional VID share data. - pub vid_share: Option>, + pub vid_share: Option>, } impl LeafInfo { @@ -51,7 +51,7 @@ impl LeafInfo { leaf: Leaf2, state: Arc<::ValidatedState>, delta: Option::ValidatedState as ValidatedState>::Delta>>, - vid_share: Option>, + vid_share: Option>, ) -> Self { Self { leaf, diff --git a/types/src/message.rs b/types/src/message.rs index 6f6020cbd2..7dca495314 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -26,8 +26,9 @@ use vbs::{ use crate::{ data::{ + vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, - QuorumProposalWrapper, UpgradeProposal, VidDisperseShare, VidDisperseShare2, + QuorumProposalWrapper, UpgradeProposal, }, request_response::ProposalRequestPayload, simple_certificate::{ @@ -263,7 +264,7 @@ pub enum DaConsensusMessage { /// Initiate VID dispersal. /// /// Like [`DaProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. - VidDisperseMsg(Proposal>), + VidDisperseMsg(Proposal>), /// Proposal for data availability committee DaProposal2(Proposal>), @@ -363,7 +364,6 @@ impl SequencingMessage { DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), - DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.view_number(), DaConsensusMessage::DaProposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt @@ -371,6 +371,7 @@ impl SequencingMessage { } DaConsensusMessage::DaVote2(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate2(cert) => cert.view_number, + DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.view_number(), } } } diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index 60be1e517e..a2a6658c00 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -20,8 +20,9 @@ use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, data::{ + vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, - QuorumProposalWrapper, VidDisperseShare, VidDisperseShare2, + QuorumProposalWrapper, }, event::HotShotAction, message::{convert_proposal, Proposal}, @@ -35,8 +36,9 @@ use crate::{ #[async_trait] pub trait Storage: Send + Sync + Clone { /// Add a proposal to the stored VID proposals. - async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; + async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; /// Add a proposal to the stored VID proposals. + /// TODO(Chengyu): change here because in the future disperse share types might not be convertible. async fn append_vid2( &self, proposal: &Proposal>, diff --git a/types/src/vid.rs b/types/src/vid.rs index 4ce85d87b0..c1a185ab62 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -36,7 +36,7 @@ use sha2::Sha256; use crate::{ constants::SRS_DEGREE, - data::{VidDisperse as HotShotVidDisperse, VidDisperseShare2}, + data::{VidDisperse as HotShotVidDisperse, VidDisperseShare}, message::Proposal, }; @@ -111,7 +111,7 @@ pub type VidShare = ::Share; /// VID proposal type pub type VidProposal = ( Proposal>, - Vec>>, + Vec>>, ); #[cfg(not(feature = "gpu-vid"))] From 82dfba327c24ea70bd583713513e2e1680b3ca43 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 29 Jan 2025 10:43:13 -0500 Subject: [PATCH 1374/1393] Generalized request/response protocol (#4071) * tcp -> quic patch * add r/r protocol * `Serializable` * split out everything * refactor messages, test serialization * move request validation and add tests for it * add handlers * finish most of the responding logic * `blake3`, `bounded_vec_deque` * testing-only `PartialEq` * finish sending logic * move to separate crate, `clippy` * `Vec` -> `Arc>`, begin integration tests * integration tests * `1_10s` -> `1_20s` * reduce test time * revert unnecessary `transport` change * un-type-alias export * remove dep from utils * `fmt` * add `VecDeque` test * add some helpful comments * even moar comments * general improvements * nice refactor * comments, readability * reorg * add test for request joining * add a helper function to request indefinitely * `fmt` * merge main in * run `hakari` * use `tokio`'s `AbortOnDropHandle` instead * cargo hakari * make everything async * move request validation logic up * hakari --- hotshot/Cargo.toml | 4 +- .../src/traits/networking/combined_network.rs | 14 +- libp2p-networking/Cargo.toml | 2 +- macros/Cargo.toml | 2 +- request-response/Cargo.toml | 32 + request-response/src/data_source.rs | 15 + request-response/src/lib.rs | 959 ++++++++++++++++++ request-response/src/message.rs | 449 ++++++++ request-response/src/network.rs | 58 ++ request-response/src/recipient_source.rs | 13 + request-response/src/request.rs | 35 + request-response/src/util.rs | 45 + workspace-hack/Cargo.toml | 4 + 13 files changed, 1617 insertions(+), 15 deletions(-) create mode 100644 request-response/Cargo.toml create mode 100644 request-response/src/data_source.rs create mode 100644 request-response/src/lib.rs create mode 100644 request-response/src/message.rs create mode 100644 request-response/src/network.rs create mode 100644 request-response/src/recipient_source.rs create mode 100644 request-response/src/request.rs create mode 100644 request-response/src/util.rs diff --git a/hotshot/Cargo.toml b/hotshot/Cargo.toml index 9290290b77..074140e081 100644 --- a/hotshot/Cargo.toml +++ b/hotshot/Cargo.toml @@ -30,7 +30,7 @@ cdn-client = { workspace = true } cdn-marshal = { workspace = true } chrono = { workspace = true } committable = { workspace = true } -dashmap = "6" +dashmap = { workspace = true } derive_more = { workspace = true } either = { workspace = true } futures = { workspace = true } @@ -41,7 +41,7 @@ libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } lru = { workspace = true } num_enum = "0.7" -parking_lot = "0.12" +parking_lot.workspace = true portpicker = "0.1" primitive-types = { workspace = true } rand = { workspace = true } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 298500b473..438f790d68 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -7,9 +7,8 @@ //! Networking Implementation that has a primary and a fallback network. If the primary //! Errors we will use the backup to send or receive use std::{ - collections::{hash_map::DefaultHasher, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, future::Future, - hash::{Hash, Hasher}, num::NonZeroUsize, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -47,13 +46,6 @@ use tracing::{debug, info, warn}; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; use crate::traits::implementations::Libp2pNetwork; -/// Helper function to calculate a hash of a type that implements Hash -pub fn calculate_hash_of(t: &T) -> u64 { - let mut s = DefaultHasher::new(); - t.hash(&mut s); - s.finish() -} - /// Thread-safe ref counted lock to a map of channels to the delayed tasks type DelayedTasksChannelsMap = Arc, InactiveReceiver<()>)>>>; @@ -65,7 +57,7 @@ pub struct CombinedNetworks { networks: Arc>, /// Last n seen messages to prevent processing duplicates - message_cache: Arc>>, + message_cache: Arc>>, /// How many times primary failed to deliver primary_fail_counter: Arc, @@ -457,7 +449,7 @@ impl ConnectedNetwork for CombinedNetworks }; // Calculate hash of the message - let message_hash = calculate_hash_of(&message); + let message_hash = blake3::hash(&message); // Check if the hash is in the cache and update the cache if self.message_cache.write().put(message_hash, ()).is_none() { diff --git a/libp2p-networking/Cargo.toml b/libp2p-networking/Cargo.toml index e62e0f6b27..a7f684a67d 100644 --- a/libp2p-networking/Cargo.toml +++ b/libp2p-networking/Cargo.toml @@ -21,7 +21,7 @@ bincode = { workspace = true } blake3 = { workspace = true } cbor4ii = { workspace = true } delegate = "0.13" -derive_builder = "0.20" +derive_builder.workspace = true derive_more = { workspace = true } futures = { workspace = true } hotshot-types = { path = "../types" } diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 5d8298c9cc..7076349445 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -5,7 +5,7 @@ edition = { workspace = true } description = "Macros for hotshot tests" [dependencies] -derive_builder = "0.20" +derive_builder.workspace = true proc-macro2 = "1" # proc macro stuff quote = "1" diff --git a/request-response/Cargo.toml b/request-response/Cargo.toml new file mode 100644 index 0000000000..77a4b9cf2e --- /dev/null +++ b/request-response/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "request-response" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +homepage.workspace = true +documentation.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dev-dependencies] +serde.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +hotshot-types = { path = "../types" } +byteorder = { version = "1", default-features = false } +bincode.workspace = true +blake3.workspace = true +rand.workspace = true +tokio.workspace = true +parking_lot.workspace = true +derive_more.workspace = true +tracing.workspace = true +async-broadcast.workspace = true +derive_builder.workspace = true +thiserror.workspace = true +tokio-util = { version = "0.7", default-features = false, features = ["rt"] } \ No newline at end of file diff --git a/request-response/src/data_source.rs b/request-response/src/data_source.rs new file mode 100644 index 0000000000..43305f6b9c --- /dev/null +++ b/request-response/src/data_source.rs @@ -0,0 +1,15 @@ +//! This file contains the [`DataSource`] trait. This trait allows the [`RequestResponseProtocol`] +//! to calculate/derive a response for a specific request. In the confirmation layer the implementer +//! would be something like a [`FeeMerkleTree`] for fee catchup + +use anyhow::Result; +use async_trait::async_trait; + +use super::request::Request; + +/// The trait that allows the [`RequestResponseProtocol`] to calculate/derive a response for a specific request +#[async_trait] +pub trait DataSource: Send + Sync + 'static + Clone { + /// Calculate/derive the response for a specific request + async fn derive_response_for(&self, request: &R) -> Result; +} diff --git a/request-response/src/lib.rs b/request-response/src/lib.rs new file mode 100644 index 0000000000..8c0ac61987 --- /dev/null +++ b/request-response/src/lib.rs @@ -0,0 +1,959 @@ +//! This crate contains a general request-response protocol. It is used to send requests to +//! a set of recipients and wait for responses. + +use std::collections::HashMap; +use std::sync::Weak; +use std::time::Instant; +use std::{marker::PhantomData, sync::Arc, time::Duration}; + +use anyhow::{anyhow, Context, Result}; +use data_source::DataSource; +use derive_builder::Builder; +use derive_more::derive::Deref; +use hotshot_types::traits::signature_key::SignatureKey; +use message::{Message, RequestMessage, ResponseMessage}; +use network::{Bytes, Receiver, Sender}; +use parking_lot::RwLock; +use rand::seq::SliceRandom; +use recipient_source::RecipientSource; +use request::{Request, Response}; +use tokio::spawn; +use tokio::time::{sleep, timeout}; +use tokio_util::task::AbortOnDropHandle; +use tracing::{error, warn}; +use util::BoundedVecDeque; + +/// The data source trait. Is what we use to derive the response data for a request +pub mod data_source; +/// The message type. Is the base type for all messages in the request-response protocol +pub mod message; +/// The network traits. Is what we use to send and receive messages over the network as +/// the protocol +pub mod network; +/// The recipient source trait. Is what we use to get the recipients that a specific message should +/// expect responses from +pub mod recipient_source; +/// The request trait. Is what we use to define a request and a corresponding response type +pub mod request; +/// Utility types and functions +mod util; + +/// A type alias for the hash of a request +pub type RequestHash = blake3::Hash; + +/// A type alias for the active request map +pub type ActiveRequestsMap = Arc>>>>; + +/// A type alias for the list of tasks that are responding to requests +pub type OutgoingResponses = BoundedVecDeque>; + +/// A type alias for the list of tasks that are validating incoming responses +pub type IncomingResponses = BoundedVecDeque>; + +/// The errors that can occur when making a request for data +#[derive(thiserror::Error, Debug)] +pub enum RequestError { + /// The request timed out + #[error("request timed out")] + Timeout, + /// The request was invalid + #[error("request was invalid")] + InvalidRequest(anyhow::Error), + /// Other errors + #[error("other error")] + Other(anyhow::Error), +} + +/// A trait for serializing and deserializing a type to and from a byte array. [`Request`] types and +/// [`Response`] types will need to implement this trait +pub trait Serializable: Sized { + /// Serialize the type to a byte array. If this is for a [`Request`] and your [`Request`] type + /// is represented as an enum, please make sure that you serialize it with a unique type ID. Otherwise, + /// you may end up with collisions as the request hash is used as a unique identifier + /// + /// # Errors + /// - If the type cannot be serialized to a byte array + fn to_bytes(&self) -> Result>; + + /// Deserialize the type from a byte array + /// + /// # Errors + /// - If the byte array is not a valid representation of the type + fn from_bytes(bytes: &[u8]) -> Result; +} + +/// The underlying configuration for the request-response protocol +#[derive(Clone, Builder)] +pub struct RequestResponseConfig { + /// The timeout for incoming requests. Do not respond to a request after this threshold + /// has passed. + incoming_request_ttl: Duration, + /// The maximum amount of time we will spend trying to both derive a response for a request and + /// send the response over the wire. + response_send_timeout: Duration, + /// The maximum amount of time we will spend trying to validate a response. This is used to prevent + /// an attack where a malicious participant sends us a bunch of requests that take a long time to + /// validate. + response_validate_timeout: Duration, + /// The batch size for outgoing requests. This is the number of request messages that we will + /// send out at a time for a single request before waiting for the [`request_batch_interval`]. + request_batch_size: usize, + /// The time to wait (per request) between sending out batches of request messages + request_batch_interval: Duration, + /// The maximum (global) number of outgoing responses that can be in flight at any given time + max_outgoing_responses: usize, + /// The maximum (global) number of incoming responses that can be processed at any given time. + /// We need this because responses coming in need to be validated [asynchronously] that they + /// satisfy the request they are responding to + max_incoming_responses: usize, +} + +/// A protocol that allows for request-response communication. Is cheaply cloneable, so there is no +/// need to wrap it in an `Arc` +#[derive(Clone, Deref)] +pub struct RequestResponse< + S: Sender, + R: Receiver, + Req: Request, + RS: RecipientSource, + DS: DataSource, + K: SignatureKey + 'static, +> { + #[deref] + /// The inner implementation of the request-response protocol + inner: Arc>, + /// A handle to the receiving task. This will automatically get cancelled when the protocol is dropped + _receiving_task_handle: Arc>, +} + +impl< + S: Sender, + R: Receiver, + Req: Request, + RS: RecipientSource, + DS: DataSource, + K: SignatureKey + 'static, + > RequestResponse +{ + /// Create a new [`RequestResponseProtocol`] + pub fn new( + // The configuration for the protocol + config: RequestResponseConfig, + // The network sender that [`RequestResponseProtocol`] will use to send messages + sender: S, + // The network receiver that [`RequestResponseProtocol`] will use to receive messages + receiver: R, + // The recipient source that [`RequestResponseProtocol`] will use to get the recipients + // that a specific message should expect responses from + recipient_source: RS, + // The [response] data source that [`RequestResponseProtocol`] will use to derive the + // response data for a specific request + data_source: DS, + ) -> Self { + // Create the active requests map + let active_requests = ActiveRequestsMap::default(); + + // Create the inner implementation + let inner = Arc::new(RequestResponseInner { + config, + sender, + recipient_source, + data_source, + active_requests, + phantom_data: PhantomData, + }); + + // Start the task that receives messages and handles them. This will automatically get cancelled + // when the protocol is dropped + let inner_clone = Arc::clone(&inner); + let receive_task_handle = + AbortOnDropHandle::new(tokio::spawn(inner_clone.receiving_task(receiver))); + + // Return the protocol + Self { + inner, + _receiving_task_handle: Arc::new(receive_task_handle), + } + } +} + +/// The inner implementation for the request-response protocol +pub struct RequestResponseInner< + S: Sender, + R: Receiver, + Req: Request, + RS: RecipientSource, + DS: DataSource, + K: SignatureKey + 'static, +> { + /// The configuration of the protocol + config: RequestResponseConfig, + /// The sender to use for the protocol + sender: S, + /// The recipient source to use for the protocol + recipient_source: RS, + /// The data source to use for the protocol + data_source: DS, + /// The map of currently active requests + active_requests: ActiveRequestsMap, + /// Phantom data to help with type inference + phantom_data: PhantomData<(K, R, Req, DS)>, +} +impl< + S: Sender, + R: Receiver, + Req: Request, + RS: RecipientSource, + DS: DataSource, + K: SignatureKey + 'static, + > RequestResponseInner +{ + /// Request something from the protocol indefinitely until we get a response + /// or there was a critical error (e.g. the request could not be signed) + /// + /// # Errors + /// - If the request was invalid + /// - If there was a critical error (e.g. the channel was closed) + pub async fn request_indefinitely( + self: &Arc, + public_key: &K, + private_key: &K::PrivateKey, + // The estimated TTL of other participants. This is used to decide when to + // stop making requests and sign a new one + estimated_request_ttl: Duration, + // The request to make + request: Req, + ) -> std::result::Result { + loop { + // Sign a request message + let request_message = RequestMessage::new_signed(public_key, private_key, &request) + .map_err(|e| { + RequestError::InvalidRequest(anyhow::anyhow!( + "failed to sign request message: {e}" + )) + })?; + + // Request the data, handling the errors appropriately + match self.request(request_message, estimated_request_ttl).await { + Ok(response) => return Ok(response), + Err(RequestError::Timeout) => continue, + Err(e) => return Err(e), + } + } + } + + /// Request something from the protocol and wait for the response. This function + /// will join with an existing request for the same data (determined by `Blake3` hash), + /// however both will make requests until the timeout is reached + /// + /// # Errors + /// - If the request times out + /// - If the channel is closed (this is an internal error) + /// - If the request we sign is invalid + pub async fn request( + self: &Arc, + request_message: RequestMessage, + timeout_duration: Duration, + ) -> std::result::Result { + timeout(timeout_duration, async move { + // Calculate the hash of the request + let request_hash = blake3::hash(&request_message.request.to_bytes().map_err(|e| { + RequestError::InvalidRequest(anyhow::anyhow!( + "failed to serialize request message: {e}" + )) + })?); + + let request = { + // Get a write lock on the active requests map + let mut active_requests_write = self.active_requests.write(); + + // Conditionally get the active request, creating a new one if it doesn't exist or if + // the existing one has been dropped and not yet removed + if let Some(active_request) = active_requests_write + .get(&request_hash) + .and_then(Weak::upgrade) + { + ActiveRequest(active_request) + } else { + // Create a new broadcast channel for the response + let (sender, receiver) = async_broadcast::broadcast(1); + + // Create a new active request + let active_request = ActiveRequest(Arc::new(ActiveRequestInner { + sender, + receiver, + request: request_message.request.clone(), + active_requests: Arc::clone(&self.active_requests), + request_hash, + })); + + // Write the new active request to the map + active_requests_write.insert(request_hash, Arc::downgrade(&active_request.0)); + + // Return the new active request + active_request + } + }; + + // Get the recipients that the request should expect responses from. Shuffle them so + // that we don't always send to the same recipients in the same order + let mut recipients = self + .recipient_source + .get_recipients_for(&request_message.request) + .await; + recipients.shuffle(&mut rand::thread_rng()); + + // Create a request message and serialize it + let message = + Bytes::from(Message::Request(request_message).to_bytes().map_err(|e| { + RequestError::InvalidRequest(anyhow::anyhow!( + "failed to serialize request message: {e}" + )) + })?); + + // Get the current time so we can check when the timeout has elapsed + let start_time = Instant::now(); + + // Spawn a task that sends out requests to the network + let self_clone = Arc::clone(self); + let _handle = AbortOnDropHandle::new(spawn(async move { + // Create a bounded queue for the outgoing requests. We use this to make sure + // we have less than [`config.request_batch_size`] requests in flight at any time. + // + // When newer requests are added, older ones are removed from the queue. Because we use + // `AbortOnDropHandle`, the older ones will automatically get cancelled + let mut outgoing_requests = + BoundedVecDeque::new(self_clone.config.request_batch_size); + + // While the timeout hasn't elapsed, send out requests to the network + while start_time.elapsed() < timeout_duration { + // Send out requests to the network in their own separate tasks + for recipient_batch in recipients.chunks(self_clone.config.request_batch_size) { + for recipient in recipient_batch { + // Clone ourselves, the message, and the recipient so they can be moved + let self_clone = Arc::clone(&self_clone); + let recipient_clone = recipient.clone(); + let message_clone = Arc::clone(&message); + + // Spawn the task that sends the request to the participant + let individual_sending_task = spawn(async move { + let _ = self_clone + .sender + .send_message(&message_clone, recipient_clone) + .await; + }); + + // Add the sending task to the queue + outgoing_requests.push(AbortOnDropHandle::new(individual_sending_task)); + } + + // After we send the batch out, wait the [`config.request_batch_interval`] + // before sending the next one + sleep(self_clone.config.request_batch_interval).await; + } + } + })); + + // Wait for a response on the channel + request + .receiver + .clone() + .recv() + .await + .map_err(|_| RequestError::Other(anyhow!("channel was closed"))) + }) + .await + .map_err(|_| RequestError::Timeout) + .and_then(|result| result) + } + + /// The task responsible for receiving messages from the receiver and handling them + async fn receiving_task(self: Arc, mut receiver: R) { + // Upper bound the number of outgoing and incoming responses + let mut outgoing_responses = BoundedVecDeque::new(self.config.max_outgoing_responses); + let mut incoming_responses = BoundedVecDeque::new(self.config.max_incoming_responses); + + // While the receiver is open, we receive messages and handle them + loop { + // Try to receive a message + match receiver.receive_message().await { + Ok(message) => { + // Deserialize the message, warning if it fails + let message = match Message::from_bytes(&message) { + Ok(message) => message, + Err(e) => { + warn!("Received invalid message: {e}"); + continue; + } + }; + + // Handle the message based on its type + match message { + Message::Request(request_message) => { + self.handle_request(request_message, &mut outgoing_responses); + } + Message::Response(response_message) => { + self.handle_response(response_message, &mut incoming_responses); + } + } + } + // An error here means the receiver will _NEVER_ receive any more messages + Err(e) => { + error!("Request/response receive task exited: {e}"); + return; + } + } + } + } + + /// Handle a request sent to us + fn handle_request( + self: &Arc, + request_message: RequestMessage, + outgoing_responses: &mut OutgoingResponses, + ) { + // Spawn a task to: + // - Validate the request + // - Derive the response data (check if we have it) + // - Send the response to the requester + let self_clone = Arc::clone(self); + let response_task = AbortOnDropHandle::new(tokio::spawn(async move { + let result = timeout(self_clone.config.response_send_timeout, async move { + // Validate the request message. This includes: + // - Checking the signature and making sure it's valid + // - Checking the timestamp and making sure it's not too old + // - Calling the request's application-specific validation function + request_message + .validate(self_clone.config.incoming_request_ttl) + .await + .with_context(|| "failed to validate request")?; + + // Try to fetch the response data from the data source + let response = self_clone + .data_source + .derive_response_for(&request_message.request) + .await + .with_context(|| "failed to derive response for request")?; + + // Create the response message and serialize it + let response = Bytes::from( + Message::Response::(ResponseMessage { + request_hash: blake3::hash(&request_message.request.to_bytes()?), + response, + }) + .to_bytes() + .with_context(|| "failed to serialize response message")?, + ); + + // Send the response to the requester + self_clone + .sender + .send_message(&response, request_message.public_key) + .await + .with_context(|| "failed to send response to requester")?; + + Ok::<(), anyhow::Error>(()) + }) + .await + .map_err(|_| anyhow::anyhow!("timed out while sending response")) + .and_then(|result| result); + + if let Err(e) = result { + warn!("Failed to send response to requester: {e}"); + } + })); + + // Add the response task to the outgoing responses queue. This will automatically cancel an older task + // if there are more than [`config.max_outgoing_responses`] responses in flight. + outgoing_responses.push(response_task); + } + + /// Handle a response sent to us + fn handle_response( + self: &Arc, + response: ResponseMessage, + incoming_responses: &mut IncomingResponses, + ) { + // Get the entry in the map, ignoring it if it doesn't exist + let Some(active_request) = self + .active_requests + .read() + .get(&response.request_hash) + .cloned() + .and_then(|r| r.upgrade()) + else { + return; + }; + + // Spawn a task to validate the response and send it to the requester (us) + let response_validate_timeout = self.config.response_validate_timeout; + let response_task = AbortOnDropHandle::new(tokio::spawn(async move { + if timeout(response_validate_timeout, async move { + // Make sure the response is valid for the given request + if let Err(e) = response.response.validate(&active_request.request).await { + warn!("Received invalid response: {e}"); + return; + } + + // Send the response to the requester (the user of [`RequestResponse::request`]) + let _ = active_request.sender.try_broadcast(response.response); + }) + .await + .is_err() + { + warn!("Timed out while validating response"); + } + })); + + // Add the response task to the incoming responses queue. This will automatically cancel an older task + // if there are more than [`config.max_incoming_responses`] responses being processed + incoming_responses.push(response_task); + } +} + +/// An active request. This is what we use to track a request and its corresponding response +/// in the protocol +#[derive(Clone, Deref)] +pub struct ActiveRequest(Arc>); + +/// The inner implementation of an active request +pub struct ActiveRequestInner { + /// The sender to use for the protocol + sender: async_broadcast::Sender, + /// The receiver to use for the protocol + receiver: async_broadcast::Receiver, + /// The request that we are waiting for a response to + request: R, + + /// A copy of the map of currently active requests + active_requests: ActiveRequestsMap, + /// The hash of the request. We need this so we can remove ourselves from the map + request_hash: RequestHash, +} + +impl Drop for ActiveRequestInner { + fn drop(&mut self) { + self.active_requests.write().remove(&self.request_hash); + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::HashMap, + sync::{atomic::AtomicBool, Mutex}, + }; + + use async_trait::async_trait; + use hotshot_types::signature_key::{BLSPrivKey, BLSPubKey}; + use rand::Rng; + use tokio::{sync::mpsc, task::JoinSet}; + + use super::*; + + /// This test makes sure that when all references to an active request are dropped, it is + /// removed from the active requests map + #[test] + fn test_active_request_drop() { + // Create an active requests map + let active_requests = ActiveRequestsMap::default(); + + // Create an active request + let (sender, receiver) = async_broadcast::broadcast(1); + let active_request = ActiveRequest(Arc::new(ActiveRequestInner { + sender, + receiver, + request: TestRequest(vec![1, 2, 3]), + active_requests: Arc::clone(&active_requests), + request_hash: blake3::hash(&[1, 2, 3]), + })); + + // Insert the active request into the map + active_requests.write().insert( + active_request.request_hash, + Arc::downgrade(&active_request.0), + ); + + // Clone the active request + let active_request_clone = active_request.clone(); + + // Drop the active request + drop(active_request); + + // Make sure nothing has been removed + assert_eq!(active_requests.read().len(), 1); + + // Drop the clone + drop(active_request_clone); + + // Make sure it has been removed + assert_eq!(active_requests.read().len(), 0); + } + + /// A test sender that has a list of all the participants in the network + #[derive(Clone)] + pub struct TestSender { + network: Arc>>, + } + + /// An implementation of the [`Sender`] trait for the [`TestSender`] type + #[async_trait] + impl Sender for TestSender { + async fn send_message(&self, message: &Bytes, recipient: BLSPubKey) -> Result<()> { + self.network + .get(&recipient) + .ok_or(anyhow::anyhow!("recipient not found"))? + .send(Arc::clone(message)) + .await + .map_err(|_| anyhow::anyhow!("failed to send message"))?; + + Ok(()) + } + } + + // Implement the [`RecipientSource`] trait for the [`TestSender`] type + #[async_trait] + impl RecipientSource for TestSender { + async fn get_recipients_for(&self, _request: &R) -> Vec { + // Get all the participants in the network + self.network.keys().copied().collect() + } + } + + // Create a test request that is just some bytes + #[derive(Clone, Debug)] + struct TestRequest(Vec); + + // Implement the [`Serializable`] trait for the [`TestRequest`] type + impl Serializable for TestRequest { + fn to_bytes(&self) -> Result> { + Ok(self.0.clone()) + } + + fn from_bytes(bytes: &[u8]) -> Result { + Ok(TestRequest(bytes.to_vec())) + } + } + + // Implement the [`Request`] trait for the [`TestRequest`] type + #[async_trait] + impl Request for TestRequest { + type Response = Vec; + async fn validate(&self) -> Result<()> { + Ok(()) + } + } + + // Implement the [`Response`] trait for the [`TestRequest`] type + #[async_trait] + impl Response for Vec { + async fn validate(&self, _request: &TestRequest) -> Result<()> { + Ok(()) + } + } + + // Create a test data source that pretends to have the data or not + #[derive(Clone)] + struct TestDataSource { + /// Whether we have the data or not + has_data: bool, + /// The time at which the data will be available if we have it + data_available_time: Instant, + + /// Whether or not the data will be taken once served + take_data: bool, + /// Whether or not the data has been taken + taken: Arc, + } + + #[async_trait] + impl DataSource> for TestDataSource { + async fn derive_response_for(&self, request: &Vec) -> Result> { + // Return a response if we hit the hit rate + if self.has_data && Instant::now() >= self.data_available_time { + if self.take_data && !self.taken.swap(true, std::sync::atomic::Ordering::Relaxed) { + return Err(anyhow::anyhow!("data already taken")); + } + Ok(blake3::hash(request).as_bytes().to_vec()) + } else { + Err(anyhow::anyhow!("did not have the data")) + } + } + } + + /// Create and return a default protocol configuration + fn default_protocol_config() -> RequestResponseConfig { + RequestResponseConfigBuilder::create_empty() + .incoming_request_ttl(Duration::from_secs(40)) + .response_send_timeout(Duration::from_secs(40)) + .request_batch_size(10) + .request_batch_interval(Duration::from_millis(100)) + .max_outgoing_responses(10) + .response_validate_timeout(Duration::from_secs(1)) + .max_incoming_responses(5) + .build() + .expect("failed to build config") + } + + /// Create fully connected test networks with `num_participants` participants + fn create_participants( + num: usize, + ) -> Vec<(TestSender, mpsc::Receiver, (BLSPubKey, BLSPrivKey))> { + // The entire network + let mut network = HashMap::new(); + + // All receivers in the network + let mut receivers = Vec::new(); + + // All keypairs in the network + let mut keypairs = Vec::new(); + + // For each participant, + for i in 0..num { + // Create a unique `BLSPubKey` + let (public_key, private_key) = + BLSPubKey::generated_from_seed_indexed([2; 32], i.try_into().unwrap()); + + // Add the keypair to the list + keypairs.push((public_key, private_key)); + + // Create a channel for sending and receiving messages + let (sender, receiver) = mpsc::channel::(100); + + // Add the participant to the network + network.insert(public_key, sender); + + // Add the receiver to the list of receivers + receivers.push(receiver); + } + + // Create a test sender from the network + let sender = TestSender { + network: Arc::new(network), + }; + + // Return all senders and receivers + receivers + .into_iter() + .zip(keypairs) + .map(|(r, k)| (sender.clone(), r, k)) + .collect() + } + + /// The configuration for an integration test + #[derive(Clone)] + struct IntegrationTestConfig { + /// The request response protocol configuration + request_response_config: RequestResponseConfig, + /// The number of participants in the network + num_participants: usize, + /// The number of participants that have the data + num_participants_with_data: usize, + /// The timeout for the requests + request_timeout: Duration, + /// The delay before the nodes have the data available + data_available_delay: Duration, + } + + /// The result of an integration test + struct IntegrationTestResult { + /// The number of nodes that received a response + num_succeeded: usize, + } + + /// Run an integration test with the given parameters + async fn run_integration_test(config: IntegrationTestConfig) -> IntegrationTestResult { + // Create a fully connected network with `num_participants` participants + let participants = create_participants(config.num_participants); + + // Create a join set to wait for all the tasks to finish + let mut join_set = JoinSet::new(); + + // We need to keep these here so they don't get dropped + let handles = Arc::new(Mutex::new(Vec::new())); + + // For each one, create a new [`RequestResponse`] protocol + for (i, (sender, receiver, (public_key, private_key))) in + participants.into_iter().enumerate() + { + let config_clone = config.request_response_config.clone(); + let handles_clone = Arc::clone(&handles); + join_set.spawn(async move { + let protocol = RequestResponse::new( + config_clone, + sender.clone(), + receiver, + sender, + TestDataSource { + has_data: i < config.num_participants_with_data, + data_available_time: Instant::now() + config.data_available_delay, + take_data: false, + taken: Arc::new(AtomicBool::new(false)), + }, + ); + + // Add the handle to the handles list so it doesn't get dropped and + // cancelled + #[allow(clippy::used_underscore_binding)] + handles_clone + .lock() + .unwrap() + .push(Arc::clone(&protocol._receiving_task_handle)); + + // Create a random request + let request = vec![rand::thread_rng().gen(); 100]; + + // Get the hash of the request + let request_hash = blake3::hash(&request).as_bytes().to_vec(); + + // Create a new request message + let request = RequestMessage::new_signed(&public_key, &private_key, &request) + .expect("failed to create request message"); + + // Request the data from the protocol + let response = protocol.request(request, config.request_timeout).await?; + + // Make sure the response is the hash of the request + assert_eq!(response, request_hash); + + Ok::<(), anyhow::Error>(()) + }); + } + + // Wait for all the tasks to finish + let mut num_succeeded = config.num_participants; + while let Some(result) = join_set.join_next().await { + if result.is_err() || result.unwrap().is_err() { + num_succeeded -= 1; + } + } + + IntegrationTestResult { num_succeeded } + } + + /// Test the integration of the protocol with 50% of the participants having the data + #[tokio::test(flavor = "multi_thread")] + async fn test_integration_50_0s() { + // Build a config + let config = IntegrationTestConfig { + request_response_config: default_protocol_config(), + num_participants: 100, + num_participants_with_data: 50, + request_timeout: Duration::from_secs(40), + data_available_delay: Duration::from_secs(0), + }; + + // Run the test, making sure all the requests succeed + let result = run_integration_test(config).await; + assert_eq!(result.num_succeeded, 100); + } + + /// Test the integration of the protocol when nobody has the data. Make sure we don't + /// get any responses + #[tokio::test(flavor = "multi_thread")] + async fn test_integration_0() { + // Build a config + let config = IntegrationTestConfig { + request_response_config: default_protocol_config(), + num_participants: 100, + num_participants_with_data: 0, + request_timeout: Duration::from_secs(40), + data_available_delay: Duration::from_secs(0), + }; + + // Run the test + let result = run_integration_test(config).await; + + // Make sure all the requests succeeded + assert_eq!(result.num_succeeded, 0); + } + + /// Test the integration of the protocol when one node has the data after + /// a delay of 1s + #[tokio::test(flavor = "multi_thread")] + async fn test_integration_1_1s() { + // Build a config + let config = IntegrationTestConfig { + request_response_config: default_protocol_config(), + num_participants: 100, + num_participants_with_data: 1, + request_timeout: Duration::from_secs(40), + data_available_delay: Duration::from_secs(2), + }; + + // Run the test + let result = run_integration_test(config).await; + + // Make sure all the requests succeeded + assert_eq!(result.num_succeeded, 100); + } + + /// Test that we can join an existing request for the same data and get the same (single) response + #[tokio::test(flavor = "multi_thread")] + async fn test_join_existing_request() { + // Build a config + let config = default_protocol_config(); + + // Create two participants + let mut participants = Vec::new(); + + for (sender, receiver, (public_key, private_key)) in create_participants(2) { + // For each, create a new [`RequestResponse`] protocol + let protocol = RequestResponse::new( + config.clone(), + sender.clone(), + receiver, + sender, + TestDataSource { + take_data: true, + has_data: true, + data_available_time: Instant::now() + Duration::from_secs(2), + taken: Arc::new(AtomicBool::new(false)), + }, + ); + + // Add the participants to the list + participants.push((protocol, public_key, private_key)); + } + + // Take the first participant + let one = Arc::new(participants.remove(0)); + + // Create the request that they should all be able to join on + let request = vec![rand::thread_rng().gen(); 100]; + + // Create a join set to wait for all the tasks to finish + let mut join_set = JoinSet::new(); + + // Make 10 requests with the same hash + for _ in 0..10 { + // Clone the first participant + let one_clone = Arc::clone(&one); + + // Clone the request + let request_clone = request.clone(); + + // Spawn a task to request the data + join_set.spawn(async move { + // Create a new, signed request message + let request_message = + RequestMessage::new_signed(&one_clone.1, &one_clone.2, &request_clone)?; + + // Start requesting it + one_clone + .0 + .request(request_message, Duration::from_secs(20)) + .await?; + + Ok::<(), anyhow::Error>(()) + }); + } + + // Wait for all the tasks to finish, making sure they all succeed + while let Some(result) = join_set.join_next().await { + result + .expect("failed to join task") + .expect("failed to request data"); + } + } +} diff --git a/request-response/src/message.rs b/request-response/src/message.rs new file mode 100644 index 0000000000..39207b4230 --- /dev/null +++ b/request-response/src/message.rs @@ -0,0 +1,449 @@ +use std::{ + io::{Cursor, Read, Write}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use anyhow::{Context, Result}; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use hotshot_types::traits::signature_key::SignatureKey; + +use super::{request::Request, RequestHash, Serializable}; + +/// The outer message type for the request-response protocol. Can either be a request or a response +#[derive(Clone, Debug)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub enum Message { + /// A request + Request(RequestMessage), + /// A response + Response(ResponseMessage), +} + +/// A request message, which includes the requester's public key, the request's signature, a timestamp, and the request itself +#[derive(Clone, Debug)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct RequestMessage { + /// The requester's public key + pub public_key: K, + /// The requester's signature over the [the actual request content + timestamp] + pub signature: K::PureAssembledSignatureType, + /// The timestamp of when the request was sent (in seconds since the Unix epoch). We use this to + /// ensure that the request is not old, which is useful for preventing replay attacks. + pub timestamp_unix_seconds: u64, + /// The actual request data. This is from the application + pub request: R, +} + +/// A response message, which includes the hash of the request we're responding to and the response itself. +#[derive(Clone, Debug)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct ResponseMessage { + /// The hash of the application-specific request we're responding to. The hash is a free way + /// to identify the request and weed out any potential incompatibilities + pub request_hash: RequestHash, + /// The actual response content + pub response: R::Response, +} + +impl RequestMessage { + /// Create a new signed request message from a request + /// + /// # Errors + /// - If the request's content cannot be serialized + /// - If the request cannot be signed + /// + /// # Panics + /// - If time is not monotonic + pub fn new_signed(public_key: &K, private_key: &K::PrivateKey, request: &R) -> Result + where + ::SignError: 'static, + { + // Get the current timestamp + let timestamp_unix_seconds = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time went backwards") + .as_secs(); + + // Concatenate the content and timestamp + let timestamped_content = [ + request + .to_bytes() + .with_context(|| "failed to serialize request content")? + .as_slice(), + timestamp_unix_seconds.to_le_bytes().as_slice(), + ] + .concat(); + + // Sign the actual request content with the private key + let signature = + K::sign(private_key, ×tamped_content).with_context(|| "failed to sign message")?; + + // Return the newly signed request message + Ok(RequestMessage { + public_key: public_key.clone(), + signature, + timestamp_unix_seconds, + request: request.clone(), + }) + } + + /// Validate the [`RequestMessage`], checking the signature and the timestamp and + /// calling the request's application-specific validation function + /// + /// # Errors + /// - If the request's signature is invalid + /// - If the request is too old + /// + /// # Panics + /// - If time is not monotonic + pub async fn validate(&self, incoming_request_ttl: Duration) -> Result<()> { + // Make sure the request is not too old + if self + .timestamp_unix_seconds + .saturating_add(incoming_request_ttl.as_secs()) + < SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time went backwards") + .as_secs() + { + return Err(anyhow::anyhow!("request is too old")); + } + // Check the signature over the request content and timestamp + if !self.public_key.validate( + &self.signature, + &[ + self.request.to_bytes()?, + self.timestamp_unix_seconds.to_le_bytes().to_vec(), + ] + .concat(), + ) { + return Err(anyhow::anyhow!("invalid request signature")); + } + + // Call the request's application-specific validation function + self.request.validate().await + } +} + +/// A blanket implementation of the [`Serializable`] trait for any [`Message`] +impl Serializable for Message { + /// Converts any [`Message`] to bytes if the content is also [`Serializable`] + fn to_bytes(&self) -> Result> { + // Create a buffer for the bytes + let mut bytes = Vec::new(); + + // Convert the message to bytes based on the type. By default it is just type-prefixed + match self { + Message::Request(request_message) => { + // Write the type (request) + bytes.push(0); + + // Write the request content + bytes.extend_from_slice(request_message.to_bytes()?.as_slice()); + } + Message::Response(response_message) => { + // Write the type (response) + bytes.push(1); + + // Write the response content + bytes.extend_from_slice(response_message.to_bytes()?.as_slice()); + } + }; + + Ok(bytes) + } + + /// Convert bytes to a [`Message`] + fn from_bytes(bytes: &[u8]) -> Result { + // Create a cursor so we can easily read the bytes in order + let mut bytes = Cursor::new(bytes); + + // Get the message type + let type_byte = bytes.read_u8()?; + + // Deserialize the message based on the type + match type_byte { + 0 => { + // Read the `RequestMessage` + Ok(Message::Request(RequestMessage::from_bytes(&read_to_end( + &mut bytes, + )?)?)) + } + 1 => { + // Read the `ResponseMessage` + Ok(Message::Response(ResponseMessage::from_bytes( + &read_to_end(&mut bytes)?, + )?)) + } + _ => Err(anyhow::anyhow!("invalid message type")), + } + } +} + +impl Serializable for RequestMessage { + fn to_bytes(&self) -> Result> { + // Create a buffer for the bytes + let mut bytes = Vec::new(); + + // Write the public key (length-prefixed) + write_length_prefixed(&mut bytes, &self.public_key.to_bytes())?; + + // Write the signature (length-prefixed) + write_length_prefixed(&mut bytes, &bincode::serialize(&self.signature)?)?; + + // Write the timestamp + bytes.write_all(&self.timestamp_unix_seconds.to_le_bytes())?; + + // Write the actual request + bytes.write_all(self.request.to_bytes()?.as_slice())?; + + Ok(bytes) + } + + fn from_bytes(bytes: &[u8]) -> Result { + // Create a cursor so we can easily read the bytes in order + let mut bytes = Cursor::new(bytes); + + // Read the public key (length-prefixed) + let public_key = K::from_bytes(&read_length_prefixed(&mut bytes)?)?; + + // Read the signature (length-prefixed) + let signature = bincode::deserialize(&read_length_prefixed(&mut bytes)?)?; + + // Read the timestamp as a [`u64`] + let timestamp = bytes.read_u64::()?; + + // Deserialize the request + let request = R::from_bytes(&read_to_end(&mut bytes)?)?; + + Ok(Self { + public_key, + signature, + timestamp_unix_seconds: timestamp, + request, + }) + } +} + +impl Serializable for ResponseMessage { + fn to_bytes(&self) -> Result> { + // Create a buffer for the bytes + let mut bytes = Vec::new(); + + // Write the request hash as bytes + bytes.write_all(self.request_hash.as_bytes())?; + + // Write the response content + bytes.write_all(self.response.to_bytes()?.as_slice())?; + + Ok(bytes) + } + + fn from_bytes(bytes: &[u8]) -> Result { + // Create a buffer for the bytes + let mut bytes = Cursor::new(bytes); + + // Read the request hash as a [`blake3::Hash`] + let mut request_hash_bytes = [0; 32]; + bytes.read_exact(&mut request_hash_bytes)?; + let request_hash = RequestHash::from(request_hash_bytes); + + // Read the response content to the end + let response = R::Response::from_bytes(&read_to_end(&mut bytes)?)?; + + Ok(Self { + request_hash, + response, + }) + } +} + +/// A helper function to write a length-prefixed value to a writer +fn write_length_prefixed(writer: &mut W, value: &[u8]) -> Result<()> { + // Write the length of the value as a u32 + writer.write_u32::( + u32::try_from(value.len()).with_context(|| "value was too large")?, + )?; + + // Write the (already serialized) value + writer.write_all(value)?; + Ok(()) +} + +/// A helper function to read a length-prefixed value from a reader +fn read_length_prefixed(reader: &mut R) -> Result> { + // Read the length of the value as a u32 + let length = reader.read_u32::()?; + + // Read the value + let mut value = vec![0; length as usize]; + reader.read_exact(&mut value)?; + Ok(value) +} + +/// A helper function to read to the end of the reader +fn read_to_end(reader: &mut R) -> Result> { + let mut value = Vec::new(); + reader.read_to_end(&mut value)?; + Ok(value) +} + +#[cfg(test)] +mod tests { + use async_trait::async_trait; + use hotshot_types::signature_key::BLSPubKey; + use rand::Rng; + + use crate::request::Response; + + use super::*; + + // A testing implementation of the [`Serializable`] trait for [`Vec`] + impl Serializable for Vec { + fn to_bytes(&self) -> Result> { + Ok(self.clone()) + } + fn from_bytes(bytes: &[u8]) -> Result { + Ok(bytes.to_vec()) + } + } + + /// A testing implementation of the [`Request`] trait for [`Vec`] + #[async_trait] + impl Request for Vec { + type Response = Vec; + async fn validate(&self) -> Result<()> { + Ok(()) + } + } + + /// A testing implementation of the [`Response`] trait for [`Vec`] + #[async_trait] + impl Response> for Vec { + async fn validate(&self, _request: &Vec) -> Result<()> { + Ok(()) + } + } + + /// Tests that properly signed requests are validated correctly and that invalid requests + /// (bad timestamp/signature) are rejected + #[tokio::test] + async fn test_request_validation() { + // Create some RNG + let mut rng = rand::thread_rng(); + + for _ in 0..100 { + // Create a random keypair + let (public_key, private_key) = + BLSPubKey::generated_from_seed_indexed([1; 32], rng.gen::()); + + // Create a valid request with some random content + let mut request = RequestMessage::new_signed( + &public_key, + &private_key, + &vec![rng.gen::(); rng.gen_range(1..10000)], + ) + .expect("Failed to create signed request"); + + let (should_be_valid, request_ttl) = match rng.gen_range(0..4) { + 0 => (true, Duration::from_secs(1)), + + 1 => { + // Alter the requests's actual content + request.request[0] = !request.request[0]; + + // It should not be valid anymore + (false, Duration::from_secs(1)) + } + + 2 => { + // Alter the timestamp + request.timestamp_unix_seconds += 1000; + + // It should not be valid anymore + (false, Duration::from_secs(1)) + } + + 3 => { + // Change the request ttl to be 0. This should make the request + // invalid immediately + (true, Duration::from_secs(0)) + } + + _ => unreachable!(), + }; + + // Validate the request + assert_eq!(request.validate(request_ttl).await.is_ok(), should_be_valid); + } + } + + /// Tests that messages are serialized and deserialized correctly + #[test] + fn test_message_parity() { + for _ in 0..100 { + // Create some RNG + let mut rng = rand::thread_rng(); + + // Generate a random message type + let is_request = rng.gen::() % 2 == 0; + + // The request content will be a random vector of bytes + let request = vec![rng.gen::(); rng.gen_range(0..10000)]; + + // Create a message + let message = if is_request { + // Create a random keypair + let (public_key, private_key) = + BLSPubKey::generated_from_seed_indexed([1; 32], rng.gen::()); + + // Create a new signed request + let request = RequestMessage::new_signed(&public_key, &private_key, &request) + .expect("Failed to create signed request"); + + Message::Request(request) + } else { + // Create a response message + Message::Response(ResponseMessage { + request_hash: blake3::hash(&request), + response: vec![rng.gen::(); rng.gen_range(0..10000)], + }) + }; + + // Serialize the message + let serialized = message.to_bytes().expect("Failed to serialize message"); + + // Deserialize the message + let deserialized = + Message::from_bytes(&serialized).expect("Failed to deserialize message"); + + // Assert that the deserialized message is the same as the original message + assert_eq!(message, deserialized); + } + } + + /// Tests that length-prefixed values are read and written correctly + #[test] + fn test_length_prefix_parity() { + // Create some RNG + let mut rng = rand::thread_rng(); + + for _ in 0..100 { + // Create a buffer to test over + let mut bytes = Vec::new(); + + // Generate the value to test over + let value = vec![rng.gen::(); rng.gen_range(0..10000)]; + + // Write the length-prefixed value + write_length_prefixed(&mut bytes, &value).unwrap(); + + // Create a reader from the bytes + let mut reader = Cursor::new(bytes); + + // Read the length-prefixed value + let value = read_length_prefixed(&mut reader).unwrap(); + assert_eq!(value, value); + } + } +} diff --git a/request-response/src/network.rs b/request-response/src/network.rs new file mode 100644 index 0000000000..3a307eeb85 --- /dev/null +++ b/request-response/src/network.rs @@ -0,0 +1,58 @@ +//! This file contains the [`Sender`] and [`Receiver`] traits. These traits are **used** by the +//! [`RequestResponseProtocol`] to send and receive messages from a network or other source. +//! +//! For HotShot I've gone ahead and done a blanket implementation for a [`Sender`] for all +//! [`ConnectedNetwork`]s. The reason it's not done for the [`Receiver`] is because both +//! HS and the confirmation layer will receive messages from a single point and _then_ decide +//! what to do with them (as opposed to having some sort of filtering mechanism). So for +//! [`Receiver`] I've done a blanket implementation for channels that send [`Vec`]s. + +use std::{ops::Deref, sync::Arc}; + +use anyhow::{Context, Result}; +use async_trait::async_trait; +use hotshot_types::traits::{network::ConnectedNetwork, signature_key::SignatureKey}; +use tokio::sync::mpsc; + +/// A type alias for a shareable byte array +pub type Bytes = Arc>; + +/// The [`Sender`] trait is used to allow the [`RequestResponseProtocol`] to send messages to a specific recipient +#[async_trait] +pub trait Sender: Send + Sync + 'static + Clone { + /// Send a message to the specified recipient + async fn send_message(&self, message: &Bytes, recipient: K) -> Result<()>; +} + +/// The [`Receiver`] trait is used to allow the [`RequestResponseProtocol`] to receive messages from a network +/// or other source. +#[async_trait] +pub trait Receiver: Send + Sync + 'static { + /// Receive a message. Returning an error here means the receiver will _NEVER_ receive any more messages + async fn receive_message(&mut self) -> Result; +} + +/// A blanket implementation of the [`Sender`] trait for all types that dereference to [`ConnectedNetwork`] +#[async_trait] +impl Sender for T +where + T: Deref> + Send + Sync + 'static + Clone, + K: SignatureKey + 'static, +{ + async fn send_message(&self, message: &Bytes, recipient: K) -> Result<()> { + // Just send the message to the recipient + self.direct_message(message.to_vec(), recipient) + .await + .with_context(|| "failed to send message") + } +} + +/// An implementation of the [`Receiver`] trait for the [`mpsc::Receiver`] type. Allows us to send messages +/// to a channel and have the protocol receive them. +#[async_trait] +impl Receiver for mpsc::Receiver { + async fn receive_message(&mut self) -> Result { + // Just receive a message from the channel + self.recv().await.ok_or(anyhow::anyhow!("channel closed")) + } +} diff --git a/request-response/src/recipient_source.rs b/request-response/src/recipient_source.rs new file mode 100644 index 0000000000..49da07a2a2 --- /dev/null +++ b/request-response/src/recipient_source.rs @@ -0,0 +1,13 @@ +use async_trait::async_trait; +use hotshot_types::traits::signature_key::SignatureKey; + +use super::request::Request; + +/// A trait that allows the [`RequestResponseProtocol`] to get the recipients that a specific message should +/// expect responses from. In `HotShot` this would go on top of the [`Membership`] trait and determine +/// which nodes are able (quorum/DA) to respond to which requests +#[async_trait] +pub trait RecipientSource: Send + Sync + 'static { + /// Get all the recipients that the specific request should expect responses from + async fn get_recipients_for(&self, request: &R) -> Vec; +} diff --git a/request-response/src/request.rs b/request-response/src/request.rs new file mode 100644 index 0000000000..87bc466452 --- /dev/null +++ b/request-response/src/request.rs @@ -0,0 +1,35 @@ +//! This file contains the [`Request`] and [`Response`] traits. Any upstream +//! that wants to use the [`RequestResponseProtocol`] needs to implement these +//! traits for their specific types. + +use std::fmt::Debug; + +use anyhow::Result; +use async_trait::async_trait; + +use super::Serializable; + +/// A trait for a request. Associates itself with a response type. +#[async_trait] +pub trait Request: Send + Sync + Serializable + 'static + Clone + Debug { + /// The response type associated with this request + type Response: Response; + + /// Validate the request, returning an error if it is not valid + /// + /// # Errors + /// If the request is not valid + async fn validate(&self) -> Result<()>; +} + +/// A trait that a response needs to implement +#[async_trait] +pub trait Response: + Send + Sync + Serializable + Clone + Debug + PartialEq + Eq +{ + /// Validate the response, making sure it is valid for the given request + /// + /// # Errors + /// If the response is not valid for the given request + async fn validate(&self, request: &R) -> Result<()>; +} diff --git a/request-response/src/util.rs b/request-response/src/util.rs new file mode 100644 index 0000000000..f55b8dc2d1 --- /dev/null +++ b/request-response/src/util.rs @@ -0,0 +1,45 @@ +use std::collections::VecDeque; + +/// A [`VecDeque`] with a maximum size +pub struct BoundedVecDeque { + /// The inner [`VecDeque`] + inner: VecDeque, + /// The maximum size of the [`VecDeque`] + max_size: usize, +} + +impl BoundedVecDeque { + /// Create a new bounded [`VecDeque`] with the given maximum size + pub fn new(max_size: usize) -> Self { + Self { + inner: VecDeque::new(), + max_size, + } + } + + /// Push an item into the bounded [`VecDeque`], removing the oldest item if the + /// maximum size is reached + pub fn push(&mut self, item: T) { + if self.inner.len() >= self.max_size { + self.inner.pop_front(); + } + self.inner.push_back(item); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_bounded_vec_deque() { + let mut deque = BoundedVecDeque::new(3); + deque.push(1); + deque.push(2); + deque.push(3); + deque.push(4); + deque.push(5); + assert_eq!(deque.inner.len(), 3); + assert_eq!(deque.inner, vec![3, 4, 5]); + } +} diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 8f52f3fffc..bbb2044e70 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -69,6 +69,7 @@ multihash = { version = "0.19", features = ["serde-codec"] } num-bigint = { version = "0.4" } num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128"] } +parking_lot = { version = "0.12", features = ["send_guard"] } percent-encoding = { version = "2" } rand = { version = "0.8", features = ["small_rng"] } regex = { version = "1" } @@ -82,6 +83,7 @@ standback = { version = "0.2", default-features = false, features = ["std"] } subtle = { version = "2", default-features = false, features = ["std"] } time = { version = "0.3", features = ["formatting", "macros", "parsing"] } tokio = { version = "1", features = ["fs", "io-util", "macros", "net", "parking_lot", "rt-multi-thread", "sync", "time", "tracing"] } +tokio-util = { version = "0.7", features = ["codec", "io", "rt"] } toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } toml_edit = { version = "0.22", features = ["serde"] } tracing = { version = "0.1", features = ["log"] } @@ -150,6 +152,7 @@ multihash = { version = "0.19", features = ["serde-codec"] } num-bigint = { version = "0.4" } num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128"] } +parking_lot = { version = "0.12", features = ["send_guard"] } percent-encoding = { version = "2" } rand = { version = "0.8", features = ["small_rng"] } regex = { version = "1" } @@ -164,6 +167,7 @@ subtle = { version = "2", default-features = false, features = ["std"] } syn = { version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3", features = ["formatting", "macros", "parsing"] } tokio = { version = "1", features = ["fs", "io-util", "macros", "net", "parking_lot", "rt-multi-thread", "sync", "time", "tracing"] } +tokio-util = { version = "0.7", features = ["codec", "io", "rt"] } toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } toml_edit = { version = "0.22", features = ["serde"] } tracing = { version = "0.1", features = ["log"] } From 562d6e2aaca18c61e8a97d02ea439c36d7cd1a62 Mon Sep 17 00:00:00 2001 From: Chengyu Lin Date: Wed, 29 Jan 2025 13:01:35 -0500 Subject: [PATCH 1375/1393] [VID Upgrade] add version number to VID interfaces (#4062) * add version information for VID * fix merge conflict * advz_scheme & upgrade_lock * fix * address comments --- hotshot/src/tasks/mod.rs | 15 ++-- task-impls/src/da.rs | 8 +- task-impls/src/response.rs | 25 ++++-- task-impls/src/vid.rs | 6 +- testing/src/block_builder/mod.rs | 11 ++- testing/src/block_builder/random.rs | 19 ++-- testing/src/block_builder/simple.rs | 8 +- testing/src/helpers.rs | 37 +++++--- testing/src/predicates/event.rs | 10 ++- testing/src/view_generator.rs | 29 ++++-- testing/tests/tests_1/block_builder.rs | 7 +- testing/tests/tests_1/da_task.rs | 10 ++- testing/tests/tests_1/quorum_proposal_task.rs | 88 ++++++++++++++----- .../tests_1/upgrade_task_with_proposal.rs | 20 +++-- testing/tests/tests_1/vid_task.rs | 13 ++- types/src/consensus.rs | 21 +++-- types/src/data.rs | 56 ++++++++---- types/src/data/vid_disperse.rs | 10 +-- types/src/traits/block_contents.rs | 14 ++- types/src/vid.rs | 3 +- types/src/vid/advz.rs | 0 types/src/vid/avidm.rs | 0 22 files changed, 285 insertions(+), 125 deletions(-) create mode 100644 types/src/vid/advz.rs create mode 100644 types/src/vid/avidm.rs diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 00ce5efb2c..767fe38ec2 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -80,18 +80,21 @@ pub async fn add_request_network_task< pub fn add_response_task, V: Versions>( handle: &mut SystemContextHandle, ) { - let state = NetworkResponseState::::new( + let state = NetworkResponseState::::new( handle.hotshot.consensus(), Arc::clone(&handle.memberships), handle.public_key().clone(), handle.private_key().clone(), handle.hotshot.id, + handle.hotshot.upgrade_lock.clone(), ); - handle.network_registry.register(run_response_task::( - state, - handle.internal_event_stream.1.activate_cloned(), - handle.internal_event_stream.0.clone(), - )); + handle + .network_registry + .register(run_response_task::( + state, + handle.internal_event_stream.1.activate_cloned(), + handle.internal_event_stream.0.clone(), + )); } /// Add a task which updates our queue length metric at a set interval diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index f26a201f15..8c759605a5 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -179,9 +179,11 @@ impl, V: Versions> DaTaskState(&txns, num_nodes, version)).await; let payload_commitment = payload_commitment.unwrap(); self.storage @@ -233,12 +235,14 @@ impl, V: Versions> DaTaskState( OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), view_number, membership, &pk, + &upgrade_lock, ) .await; if let Some(Some(vid_share)) = consensus diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 17116a1a2a..35be5523f4 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -12,9 +12,11 @@ use committable::Committable; use hotshot_types::{ consensus::{Consensus, LockedConsensusState, OuterConsensus}, data::VidDisperseShare, - message::Proposal, + message::{Proposal, UpgradeLock}, traits::{ - election::Membership, network::DataRequest, node_implementation::NodeType, + election::Membership, + network::DataRequest, + node_implementation::{NodeType, Versions}, signature_key::SignatureKey, }, }; @@ -29,7 +31,7 @@ const TXNS_TIMEOUT: Duration = Duration::from_millis(100); /// Task state for the Network Request Task. The task is responsible for handling /// requests sent to this node by the network. It will validate the sender, /// parse the request, and try to find the data request in the consensus stores. -pub struct NetworkResponseState { +pub struct NetworkResponseState { /// Locked consensus state consensus: LockedConsensusState, @@ -44,9 +46,12 @@ pub struct NetworkResponseState { /// The node's id id: u64, + + /// Lock for a decided upgrade + upgrade_lock: UpgradeLock, } -impl NetworkResponseState { +impl NetworkResponseState { /// Create the network request state with the info it needs pub fn new( consensus: LockedConsensusState, @@ -54,6 +59,7 @@ impl NetworkResponseState { pub_key: TYPES::SignatureKey, private_key: ::PrivateKey, id: u64, + upgrade_lock: UpgradeLock, ) -> Self { Self { consensus, @@ -61,6 +67,7 @@ impl NetworkResponseState { pub_key, private_key, id, + upgrade_lock, } } @@ -155,22 +162,24 @@ impl NetworkResponseState { drop(consensus_reader); - if Consensus::calculate_and_update_vid( + if Consensus::calculate_and_update_vid::( OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.membership), &self.private_key, + &self.upgrade_lock, ) .await .is_none() { // Sleep in hope we receive txns in the meantime sleep(TXNS_TIMEOUT).await; - Consensus::calculate_and_update_vid( + Consensus::calculate_and_update_vid::( OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.membership), &self.private_key, + &self.upgrade_lock, ) .await?; } @@ -208,8 +217,8 @@ fn valid_signature( /// Spawn the network response task to handle incoming request for data /// from other nodes. It will shutdown when it gets `HotshotEvent::Shutdown` /// on the `event_stream` arg. -pub fn run_response_task( - task_state: NetworkResponseState, +pub fn run_response_task( + task_state: NetworkResponseState, event_stream: Receiver>>, sender: Sender>>, ) -> JoinHandle<()> { diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 41af76bae1..9968ea21dd 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -99,12 +99,13 @@ impl, V: Versions> VidTaskState( &payload, &Arc::clone(&self.membership), *view_number, epoch, epoch, + &self.upgrade_lock, ) .await .ok()?; @@ -203,12 +204,13 @@ impl, V: Versions> VidTaskState( payload.as_ref(), &Arc::clone(&self.membership), proposal_view_number, target_epoch, sender_epoch, + &self.upgrade_lock, ) .await .ok()?; diff --git a/testing/src/block_builder/mod.rs b/testing/src/block_builder/mod.rs index 891fbbef72..10e1cf6aed 100644 --- a/testing/src/block_builder/mod.rs +++ b/testing/src/block_builder/mod.rs @@ -22,13 +22,14 @@ use hotshot_builder_api::{ use hotshot_types::{ constants::{LEGACY_BUILDER_MODULE, MARKETPLACE_BUILDER_MODULE}, traits::{ - block_contents::EncodeBytes, node_implementation::NodeType, + block_contents::EncodeBytes, + node_implementation::{NodeType, Versions}, signature_key::BuilderSignatureKey, }, }; use tide_disco::{method::ReadState, App, Url}; use tokio::spawn; -use vbs::version::StaticVersionType; +use vbs::version::{StaticVersionType, Version}; use crate::test_builder::BuilderChange; @@ -166,11 +167,12 @@ pub fn run_builder_source_0_1( } /// Helper function to construct all builder data structures from a list of transactions -async fn build_block( +async fn build_block( transactions: Vec, num_storage_nodes: Arc>, pub_key: TYPES::BuilderSignatureKey, priv_key: ::BuilderPrivateKey, + version: Version, ) -> BlockEntry where ::InstanceState: Default, @@ -185,9 +187,10 @@ where let commitment = block_payload.builder_commitment(&metadata); - let vid_commitment = hotshot_types::traits::block_contents::vid_commitment( + let vid_commitment = hotshot_types::traits::block_contents::vid_commitment::( &block_payload.encode(), *num_storage_nodes.read_arc().await, + version, ); // Get block size from the encoded payload diff --git a/testing/src/block_builder/random.rs b/testing/src/block_builder/random.rs index b5c1fb93f7..12232a5f59 100644 --- a/testing/src/block_builder/random.rs +++ b/testing/src/block_builder/random.rs @@ -25,10 +25,13 @@ use hotshot_builder_api::v0_1::{ builder::BuildError, data_source::BuilderDataSource, }; -use hotshot_example_types::block_types::TestTransaction; +use hotshot_example_types::{block_types::TestTransaction, node_types::TestVersions}; use hotshot_types::{ network::RandomBuilderConfig, - traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}, + traits::{ + node_implementation::{NodeType, Versions}, + signature_key::BuilderSignatureKey, + }, utils::BuilderCommitment, vid::VidCommitment, }; @@ -36,6 +39,7 @@ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, Url}; use tokio::{spawn, time::sleep}; +use vbs::version::StaticVersionType; use super::{ build_block, run_builder_source_0_1, BlockEntry, BuilderTask, TestBuilderImplementation, @@ -110,7 +114,7 @@ pub struct RandomBuilderTask> { } impl> RandomBuilderTask { - async fn build_blocks( + async fn build_blocks( options: RandomBuilderConfig, num_nodes: Arc>, pub_key: ::BuilderSignatureKey, @@ -136,11 +140,14 @@ impl> RandomBuilderTask { }) .collect(); - let block = build_block( + // Let new VID scheme ship with Epochs upgrade. + let version = ::Epochs::VERSION; + let block = build_block::( transactions, num_nodes.clone(), pub_key.clone(), priv_key.clone(), + version, ) .await; @@ -171,7 +178,7 @@ where mut self: Box, mut stream: Box> + std::marker::Unpin + Send + 'static>, ) { - let mut task = Some(spawn(Self::build_blocks( + let mut task = Some(spawn(Self::build_blocks::( self.config.clone(), self.num_nodes.clone(), self.pub_key.clone(), @@ -191,7 +198,7 @@ where match change { BuilderChange::Up => { if task.is_none() { - task = Some(spawn(Self::build_blocks( + task = Some(spawn(Self::build_blocks::( self.config.clone(), self.num_nodes.clone(), self.pub_key.clone(), diff --git a/testing/src/block_builder/simple.rs b/testing/src/block_builder/simple.rs index e0af714f9a..6823b90236 100644 --- a/testing/src/block_builder/simple.rs +++ b/testing/src/block_builder/simple.rs @@ -31,12 +31,13 @@ use hotshot_builder_api::{ }, v0_99, }; +use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ bundle::Bundle, constants::{LEGACY_BUILDER_MODULE, MARKETPLACE_BUILDER_MODULE}, traits::{ block_contents::{BlockHeader, BuilderFee}, - node_implementation::NodeType, + node_implementation::{NodeType, Versions}, signature_key::BuilderSignatureKey, }, utils::BuilderCommitment, @@ -246,11 +247,14 @@ where return Ok(vec![]); } - let block_entry = build_block( + // Let new VID scheme ships with Epochs upgrade + let version = ::Epochs::VERSION; + let block_entry = build_block::( transactions, self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), + version, ) .await; diff --git a/testing/src/helpers.rs b/testing/src/helpers.rs index c476ccea44..1bbb099c93 100644 --- a/testing/src/helpers.rs +++ b/testing/src/helpers.rs @@ -36,13 +36,14 @@ use hotshot_types::{ node_implementation::{NodeType, Versions}, }, utils::{option_epoch_from_block_number, View, ViewInner}, - vid::{vid_scheme, VidCommitment, VidProposal, VidSchemeType}, + vid::{advz_scheme, VidCommitment, VidProposal, VidSchemeType}, vote::{Certificate, HasViewNumber, Vote}, ValidatorConfig, }; use jf_vid::VidScheme; use primitive_types::U256; use serde::Serialize; +use vbs::version::Version; use crate::{test_builder::TestDescription, test_launcher::TestLauncher}; @@ -270,67 +271,78 @@ pub fn key_pair_for_id( /// initialize VID /// # Panics /// if unable to create a [`VidSchemeType`] +/// TODO(Chengyu): use this version information #[must_use] -pub async fn vid_scheme_from_view_number( +pub async fn vid_scheme_from_view_number( membership: &Arc>, view_number: TYPES::View, epoch_number: Option, + _version: Version, ) -> VidSchemeType { let num_storage_nodes = membership .read() .await .committee_members(view_number, epoch_number) .len(); - vid_scheme(num_storage_nodes) + advz_scheme(num_storage_nodes) } -pub async fn vid_payload_commitment( +pub async fn vid_payload_commitment( membership: &Arc::Membership>>, view_number: TYPES::View, epoch_number: Option, transactions: Vec, + version: Version, ) -> VidCommitment { - let mut vid = vid_scheme_from_view_number::(membership, view_number, epoch_number).await; + let mut vid = + vid_scheme_from_view_number::(membership, view_number, epoch_number, version) + .await; let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); vid_disperse.commit } -pub async fn da_payload_commitment( +pub async fn da_payload_commitment( membership: &Arc::Membership>>, transactions: Vec, epoch_number: Option, + version: Version, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); - vid_commitment( + vid_commitment::( &encoded_transactions, membership.read().await.total_nodes(epoch_number), + version, ) } -pub async fn build_payload_commitment( +pub async fn build_payload_commitment( membership: &Arc::Membership>>, view: TYPES::View, epoch: Option, + version: Version, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(membership, view, epoch).await; + let mut vid = vid_scheme_from_view_number::(membership, view, epoch, version).await; let encoded_transactions = Vec::new(); vid.commit_only(&encoded_transactions).unwrap() } /// TODO: -pub async fn build_vid_proposal( +pub async fn build_vid_proposal( membership: &Arc::Membership>>, view_number: TYPES::View, epoch_number: Option, transactions: Vec, private_key: &::PrivateKey, + version: Version, ) -> VidProposal { - let mut vid = vid_scheme_from_view_number::(membership, view_number, epoch_number).await; + let mut vid = + vid_scheme_from_view_number::(membership, view_number, epoch_number, version) + .await; let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = VidDisperse::from_membership( @@ -377,9 +389,10 @@ pub async fn build_da_certificate( ) -> DaCertificate2 { let encoded_transactions = TestTransaction::encode(&transactions); - let da_payload_commitment = vid_commitment( + let da_payload_commitment = vid_commitment::( &encoded_transactions, membership.read().await.total_nodes(epoch_number), + upgrade_lock.version_infallible(view_number).await, ); let da_data = DaData2 { diff --git a/testing/src/predicates/event.rs b/testing/src/predicates/event.rs index ebd33399fb..63727b8b21 100644 --- a/testing/src/predicates/event.rs +++ b/testing/src/predicates/event.rs @@ -11,7 +11,10 @@ use async_trait::async_trait; use hotshot_task_impls::events::{HotShotEvent, HotShotEvent::*}; use hotshot_types::{ data::null_block, - traits::{block_contents::BlockHeader, node_implementation::NodeType}, + traits::{ + block_contents::BlockHeader, + node_implementation::{NodeType, Versions}, + }, }; use crate::predicates::{Predicate, PredicateResult}; @@ -202,18 +205,19 @@ where Box::new(EventPredicate { check, info }) } -pub fn quorum_proposal_send_with_null_block( +pub fn quorum_proposal_send_with_null_block( num_storage_nodes: usize, ) -> Box> where TYPES: NodeType, + V: Versions, { let info = "QuorumProposalSend with null block payload".to_string(); let check: EventCallback = Arc::new(move |e: Arc>| match e.as_ref() { QuorumProposalSend(proposal, _) => { Some(proposal.data.block_header().payload_commitment()) - == null_block::commitment(num_storage_nodes) + == null_block::commitment::(num_storage_nodes) } _ => false, }); diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index c5d77232db..707e8593f2 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -100,16 +100,23 @@ impl TestView { let leader_public_key = public_key; - let payload_commitment = - da_payload_commitment::(membership, transactions.clone(), genesis_epoch) - .await; + let genesis_version = upgrade_lock.version_infallible(genesis_view).await; - let (vid_disperse, vid_proposal) = build_vid_proposal( + let payload_commitment = da_payload_commitment::( + membership, + transactions.clone(), + genesis_epoch, + genesis_version, + ) + .await; + + let (vid_disperse, vid_proposal) = build_vid_proposal::( membership, genesis_view, genesis_epoch, transactions.clone(), &private_key, + genesis_version, ) .await; @@ -248,16 +255,22 @@ impl TestView { &metadata, ); - let payload_commitment = - da_payload_commitment::(membership, transactions.clone(), self.epoch_number) - .await; + let version = self.upgrade_lock.version_infallible(next_view).await; + let payload_commitment = da_payload_commitment::( + membership, + transactions.clone(), + self.epoch_number, + version, + ) + .await; - let (vid_disperse, vid_proposal) = build_vid_proposal( + let (vid_disperse, vid_proposal) = build_vid_proposal::( membership, next_view, self.epoch_number, transactions.clone(), &private_key, + version, ) .await; diff --git a/testing/tests/tests_1/block_builder.rs b/testing/tests/tests_1/block_builder.rs index fc29b1c01d..7258c3ff00 100644 --- a/testing/tests/tests_1/block_builder.rs +++ b/testing/tests/tests_1/block_builder.rs @@ -33,6 +33,9 @@ use vbs::version::StaticVersion; #[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_random_block_builder() { + use hotshot_example_types::node_types::TestVersions; + use vbs::version::Version; + let port = portpicker::pick_unused_port().expect("No free ports"); let api_url = Url::parse(&format!("http://localhost:{port}")).expect("Valid URL"); let task: Box> = RandomBuilderImplementation::start( @@ -58,11 +61,13 @@ async fn test_random_block_builder() { .expect("Failed to create dummy signature"); let dummy_view_number = 0u64; + let version = Version { major: 0, minor: 0 }; + let mut blocks = loop { // Test getting blocks let blocks = client .available_blocks( - vid_commitment(&[], 1), + vid_commitment::(&[], 1, version), dummy_view_number, pub_key, &signature, diff --git a/testing/tests/tests_1/da_task.rs b/testing/tests/tests_1/da_task.rs index 5422eb2a99..5006cab51d 100644 --- a/testing/tests/tests_1/da_task.rs +++ b/testing/tests/tests_1/da_task.rs @@ -29,7 +29,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, Versions}, }, }; -use vbs::version::StaticVersionType; +use vbs::version::{StaticVersionType, Version}; #[tokio::test(flavor = "multi_thread")] async fn test_da_task() { @@ -40,14 +40,16 @@ async fn test_da_task() { .0; let membership = Arc::clone(&handle.hotshot.memberships); + let default_version = Version { major: 0, minor: 0 }; // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction::new(vec![0])]; let encoded_transactions: Arc<[u8]> = Arc::from(TestTransaction::encode(&transactions)); - let payload_commit = hotshot_types::traits::block_contents::vid_commitment( + let payload_commit = hotshot_types::traits::block_contents::vid_commitment::( &encoded_transactions, handle.hotshot.memberships.read().await.total_nodes(None), + default_version, ); let mut generator = TestViewGenerator::::generate(membership.clone()); @@ -147,14 +149,16 @@ async fn test_da_task_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; let membership = Arc::clone(&handle.hotshot.memberships); + let default_version = Version { major: 0, minor: 0 }; // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. let transactions = vec![TestTransaction::new(vec![0])]; let encoded_transactions: Arc<[u8]> = Arc::from(TestTransaction::encode(&transactions)); - let payload_commit = hotshot_types::traits::block_contents::vid_commitment( + let payload_commit = hotshot_types::traits::block_contents::vid_commitment::( &encoded_transactions, handle.hotshot.memberships.read().await.total_nodes(None), + default_version, ); let mut generator = TestViewGenerator::::generate(Arc::clone(&membership)); diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/testing/tests/tests_1/quorum_proposal_task.rs index 95c4010c03..9545f79cfc 100644 --- a/testing/tests/tests_1/quorum_proposal_task.rs +++ b/testing/tests/tests_1/quorum_proposal_task.rs @@ -52,11 +52,17 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { .0; let membership = Arc::clone(&handle.hotshot.memberships); + let version = handle + .hotshot + .upgrade_lock + .version_infallible(ViewNumber::new(node_id)) + .await; - let payload_commitment = build_payload_commitment::( + let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), Some(EpochNumber::new(1)), + version, ) .await; @@ -195,14 +201,22 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { ) .unwrap(); + let upgrade_lock = &handle.hotshot.upgrade_lock; + let version_1 = upgrade_lock.version_infallible(ViewNumber::new(1)).await; + let version_2 = upgrade_lock.version_infallible(ViewNumber::new(2)).await; + let version_3 = upgrade_lock.version_infallible(ViewNumber::new(3)).await; + let version_4 = upgrade_lock.version_infallible(ViewNumber::new(4)).await; + let version_5 = upgrade_lock.version_infallible(ViewNumber::new(5)).await; + let inputs = vec![ random![ Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(1), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_1, ) .await, builder_commitment.clone(), @@ -219,10 +233,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(2), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_2, ) .await, builder_commitment.clone(), @@ -237,10 +252,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(3), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_3, ) .await, builder_commitment.clone(), @@ -255,10 +271,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(4), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_4, ) .await, builder_commitment.clone(), @@ -273,10 +290,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(5), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_5, ) .await, builder_commitment, @@ -321,11 +339,17 @@ async fn test_quorum_proposal_task_qc_timeout() { .await .0; let membership = Arc::clone(&handle.hotshot.memberships); + let version = handle + .hotshot + .upgrade_lock + .version_infallible(ViewNumber::new(node_id)) + .await; - let payload_commitment = build_payload_commitment::( + let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), Some(EpochNumber::new(1)), + version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); @@ -414,11 +438,17 @@ async fn test_quorum_proposal_task_view_sync() { .0; let membership = Arc::clone(&handle.hotshot.memberships); + let version = handle + .hotshot + .upgrade_lock + .version_infallible(ViewNumber::new(node_id)) + .await; - let payload_commitment = build_payload_commitment::( + let payload_commitment = build_payload_commitment::( &membership, ViewNumber::new(node_id), Some(EpochNumber::new(1)), + version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); @@ -551,14 +581,22 @@ async fn test_quorum_proposal_task_liveness_check() { // updated properly. let genesis_cert = proposals[0].data.justify_qc().clone(); + let upgrade_lock = &handle.hotshot.upgrade_lock; + let version_1 = upgrade_lock.version_infallible(ViewNumber::new(1)).await; + let version_2 = upgrade_lock.version_infallible(ViewNumber::new(2)).await; + let version_3 = upgrade_lock.version_infallible(ViewNumber::new(3)).await; + let version_4 = upgrade_lock.version_infallible(ViewNumber::new(4)).await; + let version_5 = upgrade_lock.version_infallible(ViewNumber::new(5)).await; + let inputs = vec![ random![ Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(1), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_1, ) .await, builder_commitment.clone(), @@ -575,10 +613,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(2), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_2, ) .await, builder_commitment.clone(), @@ -593,10 +632,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(3), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_3, ) .await, builder_commitment.clone(), @@ -611,10 +651,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(4), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_4, ) .await, builder_commitment.clone(), @@ -629,10 +670,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(5), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_5, ) .await, builder_commitment, diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/testing/tests/tests_1/upgrade_task_with_proposal.rs index 6a9a9e1f11..820fb3a562 100644 --- a/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -151,14 +151,20 @@ async fn test_upgrade_task_with_proposal() { let upgrade_vote_recvs: Vec<_> = upgrade_votes.into_iter().map(UpgradeVoteRecv).collect(); + let upgrade_lock = &upgrade_state.upgrade_lock; + let version_1 = upgrade_lock.version_infallible(ViewNumber::new(1)).await; + let version_2 = upgrade_lock.version_infallible(ViewNumber::new(2)).await; + let version_3 = upgrade_lock.version_infallible(ViewNumber::new(3)).await; + let inputs = vec![ random![ Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(1), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_1, ) .await, builder_commitment.clone(), @@ -175,10 +181,11 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(2), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_2, ) .await, builder_commitment.clone(), @@ -194,10 +201,11 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::( + build_payload_commitment::( &membership, ViewNumber::new(3), - Some(EpochNumber::new(1)) + Some(EpochNumber::new(1)), + version_3, ) .await, builder_commitment.clone(), diff --git a/testing/tests/tests_1/vid_task.rs b/testing/tests/tests_1/vid_task.rs index c79acbcad4..0cb13a0f45 100644 --- a/testing/tests/tests_1/vid_task.rs +++ b/testing/tests/tests_1/vid_task.rs @@ -30,7 +30,7 @@ use hotshot_types::{ }, }; use jf_vid::VidScheme; -use vbs::version::StaticVersionType; +use vbs::version::{StaticVersionType, Version}; use vec1::vec1; #[tokio::test(flavor = "multi_thread")] @@ -47,8 +47,15 @@ async fn test_vid_task() { let membership = Arc::clone(&handle.hotshot.memberships); - let mut vid = - vid_scheme_from_view_number::(&membership, ViewNumber::new(0), None).await; + let default_version = Version { major: 0, minor: 0 }; + + let mut vid = vid_scheme_from_view_number::( + &membership, + ViewNumber::new(0), + None, + default_version, + ) + .await; let transactions = vec![TestTransaction::new(vec![0])]; let (payload, metadata) = >::from_transactions( diff --git a/types/src/consensus.rs b/types/src/consensus.rs index a2d92c6578..c6cc918444 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -25,12 +25,12 @@ use crate::{ drb::DrbSeedsAndResults, error::HotShotError, event::{HotShotAction, LeafInfo}, - message::Proposal, + message::{Proposal, UpgradeLock}, simple_certificate::{DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate2}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, - node_implementation::{ConsensusTime, NodeType}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, @@ -952,11 +952,12 @@ impl Consensus { /// and updates `vid_shares` map with the signed `VidDisperseShare` proposals. /// Returned `Option` indicates whether the update has actually happened or not. #[instrument(skip_all, target = "Consensus", fields(view = *view))] - pub async fn calculate_and_update_vid( + pub async fn calculate_and_update_vid( consensus: OuterConsensus, view: ::View, membership: Arc>, private_key: &::PrivateKey, + upgrade_lock: &UpgradeLock, ) -> Option<()> { let payload = Arc::clone(consensus.read().await.saved_payloads().get(&view)?); let epoch = consensus @@ -967,10 +968,16 @@ impl Consensus { .view_inner .epoch()?; - let vid = - VidDisperse::calculate_vid_disperse(payload.as_ref(), &membership, view, epoch, epoch) - .await - .ok()?; + let vid = VidDisperse::calculate_vid_disperse::( + payload.as_ref(), + &membership, + view, + epoch, + epoch, + upgrade_lock, + ) + .await + .ok()?; let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus_writer = consensus.write().await; diff --git a/types/src/data.rs b/types/src/data.rs index 1bbf6df66c..c3148551cb 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -24,6 +24,7 @@ use rand::Rng; use serde::{Deserialize, Serialize}; use thiserror::Error; use utils::anytrace::*; +use vbs::version::Version; use vec1::Vec1; use vid_disperse::{ADVZDisperse, ADVZDisperseShare, VidDisperseShare2}; @@ -263,12 +264,13 @@ impl VidDisperse { /// # Errors /// Returns an error if the disperse or commitment calculation fails #[allow(clippy::panic)] - pub async fn calculate_vid_disperse( + pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, membership: &Arc>, view: TYPES::View, target_epoch: Option, data_epoch: Option, + _upgrade_lock: &UpgradeLock, ) -> Result { ADVZDisperse::calculate_vid_disperse(payload, membership, view, target_epoch, data_epoch) .await @@ -893,7 +895,14 @@ impl Leaf2 { let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload.encode(); - let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + let genesis_view = TYPES::View::genesis(); + let upgrade_lock = UpgradeLock::::new(); + let genesis_version = upgrade_lock.version_infallible(genesis_view).await; + let payload_commitment = vid_commitment::( + &payload_bytes, + GENESIS_VID_NUM_STORAGE_NODES, + genesis_version, + ); let block_header = TYPES::BlockHeader::genesis( instance_state, @@ -910,13 +919,13 @@ impl Leaf2 { let justify_qc = QuorumCertificate2::new( null_quorum_data.clone(), null_quorum_data.commit(), - ::genesis(), + genesis_view, None, PhantomData, ); Self { - view_number: TYPES::View::genesis(), + view_number: genesis_view, justify_qc, next_epoch_justify_qc: None, parent_commitment: null_quorum_data.leaf_commit, @@ -973,13 +982,14 @@ impl Leaf2 { /// /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()` /// or if the transactions are of invalid length - pub fn fill_block_payload( + pub fn fill_block_payload( &mut self, block_payload: TYPES::BlockPayload, num_storage_nodes: usize, + version: Version, ) -> std::result::Result<(), BlockError> { let encoded_txns = block_payload.encode(); - let commitment = vid_commitment(&encoded_txns, num_storage_nodes); + let commitment = vid_commitment::(&encoded_txns, num_storage_nodes, version); if commitment != self.block_header.payload_commitment() { return Err(BlockError::InconsistentPayloadCommitment); } @@ -1168,7 +1178,7 @@ impl QuorumCertificate { let genesis_view = ::genesis(); let data = QuorumData { - leaf_commit: Leaf::genesis(validated_state, instance_state) + leaf_commit: Leaf::genesis::(validated_state, instance_state) .await .commit(&upgrade_lock) .await, @@ -1233,7 +1243,7 @@ impl Leaf { /// Panics if the genesis payload (`TYPES::BlockPayload::genesis()`) is malformed (unable to be /// interpreted as bytes). #[must_use] - pub async fn genesis( + pub async fn genesis( validated_state: &TYPES::ValidatedState, instance_state: &TYPES::InstanceState, ) -> Self { @@ -1244,7 +1254,14 @@ impl Leaf { let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload.encode(); - let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + let genesis_view = TYPES::View::genesis(); + let upgrade_lock = UpgradeLock::::new(); + let genesis_version = upgrade_lock.version_infallible(genesis_view).await; + let payload_commitment = vid_commitment::( + &payload_bytes, + GENESIS_VID_NUM_STORAGE_NODES, + genesis_version, + ); let block_header = TYPES::BlockHeader::genesis( instance_state, @@ -1260,13 +1277,13 @@ impl Leaf { let justify_qc = QuorumCertificate::new( null_quorum_data.clone(), null_quorum_data.commit(), - ::genesis(), + genesis_view, None, PhantomData, ); Self { - view_number: TYPES::View::genesis(), + view_number: genesis_view, justify_qc, parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, @@ -1312,13 +1329,14 @@ impl Leaf { /// /// Fails if the payload commitment doesn't match `self.block_header.payload_commitment()` /// or if the transactions are of invalid length - pub fn fill_block_payload( + pub fn fill_block_payload( &mut self, block_payload: TYPES::BlockPayload, num_storage_nodes: usize, + version: Version, ) -> std::result::Result<(), BlockError> { let encoded_txns = block_payload.encode(); - let commitment = vid_commitment(&encoded_txns, num_storage_nodes); + let commitment = vid_commitment::(&encoded_txns, num_storage_nodes, version); if commitment != self.block_header.payload_commitment() { return Err(BlockError::InconsistentPayloadCommitment); } @@ -1534,7 +1552,6 @@ pub mod null_block { #![allow(missing_docs)] use jf_vid::VidScheme; - use memoize::memoize; use vbs::version::StaticVersionType; use crate::{ @@ -1544,7 +1561,7 @@ pub mod null_block { signature_key::BuilderSignatureKey, BlockPayload, }, - vid::{vid_scheme, VidCommitment}, + vid::{advz_scheme, VidCommitment}, }; /// The commitment for a null block payload. @@ -1553,10 +1570,11 @@ pub mod null_block { /// and may change (albeit rarely) during execution. /// /// We memoize the result to avoid having to recalculate it. - #[memoize(SharedCache, Capacity: 10)] + // TODO(Chengyu): fix it. Empty commitment must be computed at every upgrade. + // #[memoize(SharedCache, Capacity: 10)] #[must_use] - pub fn commitment(num_storage_nodes: usize) -> Option { - let vid_result = vid_scheme(num_storage_nodes).commit_only(Vec::new()); + pub fn commitment(num_storage_nodes: usize) -> Option { + let vid_result = advz_scheme(num_storage_nodes).commit_only(Vec::new()); match vid_result { Ok(r) => Some(r), @@ -1600,7 +1618,7 @@ pub mod null_block { &priv_key, FEE_AMOUNT, &null_block_metadata, - &commitment(num_storage_nodes)?, + &commitment::(num_storage_nodes)?, ) { Ok(sig) => Some(BuilderFee { fee_amount: FEE_AMOUNT, diff --git a/types/src/data/vid_disperse.rs b/types/src/data/vid_disperse.rs index d416dd8632..0534805cbc 100644 --- a/types/src/data/vid_disperse.rs +++ b/types/src/data/vid_disperse.rs @@ -14,7 +14,7 @@ use crate::{ block_contents::EncodeBytes, election::Membership, node_implementation::NodeType, signature_key::SignatureKey, }, - vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType, VidShare}, + vid::{advz_scheme, VidCommitment, VidCommon, VidSchemeType, VidShare}, vote::HasViewNumber, }; use async_lock::RwLock; @@ -101,7 +101,7 @@ impl ADVZDisperse { let txns_clone = Arc::clone(&txns); let num_txns = txns.len(); - let vid_disperse = spawn_blocking(move || vid_scheme(num_nodes).disperse(&txns_clone)) + let vid_disperse = spawn_blocking(move || advz_scheme(num_nodes).disperse(&txns_clone)) .await .wrap() .context(error!("Join error"))? @@ -114,7 +114,7 @@ impl ADVZDisperse { let num_nodes = membership.read().await.total_nodes(data_epoch); Some( - spawn_blocking(move || vid_scheme(num_nodes).commit_only(&txns)) + spawn_blocking(move || advz_scheme(num_nodes).commit_only(&txns)) .await .wrap() .context(error!("Join error"))? @@ -247,7 +247,7 @@ impl ADVZDisperseShare { /// Verification fail #[allow(clippy::result_unit_err)] pub fn verify_share(&self, total_nodes: usize) -> std::result::Result<(), ()> { - vid_scheme(total_nodes) + advz_scheme(total_nodes) .verify_share(&self.share, &self.common, &self.payload_commitment) .unwrap_or(Err(())) } @@ -376,7 +376,7 @@ impl VidDisperseShare2 { /// # Errors #[allow(clippy::result_unit_err)] pub fn verify_share(&self, total_nodes: usize) -> std::result::Result<(), ()> { - vid_scheme(total_nodes) + advz_scheme(total_nodes) .verify_share(&self.share, &self.common, &self.payload_commitment) .unwrap_or(Err(())) } diff --git a/types/src/traits/block_contents.rs b/types/src/traits/block_contents.rs index 2c312831b7..f9f188ce25 100644 --- a/types/src/traits/block_contents.rs +++ b/types/src/traits/block_contents.rs @@ -26,9 +26,13 @@ use vbs::version::Version; use super::signature_key::BuilderSignatureKey; use crate::{ data::Leaf2, - traits::{node_implementation::NodeType, states::InstanceState, ValidatedState}, + traits::{ + node_implementation::{NodeType, Versions}, + states::InstanceState, + ValidatedState, + }, utils::BuilderCommitment, - vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType}, + vid::{advz_scheme, VidCommitment, VidCommon, VidSchemeType}, }; /// Trait for structures that need to be unambiguously encoded as bytes. @@ -141,16 +145,18 @@ pub trait TestableBlock: BlockPayload + Debug { /// Compute the VID payload commitment. /// TODO(Gus) delete this function? +/// TODO(Chengyu): use the version information /// # Panics /// If the VID computation fails. #[must_use] #[allow(clippy::panic)] -pub fn vid_commitment( +pub fn vid_commitment( encoded_transactions: &[u8], num_storage_nodes: usize, + _version: Version, ) -> ::Commit { let encoded_tx_len = encoded_transactions.len(); - vid_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{encoded_tx_len}) error: {err}")) + advz_scheme(num_storage_nodes).commit_only(encoded_transactions).unwrap_or_else(|err| panic!("VidScheme::commit_only failure:(num_storage_nodes,payload_byte_len)=({num_storage_nodes},{encoded_tx_len}) error: {err}")) } /// The number of storage nodes to use when computing the genesis VID commitment. diff --git a/types/src/vid.rs b/types/src/vid.rs index c1a185ab62..4d5dd550c7 100644 --- a/types/src/vid.rs +++ b/types/src/vid.rs @@ -58,9 +58,10 @@ use crate::{ /// /// # Panics /// When the construction fails for the underlying VID scheme. +// TODO(Chengyu): move all things below to advz submodule. #[must_use] #[memoize::memoize(SharedCache, Capacity: 10)] -pub fn vid_scheme(num_storage_nodes: usize) -> VidSchemeType { +pub fn advz_scheme(num_storage_nodes: usize) -> VidSchemeType { // recovery_threshold is currently num_storage_nodes rounded down to a power of two // TODO recovery_threshold should be a function of the desired erasure code rate // https://github.com/EspressoSystems/HotShot/issues/2152 diff --git a/types/src/vid/advz.rs b/types/src/vid/advz.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/types/src/vid/avidm.rs b/types/src/vid/avidm.rs new file mode 100644 index 0000000000..e69de29bb2 From e2506c79defb188db01ceed8d0a95913847fc111 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Thu, 30 Jan 2025 08:15:09 -0800 Subject: [PATCH 1376/1393] 4093 add epoch and friends to commitments of new x2 objects (#4094) --- types/src/data.rs | 40 ++++++++++++++++++---- types/src/simple_vote.rs | 71 ++++++++++++++++++++++++++-------------- 2 files changed, 79 insertions(+), 32 deletions(-) diff --git a/types/src/data.rs b/types/src/data.rs index c3148551cb..33c44567c6 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -1076,13 +1076,39 @@ impl Leaf2 { impl Committable for Leaf2 { fn commit(&self) -> committable::Commitment { - RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", *self.view_number) - .field("parent leaf commitment", self.parent_commitment) - .field("block header", self.block_header.commit()) - .field("justify qc", self.justify_qc.commit()) - .optional("upgrade certificate", &self.upgrade_certificate) - .finalize() + let Leaf2 { + view_number, + justify_qc, + next_epoch_justify_qc, + parent_commitment, + block_header, + upgrade_certificate, + block_payload: _, + view_change_evidence: _, + next_drb_result, + with_epoch, + } = self; + + let mut cb = RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", **view_number) + .field("parent leaf commitment", *parent_commitment) + .field("block header", block_header.commit()) + .field("justify qc", justify_qc.commit()) + .optional("upgrade certificate", upgrade_certificate); + + if *with_epoch { + cb = cb + .constant_str("with_epoch") + .optional("next_epoch_justify_qc", next_epoch_justify_qc); + + if let Some(next_drb_result) = next_drb_result { + cb = cb + .constant_str("next_drb_result") + .fixed_size_bytes(next_drb_result); + } + } + + cb.finalize() } } diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 0ea17c7195..1dae6fc72e 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -355,22 +355,31 @@ impl Committable for QuorumData { impl Committable for QuorumData2 { fn commit(&self) -> Commitment { - let QuorumData2 { - leaf_commit, - epoch: _, - } = self; + let QuorumData2 { leaf_commit, epoch } = self; - committable::RawCommitmentBuilder::new("Quorum data") - .var_size_bytes(leaf_commit.as_ref()) - .finalize() + let mut cb = committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()); + + if let Some(ref epoch) = *epoch { + cb = cb.u64_field("epoch number", **epoch); + } + + cb.finalize() } } impl Committable for NextEpochQuorumData2 { fn commit(&self) -> Commitment { - committable::RawCommitmentBuilder::new("Quorum data") - .var_size_bytes(self.leaf_commit.as_ref()) - .finalize() + let NextEpochQuorumData2(QuorumData2 { leaf_commit, epoch }) = self; + + let mut cb = committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()); + + if let Some(ref epoch) = *epoch { + cb = cb.u64_field("epoch number", **epoch); + } + + cb.finalize() } } @@ -404,12 +413,17 @@ impl Committable for DaData2 { fn commit(&self) -> Commitment { let DaData2 { payload_commit, - epoch: _, + epoch, } = self; - committable::RawCommitmentBuilder::new("DA data") - .var_size_bytes(payload_commit.as_ref()) - .finalize() + let mut cb = committable::RawCommitmentBuilder::new("DA data") + .var_size_bytes(payload_commit.as_ref()); + + if let Some(ref epoch) = *epoch { + cb = cb.u64_field("epoch number", **epoch); + } + + cb.finalize() } } @@ -446,7 +460,7 @@ impl Committable for UpgradeData2 { .var_size_bytes(hash.as_slice()); if let Some(ref epoch) = *epoch { - cb = cb.u64(**epoch); + cb = cb.u64_field("epoch number", **epoch); } cb.finalize() @@ -457,15 +471,22 @@ impl Committable for UpgradeData2 { fn view_and_relay_commit( view: TYPES::View, relay: u64, + epoch: Option, tag: &str, ) -> Commitment { let builder = committable::RawCommitmentBuilder::new(tag); - builder.u64(*view).u64(relay).finalize() + let mut cb = builder.u64(*view).u64(relay); + + if let Some(epoch) = epoch { + cb = cb.u64_field("epoch number", *epoch); + } + + cb.finalize() } impl Committable for ViewSyncPreCommitData { fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, self.relay, "View Sync Precommit") + view_and_relay_commit::(self.round, self.relay, None, "View Sync Precommit") } } @@ -474,16 +495,16 @@ impl Committable for ViewSyncPreCommitData2 { let ViewSyncPreCommitData2 { relay, round, - epoch: _, + epoch, } = self; - view_and_relay_commit::(*round, *relay, "View Sync Precommit") + view_and_relay_commit::(*round, *relay, *epoch, "View Sync Precommit") } } impl Committable for ViewSyncFinalizeData { fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, self.relay, "View Sync Finalize") + view_and_relay_commit::(self.round, self.relay, None, "View Sync Finalize") } } @@ -492,16 +513,16 @@ impl Committable for ViewSyncFinalizeData2 { let ViewSyncFinalizeData2 { relay, round, - epoch: _, + epoch, } = self; - view_and_relay_commit::(*round, *relay, "View Sync Finalize") + view_and_relay_commit::(*round, *relay, *epoch, "View Sync Finalize") } } impl Committable for ViewSyncCommitData { fn commit(&self) -> Commitment { - view_and_relay_commit::(self.round, self.relay, "View Sync Commit") + view_and_relay_commit::(self.round, self.relay, None, "View Sync Commit") } } @@ -510,10 +531,10 @@ impl Committable for ViewSyncCommitData2 { let ViewSyncCommitData2 { relay, round, - epoch: _, + epoch, } = self; - view_and_relay_commit::(*round, *relay, "View Sync Commit") + view_and_relay_commit::(*round, *relay, *epoch, "View Sync Commit") } } From 2ca479f248774eade3d2fee91aeb4ba66dba5426 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:22:57 -0500 Subject: [PATCH 1377/1393] Propagate certificate errors (#4096) --- request-response/src/lib.rs | 16 +++--- request-response/src/message.rs | 3 +- task-impls/src/helpers.rs | 52 +++++++++--------- task-impls/src/quorum_proposal/handlers.rs | 1 + task-impls/src/quorum_proposal/mod.rs | 42 +++++++-------- .../src/quorum_proposal_recv/handlers.rs | 30 +++++------ task-impls/src/quorum_vote/mod.rs | 16 +++--- task-impls/src/view_sync.rs | 24 ++++++--- testing/tests/tests_1/message.rs | 12 ++--- types/src/data/vid_disperse.rs | 14 ++--- types/src/signature_key.rs | 8 ++- types/src/simple_certificate.rs | 54 +++++++++---------- types/src/traits/signature_key.rs | 12 ++++- types/src/vote.rs | 4 +- 14 files changed, 159 insertions(+), 129 deletions(-) diff --git a/request-response/src/lib.rs b/request-response/src/lib.rs index 8c0ac61987..d643547ba6 100644 --- a/request-response/src/lib.rs +++ b/request-response/src/lib.rs @@ -1,10 +1,12 @@ //! This crate contains a general request-response protocol. It is used to send requests to //! a set of recipients and wait for responses. -use std::collections::HashMap; -use std::sync::Weak; -use std::time::Instant; -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::{ + collections::HashMap, + marker::PhantomData, + sync::{Arc, Weak}, + time::{Duration, Instant}, +}; use anyhow::{anyhow, Context, Result}; use data_source::DataSource; @@ -17,8 +19,10 @@ use parking_lot::RwLock; use rand::seq::SliceRandom; use recipient_source::RecipientSource; use request::{Request, Response}; -use tokio::spawn; -use tokio::time::{sleep, timeout}; +use tokio::{ + spawn, + time::{sleep, timeout}, +}; use tokio_util::task::AbortOnDropHandle; use tracing::{error, warn}; use util::BoundedVecDeque; diff --git a/request-response/src/message.rs b/request-response/src/message.rs index 39207b4230..622be26f22 100644 --- a/request-response/src/message.rs +++ b/request-response/src/message.rs @@ -294,9 +294,8 @@ mod tests { use hotshot_types::signature_key::BLSPubKey; use rand::Rng; - use crate::request::Response; - use super::*; + use crate::request::Response; // A testing implementation of the [`Serializable`] trait for [`Vec`] impl Serializable for Vec { diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index a9e58ffb3c..6601505b0e 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -136,16 +136,20 @@ pub(crate) async fn fetch_proposal( let membership_success_threshold = membership_reader.success_threshold(justify_qc_epoch); drop(membership_reader); - if !justify_qc + justify_qc .is_valid_cert( membership_stake_table, membership_success_threshold, upgrade_lock, ) .await - { - bail!("Invalid justify_qc in proposal for view {}", *view_number); - } + .context(|e| { + warn!( + "Invalid justify_qc in proposal for view {}: {}", + *view_number, e + ) + })?; + let mut consensus_writer = consensus.write().await; let leaf = Leaf2::from_quorum_proposal(&proposal.data); let state = Arc::new( @@ -763,17 +767,19 @@ pub(crate) async fn validate_proposal_view_and_certs< membership_reader.success_threshold(timeout_cert_epoch); drop(membership_reader); - ensure!( - timeout_cert - .is_valid_cert( - membership_stake_table, - membership_success_threshold, - &validation_info.upgrade_lock + timeout_cert + .is_valid_cert( + membership_stake_table, + membership_success_threshold, + &validation_info.upgrade_lock, + ) + .await + .context(|e| { + warn!( + "Timeout certificate for view {} was invalid: {}", + *view_number, e ) - .await, - "Timeout certificate for view {} was invalid", - *view_number - ); + })?; } ViewChangeEvidence2::ViewSync(view_sync_cert) => { ensure!( @@ -792,16 +798,14 @@ pub(crate) async fn validate_proposal_view_and_certs< drop(membership_reader); // View sync certs must also be valid. - ensure!( - view_sync_cert - .is_valid_cert( - membership_stake_table, - membership_success_threshold, - &validation_info.upgrade_lock - ) - .await, - "Invalid view sync finalize cert provided" - ); + view_sync_cert + .is_valid_cert( + membership_stake_table, + membership_success_threshold, + &validation_info.upgrade_lock, + ) + .await + .context(|e| warn!("Invalid view sync finalize cert provided: {}", e))?; } } } diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 02d7c66d0b..4dedb352bb 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -143,6 +143,7 @@ impl ProposalDependencyHandle { &self.upgrade_lock, ) .await + .is_ok() { return Some(qc.clone()); } diff --git a/task-impls/src/quorum_proposal/mod.rs b/task-impls/src/quorum_proposal/mod.rs index d8cad2869f..90cdd53d27 100644 --- a/task-impls/src/quorum_proposal/mod.rs +++ b/task-impls/src/quorum_proposal/mod.rs @@ -477,19 +477,20 @@ impl, V: Versions> membership_reader.success_threshold(epoch_number); drop(membership_reader); - ensure!( - certificate - .is_valid_cert( - membership_stake_table, - membership_success_threshold, - &self.upgrade_lock - ) - .await, - warn!( - "View Sync Finalize certificate {:?} was invalid", - certificate.data() + certificate + .is_valid_cert( + membership_stake_table, + membership_success_threshold, + &self.upgrade_lock, ) - ); + .await + .context(|e| { + warn!( + "View Sync Finalize certificate {:?} was invalid: {}", + certificate.data(), + e + ) + })?; let view_number = certificate.view_number; @@ -561,15 +562,14 @@ impl, V: Versions> membership_reader.success_threshold(cert_epoch_number); drop(membership_reader); - ensure!( - qc.is_valid_cert( - membership_stake_table, - membership_success_threshold, - &self.upgrade_lock - ) - .await, - warn!("Quorum certificate {:?} was invalid", qc.data()) - ); + qc.is_valid_cert( + membership_stake_table, + membership_success_threshold, + &self.upgrade_lock, + ) + .await + .context(|e| warn!("Quorum certificate {:?} was invalid: {}", qc.data(), e))?; + self.highest_qc = qc.clone(); } HotShotEvent::NextEpochQc2Formed(Either::Left(next_epoch_qc)) => { diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index 9cdc75a562..cf7a594398 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -168,17 +168,20 @@ pub(crate) async fn handle_quorum_proposal_recv< let membership_success_threshold = membership_reader.success_threshold(justify_qc.data.epoch); drop(membership_reader); - if !justify_qc - .is_valid_cert( - membership_stake_table, - membership_success_threshold, - &validation_info.upgrade_lock, - ) - .await { let consensus_reader = validation_info.consensus.read().await; - consensus_reader.metrics.invalid_qc.update(1); - bail!("Invalid justify_qc in proposal for view {}", *view_number); + justify_qc + .is_valid_cert( + membership_stake_table, + membership_success_threshold, + &validation_info.upgrade_lock, + ) + .await + .context(|e| { + consensus_reader.metrics.invalid_qc.update(1); + + warn!("Invalid certificate for view {}: {}", *view_number, e) + })?; } if let Some(ref next_epoch_justify_qc) = maybe_next_epoch_justify_qc { @@ -198,19 +201,14 @@ pub(crate) async fn handle_quorum_proposal_recv< drop(membership_reader); // Validate the next epoch justify qc as well - if !next_epoch_justify_qc + next_epoch_justify_qc .is_valid_cert( membership_next_stake_table, membership_next_success_threshold, &validation_info.upgrade_lock, ) .await - { - bail!( - "Invalid next_epoch_justify_qc in proposal for view {}", - *view_number - ); - } + .context(|e| warn!("Invalid certificate for view {}: {}", *view_number, e))?; } broadcast_event( diff --git a/task-impls/src/quorum_vote/mod.rs b/task-impls/src/quorum_vote/mod.rs index 6953ea1f60..81ee73c0d3 100644 --- a/task-impls/src/quorum_vote/mod.rs +++ b/task-impls/src/quorum_vote/mod.rs @@ -531,15 +531,13 @@ impl, V: Versions> QuorumVoteTaskS drop(membership_reader); // Validate the DAC. - ensure!( - cert.is_valid_cert( - membership_da_stake_table, - membership_da_success_threshold, - &self.upgrade_lock - ) - .await, - warn!("Invalid DAC") - ); + cert.is_valid_cert( + membership_da_stake_table, + membership_da_success_threshold, + &self.upgrade_lock, + ) + .await + .context(|e| warn!("Invalid DAC: {}", e))?; // Add to the storage. self.consensus diff --git a/task-impls/src/view_sync.rs b/task-impls/src/view_sync.rs index dadc7c8c70..579785a89b 100644 --- a/task-impls/src/view_sync.rs +++ b/task-impls/src/view_sync.rs @@ -555,7 +555,7 @@ impl ViewSyncReplicaTaskState { drop(membership_reader); // If certificate is not valid, return current state - if !certificate + if let Err(e) = certificate .is_valid_cert( membership_stake_table, membership_failure_threshold, @@ -563,7 +563,11 @@ impl ViewSyncReplicaTaskState { ) .await { - tracing::error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!( + "Not valid view sync cert! data: {:?}, error: {}", + certificate.data(), + e + ); return None; } @@ -645,7 +649,7 @@ impl ViewSyncReplicaTaskState { drop(membership_reader); // If certificate is not valid, return current state - if !certificate + if let Err(e) = certificate .is_valid_cert( membership_stake_table, membership_success_threshold, @@ -653,7 +657,11 @@ impl ViewSyncReplicaTaskState { ) .await { - tracing::error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!( + "Not valid view sync cert! data: {:?}, error: {}", + certificate.data(), + e + ); return None; } @@ -746,7 +754,7 @@ impl ViewSyncReplicaTaskState { drop(membership_reader); // If certificate is not valid, return current state - if !certificate + if let Err(e) = certificate .is_valid_cert( membership_stake_table, membership_success_threshold, @@ -754,7 +762,11 @@ impl ViewSyncReplicaTaskState { ) .await { - tracing::error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!( + "Not valid view sync cert! data: {:?}, error: {}", + certificate.data(), + e + ); return None; } diff --git a/testing/tests/tests_1/message.rs b/testing/tests/tests_1/message.rs index b28d617358..c2970befd5 100644 --- a/testing/tests/tests_1/message.rs +++ b/testing/tests/tests_1/message.rs @@ -109,23 +109,23 @@ async fn test_certificate2_validity() { let membership_success_threshold = membership_reader.success_threshold(None); drop(membership_reader); - assert!( - qc.is_valid_cert( + assert!(qc + .is_valid_cert( membership_stake_table.clone(), membership_success_threshold, &handle.hotshot.upgrade_lock ) .await - ); + .is_ok()); - assert!( - qc2.is_valid_cert( + assert!(qc2 + .is_valid_cert( membership_stake_table, membership_success_threshold, &handle.hotshot.upgrade_lock ) .await - ); + .is_ok()); // ensure that we don't break the leaf commitment chain let leaf2 = Leaf2::from_quorum_proposal(&proposal.data); diff --git a/types/src/data/vid_disperse.rs b/types/src/data/vid_disperse.rs index 0534805cbc..b81f6505e8 100644 --- a/types/src/data/vid_disperse.rs +++ b/types/src/data/vid_disperse.rs @@ -6,6 +6,14 @@ //! This module provides types for VID disperse related data structures. +use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; + +use async_lock::RwLock; +use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; +use serde::{Deserialize, Serialize}; +use tokio::task::spawn_blocking; +use utils::anytrace::*; + use crate::{ impl_has_epoch, message::Proposal, @@ -17,12 +25,6 @@ use crate::{ vid::{advz_scheme, VidCommitment, VidCommon, VidSchemeType, VidShare}, vote::HasViewNumber, }; -use async_lock::RwLock; -use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; -use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; -use tokio::task::spawn_blocking; -use utils::anytrace::*; impl_has_epoch!(ADVZDisperse, VidDisperseShare2); diff --git a/types/src/signature_key.rs b/types/src/signature_key.rs index 2ad86804ef..5df67a872b 100644 --- a/types/src/signature_key.rs +++ b/types/src/signature_key.rs @@ -122,9 +122,13 @@ impl SignatureKey for BLSPubKey { } } - fn check(real_qc_pp: &Self::QcParams, data: &[u8], qc: &Self::QcType) -> bool { + fn check( + real_qc_pp: &Self::QcParams, + data: &[u8], + qc: &Self::QcType, + ) -> Result<(), SignatureError> { let msg = GenericArray::from_slice(data); - BitVectorQc::::check(real_qc_pp, msg, qc).is_ok() + BitVectorQc::::check(real_qc_pp, msg, qc).map(|_| ()) } fn sig_proof(signature: &Self::QcType) -> (Self::PureAssembledSignatureType, BitVec) { diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index a6b4a1b142..0c98f84ba1 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -169,22 +169,23 @@ impl> Certificate stake_table: Vec<::StakeTableEntry>, threshold: NonZeroU64, upgrade_lock: &UpgradeLock, - ) -> bool { + ) -> Result<()> { if self.view_number == TYPES::View::genesis() { - return true; + return Ok(()); } let real_qc_pp = ::public_parameter( stake_table, U256::from(u64::from(threshold)), ); - let Ok(commit) = self.data_commitment(upgrade_lock).await else { - return false; - }; + let commit = self.data_commitment(upgrade_lock).await?; + ::check( &real_qc_pp, commit.as_ref(), self.signatures.as_ref().unwrap(), ) + .wrap() + .context(|e| warn!("Signature check failed: {}", e)) } /// Proxy's to `Membership.stake` fn stake_table_entry>( @@ -257,22 +258,23 @@ impl> Certificate::StakeTableEntry>, threshold: NonZeroU64, upgrade_lock: &UpgradeLock, - ) -> bool { + ) -> Result<()> { if self.view_number == TYPES::View::genesis() { - return true; + return Ok(()); } let real_qc_pp = ::public_parameter( stake_table, U256::from(u64::from(threshold)), ); - let Ok(commit) = self.data_commitment(upgrade_lock).await else { - return false; - }; + let commit = self.data_commitment(upgrade_lock).await?; + ::check( &real_qc_pp, commit.as_ref(), self.signatures.as_ref().unwrap(), ) + .wrap() + .context(|e| warn!("Signature check failed: {}", e)) } /// Proxy's to `Membership.stake` fn stake_table_entry>( @@ -348,22 +350,23 @@ impl< stake_table: Vec<::StakeTableEntry>, threshold: NonZeroU64, upgrade_lock: &UpgradeLock, - ) -> bool { + ) -> Result<()> { if self.view_number == TYPES::View::genesis() { - return true; + return Ok(()); } let real_qc_pp = ::public_parameter( stake_table, U256::from(u64::from(threshold)), ); - let Ok(commit) = self.data_commitment(upgrade_lock).await else { - return false; - }; + let commit = self.data_commitment(upgrade_lock).await?; + ::check( &real_qc_pp, commit.as_ref(), self.signatures.as_ref().unwrap(), ) + .wrap() + .context(|e| warn!("Signature check failed: {}", e)) } fn threshold>( membership: &MEMBERSHIP, @@ -461,19 +464,16 @@ impl UpgradeCertificate { let membership_upgrade_threshold = membership_reader.upgrade_threshold(epoch); drop(membership_reader); - ensure!( - cert.is_valid_cert( - membership_stake_table, - membership_upgrade_threshold, - upgrade_lock - ) - .await, - "Invalid upgrade certificate." - ); - Ok(()) - } else { - Ok(()) + cert.is_valid_cert( + membership_stake_table, + membership_upgrade_threshold, + upgrade_lock, + ) + .await + .context(|e| warn!("Invalid upgrade certificate: {}", e))?; } + + Ok(()) } /// Given an upgrade certificate and a view, tests whether the view is in the period diff --git a/types/src/traits/signature_key.rs b/types/src/traits/signature_key.rs index aaa205a549..bfd62a1e3c 100644 --- a/types/src/traits/signature_key.rs +++ b/types/src/traits/signature_key.rs @@ -17,6 +17,7 @@ use std::{ use ark_serialize::SerializationError; use bitvec::prelude::*; use committable::Committable; +use jf_signature::SignatureError; use jf_vid::VidScheme; use primitive_types::U256; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -154,8 +155,15 @@ pub trait SignatureKey: threshold: U256, ) -> Self::QcParams; - /// check the quorum certificate for the assembled signature - fn check(real_qc_pp: &Self::QcParams, data: &[u8], qc: &Self::QcType) -> bool; + /// check the quorum certificate for the assembled signature, returning `Ok(())` if it is valid. + /// + /// # Errors + /// Returns an error if the signature key fails to validate + fn check( + real_qc_pp: &Self::QcParams, + data: &[u8], + qc: &Self::QcType, + ) -> Result<(), SignatureError>; /// get the assembled signature and the `BitVec` separately from the assembled signature fn sig_proof(signature: &Self::QcType) -> (Self::PureAssembledSignatureType, BitVec); diff --git a/types/src/vote.rs b/types/src/vote.rs index 7f91dfd01c..f30ec77242 100644 --- a/types/src/vote.rs +++ b/types/src/vote.rs @@ -18,7 +18,7 @@ use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use primitive_types::U256; use tracing::error; -use utils::anytrace::Result; +use utils::anytrace::*; use crate::{ message::UpgradeLock, @@ -79,7 +79,7 @@ pub trait Certificate: HasViewNumber { stake_table: Vec<::StakeTableEntry>, threshold: NonZeroU64, upgrade_lock: &UpgradeLock, - ) -> impl std::future::Future; + ) -> impl std::future::Future>; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` fn threshold>( From ed17abcaa749f5316653d84d330ae139f3ce8433 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 31 Jan 2025 10:16:02 -0700 Subject: [PATCH 1378/1393] Fix typos ahead of Monorepo (#4099) --- types/src/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/utils.rs b/types/src/utils.rs index f8fe4fdf5b..c22d4b43df 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -141,7 +141,7 @@ impl ViewInner { } } - /// return the underlying block paylod commitment if it exists + /// return the underlying block payload commitment if it exists #[must_use] pub fn payload_commitment(&self) -> Option { if let Self::Da { From eb067d3c70b42398ebeef905c2e9905819e37f98 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Mon, 3 Feb 2025 12:29:55 +0100 Subject: [PATCH 1379/1393] Epoch-enabled catchup tests (#4070) * Add catchup tests, store last epoch * Adjust overall safety task to variable stake tables * Use node's current epoch in tests * Adjust to CI debugging * fix CI adjustment * Further CI adjustments * Revert CI adjustments * Temporarily comment test_staggered_restart_with_epochs out * Increase request timeout and uncomment staggered restart test * Handle External message kind * MessageKind uses HasEpoch trait * QuorumProposal2 has an epoch field * Consistency task checks nodes in relevant epochs * QuorumProposal2 now implements HasEpoch * Fix compiler error * Cleanup * fix consistency_task * Revert "QuorumProposal2 has an epoch field" This reverts commit 61e1661bcaa513f7cc9f705c20a49551ab38256b. * Fix cargo audit --------- Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- example-types/src/storage_types.rs | 10 +- hotshot/src/tasks/mod.rs | 1 + task-impls/src/helpers.rs | 2 +- task-impls/src/network.rs | 15 +- task-impls/src/quorum_proposal/handlers.rs | 2 +- .../src/quorum_proposal_recv/handlers.rs | 7 +- task-impls/src/quorum_vote/handlers.rs | 22 +- task-impls/src/request.rs | 5 +- task-impls/src/vid.rs | 4 +- testing/src/byzantine/byzantine_behaviour.rs | 1 + testing/src/consistency_task.rs | 11 +- testing/src/overall_safety_task.rs | 251 ++++++++++++------ testing/src/view_generator.rs | 10 +- testing/tests/tests_1/network_task.rs | 2 + testing/tests/tests_6/test_epochs.rs | 112 +++++++- types/src/data.rs | 65 ++++- types/src/message.rs | 99 ++++++- types/src/simple_certificate.rs | 16 +- types/src/simple_vote.rs | 27 +- types/src/traits/storage.rs | 7 +- 20 files changed, 531 insertions(+), 138 deletions(-) diff --git a/example-types/src/storage_types.rs b/example-types/src/storage_types.rs index ec39a66009..890788bd60 100644 --- a/example-types/src/storage_types.rs +++ b/example-types/src/storage_types.rs @@ -253,14 +253,20 @@ impl Storage for TestStorage { async fn record_action( &self, view: ::View, + epoch: Option, action: hotshot_types::event::HotShotAction, ) -> Result<()> { if self.should_return_err { bail!("Failed to append Action to storage"); } let mut inner = self.inner.write().await; - if view > inner.action && matches!(action, HotShotAction::Vote | HotShotAction::Propose) { - inner.action = view; + if matches!(action, HotShotAction::Vote | HotShotAction::Propose) { + if view > inner.action { + inner.action = view; + } + if epoch > inner.epoch { + inner.epoch = epoch; + } } Self::run_delay_settings_from_config(&self.delay_config).await; Ok(()) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 767fe38ec2..e7e4036fc0 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -207,6 +207,7 @@ pub fn add_network_event_task< consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), transmit_tasks: BTreeMap::new(), + epoch_height: handle.epoch_height, }; let task = Task::new( network_state, diff --git a/task-impls/src/helpers.rs b/task-impls/src/helpers.rs index 6601505b0e..6b6f35cd55 100644 --- a/task-impls/src/helpers.rs +++ b/task-impls/src/helpers.rs @@ -814,7 +814,7 @@ pub(crate) async fn validate_proposal_view_and_certs< // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. { let epoch = option_epoch_from_block_number::( - proposal.data.with_epoch, + proposal.data.epoch().is_some(), proposal.data.block_header().block_number(), validation_info.epoch_height, ); diff --git a/task-impls/src/network.rs b/task-impls/src/network.rs index 3216a60551..14cfb08116 100644 --- a/task-impls/src/network.rs +++ b/task-impls/src/network.rs @@ -491,6 +491,9 @@ pub struct NetworkEventTaskState< /// map view number to transmit tasks pub transmit_tasks: BTreeMap>>, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } #[async_trait] @@ -545,6 +548,7 @@ impl< sender: &::SignatureKey, ) -> Option { let view = vid_proposal.data.view_number(); + let epoch = vid_proposal.data.epoch(); let vid_share_proposals = VidDisperseShare::to_vid_share_proposals(vid_proposal); let mut messages = HashMap::new(); @@ -615,6 +619,7 @@ impl< storage, consensus, view, + epoch, ) .await .is_err() @@ -636,6 +641,7 @@ impl< storage: Arc>, consensus: OuterConsensus, view: ::View, + epoch: Option<::Epoch>, ) -> std::result::Result<(), ()> { if let Some(mut action) = maybe_action { if !consensus.write().await.update_action(action, view) { @@ -646,7 +652,12 @@ impl< if matches!(action, HotShotAction::ViewSyncVote) { action = HotShotAction::Vote; } - match storage.write().await.record_action(view, action).await { + match storage + .write() + .await + .record_action(view, epoch, action) + .await + { Ok(()) => Ok(()), Err(e) => { tracing::warn!("Not Sending {:?} because of storage error: {:?}", action, e); @@ -1122,6 +1133,7 @@ impl< kind: message_kind, }; let view_number = message.kind.view_number(); + let epoch = message.kind.epoch(); let committee_topic = Topic::Global; let da_committee = self .membership @@ -1138,6 +1150,7 @@ impl< Arc::clone(&storage), consensus, view_number, + epoch, ) .await .is_err() diff --git a/task-impls/src/quorum_proposal/handlers.rs b/task-impls/src/quorum_proposal/handlers.rs index 4dedb352bb..4f7ff5719a 100644 --- a/task-impls/src/quorum_proposal/handlers.rs +++ b/task-impls/src/quorum_proposal/handlers.rs @@ -419,13 +419,13 @@ impl ProposalDependencyHandle { proposal: QuorumProposal2 { block_header, view_number: self.view_number, + epoch, justify_qc: parent_qc, next_epoch_justify_qc: next_epoch_qc, upgrade_certificate, view_change_evidence: proposal_certificate, next_drb_result, }, - with_epoch: version >= V::Epochs::VERSION, }; let proposed_leaf = Leaf2::from_quorum_proposal(&proposal); diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/task-impls/src/quorum_proposal_recv/handlers.rs index cf7a594398..d497276279 100644 --- a/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/task-impls/src/quorum_proposal_recv/handlers.rs @@ -16,6 +16,7 @@ use hotshot_types::{ data::{Leaf2, QuorumProposal, QuorumProposalWrapper}, message::Proposal, simple_certificate::QuorumCertificate, + simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, election::Membership, @@ -145,6 +146,10 @@ pub(crate) async fn handle_quorum_proposal_recv< event_receiver: &Receiver>>, validation_info: ValidationInfo, ) -> Result<()> { + proposal + .data + .validate_epoch(&validation_info.upgrade_lock, validation_info.epoch_height) + .await?; let quorum_proposal_sender_key = quorum_proposal_sender_key.clone(); validate_proposal_view_and_certs(proposal, &validation_info) @@ -158,7 +163,7 @@ pub(crate) async fn handle_quorum_proposal_recv< let proposal_block_number = proposal.data.block_header().block_number(); let proposal_epoch = option_epoch_from_block_number::( - proposal.data.with_epoch, + proposal.data.epoch().is_some(), proposal_block_number, validation_info.epoch_height, ); diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 1579611adc..38b63fe4ea 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -6,10 +6,20 @@ use std::{collections::btree_map::Entry, sync::Arc}; +use super::QuorumVoteTaskState; +use crate::{ + events::HotShotEvent, + helpers::{ + broadcast_event, decide_from_proposal, decide_from_proposal_2, fetch_proposal, + LeafChainTraversalOutcome, + }, + quorum_vote::Versions, +}; use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; use committable::Committable; +use hotshot_types::simple_vote::HasEpoch; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, VidDisperseShare}, @@ -36,16 +46,6 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; -use super::QuorumVoteTaskState; -use crate::{ - events::HotShotEvent, - helpers::{ - broadcast_event, decide_from_proposal, decide_from_proposal_2, fetch_proposal, - LeafChainTraversalOutcome, - }, - quorum_vote::Versions, -}; - /// Store the DRB result from the computation task to the shared `results` table. /// /// Returns the result if it exists. @@ -162,7 +162,7 @@ async fn start_drb_task, V: Versio task_state: &mut QuorumVoteTaskState, ) { // #3967 REVIEW NOTE: Should we just exit early if we aren't doing epochs? - if !proposal.with_epoch { + if proposal.epoch().is_none() { return; } diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index eb33e9f9cc..dc2bc15f2c 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -22,6 +22,7 @@ use hotshot_task::{ }; use hotshot_types::{ consensus::OuterConsensus, + simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, election::Membership, @@ -45,7 +46,7 @@ use utils::anytrace::Result; use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Amount of time to try for a request before timing out. -pub const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); +pub const REQUEST_TIMEOUT: Duration = Duration::from_millis(2000); /// Long running task which will request information after a proposal is received. /// The task will wait a it's `delay` and then send a request iteratively to peers @@ -113,7 +114,7 @@ impl> TaskState for NetworkRequest HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.data.view_number(); let prop_epoch = option_epoch_from_block_number::( - proposal.data.with_epoch, + proposal.data.epoch().is_some(), proposal.data.block_header().block_number(), self.epoch_height, ); diff --git a/task-impls/src/vid.rs b/task-impls/src/vid.rs index 9968ea21dd..20476bdfca 100644 --- a/task-impls/src/vid.rs +++ b/task-impls/src/vid.rs @@ -14,6 +14,7 @@ use hotshot_types::{ consensus::OuterConsensus, data::{PackedBundle, VidDisperse, VidDisperseShare}, message::{Proposal, UpgradeLock}, + simple_vote::HasEpoch, traits::{ block_contents::BlockHeader, election::Membership, @@ -178,7 +179,8 @@ impl, V: Versions> VidTaskState { let proposed_block_number = proposal.data.block_header().block_number(); - if !proposal.data.with_epoch || proposed_block_number % self.epoch_height != 0 { + if proposal.data.epoch().is_none() || proposed_block_number % self.epoch_height != 0 + { // This is not the last block in the epoch, do nothing. return None; } diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/testing/src/byzantine/byzantine_behaviour.rs index d8dac49cc9..8fab295bd4 100644 --- a/testing/src/byzantine/byzantine_behaviour.rs +++ b/testing/src/byzantine/byzantine_behaviour.rs @@ -350,6 +350,7 @@ impl + std::fmt::Debug, V: Version consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), transmit_tasks: BTreeMap::new(), + epoch_height: handle.epoch_height, }; let modified_network_state = NetworkEventTaskStateModifier { network_event_task_state: network_state, diff --git a/testing/src/consistency_task.rs b/testing/src/consistency_task.rs index d17db57d70..5c0dc3192d 100644 --- a/testing/src/consistency_task.rs +++ b/testing/src/consistency_task.rs @@ -104,7 +104,9 @@ async fn validate_node_map( // We want to make sure the commitment matches, // but allow for the possibility that we may have skipped views in between. - if child.justify_qc().data.leaf_commit != parent.commit() { + if child.justify_qc().view_number == parent.view_number() + && child.justify_qc().data.leaf_commit != parent.commit() + { bail!("The node has provided leaf:\n\n{child:?}\n\nwhich points to:\n\n{parent:?}\n\nbut the commits do not match."); } @@ -195,6 +197,13 @@ fn sanitize_view_map( } } + for (parent, child) in result.values().zip(result.values().skip(1)) { + // We want to make sure the aggregated leafmap has not missed a decide event + if child.justify_qc().data.leaf_commit != parent.commit() { + bail!("The network has decided:\n\n{child:?}\n\nwhich succeeds:\n\n{parent:?}\n\nbut the commits do not match. Did we miss an intermediate leaf?"); + } + } + Ok(result) } diff --git a/testing/src/overall_safety_task.rs b/testing/src/overall_safety_task.rs index dbc360978f..c824cc19b6 100644 --- a/testing/src/overall_safety_task.rs +++ b/testing/src/overall_safety_task.rs @@ -20,8 +20,10 @@ use hotshot_types::{ event::{Event, EventType, LeafChain}, simple_certificate::QuorumCertificate2, traits::{ + block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeType, Versions}, + BlockPayload, }, vid::VidCommitment, }; @@ -32,6 +34,7 @@ use crate::{ test_runner::Node, test_task::{TestEvent, TestResult, TestTaskState}, }; + /// convenience type alias for state and block pub type StateAndBlock = (Vec, Vec); @@ -138,6 +141,17 @@ impl, V: Versions> TestTas /// Handles an event from one of multiple receivers. async fn handle_event(&mut self, (message, id): (Self::Event, usize)) -> Result<()> { + let memberships_arc = Arc::clone( + &self + .handles + .read() + .await + .first() + .unwrap() + .handle + .memberships, + ); + let public_key = self.handles.read().await[id].handle.public_key(); let OverallSafetyPropertiesDescription:: { check_leaf, check_block, @@ -147,8 +161,22 @@ impl, V: Versions> TestTas .. }: OverallSafetyPropertiesDescription = self.properties.clone(); let Event { view_number, event } = message; - let key = match event { + let keys: Option> = match event { EventType::Error { error } => { + let cur_epoch = self.handles.read().await[id] + .handle + .consensus() + .read() + .await + .cur_epoch(); + if !memberships_arc + .read() + .await + .has_stake(&public_key, cur_epoch) + { + // Return early, this event comes from a node not belonging to the current epoch + return Ok(()); + } self.ctx .insert_error_to_context(view_number, id, error.clone()); None @@ -156,27 +184,84 @@ impl, V: Versions> TestTas EventType::Decide { leaf_chain, qc, - block_size: maybe_block_size, + block_size: _, } => { // Skip the genesis leaf. if leaf_chain.last().unwrap().leaf.view_number() == TYPES::View::genesis() { return Ok(()); } - let paired_up = (leaf_chain.to_vec(), (*qc).clone()); - match self.ctx.round_results.entry(view_number) { - Entry::Occupied(mut o) => { - let entry = o.get_mut(); - entry.insert_into_result(id, paired_up, maybe_block_size) + let mut keys = Vec::default(); + let mut leaf_qc = (*qc).clone(); + let mut leaf_chain_vec = leaf_chain.to_vec(); + while !leaf_chain_vec.is_empty() { + let paired_up = (leaf_chain_vec.clone(), leaf_qc.clone()); + let leaf_info = leaf_chain_vec.first().unwrap(); + let mut txns = HashSet::new(); + if let Some(ref payload) = leaf_info.leaf.block_payload() { + for txn in payload + .transaction_commitments(leaf_info.leaf.block_header().metadata()) + { + txns.insert(txn); + } } - Entry::Vacant(v) => { - let mut round_result = RoundResult::default(); - let key = round_result.insert_into_result(id, paired_up, maybe_block_size); - v.insert(round_result); - key + let maybe_block_size = if txns.is_empty() { + None + } else { + Some(txns.len().try_into()?) + }; + match self.ctx.round_results.entry(leaf_info.leaf.view_number()) { + Entry::Occupied(mut o) => { + let entry = o.get_mut(); + let key = entry + .insert_into_result( + id, + paired_up, + maybe_block_size, + &memberships_arc, + &public_key, + self.epoch_height, + ) + .await; + keys.push(key); + } + Entry::Vacant(v) => { + let mut round_result = RoundResult::default(); + let key = round_result + .insert_into_result( + id, + paired_up, + maybe_block_size, + &memberships_arc, + &public_key, + self.epoch_height, + ) + .await; + if key.is_some() { + v.insert(round_result); + keys.push(key); + } + } } + leaf_qc = leaf_chain_vec.first().unwrap().leaf.justify_qc(); + leaf_chain_vec.remove(0); } + Some(keys.into_iter().flatten().collect()) } EventType::ReplicaViewTimeout { view_number } => { + let cur_epoch = self.handles.read().await[id] + .handle + .consensus() + .read() + .await + .cur_epoch(); + if !memberships_arc + .read() + .await + .has_stake(&public_key, cur_epoch) + { + // Return early, this event comes from a node not belonging to the current epoch + return Ok(()); + } let error = Arc::new(HotShotError::::ViewTimedOut { view_number, state: RoundTimedoutState::TestCollectRoundEventsTimedOut, @@ -187,82 +272,66 @@ impl, V: Versions> TestTas _ => return Ok(()), }; - if let Some(ref key) = key { - match ( - key.epoch(self.epoch_height).map(|x| *x), - self.ctx.latest_epoch, - ) { - (Some(key_epoch), Some(latest_epoch)) => { - if key_epoch > latest_epoch { - self.ctx.latest_epoch = Some(key_epoch); + if let Some(keys) = keys { + for key in keys { + let key_epoch = key.epoch(self.epoch_height); + let memberships_reader = memberships_arc.read().await; + let key_len = memberships_reader.total_nodes(key_epoch); + let key_threshold = memberships_reader.success_threshold(key_epoch).get() as usize; + drop(memberships_reader); + + let key_view_number = key.view_number(); + let key_view = self.ctx.round_results.get_mut(&key_view_number).unwrap(); + key_view.update_status( + key_threshold, + key_len, + &key, + check_leaf, + check_block, + transaction_threshold, + ); + match key_view.status.clone() { + ViewStatus::Ok => { + self.ctx.successful_views.insert(key_view_number); + // if a view succeeds remove it from the failed views + self.ctx.failed_views.remove(&key_view_number); + if self.ctx.successful_views.len() >= num_successful_views { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + } } + ViewStatus::Failed => { + self.handle_view_failure(num_failed_views, key_view_number) + .await; + } + ViewStatus::Err(e) => { + let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; + self.error = Some(Box::new(e)); + return Ok(()); + } + ViewStatus::InProgress => {} } - (Some(key_epoch), None) => { - self.ctx.latest_epoch = Some(key_epoch); - } - _ => {} } - } - - let epoch = self.ctx.latest_epoch.map(TYPES::Epoch::new); - let memberships_arc = Arc::clone( - &self - .handles + } else { + let Some(view) = self.ctx.round_results.get_mut(&view_number) else { + return Ok(()); + }; + let cur_epoch = self.handles.read().await[id] + .handle + .consensus() .read() .await - .first() - .unwrap() - .handle - .memberships, - ); - let memberships_reader = memberships_arc.read().await; - let len = memberships_reader.total_nodes(epoch); - - // update view count - let threshold = memberships_reader.success_threshold(epoch).get() as usize; - drop(memberships_reader); - drop(memberships_arc); - - let view = self.ctx.round_results.get_mut(&view_number).unwrap(); - if let Some(key) = key { - view.update_status( - threshold, - len, - &key, - check_leaf, - check_block, - transaction_threshold, - ); - match view.status.clone() { - ViewStatus::Ok => { - self.ctx.successful_views.insert(view_number); - // if a view succeeds remove it from the failed views - self.ctx.failed_views.remove(&view_number); - if self.ctx.successful_views.len() >= num_successful_views { - let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } - return Ok(()); - } - ViewStatus::Failed => { - self.handle_view_failure(num_failed_views, view_number) - .await; + .cur_epoch(); - return Ok(()); - } - ViewStatus::Err(e) => { - let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - self.error = Some(Box::new(e)); - return Ok(()); - } - ViewStatus::InProgress => { - return Ok(()); - } + let memberships_reader = memberships_arc.read().await; + let len = memberships_reader.total_nodes(cur_epoch); + let threshold = memberships_reader.success_threshold(cur_epoch).get() as usize; + drop(memberships_reader); + + if view.check_if_failed(threshold, len) { + view.status = ViewStatus::Failed; + self.handle_view_failure(num_failed_views, view_number) + .await; } - } else if view.check_if_failed(threshold, len) { - view.status = ViewStatus::Failed; - self.handle_view_failure(num_failed_views, view_number) - .await; - return Ok(()); } Ok(()) } @@ -387,7 +456,6 @@ impl Default for RoundCtx { round_results: HashMap::default(), failed_views: HashSet::default(), successful_views: HashSet::default(), - latest_epoch: None, } } } @@ -405,8 +473,6 @@ pub struct RoundCtx { pub failed_views: HashSet, /// successful views pub successful_views: HashSet, - /// latest epoch, updated when a leaf with a higher epoch is seen - pub latest_epoch: Option, } impl RoundCtx { @@ -438,17 +504,28 @@ impl RoundCtx { impl RoundResult { /// insert into round result #[allow(clippy::unit_arg)] - pub fn insert_into_result( + pub async fn insert_into_result( &mut self, idx: usize, result: (LeafChain, QuorumCertificate2), maybe_block_size: Option, + membership: &Arc>, + public_key: &TYPES::SignatureKey, + epoch_height: u64, ) -> Option> { - self.success_nodes.insert(idx as u64, result.clone()); - let maybe_leaf = result.0.first(); if let Some(leaf_info) = maybe_leaf { let leaf = &leaf_info.leaf; + let epoch = leaf.epoch(epoch_height); + if !membership.read().await.has_stake(public_key, epoch) { + // The node doesn't belong to the epoch, don't count towards total successes count + return None; + } + if self.success_nodes.contains_key(&(idx as u64)) { + // The success of this node was previously counted, don't continue + return None; + } + self.success_nodes.insert(idx as u64, result.clone()); match self.leaf_map.entry(leaf.clone()) { std::collections::hash_map::Entry::Occupied(mut o) => { *o.get_mut() += 1; diff --git a/testing/src/view_generator.rs b/testing/src/view_generator.rs index 707e8593f2..abe4277590 100644 --- a/testing/src/view_generator.rs +++ b/testing/src/view_generator.rs @@ -40,10 +40,10 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType, Versions}, BlockPayload, }, + utils::genesis_epoch_from_version, }; use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; -use vbs::version::StaticVersionType; use crate::helpers::{ build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, @@ -77,7 +77,7 @@ impl TestView { membership: &Arc::Membership>>, ) -> Self { let genesis_view = ViewNumber::new(1); - let genesis_epoch = None; + let genesis_epoch = genesis_epoch_from_version::(); let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -146,6 +146,7 @@ impl TestView { proposal: QuorumProposal2:: { block_header: block_header.clone(), view_number: genesis_view, + epoch: genesis_epoch, justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), @@ -156,8 +157,6 @@ impl TestView { view_change_evidence: None, next_drb_result: None, }, - // #3967 REVIEW NOTE: Is this right? - with_epoch: V::Base::VERSION >= V::Epochs::VERSION, }; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); @@ -392,14 +391,13 @@ impl TestView { proposal: QuorumProposal2:: { block_header: block_header.clone(), view_number: next_view, + epoch: old_epoch, justify_qc: quorum_certificate.clone(), next_epoch_justify_qc: None, upgrade_certificate: upgrade_certificate.clone(), view_change_evidence, next_drb_result: None, }, - // #3967 REVIEW NOTE: Is this right? - with_epoch: self.upgrade_lock.epochs_enabled(next_view).await, }; let mut leaf = Leaf2::from_quorum_proposal(&proposal); diff --git a/testing/tests/tests_1/network_task.rs b/testing/tests/tests_1/network_task.rs index 2391fedc91..f634c60113 100644 --- a/testing/tests/tests_1/network_task.rs +++ b/testing/tests/tests_1/network_task.rs @@ -72,6 +72,7 @@ async fn test_network_task() { storage, consensus, transmit_tasks: BTreeMap::new(), + epoch_height: 0u64, }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); @@ -244,6 +245,7 @@ async fn test_network_storage_fail() { storage, consensus, transmit_tasks: BTreeMap::new(), + epoch_height: 0u64, }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs index 1faaa18d35..191ac94e6c 100644 --- a/testing/tests/tests_6/test_epochs.rs +++ b/testing/tests/tests_6/test_epochs.rs @@ -272,7 +272,7 @@ cross_tests!( }; // 2 nodes fail triggering view sync, expect no other timeouts - metadata.overall_safety_properties.num_failed_views = 6; + metadata.overall_safety_properties.num_failed_views = 5; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ @@ -281,7 +281,6 @@ cross_tests!( (ViewNumber::new(17), false), (ViewNumber::new(23), false), (ViewNumber::new(29), false), - (ViewNumber::new(35), false), ]); metadata @@ -337,10 +336,6 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.test_config.epoch_height = 10; - // The first 14 (i.e., 20 - f) nodes are in the DA committee and we may shutdown the - // remaining 6 (i.e., f) nodes. We could remove this restriction after fixing the - // following issue. let dead_nodes = vec![ ChangeNode { idx: 17, @@ -416,7 +411,7 @@ cross_tests!( cross_tests!( TestName: test_all_restart_epochs, Impls: [CombinedImpl, PushCdnImpl], - Types: [TestTypes, TestTypesRandomizedLeader], + Types: [TestTypes, TestTypesRandomizedLeader, TestTwoStakeTablesTypes], Versions: [EpochsTestVersions], Ignore: false, Metadata: { @@ -459,3 +454,106 @@ cross_tests!( metadata }, ); + +cross_tests!( + TestName: test_all_restart_one_da_with_epochs, + Impls: [CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let timing_data = TimingData { + next_view_timeout: 2000, + ..Default::default() + }; + let mut metadata = TestDescription::default().set_num_nodes(20,2); + + let mut catchup_nodes = vec![]; + for i in 0..20 { + catchup_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }) + } + + metadata.timing_data = timing_data; + + metadata.spinning_properties = SpinningTaskDescription { + // Restart all the nodes in view 10 + node_changes: vec![(10, catchup_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); + + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 15, + ..Default::default() + }; + + metadata + }, +); + +cross_tests!( + TestName: test_staggered_restart_with_epochs, + Impls: [CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let mut metadata = TestDescription::default().set_num_nodes(20,4); + + let mut down_da_nodes = vec![]; + for i in 2..4 { + down_da_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(20), + }); + } + + let mut down_regular_nodes = vec![]; + for i in 4..20 { + down_regular_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }); + } + // restart the last da so it gets the new libp2p routing table + for i in 0..2 { + down_regular_nodes.push(ChangeNode { + idx: i, + updown: NodeAction::RestartDown(0), + }); + } + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(10, down_da_nodes), (30, down_regular_nodes)], + }; + metadata.view_sync_properties = + hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 50); + + // Give the test some extra time because we are purposely timing out views + metadata.completion_task_description = + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(240), + }, + ); + metadata.overall_safety_properties = OverallSafetyPropertiesDescription { + // Make sure we keep committing rounds after the catchup, but not the full 50. + num_successful_views: 22, + num_failed_views: 30, + ..Default::default() + }; + + metadata + }, +); diff --git a/types/src/data.rs b/types/src/data.rs index 33c44567c6..47fae4a823 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -30,7 +30,7 @@ use vid_disperse::{ADVZDisperse, ADVZDisperseShare, VidDisperseShare2}; use crate::{ drb::DrbResult, - impl_has_epoch, + impl_has_epoch, impl_has_none_epoch, message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, @@ -593,6 +593,9 @@ pub struct QuorumProposal2 { /// view number for the proposal pub view_number: TYPES::View, + /// The epoch number corresponding to the block number. Can be `None` for pre-epoch version. + pub epoch: Option, + /// certificate that the proposal is chaining from pub justify_qc: QuorumCertificate2, @@ -619,9 +622,28 @@ pub struct QuorumProposal2 { pub struct QuorumProposalWrapper { /// The wrapped proposal pub proposal: QuorumProposal2, +} - /// Indicates whether or not epochs were enabled for this proposal. This is checked when building a QuorumProposalWrapper - pub with_epoch: bool, +impl QuorumProposal2 { + /// Validates whether the epoch is consistent with the version and the block number + /// # Errors + /// Returns an error if the epoch is inconsistent with the version or the block number + pub async fn validate_epoch( + &self, + upgrade_lock: &UpgradeLock, + epoch_height: u64, + ) -> Result<()> { + let calculated_epoch = option_epoch_from_block_number::( + upgrade_lock.epochs_enabled(self.view_number()).await, + self.block_header.block_number(), + epoch_height, + ); + ensure!( + calculated_epoch == self.epoch(), + "Quorum proposal invalid: inconsistent epoch." + ); + Ok(()) + } } impl QuorumProposalWrapper { @@ -659,13 +681,25 @@ impl QuorumProposalWrapper { pub fn next_drb_result(&self) -> &Option { &self.proposal.next_drb_result } + + /// Validates whether the epoch is consistent with the version and the block number + /// # Errors + /// Returns an error if the epoch is inconsistent with the version or the block number + pub async fn validate_epoch( + &self, + upgrade_lock: &UpgradeLock, + epoch_height: u64, + ) -> Result<()> { + self.proposal + .validate_epoch(upgrade_lock, epoch_height) + .await + } } impl From> for QuorumProposalWrapper { fn from(quorum_proposal: QuorumProposal) -> Self { Self { proposal: quorum_proposal.into(), - with_epoch: false, } } } @@ -674,7 +708,6 @@ impl From> for QuorumProposalWrapper) -> Self { Self { proposal: quorum_proposal2, - with_epoch: true, } } } @@ -696,6 +729,7 @@ impl From> for QuorumProposal2 { Self { block_header: quorum_proposal.block_header, view_number: quorum_proposal.view_number, + epoch: None, justify_qc: quorum_proposal.justify_qc.to_qc2(), next_epoch_justify_qc: None, upgrade_certificate: quorum_proposal.upgrade_certificate, @@ -776,7 +810,22 @@ impl HasViewNumber for UpgradeProposal { } } -impl_has_epoch!(DaProposal2); +impl_has_epoch!(QuorumProposal2, DaProposal2); + +impl_has_none_epoch!( + QuorumProposal, + DaProposal, + UpgradeProposal, + ADVZDisperseShare +); + +impl HasEpoch for QuorumProposalWrapper { + /// Return an underlying proposal's epoch + #[allow(clippy::panic)] + fn epoch(&self) -> Option { + self.proposal.epoch() + } +} /// The error type for block and its transactions. #[derive(Error, Debug, Serialize, Deserialize)] @@ -1525,6 +1574,7 @@ impl Leaf2 { proposal: QuorumProposal2 { view_number, + epoch, justify_qc, next_epoch_justify_qc, block_header, @@ -1532,7 +1582,6 @@ impl Leaf2 { view_change_evidence, next_drb_result, }, - with_epoch, } = quorum_proposal; Self { @@ -1545,7 +1594,7 @@ impl Leaf2 { block_payload: None, view_change_evidence: view_change_evidence.clone(), next_drb_result: *next_drb_result, - with_epoch: *with_epoch, + with_epoch: epoch.is_some(), } } } diff --git a/types/src/message.rs b/types/src/message.rs index 7dca495314..92998ca974 100644 --- a/types/src/message.rs +++ b/types/src/message.rs @@ -37,7 +37,7 @@ use crate::{ ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate, ViewSyncPreCommitCertificate2, }, simple_vote::{ - DaVote, DaVote2, QuorumVote, QuorumVote2, TimeoutVote, TimeoutVote2, UpgradeVote, + DaVote, DaVote2, HasEpoch, QuorumVote, QuorumVote2, TimeoutVote, TimeoutVote2, UpgradeVote, ViewSyncCommitVote, ViewSyncCommitVote2, ViewSyncFinalizeVote, ViewSyncFinalizeVote2, ViewSyncPreCommitVote, ViewSyncPreCommitVote2, }, @@ -168,6 +168,22 @@ impl ViewMessage for MessageKind { } } +impl HasEpoch for MessageKind { + fn epoch(&self) -> Option { + match &self { + MessageKind::Consensus(message) => message.epoch_number(), + MessageKind::Data( + DataMessage::SubmitTransaction(_, _) | DataMessage::RequestData(_), + ) + | MessageKind::External(_) => None, + MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { + ResponseMessage::Found(m) => m.epoch_number(), + ResponseMessage::NotFound | ResponseMessage::Denied => None, + }, + } + } +} + #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = "", serialize = ""))] /// Messages related to both validating and sequencing consensus. @@ -376,6 +392,76 @@ impl SequencingMessage { } } } + + /// Get the epoch number this message relates to, if applicable + fn epoch_number(&self) -> Option { + match &self { + SequencingMessage::General(general_message) => { + match general_message { + GeneralConsensusMessage::Proposal(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.epoch() + } + GeneralConsensusMessage::Proposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.epoch() + } + GeneralConsensusMessage::ProposalRequested(_, _) => None, + GeneralConsensusMessage::ProposalResponse(proposal) => proposal.data.epoch(), + GeneralConsensusMessage::ProposalResponse2(proposal) => proposal.data.epoch(), + GeneralConsensusMessage::Vote(vote_message) => vote_message.epoch(), + GeneralConsensusMessage::Vote2(vote_message) => vote_message.epoch(), + GeneralConsensusMessage::TimeoutVote(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncPreCommitVote(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncCommitVote(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { + message.epoch() + } + GeneralConsensusMessage::ViewSyncCommitCertificate(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { + message.epoch() + } + GeneralConsensusMessage::TimeoutVote2(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { + message.epoch() + } + GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => message.epoch(), + GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { + message.epoch() + } + GeneralConsensusMessage::UpgradeProposal(message) => message.data.epoch(), + GeneralConsensusMessage::UpgradeVote(message) => message.epoch(), + GeneralConsensusMessage::HighQc(qc) => qc.epoch(), + } + } + SequencingMessage::Da(da_message) => { + match da_message { + DaConsensusMessage::DaProposal(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.epoch() + } + DaConsensusMessage::DaVote(vote_message) => vote_message.epoch(), + DaConsensusMessage::DaCertificate(cert) => cert.epoch(), + DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.epoch(), + DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.epoch(), + DaConsensusMessage::DaProposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.epoch() + } + DaConsensusMessage::DaVote2(vote_message) => vote_message.epoch(), + DaConsensusMessage::DaCertificate2(cert) => cert.epoch(), + } + } + } + } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] @@ -397,7 +483,10 @@ pub enum DataMessage { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] #[serde(bound(deserialize = ""))] /// Prepare qc from the leader -pub struct Proposal + DeserializeOwned> { +pub struct Proposal< + TYPES: NodeType, + PROPOSAL: HasViewNumber + HasEpoch + DeserializeOwned, +> { // NOTE: optimization could include view number to help look up parent leaf // could even do 16 bit numbers if we want /// The data being proposed. @@ -414,8 +503,8 @@ pub fn convert_proposal( ) -> Proposal where TYPES: NodeType, - PROPOSAL: HasViewNumber + DeserializeOwned, - PROPOSAL2: HasViewNumber + DeserializeOwned + From, + PROPOSAL: HasViewNumber + HasEpoch + DeserializeOwned, + PROPOSAL2: HasViewNumber + HasEpoch + DeserializeOwned + From, { Proposal { data: proposal.data.into(), @@ -497,7 +586,7 @@ where ) -> Result<()> { let view_number = self.data.proposal.view_number(); let proposal_epoch = option_epoch_from_block_number::( - self.data.with_epoch, + self.data.proposal.epoch().is_some(), self.data.block_header().block_number(), epoch_height, ); diff --git a/types/src/simple_certificate.rs b/types/src/simple_certificate.rs index 0c98f84ba1..4a1ba867ca 100644 --- a/types/src/simple_certificate.rs +++ b/types/src/simple_certificate.rs @@ -24,8 +24,8 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, DaData2, NextEpochQuorumData2, QuorumData, QuorumData2, QuorumMarker, TimeoutData, - TimeoutData2, UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, + DaData, DaData2, HasEpoch, NextEpochQuorumData2, QuorumData, QuorumData2, QuorumMarker, + TimeoutData, TimeoutData2, UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncCommitData2, ViewSyncFinalizeData, ViewSyncFinalizeData2, ViewSyncPreCommitData, ViewSyncPreCommitData2, Voteable, }, @@ -420,6 +420,18 @@ impl + 'static, THRESHOLD: Threshold< self.view_number } } + +impl< + TYPES: NodeType, + VOTEABLE: Voteable + HasEpoch + 'static, + THRESHOLD: Threshold, + > HasEpoch for SimpleCertificate +{ + fn epoch(&self) -> Option { + self.data.epoch() + } +} + impl Display for QuorumCertificate { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "view: {:?}", self.view_number) diff --git a/types/src/simple_vote.rs b/types/src/simple_vote.rs index 1dae6fc72e..d17dd3cc10 100644 --- a/types/src/simple_vote.rs +++ b/types/src/simple_vote.rs @@ -565,7 +565,32 @@ impl_has_epoch!( TimeoutData2, ViewSyncPreCommitData2, ViewSyncCommitData2, - ViewSyncFinalizeData2 + ViewSyncFinalizeData2, + UpgradeData2 +); + +/// Helper macro for trivial implementation of the `HasEpoch` trait for types that have no epoch +#[macro_export] +macro_rules! impl_has_none_epoch { + ($($t:ty),*) => { + $( + impl HasEpoch for $t { + fn epoch(&self) -> Option { + None + } + } + )* + }; +} + +impl_has_none_epoch!( + QuorumData, + DaData, + TimeoutData, + ViewSyncPreCommitData, + ViewSyncCommitData, + ViewSyncFinalizeData, + UpgradeProposalData ); impl + HasEpoch> HasEpoch diff --git a/types/src/traits/storage.rs b/types/src/traits/storage.rs index a2a6658c00..de1451e566 100644 --- a/types/src/traits/storage.rs +++ b/types/src/traits/storage.rs @@ -82,7 +82,12 @@ pub trait Storage: Send + Sync + Clone { .await } /// Record a HotShotAction taken. - async fn record_action(&self, view: TYPES::View, action: HotShotAction) -> Result<()>; + async fn record_action( + &self, + view: TYPES::View, + epoch: Option, + action: HotShotAction, + ) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the current high QC in storage. From e09af46883c467265c469a744de4ffdc503cccbc Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Mon, 3 Feb 2025 11:52:59 -0800 Subject: [PATCH 1380/1393] add tests for option_epoch_from_block_number and friends (#4073) --- example-types/src/node_types.rs | 45 ++++++++++++++++++++++++++++++++- types/src/utils.rs | 23 +++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 40b79e0b46..5b2ffdf531 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -336,10 +336,13 @@ mod tests { message::UpgradeLock, simple_vote::{HasEpoch, VersionedVoteData}, traits::node_implementation::ConsensusTime, + utils::{genesis_epoch_from_version, option_epoch_from_block_number}, }; use serde::{Deserialize, Serialize}; - use crate::node_types::{MarketplaceTestVersions, NodeType, TestTypes}; + use crate::node_types::{ + EpochsTestVersions, MarketplaceTestVersions, NodeType, TestTypes, TestVersions, + }; #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Hash, Eq)] /// Dummy data used for test struct TestData { @@ -393,4 +396,44 @@ mod tests { "left: {versioned_data_commitment_0:?}, right: {versioned_data_commitment_1:?}" ); } + + #[test] + fn test_option_epoch_from_block_number() { + // block 0 is always epoch 0 + let epoch = option_epoch_from_block_number::(true, 0, 10); + assert_eq!(Some(::Epoch::new(0)), epoch); + + let epoch = option_epoch_from_block_number::(true, 1, 10); + assert_eq!(Some(::Epoch::new(1)), epoch); + + let epoch = option_epoch_from_block_number::(true, 10, 10); + assert_eq!(Some(::Epoch::new(1)), epoch); + + let epoch = option_epoch_from_block_number::(true, 11, 10); + assert_eq!(Some(::Epoch::new(2)), epoch); + + let epoch = option_epoch_from_block_number::(true, 20, 10); + assert_eq!(Some(::Epoch::new(2)), epoch); + + let epoch = option_epoch_from_block_number::(true, 21, 10); + assert_eq!(Some(::Epoch::new(3)), epoch); + + let epoch = option_epoch_from_block_number::(true, 21, 0); + assert_eq!(None, epoch); + + let epoch = option_epoch_from_block_number::(false, 21, 10); + assert_eq!(None, epoch); + + let epoch = option_epoch_from_block_number::(false, 21, 0); + assert_eq!(None, epoch); + } + + #[test] + fn test_genesis_epoch_from_version() { + let epoch = genesis_epoch_from_version::(); + assert_eq!(None, epoch); + + let epoch = genesis_epoch_from_version::(); + assert_eq!(Some(::Epoch::new(0)), epoch); + } } diff --git a/types/src/utils.rs b/types/src/utils.rs index c22d4b43df..6d7dcd170e 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -361,6 +361,29 @@ mod test { let epoch = epoch_from_block_number(21, 10); assert_eq!(3, epoch); + + let epoch = epoch_from_block_number(21, 0); + assert_eq!(0, epoch); + } + + #[test] + fn test_is_last_block_in_epoch() { + assert!(!is_last_block_in_epoch(8, 10)); + assert!(!is_last_block_in_epoch(9, 10)); + assert!(is_last_block_in_epoch(10, 10)); + assert!(!is_last_block_in_epoch(11, 10)); + + assert!(!is_last_block_in_epoch(10, 0)); + } + + #[test] + fn test_is_epoch_root() { + assert!(is_epoch_root(8, 10)); + assert!(!is_epoch_root(9, 10)); + assert!(!is_epoch_root(10, 10)); + assert!(!is_epoch_root(11, 10)); + + assert!(!is_last_block_in_epoch(10, 0)); } #[test] From 8c7ec95ce66b2d8c55166b963e27a53f38533f10 Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 4 Feb 2025 16:40:20 +0100 Subject: [PATCH 1381/1393] Adapt VID request / response to epochs (#4106) * Add crash CDN test with epochs * Initial commit * Sent to self even when network down * Check if VID share exists * Request VID share only if we need it * Cleanup --- .../src/traits/networking/push_cdn_network.rs | 12 ++--- task-impls/src/da.rs | 15 ++++++ task-impls/src/request.rs | 51 +++++++++++-------- task-impls/src/response.rs | 21 ++++++-- testing/tests/tests_6/test_epochs.rs | 49 +++++++++++++++++- types/src/consensus.rs | 3 +- 6 files changed, 117 insertions(+), 34 deletions(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 3601c38f2d..02ac272599 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -536,18 +536,18 @@ impl ConnectedNetwork for PushCdnNetwork { /// - If we fail to serialize the message /// - If we fail to send the direct message async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { - // If we're paused, don't send the message - #[cfg(feature = "hotshot-testing")] - if self.is_paused.load(Ordering::Relaxed) { - return Ok(()); - } - // If the message is to ourselves, just add it to the internal queue if recipient == self.public_key { self.internal_queue.lock().push_back(message); return Ok(()); } + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } + // Send the message if let Err(e) = self .client diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 8c759605a5..09320e015d 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -10,6 +10,7 @@ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; +use hotshot_types::simple_vote::HasEpoch; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, data::{DaProposal2, PackedBundle}, @@ -236,10 +237,24 @@ impl, V: Versions> DaTaskState( OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), view_number, + target_epoch, membership, &pk, &upgrade_lock, diff --git a/task-impls/src/request.rs b/task-impls/src/request.rs index dc2bc15f2c..9f6b144194 100644 --- a/task-impls/src/request.rs +++ b/task-impls/src/request.rs @@ -30,7 +30,7 @@ use hotshot_types::{ node_implementation::{NodeImplementation, NodeType}, signature_key::SignatureKey, }, - utils::option_epoch_from_block_number, + utils::is_last_block_in_epoch, vote::HasViewNumber, }; use rand::{seq::SliceRandom, thread_rng}; @@ -113,21 +113,32 @@ impl> TaskState for NetworkRequest match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.data.view_number(); - let prop_epoch = option_epoch_from_block_number::( - proposal.data.epoch().is_some(), - proposal.data.block_header().block_number(), - self.epoch_height, - ); + let prop_epoch = proposal.data.epoch(); + let next_epoch = prop_epoch.map(|epoch| epoch + 1); + + // Request VID share only if: + // 1. we are part of the current epoch or + // 2. we are part of the next epoch and this is a proposal for the last block. + let membership_reader = self.membership.read().await; + if !membership_reader.has_stake(&self.public_key, prop_epoch) + && (!membership_reader.has_stake(&self.public_key, next_epoch) + || !is_last_block_in_epoch( + proposal.data.block_header().block_number(), + self.epoch_height, + )) + { + return Ok(()); + } + drop(membership_reader); + let consensus_reader = self.consensus.read().await; + let maybe_vid_share = consensus_reader + .vid_shares() + .get(&prop_view) + .and_then(|shares| shares.get(&self.public_key)); // If we already have the VID shares for the next view, do nothing. - if prop_view >= self.view - && !self - .consensus - .read() - .await - .vid_shares() - .contains_key(&prop_view) - { + if prop_view >= self.view && maybe_vid_share.is_none() { + drop(consensus_reader); self.spawn_requests(prop_view, prop_epoch, sender, receiver) .await; } @@ -361,15 +372,15 @@ impl> NetworkRequestState bool { let consensus_reader = consensus.read().await; + let maybe_vid_share = consensus_reader + .vid_shares() + .get(view) + .and_then(|shares| shares.get(public_key)); let cancel = shutdown_flag.load(Ordering::Relaxed) - || consensus_reader.vid_shares().contains_key(view) + || maybe_vid_share.is_some() || consensus_reader.cur_view() > *view; if cancel { - if let Some(Some(vid_share)) = consensus_reader - .vid_shares() - .get(view) - .map(|shares| shares.get(public_key).cloned()) - { + if let Some(vid_share) = maybe_vid_share { broadcast_event( Arc::new(HotShotEvent::VidShareRecv( public_key.clone(), diff --git a/task-impls/src/response.rs b/task-impls/src/response.rs index 35be5523f4..f0000a7c14 100644 --- a/task-impls/src/response.rs +++ b/task-impls/src/response.rs @@ -84,14 +84,22 @@ impl NetworkResponseState { match event.as_ref() { HotShotEvent::VidRequestRecv(request, sender) => { let cur_epoch = self.consensus.read().await.cur_epoch(); + let next_epoch = cur_epoch.map(|epoch| epoch + 1); + let target_epoch = if self.valid_sender(sender, cur_epoch).await { + cur_epoch + } else if self.valid_sender(sender, next_epoch).await { + next_epoch + } else { + // The sender neither belongs to the current nor to the next epoch. + continue; + }; // Verify request is valid - if !self.valid_sender(sender, cur_epoch).await - || !valid_signature::(request, sender) - { + if !valid_signature::(request, sender) { continue; } - if let Some(proposal) = - self.get_or_calc_vid_share(request.view, sender).await + if let Some(proposal) = self + .get_or_calc_vid_share(request.view, target_epoch, sender) + .await { broadcast_event( HotShotEvent::VidResponseSend( @@ -151,6 +159,7 @@ impl NetworkResponseState { async fn get_or_calc_vid_share( &self, view: TYPES::View, + target_epoch: Option, key: &TYPES::SignatureKey, ) -> Option>> { let consensus_reader = self.consensus.read().await; @@ -165,6 +174,7 @@ impl NetworkResponseState { if Consensus::calculate_and_update_vid::( OuterConsensus::new(Arc::clone(&self.consensus)), view, + target_epoch, Arc::clone(&self.membership), &self.private_key, &self.upgrade_lock, @@ -177,6 +187,7 @@ impl NetworkResponseState { Consensus::calculate_and_update_vid::( OuterConsensus::new(Arc::clone(&self.consensus)), view, + target_epoch, Arc::clone(&self.membership), &self.private_key, &self.upgrade_lock, diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs index 191ac94e6c..ae2a6fdd3a 100644 --- a/testing/tests/tests_6/test_epochs.rs +++ b/testing/tests/tests_6/test_epochs.rs @@ -4,8 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashMap, time::Duration}; - use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, @@ -24,6 +22,7 @@ use hotshot_testing::{ view_sync_task::ViewSyncTaskDescription, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use std::{collections::HashMap, time::Duration}; cross_tests!( TestName: test_success_with_epochs, @@ -557,3 +556,49 @@ cross_tests!( metadata }, ); + +// A run where the CDN crashes part-way through, epochs enabled. +cross_tests!( + TestName: test_combined_network_cdn_crash_with_epochs, + Impls: [CombinedImpl], + Types: [TestTypes, TestTwoStakeTablesTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + let timing_data = TimingData { + next_view_timeout: 10_000, + ..Default::default() + }; + + let overall_safety_properties = OverallSafetyPropertiesDescription { + num_failed_views: 0, + num_successful_views: 35, + ..Default::default() + }; + + let completion_task_description = CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(220), + }, + ); + + let mut metadata = TestDescription::default_multiple_rounds(); + metadata.timing_data = timing_data; + metadata.overall_safety_properties = overall_safety_properties; + metadata.completion_task_description = completion_task_description; + + let mut all_nodes = vec![]; + for node in 0..metadata.test_config.num_nodes_with_stake.into() { + all_nodes.push(ChangeNode { + idx: node, + updown: NodeAction::NetworkDown, + }); + } + + metadata.spinning_properties = SpinningTaskDescription { + node_changes: vec![(5, all_nodes)], + }; + + metadata + }, +); diff --git a/types/src/consensus.rs b/types/src/consensus.rs index c6cc918444..a5b75a1b3c 100644 --- a/types/src/consensus.rs +++ b/types/src/consensus.rs @@ -955,6 +955,7 @@ impl Consensus { pub async fn calculate_and_update_vid( consensus: OuterConsensus, view: ::View, + target_epoch: Option<::Epoch>, membership: Arc>, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -972,7 +973,7 @@ impl Consensus { payload.as_ref(), &membership, view, - epoch, + target_epoch, epoch, upgrade_lock, ) From 2fcb2a2834a7ab7d3b3d0cc5bf9ef0223d8f8849 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 4 Feb 2025 11:11:46 -0500 Subject: [PATCH 1382/1393] Lower CI test logs (#4092) --- hotshot/src/tasks/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index e7e4036fc0..1d50c16bd7 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -164,7 +164,7 @@ pub fn add_network_message_task< message } Err(e) => { - tracing::error!("Failed to receive message: {:?}", e); + tracing::trace!("Failed to receive message: {:?}", e); continue; } }; From 6505cbaea1e48881851de24306346c6a4c8d4b45 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Tue, 4 Feb 2025 15:20:12 -0800 Subject: [PATCH 1383/1393] #4039 remove stake_table check in libp2p, rename stake_table to membership (#4110) --- .../src/traits/networking/libp2p_network.rs | 2 +- libp2p-networking/src/network/node.rs | 4 +- libp2p-networking/src/network/node/config.rs | 4 +- libp2p-networking/src/network/transport.rs | 81 +++++-------------- task-impls/src/da.rs | 3 +- task-impls/src/quorum_vote/handlers.rs | 22 ++--- testing/tests/tests_6/test_epochs.rs | 3 +- 7 files changed, 41 insertions(+), 78 deletions(-) diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index d288c3e360..c41c710a1d 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -426,7 +426,7 @@ impl Libp2pNetwork { // Set the auth message and stake table config_builder - .stake_table(Some(quorum_membership)) + .membership(Some(quorum_membership)) .auth_message(Some(auth_message)); // The replication factor is the minimum of [the default and 2/3 the number of nodes] diff --git a/libp2p-networking/src/network/node.rs b/libp2p-networking/src/network/node.rs index 103af0e764..28d009f846 100644 --- a/libp2p-networking/src/network/node.rs +++ b/libp2p-networking/src/network/node.rs @@ -180,10 +180,10 @@ impl NetworkNode { // Get the `PeerId` from the `KeyPair` let peer_id = PeerId::from(keypair.public()); - // Generate the transport from the keypair, stake table, and auth message + // Generate the transport from the keypair, membership, and auth message let transport: BoxedTransport = gen_transport::( keypair.clone(), - config.stake_table.clone(), + config.membership.clone(), config.auth_message.clone(), ) .await?; diff --git a/libp2p-networking/src/network/node/config.rs b/libp2p-networking/src/network/node/config.rs index 3958d17f6d..183e98d34b 100644 --- a/libp2p-networking/src/network/node/config.rs +++ b/libp2p-networking/src/network/node/config.rs @@ -54,7 +54,7 @@ pub struct NetworkNodeConfig { /// The stake table. Used for authenticating other nodes. If not supplied /// we will not check other nodes against the stake table #[builder(default)] - pub stake_table: Option>>, + pub membership: Option>>, /// The path to the file to save the DHT to #[builder(default)] @@ -81,7 +81,7 @@ impl Clone for NetworkNodeConfig { to_connect_addrs: self.to_connect_addrs.clone(), republication_interval: self.republication_interval, ttl: self.ttl, - stake_table: self.stake_table.as_ref().map(Arc::clone), + membership: self.membership.as_ref().map(Arc::clone), dht_file_path: self.dht_file_path.clone(), auth_message: self.auth_message.clone(), dht_timeout: self.dht_timeout, diff --git a/libp2p-networking/src/network/transport.rs b/libp2p-networking/src/network/transport.rs index ca27124ea4..01e94e6b90 100644 --- a/libp2p-networking/src/network/transport.rs +++ b/libp2p-networking/src/network/transport.rs @@ -9,9 +9,7 @@ use std::{ use anyhow::{ensure, Context, Result as AnyhowResult}; use async_lock::RwLock; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use hotshot_types::traits::{ - election::Membership, node_implementation::NodeType, signature_key::SignatureKey, -}; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use libp2p::{ core::{ muxing::StreamMuxerExt, @@ -45,7 +43,7 @@ pub struct StakeTableAuthentication>>>, + pub membership: Arc>>>, /// A pre-signed message that we send to the remote peer for authentication pub auth_message: Arc>>, @@ -63,12 +61,12 @@ impl StakeTableAuthentica /// and authenticates connections against the stake table. pub fn new( inner: T, - stake_table: Option>>, + membership: Option>>, auth_message: Option>, ) -> Self { Self { inner, - stake_table: Arc::from(stake_table), + membership: Arc::from(membership), auth_message: Arc::from(auth_message), pd: std::marker::PhantomData, } @@ -108,11 +106,11 @@ impl StakeTableAuthentica /// - The signature is invalid pub async fn verify_peer_authentication( stream: &mut R, - stake_table: Arc>>>, + membership: Arc>>>, required_peer_id: &PeerId, ) -> AnyhowResult<()> { - // If we have a stake table, check if the remote peer is in it - if let Some(stake_table) = stake_table.as_ref() { + // If we have a membership, read message and validate + if membership.is_some() { // Read the length-delimited message from the remote peer let message = read_length_delimited(stream, MAX_AUTH_MESSAGE_SIZE).await?; @@ -121,7 +119,7 @@ impl StakeTableAuthentica .with_context(|| "Failed to deserialize auth message")?; // Verify the signature on the public keys - let public_key = auth_message + auth_message .validate() .with_context(|| "Failed to verify authentication message")?; @@ -133,11 +131,6 @@ impl StakeTableAuthentica if peer_id != *required_peer_id { return Err(anyhow::anyhow!("Peer ID mismatch")); } - - // Check if the public key is in the stake table - if !stake_table.read().await.has_stake(&public_key, None) { - return Err(anyhow::anyhow!("Peer not in stake table")); - } } Ok(()) @@ -150,7 +143,7 @@ impl StakeTableAuthentica fn gen_handshake> + Send + 'static>( original_future: F, outgoing: bool, - stake_table: Arc>>>, + membership: Arc>>>, auth_message: Arc>>, ) -> UpgradeFuture where @@ -186,7 +179,7 @@ impl StakeTableAuthentica // Verify the remote peer's authentication Self::verify_peer_authentication( &mut substream, - stake_table, + membership, stream.as_peer_id(), ) .await @@ -198,7 +191,7 @@ impl StakeTableAuthentica // If it is incoming, verify the remote peer's authentication first Self::verify_peer_authentication( &mut substream, - stake_table, + membership, stream.as_peer_id(), ) .await @@ -324,11 +317,11 @@ where // Clone the necessary fields let auth_message = Arc::clone(&self.auth_message); - let stake_table = Arc::clone(&self.stake_table); + let membership = Arc::clone(&self.membership); // If the dial was successful, perform the authentication handshake on top match res { - Ok(dial) => Ok(Self::gen_handshake(dial, true, stake_table, auth_message)), + Ok(dial) => Ok(Self::gen_handshake(dial, true, membership, auth_message)), Err(err) => Err(err), } } @@ -352,11 +345,11 @@ where } => { // Clone the necessary fields let auth_message = Arc::clone(&self.auth_message); - let stake_table = Arc::clone(&self.stake_table); + let membership = Arc::clone(&self.membership); // Generate the handshake upgrade future (inbound) let auth_upgrade = - Self::gen_handshake(upgrade, false, stake_table, auth_message); + Self::gen_handshake(upgrade, false, membership, auth_message); // Return the new event TransportEvent::Incoming { @@ -498,7 +491,9 @@ mod test { use hotshot_example_types::node_types::TestTypes; use hotshot_types::{ - light_client::StateVerKey, signature_key::BLSPubKey, traits::signature_key::SignatureKey, + light_client::StateVerKey, + signature_key::BLSPubKey, + traits::{election::Membership, signature_key::SignatureKey}, PeerConfig, }; use libp2p::{core::transport::dummy::DummyTransport, quic::Connection}; @@ -617,13 +612,13 @@ mod test { stake_table_entry: keypair.0.stake_table_entry(1), state_ver_key: StateVerKey::default(), }; - let stake_table = + let membership = ::Membership::new(vec![peer_config.clone()], vec![peer_config]); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( &mut stream, - Arc::new(Some(Arc::new(RwLock::new(stake_table)))), + Arc::new(Some(Arc::new(RwLock::new(membership)))), &peer_id, ) .await; @@ -634,38 +629,6 @@ mod test { ); } - #[tokio::test(flavor = "multi_thread")] - async fn key_not_in_stake_table() { - // Create a new identity - let (_, peer_id, auth_message) = new_identity!(); - - // Create a stream and write the message to it - let mut stream = cursor_from!(auth_message); - - // Create an empty stake table - let stake_table = Arc::new(RwLock::new(::Membership::new( - vec![], - vec![], - ))); - - // Verify the authentication message - let result = MockStakeTableAuth::verify_peer_authentication( - &mut stream, - Arc::new(Some(stake_table)), - &peer_id, - ) - .await; - - // Make sure it errored for the right reason - assert!( - result - .expect_err("Should have failed authentication but did not") - .to_string() - .contains("Peer not in stake table"), - "Did not fail with the correct error" - ); - } - #[tokio::test(flavor = "multi_thread")] async fn peer_id_mismatch() { // Create a new identity and authentication message @@ -682,7 +645,7 @@ mod test { stake_table_entry: keypair.0.stake_table_entry(1), state_ver_key: StateVerKey::default(), }; - let stake_table = Arc::new(RwLock::new(::Membership::new( + let membership = Arc::new(RwLock::new(::Membership::new( vec![peer_config.clone()], vec![peer_config], ))); @@ -690,7 +653,7 @@ mod test { // Check against the malicious peer ID let result = MockStakeTableAuth::verify_peer_authentication( &mut stream, - Arc::new(Some(stake_table)), + Arc::new(Some(membership)), &malicious_peer_id, ) .await; diff --git a/task-impls/src/da.rs b/task-impls/src/da.rs index 09320e015d..c165abfb72 100644 --- a/task-impls/src/da.rs +++ b/task-impls/src/da.rs @@ -10,14 +10,13 @@ use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; -use hotshot_types::simple_vote::HasEpoch; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, data::{DaProposal2, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, - simple_vote::{DaData2, DaVote2}, + simple_vote::{DaData2, DaVote2, HasEpoch}, traits::{ block_contents::vid_commitment, election::Membership, diff --git a/task-impls/src/quorum_vote/handlers.rs b/task-impls/src/quorum_vote/handlers.rs index 38b63fe4ea..1f8179a29d 100644 --- a/task-impls/src/quorum_vote/handlers.rs +++ b/task-impls/src/quorum_vote/handlers.rs @@ -6,27 +6,17 @@ use std::{collections::btree_map::Entry, sync::Arc}; -use super::QuorumVoteTaskState; -use crate::{ - events::HotShotEvent, - helpers::{ - broadcast_event, decide_from_proposal, decide_from_proposal_2, fetch_proposal, - LeafChainTraversalOutcome, - }, - quorum_vote::Versions, -}; use async_broadcast::{InactiveReceiver, Sender}; use async_lock::RwLock; use chrono::Utc; use committable::Committable; -use hotshot_types::simple_vote::HasEpoch; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposalWrapper, VidDisperseShare}, drb::{compute_drb_result, DrbResult}, event::{Event, EventType}, message::{convert_proposal, Proposal, UpgradeLock}, - simple_vote::{QuorumData2, QuorumVote2}, + simple_vote::{HasEpoch, QuorumData2, QuorumVote2}, traits::{ block_contents::BlockHeader, election::Membership, @@ -46,6 +36,16 @@ use tracing::instrument; use utils::anytrace::*; use vbs::version::StaticVersionType; +use super::QuorumVoteTaskState; +use crate::{ + events::HotShotEvent, + helpers::{ + broadcast_event, decide_from_proposal, decide_from_proposal_2, fetch_proposal, + LeafChainTraversalOutcome, + }, + quorum_vote::Versions, +}; + /// Store the DRB result from the computation task to the shared `results` table. /// /// Returns the result if it exists. diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs index ae2a6fdd3a..e6a311878d 100644 --- a/testing/tests/tests_6/test_epochs.rs +++ b/testing/tests/tests_6/test_epochs.rs @@ -4,6 +4,8 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::{collections::HashMap, time::Duration}; + use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, @@ -22,7 +24,6 @@ use hotshot_testing::{ view_sync_task::ViewSyncTaskDescription, }; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -use std::{collections::HashMap, time::Duration}; cross_tests!( TestName: test_success_with_epochs, From 94f1c060c4e29fdf46fc7a7ef166104b14df8524 Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Wed, 5 Feb 2025 09:11:27 -0700 Subject: [PATCH 1384/1393] Patch TCP back to Quic (#4100) * tcp -> quic patch * Serialization fixes (#4089) * Propagate certificate errors (#4096) * verbose is_valid --------- Co-authored-by: Rob Co-authored-by: ss-es <155648797+ss-es@users.noreply.github.com> --- hotshot/src/traits/networking/push_cdn_network.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index 02ac272599..a742e5f857 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -13,7 +13,7 @@ use std::{path::Path, time::Duration}; use async_trait::async_trait; use bincode::config::Options; use cdn_broker::reexports::{ - connection::protocols::{Tcp, TcpTls}, + connection::protocols::{Quic, Tcp}, def::{hook::NoMessageHook, ConnectionDef, RunDef, Topic as TopicTrait}, discovery::{Embedded, Redis}, }; @@ -149,7 +149,7 @@ impl RunDef for ProductionDef { pub struct UserDef(PhantomData); impl ConnectionDef for UserDef { type Scheme = WrappedSignatureKey; - type Protocol = TcpTls; + type Protocol = Quic; type MessageHook = NoMessageHook; } @@ -169,7 +169,7 @@ impl ConnectionDef for BrokerDef { pub struct ClientDef(PhantomData); impl ConnectionDef for ClientDef { type Scheme = WrappedSignatureKey; - type Protocol = TcpTls; + type Protocol = Quic; type MessageHook = NoMessageHook; } From d776e4023418704cbb23000ec7742eb9484b50a3 Mon Sep 17 00:00:00 2001 From: Phil <184445976+pls148@users.noreply.github.com> Date: Thu, 6 Feb 2025 10:24:27 -0800 Subject: [PATCH 1385/1393] #4045 re-enable epochs tests with dynamic quora (#4103) * #4045 re-enable epochs tests with dynamic quora * Epoch 1 is genesis epoch (#4112) * #4045 re-enable epochs tests with dynamic quora * Remove Epoch 0 * Allow dead code for genesis method for EpochNumber The method is not called at the moment but it's better to have it because otherwise the default genesis method returning 0 might be called by accident. * Adjust test_genesis_epoch_from_version --------- Co-authored-by: pls148 <184445976+pls148@users.noreply.github.com> --------- Co-authored-by: lukaszrzasik --- example-types/src/node_types.rs | 2 +- testing/tests/tests_6/test_epochs.rs | 51 ++++++++++++++++------------ types/src/data.rs | 8 +++++ types/src/utils.rs | 2 +- 4 files changed, 40 insertions(+), 23 deletions(-) diff --git a/example-types/src/node_types.rs b/example-types/src/node_types.rs index 5b2ffdf531..033feb514b 100644 --- a/example-types/src/node_types.rs +++ b/example-types/src/node_types.rs @@ -434,6 +434,6 @@ mod tests { assert_eq!(None, epoch); let epoch = genesis_epoch_from_version::(); - assert_eq!(Some(::Epoch::new(0)), epoch); + assert_eq!(Some(::Epoch::new(1)), epoch); } } diff --git a/testing/tests/tests_6/test_epochs.rs b/testing/tests/tests_6/test_epochs.rs index e6a311878d..595a02da7f 100644 --- a/testing/tests/tests_6/test_epochs.rs +++ b/testing/tests/tests_6/test_epochs.rs @@ -9,8 +9,9 @@ use std::{collections::HashMap, time::Duration}; use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, - PushCdnImpl, TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, - TestTypesRandomizedLeader, + PushCdnImpl, RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, + TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, + TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -48,25 +49,33 @@ cross_tests!( }, ); -// cross_tests!( -// TestName: test_epoch_success, -// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], -// Types: [TestTypes, TestTypesRandomizedLeader, TestTypesRandomizedCommitteeMembers>, TestTypesRandomizedCommitteeMembers>], -// Versions: [EpochsTestVersions], -// Ignore: false, -// Metadata: { -// TestDescription { -// // allow more time to pass in CI -// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( -// TimeBasedCompletionTaskDescription { -// duration: Duration::from_secs(60), -// }, -// ), -// epoch_height: 10, -// ..TestDescription::default() -// } -// }, -// ); +cross_tests!( + TestName: test_epoch_success, + Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], + Types: [ + TestTypes, + TestTypesRandomizedLeader, + TestTypesRandomizedCommitteeMembers>, // Overlap = F + TestTypesRandomizedCommitteeMembers>, // Overlap = F+1 + TestTypesRandomizedCommitteeMembers>, // Overlap = 2F + TestTypesRandomizedCommitteeMembers>, // Overlap = 2F+1 + TestTypesRandomizedCommitteeMembers>, // Overlap = 3F + TestTypesRandomizedCommitteeMembers>, // Overlap = Dynamic + ], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + TestDescription { + // allow more time to pass in CI + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + ..TestDescription::default().set_num_nodes(14, 14) + } + }, +); cross_tests!( TestName: test_success_with_async_delay_with_epochs, diff --git a/types/src/data.rs b/types/src/data.rs index 47fae4a823..3ee1fb5bd4 100644 --- a/types/src/data.rs +++ b/types/src/data.rs @@ -135,6 +135,14 @@ impl Committable for EpochNumber { impl_u64_wrapper!(EpochNumber); +impl EpochNumber { + /// Create a genesis number (1) + #[allow(dead_code)] + fn genesis() -> Self { + Self(1) + } +} + /// A proposal to start providing data availability for a block. #[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound = "TYPES: NodeType")] diff --git a/types/src/utils.rs b/types/src/utils.rs index 6d7dcd170e..b4f8c25ec0 100644 --- a/types/src/utils.rs +++ b/types/src/utils.rs @@ -295,7 +295,7 @@ pub fn option_epoch_from_block_number( /// Returns Some(0) if epochs are enabled by V::Base, otherwise returns None #[must_use] pub fn genesis_epoch_from_version() -> Option { - (V::Base::VERSION >= V::Epochs::VERSION).then(|| TYPES::Epoch::new(0)) + (V::Base::VERSION >= V::Epochs::VERSION).then(|| TYPES::Epoch::new(1)) } /// A function for generating a cute little user mnemonic from a hash From 601485864d414372a4f056be7b188b67ddc6a042 Mon Sep 17 00:00:00 2001 From: Alysia Tech Date: Thu, 6 Feb 2025 23:04:53 -0500 Subject: [PATCH 1386/1393] add admin only functions (#2512) * add admin only functions * use Ownable for admin management * remove AdminUpdated event * register with variable stake amount * rename test --- contracts/src/StakeTable.sol | 75 +++++++++--- .../src/interfaces/AbstractStakeTable.sol | 12 ++ contracts/test/StakeTable.t.sol | 109 +++++++++++++++++- 3 files changed, 174 insertions(+), 22 deletions(-) diff --git a/contracts/src/StakeTable.sol b/contracts/src/StakeTable.sol index 54b3a6cdc5..e48e450d09 100644 --- a/contracts/src/StakeTable.sol +++ b/contracts/src/StakeTable.sol @@ -6,11 +6,12 @@ import { BLSSig } from "./libraries/BLSSig.sol"; import { AbstractStakeTable } from "./interfaces/AbstractStakeTable.sol"; import { LightClient } from "../src/LightClient.sol"; import { EdOnBN254 } from "./libraries/EdOnBn254.sol"; +import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; using EdOnBN254 for EdOnBN254.EdOnBN254Point; /// @title Implementation of the Stake Table interface -contract StakeTable is AbstractStakeTable { +contract StakeTable is AbstractStakeTable, Ownable { /// Error to notify restaking is not implemented yet. error RestakingNotImplemented(); @@ -64,6 +65,15 @@ contract StakeTable is AbstractStakeTable { // Error raised when zero point keys are provided error NoKeyChange(); + /// Error raised when the caller is not the owner + error Unauthorized(); + + /// Error raised when the light client address is invalid + error InvalidAddress(); + + /// Error raised when the value is invalid + error InvalidValue(); + /// Mapping from a hash of a BLS key to a node struct defined in the abstract contract. mapping(address account => Node node) public nodes; @@ -92,7 +102,16 @@ contract StakeTable is AbstractStakeTable { uint64 public maxChurnRate; - constructor(address _tokenAddress, address _lightClientAddress, uint64 churnRate) { + uint256 public minStakeAmount; + + /// TODO change constructor to initialize function when we make the contract upgradeable + constructor( + address _tokenAddress, + address _lightClientAddress, + uint64 churnRate, + uint256 _minStakeAmount, + address initialOwner + ) Ownable(initialOwner) { tokenAddress = _tokenAddress; lightClient = LightClient(_lightClientAddress); @@ -105,6 +124,8 @@ contract StakeTable is AbstractStakeTable { // It is not possible to exit during the first epoch. firstAvailableExitEpoch = 1; _numPendingExits = 0; + + minStakeAmount = _minStakeAmount; } /// @dev Computes a hash value of some G2 point. @@ -269,10 +290,8 @@ contract StakeTable is AbstractStakeTable { BN254.G1Point memory blsSig, uint64 validUntilEpoch ) external override { - uint256 fixedStakeAmount = minStakeAmount(); - // Verify that the sender amount is the minStakeAmount - if (amount < fixedStakeAmount) { + if (amount < minStakeAmount) { revert InsufficientStakeAmount(amount); } @@ -285,13 +304,13 @@ contract StakeTable is AbstractStakeTable { // Verify that this contract has permissions to access the validator's stake token. uint256 allowance = ERC20(tokenAddress).allowance(msg.sender, address(this)); - if (allowance < fixedStakeAmount) { - revert InsufficientAllowance(allowance, fixedStakeAmount); + if (allowance < amount) { + revert InsufficientAllowance(allowance, amount); } // Verify that the validator has the balance for this stake token. uint256 balance = ERC20(tokenAddress).balanceOf(msg.sender); - if (balance < fixedStakeAmount) { + if (balance < amount) { revert InsufficientBalance(balance); } @@ -330,23 +349,21 @@ contract StakeTable is AbstractStakeTable { appendRegistrationQueue(registerEpoch, queueSize); // Transfer the stake amount of ERC20 tokens from the sender to this contract. - SafeTransferLib.safeTransferFrom( - ERC20(tokenAddress), msg.sender, address(this), fixedStakeAmount - ); + SafeTransferLib.safeTransferFrom(ERC20(tokenAddress), msg.sender, address(this), amount); // Update the total staked amount - totalStake += fixedStakeAmount; + totalStake += amount; // Create an entry for the node. node.account = msg.sender; - node.balance = fixedStakeAmount; + node.balance = amount; node.blsVK = blsVK; node.schnorrVK = schnorrVK; node.registerEpoch = registerEpoch; nodes[msg.sender] = node; - emit Registered(msg.sender, registerEpoch, fixedStakeAmount); + emit Registered(msg.sender, registerEpoch, amount); } /// @notice Deposit more stakes to registered keys @@ -524,10 +541,30 @@ contract StakeTable is AbstractStakeTable { emit UpdatedConsensusKeys(msg.sender, node.blsVK, node.schnorrVK); } - /// @notice Minimum stake amount - /// @return Minimum stake amount - /// TODO: This value should be a variable modifiable by admin - function minStakeAmount() public pure returns (uint256) { - return 10 ether; + /// @notice Update the min stake amount + /// @dev The min stake amount cannot be set to zero + /// @param _minStakeAmount The new min stake amount + function updateMinStakeAmount(uint256 _minStakeAmount) external onlyOwner { + if (_minStakeAmount == 0) revert InvalidValue(); + minStakeAmount = _minStakeAmount; + emit MinStakeAmountUpdated(minStakeAmount); + } + + /// @notice Update the max churn rate + /// @dev The max churn rate cannot be set to zero + /// @param _maxChurnRate The new max churn rate + function updateMaxChurnRate(uint64 _maxChurnRate) external onlyOwner { + if (_maxChurnRate == 0) revert InvalidValue(); + maxChurnRate = _maxChurnRate; + emit MaxChurnRateUpdated(maxChurnRate); + } + + /// @notice Update the light client address + /// @dev The light client address cannot be set to the zero address + /// @param _lightClientAddress The new light client address + function updateLightClientAddress(address _lightClientAddress) external onlyOwner { + if (_lightClientAddress == address(0)) revert InvalidAddress(); + lightClient = LightClient(_lightClientAddress); + emit LightClientAddressUpdated(_lightClientAddress); } } diff --git a/contracts/src/interfaces/AbstractStakeTable.sol b/contracts/src/interfaces/AbstractStakeTable.sol index a73f285a2a..a5563202d9 100644 --- a/contracts/src/interfaces/AbstractStakeTable.sol +++ b/contracts/src/interfaces/AbstractStakeTable.sol @@ -47,6 +47,18 @@ abstract contract AbstractStakeTable { address account, BN254.G2Point newBlsVK, EdOnBN254.EdOnBN254Point newSchnorrVK ); + /// @notice Signals the min stake amount has been updated + /// @param minStakeAmount the new min stake amount + event MinStakeAmountUpdated(uint256 minStakeAmount); + + /// @notice Signals the max churn rate has been updated + /// @param maxChurnRate the new max churn rate + event MaxChurnRateUpdated(uint256 maxChurnRate); + + /// @notice Signals the light client address has been updated + /// @param lightClientAddress the new light client address + event LightClientAddressUpdated(address lightClientAddress); + /// @dev (sadly, Solidity doesn't support type alias on non-primitive types) // We avoid declaring another struct even if the type info helps with readability, // extra layer of struct introduces overhead and more gas cost. diff --git a/contracts/test/StakeTable.t.sol b/contracts/test/StakeTable.t.sol index 414b86c22e..434a31f683 100644 --- a/contracts/test/StakeTable.t.sol +++ b/contracts/test/StakeTable.t.sol @@ -17,6 +17,7 @@ import { EdOnBN254 } from "../src/libraries/EdOnBn254.sol"; import { AbstractStakeTable } from "../src/interfaces/AbstractStakeTable.sol"; import { LightClient } from "../src/LightClient.sol"; import { LightClientMock } from "../test/mocks/LightClientMock.sol"; +import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; // Token contract import { ExampleToken } from "../src/ExampleToken.sol"; @@ -29,6 +30,7 @@ contract StakeTable_register_Test is Test { ExampleToken public token; LightClientMock public lcMock; uint256 public constant INITIAL_BALANCE = 10 ether; + uint256 public constant MIN_STAKE_AMOUNT = 10 ether; address public exampleTokenCreator; function genClientWallet(address sender, string memory seed) @@ -77,7 +79,8 @@ contract StakeTable_register_Test is Test { lcMock = new LightClientMock(genesis, genesisStakeTableState, 864000); address lightClientAddress = address(lcMock); - stakeTable = new S(address(token), lightClientAddress, 10); + stakeTable = + new S(address(token), lightClientAddress, 10, MIN_STAKE_AMOUNT, exampleTokenCreator); } function testFuzz_RevertWhen_InvalidBLSSig(uint256 scalar) external { @@ -203,8 +206,8 @@ contract StakeTable_register_Test is Test { vm.stopPrank(); } - function test_RevertWhen_WrongStakeAmount() external { - uint64 depositAmount = 5 ether; + function test_RevertWhen_InsufficientStakeAmount() external { + uint64 depositAmount = uint64(stakeTable.minStakeAmount()) - 1; uint64 validUntilEpoch = 10; string memory seed = "123"; @@ -745,4 +748,104 @@ contract StakeTable_register_Test is Test { stakeTable.withdrawFunds(); vm.stopPrank(); } + + // test set admin succeeds + function test_setAdmin_succeeds() public { + vm.prank(exampleTokenCreator); + vm.expectEmit(false, false, false, true, address(stakeTable)); + emit Ownable.OwnershipTransferred(exampleTokenCreator, makeAddr("admin")); + stakeTable.transferOwnership(makeAddr("admin")); + assertEq(stakeTable.owner(), makeAddr("admin")); + } + + // test set admin fails if not admin or invalid admin address + function test_revertWhen_setAdmin_NotAdminOrInvalidAdminAddress() public { + vm.startPrank(makeAddr("randomUser")); + vm.expectRevert( + abi.encodeWithSelector( + Ownable.OwnableUnauthorizedAccount.selector, makeAddr("randomUser") + ) + ); + stakeTable.transferOwnership(makeAddr("admin")); + vm.stopPrank(); + + vm.prank(exampleTokenCreator); + vm.expectRevert(abi.encodeWithSelector(Ownable.OwnableInvalidOwner.selector, address(0))); + stakeTable.transferOwnership(address(0)); + } + + // test update min stake amount succeeds + function test_updateMinStakeAmount_succeeds() public { + vm.prank(exampleTokenCreator); + vm.expectEmit(false, false, false, true, address(stakeTable)); + emit AbstractStakeTable.MinStakeAmountUpdated(10 ether); + stakeTable.updateMinStakeAmount(10 ether); + assertEq(stakeTable.minStakeAmount(), 10 ether); + } + + // test update min stake amount fails if not admin or invalid stake amount + function test_revertWhen_updateMinStakeAmount_NotAdminOrInvalidStakeAmount() public { + vm.startPrank(makeAddr("randomUser")); + vm.expectRevert( + abi.encodeWithSelector( + Ownable.OwnableUnauthorizedAccount.selector, makeAddr("randomUser") + ) + ); + stakeTable.updateMinStakeAmount(10 ether); + vm.stopPrank(); + + vm.prank(exampleTokenCreator); + vm.expectRevert(S.InvalidValue.selector); + stakeTable.updateMinStakeAmount(0); + } + + // test update max churn rate succeeds + function test_updateMaxChurnRate_succeeds() public { + vm.prank(exampleTokenCreator); + vm.expectEmit(false, false, false, true, address(stakeTable)); + emit AbstractStakeTable.MaxChurnRateUpdated(10); + stakeTable.updateMaxChurnRate(10); + assertEq(stakeTable.maxChurnRate(), 10); + } + + // test update max churn rate fails if not admin or invalid churn amount + function test_revertWhen_updateMaxChurnRate_NotAdminOrInvalidChurnAmount() public { + vm.startPrank(makeAddr("randomUser")); + vm.expectRevert( + abi.encodeWithSelector( + Ownable.OwnableUnauthorizedAccount.selector, makeAddr("randomUser") + ) + ); + stakeTable.updateMaxChurnRate(10); + vm.stopPrank(); + + vm.prank(exampleTokenCreator); + vm.expectRevert(S.InvalidValue.selector); + stakeTable.updateMaxChurnRate(0); + } + + // test update light client address succeeds + function test_updateLightClientAddress_succeeds() public { + vm.prank(exampleTokenCreator); + vm.expectEmit(false, false, false, true, address(stakeTable)); + emit AbstractStakeTable.LightClientAddressUpdated(makeAddr("lightClient")); + stakeTable.updateLightClientAddress(makeAddr("lightClient")); + assertEq(address(stakeTable.lightClient()), makeAddr("lightClient")); + } + + // test update light client address fails if not admin or bad address + function test_revertWhen_updateLightClientAddress_NotAdminOrBadAddress() public { + vm.startPrank(makeAddr("randomUser")); + vm.expectRevert( + abi.encodeWithSelector( + Ownable.OwnableUnauthorizedAccount.selector, makeAddr("randomUser") + ) + ); + stakeTable.updateLightClientAddress(makeAddr("lightClient")); + vm.stopPrank(); + + vm.prank(exampleTokenCreator); + vm.expectRevert(S.InvalidAddress.selector); + stakeTable.updateLightClientAddress(address(0)); + } } From 08f813a6f3092c345f8283f40d4dc333ffbadd06 Mon Sep 17 00:00:00 2001 From: Mathis Antony Date: Fri, 7 Feb 2025 13:00:13 +0800 Subject: [PATCH 1387/1393] fix: espresso-types tests without default-features --- types/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/types/Cargo.toml b/types/Cargo.toml index 691fe53af9..65a2d9ce60 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -58,6 +58,7 @@ url = { workspace = true } vbs = { workspace = true } [dev-dependencies] +hotshot-query-service = { workspace = true, features = ["testing"]} portpicker = { workspace = true } [package.metadata.cargo-machete] From fd2f54c95e971fea6086c7cd8e253417984e4ac2 Mon Sep 17 00:00:00 2001 From: Mathis Antony Date: Fri, 7 Feb 2025 13:06:43 +0800 Subject: [PATCH 1388/1393] fix: build node-metrics with all features --- Cargo.lock | 1 + node-metrics/Cargo.toml | 2 +- types/Cargo.toml | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c30aa9060..b2dbca66c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3626,6 +3626,7 @@ dependencies = [ "contract-bindings-alloy", "contract-bindings-ethers", "derive_more 1.0.0", + "espresso-types", "ethers", "ethers-conv", "fluent-asserter", diff --git a/node-metrics/Cargo.toml b/node-metrics/Cargo.toml index eb441ca658..0092500753 100644 --- a/node-metrics/Cargo.toml +++ b/node-metrics/Cargo.toml @@ -6,7 +6,7 @@ authors = { workspace = true } edition = { workspace = true } [features] -testing = ["serde_json", "espresso-types/testing"] +testing = ["serde_json", "espresso-types/testing", "hotshot-query-service/testing"] [dev-dependencies] node-metrics = { path = ".", features = [ "testing" ] } diff --git a/types/Cargo.toml b/types/Cargo.toml index 65a2d9ce60..40c10d304a 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Espresso Systems "] edition = "2021" [features] -testing = [] +testing = ["hotshot-query-service/testing"] [dependencies] alloy = { workspace = true } @@ -58,7 +58,7 @@ url = { workspace = true } vbs = { workspace = true } [dev-dependencies] -hotshot-query-service = { workspace = true, features = ["testing"]} +espresso-types = { path = ".", features = [ "testing" ] } portpicker = { workspace = true } [package.metadata.cargo-machete] From e668601b5c548ab9105d81c5e4309ef83af78af8 Mon Sep 17 00:00:00 2001 From: Mathis Antony Date: Fri, 7 Feb 2025 16:25:10 +0800 Subject: [PATCH 1389/1393] Fix failing tests We were sometimes decoding as the wrong type. This adds a function that decodes to the old VID type if deserializing the new type fails. I think we want a better solution but this does at least fix the tests for me. --- sequencer/src/persistence/sql.rs | 36 +++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 7b257b4b37..f8c0a7c354 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -583,6 +583,24 @@ pub struct Persistence { gc_opt: ConsensusPruningOptions, } +// TODO: clean up as part of VID migration +fn deserialize_vid_proposal_with_fallback( + bytes: &[u8], +) -> anyhow::Result>> { + bincode::deserialize(bytes).or_else(|err| { + tracing::warn!("error decoding VID share: {err:#}"); + match bincode::deserialize::>>(bytes) { + Ok(proposal) => Ok(convert_proposal(proposal)), + Err(err2) => { + tracing::warn!("error decoding VID share fallback: {err2:#}"); + Err(anyhow::anyhow!( + "Both primary and fallback deserialization failed: {err:#}, {err2:#}" + )) + } + } + }) +} + impl Persistence { /// Ensure the `leaf_hash` column is populated for all existing quorum proposals. /// @@ -710,9 +728,7 @@ impl Persistence { .map(|row| { let view: i64 = row.get("view"); let data: Vec = row.get("data"); - let vid_proposal = bincode::deserialize::< - Proposal>, - >(&data)?; + let vid_proposal = deserialize_vid_proposal_with_fallback(&data)?; Ok((view as u64, vid_proposal.data)) }) .collect::>>()?; @@ -1397,16 +1413,12 @@ impl Provider for Persistence { } }; - let share: Proposal> = - match bincode::deserialize(&bytes) { - Ok(share) => share, - Err(err) => { - tracing::warn!("error decoding VID share: {err:#}"); - return None; - } - }; + let proposal = match deserialize_vid_proposal_with_fallback(&bytes) { + Ok(proposal) => proposal, + Err(_) => return None, + }; - Some(match share.data { + Some(match proposal.data { VidDisperseShare::V0(ADVZDisperseShare:: { common, .. }) => common, VidDisperseShare::V1(VidDisperseShare2:: { common, .. }) => common, }) From 00838ba4ac45216c76a1f6241c320983515583a1 Mon Sep 17 00:00:00 2001 From: Rob Date: Fri, 7 Feb 2025 10:44:32 -0500 Subject: [PATCH 1390/1393] rename some crates --- {builder-api => hotshot-builder-api}/Cargo.toml | 0 {builder-api => hotshot-builder-api}/README.md | 0 .../api/v0_1/builder.toml | 0 .../api/v0_1/submit.toml | 0 .../api/v0_3/builder.toml | 0 .../api/v0_3/submit.toml | 0 {builder-api => hotshot-builder-api}/src/api.rs | 0 {builder-api => hotshot-builder-api}/src/lib.rs | 0 .../src/v0_1/block_info.rs | 0 .../src/v0_1/builder.rs | 0 .../src/v0_1/data_source.rs | 0 .../src/v0_1/mod.rs | 0 .../src/v0_1/query_data.rs | 0 .../src/v0_99/builder.rs | 0 .../src/v0_99/data_source.rs | 0 .../src/v0_99/mod.rs | 0 {example-types => hotshot-example-types}/Cargo.toml | 0 .../src/auction_results_provider_types.rs | 0 .../src/block_types.rs | 0 {example-types => hotshot-example-types}/src/lib.rs | 0 .../src/node_types.rs | 0 .../src/state_types.rs | 0 .../src/storage_types.rs | 0 .../src/testable_delay.rs | 0 {examples => hotshot-examples}/Cargo.toml | 0 {examples => hotshot-examples}/combined/all.rs | 0 .../combined/multi-validator.rs | 0 .../combined/orchestrator.rs | 0 {examples => hotshot-examples}/combined/types.rs | 0 .../combined/validator.rs | 0 {examples => hotshot-examples}/infra/mod.rs | 0 {examples => hotshot-examples}/libp2p/all.rs | 0 .../libp2p/multi-validator.rs | 0 {examples => hotshot-examples}/libp2p/types.rs | 0 {examples => hotshot-examples}/libp2p/validator.rs | 0 {examples => hotshot-examples}/orchestrator.rs | 0 {examples => hotshot-examples}/push-cdn/README.md | 0 {examples => hotshot-examples}/push-cdn/all.rs | 0 {examples => hotshot-examples}/push-cdn/broker.rs | 0 {examples => hotshot-examples}/push-cdn/marshal.rs | 0 .../push-cdn/multi-validator.rs | 0 {examples => hotshot-examples}/push-cdn/types.rs | 0 .../push-cdn/validator.rs | 0 .../push-cdn/whitelist-adapter.rs | 0 {fakeapi => hotshot-fakeapi}/Cargo.toml | 0 {fakeapi => hotshot-fakeapi}/apis/solver.toml | 0 {fakeapi => hotshot-fakeapi}/src/fake_solver.rs | 0 {fakeapi => hotshot-fakeapi}/src/lib.rs | 0 .../.cargo/config | 0 .../.gitignore | 0 .../Cargo.toml | 2 +- .../flamegraph.sh | 0 .../src/lib.rs | 0 .../src/network/behaviours/dht/bootstrap.rs | 0 .../src/network/behaviours/dht/mod.rs | 0 .../src/network/behaviours/dht/record.rs | 0 .../src/network/behaviours/dht/store/mod.rs | 0 .../src/network/behaviours/dht/store/persistent.rs | 0 .../src/network/behaviours/dht/store/validated.rs | 0 .../src/network/behaviours/direct_message.rs | 0 .../src/network/behaviours/exponential_backoff.rs | 0 .../src/network/behaviours/mod.rs | 0 .../src/network/cbor.rs | 0 .../src/network/def.rs | 0 .../src/network/mod.rs | 0 .../src/network/node.rs | 0 .../src/network/node/config.rs | 0 .../src/network/node/handle.rs | 0 .../src/network/transport.rs | 0 .../web/index.html | 0 {macros => hotshot-macros}/Cargo.toml | 0 {macros => hotshot-macros}/src/lib.rs | 0 {orchestrator => hotshot-orchestrator}/Cargo.toml | 0 {orchestrator => hotshot-orchestrator}/README.md | 0 {orchestrator => hotshot-orchestrator}/api.toml | 0 .../run-config.toml | 0 .../src/client.rs | 0 {orchestrator => hotshot-orchestrator}/src/lib.rs | 0 .../staging-config.toml | 0 {task-impls => hotshot-task-impls}/Cargo.toml | 0 .../HotShot_event_architecture.drawio | 0 .../HotShot_event_architecture.png | Bin {task-impls => hotshot-task-impls}/README.md | 0 {task-impls => hotshot-task-impls}/src/builder.rs | 0 .../src/consensus/handlers.rs | 0 .../src/consensus/mod.rs | 0 {task-impls => hotshot-task-impls}/src/da.rs | 0 {task-impls => hotshot-task-impls}/src/events.rs | 0 {task-impls => hotshot-task-impls}/src/harness.rs | 0 {task-impls => hotshot-task-impls}/src/helpers.rs | 0 {task-impls => hotshot-task-impls}/src/lib.rs | 0 {task-impls => hotshot-task-impls}/src/network.rs | 0 .../src/quorum_proposal/handlers.rs | 0 .../src/quorum_proposal/mod.rs | 0 .../src/quorum_proposal_recv/handlers.rs | 0 .../src/quorum_proposal_recv/mod.rs | 0 .../src/quorum_vote/handlers.rs | 0 .../src/quorum_vote/mod.rs | 0 {task-impls => hotshot-task-impls}/src/request.rs | 0 {task-impls => hotshot-task-impls}/src/response.rs | 0 {task-impls => hotshot-task-impls}/src/rewind.rs | 0 .../src/transactions.rs | 0 {task-impls => hotshot-task-impls}/src/upgrade.rs | 0 {task-impls => hotshot-task-impls}/src/vid.rs | 0 {task-impls => hotshot-task-impls}/src/view_sync.rs | 0 .../src/vote_collection.rs | 0 {task => hotshot-task}/Cargo.toml | 0 {task => hotshot-task}/src/dependency.rs | 0 {task => hotshot-task}/src/dependency_task.rs | 0 {task => hotshot-task}/src/lib.rs | 0 {task => hotshot-task}/src/task.rs | 0 {testing => hotshot-testing}/.gitignore | 0 {testing => hotshot-testing}/Cargo.toml | 0 {testing => hotshot-testing}/README.md | 0 .../src/block_builder/mod.rs | 0 .../src/block_builder/random.rs | 0 .../src/block_builder/simple.rs | 0 .../src/byzantine/byzantine_behaviour.rs | 0 {testing => hotshot-testing}/src/byzantine/mod.rs | 0 {testing => hotshot-testing}/src/completion_task.rs | 0 .../src/consistency_task.rs | 0 {testing => hotshot-testing}/src/helpers.rs | 0 {testing => hotshot-testing}/src/lib.rs | 0 {testing => hotshot-testing}/src/node_ctx.rs | 0 .../src/overall_safety_task.rs | 0 .../src/predicates/event.rs | 0 {testing => hotshot-testing}/src/predicates/mod.rs | 0 .../src/predicates/upgrade_with_proposal.rs | 0 .../src/predicates/upgrade_with_vote.rs | 0 {testing => hotshot-testing}/src/script.rs | 0 {testing => hotshot-testing}/src/spinning_task.rs | 0 {testing => hotshot-testing}/src/test_builder.rs | 0 {testing => hotshot-testing}/src/test_helpers.rs | 0 {testing => hotshot-testing}/src/test_launcher.rs | 0 {testing => hotshot-testing}/src/test_runner.rs | 0 {testing => hotshot-testing}/src/test_task.rs | 0 {testing => hotshot-testing}/src/txn_task.rs | 0 {testing => hotshot-testing}/src/view_generator.rs | 0 {testing => hotshot-testing}/src/view_sync_task.rs | 0 {testing => hotshot-testing}/tests/tests_1.rs | 0 .../tests/tests_1/block_builder.rs | 0 .../tests/tests_1/da_task.rs | 0 .../tests/tests_1/gen_key_pair.rs | 0 .../tests/tests_1/libp2p.rs | 0 .../tests/tests_1/message.rs | 0 .../tests/tests_1/network_task.rs | 0 .../tests/tests_1/quorum_proposal_recv_task.rs | 0 .../tests/tests_1/quorum_proposal_task.rs | 0 .../tests/tests_1/quorum_vote_task.rs | 0 .../tests/tests_1/test_success.rs | 0 .../tests/tests_1/test_with_failures_2.rs | 0 .../tests/tests_1/transaction_task.rs | 0 .../tests/tests_1/upgrade_task_with_proposal.rs | 0 .../tests/tests_1/upgrade_task_with_vote.rs | 0 .../tests/tests_1/vid_task.rs | 0 .../tests/tests_1/view_sync_task.rs | 0 .../tests/tests_1/vote_dependency_handle.rs | 0 {testing => hotshot-testing}/tests/tests_2.rs | 0 .../tests/tests_2/catchup.rs | 0 {testing => hotshot-testing}/tests/tests_3.rs | 0 .../tests/tests_3/byzantine_tests.rs | 0 .../tests/tests_3/memory_network.rs | 0 .../tests/tests_3/test_with_failures_half_f.rs | 0 {testing => hotshot-testing}/tests/tests_4.rs | 0 .../tests/tests_4/byzantine_tests.rs | 0 .../tests/tests_4/test_marketplace.rs | 0 .../tests/tests_4/test_with_builder_failures.rs | 0 .../tests/tests_4/test_with_failures_f.rs | 0 {testing => hotshot-testing}/tests/tests_5.rs | 0 .../tests/tests_5/broken_3_chain.rs | 0 .../tests/tests_5/combined_network.rs | 0 .../tests/tests_5/fake_solver.rs | 0 .../tests/tests_5/push_cdn.rs | 0 .../tests/tests_5/test_with_failures.rs | 0 .../tests/tests_5/timeout.rs | 0 .../tests/tests_5/unreliable_network.rs | 0 {testing => hotshot-testing}/tests/tests_6.rs | 0 .../tests/tests_6/test_epochs.rs | 0 {types => hotshot-types}/Cargo.toml | 0 {types => hotshot-types}/src/bundle.rs | 0 {types => hotshot-types}/src/consensus.rs | 0 {types => hotshot-types}/src/constants.rs | 0 {types => hotshot-types}/src/data.rs | 0 {types => hotshot-types}/src/data/vid_disperse.rs | 0 {types => hotshot-types}/src/drb.rs | 0 {types => hotshot-types}/src/error.rs | 0 {types => hotshot-types}/src/event.rs | 0 {types => hotshot-types}/src/hotshot_config_file.rs | 0 {types => hotshot-types}/src/lib.rs | 0 {types => hotshot-types}/src/light_client.rs | 0 {types => hotshot-types}/src/message.rs | 0 {types => hotshot-types}/src/network.rs | 0 {types => hotshot-types}/src/qc.rs | 0 {types => hotshot-types}/src/request_response.rs | 0 {types => hotshot-types}/src/signature_key.rs | 0 {types => hotshot-types}/src/simple_certificate.rs | 0 {types => hotshot-types}/src/simple_vote.rs | 0 {types => hotshot-types}/src/stake_table.rs | 0 {types => hotshot-types}/src/traits.rs | 0 .../src/traits/auction_results_provider.rs | 0 .../src/traits/block_contents.rs | 0 .../src/traits/consensus_api.rs | 0 {types => hotshot-types}/src/traits/election.rs | 0 {types => hotshot-types}/src/traits/metrics.rs | 0 {types => hotshot-types}/src/traits/network.rs | 0 .../src/traits/node_implementation.rs | 0 {types => hotshot-types}/src/traits/qc.rs | 0 .../src/traits/signature_key.rs | 0 {types => hotshot-types}/src/traits/stake_table.rs | 0 {types => hotshot-types}/src/traits/states.rs | 0 {types => hotshot-types}/src/traits/storage.rs | 0 {types => hotshot-types}/src/upgrade_config.rs | 0 {types => hotshot-types}/src/utils.rs | 0 {types => hotshot-types}/src/validator_config.rs | 0 {types => hotshot-types}/src/vid.rs | 0 {types => hotshot-types}/src/vid/advz.rs | 0 {types => hotshot-types}/src/vid/avidm.rs | 0 {types => hotshot-types}/src/vote.rs | 0 {utils => hotshot-utils}/Cargo.toml | 2 +- {utils => hotshot-utils}/src/anytrace.rs | 0 {utils => hotshot-utils}/src/anytrace/macros.rs | 0 {utils => hotshot-utils}/src/lib.rs | 0 222 files changed, 2 insertions(+), 2 deletions(-) rename {builder-api => hotshot-builder-api}/Cargo.toml (100%) rename {builder-api => hotshot-builder-api}/README.md (100%) rename {builder-api => hotshot-builder-api}/api/v0_1/builder.toml (100%) rename {builder-api => hotshot-builder-api}/api/v0_1/submit.toml (100%) rename {builder-api => hotshot-builder-api}/api/v0_3/builder.toml (100%) rename {builder-api => hotshot-builder-api}/api/v0_3/submit.toml (100%) rename {builder-api => hotshot-builder-api}/src/api.rs (100%) rename {builder-api => hotshot-builder-api}/src/lib.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_1/block_info.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_1/builder.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_1/data_source.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_1/mod.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_1/query_data.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_99/builder.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_99/data_source.rs (100%) rename {builder-api => hotshot-builder-api}/src/v0_99/mod.rs (100%) rename {example-types => hotshot-example-types}/Cargo.toml (100%) rename {example-types => hotshot-example-types}/src/auction_results_provider_types.rs (100%) rename {example-types => hotshot-example-types}/src/block_types.rs (100%) rename {example-types => hotshot-example-types}/src/lib.rs (100%) rename {example-types => hotshot-example-types}/src/node_types.rs (100%) rename {example-types => hotshot-example-types}/src/state_types.rs (100%) rename {example-types => hotshot-example-types}/src/storage_types.rs (100%) rename {example-types => hotshot-example-types}/src/testable_delay.rs (100%) rename {examples => hotshot-examples}/Cargo.toml (100%) rename {examples => hotshot-examples}/combined/all.rs (100%) rename {examples => hotshot-examples}/combined/multi-validator.rs (100%) rename {examples => hotshot-examples}/combined/orchestrator.rs (100%) rename {examples => hotshot-examples}/combined/types.rs (100%) rename {examples => hotshot-examples}/combined/validator.rs (100%) rename {examples => hotshot-examples}/infra/mod.rs (100%) rename {examples => hotshot-examples}/libp2p/all.rs (100%) rename {examples => hotshot-examples}/libp2p/multi-validator.rs (100%) rename {examples => hotshot-examples}/libp2p/types.rs (100%) rename {examples => hotshot-examples}/libp2p/validator.rs (100%) rename {examples => hotshot-examples}/orchestrator.rs (100%) rename {examples => hotshot-examples}/push-cdn/README.md (100%) rename {examples => hotshot-examples}/push-cdn/all.rs (100%) rename {examples => hotshot-examples}/push-cdn/broker.rs (100%) rename {examples => hotshot-examples}/push-cdn/marshal.rs (100%) rename {examples => hotshot-examples}/push-cdn/multi-validator.rs (100%) rename {examples => hotshot-examples}/push-cdn/types.rs (100%) rename {examples => hotshot-examples}/push-cdn/validator.rs (100%) rename {examples => hotshot-examples}/push-cdn/whitelist-adapter.rs (100%) rename {fakeapi => hotshot-fakeapi}/Cargo.toml (100%) rename {fakeapi => hotshot-fakeapi}/apis/solver.toml (100%) rename {fakeapi => hotshot-fakeapi}/src/fake_solver.rs (100%) rename {fakeapi => hotshot-fakeapi}/src/lib.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/.cargo/config (100%) rename {libp2p-networking => hotshot-libp2p-networking}/.gitignore (100%) rename {libp2p-networking => hotshot-libp2p-networking}/Cargo.toml (97%) rename {libp2p-networking => hotshot-libp2p-networking}/flamegraph.sh (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/lib.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/dht/bootstrap.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/dht/mod.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/dht/record.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/dht/store/mod.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/dht/store/persistent.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/dht/store/validated.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/direct_message.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/exponential_backoff.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/behaviours/mod.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/cbor.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/def.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/mod.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/node.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/node/config.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/node/handle.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/src/network/transport.rs (100%) rename {libp2p-networking => hotshot-libp2p-networking}/web/index.html (100%) rename {macros => hotshot-macros}/Cargo.toml (100%) rename {macros => hotshot-macros}/src/lib.rs (100%) rename {orchestrator => hotshot-orchestrator}/Cargo.toml (100%) rename {orchestrator => hotshot-orchestrator}/README.md (100%) rename {orchestrator => hotshot-orchestrator}/api.toml (100%) rename {orchestrator => hotshot-orchestrator}/run-config.toml (100%) rename {orchestrator => hotshot-orchestrator}/src/client.rs (100%) rename {orchestrator => hotshot-orchestrator}/src/lib.rs (100%) rename {orchestrator => hotshot-orchestrator}/staging-config.toml (100%) rename {task-impls => hotshot-task-impls}/Cargo.toml (100%) rename {task-impls => hotshot-task-impls}/HotShot_event_architecture.drawio (100%) rename {task-impls => hotshot-task-impls}/HotShot_event_architecture.png (100%) rename {task-impls => hotshot-task-impls}/README.md (100%) rename {task-impls => hotshot-task-impls}/src/builder.rs (100%) rename {task-impls => hotshot-task-impls}/src/consensus/handlers.rs (100%) rename {task-impls => hotshot-task-impls}/src/consensus/mod.rs (100%) rename {task-impls => hotshot-task-impls}/src/da.rs (100%) rename {task-impls => hotshot-task-impls}/src/events.rs (100%) rename {task-impls => hotshot-task-impls}/src/harness.rs (100%) rename {task-impls => hotshot-task-impls}/src/helpers.rs (100%) rename {task-impls => hotshot-task-impls}/src/lib.rs (100%) rename {task-impls => hotshot-task-impls}/src/network.rs (100%) rename {task-impls => hotshot-task-impls}/src/quorum_proposal/handlers.rs (100%) rename {task-impls => hotshot-task-impls}/src/quorum_proposal/mod.rs (100%) rename {task-impls => hotshot-task-impls}/src/quorum_proposal_recv/handlers.rs (100%) rename {task-impls => hotshot-task-impls}/src/quorum_proposal_recv/mod.rs (100%) rename {task-impls => hotshot-task-impls}/src/quorum_vote/handlers.rs (100%) rename {task-impls => hotshot-task-impls}/src/quorum_vote/mod.rs (100%) rename {task-impls => hotshot-task-impls}/src/request.rs (100%) rename {task-impls => hotshot-task-impls}/src/response.rs (100%) rename {task-impls => hotshot-task-impls}/src/rewind.rs (100%) rename {task-impls => hotshot-task-impls}/src/transactions.rs (100%) rename {task-impls => hotshot-task-impls}/src/upgrade.rs (100%) rename {task-impls => hotshot-task-impls}/src/vid.rs (100%) rename {task-impls => hotshot-task-impls}/src/view_sync.rs (100%) rename {task-impls => hotshot-task-impls}/src/vote_collection.rs (100%) rename {task => hotshot-task}/Cargo.toml (100%) rename {task => hotshot-task}/src/dependency.rs (100%) rename {task => hotshot-task}/src/dependency_task.rs (100%) rename {task => hotshot-task}/src/lib.rs (100%) rename {task => hotshot-task}/src/task.rs (100%) rename {testing => hotshot-testing}/.gitignore (100%) rename {testing => hotshot-testing}/Cargo.toml (100%) rename {testing => hotshot-testing}/README.md (100%) rename {testing => hotshot-testing}/src/block_builder/mod.rs (100%) rename {testing => hotshot-testing}/src/block_builder/random.rs (100%) rename {testing => hotshot-testing}/src/block_builder/simple.rs (100%) rename {testing => hotshot-testing}/src/byzantine/byzantine_behaviour.rs (100%) rename {testing => hotshot-testing}/src/byzantine/mod.rs (100%) rename {testing => hotshot-testing}/src/completion_task.rs (100%) rename {testing => hotshot-testing}/src/consistency_task.rs (100%) rename {testing => hotshot-testing}/src/helpers.rs (100%) rename {testing => hotshot-testing}/src/lib.rs (100%) rename {testing => hotshot-testing}/src/node_ctx.rs (100%) rename {testing => hotshot-testing}/src/overall_safety_task.rs (100%) rename {testing => hotshot-testing}/src/predicates/event.rs (100%) rename {testing => hotshot-testing}/src/predicates/mod.rs (100%) rename {testing => hotshot-testing}/src/predicates/upgrade_with_proposal.rs (100%) rename {testing => hotshot-testing}/src/predicates/upgrade_with_vote.rs (100%) rename {testing => hotshot-testing}/src/script.rs (100%) rename {testing => hotshot-testing}/src/spinning_task.rs (100%) rename {testing => hotshot-testing}/src/test_builder.rs (100%) rename {testing => hotshot-testing}/src/test_helpers.rs (100%) rename {testing => hotshot-testing}/src/test_launcher.rs (100%) rename {testing => hotshot-testing}/src/test_runner.rs (100%) rename {testing => hotshot-testing}/src/test_task.rs (100%) rename {testing => hotshot-testing}/src/txn_task.rs (100%) rename {testing => hotshot-testing}/src/view_generator.rs (100%) rename {testing => hotshot-testing}/src/view_sync_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/block_builder.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/da_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/gen_key_pair.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/libp2p.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/message.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/network_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/quorum_proposal_recv_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/quorum_proposal_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/quorum_vote_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/test_success.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/test_with_failures_2.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/transaction_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/upgrade_task_with_proposal.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/upgrade_task_with_vote.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/vid_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/view_sync_task.rs (100%) rename {testing => hotshot-testing}/tests/tests_1/vote_dependency_handle.rs (100%) rename {testing => hotshot-testing}/tests/tests_2.rs (100%) rename {testing => hotshot-testing}/tests/tests_2/catchup.rs (100%) rename {testing => hotshot-testing}/tests/tests_3.rs (100%) rename {testing => hotshot-testing}/tests/tests_3/byzantine_tests.rs (100%) rename {testing => hotshot-testing}/tests/tests_3/memory_network.rs (100%) rename {testing => hotshot-testing}/tests/tests_3/test_with_failures_half_f.rs (100%) rename {testing => hotshot-testing}/tests/tests_4.rs (100%) rename {testing => hotshot-testing}/tests/tests_4/byzantine_tests.rs (100%) rename {testing => hotshot-testing}/tests/tests_4/test_marketplace.rs (100%) rename {testing => hotshot-testing}/tests/tests_4/test_with_builder_failures.rs (100%) rename {testing => hotshot-testing}/tests/tests_4/test_with_failures_f.rs (100%) rename {testing => hotshot-testing}/tests/tests_5.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/broken_3_chain.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/combined_network.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/fake_solver.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/push_cdn.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/test_with_failures.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/timeout.rs (100%) rename {testing => hotshot-testing}/tests/tests_5/unreliable_network.rs (100%) rename {testing => hotshot-testing}/tests/tests_6.rs (100%) rename {testing => hotshot-testing}/tests/tests_6/test_epochs.rs (100%) rename {types => hotshot-types}/Cargo.toml (100%) rename {types => hotshot-types}/src/bundle.rs (100%) rename {types => hotshot-types}/src/consensus.rs (100%) rename {types => hotshot-types}/src/constants.rs (100%) rename {types => hotshot-types}/src/data.rs (100%) rename {types => hotshot-types}/src/data/vid_disperse.rs (100%) rename {types => hotshot-types}/src/drb.rs (100%) rename {types => hotshot-types}/src/error.rs (100%) rename {types => hotshot-types}/src/event.rs (100%) rename {types => hotshot-types}/src/hotshot_config_file.rs (100%) rename {types => hotshot-types}/src/lib.rs (100%) rename {types => hotshot-types}/src/light_client.rs (100%) rename {types => hotshot-types}/src/message.rs (100%) rename {types => hotshot-types}/src/network.rs (100%) rename {types => hotshot-types}/src/qc.rs (100%) rename {types => hotshot-types}/src/request_response.rs (100%) rename {types => hotshot-types}/src/signature_key.rs (100%) rename {types => hotshot-types}/src/simple_certificate.rs (100%) rename {types => hotshot-types}/src/simple_vote.rs (100%) rename {types => hotshot-types}/src/stake_table.rs (100%) rename {types => hotshot-types}/src/traits.rs (100%) rename {types => hotshot-types}/src/traits/auction_results_provider.rs (100%) rename {types => hotshot-types}/src/traits/block_contents.rs (100%) rename {types => hotshot-types}/src/traits/consensus_api.rs (100%) rename {types => hotshot-types}/src/traits/election.rs (100%) rename {types => hotshot-types}/src/traits/metrics.rs (100%) rename {types => hotshot-types}/src/traits/network.rs (100%) rename {types => hotshot-types}/src/traits/node_implementation.rs (100%) rename {types => hotshot-types}/src/traits/qc.rs (100%) rename {types => hotshot-types}/src/traits/signature_key.rs (100%) rename {types => hotshot-types}/src/traits/stake_table.rs (100%) rename {types => hotshot-types}/src/traits/states.rs (100%) rename {types => hotshot-types}/src/traits/storage.rs (100%) rename {types => hotshot-types}/src/upgrade_config.rs (100%) rename {types => hotshot-types}/src/utils.rs (100%) rename {types => hotshot-types}/src/validator_config.rs (100%) rename {types => hotshot-types}/src/vid.rs (100%) rename {types => hotshot-types}/src/vid/advz.rs (100%) rename {types => hotshot-types}/src/vid/avidm.rs (100%) rename {types => hotshot-types}/src/vote.rs (100%) rename {utils => hotshot-utils}/Cargo.toml (90%) rename {utils => hotshot-utils}/src/anytrace.rs (100%) rename {utils => hotshot-utils}/src/anytrace/macros.rs (100%) rename {utils => hotshot-utils}/src/lib.rs (100%) diff --git a/builder-api/Cargo.toml b/hotshot-builder-api/Cargo.toml similarity index 100% rename from builder-api/Cargo.toml rename to hotshot-builder-api/Cargo.toml diff --git a/builder-api/README.md b/hotshot-builder-api/README.md similarity index 100% rename from builder-api/README.md rename to hotshot-builder-api/README.md diff --git a/builder-api/api/v0_1/builder.toml b/hotshot-builder-api/api/v0_1/builder.toml similarity index 100% rename from builder-api/api/v0_1/builder.toml rename to hotshot-builder-api/api/v0_1/builder.toml diff --git a/builder-api/api/v0_1/submit.toml b/hotshot-builder-api/api/v0_1/submit.toml similarity index 100% rename from builder-api/api/v0_1/submit.toml rename to hotshot-builder-api/api/v0_1/submit.toml diff --git a/builder-api/api/v0_3/builder.toml b/hotshot-builder-api/api/v0_3/builder.toml similarity index 100% rename from builder-api/api/v0_3/builder.toml rename to hotshot-builder-api/api/v0_3/builder.toml diff --git a/builder-api/api/v0_3/submit.toml b/hotshot-builder-api/api/v0_3/submit.toml similarity index 100% rename from builder-api/api/v0_3/submit.toml rename to hotshot-builder-api/api/v0_3/submit.toml diff --git a/builder-api/src/api.rs b/hotshot-builder-api/src/api.rs similarity index 100% rename from builder-api/src/api.rs rename to hotshot-builder-api/src/api.rs diff --git a/builder-api/src/lib.rs b/hotshot-builder-api/src/lib.rs similarity index 100% rename from builder-api/src/lib.rs rename to hotshot-builder-api/src/lib.rs diff --git a/builder-api/src/v0_1/block_info.rs b/hotshot-builder-api/src/v0_1/block_info.rs similarity index 100% rename from builder-api/src/v0_1/block_info.rs rename to hotshot-builder-api/src/v0_1/block_info.rs diff --git a/builder-api/src/v0_1/builder.rs b/hotshot-builder-api/src/v0_1/builder.rs similarity index 100% rename from builder-api/src/v0_1/builder.rs rename to hotshot-builder-api/src/v0_1/builder.rs diff --git a/builder-api/src/v0_1/data_source.rs b/hotshot-builder-api/src/v0_1/data_source.rs similarity index 100% rename from builder-api/src/v0_1/data_source.rs rename to hotshot-builder-api/src/v0_1/data_source.rs diff --git a/builder-api/src/v0_1/mod.rs b/hotshot-builder-api/src/v0_1/mod.rs similarity index 100% rename from builder-api/src/v0_1/mod.rs rename to hotshot-builder-api/src/v0_1/mod.rs diff --git a/builder-api/src/v0_1/query_data.rs b/hotshot-builder-api/src/v0_1/query_data.rs similarity index 100% rename from builder-api/src/v0_1/query_data.rs rename to hotshot-builder-api/src/v0_1/query_data.rs diff --git a/builder-api/src/v0_99/builder.rs b/hotshot-builder-api/src/v0_99/builder.rs similarity index 100% rename from builder-api/src/v0_99/builder.rs rename to hotshot-builder-api/src/v0_99/builder.rs diff --git a/builder-api/src/v0_99/data_source.rs b/hotshot-builder-api/src/v0_99/data_source.rs similarity index 100% rename from builder-api/src/v0_99/data_source.rs rename to hotshot-builder-api/src/v0_99/data_source.rs diff --git a/builder-api/src/v0_99/mod.rs b/hotshot-builder-api/src/v0_99/mod.rs similarity index 100% rename from builder-api/src/v0_99/mod.rs rename to hotshot-builder-api/src/v0_99/mod.rs diff --git a/example-types/Cargo.toml b/hotshot-example-types/Cargo.toml similarity index 100% rename from example-types/Cargo.toml rename to hotshot-example-types/Cargo.toml diff --git a/example-types/src/auction_results_provider_types.rs b/hotshot-example-types/src/auction_results_provider_types.rs similarity index 100% rename from example-types/src/auction_results_provider_types.rs rename to hotshot-example-types/src/auction_results_provider_types.rs diff --git a/example-types/src/block_types.rs b/hotshot-example-types/src/block_types.rs similarity index 100% rename from example-types/src/block_types.rs rename to hotshot-example-types/src/block_types.rs diff --git a/example-types/src/lib.rs b/hotshot-example-types/src/lib.rs similarity index 100% rename from example-types/src/lib.rs rename to hotshot-example-types/src/lib.rs diff --git a/example-types/src/node_types.rs b/hotshot-example-types/src/node_types.rs similarity index 100% rename from example-types/src/node_types.rs rename to hotshot-example-types/src/node_types.rs diff --git a/example-types/src/state_types.rs b/hotshot-example-types/src/state_types.rs similarity index 100% rename from example-types/src/state_types.rs rename to hotshot-example-types/src/state_types.rs diff --git a/example-types/src/storage_types.rs b/hotshot-example-types/src/storage_types.rs similarity index 100% rename from example-types/src/storage_types.rs rename to hotshot-example-types/src/storage_types.rs diff --git a/example-types/src/testable_delay.rs b/hotshot-example-types/src/testable_delay.rs similarity index 100% rename from example-types/src/testable_delay.rs rename to hotshot-example-types/src/testable_delay.rs diff --git a/examples/Cargo.toml b/hotshot-examples/Cargo.toml similarity index 100% rename from examples/Cargo.toml rename to hotshot-examples/Cargo.toml diff --git a/examples/combined/all.rs b/hotshot-examples/combined/all.rs similarity index 100% rename from examples/combined/all.rs rename to hotshot-examples/combined/all.rs diff --git a/examples/combined/multi-validator.rs b/hotshot-examples/combined/multi-validator.rs similarity index 100% rename from examples/combined/multi-validator.rs rename to hotshot-examples/combined/multi-validator.rs diff --git a/examples/combined/orchestrator.rs b/hotshot-examples/combined/orchestrator.rs similarity index 100% rename from examples/combined/orchestrator.rs rename to hotshot-examples/combined/orchestrator.rs diff --git a/examples/combined/types.rs b/hotshot-examples/combined/types.rs similarity index 100% rename from examples/combined/types.rs rename to hotshot-examples/combined/types.rs diff --git a/examples/combined/validator.rs b/hotshot-examples/combined/validator.rs similarity index 100% rename from examples/combined/validator.rs rename to hotshot-examples/combined/validator.rs diff --git a/examples/infra/mod.rs b/hotshot-examples/infra/mod.rs similarity index 100% rename from examples/infra/mod.rs rename to hotshot-examples/infra/mod.rs diff --git a/examples/libp2p/all.rs b/hotshot-examples/libp2p/all.rs similarity index 100% rename from examples/libp2p/all.rs rename to hotshot-examples/libp2p/all.rs diff --git a/examples/libp2p/multi-validator.rs b/hotshot-examples/libp2p/multi-validator.rs similarity index 100% rename from examples/libp2p/multi-validator.rs rename to hotshot-examples/libp2p/multi-validator.rs diff --git a/examples/libp2p/types.rs b/hotshot-examples/libp2p/types.rs similarity index 100% rename from examples/libp2p/types.rs rename to hotshot-examples/libp2p/types.rs diff --git a/examples/libp2p/validator.rs b/hotshot-examples/libp2p/validator.rs similarity index 100% rename from examples/libp2p/validator.rs rename to hotshot-examples/libp2p/validator.rs diff --git a/examples/orchestrator.rs b/hotshot-examples/orchestrator.rs similarity index 100% rename from examples/orchestrator.rs rename to hotshot-examples/orchestrator.rs diff --git a/examples/push-cdn/README.md b/hotshot-examples/push-cdn/README.md similarity index 100% rename from examples/push-cdn/README.md rename to hotshot-examples/push-cdn/README.md diff --git a/examples/push-cdn/all.rs b/hotshot-examples/push-cdn/all.rs similarity index 100% rename from examples/push-cdn/all.rs rename to hotshot-examples/push-cdn/all.rs diff --git a/examples/push-cdn/broker.rs b/hotshot-examples/push-cdn/broker.rs similarity index 100% rename from examples/push-cdn/broker.rs rename to hotshot-examples/push-cdn/broker.rs diff --git a/examples/push-cdn/marshal.rs b/hotshot-examples/push-cdn/marshal.rs similarity index 100% rename from examples/push-cdn/marshal.rs rename to hotshot-examples/push-cdn/marshal.rs diff --git a/examples/push-cdn/multi-validator.rs b/hotshot-examples/push-cdn/multi-validator.rs similarity index 100% rename from examples/push-cdn/multi-validator.rs rename to hotshot-examples/push-cdn/multi-validator.rs diff --git a/examples/push-cdn/types.rs b/hotshot-examples/push-cdn/types.rs similarity index 100% rename from examples/push-cdn/types.rs rename to hotshot-examples/push-cdn/types.rs diff --git a/examples/push-cdn/validator.rs b/hotshot-examples/push-cdn/validator.rs similarity index 100% rename from examples/push-cdn/validator.rs rename to hotshot-examples/push-cdn/validator.rs diff --git a/examples/push-cdn/whitelist-adapter.rs b/hotshot-examples/push-cdn/whitelist-adapter.rs similarity index 100% rename from examples/push-cdn/whitelist-adapter.rs rename to hotshot-examples/push-cdn/whitelist-adapter.rs diff --git a/fakeapi/Cargo.toml b/hotshot-fakeapi/Cargo.toml similarity index 100% rename from fakeapi/Cargo.toml rename to hotshot-fakeapi/Cargo.toml diff --git a/fakeapi/apis/solver.toml b/hotshot-fakeapi/apis/solver.toml similarity index 100% rename from fakeapi/apis/solver.toml rename to hotshot-fakeapi/apis/solver.toml diff --git a/fakeapi/src/fake_solver.rs b/hotshot-fakeapi/src/fake_solver.rs similarity index 100% rename from fakeapi/src/fake_solver.rs rename to hotshot-fakeapi/src/fake_solver.rs diff --git a/fakeapi/src/lib.rs b/hotshot-fakeapi/src/lib.rs similarity index 100% rename from fakeapi/src/lib.rs rename to hotshot-fakeapi/src/lib.rs diff --git a/libp2p-networking/.cargo/config b/hotshot-libp2p-networking/.cargo/config similarity index 100% rename from libp2p-networking/.cargo/config rename to hotshot-libp2p-networking/.cargo/config diff --git a/libp2p-networking/.gitignore b/hotshot-libp2p-networking/.gitignore similarity index 100% rename from libp2p-networking/.gitignore rename to hotshot-libp2p-networking/.gitignore diff --git a/libp2p-networking/Cargo.toml b/hotshot-libp2p-networking/Cargo.toml similarity index 97% rename from libp2p-networking/Cargo.toml rename to hotshot-libp2p-networking/Cargo.toml index a7f684a67d..5ba973a693 100644 --- a/libp2p-networking/Cargo.toml +++ b/hotshot-libp2p-networking/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "Libp2p Networking Layer" -name = "libp2p-networking" +name = "hotshot-libp2p-networking" version = { workspace = true } edition = { workspace = true } authors = { workspace = true } diff --git a/libp2p-networking/flamegraph.sh b/hotshot-libp2p-networking/flamegraph.sh similarity index 100% rename from libp2p-networking/flamegraph.sh rename to hotshot-libp2p-networking/flamegraph.sh diff --git a/libp2p-networking/src/lib.rs b/hotshot-libp2p-networking/src/lib.rs similarity index 100% rename from libp2p-networking/src/lib.rs rename to hotshot-libp2p-networking/src/lib.rs diff --git a/libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/dht/bootstrap.rs rename to hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs diff --git a/libp2p-networking/src/network/behaviours/dht/mod.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/dht/mod.rs rename to hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs diff --git a/libp2p-networking/src/network/behaviours/dht/record.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/record.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/dht/record.rs rename to hotshot-libp2p-networking/src/network/behaviours/dht/record.rs diff --git a/libp2p-networking/src/network/behaviours/dht/store/mod.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/store/mod.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/dht/store/mod.rs rename to hotshot-libp2p-networking/src/network/behaviours/dht/store/mod.rs diff --git a/libp2p-networking/src/network/behaviours/dht/store/persistent.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/dht/store/persistent.rs rename to hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs diff --git a/libp2p-networking/src/network/behaviours/dht/store/validated.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/store/validated.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/dht/store/validated.rs rename to hotshot-libp2p-networking/src/network/behaviours/dht/store/validated.rs diff --git a/libp2p-networking/src/network/behaviours/direct_message.rs b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/direct_message.rs rename to hotshot-libp2p-networking/src/network/behaviours/direct_message.rs diff --git a/libp2p-networking/src/network/behaviours/exponential_backoff.rs b/hotshot-libp2p-networking/src/network/behaviours/exponential_backoff.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/exponential_backoff.rs rename to hotshot-libp2p-networking/src/network/behaviours/exponential_backoff.rs diff --git a/libp2p-networking/src/network/behaviours/mod.rs b/hotshot-libp2p-networking/src/network/behaviours/mod.rs similarity index 100% rename from libp2p-networking/src/network/behaviours/mod.rs rename to hotshot-libp2p-networking/src/network/behaviours/mod.rs diff --git a/libp2p-networking/src/network/cbor.rs b/hotshot-libp2p-networking/src/network/cbor.rs similarity index 100% rename from libp2p-networking/src/network/cbor.rs rename to hotshot-libp2p-networking/src/network/cbor.rs diff --git a/libp2p-networking/src/network/def.rs b/hotshot-libp2p-networking/src/network/def.rs similarity index 100% rename from libp2p-networking/src/network/def.rs rename to hotshot-libp2p-networking/src/network/def.rs diff --git a/libp2p-networking/src/network/mod.rs b/hotshot-libp2p-networking/src/network/mod.rs similarity index 100% rename from libp2p-networking/src/network/mod.rs rename to hotshot-libp2p-networking/src/network/mod.rs diff --git a/libp2p-networking/src/network/node.rs b/hotshot-libp2p-networking/src/network/node.rs similarity index 100% rename from libp2p-networking/src/network/node.rs rename to hotshot-libp2p-networking/src/network/node.rs diff --git a/libp2p-networking/src/network/node/config.rs b/hotshot-libp2p-networking/src/network/node/config.rs similarity index 100% rename from libp2p-networking/src/network/node/config.rs rename to hotshot-libp2p-networking/src/network/node/config.rs diff --git a/libp2p-networking/src/network/node/handle.rs b/hotshot-libp2p-networking/src/network/node/handle.rs similarity index 100% rename from libp2p-networking/src/network/node/handle.rs rename to hotshot-libp2p-networking/src/network/node/handle.rs diff --git a/libp2p-networking/src/network/transport.rs b/hotshot-libp2p-networking/src/network/transport.rs similarity index 100% rename from libp2p-networking/src/network/transport.rs rename to hotshot-libp2p-networking/src/network/transport.rs diff --git a/libp2p-networking/web/index.html b/hotshot-libp2p-networking/web/index.html similarity index 100% rename from libp2p-networking/web/index.html rename to hotshot-libp2p-networking/web/index.html diff --git a/macros/Cargo.toml b/hotshot-macros/Cargo.toml similarity index 100% rename from macros/Cargo.toml rename to hotshot-macros/Cargo.toml diff --git a/macros/src/lib.rs b/hotshot-macros/src/lib.rs similarity index 100% rename from macros/src/lib.rs rename to hotshot-macros/src/lib.rs diff --git a/orchestrator/Cargo.toml b/hotshot-orchestrator/Cargo.toml similarity index 100% rename from orchestrator/Cargo.toml rename to hotshot-orchestrator/Cargo.toml diff --git a/orchestrator/README.md b/hotshot-orchestrator/README.md similarity index 100% rename from orchestrator/README.md rename to hotshot-orchestrator/README.md diff --git a/orchestrator/api.toml b/hotshot-orchestrator/api.toml similarity index 100% rename from orchestrator/api.toml rename to hotshot-orchestrator/api.toml diff --git a/orchestrator/run-config.toml b/hotshot-orchestrator/run-config.toml similarity index 100% rename from orchestrator/run-config.toml rename to hotshot-orchestrator/run-config.toml diff --git a/orchestrator/src/client.rs b/hotshot-orchestrator/src/client.rs similarity index 100% rename from orchestrator/src/client.rs rename to hotshot-orchestrator/src/client.rs diff --git a/orchestrator/src/lib.rs b/hotshot-orchestrator/src/lib.rs similarity index 100% rename from orchestrator/src/lib.rs rename to hotshot-orchestrator/src/lib.rs diff --git a/orchestrator/staging-config.toml b/hotshot-orchestrator/staging-config.toml similarity index 100% rename from orchestrator/staging-config.toml rename to hotshot-orchestrator/staging-config.toml diff --git a/task-impls/Cargo.toml b/hotshot-task-impls/Cargo.toml similarity index 100% rename from task-impls/Cargo.toml rename to hotshot-task-impls/Cargo.toml diff --git a/task-impls/HotShot_event_architecture.drawio b/hotshot-task-impls/HotShot_event_architecture.drawio similarity index 100% rename from task-impls/HotShot_event_architecture.drawio rename to hotshot-task-impls/HotShot_event_architecture.drawio diff --git a/task-impls/HotShot_event_architecture.png b/hotshot-task-impls/HotShot_event_architecture.png similarity index 100% rename from task-impls/HotShot_event_architecture.png rename to hotshot-task-impls/HotShot_event_architecture.png diff --git a/task-impls/README.md b/hotshot-task-impls/README.md similarity index 100% rename from task-impls/README.md rename to hotshot-task-impls/README.md diff --git a/task-impls/src/builder.rs b/hotshot-task-impls/src/builder.rs similarity index 100% rename from task-impls/src/builder.rs rename to hotshot-task-impls/src/builder.rs diff --git a/task-impls/src/consensus/handlers.rs b/hotshot-task-impls/src/consensus/handlers.rs similarity index 100% rename from task-impls/src/consensus/handlers.rs rename to hotshot-task-impls/src/consensus/handlers.rs diff --git a/task-impls/src/consensus/mod.rs b/hotshot-task-impls/src/consensus/mod.rs similarity index 100% rename from task-impls/src/consensus/mod.rs rename to hotshot-task-impls/src/consensus/mod.rs diff --git a/task-impls/src/da.rs b/hotshot-task-impls/src/da.rs similarity index 100% rename from task-impls/src/da.rs rename to hotshot-task-impls/src/da.rs diff --git a/task-impls/src/events.rs b/hotshot-task-impls/src/events.rs similarity index 100% rename from task-impls/src/events.rs rename to hotshot-task-impls/src/events.rs diff --git a/task-impls/src/harness.rs b/hotshot-task-impls/src/harness.rs similarity index 100% rename from task-impls/src/harness.rs rename to hotshot-task-impls/src/harness.rs diff --git a/task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs similarity index 100% rename from task-impls/src/helpers.rs rename to hotshot-task-impls/src/helpers.rs diff --git a/task-impls/src/lib.rs b/hotshot-task-impls/src/lib.rs similarity index 100% rename from task-impls/src/lib.rs rename to hotshot-task-impls/src/lib.rs diff --git a/task-impls/src/network.rs b/hotshot-task-impls/src/network.rs similarity index 100% rename from task-impls/src/network.rs rename to hotshot-task-impls/src/network.rs diff --git a/task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs similarity index 100% rename from task-impls/src/quorum_proposal/handlers.rs rename to hotshot-task-impls/src/quorum_proposal/handlers.rs diff --git a/task-impls/src/quorum_proposal/mod.rs b/hotshot-task-impls/src/quorum_proposal/mod.rs similarity index 100% rename from task-impls/src/quorum_proposal/mod.rs rename to hotshot-task-impls/src/quorum_proposal/mod.rs diff --git a/task-impls/src/quorum_proposal_recv/handlers.rs b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs similarity index 100% rename from task-impls/src/quorum_proposal_recv/handlers.rs rename to hotshot-task-impls/src/quorum_proposal_recv/handlers.rs diff --git a/task-impls/src/quorum_proposal_recv/mod.rs b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs similarity index 100% rename from task-impls/src/quorum_proposal_recv/mod.rs rename to hotshot-task-impls/src/quorum_proposal_recv/mod.rs diff --git a/task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs similarity index 100% rename from task-impls/src/quorum_vote/handlers.rs rename to hotshot-task-impls/src/quorum_vote/handlers.rs diff --git a/task-impls/src/quorum_vote/mod.rs b/hotshot-task-impls/src/quorum_vote/mod.rs similarity index 100% rename from task-impls/src/quorum_vote/mod.rs rename to hotshot-task-impls/src/quorum_vote/mod.rs diff --git a/task-impls/src/request.rs b/hotshot-task-impls/src/request.rs similarity index 100% rename from task-impls/src/request.rs rename to hotshot-task-impls/src/request.rs diff --git a/task-impls/src/response.rs b/hotshot-task-impls/src/response.rs similarity index 100% rename from task-impls/src/response.rs rename to hotshot-task-impls/src/response.rs diff --git a/task-impls/src/rewind.rs b/hotshot-task-impls/src/rewind.rs similarity index 100% rename from task-impls/src/rewind.rs rename to hotshot-task-impls/src/rewind.rs diff --git a/task-impls/src/transactions.rs b/hotshot-task-impls/src/transactions.rs similarity index 100% rename from task-impls/src/transactions.rs rename to hotshot-task-impls/src/transactions.rs diff --git a/task-impls/src/upgrade.rs b/hotshot-task-impls/src/upgrade.rs similarity index 100% rename from task-impls/src/upgrade.rs rename to hotshot-task-impls/src/upgrade.rs diff --git a/task-impls/src/vid.rs b/hotshot-task-impls/src/vid.rs similarity index 100% rename from task-impls/src/vid.rs rename to hotshot-task-impls/src/vid.rs diff --git a/task-impls/src/view_sync.rs b/hotshot-task-impls/src/view_sync.rs similarity index 100% rename from task-impls/src/view_sync.rs rename to hotshot-task-impls/src/view_sync.rs diff --git a/task-impls/src/vote_collection.rs b/hotshot-task-impls/src/vote_collection.rs similarity index 100% rename from task-impls/src/vote_collection.rs rename to hotshot-task-impls/src/vote_collection.rs diff --git a/task/Cargo.toml b/hotshot-task/Cargo.toml similarity index 100% rename from task/Cargo.toml rename to hotshot-task/Cargo.toml diff --git a/task/src/dependency.rs b/hotshot-task/src/dependency.rs similarity index 100% rename from task/src/dependency.rs rename to hotshot-task/src/dependency.rs diff --git a/task/src/dependency_task.rs b/hotshot-task/src/dependency_task.rs similarity index 100% rename from task/src/dependency_task.rs rename to hotshot-task/src/dependency_task.rs diff --git a/task/src/lib.rs b/hotshot-task/src/lib.rs similarity index 100% rename from task/src/lib.rs rename to hotshot-task/src/lib.rs diff --git a/task/src/task.rs b/hotshot-task/src/task.rs similarity index 100% rename from task/src/task.rs rename to hotshot-task/src/task.rs diff --git a/testing/.gitignore b/hotshot-testing/.gitignore similarity index 100% rename from testing/.gitignore rename to hotshot-testing/.gitignore diff --git a/testing/Cargo.toml b/hotshot-testing/Cargo.toml similarity index 100% rename from testing/Cargo.toml rename to hotshot-testing/Cargo.toml diff --git a/testing/README.md b/hotshot-testing/README.md similarity index 100% rename from testing/README.md rename to hotshot-testing/README.md diff --git a/testing/src/block_builder/mod.rs b/hotshot-testing/src/block_builder/mod.rs similarity index 100% rename from testing/src/block_builder/mod.rs rename to hotshot-testing/src/block_builder/mod.rs diff --git a/testing/src/block_builder/random.rs b/hotshot-testing/src/block_builder/random.rs similarity index 100% rename from testing/src/block_builder/random.rs rename to hotshot-testing/src/block_builder/random.rs diff --git a/testing/src/block_builder/simple.rs b/hotshot-testing/src/block_builder/simple.rs similarity index 100% rename from testing/src/block_builder/simple.rs rename to hotshot-testing/src/block_builder/simple.rs diff --git a/testing/src/byzantine/byzantine_behaviour.rs b/hotshot-testing/src/byzantine/byzantine_behaviour.rs similarity index 100% rename from testing/src/byzantine/byzantine_behaviour.rs rename to hotshot-testing/src/byzantine/byzantine_behaviour.rs diff --git a/testing/src/byzantine/mod.rs b/hotshot-testing/src/byzantine/mod.rs similarity index 100% rename from testing/src/byzantine/mod.rs rename to hotshot-testing/src/byzantine/mod.rs diff --git a/testing/src/completion_task.rs b/hotshot-testing/src/completion_task.rs similarity index 100% rename from testing/src/completion_task.rs rename to hotshot-testing/src/completion_task.rs diff --git a/testing/src/consistency_task.rs b/hotshot-testing/src/consistency_task.rs similarity index 100% rename from testing/src/consistency_task.rs rename to hotshot-testing/src/consistency_task.rs diff --git a/testing/src/helpers.rs b/hotshot-testing/src/helpers.rs similarity index 100% rename from testing/src/helpers.rs rename to hotshot-testing/src/helpers.rs diff --git a/testing/src/lib.rs b/hotshot-testing/src/lib.rs similarity index 100% rename from testing/src/lib.rs rename to hotshot-testing/src/lib.rs diff --git a/testing/src/node_ctx.rs b/hotshot-testing/src/node_ctx.rs similarity index 100% rename from testing/src/node_ctx.rs rename to hotshot-testing/src/node_ctx.rs diff --git a/testing/src/overall_safety_task.rs b/hotshot-testing/src/overall_safety_task.rs similarity index 100% rename from testing/src/overall_safety_task.rs rename to hotshot-testing/src/overall_safety_task.rs diff --git a/testing/src/predicates/event.rs b/hotshot-testing/src/predicates/event.rs similarity index 100% rename from testing/src/predicates/event.rs rename to hotshot-testing/src/predicates/event.rs diff --git a/testing/src/predicates/mod.rs b/hotshot-testing/src/predicates/mod.rs similarity index 100% rename from testing/src/predicates/mod.rs rename to hotshot-testing/src/predicates/mod.rs diff --git a/testing/src/predicates/upgrade_with_proposal.rs b/hotshot-testing/src/predicates/upgrade_with_proposal.rs similarity index 100% rename from testing/src/predicates/upgrade_with_proposal.rs rename to hotshot-testing/src/predicates/upgrade_with_proposal.rs diff --git a/testing/src/predicates/upgrade_with_vote.rs b/hotshot-testing/src/predicates/upgrade_with_vote.rs similarity index 100% rename from testing/src/predicates/upgrade_with_vote.rs rename to hotshot-testing/src/predicates/upgrade_with_vote.rs diff --git a/testing/src/script.rs b/hotshot-testing/src/script.rs similarity index 100% rename from testing/src/script.rs rename to hotshot-testing/src/script.rs diff --git a/testing/src/spinning_task.rs b/hotshot-testing/src/spinning_task.rs similarity index 100% rename from testing/src/spinning_task.rs rename to hotshot-testing/src/spinning_task.rs diff --git a/testing/src/test_builder.rs b/hotshot-testing/src/test_builder.rs similarity index 100% rename from testing/src/test_builder.rs rename to hotshot-testing/src/test_builder.rs diff --git a/testing/src/test_helpers.rs b/hotshot-testing/src/test_helpers.rs similarity index 100% rename from testing/src/test_helpers.rs rename to hotshot-testing/src/test_helpers.rs diff --git a/testing/src/test_launcher.rs b/hotshot-testing/src/test_launcher.rs similarity index 100% rename from testing/src/test_launcher.rs rename to hotshot-testing/src/test_launcher.rs diff --git a/testing/src/test_runner.rs b/hotshot-testing/src/test_runner.rs similarity index 100% rename from testing/src/test_runner.rs rename to hotshot-testing/src/test_runner.rs diff --git a/testing/src/test_task.rs b/hotshot-testing/src/test_task.rs similarity index 100% rename from testing/src/test_task.rs rename to hotshot-testing/src/test_task.rs diff --git a/testing/src/txn_task.rs b/hotshot-testing/src/txn_task.rs similarity index 100% rename from testing/src/txn_task.rs rename to hotshot-testing/src/txn_task.rs diff --git a/testing/src/view_generator.rs b/hotshot-testing/src/view_generator.rs similarity index 100% rename from testing/src/view_generator.rs rename to hotshot-testing/src/view_generator.rs diff --git a/testing/src/view_sync_task.rs b/hotshot-testing/src/view_sync_task.rs similarity index 100% rename from testing/src/view_sync_task.rs rename to hotshot-testing/src/view_sync_task.rs diff --git a/testing/tests/tests_1.rs b/hotshot-testing/tests/tests_1.rs similarity index 100% rename from testing/tests/tests_1.rs rename to hotshot-testing/tests/tests_1.rs diff --git a/testing/tests/tests_1/block_builder.rs b/hotshot-testing/tests/tests_1/block_builder.rs similarity index 100% rename from testing/tests/tests_1/block_builder.rs rename to hotshot-testing/tests/tests_1/block_builder.rs diff --git a/testing/tests/tests_1/da_task.rs b/hotshot-testing/tests/tests_1/da_task.rs similarity index 100% rename from testing/tests/tests_1/da_task.rs rename to hotshot-testing/tests/tests_1/da_task.rs diff --git a/testing/tests/tests_1/gen_key_pair.rs b/hotshot-testing/tests/tests_1/gen_key_pair.rs similarity index 100% rename from testing/tests/tests_1/gen_key_pair.rs rename to hotshot-testing/tests/tests_1/gen_key_pair.rs diff --git a/testing/tests/tests_1/libp2p.rs b/hotshot-testing/tests/tests_1/libp2p.rs similarity index 100% rename from testing/tests/tests_1/libp2p.rs rename to hotshot-testing/tests/tests_1/libp2p.rs diff --git a/testing/tests/tests_1/message.rs b/hotshot-testing/tests/tests_1/message.rs similarity index 100% rename from testing/tests/tests_1/message.rs rename to hotshot-testing/tests/tests_1/message.rs diff --git a/testing/tests/tests_1/network_task.rs b/hotshot-testing/tests/tests_1/network_task.rs similarity index 100% rename from testing/tests/tests_1/network_task.rs rename to hotshot-testing/tests/tests_1/network_task.rs diff --git a/testing/tests/tests_1/quorum_proposal_recv_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs similarity index 100% rename from testing/tests/tests_1/quorum_proposal_recv_task.rs rename to hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs diff --git a/testing/tests/tests_1/quorum_proposal_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs similarity index 100% rename from testing/tests/tests_1/quorum_proposal_task.rs rename to hotshot-testing/tests/tests_1/quorum_proposal_task.rs diff --git a/testing/tests/tests_1/quorum_vote_task.rs b/hotshot-testing/tests/tests_1/quorum_vote_task.rs similarity index 100% rename from testing/tests/tests_1/quorum_vote_task.rs rename to hotshot-testing/tests/tests_1/quorum_vote_task.rs diff --git a/testing/tests/tests_1/test_success.rs b/hotshot-testing/tests/tests_1/test_success.rs similarity index 100% rename from testing/tests/tests_1/test_success.rs rename to hotshot-testing/tests/tests_1/test_success.rs diff --git a/testing/tests/tests_1/test_with_failures_2.rs b/hotshot-testing/tests/tests_1/test_with_failures_2.rs similarity index 100% rename from testing/tests/tests_1/test_with_failures_2.rs rename to hotshot-testing/tests/tests_1/test_with_failures_2.rs diff --git a/testing/tests/tests_1/transaction_task.rs b/hotshot-testing/tests/tests_1/transaction_task.rs similarity index 100% rename from testing/tests/tests_1/transaction_task.rs rename to hotshot-testing/tests/tests_1/transaction_task.rs diff --git a/testing/tests/tests_1/upgrade_task_with_proposal.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs similarity index 100% rename from testing/tests/tests_1/upgrade_task_with_proposal.rs rename to hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs diff --git a/testing/tests/tests_1/upgrade_task_with_vote.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs similarity index 100% rename from testing/tests/tests_1/upgrade_task_with_vote.rs rename to hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs diff --git a/testing/tests/tests_1/vid_task.rs b/hotshot-testing/tests/tests_1/vid_task.rs similarity index 100% rename from testing/tests/tests_1/vid_task.rs rename to hotshot-testing/tests/tests_1/vid_task.rs diff --git a/testing/tests/tests_1/view_sync_task.rs b/hotshot-testing/tests/tests_1/view_sync_task.rs similarity index 100% rename from testing/tests/tests_1/view_sync_task.rs rename to hotshot-testing/tests/tests_1/view_sync_task.rs diff --git a/testing/tests/tests_1/vote_dependency_handle.rs b/hotshot-testing/tests/tests_1/vote_dependency_handle.rs similarity index 100% rename from testing/tests/tests_1/vote_dependency_handle.rs rename to hotshot-testing/tests/tests_1/vote_dependency_handle.rs diff --git a/testing/tests/tests_2.rs b/hotshot-testing/tests/tests_2.rs similarity index 100% rename from testing/tests/tests_2.rs rename to hotshot-testing/tests/tests_2.rs diff --git a/testing/tests/tests_2/catchup.rs b/hotshot-testing/tests/tests_2/catchup.rs similarity index 100% rename from testing/tests/tests_2/catchup.rs rename to hotshot-testing/tests/tests_2/catchup.rs diff --git a/testing/tests/tests_3.rs b/hotshot-testing/tests/tests_3.rs similarity index 100% rename from testing/tests/tests_3.rs rename to hotshot-testing/tests/tests_3.rs diff --git a/testing/tests/tests_3/byzantine_tests.rs b/hotshot-testing/tests/tests_3/byzantine_tests.rs similarity index 100% rename from testing/tests/tests_3/byzantine_tests.rs rename to hotshot-testing/tests/tests_3/byzantine_tests.rs diff --git a/testing/tests/tests_3/memory_network.rs b/hotshot-testing/tests/tests_3/memory_network.rs similarity index 100% rename from testing/tests/tests_3/memory_network.rs rename to hotshot-testing/tests/tests_3/memory_network.rs diff --git a/testing/tests/tests_3/test_with_failures_half_f.rs b/hotshot-testing/tests/tests_3/test_with_failures_half_f.rs similarity index 100% rename from testing/tests/tests_3/test_with_failures_half_f.rs rename to hotshot-testing/tests/tests_3/test_with_failures_half_f.rs diff --git a/testing/tests/tests_4.rs b/hotshot-testing/tests/tests_4.rs similarity index 100% rename from testing/tests/tests_4.rs rename to hotshot-testing/tests/tests_4.rs diff --git a/testing/tests/tests_4/byzantine_tests.rs b/hotshot-testing/tests/tests_4/byzantine_tests.rs similarity index 100% rename from testing/tests/tests_4/byzantine_tests.rs rename to hotshot-testing/tests/tests_4/byzantine_tests.rs diff --git a/testing/tests/tests_4/test_marketplace.rs b/hotshot-testing/tests/tests_4/test_marketplace.rs similarity index 100% rename from testing/tests/tests_4/test_marketplace.rs rename to hotshot-testing/tests/tests_4/test_marketplace.rs diff --git a/testing/tests/tests_4/test_with_builder_failures.rs b/hotshot-testing/tests/tests_4/test_with_builder_failures.rs similarity index 100% rename from testing/tests/tests_4/test_with_builder_failures.rs rename to hotshot-testing/tests/tests_4/test_with_builder_failures.rs diff --git a/testing/tests/tests_4/test_with_failures_f.rs b/hotshot-testing/tests/tests_4/test_with_failures_f.rs similarity index 100% rename from testing/tests/tests_4/test_with_failures_f.rs rename to hotshot-testing/tests/tests_4/test_with_failures_f.rs diff --git a/testing/tests/tests_5.rs b/hotshot-testing/tests/tests_5.rs similarity index 100% rename from testing/tests/tests_5.rs rename to hotshot-testing/tests/tests_5.rs diff --git a/testing/tests/tests_5/broken_3_chain.rs b/hotshot-testing/tests/tests_5/broken_3_chain.rs similarity index 100% rename from testing/tests/tests_5/broken_3_chain.rs rename to hotshot-testing/tests/tests_5/broken_3_chain.rs diff --git a/testing/tests/tests_5/combined_network.rs b/hotshot-testing/tests/tests_5/combined_network.rs similarity index 100% rename from testing/tests/tests_5/combined_network.rs rename to hotshot-testing/tests/tests_5/combined_network.rs diff --git a/testing/tests/tests_5/fake_solver.rs b/hotshot-testing/tests/tests_5/fake_solver.rs similarity index 100% rename from testing/tests/tests_5/fake_solver.rs rename to hotshot-testing/tests/tests_5/fake_solver.rs diff --git a/testing/tests/tests_5/push_cdn.rs b/hotshot-testing/tests/tests_5/push_cdn.rs similarity index 100% rename from testing/tests/tests_5/push_cdn.rs rename to hotshot-testing/tests/tests_5/push_cdn.rs diff --git a/testing/tests/tests_5/test_with_failures.rs b/hotshot-testing/tests/tests_5/test_with_failures.rs similarity index 100% rename from testing/tests/tests_5/test_with_failures.rs rename to hotshot-testing/tests/tests_5/test_with_failures.rs diff --git a/testing/tests/tests_5/timeout.rs b/hotshot-testing/tests/tests_5/timeout.rs similarity index 100% rename from testing/tests/tests_5/timeout.rs rename to hotshot-testing/tests/tests_5/timeout.rs diff --git a/testing/tests/tests_5/unreliable_network.rs b/hotshot-testing/tests/tests_5/unreliable_network.rs similarity index 100% rename from testing/tests/tests_5/unreliable_network.rs rename to hotshot-testing/tests/tests_5/unreliable_network.rs diff --git a/testing/tests/tests_6.rs b/hotshot-testing/tests/tests_6.rs similarity index 100% rename from testing/tests/tests_6.rs rename to hotshot-testing/tests/tests_6.rs diff --git a/testing/tests/tests_6/test_epochs.rs b/hotshot-testing/tests/tests_6/test_epochs.rs similarity index 100% rename from testing/tests/tests_6/test_epochs.rs rename to hotshot-testing/tests/tests_6/test_epochs.rs diff --git a/types/Cargo.toml b/hotshot-types/Cargo.toml similarity index 100% rename from types/Cargo.toml rename to hotshot-types/Cargo.toml diff --git a/types/src/bundle.rs b/hotshot-types/src/bundle.rs similarity index 100% rename from types/src/bundle.rs rename to hotshot-types/src/bundle.rs diff --git a/types/src/consensus.rs b/hotshot-types/src/consensus.rs similarity index 100% rename from types/src/consensus.rs rename to hotshot-types/src/consensus.rs diff --git a/types/src/constants.rs b/hotshot-types/src/constants.rs similarity index 100% rename from types/src/constants.rs rename to hotshot-types/src/constants.rs diff --git a/types/src/data.rs b/hotshot-types/src/data.rs similarity index 100% rename from types/src/data.rs rename to hotshot-types/src/data.rs diff --git a/types/src/data/vid_disperse.rs b/hotshot-types/src/data/vid_disperse.rs similarity index 100% rename from types/src/data/vid_disperse.rs rename to hotshot-types/src/data/vid_disperse.rs diff --git a/types/src/drb.rs b/hotshot-types/src/drb.rs similarity index 100% rename from types/src/drb.rs rename to hotshot-types/src/drb.rs diff --git a/types/src/error.rs b/hotshot-types/src/error.rs similarity index 100% rename from types/src/error.rs rename to hotshot-types/src/error.rs diff --git a/types/src/event.rs b/hotshot-types/src/event.rs similarity index 100% rename from types/src/event.rs rename to hotshot-types/src/event.rs diff --git a/types/src/hotshot_config_file.rs b/hotshot-types/src/hotshot_config_file.rs similarity index 100% rename from types/src/hotshot_config_file.rs rename to hotshot-types/src/hotshot_config_file.rs diff --git a/types/src/lib.rs b/hotshot-types/src/lib.rs similarity index 100% rename from types/src/lib.rs rename to hotshot-types/src/lib.rs diff --git a/types/src/light_client.rs b/hotshot-types/src/light_client.rs similarity index 100% rename from types/src/light_client.rs rename to hotshot-types/src/light_client.rs diff --git a/types/src/message.rs b/hotshot-types/src/message.rs similarity index 100% rename from types/src/message.rs rename to hotshot-types/src/message.rs diff --git a/types/src/network.rs b/hotshot-types/src/network.rs similarity index 100% rename from types/src/network.rs rename to hotshot-types/src/network.rs diff --git a/types/src/qc.rs b/hotshot-types/src/qc.rs similarity index 100% rename from types/src/qc.rs rename to hotshot-types/src/qc.rs diff --git a/types/src/request_response.rs b/hotshot-types/src/request_response.rs similarity index 100% rename from types/src/request_response.rs rename to hotshot-types/src/request_response.rs diff --git a/types/src/signature_key.rs b/hotshot-types/src/signature_key.rs similarity index 100% rename from types/src/signature_key.rs rename to hotshot-types/src/signature_key.rs diff --git a/types/src/simple_certificate.rs b/hotshot-types/src/simple_certificate.rs similarity index 100% rename from types/src/simple_certificate.rs rename to hotshot-types/src/simple_certificate.rs diff --git a/types/src/simple_vote.rs b/hotshot-types/src/simple_vote.rs similarity index 100% rename from types/src/simple_vote.rs rename to hotshot-types/src/simple_vote.rs diff --git a/types/src/stake_table.rs b/hotshot-types/src/stake_table.rs similarity index 100% rename from types/src/stake_table.rs rename to hotshot-types/src/stake_table.rs diff --git a/types/src/traits.rs b/hotshot-types/src/traits.rs similarity index 100% rename from types/src/traits.rs rename to hotshot-types/src/traits.rs diff --git a/types/src/traits/auction_results_provider.rs b/hotshot-types/src/traits/auction_results_provider.rs similarity index 100% rename from types/src/traits/auction_results_provider.rs rename to hotshot-types/src/traits/auction_results_provider.rs diff --git a/types/src/traits/block_contents.rs b/hotshot-types/src/traits/block_contents.rs similarity index 100% rename from types/src/traits/block_contents.rs rename to hotshot-types/src/traits/block_contents.rs diff --git a/types/src/traits/consensus_api.rs b/hotshot-types/src/traits/consensus_api.rs similarity index 100% rename from types/src/traits/consensus_api.rs rename to hotshot-types/src/traits/consensus_api.rs diff --git a/types/src/traits/election.rs b/hotshot-types/src/traits/election.rs similarity index 100% rename from types/src/traits/election.rs rename to hotshot-types/src/traits/election.rs diff --git a/types/src/traits/metrics.rs b/hotshot-types/src/traits/metrics.rs similarity index 100% rename from types/src/traits/metrics.rs rename to hotshot-types/src/traits/metrics.rs diff --git a/types/src/traits/network.rs b/hotshot-types/src/traits/network.rs similarity index 100% rename from types/src/traits/network.rs rename to hotshot-types/src/traits/network.rs diff --git a/types/src/traits/node_implementation.rs b/hotshot-types/src/traits/node_implementation.rs similarity index 100% rename from types/src/traits/node_implementation.rs rename to hotshot-types/src/traits/node_implementation.rs diff --git a/types/src/traits/qc.rs b/hotshot-types/src/traits/qc.rs similarity index 100% rename from types/src/traits/qc.rs rename to hotshot-types/src/traits/qc.rs diff --git a/types/src/traits/signature_key.rs b/hotshot-types/src/traits/signature_key.rs similarity index 100% rename from types/src/traits/signature_key.rs rename to hotshot-types/src/traits/signature_key.rs diff --git a/types/src/traits/stake_table.rs b/hotshot-types/src/traits/stake_table.rs similarity index 100% rename from types/src/traits/stake_table.rs rename to hotshot-types/src/traits/stake_table.rs diff --git a/types/src/traits/states.rs b/hotshot-types/src/traits/states.rs similarity index 100% rename from types/src/traits/states.rs rename to hotshot-types/src/traits/states.rs diff --git a/types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs similarity index 100% rename from types/src/traits/storage.rs rename to hotshot-types/src/traits/storage.rs diff --git a/types/src/upgrade_config.rs b/hotshot-types/src/upgrade_config.rs similarity index 100% rename from types/src/upgrade_config.rs rename to hotshot-types/src/upgrade_config.rs diff --git a/types/src/utils.rs b/hotshot-types/src/utils.rs similarity index 100% rename from types/src/utils.rs rename to hotshot-types/src/utils.rs diff --git a/types/src/validator_config.rs b/hotshot-types/src/validator_config.rs similarity index 100% rename from types/src/validator_config.rs rename to hotshot-types/src/validator_config.rs diff --git a/types/src/vid.rs b/hotshot-types/src/vid.rs similarity index 100% rename from types/src/vid.rs rename to hotshot-types/src/vid.rs diff --git a/types/src/vid/advz.rs b/hotshot-types/src/vid/advz.rs similarity index 100% rename from types/src/vid/advz.rs rename to hotshot-types/src/vid/advz.rs diff --git a/types/src/vid/avidm.rs b/hotshot-types/src/vid/avidm.rs similarity index 100% rename from types/src/vid/avidm.rs rename to hotshot-types/src/vid/avidm.rs diff --git a/types/src/vote.rs b/hotshot-types/src/vote.rs similarity index 100% rename from types/src/vote.rs rename to hotshot-types/src/vote.rs diff --git a/utils/Cargo.toml b/hotshot-utils/Cargo.toml similarity index 90% rename from utils/Cargo.toml rename to hotshot-utils/Cargo.toml index 16a70a6467..9a210f265b 100644 --- a/utils/Cargo.toml +++ b/hotshot-utils/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "utils" +name = "hotshot-utils" version = { workspace = true } edition = { workspace = true } description = "Utils" diff --git a/utils/src/anytrace.rs b/hotshot-utils/src/anytrace.rs similarity index 100% rename from utils/src/anytrace.rs rename to hotshot-utils/src/anytrace.rs diff --git a/utils/src/anytrace/macros.rs b/hotshot-utils/src/anytrace/macros.rs similarity index 100% rename from utils/src/anytrace/macros.rs rename to hotshot-utils/src/anytrace/macros.rs diff --git a/utils/src/lib.rs b/hotshot-utils/src/lib.rs similarity index 100% rename from utils/src/lib.rs rename to hotshot-utils/src/lib.rs From caf7d33c0fb881b6c33b0667a9132f620c805f6e Mon Sep 17 00:00:00 2001 From: Jarred Parr Date: Fri, 7 Feb 2025 10:21:55 -0700 Subject: [PATCH 1391/1393] lockfile --- sequencer-sqlite/Cargo.lock | 414 +++++++++--------------------------- 1 file changed, 102 insertions(+), 312 deletions(-) diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index dbb0805e83..4b9daaa755 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -2072,40 +2072,17 @@ dependencies = [ "shlex", ] -[[package]] -name = "cdn-broker" -version = "0.4.0" -source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade#849e7edb32788e42738541ba4d5c64d3e061d86d" -dependencies = [ - "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade)", - "clap", - "console-subscriber", - "dashmap", - "derivative", - "jf-signature 0.1.0", - "lazy_static", - "local-ip-address", - "parking_lot", - "portpicker", - "prometheus", - "rand 0.8.5", - "rkyv", - "tokio", - "tracing", - "tracing-subscriber 0.3.19", -] - [[package]] name = "cdn-broker" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6#9409763dbcb726e43218c3c4cfde91c7d5de6a52" dependencies = [ - "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6)", + "cdn-proto", "clap", "console-subscriber", "dashmap", "derivative", - "jf-signature 0.2.0", + "jf-signature", "lazy_static", "local-ip-address", "parking_lot", @@ -2123,10 +2100,10 @@ name = "cdn-client" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6#9409763dbcb726e43218c3c4cfde91c7d5de6a52" dependencies = [ - "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6)", + "cdn-proto", "clap", "derive_more 1.0.0", - "jf-signature 0.2.0", + "jf-signature", "parking_lot", "rand 0.8.5", "tokio", @@ -2134,66 +2111,19 @@ dependencies = [ "tracing-subscriber 0.3.19", ] -[[package]] -name = "cdn-marshal" -version = "0.4.0" -source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade#849e7edb32788e42738541ba4d5c64d3e061d86d" -dependencies = [ - "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade)", - "clap", - "jf-signature 0.1.0", - "tokio", - "tracing", - "tracing-subscriber 0.3.19", -] - [[package]] name = "cdn-marshal" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6#9409763dbcb726e43218c3c4cfde91c7d5de6a52" dependencies = [ - "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6)", + "cdn-proto", "clap", - "jf-signature 0.2.0", + "jf-signature", "tokio", "tracing", "tracing-subscriber 0.3.19", ] -[[package]] -name = "cdn-proto" -version = "0.4.0" -source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade#849e7edb32788e42738541ba4d5c64d3e061d86d" -dependencies = [ - "anyhow", - "ark-serialize 0.4.2", - "async-trait", - "capnp", - "capnpc", - "derivative", - "jf-signature 0.1.0", - "kanal", - "lazy_static", - "mnemonic", - "num_enum", - "pem 3.0.4", - "prometheus", - "quinn", - "rand 0.8.5", - "rcgen 0.13.2", - "redis", - "rkyv", - "rustls 0.23.20", - "rustls-pki-types", - "sqlx", - "thiserror 1.0.69", - "tokio", - "tokio-rustls 0.26.1", - "tracing", - "url", - "warp", -] - [[package]] name = "cdn-proto" version = "0.4.0" @@ -2206,7 +2136,7 @@ dependencies = [ "capnpc", "derivative", "derive_more 1.0.0", - "jf-signature 0.2.0", + "jf-signature", "kanal", "lazy_static", "mnemonic", @@ -4652,8 +4582,7 @@ dependencies = [ [[package]] name = "hotshot" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "anyhow", "async-broadcast", @@ -4662,20 +4591,21 @@ dependencies = [ "bimap", "bincode", "blake3", - "cdn-broker 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6)", + "cdn-broker", "cdn-client", - "cdn-marshal 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.6)", + "cdn-marshal", "chrono", "committable", "dashmap", "derive_more 1.0.0", "either", "futures", + "hotshot-libp2p-networking", "hotshot-task", "hotshot-task-impls", "hotshot-types", + "hotshot-utils", "libp2p-identity", - "libp2p-networking", "lru 0.12.5", "num_enum", "parking_lot", @@ -4689,7 +4619,6 @@ dependencies = [ "tracing", "tracing-subscriber 0.3.19", "url", - "utils", "vbs", "workspace-hack", ] @@ -4697,7 +4626,6 @@ dependencies = [ [[package]] name = "hotshot-builder-api" version = "0.1.7" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" dependencies = [ "async-trait", "clap", @@ -4707,7 +4635,7 @@ dependencies = [ "hotshot-types", "serde", "tagged-base64", - "thiserror 2.0.10", + "thiserror 1.0.69", "tide-disco", "toml 0.8.19", "vbs", @@ -4764,8 +4692,7 @@ dependencies = [ [[package]] name = "hotshot-example-types" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "anyhow", "async-lock 3.4.0", @@ -4780,7 +4707,7 @@ dependencies = [ "serde", "sha2 0.10.8", "sha3", - "thiserror 2.0.10", + "thiserror 1.0.69", "time 0.3.37", "tokio", "url", @@ -4790,8 +4717,7 @@ dependencies = [ [[package]] name = "hotshot-fakeapi" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "anyhow", "async-lock 3.4.0", @@ -4807,10 +4733,37 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "hotshot-libp2p-networking" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-lock 3.4.0", + "async-trait", + "bincode", + "blake3", + "cbor4ii 1.0.0", + "delegate", + "derive_builder", + "derive_more 1.0.0", + "futures", + "hotshot-types", + "lazy_static", + "libp2p", + "libp2p-identity", + "libp2p-swarm-derive", + "pin-project", + "rand 0.8.5", + "serde", + "tokio", + "tracing", + "tracing-subscriber 0.3.19", + "workspace-hack", +] + [[package]] name = "hotshot-macros" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "derive_builder", "proc-macro2", @@ -4821,8 +4774,7 @@ dependencies = [ [[package]] name = "hotshot-orchestrator" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "anyhow", "async-lock 3.4.0", @@ -4894,8 +4846,7 @@ dependencies = [ [[package]] name = "hotshot-stake-table" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "ark-bn254", "ark-ed-on-bn254", @@ -4906,7 +4857,7 @@ dependencies = [ "hotshot-types", "jf-crhf", "jf-rescue", - "jf-signature 0.2.0", + "jf-signature", "jf-utils", "primitive-types", "serde", @@ -4940,7 +4891,7 @@ dependencies = [ "jf-plonk", "jf-relation", "jf-rescue", - "jf-signature 0.2.0", + "jf-signature", "jf-utils", "reqwest 0.12.12", "sequencer-utils", @@ -4957,22 +4908,20 @@ dependencies = [ [[package]] name = "hotshot-task" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "async-broadcast", "async-trait", "futures", + "hotshot-utils", "tokio", "tracing", - "utils", "workspace-hack", ] [[package]] name = "hotshot-task-impls" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "anyhow", "async-broadcast", @@ -4986,6 +4935,7 @@ dependencies = [ "hotshot-builder-api", "hotshot-task", "hotshot-types", + "hotshot-utils", "jf-vid", "lru 0.12.5", "rand 0.8.5", @@ -4993,12 +4943,11 @@ dependencies = [ "sha2 0.10.8", "surf-disco", "tagged-base64", - "thiserror 2.0.10", + "thiserror 1.0.69", "time 0.3.37", "tokio", "tracing", "url", - "utils", "vbs", "vec1", "workspace-hack", @@ -5006,8 +4955,7 @@ dependencies = [ [[package]] name = "hotshot-testing" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" +version = "0.1.0" dependencies = [ "anyhow", "async-broadcast", @@ -5036,7 +4984,7 @@ dependencies = [ "serde", "sha2 0.10.8", "tagged-base64", - "thiserror 2.0.10", + "thiserror 1.0.69", "tide-disco", "tokio", "tracing", @@ -5049,7 +4997,6 @@ dependencies = [ [[package]] name = "hotshot-types" version = "0.1.11" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" dependencies = [ "anyhow", "ark-bn254", @@ -5071,8 +5018,9 @@ dependencies = [ "dyn-clone", "either", "futures", + "hotshot-utils", "jf-pcs", - "jf-signature 0.2.0", + "jf-signature", "jf-utils", "jf-vid", "lazy_static", @@ -5089,19 +5037,26 @@ dependencies = [ "serde_json", "sha2 0.10.8", "tagged-base64", - "thiserror 2.0.10", + "thiserror 1.0.69", "time 0.3.37", "tokio", "toml 0.8.19", "tracing", "typenum", "url", - "utils", "vbs", "vec1", "workspace-hack", ] +[[package]] +name = "hotshot-utils" +version = "0.1.0" +dependencies = [ + "tracing", + "workspace-hack", +] + [[package]] name = "http" version = "0.2.12" @@ -5942,35 +5897,6 @@ dependencies = [ "jf-utils", ] -[[package]] -name = "jf-signature" -version = "0.1.0" -source = "git+https://github.com/EspressoSystems/jellyfish?tag=0.4.5#7d71dbeff14f1a501b0b0dc391f1dffa1b8374fb" -dependencies = [ - "ark-bls12-381", - "ark-bn254", - "ark-ec", - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "blst", - "derivative", - "digest 0.10.7", - "displaydoc", - "hashbrown 0.14.5", - "itertools 0.12.1", - "jf-crhf", - "jf-relation", - "jf-rescue", - "jf-utils", - "num-bigint", - "num-traits", - "serde", - "sha3", - "tagged-base64", - "zeroize", -] - [[package]] name = "jf-signature" version = "0.2.0" @@ -6202,29 +6128,6 @@ dependencies = [ "libc", ] -[[package]] -name = "libp2p" -version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" -dependencies = [ - "bytes 1.9.0", - "either", - "futures", - "futures-timer", - "getrandom 0.2.15", - "instant", - "libp2p-allow-block-list 0.3.0", - "libp2p-connection-limits 0.3.1", - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "multiaddr", - "pin-project", - "rw-stream-sink", - "thiserror 1.0.69", -] - [[package]] name = "libp2p" version = "0.54.1" @@ -6236,10 +6139,10 @@ dependencies = [ "futures", "futures-timer", "getrandom 0.2.15", - "libp2p-allow-block-list 0.4.0", + "libp2p-allow-block-list", "libp2p-autonat", - "libp2p-connection-limits 0.4.0", - "libp2p-core 0.42.0", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -6249,7 +6152,7 @@ dependencies = [ "libp2p-metrics", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", "multiaddr", @@ -6258,27 +6161,15 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "libp2p-allow-block-list" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" -dependencies = [ - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "void", -] - [[package]] name = "libp2p-allow-block-list" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "void", ] @@ -6295,10 +6186,10 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -6309,58 +6200,18 @@ dependencies = [ "web-time", ] -[[package]] -name = "libp2p-connection-limits" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" -dependencies = [ - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "void", -] - [[package]] name = "libp2p-connection-limits" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "void", ] -[[package]] -name = "libp2p-core" -version = "0.41.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-identity", - "multiaddr", - "multihash", - "multistream-select", - "once_cell", - "parking_lot", - "pin-project", - "quick-protobuf", - "rand 0.8.5", - "rw-stream-sink", - "smallvec", - "thiserror 1.0.69", - "tracing", - "unsigned-varint 0.8.0", - "void", - "web-time", -] - [[package]] name = "libp2p-core" version = "0.42.0" @@ -6399,7 +6250,7 @@ dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "parking_lot", "smallvec", @@ -6422,9 +6273,9 @@ dependencies = [ "futures-ticker", "getrandom 0.2.15", "hex_fmt", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -6449,9 +6300,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "lru 0.12.5", "quick-protobuf", "quick-protobuf-codec", @@ -6496,9 +6347,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -6522,9 +6373,9 @@ dependencies = [ "futures", "hickory-proto", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand 0.8.5", "smallvec", "socket2 0.5.8", @@ -6540,46 +6391,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-identity", "libp2p-kad", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "pin-project", "prometheus-client", "web-time", ] -[[package]] -name = "libp2p-networking" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" -dependencies = [ - "anyhow", - "async-lock 3.4.0", - "async-trait", - "bincode", - "blake3", - "cbor4ii 1.0.0", - "delegate", - "derive_builder", - "derive_more 1.0.0", - "futures", - "hotshot-types", - "lazy_static", - "libp2p 0.54.1", - "libp2p-identity", - "libp2p-swarm-derive", - "pin-project", - "rand 0.8.5", - "serde", - "tokio", - "tracing", - "tracing-subscriber 0.3.19", - "workspace-hack", -] - [[package]] name = "libp2p-quic" version = "0.11.1" @@ -6590,7 +6412,7 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-tls", "parking_lot", @@ -6615,9 +6437,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand 0.8.5", "serde", "smallvec", @@ -6626,28 +6448,6 @@ dependencies = [ "web-time", ] -[[package]] -name = "libp2p-swarm" -version = "0.44.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-core 0.41.3", - "libp2p-identity", - "lru 0.12.5", - "multistream-select", - "once_cell", - "rand 0.8.5", - "smallvec", - "tracing", - "void", -] - [[package]] name = "libp2p-swarm" version = "0.45.1" @@ -6658,7 +6458,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", "lru 0.12.5", @@ -6694,7 +6494,7 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "socket2 0.5.8", "tokio", @@ -6709,7 +6509,7 @@ checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "rcgen 0.11.3", "ring 0.17.8", @@ -6729,8 +6529,8 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.42.0", - "libp2p-swarm 0.45.1", + "libp2p-core", + "libp2p-swarm", "tokio", "tracing", "void", @@ -9324,8 +9124,8 @@ dependencies = [ "async-once-cell", "async-trait", "bincode", - "cdn-broker 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade)", - "cdn-marshal 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.1-upgrade)", + "cdn-broker", + "cdn-marshal", "clap", "client", "committable", @@ -9341,6 +9141,7 @@ dependencies = [ "hotshot", "hotshot-contract-adapter", "hotshot-events-service", + "hotshot-libp2p-networking", "hotshot-orchestrator", "hotshot-query-service", "hotshot-stake-table", @@ -9351,10 +9152,9 @@ dependencies = [ "jf-crhf", "jf-merkle-tree", "jf-rescue", - "jf-signature 0.2.0", + "jf-signature", "jf-vid", - "libp2p 0.53.2", - "libp2p-networking", + "libp2p", "marketplace-solver", "num_enum", "parking_lot", @@ -11465,15 +11265,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" -[[package]] -name = "utils" -version = "0.5.84" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" -dependencies = [ - "tracing", - "workspace-hack", -] - [[package]] name = "uuid" version = "0.8.2" @@ -12094,7 +11885,6 @@ dependencies = [ [[package]] name = "workspace-hack" version = "0.1.0" -source = "git+https://github.com/EspressoSystems/hotshot?branch=main#e4abb5c631d56814ae65316418cf9cb133c403c1" dependencies = [ "ark-bls12-377", "ark-bls12-381", @@ -12142,7 +11932,7 @@ dependencies = [ "itertools 0.12.1", "jf-relation", "jf-rescue", - "jf-signature 0.2.0", + "jf-signature", "jf-utils", "libp2p-identity", "libsecp256k1-core", From 20635fe2ea0cebb2e8219fc4b604c0f1867345b0 Mon Sep 17 00:00:00 2001 From: Rob Date: Fri, 7 Feb 2025 12:24:52 -0500 Subject: [PATCH 1392/1393] remove `--all-features` --- .github/workflows/build-without-lockfile.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-without-lockfile.yml b/.github/workflows/build-without-lockfile.yml index b60d3ae9ba..8766baa69a 100644 --- a/.github/workflows/build-without-lockfile.yml +++ b/.github/workflows/build-without-lockfile.yml @@ -32,4 +32,4 @@ jobs: - name: Build without committed Cargo.lock run: | cargo generate-lockfile - cargo check --all-features --all-targets + cargo check --all-targets From 783c623ea842723e9b2ac3b460d45dc8d739afd1 Mon Sep 17 00:00:00 2001 From: Rob Date: Fri, 7 Feb 2025 12:27:12 -0500 Subject: [PATCH 1393/1393] ignore orchestrator in `typos.toml` --- .typos.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/.typos.toml b/.typos.toml index 27c830fa35..bd8457462f 100644 --- a/.typos.toml +++ b/.typos.toml @@ -11,4 +11,5 @@ extend-exclude = [ "contract-bindings-alloy", "contract-bindings-ethers", "node-metrics/src/api/node_validator/v0/example_prometheus_metrics_output.txt", + "hotshot-orchestrator/run-config.toml" ]